Dataset schema (one row per vulnerability-fixing record):

| column | dtype | range |
| --- | --- | --- |
| file_name | string | lengths 4-45 |
| method_name | string | lengths 3-58 |
| code_before | string | lengths 980-1.05M |
| code_after | string | lengths 1.13k-1.05M |
| func_before | string | lengths 55-114k |
| func_after | string | lengths 71-114k |
| diff | string | lengths 75-133k |
| num_lines_added | float64 | 1-1.49k |
| num_lines_deleted | float64 | 1-1.13k |
| num_lines_in_file | float64 | 27-23.2k |
| num_tokens_in_file | float64 | 143-192k |
| repo | string | 259 classes |
| cve_id | string | lengths 13-16 |
| cwe_id | string | 73 classes |
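Each record pairs the vulnerable and patched versions of one method: the file and function bodies before and after the fix, the unified diff, line/token statistics, and the CVE/CWE identifiers. A minimal sketch of loading and inspecting a record with the Hugging Face `datasets` library; the dataset path `org/cve-fixes` is a hypothetical placeholder, not something given above:

```python
from datasets import load_dataset

# Hypothetical dataset id; substitute the actual repository path.
ds = load_dataset("org/cve-fixes", split="train")

row = ds[0]
# Identifier and size metadata accompany each before/after pair.
print(row["file_name"], row["method_name"], row["cve_id"], row["cwe_id"])
print(int(row["num_lines_added"]), "lines added,",
      int(row["num_lines_deleted"]), "lines deleted")
```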
file_name: driver.c
method_name: ChunkedDecode
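The `code_before` payload below reproduces driver.c in full, so the quickest way to isolate the security-relevant change for `ChunkedDecode` is to diff the much smaller `func_before`/`func_after` fields instead. A minimal sketch using only the standard library, with `row` as loaded in the previous snippet:

```python
import difflib

# Compare just the patched function, not the whole (potentially ~1 MB) file.
before = row["func_before"].splitlines(keepends=True)
after = row["func_after"].splitlines(keepends=True)

# Rebuilds a patch comparable in spirit to the record's "diff" column.
for line in difflib.unified_diff(before, after,
                                 fromfile="func_before", tofile="func_after"):
    print(line, end="")
```

code_before: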
/* * The contents of this file are subject to the Mozilla Public License * Version 1.1 (the "License"); you may not use this file except in * compliance with the License. You may obtain a copy of the License at * http://mozilla.org/. * * Software distributed under the License is distributed on an "AS IS" * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See * the License for the specific language governing rights and limitations * under the License. * * The Original Code is AOLserver Code and related documentation * distributed by AOL. * * The Initial Developer of the Original Code is America Online, * Inc. Portions created by AOL are Copyright (C) 1999 America Online, * Inc. All Rights Reserved. * * Alternatively, the contents of this file may be used under the terms * of the GNU General Public License (the "GPL"), in which case the * provisions of GPL are applicable instead of those above. If you wish * to allow use of your version of this file only under the terms of the * GPL and not to allow others to use your version of this file under the * License, indicate your decision by deleting the provisions above and * replace them with the notice and other provisions required by the GPL. * If you do not delete the provisions above, a recipient may use your * version of this file under either the License or the GPL. */ /* * driver.c -- * * Connection I/O for loadable socket drivers. */ #include "nsd.h" /* * The following are valid driver state flags. */ #define DRIVER_STARTED 1u #define DRIVER_STOPPED 2u #define DRIVER_SHUTDOWN 4u #define DRIVER_FAILED 8u /* * Constants for SockState return and reason codes. */ typedef enum { SOCK_READY = 0, SOCK_MORE = 1, SOCK_SPOOL = 2, SOCK_ERROR = -1, SOCK_CLOSE = -2, SOCK_CLOSETIMEOUT = -3, SOCK_READTIMEOUT = -4, SOCK_WRITETIMEOUT = -5, SOCK_READERROR = -6, SOCK_WRITEERROR = -7, SOCK_SHUTERROR = -8, SOCK_BADREQUEST = -9, SOCK_ENTITYTOOLARGE = -10, SOCK_BADHEADER = -11, SOCK_TOOMANYHEADERS = -12 } SockState; /* * Subset for spooler states */ typedef enum { SPOOLER_CLOSE = SOCK_CLOSE, SPOOLER_OK = SOCK_READY, SPOOLER_READERROR = SOCK_READERROR, SPOOLER_WRITEERROR = SOCK_WRITEERROR, SPOOLER_CLOSETIMEOUT = SOCK_CLOSETIMEOUT } SpoolerState; typedef struct { SpoolerState spoolerState; SockState sockState; } SpoolerStateMap; /* * ServerMap maintains Host header to server mappings. */ typedef struct ServerMap { NsServer *servPtr; char location[1]; } ServerMap; /* * The following maintains the spooler state mapping */ static const SpoolerStateMap spoolerStateMap[] = { {SPOOLER_CLOSE, SOCK_CLOSE}, {SPOOLER_READERROR, SOCK_READERROR}, {SPOOLER_WRITEERROR, SOCK_WRITEERROR}, {SPOOLER_CLOSETIMEOUT, SOCK_CLOSETIMEOUT}, {SPOOLER_OK, SOCK_READY} }; /* * The following structure manages polling. The PollIn macro is * used for the common case of checking for readability. */ typedef struct PollData { unsigned int nfds; /* Number of fds being monitored. */ unsigned int maxfds; /* Max fds (will grow as needed). */ struct pollfd *pfds; /* Dynamic array of poll structs. */ Ns_Time timeout; /* Min timeout, if any, for next spin. */ } PollData; #define PollIn(ppd, i) (((ppd)->pfds[(i)].revents & POLLIN) == POLLIN ) #define PollOut(ppd, i) (((ppd)->pfds[(i)].revents & POLLOUT) == POLLOUT) #define PollHup(ppd, i) (((ppd)->pfds[(i)].revents & POLLHUP) == POLLHUP) /* * Collected information of writer threads for per pool rates, necessary for * per pool bandwidth management.
*/ typedef struct ConnPoolInfo { size_t threadSlot; int currentPoolRate; int deltaPercentage; } ConnPoolInfo; /* * The following structure maintains writer socket */ typedef struct WriterSock { struct WriterSock *nextPtr; struct Sock *sockPtr; struct SpoolerQueue *queuePtr; struct Conn *connPtr; SpoolerState status; int err; int refCount; unsigned int flags; Tcl_WideInt nsent; size_t size; NsWriterStreamState doStream; int fd; char *headerString; struct ConnPool *poolPtr; union { struct { struct iovec *bufs; /* incoming bufs to be sent */ int nbufs; int bufIdx; struct iovec sbufs[UIO_SMALLIOV]; /* scratch bufs for handling partial sends */ int nsbufs; int sbufIdx; struct iovec preallocated_bufs[UIO_SMALLIOV]; struct FileMap fmap; } mem; struct { size_t maxsize; size_t bufsize; off_t bufoffset; size_t toRead; unsigned char *buf; Ns_FileVec *bufs; int nbufs; int currentbuf; Ns_Mutex fdlock; } file; } c; char *clientData; Ns_Time startTime; int rateLimit; int currentRate; ConnPoolInfo *infoPtr; bool keep; } WriterSock; /* * Async writer definitions */ typedef struct AsyncWriter { Ns_Mutex lock; /* Lock around writer queues */ SpoolerQueue *firstPtr; /* List of writer threads */ } AsyncWriter; /* * AsyncWriteData is similar to WriterSock */ typedef struct AsyncWriteData { struct AsyncWriteData *nextPtr; char *data; int fd; Tcl_WideInt nsent; size_t size; size_t bufsize; const char *buf; } AsyncWriteData; static AsyncWriter *asyncWriter = NULL; /* * Static functions defined in this file. */ static Ns_ThreadProc DriverThread; static Ns_ThreadProc SpoolerThread; static Ns_ThreadProc WriterThread; static Ns_ThreadProc AsyncWriterThread; static Tcl_ObjCmdProc WriterListObjCmd; static Tcl_ObjCmdProc WriterSizeObjCmd; static Tcl_ObjCmdProc WriterStreamingObjCmd; static Tcl_ObjCmdProc WriterSubmitObjCmd; static Tcl_ObjCmdProc WriterSubmitFileObjCmd; static Tcl_ObjCmdProc AsyncLogfileWriteObjCmd; static Tcl_ObjCmdProc AsyncLogfileOpenObjCmd; static Tcl_ObjCmdProc AsyncLogfileCloseObjCmd; static Ns_ReturnCode DriverWriterFromObj(Tcl_Interp *interp, Tcl_Obj *driverObj, Ns_Conn *conn, DrvWriter **wrPtrPtr) NS_GNUC_NONNULL(1) NS_GNUC_NONNULL(4); static NS_SOCKET DriverListen(Driver *drvPtr, const char *bindaddr) NS_GNUC_NONNULL(1) NS_GNUC_NONNULL(2); static NS_DRIVER_ACCEPT_STATUS DriverAccept(Sock *sockPtr, NS_SOCKET sock) NS_GNUC_NONNULL(1); static bool DriverKeep(Sock *sockPtr) NS_GNUC_NONNULL(1); static void DriverClose(Sock *sockPtr) NS_GNUC_NONNULL(1); static Ns_ReturnCode DriverInit(const char *server, const char *moduleName, const char *threadName, const Ns_DriverInitData *init, NsServer *servPtr, const char *path, const char *bindaddrs, const char *defserver, const char *host) NS_GNUC_NONNULL(2) NS_GNUC_NONNULL(3) NS_GNUC_NONNULL(4) NS_GNUC_NONNULL(6) NS_GNUC_NONNULL(7) NS_GNUC_NONNULL(9); static bool DriverModuleInitialized(const char *module) NS_GNUC_NONNULL(1); static void SockSetServer(Sock *sockPtr) NS_GNUC_NONNULL(1); static SockState SockAccept(Driver *drvPtr, NS_SOCKET sock, Sock **sockPtrPtr, const Ns_Time *nowPtr) NS_GNUC_NONNULL(1); static Ns_ReturnCode SockQueue(Sock *sockPtr, const Ns_Time *timePtr) NS_GNUC_NONNULL(1); static Sock *SockNew(Driver *drvPtr) NS_GNUC_NONNULL(1) NS_GNUC_RETURNS_NONNULL; static void SockRelease(Sock *sockPtr, SockState reason, int err) NS_GNUC_NONNULL(1); static void SockError(Sock *sockPtr, SockState reason, int err) NS_GNUC_NONNULL(1); static void SockSendResponse(Sock *sockPtr, int code, const char *errMsg) NS_GNUC_NONNULL(1) NS_GNUC_NONNULL(3); static 
void SockTrigger(NS_SOCKET sock); static void SockTimeout(Sock *sockPtr, const Ns_Time *nowPtr, const Ns_Time *timeout) NS_GNUC_NONNULL(1); static void SockClose(Sock *sockPtr, int keep) NS_GNUC_NONNULL(1); static SockState SockRead(Sock *sockPtr, int spooler, const Ns_Time *timePtr) NS_GNUC_NONNULL(1); static SockState SockParse(Sock *sockPtr) NS_GNUC_NONNULL(1); static void SockPoll(Sock *sockPtr, short type, PollData *pdata) NS_GNUC_NONNULL(1) NS_GNUC_NONNULL(3); static int SockSpoolerQueue(Driver *drvPtr, Sock *sockPtr) NS_GNUC_NONNULL(1) NS_GNUC_NONNULL(2); static void SpoolerQueueStart(SpoolerQueue *queuePtr, Ns_ThreadProc *proc) NS_GNUC_NONNULL(2); static void SpoolerQueueStop(SpoolerQueue *queuePtr, const Ns_Time *timeoutPtr, const char *name) NS_GNUC_NONNULL(2) NS_GNUC_NONNULL(3); static void PollCreate(PollData *pdata) NS_GNUC_NONNULL(1); static void PollFree(PollData *pdata) NS_GNUC_NONNULL(1); static void PollReset(PollData *pdata) NS_GNUC_NONNULL(1); static NS_POLL_NFDS_TYPE PollSet(PollData *pdata, NS_SOCKET sock, short type, const Ns_Time *timeoutPtr) NS_GNUC_NONNULL(1); static int PollWait(const PollData *pdata, int timeout) NS_GNUC_NONNULL(1); static bool ChunkedDecode(Request *reqPtr, bool update) NS_GNUC_NONNULL(1); static WriterSock *WriterSockRequire(const Conn *connPtr) NS_GNUC_NONNULL(1); static void WriterSockRelease(WriterSock *wrSockPtr) NS_GNUC_NONNULL(1); static SpoolerState WriterReadFromSpool(WriterSock *curPtr) NS_GNUC_NONNULL(1); static SpoolerState WriterSend(WriterSock *curPtr, int *err) NS_GNUC_NONNULL(1) NS_GNUC_NONNULL(2); static Ns_ReturnCode WriterSetupStreamingMode(Conn *connPtr, struct iovec *bufs, int nbufs, int *fdPtr) NS_GNUC_NONNULL(1) NS_GNUC_NONNULL(2) NS_GNUC_NONNULL(4); static void WriterSockFileVecCleanup(WriterSock *wrSockPtr) NS_GNUC_NONNULL(1); static int WriterGetMemunitFromDict(Tcl_Interp *interp, Tcl_Obj *dictObj, Tcl_Obj *keyObj, Ns_ObjvValueRange *rangePtr, Tcl_WideInt *valuePtr) NS_GNUC_NONNULL(1) NS_GNUC_NONNULL(2) NS_GNUC_NONNULL(3) NS_GNUC_NONNULL(5); static void AsyncWriterRelease(AsyncWriteData *wdPtr) NS_GNUC_NONNULL(1); static void WriteWarningRaw(const char *msg, int fd, size_t wantWrite, ssize_t written) NS_GNUC_NONNULL(1); static const char *GetSockStateName(SockState sockState); static size_t EndOfHeader(Sock *sockPtr) NS_GNUC_NONNULL(1); static void RequestNew(Sock *sockPtr) NS_GNUC_NONNULL(1); static void RequestFree(Sock *sockPtr) NS_GNUC_NONNULL(1); static void LogBuffer(Ns_LogSeverity severity, const char *msg, const char *buffer, size_t len) NS_GNUC_NONNULL(2) NS_GNUC_NONNULL(3); static void ServerMapEntryAdd(Tcl_DString *dsPtr, const char *host, NsServer *servPtr, Driver *drvPtr, bool addDefaultMapEntry) NS_GNUC_NONNULL(1) NS_GNUC_NONNULL(2) NS_GNUC_NONNULL(3) NS_GNUC_NONNULL(4); static Driver *LookupDriver(Tcl_Interp *interp, const char* protocol, const char *driverName) NS_GNUC_NONNULL(1) NS_GNUC_NONNULL(2); static void WriterPerPoolRates(WriterSock *writePtr, Tcl_HashTable *pools) NS_GNUC_NONNULL(1) NS_GNUC_NONNULL(2); static ConnPoolInfo *WriterGetInfoPtr(WriterSock *curPtr, Tcl_HashTable *pools) NS_GNUC_NONNULL(1) NS_GNUC_NONNULL(2); /* * Global variables defined in this file. 
*/ Ns_LogSeverity Ns_LogTaskDebug; Ns_LogSeverity Ns_LogRequestDebug; Ns_LogSeverity Ns_LogConnchanDebug; Ns_LogSeverity Ns_LogUrlspaceDebug; Ns_LogSeverity Ns_LogAccessDebug; Ns_LogSeverity Ns_LogTimeoutDebug; bool NsWriterBandwidthManagement = NS_FALSE; static Ns_LogSeverity WriterDebug; /* Severity at which to log verbose debugging. */ static Ns_LogSeverity DriverDebug; /* Severity at which to log verbose debugging. */ static Ns_Mutex reqLock = NULL; /* Lock for allocated Request structure pool */ static Ns_Mutex writerlock = NULL; /* Lock updating streaming information in the writer */ static Request *firstReqPtr = NULL; /* Allocated request structures kept in a pool */ static Driver *firstDrvPtr = NULL; /* First in list of all drivers */ #define Push(x, xs) ((x)->nextPtr = (xs), (xs) = (x)) /* *---------------------------------------------------------------------- * * WriteWarningRaw -- * * Write a warning message to stderr. This function is for cases where * writing to Ns_Log can't be used (e.g., in the AsyncWriter, which is * also used for writing to the system log). * * Results: * None. * * Side effects: * Line to stderr. * *---------------------------------------------------------------------- */ static void WriteWarningRaw(const char *msg, int fd, size_t wantWrite, ssize_t written) { fprintf(stderr, "%s: Warning: wanted to write %" PRIuz " bytes, wrote %ld to file descriptor %d\n", msg, wantWrite, (long)written, fd); } /* *---------------------------------------------------------------------- * * GetSockStateName -- * * Return human-readable names for SockState values. * * Results: * string * * Side effects: * None. * *---------------------------------------------------------------------- */ static const char * GetSockStateName(SockState sockState) { int sockStateInt = (int)sockState; static const char *sockStateStrings[] = { "SOCK_READY", "SOCK_MORE", "SOCK_SPOOL", "SOCK_ERROR", "SOCK_CLOSE", "SOCK_CLOSETIMEOUT", "SOCK_READTIMEOUT", "SOCK_WRITETIMEOUT", "SOCK_READERROR", "SOCK_WRITEERROR", "SOCK_SHUTERROR", "SOCK_BADREQUEST", "SOCK_ENTITYTOOLARGE", "SOCK_BADHEADER", "SOCK_TOOMANYHEADERS", NULL }; if (sockStateInt < 0) { sockStateInt = (- sockStateInt) + 2; } assert(sockStateInt < Ns_NrElements(sockStateStrings)); return sockStateStrings[sockStateInt]; } /* *---------------------------------------------------------------------- * * NsInitDrivers -- * * Initialize the drivers system. * * Results: * None. * * Side effects: * None. * *---------------------------------------------------------------------- */ void NsInitDrivers(void) { DriverDebug = Ns_CreateLogSeverity("Debug(ns:driver)"); WriterDebug = Ns_CreateLogSeverity("Debug(writer)"); Ns_LogTaskDebug = Ns_CreateLogSeverity("Debug(task)"); Ns_LogRequestDebug = Ns_CreateLogSeverity("Debug(request)"); Ns_LogConnchanDebug = Ns_CreateLogSeverity("Debug(connchan)"); Ns_LogUrlspaceDebug = Ns_CreateLogSeverity("Debug(urlspace)"); Ns_LogAccessDebug = Ns_CreateLogSeverity("Debug(access)"); Ns_LogTimeoutDebug = Ns_CreateLogSeverity("Debug(timeout)"); Ns_MutexInit(&reqLock); Ns_MutexInit(&writerlock); Ns_MutexSetName2(&reqLock, "ns:driver", "requestpool"); Ns_MutexSetName2(&writerlock, "ns:writer", "stream"); } /* *---------------------------------------------------------------------- * * DriverModuleInitialized -- * * Check if a driver with the specified name is already initialized. * * Results: * Boolean * * Side effects: * None.
* *---------------------------------------------------------------------- */ static bool DriverModuleInitialized(const char *module) { Driver *drvPtr; bool found = NS_FALSE; NS_NONNULL_ASSERT(module != NULL); for (drvPtr = firstDrvPtr; drvPtr != NULL; drvPtr = drvPtr->nextPtr) { if (strcmp(drvPtr->moduleName, module) == 0) { found = NS_TRUE; Ns_Log(Notice, "Driver %s is already initialized", module); break; } } return found; } /* *---------------------------------------------------------------------- * * Ns_DriverInit -- * * Initialize a driver. * * Results: * NS_OK if initialized, NS_ERROR if config or other error. * * Side effects: * Listen socket will be opened later in NsStartDrivers. * *---------------------------------------------------------------------- */ Ns_ReturnCode Ns_DriverInit(const char *server, const char *module, const Ns_DriverInitData *init) { Ns_ReturnCode status = NS_OK; NsServer *servPtr = NULL; bool alreadyInitialized = NS_FALSE; NS_NONNULL_ASSERT(module != NULL); NS_NONNULL_ASSERT(init != NULL); /* * If a server is provided, servPtr must be set. */ if (server != NULL) { servPtr = NsGetServer(server); if (unlikely(servPtr == NULL)) { Ns_Log(Bug, "cannot lookup server structure for server: %s", module); status = NS_ERROR; } } else { alreadyInitialized = DriverModuleInitialized(module); } /* * Check versions of drivers. */ if (status == NS_OK && init->version < NS_DRIVER_VERSION_4) { Ns_Log(Warning, "%s: driver version is too old (version %d), Version 4 is recommended", module, init->version); } #ifdef HAVE_IPV6 if (status == NS_OK && init->version < NS_DRIVER_VERSION_3) { Ns_Log(Error, "%s: driver version is too old (version %d) and does not support IPv6", module, init->version); status = NS_ERROR; } #endif if (status == NS_OK && init->version < NS_DRIVER_VERSION_2) { Ns_Log(Error, "%s: version field of driver is invalid: %d", module, init->version); status = NS_ERROR; } if (!alreadyInitialized && status == NS_OK) { const char *path, *host, *address, *defserver; bool noHostNameGiven; int nrDrivers, nrBindaddrs = 0, result; Ns_Set *set; Tcl_Obj *bindaddrsObj, **objv; path = ((init->path != NULL) ? init->path : Ns_ConfigGetPath(server, module, (char *)0L)); set = Ns_ConfigCreateSection(path); /* * Determine the "defaultserver" the "hostname" / "address" for * binding to and/or the HTTP location string. */ defserver = Ns_ConfigGetValue(path, "defaultserver"); address = Ns_ConfigGetValue(path, "address"); host = Ns_ConfigGetValue(path, "hostname"); noHostNameGiven = (host == NULL); /* * If the listen address was not specified, attempt to determine it * through a DNS lookup of the specified hostname or the server's * primary hostname. 
*/ if (address == NULL) { Tcl_DString ds; Tcl_DStringInit(&ds); if (noHostNameGiven) { host = Ns_InfoHostname(); } if (Ns_GetAllAddrByHost(&ds, host) == NS_TRUE) { address = ns_strdup(Tcl_DStringValue(&ds)); if (path != NULL) { Ns_SetUpdate(set, "address", address); } Ns_Log(Notice, "no address given, obtained address '%s' from host name %s", address, host); } Tcl_DStringFree(&ds); } if (address == NULL) { address = NS_IP_UNSPECIFIED; Ns_Log(Notice, "no address given, set address to unspecified address %s", address); } bindaddrsObj = Tcl_NewStringObj(address, -1); result = Tcl_ListObjGetElements(NULL, bindaddrsObj, &nrBindaddrs, &objv); if (result != TCL_OK || nrBindaddrs < 1 || nrBindaddrs >= MAX_LISTEN_ADDR_PER_DRIVER) { Ns_Fatal("%s: bindaddrs '%s' is not a valid Tcl list containing addresses (max %d)", module, address, MAX_LISTEN_ADDR_PER_DRIVER); } Tcl_IncrRefCount(bindaddrsObj); /* * If the hostname was not specified and not determined by the lookup * above, set it to the first specified or derived IP address string. */ if (host == NULL) { host = ns_strdup(Tcl_GetString(objv[0])); } if (noHostNameGiven && host != NULL && path != NULL) { Ns_SetUpdate(set, "hostname", host); } Tcl_DecrRefCount(bindaddrsObj); /* * Get configured number of driver threads. */ nrDrivers = Ns_ConfigIntRange(path, "driverthreads", 1, 1, 64); if (nrDrivers > 1) { #if !defined(SO_REUSEPORT) Ns_Log(Warning, "server %s module %s requests %d driverthreads, but is not supported by the operating system", server, module, nrDrivers); Ns_SetUpdate(set, "driverthreads", "1"); nrDrivers = 1; #endif } /* * The common parameters are determined, create the driver thread(s) */ { size_t maxModuleNameLength = strlen(module) + (size_t)TCL_INTEGER_SPACE + 1u; char *moduleName = ns_malloc(maxModuleNameLength); int i; if (host == NULL) { host = Ns_InfoHostname(); } for (i = 0; i < nrDrivers; i++) { snprintf(moduleName, maxModuleNameLength, "%s:%d", module, i); status = DriverInit(server, module, moduleName, init, servPtr, path, address, defserver, host); if (status != NS_OK) { break; } } ns_free(moduleName); } } return status; } /* *---------------------------------------------------------------------- * * ServerMapEntryAdd -- * * Add an entry to the virtual server map. The entry consists of the * value as provided by the host header field and location string, * containing as well the protocol. * * Results: * None * * Side effects: * Potentially adding an entry to the virtual server map. 
* *---------------------------------------------------------------------- */ static void ServerMapEntryAdd(Tcl_DString *dsPtr, const char *host, NsServer *servPtr, Driver *drvPtr, bool addDefaultMapEntry) { Tcl_HashEntry *hPtr; int isNew; NS_NONNULL_ASSERT(dsPtr != NULL); NS_NONNULL_ASSERT(host != NULL); NS_NONNULL_ASSERT(servPtr != NULL); NS_NONNULL_ASSERT(drvPtr != NULL); hPtr = Tcl_CreateHashEntry(&drvPtr->hosts, host, &isNew); if (isNew != 0) { ServerMap *mapPtr; (void) Ns_DStringVarAppend(dsPtr, drvPtr->protocol, "://", host, (char *)0L); mapPtr = ns_malloc(sizeof(ServerMap) + (size_t)dsPtr->length); mapPtr->servPtr = servPtr; memcpy(mapPtr->location, dsPtr->string, (size_t)dsPtr->length + 1u); Tcl_SetHashValue(hPtr, mapPtr); Ns_Log(Notice, "%s: adding virtual host entry for host <%s> location: %s mapped to server: %s", drvPtr->threadName, host, mapPtr->location, servPtr->server); if (addDefaultMapEntry) { drvPtr->defMapPtr = mapPtr; } /* * Always reset the Tcl_DString */ Ns_DStringSetLength(dsPtr, 0); } else { Ns_Log(Notice, "%s: ignore duplicate virtual host entry: %s", drvPtr->threadName, host); } } /* *---------------------------------------------------------------------- * * NsDriverMapVirtualServers -- * * Map "Host:" headers for drivers not bound to physical servers. This * function has to be called at a time when all servers are already defined, * such that NsGetServer(server) can succeed. * * Results: * None. * * Side effects: * Add an entry to the virtual server map via ServerMapEntryAdd() * *---------------------------------------------------------------------- */ void NsDriverMapVirtualServers(void) { Driver *drvPtr; for (drvPtr = firstDrvPtr; drvPtr != NULL; drvPtr = drvPtr->nextPtr) { const Ns_Set *lset; size_t j; Tcl_DString ds, *dsPtr = &ds; const char *path, *defserver, *moduleName; moduleName = drvPtr->moduleName; defserver = drvPtr->defserver; /* * Check for a "/servers" section for this driver module. */ path = Ns_ConfigGetPath(NULL, moduleName, "servers", (char *)0L); lset = Ns_ConfigGetSection(path); if (lset == NULL || Ns_SetSize(lset) == 0u) { /* * The driver module has no (or empty) ".../servers" section. * There is no mapping from host name to virtual server defined. */ if (drvPtr->server == NULL) { /* * We have a global driver module. If there is at least a * default server configured, we can use this for the mapping * to the default server. */ if (defserver != NULL) { NsServer *servPtr = NsGetServer(defserver); Tcl_DStringInit(dsPtr); ServerMapEntryAdd(dsPtr, Ns_InfoHostname(), servPtr, drvPtr, NS_TRUE); Tcl_DStringFree(dsPtr); Ns_Log(Notice, "Global driver has no mapping from host to server (section '%s' missing)", moduleName); } else { /* * Global driver, which has no default server, and no servers section. */ Ns_Fatal("%s: virtual servers configured," " but '%s' has no defaultserver defined", moduleName, path); } } continue; } /* * We have a ".../servers" section, the driver might be global or * local. It is not clear why we need the server map for the local * driver, but we keep it for compatibility. * */ if (defserver == NULL) { if (drvPtr->server != NULL) { /* * We have a local (server specific) driver. Since the code * below assumes that we have a "defserver" set, we take the * actual server as defserver. */ defserver = drvPtr->server; } else { /* * We have a global driver, but no defserver.
*/ Ns_Fatal("%s: virtual servers configured," " but '%s' has no defaultserver defined", moduleName, path); } } assert(defserver != NULL); drvPtr->defMapPtr = NULL; Ns_DStringInit(dsPtr); for (j = 0u; j < Ns_SetSize(lset); ++j) { const char *server = Ns_SetKey(lset, j); const char *host = Ns_SetValue(lset, j); NsServer *servPtr; /* * Perform an explicit lookup of the server. */ servPtr = NsGetServer(server); if (servPtr == NULL) { Ns_Log(Error, "%s: no such server: %s", moduleName, server); } else { char *writableHost, *hostName, *portStart; writableHost = ns_strdup(host); Ns_HttpParseHost(writableHost, &hostName, &portStart); if (portStart == NULL) { Tcl_DString hostDString; /* * The provided host entry does NOT contain a port. * * Add the provided entry to the virtual server map only * when the configured port is the default port for the * protocol. */ if (drvPtr->port == drvPtr->defport) { ServerMapEntryAdd(dsPtr, host, servPtr, drvPtr, STREQ(defserver, server)); } /* * Auto-add configured port: always add an entry with the * explicitly configured port of the driver. */ Tcl_DStringInit(&hostDString); Tcl_DStringAppend(&hostDString, host, -1); (void) Ns_DStringPrintf(&hostDString, ":%hu", drvPtr->port); ServerMapEntryAdd(dsPtr, hostDString.string, servPtr, drvPtr, STREQ(defserver, server)); Tcl_DStringFree(&hostDString); } else { /* * The provided host entry does contain a port. * * In case the provided port is equal to the configured port * of the driver, add an entry. */ unsigned short providedPort = (unsigned short)strtol(portStart+1, NULL, 10); if (providedPort == drvPtr->port) { ServerMapEntryAdd(dsPtr, host, servPtr, drvPtr, STREQ(defserver, server)); /* * In case the provided port is equal to the default * port of the driver, make sure that we have an entry * without the port. */ if (providedPort == drvPtr->defport) { ServerMapEntryAdd(dsPtr, hostName, servPtr, drvPtr, STREQ(defserver, server)); } } else { Ns_Log(Warning, "%s: driver is listening on port %hu; " "virtual host entry %s ignored", moduleName, drvPtr->port, host); } } ns_free(writableHost); } } Ns_DStringFree(dsPtr); if (drvPtr->defMapPtr == NULL) { fprintf(stderr, "--- Server Map: ---\n"); Ns_SetPrint(lset); Ns_Fatal("%s: default server '%s' not defined in '%s'", moduleName, defserver, path); } } } /* *---------------------------------------------------------------------- * * DriverInit -- * * Helper function of Ns_DriverInit. This function actually allocates and * initializes the driver structure. * * Results: * NS_OK if initialized, NS_ERROR if config or other error. * * Side effects: * Listen socket will be opened later in NsStartDrivers. * *---------------------------------------------------------------------- */ static Ns_ReturnCode DriverInit(const char *server, const char *moduleName, const char *threadName, const Ns_DriverInitData *init, NsServer *servPtr, const char *path, const char *bindaddrs, const char *defserver, const char *host) { const char *defproto; Driver *drvPtr; DrvWriter *wrPtr; DrvSpooler *spPtr; int i; unsigned short defport; NS_NONNULL_ASSERT(threadName != NULL); NS_NONNULL_ASSERT(init != NULL); NS_NONNULL_ASSERT(path != NULL); NS_NONNULL_ASSERT(bindaddrs != NULL); NS_NONNULL_ASSERT(host != NULL); /* * Set the protocol and port defaults.
*/ if (init->protocol != NULL) { defproto = init->protocol; defport = init->defaultPort; } else { defproto = "unknown"; defport = 0u; } Ns_Log(DriverDebug, "DriverInit server <%s> threadName %s proto %s port %hu", server, threadName, defproto, defport); /* * Allocate a new driver instance and set configurable parameters. */ drvPtr = ns_calloc(1u, sizeof(Driver)); Ns_MutexInit(&drvPtr->lock); Ns_MutexSetName2(&drvPtr->lock, "ns:drv", threadName); Ns_MutexInit(&drvPtr->spooler.lock); Ns_MutexSetName2(&drvPtr->spooler.lock, "ns:drv:spool", threadName); Ns_MutexInit(&drvPtr->writer.lock); Ns_MutexSetName2(&drvPtr->writer.lock, "ns:drv:writer", threadName); if (ns_sockpair(drvPtr->trigger) != 0) { Ns_Fatal("ns_sockpair() failed: %s", ns_sockstrerror(ns_sockerrno)); } drvPtr->server = server; drvPtr->type = init->name; drvPtr->moduleName = ns_strdup(moduleName); drvPtr->threadName = ns_strdup(threadName); drvPtr->defserver = defserver; drvPtr->listenProc = init->listenProc; drvPtr->acceptProc = init->acceptProc; drvPtr->recvProc = init->recvProc; drvPtr->sendProc = init->sendProc; drvPtr->sendFileProc = init->sendFileProc; drvPtr->keepProc = init->keepProc; drvPtr->requestProc = init->requestProc; drvPtr->closeProc = init->closeProc; drvPtr->clientInitProc = init->clientInitProc; drvPtr->arg = init->arg; drvPtr->opts = init->opts; drvPtr->servPtr = servPtr; drvPtr->defport = defport; drvPtr->bufsize = (size_t)Ns_ConfigMemUnitRange(path, "bufsize", 16384, 1024, INT_MAX); drvPtr->maxinput = Ns_ConfigMemUnitRange(path, "maxinput", 1024*1024, 1024, LLONG_MAX); drvPtr->maxupload = Ns_ConfigMemUnitRange(path, "maxupload", 0, 0, (Tcl_WideInt)drvPtr->maxinput); drvPtr->readahead = Ns_ConfigMemUnitRange(path, "readahead", (Tcl_WideInt)drvPtr->bufsize, (Tcl_WideInt)drvPtr->bufsize, drvPtr->maxinput); drvPtr->maxline = Ns_ConfigIntRange(path, "maxline", 8192, 256, INT_MAX); drvPtr->maxheaders = Ns_ConfigIntRange(path, "maxheaders", 128, 8, INT_MAX); drvPtr->maxqueuesize = Ns_ConfigIntRange(path, "maxqueuesize", 1024, 1, INT_MAX); Ns_ConfigTimeUnitRange(path, "sendwait", "30s", 1, 0, INT_MAX, 0, &drvPtr->sendwait); Ns_ConfigTimeUnitRange(path, "recvwait", "30s", 1, 0, INT_MAX, 0, &drvPtr->recvwait); Ns_ConfigTimeUnitRange(path, "closewait", "2s", 0, 0, INT_MAX, 0, &drvPtr->closewait); Ns_ConfigTimeUnitRange(path, "keepwait", "5s", 0, 0, INT_MAX, 0, &drvPtr->keepwait); drvPtr->backlog = Ns_ConfigIntRange(path, "backlog", 256, 1, INT_MAX); drvPtr->driverthreads = Ns_ConfigIntRange(path, "driverthreads", 1, 1, 32); drvPtr->reuseport = Ns_ConfigBool(path, "reuseport", NS_FALSE); drvPtr->acceptsize = Ns_ConfigIntRange(path, "acceptsize", drvPtr->backlog, 1, INT_MAX); drvPtr->keepmaxuploadsize = (size_t)Ns_ConfigMemUnitRange(path, "keepalivemaxuploadsize", 0, 0, INT_MAX); drvPtr->keepmaxdownloadsize = (size_t)Ns_ConfigMemUnitRange(path, "keepalivemaxdownloadsize", 0, 0, INT_MAX); drvPtr->recvTimeout = drvPtr->recvwait; Tcl_InitHashTable(&drvPtr->hosts, TCL_STRING_KEYS); if (drvPtr->driverthreads > 1) { #if !defined(SO_REUSEPORT) drvPtr->driverthreads = 1; drvPtr->reuseport = NS_FALSE; #else /* * When driver threads > 1, "reuseport" has to be active. 
*/ drvPtr->reuseport = NS_TRUE; #endif } if (drvPtr->reuseport) { /* * Reuseport was specified */ #if !defined(SO_REUSEPORT) Ns_Log(Warning, "parameter %s reuseport was specified, but is not supported by the operating system", path); drvPtr->reuseport = NS_FALSE; #endif } drvPtr->uploadpath = ns_strdup(Ns_ConfigString(path, "uploadpath", nsconf.tmpDir)); /* * If activated, "maxupload" has to be at least "readahead" bytes. Tell * the user in case the config values are overruled. */ if ((drvPtr->maxupload > 0) && (drvPtr->maxupload < drvPtr->readahead)) { Ns_Log(Warning, "parameter %s maxupload % " TCL_LL_MODIFIER "d invalid; can be either 0 or must be >= %" TCL_LL_MODIFIER "d (size of readahead)", path, drvPtr->maxupload, drvPtr->readahead); drvPtr->maxupload = drvPtr->readahead; } /* * Determine the port and then set the HTTP location string either * as specified in the config file or constructed from the * protocol, hostname and port. */ drvPtr->protocol = ns_strdup(defproto); drvPtr->address = ns_strdup(bindaddrs); drvPtr->port = (unsigned short)Ns_ConfigIntRange(path, "port", (int)defport, 0, 65535); drvPtr->location = Ns_ConfigGetValue(path, "location"); if (drvPtr->location != NULL && (strstr(drvPtr->location, "://") != NULL)) { drvPtr->location = ns_strdup(drvPtr->location); } else { Tcl_DString ds, *dsPtr = &ds; Ns_DStringInit(dsPtr); Ns_HttpLocationString(dsPtr, drvPtr->protocol, host, drvPtr->port, defport); drvPtr->location = Ns_DStringExport(dsPtr); } drvPtr->nextPtr = firstDrvPtr; firstDrvPtr = drvPtr; /* * Add driver specific extra headers. */ drvPtr->extraHeaders = Ns_ConfigSet(path, "extraheaders"); /* * Check if upload spooler are enabled */ spPtr = &drvPtr->spooler; spPtr->threads = Ns_ConfigIntRange(path, "spoolerthreads", 0, 0, 32); if (spPtr->threads > 0) { Ns_Log(Notice, "%s: enable %d spooler thread(s) " "for uploads >= %" TCL_LL_MODIFIER "d bytes", threadName, spPtr->threads, drvPtr->readahead); for (i = 0; i < spPtr->threads; i++) { SpoolerQueue *queuePtr = ns_calloc(1u, sizeof(SpoolerQueue)); char buffer[100]; snprintf(buffer, sizeof(buffer), "ns:driver:spooler:%d", i); Ns_MutexSetName2(&queuePtr->lock, buffer, "queue"); queuePtr->id = i; Push(queuePtr, spPtr->firstPtr); } } else { Ns_Log(Notice, "%s: enable %d spooler thread(s) ", threadName, spPtr->threads); } /* * Enable writer threads */ wrPtr = &drvPtr->writer; wrPtr->threads = Ns_ConfigIntRange(path, "writerthreads", 0, 0, 32); if (wrPtr->threads > 0) { wrPtr->writersize = (size_t)Ns_ConfigMemUnitRange(path, "writersize", 1024*1024, 1024, INT_MAX); wrPtr->bufsize = (size_t)Ns_ConfigMemUnitRange(path, "writerbufsize", 8192, 512, INT_MAX); wrPtr->rateLimit = Ns_ConfigIntRange(path, "writerratelimit", 0, 0, INT_MAX); wrPtr->doStream = Ns_ConfigBool(path, "writerstreaming", NS_FALSE) ? 
NS_WRITER_STREAM_ACTIVE : NS_WRITER_STREAM_NONE; Ns_Log(Notice, "%s: enable %d writer thread(s) " "for downloads >= %" PRIdz " bytes, bufsize=%" PRIdz " bytes, HTML streaming %d", threadName, wrPtr->threads, wrPtr->writersize, wrPtr->bufsize, wrPtr->doStream); for (i = 0; i < wrPtr->threads; i++) { SpoolerQueue *queuePtr = ns_calloc(1u, sizeof(SpoolerQueue)); char buffer[100]; snprintf(buffer, sizeof(buffer), "ns:driver:writer:%d", i); Ns_MutexSetName2(&queuePtr->lock, buffer, "queue"); queuePtr->id = i; Push(queuePtr, wrPtr->firstPtr); } } else { Ns_Log(Notice, "%s: enable %d writer thread(s) ", threadName, wrPtr->threads); } return NS_OK; } /* *---------------------------------------------------------------------- * * NsStartDrivers -- * * Listen on all driver address/ports and start the DriverThread. * * Results: * None. * * Side effects: * See DriverThread. * *---------------------------------------------------------------------- */ void NsStartDrivers(void) { Driver *drvPtr; /* * Signal and wait for each driver to start. */ for (drvPtr = firstDrvPtr; drvPtr != NULL; drvPtr = drvPtr->nextPtr) { if (drvPtr->port == 0u) { /* * Don't start a driver having port zero. */ continue; } Ns_ThreadCreate(DriverThread, drvPtr, 0, &drvPtr->thread); Ns_MutexLock(&drvPtr->lock); while ((drvPtr->flags & DRIVER_STARTED) == 0u) { Ns_CondWait(&drvPtr->cond, &drvPtr->lock); } /*if ((drvPtr->flags & DRIVER_FAILED)) { status = NS_ERROR; }*/ Ns_MutexUnlock(&drvPtr->lock); } } /* *---------------------------------------------------------------------- * * NsStopDrivers -- * * Trigger the DriverThread to begin shutdown. * * Results: * None. * * Side effects: * DriverThread will close listen sockets and then exit after all * outstanding connections are complete and closed. * *---------------------------------------------------------------------- */ void NsStopDrivers(void) { Driver *drvPtr; NsAsyncWriterQueueDisable(NS_TRUE); for (drvPtr = firstDrvPtr; drvPtr != NULL; drvPtr = drvPtr->nextPtr) { Tcl_HashEntry *hPtr; Tcl_HashSearch search; if ((drvPtr->flags & DRIVER_STARTED) == 0u) { continue; } Ns_MutexLock(&drvPtr->lock); Ns_Log(Notice, "[driver:%s]: stopping", drvPtr->threadName); drvPtr->flags |= DRIVER_SHUTDOWN; Ns_CondBroadcast(&drvPtr->cond); Ns_MutexUnlock(&drvPtr->lock); SockTrigger(drvPtr->trigger[1]); hPtr = Tcl_FirstHashEntry(&drvPtr->hosts, &search); while (hPtr != NULL) { Tcl_DeleteHashEntry(hPtr); hPtr = Tcl_NextHashEntry(&search); } } } void NsStopSpoolers(void) { const Driver *drvPtr; Ns_Log(Notice, "driver: stopping writer and spooler threads"); /* * Shutdown all spooler and writer threads */ for (drvPtr = firstDrvPtr; drvPtr != NULL; drvPtr = drvPtr->nextPtr) { Ns_Time timeout; if ((drvPtr->flags & DRIVER_STARTED) == 0u) { continue; } Ns_GetTime(&timeout); Ns_IncrTime(&timeout, nsconf.shutdowntimeout.sec, nsconf.shutdowntimeout.usec); SpoolerQueueStop(drvPtr->writer.firstPtr, &timeout, "writer"); SpoolerQueueStop(drvPtr->spooler.firstPtr, &timeout, "spooler"); } } /* *---------------------------------------------------------------------- * * DriverInfoObjCmd -- * * Return public info of all drivers. * Subcommand of NsTclDriverObjCmd. * * Results: * Standard Tcl Result. * * Side effects: * None. 
* *---------------------------------------------------------------------- */ static int DriverInfoObjCmd(ClientData UNUSED(clientData), Tcl_Interp *interp, int objc, Tcl_Obj *const* objv) { int result = TCL_OK; if (Ns_ParseObjv(NULL, NULL, interp, 2, objc, objv) != NS_OK) { result = TCL_ERROR; } else { const Driver *drvPtr; Tcl_Obj *resultObj = Tcl_NewListObj(0, NULL); Tcl_HashTable driverNames; /* names of the driver modules without duplicates */ Tcl_InitHashTable(&driverNames, TCL_STRING_KEYS); /* * Iterate over all modules, not necessarily all driver threads */ for (drvPtr = firstDrvPtr; drvPtr != NULL; drvPtr = drvPtr->nextPtr) { int isNew = 0; (void)Tcl_CreateHashEntry(&driverNames, drvPtr->moduleName, &isNew); if (isNew == 1) { Tcl_Obj *listObj = Tcl_NewListObj(0, NULL); Tcl_ListObjAppendElement(interp, listObj, Tcl_NewStringObj("module", 6)); Tcl_ListObjAppendElement(interp, listObj, Tcl_NewStringObj(drvPtr->moduleName, -1)); Tcl_ListObjAppendElement(interp, listObj, Tcl_NewStringObj("type", 4)); Tcl_ListObjAppendElement(interp, listObj, Tcl_NewStringObj(drvPtr->type, -1)); Tcl_ListObjAppendElement(interp, listObj, Tcl_NewStringObj("server", 6)); Tcl_ListObjAppendElement(interp, listObj, Tcl_NewStringObj(drvPtr->server != NULL ? drvPtr->server : NS_EMPTY_STRING, -1)); Tcl_ListObjAppendElement(interp, listObj, Tcl_NewStringObj("location", 8)); Tcl_ListObjAppendElement(interp, listObj, Tcl_NewStringObj(drvPtr->location, -1)); Tcl_ListObjAppendElement(interp, listObj, Tcl_NewStringObj("address", 7)); Tcl_ListObjAppendElement(interp, listObj, Tcl_NewStringObj(drvPtr->address, -1)); Tcl_ListObjAppendElement(interp, listObj, Tcl_NewStringObj("protocol", 8)); Tcl_ListObjAppendElement(interp, listObj, Tcl_NewStringObj(drvPtr->protocol, -1)); Tcl_ListObjAppendElement(interp, listObj, Tcl_NewStringObj("sendwait", 8)); Tcl_ListObjAppendElement(interp, listObj, Ns_TclNewTimeObj(&drvPtr->sendwait)); Tcl_ListObjAppendElement(interp, listObj, Tcl_NewStringObj("recvwait", 8)); Tcl_ListObjAppendElement(interp, listObj, Ns_TclNewTimeObj(&drvPtr->recvwait)); Tcl_ListObjAppendElement(interp, listObj, Tcl_NewStringObj("extraheaders", 12)); if (drvPtr->extraHeaders != NULL) { Tcl_DString ds; Tcl_DStringInit(&ds); Ns_DStringAppendSet(&ds, drvPtr->extraHeaders); Tcl_ListObjAppendElement(interp, listObj, Tcl_NewStringObj(ds.string, ds.length)); Tcl_DStringFree(&ds); } else { Tcl_ListObjAppendElement(interp, listObj, Tcl_NewStringObj("", 0)); } Tcl_ListObjAppendElement(interp, resultObj, listObj); } } Tcl_SetObjResult(interp, resultObj); Tcl_DeleteHashTable(&driverNames); } return result; } /* *---------------------------------------------------------------------- * * DriverStatsObjCmd -- * * Return statistics of all drivers. * Subcommand of NsTclDriverObjCmd. * * Results: * Standard Tcl Result. * * Side effects: * None. * *---------------------------------------------------------------------- */ static int DriverStatsObjCmd(ClientData UNUSED(clientData), Tcl_Interp *interp, int objc, Tcl_Obj *const* objv) { int result = TCL_OK; if (Ns_ParseObjv(NULL, NULL, interp, 2, objc, objv) != NS_OK) { result = TCL_ERROR; } else { const Driver *drvPtr; Tcl_Obj *resultObj = Tcl_NewListObj(0, NULL); /* * Iterate over all drivers and collect results.
*/ for (drvPtr = firstDrvPtr; drvPtr != NULL; drvPtr = drvPtr->nextPtr) { Tcl_Obj *listObj = Tcl_NewListObj(0, NULL); Tcl_ListObjAppendElement(interp, listObj, Tcl_NewStringObj("thread", 6)); Tcl_ListObjAppendElement(interp, listObj, Tcl_NewStringObj(drvPtr->threadName, -1)); Tcl_ListObjAppendElement(interp, listObj, Tcl_NewStringObj("module", 6)); Tcl_ListObjAppendElement(interp, listObj, Tcl_NewStringObj(drvPtr->moduleName, -1)); Tcl_ListObjAppendElement(interp, listObj, Tcl_NewStringObj("received", 8)); Tcl_ListObjAppendElement(interp, listObj, Tcl_NewWideIntObj(drvPtr->stats.received)); Tcl_ListObjAppendElement(interp, listObj, Tcl_NewStringObj("spooled", 7)); Tcl_ListObjAppendElement(interp, listObj, Tcl_NewWideIntObj(drvPtr->stats.spooled)); Tcl_ListObjAppendElement(interp, listObj, Tcl_NewStringObj("partial", 7)); Tcl_ListObjAppendElement(interp, listObj, Tcl_NewWideIntObj(drvPtr->stats.partial)); Tcl_ListObjAppendElement(interp, listObj, Tcl_NewStringObj("errors", 6)); Tcl_ListObjAppendElement(interp, listObj, Tcl_NewWideIntObj(drvPtr->stats.errors)); Tcl_ListObjAppendElement(interp, resultObj, listObj); } Tcl_SetObjResult(interp, resultObj); } return result; } /* *---------------------------------------------------------------------- * * DriverThreadsObjCmd -- * * Return the names of driver threads * * Results: * Standard Tcl Result. * * Side effects: * None. * *---------------------------------------------------------------------- */ static int DriverThreadsObjCmd(ClientData UNUSED(clientData), Tcl_Interp *interp, int objc, Tcl_Obj *const* objv) { int result = TCL_OK; if (Ns_ParseObjv(NULL, NULL, interp, 2, objc, objv) != NS_OK) { result = TCL_ERROR; } else { const Driver *drvPtr; Tcl_Obj *resultObj = Tcl_NewListObj(0, NULL); /* * Iterate over all drivers and collect results. */ for (drvPtr = firstDrvPtr; drvPtr != NULL; drvPtr = drvPtr->nextPtr) { Tcl_ListObjAppendElement(interp, resultObj, Tcl_NewStringObj(drvPtr->threadName, -1)); } Tcl_SetObjResult(interp, resultObj); } return result; } /* *---------------------------------------------------------------------- * * DriverNamesObjCmd -- * * Return the names of drivers. * * Results: * Standard Tcl Result. * * Side effects: * None. * *---------------------------------------------------------------------- */ static int DriverNamesObjCmd(ClientData UNUSED(clientData), Tcl_Interp *interp, int objc, Tcl_Obj *const* objv) { int result = TCL_OK; if (Ns_ParseObjv(NULL, NULL, interp, 2, objc, objv) != NS_OK) { result = TCL_ERROR; } else { const Driver *drvPtr; Tcl_Obj *resultObj = Tcl_NewListObj(0, NULL); Tcl_HashTable driverNames; /* names of the drivers without duplicates */ Tcl_InitHashTable(&driverNames, TCL_STRING_KEYS); /* * Iterate over all drivers and collect results. */ for (drvPtr = firstDrvPtr; drvPtr != NULL; drvPtr = drvPtr->nextPtr) { int isNew; (void)Tcl_CreateHashEntry(&driverNames, drvPtr->moduleName, &isNew); if (isNew == 1) { Tcl_ListObjAppendElement(interp, resultObj, Tcl_NewStringObj(drvPtr->moduleName, -1)); } } Tcl_SetObjResult(interp, resultObj); Tcl_DeleteHashTable(&driverNames); } return result; } /* *---------------------------------------------------------------------- * * NsTclDriverObjCmd - * * Give information about drivers. Currently, just the statistics. * * Results: * Standard Tcl result. * * Side effects: * None. 
* *---------------------------------------------------------------------- */ int NsTclDriverObjCmd(ClientData clientData, Tcl_Interp *interp, int objc, Tcl_Obj *const* objv) { const Ns_SubCmdSpec subcmds[] = { {"info", DriverInfoObjCmd}, {"names", DriverNamesObjCmd}, {"threads", DriverThreadsObjCmd}, {"stats", DriverStatsObjCmd}, {NULL, NULL} }; return Ns_SubcmdObjv(subcmds, clientData, interp, objc, objv); } /* *---------------------------------------------------------------------- * * NsWakeupDriver -- * * Wake up the associated DriverThread. * * Results: * None. * * Side effects: * The poll waiting for this trigger will be interrupted. * *---------------------------------------------------------------------- */ void NsWakeupDriver(const Driver *drvPtr) { NS_NONNULL_ASSERT(drvPtr != NULL); SockTrigger(drvPtr->trigger[1]); } /* *---------------------------------------------------------------------- * * NsWaitDriversShutdown -- * * Wait for exit of DriverThread. This callback is invoked later * by the timed shutdown thread. * * Results: * None. * * Side effects: * Driver thread is joined and trigger pipe closed. * *---------------------------------------------------------------------- */ void NsWaitDriversShutdown(const Ns_Time *toPtr) { Driver *drvPtr; Ns_ReturnCode status = NS_OK; for (drvPtr = firstDrvPtr; drvPtr != NULL; drvPtr = drvPtr->nextPtr) { if ((drvPtr->flags & DRIVER_STARTED) == 0u) { continue; } Ns_MutexLock(&drvPtr->lock); while ((drvPtr->flags & DRIVER_STOPPED) == 0u && status == NS_OK) { status = Ns_CondTimedWait(&drvPtr->cond, &drvPtr->lock, toPtr); } Ns_MutexUnlock(&drvPtr->lock); if (status != NS_OK) { Ns_Log(Warning, "[driver:%s]: shutdown timeout", drvPtr->threadName); } else { Ns_Log(Notice, "[driver:%s]: stopped", drvPtr->threadName); Ns_ThreadJoin(&drvPtr->thread, NULL); drvPtr->thread = NULL; } } } /* *---------------------------------------------------------------------- * * NsGetRequest -- * * Return the request buffer, reading it if necessary (i.e., if not an * async read-ahead connection). This function is called at the start of * connection processing. * Results: * Pointer to Request structure or NULL on error. * * Side effects: * May wait for content to arrive if necessary. * *---------------------------------------------------------------------- */ Request * NsGetRequest(Sock *sockPtr, const Ns_Time *nowPtr) { Request *reqPtr; NS_NONNULL_ASSERT(sockPtr != NULL); /* * The underlying "Request" structure is allocated by RequestNew(), which * must be called for the "sockPtr" prior to calling this * function. "reqPtr" should be NULL just in error cases. */ reqPtr = sockPtr->reqPtr; if (likely(reqPtr != NULL)) { if (likely(reqPtr->request.line != NULL)) { Ns_Log(DriverDebug, "NsGetRequest got the pre-parsed request <%s> from the driver", reqPtr->request.line); } else if (sockPtr->drvPtr->requestProc == NULL) { /* * Non-HTTP driver can send the drvPtr->requestProc to perform * their own request handling. */ SockState status; Ns_Log(DriverDebug, "NsGetRequest has to read+parse the request"); /* * We have no parsed request so far. So, do it now. */ do { Ns_Log(DriverDebug, "NsGetRequest calls SockRead"); status = SockRead(sockPtr, 0, nowPtr); } while (status == SOCK_MORE); /* * If anything went wrong, clean the request provided by * SockRead() and flag the error by returning NULL. 
*/ if (status != SOCK_READY) { if (sockPtr->reqPtr != NULL) { Ns_Log(DriverDebug, "NsGetRequest calls RequestFree"); RequestFree(sockPtr); } reqPtr = NULL; } } else { Ns_Log(DriverDebug, "NsGetRequest found driver specific request Proc, " "probably from a non-HTTP driver"); } } else { Ns_Log(DriverDebug, "NsGetRequest has reqPtr NULL"); } return reqPtr; } /* *---------------------------------------------------------------------- * * NsSockClose -- * * Return a connection to the DriverThread for closing or keepalive. * "keep" might be NS_TRUE/NS_FALSE or -1 if undecided. * * Results: * None. * * Side effects: * Socket may be reused by a keepalive connection. * *---------------------------------------------------------------------- */ void NsSockClose(Sock *sockPtr, int keep) { Driver *drvPtr; bool trigger = NS_FALSE; NS_NONNULL_ASSERT(sockPtr != NULL); drvPtr = sockPtr->drvPtr; Ns_Log(DriverDebug, "NsSockClose sockPtr %p (%d) keep %d", (void *)sockPtr, ((Ns_Sock*)sockPtr)->sock, keep); SockClose(sockPtr, keep); /* * Free the request, unless it is from a non-HTTP driver (who might not * fill out the request structure). */ if (sockPtr->reqPtr != NULL) { Ns_Log(DriverDebug, "NsSockClose calls RequestFree"); RequestFree(sockPtr); } Ns_MutexLock(&drvPtr->lock); if (drvPtr->closePtr == NULL) { trigger = NS_TRUE; } sockPtr->nextPtr = drvPtr->closePtr; drvPtr->closePtr = sockPtr; Ns_MutexUnlock(&drvPtr->lock); if (trigger) { SockTrigger(drvPtr->trigger[1]); } } /* *---------------------------------------------------------------------- * * DriverListen -- * * Open a listening socket for accepting connections. * * Results: * File description of socket, or NS_INVALID_SOCKET on error. * * Side effects: * Depends on driver. * *---------------------------------------------------------------------- */ static NS_SOCKET DriverListen(Driver *drvPtr, const char *bindaddr) { NS_SOCKET sock; NS_NONNULL_ASSERT(drvPtr != NULL); NS_NONNULL_ASSERT(bindaddr != NULL); sock = (*drvPtr->listenProc)((Ns_Driver *) drvPtr, bindaddr, drvPtr->port, drvPtr->backlog, drvPtr->reuseport); if (sock == NS_INVALID_SOCKET) { Ns_Log(Error, "%s: failed to listen on [%s]:%d: %s", drvPtr->threadName, bindaddr, drvPtr->port, ns_sockstrerror(ns_sockerrno)); } else { Ns_Log(Notice, #ifdef HAVE_IPV6 "%s: listening on [%s]:%d", #else "%s: listening on %s:%d", #endif drvPtr->threadName, bindaddr, drvPtr->port); } return sock; } /* *---------------------------------------------------------------------- * * DriverAccept -- * * Accept a new socket. It will be in non-blocking mode. * * Results: * _ACCEPT: a socket was accepted, poll for data * _ACCEPT_DATA: a socket was accepted, data present, read immediately * if in async mode, defer reading to connection thread * _ACCEPT_QUEUE: a socket was accepted, queue immediately * _ACCEPT_ERROR: no socket was accepted * * Side effects: * Depends on driver. * *---------------------------------------------------------------------- */ static NS_DRIVER_ACCEPT_STATUS DriverAccept(Sock *sockPtr, NS_SOCKET sock) { socklen_t n = (socklen_t)sizeof(struct NS_SOCKADDR_STORAGE); NS_NONNULL_ASSERT(sockPtr != NULL); return (*sockPtr->drvPtr->acceptProc)((Ns_Sock *) sockPtr, sock, (struct sockaddr *) &(sockPtr->sa), &n); } /* *---------------------------------------------------------------------- * * NsDriverRecv -- * * Read data from the socket into the given vector of buffers. * * Results: * Number of bytes read, or -1 on error. * * Side effects: * Depends on driver. 
* *---------------------------------------------------------------------- */ ssize_t NsDriverRecv(Sock *sockPtr, struct iovec *bufs, int nbufs, Ns_Time *timeoutPtr) { ssize_t result; const Driver *drvPtr; NS_NONNULL_ASSERT(sockPtr != NULL); drvPtr = sockPtr->drvPtr; if (likely(drvPtr->recvProc != NULL)) { result = (*drvPtr->recvProc)((Ns_Sock *) sockPtr, bufs, nbufs, timeoutPtr, 0u); } else { Ns_Log(Warning, "driver: no recvProc registered for driver %s", drvPtr->threadName); result = -1; } return result; } /* *---------------------------------------------------------------------- * * NsDriverSend -- * * Write a vector of buffers to the socket via the driver callback. * May not send all of the data. * * Results: * Number of bytes written or -1 on error. * May return 0 (zero) when socket is not writable. * * Side effects: * Depends on the driver. * *---------------------------------------------------------------------- */ ssize_t NsDriverSend(Sock *sockPtr, const struct iovec *bufs, int nbufs, unsigned int flags) { ssize_t sent = -1; const Driver *drvPtr; NS_NONNULL_ASSERT(sockPtr != NULL); drvPtr = sockPtr->drvPtr; NS_NONNULL_ASSERT(drvPtr != NULL); if (likely(drvPtr->sendProc != NULL)) { /* * TODO: The Ns_DriverSendProc signature should be modified * to omit the timeout argument. Same with recvProc(). */ sent = (*drvPtr->sendProc)((Ns_Sock *) sockPtr, bufs, nbufs, NULL, flags); } else { Ns_Log(Warning, "no sendProc registered for driver %s", drvPtr->threadName); } return sent; } /* *---------------------------------------------------------------------- * * NsDriverSendFile -- * * Write a vector of file buffers to the socket via the driver * callback. * * Results: * Number of bytes written, -1 on error. * May not send all the data. * * Side effects: * May block on disk read. * *---------------------------------------------------------------------- */ ssize_t NsDriverSendFile(Sock *sockPtr, Ns_FileVec *bufs, int nbufs, unsigned int flags) { ssize_t sent = -1; const Driver *drvPtr; NS_NONNULL_ASSERT(sockPtr != NULL); NS_NONNULL_ASSERT(bufs != NULL); drvPtr = sockPtr->drvPtr; NS_NONNULL_ASSERT(drvPtr != NULL); if (drvPtr->sendFileProc != NULL) { /* * TODO: The Ns_DriverSendFileProc signature should be modified * to omit the timeout argument. */ sent = (*drvPtr->sendFileProc)((Ns_Sock *)sockPtr, bufs, nbufs, NULL, flags); } else { sent = Ns_SockSendFileBufs((Ns_Sock *)sockPtr, bufs, nbufs, flags); } return sent; } /* *---------------------------------------------------------------------- * * DriverKeep -- * * Can the given socket be kept open in the hopes that another * request will arrive before the keepwait timeout expires? * * Results: * NS_TRUE if the socket is OK for keepalive, NS_FALSE if this is not possible. * * Side effects: * Depends on driver. * *---------------------------------------------------------------------- */ static bool DriverKeep(Sock *sockPtr) { Ns_DriverKeepProc *keepProc; bool result; NS_NONNULL_ASSERT(sockPtr != NULL); keepProc = sockPtr->drvPtr->keepProc; if (keepProc == NULL) { result = NS_FALSE; } else { result = (keepProc)((Ns_Sock *) sockPtr); } return result; } /* *---------------------------------------------------------------------- * * DriverClose -- * * Close the given socket. * * Results: * None. * * Side effects: * Depends on driver. 
* *---------------------------------------------------------------------- */ static void DriverClose(Sock *sockPtr) { NS_NONNULL_ASSERT(sockPtr != NULL); (*sockPtr->drvPtr->closeProc)((Ns_Sock *) sockPtr); } /* *---------------------------------------------------------------------- * * DriverThread -- * * Main listening socket driver thread. * * Results: * None. * * Side effects: * Connections are accepted on the configured listen sockets, * placed on the run queue to be serviced, and gracefully * closed when done. Async sockets have the entire request read * here before queuing as well. * *---------------------------------------------------------------------- */ static void DriverThread(void *arg) { Driver *drvPtr = (Driver*)arg; Ns_Time now, diff; char charBuffer[1], drain[1024]; int pollTimeout, accepted, nrBindaddrs = 0; bool stopping; unsigned int flags; Sock *sockPtr, *closePtr, *nextPtr, *waitPtr, *readPtr; PollData pdata; Ns_ThreadSetName("-driver:%s-", drvPtr->threadName); Ns_Log(Notice, "starting"); flags = DRIVER_STARTED; { Tcl_Obj *bindaddrsObj, **objv; int j = 0, result; bindaddrsObj = Tcl_NewStringObj(drvPtr->address, -1); Tcl_IncrRefCount(bindaddrsObj); result = Tcl_ListObjGetElements(NULL, bindaddrsObj, &nrBindaddrs, &objv); /* * "result" was ok during startup; it still has to be ok. */ assert(result == TCL_OK); if (result == TCL_OK) { int i; /* * Bind all provided addresses. */ for (i = 0; i < nrBindaddrs; i++) { drvPtr->listenfd[j] = DriverListen(drvPtr, Tcl_GetString(objv[i])); if (drvPtr->listenfd[j] != NS_INVALID_SOCKET) { j ++; } } if (j > 0 && j < nrBindaddrs) { Ns_Log(Warning, "could only bind to %d out of %d addresses", j, nrBindaddrs); } } /* * "j" refers to the number of successful listen() operations. */ nrBindaddrs = j; Tcl_DecrRefCount(bindaddrsObj); } if (nrBindaddrs > 0) { SpoolerQueueStart(drvPtr->spooler.firstPtr, SpoolerThread); SpoolerQueueStart(drvPtr->writer.firstPtr, WriterThread); } else { Ns_Log(Warning, "could not bind any of the following addresses, stopping this driver: %s", drvPtr->address); flags |= (DRIVER_FAILED | DRIVER_SHUTDOWN); } Ns_MutexLock(&drvPtr->lock); drvPtr->flags |= flags; Ns_CondBroadcast(&drvPtr->cond); Ns_MutexUnlock(&drvPtr->lock); /* * Loop forever until signaled to shut down and all * connections are complete and gracefully closed. */ PollCreate(&pdata); Ns_GetTime(&now); closePtr = waitPtr = readPtr = NULL; stopping = ((flags & DRIVER_SHUTDOWN) != 0u); if (!stopping) { Ns_Log(Notice, "driver: accepting connections"); } while (!stopping) { int n; /* * Set the bits for all active drivers if a connection * isn't already pending. */ PollReset(&pdata); (void)PollSet(&pdata, drvPtr->trigger[0], (short)POLLIN, NULL); if (likely(waitPtr == NULL)) { for (n = 0; n < nrBindaddrs; n++) { drvPtr->pidx[n] = PollSet(&pdata, drvPtr->listenfd[n], (short)POLLIN, NULL); } } /* * If there are any closing or read-ahead sockets, set the bits * and determine the minimum relative timeout. * * TODO: the various poll timeouts should probably be configurable. */ if (readPtr == NULL && closePtr == NULL) { pollTimeout = 10 * 1000; } else { for (sockPtr = readPtr; sockPtr != NULL; sockPtr = sockPtr->nextPtr) { SockPoll(sockPtr, (short)POLLIN, &pdata); } for (sockPtr = closePtr; sockPtr != NULL; sockPtr = sockPtr->nextPtr) { SockPoll(sockPtr, (short)POLLIN, &pdata); } if (Ns_DiffTime(&pdata.timeout, &now, &diff) > 0) { /* * The resolution of "pollTimeout" is ms; therefore, we round * up. If we would round down (e.g.
500 microseconds to 0 ms), * the time comparison later would determine that it is too * early. */ pollTimeout = (int)Ns_TimeToMilliseconds(&diff) + 1; } else { pollTimeout = 0; } } n = PollWait(&pdata, pollTimeout); Ns_Log(DriverDebug, "=== PollWait returned %d, trigger[0] %d", n, PollIn(&pdata, 0)); if (PollIn(&pdata, 0) && unlikely(ns_recv(drvPtr->trigger[0], charBuffer, 1u, 0) != 1)) { const char *errstr = ns_sockstrerror(ns_sockerrno); Ns_Fatal("driver: trigger ns_recv() failed: %s", errstr); } /* * Check whether we should re-animate some connection threads, * when e.g. the number of current threads dropped below the * minimal value. Perform this test on timeouts (n == 0; * just for safety reasons) or on explicit wakeup calls. */ if ((n == 0) || PollIn(&pdata, 0)) { NsServer *servPtr = drvPtr->servPtr; if (servPtr != NULL) { /* * Check if we have to reanimate the current server. */ NsEnsureRunningConnectionThreads(servPtr, NULL); } else { Ns_Set *servers = Ns_ConfigCreateSection("ns/servers"); size_t j; /* * Reanimation check on all servers. */ for (j = 0u; j < Ns_SetSize(servers); ++j) { const char *server = Ns_SetKey(servers, j); servPtr = NsGetServer(server); if (servPtr != NULL) { NsEnsureRunningConnectionThreads(servPtr, NULL); } } } } /* * Update the current time and drain and/or release any * closing sockets. */ Ns_GetTime(&now); if (closePtr != NULL) { sockPtr = closePtr; closePtr = NULL; while (sockPtr != NULL) { nextPtr = sockPtr->nextPtr; if (unlikely(PollHup(&pdata, sockPtr->pidx))) { /* * Peer has closed the connection */ SockRelease(sockPtr, SOCK_CLOSE, 0); } else if (likely(PollIn(&pdata, sockPtr->pidx))) { /* * Got some data */ ssize_t received = ns_recv(sockPtr->sock, drain, sizeof(drain), 0); if (received <= 0) { Ns_Log(DriverDebug, "poll closewait pollin; sockrelease SOCK_READERROR (sock %d)", sockPtr->sock); SockRelease(sockPtr, SOCK_READERROR, 0); } else { Push(sockPtr, closePtr); } } else if (Ns_DiffTime(&sockPtr->timeout, &now, &diff) <= 0) { /* no PollHup, no PollIn, maybe timeout */ Ns_Log(DriverDebug, "poll closewait timeout; sockrelease SOCK_CLOSETIMEOUT (sock %d)", sockPtr->sock); SockRelease(sockPtr, SOCK_CLOSETIMEOUT, 0); } else { /* too early, keep waiting */ Push(sockPtr, closePtr); } sockPtr = nextPtr; } } /* * Attempt read-ahead of any new connections. */ sockPtr = readPtr; readPtr = NULL; while (likely(sockPtr != NULL)) { nextPtr = sockPtr->nextPtr; if (unlikely(PollHup(&pdata, sockPtr->pidx))) { /* * Peer has closed the connection */ SockRelease(sockPtr, SOCK_CLOSE, 0); } else if (unlikely(!PollIn(&pdata, sockPtr->pidx)) && ((sockPtr->reqPtr == NULL) || (sockPtr->reqPtr->leftover == 0u))) { /* * Got no data for this sockPtr. */ if (Ns_DiffTime(&sockPtr->timeout, &now, &diff) <= 0) { SockRelease(sockPtr, SOCK_READTIMEOUT, 0); } else { Push(sockPtr, readPtr); } } else { /* * Got some data for this sockPtr. * If enabled, perform read-ahead now. */ assert(drvPtr == sockPtr->drvPtr); if (likely((drvPtr->opts & NS_DRIVER_ASYNC) != 0u)) { SockState s = SockRead(sockPtr, 0, &now); /* * Queue for connection processing if ready. 
*/ switch (s) { case SOCK_SPOOL: drvPtr->stats.spooled++; if (SockSpoolerQueue(drvPtr, sockPtr) == 0) { Push(sockPtr, readPtr); } break; case SOCK_MORE: drvPtr->stats.partial++; SockTimeout(sockPtr, &now, &drvPtr->recvwait); Push(sockPtr, readPtr); break; case SOCK_READY: if (SockQueue(sockPtr, &now) == NS_TIMEOUT) { Push(sockPtr, waitPtr); } break; /* * Already handled or normal cases */ case SOCK_ENTITYTOOLARGE: NS_FALL_THROUGH; /* fall through */ case SOCK_BADREQUEST: NS_FALL_THROUGH; /* fall through */ case SOCK_BADHEADER: NS_FALL_THROUGH; /* fall through */ case SOCK_TOOMANYHEADERS: NS_FALL_THROUGH; /* fall through */ case SOCK_CLOSE: SockRelease(sockPtr, s, errno); break; /* * Exceptions */ case SOCK_READERROR: NS_FALL_THROUGH; /* fall through */ case SOCK_CLOSETIMEOUT: NS_FALL_THROUGH; /* fall through */ case SOCK_ERROR: NS_FALL_THROUGH; /* fall through */ case SOCK_READTIMEOUT: NS_FALL_THROUGH; /* fall through */ case SOCK_SHUTERROR: NS_FALL_THROUGH; /* fall through */ case SOCK_WRITEERROR: NS_FALL_THROUGH; /* fall through */ case SOCK_WRITETIMEOUT: drvPtr->stats.errors++; Ns_Log(Warning, "sockread returned unexpected result %s (err %s); close socket (%d)", GetSockStateName(s), ((errno != 0) ? strerror(errno) : NS_EMPTY_STRING), sockPtr->sock); SockRelease(sockPtr, s, errno); break; } } else { /* * Potentially blocking driver, NS_DRIVER_ASYNC is not defined */ if (Ns_DiffTime(&sockPtr->timeout, &now, &diff) <= 0) { drvPtr->stats.errors++; Ns_Log(Notice, "read-ahead has some data, no async sock read ===== diff time %ld", Ns_DiffTime(&sockPtr->timeout, &now, &diff)); sockPtr->keep = NS_FALSE; SockRelease(sockPtr, SOCK_READTIMEOUT, 0); } else { if (SockQueue(sockPtr, &now) == NS_TIMEOUT) { Push(sockPtr, waitPtr); } } } } sockPtr = nextPtr; } /* * Attempt to queue any pending connection after reversing the * list to ensure oldest connections are tried first. */ if (waitPtr != NULL) { sockPtr = NULL; while ((nextPtr = waitPtr) != NULL) { waitPtr = nextPtr->nextPtr; Push(nextPtr, sockPtr); } while (sockPtr != NULL) { nextPtr = sockPtr->nextPtr; if (SockQueue(sockPtr, &now) == NS_TIMEOUT) { Push(sockPtr, waitPtr); } sockPtr = nextPtr; } } /* * If no connections are waiting, attempt to accept more. */ if (waitPtr == NULL) { /* * If configured, try to accept more than one request, under heavy load * this helps to process more requests */ SockState s; bool acceptMore = NS_TRUE; accepted = 0; while (acceptMore && accepted < drvPtr->acceptsize && drvPtr->queuesize < drvPtr->maxqueuesize ) { bool gotRequests = NS_FALSE; /* * Check for input data on all bind addresses. Stop checking, * when one round of checking on all addresses fails. 
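                 *
                 * Note: with "acceptsize" > 1, up to that many connections
                 * are accepted per poll round as long as the driver queue
                 * stays below "maxqueuesize"; on Darwin only a single
                 * accept per round is attempted (see the #ifdef below).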
*/ for (n = 0; n < nrBindaddrs; n++) { if ( PollIn(&pdata, drvPtr->pidx[n]) && (s = SockAccept(drvPtr, pdata.pfds[drvPtr->pidx[n]].fd, &sockPtr, &now)) != SOCK_ERROR) { switch (s) { case SOCK_SPOOL: drvPtr->stats.spooled++; if (SockSpoolerQueue(drvPtr, sockPtr) == 0) { Push(sockPtr, readPtr); } break; case SOCK_MORE: drvPtr->stats.partial++; SockTimeout(sockPtr, &now, &drvPtr->recvwait); Push(sockPtr, readPtr); break; case SOCK_READY: if (SockQueue(sockPtr, &now) == NS_TIMEOUT) { Push(sockPtr, waitPtr); } break; case SOCK_BADHEADER: NS_FALL_THROUGH; /* fall through */ case SOCK_BADREQUEST: NS_FALL_THROUGH; /* fall through */ case SOCK_CLOSE: NS_FALL_THROUGH; /* fall through */ case SOCK_CLOSETIMEOUT: NS_FALL_THROUGH; /* fall through */ case SOCK_ENTITYTOOLARGE: NS_FALL_THROUGH; /* fall through */ case SOCK_ERROR: NS_FALL_THROUGH; /* fall through */ case SOCK_READERROR: NS_FALL_THROUGH; /* fall through */ case SOCK_READTIMEOUT: NS_FALL_THROUGH; /* fall through */ case SOCK_SHUTERROR: NS_FALL_THROUGH; /* fall through */ case SOCK_TOOMANYHEADERS: NS_FALL_THROUGH; /* fall through */ case SOCK_WRITEERROR: NS_FALL_THROUGH; /* fall through */ case SOCK_WRITETIMEOUT: Ns_Fatal("driver: SockAccept returned: %s", GetSockStateName(s)); } accepted++; gotRequests = NS_TRUE; #ifdef __APPLE__ /* * On Darwin, the first accept() succeeds typically, but it is * useless to try, since this leads always to an EAGAIN */ acceptMore = NS_FALSE; break; #endif } } if (!gotRequests) { acceptMore = NS_FALSE; } } if (accepted > 1) { Ns_Log(Notice, "... sockAccept accepted %d connections", accepted); } } /* * Check for shut down and get the list of any closing or * keep-alive sockets. */ Ns_MutexLock(&drvPtr->lock); sockPtr = drvPtr->closePtr; drvPtr->closePtr = NULL; flags = drvPtr->flags; Ns_MutexUnlock(&drvPtr->lock); stopping = ((flags & DRIVER_SHUTDOWN) != 0u); /* * Update the timeout for each closing socket and add to the * close list if some data has been read from the socket * (i.e., it's not a closing keep-alive connection). */ while (sockPtr != NULL) { nextPtr = sockPtr->nextPtr; if (sockPtr->keep) { assert(drvPtr == sockPtr->drvPtr); Ns_Log(DriverDebug, "setting keepwait %ld.%6ld for socket %d", drvPtr->keepwait.sec, drvPtr->keepwait.usec, sockPtr->sock); SockTimeout(sockPtr, &now, &drvPtr->keepwait); Push(sockPtr, readPtr); } else { /* * Purely packet oriented drivers set on close the fd to * NS_INVALID_SOCKET. Since we cannot "shutdown" an UDP-socket * for writing, we bypass this call. */ assert(drvPtr == sockPtr->drvPtr); if (sockPtr->sock == NS_INVALID_SOCKET) { SockRelease(sockPtr, SOCK_CLOSE, errno); Ns_Log(DriverDebug, "DRIVER SockRelease: errno %d drvPtr->closewait %ld.%6ld", errno, drvPtr->closewait.sec, drvPtr->closewait.usec); } else if (shutdown(sockPtr->sock, SHUT_WR) != 0) { SockRelease(sockPtr, SOCK_SHUTERROR, errno); } else { Ns_Log(DriverDebug, "setting closewait %ld.%6ld for socket %d", drvPtr->closewait.sec, drvPtr->closewait.usec, sockPtr->sock); SockTimeout(sockPtr, &now, &drvPtr->closewait); Push(sockPtr, closePtr); } } sockPtr = nextPtr; } /* * Close the active drivers if shutdown is pending. 
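         *
         * Closing the listen sockets just stops the acceptance of new
         * connections; sockets already on the read, wait, or close lists
         * above are still drained gracefully by this loop.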
*/ if (stopping) { for (n = 0; n < nrBindaddrs; n++) { ns_sockclose(drvPtr->listenfd[n]); drvPtr->listenfd[n] = NS_INVALID_SOCKET; } } } PollFree(&pdata); Ns_Log(Notice, "exiting"); Ns_MutexLock(&drvPtr->lock); drvPtr->flags |= DRIVER_STOPPED; Ns_CondBroadcast(&drvPtr->cond); Ns_MutexUnlock(&drvPtr->lock); } static void PollCreate(PollData *pdata) { NS_NONNULL_ASSERT(pdata != NULL); memset(pdata, 0, sizeof(PollData)); } static void PollFree(PollData *pdata) { NS_NONNULL_ASSERT(pdata != NULL); ns_free(pdata->pfds); memset(pdata, 0, sizeof(PollData)); } static void PollReset(PollData *pdata) { NS_NONNULL_ASSERT(pdata != NULL); pdata->nfds = 0u; pdata->timeout.sec = TIME_T_MAX; pdata->timeout.usec = 0; } static NS_POLL_NFDS_TYPE PollSet(PollData *pdata, NS_SOCKET sock, short type, const Ns_Time *timeoutPtr) { NS_NONNULL_ASSERT(pdata != NULL); /* * Grow the pfds array if necessary. */ if (unlikely(pdata->nfds >= pdata->maxfds)) { pdata->maxfds += 100u; pdata->pfds = ns_realloc(pdata->pfds, pdata->maxfds * sizeof(struct pollfd)); } /* * Set the next pollfd struct with this socket. */ pdata->pfds[pdata->nfds].fd = sock; pdata->pfds[pdata->nfds].events = type; pdata->pfds[pdata->nfds].revents = 0; /* * Check for new minimum timeout. */ if (timeoutPtr != NULL && Ns_DiffTime(timeoutPtr, &pdata->timeout, NULL) < 0) { pdata->timeout = *timeoutPtr; } return pdata->nfds++; } static int PollWait(const PollData *pdata, int timeout) { int n; NS_NONNULL_ASSERT(pdata != NULL); do { n = ns_poll(pdata->pfds, pdata->nfds, timeout); } while (n < 0 && errno == NS_EINTR); if (n < 0) { Ns_Fatal("PollWait: ns_poll() failed: %s", ns_sockstrerror(ns_sockerrno)); } return n; } /* *---------------------------------------------------------------------- * * RequestNew * * Prepares for reading from the socket, allocates a "Request" * struct for the given socket. It might be reused from the pool * or freshly allocated. Counterpart of RequestFree(). * * Results: * None * * Side effects: * None * *---------------------------------------------------------------------- */ static void RequestNew(Sock *sockPtr) { Request *reqPtr; bool reuseRequest = NS_TRUE; NS_NONNULL_ASSERT(sockPtr != NULL); /* * Try to get a request from the pool of allocated Requests. */ Ns_MutexLock(&reqLock); reqPtr = firstReqPtr; if (likely(reqPtr != NULL)) { firstReqPtr = reqPtr->nextPtr; } else { reuseRequest = NS_FALSE; } Ns_MutexUnlock(&reqLock); if (reuseRequest) { Ns_Log(DriverDebug, "RequestNew reuses a Request"); } /* * In case we failed, allocate a new Request. */ if (reqPtr == NULL) { Ns_Log(DriverDebug, "RequestNew gets a fresh Request"); reqPtr = ns_calloc(1u, sizeof(Request)); Tcl_DStringInit(&reqPtr->buffer); reqPtr->headers = Ns_SetCreate(NULL); } sockPtr->reqPtr = reqPtr; } /* *---------------------------------------------------------------------- * * RequestFree -- * * Free/clean a socket request structure. This routine is called * at the end of connection processing or on a socket which * times out during async read-ahead. Counterpart of RequestNew(). * * Results: * None. * * Side effects: * None. 
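 *
 *      Note: in the keep-alive case, leftover (pipelined) input is moved
 *      to the front of the buffer and stays with the socket; otherwise
 *      the Request is pushed back onto the global pool guarded by
 *      "reqLock".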
* *---------------------------------------------------------------------- */ static void RequestFree(Sock *sockPtr) { Request *reqPtr; bool keep; NS_NONNULL_ASSERT(sockPtr != NULL); reqPtr = sockPtr->reqPtr; assert(reqPtr != NULL); Ns_Log(DriverDebug, "=== RequestFree cleans %p (avail %" PRIuz " keep %d length %" PRIuz " contentLength %" PRIuz ")", (void *)reqPtr, reqPtr->avail, sockPtr->keep, reqPtr->length, reqPtr->contentLength); keep = (sockPtr->keep) && (reqPtr->avail > reqPtr->contentLength); if (keep) { size_t leftover = reqPtr->avail - reqPtr->contentLength; const char *offset = reqPtr->buffer.string + ((size_t)reqPtr->buffer.length - leftover); Ns_Log(DriverDebug, "setting leftover to %" PRIuz " bytes", leftover); /* * Here it is safe to move the data in the buffer, although the * reqPtr->content might point to it, since we re-init the content. In * case the terminating null character was written to the end of the * previous buffer, we have to restore the first character. */ memmove(reqPtr->buffer.string, offset, leftover); if (reqPtr->savedChar != '\0') { reqPtr->buffer.string[0] = reqPtr->savedChar; } Tcl_DStringSetLength(&reqPtr->buffer, (int)leftover); LogBuffer(DriverDebug, "KEEP BUFFER", reqPtr->buffer.string, leftover); reqPtr->leftover = leftover; } else { /* * Clean large buffers in order to avoid memory growth on huge * uploads (when maxupload is huge) */ /*fprintf(stderr, "=== reuse buffer size %d avail %d dynamic %d\n", reqPtr->buffer.length, reqPtr->buffer.spaceAvl, reqPtr->buffer.string == reqPtr->buffer.staticSpace);*/ if (Tcl_DStringLength(&reqPtr->buffer) > 65536) { Tcl_DStringFree(&reqPtr->buffer); } else { /* * Reuse buffer, but set length to 0. */ Tcl_DStringSetLength(&reqPtr->buffer, 0); } reqPtr->leftover = 0u; } reqPtr->next = NULL; reqPtr->content = NULL; reqPtr->length = 0u; reqPtr->contentLength = 0u; reqPtr->expectedLength = 0u; reqPtr->chunkStartOff = 0u; reqPtr->chunkWriteOff = 0u; reqPtr->roff = 0u; reqPtr->woff = 0u; reqPtr->coff = 0u; reqPtr->avail = 0u; reqPtr->savedChar = '\0'; Ns_SetTrunc(reqPtr->headers, 0u); if (reqPtr->auth != NULL) { Ns_SetFree(reqPtr->auth); reqPtr->auth = NULL; } if (reqPtr->request.line != NULL) { Ns_Log(DriverDebug, "RequestFree calls Ns_ResetRequest on %p", (void*)&reqPtr->request); Ns_ResetRequest(&reqPtr->request); } else { Ns_Log(DriverDebug, "RequestFree does not call Ns_ResetRequest on %p", (void*)&reqPtr->request); } if (!keep) { /* * Push the reqPtr to the pool for reuse in other connections. */ sockPtr->reqPtr = NULL; Ns_MutexLock(&reqLock); reqPtr->nextPtr = firstReqPtr; firstReqPtr = reqPtr; Ns_MutexUnlock(&reqLock); } else { /* * Keep the partly cleaned up reqPtr associated with the connection. */ Ns_Log(DriverDebug, "=== KEEP request structure in sockPtr (don't push into the pool)"); } } /* *---------------------------------------------------------------------- * * SockQueue -- * * Puts socket into connection queue * * Results: * NS_OK if queued, * NS_ERROR if socket closed because of error * NS_TIMEOUT if queue is full * * Side effects: * None. * *---------------------------------------------------------------------- */ static Ns_ReturnCode SockQueue(Sock *sockPtr, const Ns_Time *timePtr) { Ns_ReturnCode result; NS_NONNULL_ASSERT(sockPtr != NULL); /* * Verify the conditions. Request struct must exist already. */ assert(sockPtr->reqPtr != NULL); SockSetServer(sockPtr); assert(sockPtr->servPtr != NULL); /* * Actual queueing, if not ready spool to the waiting list. 
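     *
     * Callers in the driver thread use the NS_TIMEOUT result to park the
     * socket on the wait list, following the pattern
     *
     *     if (SockQueue(sockPtr, &now) == NS_TIMEOUT) {
     *         Push(sockPtr, waitPtr);
     *     }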
     */
    if (!NsQueueConn(sockPtr, timePtr)) {
        result = NS_TIMEOUT;
    } else {
        result = NS_OK;
    }

    return result;
}

/*
 *----------------------------------------------------------------------
 *
 * SockPoll --
 *
 *      Arrange for given Sock to be monitored.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      Sock fd will be monitored for readability on next spin of
 *      DriverThread.
 *
 *----------------------------------------------------------------------
 */
static void
SockPoll(Sock *sockPtr, short type, PollData *pdata)
{
    NS_NONNULL_ASSERT(sockPtr != NULL);
    NS_NONNULL_ASSERT(pdata != NULL);

    sockPtr->pidx = PollSet(pdata, sockPtr->sock, type, &sockPtr->timeout);
}

/*
 *----------------------------------------------------------------------
 *
 * SockTimeout --
 *
 *      Update the socket with a timeout.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      The socket timeout will be set to nowPtr + timeout.
 *
 *----------------------------------------------------------------------
 */
static void
SockTimeout(Sock *sockPtr, const Ns_Time *nowPtr, const Ns_Time *timeout)
{
    NS_NONNULL_ASSERT(sockPtr != NULL);
    sockPtr->timeout = *nowPtr;
    Ns_IncrTime(&sockPtr->timeout, timeout->sec, timeout->usec);
}

/*
 *----------------------------------------------------------------------
 *
 * SockAccept --
 *
 *      Accept and initialize a new Sock in sockPtrPtr.
 *
 * Results:
 *      SOCK_READY, SOCK_MORE, SOCK_SPOOL,
 *      SOCK_ERROR + NULL sockPtr.
 *
 * Side effects:
 *      Read-ahead may be attempted on new socket.
 *
 *----------------------------------------------------------------------
 */
static SockState
SockAccept(Driver *drvPtr, NS_SOCKET sock, Sock **sockPtrPtr, const Ns_Time *nowPtr)
{
    Sock                    *sockPtr;
    SockState                sockStatus;
    NS_DRIVER_ACCEPT_STATUS  status;

    NS_NONNULL_ASSERT(drvPtr != NULL);

    sockPtr = SockNew(drvPtr);

    /*
     * Accept the new connection.
     */
    status = DriverAccept(sockPtr, sock);

    if (unlikely(status == NS_DRIVER_ACCEPT_ERROR)) {
        sockStatus = SOCK_ERROR;

        /*
         * We reach this place frequently, especially on Linux, when we try
         * to accept multiple connections in one sweep. Usually, the errno
         * is EAGAIN.
         */
        Ns_MutexLock(&drvPtr->lock);
        sockPtr->nextPtr = drvPtr->sockPtr;
        drvPtr->sockPtr = sockPtr;
        Ns_MutexUnlock(&drvPtr->lock);
        sockPtr = NULL;

    } else {
        sockPtr->acceptTime = *nowPtr;
        drvPtr->queuesize++;

        if (status == NS_DRIVER_ACCEPT_DATA) {

            /*
             * If there is already data present then read it without
             * polling if we're in async mode.
             */
            if ((drvPtr->opts & NS_DRIVER_ASYNC) != 0u) {
                sockStatus = SockRead(sockPtr, 0, nowPtr);
                if ((int)sockStatus < 0) {
                    Ns_Log(DriverDebug, "SockRead returned error %s",
                           GetSockStateName(sockStatus));

                    SockRelease(sockPtr, sockStatus, errno);
                    sockStatus = SOCK_ERROR;
                    sockPtr = NULL;
                }
            } else {
                /*
                 * Queue this socket without reading; NsGetRequest() in the
                 * connection thread will perform the actual reading of the
                 * request.
                 */
                sockStatus = SOCK_READY;
            }
        } else if (status == NS_DRIVER_ACCEPT_QUEUE) {

            /*
             * We need to call RequestNew() to make sure the socket has a
             * request structure allocated, otherwise NsGetRequest() will
             * call SockRead(), which is not what this driver wants.
             */
            if (sockPtr->reqPtr == NULL) {
                RequestNew(sockPtr);
            }
            sockStatus = SOCK_READY;
        } else {
            sockStatus = SOCK_MORE;
        }
    }

    *sockPtrPtr = sockPtr;

    return sockStatus;
}

/*
 *----------------------------------------------------------------------
 *
 * SockNew --
 *
 *      Allocate and/or initialize a Sock structure. Counterpart of
 *      SockRelease().
 *
 * Results:
 *      SockPtr
 *
 * Side effects:
 *      Potentially new memory is allocated.
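 *
 *      Note: Sock structures are pooled per driver via the
 *      drvPtr->sockPtr free list; the allocation size includes
 *      nsconf.nextSlsId extra slots for socket-local storage.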
 *
 *----------------------------------------------------------------------
 */
static Sock *
SockNew(Driver *drvPtr)
{
    Sock *sockPtr;

    NS_NONNULL_ASSERT(drvPtr != NULL);

    Ns_MutexLock(&drvPtr->lock);
    sockPtr = drvPtr->sockPtr;
    if (likely(sockPtr != NULL)) {
        drvPtr->sockPtr = sockPtr->nextPtr;
        sockPtr->keep = NS_FALSE;
    }
    Ns_MutexUnlock(&drvPtr->lock);

    if (sockPtr == NULL) {
        size_t sockSize = sizeof(Sock) + (nsconf.nextSlsId * sizeof(Ns_Callback *));

        sockPtr = ns_calloc(1u, sockSize);
        sockPtr->drvPtr = drvPtr;
    } else {
        sockPtr->tfd   = 0;
        sockPtr->taddr = NULL;
        sockPtr->flags = 0u;
        sockPtr->arg   = NULL;
        sockPtr->recvSockState = NS_SOCK_NONE;
    }

    return sockPtr;
}

/*
 *----------------------------------------------------------------------
 *
 * SockRelease --
 *
 *      Close a socket and release the connection structure for
 *      re-use.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      None.
 *
 *----------------------------------------------------------------------
 */
static void
SockRelease(Sock *sockPtr, SockState reason, int err)
{
    Driver *drvPtr;

    NS_NONNULL_ASSERT(sockPtr != NULL);

    Ns_Log(DriverDebug, "SockRelease reason %s err %d (sock %d)",
           GetSockStateName(reason), err, sockPtr->sock);

    drvPtr = sockPtr->drvPtr;
    assert(drvPtr != NULL);

    SockError(sockPtr, reason, err);

    if (sockPtr->sock != NS_INVALID_SOCKET) {
        SockClose(sockPtr, (int)NS_FALSE);
    } else {
        Ns_Log(DriverDebug, "SockRelease bypasses SockClose, since we have an invalid socket");
    }
    NsSlsCleanup(sockPtr);

    drvPtr->queuesize--;

    if (sockPtr->reqPtr != NULL) {
        Ns_Log(DriverDebug, "SockRelease calls RequestFree");
        RequestFree(sockPtr);
    }

    Ns_MutexLock(&drvPtr->lock);
    sockPtr->nextPtr = drvPtr->sockPtr;
    drvPtr->sockPtr  = sockPtr;
    Ns_MutexUnlock(&drvPtr->lock);
}

/*
 *----------------------------------------------------------------------
 *
 * SockError --
 *
 *      Log an error message for the given socket.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      None.
 *
 *----------------------------------------------------------------------
 */
static void
SockError(Sock *sockPtr, SockState reason, int err)
{
    const char *errMsg = NULL;

    NS_NONNULL_ASSERT(sockPtr != NULL);

    switch (reason) {
    case SOCK_READY:
    case SOCK_SPOOL:
    case SOCK_MORE:
    case SOCK_CLOSE:
    case SOCK_CLOSETIMEOUT:
        /* This is normal, never log. */
        break;

    case SOCK_READTIMEOUT:
        /*
         * For this case, whether this is acceptable or not
         * depends upon whether this sock was a keep-alive
         * that we were allowing to 'linger'.
         */
        if (!sockPtr->keep) {
            errMsg = "Timeout during read";
        }
        break;

    case SOCK_WRITETIMEOUT:
        errMsg = "Timeout during write";
        break;

    case SOCK_READERROR:
        errMsg = "Unable to read request";
        break;

    case SOCK_WRITEERROR:
        errMsg = "Unable to write request";
        break;

    case SOCK_SHUTERROR:
        errMsg = "Unable to shutdown socket";
        break;

    case SOCK_BADREQUEST:
        errMsg = "Bad Request";
        SockSendResponse(sockPtr, 400, errMsg);
        break;

    case SOCK_TOOMANYHEADERS:
        errMsg = "Too Many Request Headers";
        SockSendResponse(sockPtr, 414, errMsg);
        break;

    case SOCK_BADHEADER:
        errMsg = "Invalid Request Header";
        SockSendResponse(sockPtr, 400, errMsg);
        break;

    case SOCK_ENTITYTOOLARGE:
        errMsg = "Request Entity Too Large";
        SockSendResponse(sockPtr, 413, errMsg);
        break;

    case SOCK_ERROR:
        errMsg = "Unknown Error";
        SockSendResponse(sockPtr, 400, errMsg);
        break;
    }

    if (errMsg != NULL) {
        char ipString[NS_IPADDR_SIZE];

        Ns_Log(DriverDebug, "SockError: %s (%d: %s), sock: %d, peer: [%s]:%d, request: %.99s",
               errMsg,
               err, (err != 0) ? strerror(err) : NS_EMPTY_STRING,
               sockPtr->sock,
               ns_inet_ntop((struct sockaddr *)&(sockPtr->sa), ipString, sizeof(ipString)),
               Ns_SockaddrGetPort((struct sockaddr *)&(sockPtr->sa)),
               (sockPtr->reqPtr != NULL) ? sockPtr->reqPtr->buffer.string : NS_EMPTY_STRING);
    }
}

/*
 *----------------------------------------------------------------------
 *
 * SockSendResponse --
 *
 *      Send an HTTP response directly to the client using the
 *      driver callback.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      May not send the complete response to the client
 *      when encountering a non-writable connection socket.
 *
 *----------------------------------------------------------------------
 */
static void
SockSendResponse(Sock *sockPtr, int code, const char *errMsg)
{
    struct iovec iov[3];
    char         header[32];
    ssize_t      sent, tosend;

    NS_NONNULL_ASSERT(sockPtr != NULL);
    NS_NONNULL_ASSERT(errMsg != NULL);

    snprintf(header, sizeof(header), "HTTP/1.0 %d ", code);
    iov[0].iov_base = header;
    iov[0].iov_len  = strlen(header);
    iov[1].iov_base = (void *)errMsg;
    iov[1].iov_len  = strlen(errMsg);
    iov[2].iov_base = (void *)"\r\n\r\n";
    iov[2].iov_len  = 4u;
    tosend = (ssize_t)(iov[0].iov_len + iov[1].iov_len + iov[2].iov_len);
    sent = NsDriverSend(sockPtr, iov, 3, 0u);
    if (sent < tosend) {
        Ns_Log(Warning, "Driver: partial write while sending response;"
               " %" PRIdz " < %" PRIdz, sent, tosend);
    }

    /*
     * In case we have a request structure, complain in the system log about
     * the bad request.
     */
    if (sockPtr->reqPtr != NULL) {
        Request    *reqPtr = sockPtr->reqPtr;
        const char *requestLine = (reqPtr->request.line != NULL)
            ? reqPtr->request.line
            : NS_EMPTY_STRING;

        (void)ns_inet_ntop((struct sockaddr *)&(sockPtr->sa),
                           sockPtr->reqPtr->peer, NS_IPADDR_SIZE);
        /*
         * Check if the bad request looks like a TLS handshake. If so, there
         * is no need to print out the received buffer.
         */
        if (requestLine[0] == (char)0x16 && requestLine[1] >= 3 && requestLine[2] == 1) {
            Ns_Log(Warning, "invalid request %d (%s) from peer %s: received TLS handshake on a non-TLS connection",
                   code, errMsg, reqPtr->peer);
        } else {
            Tcl_DString dsReqLine;

            Tcl_DStringInit(&dsReqLine);
            Ns_Log(Warning, "invalid request: %d (%s) from peer %s request '%s' offsets: read %" PRIuz
                   " write %" PRIuz " content %" PRIuz " avail %" PRIuz,
                   code, errMsg,
                   reqPtr->peer,
                   Ns_DStringAppendPrintable(&dsReqLine, NS_FALSE, requestLine, strlen(requestLine)),
                   reqPtr->roff,
                   reqPtr->woff,
                   reqPtr->coff,
                   reqPtr->avail);
            Tcl_DStringFree(&dsReqLine);

            LogBuffer(Warning, "REQ BUFFER", reqPtr->buffer.string, (size_t)reqPtr->buffer.length);
        }
    } else {
        Ns_Log(Warning, "invalid request: %d (%s) - no request information available",
               code, errMsg);
    }
}

/*
 *----------------------------------------------------------------------
 *
 * SockTrigger --
 *
 *      Wake up the DriverThread from a blocking ns_poll().
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      The DriverThread will wake up.
 *
 *----------------------------------------------------------------------
 */
static void
SockTrigger(NS_SOCKET sock)
{
    if (send(sock, NS_EMPTY_STRING, 1, 0) != 1) {
        const char *errstr = ns_sockstrerror(ns_sockerrno);

        Ns_Log(Error, "driver: trigger send() failed: %s", errstr);
    }
}

/*
 *----------------------------------------------------------------------
 *
 * SockClose --
 *
 *      Closes connection socket, does all cleanups. The input parameter
 *      "keep" might be NS_TRUE/NS_FALSE or -1 if undecided.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      None.
 *
 *----------------------------------------------------------------------
 */
static void
SockClose(Sock *sockPtr, int keep)
{
    NS_NONNULL_ASSERT(sockPtr != NULL);

    if (keep != 0) {
        bool driverKeep = DriverKeep(sockPtr);
        keep = (int)driverKeep;
    }
    if (keep == (int)NS_FALSE) {
        DriverClose(sockPtr);
    }
    Ns_MutexLock(&sockPtr->drvPtr->lock);
    sockPtr->keep = (bool)keep;
    Ns_MutexUnlock(&sockPtr->drvPtr->lock);

    /*
     * Unconditionally remove the temporary file; the connection thread
     * should take care of very large uploads.
     */
    if (sockPtr->tfile != NULL) {
        unlink(sockPtr->tfile);
        ns_free(sockPtr->tfile);
        sockPtr->tfile = NULL;

        if (sockPtr->tfd > 0) {
            /*
             * Close and reset fd. The fd should be > 0 unless we are in error
             * conditions.
             */
            (void) ns_close(sockPtr->tfd);
        }
        sockPtr->tfd = 0;

    } else if (sockPtr->tfd > 0) {
        /*
         * This must be an fd allocated via Ns_GetTemp().
         */
        Ns_ReleaseTemp(sockPtr->tfd);
        sockPtr->tfd = 0;
    }

#ifndef _WIN32
    /*
     * Un-map temp file used for spooled content.
     */
    if (sockPtr->taddr != NULL) {
        munmap(sockPtr->taddr, (size_t)sockPtr->tsize);
        sockPtr->taddr = NULL;
    }
#endif
}

/*
 *----------------------------------------------------------------------
 *
 * ChunkedDecode --
 *
 *      Reads the content from the incoming request buffer and tries
 *      to decode chunked encoding parts. The function can be called
 *      repeatedly with incomplete input; optionally, it overwrites
 *      the buffer with the decoded data. The decoded data is always
 *      shorter than the encoded one.
 *
 * Results:
 *      NS_TRUE when the chunk was complete, NS_FALSE otherwise.
 *
 * Side effects:
 *      Updates the buffer if "update" is true (and adjusts
 *      reqPtr->chunkWriteOff). Always updates reqPtr->chunkStartOff
 *      to allow incremental operations.
 *
 *----------------------------------------------------------------------
 */
static bool
ChunkedDecode(Request *reqPtr, bool update)
{
    const Tcl_DString *bufPtr;
    const char        *end, *chunkStart;
    bool               success = NS_TRUE;

    NS_NONNULL_ASSERT(reqPtr != NULL);

    bufPtr = &reqPtr->buffer;
    end = bufPtr->string + bufPtr->length;
    chunkStart = bufPtr->string + reqPtr->chunkStartOff;

    while (reqPtr->chunkStartOff < (size_t)bufPtr->length) {
        char   *p = strstr(chunkStart, "\r\n"), *endPtr = NULL;
        long    parsedLength;
        size_t  chunk_length;

        if (p == NULL) {
            Ns_Log(DriverDebug, "ChunkedDecode: chunk did not find end-of-line");
            success = NS_FALSE;
            break;
        }

        *p = '\0';
        parsedLength = strtol(chunkStart, &endPtr, 16);
        *p = '\r';

        /*
         * Guard the length computation: reject chunk headers which are no
         * valid hex numbers or which parse to a negative value. An
         * unchecked negative value would wrap around in the (size_t) cast
         * and defeat the buffer-end check below.
         */
        if (endPtr == chunkStart || parsedLength < 0) {
            Ns_Log(DriverDebug, "ChunkedDecode: invalid chunk length");
            success = NS_FALSE;
            break;
        }
        chunk_length = (size_t)parsedLength;

        /*
         * Compare against the number of remaining bytes instead of
         * computing "p + 2 + chunk_length", which might overflow the
         * pointer arithmetic for huge chunk lengths.
         */
        if (chunk_length > (size_t)(end - p) - 2u) {
            Ns_Log(DriverDebug, "ChunkedDecode: chunk length past end of buffer");
            success = NS_FALSE;
            break;
        }
        if (update) {
            char *writeBuffer = bufPtr->string + reqPtr->chunkWriteOff;

            memmove(writeBuffer, p + 2, chunk_length);
            reqPtr->chunkWriteOff += chunk_length;
            *(writeBuffer + chunk_length) = '\0';
        }
        reqPtr->chunkStartOff += (size_t)(p - chunkStart) + 4u + chunk_length;
        chunkStart = bufPtr->string + reqPtr->chunkStartOff;
    }

    return success;
}

/*
 *----------------------------------------------------------------------
 *
 * SockRead --
 *
 *      Read content from the given Sock, processing the input as
 *      necessary. This is the core callback routine designed to
 *      either be called repeatedly within the DriverThread during
 *      an async read-ahead or in a blocking loop in NsGetRequest()
 *      at the start of connection processing.
 *
 * Results:
 *      SOCK_READY: Request is ready for processing.
 *      SOCK_MORE:  More input is required.
 *      SOCK_ERROR: Client drop or timeout.
* SOCK_SPOOL: Pass input handling to spooler * SOCK_CLOSE: peer closed connection * SOCK_BADREQUEST * SOCK_BADHEADER * SOCK_TOOMANYHEADERS * * Side effects: * The Request structure will be built up for use by the * connection thread. Also, before returning SOCK_READY, * the next byte to read mark and bytes available are set * to the beginning of the content, just beyond the headers. * * Contents may be spooled into temp file and mmap-ed * *---------------------------------------------------------------------- */ static SockState SockRead(Sock *sockPtr, int spooler, const Ns_Time *timePtr) { const Driver *drvPtr; Request *reqPtr; Tcl_DString *bufPtr; struct iovec buf; char tbuf[16384]; size_t buflen, nread; ssize_t n; SockState resultState; NS_NONNULL_ASSERT(sockPtr != NULL); drvPtr = sockPtr->drvPtr; tbuf[0] = '\0'; /* * In case of "keepwait", the accept time is not meaningful and * reset to 0. In such cases, update "acceptTime" to the actual * begin of a request. This part is intended for async drivers. */ if (sockPtr->acceptTime.sec == 0) { assert(timePtr != NULL); sockPtr->acceptTime = *timePtr; } /* * Initialize request structure if needed. */ if (sockPtr->reqPtr == NULL) { RequestNew(sockPtr); } /* * On the first read, attempt to read-ahead "bufsize" bytes. * Otherwise, read only the number of bytes left in the * content. */ reqPtr = sockPtr->reqPtr; bufPtr = &reqPtr->buffer; if (reqPtr->length == 0u) { nread = drvPtr->bufsize; } else { nread = reqPtr->length - reqPtr->avail; } /* * Grow the buffer to include space for the next bytes. */ buflen = (size_t)bufPtr->length; n = (ssize_t)(buflen + nread); if (unlikely(n > drvPtr->maxinput)) { n = (ssize_t)drvPtr->maxinput; nread = (size_t)n - buflen; if (nread == 0u) { Ns_Log(DriverDebug, "SockRead: maxinput reached %" TCL_LL_MODIFIER "d", drvPtr->maxinput); return SOCK_ERROR; } } /* * Use temp file for content larger than "readahead" bytes. */ #ifndef _WIN32 if (reqPtr->coff > 0u /* We are in the content part (after the header) */ && !reqPtr->chunkStartOff /* Never spool chunked encoded data since we decode in memory */ && reqPtr->length > (size_t)drvPtr->readahead /* We need more data */ && sockPtr->tfd <= 0 /* We have no spool fd */ ) { const DrvSpooler *spPtr = &drvPtr->spooler; Ns_Log(DriverDebug, "SockRead: require tmp file for content spooling (length %" PRIuz" > readahead " "%" TCL_LL_MODIFIER "d)", reqPtr->length, drvPtr->readahead); /* * In driver mode send this Sock to the spooler thread if * it is running */ if (spooler == 0 && spPtr->threads > 0) { return SOCK_SPOOL; } /* * If "maxupload" is specified and content size exceeds the configured * values, spool uploads into normal temp file (not deleted). We do * not want to map such large files into memory. */ if (drvPtr->maxupload > 0 && reqPtr->length > (size_t)drvPtr->maxupload ) { size_t tfileLength = strlen(drvPtr->uploadpath) + 16u; sockPtr->tfile = ns_malloc(tfileLength); snprintf(sockPtr->tfile, tfileLength, "%s/%d.XXXXXX", drvPtr->uploadpath, sockPtr->sock); sockPtr->tfd = ns_mkstemp(sockPtr->tfile); if (sockPtr->tfd == NS_INVALID_FD) { Ns_Log(Error, "SockRead: cannot create spool file with template '%s': %s", sockPtr->tfile, strerror(errno)); } } else { /* * Get a temporary fd. These FDs are used for mmapping. 
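         * Ns_GetTemp() returns a cached temp-file fd, which is handed back
         * via Ns_ReleaseTemp() in SockClose(), whereas the "maxupload"
         * spool files created above via ns_mkstemp() keep their name in
         * sockPtr->tfile and are unlinked on close.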
             */
            sockPtr->tfd = Ns_GetTemp();
        }

        if (unlikely(sockPtr->tfd == NS_INVALID_FD)) {
            Ns_Log(DriverDebug, "SockRead: spool fd invalid");
            return SOCK_ERROR;
        }

        n = (ssize_t)((size_t)bufPtr->length - reqPtr->coff);
        assert(n >= 0);
        if (ns_write(sockPtr->tfd, bufPtr->string + reqPtr->coff, (size_t)n) != n) {
            return SOCK_WRITEERROR;
        }
        Tcl_DStringSetLength(bufPtr, 0);
    }
#endif
    if (sockPtr->tfd > 0) {
        buf.iov_base = tbuf;
        buf.iov_len = MIN(nread, sizeof(tbuf));
    } else {
        Tcl_DStringSetLength(bufPtr, (int)(buflen + nread));
        buf.iov_base = bufPtr->string + reqPtr->woff;
        buf.iov_len = nread;
    }

    if (reqPtr->leftover > 0u) {
        /*
         * There is some leftover in the buffer; don't read from the
         * socket but take the leftover as input instead.
         */
        n = (ssize_t)reqPtr->leftover;
        reqPtr->leftover = 0u;
        buflen = 0u;
        Ns_Log(DriverDebug, "SockRead receive from leftover %" PRIdz " bytes", n);
    } else {
        /*
         * Actually receive some data from the driver.
         */
        n = NsDriverRecv(sockPtr, &buf, 1, NULL);
        Ns_Log(DriverDebug, "SockRead receive from network %" PRIdz " bytes sockState %.2x",
               n, (int)sockPtr->recvSockState);
    }

    {
        Ns_SockState nsSockState = sockPtr->recvSockState;
        /*
         * The nsSockState has one of the following values, when provided:
         *
         * NS_SOCK_READ, NS_SOCK_DONE, NS_SOCK_AGAIN, NS_SOCK_EXCEPTION,
         * NS_SOCK_TIMEOUT
         */
        switch (nsSockState) {
        case NS_SOCK_TIMEOUT:  NS_FALL_THROUGH; /* fall through */
        case NS_SOCK_EXCEPTION:
            return SOCK_READERROR;

        case NS_SOCK_AGAIN:
            Tcl_DStringSetLength(bufPtr, (int)buflen);
            return SOCK_MORE;

        case NS_SOCK_DONE:
            return SOCK_CLOSE;

        case NS_SOCK_READ:
            break;

        case NS_SOCK_CANCEL: NS_FALL_THROUGH; /* fall through */
        case NS_SOCK_EXIT:   NS_FALL_THROUGH; /* fall through */
        case NS_SOCK_INIT:   NS_FALL_THROUGH; /* fall through */
        case NS_SOCK_WRITE:
            Ns_Log(Warning, "SockRead received unexpected state %.2x from driver", nsSockState);
            return SOCK_READERROR;

        case NS_SOCK_NONE:
            /*
             * Old-style state management based on "n" and "errno", which is
             * more fragile. We keep it for old-style drivers.
             */
            if (n < 0) {
                Tcl_DStringSetLength(bufPtr, (int)buflen);
                /*
                 * The driver returns -1 when the peer closed the connection,
                 * but clears errno such that we can distinguish this case
                 * from error conditions.
                 */
                if (errno == 0) {
                    return SOCK_CLOSE;
                }
                return SOCK_READERROR;
            }

            if (n == 0) {
                Tcl_DStringSetLength(bufPtr, (int)buflen);
                return SOCK_MORE;
            }
            break;
        }
    }

    if (sockPtr->tfd > 0) {
        if (ns_write(sockPtr->tfd, tbuf, (size_t)n) != n) {
            return SOCK_WRITEERROR;
        }
    } else {
        Tcl_DStringSetLength(bufPtr, (int)(buflen + (size_t)n));
    }

    reqPtr->woff  += (size_t)n;
    reqPtr->avail += (size_t)n;

    /*
     * This driver needs the raw buffer: it is a binary or non-HTTP request.
     */
    if ((drvPtr->opts & NS_DRIVER_NOPARSE) != 0u) {
        return SOCK_READY;
    }

    resultState = SockParse(sockPtr);

    return resultState;
}

/*----------------------------------------------------------------------
 *
 * LogBuffer --
 *
 *      Debug function to output buffer content when the provided severity is
 *      enabled. The function prints just visible characters and space as is
 *      and prints the hex code otherwise.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      Writes to error.log.
 *
 *----------------------------------------------------------------------
 */
static void
LogBuffer(Ns_LogSeverity severity, const char *msg, const char *buffer, size_t len)
{
    Tcl_DString ds;

    NS_NONNULL_ASSERT(msg != NULL);
    NS_NONNULL_ASSERT(buffer != NULL);

    if (Ns_LogSeverityEnabled(severity)) {
        Tcl_DStringInit(&ds);
        Tcl_DStringAppend(&ds, msg, -1);
        Tcl_DStringAppend(&ds, ": ", 2);
        (void)Ns_DStringAppendPrintable(&ds, NS_FALSE, buffer, len);

        Ns_Log(severity, "%s", ds.string);
        Tcl_DStringFree(&ds);
    }
}

/*----------------------------------------------------------------------
 *
 * EndOfHeader --
 *
 *      Function to be called (once) when the end of the header is
 *      reached. At this time, all request header lines have already
 *      been parsed correctly.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      Updates various reqPtr fields and signals certain facts and error
 *      conditions via sockPtr->flags. In error conditions, sockPtr->keep is
 *      set to NS_FALSE.
 *
 *----------------------------------------------------------------------
 */
static size_t
EndOfHeader(Sock *sockPtr)
{
    Request    *reqPtr;
    const char *s;

    NS_NONNULL_ASSERT(sockPtr != NULL);

    reqPtr = sockPtr->reqPtr;
    assert(reqPtr != NULL);

    reqPtr->chunkStartOff = 0u;

    /*
     * Check for "expect: 100-continue" and clear the flag in case we have
     * pipelining.
     */
    sockPtr->flags &= ~(NS_CONN_CONTINUE);
    s = Ns_SetIGet(reqPtr->headers, "expect");
    if (s != NULL) {
        if (*s == '1' && *(s+1) == '0' && *(s+2) == '0' && *(s+3) == '-') {
            char *dup = ns_strdup(s+4);

            Ns_StrToLower(dup);
            if (STREQ(dup, "continue")) {
                sockPtr->flags |= NS_CONN_CONTINUE;
            }
            ns_free(dup);
        }
    }

    /*
     * Handle content-length, which might or might not be provided.
     * Clear the length-specific error flags.
     */
    sockPtr->flags &= ~(NS_CONN_ENTITYTOOLARGE);
    s = Ns_SetIGet(reqPtr->headers, "content-length");

    if (s == NULL) {
        s = Ns_SetIGet(reqPtr->headers, "Transfer-Encoding");

        if (s != NULL) {
            /* Lower case is in the standard, capitalized by macOS */
            if (STREQ(s, "chunked") || STREQ(s, "Chunked")) {
                Tcl_WideInt expected;

                reqPtr->chunkStartOff = reqPtr->roff;
                reqPtr->chunkWriteOff = reqPtr->chunkStartOff;
                reqPtr->contentLength = 0u;

                /*
                 * We need reqPtr->expectedLength for safely terminating
                 * the read loop.
                 */
                s = Ns_SetIGet(reqPtr->headers, "X-Expected-Entity-Length");

                if ((s != NULL)
                    && (Ns_StrToWideInt(s, &expected) == NS_OK)
                    && (expected > 0) ) {
                    reqPtr->expectedLength = (size_t)expected;
                }
                s = NULL;
            }
        }
    }

    /*
     * In case a valid and meaningful content length was provided, the
     * string with the content length ("s") is not NULL.
     */
    if (s != NULL) {
        Tcl_WideInt length;

        if ((Ns_StrToWideInt(s, &length) == NS_OK) && (length > 0)) {
            reqPtr->length = (size_t)length;
            /*
             * Handle too large input requests.
             */
            if (reqPtr->length > (size_t)sockPtr->drvPtr->maxinput) {
                Ns_Log(Warning, "SockParse: request too large, length=%"
                       PRIdz ", maxinput=%" TCL_LL_MODIFIER "d",
                       reqPtr->length, sockPtr->drvPtr->maxinput);

                sockPtr->keep = NS_FALSE;
                sockPtr->flags |= NS_CONN_ENTITYTOOLARGE;
            }
            reqPtr->contentLength = (size_t)length;
        }
    }

    /*
     * Compression format handling: parse information from request headers
     * indicating allowed compression formats for quick access.
     *
     * Clear the compression accepted flags.
     */
    sockPtr->flags &= ~(NS_CONN_ZIPACCEPTED|NS_CONN_BROTLIACCEPTED);

    s = Ns_SetIGet(reqPtr->headers, "Accept-Encoding");
    if (s != NULL) {
        bool gzipAccept, brotliAccept;

        /*
         * Get allowed compression formats from "accept-encoding" headers.
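         *
         * Illustrative example (assumed client header): a request carrying
         *
         *     Accept-Encoding: gzip, br
         *
         * sets both NS_CONN_ZIPACCEPTED and NS_CONN_BROTLIACCEPTED below,
         * unless a "Range" header is present, since compressed delivery
         * would break byte-range semantics.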
         */
        NsParseAcceptEncoding(reqPtr->request.version, s, &gzipAccept, &brotliAccept);
        if (gzipAccept || brotliAccept) {
            /*
             * Don't allow compression formats for Range requests.
             */
            s = Ns_SetIGet(reqPtr->headers, "Range");
            if (s == NULL) {
                if (gzipAccept) {
                    sockPtr->flags |= NS_CONN_ZIPACCEPTED;
                }
                if (brotliAccept) {
                    sockPtr->flags |= NS_CONN_BROTLIACCEPTED;
                }
            }
        }
    }

    /*
     * Set up the request length for spooling and further read operations.
     */
    if (reqPtr->contentLength != 0u) {
        /*
         * Content-Length was provided, use it.
         */
        reqPtr->length = reqPtr->contentLength;
    }

    return reqPtr->roff;
}

/*----------------------------------------------------------------------
 *
 * SockParse --
 *
 *      Construct the given conn by parsing the input buffer until the
 *      end of the headers. Return SOCK_READY when finished parsing.
 *
 * Results:
 *      SOCK_READY: Conn is ready for processing.
 *      SOCK_MORE:  More input is required.
 *      SOCK_ERROR: Malformed request.
 *      SOCK_BADREQUEST
 *      SOCK_BADHEADER
 *      SOCK_TOOMANYHEADERS
 *
 * Side effects:
 *      An Ns_Request and/or Ns_Set may be allocated.
 *      Ns_Conn buffer management offsets updated.
 *
 *----------------------------------------------------------------------
 */
static SockState
SockParse(Sock *sockPtr)
{
    const Tcl_DString *bufPtr;
    const Driver      *drvPtr;
    Request           *reqPtr;
    char               save;
    SockState          result;

    NS_NONNULL_ASSERT(sockPtr != NULL);

    drvPtr = sockPtr->drvPtr;

    NsUpdateProgress((Ns_Sock *) sockPtr);

    reqPtr = sockPtr->reqPtr;
    bufPtr = &reqPtr->buffer;

    /*
     * Scan (header) lines until the start of the content (body part).
     */
    while (reqPtr->coff == 0u) {
        char   *s, *e;
        size_t  cnt;

        /*
         * Find the next header line.
         */
        s = bufPtr->string + reqPtr->roff;
        e = memchr(s, INTCHAR('\n'), reqPtr->avail);

        if (unlikely(e == NULL)) {
            /*
             * Input not yet newline terminated - request more data.
             */
            return SOCK_MORE;
        }

        /*
         * Check for max single line overflows.
         *
         * Previous versions of the driver returned an error code directly
         * here, which was handled via an HTTP error message provided via
         * SockError(). However, the SockError() handling closes the
         * connection immediately. As a consequence, the HTTP client might
         * never see the error message, since the request was not yet fully
         * transmitted; it would see a "broken pipe: 13" message instead. We
         * now read the full request and return the message via
         * Ns_ConnRunRequest().
         */
        if (unlikely((e - s) > drvPtr->maxline)) {
            sockPtr->keep = NS_FALSE;
            if (reqPtr->request.line == NULL) {
                Ns_Log(DriverDebug, "SockParse: maxline reached of %d bytes",
                       drvPtr->maxline);
                sockPtr->flags = NS_CONN_REQUESTURITOOLONG;
                Ns_Log(Warning, "request line is too long (%d bytes)", (int)(e - s));
            } else {
                sockPtr->flags = NS_CONN_LINETOOLONG;
                Ns_Log(Warning, "request header line is too long (%d bytes)", (int)(e - s));
            }
        }

        /*
         * Update next read pointer to end of this line.
         */
        cnt = (size_t)(e - s) + 1u;
        reqPtr->roff  += cnt;
        reqPtr->avail -= cnt;

        /*
         * Adjust end pointer to the last content character before the line
         * terminator.
         */
        if (likely(e > s) && likely(*(e-1) == '\r')) {
            --e;
        }

        /*
         * Check for end of headers in case we have not done it yet.
         */
        if (unlikely(e == s) && (reqPtr->coff == 0u)) {
            /*
             * We are at end of headers.
             */
            reqPtr->coff = EndOfHeader(sockPtr);

            /*
             * In case the client sent "expect: 100-continue", report back that
             * everything is fine with the headers.
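             *
             * Sketch of the wire-level exchange handled here (illustrative):
             *
             *     C: POST /upload HTTP/1.1
             *     C: Expect: 100-continue
             *     C: Content-Length: 1000000
             *     C:
             *     S: HTTP/1.1 100 Continue
             *     C: <request body>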
             */
            if ((sockPtr->flags & NS_CONN_CONTINUE) != 0u) {

                Ns_Log(Ns_LogRequestDebug, "honoring 100-continue");

                /*
                 * In case the request entity (body) was too large, we can
                 * return the error message immediately, since the client has
                 * flagged this via "Expect:". Otherwise we have to read the
                 * full request (although it is too large) to drain the
                 * channel; otherwise, the server might close the connection
                 * *before* it has received the full request with its body
                 * from the client. We just keep the flag and let
                 * Ns_ConnRunRequest() handle the error message.
                 */
                if ((sockPtr->flags & NS_CONN_ENTITYTOOLARGE) != 0u) {
                    Ns_Log(Ns_LogRequestDebug, "100-continue: entity too large");

                    return SOCK_ENTITYTOOLARGE;

                    /*
                     * We have no other error message flagged (future ones
                     * have to be handled here).
                     */
                } else {
                    struct iovec iov[1];
                    ssize_t      sent;

                    /*
                     * Reply with "100 continue".
                     */
                    Ns_Log(Ns_LogRequestDebug, "100-continue: reply CONTINUE");

                    iov[0].iov_base = (char *)"HTTP/1.1 100 Continue\r\n\r\n";
                    iov[0].iov_len = strlen(iov[0].iov_base);

                    sent = Ns_SockSendBufs((Ns_Sock *)sockPtr, iov, 1, NULL, 0u);
                    if (sent != (ssize_t)iov[0].iov_len) {
                        Ns_Log(Warning, "could not deliver response: 100 Continue");
                        /*
                         * Should we bail out here?
                         */
                    }
                }
            }
        } else {
            /*
             * We have the request-line or a header line to process.
             */
            save = *e;
            *e = '\0';

            if (unlikely(reqPtr->request.line == NULL)) {
                /*
                 * There is no request-line set. The received line must be
                 * the request-line.
                 */
                Ns_Log(DriverDebug, "SockParse (%d): parse request line <%s>",
                       sockPtr->sock, s);

                if (Ns_ParseRequest(&reqPtr->request, s) == NS_ERROR) {
                    /*
                     * Invalid request.
                     */
                    return SOCK_BADREQUEST;
                }

                /*
                 * HTTP 0.9 had neither an HTTP-version number nor request
                 * headers, nor an empty line terminating the request header.
                 */
                if (unlikely(reqPtr->request.version < 1.0)) {
                    /*
                     * Pre-HTTP/1.0 request.
                     */
                    reqPtr->coff = reqPtr->roff;
                    Ns_Log(Notice, "pre-HTTP/1.0 request <%s>", reqPtr->request.line);
                }

            } else if (Ns_ParseHeader(reqPtr->headers, s, Preserve) != NS_OK) {
                /*
                 * Invalid header.
                 */
                return SOCK_BADHEADER;

            } else {
                /*
                 * Check for the max number of headers.
                 */
                if (unlikely(Ns_SetSize(reqPtr->headers) > (size_t)drvPtr->maxheaders)) {
                    Ns_Log(DriverDebug, "SockParse (%d): maxheaders limit (%d) reached",
                           sockPtr->sock, drvPtr->maxheaders);
                    return SOCK_TOOMANYHEADERS;
                }
            }
            *e = save;
        }
    }

    if (unlikely(reqPtr->request.line == NULL)) {
        /*
         * We are at end of headers, but we have not parsed a request line
         * (maybe just two linefeeds).
         */
        return SOCK_BADREQUEST;
    }

    /*
     * We are in the request body.
     */
    assert(reqPtr->coff > 0u);
    assert(reqPtr->request.line != NULL);

    /*
     * Check if all content has arrived.
     */
    Ns_Log(Dev, "=== length < avail (length %" PRIuz ", avail %" PRIuz ") tfd %d tfile %p chunkStartOff %" PRIuz,
           reqPtr->length, reqPtr->avail, sockPtr->tfd,
           (void *)sockPtr->tfile, reqPtr->chunkStartOff);

    if (reqPtr->chunkStartOff != 0u) {
        /*
         * Chunked encoding was provided.
         */
        bool   complete;
        size_t currentContentLength;

        complete = ChunkedDecode(reqPtr, NS_TRUE);
        currentContentLength = reqPtr->chunkWriteOff - reqPtr->coff;

        /*
         * A chunk might be complete, but it might not be the last chunk
         * from the client. The best thing would be to be able to read
         * until EOF here. In cases where the (optional) "expectedLength"
         * was provided by the client, we terminate depending on that
         * information.
         */
        if ((!complete)
            || (reqPtr->expectedLength != 0u && currentContentLength < reqPtr->expectedLength)) {
            /*
             * ChunkedDecode wants more data.
             */
            return SOCK_MORE;
        }
        /*
         * ChunkedDecode has enough data.
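         *
         * For reference, the chunked wire format decoded above looks like
         * (illustrative):
         *
         *     5\r\n
         *     hello\r\n
         *     0\r\n
         *     \r\n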
         */
        reqPtr->length = (size_t)currentContentLength;
    }

    if (reqPtr->avail < reqPtr->length) {
        Ns_Log(DriverDebug, "SockParse: wait for more input");
        /*
         * Wait for more input.
         */
        return SOCK_MORE;
    }

    Ns_Log(Dev, "=== all required data is available (avail %" PRIuz", length %" PRIuz ", "
           "readahead %" TCL_LL_MODIFIER "d maxupload %" TCL_LL_MODIFIER "d) tfd %d",
           reqPtr->avail, reqPtr->length, drvPtr->readahead, drvPtr->maxupload,
           sockPtr->tfd);

    /*
     * We have all required data in the receive buffer or in a temporary file.
     *
     * - Uploads > "readahead": these are put into temporary files.
     *
     * - Uploads > "maxupload": these are put into temporary files
     *   without mmapping, no content parsing will be performed in memory.
     */
    result = SOCK_READY;

    if (sockPtr->tfile != NULL) {
        reqPtr->content = NULL;
        reqPtr->next = NULL;
        reqPtr->avail = 0u;
        Ns_Log(DriverDebug, "content spooled to file: size %" PRIdz ", file %s",
               reqPtr->length, sockPtr->tfile);
        /*
         * Nothing more to do, return via SOCK_READY;
         */
    } else {

        /*
         * Uploads < "maxupload" are spooled to files and mmapped in order to
         * provide the usual interface via [ns_conn content].
         */
        if (sockPtr->tfd > 0) {
#ifdef _WIN32
            /*
             * For _WIN32, tfd should never be set, since tfd-spooling is not
             * implemented for Windows.
             */
            assert(0);
#else
            int prot = PROT_READ | PROT_WRITE;
            /*
             * Add a byte to make sure that the string termination with '\0'
             * below always falls into the mmapped area. On some older OSes,
             * writing beyond the mapped area might lead to crashes when
             * hitting page boundaries.
             */
            ssize_t rc = ns_write(sockPtr->tfd, "\0", 1);

            if (rc == -1) {
                Ns_Log(Error, "socket: could not append terminating 0-byte");
            }
            sockPtr->tsize = reqPtr->length + 1;
            sockPtr->taddr = mmap(0, sockPtr->tsize, prot, MAP_PRIVATE,
                                  sockPtr->tfd, 0);
            if (sockPtr->taddr == MAP_FAILED) {
                sockPtr->taddr = NULL;
                result = SOCK_ERROR;
            } else {
                reqPtr->content = sockPtr->taddr;
                Ns_Log(Debug, "content spooled to mmapped file: readahead=%"
                       TCL_LL_MODIFIER "d, filesize=%" PRIdz,
                       drvPtr->readahead, sockPtr->tsize);
            }
#endif
        } else {
            /*
             * Set the content to the beginning of the remaining buffer
             * (content offset). This happens as well when
             * reqPtr->contentLength is 0; it is needed for chunked input
             * processing.
             */
            reqPtr->content = bufPtr->string + reqPtr->coff;
        }
        reqPtr->next = reqPtr->content;

        /*
         * Add a terminating null character. The content might be from the
         * receive buffer (Tcl_DString) or from the mmapped file. Non-mmapped
         * files are handled above.
         */
        if (reqPtr->length > 0u) {
            Ns_Log(DriverDebug, "SockParse adds null terminating character at content[%" PRIuz "]",
                   reqPtr->length);

            reqPtr->savedChar = reqPtr->content[reqPtr->length];
            reqPtr->content[reqPtr->length] = '\0';

            if (sockPtr->taddr == NULL) {
                LogBuffer(DriverDebug, "UPDATED BUFFER",
                          sockPtr->reqPtr->buffer.string, (size_t)reqPtr->buffer.length);
            }
        }
    }

    return result;
}

/*
 *----------------------------------------------------------------------
 *
 * SockSetServer --
 *
 *      Set the virtual server from the driver context or the Host header.
 *
 * Results:
 *      void.
 *
 * Side effects:
 *
 *      Updates sockPtr->servPtr. In case an invalid server is set, or the
 *      required Host header field of HTTP/1.1 is missing, the HTTP method
 *      is set to the constant "BAD".
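 *
 *      The lookup uses drvPtr->hosts, a hash table mapping lower-cased
 *      Host header values to ServerMap entries; drvPtr->defMapPtr
 *      supplies the default mapping when no entry matches.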
 *
 *----------------------------------------------------------------------
 */
static void
SockSetServer(Sock *sockPtr)
{
    char     *host;
    Request  *reqPtr;
    bool      bad_request = NS_FALSE;
    Driver   *drvPtr;

    NS_NONNULL_ASSERT(sockPtr != NULL);

    reqPtr = sockPtr->reqPtr;
    assert(reqPtr != NULL);

    drvPtr = sockPtr->drvPtr;
    assert(drvPtr != NULL);

    sockPtr->servPtr  = drvPtr->servPtr;
    sockPtr->location = drvPtr->location;

    host = Ns_SetIGet(reqPtr->headers, "Host");
    Ns_Log(DriverDebug, "SockSetServer host '%s' request line '%s'",
           (host != NULL) ? host : NS_EMPTY_STRING, reqPtr->request.line);

    if (unlikely((host == NULL) && (reqPtr->request.version >= 1.1))) {
        /*
         * HTTP/1.1 requires a host header.
         */
        Ns_Log(Notice, "request header field \"Host\" is missing in HTTP/1.1 request: \"%s\"\n",
               reqPtr->request.line);
        bad_request = NS_TRUE;
    }

    if (sockPtr->servPtr == NULL) {
        const ServerMap *mapPtr = NULL;

        if (host != NULL) {
            const Tcl_HashEntry *hPtr;
            size_t               hostLength = strlen(host);

            /*
             * Remove a trailing dot of the host header field, since RFC 2976
             * allows fully qualified "absolute" DNS names in host fields
             * (see e.g. §3.2.2). Note that such a dot, when present, is the
             * last character of the string, i.e. at index hostLength - 1.
             */
            if (hostLength > 0u && host[hostLength - 1] == '.') {
                host[hostLength - 1] = '\0';
            }

            /*
             * Convert the provided host header field to lower case before
             * the hash lookup.
             */
            Ns_StrToLower(host);

            hPtr = Tcl_FindHashEntry(&drvPtr->hosts, host);
            Ns_Log(DriverDebug, "SockSetServer driver '%s' host '%s' => %p",
                   drvPtr->moduleName, host, (void*)hPtr);

            if (hPtr != NULL) {
                /*
                 * A request with the provided host header field could be
                 * resolved against a certain server.
                 */
                mapPtr = Tcl_GetHashValue(hPtr);
            } else {
                /*
                 * The host header field content is not found in the mapping
                 * table.
                 */
                Ns_Log(DriverDebug,
                       "cannot locate host header content '%s' in virtual hosts "
                       "table of driver '%s', fall back to default '%s'",
                       host, drvPtr->moduleName,
                       drvPtr->defMapPtr->location);

                if (Ns_LogSeverityEnabled(DriverDebug)) {
                    Tcl_HashEntry  *hPtr2;
                    Tcl_HashSearch  search;

                    hPtr2 = Tcl_FirstHashEntry(&drvPtr->hosts, &search);
                    while (hPtr2 != NULL) {
                        Ns_Log(Notice, "... host entry: '%s'\n",
                               (char *)Tcl_GetHashKey(&drvPtr->hosts, hPtr2));
                        hPtr2 = Tcl_NextHashEntry(&search);
                    }
                }
            }
        }

        if (mapPtr == NULL) {
            /*
             * Could not look up the virtual host; get the default mapping
             * from the driver.
             */
            mapPtr = drvPtr->defMapPtr;
        }
        if (mapPtr != NULL) {
            sockPtr->servPtr  = mapPtr->servPtr;
            sockPtr->location = mapPtr->location;
        }
        if (sockPtr->servPtr == NULL) {
            Ns_Log(Warning, "cannot determine server for request: \"%s\" (host \"%s\")\n",
                   reqPtr->request.line, host);
            bad_request = NS_TRUE;
        }
    }

    if (unlikely(bad_request)) {
        Ns_Log(DriverDebug, "SockSetServer sets method to BAD");
        ns_free((char *)reqPtr->request.method);
        reqPtr->request.method = ns_strdup("BAD");
    }
}

/*
 *======================================================================
 * Spooler Thread: Receive asynchronously from the client socket
 *======================================================================
 */

/*
 *----------------------------------------------------------------------
 *
 * SpoolerThread --
 *
 *      Spooling socket driver thread.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      Connections are accepted on the configured listen sockets,
 *      placed on the run queue to be serviced, and gracefully
 *      closed when done. Async sockets have the entire request read
 *      here before queuing as well.
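 *
 *      Note: unlike the DriverThread, this thread does not accept
 *      sockets itself; it continues the read-ahead for sockets handed
 *      over via SockSpoolerQueue() and queues them once complete.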
* *---------------------------------------------------------------------- */ static void SpoolerThread(void *arg) { SpoolerQueue *queuePtr = (SpoolerQueue*)arg; char charBuffer[1]; int pollTimeout; bool stopping; Sock *sockPtr, *nextPtr, *waitPtr, *readPtr; Ns_Time now, diff; const Driver *drvPtr; PollData pdata; Ns_ThreadSetName("-spooler%d-", queuePtr->id); queuePtr->threadName = Ns_ThreadGetName(); /* * Loop forever until signaled to shut down and all * connections are complete and gracefully closed. */ Ns_Log(Notice, "spooler%d: accepting connections", queuePtr->id); PollCreate(&pdata); Ns_GetTime(&now); waitPtr = readPtr = NULL; stopping = NS_FALSE; while (!stopping) { /* * If there are any read sockets, set the bits * and determine the minimum relative timeout. */ PollReset(&pdata); (void)PollSet(&pdata, queuePtr->pipe[0], (short)POLLIN, NULL); if (readPtr == NULL) { pollTimeout = 30 * 1000; } else { sockPtr = readPtr; while (sockPtr != NULL) { SockPoll(sockPtr, (short)POLLIN, &pdata); sockPtr = sockPtr->nextPtr; } pollTimeout = -1; } /* * Select and drain the trigger pipe if necessary. */ /*n =*/ (void) PollWait(&pdata, pollTimeout); if (PollIn(&pdata, 0) && unlikely(ns_recv(queuePtr->pipe[0], charBuffer, 1u, 0) != 1)) { Ns_Fatal("spooler: trigger ns_recv() failed: %s", ns_sockstrerror(ns_sockerrno)); } /* * Attempt read-ahead of any new connections. */ Ns_GetTime(&now); sockPtr = readPtr; readPtr = NULL; while (sockPtr != NULL) { nextPtr = sockPtr->nextPtr; drvPtr = sockPtr->drvPtr; if (unlikely(PollHup(&pdata, sockPtr->pidx))) { /* * Peer has closed the connection */ SockRelease(sockPtr, SOCK_CLOSE, 0); } else if (!PollIn(&pdata, sockPtr->pidx)) { /* * Got no data */ if (Ns_DiffTime(&sockPtr->timeout, &now, &diff) <= 0) { SockRelease(sockPtr, SOCK_READTIMEOUT, 0); queuePtr->queuesize--; } else { Push(sockPtr, readPtr); } } else { /* * Got some data */ SockState n = SockRead(sockPtr, 1, &now); switch (n) { case SOCK_MORE: SockTimeout(sockPtr, &now, &drvPtr->recvwait); Push(sockPtr, readPtr); break; case SOCK_READY: assert(sockPtr->reqPtr != NULL); SockSetServer(sockPtr); Push(sockPtr, waitPtr); break; case SOCK_BADHEADER: NS_FALL_THROUGH; /* fall through */ case SOCK_BADREQUEST: NS_FALL_THROUGH; /* fall through */ case SOCK_CLOSE: NS_FALL_THROUGH; /* fall through */ case SOCK_CLOSETIMEOUT: NS_FALL_THROUGH; /* fall through */ case SOCK_ENTITYTOOLARGE: NS_FALL_THROUGH; /* fall through */ case SOCK_ERROR: NS_FALL_THROUGH; /* fall through */ case SOCK_READERROR: NS_FALL_THROUGH; /* fall through */ case SOCK_READTIMEOUT: NS_FALL_THROUGH; /* fall through */ case SOCK_SHUTERROR: NS_FALL_THROUGH; /* fall through */ case SOCK_SPOOL: NS_FALL_THROUGH; /* fall through */ case SOCK_TOOMANYHEADERS: NS_FALL_THROUGH; /* fall through */ case SOCK_WRITEERROR: NS_FALL_THROUGH; /* fall through */ case SOCK_WRITETIMEOUT: SockRelease(sockPtr, n, errno); queuePtr->queuesize--; break; } } sockPtr = nextPtr; } /* * Attempt to queue any pending connection * after reversing the list to ensure oldest * connections are tried first. 
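         *
         * The wait list is singly linked in LIFO order; it is first
         * reversed in place using the pattern
         *
         *     while ((nextPtr = waitPtr) != NULL) {
         *         waitPtr = nextPtr->nextPtr;
         *         Push(nextPtr, sockPtr);
         *     }
         *
         * so that the oldest entry ends up at the head.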
*/ if (waitPtr != NULL) { sockPtr = NULL; while ((nextPtr = waitPtr) != NULL) { waitPtr = nextPtr->nextPtr; Push(nextPtr, sockPtr); } while (sockPtr != NULL) { nextPtr = sockPtr->nextPtr; if (!NsQueueConn(sockPtr, &now)) { Push(sockPtr, waitPtr); } else { queuePtr->queuesize--; } sockPtr = nextPtr; } } /* * Add more connections from the spooler queue */ Ns_MutexLock(&queuePtr->lock); if (waitPtr == NULL) { sockPtr = (Sock*)queuePtr->sockPtr; queuePtr->sockPtr = NULL; while (sockPtr != NULL) { nextPtr = sockPtr->nextPtr; drvPtr = sockPtr->drvPtr; SockTimeout(sockPtr, &now, &drvPtr->recvwait); Push(sockPtr, readPtr); queuePtr->queuesize++; sockPtr = nextPtr; } } /* * Check for shutdown */ stopping = queuePtr->shutdown; Ns_MutexUnlock(&queuePtr->lock); } PollFree(&pdata); Ns_Log(Notice, "exiting"); Ns_MutexLock(&queuePtr->lock); queuePtr->stopped = NS_TRUE; Ns_CondBroadcast(&queuePtr->cond); Ns_MutexUnlock(&queuePtr->lock); } static void SpoolerQueueStart(SpoolerQueue *queuePtr, Ns_ThreadProc *proc) { NS_NONNULL_ASSERT(proc != NULL); while (queuePtr != NULL) { if (ns_sockpair(queuePtr->pipe) != 0) { Ns_Fatal("ns_sockpair() failed: %s", ns_sockstrerror(ns_sockerrno)); } Ns_ThreadCreate(proc, queuePtr, 0, &queuePtr->thread); queuePtr = queuePtr->nextPtr; } } static void SpoolerQueueStop(SpoolerQueue *queuePtr, const Ns_Time *timeoutPtr, const char *name) { NS_NONNULL_ASSERT(timeoutPtr != NULL); NS_NONNULL_ASSERT(name != NULL); while (queuePtr != NULL) { Ns_ReturnCode status; Ns_MutexLock(&queuePtr->lock); if (!queuePtr->stopped && !queuePtr->shutdown) { Ns_Log(Debug, "%s%d: triggering shutdown", name, queuePtr->id); queuePtr->shutdown = NS_TRUE; SockTrigger(queuePtr->pipe[1]); } status = NS_OK; while (!queuePtr->stopped && status == NS_OK) { status = Ns_CondTimedWait(&queuePtr->cond, &queuePtr->lock, timeoutPtr); } if (status != NS_OK) { Ns_Log(Warning, "%s%d: timeout waiting for shutdown", name, queuePtr->id); } else { /*Ns_Log(Notice, "%s%d: shutdown complete", name, queuePtr->id);*/ if (queuePtr->thread != NULL) { Ns_ThreadJoin(&queuePtr->thread, NULL); queuePtr->thread = NULL; } else { Ns_Log(Notice, "%s%d: shutdown: thread already gone", name, queuePtr->id); } ns_sockclose(queuePtr->pipe[0]); ns_sockclose(queuePtr->pipe[1]); } Ns_MutexUnlock(&queuePtr->lock); queuePtr = queuePtr->nextPtr; } } static int SockSpoolerQueue(Driver *drvPtr, Sock *sockPtr) { bool trigger = NS_FALSE; SpoolerQueue *queuePtr; NS_NONNULL_ASSERT(drvPtr != NULL); NS_NONNULL_ASSERT(sockPtr != NULL); /* * Get the next spooler thread from the list, all spooler requests are * rotated between all spooler threads */ Ns_MutexLock(&drvPtr->spooler.lock); if (drvPtr->spooler.curPtr == NULL) { drvPtr->spooler.curPtr = drvPtr->spooler.firstPtr; } queuePtr = drvPtr->spooler.curPtr; drvPtr->spooler.curPtr = drvPtr->spooler.curPtr->nextPtr; Ns_MutexUnlock(&drvPtr->spooler.lock); Ns_Log(Debug, "Spooler: %d: started fd=%d: %" PRIdz " bytes", queuePtr->id, sockPtr->sock, sockPtr->reqPtr->length); Ns_MutexLock(&queuePtr->lock); if (queuePtr->sockPtr == NULL) { trigger = NS_TRUE; } Push(sockPtr, queuePtr->sockPtr); Ns_MutexUnlock(&queuePtr->lock); /* * Wake up spooler thread */ if (trigger) { SockTrigger(queuePtr->pipe[1]); } return 1; } /* *====================================================================== * Writer Thread: Write asynchronously to the client socket *====================================================================== */ /* *---------------------------------------------------------------------- * * NsWriterLock, 
NsWriterUnlock --
 *
 *      Provide an API for locking and unlocking context information
 *      for streaming asynchronous writer jobs. The locks are just
 *      needed for managing linkage between "connPtr" and a writer
 *      entry. The lock operations are rather infrequent and the
 *      lock duration is very short, such that a single global lock
 *      appears sufficient.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      Change Mutex state.
 *
 *----------------------------------------------------------------------
 */
void
NsWriterLock(void) {
    Ns_MutexLock(&writerlock);
}

void
NsWriterUnlock(void) {
    Ns_MutexUnlock(&writerlock);
}

/*
 *----------------------------------------------------------------------
 *
 * WriterSockFileVecCleanup --
 *
 *      Cleanup function for the FileVec array in the WriterSock structure.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      Potentially closes file descriptors, frees Ns_FileVec memory.
 *
 *----------------------------------------------------------------------
 */
static void
WriterSockFileVecCleanup(WriterSock *wrSockPtr) {

    NS_NONNULL_ASSERT(wrSockPtr != NULL);

    if ( wrSockPtr->c.file.nbufs > 0) {
        int i;

        Ns_Log(DriverDebug, "WriterSockRelease nbufs %d", wrSockPtr->c.file.nbufs);

        for (i = 0; i < wrSockPtr->c.file.nbufs; i++) {
            /*
             * The fd of c.file.currentbuf is always the same as
             * wrSockPtr->fd and therefore already closed at this point.
             */
            if ( (i != wrSockPtr->c.file.currentbuf)
                 && (wrSockPtr->c.file.bufs[i].fd != NS_INVALID_FD) ) {

                Ns_Log(DriverDebug, "WriterSockRelease must close fd %d",
                       wrSockPtr->c.file.bufs[i].fd);
                ns_close(wrSockPtr->c.file.bufs[i].fd);
            }
        }
        ns_free(wrSockPtr->c.file.bufs);
    }
    ns_free(wrSockPtr->c.file.buf);
}

/*
 *----------------------------------------------------------------------
 *
 * WriterSockRequire, WriterSockRelease --
 *
 *      Management functions for WriterSocks. WriterSockRequire() and
 *      WriterSockRelease() are responsible for obtaining and
 *      freeing "WriterSock" structures. When such a structure is finally
 *      released, it is removed from the queue, the socket is
 *      closed and the memory is freed.
 *
 * Results:
 *      WriterSockRequire() returns a WriterSock from a connection,
 *      the other functions return nothing.
 *
 * Side effects:
 *      Updating reference counters, closing socket, freeing memory.
 *
 *----------------------------------------------------------------------
 */

static WriterSock *
WriterSockRequire(const Conn *connPtr) {
    WriterSock *wrSockPtr;

    NS_NONNULL_ASSERT(connPtr != NULL);

    NsWriterLock();
    wrSockPtr = (WriterSock *)connPtr->strWriter;
    if (wrSockPtr != NULL) {
        wrSockPtr->refCount ++;
    }
    NsWriterUnlock();

    return wrSockPtr;
}

static void
WriterSockRelease(WriterSock *wrSockPtr) {
    SpoolerQueue *queuePtr;

    NS_NONNULL_ASSERT(wrSockPtr != NULL);

    wrSockPtr->refCount --;

    Ns_Log(DriverDebug, "WriterSockRelease %p refCount %d keep %d",
           (void *)wrSockPtr, wrSockPtr->refCount, wrSockPtr->keep);

    if (wrSockPtr->refCount > 0) {
        return;
    }

    Ns_Log(DriverDebug,
           "Writer: closed sock %d, file fd %d, error %d/%d, "
           "sent=%" TCL_LL_MODIFIER "d, flags=%X",
           wrSockPtr->sockPtr->sock, wrSockPtr->fd,
           wrSockPtr->status, wrSockPtr->err,
           wrSockPtr->nsent, wrSockPtr->flags);

    NsPoolAddBytesSent(wrSockPtr->poolPtr, wrSockPtr->nsent);

    if (wrSockPtr->doStream != NS_WRITER_STREAM_NONE) {
        Conn *connPtr;

        NsWriterLock();
        connPtr = wrSockPtr->connPtr;
        if (connPtr != NULL && connPtr->strWriter != NULL) {
            connPtr->strWriter = NULL;
        }
        NsWriterUnlock();

        /*
         * In case writer streams are activated for this wrSockPtr, make
         * sure to release the tmp file.
See thread Naviserver Open Files on the
         * sourceforge mailing list (starting July 2019).
         */
        if (wrSockPtr->doStream == NS_WRITER_STREAM_FINISH) {
            Ns_ReleaseTemp(wrSockPtr->fd);
        }
    }

    /*
     * Remove the entry from the queue and decrement counter
     */
    queuePtr = wrSockPtr->queuePtr;
    if (queuePtr->curPtr == wrSockPtr) {
        queuePtr->curPtr = wrSockPtr->nextPtr;
        queuePtr->queuesize--;
    } else {
        WriterSock *curPtr, *lastPtr = queuePtr->curPtr;

        for (curPtr = (lastPtr != NULL) ? lastPtr->nextPtr : NULL;
             curPtr != NULL;
             lastPtr = curPtr, curPtr = curPtr->nextPtr
             ) {
            if (curPtr == wrSockPtr) {
                lastPtr->nextPtr = wrSockPtr->nextPtr;
                queuePtr->queuesize--;
                break;
            }
        }
    }

    if ((wrSockPtr->err != 0) || (wrSockPtr->status != SPOOLER_OK)) {
        int i;

        /*
         * Look up the matching sockState from the spooler state. The array
         * has just 5 elements; on average, just 2 comparisons are needed
         * (since OK is at the end).
         */
        for (i = 0; i < Ns_NrElements(spoolerStateMap); i++) {
            if (spoolerStateMap[i].spoolerState == wrSockPtr->status) {
                SockError(wrSockPtr->sockPtr, spoolerStateMap[i].sockState, wrSockPtr->err);
                break;
            }
        }
        NsSockClose(wrSockPtr->sockPtr, (int)NS_FALSE);
    } else {
        NsSockClose(wrSockPtr->sockPtr, (int)wrSockPtr->keep);
    }
    if (wrSockPtr->clientData != NULL) {
        ns_free(wrSockPtr->clientData);
    }
    if (wrSockPtr->fd != NS_INVALID_FD) {
        if (wrSockPtr->doStream != NS_WRITER_STREAM_FINISH) {
            (void) ns_close(wrSockPtr->fd);
        }
        WriterSockFileVecCleanup(wrSockPtr);

    } else if (wrSockPtr->c.mem.bufs != NULL) {
        if (wrSockPtr->c.mem.fmap.addr != NULL) {
            NsMemUmap(&wrSockPtr->c.mem.fmap);

        } else {
            int i;
            for (i = 0; i < wrSockPtr->c.mem.nbufs; i++) {
                ns_free((char *)wrSockPtr->c.mem.bufs[i].iov_base);
            }
        }
        if (wrSockPtr->c.mem.bufs != wrSockPtr->c.mem.preallocated_bufs) {
            ns_free(wrSockPtr->c.mem.bufs);
        }
    }
    if (wrSockPtr->headerString != NULL) {
        ns_free(wrSockPtr->headerString);
    }

    ns_free(wrSockPtr);
}

/*
 *----------------------------------------------------------------------
 *
 * WriterReadFromSpool --
 *
 *      Utility function of the WriterThread to read blocks from a
 *      file into the output buffer of the writer. It handles
 *      leftovers from previous send attempts and takes care of
 *      locking in case of simultaneous reading and writing from the
 *      same file.
 *
 * Results:
 *      SPOOLER_OK or SPOOLER_READERROR.
 *
 * Side effects:
 *      Fills up curPtr->c.file.buf and updates counters/sizes.
 *
 *----------------------------------------------------------------------
 */
static SpoolerState
WriterReadFromSpool(WriterSock *curPtr)
{
    NsWriterStreamState doStream;
    SpoolerState        status = SPOOLER_OK;
    size_t              maxsize, toRead;
    unsigned char      *bufPtr;

    NS_NONNULL_ASSERT(curPtr != NULL);

    doStream = curPtr->doStream;
    if (doStream != NS_WRITER_STREAM_NONE) {
        Ns_MutexLock(&curPtr->c.file.fdlock);
        toRead = curPtr->c.file.toRead;
        Ns_MutexUnlock(&curPtr->c.file.fdlock);
    } else {
        toRead = curPtr->c.file.toRead;
        Ns_Log(DriverDebug,
               "### WriterReadFromSpool [%d]: fd %d tosend %lu files %d",
               curPtr->c.file.currentbuf, curPtr->fd, toRead, curPtr->c.file.nbufs);
    }

    maxsize = curPtr->c.file.maxsize;
    bufPtr  = curPtr->c.file.buf;

    /*
     * When bufsize > 0 we have a leftover from a previous send. In such
     * cases, move the leftover to the front, and fill the remainder of
     * the buffer with new data from the file.
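     *
     * A worked example with hypothetical numbers: with bufsize == 3 and
     * bufoffset == 10, the 3 unsent bytes at buf+10 are moved via memmove()
     * to the start of buf; bufPtr then points to buf+3 and maxsize shrinks
     * by 3, so fresh data from the file is appended after the leftover.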
     */
    if (curPtr->c.file.bufsize > 0u) {
        Ns_Log(DriverDebug,
               "### WriterReadFromSpool %p %.6x leftover %" PRIdz " offset %ld",
               (void *)curPtr, curPtr->flags,
               curPtr->c.file.bufsize, (long)curPtr->c.file.bufoffset);

        if (likely(curPtr->c.file.bufoffset > 0)) {
            memmove(curPtr->c.file.buf,
                    curPtr->c.file.buf + curPtr->c.file.bufoffset,
                    curPtr->c.file.bufsize);
        }
        bufPtr = curPtr->c.file.buf + curPtr->c.file.bufsize;
        maxsize -= curPtr->c.file.bufsize;
    }
    if (toRead > maxsize) {
        toRead = maxsize;
    }

    /*
     * Read content from the file into the buffer.
     */
    if (toRead > 0u) {
        ssize_t n;

        if (doStream != NS_WRITER_STREAM_NONE) {
            /*
             * In streaming mode, the connection thread writes to the
             * spool file and the writer thread reads from the same
             * file. Therefore, we have to re-adjust the current
             * read/write position, which might be changed by the
             * other thread. These positions have to be locked, since
             * seeking might be subject to race conditions. Here we
             * set the read pointer to the position after the last
             * send operation.
             */
            Ns_MutexLock(&curPtr->c.file.fdlock);
            (void) ns_lseek(curPtr->fd, (off_t)curPtr->nsent, SEEK_SET);
        }

        if (curPtr->c.file.nbufs == 0) {
            /*
             * Working on a single fd.
             */
            n = ns_read(curPtr->fd, bufPtr, toRead);

        } else {
            /*
             * Working on a Ns_FileVec.
             */
            int    currentbuf = curPtr->c.file.currentbuf;
            size_t wantRead   = curPtr->c.file.bufs[currentbuf].length;
            size_t segSize    = (wantRead > toRead ? toRead : wantRead);

            n = ns_read(curPtr->fd, bufPtr, segSize);

            Ns_Log(DriverDebug,
                   "### WriterReadFromSpool [%d] (nbufs %d): read from fd %d want %lu got %ld (remain %lu)",
                   currentbuf, curPtr->c.file.nbufs, curPtr->fd, segSize, n, wantRead);

            if (n > 0) {
                /*
                 * Reduce the remaining length in the Ns_FileVec for the
                 * next iteration.
                 */
                curPtr->c.file.bufs[currentbuf].length -= (size_t)n;

                if ((size_t)n < wantRead) {
                    /*
                     * Partial read on a segment.
                     */
                    Ns_Log(DriverDebug,
                           "### WriterReadFromSpool [%d] (nbufs %d): partial read on fd %d (got %ld)",
                           currentbuf, curPtr->c.file.nbufs, curPtr->fd, n);

                } else if (currentbuf < curPtr->c.file.nbufs - 1 /* && (n == wantRead) */) {
                    /*
                     * All read from this segment, set up the next read.
                     */
                    ns_close(curPtr->fd);
                    curPtr->c.file.bufs[currentbuf].fd = NS_INVALID_FD;

                    curPtr->c.file.currentbuf ++;
                    curPtr->fd = curPtr->c.file.bufs[curPtr->c.file.currentbuf].fd;

                    Ns_Log(DriverDebug, "### WriterReadFromSpool switch to [%d] fd %d",
                           curPtr->c.file.currentbuf, curPtr->fd);
                }
            }
        }

        if (n <= 0) {
            status = SPOOLER_READERROR;
        } else {
            /*
             * curPtr->c.file.toRead is still protected by
             * curPtr->c.file.fdlock when needed (in streaming mode).
             */
            curPtr->c.file.toRead -= (size_t)n;
            curPtr->c.file.bufsize += (size_t)n;
        }

        if (doStream != NS_WRITER_STREAM_NONE) {
            Ns_MutexUnlock(&curPtr->c.file.fdlock);
        }
    }

    return status;
}

/*
 *----------------------------------------------------------------------
 *
 * WriterSend --
 *
 *      Utility function of the WriterThread to send content to the client.
 *      It handles partial write operations from the lower level driver
 *      infrastructure.
 *
 * Results:
 *      Either SPOOLER_OK or SPOOLER_WRITEERROR (the error code is
 *      returned in *err).
 *
 * Side effects:
 *      Sends data, might reshuffle iovec.
 *
 *----------------------------------------------------------------------
 */
static SpoolerState
WriterSend(WriterSock *curPtr, int *err)
{
    const struct iovec *bufs;
    struct iovec        vbuf;
    int                 nbufs;
    SpoolerState        status = SPOOLER_OK;
    size_t              toWrite;
    ssize_t             n;

    NS_NONNULL_ASSERT(curPtr != NULL);
    NS_NONNULL_ASSERT(err != NULL);

    /*
     * Prepare the send operation.
     */
    if (curPtr->fd != NS_INVALID_FD) {
        /*
         * We have a valid file descriptor, send data from the file.
         *
         * Prepare sending a single buffer with curPtr->c.file.bufsize bytes
         * from the curPtr->c.file.buf to the client.
         */
        vbuf.iov_len = curPtr->c.file.bufsize;
        vbuf.iov_base = (void *)curPtr->c.file.buf;
        bufs = &vbuf;
        nbufs = 1;
        toWrite = curPtr->c.file.bufsize;
    } else {
        int i;

        /*
         * Prepare sending multiple memory buffers. Get the length of the
         * remaining buffers.
         */
        toWrite = 0u;
        for (i = 0; i < curPtr->c.mem.nsbufs; i ++) {
            toWrite += curPtr->c.mem.sbufs[i].iov_len;
        }
        Ns_Log(DriverDebug,
               "### Writer wants to send remainder nbufs %d len %" PRIdz,
               curPtr->c.mem.nsbufs, toWrite);

        /*
         * Add buffers from the source and fill the structure up to the max.
         */
        while (curPtr->c.mem.bufIdx < curPtr->c.mem.nbufs
               && curPtr->c.mem.sbufIdx < UIO_SMALLIOV) {
            const struct iovec *vPtr = &curPtr->c.mem.bufs[curPtr->c.mem.bufIdx];

            if (vPtr->iov_len > 0u && vPtr->iov_base != NULL) {

                Ns_Log(DriverDebug,
                       "### Writer copies source %d to scratch %d len %" PRIiovlen,
                       curPtr->c.mem.bufIdx, curPtr->c.mem.sbufIdx, vPtr->iov_len);

                toWrite += Ns_SetVec(curPtr->c.mem.sbufs, curPtr->c.mem.sbufIdx++,
                                     vPtr->iov_base, vPtr->iov_len);
                curPtr->c.mem.nsbufs++;
            }
            curPtr->c.mem.bufIdx++;
        }

        bufs  = curPtr->c.mem.sbufs;
        nbufs = curPtr->c.mem.nsbufs;
        Ns_Log(DriverDebug, "### Writer wants to send %d bufs size %" PRIdz,
               nbufs, toWrite);
    }

    /*
     * Perform the actual send operation.
     */
    n = NsDriverSend(curPtr->sockPtr, bufs, nbufs, 0u);

    if (n == -1) {
        *err = ns_sockerrno;
        status = SPOOLER_WRITEERROR;
    } else {
        /*
         * We have sent zero or more bytes.
         */
        if (curPtr->doStream != NS_WRITER_STREAM_NONE) {
            Ns_MutexLock(&curPtr->c.file.fdlock);
            curPtr->size -= (size_t)n;
            Ns_MutexUnlock(&curPtr->c.file.fdlock);
        } else {
            curPtr->size -= (size_t)n;
        }
        curPtr->nsent += n;
        curPtr->sockPtr->timeout.sec = 0;

        if (curPtr->fd != NS_INVALID_FD) {
            /*
             * File-descriptor based send operation. Reduce the (remaining)
             * buffer size by the amount of data sent and adjust the buffer
             * offset. For partial send operations, this will lead to a
             * remaining buffer size > 0.
             */
            curPtr->c.file.bufsize -= (size_t)n;
            curPtr->c.file.bufoffset = (off_t)n;

        } else {
            if (n < (ssize_t)toWrite) {
                /*
                 * We have a partial transmit from the iovec
                 * structure. We have to compact it to fill in the content
                 * for the next round.
                 */
                curPtr->c.mem.sbufIdx = Ns_ResetVec(curPtr->c.mem.sbufs, curPtr->c.mem.nsbufs, (size_t)n);
                curPtr->c.mem.nsbufs -= curPtr->c.mem.sbufIdx;

                memmove(curPtr->c.mem.sbufs, curPtr->c.mem.sbufs + curPtr->c.mem.sbufIdx,
                        /* move the iovecs to the start of the scratch buffers */
                        sizeof(struct iovec) * (size_t)curPtr->c.mem.nsbufs);
            }
        }
    }

    return status;
}

/*
 *----------------------------------------------------------------------
 *
 * WriterGetInfoPtr --
 *
 *      Helper function to obtain the ConnPoolInfo structure for a
 *      WriterSock.
 *
 *      The infoPtr is allocated only once per pool and cached in the
 *      WriterSock. Only the first time a writer thread "sees" a pool, it
 *      allocates the structure for it.
 *
 * Results:
 *      The ConnPoolInfo structure of the pool of the writer job.
 *
 * Side effects:
 *      Can allocate memory.
 *
 *----------------------------------------------------------------------
 */
static ConnPoolInfo *
WriterGetInfoPtr(WriterSock *curPtr, Tcl_HashTable *pools)
{
    NS_NONNULL_ASSERT(curPtr != NULL);
    NS_NONNULL_ASSERT(pools != NULL);

    if (curPtr->infoPtr == NULL) {
        int            isNew;
        Tcl_HashEntry *hPtr;

        hPtr = Tcl_CreateHashEntry(pools, (void*)curPtr->poolPtr, &isNew);
        if (isNew == 1) {
            /*
             * This is a pool that we have not seen yet.
             */
            curPtr->infoPtr = ns_malloc(sizeof(ConnPoolInfo));
            curPtr->infoPtr->currentPoolRate = 0;
            curPtr->infoPtr->threadSlot =
                NsPoolAllocateThreadSlot(curPtr->poolPtr, Ns_ThreadId());
            Tcl_SetHashValue(hPtr, curPtr->infoPtr);
            Ns_Log(DriverDebug,
                   "poollimit: pool '%s' allocate infoPtr with slot %lu poolLimit %d",
                   curPtr->poolPtr->pool,
                   curPtr->infoPtr->threadSlot,
                   curPtr->poolPtr->rate.poolLimit);
        } else {
            curPtr->infoPtr = (ConnPoolInfo *)Tcl_GetHashValue(hPtr);
        }
    }

    return curPtr->infoPtr;
}

/*
 *----------------------------------------------------------------------
 *
 * WriterPerPoolRates --
 *
 *      Compute the current bandwidths per pool and writer.
 *
 *      Since we have potentially multiple writer threads running, all of
 *      these might have writer jobs of the same pool. In order to minimize
 *      locking, we first compute writer-thread-specific subresults and
 *      combine these later with the results of the other threads.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      Updates the per-pool rate information kept in the thread-local
 *      pools table.
 *
 *----------------------------------------------------------------------
 */
static void
WriterPerPoolRates(WriterSock *writePtr, Tcl_HashTable *pools)
{
    WriterSock     *curPtr;
    Tcl_HashSearch  search;
    Tcl_HashEntry  *hPtr;

    NS_NONNULL_ASSERT(writePtr != NULL);
    NS_NONNULL_ASSERT(pools != NULL);

    /*
     * First, reset the pool total rates. We keep the bandwidth-managed
     * pools in thread-local memory. Before we accumulate the data, we
     * reset it.
     */
    hPtr = Tcl_FirstHashEntry(pools, &search);
    while (hPtr != NULL) {
        ConnPoolInfo *infoPtr = (ConnPoolInfo *)Tcl_GetHashValue(hPtr);
        infoPtr->currentPoolRate = 0;
        hPtr = Tcl_NextHashEntry(&search);
    }

    /*
     * Sum the actual rates per bandwidth-limited pool for all active writer
     * jobs.
     */
    for (curPtr = writePtr; curPtr != NULL; curPtr = curPtr->nextPtr) {
        /*
         * Does the writer come from a bandwidth-limited pool?
         */
        if (curPtr->poolPtr->rate.poolLimit > 0 && curPtr->currentRate > 0) {
            /*
             * Add the actual rate to the writer-specific pool rate.
             */
            ConnPoolInfo *infoPtr = WriterGetInfoPtr(curPtr, pools);

            infoPtr->currentPoolRate += curPtr->currentRate;
            Ns_Log(DriverDebug,
                   "poollimit pool '%s' added rate poolLimit %d poolRate %d",
                   curPtr->poolPtr->pool,
                   curPtr->poolPtr->rate.poolLimit,
                   infoPtr->currentPoolRate);
        }
    }

    /*
     * Now iterate over the pools used by this thread and sum the specific
     * pool rates from all writer threads.
     */
    hPtr = Tcl_FirstHashEntry(pools, &search);
    while (hPtr != NULL) {
        ConnPool     *poolPtr = (ConnPool *)Tcl_GetHashKey(pools, hPtr);
        int           totalPoolRate, writerThreadCount, threadDeltaRate;
        ConnPoolInfo *infoPtr;

        /*
         * Compute the following indicators:
         *
         *   - totalPoolRate: accumulated pool rates from all writer threads.
         *
         *   - threadDeltaRate: how much of the available bandwidth can be
         *     used by the current thread. We assume that the distribution
         *     of writers between all writer threads is even, so we can
         *     split the available rate by the number of writer threads
         *     working on this pool.
         *
         *   - deltaPercentage: adjust in a single iteration just a fraction
         *     (e.g. 10 percent) of the potential change. This function is
         *     called often enough to justify delayed adjustments.
         */
        infoPtr = (ConnPoolInfo *)Tcl_GetHashValue(hPtr);
        totalPoolRate = NsPoolTotalRate(poolPtr,
                                        infoPtr->threadSlot,
                                        infoPtr->currentPoolRate,
                                        &writerThreadCount);

        /*
         * If nothing is going on, allow a thread the full rate.
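         *
         * A worked example with hypothetical numbers: with poolLimit == 1000
         * KB/s, totalPoolRate == 800 KB/s and writerThreadCount == 2, the
         * computation below yields threadDeltaRate == (1000 - 800) / 2 == 100
         * and deltaPercentage == 10, i.e. busy jobs of this thread may grow
         * their rate by about 10% per iteration. An oversubscribed pool
         * (totalPoolRate > poolLimit) yields a negative deltaPercentage,
         * clamped at -50.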
         */
        if (infoPtr->currentPoolRate == 0) {
            threadDeltaRate = (poolPtr->rate.poolLimit - totalPoolRate);
        } else {
            threadDeltaRate = (poolPtr->rate.poolLimit - totalPoolRate) / writerThreadCount;
        }
        infoPtr->deltaPercentage = threadDeltaRate / 10;
        if (infoPtr->deltaPercentage < -50) {
            infoPtr->deltaPercentage = -50;
        }

        if (totalPoolRate > 0) {
            Ns_Log(Notice,
                   "... pool '%s' thread's pool rate %d total pool rate %d limit %d "
                   "(#%d writer threads) -> computed rate %d (%d%%) ",
                   NsPoolName(poolPtr->pool),
                   infoPtr->currentPoolRate,
                   totalPoolRate,
                   poolPtr->rate.poolLimit,
                   writerThreadCount,
                   threadDeltaRate,
                   infoPtr->deltaPercentage);
        }

        hPtr = Tcl_NextHashEntry(&search);
    }
}

/*
 *----------------------------------------------------------------------
 *
 * WriterThread --
 *
 *      Thread that writes files to clients.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      Connections are accepted and their SockPtr is set to NULL
 *      such that closing the actual connection does not close the socket.
 *
 *----------------------------------------------------------------------
 */
static void
WriterThread(void *arg)
{
    SpoolerQueue   *queuePtr = (SpoolerQueue*)arg;
    int             err, pollTimeout;
    bool            stopping;
    Ns_Time         now;
    Sock           *sockPtr;
    const Driver   *drvPtr;
    WriterSock     *curPtr, *nextPtr, *writePtr;
    PollData        pdata;
    Tcl_HashTable   pools;     /* used for accumulating bandwidth per pool */

    Ns_ThreadSetName("-writer%d-", queuePtr->id);
    queuePtr->threadName = Ns_ThreadGetName();

    Tcl_InitHashTable(&pools, TCL_ONE_WORD_KEYS);

    /*
     * Loop forever until signaled to shut down and all
     * connections are complete and gracefully closed.
     */
    Ns_Log(Notice, "writer%d: accepting connections", queuePtr->id);

    PollCreate(&pdata);
    writePtr = NULL;
    stopping = NS_FALSE;

    while (!stopping) {
        char charBuffer[1];

        /*
         * If there are any write sockets, set the bits.
         */
        PollReset(&pdata);
        (void)PollSet(&pdata, queuePtr->pipe[0], (short)POLLIN, NULL);

        if (writePtr == NULL) {
            pollTimeout = 30 * 1000;
        } else {

            /*
             * If per-pool bandwidth management is requested, compute the
             * base data for the adjustment. If no bandwidth management is
             * requested, there is no slowdown.
             */
            if (NsWriterBandwidthManagement) {
                WriterPerPoolRates(writePtr, &pools);
            }

            /*
             * There are writers active. Determine on which writers we poll
             * and compute the maximal poll wait time.
             */
            pollTimeout = 1000;
            for (curPtr = writePtr; curPtr != NULL; curPtr = curPtr->nextPtr) {
                int sleepTimeMs = 0;

                Ns_Log(DriverDebug,
                       "### Writer poll collect %p size %" PRIdz " streaming %d rateLimit %d",
                       (void *)curPtr, curPtr->size, curPtr->doStream, curPtr->rateLimit);

                if (curPtr->rateLimit > 0
                    && curPtr->nsent > 0
                    && curPtr->currentRate > 0
                    ) {
                    int currentMs, targetTimeMs;

                    /*
                     * Perform per-pool rate management, when
                     *   - a poolLimit is provided,
                     *   - we have performance data of the pool, and
                     *   - changes are possible (as flagged by deltaPercentage).
                     */
                    if (NsWriterBandwidthManagement
                        && curPtr->poolPtr->rate.poolLimit > 0
                        && curPtr->infoPtr != NULL
                        && curPtr->infoPtr->deltaPercentage != 0
                        ) {
                        /*
                         * Only adjust data for busy writer jobs, which
                         * are close to their limits.
                         */
                        bool onLimit = (curPtr->currentRate*100 / curPtr->rateLimit) > 90;

                        Ns_Log(DriverDebug,
                               "we allowed %d we use %d on limit %d (%d) , we can do %d%%",
                               curPtr->rateLimit, curPtr->currentRate,
                               (int)onLimit, curPtr->currentRate*100/curPtr->rateLimit,
                               curPtr->infoPtr->deltaPercentage);
                        if (onLimit) {
                            /*
                             * Compute a new rate limit based on the
                             * positive/negative delta percentage.
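                             *
                             * A worked example with hypothetical numbers:
                             * currentRate == 900 KB/s and deltaPercentage ==
                             * -20 yield newRate == 900 + (900 * -20 / 100) ==
                             * 720 KB/s; the sanity checks below keep newRate
                             * between 5 KB/s and the pool limit.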
                             */
                            int newRate = curPtr->currentRate +
                                (curPtr->currentRate * curPtr->infoPtr->deltaPercentage / 100);
                            /*
                             * Sanity checks:
                             *  - never allow more than poolLimit
                             *  - never kill connections completely (e.g. minRate 5KB/s)
                             */
                            if (newRate > curPtr->poolPtr->rate.poolLimit) {
                                newRate = curPtr->poolPtr->rate.poolLimit;
                            } else if (newRate < 5) {
                                newRate = 5;
                            }
                            Ns_Log(Notice,
                                   "... pool '%s' new rate limit changed from %d to %d KB/s (delta %d%%)",
                                   curPtr->poolPtr->pool, curPtr->rateLimit,
                                   newRate, curPtr->infoPtr->deltaPercentage);
                            curPtr->rateLimit = newRate;
                        }
                    }

                    /*
                     * Adjust the rate to the rate limit by computing a sleep
                     * time.
                     */
                    currentMs    = (int)(curPtr->nsent/(Tcl_WideInt)curPtr->currentRate);
                    targetTimeMs = (int)(curPtr->nsent/(Tcl_WideInt)curPtr->rateLimit);
                    sleepTimeMs = 1 + targetTimeMs - currentMs;
                    Ns_Log(WriterDebug, "### Writer(%d)"
                           " byte sent %" TCL_LL_MODIFIER "d msecs %d rate %d KB/s"
                           " targetRate %d KB/s sleep %d",
                           curPtr->sockPtr->sock,
                           curPtr->nsent, currentMs,
                           curPtr->currentRate,
                           curPtr->rateLimit,
                           sleepTimeMs);
                }

                if (likely(curPtr->size > 0u)) {
                    if (sleepTimeMs <= 0) {
                        SockPoll(curPtr->sockPtr, (short)POLLOUT, &pdata);
                        pollTimeout = -1;
                    } else {
                        pollTimeout = MIN(sleepTimeMs, pollTimeout);
                    }
                } else if (unlikely(curPtr->doStream == NS_WRITER_STREAM_FINISH)) {
                    pollTimeout = -1;
                }
            }
        }

        Ns_Log(DriverDebug, "### Writer final pollTimeout %d", pollTimeout);

        /*
         * Select and drain the trigger pipe if necessary.
         */
        (void) PollWait(&pdata, pollTimeout);

        if (PollIn(&pdata, 0)
            && unlikely(ns_recv(queuePtr->pipe[0], charBuffer, 1u, 0) != 1)) {
            Ns_Fatal("writer: trigger ns_recv() failed: %s",
                     ns_sockstrerror(ns_sockerrno));
        }

        /*
         * Write to all available sockets.
         */
        Ns_GetTime(&now);
        curPtr = writePtr;
        writePtr = NULL;

        while (curPtr != NULL) {
            NsWriterStreamState doStream;
            SpoolerState        spoolerState = SPOOLER_OK;

            nextPtr = curPtr->nextPtr;
            sockPtr = curPtr->sockPtr;
            err = 0;

            /*
             * The truth value of doStream cannot change concurrently.
             */
            doStream = curPtr->doStream;

            if (unlikely(PollHup(&pdata, sockPtr->pidx))) {
                Ns_Log(DriverDebug, "### Writer %p reached POLLHUP fd %d",
                       (void *)curPtr, sockPtr->sock);
                spoolerState = SPOOLER_CLOSE;
                err = 0;
                curPtr->infoPtr = WriterGetInfoPtr(curPtr, &pools);
                curPtr->infoPtr->currentPoolRate += curPtr->currentRate;

            } else if (likely(PollOut(&pdata, sockPtr->pidx))
                       || (doStream == NS_WRITER_STREAM_FINISH)) {
                /*
                 * The socket is writable. We can compute the rate when
                 * something was sent already, some kind of rate limiting
                 * is in place, and we have sent enough data to make a good
                 * estimate (just after the 2nd send, i.e., more than the
                 * driver buffer size).
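                 *
                 * For example (hypothetical numbers): 512000 bytes sent
                 * within 250 ms yield currentRate == 512000/250 == 2048
                 * KB/s, using the approximation bytes/ms ~ KB/s.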
*/ Ns_Log(DriverDebug, "Socket of pool '%s' is writable, writer limit %d nsent %ld", curPtr->poolPtr->pool, curPtr->rateLimit, (long)curPtr->nsent); if (curPtr->rateLimit > 0 && (size_t)curPtr->nsent > curPtr->sockPtr->drvPtr->bufsize ) { Ns_Time diff; long currentMs; Ns_DiffTime(&now, &curPtr->startTime, &diff); currentMs = Ns_TimeToMilliseconds(&diff); if (currentMs > 0) { curPtr->currentRate = (int)((curPtr->nsent)/(Tcl_WideInt)currentMs); Ns_Log(DriverDebug, "Socket of pool '%s' is writable, currentMs %ld has updated current rate %d", curPtr->poolPtr->pool, currentMs,curPtr->currentRate); } } Ns_Log(DriverDebug, "### Writer %p can write to client fd %d (trigger %d) streaming %.6x" " size %" PRIdz " nsent %" TCL_LL_MODIFIER "d bufsize %" PRIdz, (void *)curPtr, sockPtr->sock, PollIn(&pdata, 0), doStream, curPtr->size, curPtr->nsent, curPtr->c.file.bufsize); if (unlikely(curPtr->size < 1u)) { /* * Size < 1 means that everything was sent. */ if (doStream != NS_WRITER_STREAM_ACTIVE) { if (doStream == NS_WRITER_STREAM_FINISH) { Ns_ReleaseTemp(curPtr->fd); } spoolerState = SPOOLER_CLOSE; } } else { /* * If size > 0, there is still something to send. * If we are spooling from a file, read some data * from the (spool) file and place it into curPtr->c.file.buf. */ if (curPtr->fd != NS_INVALID_FD) { spoolerState = WriterReadFromSpool(curPtr); } if (spoolerState == SPOOLER_OK) { spoolerState = WriterSend(curPtr, &err); } } } else { /* * Mark when first timeout occurred or check if it is already * for too long and we need to stop this socket */ if (sockPtr->timeout.sec == 0) { Ns_Log(DriverDebug, "Writer %p fd %d setting sendwait %ld.%6ld", (void *)curPtr, sockPtr->sock, curPtr->sockPtr->drvPtr->sendwait.sec, curPtr->sockPtr->drvPtr->sendwait.usec); SockTimeout(sockPtr, &now, &curPtr->sockPtr->drvPtr->sendwait); } else if (Ns_DiffTime(&sockPtr->timeout, &now, NULL) <= 0) { Ns_Log(DriverDebug, "Writer %p fd %d timeout", (void *)curPtr, sockPtr->sock); err = ETIMEDOUT; spoolerState = SPOOLER_CLOSETIMEOUT; } } /* * Check result status and close the socket in case of * timeout or completion */ Ns_MutexLock(&queuePtr->lock); if (spoolerState == SPOOLER_OK) { if (curPtr->size > 0u || doStream == NS_WRITER_STREAM_ACTIVE) { Ns_Log(DriverDebug, "Writer %p continue OK (size %" PRIdz ") => PUSH", (void *)curPtr, curPtr->size); Push(curPtr, writePtr); } else { Ns_Log(DriverDebug, "Writer %p done OK (size %" PRIdz ") => RELEASE", (void *)curPtr, curPtr->size); WriterSockRelease(curPtr); } } else { /* * spoolerState might be SPOOLER_CLOSE or SPOOLER_*TIMEOUT, or SPOOLER_*ERROR */ Ns_Log(DriverDebug, "Writer %p fd %d release, not OK (status %d) => RELEASE", (void *)curPtr, curPtr->sockPtr->sock, (int)spoolerState); curPtr->status = spoolerState; curPtr->err = err; WriterSockRelease(curPtr); } Ns_MutexUnlock(&queuePtr->lock); curPtr = nextPtr; } /* * Add more sockets to the writer queue */ if (queuePtr->sockPtr != NULL) { Ns_MutexLock(&queuePtr->lock); if (queuePtr->sockPtr != NULL) { curPtr = queuePtr->sockPtr; queuePtr->sockPtr = NULL; while (curPtr != NULL) { nextPtr = curPtr->nextPtr; sockPtr = curPtr->sockPtr; drvPtr = sockPtr->drvPtr; SockTimeout(sockPtr, &now, &drvPtr->sendwait); Push(curPtr, writePtr); queuePtr->queuesize++; curPtr = nextPtr; } queuePtr->curPtr = writePtr; } Ns_MutexUnlock(&queuePtr->lock); } /* * Check for shutdown */ stopping = queuePtr->shutdown; } PollFree(&pdata); { /* * Free ConnPoolInfo */ Tcl_HashSearch search; Tcl_HashEntry *hPtr = Tcl_FirstHashEntry(&pools, &search); while (hPtr 
!= NULL) {
            ConnPoolInfo *infoPtr = (ConnPoolInfo *)Tcl_GetHashValue(hPtr);
            ns_free(infoPtr);
            hPtr = Tcl_NextHashEntry(&search);
        }
        /*
         * Delete the hash table for pools.
         */
        Tcl_DeleteHashTable(&pools);
    }

    Ns_Log(Notice, "exiting");

    Ns_MutexLock(&queuePtr->lock);
    queuePtr->stopped = NS_TRUE;
    Ns_CondBroadcast(&queuePtr->cond);
    Ns_MutexUnlock(&queuePtr->lock);
}

/*
 *----------------------------------------------------------------------
 *
 * NsWriterFinish --
 *
 *      Finish a streaming writer job (typically called at the close
 *      of a connection). A streaming writer job is typically fed by a
 *      sequence of ns_write operations. After such an operation, the
 *      WriterThread has to keep the writer job alive.
 *      NsWriterFinish() tells the WriterThread that no further writes
 *      will come from this connection.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      Change the state of the writer job and trigger the queue.
 *
 *----------------------------------------------------------------------
 */
void
NsWriterFinish(NsWriterSock *wrSockPtr)
{
    WriterSock *writerSockPtr = (WriterSock *)wrSockPtr;

    NS_NONNULL_ASSERT(wrSockPtr != NULL);

    Ns_Log(DriverDebug, "NsWriterFinish: %p", (void *)writerSockPtr);
    writerSockPtr->doStream = NS_WRITER_STREAM_FINISH;
    SockTrigger(writerSockPtr->queuePtr->pipe[1]);
}

/*
 *----------------------------------------------------------------------
 *
 * WriterSetupStreamingMode --
 *
 *      In streaming mode, set up a temporary fd which is used as input and
 *      output. Streaming i/o will append to the file, while the writer
 *      will read from it.
 *
 * Results:
 *      Ns_ReturnCode (NS_OK, NS_ERROR, NS_FILTER_BREAK). The last case
 *      signals that all processing was already performed and the caller
 *      can stop handling more data. On success, the function returns an
 *      fd as last argument.
 *
 * Side effects:
 *      Potentially allocating a temp file and updating connPtr members.
 *
 *----------------------------------------------------------------------
 */
Ns_ReturnCode
WriterSetupStreamingMode(Conn *connPtr, struct iovec *bufs, int nbufs, int *fdPtr)
{
    bool           first;
    size_t         wrote = 0u;
    WriterSock    *wrSockPtr1;
    Ns_ReturnCode  status = NS_OK;

    NS_NONNULL_ASSERT(connPtr != NULL);
    NS_NONNULL_ASSERT(bufs != NULL);
    NS_NONNULL_ASSERT(fdPtr != NULL);

    Ns_Log(DriverDebug, "NsWriterQueue: streaming writer job");

    if (connPtr->fd == 0) {
        /*
         * Create a new temporary spool file and provide the fd to the
         * connection thread via connPtr.
         */
        first = NS_TRUE;
        wrSockPtr1 = NULL;

        *fdPtr = Ns_GetTemp();
        connPtr->fd = *fdPtr;

        Ns_Log(DriverDebug, "NsWriterQueue: new tmp file has fd %d", *fdPtr);

    } else {
        /*
         * Reuse the previously created spool file.
         */
        first = NS_FALSE;
        wrSockPtr1 = WriterSockRequire(connPtr);

        if (wrSockPtr1 == NULL) {
            Ns_Log(Notice,
                   "NsWriterQueue: writer job was already canceled (fd %d); maybe user dropped connection",
                   connPtr->fd);
            return NS_ERROR;

        } else {
            /*
             * Lock only when first == NS_FALSE.
             */
            Ns_MutexLock(&wrSockPtr1->c.file.fdlock);
            (void)ns_lseek(connPtr->fd, 0, SEEK_END);
        }
    }

    /*
     * For the time being, handle just "string data" in streaming
     * output (iovec bufs). Write the content to the spool file.
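     *
     * Each buffer below is written with a single ns_write() call; short or
     * failed writes are only logged, not retried, so "wrote" reflects the
     * number of bytes actually spooled.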
     */
    {
        int i;

        for (i = 0; i < nbufs; i++) {
            ssize_t j = ns_write(connPtr->fd, bufs[i].iov_base, bufs[i].iov_len);

            if (j > 0) {
                wrote += (size_t)j;
                Ns_Log(Debug, "NsWriterQueue: fd %d [%d] spooled %" PRIdz " of %" PRIiovlen " OK %d",
                       connPtr->fd, i, j, bufs[i].iov_len, (j == (ssize_t)bufs[i].iov_len));
            } else {
                Ns_Log(Warning, "NsWriterQueue: spool to fd %d write operation failed",
                       connPtr->fd);
            }
        }
    }

    if (first) {
        //bufs = NULL;
        connPtr->nContentSent = wrote;
#ifndef _WIN32
        /*
         * ns_sock_set_blocking can't be used under Windows, since under
         * Windows sockets are not file descriptors.
         */
        (void)ns_sock_set_blocking(connPtr->fd, NS_FALSE);
#endif
        /*
         * Fall through to register a stream writer with the temp file.
         */
    } else {
        WriterSock *writerSockPtr;

        /*
         * This is a later streaming operation, where the writer job
         * (strWriter) was previously established.
         */
        assert(wrSockPtr1 != NULL);
        /*
         * Update the controlling variables (size and toRead) in the
         * connPtr, and the length info for the access log, and trigger the
         * writer to notify it about the change.
         */
        writerSockPtr = (WriterSock *)connPtr->strWriter;
        writerSockPtr->size += wrote;
        writerSockPtr->c.file.toRead += wrote;
        Ns_MutexUnlock(&wrSockPtr1->c.file.fdlock);

        connPtr->nContentSent += wrote;
        if (likely(wrSockPtr1->queuePtr != NULL)) {
            SockTrigger(wrSockPtr1->queuePtr->pipe[1]);
        }
        WriterSockRelease(wrSockPtr1);
        status = NS_FILTER_BREAK;
    }

    return status;
}

/*
 *----------------------------------------------------------------------
 *
 * NsWriterQueue --
 *
 *      Submit a new job to the writer queue.
 *
 * Results:
 *      NS_ERROR means that the writer thread refuses to accept this
 *      job and that the client (the connection thread) has to handle
 *      this data. NS_OK means that the writer thread takes care of
 *      transmitting the content to the client.
 *
 * Side effects:
 *      Potentially adding a job to the writer queue.
 *
 *----------------------------------------------------------------------
 */
Ns_ReturnCode
NsWriterQueue(Ns_Conn *conn, size_t nsend,
              Tcl_Channel chan, FILE *fp, int fd,
              struct iovec *bufs, int nbufs,
              const Ns_FileVec *filebufs, int nfilebufs,
              bool everysize)
{
    Conn          *connPtr;
    WriterSock    *wrSockPtr;
    SpoolerQueue  *queuePtr;
    DrvWriter     *wrPtr;
    bool           trigger = NS_FALSE;
    size_t         headerSize;
    Ns_ReturnCode  status = NS_OK;
    Ns_FileVec    *fbufs = NULL;
    int            nfbufs = 0;

    NS_NONNULL_ASSERT(conn != NULL);
    connPtr = (Conn *)conn;

    if (unlikely(connPtr->sockPtr == NULL)) {
        Ns_Log(Warning,
               "NsWriterQueue: called without sockPtr size %" PRIdz
               " bufs %d flags %.6x stream %.6x chan %p fd %d",
               nsend, nbufs, connPtr->flags, connPtr->flags & NS_CONN_STREAM,
               (void *)chan, fd);
        status = NS_ERROR;
        wrPtr = NULL;
    } else {
        wrPtr = &connPtr->sockPtr->drvPtr->writer;

        Ns_Log(DriverDebug,
               "NsWriterQueue: size %" PRIdz " bufs %p (%d) flags %.6x stream %.6x chan %p fd %d thread %d",
               nsend, (void *)bufs, nbufs, connPtr->flags,
               connPtr->flags & NS_CONN_STREAM, (void *)chan, fd,
               wrPtr->threads);

        if (unlikely(wrPtr->threads == 0)) {
            Ns_Log(DriverDebug, "NsWriterQueue: no writer threads configured");
            status = NS_ERROR;

        } else if (nsend < (size_t)wrPtr->writersize
                   && !everysize
                   && connPtr->fd == 0) {
            Ns_Log(DriverDebug, "NsWriterQueue: file is too small(%" PRIdz " < %" PRIdz ")",
                   nsend, wrPtr->writersize);
            status = NS_ERROR;
        }
    }

    if (status != NS_OK) {
        return status;
    }
    assert(wrPtr != NULL);

    /*
     * In streaming mode, set up a temporary fd which is used as input and
     * output. Streaming i/o will append to the file, while the writer will
     * read from it.
*/ if (((connPtr->flags & NS_CONN_STREAM) != 0u) || connPtr->fd > 0) { if (wrPtr->doStream == NS_WRITER_STREAM_NONE) { status = NS_ERROR; } else if (unlikely(fp != NULL || fd != NS_INVALID_FD)) { Ns_Log(DriverDebug, "NsWriterQueue: does not stream from this source via writer"); status = NS_ERROR; } else { status = WriterSetupStreamingMode(connPtr, bufs, nbufs, &fd); } if (unlikely(status != NS_OK)) { if (status == NS_FILTER_BREAK) { status = NS_OK; } return status; } /* * As a result of successful WriterSetupStreamingMode(), we have fd * set. */ assert(fd != NS_INVALID_FD); } else { if (fp != NULL) { /* * The client provided an open file pointer and closes it */ fd = ns_dup(fileno(fp)); } else if (fd != NS_INVALID_FD) { /* * The client provided an open file descriptor and closes it */ fd = ns_dup(fd); } else if (chan != NULL) { ClientData clientData; /* * The client provided an open Tcl channel and closes it */ if (Tcl_GetChannelHandle(chan, TCL_READABLE, &clientData) != TCL_OK) { return NS_ERROR; } fd = ns_dup(PTR2INT(clientData)); } else if (filebufs != NULL && nfilebufs > 0) { /* * The client provided Ns_FileVec with open files. The client is * responsible for closing it, like in all other cases. */ size_t i; /* * This is the only case, where fbufs will be != NULL, * i.e. keeping a duplicate of the passed-in Ns_FileVec structure * for which the client is responsible. */ fbufs = (Ns_FileVec *)ns_calloc((size_t)nfilebufs, sizeof(Ns_FileVec)); nfbufs = nfilebufs; for (i = 0u; i < (size_t)nfilebufs; i++) { fbufs[i].fd = ns_dup(filebufs[i].fd); fbufs[i].length = filebufs[i].length; fbufs[i].offset = filebufs[i].offset; } /* * Place the fd of the first Ns_FileVec to fd. */ fd = fbufs[0].fd; Ns_Log(DriverDebug, "NsWriterQueue: filevec mode, take first fd %d tosend %lu", fd, nsend); } } Ns_Log(DriverDebug, "NsWriterQueue: writer threads %d nsend %" PRIdz " writersize %" PRIdz, wrPtr->threads, nsend, wrPtr->writersize); assert(connPtr->poolPtr != NULL); connPtr->poolPtr->stats.spool++; wrSockPtr = (WriterSock *)ns_calloc(1u, sizeof(WriterSock)); wrSockPtr->sockPtr = connPtr->sockPtr; wrSockPtr->poolPtr = connPtr->poolPtr; /* just for being able to trace back the origin, e.g. list */ wrSockPtr->sockPtr->timeout.sec = 0; wrSockPtr->flags = connPtr->flags; wrSockPtr->refCount = 1; /* * Take the rate limit from the connection. */ wrSockPtr->rateLimit = connPtr->rateLimit; if (wrSockPtr->rateLimit == -1) { /* * The value was not specified via connection. Use either the pool * limit as a base for the computation or fall back to the driver * default value. */ if (connPtr->poolPtr->rate.poolLimit > 0) { /* * Very optimistic start value, but values will float through via * bandwidth management. */ wrSockPtr->rateLimit = connPtr->poolPtr->rate.poolLimit / 2; } else { wrSockPtr->rateLimit = wrPtr->rateLimit; } } Ns_Log(WriterDebug, "### Writer(%d): initial rate limit %d KB/s", wrSockPtr->sockPtr->sock, wrSockPtr->rateLimit); /* * Make sure we have proper content length header for * keep-alive/pipelining. 
     */
    Ns_ConnSetLengthHeader(conn, nsend, (wrSockPtr->flags & NS_CONN_STREAM) != 0u);

    /*
     * Flush the headers.
     */
    if ((conn->flags & NS_CONN_SENTHDRS) == 0u) {
        Tcl_DString ds;

        Ns_DStringInit(&ds);
        Ns_Log(DriverDebug, "### Writer(%d): add header", fd);
        conn->flags |= NS_CONN_SENTHDRS;
        (void)Ns_CompleteHeaders(conn, nsend, 0u, &ds);

        headerSize = (size_t)Ns_DStringLength(&ds);
        if (headerSize > 0u) {
            wrSockPtr->headerString = ns_strdup(Tcl_DStringValue(&ds));
        }
        Ns_DStringFree(&ds);
    } else {
        headerSize = 0u;
    }

    if (fd != NS_INVALID_FD) {
        /* maybe add mmap support for files (fd != NS_INVALID_FD) */

        wrSockPtr->fd = fd;
        wrSockPtr->c.file.bufs = fbufs;
        wrSockPtr->c.file.nbufs = nfbufs;
        Ns_Log(DriverDebug, "### Writer(%d) tosend %" PRIdz " files %d bufsize %" PRIdz,
               fd, nsend, nfbufs, wrPtr->bufsize);

        if (unlikely(headerSize >= wrPtr->bufsize)) {
            /*
             * We have a header which is larger than bufsize; place it
             * as "leftover" and use the headerString as buffer for file
             * reads (a rather rare case).
             */
            wrSockPtr->c.file.buf = (unsigned char *)wrSockPtr->headerString;
            wrSockPtr->c.file.maxsize = headerSize;
            wrSockPtr->c.file.bufsize = headerSize;
            wrSockPtr->headerString = NULL;
        } else if (headerSize > 0u) {
            /*
             * We have a header that fits into the bufsize; place it
             * as "leftover" at the end of the buffer.
             */
            wrSockPtr->c.file.buf = ns_malloc(wrPtr->bufsize);
            memcpy(wrSockPtr->c.file.buf, wrSockPtr->headerString, headerSize);
            wrSockPtr->c.file.bufsize = headerSize;
            wrSockPtr->c.file.maxsize = wrPtr->bufsize;
            ns_free(wrSockPtr->headerString);
            wrSockPtr->headerString = NULL;
        } else {
            assert(wrSockPtr->headerString == NULL);
            wrSockPtr->c.file.buf = ns_malloc(wrPtr->bufsize);
            wrSockPtr->c.file.maxsize = wrPtr->bufsize;
        }
        wrSockPtr->c.file.bufoffset = 0;
        wrSockPtr->c.file.toRead = nsend;

    } else if (bufs != NULL) {
        int   i, j, headerbufs = (headerSize > 0u ? 1 : 0);

        wrSockPtr->fd = NS_INVALID_FD;

        if (nbufs+headerbufs < UIO_SMALLIOV) {
            wrSockPtr->c.mem.bufs = wrSockPtr->c.mem.preallocated_bufs;
        } else {
            Ns_Log(DriverDebug, "NsWriterQueue: alloc %d iovecs", nbufs);
            wrSockPtr->c.mem.bufs = ns_calloc((size_t)nbufs + (size_t)headerbufs,
                                              sizeof(struct iovec));
        }
        wrSockPtr->c.mem.nbufs = nbufs+headerbufs;
        if (headerbufs != 0) {
            wrSockPtr->c.mem.bufs[0].iov_base = wrSockPtr->headerString;
            wrSockPtr->c.mem.bufs[0].iov_len = headerSize;
        }

        if (connPtr->fmap.addr != NULL) {
            Ns_Log(DriverDebug, "NsWriterQueue: deliver fmapped %p", (void *)connPtr->fmap.addr);
            /*
             * Deliver an mmapped file, no need to copy content.
             */
            for (i = 0, j=headerbufs; i < nbufs; i++, j++) {
                wrSockPtr->c.mem.bufs[j].iov_base = bufs[i].iov_base;
                wrSockPtr->c.mem.bufs[j].iov_len = bufs[i].iov_len;
            }
            /*
             * Make a copy of the fmap structure and make clear that
             * we unmap in the writer thread.
             */
            wrSockPtr->c.mem.fmap = connPtr->fmap;
            connPtr->fmap.addr = NULL;
            /* header string will be freed via wrSockPtr->headerString */

        } else {
            /*
             * Deliver content from an iovec. The lifetime of the
             * source is unknown, so we have to copy the content.
             */
            for (i = 0, j=headerbufs; i < nbufs; i++, j++) {
                wrSockPtr->c.mem.bufs[j].iov_base = ns_malloc(bufs[i].iov_len);
                wrSockPtr->c.mem.bufs[j].iov_len = bufs[i].iov_len;
                memcpy(wrSockPtr->c.mem.bufs[j].iov_base, bufs[i].iov_base, bufs[i].iov_len);
            }
            /* header string will be freed via bufs[0] */
            wrSockPtr->headerString = NULL;
        }

    } else {
        ns_free(wrSockPtr);
        return NS_ERROR;
    }

    /*
     * Add the header size to the total size.
     */
    nsend += headerSize;

    if (connPtr->clientData != NULL) {
        wrSockPtr->clientData = ns_strdup(connPtr->clientData);
    }
    wrSockPtr->startTime = *Ns_ConnStartTime(conn);

    /*
     * Set up the streaming context before potentially sending headers.
     */
    if ((wrSockPtr->flags & NS_CONN_STREAM) != 0u) {
        wrSockPtr->doStream = NS_WRITER_STREAM_ACTIVE;
        assert(connPtr->strWriter == NULL);
        /*
         * Add a reference to the stream writer to the connection such
         * that it can efficiently append to a stream when multiple output
         * operations happen. The backpointer (from the stream writer to
         * the connection) is needed to clear the reference to the writer
         * in case the writer is deleted. No locks are needed, since
         * nobody can share this structure yet.
         */
        connPtr->strWriter = (NsWriterSock *)wrSockPtr;
        wrSockPtr->connPtr = connPtr;
    }

    /*
     * Tell the connection that the writer handles the output (including
     * closing the connection to the client).
     */
    connPtr->flags |= NS_CONN_SENT_VIA_WRITER;
    wrSockPtr->keep = connPtr->keep > 0 ? NS_TRUE : NS_FALSE;
    wrSockPtr->size = nsend;

    Ns_Log(DriverDebug, "NsWriterQueue NS_CONN_SENT_VIA_WRITER connPtr %p", (void*)connPtr);

    if ((wrSockPtr->flags & NS_CONN_STREAM) == 0u) {
        Ns_Log(DriverDebug, "NsWriterQueue NS_CONN_SENT_VIA_WRITER connPtr %p clear sockPtr %p",
               (void*)connPtr, (void*)connPtr->sockPtr);
        connPtr->sockPtr = NULL;
        connPtr->flags |= NS_CONN_CLOSED;
        connPtr->nContentSent = nsend - headerSize;
    }

    /*
     * Get the next writer thread from the list; all writer requests are
     * rotated between all writer threads.
     */
    Ns_MutexLock(&wrPtr->lock);
    if (wrPtr->curPtr == NULL) {
        wrPtr->curPtr = wrPtr->firstPtr;
    }
    queuePtr = wrPtr->curPtr;
    wrPtr->curPtr = wrPtr->curPtr->nextPtr;
    Ns_MutexUnlock(&wrPtr->lock);

    Ns_Log(WriterDebug, "Writer(%d): started: id=%d fd=%d, "
           "size=%" PRIdz ", flags=%X, rate %d KB/s: %s",
           wrSockPtr->sockPtr->sock, queuePtr->id, wrSockPtr->fd,
           nsend, wrSockPtr->flags,
           wrSockPtr->rateLimit,
           connPtr->request.line);

    /*
     * Now add the new writer socket to the writer thread's queue.
     */
    wrSockPtr->queuePtr = queuePtr;

    Ns_MutexLock(&queuePtr->lock);
    if (queuePtr->sockPtr == NULL) {
        trigger = NS_TRUE;
    }
    Push(wrSockPtr, queuePtr->sockPtr);
    Ns_MutexUnlock(&queuePtr->lock);

    /*
     * Wake up the writer thread.
     */
    if (trigger) {
        SockTrigger(queuePtr->pipe[1]);
    }

    return NS_OK;
}

/*
 *----------------------------------------------------------------------
 *
 * DriverWriterFromObj --
 *
 *      Look up a driver by name and return its DrvWriter. When driverObj
 *      is NULL, get the driver from the conn.
 *
 * Results:
 *      Ns_ReturnCode
 *
 * Side effects:
 *      Set an error message in the interp in case of failure.
 *
 *----------------------------------------------------------------------
 */
static Ns_ReturnCode
DriverWriterFromObj(
    Tcl_Interp *interp,
    Tcl_Obj *driverObj,
    Ns_Conn *conn,
    DrvWriter **wrPtrPtr
) {
    Driver       *drvPtr;
    const char   *driverName = NULL;
    int           driverNameLen = 0;
    DrvWriter    *wrPtr = NULL;
    Ns_ReturnCode result;

    /*
     * If no driver is provided, take the current driver. The caller has
     * to make sure that in cases where no driver is specified, the
     * command is run in a connection thread.
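     *
     * A typical call pattern (as used by the "ns_writer size" and
     * "ns_writer streaming" subcommands below):
     *
     *     DrvWriter *wrPtr;
     *
     *     if (DriverWriterFromObj(interp, driverObj, conn, &wrPtr) != NS_OK) {
     *         result = TCL_ERROR;
     *     }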
*/ if (driverObj == NULL) { if (conn != NULL) { driverName = Ns_ConnDriverName(conn); driverNameLen = (int)strlen(driverName); } } else { driverName = Tcl_GetStringFromObj(driverObj, &driverNameLen); } if (driverName != NULL) { for (drvPtr = firstDrvPtr; drvPtr != NULL; drvPtr = drvPtr->nextPtr) { if (strncmp(driverName, drvPtr->threadName, (size_t)driverNameLen) == 0) { if (drvPtr->writer.firstPtr != NULL) { wrPtr = &drvPtr->writer; } break; } } } if (unlikely(wrPtr == NULL)) { Ns_TclPrintfResult(interp, "no writer configured for a driver with name %s", driverName); result = NS_ERROR; } else { *wrPtrPtr = wrPtr; result = NS_OK; } return result; } /* *---------------------------------------------------------------------- * * WriterSubmitObjCmd - subcommand of NsTclWriterObjCmd -- * * Implements "ns_writer submit" command. * Send the provided data to the client. * * Results: * Standard Tcl result. * * Side effects: * None. * *---------------------------------------------------------------------- */ static int WriterSubmitObjCmd(ClientData UNUSED(clientData), Tcl_Interp *interp, int objc, Tcl_Obj *const* objv) { int result = TCL_OK; Ns_Conn *conn; Tcl_Obj *dataObj; Ns_ObjvSpec args[] = { {"data", Ns_ObjvObj, &dataObj, NULL}, {NULL, NULL, NULL, NULL} }; if (Ns_ParseObjv(NULL, args, interp, 2, objc, objv) != NS_OK || NsConnRequire(interp, NS_CONN_REQUIRE_ALL, &conn) != NS_OK) { result = TCL_ERROR; } else { int size; unsigned char *data = Tcl_GetByteArrayFromObj(dataObj, &size); if (data != NULL) { struct iovec vbuf; Ns_ReturnCode status; vbuf.iov_base = (void *)data; vbuf.iov_len = (size_t)size; status = NsWriterQueue(conn, (size_t)size, NULL, NULL, NS_INVALID_FD, &vbuf, 1, NULL, 0, NS_TRUE); Tcl_SetObjResult(interp, Tcl_NewBooleanObj(status == NS_OK ? 1 : 0)); } } return result; } /* *---------------------------------------------------------------------- * * WriterCheckInputParams - * * Helper command for WriterSubmitFileObjCmd and WriterSubmitFilesObjCmd * to check validity of filename, offset and size. * * Results: * Standard Tcl result. Returns on success also fd and nrbytes. * * Side effects: * None. * *---------------------------------------------------------------------- */ static int WriterCheckInputParams(Tcl_Interp *interp, const char *filenameString, size_t size, off_t offset, int *fdPtr, size_t *nrbytesPtr) { int result = TCL_OK, rc; struct stat st; Ns_Log(DriverDebug, "WriterCheckInputParams %s offset %" PROTd " size %" PRIdz, filenameString, offset, size); /* * Use stat() call to obtain information about the actual file to check * later the plausibility of the parameters. */ rc = stat(filenameString, &st); if (unlikely(rc != 0)) { Ns_TclPrintfResult(interp, "file does not exist '%s'", filenameString); result = TCL_ERROR; } else { size_t nrbytes = 0u; int fd; /* * Try to open the file and check offset and size parameters. */ fd = ns_open(filenameString, O_RDONLY | O_CLOEXEC, 0); if (unlikely(fd == NS_INVALID_FD)) { Ns_TclPrintfResult(interp, "could not open file '%s'", filenameString); result = TCL_ERROR; } else if (unlikely(offset > st.st_size) || offset < 0) { Ns_TclPrintfResult(interp, "offset must be a positive value less or equal filesize"); result = TCL_ERROR; } else if (size > 0) { if (unlikely((off_t)size + offset > st.st_size)) { Ns_TclPrintfResult(interp, "offset + size must be less or equal filesize"); result = TCL_ERROR; } else { nrbytes = (size_t)size; } } else { nrbytes = (size_t)st.st_size - (size_t)offset; } /* * When an offset is provide, jump to this offset. 
*/ if (offset > 0 && result == TCL_OK) { if (ns_lseek(fd, (off_t)offset, SEEK_SET) == -1) { Ns_TclPrintfResult(interp, "cannot seek to position %ld", (long)offset); result = TCL_ERROR; } } if (result == TCL_OK) { *fdPtr = fd; *nrbytesPtr = nrbytes; } else if (fd != NS_INVALID_FD) { /* * On invalid parameters, close the fd. */ ns_close(fd); } } return result; } /* *---------------------------------------------------------------------- * * WriterSubmitFileObjCmd - subcommand of NsTclWriterObjCmd -- * * Implements "ns_writer submitfile" command. * Send the provided file to the client. * * Results: * Standard Tcl result. * * Side effects: * None. * *---------------------------------------------------------------------- */ static int WriterSubmitFileObjCmd(ClientData UNUSED(clientData), Tcl_Interp *interp, int objc, Tcl_Obj *const* objv) { int result = TCL_OK; Ns_Conn *conn; char *fileNameString; int headers = 0; Tcl_WideInt offset = 0, size = 0; Ns_ObjvValueRange offsetRange = {0, LLONG_MAX}; Ns_ObjvValueRange sizeRange = {1, LLONG_MAX}; Ns_ObjvSpec lopts[] = { {"-headers", Ns_ObjvBool, &headers, INT2PTR(NS_TRUE)}, {"-offset", Ns_ObjvMemUnit, &offset, &offsetRange}, {"-size", Ns_ObjvMemUnit, &size, &sizeRange}, {NULL, NULL, NULL, NULL} }; Ns_ObjvSpec args[] = { {"file", Ns_ObjvString, &fileNameString, NULL}, {NULL, NULL, NULL, NULL} }; if (unlikely(Ns_ParseObjv(lopts, args, interp, 2, objc, objv) != NS_OK) || NsConnRequire(interp, NS_CONN_REQUIRE_ALL, &conn) != NS_OK) { result = TCL_ERROR; } else if (unlikely( Ns_ConnSockPtr(conn) == NULL )) { Ns_Log(Warning, "NsWriterQueue: called without valid sockPtr, maybe connection already closed"); Ns_TclPrintfResult(interp, "0"); result = TCL_OK; } else { size_t nrbytes = 0u; int fd = NS_INVALID_FD; result = WriterCheckInputParams(interp, fileNameString, (size_t)size, offset, &fd, &nrbytes); if (likely(result == TCL_OK)) { Ns_ReturnCode status; /* * The caller requested that we build required headers */ if (headers != 0) { Ns_ConnSetTypeHeader(conn, Ns_GetMimeType(fileNameString)); } status = NsWriterQueue(conn, nrbytes, NULL, NULL, fd, NULL, 0, NULL, 0, NS_TRUE); Tcl_SetObjResult(interp, Tcl_NewBooleanObj(status == NS_OK ? 1 : 0)); if (fd != NS_INVALID_FD) { (void) ns_close(fd); } else { Ns_Log(Warning, "WriterSubmitFileObjCmd called with invalid fd"); } } else if (fd != NS_INVALID_FD) { (void) ns_close(fd); } } return result; } /* *---------------------------------------------------------------------- * * WriterGetMemunitFromDict -- * * Helper function to obtain a memory unit from a dict structure, * optionally checking the value range. * * Results: * Standard Tcl result. * * Side effects: * On errors, an error message is left in the interpreter. 
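 *
 *      For example, given a file-spec dict such as
 *      {filename /tmp/data -size 10MB} (hypothetical values), a lookup
 *      with the key "-size" parses the value as a memory unit and checks
 *      it against the provided range.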
 *
 *----------------------------------------------------------------------
 */
static int
WriterGetMemunitFromDict(Tcl_Interp *interp, Tcl_Obj *dictObj, Tcl_Obj *keyObj,
                         Ns_ObjvValueRange *rangePtr, Tcl_WideInt *valuePtr)
{
    Tcl_Obj *intObj = NULL;
    int      result;

    NS_NONNULL_ASSERT(interp != NULL);
    NS_NONNULL_ASSERT(dictObj != NULL);
    NS_NONNULL_ASSERT(keyObj != NULL);
    NS_NONNULL_ASSERT(valuePtr != NULL);

    result = Tcl_DictObjGet(interp, dictObj, keyObj, &intObj);
    if (result == TCL_OK && intObj != NULL) {
        result = Ns_TclGetMemUnitFromObj(interp, intObj, valuePtr);
        if (result == TCL_OK && rangePtr != NULL) {
            result = Ns_CheckWideRange(interp, Tcl_GetString(keyObj), rangePtr, *valuePtr);
        }
    }
    return result;
}

/*
 *----------------------------------------------------------------------
 *
 * WriterSubmitFilesObjCmd - subcommand of NsTclWriterObjCmd --
 *
 *      Implements "ns_writer submitfiles" command. Send the provided files
 *      to the client. "files" are provided as a list of dicts, where every
 *      dict must contain a "filename" element and can contain an "-offset"
 *      and/or a "-size" element.
 *
 * Results:
 *      Standard Tcl result.
 *
 * Side effects:
 *      None.
 *
 *----------------------------------------------------------------------
 */
static int
WriterSubmitFilesObjCmd(ClientData UNUSED(clientData), Tcl_Interp *interp,
                        int objc, Tcl_Obj *const* objv)
{
    int        result = TCL_OK;
    Ns_Conn   *conn;
    int        headers = 0, nrFiles;
    Tcl_Obj   *filesObj = NULL, **fileObjv;
    Ns_ObjvSpec lopts[] = {
        {"-headers", Ns_ObjvBool, &headers, INT2PTR(NS_TRUE)},
        {NULL, NULL, NULL, NULL}
    };
    Ns_ObjvSpec args[] = {
        {"files", Ns_ObjvObj, &filesObj, NULL},
        {NULL, NULL, NULL, NULL}
    };

    if (unlikely(Ns_ParseObjv(lopts, args, interp, 2, objc, objv) != NS_OK)
        || NsConnRequire(interp, NS_CONN_REQUIRE_ALL, &conn) != NS_OK) {
        result = TCL_ERROR;

    } else if (unlikely( Ns_ConnSockPtr(conn) == NULL )) {
        Ns_Log(Warning,
               "NsWriterQueue: called without valid sockPtr, "
               "maybe connection already closed");
        Ns_TclPrintfResult(interp, "0");
        result = TCL_OK;

    } else if (Tcl_ListObjGetElements(interp, filesObj, &nrFiles, &fileObjv) != TCL_OK) {
        Ns_TclPrintfResult(interp, "not a valid list of files: '%s'",
                           Tcl_GetString(filesObj));
        result = TCL_ERROR;

    } else if (nrFiles == 0) {
        Ns_TclPrintfResult(interp, "The provided list has to contain at least one file spec");
        result = TCL_ERROR;

    } else {
        size_t      totalbytes = 0u, i;
        Tcl_Obj    *keys[3], *filenameObj = NULL;
        Ns_FileVec *filebufs;
        const char *firstFilenameString = NULL;
        Ns_ObjvValueRange offsetRange = {0, LLONG_MAX};
        Ns_ObjvValueRange sizeRange   = {1, LLONG_MAX};

        filebufs = (Ns_FileVec *)ns_calloc((size_t)nrFiles, sizeof(Ns_FileVec));
        keys[0]  = Tcl_NewStringObj("filename", 8);
        keys[1]  = Tcl_NewStringObj("-offset", 7);
        keys[2]  = Tcl_NewStringObj("-size", 5);

        Tcl_IncrRefCount(keys[0]);
        Tcl_IncrRefCount(keys[1]);
        Tcl_IncrRefCount(keys[2]);

        for (i = 0u; i < (size_t)nrFiles; i++) {
            filebufs[i].fd = NS_INVALID_FD;
        }

        /*
         * Iterate over the list of dicts.
         */
        for (i = 0u; i < (size_t)nrFiles; i++) {
            Tcl_WideInt offset = 0, size = 0;
            int         rc, fd = NS_INVALID_FD;
            const char *filenameString;
            size_t      nrbytes;

            /*
             * Get the required "filename" element.
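             * (A hypothetical list element may look like
             *     {filename /tmp/f1 -offset 1KB -size 4KB}
             * where only "filename" is required.)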
*/ filenameObj = NULL; rc = Tcl_DictObjGet(interp, fileObjv[i], keys[0], &filenameObj); if (rc != TCL_OK || filenameObj == NULL) { Ns_TclPrintfResult(interp, "missing filename in dict '%s'", Tcl_GetString(fileObjv[i])); result = TCL_ERROR; break; } filenameString = Tcl_GetString(filenameObj); if (firstFilenameString == NULL) { firstFilenameString = filenameString; } /* * Get optional "-offset" and "-size" elements. */ if (WriterGetMemunitFromDict(interp, fileObjv[i], keys[1], &offsetRange, &offset) != TCL_OK) { result = TCL_ERROR; break; } if (WriterGetMemunitFromDict(interp, fileObjv[i], keys[2], &sizeRange, &size) != TCL_OK) { result = TCL_ERROR; break; } /* * Check validity of the provided values */ result = WriterCheckInputParams(interp, Tcl_GetString(filenameObj), (size_t)size, (off_t)offset, &fd, &nrbytes); if (result != TCL_OK) { break; } filebufs[i].fd = fd; filebufs[i].offset = offset; filebufs[i].length = nrbytes; totalbytes = totalbytes + (size_t)nrbytes; } Tcl_DecrRefCount(keys[0]); Tcl_DecrRefCount(keys[1]); Tcl_DecrRefCount(keys[2]); /* * If everything is ok, submit the request to the writer queue. */ if (result == TCL_OK) { Ns_ReturnCode status; if (headers != 0 && firstFilenameString != NULL) { Ns_ConnSetTypeHeader(conn, Ns_GetMimeType(firstFilenameString)); } status = NsWriterQueue(conn, totalbytes, NULL, NULL, NS_INVALID_FD, NULL, 0, filebufs, nrFiles, NS_TRUE); /* * Provide a soft error like for "ns_writer submitfile". */ Tcl_SetObjResult(interp, Tcl_NewBooleanObj(status == NS_OK ? 1 : 0)); } /* * The NsWriterQueue() API makes the usual duplicates of the file * descriptors and the Ns_FileVec structure, so we have to cleanup * here. */ for (i = 0u; i < (size_t)nrFiles; i++) { if (filebufs[i].fd != NS_INVALID_FD) { (void) ns_close(filebufs[i].fd); } } ns_free(filebufs); } return result; } /* *---------------------------------------------------------------------- * * WriterListObjCmd - subcommand of NsTclWriterObjCmd -- * * Implements "ns_writer list" command. * List the current writer jobs. * * Results: * Standard Tcl result. * * Side effects: * None. * *---------------------------------------------------------------------- */ static int WriterListObjCmd(ClientData UNUSED(clientData), Tcl_Interp *interp, int objc, Tcl_Obj *const* objv) { int result = TCL_OK; NsServer *servPtr = NULL; Ns_ObjvSpec lopts[] = { {"-server", Ns_ObjvServer, &servPtr, NULL}, {NULL, NULL, NULL, NULL} }; if (unlikely(Ns_ParseObjv(lopts, NULL, interp, 2, objc, objv) != NS_OK)) { result = TCL_ERROR; } else { Tcl_DString ds, *dsPtr = &ds; const Driver *drvPtr; SpoolerQueue *queuePtr; Tcl_DStringInit(dsPtr); for (drvPtr = firstDrvPtr; drvPtr != NULL; drvPtr = drvPtr->nextPtr) { const DrvWriter *wrPtr; /* * If server was specified, list only results from this server. 
*/ if (servPtr != NULL && servPtr != drvPtr->servPtr) { continue; } wrPtr = &drvPtr->writer; queuePtr = wrPtr->firstPtr; while (queuePtr != NULL) { const WriterSock *wrSockPtr; Ns_MutexLock(&queuePtr->lock); wrSockPtr = queuePtr->curPtr; while (wrSockPtr != NULL) { char ipString[NS_IPADDR_SIZE]; ns_inet_ntop((struct sockaddr *)&(wrSockPtr->sockPtr->sa), ipString,sizeof(ipString)); (void) Ns_DStringNAppend(dsPtr, "{", 1); (void) Ns_DStringAppendTime(dsPtr, &wrSockPtr->startTime); (void) Ns_DStringNAppend(dsPtr, " ", 1); (void) Ns_DStringAppend(dsPtr, queuePtr->threadName); (void) Ns_DStringNAppend(dsPtr, " ", 1); (void) Ns_DStringAppend(dsPtr, drvPtr->threadName); (void) Ns_DStringNAppend(dsPtr, " ", 1); (void) Ns_DStringAppend(dsPtr, NsPoolName(wrSockPtr->poolPtr->pool)); (void) Ns_DStringNAppend(dsPtr, " ", 1); (void) Ns_DStringAppend(dsPtr, ipString); (void) Ns_DStringPrintf(dsPtr, " %d %" PRIdz " %" TCL_LL_MODIFIER "d %d %d ", wrSockPtr->fd, wrSockPtr->size, wrSockPtr->nsent, wrSockPtr->currentRate, wrSockPtr->rateLimit); (void) Ns_DStringAppendElement(dsPtr, (wrSockPtr->clientData != NULL) ? wrSockPtr->clientData : NS_EMPTY_STRING); (void) Ns_DStringNAppend(dsPtr, "} ", 2); wrSockPtr = wrSockPtr->nextPtr; } Ns_MutexUnlock(&queuePtr->lock); queuePtr = queuePtr->nextPtr; } } Tcl_DStringResult(interp, &ds); } return result; } /* *---------------------------------------------------------------------- * * WriterSizeObjCmd - subcommand of NsTclWriterObjCmd -- * * Implements "ns_writer size" command. * Sets or queries size limit for sending via writer. * * Results: * Standard Tcl result. * * Side effects: * None. * *---------------------------------------------------------------------- */ static int WriterSizeObjCmd(ClientData UNUSED(clientData), Tcl_Interp *interp, int objc, Tcl_Obj *const* objv) { int result = TCL_OK; Tcl_Obj *driverObj = NULL; Ns_Conn *conn = NULL; Tcl_WideInt intValue = -1; const char *firstArgString; Ns_ObjvValueRange range = {1024, INT_MAX}; Ns_ObjvSpec *opts, optsNew[] = { {"-driver", Ns_ObjvObj, &driverObj, NULL}, {NULL, NULL, NULL, NULL} }; Ns_ObjvSpec *args, argsNew[] = { {"?value", Ns_ObjvMemUnit, &intValue, &range}, {NULL, NULL, NULL, NULL} }; Ns_ObjvSpec argsLegacy[] = { {"driver", Ns_ObjvObj, &driverObj, NULL}, {"?value", Ns_ObjvMemUnit, &intValue, &range}, {NULL, NULL, NULL, NULL} }; firstArgString = objc > 2 ? Tcl_GetString(objv[2]) : NULL; if (firstArgString != NULL) { if (*firstArgString != '-' && ((objc == 3 && CHARTYPE(digit, *firstArgString) == 0) || objc == 4)) { args = argsLegacy; opts = NULL; Ns_LogDeprecated(objv, objc, "ns_writer size ?-driver drv? ?size?", NULL); } else { args = argsNew; opts = optsNew; } } else { args = argsNew; opts = optsNew; } if (Ns_ParseObjv(opts, args, interp, 2, objc, objv) != NS_OK) { result = TCL_ERROR; } else if ((driverObj == NULL) && NsConnRequire(interp, NS_CONN_REQUIRE_ALL, &conn) != NS_OK) { result = TCL_ERROR; } else { DrvWriter *wrPtr; if (DriverWriterFromObj(interp, driverObj, conn, &wrPtr) != NS_OK) { result = TCL_ERROR; } else if (intValue != -1) { /* * The optional argument was provided. */ wrPtr->writersize = (size_t)intValue; } if (result == TCL_OK) { Tcl_SetObjResult(interp, Tcl_NewIntObj((int)wrPtr->writersize)); } } return result; } /* *---------------------------------------------------------------------- * * WriterStreamingObjCmd - subcommand of NsTclWriterObjCmd -- * * Implements "ns_writer streaming" command. * Sets or queries streaming state of the writer. * * Results: * Standard Tcl result. 
* * Side effects: * None. * *---------------------------------------------------------------------- */ static int WriterStreamingObjCmd(ClientData UNUSED(clientData), Tcl_Interp *interp, int objc, Tcl_Obj *const* objv) { int boolValue = -1, result = TCL_OK; Tcl_Obj *driverObj = NULL; Ns_Conn *conn = NULL; const char *firstArgString; Ns_ObjvSpec *opts, optsNew[] = { {"-driver", Ns_ObjvObj, &driverObj, NULL}, {NULL, NULL, NULL, NULL} }; Ns_ObjvSpec *args, argsNew[] = { {"?value", Ns_ObjvBool, &boolValue, NULL}, {NULL, NULL, NULL, NULL} }; Ns_ObjvSpec argsLegacy[] = { {"driver", Ns_ObjvObj, &driverObj, NULL}, {"?value", Ns_ObjvBool, &boolValue, NULL}, {NULL, NULL, NULL, NULL} }; firstArgString = objc > 2 ? Tcl_GetString(objv[2]) : NULL; if (firstArgString != NULL) { int argValue; if (*firstArgString != '-' && ((objc == 3 && Tcl_ExprBoolean(interp, firstArgString, &argValue) == TCL_OK) || objc == 4)) { args = argsLegacy; opts = NULL; Ns_LogDeprecated(objv, objc, "ns_writer streaming ?-driver drv? ?value?", NULL); } else { args = argsNew; opts = optsNew; } } else { args = argsNew; opts = optsNew; } if (Ns_ParseObjv(opts, args, interp, 2, objc, objv) != NS_OK) { result = TCL_ERROR; } else if ((driverObj == NULL) && NsConnRequire(interp, NS_CONN_REQUIRE_ALL, &conn) != NS_OK) { result = TCL_ERROR; } else { DrvWriter *wrPtr; if (DriverWriterFromObj(interp, driverObj, conn, &wrPtr) != NS_OK) { result = TCL_ERROR; } else if (boolValue != -1) { /* * The optional argument was provided. */ wrPtr->doStream = (boolValue == 1 ? NS_WRITER_STREAM_ACTIVE : NS_WRITER_STREAM_NONE); } if (result == TCL_OK) { Tcl_SetObjResult(interp, Tcl_NewIntObj(wrPtr->doStream == NS_WRITER_STREAM_ACTIVE ? 1 : 0)); } } return result; } /* *---------------------------------------------------------------------- * * NsTclWriterObjCmd -- * * Implements "ns_writer" command for submitting data to the writer * threads and to configure and query the state of the writer threads at * runtime. * * Results: * Standard Tcl result. * * Side effects: * None. * *---------------------------------------------------------------------- */ int NsTclWriterObjCmd(ClientData clientData, Tcl_Interp *interp, int objc, Tcl_Obj *const* objv) { const Ns_SubCmdSpec subcmds[] = { {"list", WriterListObjCmd}, {"size", WriterSizeObjCmd}, {"streaming", WriterStreamingObjCmd}, {"submit", WriterSubmitObjCmd}, {"submitfile", WriterSubmitFileObjCmd}, {"submitfiles",WriterSubmitFilesObjCmd}, {NULL, NULL} }; return Ns_SubcmdObjv(subcmds, clientData, interp, objc, objv); } /* *====================================================================== * Async (log) writer: Write asynchronously to a disk *====================================================================== */ /* *---------------------------------------------------------------------- * * NsAsyncWriterQueueEnable -- * * Enable async writing and start the AsyncWriterThread if * necessary * * Results: * None. * * Side effects: * Potentially starting a thread and set "stopped" to NS_FALSE. * *---------------------------------------------------------------------- */ void NsAsyncWriterQueueEnable(void) { if (Ns_ConfigBool(NS_CONFIG_PARAMETERS, "asynclogwriter", NS_FALSE) == NS_TRUE) { SpoolerQueue *queuePtr; /* * In case, the async writer has not started, the static variable * asyncWriter is NULL. */ if (asyncWriter == NULL) { Ns_MutexLock(&reqLock); if (likely(asyncWriter == NULL)) { /* * Allocate and initialize writer thread context. 
*/ asyncWriter = ns_calloc(1u, sizeof(AsyncWriter)); Ns_MutexUnlock(&reqLock); Ns_MutexSetName2(&asyncWriter->lock, "ns:driver", "async-writer"); /* * Allocate and initialize a Spooler Queue for this thread. */ queuePtr = ns_calloc(1u, sizeof(SpoolerQueue)); Ns_MutexSetName2(&queuePtr->lock, "ns:driver:async-writer", "queue"); asyncWriter->firstPtr = queuePtr; /* * Start the spooler queue */ SpoolerQueueStart(queuePtr, AsyncWriterThread); } else { Ns_MutexUnlock(&reqLock); } } assert(asyncWriter != NULL); queuePtr = asyncWriter->firstPtr; assert(queuePtr != NULL); Ns_MutexLock(&queuePtr->lock); queuePtr->stopped = NS_FALSE; Ns_MutexUnlock(&queuePtr->lock); } } /* *---------------------------------------------------------------------- * * NsAsyncWriterQueueDisable -- * * Disable async writing but don't touch the writer thread. * * Results: * None. * * Side effects: * Disable async writing by setting stopped to 1. * *---------------------------------------------------------------------- */ void NsAsyncWriterQueueDisable(bool shutdown) { if (asyncWriter != NULL) { SpoolerQueue *queuePtr = asyncWriter->firstPtr; Ns_Time timeout; assert(queuePtr != NULL); Ns_GetTime(&timeout); Ns_IncrTime(&timeout, nsconf.shutdowntimeout.sec, nsconf.shutdowntimeout.usec); Ns_MutexLock(&queuePtr->lock); queuePtr->stopped = NS_TRUE; queuePtr->shutdown = shutdown; /* * Trigger the AsyncWriter Thread to drain the spooler queue. */ SockTrigger(queuePtr->pipe[1]); (void)Ns_CondTimedWait(&queuePtr->cond, &queuePtr->lock, &timeout); Ns_MutexUnlock(&queuePtr->lock); if (shutdown) { ns_free(queuePtr); ns_free(asyncWriter); asyncWriter = NULL; } } } /* *---------------------------------------------------------------------- * * NsAsyncWrite -- * * Perform an asynchronous write operation via a writer thread in * case a writer thread is configured and running. The intention * of the asynchronous write operations is to reduce latencies in * connection threads. * * Results: * NS_OK, when write was performed via writer thread, * NS_ERROR otherwise (but data is written). * * Side effects: * I/O Operation. * *---------------------------------------------------------------------- */ Ns_ReturnCode NsAsyncWrite(int fd, const char *buffer, size_t nbyte) { Ns_ReturnCode returnCode = NS_OK; NS_NONNULL_ASSERT(buffer != NULL); /* * If the async writer has not started or is deactivated, behave like a * ns_write() command. If the ns_write() fails, we can't do much, since * the writing of an error message to the log might bring us into an * infinite loop. So we print simple to stderr. */ if (asyncWriter == NULL || asyncWriter->firstPtr->stopped) { ssize_t written = ns_write(fd, buffer, nbyte); if (unlikely(written != (ssize_t)nbyte)) { int retries = 100; /* * Don't go into an infinite loop when multiple subsequent disk * write operations return 0 (maybe disk full). */ returnCode = NS_ERROR; do { if (written < 0) { fprintf(stderr, "error during async write (fd %d): %s\n", fd, strerror(errno)); break; } /* * All partial writes (written >= 0) */ WriteWarningRaw("partial write", fd, nbyte, written); nbyte -= (size_t)written; buffer += written; written = ns_write(fd, buffer, nbyte); if (written == (ssize_t)nbyte) { returnCode = NS_OK; break; } } while (retries-- > 0); } } else { SpoolerQueue *queuePtr; bool trigger = NS_FALSE; const AsyncWriteData *wdPtr; AsyncWriteData *newWdPtr; /* * Allocate a writer cmd and initialize it. 
In order to provide an
         * interface compatible with ns_write(), we copy the provided data
         * such that it can be freed immediately by the caller. If we gave
         * up that interface, we could free the memory block after writing
         * and save a malloc/free operation on the data.
         */
        newWdPtr = ns_calloc(1u, sizeof(AsyncWriteData));
        newWdPtr->fd = fd;
        newWdPtr->bufsize = nbyte;
        newWdPtr->data = ns_malloc(nbyte + 1u);
        memcpy(newWdPtr->data, buffer, newWdPtr->bufsize);
        newWdPtr->buf = newWdPtr->data;
        newWdPtr->size = newWdPtr->bufsize;

        /*
         * Now add the new write job to the writer thread's queue. In most
         * cases, the queue will be empty.
         */
        queuePtr = asyncWriter->firstPtr;
        assert(queuePtr != NULL);

        Ns_MutexLock(&queuePtr->lock);
        wdPtr = queuePtr->sockPtr;
        if (wdPtr != NULL) {
            newWdPtr->nextPtr = queuePtr->sockPtr;
            queuePtr->sockPtr = newWdPtr;
        } else {
            queuePtr->sockPtr = newWdPtr;
            trigger = NS_TRUE;
        }
        Ns_MutexUnlock(&queuePtr->lock);

        /*
         * Wake up the writer thread if necessary.
         */
        if (trigger) {
            SockTrigger(queuePtr->pipe[1]);
        }
    }

    return returnCode;
}

/*
 *----------------------------------------------------------------------
 *
 * AsyncWriterRelease --
 *
 *      Deallocate write data.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      Frees memory.
 *
 *----------------------------------------------------------------------
 */
static void
AsyncWriterRelease(AsyncWriteData *wdPtr)
{
    NS_NONNULL_ASSERT(wdPtr != NULL);

    ns_free(wdPtr->data);
    ns_free(wdPtr);
}

/*
 *----------------------------------------------------------------------
 *
 * AsyncWriterThread --
 *
 *      Thread that implements non-blocking write operations to files.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      Writes to files.
 *
 *----------------------------------------------------------------------
 */
static void
AsyncWriterThread(void *arg)
{
    SpoolerQueue   *queuePtr = (SpoolerQueue*)arg;
    char            charBuffer[1];
    int             pollTimeout;
    Ns_ReturnCode   status;
    bool            stopping;
    AsyncWriteData *curPtr, *nextPtr, *writePtr;
    PollData        pdata;

    Ns_ThreadSetName("-asynclogwriter%d-", queuePtr->id);
    queuePtr->threadName = Ns_ThreadGetName();

    /*
     * Allocate and initialize controlling variables
     */
    PollCreate(&pdata);
    writePtr = NULL;
    stopping = NS_FALSE;

    /*
     * Loop forever until signaled to shut down and all
     * connections are complete and gracefully closed.
     */
    while (!stopping) {

        /*
         * Always listen to the trigger pipe. We could also perform the
         * write operations in this thread asynchronously, but since the
         * purpose of this thread is merely to reduce latency in the
         * connection threads, this is not necessary. To keep things
         * simple, we perform the typically small write operations
         * without testing for POLLOUT.
         */
        PollReset(&pdata);
        (void)PollSet(&pdata, queuePtr->pipe[0], (short)POLLIN, NULL);

        if (writePtr == NULL) {
            pollTimeout = 30 * 1000;
        } else {
            pollTimeout = 0;
        }

        /*
         * Wait for data
         */
        /*n =*/ (void) PollWait(&pdata, pollTimeout);

        /*
         * Select and drain the trigger pipe if necessary.
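         * A trigger consists of a single byte written to pipe[1] by
         * SockTrigger(); reading it here re-arms the pipe for the next
         * wakeup.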
         */
        if (PollIn(&pdata, 0)) {
            if (ns_recv(queuePtr->pipe[0], charBuffer, 1u, 0) != 1) {
                Ns_Fatal("asynclogwriter: trigger ns_recv() failed: %s",
                         ns_sockstrerror(ns_sockerrno));
            }

            if (queuePtr->stopped) {
                /*
                 * Drain everything from the queue.
                 */
                for (curPtr = writePtr; curPtr != NULL; curPtr = curPtr->nextPtr) {
                    ssize_t written = ns_write(curPtr->fd, curPtr->buf, curPtr->bufsize);
                    if (unlikely(written != (ssize_t)curPtr->bufsize)) {
                        WriteWarningRaw("drain writer", curPtr->fd, curPtr->bufsize, written);
                    }
                }
                writePtr = NULL;

                for (curPtr = queuePtr->sockPtr; curPtr != NULL; curPtr = curPtr->nextPtr) {
                    ssize_t written = ns_write(curPtr->fd, curPtr->buf, curPtr->bufsize);
                    if (unlikely(written != (ssize_t)curPtr->bufsize)) {
                        WriteWarningRaw("drain queue", curPtr->fd, curPtr->bufsize, written);
                    }
                }
                queuePtr->sockPtr = NULL;

                /*
                 * Notify the caller (normally NsAsyncWriterQueueDisable())
                 * that we are done.
                 */
                Ns_CondBroadcast(&queuePtr->cond);
            }
        }

        /*
         * Write to all available file descriptors
         */
        curPtr = writePtr;
        writePtr = NULL;

        while (curPtr != NULL) {
            ssize_t written;

            nextPtr = curPtr->nextPtr;
            status = NS_OK;

            /*
             * Write the actual data and allow for partial write operations.
             */
            written = ns_write(curPtr->fd, curPtr->buf, curPtr->bufsize);
            if (unlikely(written < 0)) {
                status = NS_ERROR;
            } else {
                curPtr->size -= (size_t)written;
                curPtr->nsent += written;
                curPtr->bufsize -= (size_t)written;
                if (curPtr->data != NULL) {
                    curPtr->buf += written;
                }
            }

            if (unlikely(status != NS_OK)) {
                AsyncWriterRelease(curPtr);
                queuePtr->queuesize--;
            } else {
                /*
                 * The write operation was successful. Check if there is
                 * some remaining data to write. If not, we are done with
                 * this request and can release the write buffer.
                 */
                if (curPtr->size > 0u) {
                    Push(curPtr, writePtr);
                } else {
                    AsyncWriterRelease(curPtr);
                    queuePtr->queuesize--;
                }
            }

            curPtr = nextPtr;
        }

        /*
         * Check for shutdown
         */
        stopping = queuePtr->shutdown;
        if (stopping) {
            curPtr = queuePtr->sockPtr;
            assert(writePtr == NULL);
            while (curPtr != NULL) {
                ssize_t written = ns_write(curPtr->fd, curPtr->buf, curPtr->bufsize);
                if (unlikely(written != (ssize_t)curPtr->bufsize)) {
                    WriteWarningRaw("shutdown", curPtr->fd, curPtr->bufsize, written);
                }
                curPtr = curPtr->nextPtr;
            }
        } else {
            /*
             * Add fresh jobs to the writer queue. This actually means to
             * move jobs from queuePtr->sockPtr (the name is kept to be
             * able to use the same queue as above) to the currently
             * active jobs in queuePtr->curPtr.
             */
            Ns_MutexLock(&queuePtr->lock);
            curPtr = queuePtr->sockPtr;
            queuePtr->sockPtr = NULL;
            while (curPtr != NULL) {
                nextPtr = curPtr->nextPtr;
                Push(curPtr, writePtr);
                queuePtr->queuesize++;
                curPtr = nextPtr;
            }
            queuePtr->curPtr = writePtr;
            Ns_MutexUnlock(&queuePtr->lock);
        }
    }

    PollFree(&pdata);
    queuePtr->stopped = NS_TRUE;
    Ns_Log(Notice, "exiting");
}

/*
 *----------------------------------------------------------------------
 *
 * AsyncLogfileWriteObjCmd --
 *
 *      Implements "ns_asynclogfile write" command. Writes to a file
 *      descriptor via the async writer thread. The command handles
 *      partial write operations internally.
 *
 * Results:
 *      Standard Tcl result.
 *
 * Side effects:
 *      None.
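 *
 * Typical use (sketch; the fd from "ns_asynclogfile open" and the
 * pre-formatted log line are placeholders):
 *
 *     ns_asynclogfile write -sanitize 1 $fd $line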
 *
 *----------------------------------------------------------------------
 */
static int
AsyncLogfileWriteObjCmd(ClientData UNUSED(clientData), Tcl_Interp *interp, int objc, Tcl_Obj *const* objv)
{
    int                result = TCL_OK, binary = (int)NS_FALSE, sanitize;
    Tcl_Obj           *stringObj;
    int                fd = 0;
    Ns_ObjvValueRange  fd_range = {0, INT_MAX};
    Ns_ObjvValueRange  sanitize_range = {0, 2};
    Ns_ObjvSpec opts[] = {
        {"-binary",   Ns_ObjvBool, &binary,   INT2PTR(NS_TRUE)},
        {"-sanitize", Ns_ObjvInt,  &sanitize, &sanitize_range},
        {NULL, NULL, NULL, NULL}
    };
    Ns_ObjvSpec args[] = {
        {"fd",     Ns_ObjvInt, &fd,        &fd_range},
        {"buffer", Ns_ObjvObj, &stringObj, NULL},
        {NULL, NULL, NULL, NULL}
    };

    /*
     * Take the config value as default for "-sanitize", but let the user
     * override it on a per-case basis.
     */
    sanitize = nsconf.sanitize_logfiles;

    if (unlikely(Ns_ParseObjv(opts, args, interp, 2, objc, objv) != NS_OK)) {
        result = TCL_ERROR;
    } else {
        const char   *buffer;
        int           length;
        Ns_ReturnCode rc;

        if (binary == (int)NS_TRUE || NsTclObjIsByteArray(stringObj)) {
            buffer = (const char *) Tcl_GetByteArrayFromObj(stringObj, &length);
        } else {
            buffer = Tcl_GetStringFromObj(stringObj, &length);
        }
        if (length > 0) {
            if (sanitize > 0) {
                Tcl_DString ds;
                bool        lastCharNewline = (buffer[length-1] == '\n');

                Tcl_DStringInit(&ds);
                if (lastCharNewline) {
                    length --;
                }
                Ns_DStringAppendPrintable(&ds, sanitize == 2, buffer, (size_t)length);
                if (lastCharNewline) {
                    Tcl_DStringAppend(&ds, "\n", 1);
                }
                rc = NsAsyncWrite(fd, ds.string, (size_t)ds.length);
                Tcl_DStringFree(&ds);

            } else {
                rc = NsAsyncWrite(fd, buffer, (size_t)length);
            }

            if (rc != NS_OK) {
                Ns_TclPrintfResult(interp, "ns_asynclogfile: error during write operation on fd %d: %s",
                                   fd, Tcl_PosixError(interp));
                result = TCL_ERROR;
            }
        } else {
            result = TCL_OK;
        }
    }
    return result;
}

/*
 *----------------------------------------------------------------------
 *
 * AsyncLogfileOpenObjCmd --
 *
 *      Implements "ns_asynclogfile open" command. The command opens a
 *      write-only log file and returns a thread-shareable handle
 *      (actually a numeric file descriptor) which can be used in
 *      subsequent "write" or "close" operations.
 *
 * Results:
 *      Standard Tcl result.
 *
 * Side effects:
 *      None.
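 *
 *      The file is opened with O_CREAT|O_WRONLY|O_CLOEXEC combined with
 *      the requested flags (default APPEND) and mode 0644; the numeric
 *      fd is returned as the command result.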
 *
 *----------------------------------------------------------------------
 */
static int
AsyncLogfileOpenObjCmd(ClientData UNUSED(clientData), Tcl_Interp *interp, int objc, Tcl_Obj *const* objv)
{
    int           result = TCL_OK;
    unsigned int  flags = O_APPEND;
    char         *fileNameString;
    Tcl_Obj      *flagsObj = NULL;
    Ns_ObjvTable  flagTable[] = {
        {"APPEND", O_APPEND},
        {"EXCL",   O_EXCL},
#ifdef O_DSYNC
        {"DSYNC",  O_DSYNC},
#endif
#ifdef O_SYNC
        {"SYNC",   O_SYNC},
#endif
        {"TRUNC",  O_TRUNC},
        {NULL,     0u}
    };
    Ns_ObjvSpec args[] = {
        {"filename", Ns_ObjvString, &fileNameString, NULL},
        {"?flags",   Ns_ObjvObj,    &flagsObj,       NULL},
        //{"mode", Ns_ObjvString, &mode, NULL},
        {NULL, NULL, NULL, NULL}
    };

    if (unlikely(Ns_ParseObjv(NULL, args, interp, 2, objc, objv) != NS_OK)) {
        result = TCL_ERROR;

    } else if (flagsObj != NULL) {
        Tcl_Obj **ov;
        int       oc;

        result = Tcl_ListObjGetElements(interp, flagsObj, &oc, &ov);
        if (result == TCL_OK && oc > 0) {
            int i, opt;

            flags = 0u;
            for (i = 0; i < oc; i++) {
                result = Tcl_GetIndexFromObjStruct(interp, ov[i], flagTable,
                                                   (int)sizeof(flagTable[0]),
                                                   "flag", 0, &opt);
                if (result != TCL_OK) {
                    break;
                } else {
                    flags |= flagTable[opt].value;
                }
            }
        }
    }
    if (result == TCL_OK) {
        int fd;

        fd = ns_open(fileNameString, (int)(O_CREAT | O_WRONLY | O_CLOEXEC | flags), 0644);

        if (unlikely(fd == NS_INVALID_FD)) {
            Ns_TclPrintfResult(interp, "could not open file '%s': %s",
                               fileNameString, Tcl_PosixError(interp));
            result = TCL_ERROR;
        } else {
            Tcl_SetObjResult(interp, Tcl_NewIntObj(fd));
        }
    }

    return result;
}

/*
 *----------------------------------------------------------------------
 *
 * AsyncLogfileCloseObjCmd --
 *
 *      Implements "ns_asynclogfile close" command. Closes the logfile
 *      previously created via "ns_asynclogfile open".
 *
 * Results:
 *      Standard Tcl result.
 *
 * Side effects:
 *      None.
 *
 *----------------------------------------------------------------------
 */
static int
AsyncLogfileCloseObjCmd(ClientData UNUSED(clientData), Tcl_Interp *interp, int objc, Tcl_Obj *const* objv)
{
    int               fd, result = TCL_OK;
    Ns_ObjvValueRange range = {0, INT_MAX};
    Ns_ObjvSpec args[] = {
        {"fd", Ns_ObjvInt, &fd, &range},
        {NULL, NULL, NULL, NULL}
    };

    if (unlikely(Ns_ParseObjv(NULL, args, interp, 2, objc, objv) != NS_OK)) {
        result = TCL_ERROR;
    } else {
        int rc = ns_close(fd);

        if (rc != 0) {
            Ns_TclPrintfResult(interp, "could not close fd %d: %s",
                               fd, Tcl_PosixError(interp));
            result = TCL_ERROR;
        }
    }
    return result;
}

/*
 *----------------------------------------------------------------------
 *
 * NsTclAsyncLogfileObjCmd --
 *
 *      Wrapper for "ns_asynclogfile open|write|close" commands.
 *
 * Results:
 *      Standard Tcl result.
 *
 * Side effects:
 *      None.
 *
 *----------------------------------------------------------------------
 */
int
NsTclAsyncLogfileObjCmd(ClientData clientData, Tcl_Interp *interp, int objc, Tcl_Obj *const* objv)
{
    const Ns_SubCmdSpec subcmds[] = {
        {"open",  AsyncLogfileOpenObjCmd},
        {"write", AsyncLogfileWriteObjCmd},
        {"close", AsyncLogfileCloseObjCmd},
        {NULL, NULL}
    };

    return Ns_SubcmdObjv(subcmds, clientData, interp, objc, objv);
}

/*
 *----------------------------------------------------------------------
 *
 * LookupDriver --
 *
 *      Find a matching driver for the specified protocol and optionally
 *      the specified driver name.
 *
 * Results:
 *      Driver pointer or NULL on failure.
 *
 * Side effects:
 *      When no driver is found, an error is left in the interp result.
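 *
 *      Matching iterates the global driver list (firstDrvPtr): when no
 *      driver name is given, the first driver with the requested
 *      protocol wins; otherwise the module name has to match as well.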
* *---------------------------------------------------------------------- */ static Driver * LookupDriver(Tcl_Interp *interp, const char* protocol, const char *driverName) { Driver *drvPtr; NS_NONNULL_ASSERT(interp != NULL); NS_NONNULL_ASSERT(protocol != NULL); for (drvPtr = firstDrvPtr; drvPtr != NULL; drvPtr = drvPtr->nextPtr) { Ns_Log(DriverDebug, "... check Driver proto <%s> server %s name %s location %s", drvPtr->protocol, drvPtr->server, drvPtr->threadName, drvPtr->location); if (STREQ(drvPtr->protocol, protocol)) { if (driverName == NULL) { /* * If there is no driver name given, take the first driver * with the matching protocol. */ break; } else if (STREQ(drvPtr->moduleName, driverName)) { /* * The driver name (name of the loaded module) is equal */ break; } } } if (drvPtr == NULL) { if (driverName != NULL) { Ns_TclPrintfResult(interp, "no driver for protocol '%s' & driver name '%s' found", protocol, driverName); } else { Ns_TclPrintfResult(interp, "no driver for protocol '%s' found", protocol); } } return drvPtr; } /* *---------------------------------------------------------------------- * * NSDriverClientOpen -- * * Open a client HTTP connection using the driver interface * * Results: * Tcl return code. * * Side effects: * Opening a connection * *---------------------------------------------------------------------- */ int NSDriverClientOpen(Tcl_Interp *interp, const char *driverName, const char *url, const char *httpMethod, const char *version, const Ns_Time *timeoutPtr, Sock **sockPtrPtr) { char *protocol, *host, *portString, *path, *tail, *url2; int result = TCL_OK; NS_NONNULL_ASSERT(interp != NULL); NS_NONNULL_ASSERT(url != NULL); NS_NONNULL_ASSERT(httpMethod != NULL); NS_NONNULL_ASSERT(version != NULL); NS_NONNULL_ASSERT(sockPtrPtr != NULL); url2 = ns_strdup(url); /* * We need here a fully qualified URL, otherwise raise an error */ if (unlikely(Ns_ParseUrl(url2, &protocol, &host, &portString, &path, &tail) != NS_OK) || protocol == NULL || host == NULL || path == NULL || tail == NULL) { Ns_Log(Notice, "driver: invalid URL '%s' passed to NSDriverClientOpen", url2); result = TCL_ERROR; } else { Driver *drvPtr; unsigned short portNr = 0u; /* make static checker happy */ assert(protocol != NULL); assert(host != NULL); assert(path != NULL); assert(tail != NULL); /* * Find a matching driver for the specified protocol and optionally * the specified driver name. 
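         * When the URL contains no explicit port, the driver's configured
         * default port ("defport") is used as fallback.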
*/ drvPtr = LookupDriver(interp, protocol, driverName); if (drvPtr == NULL) { result = TCL_ERROR; } else if (portString != NULL) { portNr = (unsigned short) strtol(portString, NULL, 10); } else if (drvPtr->defport != 0u) { /* * Get the default port from the driver structure; */ portNr = drvPtr->defport; } else { Ns_TclPrintfResult(interp, "no default port for protocol '%s' defined", protocol); result = TCL_ERROR; } if (result == TCL_OK) { NS_SOCKET sock; Ns_ReturnCode status; sock = Ns_SockTimedConnect2(host, portNr, NULL, 0u, timeoutPtr, &status); if (sock == NS_INVALID_SOCKET) { Ns_SockConnectError(interp, host, portNr, status); result = TCL_ERROR; } else { const char *query; Tcl_DString ds, *dsPtr = &ds; Request *reqPtr; Sock *sockPtr; assert(drvPtr != NULL); sockPtr = SockNew(drvPtr); sockPtr->sock = sock; sockPtr->servPtr = drvPtr->servPtr; if (sockPtr->servPtr == NULL) { const NsInterp *itPtr = NsGetInterpData(interp); sockPtr->servPtr = itPtr->servPtr; } RequestNew(sockPtr); Ns_GetTime(&sockPtr->acceptTime); reqPtr = sockPtr->reqPtr; Tcl_DStringInit(dsPtr); Ns_DStringAppend(dsPtr, httpMethod); Ns_StrToUpper(Ns_DStringValue(dsPtr)); Tcl_DStringAppend(dsPtr, " /", 2); if (*path != '\0') { if (*path == '/') { path ++; } Tcl_DStringAppend(dsPtr, path, -1); Tcl_DStringAppend(dsPtr, "/", 1); } Tcl_DStringAppend(dsPtr, tail, -1); Tcl_DStringAppend(dsPtr, " HTTP/", 6); Tcl_DStringAppend(dsPtr, version, -1); reqPtr->request.line = Ns_DStringExport(dsPtr); reqPtr->request.method = ns_strdup(httpMethod); reqPtr->request.protocol = ns_strdup(protocol); reqPtr->request.host = ns_strdup(host); query = strchr(tail, INTCHAR('?')); if (query != NULL) { reqPtr->request.query = ns_strdup(query+1); } else { reqPtr->request.query = NULL; } /*Ns_Log(Notice, "REQUEST LINE <%s> query <%s>", reqPtr->request.line, reqPtr->request.query);*/ *sockPtrPtr = sockPtr; } } } ns_free(url2); return result; } /* *---------------------------------------------------------------------- * * NSDriverSockNew -- * * Create a Sock structure based on the driver interface * * Results: * Tcl return code. * * Side effects: * Accepting a connection * *---------------------------------------------------------------------- */ int NSDriverSockNew(Tcl_Interp *interp, NS_SOCKET sock, const char *protocol, const char *driverName, const char *methodName, Sock **sockPtrPtr) { int result = TCL_OK; Driver *drvPtr; NS_NONNULL_ASSERT(interp != NULL); NS_NONNULL_ASSERT(protocol != NULL); NS_NONNULL_ASSERT(methodName != NULL); NS_NONNULL_ASSERT(sockPtrPtr != NULL); drvPtr = LookupDriver(interp, protocol, driverName); if (drvPtr == NULL) { result = TCL_ERROR; } else { Sock *sockPtr; Tcl_DString ds, *dsPtr = &ds; Request *reqPtr; sockPtr = SockNew(drvPtr); sockPtr->servPtr = drvPtr->servPtr; sockPtr->sock = sock; RequestNew(sockPtr); // not sure if needed // peerAddr is missing Ns_GetTime(&sockPtr->acceptTime); reqPtr = sockPtr->reqPtr; Tcl_DStringInit(dsPtr); Ns_DStringAppend(dsPtr, methodName); Ns_StrToUpper(Ns_DStringValue(dsPtr)); reqPtr->request.line = Ns_DStringExport(dsPtr); reqPtr->request.method = ns_strdup(methodName); reqPtr->request.protocol = ns_strdup(protocol); reqPtr->request.host = NULL; reqPtr->request.query = NULL; /* Ns_Log(Notice, "REQUEST LINE <%s>", reqPtr->request.line);*/ *sockPtrPtr = sockPtr; } return result; } /* * Local Variables: * mode: c * c-basic-offset: 4 * fill-column: 78 * indent-tabs-mode: nil * End: */
/* * The contents of this file are subject to the Mozilla Public License * Version 1.1 (the "License"); you may not use this file except in * compliance with the License. You may obtain a copy of the License at * http://mozilla.org/. * * Software distributed under the License is distributed on an "AS IS" * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See * the License for the specific language governing rights and limitations * under the License. * * The Original Code is AOLserver Code and related documentation * distributed by AOL. * * The Initial Developer of the Original Code is America Online, * Inc. Portions created by AOL are Copyright (C) 1999 America Online, * Inc. All Rights Reserved. * * Alternatively, the contents of this file may be used under the terms * of the GNU General Public License (the "GPL"), in which case the * provisions of GPL are applicable instead of those above. If you wish * to allow use of your version of this file only under the terms of the * GPL and not to allow others to use your version of this file under the * License, indicate your decision by deleting the provisions above and * replace them with the notice and other provisions required by the GPL. * If you do not delete the provisions above, a recipient may use your * version of this file under either the License or the GPL. */ /* * driver.c -- * * Connection I/O for loadable socket drivers. */ #include "nsd.h" /* * The following are valid driver state flags. */ #define DRIVER_STARTED 1u #define DRIVER_STOPPED 2u #define DRIVER_SHUTDOWN 4u #define DRIVER_FAILED 8u /* * Constants for SockState return and reason codes. */ typedef enum { SOCK_READY = 0, SOCK_MORE = 1, SOCK_SPOOL = 2, SOCK_ERROR = -1, SOCK_CLOSE = -2, SOCK_CLOSETIMEOUT = -3, SOCK_READTIMEOUT = -4, SOCK_WRITETIMEOUT = -5, SOCK_READERROR = -6, SOCK_WRITEERROR = -7, SOCK_SHUTERROR = -8, SOCK_BADREQUEST = -9, SOCK_ENTITYTOOLARGE = -10, SOCK_BADHEADER = -11, SOCK_TOOMANYHEADERS = -12 } SockState; /* * Subset for spooler states */ typedef enum { SPOOLER_CLOSE = SOCK_CLOSE, SPOOLER_OK = SOCK_READY, SPOOLER_READERROR = SOCK_READERROR, SPOOLER_WRITEERROR = SOCK_WRITEERROR, SPOOLER_CLOSETIMEOUT = SOCK_CLOSETIMEOUT } SpoolerState; typedef struct { SpoolerState spoolerState; SockState sockState; } SpoolerStateMap; /* * ServerMap maintains Host header to server mappings. */ typedef struct ServerMap { NsServer *servPtr; char location[1]; } ServerMap; /* * The following maintains the spooler state mapping */ static const SpoolerStateMap spoolerStateMap[] = { {SPOOLER_CLOSE, SOCK_CLOSE}, {SPOOLER_READERROR, SOCK_READERROR}, {SPOOLER_WRITEERROR, SOCK_WRITEERROR}, {SPOOLER_CLOSETIMEOUT, SOCK_CLOSETIMEOUT}, {SPOOLER_OK, SOCK_READY} }; /* * The following structure manages polling. The PollIn macro is * used for the common case of checking for readability. */ typedef struct PollData { unsigned int nfds; /* Number of fds being monitored. */ unsigned int maxfds; /* Max fds (will grow as needed). */ struct pollfd *pfds; /* Dynamic array of poll structs. */ Ns_Time timeout; /* Min timeout, if any, for next spin. */ } PollData; #define PollIn(ppd, i) (((ppd)->pfds[(i)].revents & POLLIN) == POLLIN ) #define PollOut(ppd, i) (((ppd)->pfds[(i)].revents & POLLOUT) == POLLOUT) #define PollHup(ppd, i) (((ppd)->pfds[(i)].revents & POLLHUP) == POLLHUP) /* * Collected informationof writer threads for per pool rates, necessary for * per pool bandwidth management. 
*/ typedef struct ConnPoolInfo { size_t threadSlot; int currentPoolRate; int deltaPercentage; } ConnPoolInfo; /* * The following structure maintains writer socket */ typedef struct WriterSock { struct WriterSock *nextPtr; struct Sock *sockPtr; struct SpoolerQueue *queuePtr; struct Conn *connPtr; SpoolerState status; int err; int refCount; unsigned int flags; Tcl_WideInt nsent; size_t size; NsWriterStreamState doStream; int fd; char *headerString; struct ConnPool *poolPtr; union { struct { struct iovec *bufs; /* incoming bufs to be sent */ int nbufs; int bufIdx; struct iovec sbufs[UIO_SMALLIOV]; /* scratch bufs for handling partial sends */ int nsbufs; int sbufIdx; struct iovec preallocated_bufs[UIO_SMALLIOV]; struct FileMap fmap; } mem; struct { size_t maxsize; size_t bufsize; off_t bufoffset; size_t toRead; unsigned char *buf; Ns_FileVec *bufs; int nbufs; int currentbuf; Ns_Mutex fdlock; } file; } c; char *clientData; Ns_Time startTime; int rateLimit; int currentRate; ConnPoolInfo *infoPtr; bool keep; } WriterSock; /* * Async writer definitions */ typedef struct AsyncWriter { Ns_Mutex lock; /* Lock around writer queues */ SpoolerQueue *firstPtr; /* List of writer threads */ } AsyncWriter; /* * AsyncWriteData is similar to WriterSock */ typedef struct AsyncWriteData { struct AsyncWriteData *nextPtr; char *data; int fd; Tcl_WideInt nsent; size_t size; size_t bufsize; const char *buf; } AsyncWriteData; static AsyncWriter *asyncWriter = NULL; /* * Static functions defined in this file. */ static Ns_ThreadProc DriverThread; static Ns_ThreadProc SpoolerThread; static Ns_ThreadProc WriterThread; static Ns_ThreadProc AsyncWriterThread; static Tcl_ObjCmdProc WriterListObjCmd; static Tcl_ObjCmdProc WriterSizeObjCmd; static Tcl_ObjCmdProc WriterStreamingObjCmd; static Tcl_ObjCmdProc WriterSubmitObjCmd; static Tcl_ObjCmdProc WriterSubmitFileObjCmd; static Tcl_ObjCmdProc AsyncLogfileWriteObjCmd; static Tcl_ObjCmdProc AsyncLogfileOpenObjCmd; static Tcl_ObjCmdProc AsyncLogfileCloseObjCmd; static Ns_ReturnCode DriverWriterFromObj(Tcl_Interp *interp, Tcl_Obj *driverObj, Ns_Conn *conn, DrvWriter **wrPtrPtr) NS_GNUC_NONNULL(1) NS_GNUC_NONNULL(4); static NS_SOCKET DriverListen(Driver *drvPtr, const char *bindaddr) NS_GNUC_NONNULL(1) NS_GNUC_NONNULL(2); static NS_DRIVER_ACCEPT_STATUS DriverAccept(Sock *sockPtr, NS_SOCKET sock) NS_GNUC_NONNULL(1); static bool DriverKeep(Sock *sockPtr) NS_GNUC_NONNULL(1); static void DriverClose(Sock *sockPtr) NS_GNUC_NONNULL(1); static Ns_ReturnCode DriverInit(const char *server, const char *moduleName, const char *threadName, const Ns_DriverInitData *init, NsServer *servPtr, const char *path, const char *bindaddrs, const char *defserver, const char *host) NS_GNUC_NONNULL(2) NS_GNUC_NONNULL(3) NS_GNUC_NONNULL(4) NS_GNUC_NONNULL(6) NS_GNUC_NONNULL(7) NS_GNUC_NONNULL(9); static bool DriverModuleInitialized(const char *module) NS_GNUC_NONNULL(1); static void SockSetServer(Sock *sockPtr) NS_GNUC_NONNULL(1); static SockState SockAccept(Driver *drvPtr, NS_SOCKET sock, Sock **sockPtrPtr, const Ns_Time *nowPtr) NS_GNUC_NONNULL(1); static Ns_ReturnCode SockQueue(Sock *sockPtr, const Ns_Time *timePtr) NS_GNUC_NONNULL(1); static Sock *SockNew(Driver *drvPtr) NS_GNUC_NONNULL(1) NS_GNUC_RETURNS_NONNULL; static void SockRelease(Sock *sockPtr, SockState reason, int err) NS_GNUC_NONNULL(1); static void SockError(Sock *sockPtr, SockState reason, int err) NS_GNUC_NONNULL(1); static void SockSendResponse(Sock *sockPtr, int code, const char *errMsg) NS_GNUC_NONNULL(1) NS_GNUC_NONNULL(3); static 
void SockTrigger(NS_SOCKET sock); static void SockTimeout(Sock *sockPtr, const Ns_Time *nowPtr, const Ns_Time *timeout) NS_GNUC_NONNULL(1); static void SockClose(Sock *sockPtr, int keep) NS_GNUC_NONNULL(1); static SockState SockRead(Sock *sockPtr, int spooler, const Ns_Time *timePtr) NS_GNUC_NONNULL(1); static SockState SockParse(Sock *sockPtr) NS_GNUC_NONNULL(1); static void SockPoll(Sock *sockPtr, short type, PollData *pdata) NS_GNUC_NONNULL(1) NS_GNUC_NONNULL(3); static int SockSpoolerQueue(Driver *drvPtr, Sock *sockPtr) NS_GNUC_NONNULL(1) NS_GNUC_NONNULL(2); static void SpoolerQueueStart(SpoolerQueue *queuePtr, Ns_ThreadProc *proc) NS_GNUC_NONNULL(2); static void SpoolerQueueStop(SpoolerQueue *queuePtr, const Ns_Time *timeoutPtr, const char *name) NS_GNUC_NONNULL(2) NS_GNUC_NONNULL(3); static void PollCreate(PollData *pdata) NS_GNUC_NONNULL(1); static void PollFree(PollData *pdata) NS_GNUC_NONNULL(1); static void PollReset(PollData *pdata) NS_GNUC_NONNULL(1); static NS_POLL_NFDS_TYPE PollSet(PollData *pdata, NS_SOCKET sock, short type, const Ns_Time *timeoutPtr) NS_GNUC_NONNULL(1); static int PollWait(const PollData *pdata, int timeout) NS_GNUC_NONNULL(1); static SockState ChunkedDecode(Request *reqPtr, bool update) NS_GNUC_NONNULL(1); static WriterSock *WriterSockRequire(const Conn *connPtr) NS_GNUC_NONNULL(1); static void WriterSockRelease(WriterSock *wrSockPtr) NS_GNUC_NONNULL(1); static SpoolerState WriterReadFromSpool(WriterSock *curPtr) NS_GNUC_NONNULL(1); static SpoolerState WriterSend(WriterSock *curPtr, int *err) NS_GNUC_NONNULL(1) NS_GNUC_NONNULL(2); static Ns_ReturnCode WriterSetupStreamingMode(Conn *connPtr, struct iovec *bufs, int nbufs, int *fdPtr) NS_GNUC_NONNULL(1) NS_GNUC_NONNULL(2) NS_GNUC_NONNULL(4); static void WriterSockFileVecCleanup(WriterSock *wrSockPtr) NS_GNUC_NONNULL(1); static int WriterGetMemunitFromDict(Tcl_Interp *interp, Tcl_Obj *dictObj, Tcl_Obj *keyObj, Ns_ObjvValueRange *rangePtr, Tcl_WideInt *valuePtr) NS_GNUC_NONNULL(1) NS_GNUC_NONNULL(2) NS_GNUC_NONNULL(3) NS_GNUC_NONNULL(5); static void AsyncWriterRelease(AsyncWriteData *wdPtr) NS_GNUC_NONNULL(1); static void WriteWarningRaw(const char *msg, int fd, size_t wantWrite, ssize_t written) NS_GNUC_NONNULL(1); static const char *GetSockStateName(SockState sockState); static size_t EndOfHeader(Sock *sockPtr) NS_GNUC_NONNULL(1); static void RequestNew(Sock *sockPtr) NS_GNUC_NONNULL(1); static void RequestFree(Sock *sockPtr) NS_GNUC_NONNULL(1); static void LogBuffer(Ns_LogSeverity severity, const char *msg, const char *buffer, size_t len) NS_GNUC_NONNULL(2) NS_GNUC_NONNULL(3); static void ServerMapEntryAdd(Tcl_DString *dsPtr, const char *host, NsServer *servPtr, Driver *drvPtr, bool addDefaultMapEntry) NS_GNUC_NONNULL(1) NS_GNUC_NONNULL(2) NS_GNUC_NONNULL(3) NS_GNUC_NONNULL(4); static Driver *LookupDriver(Tcl_Interp *interp, const char* protocol, const char *driverName) NS_GNUC_NONNULL(1) NS_GNUC_NONNULL(2); static void WriterPerPoolRates(WriterSock *writePtr, Tcl_HashTable *pools) NS_GNUC_NONNULL(1) NS_GNUC_NONNULL(2); static ConnPoolInfo *WriterGetInfoPtr(WriterSock *curPtr, Tcl_HashTable *pools) NS_GNUC_NONNULL(1) NS_GNUC_NONNULL(2); /* * Global variables defined in this file. 
 */
Ns_LogSeverity Ns_LogTaskDebug;
Ns_LogSeverity Ns_LogRequestDebug;
Ns_LogSeverity Ns_LogConnchanDebug;
Ns_LogSeverity Ns_LogUrlspaceDebug;
Ns_LogSeverity Ns_LogTimeoutDebug;
NS_EXPORT Ns_LogSeverity Ns_LogAccessDebug;

bool NsWriterBandwidthManagement = NS_FALSE;

static Ns_LogSeverity WriterDebug;    /* Severity at which to log verbose debugging. */
static Ns_LogSeverity DriverDebug;    /* Severity at which to log verbose debugging. */
static Ns_Mutex reqLock = NULL;       /* Lock for the allocated Request structure pool */
static Ns_Mutex writerlock = NULL;    /* Lock for updating streaming information in the writer */

static Request *firstReqPtr = NULL;   /* Allocated request structures kept in a pool */
static Driver  *firstDrvPtr = NULL;   /* First in list of all drivers */

#define Push(x, xs) ((x)->nextPtr = (xs), (xs) = (x))

/*
 *----------------------------------------------------------------------
 *
 * WriteWarningRaw --
 *
 *      Write a warning message to stderr. This function is for cases
 *      where writing via Ns_Log can't be used (e.g. in the AsyncWriter,
 *      which is itself used for writing to the system log).
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      Line written to stderr.
 *
 *----------------------------------------------------------------------
 */
static void
WriteWarningRaw(const char *msg, int fd, size_t wantWrite, ssize_t written)
{
    fprintf(stderr, "%s: Warning: wanted to write %" PRIuz " bytes, wrote %ld to file descriptor %d\n",
            msg, wantWrite, (long)written, fd);
}

/*
 *----------------------------------------------------------------------
 *
 * GetSockStateName --
 *
 *      Return human readable names for SockState values.
 *
 * Results:
 *      String.
 *
 * Side effects:
 *      None.
 *
 *----------------------------------------------------------------------
 */
static const char *
GetSockStateName(SockState sockState)
{
    int sockStateInt = (int)sockState;
    static const char *sockStateStrings[] = {
        "SOCK_READY",
        "SOCK_MORE",
        "SOCK_SPOOL",
        "SOCK_ERROR",
        "SOCK_CLOSE",
        "SOCK_CLOSETIMEOUT",
        "SOCK_READTIMEOUT",
        "SOCK_WRITETIMEOUT",
        "SOCK_READERROR",
        "SOCK_WRITEERROR",
        "SOCK_SHUTERROR",
        "SOCK_BADREQUEST",
        "SOCK_ENTITYTOOLARGE",
        "SOCK_BADHEADER",
        "SOCK_TOOMANYHEADERS",
        NULL
    };

    if (sockStateInt < 0) {
        sockStateInt = (- sockStateInt) + 2;
    }
    assert(sockStateInt < Ns_NrElements(sockStateStrings));

    return sockStateStrings[sockStateInt];
}

/*
 *----------------------------------------------------------------------
 *
 * NsInitDrivers --
 *
 *      Init drivers system.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      None.
 *
 *----------------------------------------------------------------------
 */
void
NsInitDrivers(void)
{
    DriverDebug = Ns_CreateLogSeverity("Debug(ns:driver)");
    WriterDebug = Ns_CreateLogSeverity("Debug(writer)");
    Ns_LogTaskDebug = Ns_CreateLogSeverity("Debug(task)");
    Ns_LogRequestDebug = Ns_CreateLogSeverity("Debug(request)");
    Ns_LogConnchanDebug = Ns_CreateLogSeverity("Debug(connchan)");
    Ns_LogUrlspaceDebug = Ns_CreateLogSeverity("Debug(urlspace)");
    Ns_LogAccessDebug = Ns_CreateLogSeverity("Debug(access)");
    Ns_LogTimeoutDebug = Ns_CreateLogSeverity("Debug(timeout)");
    Ns_MutexInit(&reqLock);
    Ns_MutexInit(&writerlock);
    Ns_MutexSetName2(&reqLock, "ns:driver", "requestpool");
    Ns_MutexSetName2(&writerlock, "ns:writer", "stream");
}

/*
 *----------------------------------------------------------------------
 *
 * DriverModuleInitialized --
 *
 *      Check if a driver with the specified name is already initialized.
 *
 * Results:
 *      Boolean.
 *
 * Side effects:
 *      None.
* *---------------------------------------------------------------------- */ static bool DriverModuleInitialized(const char *module) { Driver *drvPtr; bool found = NS_FALSE; NS_NONNULL_ASSERT(module != NULL); for (drvPtr = firstDrvPtr; drvPtr != NULL; drvPtr = drvPtr->nextPtr) { if (strcmp(drvPtr->moduleName, module) == 0) { found = NS_TRUE; Ns_Log(Notice, "Driver %s is already initialized", module); break; } } return found; } /* *---------------------------------------------------------------------- * * Ns_DriverInit -- * * Initialize a driver. * * Results: * NS_OK if initialized, NS_ERROR if config or other error. * * Side effects: * Listen socket will be opened later in NsStartDrivers. * *---------------------------------------------------------------------- */ Ns_ReturnCode Ns_DriverInit(const char *server, const char *module, const Ns_DriverInitData *init) { Ns_ReturnCode status = NS_OK; NsServer *servPtr = NULL; bool alreadyInitialized = NS_FALSE; NS_NONNULL_ASSERT(module != NULL); NS_NONNULL_ASSERT(init != NULL); /* * If a server is provided, servPtr must be set. */ if (server != NULL) { servPtr = NsGetServer(server); if (unlikely(servPtr == NULL)) { Ns_Log(Bug, "cannot lookup server structure for server: %s", module); status = NS_ERROR; } } else { alreadyInitialized = DriverModuleInitialized(module); } /* * Check versions of drivers. */ if (status == NS_OK && init->version < NS_DRIVER_VERSION_4) { Ns_Log(Warning, "%s: driver version is too old (version %d), Version 4 is recommended", module, init->version); } #ifdef HAVE_IPV6 if (status == NS_OK && init->version < NS_DRIVER_VERSION_3) { Ns_Log(Error, "%s: driver version is too old (version %d) and does not support IPv6", module, init->version); status = NS_ERROR; } #endif if (status == NS_OK && init->version < NS_DRIVER_VERSION_2) { Ns_Log(Error, "%s: version field of driver is invalid: %d", module, init->version); status = NS_ERROR; } if (!alreadyInitialized && status == NS_OK) { const char *path, *host, *address, *defserver; bool noHostNameGiven; int nrDrivers, nrBindaddrs = 0, result; Ns_Set *set; Tcl_Obj *bindaddrsObj, **objv; path = ((init->path != NULL) ? init->path : Ns_ConfigGetPath(server, module, (char *)0L)); set = Ns_ConfigCreateSection(path); /* * Determine the "defaultserver" the "hostname" / "address" for * binding to and/or the HTTP location string. */ defserver = Ns_ConfigGetValue(path, "defaultserver"); address = Ns_ConfigGetValue(path, "address"); host = Ns_ConfigGetValue(path, "hostname"); noHostNameGiven = (host == NULL); /* * If the listen address was not specified, attempt to determine it * through a DNS lookup of the specified hostname or the server's * primary hostname. 
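         * When no address can be determined at all, the unspecified
         * address (NS_IP_UNSPECIFIED) is used, i.e. the driver will
         * listen on all interfaces.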
*/ if (address == NULL) { Tcl_DString ds; Tcl_DStringInit(&ds); if (noHostNameGiven) { host = Ns_InfoHostname(); } if (Ns_GetAllAddrByHost(&ds, host) == NS_TRUE) { address = ns_strdup(Tcl_DStringValue(&ds)); if (path != NULL) { Ns_SetUpdate(set, "address", address); } Ns_Log(Notice, "no address given, obtained address '%s' from host name %s", address, host); } Tcl_DStringFree(&ds); } if (address == NULL) { address = NS_IP_UNSPECIFIED; Ns_Log(Notice, "no address given, set address to unspecified address %s", address); } bindaddrsObj = Tcl_NewStringObj(address, -1); result = Tcl_ListObjGetElements(NULL, bindaddrsObj, &nrBindaddrs, &objv); if (result != TCL_OK || nrBindaddrs < 1 || nrBindaddrs >= MAX_LISTEN_ADDR_PER_DRIVER) { Ns_Fatal("%s: bindaddrs '%s' is not a valid Tcl list containing addresses (max %d)", module, address, MAX_LISTEN_ADDR_PER_DRIVER); } Tcl_IncrRefCount(bindaddrsObj); /* * If the hostname was not specified and not determined by the lookup * above, set it to the first specified or derived IP address string. */ if (host == NULL) { host = ns_strdup(Tcl_GetString(objv[0])); } if (noHostNameGiven && host != NULL && path != NULL) { Ns_SetUpdate(set, "hostname", host); } Tcl_DecrRefCount(bindaddrsObj); /* * Get configured number of driver threads. */ nrDrivers = Ns_ConfigIntRange(path, "driverthreads", 1, 1, 64); if (nrDrivers > 1) { #if !defined(SO_REUSEPORT) Ns_Log(Warning, "server %s module %s requests %d driverthreads, but is not supported by the operating system", server, module, nrDrivers); Ns_SetUpdate(set, "driverthreads", "1"); nrDrivers = 1; #endif } /* * The common parameters are determined, create the driver thread(s) */ { size_t maxModuleNameLength = strlen(module) + (size_t)TCL_INTEGER_SPACE + 1u; char *moduleName = ns_malloc(maxModuleNameLength); int i; if (host == NULL) { host = Ns_InfoHostname(); } for (i = 0; i < nrDrivers; i++) { snprintf(moduleName, maxModuleNameLength, "%s:%d", module, i); status = DriverInit(server, module, moduleName, init, servPtr, path, address, defserver, host); if (status != NS_OK) { break; } } ns_free(moduleName); } } return status; } /* *---------------------------------------------------------------------- * * ServerMapEntryAdd -- * * Add an entry to the virtual server map. The entry consists of the * value as provided by the host header field and location string, * containing as well the protocol. * * Results: * None * * Side effects: * Potentially adding an entry to the virtual server map. 
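 *      Duplicate host entries (detected via the per-driver "hosts" hash
 *      table) are ignored and logged.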
 *
 *----------------------------------------------------------------------
 */
static void
ServerMapEntryAdd(Tcl_DString *dsPtr, const char *host, NsServer *servPtr, Driver *drvPtr,
                  bool addDefaultMapEntry)
{
    Tcl_HashEntry *hPtr;
    int            isNew;

    NS_NONNULL_ASSERT(dsPtr != NULL);
    NS_NONNULL_ASSERT(host != NULL);
    NS_NONNULL_ASSERT(servPtr != NULL);
    NS_NONNULL_ASSERT(drvPtr != NULL);

    hPtr = Tcl_CreateHashEntry(&drvPtr->hosts, host, &isNew);
    if (isNew != 0) {
        ServerMap *mapPtr;

        (void) Ns_DStringVarAppend(dsPtr, drvPtr->protocol, "://", host, (char *)0L);
        mapPtr = ns_malloc(sizeof(ServerMap) + (size_t)dsPtr->length);
        mapPtr->servPtr = servPtr;
        memcpy(mapPtr->location, dsPtr->string, (size_t)dsPtr->length + 1u);

        Tcl_SetHashValue(hPtr, mapPtr);
        Ns_Log(Notice, "%s: adding virtual host entry for host <%s> location: %s mapped to server: %s",
               drvPtr->threadName, host, mapPtr->location, servPtr->server);

        if (addDefaultMapEntry) {
            drvPtr->defMapPtr = mapPtr;
        }
        /*
         * Always reset the Tcl_DString.
         */
        Ns_DStringSetLength(dsPtr, 0);
    } else {
        Ns_Log(Notice, "%s: ignore duplicate virtual host entry: %s",
               drvPtr->threadName, host);
    }
}

/*
 *----------------------------------------------------------------------
 *
 * NsDriverMapVirtualServers --
 *
 *      Map "Host:" headers for drivers not bound to physical servers.
 *      This function has to be called at a time when all servers are
 *      already defined, such that NsGetServer(server) can succeed.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      Adds entries to the virtual server map via ServerMapEntryAdd().
 *
 *----------------------------------------------------------------------
 */
void
NsDriverMapVirtualServers(void)
{
    Driver *drvPtr;

    for (drvPtr = firstDrvPtr; drvPtr != NULL; drvPtr = drvPtr->nextPtr) {
        const Ns_Set *lset;
        size_t        j;
        Tcl_DString   ds, *dsPtr = &ds;
        const char   *path, *defserver, *moduleName;

        moduleName = drvPtr->moduleName;
        defserver = drvPtr->defserver;

        /*
         * Check for a "/servers" section for this driver module.
         */
        path = Ns_ConfigGetPath(NULL, moduleName, "servers", (char *)0L);
        lset = Ns_ConfigGetSection(path);

        if (lset == NULL || Ns_SetSize(lset) == 0u) {
            /*
             * The driver module has no (or an empty) ".../servers"
             * section; no mapping from host names to virtual servers is
             * defined.
             */
            if (drvPtr->server == NULL) {
                /*
                 * We have a global driver module. If there is at least a
                 * default server configured, we can use this for the mapping
                 * to the default server.
                 */
                if (defserver != NULL) {
                    NsServer *servPtr = NsGetServer(defserver);

                    Tcl_DStringInit(dsPtr);
                    ServerMapEntryAdd(dsPtr, Ns_InfoHostname(), servPtr, drvPtr, NS_TRUE);
                    Tcl_DStringFree(dsPtr);
                    Ns_Log(Notice, "Global driver has no mapping from host to server (section '%s' missing)",
                           moduleName);
                } else {
                    /*
                     * Global driver, which has no default server, and no
                     * servers section.
                     */
                    Ns_Fatal("%s: virtual servers configured,"
                             " but '%s' has no defaultserver defined", moduleName, path);
                }
            }
            continue;
        }

        /*
         * We have a ".../servers" section; the driver might be global or
         * local. It is not clear why we need the server map for a local
         * driver, but we keep this for compatibility.
         */
        if (defserver == NULL) {
            if (drvPtr->server != NULL) {
                /*
                 * We have a local (server-specific) driver. Since the code
                 * below assumes that we have a "defserver" set, we take the
                 * actual server as defserver.
                 */
                defserver = drvPtr->server;

            } else {
                /*
                 * We have a global driver, but no defserver.
*/ Ns_Fatal("%s: virtual servers configured," " but '%s' has no defaultserver defined", moduleName, path); } } assert(defserver != NULL); drvPtr->defMapPtr = NULL; Ns_DStringInit(dsPtr); for (j = 0u; j < Ns_SetSize(lset); ++j) { const char *server = Ns_SetKey(lset, j); const char *host = Ns_SetValue(lset, j); NsServer *servPtr; /* * Perform an explicit lookup of the server. */ servPtr = NsGetServer(server); if (servPtr == NULL) { Ns_Log(Error, "%s: no such server: %s", moduleName, server); } else { char *writableHost, *hostName, *portStart; writableHost = ns_strdup(host); Ns_HttpParseHost(writableHost, &hostName, &portStart); if (portStart == NULL) { Tcl_DString hostDString; /* * The provided host entry does NOT contain a port. * * Add the provided entry to the virtual server map only, * when the configured port is the default port for the * protocol. */ if (drvPtr->port == drvPtr->defport) { ServerMapEntryAdd(dsPtr, host, servPtr, drvPtr, STREQ(defserver, server)); } /* * Auto-add configured port: Add always an entry with the * explicitly configured port of the driver. */ Tcl_DStringInit(&hostDString); Tcl_DStringAppend(&hostDString, host, -1); (void) Ns_DStringPrintf(&hostDString, ":%hu", drvPtr->port); ServerMapEntryAdd(dsPtr, hostDString.string, servPtr, drvPtr, STREQ(defserver, server)); Tcl_DStringFree(&hostDString); } else { /* * The provided host entry does contain a port. * * In case, the provided port is equal to the configured port * of the driver, add an entry. */ unsigned short providedPort = (unsigned short)strtol(portStart+1, NULL, 10); if (providedPort == drvPtr->port) { ServerMapEntryAdd(dsPtr, host, servPtr, drvPtr, STREQ(defserver, server)); /* * In case, the provided port is equal to the default * port of the driver, make sure that we have an entry * without the port. */ if (providedPort == drvPtr->defport) { ServerMapEntryAdd(dsPtr, hostName, servPtr, drvPtr, STREQ(defserver, server)); } } else { Ns_Log(Warning, "%s: driver is listening on port %hu; " "virtual host entry %s ignored", moduleName, drvPtr->port, host); } } ns_free(writableHost); } } Ns_DStringFree(dsPtr); if (drvPtr->defMapPtr == NULL) { fprintf(stderr, "--- Server Map: ---\n"); Ns_SetPrint(lset); Ns_Fatal("%s: default server '%s' not defined in '%s'", moduleName, defserver, path); } } } /* *---------------------------------------------------------------------- * * DriverInit -- * * Helper function of Ns_DriverInit. This function actually allocates and * initialized the driver structure. * * Results: * NS_OK if initialized, NS_ERROR if config or other error. * * Side effects: * Listen socket will be opened later in NsStartDrivers. * *---------------------------------------------------------------------- */ static Ns_ReturnCode DriverInit(const char *server, const char *moduleName, const char *threadName, const Ns_DriverInitData *init, NsServer *servPtr, const char *path, const char *bindaddrs, const char *defserver, const char *host) { const char *defproto; Driver *drvPtr; DrvWriter *wrPtr; DrvSpooler *spPtr; int i; unsigned short defport; NS_NONNULL_ASSERT(threadName != NULL); NS_NONNULL_ASSERT(init != NULL); NS_NONNULL_ASSERT(path != NULL); NS_NONNULL_ASSERT(bindaddrs != NULL); NS_NONNULL_ASSERT(host != NULL); /* * Set the protocol and port defaults. 
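     * Drivers registering without a protocol fall back to "unknown" with
     * default port 0; a driver whose configured port remains 0 is not
     * started by NsStartDrivers().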
*/ if (init->protocol != NULL) { defproto = init->protocol; defport = init->defaultPort; } else { defproto = "unknown"; defport = 0u; } Ns_Log(DriverDebug, "DriverInit server <%s> threadName %s proto %s port %hu", server, threadName, defproto, defport); /* * Allocate a new driver instance and set configurable parameters. */ drvPtr = ns_calloc(1u, sizeof(Driver)); Ns_MutexInit(&drvPtr->lock); Ns_MutexSetName2(&drvPtr->lock, "ns:drv", threadName); Ns_MutexInit(&drvPtr->spooler.lock); Ns_MutexSetName2(&drvPtr->spooler.lock, "ns:drv:spool", threadName); Ns_MutexInit(&drvPtr->writer.lock); Ns_MutexSetName2(&drvPtr->writer.lock, "ns:drv:writer", threadName); if (ns_sockpair(drvPtr->trigger) != 0) { Ns_Fatal("ns_sockpair() failed: %s", ns_sockstrerror(ns_sockerrno)); } drvPtr->server = server; drvPtr->type = init->name; drvPtr->moduleName = ns_strdup(moduleName); drvPtr->threadName = ns_strdup(threadName); drvPtr->defserver = defserver; drvPtr->listenProc = init->listenProc; drvPtr->acceptProc = init->acceptProc; drvPtr->recvProc = init->recvProc; drvPtr->sendProc = init->sendProc; drvPtr->sendFileProc = init->sendFileProc; drvPtr->keepProc = init->keepProc; drvPtr->requestProc = init->requestProc; drvPtr->closeProc = init->closeProc; drvPtr->clientInitProc = init->clientInitProc; drvPtr->arg = init->arg; drvPtr->opts = init->opts; drvPtr->servPtr = servPtr; drvPtr->defport = defport; drvPtr->bufsize = (size_t)Ns_ConfigMemUnitRange(path, "bufsize", 16384, 1024, INT_MAX); drvPtr->maxinput = Ns_ConfigMemUnitRange(path, "maxinput", 1024*1024, 1024, LLONG_MAX); drvPtr->maxupload = Ns_ConfigMemUnitRange(path, "maxupload", 0, 0, (Tcl_WideInt)drvPtr->maxinput); drvPtr->readahead = Ns_ConfigMemUnitRange(path, "readahead", (Tcl_WideInt)drvPtr->bufsize, (Tcl_WideInt)drvPtr->bufsize, drvPtr->maxinput); drvPtr->maxline = Ns_ConfigIntRange(path, "maxline", 8192, 256, INT_MAX); drvPtr->maxheaders = Ns_ConfigIntRange(path, "maxheaders", 128, 8, INT_MAX); drvPtr->maxqueuesize = Ns_ConfigIntRange(path, "maxqueuesize", 1024, 1, INT_MAX); Ns_ConfigTimeUnitRange(path, "sendwait", "30s", 1, 0, INT_MAX, 0, &drvPtr->sendwait); Ns_ConfigTimeUnitRange(path, "recvwait", "30s", 1, 0, INT_MAX, 0, &drvPtr->recvwait); Ns_ConfigTimeUnitRange(path, "closewait", "2s", 0, 0, INT_MAX, 0, &drvPtr->closewait); Ns_ConfigTimeUnitRange(path, "keepwait", "5s", 0, 0, INT_MAX, 0, &drvPtr->keepwait); drvPtr->backlog = Ns_ConfigIntRange(path, "backlog", 256, 1, INT_MAX); drvPtr->driverthreads = Ns_ConfigIntRange(path, "driverthreads", 1, 1, 32); drvPtr->reuseport = Ns_ConfigBool(path, "reuseport", NS_FALSE); drvPtr->acceptsize = Ns_ConfigIntRange(path, "acceptsize", drvPtr->backlog, 1, INT_MAX); drvPtr->keepmaxuploadsize = (size_t)Ns_ConfigMemUnitRange(path, "keepalivemaxuploadsize", 0, 0, INT_MAX); drvPtr->keepmaxdownloadsize = (size_t)Ns_ConfigMemUnitRange(path, "keepalivemaxdownloadsize", 0, 0, INT_MAX); drvPtr->recvTimeout = drvPtr->recvwait; Tcl_InitHashTable(&drvPtr->hosts, TCL_STRING_KEYS); if (drvPtr->driverthreads > 1) { #if !defined(SO_REUSEPORT) drvPtr->driverthreads = 1; drvPtr->reuseport = NS_FALSE; #else /* * When driver threads > 1, "reuseport" has to be active. 
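         * SO_REUSEPORT allows multiple sockets (one per driver thread) to
         * bind to the same address/port pair, with the kernel distributing
         * incoming connections among them.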
*/ drvPtr->reuseport = NS_TRUE; #endif } if (drvPtr->reuseport) { /* * Reuseport was specified */ #if !defined(SO_REUSEPORT) Ns_Log(Warning, "parameter %s reuseport was specified, but is not supported by the operating system", path); drvPtr->reuseport = NS_FALSE; #endif } drvPtr->uploadpath = ns_strdup(Ns_ConfigString(path, "uploadpath", nsconf.tmpDir)); /* * If activated, "maxupload" has to be at least "readahead" bytes. Tell * the user in case the config values are overruled. */ if ((drvPtr->maxupload > 0) && (drvPtr->maxupload < drvPtr->readahead)) { Ns_Log(Warning, "parameter %s maxupload % " TCL_LL_MODIFIER "d invalid; can be either 0 or must be >= %" TCL_LL_MODIFIER "d (size of readahead)", path, drvPtr->maxupload, drvPtr->readahead); drvPtr->maxupload = drvPtr->readahead; } /* * Determine the port and then set the HTTP location string either * as specified in the config file or constructed from the * protocol, hostname and port. */ drvPtr->protocol = ns_strdup(defproto); drvPtr->address = ns_strdup(bindaddrs); drvPtr->port = (unsigned short)Ns_ConfigIntRange(path, "port", (int)defport, 0, 65535); drvPtr->location = Ns_ConfigGetValue(path, "location"); if (drvPtr->location != NULL && (strstr(drvPtr->location, "://") != NULL)) { drvPtr->location = ns_strdup(drvPtr->location); } else { Tcl_DString ds, *dsPtr = &ds; Ns_DStringInit(dsPtr); Ns_HttpLocationString(dsPtr, drvPtr->protocol, host, drvPtr->port, defport); drvPtr->location = Ns_DStringExport(dsPtr); } drvPtr->nextPtr = firstDrvPtr; firstDrvPtr = drvPtr; /* * Add driver specific extra headers. */ drvPtr->extraHeaders = Ns_ConfigSet(path, "extraheaders"); /* * Check if upload spooler are enabled */ spPtr = &drvPtr->spooler; spPtr->threads = Ns_ConfigIntRange(path, "spoolerthreads", 0, 0, 32); if (spPtr->threads > 0) { Ns_Log(Notice, "%s: enable %d spooler thread(s) " "for uploads >= %" TCL_LL_MODIFIER "d bytes", threadName, spPtr->threads, drvPtr->readahead); for (i = 0; i < spPtr->threads; i++) { SpoolerQueue *queuePtr = ns_calloc(1u, sizeof(SpoolerQueue)); char buffer[100]; snprintf(buffer, sizeof(buffer), "ns:driver:spooler:%d", i); Ns_MutexSetName2(&queuePtr->lock, buffer, "queue"); queuePtr->id = i; Push(queuePtr, spPtr->firstPtr); } } else { Ns_Log(Notice, "%s: enable %d spooler thread(s) ", threadName, spPtr->threads); } /* * Enable writer threads */ wrPtr = &drvPtr->writer; wrPtr->threads = Ns_ConfigIntRange(path, "writerthreads", 0, 0, 32); if (wrPtr->threads > 0) { wrPtr->writersize = (size_t)Ns_ConfigMemUnitRange(path, "writersize", 1024*1024, 1024, INT_MAX); wrPtr->bufsize = (size_t)Ns_ConfigMemUnitRange(path, "writerbufsize", 8192, 512, INT_MAX); wrPtr->rateLimit = Ns_ConfigIntRange(path, "writerratelimit", 0, 0, INT_MAX); wrPtr->doStream = Ns_ConfigBool(path, "writerstreaming", NS_FALSE) ? 
NS_WRITER_STREAM_ACTIVE : NS_WRITER_STREAM_NONE; Ns_Log(Notice, "%s: enable %d writer thread(s) " "for downloads >= %" PRIdz " bytes, bufsize=%" PRIdz " bytes, HTML streaming %d", threadName, wrPtr->threads, wrPtr->writersize, wrPtr->bufsize, wrPtr->doStream); for (i = 0; i < wrPtr->threads; i++) { SpoolerQueue *queuePtr = ns_calloc(1u, sizeof(SpoolerQueue)); char buffer[100]; snprintf(buffer, sizeof(buffer), "ns:driver:writer:%d", i); Ns_MutexSetName2(&queuePtr->lock, buffer, "queue"); queuePtr->id = i; Push(queuePtr, wrPtr->firstPtr); } } else { Ns_Log(Notice, "%s: enable %d writer thread(s) ", threadName, wrPtr->threads); } return NS_OK; } /* *---------------------------------------------------------------------- * * NsStartDrivers -- * * Listen on all driver address/ports and start the DriverThread. * * Results: * None. * * Side effects: * See DriverThread. * *---------------------------------------------------------------------- */ void NsStartDrivers(void) { Driver *drvPtr; /* * Signal and wait for each driver to start. */ for (drvPtr = firstDrvPtr; drvPtr != NULL; drvPtr = drvPtr->nextPtr) { if (drvPtr->port == 0u) { /* * Don't start a driver having port zero. */ continue; } Ns_ThreadCreate(DriverThread, drvPtr, 0, &drvPtr->thread); Ns_MutexLock(&drvPtr->lock); while ((drvPtr->flags & DRIVER_STARTED) == 0u) { Ns_CondWait(&drvPtr->cond, &drvPtr->lock); } /*if ((drvPtr->flags & DRIVER_FAILED)) { status = NS_ERROR; }*/ Ns_MutexUnlock(&drvPtr->lock); } } /* *---------------------------------------------------------------------- * * NsStopDrivers -- * * Trigger the DriverThread to begin shutdown. * * Results: * None. * * Side effects: * DriverThread will close listen sockets and then exit after all * outstanding connections are complete and closed. * *---------------------------------------------------------------------- */ void NsStopDrivers(void) { Driver *drvPtr; NsAsyncWriterQueueDisable(NS_TRUE); for (drvPtr = firstDrvPtr; drvPtr != NULL; drvPtr = drvPtr->nextPtr) { Tcl_HashEntry *hPtr; Tcl_HashSearch search; if ((drvPtr->flags & DRIVER_STARTED) == 0u) { continue; } Ns_MutexLock(&drvPtr->lock); Ns_Log(Notice, "[driver:%s]: stopping", drvPtr->threadName); drvPtr->flags |= DRIVER_SHUTDOWN; Ns_CondBroadcast(&drvPtr->cond); Ns_MutexUnlock(&drvPtr->lock); SockTrigger(drvPtr->trigger[1]); hPtr = Tcl_FirstHashEntry(&drvPtr->hosts, &search); while (hPtr != NULL) { Tcl_DeleteHashEntry(hPtr); hPtr = Tcl_NextHashEntry(&search); } } } void NsStopSpoolers(void) { const Driver *drvPtr; Ns_Log(Notice, "driver: stopping writer and spooler threads"); /* * Shutdown all spooler and writer threads */ for (drvPtr = firstDrvPtr; drvPtr != NULL; drvPtr = drvPtr->nextPtr) { Ns_Time timeout; if ((drvPtr->flags & DRIVER_STARTED) == 0u) { continue; } Ns_GetTime(&timeout); Ns_IncrTime(&timeout, nsconf.shutdowntimeout.sec, nsconf.shutdowntimeout.usec); SpoolerQueueStop(drvPtr->writer.firstPtr, &timeout, "writer"); SpoolerQueueStop(drvPtr->spooler.firstPtr, &timeout, "spooler"); } } /* *---------------------------------------------------------------------- * * DriverInfoObjCmd -- * * Return public info of all drivers. * Subcommand of NsTclDriverObjCmd. * * Results: * Standard Tcl Result. * * Side effects: * None. 
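 *
 *      The result is a list with one element per distinct driver module;
 *      each element is a key/value list reporting module, type, server,
 *      location, address, protocol, sendwait, recvwait and extraheaders.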
 *
 *----------------------------------------------------------------------
 */
static int
DriverInfoObjCmd(ClientData UNUSED(clientData), Tcl_Interp *interp, int objc, Tcl_Obj *const* objv)
{
    int result = TCL_OK;

    if (Ns_ParseObjv(NULL, NULL, interp, 2, objc, objv) != NS_OK) {
        result = TCL_ERROR;
    } else {
        const Driver *drvPtr;
        Tcl_Obj      *resultObj = Tcl_NewListObj(0, NULL);
        Tcl_HashTable driverNames;     /* names of the driver modules without duplicates */

        Tcl_InitHashTable(&driverNames, TCL_STRING_KEYS);

        /*
         * Iterate over all modules, not necessarily all driver threads
         */
        for (drvPtr = firstDrvPtr; drvPtr != NULL; drvPtr = drvPtr->nextPtr) {
            int isNew = 0;

            (void)Tcl_CreateHashEntry(&driverNames, drvPtr->moduleName, &isNew);
            if (isNew == 1) {
                Tcl_Obj *listObj = Tcl_NewListObj(0, NULL);

                Tcl_ListObjAppendElement(interp, listObj, Tcl_NewStringObj("module", 6));
                Tcl_ListObjAppendElement(interp, listObj, Tcl_NewStringObj(drvPtr->moduleName, -1));
                Tcl_ListObjAppendElement(interp, listObj, Tcl_NewStringObj("type", 4));
                Tcl_ListObjAppendElement(interp, listObj, Tcl_NewStringObj(drvPtr->type, -1));
                Tcl_ListObjAppendElement(interp, listObj, Tcl_NewStringObj("server", 6));
                Tcl_ListObjAppendElement(interp, listObj,
                                         Tcl_NewStringObj(drvPtr->server != NULL ? drvPtr->server : NS_EMPTY_STRING, -1));
                Tcl_ListObjAppendElement(interp, listObj, Tcl_NewStringObj("location", 8));
                Tcl_ListObjAppendElement(interp, listObj, Tcl_NewStringObj(drvPtr->location, -1));
                Tcl_ListObjAppendElement(interp, listObj, Tcl_NewStringObj("address", 7));
                Tcl_ListObjAppendElement(interp, listObj, Tcl_NewStringObj(drvPtr->address, -1));
                Tcl_ListObjAppendElement(interp, listObj, Tcl_NewStringObj("protocol", 8));
                Tcl_ListObjAppendElement(interp, listObj, Tcl_NewStringObj(drvPtr->protocol, -1));
                Tcl_ListObjAppendElement(interp, listObj, Tcl_NewStringObj("sendwait", 8));
                Tcl_ListObjAppendElement(interp, listObj, Ns_TclNewTimeObj(&drvPtr->sendwait));
                Tcl_ListObjAppendElement(interp, listObj, Tcl_NewStringObj("recvwait", 8));
                Tcl_ListObjAppendElement(interp, listObj, Ns_TclNewTimeObj(&drvPtr->recvwait));
                Tcl_ListObjAppendElement(interp, listObj, Tcl_NewStringObj("extraheaders", 12));
                if (drvPtr->extraHeaders != NULL) {
                    Tcl_DString ds;

                    Tcl_DStringInit(&ds);
                    Ns_DStringAppendSet(&ds, drvPtr->extraHeaders);
                    Tcl_ListObjAppendElement(interp, listObj, Tcl_NewStringObj(ds.string, ds.length));
                    Tcl_DStringFree(&ds);
                } else {
                    Tcl_ListObjAppendElement(interp, listObj, Tcl_NewStringObj("", 0));
                }
                Tcl_ListObjAppendElement(interp, resultObj, listObj);
            }
        }
        Tcl_SetObjResult(interp, resultObj);
        Tcl_DeleteHashTable(&driverNames);
    }
    return result;
}

/*
 *----------------------------------------------------------------------
 *
 * DriverStatsObjCmd --
 *
 *      Return statistics of all drivers.
 *      Subcommand of NsTclDriverObjCmd.
 *
 * Results:
 *      Standard Tcl result.
 *
 * Side effects:
 *      None.
 *
 *----------------------------------------------------------------------
 */
static int
DriverStatsObjCmd(ClientData UNUSED(clientData), Tcl_Interp *interp, int objc, Tcl_Obj *const* objv)
{
    int result = TCL_OK;

    if (Ns_ParseObjv(NULL, NULL, interp, 2, objc, objv) != NS_OK) {
        result = TCL_ERROR;
    } else {
        const Driver *drvPtr;
        Tcl_Obj      *resultObj = Tcl_NewListObj(0, NULL);

        /*
         * Iterate over all drivers and collect results.
*/ for (drvPtr = firstDrvPtr; drvPtr != NULL; drvPtr = drvPtr->nextPtr) { Tcl_Obj *listObj = Tcl_NewListObj(0, NULL); Tcl_ListObjAppendElement(interp, listObj, Tcl_NewStringObj("thread", 6)); Tcl_ListObjAppendElement(interp, listObj, Tcl_NewStringObj(drvPtr->threadName, -1)); Tcl_ListObjAppendElement(interp, listObj, Tcl_NewStringObj("module", 6)); Tcl_ListObjAppendElement(interp, listObj, Tcl_NewStringObj(drvPtr->moduleName, -1)); Tcl_ListObjAppendElement(interp, listObj, Tcl_NewStringObj("received", 8)); Tcl_ListObjAppendElement(interp, listObj, Tcl_NewWideIntObj(drvPtr->stats.received)); Tcl_ListObjAppendElement(interp, listObj, Tcl_NewStringObj("spooled", 7)); Tcl_ListObjAppendElement(interp, listObj, Tcl_NewWideIntObj(drvPtr->stats.spooled)); Tcl_ListObjAppendElement(interp, listObj, Tcl_NewStringObj("partial", 7)); Tcl_ListObjAppendElement(interp, listObj, Tcl_NewWideIntObj(drvPtr->stats.partial)); Tcl_ListObjAppendElement(interp, listObj, Tcl_NewStringObj("errors", 6)); Tcl_ListObjAppendElement(interp, listObj, Tcl_NewWideIntObj(drvPtr->stats.errors)); Tcl_ListObjAppendElement(interp, resultObj, listObj); } Tcl_SetObjResult(interp, resultObj); } return result; } /* *---------------------------------------------------------------------- * * DriverThreadsObjCmd -- * * Return the names of driver threads * * Results: * Standard Tcl Result. * * Side effects: * None. * *---------------------------------------------------------------------- */ static int DriverThreadsObjCmd(ClientData UNUSED(clientData), Tcl_Interp *interp, int objc, Tcl_Obj *const* objv) { int result = TCL_OK; if (Ns_ParseObjv(NULL, NULL, interp, 2, objc, objv) != NS_OK) { result = TCL_ERROR; } else { const Driver *drvPtr; Tcl_Obj *resultObj = Tcl_NewListObj(0, NULL); /* * Iterate over all drivers and collect results. */ for (drvPtr = firstDrvPtr; drvPtr != NULL; drvPtr = drvPtr->nextPtr) { Tcl_ListObjAppendElement(interp, resultObj, Tcl_NewStringObj(drvPtr->threadName, -1)); } Tcl_SetObjResult(interp, resultObj); } return result; } /* *---------------------------------------------------------------------- * * DriverNamesObjCmd -- * * Return the names of drivers. * * Results: * Standard Tcl Result. * * Side effects: * None. * *---------------------------------------------------------------------- */ static int DriverNamesObjCmd(ClientData UNUSED(clientData), Tcl_Interp *interp, int objc, Tcl_Obj *const* objv) { int result = TCL_OK; if (Ns_ParseObjv(NULL, NULL, interp, 2, objc, objv) != NS_OK) { result = TCL_ERROR; } else { const Driver *drvPtr; Tcl_Obj *resultObj = Tcl_NewListObj(0, NULL); Tcl_HashTable driverNames; /* names of the drivers without duplicates */ Tcl_InitHashTable(&driverNames, TCL_STRING_KEYS); /* * Iterate over all drivers and collect results. */ for (drvPtr = firstDrvPtr; drvPtr != NULL; drvPtr = drvPtr->nextPtr) { int isNew; (void)Tcl_CreateHashEntry(&driverNames, drvPtr->moduleName, &isNew); if (isNew == 1) { Tcl_ListObjAppendElement(interp, resultObj, Tcl_NewStringObj(drvPtr->moduleName, -1)); } } Tcl_SetObjResult(interp, resultObj); Tcl_DeleteHashTable(&driverNames); } return result; } /* *---------------------------------------------------------------------- * * NsTclDriverObjCmd - * * Give information about drivers. Currently, just the statistics. * * Results: * Standard Tcl result. * * Side effects: * None. 
 *
 *----------------------------------------------------------------------
 */
int
NsTclDriverObjCmd(ClientData clientData, Tcl_Interp *interp, int objc, Tcl_Obj *const* objv)
{
    const Ns_SubCmdSpec subcmds[] = {
        {"info",    DriverInfoObjCmd},
        {"names",   DriverNamesObjCmd},
        {"threads", DriverThreadsObjCmd},
        {"stats",   DriverStatsObjCmd},
        {NULL, NULL}
    };

    return Ns_SubcmdObjv(subcmds, clientData, interp, objc, objv);
}

/*
 *----------------------------------------------------------------------
 *
 * NsWakeupDriver --
 *
 *      Wake up the associated DriverThread.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      The poll waiting for this trigger will be interrupted.
 *
 *----------------------------------------------------------------------
 */
void
NsWakeupDriver(const Driver *drvPtr)
{
    NS_NONNULL_ASSERT(drvPtr != NULL);
    SockTrigger(drvPtr->trigger[1]);
}

/*
 *----------------------------------------------------------------------
 *
 * NsWaitDriversShutdown --
 *
 *      Wait for exit of DriverThread. This callback is invoked later
 *      by the timed shutdown thread.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      Driver thread is joined and trigger pipe closed.
 *
 *----------------------------------------------------------------------
 */
void
NsWaitDriversShutdown(const Ns_Time *toPtr)
{
    Driver        *drvPtr;
    Ns_ReturnCode  status = NS_OK;

    for (drvPtr = firstDrvPtr; drvPtr != NULL; drvPtr = drvPtr->nextPtr) {
        if ((drvPtr->flags & DRIVER_STARTED) == 0u) {
            continue;
        }
        Ns_MutexLock(&drvPtr->lock);
        while ((drvPtr->flags & DRIVER_STOPPED) == 0u && status == NS_OK) {
            status = Ns_CondTimedWait(&drvPtr->cond, &drvPtr->lock, toPtr);
        }
        Ns_MutexUnlock(&drvPtr->lock);
        if (status != NS_OK) {
            Ns_Log(Warning, "[driver:%s]: shutdown timeout", drvPtr->threadName);
        } else {
            Ns_Log(Notice, "[driver:%s]: stopped", drvPtr->threadName);
            Ns_ThreadJoin(&drvPtr->thread, NULL);
            drvPtr->thread = NULL;
        }
    }
}

/*
 *----------------------------------------------------------------------
 *
 * NsGetRequest --
 *
 *      Return the request buffer, reading it if necessary (i.e., if not an
 *      async read-ahead connection). This function is called at the start of
 *      connection processing.
 *
 * Results:
 *      Pointer to Request structure or NULL on error.
 *
 * Side effects:
 *      May wait for content to arrive if necessary.
 *
 *----------------------------------------------------------------------
 */
Request *
NsGetRequest(Sock *sockPtr, const Ns_Time *nowPtr)
{
    Request *reqPtr;

    NS_NONNULL_ASSERT(sockPtr != NULL);

    /*
     * The underlying "Request" structure is allocated by RequestNew(), which
     * must be called for the "sockPtr" prior to calling this
     * function. "reqPtr" should be NULL only in error cases.
     */
    reqPtr = sockPtr->reqPtr;

    if (likely(reqPtr != NULL)) {
        if (likely(reqPtr->request.line != NULL)) {
            Ns_Log(DriverDebug, "NsGetRequest got the pre-parsed request <%s> from the driver",
                   reqPtr->request.line);

        } else if (sockPtr->drvPtr->requestProc == NULL) {
            /*
             * Non-HTTP drivers can set drvPtr->requestProc to perform
             * their own request handling.
             */
            SockState status;

            Ns_Log(DriverDebug, "NsGetRequest has to read+parse the request");
            /*
             * We have no parsed request so far. So, do it now.
             */
            do {
                Ns_Log(DriverDebug, "NsGetRequest calls SockRead");
                status = SockRead(sockPtr, 0, nowPtr);
            } while (status == SOCK_MORE);

            /*
             * If anything went wrong, clean the request provided by
             * SockRead() and flag the error by returning NULL.
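             * (SOCK_MORE above only means that the read loop has to
             * continue; every other state except SOCK_READY is treated
             * as an error here.)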
             */
            if (status != SOCK_READY) {
                if (sockPtr->reqPtr != NULL) {
                    Ns_Log(DriverDebug, "NsGetRequest calls RequestFree");
                    RequestFree(sockPtr);
                }
                reqPtr = NULL;
            }

        } else {
            Ns_Log(DriverDebug, "NsGetRequest found driver specific request Proc, "
                   "probably from a non-HTTP driver");
        }
    } else {
        Ns_Log(DriverDebug, "NsGetRequest has reqPtr NULL");
    }

    return reqPtr;
}

/*
 *----------------------------------------------------------------------
 *
 * NsSockClose --
 *
 *      Return a connection to the DriverThread for closing or keepalive.
 *      "keep" might be NS_TRUE/NS_FALSE or -1 if undecided.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      Socket may be reused by a keepalive connection.
 *
 *----------------------------------------------------------------------
 */
void
NsSockClose(Sock *sockPtr, int keep)
{
    Driver *drvPtr;
    bool    trigger = NS_FALSE;

    NS_NONNULL_ASSERT(sockPtr != NULL);

    drvPtr = sockPtr->drvPtr;

    Ns_Log(DriverDebug, "NsSockClose sockPtr %p (%d) keep %d",
           (void *)sockPtr, ((Ns_Sock*)sockPtr)->sock, keep);

    SockClose(sockPtr, keep);
    /*
     * Free the request, unless it is from a non-HTTP driver (which might not
     * fill out the request structure).
     */
    if (sockPtr->reqPtr != NULL) {
        Ns_Log(DriverDebug, "NsSockClose calls RequestFree");
        RequestFree(sockPtr);
    }

    Ns_MutexLock(&drvPtr->lock);
    if (drvPtr->closePtr == NULL) {
        trigger = NS_TRUE;
    }
    sockPtr->nextPtr = drvPtr->closePtr;
    drvPtr->closePtr = sockPtr;
    Ns_MutexUnlock(&drvPtr->lock);

    if (trigger) {
        SockTrigger(drvPtr->trigger[1]);
    }
}

/*
 *----------------------------------------------------------------------
 *
 * DriverListen --
 *
 *      Open a listening socket for accepting connections.
 *
 * Results:
 *      File descriptor of the socket, or NS_INVALID_SOCKET on error.
 *
 * Side effects:
 *      Depends on driver.
 *
 *----------------------------------------------------------------------
 */
static NS_SOCKET
DriverListen(Driver *drvPtr, const char *bindaddr)
{
    NS_SOCKET sock;

    NS_NONNULL_ASSERT(drvPtr != NULL);
    NS_NONNULL_ASSERT(bindaddr != NULL);

    sock = (*drvPtr->listenProc)((Ns_Driver *) drvPtr,
                                 bindaddr,
                                 drvPtr->port,
                                 drvPtr->backlog,
                                 drvPtr->reuseport);
    if (sock == NS_INVALID_SOCKET) {
        Ns_Log(Error, "%s: failed to listen on [%s]:%d: %s",
               drvPtr->threadName, bindaddr, drvPtr->port,
               ns_sockstrerror(ns_sockerrno));
    } else {
        Ns_Log(Notice,
#ifdef HAVE_IPV6
               "%s: listening on [%s]:%d",
#else
               "%s: listening on %s:%d",
#endif
               drvPtr->threadName, bindaddr, drvPtr->port);
    }

    return sock;
}

/*
 *----------------------------------------------------------------------
 *
 * DriverAccept --
 *
 *      Accept a new socket. It will be in non-blocking mode.
 *
 * Results:
 *      _ACCEPT:       a socket was accepted, poll for data
 *      _ACCEPT_DATA:  a socket was accepted, data present, read immediately
 *                     if in async mode, defer reading to connection thread
 *      _ACCEPT_QUEUE: a socket was accepted, queue immediately
 *      _ACCEPT_ERROR: no socket was accepted
 *
 * Side effects:
 *      Depends on driver.
 *
 *----------------------------------------------------------------------
 */
static NS_DRIVER_ACCEPT_STATUS
DriverAccept(Sock *sockPtr, NS_SOCKET sock)
{
    socklen_t n = (socklen_t)sizeof(struct NS_SOCKADDR_STORAGE);

    NS_NONNULL_ASSERT(sockPtr != NULL);

    return (*sockPtr->drvPtr->acceptProc)((Ns_Sock *) sockPtr,
                                          sock,
                                          (struct sockaddr *) &(sockPtr->sa), &n);
}

/*
 *----------------------------------------------------------------------
 *
 * NsDriverRecv --
 *
 *      Read data from the socket into the given vector of buffers.
 *
 * Results:
 *      Number of bytes read, or -1 on error.
 *
 * Side effects:
 *      Depends on driver.
* *---------------------------------------------------------------------- */ ssize_t NsDriverRecv(Sock *sockPtr, struct iovec *bufs, int nbufs, Ns_Time *timeoutPtr) { ssize_t result; const Driver *drvPtr; NS_NONNULL_ASSERT(sockPtr != NULL); drvPtr = sockPtr->drvPtr; if (likely(drvPtr->recvProc != NULL)) { result = (*drvPtr->recvProc)((Ns_Sock *) sockPtr, bufs, nbufs, timeoutPtr, 0u); } else { Ns_Log(Warning, "driver: no recvProc registered for driver %s", drvPtr->threadName); result = -1; } return result; } /* *---------------------------------------------------------------------- * * NsDriverSend -- * * Write a vector of buffers to the socket via the driver callback. * May not send all of the data. * * Results: * Number of bytes written or -1 on error. * May return 0 (zero) when socket is not writable. * * Side effects: * Depends on the driver. * *---------------------------------------------------------------------- */ ssize_t NsDriverSend(Sock *sockPtr, const struct iovec *bufs, int nbufs, unsigned int flags) { ssize_t sent = -1; const Driver *drvPtr; NS_NONNULL_ASSERT(sockPtr != NULL); drvPtr = sockPtr->drvPtr; NS_NONNULL_ASSERT(drvPtr != NULL); if (likely(drvPtr->sendProc != NULL)) { /* * TODO: The Ns_DriverSendProc signature should be modified * to omit the timeout argument. Same with recvProc(). */ sent = (*drvPtr->sendProc)((Ns_Sock *) sockPtr, bufs, nbufs, NULL, flags); } else { Ns_Log(Warning, "no sendProc registered for driver %s", drvPtr->threadName); } return sent; } /* *---------------------------------------------------------------------- * * NsDriverSendFile -- * * Write a vector of file buffers to the socket via the driver * callback. * * Results: * Number of bytes written, -1 on error. * May not send all the data. * * Side effects: * May block on disk read. * *---------------------------------------------------------------------- */ ssize_t NsDriverSendFile(Sock *sockPtr, Ns_FileVec *bufs, int nbufs, unsigned int flags) { ssize_t sent = -1; const Driver *drvPtr; NS_NONNULL_ASSERT(sockPtr != NULL); NS_NONNULL_ASSERT(bufs != NULL); drvPtr = sockPtr->drvPtr; NS_NONNULL_ASSERT(drvPtr != NULL); if (drvPtr->sendFileProc != NULL) { /* * TODO: The Ns_DriverSendFileProc signature should be modified * to omit the timeout argument. */ sent = (*drvPtr->sendFileProc)((Ns_Sock *)sockPtr, bufs, nbufs, NULL, flags); } else { sent = Ns_SockSendFileBufs((Ns_Sock *)sockPtr, bufs, nbufs, flags); } return sent; } /* *---------------------------------------------------------------------- * * DriverKeep -- * * Can the given socket be kept open in the hopes that another * request will arrive before the keepwait timeout expires? * * Results: * NS_TRUE if the socket is OK for keepalive, NS_FALSE if this is not possible. * * Side effects: * Depends on driver. * *---------------------------------------------------------------------- */ static bool DriverKeep(Sock *sockPtr) { Ns_DriverKeepProc *keepProc; bool result; NS_NONNULL_ASSERT(sockPtr != NULL); keepProc = sockPtr->drvPtr->keepProc; if (keepProc == NULL) { result = NS_FALSE; } else { result = (keepProc)((Ns_Sock *) sockPtr); } return result; } /* *---------------------------------------------------------------------- * * DriverClose -- * * Close the given socket. * * Results: * None. * * Side effects: * Depends on driver. 
 *
 *----------------------------------------------------------------------
 */
static void
DriverClose(Sock *sockPtr)
{
    NS_NONNULL_ASSERT(sockPtr != NULL);
    (*sockPtr->drvPtr->closeProc)((Ns_Sock *) sockPtr);
}

/*
 *----------------------------------------------------------------------
 *
 * DriverThread --
 *
 *      Main listening socket driver thread.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      Connections are accepted on the configured listen sockets,
 *      placed on the run queue to be serviced, and gracefully
 *      closed when done. Async sockets have the entire request read
 *      here before queuing as well.
 *
 *----------------------------------------------------------------------
 */
static void
DriverThread(void *arg)
{
    Driver        *drvPtr = (Driver*)arg;
    Ns_Time        now, diff;
    char           charBuffer[1], drain[1024];
    int            pollTimeout, accepted, nrBindaddrs = 0;
    bool           stopping;
    unsigned int   flags;
    Sock          *sockPtr, *closePtr, *nextPtr, *waitPtr, *readPtr;
    PollData       pdata;

    Ns_ThreadSetName("-driver:%s-", drvPtr->threadName);
    Ns_Log(Notice, "starting");

    flags = DRIVER_STARTED;

    {
        Tcl_Obj *bindaddrsObj, **objv;
        int      j = 0, result;

        bindaddrsObj = Tcl_NewStringObj(drvPtr->address, -1);
        Tcl_IncrRefCount(bindaddrsObj);

        result = Tcl_ListObjGetElements(NULL, bindaddrsObj, &nrBindaddrs, &objv);
        /*
         * "result" was ok during startup, so it still has to be ok.
         */
        assert(result == TCL_OK);

        if (result == TCL_OK) {
            int i;

            /*
             * Bind all provided addresses.
             */
            for (i = 0; i < nrBindaddrs; i++) {
                drvPtr->listenfd[j] = DriverListen(drvPtr, Tcl_GetString(objv[i]));
                if (drvPtr->listenfd[j] != NS_INVALID_SOCKET) {
                    j ++;
                }
            }
            if (j > 0 && j < nrBindaddrs) {
                Ns_Log(Warning, "could only bind to %d out of %d addresses", j, nrBindaddrs);
            }
        }

        /*
         * "j" refers to the number of successful listen() operations.
         */
        nrBindaddrs = j;
        Tcl_DecrRefCount(bindaddrsObj);
    }

    if (nrBindaddrs > 0) {
        SpoolerQueueStart(drvPtr->spooler.firstPtr, SpoolerThread);
        SpoolerQueueStart(drvPtr->writer.firstPtr, WriterThread);
    } else {
        Ns_Log(Warning, "could not bind any of the following addresses, stopping this driver: %s",
               drvPtr->address);
        flags |= (DRIVER_FAILED | DRIVER_SHUTDOWN);
    }

    Ns_MutexLock(&drvPtr->lock);
    drvPtr->flags |= flags;
    Ns_CondBroadcast(&drvPtr->cond);
    Ns_MutexUnlock(&drvPtr->lock);

    /*
     * Loop forever until signaled to shut down and all
     * connections are complete and gracefully closed.
     */
    PollCreate(&pdata);
    Ns_GetTime(&now);
    closePtr = waitPtr = readPtr = NULL;
    stopping = ((flags & DRIVER_SHUTDOWN) != 0u);

    if (!stopping) {
        Ns_Log(Notice, "driver: accepting connections");
    }

    while (!stopping) {
        int n;

        /*
         * Set the bits for all active drivers if a connection
         * isn't already pending.
         */
        PollReset(&pdata);
        (void)PollSet(&pdata, drvPtr->trigger[0], (short)POLLIN, NULL);

        if (likely(waitPtr == NULL)) {
            for (n = 0; n < nrBindaddrs; n++) {
                drvPtr->pidx[n] = PollSet(&pdata, drvPtr->listenfd[n], (short)POLLIN, NULL);
            }
        }

        /*
         * If there are any closing or read-ahead sockets, set the bits
         * and determine the minimum relative timeout.
         *
         * TODO: the various poll timeouts should probably be configurable.
         */
        if (readPtr == NULL && closePtr == NULL) {
            pollTimeout = 10 * 1000;
        } else {
            for (sockPtr = readPtr; sockPtr != NULL; sockPtr = sockPtr->nextPtr) {
                SockPoll(sockPtr, (short)POLLIN, &pdata);
            }
            for (sockPtr = closePtr; sockPtr != NULL; sockPtr = sockPtr->nextPtr) {
                SockPoll(sockPtr, (short)POLLIN, &pdata);
            }
            if (Ns_DiffTime(&pdata.timeout, &now, &diff) > 0)  {
                /*
                 * The resolution of "pollTimeout" is ms, therefore, we round
                 * up. If we would round down (e.g.
500 microseconds to 0 ms), * the time comparison later would determine that it is too * early. */ pollTimeout = (int)Ns_TimeToMilliseconds(&diff) + 1; } else { pollTimeout = 0; } } n = PollWait(&pdata, pollTimeout); Ns_Log(DriverDebug, "=== PollWait returned %d, trigger[0] %d", n, PollIn(&pdata, 0)); if (PollIn(&pdata, 0) && unlikely(ns_recv(drvPtr->trigger[0], charBuffer, 1u, 0) != 1)) { const char *errstr = ns_sockstrerror(ns_sockerrno); Ns_Fatal("driver: trigger ns_recv() failed: %s", errstr); } /* * Check whether we should re-animate some connection threads, * when e.g. the number of current threads dropped below the * minimal value. Perform this test on timeouts (n == 0; * just for safety reasons) or on explicit wakeup calls. */ if ((n == 0) || PollIn(&pdata, 0)) { NsServer *servPtr = drvPtr->servPtr; if (servPtr != NULL) { /* * Check if we have to reanimate the current server. */ NsEnsureRunningConnectionThreads(servPtr, NULL); } else { Ns_Set *servers = Ns_ConfigCreateSection("ns/servers"); size_t j; /* * Reanimation check on all servers. */ for (j = 0u; j < Ns_SetSize(servers); ++j) { const char *server = Ns_SetKey(servers, j); servPtr = NsGetServer(server); if (servPtr != NULL) { NsEnsureRunningConnectionThreads(servPtr, NULL); } } } } /* * Update the current time and drain and/or release any * closing sockets. */ Ns_GetTime(&now); if (closePtr != NULL) { sockPtr = closePtr; closePtr = NULL; while (sockPtr != NULL) { nextPtr = sockPtr->nextPtr; if (unlikely(PollHup(&pdata, sockPtr->pidx))) { /* * Peer has closed the connection */ SockRelease(sockPtr, SOCK_CLOSE, 0); } else if (likely(PollIn(&pdata, sockPtr->pidx))) { /* * Got some data */ ssize_t received = ns_recv(sockPtr->sock, drain, sizeof(drain), 0); if (received <= 0) { Ns_Log(DriverDebug, "poll closewait pollin; sockrelease SOCK_READERROR (sock %d)", sockPtr->sock); SockRelease(sockPtr, SOCK_READERROR, 0); } else { Push(sockPtr, closePtr); } } else if (Ns_DiffTime(&sockPtr->timeout, &now, &diff) <= 0) { /* no PollHup, no PollIn, maybe timeout */ Ns_Log(DriverDebug, "poll closewait timeout; sockrelease SOCK_CLOSETIMEOUT (sock %d)", sockPtr->sock); SockRelease(sockPtr, SOCK_CLOSETIMEOUT, 0); } else { /* too early, keep waiting */ Push(sockPtr, closePtr); } sockPtr = nextPtr; } } /* * Attempt read-ahead of any new connections. */ sockPtr = readPtr; readPtr = NULL; while (likely(sockPtr != NULL)) { nextPtr = sockPtr->nextPtr; if (unlikely(PollHup(&pdata, sockPtr->pidx))) { /* * Peer has closed the connection */ SockRelease(sockPtr, SOCK_CLOSE, 0); } else if (unlikely(!PollIn(&pdata, sockPtr->pidx)) && ((sockPtr->reqPtr == NULL) || (sockPtr->reqPtr->leftover == 0u))) { /* * Got no data for this sockPtr. */ if (Ns_DiffTime(&sockPtr->timeout, &now, &diff) <= 0) { SockRelease(sockPtr, SOCK_READTIMEOUT, 0); } else { Push(sockPtr, readPtr); } } else { /* * Got some data for this sockPtr. * If enabled, perform read-ahead now. */ assert(drvPtr == sockPtr->drvPtr); if (likely((drvPtr->opts & NS_DRIVER_ASYNC) != 0u)) { SockState s = SockRead(sockPtr, 0, &now); /* * Queue for connection processing if ready. 
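                     * (SOCK_SPOOL hands the socket over to a spooler thread
                     * or falls back to the read-ahead list, SOCK_MORE keeps
                     * it on the read-ahead list, SOCK_READY queues it for a
                     * connection thread; all other states release the
                     * socket.)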
*/ switch (s) { case SOCK_SPOOL: drvPtr->stats.spooled++; if (SockSpoolerQueue(drvPtr, sockPtr) == 0) { Push(sockPtr, readPtr); } break; case SOCK_MORE: drvPtr->stats.partial++; SockTimeout(sockPtr, &now, &drvPtr->recvwait); Push(sockPtr, readPtr); break; case SOCK_READY: if (SockQueue(sockPtr, &now) == NS_TIMEOUT) { Push(sockPtr, waitPtr); } break; /* * Already handled or normal cases */ case SOCK_ENTITYTOOLARGE: NS_FALL_THROUGH; /* fall through */ case SOCK_BADREQUEST: NS_FALL_THROUGH; /* fall through */ case SOCK_BADHEADER: NS_FALL_THROUGH; /* fall through */ case SOCK_TOOMANYHEADERS: NS_FALL_THROUGH; /* fall through */ case SOCK_CLOSE: SockRelease(sockPtr, s, errno); break; /* * Exceptions */ case SOCK_READERROR: NS_FALL_THROUGH; /* fall through */ case SOCK_CLOSETIMEOUT: NS_FALL_THROUGH; /* fall through */ case SOCK_ERROR: NS_FALL_THROUGH; /* fall through */ case SOCK_READTIMEOUT: NS_FALL_THROUGH; /* fall through */ case SOCK_SHUTERROR: NS_FALL_THROUGH; /* fall through */ case SOCK_WRITEERROR: NS_FALL_THROUGH; /* fall through */ case SOCK_WRITETIMEOUT: drvPtr->stats.errors++; Ns_Log(Warning, "sockread returned unexpected result %s (err %s); close socket (%d)", GetSockStateName(s), ((errno != 0) ? strerror(errno) : NS_EMPTY_STRING), sockPtr->sock); SockRelease(sockPtr, s, errno); break; } } else { /* * Potentially blocking driver, NS_DRIVER_ASYNC is not defined */ if (Ns_DiffTime(&sockPtr->timeout, &now, &diff) <= 0) { drvPtr->stats.errors++; Ns_Log(Notice, "read-ahead has some data, no async sock read ===== diff time %ld", Ns_DiffTime(&sockPtr->timeout, &now, &diff)); sockPtr->keep = NS_FALSE; SockRelease(sockPtr, SOCK_READTIMEOUT, 0); } else { if (SockQueue(sockPtr, &now) == NS_TIMEOUT) { Push(sockPtr, waitPtr); } } } } sockPtr = nextPtr; } /* * Attempt to queue any pending connection after reversing the * list to ensure oldest connections are tried first. */ if (waitPtr != NULL) { sockPtr = NULL; while ((nextPtr = waitPtr) != NULL) { waitPtr = nextPtr->nextPtr; Push(nextPtr, sockPtr); } while (sockPtr != NULL) { nextPtr = sockPtr->nextPtr; if (SockQueue(sockPtr, &now) == NS_TIMEOUT) { Push(sockPtr, waitPtr); } sockPtr = nextPtr; } } /* * If no connections are waiting, attempt to accept more. */ if (waitPtr == NULL) { /* * If configured, try to accept more than one request, under heavy load * this helps to process more requests */ SockState s; bool acceptMore = NS_TRUE; accepted = 0; while (acceptMore && accepted < drvPtr->acceptsize && drvPtr->queuesize < drvPtr->maxqueuesize ) { bool gotRequests = NS_FALSE; /* * Check for input data on all bind addresses. Stop checking, * when one round of checking on all addresses fails. 
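                 * At most "acceptsize" connections are accepted per poll()
                 * wakeup (and never more than "maxqueuesize" pending
                 * sockets), which reduces poll() round trips under load.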
*/ for (n = 0; n < nrBindaddrs; n++) { if ( PollIn(&pdata, drvPtr->pidx[n]) && (s = SockAccept(drvPtr, pdata.pfds[drvPtr->pidx[n]].fd, &sockPtr, &now)) != SOCK_ERROR) { switch (s) { case SOCK_SPOOL: drvPtr->stats.spooled++; if (SockSpoolerQueue(drvPtr, sockPtr) == 0) { Push(sockPtr, readPtr); } break; case SOCK_MORE: drvPtr->stats.partial++; SockTimeout(sockPtr, &now, &drvPtr->recvwait); Push(sockPtr, readPtr); break; case SOCK_READY: if (SockQueue(sockPtr, &now) == NS_TIMEOUT) { Push(sockPtr, waitPtr); } break; case SOCK_BADHEADER: NS_FALL_THROUGH; /* fall through */ case SOCK_BADREQUEST: NS_FALL_THROUGH; /* fall through */ case SOCK_CLOSE: NS_FALL_THROUGH; /* fall through */ case SOCK_CLOSETIMEOUT: NS_FALL_THROUGH; /* fall through */ case SOCK_ENTITYTOOLARGE: NS_FALL_THROUGH; /* fall through */ case SOCK_ERROR: NS_FALL_THROUGH; /* fall through */ case SOCK_READERROR: NS_FALL_THROUGH; /* fall through */ case SOCK_READTIMEOUT: NS_FALL_THROUGH; /* fall through */ case SOCK_SHUTERROR: NS_FALL_THROUGH; /* fall through */ case SOCK_TOOMANYHEADERS: NS_FALL_THROUGH; /* fall through */ case SOCK_WRITEERROR: NS_FALL_THROUGH; /* fall through */ case SOCK_WRITETIMEOUT: Ns_Fatal("driver: SockAccept returned: %s", GetSockStateName(s)); } accepted++; gotRequests = NS_TRUE; #ifdef __APPLE__ /* * On Darwin, the first accept() succeeds typically, but it is * useless to try, since this leads always to an EAGAIN */ acceptMore = NS_FALSE; break; #endif } } if (!gotRequests) { acceptMore = NS_FALSE; } } if (accepted > 1) { Ns_Log(Notice, "... sockAccept accepted %d connections", accepted); } } /* * Check for shut down and get the list of any closing or * keep-alive sockets. */ Ns_MutexLock(&drvPtr->lock); sockPtr = drvPtr->closePtr; drvPtr->closePtr = NULL; flags = drvPtr->flags; Ns_MutexUnlock(&drvPtr->lock); stopping = ((flags & DRIVER_SHUTDOWN) != 0u); /* * Update the timeout for each closing socket and add to the * close list if some data has been read from the socket * (i.e., it's not a closing keep-alive connection). */ while (sockPtr != NULL) { nextPtr = sockPtr->nextPtr; if (sockPtr->keep) { assert(drvPtr == sockPtr->drvPtr); Ns_Log(DriverDebug, "setting keepwait %ld.%6ld for socket %d", drvPtr->keepwait.sec, drvPtr->keepwait.usec, sockPtr->sock); SockTimeout(sockPtr, &now, &drvPtr->keepwait); Push(sockPtr, readPtr); } else { /* * Purely packet oriented drivers set on close the fd to * NS_INVALID_SOCKET. Since we cannot "shutdown" an UDP-socket * for writing, we bypass this call. */ assert(drvPtr == sockPtr->drvPtr); if (sockPtr->sock == NS_INVALID_SOCKET) { SockRelease(sockPtr, SOCK_CLOSE, errno); Ns_Log(DriverDebug, "DRIVER SockRelease: errno %d drvPtr->closewait %ld.%6ld", errno, drvPtr->closewait.sec, drvPtr->closewait.usec); } else if (shutdown(sockPtr->sock, SHUT_WR) != 0) { SockRelease(sockPtr, SOCK_SHUTERROR, errno); } else { Ns_Log(DriverDebug, "setting closewait %ld.%6ld for socket %d", drvPtr->closewait.sec, drvPtr->closewait.usec, sockPtr->sock); SockTimeout(sockPtr, &now, &drvPtr->closewait); Push(sockPtr, closePtr); } } sockPtr = nextPtr; } /* * Close the active drivers if shutdown is pending. 
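         * (Once the listen sockets are closed, no new connections can be
         * accepted; the loop then terminates and DRIVER_STOPPED is
         * signaled below.)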
*/ if (stopping) { for (n = 0; n < nrBindaddrs; n++) { ns_sockclose(drvPtr->listenfd[n]); drvPtr->listenfd[n] = NS_INVALID_SOCKET; } } } PollFree(&pdata); Ns_Log(Notice, "exiting"); Ns_MutexLock(&drvPtr->lock); drvPtr->flags |= DRIVER_STOPPED; Ns_CondBroadcast(&drvPtr->cond); Ns_MutexUnlock(&drvPtr->lock); } static void PollCreate(PollData *pdata) { NS_NONNULL_ASSERT(pdata != NULL); memset(pdata, 0, sizeof(PollData)); } static void PollFree(PollData *pdata) { NS_NONNULL_ASSERT(pdata != NULL); ns_free(pdata->pfds); memset(pdata, 0, sizeof(PollData)); } static void PollReset(PollData *pdata) { NS_NONNULL_ASSERT(pdata != NULL); pdata->nfds = 0u; pdata->timeout.sec = TIME_T_MAX; pdata->timeout.usec = 0; } static NS_POLL_NFDS_TYPE PollSet(PollData *pdata, NS_SOCKET sock, short type, const Ns_Time *timeoutPtr) { NS_NONNULL_ASSERT(pdata != NULL); /* * Grow the pfds array if necessary. */ if (unlikely(pdata->nfds >= pdata->maxfds)) { pdata->maxfds += 100u; pdata->pfds = ns_realloc(pdata->pfds, pdata->maxfds * sizeof(struct pollfd)); } /* * Set the next pollfd struct with this socket. */ pdata->pfds[pdata->nfds].fd = sock; pdata->pfds[pdata->nfds].events = type; pdata->pfds[pdata->nfds].revents = 0; /* * Check for new minimum timeout. */ if (timeoutPtr != NULL && Ns_DiffTime(timeoutPtr, &pdata->timeout, NULL) < 0) { pdata->timeout = *timeoutPtr; } return pdata->nfds++; } static int PollWait(const PollData *pdata, int timeout) { int n; NS_NONNULL_ASSERT(pdata != NULL); do { n = ns_poll(pdata->pfds, pdata->nfds, timeout); } while (n < 0 && errno == NS_EINTR); if (n < 0) { Ns_Fatal("PollWait: ns_poll() failed: %s", ns_sockstrerror(ns_sockerrno)); } return n; } /* *---------------------------------------------------------------------- * * RequestNew * * Prepares for reading from the socket, allocates a "Request" * struct for the given socket. It might be reused from the pool * or freshly allocated. Counterpart of RequestFree(). * * Results: * None * * Side effects: * None * *---------------------------------------------------------------------- */ static void RequestNew(Sock *sockPtr) { Request *reqPtr; bool reuseRequest = NS_TRUE; NS_NONNULL_ASSERT(sockPtr != NULL); /* * Try to get a request from the pool of allocated Requests. */ Ns_MutexLock(&reqLock); reqPtr = firstReqPtr; if (likely(reqPtr != NULL)) { firstReqPtr = reqPtr->nextPtr; } else { reuseRequest = NS_FALSE; } Ns_MutexUnlock(&reqLock); if (reuseRequest) { Ns_Log(DriverDebug, "RequestNew reuses a Request"); } /* * In case we failed, allocate a new Request. */ if (reqPtr == NULL) { Ns_Log(DriverDebug, "RequestNew gets a fresh Request"); reqPtr = ns_calloc(1u, sizeof(Request)); Tcl_DStringInit(&reqPtr->buffer); reqPtr->headers = Ns_SetCreate(NULL); } sockPtr->reqPtr = reqPtr; } /* *---------------------------------------------------------------------- * * RequestFree -- * * Free/clean a socket request structure. This routine is called * at the end of connection processing or on a socket which * times out during async read-ahead. Counterpart of RequestNew(). * * Results: * None. * * Side effects: * None. 
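 *      (When a keep-alive socket has pipelined input, the surplus bytes
 *      are moved to the start of the buffer and recorded in
 *      reqPtr->leftover for the next request.)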
* *---------------------------------------------------------------------- */ static void RequestFree(Sock *sockPtr) { Request *reqPtr; bool keep; NS_NONNULL_ASSERT(sockPtr != NULL); reqPtr = sockPtr->reqPtr; assert(reqPtr != NULL); Ns_Log(DriverDebug, "=== RequestFree cleans %p (avail %" PRIuz " keep %d length %" PRIuz " contentLength %" PRIuz ")", (void *)reqPtr, reqPtr->avail, sockPtr->keep, reqPtr->length, reqPtr->contentLength); keep = (sockPtr->keep) && (reqPtr->avail > reqPtr->contentLength); if (keep) { size_t leftover = reqPtr->avail - reqPtr->contentLength; const char *offset = reqPtr->buffer.string + ((size_t)reqPtr->buffer.length - leftover); Ns_Log(DriverDebug, "setting leftover to %" PRIuz " bytes", leftover); /* * Here it is safe to move the data in the buffer, although the * reqPtr->content might point to it, since we re-init the content. In * case the terminating null character was written to the end of the * previous buffer, we have to restore the first character. */ memmove(reqPtr->buffer.string, offset, leftover); if (reqPtr->savedChar != '\0') { reqPtr->buffer.string[0] = reqPtr->savedChar; } Tcl_DStringSetLength(&reqPtr->buffer, (int)leftover); LogBuffer(DriverDebug, "KEEP BUFFER", reqPtr->buffer.string, leftover); reqPtr->leftover = leftover; } else { /* * Clean large buffers in order to avoid memory growth on huge * uploads (when maxupload is huge) */ /*fprintf(stderr, "=== reuse buffer size %d avail %d dynamic %d\n", reqPtr->buffer.length, reqPtr->buffer.spaceAvl, reqPtr->buffer.string == reqPtr->buffer.staticSpace);*/ if (Tcl_DStringLength(&reqPtr->buffer) > 65536) { Tcl_DStringFree(&reqPtr->buffer); } else { /* * Reuse buffer, but set length to 0. */ Tcl_DStringSetLength(&reqPtr->buffer, 0); } reqPtr->leftover = 0u; } reqPtr->next = NULL; reqPtr->content = NULL; reqPtr->length = 0u; reqPtr->contentLength = 0u; reqPtr->expectedLength = 0u; reqPtr->chunkStartOff = 0u; reqPtr->chunkWriteOff = 0u; reqPtr->roff = 0u; reqPtr->woff = 0u; reqPtr->coff = 0u; reqPtr->avail = 0u; reqPtr->savedChar = '\0'; Ns_SetTrunc(reqPtr->headers, 0u); if (reqPtr->auth != NULL) { Ns_SetFree(reqPtr->auth); reqPtr->auth = NULL; } if (reqPtr->request.line != NULL) { Ns_Log(DriverDebug, "RequestFree calls Ns_ResetRequest on %p", (void*)&reqPtr->request); Ns_ResetRequest(&reqPtr->request); } else { Ns_Log(DriverDebug, "RequestFree does not call Ns_ResetRequest on %p", (void*)&reqPtr->request); } if (!keep) { /* * Push the reqPtr to the pool for reuse in other connections. */ sockPtr->reqPtr = NULL; Ns_MutexLock(&reqLock); reqPtr->nextPtr = firstReqPtr; firstReqPtr = reqPtr; Ns_MutexUnlock(&reqLock); } else { /* * Keep the partly cleaned up reqPtr associated with the connection. */ Ns_Log(DriverDebug, "=== KEEP request structure in sockPtr (don't push into the pool)"); } } /* *---------------------------------------------------------------------- * * SockQueue -- * * Puts socket into connection queue * * Results: * NS_OK if queued, * NS_ERROR if socket closed because of error * NS_TIMEOUT if queue is full * * Side effects: * None. * *---------------------------------------------------------------------- */ static Ns_ReturnCode SockQueue(Sock *sockPtr, const Ns_Time *timePtr) { Ns_ReturnCode result; NS_NONNULL_ASSERT(sockPtr != NULL); /* * Verify the conditions. Request struct must exist already. */ assert(sockPtr->reqPtr != NULL); SockSetServer(sockPtr); assert(sockPtr->servPtr != NULL); /* * Actual queueing, if not ready spool to the waiting list. 
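     * (NsQueueConn() returns NS_FALSE when the connection queue is full;
     * the caller reacts to NS_TIMEOUT by putting the socket on its
     * waiting list and retrying on the next spin.)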
*/ if (!NsQueueConn(sockPtr, timePtr)) { result = NS_TIMEOUT; } else { result = NS_OK; } return result; } /* *---------------------------------------------------------------------- * * SockPoll -- * * Arrange for given Sock to be monitored. * * Results: * None. * * Side effects: * Sock fd will be monitored for readability on next spin of * DriverThread. * *---------------------------------------------------------------------- */ static void SockPoll(Sock *sockPtr, short type, PollData *pdata) { NS_NONNULL_ASSERT(sockPtr != NULL); NS_NONNULL_ASSERT(pdata != NULL); sockPtr->pidx = PollSet(pdata, sockPtr->sock, type, &sockPtr->timeout); } /* *---------------------------------------------------------------------- * * SockTimeout -- * * Update socket with timeout * * Results: * None. * * Side effects: * Socket timeout will have nowPtr + timeout value * *---------------------------------------------------------------------- */ static void SockTimeout(Sock *sockPtr, const Ns_Time *nowPtr, const Ns_Time *timeout) { NS_NONNULL_ASSERT(sockPtr != NULL); sockPtr->timeout = *nowPtr; Ns_IncrTime(&sockPtr->timeout, timeout->sec, timeout->usec); } /* *---------------------------------------------------------------------- * * SockAccept -- * * Accept and initialize a new Sock in sockPtrPtr. * * Results: * SOCK_READY, SOCK_MORE, SOCK_SPOOL, * SOCK_ERROR + NULL sockPtr. * * Side effects: * Read-ahead may be attempted on new socket. * *---------------------------------------------------------------------- */ static SockState SockAccept(Driver *drvPtr, NS_SOCKET sock, Sock **sockPtrPtr, const Ns_Time *nowPtr) { Sock *sockPtr; SockState sockStatus; NS_DRIVER_ACCEPT_STATUS status; NS_NONNULL_ASSERT(drvPtr != NULL); sockPtr = SockNew(drvPtr); /* * Accept the new connection. */ status = DriverAccept(sockPtr, sock); if (unlikely(status == NS_DRIVER_ACCEPT_ERROR)) { sockStatus = SOCK_ERROR; /* * We reach the place frequently, especially on Linux, when we try to * accept multiple connection in one sweep. Usually, the errno is * EAGAIN. */ Ns_MutexLock(&drvPtr->lock); sockPtr->nextPtr = drvPtr->sockPtr; drvPtr->sockPtr = sockPtr; Ns_MutexUnlock(&drvPtr->lock); sockPtr = NULL; } else { sockPtr->acceptTime = *nowPtr; drvPtr->queuesize++; if (status == NS_DRIVER_ACCEPT_DATA) { /* * If there is already data present then read it without * polling if we're in async mode. */ if ((drvPtr->opts & NS_DRIVER_ASYNC) != 0u) { sockStatus = SockRead(sockPtr, 0, nowPtr); if ((int)sockStatus < 0) { Ns_Log(DriverDebug, "SockRead returned error %s", GetSockStateName(sockStatus)); SockRelease(sockPtr, sockStatus, errno); sockStatus = SOCK_ERROR; sockPtr = NULL; } } else { /* * Queue this socket without reading, NsGetRequest() in the * connection thread will perform actual reading of the * request. */ sockStatus = SOCK_READY; } } else if (status == NS_DRIVER_ACCEPT_QUEUE) { /* * We need to call RequestNew() to make sure socket has request * structure allocated, otherwise NsGetRequest() will call * SockRead() which is not what this driver wants. */ if (sockPtr->reqPtr == NULL) { RequestNew(sockPtr); } sockStatus = SOCK_READY; } else { sockStatus = SOCK_MORE; } } *sockPtrPtr = sockPtr; return sockStatus; } /* *---------------------------------------------------------------------- * * SockNew -- * * Allocate and/or initialize a Sock structure. Counterpart of * SockRelease(). * * Results: * SockPtr * * Side effects: * Potentially new memory is allocated. 
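 *      (Sock structures are kept on a per-driver free list protected by
 *      drvPtr->lock; a fresh allocation also reserves nsconf.nextSlsId
 *      slots for socket local storage.)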
 *
 *----------------------------------------------------------------------
 */
static Sock *
SockNew(Driver *drvPtr)
{
    Sock *sockPtr;

    NS_NONNULL_ASSERT(drvPtr != NULL);

    Ns_MutexLock(&drvPtr->lock);
    sockPtr = drvPtr->sockPtr;
    if (likely(sockPtr != NULL)) {
        drvPtr->sockPtr = sockPtr->nextPtr;
        sockPtr->keep   = NS_FALSE;
    }
    Ns_MutexUnlock(&drvPtr->lock);

    if (sockPtr == NULL) {
        size_t sockSize = sizeof(Sock) + (nsconf.nextSlsId * sizeof(Ns_Callback *));

        sockPtr = ns_calloc(1u, sockSize);
        sockPtr->drvPtr = drvPtr;
    } else {
        sockPtr->tfd    = 0;
        sockPtr->taddr  = NULL;
        sockPtr->flags  = 0u;
        sockPtr->arg    = NULL;
        sockPtr->recvSockState = NS_SOCK_NONE;
    }
    return sockPtr;
}

/*
 *----------------------------------------------------------------------
 *
 * SockRelease --
 *
 *      Close a socket and release the connection structure for
 *      re-use.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      None.
 *
 *----------------------------------------------------------------------
 */
static void
SockRelease(Sock *sockPtr, SockState reason, int err)
{
    Driver *drvPtr;

    NS_NONNULL_ASSERT(sockPtr != NULL);

    Ns_Log(DriverDebug, "SockRelease reason %s err %d (sock %d)",
           GetSockStateName(reason), err, sockPtr->sock);

    drvPtr = sockPtr->drvPtr;
    assert(drvPtr != NULL);

    SockError(sockPtr, reason, err);

    if (sockPtr->sock != NS_INVALID_SOCKET) {
        SockClose(sockPtr, (int)NS_FALSE);
    } else {
        Ns_Log(DriverDebug, "SockRelease bypasses SockClose, since we have an invalid socket");
    }
    NsSlsCleanup(sockPtr);

    drvPtr->queuesize--;

    if (sockPtr->reqPtr != NULL) {
        Ns_Log(DriverDebug, "SockRelease calls RequestFree");
        RequestFree(sockPtr);
    }

    Ns_MutexLock(&drvPtr->lock);
    sockPtr->nextPtr = drvPtr->sockPtr;
    drvPtr->sockPtr  = sockPtr;
    Ns_MutexUnlock(&drvPtr->lock);
}

/*
 *----------------------------------------------------------------------
 *
 * SockError --
 *
 *      Log an error message for the given socket.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      None.
 *
 *----------------------------------------------------------------------
 */
static void
SockError(Sock *sockPtr, SockState reason, int err)
{
    const char *errMsg = NULL;

    NS_NONNULL_ASSERT(sockPtr != NULL);

    switch (reason) {
    case SOCK_READY:
    case SOCK_SPOOL:
    case SOCK_MORE:
    case SOCK_CLOSE:
    case SOCK_CLOSETIMEOUT:
        /* This is normal, never log. */
        break;

    case SOCK_READTIMEOUT:
        /*
         * For this case, whether this is acceptable or not
         * depends upon whether this sock was a keep-alive
         * that we were allowing to 'linger'.
         */
        if (!sockPtr->keep) {
            errMsg = "Timeout during read";
        }
        break;

    case SOCK_WRITETIMEOUT:
        errMsg = "Timeout during write";
        break;

    case SOCK_READERROR:
        errMsg = "Unable to read request";
        break;

    case SOCK_WRITEERROR:
        errMsg = "Unable to write request";
        break;

    case SOCK_SHUTERROR:
        errMsg = "Unable to shutdown socket";
        break;

    case SOCK_BADREQUEST:
        errMsg = "Bad Request";
        SockSendResponse(sockPtr, 400, errMsg);
        break;

    case SOCK_TOOMANYHEADERS:
        errMsg = "Too Many Request Headers";
        SockSendResponse(sockPtr, 414, errMsg);
        break;

    case SOCK_BADHEADER:
        errMsg = "Invalid Request Header";
        SockSendResponse(sockPtr, 400, errMsg);
        break;

    case SOCK_ENTITYTOOLARGE:
        errMsg = "Request Entity Too Large";
        SockSendResponse(sockPtr, 413, errMsg);
        break;

    case SOCK_ERROR:
        errMsg = "Unknown Error";
        SockSendResponse(sockPtr, 400, errMsg);
        break;
    }
    if (errMsg != NULL) {
        char ipString[NS_IPADDR_SIZE];

        Ns_Log(DriverDebug, "SockError: %s (%d: %s), sock: %d, peer: [%s]:%d, request: %.99s",
               errMsg,
               err, (err != 0) ?
               strerror(err) : NS_EMPTY_STRING,
               sockPtr->sock,
               ns_inet_ntop((struct sockaddr *)&(sockPtr->sa), ipString, sizeof(ipString)),
               Ns_SockaddrGetPort((struct sockaddr *)&(sockPtr->sa)),
               (sockPtr->reqPtr != NULL) ? sockPtr->reqPtr->buffer.string : NS_EMPTY_STRING);
    }
}

/*
 *----------------------------------------------------------------------
 *
 * SockSendResponse --
 *
 *      Send an HTTP response directly to the client using the
 *      driver callback.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      May not send the complete response to the client
 *      if encountering a non-writable connection socket.
 *
 *----------------------------------------------------------------------
 */
static void
SockSendResponse(Sock *sockPtr, int code, const char *errMsg)
{
    struct iovec iov[3];
    char         header[32];
    ssize_t      sent, tosend;

    NS_NONNULL_ASSERT(sockPtr != NULL);
    NS_NONNULL_ASSERT(errMsg != NULL);

    snprintf(header, sizeof(header), "HTTP/1.0 %d ", code);
    iov[0].iov_base = header;
    iov[0].iov_len  = strlen(header);
    iov[1].iov_base = (void *)errMsg;
    iov[1].iov_len  = strlen(errMsg);
    iov[2].iov_base = (void *)"\r\n\r\n";
    iov[2].iov_len  = 4u;
    tosend = (ssize_t)(iov[0].iov_len + iov[1].iov_len + iov[2].iov_len);
    sent = NsDriverSend(sockPtr, iov, 3, 0u);
    if (sent < tosend) {
        Ns_Log(Warning, "Driver: partial write while sending response;"
               " %" PRIdz " < %" PRIdz, sent, tosend);
    }

    /*
     * In case we have a request structure, complain in the system log about
     * the bad request.
     */
    if (sockPtr->reqPtr != NULL) {
        Request    *reqPtr = sockPtr->reqPtr;
        const char *requestLine = (reqPtr->request.line != NULL) ? reqPtr->request.line : NS_EMPTY_STRING;

        (void)ns_inet_ntop((struct sockaddr *)&(sockPtr->sa), sockPtr->reqPtr->peer, NS_IPADDR_SIZE);
        /*
         * Check if the bad request looks like a TLS handshake. If yes, there
         * is no need to print out the received buffer.
         */
        if (requestLine[0] == (char)0x16 && requestLine[1] >= 3 && requestLine[2] == 1) {
            Ns_Log(Warning, "invalid request %d (%s) from peer %s: received TLS handshake on a non-TLS connection",
                   code, errMsg, reqPtr->peer);
        } else {
            Tcl_DString dsReqLine;

            Tcl_DStringInit(&dsReqLine);
            Ns_Log(Warning, "invalid request: %d (%s) from peer %s request '%s' offsets: read %" PRIuz
                   " write %" PRIuz " content %" PRIuz " avail %" PRIuz,
                   code, errMsg, reqPtr->peer,
                   Ns_DStringAppendPrintable(&dsReqLine, NS_FALSE, requestLine, strlen(requestLine)),
                   reqPtr->roff, reqPtr->woff, reqPtr->coff, reqPtr->avail);
            Tcl_DStringFree(&dsReqLine);

            LogBuffer(Warning, "REQ BUFFER", reqPtr->buffer.string, (size_t)reqPtr->buffer.length);
        }
    } else {
        Ns_Log(Warning, "invalid request: %d (%s) - no request information available",
               code, errMsg);
    }
}

/*
 *----------------------------------------------------------------------
 *
 * SockTrigger --
 *
 *      Wake up the DriverThread from a blocking ns_poll().
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      The DriverThread will wake up.
 *
 *----------------------------------------------------------------------
 */
static void
SockTrigger(NS_SOCKET sock)
{
    if (send(sock, NS_EMPTY_STRING, 1, 0) != 1) {
        const char *errstr = ns_sockstrerror(ns_sockerrno);

        Ns_Log(Error, "driver: trigger send() failed: %s", errstr);
    }
}

/*
 *----------------------------------------------------------------------
 *
 * SockClose --
 *
 *      Closes connection socket, does all cleanups. The input parameter
 *      "keep" might be NS_TRUE/NS_FALSE or -1 if undecided.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      None
 *
 *----------------------------------------------------------------------
 */
static void
SockClose(Sock *sockPtr, int keep)
{
    NS_NONNULL_ASSERT(sockPtr != NULL);

    if (keep != 0) {
        bool driverKeep = DriverKeep(sockPtr);
        keep = (int)driverKeep;
    }
    if (keep == (int)NS_FALSE) {
        DriverClose(sockPtr);
    }
    Ns_MutexLock(&sockPtr->drvPtr->lock);
    sockPtr->keep = (bool)keep;
    Ns_MutexUnlock(&sockPtr->drvPtr->lock);

    /*
     * Unconditionally remove temporary file, connection thread
     * should take care about very large uploads.
     */
    if (sockPtr->tfile != NULL) {
        unlink(sockPtr->tfile);
        ns_free(sockPtr->tfile);
        sockPtr->tfile = NULL;

        if (sockPtr->tfd > 0) {
            /*
             * Close and reset fd. The fd should be > 0 unless we are in error
             * conditions.
             */
            (void) ns_close(sockPtr->tfd);
        }
        sockPtr->tfd = 0;

    } else if (sockPtr->tfd > 0) {
        /*
         * This must be a fd allocated via Ns_GetTemp();
         */
        Ns_ReleaseTemp(sockPtr->tfd);
        sockPtr->tfd = 0;
    }

#ifndef _WIN32
    /*
     * Un-map temp file used for spooled content.
     */
    if (sockPtr->taddr != NULL) {
        munmap(sockPtr->taddr, (size_t)sockPtr->tsize);
        sockPtr->taddr = NULL;
    }
#endif
}

/*
 *----------------------------------------------------------------------
 *
 * ChunkedDecode --
 *
 *      Reads the content from the incoming request buffer and tries
 *      to decode chunked encoding parts. The function can be called
 *      repeatedly and with incomplete input and optionally overwrites
 *      the buffer with the decoded data. The decoded data is always
 *      shorter than the encoded one.
 *
 * Results:
 *      SOCK_READY when chunk was complete, SOCK_MORE when more data is
 *      required, or some error condition.
 *
 * Side effects:
 *      Updates the buffer if update is true (and adjusts
 *      reqPtr->chunkWriteOff). Always updates reqPtr->chunkStartOff to allow
 *      incremental operations.
 *
 *----------------------------------------------------------------------
 */
static SockState
ChunkedDecode(Request *reqPtr, bool update)
{
    const Tcl_DString *bufPtr;
    const char        *end, *chunkStart;
    SockState          result = SOCK_READY;

    NS_NONNULL_ASSERT(reqPtr != NULL);

    bufPtr = &reqPtr->buffer;
    end = bufPtr->string + bufPtr->length;
    chunkStart = bufPtr->string + reqPtr->chunkStartOff;

    while (reqPtr->chunkStartOff < (size_t)bufPtr->length) {
        char *p = strstr(chunkStart, "\r\n");
        long  chunkLength;

        if (p == NULL) {
            Ns_Log(DriverDebug, "ChunkedDecode: chunk did not find end-of-line");
            result = SOCK_MORE;
            break;
        }

        *p = '\0';
        chunkLength = strtol(chunkStart, NULL, 16);
        *p = '\r';

        if (chunkLength < 0) {
            Ns_Log(Warning, "ChunkedDecode: negative chunk length");
            result = SOCK_BADREQUEST;
            break;
        }

        if (p + 2 + chunkLength > end) {
            Ns_Log(DriverDebug, "ChunkedDecode: chunk length past end of buffer");
            result = SOCK_MORE;
            break;
        }
        if (update) {
            char *writeBuffer = bufPtr->string + reqPtr->chunkWriteOff;

            memmove(writeBuffer, p + 2, (size_t)chunkLength);
            reqPtr->chunkWriteOff += (size_t)chunkLength;
            *(writeBuffer + chunkLength) = '\0';
        }
        reqPtr->chunkStartOff += (size_t)(p - chunkStart) + 4u + (size_t)chunkLength;
        chunkStart = bufPtr->string + reqPtr->chunkStartOff;
    }

    return result;
}

/*
 *----------------------------------------------------------------------
 *
 * SockRead --
 *
 *      Read content from the given Sock, processing the input as
 *      necessary. This is the core callback routine designed to
 *      either be called repeatedly within the DriverThread during
 *      an async read-ahead or in a blocking loop in NsGetRequest()
 *      at the start of connection processing.
 *
 * Results:
 *      SOCK_READY: Request is ready for processing.
 *      SOCK_MORE:  More input is required.
* SOCK_ERROR: Client drop or timeout. * SOCK_SPOOL: Pass input handling to spooler * SOCK_CLOSE: peer closed connection * SOCK_BADREQUEST * SOCK_BADHEADER * SOCK_TOOMANYHEADERS * * Side effects: * The Request structure will be built up for use by the * connection thread. Also, before returning SOCK_READY, * the next byte to read mark and bytes available are set * to the beginning of the content, just beyond the headers. * * Contents may be spooled into temp file and mmap-ed * *---------------------------------------------------------------------- */ static SockState SockRead(Sock *sockPtr, int spooler, const Ns_Time *timePtr) { const Driver *drvPtr; Request *reqPtr; Tcl_DString *bufPtr; struct iovec buf; char tbuf[16384]; size_t buflen, nread; ssize_t n; SockState resultState; NS_NONNULL_ASSERT(sockPtr != NULL); drvPtr = sockPtr->drvPtr; tbuf[0] = '\0'; /* * In case of "keepwait", the accept time is not meaningful and * reset to 0. In such cases, update "acceptTime" to the actual * begin of a request. This part is intended for async drivers. */ if (sockPtr->acceptTime.sec == 0) { assert(timePtr != NULL); sockPtr->acceptTime = *timePtr; } /* * Initialize request structure if needed. */ if (sockPtr->reqPtr == NULL) { RequestNew(sockPtr); } /* * On the first read, attempt to read-ahead "bufsize" bytes. * Otherwise, read only the number of bytes left in the * content. */ reqPtr = sockPtr->reqPtr; bufPtr = &reqPtr->buffer; if (reqPtr->length == 0u) { nread = drvPtr->bufsize; } else { nread = reqPtr->length - reqPtr->avail; } /* * Grow the buffer to include space for the next bytes. */ buflen = (size_t)bufPtr->length; n = (ssize_t)(buflen + nread); if (unlikely(n > drvPtr->maxinput)) { n = (ssize_t)drvPtr->maxinput; nread = (size_t)n - buflen; if (nread == 0u) { Ns_Log(DriverDebug, "SockRead: maxinput reached %" TCL_LL_MODIFIER "d", drvPtr->maxinput); return SOCK_ERROR; } } /* * Use temp file for content larger than "readahead" bytes. */ #ifndef _WIN32 if (reqPtr->coff > 0u /* We are in the content part (after the header) */ && !reqPtr->chunkStartOff /* Never spool chunked encoded data since we decode in memory */ && reqPtr->length > (size_t)drvPtr->readahead /* We need more data */ && sockPtr->tfd <= 0 /* We have no spool fd */ ) { const DrvSpooler *spPtr = &drvPtr->spooler; Ns_Log(DriverDebug, "SockRead: require tmp file for content spooling (length %" PRIuz" > readahead " "%" TCL_LL_MODIFIER "d)", reqPtr->length, drvPtr->readahead); /* * In driver mode send this Sock to the spooler thread if * it is running */ if (spooler == 0 && spPtr->threads > 0) { return SOCK_SPOOL; } /* * If "maxupload" is specified and content size exceeds the configured * values, spool uploads into normal temp file (not deleted). We do * not want to map such large files into memory. */ if (drvPtr->maxupload > 0 && reqPtr->length > (size_t)drvPtr->maxupload ) { size_t tfileLength = strlen(drvPtr->uploadpath) + 16u; sockPtr->tfile = ns_malloc(tfileLength); snprintf(sockPtr->tfile, tfileLength, "%s/%d.XXXXXX", drvPtr->uploadpath, sockPtr->sock); sockPtr->tfd = ns_mkstemp(sockPtr->tfile); if (sockPtr->tfd == NS_INVALID_FD) { Ns_Log(Error, "SockRead: cannot create spool file with template '%s': %s", sockPtr->tfile, strerror(errno)); } } else { /* * Get a temporary fd. These FDs are used for mmapping. 
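             * (Ns_GetTemp() returns a pooled temp-file fd; its counterpart
             * Ns_ReleaseTemp() is called from SockClose() during cleanup.)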
*/ sockPtr->tfd = Ns_GetTemp(); } if (unlikely(sockPtr->tfd == NS_INVALID_FD)) { Ns_Log(DriverDebug, "SockRead: spool fd invalid"); return SOCK_ERROR; } n = (ssize_t)((size_t)bufPtr->length - reqPtr->coff); assert(n >= 0); if (ns_write(sockPtr->tfd, bufPtr->string + reqPtr->coff, (size_t)n) != n) { return SOCK_WRITEERROR; } Tcl_DStringSetLength(bufPtr, 0); } #endif if (sockPtr->tfd > 0) { buf.iov_base = tbuf; buf.iov_len = MIN(nread, sizeof(tbuf)); } else { Tcl_DStringSetLength(bufPtr, (int)(buflen + nread)); buf.iov_base = bufPtr->string + reqPtr->woff; buf.iov_len = nread; } if (reqPtr->leftover > 0u) { /* * There is some leftover in the buffer, don't read but take the * leftover instead as input. */ n = (ssize_t)reqPtr->leftover; reqPtr->leftover = 0u; buflen = 0u; Ns_Log(DriverDebug, "SockRead receive from leftover %" PRIdz " bytes", n); } else { /* * Receive actually some data from the driver. */ n = NsDriverRecv(sockPtr, &buf, 1, NULL); Ns_Log(DriverDebug, "SockRead receive from network %" PRIdz " bytes sockState %.2x", n, (int)sockPtr->recvSockState); } { Ns_SockState nsSockState = sockPtr->recvSockState; /* * The nsSockState has one of the following values, when provided: * * NS_SOCK_READ, NS_SOCK_DONE, NS_SOCK_AGAIN, NS_SOCK_EXCEPTION, * NS_SOCK_TIMEOUT */ switch (nsSockState) { case NS_SOCK_TIMEOUT: NS_FALL_THROUGH; /* fall through */ case NS_SOCK_EXCEPTION: return SOCK_READERROR; case NS_SOCK_AGAIN: Tcl_DStringSetLength(bufPtr, (int)buflen); return SOCK_MORE; case NS_SOCK_DONE: return SOCK_CLOSE; case NS_SOCK_READ: break; case NS_SOCK_CANCEL: NS_FALL_THROUGH; /* fall through */ case NS_SOCK_EXIT: NS_FALL_THROUGH; /* fall through */ case NS_SOCK_INIT: NS_FALL_THROUGH; /* fall through */ case NS_SOCK_WRITE: Ns_Log(Warning, "SockRead received unexpected state %.2x from driver", nsSockState); return SOCK_READERROR; case NS_SOCK_NONE: /* * Old style state management based on "n" and "errno", which is * more fragile. We keep there for old-style drivers. */ if (n < 0) { Tcl_DStringSetLength(bufPtr, (int)buflen); /* * The driver returns -1 when the peer closed the connection, but * clears the errno such we can distinguish from error conditions. */ if (errno == 0) { return SOCK_CLOSE; } return SOCK_READERROR; } if (n == 0) { Tcl_DStringSetLength(bufPtr, (int)buflen); return SOCK_MORE; } break; } } if (sockPtr->tfd > 0) { if (ns_write(sockPtr->tfd, tbuf, (size_t)n) != n) { return SOCK_WRITEERROR; } } else { Tcl_DStringSetLength(bufPtr, (int)(buflen + (size_t)n)); } reqPtr->woff += (size_t)n; reqPtr->avail += (size_t)n; /* * This driver needs raw buffer, it is binary or non-HTTP request */ if ((drvPtr->opts & NS_DRIVER_NOPARSE) != 0u) { return SOCK_READY; } resultState = SockParse(sockPtr); return resultState; } /*---------------------------------------------------------------------- * * LogBuffer -- * * Debug function to output buffer content when the provided severity is * enabled. The function prints just visible characters and space as is * and prints the hex code otherwise. * * Results: * None. 
 *
 * Side effects:
 *      Writes to error.log
 *
 *----------------------------------------------------------------------
 */
static void
LogBuffer(Ns_LogSeverity severity, const char *msg, const char *buffer, size_t len)
{
    Tcl_DString ds;

    NS_NONNULL_ASSERT(msg != NULL);
    NS_NONNULL_ASSERT(buffer != NULL);

    if (Ns_LogSeverityEnabled(severity)) {
        Tcl_DStringInit(&ds);
        Tcl_DStringAppend(&ds, msg, -1);
        Tcl_DStringAppend(&ds, ": ", 2);
        (void)Ns_DStringAppendPrintable(&ds, NS_FALSE, buffer, len);

        Ns_Log(severity, "%s", ds.string);
        Tcl_DStringFree(&ds);
    }
}

/*----------------------------------------------------------------------
 *
 * EndOfHeader --
 *
 *      Function to be called (once), when end of header is reached. At this
 *      time, all request header lines were parsed already correctly.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      Update various reqPtr fields and signal certain facts and error
 *      conditions via sockPtr->flags. In error conditions, sockPtr->keep is
 *      set to NS_FALSE.
 *
 *----------------------------------------------------------------------
 */
static size_t
EndOfHeader(Sock *sockPtr)
{
    Request    *reqPtr;
    const char *s;

    NS_NONNULL_ASSERT(sockPtr != NULL);

    reqPtr = sockPtr->reqPtr;
    assert(reqPtr != NULL);

    reqPtr->chunkStartOff = 0u;

    /*
     * Check for "expect: 100-continue" and clear flag in case we have
     * pipelining.
     */
    sockPtr->flags &= ~(NS_CONN_CONTINUE);
    s = Ns_SetIGet(reqPtr->headers, "expect");
    if (s != NULL) {
        if (*s == '1' && *(s+1) == '0' && *(s+2) == '0' && *(s+3) == '-') {
            char *dup = ns_strdup(s+4);

            Ns_StrToLower(dup);
            if (STREQ(dup, "continue")) {
                sockPtr->flags |= NS_CONN_CONTINUE;
            }
            ns_free(dup);
        }
    }

    /*
     * Handle content-length, which might be provided or not.
     * Clear length specific error flags.
     */
    sockPtr->flags &= ~(NS_CONN_ENTITYTOOLARGE);
    s = Ns_SetIGet(reqPtr->headers, "content-length");
    if (s == NULL) {
        s = Ns_SetIGet(reqPtr->headers, "Transfer-Encoding");

        if (s != NULL) {
            /* Lower case is in the standard, capitalized by macOS */
            if (STREQ(s, "chunked") || STREQ(s, "Chunked")) {
                Tcl_WideInt expected;

                reqPtr->chunkStartOff = reqPtr->roff;
                reqPtr->chunkWriteOff = reqPtr->chunkStartOff;
                reqPtr->contentLength = 0u;

                /*
                 * We need reqPtr->expectedLength for safely terminating the
                 * read loop.
                 */
                s = Ns_SetIGet(reqPtr->headers, "X-Expected-Entity-Length");

                if ((s != NULL)
                    && (Ns_StrToWideInt(s, &expected) == NS_OK)
                    && (expected > 0) ) {
                    reqPtr->expectedLength = (size_t)expected;
                }
                s = NULL;
            }
        }
    }

    /*
     * In case a valid and meaningful content length was provided, the string
     * with the content length ("s") is not NULL.
     */
    if (s != NULL) {
        Tcl_WideInt length;

        if ((Ns_StrToWideInt(s, &length) == NS_OK) && (length > 0)) {
            reqPtr->length = (size_t)length;
            /*
             * Handle too large input requests.
             */
            if (reqPtr->length > (size_t)sockPtr->drvPtr->maxinput) {
                Ns_Log(Warning, "SockParse: request too large, length=%"
                       PRIdz ", maxinput=%" TCL_LL_MODIFIER "d",
                       reqPtr->length, sockPtr->drvPtr->maxinput);

                sockPtr->keep = NS_FALSE;
                sockPtr->flags |= NS_CONN_ENTITYTOOLARGE;
            }
            reqPtr->contentLength = (size_t)length;
        }
    }

    /*
     * Compression format handling: parse information from request headers
     * indicating allowed compression formats for quick access.
     *
     * Clear compression accepted flag
     */
    sockPtr->flags &= ~(NS_CONN_ZIPACCEPTED|NS_CONN_BROTLIACCEPTED);

    s = Ns_SetIGet(reqPtr->headers, "Accept-Encoding");
    if (s != NULL) {
        bool gzipAccept, brotliAccept;

        /*
         * Get allowed compression formats from "accept-encoding" headers.
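         * For example, a header such as "Accept-Encoding: gzip, br" sets
         * both gzipAccept and brotliAccept; the Range check below decides
         * whether the flags are kept.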
         */
        NsParseAcceptEncoding(reqPtr->request.version, s, &gzipAccept, &brotliAccept);
        if (gzipAccept || brotliAccept) {
            /*
             * Don't allow compression formats for Range requests.
             */
            s = Ns_SetIGet(reqPtr->headers, "Range");
            if (s == NULL) {
                if (gzipAccept) {
                    sockPtr->flags |= NS_CONN_ZIPACCEPTED;
                }
                if (brotliAccept) {
                    sockPtr->flags |= NS_CONN_BROTLIACCEPTED;
                }
            }
        }
    }

    /*
     * Set up request length for spooling and further read operations
     */
    if (reqPtr->contentLength != 0u) {
        /*
         * Content-Length was provided, use it
         */
        reqPtr->length = reqPtr->contentLength;
    }

    return reqPtr->roff;
}

/*----------------------------------------------------------------------
 *
 * SockParse --
 *
 *      Construct the given conn by parsing input buffer until end of
 *      headers. Return SOCK_READY when finished parsing.
 *
 * Results:
 *      SOCK_READY:  Conn is ready for processing.
 *      SOCK_MORE:   More input is required.
 *      SOCK_ERROR:  Malformed request.
 *      SOCK_BADREQUEST
 *      SOCK_BADHEADER
 *      SOCK_TOOMANYHEADERS
 *
 * Side effects:
 *      An Ns_Request and/or Ns_Set may be allocated.
 *      Ns_Conn buffer management offsets updated.
 *
 *----------------------------------------------------------------------
 */
static SockState
SockParse(Sock *sockPtr)
{
    const Tcl_DString *bufPtr;
    const Driver      *drvPtr;
    Request           *reqPtr;
    char               save;
    SockState          result;

    NS_NONNULL_ASSERT(sockPtr != NULL);
    drvPtr = sockPtr->drvPtr;

    NsUpdateProgress((Ns_Sock *) sockPtr);

    reqPtr = sockPtr->reqPtr;
    bufPtr = &reqPtr->buffer;

    /*
     * Scan lines (header) until start of content (body-part)
     */
    while (reqPtr->coff == 0u) {
        char   *s, *e;
        size_t  cnt;

        /*
         * Find the next header line.
         */
        s = bufPtr->string + reqPtr->roff;
        e = memchr(s, INTCHAR('\n'), reqPtr->avail);

        if (unlikely(e == NULL)) {
            /*
             * Input not yet newline terminated - request more data.
             */
            return SOCK_MORE;
        }

        /*
         * Check for max single line overflows.
         *
         * Previous versions of the driver returned an error code directly
         * here, which was handled via an HTTP error message provided via
         * SockError(). However, the SockError() handling closes the
         * connection immediately. This has the consequence that the HTTP
         * client might never see the error message, since the request was
         * not yet fully transmitted, but it will see a "broken pipe: 13"
         * message instead. We read now the full request and return the
         * message via ConnRunRequest().
         */
        if (unlikely((e - s) > drvPtr->maxline)) {
            sockPtr->keep = NS_FALSE;
            if (reqPtr->request.line == NULL) {
                Ns_Log(DriverDebug, "SockParse: maxline of %d bytes reached",
                       drvPtr->maxline);
                sockPtr->flags = NS_CONN_REQUESTURITOOLONG;
                Ns_Log(Warning, "request line is too long (%d bytes)", (int)(e - s));
            } else {
                sockPtr->flags = NS_CONN_LINETOOLONG;
                Ns_Log(Warning, "request header line is too long (%d bytes)", (int)(e - s));
            }
        }

        /*
         * Update next read pointer to end of this line.
         */
        cnt = (size_t)(e - s) + 1u;
        reqPtr->roff  += cnt;
        reqPtr->avail -= cnt;

        /*
         * Adjust end pointer to the last content character before the line
         * terminator.
         */
        if (likely(e > s) && likely(*(e-1) == '\r')) {
            --e;
        }

        /*
         * Check for end of headers in case we have not done it yet.
         */
        if (unlikely(e == s) && (reqPtr->coff == 0u)) {
            /*
             * We are at end of headers.
             */
            reqPtr->coff = EndOfHeader(sockPtr);

            /*
             * In case the client sent "expect: 100-continue", report back
             * that everything is fine with the headers.
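             * (The interim reply sent below consists of the bare status
             * line "HTTP/1.1 100 Continue" followed by an empty line; the
             * final response is generated later by the connection thread.)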
 */
            if ((sockPtr->flags & NS_CONN_CONTINUE) != 0u) {

                Ns_Log(Ns_LogRequestDebug, "honoring 100-continue");

                /*
                 * In case the request entity (body) was too large, we can
                 * return the error message immediately, when the client has
                 * flagged this via "Expect:". Otherwise we have to read the
                 * full request (although it is too large) to drain the
                 * channel; otherwise, the server might close the connection
                 * *before* it has received the full request with its body
                 * from the client. We just keep the flag and let
                 * Ns_ConnRunRequest() handle the error message.
                 */
                if ((sockPtr->flags & NS_CONN_ENTITYTOOLARGE) != 0u) {
                    Ns_Log(Ns_LogRequestDebug, "100-continue: entity too large");

                    return SOCK_ENTITYTOOLARGE;

                    /*
                     * We have no other error message flagged (future ones
                     * have to be handled here).
                     */
                } else {
                    struct iovec iov[1];
                    ssize_t      sent;

                    /*
                     * Reply with "100 continue".
                     */
                    Ns_Log(Ns_LogRequestDebug, "100-continue: reply CONTINUE");

                    iov[0].iov_base = (char *)"HTTP/1.1 100 Continue\r\n\r\n";
                    iov[0].iov_len = strlen(iov[0].iov_base);

                    sent = Ns_SockSendBufs((Ns_Sock *)sockPtr, iov, 1, NULL, 0u);
                    if (sent != (ssize_t)iov[0].iov_len) {
                        Ns_Log(Warning, "could not deliver response: 100 Continue");
                        /*
                         * Should we bail out here?
                         */
                    }
                }
            }
        } else {
            /*
             * We have the request-line or a header line to process.
             */
            save = *e;
            *e = '\0';

            if (unlikely(reqPtr->request.line == NULL)) {
                /*
                 * There is no request-line set. The received line must be
                 * the request-line.
                 */
                Ns_Log(DriverDebug, "SockParse (%d): parse request line <%s>",
                       sockPtr->sock, s);

                if (Ns_ParseRequest(&reqPtr->request, s) == NS_ERROR) {
                    /*
                     * Invalid request.
                     */
                    return SOCK_BADREQUEST;
                }

                /*
                 * HTTP 0.9 did not have an HTTP-version number or request
                 * headers, nor an empty line terminating the request header.
                 */
                if (unlikely(reqPtr->request.version < 1.0)) {
                    /*
                     * Pre-HTTP/1.0 request.
                     */
                    reqPtr->coff = reqPtr->roff;
                    Ns_Log(Notice, "pre-HTTP/1.0 request <%s>", reqPtr->request.line);
                }

            } else if (Ns_ParseHeader(reqPtr->headers, s, Preserve) != NS_OK) {
                /*
                 * Invalid header.
                 */
                return SOCK_BADHEADER;

            } else {
                /*
                 * Check for the max number of headers.
                 */
                if (unlikely(Ns_SetSize(reqPtr->headers) > (size_t)drvPtr->maxheaders)) {
                    Ns_Log(DriverDebug, "SockParse (%d): maxheaders limit of %d reached",
                           sockPtr->sock, drvPtr->maxheaders);
                    return SOCK_TOOMANYHEADERS;
                }
            }

            *e = save;
        }
    }

    if (unlikely(reqPtr->request.line == NULL)) {
        /*
         * We are at end of headers, but we have not parsed a request line
         * (maybe just two linefeeds).
         */
        return SOCK_BADREQUEST;
    }

    /*
     * We are in the request body.
     */
    assert(reqPtr->coff > 0u);
    assert(reqPtr->request.line != NULL);

    /*
     * Check if all content has arrived.
     */
    Ns_Log(Debug, "=== length < avail (length %" PRIuz
           ", avail %" PRIuz ") tfd %d tfile %p chunkStartOff %" PRIuz,
           reqPtr->length, reqPtr->avail, sockPtr->tfd,
           (void *)sockPtr->tfile, reqPtr->chunkStartOff);

    if (reqPtr->chunkStartOff != 0u) {
        /*
         * Chunked encoding was provided.
         */
        SockState chunkState;
        size_t    currentContentLength;

        chunkState = ChunkedDecode(reqPtr, NS_TRUE);
        currentContentLength = reqPtr->chunkWriteOff - reqPtr->coff;

        /*
         * A chunk might be complete, but it might not be the last
         * chunk from the client. The best thing would be to be able
         * to read until EOF here. In cases where the (optional)
         * "expectedLength" was provided by the client, we terminate
         * depending on that information.
         */
        if ((chunkState == SOCK_MORE)
            || (reqPtr->expectedLength != 0u && currentContentLength < reqPtr->expectedLength)) {
            /*
             * ChunkedDecode wants more data.
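             *
             * (For reference, a chunked-encoded body has the on-wire form
             * "5\r\nhello\r\n0\r\n\r\n" per RFC 7230; SOCK_MORE here means
             * the buffer currently ends mid-chunk or before the last chunk.)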
 */
            return SOCK_MORE;

        } else if (chunkState != SOCK_READY) {
            return chunkState;
        }
        /*
         * ChunkedDecode has enough data.
         */
        reqPtr->length = (size_t)currentContentLength;
    }

    if (reqPtr->avail < reqPtr->length) {
        Ns_Log(DriverDebug, "SockRead wait for more input");
        /*
         * Wait for more input.
         */
        return SOCK_MORE;
    }

    Ns_Log(Dev, "=== all required data is available (avail %" PRIuz", length %" PRIuz ", "
           "readahead %" TCL_LL_MODIFIER "d maxupload %" TCL_LL_MODIFIER "d) tfd %d",
           reqPtr->avail, reqPtr->length, drvPtr->readahead, drvPtr->maxupload,
           sockPtr->tfd);

    /*
     * We have all required data in the receive buffer or in a temporary file.
     *
     * - Uploads > "readahead": these are put into temporary files.
     *
     * - Uploads > "maxupload": these are put into temporary files
     *   without mmapping, no content parsing will be performed in memory.
     */
    result = SOCK_READY;

    if (sockPtr->tfile != NULL) {
        reqPtr->content = NULL;
        reqPtr->next = NULL;
        reqPtr->avail = 0u;
        Ns_Log(DriverDebug, "content spooled to file: size %" PRIdz
               ", file %s", reqPtr->length, sockPtr->tfile);
        /*
         * Nothing more to do, return via SOCK_READY;
         */
    } else {

        /*
         * Uploads < "maxupload" are spooled to files and mmapped in order to
         * provide the usual interface via [ns_conn content].
         */
        if (sockPtr->tfd > 0) {
#ifdef _WIN32
            /*
             * For _WIN32, tfd should never be set, since tfd-spooling is not
             * implemented for windows.
             */
            assert(0);
#else
            int prot = PROT_READ | PROT_WRITE;
            /*
             * Add a byte to make sure that the string termination with '\0'
             * below always falls into the mmapped area. On some older OSes
             * this might otherwise lead to crashes when hitting page
             * boundaries.
             */
            ssize_t rc = ns_write(sockPtr->tfd, "\0", 1);
            if (rc == -1) {
                Ns_Log(Error, "socket: could not append terminating 0-byte");
            }
            sockPtr->tsize = reqPtr->length + 1;
            sockPtr->taddr = mmap(0, sockPtr->tsize, prot, MAP_PRIVATE,
                                  sockPtr->tfd, 0);
            if (sockPtr->taddr == MAP_FAILED) {
                sockPtr->taddr = NULL;
                result = SOCK_ERROR;
            } else {
                reqPtr->content = sockPtr->taddr;
                Ns_Log(Debug, "content spooled to mmapped file: readahead=%"
                       TCL_LL_MODIFIER "d, filesize=%" PRIdz,
                       drvPtr->readahead, sockPtr->tsize);
            }
#endif
        } else {
            /*
             * Set the content to the beginning of the remaining buffer
             * (content offset). This happens as well when
             * reqPtr->contentLength is 0, but it is needed for chunked input
             * processing.
             */
            reqPtr->content = bufPtr->string + reqPtr->coff;
        }
        reqPtr->next = reqPtr->content;

        /*
         * Add a terminating null character. The content might be from the
         * receive buffer (Tcl_DString) or from the mmapped file. Non-mmapped
         * files are handled above.
         */
        if (reqPtr->length > 0u) {
            Ns_Log(DriverDebug, "SockRead adds null terminating character at content[%" PRIuz "]",
                   reqPtr->length);

            reqPtr->savedChar = reqPtr->content[reqPtr->length];
            reqPtr->content[reqPtr->length] = '\0';

            if (sockPtr->taddr == NULL) {
                LogBuffer(DriverDebug, "UPDATED BUFFER",
                          sockPtr->reqPtr->buffer.string, (size_t)reqPtr->buffer.length);
            }
        }
    }

    return result;
}

/*
 *----------------------------------------------------------------------
 *
 * SockSetServer --
 *
 *      Set the virtual server from the driver context or the Host header.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *
 *      Updates sockPtr->servPtr. In case an invalid server is set, or the
 *      required Host header field of HTTP/1.1 is missing, the HTTP method is
 *      set to the constant "BAD".
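 *
 *      Illustrative mapping example (hypothetical configuration): with
 *      virtual-host entries "example.com" -> server A and "example.org" ->
 *      server B, a request carrying "Host: EXAMPLE.COM." is normalized
 *      below (trailing dot stripped, lowercased) and resolved to server A;
 *      without a match, the driver's default mapping is used.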
 *
 *----------------------------------------------------------------------
 */
static void
SockSetServer(Sock *sockPtr)
{
    char     *host;
    Request  *reqPtr;
    bool      bad_request = NS_FALSE;
    Driver   *drvPtr;

    NS_NONNULL_ASSERT(sockPtr != NULL);

    reqPtr = sockPtr->reqPtr;
    assert(reqPtr != NULL);

    drvPtr = sockPtr->drvPtr;
    assert(drvPtr != NULL);

    sockPtr->servPtr  = drvPtr->servPtr;
    sockPtr->location = drvPtr->location;

    host = Ns_SetIGet(reqPtr->headers, "Host");
    Ns_Log(DriverDebug, "SockSetServer host '%s' request line '%s'", host, reqPtr->request.line);

    if (unlikely((host == NULL) && (reqPtr->request.version >= 1.1))) {
        /*
         * HTTP/1.1 requires a host header.
         */
        Ns_Log(Notice, "request header field \"Host\" is missing in HTTP/1.1 request: \"%s\"\n",
               reqPtr->request.line);
        bad_request = NS_TRUE;
    }

    if (sockPtr->servPtr == NULL) {
        const ServerMap *mapPtr = NULL;

        if (host != NULL) {
            const Tcl_HashEntry *hPtr;
            size_t               hostLength = strlen(host);

            /*
             * Remove a trailing dot of the host header field, since RFC 2396
             * allows fully qualified "absolute" DNS names in host fields
             * (see e.g. its §3.2.2).
             */
            if (hostLength > 0u && host[hostLength - 1] == '.') {
                host[hostLength - 1] = '\0';
            }

            /*
             * Convert the provided host header field to lower case before
             * the hash lookup.
             */
            Ns_StrToLower(host);

            hPtr = Tcl_FindHashEntry(&drvPtr->hosts, host);
            Ns_Log(DriverDebug, "SockSetServer driver '%s' host '%s' => %p",
                   drvPtr->moduleName, host, (void*)hPtr);

            if (hPtr != NULL) {
                /*
                 * A request with the provided host header field could be
                 * resolved against a certain server.
                 */
                mapPtr = Tcl_GetHashValue(hPtr);
            } else {
                /*
                 * Host header field content is not found in the mapping table.
                 */
                Ns_Log(DriverDebug,
                       "cannot locate host header content '%s' in virtual hosts "
                       "table of driver '%s', fall back to default '%s'",
                       host, drvPtr->moduleName,
                       drvPtr->defMapPtr->location);

                if (Ns_LogSeverityEnabled(DriverDebug)) {
                    Tcl_HashEntry  *hPtr2;
                    Tcl_HashSearch  search;

                    hPtr2 = Tcl_FirstHashEntry(&drvPtr->hosts, &search);
                    while (hPtr2 != NULL) {
                        Ns_Log(Notice, "... host entry: '%s'\n",
                               (char *)Tcl_GetHashKey(&drvPtr->hosts, hPtr2));
                        hPtr2 = Tcl_NextHashEntry(&search);
                    }
                }
            }
        }
        if (mapPtr == NULL) {
            /*
             * Could not look up the virtual host; get the default mapping
             * from the driver.
             */
            mapPtr = drvPtr->defMapPtr;
        }
        if (mapPtr != NULL) {
            sockPtr->servPtr  = mapPtr->servPtr;
            sockPtr->location = mapPtr->location;
        }
        if (sockPtr->servPtr == NULL) {
            Ns_Log(Warning, "cannot determine server for request: \"%s\" (host \"%s\")\n",
                   reqPtr->request.line, host);
            bad_request = NS_TRUE;
        }
    }

    if (unlikely(bad_request)) {
        Ns_Log(DriverDebug, "SockSetServer sets method to BAD");
        ns_free((char *)reqPtr->request.method);
        reqPtr->request.method = ns_strdup("BAD");
    }
}

/*
 *======================================================================
 *  Spooler Thread: Receive asynchronously from the client socket
 *======================================================================
 */

/*
 *----------------------------------------------------------------------
 *
 * SpoolerThread --
 *
 *      Spooling socket driver thread.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      Connections are accepted on the configured listen sockets,
 *      placed on the run queue to be serviced, and gracefully
 *      closed when done. Async sockets have the entire request read
 *      here before queuing as well.
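 *
 *      Rough sketch of the loop below: poll the trigger pipe and all
 *      read-pending sockets, drain readable sockets via SockRead(),
 *      queue complete requests via NsQueueConn(), pull newly submitted
 *      sockets from queuePtr->sockPtr under the queue lock, and exit
 *      on shutdown.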
* *---------------------------------------------------------------------- */ static void SpoolerThread(void *arg) { SpoolerQueue *queuePtr = (SpoolerQueue*)arg; char charBuffer[1]; int pollTimeout; bool stopping; Sock *sockPtr, *nextPtr, *waitPtr, *readPtr; Ns_Time now, diff; const Driver *drvPtr; PollData pdata; Ns_ThreadSetName("-spooler%d-", queuePtr->id); queuePtr->threadName = Ns_ThreadGetName(); /* * Loop forever until signaled to shut down and all * connections are complete and gracefully closed. */ Ns_Log(Notice, "spooler%d: accepting connections", queuePtr->id); PollCreate(&pdata); Ns_GetTime(&now); waitPtr = readPtr = NULL; stopping = NS_FALSE; while (!stopping) { /* * If there are any read sockets, set the bits * and determine the minimum relative timeout. */ PollReset(&pdata); (void)PollSet(&pdata, queuePtr->pipe[0], (short)POLLIN, NULL); if (readPtr == NULL) { pollTimeout = 30 * 1000; } else { sockPtr = readPtr; while (sockPtr != NULL) { SockPoll(sockPtr, (short)POLLIN, &pdata); sockPtr = sockPtr->nextPtr; } pollTimeout = -1; } /* * Select and drain the trigger pipe if necessary. */ /*n =*/ (void) PollWait(&pdata, pollTimeout); if (PollIn(&pdata, 0) && unlikely(ns_recv(queuePtr->pipe[0], charBuffer, 1u, 0) != 1)) { Ns_Fatal("spooler: trigger ns_recv() failed: %s", ns_sockstrerror(ns_sockerrno)); } /* * Attempt read-ahead of any new connections. */ Ns_GetTime(&now); sockPtr = readPtr; readPtr = NULL; while (sockPtr != NULL) { nextPtr = sockPtr->nextPtr; drvPtr = sockPtr->drvPtr; if (unlikely(PollHup(&pdata, sockPtr->pidx))) { /* * Peer has closed the connection */ SockRelease(sockPtr, SOCK_CLOSE, 0); } else if (!PollIn(&pdata, sockPtr->pidx)) { /* * Got no data */ if (Ns_DiffTime(&sockPtr->timeout, &now, &diff) <= 0) { SockRelease(sockPtr, SOCK_READTIMEOUT, 0); queuePtr->queuesize--; } else { Push(sockPtr, readPtr); } } else { /* * Got some data */ SockState n = SockRead(sockPtr, 1, &now); switch (n) { case SOCK_MORE: SockTimeout(sockPtr, &now, &drvPtr->recvwait); Push(sockPtr, readPtr); break; case SOCK_READY: assert(sockPtr->reqPtr != NULL); SockSetServer(sockPtr); Push(sockPtr, waitPtr); break; case SOCK_BADHEADER: NS_FALL_THROUGH; /* fall through */ case SOCK_BADREQUEST: NS_FALL_THROUGH; /* fall through */ case SOCK_CLOSE: NS_FALL_THROUGH; /* fall through */ case SOCK_CLOSETIMEOUT: NS_FALL_THROUGH; /* fall through */ case SOCK_ENTITYTOOLARGE: NS_FALL_THROUGH; /* fall through */ case SOCK_ERROR: NS_FALL_THROUGH; /* fall through */ case SOCK_READERROR: NS_FALL_THROUGH; /* fall through */ case SOCK_READTIMEOUT: NS_FALL_THROUGH; /* fall through */ case SOCK_SHUTERROR: NS_FALL_THROUGH; /* fall through */ case SOCK_SPOOL: NS_FALL_THROUGH; /* fall through */ case SOCK_TOOMANYHEADERS: NS_FALL_THROUGH; /* fall through */ case SOCK_WRITEERROR: NS_FALL_THROUGH; /* fall through */ case SOCK_WRITETIMEOUT: SockRelease(sockPtr, n, errno); queuePtr->queuesize--; break; } } sockPtr = nextPtr; } /* * Attempt to queue any pending connection * after reversing the list to ensure oldest * connections are tried first. 
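         *
         * For example, if waitPtr holds C -> B -> A (newest first), the
         * reversal below yields A -> B -> C, so that A, the oldest
         * pending connection, is offered to NsQueueConn() first.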
*/ if (waitPtr != NULL) { sockPtr = NULL; while ((nextPtr = waitPtr) != NULL) { waitPtr = nextPtr->nextPtr; Push(nextPtr, sockPtr); } while (sockPtr != NULL) { nextPtr = sockPtr->nextPtr; if (!NsQueueConn(sockPtr, &now)) { Push(sockPtr, waitPtr); } else { queuePtr->queuesize--; } sockPtr = nextPtr; } } /* * Add more connections from the spooler queue */ Ns_MutexLock(&queuePtr->lock); if (waitPtr == NULL) { sockPtr = (Sock*)queuePtr->sockPtr; queuePtr->sockPtr = NULL; while (sockPtr != NULL) { nextPtr = sockPtr->nextPtr; drvPtr = sockPtr->drvPtr; SockTimeout(sockPtr, &now, &drvPtr->recvwait); Push(sockPtr, readPtr); queuePtr->queuesize++; sockPtr = nextPtr; } } /* * Check for shutdown */ stopping = queuePtr->shutdown; Ns_MutexUnlock(&queuePtr->lock); } PollFree(&pdata); Ns_Log(Notice, "exiting"); Ns_MutexLock(&queuePtr->lock); queuePtr->stopped = NS_TRUE; Ns_CondBroadcast(&queuePtr->cond); Ns_MutexUnlock(&queuePtr->lock); } static void SpoolerQueueStart(SpoolerQueue *queuePtr, Ns_ThreadProc *proc) { NS_NONNULL_ASSERT(proc != NULL); while (queuePtr != NULL) { if (ns_sockpair(queuePtr->pipe) != 0) { Ns_Fatal("ns_sockpair() failed: %s", ns_sockstrerror(ns_sockerrno)); } Ns_ThreadCreate(proc, queuePtr, 0, &queuePtr->thread); queuePtr = queuePtr->nextPtr; } } static void SpoolerQueueStop(SpoolerQueue *queuePtr, const Ns_Time *timeoutPtr, const char *name) { NS_NONNULL_ASSERT(timeoutPtr != NULL); NS_NONNULL_ASSERT(name != NULL); while (queuePtr != NULL) { Ns_ReturnCode status; Ns_MutexLock(&queuePtr->lock); if (!queuePtr->stopped && !queuePtr->shutdown) { Ns_Log(Debug, "%s%d: triggering shutdown", name, queuePtr->id); queuePtr->shutdown = NS_TRUE; SockTrigger(queuePtr->pipe[1]); } status = NS_OK; while (!queuePtr->stopped && status == NS_OK) { status = Ns_CondTimedWait(&queuePtr->cond, &queuePtr->lock, timeoutPtr); } if (status != NS_OK) { Ns_Log(Warning, "%s%d: timeout waiting for shutdown", name, queuePtr->id); } else { /*Ns_Log(Notice, "%s%d: shutdown complete", name, queuePtr->id);*/ if (queuePtr->thread != NULL) { Ns_ThreadJoin(&queuePtr->thread, NULL); queuePtr->thread = NULL; } else { Ns_Log(Notice, "%s%d: shutdown: thread already gone", name, queuePtr->id); } ns_sockclose(queuePtr->pipe[0]); ns_sockclose(queuePtr->pipe[1]); } Ns_MutexUnlock(&queuePtr->lock); queuePtr = queuePtr->nextPtr; } } static int SockSpoolerQueue(Driver *drvPtr, Sock *sockPtr) { bool trigger = NS_FALSE; SpoolerQueue *queuePtr; NS_NONNULL_ASSERT(drvPtr != NULL); NS_NONNULL_ASSERT(sockPtr != NULL); /* * Get the next spooler thread from the list, all spooler requests are * rotated between all spooler threads */ Ns_MutexLock(&drvPtr->spooler.lock); if (drvPtr->spooler.curPtr == NULL) { drvPtr->spooler.curPtr = drvPtr->spooler.firstPtr; } queuePtr = drvPtr->spooler.curPtr; drvPtr->spooler.curPtr = drvPtr->spooler.curPtr->nextPtr; Ns_MutexUnlock(&drvPtr->spooler.lock); Ns_Log(Debug, "Spooler: %d: started fd=%d: %" PRIdz " bytes", queuePtr->id, sockPtr->sock, sockPtr->reqPtr->length); Ns_MutexLock(&queuePtr->lock); if (queuePtr->sockPtr == NULL) { trigger = NS_TRUE; } Push(sockPtr, queuePtr->sockPtr); Ns_MutexUnlock(&queuePtr->lock); /* * Wake up spooler thread */ if (trigger) { SockTrigger(queuePtr->pipe[1]); } return 1; } /* *====================================================================== * Writer Thread: Write asynchronously to the client socket *====================================================================== */ /* *---------------------------------------------------------------------- * * NsWriterLock, 
NsWriterUnlock --
 *
 *      Provide an API for locking and unlocking context information
 *      for streaming asynchronous writer jobs. The locks are just
 *      needed for managing linkage between "connPtr" and a writer
 *      entry. The lock operations are rather infrequent and the
 *      lock duration is very short, such that a single global lock
 *      appears sufficient.
 *
 * Results:
 *      None
 *
 * Side effects:
 *      Change Mutex state.
 *
 *----------------------------------------------------------------------
 */
void NsWriterLock(void) {
    Ns_MutexLock(&writerlock);
}

void NsWriterUnlock(void) {
    Ns_MutexUnlock(&writerlock);
}

/*
 *----------------------------------------------------------------------
 *
 * WriterSockFileVecCleanup --
 *
 *      Cleanup function for the FileVec array in the WriterSock structure.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      Potentially closing file descriptors, freeing Ns_FileVec memory.
 *
 *----------------------------------------------------------------------
 */
static void
WriterSockFileVecCleanup(WriterSock *wrSockPtr) {

    NS_NONNULL_ASSERT(wrSockPtr != NULL);

    if ( wrSockPtr->c.file.nbufs > 0) {
        int i;

        Ns_Log(DriverDebug, "WriterSockRelease nbufs %d", wrSockPtr->c.file.nbufs);

        for (i = 0; i < wrSockPtr->c.file.nbufs; i++) {
            /*
             * The fd of c.file.currentbuf is always the same as
             * wrSockPtr->fd and therefore already closed at this point.
             */
            if ( (i != wrSockPtr->c.file.currentbuf)
                 && (wrSockPtr->c.file.bufs[i].fd != NS_INVALID_FD) ) {

                Ns_Log(DriverDebug, "WriterSockRelease must close fd %d",
                       wrSockPtr->c.file.bufs[i].fd);
                ns_close(wrSockPtr->c.file.bufs[i].fd);
            }
        }
        ns_free(wrSockPtr->c.file.bufs);
    }
    ns_free(wrSockPtr->c.file.buf);
}

/*
 *----------------------------------------------------------------------
 *
 * WriterSockRequire, WriterSockRelease --
 *
 *      Management functions for WriterSocks. WriterSockRequire() and
 *      WriterSockRelease() are responsible for obtaining and
 *      freeing "WriterSock" structures. When such a structure is finally
 *      released, it is removed from the queue, the socket is
 *      closed and the memory is freed.
 *
 * Results:
 *      WriterSockRequire() returns a WriterSock from a connection;
 *      WriterSockRelease() returns nothing.
 *
 * Side effects:
 *      Updating reference counters, closing socket, freeing memory.
 *
 *----------------------------------------------------------------------
 */

static WriterSock *
WriterSockRequire(const Conn *connPtr) {
    WriterSock *wrSockPtr;

    NS_NONNULL_ASSERT(connPtr != NULL);

    NsWriterLock();
    wrSockPtr = (WriterSock *)connPtr->strWriter;
    if (wrSockPtr != NULL) {
        wrSockPtr->refCount ++;
    }
    NsWriterUnlock();

    return wrSockPtr;
}

static void
WriterSockRelease(WriterSock *wrSockPtr) {
    SpoolerQueue *queuePtr;

    NS_NONNULL_ASSERT(wrSockPtr != NULL);

    wrSockPtr->refCount --;

    Ns_Log(DriverDebug, "WriterSockRelease %p refCount %d keep %d",
           (void *)wrSockPtr, wrSockPtr->refCount, wrSockPtr->keep);

    if (wrSockPtr->refCount > 0) {
        return;
    }

    Ns_Log(DriverDebug,
           "Writer: closed sock %d, file fd %d, error %d/%d, "
           "sent=%" TCL_LL_MODIFIER "d, flags=%X",
           wrSockPtr->sockPtr->sock, wrSockPtr->fd,
           wrSockPtr->status, wrSockPtr->err,
           wrSockPtr->nsent, wrSockPtr->flags);

    NsPoolAddBytesSent(wrSockPtr->poolPtr, wrSockPtr->nsent);

    if (wrSockPtr->doStream != NS_WRITER_STREAM_NONE) {
        Conn *connPtr;

        NsWriterLock();
        connPtr = wrSockPtr->connPtr;
        if (connPtr != NULL && connPtr->strWriter != NULL) {
            connPtr->strWriter = NULL;
        }
        NsWriterUnlock();

        /*
         * In case writer streams are activated for this wrSockPtr, make sure
         * to release the tmp file.
See the thread "Naviserver Open Files" on the
         * sourceforge mailing list (starting July 2019).
         */
        if (wrSockPtr->doStream == NS_WRITER_STREAM_FINISH) {
            Ns_ReleaseTemp(wrSockPtr->fd);
        }
    }

    /*
     * Remove the entry from the queue and decrement the counter.
     */
    queuePtr = wrSockPtr->queuePtr;
    if (queuePtr->curPtr == wrSockPtr) {
        queuePtr->curPtr = wrSockPtr->nextPtr;
        queuePtr->queuesize--;
    } else {
        WriterSock *curPtr, *lastPtr = queuePtr->curPtr;

        for (curPtr = (lastPtr != NULL) ? lastPtr->nextPtr : NULL;
             curPtr != NULL;
             lastPtr = curPtr, curPtr = curPtr->nextPtr
             ) {
            if (curPtr == wrSockPtr) {
                lastPtr->nextPtr = wrSockPtr->nextPtr;
                queuePtr->queuesize--;
                break;
            }
        }
    }

    if ((wrSockPtr->err != 0) || (wrSockPtr->status != SPOOLER_OK)) {
        int i;

        /*
         * Look up the matching sockState from the spooler state. The array
         * has just 5 elements; on average, just 2 comparisons are needed
         * (since OK is at the end).
         */
        for (i = 0; i < Ns_NrElements(spoolerStateMap); i++) {
            if (spoolerStateMap[i].spoolerState == wrSockPtr->status) {
                SockError(wrSockPtr->sockPtr, spoolerStateMap[i].sockState, wrSockPtr->err);
                break;
            }
        }
        NsSockClose(wrSockPtr->sockPtr, (int)NS_FALSE);
    } else {
        NsSockClose(wrSockPtr->sockPtr, (int)wrSockPtr->keep);
    }
    if (wrSockPtr->clientData != NULL) {
        ns_free(wrSockPtr->clientData);
    }
    if (wrSockPtr->fd != NS_INVALID_FD) {
        if (wrSockPtr->doStream != NS_WRITER_STREAM_FINISH) {
            (void) ns_close(wrSockPtr->fd);
        }
        WriterSockFileVecCleanup(wrSockPtr);

    } else if (wrSockPtr->c.mem.bufs != NULL) {
        if (wrSockPtr->c.mem.fmap.addr != NULL) {
            NsMemUmap(&wrSockPtr->c.mem.fmap);

        } else {
            int i;

            for (i = 0; i < wrSockPtr->c.mem.nbufs; i++) {
                ns_free((char *)wrSockPtr->c.mem.bufs[i].iov_base);
            }
        }
        if (wrSockPtr->c.mem.bufs != wrSockPtr->c.mem.preallocated_bufs) {
            ns_free(wrSockPtr->c.mem.bufs);
        }
    }
    if (wrSockPtr->headerString != NULL) {
        ns_free(wrSockPtr->headerString);
    }

    ns_free(wrSockPtr);
}

/*
 *----------------------------------------------------------------------
 *
 * WriterReadFromSpool --
 *
 *      Utility function of the WriterThread to read blocks from a
 *      file into the output buffer of the writer. It handles
 *      leftovers from previous send attempts and takes care of
 *      locking in case of simultaneous reading and writing from the
 *      same file.
 *
 * Results:
 *      SpoolerState (SPOOLER_OK or SPOOLER_READERROR).
 *
 * Side effects:
 *      Fills up curPtr->c.file.buf and updates counters/sizes.
 *
 *----------------------------------------------------------------------
 */
static SpoolerState
WriterReadFromSpool(WriterSock *curPtr) {
    NsWriterStreamState doStream;
    SpoolerState        status = SPOOLER_OK;
    size_t              maxsize, toRead;
    unsigned char      *bufPtr;

    NS_NONNULL_ASSERT(curPtr != NULL);

    doStream = curPtr->doStream;
    if (doStream != NS_WRITER_STREAM_NONE) {
        Ns_MutexLock(&curPtr->c.file.fdlock);
        toRead = curPtr->c.file.toRead;
        Ns_MutexUnlock(&curPtr->c.file.fdlock);
    } else {
        toRead = curPtr->c.file.toRead;

        Ns_Log(DriverDebug, "### WriterReadFromSpool [%d]: fd %d tosend %lu files %d",
               curPtr->c.file.currentbuf, curPtr->fd, toRead, curPtr->c.file.nbufs);
    }

    maxsize = curPtr->c.file.maxsize;
    bufPtr  = curPtr->c.file.buf;

    /*
     * When bufsize > 0 we have a leftover from a previous send. In such
     * cases, move the leftover to the front, and fill the remainder of
     * the buffer with new data from curPtr->c.
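     *
     * Illustrative example (assumed numbers): with maxsize 8192 and a
     * 1000-byte leftover at bufoffset 3000, the memmove() below shifts
     * those 1000 bytes to the front, and at most 7192 fresh bytes are
     * read behind them.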
 */
    if (curPtr->c.file.bufsize > 0u) {
        Ns_Log(DriverDebug,
               "### WriterReadFromSpool %p %.6x leftover %" PRIdz " offset %ld",
               (void *)curPtr,
               curPtr->flags,
               curPtr->c.file.bufsize,
               (long)curPtr->c.file.bufoffset);
        if (likely(curPtr->c.file.bufoffset > 0)) {
            memmove(curPtr->c.file.buf,
                    curPtr->c.file.buf + curPtr->c.file.bufoffset,
                    curPtr->c.file.bufsize);
        }
        bufPtr = curPtr->c.file.buf + curPtr->c.file.bufsize;
        maxsize -= curPtr->c.file.bufsize;
    }
    if (toRead > maxsize) {
        toRead = maxsize;
    }

    /*
     * Read content from the file into the buffer.
     */
    if (toRead > 0u) {
        ssize_t n;

        if (doStream != NS_WRITER_STREAM_NONE) {
            /*
             * In streaming mode, the connection thread writes to the
             * spool file and the writer thread reads from the same
             * file. Therefore, we have to re-adjust the current
             * read/writer position, which might be changed by the
             * other thread. These positions have to be locked, since
             * seeking might be subject to race conditions. Here we
             * set the read pointer to the position after the last
             * send operation.
             */
            Ns_MutexLock(&curPtr->c.file.fdlock);
            (void) ns_lseek(curPtr->fd, (off_t)curPtr->nsent, SEEK_SET);
        }

        if (curPtr->c.file.nbufs == 0) {
            /*
             * Working on a single fd.
             */
            n = ns_read(curPtr->fd, bufPtr, toRead);

        } else {
            /*
             * Working on a Ns_FileVec.
             */
            int currentbuf = curPtr->c.file.currentbuf;
            size_t wantRead = curPtr->c.file.bufs[currentbuf].length;
            size_t segSize = (wantRead > toRead ? toRead : wantRead);

            n = ns_read(curPtr->fd, bufPtr, segSize);

            Ns_Log(DriverDebug, "### WriterReadFromSpool [%d] (nbufs %d): read from fd %d want %lu got %ld (remain %lu)",
                   currentbuf, curPtr->c.file.nbufs, curPtr->fd, segSize, n, wantRead);

            if (n > 0) {
                /*
                 * Reduce the remaining length in the Ns_FileVec for the
                 * next iteration.
                 */
                curPtr->c.file.bufs[currentbuf].length -= (size_t)n;

                if ((size_t)n < wantRead) {
                    /*
                     * Partial read on a segment.
                     */
                    Ns_Log(DriverDebug, "### WriterReadFromSpool [%d] (nbufs %d): partial read on fd %d (got %ld)",
                           currentbuf, curPtr->c.file.nbufs,
                           curPtr->fd, n);

                } else if (currentbuf < curPtr->c.file.nbufs - 1 /* && (n == wantRead) */) {
                    /*
                     * All read from this segment, setup next read.
                     */
                    ns_close(curPtr->fd);
                    curPtr->c.file.bufs[currentbuf].fd = NS_INVALID_FD;

                    curPtr->c.file.currentbuf ++;
                    curPtr->fd = curPtr->c.file.bufs[curPtr->c.file.currentbuf].fd;

                    Ns_Log(DriverDebug, "### WriterReadFromSpool switch to [%d] fd %d",
                           curPtr->c.file.currentbuf, curPtr->fd);
                }
            }
        }

        if (n <= 0) {
            status = SPOOLER_READERROR;
        } else {
            /*
             * curPtr->c.file.toRead is still protected by
             * curPtr->c.file.fdlock when needed (in streaming mode).
             */
            curPtr->c.file.toRead -= (size_t)n;
            curPtr->c.file.bufsize += (size_t)n;
        }

        if (doStream != NS_WRITER_STREAM_NONE) {
            Ns_MutexUnlock(&curPtr->c.file.fdlock);
        }
    }

    return status;
}

/*
 *----------------------------------------------------------------------
 *
 * WriterSend --
 *
 *      Utility function of the WriterThread to send content to the client. It
 *      handles partial write operations from the lower level driver
 *      infrastructure.
 *
 * Results:
 *      Either SPOOLER_OK or SPOOLER_WRITEERROR.
 *
 * Side effects:
 *      Sends data, might reshuffle iovec.
 *
 *----------------------------------------------------------------------
 */
static SpoolerState
WriterSend(WriterSock *curPtr, int *err) {
    const struct iovec *bufs;
    struct iovec        vbuf;
    int                 nbufs;
    SpoolerState        status = SPOOLER_OK;
    size_t              toWrite;
    ssize_t             n;

    NS_NONNULL_ASSERT(curPtr != NULL);
    NS_NONNULL_ASSERT(err != NULL);

    /*
     * Prepare the send operation.
     */
    if (curPtr->fd != NS_INVALID_FD) {
        /*
         * We have a valid file descriptor, send data from file.
         *
         * Prepare sending a single buffer with curPtr->c.file.bufsize bytes
         * from the curPtr->c.file.buf to the client.
         */
        vbuf.iov_len = curPtr->c.file.bufsize;
        vbuf.iov_base = (void *)curPtr->c.file.buf;
        bufs = &vbuf;
        nbufs = 1;
        toWrite = curPtr->c.file.bufsize;
    } else {
        int i;

        /*
         * Prepare sending multiple memory buffers. Get the length of the
         * remaining buffers.
         */
        toWrite = 0u;
        for (i = 0; i < curPtr->c.mem.nsbufs; i ++) {
            toWrite += curPtr->c.mem.sbufs[i].iov_len;
        }
        Ns_Log(DriverDebug,
               "### Writer wants to send remainder nbufs %d len %" PRIdz,
               curPtr->c.mem.nsbufs, toWrite);

        /*
         * Add buffers from the source and fill the structure up to the max.
         */
        while (curPtr->c.mem.bufIdx < curPtr->c.mem.nbufs
               && curPtr->c.mem.sbufIdx < UIO_SMALLIOV) {
            const struct iovec *vPtr = &curPtr->c.mem.bufs[curPtr->c.mem.bufIdx];

            if (vPtr->iov_len > 0u && vPtr->iov_base != NULL) {

                Ns_Log(DriverDebug,
                       "### Writer copies source %d to scratch %d len %" PRIiovlen,
                       curPtr->c.mem.bufIdx, curPtr->c.mem.sbufIdx, vPtr->iov_len);

                toWrite += Ns_SetVec(curPtr->c.mem.sbufs, curPtr->c.mem.sbufIdx++,
                                     vPtr->iov_base, vPtr->iov_len);
                curPtr->c.mem.nsbufs++;
            }
            curPtr->c.mem.bufIdx++;
        }

        bufs = curPtr->c.mem.sbufs;
        nbufs = curPtr->c.mem.nsbufs;
        Ns_Log(DriverDebug, "### Writer wants to send %d bufs size %" PRIdz,
               nbufs, toWrite);
    }

    /*
     * Perform the actual send operation.
     */
    n = NsDriverSend(curPtr->sockPtr, bufs, nbufs, 0u);

    if (n == -1) {
        *err = ns_sockerrno;
        status = SPOOLER_WRITEERROR;
    } else {
        /*
         * We have sent zero or more bytes.
         */
        if (curPtr->doStream != NS_WRITER_STREAM_NONE) {
            Ns_MutexLock(&curPtr->c.file.fdlock);
            curPtr->size -= (size_t)n;
            Ns_MutexUnlock(&curPtr->c.file.fdlock);
        } else {
            curPtr->size -= (size_t)n;
        }
        curPtr->nsent += n;
        curPtr->sockPtr->timeout.sec = 0;

        if (curPtr->fd != NS_INVALID_FD) {
            /*
             * File-descriptor based send operation. Reduce the (remaining)
             * buffer size by the amount of data sent and adjust the buffer
             * offset. For partial send operations, this will lead to a
             * remaining buffer size > 0.
             */
            curPtr->c.file.bufsize -= (size_t)n;
            curPtr->c.file.bufoffset = (off_t)n;

        } else {
            if (n < (ssize_t)toWrite) {
                /*
                 * We have a partial transmit from the iovec
                 * structure. We have to compact it to fill content in
                 * the next round.
                 */
                curPtr->c.mem.sbufIdx = Ns_ResetVec(curPtr->c.mem.sbufs, curPtr->c.mem.nsbufs, (size_t)n);
                curPtr->c.mem.nsbufs -= curPtr->c.mem.sbufIdx;

                memmove(curPtr->c.mem.sbufs, curPtr->c.mem.sbufs + curPtr->c.mem.sbufIdx,
                        /* move the iovecs to the start of the scratch buffers */
                        sizeof(struct iovec) * (size_t)curPtr->c.mem.nsbufs);
            }
        }
    }

    return status;
}

/*
 *----------------------------------------------------------------------
 *
 * WriterGetInfoPtr --
 *
 *      Helper function to obtain the ConnPoolInfo structure for a WriterSock.
 *
 *      The infoPtr is allocated only once per pool and cached in the
 *      WriterSock. Only the first time a writer thread "sees" a pool does it
 *      allocate the structure for it.
 *
 * Results:
 *      The ConnPoolInfo structure of the writer's pool.
 *
 * Side effects:
 *      Can allocate memory.
 *
 *----------------------------------------------------------------------
 */
static ConnPoolInfo *
WriterGetInfoPtr(WriterSock *curPtr, Tcl_HashTable *pools) {

    NS_NONNULL_ASSERT(curPtr != NULL);
    NS_NONNULL_ASSERT(pools != NULL);

    if (curPtr->infoPtr == NULL) {
        int            isNew;
        Tcl_HashEntry *hPtr;

        hPtr = Tcl_CreateHashEntry(pools, (void*)curPtr->poolPtr, &isNew);
        if (isNew == 1) {
            /*
             * This is a pool that we have not seen yet.
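             * (The table maps ConnPool* -> ConnPoolInfo*, one entry per
             * pool and writer thread; WriterPerPoolRates() below relies
             * on these entries for the bandwidth computation.)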
 */
            curPtr->infoPtr = ns_malloc(sizeof(ConnPoolInfo));
            curPtr->infoPtr->currentPoolRate = 0;
            curPtr->infoPtr->threadSlot =
                NsPoolAllocateThreadSlot(curPtr->poolPtr, Ns_ThreadId());
            Tcl_SetHashValue(hPtr, curPtr->infoPtr);
            Ns_Log(DriverDebug, "poollimit: pool '%s' allocate infoPtr with slot %lu poolLimit %d",
                   curPtr->poolPtr->pool,
                   curPtr->infoPtr->threadSlot,
                   curPtr->poolPtr->rate.poolLimit);
        } else {
            curPtr->infoPtr = (ConnPoolInfo *)Tcl_GetHashValue(hPtr);
        }
    }

    return curPtr->infoPtr;
}

/*
 *----------------------------------------------------------------------
 *
 * WriterPerPoolRates --
 *
 *      Compute current bandwidths per pool and writer.
 *
 *      Since we potentially have multiple writer threads running, all of
 *      these might serve writer jobs of the same pool. In order to minimize
 *      locking, we first compute writer-thread-specific subresults and
 *      combine these later with the results of the other threads.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      Updates the per-pool rate information kept in the thread-local
 *      pools table.
 *
 *----------------------------------------------------------------------
 */
static void
WriterPerPoolRates(WriterSock *writePtr, Tcl_HashTable *pools)
{
    WriterSock     *curPtr;
    Tcl_HashSearch  search;
    Tcl_HashEntry  *hPtr;

    NS_NONNULL_ASSERT(writePtr != NULL);
    NS_NONNULL_ASSERT(pools != NULL);

    /*
     * First reset the pool total rate. We keep the bandwidth-managed pools
     * in thread-local memory. Before we accumulate the data, we reset it.
     */
    hPtr = Tcl_FirstHashEntry(pools, &search);
    while (hPtr != NULL) {
        ConnPoolInfo *infoPtr = (ConnPoolInfo *)Tcl_GetHashValue(hPtr);
        infoPtr->currentPoolRate = 0;
        hPtr = Tcl_NextHashEntry(&search);
    }

    /*
     * Sum the actual rates per bandwidth limited pool for all active writer
     * jobs.
     */
    for (curPtr = writePtr; curPtr != NULL; curPtr = curPtr->nextPtr) {
        /*
         * Does the writer come from a bandwidth-limited pool?
         */
        if (curPtr->poolPtr->rate.poolLimit > 0 && curPtr->currentRate > 0) {
            /*
             * Add the actual rate to the writer specific pool rate.
             */
            ConnPoolInfo *infoPtr = WriterGetInfoPtr(curPtr, pools);

            infoPtr->currentPoolRate += curPtr->currentRate;
            Ns_Log(DriverDebug, "poollimit pool '%s' added rate poolLimit %d poolRate %d",
                   curPtr->poolPtr->pool,
                   curPtr->poolPtr->rate.poolLimit,
                   infoPtr->currentPoolRate);
        }
    }

    /*
     * Now iterate over the pools used by this thread and sum the specific
     * pool rates from all writer threads.
     */
    hPtr = Tcl_FirstHashEntry(pools, &search);
    while (hPtr != NULL) {
        ConnPool     *poolPtr = (ConnPool *)Tcl_GetHashKey(pools, hPtr);
        int           totalPoolRate, writerThreadCount, threadDeltaRate;
        ConnPoolInfo *infoPtr;

        /*
         * Compute the following indicators:
         *
         *   - totalPoolRate: accumulated pool rates from all writer threads.
         *
         *   - threadDeltaRate: how much of the available bandwidth can be
         *     used by the current thread. We assume that the distribution of
         *     writers between all writer threads is even, so we can split
         *     the available rate by the number of writer threads working on
         *     this pool.
         *
         *   - deltaPercentage: adjust in a single iteration just a fraction
         *     (e.g. 10 percent) of the potential change. This function is
         *     called often enough to justify delayed adjustments.
         */
        infoPtr = (ConnPoolInfo *)Tcl_GetHashValue(hPtr);
        totalPoolRate = NsPoolTotalRate(poolPtr,
                                        infoPtr->threadSlot,
                                        infoPtr->currentPoolRate,
                                        &writerThreadCount);

        /*
         * If nothing is going on, allow a thread the full rate.
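         *
         * Worked example (assumed numbers): poolLimit 1000 KB/s,
         * totalPoolRate 800 KB/s, 2 writer threads ->
         * threadDeltaRate = (1000 - 800) / 2 = 100, deltaPercentage = 10,
         * i.e. each busy writer may grow its rate by about 10% per step.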
 */
        if (infoPtr->currentPoolRate == 0) {
            threadDeltaRate = (poolPtr->rate.poolLimit - totalPoolRate);
        } else {
            threadDeltaRate = (poolPtr->rate.poolLimit - totalPoolRate) / writerThreadCount;
        }
        infoPtr->deltaPercentage = threadDeltaRate / 10;
        if (infoPtr->deltaPercentage < -50) {
            infoPtr->deltaPercentage = -50;
        }

        if (totalPoolRate > 0) {
            Ns_Log(Notice, "... pool '%s' thread's pool rate %d total pool rate %d limit %d "
                   "(#%d writer threads) -> computed rate %d (%d%%) ",
                   NsPoolName(poolPtr->pool),
                   infoPtr->currentPoolRate,
                   totalPoolRate,
                   poolPtr->rate.poolLimit,
                   writerThreadCount,
                   threadDeltaRate,
                   infoPtr->deltaPercentage
                   );
        }

        hPtr = Tcl_NextHashEntry(&search);
    }
}

/*
 *----------------------------------------------------------------------
 *
 * WriterThread --
 *
 *      Thread that writes files to clients.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      Connections are accepted and their SockPtr is set to NULL
 *      such that closing the actual connection does not close the socket.
 *
 *----------------------------------------------------------------------
 */
static void
WriterThread(void *arg)
{
    SpoolerQueue   *queuePtr = (SpoolerQueue*)arg;
    int             err, pollTimeout;
    bool            stopping;
    Ns_Time         now;
    Sock           *sockPtr;
    const Driver   *drvPtr;
    WriterSock     *curPtr, *nextPtr, *writePtr;
    PollData        pdata;
    Tcl_HashTable   pools;     /* used for accumulating bandwidth per pool */

    Ns_ThreadSetName("-writer%d-", queuePtr->id);
    queuePtr->threadName = Ns_ThreadGetName();

    Tcl_InitHashTable(&pools, TCL_ONE_WORD_KEYS);

    /*
     * Loop forever until signaled to shut down and all
     * connections are complete and gracefully closed.
     */
    Ns_Log(Notice, "writer%d: accepting connections", queuePtr->id);

    PollCreate(&pdata);
    writePtr = NULL;
    stopping = NS_FALSE;

    while (!stopping) {
        char charBuffer[1];

        /*
         * If there are any write sockets, set the bits.
         */
        PollReset(&pdata);
        (void)PollSet(&pdata, queuePtr->pipe[0], (short)POLLIN, NULL);

        if (writePtr == NULL) {
            pollTimeout = 30 * 1000;
        } else {

            /*
             * If per-pool bandwidth management is requested, compute the base
             * data for the adjustment. If no bandwidth management is
             * requested, there is no slowdown.
             */
            if (NsWriterBandwidthManagement) {
                WriterPerPoolRates(writePtr, &pools);
            }

            /*
             * There are writers active. Determine on which writers we poll
             * and compute the maximal poll wait time.
             */
            pollTimeout = 1000;
            for (curPtr = writePtr; curPtr != NULL; curPtr = curPtr->nextPtr) {
                int sleepTimeMs = 0;

                Ns_Log(DriverDebug, "### Writer poll collect %p size %" PRIdz " streaming %d rateLimit %d",
                       (void *)curPtr, curPtr->size, curPtr->doStream, curPtr->rateLimit);

                if (curPtr->rateLimit > 0
                    && curPtr->nsent > 0
                    && curPtr->currentRate > 0
                    ) {
                    int currentMs, targetTimeMs;

                    /*
                     * Perform per-pool rate management, when
                     *
                     *   - a poolLimit is provided,
                     *   - we have performance data of the pool, and
                     *   - changes are possible (as flagged by deltaPercentage).
                     */
                    if (NsWriterBandwidthManagement
                        && curPtr->poolPtr->rate.poolLimit > 0
                        && curPtr->infoPtr != NULL
                        && curPtr->infoPtr->deltaPercentage != 0
                        ) {
                        /*
                         * Only adjust data for busy writer jobs, which
                         * are close to their limits.
                         */
                        bool onLimit = (curPtr->currentRate*100 / curPtr->rateLimit) > 90;

                        Ns_Log(DriverDebug, "we allowed %d we use %d on limit %d (%d) , we can do %d%%",
                               curPtr->rateLimit,
                               curPtr->currentRate,
                               (int)onLimit,
                               curPtr->currentRate*100/curPtr->rateLimit,
                               curPtr->infoPtr->deltaPercentage);
                        if (onLimit) {
                            /*
                             * Compute a new rate limit based on the
                             * positive/negative delta percentage.
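                             *
                             * E.g. (assumed numbers): currentRate 900 KB/s
                             * and deltaPercentage -10 give
                             * newRate = 900 + (900 * -10 / 100) = 810 KB/s,
                             * clamped below into [5, poolLimit].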
 */
                            int newRate = curPtr->currentRate
                                + (curPtr->currentRate * curPtr->infoPtr->deltaPercentage / 100);
                            /*
                             * Sanity checks:
                             *  - never allow more than poolLimit
                             *  - never kill connections completely (e.g. minRate 5KB/s)
                             */
                            if (newRate > curPtr->poolPtr->rate.poolLimit) {
                                newRate = curPtr->poolPtr->rate.poolLimit;
                            } else if (newRate < 5) {
                                newRate = 5;
                            }
                            Ns_Log(Notice, "... pool '%s' new rate limit changed from %d to %d KB/s (delta %d%%)",
                                   curPtr->poolPtr->pool, curPtr->rateLimit, newRate,
                                   curPtr->infoPtr->deltaPercentage);
                            curPtr->rateLimit = newRate;
                        }
                    }

                    /*
                     * Adjust the rate to the rate limit.
                     */
                    currentMs    = (int)(curPtr->nsent/(Tcl_WideInt)curPtr->currentRate);
                    targetTimeMs = (int)(curPtr->nsent/(Tcl_WideInt)curPtr->rateLimit);
                    sleepTimeMs = 1 + targetTimeMs - currentMs;
                    Ns_Log(WriterDebug, "### Writer(%d)"
                           " byte sent %" TCL_LL_MODIFIER "d msecs %d rate %d KB/s"
                           " targetRate %d KB/s sleep %d",
                           curPtr->sockPtr->sock,
                           curPtr->nsent, currentMs,
                           curPtr->currentRate,
                           curPtr->rateLimit,
                           sleepTimeMs);
                }

                if (likely(curPtr->size > 0u)) {
                    if (sleepTimeMs <= 0) {
                        SockPoll(curPtr->sockPtr, (short)POLLOUT, &pdata);
                        pollTimeout = -1;
                    } else {
                        pollTimeout = MIN(sleepTimeMs, pollTimeout);
                    }
                } else if (unlikely(curPtr->doStream == NS_WRITER_STREAM_FINISH)) {
                    pollTimeout = -1;
                }
            }
        }

        Ns_Log(DriverDebug, "### Writer final pollTimeout %d", pollTimeout);

        /*
         * Select and drain the trigger pipe if necessary.
         */
        (void) PollWait(&pdata, pollTimeout);

        if (PollIn(&pdata, 0)
            && unlikely(ns_recv(queuePtr->pipe[0], charBuffer, 1u, 0) != 1)) {
            Ns_Fatal("writer: trigger ns_recv() failed: %s",
                     ns_sockstrerror(ns_sockerrno));
        }

        /*
         * Write to all available sockets
         */
        Ns_GetTime(&now);
        curPtr = writePtr;
        writePtr = NULL;

        while (curPtr != NULL) {
            NsWriterStreamState doStream;
            SpoolerState        spoolerState = SPOOLER_OK;

            nextPtr = curPtr->nextPtr;
            sockPtr = curPtr->sockPtr;
            err = 0;

            /*
             * The truth value of doStream does not change through
             * concurrency.
             */
            doStream = curPtr->doStream;

            if (unlikely(PollHup(&pdata, sockPtr->pidx))) {
                Ns_Log(DriverDebug, "### Writer %p reached POLLHUP fd %d", (void *)curPtr, sockPtr->sock);
                spoolerState = SPOOLER_CLOSE;
                err = 0;
                curPtr->infoPtr = WriterGetInfoPtr(curPtr, &pools);
                curPtr->infoPtr->currentPoolRate += curPtr->currentRate;

            } else if (likely(PollOut(&pdata, sockPtr->pidx)) || (doStream == NS_WRITER_STREAM_FINISH)) {
                /*
                 * The socket is writable; we can compute the rate, when
                 * something was sent already and some kind of rate limiting
                 * is in place ... and we have sent enough data to make a good
                 * estimate (just after the 2nd send, so more than the driver
                 * buffer size).
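                 *
                 * Example with assumed numbers: 2,048,000 bytes sent in
                 * 1000 ms yield currentRate = 2048 (bytes/ms, i.e. roughly
                 * KB/s), which feeds the sleep-time computation in the
                 * next poll round.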
 */
                Ns_Log(DriverDebug, "Socket of pool '%s' is writable, writer limit %d nsent %ld",
                       curPtr->poolPtr->pool, curPtr->rateLimit, (long)curPtr->nsent);

                if (curPtr->rateLimit > 0
                    && (size_t)curPtr->nsent > curPtr->sockPtr->drvPtr->bufsize
                    ) {
                    Ns_Time diff;
                    long    currentMs;

                    Ns_DiffTime(&now, &curPtr->startTime, &diff);
                    currentMs = Ns_TimeToMilliseconds(&diff);
                    if (currentMs > 0) {
                        curPtr->currentRate = (int)((curPtr->nsent)/(Tcl_WideInt)currentMs);
                        Ns_Log(DriverDebug,
                               "Socket of pool '%s' is writable, currentMs %ld has updated current rate %d",
                               curPtr->poolPtr->pool, currentMs, curPtr->currentRate);
                    }
                }
                Ns_Log(DriverDebug,
                       "### Writer %p can write to client fd %d (trigger %d) streaming %.6x"
                       " size %" PRIdz " nsent %" TCL_LL_MODIFIER "d bufsize %" PRIdz,
                       (void *)curPtr, sockPtr->sock, PollIn(&pdata, 0), doStream,
                       curPtr->size, curPtr->nsent, curPtr->c.file.bufsize);

                if (unlikely(curPtr->size < 1u)) {
                    /*
                     * Size < 1 means that everything was sent.
                     */
                    if (doStream != NS_WRITER_STREAM_ACTIVE) {
                        if (doStream == NS_WRITER_STREAM_FINISH) {
                            Ns_ReleaseTemp(curPtr->fd);
                        }
                        spoolerState = SPOOLER_CLOSE;
                    }
                } else {
                    /*
                     * If size > 0, there is still something to send.
                     * If we are spooling from a file, read some data
                     * from the (spool) file and place it into curPtr->c.file.buf.
                     */
                    if (curPtr->fd != NS_INVALID_FD) {
                        spoolerState = WriterReadFromSpool(curPtr);
                    }

                    if (spoolerState == SPOOLER_OK) {
                        spoolerState = WriterSend(curPtr, &err);
                    }
                }
            } else {

                /*
                 * Mark when the first timeout occurred, or check whether the
                 * timeout has already lasted too long and we need to stop
                 * this socket.
                 */
                if (sockPtr->timeout.sec == 0) {
                    Ns_Log(DriverDebug, "Writer %p fd %d setting sendwait %ld.%6ld",
                           (void *)curPtr, sockPtr->sock,
                           curPtr->sockPtr->drvPtr->sendwait.sec,
                           curPtr->sockPtr->drvPtr->sendwait.usec);
                    SockTimeout(sockPtr, &now, &curPtr->sockPtr->drvPtr->sendwait);

                } else if (Ns_DiffTime(&sockPtr->timeout, &now, NULL) <= 0) {
                    Ns_Log(DriverDebug, "Writer %p fd %d timeout", (void *)curPtr, sockPtr->sock);
                    err          = ETIMEDOUT;
                    spoolerState = SPOOLER_CLOSETIMEOUT;
                }
            }

            /*
             * Check the result status and close the socket in case of
             * timeout or completion.
             */
            Ns_MutexLock(&queuePtr->lock);
            if (spoolerState == SPOOLER_OK) {
                if (curPtr->size > 0u || doStream == NS_WRITER_STREAM_ACTIVE) {
                    Ns_Log(DriverDebug,
                           "Writer %p continue OK (size %" PRIdz ") => PUSH",
                           (void *)curPtr, curPtr->size);
                    Push(curPtr, writePtr);
                } else {
                    Ns_Log(DriverDebug,
                           "Writer %p done OK (size %" PRIdz ") => RELEASE",
                           (void *)curPtr, curPtr->size);
                    WriterSockRelease(curPtr);
                }
            } else {
                /*
                 * spoolerState might be SPOOLER_CLOSE or SPOOLER_*TIMEOUT,
                 * or SPOOLER_*ERROR
                 */
                Ns_Log(DriverDebug,
                       "Writer %p fd %d release, not OK (status %d) => RELEASE",
                       (void *)curPtr, curPtr->sockPtr->sock, (int)spoolerState);
                curPtr->status = spoolerState;
                curPtr->err    = err;
                WriterSockRelease(curPtr);
            }
            Ns_MutexUnlock(&queuePtr->lock);
            curPtr = nextPtr;
        }

        /*
         * Add more sockets to the writer queue.
         */
        if (queuePtr->sockPtr != NULL) {
            Ns_MutexLock(&queuePtr->lock);
            if (queuePtr->sockPtr != NULL) {
                curPtr = queuePtr->sockPtr;
                queuePtr->sockPtr = NULL;
                while (curPtr != NULL) {
                    nextPtr = curPtr->nextPtr;
                    sockPtr = curPtr->sockPtr;
                    drvPtr  = sockPtr->drvPtr;
                    SockTimeout(sockPtr, &now, &drvPtr->sendwait);
                    Push(curPtr, writePtr);
                    queuePtr->queuesize++;
                    curPtr = nextPtr;
                }
                queuePtr->curPtr = writePtr;
            }
            Ns_MutexUnlock(&queuePtr->lock);
        }

        /*
         * Check for shutdown.
         */
        stopping = queuePtr->shutdown;
    }
    PollFree(&pdata);

    {
        /*
         * Free ConnPoolInfo
         */
        Tcl_HashSearch  search;
        Tcl_HashEntry  *hPtr = Tcl_FirstHashEntry(&pools, &search);

        while (hPtr
!= NULL) {
            ConnPoolInfo *infoPtr = (ConnPoolInfo *)Tcl_GetHashValue(hPtr);
            ns_free(infoPtr);
            hPtr = Tcl_NextHashEntry(&search);
        }
        /*
         * Delete the hash table for pools.
         */
        Tcl_DeleteHashTable(&pools);
    }

    Ns_Log(Notice, "exiting");

    Ns_MutexLock(&queuePtr->lock);
    queuePtr->stopped = NS_TRUE;
    Ns_CondBroadcast(&queuePtr->cond);
    Ns_MutexUnlock(&queuePtr->lock);
}

/*
 *----------------------------------------------------------------------
 *
 * NsWriterFinish --
 *
 *      Finish a streaming writer job (typically called at the close
 *      of a connection). A streaming writer job is typically fed by a
 *      sequence of ns_write operations. After such an operation, the
 *      WriterThread has to keep the writer job alive.
 *      NsWriterFinish() tells the WriterThread that no further writes
 *      will come from this connection.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      Change the state of the writer job and trigger the queue.
 *
 *----------------------------------------------------------------------
 */

void
NsWriterFinish(NsWriterSock *wrSockPtr) {
    WriterSock *writerSockPtr = (WriterSock *)wrSockPtr;

    NS_NONNULL_ASSERT(wrSockPtr != NULL);

    Ns_Log(DriverDebug, "NsWriterFinish: %p", (void *)writerSockPtr);

    writerSockPtr->doStream = NS_WRITER_STREAM_FINISH;
    SockTrigger(writerSockPtr->queuePtr->pipe[1]);
}

/*
 *----------------------------------------------------------------------
 *
 * WriterSetupStreamingMode --
 *
 *      In streaming mode, set up a temporary fd which is used as input and
 *      output. Streaming i/o will append to the file, while the writer will
 *      read from it.
 *
 * Results:
 *      Ns_ReturnCode (NS_OK, NS_ERROR, NS_FILTER_BREAK). NS_FILTER_BREAK
 *      signals that all processing was already performed and the caller can
 *      stop handling more data. On success, the function returns an fd via
 *      its last argument.
 *
 * Side effects:
 *      Potentially allocating a temp file and updating connPtr members.
 *
 *----------------------------------------------------------------------
 */
Ns_ReturnCode
WriterSetupStreamingMode(Conn *connPtr, struct iovec *bufs, int nbufs, int *fdPtr)
{
    bool          first;
    size_t        wrote = 0u;
    WriterSock   *wrSockPtr1;
    Ns_ReturnCode status = NS_OK;

    NS_NONNULL_ASSERT(connPtr != NULL);
    NS_NONNULL_ASSERT(bufs != NULL);
    NS_NONNULL_ASSERT(fdPtr != NULL);

    Ns_Log(DriverDebug, "NsWriterQueue: streaming writer job");

    if (connPtr->fd == 0) {
        /*
         * Create a new temporary spool file and provide the fd to the
         * connection thread via connPtr.
         */
        first = NS_TRUE;
        wrSockPtr1 = NULL;

        *fdPtr = Ns_GetTemp();
        connPtr->fd = *fdPtr;

        Ns_Log(DriverDebug, "NsWriterQueue: new tmp file has fd %d", *fdPtr);

    } else {
        /*
         * Reuse the previously created spool file.
         */
        first = NS_FALSE;
        wrSockPtr1 = WriterSockRequire(connPtr);

        if (wrSockPtr1 == NULL) {
            Ns_Log(Notice,
                   "NsWriterQueue: writer job was already canceled (fd %d); maybe user dropped connection",
                   connPtr->fd);
            return NS_ERROR;

        } else {
            /*
             * Lock only when first == NS_FALSE.
             */
            Ns_MutexLock(&wrSockPtr1->c.file.fdlock);
            (void)ns_lseek(connPtr->fd, 0, SEEK_END);
        }
    }

    /*
     * For the time being, handle just "string data" in streaming
     * output (iovec bufs). Write the content to the spool file.
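     *
     * (Note, as an observation of the loop below: ns_write() may write
     * fewer bytes than requested; such short writes are logged but not
     * retried here.)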
 */
    {
        int i;

        for (i = 0; i < nbufs; i++) {
            ssize_t j = ns_write(connPtr->fd, bufs[i].iov_base, bufs[i].iov_len);

            if (j > 0) {
                wrote += (size_t)j;
                Ns_Log(Debug, "NsWriterQueue: fd %d [%d] spooled %" PRIdz " of %" PRIiovlen " OK %d",
                       connPtr->fd, i, j, bufs[i].iov_len, (j == (ssize_t)bufs[i].iov_len));
            } else {
                Ns_Log(Warning, "NsWriterQueue: spool to fd %d write operation failed",
                       connPtr->fd);
            }
        }
    }

    if (first) {
        //bufs = NULL;
        connPtr->nContentSent = wrote;
#ifndef _WIN32
        /*
         * ns_sock_set_blocking can't be used under Windows, since under
         * Windows sockets are not file descriptors.
         */
        (void)ns_sock_set_blocking(connPtr->fd, NS_FALSE);
#endif
        /*
         * Fall through to register the stream writer with the temp file.
         */
    } else {
        WriterSock *writerSockPtr;

        /*
         * This is a later streaming operation, where the writer job
         * (strWriter) was previously established.
         */
        assert(wrSockPtr1 != NULL);
        /*
         * Update the controlling variables (size and toRead) of the writer
         * job, and the length info for the access log, and trigger the
         * writer to notify it about the change.
         */
        writerSockPtr = (WriterSock *)connPtr->strWriter;
        writerSockPtr->size += wrote;
        writerSockPtr->c.file.toRead += wrote;
        Ns_MutexUnlock(&wrSockPtr1->c.file.fdlock);

        connPtr->nContentSent += wrote;
        if (likely(wrSockPtr1->queuePtr != NULL)) {
            SockTrigger(wrSockPtr1->queuePtr->pipe[1]);
        }
        WriterSockRelease(wrSockPtr1);
        status = NS_FILTER_BREAK;
    }

    return status;
}

/*
 *----------------------------------------------------------------------
 *
 * NsWriterQueue --
 *
 *      Submit a new job to the writer queue.
 *
 * Results:
 *
 *      NS_ERROR means that the writer thread refuses to accept this
 *      job and that the client (the connection thread) has to handle
 *      this data. NS_OK means that the writer thread takes care of
 *      transmitting the content to the client.
 *
 * Side effects:
 *      Potentially adding a job to the writer queue.
 *
 *----------------------------------------------------------------------
 */

Ns_ReturnCode
NsWriterQueue(Ns_Conn *conn, size_t nsend,
              Tcl_Channel chan, FILE *fp, int fd,
              struct iovec *bufs, int nbufs,
              const Ns_FileVec *filebufs, int nfilebufs,
              bool everysize)
{
    Conn          *connPtr;
    WriterSock    *wrSockPtr;
    SpoolerQueue  *queuePtr;
    DrvWriter     *wrPtr;
    bool           trigger = NS_FALSE;
    size_t         headerSize;
    Ns_ReturnCode  status = NS_OK;
    Ns_FileVec    *fbufs = NULL;
    int            nfbufs = 0;

    NS_NONNULL_ASSERT(conn != NULL);
    connPtr = (Conn *)conn;

    if (unlikely(connPtr->sockPtr == NULL)) {
        Ns_Log(Warning,
               "NsWriterQueue: called without sockPtr size %" PRIdz
               " bufs %d flags %.6x stream %.6x chan %p fd %d",
               nsend, nbufs, connPtr->flags, connPtr->flags & NS_CONN_STREAM,
               (void *)chan, fd);
        status = NS_ERROR;
        wrPtr = NULL;
    } else {
        wrPtr = &connPtr->sockPtr->drvPtr->writer;

        Ns_Log(DriverDebug,
               "NsWriterQueue: size %" PRIdz " bufs %p (%d) flags %.6x stream %.6x chan %p fd %d thread %d",
               nsend, (void *)bufs, nbufs, connPtr->flags,
               connPtr->flags & NS_CONN_STREAM, (void *)chan, fd,
               wrPtr->threads);

        if (unlikely(wrPtr->threads == 0)) {
            Ns_Log(DriverDebug, "NsWriterQueue: no writer threads configured");
            status = NS_ERROR;

        } else if (nsend < (size_t)wrPtr->writersize && !everysize && connPtr->fd == 0) {
            Ns_Log(DriverDebug, "NsWriterQueue: file is too small(%" PRIdz " < %" PRIdz ")",
                   nsend, wrPtr->writersize);
            status = NS_ERROR;
        }
    }
    if (status != NS_OK) {
        return status;
    }

    assert(wrPtr != NULL);

    /*
     * In streaming mode, set up a temporary fd which is used as input and
     * output. Streaming i/o will append to the file, while the writer will
     * read from it.
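     *
     * For orientation, the non-streaming branch below picks the input
     * source in this order (sketch): an open FILE* (dup of fileno(fp)),
     * an open fd (dup), a Tcl channel (dup of its handle), or an
     * Ns_FileVec array (each fd duplicated); in all cases the caller
     * keeps, and eventually closes, its original handle.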
*/ if (((connPtr->flags & NS_CONN_STREAM) != 0u) || connPtr->fd > 0) { if (wrPtr->doStream == NS_WRITER_STREAM_NONE) { status = NS_ERROR; } else if (unlikely(fp != NULL || fd != NS_INVALID_FD)) { Ns_Log(DriverDebug, "NsWriterQueue: does not stream from this source via writer"); status = NS_ERROR; } else { status = WriterSetupStreamingMode(connPtr, bufs, nbufs, &fd); } if (unlikely(status != NS_OK)) { if (status == NS_FILTER_BREAK) { status = NS_OK; } return status; } /* * As a result of successful WriterSetupStreamingMode(), we have fd * set. */ assert(fd != NS_INVALID_FD); } else { if (fp != NULL) { /* * The client provided an open file pointer and closes it */ fd = ns_dup(fileno(fp)); } else if (fd != NS_INVALID_FD) { /* * The client provided an open file descriptor and closes it */ fd = ns_dup(fd); } else if (chan != NULL) { ClientData clientData; /* * The client provided an open Tcl channel and closes it */ if (Tcl_GetChannelHandle(chan, TCL_READABLE, &clientData) != TCL_OK) { return NS_ERROR; } fd = ns_dup(PTR2INT(clientData)); } else if (filebufs != NULL && nfilebufs > 0) { /* * The client provided Ns_FileVec with open files. The client is * responsible for closing it, like in all other cases. */ size_t i; /* * This is the only case, where fbufs will be != NULL, * i.e. keeping a duplicate of the passed-in Ns_FileVec structure * for which the client is responsible. */ fbufs = (Ns_FileVec *)ns_calloc((size_t)nfilebufs, sizeof(Ns_FileVec)); nfbufs = nfilebufs; for (i = 0u; i < (size_t)nfilebufs; i++) { fbufs[i].fd = ns_dup(filebufs[i].fd); fbufs[i].length = filebufs[i].length; fbufs[i].offset = filebufs[i].offset; } /* * Place the fd of the first Ns_FileVec to fd. */ fd = fbufs[0].fd; Ns_Log(DriverDebug, "NsWriterQueue: filevec mode, take first fd %d tosend %lu", fd, nsend); } } Ns_Log(DriverDebug, "NsWriterQueue: writer threads %d nsend %" PRIdz " writersize %" PRIdz, wrPtr->threads, nsend, wrPtr->writersize); assert(connPtr->poolPtr != NULL); connPtr->poolPtr->stats.spool++; wrSockPtr = (WriterSock *)ns_calloc(1u, sizeof(WriterSock)); wrSockPtr->sockPtr = connPtr->sockPtr; wrSockPtr->poolPtr = connPtr->poolPtr; /* just for being able to trace back the origin, e.g. list */ wrSockPtr->sockPtr->timeout.sec = 0; wrSockPtr->flags = connPtr->flags; wrSockPtr->refCount = 1; /* * Take the rate limit from the connection. */ wrSockPtr->rateLimit = connPtr->rateLimit; if (wrSockPtr->rateLimit == -1) { /* * The value was not specified via connection. Use either the pool * limit as a base for the computation or fall back to the driver * default value. */ if (connPtr->poolPtr->rate.poolLimit > 0) { /* * Very optimistic start value, but values will float through via * bandwidth management. */ wrSockPtr->rateLimit = connPtr->poolPtr->rate.poolLimit / 2; } else { wrSockPtr->rateLimit = wrPtr->rateLimit; } } Ns_Log(WriterDebug, "### Writer(%d): initial rate limit %d KB/s", wrSockPtr->sockPtr->sock, wrSockPtr->rateLimit); /* * Make sure we have proper content length header for * keep-alive/pipelining. 
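     *
     * (When NS_CONN_STREAM is set, the total size is not known up front;
     * presumably Ns_ConnSetLengthHeader() then arranges for a framing
     * without a fixed Content-Length -- an assumption, not verified here.)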
 */
    Ns_ConnSetLengthHeader(conn, nsend, (wrSockPtr->flags & NS_CONN_STREAM) != 0u);

    /*
     * Flush the headers
     */
    if ((conn->flags & NS_CONN_SENTHDRS) == 0u) {
        Tcl_DString    ds;

        Ns_DStringInit(&ds);
        Ns_Log(DriverDebug, "### Writer(%d): add header", fd);
        conn->flags |= NS_CONN_SENTHDRS;
        (void)Ns_CompleteHeaders(conn, nsend, 0u, &ds);

        headerSize = (size_t)Ns_DStringLength(&ds);
        if (headerSize > 0u) {
            wrSockPtr->headerString = ns_strdup(Tcl_DStringValue(&ds));
        }
        Ns_DStringFree(&ds);
    } else {
        headerSize = 0u;
    }

    if (fd != NS_INVALID_FD) {
        /* maybe add mmap support for files (fd != NS_INVALID_FD) */

        wrSockPtr->fd = fd;
        wrSockPtr->c.file.bufs = fbufs;
        wrSockPtr->c.file.nbufs = nfbufs;
        Ns_Log(DriverDebug, "### Writer(%d) tosend %" PRIdz " files %d bufsize %" PRIdz,
               fd, nsend, nfbufs, wrPtr->bufsize);

        if (unlikely(headerSize >= wrPtr->bufsize)) {
            /*
             * We have a header which is larger than bufsize; place it
             * as "leftover" and use the headerString as buffer for file
             * reads (rather rare case)
             */
            wrSockPtr->c.file.buf = (unsigned char *)wrSockPtr->headerString;
            wrSockPtr->c.file.maxsize = headerSize;
            wrSockPtr->c.file.bufsize = headerSize;
            wrSockPtr->headerString = NULL;
        } else if (headerSize > 0u) {
            /*
             * We have a header that fits into the bufsize; place it
             * as "leftover" at the end of the buffer.
             */
            wrSockPtr->c.file.buf = ns_malloc(wrPtr->bufsize);
            memcpy(wrSockPtr->c.file.buf, wrSockPtr->headerString, headerSize);
            wrSockPtr->c.file.bufsize = headerSize;
            wrSockPtr->c.file.maxsize = wrPtr->bufsize;
            ns_free(wrSockPtr->headerString);
            wrSockPtr->headerString = NULL;
        } else {
            assert(wrSockPtr->headerString == NULL);
            wrSockPtr->c.file.buf = ns_malloc(wrPtr->bufsize);
            wrSockPtr->c.file.maxsize = wrPtr->bufsize;
        }
        wrSockPtr->c.file.bufoffset = 0;
        wrSockPtr->c.file.toRead = nsend;

    } else if (bufs != NULL) {
        int   i, j, headerbufs = (headerSize > 0u ? 1 : 0);

        wrSockPtr->fd = NS_INVALID_FD;

        if (nbufs+headerbufs < UIO_SMALLIOV) {
            wrSockPtr->c.mem.bufs = wrSockPtr->c.mem.preallocated_bufs;
        } else {
            Ns_Log(DriverDebug, "NsWriterQueue: alloc %d iovecs", nbufs);
            wrSockPtr->c.mem.bufs = ns_calloc((size_t)nbufs + (size_t)headerbufs,
                                              sizeof(struct iovec));
        }
        wrSockPtr->c.mem.nbufs = nbufs+headerbufs;
        if (headerbufs != 0) {
            wrSockPtr->c.mem.bufs[0].iov_base = wrSockPtr->headerString;
            wrSockPtr->c.mem.bufs[0].iov_len = headerSize;
        }

        if (connPtr->fmap.addr != NULL) {
            Ns_Log(DriverDebug, "NsWriterQueue: deliver fmapped %p", (void *)connPtr->fmap.addr);
            /*
             * Deliver an mmapped file, no need to copy content.
             */
            for (i = 0, j=headerbufs; i < nbufs; i++, j++) {
                wrSockPtr->c.mem.bufs[j].iov_base = bufs[i].iov_base;
                wrSockPtr->c.mem.bufs[j].iov_len = bufs[i].iov_len;
            }
            /*
             * Make a copy of the fmap structure and make clear that
             * we unmap in the writer thread.
             */
            wrSockPtr->c.mem.fmap = connPtr->fmap;
            connPtr->fmap.addr = NULL;
            /* header string will be freed via wrSockPtr->headerString */

        } else {
            /*
             * Deliver content from an iovec. The lifetime of the source is
             * unknown, so we have to copy the content.
             */
            for (i = 0, j=headerbufs; i < nbufs; i++, j++) {
                wrSockPtr->c.mem.bufs[j].iov_base = ns_malloc(bufs[i].iov_len);
                wrSockPtr->c.mem.bufs[j].iov_len = bufs[i].iov_len;
                memcpy(wrSockPtr->c.mem.bufs[j].iov_base, bufs[i].iov_base, bufs[i].iov_len);
            }
            /* header string will be freed via bufs[0] */
            wrSockPtr->headerString = NULL;
        }

    } else {
        ns_free(wrSockPtr);
        return NS_ERROR;
    }

    /*
     * Add the header size to the total size.
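     *
     * (After this point, "nsend" counts header plus body bytes; the header
     * bytes were placed either into c.file.buf as leftover or as bufs[0]
     * of the iovec array above.)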
 */
    nsend += headerSize;

    if (connPtr->clientData != NULL) {
        wrSockPtr->clientData = ns_strdup(connPtr->clientData);
    }
    wrSockPtr->startTime = *Ns_ConnStartTime(conn);

    /*
     * Set up the streaming context before potentially sending headers.
     */
    if ((wrSockPtr->flags & NS_CONN_STREAM) != 0u) {
        wrSockPtr->doStream = NS_WRITER_STREAM_ACTIVE;
        assert(connPtr->strWriter == NULL);
        /*
         * Add a reference to the stream writer to the connection such
         * that it can efficiently append to a stream when multiple output
         * operations happen. The backpointer (from the stream writer
         * to the connection) is needed to clear the reference to the
         * writer in case the writer is deleted. No locks are needed,
         * since nobody can share this structure yet.
         */
        connPtr->strWriter = (NsWriterSock *)wrSockPtr;
        wrSockPtr->connPtr = connPtr;
    }

    /*
     * Tell the connection that the writer handles the output (including
     * closing the connection to the client).
     */
    connPtr->flags |= NS_CONN_SENT_VIA_WRITER;

    wrSockPtr->keep = connPtr->keep > 0 ? NS_TRUE : NS_FALSE;
    wrSockPtr->size = nsend;

    Ns_Log(DriverDebug, "NsWriterQueue NS_CONN_SENT_VIA_WRITER connPtr %p", (void*)connPtr);

    if ((wrSockPtr->flags & NS_CONN_STREAM) == 0u) {
        Ns_Log(DriverDebug, "NsWriterQueue NS_CONN_SENT_VIA_WRITER connPtr %p clear sockPtr %p",
               (void*)connPtr, (void*)connPtr->sockPtr);
        connPtr->sockPtr = NULL;
        connPtr->flags |= NS_CONN_CLOSED;
        connPtr->nContentSent = nsend - headerSize;
    }

    /*
     * Get the next writer thread from the list; all writer requests are
     * rotated between all writer threads.
     */
    Ns_MutexLock(&wrPtr->lock);
    if (wrPtr->curPtr == NULL) {
        wrPtr->curPtr = wrPtr->firstPtr;
    }
    queuePtr = wrPtr->curPtr;
    wrPtr->curPtr = wrPtr->curPtr->nextPtr;
    Ns_MutexUnlock(&wrPtr->lock);

    Ns_Log(WriterDebug, "Writer(%d): started: id=%d fd=%d, "
           "size=%" PRIdz ", flags=%X, rate %d KB/s: %s",
           wrSockPtr->sockPtr->sock, queuePtr->id, wrSockPtr->fd,
           nsend, wrSockPtr->flags,
           wrSockPtr->rateLimit,
           connPtr->request.line);

    /*
     * Now add the new writer socket to the writer thread's queue.
     */
    wrSockPtr->queuePtr = queuePtr;

    Ns_MutexLock(&queuePtr->lock);
    if (queuePtr->sockPtr == NULL) {
        trigger = NS_TRUE;
    }
    Push(wrSockPtr, queuePtr->sockPtr);
    Ns_MutexUnlock(&queuePtr->lock);

    /*
     * Wake up the writer thread.
     */
    if (trigger) {
        SockTrigger(queuePtr->pipe[1]);
    }

    return NS_OK;
}

/*
 *----------------------------------------------------------------------
 *
 * DriverWriterFromObj --
 *
 *      Look up a driver by name and return its DrvWriter. When driverObj is
 *      NULL, get the driver from the conn.
 *
 * Results:
 *      Ns_ReturnCode
 *
 * Side effects:
 *      Sets an error message in interp in case of failure.
 *
 *----------------------------------------------------------------------
 */
static Ns_ReturnCode
DriverWriterFromObj(
    Tcl_Interp *interp,
    Tcl_Obj *driverObj,
    Ns_Conn *conn,
    DrvWriter **wrPtrPtr
) {
    Driver       *drvPtr;
    const char   *driverName = NULL;
    int           driverNameLen = 0;
    DrvWriter    *wrPtr = NULL;
    Ns_ReturnCode result;

    /*
     * If no driver is provided, take the current driver. The caller has
     * to make sure that in cases where no driver is specified, the
     * command is run in a connection thread.
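     *
     * (Note, as an observation: the strncmp() below compares only
     * driverNameLen bytes, so a supplied name that is a prefix of a
     * driver's threadName matches as well.)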
*/ if (driverObj == NULL) { if (conn != NULL) { driverName = Ns_ConnDriverName(conn); driverNameLen = (int)strlen(driverName); } } else { driverName = Tcl_GetStringFromObj(driverObj, &driverNameLen); } if (driverName != NULL) { for (drvPtr = firstDrvPtr; drvPtr != NULL; drvPtr = drvPtr->nextPtr) { if (strncmp(driverName, drvPtr->threadName, (size_t)driverNameLen) == 0) { if (drvPtr->writer.firstPtr != NULL) { wrPtr = &drvPtr->writer; } break; } } } if (unlikely(wrPtr == NULL)) { Ns_TclPrintfResult(interp, "no writer configured for a driver with name %s", driverName); result = NS_ERROR; } else { *wrPtrPtr = wrPtr; result = NS_OK; } return result; } /* *---------------------------------------------------------------------- * * WriterSubmitObjCmd - subcommand of NsTclWriterObjCmd -- * * Implements "ns_writer submit" command. * Send the provided data to the client. * * Results: * Standard Tcl result. * * Side effects: * None. * *---------------------------------------------------------------------- */ static int WriterSubmitObjCmd(ClientData UNUSED(clientData), Tcl_Interp *interp, int objc, Tcl_Obj *const* objv) { int result = TCL_OK; Ns_Conn *conn; Tcl_Obj *dataObj; Ns_ObjvSpec args[] = { {"data", Ns_ObjvObj, &dataObj, NULL}, {NULL, NULL, NULL, NULL} }; if (Ns_ParseObjv(NULL, args, interp, 2, objc, objv) != NS_OK || NsConnRequire(interp, NS_CONN_REQUIRE_ALL, &conn) != NS_OK) { result = TCL_ERROR; } else { int size; unsigned char *data = Tcl_GetByteArrayFromObj(dataObj, &size); if (data != NULL) { struct iovec vbuf; Ns_ReturnCode status; vbuf.iov_base = (void *)data; vbuf.iov_len = (size_t)size; status = NsWriterQueue(conn, (size_t)size, NULL, NULL, NS_INVALID_FD, &vbuf, 1, NULL, 0, NS_TRUE); Tcl_SetObjResult(interp, Tcl_NewBooleanObj(status == NS_OK ? 1 : 0)); } } return result; } /* *---------------------------------------------------------------------- * * WriterCheckInputParams - * * Helper command for WriterSubmitFileObjCmd and WriterSubmitFilesObjCmd * to check validity of filename, offset and size. * * Results: * Standard Tcl result. Returns on success also fd and nrbytes. * * Side effects: * None. * *---------------------------------------------------------------------- */ static int WriterCheckInputParams(Tcl_Interp *interp, const char *filenameString, size_t size, off_t offset, int *fdPtr, size_t *nrbytesPtr) { int result = TCL_OK, rc; struct stat st; Ns_Log(DriverDebug, "WriterCheckInputParams %s offset %" PROTd " size %" PRIdz, filenameString, offset, size); /* * Use stat() call to obtain information about the actual file to check * later the plausibility of the parameters. */ rc = stat(filenameString, &st); if (unlikely(rc != 0)) { Ns_TclPrintfResult(interp, "file does not exist '%s'", filenameString); result = TCL_ERROR; } else { size_t nrbytes = 0u; int fd; /* * Try to open the file and check offset and size parameters. */ fd = ns_open(filenameString, O_RDONLY | O_CLOEXEC, 0); if (unlikely(fd == NS_INVALID_FD)) { Ns_TclPrintfResult(interp, "could not open file '%s'", filenameString); result = TCL_ERROR; } else if (unlikely(offset > st.st_size) || offset < 0) { Ns_TclPrintfResult(interp, "offset must be a positive value less or equal filesize"); result = TCL_ERROR; } else if (size > 0) { if (unlikely((off_t)size + offset > st.st_size)) { Ns_TclPrintfResult(interp, "offset + size must be less or equal filesize"); result = TCL_ERROR; } else { nrbytes = (size_t)size; } } else { nrbytes = (size_t)st.st_size - (size_t)offset; } /* * When an offset is provide, jump to this offset. 
*/ if (offset > 0 && result == TCL_OK) { if (ns_lseek(fd, (off_t)offset, SEEK_SET) == -1) { Ns_TclPrintfResult(interp, "cannot seek to position %ld", (long)offset); result = TCL_ERROR; } } if (result == TCL_OK) { *fdPtr = fd; *nrbytesPtr = nrbytes; } else if (fd != NS_INVALID_FD) { /* * On invalid parameters, close the fd. */ ns_close(fd); } } return result; } /* *---------------------------------------------------------------------- * * WriterSubmitFileObjCmd - subcommand of NsTclWriterObjCmd -- * * Implements "ns_writer submitfile" command. * Send the provided file to the client. * * Results: * Standard Tcl result. * * Side effects: * None. * *---------------------------------------------------------------------- */ static int WriterSubmitFileObjCmd(ClientData UNUSED(clientData), Tcl_Interp *interp, int objc, Tcl_Obj *const* objv) { int result = TCL_OK; Ns_Conn *conn; char *fileNameString; int headers = 0; Tcl_WideInt offset = 0, size = 0; Ns_ObjvValueRange offsetRange = {0, LLONG_MAX}; Ns_ObjvValueRange sizeRange = {1, LLONG_MAX}; Ns_ObjvSpec lopts[] = { {"-headers", Ns_ObjvBool, &headers, INT2PTR(NS_TRUE)}, {"-offset", Ns_ObjvMemUnit, &offset, &offsetRange}, {"-size", Ns_ObjvMemUnit, &size, &sizeRange}, {NULL, NULL, NULL, NULL} }; Ns_ObjvSpec args[] = { {"file", Ns_ObjvString, &fileNameString, NULL}, {NULL, NULL, NULL, NULL} }; if (unlikely(Ns_ParseObjv(lopts, args, interp, 2, objc, objv) != NS_OK) || NsConnRequire(interp, NS_CONN_REQUIRE_ALL, &conn) != NS_OK) { result = TCL_ERROR; } else if (unlikely( Ns_ConnSockPtr(conn) == NULL )) { Ns_Log(Warning, "NsWriterQueue: called without valid sockPtr, maybe connection already closed"); Ns_TclPrintfResult(interp, "0"); result = TCL_OK; } else { size_t nrbytes = 0u; int fd = NS_INVALID_FD; result = WriterCheckInputParams(interp, fileNameString, (size_t)size, offset, &fd, &nrbytes); if (likely(result == TCL_OK)) { Ns_ReturnCode status; /* * The caller requested that we build required headers */ if (headers != 0) { Ns_ConnSetTypeHeader(conn, Ns_GetMimeType(fileNameString)); } status = NsWriterQueue(conn, nrbytes, NULL, NULL, fd, NULL, 0, NULL, 0, NS_TRUE); Tcl_SetObjResult(interp, Tcl_NewBooleanObj(status == NS_OK ? 1 : 0)); if (fd != NS_INVALID_FD) { (void) ns_close(fd); } else { Ns_Log(Warning, "WriterSubmitFileObjCmd called with invalid fd"); } } else if (fd != NS_INVALID_FD) { (void) ns_close(fd); } } return result; } /* *---------------------------------------------------------------------- * * WriterGetMemunitFromDict -- * * Helper function to obtain a memory unit from a dict structure, * optionally checking the value range. * * Results: * Standard Tcl result. * * Side effects: * On errors, an error message is left in the interpreter. 
* *---------------------------------------------------------------------- */ static int WriterGetMemunitFromDict(Tcl_Interp *interp, Tcl_Obj *dictObj, Tcl_Obj *keyObj, Ns_ObjvValueRange *rangePtr, Tcl_WideInt *valuePtr) { Tcl_Obj *intObj = NULL; int result; NS_NONNULL_ASSERT(interp != NULL); NS_NONNULL_ASSERT(dictObj != NULL); NS_NONNULL_ASSERT(keyObj != NULL); NS_NONNULL_ASSERT(valuePtr != NULL); result = Tcl_DictObjGet(interp, dictObj, keyObj, &intObj); if (result == TCL_OK && intObj != NULL) { result = Ns_TclGetMemUnitFromObj(interp, intObj, valuePtr); if (result == TCL_OK && rangePtr != NULL) { result = Ns_CheckWideRange(interp, Tcl_GetString(keyObj), rangePtr, *valuePtr); } } return result; } /* *---------------------------------------------------------------------- * * WriterSubmitFilesObjCmd - subcommand of NsTclWriterObjCmd -- * * Implements "ns_writer submitfiles" command. Send the provided files * to the client. "files" are provided as a list of dicts, where every * dict must contain a "filename" element and can contain an "-offset" * and/or a "-length" element. * * Results: * Standard Tcl result. * * Side effects: * None. * *---------------------------------------------------------------------- */ static int WriterSubmitFilesObjCmd(ClientData UNUSED(clientData), Tcl_Interp *interp, int objc, Tcl_Obj *const* objv) { int result = TCL_OK; Ns_Conn *conn; int headers = 0, nrFiles; Tcl_Obj *filesObj = NULL, **fileObjv; Ns_ObjvSpec lopts[] = { {"-headers", Ns_ObjvBool, &headers, INT2PTR(NS_TRUE)}, {NULL, NULL, NULL, NULL} }; Ns_ObjvSpec args[] = { {"files", Ns_ObjvObj, &filesObj, NULL}, {NULL, NULL, NULL, NULL} }; if (unlikely(Ns_ParseObjv(lopts, args, interp, 2, objc, objv) != NS_OK) || NsConnRequire(interp, NS_CONN_REQUIRE_ALL, &conn) != NS_OK) { result = TCL_ERROR; } else if (unlikely( Ns_ConnSockPtr(conn) == NULL )) { Ns_Log(Warning, "NsWriterQueue: called without valid sockPtr, " "maybe connection already closed"); Ns_TclPrintfResult(interp, "0"); result = TCL_OK; } else if (Tcl_ListObjGetElements(interp, filesObj, &nrFiles, &fileObjv) != TCL_OK) { Ns_TclPrintfResult(interp, "not a valid list of files: '%s'", Tcl_GetString(filesObj)); result = TCL_ERROR; } else if (nrFiles == 0) { Ns_TclPrintfResult(interp, "The provided list has to contain at least one file spec"); result = TCL_ERROR; } else { size_t totalbytes = 0u, i; Tcl_Obj *keys[3], *filenameObj = NULL; Ns_FileVec *filebufs; const char *firstFilenameString = NULL; Ns_ObjvValueRange offsetRange = {0, LLONG_MAX}; Ns_ObjvValueRange sizeRange = {1, LLONG_MAX}; filebufs = (Ns_FileVec *)ns_calloc((size_t)nrFiles, sizeof(Ns_FileVec)); keys[0] = Tcl_NewStringObj("filename", 8); keys[1] = Tcl_NewStringObj("-offset", 7); keys[2] = Tcl_NewStringObj("-size", 5); Tcl_IncrRefCount(keys[0]); Tcl_IncrRefCount(keys[1]); Tcl_IncrRefCount(keys[2]); for (i = 0u; i < (size_t)nrFiles; i++) { filebufs[i].fd = NS_INVALID_FD; } /* * Iterate over the list of dicts. */ for (i = 0u; i < (size_t)nrFiles; i++) { Tcl_WideInt offset = 0, size = 0; int rc, fd = NS_INVALID_FD; const char *filenameString; size_t nrbytes; /* * Get required "filename" element. 
*/ filenameObj = NULL; rc = Tcl_DictObjGet(interp, fileObjv[i], keys[0], &filenameObj); if (rc != TCL_OK || filenameObj == NULL) { Ns_TclPrintfResult(interp, "missing filename in dict '%s'", Tcl_GetString(fileObjv[i])); result = TCL_ERROR; break; } filenameString = Tcl_GetString(filenameObj); if (firstFilenameString == NULL) { firstFilenameString = filenameString; } /* * Get optional "-offset" and "-size" elements. */ if (WriterGetMemunitFromDict(interp, fileObjv[i], keys[1], &offsetRange, &offset) != TCL_OK) { result = TCL_ERROR; break; } if (WriterGetMemunitFromDict(interp, fileObjv[i], keys[2], &sizeRange, &size) != TCL_OK) { result = TCL_ERROR; break; } /* * Check validity of the provided values */ result = WriterCheckInputParams(interp, Tcl_GetString(filenameObj), (size_t)size, (off_t)offset, &fd, &nrbytes); if (result != TCL_OK) { break; } filebufs[i].fd = fd; filebufs[i].offset = offset; filebufs[i].length = nrbytes; totalbytes = totalbytes + (size_t)nrbytes; } Tcl_DecrRefCount(keys[0]); Tcl_DecrRefCount(keys[1]); Tcl_DecrRefCount(keys[2]); /* * If everything is ok, submit the request to the writer queue. */ if (result == TCL_OK) { Ns_ReturnCode status; if (headers != 0 && firstFilenameString != NULL) { Ns_ConnSetTypeHeader(conn, Ns_GetMimeType(firstFilenameString)); } status = NsWriterQueue(conn, totalbytes, NULL, NULL, NS_INVALID_FD, NULL, 0, filebufs, nrFiles, NS_TRUE); /* * Provide a soft error like for "ns_writer submitfile". */ Tcl_SetObjResult(interp, Tcl_NewBooleanObj(status == NS_OK ? 1 : 0)); } /* * The NsWriterQueue() API makes the usual duplicates of the file * descriptors and the Ns_FileVec structure, so we have to cleanup * here. */ for (i = 0u; i < (size_t)nrFiles; i++) { if (filebufs[i].fd != NS_INVALID_FD) { (void) ns_close(filebufs[i].fd); } } ns_free(filebufs); } return result; } /* *---------------------------------------------------------------------- * * WriterListObjCmd - subcommand of NsTclWriterObjCmd -- * * Implements "ns_writer list" command. * List the current writer jobs. * * Results: * Standard Tcl result. * * Side effects: * None. * *---------------------------------------------------------------------- */ static int WriterListObjCmd(ClientData UNUSED(clientData), Tcl_Interp *interp, int objc, Tcl_Obj *const* objv) { int result = TCL_OK; NsServer *servPtr = NULL; Ns_ObjvSpec lopts[] = { {"-server", Ns_ObjvServer, &servPtr, NULL}, {NULL, NULL, NULL, NULL} }; if (unlikely(Ns_ParseObjv(lopts, NULL, interp, 2, objc, objv) != NS_OK)) { result = TCL_ERROR; } else { Tcl_DString ds, *dsPtr = &ds; const Driver *drvPtr; SpoolerQueue *queuePtr; Tcl_DStringInit(dsPtr); for (drvPtr = firstDrvPtr; drvPtr != NULL; drvPtr = drvPtr->nextPtr) { const DrvWriter *wrPtr; /* * If server was specified, list only results from this server. 
*/ if (servPtr != NULL && servPtr != drvPtr->servPtr) { continue; } wrPtr = &drvPtr->writer; queuePtr = wrPtr->firstPtr; while (queuePtr != NULL) { const WriterSock *wrSockPtr; Ns_MutexLock(&queuePtr->lock); wrSockPtr = queuePtr->curPtr; while (wrSockPtr != NULL) { char ipString[NS_IPADDR_SIZE]; ns_inet_ntop((struct sockaddr *)&(wrSockPtr->sockPtr->sa), ipString,sizeof(ipString)); (void) Ns_DStringNAppend(dsPtr, "{", 1); (void) Ns_DStringAppendTime(dsPtr, &wrSockPtr->startTime); (void) Ns_DStringNAppend(dsPtr, " ", 1); (void) Ns_DStringAppend(dsPtr, queuePtr->threadName); (void) Ns_DStringNAppend(dsPtr, " ", 1); (void) Ns_DStringAppend(dsPtr, drvPtr->threadName); (void) Ns_DStringNAppend(dsPtr, " ", 1); (void) Ns_DStringAppend(dsPtr, NsPoolName(wrSockPtr->poolPtr->pool)); (void) Ns_DStringNAppend(dsPtr, " ", 1); (void) Ns_DStringAppend(dsPtr, ipString); (void) Ns_DStringPrintf(dsPtr, " %d %" PRIdz " %" TCL_LL_MODIFIER "d %d %d ", wrSockPtr->fd, wrSockPtr->size, wrSockPtr->nsent, wrSockPtr->currentRate, wrSockPtr->rateLimit); (void) Ns_DStringAppendElement(dsPtr, (wrSockPtr->clientData != NULL) ? wrSockPtr->clientData : NS_EMPTY_STRING); (void) Ns_DStringNAppend(dsPtr, "} ", 2); wrSockPtr = wrSockPtr->nextPtr; } Ns_MutexUnlock(&queuePtr->lock); queuePtr = queuePtr->nextPtr; } } Tcl_DStringResult(interp, &ds); } return result; } /* *---------------------------------------------------------------------- * * WriterSizeObjCmd - subcommand of NsTclWriterObjCmd -- * * Implements "ns_writer size" command. * Sets or queries size limit for sending via writer. * * Results: * Standard Tcl result. * * Side effects: * None. * *---------------------------------------------------------------------- */ static int WriterSizeObjCmd(ClientData UNUSED(clientData), Tcl_Interp *interp, int objc, Tcl_Obj *const* objv) { int result = TCL_OK; Tcl_Obj *driverObj = NULL; Ns_Conn *conn = NULL; Tcl_WideInt intValue = -1; const char *firstArgString; Ns_ObjvValueRange range = {1024, INT_MAX}; Ns_ObjvSpec *opts, optsNew[] = { {"-driver", Ns_ObjvObj, &driverObj, NULL}, {NULL, NULL, NULL, NULL} }; Ns_ObjvSpec *args, argsNew[] = { {"?value", Ns_ObjvMemUnit, &intValue, &range}, {NULL, NULL, NULL, NULL} }; Ns_ObjvSpec argsLegacy[] = { {"driver", Ns_ObjvObj, &driverObj, NULL}, {"?value", Ns_ObjvMemUnit, &intValue, &range}, {NULL, NULL, NULL, NULL} }; firstArgString = objc > 2 ? Tcl_GetString(objv[2]) : NULL; if (firstArgString != NULL) { if (*firstArgString != '-' && ((objc == 3 && CHARTYPE(digit, *firstArgString) == 0) || objc == 4)) { args = argsLegacy; opts = NULL; Ns_LogDeprecated(objv, objc, "ns_writer size ?-driver drv? ?size?", NULL); } else { args = argsNew; opts = optsNew; } } else { args = argsNew; opts = optsNew; } if (Ns_ParseObjv(opts, args, interp, 2, objc, objv) != NS_OK) { result = TCL_ERROR; } else if ((driverObj == NULL) && NsConnRequire(interp, NS_CONN_REQUIRE_ALL, &conn) != NS_OK) { result = TCL_ERROR; } else { DrvWriter *wrPtr; if (DriverWriterFromObj(interp, driverObj, conn, &wrPtr) != NS_OK) { result = TCL_ERROR; } else if (intValue != -1) { /* * The optional argument was provided. */ wrPtr->writersize = (size_t)intValue; } if (result == TCL_OK) { Tcl_SetObjResult(interp, Tcl_NewIntObj((int)wrPtr->writersize)); } } return result; } /* *---------------------------------------------------------------------- * * WriterStreamingObjCmd - subcommand of NsTclWriterObjCmd -- * * Implements "ns_writer streaming" command. * Sets or queries streaming state of the writer. * * Results: * Standard Tcl result. 
* * Side effects: * None. * *---------------------------------------------------------------------- */ static int WriterStreamingObjCmd(ClientData UNUSED(clientData), Tcl_Interp *interp, int objc, Tcl_Obj *const* objv) { int boolValue = -1, result = TCL_OK; Tcl_Obj *driverObj = NULL; Ns_Conn *conn = NULL; const char *firstArgString; Ns_ObjvSpec *opts, optsNew[] = { {"-driver", Ns_ObjvObj, &driverObj, NULL}, {NULL, NULL, NULL, NULL} }; Ns_ObjvSpec *args, argsNew[] = { {"?value", Ns_ObjvBool, &boolValue, NULL}, {NULL, NULL, NULL, NULL} }; Ns_ObjvSpec argsLegacy[] = { {"driver", Ns_ObjvObj, &driverObj, NULL}, {"?value", Ns_ObjvBool, &boolValue, NULL}, {NULL, NULL, NULL, NULL} }; firstArgString = objc > 2 ? Tcl_GetString(objv[2]) : NULL; if (firstArgString != NULL) { int argValue; if (*firstArgString != '-' && ((objc == 3 && Tcl_ExprBoolean(interp, firstArgString, &argValue) == TCL_OK) || objc == 4)) { args = argsLegacy; opts = NULL; Ns_LogDeprecated(objv, objc, "ns_writer streaming ?-driver drv? ?value?", NULL); } else { args = argsNew; opts = optsNew; } } else { args = argsNew; opts = optsNew; } if (Ns_ParseObjv(opts, args, interp, 2, objc, objv) != NS_OK) { result = TCL_ERROR; } else if ((driverObj == NULL) && NsConnRequire(interp, NS_CONN_REQUIRE_ALL, &conn) != NS_OK) { result = TCL_ERROR; } else { DrvWriter *wrPtr; if (DriverWriterFromObj(interp, driverObj, conn, &wrPtr) != NS_OK) { result = TCL_ERROR; } else if (boolValue != -1) { /* * The optional argument was provided. */ wrPtr->doStream = (boolValue == 1 ? NS_WRITER_STREAM_ACTIVE : NS_WRITER_STREAM_NONE); } if (result == TCL_OK) { Tcl_SetObjResult(interp, Tcl_NewIntObj(wrPtr->doStream == NS_WRITER_STREAM_ACTIVE ? 1 : 0)); } } return result; } /* *---------------------------------------------------------------------- * * NsTclWriterObjCmd -- * * Implements "ns_writer" command for submitting data to the writer * threads and to configure and query the state of the writer threads at * runtime. * * Results: * Standard Tcl result. * * Side effects: * None. * *---------------------------------------------------------------------- */ int NsTclWriterObjCmd(ClientData clientData, Tcl_Interp *interp, int objc, Tcl_Obj *const* objv) { const Ns_SubCmdSpec subcmds[] = { {"list", WriterListObjCmd}, {"size", WriterSizeObjCmd}, {"streaming", WriterStreamingObjCmd}, {"submit", WriterSubmitObjCmd}, {"submitfile", WriterSubmitFileObjCmd}, {"submitfiles",WriterSubmitFilesObjCmd}, {NULL, NULL} }; return Ns_SubcmdObjv(subcmds, clientData, interp, objc, objv); } /* *====================================================================== * Async (log) writer: Write asynchronously to a disk *====================================================================== */ /* *---------------------------------------------------------------------- * * NsAsyncWriterQueueEnable -- * * Enable async writing and start the AsyncWriterThread if * necessary * * Results: * None. * * Side effects: * Potentially starting a thread and set "stopped" to NS_FALSE. * *---------------------------------------------------------------------- */ void NsAsyncWriterQueueEnable(void) { if (Ns_ConfigBool(NS_CONFIG_PARAMETERS, "asynclogwriter", NS_FALSE) == NS_TRUE) { SpoolerQueue *queuePtr; /* * In case, the async writer has not started, the static variable * asyncWriter is NULL. */ if (asyncWriter == NULL) { Ns_MutexLock(&reqLock); if (likely(asyncWriter == NULL)) { /* * Allocate and initialize writer thread context. 
*/ asyncWriter = ns_calloc(1u, sizeof(AsyncWriter)); Ns_MutexUnlock(&reqLock); Ns_MutexSetName2(&asyncWriter->lock, "ns:driver", "async-writer"); /* * Allocate and initialize a Spooler Queue for this thread. */ queuePtr = ns_calloc(1u, sizeof(SpoolerQueue)); Ns_MutexSetName2(&queuePtr->lock, "ns:driver:async-writer", "queue"); asyncWriter->firstPtr = queuePtr; /* * Start the spooler queue */ SpoolerQueueStart(queuePtr, AsyncWriterThread); } else { Ns_MutexUnlock(&reqLock); } } assert(asyncWriter != NULL); queuePtr = asyncWriter->firstPtr; assert(queuePtr != NULL); Ns_MutexLock(&queuePtr->lock); queuePtr->stopped = NS_FALSE; Ns_MutexUnlock(&queuePtr->lock); } } /* *---------------------------------------------------------------------- * * NsAsyncWriterQueueDisable -- * * Disable async writing but don't touch the writer thread. * * Results: * None. * * Side effects: * Disable async writing by setting stopped to 1. * *---------------------------------------------------------------------- */ void NsAsyncWriterQueueDisable(bool shutdown) { if (asyncWriter != NULL) { SpoolerQueue *queuePtr = asyncWriter->firstPtr; Ns_Time timeout; assert(queuePtr != NULL); Ns_GetTime(&timeout); Ns_IncrTime(&timeout, nsconf.shutdowntimeout.sec, nsconf.shutdowntimeout.usec); Ns_MutexLock(&queuePtr->lock); queuePtr->stopped = NS_TRUE; queuePtr->shutdown = shutdown; /* * Trigger the AsyncWriter Thread to drain the spooler queue. */ SockTrigger(queuePtr->pipe[1]); (void)Ns_CondTimedWait(&queuePtr->cond, &queuePtr->lock, &timeout); Ns_MutexUnlock(&queuePtr->lock); if (shutdown) { ns_free(queuePtr); ns_free(asyncWriter); asyncWriter = NULL; } } } /* *---------------------------------------------------------------------- * * NsAsyncWrite -- * * Perform an asynchronous write operation via a writer thread in * case a writer thread is configured and running. The intention * of the asynchronous write operations is to reduce latencies in * connection threads. * * Results: * NS_OK, when write was performed via writer thread, * NS_ERROR otherwise (but data is written). * * Side effects: * I/O Operation. * *---------------------------------------------------------------------- */ Ns_ReturnCode NsAsyncWrite(int fd, const char *buffer, size_t nbyte) { Ns_ReturnCode returnCode = NS_OK; NS_NONNULL_ASSERT(buffer != NULL); /* * If the async writer has not started or is deactivated, behave like a * ns_write() command. If the ns_write() fails, we can't do much, since * the writing of an error message to the log might bring us into an * infinite loop. So we print simple to stderr. */ if (asyncWriter == NULL || asyncWriter->firstPtr->stopped) { ssize_t written = ns_write(fd, buffer, nbyte); if (unlikely(written != (ssize_t)nbyte)) { int retries = 100; /* * Don't go into an infinite loop when multiple subsequent disk * write operations return 0 (maybe disk full). */ returnCode = NS_ERROR; do { if (written < 0) { fprintf(stderr, "error during async write (fd %d): %s\n", fd, strerror(errno)); break; } /* * All partial writes (written >= 0) */ WriteWarningRaw("partial write", fd, nbyte, written); nbyte -= (size_t)written; buffer += written; written = ns_write(fd, buffer, nbyte); if (written == (ssize_t)nbyte) { returnCode = NS_OK; break; } } while (retries-- > 0); } } else { SpoolerQueue *queuePtr; bool trigger = NS_FALSE; const AsyncWriteData *wdPtr; AsyncWriteData *newWdPtr; /* * Allocate a writer cmd and initialize it. 
In order to provide an * interface compatible to ns_write(), we copy the provided data, * such it can be freed by the caller. When we would give up the * interface, we could free the memory block after writing, and * save a malloc/free operation on the data. */ newWdPtr = ns_calloc(1u, sizeof(AsyncWriteData)); newWdPtr->fd = fd; newWdPtr->bufsize = nbyte; newWdPtr->data = ns_malloc(nbyte + 1u); memcpy(newWdPtr->data, buffer, newWdPtr->bufsize); newWdPtr->buf = newWdPtr->data; newWdPtr->size = newWdPtr->bufsize; /* * Now add new writer socket to the writer thread's queue. In most * cases, the queue will be empty. */ queuePtr = asyncWriter->firstPtr; assert(queuePtr != NULL); Ns_MutexLock(&queuePtr->lock); wdPtr = queuePtr->sockPtr; if (wdPtr != NULL) { newWdPtr->nextPtr = queuePtr->sockPtr; queuePtr->sockPtr = newWdPtr; } else { queuePtr->sockPtr = newWdPtr; trigger = NS_TRUE; } Ns_MutexUnlock(&queuePtr->lock); /* * Wake up writer thread if desired */ if (trigger) { SockTrigger(queuePtr->pipe[1]); } } return returnCode; } /* *---------------------------------------------------------------------- * * AsyncWriterRelease -- * * Deallocate write data. * * Results: * None * * Side effects: * free memory * *---------------------------------------------------------------------- */ static void AsyncWriterRelease(AsyncWriteData *wdPtr) { NS_NONNULL_ASSERT(wdPtr != NULL); ns_free(wdPtr->data); ns_free(wdPtr); } /* *---------------------------------------------------------------------- * * AsyncWriterThread -- * * Thread that implements non-blocking write operations to files * * Results: * None. * * Side effects: * Write to files. * *---------------------------------------------------------------------- */ static void AsyncWriterThread(void *arg) { SpoolerQueue *queuePtr = (SpoolerQueue*)arg; char charBuffer[1]; int pollTimeout; Ns_ReturnCode status; bool stopping; AsyncWriteData *curPtr, *nextPtr, *writePtr; PollData pdata; Ns_ThreadSetName("-asynclogwriter%d-", queuePtr->id); queuePtr->threadName = Ns_ThreadGetName(); /* * Allocate and initialize controlling variables */ PollCreate(&pdata); writePtr = NULL; stopping = NS_FALSE; /* * Loop forever until signaled to shutdown and all * connections are complete and gracefully closed. */ while (!stopping) { /* * Always listen to the trigger pipe. We could as well perform * in the writer thread async write operations, but for the * effect of reducing latency in connection threads, this is * not an issue. To keep things simple, we perform the * typically small write operations without testing for POLLOUT. */ PollReset(&pdata); (void)PollSet(&pdata, queuePtr->pipe[0], (short)POLLIN, NULL); if (writePtr == NULL) { pollTimeout = 30 * 1000; } else { pollTimeout = 0; } /* * Wait for data */ /*n =*/ (void) PollWait(&pdata, pollTimeout); /* * Select and drain the trigger pipe if necessary. 
*/ if (PollIn(&pdata, 0)) { if (ns_recv(queuePtr->pipe[0], charBuffer, 1u, 0) != 1) { Ns_Fatal("asynclogwriter: trigger ns_recv() failed: %s", ns_sockstrerror(ns_sockerrno)); } if (queuePtr->stopped) { /* * Drain the queue from everything */ for (curPtr = writePtr; curPtr != NULL; curPtr = curPtr->nextPtr) { ssize_t written = ns_write(curPtr->fd, curPtr->buf, curPtr->bufsize); if (unlikely(written != (ssize_t)curPtr->bufsize)) { WriteWarningRaw("drain writer", curPtr->fd, curPtr->bufsize, written); } } writePtr = NULL; for (curPtr = queuePtr->sockPtr; curPtr != NULL; curPtr = curPtr->nextPtr) { ssize_t written = ns_write(curPtr->fd, curPtr->buf, curPtr->bufsize); if (unlikely(written != (ssize_t)curPtr->bufsize)) { WriteWarningRaw("drain queue", curPtr->fd, curPtr->bufsize, written); } } queuePtr->sockPtr = NULL; /* * Notify the caller (normally * NsAsyncWriterQueueDisable()) that we are done */ Ns_CondBroadcast(&queuePtr->cond); } } /* * Write to all available file descriptors */ curPtr = writePtr; writePtr = NULL; while (curPtr != NULL) { ssize_t written; nextPtr = curPtr->nextPtr; status = NS_OK; /* * Write the actual data and allow for partial write operations. */ written = ns_write(curPtr->fd, curPtr->buf, curPtr->bufsize); if (unlikely(written < 0)) { status = NS_ERROR; } else { curPtr->size -= (size_t)written; curPtr->nsent += written; curPtr->bufsize -= (size_t)written; if (curPtr->data != NULL) { curPtr->buf += written; } } if (unlikely(status != NS_OK)) { AsyncWriterRelease(curPtr); queuePtr->queuesize--; } else { /* * The write operation was successful. Check if there * is some remaining data to write. If not we are done * with this request can release the write buffer. */ if (curPtr->size > 0u) { Push(curPtr, writePtr); } else { AsyncWriterRelease(curPtr); queuePtr->queuesize--; } } curPtr = nextPtr; } /* * Check for shutdown */ stopping = queuePtr->shutdown; if (stopping) { curPtr = queuePtr->sockPtr; assert(writePtr == NULL); while (curPtr != NULL) { ssize_t written = ns_write(curPtr->fd, curPtr->buf, curPtr->bufsize); if (unlikely(written != (ssize_t)curPtr->bufsize)) { WriteWarningRaw("shutdown", curPtr->fd, curPtr->bufsize, written); } curPtr = curPtr->nextPtr; } } else { /* * Add fresh jobs to the writer queue. This means actually to * move jobs from queuePtr->sockPtr (kept name for being able * to use the same queue as above) to the currently active * jobs in queuePtr->curPtr. */ Ns_MutexLock(&queuePtr->lock); curPtr = queuePtr->sockPtr; queuePtr->sockPtr = NULL; while (curPtr != NULL) { nextPtr = curPtr->nextPtr; Push(curPtr, writePtr); queuePtr->queuesize++; curPtr = nextPtr; } queuePtr->curPtr = writePtr; Ns_MutexUnlock(&queuePtr->lock); } } PollFree(&pdata); queuePtr->stopped = NS_TRUE; Ns_Log(Notice, "exiting"); } /* *---------------------------------------------------------------------- * * AsyncLogfileWriteObjCmd - * * Implements "ns_asynclogfile write" command. Write to a file * descriptor via async writer thread. The command handles partial write * operations internally. * * Results: * Standard Tcl result. * * Side effects: * None. 
* *---------------------------------------------------------------------- */ static int AsyncLogfileWriteObjCmd(ClientData UNUSED(clientData), Tcl_Interp *interp, int objc, Tcl_Obj *const* objv) { int result = TCL_OK, binary = (int)NS_FALSE, sanitize; Tcl_Obj *stringObj; int fd = 0; Ns_ObjvValueRange fd_range = {0, INT_MAX}; Ns_ObjvValueRange sanitize_range = {0, 2}; Ns_ObjvSpec opts[] = { {"-binary", Ns_ObjvBool, &binary, INT2PTR(NS_TRUE)}, {"-sanitize", Ns_ObjvInt, &sanitize, &sanitize_range}, {NULL, NULL, NULL, NULL} }; Ns_ObjvSpec args[] = { {"fd", Ns_ObjvInt, &fd, &fd_range}, {"buffer", Ns_ObjvObj, &stringObj, NULL}, {NULL, NULL, NULL, NULL} }; /* * Take the config value as default for "-sanitize", but let the used * override it on a per-case basis. */ sanitize = nsconf.sanitize_logfiles; if (unlikely(Ns_ParseObjv(opts, args, interp, 2, objc, objv) != NS_OK)) { result = TCL_ERROR; } else { const char *buffer; int length; Ns_ReturnCode rc; if (binary == (int)NS_TRUE || NsTclObjIsByteArray(stringObj)) { buffer = (const char *) Tcl_GetByteArrayFromObj(stringObj, &length); } else { buffer = Tcl_GetStringFromObj(stringObj, &length); } if (length > 0) { if (sanitize > 0) { Tcl_DString ds; bool lastCharNewline = (buffer[length-1] == '\n'); Tcl_DStringInit(&ds); if (lastCharNewline) { length --; } Ns_DStringAppendPrintable(&ds, sanitize == 2, buffer, (size_t)length); if (lastCharNewline) { Tcl_DStringAppend(&ds, "\n", 1); } rc = NsAsyncWrite(fd, ds.string, (size_t)ds.length); Tcl_DStringFree(&ds); } else { rc = NsAsyncWrite(fd, buffer, (size_t)length); } if (rc != NS_OK) { Ns_TclPrintfResult(interp, "ns_asynclogfile: error during write operation on fd %d: %s", fd, Tcl_PosixError(interp)); result = TCL_ERROR; } } else { result = TCL_OK; } } return result; } /* *---------------------------------------------------------------------- * * AsyncLogfileOpenObjCmd - * * Implements "ns_asynclogfile open" command. The command opens a * write-only log file and return a thread-shareable handle (actually a * numeric file descriptor) which can be used in subsequent "write" or * "close" operations. * * Results: * Standard Tcl result. * * Side effects: * None. 
* *---------------------------------------------------------------------- */ static int AsyncLogfileOpenObjCmd(ClientData UNUSED(clientData), Tcl_Interp *interp, int objc, Tcl_Obj *const* objv) { int result = TCL_OK; unsigned int flags = O_APPEND; char *fileNameString; Tcl_Obj *flagsObj = NULL; Ns_ObjvTable flagTable[] = { {"APPEND", O_APPEND}, {"EXCL", O_EXCL}, #ifdef O_DSYNC {"DSYNC", O_DSYNC}, #endif #ifdef O_SYNC {"SYNC", O_SYNC}, #endif {"TRUNC", O_TRUNC}, {NULL, 0u} }; Ns_ObjvSpec args[] = { {"filename", Ns_ObjvString, &fileNameString, NULL}, {"?flags", Ns_ObjvObj, &flagsObj, NULL}, //{"mode", Ns_ObjvString, &mode, NULL}, {NULL, NULL, NULL, NULL} }; if (unlikely(Ns_ParseObjv(NULL, args, interp, 2, objc, objv) != NS_OK)) { result = TCL_ERROR; } else if (flagsObj != NULL) { Tcl_Obj **ov; int oc; result = Tcl_ListObjGetElements(interp, flagsObj, &oc, &ov); if (result == TCL_OK && oc > 0) { int i, opt; flags = 0u; for (i = 0; i < oc; i++) { result = Tcl_GetIndexFromObjStruct(interp, ov[i], flagTable, (int)sizeof(flagTable[0]), "flag", 0, &opt); if (result != TCL_OK) { break; } else { flags = flagTable[opt].value; } } } } if (result == TCL_OK) { int fd; fd = ns_open(fileNameString, (int)(O_CREAT | O_WRONLY | O_CLOEXEC | flags), 0644); if (unlikely(fd == NS_INVALID_FD)) { Ns_TclPrintfResult(interp, "could not open file '%s': %s", fileNameString, Tcl_PosixError(interp)); result = TCL_ERROR; } else { Tcl_SetObjResult(interp, Tcl_NewIntObj(fd)); } } return result; } /* *---------------------------------------------------------------------- * * AsyncLogfileCloseObjCmd - * * Implements "ns_asynclogfile close" command. Close the logfile * previously created via "ns_asynclogfile open". * * Results: * Standard Tcl result. * * Side effects: * None. * *---------------------------------------------------------------------- */ static int AsyncLogfileCloseObjCmd(ClientData UNUSED(clientData), Tcl_Interp *interp, int objc, Tcl_Obj *const* objv) { int fd, result = TCL_OK; Ns_ObjvValueRange range = {0, INT_MAX}; Ns_ObjvSpec args[] = { {"fd", Ns_ObjvInt, &fd, &range}, {NULL, NULL, NULL, NULL} }; if (unlikely(Ns_ParseObjv(NULL, args, interp, 2, objc, objv) != NS_OK)) { result = TCL_ERROR; } else { int rc = ns_close(fd); if (rc != 0) { Ns_TclPrintfResult(interp, "could not close fd %d: %s", fd, Tcl_PosixError(interp)); result = TCL_ERROR; } } return result; } /* *---------------------------------------------------------------------- * * NsTclAsyncLogfileObjCmd - * * Wrapper for "ns_asynclogfile open|write|close" commands. * * Results: * Standard Tcl result. * * Side effects: * None. * *---------------------------------------------------------------------- */ int NsTclAsyncLogfileObjCmd(ClientData clientData, Tcl_Interp *interp, int objc, Tcl_Obj *const* objv) { const Ns_SubCmdSpec subcmds[] = { {"open", AsyncLogfileOpenObjCmd}, {"write", AsyncLogfileWriteObjCmd}, {"close", AsyncLogfileCloseObjCmd}, {NULL, NULL} }; return Ns_SubcmdObjv(subcmds, clientData, interp, objc, objv); } /* *---------------------------------------------------------------------- * * LookupDriver -- * * Find a matching driver for the specified protocol and optionally the * specified driver name. * * Results: * Driver pointer or NULL on failure. * * Side effects: * When no driver is found, an error is left in the interp result. 
* *---------------------------------------------------------------------- */ static Driver * LookupDriver(Tcl_Interp *interp, const char* protocol, const char *driverName) { Driver *drvPtr; NS_NONNULL_ASSERT(interp != NULL); NS_NONNULL_ASSERT(protocol != NULL); for (drvPtr = firstDrvPtr; drvPtr != NULL; drvPtr = drvPtr->nextPtr) { Ns_Log(DriverDebug, "... check Driver proto <%s> server %s name %s location %s", drvPtr->protocol, drvPtr->server, drvPtr->threadName, drvPtr->location); if (STREQ(drvPtr->protocol, protocol)) { if (driverName == NULL) { /* * If there is no driver name given, take the first driver * with the matching protocol. */ break; } else if (STREQ(drvPtr->moduleName, driverName)) { /* * The driver name (name of the loaded module) is equal */ break; } } } if (drvPtr == NULL) { if (driverName != NULL) { Ns_TclPrintfResult(interp, "no driver for protocol '%s' & driver name '%s' found", protocol, driverName); } else { Ns_TclPrintfResult(interp, "no driver for protocol '%s' found", protocol); } } return drvPtr; } /* *---------------------------------------------------------------------- * * NSDriverClientOpen -- * * Open a client HTTP connection using the driver interface * * Results: * Tcl return code. * * Side effects: * Opening a connection * *---------------------------------------------------------------------- */ int NSDriverClientOpen(Tcl_Interp *interp, const char *driverName, const char *url, const char *httpMethod, const char *version, const Ns_Time *timeoutPtr, Sock **sockPtrPtr) { char *protocol, *host, *portString, *path, *tail, *url2; int result = TCL_OK; NS_NONNULL_ASSERT(interp != NULL); NS_NONNULL_ASSERT(url != NULL); NS_NONNULL_ASSERT(httpMethod != NULL); NS_NONNULL_ASSERT(version != NULL); NS_NONNULL_ASSERT(sockPtrPtr != NULL); url2 = ns_strdup(url); /* * We need here a fully qualified URL, otherwise raise an error */ if (unlikely(Ns_ParseUrl(url2, &protocol, &host, &portString, &path, &tail) != NS_OK) || protocol == NULL || host == NULL || path == NULL || tail == NULL) { Ns_Log(Notice, "driver: invalid URL '%s' passed to NSDriverClientOpen", url2); result = TCL_ERROR; } else { Driver *drvPtr; unsigned short portNr = 0u; /* make static checker happy */ assert(protocol != NULL); assert(host != NULL); assert(path != NULL); assert(tail != NULL); /* * Find a matching driver for the specified protocol and optionally * the specified driver name. 
*/ drvPtr = LookupDriver(interp, protocol, driverName); if (drvPtr == NULL) { result = TCL_ERROR; } else if (portString != NULL) { portNr = (unsigned short) strtol(portString, NULL, 10); } else if (drvPtr->defport != 0u) { /* * Get the default port from the driver structure; */ portNr = drvPtr->defport; } else { Ns_TclPrintfResult(interp, "no default port for protocol '%s' defined", protocol); result = TCL_ERROR; } if (result == TCL_OK) { NS_SOCKET sock; Ns_ReturnCode status; sock = Ns_SockTimedConnect2(host, portNr, NULL, 0u, timeoutPtr, &status); if (sock == NS_INVALID_SOCKET) { Ns_SockConnectError(interp, host, portNr, status); result = TCL_ERROR; } else { const char *query; Tcl_DString ds, *dsPtr = &ds; Request *reqPtr; Sock *sockPtr; assert(drvPtr != NULL); sockPtr = SockNew(drvPtr); sockPtr->sock = sock; sockPtr->servPtr = drvPtr->servPtr; if (sockPtr->servPtr == NULL) { const NsInterp *itPtr = NsGetInterpData(interp); sockPtr->servPtr = itPtr->servPtr; } RequestNew(sockPtr); Ns_GetTime(&sockPtr->acceptTime); reqPtr = sockPtr->reqPtr; Tcl_DStringInit(dsPtr); Ns_DStringAppend(dsPtr, httpMethod); Ns_StrToUpper(Ns_DStringValue(dsPtr)); Tcl_DStringAppend(dsPtr, " /", 2); if (*path != '\0') { if (*path == '/') { path ++; } Tcl_DStringAppend(dsPtr, path, -1); Tcl_DStringAppend(dsPtr, "/", 1); } Tcl_DStringAppend(dsPtr, tail, -1); Tcl_DStringAppend(dsPtr, " HTTP/", 6); Tcl_DStringAppend(dsPtr, version, -1); reqPtr->request.line = Ns_DStringExport(dsPtr); reqPtr->request.method = ns_strdup(httpMethod); reqPtr->request.protocol = ns_strdup(protocol); reqPtr->request.host = ns_strdup(host); query = strchr(tail, INTCHAR('?')); if (query != NULL) { reqPtr->request.query = ns_strdup(query+1); } else { reqPtr->request.query = NULL; } /*Ns_Log(Notice, "REQUEST LINE <%s> query <%s>", reqPtr->request.line, reqPtr->request.query);*/ *sockPtrPtr = sockPtr; } } } ns_free(url2); return result; } /* *---------------------------------------------------------------------- * * NSDriverSockNew -- * * Create a Sock structure based on the driver interface * * Results: * Tcl return code. * * Side effects: * Accepting a connection * *---------------------------------------------------------------------- */ int NSDriverSockNew(Tcl_Interp *interp, NS_SOCKET sock, const char *protocol, const char *driverName, const char *methodName, Sock **sockPtrPtr) { int result = TCL_OK; Driver *drvPtr; NS_NONNULL_ASSERT(interp != NULL); NS_NONNULL_ASSERT(protocol != NULL); NS_NONNULL_ASSERT(methodName != NULL); NS_NONNULL_ASSERT(sockPtrPtr != NULL); drvPtr = LookupDriver(interp, protocol, driverName); if (drvPtr == NULL) { result = TCL_ERROR; } else { Sock *sockPtr; Tcl_DString ds, *dsPtr = &ds; Request *reqPtr; sockPtr = SockNew(drvPtr); sockPtr->servPtr = drvPtr->servPtr; sockPtr->sock = sock; RequestNew(sockPtr); // not sure if needed // peerAddr is missing Ns_GetTime(&sockPtr->acceptTime); reqPtr = sockPtr->reqPtr; Tcl_DStringInit(dsPtr); Ns_DStringAppend(dsPtr, methodName); Ns_StrToUpper(Ns_DStringValue(dsPtr)); reqPtr->request.line = Ns_DStringExport(dsPtr); reqPtr->request.method = ns_strdup(methodName); reqPtr->request.protocol = ns_strdup(protocol); reqPtr->request.host = NULL; reqPtr->request.query = NULL; /* Ns_Log(Notice, "REQUEST LINE <%s>", reqPtr->request.line);*/ *sockPtrPtr = sockPtr; } return result; } /* * Local Variables: * mode: c * c-basic-offset: 4 * fill-column: 78 * indent-tabs-mode: nil * End: */
ChunkedDecode(Request *reqPtr, bool update) { const Tcl_DString *bufPtr; const char *end, *chunkStart; bool success = NS_TRUE; NS_NONNULL_ASSERT(reqPtr != NULL); bufPtr = &reqPtr->buffer; end = bufPtr->string + bufPtr->length; chunkStart = bufPtr->string + reqPtr->chunkStartOff; while (reqPtr->chunkStartOff < (size_t)bufPtr->length) { char *p = strstr(chunkStart, "\r\n"); size_t chunk_length; if (p == NULL) { Ns_Log(DriverDebug, "ChunkedDecode: chunk did not find end-of-line"); success = NS_FALSE; break; } *p = '\0'; chunk_length = (size_t)strtol(chunkStart, NULL, 16); *p = '\r'; if (p + 2 + chunk_length > end) { Ns_Log(DriverDebug, "ChunkedDecode: chunk length past end of buffer"); success = NS_FALSE; break; } if (update) { char *writeBuffer = bufPtr->string + reqPtr->chunkWriteOff; memmove(writeBuffer, p + 2, chunk_length); reqPtr->chunkWriteOff += chunk_length; *(writeBuffer + chunk_length) = '\0'; } reqPtr->chunkStartOff += (size_t)(p - chunkStart) + 4u + chunk_length; chunkStart = bufPtr->string + reqPtr->chunkStartOff; } return success; }
ChunkedDecode(Request *reqPtr, bool update) { const Tcl_DString *bufPtr; const char *end, *chunkStart; SockState result = SOCK_READY; NS_NONNULL_ASSERT(reqPtr != NULL); bufPtr = &reqPtr->buffer; end = bufPtr->string + bufPtr->length; chunkStart = bufPtr->string + reqPtr->chunkStartOff; while (reqPtr->chunkStartOff < (size_t)bufPtr->length) { char *p = strstr(chunkStart, "\r\n"); long chunkLength; if (p == NULL) { Ns_Log(DriverDebug, "ChunkedDecode: chunk did not find end-of-line"); result = SOCK_MORE; break; } *p = '\0'; chunkLength = strtol(chunkStart, NULL, 16); *p = '\r'; if (chunkLength < 0) { Ns_Log(Warning, "ChunkedDecode: negative chunk length"); result = SOCK_BADREQUEST; break; } *p = '\r'; if (p + 2 + chunkLength > end) { Ns_Log(DriverDebug, "ChunkedDecode: chunk length past end of buffer"); result = SOCK_MORE; break; } if (update) { char *writeBuffer = bufPtr->string + reqPtr->chunkWriteOff; memmove(writeBuffer, p + 2, (size_t)chunkLength); reqPtr->chunkWriteOff += (size_t)chunkLength; *(writeBuffer + chunkLength) = '\0'; } reqPtr->chunkStartOff += (size_t)(p - chunkStart) + 4u + (size_t)chunkLength; chunkStart = bufPtr->string + reqPtr->chunkStartOff; } return result; }
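For context on the patch above: the pre-fix code cast strtol()'s result straight to size_t, so a chunk header such as "-8\r\n" produced a huge unsigned length that could defeat the "p + 2 + chunk_length > end" bounds test and drive the later memmove() out of bounds. Below is a minimal standalone sketch of the added guard; parse_chunk_len() and its messages are hypothetical, only the strtol()/negative-length logic mirrors the patch.

/*
 * Minimal sketch of the CVE-2020-13111 guard (hypothetical helper, not
 * NaviServer code): reject a negative hex chunk length before any cast
 * to size_t can turn it into a huge value.
 */
#include <stdio.h>
#include <stdlib.h>

static int parse_chunk_len(const char *field, long *lenPtr)
{
    long len = strtol(field, NULL, 16);   /* accepts a leading '-' */

    if (len < 0) {
        return -1;                        /* SOCK_BADREQUEST upstream */
    }
    *lenPtr = len;
    return 0;
}

int main(void)
{
    long len;

    if (parse_chunk_len("-8", &len) != 0) {
        printf("rejected negative chunk length\n");
    }
    if (parse_chunk_len("1a", &len) == 0) {
        printf("accepted chunk length %ld\n", len);   /* prints 26 */
    }
    return 0;
}

Without the guard, (size_t)-8 is SIZE_MAX - 7; adding such an offset to a pointer wraps in practice, so the old end-of-buffer comparison could pass and memmove() would write far past the buffer, which is why the record tags this as CWE-20 (improper input validation) leading to CWE-787 (out-of-bounds write).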
{'added': [(290, 'static SockState ChunkedDecode(Request *reqPtr, bool update)'),
           (3377, ' * SOCK_READY when chunk was complete, SOCK_MORE when more data is'),
           (3378, ' * requried, or some error condition.'),
           (3381, ' * Updates the buffer if update is true (and adjusts'),
           (3382, ' * reqPtr->chunkWriteOff). Updates always reqPtr->chunkStartOff to allow'),
           (3383, ' * incremental operations.'),
           (3387, 'static SockState'),
           (3392, ' SockState result = SOCK_READY;'),
           (3402, ' long chunkLength;'),
           (3406, ' result = SOCK_MORE;'),
           (3411, ' chunkLength = strtol(chunkStart, NULL, 16);'),
           (3412, " *p = '\\r';"),
           (3413, ' if (chunkLength < 0) {'),
           (3414, ' Ns_Log(Warning, "ChunkedDecode: negative chunk length");'),
           (3415, ' result = SOCK_BADREQUEST;'),
           (3416, ' break;'),
           (3417, ' }'),
           (3420, ' if (p + 2 + chunkLength > end) {'),
           (3422, ' result = SOCK_MORE;'),
           (3428, ' memmove(writeBuffer, p + 2, (size_t)chunkLength);'),
           (3429, ' reqPtr->chunkWriteOff += (size_t)chunkLength;'),
           (3430, " *(writeBuffer + chunkLength) = '\\0';"),
           (3432, ' reqPtr->chunkStartOff += (size_t)(p - chunkStart) + 4u + (size_t)chunkLength;'),
           (3436, ' return result;'),
           (4125, ' Ns_Log(Debug, "=== length < avail (length %" PRIuz'),
           (4134, ' SockState chunkState;'),
           (4135, ' size_t currentContentLength;'),
           (4137, ' chunkState = ChunkedDecode(reqPtr, NS_TRUE);'),
           (4147, ' if ((chunkState == SOCK_MORE)'),
           (4153, ''),
           (4154, ' } else if (chunkState != SOCK_READY) {'),
           (4155, ' return chunkState;')],
 'deleted': [(290, 'static bool ChunkedDecode(Request *reqPtr, bool update)'),
             (3377, ' * NS_TRUE when chunk was complete, NS_FALSE otherwise'),
             (3380, ' * updates the buffer if update is true (and adjusts reqPtr->chunkWriteOff)'),
             (3381, ' * updates always reqPtr->chunkStartOff to allow incremental operations'),
             (3385, 'static bool'),
             (3390, ' bool success = NS_TRUE;'),
             (3400, ' size_t chunk_length;'),
             (3404, ' success = NS_FALSE;'),
             (3409, ' chunk_length = (size_t)strtol(chunkStart, NULL, 16);'),
             (3412, ' if (p + 2 + chunk_length > end) {'),
             (3414, ' success = NS_FALSE;'),
             (3420, ' memmove(writeBuffer, p + 2, chunk_length);'),
             (3421, ' reqPtr->chunkWriteOff += chunk_length;'),
             (3422, " *(writeBuffer + chunk_length) = '\\0';"),
             (3424, ' reqPtr->chunkStartOff += (size_t)(p - chunkStart) + 4u + chunk_length;'),
             (3428, ' return success;'),
             (4117, ' Ns_Log(Dev, "=== length < avail (length %" PRIuz'),
             (4126, ' bool complete;'),
             (4127, ' size_t currentContentLength;'),
             (4129, ' complete = ChunkedDecode(reqPtr, NS_TRUE);'),
             (4139, ' if ((!complete)')]}
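The diff also shows why the return type changed from bool to SockState: the caller (SockParse, per the added lines around 4134-4155) must distinguish an incomplete chunk (SOCK_MORE, wait for more input) from a malformed one (SOCK_BADREQUEST, abort). A hedged sketch of that tri-state pattern follows; decode_step() is a stand-in for the real ChunkedDecode(), while the enum values match the SockState constants used in this file.

/*
 * Sketch of the tri-state caller pattern introduced by the diff.
 * decode_step() is a stub standing in for ChunkedDecode().
 */
#include <stdio.h>

typedef enum {
    SOCK_READY = 0,
    SOCK_MORE = 1,
    SOCK_BADREQUEST = -9
} SockState;

static SockState decode_step(int bytesAvailable)
{
    /* Stub: short input means "incomplete", negative means "malformed". */
    if (bytesAvailable < 0) {
        return SOCK_BADREQUEST;
    }
    return (bytesAvailable < 4) ? SOCK_MORE : SOCK_READY;
}

static SockState handle_input(int bytesAvailable)
{
    SockState chunkState = decode_step(bytesAvailable);

    if (chunkState == SOCK_MORE) {
        printf("need more data\n");         /* keep buffer, poll again */
    } else if (chunkState != SOCK_READY) {
        printf("reject request (%d)\n", (int)chunkState);
    }
    return chunkState;                      /* propagated, as in SockParse() */
}

int main(void)
{
    handle_input(2);    /* incomplete -> SOCK_MORE */
    handle_input(-1);   /* malformed  -> SOCK_BADREQUEST */
    handle_input(8);    /* complete   -> SOCK_READY, silent */
    return 0;
}

With the old bool return, both failure modes collapsed into NS_FALSE (see success = NS_FALSE in func_before), so a malformed request was indistinguishable from one that simply needed another read.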
32
21
4343
29706
https://bitbucket.org/naviserver/naviserver
CVE-2020-13111
['CWE-20', 'CWE-787']
driver.c
SockParse
/* * The contents of this file are subject to the Mozilla Public License * Version 1.1 (the "License"); you may not use this file except in * compliance with the License. You may obtain a copy of the License at * http://mozilla.org/. * * Software distributed under the License is distributed on an "AS IS" * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See * the License for the specific language governing rights and limitations * under the License. * * The Original Code is AOLserver Code and related documentation * distributed by AOL. * * The Initial Developer of the Original Code is America Online, * Inc. Portions created by AOL are Copyright (C) 1999 America Online, * Inc. All Rights Reserved. * * Alternatively, the contents of this file may be used under the terms * of the GNU General Public License (the "GPL"), in which case the * provisions of GPL are applicable instead of those above. If you wish * to allow use of your version of this file only under the terms of the * GPL and not to allow others to use your version of this file under the * License, indicate your decision by deleting the provisions above and * replace them with the notice and other provisions required by the GPL. * If you do not delete the provisions above, a recipient may use your * version of this file under either the License or the GPL. */ /* * driver.c -- * * Connection I/O for loadable socket drivers. */ #include "nsd.h" /* * The following are valid driver state flags. */ #define DRIVER_STARTED 1u #define DRIVER_STOPPED 2u #define DRIVER_SHUTDOWN 4u #define DRIVER_FAILED 8u /* * Constants for SockState return and reason codes. */ typedef enum { SOCK_READY = 0, SOCK_MORE = 1, SOCK_SPOOL = 2, SOCK_ERROR = -1, SOCK_CLOSE = -2, SOCK_CLOSETIMEOUT = -3, SOCK_READTIMEOUT = -4, SOCK_WRITETIMEOUT = -5, SOCK_READERROR = -6, SOCK_WRITEERROR = -7, SOCK_SHUTERROR = -8, SOCK_BADREQUEST = -9, SOCK_ENTITYTOOLARGE = -10, SOCK_BADHEADER = -11, SOCK_TOOMANYHEADERS = -12 } SockState; /* * Subset for spooler states */ typedef enum { SPOOLER_CLOSE = SOCK_CLOSE, SPOOLER_OK = SOCK_READY, SPOOLER_READERROR = SOCK_READERROR, SPOOLER_WRITEERROR = SOCK_WRITEERROR, SPOOLER_CLOSETIMEOUT = SOCK_CLOSETIMEOUT } SpoolerState; typedef struct { SpoolerState spoolerState; SockState sockState; } SpoolerStateMap; /* * ServerMap maintains Host header to server mappings. */ typedef struct ServerMap { NsServer *servPtr; char location[1]; } ServerMap; /* * The following maintains the spooler state mapping */ static const SpoolerStateMap spoolerStateMap[] = { {SPOOLER_CLOSE, SOCK_CLOSE}, {SPOOLER_READERROR, SOCK_READERROR}, {SPOOLER_WRITEERROR, SOCK_WRITEERROR}, {SPOOLER_CLOSETIMEOUT, SOCK_CLOSETIMEOUT}, {SPOOLER_OK, SOCK_READY} }; /* * The following structure manages polling. The PollIn macro is * used for the common case of checking for readability. */ typedef struct PollData { unsigned int nfds; /* Number of fds being monitored. */ unsigned int maxfds; /* Max fds (will grow as needed). */ struct pollfd *pfds; /* Dynamic array of poll structs. */ Ns_Time timeout; /* Min timeout, if any, for next spin. */ } PollData; #define PollIn(ppd, i) (((ppd)->pfds[(i)].revents & POLLIN) == POLLIN ) #define PollOut(ppd, i) (((ppd)->pfds[(i)].revents & POLLOUT) == POLLOUT) #define PollHup(ppd, i) (((ppd)->pfds[(i)].revents & POLLHUP) == POLLHUP) /* * Collected informationof writer threads for per pool rates, necessary for * per pool bandwidth management. 
*/ typedef struct ConnPoolInfo { size_t threadSlot; int currentPoolRate; int deltaPercentage; } ConnPoolInfo; /* * The following structure maintains writer socket */ typedef struct WriterSock { struct WriterSock *nextPtr; struct Sock *sockPtr; struct SpoolerQueue *queuePtr; struct Conn *connPtr; SpoolerState status; int err; int refCount; unsigned int flags; Tcl_WideInt nsent; size_t size; NsWriterStreamState doStream; int fd; char *headerString; struct ConnPool *poolPtr; union { struct { struct iovec *bufs; /* incoming bufs to be sent */ int nbufs; int bufIdx; struct iovec sbufs[UIO_SMALLIOV]; /* scratch bufs for handling partial sends */ int nsbufs; int sbufIdx; struct iovec preallocated_bufs[UIO_SMALLIOV]; struct FileMap fmap; } mem; struct { size_t maxsize; size_t bufsize; off_t bufoffset; size_t toRead; unsigned char *buf; Ns_FileVec *bufs; int nbufs; int currentbuf; Ns_Mutex fdlock; } file; } c; char *clientData; Ns_Time startTime; int rateLimit; int currentRate; ConnPoolInfo *infoPtr; bool keep; } WriterSock; /* * Async writer definitions */ typedef struct AsyncWriter { Ns_Mutex lock; /* Lock around writer queues */ SpoolerQueue *firstPtr; /* List of writer threads */ } AsyncWriter; /* * AsyncWriteData is similar to WriterSock */ typedef struct AsyncWriteData { struct AsyncWriteData *nextPtr; char *data; int fd; Tcl_WideInt nsent; size_t size; size_t bufsize; const char *buf; } AsyncWriteData; static AsyncWriter *asyncWriter = NULL; /* * Static functions defined in this file. */ static Ns_ThreadProc DriverThread; static Ns_ThreadProc SpoolerThread; static Ns_ThreadProc WriterThread; static Ns_ThreadProc AsyncWriterThread; static Tcl_ObjCmdProc WriterListObjCmd; static Tcl_ObjCmdProc WriterSizeObjCmd; static Tcl_ObjCmdProc WriterStreamingObjCmd; static Tcl_ObjCmdProc WriterSubmitObjCmd; static Tcl_ObjCmdProc WriterSubmitFileObjCmd; static Tcl_ObjCmdProc AsyncLogfileWriteObjCmd; static Tcl_ObjCmdProc AsyncLogfileOpenObjCmd; static Tcl_ObjCmdProc AsyncLogfileCloseObjCmd; static Ns_ReturnCode DriverWriterFromObj(Tcl_Interp *interp, Tcl_Obj *driverObj, Ns_Conn *conn, DrvWriter **wrPtrPtr) NS_GNUC_NONNULL(1) NS_GNUC_NONNULL(4); static NS_SOCKET DriverListen(Driver *drvPtr, const char *bindaddr) NS_GNUC_NONNULL(1) NS_GNUC_NONNULL(2); static NS_DRIVER_ACCEPT_STATUS DriverAccept(Sock *sockPtr, NS_SOCKET sock) NS_GNUC_NONNULL(1); static bool DriverKeep(Sock *sockPtr) NS_GNUC_NONNULL(1); static void DriverClose(Sock *sockPtr) NS_GNUC_NONNULL(1); static Ns_ReturnCode DriverInit(const char *server, const char *moduleName, const char *threadName, const Ns_DriverInitData *init, NsServer *servPtr, const char *path, const char *bindaddrs, const char *defserver, const char *host) NS_GNUC_NONNULL(2) NS_GNUC_NONNULL(3) NS_GNUC_NONNULL(4) NS_GNUC_NONNULL(6) NS_GNUC_NONNULL(7) NS_GNUC_NONNULL(9); static bool DriverModuleInitialized(const char *module) NS_GNUC_NONNULL(1); static void SockSetServer(Sock *sockPtr) NS_GNUC_NONNULL(1); static SockState SockAccept(Driver *drvPtr, NS_SOCKET sock, Sock **sockPtrPtr, const Ns_Time *nowPtr) NS_GNUC_NONNULL(1); static Ns_ReturnCode SockQueue(Sock *sockPtr, const Ns_Time *timePtr) NS_GNUC_NONNULL(1); static Sock *SockNew(Driver *drvPtr) NS_GNUC_NONNULL(1) NS_GNUC_RETURNS_NONNULL; static void SockRelease(Sock *sockPtr, SockState reason, int err) NS_GNUC_NONNULL(1); static void SockError(Sock *sockPtr, SockState reason, int err) NS_GNUC_NONNULL(1); static void SockSendResponse(Sock *sockPtr, int code, const char *errMsg) NS_GNUC_NONNULL(1) NS_GNUC_NONNULL(3); static 
void SockTrigger(NS_SOCKET sock); static void SockTimeout(Sock *sockPtr, const Ns_Time *nowPtr, const Ns_Time *timeout) NS_GNUC_NONNULL(1); static void SockClose(Sock *sockPtr, int keep) NS_GNUC_NONNULL(1); static SockState SockRead(Sock *sockPtr, int spooler, const Ns_Time *timePtr) NS_GNUC_NONNULL(1); static SockState SockParse(Sock *sockPtr) NS_GNUC_NONNULL(1); static void SockPoll(Sock *sockPtr, short type, PollData *pdata) NS_GNUC_NONNULL(1) NS_GNUC_NONNULL(3); static int SockSpoolerQueue(Driver *drvPtr, Sock *sockPtr) NS_GNUC_NONNULL(1) NS_GNUC_NONNULL(2); static void SpoolerQueueStart(SpoolerQueue *queuePtr, Ns_ThreadProc *proc) NS_GNUC_NONNULL(2); static void SpoolerQueueStop(SpoolerQueue *queuePtr, const Ns_Time *timeoutPtr, const char *name) NS_GNUC_NONNULL(2) NS_GNUC_NONNULL(3); static void PollCreate(PollData *pdata) NS_GNUC_NONNULL(1); static void PollFree(PollData *pdata) NS_GNUC_NONNULL(1); static void PollReset(PollData *pdata) NS_GNUC_NONNULL(1); static NS_POLL_NFDS_TYPE PollSet(PollData *pdata, NS_SOCKET sock, short type, const Ns_Time *timeoutPtr) NS_GNUC_NONNULL(1); static int PollWait(const PollData *pdata, int timeout) NS_GNUC_NONNULL(1); static bool ChunkedDecode(Request *reqPtr, bool update) NS_GNUC_NONNULL(1); static WriterSock *WriterSockRequire(const Conn *connPtr) NS_GNUC_NONNULL(1); static void WriterSockRelease(WriterSock *wrSockPtr) NS_GNUC_NONNULL(1); static SpoolerState WriterReadFromSpool(WriterSock *curPtr) NS_GNUC_NONNULL(1); static SpoolerState WriterSend(WriterSock *curPtr, int *err) NS_GNUC_NONNULL(1) NS_GNUC_NONNULL(2); static Ns_ReturnCode WriterSetupStreamingMode(Conn *connPtr, struct iovec *bufs, int nbufs, int *fdPtr) NS_GNUC_NONNULL(1) NS_GNUC_NONNULL(2) NS_GNUC_NONNULL(4); static void WriterSockFileVecCleanup(WriterSock *wrSockPtr) NS_GNUC_NONNULL(1); static int WriterGetMemunitFromDict(Tcl_Interp *interp, Tcl_Obj *dictObj, Tcl_Obj *keyObj, Ns_ObjvValueRange *rangePtr, Tcl_WideInt *valuePtr) NS_GNUC_NONNULL(1) NS_GNUC_NONNULL(2) NS_GNUC_NONNULL(3) NS_GNUC_NONNULL(5); static void AsyncWriterRelease(AsyncWriteData *wdPtr) NS_GNUC_NONNULL(1); static void WriteWarningRaw(const char *msg, int fd, size_t wantWrite, ssize_t written) NS_GNUC_NONNULL(1); static const char *GetSockStateName(SockState sockState); static size_t EndOfHeader(Sock *sockPtr) NS_GNUC_NONNULL(1); static void RequestNew(Sock *sockPtr) NS_GNUC_NONNULL(1); static void RequestFree(Sock *sockPtr) NS_GNUC_NONNULL(1); static void LogBuffer(Ns_LogSeverity severity, const char *msg, const char *buffer, size_t len) NS_GNUC_NONNULL(2) NS_GNUC_NONNULL(3); static void ServerMapEntryAdd(Tcl_DString *dsPtr, const char *host, NsServer *servPtr, Driver *drvPtr, bool addDefaultMapEntry) NS_GNUC_NONNULL(1) NS_GNUC_NONNULL(2) NS_GNUC_NONNULL(3) NS_GNUC_NONNULL(4); static Driver *LookupDriver(Tcl_Interp *interp, const char* protocol, const char *driverName) NS_GNUC_NONNULL(1) NS_GNUC_NONNULL(2); static void WriterPerPoolRates(WriterSock *writePtr, Tcl_HashTable *pools) NS_GNUC_NONNULL(1) NS_GNUC_NONNULL(2); static ConnPoolInfo *WriterGetInfoPtr(WriterSock *curPtr, Tcl_HashTable *pools) NS_GNUC_NONNULL(1) NS_GNUC_NONNULL(2); /* * Global variables defined in this file. 
*/ //NS_EXTERN Ns_LogSeverity Ns_LogAccessDebug; Ns_LogSeverity Ns_LogTaskDebug; Ns_LogSeverity Ns_LogRequestDebug; Ns_LogSeverity Ns_LogConnchanDebug; Ns_LogSeverity Ns_LogUrlspaceDebug; Ns_LogSeverity Ns_LogAccessDebug; Ns_LogSeverity Ns_LogTimeoutDebug; NS_EXPORT Ns_LogSeverity Ns_LogAccessDebug; bool NsWriterBandwidthManagement = NS_FALSE; static Ns_LogSeverity WriterDebug; /* Severity at which to log verbose debugging. */ static Ns_LogSeverity DriverDebug; /* Severity at which to log verbose debugging. */ static Ns_Mutex reqLock = NULL; /* Lock for allocated Request structure pool */ static Ns_Mutex writerlock = NULL; /* Lock updating streaming information in the writer */ static Request *firstReqPtr = NULL; /* Allocated request structures kept in a pool */ static Driver *firstDrvPtr = NULL; /* First in list of all drivers */ #define Push(x, xs) ((x)->nextPtr = (xs), (xs) = (x)) /* *---------------------------------------------------------------------- * * WriteWarningRaw -- * * Write a warning message to stderr. This function is for cases, where * writing to Ns_Log can't be used (e.g. in the AsyncWriter, which is * used for writing also to the system log). * * Results: * None. * * Side effects: * Line to stderr. * *---------------------------------------------------------------------- */ static void WriteWarningRaw(const char *msg, int fd, size_t wantWrite, ssize_t written) { fprintf(stderr, "%s: Warning: wanted to write %" PRIuz " bytes, wrote %ld to file descriptor %d\n", msg, wantWrite, (long)written, fd); } /* *---------------------------------------------------------------------- * * GetSockStateName -- * * Return human readable names for StockState values. * * Results: * string * * Side effects: * None. * *---------------------------------------------------------------------- */ static const char * GetSockStateName(SockState sockState) { int sockStateInt = (int)sockState; static const char *sockStateStrings[] = { "SOCK_READY", "SOCK_MORE", "SOCK_SPOOL", "SOCK_ERROR", "SOCK_CLOSE", "SOCK_CLOSETIMEOUT", "SOCK_READTIMEOUT", "SOCK_WRITETIMEOUT", "SOCK_READERROR", "SOCK_WRITEERROR", "SOCK_SHUTERROR", "SOCK_BADREQUEST", "SOCK_ENTITYTOOLARGE", "SOCK_BADHEADER", "SOCK_TOOMANYHEADERS", NULL }; if (sockStateInt < 0) { sockStateInt = (- sockStateInt) + 2; } assert(sockStateInt < Ns_NrElements(sockStateStrings)); return sockStateStrings[sockStateInt]; } /* *---------------------------------------------------------------------- * * NsInitDrivers -- * * Init drivers system. * * Results: * None. * * Side effects: * None. * *---------------------------------------------------------------------- */ void NsInitDrivers(void) { DriverDebug = Ns_CreateLogSeverity("Debug(ns:driver)"); WriterDebug = Ns_CreateLogSeverity("Debug(writer)"); Ns_LogTaskDebug = Ns_CreateLogSeverity("Debug(task)"); Ns_LogRequestDebug = Ns_CreateLogSeverity("Debug(request)"); Ns_LogConnchanDebug = Ns_CreateLogSeverity("Debug(connchan)"); Ns_LogUrlspaceDebug = Ns_CreateLogSeverity("Debug(urlspace)"); Ns_LogAccessDebug = Ns_CreateLogSeverity("Debug(access)"); Ns_LogTimeoutDebug = Ns_CreateLogSeverity("Debug(timeout)"); Ns_MutexInit(&reqLock); Ns_MutexInit(&writerlock); Ns_MutexSetName2(&reqLock, "ns:driver", "requestpool"); Ns_MutexSetName2(&writerlock, "ns:writer", "stream"); } /* *---------------------------------------------------------------------- * * DriverModuleInitialized -- * * Check if a driver with the specified name is already initialized. * * Results: * Boolean * * Side effects: * None. 
 *
 *----------------------------------------------------------------------
 */
static bool
DriverModuleInitialized(const char *module)
{
    Driver *drvPtr;
    bool    found = NS_FALSE;

    NS_NONNULL_ASSERT(module != NULL);

    for (drvPtr = firstDrvPtr; drvPtr != NULL; drvPtr = drvPtr->nextPtr) {

        if (strcmp(drvPtr->moduleName, module) == 0) {
            found = NS_TRUE;
            Ns_Log(Notice, "Driver %s is already initialized", module);
            break;
        }
    }

    return found;
}


/*
 *----------------------------------------------------------------------
 *
 * Ns_DriverInit --
 *
 *      Initialize a driver.
 *
 * Results:
 *      NS_OK if initialized, NS_ERROR if config or other error.
 *
 * Side effects:
 *      Listen socket will be opened later in NsStartDrivers.
 *
 *----------------------------------------------------------------------
 */
Ns_ReturnCode
Ns_DriverInit(const char *server, const char *module, const Ns_DriverInitData *init)
{
    Ns_ReturnCode status = NS_OK;
    NsServer     *servPtr = NULL;
    bool          alreadyInitialized = NS_FALSE;

    NS_NONNULL_ASSERT(module != NULL);
    NS_NONNULL_ASSERT(init != NULL);

    /*
     * If a server is provided, servPtr must be set.
     */
    if (server != NULL) {
        servPtr = NsGetServer(server);

        if (unlikely(servPtr == NULL)) {
            Ns_Log(Bug, "cannot lookup server structure for server: %s", server);
            status = NS_ERROR;
        }
    } else {
        alreadyInitialized = DriverModuleInitialized(module);
    }

    /*
     * Check the version of the driver.
     */
    if (status == NS_OK && init->version < NS_DRIVER_VERSION_4) {
        Ns_Log(Warning, "%s: driver version is too old (version %d); version 4 is recommended",
               module, init->version);
    }
#ifdef HAVE_IPV6
    if (status == NS_OK && init->version < NS_DRIVER_VERSION_3) {
        Ns_Log(Error, "%s: driver version is too old (version %d) and does not support IPv6",
               module, init->version);
        status = NS_ERROR;
    }
#endif
    if (status == NS_OK && init->version < NS_DRIVER_VERSION_2) {
        Ns_Log(Error, "%s: version field of driver is invalid: %d",
               module, init->version);
        status = NS_ERROR;
    }

    if (!alreadyInitialized && status == NS_OK) {
        const char *path, *host, *address, *defserver;
        bool        noHostNameGiven;
        int         nrDrivers, nrBindaddrs = 0, result;
        Ns_Set     *set;
        Tcl_Obj    *bindaddrsObj, **objv;

        path = ((init->path != NULL) ? init->path : Ns_ConfigGetPath(server, module, (char *)0L));
        set = Ns_ConfigCreateSection(path);

        /*
         * Determine the "defaultserver", and the "hostname" / "address" for
         * binding to and/or the HTTP location string.
         */
        defserver = Ns_ConfigGetValue(path, "defaultserver");
        address   = Ns_ConfigGetValue(path, "address");
        host      = Ns_ConfigGetValue(path, "hostname");
        noHostNameGiven = (host == NULL);

        /*
         * If the listen address was not specified, attempt to determine it
         * through a DNS lookup of the specified hostname or the server's
         * primary hostname.
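         *
         * A minimal configuration section illustrating these parameters
         * (hypothetical values):
         *
         *     ns_section ns/server/server1/module/nssock
         *     ns_param   address   0.0.0.0
         *     ns_param   hostname  www.example.com
         *     ns_param   port      8080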
         */
        if (address == NULL) {
            Tcl_DString ds;

            Tcl_DStringInit(&ds);
            if (noHostNameGiven) {
                host = Ns_InfoHostname();
            }

            if (Ns_GetAllAddrByHost(&ds, host) == NS_TRUE) {
                address = ns_strdup(Tcl_DStringValue(&ds));
                if (path != NULL) {
                    Ns_SetUpdate(set, "address", address);
                }
                Ns_Log(Notice, "no address given, obtained address '%s' from host name %s",
                       address, host);
            }
            Tcl_DStringFree(&ds);
        }

        if (address == NULL) {
            address = NS_IP_UNSPECIFIED;
            Ns_Log(Notice, "no address given, set address to unspecified address %s",
                   address);
        }

        bindaddrsObj = Tcl_NewStringObj(address, -1);
        result = Tcl_ListObjGetElements(NULL, bindaddrsObj, &nrBindaddrs, &objv);
        if (result != TCL_OK || nrBindaddrs < 1 || nrBindaddrs >= MAX_LISTEN_ADDR_PER_DRIVER) {
            Ns_Fatal("%s: bindaddrs '%s' is not a valid Tcl list containing addresses (max %d)",
                     module, address, MAX_LISTEN_ADDR_PER_DRIVER);
        }
        Tcl_IncrRefCount(bindaddrsObj);

        /*
         * If the hostname was not specified and not determined by the lookup
         * above, set it to the first specified or derived IP address string.
         */
        if (host == NULL) {
            host = ns_strdup(Tcl_GetString(objv[0]));
        }

        if (noHostNameGiven && host != NULL && path != NULL) {
            Ns_SetUpdate(set, "hostname", host);
        }
        Tcl_DecrRefCount(bindaddrsObj);

        /*
         * Get configured number of driver threads.
         */
        nrDrivers = Ns_ConfigIntRange(path, "driverthreads", 1, 1, 64);
        if (nrDrivers > 1) {
#if !defined(SO_REUSEPORT)
            Ns_Log(Warning,
                   "server %s module %s requests %d driverthreads, but SO_REUSEPORT is not supported by the operating system",
                   server, module, nrDrivers);
            Ns_SetUpdate(set, "driverthreads", "1");
            nrDrivers = 1;
#endif
        }

        /*
         * Now that the common parameters are determined, create the driver
         * thread(s).
         */
        {
            size_t maxModuleNameLength = strlen(module) + (size_t)TCL_INTEGER_SPACE + 1u;
            char  *moduleName = ns_malloc(maxModuleNameLength);
            int    i;

            if (host == NULL) {
                host = Ns_InfoHostname();
            }

            for (i = 0; i < nrDrivers; i++) {
                snprintf(moduleName, maxModuleNameLength, "%s:%d", module, i);
                status = DriverInit(server, module, moduleName, init,
                                    servPtr, path, address,
                                    defserver, host);
                if (status != NS_OK) {
                    break;
                }
            }
            ns_free(moduleName);
        }
    }

    return status;
}


/*
 *----------------------------------------------------------------------
 *
 * ServerMapEntryAdd --
 *
 *      Add an entry to the virtual server map. The entry maps the value
 *      provided by the host header field to a location string, which also
 *      contains the protocol.
 *
 * Results:
 *      None
 *
 * Side effects:
 *      Potentially adding an entry to the virtual server map.
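 *
 *      Example with hypothetical values: for protocol "http" and host
 *      "www.example.com:8080", the map key is the host string and the
 *      stored location becomes "http://www.example.com:8080".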
 *
 *----------------------------------------------------------------------
 */
static void
ServerMapEntryAdd(Tcl_DString *dsPtr, const char *host,
                  NsServer *servPtr, Driver *drvPtr,
                  bool addDefaultMapEntry)
{
    Tcl_HashEntry *hPtr;
    int            isNew;

    NS_NONNULL_ASSERT(dsPtr != NULL);
    NS_NONNULL_ASSERT(host != NULL);
    NS_NONNULL_ASSERT(servPtr != NULL);
    NS_NONNULL_ASSERT(drvPtr != NULL);

    hPtr = Tcl_CreateHashEntry(&drvPtr->hosts, host, &isNew);
    if (isNew != 0) {
        ServerMap *mapPtr;

        (void) Ns_DStringVarAppend(dsPtr, drvPtr->protocol, "://", host, (char *)0L);
        mapPtr = ns_malloc(sizeof(ServerMap) + (size_t)dsPtr->length);
        mapPtr->servPtr = servPtr;
        memcpy(mapPtr->location, dsPtr->string, (size_t)dsPtr->length + 1u);

        Tcl_SetHashValue(hPtr, mapPtr);
        Ns_Log(Notice, "%s: adding virtual host entry for host <%s> location: %s mapped to server: %s",
               drvPtr->threadName, host, mapPtr->location, servPtr->server);

        if (addDefaultMapEntry) {
            drvPtr->defMapPtr = mapPtr;
        }
        /*
         * Always reset the Tcl_DString
         */
        Ns_DStringSetLength(dsPtr, 0);
    } else {
        Ns_Log(Notice, "%s: ignore duplicate virtual host entry: %s",
               drvPtr->threadName, host);
    }
}


/*
 *----------------------------------------------------------------------
 *
 * NsDriverMapVirtualServers --
 *
 *      Map "Host:" headers for drivers not bound to physical servers. This
 *      function has to be called at a time when all servers are already
 *      defined, such that NsGetServer(server) can succeed.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      Add an entry to the virtual server map via ServerMapEntryAdd()
 *
 *----------------------------------------------------------------------
 */
void
NsDriverMapVirtualServers(void)
{
    Driver *drvPtr;

    for (drvPtr = firstDrvPtr; drvPtr != NULL; drvPtr = drvPtr->nextPtr) {
        const Ns_Set *lset;
        size_t        j;
        Tcl_DString   ds, *dsPtr = &ds;
        const char   *path, *defserver, *moduleName;

        moduleName = drvPtr->moduleName;
        defserver  = drvPtr->defserver;

        /*
         * Check for a "/servers" section for this driver module.
         */
        path = Ns_ConfigGetPath(NULL, moduleName, "servers", (char *)0L);
        lset = Ns_ConfigGetSection(path);

        if (lset == NULL || Ns_SetSize(lset) == 0u) {
            /*
             * The driver module has no (or an empty) ".../servers" section.
             * No mapping from host names to virtual servers is defined.
             */
            if (drvPtr->server == NULL) {
                /*
                 * We have a global driver module. If there is at least a
                 * default server configured, we can use this for the mapping
                 * to the default server.
                 */
                if (defserver != NULL) {
                    NsServer *servPtr = NsGetServer(defserver);

                    Tcl_DStringInit(dsPtr);
                    ServerMapEntryAdd(dsPtr, Ns_InfoHostname(), servPtr, drvPtr, NS_TRUE);
                    Tcl_DStringFree(dsPtr);
                    Ns_Log(Notice, "Global driver has no mapping from host to server (section '%s' missing)",
                           moduleName);
                } else {
                    /*
                     * Global driver, which has no default server, and no servers section.
                     */
                    Ns_Fatal("%s: virtual servers configured,"
                             " but '%s' has no defaultserver defined", moduleName, path);
                }
            }
            continue;
        }

        /*
         * We have a ".../servers" section; the driver might be global or
         * local. It is not clear why we need the server map for a local
         * driver, but we keep this for compatibility.
         */
        if (defserver == NULL) {
            if (drvPtr->server != NULL) {
                /*
                 * We have a local (server specific) driver. Since the code
                 * below assumes that we have a "defserver" set, we take the
                 * actual server as defserver.
                 */
                defserver = drvPtr->server;

            } else {
                /*
                 * We have a global driver, but no defserver.
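                 * Without a default server there would be no fallback for
                 * requests whose "Host:" header matches no configured entry,
                 * so startup is aborted below.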
                 */
                Ns_Fatal("%s: virtual servers configured,"
                         " but '%s' has no defaultserver defined", moduleName, path);
            }
        }
        assert(defserver != NULL);

        drvPtr->defMapPtr = NULL;

        Ns_DStringInit(dsPtr);
        for (j = 0u; j < Ns_SetSize(lset); ++j) {
            const char *server = Ns_SetKey(lset, j);
            const char *host   = Ns_SetValue(lset, j);
            NsServer   *servPtr;

            /*
             * Perform an explicit lookup of the server.
             */
            servPtr = NsGetServer(server);
            if (servPtr == NULL) {
                Ns_Log(Error, "%s: no such server: %s", moduleName, server);
            } else {
                char *writableHost, *hostName, *portStart;

                writableHost = ns_strdup(host);
                Ns_HttpParseHost(writableHost, &hostName, &portStart);

                if (portStart == NULL) {
                    Tcl_DString hostDString;

                    /*
                     * The provided host entry does NOT contain a port.
                     *
                     * Add the provided entry to the virtual server map only
                     * when the configured port is the default port for the
                     * protocol.
                     */
                    if (drvPtr->port == drvPtr->defport) {
                        ServerMapEntryAdd(dsPtr, host, servPtr, drvPtr,
                                          STREQ(defserver, server));
                    }

                    /*
                     * Auto-add configured port: always add an entry with the
                     * explicitly configured port of the driver.
                     */
                    Tcl_DStringInit(&hostDString);
                    Tcl_DStringAppend(&hostDString, host, -1);
                    (void) Ns_DStringPrintf(&hostDString, ":%hu", drvPtr->port);

                    ServerMapEntryAdd(dsPtr, hostDString.string, servPtr, drvPtr,
                                      STREQ(defserver, server));

                    Tcl_DStringFree(&hostDString);

                } else {
                    /*
                     * The provided host entry does contain a port.
                     *
                     * In case the provided port is equal to the configured
                     * port of the driver, add an entry.
                     */
                    unsigned short providedPort = (unsigned short)strtol(portStart+1, NULL, 10);

                    if (providedPort == drvPtr->port) {
                        ServerMapEntryAdd(dsPtr, host, servPtr, drvPtr,
                                          STREQ(defserver, server));
                        /*
                         * In case the provided port is equal to the default
                         * port of the driver, make sure that we have an
                         * entry without the port.
                         */
                        if (providedPort == drvPtr->defport) {
                            ServerMapEntryAdd(dsPtr, hostName, servPtr, drvPtr,
                                              STREQ(defserver, server));
                        }
                    } else {
                        Ns_Log(Warning, "%s: driver is listening on port %hu; "
                               "virtual host entry %s ignored",
                               moduleName, drvPtr->port, host);
                    }
                }
                ns_free(writableHost);
            }
        }
        Ns_DStringFree(dsPtr);

        if (drvPtr->defMapPtr == NULL) {
            fprintf(stderr, "--- Server Map: ---\n");
            Ns_SetPrint(lset);
            Ns_Fatal("%s: default server '%s' not defined in '%s'",
                     moduleName, defserver, path);
        }
    }
}


/*
 *----------------------------------------------------------------------
 *
 * DriverInit --
 *
 *      Helper function of Ns_DriverInit. This function actually allocates
 *      and initializes the driver structure.
 *
 * Results:
 *      NS_OK if initialized, NS_ERROR if config or other error.
 *
 * Side effects:
 *      Listen socket will be opened later in NsStartDrivers.
 *
 *----------------------------------------------------------------------
 */
static Ns_ReturnCode
DriverInit(const char *server, const char *moduleName, const char *threadName,
           const Ns_DriverInitData *init,
           NsServer *servPtr, const char *path, const char *bindaddrs,
           const char *defserver, const char *host)
{
    const char     *defproto;
    Driver         *drvPtr;
    DrvWriter      *wrPtr;
    DrvSpooler     *spPtr;
    int             i;
    unsigned short  defport;

    NS_NONNULL_ASSERT(threadName != NULL);
    NS_NONNULL_ASSERT(init != NULL);
    NS_NONNULL_ASSERT(path != NULL);
    NS_NONNULL_ASSERT(bindaddrs != NULL);
    NS_NONNULL_ASSERT(host != NULL);

    /*
     * Set the protocol and port defaults.
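     * For a plain HTTP driver these are typically "http" and port 80; a TLS
     * driver would pass "https" and port 443.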
*/ if (init->protocol != NULL) { defproto = init->protocol; defport = init->defaultPort; } else { defproto = "unknown"; defport = 0u; } Ns_Log(DriverDebug, "DriverInit server <%s> threadName %s proto %s port %hu", server, threadName, defproto, defport); /* * Allocate a new driver instance and set configurable parameters. */ drvPtr = ns_calloc(1u, sizeof(Driver)); Ns_MutexInit(&drvPtr->lock); Ns_MutexSetName2(&drvPtr->lock, "ns:drv", threadName); Ns_MutexInit(&drvPtr->spooler.lock); Ns_MutexSetName2(&drvPtr->spooler.lock, "ns:drv:spool", threadName); Ns_MutexInit(&drvPtr->writer.lock); Ns_MutexSetName2(&drvPtr->writer.lock, "ns:drv:writer", threadName); if (ns_sockpair(drvPtr->trigger) != 0) { Ns_Fatal("ns_sockpair() failed: %s", ns_sockstrerror(ns_sockerrno)); } drvPtr->server = server; drvPtr->type = init->name; drvPtr->moduleName = ns_strdup(moduleName); drvPtr->threadName = ns_strdup(threadName); drvPtr->defserver = defserver; drvPtr->listenProc = init->listenProc; drvPtr->acceptProc = init->acceptProc; drvPtr->recvProc = init->recvProc; drvPtr->sendProc = init->sendProc; drvPtr->sendFileProc = init->sendFileProc; drvPtr->keepProc = init->keepProc; drvPtr->requestProc = init->requestProc; drvPtr->closeProc = init->closeProc; drvPtr->clientInitProc = init->clientInitProc; drvPtr->arg = init->arg; drvPtr->opts = init->opts; drvPtr->servPtr = servPtr; drvPtr->defport = defport; drvPtr->bufsize = (size_t)Ns_ConfigMemUnitRange(path, "bufsize", 16384, 1024, INT_MAX); drvPtr->maxinput = Ns_ConfigMemUnitRange(path, "maxinput", 1024*1024, 1024, LLONG_MAX); drvPtr->maxupload = Ns_ConfigMemUnitRange(path, "maxupload", 0, 0, (Tcl_WideInt)drvPtr->maxinput); drvPtr->readahead = Ns_ConfigMemUnitRange(path, "readahead", (Tcl_WideInt)drvPtr->bufsize, (Tcl_WideInt)drvPtr->bufsize, drvPtr->maxinput); drvPtr->maxline = Ns_ConfigIntRange(path, "maxline", 8192, 256, INT_MAX); drvPtr->maxheaders = Ns_ConfigIntRange(path, "maxheaders", 128, 8, INT_MAX); drvPtr->maxqueuesize = Ns_ConfigIntRange(path, "maxqueuesize", 1024, 1, INT_MAX); Ns_ConfigTimeUnitRange(path, "sendwait", "30s", 1, 0, INT_MAX, 0, &drvPtr->sendwait); Ns_ConfigTimeUnitRange(path, "recvwait", "30s", 1, 0, INT_MAX, 0, &drvPtr->recvwait); Ns_ConfigTimeUnitRange(path, "closewait", "2s", 0, 0, INT_MAX, 0, &drvPtr->closewait); Ns_ConfigTimeUnitRange(path, "keepwait", "5s", 0, 0, INT_MAX, 0, &drvPtr->keepwait); drvPtr->backlog = Ns_ConfigIntRange(path, "backlog", 256, 1, INT_MAX); drvPtr->driverthreads = Ns_ConfigIntRange(path, "driverthreads", 1, 1, 32); drvPtr->reuseport = Ns_ConfigBool(path, "reuseport", NS_FALSE); drvPtr->acceptsize = Ns_ConfigIntRange(path, "acceptsize", drvPtr->backlog, 1, INT_MAX); drvPtr->keepmaxuploadsize = (size_t)Ns_ConfigMemUnitRange(path, "keepalivemaxuploadsize", 0, 0, INT_MAX); drvPtr->keepmaxdownloadsize = (size_t)Ns_ConfigMemUnitRange(path, "keepalivemaxdownloadsize", 0, 0, INT_MAX); drvPtr->recvTimeout = drvPtr->recvwait; Tcl_InitHashTable(&drvPtr->hosts, TCL_STRING_KEYS); if (drvPtr->driverthreads > 1) { #if !defined(SO_REUSEPORT) drvPtr->driverthreads = 1; drvPtr->reuseport = NS_FALSE; #else /* * When driver threads > 1, "reuseport" has to be active. 
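         *
         * With SO_REUSEPORT, every driver thread can bind its own listen
         * socket to the same address/port pair, and the kernel load-balances
         * incoming connections across these sockets.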
*/ drvPtr->reuseport = NS_TRUE; #endif } if (drvPtr->reuseport) { /* * Reuseport was specified */ #if !defined(SO_REUSEPORT) Ns_Log(Warning, "parameter %s reuseport was specified, but is not supported by the operating system", path); drvPtr->reuseport = NS_FALSE; #endif } drvPtr->uploadpath = ns_strdup(Ns_ConfigString(path, "uploadpath", nsconf.tmpDir)); /* * If activated, "maxupload" has to be at least "readahead" bytes. Tell * the user in case the config values are overruled. */ if ((drvPtr->maxupload > 0) && (drvPtr->maxupload < drvPtr->readahead)) { Ns_Log(Warning, "parameter %s maxupload % " TCL_LL_MODIFIER "d invalid; can be either 0 or must be >= %" TCL_LL_MODIFIER "d (size of readahead)", path, drvPtr->maxupload, drvPtr->readahead); drvPtr->maxupload = drvPtr->readahead; } /* * Determine the port and then set the HTTP location string either * as specified in the config file or constructed from the * protocol, hostname and port. */ drvPtr->protocol = ns_strdup(defproto); drvPtr->address = ns_strdup(bindaddrs); drvPtr->port = (unsigned short)Ns_ConfigIntRange(path, "port", (int)defport, 0, 65535); drvPtr->location = Ns_ConfigGetValue(path, "location"); if (drvPtr->location != NULL && (strstr(drvPtr->location, "://") != NULL)) { drvPtr->location = ns_strdup(drvPtr->location); } else { Tcl_DString ds, *dsPtr = &ds; Ns_DStringInit(dsPtr); Ns_HttpLocationString(dsPtr, drvPtr->protocol, host, drvPtr->port, defport); drvPtr->location = Ns_DStringExport(dsPtr); } drvPtr->nextPtr = firstDrvPtr; firstDrvPtr = drvPtr; /* * Add driver specific extra headers. */ drvPtr->extraHeaders = Ns_ConfigSet(path, "extraheaders"); /* * Check if upload spooler are enabled */ spPtr = &drvPtr->spooler; spPtr->threads = Ns_ConfigIntRange(path, "spoolerthreads", 0, 0, 32); if (spPtr->threads > 0) { Ns_Log(Notice, "%s: enable %d spooler thread(s) " "for uploads >= %" TCL_LL_MODIFIER "d bytes", threadName, spPtr->threads, drvPtr->readahead); for (i = 0; i < spPtr->threads; i++) { SpoolerQueue *queuePtr = ns_calloc(1u, sizeof(SpoolerQueue)); char buffer[100]; snprintf(buffer, sizeof(buffer), "ns:driver:spooler:%d", i); Ns_MutexSetName2(&queuePtr->lock, buffer, "queue"); queuePtr->id = i; Push(queuePtr, spPtr->firstPtr); } } else { Ns_Log(Notice, "%s: enable %d spooler thread(s) ", threadName, spPtr->threads); } /* * Enable writer threads */ wrPtr = &drvPtr->writer; wrPtr->threads = Ns_ConfigIntRange(path, "writerthreads", 0, 0, 32); if (wrPtr->threads > 0) { wrPtr->writersize = (size_t)Ns_ConfigMemUnitRange(path, "writersize", 1024*1024, 1024, INT_MAX); wrPtr->bufsize = (size_t)Ns_ConfigMemUnitRange(path, "writerbufsize", 8192, 512, INT_MAX); wrPtr->rateLimit = Ns_ConfigIntRange(path, "writerratelimit", 0, 0, INT_MAX); wrPtr->doStream = Ns_ConfigBool(path, "writerstreaming", NS_FALSE) ? 
NS_WRITER_STREAM_ACTIVE : NS_WRITER_STREAM_NONE; Ns_Log(Notice, "%s: enable %d writer thread(s) " "for downloads >= %" PRIdz " bytes, bufsize=%" PRIdz " bytes, HTML streaming %d", threadName, wrPtr->threads, wrPtr->writersize, wrPtr->bufsize, wrPtr->doStream); for (i = 0; i < wrPtr->threads; i++) { SpoolerQueue *queuePtr = ns_calloc(1u, sizeof(SpoolerQueue)); char buffer[100]; snprintf(buffer, sizeof(buffer), "ns:driver:writer:%d", i); Ns_MutexSetName2(&queuePtr->lock, buffer, "queue"); queuePtr->id = i; Push(queuePtr, wrPtr->firstPtr); } } else { Ns_Log(Notice, "%s: enable %d writer thread(s) ", threadName, wrPtr->threads); } return NS_OK; } /* *---------------------------------------------------------------------- * * NsStartDrivers -- * * Listen on all driver address/ports and start the DriverThread. * * Results: * None. * * Side effects: * See DriverThread. * *---------------------------------------------------------------------- */ void NsStartDrivers(void) { Driver *drvPtr; /* * Signal and wait for each driver to start. */ for (drvPtr = firstDrvPtr; drvPtr != NULL; drvPtr = drvPtr->nextPtr) { if (drvPtr->port == 0u) { /* * Don't start a driver having port zero. */ continue; } Ns_ThreadCreate(DriverThread, drvPtr, 0, &drvPtr->thread); Ns_MutexLock(&drvPtr->lock); while ((drvPtr->flags & DRIVER_STARTED) == 0u) { Ns_CondWait(&drvPtr->cond, &drvPtr->lock); } /*if ((drvPtr->flags & DRIVER_FAILED)) { status = NS_ERROR; }*/ Ns_MutexUnlock(&drvPtr->lock); } } /* *---------------------------------------------------------------------- * * NsStopDrivers -- * * Trigger the DriverThread to begin shutdown. * * Results: * None. * * Side effects: * DriverThread will close listen sockets and then exit after all * outstanding connections are complete and closed. * *---------------------------------------------------------------------- */ void NsStopDrivers(void) { Driver *drvPtr; NsAsyncWriterQueueDisable(NS_TRUE); for (drvPtr = firstDrvPtr; drvPtr != NULL; drvPtr = drvPtr->nextPtr) { Tcl_HashEntry *hPtr; Tcl_HashSearch search; if ((drvPtr->flags & DRIVER_STARTED) == 0u) { continue; } Ns_MutexLock(&drvPtr->lock); Ns_Log(Notice, "[driver:%s]: stopping", drvPtr->threadName); drvPtr->flags |= DRIVER_SHUTDOWN; Ns_CondBroadcast(&drvPtr->cond); Ns_MutexUnlock(&drvPtr->lock); SockTrigger(drvPtr->trigger[1]); hPtr = Tcl_FirstHashEntry(&drvPtr->hosts, &search); while (hPtr != NULL) { Tcl_DeleteHashEntry(hPtr); hPtr = Tcl_NextHashEntry(&search); } } } void NsStopSpoolers(void) { const Driver *drvPtr; Ns_Log(Notice, "driver: stopping writer and spooler threads"); /* * Shutdown all spooler and writer threads */ for (drvPtr = firstDrvPtr; drvPtr != NULL; drvPtr = drvPtr->nextPtr) { Ns_Time timeout; if ((drvPtr->flags & DRIVER_STARTED) == 0u) { continue; } Ns_GetTime(&timeout); Ns_IncrTime(&timeout, nsconf.shutdowntimeout.sec, nsconf.shutdowntimeout.usec); SpoolerQueueStop(drvPtr->writer.firstPtr, &timeout, "writer"); SpoolerQueueStop(drvPtr->spooler.firstPtr, &timeout, "spooler"); } } /* *---------------------------------------------------------------------- * * DriverInfoObjCmd -- * * Return public info of all drivers. * Subcommand of NsTclDriverObjCmd. * * Results: * Standard Tcl Result. * * Side effects: * None. 
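 *
 *      Typically reachable from Tcl as "ns_driver info"; the result is a
 *      list of attribute/value lists, one per driver module.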
 *
 *----------------------------------------------------------------------
 */
static int
DriverInfoObjCmd(ClientData UNUSED(clientData), Tcl_Interp *interp, int objc, Tcl_Obj *const* objv)
{
    int result = TCL_OK;

    if (Ns_ParseObjv(NULL, NULL, interp, 2, objc, objv) != NS_OK) {
        result = TCL_ERROR;

    } else {
        const Driver *drvPtr;
        Tcl_Obj      *resultObj = Tcl_NewListObj(0, NULL);
        Tcl_HashTable driverNames;     /* names of the driver modules without duplicates */

        Tcl_InitHashTable(&driverNames, TCL_STRING_KEYS);

        /*
         * Iterate over all modules, not necessarily all driver threads
         */
        for (drvPtr = firstDrvPtr; drvPtr != NULL; drvPtr = drvPtr->nextPtr) {
            int isNew = 0;

            (void)Tcl_CreateHashEntry(&driverNames, drvPtr->moduleName, &isNew);
            if (isNew == 1) {
                Tcl_Obj *listObj = Tcl_NewListObj(0, NULL);

                Tcl_ListObjAppendElement(interp, listObj, Tcl_NewStringObj("module", 6));
                Tcl_ListObjAppendElement(interp, listObj, Tcl_NewStringObj(drvPtr->moduleName, -1));
                Tcl_ListObjAppendElement(interp, listObj, Tcl_NewStringObj("type", 4));
                Tcl_ListObjAppendElement(interp, listObj, Tcl_NewStringObj(drvPtr->type, -1));
                Tcl_ListObjAppendElement(interp, listObj, Tcl_NewStringObj("server", 6));
                Tcl_ListObjAppendElement(interp, listObj,
                                         Tcl_NewStringObj(drvPtr->server != NULL ? drvPtr->server : NS_EMPTY_STRING, -1));
                Tcl_ListObjAppendElement(interp, listObj, Tcl_NewStringObj("location", 8));
                Tcl_ListObjAppendElement(interp, listObj, Tcl_NewStringObj(drvPtr->location, -1));
                Tcl_ListObjAppendElement(interp, listObj, Tcl_NewStringObj("address", 7));
                Tcl_ListObjAppendElement(interp, listObj, Tcl_NewStringObj(drvPtr->address, -1));
                Tcl_ListObjAppendElement(interp, listObj, Tcl_NewStringObj("protocol", 8));
                Tcl_ListObjAppendElement(interp, listObj, Tcl_NewStringObj(drvPtr->protocol, -1));
                Tcl_ListObjAppendElement(interp, listObj, Tcl_NewStringObj("sendwait", 8));
                Tcl_ListObjAppendElement(interp, listObj, Ns_TclNewTimeObj(&drvPtr->sendwait));
                Tcl_ListObjAppendElement(interp, listObj, Tcl_NewStringObj("recvwait", 8));
                Tcl_ListObjAppendElement(interp, listObj, Ns_TclNewTimeObj(&drvPtr->recvwait));
                Tcl_ListObjAppendElement(interp, listObj, Tcl_NewStringObj("extraheaders", 12));
                if (drvPtr->extraHeaders != NULL) {
                    Tcl_DString ds;

                    Tcl_DStringInit(&ds);
                    Ns_DStringAppendSet(&ds, drvPtr->extraHeaders);
                    Tcl_ListObjAppendElement(interp, listObj, Tcl_NewStringObj(ds.string, ds.length));
                    Tcl_DStringFree(&ds);
                } else {
                    Tcl_ListObjAppendElement(interp, listObj, Tcl_NewStringObj("", 0));
                }
                Tcl_ListObjAppendElement(interp, resultObj, listObj);
            }
        }
        Tcl_SetObjResult(interp, resultObj);
        Tcl_DeleteHashTable(&driverNames);
    }
    return result;
}


/*
 *----------------------------------------------------------------------
 *
 * DriverStatsObjCmd --
 *
 *      Return statistics of all drivers.
 *      Subcommand of NsTclDriverObjCmd.
 *
 * Results:
 *      Standard Tcl Result.
 *
 * Side effects:
 *      None.
 *
 *----------------------------------------------------------------------
 */
static int
DriverStatsObjCmd(ClientData UNUSED(clientData), Tcl_Interp *interp, int objc, Tcl_Obj *const* objv)
{
    int result = TCL_OK;

    if (Ns_ParseObjv(NULL, NULL, interp, 2, objc, objv) != NS_OK) {
        result = TCL_ERROR;

    } else {
        const Driver *drvPtr;
        Tcl_Obj      *resultObj = Tcl_NewListObj(0, NULL);

        /*
         * Iterate over all drivers and collect results.
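         *
         * Note that, unlike "info", every driver thread is reported
         * individually here, since each thread keeps its own counters.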
         */
        for (drvPtr = firstDrvPtr; drvPtr != NULL; drvPtr = drvPtr->nextPtr) {
            Tcl_Obj *listObj = Tcl_NewListObj(0, NULL);

            Tcl_ListObjAppendElement(interp, listObj, Tcl_NewStringObj("thread", 6));
            Tcl_ListObjAppendElement(interp, listObj, Tcl_NewStringObj(drvPtr->threadName, -1));
            Tcl_ListObjAppendElement(interp, listObj, Tcl_NewStringObj("module", 6));
            Tcl_ListObjAppendElement(interp, listObj, Tcl_NewStringObj(drvPtr->moduleName, -1));
            Tcl_ListObjAppendElement(interp, listObj, Tcl_NewStringObj("received", 8));
            Tcl_ListObjAppendElement(interp, listObj, Tcl_NewWideIntObj(drvPtr->stats.received));
            Tcl_ListObjAppendElement(interp, listObj, Tcl_NewStringObj("spooled", 7));
            Tcl_ListObjAppendElement(interp, listObj, Tcl_NewWideIntObj(drvPtr->stats.spooled));
            Tcl_ListObjAppendElement(interp, listObj, Tcl_NewStringObj("partial", 7));
            Tcl_ListObjAppendElement(interp, listObj, Tcl_NewWideIntObj(drvPtr->stats.partial));
            Tcl_ListObjAppendElement(interp, listObj, Tcl_NewStringObj("errors", 6));
            Tcl_ListObjAppendElement(interp, listObj, Tcl_NewWideIntObj(drvPtr->stats.errors));

            Tcl_ListObjAppendElement(interp, resultObj, listObj);
        }
        Tcl_SetObjResult(interp, resultObj);
    }
    return result;
}


/*
 *----------------------------------------------------------------------
 *
 * DriverThreadsObjCmd --
 *
 *      Return the names of driver threads.
 *
 * Results:
 *      Standard Tcl Result.
 *
 * Side effects:
 *      None.
 *
 *----------------------------------------------------------------------
 */
static int
DriverThreadsObjCmd(ClientData UNUSED(clientData), Tcl_Interp *interp, int objc, Tcl_Obj *const* objv)
{
    int result = TCL_OK;

    if (Ns_ParseObjv(NULL, NULL, interp, 2, objc, objv) != NS_OK) {
        result = TCL_ERROR;

    } else {
        const Driver *drvPtr;
        Tcl_Obj      *resultObj = Tcl_NewListObj(0, NULL);

        /*
         * Iterate over all drivers and collect results.
         */
        for (drvPtr = firstDrvPtr; drvPtr != NULL; drvPtr = drvPtr->nextPtr) {
            Tcl_ListObjAppendElement(interp, resultObj,
                                     Tcl_NewStringObj(drvPtr->threadName, -1));
        }
        Tcl_SetObjResult(interp, resultObj);
    }
    return result;
}


/*
 *----------------------------------------------------------------------
 *
 * DriverNamesObjCmd --
 *
 *      Return the names of drivers.
 *
 * Results:
 *      Standard Tcl Result.
 *
 * Side effects:
 *      None.
 *
 *----------------------------------------------------------------------
 */
static int
DriverNamesObjCmd(ClientData UNUSED(clientData), Tcl_Interp *interp, int objc, Tcl_Obj *const* objv)
{
    int result = TCL_OK;

    if (Ns_ParseObjv(NULL, NULL, interp, 2, objc, objv) != NS_OK) {
        result = TCL_ERROR;

    } else {
        const Driver *drvPtr;
        Tcl_Obj      *resultObj = Tcl_NewListObj(0, NULL);
        Tcl_HashTable driverNames;     /* names of the drivers without duplicates */

        Tcl_InitHashTable(&driverNames, TCL_STRING_KEYS);

        /*
         * Iterate over all drivers and collect results.
         */
        for (drvPtr = firstDrvPtr; drvPtr != NULL; drvPtr = drvPtr->nextPtr) {
            int isNew;

            (void)Tcl_CreateHashEntry(&driverNames, drvPtr->moduleName, &isNew);
            if (isNew == 1) {
                Tcl_ListObjAppendElement(interp, resultObj,
                                         Tcl_NewStringObj(drvPtr->moduleName, -1));
            }
        }
        Tcl_SetObjResult(interp, resultObj);
        Tcl_DeleteHashTable(&driverNames);
    }
    return result;
}


/*
 *----------------------------------------------------------------------
 *
 * NsTclDriverObjCmd -
 *
 *      Give information about drivers: general info, driver names, thread
 *      names, and statistics.
 *
 * Results:
 *      Standard Tcl result.
 *
 * Side effects:
 *      None.
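 *
 *      Example invocations (assuming the command is registered as
 *      "ns_driver"):
 *
 *          ns_driver names
 *          ns_driver stats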
 *
 *----------------------------------------------------------------------
 */
int
NsTclDriverObjCmd(ClientData clientData, Tcl_Interp *interp, int objc, Tcl_Obj *const* objv)
{
    const Ns_SubCmdSpec subcmds[] = {
        {"info",    DriverInfoObjCmd},
        {"names",   DriverNamesObjCmd},
        {"threads", DriverThreadsObjCmd},
        {"stats",   DriverStatsObjCmd},
        {NULL, NULL}
    };
    return Ns_SubcmdObjv(subcmds, clientData, interp, objc, objv);
}

/*
 *----------------------------------------------------------------------
 *
 * NsWakeupDriver --
 *
 *      Wake up the associated DriverThread.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      The poll waiting for this trigger will be interrupted.
 *
 *----------------------------------------------------------------------
 */
void
NsWakeupDriver(const Driver *drvPtr)
{
    NS_NONNULL_ASSERT(drvPtr != NULL);
    SockTrigger(drvPtr->trigger[1]);
}


/*
 *----------------------------------------------------------------------
 *
 * NsWaitDriversShutdown --
 *
 *      Wait for exit of DriverThread. This callback is invoked later by
 *      the timed shutdown thread.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      Driver thread is joined and trigger pipe closed.
 *
 *----------------------------------------------------------------------
 */
void
NsWaitDriversShutdown(const Ns_Time *toPtr)
{
    Driver       *drvPtr;
    Ns_ReturnCode status = NS_OK;

    for (drvPtr = firstDrvPtr; drvPtr != NULL; drvPtr = drvPtr->nextPtr) {
        if ((drvPtr->flags & DRIVER_STARTED) == 0u) {
            continue;
        }
        Ns_MutexLock(&drvPtr->lock);
        while ((drvPtr->flags & DRIVER_STOPPED) == 0u && status == NS_OK) {
            status = Ns_CondTimedWait(&drvPtr->cond, &drvPtr->lock, toPtr);
        }
        Ns_MutexUnlock(&drvPtr->lock);

        if (status != NS_OK) {
            Ns_Log(Warning, "[driver:%s]: shutdown timeout", drvPtr->threadName);
        } else {
            Ns_Log(Notice, "[driver:%s]: stopped", drvPtr->threadName);
            Ns_ThreadJoin(&drvPtr->thread, NULL);
            drvPtr->thread = NULL;
        }
    }
}


/*
 *----------------------------------------------------------------------
 *
 * NsGetRequest --
 *
 *      Return the request buffer, reading it if necessary (i.e., if not an
 *      async read-ahead connection). This function is called at the start
 *      of connection processing.
 *
 * Results:
 *      Pointer to Request structure or NULL on error.
 *
 * Side effects:
 *      May wait for content to arrive if necessary.
 *
 *----------------------------------------------------------------------
 */
Request *
NsGetRequest(Sock *sockPtr, const Ns_Time *nowPtr)
{
    Request *reqPtr;

    NS_NONNULL_ASSERT(sockPtr != NULL);

    /*
     * The underlying "Request" structure is allocated by RequestNew(), which
     * must be called for the "sockPtr" prior to calling this
     * function. "reqPtr" should be NULL just in error cases.
     */
    reqPtr = sockPtr->reqPtr;

    if (likely(reqPtr != NULL)) {
        if (likely(reqPtr->request.line != NULL)) {
            Ns_Log(DriverDebug, "NsGetRequest got the pre-parsed request <%s> from the driver",
                   reqPtr->request.line);

        } else if (sockPtr->drvPtr->requestProc == NULL) {
            /*
             * Non-HTTP drivers can set drvPtr->requestProc to perform their
             * own request handling.
             */
            SockState status;

            Ns_Log(DriverDebug, "NsGetRequest has to read+parse the request");
            /*
             * We have no parsed request so far. So, do it now.
             */
            do {
                Ns_Log(DriverDebug, "NsGetRequest calls SockRead");
                status = SockRead(sockPtr, 0, nowPtr);
            } while (status == SOCK_MORE);

            /*
             * If anything went wrong, clean the request provided by
             * SockRead() and flag the error by returning NULL.
*/ if (status != SOCK_READY) { if (sockPtr->reqPtr != NULL) { Ns_Log(DriverDebug, "NsGetRequest calls RequestFree"); RequestFree(sockPtr); } reqPtr = NULL; } } else { Ns_Log(DriverDebug, "NsGetRequest found driver specific request Proc, " "probably from a non-HTTP driver"); } } else { Ns_Log(DriverDebug, "NsGetRequest has reqPtr NULL"); } return reqPtr; } /* *---------------------------------------------------------------------- * * NsSockClose -- * * Return a connection to the DriverThread for closing or keepalive. * "keep" might be NS_TRUE/NS_FALSE or -1 if undecided. * * Results: * None. * * Side effects: * Socket may be reused by a keepalive connection. * *---------------------------------------------------------------------- */ void NsSockClose(Sock *sockPtr, int keep) { Driver *drvPtr; bool trigger = NS_FALSE; NS_NONNULL_ASSERT(sockPtr != NULL); drvPtr = sockPtr->drvPtr; Ns_Log(DriverDebug, "NsSockClose sockPtr %p (%d) keep %d", (void *)sockPtr, ((Ns_Sock*)sockPtr)->sock, keep); SockClose(sockPtr, keep); /* * Free the request, unless it is from a non-HTTP driver (who might not * fill out the request structure). */ if (sockPtr->reqPtr != NULL) { Ns_Log(DriverDebug, "NsSockClose calls RequestFree"); RequestFree(sockPtr); } Ns_MutexLock(&drvPtr->lock); if (drvPtr->closePtr == NULL) { trigger = NS_TRUE; } sockPtr->nextPtr = drvPtr->closePtr; drvPtr->closePtr = sockPtr; Ns_MutexUnlock(&drvPtr->lock); if (trigger) { SockTrigger(drvPtr->trigger[1]); } } /* *---------------------------------------------------------------------- * * DriverListen -- * * Open a listening socket for accepting connections. * * Results: * File description of socket, or NS_INVALID_SOCKET on error. * * Side effects: * Depends on driver. * *---------------------------------------------------------------------- */ static NS_SOCKET DriverListen(Driver *drvPtr, const char *bindaddr) { NS_SOCKET sock; NS_NONNULL_ASSERT(drvPtr != NULL); NS_NONNULL_ASSERT(bindaddr != NULL); sock = (*drvPtr->listenProc)((Ns_Driver *) drvPtr, bindaddr, drvPtr->port, drvPtr->backlog, drvPtr->reuseport); if (sock == NS_INVALID_SOCKET) { Ns_Log(Error, "%s: failed to listen on [%s]:%d: %s", drvPtr->threadName, bindaddr, drvPtr->port, ns_sockstrerror(ns_sockerrno)); } else { Ns_Log(Notice, #ifdef HAVE_IPV6 "%s: listening on [%s]:%d", #else "%s: listening on %s:%d", #endif drvPtr->threadName, bindaddr, drvPtr->port); } return sock; } /* *---------------------------------------------------------------------- * * DriverAccept -- * * Accept a new socket. It will be in non-blocking mode. * * Results: * _ACCEPT: a socket was accepted, poll for data * _ACCEPT_DATA: a socket was accepted, data present, read immediately * if in async mode, defer reading to connection thread * _ACCEPT_QUEUE: a socket was accepted, queue immediately * _ACCEPT_ERROR: no socket was accepted * * Side effects: * Depends on driver. * *---------------------------------------------------------------------- */ static NS_DRIVER_ACCEPT_STATUS DriverAccept(Sock *sockPtr, NS_SOCKET sock) { socklen_t n = (socklen_t)sizeof(struct NS_SOCKADDR_STORAGE); NS_NONNULL_ASSERT(sockPtr != NULL); return (*sockPtr->drvPtr->acceptProc)((Ns_Sock *) sockPtr, sock, (struct sockaddr *) &(sockPtr->sa), &n); } /* *---------------------------------------------------------------------- * * NsDriverRecv -- * * Read data from the socket into the given vector of buffers. * * Results: * Number of bytes read, or -1 on error. * * Side effects: * Depends on driver. 
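 *
 *      A plain TCP driver's recvProc is typically a thin wrapper around a
 *      scatter read. A minimal sketch (assuming POSIX readv(); not the
 *      actual implementation of any particular driver):
 *
 *          static ssize_t
 *          Recv(Ns_Sock *sock, struct iovec *bufs, int nbufs,
 *               Ns_Time *timeoutPtr, unsigned int flags)
 *          {
 *              return readv(sock->sock, bufs, nbufs);
 *          }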
* *---------------------------------------------------------------------- */ ssize_t NsDriverRecv(Sock *sockPtr, struct iovec *bufs, int nbufs, Ns_Time *timeoutPtr) { ssize_t result; const Driver *drvPtr; NS_NONNULL_ASSERT(sockPtr != NULL); drvPtr = sockPtr->drvPtr; if (likely(drvPtr->recvProc != NULL)) { result = (*drvPtr->recvProc)((Ns_Sock *) sockPtr, bufs, nbufs, timeoutPtr, 0u); } else { Ns_Log(Warning, "driver: no recvProc registered for driver %s", drvPtr->threadName); result = -1; } return result; } /* *---------------------------------------------------------------------- * * NsDriverSend -- * * Write a vector of buffers to the socket via the driver callback. * May not send all of the data. * * Results: * Number of bytes written or -1 on error. * May return 0 (zero) when socket is not writable. * * Side effects: * Depends on the driver. * *---------------------------------------------------------------------- */ ssize_t NsDriverSend(Sock *sockPtr, const struct iovec *bufs, int nbufs, unsigned int flags) { ssize_t sent = -1; const Driver *drvPtr; NS_NONNULL_ASSERT(sockPtr != NULL); drvPtr = sockPtr->drvPtr; NS_NONNULL_ASSERT(drvPtr != NULL); if (likely(drvPtr->sendProc != NULL)) { /* * TODO: The Ns_DriverSendProc signature should be modified * to omit the timeout argument. Same with recvProc(). */ sent = (*drvPtr->sendProc)((Ns_Sock *) sockPtr, bufs, nbufs, NULL, flags); } else { Ns_Log(Warning, "no sendProc registered for driver %s", drvPtr->threadName); } return sent; } /* *---------------------------------------------------------------------- * * NsDriverSendFile -- * * Write a vector of file buffers to the socket via the driver * callback. * * Results: * Number of bytes written, -1 on error. * May not send all the data. * * Side effects: * May block on disk read. * *---------------------------------------------------------------------- */ ssize_t NsDriverSendFile(Sock *sockPtr, Ns_FileVec *bufs, int nbufs, unsigned int flags) { ssize_t sent = -1; const Driver *drvPtr; NS_NONNULL_ASSERT(sockPtr != NULL); NS_NONNULL_ASSERT(bufs != NULL); drvPtr = sockPtr->drvPtr; NS_NONNULL_ASSERT(drvPtr != NULL); if (drvPtr->sendFileProc != NULL) { /* * TODO: The Ns_DriverSendFileProc signature should be modified * to omit the timeout argument. */ sent = (*drvPtr->sendFileProc)((Ns_Sock *)sockPtr, bufs, nbufs, NULL, flags); } else { sent = Ns_SockSendFileBufs((Ns_Sock *)sockPtr, bufs, nbufs, flags); } return sent; } /* *---------------------------------------------------------------------- * * DriverKeep -- * * Can the given socket be kept open in the hopes that another * request will arrive before the keepwait timeout expires? * * Results: * NS_TRUE if the socket is OK for keepalive, NS_FALSE if this is not possible. * * Side effects: * Depends on driver. * *---------------------------------------------------------------------- */ static bool DriverKeep(Sock *sockPtr) { Ns_DriverKeepProc *keepProc; bool result; NS_NONNULL_ASSERT(sockPtr != NULL); keepProc = sockPtr->drvPtr->keepProc; if (keepProc == NULL) { result = NS_FALSE; } else { result = (keepProc)((Ns_Sock *) sockPtr); } return result; } /* *---------------------------------------------------------------------- * * DriverClose -- * * Close the given socket. * * Results: * None. * * Side effects: * Depends on driver. 
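 *
 *      The closeProc is expected to close (or invalidate) the socket and to
 *      release any driver-private state attached to the Ns_Sock.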
 *
 *----------------------------------------------------------------------
 */
static void
DriverClose(Sock *sockPtr)
{
    NS_NONNULL_ASSERT(sockPtr != NULL);
    (*sockPtr->drvPtr->closeProc)((Ns_Sock *) sockPtr);
}


/*
 *----------------------------------------------------------------------
 *
 * DriverThread --
 *
 *      Main listening socket driver thread.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      Connections are accepted on the configured listen sockets,
 *      placed on the run queue to be serviced, and gracefully
 *      closed when done. Async sockets have the entire request read
 *      here before queuing as well.
 *
 *----------------------------------------------------------------------
 */
static void
DriverThread(void *arg)
{
    Driver       *drvPtr = (Driver*)arg;
    Ns_Time       now, diff;
    char          charBuffer[1], drain[1024];
    int           pollTimeout, accepted, nrBindaddrs = 0;
    bool          stopping;
    unsigned int  flags;
    Sock         *sockPtr, *closePtr, *nextPtr, *waitPtr, *readPtr;
    PollData      pdata;

    Ns_ThreadSetName("-driver:%s-", drvPtr->threadName);
    Ns_Log(Notice, "starting");

    flags = DRIVER_STARTED;

    {
        Tcl_Obj *bindaddrsObj, **objv;
        int      j = 0, result;

        bindaddrsObj = Tcl_NewStringObj(drvPtr->address, -1);
        Tcl_IncrRefCount(bindaddrsObj);

        result = Tcl_ListObjGetElements(NULL, bindaddrsObj, &nrBindaddrs, &objv);
        /*
         * "result" was TCL_OK during startup; it still has to be OK.
         */
        assert(result == TCL_OK);

        if (result == TCL_OK) {
            int i;

            /*
             * Bind all provided addresses.
             */
            for (i = 0; i < nrBindaddrs; i++) {
                drvPtr->listenfd[j] = DriverListen(drvPtr, Tcl_GetString(objv[i]));
                if (drvPtr->listenfd[j] != NS_INVALID_SOCKET) {
                    j ++;
                }
            }
            if (j > 0 && j < nrBindaddrs) {
                Ns_Log(Warning, "could only bind to %d out of %d addresses", j, nrBindaddrs);
            }
        }

        /*
         * "j" refers to the number of successful listen() operations.
         */
        nrBindaddrs = j;
        Tcl_DecrRefCount(bindaddrsObj);
    }

    if (nrBindaddrs > 0) {
        SpoolerQueueStart(drvPtr->spooler.firstPtr, SpoolerThread);
        SpoolerQueueStart(drvPtr->writer.firstPtr, WriterThread);
    } else {
        Ns_Log(Warning, "could not bind any of the following addresses, stopping this driver: %s",
               drvPtr->address);
        flags |= (DRIVER_FAILED | DRIVER_SHUTDOWN);
    }

    Ns_MutexLock(&drvPtr->lock);
    drvPtr->flags |= flags;
    Ns_CondBroadcast(&drvPtr->cond);
    Ns_MutexUnlock(&drvPtr->lock);

    /*
     * Loop forever until signaled to shut down and all
     * connections are complete and gracefully closed.
     */
    PollCreate(&pdata);
    Ns_GetTime(&now);
    closePtr = waitPtr = readPtr = NULL;
    stopping = ((flags & DRIVER_SHUTDOWN) != 0u);

    if (!stopping) {
        Ns_Log(Notice, "driver: accepting connections");
    }

    while (!stopping) {
        int n;

        /*
         * Set the bits for all active drivers if a connection
         * isn't already pending.
         */
        PollReset(&pdata);
        (void)PollSet(&pdata, drvPtr->trigger[0], (short)POLLIN, NULL);

        if (likely(waitPtr == NULL)) {
            for (n = 0; n < nrBindaddrs; n++) {
                drvPtr->pidx[n] = PollSet(&pdata, drvPtr->listenfd[n], (short)POLLIN, NULL);
            }
        }

        /*
         * If there are any closing or read-ahead sockets, set the bits
         * and determine the minimum relative timeout.
         *
         * TODO: the various poll timeouts should probably be configurable.
         */
        if (readPtr == NULL && closePtr == NULL) {
            pollTimeout = 10 * 1000;
        } else {

            for (sockPtr = readPtr; sockPtr != NULL; sockPtr = sockPtr->nextPtr) {
                SockPoll(sockPtr, (short)POLLIN, &pdata);
            }
            for (sockPtr = closePtr; sockPtr != NULL; sockPtr = sockPtr->nextPtr) {
                SockPoll(sockPtr, (short)POLLIN, &pdata);
            }

            if (Ns_DiffTime(&pdata.timeout, &now, &diff) > 0)  {
                /*
                 * The resolution of "pollTimeout" is ms, therefore we round
                 * up. If we rounded down (e.g.
500 microseconds to 0 ms), * the time comparison later would determine that it is too * early. */ pollTimeout = (int)Ns_TimeToMilliseconds(&diff) + 1; } else { pollTimeout = 0; } } n = PollWait(&pdata, pollTimeout); Ns_Log(DriverDebug, "=== PollWait returned %d, trigger[0] %d", n, PollIn(&pdata, 0)); if (PollIn(&pdata, 0) && unlikely(ns_recv(drvPtr->trigger[0], charBuffer, 1u, 0) != 1)) { const char *errstr = ns_sockstrerror(ns_sockerrno); Ns_Fatal("driver: trigger ns_recv() failed: %s", errstr); } /* * Check whether we should re-animate some connection threads, * when e.g. the number of current threads dropped below the * minimal value. Perform this test on timeouts (n == 0; * just for safety reasons) or on explicit wakeup calls. */ if ((n == 0) || PollIn(&pdata, 0)) { NsServer *servPtr = drvPtr->servPtr; if (servPtr != NULL) { /* * Check if we have to reanimate the current server. */ NsEnsureRunningConnectionThreads(servPtr, NULL); } else { Ns_Set *servers = Ns_ConfigCreateSection("ns/servers"); size_t j; /* * Reanimation check on all servers. */ for (j = 0u; j < Ns_SetSize(servers); ++j) { const char *server = Ns_SetKey(servers, j); servPtr = NsGetServer(server); if (servPtr != NULL) { NsEnsureRunningConnectionThreads(servPtr, NULL); } } } } /* * Update the current time and drain and/or release any * closing sockets. */ Ns_GetTime(&now); if (closePtr != NULL) { sockPtr = closePtr; closePtr = NULL; while (sockPtr != NULL) { nextPtr = sockPtr->nextPtr; if (unlikely(PollHup(&pdata, sockPtr->pidx))) { /* * Peer has closed the connection */ SockRelease(sockPtr, SOCK_CLOSE, 0); } else if (likely(PollIn(&pdata, sockPtr->pidx))) { /* * Got some data */ ssize_t received = ns_recv(sockPtr->sock, drain, sizeof(drain), 0); if (received <= 0) { Ns_Log(DriverDebug, "poll closewait pollin; sockrelease SOCK_READERROR (sock %d)", sockPtr->sock); SockRelease(sockPtr, SOCK_READERROR, 0); } else { Push(sockPtr, closePtr); } } else if (Ns_DiffTime(&sockPtr->timeout, &now, &diff) <= 0) { /* no PollHup, no PollIn, maybe timeout */ Ns_Log(DriverDebug, "poll closewait timeout; sockrelease SOCK_CLOSETIMEOUT (sock %d)", sockPtr->sock); SockRelease(sockPtr, SOCK_CLOSETIMEOUT, 0); } else { /* too early, keep waiting */ Push(sockPtr, closePtr); } sockPtr = nextPtr; } } /* * Attempt read-ahead of any new connections. */ sockPtr = readPtr; readPtr = NULL; while (likely(sockPtr != NULL)) { nextPtr = sockPtr->nextPtr; if (unlikely(PollHup(&pdata, sockPtr->pidx))) { /* * Peer has closed the connection */ SockRelease(sockPtr, SOCK_CLOSE, 0); } else if (unlikely(!PollIn(&pdata, sockPtr->pidx)) && ((sockPtr->reqPtr == NULL) || (sockPtr->reqPtr->leftover == 0u))) { /* * Got no data for this sockPtr. */ if (Ns_DiffTime(&sockPtr->timeout, &now, &diff) <= 0) { SockRelease(sockPtr, SOCK_READTIMEOUT, 0); } else { Push(sockPtr, readPtr); } } else { /* * Got some data for this sockPtr. * If enabled, perform read-ahead now. */ assert(drvPtr == sockPtr->drvPtr); if (likely((drvPtr->opts & NS_DRIVER_ASYNC) != 0u)) { SockState s = SockRead(sockPtr, 0, &now); /* * Queue for connection processing if ready. 
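                     *
                     * SOCK_SPOOL hands the socket over to a spooler thread,
                     * SOCK_MORE re-arms the read-ahead poll, and SOCK_READY
                     * queues the parsed request for a connection thread.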
*/ switch (s) { case SOCK_SPOOL: drvPtr->stats.spooled++; if (SockSpoolerQueue(drvPtr, sockPtr) == 0) { Push(sockPtr, readPtr); } break; case SOCK_MORE: drvPtr->stats.partial++; SockTimeout(sockPtr, &now, &drvPtr->recvwait); Push(sockPtr, readPtr); break; case SOCK_READY: if (SockQueue(sockPtr, &now) == NS_TIMEOUT) { Push(sockPtr, waitPtr); } break; /* * Already handled or normal cases */ case SOCK_ENTITYTOOLARGE: NS_FALL_THROUGH; /* fall through */ case SOCK_BADREQUEST: NS_FALL_THROUGH; /* fall through */ case SOCK_BADHEADER: NS_FALL_THROUGH; /* fall through */ case SOCK_TOOMANYHEADERS: NS_FALL_THROUGH; /* fall through */ case SOCK_CLOSE: SockRelease(sockPtr, s, errno); break; /* * Exceptions */ case SOCK_READERROR: NS_FALL_THROUGH; /* fall through */ case SOCK_CLOSETIMEOUT: NS_FALL_THROUGH; /* fall through */ case SOCK_ERROR: NS_FALL_THROUGH; /* fall through */ case SOCK_READTIMEOUT: NS_FALL_THROUGH; /* fall through */ case SOCK_SHUTERROR: NS_FALL_THROUGH; /* fall through */ case SOCK_WRITEERROR: NS_FALL_THROUGH; /* fall through */ case SOCK_WRITETIMEOUT: drvPtr->stats.errors++; Ns_Log(Warning, "sockread returned unexpected result %s (err %s); close socket (%d)", GetSockStateName(s), ((errno != 0) ? strerror(errno) : NS_EMPTY_STRING), sockPtr->sock); SockRelease(sockPtr, s, errno); break; } } else { /* * Potentially blocking driver, NS_DRIVER_ASYNC is not defined */ if (Ns_DiffTime(&sockPtr->timeout, &now, &diff) <= 0) { drvPtr->stats.errors++; Ns_Log(Notice, "read-ahead has some data, no async sock read ===== diff time %ld", Ns_DiffTime(&sockPtr->timeout, &now, &diff)); sockPtr->keep = NS_FALSE; SockRelease(sockPtr, SOCK_READTIMEOUT, 0); } else { if (SockQueue(sockPtr, &now) == NS_TIMEOUT) { Push(sockPtr, waitPtr); } } } } sockPtr = nextPtr; } /* * Attempt to queue any pending connection after reversing the * list to ensure oldest connections are tried first. */ if (waitPtr != NULL) { sockPtr = NULL; while ((nextPtr = waitPtr) != NULL) { waitPtr = nextPtr->nextPtr; Push(nextPtr, sockPtr); } while (sockPtr != NULL) { nextPtr = sockPtr->nextPtr; if (SockQueue(sockPtr, &now) == NS_TIMEOUT) { Push(sockPtr, waitPtr); } sockPtr = nextPtr; } } /* * If no connections are waiting, attempt to accept more. */ if (waitPtr == NULL) { /* * If configured, try to accept more than one request, under heavy load * this helps to process more requests */ SockState s; bool acceptMore = NS_TRUE; accepted = 0; while (acceptMore && accepted < drvPtr->acceptsize && drvPtr->queuesize < drvPtr->maxqueuesize ) { bool gotRequests = NS_FALSE; /* * Check for input data on all bind addresses. Stop checking, * when one round of checking on all addresses fails. 
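                 *
                 * Accepting up to "acceptsize" connections per poll round
                 * reduces poll() overhead under heavy load.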
*/ for (n = 0; n < nrBindaddrs; n++) { if ( PollIn(&pdata, drvPtr->pidx[n]) && (s = SockAccept(drvPtr, pdata.pfds[drvPtr->pidx[n]].fd, &sockPtr, &now)) != SOCK_ERROR) { switch (s) { case SOCK_SPOOL: drvPtr->stats.spooled++; if (SockSpoolerQueue(drvPtr, sockPtr) == 0) { Push(sockPtr, readPtr); } break; case SOCK_MORE: drvPtr->stats.partial++; SockTimeout(sockPtr, &now, &drvPtr->recvwait); Push(sockPtr, readPtr); break; case SOCK_READY: if (SockQueue(sockPtr, &now) == NS_TIMEOUT) { Push(sockPtr, waitPtr); } break; case SOCK_BADHEADER: NS_FALL_THROUGH; /* fall through */ case SOCK_BADREQUEST: NS_FALL_THROUGH; /* fall through */ case SOCK_CLOSE: NS_FALL_THROUGH; /* fall through */ case SOCK_CLOSETIMEOUT: NS_FALL_THROUGH; /* fall through */ case SOCK_ENTITYTOOLARGE: NS_FALL_THROUGH; /* fall through */ case SOCK_ERROR: NS_FALL_THROUGH; /* fall through */ case SOCK_READERROR: NS_FALL_THROUGH; /* fall through */ case SOCK_READTIMEOUT: NS_FALL_THROUGH; /* fall through */ case SOCK_SHUTERROR: NS_FALL_THROUGH; /* fall through */ case SOCK_TOOMANYHEADERS: NS_FALL_THROUGH; /* fall through */ case SOCK_WRITEERROR: NS_FALL_THROUGH; /* fall through */ case SOCK_WRITETIMEOUT: Ns_Fatal("driver: SockAccept returned: %s", GetSockStateName(s)); } accepted++; gotRequests = NS_TRUE; #ifdef __APPLE__ /* * On Darwin, the first accept() succeeds typically, but it is * useless to try, since this leads always to an EAGAIN */ acceptMore = NS_FALSE; break; #endif } } if (!gotRequests) { acceptMore = NS_FALSE; } } if (accepted > 1) { Ns_Log(Notice, "... sockAccept accepted %d connections", accepted); } } /* * Check for shut down and get the list of any closing or * keep-alive sockets. */ Ns_MutexLock(&drvPtr->lock); sockPtr = drvPtr->closePtr; drvPtr->closePtr = NULL; flags = drvPtr->flags; Ns_MutexUnlock(&drvPtr->lock); stopping = ((flags & DRIVER_SHUTDOWN) != 0u); /* * Update the timeout for each closing socket and add to the * close list if some data has been read from the socket * (i.e., it's not a closing keep-alive connection). */ while (sockPtr != NULL) { nextPtr = sockPtr->nextPtr; if (sockPtr->keep) { assert(drvPtr == sockPtr->drvPtr); Ns_Log(DriverDebug, "setting keepwait %ld.%6ld for socket %d", drvPtr->keepwait.sec, drvPtr->keepwait.usec, sockPtr->sock); SockTimeout(sockPtr, &now, &drvPtr->keepwait); Push(sockPtr, readPtr); } else { /* * Purely packet oriented drivers set on close the fd to * NS_INVALID_SOCKET. Since we cannot "shutdown" an UDP-socket * for writing, we bypass this call. */ assert(drvPtr == sockPtr->drvPtr); if (sockPtr->sock == NS_INVALID_SOCKET) { SockRelease(sockPtr, SOCK_CLOSE, errno); Ns_Log(DriverDebug, "DRIVER SockRelease: errno %d drvPtr->closewait %ld.%6ld", errno, drvPtr->closewait.sec, drvPtr->closewait.usec); } else if (shutdown(sockPtr->sock, SHUT_WR) != 0) { SockRelease(sockPtr, SOCK_SHUTERROR, errno); } else { Ns_Log(DriverDebug, "setting closewait %ld.%6ld for socket %d", drvPtr->closewait.sec, drvPtr->closewait.usec, sockPtr->sock); SockTimeout(sockPtr, &now, &drvPtr->closewait); Push(sockPtr, closePtr); } } sockPtr = nextPtr; } /* * Close the active drivers if shutdown is pending. 
*/ if (stopping) { for (n = 0; n < nrBindaddrs; n++) { ns_sockclose(drvPtr->listenfd[n]); drvPtr->listenfd[n] = NS_INVALID_SOCKET; } } } PollFree(&pdata); Ns_Log(Notice, "exiting"); Ns_MutexLock(&drvPtr->lock); drvPtr->flags |= DRIVER_STOPPED; Ns_CondBroadcast(&drvPtr->cond); Ns_MutexUnlock(&drvPtr->lock); } static void PollCreate(PollData *pdata) { NS_NONNULL_ASSERT(pdata != NULL); memset(pdata, 0, sizeof(PollData)); } static void PollFree(PollData *pdata) { NS_NONNULL_ASSERT(pdata != NULL); ns_free(pdata->pfds); memset(pdata, 0, sizeof(PollData)); } static void PollReset(PollData *pdata) { NS_NONNULL_ASSERT(pdata != NULL); pdata->nfds = 0u; pdata->timeout.sec = TIME_T_MAX; pdata->timeout.usec = 0; } static NS_POLL_NFDS_TYPE PollSet(PollData *pdata, NS_SOCKET sock, short type, const Ns_Time *timeoutPtr) { NS_NONNULL_ASSERT(pdata != NULL); /* * Grow the pfds array if necessary. */ if (unlikely(pdata->nfds >= pdata->maxfds)) { pdata->maxfds += 100u; pdata->pfds = ns_realloc(pdata->pfds, pdata->maxfds * sizeof(struct pollfd)); } /* * Set the next pollfd struct with this socket. */ pdata->pfds[pdata->nfds].fd = sock; pdata->pfds[pdata->nfds].events = type; pdata->pfds[pdata->nfds].revents = 0; /* * Check for new minimum timeout. */ if (timeoutPtr != NULL && Ns_DiffTime(timeoutPtr, &pdata->timeout, NULL) < 0) { pdata->timeout = *timeoutPtr; } return pdata->nfds++; } static int PollWait(const PollData *pdata, int timeout) { int n; NS_NONNULL_ASSERT(pdata != NULL); do { n = ns_poll(pdata->pfds, pdata->nfds, timeout); } while (n < 0 && errno == NS_EINTR); if (n < 0) { Ns_Fatal("PollWait: ns_poll() failed: %s", ns_sockstrerror(ns_sockerrno)); } return n; } /* *---------------------------------------------------------------------- * * RequestNew * * Prepares for reading from the socket, allocates a "Request" * struct for the given socket. It might be reused from the pool * or freshly allocated. Counterpart of RequestFree(). * * Results: * None * * Side effects: * None * *---------------------------------------------------------------------- */ static void RequestNew(Sock *sockPtr) { Request *reqPtr; bool reuseRequest = NS_TRUE; NS_NONNULL_ASSERT(sockPtr != NULL); /* * Try to get a request from the pool of allocated Requests. */ Ns_MutexLock(&reqLock); reqPtr = firstReqPtr; if (likely(reqPtr != NULL)) { firstReqPtr = reqPtr->nextPtr; } else { reuseRequest = NS_FALSE; } Ns_MutexUnlock(&reqLock); if (reuseRequest) { Ns_Log(DriverDebug, "RequestNew reuses a Request"); } /* * In case we failed, allocate a new Request. */ if (reqPtr == NULL) { Ns_Log(DriverDebug, "RequestNew gets a fresh Request"); reqPtr = ns_calloc(1u, sizeof(Request)); Tcl_DStringInit(&reqPtr->buffer); reqPtr->headers = Ns_SetCreate(NULL); } sockPtr->reqPtr = reqPtr; } /* *---------------------------------------------------------------------- * * RequestFree -- * * Free/clean a socket request structure. This routine is called * at the end of connection processing or on a socket which * times out during async read-ahead. Counterpart of RequestNew(). * * Results: * None. * * Side effects: * None. 
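 *
 *      For keep-alive connections with pipelined input, bytes beyond the
 *      current request are preserved in the buffer as "leftover" data, so
 *      the next request can be parsed without waiting for another read.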
* *---------------------------------------------------------------------- */ static void RequestFree(Sock *sockPtr) { Request *reqPtr; bool keep; NS_NONNULL_ASSERT(sockPtr != NULL); reqPtr = sockPtr->reqPtr; assert(reqPtr != NULL); Ns_Log(DriverDebug, "=== RequestFree cleans %p (avail %" PRIuz " keep %d length %" PRIuz " contentLength %" PRIuz ")", (void *)reqPtr, reqPtr->avail, sockPtr->keep, reqPtr->length, reqPtr->contentLength); keep = (sockPtr->keep) && (reqPtr->avail > reqPtr->contentLength); if (keep) { size_t leftover = reqPtr->avail - reqPtr->contentLength; const char *offset = reqPtr->buffer.string + ((size_t)reqPtr->buffer.length - leftover); Ns_Log(DriverDebug, "setting leftover to %" PRIuz " bytes", leftover); /* * Here it is safe to move the data in the buffer, although the * reqPtr->content might point to it, since we re-init the content. In * case the terminating null character was written to the end of the * previous buffer, we have to restore the first character. */ memmove(reqPtr->buffer.string, offset, leftover); if (reqPtr->savedChar != '\0') { reqPtr->buffer.string[0] = reqPtr->savedChar; } Tcl_DStringSetLength(&reqPtr->buffer, (int)leftover); LogBuffer(DriverDebug, "KEEP BUFFER", reqPtr->buffer.string, leftover); reqPtr->leftover = leftover; } else { /* * Clean large buffers in order to avoid memory growth on huge * uploads (when maxupload is huge) */ /*fprintf(stderr, "=== reuse buffer size %d avail %d dynamic %d\n", reqPtr->buffer.length, reqPtr->buffer.spaceAvl, reqPtr->buffer.string == reqPtr->buffer.staticSpace);*/ if (Tcl_DStringLength(&reqPtr->buffer) > 65536) { Tcl_DStringFree(&reqPtr->buffer); } else { /* * Reuse buffer, but set length to 0. */ Tcl_DStringSetLength(&reqPtr->buffer, 0); } reqPtr->leftover = 0u; } reqPtr->next = NULL; reqPtr->content = NULL; reqPtr->length = 0u; reqPtr->contentLength = 0u; reqPtr->expectedLength = 0u; reqPtr->chunkStartOff = 0u; reqPtr->chunkWriteOff = 0u; reqPtr->roff = 0u; reqPtr->woff = 0u; reqPtr->coff = 0u; reqPtr->avail = 0u; reqPtr->savedChar = '\0'; Ns_SetTrunc(reqPtr->headers, 0u); if (reqPtr->auth != NULL) { Ns_SetFree(reqPtr->auth); reqPtr->auth = NULL; } if (reqPtr->request.line != NULL) { Ns_Log(DriverDebug, "RequestFree calls Ns_ResetRequest on %p", (void*)&reqPtr->request); Ns_ResetRequest(&reqPtr->request); } else { Ns_Log(DriverDebug, "RequestFree does not call Ns_ResetRequest on %p", (void*)&reqPtr->request); } if (!keep) { /* * Push the reqPtr to the pool for reuse in other connections. */ sockPtr->reqPtr = NULL; Ns_MutexLock(&reqLock); reqPtr->nextPtr = firstReqPtr; firstReqPtr = reqPtr; Ns_MutexUnlock(&reqLock); } else { /* * Keep the partly cleaned up reqPtr associated with the connection. */ Ns_Log(DriverDebug, "=== KEEP request structure in sockPtr (don't push into the pool)"); } } /* *---------------------------------------------------------------------- * * SockQueue -- * * Puts socket into connection queue * * Results: * NS_OK if queued, * NS_ERROR if socket closed because of error * NS_TIMEOUT if queue is full * * Side effects: * None. * *---------------------------------------------------------------------- */ static Ns_ReturnCode SockQueue(Sock *sockPtr, const Ns_Time *timePtr) { Ns_ReturnCode result; NS_NONNULL_ASSERT(sockPtr != NULL); /* * Verify the conditions. Request struct must exist already. */ assert(sockPtr->reqPtr != NULL); SockSetServer(sockPtr); assert(sockPtr->servPtr != NULL); /* * Actual queueing, if not ready spool to the waiting list. 
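     * NsQueueConn() returns NS_FALSE when the connection queues are full;
     * the driver thread then keeps the socket on its wait list and retries
     * on the next spin.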
*/ if (!NsQueueConn(sockPtr, timePtr)) { result = NS_TIMEOUT; } else { result = NS_OK; } return result; } /* *---------------------------------------------------------------------- * * SockPoll -- * * Arrange for given Sock to be monitored. * * Results: * None. * * Side effects: * Sock fd will be monitored for readability on next spin of * DriverThread. * *---------------------------------------------------------------------- */ static void SockPoll(Sock *sockPtr, short type, PollData *pdata) { NS_NONNULL_ASSERT(sockPtr != NULL); NS_NONNULL_ASSERT(pdata != NULL); sockPtr->pidx = PollSet(pdata, sockPtr->sock, type, &sockPtr->timeout); } /* *---------------------------------------------------------------------- * * SockTimeout -- * * Update socket with timeout * * Results: * None. * * Side effects: * Socket timeout will have nowPtr + timeout value * *---------------------------------------------------------------------- */ static void SockTimeout(Sock *sockPtr, const Ns_Time *nowPtr, const Ns_Time *timeout) { NS_NONNULL_ASSERT(sockPtr != NULL); sockPtr->timeout = *nowPtr; Ns_IncrTime(&sockPtr->timeout, timeout->sec, timeout->usec); } /* *---------------------------------------------------------------------- * * SockAccept -- * * Accept and initialize a new Sock in sockPtrPtr. * * Results: * SOCK_READY, SOCK_MORE, SOCK_SPOOL, * SOCK_ERROR + NULL sockPtr. * * Side effects: * Read-ahead may be attempted on new socket. * *---------------------------------------------------------------------- */ static SockState SockAccept(Driver *drvPtr, NS_SOCKET sock, Sock **sockPtrPtr, const Ns_Time *nowPtr) { Sock *sockPtr; SockState sockStatus; NS_DRIVER_ACCEPT_STATUS status; NS_NONNULL_ASSERT(drvPtr != NULL); sockPtr = SockNew(drvPtr); /* * Accept the new connection. */ status = DriverAccept(sockPtr, sock); if (unlikely(status == NS_DRIVER_ACCEPT_ERROR)) { sockStatus = SOCK_ERROR; /* * We reach the place frequently, especially on Linux, when we try to * accept multiple connection in one sweep. Usually, the errno is * EAGAIN. */ Ns_MutexLock(&drvPtr->lock); sockPtr->nextPtr = drvPtr->sockPtr; drvPtr->sockPtr = sockPtr; Ns_MutexUnlock(&drvPtr->lock); sockPtr = NULL; } else { sockPtr->acceptTime = *nowPtr; drvPtr->queuesize++; if (status == NS_DRIVER_ACCEPT_DATA) { /* * If there is already data present then read it without * polling if we're in async mode. */ if ((drvPtr->opts & NS_DRIVER_ASYNC) != 0u) { sockStatus = SockRead(sockPtr, 0, nowPtr); if ((int)sockStatus < 0) { Ns_Log(DriverDebug, "SockRead returned error %s", GetSockStateName(sockStatus)); SockRelease(sockPtr, sockStatus, errno); sockStatus = SOCK_ERROR; sockPtr = NULL; } } else { /* * Queue this socket without reading, NsGetRequest() in the * connection thread will perform actual reading of the * request. */ sockStatus = SOCK_READY; } } else if (status == NS_DRIVER_ACCEPT_QUEUE) { /* * We need to call RequestNew() to make sure socket has request * structure allocated, otherwise NsGetRequest() will call * SockRead() which is not what this driver wants. */ if (sockPtr->reqPtr == NULL) { RequestNew(sockPtr); } sockStatus = SOCK_READY; } else { sockStatus = SOCK_MORE; } } *sockPtrPtr = sockPtr; return sockStatus; } /* *---------------------------------------------------------------------- * * SockNew -- * * Allocate and/or initialize a Sock structure. Counterpart of * SockRelease(). * * Results: * SockPtr * * Side effects: * Potentially new memory is allocated. 
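 *
 *      Sizing note (illustrative): a fresh allocation reserves room for
 *      the per-socket local storage (SLS) slots behind the Sock struct,
 *      e.g. with nsconf.nextSlsId == 2 the allocated size is
 *
 *          sizeof(Sock) + 2 * sizeof(Ns_Callback *)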
* *---------------------------------------------------------------------- */ static Sock * SockNew(Driver *drvPtr) { Sock *sockPtr; NS_NONNULL_ASSERT(drvPtr != NULL); Ns_MutexLock(&drvPtr->lock); sockPtr = drvPtr->sockPtr; if (likely(sockPtr != NULL)) { drvPtr->sockPtr = sockPtr->nextPtr; sockPtr->keep = NS_FALSE; } Ns_MutexUnlock(&drvPtr->lock); if (sockPtr == NULL) { size_t sockSize = sizeof(Sock) + (nsconf.nextSlsId * sizeof(Ns_Callback *)); sockPtr = ns_calloc(1u, sockSize); sockPtr->drvPtr = drvPtr; } else { sockPtr->tfd = 0; sockPtr->taddr = NULL; sockPtr->flags = 0u; sockPtr->arg = NULL; sockPtr->recvSockState = NS_SOCK_NONE; } return sockPtr; } /* *---------------------------------------------------------------------- * * SockRelease -- * * Close a socket and release the connection structure for * re-use. * * Results: * None. * * Side effects: * None. * *---------------------------------------------------------------------- */ static void SockRelease(Sock *sockPtr, SockState reason, int err) { Driver *drvPtr; NS_NONNULL_ASSERT(sockPtr != NULL); Ns_Log(DriverDebug, "SockRelease reason %s err %d (sock %d)", GetSockStateName(reason), err, sockPtr->sock); drvPtr = sockPtr->drvPtr; assert(drvPtr != NULL); SockError(sockPtr, reason, err); if (sockPtr->sock != NS_INVALID_SOCKET) { SockClose(sockPtr, (int)NS_FALSE); } else { Ns_Log(DriverDebug, "SockRelease bypasses SockClose, since we have an invalid socket"); } NsSlsCleanup(sockPtr); drvPtr->queuesize--; if (sockPtr->reqPtr != NULL) { Ns_Log(DriverDebug, "SockRelease calls RequestFree"); RequestFree(sockPtr); } Ns_MutexLock(&drvPtr->lock); sockPtr->nextPtr = drvPtr->sockPtr; drvPtr->sockPtr = sockPtr; Ns_MutexUnlock(&drvPtr->lock); } /* *---------------------------------------------------------------------- * * SockError -- * * Log error message for given socket * re-use. * * Results: * None. * * Side effects: * None. * *---------------------------------------------------------------------- */ static void SockError(Sock *sockPtr, SockState reason, int err) { const char *errMsg = NULL; NS_NONNULL_ASSERT(sockPtr != NULL); switch (reason) { case SOCK_READY: case SOCK_SPOOL: case SOCK_MORE: case SOCK_CLOSE: case SOCK_CLOSETIMEOUT: /* This is normal, never log. */ break; case SOCK_READTIMEOUT: /* * For this case, whether this is acceptable or not * depends upon whether this sock was a keep-alive * that we were allowing to 'linger'. */ if (!sockPtr->keep) { errMsg = "Timeout during read"; } break; case SOCK_WRITETIMEOUT: errMsg = "Timeout during write"; break; case SOCK_READERROR: errMsg = "Unable to read request"; break; case SOCK_WRITEERROR: errMsg = "Unable to write request"; break; case SOCK_SHUTERROR: errMsg = "Unable to shutdown socket"; break; case SOCK_BADREQUEST: errMsg = "Bad Request"; SockSendResponse(sockPtr, 400, errMsg); break; case SOCK_TOOMANYHEADERS: errMsg = "Too Many Request Headers"; SockSendResponse(sockPtr, 414, errMsg); break; case SOCK_BADHEADER: errMsg = "Invalid Request Header"; SockSendResponse(sockPtr, 400, errMsg); break; case SOCK_ENTITYTOOLARGE: errMsg = "Request Entity Too Large"; SockSendResponse(sockPtr, 413, errMsg); break; case SOCK_ERROR: errMsg = "Unknown Error"; SockSendResponse(sockPtr, 400, errMsg); break; } if (errMsg != NULL) { char ipString[NS_IPADDR_SIZE]; Ns_Log(DriverDebug, "SockError: %s (%d: %s), sock: %d, peer: [%s]:%d, request: %.99s", errMsg, err, (err != 0) ? 
               strerror(err) : NS_EMPTY_STRING,
               sockPtr->sock,
               ns_inet_ntop((struct sockaddr *)&(sockPtr->sa), ipString, sizeof(ipString)),
               Ns_SockaddrGetPort((struct sockaddr *)&(sockPtr->sa)),
               (sockPtr->reqPtr != NULL) ? sockPtr->reqPtr->buffer.string : NS_EMPTY_STRING);
    }
}


/*
 *----------------------------------------------------------------------
 *
 * SockSendResponse --
 *
 *      Send an HTTP response directly to the client using the
 *      driver callback.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      May not send the complete response to the client
 *      when encountering a non-writable connection socket.
 *
 *----------------------------------------------------------------------
 */
static void
SockSendResponse(Sock *sockPtr, int code, const char *errMsg)
{
    struct iovec iov[3];
    char         header[32];
    ssize_t      sent, tosend;

    NS_NONNULL_ASSERT(sockPtr != NULL);
    NS_NONNULL_ASSERT(errMsg != NULL);

    snprintf(header, sizeof(header), "HTTP/1.0 %d ", code);
    iov[0].iov_base = header;
    iov[0].iov_len = strlen(header);
    iov[1].iov_base = (void *)errMsg;
    iov[1].iov_len = strlen(errMsg);
    iov[2].iov_base = (void *)"\r\n\r\n";
    iov[2].iov_len = 4u;
    tosend = (ssize_t)(iov[0].iov_len + iov[1].iov_len + iov[2].iov_len);
    sent = NsDriverSend(sockPtr, iov, 3, 0u);
    if (sent < tosend) {
        Ns_Log(Warning, "Driver: partial write while sending response;"
               " %" PRIdz " < %" PRIdz, sent, tosend);
    }

    /*
     * In case we have a request structure, complain in the system log about
     * the bad request.
     */
    if (sockPtr->reqPtr != NULL) {
        Request    *reqPtr = sockPtr->reqPtr;
        const char *requestLine = (reqPtr->request.line != NULL) ? reqPtr->request.line : NS_EMPTY_STRING;

        (void)ns_inet_ntop((struct sockaddr *)&(sockPtr->sa), sockPtr->reqPtr->peer, NS_IPADDR_SIZE);

        /*
         * Check if the bad request looks like a TLS handshake. If yes, there
         * is no need to print out the received buffer.
         */
        if (requestLine[0] == (char)0x16 && requestLine[1] >= 3 && requestLine[2] == 1) {
            Ns_Log(Warning, "invalid request %d (%s) from peer %s: received TLS handshake on a non-TLS connection",
                   code, errMsg, reqPtr->peer);
        } else {
            Tcl_DString dsReqLine;

            Tcl_DStringInit(&dsReqLine);
            Ns_Log(Warning, "invalid request: %d (%s) from peer %s request '%s' offsets: read %" PRIuz
                   " write %" PRIuz " content %" PRIuz " avail %" PRIuz,
                   code, errMsg,
                   reqPtr->peer,
                   Ns_DStringAppendPrintable(&dsReqLine, NS_FALSE, requestLine, strlen(requestLine)),
                   reqPtr->roff,
                   reqPtr->woff,
                   reqPtr->coff,
                   reqPtr->avail);
            Tcl_DStringFree(&dsReqLine);

            LogBuffer(Warning, "REQ BUFFER", reqPtr->buffer.string, (size_t)reqPtr->buffer.length);
        }
    } else {
        Ns_Log(Warning, "invalid request: %d (%s) - no request information available",
               code, errMsg);
    }
}


/*
 *----------------------------------------------------------------------
 *
 * SockTrigger --
 *
 *      Wake up the DriverThread (or a spooler/writer thread) from its
 *      blocking ns_poll() call by sending a single byte to the trigger
 *      pipe.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      The polling thread will wake up.
 *
 *----------------------------------------------------------------------
 */
static void
SockTrigger(NS_SOCKET sock)
{
    if (send(sock, NS_EMPTY_STRING, 1, 0) != 1) {
        const char *errstr = ns_sockstrerror(ns_sockerrno);

        Ns_Log(Error, "driver: trigger send() failed: %s", errstr);
    }
}


/*
 *----------------------------------------------------------------------
 *
 * SockClose --
 *
 *      Closes connection socket, does all cleanups. The input parameter
 *      "keep" might be NS_TRUE/NS_FALSE or -1 if undecided.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      None
 *
 *----------------------------------------------------------------------
 */
static void
SockClose(Sock *sockPtr, int keep)
{
    NS_NONNULL_ASSERT(sockPtr != NULL);

    if (keep != 0) {
        bool driverKeep = DriverKeep(sockPtr);
        keep = (int)driverKeep;
    }
    if (keep == (int)NS_FALSE) {
        DriverClose(sockPtr);
    }
    Ns_MutexLock(&sockPtr->drvPtr->lock);
    sockPtr->keep = (bool)keep;
    Ns_MutexUnlock(&sockPtr->drvPtr->lock);

    /*
     * Unconditionally remove the temporary file; the connection thread
     * should take care of very large uploads.
     */
    if (sockPtr->tfile != NULL) {
        unlink(sockPtr->tfile);
        ns_free(sockPtr->tfile);
        sockPtr->tfile = NULL;

        if (sockPtr->tfd > 0) {
            /*
             * Close and reset fd. The fd should be > 0 unless we are in error
             * conditions.
             */
            (void) ns_close(sockPtr->tfd);
        }
        sockPtr->tfd = 0;

    } else if (sockPtr->tfd > 0) {
        /*
         * This must be an fd allocated via Ns_GetTemp().
         */
        Ns_ReleaseTemp(sockPtr->tfd);
        sockPtr->tfd = 0;
    }

#ifndef _WIN32
    /*
     * Un-map temp file used for spooled content.
     */
    if (sockPtr->taddr != NULL) {
        munmap(sockPtr->taddr, (size_t)sockPtr->tsize);
        sockPtr->taddr = NULL;
    }
#endif
}


/*
 *----------------------------------------------------------------------
 *
 * ChunkedDecode --
 *
 *      Reads the content from the incoming request buffer and tries
 *      to decode chunked encoding parts. The function can be called
 *      repeatedly with incomplete input and optionally overwrites the
 *      buffer with the decoded data. The decoded data is always
 *      shorter than the encoded data.
 *
 * Results:
 *      NS_TRUE when the chunk was complete, NS_FALSE otherwise.
 *
 * Side effects:
 *      Updates the buffer if update is true (and adjusts
 *      reqPtr->chunkWriteOff); always updates reqPtr->chunkStartOff to
 *      allow incremental operations.
 *
 *----------------------------------------------------------------------
 */
static bool
ChunkedDecode(Request *reqPtr, bool update)
{
    const Tcl_DString *bufPtr;
    const char        *end, *chunkStart;
    bool               success = NS_TRUE;

    NS_NONNULL_ASSERT(reqPtr != NULL);

    bufPtr = &reqPtr->buffer;
    end = bufPtr->string + bufPtr->length;
    chunkStart = bufPtr->string + reqPtr->chunkStartOff;

    while (reqPtr->chunkStartOff < (size_t)bufPtr->length) {
        char   *p = strstr(chunkStart, "\r\n");
        long    parsedLength;
        size_t  chunk_length;

        if (p == NULL) {
            Ns_Log(DriverDebug, "ChunkedDecode: chunk did not find end-of-line");
            success = NS_FALSE;
            break;
        }

        *p = '\0';
        parsedLength = strtol(chunkStart, NULL, 16);
        *p = '\r';

        /*
         * Accept only non-negative chunk lengths that fit into the remaining
         * buffer. Comparing against the remaining size (instead of computing
         * p + 2 + chunk_length) avoids pointer-arithmetic overflow on bogus
         * huge values.
         */
        if (parsedLength < 0 || (size_t)parsedLength > (size_t)(end - (p + 2))) {
            Ns_Log(DriverDebug, "ChunkedDecode: chunk length invalid or past end of buffer");
            success = NS_FALSE;
            break;
        }
        chunk_length = (size_t)parsedLength;

        if (update) {
            char *writeBuffer = bufPtr->string + reqPtr->chunkWriteOff;

            memmove(writeBuffer, p + 2, chunk_length);
            reqPtr->chunkWriteOff += chunk_length;
            *(writeBuffer + chunk_length) = '\0';
        }
        reqPtr->chunkStartOff += (size_t)(p - chunkStart) + 4u + chunk_length;
        chunkStart = bufPtr->string + reqPtr->chunkStartOff;
    }

    return success;
}


/*
 *----------------------------------------------------------------------
 *
 * SockRead --
 *
 *      Read content from the given Sock, processing the input as
 *      necessary. This is the core callback routine designed to
 *      either be called repeatedly within the DriverThread during
 *      an async read-ahead or in a blocking loop in NsGetRequest()
 *      at the start of connection processing.
 *
 * Results:
 *      SOCK_READY: Request is ready for processing.
 *      SOCK_MORE:  More input is required.
 *      SOCK_ERROR: Client drop or timeout.
* SOCK_SPOOL: Pass input handling to spooler * SOCK_CLOSE: peer closed connection * SOCK_BADREQUEST * SOCK_BADHEADER * SOCK_TOOMANYHEADERS * * Side effects: * The Request structure will be built up for use by the * connection thread. Also, before returning SOCK_READY, * the next byte to read mark and bytes available are set * to the beginning of the content, just beyond the headers. * * Contents may be spooled into temp file and mmap-ed * *---------------------------------------------------------------------- */ static SockState SockRead(Sock *sockPtr, int spooler, const Ns_Time *timePtr) { const Driver *drvPtr; Request *reqPtr; Tcl_DString *bufPtr; struct iovec buf; char tbuf[16384]; size_t buflen, nread; ssize_t n; SockState resultState; NS_NONNULL_ASSERT(sockPtr != NULL); drvPtr = sockPtr->drvPtr; tbuf[0] = '\0'; /* * In case of "keepwait", the accept time is not meaningful and * reset to 0. In such cases, update "acceptTime" to the actual * begin of a request. This part is intended for async drivers. */ if (sockPtr->acceptTime.sec == 0) { assert(timePtr != NULL); sockPtr->acceptTime = *timePtr; } /* * Initialize request structure if needed. */ if (sockPtr->reqPtr == NULL) { RequestNew(sockPtr); } /* * On the first read, attempt to read-ahead "bufsize" bytes. * Otherwise, read only the number of bytes left in the * content. */ reqPtr = sockPtr->reqPtr; bufPtr = &reqPtr->buffer; if (reqPtr->length == 0u) { nread = drvPtr->bufsize; } else { nread = reqPtr->length - reqPtr->avail; } /* * Grow the buffer to include space for the next bytes. */ buflen = (size_t)bufPtr->length; n = (ssize_t)(buflen + nread); if (unlikely(n > drvPtr->maxinput)) { n = (ssize_t)drvPtr->maxinput; nread = (size_t)n - buflen; if (nread == 0u) { Ns_Log(DriverDebug, "SockRead: maxinput reached %" TCL_LL_MODIFIER "d", drvPtr->maxinput); return SOCK_ERROR; } } /* * Use temp file for content larger than "readahead" bytes. */ #ifndef _WIN32 if (reqPtr->coff > 0u /* We are in the content part (after the header) */ && !reqPtr->chunkStartOff /* Never spool chunked encoded data since we decode in memory */ && reqPtr->length > (size_t)drvPtr->readahead /* We need more data */ && sockPtr->tfd <= 0 /* We have no spool fd */ ) { const DrvSpooler *spPtr = &drvPtr->spooler; Ns_Log(DriverDebug, "SockRead: require tmp file for content spooling (length %" PRIuz" > readahead " "%" TCL_LL_MODIFIER "d)", reqPtr->length, drvPtr->readahead); /* * In driver mode send this Sock to the spooler thread if * it is running */ if (spooler == 0 && spPtr->threads > 0) { return SOCK_SPOOL; } /* * If "maxupload" is specified and content size exceeds the configured * values, spool uploads into normal temp file (not deleted). We do * not want to map such large files into memory. */ if (drvPtr->maxupload > 0 && reqPtr->length > (size_t)drvPtr->maxupload ) { size_t tfileLength = strlen(drvPtr->uploadpath) + 16u; sockPtr->tfile = ns_malloc(tfileLength); snprintf(sockPtr->tfile, tfileLength, "%s/%d.XXXXXX", drvPtr->uploadpath, sockPtr->sock); sockPtr->tfd = ns_mkstemp(sockPtr->tfile); if (sockPtr->tfd == NS_INVALID_FD) { Ns_Log(Error, "SockRead: cannot create spool file with template '%s': %s", sockPtr->tfile, strerror(errno)); } } else { /* * Get a temporary fd. These FDs are used for mmapping. 
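             * Such fds are pooled; they must be returned later via
             * Ns_ReleaseTemp() (as done in SockClose()) rather than being
             * closed directly with ns_close().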
*/ sockPtr->tfd = Ns_GetTemp(); } if (unlikely(sockPtr->tfd == NS_INVALID_FD)) { Ns_Log(DriverDebug, "SockRead: spool fd invalid"); return SOCK_ERROR; } n = (ssize_t)((size_t)bufPtr->length - reqPtr->coff); assert(n >= 0); if (ns_write(sockPtr->tfd, bufPtr->string + reqPtr->coff, (size_t)n) != n) { return SOCK_WRITEERROR; } Tcl_DStringSetLength(bufPtr, 0); } #endif if (sockPtr->tfd > 0) { buf.iov_base = tbuf; buf.iov_len = MIN(nread, sizeof(tbuf)); } else { Tcl_DStringSetLength(bufPtr, (int)(buflen + nread)); buf.iov_base = bufPtr->string + reqPtr->woff; buf.iov_len = nread; } if (reqPtr->leftover > 0u) { /* * There is some leftover in the buffer, don't read but take the * leftover instead as input. */ n = (ssize_t)reqPtr->leftover; reqPtr->leftover = 0u; buflen = 0u; Ns_Log(DriverDebug, "SockRead receive from leftover %" PRIdz " bytes", n); } else { /* * Receive actually some data from the driver. */ n = NsDriverRecv(sockPtr, &buf, 1, NULL); Ns_Log(DriverDebug, "SockRead receive from network %" PRIdz " bytes sockState %.2x", n, (int)sockPtr->recvSockState); } { Ns_SockState nsSockState = sockPtr->recvSockState; /* * The nsSockState has one of the following values, when provided: * * NS_SOCK_READ, NS_SOCK_DONE, NS_SOCK_AGAIN, NS_SOCK_EXCEPTION, * NS_SOCK_TIMEOUT */ switch (nsSockState) { case NS_SOCK_TIMEOUT: NS_FALL_THROUGH; /* fall through */ case NS_SOCK_EXCEPTION: return SOCK_READERROR; case NS_SOCK_AGAIN: Tcl_DStringSetLength(bufPtr, (int)buflen); return SOCK_MORE; case NS_SOCK_DONE: return SOCK_CLOSE; case NS_SOCK_READ: break; case NS_SOCK_CANCEL: NS_FALL_THROUGH; /* fall through */ case NS_SOCK_EXIT: NS_FALL_THROUGH; /* fall through */ case NS_SOCK_INIT: NS_FALL_THROUGH; /* fall through */ case NS_SOCK_WRITE: Ns_Log(Warning, "SockRead received unexpected state %.2x from driver", nsSockState); return SOCK_READERROR; case NS_SOCK_NONE: /* * Old style state management based on "n" and "errno", which is * more fragile. We keep there for old-style drivers. */ if (n < 0) { Tcl_DStringSetLength(bufPtr, (int)buflen); /* * The driver returns -1 when the peer closed the connection, but * clears the errno such we can distinguish from error conditions. */ if (errno == 0) { return SOCK_CLOSE; } return SOCK_READERROR; } if (n == 0) { Tcl_DStringSetLength(bufPtr, (int)buflen); return SOCK_MORE; } break; } } if (sockPtr->tfd > 0) { if (ns_write(sockPtr->tfd, tbuf, (size_t)n) != n) { return SOCK_WRITEERROR; } } else { Tcl_DStringSetLength(bufPtr, (int)(buflen + (size_t)n)); } reqPtr->woff += (size_t)n; reqPtr->avail += (size_t)n; /* * This driver needs raw buffer, it is binary or non-HTTP request */ if ((drvPtr->opts & NS_DRIVER_NOPARSE) != 0u) { return SOCK_READY; } resultState = SockParse(sockPtr); return resultState; } /*---------------------------------------------------------------------- * * LogBuffer -- * * Debug function to output buffer content when the provided severity is * enabled. The function prints just visible characters and space as is * and prints the hex code otherwise. * * Results: * None. 
 *
 * Side effects:
 *      Writes to error.log
 *
 *----------------------------------------------------------------------
 */
static void
LogBuffer(Ns_LogSeverity severity, const char *msg, const char *buffer, size_t len)
{
    Tcl_DString ds;

    NS_NONNULL_ASSERT(msg != NULL);
    NS_NONNULL_ASSERT(buffer != NULL);

    if (Ns_LogSeverityEnabled(severity)) {
        Tcl_DStringInit(&ds);
        Tcl_DStringAppend(&ds, msg, -1);
        Tcl_DStringAppend(&ds, ": ", 2);
        (void)Ns_DStringAppendPrintable(&ds, NS_FALSE, buffer, len);

        Ns_Log(severity, "%s", ds.string);
        Tcl_DStringFree(&ds);
    }
}


/*----------------------------------------------------------------------
 *
 * EndOfHeader --
 *
 *      Function to be called (once) when the end of the header is reached.
 *      At this time, all request header lines have already been parsed
 *      correctly.
 *
 * Results:
 *      The offset of the content (reqPtr->roff).
 *
 * Side effects:
 *      Update various reqPtr fields and signal certain facts and error
 *      conditions via sockPtr->flags. In error conditions, sockPtr->keep is
 *      set to NS_FALSE.
 *
 *----------------------------------------------------------------------
 */
static size_t
EndOfHeader(Sock *sockPtr)
{
    Request    *reqPtr;
    const char *s;

    NS_NONNULL_ASSERT(sockPtr != NULL);

    reqPtr = sockPtr->reqPtr;
    assert(reqPtr != NULL);

    reqPtr->chunkStartOff = 0u;

    /*
     * Check for "expect: 100-continue" and clear flag in case we have
     * pipelining.
     */
    sockPtr->flags &= ~(NS_CONN_CONTINUE);
    s = Ns_SetIGet(reqPtr->headers, "expect");
    if (s != NULL) {
        if (*s == '1' && *(s+1) == '0' && *(s+2) == '0' && *(s+3) == '-') {
            char *dup = ns_strdup(s+4);

            Ns_StrToLower(dup);
            if (STREQ(dup, "continue")) {
                sockPtr->flags |= NS_CONN_CONTINUE;
            }
            ns_free(dup);
        }
    }

    /*
     * Handle content-length, which might be provided or not.
     * Clear length specific error flags.
     */
    sockPtr->flags &= ~(NS_CONN_ENTITYTOOLARGE);
    s = Ns_SetIGet(reqPtr->headers, "content-length");
    if (s == NULL) {
        s = Ns_SetIGet(reqPtr->headers, "Transfer-Encoding");

        if (s != NULL) {
            /* Lower case is in the standard, capitalized by macOS */
            if (STREQ(s, "chunked") || STREQ(s, "Chunked")) {
                Tcl_WideInt expected;

                reqPtr->chunkStartOff = reqPtr->roff;
                reqPtr->chunkWriteOff = reqPtr->chunkStartOff;
                reqPtr->contentLength = 0u;

                /*
                 * We need reqPtr->expectedLength for safely terminating the
                 * read loop.
                 */
                s = Ns_SetIGet(reqPtr->headers, "X-Expected-Entity-Length");

                if ((s != NULL)
                    && (Ns_StrToWideInt(s, &expected) == NS_OK)
                    && (expected > 0) ) {
                    reqPtr->expectedLength = (size_t)expected;
                }
                s = NULL;
            }
        }
    }

    /*
     * In case a valid and meaningful content length was provided, the string
     * with the content length ("s") is not NULL.
     */
    if (s != NULL) {
        Tcl_WideInt length;

        if ((Ns_StrToWideInt(s, &length) == NS_OK) && (length > 0)) {
            reqPtr->length = (size_t)length;
            /*
             * Handle too large input requests.
             */
            if (reqPtr->length > (size_t)sockPtr->drvPtr->maxinput) {
                Ns_Log(Warning, "SockParse: request too large, length=%"
                       PRIdz ", maxinput=%" TCL_LL_MODIFIER "d",
                       reqPtr->length, sockPtr->drvPtr->maxinput);

                sockPtr->keep = NS_FALSE;
                sockPtr->flags |= NS_CONN_ENTITYTOOLARGE;
            }
            reqPtr->contentLength = (size_t)length;
        }
    }

    /*
     * Compression format handling: parse information from request headers
     * indicating allowed compression formats for quick access.
     *
     * Clear the compression accepted flags.
     */
    sockPtr->flags &= ~(NS_CONN_ZIPACCEPTED|NS_CONN_BROTLIACCEPTED);

    s = Ns_SetIGet(reqPtr->headers, "Accept-Encoding");
    if (s != NULL) {
        bool gzipAccept, brotliAccept;

        /*
         * Get allowed compression formats from "accept-encoding" headers.
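         * For example (illustrative), a request header such as
         *
         *     Accept-Encoding: br, gzip;q=0.8
         *
         * results in both gzipAccept and brotliAccept being set.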
*/ NsParseAcceptEncoding(reqPtr->request.version, s, &gzipAccept, &brotliAccept); if (gzipAccept || brotliAccept) { /* * Don't allow compression formats for Range requests. */ s = Ns_SetIGet(reqPtr->headers, "Range"); if (s == NULL) { if (gzipAccept) { sockPtr->flags |= NS_CONN_ZIPACCEPTED; } if (brotliAccept) { sockPtr->flags |= NS_CONN_BROTLIACCEPTED; } } } } /* * Set up request length for spooling and further read operations */ if (reqPtr->contentLength != 0u) { /* * Content-Length was provided, use it */ reqPtr->length = reqPtr->contentLength; } return reqPtr->roff; } /*---------------------------------------------------------------------- * * SockParse -- * * Construct the given conn by parsing input buffer until end of * headers. Return SOCK_READY when finished parsing. * * Results: * SOCK_READY: Conn is ready for processing. * SOCK_MORE: More input is required. * SOCK_ERROR: Malformed request. * SOCK_BADREQUEST * SOCK_BADHEADER * SOCK_TOOMANYHEADERS * * Side effects: * An Ns_Request and/or Ns_Set may be allocated. * Ns_Conn buffer management offsets updated. * *---------------------------------------------------------------------- */ static SockState SockParse(Sock *sockPtr) { const Tcl_DString *bufPtr; const Driver *drvPtr; Request *reqPtr; char save; SockState result; NS_NONNULL_ASSERT(sockPtr != NULL); drvPtr = sockPtr->drvPtr; NsUpdateProgress((Ns_Sock *) sockPtr); reqPtr = sockPtr->reqPtr; bufPtr = &reqPtr->buffer; /* * Scan lines (header) until start of content (body-part) */ while (reqPtr->coff == 0u) { char *s, *e; size_t cnt; /* * Find the next header line. */ s = bufPtr->string + reqPtr->roff; e = memchr(s, INTCHAR('\n'), reqPtr->avail); if (unlikely(e == NULL)) { /* * Input not yet newline terminated - request more data. */ return SOCK_MORE; } /* * Check for max single line overflows. * * Previous versions if the driver returned here directly an * error code, which was handled via HTTP error message * provided via SockError(). However, the SockError() handling * closes the connection immediately. This has the * consequence, that the HTTP client might never see the error * message, since the request was not yet fully transmitted, * but it will see a "broken pipe: 13" message instead. We * read now the full request and return the message via * ConnRunRequest(). */ if (unlikely((e - s) > drvPtr->maxline)) { sockPtr->keep = NS_FALSE; if (reqPtr->request.line == NULL) { Ns_Log(DriverDebug, "SockParse: maxline reached of %d bytes", drvPtr->maxline); sockPtr->flags = NS_CONN_REQUESTURITOOLONG; Ns_Log(Warning, "request line is too long (%d bytes)", (int)(e - s)); } else { sockPtr->flags = NS_CONN_LINETOOLONG; Ns_Log(Warning, "request header line is too long (%d bytes)", (int)(e - s)); } } /* * Update next read pointer to end of this line. */ cnt = (size_t)(e - s) + 1u; reqPtr->roff += cnt; reqPtr->avail -= cnt; /* * Adjust end pointer to the last content character before the line * terminator. */ if (likely(e > s) && likely(*(e-1) == '\r')) { --e; } /* * Check for end of headers in case we have not done it yet. */ if (unlikely(e == s) && (reqPtr->coff == 0u)) { /* * We are at end of headers. */ reqPtr->coff = EndOfHeader(sockPtr); /* * In cases the client sent "expect: 100-continue", report back that * everything is fine with the headers. 
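             *
             * Illustrative exchange (hypothetical request): for
             *
             *     POST /upload HTTP/1.1
             *     Expect: 100-continue
             *     Content-Length: 1000000
             *
             * the code below replies "HTTP/1.1 100 Continue\r\n\r\n" before
             * the client transmits the body, unless the entity was already
             * flagged as too large.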
*/ if ((sockPtr->flags & NS_CONN_CONTINUE) != 0u) { Ns_Log(Ns_LogRequestDebug, "honoring 100-continue"); /* * In case, the request entity (body) was too large, we can * return immediately the error message, when the client has * flagged this via "Expect:". Otherwise we have to read the * full request (although it is too large) to drain the * channel. Otherwise, the server might close the connection * *before* it has received full request with its body from * the client. We just keep the flag and let * Ns_ConnRunRequest() handle the error message. */ if ((sockPtr->flags & NS_CONN_ENTITYTOOLARGE) != 0u) { Ns_Log(Ns_LogRequestDebug, "100-continue: entity too large"); return SOCK_ENTITYTOOLARGE; /* * We have no other error message flagged (future ones * have to be handled here). */ } else { struct iovec iov[1]; ssize_t sent; /* * Reply with "100 continue". */ Ns_Log(Ns_LogRequestDebug, "100-continue: reply CONTINUE"); iov[0].iov_base = (char *)"HTTP/1.1 100 Continue\r\n\r\n"; iov[0].iov_len = strlen(iov[0].iov_base); sent = Ns_SockSendBufs((Ns_Sock *)sockPtr, iov, 1, NULL, 0u); if (sent != (ssize_t)iov[0].iov_len) { Ns_Log(Warning, "could not deliver response: 100 Continue"); /* * Should we bail out here? */ } } } } else { /* * We have the request-line or a header line to process. */ save = *e; *e = '\0'; if (unlikely(reqPtr->request.line == NULL)) { /* * There is no request-line set. The received line must the * the request-line. */ Ns_Log(DriverDebug, "SockParse (%d): parse request line <%s>", sockPtr->sock, s); if (Ns_ParseRequest(&reqPtr->request, s) == NS_ERROR) { /* * Invalid request. */ return SOCK_BADREQUEST; } /* * HTTP 0.9 did not have a HTTP-version number or request headers * and no empty line terminating the request header. */ if (unlikely(reqPtr->request.version < 1.0)) { /* * Pre-HTTP/1.0 request. */ reqPtr->coff = reqPtr->roff; Ns_Log(Notice, "pre-HTTP/1.0 request <%s>", reqPtr->request.line); } } else if (Ns_ParseHeader(reqPtr->headers, s, Preserve) != NS_OK) { /* * Invalid header. */ return SOCK_BADHEADER; } else { /* * Check for max number of headers */ if (unlikely(Ns_SetSize(reqPtr->headers) > (size_t)drvPtr->maxheaders)) { Ns_Log(DriverDebug, "SockParse (%d): maxheaders reached of %d bytes", sockPtr->sock, drvPtr->maxheaders); return SOCK_TOOMANYHEADERS; } } *e = save; } } if (unlikely(reqPtr->request.line == NULL)) { /* * We are at end of headers, but we have not parsed a request line * (maybe just two linefeeds). */ return SOCK_BADREQUEST; } /* * We are in the request body. */ assert(reqPtr->coff > 0u); assert(reqPtr->request.line != NULL); /* * Check if all content has arrived. */ Ns_Log(Dev, "=== length < avail (length %" PRIuz ", avail %" PRIuz ") tfd %d tfile %p chunkStartOff %" PRIuz, reqPtr->length, reqPtr->avail, sockPtr->tfd, (void *)sockPtr->tfile, reqPtr->chunkStartOff); if (reqPtr->chunkStartOff != 0u) { /* * Chunked encoding was provided. */ bool complete; size_t currentContentLength; complete = ChunkedDecode(reqPtr, NS_TRUE); currentContentLength = reqPtr->chunkWriteOff - reqPtr->coff; /* * A chunk might be complete, but it might not be the last * chunk from the client. The best thing would be to be able * to read until EOF here. In cases, where the (optional) * "expectedLength" was provided by the client, we terminate * depending on that information */ if ((!complete) || (reqPtr->expectedLength != 0u && currentContentLength < reqPtr->expectedLength)) { /* * ChunkedDecode wants more data. */ return SOCK_MORE; } /* * ChunkedDecode has enough data. 
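         *
         * Example (illustrative): the complete chunked body
         *
         *     5\r\nhello\r\n0\r\n\r\n
         *
         * decodes in place to "hello", i.e. currentContentLength == 5.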
*/ reqPtr->length = (size_t)currentContentLength; } if (reqPtr->avail < reqPtr->length) { Ns_Log(DriverDebug, "SockRead wait for more input"); /* * Wait for more input. */ return SOCK_MORE; } Ns_Log(Dev, "=== all required data is available (avail %" PRIuz", length %" PRIuz ", " "readahead %" TCL_LL_MODIFIER "d maxupload %" TCL_LL_MODIFIER "d) tfd %d", reqPtr->avail, reqPtr->length, drvPtr->readahead, drvPtr->maxupload, sockPtr->tfd); /* * We have all required data in the receive buffer or in a temporary file. * * - Uploads > "readahead": these are put into temporary files. * * - Uploads > "maxupload": these are put into temporary files * without mmapping, no content parsing will be performed in memory. */ result = SOCK_READY; if (sockPtr->tfile != NULL) { reqPtr->content = NULL; reqPtr->next = NULL; reqPtr->avail = 0u; Ns_Log(DriverDebug, "content spooled to file: size %" PRIdz ", file %s", reqPtr->length, sockPtr->tfile); /* * Nothing more to do, return via SOCK_READY; */ } else { /* * Uploads < "maxupload" are spooled to files and mmapped in order to * provide the usual interface via [ns_conn content]. */ if (sockPtr->tfd > 0) { #ifdef _WIN32 /* * For _WIN32, tfd should never be set, since tfd-spooling is not * implemented for windows. */ assert(0); #else int prot = PROT_READ | PROT_WRITE; /* * Add a byte to make sure, the string termination with \0 below falls * always into the mmapped area. On some older OSes this might lead to * crashes when we hitting page boundaries. */ ssize_t rc = ns_write(sockPtr->tfd, "\0", 1); if (rc == -1) { Ns_Log(Error, "socket: could not append terminating 0-byte"); } sockPtr->tsize = reqPtr->length + 1; sockPtr->taddr = mmap(0, sockPtr->tsize, prot, MAP_PRIVATE, sockPtr->tfd, 0); if (sockPtr->taddr == MAP_FAILED) { sockPtr->taddr = NULL; result = SOCK_ERROR; } else { reqPtr->content = sockPtr->taddr; Ns_Log(Debug, "content spooled to mmapped file: readahead=%" TCL_LL_MODIFIER "d, filesize=%" PRIdz, drvPtr->readahead, sockPtr->tsize); } #endif } else { /* * Set the content the begin of the remaining buffer (content offset). * This happens as well when reqPtr->contentLength is 0, but it is * needed for chunked input processing. */ reqPtr->content = bufPtr->string + reqPtr->coff; } reqPtr->next = reqPtr->content; /* * Add a terminating null character. The content might be from the receive * buffer (Tcl_DString) or from the mmapped file. Non-mmapped files are handled * above. */ if (reqPtr->length > 0u) { Ns_Log(DriverDebug, "SockRead adds null terminating character at content[%" PRIuz "]", reqPtr->length); reqPtr->savedChar = reqPtr->content[reqPtr->length]; reqPtr->content[reqPtr->length] = '\0'; if (sockPtr->taddr == NULL) { LogBuffer(DriverDebug, "UPDATED BUFFER", sockPtr->reqPtr->buffer.string, (size_t)reqPtr->buffer.length); } } } return result; } /* *---------------------------------------------------------------------- * * SockSetServer -- * * Set virtual server from driver context or Host header. * * Results: * void. * * Side effects: * * Updates sockPtr->servPtr. In case an invalid server set, or the * required host field in HTTP/1.1 is missing the HTTP-method is set to * the constant "BAD". 
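 *
 *      Resolution sketch (hypothetical configuration): with a mapping of
 *      "www.example.com" to server "s1", a request carrying
 *
 *          Host: WWW.EXAMPLE.COM.
 *
 *      is normalized (trailing dot stripped, lower-cased) and resolved to
 *      "s1"; unknown hosts fall back to the driver's default mapping.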
 *
 *----------------------------------------------------------------------
 */
static void
SockSetServer(Sock *sockPtr)
{
    char     *host;
    Request  *reqPtr;
    bool      bad_request = NS_FALSE;
    Driver   *drvPtr;

    NS_NONNULL_ASSERT(sockPtr != NULL);

    reqPtr = sockPtr->reqPtr;
    assert(reqPtr != NULL);

    drvPtr = sockPtr->drvPtr;
    assert(drvPtr != NULL);

    sockPtr->servPtr  = drvPtr->servPtr;
    sockPtr->location = drvPtr->location;

    host = Ns_SetIGet(reqPtr->headers, "Host");
    Ns_Log(DriverDebug, "SockSetServer host '%s' request line '%s'", host, reqPtr->request.line);

    if (unlikely((host == NULL) && (reqPtr->request.version >= 1.1))) {
        /*
         * HTTP/1.1 requires a host header.
         */
        Ns_Log(Notice, "request header field \"Host\" is missing in HTTP/1.1 request: \"%s\"\n",
               reqPtr->request.line);
        bad_request = NS_TRUE;
    }

    if (sockPtr->servPtr == NULL) {
        const ServerMap *mapPtr = NULL;

        if (host != NULL) {
            const Tcl_HashEntry *hPtr;
            size_t               hostLength = strlen(host);

            /*
             * Remove a trailing dot of the host header field, since RFC 3986
             * allows fully qualified "absolute" DNS names in host fields (see
             * e.g. its section 3.2.2).
             */
            if (hostLength > 0u && host[hostLength - 1] == '.') {
                host[hostLength - 1] = '\0';
            }

            /*
             * Convert provided host header field to lower case before hash
             * lookup.
             */
            Ns_StrToLower(host);

            hPtr = Tcl_FindHashEntry(&drvPtr->hosts, host);
            Ns_Log(DriverDebug, "SockSetServer driver '%s' host '%s' => %p",
                   drvPtr->moduleName, host, (void*)hPtr);

            if (hPtr != NULL) {
                /*
                 * The request with the provided host header field could be
                 * resolved against a certain server.
                 */
                mapPtr = Tcl_GetHashValue(hPtr);
            } else {
                /*
                 * The host header field content is not found in the mapping
                 * table.
                 */
                Ns_Log(DriverDebug,
                       "cannot locate host header content '%s' in virtual hosts "
                       "table of driver '%s', fall back to default '%s'",
                       host, drvPtr->moduleName,
                       drvPtr->defMapPtr->location);

                if (Ns_LogSeverityEnabled(DriverDebug)) {
                    Tcl_HashEntry  *hPtr2;
                    Tcl_HashSearch  search;

                    hPtr2 = Tcl_FirstHashEntry(&drvPtr->hosts, &search);
                    while (hPtr2 != NULL) {
                        Ns_Log(Notice, "... host entry: '%s'\n",
                               (char *)Tcl_GetHashKey(&drvPtr->hosts, hPtr2));
                        hPtr2 = Tcl_NextHashEntry(&search);
                    }
                }
            }
        }
        if (mapPtr == NULL) {
            /*
             * Could not look up the virtual host; get the default mapping
             * from the driver.
             */
            mapPtr = drvPtr->defMapPtr;
        }
        if (mapPtr != NULL) {
            sockPtr->servPtr  = mapPtr->servPtr;
            sockPtr->location = mapPtr->location;
        }
        if (sockPtr->servPtr == NULL) {
            Ns_Log(Warning, "cannot determine server for request: \"%s\" (host \"%s\")\n",
                   reqPtr->request.line, host);
            bad_request = NS_TRUE;
        }
    }

    if (unlikely(bad_request)) {
        Ns_Log(DriverDebug, "SockSetServer sets method to BAD");
        ns_free((char *)reqPtr->request.method);
        reqPtr->request.method = ns_strdup("BAD");
    }

}


/*
 *======================================================================
 * Spooler Thread: Receive asynchronously from the client socket
 *======================================================================
 */

/*
 *----------------------------------------------------------------------
 *
 * SpoolerThread --
 *
 *      Spooling socket driver thread.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      Sockets handed over by the driver thread are read asynchronously
 *      here until the request is complete; complete requests are placed
 *      on the run queue to be serviced, and sockets are gracefully
 *      closed when done.
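 *
 *      Each spin of the event loop below follows this pattern (a sketch
 *      of the actual code):
 *
 *          PollReset(&pdata);
 *          (void)PollSet(&pdata, queuePtr->pipe[0], (short)POLLIN, NULL);
 *          ... SockPoll() for every socket in the read-ahead list ...
 *          (void)PollWait(&pdata, pollTimeout);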
* *---------------------------------------------------------------------- */ static void SpoolerThread(void *arg) { SpoolerQueue *queuePtr = (SpoolerQueue*)arg; char charBuffer[1]; int pollTimeout; bool stopping; Sock *sockPtr, *nextPtr, *waitPtr, *readPtr; Ns_Time now, diff; const Driver *drvPtr; PollData pdata; Ns_ThreadSetName("-spooler%d-", queuePtr->id); queuePtr->threadName = Ns_ThreadGetName(); /* * Loop forever until signaled to shut down and all * connections are complete and gracefully closed. */ Ns_Log(Notice, "spooler%d: accepting connections", queuePtr->id); PollCreate(&pdata); Ns_GetTime(&now); waitPtr = readPtr = NULL; stopping = NS_FALSE; while (!stopping) { /* * If there are any read sockets, set the bits * and determine the minimum relative timeout. */ PollReset(&pdata); (void)PollSet(&pdata, queuePtr->pipe[0], (short)POLLIN, NULL); if (readPtr == NULL) { pollTimeout = 30 * 1000; } else { sockPtr = readPtr; while (sockPtr != NULL) { SockPoll(sockPtr, (short)POLLIN, &pdata); sockPtr = sockPtr->nextPtr; } pollTimeout = -1; } /* * Select and drain the trigger pipe if necessary. */ /*n =*/ (void) PollWait(&pdata, pollTimeout); if (PollIn(&pdata, 0) && unlikely(ns_recv(queuePtr->pipe[0], charBuffer, 1u, 0) != 1)) { Ns_Fatal("spooler: trigger ns_recv() failed: %s", ns_sockstrerror(ns_sockerrno)); } /* * Attempt read-ahead of any new connections. */ Ns_GetTime(&now); sockPtr = readPtr; readPtr = NULL; while (sockPtr != NULL) { nextPtr = sockPtr->nextPtr; drvPtr = sockPtr->drvPtr; if (unlikely(PollHup(&pdata, sockPtr->pidx))) { /* * Peer has closed the connection */ SockRelease(sockPtr, SOCK_CLOSE, 0); } else if (!PollIn(&pdata, sockPtr->pidx)) { /* * Got no data */ if (Ns_DiffTime(&sockPtr->timeout, &now, &diff) <= 0) { SockRelease(sockPtr, SOCK_READTIMEOUT, 0); queuePtr->queuesize--; } else { Push(sockPtr, readPtr); } } else { /* * Got some data */ SockState n = SockRead(sockPtr, 1, &now); switch (n) { case SOCK_MORE: SockTimeout(sockPtr, &now, &drvPtr->recvwait); Push(sockPtr, readPtr); break; case SOCK_READY: assert(sockPtr->reqPtr != NULL); SockSetServer(sockPtr); Push(sockPtr, waitPtr); break; case SOCK_BADHEADER: NS_FALL_THROUGH; /* fall through */ case SOCK_BADREQUEST: NS_FALL_THROUGH; /* fall through */ case SOCK_CLOSE: NS_FALL_THROUGH; /* fall through */ case SOCK_CLOSETIMEOUT: NS_FALL_THROUGH; /* fall through */ case SOCK_ENTITYTOOLARGE: NS_FALL_THROUGH; /* fall through */ case SOCK_ERROR: NS_FALL_THROUGH; /* fall through */ case SOCK_READERROR: NS_FALL_THROUGH; /* fall through */ case SOCK_READTIMEOUT: NS_FALL_THROUGH; /* fall through */ case SOCK_SHUTERROR: NS_FALL_THROUGH; /* fall through */ case SOCK_SPOOL: NS_FALL_THROUGH; /* fall through */ case SOCK_TOOMANYHEADERS: NS_FALL_THROUGH; /* fall through */ case SOCK_WRITEERROR: NS_FALL_THROUGH; /* fall through */ case SOCK_WRITETIMEOUT: SockRelease(sockPtr, n, errno); queuePtr->queuesize--; break; } } sockPtr = nextPtr; } /* * Attempt to queue any pending connection * after reversing the list to ensure oldest * connections are tried first. 
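         *
         * E.g. if sockets were pushed in the order A, B, C, waitPtr holds
         * C -> B -> A; the first loop below rebuilds A -> B -> C, so that
         * A (the oldest) is queued first.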
*/ if (waitPtr != NULL) { sockPtr = NULL; while ((nextPtr = waitPtr) != NULL) { waitPtr = nextPtr->nextPtr; Push(nextPtr, sockPtr); } while (sockPtr != NULL) { nextPtr = sockPtr->nextPtr; if (!NsQueueConn(sockPtr, &now)) { Push(sockPtr, waitPtr); } else { queuePtr->queuesize--; } sockPtr = nextPtr; } } /* * Add more connections from the spooler queue */ Ns_MutexLock(&queuePtr->lock); if (waitPtr == NULL) { sockPtr = (Sock*)queuePtr->sockPtr; queuePtr->sockPtr = NULL; while (sockPtr != NULL) { nextPtr = sockPtr->nextPtr; drvPtr = sockPtr->drvPtr; SockTimeout(sockPtr, &now, &drvPtr->recvwait); Push(sockPtr, readPtr); queuePtr->queuesize++; sockPtr = nextPtr; } } /* * Check for shutdown */ stopping = queuePtr->shutdown; Ns_MutexUnlock(&queuePtr->lock); } PollFree(&pdata); Ns_Log(Notice, "exiting"); Ns_MutexLock(&queuePtr->lock); queuePtr->stopped = NS_TRUE; Ns_CondBroadcast(&queuePtr->cond); Ns_MutexUnlock(&queuePtr->lock); } static void SpoolerQueueStart(SpoolerQueue *queuePtr, Ns_ThreadProc *proc) { NS_NONNULL_ASSERT(proc != NULL); while (queuePtr != NULL) { if (ns_sockpair(queuePtr->pipe) != 0) { Ns_Fatal("ns_sockpair() failed: %s", ns_sockstrerror(ns_sockerrno)); } Ns_ThreadCreate(proc, queuePtr, 0, &queuePtr->thread); queuePtr = queuePtr->nextPtr; } } static void SpoolerQueueStop(SpoolerQueue *queuePtr, const Ns_Time *timeoutPtr, const char *name) { NS_NONNULL_ASSERT(timeoutPtr != NULL); NS_NONNULL_ASSERT(name != NULL); while (queuePtr != NULL) { Ns_ReturnCode status; Ns_MutexLock(&queuePtr->lock); if (!queuePtr->stopped && !queuePtr->shutdown) { Ns_Log(Debug, "%s%d: triggering shutdown", name, queuePtr->id); queuePtr->shutdown = NS_TRUE; SockTrigger(queuePtr->pipe[1]); } status = NS_OK; while (!queuePtr->stopped && status == NS_OK) { status = Ns_CondTimedWait(&queuePtr->cond, &queuePtr->lock, timeoutPtr); } if (status != NS_OK) { Ns_Log(Warning, "%s%d: timeout waiting for shutdown", name, queuePtr->id); } else { /*Ns_Log(Notice, "%s%d: shutdown complete", name, queuePtr->id);*/ if (queuePtr->thread != NULL) { Ns_ThreadJoin(&queuePtr->thread, NULL); queuePtr->thread = NULL; } else { Ns_Log(Notice, "%s%d: shutdown: thread already gone", name, queuePtr->id); } ns_sockclose(queuePtr->pipe[0]); ns_sockclose(queuePtr->pipe[1]); } Ns_MutexUnlock(&queuePtr->lock); queuePtr = queuePtr->nextPtr; } } static int SockSpoolerQueue(Driver *drvPtr, Sock *sockPtr) { bool trigger = NS_FALSE; SpoolerQueue *queuePtr; NS_NONNULL_ASSERT(drvPtr != NULL); NS_NONNULL_ASSERT(sockPtr != NULL); /* * Get the next spooler thread from the list, all spooler requests are * rotated between all spooler threads */ Ns_MutexLock(&drvPtr->spooler.lock); if (drvPtr->spooler.curPtr == NULL) { drvPtr->spooler.curPtr = drvPtr->spooler.firstPtr; } queuePtr = drvPtr->spooler.curPtr; drvPtr->spooler.curPtr = drvPtr->spooler.curPtr->nextPtr; Ns_MutexUnlock(&drvPtr->spooler.lock); Ns_Log(Debug, "Spooler: %d: started fd=%d: %" PRIdz " bytes", queuePtr->id, sockPtr->sock, sockPtr->reqPtr->length); Ns_MutexLock(&queuePtr->lock); if (queuePtr->sockPtr == NULL) { trigger = NS_TRUE; } Push(sockPtr, queuePtr->sockPtr); Ns_MutexUnlock(&queuePtr->lock); /* * Wake up spooler thread */ if (trigger) { SockTrigger(queuePtr->pipe[1]); } return 1; } /* *====================================================================== * Writer Thread: Write asynchronously to the client socket *====================================================================== */ /* *---------------------------------------------------------------------- * * NsWriterLock, 
NsWriterUnlock --
 *
 *      Provide an API for locking and unlocking context information
 *      for streaming asynchronous writer jobs. The locks are just
 *      needed for managing linkage between "connPtr" and a writer
 *      entry. The lock operations are rather infrequent and the
 *      lock duration is very short, such that a single global lock
 *      appears sufficient.
 *
 * Results:
 *      None
 *
 * Side effects:
 *      Change Mutex state.
 *
 *----------------------------------------------------------------------
 */
void NsWriterLock(void) {
    Ns_MutexLock(&writerlock);
}

void NsWriterUnlock(void) {
    Ns_MutexUnlock(&writerlock);
}


/*
 *----------------------------------------------------------------------
 *
 * WriterSockFileVecCleanup --
 *
 *      Cleanup function for the FileVec array in the WriterSock structure.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      Potentially closing file descriptors, freeing Ns_FileVec memory.
 *
 *----------------------------------------------------------------------
 */
static void
WriterSockFileVecCleanup(WriterSock *wrSockPtr)
{
    NS_NONNULL_ASSERT(wrSockPtr != NULL);

    if ( wrSockPtr->c.file.nbufs > 0) {
        int i;

        Ns_Log(DriverDebug, "WriterSockRelease nbufs %d", wrSockPtr->c.file.nbufs);

        for (i = 0; i < wrSockPtr->c.file.nbufs; i++) {
            /*
             * The fd of c.file.currentbuf is always the same as
             * wrSockPtr->fd and therefore already closed at this point.
             */
            if ( (i != wrSockPtr->c.file.currentbuf)
                 && (wrSockPtr->c.file.bufs[i].fd != NS_INVALID_FD) ) {

                Ns_Log(DriverDebug, "WriterSockRelease must close fd %d",
                       wrSockPtr->c.file.bufs[i].fd);
                ns_close(wrSockPtr->c.file.bufs[i].fd);
            }
        }
        ns_free(wrSockPtr->c.file.bufs);
    }
    ns_free(wrSockPtr->c.file.buf);
}


/*
 *----------------------------------------------------------------------
 *
 * WriterSockRequire, WriterSockRelease --
 *
 *      Management functions for WriterSocks. WriterSockRequire() and
 *      WriterSockRelease() are responsible for obtaining and
 *      freeing "WriterSock" structures. When such a structure is finally
 *      released, it is removed from the queue, the socket is
 *      closed and the memory is freed.
 *
 * Results:
 *      WriterSockRequire() returns a WriterSock from a connection,
 *      the other functions return nothing.
 *
 * Side effects:
 *      Updating reference counters, closing socket, freeing memory.
 *
 *----------------------------------------------------------------------
 */
static WriterSock *
WriterSockRequire(const Conn *connPtr) {
    WriterSock *wrSockPtr;

    NS_NONNULL_ASSERT(connPtr != NULL);

    NsWriterLock();
    wrSockPtr = (WriterSock *)connPtr->strWriter;
    if (wrSockPtr != NULL) {
        wrSockPtr->refCount ++;
    }
    NsWriterUnlock();

    return wrSockPtr;
}

static void
WriterSockRelease(WriterSock *wrSockPtr) {
    SpoolerQueue *queuePtr;

    NS_NONNULL_ASSERT(wrSockPtr != NULL);

    wrSockPtr->refCount --;

    Ns_Log(DriverDebug, "WriterSockRelease %p refCount %d keep %d",
           (void *)wrSockPtr, wrSockPtr->refCount, wrSockPtr->keep);

    if (wrSockPtr->refCount > 0) {
        return;
    }

    Ns_Log(DriverDebug,
           "Writer: closed sock %d, file fd %d, error %d/%d, "
           "sent=%" TCL_LL_MODIFIER "d, flags=%X",
           wrSockPtr->sockPtr->sock, wrSockPtr->fd,
           wrSockPtr->status, wrSockPtr->err,
           wrSockPtr->nsent, wrSockPtr->flags);

    NsPoolAddBytesSent(wrSockPtr->poolPtr, wrSockPtr->nsent);

    if (wrSockPtr->doStream != NS_WRITER_STREAM_NONE) {
        Conn *connPtr;

        NsWriterLock();
        connPtr = wrSockPtr->connPtr;
        if (connPtr != NULL && connPtr->strWriter != NULL) {
            connPtr->strWriter = NULL;
        }
        NsWriterUnlock();

        /*
         * In case writer streams are activated for this wrSockPtr, make sure
         * to release the tmp file. See the thread "Naviserver Open Files" on
         * the sourceforge mailing list (starting July 2019).
         */
        if (wrSockPtr->doStream == NS_WRITER_STREAM_FINISH) {
            Ns_ReleaseTemp(wrSockPtr->fd);
        }
    }

    /*
     * Remove the entry from the queue and decrement the counter.
     */
    queuePtr = wrSockPtr->queuePtr;
    if (queuePtr->curPtr == wrSockPtr) {
        queuePtr->curPtr = wrSockPtr->nextPtr;
        queuePtr->queuesize--;
    } else {
        WriterSock *curPtr, *lastPtr = queuePtr->curPtr;

        for (curPtr = (lastPtr != NULL) ? lastPtr->nextPtr : NULL;
             curPtr != NULL;
             lastPtr = curPtr, curPtr = curPtr->nextPtr
             ) {
            if (curPtr == wrSockPtr) {
                lastPtr->nextPtr = wrSockPtr->nextPtr;
                queuePtr->queuesize--;
                break;
            }
        }
    }

    if ((wrSockPtr->err != 0) || (wrSockPtr->status != SPOOLER_OK)) {
        int i;

        /*
         * Look up the matching sockState from the spooler state. The array
         * has just 5 elements; on average, just 2 comparisons are needed
         * (since OK is at the end).
         */
        for (i = 0; i < Ns_NrElements(spoolerStateMap); i++) {
            if (spoolerStateMap[i].spoolerState == wrSockPtr->status) {
                SockError(wrSockPtr->sockPtr, spoolerStateMap[i].sockState, wrSockPtr->err);
                break;
            }
        }
        NsSockClose(wrSockPtr->sockPtr, (int)NS_FALSE);
    } else {
        NsSockClose(wrSockPtr->sockPtr, (int)wrSockPtr->keep);
    }

    if (wrSockPtr->clientData != NULL) {
        ns_free(wrSockPtr->clientData);
    }
    if (wrSockPtr->fd != NS_INVALID_FD) {
        if (wrSockPtr->doStream != NS_WRITER_STREAM_FINISH) {
            (void) ns_close(wrSockPtr->fd);
        }
        WriterSockFileVecCleanup(wrSockPtr);

    } else if (wrSockPtr->c.mem.bufs != NULL) {
        if (wrSockPtr->c.mem.fmap.addr != NULL) {
            NsMemUmap(&wrSockPtr->c.mem.fmap);

        } else {
            int i;

            for (i = 0; i < wrSockPtr->c.mem.nbufs; i++) {
                ns_free((char *)wrSockPtr->c.mem.bufs[i].iov_base);
            }
        }
        if (wrSockPtr->c.mem.bufs != wrSockPtr->c.mem.preallocated_bufs) {
            ns_free(wrSockPtr->c.mem.bufs);
        }
    }
    if (wrSockPtr->headerString != NULL) {
        ns_free(wrSockPtr->headerString);
    }

    ns_free(wrSockPtr);
}


/*
 *----------------------------------------------------------------------
 *
 * WriterReadFromSpool --
 *
 *      Utility function of the WriterThread to read blocks from a
 *      file into the output buffer of the writer. It handles
 *      leftovers from previous send attempts and takes care of
 *      locking in case of simultaneous reading and writing of the
 *      same file.
 *
 * Results:
 *      SPOOLER_OK, or SPOOLER_READERROR when reading failed.
 *
 * Side effects:
 *      Fills up curPtr->c.file.buf and updates counters/sizes.
 *
 *----------------------------------------------------------------------
 */
static SpoolerState
WriterReadFromSpool(WriterSock *curPtr) {
    NsWriterStreamState doStream;
    SpoolerState        status = SPOOLER_OK;
    size_t              maxsize, toRead;
    unsigned char      *bufPtr;

    NS_NONNULL_ASSERT(curPtr != NULL);

    doStream = curPtr->doStream;
    if (doStream != NS_WRITER_STREAM_NONE) {
        Ns_MutexLock(&curPtr->c.file.fdlock);
        toRead = curPtr->c.file.toRead;
        Ns_MutexUnlock(&curPtr->c.file.fdlock);
    } else {
        toRead = curPtr->c.file.toRead;

        Ns_Log(DriverDebug, "### WriterReadFromSpool [%d]: fd %d tosend %lu files %d",
               curPtr->c.file.currentbuf, curPtr->fd, toRead, curPtr->c.file.nbufs);
    }

    maxsize = curPtr->c.file.maxsize;
    bufPtr  = curPtr->c.file.buf;

    /*
     * When bufsize > 0 we have a leftover from a previous send. In such
     * cases, move the leftover to the front, and fill the remainder of
     * the buffer with new data from curPtr->c.
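     *
     * Numeric sketch (assumed values): with maxsize 8192 and a 100-byte
     * leftover at bufoffset 4000, the leftover is moved to buf[0..99] and
     * at most 8092 fresh bytes are read behind it.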
*/ if (curPtr->c.file.bufsize > 0u) { Ns_Log(DriverDebug, "### WriterReadFromSpool %p %.6x leftover %" PRIdz " offset %ld", (void *)curPtr, curPtr->flags, curPtr->c.file.bufsize, (long)curPtr->c.file.bufoffset); if (likely(curPtr->c.file.bufoffset > 0)) { memmove(curPtr->c.file.buf, curPtr->c.file.buf + curPtr->c.file.bufoffset, curPtr->c.file.bufsize); } bufPtr = curPtr->c.file.buf + curPtr->c.file.bufsize; maxsize -= curPtr->c.file.bufsize; } if (toRead > maxsize) { toRead = maxsize; } /* * Read content from the file into the buffer. */ if (toRead > 0u) { ssize_t n; if (doStream != NS_WRITER_STREAM_NONE) { /* * In streaming mode, the connection thread writes to the * spool file and the writer thread reads from the same * file. Therefore, we have to re-adjust the current * read/writer position, which might be changed by the * other thread. These positions have to be locked, since * seeking might be subject to race conditions. Here we * set the read pointer to the position after the last * send operation. */ Ns_MutexLock(&curPtr->c.file.fdlock); (void) ns_lseek(curPtr->fd, (off_t)curPtr->nsent, SEEK_SET); } if (curPtr->c.file.nbufs == 0) { /* * Working on a single fd. */ n = ns_read(curPtr->fd, bufPtr, toRead); } else { /* * Working on a Ns_FileVec. */ int currentbuf = curPtr->c.file.currentbuf; size_t wantRead = curPtr->c.file.bufs[currentbuf].length; size_t segSize = (wantRead > toRead ? toRead : wantRead); n = ns_read(curPtr->fd, bufPtr, segSize); Ns_Log(DriverDebug, "### WriterReadFromSpool [%d] (nbufs %d): read from fd %d want %lu got %ld (remain %lu)", currentbuf, curPtr->c.file.nbufs, curPtr->fd, segSize, n, wantRead); if (n > 0) { /* * Reduce the remaining length in the Ns_FileVec for the * next iteration. */ curPtr->c.file.bufs[currentbuf].length -= (size_t)n; if ((size_t)n < wantRead) { /* * Partial read on a segment. */ Ns_Log(DriverDebug, "### WriterReadFromSpool [%d] (nbufs %d): partial read on fd %d (got %ld)", currentbuf, curPtr->c.file.nbufs, curPtr->fd, n); } else if (currentbuf < curPtr->c.file.nbufs - 1 /* && (n == wantRead) */) { /* * All read from this segment, setup next read. */ ns_close(curPtr->fd); curPtr->c.file.bufs[currentbuf].fd = NS_INVALID_FD; curPtr->c.file.currentbuf ++; curPtr->fd = curPtr->c.file.bufs[curPtr->c.file.currentbuf].fd; Ns_Log(DriverDebug, "### WriterReadFromSpool switch to [%d] fd %d", curPtr->c.file.currentbuf, curPtr->fd); } } } if (n <= 0) { status = SPOOLER_READERROR; } else { /* * curPtr->c.file.toRead is still protected by * curPtr->c.file.fdlock when needed (in streaming mode). */ curPtr->c.file.toRead -= (size_t)n; curPtr->c.file.bufsize += (size_t)n; } if (doStream != NS_WRITER_STREAM_NONE) { Ns_MutexUnlock(&curPtr->c.file.fdlock); } } return status; } /* *---------------------------------------------------------------------- * * WriterSend -- * * Utility function of the WriterThread to send content to the client. It * handles partial write operations from the lower level driver * infrastructure. * * Results: * either NS_OK or SOCK_ERROR; * * Side effects: * Sends data, might reshuffle iovec. * *---------------------------------------------------------------------- */ static SpoolerState WriterSend(WriterSock *curPtr, int *err) { const struct iovec *bufs; struct iovec vbuf; int nbufs; SpoolerState status = SPOOLER_OK; size_t toWrite; ssize_t n; NS_NONNULL_ASSERT(curPtr != NULL); NS_NONNULL_ASSERT(err != NULL); /* * Prepare send operation */ if (curPtr->fd != NS_INVALID_FD) { /* * We have a valid file descriptor, send data from file. 
* * Prepare sending a single buffer with curPtr->c.file.bufsize bytes * from the curPtr->c.file.buf to the client. */ vbuf.iov_len = curPtr->c.file.bufsize; vbuf.iov_base = (void *)curPtr->c.file.buf; bufs = &vbuf; nbufs = 1; toWrite = curPtr->c.file.bufsize; } else { int i; /* * Prepare sending multiple memory buffers. Get length of remaining * buffers. */ toWrite = 0u; for (i = 0; i < curPtr->c.mem.nsbufs; i ++) { toWrite += curPtr->c.mem.sbufs[i].iov_len; } Ns_Log(DriverDebug, "### Writer wants to send remainder nbufs %d len %" PRIdz, curPtr->c.mem.nsbufs, toWrite); /* * Add buffers from the source and fill structure up to max */ while (curPtr->c.mem.bufIdx < curPtr->c.mem.nbufs && curPtr->c.mem.sbufIdx < UIO_SMALLIOV) { const struct iovec *vPtr = &curPtr->c.mem.bufs[curPtr->c.mem.bufIdx]; if (vPtr->iov_len > 0u && vPtr->iov_base != NULL) { Ns_Log(DriverDebug, "### Writer copies source %d to scratch %d len %" PRIiovlen, curPtr->c.mem.bufIdx, curPtr->c.mem.sbufIdx, vPtr->iov_len); toWrite += Ns_SetVec(curPtr->c.mem.sbufs, curPtr->c.mem.sbufIdx++, vPtr->iov_base, vPtr->iov_len); curPtr->c.mem.nsbufs++; } curPtr->c.mem.bufIdx++; } bufs = curPtr->c.mem.sbufs; nbufs = curPtr->c.mem.nsbufs; Ns_Log(DriverDebug, "### Writer wants to send %d bufs size %" PRIdz, nbufs, toWrite); } /* * Perform the actual send operation. */ n = NsDriverSend(curPtr->sockPtr, bufs, nbufs, 0u); if (n == -1) { *err = ns_sockerrno; status = SPOOLER_WRITEERROR; } else { /* * We have sent zero or more bytes. */ if (curPtr->doStream != NS_WRITER_STREAM_NONE) { Ns_MutexLock(&curPtr->c.file.fdlock); curPtr->size -= (size_t)n; Ns_MutexUnlock(&curPtr->c.file.fdlock); } else { curPtr->size -= (size_t)n; } curPtr->nsent += n; curPtr->sockPtr->timeout.sec = 0; if (curPtr->fd != NS_INVALID_FD) { /* * File-descriptor based send operation. Reduce the (remainig) * buffer size the amount of data sent and adjust the buffer * offset. For partial send operations, this will lead to a * remaining buffer size > 0. */ curPtr->c.file.bufsize -= (size_t)n; curPtr->c.file.bufoffset = (off_t)n; } else { if (n < (ssize_t)toWrite) { /* * We have a partial transmit from the iovec * structure. We have to compact it to fill content in * the next round. */ curPtr->c.mem.sbufIdx = Ns_ResetVec(curPtr->c.mem.sbufs, curPtr->c.mem.nsbufs, (size_t)n); curPtr->c.mem.nsbufs -= curPtr->c.mem.sbufIdx; memmove(curPtr->c.mem.sbufs, curPtr->c.mem.sbufs + curPtr->c.mem.sbufIdx, /* move the iovecs to the start of the scratch buffers */ sizeof(struct iovec) * (size_t)curPtr->c.mem.nsbufs); } } } return status; } /* *---------------------------------------------------------------------- * * WriterGetInfoPtr -- * * Helper function to obtain ConnPoolInfo structure for a WriterSock. * * The connInfoPtr is allocated only once per pool and cached in the * WriterSock. Only the first time, a writer thread "sees" a pool, it * allocates the structure for it. * * Results: * None. * * Side effects: * Can allocate memory * *---------------------------------------------------------------------- */ static ConnPoolInfo * WriterGetInfoPtr(WriterSock *curPtr, Tcl_HashTable *pools) { NS_NONNULL_ASSERT(curPtr != NULL); NS_NONNULL_ASSERT(pools != NULL); if (curPtr->infoPtr == NULL) { int isNew; Tcl_HashEntry *hPtr; hPtr = Tcl_CreateHashEntry(pools, (void*)curPtr->poolPtr, &isNew); if (isNew == 1) { /* * This is a pool that we have not seen yet. 
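             * Allocate a ConnPoolInfo for it and cache it in the
             * thread-local table, so that later jobs from the same pool
             * can reuse the entry via Tcl_GetHashValue().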
             */
            curPtr->infoPtr = ns_malloc(sizeof(ConnPoolInfo));
            curPtr->infoPtr->currentPoolRate = 0;
            curPtr->infoPtr->threadSlot =
                NsPoolAllocateThreadSlot(curPtr->poolPtr, Ns_ThreadId());
            Tcl_SetHashValue(hPtr, curPtr->infoPtr);
            Ns_Log(DriverDebug, "poollimit: pool '%s' allocate infoPtr with slot %lu poolLimit %d",
                   curPtr->poolPtr->pool,
                   curPtr->infoPtr->threadSlot,
                   curPtr->poolPtr->rate.poolLimit);
        } else {
            curPtr->infoPtr = (ConnPoolInfo *)Tcl_GetHashValue(hPtr);
        }
    }

    return curPtr->infoPtr;
}

/*
 *----------------------------------------------------------------------
 *
 * WriterPerPoolRates --
 *
 *      Compute current bandwidths per pool and writer.
 *
 *      Since we have potentially multiple writer threads running, all of
 *      these might have writer jobs of the same pool. In order to
 *      minimize locking, we first compute writer-thread-specific
 *      subresults and combine these later with the results of the other
 *      threads.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      Updates the thread-local per-pool rate accounting (ConnPoolInfo)
 *      used for bandwidth management.
 *
 *----------------------------------------------------------------------
 */
static void
WriterPerPoolRates(WriterSock *writePtr, Tcl_HashTable *pools)
{
    WriterSock    *curPtr;
    Tcl_HashSearch search;
    Tcl_HashEntry *hPtr;

    NS_NONNULL_ASSERT(writePtr != NULL);
    NS_NONNULL_ASSERT(pools != NULL);

    /*
     * First, reset the pool total rates. We keep the bandwidth-managed
     * pools in thread-local memory. Before we accumulate the data, we
     * reset it.
     */
    hPtr = Tcl_FirstHashEntry(pools, &search);
    while (hPtr != NULL) {
        ConnPoolInfo *infoPtr = (ConnPoolInfo *)Tcl_GetHashValue(hPtr);
        infoPtr->currentPoolRate = 0;
        hPtr = Tcl_NextHashEntry(&search);
    }

    /*
     * Sum the actual rates per bandwidth limited pool for all active writer
     * jobs.
     */
    for (curPtr = writePtr; curPtr != NULL; curPtr = curPtr->nextPtr) {
        /*
         * Does the writer come from a bandwidth limited pool?
         */
        if (curPtr->poolPtr->rate.poolLimit > 0 && curPtr->currentRate > 0) {
            /*
             * Add the actual rate to the writer specific pool rate.
             */
            ConnPoolInfo *infoPtr = WriterGetInfoPtr(curPtr, pools);

            infoPtr->currentPoolRate += curPtr->currentRate;
            Ns_Log(DriverDebug, "poollimit pool '%s' added rate poolLimit %d poolRate %d",
                   curPtr->poolPtr->pool,
                   curPtr->poolPtr->rate.poolLimit,
                   infoPtr->currentPoolRate);
        }
    }

    /*
     * Now iterate over the pools used by this thread and sum the specific
     * pool rates from all writer threads.
     */
    hPtr = Tcl_FirstHashEntry(pools, &search);
    while (hPtr != NULL) {
        ConnPool     *poolPtr = (ConnPool *)Tcl_GetHashKey(pools, hPtr);
        int           totalPoolRate, writerThreadCount, threadDeltaRate;
        ConnPoolInfo *infoPtr;

        /*
         * Compute the following indicators:
         *
         *  - totalPoolRate: accumulated pool rates from all writer
         *    threads.
         *
         *  - threadDeltaRate: how much of the available bandwidth the
         *    current thread may use. We assume that the distribution of
         *    writers between all writer threads is even, so we can split
         *    the available rate by the number of writer threads working
         *    on this pool.
         *
         *  - deltaPercentage: adjust in a single iteration just a
         *    fraction (e.g. 10 percent) of the potential change. This
         *    function is called often enough to justify such delayed
         *    adjustments.
         */
        infoPtr = (ConnPoolInfo *)Tcl_GetHashValue(hPtr);
        totalPoolRate = NsPoolTotalRate(poolPtr,
                                        infoPtr->threadSlot,
                                        infoPtr->currentPoolRate,
                                        &writerThreadCount);

        /*
         * If nothing is going on, allow a thread the full rate.
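         *
         * Illustrative numbers (not from the source): with a poolLimit of
         * 1000 KB/s, a totalPoolRate of 800 KB/s and 2 writer threads,
         * threadDeltaRate = (1000 - 800) / 2 = 100 KB/s and
         * deltaPercentage = 100 / 10 = 10, i.e. busy writers of this
         * thread may be sped up by about 10% per adjustment round.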
         */
        if (infoPtr->currentPoolRate == 0) {
            threadDeltaRate = (poolPtr->rate.poolLimit - totalPoolRate);
        } else {
            threadDeltaRate = (poolPtr->rate.poolLimit - totalPoolRate) / writerThreadCount;
        }
        infoPtr->deltaPercentage = threadDeltaRate / 10;
        if (infoPtr->deltaPercentage < -50) {
            infoPtr->deltaPercentage = -50;
        }

        if (totalPoolRate > 0) {
            Ns_Log(Notice, "... pool '%s' thread's pool rate %d total pool rate %d limit %d "
                   "(#%d writer threads) -> computed rate %d (%d%%) ",
                   NsPoolName(poolPtr->pool),
                   infoPtr->currentPoolRate,
                   totalPoolRate,
                   poolPtr->rate.poolLimit,
                   writerThreadCount,
                   threadDeltaRate,
                   infoPtr->deltaPercentage
                  );
        }

        hPtr = Tcl_NextHashEntry(&search);
    }
}

/*
 *----------------------------------------------------------------------
 *
 * WriterThread --
 *
 *      Thread that writes files to clients.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      Sends content to clients; writer jobs are released and their
 *      sockets closed when the content is fully delivered, or on
 *      error/timeout.
 *
 *----------------------------------------------------------------------
 */
static void
WriterThread(void *arg)
{
    SpoolerQueue   *queuePtr = (SpoolerQueue*)arg;
    int             err, pollTimeout;
    bool            stopping;
    Ns_Time         now;
    Sock           *sockPtr;
    const Driver   *drvPtr;
    WriterSock     *curPtr, *nextPtr, *writePtr;
    PollData        pdata;
    Tcl_HashTable   pools;     /* used for accumulating bandwidth per pool */

    Ns_ThreadSetName("-writer%d-", queuePtr->id);
    queuePtr->threadName = Ns_ThreadGetName();

    Tcl_InitHashTable(&pools, TCL_ONE_WORD_KEYS);

    /*
     * Loop forever until signaled to shut down and all
     * connections are complete and gracefully closed.
     */
    Ns_Log(Notice, "writer%d: accepting connections", queuePtr->id);

    PollCreate(&pdata);
    writePtr = NULL;
    stopping = NS_FALSE;

    while (!stopping) {
        char charBuffer[1];

        /*
         * If there are any write sockets, set the bits.
         */
        PollReset(&pdata);
        (void)PollSet(&pdata, queuePtr->pipe[0], (short)POLLIN, NULL);

        if (writePtr == NULL) {
            pollTimeout = 30 * 1000;
        } else {

            /*
             * If per-pool bandwidth management is requested, compute the
             * base data for the adjustment. If no bandwidth management is
             * requested, there is no slowdown.
             */
            if (NsWriterBandwidthManagement) {
                WriterPerPoolRates(writePtr, &pools);
            }

            /*
             * There are writers active. Determine on which writers we poll
             * and compute the maximal poll wait time.
             */
            pollTimeout = 1000;
            for (curPtr = writePtr; curPtr != NULL; curPtr = curPtr->nextPtr) {
                int sleepTimeMs = 0;

                Ns_Log(DriverDebug, "### Writer poll collect %p size %" PRIdz " streaming %d rateLimit %d",
                       (void *)curPtr, curPtr->size, curPtr->doStream, curPtr->rateLimit);

                if (curPtr->rateLimit > 0
                    && curPtr->nsent > 0
                    && curPtr->currentRate > 0
                    ) {
                    int currentMs, targetTimeMs;

                    /*
                     * Perform per-pool rate management, when
                     *
                     *  - a poolLimit is provided,
                     *  - we have performance data of the pool, and
                     *  - changes are possible (as flagged by
                     *    deltaPercentage).
                     */
                    if (NsWriterBandwidthManagement
                        && curPtr->poolPtr->rate.poolLimit > 0
                        && curPtr->infoPtr != NULL
                        && curPtr->infoPtr->deltaPercentage != 0
                        ) {
                        /*
                         * Only adjust data for busy writer jobs, which
                         * are close to their limits.
                         */
                        bool onLimit = (curPtr->currentRate*100 / curPtr->rateLimit) > 90;

                        Ns_Log(DriverDebug, "we allowed %d we use %d on limit %d (%d), we can do %d%%",
                               curPtr->rateLimit,
                               curPtr->currentRate,
                               (int)onLimit,
                               curPtr->currentRate*100/curPtr->rateLimit,
                               curPtr->infoPtr->deltaPercentage);
                        if (onLimit) {
                            /*
                             * Compute new rate limit based on
                             * positive/negative delta percentage.
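                             *
                             * For example (hypothetical values): a
                             * currentRate of 900 KB/s and a deltaPercentage
                             * of -10 yield newRate = 900 + (900 * -10) / 100
                             * = 810 KB/s, which is then clamped to the
                             * range [5 .. poolLimit] below.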
*/ int newRate = curPtr->currentRate + (curPtr->currentRate * curPtr->infoPtr->deltaPercentage / 100); /* * Sanity checks: * - never allow more than poolLimit * - never kill connections completely (e.g. minRate 5KB/s) */ if (newRate > curPtr->poolPtr->rate.poolLimit) { newRate = curPtr->poolPtr->rate.poolLimit; } else if (newRate < 5) { newRate = 5; } Ns_Log(Notice, "... pool '%s' new rate limit changed from %d to %d KB/s (delta %d%%)", curPtr->poolPtr->pool, curPtr->rateLimit, newRate, curPtr->infoPtr->deltaPercentage); curPtr->rateLimit = newRate; } } /* * Adjust rate to the rate limit. */ currentMs = (int)(curPtr->nsent/(Tcl_WideInt)curPtr->currentRate); targetTimeMs = (int)(curPtr->nsent/(Tcl_WideInt)curPtr->rateLimit); sleepTimeMs = 1 + targetTimeMs - currentMs; Ns_Log(WriterDebug, "### Writer(%d)" " byte sent %" TCL_LL_MODIFIER "d msecs %d rate %d KB/s" " targetRate %d KB/s sleep %d", curPtr->sockPtr->sock, curPtr->nsent, currentMs, curPtr->currentRate, curPtr->rateLimit, sleepTimeMs); } if (likely(curPtr->size > 0u)) { if (sleepTimeMs <= 0) { SockPoll(curPtr->sockPtr, (short)POLLOUT, &pdata); pollTimeout = -1; } else { pollTimeout = MIN(sleepTimeMs, pollTimeout); } } else if (unlikely(curPtr->doStream == NS_WRITER_STREAM_FINISH)) { pollTimeout = -1; } } } Ns_Log(DriverDebug, "### Writer final pollTimeout %d", pollTimeout); /* * Select and drain the trigger pipe if necessary. */ (void) PollWait(&pdata, pollTimeout); if (PollIn(&pdata, 0) && unlikely(ns_recv(queuePtr->pipe[0], charBuffer, 1u, 0) != 1)) { Ns_Fatal("writer: trigger ns_recv() failed: %s", ns_sockstrerror(ns_sockerrno)); } /* * Write to all available sockets */ Ns_GetTime(&now); curPtr = writePtr; writePtr = NULL; while (curPtr != NULL) { NsWriterStreamState doStream; SpoolerState spoolerState = SPOOLER_OK; nextPtr = curPtr->nextPtr; sockPtr = curPtr->sockPtr; err = 0; /* * The truth value of doStream does not change through * concurrency. */ doStream = curPtr->doStream; if (unlikely(PollHup(&pdata, sockPtr->pidx))) { Ns_Log(DriverDebug, "### Writer %p reached POLLHUP fd %d", (void *)curPtr, sockPtr->sock); spoolerState = SPOOLER_CLOSE; err = 0; curPtr->infoPtr = WriterGetInfoPtr(curPtr, &pools); curPtr->infoPtr->currentPoolRate += curPtr->currentRate; } else if (likely(PollOut(&pdata, sockPtr->pidx)) || (doStream == NS_WRITER_STREAM_FINISH)) { /* * The socket is writable, we can compute the rate, when * something was sent already and some kind of rate limiting * is in place ... and we have sent enough data to make a good * estimate (just after the 2nd send, so more than driver * buffer size. 
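                     *
                     * Since nsent is in bytes and the elapsed time is in
                     * milliseconds, nsent/ms approximates KB/s: e.g.
                     * 1,000,000 bytes sent in 2,000 ms yield a currentRate
                     * of 500 (illustrative numbers).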
*/ Ns_Log(DriverDebug, "Socket of pool '%s' is writable, writer limit %d nsent %ld", curPtr->poolPtr->pool, curPtr->rateLimit, (long)curPtr->nsent); if (curPtr->rateLimit > 0 && (size_t)curPtr->nsent > curPtr->sockPtr->drvPtr->bufsize ) { Ns_Time diff; long currentMs; Ns_DiffTime(&now, &curPtr->startTime, &diff); currentMs = Ns_TimeToMilliseconds(&diff); if (currentMs > 0) { curPtr->currentRate = (int)((curPtr->nsent)/(Tcl_WideInt)currentMs); Ns_Log(DriverDebug, "Socket of pool '%s' is writable, currentMs %ld has updated current rate %d", curPtr->poolPtr->pool, currentMs,curPtr->currentRate); } } Ns_Log(DriverDebug, "### Writer %p can write to client fd %d (trigger %d) streaming %.6x" " size %" PRIdz " nsent %" TCL_LL_MODIFIER "d bufsize %" PRIdz, (void *)curPtr, sockPtr->sock, PollIn(&pdata, 0), doStream, curPtr->size, curPtr->nsent, curPtr->c.file.bufsize); if (unlikely(curPtr->size < 1u)) { /* * Size < 1 means that everything was sent. */ if (doStream != NS_WRITER_STREAM_ACTIVE) { if (doStream == NS_WRITER_STREAM_FINISH) { Ns_ReleaseTemp(curPtr->fd); } spoolerState = SPOOLER_CLOSE; } } else { /* * If size > 0, there is still something to send. * If we are spooling from a file, read some data * from the (spool) file and place it into curPtr->c.file.buf. */ if (curPtr->fd != NS_INVALID_FD) { spoolerState = WriterReadFromSpool(curPtr); } if (spoolerState == SPOOLER_OK) { spoolerState = WriterSend(curPtr, &err); } } } else { /* * Mark when first timeout occurred or check if it is already * for too long and we need to stop this socket */ if (sockPtr->timeout.sec == 0) { Ns_Log(DriverDebug, "Writer %p fd %d setting sendwait %ld.%6ld", (void *)curPtr, sockPtr->sock, curPtr->sockPtr->drvPtr->sendwait.sec, curPtr->sockPtr->drvPtr->sendwait.usec); SockTimeout(sockPtr, &now, &curPtr->sockPtr->drvPtr->sendwait); } else if (Ns_DiffTime(&sockPtr->timeout, &now, NULL) <= 0) { Ns_Log(DriverDebug, "Writer %p fd %d timeout", (void *)curPtr, sockPtr->sock); err = ETIMEDOUT; spoolerState = SPOOLER_CLOSETIMEOUT; } } /* * Check result status and close the socket in case of * timeout or completion */ Ns_MutexLock(&queuePtr->lock); if (spoolerState == SPOOLER_OK) { if (curPtr->size > 0u || doStream == NS_WRITER_STREAM_ACTIVE) { Ns_Log(DriverDebug, "Writer %p continue OK (size %" PRIdz ") => PUSH", (void *)curPtr, curPtr->size); Push(curPtr, writePtr); } else { Ns_Log(DriverDebug, "Writer %p done OK (size %" PRIdz ") => RELEASE", (void *)curPtr, curPtr->size); WriterSockRelease(curPtr); } } else { /* * spoolerState might be SPOOLER_CLOSE or SPOOLER_*TIMEOUT, or SPOOLER_*ERROR */ Ns_Log(DriverDebug, "Writer %p fd %d release, not OK (status %d) => RELEASE", (void *)curPtr, curPtr->sockPtr->sock, (int)spoolerState); curPtr->status = spoolerState; curPtr->err = err; WriterSockRelease(curPtr); } Ns_MutexUnlock(&queuePtr->lock); curPtr = nextPtr; } /* * Add more sockets to the writer queue */ if (queuePtr->sockPtr != NULL) { Ns_MutexLock(&queuePtr->lock); if (queuePtr->sockPtr != NULL) { curPtr = queuePtr->sockPtr; queuePtr->sockPtr = NULL; while (curPtr != NULL) { nextPtr = curPtr->nextPtr; sockPtr = curPtr->sockPtr; drvPtr = sockPtr->drvPtr; SockTimeout(sockPtr, &now, &drvPtr->sendwait); Push(curPtr, writePtr); queuePtr->queuesize++; curPtr = nextPtr; } queuePtr->curPtr = writePtr; } Ns_MutexUnlock(&queuePtr->lock); } /* * Check for shutdown */ stopping = queuePtr->shutdown; } PollFree(&pdata); { /* * Free ConnPoolInfo */ Tcl_HashSearch search; Tcl_HashEntry *hPtr = Tcl_FirstHashEntry(&pools, &search); while (hPtr 
!= NULL) {
            ConnPoolInfo *infoPtr = (ConnPoolInfo *)Tcl_GetHashValue(hPtr);
            ns_free(infoPtr);
            hPtr = Tcl_NextHashEntry(&search);
        }
        /*
         * Delete the hash table for pools.
         */
        Tcl_DeleteHashTable(&pools);
    }

    Ns_Log(Notice, "exiting");

    Ns_MutexLock(&queuePtr->lock);
    queuePtr->stopped = NS_TRUE;
    Ns_CondBroadcast(&queuePtr->cond);
    Ns_MutexUnlock(&queuePtr->lock);
}

/*
 *----------------------------------------------------------------------
 *
 * NsWriterFinish --
 *
 *      Finish a streaming writer job (typically called at the close of a
 *      connection). A streaming writer job is typically fed by a sequence
 *      of ns_write operations. After such an operation, the WriterThread
 *      has to keep the writer job alive. NsWriterFinish() tells the
 *      WriterThread that no more writer jobs will come from this
 *      connection.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      Change the state of the writer job and trigger the queue.
 *
 *----------------------------------------------------------------------
 */
void
NsWriterFinish(NsWriterSock *wrSockPtr)
{
    WriterSock *writerSockPtr = (WriterSock *)wrSockPtr;

    NS_NONNULL_ASSERT(wrSockPtr != NULL);

    Ns_Log(DriverDebug, "NsWriterFinish: %p", (void *)writerSockPtr);
    writerSockPtr->doStream = NS_WRITER_STREAM_FINISH;
    SockTrigger(writerSockPtr->queuePtr->pipe[1]);
}

/*
 *----------------------------------------------------------------------
 *
 * WriterSetupStreamingMode --
 *
 *      In streaming mode, set up a temporary fd which is used as input
 *      and output. Streaming i/o will append to the file, while the
 *      writer will read from it.
 *
 * Results:
 *      Ns_ReturnCode (NS_OK, NS_ERROR, NS_FILTER_BREAK). The last case
 *      signals that all processing was already performed and that the
 *      caller can stop handling more data. On success, the function
 *      returns an fd via its last argument.
 *
 * Side effects:
 *      Potentially allocates a temp file and updates connPtr members.
 *
 *----------------------------------------------------------------------
 */
Ns_ReturnCode
WriterSetupStreamingMode(Conn *connPtr, struct iovec *bufs, int nbufs, int *fdPtr)
{
    bool           first;
    size_t         wrote = 0u;
    WriterSock    *wrSockPtr1;
    Ns_ReturnCode  status = NS_OK;

    NS_NONNULL_ASSERT(connPtr != NULL);
    NS_NONNULL_ASSERT(bufs != NULL);
    NS_NONNULL_ASSERT(fdPtr != NULL);

    Ns_Log(DriverDebug, "NsWriterQueue: streaming writer job");

    if (connPtr->fd == 0) {
        /*
         * Create a new temporary spool file and provide the fd to the
         * connection thread via connPtr.
         */
        first = NS_TRUE;
        wrSockPtr1 = NULL;

        *fdPtr = Ns_GetTemp();
        connPtr->fd = *fdPtr;

        Ns_Log(DriverDebug, "NsWriterQueue: new tmp file has fd %d", *fdPtr);

    } else {
        /*
         * Reuse previously created spool file.
         */
        first = NS_FALSE;
        wrSockPtr1 = WriterSockRequire(connPtr);

        if (wrSockPtr1 == NULL) {
            Ns_Log(Notice,
                   "NsWriterQueue: writer job was already canceled (fd %d); maybe user dropped connection",
                   connPtr->fd);
            return NS_ERROR;

        } else {
            /*
             * Lock only when first == NS_FALSE.
             */
            Ns_MutexLock(&wrSockPtr1->c.file.fdlock);
            (void)ns_lseek(connPtr->fd, 0, SEEK_END);
        }
    }

    /*
     * For the time being, handle just "string data" in streaming
     * output (iovec bufs). Write the content to the spool file.
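     *
     * (The connection thread appends at the end of the spool file via
     * the ns_lseek(..., SEEK_END) above, while the writer thread
     * positions its reads at nsent in WriterReadFromSpool(); both sides
     * serialize on c.file.fdlock.)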
     */
    {
        int i;

        for (i = 0; i < nbufs; i++) {
            ssize_t j = ns_write(connPtr->fd, bufs[i].iov_base, bufs[i].iov_len);

            if (j > 0) {
                wrote += (size_t)j;
                Ns_Log(Debug, "NsWriterQueue: fd %d [%d] spooled %" PRIdz " of %" PRIiovlen " OK %d",
                       connPtr->fd, i, j, bufs[i].iov_len, (j == (ssize_t)bufs[i].iov_len));
            } else {
                Ns_Log(Warning, "NsWriterQueue: spool to fd %d write operation failed",
                       connPtr->fd);
            }
        }
    }

    if (first) {
        //bufs = NULL;
        connPtr->nContentSent = wrote;
#ifndef _WIN32
        /*
         * ns_sock_set_blocking can't be used under Windows, since under
         * Windows sockets are not file descriptors.
         */
        (void)ns_sock_set_blocking(connPtr->fd, NS_FALSE);
#endif
        /*
         * Fall through to register stream writer with temp file
         */
    } else {
        WriterSock *writerSockPtr;

        /*
         * This is a later streaming operation, where the writer job
         * (strWriter) was previously established.
         */
        assert(wrSockPtr1 != NULL);
        /*
         * Update the controlling variables (size and toRead) in the
         * connPtr, and the length info for the access log, and trigger
         * the writer to notify it about the change.
         */

        writerSockPtr = (WriterSock *)connPtr->strWriter;
        writerSockPtr->size += wrote;
        writerSockPtr->c.file.toRead += wrote;
        Ns_MutexUnlock(&wrSockPtr1->c.file.fdlock);

        connPtr->nContentSent += wrote;
        if (likely(wrSockPtr1->queuePtr != NULL)) {
            SockTrigger(wrSockPtr1->queuePtr->pipe[1]);
        }
        WriterSockRelease(wrSockPtr1);
        status = NS_FILTER_BREAK;
    }

    return status;
}

/*
 *----------------------------------------------------------------------
 *
 * NsWriterQueue --
 *
 *      Submit a new job to the writer queue.
 *
 * Results:
 *      NS_ERROR means that the writer thread refuses to accept this job
 *      and that the client (the connection thread) has to handle this
 *      data. NS_OK means that the writer thread takes care of
 *      transmitting the content to the client.
 *
 * Side effects:
 *      Potentially adding a job to the writer queue.
 *
 *----------------------------------------------------------------------
 */
Ns_ReturnCode
NsWriterQueue(Ns_Conn *conn, size_t nsend, Tcl_Channel chan, FILE *fp, int fd,
              struct iovec *bufs, int nbufs,
              const Ns_FileVec *filebufs, int nfilebufs,
              bool everysize)
{
    Conn          *connPtr;
    WriterSock    *wrSockPtr;
    SpoolerQueue  *queuePtr;
    DrvWriter     *wrPtr;
    bool           trigger = NS_FALSE;
    size_t         headerSize;
    Ns_ReturnCode  status = NS_OK;
    Ns_FileVec    *fbufs = NULL;
    int            nfbufs = 0;

    NS_NONNULL_ASSERT(conn != NULL);

    connPtr = (Conn *)conn;

    if (unlikely(connPtr->sockPtr == NULL)) {
        Ns_Log(Warning,
               "NsWriterQueue: called without sockPtr size %" PRIdz
               " bufs %d flags %.6x stream %.6x chan %p fd %d",
               nsend, nbufs, connPtr->flags, connPtr->flags & NS_CONN_STREAM,
               (void *)chan, fd);
        status = NS_ERROR;
        wrPtr = NULL;
    } else {
        wrPtr = &connPtr->sockPtr->drvPtr->writer;

        Ns_Log(DriverDebug,
               "NsWriterQueue: size %" PRIdz " bufs %p (%d) flags %.6x stream %.6x chan %p fd %d thread %d",
               nsend, (void *)bufs, nbufs, connPtr->flags, connPtr->flags & NS_CONN_STREAM,
               (void *)chan, fd, wrPtr->threads);

        if (unlikely(wrPtr->threads == 0)) {
            Ns_Log(DriverDebug, "NsWriterQueue: no writer threads configured");
            status = NS_ERROR;

        } else if (nsend < (size_t)wrPtr->writersize && !everysize && connPtr->fd == 0) {
            Ns_Log(DriverDebug, "NsWriterQueue: file is too small (%" PRIdz " < %" PRIdz ")",
                   nsend, wrPtr->writersize);
            status = NS_ERROR;
        }
    }
    if (status != NS_OK) {
        return status;
    }

    assert(wrPtr != NULL);

    /*
     * In streaming mode, set up a temporary fd which is used as input and
     * output. Streaming i/o will append to the file, while the writer
     * will read from it.
*/ if (((connPtr->flags & NS_CONN_STREAM) != 0u) || connPtr->fd > 0) { if (wrPtr->doStream == NS_WRITER_STREAM_NONE) { status = NS_ERROR; } else if (unlikely(fp != NULL || fd != NS_INVALID_FD)) { Ns_Log(DriverDebug, "NsWriterQueue: does not stream from this source via writer"); status = NS_ERROR; } else { status = WriterSetupStreamingMode(connPtr, bufs, nbufs, &fd); } if (unlikely(status != NS_OK)) { if (status == NS_FILTER_BREAK) { status = NS_OK; } return status; } /* * As a result of successful WriterSetupStreamingMode(), we have fd * set. */ assert(fd != NS_INVALID_FD); } else { if (fp != NULL) { /* * The client provided an open file pointer and closes it */ fd = ns_dup(fileno(fp)); } else if (fd != NS_INVALID_FD) { /* * The client provided an open file descriptor and closes it */ fd = ns_dup(fd); } else if (chan != NULL) { ClientData clientData; /* * The client provided an open Tcl channel and closes it */ if (Tcl_GetChannelHandle(chan, TCL_READABLE, &clientData) != TCL_OK) { return NS_ERROR; } fd = ns_dup(PTR2INT(clientData)); } else if (filebufs != NULL && nfilebufs > 0) { /* * The client provided Ns_FileVec with open files. The client is * responsible for closing it, like in all other cases. */ size_t i; /* * This is the only case, where fbufs will be != NULL, * i.e. keeping a duplicate of the passed-in Ns_FileVec structure * for which the client is responsible. */ fbufs = (Ns_FileVec *)ns_calloc((size_t)nfilebufs, sizeof(Ns_FileVec)); nfbufs = nfilebufs; for (i = 0u; i < (size_t)nfilebufs; i++) { fbufs[i].fd = ns_dup(filebufs[i].fd); fbufs[i].length = filebufs[i].length; fbufs[i].offset = filebufs[i].offset; } /* * Place the fd of the first Ns_FileVec to fd. */ fd = fbufs[0].fd; Ns_Log(DriverDebug, "NsWriterQueue: filevec mode, take first fd %d tosend %lu", fd, nsend); } } Ns_Log(DriverDebug, "NsWriterQueue: writer threads %d nsend %" PRIdz " writersize %" PRIdz, wrPtr->threads, nsend, wrPtr->writersize); assert(connPtr->poolPtr != NULL); connPtr->poolPtr->stats.spool++; wrSockPtr = (WriterSock *)ns_calloc(1u, sizeof(WriterSock)); wrSockPtr->sockPtr = connPtr->sockPtr; wrSockPtr->poolPtr = connPtr->poolPtr; /* just for being able to trace back the origin, e.g. list */ wrSockPtr->sockPtr->timeout.sec = 0; wrSockPtr->flags = connPtr->flags; wrSockPtr->refCount = 1; /* * Take the rate limit from the connection. */ wrSockPtr->rateLimit = connPtr->rateLimit; if (wrSockPtr->rateLimit == -1) { /* * The value was not specified via connection. Use either the pool * limit as a base for the computation or fall back to the driver * default value. */ if (connPtr->poolPtr->rate.poolLimit > 0) { /* * Very optimistic start value, but values will float through via * bandwidth management. */ wrSockPtr->rateLimit = connPtr->poolPtr->rate.poolLimit / 2; } else { wrSockPtr->rateLimit = wrPtr->rateLimit; } } Ns_Log(WriterDebug, "### Writer(%d): initial rate limit %d KB/s", wrSockPtr->sockPtr->sock, wrSockPtr->rateLimit); /* * Make sure we have proper content length header for * keep-alive/pipelining. 
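     * When the stream flag is set, the total length is not yet known,
     * so the header setup cannot rely on a fixed content-length for this
     * response.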
     */
    Ns_ConnSetLengthHeader(conn, nsend, (wrSockPtr->flags & NS_CONN_STREAM) != 0u);

    /*
     * Flush the headers
     */
    if ((conn->flags & NS_CONN_SENTHDRS) == 0u) {
        Tcl_DString    ds;

        Ns_DStringInit(&ds);
        Ns_Log(DriverDebug, "### Writer(%d): add header", fd);
        conn->flags |= NS_CONN_SENTHDRS;
        (void)Ns_CompleteHeaders(conn, nsend, 0u, &ds);

        headerSize = (size_t)Ns_DStringLength(&ds);
        if (headerSize > 0u) {
            wrSockPtr->headerString = ns_strdup(Tcl_DStringValue(&ds));
        }
        Ns_DStringFree(&ds);
    } else {
        headerSize = 0u;
    }

    if (fd != NS_INVALID_FD) {
        /* maybe add mmap support for files (fd != NS_INVALID_FD) */

        wrSockPtr->fd = fd;
        wrSockPtr->c.file.bufs = fbufs;
        wrSockPtr->c.file.nbufs = nfbufs;

        Ns_Log(DriverDebug, "### Writer(%d) tosend %" PRIdz " files %d bufsize %" PRIdz,
               fd, nsend, nfbufs, wrPtr->bufsize);

        if (unlikely(headerSize >= wrPtr->bufsize)) {
            /*
             * We have a header which is larger than bufsize; place it
             * as "leftover" and use the headerString as buffer for file
             * reads (rather rare case)
             */
            wrSockPtr->c.file.buf = (unsigned char *)wrSockPtr->headerString;
            wrSockPtr->c.file.maxsize = headerSize;
            wrSockPtr->c.file.bufsize = headerSize;
            wrSockPtr->headerString = NULL;
        } else if (headerSize > 0u) {
            /*
             * We have a header that fits into the bufsize; place it
             * as "leftover" at the end of the buffer.
             */
            wrSockPtr->c.file.buf = ns_malloc(wrPtr->bufsize);
            memcpy(wrSockPtr->c.file.buf, wrSockPtr->headerString, headerSize);
            wrSockPtr->c.file.bufsize = headerSize;
            wrSockPtr->c.file.maxsize = wrPtr->bufsize;
            ns_free(wrSockPtr->headerString);
            wrSockPtr->headerString = NULL;
        } else {
            assert(wrSockPtr->headerString == NULL);
            wrSockPtr->c.file.buf = ns_malloc(wrPtr->bufsize);
            wrSockPtr->c.file.maxsize = wrPtr->bufsize;
        }
        wrSockPtr->c.file.bufoffset = 0;
        wrSockPtr->c.file.toRead = nsend;

    } else if (bufs != NULL) {
        int   i, j, headerbufs = (headerSize > 0u ? 1 : 0);

        wrSockPtr->fd = NS_INVALID_FD;

        if (nbufs+headerbufs < UIO_SMALLIOV) {
            wrSockPtr->c.mem.bufs = wrSockPtr->c.mem.preallocated_bufs;
        } else {
            Ns_Log(DriverDebug, "NsWriterQueue: alloc %d iovecs", nbufs);
            wrSockPtr->c.mem.bufs = ns_calloc((size_t)nbufs + (size_t)headerbufs,
                                              sizeof(struct iovec));
        }
        wrSockPtr->c.mem.nbufs = nbufs+headerbufs;
        if (headerbufs != 0) {
            wrSockPtr->c.mem.bufs[0].iov_base = wrSockPtr->headerString;
            wrSockPtr->c.mem.bufs[0].iov_len = headerSize;
        }

        if (connPtr->fmap.addr != NULL) {
            Ns_Log(DriverDebug, "NsWriterQueue: deliver fmapped %p", (void *)connPtr->fmap.addr);
            /*
             * Deliver an mmapped file, no need to copy content
             */
            for (i = 0, j=headerbufs; i < nbufs; i++, j++) {
                wrSockPtr->c.mem.bufs[j].iov_base = bufs[i].iov_base;
                wrSockPtr->c.mem.bufs[j].iov_len = bufs[i].iov_len;
            }
            /*
             * Make a copy of the fmap structure and make clear that
             * we unmap in the writer thread.
             */
            wrSockPtr->c.mem.fmap = connPtr->fmap;
            connPtr->fmap.addr = NULL;
            /* header string will be freed via wrSockPtr->headerString */

        } else {
            /*
             * Deliver content from an iovec. The lifetime of the source
             * is unknown, so we have to copy the content.
             */
            for (i = 0, j=headerbufs; i < nbufs; i++, j++) {
                wrSockPtr->c.mem.bufs[j].iov_base = ns_malloc(bufs[i].iov_len);
                wrSockPtr->c.mem.bufs[j].iov_len = bufs[i].iov_len;
                memcpy(wrSockPtr->c.mem.bufs[j].iov_base, bufs[i].iov_base, bufs[i].iov_len);
            }
            /* header string will be freed as c.mem.bufs[0] */
            wrSockPtr->headerString = NULL;
        }

    } else {
        ns_free(wrSockPtr);
        return NS_ERROR;
    }

    /*
     * Add header size to total size.
     */
    nsend += headerSize;

    if (connPtr->clientData != NULL) {
        wrSockPtr->clientData = ns_strdup(connPtr->clientData);
    }
    wrSockPtr->startTime = *Ns_ConnStartTime(conn);

    /*
     * Set up the streaming context before potentially sending headers.
     */
    if ((wrSockPtr->flags & NS_CONN_STREAM) != 0u) {
        wrSockPtr->doStream = NS_WRITER_STREAM_ACTIVE;
        assert(connPtr->strWriter == NULL);
        /*
         * Add a reference to the stream writer to the connection such
         * that it can efficiently append to a stream when multiple
         * output operations happen. The backpointer (from the stream
         * writer to the connection) is needed to clear the reference to
         * the writer in case the writer is deleted. No locks are needed,
         * since nobody can share this structure yet.
         */
        connPtr->strWriter = (NsWriterSock *)wrSockPtr;
        wrSockPtr->connPtr = connPtr;
    }

    /*
     * Tell the connection that the writer handles the output (including
     * closing the connection to the client).
     */
    connPtr->flags |= NS_CONN_SENT_VIA_WRITER;
    wrSockPtr->keep = connPtr->keep > 0 ? NS_TRUE : NS_FALSE;
    wrSockPtr->size = nsend;

    Ns_Log(DriverDebug, "NsWriterQueue NS_CONN_SENT_VIA_WRITER connPtr %p", (void*)connPtr);

    if ((wrSockPtr->flags & NS_CONN_STREAM) == 0u) {
        Ns_Log(DriverDebug, "NsWriterQueue NS_CONN_SENT_VIA_WRITER connPtr %p clear sockPtr %p",
               (void*)connPtr, (void*)connPtr->sockPtr);
        connPtr->sockPtr = NULL;
        connPtr->flags |= NS_CONN_CLOSED;
        connPtr->nContentSent = nsend - headerSize;
    }

    /*
     * Get the next writer thread from the list; all writer requests are
     * rotated among the writer threads.
     */
    Ns_MutexLock(&wrPtr->lock);
    if (wrPtr->curPtr == NULL) {
        wrPtr->curPtr = wrPtr->firstPtr;
    }
    queuePtr = wrPtr->curPtr;
    wrPtr->curPtr = wrPtr->curPtr->nextPtr;
    Ns_MutexUnlock(&wrPtr->lock);

    Ns_Log(WriterDebug, "Writer(%d): started: id=%d fd=%d, "
           "size=%" PRIdz ", flags=%X, rate %d KB/s: %s",
           wrSockPtr->sockPtr->sock, queuePtr->id, wrSockPtr->fd,
           nsend, wrSockPtr->flags, wrSockPtr->rateLimit,
           connPtr->request.line);

    /*
     * Now add new writer socket to the writer thread's queue
     */
    wrSockPtr->queuePtr = queuePtr;

    Ns_MutexLock(&queuePtr->lock);
    if (queuePtr->sockPtr == NULL) {
        trigger = NS_TRUE;
    }
    Push(wrSockPtr, queuePtr->sockPtr);
    Ns_MutexUnlock(&queuePtr->lock);

    /*
     * Wake up writer thread
     */
    if (trigger) {
        SockTrigger(queuePtr->pipe[1]);
    }

    return NS_OK;
}

/*
 *----------------------------------------------------------------------
 *
 * DriverWriterFromObj --
 *
 *      Lookup driver by name and return its DrvWriter. When driverObj is
 *      NULL, get the driver from the conn.
 *
 * Results:
 *      Ns_ReturnCode
 *
 * Side effects:
 *      Set error message in interp in case of failure.
 *
 *----------------------------------------------------------------------
 */
static Ns_ReturnCode
DriverWriterFromObj(
    Tcl_Interp *interp,
    Tcl_Obj *driverObj,
    Ns_Conn *conn,
    DrvWriter **wrPtrPtr
) {
    Driver       *drvPtr;
    const char   *driverName = NULL;
    int           driverNameLen = 0;
    DrvWriter    *wrPtr = NULL;
    Ns_ReturnCode result;

    /*
     * If no driver is provided, take the current driver. The caller has
     * to make sure that in cases where no driver is specified, the
     * command is run in a connection thread.
     */
    if (driverObj == NULL) {
        if (conn != NULL) {
            driverName = Ns_ConnDriverName(conn);
            driverNameLen = (int)strlen(driverName);
        }
    } else {
        driverName = Tcl_GetStringFromObj(driverObj, &driverNameLen);
    }

    if (driverName != NULL) {
        for (drvPtr = firstDrvPtr; drvPtr != NULL; drvPtr = drvPtr->nextPtr) {
            if (strncmp(driverName, drvPtr->threadName, (size_t)driverNameLen) == 0) {
                if (drvPtr->writer.firstPtr != NULL) {
                    wrPtr = &drvPtr->writer;
                }
                break;
            }
        }
    }
    if (unlikely(wrPtr == NULL)) {
        Ns_TclPrintfResult(interp, "no writer configured for a driver with name %s",
                           driverName);
        result = NS_ERROR;
    } else {
        *wrPtrPtr = wrPtr;
        result = NS_OK;
    }

    return result;
}

/*
 *----------------------------------------------------------------------
 *
 * WriterSubmitObjCmd - subcommand of NsTclWriterObjCmd --
 *
 *      Implements "ns_writer submit" command.
 *      Send the provided data to the client.
 *
 * Results:
 *      Standard Tcl result.
 *
 * Side effects:
 *      None.
 *
 *----------------------------------------------------------------------
 */
static int
WriterSubmitObjCmd(ClientData UNUSED(clientData), Tcl_Interp *interp,
                   int objc, Tcl_Obj *const* objv)
{
    int          result = TCL_OK;
    Ns_Conn     *conn;
    Tcl_Obj     *dataObj;
    Ns_ObjvSpec  args[] = {
        {"data", Ns_ObjvObj, &dataObj, NULL},
        {NULL, NULL, NULL, NULL}
    };

    if (Ns_ParseObjv(NULL, args, interp, 2, objc, objv) != NS_OK
        || NsConnRequire(interp, NS_CONN_REQUIRE_ALL, &conn) != NS_OK) {
        result = TCL_ERROR;
    } else {
        int            size;
        unsigned char *data = Tcl_GetByteArrayFromObj(dataObj, &size);

        if (data != NULL) {
            struct iovec  vbuf;
            Ns_ReturnCode status;

            vbuf.iov_base = (void *)data;
            vbuf.iov_len = (size_t)size;

            status = NsWriterQueue(conn, (size_t)size, NULL, NULL, NS_INVALID_FD,
                                   &vbuf, 1, NULL, 0, NS_TRUE);
            Tcl_SetObjResult(interp, Tcl_NewBooleanObj(status == NS_OK ? 1 : 0));
        }
    }

    return result;
}

/*
 *----------------------------------------------------------------------
 *
 * WriterCheckInputParams -
 *
 *      Helper function for WriterSubmitFileObjCmd and
 *      WriterSubmitFilesObjCmd to check the validity of filename, offset
 *      and size.
 *
 * Results:
 *      Standard Tcl result. On success, also returns fd and nrbytes.
 *
 * Side effects:
 *      None.
 *
 *----------------------------------------------------------------------
 */
static int
WriterCheckInputParams(Tcl_Interp *interp, const char *filenameString,
                       size_t size, off_t offset,
                       int *fdPtr, size_t *nrbytesPtr)
{
    int         result = TCL_OK, rc;
    struct stat st;

    Ns_Log(DriverDebug, "WriterCheckInputParams %s offset %" PROTd " size %" PRIdz,
           filenameString, offset, size);

    /*
     * Use a stat() call to obtain information about the actual file, in
     * order to check the plausibility of the parameters later.
     */
    rc = stat(filenameString, &st);
    if (unlikely(rc != 0)) {
        Ns_TclPrintfResult(interp, "file does not exist '%s'", filenameString);
        result = TCL_ERROR;

    } else {
        size_t nrbytes = 0u;
        int    fd;

        /*
         * Try to open the file and check offset and size parameters.
         */
        fd = ns_open(filenameString, O_RDONLY | O_CLOEXEC, 0);

        if (unlikely(fd == NS_INVALID_FD)) {
            Ns_TclPrintfResult(interp, "could not open file '%s'", filenameString);
            result = TCL_ERROR;

        } else if (unlikely(offset > st.st_size) || offset < 0) {
            Ns_TclPrintfResult(interp, "offset must be a positive value less or equal filesize");
            result = TCL_ERROR;

        } else if (size > 0) {
            if (unlikely((off_t)size + offset > st.st_size)) {
                Ns_TclPrintfResult(interp, "offset + size must be less or equal filesize");
                result = TCL_ERROR;
            } else {
                nrbytes = (size_t)size;
            }

        } else {
            nrbytes = (size_t)st.st_size - (size_t)offset;
        }

        /*
         * When an offset is provided, jump to this offset.
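         *
         * (E.g., for a 10,000 byte file with offset 2,000 and no size
         * given, nrbytes becomes 8,000 above and the fd is positioned at
         * byte 2,000 below; illustrative numbers.)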
*/ if (offset > 0 && result == TCL_OK) { if (ns_lseek(fd, (off_t)offset, SEEK_SET) == -1) { Ns_TclPrintfResult(interp, "cannot seek to position %ld", (long)offset); result = TCL_ERROR; } } if (result == TCL_OK) { *fdPtr = fd; *nrbytesPtr = nrbytes; } else if (fd != NS_INVALID_FD) { /* * On invalid parameters, close the fd. */ ns_close(fd); } } return result; } /* *---------------------------------------------------------------------- * * WriterSubmitFileObjCmd - subcommand of NsTclWriterObjCmd -- * * Implements "ns_writer submitfile" command. * Send the provided file to the client. * * Results: * Standard Tcl result. * * Side effects: * None. * *---------------------------------------------------------------------- */ static int WriterSubmitFileObjCmd(ClientData UNUSED(clientData), Tcl_Interp *interp, int objc, Tcl_Obj *const* objv) { int result = TCL_OK; Ns_Conn *conn; char *fileNameString; int headers = 0; Tcl_WideInt offset = 0, size = 0; Ns_ObjvValueRange offsetRange = {0, LLONG_MAX}; Ns_ObjvValueRange sizeRange = {1, LLONG_MAX}; Ns_ObjvSpec lopts[] = { {"-headers", Ns_ObjvBool, &headers, INT2PTR(NS_TRUE)}, {"-offset", Ns_ObjvMemUnit, &offset, &offsetRange}, {"-size", Ns_ObjvMemUnit, &size, &sizeRange}, {NULL, NULL, NULL, NULL} }; Ns_ObjvSpec args[] = { {"file", Ns_ObjvString, &fileNameString, NULL}, {NULL, NULL, NULL, NULL} }; if (unlikely(Ns_ParseObjv(lopts, args, interp, 2, objc, objv) != NS_OK) || NsConnRequire(interp, NS_CONN_REQUIRE_ALL, &conn) != NS_OK) { result = TCL_ERROR; } else if (unlikely( Ns_ConnSockPtr(conn) == NULL )) { Ns_Log(Warning, "NsWriterQueue: called without valid sockPtr, maybe connection already closed"); Ns_TclPrintfResult(interp, "0"); result = TCL_OK; } else { size_t nrbytes = 0u; int fd = NS_INVALID_FD; result = WriterCheckInputParams(interp, fileNameString, (size_t)size, offset, &fd, &nrbytes); if (likely(result == TCL_OK)) { Ns_ReturnCode status; /* * The caller requested that we build required headers */ if (headers != 0) { Ns_ConnSetTypeHeader(conn, Ns_GetMimeType(fileNameString)); } status = NsWriterQueue(conn, nrbytes, NULL, NULL, fd, NULL, 0, NULL, 0, NS_TRUE); Tcl_SetObjResult(interp, Tcl_NewBooleanObj(status == NS_OK ? 1 : 0)); if (fd != NS_INVALID_FD) { (void) ns_close(fd); } else { Ns_Log(Warning, "WriterSubmitFileObjCmd called with invalid fd"); } } else if (fd != NS_INVALID_FD) { (void) ns_close(fd); } } return result; } /* *---------------------------------------------------------------------- * * WriterGetMemunitFromDict -- * * Helper function to obtain a memory unit from a dict structure, * optionally checking the value range. * * Results: * Standard Tcl result. * * Side effects: * On errors, an error message is left in the interpreter. 
 *
 *----------------------------------------------------------------------
 */
static int
WriterGetMemunitFromDict(Tcl_Interp *interp, Tcl_Obj *dictObj, Tcl_Obj *keyObj,
                         Ns_ObjvValueRange *rangePtr, Tcl_WideInt *valuePtr)
{
    Tcl_Obj *intObj = NULL;
    int      result;

    NS_NONNULL_ASSERT(interp != NULL);
    NS_NONNULL_ASSERT(dictObj != NULL);
    NS_NONNULL_ASSERT(keyObj != NULL);
    NS_NONNULL_ASSERT(valuePtr != NULL);

    result = Tcl_DictObjGet(interp, dictObj, keyObj, &intObj);
    if (result == TCL_OK && intObj != NULL) {
        result = Ns_TclGetMemUnitFromObj(interp, intObj, valuePtr);
        if (result == TCL_OK && rangePtr != NULL) {
            result = Ns_CheckWideRange(interp, Tcl_GetString(keyObj), rangePtr, *valuePtr);
        }
    }

    return result;
}

/*
 *----------------------------------------------------------------------
 *
 * WriterSubmitFilesObjCmd - subcommand of NsTclWriterObjCmd --
 *
 *      Implements "ns_writer submitfiles" command. Send the provided files
 *      to the client. "files" are provided as a list of dicts, where every
 *      dict must contain a "filename" element and can contain an "-offset"
 *      and/or a "-size" element.
 *
 * Results:
 *      Standard Tcl result.
 *
 * Side effects:
 *      None.
 *
 *----------------------------------------------------------------------
 */
static int
WriterSubmitFilesObjCmd(ClientData UNUSED(clientData), Tcl_Interp *interp,
                        int objc, Tcl_Obj *const* objv)
{
    int          result = TCL_OK;
    Ns_Conn     *conn;
    int          headers = 0, nrFiles;
    Tcl_Obj     *filesObj = NULL, **fileObjv;
    Ns_ObjvSpec  lopts[] = {
        {"-headers", Ns_ObjvBool, &headers, INT2PTR(NS_TRUE)},
        {NULL, NULL, NULL, NULL}
    };
    Ns_ObjvSpec  args[] = {
        {"files", Ns_ObjvObj, &filesObj, NULL},
        {NULL, NULL, NULL, NULL}
    };

    if (unlikely(Ns_ParseObjv(lopts, args, interp, 2, objc, objv) != NS_OK)
        || NsConnRequire(interp, NS_CONN_REQUIRE_ALL, &conn) != NS_OK) {
        result = TCL_ERROR;

    } else if (unlikely( Ns_ConnSockPtr(conn) == NULL )) {
        Ns_Log(Warning, "NsWriterQueue: called without valid sockPtr, "
               "maybe connection already closed");
        Ns_TclPrintfResult(interp, "0");
        result = TCL_OK;

    } else if (Tcl_ListObjGetElements(interp, filesObj, &nrFiles, &fileObjv) != TCL_OK) {
        Ns_TclPrintfResult(interp, "not a valid list of files: '%s'", Tcl_GetString(filesObj));
        result = TCL_ERROR;

    } else if (nrFiles == 0) {
        Ns_TclPrintfResult(interp, "The provided list has to contain at least one file spec");
        result = TCL_ERROR;

    } else {
        size_t      totalbytes = 0u, i;
        Tcl_Obj    *keys[3], *filenameObj = NULL;
        Ns_FileVec *filebufs;
        const char *firstFilenameString = NULL;
        Ns_ObjvValueRange offsetRange = {0, LLONG_MAX};
        Ns_ObjvValueRange sizeRange = {1, LLONG_MAX};

        filebufs = (Ns_FileVec *)ns_calloc((size_t)nrFiles, sizeof(Ns_FileVec));
        keys[0] = Tcl_NewStringObj("filename", 8);
        keys[1] = Tcl_NewStringObj("-offset", 7);
        keys[2] = Tcl_NewStringObj("-size", 5);

        Tcl_IncrRefCount(keys[0]);
        Tcl_IncrRefCount(keys[1]);
        Tcl_IncrRefCount(keys[2]);

        for (i = 0u; i < (size_t)nrFiles; i++) {
            filebufs[i].fd = NS_INVALID_FD;
        }

        /*
         * Iterate over the list of dicts.
         */
        for (i = 0u; i < (size_t)nrFiles; i++) {
            Tcl_WideInt offset = 0, size = 0;
            int         rc, fd = NS_INVALID_FD;
            const char *filenameString;
            size_t      nrbytes;

            /*
             * Get required "filename" element.
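             *
             * A file spec is a Tcl dict such as (illustrative):
             *     {filename /var/www/a.pdf -offset 0 -size 100KB}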
*/ filenameObj = NULL; rc = Tcl_DictObjGet(interp, fileObjv[i], keys[0], &filenameObj); if (rc != TCL_OK || filenameObj == NULL) { Ns_TclPrintfResult(interp, "missing filename in dict '%s'", Tcl_GetString(fileObjv[i])); result = TCL_ERROR; break; } filenameString = Tcl_GetString(filenameObj); if (firstFilenameString == NULL) { firstFilenameString = filenameString; } /* * Get optional "-offset" and "-size" elements. */ if (WriterGetMemunitFromDict(interp, fileObjv[i], keys[1], &offsetRange, &offset) != TCL_OK) { result = TCL_ERROR; break; } if (WriterGetMemunitFromDict(interp, fileObjv[i], keys[2], &sizeRange, &size) != TCL_OK) { result = TCL_ERROR; break; } /* * Check validity of the provided values */ result = WriterCheckInputParams(interp, Tcl_GetString(filenameObj), (size_t)size, (off_t)offset, &fd, &nrbytes); if (result != TCL_OK) { break; } filebufs[i].fd = fd; filebufs[i].offset = offset; filebufs[i].length = nrbytes; totalbytes = totalbytes + (size_t)nrbytes; } Tcl_DecrRefCount(keys[0]); Tcl_DecrRefCount(keys[1]); Tcl_DecrRefCount(keys[2]); /* * If everything is ok, submit the request to the writer queue. */ if (result == TCL_OK) { Ns_ReturnCode status; if (headers != 0 && firstFilenameString != NULL) { Ns_ConnSetTypeHeader(conn, Ns_GetMimeType(firstFilenameString)); } status = NsWriterQueue(conn, totalbytes, NULL, NULL, NS_INVALID_FD, NULL, 0, filebufs, nrFiles, NS_TRUE); /* * Provide a soft error like for "ns_writer submitfile". */ Tcl_SetObjResult(interp, Tcl_NewBooleanObj(status == NS_OK ? 1 : 0)); } /* * The NsWriterQueue() API makes the usual duplicates of the file * descriptors and the Ns_FileVec structure, so we have to cleanup * here. */ for (i = 0u; i < (size_t)nrFiles; i++) { if (filebufs[i].fd != NS_INVALID_FD) { (void) ns_close(filebufs[i].fd); } } ns_free(filebufs); } return result; } /* *---------------------------------------------------------------------- * * WriterListObjCmd - subcommand of NsTclWriterObjCmd -- * * Implements "ns_writer list" command. * List the current writer jobs. * * Results: * Standard Tcl result. * * Side effects: * None. * *---------------------------------------------------------------------- */ static int WriterListObjCmd(ClientData UNUSED(clientData), Tcl_Interp *interp, int objc, Tcl_Obj *const* objv) { int result = TCL_OK; NsServer *servPtr = NULL; Ns_ObjvSpec lopts[] = { {"-server", Ns_ObjvServer, &servPtr, NULL}, {NULL, NULL, NULL, NULL} }; if (unlikely(Ns_ParseObjv(lopts, NULL, interp, 2, objc, objv) != NS_OK)) { result = TCL_ERROR; } else { Tcl_DString ds, *dsPtr = &ds; const Driver *drvPtr; SpoolerQueue *queuePtr; Tcl_DStringInit(dsPtr); for (drvPtr = firstDrvPtr; drvPtr != NULL; drvPtr = drvPtr->nextPtr) { const DrvWriter *wrPtr; /* * If server was specified, list only results from this server. 
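             * Each returned list entry has the form (cf. the appends
             * below): {starttime writer-thread driver pool ip fd size
             * nsent currentrate ratelimit clientdata}.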
*/ if (servPtr != NULL && servPtr != drvPtr->servPtr) { continue; } wrPtr = &drvPtr->writer; queuePtr = wrPtr->firstPtr; while (queuePtr != NULL) { const WriterSock *wrSockPtr; Ns_MutexLock(&queuePtr->lock); wrSockPtr = queuePtr->curPtr; while (wrSockPtr != NULL) { char ipString[NS_IPADDR_SIZE]; ns_inet_ntop((struct sockaddr *)&(wrSockPtr->sockPtr->sa), ipString,sizeof(ipString)); (void) Ns_DStringNAppend(dsPtr, "{", 1); (void) Ns_DStringAppendTime(dsPtr, &wrSockPtr->startTime); (void) Ns_DStringNAppend(dsPtr, " ", 1); (void) Ns_DStringAppend(dsPtr, queuePtr->threadName); (void) Ns_DStringNAppend(dsPtr, " ", 1); (void) Ns_DStringAppend(dsPtr, drvPtr->threadName); (void) Ns_DStringNAppend(dsPtr, " ", 1); (void) Ns_DStringAppend(dsPtr, NsPoolName(wrSockPtr->poolPtr->pool)); (void) Ns_DStringNAppend(dsPtr, " ", 1); (void) Ns_DStringAppend(dsPtr, ipString); (void) Ns_DStringPrintf(dsPtr, " %d %" PRIdz " %" TCL_LL_MODIFIER "d %d %d ", wrSockPtr->fd, wrSockPtr->size, wrSockPtr->nsent, wrSockPtr->currentRate, wrSockPtr->rateLimit); (void) Ns_DStringAppendElement(dsPtr, (wrSockPtr->clientData != NULL) ? wrSockPtr->clientData : NS_EMPTY_STRING); (void) Ns_DStringNAppend(dsPtr, "} ", 2); wrSockPtr = wrSockPtr->nextPtr; } Ns_MutexUnlock(&queuePtr->lock); queuePtr = queuePtr->nextPtr; } } Tcl_DStringResult(interp, &ds); } return result; } /* *---------------------------------------------------------------------- * * WriterSizeObjCmd - subcommand of NsTclWriterObjCmd -- * * Implements "ns_writer size" command. * Sets or queries size limit for sending via writer. * * Results: * Standard Tcl result. * * Side effects: * None. * *---------------------------------------------------------------------- */ static int WriterSizeObjCmd(ClientData UNUSED(clientData), Tcl_Interp *interp, int objc, Tcl_Obj *const* objv) { int result = TCL_OK; Tcl_Obj *driverObj = NULL; Ns_Conn *conn = NULL; Tcl_WideInt intValue = -1; const char *firstArgString; Ns_ObjvValueRange range = {1024, INT_MAX}; Ns_ObjvSpec *opts, optsNew[] = { {"-driver", Ns_ObjvObj, &driverObj, NULL}, {NULL, NULL, NULL, NULL} }; Ns_ObjvSpec *args, argsNew[] = { {"?value", Ns_ObjvMemUnit, &intValue, &range}, {NULL, NULL, NULL, NULL} }; Ns_ObjvSpec argsLegacy[] = { {"driver", Ns_ObjvObj, &driverObj, NULL}, {"?value", Ns_ObjvMemUnit, &intValue, &range}, {NULL, NULL, NULL, NULL} }; firstArgString = objc > 2 ? Tcl_GetString(objv[2]) : NULL; if (firstArgString != NULL) { if (*firstArgString != '-' && ((objc == 3 && CHARTYPE(digit, *firstArgString) == 0) || objc == 4)) { args = argsLegacy; opts = NULL; Ns_LogDeprecated(objv, objc, "ns_writer size ?-driver drv? ?size?", NULL); } else { args = argsNew; opts = optsNew; } } else { args = argsNew; opts = optsNew; } if (Ns_ParseObjv(opts, args, interp, 2, objc, objv) != NS_OK) { result = TCL_ERROR; } else if ((driverObj == NULL) && NsConnRequire(interp, NS_CONN_REQUIRE_ALL, &conn) != NS_OK) { result = TCL_ERROR; } else { DrvWriter *wrPtr; if (DriverWriterFromObj(interp, driverObj, conn, &wrPtr) != NS_OK) { result = TCL_ERROR; } else if (intValue != -1) { /* * The optional argument was provided. */ wrPtr->writersize = (size_t)intValue; } if (result == TCL_OK) { Tcl_SetObjResult(interp, Tcl_NewIntObj((int)wrPtr->writersize)); } } return result; } /* *---------------------------------------------------------------------- * * WriterStreamingObjCmd - subcommand of NsTclWriterObjCmd -- * * Implements "ns_writer streaming" command. * Sets or queries streaming state of the writer. * * Results: * Standard Tcl result. 
* * Side effects: * None. * *---------------------------------------------------------------------- */ static int WriterStreamingObjCmd(ClientData UNUSED(clientData), Tcl_Interp *interp, int objc, Tcl_Obj *const* objv) { int boolValue = -1, result = TCL_OK; Tcl_Obj *driverObj = NULL; Ns_Conn *conn = NULL; const char *firstArgString; Ns_ObjvSpec *opts, optsNew[] = { {"-driver", Ns_ObjvObj, &driverObj, NULL}, {NULL, NULL, NULL, NULL} }; Ns_ObjvSpec *args, argsNew[] = { {"?value", Ns_ObjvBool, &boolValue, NULL}, {NULL, NULL, NULL, NULL} }; Ns_ObjvSpec argsLegacy[] = { {"driver", Ns_ObjvObj, &driverObj, NULL}, {"?value", Ns_ObjvBool, &boolValue, NULL}, {NULL, NULL, NULL, NULL} }; firstArgString = objc > 2 ? Tcl_GetString(objv[2]) : NULL; if (firstArgString != NULL) { int argValue; if (*firstArgString != '-' && ((objc == 3 && Tcl_ExprBoolean(interp, firstArgString, &argValue) == TCL_OK) || objc == 4)) { args = argsLegacy; opts = NULL; Ns_LogDeprecated(objv, objc, "ns_writer streaming ?-driver drv? ?value?", NULL); } else { args = argsNew; opts = optsNew; } } else { args = argsNew; opts = optsNew; } if (Ns_ParseObjv(opts, args, interp, 2, objc, objv) != NS_OK) { result = TCL_ERROR; } else if ((driverObj == NULL) && NsConnRequire(interp, NS_CONN_REQUIRE_ALL, &conn) != NS_OK) { result = TCL_ERROR; } else { DrvWriter *wrPtr; if (DriverWriterFromObj(interp, driverObj, conn, &wrPtr) != NS_OK) { result = TCL_ERROR; } else if (boolValue != -1) { /* * The optional argument was provided. */ wrPtr->doStream = (boolValue == 1 ? NS_WRITER_STREAM_ACTIVE : NS_WRITER_STREAM_NONE); } if (result == TCL_OK) { Tcl_SetObjResult(interp, Tcl_NewIntObj(wrPtr->doStream == NS_WRITER_STREAM_ACTIVE ? 1 : 0)); } } return result; } /* *---------------------------------------------------------------------- * * NsTclWriterObjCmd -- * * Implements "ns_writer" command for submitting data to the writer * threads and to configure and query the state of the writer threads at * runtime. * * Results: * Standard Tcl result. * * Side effects: * None. * *---------------------------------------------------------------------- */ int NsTclWriterObjCmd(ClientData clientData, Tcl_Interp *interp, int objc, Tcl_Obj *const* objv) { const Ns_SubCmdSpec subcmds[] = { {"list", WriterListObjCmd}, {"size", WriterSizeObjCmd}, {"streaming", WriterStreamingObjCmd}, {"submit", WriterSubmitObjCmd}, {"submitfile", WriterSubmitFileObjCmd}, {"submitfiles",WriterSubmitFilesObjCmd}, {NULL, NULL} }; return Ns_SubcmdObjv(subcmds, clientData, interp, objc, objv); } /* *====================================================================== * Async (log) writer: Write asynchronously to a disk *====================================================================== */ /* *---------------------------------------------------------------------- * * NsAsyncWriterQueueEnable -- * * Enable async writing and start the AsyncWriterThread if * necessary * * Results: * None. * * Side effects: * Potentially starting a thread and set "stopped" to NS_FALSE. * *---------------------------------------------------------------------- */ void NsAsyncWriterQueueEnable(void) { if (Ns_ConfigBool(NS_CONFIG_PARAMETERS, "asynclogwriter", NS_FALSE) == NS_TRUE) { SpoolerQueue *queuePtr; /* * In case, the async writer has not started, the static variable * asyncWriter is NULL. */ if (asyncWriter == NULL) { Ns_MutexLock(&reqLock); if (likely(asyncWriter == NULL)) { /* * Allocate and initialize writer thread context. 
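                 * Note that the surrounding double-checked locking (test,
                 * lock, re-test of asyncWriter) ensures that only one
                 * thread performs this allocation.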
                 */
                asyncWriter = ns_calloc(1u, sizeof(AsyncWriter));
                Ns_MutexUnlock(&reqLock);
                Ns_MutexSetName2(&asyncWriter->lock, "ns:driver", "async-writer");
                /*
                 * Allocate and initialize a Spooler Queue for this thread.
                 */
                queuePtr = ns_calloc(1u, sizeof(SpoolerQueue));
                Ns_MutexSetName2(&queuePtr->lock, "ns:driver:async-writer", "queue");
                asyncWriter->firstPtr = queuePtr;
                /*
                 * Start the spooler queue
                 */
                SpoolerQueueStart(queuePtr, AsyncWriterThread);

            } else {
                Ns_MutexUnlock(&reqLock);
            }
        }

        assert(asyncWriter != NULL);
        queuePtr = asyncWriter->firstPtr;
        assert(queuePtr != NULL);

        Ns_MutexLock(&queuePtr->lock);
        queuePtr->stopped = NS_FALSE;
        Ns_MutexUnlock(&queuePtr->lock);
    }
}

/*
 *----------------------------------------------------------------------
 *
 * NsAsyncWriterQueueDisable --
 *
 *      Disable async writing but don't touch the writer thread.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      Disable async writing by setting stopped to NS_TRUE.
 *
 *----------------------------------------------------------------------
 */
void
NsAsyncWriterQueueDisable(bool shutdown)
{
    if (asyncWriter != NULL) {
        SpoolerQueue *queuePtr = asyncWriter->firstPtr;
        Ns_Time       timeout;

        assert(queuePtr != NULL);
        Ns_GetTime(&timeout);
        Ns_IncrTime(&timeout, nsconf.shutdowntimeout.sec, nsconf.shutdowntimeout.usec);

        Ns_MutexLock(&queuePtr->lock);
        queuePtr->stopped = NS_TRUE;
        queuePtr->shutdown = shutdown;

        /*
         * Trigger the AsyncWriter Thread to drain the spooler queue.
         */
        SockTrigger(queuePtr->pipe[1]);
        (void)Ns_CondTimedWait(&queuePtr->cond, &queuePtr->lock, &timeout);
        Ns_MutexUnlock(&queuePtr->lock);

        if (shutdown) {
            ns_free(queuePtr);
            ns_free(asyncWriter);
            asyncWriter = NULL;
        }
    }
}

/*
 *----------------------------------------------------------------------
 *
 * NsAsyncWrite --
 *
 *      Perform an asynchronous write operation via a writer thread in
 *      case a writer thread is configured and running. The intention
 *      of the asynchronous write operations is to reduce latencies in
 *      connection threads.
 *
 * Results:
 *      NS_OK when the write was performed via the writer thread,
 *      NS_ERROR otherwise (but the data is still written).
 *
 * Side effects:
 *      I/O Operation.
 *
 *----------------------------------------------------------------------
 */
Ns_ReturnCode
NsAsyncWrite(int fd, const char *buffer, size_t nbyte)
{
    Ns_ReturnCode returnCode = NS_OK;

    NS_NONNULL_ASSERT(buffer != NULL);

    /*
     * If the async writer has not started or is deactivated, behave like
     * a plain ns_write() call. If ns_write() fails, we can't do much,
     * since writing an error message to the log might bring us into an
     * infinite loop. So we simply print to stderr.
     */
    if (asyncWriter == NULL || asyncWriter->firstPtr->stopped) {
        ssize_t written = ns_write(fd, buffer, nbyte);

        if (unlikely(written != (ssize_t)nbyte)) {
            int retries = 100;

            /*
             * Don't go into an infinite loop when multiple subsequent disk
             * write operations return 0 (maybe disk full).
             */
            returnCode = NS_ERROR;

            do {
                if (written < 0) {
                    fprintf(stderr, "error during async write (fd %d): %s\n",
                            fd, strerror(errno));
                    break;
                }
                /*
                 * All partial writes (written >= 0)
                 */
                WriteWarningRaw("partial write", fd, nbyte, written);
                nbyte -= (size_t)written;
                buffer += written;
                written = ns_write(fd, buffer, nbyte);
                if (written == (ssize_t)nbyte) {
                    returnCode = NS_OK;
                    break;
                }
            } while (retries-- > 0);
        }

    } else {
        SpoolerQueue         *queuePtr;
        bool                  trigger = NS_FALSE;
        const AsyncWriteData *wdPtr;
        AsyncWriteData       *newWdPtr;

        /*
         * Allocate a writer cmd and initialize it. 
In order to provide an
         * interface compatible with ns_write(), we copy the provided data
         * such that it can be freed by the caller. If we gave up this
         * interface, we could free the memory block after writing and
         * save a malloc/free operation on the data.
         */
        newWdPtr = ns_calloc(1u, sizeof(AsyncWriteData));
        newWdPtr->fd = fd;
        newWdPtr->bufsize = nbyte;
        newWdPtr->data = ns_malloc(nbyte + 1u);
        memcpy(newWdPtr->data, buffer, newWdPtr->bufsize);
        newWdPtr->buf = newWdPtr->data;
        newWdPtr->size = newWdPtr->bufsize;

        /*
         * Now add the new write job to the writer thread's queue. In most
         * cases, the queue will be empty.
         */
        queuePtr = asyncWriter->firstPtr;
        assert(queuePtr != NULL);

        Ns_MutexLock(&queuePtr->lock);
        wdPtr = queuePtr->sockPtr;
        if (wdPtr != NULL) {
            newWdPtr->nextPtr = queuePtr->sockPtr;
            queuePtr->sockPtr = newWdPtr;
        } else {
            queuePtr->sockPtr = newWdPtr;
            trigger = NS_TRUE;
        }
        Ns_MutexUnlock(&queuePtr->lock);

        /*
         * Wake up writer thread if desired
         */
        if (trigger) {
            SockTrigger(queuePtr->pipe[1]);
        }
    }

    return returnCode;
}

/*
 *----------------------------------------------------------------------
 *
 * AsyncWriterRelease --
 *
 *      Deallocate write data.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      Frees memory.
 *
 *----------------------------------------------------------------------
 */
static void
AsyncWriterRelease(AsyncWriteData *wdPtr)
{
    NS_NONNULL_ASSERT(wdPtr != NULL);

    ns_free(wdPtr->data);
    ns_free(wdPtr);
}

/*
 *----------------------------------------------------------------------
 *
 * AsyncWriterThread --
 *
 *      Thread that implements non-blocking write operations to files.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      Writes to files.
 *
 *----------------------------------------------------------------------
 */
static void
AsyncWriterThread(void *arg)
{
    SpoolerQueue   *queuePtr = (SpoolerQueue*)arg;
    char            charBuffer[1];
    int             pollTimeout;
    Ns_ReturnCode   status;
    bool            stopping;
    AsyncWriteData *curPtr, *nextPtr, *writePtr;
    PollData        pdata;

    Ns_ThreadSetName("-asynclogwriter%d-", queuePtr->id);
    queuePtr->threadName = Ns_ThreadGetName();

    /*
     * Allocate and initialize controlling variables
     */
    PollCreate(&pdata);
    writePtr = NULL;
    stopping = NS_FALSE;

    /*
     * Loop forever until signaled to shut down and all
     * connections are complete and gracefully closed.
     */
    while (!stopping) {

        /*
         * Always listen to the trigger pipe. We could write to the files
         * only when they are reported writable, but the write operations
         * are typically small and the point of this thread is to reduce
         * latency in connection threads, not in this thread. To keep
         * things simple, we perform the typically small write operations
         * without testing for POLLOUT.
         */
        PollReset(&pdata);
        (void)PollSet(&pdata, queuePtr->pipe[0], (short)POLLIN, NULL);

        if (writePtr == NULL) {
            pollTimeout = 30 * 1000;
        } else {
            pollTimeout = 0;
        }

        /*
         * Wait for data
         */
        /*n =*/ (void) PollWait(&pdata, pollTimeout);

        /*
         * Select and drain the trigger pipe if necessary.
         */
        if (PollIn(&pdata, 0)) {
            if (ns_recv(queuePtr->pipe[0], charBuffer, 1u, 0) != 1) {
                Ns_Fatal("asynclogwriter: trigger ns_recv() failed: %s",
                         ns_sockstrerror(ns_sockerrno));
            }

            if (queuePtr->stopped) {
                /*
                 * Drain everything from the queue.
                 */
                for (curPtr = writePtr; curPtr != NULL; curPtr = curPtr->nextPtr) {
                    ssize_t written = ns_write(curPtr->fd, curPtr->buf, curPtr->bufsize);

                    if (unlikely(written != (ssize_t)curPtr->bufsize)) {
                        WriteWarningRaw("drain writer", curPtr->fd, curPtr->bufsize, written);
                    }
                }
                writePtr = NULL;

                for (curPtr = queuePtr->sockPtr; curPtr != NULL; curPtr = curPtr->nextPtr) {
                    ssize_t written = ns_write(curPtr->fd, curPtr->buf, curPtr->bufsize);

                    if (unlikely(written != (ssize_t)curPtr->bufsize)) {
                        WriteWarningRaw("drain queue", curPtr->fd, curPtr->bufsize, written);
                    }
                }
                queuePtr->sockPtr = NULL;

                /*
                 * Notify the caller (normally
                 * NsAsyncWriterQueueDisable()) that we are done.
                 */
                Ns_CondBroadcast(&queuePtr->cond);
            }
        }

        /*
         * Write to all available file descriptors
         */
        curPtr = writePtr;
        writePtr = NULL;

        while (curPtr != NULL) {
            ssize_t written;

            nextPtr = curPtr->nextPtr;
            status = NS_OK;

            /*
             * Write the actual data and allow for partial write operations.
             */
            written = ns_write(curPtr->fd, curPtr->buf, curPtr->bufsize);
            if (unlikely(written < 0)) {
                status = NS_ERROR;
            } else {
                curPtr->size -= (size_t)written;
                curPtr->nsent += written;
                curPtr->bufsize -= (size_t)written;
                if (curPtr->data != NULL) {
                    curPtr->buf += written;
                }
            }

            if (unlikely(status != NS_OK)) {
                AsyncWriterRelease(curPtr);
                queuePtr->queuesize--;
            } else {
                /*
                 * The write operation was successful. Check if there is
                 * some remaining data to write. If so, push the job back
                 * for the next round; if not, we are done with this
                 * request and can release the write buffer.
                 */
                if (curPtr->size > 0u) {
                    Push(curPtr, writePtr);
                } else {
                    AsyncWriterRelease(curPtr);
                    queuePtr->queuesize--;
                }
            }

            curPtr = nextPtr;
        }

        /*
         * Check for shutdown
         */
        stopping = queuePtr->shutdown;
        if (stopping) {
            curPtr = queuePtr->sockPtr;
            assert(writePtr == NULL);
            while (curPtr != NULL) {
                ssize_t written = ns_write(curPtr->fd, curPtr->buf, curPtr->bufsize);

                if (unlikely(written != (ssize_t)curPtr->bufsize)) {
                    WriteWarningRaw("shutdown", curPtr->fd, curPtr->bufsize, written);
                }
                curPtr = curPtr->nextPtr;
            }
        } else {
            /*
             * Add fresh jobs to the writer queue. This actually means
             * moving jobs from queuePtr->sockPtr (the name was kept to be
             * able to reuse the same queue structure as above) to the
             * currently active jobs in queuePtr->curPtr.
             */
            Ns_MutexLock(&queuePtr->lock);
            curPtr = queuePtr->sockPtr;
            queuePtr->sockPtr = NULL;
            while (curPtr != NULL) {
                nextPtr = curPtr->nextPtr;
                Push(curPtr, writePtr);
                queuePtr->queuesize++;
                curPtr = nextPtr;
            }
            queuePtr->curPtr = writePtr;
            Ns_MutexUnlock(&queuePtr->lock);
        }
    }

    PollFree(&pdata);

    queuePtr->stopped = NS_TRUE;
    Ns_Log(Notice, "exiting");
}

/*
 *----------------------------------------------------------------------
 *
 * AsyncLogfileWriteObjCmd -
 *
 *      Implements "ns_asynclogfile write" command. Write to a file
 *      descriptor via the async writer thread. The command handles
 *      partial write operations internally.
 *
 * Results:
 *      Standard Tcl result.
 *
 * Side effects:
 *      None.
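 *
 * Example (illustrative):
 *     set fd [ns_asynclogfile open /tmp/app.log]
 *     ns_asynclogfile write -sanitize 1 $fd "hello\n"
 *     ns_asynclogfile close $fd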
 *
 *----------------------------------------------------------------------
 */
static int
AsyncLogfileWriteObjCmd(ClientData UNUSED(clientData), Tcl_Interp *interp, int objc, Tcl_Obj *const* objv)
{
    int          result = TCL_OK, binary = (int)NS_FALSE, sanitize;
    Tcl_Obj     *stringObj;
    int          fd = 0;
    Ns_ObjvValueRange fd_range = {0, INT_MAX};
    Ns_ObjvValueRange sanitize_range = {0, 2};
    Ns_ObjvSpec opts[] = {
        {"-binary",   Ns_ObjvBool, &binary,   INT2PTR(NS_TRUE)},
        {"-sanitize", Ns_ObjvInt,  &sanitize, &sanitize_range},
        {NULL, NULL, NULL, NULL}
    };
    Ns_ObjvSpec args[] = {
        {"fd",     Ns_ObjvInt, &fd,        &fd_range},
        {"buffer", Ns_ObjvObj, &stringObj, NULL},
        {NULL, NULL, NULL, NULL}
    };

    /*
     * Take the config value as default for "-sanitize", but let the user
     * override it on a per-case basis.
     */
    sanitize = nsconf.sanitize_logfiles;

    if (unlikely(Ns_ParseObjv(opts, args, interp, 2, objc, objv) != NS_OK)) {
        result = TCL_ERROR;
    } else {
        const char   *buffer;
        int           length;
        Ns_ReturnCode rc;

        if (binary == (int)NS_TRUE || NsTclObjIsByteArray(stringObj)) {
            buffer = (const char *) Tcl_GetByteArrayFromObj(stringObj, &length);
        } else {
            buffer = Tcl_GetStringFromObj(stringObj, &length);
        }
        if (length > 0) {
            if (sanitize > 0) {
                Tcl_DString ds;
                bool        lastCharNewline = (buffer[length-1] == '\n');

                Tcl_DStringInit(&ds);
                if (lastCharNewline) {
                    length --;
                }
                Ns_DStringAppendPrintable(&ds, sanitize == 2, buffer, (size_t)length);
                if (lastCharNewline) {
                    Tcl_DStringAppend(&ds, "\n", 1);
                }
                rc = NsAsyncWrite(fd, ds.string, (size_t)ds.length);
                Tcl_DStringFree(&ds);

            } else {
                rc = NsAsyncWrite(fd, buffer, (size_t)length);
            }

            if (rc != NS_OK) {
                Ns_TclPrintfResult(interp, "ns_asynclogfile: error during write operation on fd %d: %s",
                                   fd, Tcl_PosixError(interp));
                result = TCL_ERROR;
            }
        } else {
            result = TCL_OK;
        }
    }

    return result;
}

/*
 *----------------------------------------------------------------------
 *
 * AsyncLogfileOpenObjCmd -
 *
 *      Implements "ns_asynclogfile open" command. The command opens a
 *      write-only log file and returns a thread-shareable handle
 *      (actually a numeric file descriptor) which can be used in
 *      subsequent "write" or "close" operations.
 *
 * Results:
 *      Standard Tcl result.
 *
 * Side effects:
 *      None.
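 *
 * Example (illustrative usage):
 *
 *     set fd [ns_asynclogfile open /var/log/app.log {APPEND}]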
 *
 *----------------------------------------------------------------------
 */
static int
AsyncLogfileOpenObjCmd(ClientData UNUSED(clientData), Tcl_Interp *interp, int objc, Tcl_Obj *const* objv)
{
    int           result = TCL_OK;
    unsigned int  flags = O_APPEND;
    char         *fileNameString;
    Tcl_Obj      *flagsObj = NULL;
    Ns_ObjvTable  flagTable[] = {
        {"APPEND", O_APPEND},
        {"EXCL",   O_EXCL},
#ifdef O_DSYNC
        {"DSYNC",  O_DSYNC},
#endif
#ifdef O_SYNC
        {"SYNC",   O_SYNC},
#endif
        {"TRUNC",  O_TRUNC},
        {NULL, 0u}
    };
    Ns_ObjvSpec   args[] = {
        {"filename", Ns_ObjvString, &fileNameString, NULL},
        {"?flags",   Ns_ObjvObj,    &flagsObj, NULL},
        //{"mode", Ns_ObjvString, &mode, NULL},
        {NULL, NULL, NULL, NULL}
    };

    if (unlikely(Ns_ParseObjv(NULL, args, interp, 2, objc, objv) != NS_OK)) {
        result = TCL_ERROR;

    } else if (flagsObj != NULL) {
        Tcl_Obj **ov;
        int       oc;

        result = Tcl_ListObjGetElements(interp, flagsObj, &oc, &ov);
        if (result == TCL_OK && oc > 0) {
            int i, opt;

            flags = 0u;
            for (i = 0; i < oc; i++) {
                result = Tcl_GetIndexFromObjStruct(interp, ov[i], flagTable,
                                                   (int)sizeof(flagTable[0]),
                                                   "flag", 0, &opt);
                if (result != TCL_OK) {
                    break;
                } else {
                    /*
                     * Combine all provided flags; a plain assignment
                     * would keep only the last element of the list.
                     */
                    flags |= flagTable[opt].value;
                }
            }
        }
    }

    if (result == TCL_OK) {
        int fd;

        fd = ns_open(fileNameString, (int)(O_CREAT | O_WRONLY | O_CLOEXEC | flags), 0644);

        if (unlikely(fd == NS_INVALID_FD)) {
            Ns_TclPrintfResult(interp, "could not open file '%s': %s",
                               fileNameString, Tcl_PosixError(interp));
            result = TCL_ERROR;
        } else {
            Tcl_SetObjResult(interp, Tcl_NewIntObj(fd));
        }
    }

    return result;
}

/*
 *----------------------------------------------------------------------
 *
 * AsyncLogfileCloseObjCmd -
 *
 *      Implements "ns_asynclogfile close" command. Closes the logfile
 *      previously created via "ns_asynclogfile open".
 *
 * Results:
 *      Standard Tcl result.
 *
 * Side effects:
 *      None.
 *
 *----------------------------------------------------------------------
 */
static int
AsyncLogfileCloseObjCmd(ClientData UNUSED(clientData), Tcl_Interp *interp, int objc, Tcl_Obj *const* objv)
{
    int               fd, result = TCL_OK;
    Ns_ObjvValueRange range = {0, INT_MAX};
    Ns_ObjvSpec       args[] = {
        {"fd", Ns_ObjvInt, &fd, &range},
        {NULL, NULL, NULL, NULL}
    };

    if (unlikely(Ns_ParseObjv(NULL, args, interp, 2, objc, objv) != NS_OK)) {
        result = TCL_ERROR;
    } else {
        int rc = ns_close(fd);

        if (rc != 0) {
            Ns_TclPrintfResult(interp, "could not close fd %d: %s",
                               fd, Tcl_PosixError(interp));
            result = TCL_ERROR;
        }
    }

    return result;
}

/*
 *----------------------------------------------------------------------
 *
 * NsTclAsyncLogfileObjCmd -
 *
 *      Wrapper for "ns_asynclogfile open|write|close" commands.
 *
 * Results:
 *      Standard Tcl result.
 *
 * Side effects:
 *      None.
 *
 *----------------------------------------------------------------------
 */
int
NsTclAsyncLogfileObjCmd(ClientData clientData, Tcl_Interp *interp, int objc, Tcl_Obj *const* objv)
{
    const Ns_SubCmdSpec subcmds[] = {
        {"open",  AsyncLogfileOpenObjCmd},
        {"write", AsyncLogfileWriteObjCmd},
        {"close", AsyncLogfileCloseObjCmd},
        {NULL, NULL}
    };

    return Ns_SubcmdObjv(subcmds, clientData, interp, objc, objv);
}

/*
 *----------------------------------------------------------------------
 *
 * LookupDriver --
 *
 *      Find a matching driver for the specified protocol and optionally the
 *      specified driver name.
 *
 * Results:
 *      Driver pointer or NULL on failure.
 *
 * Side effects:
 *      When no driver is found, an error is left in the interp result.
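 *
 * The driver name refers to the name of the loaded module (e.g. "nssock");
 * when NULL is passed, the first driver with a matching protocol is used.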
* *---------------------------------------------------------------------- */ static Driver * LookupDriver(Tcl_Interp *interp, const char* protocol, const char *driverName) { Driver *drvPtr; NS_NONNULL_ASSERT(interp != NULL); NS_NONNULL_ASSERT(protocol != NULL); for (drvPtr = firstDrvPtr; drvPtr != NULL; drvPtr = drvPtr->nextPtr) { Ns_Log(DriverDebug, "... check Driver proto <%s> server %s name %s location %s", drvPtr->protocol, drvPtr->server, drvPtr->threadName, drvPtr->location); if (STREQ(drvPtr->protocol, protocol)) { if (driverName == NULL) { /* * If there is no driver name given, take the first driver * with the matching protocol. */ break; } else if (STREQ(drvPtr->moduleName, driverName)) { /* * The driver name (name of the loaded module) is equal */ break; } } } if (drvPtr == NULL) { if (driverName != NULL) { Ns_TclPrintfResult(interp, "no driver for protocol '%s' & driver name '%s' found", protocol, driverName); } else { Ns_TclPrintfResult(interp, "no driver for protocol '%s' found", protocol); } } return drvPtr; } /* *---------------------------------------------------------------------- * * NSDriverClientOpen -- * * Open a client HTTP connection using the driver interface * * Results: * Tcl return code. * * Side effects: * Opening a connection * *---------------------------------------------------------------------- */ int NSDriverClientOpen(Tcl_Interp *interp, const char *driverName, const char *url, const char *httpMethod, const char *version, const Ns_Time *timeoutPtr, Sock **sockPtrPtr) { char *protocol, *host, *portString, *path, *tail, *url2; int result = TCL_OK; NS_NONNULL_ASSERT(interp != NULL); NS_NONNULL_ASSERT(url != NULL); NS_NONNULL_ASSERT(httpMethod != NULL); NS_NONNULL_ASSERT(version != NULL); NS_NONNULL_ASSERT(sockPtrPtr != NULL); url2 = ns_strdup(url); /* * We need here a fully qualified URL, otherwise raise an error */ if (unlikely(Ns_ParseUrl(url2, &protocol, &host, &portString, &path, &tail) != NS_OK) || protocol == NULL || host == NULL || path == NULL || tail == NULL) { Ns_Log(Notice, "driver: invalid URL '%s' passed to NSDriverClientOpen", url2); result = TCL_ERROR; } else { Driver *drvPtr; unsigned short portNr = 0u; /* make static checker happy */ assert(protocol != NULL); assert(host != NULL); assert(path != NULL); assert(tail != NULL); /* * Find a matching driver for the specified protocol and optionally * the specified driver name. 
*/ drvPtr = LookupDriver(interp, protocol, driverName); if (drvPtr == NULL) { result = TCL_ERROR; } else if (portString != NULL) { portNr = (unsigned short) strtol(portString, NULL, 10); } else if (drvPtr->defport != 0u) { /* * Get the default port from the driver structure; */ portNr = drvPtr->defport; } else { Ns_TclPrintfResult(interp, "no default port for protocol '%s' defined", protocol); result = TCL_ERROR; } if (result == TCL_OK) { NS_SOCKET sock; Ns_ReturnCode status; sock = Ns_SockTimedConnect2(host, portNr, NULL, 0u, timeoutPtr, &status); if (sock == NS_INVALID_SOCKET) { Ns_SockConnectError(interp, host, portNr, status); result = TCL_ERROR; } else { const char *query; Tcl_DString ds, *dsPtr = &ds; Request *reqPtr; Sock *sockPtr; assert(drvPtr != NULL); sockPtr = SockNew(drvPtr); sockPtr->sock = sock; sockPtr->servPtr = drvPtr->servPtr; if (sockPtr->servPtr == NULL) { const NsInterp *itPtr = NsGetInterpData(interp); sockPtr->servPtr = itPtr->servPtr; } RequestNew(sockPtr); Ns_GetTime(&sockPtr->acceptTime); reqPtr = sockPtr->reqPtr; Tcl_DStringInit(dsPtr); Ns_DStringAppend(dsPtr, httpMethod); Ns_StrToUpper(Ns_DStringValue(dsPtr)); Tcl_DStringAppend(dsPtr, " /", 2); if (*path != '\0') { if (*path == '/') { path ++; } Tcl_DStringAppend(dsPtr, path, -1); Tcl_DStringAppend(dsPtr, "/", 1); } Tcl_DStringAppend(dsPtr, tail, -1); Tcl_DStringAppend(dsPtr, " HTTP/", 6); Tcl_DStringAppend(dsPtr, version, -1); reqPtr->request.line = Ns_DStringExport(dsPtr); reqPtr->request.method = ns_strdup(httpMethod); reqPtr->request.protocol = ns_strdup(protocol); reqPtr->request.host = ns_strdup(host); query = strchr(tail, INTCHAR('?')); if (query != NULL) { reqPtr->request.query = ns_strdup(query+1); } else { reqPtr->request.query = NULL; } /*Ns_Log(Notice, "REQUEST LINE <%s> query <%s>", reqPtr->request.line, reqPtr->request.query);*/ *sockPtrPtr = sockPtr; } } } ns_free(url2); return result; } /* *---------------------------------------------------------------------- * * NSDriverSockNew -- * * Create a Sock structure based on the driver interface * * Results: * Tcl return code. * * Side effects: * Accepting a connection * *---------------------------------------------------------------------- */ int NSDriverSockNew(Tcl_Interp *interp, NS_SOCKET sock, const char *protocol, const char *driverName, const char *methodName, Sock **sockPtrPtr) { int result = TCL_OK; Driver *drvPtr; NS_NONNULL_ASSERT(interp != NULL); NS_NONNULL_ASSERT(protocol != NULL); NS_NONNULL_ASSERT(methodName != NULL); NS_NONNULL_ASSERT(sockPtrPtr != NULL); drvPtr = LookupDriver(interp, protocol, driverName); if (drvPtr == NULL) { result = TCL_ERROR; } else { Sock *sockPtr; Tcl_DString ds, *dsPtr = &ds; Request *reqPtr; sockPtr = SockNew(drvPtr); sockPtr->servPtr = drvPtr->servPtr; sockPtr->sock = sock; RequestNew(sockPtr); // not sure if needed // peerAddr is missing Ns_GetTime(&sockPtr->acceptTime); reqPtr = sockPtr->reqPtr; Tcl_DStringInit(dsPtr); Ns_DStringAppend(dsPtr, methodName); Ns_StrToUpper(Ns_DStringValue(dsPtr)); reqPtr->request.line = Ns_DStringExport(dsPtr); reqPtr->request.method = ns_strdup(methodName); reqPtr->request.protocol = ns_strdup(protocol); reqPtr->request.host = NULL; reqPtr->request.query = NULL; /* Ns_Log(Notice, "REQUEST LINE <%s>", reqPtr->request.line);*/ *sockPtrPtr = sockPtr; } return result; } /* * Local Variables: * mode: c * c-basic-offset: 4 * fill-column: 78 * indent-tabs-mode: nil * End: */
/* * The contents of this file are subject to the Mozilla Public License * Version 1.1 (the "License"); you may not use this file except in * compliance with the License. You may obtain a copy of the License at * http://mozilla.org/. * * Software distributed under the License is distributed on an "AS IS" * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See * the License for the specific language governing rights and limitations * under the License. * * The Original Code is AOLserver Code and related documentation * distributed by AOL. * * The Initial Developer of the Original Code is America Online, * Inc. Portions created by AOL are Copyright (C) 1999 America Online, * Inc. All Rights Reserved. * * Alternatively, the contents of this file may be used under the terms * of the GNU General Public License (the "GPL"), in which case the * provisions of GPL are applicable instead of those above. If you wish * to allow use of your version of this file only under the terms of the * GPL and not to allow others to use your version of this file under the * License, indicate your decision by deleting the provisions above and * replace them with the notice and other provisions required by the GPL. * If you do not delete the provisions above, a recipient may use your * version of this file under either the License or the GPL. */ /* * driver.c -- * * Connection I/O for loadable socket drivers. */ #include "nsd.h" /* * The following are valid driver state flags. */ #define DRIVER_STARTED 1u #define DRIVER_STOPPED 2u #define DRIVER_SHUTDOWN 4u #define DRIVER_FAILED 8u /* * Constants for SockState return and reason codes. */ typedef enum { SOCK_READY = 0, SOCK_MORE = 1, SOCK_SPOOL = 2, SOCK_ERROR = -1, SOCK_CLOSE = -2, SOCK_CLOSETIMEOUT = -3, SOCK_READTIMEOUT = -4, SOCK_WRITETIMEOUT = -5, SOCK_READERROR = -6, SOCK_WRITEERROR = -7, SOCK_SHUTERROR = -8, SOCK_BADREQUEST = -9, SOCK_ENTITYTOOLARGE = -10, SOCK_BADHEADER = -11, SOCK_TOOMANYHEADERS = -12 } SockState; /* * Subset for spooler states */ typedef enum { SPOOLER_CLOSE = SOCK_CLOSE, SPOOLER_OK = SOCK_READY, SPOOLER_READERROR = SOCK_READERROR, SPOOLER_WRITEERROR = SOCK_WRITEERROR, SPOOLER_CLOSETIMEOUT = SOCK_CLOSETIMEOUT } SpoolerState; typedef struct { SpoolerState spoolerState; SockState sockState; } SpoolerStateMap; /* * ServerMap maintains Host header to server mappings. */ typedef struct ServerMap { NsServer *servPtr; char location[1]; } ServerMap; /* * The following maintains the spooler state mapping */ static const SpoolerStateMap spoolerStateMap[] = { {SPOOLER_CLOSE, SOCK_CLOSE}, {SPOOLER_READERROR, SOCK_READERROR}, {SPOOLER_WRITEERROR, SOCK_WRITEERROR}, {SPOOLER_CLOSETIMEOUT, SOCK_CLOSETIMEOUT}, {SPOOLER_OK, SOCK_READY} }; /* * The following structure manages polling. The PollIn macro is * used for the common case of checking for readability. */ typedef struct PollData { unsigned int nfds; /* Number of fds being monitored. */ unsigned int maxfds; /* Max fds (will grow as needed). */ struct pollfd *pfds; /* Dynamic array of poll structs. */ Ns_Time timeout; /* Min timeout, if any, for next spin. */ } PollData; #define PollIn(ppd, i) (((ppd)->pfds[(i)].revents & POLLIN) == POLLIN ) #define PollOut(ppd, i) (((ppd)->pfds[(i)].revents & POLLOUT) == POLLOUT) #define PollHup(ppd, i) (((ppd)->pfds[(i)].revents & POLLHUP) == POLLHUP) /* * Collected informationof writer threads for per pool rates, necessary for * per pool bandwidth management. 
*/ typedef struct ConnPoolInfo { size_t threadSlot; int currentPoolRate; int deltaPercentage; } ConnPoolInfo; /* * The following structure maintains writer socket */ typedef struct WriterSock { struct WriterSock *nextPtr; struct Sock *sockPtr; struct SpoolerQueue *queuePtr; struct Conn *connPtr; SpoolerState status; int err; int refCount; unsigned int flags; Tcl_WideInt nsent; size_t size; NsWriterStreamState doStream; int fd; char *headerString; struct ConnPool *poolPtr; union { struct { struct iovec *bufs; /* incoming bufs to be sent */ int nbufs; int bufIdx; struct iovec sbufs[UIO_SMALLIOV]; /* scratch bufs for handling partial sends */ int nsbufs; int sbufIdx; struct iovec preallocated_bufs[UIO_SMALLIOV]; struct FileMap fmap; } mem; struct { size_t maxsize; size_t bufsize; off_t bufoffset; size_t toRead; unsigned char *buf; Ns_FileVec *bufs; int nbufs; int currentbuf; Ns_Mutex fdlock; } file; } c; char *clientData; Ns_Time startTime; int rateLimit; int currentRate; ConnPoolInfo *infoPtr; bool keep; } WriterSock; /* * Async writer definitions */ typedef struct AsyncWriter { Ns_Mutex lock; /* Lock around writer queues */ SpoolerQueue *firstPtr; /* List of writer threads */ } AsyncWriter; /* * AsyncWriteData is similar to WriterSock */ typedef struct AsyncWriteData { struct AsyncWriteData *nextPtr; char *data; int fd; Tcl_WideInt nsent; size_t size; size_t bufsize; const char *buf; } AsyncWriteData; static AsyncWriter *asyncWriter = NULL; /* * Static functions defined in this file. */ static Ns_ThreadProc DriverThread; static Ns_ThreadProc SpoolerThread; static Ns_ThreadProc WriterThread; static Ns_ThreadProc AsyncWriterThread; static Tcl_ObjCmdProc WriterListObjCmd; static Tcl_ObjCmdProc WriterSizeObjCmd; static Tcl_ObjCmdProc WriterStreamingObjCmd; static Tcl_ObjCmdProc WriterSubmitObjCmd; static Tcl_ObjCmdProc WriterSubmitFileObjCmd; static Tcl_ObjCmdProc AsyncLogfileWriteObjCmd; static Tcl_ObjCmdProc AsyncLogfileOpenObjCmd; static Tcl_ObjCmdProc AsyncLogfileCloseObjCmd; static Ns_ReturnCode DriverWriterFromObj(Tcl_Interp *interp, Tcl_Obj *driverObj, Ns_Conn *conn, DrvWriter **wrPtrPtr) NS_GNUC_NONNULL(1) NS_GNUC_NONNULL(4); static NS_SOCKET DriverListen(Driver *drvPtr, const char *bindaddr) NS_GNUC_NONNULL(1) NS_GNUC_NONNULL(2); static NS_DRIVER_ACCEPT_STATUS DriverAccept(Sock *sockPtr, NS_SOCKET sock) NS_GNUC_NONNULL(1); static bool DriverKeep(Sock *sockPtr) NS_GNUC_NONNULL(1); static void DriverClose(Sock *sockPtr) NS_GNUC_NONNULL(1); static Ns_ReturnCode DriverInit(const char *server, const char *moduleName, const char *threadName, const Ns_DriverInitData *init, NsServer *servPtr, const char *path, const char *bindaddrs, const char *defserver, const char *host) NS_GNUC_NONNULL(2) NS_GNUC_NONNULL(3) NS_GNUC_NONNULL(4) NS_GNUC_NONNULL(6) NS_GNUC_NONNULL(7) NS_GNUC_NONNULL(9); static bool DriverModuleInitialized(const char *module) NS_GNUC_NONNULL(1); static void SockSetServer(Sock *sockPtr) NS_GNUC_NONNULL(1); static SockState SockAccept(Driver *drvPtr, NS_SOCKET sock, Sock **sockPtrPtr, const Ns_Time *nowPtr) NS_GNUC_NONNULL(1); static Ns_ReturnCode SockQueue(Sock *sockPtr, const Ns_Time *timePtr) NS_GNUC_NONNULL(1); static Sock *SockNew(Driver *drvPtr) NS_GNUC_NONNULL(1) NS_GNUC_RETURNS_NONNULL; static void SockRelease(Sock *sockPtr, SockState reason, int err) NS_GNUC_NONNULL(1); static void SockError(Sock *sockPtr, SockState reason, int err) NS_GNUC_NONNULL(1); static void SockSendResponse(Sock *sockPtr, int code, const char *errMsg) NS_GNUC_NONNULL(1) NS_GNUC_NONNULL(3); static 
void SockTrigger(NS_SOCKET sock); static void SockTimeout(Sock *sockPtr, const Ns_Time *nowPtr, const Ns_Time *timeout) NS_GNUC_NONNULL(1); static void SockClose(Sock *sockPtr, int keep) NS_GNUC_NONNULL(1); static SockState SockRead(Sock *sockPtr, int spooler, const Ns_Time *timePtr) NS_GNUC_NONNULL(1); static SockState SockParse(Sock *sockPtr) NS_GNUC_NONNULL(1); static void SockPoll(Sock *sockPtr, short type, PollData *pdata) NS_GNUC_NONNULL(1) NS_GNUC_NONNULL(3); static int SockSpoolerQueue(Driver *drvPtr, Sock *sockPtr) NS_GNUC_NONNULL(1) NS_GNUC_NONNULL(2); static void SpoolerQueueStart(SpoolerQueue *queuePtr, Ns_ThreadProc *proc) NS_GNUC_NONNULL(2); static void SpoolerQueueStop(SpoolerQueue *queuePtr, const Ns_Time *timeoutPtr, const char *name) NS_GNUC_NONNULL(2) NS_GNUC_NONNULL(3); static void PollCreate(PollData *pdata) NS_GNUC_NONNULL(1); static void PollFree(PollData *pdata) NS_GNUC_NONNULL(1); static void PollReset(PollData *pdata) NS_GNUC_NONNULL(1); static NS_POLL_NFDS_TYPE PollSet(PollData *pdata, NS_SOCKET sock, short type, const Ns_Time *timeoutPtr) NS_GNUC_NONNULL(1); static int PollWait(const PollData *pdata, int timeout) NS_GNUC_NONNULL(1); static SockState ChunkedDecode(Request *reqPtr, bool update) NS_GNUC_NONNULL(1); static WriterSock *WriterSockRequire(const Conn *connPtr) NS_GNUC_NONNULL(1); static void WriterSockRelease(WriterSock *wrSockPtr) NS_GNUC_NONNULL(1); static SpoolerState WriterReadFromSpool(WriterSock *curPtr) NS_GNUC_NONNULL(1); static SpoolerState WriterSend(WriterSock *curPtr, int *err) NS_GNUC_NONNULL(1) NS_GNUC_NONNULL(2); static Ns_ReturnCode WriterSetupStreamingMode(Conn *connPtr, struct iovec *bufs, int nbufs, int *fdPtr) NS_GNUC_NONNULL(1) NS_GNUC_NONNULL(2) NS_GNUC_NONNULL(4); static void WriterSockFileVecCleanup(WriterSock *wrSockPtr) NS_GNUC_NONNULL(1); static int WriterGetMemunitFromDict(Tcl_Interp *interp, Tcl_Obj *dictObj, Tcl_Obj *keyObj, Ns_ObjvValueRange *rangePtr, Tcl_WideInt *valuePtr) NS_GNUC_NONNULL(1) NS_GNUC_NONNULL(2) NS_GNUC_NONNULL(3) NS_GNUC_NONNULL(5); static void AsyncWriterRelease(AsyncWriteData *wdPtr) NS_GNUC_NONNULL(1); static void WriteWarningRaw(const char *msg, int fd, size_t wantWrite, ssize_t written) NS_GNUC_NONNULL(1); static const char *GetSockStateName(SockState sockState); static size_t EndOfHeader(Sock *sockPtr) NS_GNUC_NONNULL(1); static void RequestNew(Sock *sockPtr) NS_GNUC_NONNULL(1); static void RequestFree(Sock *sockPtr) NS_GNUC_NONNULL(1); static void LogBuffer(Ns_LogSeverity severity, const char *msg, const char *buffer, size_t len) NS_GNUC_NONNULL(2) NS_GNUC_NONNULL(3); static void ServerMapEntryAdd(Tcl_DString *dsPtr, const char *host, NsServer *servPtr, Driver *drvPtr, bool addDefaultMapEntry) NS_GNUC_NONNULL(1) NS_GNUC_NONNULL(2) NS_GNUC_NONNULL(3) NS_GNUC_NONNULL(4); static Driver *LookupDriver(Tcl_Interp *interp, const char* protocol, const char *driverName) NS_GNUC_NONNULL(1) NS_GNUC_NONNULL(2); static void WriterPerPoolRates(WriterSock *writePtr, Tcl_HashTable *pools) NS_GNUC_NONNULL(1) NS_GNUC_NONNULL(2); static ConnPoolInfo *WriterGetInfoPtr(WriterSock *curPtr, Tcl_HashTable *pools) NS_GNUC_NONNULL(1) NS_GNUC_NONNULL(2); /* * Global variables defined in this file. 
 */

Ns_LogSeverity Ns_LogTaskDebug;
Ns_LogSeverity Ns_LogRequestDebug;
Ns_LogSeverity Ns_LogConnchanDebug;
Ns_LogSeverity Ns_LogUrlspaceDebug;
NS_EXPORT Ns_LogSeverity Ns_LogAccessDebug;
Ns_LogSeverity Ns_LogTimeoutDebug;

bool NsWriterBandwidthManagement = NS_FALSE;

static Ns_LogSeverity WriterDebug;    /* Severity at which to log verbose debugging. */
static Ns_LogSeverity DriverDebug;    /* Severity at which to log verbose debugging. */

static Ns_Mutex reqLock    = NULL;    /* Lock for the allocated Request structure pool */
static Ns_Mutex writerlock = NULL;    /* Lock for updating streaming information in the writer */

static Request *firstReqPtr = NULL;   /* Allocated request structures kept in a pool */
static Driver  *firstDrvPtr = NULL;   /* First in list of all drivers */

#define Push(x, xs) ((x)->nextPtr = (xs), (xs) = (x))


/*
 *----------------------------------------------------------------------
 *
 * WriteWarningRaw --
 *
 *      Write a warning message to stderr. This function is for cases where
 *      writing via Ns_Log cannot be used (e.g., in the AsyncWriter, which
 *      is also used for writing to the system log).
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      Writes a line to stderr.
 *
 *----------------------------------------------------------------------
 */
static void
WriteWarningRaw(const char *msg, int fd, size_t wantWrite, ssize_t written)
{
    fprintf(stderr, "%s: Warning: wanted to write %" PRIuz " bytes, wrote %ld to file descriptor %d\n",
            msg, wantWrite, (long)written, fd);
}

/*
 *----------------------------------------------------------------------
 *
 * GetSockStateName --
 *
 *      Return human-readable names for SockState values.
 *
 * Results:
 *      String.
 *
 * Side effects:
 *      None.
 *
 *----------------------------------------------------------------------
 */
static const char *
GetSockStateName(SockState sockState)
{
    int sockStateInt = (int)sockState;
    static const char *sockStateStrings[] = {
        "SOCK_READY",
        "SOCK_MORE",
        "SOCK_SPOOL",
        "SOCK_ERROR",
        "SOCK_CLOSE",
        "SOCK_CLOSETIMEOUT",
        "SOCK_READTIMEOUT",
        "SOCK_WRITETIMEOUT",
        "SOCK_READERROR",
        "SOCK_WRITEERROR",
        "SOCK_SHUTERROR",
        "SOCK_BADREQUEST",
        "SOCK_ENTITYTOOLARGE",
        "SOCK_BADHEADER",
        "SOCK_TOOMANYHEADERS",
        NULL
    };

    if (sockStateInt < 0) {
        sockStateInt = (- sockStateInt) + 2;
    }
    assert(sockStateInt < Ns_NrElements(sockStateStrings));

    return sockStateStrings[sockStateInt];
}

/*
 *----------------------------------------------------------------------
 *
 * NsInitDrivers --
 *
 *      Init drivers system.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      None.
 *
 *----------------------------------------------------------------------
 */
void
NsInitDrivers(void)
{
    DriverDebug = Ns_CreateLogSeverity("Debug(ns:driver)");
    WriterDebug = Ns_CreateLogSeverity("Debug(writer)");
    Ns_LogTaskDebug = Ns_CreateLogSeverity("Debug(task)");
    Ns_LogRequestDebug = Ns_CreateLogSeverity("Debug(request)");
    Ns_LogConnchanDebug = Ns_CreateLogSeverity("Debug(connchan)");
    Ns_LogUrlspaceDebug = Ns_CreateLogSeverity("Debug(urlspace)");
    Ns_LogAccessDebug = Ns_CreateLogSeverity("Debug(access)");
    Ns_LogTimeoutDebug = Ns_CreateLogSeverity("Debug(timeout)");
    Ns_MutexInit(&reqLock);
    Ns_MutexInit(&writerlock);
    Ns_MutexSetName2(&reqLock, "ns:driver", "requestpool");
    Ns_MutexSetName2(&writerlock, "ns:writer", "stream");
}

/*
 *----------------------------------------------------------------------
 *
 * DriverModuleInitialized --
 *
 *      Check if a driver with the specified name is already initialized.
 *
 * Results:
 *      Boolean.
 *
 * Side effects:
 *      None.
* *---------------------------------------------------------------------- */ static bool DriverModuleInitialized(const char *module) { Driver *drvPtr; bool found = NS_FALSE; NS_NONNULL_ASSERT(module != NULL); for (drvPtr = firstDrvPtr; drvPtr != NULL; drvPtr = drvPtr->nextPtr) { if (strcmp(drvPtr->moduleName, module) == 0) { found = NS_TRUE; Ns_Log(Notice, "Driver %s is already initialized", module); break; } } return found; } /* *---------------------------------------------------------------------- * * Ns_DriverInit -- * * Initialize a driver. * * Results: * NS_OK if initialized, NS_ERROR if config or other error. * * Side effects: * Listen socket will be opened later in NsStartDrivers. * *---------------------------------------------------------------------- */ Ns_ReturnCode Ns_DriverInit(const char *server, const char *module, const Ns_DriverInitData *init) { Ns_ReturnCode status = NS_OK; NsServer *servPtr = NULL; bool alreadyInitialized = NS_FALSE; NS_NONNULL_ASSERT(module != NULL); NS_NONNULL_ASSERT(init != NULL); /* * If a server is provided, servPtr must be set. */ if (server != NULL) { servPtr = NsGetServer(server); if (unlikely(servPtr == NULL)) { Ns_Log(Bug, "cannot lookup server structure for server: %s", module); status = NS_ERROR; } } else { alreadyInitialized = DriverModuleInitialized(module); } /* * Check versions of drivers. */ if (status == NS_OK && init->version < NS_DRIVER_VERSION_4) { Ns_Log(Warning, "%s: driver version is too old (version %d), Version 4 is recommended", module, init->version); } #ifdef HAVE_IPV6 if (status == NS_OK && init->version < NS_DRIVER_VERSION_3) { Ns_Log(Error, "%s: driver version is too old (version %d) and does not support IPv6", module, init->version); status = NS_ERROR; } #endif if (status == NS_OK && init->version < NS_DRIVER_VERSION_2) { Ns_Log(Error, "%s: version field of driver is invalid: %d", module, init->version); status = NS_ERROR; } if (!alreadyInitialized && status == NS_OK) { const char *path, *host, *address, *defserver; bool noHostNameGiven; int nrDrivers, nrBindaddrs = 0, result; Ns_Set *set; Tcl_Obj *bindaddrsObj, **objv; path = ((init->path != NULL) ? init->path : Ns_ConfigGetPath(server, module, (char *)0L)); set = Ns_ConfigCreateSection(path); /* * Determine the "defaultserver" the "hostname" / "address" for * binding to and/or the HTTP location string. */ defserver = Ns_ConfigGetValue(path, "defaultserver"); address = Ns_ConfigGetValue(path, "address"); host = Ns_ConfigGetValue(path, "hostname"); noHostNameGiven = (host == NULL); /* * If the listen address was not specified, attempt to determine it * through a DNS lookup of the specified hostname or the server's * primary hostname. 
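         *
         * An illustrative configuration section providing these values
         * (the concrete names used here are examples, not from this file):
         *
         *     ns_section ns/server/server1/module/nssock
         *     ns_param   hostname www.example.com
         *     ns_param   address  0.0.0.0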
*/ if (address == NULL) { Tcl_DString ds; Tcl_DStringInit(&ds); if (noHostNameGiven) { host = Ns_InfoHostname(); } if (Ns_GetAllAddrByHost(&ds, host) == NS_TRUE) { address = ns_strdup(Tcl_DStringValue(&ds)); if (path != NULL) { Ns_SetUpdate(set, "address", address); } Ns_Log(Notice, "no address given, obtained address '%s' from host name %s", address, host); } Tcl_DStringFree(&ds); } if (address == NULL) { address = NS_IP_UNSPECIFIED; Ns_Log(Notice, "no address given, set address to unspecified address %s", address); } bindaddrsObj = Tcl_NewStringObj(address, -1); result = Tcl_ListObjGetElements(NULL, bindaddrsObj, &nrBindaddrs, &objv); if (result != TCL_OK || nrBindaddrs < 1 || nrBindaddrs >= MAX_LISTEN_ADDR_PER_DRIVER) { Ns_Fatal("%s: bindaddrs '%s' is not a valid Tcl list containing addresses (max %d)", module, address, MAX_LISTEN_ADDR_PER_DRIVER); } Tcl_IncrRefCount(bindaddrsObj); /* * If the hostname was not specified and not determined by the lookup * above, set it to the first specified or derived IP address string. */ if (host == NULL) { host = ns_strdup(Tcl_GetString(objv[0])); } if (noHostNameGiven && host != NULL && path != NULL) { Ns_SetUpdate(set, "hostname", host); } Tcl_DecrRefCount(bindaddrsObj); /* * Get configured number of driver threads. */ nrDrivers = Ns_ConfigIntRange(path, "driverthreads", 1, 1, 64); if (nrDrivers > 1) { #if !defined(SO_REUSEPORT) Ns_Log(Warning, "server %s module %s requests %d driverthreads, but is not supported by the operating system", server, module, nrDrivers); Ns_SetUpdate(set, "driverthreads", "1"); nrDrivers = 1; #endif } /* * The common parameters are determined, create the driver thread(s) */ { size_t maxModuleNameLength = strlen(module) + (size_t)TCL_INTEGER_SPACE + 1u; char *moduleName = ns_malloc(maxModuleNameLength); int i; if (host == NULL) { host = Ns_InfoHostname(); } for (i = 0; i < nrDrivers; i++) { snprintf(moduleName, maxModuleNameLength, "%s:%d", module, i); status = DriverInit(server, module, moduleName, init, servPtr, path, address, defserver, host); if (status != NS_OK) { break; } } ns_free(moduleName); } } return status; } /* *---------------------------------------------------------------------- * * ServerMapEntryAdd -- * * Add an entry to the virtual server map. The entry consists of the * value as provided by the host header field and location string, * containing as well the protocol. * * Results: * None * * Side effects: * Potentially adding an entry to the virtual server map. 
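 *
 *      For example (illustrative), the host "www.example.com:8000" combined
 *      with an "http" driver is stored with the location
 *      "http://www.example.com:8000".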
 *
 *----------------------------------------------------------------------
 */
static void
ServerMapEntryAdd(Tcl_DString *dsPtr, const char *host, NsServer *servPtr, Driver *drvPtr,
                  bool addDefaultMapEntry)
{
    Tcl_HashEntry *hPtr;
    int            isNew;

    NS_NONNULL_ASSERT(dsPtr != NULL);
    NS_NONNULL_ASSERT(host != NULL);
    NS_NONNULL_ASSERT(servPtr != NULL);
    NS_NONNULL_ASSERT(drvPtr != NULL);

    hPtr = Tcl_CreateHashEntry(&drvPtr->hosts, host, &isNew);
    if (isNew != 0) {
        ServerMap *mapPtr;

        (void) Ns_DStringVarAppend(dsPtr, drvPtr->protocol, "://", host, (char *)0L);
        mapPtr = ns_malloc(sizeof(ServerMap) + (size_t)dsPtr->length);
        mapPtr->servPtr = servPtr;
        memcpy(mapPtr->location, dsPtr->string, (size_t)dsPtr->length + 1u);

        Tcl_SetHashValue(hPtr, mapPtr);
        Ns_Log(Notice, "%s: adding virtual host entry for host <%s> location: %s mapped to server: %s",
               drvPtr->threadName, host, mapPtr->location, servPtr->server);

        if (addDefaultMapEntry) {
            drvPtr->defMapPtr = mapPtr;
        }

        /*
         * Always reset the Tcl_DString.
         */
        Ns_DStringSetLength(dsPtr, 0);
    } else {
        Ns_Log(Notice, "%s: ignore duplicate virtual host entry: %s",
               drvPtr->threadName, host);
    }
}

/*
 *----------------------------------------------------------------------
 *
 * NsDriverMapVirtualServers --
 *
 *      Map "Host:" headers for drivers not bound to physical servers. This
 *      function has to be called at a time when all servers are already
 *      defined, such that NsGetServer() can succeed.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      Add an entry to the virtual server map via ServerMapEntryAdd().
 *
 *----------------------------------------------------------------------
 */
void
NsDriverMapVirtualServers(void)
{
    Driver *drvPtr;

    for (drvPtr = firstDrvPtr; drvPtr != NULL; drvPtr = drvPtr->nextPtr) {
        const Ns_Set *lset;
        size_t        j;
        Tcl_DString   ds, *dsPtr = &ds;
        const char   *path, *defserver, *moduleName;

        moduleName = drvPtr->moduleName;
        defserver  = drvPtr->defserver;

        /*
         * Check for a "/servers" section for this driver module.
         */
        path = Ns_ConfigGetPath(NULL, moduleName, "servers", (char *)0L);
        lset = Ns_ConfigGetSection(path);

        if (lset == NULL || Ns_SetSize(lset) == 0u) {
            /*
             * The driver module has no (or an empty) ".../servers" section,
             * so no mapping from host names to virtual servers is defined.
             */
            if (drvPtr->server == NULL) {
                /*
                 * We have a global driver module. If there is at least a
                 * default server configured, we can use this for the mapping
                 * to the default server.
                 */
                if (defserver != NULL) {
                    NsServer *servPtr = NsGetServer(defserver);

                    Tcl_DStringInit(dsPtr);
                    ServerMapEntryAdd(dsPtr, Ns_InfoHostname(), servPtr, drvPtr, NS_TRUE);
                    Tcl_DStringFree(dsPtr);
                    Ns_Log(Notice, "Global driver has no mapping from host to server (section '%s' missing)",
                           moduleName);
                } else {
                    /*
                     * Global driver, which has no default server, and no servers section.
                     */
                    Ns_Fatal("%s: virtual servers configured,"
                             " but '%s' has no defaultserver defined", moduleName, path);
                }
            }
            continue;
        }

        /*
         * We have a ".../servers" section; the driver might be global or
         * local. It is not clear why we need the server map for the local
         * driver, but we keep it for compatibility.
         */
        if (defserver == NULL) {
            if (drvPtr->server != NULL) {
                /*
                 * We have a local (server-specific) driver. Since the code
                 * below assumes that we have a "defserver" set, we take the
                 * actual server as defserver.
                 */
                defserver = drvPtr->server;

            } else {
                /*
                 * We have a global driver, but no defserver.
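                 * Without a "defaultserver", requests whose "Host:" header
                 * matches no entry could not be mapped to a server, so this
                 * is treated as a configuration error. It can be fixed with
                 * a config entry like the following (names are illustrative):
                 *
                 *     ns_section ns/module/nssock
                 *     ns_param   defaultserver server1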
*/ Ns_Fatal("%s: virtual servers configured," " but '%s' has no defaultserver defined", moduleName, path); } } assert(defserver != NULL); drvPtr->defMapPtr = NULL; Ns_DStringInit(dsPtr); for (j = 0u; j < Ns_SetSize(lset); ++j) { const char *server = Ns_SetKey(lset, j); const char *host = Ns_SetValue(lset, j); NsServer *servPtr; /* * Perform an explicit lookup of the server. */ servPtr = NsGetServer(server); if (servPtr == NULL) { Ns_Log(Error, "%s: no such server: %s", moduleName, server); } else { char *writableHost, *hostName, *portStart; writableHost = ns_strdup(host); Ns_HttpParseHost(writableHost, &hostName, &portStart); if (portStart == NULL) { Tcl_DString hostDString; /* * The provided host entry does NOT contain a port. * * Add the provided entry to the virtual server map only, * when the configured port is the default port for the * protocol. */ if (drvPtr->port == drvPtr->defport) { ServerMapEntryAdd(dsPtr, host, servPtr, drvPtr, STREQ(defserver, server)); } /* * Auto-add configured port: Add always an entry with the * explicitly configured port of the driver. */ Tcl_DStringInit(&hostDString); Tcl_DStringAppend(&hostDString, host, -1); (void) Ns_DStringPrintf(&hostDString, ":%hu", drvPtr->port); ServerMapEntryAdd(dsPtr, hostDString.string, servPtr, drvPtr, STREQ(defserver, server)); Tcl_DStringFree(&hostDString); } else { /* * The provided host entry does contain a port. * * In case, the provided port is equal to the configured port * of the driver, add an entry. */ unsigned short providedPort = (unsigned short)strtol(portStart+1, NULL, 10); if (providedPort == drvPtr->port) { ServerMapEntryAdd(dsPtr, host, servPtr, drvPtr, STREQ(defserver, server)); /* * In case, the provided port is equal to the default * port of the driver, make sure that we have an entry * without the port. */ if (providedPort == drvPtr->defport) { ServerMapEntryAdd(dsPtr, hostName, servPtr, drvPtr, STREQ(defserver, server)); } } else { Ns_Log(Warning, "%s: driver is listening on port %hu; " "virtual host entry %s ignored", moduleName, drvPtr->port, host); } } ns_free(writableHost); } } Ns_DStringFree(dsPtr); if (drvPtr->defMapPtr == NULL) { fprintf(stderr, "--- Server Map: ---\n"); Ns_SetPrint(lset); Ns_Fatal("%s: default server '%s' not defined in '%s'", moduleName, defserver, path); } } } /* *---------------------------------------------------------------------- * * DriverInit -- * * Helper function of Ns_DriverInit. This function actually allocates and * initialized the driver structure. * * Results: * NS_OK if initialized, NS_ERROR if config or other error. * * Side effects: * Listen socket will be opened later in NsStartDrivers. * *---------------------------------------------------------------------- */ static Ns_ReturnCode DriverInit(const char *server, const char *moduleName, const char *threadName, const Ns_DriverInitData *init, NsServer *servPtr, const char *path, const char *bindaddrs, const char *defserver, const char *host) { const char *defproto; Driver *drvPtr; DrvWriter *wrPtr; DrvSpooler *spPtr; int i; unsigned short defport; NS_NONNULL_ASSERT(threadName != NULL); NS_NONNULL_ASSERT(init != NULL); NS_NONNULL_ASSERT(path != NULL); NS_NONNULL_ASSERT(bindaddrs != NULL); NS_NONNULL_ASSERT(host != NULL); /* * Set the protocol and port defaults. 
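     * The defaults come from the Ns_DriverInitData structure provided by
     * the driver module (for example, an HTTP driver might pass protocol
     * "http" with default port 80; the concrete values are illustrative).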
*/ if (init->protocol != NULL) { defproto = init->protocol; defport = init->defaultPort; } else { defproto = "unknown"; defport = 0u; } Ns_Log(DriverDebug, "DriverInit server <%s> threadName %s proto %s port %hu", server, threadName, defproto, defport); /* * Allocate a new driver instance and set configurable parameters. */ drvPtr = ns_calloc(1u, sizeof(Driver)); Ns_MutexInit(&drvPtr->lock); Ns_MutexSetName2(&drvPtr->lock, "ns:drv", threadName); Ns_MutexInit(&drvPtr->spooler.lock); Ns_MutexSetName2(&drvPtr->spooler.lock, "ns:drv:spool", threadName); Ns_MutexInit(&drvPtr->writer.lock); Ns_MutexSetName2(&drvPtr->writer.lock, "ns:drv:writer", threadName); if (ns_sockpair(drvPtr->trigger) != 0) { Ns_Fatal("ns_sockpair() failed: %s", ns_sockstrerror(ns_sockerrno)); } drvPtr->server = server; drvPtr->type = init->name; drvPtr->moduleName = ns_strdup(moduleName); drvPtr->threadName = ns_strdup(threadName); drvPtr->defserver = defserver; drvPtr->listenProc = init->listenProc; drvPtr->acceptProc = init->acceptProc; drvPtr->recvProc = init->recvProc; drvPtr->sendProc = init->sendProc; drvPtr->sendFileProc = init->sendFileProc; drvPtr->keepProc = init->keepProc; drvPtr->requestProc = init->requestProc; drvPtr->closeProc = init->closeProc; drvPtr->clientInitProc = init->clientInitProc; drvPtr->arg = init->arg; drvPtr->opts = init->opts; drvPtr->servPtr = servPtr; drvPtr->defport = defport; drvPtr->bufsize = (size_t)Ns_ConfigMemUnitRange(path, "bufsize", 16384, 1024, INT_MAX); drvPtr->maxinput = Ns_ConfigMemUnitRange(path, "maxinput", 1024*1024, 1024, LLONG_MAX); drvPtr->maxupload = Ns_ConfigMemUnitRange(path, "maxupload", 0, 0, (Tcl_WideInt)drvPtr->maxinput); drvPtr->readahead = Ns_ConfigMemUnitRange(path, "readahead", (Tcl_WideInt)drvPtr->bufsize, (Tcl_WideInt)drvPtr->bufsize, drvPtr->maxinput); drvPtr->maxline = Ns_ConfigIntRange(path, "maxline", 8192, 256, INT_MAX); drvPtr->maxheaders = Ns_ConfigIntRange(path, "maxheaders", 128, 8, INT_MAX); drvPtr->maxqueuesize = Ns_ConfigIntRange(path, "maxqueuesize", 1024, 1, INT_MAX); Ns_ConfigTimeUnitRange(path, "sendwait", "30s", 1, 0, INT_MAX, 0, &drvPtr->sendwait); Ns_ConfigTimeUnitRange(path, "recvwait", "30s", 1, 0, INT_MAX, 0, &drvPtr->recvwait); Ns_ConfigTimeUnitRange(path, "closewait", "2s", 0, 0, INT_MAX, 0, &drvPtr->closewait); Ns_ConfigTimeUnitRange(path, "keepwait", "5s", 0, 0, INT_MAX, 0, &drvPtr->keepwait); drvPtr->backlog = Ns_ConfigIntRange(path, "backlog", 256, 1, INT_MAX); drvPtr->driverthreads = Ns_ConfigIntRange(path, "driverthreads", 1, 1, 32); drvPtr->reuseport = Ns_ConfigBool(path, "reuseport", NS_FALSE); drvPtr->acceptsize = Ns_ConfigIntRange(path, "acceptsize", drvPtr->backlog, 1, INT_MAX); drvPtr->keepmaxuploadsize = (size_t)Ns_ConfigMemUnitRange(path, "keepalivemaxuploadsize", 0, 0, INT_MAX); drvPtr->keepmaxdownloadsize = (size_t)Ns_ConfigMemUnitRange(path, "keepalivemaxdownloadsize", 0, 0, INT_MAX); drvPtr->recvTimeout = drvPtr->recvwait; Tcl_InitHashTable(&drvPtr->hosts, TCL_STRING_KEYS); if (drvPtr->driverthreads > 1) { #if !defined(SO_REUSEPORT) drvPtr->driverthreads = 1; drvPtr->reuseport = NS_FALSE; #else /* * When driver threads > 1, "reuseport" has to be active. 
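         * With SO_REUSEPORT, each driver thread can bind its own listen
         * socket to the same address/port pair, and the kernel distributes
         * incoming connections among these sockets.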
*/ drvPtr->reuseport = NS_TRUE; #endif } if (drvPtr->reuseport) { /* * Reuseport was specified */ #if !defined(SO_REUSEPORT) Ns_Log(Warning, "parameter %s reuseport was specified, but is not supported by the operating system", path); drvPtr->reuseport = NS_FALSE; #endif } drvPtr->uploadpath = ns_strdup(Ns_ConfigString(path, "uploadpath", nsconf.tmpDir)); /* * If activated, "maxupload" has to be at least "readahead" bytes. Tell * the user in case the config values are overruled. */ if ((drvPtr->maxupload > 0) && (drvPtr->maxupload < drvPtr->readahead)) { Ns_Log(Warning, "parameter %s maxupload % " TCL_LL_MODIFIER "d invalid; can be either 0 or must be >= %" TCL_LL_MODIFIER "d (size of readahead)", path, drvPtr->maxupload, drvPtr->readahead); drvPtr->maxupload = drvPtr->readahead; } /* * Determine the port and then set the HTTP location string either * as specified in the config file or constructed from the * protocol, hostname and port. */ drvPtr->protocol = ns_strdup(defproto); drvPtr->address = ns_strdup(bindaddrs); drvPtr->port = (unsigned short)Ns_ConfigIntRange(path, "port", (int)defport, 0, 65535); drvPtr->location = Ns_ConfigGetValue(path, "location"); if (drvPtr->location != NULL && (strstr(drvPtr->location, "://") != NULL)) { drvPtr->location = ns_strdup(drvPtr->location); } else { Tcl_DString ds, *dsPtr = &ds; Ns_DStringInit(dsPtr); Ns_HttpLocationString(dsPtr, drvPtr->protocol, host, drvPtr->port, defport); drvPtr->location = Ns_DStringExport(dsPtr); } drvPtr->nextPtr = firstDrvPtr; firstDrvPtr = drvPtr; /* * Add driver specific extra headers. */ drvPtr->extraHeaders = Ns_ConfigSet(path, "extraheaders"); /* * Check if upload spooler are enabled */ spPtr = &drvPtr->spooler; spPtr->threads = Ns_ConfigIntRange(path, "spoolerthreads", 0, 0, 32); if (spPtr->threads > 0) { Ns_Log(Notice, "%s: enable %d spooler thread(s) " "for uploads >= %" TCL_LL_MODIFIER "d bytes", threadName, spPtr->threads, drvPtr->readahead); for (i = 0; i < spPtr->threads; i++) { SpoolerQueue *queuePtr = ns_calloc(1u, sizeof(SpoolerQueue)); char buffer[100]; snprintf(buffer, sizeof(buffer), "ns:driver:spooler:%d", i); Ns_MutexSetName2(&queuePtr->lock, buffer, "queue"); queuePtr->id = i; Push(queuePtr, spPtr->firstPtr); } } else { Ns_Log(Notice, "%s: enable %d spooler thread(s) ", threadName, spPtr->threads); } /* * Enable writer threads */ wrPtr = &drvPtr->writer; wrPtr->threads = Ns_ConfigIntRange(path, "writerthreads", 0, 0, 32); if (wrPtr->threads > 0) { wrPtr->writersize = (size_t)Ns_ConfigMemUnitRange(path, "writersize", 1024*1024, 1024, INT_MAX); wrPtr->bufsize = (size_t)Ns_ConfigMemUnitRange(path, "writerbufsize", 8192, 512, INT_MAX); wrPtr->rateLimit = Ns_ConfigIntRange(path, "writerratelimit", 0, 0, INT_MAX); wrPtr->doStream = Ns_ConfigBool(path, "writerstreaming", NS_FALSE) ? 
NS_WRITER_STREAM_ACTIVE : NS_WRITER_STREAM_NONE; Ns_Log(Notice, "%s: enable %d writer thread(s) " "for downloads >= %" PRIdz " bytes, bufsize=%" PRIdz " bytes, HTML streaming %d", threadName, wrPtr->threads, wrPtr->writersize, wrPtr->bufsize, wrPtr->doStream); for (i = 0; i < wrPtr->threads; i++) { SpoolerQueue *queuePtr = ns_calloc(1u, sizeof(SpoolerQueue)); char buffer[100]; snprintf(buffer, sizeof(buffer), "ns:driver:writer:%d", i); Ns_MutexSetName2(&queuePtr->lock, buffer, "queue"); queuePtr->id = i; Push(queuePtr, wrPtr->firstPtr); } } else { Ns_Log(Notice, "%s: enable %d writer thread(s) ", threadName, wrPtr->threads); } return NS_OK; } /* *---------------------------------------------------------------------- * * NsStartDrivers -- * * Listen on all driver address/ports and start the DriverThread. * * Results: * None. * * Side effects: * See DriverThread. * *---------------------------------------------------------------------- */ void NsStartDrivers(void) { Driver *drvPtr; /* * Signal and wait for each driver to start. */ for (drvPtr = firstDrvPtr; drvPtr != NULL; drvPtr = drvPtr->nextPtr) { if (drvPtr->port == 0u) { /* * Don't start a driver having port zero. */ continue; } Ns_ThreadCreate(DriverThread, drvPtr, 0, &drvPtr->thread); Ns_MutexLock(&drvPtr->lock); while ((drvPtr->flags & DRIVER_STARTED) == 0u) { Ns_CondWait(&drvPtr->cond, &drvPtr->lock); } /*if ((drvPtr->flags & DRIVER_FAILED)) { status = NS_ERROR; }*/ Ns_MutexUnlock(&drvPtr->lock); } } /* *---------------------------------------------------------------------- * * NsStopDrivers -- * * Trigger the DriverThread to begin shutdown. * * Results: * None. * * Side effects: * DriverThread will close listen sockets and then exit after all * outstanding connections are complete and closed. * *---------------------------------------------------------------------- */ void NsStopDrivers(void) { Driver *drvPtr; NsAsyncWriterQueueDisable(NS_TRUE); for (drvPtr = firstDrvPtr; drvPtr != NULL; drvPtr = drvPtr->nextPtr) { Tcl_HashEntry *hPtr; Tcl_HashSearch search; if ((drvPtr->flags & DRIVER_STARTED) == 0u) { continue; } Ns_MutexLock(&drvPtr->lock); Ns_Log(Notice, "[driver:%s]: stopping", drvPtr->threadName); drvPtr->flags |= DRIVER_SHUTDOWN; Ns_CondBroadcast(&drvPtr->cond); Ns_MutexUnlock(&drvPtr->lock); SockTrigger(drvPtr->trigger[1]); hPtr = Tcl_FirstHashEntry(&drvPtr->hosts, &search); while (hPtr != NULL) { Tcl_DeleteHashEntry(hPtr); hPtr = Tcl_NextHashEntry(&search); } } } void NsStopSpoolers(void) { const Driver *drvPtr; Ns_Log(Notice, "driver: stopping writer and spooler threads"); /* * Shutdown all spooler and writer threads */ for (drvPtr = firstDrvPtr; drvPtr != NULL; drvPtr = drvPtr->nextPtr) { Ns_Time timeout; if ((drvPtr->flags & DRIVER_STARTED) == 0u) { continue; } Ns_GetTime(&timeout); Ns_IncrTime(&timeout, nsconf.shutdowntimeout.sec, nsconf.shutdowntimeout.usec); SpoolerQueueStop(drvPtr->writer.firstPtr, &timeout, "writer"); SpoolerQueueStop(drvPtr->spooler.firstPtr, &timeout, "spooler"); } } /* *---------------------------------------------------------------------- * * DriverInfoObjCmd -- * * Return public info of all drivers. * Subcommand of NsTclDriverObjCmd. * * Results: * Standard Tcl Result. * * Side effects: * None. 
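 *
 * Example (illustrative): "ns_driver info" returns one element per driver
 * module, each a key/value list with the keys module, type, server,
 * location, address, protocol, sendwait, recvwait, and extraheaders.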
 *
 *----------------------------------------------------------------------
 */
static int
DriverInfoObjCmd(ClientData UNUSED(clientData), Tcl_Interp *interp, int objc, Tcl_Obj *const* objv)
{
    int result = TCL_OK;

    if (Ns_ParseObjv(NULL, NULL, interp, 2, objc, objv) != NS_OK) {
        result = TCL_ERROR;
    } else {
        const Driver *drvPtr;
        Tcl_Obj      *resultObj = Tcl_NewListObj(0, NULL);
        Tcl_HashTable driverNames;     /* names of the driver modules without duplicates */

        Tcl_InitHashTable(&driverNames, TCL_STRING_KEYS);

        /*
         * Iterate over all modules, not necessarily all driver threads.
         */
        for (drvPtr = firstDrvPtr; drvPtr != NULL; drvPtr = drvPtr->nextPtr) {
            int isNew = 0;

            (void)Tcl_CreateHashEntry(&driverNames, drvPtr->moduleName, &isNew);
            if (isNew == 1) {
                Tcl_Obj *listObj = Tcl_NewListObj(0, NULL);

                Tcl_ListObjAppendElement(interp, listObj, Tcl_NewStringObj("module", 6));
                Tcl_ListObjAppendElement(interp, listObj, Tcl_NewStringObj(drvPtr->moduleName, -1));
                Tcl_ListObjAppendElement(interp, listObj, Tcl_NewStringObj("type", 4));
                Tcl_ListObjAppendElement(interp, listObj, Tcl_NewStringObj(drvPtr->type, -1));
                Tcl_ListObjAppendElement(interp, listObj, Tcl_NewStringObj("server", 6));
                Tcl_ListObjAppendElement(interp, listObj,
                                         Tcl_NewStringObj(drvPtr->server != NULL ? drvPtr->server : NS_EMPTY_STRING, -1));
                Tcl_ListObjAppendElement(interp, listObj, Tcl_NewStringObj("location", 8));
                Tcl_ListObjAppendElement(interp, listObj, Tcl_NewStringObj(drvPtr->location, -1));
                Tcl_ListObjAppendElement(interp, listObj, Tcl_NewStringObj("address", 7));
                Tcl_ListObjAppendElement(interp, listObj, Tcl_NewStringObj(drvPtr->address, -1));
                Tcl_ListObjAppendElement(interp, listObj, Tcl_NewStringObj("protocol", 8));
                Tcl_ListObjAppendElement(interp, listObj, Tcl_NewStringObj(drvPtr->protocol, -1));
                Tcl_ListObjAppendElement(interp, listObj, Tcl_NewStringObj("sendwait", 8));
                Tcl_ListObjAppendElement(interp, listObj, Ns_TclNewTimeObj(&drvPtr->sendwait));
                Tcl_ListObjAppendElement(interp, listObj, Tcl_NewStringObj("recvwait", 8));
                Tcl_ListObjAppendElement(interp, listObj, Ns_TclNewTimeObj(&drvPtr->recvwait));
                Tcl_ListObjAppendElement(interp, listObj, Tcl_NewStringObj("extraheaders", 12));
                if (drvPtr->extraHeaders != NULL) {
                    Tcl_DString ds;

                    Tcl_DStringInit(&ds);
                    Ns_DStringAppendSet(&ds, drvPtr->extraHeaders);
                    Tcl_ListObjAppendElement(interp, listObj, Tcl_NewStringObj(ds.string, ds.length));
                    Tcl_DStringFree(&ds);
                } else {
                    Tcl_ListObjAppendElement(interp, listObj, Tcl_NewStringObj("", 0));
                }
                Tcl_ListObjAppendElement(interp, resultObj, listObj);
            }
        }
        Tcl_SetObjResult(interp, resultObj);
        Tcl_DeleteHashTable(&driverNames);
    }
    return result;
}

/*
 *----------------------------------------------------------------------
 *
 * DriverStatsObjCmd --
 *
 *      Return statistics of all drivers.
 *      Subcommand of NsTclDriverObjCmd.
 *
 * Results:
 *      Standard Tcl Result.
 *
 * Side effects:
 *      None.
 *
 *----------------------------------------------------------------------
 */
static int
DriverStatsObjCmd(ClientData UNUSED(clientData), Tcl_Interp *interp, int objc, Tcl_Obj *const* objv)
{
    int result = TCL_OK;

    if (Ns_ParseObjv(NULL, NULL, interp, 2, objc, objv) != NS_OK) {
        result = TCL_ERROR;
    } else {
        const Driver *drvPtr;
        Tcl_Obj      *resultObj = Tcl_NewListObj(0, NULL);

        /*
         * Iterate over all drivers and collect results.
*/ for (drvPtr = firstDrvPtr; drvPtr != NULL; drvPtr = drvPtr->nextPtr) { Tcl_Obj *listObj = Tcl_NewListObj(0, NULL); Tcl_ListObjAppendElement(interp, listObj, Tcl_NewStringObj("thread", 6)); Tcl_ListObjAppendElement(interp, listObj, Tcl_NewStringObj(drvPtr->threadName, -1)); Tcl_ListObjAppendElement(interp, listObj, Tcl_NewStringObj("module", 6)); Tcl_ListObjAppendElement(interp, listObj, Tcl_NewStringObj(drvPtr->moduleName, -1)); Tcl_ListObjAppendElement(interp, listObj, Tcl_NewStringObj("received", 8)); Tcl_ListObjAppendElement(interp, listObj, Tcl_NewWideIntObj(drvPtr->stats.received)); Tcl_ListObjAppendElement(interp, listObj, Tcl_NewStringObj("spooled", 7)); Tcl_ListObjAppendElement(interp, listObj, Tcl_NewWideIntObj(drvPtr->stats.spooled)); Tcl_ListObjAppendElement(interp, listObj, Tcl_NewStringObj("partial", 7)); Tcl_ListObjAppendElement(interp, listObj, Tcl_NewWideIntObj(drvPtr->stats.partial)); Tcl_ListObjAppendElement(interp, listObj, Tcl_NewStringObj("errors", 6)); Tcl_ListObjAppendElement(interp, listObj, Tcl_NewWideIntObj(drvPtr->stats.errors)); Tcl_ListObjAppendElement(interp, resultObj, listObj); } Tcl_SetObjResult(interp, resultObj); } return result; } /* *---------------------------------------------------------------------- * * DriverThreadsObjCmd -- * * Return the names of driver threads * * Results: * Standard Tcl Result. * * Side effects: * None. * *---------------------------------------------------------------------- */ static int DriverThreadsObjCmd(ClientData UNUSED(clientData), Tcl_Interp *interp, int objc, Tcl_Obj *const* objv) { int result = TCL_OK; if (Ns_ParseObjv(NULL, NULL, interp, 2, objc, objv) != NS_OK) { result = TCL_ERROR; } else { const Driver *drvPtr; Tcl_Obj *resultObj = Tcl_NewListObj(0, NULL); /* * Iterate over all drivers and collect results. */ for (drvPtr = firstDrvPtr; drvPtr != NULL; drvPtr = drvPtr->nextPtr) { Tcl_ListObjAppendElement(interp, resultObj, Tcl_NewStringObj(drvPtr->threadName, -1)); } Tcl_SetObjResult(interp, resultObj); } return result; } /* *---------------------------------------------------------------------- * * DriverNamesObjCmd -- * * Return the names of drivers. * * Results: * Standard Tcl Result. * * Side effects: * None. * *---------------------------------------------------------------------- */ static int DriverNamesObjCmd(ClientData UNUSED(clientData), Tcl_Interp *interp, int objc, Tcl_Obj *const* objv) { int result = TCL_OK; if (Ns_ParseObjv(NULL, NULL, interp, 2, objc, objv) != NS_OK) { result = TCL_ERROR; } else { const Driver *drvPtr; Tcl_Obj *resultObj = Tcl_NewListObj(0, NULL); Tcl_HashTable driverNames; /* names of the drivers without duplicates */ Tcl_InitHashTable(&driverNames, TCL_STRING_KEYS); /* * Iterate over all drivers and collect results. */ for (drvPtr = firstDrvPtr; drvPtr != NULL; drvPtr = drvPtr->nextPtr) { int isNew; (void)Tcl_CreateHashEntry(&driverNames, drvPtr->moduleName, &isNew); if (isNew == 1) { Tcl_ListObjAppendElement(interp, resultObj, Tcl_NewStringObj(drvPtr->moduleName, -1)); } } Tcl_SetObjResult(interp, resultObj); Tcl_DeleteHashTable(&driverNames); } return result; } /* *---------------------------------------------------------------------- * * NsTclDriverObjCmd - * * Give information about drivers. Currently, just the statistics. * * Results: * Standard Tcl result. * * Side effects: * None. 
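 *
 * Example (illustrative):
 *
 *     ns_driver names    ;# e.g. returns "nssock nsssl"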
 *
 *----------------------------------------------------------------------
 */
int
NsTclDriverObjCmd(ClientData clientData, Tcl_Interp *interp, int objc, Tcl_Obj *const* objv)
{
    const Ns_SubCmdSpec subcmds[] = {
        {"info",    DriverInfoObjCmd},
        {"names",   DriverNamesObjCmd},
        {"threads", DriverThreadsObjCmd},
        {"stats",   DriverStatsObjCmd},
        {NULL, NULL}
    };

    return Ns_SubcmdObjv(subcmds, clientData, interp, objc, objv);
}

/*
 *----------------------------------------------------------------------
 *
 * NsWakeupDriver --
 *
 *      Wake up the associated DriverThread.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      The poll waiting for this trigger will be interrupted.
 *
 *----------------------------------------------------------------------
 */
void
NsWakeupDriver(const Driver *drvPtr)
{
    NS_NONNULL_ASSERT(drvPtr != NULL);
    SockTrigger(drvPtr->trigger[1]);
}

/*
 *----------------------------------------------------------------------
 *
 * NsWaitDriversShutdown --
 *
 *      Wait for exit of DriverThread. This callback is invoked later
 *      by the timed shutdown thread.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      Driver thread is joined and trigger pipe closed.
 *
 *----------------------------------------------------------------------
 */
void
NsWaitDriversShutdown(const Ns_Time *toPtr)
{
    Driver       *drvPtr;
    Ns_ReturnCode status = NS_OK;

    for (drvPtr = firstDrvPtr; drvPtr != NULL; drvPtr = drvPtr->nextPtr) {
        if ((drvPtr->flags & DRIVER_STARTED) == 0u) {
            continue;
        }
        Ns_MutexLock(&drvPtr->lock);
        while ((drvPtr->flags & DRIVER_STOPPED) == 0u && status == NS_OK) {
            status = Ns_CondTimedWait(&drvPtr->cond, &drvPtr->lock, toPtr);
        }
        Ns_MutexUnlock(&drvPtr->lock);
        if (status != NS_OK) {
            Ns_Log(Warning, "[driver:%s]: shutdown timeout", drvPtr->threadName);
        } else {
            Ns_Log(Notice, "[driver:%s]: stopped", drvPtr->threadName);
            Ns_ThreadJoin(&drvPtr->thread, NULL);
            drvPtr->thread = NULL;
        }
    }
}

/*
 *----------------------------------------------------------------------
 *
 * NsGetRequest --
 *
 *      Return the request buffer, reading it if necessary (i.e., if not an
 *      async read-ahead connection). This function is called at the start
 *      of connection processing.
 *
 * Results:
 *      Pointer to Request structure or NULL on error.
 *
 * Side effects:
 *      May wait for content to arrive if necessary.
 *
 *----------------------------------------------------------------------
 */
Request *
NsGetRequest(Sock *sockPtr, const Ns_Time *nowPtr)
{
    Request *reqPtr;

    NS_NONNULL_ASSERT(sockPtr != NULL);

    /*
     * The underlying "Request" structure is allocated by RequestNew(), which
     * must be called for the "sockPtr" prior to calling this
     * function. "reqPtr" should be NULL only in error cases.
     */
    reqPtr = sockPtr->reqPtr;

    if (likely(reqPtr != NULL)) {
        if (likely(reqPtr->request.line != NULL)) {
            Ns_Log(DriverDebug, "NsGetRequest got the pre-parsed request <%s> from the driver",
                   reqPtr->request.line);

        } else if (sockPtr->drvPtr->requestProc == NULL) {
            /*
             * Non-HTTP drivers can set drvPtr->requestProc to perform
             * their own request handling.
             */
            SockState status;

            Ns_Log(DriverDebug, "NsGetRequest has to read+parse the request");
            /*
             * We have no parsed request so far. So, do it now.
             */
            do {
                Ns_Log(DriverDebug, "NsGetRequest calls SockRead");
                status = SockRead(sockPtr, 0, nowPtr);
            } while (status == SOCK_MORE);

            /*
             * If anything went wrong, clean the request provided by
             * SockRead() and flag the error by returning NULL.
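             * Every SockState other than SOCK_READY (e.g. SOCK_READERROR
             * or SOCK_BADREQUEST) counts as an error here.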
             */
            if (status != SOCK_READY) {
                if (sockPtr->reqPtr != NULL) {
                    Ns_Log(DriverDebug, "NsGetRequest calls RequestFree");
                    RequestFree(sockPtr);
                }
                reqPtr = NULL;
            }

        } else {
            Ns_Log(DriverDebug, "NsGetRequest found driver specific request Proc, "
                   "probably from a non-HTTP driver");
        }
    } else {
        Ns_Log(DriverDebug, "NsGetRequest has reqPtr NULL");
    }

    return reqPtr;
}

/*
 *----------------------------------------------------------------------
 *
 * NsSockClose --
 *
 *      Return a connection to the DriverThread for closing or keepalive.
 *      "keep" might be NS_TRUE/NS_FALSE or -1 if undecided.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      Socket may be reused by a keepalive connection.
 *
 *----------------------------------------------------------------------
 */
void
NsSockClose(Sock *sockPtr, int keep)
{
    Driver *drvPtr;
    bool    trigger = NS_FALSE;

    NS_NONNULL_ASSERT(sockPtr != NULL);

    drvPtr = sockPtr->drvPtr;

    Ns_Log(DriverDebug, "NsSockClose sockPtr %p (%d) keep %d",
           (void *)sockPtr, ((Ns_Sock*)sockPtr)->sock, keep);

    SockClose(sockPtr, keep);
    /*
     * Free the request, unless it is from a non-HTTP driver (which might
     * not fill out the request structure).
     */
    if (sockPtr->reqPtr != NULL) {
        Ns_Log(DriverDebug, "NsSockClose calls RequestFree");
        RequestFree(sockPtr);
    }

    Ns_MutexLock(&drvPtr->lock);
    if (drvPtr->closePtr == NULL) {
        trigger = NS_TRUE;
    }
    sockPtr->nextPtr = drvPtr->closePtr;
    drvPtr->closePtr = sockPtr;
    Ns_MutexUnlock(&drvPtr->lock);

    if (trigger) {
        SockTrigger(drvPtr->trigger[1]);
    }
}

/*
 *----------------------------------------------------------------------
 *
 * DriverListen --
 *
 *      Open a listening socket for accepting connections.
 *
 * Results:
 *      File descriptor of the socket, or NS_INVALID_SOCKET on error.
 *
 * Side effects:
 *      Depends on driver.
 *
 *----------------------------------------------------------------------
 */
static NS_SOCKET
DriverListen(Driver *drvPtr, const char *bindaddr)
{
    NS_SOCKET sock;

    NS_NONNULL_ASSERT(drvPtr != NULL);
    NS_NONNULL_ASSERT(bindaddr != NULL);

    sock = (*drvPtr->listenProc)((Ns_Driver *) drvPtr,
                                 bindaddr,
                                 drvPtr->port,
                                 drvPtr->backlog,
                                 drvPtr->reuseport);
    if (sock == NS_INVALID_SOCKET) {
        Ns_Log(Error, "%s: failed to listen on [%s]:%d: %s",
               drvPtr->threadName, bindaddr, drvPtr->port,
               ns_sockstrerror(ns_sockerrno));
    } else {
        Ns_Log(Notice,
#ifdef HAVE_IPV6
               "%s: listening on [%s]:%d",
#else
               "%s: listening on %s:%d",
#endif
               drvPtr->threadName, bindaddr, drvPtr->port);
    }

    return sock;
}

/*
 *----------------------------------------------------------------------
 *
 * DriverAccept --
 *
 *      Accept a new socket. It will be in non-blocking mode.
 *
 * Results:
 *      _ACCEPT:       a socket was accepted, poll for data
 *      _ACCEPT_DATA:  a socket was accepted, data present, read immediately
 *                     if in async mode, defer reading to connection thread
 *      _ACCEPT_QUEUE: a socket was accepted, queue immediately
 *      _ACCEPT_ERROR: no socket was accepted
 *
 * Side effects:
 *      Depends on driver.
 *
 *----------------------------------------------------------------------
 */
static NS_DRIVER_ACCEPT_STATUS
DriverAccept(Sock *sockPtr, NS_SOCKET sock)
{
    socklen_t n = (socklen_t)sizeof(struct NS_SOCKADDR_STORAGE);

    NS_NONNULL_ASSERT(sockPtr != NULL);

    return (*sockPtr->drvPtr->acceptProc)((Ns_Sock *) sockPtr,
                                          sock,
                                          (struct sockaddr *) &(sockPtr->sa), &n);
}

/*
 *----------------------------------------------------------------------
 *
 * NsDriverRecv --
 *
 *      Read data from the socket into the given vector of buffers.
 *
 * Results:
 *      Number of bytes read, or -1 on error.
 *
 * Side effects:
 *      Depends on driver.
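 *
 * Example (illustrative sketch; "header", "body", "nread", and "sockPtr"
 * are assumed to exist in the caller):
 *
 *     struct iovec iov[2];
 *
 *     iov[0].iov_base = header; iov[0].iov_len = sizeof(header);
 *     iov[1].iov_base = body;   iov[1].iov_len = sizeof(body);
 *     nread = NsDriverRecv(sockPtr, iov, 2, NULL);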
* *---------------------------------------------------------------------- */ ssize_t NsDriverRecv(Sock *sockPtr, struct iovec *bufs, int nbufs, Ns_Time *timeoutPtr) { ssize_t result; const Driver *drvPtr; NS_NONNULL_ASSERT(sockPtr != NULL); drvPtr = sockPtr->drvPtr; if (likely(drvPtr->recvProc != NULL)) { result = (*drvPtr->recvProc)((Ns_Sock *) sockPtr, bufs, nbufs, timeoutPtr, 0u); } else { Ns_Log(Warning, "driver: no recvProc registered for driver %s", drvPtr->threadName); result = -1; } return result; } /* *---------------------------------------------------------------------- * * NsDriverSend -- * * Write a vector of buffers to the socket via the driver callback. * May not send all of the data. * * Results: * Number of bytes written or -1 on error. * May return 0 (zero) when socket is not writable. * * Side effects: * Depends on the driver. * *---------------------------------------------------------------------- */ ssize_t NsDriverSend(Sock *sockPtr, const struct iovec *bufs, int nbufs, unsigned int flags) { ssize_t sent = -1; const Driver *drvPtr; NS_NONNULL_ASSERT(sockPtr != NULL); drvPtr = sockPtr->drvPtr; NS_NONNULL_ASSERT(drvPtr != NULL); if (likely(drvPtr->sendProc != NULL)) { /* * TODO: The Ns_DriverSendProc signature should be modified * to omit the timeout argument. Same with recvProc(). */ sent = (*drvPtr->sendProc)((Ns_Sock *) sockPtr, bufs, nbufs, NULL, flags); } else { Ns_Log(Warning, "no sendProc registered for driver %s", drvPtr->threadName); } return sent; } /* *---------------------------------------------------------------------- * * NsDriverSendFile -- * * Write a vector of file buffers to the socket via the driver * callback. * * Results: * Number of bytes written, -1 on error. * May not send all the data. * * Side effects: * May block on disk read. * *---------------------------------------------------------------------- */ ssize_t NsDriverSendFile(Sock *sockPtr, Ns_FileVec *bufs, int nbufs, unsigned int flags) { ssize_t sent = -1; const Driver *drvPtr; NS_NONNULL_ASSERT(sockPtr != NULL); NS_NONNULL_ASSERT(bufs != NULL); drvPtr = sockPtr->drvPtr; NS_NONNULL_ASSERT(drvPtr != NULL); if (drvPtr->sendFileProc != NULL) { /* * TODO: The Ns_DriverSendFileProc signature should be modified * to omit the timeout argument. */ sent = (*drvPtr->sendFileProc)((Ns_Sock *)sockPtr, bufs, nbufs, NULL, flags); } else { sent = Ns_SockSendFileBufs((Ns_Sock *)sockPtr, bufs, nbufs, flags); } return sent; } /* *---------------------------------------------------------------------- * * DriverKeep -- * * Can the given socket be kept open in the hopes that another * request will arrive before the keepwait timeout expires? * * Results: * NS_TRUE if the socket is OK for keepalive, NS_FALSE if this is not possible. * * Side effects: * Depends on driver. * *---------------------------------------------------------------------- */ static bool DriverKeep(Sock *sockPtr) { Ns_DriverKeepProc *keepProc; bool result; NS_NONNULL_ASSERT(sockPtr != NULL); keepProc = sockPtr->drvPtr->keepProc; if (keepProc == NULL) { result = NS_FALSE; } else { result = (keepProc)((Ns_Sock *) sockPtr); } return result; } /* *---------------------------------------------------------------------- * * DriverClose -- * * Close the given socket. * * Results: * None. * * Side effects: * Depends on driver. 
 *
 *----------------------------------------------------------------------
 */
static void
DriverClose(Sock *sockPtr)
{
    NS_NONNULL_ASSERT(sockPtr != NULL);
    (*sockPtr->drvPtr->closeProc)((Ns_Sock *) sockPtr);
}

/*
 *----------------------------------------------------------------------
 *
 * DriverThread --
 *
 *      Main listening socket driver thread.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      Connections are accepted on the configured listen sockets,
 *      placed on the run queue to be serviced, and gracefully
 *      closed when done. Async sockets have the entire request read
 *      here before queuing as well.
 *
 *----------------------------------------------------------------------
 */
static void
DriverThread(void *arg)
{
    Driver        *drvPtr = (Driver*)arg;
    Ns_Time        now, diff;
    char           charBuffer[1], drain[1024];
    int            pollTimeout, accepted, nrBindaddrs = 0;
    bool           stopping;
    unsigned int   flags;
    Sock          *sockPtr, *closePtr, *nextPtr, *waitPtr, *readPtr;
    PollData       pdata;

    Ns_ThreadSetName("-driver:%s-", drvPtr->threadName);
    Ns_Log(Notice, "starting");

    flags = DRIVER_STARTED;

    {
        Tcl_Obj *bindaddrsObj, **objv;
        int      j = 0, result;

        bindaddrsObj = Tcl_NewStringObj(drvPtr->address, -1);
        Tcl_IncrRefCount(bindaddrsObj);

        result = Tcl_ListObjGetElements(NULL, bindaddrsObj, &nrBindaddrs, &objv);
        /*
         * "result" was TCL_OK during startup; it still has to be OK.
         */
        assert(result == TCL_OK);

        if (result == TCL_OK) {
            int i;

            /*
             * Bind all provided addresses.
             */
            for (i = 0; i < nrBindaddrs; i++) {
                drvPtr->listenfd[j] = DriverListen(drvPtr, Tcl_GetString(objv[i]));
                if (drvPtr->listenfd[j] != NS_INVALID_SOCKET) {
                    j ++;
                }
            }
            if (j > 0 && j < nrBindaddrs) {
                Ns_Log(Warning, "could only bind to %d out of %d addresses", j, nrBindaddrs);
            }
        }

        /*
         * "j" refers to the number of successful listen() operations.
         */
        nrBindaddrs = j;
        Tcl_DecrRefCount(bindaddrsObj);
    }

    if (nrBindaddrs > 0) {
        SpoolerQueueStart(drvPtr->spooler.firstPtr, SpoolerThread);
        SpoolerQueueStart(drvPtr->writer.firstPtr, WriterThread);
    } else {
        Ns_Log(Warning, "could not bind any of the following addresses, stopping this driver: %s",
               drvPtr->address);
        flags |= (DRIVER_FAILED | DRIVER_SHUTDOWN);
    }

    Ns_MutexLock(&drvPtr->lock);
    drvPtr->flags |= flags;
    Ns_CondBroadcast(&drvPtr->cond);
    Ns_MutexUnlock(&drvPtr->lock);

    /*
     * Loop forever until signaled to shut down and all
     * connections are complete and gracefully closed.
     */
    PollCreate(&pdata);
    Ns_GetTime(&now);
    closePtr = waitPtr = readPtr = NULL;
    stopping = ((flags & DRIVER_SHUTDOWN) != 0u);

    if (!stopping) {
        Ns_Log(Notice, "driver: accepting connections");
    }

    while (!stopping) {
        int n;

        /*
         * Set the bits for all active drivers if a connection
         * isn't already pending.
         */
        PollReset(&pdata);
        (void)PollSet(&pdata, drvPtr->trigger[0], (short)POLLIN, NULL);

        if (likely(waitPtr == NULL)) {
            for (n = 0; n < nrBindaddrs; n++) {
                drvPtr->pidx[n] = PollSet(&pdata, drvPtr->listenfd[n], (short)POLLIN, NULL);
            }
        }

        /*
         * If there are any closing or read-ahead sockets, set the bits
         * and determine the minimum relative timeout.
         *
         * TODO: the various poll timeouts should probably be configurable.
         */
        if (readPtr == NULL && closePtr == NULL) {
            pollTimeout = 10 * 1000;
        } else {
            for (sockPtr = readPtr; sockPtr != NULL; sockPtr = sockPtr->nextPtr) {
                SockPoll(sockPtr, (short)POLLIN, &pdata);
            }
            for (sockPtr = closePtr; sockPtr != NULL; sockPtr = sockPtr->nextPtr) {
                SockPoll(sockPtr, (short)POLLIN, &pdata);
            }
            if (Ns_DiffTime(&pdata.timeout, &now, &diff) > 0)  {
                /*
                 * The resolution of "pollTimeout" is ms, therefore, we round
                 * up. If we rounded down (e.g.
500 microseconds to 0 ms), * the time comparison later would determine that it is too * early. */ pollTimeout = (int)Ns_TimeToMilliseconds(&diff) + 1; } else { pollTimeout = 0; } } n = PollWait(&pdata, pollTimeout); Ns_Log(DriverDebug, "=== PollWait returned %d, trigger[0] %d", n, PollIn(&pdata, 0)); if (PollIn(&pdata, 0) && unlikely(ns_recv(drvPtr->trigger[0], charBuffer, 1u, 0) != 1)) { const char *errstr = ns_sockstrerror(ns_sockerrno); Ns_Fatal("driver: trigger ns_recv() failed: %s", errstr); } /* * Check whether we should re-animate some connection threads, * when e.g. the number of current threads dropped below the * minimal value. Perform this test on timeouts (n == 0; * just for safety reasons) or on explicit wakeup calls. */ if ((n == 0) || PollIn(&pdata, 0)) { NsServer *servPtr = drvPtr->servPtr; if (servPtr != NULL) { /* * Check if we have to reanimate the current server. */ NsEnsureRunningConnectionThreads(servPtr, NULL); } else { Ns_Set *servers = Ns_ConfigCreateSection("ns/servers"); size_t j; /* * Reanimation check on all servers. */ for (j = 0u; j < Ns_SetSize(servers); ++j) { const char *server = Ns_SetKey(servers, j); servPtr = NsGetServer(server); if (servPtr != NULL) { NsEnsureRunningConnectionThreads(servPtr, NULL); } } } } /* * Update the current time and drain and/or release any * closing sockets. */ Ns_GetTime(&now); if (closePtr != NULL) { sockPtr = closePtr; closePtr = NULL; while (sockPtr != NULL) { nextPtr = sockPtr->nextPtr; if (unlikely(PollHup(&pdata, sockPtr->pidx))) { /* * Peer has closed the connection */ SockRelease(sockPtr, SOCK_CLOSE, 0); } else if (likely(PollIn(&pdata, sockPtr->pidx))) { /* * Got some data */ ssize_t received = ns_recv(sockPtr->sock, drain, sizeof(drain), 0); if (received <= 0) { Ns_Log(DriverDebug, "poll closewait pollin; sockrelease SOCK_READERROR (sock %d)", sockPtr->sock); SockRelease(sockPtr, SOCK_READERROR, 0); } else { Push(sockPtr, closePtr); } } else if (Ns_DiffTime(&sockPtr->timeout, &now, &diff) <= 0) { /* no PollHup, no PollIn, maybe timeout */ Ns_Log(DriverDebug, "poll closewait timeout; sockrelease SOCK_CLOSETIMEOUT (sock %d)", sockPtr->sock); SockRelease(sockPtr, SOCK_CLOSETIMEOUT, 0); } else { /* too early, keep waiting */ Push(sockPtr, closePtr); } sockPtr = nextPtr; } } /* * Attempt read-ahead of any new connections. */ sockPtr = readPtr; readPtr = NULL; while (likely(sockPtr != NULL)) { nextPtr = sockPtr->nextPtr; if (unlikely(PollHup(&pdata, sockPtr->pidx))) { /* * Peer has closed the connection */ SockRelease(sockPtr, SOCK_CLOSE, 0); } else if (unlikely(!PollIn(&pdata, sockPtr->pidx)) && ((sockPtr->reqPtr == NULL) || (sockPtr->reqPtr->leftover == 0u))) { /* * Got no data for this sockPtr. */ if (Ns_DiffTime(&sockPtr->timeout, &now, &diff) <= 0) { SockRelease(sockPtr, SOCK_READTIMEOUT, 0); } else { Push(sockPtr, readPtr); } } else { /* * Got some data for this sockPtr. * If enabled, perform read-ahead now. */ assert(drvPtr == sockPtr->drvPtr); if (likely((drvPtr->opts & NS_DRIVER_ASYNC) != 0u)) { SockState s = SockRead(sockPtr, 0, &now); /* * Queue for connection processing if ready. 
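                         *
                         * In short, the productive outcomes are: SOCK_SPOOL
                         * hands the socket to a spooler thread (and re-queues
                         * it locally if no spooler takes it), SOCK_MORE
                         * re-arms polling with the "recvwait" timeout, and
                         * SOCK_READY queues the request for a connection
                         * thread (parked on "waitPtr" when the connection
                         * queue is currently full).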
*/ switch (s) { case SOCK_SPOOL: drvPtr->stats.spooled++; if (SockSpoolerQueue(drvPtr, sockPtr) == 0) { Push(sockPtr, readPtr); } break; case SOCK_MORE: drvPtr->stats.partial++; SockTimeout(sockPtr, &now, &drvPtr->recvwait); Push(sockPtr, readPtr); break; case SOCK_READY: if (SockQueue(sockPtr, &now) == NS_TIMEOUT) { Push(sockPtr, waitPtr); } break; /* * Already handled or normal cases */ case SOCK_ENTITYTOOLARGE: NS_FALL_THROUGH; /* fall through */ case SOCK_BADREQUEST: NS_FALL_THROUGH; /* fall through */ case SOCK_BADHEADER: NS_FALL_THROUGH; /* fall through */ case SOCK_TOOMANYHEADERS: NS_FALL_THROUGH; /* fall through */ case SOCK_CLOSE: SockRelease(sockPtr, s, errno); break; /* * Exceptions */ case SOCK_READERROR: NS_FALL_THROUGH; /* fall through */ case SOCK_CLOSETIMEOUT: NS_FALL_THROUGH; /* fall through */ case SOCK_ERROR: NS_FALL_THROUGH; /* fall through */ case SOCK_READTIMEOUT: NS_FALL_THROUGH; /* fall through */ case SOCK_SHUTERROR: NS_FALL_THROUGH; /* fall through */ case SOCK_WRITEERROR: NS_FALL_THROUGH; /* fall through */ case SOCK_WRITETIMEOUT: drvPtr->stats.errors++; Ns_Log(Warning, "sockread returned unexpected result %s (err %s); close socket (%d)", GetSockStateName(s), ((errno != 0) ? strerror(errno) : NS_EMPTY_STRING), sockPtr->sock); SockRelease(sockPtr, s, errno); break; } } else { /* * Potentially blocking driver, NS_DRIVER_ASYNC is not defined */ if (Ns_DiffTime(&sockPtr->timeout, &now, &diff) <= 0) { drvPtr->stats.errors++; Ns_Log(Notice, "read-ahead has some data, no async sock read ===== diff time %ld", Ns_DiffTime(&sockPtr->timeout, &now, &diff)); sockPtr->keep = NS_FALSE; SockRelease(sockPtr, SOCK_READTIMEOUT, 0); } else { if (SockQueue(sockPtr, &now) == NS_TIMEOUT) { Push(sockPtr, waitPtr); } } } } sockPtr = nextPtr; } /* * Attempt to queue any pending connection after reversing the * list to ensure oldest connections are tried first. */ if (waitPtr != NULL) { sockPtr = NULL; while ((nextPtr = waitPtr) != NULL) { waitPtr = nextPtr->nextPtr; Push(nextPtr, sockPtr); } while (sockPtr != NULL) { nextPtr = sockPtr->nextPtr; if (SockQueue(sockPtr, &now) == NS_TIMEOUT) { Push(sockPtr, waitPtr); } sockPtr = nextPtr; } } /* * If no connections are waiting, attempt to accept more. */ if (waitPtr == NULL) { /* * If configured, try to accept more than one request, under heavy load * this helps to process more requests */ SockState s; bool acceptMore = NS_TRUE; accepted = 0; while (acceptMore && accepted < drvPtr->acceptsize && drvPtr->queuesize < drvPtr->maxqueuesize ) { bool gotRequests = NS_FALSE; /* * Check for input data on all bind addresses. Stop checking, * when one round of checking on all addresses fails. 
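                 *
                 * For example, with acceptsize == 10 and two bind addresses,
                 * up to 10 accept rounds are attempted per poll cycle, each
                 * round visiting both listen sockets; the loop also stops as
                 * soon as queuesize reaches maxqueuesize.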
                 */
                for (n = 0; n < nrBindaddrs; n++) {
                    if ( PollIn(&pdata, drvPtr->pidx[n])
                         && (s = SockAccept(drvPtr, pdata.pfds[drvPtr->pidx[n]].fd, &sockPtr, &now)) != SOCK_ERROR) {

                        switch (s) {
                        case SOCK_SPOOL:
                            drvPtr->stats.spooled++;
                            if (SockSpoolerQueue(drvPtr, sockPtr) == 0) {
                                Push(sockPtr, readPtr);
                            }
                            break;

                        case SOCK_MORE:
                            drvPtr->stats.partial++;
                            SockTimeout(sockPtr, &now, &drvPtr->recvwait);
                            Push(sockPtr, readPtr);
                            break;

                        case SOCK_READY:
                            if (SockQueue(sockPtr, &now) == NS_TIMEOUT) {
                                Push(sockPtr, waitPtr);
                            }
                            break;

                        case SOCK_BADHEADER:      NS_FALL_THROUGH; /* fall through */
                        case SOCK_BADREQUEST:     NS_FALL_THROUGH; /* fall through */
                        case SOCK_CLOSE:          NS_FALL_THROUGH; /* fall through */
                        case SOCK_CLOSETIMEOUT:   NS_FALL_THROUGH; /* fall through */
                        case SOCK_ENTITYTOOLARGE: NS_FALL_THROUGH; /* fall through */
                        case SOCK_ERROR:          NS_FALL_THROUGH; /* fall through */
                        case SOCK_READERROR:      NS_FALL_THROUGH; /* fall through */
                        case SOCK_READTIMEOUT:    NS_FALL_THROUGH; /* fall through */
                        case SOCK_SHUTERROR:      NS_FALL_THROUGH; /* fall through */
                        case SOCK_TOOMANYHEADERS: NS_FALL_THROUGH; /* fall through */
                        case SOCK_WRITEERROR:     NS_FALL_THROUGH; /* fall through */
                        case SOCK_WRITETIMEOUT:
                            Ns_Fatal("driver: SockAccept returned: %s", GetSockStateName(s));
                        }
                        accepted++;
                        gotRequests = NS_TRUE;
#ifdef __APPLE__
                        /*
                         * On Darwin, the first accept() typically succeeds,
                         * but it is useless to try again, since this always
                         * leads to an EAGAIN.
                         */
                        acceptMore = NS_FALSE;
                        break;
#endif
                    }
                }
                if (!gotRequests) {
                    acceptMore = NS_FALSE;
                }
            }
            if (accepted > 1) {
                Ns_Log(Notice, "... sockAccept accepted %d connections", accepted);
            }
        }

        /*
         * Check for shutdown and get the list of any closing or
         * keep-alive sockets.
         */
        Ns_MutexLock(&drvPtr->lock);
        sockPtr          = drvPtr->closePtr;
        drvPtr->closePtr = NULL;
        flags            = drvPtr->flags;
        Ns_MutexUnlock(&drvPtr->lock);

        stopping = ((flags & DRIVER_SHUTDOWN) != 0u);

        /*
         * Update the timeout for each closing socket and add it to the
         * close list if some data has been read from the socket
         * (i.e., it's not a closing keep-alive connection).
         */
        while (sockPtr != NULL) {
            nextPtr = sockPtr->nextPtr;
            if (sockPtr->keep) {

                assert(drvPtr == sockPtr->drvPtr);

                Ns_Log(DriverDebug, "setting keepwait %ld.%6ld for socket %d",
                       drvPtr->keepwait.sec, drvPtr->keepwait.usec, sockPtr->sock);
                SockTimeout(sockPtr, &now, &drvPtr->keepwait);
                Push(sockPtr, readPtr);
            } else {

                /*
                 * Purely packet-oriented drivers set the fd to
                 * NS_INVALID_SOCKET on close. Since we cannot "shutdown" a
                 * UDP socket for writing, we bypass this call.
                 */
                assert(drvPtr == sockPtr->drvPtr);

                if (sockPtr->sock == NS_INVALID_SOCKET) {
                    SockRelease(sockPtr, SOCK_CLOSE, errno);
                    Ns_Log(DriverDebug, "DRIVER SockRelease: errno %d drvPtr->closewait %ld.%6ld",
                           errno, drvPtr->closewait.sec, drvPtr->closewait.usec);

                } else if (shutdown(sockPtr->sock, SHUT_WR) != 0) {
                    SockRelease(sockPtr, SOCK_SHUTERROR, errno);

                } else {
                    Ns_Log(DriverDebug, "setting closewait %ld.%6ld for socket %d",
                           drvPtr->closewait.sec, drvPtr->closewait.usec, sockPtr->sock);
                    SockTimeout(sockPtr, &now, &drvPtr->closewait);
                    Push(sockPtr, closePtr);
                }
            }
            sockPtr = nextPtr;
        }

        /*
         * Close the active drivers if shutdown is pending.
*/ if (stopping) { for (n = 0; n < nrBindaddrs; n++) { ns_sockclose(drvPtr->listenfd[n]); drvPtr->listenfd[n] = NS_INVALID_SOCKET; } } } PollFree(&pdata); Ns_Log(Notice, "exiting"); Ns_MutexLock(&drvPtr->lock); drvPtr->flags |= DRIVER_STOPPED; Ns_CondBroadcast(&drvPtr->cond); Ns_MutexUnlock(&drvPtr->lock); } static void PollCreate(PollData *pdata) { NS_NONNULL_ASSERT(pdata != NULL); memset(pdata, 0, sizeof(PollData)); } static void PollFree(PollData *pdata) { NS_NONNULL_ASSERT(pdata != NULL); ns_free(pdata->pfds); memset(pdata, 0, sizeof(PollData)); } static void PollReset(PollData *pdata) { NS_NONNULL_ASSERT(pdata != NULL); pdata->nfds = 0u; pdata->timeout.sec = TIME_T_MAX; pdata->timeout.usec = 0; } static NS_POLL_NFDS_TYPE PollSet(PollData *pdata, NS_SOCKET sock, short type, const Ns_Time *timeoutPtr) { NS_NONNULL_ASSERT(pdata != NULL); /* * Grow the pfds array if necessary. */ if (unlikely(pdata->nfds >= pdata->maxfds)) { pdata->maxfds += 100u; pdata->pfds = ns_realloc(pdata->pfds, pdata->maxfds * sizeof(struct pollfd)); } /* * Set the next pollfd struct with this socket. */ pdata->pfds[pdata->nfds].fd = sock; pdata->pfds[pdata->nfds].events = type; pdata->pfds[pdata->nfds].revents = 0; /* * Check for new minimum timeout. */ if (timeoutPtr != NULL && Ns_DiffTime(timeoutPtr, &pdata->timeout, NULL) < 0) { pdata->timeout = *timeoutPtr; } return pdata->nfds++; } static int PollWait(const PollData *pdata, int timeout) { int n; NS_NONNULL_ASSERT(pdata != NULL); do { n = ns_poll(pdata->pfds, pdata->nfds, timeout); } while (n < 0 && errno == NS_EINTR); if (n < 0) { Ns_Fatal("PollWait: ns_poll() failed: %s", ns_sockstrerror(ns_sockerrno)); } return n; } /* *---------------------------------------------------------------------- * * RequestNew * * Prepares for reading from the socket, allocates a "Request" * struct for the given socket. It might be reused from the pool * or freshly allocated. Counterpart of RequestFree(). * * Results: * None * * Side effects: * None * *---------------------------------------------------------------------- */ static void RequestNew(Sock *sockPtr) { Request *reqPtr; bool reuseRequest = NS_TRUE; NS_NONNULL_ASSERT(sockPtr != NULL); /* * Try to get a request from the pool of allocated Requests. */ Ns_MutexLock(&reqLock); reqPtr = firstReqPtr; if (likely(reqPtr != NULL)) { firstReqPtr = reqPtr->nextPtr; } else { reuseRequest = NS_FALSE; } Ns_MutexUnlock(&reqLock); if (reuseRequest) { Ns_Log(DriverDebug, "RequestNew reuses a Request"); } /* * In case we failed, allocate a new Request. */ if (reqPtr == NULL) { Ns_Log(DriverDebug, "RequestNew gets a fresh Request"); reqPtr = ns_calloc(1u, sizeof(Request)); Tcl_DStringInit(&reqPtr->buffer); reqPtr->headers = Ns_SetCreate(NULL); } sockPtr->reqPtr = reqPtr; } /* *---------------------------------------------------------------------- * * RequestFree -- * * Free/clean a socket request structure. This routine is called * at the end of connection processing or on a socket which * times out during async read-ahead. Counterpart of RequestNew(). * * Results: * None. * * Side effects: * None. 
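 *
 *      Note: freed Request structures are pushed onto the global
 *      "firstReqPtr" free list under "reqLock" and popped again by
 *      RequestNew(), so under steady-state traffic no fresh Request
 *      allocations are needed.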
* *---------------------------------------------------------------------- */ static void RequestFree(Sock *sockPtr) { Request *reqPtr; bool keep; NS_NONNULL_ASSERT(sockPtr != NULL); reqPtr = sockPtr->reqPtr; assert(reqPtr != NULL); Ns_Log(DriverDebug, "=== RequestFree cleans %p (avail %" PRIuz " keep %d length %" PRIuz " contentLength %" PRIuz ")", (void *)reqPtr, reqPtr->avail, sockPtr->keep, reqPtr->length, reqPtr->contentLength); keep = (sockPtr->keep) && (reqPtr->avail > reqPtr->contentLength); if (keep) { size_t leftover = reqPtr->avail - reqPtr->contentLength; const char *offset = reqPtr->buffer.string + ((size_t)reqPtr->buffer.length - leftover); Ns_Log(DriverDebug, "setting leftover to %" PRIuz " bytes", leftover); /* * Here it is safe to move the data in the buffer, although the * reqPtr->content might point to it, since we re-init the content. In * case the terminating null character was written to the end of the * previous buffer, we have to restore the first character. */ memmove(reqPtr->buffer.string, offset, leftover); if (reqPtr->savedChar != '\0') { reqPtr->buffer.string[0] = reqPtr->savedChar; } Tcl_DStringSetLength(&reqPtr->buffer, (int)leftover); LogBuffer(DriverDebug, "KEEP BUFFER", reqPtr->buffer.string, leftover); reqPtr->leftover = leftover; } else { /* * Clean large buffers in order to avoid memory growth on huge * uploads (when maxupload is huge) */ /*fprintf(stderr, "=== reuse buffer size %d avail %d dynamic %d\n", reqPtr->buffer.length, reqPtr->buffer.spaceAvl, reqPtr->buffer.string == reqPtr->buffer.staticSpace);*/ if (Tcl_DStringLength(&reqPtr->buffer) > 65536) { Tcl_DStringFree(&reqPtr->buffer); } else { /* * Reuse buffer, but set length to 0. */ Tcl_DStringSetLength(&reqPtr->buffer, 0); } reqPtr->leftover = 0u; } reqPtr->next = NULL; reqPtr->content = NULL; reqPtr->length = 0u; reqPtr->contentLength = 0u; reqPtr->expectedLength = 0u; reqPtr->chunkStartOff = 0u; reqPtr->chunkWriteOff = 0u; reqPtr->roff = 0u; reqPtr->woff = 0u; reqPtr->coff = 0u; reqPtr->avail = 0u; reqPtr->savedChar = '\0'; Ns_SetTrunc(reqPtr->headers, 0u); if (reqPtr->auth != NULL) { Ns_SetFree(reqPtr->auth); reqPtr->auth = NULL; } if (reqPtr->request.line != NULL) { Ns_Log(DriverDebug, "RequestFree calls Ns_ResetRequest on %p", (void*)&reqPtr->request); Ns_ResetRequest(&reqPtr->request); } else { Ns_Log(DriverDebug, "RequestFree does not call Ns_ResetRequest on %p", (void*)&reqPtr->request); } if (!keep) { /* * Push the reqPtr to the pool for reuse in other connections. */ sockPtr->reqPtr = NULL; Ns_MutexLock(&reqLock); reqPtr->nextPtr = firstReqPtr; firstReqPtr = reqPtr; Ns_MutexUnlock(&reqLock); } else { /* * Keep the partly cleaned up reqPtr associated with the connection. */ Ns_Log(DriverDebug, "=== KEEP request structure in sockPtr (don't push into the pool)"); } } /* *---------------------------------------------------------------------- * * SockQueue -- * * Puts socket into connection queue * * Results: * NS_OK if queued, * NS_ERROR if socket closed because of error * NS_TIMEOUT if queue is full * * Side effects: * None. * *---------------------------------------------------------------------- */ static Ns_ReturnCode SockQueue(Sock *sockPtr, const Ns_Time *timePtr) { Ns_ReturnCode result; NS_NONNULL_ASSERT(sockPtr != NULL); /* * Verify the conditions. Request struct must exist already. */ assert(sockPtr->reqPtr != NULL); SockSetServer(sockPtr); assert(sockPtr->servPtr != NULL); /* * Actual queueing, if not ready spool to the waiting list. 
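     *
     * NsQueueConn() returns false when the connection queue of the target
     * server cannot accept the socket right now; the NS_TIMEOUT result
     * below causes the caller to keep the socket on its "waitPtr" list and
     * to retry on the next spin.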
     */
    if (!NsQueueConn(sockPtr, timePtr)) {
        result = NS_TIMEOUT;
    } else {
        result = NS_OK;
    }

    return result;
}

/*
 *----------------------------------------------------------------------
 *
 * SockPoll --
 *
 *      Arrange for given Sock to be monitored.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      Sock fd will be monitored for readability on next spin of
 *      DriverThread.
 *
 *----------------------------------------------------------------------
 */
static void
SockPoll(Sock *sockPtr, short type, PollData *pdata)
{
    NS_NONNULL_ASSERT(sockPtr != NULL);
    NS_NONNULL_ASSERT(pdata != NULL);

    sockPtr->pidx = PollSet(pdata, sockPtr->sock, type, &sockPtr->timeout);
}

/*
 *----------------------------------------------------------------------
 *
 * SockTimeout --
 *
 *      Update the given socket with a new timeout.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      The socket timeout is set to nowPtr + timeout.
 *
 *----------------------------------------------------------------------
 */
static void
SockTimeout(Sock *sockPtr, const Ns_Time *nowPtr, const Ns_Time *timeout)
{
    NS_NONNULL_ASSERT(sockPtr != NULL);
    sockPtr->timeout = *nowPtr;
    Ns_IncrTime(&sockPtr->timeout, timeout->sec, timeout->usec);
}

/*
 *----------------------------------------------------------------------
 *
 * SockAccept --
 *
 *      Accept and initialize a new Sock in sockPtrPtr.
 *
 * Results:
 *      SOCK_READY, SOCK_MORE, SOCK_SPOOL,
 *      SOCK_ERROR + NULL sockPtr.
 *
 * Side effects:
 *      Read-ahead may be attempted on new socket.
 *
 *----------------------------------------------------------------------
 */
static SockState
SockAccept(Driver *drvPtr, NS_SOCKET sock, Sock **sockPtrPtr, const Ns_Time *nowPtr)
{
    Sock                   *sockPtr;
    SockState               sockStatus;
    NS_DRIVER_ACCEPT_STATUS status;

    NS_NONNULL_ASSERT(drvPtr != NULL);

    sockPtr = SockNew(drvPtr);

    /*
     * Accept the new connection.
     */
    status = DriverAccept(sockPtr, sock);

    if (unlikely(status == NS_DRIVER_ACCEPT_ERROR)) {
        sockStatus = SOCK_ERROR;

        /*
         * We reach this place frequently, especially on Linux, when we try
         * to accept multiple connections in one sweep. Usually, the errno
         * is EAGAIN.
         */
        Ns_MutexLock(&drvPtr->lock);
        sockPtr->nextPtr = drvPtr->sockPtr;
        drvPtr->sockPtr = sockPtr;
        Ns_MutexUnlock(&drvPtr->lock);
        sockPtr = NULL;

    } else {
        sockPtr->acceptTime = *nowPtr;
        drvPtr->queuesize++;

        if (status == NS_DRIVER_ACCEPT_DATA) {

            /*
             * If there is already data present then read it without
             * polling if we're in async mode.
             */
            if ((drvPtr->opts & NS_DRIVER_ASYNC) != 0u) {
                sockStatus = SockRead(sockPtr, 0, nowPtr);

                if ((int)sockStatus < 0) {
                    Ns_Log(DriverDebug, "SockRead returned error %s",
                           GetSockStateName(sockStatus));

                    SockRelease(sockPtr, sockStatus, errno);
                    sockStatus = SOCK_ERROR;
                    sockPtr = NULL;
                }
            } else {
                /*
                 * Queue this socket without reading; NsGetRequest() in the
                 * connection thread will perform the actual reading of the
                 * request.
                 */
                sockStatus = SOCK_READY;
            }
        } else if (status == NS_DRIVER_ACCEPT_QUEUE) {

            /*
             * We need to call RequestNew() to make sure the socket has a
             * request structure allocated, otherwise NsGetRequest() will
             * call SockRead(), which is not what this driver wants.
             */
            if (sockPtr->reqPtr == NULL) {
                RequestNew(sockPtr);
            }
            sockStatus = SOCK_READY;
        } else {
            sockStatus = SOCK_MORE;
        }
    }

    *sockPtrPtr = sockPtr;

    return sockStatus;
}

/*
 *----------------------------------------------------------------------
 *
 * SockNew --
 *
 *      Allocate and/or initialize a Sock structure. Counterpart of
 *      SockRelease().
 *
 * Results:
 *      SockPtr
 *
 * Side effects:
 *      Potentially new memory is allocated.
 *
 *----------------------------------------------------------------------
 */
static Sock *
SockNew(Driver *drvPtr)
{
    Sock *sockPtr;

    NS_NONNULL_ASSERT(drvPtr != NULL);

    Ns_MutexLock(&drvPtr->lock);
    sockPtr = drvPtr->sockPtr;
    if (likely(sockPtr != NULL)) {
        drvPtr->sockPtr = sockPtr->nextPtr;
        sockPtr->keep = NS_FALSE;
    }
    Ns_MutexUnlock(&drvPtr->lock);

    if (sockPtr == NULL) {
        size_t sockSize = sizeof(Sock) + (nsconf.nextSlsId * sizeof(Ns_Callback *));

        sockPtr = ns_calloc(1u, sockSize);
        sockPtr->drvPtr = drvPtr;
    } else {
        sockPtr->tfd    = 0;
        sockPtr->taddr  = NULL;
        sockPtr->flags  = 0u;
        sockPtr->arg    = NULL;
        sockPtr->recvSockState = NS_SOCK_NONE;
    }

    return sockPtr;
}

/*
 *----------------------------------------------------------------------
 *
 * SockRelease --
 *
 *      Close a socket and release the connection structure for
 *      re-use.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      None.
 *
 *----------------------------------------------------------------------
 */
static void
SockRelease(Sock *sockPtr, SockState reason, int err)
{
    Driver *drvPtr;

    NS_NONNULL_ASSERT(sockPtr != NULL);

    Ns_Log(DriverDebug, "SockRelease reason %s err %d (sock %d)",
           GetSockStateName(reason), err, sockPtr->sock);

    drvPtr = sockPtr->drvPtr;
    assert(drvPtr != NULL);

    SockError(sockPtr, reason, err);

    if (sockPtr->sock != NS_INVALID_SOCKET) {
        SockClose(sockPtr, (int)NS_FALSE);
    } else {
        Ns_Log(DriverDebug, "SockRelease bypasses SockClose, since we have an invalid socket");
    }
    NsSlsCleanup(sockPtr);

    drvPtr->queuesize--;

    if (sockPtr->reqPtr != NULL) {
        Ns_Log(DriverDebug, "SockRelease calls RequestFree");
        RequestFree(sockPtr);
    }

    Ns_MutexLock(&drvPtr->lock);
    sockPtr->nextPtr = drvPtr->sockPtr;
    drvPtr->sockPtr  = sockPtr;
    Ns_MutexUnlock(&drvPtr->lock);
}

/*
 *----------------------------------------------------------------------
 *
 * SockError --
 *
 *      Log an error message for the given socket.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      None.
 *
 *----------------------------------------------------------------------
 */
static void
SockError(Sock *sockPtr, SockState reason, int err)
{
    const char *errMsg = NULL;

    NS_NONNULL_ASSERT(sockPtr != NULL);

    switch (reason) {
    case SOCK_READY:
    case SOCK_SPOOL:
    case SOCK_MORE:
    case SOCK_CLOSE:
    case SOCK_CLOSETIMEOUT:
        /* This is normal, never log. */
        break;

    case SOCK_READTIMEOUT:
        /*
         * For this case, whether this is acceptable or not
         * depends upon whether this sock was a keep-alive
         * that we were allowing to 'linger'.
         */
        if (!sockPtr->keep) {
            errMsg = "Timeout during read";
        }
        break;

    case SOCK_WRITETIMEOUT:
        errMsg = "Timeout during write";
        break;

    case SOCK_READERROR:
        errMsg = "Unable to read request";
        break;

    case SOCK_WRITEERROR:
        errMsg = "Unable to write request";
        break;

    case SOCK_SHUTERROR:
        errMsg = "Unable to shutdown socket";
        break;

    case SOCK_BADREQUEST:
        errMsg = "Bad Request";
        SockSendResponse(sockPtr, 400, errMsg);
        break;

    case SOCK_TOOMANYHEADERS:
        errMsg = "Too Many Request Headers";
        SockSendResponse(sockPtr, 414, errMsg);
        break;

    case SOCK_BADHEADER:
        errMsg = "Invalid Request Header";
        SockSendResponse(sockPtr, 400, errMsg);
        break;

    case SOCK_ENTITYTOOLARGE:
        errMsg = "Request Entity Too Large";
        SockSendResponse(sockPtr, 413, errMsg);
        break;

    case SOCK_ERROR:
        errMsg = "Unknown Error";
        SockSendResponse(sockPtr, 400, errMsg);
        break;
    }

    if (errMsg != NULL) {
        char ipString[NS_IPADDR_SIZE];

        Ns_Log(DriverDebug, "SockError: %s (%d: %s), sock: %d, peer: [%s]:%d, request: %.99s",
               errMsg,
               err, (err != 0) ?
               strerror(err) : NS_EMPTY_STRING,
               sockPtr->sock,
               ns_inet_ntop((struct sockaddr *)&(sockPtr->sa), ipString, sizeof(ipString)),
               Ns_SockaddrGetPort((struct sockaddr *)&(sockPtr->sa)),
               (sockPtr->reqPtr != NULL) ? sockPtr->reqPtr->buffer.string : NS_EMPTY_STRING);
    }
}

/*
 *----------------------------------------------------------------------
 *
 * SockSendResponse --
 *
 *      Send an HTTP response directly to the client using the
 *      driver callback.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      May not send the complete response to the client
 *      if the connection socket is not writable.
 *
 *----------------------------------------------------------------------
 */
static void
SockSendResponse(Sock *sockPtr, int code, const char *errMsg)
{
    struct iovec iov[3];
    char         header[32];
    ssize_t      sent, tosend;

    NS_NONNULL_ASSERT(sockPtr != NULL);
    NS_NONNULL_ASSERT(errMsg != NULL);

    snprintf(header, sizeof(header), "HTTP/1.0 %d ", code);
    iov[0].iov_base = header;
    iov[0].iov_len  = strlen(header);
    iov[1].iov_base = (void *)errMsg;
    iov[1].iov_len  = strlen(errMsg);
    iov[2].iov_base = (void *)"\r\n\r\n";
    iov[2].iov_len  = 4u;
    tosend = (ssize_t)(iov[0].iov_len + iov[1].iov_len + iov[2].iov_len);

    sent = NsDriverSend(sockPtr, iov, 3, 0u);
    if (sent < tosend) {
        Ns_Log(Warning, "Driver: partial write while sending response;"
               " %" PRIdz " < %" PRIdz, sent, tosend);
    }

    /*
     * In case we have a request structure, write a complaint about the bad
     * request to the system log.
     */
    if (sockPtr->reqPtr != NULL) {
        Request    *reqPtr = sockPtr->reqPtr;
        const char *requestLine = (reqPtr->request.line != NULL)
            ? reqPtr->request.line
            : NS_EMPTY_STRING;

        (void)ns_inet_ntop((struct sockaddr *)&(sockPtr->sa),
                           sockPtr->reqPtr->peer, NS_IPADDR_SIZE);

        /*
         * Check whether the bad request looks like a TLS handshake. If so,
         * there is no need to print out the received buffer.
         */
        if (requestLine[0] == (char)0x16 && requestLine[1] >= 3 && requestLine[2] == 1) {
            Ns_Log(Warning, "invalid request %d (%s) from peer %s: received TLS handshake on a non-TLS connection",
                   code, errMsg, reqPtr->peer);
        } else {
            Tcl_DString dsReqLine;

            Tcl_DStringInit(&dsReqLine);
            Ns_Log(Warning, "invalid request: %d (%s) from peer %s request '%s' offsets: read %" PRIuz
                   " write %" PRIuz " content %" PRIuz " avail %" PRIuz,
                   code, errMsg,
                   reqPtr->peer,
                   Ns_DStringAppendPrintable(&dsReqLine, NS_FALSE, requestLine, strlen(requestLine)),
                   reqPtr->roff,
                   reqPtr->woff,
                   reqPtr->coff,
                   reqPtr->avail);
            Tcl_DStringFree(&dsReqLine);

            LogBuffer(Warning, "REQ BUFFER", reqPtr->buffer.string, (size_t)reqPtr->buffer.length);
        }
    } else {
        Ns_Log(Warning, "invalid request: %d (%s) - no request information available",
               code, errMsg);
    }
}

/*
 *----------------------------------------------------------------------
 *
 * SockTrigger --
 *
 *      Wake up the DriverThread from its blocking ns_poll().
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      The DriverThread will wake up.
 *
 *----------------------------------------------------------------------
 */
static void
SockTrigger(NS_SOCKET sock)
{
    if (send(sock, NS_EMPTY_STRING, 1, 0) != 1) {
        const char *errstr = ns_sockstrerror(ns_sockerrno);

        Ns_Log(Error, "driver: trigger send() failed: %s", errstr);
    }
}

/*
 *----------------------------------------------------------------------
 *
 * SockClose --
 *
 *      Closes the connection socket and does all cleanups. The input
 *      parameter "keep" might be NS_TRUE/NS_FALSE or -1 if undecided.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      None.
 *
 *----------------------------------------------------------------------
 */
static void
SockClose(Sock *sockPtr, int keep)
{
    NS_NONNULL_ASSERT(sockPtr != NULL);

    if (keep != 0) {
        bool driverKeep = DriverKeep(sockPtr);
        keep = (int)driverKeep;
    }
    if (keep == (int)NS_FALSE) {
        DriverClose(sockPtr);
    }

    Ns_MutexLock(&sockPtr->drvPtr->lock);
    sockPtr->keep = (bool)keep;
    Ns_MutexUnlock(&sockPtr->drvPtr->lock);

    /*
     * Unconditionally remove the temporary file; the connection thread
     * should take care of very large uploads.
     */
    if (sockPtr->tfile != NULL) {
        unlink(sockPtr->tfile);
        ns_free(sockPtr->tfile);
        sockPtr->tfile = NULL;

        if (sockPtr->tfd > 0) {
            /*
             * Close and reset fd. The fd should be > 0 unless we are in
             * error conditions.
             */
            (void) ns_close(sockPtr->tfd);
        }
        sockPtr->tfd = 0;

    } else if (sockPtr->tfd > 0) {
        /*
         * This must be an fd allocated via Ns_GetTemp().
         */
        Ns_ReleaseTemp(sockPtr->tfd);
        sockPtr->tfd = 0;
    }

#ifndef _WIN32
    /*
     * Un-map temp file used for spooled content.
     */
    if (sockPtr->taddr != NULL) {
        munmap(sockPtr->taddr, (size_t)sockPtr->tsize);
        sockPtr->taddr = NULL;
    }
#endif
}

/*
 *----------------------------------------------------------------------
 *
 * ChunkedDecode --
 *
 *      Reads the content from the incoming request buffer and tries
 *      to decode chunked encoding parts. The function can be called
 *      repeatedly with incomplete input and optionally overwrites the
 *      buffer with the decoded data. The decoded data is always
 *      shorter than the encoded data.
 *
 * Results:
 *      SOCK_READY when the chunk was complete, SOCK_MORE when more
 *      data is required, or some error condition.
 *
 * Side effects:
 *      Updates the buffer if update is true (and adjusts
 *      reqPtr->chunkWriteOff). Always updates reqPtr->chunkStartOff to
 *      allow incremental operations.
 *
 *----------------------------------------------------------------------
 */
static SockState
ChunkedDecode(Request *reqPtr, bool update)
{
    const Tcl_DString *bufPtr;
    const char        *end, *chunkStart;
    SockState          result = SOCK_READY;

    NS_NONNULL_ASSERT(reqPtr != NULL);

    bufPtr = &reqPtr->buffer;
    end = bufPtr->string + bufPtr->length;
    chunkStart = bufPtr->string + reqPtr->chunkStartOff;

    while (reqPtr->chunkStartOff < (size_t)bufPtr->length) {
        char *p = strstr(chunkStart, "\r\n");
        long  chunkLength;

        if (p == NULL) {
            Ns_Log(DriverDebug, "ChunkedDecode: chunk did not find end-of-line");
            result = SOCK_MORE;
            break;
        }

        *p = '\0';
        chunkLength = strtol(chunkStart, NULL, 16);
        *p = '\r';

        if (chunkLength < 0) {
            Ns_Log(Warning, "ChunkedDecode: negative chunk length");
            result = SOCK_BADREQUEST;
            break;
        }

        if (p + 2 + chunkLength > end) {
            Ns_Log(DriverDebug, "ChunkedDecode: chunk length past end of buffer");
            result = SOCK_MORE;
            break;
        }
        if (update) {
            char *writeBuffer = bufPtr->string + reqPtr->chunkWriteOff;

            memmove(writeBuffer, p + 2, (size_t)chunkLength);
            reqPtr->chunkWriteOff += (size_t)chunkLength;
            *(writeBuffer + chunkLength) = '\0';
        }
        reqPtr->chunkStartOff += (size_t)(p - chunkStart) + 4u + (size_t)chunkLength;
        chunkStart = bufPtr->string + reqPtr->chunkStartOff;
    }

    return result;
}

/*
 *----------------------------------------------------------------------
 *
 * SockRead --
 *
 *      Read content from the given Sock, processing the input as
 *      necessary. This is the core callback routine designed to
 *      either be called repeatedly within the DriverThread during
 *      an async read-ahead or in a blocking loop in NsGetRequest()
 *      at the start of connection processing.
 *
 * Results:
 *      SOCK_READY: Request is ready for processing.
 *      SOCK_MORE:  More input is required.
* SOCK_ERROR: Client drop or timeout. * SOCK_SPOOL: Pass input handling to spooler * SOCK_CLOSE: peer closed connection * SOCK_BADREQUEST * SOCK_BADHEADER * SOCK_TOOMANYHEADERS * * Side effects: * The Request structure will be built up for use by the * connection thread. Also, before returning SOCK_READY, * the next byte to read mark and bytes available are set * to the beginning of the content, just beyond the headers. * * Contents may be spooled into temp file and mmap-ed * *---------------------------------------------------------------------- */ static SockState SockRead(Sock *sockPtr, int spooler, const Ns_Time *timePtr) { const Driver *drvPtr; Request *reqPtr; Tcl_DString *bufPtr; struct iovec buf; char tbuf[16384]; size_t buflen, nread; ssize_t n; SockState resultState; NS_NONNULL_ASSERT(sockPtr != NULL); drvPtr = sockPtr->drvPtr; tbuf[0] = '\0'; /* * In case of "keepwait", the accept time is not meaningful and * reset to 0. In such cases, update "acceptTime" to the actual * begin of a request. This part is intended for async drivers. */ if (sockPtr->acceptTime.sec == 0) { assert(timePtr != NULL); sockPtr->acceptTime = *timePtr; } /* * Initialize request structure if needed. */ if (sockPtr->reqPtr == NULL) { RequestNew(sockPtr); } /* * On the first read, attempt to read-ahead "bufsize" bytes. * Otherwise, read only the number of bytes left in the * content. */ reqPtr = sockPtr->reqPtr; bufPtr = &reqPtr->buffer; if (reqPtr->length == 0u) { nread = drvPtr->bufsize; } else { nread = reqPtr->length - reqPtr->avail; } /* * Grow the buffer to include space for the next bytes. */ buflen = (size_t)bufPtr->length; n = (ssize_t)(buflen + nread); if (unlikely(n > drvPtr->maxinput)) { n = (ssize_t)drvPtr->maxinput; nread = (size_t)n - buflen; if (nread == 0u) { Ns_Log(DriverDebug, "SockRead: maxinput reached %" TCL_LL_MODIFIER "d", drvPtr->maxinput); return SOCK_ERROR; } } /* * Use temp file for content larger than "readahead" bytes. */ #ifndef _WIN32 if (reqPtr->coff > 0u /* We are in the content part (after the header) */ && !reqPtr->chunkStartOff /* Never spool chunked encoded data since we decode in memory */ && reqPtr->length > (size_t)drvPtr->readahead /* We need more data */ && sockPtr->tfd <= 0 /* We have no spool fd */ ) { const DrvSpooler *spPtr = &drvPtr->spooler; Ns_Log(DriverDebug, "SockRead: require tmp file for content spooling (length %" PRIuz" > readahead " "%" TCL_LL_MODIFIER "d)", reqPtr->length, drvPtr->readahead); /* * In driver mode send this Sock to the spooler thread if * it is running */ if (spooler == 0 && spPtr->threads > 0) { return SOCK_SPOOL; } /* * If "maxupload" is specified and content size exceeds the configured * values, spool uploads into normal temp file (not deleted). We do * not want to map such large files into memory. */ if (drvPtr->maxupload > 0 && reqPtr->length > (size_t)drvPtr->maxupload ) { size_t tfileLength = strlen(drvPtr->uploadpath) + 16u; sockPtr->tfile = ns_malloc(tfileLength); snprintf(sockPtr->tfile, tfileLength, "%s/%d.XXXXXX", drvPtr->uploadpath, sockPtr->sock); sockPtr->tfd = ns_mkstemp(sockPtr->tfile); if (sockPtr->tfd == NS_INVALID_FD) { Ns_Log(Error, "SockRead: cannot create spool file with template '%s': %s", sockPtr->tfile, strerror(errno)); } } else { /* * Get a temporary fd. These FDs are used for mmapping. 
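         *
         * Numeric example (settings assumed for illustration): with
         * readahead == 16384 and maxupload == 10000000, a 1 MB upload
         * is spooled via Ns_GetTemp() and later mmap()ed for content
         * access, while a 100 MB upload goes to a named temp file
         * under "uploadpath" and is never mapped into memory.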
         */
            sockPtr->tfd = Ns_GetTemp();
        }

        if (unlikely(sockPtr->tfd == NS_INVALID_FD)) {
            Ns_Log(DriverDebug, "SockRead: spool fd invalid");
            return SOCK_ERROR;
        }

        n = (ssize_t)((size_t)bufPtr->length - reqPtr->coff);
        assert(n >= 0);
        if (ns_write(sockPtr->tfd, bufPtr->string + reqPtr->coff, (size_t)n) != n) {
            return SOCK_WRITEERROR;
        }
        Tcl_DStringSetLength(bufPtr, 0);
    }
#endif

    if (sockPtr->tfd > 0) {
        buf.iov_base = tbuf;
        buf.iov_len = MIN(nread, sizeof(tbuf));
    } else {
        Tcl_DStringSetLength(bufPtr, (int)(buflen + nread));
        buf.iov_base = bufPtr->string + reqPtr->woff;
        buf.iov_len = nread;
    }

    if (reqPtr->leftover > 0u) {
        /*
         * There is some leftover in the buffer; don't read, but take the
         * leftover as input instead.
         */
        n = (ssize_t)reqPtr->leftover;
        reqPtr->leftover = 0u;
        buflen = 0u;
        Ns_Log(DriverDebug, "SockRead receive from leftover %" PRIdz " bytes", n);
    } else {
        /*
         * Actually receive some data from the driver.
         */
        n = NsDriverRecv(sockPtr, &buf, 1, NULL);
        Ns_Log(DriverDebug, "SockRead receive from network %" PRIdz " bytes sockState %.2x",
               n, (int)sockPtr->recvSockState);
    }

    {
        Ns_SockState nsSockState = sockPtr->recvSockState;
        /*
         * The nsSockState has one of the following values, when provided:
         *
         *     NS_SOCK_READ, NS_SOCK_DONE, NS_SOCK_AGAIN, NS_SOCK_EXCEPTION,
         *     NS_SOCK_TIMEOUT
         */
        switch (nsSockState) {
        case NS_SOCK_TIMEOUT: NS_FALL_THROUGH; /* fall through */
        case NS_SOCK_EXCEPTION:
            return SOCK_READERROR;

        case NS_SOCK_AGAIN:
            Tcl_DStringSetLength(bufPtr, (int)buflen);
            return SOCK_MORE;

        case NS_SOCK_DONE:
            return SOCK_CLOSE;

        case NS_SOCK_READ:
            break;

        case NS_SOCK_CANCEL: NS_FALL_THROUGH; /* fall through */
        case NS_SOCK_EXIT:   NS_FALL_THROUGH; /* fall through */
        case NS_SOCK_INIT:   NS_FALL_THROUGH; /* fall through */
        case NS_SOCK_WRITE:
            Ns_Log(Warning, "SockRead received unexpected state %.2x from driver", nsSockState);
            return SOCK_READERROR;

        case NS_SOCK_NONE:
            /*
             * Old-style state management based on "n" and "errno", which is
             * more fragile. We keep it for old-style drivers.
             */
            if (n < 0) {
                Tcl_DStringSetLength(bufPtr, (int)buflen);
                /*
                 * The driver returns -1 when the peer closed the connection,
                 * but clears errno so that we can distinguish this case from
                 * error conditions.
                 */
                if (errno == 0) {
                    return SOCK_CLOSE;
                }
                return SOCK_READERROR;
            }

            if (n == 0) {
                Tcl_DStringSetLength(bufPtr, (int)buflen);
                return SOCK_MORE;
            }
            break;
        }
    }

    if (sockPtr->tfd > 0) {
        if (ns_write(sockPtr->tfd, tbuf, (size_t)n) != n) {
            return SOCK_WRITEERROR;
        }
    } else {
        Tcl_DStringSetLength(bufPtr, (int)(buflen + (size_t)n));
    }

    reqPtr->woff  += (size_t)n;
    reqPtr->avail += (size_t)n;

    /*
     * This driver needs the raw buffer; it is a binary or non-HTTP request.
     */
    if ((drvPtr->opts & NS_DRIVER_NOPARSE) != 0u) {
        return SOCK_READY;
    }

    resultState = SockParse(sockPtr);

    return resultState;
}

/*----------------------------------------------------------------------
 *
 * LogBuffer --
 *
 *      Debug function to output buffer content when the provided severity is
 *      enabled. The function prints just visible characters and space as is
 *      and prints the hex code otherwise.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      Writes to error.log
 *
 *----------------------------------------------------------------------
 */
static void
LogBuffer(Ns_LogSeverity severity, const char *msg, const char *buffer, size_t len)
{
    Tcl_DString ds;

    NS_NONNULL_ASSERT(msg != NULL);
    NS_NONNULL_ASSERT(buffer != NULL);

    if (Ns_LogSeverityEnabled(severity)) {
        Tcl_DStringInit(&ds);
        Tcl_DStringAppend(&ds, msg, -1);
        Tcl_DStringAppend(&ds, ": ", 2);
        (void)Ns_DStringAppendPrintable(&ds, NS_FALSE, buffer, len);

        Ns_Log(severity, "%s", ds.string);
        Tcl_DStringFree(&ds);
    }
}

/*----------------------------------------------------------------------
 *
 * EndOfHeader --
 *
 *      Function to be called (once) when the end of the header is reached.
 *      At this point, all request header lines have been parsed correctly.
 *
 * Results:
 *      Offset of the start of the content (reqPtr->roff).
 *
 * Side effects:
 *      Update various reqPtr fields and signal certain facts and error
 *      conditions via sockPtr->flags. In error conditions, sockPtr->keep is
 *      set to NS_FALSE.
 *
 *----------------------------------------------------------------------
 */
static size_t
EndOfHeader(Sock *sockPtr)
{
    Request    *reqPtr;
    const char *s;

    NS_NONNULL_ASSERT(sockPtr != NULL);

    reqPtr = sockPtr->reqPtr;
    assert(reqPtr != NULL);

    reqPtr->chunkStartOff = 0u;

    /*
     * Check for "expect: 100-continue" and clear the flag in case we have
     * pipelining.
     */
    sockPtr->flags &= ~(NS_CONN_CONTINUE);
    s = Ns_SetIGet(reqPtr->headers, "expect");
    if (s != NULL) {
        if (*s == '1' && *(s+1) == '0' && *(s+2) == '0' && *(s+3) == '-') {
            char *dup = ns_strdup(s+4);

            Ns_StrToLower(dup);
            if (STREQ(dup, "continue")) {
                sockPtr->flags |= NS_CONN_CONTINUE;
            }
            ns_free(dup);
        }
    }

    /*
     * Handle content-length, which might be provided or not.
     * Clear length-specific error flags.
     */
    sockPtr->flags &= ~(NS_CONN_ENTITYTOOLARGE);
    s = Ns_SetIGet(reqPtr->headers, "content-length");
    if (s == NULL) {
        s = Ns_SetIGet(reqPtr->headers, "Transfer-Encoding");

        if (s != NULL) {
            /* Lower case is in the standard, capitalized by macOS */
            if (STREQ(s, "chunked") || STREQ(s, "Chunked")) {
                Tcl_WideInt expected;

                reqPtr->chunkStartOff = reqPtr->roff;
                reqPtr->chunkWriteOff = reqPtr->chunkStartOff;
                reqPtr->contentLength = 0u;

                /*
                 * We need reqPtr->expectedLength for safely terminating the
                 * read loop.
                 */
                s = Ns_SetIGet(reqPtr->headers, "X-Expected-Entity-Length");

                if ((s != NULL)
                    && (Ns_StrToWideInt(s, &expected) == NS_OK)
                    && (expected > 0) ) {
                    reqPtr->expectedLength = (size_t)expected;
                }
                s = NULL;
            }
        }
    }

    /*
     * In case a valid and meaningful content length was provided, the
     * string with the content length ("s") is not NULL.
     */
    if (s != NULL) {
        Tcl_WideInt length;

        if ((Ns_StrToWideInt(s, &length) == NS_OK) && (length > 0)) {
            reqPtr->length = (size_t)length;
            /*
             * Handle too large input requests.
             */
            if (reqPtr->length > (size_t)sockPtr->drvPtr->maxinput) {
                Ns_Log(Warning, "SockParse: request too large, length=%"
                       PRIdz ", maxinput=%" TCL_LL_MODIFIER "d",
                       reqPtr->length, sockPtr->drvPtr->maxinput);

                sockPtr->keep = NS_FALSE;
                sockPtr->flags |= NS_CONN_ENTITYTOOLARGE;
            }
            reqPtr->contentLength = (size_t)length;
        }
    }

    /*
     * Compression format handling: parse information from request headers
     * indicating allowed compression formats for quick access.
     *
     * Clear the compression-accepted flags.
     */
    sockPtr->flags &= ~(NS_CONN_ZIPACCEPTED|NS_CONN_BROTLIACCEPTED);

    s = Ns_SetIGet(reqPtr->headers, "Accept-Encoding");
    if (s != NULL) {
        bool gzipAccept, brotliAccept;

        /*
         * Get allowed compression formats from "accept-encoding" headers.
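         *
         * For example, "Accept-Encoding: gzip, br" leads to both
         * NS_CONN_ZIPACCEPTED and NS_CONN_BROTLIACCEPTED being set, while
         * the presence of a "Range" header suppresses both flags (byte
         * ranges refer to the untransformed representation).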
         */
        NsParseAcceptEncoding(reqPtr->request.version, s, &gzipAccept, &brotliAccept);
        if (gzipAccept || brotliAccept) {
            /*
             * Don't allow compression formats for Range requests.
             */
            s = Ns_SetIGet(reqPtr->headers, "Range");
            if (s == NULL) {
                if (gzipAccept) {
                    sockPtr->flags |= NS_CONN_ZIPACCEPTED;
                }
                if (brotliAccept) {
                    sockPtr->flags |= NS_CONN_BROTLIACCEPTED;
                }
            }
        }
    }

    /*
     * Set up the request length for spooling and further read operations.
     */
    if (reqPtr->contentLength != 0u) {
        /*
         * Content-Length was provided, use it.
         */
        reqPtr->length = reqPtr->contentLength;
    }

    return reqPtr->roff;
}

/*----------------------------------------------------------------------
 *
 * SockParse --
 *
 *      Construct the given conn by parsing the input buffer until the end
 *      of the headers. Return SOCK_READY when finished parsing.
 *
 * Results:
 *      SOCK_READY: Conn is ready for processing.
 *      SOCK_MORE:  More input is required.
 *      SOCK_ERROR: Malformed request.
 *      SOCK_BADREQUEST
 *      SOCK_BADHEADER
 *      SOCK_TOOMANYHEADERS
 *
 * Side effects:
 *      An Ns_Request and/or Ns_Set may be allocated.
 *      Ns_Conn buffer management offsets updated.
 *
 *----------------------------------------------------------------------
 */
static SockState
SockParse(Sock *sockPtr)
{
    const Tcl_DString *bufPtr;
    const Driver      *drvPtr;
    Request           *reqPtr;
    char               save;
    SockState          result;

    NS_NONNULL_ASSERT(sockPtr != NULL);
    drvPtr = sockPtr->drvPtr;

    NsUpdateProgress((Ns_Sock *) sockPtr);

    reqPtr = sockPtr->reqPtr;
    bufPtr = &reqPtr->buffer;

    /*
     * Scan lines (header) until start of content (body-part).
     */
    while (reqPtr->coff == 0u) {
        char  *s, *e;
        size_t cnt;

        /*
         * Find the next header line.
         */
        s = bufPtr->string + reqPtr->roff;
        e = memchr(s, INTCHAR('\n'), reqPtr->avail);

        if (unlikely(e == NULL)) {
            /*
             * Input not yet newline-terminated - request more data.
             */
            return SOCK_MORE;
        }

        /*
         * Check for overflow of the maximum single-line length.
         *
         * Previous versions of the driver returned an error code directly
         * here, which was handled via an HTTP error message provided via
         * SockError(). However, the SockError() handling closes the
         * connection immediately. The consequence was that the HTTP client
         * might never see the error message, since the request was not yet
         * fully transmitted; it saw a "broken pipe: 13" message instead. We
         * now read the full request and return the message via
         * ConnRunRequest().
         */
        if (unlikely((e - s) > drvPtr->maxline)) {
            sockPtr->keep = NS_FALSE;
            if (reqPtr->request.line == NULL) {
                Ns_Log(DriverDebug, "SockParse: maxline of %d bytes reached", drvPtr->maxline);
                sockPtr->flags = NS_CONN_REQUESTURITOOLONG;
                Ns_Log(Warning, "request line is too long (%d bytes)", (int)(e - s));
            } else {
                sockPtr->flags = NS_CONN_LINETOOLONG;
                Ns_Log(Warning, "request header line is too long (%d bytes)", (int)(e - s));
            }
        }

        /*
         * Update next read pointer to end of this line.
         */
        cnt = (size_t)(e - s) + 1u;
        reqPtr->roff  += cnt;
        reqPtr->avail -= cnt;

        /*
         * Adjust end pointer to the last content character before the line
         * terminator.
         */
        if (likely(e > s) && likely(*(e-1) == '\r')) {
            --e;
        }

        /*
         * Check for end of headers in case we have not done it yet.
         */
        if (unlikely(e == s) && (reqPtr->coff == 0u)) {
            /*
             * We are at the end of the headers.
             */
            reqPtr->coff = EndOfHeader(sockPtr);

            /*
             * In case the client sent "expect: 100-continue", report back
             * that everything is fine with the headers.
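             *
             * The resulting exchange looks like this on the wire:
             *
             *     C: POST /upload HTTP/1.1
             *     C: Host: example.com
             *     C: Content-Length: 1048576
             *     C: Expect: 100-continue
             *     C:
             *     S: HTTP/1.1 100 Continue
             *     S:
             *     C: <1048576 bytes of request body>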
             */
            if ((sockPtr->flags & NS_CONN_CONTINUE) != 0u) {

                Ns_Log(Ns_LogRequestDebug, "honoring 100-continue");

                /*
                 * In case the request entity (body) was too large, we can
                 * return the error message immediately, since the client has
                 * flagged this via "Expect:". Otherwise we have to read the
                 * full request (although it is too large) to drain the
                 * channel; otherwise, the server might close the connection
                 * *before* it has received the full request with its body
                 * from the client. We just keep the flag and let
                 * Ns_ConnRunRequest() handle the error message.
                 */
                if ((sockPtr->flags & NS_CONN_ENTITYTOOLARGE) != 0u) {
                    Ns_Log(Ns_LogRequestDebug, "100-continue: entity too large");

                    return SOCK_ENTITYTOOLARGE;

                    /*
                     * We have no other error message flagged (future ones
                     * have to be handled here).
                     */
                } else {
                    struct iovec iov[1];
                    ssize_t      sent;

                    /*
                     * Reply with "100 continue".
                     */
                    Ns_Log(Ns_LogRequestDebug, "100-continue: reply CONTINUE");

                    iov[0].iov_base = (char *)"HTTP/1.1 100 Continue\r\n\r\n";
                    iov[0].iov_len = strlen(iov[0].iov_base);

                    sent = Ns_SockSendBufs((Ns_Sock *)sockPtr, iov, 1, NULL, 0u);
                    if (sent != (ssize_t)iov[0].iov_len) {
                        Ns_Log(Warning, "could not deliver response: 100 Continue");
                        /*
                         * Should we bail out here?
                         */
                    }
                }
            }
        } else {
            /*
             * We have the request-line or a header line to process.
             */
            save = *e;
            *e = '\0';

            if (unlikely(reqPtr->request.line == NULL)) {
                /*
                 * There is no request-line set. The received line must be
                 * the request-line.
                 */
                Ns_Log(DriverDebug, "SockParse (%d): parse request line <%s>", sockPtr->sock, s);

                if (Ns_ParseRequest(&reqPtr->request, s) == NS_ERROR) {
                    /*
                     * Invalid request.
                     */
                    return SOCK_BADREQUEST;
                }

                /*
                 * HTTP/0.9 had neither an HTTP-version number nor request
                 * headers, nor an empty line terminating the request header.
                 */
                if (unlikely(reqPtr->request.version < 1.0)) {
                    /*
                     * Pre-HTTP/1.0 request.
                     */
                    reqPtr->coff = reqPtr->roff;
                    Ns_Log(Notice, "pre-HTTP/1.0 request <%s>", reqPtr->request.line);
                }

            } else if (Ns_ParseHeader(reqPtr->headers, s, Preserve) != NS_OK) {
                /*
                 * Invalid header.
                 */
                return SOCK_BADHEADER;

            } else {
                /*
                 * Check for max number of headers.
                 */
                if (unlikely(Ns_SetSize(reqPtr->headers) > (size_t)drvPtr->maxheaders)) {
                    Ns_Log(DriverDebug, "SockParse (%d): maxheaders limit of %d reached",
                           sockPtr->sock, drvPtr->maxheaders);
                    return SOCK_TOOMANYHEADERS;
                }
            }

            *e = save;
        }
    }

    if (unlikely(reqPtr->request.line == NULL)) {
        /*
         * We are at the end of the headers, but we have not parsed a request
         * line (maybe just two linefeeds).
         */
        return SOCK_BADREQUEST;
    }

    /*
     * We are in the request body.
     */
    assert(reqPtr->coff > 0u);
    assert(reqPtr->request.line != NULL);

    /*
     * Check if all content has arrived.
     */
    Ns_Log(Debug, "=== length < avail (length %" PRIuz ", avail %" PRIuz ") tfd %d tfile %p chunkStartOff %" PRIuz,
           reqPtr->length, reqPtr->avail, sockPtr->tfd,
           (void *)sockPtr->tfile, reqPtr->chunkStartOff);

    if (reqPtr->chunkStartOff != 0u) {
        /*
         * Chunked encoding was provided.
         */
        SockState chunkState;
        size_t    currentContentLength;

        chunkState = ChunkedDecode(reqPtr, NS_TRUE);
        currentContentLength = reqPtr->chunkWriteOff - reqPtr->coff;

        /*
         * A chunk might be complete, but it might not be the last chunk
         * from the client. The best thing would be to be able to read until
         * EOF here. In cases where the (optional) "expectedLength" was
         * provided by the client, we terminate depending on that
         * information.
         */
        if ((chunkState == SOCK_MORE)
            || (reqPtr->expectedLength != 0u && currentContentLength < reqPtr->expectedLength)) {
            /*
             * ChunkedDecode wants more data.
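             *
             * For orientation, a complete chunked body looks like
             *
             *     4\r\nWiki\r\n5\r\npedia\r\n0\r\n\r\n
             *
             * ChunkedDecode() advances chunkStartOff over each
             * "<hex-len>\r\n<data>\r\n" unit and, when updating, compacts
             * the data down to chunkWriteOff, so the decoded content
             * ("Wikipedia") ends up contiguous in the buffer.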
             */
            return SOCK_MORE;

        } else if (chunkState != SOCK_READY) {
            return chunkState;
        }
        /*
         * ChunkedDecode has enough data.
         */
        reqPtr->length = (size_t)currentContentLength;
    }

    if (reqPtr->avail < reqPtr->length) {
        Ns_Log(DriverDebug, "SockRead wait for more input");
        /*
         * Wait for more input.
         */
        return SOCK_MORE;
    }

    Ns_Log(Dev, "=== all required data is available (avail %" PRIuz ", length %" PRIuz ", "
           "readahead %" TCL_LL_MODIFIER "d maxupload %" TCL_LL_MODIFIER "d) tfd %d",
           reqPtr->avail, reqPtr->length, drvPtr->readahead, drvPtr->maxupload,
           sockPtr->tfd);

    /*
     * We have all required data in the receive buffer or in a temporary
     * file.
     *
     * - Uploads > "readahead": these are put into temporary files.
     *
     * - Uploads > "maxupload": these are put into temporary files
     *   without mmapping, no content parsing will be performed in memory.
     */
    result = SOCK_READY;

    if (sockPtr->tfile != NULL) {
        reqPtr->content = NULL;
        reqPtr->next = NULL;
        reqPtr->avail = 0u;
        Ns_Log(DriverDebug, "content spooled to file: size %" PRIdz ", file %s",
               reqPtr->length, sockPtr->tfile);
        /*
         * Nothing more to do, return via SOCK_READY.
         */
    } else {

        /*
         * Uploads < "maxupload" are spooled to files and mmapped in order
         * to provide the usual interface via [ns_conn content].
         */
        if (sockPtr->tfd > 0) {
#ifdef _WIN32
            /*
             * For _WIN32, tfd should never be set, since tfd-spooling is
             * not implemented for Windows.
             */
            assert(0);
#else
            int prot = PROT_READ | PROT_WRITE;
            /*
             * Add a byte to make sure that the string termination with '\0'
             * below always falls into the mmapped area. On some older OSes
             * this might otherwise lead to crashes when hitting page
             * boundaries.
             */
            ssize_t rc = ns_write(sockPtr->tfd, "\0", 1);

            if (rc == -1) {
                Ns_Log(Error, "socket: could not append terminating 0-byte");
            }
            sockPtr->tsize = reqPtr->length + 1;
            sockPtr->taddr = mmap(0, sockPtr->tsize, prot, MAP_PRIVATE,
                                  sockPtr->tfd, 0);
            if (sockPtr->taddr == MAP_FAILED) {
                sockPtr->taddr = NULL;
                result = SOCK_ERROR;
            } else {
                reqPtr->content = sockPtr->taddr;
                Ns_Log(Debug, "content spooled to mmapped file: readahead=%"
                       TCL_LL_MODIFIER "d, filesize=%" PRIdz,
                       drvPtr->readahead, sockPtr->tsize);
            }
#endif
        } else {
            /*
             * Set the content to the beginning of the remaining buffer
             * (content offset). This also happens when reqPtr->contentLength
             * is 0; it is needed for chunked input processing.
             */
            reqPtr->content = bufPtr->string + reqPtr->coff;
        }
        reqPtr->next = reqPtr->content;

        /*
         * Add a terminating null character. The content might be from the
         * receive buffer (Tcl_DString) or from the mmapped file. Non-mmapped
         * files are handled above.
         */
        if (reqPtr->length > 0u) {
            Ns_Log(DriverDebug, "SockRead adds null terminating character at content[%" PRIuz "]",
                   reqPtr->length);

            reqPtr->savedChar = reqPtr->content[reqPtr->length];
            reqPtr->content[reqPtr->length] = '\0';

            if (sockPtr->taddr == NULL) {
                LogBuffer(DriverDebug, "UPDATED BUFFER",
                          sockPtr->reqPtr->buffer.string, (size_t)reqPtr->buffer.length);
            }
        }
    }

    return result;
}

/*
 *----------------------------------------------------------------------
 *
 * SockSetServer --
 *
 *      Set the virtual server from the driver context or the Host header.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *
 *      Updates sockPtr->servPtr. In case an invalid server is set, or the
 *      required Host header field of an HTTP/1.1 request is missing, the
 *      HTTP method is set to the constant "BAD".
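 *
 *      Mapping example: a request carrying "Host: Example.COM." is
 *      normalized (trailing dot stripped, lowercased) to "example.com"
 *      and looked up in drvPtr->hosts; a hit yields the ServerMap with
 *      the target server and location, a miss falls back to
 *      drvPtr->defMapPtr.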
 *
 *----------------------------------------------------------------------
 */
static void
SockSetServer(Sock *sockPtr)
{
    char    *host;
    Request *reqPtr;
    bool     bad_request = NS_FALSE;
    Driver  *drvPtr;

    NS_NONNULL_ASSERT(sockPtr != NULL);

    reqPtr = sockPtr->reqPtr;
    assert(reqPtr != NULL);

    drvPtr = sockPtr->drvPtr;
    assert(drvPtr != NULL);

    sockPtr->servPtr  = drvPtr->servPtr;
    sockPtr->location = drvPtr->location;

    host = Ns_SetIGet(reqPtr->headers, "Host");
    Ns_Log(DriverDebug, "SockSetServer host '%s' request line '%s'", host, reqPtr->request.line);

    if (unlikely((host == NULL) && (reqPtr->request.version >= 1.1))) {
        /*
         * HTTP/1.1 requires a host header.
         */
        Ns_Log(Notice, "request header field \"Host\" is missing in HTTP/1.1 request: \"%s\"\n",
               reqPtr->request.line);
        bad_request = NS_TRUE;
    }

    if (sockPtr->servPtr == NULL) {
        const ServerMap *mapPtr = NULL;

        if (host != NULL) {
            const Tcl_HashEntry *hPtr;
            size_t               hostLength = strlen(host);

            /*
             * Remove a trailing dot of the host header field, since fully
             * qualified "absolute" DNS names may carry a trailing dot (see
             * e.g. RFC 3986 section 3.2.2).
             */
            if (hostLength > 0u && host[hostLength - 1u] == '.') {
                host[hostLength - 1u] = '\0';
            }

            /*
             * Convert the provided host header field to lower case before
             * the hash lookup.
             */
            Ns_StrToLower(host);

            hPtr = Tcl_FindHashEntry(&drvPtr->hosts, host);
            Ns_Log(DriverDebug, "SockSetServer driver '%s' host '%s' => %p",
                   drvPtr->moduleName, host, (void*)hPtr);

            if (hPtr != NULL) {
                /*
                 * A request with the provided host header field could be
                 * resolved against a certain server.
                 */
                mapPtr = Tcl_GetHashValue(hPtr);
            } else {
                /*
                 * Host header field content is not found in the mapping
                 * table.
                 */
                Ns_Log(DriverDebug,
                       "cannot locate host header content '%s' in virtual hosts "
                       "table of driver '%s', fall back to default '%s'",
                       host, drvPtr->moduleName,
                       drvPtr->defMapPtr->location);

                if (Ns_LogSeverityEnabled(DriverDebug)) {
                    Tcl_HashEntry  *hPtr2;
                    Tcl_HashSearch  search;

                    hPtr2 = Tcl_FirstHashEntry(&drvPtr->hosts, &search);
                    while (hPtr2 != NULL) {
                        Ns_Log(Notice, "... host entry: '%s'\n",
                               (char *)Tcl_GetHashKey(&drvPtr->hosts, hPtr2));
                        hPtr2 = Tcl_NextHashEntry(&search);
                    }
                }
            }
        }

        if (mapPtr == NULL) {
            /*
             * Could not look up the virtual host; get the default mapping
             * from the driver.
             */
            mapPtr = drvPtr->defMapPtr;
        }
        if (mapPtr != NULL) {
            sockPtr->servPtr  = mapPtr->servPtr;
            sockPtr->location = mapPtr->location;
        }
        if (sockPtr->servPtr == NULL) {
            Ns_Log(Warning, "cannot determine server for request: \"%s\" (host \"%s\")\n",
                   reqPtr->request.line, host);
            bad_request = NS_TRUE;
        }
    }

    if (unlikely(bad_request)) {
        Ns_Log(DriverDebug, "SockSetServer sets method to BAD");
        ns_free((char *)reqPtr->request.method);
        reqPtr->request.method = ns_strdup("BAD");
    }
}

/*
 *======================================================================
 *  Spooler Thread: Receive asynchronously from the client socket
 *======================================================================
 */

/*
 *----------------------------------------------------------------------
 *
 * SpoolerThread --
 *
 *      Spooling socket driver thread.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      Connections are accepted on the configured listen sockets,
 *      placed on the run queue to be serviced, and gracefully
 *      closed when done. Async sockets have the entire request read
 *      here before queuing as well.
* *---------------------------------------------------------------------- */ static void SpoolerThread(void *arg) { SpoolerQueue *queuePtr = (SpoolerQueue*)arg; char charBuffer[1]; int pollTimeout; bool stopping; Sock *sockPtr, *nextPtr, *waitPtr, *readPtr; Ns_Time now, diff; const Driver *drvPtr; PollData pdata; Ns_ThreadSetName("-spooler%d-", queuePtr->id); queuePtr->threadName = Ns_ThreadGetName(); /* * Loop forever until signaled to shut down and all * connections are complete and gracefully closed. */ Ns_Log(Notice, "spooler%d: accepting connections", queuePtr->id); PollCreate(&pdata); Ns_GetTime(&now); waitPtr = readPtr = NULL; stopping = NS_FALSE; while (!stopping) { /* * If there are any read sockets, set the bits * and determine the minimum relative timeout. */ PollReset(&pdata); (void)PollSet(&pdata, queuePtr->pipe[0], (short)POLLIN, NULL); if (readPtr == NULL) { pollTimeout = 30 * 1000; } else { sockPtr = readPtr; while (sockPtr != NULL) { SockPoll(sockPtr, (short)POLLIN, &pdata); sockPtr = sockPtr->nextPtr; } pollTimeout = -1; } /* * Select and drain the trigger pipe if necessary. */ /*n =*/ (void) PollWait(&pdata, pollTimeout); if (PollIn(&pdata, 0) && unlikely(ns_recv(queuePtr->pipe[0], charBuffer, 1u, 0) != 1)) { Ns_Fatal("spooler: trigger ns_recv() failed: %s", ns_sockstrerror(ns_sockerrno)); } /* * Attempt read-ahead of any new connections. */ Ns_GetTime(&now); sockPtr = readPtr; readPtr = NULL; while (sockPtr != NULL) { nextPtr = sockPtr->nextPtr; drvPtr = sockPtr->drvPtr; if (unlikely(PollHup(&pdata, sockPtr->pidx))) { /* * Peer has closed the connection */ SockRelease(sockPtr, SOCK_CLOSE, 0); } else if (!PollIn(&pdata, sockPtr->pidx)) { /* * Got no data */ if (Ns_DiffTime(&sockPtr->timeout, &now, &diff) <= 0) { SockRelease(sockPtr, SOCK_READTIMEOUT, 0); queuePtr->queuesize--; } else { Push(sockPtr, readPtr); } } else { /* * Got some data */ SockState n = SockRead(sockPtr, 1, &now); switch (n) { case SOCK_MORE: SockTimeout(sockPtr, &now, &drvPtr->recvwait); Push(sockPtr, readPtr); break; case SOCK_READY: assert(sockPtr->reqPtr != NULL); SockSetServer(sockPtr); Push(sockPtr, waitPtr); break; case SOCK_BADHEADER: NS_FALL_THROUGH; /* fall through */ case SOCK_BADREQUEST: NS_FALL_THROUGH; /* fall through */ case SOCK_CLOSE: NS_FALL_THROUGH; /* fall through */ case SOCK_CLOSETIMEOUT: NS_FALL_THROUGH; /* fall through */ case SOCK_ENTITYTOOLARGE: NS_FALL_THROUGH; /* fall through */ case SOCK_ERROR: NS_FALL_THROUGH; /* fall through */ case SOCK_READERROR: NS_FALL_THROUGH; /* fall through */ case SOCK_READTIMEOUT: NS_FALL_THROUGH; /* fall through */ case SOCK_SHUTERROR: NS_FALL_THROUGH; /* fall through */ case SOCK_SPOOL: NS_FALL_THROUGH; /* fall through */ case SOCK_TOOMANYHEADERS: NS_FALL_THROUGH; /* fall through */ case SOCK_WRITEERROR: NS_FALL_THROUGH; /* fall through */ case SOCK_WRITETIMEOUT: SockRelease(sockPtr, n, errno); queuePtr->queuesize--; break; } } sockPtr = nextPtr; } /* * Attempt to queue any pending connection * after reversing the list to ensure oldest * connections are tried first. 
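         * For example (illustrative): if sockets were pushed in the order
         * C3, C2, C1 (newest first), the first loop below rebuilds the
         * list as C1, C2, C3, so that C1, the oldest connection, is
         * offered to NsQueueConn() first.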
*/ if (waitPtr != NULL) { sockPtr = NULL; while ((nextPtr = waitPtr) != NULL) { waitPtr = nextPtr->nextPtr; Push(nextPtr, sockPtr); } while (sockPtr != NULL) { nextPtr = sockPtr->nextPtr; if (!NsQueueConn(sockPtr, &now)) { Push(sockPtr, waitPtr); } else { queuePtr->queuesize--; } sockPtr = nextPtr; } } /* * Add more connections from the spooler queue */ Ns_MutexLock(&queuePtr->lock); if (waitPtr == NULL) { sockPtr = (Sock*)queuePtr->sockPtr; queuePtr->sockPtr = NULL; while (sockPtr != NULL) { nextPtr = sockPtr->nextPtr; drvPtr = sockPtr->drvPtr; SockTimeout(sockPtr, &now, &drvPtr->recvwait); Push(sockPtr, readPtr); queuePtr->queuesize++; sockPtr = nextPtr; } } /* * Check for shutdown */ stopping = queuePtr->shutdown; Ns_MutexUnlock(&queuePtr->lock); } PollFree(&pdata); Ns_Log(Notice, "exiting"); Ns_MutexLock(&queuePtr->lock); queuePtr->stopped = NS_TRUE; Ns_CondBroadcast(&queuePtr->cond); Ns_MutexUnlock(&queuePtr->lock); } static void SpoolerQueueStart(SpoolerQueue *queuePtr, Ns_ThreadProc *proc) { NS_NONNULL_ASSERT(proc != NULL); while (queuePtr != NULL) { if (ns_sockpair(queuePtr->pipe) != 0) { Ns_Fatal("ns_sockpair() failed: %s", ns_sockstrerror(ns_sockerrno)); } Ns_ThreadCreate(proc, queuePtr, 0, &queuePtr->thread); queuePtr = queuePtr->nextPtr; } } static void SpoolerQueueStop(SpoolerQueue *queuePtr, const Ns_Time *timeoutPtr, const char *name) { NS_NONNULL_ASSERT(timeoutPtr != NULL); NS_NONNULL_ASSERT(name != NULL); while (queuePtr != NULL) { Ns_ReturnCode status; Ns_MutexLock(&queuePtr->lock); if (!queuePtr->stopped && !queuePtr->shutdown) { Ns_Log(Debug, "%s%d: triggering shutdown", name, queuePtr->id); queuePtr->shutdown = NS_TRUE; SockTrigger(queuePtr->pipe[1]); } status = NS_OK; while (!queuePtr->stopped && status == NS_OK) { status = Ns_CondTimedWait(&queuePtr->cond, &queuePtr->lock, timeoutPtr); } if (status != NS_OK) { Ns_Log(Warning, "%s%d: timeout waiting for shutdown", name, queuePtr->id); } else { /*Ns_Log(Notice, "%s%d: shutdown complete", name, queuePtr->id);*/ if (queuePtr->thread != NULL) { Ns_ThreadJoin(&queuePtr->thread, NULL); queuePtr->thread = NULL; } else { Ns_Log(Notice, "%s%d: shutdown: thread already gone", name, queuePtr->id); } ns_sockclose(queuePtr->pipe[0]); ns_sockclose(queuePtr->pipe[1]); } Ns_MutexUnlock(&queuePtr->lock); queuePtr = queuePtr->nextPtr; } } static int SockSpoolerQueue(Driver *drvPtr, Sock *sockPtr) { bool trigger = NS_FALSE; SpoolerQueue *queuePtr; NS_NONNULL_ASSERT(drvPtr != NULL); NS_NONNULL_ASSERT(sockPtr != NULL); /* * Get the next spooler thread from the list, all spooler requests are * rotated between all spooler threads */ Ns_MutexLock(&drvPtr->spooler.lock); if (drvPtr->spooler.curPtr == NULL) { drvPtr->spooler.curPtr = drvPtr->spooler.firstPtr; } queuePtr = drvPtr->spooler.curPtr; drvPtr->spooler.curPtr = drvPtr->spooler.curPtr->nextPtr; Ns_MutexUnlock(&drvPtr->spooler.lock); Ns_Log(Debug, "Spooler: %d: started fd=%d: %" PRIdz " bytes", queuePtr->id, sockPtr->sock, sockPtr->reqPtr->length); Ns_MutexLock(&queuePtr->lock); if (queuePtr->sockPtr == NULL) { trigger = NS_TRUE; } Push(sockPtr, queuePtr->sockPtr); Ns_MutexUnlock(&queuePtr->lock); /* * Wake up spooler thread */ if (trigger) { SockTrigger(queuePtr->pipe[1]); } return 1; } /* *====================================================================== * Writer Thread: Write asynchronously to the client socket *====================================================================== */ /* *---------------------------------------------------------------------- * * NsWriterLock, 
NsWriterUnlock --
 *
 *      Provide an API for locking and unlocking context information
 *      for streaming asynchronous writer jobs. The locks are just
 *      needed for managing linkage between "connPtr" and a writer
 *      entry. The lock operations are rather infrequent and the
 *      lock duration is very short, such that a single global lock
 *      appears sufficient.
 *
 * Results:
 *      None
 *
 * Side effects:
 *      Change Mutex state.
 *
 *----------------------------------------------------------------------
 */
void NsWriterLock(void) {
    Ns_MutexLock(&writerlock);
}

void NsWriterUnlock(void) {
    Ns_MutexUnlock(&writerlock);
}

/*
 *----------------------------------------------------------------------
 *
 * WriterSockFileVecCleanup --
 *
 *      Cleanup function for the FileVec array in the WriterSock structure.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      Potentially closing file descriptors, freeing Ns_FileVec memory.
 *
 *----------------------------------------------------------------------
 */
static void
WriterSockFileVecCleanup(WriterSock *wrSockPtr)
{
    NS_NONNULL_ASSERT(wrSockPtr != NULL);

    if ( wrSockPtr->c.file.nbufs > 0) {
        int i;

        Ns_Log(DriverDebug, "WriterSockRelease nbufs %d", wrSockPtr->c.file.nbufs);

        for (i = 0; i < wrSockPtr->c.file.nbufs; i++) {
            /*
             * The fd of c.file.currentbuf is always the same as
             * wrSockPtr->fd and therefore already closed at this point.
             */
            if ( (i != wrSockPtr->c.file.currentbuf)
                 && (wrSockPtr->c.file.bufs[i].fd != NS_INVALID_FD) ) {

                Ns_Log(DriverDebug, "WriterSockRelease must close fd %d",
                       wrSockPtr->c.file.bufs[i].fd);
                ns_close(wrSockPtr->c.file.bufs[i].fd);
            }
        }
        ns_free(wrSockPtr->c.file.bufs);
    }
    ns_free(wrSockPtr->c.file.buf);
}

/*
 *----------------------------------------------------------------------
 *
 * WriterSockRequire, WriterSockRelease --
 *
 *      Management functions for WriterSocks. WriterSockRequire() and
 *      WriterSockRelease() are responsible for obtaining and
 *      freeing "WriterSock" structures. When such a structure is finally
 *      released, it is removed from the queue, the socket is
 *      closed and the memory is freed.
 *
 * Results:
 *      WriterSockRequire() returns a WriterSock from a connection,
 *      the other functions return nothing.
 *
 * Side effects:
 *      Updating reference counters, closing socket, freeing memory.
 *
 *----------------------------------------------------------------------
 */

static WriterSock *
WriterSockRequire(const Conn *connPtr)
{
    WriterSock *wrSockPtr;

    NS_NONNULL_ASSERT(connPtr != NULL);

    NsWriterLock();
    wrSockPtr = (WriterSock *)connPtr->strWriter;
    if (wrSockPtr != NULL) {
        wrSockPtr->refCount ++;
    }
    NsWriterUnlock();

    return wrSockPtr;
}

static void
WriterSockRelease(WriterSock *wrSockPtr)
{
    SpoolerQueue *queuePtr;

    NS_NONNULL_ASSERT(wrSockPtr != NULL);

    wrSockPtr->refCount --;

    Ns_Log(DriverDebug, "WriterSockRelease %p refCount %d keep %d",
           (void *)wrSockPtr, wrSockPtr->refCount, wrSockPtr->keep);

    if (wrSockPtr->refCount > 0) {
        return;
    }

    Ns_Log(DriverDebug,
           "Writer: closed sock %d, file fd %d, error %d/%d, "
           "sent=%" TCL_LL_MODIFIER "d, flags=%X",
           wrSockPtr->sockPtr->sock, wrSockPtr->fd,
           wrSockPtr->status, wrSockPtr->err,
           wrSockPtr->nsent, wrSockPtr->flags);

    NsPoolAddBytesSent(wrSockPtr->poolPtr, wrSockPtr->nsent);

    if (wrSockPtr->doStream != NS_WRITER_STREAM_NONE) {
        Conn *connPtr;

        NsWriterLock();
        connPtr = wrSockPtr->connPtr;
        if (connPtr != NULL && connPtr->strWriter != NULL) {
            connPtr->strWriter = NULL;
        }
        NsWriterUnlock();

        /*
         * In case writer streams are activated for this wrSockPtr, make sure
         * to release the tmp file.
See thread Naviserver Open Files on the
         * sourceforge mailing list (starting July 2019).
         */
        if (wrSockPtr->doStream == NS_WRITER_STREAM_FINISH) {
            Ns_ReleaseTemp(wrSockPtr->fd);
        }
    }

    /*
     * Remove the entry from the queue and decrement counter
     */
    queuePtr = wrSockPtr->queuePtr;
    if (queuePtr->curPtr == wrSockPtr) {
        queuePtr->curPtr = wrSockPtr->nextPtr;
        queuePtr->queuesize--;
    } else {
        WriterSock *curPtr, *lastPtr = queuePtr->curPtr;

        for (curPtr = (lastPtr != NULL) ? lastPtr->nextPtr : NULL;
             curPtr != NULL;
             lastPtr = curPtr, curPtr = curPtr->nextPtr
             ) {
            if (curPtr == wrSockPtr) {
                lastPtr->nextPtr = wrSockPtr->nextPtr;
                queuePtr->queuesize--;
                break;
            }
        }
    }

    if ((wrSockPtr->err != 0) || (wrSockPtr->status != SPOOLER_OK)) {
        int i;

        /*
         * Look up the matching sockState from the spooler state. The array
         * has just 5 elements; on average, just 2 comparisons are needed
         * (since OK is at the end).
         */
        for (i = 0; i < Ns_NrElements(spoolerStateMap); i++) {
            if (spoolerStateMap[i].spoolerState == wrSockPtr->status) {
                SockError(wrSockPtr->sockPtr, spoolerStateMap[i].sockState, wrSockPtr->err);
                break;
            }
        }
        NsSockClose(wrSockPtr->sockPtr, (int)NS_FALSE);
    } else {
        NsSockClose(wrSockPtr->sockPtr, (int)wrSockPtr->keep);
    }
    if (wrSockPtr->clientData != NULL) {
        ns_free(wrSockPtr->clientData);
    }
    if (wrSockPtr->fd != NS_INVALID_FD) {
        if (wrSockPtr->doStream != NS_WRITER_STREAM_FINISH) {
            (void) ns_close(wrSockPtr->fd);
        }
        WriterSockFileVecCleanup(wrSockPtr);

    } else if (wrSockPtr->c.mem.bufs != NULL) {
        if (wrSockPtr->c.mem.fmap.addr != NULL) {
            NsMemUmap(&wrSockPtr->c.mem.fmap);

        } else {
            int i;

            for (i = 0; i < wrSockPtr->c.mem.nbufs; i++) {
                ns_free((char *)wrSockPtr->c.mem.bufs[i].iov_base);
            }
        }
        if (wrSockPtr->c.mem.bufs != wrSockPtr->c.mem.preallocated_bufs) {
            ns_free(wrSockPtr->c.mem.bufs);
        }
    }
    if (wrSockPtr->headerString != NULL) {
        ns_free(wrSockPtr->headerString);
    }

    ns_free(wrSockPtr);
}

/*
 *----------------------------------------------------------------------
 *
 * WriterReadFromSpool --
 *
 *      Utility function of the WriterThread to read blocks from a
 *      file into the output buffer of the writer. It handles
 *      leftovers from previous send attempts and takes care of
 *      locking in case of simultaneous reading and writing from the
 *      same file.
 *
 * Results:
 *      SpoolerState (SPOOLER_OK, or SPOOLER_READERROR on read failures).
 *
 * Side effects:
 *      Fills up curPtr->c.file.buf and updates counters/sizes.
 *
 *----------------------------------------------------------------------
 */
static SpoolerState
WriterReadFromSpool(WriterSock *curPtr)
{
    NsWriterStreamState doStream;
    SpoolerState        status = SPOOLER_OK;
    size_t              maxsize, toRead;
    unsigned char      *bufPtr;

    NS_NONNULL_ASSERT(curPtr != NULL);

    doStream = curPtr->doStream;
    if (doStream != NS_WRITER_STREAM_NONE) {
        Ns_MutexLock(&curPtr->c.file.fdlock);
        toRead = curPtr->c.file.toRead;
        Ns_MutexUnlock(&curPtr->c.file.fdlock);
    } else {
        toRead = curPtr->c.file.toRead;

        Ns_Log(DriverDebug, "### WriterReadFromSpool [%d]: fd %d tosend %lu files %d",
               curPtr->c.file.currentbuf, curPtr->fd, toRead, curPtr->c.file.nbufs);
    }

    maxsize = curPtr->c.file.maxsize;
    bufPtr  = curPtr->c.file.buf;

    /*
     * When bufsize > 0 we have a leftover from a previous send. In such
     * cases, move the leftover to the front, and fill the remainder of
     * the buffer with new data from curPtr->c.
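     *
     * For example (illustrative): with maxsize 8192 and a leftover of
     * 1000 bytes at bufoffset 3000, the leftover is moved to the front
     * of buf, bufPtr is set to buf + 1000, and at most 7192 new bytes
     * are read behind it.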
*/ if (curPtr->c.file.bufsize > 0u) { Ns_Log(DriverDebug, "### WriterReadFromSpool %p %.6x leftover %" PRIdz " offset %ld", (void *)curPtr, curPtr->flags, curPtr->c.file.bufsize, (long)curPtr->c.file.bufoffset); if (likely(curPtr->c.file.bufoffset > 0)) { memmove(curPtr->c.file.buf, curPtr->c.file.buf + curPtr->c.file.bufoffset, curPtr->c.file.bufsize); } bufPtr = curPtr->c.file.buf + curPtr->c.file.bufsize; maxsize -= curPtr->c.file.bufsize; } if (toRead > maxsize) { toRead = maxsize; } /* * Read content from the file into the buffer. */ if (toRead > 0u) { ssize_t n; if (doStream != NS_WRITER_STREAM_NONE) { /* * In streaming mode, the connection thread writes to the * spool file and the writer thread reads from the same * file. Therefore, we have to re-adjust the current * read/writer position, which might be changed by the * other thread. These positions have to be locked, since * seeking might be subject to race conditions. Here we * set the read pointer to the position after the last * send operation. */ Ns_MutexLock(&curPtr->c.file.fdlock); (void) ns_lseek(curPtr->fd, (off_t)curPtr->nsent, SEEK_SET); } if (curPtr->c.file.nbufs == 0) { /* * Working on a single fd. */ n = ns_read(curPtr->fd, bufPtr, toRead); } else { /* * Working on a Ns_FileVec. */ int currentbuf = curPtr->c.file.currentbuf; size_t wantRead = curPtr->c.file.bufs[currentbuf].length; size_t segSize = (wantRead > toRead ? toRead : wantRead); n = ns_read(curPtr->fd, bufPtr, segSize); Ns_Log(DriverDebug, "### WriterReadFromSpool [%d] (nbufs %d): read from fd %d want %lu got %ld (remain %lu)", currentbuf, curPtr->c.file.nbufs, curPtr->fd, segSize, n, wantRead); if (n > 0) { /* * Reduce the remaining length in the Ns_FileVec for the * next iteration. */ curPtr->c.file.bufs[currentbuf].length -= (size_t)n; if ((size_t)n < wantRead) { /* * Partial read on a segment. */ Ns_Log(DriverDebug, "### WriterReadFromSpool [%d] (nbufs %d): partial read on fd %d (got %ld)", currentbuf, curPtr->c.file.nbufs, curPtr->fd, n); } else if (currentbuf < curPtr->c.file.nbufs - 1 /* && (n == wantRead) */) { /* * All read from this segment, setup next read. */ ns_close(curPtr->fd); curPtr->c.file.bufs[currentbuf].fd = NS_INVALID_FD; curPtr->c.file.currentbuf ++; curPtr->fd = curPtr->c.file.bufs[curPtr->c.file.currentbuf].fd; Ns_Log(DriverDebug, "### WriterReadFromSpool switch to [%d] fd %d", curPtr->c.file.currentbuf, curPtr->fd); } } } if (n <= 0) { status = SPOOLER_READERROR; } else { /* * curPtr->c.file.toRead is still protected by * curPtr->c.file.fdlock when needed (in streaming mode). */ curPtr->c.file.toRead -= (size_t)n; curPtr->c.file.bufsize += (size_t)n; } if (doStream != NS_WRITER_STREAM_NONE) { Ns_MutexUnlock(&curPtr->c.file.fdlock); } } return status; } /* *---------------------------------------------------------------------- * * WriterSend -- * * Utility function of the WriterThread to send content to the client. It * handles partial write operations from the lower level driver * infrastructure. * * Results: * either NS_OK or SOCK_ERROR; * * Side effects: * Sends data, might reshuffle iovec. * *---------------------------------------------------------------------- */ static SpoolerState WriterSend(WriterSock *curPtr, int *err) { const struct iovec *bufs; struct iovec vbuf; int nbufs; SpoolerState status = SPOOLER_OK; size_t toWrite; ssize_t n; NS_NONNULL_ASSERT(curPtr != NULL); NS_NONNULL_ASSERT(err != NULL); /* * Prepare send operation */ if (curPtr->fd != NS_INVALID_FD) { /* * We have a valid file descriptor, send data from file. 
         *
         * Prepare sending a single buffer with curPtr->c.file.bufsize bytes
         * from the curPtr->c.file.buf to the client.
         */
        vbuf.iov_len = curPtr->c.file.bufsize;
        vbuf.iov_base = (void *)curPtr->c.file.buf;
        bufs = &vbuf;
        nbufs = 1;
        toWrite = curPtr->c.file.bufsize;
    } else {
        int i;

        /*
         * Prepare sending multiple memory buffers. Get the length of the
         * remaining buffers.
         */
        toWrite = 0u;
        for (i = 0; i < curPtr->c.mem.nsbufs; i ++) {
            toWrite += curPtr->c.mem.sbufs[i].iov_len;
        }
        Ns_Log(DriverDebug,
               "### Writer wants to send remainder nbufs %d len %" PRIdz,
               curPtr->c.mem.nsbufs, toWrite);

        /*
         * Add buffers from the source and fill the structure up to the max.
         */
        while (curPtr->c.mem.bufIdx < curPtr->c.mem.nbufs
               && curPtr->c.mem.sbufIdx < UIO_SMALLIOV) {
            const struct iovec *vPtr = &curPtr->c.mem.bufs[curPtr->c.mem.bufIdx];

            if (vPtr->iov_len > 0u && vPtr->iov_base != NULL) {

                Ns_Log(DriverDebug,
                       "### Writer copies source %d to scratch %d len %" PRIiovlen,
                       curPtr->c.mem.bufIdx, curPtr->c.mem.sbufIdx, vPtr->iov_len);

                toWrite += Ns_SetVec(curPtr->c.mem.sbufs, curPtr->c.mem.sbufIdx++,
                                     vPtr->iov_base, vPtr->iov_len);
                curPtr->c.mem.nsbufs++;
            }
            curPtr->c.mem.bufIdx++;
        }

        bufs = curPtr->c.mem.sbufs;
        nbufs = curPtr->c.mem.nsbufs;
        Ns_Log(DriverDebug, "### Writer wants to send %d bufs size %" PRIdz,
               nbufs, toWrite);
    }

    /*
     * Perform the actual send operation.
     */
    n = NsDriverSend(curPtr->sockPtr, bufs, nbufs, 0u);

    if (n == -1) {
        *err = ns_sockerrno;
        status = SPOOLER_WRITEERROR;
    } else {
        /*
         * We have sent zero or more bytes.
         */
        if (curPtr->doStream != NS_WRITER_STREAM_NONE) {
            Ns_MutexLock(&curPtr->c.file.fdlock);
            curPtr->size -= (size_t)n;
            Ns_MutexUnlock(&curPtr->c.file.fdlock);
        } else {
            curPtr->size -= (size_t)n;
        }
        curPtr->nsent += n;
        curPtr->sockPtr->timeout.sec = 0;

        if (curPtr->fd != NS_INVALID_FD) {
            /*
             * File-descriptor based send operation. Reduce the (remaining)
             * buffer size by the amount of data sent and adjust the buffer
             * offset. For partial send operations, this will lead to a
             * remaining buffer size > 0.
             */
            curPtr->c.file.bufsize -= (size_t)n;
            curPtr->c.file.bufoffset = (off_t)n;
        } else {
            if (n < (ssize_t)toWrite) {
                /*
                 * We have a partial transmit from the iovec
                 * structure. We have to compact it to fill content in
                 * the next round.
                 */
                curPtr->c.mem.sbufIdx = Ns_ResetVec(curPtr->c.mem.sbufs, curPtr->c.mem.nsbufs, (size_t)n);
                curPtr->c.mem.nsbufs -= curPtr->c.mem.sbufIdx;

                memmove(curPtr->c.mem.sbufs, curPtr->c.mem.sbufs + curPtr->c.mem.sbufIdx,
                        /* move the iovecs to the start of the scratch buffers */
                        sizeof(struct iovec) * (size_t)curPtr->c.mem.nsbufs);
            }
        }
    }

    return status;
}

/*
 *----------------------------------------------------------------------
 *
 * WriterGetInfoPtr --
 *
 *      Helper function to obtain the ConnPoolInfo structure for a
 *      WriterSock.
 *
 *      The infoPtr is allocated only once per pool and cached in the
 *      WriterSock. Only the first time a writer thread "sees" a pool does
 *      it allocate the structure for it.
 *
 * Results:
 *      Pointer to the ConnPoolInfo structure of the WriterSock's pool.
 *
 * Side effects:
 *      Can allocate memory.
 *
 *----------------------------------------------------------------------
 */
static ConnPoolInfo *
WriterGetInfoPtr(WriterSock *curPtr, Tcl_HashTable *pools)
{
    NS_NONNULL_ASSERT(curPtr != NULL);
    NS_NONNULL_ASSERT(pools != NULL);

    if (curPtr->infoPtr == NULL) {
        int            isNew;
        Tcl_HashEntry *hPtr;

        hPtr = Tcl_CreateHashEntry(pools, (void*)curPtr->poolPtr, &isNew);
        if (isNew == 1) {
            /*
             * This is a pool that we have not seen yet.
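             *
             * For example (illustrative): with two bandwidth-limited pools
             * "fast" and "slow", the first writer job of each pool creates
             * one ConnPoolInfo entry here; all later jobs of the same pool
             * reuse it via the cached curPtr->infoPtr or the hash lookup.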
             */
            curPtr->infoPtr = ns_malloc(sizeof(ConnPoolInfo));
            curPtr->infoPtr->currentPoolRate = 0;
            curPtr->infoPtr->threadSlot =
                NsPoolAllocateThreadSlot(curPtr->poolPtr, Ns_ThreadId());
            Tcl_SetHashValue(hPtr, curPtr->infoPtr);
            Ns_Log(DriverDebug, "poollimit: pool '%s' allocate infoPtr with slot %lu poolLimit %d",
                   curPtr->poolPtr->pool,
                   curPtr->infoPtr->threadSlot,
                   curPtr->poolPtr->rate.poolLimit);
        } else {
            curPtr->infoPtr = (ConnPoolInfo *)Tcl_GetHashValue(hPtr);
        }
    }

    return curPtr->infoPtr;
}

/*
 *----------------------------------------------------------------------
 *
 * WriterPerPoolRates --
 *
 *      Compute the current bandwidths per pool and writer.
 *
 *      Since we have potentially multiple writer threads running, all of
 *      these might serve writer jobs of the same pool. In order to
 *      minimize locking, we first compute writer-thread-specific
 *      subresults and combine these later with the results of the other
 *      threads.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      Updates the per-pool rate information (currentPoolRate,
 *      deltaPercentage) used for bandwidth management.
 *
 *----------------------------------------------------------------------
 */
static void
WriterPerPoolRates(WriterSock *writePtr, Tcl_HashTable *pools)
{
    WriterSock     *curPtr;
    Tcl_HashSearch  search;
    Tcl_HashEntry  *hPtr;

    NS_NONNULL_ASSERT(writePtr != NULL);
    NS_NONNULL_ASSERT(pools != NULL);

    /*
     * First reset the pool total rates. We keep the bandwidth-managed pools
     * in thread-local memory. Before we accumulate the data, we reset it.
     */
    hPtr = Tcl_FirstHashEntry(pools, &search);
    while (hPtr != NULL) {
        ConnPoolInfo *infoPtr = (ConnPoolInfo *)Tcl_GetHashValue(hPtr);
        infoPtr->currentPoolRate = 0;
        hPtr = Tcl_NextHashEntry(&search);
    }

    /*
     * Sum the actual rates per bandwidth limited pool for all active writer
     * jobs.
     */
    for (curPtr = writePtr; curPtr != NULL; curPtr = curPtr->nextPtr) {
        /*
         * Does the writer come from a bandwidth-limited pool?
         */
        if (curPtr->poolPtr->rate.poolLimit > 0 && curPtr->currentRate > 0) {
            /*
             * Add the actual rate to the writer specific pool rate.
             */
            ConnPoolInfo *infoPtr = WriterGetInfoPtr(curPtr, pools);

            infoPtr->currentPoolRate += curPtr->currentRate;
            Ns_Log(DriverDebug, "poollimit pool '%s' added rate poolLimit %d poolRate %d",
                   curPtr->poolPtr->pool,
                   curPtr->poolPtr->rate.poolLimit,
                   infoPtr->currentPoolRate);
        }
    }

    /*
     * Now iterate over the pools used by this thread and sum the specific
     * pool rates from all writer threads.
     */
    hPtr = Tcl_FirstHashEntry(pools, &search);
    while (hPtr != NULL) {
        ConnPool     *poolPtr = (ConnPool *)Tcl_GetHashKey(pools, hPtr);
        int           totalPoolRate, writerThreadCount, threadDeltaRate;
        ConnPoolInfo *infoPtr;

        /*
         * Compute the following indicators:
         *
         *   - totalPoolRate: accumulated pool rates from all writer threads.
         *
         *   - threadDeltaRate: how much of the available bandwidth can be
         *     used by the current thread. We assume that the distribution
         *     of writers between all writer threads is even, so we can
         *     split the available rate by the number of writer threads
         *     working on this pool.
         *
         *   - deltaPercentage: adjust in a single iteration just a fraction
         *     (e.g. 10 percent) of the potential change. This function is
         *     called often enough to justify delayed adjustments.
         */
        infoPtr = (ConnPoolInfo *)Tcl_GetHashValue(hPtr);
        totalPoolRate = NsPoolTotalRate(poolPtr,
                                        infoPtr->threadSlot,
                                        infoPtr->currentPoolRate,
                                        &writerThreadCount);

        /*
         * If nothing is going on, allow a thread the full rate.
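         *
         * For example (illustrative): with poolLimit 1000 KB/s, a
         * totalPoolRate of 800 KB/s and 2 writer threads, the code below
         * yields threadDeltaRate = (1000 - 800) / 2 = 100 and
         * deltaPercentage = 10, i.e. busy jobs may speed up by 10%.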
         */
        if (infoPtr->currentPoolRate == 0) {
            threadDeltaRate = (poolPtr->rate.poolLimit - totalPoolRate);
        } else {
            threadDeltaRate = (poolPtr->rate.poolLimit - totalPoolRate) / writerThreadCount;
        }
        infoPtr->deltaPercentage = threadDeltaRate / 10;
        if (infoPtr->deltaPercentage < -50) {
            infoPtr->deltaPercentage = -50;
        }

        if (totalPoolRate > 0) {
            Ns_Log(Notice, "... pool '%s' thread's pool rate %d total pool rate %d limit %d "
                   "(#%d writer threads) -> computed rate %d (%d%%) ",
                   NsPoolName(poolPtr->pool),
                   infoPtr->currentPoolRate,
                   totalPoolRate,
                   poolPtr->rate.poolLimit,
                   writerThreadCount,
                   threadDeltaRate,
                   infoPtr->deltaPercentage
                   );
        }

        hPtr = Tcl_NextHashEntry(&search);
    }
}

/*
 *----------------------------------------------------------------------
 *
 * WriterThread --
 *
 *      Thread that writes files to clients.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      Connections are accepted and their SockPtr is set to NULL
 *      such that closing the actual connection does not close the socket.
 *
 *----------------------------------------------------------------------
 */
static void
WriterThread(void *arg)
{
    SpoolerQueue  *queuePtr = (SpoolerQueue*)arg;
    int            err, pollTimeout;
    bool           stopping;
    Ns_Time        now;
    Sock          *sockPtr;
    const Driver  *drvPtr;
    WriterSock    *curPtr, *nextPtr, *writePtr;
    PollData       pdata;
    Tcl_HashTable  pools;     /* used for accumulating bandwidth per pool */

    Ns_ThreadSetName("-writer%d-", queuePtr->id);
    queuePtr->threadName = Ns_ThreadGetName();

    Tcl_InitHashTable(&pools, TCL_ONE_WORD_KEYS);

    /*
     * Loop forever until signaled to shut down and all
     * connections are complete and gracefully closed.
     */
    Ns_Log(Notice, "writer%d: accepting connections", queuePtr->id);

    PollCreate(&pdata);
    writePtr = NULL;
    stopping = NS_FALSE;

    while (!stopping) {
        char charBuffer[1];

        /*
         * If there are any write sockets, set the bits.
         */
        PollReset(&pdata);
        (void)PollSet(&pdata, queuePtr->pipe[0], (short)POLLIN, NULL);

        if (writePtr == NULL) {
            pollTimeout = 30 * 1000;
        } else {

            /*
             * If per-pool bandwidth management is requested, compute the
             * base data for the adjustment. If no bandwidth management is
             * requested, there is no slowdown.
             */
            if (NsWriterBandwidthManagement) {
                WriterPerPoolRates(writePtr, &pools);
            }

            /*
             * There are writers active. Determine on which writers we poll
             * and compute the maximal poll wait time.
             */
            pollTimeout = 1000;
            for (curPtr = writePtr; curPtr != NULL; curPtr = curPtr->nextPtr) {
                int sleepTimeMs = 0;

                Ns_Log(DriverDebug, "### Writer poll collect %p size %" PRIdz " streaming %d rateLimit %d",
                       (void *)curPtr, curPtr->size, curPtr->doStream, curPtr->rateLimit);

                if (curPtr->rateLimit > 0
                    && curPtr->nsent > 0
                    && curPtr->currentRate > 0
                    ) {
                    int currentMs, targetTimeMs;

                    /*
                     * Perform per-pool rate management, when
                     *
                     *   - a poolLimit is provided,
                     *   - we have performance data of the pool, and
                     *   - changes are possible (as flagged by deltaPercentage).
                     */
                    if (NsWriterBandwidthManagement
                        && curPtr->poolPtr->rate.poolLimit > 0
                        && curPtr->infoPtr != NULL
                        && curPtr->infoPtr->deltaPercentage != 0
                        ) {
                        /*
                         * Only adjust data for busy writer jobs, which
                         * are close to their limits.
                         */
                        bool onLimit = (curPtr->currentRate*100 / curPtr->rateLimit) > 90;

                        Ns_Log(DriverDebug, "we allowed %d we use %d on limit %d (%d) , we can do %d%%",
                               curPtr->rateLimit, curPtr->currentRate,
                               (int)onLimit, curPtr->currentRate*100/curPtr->rateLimit,
                               curPtr->infoPtr->deltaPercentage);
                        if (onLimit) {
                            /*
                             * Compute a new rate limit based on the
                             * positive/negative delta percentage.
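                             *
                             * For example (illustrative): currentRate
                             * 400 KB/s and deltaPercentage 10 yield
                             * newRate = 400 + 40 = 440, which is then
                             * clamped against the pool limit below.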
*/ int newRate = curPtr->currentRate + (curPtr->currentRate * curPtr->infoPtr->deltaPercentage / 100); /* * Sanity checks: * - never allow more than poolLimit * - never kill connections completely (e.g. minRate 5KB/s) */ if (newRate > curPtr->poolPtr->rate.poolLimit) { newRate = curPtr->poolPtr->rate.poolLimit; } else if (newRate < 5) { newRate = 5; } Ns_Log(Notice, "... pool '%s' new rate limit changed from %d to %d KB/s (delta %d%%)", curPtr->poolPtr->pool, curPtr->rateLimit, newRate, curPtr->infoPtr->deltaPercentage); curPtr->rateLimit = newRate; } } /* * Adjust rate to the rate limit. */ currentMs = (int)(curPtr->nsent/(Tcl_WideInt)curPtr->currentRate); targetTimeMs = (int)(curPtr->nsent/(Tcl_WideInt)curPtr->rateLimit); sleepTimeMs = 1 + targetTimeMs - currentMs; Ns_Log(WriterDebug, "### Writer(%d)" " byte sent %" TCL_LL_MODIFIER "d msecs %d rate %d KB/s" " targetRate %d KB/s sleep %d", curPtr->sockPtr->sock, curPtr->nsent, currentMs, curPtr->currentRate, curPtr->rateLimit, sleepTimeMs); } if (likely(curPtr->size > 0u)) { if (sleepTimeMs <= 0) { SockPoll(curPtr->sockPtr, (short)POLLOUT, &pdata); pollTimeout = -1; } else { pollTimeout = MIN(sleepTimeMs, pollTimeout); } } else if (unlikely(curPtr->doStream == NS_WRITER_STREAM_FINISH)) { pollTimeout = -1; } } } Ns_Log(DriverDebug, "### Writer final pollTimeout %d", pollTimeout); /* * Select and drain the trigger pipe if necessary. */ (void) PollWait(&pdata, pollTimeout); if (PollIn(&pdata, 0) && unlikely(ns_recv(queuePtr->pipe[0], charBuffer, 1u, 0) != 1)) { Ns_Fatal("writer: trigger ns_recv() failed: %s", ns_sockstrerror(ns_sockerrno)); } /* * Write to all available sockets */ Ns_GetTime(&now); curPtr = writePtr; writePtr = NULL; while (curPtr != NULL) { NsWriterStreamState doStream; SpoolerState spoolerState = SPOOLER_OK; nextPtr = curPtr->nextPtr; sockPtr = curPtr->sockPtr; err = 0; /* * The truth value of doStream does not change through * concurrency. */ doStream = curPtr->doStream; if (unlikely(PollHup(&pdata, sockPtr->pidx))) { Ns_Log(DriverDebug, "### Writer %p reached POLLHUP fd %d", (void *)curPtr, sockPtr->sock); spoolerState = SPOOLER_CLOSE; err = 0; curPtr->infoPtr = WriterGetInfoPtr(curPtr, &pools); curPtr->infoPtr->currentPoolRate += curPtr->currentRate; } else if (likely(PollOut(&pdata, sockPtr->pidx)) || (doStream == NS_WRITER_STREAM_FINISH)) { /* * The socket is writable, we can compute the rate, when * something was sent already and some kind of rate limiting * is in place ... and we have sent enough data to make a good * estimate (just after the 2nd send, so more than driver * buffer size. 
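             * For example (illustrative): 2,000,000 bytes sent in 4000 ms
             * yield currentRate = 500 (bytes per millisecond, i.e. roughly
             * KB/s), as computed below from nsent and the elapsed time.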
*/ Ns_Log(DriverDebug, "Socket of pool '%s' is writable, writer limit %d nsent %ld", curPtr->poolPtr->pool, curPtr->rateLimit, (long)curPtr->nsent); if (curPtr->rateLimit > 0 && (size_t)curPtr->nsent > curPtr->sockPtr->drvPtr->bufsize ) { Ns_Time diff; long currentMs; Ns_DiffTime(&now, &curPtr->startTime, &diff); currentMs = Ns_TimeToMilliseconds(&diff); if (currentMs > 0) { curPtr->currentRate = (int)((curPtr->nsent)/(Tcl_WideInt)currentMs); Ns_Log(DriverDebug, "Socket of pool '%s' is writable, currentMs %ld has updated current rate %d", curPtr->poolPtr->pool, currentMs,curPtr->currentRate); } } Ns_Log(DriverDebug, "### Writer %p can write to client fd %d (trigger %d) streaming %.6x" " size %" PRIdz " nsent %" TCL_LL_MODIFIER "d bufsize %" PRIdz, (void *)curPtr, sockPtr->sock, PollIn(&pdata, 0), doStream, curPtr->size, curPtr->nsent, curPtr->c.file.bufsize); if (unlikely(curPtr->size < 1u)) { /* * Size < 1 means that everything was sent. */ if (doStream != NS_WRITER_STREAM_ACTIVE) { if (doStream == NS_WRITER_STREAM_FINISH) { Ns_ReleaseTemp(curPtr->fd); } spoolerState = SPOOLER_CLOSE; } } else { /* * If size > 0, there is still something to send. * If we are spooling from a file, read some data * from the (spool) file and place it into curPtr->c.file.buf. */ if (curPtr->fd != NS_INVALID_FD) { spoolerState = WriterReadFromSpool(curPtr); } if (spoolerState == SPOOLER_OK) { spoolerState = WriterSend(curPtr, &err); } } } else { /* * Mark when first timeout occurred or check if it is already * for too long and we need to stop this socket */ if (sockPtr->timeout.sec == 0) { Ns_Log(DriverDebug, "Writer %p fd %d setting sendwait %ld.%6ld", (void *)curPtr, sockPtr->sock, curPtr->sockPtr->drvPtr->sendwait.sec, curPtr->sockPtr->drvPtr->sendwait.usec); SockTimeout(sockPtr, &now, &curPtr->sockPtr->drvPtr->sendwait); } else if (Ns_DiffTime(&sockPtr->timeout, &now, NULL) <= 0) { Ns_Log(DriverDebug, "Writer %p fd %d timeout", (void *)curPtr, sockPtr->sock); err = ETIMEDOUT; spoolerState = SPOOLER_CLOSETIMEOUT; } } /* * Check result status and close the socket in case of * timeout or completion */ Ns_MutexLock(&queuePtr->lock); if (spoolerState == SPOOLER_OK) { if (curPtr->size > 0u || doStream == NS_WRITER_STREAM_ACTIVE) { Ns_Log(DriverDebug, "Writer %p continue OK (size %" PRIdz ") => PUSH", (void *)curPtr, curPtr->size); Push(curPtr, writePtr); } else { Ns_Log(DriverDebug, "Writer %p done OK (size %" PRIdz ") => RELEASE", (void *)curPtr, curPtr->size); WriterSockRelease(curPtr); } } else { /* * spoolerState might be SPOOLER_CLOSE or SPOOLER_*TIMEOUT, or SPOOLER_*ERROR */ Ns_Log(DriverDebug, "Writer %p fd %d release, not OK (status %d) => RELEASE", (void *)curPtr, curPtr->sockPtr->sock, (int)spoolerState); curPtr->status = spoolerState; curPtr->err = err; WriterSockRelease(curPtr); } Ns_MutexUnlock(&queuePtr->lock); curPtr = nextPtr; } /* * Add more sockets to the writer queue */ if (queuePtr->sockPtr != NULL) { Ns_MutexLock(&queuePtr->lock); if (queuePtr->sockPtr != NULL) { curPtr = queuePtr->sockPtr; queuePtr->sockPtr = NULL; while (curPtr != NULL) { nextPtr = curPtr->nextPtr; sockPtr = curPtr->sockPtr; drvPtr = sockPtr->drvPtr; SockTimeout(sockPtr, &now, &drvPtr->sendwait); Push(curPtr, writePtr); queuePtr->queuesize++; curPtr = nextPtr; } queuePtr->curPtr = writePtr; } Ns_MutexUnlock(&queuePtr->lock); } /* * Check for shutdown */ stopping = queuePtr->shutdown; } PollFree(&pdata); { /* * Free ConnPoolInfo */ Tcl_HashSearch search; Tcl_HashEntry *hPtr = Tcl_FirstHashEntry(&pools, &search); while (hPtr 
!= NULL) { ConnPoolInfo *infoPtr = (ConnPoolInfo *)Tcl_GetHashValue(hPtr); ns_free(infoPtr); hPtr = Tcl_NextHashEntry(&search); } /* * Delete the hash table for pools. */ Tcl_DeleteHashTable(&pools); } Ns_Log(Notice, "exiting"); Ns_MutexLock(&queuePtr->lock); queuePtr->stopped = NS_TRUE; Ns_CondBroadcast(&queuePtr->cond); Ns_MutexUnlock(&queuePtr->lock); } /* *---------------------------------------------------------------------- * * NsWriterFinish -- * * Finish a streaming writer job (typically called at the close * of a connection). A streaming writer job is fed typically by a * sequence of ns_write operations. After such an operation, the * WriterThread has to keep the writer job alive. * NsWriterFinish() tells the WriterThread that no more * other writer jobs will come from this connection. * * Results: * None. * * Side effects: * Change the state of the writer job and trigger the queue. * *---------------------------------------------------------------------- */ void NsWriterFinish(NsWriterSock *wrSockPtr) { WriterSock *writerSockPtr = (WriterSock *)wrSockPtr; NS_NONNULL_ASSERT(wrSockPtr != NULL); Ns_Log(DriverDebug, "NsWriterFinish: %p", (void *)writerSockPtr); writerSockPtr->doStream = NS_WRITER_STREAM_FINISH; SockTrigger(writerSockPtr->queuePtr->pipe[1]); } /* *---------------------------------------------------------------------- * * WriterSetupStreamingMode -- * * In streaming mode, setup a temporary fd which is used as input and * output. Streaming i/o will append to the file, while the write will * read from it. * * Results: * Ns_ReturnCode (NS_OK, NS_ERROR, NS_FILTER_BREAK). In the last case * signals that all processing was already performed and the caller can * stop handling more data. On success, the function returns an fd as * last argument. * * Side effects: * Potentially allocating temp file and updating connPtr members. * *---------------------------------------------------------------------- */ Ns_ReturnCode WriterSetupStreamingMode(Conn *connPtr, struct iovec *bufs, int nbufs, int *fdPtr) { bool first; size_t wrote = 0u; WriterSock *wrSockPtr1; Ns_ReturnCode status = NS_OK; NS_NONNULL_ASSERT(connPtr != NULL); NS_NONNULL_ASSERT(bufs != NULL); NS_NONNULL_ASSERT(fdPtr != NULL); Ns_Log(DriverDebug, "NsWriterQueue: streaming writer job"); if (connPtr->fd == 0) { /* * Create a new temporary spool file and provide the fd to the * connection thread via connPtr. */ first = NS_TRUE; wrSockPtr1 = NULL; *fdPtr = Ns_GetTemp(); connPtr->fd = *fdPtr; Ns_Log(DriverDebug, "NsWriterQueue: new tmp file has fd %d", *fdPtr); } else { /* * Reuse previously created spool file. */ first = NS_FALSE; wrSockPtr1 = WriterSockRequire(connPtr); if (wrSockPtr1 == NULL) { Ns_Log(Notice, "NsWriterQueue: writer job was already canceled (fd %d); maybe user dropped connection", connPtr->fd); return NS_ERROR; } else { /* * lock only, when first == NS_FALSE. */ Ns_MutexLock(&wrSockPtr1->c.file.fdlock); (void)ns_lseek(connPtr->fd, 0, SEEK_END); } } /* * For the time being, handle just "string data" in streaming * output (iovec bufs). Write the content to the spool file. 
*/ { int i; for (i = 0; i < nbufs; i++) { ssize_t j = ns_write(connPtr->fd, bufs[i].iov_base, bufs[i].iov_len); if (j > 0) { wrote += (size_t)j; Ns_Log(Debug, "NsWriterQueue: fd %d [%d] spooled %" PRIdz " of %" PRIiovlen " OK %d", connPtr->fd, i, j, bufs[i].iov_len, (j == (ssize_t)bufs[i].iov_len)); } else { Ns_Log(Warning, "NsWriterQueue: spool to fd %d write operation failed", connPtr->fd); } } } if (first) { //bufs = NULL; connPtr->nContentSent = wrote; #ifndef _WIN32 /* * sock_set_blocking can't be used under windows, since sockets * are under windows no file descriptors. */ (void)ns_sock_set_blocking(connPtr->fd, NS_FALSE); #endif /* * Fall through to register stream writer with temp file */ } else { WriterSock *writerSockPtr; /* * This is a later streaming operation, where the writer job * (strWriter) was previously established. */ assert(wrSockPtr1 != NULL); /* * Update the controlling variables (size and toread) in the connPtr, * and the length info for the access log, and trigger the writer to * notify it about the change. */ writerSockPtr = (WriterSock *)connPtr->strWriter; writerSockPtr->size += wrote; writerSockPtr->c.file.toRead += wrote; Ns_MutexUnlock(&wrSockPtr1->c.file.fdlock); connPtr->nContentSent += wrote; if (likely(wrSockPtr1->queuePtr != NULL)) { SockTrigger(wrSockPtr1->queuePtr->pipe[1]); } WriterSockRelease(wrSockPtr1); status = NS_FILTER_BREAK; } return status; } /* *---------------------------------------------------------------------- * * NsWriterQueue -- * * Submit a new job to the writer queue. * * Results: * * NS_ERROR means that the Writer thread refuses to accept this * job and that the client (the connection thread) has to handle * this data. NS_OK means that the Writer thread cares for * transmitting the content to the client. * * Side effects: * Potentially adding a job to the writer queue. * *---------------------------------------------------------------------- */ Ns_ReturnCode NsWriterQueue(Ns_Conn *conn, size_t nsend, Tcl_Channel chan, FILE *fp, int fd, struct iovec *bufs, int nbufs, const Ns_FileVec *filebufs, int nfilebufs, bool everysize) { Conn *connPtr; WriterSock *wrSockPtr; SpoolerQueue *queuePtr; DrvWriter *wrPtr; bool trigger = NS_FALSE; size_t headerSize; Ns_ReturnCode status = NS_OK; Ns_FileVec *fbufs = NULL; int nfbufs = 0; NS_NONNULL_ASSERT(conn != NULL); connPtr = (Conn *)conn; if (unlikely(connPtr->sockPtr == NULL)) { Ns_Log(Warning, "NsWriterQueue: called without sockPtr size %" PRIdz " bufs %d flags %.6x stream %.6x chan %p fd %d", nsend, nbufs, connPtr->flags, connPtr->flags & NS_CONN_STREAM, (void *)chan, fd); status = NS_ERROR; wrPtr = NULL; } else { wrPtr = &connPtr->sockPtr->drvPtr->writer; Ns_Log(DriverDebug, "NsWriterQueue: size %" PRIdz " bufs %p (%d) flags %.6x stream %.6x chan %p fd %d thread %d", nsend, (void *)bufs, nbufs, connPtr->flags, connPtr->flags & NS_CONN_STREAM, (void *)chan, fd, wrPtr->threads); if (unlikely(wrPtr->threads == 0)) { Ns_Log(DriverDebug, "NsWriterQueue: no writer threads configured"); status = NS_ERROR; } else if (nsend < (size_t)wrPtr->writersize && !everysize && connPtr->fd == 0) { Ns_Log(DriverDebug, "NsWriterQueue: file is too small(%" PRIdz " < %" PRIdz ")", nsend, wrPtr->writersize); status = NS_ERROR; } } if (status != NS_OK) { return status; } assert(wrPtr != NULL); /* * In streaming mode, setup a temporary fd which is used as input and * output. Streaming i/o will append to the file, while the write will * read from it. 
*/ if (((connPtr->flags & NS_CONN_STREAM) != 0u) || connPtr->fd > 0) { if (wrPtr->doStream == NS_WRITER_STREAM_NONE) { status = NS_ERROR; } else if (unlikely(fp != NULL || fd != NS_INVALID_FD)) { Ns_Log(DriverDebug, "NsWriterQueue: does not stream from this source via writer"); status = NS_ERROR; } else { status = WriterSetupStreamingMode(connPtr, bufs, nbufs, &fd); } if (unlikely(status != NS_OK)) { if (status == NS_FILTER_BREAK) { status = NS_OK; } return status; } /* * As a result of successful WriterSetupStreamingMode(), we have fd * set. */ assert(fd != NS_INVALID_FD); } else { if (fp != NULL) { /* * The client provided an open file pointer and closes it */ fd = ns_dup(fileno(fp)); } else if (fd != NS_INVALID_FD) { /* * The client provided an open file descriptor and closes it */ fd = ns_dup(fd); } else if (chan != NULL) { ClientData clientData; /* * The client provided an open Tcl channel and closes it */ if (Tcl_GetChannelHandle(chan, TCL_READABLE, &clientData) != TCL_OK) { return NS_ERROR; } fd = ns_dup(PTR2INT(clientData)); } else if (filebufs != NULL && nfilebufs > 0) { /* * The client provided Ns_FileVec with open files. The client is * responsible for closing it, like in all other cases. */ size_t i; /* * This is the only case, where fbufs will be != NULL, * i.e. keeping a duplicate of the passed-in Ns_FileVec structure * for which the client is responsible. */ fbufs = (Ns_FileVec *)ns_calloc((size_t)nfilebufs, sizeof(Ns_FileVec)); nfbufs = nfilebufs; for (i = 0u; i < (size_t)nfilebufs; i++) { fbufs[i].fd = ns_dup(filebufs[i].fd); fbufs[i].length = filebufs[i].length; fbufs[i].offset = filebufs[i].offset; } /* * Place the fd of the first Ns_FileVec to fd. */ fd = fbufs[0].fd; Ns_Log(DriverDebug, "NsWriterQueue: filevec mode, take first fd %d tosend %lu", fd, nsend); } } Ns_Log(DriverDebug, "NsWriterQueue: writer threads %d nsend %" PRIdz " writersize %" PRIdz, wrPtr->threads, nsend, wrPtr->writersize); assert(connPtr->poolPtr != NULL); connPtr->poolPtr->stats.spool++; wrSockPtr = (WriterSock *)ns_calloc(1u, sizeof(WriterSock)); wrSockPtr->sockPtr = connPtr->sockPtr; wrSockPtr->poolPtr = connPtr->poolPtr; /* just for being able to trace back the origin, e.g. list */ wrSockPtr->sockPtr->timeout.sec = 0; wrSockPtr->flags = connPtr->flags; wrSockPtr->refCount = 1; /* * Take the rate limit from the connection. */ wrSockPtr->rateLimit = connPtr->rateLimit; if (wrSockPtr->rateLimit == -1) { /* * The value was not specified via connection. Use either the pool * limit as a base for the computation or fall back to the driver * default value. */ if (connPtr->poolPtr->rate.poolLimit > 0) { /* * Very optimistic start value, but values will float through via * bandwidth management. */ wrSockPtr->rateLimit = connPtr->poolPtr->rate.poolLimit / 2; } else { wrSockPtr->rateLimit = wrPtr->rateLimit; } } Ns_Log(WriterDebug, "### Writer(%d): initial rate limit %d KB/s", wrSockPtr->sockPtr->sock, wrSockPtr->rateLimit); /* * Make sure we have proper content length header for * keep-alive/pipelining. 
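     *
     * For example (illustrative): a 10000-byte response gets a
     * "Content-Length: 10000" header so that the client can reuse the
     * connection; in streaming mode the total length is not known in
     * advance, which Ns_ConnSetLengthHeader() is told via its last
     * argument.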
*/ Ns_ConnSetLengthHeader(conn, nsend, (wrSockPtr->flags & NS_CONN_STREAM) != 0u); /* * Flush the headers */ if ((conn->flags & NS_CONN_SENTHDRS) == 0u) { Tcl_DString ds; Ns_DStringInit(&ds); Ns_Log(DriverDebug, "### Writer(%d): add header", fd); conn->flags |= NS_CONN_SENTHDRS; (void)Ns_CompleteHeaders(conn, nsend, 0u, &ds); headerSize = (size_t)Ns_DStringLength(&ds); if (headerSize > 0u) { wrSockPtr->headerString = ns_strdup(Tcl_DStringValue(&ds)); } Ns_DStringFree(&ds); } else { headerSize = 0u; } if (fd != NS_INVALID_FD) { /* maybe add mmap support for files (fd != NS_INVALID_FD) */ wrSockPtr->fd = fd; wrSockPtr->c.file.bufs = fbufs; wrSockPtr->c.file.nbufs = nfbufs; Ns_Log(DriverDebug, "### Writer(%d) tosend %" PRIdz " files %d bufsize %" PRIdz, fd, nsend, nfbufs, wrPtr->bufsize); if (unlikely(headerSize >= wrPtr->bufsize)) { /* * We have a header which is larger than bufsize; place it * as "leftover" and use the headerString as buffer for file * reads (rather rare case) */ wrSockPtr->c.file.buf = (unsigned char *)wrSockPtr->headerString; wrSockPtr->c.file.maxsize = headerSize; wrSockPtr->c.file.bufsize = headerSize; wrSockPtr->headerString = NULL; } else if (headerSize > 0u) { /* * We have a header that fits into the bufsize; place it * as "leftover" at the end of the buffer. */ wrSockPtr->c.file.buf = ns_malloc(wrPtr->bufsize); memcpy(wrSockPtr->c.file.buf, wrSockPtr->headerString, headerSize); wrSockPtr->c.file.bufsize = headerSize; wrSockPtr->c.file.maxsize = wrPtr->bufsize; ns_free(wrSockPtr->headerString); wrSockPtr->headerString = NULL; } else { assert(wrSockPtr->headerString == NULL); wrSockPtr->c.file.buf = ns_malloc(wrPtr->bufsize); wrSockPtr->c.file.maxsize = wrPtr->bufsize; } wrSockPtr->c.file.bufoffset = 0; wrSockPtr->c.file.toRead = nsend; } else if (bufs != NULL) { int i, j, headerbufs = (headerSize > 0u ? 1 : 0); wrSockPtr->fd = NS_INVALID_FD; if (nbufs+headerbufs < UIO_SMALLIOV) { wrSockPtr->c.mem.bufs = wrSockPtr->c.mem.preallocated_bufs; } else { Ns_Log(DriverDebug, "NsWriterQueue: alloc %d iovecs", nbufs); wrSockPtr->c.mem.bufs = ns_calloc((size_t)nbufs + (size_t)headerbufs, sizeof(struct iovec)); } wrSockPtr->c.mem.nbufs = nbufs+headerbufs; if (headerbufs != 0) { wrSockPtr->c.mem.bufs[0].iov_base = wrSockPtr->headerString; wrSockPtr->c.mem.bufs[0].iov_len = headerSize; } if (connPtr->fmap.addr != NULL) { Ns_Log(DriverDebug, "NsWriterQueue: deliver fmapped %p", (void *)connPtr->fmap.addr); /* * Deliver an mmapped file, no need to copy content */ for (i = 0, j=headerbufs; i < nbufs; i++, j++) { wrSockPtr->c.mem.bufs[j].iov_base = bufs[i].iov_base; wrSockPtr->c.mem.bufs[j].iov_len = bufs[i].iov_len; } /* * Make a copy of the fmap structure and make clear that * we unmap in the writer thread. */ wrSockPtr->c.mem.fmap = connPtr->fmap; connPtr->fmap.addr = NULL; /* header string will be freed via wrSockPtr->headerString */ } else { /* * Deliver a content from iovec. The lifetime of the * source is unknown, we have to copy the c. */ for (i = 0, j=headerbufs; i < nbufs; i++, j++) { wrSockPtr->c.mem.bufs[j].iov_base = ns_malloc(bufs[i].iov_len); wrSockPtr->c.mem.bufs[j].iov_len = bufs[i].iov_len; memcpy(wrSockPtr->c.mem.bufs[j].iov_base, bufs[i].iov_base, bufs[i].iov_len); } /* header string will be freed a buf[0] */ wrSockPtr->headerString = NULL; } } else { ns_free(wrSockPtr); return NS_ERROR; } /* * Add header size to total size. 
*/ nsend += headerSize; if (connPtr->clientData != NULL) { wrSockPtr->clientData = ns_strdup(connPtr->clientData); } wrSockPtr->startTime = *Ns_ConnStartTime(conn); /* * Setup streaming context before sending potentially headers. */ if ((wrSockPtr->flags & NS_CONN_STREAM) != 0u) { wrSockPtr->doStream = NS_WRITER_STREAM_ACTIVE; assert(connPtr->strWriter == NULL); /* * Add a reference to the stream writer to the connection such * it can efficiently append to a stream when multiple output * operations happen. The backpointer (from the stream writer * to the connection is needed to clear the reference to the * writer in case the writer is deleted. No locks are needed, * since nobody can share this structure yet. */ connPtr->strWriter = (NsWriterSock *)wrSockPtr; wrSockPtr->connPtr = connPtr; } /* * Tell connection, that writer handles the output (including * closing the connection to the client). */ connPtr->flags |= NS_CONN_SENT_VIA_WRITER; wrSockPtr->keep = connPtr->keep > 0 ? NS_TRUE : NS_FALSE; wrSockPtr->size = nsend; Ns_Log(DriverDebug, "NsWriterQueue NS_CONN_SENT_VIA_WRITER connPtr %p", (void*)connPtr); if ((wrSockPtr->flags & NS_CONN_STREAM) == 0u) { Ns_Log(DriverDebug, "NsWriterQueue NS_CONN_SENT_VIA_WRITER connPtr %p clear sockPtr %p", (void*)connPtr, (void*)connPtr->sockPtr); connPtr->sockPtr = NULL; connPtr->flags |= NS_CONN_CLOSED; connPtr->nContentSent = nsend - headerSize; } /* * Get the next writer thread from the list, all writer requests are * rotated between all writer threads */ Ns_MutexLock(&wrPtr->lock); if (wrPtr->curPtr == NULL) { wrPtr->curPtr = wrPtr->firstPtr; } queuePtr = wrPtr->curPtr; wrPtr->curPtr = wrPtr->curPtr->nextPtr; Ns_MutexUnlock(&wrPtr->lock); Ns_Log(WriterDebug, "Writer(%d): started: id=%d fd=%d, " "size=%" PRIdz ", flags=%X, rate %d KB/s: %s", wrSockPtr->sockPtr->sock, queuePtr->id, wrSockPtr->fd, nsend, wrSockPtr->flags, wrSockPtr->rateLimit, connPtr->request.line); /* * Now add new writer socket to the writer thread's queue */ wrSockPtr->queuePtr = queuePtr; Ns_MutexLock(&queuePtr->lock); if (queuePtr->sockPtr == NULL) { trigger = NS_TRUE; } Push(wrSockPtr, queuePtr->sockPtr); Ns_MutexUnlock(&queuePtr->lock); /* * Wake up writer thread */ if (trigger) { SockTrigger(queuePtr->pipe[1]); } return NS_OK; } /* *---------------------------------------------------------------------- * * DriverWriterFromObj -- * * Lookup driver by name and return its DrvWriter. When driverObj is * NULL, get the driver from the conn. * * Results: * Ns_ReturnCode * * Side effects: * Set error message in interp in case of failure. * *---------------------------------------------------------------------- */ static Ns_ReturnCode DriverWriterFromObj( Tcl_Interp *interp, Tcl_Obj *driverObj, Ns_Conn *conn, DrvWriter **wrPtrPtr) { Driver *drvPtr; const char *driverName = NULL; int driverNameLen = 0; DrvWriter *wrPtr = NULL; Ns_ReturnCode result; /* * If no driver is provided, take the current driver. The caller has * to make sure that in cases, where no driver is specified, the * command is run in a connection thread. 
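     *
     * For example (illustrative, assuming a driver module named "nssock"):
     * "ns_writer size -driver nssock" resolves the DrvWriter of that
     * driver outside of a connection thread, while a plain "ns_writer
     * size" only works inside one.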
*/ if (driverObj == NULL) { if (conn != NULL) { driverName = Ns_ConnDriverName(conn); driverNameLen = (int)strlen(driverName); } } else { driverName = Tcl_GetStringFromObj(driverObj, &driverNameLen); } if (driverName != NULL) { for (drvPtr = firstDrvPtr; drvPtr != NULL; drvPtr = drvPtr->nextPtr) { if (strncmp(driverName, drvPtr->threadName, (size_t)driverNameLen) == 0) { if (drvPtr->writer.firstPtr != NULL) { wrPtr = &drvPtr->writer; } break; } } } if (unlikely(wrPtr == NULL)) { Ns_TclPrintfResult(interp, "no writer configured for a driver with name %s", driverName); result = NS_ERROR; } else { *wrPtrPtr = wrPtr; result = NS_OK; } return result; } /* *---------------------------------------------------------------------- * * WriterSubmitObjCmd - subcommand of NsTclWriterObjCmd -- * * Implements "ns_writer submit" command. * Send the provided data to the client. * * Results: * Standard Tcl result. * * Side effects: * None. * *---------------------------------------------------------------------- */ static int WriterSubmitObjCmd(ClientData UNUSED(clientData), Tcl_Interp *interp, int objc, Tcl_Obj *const* objv) { int result = TCL_OK; Ns_Conn *conn; Tcl_Obj *dataObj; Ns_ObjvSpec args[] = { {"data", Ns_ObjvObj, &dataObj, NULL}, {NULL, NULL, NULL, NULL} }; if (Ns_ParseObjv(NULL, args, interp, 2, objc, objv) != NS_OK || NsConnRequire(interp, NS_CONN_REQUIRE_ALL, &conn) != NS_OK) { result = TCL_ERROR; } else { int size; unsigned char *data = Tcl_GetByteArrayFromObj(dataObj, &size); if (data != NULL) { struct iovec vbuf; Ns_ReturnCode status; vbuf.iov_base = (void *)data; vbuf.iov_len = (size_t)size; status = NsWriterQueue(conn, (size_t)size, NULL, NULL, NS_INVALID_FD, &vbuf, 1, NULL, 0, NS_TRUE); Tcl_SetObjResult(interp, Tcl_NewBooleanObj(status == NS_OK ? 1 : 0)); } } return result; } /* *---------------------------------------------------------------------- * * WriterCheckInputParams - * * Helper command for WriterSubmitFileObjCmd and WriterSubmitFilesObjCmd * to check validity of filename, offset and size. * * Results: * Standard Tcl result. Returns on success also fd and nrbytes. * * Side effects: * None. * *---------------------------------------------------------------------- */ static int WriterCheckInputParams(Tcl_Interp *interp, const char *filenameString, size_t size, off_t offset, int *fdPtr, size_t *nrbytesPtr) { int result = TCL_OK, rc; struct stat st; Ns_Log(DriverDebug, "WriterCheckInputParams %s offset %" PROTd " size %" PRIdz, filenameString, offset, size); /* * Use stat() call to obtain information about the actual file to check * later the plausibility of the parameters. */ rc = stat(filenameString, &st); if (unlikely(rc != 0)) { Ns_TclPrintfResult(interp, "file does not exist '%s'", filenameString); result = TCL_ERROR; } else { size_t nrbytes = 0u; int fd; /* * Try to open the file and check offset and size parameters. */ fd = ns_open(filenameString, O_RDONLY | O_CLOEXEC, 0); if (unlikely(fd == NS_INVALID_FD)) { Ns_TclPrintfResult(interp, "could not open file '%s'", filenameString); result = TCL_ERROR; } else if (unlikely(offset > st.st_size) || offset < 0) { Ns_TclPrintfResult(interp, "offset must be a positive value less or equal filesize"); result = TCL_ERROR; } else if (size > 0) { if (unlikely((off_t)size + offset > st.st_size)) { Ns_TclPrintfResult(interp, "offset + size must be less or equal filesize"); result = TCL_ERROR; } else { nrbytes = (size_t)size; } } else { nrbytes = (size_t)st.st_size - (size_t)offset; } /* * When an offset is provide, jump to this offset. 
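         *
         * For example (illustrative): for a 10000-byte file, offset 4000
         * and size 0 yield nrbytes = 6000 above, and the code below
         * positions the fd at byte 4000 before it is handed to the writer.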
*/ if (offset > 0 && result == TCL_OK) { if (ns_lseek(fd, (off_t)offset, SEEK_SET) == -1) { Ns_TclPrintfResult(interp, "cannot seek to position %ld", (long)offset); result = TCL_ERROR; } } if (result == TCL_OK) { *fdPtr = fd; *nrbytesPtr = nrbytes; } else if (fd != NS_INVALID_FD) { /* * On invalid parameters, close the fd. */ ns_close(fd); } } return result; } /* *---------------------------------------------------------------------- * * WriterSubmitFileObjCmd - subcommand of NsTclWriterObjCmd -- * * Implements "ns_writer submitfile" command. * Send the provided file to the client. * * Results: * Standard Tcl result. * * Side effects: * None. * *---------------------------------------------------------------------- */ static int WriterSubmitFileObjCmd(ClientData UNUSED(clientData), Tcl_Interp *interp, int objc, Tcl_Obj *const* objv) { int result = TCL_OK; Ns_Conn *conn; char *fileNameString; int headers = 0; Tcl_WideInt offset = 0, size = 0; Ns_ObjvValueRange offsetRange = {0, LLONG_MAX}; Ns_ObjvValueRange sizeRange = {1, LLONG_MAX}; Ns_ObjvSpec lopts[] = { {"-headers", Ns_ObjvBool, &headers, INT2PTR(NS_TRUE)}, {"-offset", Ns_ObjvMemUnit, &offset, &offsetRange}, {"-size", Ns_ObjvMemUnit, &size, &sizeRange}, {NULL, NULL, NULL, NULL} }; Ns_ObjvSpec args[] = { {"file", Ns_ObjvString, &fileNameString, NULL}, {NULL, NULL, NULL, NULL} }; if (unlikely(Ns_ParseObjv(lopts, args, interp, 2, objc, objv) != NS_OK) || NsConnRequire(interp, NS_CONN_REQUIRE_ALL, &conn) != NS_OK) { result = TCL_ERROR; } else if (unlikely( Ns_ConnSockPtr(conn) == NULL )) { Ns_Log(Warning, "NsWriterQueue: called without valid sockPtr, maybe connection already closed"); Ns_TclPrintfResult(interp, "0"); result = TCL_OK; } else { size_t nrbytes = 0u; int fd = NS_INVALID_FD; result = WriterCheckInputParams(interp, fileNameString, (size_t)size, offset, &fd, &nrbytes); if (likely(result == TCL_OK)) { Ns_ReturnCode status; /* * The caller requested that we build required headers */ if (headers != 0) { Ns_ConnSetTypeHeader(conn, Ns_GetMimeType(fileNameString)); } status = NsWriterQueue(conn, nrbytes, NULL, NULL, fd, NULL, 0, NULL, 0, NS_TRUE); Tcl_SetObjResult(interp, Tcl_NewBooleanObj(status == NS_OK ? 1 : 0)); if (fd != NS_INVALID_FD) { (void) ns_close(fd); } else { Ns_Log(Warning, "WriterSubmitFileObjCmd called with invalid fd"); } } else if (fd != NS_INVALID_FD) { (void) ns_close(fd); } } return result; } /* *---------------------------------------------------------------------- * * WriterGetMemunitFromDict -- * * Helper function to obtain a memory unit from a dict structure, * optionally checking the value range. * * Results: * Standard Tcl result. * * Side effects: * On errors, an error message is left in the interpreter. 
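 *
 *      For example (illustrative): for the dict
 *      {filename /tmp/data.bin -size 4KB}, fetching the key "-size"
 *      yields the value "4KB", which Ns_TclGetMemUnitFromObj() converts
 *      to the byte value (4096) before the optional range check.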
* *---------------------------------------------------------------------- */ static int WriterGetMemunitFromDict(Tcl_Interp *interp, Tcl_Obj *dictObj, Tcl_Obj *keyObj, Ns_ObjvValueRange *rangePtr, Tcl_WideInt *valuePtr) { Tcl_Obj *intObj = NULL; int result; NS_NONNULL_ASSERT(interp != NULL); NS_NONNULL_ASSERT(dictObj != NULL); NS_NONNULL_ASSERT(keyObj != NULL); NS_NONNULL_ASSERT(valuePtr != NULL); result = Tcl_DictObjGet(interp, dictObj, keyObj, &intObj); if (result == TCL_OK && intObj != NULL) { result = Ns_TclGetMemUnitFromObj(interp, intObj, valuePtr); if (result == TCL_OK && rangePtr != NULL) { result = Ns_CheckWideRange(interp, Tcl_GetString(keyObj), rangePtr, *valuePtr); } } return result; } /* *---------------------------------------------------------------------- * * WriterSubmitFilesObjCmd - subcommand of NsTclWriterObjCmd -- * * Implements "ns_writer submitfiles" command. Send the provided files * to the client. "files" are provided as a list of dicts, where every * dict must contain a "filename" element and can contain an "-offset" * and/or a "-length" element. * * Results: * Standard Tcl result. * * Side effects: * None. * *---------------------------------------------------------------------- */ static int WriterSubmitFilesObjCmd(ClientData UNUSED(clientData), Tcl_Interp *interp, int objc, Tcl_Obj *const* objv) { int result = TCL_OK; Ns_Conn *conn; int headers = 0, nrFiles; Tcl_Obj *filesObj = NULL, **fileObjv; Ns_ObjvSpec lopts[] = { {"-headers", Ns_ObjvBool, &headers, INT2PTR(NS_TRUE)}, {NULL, NULL, NULL, NULL} }; Ns_ObjvSpec args[] = { {"files", Ns_ObjvObj, &filesObj, NULL}, {NULL, NULL, NULL, NULL} }; if (unlikely(Ns_ParseObjv(lopts, args, interp, 2, objc, objv) != NS_OK) || NsConnRequire(interp, NS_CONN_REQUIRE_ALL, &conn) != NS_OK) { result = TCL_ERROR; } else if (unlikely( Ns_ConnSockPtr(conn) == NULL )) { Ns_Log(Warning, "NsWriterQueue: called without valid sockPtr, " "maybe connection already closed"); Ns_TclPrintfResult(interp, "0"); result = TCL_OK; } else if (Tcl_ListObjGetElements(interp, filesObj, &nrFiles, &fileObjv) != TCL_OK) { Ns_TclPrintfResult(interp, "not a valid list of files: '%s'", Tcl_GetString(filesObj)); result = TCL_ERROR; } else if (nrFiles == 0) { Ns_TclPrintfResult(interp, "The provided list has to contain at least one file spec"); result = TCL_ERROR; } else { size_t totalbytes = 0u, i; Tcl_Obj *keys[3], *filenameObj = NULL; Ns_FileVec *filebufs; const char *firstFilenameString = NULL; Ns_ObjvValueRange offsetRange = {0, LLONG_MAX}; Ns_ObjvValueRange sizeRange = {1, LLONG_MAX}; filebufs = (Ns_FileVec *)ns_calloc((size_t)nrFiles, sizeof(Ns_FileVec)); keys[0] = Tcl_NewStringObj("filename", 8); keys[1] = Tcl_NewStringObj("-offset", 7); keys[2] = Tcl_NewStringObj("-size", 5); Tcl_IncrRefCount(keys[0]); Tcl_IncrRefCount(keys[1]); Tcl_IncrRefCount(keys[2]); for (i = 0u; i < (size_t)nrFiles; i++) { filebufs[i].fd = NS_INVALID_FD; } /* * Iterate over the list of dicts. */ for (i = 0u; i < (size_t)nrFiles; i++) { Tcl_WideInt offset = 0, size = 0; int rc, fd = NS_INVALID_FD; const char *filenameString; size_t nrbytes; /* * Get required "filename" element. 
*/ filenameObj = NULL; rc = Tcl_DictObjGet(interp, fileObjv[i], keys[0], &filenameObj); if (rc != TCL_OK || filenameObj == NULL) { Ns_TclPrintfResult(interp, "missing filename in dict '%s'", Tcl_GetString(fileObjv[i])); result = TCL_ERROR; break; } filenameString = Tcl_GetString(filenameObj); if (firstFilenameString == NULL) { firstFilenameString = filenameString; } /* * Get optional "-offset" and "-size" elements. */ if (WriterGetMemunitFromDict(interp, fileObjv[i], keys[1], &offsetRange, &offset) != TCL_OK) { result = TCL_ERROR; break; } if (WriterGetMemunitFromDict(interp, fileObjv[i], keys[2], &sizeRange, &size) != TCL_OK) { result = TCL_ERROR; break; } /* * Check validity of the provided values */ result = WriterCheckInputParams(interp, Tcl_GetString(filenameObj), (size_t)size, (off_t)offset, &fd, &nrbytes); if (result != TCL_OK) { break; } filebufs[i].fd = fd; filebufs[i].offset = offset; filebufs[i].length = nrbytes; totalbytes = totalbytes + (size_t)nrbytes; } Tcl_DecrRefCount(keys[0]); Tcl_DecrRefCount(keys[1]); Tcl_DecrRefCount(keys[2]); /* * If everything is ok, submit the request to the writer queue. */ if (result == TCL_OK) { Ns_ReturnCode status; if (headers != 0 && firstFilenameString != NULL) { Ns_ConnSetTypeHeader(conn, Ns_GetMimeType(firstFilenameString)); } status = NsWriterQueue(conn, totalbytes, NULL, NULL, NS_INVALID_FD, NULL, 0, filebufs, nrFiles, NS_TRUE); /* * Provide a soft error like for "ns_writer submitfile". */ Tcl_SetObjResult(interp, Tcl_NewBooleanObj(status == NS_OK ? 1 : 0)); } /* * The NsWriterQueue() API makes the usual duplicates of the file * descriptors and the Ns_FileVec structure, so we have to cleanup * here. */ for (i = 0u; i < (size_t)nrFiles; i++) { if (filebufs[i].fd != NS_INVALID_FD) { (void) ns_close(filebufs[i].fd); } } ns_free(filebufs); } return result; } /* *---------------------------------------------------------------------- * * WriterListObjCmd - subcommand of NsTclWriterObjCmd -- * * Implements "ns_writer list" command. * List the current writer jobs. * * Results: * Standard Tcl result. * * Side effects: * None. * *---------------------------------------------------------------------- */ static int WriterListObjCmd(ClientData UNUSED(clientData), Tcl_Interp *interp, int objc, Tcl_Obj *const* objv) { int result = TCL_OK; NsServer *servPtr = NULL; Ns_ObjvSpec lopts[] = { {"-server", Ns_ObjvServer, &servPtr, NULL}, {NULL, NULL, NULL, NULL} }; if (unlikely(Ns_ParseObjv(lopts, NULL, interp, 2, objc, objv) != NS_OK)) { result = TCL_ERROR; } else { Tcl_DString ds, *dsPtr = &ds; const Driver *drvPtr; SpoolerQueue *queuePtr; Tcl_DStringInit(dsPtr); for (drvPtr = firstDrvPtr; drvPtr != NULL; drvPtr = drvPtr->nextPtr) { const DrvWriter *wrPtr; /* * If server was specified, list only results from this server. 
*/ if (servPtr != NULL && servPtr != drvPtr->servPtr) { continue; } wrPtr = &drvPtr->writer; queuePtr = wrPtr->firstPtr; while (queuePtr != NULL) { const WriterSock *wrSockPtr; Ns_MutexLock(&queuePtr->lock); wrSockPtr = queuePtr->curPtr; while (wrSockPtr != NULL) { char ipString[NS_IPADDR_SIZE]; ns_inet_ntop((struct sockaddr *)&(wrSockPtr->sockPtr->sa), ipString,sizeof(ipString)); (void) Ns_DStringNAppend(dsPtr, "{", 1); (void) Ns_DStringAppendTime(dsPtr, &wrSockPtr->startTime); (void) Ns_DStringNAppend(dsPtr, " ", 1); (void) Ns_DStringAppend(dsPtr, queuePtr->threadName); (void) Ns_DStringNAppend(dsPtr, " ", 1); (void) Ns_DStringAppend(dsPtr, drvPtr->threadName); (void) Ns_DStringNAppend(dsPtr, " ", 1); (void) Ns_DStringAppend(dsPtr, NsPoolName(wrSockPtr->poolPtr->pool)); (void) Ns_DStringNAppend(dsPtr, " ", 1); (void) Ns_DStringAppend(dsPtr, ipString); (void) Ns_DStringPrintf(dsPtr, " %d %" PRIdz " %" TCL_LL_MODIFIER "d %d %d ", wrSockPtr->fd, wrSockPtr->size, wrSockPtr->nsent, wrSockPtr->currentRate, wrSockPtr->rateLimit); (void) Ns_DStringAppendElement(dsPtr, (wrSockPtr->clientData != NULL) ? wrSockPtr->clientData : NS_EMPTY_STRING); (void) Ns_DStringNAppend(dsPtr, "} ", 2); wrSockPtr = wrSockPtr->nextPtr; } Ns_MutexUnlock(&queuePtr->lock); queuePtr = queuePtr->nextPtr; } } Tcl_DStringResult(interp, &ds); } return result; } /* *---------------------------------------------------------------------- * * WriterSizeObjCmd - subcommand of NsTclWriterObjCmd -- * * Implements "ns_writer size" command. * Sets or queries size limit for sending via writer. * * Results: * Standard Tcl result. * * Side effects: * None. * *---------------------------------------------------------------------- */ static int WriterSizeObjCmd(ClientData UNUSED(clientData), Tcl_Interp *interp, int objc, Tcl_Obj *const* objv) { int result = TCL_OK; Tcl_Obj *driverObj = NULL; Ns_Conn *conn = NULL; Tcl_WideInt intValue = -1; const char *firstArgString; Ns_ObjvValueRange range = {1024, INT_MAX}; Ns_ObjvSpec *opts, optsNew[] = { {"-driver", Ns_ObjvObj, &driverObj, NULL}, {NULL, NULL, NULL, NULL} }; Ns_ObjvSpec *args, argsNew[] = { {"?value", Ns_ObjvMemUnit, &intValue, &range}, {NULL, NULL, NULL, NULL} }; Ns_ObjvSpec argsLegacy[] = { {"driver", Ns_ObjvObj, &driverObj, NULL}, {"?value", Ns_ObjvMemUnit, &intValue, &range}, {NULL, NULL, NULL, NULL} }; firstArgString = objc > 2 ? Tcl_GetString(objv[2]) : NULL; if (firstArgString != NULL) { if (*firstArgString != '-' && ((objc == 3 && CHARTYPE(digit, *firstArgString) == 0) || objc == 4)) { args = argsLegacy; opts = NULL; Ns_LogDeprecated(objv, objc, "ns_writer size ?-driver drv? ?size?", NULL); } else { args = argsNew; opts = optsNew; } } else { args = argsNew; opts = optsNew; } if (Ns_ParseObjv(opts, args, interp, 2, objc, objv) != NS_OK) { result = TCL_ERROR; } else if ((driverObj == NULL) && NsConnRequire(interp, NS_CONN_REQUIRE_ALL, &conn) != NS_OK) { result = TCL_ERROR; } else { DrvWriter *wrPtr; if (DriverWriterFromObj(interp, driverObj, conn, &wrPtr) != NS_OK) { result = TCL_ERROR; } else if (intValue != -1) { /* * The optional argument was provided. */ wrPtr->writersize = (size_t)intValue; } if (result == TCL_OK) { Tcl_SetObjResult(interp, Tcl_NewIntObj((int)wrPtr->writersize)); } } return result; } /* *---------------------------------------------------------------------- * * WriterStreamingObjCmd - subcommand of NsTclWriterObjCmd -- * * Implements "ns_writer streaming" command. * Sets or queries streaming state of the writer. * * Results: * Standard Tcl result. 
* * Side effects: * None. * *---------------------------------------------------------------------- */ static int WriterStreamingObjCmd(ClientData UNUSED(clientData), Tcl_Interp *interp, int objc, Tcl_Obj *const* objv) { int boolValue = -1, result = TCL_OK; Tcl_Obj *driverObj = NULL; Ns_Conn *conn = NULL; const char *firstArgString; Ns_ObjvSpec *opts, optsNew[] = { {"-driver", Ns_ObjvObj, &driverObj, NULL}, {NULL, NULL, NULL, NULL} }; Ns_ObjvSpec *args, argsNew[] = { {"?value", Ns_ObjvBool, &boolValue, NULL}, {NULL, NULL, NULL, NULL} }; Ns_ObjvSpec argsLegacy[] = { {"driver", Ns_ObjvObj, &driverObj, NULL}, {"?value", Ns_ObjvBool, &boolValue, NULL}, {NULL, NULL, NULL, NULL} }; firstArgString = objc > 2 ? Tcl_GetString(objv[2]) : NULL; if (firstArgString != NULL) { int argValue; if (*firstArgString != '-' && ((objc == 3 && Tcl_ExprBoolean(interp, firstArgString, &argValue) == TCL_OK) || objc == 4)) { args = argsLegacy; opts = NULL; Ns_LogDeprecated(objv, objc, "ns_writer streaming ?-driver drv? ?value?", NULL); } else { args = argsNew; opts = optsNew; } } else { args = argsNew; opts = optsNew; } if (Ns_ParseObjv(opts, args, interp, 2, objc, objv) != NS_OK) { result = TCL_ERROR; } else if ((driverObj == NULL) && NsConnRequire(interp, NS_CONN_REQUIRE_ALL, &conn) != NS_OK) { result = TCL_ERROR; } else { DrvWriter *wrPtr; if (DriverWriterFromObj(interp, driverObj, conn, &wrPtr) != NS_OK) { result = TCL_ERROR; } else if (boolValue != -1) { /* * The optional argument was provided. */ wrPtr->doStream = (boolValue == 1 ? NS_WRITER_STREAM_ACTIVE : NS_WRITER_STREAM_NONE); } if (result == TCL_OK) { Tcl_SetObjResult(interp, Tcl_NewIntObj(wrPtr->doStream == NS_WRITER_STREAM_ACTIVE ? 1 : 0)); } } return result; } /* *---------------------------------------------------------------------- * * NsTclWriterObjCmd -- * * Implements "ns_writer" command for submitting data to the writer * threads and for configuring and querying the state of the writer * threads at runtime. * * Results: * Standard Tcl result. * * Side effects: * None. * *---------------------------------------------------------------------- */ int NsTclWriterObjCmd(ClientData clientData, Tcl_Interp *interp, int objc, Tcl_Obj *const* objv) { const Ns_SubCmdSpec subcmds[] = { {"list", WriterListObjCmd}, {"size", WriterSizeObjCmd}, {"streaming", WriterStreamingObjCmd}, {"submit", WriterSubmitObjCmd}, {"submitfile", WriterSubmitFileObjCmd}, {"submitfiles",WriterSubmitFilesObjCmd}, {NULL, NULL} }; return Ns_SubcmdObjv(subcmds, clientData, interp, objc, objv); } /* *====================================================================== * Async (log) writer: Write asynchronously to a disk *====================================================================== */ /* *---------------------------------------------------------------------- * * NsAsyncWriterQueueEnable -- * * Enable async writing and start the AsyncWriterThread if * necessary * * Results: * None. * * Side effects: * Potentially starts a thread and sets "stopped" to NS_FALSE. * *---------------------------------------------------------------------- */ void NsAsyncWriterQueueEnable(void) { if (Ns_ConfigBool(NS_CONFIG_PARAMETERS, "asynclogwriter", NS_FALSE) == NS_TRUE) { SpoolerQueue *queuePtr; /* * In case the async writer has not started, the static variable * asyncWriter is NULL. */ if (asyncWriter == NULL) { Ns_MutexLock(&reqLock); if (likely(asyncWriter == NULL)) { /* * Allocate and initialize writer thread context. 
*/ asyncWriter = ns_calloc(1u, sizeof(AsyncWriter)); Ns_MutexUnlock(&reqLock); Ns_MutexSetName2(&asyncWriter->lock, "ns:driver", "async-writer"); /* * Allocate and initialize a Spooler Queue for this thread. */ queuePtr = ns_calloc(1u, sizeof(SpoolerQueue)); Ns_MutexSetName2(&queuePtr->lock, "ns:driver:async-writer", "queue"); asyncWriter->firstPtr = queuePtr; /* * Start the spooler queue */ SpoolerQueueStart(queuePtr, AsyncWriterThread); } else { Ns_MutexUnlock(&reqLock); } } assert(asyncWriter != NULL); queuePtr = asyncWriter->firstPtr; assert(queuePtr != NULL); Ns_MutexLock(&queuePtr->lock); queuePtr->stopped = NS_FALSE; Ns_MutexUnlock(&queuePtr->lock); } } /* *---------------------------------------------------------------------- * * NsAsyncWriterQueueDisable -- * * Disable async writing but don't touch the writer thread. * * Results: * None. * * Side effects: * Disable async writing by setting stopped to 1. * *---------------------------------------------------------------------- */ void NsAsyncWriterQueueDisable(bool shutdown) { if (asyncWriter != NULL) { SpoolerQueue *queuePtr = asyncWriter->firstPtr; Ns_Time timeout; assert(queuePtr != NULL); Ns_GetTime(&timeout); Ns_IncrTime(&timeout, nsconf.shutdowntimeout.sec, nsconf.shutdowntimeout.usec); Ns_MutexLock(&queuePtr->lock); queuePtr->stopped = NS_TRUE; queuePtr->shutdown = shutdown; /* * Trigger the AsyncWriter Thread to drain the spooler queue. */ SockTrigger(queuePtr->pipe[1]); (void)Ns_CondTimedWait(&queuePtr->cond, &queuePtr->lock, &timeout); Ns_MutexUnlock(&queuePtr->lock); if (shutdown) { ns_free(queuePtr); ns_free(asyncWriter); asyncWriter = NULL; } } } /* *---------------------------------------------------------------------- * * NsAsyncWrite -- * * Perform an asynchronous write operation via a writer thread in * case a writer thread is configured and running. The intention * of the asynchronous write operations is to reduce latencies in * connection threads. * * Results: * NS_OK, when write was performed via writer thread, * NS_ERROR otherwise (but data is written). * * Side effects: * I/O Operation. * *---------------------------------------------------------------------- */ Ns_ReturnCode NsAsyncWrite(int fd, const char *buffer, size_t nbyte) { Ns_ReturnCode returnCode = NS_OK; NS_NONNULL_ASSERT(buffer != NULL); /* * If the async writer has not started or is deactivated, behave like a * plain ns_write() call. If the ns_write() fails, we can't do much, since * writing an error message to the log might bring us into an * infinite loop. So we simply print to stderr. */ if (asyncWriter == NULL || asyncWriter->firstPtr->stopped) { ssize_t written = ns_write(fd, buffer, nbyte); if (unlikely(written != (ssize_t)nbyte)) { int retries = 100; /* * Don't go into an infinite loop when multiple subsequent disk * write operations return 0 (maybe disk full). */ returnCode = NS_ERROR; do { if (written < 0) { fprintf(stderr, "error during async write (fd %d): %s\n", fd, strerror(errno)); break; } /* * All partial writes (written >= 0) */ WriteWarningRaw("partial write", fd, nbyte, written); nbyte -= (size_t)written; buffer += written; written = ns_write(fd, buffer, nbyte); if (written == (ssize_t)nbyte) { returnCode = NS_OK; break; } } while (retries-- > 0); } } else { SpoolerQueue *queuePtr; bool trigger = NS_FALSE; const AsyncWriteData *wdPtr; AsyncWriteData *newWdPtr; /* * Allocate a writer cmd and initialize it. 
In order to provide an * interface compatible with ns_write(), we copy the provided data * so that it can be freed by the caller. If we gave up this * interface, we could free the memory block after writing and * save a malloc/free operation on the data. */ newWdPtr = ns_calloc(1u, sizeof(AsyncWriteData)); newWdPtr->fd = fd; newWdPtr->bufsize = nbyte; newWdPtr->data = ns_malloc(nbyte + 1u); memcpy(newWdPtr->data, buffer, newWdPtr->bufsize); newWdPtr->buf = newWdPtr->data; newWdPtr->size = newWdPtr->bufsize; /* * Now add the new write job to the writer thread's queue. In most * cases, the queue will be empty. */ queuePtr = asyncWriter->firstPtr; assert(queuePtr != NULL); Ns_MutexLock(&queuePtr->lock); wdPtr = queuePtr->sockPtr; if (wdPtr != NULL) { newWdPtr->nextPtr = queuePtr->sockPtr; queuePtr->sockPtr = newWdPtr; } else { queuePtr->sockPtr = newWdPtr; trigger = NS_TRUE; } Ns_MutexUnlock(&queuePtr->lock); /* * Wake up writer thread if desired */ if (trigger) { SockTrigger(queuePtr->pipe[1]); } } return returnCode; } /* *---------------------------------------------------------------------- * * AsyncWriterRelease -- * * Deallocate write data. * * Results: * None * * Side effects: * free memory * *---------------------------------------------------------------------- */ static void AsyncWriterRelease(AsyncWriteData *wdPtr) { NS_NONNULL_ASSERT(wdPtr != NULL); ns_free(wdPtr->data); ns_free(wdPtr); } /* *---------------------------------------------------------------------- * * AsyncWriterThread -- * * Thread that implements non-blocking write operations to files * * Results: * None. * * Side effects: * Write to files. * *---------------------------------------------------------------------- */ static void AsyncWriterThread(void *arg) { SpoolerQueue *queuePtr = (SpoolerQueue*)arg; char charBuffer[1]; int pollTimeout; Ns_ReturnCode status; bool stopping; AsyncWriteData *curPtr, *nextPtr, *writePtr; PollData pdata; Ns_ThreadSetName("-asynclogwriter%d-", queuePtr->id); queuePtr->threadName = Ns_ThreadGetName(); /* * Allocate and initialize controlling variables */ PollCreate(&pdata); writePtr = NULL; stopping = NS_FALSE; /* * Loop forever until signaled to shutdown and all * connections are complete and gracefully closed. */ while (!stopping) { /* * Always listen to the trigger pipe. We could perform the async * write operations in the writer thread as well, but for the * goal of reducing latency in connection threads this is * not an issue. To keep things simple, we perform the * typically small write operations without testing for POLLOUT. */ PollReset(&pdata); (void)PollSet(&pdata, queuePtr->pipe[0], (short)POLLIN, NULL); if (writePtr == NULL) { pollTimeout = 30 * 1000; } else { pollTimeout = 0; } /* * Wait for data */ /*n =*/ (void) PollWait(&pdata, pollTimeout); /* * Select and drain the trigger pipe if necessary. 
*/ if (PollIn(&pdata, 0)) { if (ns_recv(queuePtr->pipe[0], charBuffer, 1u, 0) != 1) { Ns_Fatal("asynclogwriter: trigger ns_recv() failed: %s", ns_sockstrerror(ns_sockerrno)); } if (queuePtr->stopped) { /* * Drain everything from the queue */ for (curPtr = writePtr; curPtr != NULL; curPtr = curPtr->nextPtr) { ssize_t written = ns_write(curPtr->fd, curPtr->buf, curPtr->bufsize); if (unlikely(written != (ssize_t)curPtr->bufsize)) { WriteWarningRaw("drain writer", curPtr->fd, curPtr->bufsize, written); } } writePtr = NULL; for (curPtr = queuePtr->sockPtr; curPtr != NULL; curPtr = curPtr->nextPtr) { ssize_t written = ns_write(curPtr->fd, curPtr->buf, curPtr->bufsize); if (unlikely(written != (ssize_t)curPtr->bufsize)) { WriteWarningRaw("drain queue", curPtr->fd, curPtr->bufsize, written); } } queuePtr->sockPtr = NULL; /* * Notify the caller (normally * NsAsyncWriterQueueDisable()) that we are done */ Ns_CondBroadcast(&queuePtr->cond); } } /* * Write to all available file descriptors */ curPtr = writePtr; writePtr = NULL; while (curPtr != NULL) { ssize_t written; nextPtr = curPtr->nextPtr; status = NS_OK; /* * Write the actual data and allow for partial write operations. */ written = ns_write(curPtr->fd, curPtr->buf, curPtr->bufsize); if (unlikely(written < 0)) { status = NS_ERROR; } else { curPtr->size -= (size_t)written; curPtr->nsent += written; curPtr->bufsize -= (size_t)written; if (curPtr->data != NULL) { curPtr->buf += written; } } if (unlikely(status != NS_OK)) { AsyncWriterRelease(curPtr); queuePtr->queuesize--; } else { /* * The write operation was successful. Check if there * is some remaining data to write. If not, we are done * with this request and can release the write buffer. */ if (curPtr->size > 0u) { Push(curPtr, writePtr); } else { AsyncWriterRelease(curPtr); queuePtr->queuesize--; } } curPtr = nextPtr; } /* * Check for shutdown */ stopping = queuePtr->shutdown; if (stopping) { curPtr = queuePtr->sockPtr; assert(writePtr == NULL); while (curPtr != NULL) { ssize_t written = ns_write(curPtr->fd, curPtr->buf, curPtr->bufsize); if (unlikely(written != (ssize_t)curPtr->bufsize)) { WriteWarningRaw("shutdown", curPtr->fd, curPtr->bufsize, written); } curPtr = curPtr->nextPtr; } } else { /* * Add fresh jobs to the writer queue. In practice, this means * moving jobs from queuePtr->sockPtr (the name is kept so the * same queue structure as above can be reused) to the currently * active jobs in queuePtr->curPtr. */ Ns_MutexLock(&queuePtr->lock); curPtr = queuePtr->sockPtr; queuePtr->sockPtr = NULL; while (curPtr != NULL) { nextPtr = curPtr->nextPtr; Push(curPtr, writePtr); queuePtr->queuesize++; curPtr = nextPtr; } queuePtr->curPtr = writePtr; Ns_MutexUnlock(&queuePtr->lock); } } PollFree(&pdata); queuePtr->stopped = NS_TRUE; Ns_Log(Notice, "exiting"); } /* *---------------------------------------------------------------------- * * AsyncLogfileWriteObjCmd - * * Implements "ns_asynclogfile write" command. Write to a file * descriptor via async writer thread. The command handles partial write * operations internally. * * Results: * Standard Tcl result. * * Side effects: * None. 
* *---------------------------------------------------------------------- */ static int AsyncLogfileWriteObjCmd(ClientData UNUSED(clientData), Tcl_Interp *interp, int objc, Tcl_Obj *const* objv) { int result = TCL_OK, binary = (int)NS_FALSE, sanitize; Tcl_Obj *stringObj; int fd = 0; Ns_ObjvValueRange fd_range = {0, INT_MAX}; Ns_ObjvValueRange sanitize_range = {0, 2}; Ns_ObjvSpec opts[] = { {"-binary", Ns_ObjvBool, &binary, INT2PTR(NS_TRUE)}, {"-sanitize", Ns_ObjvInt, &sanitize, &sanitize_range}, {NULL, NULL, NULL, NULL} }; Ns_ObjvSpec args[] = { {"fd", Ns_ObjvInt, &fd, &fd_range}, {"buffer", Ns_ObjvObj, &stringObj, NULL}, {NULL, NULL, NULL, NULL} }; /* * Take the config value as default for "-sanitize", but let the user * override it on a per-case basis. */ sanitize = nsconf.sanitize_logfiles; if (unlikely(Ns_ParseObjv(opts, args, interp, 2, objc, objv) != NS_OK)) { result = TCL_ERROR; } else { const char *buffer; int length; Ns_ReturnCode rc; if (binary == (int)NS_TRUE || NsTclObjIsByteArray(stringObj)) { buffer = (const char *) Tcl_GetByteArrayFromObj(stringObj, &length); } else { buffer = Tcl_GetStringFromObj(stringObj, &length); } if (length > 0) { if (sanitize > 0) { Tcl_DString ds; bool lastCharNewline = (buffer[length-1] == '\n'); Tcl_DStringInit(&ds); if (lastCharNewline) { length --; } Ns_DStringAppendPrintable(&ds, sanitize == 2, buffer, (size_t)length); if (lastCharNewline) { Tcl_DStringAppend(&ds, "\n", 1); } rc = NsAsyncWrite(fd, ds.string, (size_t)ds.length); Tcl_DStringFree(&ds); } else { rc = NsAsyncWrite(fd, buffer, (size_t)length); } if (rc != NS_OK) { Ns_TclPrintfResult(interp, "ns_asynclogfile: error during write operation on fd %d: %s", fd, Tcl_PosixError(interp)); result = TCL_ERROR; } } else { result = TCL_OK; } } return result; } /* *---------------------------------------------------------------------- * * AsyncLogfileOpenObjCmd - * * Implements "ns_asynclogfile open" command. The command opens a * write-only log file and returns a thread-shareable handle (actually a * numeric file descriptor) which can be used in subsequent "write" or * "close" operations. * * Results: * Standard Tcl result. * * Side effects: * None. 
* *---------------------------------------------------------------------- */ static int AsyncLogfileOpenObjCmd(ClientData UNUSED(clientData), Tcl_Interp *interp, int objc, Tcl_Obj *const* objv) { int result = TCL_OK; unsigned int flags = O_APPEND; char *fileNameString; Tcl_Obj *flagsObj = NULL; Ns_ObjvTable flagTable[] = { {"APPEND", O_APPEND}, {"EXCL", O_EXCL}, #ifdef O_DSYNC {"DSYNC", O_DSYNC}, #endif #ifdef O_SYNC {"SYNC", O_SYNC}, #endif {"TRUNC", O_TRUNC}, {NULL, 0u} }; Ns_ObjvSpec args[] = { {"filename", Ns_ObjvString, &fileNameString, NULL}, {"?flags", Ns_ObjvObj, &flagsObj, NULL}, //{"mode", Ns_ObjvString, &mode, NULL}, {NULL, NULL, NULL, NULL} }; if (unlikely(Ns_ParseObjv(NULL, args, interp, 2, objc, objv) != NS_OK)) { result = TCL_ERROR; } else if (flagsObj != NULL) { Tcl_Obj **ov; int oc; result = Tcl_ListObjGetElements(interp, flagsObj, &oc, &ov); if (result == TCL_OK && oc > 0) { int i, opt; flags = 0u; for (i = 0; i < oc; i++) { result = Tcl_GetIndexFromObjStruct(interp, ov[i], flagTable, (int)sizeof(flagTable[0]), "flag", 0, &opt); if (result != TCL_OK) { break; } else { flags |= flagTable[opt].value; } } } } if (result == TCL_OK) { int fd; fd = ns_open(fileNameString, (int)(O_CREAT | O_WRONLY | O_CLOEXEC | flags), 0644); if (unlikely(fd == NS_INVALID_FD)) { Ns_TclPrintfResult(interp, "could not open file '%s': %s", fileNameString, Tcl_PosixError(interp)); result = TCL_ERROR; } else { Tcl_SetObjResult(interp, Tcl_NewIntObj(fd)); } } return result; } /* *---------------------------------------------------------------------- * * AsyncLogfileCloseObjCmd - * * Implements "ns_asynclogfile close" command. Close the logfile * previously created via "ns_asynclogfile open". * * Results: * Standard Tcl result. * * Side effects: * None. * *---------------------------------------------------------------------- */ static int AsyncLogfileCloseObjCmd(ClientData UNUSED(clientData), Tcl_Interp *interp, int objc, Tcl_Obj *const* objv) { int fd, result = TCL_OK; Ns_ObjvValueRange range = {0, INT_MAX}; Ns_ObjvSpec args[] = { {"fd", Ns_ObjvInt, &fd, &range}, {NULL, NULL, NULL, NULL} }; if (unlikely(Ns_ParseObjv(NULL, args, interp, 2, objc, objv) != NS_OK)) { result = TCL_ERROR; } else { int rc = ns_close(fd); if (rc != 0) { Ns_TclPrintfResult(interp, "could not close fd %d: %s", fd, Tcl_PosixError(interp)); result = TCL_ERROR; } } return result; } /* *---------------------------------------------------------------------- * * NsTclAsyncLogfileObjCmd - * * Wrapper for "ns_asynclogfile open|write|close" commands. * * Results: * Standard Tcl result. * * Side effects: * None. * *---------------------------------------------------------------------- */ int NsTclAsyncLogfileObjCmd(ClientData clientData, Tcl_Interp *interp, int objc, Tcl_Obj *const* objv) { const Ns_SubCmdSpec subcmds[] = { {"open", AsyncLogfileOpenObjCmd}, {"write", AsyncLogfileWriteObjCmd}, {"close", AsyncLogfileCloseObjCmd}, {NULL, NULL} }; return Ns_SubcmdObjv(subcmds, clientData, interp, objc, objv); } /* *---------------------------------------------------------------------- * * LookupDriver -- * * Find a matching driver for the specified protocol and optionally the * specified driver name. * * Results: * Driver pointer or NULL on failure. * * Side effects: * When no driver is found, an error is left in the interp result. 
* *---------------------------------------------------------------------- */ static Driver * LookupDriver(Tcl_Interp *interp, const char* protocol, const char *driverName) { Driver *drvPtr; NS_NONNULL_ASSERT(interp != NULL); NS_NONNULL_ASSERT(protocol != NULL); for (drvPtr = firstDrvPtr; drvPtr != NULL; drvPtr = drvPtr->nextPtr) { Ns_Log(DriverDebug, "... check Driver proto <%s> server %s name %s location %s", drvPtr->protocol, drvPtr->server, drvPtr->threadName, drvPtr->location); if (STREQ(drvPtr->protocol, protocol)) { if (driverName == NULL) { /* * If there is no driver name given, take the first driver * with the matching protocol. */ break; } else if (STREQ(drvPtr->moduleName, driverName)) { /* * The driver name (name of the loaded module) is equal */ break; } } } if (drvPtr == NULL) { if (driverName != NULL) { Ns_TclPrintfResult(interp, "no driver for protocol '%s' & driver name '%s' found", protocol, driverName); } else { Ns_TclPrintfResult(interp, "no driver for protocol '%s' found", protocol); } } return drvPtr; } /* *---------------------------------------------------------------------- * * NSDriverClientOpen -- * * Open a client HTTP connection using the driver interface * * Results: * Tcl return code. * * Side effects: * Opening a connection * *---------------------------------------------------------------------- */ int NSDriverClientOpen(Tcl_Interp *interp, const char *driverName, const char *url, const char *httpMethod, const char *version, const Ns_Time *timeoutPtr, Sock **sockPtrPtr) { char *protocol, *host, *portString, *path, *tail, *url2; int result = TCL_OK; NS_NONNULL_ASSERT(interp != NULL); NS_NONNULL_ASSERT(url != NULL); NS_NONNULL_ASSERT(httpMethod != NULL); NS_NONNULL_ASSERT(version != NULL); NS_NONNULL_ASSERT(sockPtrPtr != NULL); url2 = ns_strdup(url); /* * We need here a fully qualified URL, otherwise raise an error */ if (unlikely(Ns_ParseUrl(url2, &protocol, &host, &portString, &path, &tail) != NS_OK) || protocol == NULL || host == NULL || path == NULL || tail == NULL) { Ns_Log(Notice, "driver: invalid URL '%s' passed to NSDriverClientOpen", url2); result = TCL_ERROR; } else { Driver *drvPtr; unsigned short portNr = 0u; /* make static checker happy */ assert(protocol != NULL); assert(host != NULL); assert(path != NULL); assert(tail != NULL); /* * Find a matching driver for the specified protocol and optionally * the specified driver name. 
*/ drvPtr = LookupDriver(interp, protocol, driverName); if (drvPtr == NULL) { result = TCL_ERROR; } else if (portString != NULL) { portNr = (unsigned short) strtol(portString, NULL, 10); } else if (drvPtr->defport != 0u) { /* * Get the default port from the driver structure; */ portNr = drvPtr->defport; } else { Ns_TclPrintfResult(interp, "no default port for protocol '%s' defined", protocol); result = TCL_ERROR; } if (result == TCL_OK) { NS_SOCKET sock; Ns_ReturnCode status; sock = Ns_SockTimedConnect2(host, portNr, NULL, 0u, timeoutPtr, &status); if (sock == NS_INVALID_SOCKET) { Ns_SockConnectError(interp, host, portNr, status); result = TCL_ERROR; } else { const char *query; Tcl_DString ds, *dsPtr = &ds; Request *reqPtr; Sock *sockPtr; assert(drvPtr != NULL); sockPtr = SockNew(drvPtr); sockPtr->sock = sock; sockPtr->servPtr = drvPtr->servPtr; if (sockPtr->servPtr == NULL) { const NsInterp *itPtr = NsGetInterpData(interp); sockPtr->servPtr = itPtr->servPtr; } RequestNew(sockPtr); Ns_GetTime(&sockPtr->acceptTime); reqPtr = sockPtr->reqPtr; Tcl_DStringInit(dsPtr); Ns_DStringAppend(dsPtr, httpMethod); Ns_StrToUpper(Ns_DStringValue(dsPtr)); Tcl_DStringAppend(dsPtr, " /", 2); if (*path != '\0') { if (*path == '/') { path ++; } Tcl_DStringAppend(dsPtr, path, -1); Tcl_DStringAppend(dsPtr, "/", 1); } Tcl_DStringAppend(dsPtr, tail, -1); Tcl_DStringAppend(dsPtr, " HTTP/", 6); Tcl_DStringAppend(dsPtr, version, -1); reqPtr->request.line = Ns_DStringExport(dsPtr); reqPtr->request.method = ns_strdup(httpMethod); reqPtr->request.protocol = ns_strdup(protocol); reqPtr->request.host = ns_strdup(host); query = strchr(tail, INTCHAR('?')); if (query != NULL) { reqPtr->request.query = ns_strdup(query+1); } else { reqPtr->request.query = NULL; } /*Ns_Log(Notice, "REQUEST LINE <%s> query <%s>", reqPtr->request.line, reqPtr->request.query);*/ *sockPtrPtr = sockPtr; } } } ns_free(url2); return result; } /* *---------------------------------------------------------------------- * * NSDriverSockNew -- * * Create a Sock structure based on the driver interface * * Results: * Tcl return code. * * Side effects: * Accepting a connection * *---------------------------------------------------------------------- */ int NSDriverSockNew(Tcl_Interp *interp, NS_SOCKET sock, const char *protocol, const char *driverName, const char *methodName, Sock **sockPtrPtr) { int result = TCL_OK; Driver *drvPtr; NS_NONNULL_ASSERT(interp != NULL); NS_NONNULL_ASSERT(protocol != NULL); NS_NONNULL_ASSERT(methodName != NULL); NS_NONNULL_ASSERT(sockPtrPtr != NULL); drvPtr = LookupDriver(interp, protocol, driverName); if (drvPtr == NULL) { result = TCL_ERROR; } else { Sock *sockPtr; Tcl_DString ds, *dsPtr = &ds; Request *reqPtr; sockPtr = SockNew(drvPtr); sockPtr->servPtr = drvPtr->servPtr; sockPtr->sock = sock; RequestNew(sockPtr); // not sure if needed // peerAddr is missing Ns_GetTime(&sockPtr->acceptTime); reqPtr = sockPtr->reqPtr; Tcl_DStringInit(dsPtr); Ns_DStringAppend(dsPtr, methodName); Ns_StrToUpper(Ns_DStringValue(dsPtr)); reqPtr->request.line = Ns_DStringExport(dsPtr); reqPtr->request.method = ns_strdup(methodName); reqPtr->request.protocol = ns_strdup(protocol); reqPtr->request.host = NULL; reqPtr->request.query = NULL; /* Ns_Log(Notice, "REQUEST LINE <%s>", reqPtr->request.line);*/ *sockPtrPtr = sockPtr; } return result; } /* * Local Variables: * mode: c * c-basic-offset: 4 * fill-column: 78 * indent-tabs-mode: nil * End: */
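/*
 * A minimal standalone sketch of the request-line assembly performed by
 * NSDriverClientOpen() above: upper-case the method, force a single
 * leading "/", join path and tail, and append the protocol version.
 * BuildRequestLine(), the buffer size and the main() driver are
 * hypothetical names for illustration only, not part of driver.c.
 */
#include <stdio.h>
#include <ctype.h>

static void
BuildRequestLine(char *out, size_t outSize, const char *method,
                 const char *path, const char *tail, const char *version)
{
    char   upper[16];
    size_t i;

    /* Upper-case the method, as done via Ns_StrToUpper() above. */
    for (i = 0u; method[i] != '\0' && i < sizeof(upper) - 1u; i++) {
        upper[i] = (char)toupper((unsigned char)method[i]);
    }
    upper[i] = '\0';

    if (*path != '\0') {
        if (*path == '/') {
            path++;                       /* avoid a double slash */
        }
        snprintf(out, outSize, "%s /%s/%s HTTP/%s", upper, path, tail, version);
    } else {
        snprintf(out, outSize, "%s /%s HTTP/%s", upper, tail, version);
    }
}

int
main(void)
{
    char line[256];

    BuildRequestLine(line, sizeof(line), "get", "docs", "index.html", "1.1");
    printf("%s\n", line);                 /* prints: GET /docs/index.html HTTP/1.1 */
    return 0;
}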
SockParse(Sock *sockPtr) { const Tcl_DString *bufPtr; const Driver *drvPtr; Request *reqPtr; char save; SockState result; NS_NONNULL_ASSERT(sockPtr != NULL); drvPtr = sockPtr->drvPtr; NsUpdateProgress((Ns_Sock *) sockPtr); reqPtr = sockPtr->reqPtr; bufPtr = &reqPtr->buffer; /* * Scan lines (header) until start of content (body-part) */ while (reqPtr->coff == 0u) { char *s, *e; size_t cnt; /* * Find the next header line. */ s = bufPtr->string + reqPtr->roff; e = memchr(s, INTCHAR('\n'), reqPtr->avail); if (unlikely(e == NULL)) { /* * Input not yet newline terminated - request more data. */ return SOCK_MORE; } /* * Check for max single line overflows. * * Previous versions of the driver returned an error code directly * here, which was handled via an HTTP error message * provided via SockError(). However, the SockError() handling * closes the connection immediately. As a * consequence, the HTTP client might never see the error * message, since the request was not yet fully transmitted; * it will see a "broken pipe: 13" message instead. We * now read the full request and return the message via * ConnRunRequest(). */ if (unlikely((e - s) > drvPtr->maxline)) { sockPtr->keep = NS_FALSE; if (reqPtr->request.line == NULL) { Ns_Log(DriverDebug, "SockParse: maxline reached of %d bytes", drvPtr->maxline); sockPtr->flags = NS_CONN_REQUESTURITOOLONG; Ns_Log(Warning, "request line is too long (%d bytes)", (int)(e - s)); } else { sockPtr->flags = NS_CONN_LINETOOLONG; Ns_Log(Warning, "request header line is too long (%d bytes)", (int)(e - s)); } } /* * Update next read pointer to end of this line. */ cnt = (size_t)(e - s) + 1u; reqPtr->roff += cnt; reqPtr->avail -= cnt; /* * Adjust end pointer to the last content character before the line * terminator. */ if (likely(e > s) && likely(*(e-1) == '\r')) { --e; } /* * Check for end of headers in case we have not done it yet. */ if (unlikely(e == s) && (reqPtr->coff == 0u)) { /* * We are at end of headers. */ reqPtr->coff = EndOfHeader(sockPtr); /* * In case the client sent "expect: 100-continue", report back that * everything is fine with the headers. */ if ((sockPtr->flags & NS_CONN_CONTINUE) != 0u) { Ns_Log(Ns_LogRequestDebug, "honoring 100-continue"); /* * In case the request entity (body) was too large, we can * return the error message immediately, since the client has * flagged this via "Expect:". Otherwise we have to read the * full request (although it is too large) to drain the * channel; if we did not, the server might close the connection * *before* it has received the full request with its body from * the client. We just keep the flag and let * Ns_ConnRunRequest() handle the error message. */ if ((sockPtr->flags & NS_CONN_ENTITYTOOLARGE) != 0u) { Ns_Log(Ns_LogRequestDebug, "100-continue: entity too large"); return SOCK_ENTITYTOOLARGE; /* * We have no other error message flagged (future ones * have to be handled here). */ } else { struct iovec iov[1]; ssize_t sent; /* * Reply with "100 continue". */ Ns_Log(Ns_LogRequestDebug, "100-continue: reply CONTINUE"); iov[0].iov_base = (char *)"HTTP/1.1 100 Continue\r\n\r\n"; iov[0].iov_len = strlen(iov[0].iov_base); sent = Ns_SockSendBufs((Ns_Sock *)sockPtr, iov, 1, NULL, 0u); if (sent != (ssize_t)iov[0].iov_len) { Ns_Log(Warning, "could not deliver response: 100 Continue"); /* * Should we bail out here? */ } } } } else { /* * We have the request-line or a header line to process. */ save = *e; *e = '\0'; if (unlikely(reqPtr->request.line == NULL)) { /* * There is no request-line set. 
The received line must be * the request-line. */ Ns_Log(DriverDebug, "SockParse (%d): parse request line <%s>", sockPtr->sock, s); if (Ns_ParseRequest(&reqPtr->request, s) == NS_ERROR) { /* * Invalid request. */ return SOCK_BADREQUEST; } /* * HTTP 0.9 did not have an HTTP-version number or request headers, * and no empty line terminating the request header. */ if (unlikely(reqPtr->request.version < 1.0)) { /* * Pre-HTTP/1.0 request. */ reqPtr->coff = reqPtr->roff; Ns_Log(Notice, "pre-HTTP/1.0 request <%s>", reqPtr->request.line); } } else if (Ns_ParseHeader(reqPtr->headers, s, Preserve) != NS_OK) { /* * Invalid header. */ return SOCK_BADHEADER; } else { /* * Check for max number of headers */ if (unlikely(Ns_SetSize(reqPtr->headers) > (size_t)drvPtr->maxheaders)) { Ns_Log(DriverDebug, "SockParse (%d): maxheaders reached of %d bytes", sockPtr->sock, drvPtr->maxheaders); return SOCK_TOOMANYHEADERS; } } *e = save; } } if (unlikely(reqPtr->request.line == NULL)) { /* * We are at end of headers, but we have not parsed a request line * (maybe just two linefeeds). */ return SOCK_BADREQUEST; } /* * We are in the request body. */ assert(reqPtr->coff > 0u); assert(reqPtr->request.line != NULL); /* * Check if all content has arrived. */ Ns_Log(Dev, "=== length < avail (length %" PRIuz ", avail %" PRIuz ") tfd %d tfile %p chunkStartOff %" PRIuz, reqPtr->length, reqPtr->avail, sockPtr->tfd, (void *)sockPtr->tfile, reqPtr->chunkStartOff); if (reqPtr->chunkStartOff != 0u) { /* * Chunked encoding was provided. */ bool complete; size_t currentContentLength; complete = ChunkedDecode(reqPtr, NS_TRUE); currentContentLength = reqPtr->chunkWriteOff - reqPtr->coff; /* * A chunk might be complete, but it might not be the last * chunk from the client. The best thing would be to be able * to read until EOF here. In cases where the (optional) * "expectedLength" was provided by the client, we terminate * depending on that information. */ if ((!complete) || (reqPtr->expectedLength != 0u && currentContentLength < reqPtr->expectedLength)) { /* * ChunkedDecode wants more data. */ return SOCK_MORE; } /* * ChunkedDecode has enough data. */ reqPtr->length = (size_t)currentContentLength; } if (reqPtr->avail < reqPtr->length) { Ns_Log(DriverDebug, "SockRead wait for more input"); /* * Wait for more input. */ return SOCK_MORE; } Ns_Log(Dev, "=== all required data is available (avail %" PRIuz", length %" PRIuz ", " "readahead %" TCL_LL_MODIFIER "d maxupload %" TCL_LL_MODIFIER "d) tfd %d", reqPtr->avail, reqPtr->length, drvPtr->readahead, drvPtr->maxupload, sockPtr->tfd); /* * We have all required data in the receive buffer or in a temporary file. * * - Uploads > "readahead": these are put into temporary files. * * - Uploads > "maxupload": these are put into temporary files * without mmapping, no content parsing will be performed in memory. */ result = SOCK_READY; if (sockPtr->tfile != NULL) { reqPtr->content = NULL; reqPtr->next = NULL; reqPtr->avail = 0u; Ns_Log(DriverDebug, "content spooled to file: size %" PRIdz ", file %s", reqPtr->length, sockPtr->tfile); /* * Nothing more to do, return via SOCK_READY; */ } else { /* * Uploads < "maxupload" are spooled to files and mmapped in order to * provide the usual interface via [ns_conn content]. */ if (sockPtr->tfd > 0) { #ifdef _WIN32 /* * For _WIN32, tfd should never be set, since tfd-spooling is not * implemented for Windows. 
*/ assert(0); #else int prot = PROT_READ | PROT_WRITE; /* * Add a byte to make sure that the string termination with \0 below * always falls into the mmapped area. On some older OSes, hitting a * page boundary there might otherwise lead to crashes. */ ssize_t rc = ns_write(sockPtr->tfd, "\0", 1); if (rc == -1) { Ns_Log(Error, "socket: could not append terminating 0-byte"); } sockPtr->tsize = reqPtr->length + 1; sockPtr->taddr = mmap(0, sockPtr->tsize, prot, MAP_PRIVATE, sockPtr->tfd, 0); if (sockPtr->taddr == MAP_FAILED) { sockPtr->taddr = NULL; result = SOCK_ERROR; } else { reqPtr->content = sockPtr->taddr; Ns_Log(Debug, "content spooled to mmapped file: readahead=%" TCL_LL_MODIFIER "d, filesize=%" PRIdz, drvPtr->readahead, sockPtr->tsize); } #endif } else { /* * Set the content to the beginning of the remaining buffer (content * offset). This happens as well when reqPtr->contentLength is 0, but * it is needed for chunked input processing. */ reqPtr->content = bufPtr->string + reqPtr->coff; } reqPtr->next = reqPtr->content; /* * Add a terminating null character. The content might be from the receive * buffer (Tcl_DString) or from the mmapped file. Non-mmapped files are handled * above. */ if (reqPtr->length > 0u) { Ns_Log(DriverDebug, "SockRead adds null terminating character at content[%" PRIuz "]", reqPtr->length); reqPtr->savedChar = reqPtr->content[reqPtr->length]; reqPtr->content[reqPtr->length] = '\0'; if (sockPtr->taddr == NULL) { LogBuffer(DriverDebug, "UPDATED BUFFER", sockPtr->reqPtr->buffer.string, (size_t)reqPtr->buffer.length); } } } return result; }
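/*
 * A minimal standalone sketch (hypothetical helper, not the actual
 * ChunkedDecode() of this file) of the chunked transfer-encoding framing
 * that the function above consumes: each chunk is
 * "<hex-length>\r\n<payload>\r\n", terminated by a zero-length chunk.
 */
#include <stdlib.h>
#include <string.h>

/*
 * Returns the payload length of one complete chunk starting at *start and
 * sets *payloadPtr to its first byte; returns -1 when the buffer does not
 * yet hold a complete, valid chunk (the SOCK_MORE situation of the driver).
 */
static long
NextChunk(const char *start, const char *end, const char **payloadPtr)
{
    const char *eol = memchr(start, '\n', (size_t)(end - start));
    long        length;

    if (eol == NULL) {
        return -1;                         /* chunk header incomplete */
    }
    length = strtol(start, NULL, 16);
    if (length < 0 || eol + 1 + length + 2 > end) {
        return -1;                         /* invalid, or payload incomplete */
    }
    *payloadPtr = eol + 1;                 /* payload follows the CRLF */
    return length;
}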
SockParse(Sock *sockPtr) { const Tcl_DString *bufPtr; const Driver *drvPtr; Request *reqPtr; char save; SockState result; NS_NONNULL_ASSERT(sockPtr != NULL); drvPtr = sockPtr->drvPtr; NsUpdateProgress((Ns_Sock *) sockPtr); reqPtr = sockPtr->reqPtr; bufPtr = &reqPtr->buffer; /* * Scan lines (header) until start of content (body-part) */ while (reqPtr->coff == 0u) { char *s, *e; size_t cnt; /* * Find the next header line. */ s = bufPtr->string + reqPtr->roff; e = memchr(s, INTCHAR('\n'), reqPtr->avail); if (unlikely(e == NULL)) { /* * Input not yet newline terminated - request more data. */ return SOCK_MORE; } /* * Check for max single line overflows. * * Previous versions of the driver returned an error code directly * here, which was handled via an HTTP error message * provided via SockError(). However, the SockError() handling * closes the connection immediately. As a * consequence, the HTTP client might never see the error * message, since the request was not yet fully transmitted; * it will see a "broken pipe: 13" message instead. We * now read the full request and return the message via * ConnRunRequest(). */ if (unlikely((e - s) > drvPtr->maxline)) { sockPtr->keep = NS_FALSE; if (reqPtr->request.line == NULL) { Ns_Log(DriverDebug, "SockParse: maxline reached of %d bytes", drvPtr->maxline); sockPtr->flags = NS_CONN_REQUESTURITOOLONG; Ns_Log(Warning, "request line is too long (%d bytes)", (int)(e - s)); } else { sockPtr->flags = NS_CONN_LINETOOLONG; Ns_Log(Warning, "request header line is too long (%d bytes)", (int)(e - s)); } } /* * Update next read pointer to end of this line. */ cnt = (size_t)(e - s) + 1u; reqPtr->roff += cnt; reqPtr->avail -= cnt; /* * Adjust end pointer to the last content character before the line * terminator. */ if (likely(e > s) && likely(*(e-1) == '\r')) { --e; } /* * Check for end of headers in case we have not done it yet. */ if (unlikely(e == s) && (reqPtr->coff == 0u)) { /* * We are at end of headers. */ reqPtr->coff = EndOfHeader(sockPtr); /* * In case the client sent "expect: 100-continue", report back that * everything is fine with the headers. */ if ((sockPtr->flags & NS_CONN_CONTINUE) != 0u) { Ns_Log(Ns_LogRequestDebug, "honoring 100-continue"); /* * In case the request entity (body) was too large, we can * return the error message immediately, since the client has * flagged this via "Expect:". Otherwise we have to read the * full request (although it is too large) to drain the * channel; if we did not, the server might close the connection * *before* it has received the full request with its body from * the client. We just keep the flag and let * Ns_ConnRunRequest() handle the error message. */ if ((sockPtr->flags & NS_CONN_ENTITYTOOLARGE) != 0u) { Ns_Log(Ns_LogRequestDebug, "100-continue: entity too large"); return SOCK_ENTITYTOOLARGE; /* * We have no other error message flagged (future ones * have to be handled here). */ } else { struct iovec iov[1]; ssize_t sent; /* * Reply with "100 continue". */ Ns_Log(Ns_LogRequestDebug, "100-continue: reply CONTINUE"); iov[0].iov_base = (char *)"HTTP/1.1 100 Continue\r\n\r\n"; iov[0].iov_len = strlen(iov[0].iov_base); sent = Ns_SockSendBufs((Ns_Sock *)sockPtr, iov, 1, NULL, 0u); if (sent != (ssize_t)iov[0].iov_len) { Ns_Log(Warning, "could not deliver response: 100 Continue"); /* * Should we bail out here? */ } } } } else { /* * We have the request-line or a header line to process. */ save = *e; *e = '\0'; if (unlikely(reqPtr->request.line == NULL)) { /* * There is no request-line set. 
The received line must be * the request-line. */ Ns_Log(DriverDebug, "SockParse (%d): parse request line <%s>", sockPtr->sock, s); if (Ns_ParseRequest(&reqPtr->request, s) == NS_ERROR) { /* * Invalid request. */ return SOCK_BADREQUEST; } /* * HTTP 0.9 did not have an HTTP-version number or request headers, * and no empty line terminating the request header. */ if (unlikely(reqPtr->request.version < 1.0)) { /* * Pre-HTTP/1.0 request. */ reqPtr->coff = reqPtr->roff; Ns_Log(Notice, "pre-HTTP/1.0 request <%s>", reqPtr->request.line); } } else if (Ns_ParseHeader(reqPtr->headers, s, Preserve) != NS_OK) { /* * Invalid header. */ return SOCK_BADHEADER; } else { /* * Check for max number of headers */ if (unlikely(Ns_SetSize(reqPtr->headers) > (size_t)drvPtr->maxheaders)) { Ns_Log(DriverDebug, "SockParse (%d): maxheaders reached of %d bytes", sockPtr->sock, drvPtr->maxheaders); return SOCK_TOOMANYHEADERS; } } *e = save; } } if (unlikely(reqPtr->request.line == NULL)) { /* * We are at end of headers, but we have not parsed a request line * (maybe just two linefeeds). */ return SOCK_BADREQUEST; } /* * We are in the request body. */ assert(reqPtr->coff > 0u); assert(reqPtr->request.line != NULL); /* * Check if all content has arrived. */ Ns_Log(Debug, "=== length < avail (length %" PRIuz ", avail %" PRIuz ") tfd %d tfile %p chunkStartOff %" PRIuz, reqPtr->length, reqPtr->avail, sockPtr->tfd, (void *)sockPtr->tfile, reqPtr->chunkStartOff); if (reqPtr->chunkStartOff != 0u) { /* * Chunked encoding was provided. */ SockState chunkState; size_t currentContentLength; chunkState = ChunkedDecode(reqPtr, NS_TRUE); currentContentLength = reqPtr->chunkWriteOff - reqPtr->coff; /* * A chunk might be complete, but it might not be the last * chunk from the client. The best thing would be to be able * to read until EOF here. In cases where the (optional) * "expectedLength" was provided by the client, we terminate * depending on that information. */ if ((chunkState == SOCK_MORE) || (reqPtr->expectedLength != 0u && currentContentLength < reqPtr->expectedLength)) { /* * ChunkedDecode wants more data. */ return SOCK_MORE; } else if (chunkState != SOCK_READY) { return chunkState; } /* * ChunkedDecode has enough data. */ reqPtr->length = (size_t)currentContentLength; } if (reqPtr->avail < reqPtr->length) { Ns_Log(DriverDebug, "SockRead wait for more input"); /* * Wait for more input. */ return SOCK_MORE; } Ns_Log(Dev, "=== all required data is available (avail %" PRIuz", length %" PRIuz ", " "readahead %" TCL_LL_MODIFIER "d maxupload %" TCL_LL_MODIFIER "d) tfd %d", reqPtr->avail, reqPtr->length, drvPtr->readahead, drvPtr->maxupload, sockPtr->tfd); /* * We have all required data in the receive buffer or in a temporary file. * * - Uploads > "readahead": these are put into temporary files. * * - Uploads > "maxupload": these are put into temporary files * without mmapping, no content parsing will be performed in memory. */ result = SOCK_READY; if (sockPtr->tfile != NULL) { reqPtr->content = NULL; reqPtr->next = NULL; reqPtr->avail = 0u; Ns_Log(DriverDebug, "content spooled to file: size %" PRIdz ", file %s", reqPtr->length, sockPtr->tfile); /* * Nothing more to do, return via SOCK_READY; */ } else { /* * Uploads < "maxupload" are spooled to files and mmapped in order to * provide the usual interface via [ns_conn content]. */ if (sockPtr->tfd > 0) { #ifdef _WIN32 /* * For _WIN32, tfd should never be set, since tfd-spooling is not * implemented for Windows. 
*/ assert(0); #else int prot = PROT_READ | PROT_WRITE; /* * Add a byte to make sure that the string termination with \0 below * always falls into the mmapped area. On some older OSes, hitting a * page boundary there might otherwise lead to crashes. */ ssize_t rc = ns_write(sockPtr->tfd, "\0", 1); if (rc == -1) { Ns_Log(Error, "socket: could not append terminating 0-byte"); } sockPtr->tsize = reqPtr->length + 1; sockPtr->taddr = mmap(0, sockPtr->tsize, prot, MAP_PRIVATE, sockPtr->tfd, 0); if (sockPtr->taddr == MAP_FAILED) { sockPtr->taddr = NULL; result = SOCK_ERROR; } else { reqPtr->content = sockPtr->taddr; Ns_Log(Debug, "content spooled to mmapped file: readahead=%" TCL_LL_MODIFIER "d, filesize=%" PRIdz, drvPtr->readahead, sockPtr->tsize); } #endif } else { /* * Set the content to the beginning of the remaining buffer (content * offset). This happens as well when reqPtr->contentLength is 0, but * it is needed for chunked input processing. */ reqPtr->content = bufPtr->string + reqPtr->coff; } reqPtr->next = reqPtr->content; /* * Add a terminating null character. The content might be from the receive * buffer (Tcl_DString) or from the mmapped file. Non-mmapped files are handled * above. */ if (reqPtr->length > 0u) { Ns_Log(DriverDebug, "SockRead adds null terminating character at content[%" PRIuz "]", reqPtr->length); reqPtr->savedChar = reqPtr->content[reqPtr->length]; reqPtr->content[reqPtr->length] = '\0'; if (sockPtr->taddr == NULL) { LogBuffer(DriverDebug, "UPDATED BUFFER", sockPtr->reqPtr->buffer.string, (size_t)reqPtr->buffer.length); } } } return result; }
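/*
 * A compact sketch of the validation pattern introduced by the fixed
 * ChunkedDecode() (see the diff below): parse the chunk length into a
 * signed long first, reject negative values before any size_t conversion,
 * and keep "need more data" distinct from "malformed request". The enum
 * and function names here are illustrative, not the actual driver API.
 */
#include <stdlib.h>
#include <string.h>

typedef enum { CHUNK_READY, CHUNK_MORE, CHUNK_BADREQUEST } ChunkResult;

static ChunkResult
ParseChunkLength(const char *header, const char *end, long *lengthPtr)
{
    const char *eol = memchr(header, '\n', (size_t)(end - header));

    if (eol == NULL) {
        return CHUNK_MORE;                 /* header not complete yet */
    }
    *lengthPtr = strtol(header, NULL, 16);
    if (*lengthPtr < 0) {
        return CHUNK_BADREQUEST;           /* e.g. "-1\r\n": reject, don't wrap */
    }
    return CHUNK_READY;
}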
{'added': [(290, 'static SockState ChunkedDecode(Request *reqPtr, bool update)'), (3377, ' * SOCK_READY when chunk was complete, SOCK_MORE when more data is'), (3378, ' * requried, or some error condition.'), (3381, ' * Updates the buffer if update is true (and adjusts'), (3382, ' * reqPtr->chunkWriteOff). Updates always reqPtr->chunkStartOff to allow'), (3383, ' * incremental operations.'), (3387, 'static SockState'), (3392, ' SockState result = SOCK_READY;'), (3402, ' long chunkLength;'), (3406, ' result = SOCK_MORE;'), (3411, ' chunkLength = strtol(chunkStart, NULL, 16);'), (3412, " *p = '\\r';"), (3413, ' if (chunkLength < 0) {'), (3414, ' Ns_Log(Warning, "ChunkedDecode: negative chunk length");'), (3415, ' result = SOCK_BADREQUEST;'), (3416, ' break;'), (3417, ' }'), (3420, ' if (p + 2 + chunkLength > end) {'), (3422, ' result = SOCK_MORE;'), (3428, ' memmove(writeBuffer, p + 2, (size_t)chunkLength);'), (3429, ' reqPtr->chunkWriteOff += (size_t)chunkLength;'), (3430, " *(writeBuffer + chunkLength) = '\\0';"), (3432, ' reqPtr->chunkStartOff += (size_t)(p - chunkStart) + 4u + (size_t)chunkLength;'), (3436, ' return result;'), (4125, ' Ns_Log(Debug, "=== length < avail (length %" PRIuz'), (4134, ' SockState chunkState;'), (4135, ' size_t currentContentLength;'), (4137, ' chunkState = ChunkedDecode(reqPtr, NS_TRUE);'), (4147, ' if ((chunkState == SOCK_MORE)'), (4153, ''), (4154, ' } else if (chunkState != SOCK_READY) {'), (4155, ' return chunkState;')], 'deleted': [(290, 'static bool ChunkedDecode(Request *reqPtr, bool update)'), (3377, ' * NS_TRUE when chunk was complete, NS_FALSE otherwise'), (3380, ' * updates the buffer if update is true (and adjusts reqPtr->chunkWriteOff)'), (3381, ' * updates always reqPtr->chunkStartOff to allow incremental operations'), (3385, 'static bool'), (3390, ' bool success = NS_TRUE;'), (3400, ' size_t chunk_length;'), (3404, ' success = NS_FALSE;'), (3409, ' chunk_length = (size_t)strtol(chunkStart, NULL, 16);'), (3412, ' if (p + 2 + chunk_length > end) {'), (3414, ' success = NS_FALSE;'), (3420, ' memmove(writeBuffer, p + 2, chunk_length);'), (3421, ' reqPtr->chunkWriteOff += chunk_length;'), (3422, " *(writeBuffer + chunk_length) = '\\0';"), (3424, ' reqPtr->chunkStartOff += (size_t)(p - chunkStart) + 4u + chunk_length;'), (3428, ' return success;'), (4117, ' Ns_Log(Dev, "=== length < avail (length %" PRIuz'), (4126, ' bool complete;'), (4127, ' size_t currentContentLength;'), (4129, ' complete = ChunkedDecode(reqPtr, NS_TRUE);'), (4139, ' if ((!complete)')]}
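/*
 * A tiny demonstration of the hazard carried by the deleted lines above:
 * casting a negative strtol() result straight to size_t wraps to a huge
 * unsigned value, which then drives the memmove() length out of bounds
 * (CWE-787). Illustrative only.
 */
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
    long   raw     = strtol("-1", NULL, 16);   /* raw == -1 */
    size_t wrapped = (size_t)raw;              /* wraps to SIZE_MAX */

    printf("raw=%ld wrapped=%zu\n", raw, wrapped);
    return 0;
}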
32
21
4,343
29,706
https://bitbucket.org/naviserver/naviserver
CVE-2020-13111
['CWE-20', 'CWE-787']
decompress.c
BZ2_decompress
/*-------------------------------------------------------------*/ /*--- Decompression machinery ---*/ /*--- decompress.c ---*/ /*-------------------------------------------------------------*/ /* ------------------------------------------------------------------ This file is part of bzip2/libbzip2, a program and library for lossless, block-sorting data compression. bzip2/libbzip2 version 1.0.6 of 6 September 2010 Copyright (C) 1996-2010 Julian Seward <jseward@acm.org> Please read the WARNING, DISCLAIMER and PATENTS sections in the README file. This program is released under the terms of the license contained in the file LICENSE. ------------------------------------------------------------------ */ #include "bzlib_private.h" /*---------------------------------------------------*/ static void makeMaps_d ( DState* s ) { Int32 i; s->nInUse = 0; for (i = 0; i < 256; i++) if (s->inUse[i]) { s->seqToUnseq[s->nInUse] = i; s->nInUse++; } } /*---------------------------------------------------*/ #define RETURN(rrr) \ { retVal = rrr; goto save_state_and_return; }; #define GET_BITS(lll,vvv,nnn) \ case lll: s->state = lll; \ while (True) { \ if (s->bsLive >= nnn) { \ UInt32 v; \ v = (s->bsBuff >> \ (s->bsLive-nnn)) & ((1 << nnn)-1); \ s->bsLive -= nnn; \ vvv = v; \ break; \ } \ if (s->strm->avail_in == 0) RETURN(BZ_OK); \ s->bsBuff \ = (s->bsBuff << 8) | \ ((UInt32) \ (*((UChar*)(s->strm->next_in)))); \ s->bsLive += 8; \ s->strm->next_in++; \ s->strm->avail_in--; \ s->strm->total_in_lo32++; \ if (s->strm->total_in_lo32 == 0) \ s->strm->total_in_hi32++; \ } #define GET_UCHAR(lll,uuu) \ GET_BITS(lll,uuu,8) #define GET_BIT(lll,uuu) \ GET_BITS(lll,uuu,1) /*---------------------------------------------------*/ #define GET_MTF_VAL(label1,label2,lval) \ { \ if (groupPos == 0) { \ groupNo++; \ if (groupNo >= nSelectors) \ RETURN(BZ_DATA_ERROR); \ groupPos = BZ_G_SIZE; \ gSel = s->selector[groupNo]; \ gMinlen = s->minLens[gSel]; \ gLimit = &(s->limit[gSel][0]); \ gPerm = &(s->perm[gSel][0]); \ gBase = &(s->base[gSel][0]); \ } \ groupPos--; \ zn = gMinlen; \ GET_BITS(label1, zvec, zn); \ while (1) { \ if (zn > 20 /* the longest code */) \ RETURN(BZ_DATA_ERROR); \ if (zvec <= gLimit[zn]) break; \ zn++; \ GET_BIT(label2, zj); \ zvec = (zvec << 1) | zj; \ }; \ if (zvec - gBase[zn] < 0 \ || zvec - gBase[zn] >= BZ_MAX_ALPHA_SIZE) \ RETURN(BZ_DATA_ERROR); \ lval = gPerm[zvec - gBase[zn]]; \ } /*---------------------------------------------------*/ Int32 BZ2_decompress ( DState* s ) { UChar uc; Int32 retVal; Int32 minLen, maxLen; bz_stream* strm = s->strm; /* stuff that needs to be saved/restored */ Int32 i; Int32 j; Int32 t; Int32 alphaSize; Int32 nGroups; Int32 nSelectors; Int32 EOB; Int32 groupNo; Int32 groupPos; Int32 nextSym; Int32 nblockMAX; Int32 nblock; Int32 es; Int32 N; Int32 curr; Int32 zt; Int32 zn; Int32 zvec; Int32 zj; Int32 gSel; Int32 gMinlen; Int32* gLimit; Int32* gBase; Int32* gPerm; if (s->state == BZ_X_MAGIC_1) { /*initialise the save area*/ s->save_i = 0; s->save_j = 0; s->save_t = 0; s->save_alphaSize = 0; s->save_nGroups = 0; s->save_nSelectors = 0; s->save_EOB = 0; s->save_groupNo = 0; s->save_groupPos = 0; s->save_nextSym = 0; s->save_nblockMAX = 0; s->save_nblock = 0; s->save_es = 0; s->save_N = 0; s->save_curr = 0; s->save_zt = 0; s->save_zn = 0; s->save_zvec = 0; s->save_zj = 0; s->save_gSel = 0; s->save_gMinlen = 0; s->save_gLimit = NULL; s->save_gBase = NULL; s->save_gPerm = NULL; } /*restore from the save area*/ i = s->save_i; j = s->save_j; t = s->save_t; alphaSize = s->save_alphaSize; 
nGroups = s->save_nGroups; nSelectors = s->save_nSelectors; EOB = s->save_EOB; groupNo = s->save_groupNo; groupPos = s->save_groupPos; nextSym = s->save_nextSym; nblockMAX = s->save_nblockMAX; nblock = s->save_nblock; es = s->save_es; N = s->save_N; curr = s->save_curr; zt = s->save_zt; zn = s->save_zn; zvec = s->save_zvec; zj = s->save_zj; gSel = s->save_gSel; gMinlen = s->save_gMinlen; gLimit = s->save_gLimit; gBase = s->save_gBase; gPerm = s->save_gPerm; retVal = BZ_OK; switch (s->state) { GET_UCHAR(BZ_X_MAGIC_1, uc); if (uc != BZ_HDR_B) RETURN(BZ_DATA_ERROR_MAGIC); GET_UCHAR(BZ_X_MAGIC_2, uc); if (uc != BZ_HDR_Z) RETURN(BZ_DATA_ERROR_MAGIC); GET_UCHAR(BZ_X_MAGIC_3, uc) if (uc != BZ_HDR_h) RETURN(BZ_DATA_ERROR_MAGIC); GET_BITS(BZ_X_MAGIC_4, s->blockSize100k, 8) if (s->blockSize100k < (BZ_HDR_0 + 1) || s->blockSize100k > (BZ_HDR_0 + 9)) RETURN(BZ_DATA_ERROR_MAGIC); s->blockSize100k -= BZ_HDR_0; if (s->smallDecompress) { s->ll16 = BZALLOC( s->blockSize100k * 100000 * sizeof(UInt16) ); s->ll4 = BZALLOC( ((1 + s->blockSize100k * 100000) >> 1) * sizeof(UChar) ); if (s->ll16 == NULL || s->ll4 == NULL) RETURN(BZ_MEM_ERROR); } else { s->tt = BZALLOC( s->blockSize100k * 100000 * sizeof(Int32) ); if (s->tt == NULL) RETURN(BZ_MEM_ERROR); } GET_UCHAR(BZ_X_BLKHDR_1, uc); if (uc == 0x17) goto endhdr_2; if (uc != 0x31) RETURN(BZ_DATA_ERROR); GET_UCHAR(BZ_X_BLKHDR_2, uc); if (uc != 0x41) RETURN(BZ_DATA_ERROR); GET_UCHAR(BZ_X_BLKHDR_3, uc); if (uc != 0x59) RETURN(BZ_DATA_ERROR); GET_UCHAR(BZ_X_BLKHDR_4, uc); if (uc != 0x26) RETURN(BZ_DATA_ERROR); GET_UCHAR(BZ_X_BLKHDR_5, uc); if (uc != 0x53) RETURN(BZ_DATA_ERROR); GET_UCHAR(BZ_X_BLKHDR_6, uc); if (uc != 0x59) RETURN(BZ_DATA_ERROR); s->currBlockNo++; if (s->verbosity >= 2) VPrintf1 ( "\n [%d: huff+mtf ", s->currBlockNo ); s->storedBlockCRC = 0; GET_UCHAR(BZ_X_BCRC_1, uc); s->storedBlockCRC = (s->storedBlockCRC << 8) | ((UInt32)uc); GET_UCHAR(BZ_X_BCRC_2, uc); s->storedBlockCRC = (s->storedBlockCRC << 8) | ((UInt32)uc); GET_UCHAR(BZ_X_BCRC_3, uc); s->storedBlockCRC = (s->storedBlockCRC << 8) | ((UInt32)uc); GET_UCHAR(BZ_X_BCRC_4, uc); s->storedBlockCRC = (s->storedBlockCRC << 8) | ((UInt32)uc); GET_BITS(BZ_X_RANDBIT, s->blockRandomised, 1); s->origPtr = 0; GET_UCHAR(BZ_X_ORIGPTR_1, uc); s->origPtr = (s->origPtr << 8) | ((Int32)uc); GET_UCHAR(BZ_X_ORIGPTR_2, uc); s->origPtr = (s->origPtr << 8) | ((Int32)uc); GET_UCHAR(BZ_X_ORIGPTR_3, uc); s->origPtr = (s->origPtr << 8) | ((Int32)uc); if (s->origPtr < 0) RETURN(BZ_DATA_ERROR); if (s->origPtr > 10 + 100000*s->blockSize100k) RETURN(BZ_DATA_ERROR); /*--- Receive the mapping table ---*/ for (i = 0; i < 16; i++) { GET_BIT(BZ_X_MAPPING_1, uc); if (uc == 1) s->inUse16[i] = True; else s->inUse16[i] = False; } for (i = 0; i < 256; i++) s->inUse[i] = False; for (i = 0; i < 16; i++) if (s->inUse16[i]) for (j = 0; j < 16; j++) { GET_BIT(BZ_X_MAPPING_2, uc); if (uc == 1) s->inUse[i * 16 + j] = True; } makeMaps_d ( s ); if (s->nInUse == 0) RETURN(BZ_DATA_ERROR); alphaSize = s->nInUse+2; /*--- Now the selectors ---*/ GET_BITS(BZ_X_SELECTOR_1, nGroups, 3); if (nGroups < 2 || nGroups > 6) RETURN(BZ_DATA_ERROR); GET_BITS(BZ_X_SELECTOR_2, nSelectors, 15); if (nSelectors < 1) RETURN(BZ_DATA_ERROR); for (i = 0; i < nSelectors; i++) { j = 0; while (True) { GET_BIT(BZ_X_SELECTOR_3, uc); if (uc == 0) break; j++; if (j >= nGroups) RETURN(BZ_DATA_ERROR); } s->selectorMtf[i] = j; } /*--- Undo the MTF values for the selectors. 
---*/ { UChar pos[BZ_N_GROUPS], tmp, v; for (v = 0; v < nGroups; v++) pos[v] = v; for (i = 0; i < nSelectors; i++) { v = s->selectorMtf[i]; tmp = pos[v]; while (v > 0) { pos[v] = pos[v-1]; v--; } pos[0] = tmp; s->selector[i] = tmp; } } /*--- Now the coding tables ---*/ for (t = 0; t < nGroups; t++) { GET_BITS(BZ_X_CODING_1, curr, 5); for (i = 0; i < alphaSize; i++) { while (True) { if (curr < 1 || curr > 20) RETURN(BZ_DATA_ERROR); GET_BIT(BZ_X_CODING_2, uc); if (uc == 0) break; GET_BIT(BZ_X_CODING_3, uc); if (uc == 0) curr++; else curr--; } s->len[t][i] = curr; } } /*--- Create the Huffman decoding tables ---*/ for (t = 0; t < nGroups; t++) { minLen = 32; maxLen = 0; for (i = 0; i < alphaSize; i++) { if (s->len[t][i] > maxLen) maxLen = s->len[t][i]; if (s->len[t][i] < minLen) minLen = s->len[t][i]; } BZ2_hbCreateDecodeTables ( &(s->limit[t][0]), &(s->base[t][0]), &(s->perm[t][0]), &(s->len[t][0]), minLen, maxLen, alphaSize ); s->minLens[t] = minLen; } /*--- Now the MTF values ---*/ EOB = s->nInUse+1; nblockMAX = 100000 * s->blockSize100k; groupNo = -1; groupPos = 0; for (i = 0; i <= 255; i++) s->unzftab[i] = 0; /*-- MTF init --*/ { Int32 ii, jj, kk; kk = MTFA_SIZE-1; for (ii = 256 / MTFL_SIZE - 1; ii >= 0; ii--) { for (jj = MTFL_SIZE-1; jj >= 0; jj--) { s->mtfa[kk] = (UChar)(ii * MTFL_SIZE + jj); kk--; } s->mtfbase[ii] = kk + 1; } } /*-- end MTF init --*/ nblock = 0; GET_MTF_VAL(BZ_X_MTF_1, BZ_X_MTF_2, nextSym); while (True) { if (nextSym == EOB) break; if (nextSym == BZ_RUNA || nextSym == BZ_RUNB) { es = -1; N = 1; do { /* Check that N doesn't get too big, so that es doesn't go negative. The maximum value that can be RUNA/RUNB encoded is equal to the block size (post the initial RLE), viz, 900k, so bounding N at 2 million should guard against overflow without rejecting any legitimate inputs. 
*/ if (N >= 2*1024*1024) RETURN(BZ_DATA_ERROR); if (nextSym == BZ_RUNA) es = es + (0+1) * N; else if (nextSym == BZ_RUNB) es = es + (1+1) * N; N = N * 2; GET_MTF_VAL(BZ_X_MTF_3, BZ_X_MTF_4, nextSym); } while (nextSym == BZ_RUNA || nextSym == BZ_RUNB); es++; uc = s->seqToUnseq[ s->mtfa[s->mtfbase[0]] ]; s->unzftab[uc] += es; if (s->smallDecompress) while (es > 0) { if (nblock >= nblockMAX) RETURN(BZ_DATA_ERROR); s->ll16[nblock] = (UInt16)uc; nblock++; es--; } else while (es > 0) { if (nblock >= nblockMAX) RETURN(BZ_DATA_ERROR); s->tt[nblock] = (UInt32)uc; nblock++; es--; }; continue; } else { if (nblock >= nblockMAX) RETURN(BZ_DATA_ERROR); /*-- uc = MTF ( nextSym-1 ) --*/ { Int32 ii, jj, kk, pp, lno, off; UInt32 nn; nn = (UInt32)(nextSym - 1); if (nn < MTFL_SIZE) { /* avoid general-case expense */ pp = s->mtfbase[0]; uc = s->mtfa[pp+nn]; while (nn > 3) { Int32 z = pp+nn; s->mtfa[(z) ] = s->mtfa[(z)-1]; s->mtfa[(z)-1] = s->mtfa[(z)-2]; s->mtfa[(z)-2] = s->mtfa[(z)-3]; s->mtfa[(z)-3] = s->mtfa[(z)-4]; nn -= 4; } while (nn > 0) { s->mtfa[(pp+nn)] = s->mtfa[(pp+nn)-1]; nn--; }; s->mtfa[pp] = uc; } else { /* general case */ lno = nn / MTFL_SIZE; off = nn % MTFL_SIZE; pp = s->mtfbase[lno] + off; uc = s->mtfa[pp]; while (pp > s->mtfbase[lno]) { s->mtfa[pp] = s->mtfa[pp-1]; pp--; }; s->mtfbase[lno]++; while (lno > 0) { s->mtfbase[lno]--; s->mtfa[s->mtfbase[lno]] = s->mtfa[s->mtfbase[lno-1] + MTFL_SIZE - 1]; lno--; } s->mtfbase[0]--; s->mtfa[s->mtfbase[0]] = uc; if (s->mtfbase[0] == 0) { kk = MTFA_SIZE-1; for (ii = 256 / MTFL_SIZE-1; ii >= 0; ii--) { for (jj = MTFL_SIZE-1; jj >= 0; jj--) { s->mtfa[kk] = s->mtfa[s->mtfbase[ii] + jj]; kk--; } s->mtfbase[ii] = kk + 1; } } } } /*-- end uc = MTF ( nextSym-1 ) --*/ s->unzftab[s->seqToUnseq[uc]]++; if (s->smallDecompress) s->ll16[nblock] = (UInt16)(s->seqToUnseq[uc]); else s->tt[nblock] = (UInt32)(s->seqToUnseq[uc]); nblock++; GET_MTF_VAL(BZ_X_MTF_5, BZ_X_MTF_6, nextSym); continue; } } /* Now we know what nblock is, we can do a better sanity check on s->origPtr. */ if (s->origPtr < 0 || s->origPtr >= nblock) RETURN(BZ_DATA_ERROR); /*-- Set up cftab to facilitate generation of T^(-1) --*/ /* Check: unzftab entries in range. */ for (i = 0; i <= 255; i++) { if (s->unzftab[i] < 0 || s->unzftab[i] > nblock) RETURN(BZ_DATA_ERROR); } /* Actually generate cftab. */ s->cftab[0] = 0; for (i = 1; i <= 256; i++) s->cftab[i] = s->unzftab[i-1]; for (i = 1; i <= 256; i++) s->cftab[i] += s->cftab[i-1]; /* Check: cftab entries in range. */ for (i = 0; i <= 256; i++) { if (s->cftab[i] < 0 || s->cftab[i] > nblock) { /* s->cftab[i] can legitimately be == nblock */ RETURN(BZ_DATA_ERROR); } } /* Check: cftab entries non-descending. 
*/ for (i = 1; i <= 256; i++) { if (s->cftab[i-1] > s->cftab[i]) { RETURN(BZ_DATA_ERROR); } } s->state_out_len = 0; s->state_out_ch = 0; BZ_INITIALISE_CRC ( s->calculatedBlockCRC ); s->state = BZ_X_OUTPUT; if (s->verbosity >= 2) VPrintf0 ( "rt+rld" ); if (s->smallDecompress) { /*-- Make a copy of cftab, used in generation of T --*/ for (i = 0; i <= 256; i++) s->cftabCopy[i] = s->cftab[i]; /*-- compute the T vector --*/ for (i = 0; i < nblock; i++) { uc = (UChar)(s->ll16[i]); SET_LL(i, s->cftabCopy[uc]); s->cftabCopy[uc]++; } /*-- Compute T^(-1) by pointer reversal on T --*/ i = s->origPtr; j = GET_LL(i); do { Int32 tmp = GET_LL(j); SET_LL(j, i); i = j; j = tmp; } while (i != s->origPtr); s->tPos = s->origPtr; s->nblock_used = 0; if (s->blockRandomised) { BZ_RAND_INIT_MASK; BZ_GET_SMALL(s->k0); s->nblock_used++; BZ_RAND_UPD_MASK; s->k0 ^= BZ_RAND_MASK; } else { BZ_GET_SMALL(s->k0); s->nblock_used++; } } else { /*-- compute the T^(-1) vector --*/ for (i = 0; i < nblock; i++) { uc = (UChar)(s->tt[i] & 0xff); s->tt[s->cftab[uc]] |= (i << 8); s->cftab[uc]++; } s->tPos = s->tt[s->origPtr] >> 8; s->nblock_used = 0; if (s->blockRandomised) { BZ_RAND_INIT_MASK; BZ_GET_FAST(s->k0); s->nblock_used++; BZ_RAND_UPD_MASK; s->k0 ^= BZ_RAND_MASK; } else { BZ_GET_FAST(s->k0); s->nblock_used++; } } RETURN(BZ_OK); endhdr_2: GET_UCHAR(BZ_X_ENDHDR_2, uc); if (uc != 0x72) RETURN(BZ_DATA_ERROR); GET_UCHAR(BZ_X_ENDHDR_3, uc); if (uc != 0x45) RETURN(BZ_DATA_ERROR); GET_UCHAR(BZ_X_ENDHDR_4, uc); if (uc != 0x38) RETURN(BZ_DATA_ERROR); GET_UCHAR(BZ_X_ENDHDR_5, uc); if (uc != 0x50) RETURN(BZ_DATA_ERROR); GET_UCHAR(BZ_X_ENDHDR_6, uc); if (uc != 0x90) RETURN(BZ_DATA_ERROR); s->storedCombinedCRC = 0; GET_UCHAR(BZ_X_CCRC_1, uc); s->storedCombinedCRC = (s->storedCombinedCRC << 8) | ((UInt32)uc); GET_UCHAR(BZ_X_CCRC_2, uc); s->storedCombinedCRC = (s->storedCombinedCRC << 8) | ((UInt32)uc); GET_UCHAR(BZ_X_CCRC_3, uc); s->storedCombinedCRC = (s->storedCombinedCRC << 8) | ((UInt32)uc); GET_UCHAR(BZ_X_CCRC_4, uc); s->storedCombinedCRC = (s->storedCombinedCRC << 8) | ((UInt32)uc); s->state = BZ_X_IDLE; RETURN(BZ_STREAM_END); default: AssertH ( False, 4001 ); } AssertH ( False, 4002 ); save_state_and_return: s->save_i = i; s->save_j = j; s->save_t = t; s->save_alphaSize = alphaSize; s->save_nGroups = nGroups; s->save_nSelectors = nSelectors; s->save_EOB = EOB; s->save_groupNo = groupNo; s->save_groupPos = groupPos; s->save_nextSym = nextSym; s->save_nblockMAX = nblockMAX; s->save_nblock = nblock; s->save_es = es; s->save_N = N; s->save_curr = curr; s->save_zt = zt; s->save_zn = zn; s->save_zvec = zvec; s->save_zj = zj; s->save_gSel = gSel; s->save_gMinlen = gMinlen; s->save_gLimit = gLimit; s->save_gBase = gBase; s->save_gPerm = gPerm; return retVal; } /*-------------------------------------------------------------*/ /*--- end decompress.c ---*/ /*-------------------------------------------------------------*/
/*-------------------------------------------------------------*/ /*--- Decompression machinery ---*/ /*--- decompress.c ---*/ /*-------------------------------------------------------------*/ /* ------------------------------------------------------------------ This file is part of bzip2/libbzip2, a program and library for lossless, block-sorting data compression. bzip2/libbzip2 version 1.0.6 of 6 September 2010 Copyright (C) 1996-2010 Julian Seward <jseward@acm.org> Please read the WARNING, DISCLAIMER and PATENTS sections in the README file. This program is released under the terms of the license contained in the file LICENSE. ------------------------------------------------------------------ */ #include "bzlib_private.h" /*---------------------------------------------------*/ static void makeMaps_d ( DState* s ) { Int32 i; s->nInUse = 0; for (i = 0; i < 256; i++) if (s->inUse[i]) { s->seqToUnseq[s->nInUse] = i; s->nInUse++; } } /*---------------------------------------------------*/ #define RETURN(rrr) \ { retVal = rrr; goto save_state_and_return; }; #define GET_BITS(lll,vvv,nnn) \ case lll: s->state = lll; \ while (True) { \ if (s->bsLive >= nnn) { \ UInt32 v; \ v = (s->bsBuff >> \ (s->bsLive-nnn)) & ((1 << nnn)-1); \ s->bsLive -= nnn; \ vvv = v; \ break; \ } \ if (s->strm->avail_in == 0) RETURN(BZ_OK); \ s->bsBuff \ = (s->bsBuff << 8) | \ ((UInt32) \ (*((UChar*)(s->strm->next_in)))); \ s->bsLive += 8; \ s->strm->next_in++; \ s->strm->avail_in--; \ s->strm->total_in_lo32++; \ if (s->strm->total_in_lo32 == 0) \ s->strm->total_in_hi32++; \ } #define GET_UCHAR(lll,uuu) \ GET_BITS(lll,uuu,8) #define GET_BIT(lll,uuu) \ GET_BITS(lll,uuu,1) /*---------------------------------------------------*/ #define GET_MTF_VAL(label1,label2,lval) \ { \ if (groupPos == 0) { \ groupNo++; \ if (groupNo >= nSelectors) \ RETURN(BZ_DATA_ERROR); \ groupPos = BZ_G_SIZE; \ gSel = s->selector[groupNo]; \ gMinlen = s->minLens[gSel]; \ gLimit = &(s->limit[gSel][0]); \ gPerm = &(s->perm[gSel][0]); \ gBase = &(s->base[gSel][0]); \ } \ groupPos--; \ zn = gMinlen; \ GET_BITS(label1, zvec, zn); \ while (1) { \ if (zn > 20 /* the longest code */) \ RETURN(BZ_DATA_ERROR); \ if (zvec <= gLimit[zn]) break; \ zn++; \ GET_BIT(label2, zj); \ zvec = (zvec << 1) | zj; \ }; \ if (zvec - gBase[zn] < 0 \ || zvec - gBase[zn] >= BZ_MAX_ALPHA_SIZE) \ RETURN(BZ_DATA_ERROR); \ lval = gPerm[zvec - gBase[zn]]; \ } /*---------------------------------------------------*/ Int32 BZ2_decompress ( DState* s ) { UChar uc; Int32 retVal; Int32 minLen, maxLen; bz_stream* strm = s->strm; /* stuff that needs to be saved/restored */ Int32 i; Int32 j; Int32 t; Int32 alphaSize; Int32 nGroups; Int32 nSelectors; Int32 EOB; Int32 groupNo; Int32 groupPos; Int32 nextSym; Int32 nblockMAX; Int32 nblock; Int32 es; Int32 N; Int32 curr; Int32 zt; Int32 zn; Int32 zvec; Int32 zj; Int32 gSel; Int32 gMinlen; Int32* gLimit; Int32* gBase; Int32* gPerm; if (s->state == BZ_X_MAGIC_1) { /*initialise the save area*/ s->save_i = 0; s->save_j = 0; s->save_t = 0; s->save_alphaSize = 0; s->save_nGroups = 0; s->save_nSelectors = 0; s->save_EOB = 0; s->save_groupNo = 0; s->save_groupPos = 0; s->save_nextSym = 0; s->save_nblockMAX = 0; s->save_nblock = 0; s->save_es = 0; s->save_N = 0; s->save_curr = 0; s->save_zt = 0; s->save_zn = 0; s->save_zvec = 0; s->save_zj = 0; s->save_gSel = 0; s->save_gMinlen = 0; s->save_gLimit = NULL; s->save_gBase = NULL; s->save_gPerm = NULL; } /*restore from the save area*/ i = s->save_i; j = s->save_j; t = s->save_t; alphaSize = s->save_alphaSize; 
nGroups = s->save_nGroups; nSelectors = s->save_nSelectors; EOB = s->save_EOB; groupNo = s->save_groupNo; groupPos = s->save_groupPos; nextSym = s->save_nextSym; nblockMAX = s->save_nblockMAX; nblock = s->save_nblock; es = s->save_es; N = s->save_N; curr = s->save_curr; zt = s->save_zt; zn = s->save_zn; zvec = s->save_zvec; zj = s->save_zj; gSel = s->save_gSel; gMinlen = s->save_gMinlen; gLimit = s->save_gLimit; gBase = s->save_gBase; gPerm = s->save_gPerm; retVal = BZ_OK; switch (s->state) { GET_UCHAR(BZ_X_MAGIC_1, uc); if (uc != BZ_HDR_B) RETURN(BZ_DATA_ERROR_MAGIC); GET_UCHAR(BZ_X_MAGIC_2, uc); if (uc != BZ_HDR_Z) RETURN(BZ_DATA_ERROR_MAGIC); GET_UCHAR(BZ_X_MAGIC_3, uc) if (uc != BZ_HDR_h) RETURN(BZ_DATA_ERROR_MAGIC); GET_BITS(BZ_X_MAGIC_4, s->blockSize100k, 8) if (s->blockSize100k < (BZ_HDR_0 + 1) || s->blockSize100k > (BZ_HDR_0 + 9)) RETURN(BZ_DATA_ERROR_MAGIC); s->blockSize100k -= BZ_HDR_0; if (s->smallDecompress) { s->ll16 = BZALLOC( s->blockSize100k * 100000 * sizeof(UInt16) ); s->ll4 = BZALLOC( ((1 + s->blockSize100k * 100000) >> 1) * sizeof(UChar) ); if (s->ll16 == NULL || s->ll4 == NULL) RETURN(BZ_MEM_ERROR); } else { s->tt = BZALLOC( s->blockSize100k * 100000 * sizeof(Int32) ); if (s->tt == NULL) RETURN(BZ_MEM_ERROR); } GET_UCHAR(BZ_X_BLKHDR_1, uc); if (uc == 0x17) goto endhdr_2; if (uc != 0x31) RETURN(BZ_DATA_ERROR); GET_UCHAR(BZ_X_BLKHDR_2, uc); if (uc != 0x41) RETURN(BZ_DATA_ERROR); GET_UCHAR(BZ_X_BLKHDR_3, uc); if (uc != 0x59) RETURN(BZ_DATA_ERROR); GET_UCHAR(BZ_X_BLKHDR_4, uc); if (uc != 0x26) RETURN(BZ_DATA_ERROR); GET_UCHAR(BZ_X_BLKHDR_5, uc); if (uc != 0x53) RETURN(BZ_DATA_ERROR); GET_UCHAR(BZ_X_BLKHDR_6, uc); if (uc != 0x59) RETURN(BZ_DATA_ERROR); s->currBlockNo++; if (s->verbosity >= 2) VPrintf1 ( "\n [%d: huff+mtf ", s->currBlockNo ); s->storedBlockCRC = 0; GET_UCHAR(BZ_X_BCRC_1, uc); s->storedBlockCRC = (s->storedBlockCRC << 8) | ((UInt32)uc); GET_UCHAR(BZ_X_BCRC_2, uc); s->storedBlockCRC = (s->storedBlockCRC << 8) | ((UInt32)uc); GET_UCHAR(BZ_X_BCRC_3, uc); s->storedBlockCRC = (s->storedBlockCRC << 8) | ((UInt32)uc); GET_UCHAR(BZ_X_BCRC_4, uc); s->storedBlockCRC = (s->storedBlockCRC << 8) | ((UInt32)uc); GET_BITS(BZ_X_RANDBIT, s->blockRandomised, 1); s->origPtr = 0; GET_UCHAR(BZ_X_ORIGPTR_1, uc); s->origPtr = (s->origPtr << 8) | ((Int32)uc); GET_UCHAR(BZ_X_ORIGPTR_2, uc); s->origPtr = (s->origPtr << 8) | ((Int32)uc); GET_UCHAR(BZ_X_ORIGPTR_3, uc); s->origPtr = (s->origPtr << 8) | ((Int32)uc); if (s->origPtr < 0) RETURN(BZ_DATA_ERROR); if (s->origPtr > 10 + 100000*s->blockSize100k) RETURN(BZ_DATA_ERROR); /*--- Receive the mapping table ---*/ for (i = 0; i < 16; i++) { GET_BIT(BZ_X_MAPPING_1, uc); if (uc == 1) s->inUse16[i] = True; else s->inUse16[i] = False; } for (i = 0; i < 256; i++) s->inUse[i] = False; for (i = 0; i < 16; i++) if (s->inUse16[i]) for (j = 0; j < 16; j++) { GET_BIT(BZ_X_MAPPING_2, uc); if (uc == 1) s->inUse[i * 16 + j] = True; } makeMaps_d ( s ); if (s->nInUse == 0) RETURN(BZ_DATA_ERROR); alphaSize = s->nInUse+2; /*--- Now the selectors ---*/ GET_BITS(BZ_X_SELECTOR_1, nGroups, 3); if (nGroups < 2 || nGroups > 6) RETURN(BZ_DATA_ERROR); GET_BITS(BZ_X_SELECTOR_2, nSelectors, 15); if (nSelectors < 1 || nSelectors > BZ_MAX_SELECTORS) RETURN(BZ_DATA_ERROR); for (i = 0; i < nSelectors; i++) { j = 0; while (True) { GET_BIT(BZ_X_SELECTOR_3, uc); if (uc == 0) break; j++; if (j >= nGroups) RETURN(BZ_DATA_ERROR); } s->selectorMtf[i] = j; } /*--- Undo the MTF values for the selectors. 
---*/ { UChar pos[BZ_N_GROUPS], tmp, v; for (v = 0; v < nGroups; v++) pos[v] = v; for (i = 0; i < nSelectors; i++) { v = s->selectorMtf[i]; tmp = pos[v]; while (v > 0) { pos[v] = pos[v-1]; v--; } pos[0] = tmp; s->selector[i] = tmp; } } /*--- Now the coding tables ---*/ for (t = 0; t < nGroups; t++) { GET_BITS(BZ_X_CODING_1, curr, 5); for (i = 0; i < alphaSize; i++) { while (True) { if (curr < 1 || curr > 20) RETURN(BZ_DATA_ERROR); GET_BIT(BZ_X_CODING_2, uc); if (uc == 0) break; GET_BIT(BZ_X_CODING_3, uc); if (uc == 0) curr++; else curr--; } s->len[t][i] = curr; } } /*--- Create the Huffman decoding tables ---*/ for (t = 0; t < nGroups; t++) { minLen = 32; maxLen = 0; for (i = 0; i < alphaSize; i++) { if (s->len[t][i] > maxLen) maxLen = s->len[t][i]; if (s->len[t][i] < minLen) minLen = s->len[t][i]; } BZ2_hbCreateDecodeTables ( &(s->limit[t][0]), &(s->base[t][0]), &(s->perm[t][0]), &(s->len[t][0]), minLen, maxLen, alphaSize ); s->minLens[t] = minLen; } /*--- Now the MTF values ---*/ EOB = s->nInUse+1; nblockMAX = 100000 * s->blockSize100k; groupNo = -1; groupPos = 0; for (i = 0; i <= 255; i++) s->unzftab[i] = 0; /*-- MTF init --*/ { Int32 ii, jj, kk; kk = MTFA_SIZE-1; for (ii = 256 / MTFL_SIZE - 1; ii >= 0; ii--) { for (jj = MTFL_SIZE-1; jj >= 0; jj--) { s->mtfa[kk] = (UChar)(ii * MTFL_SIZE + jj); kk--; } s->mtfbase[ii] = kk + 1; } } /*-- end MTF init --*/ nblock = 0; GET_MTF_VAL(BZ_X_MTF_1, BZ_X_MTF_2, nextSym); while (True) { if (nextSym == EOB) break; if (nextSym == BZ_RUNA || nextSym == BZ_RUNB) { es = -1; N = 1; do { /* Check that N doesn't get too big, so that es doesn't go negative. The maximum value that can be RUNA/RUNB encoded is equal to the block size (post the initial RLE), viz, 900k, so bounding N at 2 million should guard against overflow without rejecting any legitimate inputs. 
*/ if (N >= 2*1024*1024) RETURN(BZ_DATA_ERROR); if (nextSym == BZ_RUNA) es = es + (0+1) * N; else if (nextSym == BZ_RUNB) es = es + (1+1) * N; N = N * 2; GET_MTF_VAL(BZ_X_MTF_3, BZ_X_MTF_4, nextSym); } while (nextSym == BZ_RUNA || nextSym == BZ_RUNB); es++; uc = s->seqToUnseq[ s->mtfa[s->mtfbase[0]] ]; s->unzftab[uc] += es; if (s->smallDecompress) while (es > 0) { if (nblock >= nblockMAX) RETURN(BZ_DATA_ERROR); s->ll16[nblock] = (UInt16)uc; nblock++; es--; } else while (es > 0) { if (nblock >= nblockMAX) RETURN(BZ_DATA_ERROR); s->tt[nblock] = (UInt32)uc; nblock++; es--; }; continue; } else { if (nblock >= nblockMAX) RETURN(BZ_DATA_ERROR); /*-- uc = MTF ( nextSym-1 ) --*/ { Int32 ii, jj, kk, pp, lno, off; UInt32 nn; nn = (UInt32)(nextSym - 1); if (nn < MTFL_SIZE) { /* avoid general-case expense */ pp = s->mtfbase[0]; uc = s->mtfa[pp+nn]; while (nn > 3) { Int32 z = pp+nn; s->mtfa[(z) ] = s->mtfa[(z)-1]; s->mtfa[(z)-1] = s->mtfa[(z)-2]; s->mtfa[(z)-2] = s->mtfa[(z)-3]; s->mtfa[(z)-3] = s->mtfa[(z)-4]; nn -= 4; } while (nn > 0) { s->mtfa[(pp+nn)] = s->mtfa[(pp+nn)-1]; nn--; }; s->mtfa[pp] = uc; } else { /* general case */ lno = nn / MTFL_SIZE; off = nn % MTFL_SIZE; pp = s->mtfbase[lno] + off; uc = s->mtfa[pp]; while (pp > s->mtfbase[lno]) { s->mtfa[pp] = s->mtfa[pp-1]; pp--; }; s->mtfbase[lno]++; while (lno > 0) { s->mtfbase[lno]--; s->mtfa[s->mtfbase[lno]] = s->mtfa[s->mtfbase[lno-1] + MTFL_SIZE - 1]; lno--; } s->mtfbase[0]--; s->mtfa[s->mtfbase[0]] = uc; if (s->mtfbase[0] == 0) { kk = MTFA_SIZE-1; for (ii = 256 / MTFL_SIZE-1; ii >= 0; ii--) { for (jj = MTFL_SIZE-1; jj >= 0; jj--) { s->mtfa[kk] = s->mtfa[s->mtfbase[ii] + jj]; kk--; } s->mtfbase[ii] = kk + 1; } } } } /*-- end uc = MTF ( nextSym-1 ) --*/ s->unzftab[s->seqToUnseq[uc]]++; if (s->smallDecompress) s->ll16[nblock] = (UInt16)(s->seqToUnseq[uc]); else s->tt[nblock] = (UInt32)(s->seqToUnseq[uc]); nblock++; GET_MTF_VAL(BZ_X_MTF_5, BZ_X_MTF_6, nextSym); continue; } } /* Now we know what nblock is, we can do a better sanity check on s->origPtr. */ if (s->origPtr < 0 || s->origPtr >= nblock) RETURN(BZ_DATA_ERROR); /*-- Set up cftab to facilitate generation of T^(-1) --*/ /* Check: unzftab entries in range. */ for (i = 0; i <= 255; i++) { if (s->unzftab[i] < 0 || s->unzftab[i] > nblock) RETURN(BZ_DATA_ERROR); } /* Actually generate cftab. */ s->cftab[0] = 0; for (i = 1; i <= 256; i++) s->cftab[i] = s->unzftab[i-1]; for (i = 1; i <= 256; i++) s->cftab[i] += s->cftab[i-1]; /* Check: cftab entries in range. */ for (i = 0; i <= 256; i++) { if (s->cftab[i] < 0 || s->cftab[i] > nblock) { /* s->cftab[i] can legitimately be == nblock */ RETURN(BZ_DATA_ERROR); } } /* Check: cftab entries non-descending. 
*/ for (i = 1; i <= 256; i++) { if (s->cftab[i-1] > s->cftab[i]) { RETURN(BZ_DATA_ERROR); } } s->state_out_len = 0; s->state_out_ch = 0; BZ_INITIALISE_CRC ( s->calculatedBlockCRC ); s->state = BZ_X_OUTPUT; if (s->verbosity >= 2) VPrintf0 ( "rt+rld" ); if (s->smallDecompress) { /*-- Make a copy of cftab, used in generation of T --*/ for (i = 0; i <= 256; i++) s->cftabCopy[i] = s->cftab[i]; /*-- compute the T vector --*/ for (i = 0; i < nblock; i++) { uc = (UChar)(s->ll16[i]); SET_LL(i, s->cftabCopy[uc]); s->cftabCopy[uc]++; } /*-- Compute T^(-1) by pointer reversal on T --*/ i = s->origPtr; j = GET_LL(i); do { Int32 tmp = GET_LL(j); SET_LL(j, i); i = j; j = tmp; } while (i != s->origPtr); s->tPos = s->origPtr; s->nblock_used = 0; if (s->blockRandomised) { BZ_RAND_INIT_MASK; BZ_GET_SMALL(s->k0); s->nblock_used++; BZ_RAND_UPD_MASK; s->k0 ^= BZ_RAND_MASK; } else { BZ_GET_SMALL(s->k0); s->nblock_used++; } } else { /*-- compute the T^(-1) vector --*/ for (i = 0; i < nblock; i++) { uc = (UChar)(s->tt[i] & 0xff); s->tt[s->cftab[uc]] |= (i << 8); s->cftab[uc]++; } s->tPos = s->tt[s->origPtr] >> 8; s->nblock_used = 0; if (s->blockRandomised) { BZ_RAND_INIT_MASK; BZ_GET_FAST(s->k0); s->nblock_used++; BZ_RAND_UPD_MASK; s->k0 ^= BZ_RAND_MASK; } else { BZ_GET_FAST(s->k0); s->nblock_used++; } } RETURN(BZ_OK); endhdr_2: GET_UCHAR(BZ_X_ENDHDR_2, uc); if (uc != 0x72) RETURN(BZ_DATA_ERROR); GET_UCHAR(BZ_X_ENDHDR_3, uc); if (uc != 0x45) RETURN(BZ_DATA_ERROR); GET_UCHAR(BZ_X_ENDHDR_4, uc); if (uc != 0x38) RETURN(BZ_DATA_ERROR); GET_UCHAR(BZ_X_ENDHDR_5, uc); if (uc != 0x50) RETURN(BZ_DATA_ERROR); GET_UCHAR(BZ_X_ENDHDR_6, uc); if (uc != 0x90) RETURN(BZ_DATA_ERROR); s->storedCombinedCRC = 0; GET_UCHAR(BZ_X_CCRC_1, uc); s->storedCombinedCRC = (s->storedCombinedCRC << 8) | ((UInt32)uc); GET_UCHAR(BZ_X_CCRC_2, uc); s->storedCombinedCRC = (s->storedCombinedCRC << 8) | ((UInt32)uc); GET_UCHAR(BZ_X_CCRC_3, uc); s->storedCombinedCRC = (s->storedCombinedCRC << 8) | ((UInt32)uc); GET_UCHAR(BZ_X_CCRC_4, uc); s->storedCombinedCRC = (s->storedCombinedCRC << 8) | ((UInt32)uc); s->state = BZ_X_IDLE; RETURN(BZ_STREAM_END); default: AssertH ( False, 4001 ); } AssertH ( False, 4002 ); save_state_and_return: s->save_i = i; s->save_j = j; s->save_t = t; s->save_alphaSize = alphaSize; s->save_nGroups = nGroups; s->save_nSelectors = nSelectors; s->save_EOB = EOB; s->save_groupNo = groupNo; s->save_groupPos = groupPos; s->save_nextSym = nextSym; s->save_nblockMAX = nblockMAX; s->save_nblock = nblock; s->save_es = es; s->save_N = N; s->save_curr = curr; s->save_zt = zt; s->save_zn = zn; s->save_zvec = zvec; s->save_zj = zj; s->save_gSel = gSel; s->save_gMinlen = gMinlen; s->save_gLimit = gLimit; s->save_gBase = gBase; s->save_gPerm = gPerm; return retVal; } /*-------------------------------------------------------------*/ /*--- end decompress.c ---*/ /*-------------------------------------------------------------*/
Int32 BZ2_decompress ( DState* s ) { UChar uc; Int32 retVal; Int32 minLen, maxLen; bz_stream* strm = s->strm; /* stuff that needs to be saved/restored */ Int32 i; Int32 j; Int32 t; Int32 alphaSize; Int32 nGroups; Int32 nSelectors; Int32 EOB; Int32 groupNo; Int32 groupPos; Int32 nextSym; Int32 nblockMAX; Int32 nblock; Int32 es; Int32 N; Int32 curr; Int32 zt; Int32 zn; Int32 zvec; Int32 zj; Int32 gSel; Int32 gMinlen; Int32* gLimit; Int32* gBase; Int32* gPerm; if (s->state == BZ_X_MAGIC_1) { /*initialise the save area*/ s->save_i = 0; s->save_j = 0; s->save_t = 0; s->save_alphaSize = 0; s->save_nGroups = 0; s->save_nSelectors = 0; s->save_EOB = 0; s->save_groupNo = 0; s->save_groupPos = 0; s->save_nextSym = 0; s->save_nblockMAX = 0; s->save_nblock = 0; s->save_es = 0; s->save_N = 0; s->save_curr = 0; s->save_zt = 0; s->save_zn = 0; s->save_zvec = 0; s->save_zj = 0; s->save_gSel = 0; s->save_gMinlen = 0; s->save_gLimit = NULL; s->save_gBase = NULL; s->save_gPerm = NULL; } /*restore from the save area*/ i = s->save_i; j = s->save_j; t = s->save_t; alphaSize = s->save_alphaSize; nGroups = s->save_nGroups; nSelectors = s->save_nSelectors; EOB = s->save_EOB; groupNo = s->save_groupNo; groupPos = s->save_groupPos; nextSym = s->save_nextSym; nblockMAX = s->save_nblockMAX; nblock = s->save_nblock; es = s->save_es; N = s->save_N; curr = s->save_curr; zt = s->save_zt; zn = s->save_zn; zvec = s->save_zvec; zj = s->save_zj; gSel = s->save_gSel; gMinlen = s->save_gMinlen; gLimit = s->save_gLimit; gBase = s->save_gBase; gPerm = s->save_gPerm; retVal = BZ_OK; switch (s->state) { GET_UCHAR(BZ_X_MAGIC_1, uc); if (uc != BZ_HDR_B) RETURN(BZ_DATA_ERROR_MAGIC); GET_UCHAR(BZ_X_MAGIC_2, uc); if (uc != BZ_HDR_Z) RETURN(BZ_DATA_ERROR_MAGIC); GET_UCHAR(BZ_X_MAGIC_3, uc) if (uc != BZ_HDR_h) RETURN(BZ_DATA_ERROR_MAGIC); GET_BITS(BZ_X_MAGIC_4, s->blockSize100k, 8) if (s->blockSize100k < (BZ_HDR_0 + 1) || s->blockSize100k > (BZ_HDR_0 + 9)) RETURN(BZ_DATA_ERROR_MAGIC); s->blockSize100k -= BZ_HDR_0; if (s->smallDecompress) { s->ll16 = BZALLOC( s->blockSize100k * 100000 * sizeof(UInt16) ); s->ll4 = BZALLOC( ((1 + s->blockSize100k * 100000) >> 1) * sizeof(UChar) ); if (s->ll16 == NULL || s->ll4 == NULL) RETURN(BZ_MEM_ERROR); } else { s->tt = BZALLOC( s->blockSize100k * 100000 * sizeof(Int32) ); if (s->tt == NULL) RETURN(BZ_MEM_ERROR); } GET_UCHAR(BZ_X_BLKHDR_1, uc); if (uc == 0x17) goto endhdr_2; if (uc != 0x31) RETURN(BZ_DATA_ERROR); GET_UCHAR(BZ_X_BLKHDR_2, uc); if (uc != 0x41) RETURN(BZ_DATA_ERROR); GET_UCHAR(BZ_X_BLKHDR_3, uc); if (uc != 0x59) RETURN(BZ_DATA_ERROR); GET_UCHAR(BZ_X_BLKHDR_4, uc); if (uc != 0x26) RETURN(BZ_DATA_ERROR); GET_UCHAR(BZ_X_BLKHDR_5, uc); if (uc != 0x53) RETURN(BZ_DATA_ERROR); GET_UCHAR(BZ_X_BLKHDR_6, uc); if (uc != 0x59) RETURN(BZ_DATA_ERROR); s->currBlockNo++; if (s->verbosity >= 2) VPrintf1 ( "\n [%d: huff+mtf ", s->currBlockNo ); s->storedBlockCRC = 0; GET_UCHAR(BZ_X_BCRC_1, uc); s->storedBlockCRC = (s->storedBlockCRC << 8) | ((UInt32)uc); GET_UCHAR(BZ_X_BCRC_2, uc); s->storedBlockCRC = (s->storedBlockCRC << 8) | ((UInt32)uc); GET_UCHAR(BZ_X_BCRC_3, uc); s->storedBlockCRC = (s->storedBlockCRC << 8) | ((UInt32)uc); GET_UCHAR(BZ_X_BCRC_4, uc); s->storedBlockCRC = (s->storedBlockCRC << 8) | ((UInt32)uc); GET_BITS(BZ_X_RANDBIT, s->blockRandomised, 1); s->origPtr = 0; GET_UCHAR(BZ_X_ORIGPTR_1, uc); s->origPtr = (s->origPtr << 8) | ((Int32)uc); GET_UCHAR(BZ_X_ORIGPTR_2, uc); s->origPtr = (s->origPtr << 8) | ((Int32)uc); GET_UCHAR(BZ_X_ORIGPTR_3, uc); s->origPtr = (s->origPtr << 8) | ((Int32)uc); if 
(s->origPtr < 0) RETURN(BZ_DATA_ERROR); if (s->origPtr > 10 + 100000*s->blockSize100k) RETURN(BZ_DATA_ERROR); /*--- Receive the mapping table ---*/ for (i = 0; i < 16; i++) { GET_BIT(BZ_X_MAPPING_1, uc); if (uc == 1) s->inUse16[i] = True; else s->inUse16[i] = False; } for (i = 0; i < 256; i++) s->inUse[i] = False; for (i = 0; i < 16; i++) if (s->inUse16[i]) for (j = 0; j < 16; j++) { GET_BIT(BZ_X_MAPPING_2, uc); if (uc == 1) s->inUse[i * 16 + j] = True; } makeMaps_d ( s ); if (s->nInUse == 0) RETURN(BZ_DATA_ERROR); alphaSize = s->nInUse+2; /*--- Now the selectors ---*/ GET_BITS(BZ_X_SELECTOR_1, nGroups, 3); if (nGroups < 2 || nGroups > 6) RETURN(BZ_DATA_ERROR); GET_BITS(BZ_X_SELECTOR_2, nSelectors, 15); if (nSelectors < 1) RETURN(BZ_DATA_ERROR); for (i = 0; i < nSelectors; i++) { j = 0; while (True) { GET_BIT(BZ_X_SELECTOR_3, uc); if (uc == 0) break; j++; if (j >= nGroups) RETURN(BZ_DATA_ERROR); } s->selectorMtf[i] = j; } /*--- Undo the MTF values for the selectors. ---*/ { UChar pos[BZ_N_GROUPS], tmp, v; for (v = 0; v < nGroups; v++) pos[v] = v; for (i = 0; i < nSelectors; i++) { v = s->selectorMtf[i]; tmp = pos[v]; while (v > 0) { pos[v] = pos[v-1]; v--; } pos[0] = tmp; s->selector[i] = tmp; } } /*--- Now the coding tables ---*/ for (t = 0; t < nGroups; t++) { GET_BITS(BZ_X_CODING_1, curr, 5); for (i = 0; i < alphaSize; i++) { while (True) { if (curr < 1 || curr > 20) RETURN(BZ_DATA_ERROR); GET_BIT(BZ_X_CODING_2, uc); if (uc == 0) break; GET_BIT(BZ_X_CODING_3, uc); if (uc == 0) curr++; else curr--; } s->len[t][i] = curr; } } /*--- Create the Huffman decoding tables ---*/ for (t = 0; t < nGroups; t++) { minLen = 32; maxLen = 0; for (i = 0; i < alphaSize; i++) { if (s->len[t][i] > maxLen) maxLen = s->len[t][i]; if (s->len[t][i] < minLen) minLen = s->len[t][i]; } BZ2_hbCreateDecodeTables ( &(s->limit[t][0]), &(s->base[t][0]), &(s->perm[t][0]), &(s->len[t][0]), minLen, maxLen, alphaSize ); s->minLens[t] = minLen; } /*--- Now the MTF values ---*/ EOB = s->nInUse+1; nblockMAX = 100000 * s->blockSize100k; groupNo = -1; groupPos = 0; for (i = 0; i <= 255; i++) s->unzftab[i] = 0; /*-- MTF init --*/ { Int32 ii, jj, kk; kk = MTFA_SIZE-1; for (ii = 256 / MTFL_SIZE - 1; ii >= 0; ii--) { for (jj = MTFL_SIZE-1; jj >= 0; jj--) { s->mtfa[kk] = (UChar)(ii * MTFL_SIZE + jj); kk--; } s->mtfbase[ii] = kk + 1; } } /*-- end MTF init --*/ nblock = 0; GET_MTF_VAL(BZ_X_MTF_1, BZ_X_MTF_2, nextSym); while (True) { if (nextSym == EOB) break; if (nextSym == BZ_RUNA || nextSym == BZ_RUNB) { es = -1; N = 1; do { /* Check that N doesn't get too big, so that es doesn't go negative. The maximum value that can be RUNA/RUNB encoded is equal to the block size (post the initial RLE), viz, 900k, so bounding N at 2 million should guard against overflow without rejecting any legitimate inputs. 
*/ if (N >= 2*1024*1024) RETURN(BZ_DATA_ERROR); if (nextSym == BZ_RUNA) es = es + (0+1) * N; else if (nextSym == BZ_RUNB) es = es + (1+1) * N; N = N * 2; GET_MTF_VAL(BZ_X_MTF_3, BZ_X_MTF_4, nextSym); } while (nextSym == BZ_RUNA || nextSym == BZ_RUNB); es++; uc = s->seqToUnseq[ s->mtfa[s->mtfbase[0]] ]; s->unzftab[uc] += es; if (s->smallDecompress) while (es > 0) { if (nblock >= nblockMAX) RETURN(BZ_DATA_ERROR); s->ll16[nblock] = (UInt16)uc; nblock++; es--; } else while (es > 0) { if (nblock >= nblockMAX) RETURN(BZ_DATA_ERROR); s->tt[nblock] = (UInt32)uc; nblock++; es--; }; continue; } else { if (nblock >= nblockMAX) RETURN(BZ_DATA_ERROR); /*-- uc = MTF ( nextSym-1 ) --*/ { Int32 ii, jj, kk, pp, lno, off; UInt32 nn; nn = (UInt32)(nextSym - 1); if (nn < MTFL_SIZE) { /* avoid general-case expense */ pp = s->mtfbase[0]; uc = s->mtfa[pp+nn]; while (nn > 3) { Int32 z = pp+nn; s->mtfa[(z) ] = s->mtfa[(z)-1]; s->mtfa[(z)-1] = s->mtfa[(z)-2]; s->mtfa[(z)-2] = s->mtfa[(z)-3]; s->mtfa[(z)-3] = s->mtfa[(z)-4]; nn -= 4; } while (nn > 0) { s->mtfa[(pp+nn)] = s->mtfa[(pp+nn)-1]; nn--; }; s->mtfa[pp] = uc; } else { /* general case */ lno = nn / MTFL_SIZE; off = nn % MTFL_SIZE; pp = s->mtfbase[lno] + off; uc = s->mtfa[pp]; while (pp > s->mtfbase[lno]) { s->mtfa[pp] = s->mtfa[pp-1]; pp--; }; s->mtfbase[lno]++; while (lno > 0) { s->mtfbase[lno]--; s->mtfa[s->mtfbase[lno]] = s->mtfa[s->mtfbase[lno-1] + MTFL_SIZE - 1]; lno--; } s->mtfbase[0]--; s->mtfa[s->mtfbase[0]] = uc; if (s->mtfbase[0] == 0) { kk = MTFA_SIZE-1; for (ii = 256 / MTFL_SIZE-1; ii >= 0; ii--) { for (jj = MTFL_SIZE-1; jj >= 0; jj--) { s->mtfa[kk] = s->mtfa[s->mtfbase[ii] + jj]; kk--; } s->mtfbase[ii] = kk + 1; } } } } /*-- end uc = MTF ( nextSym-1 ) --*/ s->unzftab[s->seqToUnseq[uc]]++; if (s->smallDecompress) s->ll16[nblock] = (UInt16)(s->seqToUnseq[uc]); else s->tt[nblock] = (UInt32)(s->seqToUnseq[uc]); nblock++; GET_MTF_VAL(BZ_X_MTF_5, BZ_X_MTF_6, nextSym); continue; } } /* Now we know what nblock is, we can do a better sanity check on s->origPtr. */ if (s->origPtr < 0 || s->origPtr >= nblock) RETURN(BZ_DATA_ERROR); /*-- Set up cftab to facilitate generation of T^(-1) --*/ /* Check: unzftab entries in range. */ for (i = 0; i <= 255; i++) { if (s->unzftab[i] < 0 || s->unzftab[i] > nblock) RETURN(BZ_DATA_ERROR); } /* Actually generate cftab. */ s->cftab[0] = 0; for (i = 1; i <= 256; i++) s->cftab[i] = s->unzftab[i-1]; for (i = 1; i <= 256; i++) s->cftab[i] += s->cftab[i-1]; /* Check: cftab entries in range. */ for (i = 0; i <= 256; i++) { if (s->cftab[i] < 0 || s->cftab[i] > nblock) { /* s->cftab[i] can legitimately be == nblock */ RETURN(BZ_DATA_ERROR); } } /* Check: cftab entries non-descending. 
*/ for (i = 1; i <= 256; i++) { if (s->cftab[i-1] > s->cftab[i]) { RETURN(BZ_DATA_ERROR); } } s->state_out_len = 0; s->state_out_ch = 0; BZ_INITIALISE_CRC ( s->calculatedBlockCRC ); s->state = BZ_X_OUTPUT; if (s->verbosity >= 2) VPrintf0 ( "rt+rld" ); if (s->smallDecompress) { /*-- Make a copy of cftab, used in generation of T --*/ for (i = 0; i <= 256; i++) s->cftabCopy[i] = s->cftab[i]; /*-- compute the T vector --*/ for (i = 0; i < nblock; i++) { uc = (UChar)(s->ll16[i]); SET_LL(i, s->cftabCopy[uc]); s->cftabCopy[uc]++; } /*-- Compute T^(-1) by pointer reversal on T --*/ i = s->origPtr; j = GET_LL(i); do { Int32 tmp = GET_LL(j); SET_LL(j, i); i = j; j = tmp; } while (i != s->origPtr); s->tPos = s->origPtr; s->nblock_used = 0; if (s->blockRandomised) { BZ_RAND_INIT_MASK; BZ_GET_SMALL(s->k0); s->nblock_used++; BZ_RAND_UPD_MASK; s->k0 ^= BZ_RAND_MASK; } else { BZ_GET_SMALL(s->k0); s->nblock_used++; } } else { /*-- compute the T^(-1) vector --*/ for (i = 0; i < nblock; i++) { uc = (UChar)(s->tt[i] & 0xff); s->tt[s->cftab[uc]] |= (i << 8); s->cftab[uc]++; } s->tPos = s->tt[s->origPtr] >> 8; s->nblock_used = 0; if (s->blockRandomised) { BZ_RAND_INIT_MASK; BZ_GET_FAST(s->k0); s->nblock_used++; BZ_RAND_UPD_MASK; s->k0 ^= BZ_RAND_MASK; } else { BZ_GET_FAST(s->k0); s->nblock_used++; } } RETURN(BZ_OK); endhdr_2: GET_UCHAR(BZ_X_ENDHDR_2, uc); if (uc != 0x72) RETURN(BZ_DATA_ERROR); GET_UCHAR(BZ_X_ENDHDR_3, uc); if (uc != 0x45) RETURN(BZ_DATA_ERROR); GET_UCHAR(BZ_X_ENDHDR_4, uc); if (uc != 0x38) RETURN(BZ_DATA_ERROR); GET_UCHAR(BZ_X_ENDHDR_5, uc); if (uc != 0x50) RETURN(BZ_DATA_ERROR); GET_UCHAR(BZ_X_ENDHDR_6, uc); if (uc != 0x90) RETURN(BZ_DATA_ERROR); s->storedCombinedCRC = 0; GET_UCHAR(BZ_X_CCRC_1, uc); s->storedCombinedCRC = (s->storedCombinedCRC << 8) | ((UInt32)uc); GET_UCHAR(BZ_X_CCRC_2, uc); s->storedCombinedCRC = (s->storedCombinedCRC << 8) | ((UInt32)uc); GET_UCHAR(BZ_X_CCRC_3, uc); s->storedCombinedCRC = (s->storedCombinedCRC << 8) | ((UInt32)uc); GET_UCHAR(BZ_X_CCRC_4, uc); s->storedCombinedCRC = (s->storedCombinedCRC << 8) | ((UInt32)uc); s->state = BZ_X_IDLE; RETURN(BZ_STREAM_END); default: AssertH ( False, 4001 ); } AssertH ( False, 4002 ); save_state_and_return: s->save_i = i; s->save_j = j; s->save_t = t; s->save_alphaSize = alphaSize; s->save_nGroups = nGroups; s->save_nSelectors = nSelectors; s->save_EOB = EOB; s->save_groupNo = groupNo; s->save_groupPos = groupPos; s->save_nextSym = nextSym; s->save_nblockMAX = nblockMAX; s->save_nblock = nblock; s->save_es = es; s->save_N = N; s->save_curr = curr; s->save_zt = zt; s->save_zn = zn; s->save_zvec = zvec; s->save_zj = zj; s->save_gSel = gSel; s->save_gMinlen = gMinlen; s->save_gLimit = gLimit; s->save_gBase = gBase; s->save_gPerm = gPerm; return retVal; }
Int32 BZ2_decompress ( DState* s ) { UChar uc; Int32 retVal; Int32 minLen, maxLen; bz_stream* strm = s->strm; /* stuff that needs to be saved/restored */ Int32 i; Int32 j; Int32 t; Int32 alphaSize; Int32 nGroups; Int32 nSelectors; Int32 EOB; Int32 groupNo; Int32 groupPos; Int32 nextSym; Int32 nblockMAX; Int32 nblock; Int32 es; Int32 N; Int32 curr; Int32 zt; Int32 zn; Int32 zvec; Int32 zj; Int32 gSel; Int32 gMinlen; Int32* gLimit; Int32* gBase; Int32* gPerm; if (s->state == BZ_X_MAGIC_1) { /*initialise the save area*/ s->save_i = 0; s->save_j = 0; s->save_t = 0; s->save_alphaSize = 0; s->save_nGroups = 0; s->save_nSelectors = 0; s->save_EOB = 0; s->save_groupNo = 0; s->save_groupPos = 0; s->save_nextSym = 0; s->save_nblockMAX = 0; s->save_nblock = 0; s->save_es = 0; s->save_N = 0; s->save_curr = 0; s->save_zt = 0; s->save_zn = 0; s->save_zvec = 0; s->save_zj = 0; s->save_gSel = 0; s->save_gMinlen = 0; s->save_gLimit = NULL; s->save_gBase = NULL; s->save_gPerm = NULL; } /*restore from the save area*/ i = s->save_i; j = s->save_j; t = s->save_t; alphaSize = s->save_alphaSize; nGroups = s->save_nGroups; nSelectors = s->save_nSelectors; EOB = s->save_EOB; groupNo = s->save_groupNo; groupPos = s->save_groupPos; nextSym = s->save_nextSym; nblockMAX = s->save_nblockMAX; nblock = s->save_nblock; es = s->save_es; N = s->save_N; curr = s->save_curr; zt = s->save_zt; zn = s->save_zn; zvec = s->save_zvec; zj = s->save_zj; gSel = s->save_gSel; gMinlen = s->save_gMinlen; gLimit = s->save_gLimit; gBase = s->save_gBase; gPerm = s->save_gPerm; retVal = BZ_OK; switch (s->state) { GET_UCHAR(BZ_X_MAGIC_1, uc); if (uc != BZ_HDR_B) RETURN(BZ_DATA_ERROR_MAGIC); GET_UCHAR(BZ_X_MAGIC_2, uc); if (uc != BZ_HDR_Z) RETURN(BZ_DATA_ERROR_MAGIC); GET_UCHAR(BZ_X_MAGIC_3, uc) if (uc != BZ_HDR_h) RETURN(BZ_DATA_ERROR_MAGIC); GET_BITS(BZ_X_MAGIC_4, s->blockSize100k, 8) if (s->blockSize100k < (BZ_HDR_0 + 1) || s->blockSize100k > (BZ_HDR_0 + 9)) RETURN(BZ_DATA_ERROR_MAGIC); s->blockSize100k -= BZ_HDR_0; if (s->smallDecompress) { s->ll16 = BZALLOC( s->blockSize100k * 100000 * sizeof(UInt16) ); s->ll4 = BZALLOC( ((1 + s->blockSize100k * 100000) >> 1) * sizeof(UChar) ); if (s->ll16 == NULL || s->ll4 == NULL) RETURN(BZ_MEM_ERROR); } else { s->tt = BZALLOC( s->blockSize100k * 100000 * sizeof(Int32) ); if (s->tt == NULL) RETURN(BZ_MEM_ERROR); } GET_UCHAR(BZ_X_BLKHDR_1, uc); if (uc == 0x17) goto endhdr_2; if (uc != 0x31) RETURN(BZ_DATA_ERROR); GET_UCHAR(BZ_X_BLKHDR_2, uc); if (uc != 0x41) RETURN(BZ_DATA_ERROR); GET_UCHAR(BZ_X_BLKHDR_3, uc); if (uc != 0x59) RETURN(BZ_DATA_ERROR); GET_UCHAR(BZ_X_BLKHDR_4, uc); if (uc != 0x26) RETURN(BZ_DATA_ERROR); GET_UCHAR(BZ_X_BLKHDR_5, uc); if (uc != 0x53) RETURN(BZ_DATA_ERROR); GET_UCHAR(BZ_X_BLKHDR_6, uc); if (uc != 0x59) RETURN(BZ_DATA_ERROR); s->currBlockNo++; if (s->verbosity >= 2) VPrintf1 ( "\n [%d: huff+mtf ", s->currBlockNo ); s->storedBlockCRC = 0; GET_UCHAR(BZ_X_BCRC_1, uc); s->storedBlockCRC = (s->storedBlockCRC << 8) | ((UInt32)uc); GET_UCHAR(BZ_X_BCRC_2, uc); s->storedBlockCRC = (s->storedBlockCRC << 8) | ((UInt32)uc); GET_UCHAR(BZ_X_BCRC_3, uc); s->storedBlockCRC = (s->storedBlockCRC << 8) | ((UInt32)uc); GET_UCHAR(BZ_X_BCRC_4, uc); s->storedBlockCRC = (s->storedBlockCRC << 8) | ((UInt32)uc); GET_BITS(BZ_X_RANDBIT, s->blockRandomised, 1); s->origPtr = 0; GET_UCHAR(BZ_X_ORIGPTR_1, uc); s->origPtr = (s->origPtr << 8) | ((Int32)uc); GET_UCHAR(BZ_X_ORIGPTR_2, uc); s->origPtr = (s->origPtr << 8) | ((Int32)uc); GET_UCHAR(BZ_X_ORIGPTR_3, uc); s->origPtr = (s->origPtr << 8) | ((Int32)uc); if 
(s->origPtr < 0) RETURN(BZ_DATA_ERROR); if (s->origPtr > 10 + 100000*s->blockSize100k) RETURN(BZ_DATA_ERROR); /*--- Receive the mapping table ---*/ for (i = 0; i < 16; i++) { GET_BIT(BZ_X_MAPPING_1, uc); if (uc == 1) s->inUse16[i] = True; else s->inUse16[i] = False; } for (i = 0; i < 256; i++) s->inUse[i] = False; for (i = 0; i < 16; i++) if (s->inUse16[i]) for (j = 0; j < 16; j++) { GET_BIT(BZ_X_MAPPING_2, uc); if (uc == 1) s->inUse[i * 16 + j] = True; } makeMaps_d ( s ); if (s->nInUse == 0) RETURN(BZ_DATA_ERROR); alphaSize = s->nInUse+2; /*--- Now the selectors ---*/ GET_BITS(BZ_X_SELECTOR_1, nGroups, 3); if (nGroups < 2 || nGroups > 6) RETURN(BZ_DATA_ERROR); GET_BITS(BZ_X_SELECTOR_2, nSelectors, 15); if (nSelectors < 1 || nSelectors > BZ_MAX_SELECTORS) RETURN(BZ_DATA_ERROR); for (i = 0; i < nSelectors; i++) { j = 0; while (True) { GET_BIT(BZ_X_SELECTOR_3, uc); if (uc == 0) break; j++; if (j >= nGroups) RETURN(BZ_DATA_ERROR); } s->selectorMtf[i] = j; } /*--- Undo the MTF values for the selectors. ---*/ { UChar pos[BZ_N_GROUPS], tmp, v; for (v = 0; v < nGroups; v++) pos[v] = v; for (i = 0; i < nSelectors; i++) { v = s->selectorMtf[i]; tmp = pos[v]; while (v > 0) { pos[v] = pos[v-1]; v--; } pos[0] = tmp; s->selector[i] = tmp; } } /*--- Now the coding tables ---*/ for (t = 0; t < nGroups; t++) { GET_BITS(BZ_X_CODING_1, curr, 5); for (i = 0; i < alphaSize; i++) { while (True) { if (curr < 1 || curr > 20) RETURN(BZ_DATA_ERROR); GET_BIT(BZ_X_CODING_2, uc); if (uc == 0) break; GET_BIT(BZ_X_CODING_3, uc); if (uc == 0) curr++; else curr--; } s->len[t][i] = curr; } } /*--- Create the Huffman decoding tables ---*/ for (t = 0; t < nGroups; t++) { minLen = 32; maxLen = 0; for (i = 0; i < alphaSize; i++) { if (s->len[t][i] > maxLen) maxLen = s->len[t][i]; if (s->len[t][i] < minLen) minLen = s->len[t][i]; } BZ2_hbCreateDecodeTables ( &(s->limit[t][0]), &(s->base[t][0]), &(s->perm[t][0]), &(s->len[t][0]), minLen, maxLen, alphaSize ); s->minLens[t] = minLen; } /*--- Now the MTF values ---*/ EOB = s->nInUse+1; nblockMAX = 100000 * s->blockSize100k; groupNo = -1; groupPos = 0; for (i = 0; i <= 255; i++) s->unzftab[i] = 0; /*-- MTF init --*/ { Int32 ii, jj, kk; kk = MTFA_SIZE-1; for (ii = 256 / MTFL_SIZE - 1; ii >= 0; ii--) { for (jj = MTFL_SIZE-1; jj >= 0; jj--) { s->mtfa[kk] = (UChar)(ii * MTFL_SIZE + jj); kk--; } s->mtfbase[ii] = kk + 1; } } /*-- end MTF init --*/ nblock = 0; GET_MTF_VAL(BZ_X_MTF_1, BZ_X_MTF_2, nextSym); while (True) { if (nextSym == EOB) break; if (nextSym == BZ_RUNA || nextSym == BZ_RUNB) { es = -1; N = 1; do { /* Check that N doesn't get too big, so that es doesn't go negative. The maximum value that can be RUNA/RUNB encoded is equal to the block size (post the initial RLE), viz, 900k, so bounding N at 2 million should guard against overflow without rejecting any legitimate inputs. 
*/ if (N >= 2*1024*1024) RETURN(BZ_DATA_ERROR); if (nextSym == BZ_RUNA) es = es + (0+1) * N; else if (nextSym == BZ_RUNB) es = es + (1+1) * N; N = N * 2; GET_MTF_VAL(BZ_X_MTF_3, BZ_X_MTF_4, nextSym); } while (nextSym == BZ_RUNA || nextSym == BZ_RUNB); es++; uc = s->seqToUnseq[ s->mtfa[s->mtfbase[0]] ]; s->unzftab[uc] += es; if (s->smallDecompress) while (es > 0) { if (nblock >= nblockMAX) RETURN(BZ_DATA_ERROR); s->ll16[nblock] = (UInt16)uc; nblock++; es--; } else while (es > 0) { if (nblock >= nblockMAX) RETURN(BZ_DATA_ERROR); s->tt[nblock] = (UInt32)uc; nblock++; es--; }; continue; } else { if (nblock >= nblockMAX) RETURN(BZ_DATA_ERROR); /*-- uc = MTF ( nextSym-1 ) --*/ { Int32 ii, jj, kk, pp, lno, off; UInt32 nn; nn = (UInt32)(nextSym - 1); if (nn < MTFL_SIZE) { /* avoid general-case expense */ pp = s->mtfbase[0]; uc = s->mtfa[pp+nn]; while (nn > 3) { Int32 z = pp+nn; s->mtfa[(z) ] = s->mtfa[(z)-1]; s->mtfa[(z)-1] = s->mtfa[(z)-2]; s->mtfa[(z)-2] = s->mtfa[(z)-3]; s->mtfa[(z)-3] = s->mtfa[(z)-4]; nn -= 4; } while (nn > 0) { s->mtfa[(pp+nn)] = s->mtfa[(pp+nn)-1]; nn--; }; s->mtfa[pp] = uc; } else { /* general case */ lno = nn / MTFL_SIZE; off = nn % MTFL_SIZE; pp = s->mtfbase[lno] + off; uc = s->mtfa[pp]; while (pp > s->mtfbase[lno]) { s->mtfa[pp] = s->mtfa[pp-1]; pp--; }; s->mtfbase[lno]++; while (lno > 0) { s->mtfbase[lno]--; s->mtfa[s->mtfbase[lno]] = s->mtfa[s->mtfbase[lno-1] + MTFL_SIZE - 1]; lno--; } s->mtfbase[0]--; s->mtfa[s->mtfbase[0]] = uc; if (s->mtfbase[0] == 0) { kk = MTFA_SIZE-1; for (ii = 256 / MTFL_SIZE-1; ii >= 0; ii--) { for (jj = MTFL_SIZE-1; jj >= 0; jj--) { s->mtfa[kk] = s->mtfa[s->mtfbase[ii] + jj]; kk--; } s->mtfbase[ii] = kk + 1; } } } } /*-- end uc = MTF ( nextSym-1 ) --*/ s->unzftab[s->seqToUnseq[uc]]++; if (s->smallDecompress) s->ll16[nblock] = (UInt16)(s->seqToUnseq[uc]); else s->tt[nblock] = (UInt32)(s->seqToUnseq[uc]); nblock++; GET_MTF_VAL(BZ_X_MTF_5, BZ_X_MTF_6, nextSym); continue; } } /* Now we know what nblock is, we can do a better sanity check on s->origPtr. */ if (s->origPtr < 0 || s->origPtr >= nblock) RETURN(BZ_DATA_ERROR); /*-- Set up cftab to facilitate generation of T^(-1) --*/ /* Check: unzftab entries in range. */ for (i = 0; i <= 255; i++) { if (s->unzftab[i] < 0 || s->unzftab[i] > nblock) RETURN(BZ_DATA_ERROR); } /* Actually generate cftab. */ s->cftab[0] = 0; for (i = 1; i <= 256; i++) s->cftab[i] = s->unzftab[i-1]; for (i = 1; i <= 256; i++) s->cftab[i] += s->cftab[i-1]; /* Check: cftab entries in range. */ for (i = 0; i <= 256; i++) { if (s->cftab[i] < 0 || s->cftab[i] > nblock) { /* s->cftab[i] can legitimately be == nblock */ RETURN(BZ_DATA_ERROR); } } /* Check: cftab entries non-descending. 
*/ for (i = 1; i <= 256; i++) { if (s->cftab[i-1] > s->cftab[i]) { RETURN(BZ_DATA_ERROR); } } s->state_out_len = 0; s->state_out_ch = 0; BZ_INITIALISE_CRC ( s->calculatedBlockCRC ); s->state = BZ_X_OUTPUT; if (s->verbosity >= 2) VPrintf0 ( "rt+rld" ); if (s->smallDecompress) { /*-- Make a copy of cftab, used in generation of T --*/ for (i = 0; i <= 256; i++) s->cftabCopy[i] = s->cftab[i]; /*-- compute the T vector --*/ for (i = 0; i < nblock; i++) { uc = (UChar)(s->ll16[i]); SET_LL(i, s->cftabCopy[uc]); s->cftabCopy[uc]++; } /*-- Compute T^(-1) by pointer reversal on T --*/ i = s->origPtr; j = GET_LL(i); do { Int32 tmp = GET_LL(j); SET_LL(j, i); i = j; j = tmp; } while (i != s->origPtr); s->tPos = s->origPtr; s->nblock_used = 0; if (s->blockRandomised) { BZ_RAND_INIT_MASK; BZ_GET_SMALL(s->k0); s->nblock_used++; BZ_RAND_UPD_MASK; s->k0 ^= BZ_RAND_MASK; } else { BZ_GET_SMALL(s->k0); s->nblock_used++; } } else { /*-- compute the T^(-1) vector --*/ for (i = 0; i < nblock; i++) { uc = (UChar)(s->tt[i] & 0xff); s->tt[s->cftab[uc]] |= (i << 8); s->cftab[uc]++; } s->tPos = s->tt[s->origPtr] >> 8; s->nblock_used = 0; if (s->blockRandomised) { BZ_RAND_INIT_MASK; BZ_GET_FAST(s->k0); s->nblock_used++; BZ_RAND_UPD_MASK; s->k0 ^= BZ_RAND_MASK; } else { BZ_GET_FAST(s->k0); s->nblock_used++; } } RETURN(BZ_OK); endhdr_2: GET_UCHAR(BZ_X_ENDHDR_2, uc); if (uc != 0x72) RETURN(BZ_DATA_ERROR); GET_UCHAR(BZ_X_ENDHDR_3, uc); if (uc != 0x45) RETURN(BZ_DATA_ERROR); GET_UCHAR(BZ_X_ENDHDR_4, uc); if (uc != 0x38) RETURN(BZ_DATA_ERROR); GET_UCHAR(BZ_X_ENDHDR_5, uc); if (uc != 0x50) RETURN(BZ_DATA_ERROR); GET_UCHAR(BZ_X_ENDHDR_6, uc); if (uc != 0x90) RETURN(BZ_DATA_ERROR); s->storedCombinedCRC = 0; GET_UCHAR(BZ_X_CCRC_1, uc); s->storedCombinedCRC = (s->storedCombinedCRC << 8) | ((UInt32)uc); GET_UCHAR(BZ_X_CCRC_2, uc); s->storedCombinedCRC = (s->storedCombinedCRC << 8) | ((UInt32)uc); GET_UCHAR(BZ_X_CCRC_3, uc); s->storedCombinedCRC = (s->storedCombinedCRC << 8) | ((UInt32)uc); GET_UCHAR(BZ_X_CCRC_4, uc); s->storedCombinedCRC = (s->storedCombinedCRC << 8) | ((UInt32)uc); s->state = BZ_X_IDLE; RETURN(BZ_STREAM_END); default: AssertH ( False, 4001 ); } AssertH ( False, 4002 ); save_state_and_return: s->save_i = i; s->save_j = j; s->save_t = t; s->save_alphaSize = alphaSize; s->save_nGroups = nGroups; s->save_nSelectors = nSelectors; s->save_EOB = EOB; s->save_groupNo = groupNo; s->save_groupPos = groupPos; s->save_nextSym = nextSym; s->save_nblockMAX = nblockMAX; s->save_nblock = nblock; s->save_es = es; s->save_N = N; s->save_curr = curr; s->save_zt = zt; s->save_zn = zn; s->save_zvec = zvec; s->save_zj = zj; s->save_gSel = gSel; s->save_gMinlen = gMinlen; s->save_gLimit = gLimit; s->save_gBase = gBase; s->save_gPerm = gPerm; return retVal; }
{'added': [(290, ' if (nSelectors < 1 || nSelectors > BZ_MAX_SELECTORS) RETURN(BZ_DATA_ERROR);')], 'deleted': [(290, ' if (nSelectors < 1) RETURN(BZ_DATA_ERROR);')]}
1
1
447
3553
https://gitlab.com/federicomenaquintero/bzip2
CVE-2019-12900
['CWE-787']
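The one-line diff above is the whole CVE-2019-12900 fix: `nSelectors` is read as a 15-bit field (`GET_BITS(BZ_X_SELECTOR_2, nSelectors, 15)`), so crafted input can supply any value up to 32767, while the `s->selectorMtf[]` array the following loop writes into holds only `BZ_MAX_SELECTORS` entries; without the upper bound, the loop writes past the array, which is the CWE-787 out-of-bounds write recorded here. Below is a minimal standalone sketch of the added check. The `BZ_G_SIZE` and `BZ_MAX_SELECTORS` definitions are copied from upstream `bzlib_private.h`, and `selectors_in_range` is a hypothetical helper name for illustration, not a libbzip2 function.

/* Minimal sketch of the CVE-2019-12900 bound, assuming the upstream
 * constants from bzlib_private.h; selectors_in_range is a hypothetical
 * helper, not part of libbzip2. */
#include <stdio.h>

#define BZ_G_SIZE        50
#define BZ_MAX_SELECTORS (2 + (900000 / BZ_G_SIZE))   /* = 18002 */

/* nSelectors arrives as a 15-bit field (0..32767); selectorMtf[] has only
 * BZ_MAX_SELECTORS slots, so both bounds must hold before nSelectors is
 * used as a loop limit for writes into that array. */
static int selectors_in_range(int nSelectors)
{
    return nSelectors >= 1 && nSelectors <= BZ_MAX_SELECTORS;
}

int main(void)
{
    printf("%d\n", selectors_in_range(18002));  /* 1: largest value accepted */
    printf("%d\n", selectors_in_range(32767));  /* 0: rejected by the patched check */
    return 0;
}

The same bound appears verbatim in the `code_after` and `func_after` fields of the record above, where the unpatched `if (nSelectors < 1)` test becomes `if (nSelectors < 1 || nSelectors > BZ_MAX_SELECTORS)`.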
msnd_pinnacle.c
snd_msnd_interrupt
/********************************************************************* * * Linux multisound pinnacle/fiji driver for ALSA. * * 2002/06/30 Karsten Wiese: * for now this is only used to build a pinnacle / fiji driver. * the OSS parent of this code is designed to also support * the multisound classic via the file msnd_classic.c. * to make it easier for some brave heart to implemt classic * support in alsa, i left all the MSND_CLASSIC tokens in this file. * but for now this untested & undone. * * * ripped from linux kernel 2.4.18 by Karsten Wiese. * * the following is a copy of the 2.4.18 OSS FREE file-heading comment: * * Turtle Beach MultiSound Sound Card Driver for Linux * msnd_pinnacle.c / msnd_classic.c * * -- If MSND_CLASSIC is defined: * * -> driver for Turtle Beach Classic/Monterey/Tahiti * * -- Else * * -> driver for Turtle Beach Pinnacle/Fiji * * 12-3-2000 Modified IO port validation Steve Sycamore * * Copyright (C) 1998 Andrew Veliath * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * ********************************************************************/ #include <linux/kernel.h> #include <linux/module.h> #include <linux/interrupt.h> #include <linux/types.h> #include <linux/delay.h> #include <linux/ioport.h> #include <linux/firmware.h> #include <linux/isa.h> #include <linux/isapnp.h> #include <linux/irq.h> #include <linux/io.h> #include <sound/core.h> #include <sound/initval.h> #include <sound/asound.h> #include <sound/pcm.h> #include <sound/mpu401.h> #ifdef MSND_CLASSIC # ifndef __alpha__ # define SLOWIO # endif #endif #include "msnd.h" #ifdef MSND_CLASSIC # include "msnd_classic.h" # define LOGNAME "msnd_classic" # define DEV_NAME "msnd-classic" #else # include "msnd_pinnacle.h" # define LOGNAME "snd_msnd_pinnacle" # define DEV_NAME "msnd-pinnacle" #endif static void set_default_audio_parameters(struct snd_msnd *chip) { chip->play_sample_size = DEFSAMPLESIZE; chip->play_sample_rate = DEFSAMPLERATE; chip->play_channels = DEFCHANNELS; chip->capture_sample_size = DEFSAMPLESIZE; chip->capture_sample_rate = DEFSAMPLERATE; chip->capture_channels = DEFCHANNELS; } static void snd_msnd_eval_dsp_msg(struct snd_msnd *chip, u16 wMessage) { switch (HIBYTE(wMessage)) { case HIMT_PLAY_DONE: { if (chip->banksPlayed < 3) snd_printdd("%08X: HIMT_PLAY_DONE: %i\n", (unsigned)jiffies, LOBYTE(wMessage)); if (chip->last_playbank == LOBYTE(wMessage)) { snd_printdd("chip.last_playbank == LOBYTE(wMessage)\n"); break; } chip->banksPlayed++; if (test_bit(F_WRITING, &chip->flags)) snd_msnd_DAPQ(chip, 0); chip->last_playbank = LOBYTE(wMessage); chip->playDMAPos += chip->play_period_bytes; if (chip->playDMAPos > chip->playLimit) chip->playDMAPos = 0; snd_pcm_period_elapsed(chip->playback_substream); break; } case HIMT_RECORD_DONE: if (chip->last_recbank == LOBYTE(wMessage)) break; chip->last_recbank = LOBYTE(wMessage); chip->captureDMAPos += chip->capturePeriodBytes; if 
(chip->captureDMAPos > (chip->captureLimit)) chip->captureDMAPos = 0; if (test_bit(F_READING, &chip->flags)) snd_msnd_DARQ(chip, chip->last_recbank); snd_pcm_period_elapsed(chip->capture_substream); break; case HIMT_DSP: switch (LOBYTE(wMessage)) { #ifndef MSND_CLASSIC case HIDSP_PLAY_UNDER: #endif case HIDSP_INT_PLAY_UNDER: snd_printd(KERN_WARNING LOGNAME ": Play underflow %i\n", chip->banksPlayed); if (chip->banksPlayed > 2) clear_bit(F_WRITING, &chip->flags); break; case HIDSP_INT_RECORD_OVER: snd_printd(KERN_WARNING LOGNAME ": Record overflow\n"); clear_bit(F_READING, &chip->flags); break; default: snd_printd(KERN_WARNING LOGNAME ": DSP message %d 0x%02x\n", LOBYTE(wMessage), LOBYTE(wMessage)); break; } break; case HIMT_MIDI_IN_UCHAR: if (chip->msndmidi_mpu) snd_msndmidi_input_read(chip->msndmidi_mpu); break; default: snd_printd(KERN_WARNING LOGNAME ": HIMT message %d 0x%02x\n", HIBYTE(wMessage), HIBYTE(wMessage)); break; } } static irqreturn_t snd_msnd_interrupt(int irq, void *dev_id) { struct snd_msnd *chip = dev_id; void *pwDSPQData = chip->mappedbase + DSPQ_DATA_BUFF; /* Send ack to DSP */ /* inb(chip->io + HP_RXL); */ /* Evaluate queued DSP messages */ while (readw(chip->DSPQ + JQS_wTail) != readw(chip->DSPQ + JQS_wHead)) { u16 wTmp; snd_msnd_eval_dsp_msg(chip, readw(pwDSPQData + 2 * readw(chip->DSPQ + JQS_wHead))); wTmp = readw(chip->DSPQ + JQS_wHead) + 1; if (wTmp > readw(chip->DSPQ + JQS_wSize)) writew(0, chip->DSPQ + JQS_wHead); else writew(wTmp, chip->DSPQ + JQS_wHead); } /* Send ack to DSP */ inb(chip->io + HP_RXL); return IRQ_HANDLED; } static int snd_msnd_reset_dsp(long io, unsigned char *info) { int timeout = 100; outb(HPDSPRESET_ON, io + HP_DSPR); msleep(1); #ifndef MSND_CLASSIC if (info) *info = inb(io + HP_INFO); #endif outb(HPDSPRESET_OFF, io + HP_DSPR); msleep(1); while (timeout-- > 0) { if (inb(io + HP_CVR) == HP_CVR_DEF) return 0; msleep(1); } snd_printk(KERN_ERR LOGNAME ": Cannot reset DSP\n"); return -EIO; } static int snd_msnd_probe(struct snd_card *card) { struct snd_msnd *chip = card->private_data; unsigned char info; #ifndef MSND_CLASSIC char *xv, *rev = NULL; char *pin = "TB Pinnacle", *fiji = "TB Fiji"; char *pinfiji = "TB Pinnacle/Fiji"; #endif if (!request_region(chip->io, DSP_NUMIO, "probing")) { snd_printk(KERN_ERR LOGNAME ": I/O port conflict\n"); return -ENODEV; } if (snd_msnd_reset_dsp(chip->io, &info) < 0) { release_region(chip->io, DSP_NUMIO); return -ENODEV; } #ifdef MSND_CLASSIC strcpy(card->shortname, "Classic/Tahiti/Monterey"); strcpy(card->longname, "Turtle Beach Multisound"); printk(KERN_INFO LOGNAME ": %s, " "I/O 0x%lx-0x%lx, IRQ %d, memory mapped to 0x%lX-0x%lX\n", card->shortname, chip->io, chip->io + DSP_NUMIO - 1, chip->irq, chip->base, chip->base + 0x7fff); #else switch (info >> 4) { case 0xf: xv = "<= 1.15"; break; case 0x1: xv = "1.18/1.2"; break; case 0x2: xv = "1.3"; break; case 0x3: xv = "1.4"; break; default: xv = "unknown"; break; } switch (info & 0x7) { case 0x0: rev = "I"; strcpy(card->shortname, pin); break; case 0x1: rev = "F"; strcpy(card->shortname, pin); break; case 0x2: rev = "G"; strcpy(card->shortname, pin); break; case 0x3: rev = "H"; strcpy(card->shortname, pin); break; case 0x4: rev = "E"; strcpy(card->shortname, fiji); break; case 0x5: rev = "C"; strcpy(card->shortname, fiji); break; case 0x6: rev = "D"; strcpy(card->shortname, fiji); break; case 0x7: rev = "A-B (Fiji) or A-E (Pinnacle)"; strcpy(card->shortname, pinfiji); break; } strcpy(card->longname, "Turtle Beach Multisound Pinnacle"); printk(KERN_INFO LOGNAME 
": %s revision %s, Xilinx version %s, " "I/O 0x%lx-0x%lx, IRQ %d, memory mapped to 0x%lX-0x%lX\n", card->shortname, rev, xv, chip->io, chip->io + DSP_NUMIO - 1, chip->irq, chip->base, chip->base + 0x7fff); #endif release_region(chip->io, DSP_NUMIO); return 0; } static int snd_msnd_init_sma(struct snd_msnd *chip) { static int initted; u16 mastVolLeft, mastVolRight; unsigned long flags; #ifdef MSND_CLASSIC outb(chip->memid, chip->io + HP_MEMM); #endif outb(HPBLKSEL_0, chip->io + HP_BLKS); /* Motorola 56k shared memory base */ chip->SMA = chip->mappedbase + SMA_STRUCT_START; if (initted) { mastVolLeft = readw(chip->SMA + SMA_wCurrMastVolLeft); mastVolRight = readw(chip->SMA + SMA_wCurrMastVolRight); } else mastVolLeft = mastVolRight = 0; memset_io(chip->mappedbase, 0, 0x8000); /* Critical section: bank 1 access */ spin_lock_irqsave(&chip->lock, flags); outb(HPBLKSEL_1, chip->io + HP_BLKS); memset_io(chip->mappedbase, 0, 0x8000); outb(HPBLKSEL_0, chip->io + HP_BLKS); spin_unlock_irqrestore(&chip->lock, flags); /* Digital audio play queue */ chip->DAPQ = chip->mappedbase + DAPQ_OFFSET; snd_msnd_init_queue(chip->DAPQ, DAPQ_DATA_BUFF, DAPQ_BUFF_SIZE); /* Digital audio record queue */ chip->DARQ = chip->mappedbase + DARQ_OFFSET; snd_msnd_init_queue(chip->DARQ, DARQ_DATA_BUFF, DARQ_BUFF_SIZE); /* MIDI out queue */ chip->MODQ = chip->mappedbase + MODQ_OFFSET; snd_msnd_init_queue(chip->MODQ, MODQ_DATA_BUFF, MODQ_BUFF_SIZE); /* MIDI in queue */ chip->MIDQ = chip->mappedbase + MIDQ_OFFSET; snd_msnd_init_queue(chip->MIDQ, MIDQ_DATA_BUFF, MIDQ_BUFF_SIZE); /* DSP -> host message queue */ chip->DSPQ = chip->mappedbase + DSPQ_OFFSET; snd_msnd_init_queue(chip->DSPQ, DSPQ_DATA_BUFF, DSPQ_BUFF_SIZE); /* Setup some DSP values */ #ifndef MSND_CLASSIC writew(1, chip->SMA + SMA_wCurrPlayFormat); writew(chip->play_sample_size, chip->SMA + SMA_wCurrPlaySampleSize); writew(chip->play_channels, chip->SMA + SMA_wCurrPlayChannels); writew(chip->play_sample_rate, chip->SMA + SMA_wCurrPlaySampleRate); #endif writew(chip->play_sample_rate, chip->SMA + SMA_wCalFreqAtoD); writew(mastVolLeft, chip->SMA + SMA_wCurrMastVolLeft); writew(mastVolRight, chip->SMA + SMA_wCurrMastVolRight); #ifndef MSND_CLASSIC writel(0x00010000, chip->SMA + SMA_dwCurrPlayPitch); writel(0x00000001, chip->SMA + SMA_dwCurrPlayRate); #endif writew(0x303, chip->SMA + SMA_wCurrInputTagBits); initted = 1; return 0; } static int upload_dsp_code(struct snd_card *card) { struct snd_msnd *chip = card->private_data; const struct firmware *init_fw = NULL, *perm_fw = NULL; int err; outb(HPBLKSEL_0, chip->io + HP_BLKS); err = request_firmware(&init_fw, INITCODEFILE, card->dev); if (err < 0) { printk(KERN_ERR LOGNAME ": Error loading " INITCODEFILE); goto cleanup1; } err = request_firmware(&perm_fw, PERMCODEFILE, card->dev); if (err < 0) { printk(KERN_ERR LOGNAME ": Error loading " PERMCODEFILE); goto cleanup; } memcpy_toio(chip->mappedbase, perm_fw->data, perm_fw->size); if (snd_msnd_upload_host(chip, init_fw->data, init_fw->size) < 0) { printk(KERN_WARNING LOGNAME ": Error uploading to DSP\n"); err = -ENODEV; goto cleanup; } printk(KERN_INFO LOGNAME ": DSP firmware uploaded\n"); err = 0; cleanup: release_firmware(perm_fw); cleanup1: release_firmware(init_fw); return err; } #ifdef MSND_CLASSIC static void reset_proteus(struct snd_msnd *chip) { outb(HPPRORESET_ON, chip->io + HP_PROR); msleep(TIME_PRO_RESET); outb(HPPRORESET_OFF, chip->io + HP_PROR); msleep(TIME_PRO_RESET_DONE); } #endif static int snd_msnd_initialize(struct snd_card *card) { struct snd_msnd *chip = 
card->private_data; int err, timeout; #ifdef MSND_CLASSIC outb(HPWAITSTATE_0, chip->io + HP_WAIT); outb(HPBITMODE_16, chip->io + HP_BITM); reset_proteus(chip); #endif err = snd_msnd_init_sma(chip); if (err < 0) { printk(KERN_WARNING LOGNAME ": Cannot initialize SMA\n"); return err; } err = snd_msnd_reset_dsp(chip->io, NULL); if (err < 0) return err; err = upload_dsp_code(card); if (err < 0) { printk(KERN_WARNING LOGNAME ": Cannot upload DSP code\n"); return err; } timeout = 200; while (readw(chip->mappedbase)) { msleep(1); if (!timeout--) { snd_printd(KERN_ERR LOGNAME ": DSP reset timeout\n"); return -EIO; } } snd_msndmix_setup(chip); return 0; } static int snd_msnd_dsp_full_reset(struct snd_card *card) { struct snd_msnd *chip = card->private_data; int rv; if (test_bit(F_RESETTING, &chip->flags) || ++chip->nresets > 10) return 0; set_bit(F_RESETTING, &chip->flags); snd_msnd_dsp_halt(chip, NULL); /* Unconditionally halt */ rv = snd_msnd_initialize(card); if (rv) printk(KERN_WARNING LOGNAME ": DSP reset failed\n"); snd_msndmix_force_recsrc(chip, 0); clear_bit(F_RESETTING, &chip->flags); return rv; } static int snd_msnd_dev_free(struct snd_device *device) { snd_printdd("snd_msnd_chip_free()\n"); return 0; } static int snd_msnd_send_dsp_cmd_chk(struct snd_msnd *chip, u8 cmd) { if (snd_msnd_send_dsp_cmd(chip, cmd) == 0) return 0; snd_msnd_dsp_full_reset(chip->card); return snd_msnd_send_dsp_cmd(chip, cmd); } static int snd_msnd_calibrate_adc(struct snd_msnd *chip, u16 srate) { snd_printdd("snd_msnd_calibrate_adc(%i)\n", srate); writew(srate, chip->SMA + SMA_wCalFreqAtoD); if (chip->calibrate_signal == 0) writew(readw(chip->SMA + SMA_wCurrHostStatusFlags) | 0x0001, chip->SMA + SMA_wCurrHostStatusFlags); else writew(readw(chip->SMA + SMA_wCurrHostStatusFlags) & ~0x0001, chip->SMA + SMA_wCurrHostStatusFlags); if (snd_msnd_send_word(chip, 0, 0, HDEXAR_CAL_A_TO_D) == 0 && snd_msnd_send_dsp_cmd_chk(chip, HDEX_AUX_REQ) == 0) { schedule_timeout_interruptible(msecs_to_jiffies(333)); return 0; } printk(KERN_WARNING LOGNAME ": ADC calibration failed\n"); return -EIO; } /* * ALSA callback function, called when attempting to open the MIDI device. 
*/ static int snd_msnd_mpu401_open(struct snd_mpu401 *mpu) { snd_msnd_enable_irq(mpu->private_data); snd_msnd_send_dsp_cmd(mpu->private_data, HDEX_MIDI_IN_START); return 0; } static void snd_msnd_mpu401_close(struct snd_mpu401 *mpu) { snd_msnd_send_dsp_cmd(mpu->private_data, HDEX_MIDI_IN_STOP); snd_msnd_disable_irq(mpu->private_data); } static long mpu_io[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; static int mpu_irq[SNDRV_CARDS] = SNDRV_DEFAULT_IRQ; static int snd_msnd_attach(struct snd_card *card) { struct snd_msnd *chip = card->private_data; int err; static struct snd_device_ops ops = { .dev_free = snd_msnd_dev_free, }; err = request_irq(chip->irq, snd_msnd_interrupt, 0, card->shortname, chip); if (err < 0) { printk(KERN_ERR LOGNAME ": Couldn't grab IRQ %d\n", chip->irq); return err; } if (request_region(chip->io, DSP_NUMIO, card->shortname) == NULL) { free_irq(chip->irq, chip); return -EBUSY; } if (!request_mem_region(chip->base, BUFFSIZE, card->shortname)) { printk(KERN_ERR LOGNAME ": unable to grab memory region 0x%lx-0x%lx\n", chip->base, chip->base + BUFFSIZE - 1); release_region(chip->io, DSP_NUMIO); free_irq(chip->irq, chip); return -EBUSY; } chip->mappedbase = ioremap_nocache(chip->base, 0x8000); if (!chip->mappedbase) { printk(KERN_ERR LOGNAME ": unable to map memory region 0x%lx-0x%lx\n", chip->base, chip->base + BUFFSIZE - 1); err = -EIO; goto err_release_region; } err = snd_msnd_dsp_full_reset(card); if (err < 0) goto err_release_region; /* Register device */ err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops); if (err < 0) goto err_release_region; err = snd_msnd_pcm(card, 0); if (err < 0) { printk(KERN_ERR LOGNAME ": error creating new PCM device\n"); goto err_release_region; } err = snd_msndmix_new(card); if (err < 0) { printk(KERN_ERR LOGNAME ": error creating new Mixer device\n"); goto err_release_region; } if (mpu_io[0] != SNDRV_AUTO_PORT) { struct snd_mpu401 *mpu; err = snd_mpu401_uart_new(card, 0, MPU401_HW_MPU401, mpu_io[0], MPU401_MODE_INPUT | MPU401_MODE_OUTPUT, mpu_irq[0], &chip->rmidi); if (err < 0) { printk(KERN_ERR LOGNAME ": error creating new Midi device\n"); goto err_release_region; } mpu = chip->rmidi->private_data; mpu->open_input = snd_msnd_mpu401_open; mpu->close_input = snd_msnd_mpu401_close; mpu->private_data = chip; } disable_irq(chip->irq); snd_msnd_calibrate_adc(chip, chip->play_sample_rate); snd_msndmix_force_recsrc(chip, 0); err = snd_card_register(card); if (err < 0) goto err_release_region; return 0; err_release_region: iounmap(chip->mappedbase); release_mem_region(chip->base, BUFFSIZE); release_region(chip->io, DSP_NUMIO); free_irq(chip->irq, chip); return err; } static void snd_msnd_unload(struct snd_card *card) { struct snd_msnd *chip = card->private_data; iounmap(chip->mappedbase); release_mem_region(chip->base, BUFFSIZE); release_region(chip->io, DSP_NUMIO); free_irq(chip->irq, chip); snd_card_free(card); } #ifndef MSND_CLASSIC /* Pinnacle/Fiji Logical Device Configuration */ static int snd_msnd_write_cfg(int cfg, int reg, int value) { outb(reg, cfg); outb(value, cfg + 1); if (value != inb(cfg + 1)) { printk(KERN_ERR LOGNAME ": snd_msnd_write_cfg: I/O error\n"); return -EIO; } return 0; } static int snd_msnd_write_cfg_io0(int cfg, int num, u16 io) { if (snd_msnd_write_cfg(cfg, IREG_LOGDEVICE, num)) return -EIO; if (snd_msnd_write_cfg(cfg, IREG_IO0_BASEHI, HIBYTE(io))) return -EIO; if (snd_msnd_write_cfg(cfg, IREG_IO0_BASELO, LOBYTE(io))) return -EIO; return 0; } static int snd_msnd_write_cfg_io1(int cfg, int num, u16 io) { if 
(snd_msnd_write_cfg(cfg, IREG_LOGDEVICE, num)) return -EIO; if (snd_msnd_write_cfg(cfg, IREG_IO1_BASEHI, HIBYTE(io))) return -EIO; if (snd_msnd_write_cfg(cfg, IREG_IO1_BASELO, LOBYTE(io))) return -EIO; return 0; } static int snd_msnd_write_cfg_irq(int cfg, int num, u16 irq) { if (snd_msnd_write_cfg(cfg, IREG_LOGDEVICE, num)) return -EIO; if (snd_msnd_write_cfg(cfg, IREG_IRQ_NUMBER, LOBYTE(irq))) return -EIO; if (snd_msnd_write_cfg(cfg, IREG_IRQ_TYPE, IRQTYPE_EDGE)) return -EIO; return 0; } static int snd_msnd_write_cfg_mem(int cfg, int num, int mem) { u16 wmem; mem >>= 8; wmem = (u16)(mem & 0xfff); if (snd_msnd_write_cfg(cfg, IREG_LOGDEVICE, num)) return -EIO; if (snd_msnd_write_cfg(cfg, IREG_MEMBASEHI, HIBYTE(wmem))) return -EIO; if (snd_msnd_write_cfg(cfg, IREG_MEMBASELO, LOBYTE(wmem))) return -EIO; if (wmem && snd_msnd_write_cfg(cfg, IREG_MEMCONTROL, MEMTYPE_HIADDR | MEMTYPE_16BIT)) return -EIO; return 0; } static int snd_msnd_activate_logical(int cfg, int num) { if (snd_msnd_write_cfg(cfg, IREG_LOGDEVICE, num)) return -EIO; if (snd_msnd_write_cfg(cfg, IREG_ACTIVATE, LD_ACTIVATE)) return -EIO; return 0; } static int snd_msnd_write_cfg_logical(int cfg, int num, u16 io0, u16 io1, u16 irq, int mem) { if (snd_msnd_write_cfg(cfg, IREG_LOGDEVICE, num)) return -EIO; if (snd_msnd_write_cfg_io0(cfg, num, io0)) return -EIO; if (snd_msnd_write_cfg_io1(cfg, num, io1)) return -EIO; if (snd_msnd_write_cfg_irq(cfg, num, irq)) return -EIO; if (snd_msnd_write_cfg_mem(cfg, num, mem)) return -EIO; if (snd_msnd_activate_logical(cfg, num)) return -EIO; return 0; } static int snd_msnd_pinnacle_cfg_reset(int cfg) { int i; /* Reset devices if told to */ printk(KERN_INFO LOGNAME ": Resetting all devices\n"); for (i = 0; i < 4; ++i) if (snd_msnd_write_cfg_logical(cfg, i, 0, 0, 0, 0)) return -EIO; return 0; } #endif static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */ static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */ module_param_array(index, int, NULL, S_IRUGO); MODULE_PARM_DESC(index, "Index value for msnd_pinnacle soundcard."); module_param_array(id, charp, NULL, S_IRUGO); MODULE_PARM_DESC(id, "ID string for msnd_pinnacle soundcard."); static long io[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; static int irq[SNDRV_CARDS] = SNDRV_DEFAULT_IRQ; static long mem[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; #ifndef MSND_CLASSIC static long cfg[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; /* Extra Peripheral Configuration (Default: Disable) */ static long ide_io0[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; static long ide_io1[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; static int ide_irq[SNDRV_CARDS] = SNDRV_DEFAULT_IRQ; static long joystick_io[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; /* If we have the digital daugherboard... */ static int digital[SNDRV_CARDS]; /* Extra Peripheral Configuration */ static int reset[SNDRV_CARDS]; #endif static int write_ndelay[SNDRV_CARDS] = { [0 ... 
(SNDRV_CARDS-1)] = 1 }; static int calibrate_signal; #ifdef CONFIG_PNP static bool isapnp[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP; module_param_array(isapnp, bool, NULL, 0444); MODULE_PARM_DESC(isapnp, "ISA PnP detection for specified soundcard."); #define has_isapnp(x) isapnp[x] #else #define has_isapnp(x) 0 #endif MODULE_AUTHOR("Karsten Wiese <annabellesgarden@yahoo.de>"); MODULE_DESCRIPTION("Turtle Beach " LONGNAME " Linux Driver"); MODULE_LICENSE("GPL"); MODULE_FIRMWARE(INITCODEFILE); MODULE_FIRMWARE(PERMCODEFILE); module_param_hw_array(io, long, ioport, NULL, S_IRUGO); MODULE_PARM_DESC(io, "IO port #"); module_param_hw_array(irq, int, irq, NULL, S_IRUGO); module_param_hw_array(mem, long, iomem, NULL, S_IRUGO); module_param_array(write_ndelay, int, NULL, S_IRUGO); module_param(calibrate_signal, int, S_IRUGO); #ifndef MSND_CLASSIC module_param_array(digital, int, NULL, S_IRUGO); module_param_hw_array(cfg, long, ioport, NULL, S_IRUGO); module_param_array(reset, int, 0, S_IRUGO); module_param_hw_array(mpu_io, long, ioport, NULL, S_IRUGO); module_param_hw_array(mpu_irq, int, irq, NULL, S_IRUGO); module_param_hw_array(ide_io0, long, ioport, NULL, S_IRUGO); module_param_hw_array(ide_io1, long, ioport, NULL, S_IRUGO); module_param_hw_array(ide_irq, int, irq, NULL, S_IRUGO); module_param_hw_array(joystick_io, long, ioport, NULL, S_IRUGO); #endif static int snd_msnd_isa_match(struct device *pdev, unsigned int i) { if (io[i] == SNDRV_AUTO_PORT) return 0; if (irq[i] == SNDRV_AUTO_PORT || mem[i] == SNDRV_AUTO_PORT) { printk(KERN_WARNING LOGNAME ": io, irq and mem must be set\n"); return 0; } #ifdef MSND_CLASSIC if (!(io[i] == 0x290 || io[i] == 0x260 || io[i] == 0x250 || io[i] == 0x240 || io[i] == 0x230 || io[i] == 0x220 || io[i] == 0x210 || io[i] == 0x3e0)) { printk(KERN_ERR LOGNAME ": \"io\" - DSP I/O base must be set " " to 0x210, 0x220, 0x230, 0x240, 0x250, 0x260, 0x290, " "or 0x3E0\n"); return 0; } #else if (io[i] < 0x100 || io[i] > 0x3e0 || (io[i] % 0x10) != 0) { printk(KERN_ERR LOGNAME ": \"io\" - DSP I/O base must within the range 0x100 " "to 0x3E0 and must be evenly divisible by 0x10\n"); return 0; } #endif /* MSND_CLASSIC */ if (!(irq[i] == 5 || irq[i] == 7 || irq[i] == 9 || irq[i] == 10 || irq[i] == 11 || irq[i] == 12)) { printk(KERN_ERR LOGNAME ": \"irq\" - must be set to 5, 7, 9, 10, 11 or 12\n"); return 0; } if (!(mem[i] == 0xb0000 || mem[i] == 0xc8000 || mem[i] == 0xd0000 || mem[i] == 0xd8000 || mem[i] == 0xe0000 || mem[i] == 0xe8000)) { printk(KERN_ERR LOGNAME ": \"mem\" - must be set to " "0xb0000, 0xc8000, 0xd0000, 0xd8000, 0xe0000 or " "0xe8000\n"); return 0; } #ifndef MSND_CLASSIC if (cfg[i] == SNDRV_AUTO_PORT) { printk(KERN_INFO LOGNAME ": Assuming PnP mode\n"); } else if (cfg[i] != 0x250 && cfg[i] != 0x260 && cfg[i] != 0x270) { printk(KERN_INFO LOGNAME ": Config port must be 0x250, 0x260 or 0x270 " "(or unspecified for PnP mode)\n"); return 0; } #endif /* MSND_CLASSIC */ return 1; } static int snd_msnd_isa_probe(struct device *pdev, unsigned int idx) { int err; struct snd_card *card; struct snd_msnd *chip; if (has_isapnp(idx) #ifndef MSND_CLASSIC || cfg[idx] == SNDRV_AUTO_PORT #endif ) { printk(KERN_INFO LOGNAME ": Assuming PnP mode\n"); return -ENODEV; } err = snd_card_new(pdev, index[idx], id[idx], THIS_MODULE, sizeof(struct snd_msnd), &card); if (err < 0) return err; chip = card->private_data; chip->card = card; #ifdef MSND_CLASSIC switch (irq[idx]) { case 5: chip->irqid = HPIRQ_5; break; case 7: chip->irqid = HPIRQ_7; break; case 9: chip->irqid = HPIRQ_9; break; case 10: 
chip->irqid = HPIRQ_10; break; case 11: chip->irqid = HPIRQ_11; break; case 12: chip->irqid = HPIRQ_12; break; } switch (mem[idx]) { case 0xb0000: chip->memid = HPMEM_B000; break; case 0xc8000: chip->memid = HPMEM_C800; break; case 0xd0000: chip->memid = HPMEM_D000; break; case 0xd8000: chip->memid = HPMEM_D800; break; case 0xe0000: chip->memid = HPMEM_E000; break; case 0xe8000: chip->memid = HPMEM_E800; break; } #else printk(KERN_INFO LOGNAME ": Non-PnP mode: configuring at port 0x%lx\n", cfg[idx]); if (!request_region(cfg[idx], 2, "Pinnacle/Fiji Config")) { printk(KERN_ERR LOGNAME ": Config port 0x%lx conflict\n", cfg[idx]); snd_card_free(card); return -EIO; } if (reset[idx]) if (snd_msnd_pinnacle_cfg_reset(cfg[idx])) { err = -EIO; goto cfg_error; } /* DSP */ err = snd_msnd_write_cfg_logical(cfg[idx], 0, io[idx], 0, irq[idx], mem[idx]); if (err) goto cfg_error; /* The following are Pinnacle specific */ /* MPU */ if (mpu_io[idx] != SNDRV_AUTO_PORT && mpu_irq[idx] != SNDRV_AUTO_IRQ) { printk(KERN_INFO LOGNAME ": Configuring MPU to I/O 0x%lx IRQ %d\n", mpu_io[idx], mpu_irq[idx]); err = snd_msnd_write_cfg_logical(cfg[idx], 1, mpu_io[idx], 0, mpu_irq[idx], 0); if (err) goto cfg_error; } /* IDE */ if (ide_io0[idx] != SNDRV_AUTO_PORT && ide_io1[idx] != SNDRV_AUTO_PORT && ide_irq[idx] != SNDRV_AUTO_IRQ) { printk(KERN_INFO LOGNAME ": Configuring IDE to I/O 0x%lx, 0x%lx IRQ %d\n", ide_io0[idx], ide_io1[idx], ide_irq[idx]); err = snd_msnd_write_cfg_logical(cfg[idx], 2, ide_io0[idx], ide_io1[idx], ide_irq[idx], 0); if (err) goto cfg_error; } /* Joystick */ if (joystick_io[idx] != SNDRV_AUTO_PORT) { printk(KERN_INFO LOGNAME ": Configuring joystick to I/O 0x%lx\n", joystick_io[idx]); err = snd_msnd_write_cfg_logical(cfg[idx], 3, joystick_io[idx], 0, 0, 0); if (err) goto cfg_error; } release_region(cfg[idx], 2); #endif /* MSND_CLASSIC */ set_default_audio_parameters(chip); #ifdef MSND_CLASSIC chip->type = msndClassic; #else chip->type = msndPinnacle; #endif chip->io = io[idx]; chip->irq = irq[idx]; chip->base = mem[idx]; chip->calibrate_signal = calibrate_signal ? 1 : 0; chip->recsrc = 0; chip->dspq_data_buff = DSPQ_DATA_BUFF; chip->dspq_buff_size = DSPQ_BUFF_SIZE; if (write_ndelay[idx]) clear_bit(F_DISABLE_WRITE_NDELAY, &chip->flags); else set_bit(F_DISABLE_WRITE_NDELAY, &chip->flags); #ifndef MSND_CLASSIC if (digital[idx]) set_bit(F_HAVEDIGITAL, &chip->flags); #endif spin_lock_init(&chip->lock); err = snd_msnd_probe(card); if (err < 0) { printk(KERN_ERR LOGNAME ": Probe failed\n"); snd_card_free(card); return err; } err = snd_msnd_attach(card); if (err < 0) { printk(KERN_ERR LOGNAME ": Attach failed\n"); snd_card_free(card); return err; } dev_set_drvdata(pdev, card); return 0; #ifndef MSND_CLASSIC cfg_error: release_region(cfg[idx], 2); snd_card_free(card); return err; #endif } static int snd_msnd_isa_remove(struct device *pdev, unsigned int dev) { snd_msnd_unload(dev_get_drvdata(pdev)); return 0; } static struct isa_driver snd_msnd_driver = { .match = snd_msnd_isa_match, .probe = snd_msnd_isa_probe, .remove = snd_msnd_isa_remove, /* FIXME: suspend, resume */ .driver = { .name = DEV_NAME }, }; #ifdef CONFIG_PNP static int snd_msnd_pnp_detect(struct pnp_card_link *pcard, const struct pnp_card_device_id *pid) { static int idx; struct pnp_dev *pnp_dev; struct pnp_dev *mpu_dev; struct snd_card *card; struct snd_msnd *chip; int ret; for ( ; idx < SNDRV_CARDS; idx++) { if (has_isapnp(idx)) break; } if (idx >= SNDRV_CARDS) return -ENODEV; /* * Check that we still have room for another sound card ... 
*/ pnp_dev = pnp_request_card_device(pcard, pid->devs[0].id, NULL); if (!pnp_dev) return -ENODEV; mpu_dev = pnp_request_card_device(pcard, pid->devs[1].id, NULL); if (!mpu_dev) return -ENODEV; if (!pnp_is_active(pnp_dev) && pnp_activate_dev(pnp_dev) < 0) { printk(KERN_INFO "msnd_pinnacle: device is inactive\n"); return -EBUSY; } if (!pnp_is_active(mpu_dev) && pnp_activate_dev(mpu_dev) < 0) { printk(KERN_INFO "msnd_pinnacle: MPU device is inactive\n"); return -EBUSY; } /* * Create a new ALSA sound card entry, in anticipation * of detecting our hardware ... */ ret = snd_card_new(&pcard->card->dev, index[idx], id[idx], THIS_MODULE, sizeof(struct snd_msnd), &card); if (ret < 0) return ret; chip = card->private_data; chip->card = card; /* * Read the correct parameters off the ISA PnP bus ... */ io[idx] = pnp_port_start(pnp_dev, 0); irq[idx] = pnp_irq(pnp_dev, 0); mem[idx] = pnp_mem_start(pnp_dev, 0); mpu_io[idx] = pnp_port_start(mpu_dev, 0); mpu_irq[idx] = pnp_irq(mpu_dev, 0); set_default_audio_parameters(chip); #ifdef MSND_CLASSIC chip->type = msndClassic; #else chip->type = msndPinnacle; #endif chip->io = io[idx]; chip->irq = irq[idx]; chip->base = mem[idx]; chip->calibrate_signal = calibrate_signal ? 1 : 0; chip->recsrc = 0; chip->dspq_data_buff = DSPQ_DATA_BUFF; chip->dspq_buff_size = DSPQ_BUFF_SIZE; if (write_ndelay[idx]) clear_bit(F_DISABLE_WRITE_NDELAY, &chip->flags); else set_bit(F_DISABLE_WRITE_NDELAY, &chip->flags); #ifndef MSND_CLASSIC if (digital[idx]) set_bit(F_HAVEDIGITAL, &chip->flags); #endif spin_lock_init(&chip->lock); ret = snd_msnd_probe(card); if (ret < 0) { printk(KERN_ERR LOGNAME ": Probe failed\n"); goto _release_card; } ret = snd_msnd_attach(card); if (ret < 0) { printk(KERN_ERR LOGNAME ": Attach failed\n"); goto _release_card; } pnp_set_card_drvdata(pcard, card); ++idx; return 0; _release_card: snd_card_free(card); return ret; } static void snd_msnd_pnp_remove(struct pnp_card_link *pcard) { snd_msnd_unload(pnp_get_card_drvdata(pcard)); pnp_set_card_drvdata(pcard, NULL); } static int isa_registered; static int pnp_registered; static struct pnp_card_device_id msnd_pnpids[] = { /* Pinnacle PnP */ { .id = "BVJ0440", .devs = { { "TBS0000" }, { "TBS0001" } } }, { .id = "" } /* end */ }; MODULE_DEVICE_TABLE(pnp_card, msnd_pnpids); static struct pnp_card_driver msnd_pnpc_driver = { .flags = PNP_DRIVER_RES_DO_NOT_CHANGE, .name = "msnd_pinnacle", .id_table = msnd_pnpids, .probe = snd_msnd_pnp_detect, .remove = snd_msnd_pnp_remove, }; #endif /* CONFIG_PNP */ static int __init snd_msnd_init(void) { int err; err = isa_register_driver(&snd_msnd_driver, SNDRV_CARDS); #ifdef CONFIG_PNP if (!err) isa_registered = 1; err = pnp_register_card_driver(&msnd_pnpc_driver); if (!err) pnp_registered = 1; if (isa_registered) err = 0; #endif return err; } static void __exit snd_msnd_exit(void) { #ifdef CONFIG_PNP if (pnp_registered) pnp_unregister_card_driver(&msnd_pnpc_driver); if (isa_registered) #endif isa_unregister_driver(&snd_msnd_driver); } module_init(snd_msnd_init); module_exit(snd_msnd_exit);
/********************************************************************* * * Linux multisound pinnacle/fiji driver for ALSA. * * 2002/06/30 Karsten Wiese: * for now this is only used to build a pinnacle / fiji driver. * the OSS parent of this code is designed to also support * the multisound classic via the file msnd_classic.c. * to make it easier for some brave heart to implemt classic * support in alsa, i left all the MSND_CLASSIC tokens in this file. * but for now this untested & undone. * * * ripped from linux kernel 2.4.18 by Karsten Wiese. * * the following is a copy of the 2.4.18 OSS FREE file-heading comment: * * Turtle Beach MultiSound Sound Card Driver for Linux * msnd_pinnacle.c / msnd_classic.c * * -- If MSND_CLASSIC is defined: * * -> driver for Turtle Beach Classic/Monterey/Tahiti * * -- Else * * -> driver for Turtle Beach Pinnacle/Fiji * * 12-3-2000 Modified IO port validation Steve Sycamore * * Copyright (C) 1998 Andrew Veliath * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * ********************************************************************/ #include <linux/kernel.h> #include <linux/module.h> #include <linux/interrupt.h> #include <linux/types.h> #include <linux/delay.h> #include <linux/ioport.h> #include <linux/firmware.h> #include <linux/isa.h> #include <linux/isapnp.h> #include <linux/irq.h> #include <linux/io.h> #include <sound/core.h> #include <sound/initval.h> #include <sound/asound.h> #include <sound/pcm.h> #include <sound/mpu401.h> #ifdef MSND_CLASSIC # ifndef __alpha__ # define SLOWIO # endif #endif #include "msnd.h" #ifdef MSND_CLASSIC # include "msnd_classic.h" # define LOGNAME "msnd_classic" # define DEV_NAME "msnd-classic" #else # include "msnd_pinnacle.h" # define LOGNAME "snd_msnd_pinnacle" # define DEV_NAME "msnd-pinnacle" #endif static void set_default_audio_parameters(struct snd_msnd *chip) { chip->play_sample_size = DEFSAMPLESIZE; chip->play_sample_rate = DEFSAMPLERATE; chip->play_channels = DEFCHANNELS; chip->capture_sample_size = DEFSAMPLESIZE; chip->capture_sample_rate = DEFSAMPLERATE; chip->capture_channels = DEFCHANNELS; } static void snd_msnd_eval_dsp_msg(struct snd_msnd *chip, u16 wMessage) { switch (HIBYTE(wMessage)) { case HIMT_PLAY_DONE: { if (chip->banksPlayed < 3) snd_printdd("%08X: HIMT_PLAY_DONE: %i\n", (unsigned)jiffies, LOBYTE(wMessage)); if (chip->last_playbank == LOBYTE(wMessage)) { snd_printdd("chip.last_playbank == LOBYTE(wMessage)\n"); break; } chip->banksPlayed++; if (test_bit(F_WRITING, &chip->flags)) snd_msnd_DAPQ(chip, 0); chip->last_playbank = LOBYTE(wMessage); chip->playDMAPos += chip->play_period_bytes; if (chip->playDMAPos > chip->playLimit) chip->playDMAPos = 0; snd_pcm_period_elapsed(chip->playback_substream); break; } case HIMT_RECORD_DONE: if (chip->last_recbank == LOBYTE(wMessage)) break; chip->last_recbank = LOBYTE(wMessage); chip->captureDMAPos += chip->capturePeriodBytes; if 
(chip->captureDMAPos > (chip->captureLimit)) chip->captureDMAPos = 0; if (test_bit(F_READING, &chip->flags)) snd_msnd_DARQ(chip, chip->last_recbank); snd_pcm_period_elapsed(chip->capture_substream); break; case HIMT_DSP: switch (LOBYTE(wMessage)) { #ifndef MSND_CLASSIC case HIDSP_PLAY_UNDER: #endif case HIDSP_INT_PLAY_UNDER: snd_printd(KERN_WARNING LOGNAME ": Play underflow %i\n", chip->banksPlayed); if (chip->banksPlayed > 2) clear_bit(F_WRITING, &chip->flags); break; case HIDSP_INT_RECORD_OVER: snd_printd(KERN_WARNING LOGNAME ": Record overflow\n"); clear_bit(F_READING, &chip->flags); break; default: snd_printd(KERN_WARNING LOGNAME ": DSP message %d 0x%02x\n", LOBYTE(wMessage), LOBYTE(wMessage)); break; } break; case HIMT_MIDI_IN_UCHAR: if (chip->msndmidi_mpu) snd_msndmidi_input_read(chip->msndmidi_mpu); break; default: snd_printd(KERN_WARNING LOGNAME ": HIMT message %d 0x%02x\n", HIBYTE(wMessage), HIBYTE(wMessage)); break; } } static irqreturn_t snd_msnd_interrupt(int irq, void *dev_id) { struct snd_msnd *chip = dev_id; void *pwDSPQData = chip->mappedbase + DSPQ_DATA_BUFF; u16 head, tail, size; /* Send ack to DSP */ /* inb(chip->io + HP_RXL); */ /* Evaluate queued DSP messages */ head = readw(chip->DSPQ + JQS_wHead); tail = readw(chip->DSPQ + JQS_wTail); size = readw(chip->DSPQ + JQS_wSize); if (head > size || tail > size) goto out; while (head != tail) { snd_msnd_eval_dsp_msg(chip, readw(pwDSPQData + 2 * head)); if (++head > size) head = 0; writew(head, chip->DSPQ + JQS_wHead); } out: /* Send ack to DSP */ inb(chip->io + HP_RXL); return IRQ_HANDLED; } static int snd_msnd_reset_dsp(long io, unsigned char *info) { int timeout = 100; outb(HPDSPRESET_ON, io + HP_DSPR); msleep(1); #ifndef MSND_CLASSIC if (info) *info = inb(io + HP_INFO); #endif outb(HPDSPRESET_OFF, io + HP_DSPR); msleep(1); while (timeout-- > 0) { if (inb(io + HP_CVR) == HP_CVR_DEF) return 0; msleep(1); } snd_printk(KERN_ERR LOGNAME ": Cannot reset DSP\n"); return -EIO; } static int snd_msnd_probe(struct snd_card *card) { struct snd_msnd *chip = card->private_data; unsigned char info; #ifndef MSND_CLASSIC char *xv, *rev = NULL; char *pin = "TB Pinnacle", *fiji = "TB Fiji"; char *pinfiji = "TB Pinnacle/Fiji"; #endif if (!request_region(chip->io, DSP_NUMIO, "probing")) { snd_printk(KERN_ERR LOGNAME ": I/O port conflict\n"); return -ENODEV; } if (snd_msnd_reset_dsp(chip->io, &info) < 0) { release_region(chip->io, DSP_NUMIO); return -ENODEV; } #ifdef MSND_CLASSIC strcpy(card->shortname, "Classic/Tahiti/Monterey"); strcpy(card->longname, "Turtle Beach Multisound"); printk(KERN_INFO LOGNAME ": %s, " "I/O 0x%lx-0x%lx, IRQ %d, memory mapped to 0x%lX-0x%lX\n", card->shortname, chip->io, chip->io + DSP_NUMIO - 1, chip->irq, chip->base, chip->base + 0x7fff); #else switch (info >> 4) { case 0xf: xv = "<= 1.15"; break; case 0x1: xv = "1.18/1.2"; break; case 0x2: xv = "1.3"; break; case 0x3: xv = "1.4"; break; default: xv = "unknown"; break; } switch (info & 0x7) { case 0x0: rev = "I"; strcpy(card->shortname, pin); break; case 0x1: rev = "F"; strcpy(card->shortname, pin); break; case 0x2: rev = "G"; strcpy(card->shortname, pin); break; case 0x3: rev = "H"; strcpy(card->shortname, pin); break; case 0x4: rev = "E"; strcpy(card->shortname, fiji); break; case 0x5: rev = "C"; strcpy(card->shortname, fiji); break; case 0x6: rev = "D"; strcpy(card->shortname, fiji); break; case 0x7: rev = "A-B (Fiji) or A-E (Pinnacle)"; strcpy(card->shortname, pinfiji); break; } strcpy(card->longname, "Turtle Beach Multisound Pinnacle"); printk(KERN_INFO 
LOGNAME ": %s revision %s, Xilinx version %s, " "I/O 0x%lx-0x%lx, IRQ %d, memory mapped to 0x%lX-0x%lX\n", card->shortname, rev, xv, chip->io, chip->io + DSP_NUMIO - 1, chip->irq, chip->base, chip->base + 0x7fff); #endif release_region(chip->io, DSP_NUMIO); return 0; } static int snd_msnd_init_sma(struct snd_msnd *chip) { static int initted; u16 mastVolLeft, mastVolRight; unsigned long flags; #ifdef MSND_CLASSIC outb(chip->memid, chip->io + HP_MEMM); #endif outb(HPBLKSEL_0, chip->io + HP_BLKS); /* Motorola 56k shared memory base */ chip->SMA = chip->mappedbase + SMA_STRUCT_START; if (initted) { mastVolLeft = readw(chip->SMA + SMA_wCurrMastVolLeft); mastVolRight = readw(chip->SMA + SMA_wCurrMastVolRight); } else mastVolLeft = mastVolRight = 0; memset_io(chip->mappedbase, 0, 0x8000); /* Critical section: bank 1 access */ spin_lock_irqsave(&chip->lock, flags); outb(HPBLKSEL_1, chip->io + HP_BLKS); memset_io(chip->mappedbase, 0, 0x8000); outb(HPBLKSEL_0, chip->io + HP_BLKS); spin_unlock_irqrestore(&chip->lock, flags); /* Digital audio play queue */ chip->DAPQ = chip->mappedbase + DAPQ_OFFSET; snd_msnd_init_queue(chip->DAPQ, DAPQ_DATA_BUFF, DAPQ_BUFF_SIZE); /* Digital audio record queue */ chip->DARQ = chip->mappedbase + DARQ_OFFSET; snd_msnd_init_queue(chip->DARQ, DARQ_DATA_BUFF, DARQ_BUFF_SIZE); /* MIDI out queue */ chip->MODQ = chip->mappedbase + MODQ_OFFSET; snd_msnd_init_queue(chip->MODQ, MODQ_DATA_BUFF, MODQ_BUFF_SIZE); /* MIDI in queue */ chip->MIDQ = chip->mappedbase + MIDQ_OFFSET; snd_msnd_init_queue(chip->MIDQ, MIDQ_DATA_BUFF, MIDQ_BUFF_SIZE); /* DSP -> host message queue */ chip->DSPQ = chip->mappedbase + DSPQ_OFFSET; snd_msnd_init_queue(chip->DSPQ, DSPQ_DATA_BUFF, DSPQ_BUFF_SIZE); /* Setup some DSP values */ #ifndef MSND_CLASSIC writew(1, chip->SMA + SMA_wCurrPlayFormat); writew(chip->play_sample_size, chip->SMA + SMA_wCurrPlaySampleSize); writew(chip->play_channels, chip->SMA + SMA_wCurrPlayChannels); writew(chip->play_sample_rate, chip->SMA + SMA_wCurrPlaySampleRate); #endif writew(chip->play_sample_rate, chip->SMA + SMA_wCalFreqAtoD); writew(mastVolLeft, chip->SMA + SMA_wCurrMastVolLeft); writew(mastVolRight, chip->SMA + SMA_wCurrMastVolRight); #ifndef MSND_CLASSIC writel(0x00010000, chip->SMA + SMA_dwCurrPlayPitch); writel(0x00000001, chip->SMA + SMA_dwCurrPlayRate); #endif writew(0x303, chip->SMA + SMA_wCurrInputTagBits); initted = 1; return 0; } static int upload_dsp_code(struct snd_card *card) { struct snd_msnd *chip = card->private_data; const struct firmware *init_fw = NULL, *perm_fw = NULL; int err; outb(HPBLKSEL_0, chip->io + HP_BLKS); err = request_firmware(&init_fw, INITCODEFILE, card->dev); if (err < 0) { printk(KERN_ERR LOGNAME ": Error loading " INITCODEFILE); goto cleanup1; } err = request_firmware(&perm_fw, PERMCODEFILE, card->dev); if (err < 0) { printk(KERN_ERR LOGNAME ": Error loading " PERMCODEFILE); goto cleanup; } memcpy_toio(chip->mappedbase, perm_fw->data, perm_fw->size); if (snd_msnd_upload_host(chip, init_fw->data, init_fw->size) < 0) { printk(KERN_WARNING LOGNAME ": Error uploading to DSP\n"); err = -ENODEV; goto cleanup; } printk(KERN_INFO LOGNAME ": DSP firmware uploaded\n"); err = 0; cleanup: release_firmware(perm_fw); cleanup1: release_firmware(init_fw); return err; } #ifdef MSND_CLASSIC static void reset_proteus(struct snd_msnd *chip) { outb(HPPRORESET_ON, chip->io + HP_PROR); msleep(TIME_PRO_RESET); outb(HPPRORESET_OFF, chip->io + HP_PROR); msleep(TIME_PRO_RESET_DONE); } #endif static int snd_msnd_initialize(struct snd_card *card) { struct snd_msnd 
*chip = card->private_data; int err, timeout; #ifdef MSND_CLASSIC outb(HPWAITSTATE_0, chip->io + HP_WAIT); outb(HPBITMODE_16, chip->io + HP_BITM); reset_proteus(chip); #endif err = snd_msnd_init_sma(chip); if (err < 0) { printk(KERN_WARNING LOGNAME ": Cannot initialize SMA\n"); return err; } err = snd_msnd_reset_dsp(chip->io, NULL); if (err < 0) return err; err = upload_dsp_code(card); if (err < 0) { printk(KERN_WARNING LOGNAME ": Cannot upload DSP code\n"); return err; } timeout = 200; while (readw(chip->mappedbase)) { msleep(1); if (!timeout--) { snd_printd(KERN_ERR LOGNAME ": DSP reset timeout\n"); return -EIO; } } snd_msndmix_setup(chip); return 0; } static int snd_msnd_dsp_full_reset(struct snd_card *card) { struct snd_msnd *chip = card->private_data; int rv; if (test_bit(F_RESETTING, &chip->flags) || ++chip->nresets > 10) return 0; set_bit(F_RESETTING, &chip->flags); snd_msnd_dsp_halt(chip, NULL); /* Unconditionally halt */ rv = snd_msnd_initialize(card); if (rv) printk(KERN_WARNING LOGNAME ": DSP reset failed\n"); snd_msndmix_force_recsrc(chip, 0); clear_bit(F_RESETTING, &chip->flags); return rv; } static int snd_msnd_dev_free(struct snd_device *device) { snd_printdd("snd_msnd_chip_free()\n"); return 0; } static int snd_msnd_send_dsp_cmd_chk(struct snd_msnd *chip, u8 cmd) { if (snd_msnd_send_dsp_cmd(chip, cmd) == 0) return 0; snd_msnd_dsp_full_reset(chip->card); return snd_msnd_send_dsp_cmd(chip, cmd); } static int snd_msnd_calibrate_adc(struct snd_msnd *chip, u16 srate) { snd_printdd("snd_msnd_calibrate_adc(%i)\n", srate); writew(srate, chip->SMA + SMA_wCalFreqAtoD); if (chip->calibrate_signal == 0) writew(readw(chip->SMA + SMA_wCurrHostStatusFlags) | 0x0001, chip->SMA + SMA_wCurrHostStatusFlags); else writew(readw(chip->SMA + SMA_wCurrHostStatusFlags) & ~0x0001, chip->SMA + SMA_wCurrHostStatusFlags); if (snd_msnd_send_word(chip, 0, 0, HDEXAR_CAL_A_TO_D) == 0 && snd_msnd_send_dsp_cmd_chk(chip, HDEX_AUX_REQ) == 0) { schedule_timeout_interruptible(msecs_to_jiffies(333)); return 0; } printk(KERN_WARNING LOGNAME ": ADC calibration failed\n"); return -EIO; } /* * ALSA callback function, called when attempting to open the MIDI device. 
*/ static int snd_msnd_mpu401_open(struct snd_mpu401 *mpu) { snd_msnd_enable_irq(mpu->private_data); snd_msnd_send_dsp_cmd(mpu->private_data, HDEX_MIDI_IN_START); return 0; } static void snd_msnd_mpu401_close(struct snd_mpu401 *mpu) { snd_msnd_send_dsp_cmd(mpu->private_data, HDEX_MIDI_IN_STOP); snd_msnd_disable_irq(mpu->private_data); } static long mpu_io[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; static int mpu_irq[SNDRV_CARDS] = SNDRV_DEFAULT_IRQ; static int snd_msnd_attach(struct snd_card *card) { struct snd_msnd *chip = card->private_data; int err; static struct snd_device_ops ops = { .dev_free = snd_msnd_dev_free, }; err = request_irq(chip->irq, snd_msnd_interrupt, 0, card->shortname, chip); if (err < 0) { printk(KERN_ERR LOGNAME ": Couldn't grab IRQ %d\n", chip->irq); return err; } if (request_region(chip->io, DSP_NUMIO, card->shortname) == NULL) { free_irq(chip->irq, chip); return -EBUSY; } if (!request_mem_region(chip->base, BUFFSIZE, card->shortname)) { printk(KERN_ERR LOGNAME ": unable to grab memory region 0x%lx-0x%lx\n", chip->base, chip->base + BUFFSIZE - 1); release_region(chip->io, DSP_NUMIO); free_irq(chip->irq, chip); return -EBUSY; } chip->mappedbase = ioremap_nocache(chip->base, 0x8000); if (!chip->mappedbase) { printk(KERN_ERR LOGNAME ": unable to map memory region 0x%lx-0x%lx\n", chip->base, chip->base + BUFFSIZE - 1); err = -EIO; goto err_release_region; } err = snd_msnd_dsp_full_reset(card); if (err < 0) goto err_release_region; /* Register device */ err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops); if (err < 0) goto err_release_region; err = snd_msnd_pcm(card, 0); if (err < 0) { printk(KERN_ERR LOGNAME ": error creating new PCM device\n"); goto err_release_region; } err = snd_msndmix_new(card); if (err < 0) { printk(KERN_ERR LOGNAME ": error creating new Mixer device\n"); goto err_release_region; } if (mpu_io[0] != SNDRV_AUTO_PORT) { struct snd_mpu401 *mpu; err = snd_mpu401_uart_new(card, 0, MPU401_HW_MPU401, mpu_io[0], MPU401_MODE_INPUT | MPU401_MODE_OUTPUT, mpu_irq[0], &chip->rmidi); if (err < 0) { printk(KERN_ERR LOGNAME ": error creating new Midi device\n"); goto err_release_region; } mpu = chip->rmidi->private_data; mpu->open_input = snd_msnd_mpu401_open; mpu->close_input = snd_msnd_mpu401_close; mpu->private_data = chip; } disable_irq(chip->irq); snd_msnd_calibrate_adc(chip, chip->play_sample_rate); snd_msndmix_force_recsrc(chip, 0); err = snd_card_register(card); if (err < 0) goto err_release_region; return 0; err_release_region: iounmap(chip->mappedbase); release_mem_region(chip->base, BUFFSIZE); release_region(chip->io, DSP_NUMIO); free_irq(chip->irq, chip); return err; } static void snd_msnd_unload(struct snd_card *card) { struct snd_msnd *chip = card->private_data; iounmap(chip->mappedbase); release_mem_region(chip->base, BUFFSIZE); release_region(chip->io, DSP_NUMIO); free_irq(chip->irq, chip); snd_card_free(card); } #ifndef MSND_CLASSIC /* Pinnacle/Fiji Logical Device Configuration */ static int snd_msnd_write_cfg(int cfg, int reg, int value) { outb(reg, cfg); outb(value, cfg + 1); if (value != inb(cfg + 1)) { printk(KERN_ERR LOGNAME ": snd_msnd_write_cfg: I/O error\n"); return -EIO; } return 0; } static int snd_msnd_write_cfg_io0(int cfg, int num, u16 io) { if (snd_msnd_write_cfg(cfg, IREG_LOGDEVICE, num)) return -EIO; if (snd_msnd_write_cfg(cfg, IREG_IO0_BASEHI, HIBYTE(io))) return -EIO; if (snd_msnd_write_cfg(cfg, IREG_IO0_BASELO, LOBYTE(io))) return -EIO; return 0; } static int snd_msnd_write_cfg_io1(int cfg, int num, u16 io) { if 
(snd_msnd_write_cfg(cfg, IREG_LOGDEVICE, num)) return -EIO; if (snd_msnd_write_cfg(cfg, IREG_IO1_BASEHI, HIBYTE(io))) return -EIO; if (snd_msnd_write_cfg(cfg, IREG_IO1_BASELO, LOBYTE(io))) return -EIO; return 0; } static int snd_msnd_write_cfg_irq(int cfg, int num, u16 irq) { if (snd_msnd_write_cfg(cfg, IREG_LOGDEVICE, num)) return -EIO; if (snd_msnd_write_cfg(cfg, IREG_IRQ_NUMBER, LOBYTE(irq))) return -EIO; if (snd_msnd_write_cfg(cfg, IREG_IRQ_TYPE, IRQTYPE_EDGE)) return -EIO; return 0; } static int snd_msnd_write_cfg_mem(int cfg, int num, int mem) { u16 wmem; mem >>= 8; wmem = (u16)(mem & 0xfff); if (snd_msnd_write_cfg(cfg, IREG_LOGDEVICE, num)) return -EIO; if (snd_msnd_write_cfg(cfg, IREG_MEMBASEHI, HIBYTE(wmem))) return -EIO; if (snd_msnd_write_cfg(cfg, IREG_MEMBASELO, LOBYTE(wmem))) return -EIO; if (wmem && snd_msnd_write_cfg(cfg, IREG_MEMCONTROL, MEMTYPE_HIADDR | MEMTYPE_16BIT)) return -EIO; return 0; } static int snd_msnd_activate_logical(int cfg, int num) { if (snd_msnd_write_cfg(cfg, IREG_LOGDEVICE, num)) return -EIO; if (snd_msnd_write_cfg(cfg, IREG_ACTIVATE, LD_ACTIVATE)) return -EIO; return 0; } static int snd_msnd_write_cfg_logical(int cfg, int num, u16 io0, u16 io1, u16 irq, int mem) { if (snd_msnd_write_cfg(cfg, IREG_LOGDEVICE, num)) return -EIO; if (snd_msnd_write_cfg_io0(cfg, num, io0)) return -EIO; if (snd_msnd_write_cfg_io1(cfg, num, io1)) return -EIO; if (snd_msnd_write_cfg_irq(cfg, num, irq)) return -EIO; if (snd_msnd_write_cfg_mem(cfg, num, mem)) return -EIO; if (snd_msnd_activate_logical(cfg, num)) return -EIO; return 0; } static int snd_msnd_pinnacle_cfg_reset(int cfg) { int i; /* Reset devices if told to */ printk(KERN_INFO LOGNAME ": Resetting all devices\n"); for (i = 0; i < 4; ++i) if (snd_msnd_write_cfg_logical(cfg, i, 0, 0, 0, 0)) return -EIO; return 0; } #endif static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */ static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */ module_param_array(index, int, NULL, S_IRUGO); MODULE_PARM_DESC(index, "Index value for msnd_pinnacle soundcard."); module_param_array(id, charp, NULL, S_IRUGO); MODULE_PARM_DESC(id, "ID string for msnd_pinnacle soundcard."); static long io[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; static int irq[SNDRV_CARDS] = SNDRV_DEFAULT_IRQ; static long mem[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; #ifndef MSND_CLASSIC static long cfg[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; /* Extra Peripheral Configuration (Default: Disable) */ static long ide_io0[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; static long ide_io1[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; static int ide_irq[SNDRV_CARDS] = SNDRV_DEFAULT_IRQ; static long joystick_io[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; /* If we have the digital daugherboard... */ static int digital[SNDRV_CARDS]; /* Extra Peripheral Configuration */ static int reset[SNDRV_CARDS]; #endif static int write_ndelay[SNDRV_CARDS] = { [0 ... 
(SNDRV_CARDS-1)] = 1 }; static int calibrate_signal; #ifdef CONFIG_PNP static bool isapnp[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP; module_param_array(isapnp, bool, NULL, 0444); MODULE_PARM_DESC(isapnp, "ISA PnP detection for specified soundcard."); #define has_isapnp(x) isapnp[x] #else #define has_isapnp(x) 0 #endif MODULE_AUTHOR("Karsten Wiese <annabellesgarden@yahoo.de>"); MODULE_DESCRIPTION("Turtle Beach " LONGNAME " Linux Driver"); MODULE_LICENSE("GPL"); MODULE_FIRMWARE(INITCODEFILE); MODULE_FIRMWARE(PERMCODEFILE); module_param_hw_array(io, long, ioport, NULL, S_IRUGO); MODULE_PARM_DESC(io, "IO port #"); module_param_hw_array(irq, int, irq, NULL, S_IRUGO); module_param_hw_array(mem, long, iomem, NULL, S_IRUGO); module_param_array(write_ndelay, int, NULL, S_IRUGO); module_param(calibrate_signal, int, S_IRUGO); #ifndef MSND_CLASSIC module_param_array(digital, int, NULL, S_IRUGO); module_param_hw_array(cfg, long, ioport, NULL, S_IRUGO); module_param_array(reset, int, 0, S_IRUGO); module_param_hw_array(mpu_io, long, ioport, NULL, S_IRUGO); module_param_hw_array(mpu_irq, int, irq, NULL, S_IRUGO); module_param_hw_array(ide_io0, long, ioport, NULL, S_IRUGO); module_param_hw_array(ide_io1, long, ioport, NULL, S_IRUGO); module_param_hw_array(ide_irq, int, irq, NULL, S_IRUGO); module_param_hw_array(joystick_io, long, ioport, NULL, S_IRUGO); #endif static int snd_msnd_isa_match(struct device *pdev, unsigned int i) { if (io[i] == SNDRV_AUTO_PORT) return 0; if (irq[i] == SNDRV_AUTO_PORT || mem[i] == SNDRV_AUTO_PORT) { printk(KERN_WARNING LOGNAME ": io, irq and mem must be set\n"); return 0; } #ifdef MSND_CLASSIC if (!(io[i] == 0x290 || io[i] == 0x260 || io[i] == 0x250 || io[i] == 0x240 || io[i] == 0x230 || io[i] == 0x220 || io[i] == 0x210 || io[i] == 0x3e0)) { printk(KERN_ERR LOGNAME ": \"io\" - DSP I/O base must be set " " to 0x210, 0x220, 0x230, 0x240, 0x250, 0x260, 0x290, " "or 0x3E0\n"); return 0; } #else if (io[i] < 0x100 || io[i] > 0x3e0 || (io[i] % 0x10) != 0) { printk(KERN_ERR LOGNAME ": \"io\" - DSP I/O base must within the range 0x100 " "to 0x3E0 and must be evenly divisible by 0x10\n"); return 0; } #endif /* MSND_CLASSIC */ if (!(irq[i] == 5 || irq[i] == 7 || irq[i] == 9 || irq[i] == 10 || irq[i] == 11 || irq[i] == 12)) { printk(KERN_ERR LOGNAME ": \"irq\" - must be set to 5, 7, 9, 10, 11 or 12\n"); return 0; } if (!(mem[i] == 0xb0000 || mem[i] == 0xc8000 || mem[i] == 0xd0000 || mem[i] == 0xd8000 || mem[i] == 0xe0000 || mem[i] == 0xe8000)) { printk(KERN_ERR LOGNAME ": \"mem\" - must be set to " "0xb0000, 0xc8000, 0xd0000, 0xd8000, 0xe0000 or " "0xe8000\n"); return 0; } #ifndef MSND_CLASSIC if (cfg[i] == SNDRV_AUTO_PORT) { printk(KERN_INFO LOGNAME ": Assuming PnP mode\n"); } else if (cfg[i] != 0x250 && cfg[i] != 0x260 && cfg[i] != 0x270) { printk(KERN_INFO LOGNAME ": Config port must be 0x250, 0x260 or 0x270 " "(or unspecified for PnP mode)\n"); return 0; } #endif /* MSND_CLASSIC */ return 1; } static int snd_msnd_isa_probe(struct device *pdev, unsigned int idx) { int err; struct snd_card *card; struct snd_msnd *chip; if (has_isapnp(idx) #ifndef MSND_CLASSIC || cfg[idx] == SNDRV_AUTO_PORT #endif ) { printk(KERN_INFO LOGNAME ": Assuming PnP mode\n"); return -ENODEV; } err = snd_card_new(pdev, index[idx], id[idx], THIS_MODULE, sizeof(struct snd_msnd), &card); if (err < 0) return err; chip = card->private_data; chip->card = card; #ifdef MSND_CLASSIC switch (irq[idx]) { case 5: chip->irqid = HPIRQ_5; break; case 7: chip->irqid = HPIRQ_7; break; case 9: chip->irqid = HPIRQ_9; break; case 10: 
chip->irqid = HPIRQ_10; break; case 11: chip->irqid = HPIRQ_11; break; case 12: chip->irqid = HPIRQ_12; break; } switch (mem[idx]) { case 0xb0000: chip->memid = HPMEM_B000; break; case 0xc8000: chip->memid = HPMEM_C800; break; case 0xd0000: chip->memid = HPMEM_D000; break; case 0xd8000: chip->memid = HPMEM_D800; break; case 0xe0000: chip->memid = HPMEM_E000; break; case 0xe8000: chip->memid = HPMEM_E800; break; } #else printk(KERN_INFO LOGNAME ": Non-PnP mode: configuring at port 0x%lx\n", cfg[idx]); if (!request_region(cfg[idx], 2, "Pinnacle/Fiji Config")) { printk(KERN_ERR LOGNAME ": Config port 0x%lx conflict\n", cfg[idx]); snd_card_free(card); return -EIO; } if (reset[idx]) if (snd_msnd_pinnacle_cfg_reset(cfg[idx])) { err = -EIO; goto cfg_error; } /* DSP */ err = snd_msnd_write_cfg_logical(cfg[idx], 0, io[idx], 0, irq[idx], mem[idx]); if (err) goto cfg_error; /* The following are Pinnacle specific */ /* MPU */ if (mpu_io[idx] != SNDRV_AUTO_PORT && mpu_irq[idx] != SNDRV_AUTO_IRQ) { printk(KERN_INFO LOGNAME ": Configuring MPU to I/O 0x%lx IRQ %d\n", mpu_io[idx], mpu_irq[idx]); err = snd_msnd_write_cfg_logical(cfg[idx], 1, mpu_io[idx], 0, mpu_irq[idx], 0); if (err) goto cfg_error; } /* IDE */ if (ide_io0[idx] != SNDRV_AUTO_PORT && ide_io1[idx] != SNDRV_AUTO_PORT && ide_irq[idx] != SNDRV_AUTO_IRQ) { printk(KERN_INFO LOGNAME ": Configuring IDE to I/O 0x%lx, 0x%lx IRQ %d\n", ide_io0[idx], ide_io1[idx], ide_irq[idx]); err = snd_msnd_write_cfg_logical(cfg[idx], 2, ide_io0[idx], ide_io1[idx], ide_irq[idx], 0); if (err) goto cfg_error; } /* Joystick */ if (joystick_io[idx] != SNDRV_AUTO_PORT) { printk(KERN_INFO LOGNAME ": Configuring joystick to I/O 0x%lx\n", joystick_io[idx]); err = snd_msnd_write_cfg_logical(cfg[idx], 3, joystick_io[idx], 0, 0, 0); if (err) goto cfg_error; } release_region(cfg[idx], 2); #endif /* MSND_CLASSIC */ set_default_audio_parameters(chip); #ifdef MSND_CLASSIC chip->type = msndClassic; #else chip->type = msndPinnacle; #endif chip->io = io[idx]; chip->irq = irq[idx]; chip->base = mem[idx]; chip->calibrate_signal = calibrate_signal ? 1 : 0; chip->recsrc = 0; chip->dspq_data_buff = DSPQ_DATA_BUFF; chip->dspq_buff_size = DSPQ_BUFF_SIZE; if (write_ndelay[idx]) clear_bit(F_DISABLE_WRITE_NDELAY, &chip->flags); else set_bit(F_DISABLE_WRITE_NDELAY, &chip->flags); #ifndef MSND_CLASSIC if (digital[idx]) set_bit(F_HAVEDIGITAL, &chip->flags); #endif spin_lock_init(&chip->lock); err = snd_msnd_probe(card); if (err < 0) { printk(KERN_ERR LOGNAME ": Probe failed\n"); snd_card_free(card); return err; } err = snd_msnd_attach(card); if (err < 0) { printk(KERN_ERR LOGNAME ": Attach failed\n"); snd_card_free(card); return err; } dev_set_drvdata(pdev, card); return 0; #ifndef MSND_CLASSIC cfg_error: release_region(cfg[idx], 2); snd_card_free(card); return err; #endif } static int snd_msnd_isa_remove(struct device *pdev, unsigned int dev) { snd_msnd_unload(dev_get_drvdata(pdev)); return 0; } static struct isa_driver snd_msnd_driver = { .match = snd_msnd_isa_match, .probe = snd_msnd_isa_probe, .remove = snd_msnd_isa_remove, /* FIXME: suspend, resume */ .driver = { .name = DEV_NAME }, }; #ifdef CONFIG_PNP static int snd_msnd_pnp_detect(struct pnp_card_link *pcard, const struct pnp_card_device_id *pid) { static int idx; struct pnp_dev *pnp_dev; struct pnp_dev *mpu_dev; struct snd_card *card; struct snd_msnd *chip; int ret; for ( ; idx < SNDRV_CARDS; idx++) { if (has_isapnp(idx)) break; } if (idx >= SNDRV_CARDS) return -ENODEV; /* * Check that we still have room for another sound card ... 
*/ pnp_dev = pnp_request_card_device(pcard, pid->devs[0].id, NULL); if (!pnp_dev) return -ENODEV; mpu_dev = pnp_request_card_device(pcard, pid->devs[1].id, NULL); if (!mpu_dev) return -ENODEV; if (!pnp_is_active(pnp_dev) && pnp_activate_dev(pnp_dev) < 0) { printk(KERN_INFO "msnd_pinnacle: device is inactive\n"); return -EBUSY; } if (!pnp_is_active(mpu_dev) && pnp_activate_dev(mpu_dev) < 0) { printk(KERN_INFO "msnd_pinnacle: MPU device is inactive\n"); return -EBUSY; } /* * Create a new ALSA sound card entry, in anticipation * of detecting our hardware ... */ ret = snd_card_new(&pcard->card->dev, index[idx], id[idx], THIS_MODULE, sizeof(struct snd_msnd), &card); if (ret < 0) return ret; chip = card->private_data; chip->card = card; /* * Read the correct parameters off the ISA PnP bus ... */ io[idx] = pnp_port_start(pnp_dev, 0); irq[idx] = pnp_irq(pnp_dev, 0); mem[idx] = pnp_mem_start(pnp_dev, 0); mpu_io[idx] = pnp_port_start(mpu_dev, 0); mpu_irq[idx] = pnp_irq(mpu_dev, 0); set_default_audio_parameters(chip); #ifdef MSND_CLASSIC chip->type = msndClassic; #else chip->type = msndPinnacle; #endif chip->io = io[idx]; chip->irq = irq[idx]; chip->base = mem[idx]; chip->calibrate_signal = calibrate_signal ? 1 : 0; chip->recsrc = 0; chip->dspq_data_buff = DSPQ_DATA_BUFF; chip->dspq_buff_size = DSPQ_BUFF_SIZE; if (write_ndelay[idx]) clear_bit(F_DISABLE_WRITE_NDELAY, &chip->flags); else set_bit(F_DISABLE_WRITE_NDELAY, &chip->flags); #ifndef MSND_CLASSIC if (digital[idx]) set_bit(F_HAVEDIGITAL, &chip->flags); #endif spin_lock_init(&chip->lock); ret = snd_msnd_probe(card); if (ret < 0) { printk(KERN_ERR LOGNAME ": Probe failed\n"); goto _release_card; } ret = snd_msnd_attach(card); if (ret < 0) { printk(KERN_ERR LOGNAME ": Attach failed\n"); goto _release_card; } pnp_set_card_drvdata(pcard, card); ++idx; return 0; _release_card: snd_card_free(card); return ret; } static void snd_msnd_pnp_remove(struct pnp_card_link *pcard) { snd_msnd_unload(pnp_get_card_drvdata(pcard)); pnp_set_card_drvdata(pcard, NULL); } static int isa_registered; static int pnp_registered; static struct pnp_card_device_id msnd_pnpids[] = { /* Pinnacle PnP */ { .id = "BVJ0440", .devs = { { "TBS0000" }, { "TBS0001" } } }, { .id = "" } /* end */ }; MODULE_DEVICE_TABLE(pnp_card, msnd_pnpids); static struct pnp_card_driver msnd_pnpc_driver = { .flags = PNP_DRIVER_RES_DO_NOT_CHANGE, .name = "msnd_pinnacle", .id_table = msnd_pnpids, .probe = snd_msnd_pnp_detect, .remove = snd_msnd_pnp_remove, }; #endif /* CONFIG_PNP */ static int __init snd_msnd_init(void) { int err; err = isa_register_driver(&snd_msnd_driver, SNDRV_CARDS); #ifdef CONFIG_PNP if (!err) isa_registered = 1; err = pnp_register_card_driver(&msnd_pnpc_driver); if (!err) pnp_registered = 1; if (isa_registered) err = 0; #endif return err; } static void __exit snd_msnd_exit(void) { #ifdef CONFIG_PNP if (pnp_registered) pnp_unregister_card_driver(&msnd_pnpc_driver); if (isa_registered) #endif isa_unregister_driver(&snd_msnd_driver); } module_init(snd_msnd_init); module_exit(snd_msnd_exit);
static irqreturn_t snd_msnd_interrupt(int irq, void *dev_id)
{
	struct snd_msnd *chip = dev_id;
	void *pwDSPQData = chip->mappedbase + DSPQ_DATA_BUFF;

	/* Send ack to DSP */
	/* inb(chip->io + HP_RXL); */

	/* Evaluate queued DSP messages */
	while (readw(chip->DSPQ + JQS_wTail) != readw(chip->DSPQ + JQS_wHead)) {
		u16 wTmp;

		snd_msnd_eval_dsp_msg(chip,
			readw(pwDSPQData + 2 * readw(chip->DSPQ + JQS_wHead)));

		wTmp = readw(chip->DSPQ + JQS_wHead) + 1;
		if (wTmp > readw(chip->DSPQ + JQS_wSize))
			writew(0, chip->DSPQ + JQS_wHead);
		else
			writew(wTmp, chip->DSPQ + JQS_wHead);
	}
	/* Send ack to DSP */
	inb(chip->io + HP_RXL);
	return IRQ_HANDLED;
}
static irqreturn_t snd_msnd_interrupt(int irq, void *dev_id)
{
	struct snd_msnd *chip = dev_id;
	void *pwDSPQData = chip->mappedbase + DSPQ_DATA_BUFF;
	u16 head, tail, size;

	/* Send ack to DSP */
	/* inb(chip->io + HP_RXL); */

	/* Evaluate queued DSP messages */
	head = readw(chip->DSPQ + JQS_wHead);
	tail = readw(chip->DSPQ + JQS_wTail);
	size = readw(chip->DSPQ + JQS_wSize);
	if (head > size || tail > size)
		goto out;
	while (head != tail) {
		snd_msnd_eval_dsp_msg(chip, readw(pwDSPQData + 2 * head));
		if (++head > size)
			head = 0;
		writew(head, chip->DSPQ + JQS_wHead);
	}
 out:
	/* Send ack to DSP */
	inb(chip->io + HP_RXL);
	return IRQ_HANDLED;
}
{'added': [(173, '\tu16 head, tail, size;'),
           (179, '\thead = readw(chip->DSPQ + JQS_wHead);'),
           (180, '\ttail = readw(chip->DSPQ + JQS_wTail);'),
           (181, '\tsize = readw(chip->DSPQ + JQS_wSize);'),
           (182, '\tif (head > size || tail > size)'),
           (183, '\t\tgoto out;'),
           (184, '\twhile (head != tail) {'),
           (185, '\t\tsnd_msnd_eval_dsp_msg(chip, readw(pwDSPQData + 2 * head));'),
           (186, '\t\tif (++head > size)'),
           (187, '\t\t\thead = 0;'),
           (188, '\t\twritew(head, chip->DSPQ + JQS_wHead);'),
           (190, ' out:')],
 'deleted': [(178, '\twhile (readw(chip->DSPQ + JQS_wTail) != readw(chip->DSPQ + JQS_wHead)) {'),
             (179, '\t\tu16 wTmp;'),
             (180, ''),
             (181, '\t\tsnd_msnd_eval_dsp_msg(chip,'),
             (182, '\t\t\treadw(pwDSPQData + 2 * readw(chip->DSPQ + JQS_wHead)));'),
             (183, ''),
             (184, '\t\twTmp = readw(chip->DSPQ + JQS_wHead) + 1;'),
             (185, '\t\tif (wTmp > readw(chip->DSPQ + JQS_wSize))'),
             (186, '\t\t\twritew(0, chip->DSPQ + JQS_wHead);'),
             (187, '\t\telse'),
             (188, '\t\t\twritew(wTmp, chip->DSPQ + JQS_wHead);')]}
12
11
921
5,689
https://github.com/torvalds/linux
CVE-2017-9984
['CWE-125']
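The record above documents CVE-2017-9984, an out-of-bounds read (CWE-125) in snd_msnd_interrupt(): the head, tail, and size fields of the DSP message queue live in memory the device can write, and the old loop indexed pwDSPQData with a head value it never validated. The fix snapshots all three fields once and rejects indices larger than size before touching the buffer. Below is a minimal, self-contained C sketch of that pattern; struct msg_queue, drain_queue(), nslots, and handle() are hypothetical names standing in for the JQS_* queue header and the DSPQ data buffer, not the driver's actual API.

#include <stdint.h>

/* Hypothetical queue header mirroring the JQS_* layout: the device owns
 * the tail, the host owns the head, and all three fields sit in memory
 * the device can scribble on, so none of them can be trusted. */
struct msg_queue {
	volatile uint16_t head;	/* next slot the host will read */
	volatile uint16_t tail;	/* next slot the device will write */
	volatile uint16_t size;	/* highest valid index, per the device */
};

/* Drain pattern from the fix: snapshot the indices once, reject anything
 * that would index past the end of data[], and only then walk the queue.
 * nslots is the bound the host actually knows (data[] has nslots entries);
 * it is an assumption here, standing in for the fixed DSPQ buffer size. */
static int drain_queue(struct msg_queue *q, const uint16_t *data,
		       uint16_t nslots, void (*handle)(uint16_t msg))
{
	uint16_t head = q->head;	/* read each field exactly once */
	uint16_t tail = q->tail;
	uint16_t size = q->size;

	/* Refuse corrupted indices instead of reading out of bounds. */
	if (size >= nslots || head > size || tail > size)
		return -1;

	while (head != tail) {
		handle(data[head]);	/* bounded by the check above */
		if (++head > size)
			head = 0;	/* wrap exactly like the driver */
		q->head = head;		/* publish progress to the device */
	}
	return 0;
}

Snapshotting also means the loop condition cannot be torn by the device rewriting head or tail mid-iteration, which is why the fix reads head, tail, and size exactly once up front rather than on every pass.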
proc.c
proc_keys_show
/* procfs files for key database enumeration * * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/module.h> #include <linux/init.h> #include <linux/sched.h> #include <linux/fs.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <asm/errno.h> #include "internal.h" static int proc_keys_open(struct inode *inode, struct file *file); static void *proc_keys_start(struct seq_file *p, loff_t *_pos); static void *proc_keys_next(struct seq_file *p, void *v, loff_t *_pos); static void proc_keys_stop(struct seq_file *p, void *v); static int proc_keys_show(struct seq_file *m, void *v); static const struct seq_operations proc_keys_ops = { .start = proc_keys_start, .next = proc_keys_next, .stop = proc_keys_stop, .show = proc_keys_show, }; static const struct file_operations proc_keys_fops = { .open = proc_keys_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; static int proc_key_users_open(struct inode *inode, struct file *file); static void *proc_key_users_start(struct seq_file *p, loff_t *_pos); static void *proc_key_users_next(struct seq_file *p, void *v, loff_t *_pos); static void proc_key_users_stop(struct seq_file *p, void *v); static int proc_key_users_show(struct seq_file *m, void *v); static const struct seq_operations proc_key_users_ops = { .start = proc_key_users_start, .next = proc_key_users_next, .stop = proc_key_users_stop, .show = proc_key_users_show, }; static const struct file_operations proc_key_users_fops = { .open = proc_key_users_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; /* * Declare the /proc files. */ static int __init key_proc_init(void) { struct proc_dir_entry *p; p = proc_create("keys", 0, NULL, &proc_keys_fops); if (!p) panic("Cannot create /proc/keys\n"); p = proc_create("key-users", 0, NULL, &proc_key_users_fops); if (!p) panic("Cannot create /proc/key-users\n"); return 0; } __initcall(key_proc_init); /* * Implement "/proc/keys" to provide a list of the keys on the system that * grant View permission to the caller. 
*/ static struct rb_node *key_serial_next(struct seq_file *p, struct rb_node *n) { struct user_namespace *user_ns = seq_user_ns(p); n = rb_next(n); while (n) { struct key *key = rb_entry(n, struct key, serial_node); if (kuid_has_mapping(user_ns, key->user->uid)) break; n = rb_next(n); } return n; } static int proc_keys_open(struct inode *inode, struct file *file) { return seq_open(file, &proc_keys_ops); } static struct key *find_ge_key(struct seq_file *p, key_serial_t id) { struct user_namespace *user_ns = seq_user_ns(p); struct rb_node *n = key_serial_tree.rb_node; struct key *minkey = NULL; while (n) { struct key *key = rb_entry(n, struct key, serial_node); if (id < key->serial) { if (!minkey || minkey->serial > key->serial) minkey = key; n = n->rb_left; } else if (id > key->serial) { n = n->rb_right; } else { minkey = key; break; } key = NULL; } if (!minkey) return NULL; for (;;) { if (kuid_has_mapping(user_ns, minkey->user->uid)) return minkey; n = rb_next(&minkey->serial_node); if (!n) return NULL; minkey = rb_entry(n, struct key, serial_node); } } static void *proc_keys_start(struct seq_file *p, loff_t *_pos) __acquires(key_serial_lock) { key_serial_t pos = *_pos; struct key *key; spin_lock(&key_serial_lock); if (*_pos > INT_MAX) return NULL; key = find_ge_key(p, pos); if (!key) return NULL; *_pos = key->serial; return &key->serial_node; } static inline key_serial_t key_node_serial(struct rb_node *n) { struct key *key = rb_entry(n, struct key, serial_node); return key->serial; } static void *proc_keys_next(struct seq_file *p, void *v, loff_t *_pos) { struct rb_node *n; n = key_serial_next(p, v); if (n) *_pos = key_node_serial(n); return n; } static void proc_keys_stop(struct seq_file *p, void *v) __releases(key_serial_lock) { spin_unlock(&key_serial_lock); } static int proc_keys_show(struct seq_file *m, void *v) { struct rb_node *_p = v; struct key *key = rb_entry(_p, struct key, serial_node); struct timespec now; unsigned long timo; key_ref_t key_ref, skey_ref; char xbuf[16]; int rc; struct keyring_search_context ctx = { .index_key.type = key->type, .index_key.description = key->description, .cred = m->file->f_cred, .match_data.cmp = lookup_user_key_possessed, .match_data.raw_data = key, .match_data.lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT, .flags = KEYRING_SEARCH_NO_STATE_CHECK, }; key_ref = make_key_ref(key, 0); /* determine if the key is possessed by this process (a test we can * skip if the key does not indicate the possessor can view it */ if (key->perm & KEY_POS_VIEW) { skey_ref = search_my_process_keyrings(&ctx); if (!IS_ERR(skey_ref)) { key_ref_put(skey_ref); key_ref = make_key_ref(key, 1); } } /* check whether the current task is allowed to view the key */ rc = key_task_permission(key_ref, ctx.cred, KEY_NEED_VIEW); if (rc < 0) return 0; now = current_kernel_time(); rcu_read_lock(); /* come up with a suitable timeout value */ if (key->expiry == 0) { memcpy(xbuf, "perm", 5); } else if (now.tv_sec >= key->expiry) { memcpy(xbuf, "expd", 5); } else { timo = key->expiry - now.tv_sec; if (timo < 60) sprintf(xbuf, "%lus", timo); else if (timo < 60*60) sprintf(xbuf, "%lum", timo / 60); else if (timo < 60*60*24) sprintf(xbuf, "%luh", timo / (60*60)); else if (timo < 60*60*24*7) sprintf(xbuf, "%lud", timo / (60*60*24)); else sprintf(xbuf, "%luw", timo / (60*60*24*7)); } #define showflag(KEY, LETTER, FLAG) \ (test_bit(FLAG, &(KEY)->flags) ? 
LETTER : '-') seq_printf(m, "%08x %c%c%c%c%c%c%c %5d %4s %08x %5d %5d %-9.9s ", key->serial, showflag(key, 'I', KEY_FLAG_INSTANTIATED), showflag(key, 'R', KEY_FLAG_REVOKED), showflag(key, 'D', KEY_FLAG_DEAD), showflag(key, 'Q', KEY_FLAG_IN_QUOTA), showflag(key, 'U', KEY_FLAG_USER_CONSTRUCT), showflag(key, 'N', KEY_FLAG_NEGATIVE), showflag(key, 'i', KEY_FLAG_INVALIDATED), refcount_read(&key->usage), xbuf, key->perm, from_kuid_munged(seq_user_ns(m), key->uid), from_kgid_munged(seq_user_ns(m), key->gid), key->type->name); #undef showflag if (key->type->describe) key->type->describe(key, m); seq_putc(m, '\n'); rcu_read_unlock(); return 0; } static struct rb_node *__key_user_next(struct user_namespace *user_ns, struct rb_node *n) { while (n) { struct key_user *user = rb_entry(n, struct key_user, node); if (kuid_has_mapping(user_ns, user->uid)) break; n = rb_next(n); } return n; } static struct rb_node *key_user_next(struct user_namespace *user_ns, struct rb_node *n) { return __key_user_next(user_ns, rb_next(n)); } static struct rb_node *key_user_first(struct user_namespace *user_ns, struct rb_root *r) { struct rb_node *n = rb_first(r); return __key_user_next(user_ns, n); } /* * Implement "/proc/key-users" to provides a list of the key users and their * quotas. */ static int proc_key_users_open(struct inode *inode, struct file *file) { return seq_open(file, &proc_key_users_ops); } static void *proc_key_users_start(struct seq_file *p, loff_t *_pos) __acquires(key_user_lock) { struct rb_node *_p; loff_t pos = *_pos; spin_lock(&key_user_lock); _p = key_user_first(seq_user_ns(p), &key_user_tree); while (pos > 0 && _p) { pos--; _p = key_user_next(seq_user_ns(p), _p); } return _p; } static void *proc_key_users_next(struct seq_file *p, void *v, loff_t *_pos) { (*_pos)++; return key_user_next(seq_user_ns(p), (struct rb_node *)v); } static void proc_key_users_stop(struct seq_file *p, void *v) __releases(key_user_lock) { spin_unlock(&key_user_lock); } static int proc_key_users_show(struct seq_file *m, void *v) { struct rb_node *_p = v; struct key_user *user = rb_entry(_p, struct key_user, node); unsigned maxkeys = uid_eq(user->uid, GLOBAL_ROOT_UID) ? key_quota_root_maxkeys : key_quota_maxkeys; unsigned maxbytes = uid_eq(user->uid, GLOBAL_ROOT_UID) ? key_quota_root_maxbytes : key_quota_maxbytes; seq_printf(m, "%5u: %5d %d/%d %d/%d %d/%d\n", from_kuid_munged(seq_user_ns(m), user->uid), refcount_read(&user->usage), atomic_read(&user->nkeys), atomic_read(&user->nikeys), user->qnkeys, maxkeys, user->qnbytes, maxbytes); return 0; }
/* procfs files for key database enumeration * * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/module.h> #include <linux/init.h> #include <linux/sched.h> #include <linux/fs.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <asm/errno.h> #include "internal.h" static int proc_keys_open(struct inode *inode, struct file *file); static void *proc_keys_start(struct seq_file *p, loff_t *_pos); static void *proc_keys_next(struct seq_file *p, void *v, loff_t *_pos); static void proc_keys_stop(struct seq_file *p, void *v); static int proc_keys_show(struct seq_file *m, void *v); static const struct seq_operations proc_keys_ops = { .start = proc_keys_start, .next = proc_keys_next, .stop = proc_keys_stop, .show = proc_keys_show, }; static const struct file_operations proc_keys_fops = { .open = proc_keys_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; static int proc_key_users_open(struct inode *inode, struct file *file); static void *proc_key_users_start(struct seq_file *p, loff_t *_pos); static void *proc_key_users_next(struct seq_file *p, void *v, loff_t *_pos); static void proc_key_users_stop(struct seq_file *p, void *v); static int proc_key_users_show(struct seq_file *m, void *v); static const struct seq_operations proc_key_users_ops = { .start = proc_key_users_start, .next = proc_key_users_next, .stop = proc_key_users_stop, .show = proc_key_users_show, }; static const struct file_operations proc_key_users_fops = { .open = proc_key_users_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; /* * Declare the /proc files. */ static int __init key_proc_init(void) { struct proc_dir_entry *p; p = proc_create("keys", 0, NULL, &proc_keys_fops); if (!p) panic("Cannot create /proc/keys\n"); p = proc_create("key-users", 0, NULL, &proc_key_users_fops); if (!p) panic("Cannot create /proc/key-users\n"); return 0; } __initcall(key_proc_init); /* * Implement "/proc/keys" to provide a list of the keys on the system that * grant View permission to the caller. 
*/ static struct rb_node *key_serial_next(struct seq_file *p, struct rb_node *n) { struct user_namespace *user_ns = seq_user_ns(p); n = rb_next(n); while (n) { struct key *key = rb_entry(n, struct key, serial_node); if (kuid_has_mapping(user_ns, key->user->uid)) break; n = rb_next(n); } return n; } static int proc_keys_open(struct inode *inode, struct file *file) { return seq_open(file, &proc_keys_ops); } static struct key *find_ge_key(struct seq_file *p, key_serial_t id) { struct user_namespace *user_ns = seq_user_ns(p); struct rb_node *n = key_serial_tree.rb_node; struct key *minkey = NULL; while (n) { struct key *key = rb_entry(n, struct key, serial_node); if (id < key->serial) { if (!minkey || minkey->serial > key->serial) minkey = key; n = n->rb_left; } else if (id > key->serial) { n = n->rb_right; } else { minkey = key; break; } key = NULL; } if (!minkey) return NULL; for (;;) { if (kuid_has_mapping(user_ns, minkey->user->uid)) return minkey; n = rb_next(&minkey->serial_node); if (!n) return NULL; minkey = rb_entry(n, struct key, serial_node); } } static void *proc_keys_start(struct seq_file *p, loff_t *_pos) __acquires(key_serial_lock) { key_serial_t pos = *_pos; struct key *key; spin_lock(&key_serial_lock); if (*_pos > INT_MAX) return NULL; key = find_ge_key(p, pos); if (!key) return NULL; *_pos = key->serial; return &key->serial_node; } static inline key_serial_t key_node_serial(struct rb_node *n) { struct key *key = rb_entry(n, struct key, serial_node); return key->serial; } static void *proc_keys_next(struct seq_file *p, void *v, loff_t *_pos) { struct rb_node *n; n = key_serial_next(p, v); if (n) *_pos = key_node_serial(n); return n; } static void proc_keys_stop(struct seq_file *p, void *v) __releases(key_serial_lock) { spin_unlock(&key_serial_lock); } static int proc_keys_show(struct seq_file *m, void *v) { struct rb_node *_p = v; struct key *key = rb_entry(_p, struct key, serial_node); struct timespec now; unsigned long timo; key_ref_t key_ref, skey_ref; char xbuf[16]; short state; int rc; struct keyring_search_context ctx = { .index_key.type = key->type, .index_key.description = key->description, .cred = m->file->f_cred, .match_data.cmp = lookup_user_key_possessed, .match_data.raw_data = key, .match_data.lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT, .flags = KEYRING_SEARCH_NO_STATE_CHECK, }; key_ref = make_key_ref(key, 0); /* determine if the key is possessed by this process (a test we can * skip if the key does not indicate the possessor can view it */ if (key->perm & KEY_POS_VIEW) { skey_ref = search_my_process_keyrings(&ctx); if (!IS_ERR(skey_ref)) { key_ref_put(skey_ref); key_ref = make_key_ref(key, 1); } } /* check whether the current task is allowed to view the key */ rc = key_task_permission(key_ref, ctx.cred, KEY_NEED_VIEW); if (rc < 0) return 0; now = current_kernel_time(); rcu_read_lock(); /* come up with a suitable timeout value */ if (key->expiry == 0) { memcpy(xbuf, "perm", 5); } else if (now.tv_sec >= key->expiry) { memcpy(xbuf, "expd", 5); } else { timo = key->expiry - now.tv_sec; if (timo < 60) sprintf(xbuf, "%lus", timo); else if (timo < 60*60) sprintf(xbuf, "%lum", timo / 60); else if (timo < 60*60*24) sprintf(xbuf, "%luh", timo / (60*60)); else if (timo < 60*60*24*7) sprintf(xbuf, "%lud", timo / (60*60*24)); else sprintf(xbuf, "%luw", timo / (60*60*24*7)); } state = key_read_state(key); #define showflag(KEY, LETTER, FLAG) \ (test_bit(FLAG, &(KEY)->flags) ? 
LETTER : '-') seq_printf(m, "%08x %c%c%c%c%c%c%c %5d %4s %08x %5d %5d %-9.9s ", key->serial, state != KEY_IS_UNINSTANTIATED ? 'I' : '-', showflag(key, 'R', KEY_FLAG_REVOKED), showflag(key, 'D', KEY_FLAG_DEAD), showflag(key, 'Q', KEY_FLAG_IN_QUOTA), showflag(key, 'U', KEY_FLAG_USER_CONSTRUCT), state < 0 ? 'N' : '-', showflag(key, 'i', KEY_FLAG_INVALIDATED), refcount_read(&key->usage), xbuf, key->perm, from_kuid_munged(seq_user_ns(m), key->uid), from_kgid_munged(seq_user_ns(m), key->gid), key->type->name); #undef showflag if (key->type->describe) key->type->describe(key, m); seq_putc(m, '\n'); rcu_read_unlock(); return 0; } static struct rb_node *__key_user_next(struct user_namespace *user_ns, struct rb_node *n) { while (n) { struct key_user *user = rb_entry(n, struct key_user, node); if (kuid_has_mapping(user_ns, user->uid)) break; n = rb_next(n); } return n; } static struct rb_node *key_user_next(struct user_namespace *user_ns, struct rb_node *n) { return __key_user_next(user_ns, rb_next(n)); } static struct rb_node *key_user_first(struct user_namespace *user_ns, struct rb_root *r) { struct rb_node *n = rb_first(r); return __key_user_next(user_ns, n); } /* * Implement "/proc/key-users" to provides a list of the key users and their * quotas. */ static int proc_key_users_open(struct inode *inode, struct file *file) { return seq_open(file, &proc_key_users_ops); } static void *proc_key_users_start(struct seq_file *p, loff_t *_pos) __acquires(key_user_lock) { struct rb_node *_p; loff_t pos = *_pos; spin_lock(&key_user_lock); _p = key_user_first(seq_user_ns(p), &key_user_tree); while (pos > 0 && _p) { pos--; _p = key_user_next(seq_user_ns(p), _p); } return _p; } static void *proc_key_users_next(struct seq_file *p, void *v, loff_t *_pos) { (*_pos)++; return key_user_next(seq_user_ns(p), (struct rb_node *)v); } static void proc_key_users_stop(struct seq_file *p, void *v) __releases(key_user_lock) { spin_unlock(&key_user_lock); } static int proc_key_users_show(struct seq_file *m, void *v) { struct rb_node *_p = v; struct key_user *user = rb_entry(_p, struct key_user, node); unsigned maxkeys = uid_eq(user->uid, GLOBAL_ROOT_UID) ? key_quota_root_maxkeys : key_quota_maxkeys; unsigned maxbytes = uid_eq(user->uid, GLOBAL_ROOT_UID) ? key_quota_root_maxbytes : key_quota_maxbytes; seq_printf(m, "%5u: %5d %d/%d %d/%d %d/%d\n", from_kuid_munged(seq_user_ns(m), user->uid), refcount_read(&user->usage), atomic_read(&user->nkeys), atomic_read(&user->nikeys), user->qnkeys, maxkeys, user->qnbytes, maxbytes); return 0; }
static int proc_keys_show(struct seq_file *m, void *v) { struct rb_node *_p = v; struct key *key = rb_entry(_p, struct key, serial_node); struct timespec now; unsigned long timo; key_ref_t key_ref, skey_ref; char xbuf[16]; int rc; struct keyring_search_context ctx = { .index_key.type = key->type, .index_key.description = key->description, .cred = m->file->f_cred, .match_data.cmp = lookup_user_key_possessed, .match_data.raw_data = key, .match_data.lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT, .flags = KEYRING_SEARCH_NO_STATE_CHECK, }; key_ref = make_key_ref(key, 0); /* determine if the key is possessed by this process (a test we can * skip if the key does not indicate the possessor can view it */ if (key->perm & KEY_POS_VIEW) { skey_ref = search_my_process_keyrings(&ctx); if (!IS_ERR(skey_ref)) { key_ref_put(skey_ref); key_ref = make_key_ref(key, 1); } } /* check whether the current task is allowed to view the key */ rc = key_task_permission(key_ref, ctx.cred, KEY_NEED_VIEW); if (rc < 0) return 0; now = current_kernel_time(); rcu_read_lock(); /* come up with a suitable timeout value */ if (key->expiry == 0) { memcpy(xbuf, "perm", 5); } else if (now.tv_sec >= key->expiry) { memcpy(xbuf, "expd", 5); } else { timo = key->expiry - now.tv_sec; if (timo < 60) sprintf(xbuf, "%lus", timo); else if (timo < 60*60) sprintf(xbuf, "%lum", timo / 60); else if (timo < 60*60*24) sprintf(xbuf, "%luh", timo / (60*60)); else if (timo < 60*60*24*7) sprintf(xbuf, "%lud", timo / (60*60*24)); else sprintf(xbuf, "%luw", timo / (60*60*24*7)); } #define showflag(KEY, LETTER, FLAG) \ (test_bit(FLAG, &(KEY)->flags) ? LETTER : '-') seq_printf(m, "%08x %c%c%c%c%c%c%c %5d %4s %08x %5d %5d %-9.9s ", key->serial, showflag(key, 'I', KEY_FLAG_INSTANTIATED), showflag(key, 'R', KEY_FLAG_REVOKED), showflag(key, 'D', KEY_FLAG_DEAD), showflag(key, 'Q', KEY_FLAG_IN_QUOTA), showflag(key, 'U', KEY_FLAG_USER_CONSTRUCT), showflag(key, 'N', KEY_FLAG_NEGATIVE), showflag(key, 'i', KEY_FLAG_INVALIDATED), refcount_read(&key->usage), xbuf, key->perm, from_kuid_munged(seq_user_ns(m), key->uid), from_kgid_munged(seq_user_ns(m), key->gid), key->type->name); #undef showflag if (key->type->describe) key->type->describe(key, m); seq_putc(m, '\n'); rcu_read_unlock(); return 0; }
static int proc_keys_show(struct seq_file *m, void *v) { struct rb_node *_p = v; struct key *key = rb_entry(_p, struct key, serial_node); struct timespec now; unsigned long timo; key_ref_t key_ref, skey_ref; char xbuf[16]; short state; int rc; struct keyring_search_context ctx = { .index_key.type = key->type, .index_key.description = key->description, .cred = m->file->f_cred, .match_data.cmp = lookup_user_key_possessed, .match_data.raw_data = key, .match_data.lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT, .flags = KEYRING_SEARCH_NO_STATE_CHECK, }; key_ref = make_key_ref(key, 0); /* determine if the key is possessed by this process (a test we can * skip if the key does not indicate the possessor can view it */ if (key->perm & KEY_POS_VIEW) { skey_ref = search_my_process_keyrings(&ctx); if (!IS_ERR(skey_ref)) { key_ref_put(skey_ref); key_ref = make_key_ref(key, 1); } } /* check whether the current task is allowed to view the key */ rc = key_task_permission(key_ref, ctx.cred, KEY_NEED_VIEW); if (rc < 0) return 0; now = current_kernel_time(); rcu_read_lock(); /* come up with a suitable timeout value */ if (key->expiry == 0) { memcpy(xbuf, "perm", 5); } else if (now.tv_sec >= key->expiry) { memcpy(xbuf, "expd", 5); } else { timo = key->expiry - now.tv_sec; if (timo < 60) sprintf(xbuf, "%lus", timo); else if (timo < 60*60) sprintf(xbuf, "%lum", timo / 60); else if (timo < 60*60*24) sprintf(xbuf, "%luh", timo / (60*60)); else if (timo < 60*60*24*7) sprintf(xbuf, "%lud", timo / (60*60*24)); else sprintf(xbuf, "%luw", timo / (60*60*24*7)); } state = key_read_state(key); #define showflag(KEY, LETTER, FLAG) \ (test_bit(FLAG, &(KEY)->flags) ? LETTER : '-') seq_printf(m, "%08x %c%c%c%c%c%c%c %5d %4s %08x %5d %5d %-9.9s ", key->serial, state != KEY_IS_UNINSTANTIATED ? 'I' : '-', showflag(key, 'R', KEY_FLAG_REVOKED), showflag(key, 'D', KEY_FLAG_DEAD), showflag(key, 'Q', KEY_FLAG_IN_QUOTA), showflag(key, 'U', KEY_FLAG_USER_CONSTRUCT), state < 0 ? 'N' : '-', showflag(key, 'i', KEY_FLAG_INVALIDATED), refcount_read(&key->usage), xbuf, key->perm, from_kuid_munged(seq_user_ns(m), key->uid), from_kgid_munged(seq_user_ns(m), key->gid), key->type->name); #undef showflag if (key->type->describe) key->type->describe(key, m); seq_putc(m, '\n'); rcu_read_unlock(); return 0; }
{'added': [(185, '\tshort state;'), (240, '\tstate = key_read_state(key);'), (241, ''), (247, "\t\t state != KEY_IS_UNINSTANTIATED ? 'I' : '-',"), (252, "\t\t state < 0 ? 'N' : '-',")], 'deleted': [(244, "\t\t showflag(key, 'I', KEY_FLAG_INSTANTIATED),"), (249, "\t\t showflag(key, 'N', KEY_FLAG_NEGATIVE),")]}
5
2
268
1,782
https://github.com/torvalds/linux
CVE-2017-15951
['CWE-20']
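
A note on the record above: the CVE-2017-15951 diff replaces several independent test_bit() reads in proc_keys_show() with a single key_read_state() snapshot, so the 'I' (instantiated) and 'N' (negative) display columns are derived from one consistent value rather than from flags that can change between reads. Below is a minimal user-space sketch of that read-once pattern using C11 atomics; the names (obj, OBJ_UNINSTANTIATED, show_obj) and the state encoding are assumptions of this sketch, not the kernel API.

#include <stdatomic.h>
#include <stdio.h>

/* Hypothetical states mirroring "uninstantiated" (0) and negative (<0). */
enum { OBJ_UNINSTANTIATED = 0, OBJ_POSITIVE = 1, OBJ_NEGATIVE = -1 };

struct obj {
	_Atomic short state;	/* written by instantiate/negate paths */
};

static void show_obj(struct obj *o)
{
	/* One snapshot; both display columns derive from the same value,
	 * so a concurrent state transition cannot make them disagree. */
	short state = atomic_load_explicit(&o->state, memory_order_acquire);

	printf("%c%c\n",
	       state != OBJ_UNINSTANTIATED ? 'I' : '-',
	       state < 0                   ? 'N' : '-');
}

int main(void)
{
	struct obj o;
	atomic_init(&o.state, OBJ_NEGATIVE);
	show_obj(&o);		/* prints "IN": instantiated and negative */
	return 0;
}

The design point is the same one the diff makes: reading shared state twice (once for 'I', once for 'N') admits an interleaving where the two characters describe different states; reading it once cannot.
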
xattr.c
do_setxattr
/* * Copyright (C) 2007 Red Hat. All rights reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License v2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this program; if not, write to the * Free Software Foundation, Inc., 59 Temple Place - Suite 330, * Boston, MA 021110-1307, USA. */ #include <linux/init.h> #include <linux/fs.h> #include <linux/slab.h> #include <linux/rwsem.h> #include <linux/xattr.h> #include <linux/security.h> #include <linux/posix_acl_xattr.h> #include "ctree.h" #include "btrfs_inode.h" #include "transaction.h" #include "xattr.h" #include "disk-io.h" #include "props.h" ssize_t __btrfs_getxattr(struct inode *inode, const char *name, void *buffer, size_t size) { struct btrfs_dir_item *di; struct btrfs_root *root = BTRFS_I(inode)->root; struct btrfs_path *path; struct extent_buffer *leaf; int ret = 0; unsigned long data_ptr; path = btrfs_alloc_path(); if (!path) return -ENOMEM; /* lookup the xattr by name */ di = btrfs_lookup_xattr(NULL, root, path, btrfs_ino(inode), name, strlen(name), 0); if (!di) { ret = -ENODATA; goto out; } else if (IS_ERR(di)) { ret = PTR_ERR(di); goto out; } leaf = path->nodes[0]; /* if size is 0, that means we want the size of the attr */ if (!size) { ret = btrfs_dir_data_len(leaf, di); goto out; } /* now get the data out of our dir_item */ if (btrfs_dir_data_len(leaf, di) > size) { ret = -ERANGE; goto out; } /* * The way things are packed into the leaf is like this * |struct btrfs_dir_item|name|data| * where name is the xattr name, so security.foo, and data is the * content of the xattr. data_ptr points to the location in memory * where the data starts in the in memory leaf */ data_ptr = (unsigned long)((char *)(di + 1) + btrfs_dir_name_len(leaf, di)); read_extent_buffer(leaf, buffer, data_ptr, btrfs_dir_data_len(leaf, di)); ret = btrfs_dir_data_len(leaf, di); out: btrfs_free_path(path); return ret; } static int do_setxattr(struct btrfs_trans_handle *trans, struct inode *inode, const char *name, const void *value, size_t size, int flags) { struct btrfs_dir_item *di; struct btrfs_root *root = BTRFS_I(inode)->root; struct btrfs_path *path; size_t name_len = strlen(name); int ret = 0; if (name_len + size > BTRFS_MAX_XATTR_SIZE(root)) return -ENOSPC; path = btrfs_alloc_path(); if (!path) return -ENOMEM; if (flags & XATTR_REPLACE) { di = btrfs_lookup_xattr(trans, root, path, btrfs_ino(inode), name, name_len, -1); if (IS_ERR(di)) { ret = PTR_ERR(di); goto out; } else if (!di) { ret = -ENODATA; goto out; } ret = btrfs_delete_one_dir_name(trans, root, path, di); if (ret) goto out; btrfs_release_path(path); /* * remove the attribute */ if (!value) goto out; } else { di = btrfs_lookup_xattr(NULL, root, path, btrfs_ino(inode), name, name_len, 0); if (IS_ERR(di)) { ret = PTR_ERR(di); goto out; } if (!di && !value) goto out; btrfs_release_path(path); } again: ret = btrfs_insert_xattr_item(trans, root, path, btrfs_ino(inode), name, name_len, value, size); /* * If we're setting an xattr to a new value but the new value is say * exactly BTRFS_MAX_XATTR_SIZE, we could end up with EOVERFLOW getting * back from split_leaf. 
This is because it thinks we'll be extending * the existing item size, but we're asking for enough space to add the * item itself. So if we get EOVERFLOW just set ret to EEXIST and let * the rest of the function figure it out. */ if (ret == -EOVERFLOW) ret = -EEXIST; if (ret == -EEXIST) { if (flags & XATTR_CREATE) goto out; /* * We can't use the path we already have since we won't have the * proper locking for a delete, so release the path and * re-lookup to delete the thing. */ btrfs_release_path(path); di = btrfs_lookup_xattr(trans, root, path, btrfs_ino(inode), name, name_len, -1); if (IS_ERR(di)) { ret = PTR_ERR(di); goto out; } else if (!di) { /* Shouldn't happen but just in case... */ btrfs_release_path(path); goto again; } ret = btrfs_delete_one_dir_name(trans, root, path, di); if (ret) goto out; /* * We have a value to set, so go back and try to insert it now. */ if (value) { btrfs_release_path(path); goto again; } } out: btrfs_free_path(path); return ret; } /* * @value: "" makes the attribute to empty, NULL removes it */ int __btrfs_setxattr(struct btrfs_trans_handle *trans, struct inode *inode, const char *name, const void *value, size_t size, int flags) { struct btrfs_root *root = BTRFS_I(inode)->root; int ret; if (trans) return do_setxattr(trans, inode, name, value, size, flags); trans = btrfs_start_transaction(root, 2); if (IS_ERR(trans)) return PTR_ERR(trans); ret = do_setxattr(trans, inode, name, value, size, flags); if (ret) goto out; inode_inc_iversion(inode); inode->i_ctime = CURRENT_TIME; set_bit(BTRFS_INODE_COPY_EVERYTHING, &BTRFS_I(inode)->runtime_flags); ret = btrfs_update_inode(trans, root, inode); BUG_ON(ret); out: btrfs_end_transaction(trans, root); return ret; } ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size) { struct btrfs_key key, found_key; struct inode *inode = dentry->d_inode; struct btrfs_root *root = BTRFS_I(inode)->root; struct btrfs_path *path; struct extent_buffer *leaf; struct btrfs_dir_item *di; int ret = 0, slot; size_t total_size = 0, size_left = size; unsigned long name_ptr; size_t name_len; /* * ok we want all objects associated with this id. 
* NOTE: we set key.offset = 0; because we want to start with the * first xattr that we find and walk forward */ key.objectid = btrfs_ino(inode); key.type = BTRFS_XATTR_ITEM_KEY; key.offset = 0; path = btrfs_alloc_path(); if (!path) return -ENOMEM; path->reada = 2; /* search for our xattrs */ ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); if (ret < 0) goto err; while (1) { leaf = path->nodes[0]; slot = path->slots[0]; /* this is where we start walking through the path */ if (slot >= btrfs_header_nritems(leaf)) { /* * if we've reached the last slot in this leaf we need * to go to the next leaf and reset everything */ ret = btrfs_next_leaf(root, path); if (ret < 0) goto err; else if (ret > 0) break; continue; } btrfs_item_key_to_cpu(leaf, &found_key, slot); /* check to make sure this item is what we want */ if (found_key.objectid != key.objectid) break; if (found_key.type != BTRFS_XATTR_ITEM_KEY) break; di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item); if (verify_dir_item(root, leaf, di)) goto next; name_len = btrfs_dir_name_len(leaf, di); total_size += name_len + 1; /* we are just looking for how big our buffer needs to be */ if (!size) goto next; if (!buffer || (name_len + 1) > size_left) { ret = -ERANGE; goto err; } name_ptr = (unsigned long)(di + 1); read_extent_buffer(leaf, buffer, name_ptr, name_len); buffer[name_len] = '\0'; size_left -= name_len + 1; buffer += name_len + 1; next: path->slots[0]++; } ret = total_size; err: btrfs_free_path(path); return ret; } /* * List of handlers for synthetic system.* attributes. All real ondisk * attributes are handled directly. */ const struct xattr_handler *btrfs_xattr_handlers[] = { #ifdef CONFIG_BTRFS_FS_POSIX_ACL &posix_acl_access_xattr_handler, &posix_acl_default_xattr_handler, #endif NULL, }; /* * Check if the attribute is in a supported namespace. * * This applied after the check for the synthetic attributes in the system * namespace. */ static bool btrfs_is_valid_xattr(const char *name) { return !strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN) || !strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN) || !strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) || !strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN) || !strncmp(name, XATTR_BTRFS_PREFIX, XATTR_BTRFS_PREFIX_LEN); } ssize_t btrfs_getxattr(struct dentry *dentry, const char *name, void *buffer, size_t size) { /* * If this is a request for a synthetic attribute in the system.* * namespace use the generic infrastructure to resolve a handler * for it via sb->s_xattr. */ if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN)) return generic_getxattr(dentry, name, buffer, size); if (!btrfs_is_valid_xattr(name)) return -EOPNOTSUPP; return __btrfs_getxattr(dentry->d_inode, name, buffer, size); } int btrfs_setxattr(struct dentry *dentry, const char *name, const void *value, size_t size, int flags) { struct btrfs_root *root = BTRFS_I(dentry->d_inode)->root; /* * The permission on security.* and system.* is not checked * in permission(). */ if (btrfs_root_readonly(root)) return -EROFS; /* * If this is a request for a synthetic attribute in the system.* * namespace use the generic infrastructure to resolve a handler * for it via sb->s_xattr. 
*/ if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN)) return generic_setxattr(dentry, name, value, size, flags); if (!btrfs_is_valid_xattr(name)) return -EOPNOTSUPP; if (!strncmp(name, XATTR_BTRFS_PREFIX, XATTR_BTRFS_PREFIX_LEN)) return btrfs_set_prop(dentry->d_inode, name, value, size, flags); if (size == 0) value = ""; /* empty EA, do not remove */ return __btrfs_setxattr(NULL, dentry->d_inode, name, value, size, flags); } int btrfs_removexattr(struct dentry *dentry, const char *name) { struct btrfs_root *root = BTRFS_I(dentry->d_inode)->root; /* * The permission on security.* and system.* is not checked * in permission(). */ if (btrfs_root_readonly(root)) return -EROFS; /* * If this is a request for a synthetic attribute in the system.* * namespace use the generic infrastructure to resolve a handler * for it via sb->s_xattr. */ if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN)) return generic_removexattr(dentry, name); if (!btrfs_is_valid_xattr(name)) return -EOPNOTSUPP; if (!strncmp(name, XATTR_BTRFS_PREFIX, XATTR_BTRFS_PREFIX_LEN)) return btrfs_set_prop(dentry->d_inode, name, NULL, 0, XATTR_REPLACE); return __btrfs_setxattr(NULL, dentry->d_inode, name, NULL, 0, XATTR_REPLACE); } static int btrfs_initxattrs(struct inode *inode, const struct xattr *xattr_array, void *fs_info) { const struct xattr *xattr; struct btrfs_trans_handle *trans = fs_info; char *name; int err = 0; for (xattr = xattr_array; xattr->name != NULL; xattr++) { name = kmalloc(XATTR_SECURITY_PREFIX_LEN + strlen(xattr->name) + 1, GFP_NOFS); if (!name) { err = -ENOMEM; break; } strcpy(name, XATTR_SECURITY_PREFIX); strcpy(name + XATTR_SECURITY_PREFIX_LEN, xattr->name); err = __btrfs_setxattr(trans, inode, name, xattr->value, xattr->value_len, 0); kfree(name); if (err < 0) break; } return err; } int btrfs_xattr_security_init(struct btrfs_trans_handle *trans, struct inode *inode, struct inode *dir, const struct qstr *qstr) { return security_inode_init_security(inode, dir, qstr, &btrfs_initxattrs, trans); }
/* * Copyright (C) 2007 Red Hat. All rights reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License v2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this program; if not, write to the * Free Software Foundation, Inc., 59 Temple Place - Suite 330, * Boston, MA 021110-1307, USA. */ #include <linux/init.h> #include <linux/fs.h> #include <linux/slab.h> #include <linux/rwsem.h> #include <linux/xattr.h> #include <linux/security.h> #include <linux/posix_acl_xattr.h> #include "ctree.h" #include "btrfs_inode.h" #include "transaction.h" #include "xattr.h" #include "disk-io.h" #include "props.h" #include "locking.h" ssize_t __btrfs_getxattr(struct inode *inode, const char *name, void *buffer, size_t size) { struct btrfs_dir_item *di; struct btrfs_root *root = BTRFS_I(inode)->root; struct btrfs_path *path; struct extent_buffer *leaf; int ret = 0; unsigned long data_ptr; path = btrfs_alloc_path(); if (!path) return -ENOMEM; /* lookup the xattr by name */ di = btrfs_lookup_xattr(NULL, root, path, btrfs_ino(inode), name, strlen(name), 0); if (!di) { ret = -ENODATA; goto out; } else if (IS_ERR(di)) { ret = PTR_ERR(di); goto out; } leaf = path->nodes[0]; /* if size is 0, that means we want the size of the attr */ if (!size) { ret = btrfs_dir_data_len(leaf, di); goto out; } /* now get the data out of our dir_item */ if (btrfs_dir_data_len(leaf, di) > size) { ret = -ERANGE; goto out; } /* * The way things are packed into the leaf is like this * |struct btrfs_dir_item|name|data| * where name is the xattr name, so security.foo, and data is the * content of the xattr. data_ptr points to the location in memory * where the data starts in the in memory leaf */ data_ptr = (unsigned long)((char *)(di + 1) + btrfs_dir_name_len(leaf, di)); read_extent_buffer(leaf, buffer, data_ptr, btrfs_dir_data_len(leaf, di)); ret = btrfs_dir_data_len(leaf, di); out: btrfs_free_path(path); return ret; } static int do_setxattr(struct btrfs_trans_handle *trans, struct inode *inode, const char *name, const void *value, size_t size, int flags) { struct btrfs_dir_item *di = NULL; struct btrfs_root *root = BTRFS_I(inode)->root; struct btrfs_path *path; size_t name_len = strlen(name); int ret = 0; if (name_len + size > BTRFS_MAX_XATTR_SIZE(root)) return -ENOSPC; path = btrfs_alloc_path(); if (!path) return -ENOMEM; path->skip_release_on_error = 1; if (!value) { di = btrfs_lookup_xattr(trans, root, path, btrfs_ino(inode), name, name_len, -1); if (!di && (flags & XATTR_REPLACE)) ret = -ENODATA; else if (di) ret = btrfs_delete_one_dir_name(trans, root, path, di); goto out; } /* * For a replace we can't just do the insert blindly. * Do a lookup first (read-only btrfs_search_slot), and return if xattr * doesn't exist. If it exists, fall down below to the insert/replace * path - we can't race with a concurrent xattr delete, because the VFS * locks the inode's i_mutex before calling setxattr or removexattr. 
*/ if (flags & XATTR_REPLACE) { ASSERT(mutex_is_locked(&inode->i_mutex)); di = btrfs_lookup_xattr(NULL, root, path, btrfs_ino(inode), name, name_len, 0); if (!di) { ret = -ENODATA; goto out; } btrfs_release_path(path); di = NULL; } ret = btrfs_insert_xattr_item(trans, root, path, btrfs_ino(inode), name, name_len, value, size); if (ret == -EOVERFLOW) { /* * We have an existing item in a leaf, split_leaf couldn't * expand it. That item might have or not a dir_item that * matches our target xattr, so lets check. */ ret = 0; btrfs_assert_tree_locked(path->nodes[0]); di = btrfs_match_dir_item_name(root, path, name, name_len); if (!di && !(flags & XATTR_REPLACE)) { ret = -ENOSPC; goto out; } } else if (ret == -EEXIST) { ret = 0; di = btrfs_match_dir_item_name(root, path, name, name_len); ASSERT(di); /* logic error */ } else if (ret) { goto out; } if (di && (flags & XATTR_CREATE)) { ret = -EEXIST; goto out; } if (di) { /* * We're doing a replace, and it must be atomic, that is, at * any point in time we have either the old or the new xattr * value in the tree. We don't want readers (getxattr and * listxattrs) to miss a value, this is specially important * for ACLs. */ const int slot = path->slots[0]; struct extent_buffer *leaf = path->nodes[0]; const u16 old_data_len = btrfs_dir_data_len(leaf, di); const u32 item_size = btrfs_item_size_nr(leaf, slot); const u32 data_size = sizeof(*di) + name_len + size; struct btrfs_item *item; unsigned long data_ptr; char *ptr; if (size > old_data_len) { if (btrfs_leaf_free_space(root, leaf) < (size - old_data_len)) { ret = -ENOSPC; goto out; } } if (old_data_len + name_len + sizeof(*di) == item_size) { /* No other xattrs packed in the same leaf item. */ if (size > old_data_len) btrfs_extend_item(root, path, size - old_data_len); else if (size < old_data_len) btrfs_truncate_item(root, path, data_size, 1); } else { /* There are other xattrs packed in the same item. */ ret = btrfs_delete_one_dir_name(trans, root, path, di); if (ret) goto out; btrfs_extend_item(root, path, data_size); } item = btrfs_item_nr(slot); ptr = btrfs_item_ptr(leaf, slot, char); ptr += btrfs_item_size(leaf, item) - data_size; di = (struct btrfs_dir_item *)ptr; btrfs_set_dir_data_len(leaf, di, size); data_ptr = ((unsigned long)(di + 1)) + name_len; write_extent_buffer(leaf, value, data_ptr, size); btrfs_mark_buffer_dirty(leaf); } else { /* * Insert, and we had space for the xattr, so path->slots[0] is * where our xattr dir_item is and btrfs_insert_xattr_item() * filled it. 
*/ } out: btrfs_free_path(path); return ret; } /* * @value: "" makes the attribute to empty, NULL removes it */ int __btrfs_setxattr(struct btrfs_trans_handle *trans, struct inode *inode, const char *name, const void *value, size_t size, int flags) { struct btrfs_root *root = BTRFS_I(inode)->root; int ret; if (trans) return do_setxattr(trans, inode, name, value, size, flags); trans = btrfs_start_transaction(root, 2); if (IS_ERR(trans)) return PTR_ERR(trans); ret = do_setxattr(trans, inode, name, value, size, flags); if (ret) goto out; inode_inc_iversion(inode); inode->i_ctime = CURRENT_TIME; set_bit(BTRFS_INODE_COPY_EVERYTHING, &BTRFS_I(inode)->runtime_flags); ret = btrfs_update_inode(trans, root, inode); BUG_ON(ret); out: btrfs_end_transaction(trans, root); return ret; } ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size) { struct btrfs_key key, found_key; struct inode *inode = dentry->d_inode; struct btrfs_root *root = BTRFS_I(inode)->root; struct btrfs_path *path; struct extent_buffer *leaf; struct btrfs_dir_item *di; int ret = 0, slot; size_t total_size = 0, size_left = size; unsigned long name_ptr; size_t name_len; /* * ok we want all objects associated with this id. * NOTE: we set key.offset = 0; because we want to start with the * first xattr that we find and walk forward */ key.objectid = btrfs_ino(inode); key.type = BTRFS_XATTR_ITEM_KEY; key.offset = 0; path = btrfs_alloc_path(); if (!path) return -ENOMEM; path->reada = 2; /* search for our xattrs */ ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); if (ret < 0) goto err; while (1) { leaf = path->nodes[0]; slot = path->slots[0]; /* this is where we start walking through the path */ if (slot >= btrfs_header_nritems(leaf)) { /* * if we've reached the last slot in this leaf we need * to go to the next leaf and reset everything */ ret = btrfs_next_leaf(root, path); if (ret < 0) goto err; else if (ret > 0) break; continue; } btrfs_item_key_to_cpu(leaf, &found_key, slot); /* check to make sure this item is what we want */ if (found_key.objectid != key.objectid) break; if (found_key.type != BTRFS_XATTR_ITEM_KEY) break; di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item); if (verify_dir_item(root, leaf, di)) goto next; name_len = btrfs_dir_name_len(leaf, di); total_size += name_len + 1; /* we are just looking for how big our buffer needs to be */ if (!size) goto next; if (!buffer || (name_len + 1) > size_left) { ret = -ERANGE; goto err; } name_ptr = (unsigned long)(di + 1); read_extent_buffer(leaf, buffer, name_ptr, name_len); buffer[name_len] = '\0'; size_left -= name_len + 1; buffer += name_len + 1; next: path->slots[0]++; } ret = total_size; err: btrfs_free_path(path); return ret; } /* * List of handlers for synthetic system.* attributes. All real ondisk * attributes are handled directly. */ const struct xattr_handler *btrfs_xattr_handlers[] = { #ifdef CONFIG_BTRFS_FS_POSIX_ACL &posix_acl_access_xattr_handler, &posix_acl_default_xattr_handler, #endif NULL, }; /* * Check if the attribute is in a supported namespace. * * This applied after the check for the synthetic attributes in the system * namespace. 
*/ static bool btrfs_is_valid_xattr(const char *name) { return !strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN) || !strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN) || !strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) || !strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN) || !strncmp(name, XATTR_BTRFS_PREFIX, XATTR_BTRFS_PREFIX_LEN); } ssize_t btrfs_getxattr(struct dentry *dentry, const char *name, void *buffer, size_t size) { /* * If this is a request for a synthetic attribute in the system.* * namespace use the generic infrastructure to resolve a handler * for it via sb->s_xattr. */ if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN)) return generic_getxattr(dentry, name, buffer, size); if (!btrfs_is_valid_xattr(name)) return -EOPNOTSUPP; return __btrfs_getxattr(dentry->d_inode, name, buffer, size); } int btrfs_setxattr(struct dentry *dentry, const char *name, const void *value, size_t size, int flags) { struct btrfs_root *root = BTRFS_I(dentry->d_inode)->root; /* * The permission on security.* and system.* is not checked * in permission(). */ if (btrfs_root_readonly(root)) return -EROFS; /* * If this is a request for a synthetic attribute in the system.* * namespace use the generic infrastructure to resolve a handler * for it via sb->s_xattr. */ if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN)) return generic_setxattr(dentry, name, value, size, flags); if (!btrfs_is_valid_xattr(name)) return -EOPNOTSUPP; if (!strncmp(name, XATTR_BTRFS_PREFIX, XATTR_BTRFS_PREFIX_LEN)) return btrfs_set_prop(dentry->d_inode, name, value, size, flags); if (size == 0) value = ""; /* empty EA, do not remove */ return __btrfs_setxattr(NULL, dentry->d_inode, name, value, size, flags); } int btrfs_removexattr(struct dentry *dentry, const char *name) { struct btrfs_root *root = BTRFS_I(dentry->d_inode)->root; /* * The permission on security.* and system.* is not checked * in permission(). */ if (btrfs_root_readonly(root)) return -EROFS; /* * If this is a request for a synthetic attribute in the system.* * namespace use the generic infrastructure to resolve a handler * for it via sb->s_xattr. */ if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN)) return generic_removexattr(dentry, name); if (!btrfs_is_valid_xattr(name)) return -EOPNOTSUPP; if (!strncmp(name, XATTR_BTRFS_PREFIX, XATTR_BTRFS_PREFIX_LEN)) return btrfs_set_prop(dentry->d_inode, name, NULL, 0, XATTR_REPLACE); return __btrfs_setxattr(NULL, dentry->d_inode, name, NULL, 0, XATTR_REPLACE); } static int btrfs_initxattrs(struct inode *inode, const struct xattr *xattr_array, void *fs_info) { const struct xattr *xattr; struct btrfs_trans_handle *trans = fs_info; char *name; int err = 0; for (xattr = xattr_array; xattr->name != NULL; xattr++) { name = kmalloc(XATTR_SECURITY_PREFIX_LEN + strlen(xattr->name) + 1, GFP_NOFS); if (!name) { err = -ENOMEM; break; } strcpy(name, XATTR_SECURITY_PREFIX); strcpy(name + XATTR_SECURITY_PREFIX_LEN, xattr->name); err = __btrfs_setxattr(trans, inode, name, xattr->value, xattr->value_len, 0); kfree(name); if (err < 0) break; } return err; } int btrfs_xattr_security_init(struct btrfs_trans_handle *trans, struct inode *inode, struct inode *dir, const struct qstr *qstr) { return security_inode_init_security(inode, dir, qstr, &btrfs_initxattrs, trans); }
static int do_setxattr(struct btrfs_trans_handle *trans, struct inode *inode, const char *name, const void *value, size_t size, int flags) { struct btrfs_dir_item *di; struct btrfs_root *root = BTRFS_I(inode)->root; struct btrfs_path *path; size_t name_len = strlen(name); int ret = 0; if (name_len + size > BTRFS_MAX_XATTR_SIZE(root)) return -ENOSPC; path = btrfs_alloc_path(); if (!path) return -ENOMEM; if (flags & XATTR_REPLACE) { di = btrfs_lookup_xattr(trans, root, path, btrfs_ino(inode), name, name_len, -1); if (IS_ERR(di)) { ret = PTR_ERR(di); goto out; } else if (!di) { ret = -ENODATA; goto out; } ret = btrfs_delete_one_dir_name(trans, root, path, di); if (ret) goto out; btrfs_release_path(path); /* * remove the attribute */ if (!value) goto out; } else { di = btrfs_lookup_xattr(NULL, root, path, btrfs_ino(inode), name, name_len, 0); if (IS_ERR(di)) { ret = PTR_ERR(di); goto out; } if (!di && !value) goto out; btrfs_release_path(path); } again: ret = btrfs_insert_xattr_item(trans, root, path, btrfs_ino(inode), name, name_len, value, size); /* * If we're setting an xattr to a new value but the new value is say * exactly BTRFS_MAX_XATTR_SIZE, we could end up with EOVERFLOW getting * back from split_leaf. This is because it thinks we'll be extending * the existing item size, but we're asking for enough space to add the * item itself. So if we get EOVERFLOW just set ret to EEXIST and let * the rest of the function figure it out. */ if (ret == -EOVERFLOW) ret = -EEXIST; if (ret == -EEXIST) { if (flags & XATTR_CREATE) goto out; /* * We can't use the path we already have since we won't have the * proper locking for a delete, so release the path and * re-lookup to delete the thing. */ btrfs_release_path(path); di = btrfs_lookup_xattr(trans, root, path, btrfs_ino(inode), name, name_len, -1); if (IS_ERR(di)) { ret = PTR_ERR(di); goto out; } else if (!di) { /* Shouldn't happen but just in case... */ btrfs_release_path(path); goto again; } ret = btrfs_delete_one_dir_name(trans, root, path, di); if (ret) goto out; /* * We have a value to set, so go back and try to insert it now. */ if (value) { btrfs_release_path(path); goto again; } } out: btrfs_free_path(path); return ret; }
static int do_setxattr(struct btrfs_trans_handle *trans, struct inode *inode, const char *name, const void *value, size_t size, int flags) { struct btrfs_dir_item *di = NULL; struct btrfs_root *root = BTRFS_I(inode)->root; struct btrfs_path *path; size_t name_len = strlen(name); int ret = 0; if (name_len + size > BTRFS_MAX_XATTR_SIZE(root)) return -ENOSPC; path = btrfs_alloc_path(); if (!path) return -ENOMEM; path->skip_release_on_error = 1; if (!value) { di = btrfs_lookup_xattr(trans, root, path, btrfs_ino(inode), name, name_len, -1); if (!di && (flags & XATTR_REPLACE)) ret = -ENODATA; else if (di) ret = btrfs_delete_one_dir_name(trans, root, path, di); goto out; } /* * For a replace we can't just do the insert blindly. * Do a lookup first (read-only btrfs_search_slot), and return if xattr * doesn't exist. If it exists, fall down below to the insert/replace * path - we can't race with a concurrent xattr delete, because the VFS * locks the inode's i_mutex before calling setxattr or removexattr. */ if (flags & XATTR_REPLACE) { ASSERT(mutex_is_locked(&inode->i_mutex)); di = btrfs_lookup_xattr(NULL, root, path, btrfs_ino(inode), name, name_len, 0); if (!di) { ret = -ENODATA; goto out; } btrfs_release_path(path); di = NULL; } ret = btrfs_insert_xattr_item(trans, root, path, btrfs_ino(inode), name, name_len, value, size); if (ret == -EOVERFLOW) { /* * We have an existing item in a leaf, split_leaf couldn't * expand it. That item might have or not a dir_item that * matches our target xattr, so lets check. */ ret = 0; btrfs_assert_tree_locked(path->nodes[0]); di = btrfs_match_dir_item_name(root, path, name, name_len); if (!di && !(flags & XATTR_REPLACE)) { ret = -ENOSPC; goto out; } } else if (ret == -EEXIST) { ret = 0; di = btrfs_match_dir_item_name(root, path, name, name_len); ASSERT(di); /* logic error */ } else if (ret) { goto out; } if (di && (flags & XATTR_CREATE)) { ret = -EEXIST; goto out; } if (di) { /* * We're doing a replace, and it must be atomic, that is, at * any point in time we have either the old or the new xattr * value in the tree. We don't want readers (getxattr and * listxattrs) to miss a value, this is specially important * for ACLs. */ const int slot = path->slots[0]; struct extent_buffer *leaf = path->nodes[0]; const u16 old_data_len = btrfs_dir_data_len(leaf, di); const u32 item_size = btrfs_item_size_nr(leaf, slot); const u32 data_size = sizeof(*di) + name_len + size; struct btrfs_item *item; unsigned long data_ptr; char *ptr; if (size > old_data_len) { if (btrfs_leaf_free_space(root, leaf) < (size - old_data_len)) { ret = -ENOSPC; goto out; } } if (old_data_len + name_len + sizeof(*di) == item_size) { /* No other xattrs packed in the same leaf item. */ if (size > old_data_len) btrfs_extend_item(root, path, size - old_data_len); else if (size < old_data_len) btrfs_truncate_item(root, path, data_size, 1); } else { /* There are other xattrs packed in the same item. */ ret = btrfs_delete_one_dir_name(trans, root, path, di); if (ret) goto out; btrfs_extend_item(root, path, data_size); } item = btrfs_item_nr(slot); ptr = btrfs_item_ptr(leaf, slot, char); ptr += btrfs_item_size(leaf, item) - data_size; di = (struct btrfs_dir_item *)ptr; btrfs_set_dir_data_len(leaf, di, size); data_ptr = ((unsigned long)(di + 1)) + name_len; write_extent_buffer(leaf, value, data_ptr, size); btrfs_mark_buffer_dirty(leaf); } else { /* * Insert, and we had space for the xattr, so path->slots[0] is * where our xattr dir_item is and btrfs_insert_xattr_item() * filled it. 
*/ } out: btrfs_free_path(path); return ret; }
{'added': [(32, '#include "locking.h"'), (95, '\tstruct btrfs_dir_item *di = NULL;'), (107, '\tpath->skip_release_on_error = 1;'), (108, ''), (109, '\tif (!value) {'), (110, '\t\tdi = btrfs_lookup_xattr(trans, root, path, btrfs_ino(inode),'), (111, '\t\t\t\t\tname, name_len, -1);'), (112, '\t\tif (!di && (flags & XATTR_REPLACE))'), (113, '\t\t\tret = -ENODATA;'), (114, '\t\telse if (di)'), (115, '\t\t\tret = btrfs_delete_one_dir_name(trans, root, path, di);'), (116, '\t\tgoto out;'), (117, '\t}'), (119, '\t/*'), (120, "\t * For a replace we can't just do the insert blindly."), (121, '\t * Do a lookup first (read-only btrfs_search_slot), and return if xattr'), (122, "\t * doesn't exist. If it exists, fall down below to the insert/replace"), (123, "\t * path - we can't race with a concurrent xattr delete, because the VFS"), (124, "\t * locks the inode's i_mutex before calling setxattr or removexattr."), (125, '\t */'), (127, '\t\tASSERT(mutex_is_locked(&inode->i_mutex));'), (128, '\t\tdi = btrfs_lookup_xattr(NULL, root, path, btrfs_ino(inode),'), (129, '\t\t\t\t\tname, name_len, 0);'), (130, '\t\tif (!di) {'), (135, '\t\tdi = NULL;'), (136, '\t}'), (138, '\tret = btrfs_insert_xattr_item(trans, root, path, btrfs_ino(inode),'), (139, '\t\t\t\t name, name_len, value, size);'), (140, '\tif (ret == -EOVERFLOW) {'), (142, "\t\t * We have an existing item in a leaf, split_leaf couldn't"), (143, '\t\t * expand it. That item might have or not a dir_item that'), (144, '\t\t * matches our target xattr, so lets check.'), (146, '\t\tret = 0;'), (147, '\t\tbtrfs_assert_tree_locked(path->nodes[0]);'), (148, '\t\tdi = btrfs_match_dir_item_name(root, path, name, name_len);'), (149, '\t\tif (!di && !(flags & XATTR_REPLACE)) {'), (150, '\t\t\tret = -ENOSPC;'), (153, '\t} else if (ret == -EEXIST) {'), (154, '\t\tret = 0;'), (155, '\t\tdi = btrfs_match_dir_item_name(root, path, name, name_len);'), (156, '\t\tASSERT(di); /* logic error */'), (157, '\t} else if (ret) {'), (158, '\t\tgoto out;'), (161, '\tif (di && (flags & XATTR_CREATE)) {'), (163, '\t\tgoto out;'), (164, '\t}'), (166, '\tif (di) {'), (168, "\t\t * We're doing a replace, and it must be atomic, that is, at"), (169, '\t\t * any point in time we have either the old or the new xattr'), (170, "\t\t * value in the tree. We don't want readers (getxattr and"), (171, '\t\t * listxattrs) to miss a value, this is specially important'), (172, '\t\t * for ACLs.'), (174, '\t\tconst int slot = path->slots[0];'), (175, '\t\tstruct extent_buffer *leaf = path->nodes[0];'), (176, '\t\tconst u16 old_data_len = btrfs_dir_data_len(leaf, di);'), (177, '\t\tconst u32 item_size = btrfs_item_size_nr(leaf, slot);'), (178, '\t\tconst u32 data_size = sizeof(*di) + name_len + size;'), (179, '\t\tstruct btrfs_item *item;'), (180, '\t\tunsigned long data_ptr;'), (181, '\t\tchar *ptr;'), (182, ''), (183, '\t\tif (size > old_data_len) {'), (184, '\t\t\tif (btrfs_leaf_free_space(root, leaf) <'), (185, '\t\t\t (size - old_data_len)) {'), (186, '\t\t\t\tret = -ENOSPC;'), (187, '\t\t\t\tgoto out;'), (188, '\t\t\t}'), (191, '\t\tif (old_data_len + name_len + sizeof(*di) == item_size) {'), (192, '\t\t\t/* No other xattrs packed in the same leaf item. */'), (193, '\t\t\tif (size > old_data_len)'), (194, '\t\t\t\tbtrfs_extend_item(root, path,'), (195, '\t\t\t\t\t\t size - old_data_len);'), (196, '\t\t\telse if (size < old_data_len)'), (197, '\t\t\t\tbtrfs_truncate_item(root, path, data_size, 1);'), (198, '\t\t} else {'), (199, '\t\t\t/* There are other xattrs packed in the same item. 
*/'), (200, '\t\t\tret = btrfs_delete_one_dir_name(trans, root, path, di);'), (201, '\t\t\tif (ret)'), (202, '\t\t\t\tgoto out;'), (203, '\t\t\tbtrfs_extend_item(root, path, data_size);'), (204, '\t\t}'), (206, '\t\titem = btrfs_item_nr(slot);'), (207, '\t\tptr = btrfs_item_ptr(leaf, slot, char);'), (208, '\t\tptr += btrfs_item_size(leaf, item) - data_size;'), (209, '\t\tdi = (struct btrfs_dir_item *)ptr;'), (210, '\t\tbtrfs_set_dir_data_len(leaf, di, size);'), (211, '\t\tdata_ptr = ((unsigned long)(di + 1)) + name_len;'), (212, '\t\twrite_extent_buffer(leaf, value, data_ptr, size);'), (213, '\t\tbtrfs_mark_buffer_dirty(leaf);'), (214, '\t} else {'), (216, '\t\t * Insert, and we had space for the xattr, so path->slots[0] is'), (217, '\t\t * where our xattr dir_item is and btrfs_insert_xattr_item()'), (218, '\t\t * filled it.')], 'deleted': [(94, '\tstruct btrfs_dir_item *di;'), (108, '\t\tdi = btrfs_lookup_xattr(trans, root, path, btrfs_ino(inode), name,'), (109, '\t\t\t\t\tname_len, -1);'), (110, '\t\tif (IS_ERR(di)) {'), (111, '\t\t\tret = PTR_ERR(di);'), (112, '\t\t\tgoto out;'), (113, '\t\t} else if (!di) {'), (117, '\t\tret = btrfs_delete_one_dir_name(trans, root, path, di);'), (118, '\t\tif (ret)'), (119, '\t\t\tgoto out;'), (123, '\t\t * remove the attribute'), (125, '\t\tif (!value)'), (126, '\t\t\tgoto out;'), (127, '\t} else {'), (128, '\t\tdi = btrfs_lookup_xattr(NULL, root, path, btrfs_ino(inode),'), (129, '\t\t\t\t\tname, name_len, 0);'), (130, '\t\tif (IS_ERR(di)) {'), (131, '\t\t\tret = PTR_ERR(di);'), (134, '\t\tif (!di && !value)'), (135, '\t\t\tgoto out;'), (136, '\t\tbtrfs_release_path(path);'), (139, 'again:'), (140, '\tret = btrfs_insert_xattr_item(trans, root, path, btrfs_ino(inode),'), (141, '\t\t\t\t name, name_len, value, size);'), (142, '\t/*'), (143, "\t * If we're setting an xattr to a new value but the new value is say"), (144, '\t * exactly BTRFS_MAX_XATTR_SIZE, we could end up with EOVERFLOW getting'), (145, "\t * back from split_leaf. This is because it thinks we'll be extending"), (146, "\t * the existing item size, but we're asking for enough space to add the"), (147, '\t * item itself. So if we get EOVERFLOW just set ret to EEXIST and let'), (148, '\t * the rest of the function figure it out.'), (149, '\t */'), (150, '\tif (ret == -EOVERFLOW)'), (153, '\tif (ret == -EEXIST) {'), (154, '\t\tif (flags & XATTR_CREATE)'), (155, '\t\t\tgoto out;'), (157, "\t\t * We can't use the path we already have since we won't have the"), (158, '\t\t * proper locking for a delete, so release the path and'), (159, '\t\t * re-lookup to delete the thing.'), (161, '\t\tbtrfs_release_path(path);'), (162, '\t\tdi = btrfs_lookup_xattr(trans, root, path, btrfs_ino(inode),'), (163, '\t\t\t\t\tname, name_len, -1);'), (164, '\t\tif (IS_ERR(di)) {'), (165, '\t\t\tret = PTR_ERR(di);'), (166, '\t\t\tgoto out;'), (167, '\t\t} else if (!di) {'), (168, "\t\t\t/* Shouldn't happen but just in case... */"), (169, '\t\t\tbtrfs_release_path(path);'), (170, '\t\t\tgoto again;'), (173, '\t\tret = btrfs_delete_one_dir_name(trans, root, path, di);'), (174, '\t\tif (ret)'), (175, '\t\t\tgoto out;'), (178, '\t\t * We have a value to set, so go back and try to insert it now.'), (180, '\t\tif (value) {'), (181, '\t\t\tbtrfs_release_path(path);'), (182, '\t\t\tgoto again;'), (183, '\t\t}')]}
93
57
323
2,090
https://github.com/torvalds/linux
CVE-2014-9710
['CWE-362']
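
A note on the record above: the CWE-362 race in the old do_setxattr() comes from implementing replace as delete-then-insert with the path released in between, so a concurrent getxattr()/listxattr() could momentarily see the attribute missing; the rewritten function updates the value inside the existing leaf item, so readers observe either the old or the new value, never neither (the in-code comment in code_after says exactly this). Below is a minimal user-space sketch of the same publish-atomically idea using an atomic pointer swap; xattr_slot, xattr_replace and the immediate free() are illustrative simplifications, not the btrfs mechanism (real kernel code defers reclamation).

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct xattr_slot {
	_Atomic(char *) value;	/* readers load this; never NULL mid-replace */
};

static int xattr_replace(struct xattr_slot *s, const char *newval)
{
	char *copy = strdup(newval);
	if (!copy)
		return -1;
	/* Publish in one step: a concurrent reader sees the old buffer or
	 * the new one, never a "deleted, not yet re-inserted" window. */
	char *old = atomic_exchange(&s->value, copy);
	free(old);	/* a real kernel would defer this (RCU-style) */
	return 0;
}

static void xattr_get(struct xattr_slot *s, char *buf, size_t len)
{
	char *v = atomic_load_explicit(&s->value, memory_order_acquire);
	snprintf(buf, len, "%s", v ? v : "<missing>");
}

int main(void)
{
	struct xattr_slot s;
	atomic_init(&s.value, strdup("old"));
	xattr_replace(&s, "new");

	char buf[16];
	xattr_get(&s, buf, sizeof buf);
	printf("%s\n", buf);	/* "new" */

	free(atomic_load(&s.value));
	return 0;
}
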
elf.c
store_versioninfo_gnu_verdef
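
The code_before blob that follows parses ELF headers with READ8/READ16/READ32/READ64 macros (an endian-aware r_read_ble* load plus an index advance) and guards each structure with explicit size checks against bin->size before reading. As a point of comparison, here is a minimal sketch of that bounded, endianness-aware read pattern in plain C; read_u16/read_u32 and the bool-returning failure convention are assumptions of this sketch, not radare2's actual API.

#include <inttypes.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Read a 16-bit value at *off with the requested endianness; refuse to
 * run past the buffer, and advance the offset only on success. */
static bool read_u16(const uint8_t *b, size_t sz, size_t *off,
		     bool big_endian, uint16_t *out)
{
	if (sz < 2 || *off > sz - 2)
		return false;
	const uint8_t *p = b + *off;
	*out = big_endian ? (uint16_t)((p[0] << 8) | p[1])
			  : (uint16_t)(p[0] | (p[1] << 8));
	*off += 2;
	return true;
}

static bool read_u32(const uint8_t *b, size_t sz, size_t *off,
		     bool big_endian, uint32_t *out)
{
	if (sz < 4 || *off > sz - 4)
		return false;
	const uint8_t *p = b + *off;
	*out = big_endian
	    ? ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	      ((uint32_t)p[2] << 8)  |  (uint32_t)p[3]
	    : ((uint32_t)p[3] << 24) | ((uint32_t)p[2] << 16) |
	      ((uint32_t)p[1] << 8)  |  (uint32_t)p[0];
	*off += 4;
	return true;
}

int main(void)
{
	/* Fake big-endian header: a 16-bit type then a 32-bit magic. */
	const uint8_t hdr[] = { 0x00, 0x02, 0xDE, 0xAD, 0xBE, 0xEF };
	size_t off = 0;
	uint16_t e_type;
	uint32_t magic;

	if (read_u16(hdr, sizeof hdr, &off, true, &e_type) &&
	    read_u32(hdr, sizeof hdr, &off, true, &magic))
		printf("e_type=%u magic=0x%08" PRIX32 "\n",
		       (unsigned)e_type, magic);
	/* prints: e_type=2 magic=0xDEADBEEF */
	return 0;
}
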
/* radare - LGPL - Copyright 2008-2017 - nibble, pancake, alvaro_fe */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <assert.h> #include <r_types.h> #include <r_util.h> #include "elf.h" #ifdef IFDBG #undef IFDBG #endif #define DO_THE_DBG 0 #define IFDBG if (DO_THE_DBG) #define IFINT if (0) #define ELF_PAGE_MASK 0xFFFFFFFFFFFFF000LL #define ELF_PAGE_SIZE 12 #define R_ELF_NO_RELRO 0 #define R_ELF_PART_RELRO 1 #define R_ELF_FULL_RELRO 2 #define bprintf if(bin->verbose)eprintf #define READ8(x, i) r_read_ble8(x + i); i += 1; #define READ16(x, i) r_read_ble16(x + i, bin->endian); i += 2; #define READ32(x, i) r_read_ble32(x + i, bin->endian); i += 4; #define READ64(x, i) r_read_ble64(x + i, bin->endian); i += 8; #define GROWTH_FACTOR (1.5) static inline int __strnlen(const char *str, int len) { int l = 0; while (IS_PRINTABLE (*str) && --len) { if (((ut8)*str) == 0xff) { break; } str++; l++; } return l + 1; } static int handle_e_ident(ELFOBJ *bin) { return !strncmp ((char *)bin->ehdr.e_ident, ELFMAG, SELFMAG) || !strncmp ((char *)bin->ehdr.e_ident, CGCMAG, SCGCMAG); } static int init_ehdr(ELFOBJ *bin) { ut8 e_ident[EI_NIDENT]; ut8 ehdr[sizeof (Elf_(Ehdr))] = {0}; int i, len; if (r_buf_read_at (bin->b, 0, e_ident, EI_NIDENT) == -1) { bprintf ("Warning: read (magic)\n"); return false; } sdb_set (bin->kv, "elf_type.cparse", "enum elf_type { ET_NONE=0, ET_REL=1," " ET_EXEC=2, ET_DYN=3, ET_CORE=4, ET_LOOS=0xfe00, ET_HIOS=0xfeff," " ET_LOPROC=0xff00, ET_HIPROC=0xffff };", 0); sdb_set (bin->kv, "elf_machine.cparse", "enum elf_machine{EM_NONE=0, EM_M32=1," " EM_SPARC=2, EM_386=3, EM_68K=4, EM_88K=5, EM_486=6, " " EM_860=7, EM_MIPS=8, EM_S370=9, EM_MIPS_RS3_LE=10, EM_RS6000=11," " EM_UNKNOWN12=12, EM_UNKNOWN13=13, EM_UNKNOWN14=14, " " EM_PA_RISC=15, EM_PARISC=EM_PA_RISC, EM_nCUBE=16, EM_VPP500=17," " EM_SPARC32PLUS=18, EM_960=19, EM_PPC=20, EM_PPC64=21, " " EM_S390=22, EM_UNKNOWN22=EM_S390, EM_UNKNOWN23=23, EM_UNKNOWN24=24," " EM_UNKNOWN25=25, EM_UNKNOWN26=26, EM_UNKNOWN27=27, EM_UNKNOWN28=28," " EM_UNKNOWN29=29, EM_UNKNOWN30=30, EM_UNKNOWN31=31, EM_UNKNOWN32=32," " EM_UNKNOWN33=33, EM_UNKNOWN34=34, EM_UNKNOWN35=35, EM_V800=36," " EM_FR20=37, EM_RH32=38, EM_RCE=39, EM_ARM=40, EM_ALPHA=41, EM_SH=42," " EM_SPARCV9=43, EM_TRICORE=44, EM_ARC=45, EM_H8_300=46, EM_H8_300H=47," " EM_H8S=48, EM_H8_500=49, EM_IA_64=50, EM_MIPS_X=51, EM_COLDFIRE=52," " EM_68HC12=53, EM_MMA=54, EM_PCP=55, EM_NCPU=56, EM_NDR1=57," " EM_STARCORE=58, EM_ME16=59, EM_ST100=60, EM_TINYJ=61, EM_AMD64=62," " EM_X86_64=EM_AMD64, EM_PDSP=63, EM_UNKNOWN64=64, EM_UNKNOWN65=65," " EM_FX66=66, EM_ST9PLUS=67, EM_ST7=68, EM_68HC16=69, EM_68HC11=70," " EM_68HC08=71, EM_68HC05=72, EM_SVX=73, EM_ST19=74, EM_VAX=75, " " EM_CRIS=76, EM_JAVELIN=77, EM_FIREPATH=78, EM_ZSP=79, EM_MMIX=80," " EM_HUANY=81, EM_PRISM=82, EM_AVR=83, EM_FR30=84, EM_D10V=85, EM_D30V=86," " EM_V850=87, EM_M32R=88, EM_MN10300=89, EM_MN10200=90, EM_PJ=91," " EM_OPENRISC=92, EM_ARC_A5=93, EM_XTENSA=94, EM_NUM=95};", 0); sdb_num_set (bin->kv, "elf_header.offset", 0, 0); sdb_num_set (bin->kv, "elf_header.size", sizeof (Elf_(Ehdr)), 0); #if R_BIN_ELF64 sdb_set (bin->kv, "elf_header.format", "[16]z[2]E[2]Exqqqxwwwwww" " ident (elf_type)type (elf_machine)machine version entry phoff shoff flags ehsize" " phentsize phnum shentsize shnum shstrndx", 0); #else sdb_set (bin->kv, "elf_header.format", "[16]z[2]E[2]Exxxxxwwwwww" " ident (elf_type)type (elf_machine)machine version entry phoff shoff flags ehsize" " phentsize phnum shentsize shnum shstrndx", 0); #endif 
bin->endian = (e_ident[EI_DATA] == ELFDATA2MSB)? 1: 0; memset (&bin->ehdr, 0, sizeof (Elf_(Ehdr))); len = r_buf_read_at (bin->b, 0, ehdr, sizeof (Elf_(Ehdr))); if (len < 1) { bprintf ("Warning: read (ehdr)\n"); return false; } memcpy (&bin->ehdr.e_ident, ehdr, 16); i = 16; bin->ehdr.e_type = READ16 (ehdr, i) bin->ehdr.e_machine = READ16 (ehdr, i) bin->ehdr.e_version = READ32 (ehdr, i) #if R_BIN_ELF64 bin->ehdr.e_entry = READ64 (ehdr, i) bin->ehdr.e_phoff = READ64 (ehdr, i) bin->ehdr.e_shoff = READ64 (ehdr, i) #else bin->ehdr.e_entry = READ32 (ehdr, i) bin->ehdr.e_phoff = READ32 (ehdr, i) bin->ehdr.e_shoff = READ32 (ehdr, i) #endif bin->ehdr.e_flags = READ32 (ehdr, i) bin->ehdr.e_ehsize = READ16 (ehdr, i) bin->ehdr.e_phentsize = READ16 (ehdr, i) bin->ehdr.e_phnum = READ16 (ehdr, i) bin->ehdr.e_shentsize = READ16 (ehdr, i) bin->ehdr.e_shnum = READ16 (ehdr, i) bin->ehdr.e_shstrndx = READ16 (ehdr, i) return handle_e_ident (bin); // Usage example: // > td `k bin/cur/info/elf_type.cparse`; td `k bin/cur/info/elf_machine.cparse` // > pf `k bin/cur/info/elf_header.format` @ `k bin/cur/info/elf_header.offset` } static int init_phdr(ELFOBJ *bin) { ut32 phdr_size; ut8 phdr[sizeof (Elf_(Phdr))] = {0}; int i, j, len; if (!bin->ehdr.e_phnum) { return false; } if (bin->phdr) { return true; } if (!UT32_MUL (&phdr_size, (ut32)bin->ehdr.e_phnum, sizeof (Elf_(Phdr)))) { return false; } if (!phdr_size) { return false; } if (phdr_size > bin->size) { return false; } if (phdr_size > (ut32)bin->size) { return false; } if (bin->ehdr.e_phoff > bin->size) { return false; } if (bin->ehdr.e_phoff + phdr_size > bin->size) { return false; } if (!(bin->phdr = calloc (phdr_size, 1))) { perror ("malloc (phdr)"); return false; } for (i = 0; i < bin->ehdr.e_phnum; i++) { j = 0; len = r_buf_read_at (bin->b, bin->ehdr.e_phoff + i * sizeof (Elf_(Phdr)), phdr, sizeof (Elf_(Phdr))); if (len < 1) { bprintf ("Warning: read (phdr)\n"); R_FREE (bin->phdr); return false; } bin->phdr[i].p_type = READ32 (phdr, j) #if R_BIN_ELF64 bin->phdr[i].p_flags = READ32 (phdr, j) bin->phdr[i].p_offset = READ64 (phdr, j) bin->phdr[i].p_vaddr = READ64 (phdr, j) bin->phdr[i].p_paddr = READ64 (phdr, j) bin->phdr[i].p_filesz = READ64 (phdr, j) bin->phdr[i].p_memsz = READ64 (phdr, j) bin->phdr[i].p_align = READ64 (phdr, j) #else bin->phdr[i].p_offset = READ32 (phdr, j) bin->phdr[i].p_vaddr = READ32 (phdr, j) bin->phdr[i].p_paddr = READ32 (phdr, j) bin->phdr[i].p_filesz = READ32 (phdr, j) bin->phdr[i].p_memsz = READ32 (phdr, j) bin->phdr[i].p_flags = READ32 (phdr, j) bin->phdr[i].p_align = READ32 (phdr, j) #endif } sdb_num_set (bin->kv, "elf_phdr.offset", bin->ehdr.e_phoff, 0); sdb_num_set (bin->kv, "elf_phdr.size", sizeof (Elf_(Phdr)), 0); sdb_set (bin->kv, "elf_p_type.cparse", "enum elf_p_type {PT_NULL=0,PT_LOAD=1,PT_DYNAMIC=2," "PT_INTERP=3,PT_NOTE=4,PT_SHLIB=5,PT_PHDR=6,PT_LOOS=0x60000000," "PT_HIOS=0x6fffffff,PT_LOPROC=0x70000000,PT_HIPROC=0x7fffffff};", 0); sdb_set (bin->kv, "elf_p_flags.cparse", "enum elf_p_flags {PF_None=0,PF_Exec=1," "PF_Write=2,PF_Write_Exec=3,PF_Read=4,PF_Read_Exec=5,PF_Read_Write=6," "PF_Read_Write_Exec=7};", 0); #if R_BIN_ELF64 sdb_set (bin->kv, "elf_phdr.format", "[4]E[4]Eqqqqqq (elf_p_type)type (elf_p_flags)flags" " offset vaddr paddr filesz memsz align", 0); #else sdb_set (bin->kv, "elf_phdr.format", "[4]Exxxxx[4]Ex (elf_p_type)type offset vaddr paddr" " filesz memsz (elf_p_flags)flags align", 0); #endif return true; // Usage example: // > td `k bin/cur/info/elf_p_type.cparse`; td `k bin/cur/info/elf_p_flags.cparse` // > 
pf `k bin/cur/info/elf_phdr.format` @ `k bin/cur/info/elf_phdr.offset` } static int init_shdr(ELFOBJ *bin) { ut32 shdr_size; ut8 shdr[sizeof (Elf_(Shdr))] = {0}; int i, j, len; if (!bin || bin->shdr) { return true; } if (!UT32_MUL (&shdr_size, bin->ehdr.e_shnum, sizeof (Elf_(Shdr)))) { return false; } if (shdr_size < 1) { return false; } if (shdr_size > bin->size) { return false; } if (bin->ehdr.e_shoff > bin->size) { return false; } if (bin->ehdr.e_shoff + shdr_size > bin->size) { return false; } if (!(bin->shdr = calloc (1, shdr_size + 1))) { perror ("malloc (shdr)"); return false; } sdb_num_set (bin->kv, "elf_shdr.offset", bin->ehdr.e_shoff, 0); sdb_num_set (bin->kv, "elf_shdr.size", sizeof (Elf_(Shdr)), 0); sdb_set (bin->kv, "elf_s_type.cparse", "enum elf_s_type {SHT_NULL=0,SHT_PROGBITS=1," "SHT_SYMTAB=2,SHT_STRTAB=3,SHT_RELA=4,SHT_HASH=5,SHT_DYNAMIC=6,SHT_NOTE=7," "SHT_NOBITS=8,SHT_REL=9,SHT_SHLIB=10,SHT_DYNSYM=11,SHT_LOOS=0x60000000," "SHT_HIOS=0x6fffffff,SHT_LOPROC=0x70000000,SHT_HIPROC=0x7fffffff};", 0); for (i = 0; i < bin->ehdr.e_shnum; i++) { j = 0; len = r_buf_read_at (bin->b, bin->ehdr.e_shoff + i * sizeof (Elf_(Shdr)), shdr, sizeof (Elf_(Shdr))); if (len < 1) { bprintf ("Warning: read (shdr) at 0x%"PFMT64x"\n", (ut64) bin->ehdr.e_shoff); R_FREE (bin->shdr); return false; } bin->shdr[i].sh_name = READ32 (shdr, j) bin->shdr[i].sh_type = READ32 (shdr, j) #if R_BIN_ELF64 bin->shdr[i].sh_flags = READ64 (shdr, j) bin->shdr[i].sh_addr = READ64 (shdr, j) bin->shdr[i].sh_offset = READ64 (shdr, j) bin->shdr[i].sh_size = READ64 (shdr, j) bin->shdr[i].sh_link = READ32 (shdr, j) bin->shdr[i].sh_info = READ32 (shdr, j) bin->shdr[i].sh_addralign = READ64 (shdr, j) bin->shdr[i].sh_entsize = READ64 (shdr, j) #else bin->shdr[i].sh_flags = READ32 (shdr, j) bin->shdr[i].sh_addr = READ32 (shdr, j) bin->shdr[i].sh_offset = READ32 (shdr, j) bin->shdr[i].sh_size = READ32 (shdr, j) bin->shdr[i].sh_link = READ32 (shdr, j) bin->shdr[i].sh_info = READ32 (shdr, j) bin->shdr[i].sh_addralign = READ32 (shdr, j) bin->shdr[i].sh_entsize = READ32 (shdr, j) #endif } #if R_BIN_ELF64 sdb_set (bin->kv, "elf_s_flags_64.cparse", "enum elf_s_flags_64 {SF64_None=0,SF64_Exec=1," "SF64_Alloc=2,SF64_Alloc_Exec=3,SF64_Write=4,SF64_Write_Exec=5," "SF64_Write_Alloc=6,SF64_Write_Alloc_Exec=7};", 0); sdb_set (bin->kv, "elf_shdr.format", "x[4]E[8]Eqqqxxqq name (elf_s_type)type" " (elf_s_flags_64)flags addr offset size link info addralign entsize", 0); #else sdb_set (bin->kv, "elf_s_flags_32.cparse", "enum elf_s_flags_32 {SF32_None=0,SF32_Exec=1," "SF32_Alloc=2,SF32_Alloc_Exec=3,SF32_Write=4,SF32_Write_Exec=5," "SF32_Write_Alloc=6,SF32_Write_Alloc_Exec=7};", 0); sdb_set (bin->kv, "elf_shdr.format", "x[4]E[4]Exxxxxxx name (elf_s_type)type" " (elf_s_flags_32)flags addr offset size link info addralign entsize", 0); #endif return true; // Usage example: // > td `k bin/cur/info/elf_s_type.cparse`; td `k bin/cur/info/elf_s_flags_64.cparse` // > pf `k bin/cur/info/elf_shdr.format` @ `k bin/cur/info/elf_shdr.offset` } static int init_strtab(ELFOBJ *bin) { if (bin->strtab || !bin->shdr) { return false; } if (bin->ehdr.e_shstrndx != SHN_UNDEF && (bin->ehdr.e_shstrndx >= bin->ehdr.e_shnum || (bin->ehdr.e_shstrndx >= SHN_LORESERVE && bin->ehdr.e_shstrndx < SHN_HIRESERVE))) return false; /* sh_size must be lower than UT32_MAX and not equal to zero, to avoid bugs on malloc() */ if (bin->shdr[bin->ehdr.e_shstrndx].sh_size > UT32_MAX) { return false; } if (!bin->shdr[bin->ehdr.e_shstrndx].sh_size) { return false; } bin->shstrtab_section = 
bin->strtab_section = &bin->shdr[bin->ehdr.e_shstrndx]; bin->shstrtab_size = bin->strtab_section->sh_size; if (bin->shstrtab_size > bin->size) { return false; } if (!(bin->shstrtab = calloc (1, bin->shstrtab_size + 1))) { perror ("malloc"); bin->shstrtab = NULL; return false; } if (bin->shstrtab_section->sh_offset > bin->size) { R_FREE (bin->shstrtab); return false; } if (bin->shstrtab_section->sh_offset + bin->shstrtab_section->sh_size > bin->size) { R_FREE (bin->shstrtab); return false; } if (r_buf_read_at (bin->b, bin->shstrtab_section->sh_offset, (ut8*)bin->shstrtab, bin->shstrtab_section->sh_size + 1) < 1) { bprintf ("Warning: read (shstrtab) at 0x%"PFMT64x"\n", (ut64) bin->shstrtab_section->sh_offset); R_FREE (bin->shstrtab); return false; } bin->shstrtab[bin->shstrtab_section->sh_size] = '\0'; sdb_num_set (bin->kv, "elf_shstrtab.offset", bin->shstrtab_section->sh_offset, 0); sdb_num_set (bin->kv, "elf_shstrtab.size", bin->shstrtab_section->sh_size, 0); return true; } static int init_dynamic_section(struct Elf_(r_bin_elf_obj_t) *bin) { Elf_(Dyn) *dyn = NULL; Elf_(Dyn) d = {0}; Elf_(Addr) strtabaddr = 0; ut64 offset = 0; char *strtab = NULL; size_t relentry = 0, strsize = 0; int entries; int i, j, len, r; ut8 sdyn[sizeof (Elf_(Dyn))] = {0}; ut32 dyn_size = 0; if (!bin || !bin->phdr || !bin->ehdr.e_phnum) { return false; } for (i = 0; i < bin->ehdr.e_phnum ; i++) { if (bin->phdr[i].p_type == PT_DYNAMIC) { dyn_size = bin->phdr[i].p_filesz; break; } } if (i == bin->ehdr.e_phnum) { return false; } if (bin->phdr[i].p_filesz > bin->size) { return false; } if (bin->phdr[i].p_offset > bin->size) { return false; } if (bin->phdr[i].p_offset + sizeof(Elf_(Dyn)) > bin->size) { return false; } for (entries = 0; entries < (dyn_size / sizeof (Elf_(Dyn))); entries++) { j = 0; len = r_buf_read_at (bin->b, bin->phdr[i].p_offset + entries * sizeof (Elf_(Dyn)), sdyn, sizeof (Elf_(Dyn))); if (len < 1) { goto beach; }
#if R_BIN_ELF64
d.d_tag = READ64 (sdyn, j)
#else
d.d_tag = READ32 (sdyn, j)
#endif
if (d.d_tag == DT_NULL) { break; } } if (entries < 1) { return false; } dyn = (Elf_(Dyn)*)calloc (entries, sizeof (Elf_(Dyn))); if (!dyn) { return false; } if (!UT32_MUL (&dyn_size, entries, sizeof (Elf_(Dyn)))) { goto beach; } if (!dyn_size) { goto beach; } offset = Elf_(r_bin_elf_v2p) (bin, bin->phdr[i].p_vaddr); if (offset > bin->size || offset + dyn_size > bin->size) { goto beach; } for (i = 0; i < entries; i++) { j = 0; len = r_buf_read_at (bin->b, offset + i * sizeof (Elf_(Dyn)), sdyn, sizeof (Elf_(Dyn))); if (len < 1) { bprintf ("Warning: read (dyn)\n"); goto beach; }
#if R_BIN_ELF64
dyn[i].d_tag = READ64 (sdyn, j) dyn[i].d_un.d_ptr = READ64 (sdyn, j)
#else
dyn[i].d_tag = READ32 (sdyn, j) dyn[i].d_un.d_ptr = READ32 (sdyn, j)
#endif
switch (dyn[i].d_tag) { case DT_STRTAB: strtabaddr = Elf_(r_bin_elf_v2p) (bin, dyn[i].d_un.d_ptr); break; case DT_STRSZ: strsize = dyn[i].d_un.d_val; break; case DT_PLTREL: bin->is_rela = dyn[i].d_un.d_val; break; case DT_RELAENT: relentry = dyn[i].d_un.d_val; break; default: if ((dyn[i].d_tag >= DT_VERSYM) && (dyn[i].d_tag <= DT_VERNEEDNUM)) { bin->version_info[DT_VERSIONTAGIDX (dyn[i].d_tag)] = dyn[i].d_un.d_val; } break; } } if (!bin->is_rela) { bin->is_rela = sizeof (Elf_(Rela)) == relentry?
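/*
 * When DT_PLTREL is absent, the Rel/Rela flavour is inferred from the
 * DT_RELAENT entry size: sizeof (Elf_(Rela)) is 24 bytes on ELF64 and
 * 12 on ELF32 (r_offset, r_info, r_addend), while Elf_(Rel) omits the
 * addend. The ternary completed below picks the matching DT_* tag:
 */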
DT_RELA : DT_REL; } if (!strtabaddr || strtabaddr > bin->size || strsize > ST32_MAX || !strsize || strsize > bin->size) { if (!strtabaddr) { bprintf ("Warning: section.shstrtab not found or invalid\n"); } goto beach; } strtab = (char *)calloc (1, strsize + 1); if (!strtab) { goto beach; } if (strtabaddr + strsize > bin->size) { free (strtab); goto beach; } r = r_buf_read_at (bin->b, strtabaddr, (ut8 *)strtab, strsize); if (r < 1) { free (strtab); goto beach; } bin->dyn_buf = dyn; bin->dyn_entries = entries; bin->strtab = strtab; bin->strtab_size = strsize; r = Elf_(r_bin_elf_has_relro)(bin); switch (r) { case R_ELF_FULL_RELRO: sdb_set (bin->kv, "elf.relro", "full", 0); break; case R_ELF_PART_RELRO: sdb_set (bin->kv, "elf.relro", "partial", 0); break; default: sdb_set (bin->kv, "elf.relro", "no", 0); break; } sdb_num_set (bin->kv, "elf_strtab.offset", strtabaddr, 0); sdb_num_set (bin->kv, "elf_strtab.size", strsize, 0); return true; beach: free (dyn); return false; } static RBinElfSection* get_section_by_name(ELFOBJ *bin, const char *section_name) { int i; if (!bin->g_sections) { return NULL; } for (i = 0; !bin->g_sections[i].last; i++) { if (!strncmp (bin->g_sections[i].name, section_name, ELF_STRING_LENGTH-1)) { return &bin->g_sections[i]; } } return NULL; } static char *get_ver_flags(ut32 flags) { static char buff[32]; buff[0] = 0; if (!flags) { return "none"; } if (flags & VER_FLG_BASE) { strcpy (buff, "BASE "); } if (flags & VER_FLG_WEAK) { if (flags & VER_FLG_BASE) { strcat (buff, "| "); } strcat (buff, "WEAK "); } if (flags & ~(VER_FLG_BASE | VER_FLG_WEAK)) { strcat (buff, "| <unknown>"); } return buff; } static Sdb *store_versioninfo_gnu_versym(ELFOBJ *bin, Elf_(Shdr) *shdr, int sz) { int i; const ut64 num_entries = sz / sizeof (Elf_(Versym)); const char *section_name = ""; const char *link_section_name = ""; Elf_(Shdr) *link_shdr = NULL; Sdb *sdb = sdb_new0(); if (!sdb) { return NULL; } if (!bin->version_info[DT_VERSIONTAGIDX (DT_VERSYM)]) { sdb_free (sdb); return NULL; } if (shdr->sh_link > bin->ehdr.e_shnum) { sdb_free (sdb); return NULL; } link_shdr = &bin->shdr[shdr->sh_link]; ut8 *edata = (ut8*) calloc (R_MAX (1, num_entries), sizeof (ut16)); if (!edata) { sdb_free (sdb); return NULL; } ut16 *data = (ut16*) calloc (R_MAX (1, num_entries), sizeof (ut16)); if (!data) { free (edata); sdb_free (sdb); return NULL; } ut64 off = Elf_(r_bin_elf_v2p) (bin, bin->version_info[DT_VERSIONTAGIDX (DT_VERSYM)]); if (bin->shstrtab && shdr->sh_name < bin->shstrtab_size) { section_name = &bin->shstrtab[shdr->sh_name]; } if (bin->shstrtab && link_shdr->sh_name < bin->shstrtab_size) { link_section_name = &bin->shstrtab[link_shdr->sh_name]; } r_buf_read_at (bin->b, off, edata, sizeof (ut16) * num_entries); sdb_set (sdb, "section_name", section_name, 0); sdb_num_set (sdb, "num_entries", num_entries, 0); sdb_num_set (sdb, "addr", shdr->sh_addr, 0); sdb_num_set (sdb, "offset", shdr->sh_offset, 0); sdb_num_set (sdb, "link", shdr->sh_link, 0); sdb_set (sdb, "link_section_name", link_section_name, 0); for (i = num_entries; i--;) { data[i] = r_read_ble16 (&edata[i * sizeof (ut16)], bin->endian); } R_FREE (edata); for (i = 0; i < num_entries; i += 4) { int j; int check_def; char key[32] = {0}; Sdb *sdb_entry = sdb_new0 (); snprintf (key, sizeof (key), "entry%d", i / 4); sdb_ns_set (sdb, key, sdb_entry); sdb_num_set (sdb_entry, "idx", i, 0); for (j = 0; (j < 4) && (i + j) < num_entries; ++j) { int k; char *tmp_val = NULL; snprintf (key, sizeof (key), "value%d", j); switch (data[i + j]) { case 0: sdb_set 
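/*
 * Versym decoding: index 0 marks a local symbol and 1 the global/base
 * version; any other value indexes into the verneed/verdef chains.
 * Bit 15 (0x8000) flags a hidden symbol, which is why entries are
 * masked with 0x7FFF before being matched against vna_other and
 * vd_ndx below.
 */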
(sdb_entry, key, "0 (*local*)", 0); break; case 1: sdb_set (sdb_entry, key, "1 (*global*)", 0); break; default: tmp_val = sdb_fmt (0, "%x ", data[i+j] & 0x7FFF); check_def = true; if (bin->version_info[DT_VERSIONTAGIDX (DT_VERNEED)]) { Elf_(Verneed) vn; ut8 svn[sizeof (Elf_(Verneed))] = {0}; ut64 offset = Elf_(r_bin_elf_v2p) (bin, bin->version_info[DT_VERSIONTAGIDX (DT_VERNEED)]); do { Elf_(Vernaux) vna; ut8 svna[sizeof (Elf_(Vernaux))] = {0}; ut64 a_off; if (offset > bin->size || offset + sizeof (vn) > bin->size) { goto beach; } if (r_buf_read_at (bin->b, offset, svn, sizeof (svn)) < 0) { bprintf ("Warning: Cannot read Verneed for Versym\n"); goto beach; } k = 0; vn.vn_version = READ16 (svn, k) vn.vn_cnt = READ16 (svn, k) vn.vn_file = READ32 (svn, k) vn.vn_aux = READ32 (svn, k) vn.vn_next = READ32 (svn, k) a_off = offset + vn.vn_aux; do { if (a_off > bin->size || a_off + sizeof (vna) > bin->size) { goto beach; } if (r_buf_read_at (bin->b, a_off, svna, sizeof (svna)) < 0) { bprintf ("Warning: Cannot read Vernaux for Versym\n"); goto beach; } k = 0; vna.vna_hash = READ32 (svna, k) vna.vna_flags = READ16 (svna, k) vna.vna_other = READ16 (svna, k) vna.vna_name = READ32 (svna, k) vna.vna_next = READ32 (svna, k) a_off += vna.vna_next; } while (vna.vna_other != data[i + j] && vna.vna_next != 0); if (vna.vna_other == data[i + j]) { if (vna.vna_name > bin->strtab_size) { goto beach; } sdb_set (sdb_entry, key, sdb_fmt (0, "%s(%s)", tmp_val, bin->strtab + vna.vna_name), 0); check_def = false; break; } offset += vn.vn_next; } while (vn.vn_next); } ut64 vinfoaddr = bin->version_info[DT_VERSIONTAGIDX (DT_VERDEF)]; if (check_def && data[i + j] != 0x8001 && vinfoaddr) { Elf_(Verdef) vd; ut8 svd[sizeof (Elf_(Verdef))] = {0}; ut64 offset = Elf_(r_bin_elf_v2p) (bin, vinfoaddr); if (offset > bin->size || offset + sizeof (vd) > bin->size) { goto beach; } do { if (r_buf_read_at (bin->b, offset, svd, sizeof (svd)) < 0) { bprintf ("Warning: Cannot read Verdef for Versym\n"); goto beach; } k = 0; vd.vd_version = READ16 (svd, k) vd.vd_flags = READ16 (svd, k) vd.vd_ndx = READ16 (svd, k) vd.vd_cnt = READ16 (svd, k) vd.vd_hash = READ32 (svd, k) vd.vd_aux = READ32 (svd, k) vd.vd_next = READ32 (svd, k) offset += vd.vd_next; } while (vd.vd_ndx != (data[i + j] & 0x7FFF) && vd.vd_next != 0); if (vd.vd_ndx == (data[i + j] & 0x7FFF)) { Elf_(Verdaux) vda; ut8 svda[sizeof (Elf_(Verdaux))] = {0}; ut64 off_vda = offset - vd.vd_next + vd.vd_aux; if (off_vda > bin->size || off_vda + sizeof (vda) > bin->size) { goto beach; } if (r_buf_read_at (bin->b, off_vda, svda, sizeof (svda)) < 0) { bprintf ("Warning: Cannot read Verdaux for Versym\n"); goto beach; } k = 0; vda.vda_name = READ32 (svda, k) vda.vda_next = READ32 (svda, k) if (vda.vda_name > bin->strtab_size) { goto beach; } const char *name = bin->strtab + vda.vda_name; sdb_set (sdb_entry, key, sdb_fmt (0,"%s(%s%-*s)", tmp_val, name, (int)(12 - strlen (name)),")") , 0); } } } } } beach: free (data); return sdb; } static Sdb *store_versioninfo_gnu_verdef(ELFOBJ *bin, Elf_(Shdr) *shdr, int sz) { const char *section_name = ""; const char *link_section_name = ""; char *end = NULL; Elf_(Shdr) *link_shdr = NULL; ut8 dfs[sizeof (Elf_(Verdef))] = {0}; Sdb *sdb; int cnt, i; if (shdr->sh_link > bin->ehdr.e_shnum) { return false; } link_shdr = &bin->shdr[shdr->sh_link]; if (shdr->sh_size < 1) { return false; } Elf_(Verdef) *defs = calloc (shdr->sh_size, sizeof (char)); if (!defs) { return false; } if (bin->shstrtab && shdr->sh_name < bin->shstrtab_size) { section_name = 
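/*
 * shdr->sh_name and link_shdr->sh_name are offsets into the
 * section-header string table, hence the bounds checks against
 * shstrtab_size guarding each assignment here.
 */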
&bin->shstrtab[shdr->sh_name]; } if (link_shdr && bin->shstrtab && link_shdr->sh_name < bin->shstrtab_size) { link_section_name = &bin->shstrtab[link_shdr->sh_name]; } if (!defs) { bprintf ("Warning: Cannot allocate memory (Check Elf_(Verdef))\n"); return NULL; } sdb = sdb_new0 (); end = (char *)defs + shdr->sh_size; sdb_set (sdb, "section_name", section_name, 0); sdb_num_set (sdb, "entries", shdr->sh_info, 0); sdb_num_set (sdb, "addr", shdr->sh_addr, 0); sdb_num_set (sdb, "offset", shdr->sh_offset, 0); sdb_num_set (sdb, "link", shdr->sh_link, 0); sdb_set (sdb, "link_section_name", link_section_name, 0); for (cnt = 0, i = 0; cnt < shdr->sh_info && ((char *)defs + i < end); ++cnt) { Sdb *sdb_verdef = sdb_new0 (); char *vstart = ((char*)defs) + i; char key[32] = {0}; Elf_(Verdef) *verdef = (Elf_(Verdef)*)vstart; Elf_(Verdaux) aux = {0}; int j = 0; int isum = 0; r_buf_read_at (bin->b, shdr->sh_offset + i, dfs, sizeof (Elf_(Verdef))); verdef->vd_version = READ16 (dfs, j) verdef->vd_flags = READ16 (dfs, j) verdef->vd_ndx = READ16 (dfs, j) verdef->vd_cnt = READ16 (dfs, j) verdef->vd_hash = READ32 (dfs, j) verdef->vd_aux = READ32 (dfs, j) verdef->vd_next = READ32 (dfs, j) vstart += verdef->vd_aux; if (vstart > end || vstart + sizeof (Elf_(Verdaux)) > end) { sdb_free (sdb_verdef); goto out_error; } j = 0; aux.vda_name = READ32 (vstart, j) aux.vda_next = READ32 (vstart, j) isum = i + verdef->vd_aux; if (aux.vda_name > bin->dynstr_size) { sdb_free (sdb_verdef); goto out_error; } sdb_num_set (sdb_verdef, "idx", i, 0); sdb_num_set (sdb_verdef, "vd_version", verdef->vd_version, 0); sdb_num_set (sdb_verdef, "vd_ndx", verdef->vd_ndx, 0); sdb_num_set (sdb_verdef, "vd_cnt", verdef->vd_cnt, 0); sdb_set (sdb_verdef, "vda_name", &bin->dynstr[aux.vda_name], 0); sdb_set (sdb_verdef, "flags", get_ver_flags (verdef->vd_flags), 0); for (j = 1; j < verdef->vd_cnt; ++j) { int k; Sdb *sdb_parent = sdb_new0 (); isum += aux.vda_next; vstart += aux.vda_next; if (vstart > end || vstart + sizeof(Elf_(Verdaux)) > end) { sdb_free (sdb_verdef); sdb_free (sdb_parent); goto out_error; } k = 0; aux.vda_name = READ32 (vstart, k) aux.vda_next = READ32 (vstart, k) if (aux.vda_name > bin->dynstr_size) { sdb_free (sdb_verdef); sdb_free (sdb_parent); goto out_error; } sdb_num_set (sdb_parent, "idx", isum, 0); sdb_num_set (sdb_parent, "parent", j, 0); sdb_set (sdb_parent, "vda_name", &bin->dynstr[aux.vda_name], 0); snprintf (key, sizeof (key), "parent%d", j - 1); sdb_ns_set (sdb_verdef, key, sdb_parent); } snprintf (key, sizeof (key), "verdef%d", cnt); sdb_ns_set (sdb, key, sdb_verdef); if (!verdef->vd_next) { sdb_free (sdb_verdef); goto out_error; } i += verdef->vd_next; } free (defs); return sdb; out_error: free (defs); sdb_free (sdb); return NULL; } static Sdb *store_versioninfo_gnu_verneed(ELFOBJ *bin, Elf_(Shdr) *shdr, int sz) { ut8 *end, *need = NULL; const char *section_name = ""; Elf_(Shdr) *link_shdr = NULL; const char *link_section_name = ""; Sdb *sdb_vernaux = NULL; Sdb *sdb_version = NULL; Sdb *sdb = NULL; int i, cnt; if (!bin || !bin->dynstr) { return NULL; } if (shdr->sh_link > bin->ehdr.e_shnum) { return NULL; } if (shdr->sh_size < 1) { return NULL; } sdb = sdb_new0 (); if (!sdb) { return NULL; } link_shdr = &bin->shdr[shdr->sh_link]; if (bin->shstrtab && shdr->sh_name < bin->shstrtab_size) { section_name = &bin->shstrtab[shdr->sh_name]; } if (bin->shstrtab && link_shdr->sh_name < bin->shstrtab_size) { link_section_name = &bin->shstrtab[link_shdr->sh_name]; } if (!(need = (ut8*) calloc (R_MAX (1, shdr->sh_size), sizeof 
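/*
 * SHT_GNU_verneed data is a pair of linked lists expressed as byte
 * offsets inside the section: vn_next chains Elf_(Verneed) records,
 * and each record's vn_aux points to Elf_(Vernaux) entries chained by
 * vna_next. A hypothetical layout of the buffer allocated here:
 *
 *   Verneed#0 --vn_aux--> Vernaux --vna_next--> Vernaux
 *      | vn_next
 *   Verneed#1 --vn_aux--> Vernaux
 */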
(ut8)))) { bprintf ("Warning: Cannot allocate memory for Elf_(Verneed)\n"); goto beach; } end = need + shdr->sh_size; sdb_set (sdb, "section_name", section_name, 0); sdb_num_set (sdb, "num_entries", shdr->sh_info, 0); sdb_num_set (sdb, "addr", shdr->sh_addr, 0); sdb_num_set (sdb, "offset", shdr->sh_offset, 0); sdb_num_set (sdb, "link", shdr->sh_link, 0); sdb_set (sdb, "link_section_name", link_section_name, 0); if (shdr->sh_offset > bin->size || shdr->sh_offset + shdr->sh_size > bin->size) { goto beach; } if (shdr->sh_offset + shdr->sh_size < shdr->sh_size) { goto beach; } i = r_buf_read_at (bin->b, shdr->sh_offset, need, shdr->sh_size); if (i < 0) goto beach;
//XXX we should use DT_VERNEEDNUM instead of sh_info
//TODO https://sourceware.org/ml/binutils/2014-11/msg00353.html
for (i = 0, cnt = 0; cnt < shdr->sh_info; ++cnt) { int j, isum; ut8 *vstart = need + i; Elf_(Verneed) vvn = {0}; if (vstart + sizeof (Elf_(Verneed)) > end) { goto beach; } Elf_(Verneed) *entry = &vvn; char key[32] = {0}; sdb_version = sdb_new0 (); if (!sdb_version) { goto beach; } j = 0; vvn.vn_version = READ16 (vstart, j) vvn.vn_cnt = READ16 (vstart, j) vvn.vn_file = READ32 (vstart, j) vvn.vn_aux = READ32 (vstart, j) vvn.vn_next = READ32 (vstart, j) sdb_num_set (sdb_version, "vn_version", entry->vn_version, 0); sdb_num_set (sdb_version, "idx", i, 0); if (entry->vn_file > bin->dynstr_size) { goto beach; } { char *s = r_str_ndup (&bin->dynstr[entry->vn_file], 16); sdb_set (sdb_version, "file_name", s, 0); free (s); } sdb_num_set (sdb_version, "cnt", entry->vn_cnt, 0); vstart += entry->vn_aux; for (j = 0, isum = i + entry->vn_aux; j < entry->vn_cnt && vstart + sizeof (Elf_(Vernaux)) <= end; ++j) { int k; Elf_(Vernaux) * aux = NULL; Elf_(Vernaux) vaux = {0}; sdb_vernaux = sdb_new0 (); if (!sdb_vernaux) { goto beach; } aux = (Elf_(Vernaux)*)&vaux; k = 0; vaux.vna_hash = READ32 (vstart, k) vaux.vna_flags = READ16 (vstart, k) vaux.vna_other = READ16 (vstart, k) vaux.vna_name = READ32 (vstart, k) vaux.vna_next = READ32 (vstart, k) if (aux->vna_name > bin->dynstr_size) { goto beach; } sdb_num_set (sdb_vernaux, "idx", isum, 0); if (aux->vna_name > 0 && aux->vna_name + 8 < bin->dynstr_size) { char name [16]; strncpy (name, &bin->dynstr[aux->vna_name], sizeof (name)-1); name[sizeof(name)-1] = 0; sdb_set (sdb_vernaux, "name", name, 0); } sdb_set (sdb_vernaux, "flags", get_ver_flags (aux->vna_flags), 0); sdb_num_set (sdb_vernaux, "version", aux->vna_other, 0); isum += aux->vna_next; vstart += aux->vna_next; snprintf (key, sizeof (key), "vernaux%d", j); sdb_ns_set (sdb_version, key, sdb_vernaux); } if ((int)entry->vn_next < 0) { bprintf ("Invalid vn_next\n"); break; } i += entry->vn_next; snprintf (key, sizeof (key), "version%d", cnt ); sdb_ns_set (sdb, key, sdb_version);
//if entry->vn_next is 0 it iterates infinitely
if (!entry->vn_next) { break; } } free (need); return sdb; beach: free (need); sdb_free (sdb_vernaux); sdb_free (sdb_version); sdb_free (sdb); return NULL; } static Sdb *store_versioninfo(ELFOBJ *bin) { Sdb *sdb_versioninfo = NULL; int num_verdef = 0; int num_verneed = 0; int num_versym = 0; int i; if (!bin || !bin->shdr) { return NULL; } if (!(sdb_versioninfo = sdb_new0 ())) { return NULL; } for (i = 0; i < bin->ehdr.e_shnum; i++) { Sdb *sdb = NULL; char key[32] = {0}; int size = bin->shdr[i].sh_size; if (size - (i * sizeof (Elf_(Shdr))) > bin->size) { size = bin->size - (i * sizeof (Elf_(Shdr))); } int left = size - (i * sizeof (Elf_(Shdr))); left = R_MIN (left, bin->shdr[i].sh_size); if (left < 0) { break; } switch
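/*
 * Dispatch over the three GNU symbol-versioning section types:
 * SHT_GNU_verdef (versions this object defines), SHT_GNU_verneed
 * (versions it requires from other objects) and SHT_GNU_versym (one
 * ut16 per dynamic symbol, indexing into the other two).
 */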
(bin->shdr[i].sh_type) { case SHT_GNU_verdef: sdb = store_versioninfo_gnu_verdef (bin, &bin->shdr[i], left); snprintf (key, sizeof (key), "verdef%d", num_verdef++); sdb_ns_set (sdb_versioninfo, key, sdb); break; case SHT_GNU_verneed: sdb = store_versioninfo_gnu_verneed (bin, &bin->shdr[i], left); snprintf (key, sizeof (key), "verneed%d", num_verneed++); sdb_ns_set (sdb_versioninfo, key, sdb); break; case SHT_GNU_versym: sdb = store_versioninfo_gnu_versym (bin, &bin->shdr[i], left); snprintf (key, sizeof (key), "versym%d", num_versym++); sdb_ns_set (sdb_versioninfo, key, sdb); break; } } return sdb_versioninfo; } static bool init_dynstr(ELFOBJ *bin) { int i, r; const char *section_name = NULL; if (!bin || !bin->shdr) { return false; } if (!bin->shstrtab) { return false; } for (i = 0; i < bin->ehdr.e_shnum; ++i) { if (bin->shdr[i].sh_name > bin->shstrtab_size) { return false; } section_name = &bin->shstrtab[bin->shdr[i].sh_name]; if (bin->shdr[i].sh_type == SHT_STRTAB && !strcmp (section_name, ".dynstr")) { if (!(bin->dynstr = (char*) calloc (bin->shdr[i].sh_size + 1, sizeof (char)))) { bprintf("Warning: Cannot allocate memory for dynamic strings\n"); return false; } if (bin->shdr[i].sh_offset > bin->size) { return false; } if (bin->shdr[i].sh_offset + bin->shdr[i].sh_size > bin->size) { return false; } if (bin->shdr[i].sh_offset + bin->shdr[i].sh_size < bin->shdr[i].sh_size) { return false; } r = r_buf_read_at (bin->b, bin->shdr[i].sh_offset, (ut8*)bin->dynstr, bin->shdr[i].sh_size); if (r < 1) { R_FREE (bin->dynstr); bin->dynstr_size = 0; return false; } bin->dynstr_size = bin->shdr[i].sh_size; return true; } } return false; } static int elf_init(ELFOBJ *bin) { bin->phdr = NULL; bin->shdr = NULL; bin->strtab = NULL; bin->shstrtab = NULL; bin->strtab_size = 0; bin->strtab_section = NULL; bin->dyn_buf = NULL; bin->dynstr = NULL; ZERO_FILL (bin->version_info); bin->g_sections = NULL; bin->g_symbols = NULL; bin->g_imports = NULL; /* bin is not an ELF */ if (!init_ehdr (bin)) { return false; } if (!init_phdr (bin)) { bprintf ("Warning: Cannot initialize program headers\n"); } if (!init_shdr (bin)) { bprintf ("Warning: Cannot initialize section headers\n"); } if (!init_strtab (bin)) { bprintf ("Warning: Cannot initialize strings table\n"); } if (!init_dynstr (bin)) { bprintf ("Warning: Cannot initialize dynamic strings\n"); } bin->baddr = Elf_(r_bin_elf_get_baddr) (bin); if (!init_dynamic_section (bin) && !Elf_(r_bin_elf_get_static)(bin)) bprintf ("Warning: Cannot initialize dynamic section\n"); bin->imports_by_ord_size = 0; bin->imports_by_ord = NULL; bin->symbols_by_ord_size = 0; bin->symbols_by_ord = NULL; bin->g_sections = Elf_(r_bin_elf_get_sections) (bin); bin->boffset = Elf_(r_bin_elf_get_boffset) (bin); sdb_ns_set (bin->kv, "versioninfo", store_versioninfo (bin)); return true; } ut64 Elf_(r_bin_elf_get_section_offset)(ELFOBJ *bin, const char *section_name) { RBinElfSection *section = get_section_by_name (bin, section_name); if (!section) return UT64_MAX; return section->offset; } ut64 Elf_(r_bin_elf_get_section_addr)(ELFOBJ *bin, const char *section_name) { RBinElfSection *section = get_section_by_name (bin, section_name); return section? section->rva: UT64_MAX; } ut64 Elf_(r_bin_elf_get_section_addr_end)(ELFOBJ *bin, const char *section_name) { RBinElfSection *section = get_section_by_name (bin, section_name); return section? section->rva + section->size: UT64_MAX; } #define REL (is_rela ? (void*)rela : (void*)rel) #define REL_BUF is_rela ? 
(ut8*)(&rela[k]) : (ut8*)(&rel[k]) #define REL_OFFSET is_rela ? rela[k].r_offset : rel[k].r_offset #define REL_TYPE is_rela ? rela[k].r_info : rel[k].r_info static ut64 get_import_addr(ELFOBJ *bin, int sym) { Elf_(Rel) *rel = NULL; Elf_(Rela) *rela = NULL; ut8 rl[sizeof (Elf_(Rel))] = {0}; ut8 rla[sizeof (Elf_(Rela))] = {0}; RBinElfSection *rel_sec = NULL; Elf_(Addr) plt_sym_addr = -1; ut64 got_addr, got_offset; ut64 plt_addr; int j, k, tsize, len, nrel; bool is_rela = false; const char *rel_sect[] = { ".rel.plt", ".rela.plt", ".rel.dyn", ".rela.dyn", NULL }; const char *rela_sect[] = { ".rela.plt", ".rel.plt", ".rela.dyn", ".rel.dyn", NULL }; if ((!bin->shdr || !bin->strtab) && !bin->phdr) { return -1; } if ((got_offset = Elf_(r_bin_elf_get_section_offset) (bin, ".got")) == -1 && (got_offset = Elf_(r_bin_elf_get_section_offset) (bin, ".got.plt")) == -1) { return -1; } if ((got_addr = Elf_(r_bin_elf_get_section_addr) (bin, ".got")) == -1 && (got_addr = Elf_(r_bin_elf_get_section_addr) (bin, ".got.plt")) == -1) { return -1; } if (bin->is_rela == DT_REL) { j = 0; while (!rel_sec && rel_sect[j]) { rel_sec = get_section_by_name (bin, rel_sect[j++]); } tsize = sizeof (Elf_(Rel)); } else if (bin->is_rela == DT_RELA) { j = 0; while (!rel_sec && rela_sect[j]) { rel_sec = get_section_by_name (bin, rela_sect[j++]); } is_rela = true; tsize = sizeof (Elf_(Rela)); } if (!rel_sec) { return -1; } if (rel_sec->size < 1) { return -1; } nrel = (ut32)((int)rel_sec->size / (int)tsize); if (nrel < 1) { return -1; } if (is_rela) { rela = calloc (nrel, tsize); if (!rela) { return -1; } } else { rel = calloc (nrel, tsize); if (!rel) { return -1; } } for (j = k = 0; j < rel_sec->size && k < nrel; j += tsize, k++) { int l = 0; if (rel_sec->offset + j > bin->size) { goto out; } if (rel_sec->offset + j + tsize > bin->size) { goto out; } len = r_buf_read_at ( bin->b, rel_sec->offset + j, is_rela ? rla : rl, is_rela ? 
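/*
 * Elf_(Rel) holds only r_offset and r_info, while Elf_(Rela) appends
 * an explicit r_addend, so the two layouts differ in size and the read
 * length must follow the same is_rela flag; the trailing argument
 * resolves to:
 */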
sizeof (Elf_ (Rela)) : sizeof (Elf_ (Rel))); if (len < 1) { goto out; }
#if R_BIN_ELF64
if (is_rela) { rela[k].r_offset = READ64 (rla, l) rela[k].r_info = READ64 (rla, l) rela[k].r_addend = READ64 (rla, l) } else { rel[k].r_offset = READ64 (rl, l) rel[k].r_info = READ64 (rl, l) }
#else
if (is_rela) { rela[k].r_offset = READ32 (rla, l) rela[k].r_info = READ32 (rla, l) rela[k].r_addend = READ32 (rla, l) } else { rel[k].r_offset = READ32 (rl, l) rel[k].r_info = READ32 (rl, l) }
#endif
int reloc_type = ELF_R_TYPE (REL_TYPE); int reloc_sym = ELF_R_SYM (REL_TYPE); if (reloc_sym == sym) { int of = REL_OFFSET; of = of - got_addr + got_offset; switch (bin->ehdr.e_machine) { case EM_PPC: case EM_PPC64: { RBinElfSection *s = get_section_by_name (bin, ".plt"); if (s) { ut8 buf[4]; ut64 base; len = r_buf_read_at (bin->b, s->offset, buf, sizeof (buf)); if (len < 4) { goto out; } base = r_read_be32 (buf); base -= (nrel * 16); base += (k * 16); plt_addr = base; free (REL); return plt_addr; } } break; case EM_SPARC: case EM_SPARCV9: case EM_SPARC32PLUS: plt_addr = Elf_(r_bin_elf_get_section_addr) (bin, ".plt"); if (plt_addr == -1) { free (rela); return -1; } if (reloc_type == R_386_PC16) { plt_addr += k * 12 + 20;
// thumb symbol
if (plt_addr & 1) { plt_addr--; } free (REL); return plt_addr; } else { bprintf ("Unknown sparc reloc type %d\n", reloc_type); } /* SPARC */ break; case EM_ARM: case EM_AARCH64: plt_addr = Elf_(r_bin_elf_get_section_addr) (bin, ".plt"); if (plt_addr == -1) { free (rela); return UT32_MAX; } switch (reloc_type) { case R_386_8: { plt_addr += k * 12 + 20;
// thumb symbol
if (plt_addr & 1) { plt_addr--; } free (REL); return plt_addr; } break; case 1026:
// arm64 aarch64
plt_sym_addr = plt_addr + k * 16 + 32; goto done; default: bprintf ("Unsupported relocation type for imports %d\n", reloc_type); break; } break; case EM_386: case EM_X86_64: switch (reloc_type) { case 1:
// unknown relocs found in voidlinux for x86-64
// break;
case R_386_GLOB_DAT: case R_386_JMP_SLOT: { ut8 buf[8]; if (of + sizeof(Elf_(Addr)) < bin->size) {
// ONLY FOR X86
if (of > bin->size || of + sizeof (Elf_(Addr)) > bin->size) { goto out; } len = r_buf_read_at (bin->b, of, buf, sizeof (Elf_(Addr))); if (len < 1) { goto out; } plt_sym_addr = sizeof (Elf_(Addr)) == 4 ? r_read_le32 (buf) : r_read_le64 (buf); if (!plt_sym_addr) {
//XXX HACK ALERT!!!! full relro?? try to fix it
//will there always be .plt.got, what would happen if is .got.plt?
RBinElfSection *s = get_section_by_name (bin, ".plt.got"); if (Elf_(r_bin_elf_has_relro)(bin) < R_ELF_PART_RELRO || !s) { goto done; } plt_addr = s->offset; of = of + got_addr - got_offset; while (plt_addr + 2 + 4 < s->offset + s->size) { /*we try to locate the plt entry that correspond with the relocation since got does not point back to .plt. In this case it has the following form ff253a152000 JMP QWORD [RIP + 0x20153A] 6690 NOP ---- ff25ec9f0408 JMP DWORD [reloc.puts_236] plt_addr + 2 to remove jmp opcode and get the imm reading 4 and if RIP (plt_addr + 6) + imm == rel->offset return plt_addr, that will be our sym addr perhaps this hack doesn't work on 32 bits */ len = r_buf_read_at (bin->b, plt_addr + 2, buf, 4); if (len < 1) { goto out; } plt_sym_addr = sizeof (Elf_(Addr)) == 4 ?
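/*
 * A GOT slot is one machine word wide, so its width follows the ELF
 * class rather than the relocation record: 4 bytes on ELF32, 8 on
 * ELF64. sizeof (Elf_(Addr)) encodes exactly that distinction:
 */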
r_read_le32 (buf) : r_read_le64 (buf); //relative address if ((plt_addr + 6 + Elf_(r_bin_elf_v2p) (bin, plt_sym_addr)) == of) { plt_sym_addr = plt_addr; goto done; } else if (plt_sym_addr == of) { plt_sym_addr = plt_addr; goto done; } plt_addr += 8; } } else { plt_sym_addr -= 6; } goto done; } break; } default: bprintf ("Unsupported relocation type for imports %d\n", reloc_type); free (REL); return of; break; } break; case 8: // MIPS32 BIG ENDIAN relocs { RBinElfSection *s = get_section_by_name (bin, ".rela.plt"); if (s) { ut8 buf[1024]; const ut8 *base; plt_addr = s->rva + s->size; len = r_buf_read_at (bin->b, s->offset + s->size, buf, sizeof (buf)); if (len != sizeof (buf)) { // oops } base = r_mem_mem_aligned (buf, sizeof (buf), (const ut8*)"\x3c\x0f\x00", 3, 4); if (base) { plt_addr += (int)(size_t)(base - buf); } else { plt_addr += 108 + 8; // HARDCODED HACK } plt_addr += k * 16; free (REL); return plt_addr; } } break; default: bprintf ("Unsupported relocs type %d for arch %d\n", reloc_type, bin->ehdr.e_machine); break; } } } done: free (REL); return plt_sym_addr; out: free (REL); return -1; } int Elf_(r_bin_elf_has_nx)(ELFOBJ *bin) { int i; if (bin && bin->phdr) { for (i = 0; i < bin->ehdr.e_phnum; i++) { if (bin->phdr[i].p_type == PT_GNU_STACK) { return (!(bin->phdr[i].p_flags & 1))? 1: 0; } } } return 0; } int Elf_(r_bin_elf_has_relro)(ELFOBJ *bin) { int i; bool haveBindNow = false; bool haveGnuRelro = false; if (bin && bin->dyn_buf) { for (i = 0; i < bin->dyn_entries; i++) { switch (bin->dyn_buf[i].d_tag) { case DT_BIND_NOW: haveBindNow = true; break; case DT_FLAGS: for (i++; i < bin->dyn_entries ; i++) { ut32 dTag = bin->dyn_buf[i].d_tag; if (!dTag) { break; } switch (dTag) { case DT_FLAGS_1: if (bin->dyn_buf[i].d_un.d_val & DF_1_NOW) { haveBindNow = true; break; } } } break; } } } if (bin && bin->phdr) { for (i = 0; i < bin->ehdr.e_phnum; i++) { if (bin->phdr[i].p_type == PT_GNU_RELRO) { haveGnuRelro = true; break; } } } if (haveGnuRelro) { if (haveBindNow) { return R_ELF_FULL_RELRO; } return R_ELF_PART_RELRO; } return R_ELF_NO_RELRO; } /* To compute the base address, one determines the memory address associated with the lowest p_vaddr value for a PT_LOAD segment. One then obtains the base address by truncating the memory address to the nearest multiple of the maximum page size */ ut64 Elf_(r_bin_elf_get_baddr)(ELFOBJ *bin) { int i; ut64 tmp, base = UT64_MAX; if (!bin) { return 0; } if (bin->phdr) { for (i = 0; i < bin->ehdr.e_phnum; i++) { if (bin->phdr[i].p_type == PT_LOAD) { tmp = (ut64)bin->phdr[i].p_vaddr & ELF_PAGE_MASK; tmp = tmp - (tmp % (1 << ELF_PAGE_SIZE)); if (tmp < base) { base = tmp; } } } } if (base == UT64_MAX && bin->ehdr.e_type == ET_REL) { //we return our own base address for ET_REL type //we act as a loader for ELF return 0x08000000; } return base == UT64_MAX ? 0 : base; } ut64 Elf_(r_bin_elf_get_boffset)(ELFOBJ *bin) { int i; ut64 tmp, base = UT64_MAX; if (bin && bin->phdr) { for (i = 0; i < bin->ehdr.e_phnum; i++) { if (bin->phdr[i].p_type == PT_LOAD) { tmp = (ut64)bin->phdr[i].p_offset & ELF_PAGE_MASK; tmp = tmp - (tmp % (1 << ELF_PAGE_SIZE)); if (tmp < base) { base = tmp; } } } } return base == UT64_MAX ? 
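/*
 * base stays UT64_MAX only when no PT_LOAD segment was seen, in which
 * case 0 is the safest file offset to report:
 */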
0 : base; } ut64 Elf_(r_bin_elf_get_init_offset)(ELFOBJ *bin) { ut64 entry = Elf_(r_bin_elf_get_entry_offset) (bin); ut8 buf[512]; if (!bin) { return 0LL; } if (r_buf_read_at (bin->b, entry + 16, buf, sizeof (buf)) < 1) { bprintf ("Warning: read (init_offset)\n"); return 0; } if (buf[0] == 0x68) { // push // x86 only ut64 addr; memmove (buf, buf+1, 4); addr = (ut64)r_read_le32 (buf); return Elf_(r_bin_elf_v2p) (bin, addr); } return 0; } ut64 Elf_(r_bin_elf_get_fini_offset)(ELFOBJ *bin) { ut64 entry = Elf_(r_bin_elf_get_entry_offset) (bin); ut8 buf[512]; if (!bin) { return 0LL; } if (r_buf_read_at (bin->b, entry+11, buf, sizeof (buf)) == -1) { bprintf ("Warning: read (get_fini)\n"); return 0; } if (*buf == 0x68) { // push // x86/32 only ut64 addr; memmove (buf, buf+1, 4); addr = (ut64)r_read_le32 (buf); return Elf_(r_bin_elf_v2p) (bin, addr); } return 0; } ut64 Elf_(r_bin_elf_get_entry_offset)(ELFOBJ *bin) { ut64 entry; if (!bin) { return 0LL; } entry = bin->ehdr.e_entry; if (!entry) { entry = Elf_(r_bin_elf_get_section_offset)(bin, ".init.text"); if (entry != UT64_MAX) { return entry; } entry = Elf_(r_bin_elf_get_section_offset)(bin, ".text"); if (entry != UT64_MAX) { return entry; } entry = Elf_(r_bin_elf_get_section_offset)(bin, ".init"); if (entry != UT64_MAX) { return entry; } if (entry == UT64_MAX) { return 0; } } return Elf_(r_bin_elf_v2p) (bin, entry); } static ut64 getmainsymbol(ELFOBJ *bin) { struct r_bin_elf_symbol_t *symbol; int i; if (!(symbol = Elf_(r_bin_elf_get_symbols) (bin))) { return UT64_MAX; } for (i = 0; !symbol[i].last; i++) { if (!strcmp (symbol[i].name, "main")) { ut64 paddr = symbol[i].offset; return Elf_(r_bin_elf_p2v) (bin, paddr); } } return UT64_MAX; } ut64 Elf_(r_bin_elf_get_main_offset)(ELFOBJ *bin) { ut64 entry = Elf_(r_bin_elf_get_entry_offset) (bin); ut8 buf[512]; if (!bin) { return 0LL; } if (entry > bin->size || (entry + sizeof (buf)) > bin->size) { return 0; } if (r_buf_read_at (bin->b, entry, buf, sizeof (buf)) < 1) { bprintf ("Warning: read (main)\n"); return 0; } // ARM64 if (buf[0x18+3] == 0x58 && buf[0x2f] == 0x00) { ut32 entry_vaddr = Elf_(r_bin_elf_p2v) (bin, entry); ut32 main_addr = r_read_le32 (&buf[0x30]); if ((main_addr >> 16) == (entry_vaddr >> 16)) { return Elf_(r_bin_elf_v2p) (bin, main_addr); } } // TODO: Use arch to identify arch before memcmp's // ARM ut64 text = Elf_(r_bin_elf_get_section_offset)(bin, ".text"); ut64 text_end = text + bin->size; // ARM-Thumb-Linux if (entry & 1 && !memcmp (buf, "\xf0\x00\x0b\x4f\xf0\x00", 6)) { ut32 * ptr = (ut32*)(buf+40-1); if (*ptr &1) { return Elf_(r_bin_elf_v2p) (bin, *ptr -1); } } if (!memcmp (buf, "\x00\xb0\xa0\xe3\x00\xe0\xa0\xe3", 8)) { // endian stuff here ut32 *addr = (ut32*)(buf+0x34); /* 0x00012000 00b0a0e3 mov fp, 0 0x00012004 00e0a0e3 mov lr, 0 */ if (*addr > text && *addr < (text_end)) { return Elf_(r_bin_elf_v2p) (bin, *addr); } } // MIPS /* get .got, calculate offset of main symbol */ if (!memcmp (buf, "\x21\x00\xe0\x03\x01\x00\x11\x04", 8)) { /* assuming the startup code looks like got = gp-0x7ff0 got[index__libc_start_main] ( got[index_main] ); looking for the instruction generating the first argument to find main lw a0, offset(gp) */ ut64 got_offset; if ((got_offset = Elf_(r_bin_elf_get_section_offset) (bin, ".got")) != -1 || (got_offset = Elf_(r_bin_elf_get_section_offset) (bin, ".got.plt")) != -1) { const ut64 gp = got_offset + 0x7ff0; unsigned i; for (i = 0; i < sizeof(buf) / sizeof(buf[0]); i += 4) { const ut32 instr = r_read_le32 (&buf[i]); if ((instr & 0xffff0000) == 
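/*
 * 0x8f840000 encodes `lw a0, 0(gp)`: opcode 0x23 (lw) in the top six
 * bits, base register 28 (gp), destination register 4 (a0). Masking
 * the instruction with 0xffff0000 discards the 16-bit displacement so
 * any offset from gp matches:
 */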
0x8f840000) { // lw a0, offset(gp) const short delta = instr & 0x0000ffff; r_buf_read_at (bin->b, /* got_entry_offset = */ gp + delta, buf, 4); return Elf_(r_bin_elf_v2p) (bin, r_read_le32 (&buf[0])); } } } return 0; } // ARM if (!memcmp (buf, "\x24\xc0\x9f\xe5\x00\xb0\xa0\xe3", 8)) { ut64 addr = r_read_le32 (&buf[48]); return Elf_(r_bin_elf_v2p) (bin, addr); } // X86-CGC if (buf[0] == 0xe8 && !memcmp (buf + 5, "\x50\xe8\x00\x00\x00\x00\xb8\x01\x00\x00\x00\x53", 12)) { size_t SIZEOF_CALL = 5; ut64 rel_addr = (ut64)((int)(buf[1] + (buf[2] << 8) + (buf[3] << 16) + (buf[4] << 24))); ut64 addr = Elf_(r_bin_elf_p2v)(bin, entry + SIZEOF_CALL); addr += rel_addr; return Elf_(r_bin_elf_v2p) (bin, addr); } // X86-PIE if (buf[0x00] == 0x48 && buf[0x1e] == 0x8d && buf[0x11] == 0xe8) { ut32 *pmain = (ut32*)(buf + 0x30); ut64 vmain = Elf_(r_bin_elf_p2v) (bin, (ut64)*pmain); ut64 ventry = Elf_(r_bin_elf_p2v) (bin, entry); if (vmain >> 16 == ventry >> 16) { return (ut64)vmain; } } // X86-PIE if (buf[0x1d] == 0x48 && buf[0x1e] == 0x8b) { if (!memcmp (buf, "\x31\xed\x49\x89", 4)) {// linux ut64 maddr, baddr; ut8 n32s[sizeof (ut32)] = {0}; maddr = entry + 0x24 + r_read_le32 (buf + 0x20); if (r_buf_read_at (bin->b, maddr, n32s, sizeof (ut32)) == -1) { bprintf ("Warning: read (maddr) 2\n"); return 0; } maddr = (ut64)r_read_le32 (&n32s[0]); baddr = (bin->ehdr.e_entry >> 16) << 16; if (bin->phdr) { baddr = Elf_(r_bin_elf_get_baddr) (bin); } maddr += baddr; return maddr; } } // X86-NONPIE #if R_BIN_ELF64 if (!memcmp (buf, "\x49\x89\xd9", 3) && buf[156] == 0xe8) { // openbsd return r_read_le32 (&buf[157]) + entry + 156 + 5; } if (!memcmp (buf+29, "\x48\xc7\xc7", 3)) { // linux ut64 addr = (ut64)r_read_le32 (&buf[29 + 3]); return Elf_(r_bin_elf_v2p) (bin, addr); } #else if (buf[23] == '\x68') { ut64 addr = (ut64)r_read_le32 (&buf[23 + 1]); return Elf_(r_bin_elf_v2p) (bin, addr); } #endif /* linux64 pie main -- probably buggy in some cases */ if (buf[29] == 0x48 && buf[30] == 0x8d) { // lea rdi, qword [rip-0x21c4] ut8 *p = buf + 32; st32 maindelta = (st32)r_read_le32 (p); ut64 vmain = (ut64)(entry + 29 + maindelta) + 7; ut64 ventry = Elf_(r_bin_elf_p2v) (bin, entry); if (vmain>>16 == ventry>>16) { return (ut64)vmain; } } /* find sym.main if possible */ { ut64 m = getmainsymbol (bin); if (m != UT64_MAX) return m; } return UT64_MAX; } int Elf_(r_bin_elf_get_stripped)(ELFOBJ *bin) { int i; if (!bin->shdr) { return false; } for (i = 0; i < bin->ehdr.e_shnum; i++) { if (bin->shdr[i].sh_type == SHT_SYMTAB) { return false; } } return true; } char *Elf_(r_bin_elf_intrp)(ELFOBJ *bin) { int i; if (!bin || !bin->phdr) { return NULL; } for (i = 0; i < bin->ehdr.e_phnum; i++) { if (bin->phdr[i].p_type == PT_INTERP) { char *str = NULL; ut64 addr = bin->phdr[i].p_offset; int sz = bin->phdr[i].p_memsz; sdb_num_set (bin->kv, "elf_header.intrp_addr", addr, 0); sdb_num_set (bin->kv, "elf_header.intrp_size", sz, 0); if (sz < 1) { return NULL; } str = malloc (sz + 1); if (!str) { return NULL; } if (r_buf_read_at (bin->b, addr, (ut8*)str, sz) < 1) { bprintf ("Warning: read (main)\n"); return 0; } str[sz] = 0; sdb_set (bin->kv, "elf_header.intrp", str, 0); return str; } } return NULL; } int Elf_(r_bin_elf_get_static)(ELFOBJ *bin) { int i; if (!bin->phdr) { return false; } for (i = 0; i < bin->ehdr.e_phnum; i++) { if (bin->phdr[i].p_type == PT_INTERP) { return false; } } return true; } char* Elf_(r_bin_elf_get_data_encoding)(ELFOBJ *bin) { switch (bin->ehdr.e_ident[EI_DATA]) { case ELFDATANONE: return strdup ("none"); case ELFDATA2LSB: 
return strdup ("2's complement, little endian"); case ELFDATA2MSB: return strdup ("2's complement, big endian"); default: return r_str_newf ("<unknown: %x>", bin->ehdr.e_ident[EI_DATA]); } } int Elf_(r_bin_elf_has_va)(ELFOBJ *bin) { return true; } char* Elf_(r_bin_elf_get_arch)(ELFOBJ *bin) { switch (bin->ehdr.e_machine) { case EM_ARC: case EM_ARC_A5: return strdup ("arc"); case EM_AVR: return strdup ("avr"); case EM_CRIS: return strdup ("cris"); case EM_68K: return strdup ("m68k"); case EM_MIPS: case EM_MIPS_RS3_LE: case EM_MIPS_X: return strdup ("mips"); case EM_MCST_ELBRUS: return strdup ("elbrus"); case EM_TRICORE: return strdup ("tricore"); case EM_ARM: case EM_AARCH64: return strdup ("arm"); case EM_HEXAGON: return strdup ("hexagon"); case EM_BLACKFIN: return strdup ("blackfin"); case EM_SPARC: case EM_SPARC32PLUS: case EM_SPARCV9: return strdup ("sparc"); case EM_PPC: case EM_PPC64: return strdup ("ppc"); case EM_PARISC: return strdup ("hppa"); case EM_PROPELLER: return strdup ("propeller"); case EM_MICROBLAZE: return strdup ("microblaze.gnu"); case EM_RISCV: return strdup ("riscv"); case EM_VAX: return strdup ("vax"); case EM_XTENSA: return strdup ("xtensa"); case EM_LANAI: return strdup ("lanai"); case EM_VIDEOCORE3: case EM_VIDEOCORE4: return strdup ("vc4"); case EM_SH: return strdup ("sh"); case EM_V850: return strdup ("v850"); case EM_IA_64: return strdup("ia64"); default: return strdup ("x86"); } } char* Elf_(r_bin_elf_get_machine_name)(ELFOBJ *bin) { switch (bin->ehdr.e_machine) { case EM_NONE: return strdup ("No machine"); case EM_M32: return strdup ("AT&T WE 32100"); case EM_SPARC: return strdup ("SUN SPARC"); case EM_386: return strdup ("Intel 80386"); case EM_68K: return strdup ("Motorola m68k family"); case EM_88K: return strdup ("Motorola m88k family"); case EM_860: return strdup ("Intel 80860"); case EM_MIPS: return strdup ("MIPS R3000"); case EM_S370: return strdup ("IBM System/370"); case EM_MIPS_RS3_LE: return strdup ("MIPS R3000 little-endian"); case EM_PARISC: return strdup ("HPPA"); case EM_VPP500: return strdup ("Fujitsu VPP500"); case EM_SPARC32PLUS: return strdup ("Sun's \"v8plus\""); case EM_960: return strdup ("Intel 80960"); case EM_PPC: return strdup ("PowerPC"); case EM_PPC64: return strdup ("PowerPC 64-bit"); case EM_S390: return strdup ("IBM S390"); case EM_V800: return strdup ("NEC V800 series"); case EM_FR20: return strdup ("Fujitsu FR20"); case EM_RH32: return strdup ("TRW RH-32"); case EM_RCE: return strdup ("Motorola RCE"); case EM_ARM: return strdup ("ARM"); case EM_BLACKFIN: return strdup ("Analog Devices Blackfin"); case EM_FAKE_ALPHA: return strdup ("Digital Alpha"); case EM_SH: return strdup ("Hitachi SH"); case EM_SPARCV9: return strdup ("SPARC v9 64-bit"); case EM_TRICORE: return strdup ("Siemens Tricore"); case EM_ARC: return strdup ("Argonaut RISC Core"); case EM_H8_300: return strdup ("Hitachi H8/300"); case EM_H8_300H: return strdup ("Hitachi H8/300H"); case EM_H8S: return strdup ("Hitachi H8S"); case EM_H8_500: return strdup ("Hitachi H8/500"); case EM_IA_64: return strdup ("Intel Merced"); case EM_MIPS_X: return strdup ("Stanford MIPS-X"); case EM_COLDFIRE: return strdup ("Motorola Coldfire"); case EM_68HC12: return strdup ("Motorola M68HC12"); case EM_MMA: return strdup ("Fujitsu MMA Multimedia Accelerator"); case EM_PCP: return strdup ("Siemens PCP"); case EM_NCPU: return strdup ("Sony nCPU embeeded RISC"); case EM_NDR1: return strdup ("Denso NDR1 microprocessor"); case EM_STARCORE: return strdup ("Motorola Start*Core processor"); 
case EM_ME16: return strdup ("Toyota ME16 processor"); case EM_ST100: return strdup ("STMicroelectronic ST100 processor"); case EM_TINYJ: return strdup ("Advanced Logic Corp. Tinyj emb.fam"); case EM_X86_64: return strdup ("AMD x86-64 architecture"); case EM_LANAI: return strdup ("32bit LANAI architecture"); case EM_PDSP: return strdup ("Sony DSP Processor"); case EM_FX66: return strdup ("Siemens FX66 microcontroller"); case EM_ST9PLUS: return strdup ("STMicroelectronics ST9+ 8/16 mc"); case EM_ST7: return strdup ("STmicroelectronics ST7 8 bit mc"); case EM_68HC16: return strdup ("Motorola MC68HC16 microcontroller"); case EM_68HC11: return strdup ("Motorola MC68HC11 microcontroller"); case EM_68HC08: return strdup ("Motorola MC68HC08 microcontroller"); case EM_68HC05: return strdup ("Motorola MC68HC05 microcontroller"); case EM_SVX: return strdup ("Silicon Graphics SVx"); case EM_ST19: return strdup ("STMicroelectronics ST19 8 bit mc"); case EM_VAX: return strdup ("Digital VAX"); case EM_CRIS: return strdup ("Axis Communications 32-bit embedded processor"); case EM_JAVELIN: return strdup ("Infineon Technologies 32-bit embedded processor"); case EM_FIREPATH: return strdup ("Element 14 64-bit DSP Processor"); case EM_ZSP: return strdup ("LSI Logic 16-bit DSP Processor"); case EM_MMIX: return strdup ("Donald Knuth's educational 64-bit processor"); case EM_HUANY: return strdup ("Harvard University machine-independent object files"); case EM_PRISM: return strdup ("SiTera Prism"); case EM_AVR: return strdup ("Atmel AVR 8-bit microcontroller"); case EM_FR30: return strdup ("Fujitsu FR30"); case EM_D10V: return strdup ("Mitsubishi D10V"); case EM_D30V: return strdup ("Mitsubishi D30V"); case EM_V850: return strdup ("NEC v850"); case EM_M32R: return strdup ("Mitsubishi M32R"); case EM_MN10300: return strdup ("Matsushita MN10300"); case EM_MN10200: return strdup ("Matsushita MN10200"); case EM_PJ: return strdup ("picoJava"); case EM_OPENRISC: return strdup ("OpenRISC 32-bit embedded processor"); case EM_ARC_A5: return strdup ("ARC Cores Tangent-A5"); case EM_XTENSA: return strdup ("Tensilica Xtensa Architecture"); case EM_AARCH64: return strdup ("ARM aarch64"); case EM_PROPELLER: return strdup ("Parallax Propeller"); case EM_MICROBLAZE: return strdup ("Xilinx MicroBlaze"); case EM_RISCV: return strdup ("RISC V"); case EM_VIDEOCORE3: return strdup ("VideoCore III"); case EM_VIDEOCORE4: return strdup ("VideoCore IV"); default: return r_str_newf ("<unknown>: 0x%x", bin->ehdr.e_machine); } } char* Elf_(r_bin_elf_get_file_type)(ELFOBJ *bin) { ut32 e_type; if (!bin) { return NULL; } e_type = (ut32)bin->ehdr.e_type; // cast to avoid warn in iphone-gcc, must be ut16 switch (e_type) { case ET_NONE: return strdup ("NONE (None)"); case ET_REL: return strdup ("REL (Relocatable file)"); case ET_EXEC: return strdup ("EXEC (Executable file)"); case ET_DYN: return strdup ("DYN (Shared object file)"); case ET_CORE: return strdup ("CORE (Core file)"); } if ((e_type >= ET_LOPROC) && (e_type <= ET_HIPROC)) { return r_str_newf ("Processor Specific: %x", e_type); } if ((e_type >= ET_LOOS) && (e_type <= ET_HIOS)) { return r_str_newf ("OS Specific: %x", e_type); } return r_str_newf ("<unknown>: %x", e_type); } char* Elf_(r_bin_elf_get_elf_class)(ELFOBJ *bin) { switch (bin->ehdr.e_ident[EI_CLASS]) { case ELFCLASSNONE: return strdup ("none"); case ELFCLASS32: return strdup ("ELF32"); case ELFCLASS64: return strdup ("ELF64"); default: return r_str_newf ("<unknown: %x>", bin->ehdr.e_ident[EI_CLASS]); } } int 
Elf_(r_bin_elf_get_bits)(ELFOBJ *bin) { /* Hack for ARCompact */ if (bin->ehdr.e_machine == EM_ARC_A5) { return 16; } /* Hack for Ps2 */ if (bin->phdr && bin->ehdr.e_machine == EM_MIPS) { const ut32 mipsType = bin->ehdr.e_flags & EF_MIPS_ARCH; if (bin->ehdr.e_type == ET_EXEC) { int i; bool haveInterp = false; for (i = 0; i < bin->ehdr.e_phnum; i++) { if (bin->phdr[i].p_type == PT_INTERP) { haveInterp = true; } } if (!haveInterp && mipsType == EF_MIPS_ARCH_3) {
// Playstation2 Hack
return 64; } }
// TODO: show this specific asm.cpu somewhere in bininfo (mips1, mips2, mips3, mips32r2, ...)
switch (mipsType) { case EF_MIPS_ARCH_1: case EF_MIPS_ARCH_2: case EF_MIPS_ARCH_3: case EF_MIPS_ARCH_4: case EF_MIPS_ARCH_5: case EF_MIPS_ARCH_32: return 32; case EF_MIPS_ARCH_64: return 64; case EF_MIPS_ARCH_32R2: return 32; case EF_MIPS_ARCH_64R2: return 64; break; } return 32; } /* Hack for Thumb */ if (bin->ehdr.e_machine == EM_ARM) { if (bin->ehdr.e_type != ET_EXEC) { struct r_bin_elf_symbol_t *symbol; if ((symbol = Elf_(r_bin_elf_get_symbols) (bin))) { int i = 0; for (i = 0; !symbol[i].last; i++) { ut64 paddr = symbol[i].offset; if (paddr & 1) { return 16; } } } } { ut64 entry = Elf_(r_bin_elf_get_entry_offset) (bin); if (entry & 1) { return 16; } } } switch (bin->ehdr.e_ident[EI_CLASS]) { case ELFCLASS32: return 32; case ELFCLASS64: return 64; case ELFCLASSNONE: default: return 32;
// defaults
} } static inline int noodle(ELFOBJ *bin, const char *s) { const ut8 *p = bin->b->buf; if (bin->b->length > 64) { p += bin->b->length - 64; } else { return 0; } return r_mem_mem (p, 64, (const ut8 *)s, strlen (s)) != NULL; } static inline int needle(ELFOBJ *bin, const char *s) { if (bin->shstrtab) { ut32 len = bin->shstrtab_size; if (len > 4096) { len = 4096;
// avoid slow loading .. can be buggy?
} return r_mem_mem ((const ut8*)bin->shstrtab, len, (const ut8*)s, strlen (s)) != NULL; } return 0; }
// TODO: must return const char * all those strings must be const char os[LINUX] or so
char* Elf_(r_bin_elf_get_osabi_name)(ELFOBJ *bin) { switch (bin->ehdr.e_ident[EI_OSABI]) { case ELFOSABI_LINUX: return strdup("linux"); case ELFOSABI_SOLARIS: return strdup("solaris"); case ELFOSABI_FREEBSD: return strdup("freebsd"); case ELFOSABI_HPUX: return strdup("hpux"); } /* Hack to identify OS */ if (needle (bin, "openbsd")) return strdup ("openbsd"); if (needle (bin, "netbsd")) return strdup ("netbsd"); if (needle (bin, "freebsd")) return strdup ("freebsd"); if (noodle (bin, "BEOS:APP_VERSION")) return strdup ("beos"); if (needle (bin, "GNU")) return strdup ("linux"); return strdup ("linux"); } ut8 *Elf_(r_bin_elf_grab_regstate)(ELFOBJ *bin, int *len) { if (bin->phdr) { int i; int num = bin->ehdr.e_phnum; for (i = 0; i < num; i++) { if (bin->phdr[i].p_type != PT_NOTE) { continue; } int bits = Elf_(r_bin_elf_get_bits)(bin); int regdelta = (bits == 64)? 0x84: 0x40;
// x64 vs x32
int regsize = 160;
// for x86-64
ut8 *buf = malloc (regsize); if (!buf) { return NULL; } if (r_buf_read_at (bin->b, bin->phdr[i].p_offset + regdelta, buf, regsize) != regsize) { free (buf); bprintf ("Cannot read register state from CORE file\n"); return NULL; } if (len) { *len = regsize; } return buf; } } bprintf ("Cannot find NOTE section\n"); return NULL; } int Elf_(r_bin_elf_is_big_endian)(ELFOBJ *bin) { return (bin->ehdr.e_ident[EI_DATA] == ELFDATA2MSB); } /* XXX Init dt_strtab?
*/ char *Elf_(r_bin_elf_get_rpath)(ELFOBJ *bin) { char *ret = NULL; int j; if (!bin || !bin->phdr || !bin->dyn_buf || !bin->strtab) { return NULL; } for (j = 0; j< bin->dyn_entries; j++) { if (bin->dyn_buf[j].d_tag == DT_RPATH || bin->dyn_buf[j].d_tag == DT_RUNPATH) { if (!(ret = calloc (1, ELF_STRING_LENGTH))) { perror ("malloc (rpath)"); return NULL; } if (bin->dyn_buf[j].d_un.d_val > bin->strtab_size) { free (ret); return NULL; } strncpy (ret, bin->strtab + bin->dyn_buf[j].d_un.d_val, ELF_STRING_LENGTH); ret[ELF_STRING_LENGTH - 1] = '\0'; break; } } return ret; } static size_t get_relocs_num(ELFOBJ *bin) { size_t i, size, ret = 0; /* we need to be careful here, in malformed files the section size might * not be a multiple of a Rel/Rela size; round up so we allocate enough * space. */ #define NUMENTRIES_ROUNDUP(sectionsize, entrysize) (((sectionsize)+(entrysize)-1)/(entrysize)) if (!bin->g_sections) { return 0; } size = bin->is_rela == DT_REL ? sizeof (Elf_(Rel)) : sizeof (Elf_(Rela)); for (i = 0; !bin->g_sections[i].last; i++) { if (!strncmp (bin->g_sections[i].name, ".rela.", strlen (".rela."))) { if (!bin->is_rela) { size = sizeof (Elf_(Rela)); } ret += NUMENTRIES_ROUNDUP (bin->g_sections[i].size, size); } else if (!strncmp (bin->g_sections[i].name, ".rel.", strlen (".rel."))){ if (!bin->is_rela) { size = sizeof (Elf_(Rel)); } ret += NUMENTRIES_ROUNDUP (bin->g_sections[i].size, size); } } return ret; #undef NUMENTRIES_ROUNDUP } static int read_reloc(ELFOBJ *bin, RBinElfReloc *r, int is_rela, ut64 offset) { ut8 *buf = bin->b->buf; int j = 0; if (offset + sizeof (Elf_ (Rela)) > bin->size || offset + sizeof (Elf_(Rela)) < offset) { return -1; } if (is_rela == DT_RELA) { Elf_(Rela) rela; #if R_BIN_ELF64 rela.r_offset = READ64 (buf + offset, j) rela.r_info = READ64 (buf + offset, j) rela.r_addend = READ64 (buf + offset, j) #else rela.r_offset = READ32 (buf + offset, j) rela.r_info = READ32 (buf + offset, j) rela.r_addend = READ32 (buf + offset, j) #endif r->is_rela = is_rela; r->offset = rela.r_offset; r->type = ELF_R_TYPE (rela.r_info); r->sym = ELF_R_SYM (rela.r_info); r->last = 0; r->addend = rela.r_addend; return sizeof (Elf_(Rela)); } else { Elf_(Rel) rel; #if R_BIN_ELF64 rel.r_offset = READ64 (buf + offset, j) rel.r_info = READ64 (buf + offset, j) #else rel.r_offset = READ32 (buf + offset, j) rel.r_info = READ32 (buf + offset, j) #endif r->is_rela = is_rela; r->offset = rel.r_offset; r->type = ELF_R_TYPE (rel.r_info); r->sym = ELF_R_SYM (rel.r_info); r->last = 0; return sizeof (Elf_(Rel)); } } RBinElfReloc* Elf_(r_bin_elf_get_relocs)(ELFOBJ *bin) { int res, rel, rela, i, j; size_t reloc_num = 0; RBinElfReloc *ret = NULL; if (!bin || !bin->g_sections) { return NULL; } reloc_num = get_relocs_num (bin); if (!reloc_num) { return NULL; } bin->reloc_num = reloc_num; ret = (RBinElfReloc*)calloc ((size_t)reloc_num + 1, sizeof(RBinElfReloc)); if (!ret) { return NULL; } #if DEAD_CODE ut64 section_text_offset = Elf_(r_bin_elf_get_section_offset) (bin, ".text"); if (section_text_offset == -1) { section_text_offset = 0; } #endif for (i = 0, rel = 0; !bin->g_sections[i].last && rel < reloc_num ; i++) { bool is_rela = 0 == strncmp (bin->g_sections[i].name, ".rela.", strlen (".rela.")); bool is_rel = 0 == strncmp (bin->g_sections[i].name, ".rel.", strlen (".rel.")); if (!is_rela && !is_rel) { continue; } for (j = 0; j < bin->g_sections[i].size; j += res) { if (bin->g_sections[i].size > bin->size) { break; } if (bin->g_sections[i].offset > bin->size) { break; } if (rel >= reloc_num) { bprintf 
("Internal error: ELF relocation buffer too small," "please file a bug report."); break; } if (!bin->is_rela) { rela = is_rela? DT_RELA : DT_REL; } else { rela = bin->is_rela; } res = read_reloc (bin, &ret[rel], rela, bin->g_sections[i].offset + j); if (j + res > bin->g_sections[i].size) { bprintf ("Warning: malformed file, relocation entry #%u is partially beyond the end of section %u.\n", rel, i); } if (bin->ehdr.e_type == ET_REL) { if (bin->g_sections[i].info < bin->ehdr.e_shnum && bin->shdr) { ret[rel].rva = bin->shdr[bin->g_sections[i].info].sh_offset + ret[rel].offset; ret[rel].rva = Elf_(r_bin_elf_p2v) (bin, ret[rel].rva); } else { ret[rel].rva = ret[rel].offset; } } else { ret[rel].rva = ret[rel].offset; ret[rel].offset = Elf_(r_bin_elf_v2p) (bin, ret[rel].offset); } ret[rel].last = 0; if (res < 0) { break; } rel++; } } ret[reloc_num].last = 1; return ret; } RBinElfLib* Elf_(r_bin_elf_get_libs)(ELFOBJ *bin) { RBinElfLib *ret = NULL; int j, k; if (!bin || !bin->phdr || !bin->dyn_buf || !bin->strtab || *(bin->strtab+1) == '0') { return NULL; } for (j = 0, k = 0; j < bin->dyn_entries; j++) if (bin->dyn_buf[j].d_tag == DT_NEEDED) { RBinElfLib *r = realloc (ret, (k + 1) * sizeof (RBinElfLib)); if (!r) { perror ("realloc (libs)"); free (ret); return NULL; } ret = r; if (bin->dyn_buf[j].d_un.d_val > bin->strtab_size) { free (ret); return NULL; } strncpy (ret[k].name, bin->strtab + bin->dyn_buf[j].d_un.d_val, ELF_STRING_LENGTH); ret[k].name[ELF_STRING_LENGTH - 1] = '\0'; ret[k].last = 0; if (ret[k].name[0]) { k++; } } RBinElfLib *r = realloc (ret, (k + 1) * sizeof (RBinElfLib)); if (!r) { perror ("realloc (libs)"); free (ret); return NULL; } ret = r; ret[k].last = 1; return ret; } static RBinElfSection* get_sections_from_phdr(ELFOBJ *bin) { RBinElfSection *ret; int i, num_sections = 0; ut64 reldyn = 0, relava = 0, pltgotva = 0, relva = 0; ut64 reldynsz = 0, relasz = 0, pltgotsz = 0; if (!bin || !bin->phdr || !bin->ehdr.e_phnum) return NULL; for (i = 0; i < bin->dyn_entries; i++) { switch (bin->dyn_buf[i].d_tag) { case DT_REL: reldyn = bin->dyn_buf[i].d_un.d_ptr; num_sections++; break; case DT_RELA: relva = bin->dyn_buf[i].d_un.d_ptr; num_sections++; break; case DT_RELSZ: reldynsz = bin->dyn_buf[i].d_un.d_val; break; case DT_RELASZ: relasz = bin->dyn_buf[i].d_un.d_val; break; case DT_PLTGOT: pltgotva = bin->dyn_buf[i].d_un.d_ptr; num_sections++; break; case DT_PLTRELSZ: pltgotsz = bin->dyn_buf[i].d_un.d_val; break; case DT_JMPREL: relava = bin->dyn_buf[i].d_un.d_ptr; num_sections++; break; default: break; } } ret = calloc (num_sections + 1, sizeof(RBinElfSection)); if (!ret) { return NULL; } i = 0; if (reldyn) { ret[i].offset = Elf_(r_bin_elf_v2p) (bin, reldyn); ret[i].rva = reldyn; ret[i].size = reldynsz; strcpy (ret[i].name, ".rel.dyn"); ret[i].last = 0; i++; } if (relava) { ret[i].offset = Elf_(r_bin_elf_v2p) (bin, relava); ret[i].rva = relava; ret[i].size = pltgotsz; strcpy (ret[i].name, ".rela.plt"); ret[i].last = 0; i++; } if (relva) { ret[i].offset = Elf_(r_bin_elf_v2p) (bin, relva); ret[i].rva = relva; ret[i].size = relasz; strcpy (ret[i].name, ".rel.plt"); ret[i].last = 0; i++; } if (pltgotva) { ret[i].offset = Elf_(r_bin_elf_v2p) (bin, pltgotva); ret[i].rva = pltgotva; ret[i].size = pltgotsz; strcpy (ret[i].name, ".got.plt"); ret[i].last = 0; i++; } ret[i].last = 1; return ret; } RBinElfSection* Elf_(r_bin_elf_get_sections)(ELFOBJ *bin) { RBinElfSection *ret = NULL; char unknown_s[20], invalid_s[20]; int i, nidx, unknown_c=0, invalid_c=0; if (!bin) { return NULL; } if 
(bin->g_sections) { return bin->g_sections; } if (!bin->shdr) { //we don't give up search in phdr section return get_sections_from_phdr (bin); } if (!(ret = calloc ((bin->ehdr.e_shnum + 1), sizeof (RBinElfSection)))) { return NULL; } for (i = 0; i < bin->ehdr.e_shnum; i++) { ret[i].offset = bin->shdr[i].sh_offset; ret[i].size = bin->shdr[i].sh_size; ret[i].align = bin->shdr[i].sh_addralign; ret[i].flags = bin->shdr[i].sh_flags; ret[i].link = bin->shdr[i].sh_link; ret[i].info = bin->shdr[i].sh_info; ret[i].type = bin->shdr[i].sh_type; if (bin->ehdr.e_type == ET_REL) { ret[i].rva = bin->baddr + bin->shdr[i].sh_offset; } else { ret[i].rva = bin->shdr[i].sh_addr; } nidx = bin->shdr[i].sh_name; #define SHNAME (int)bin->shdr[i].sh_name #define SHNLEN ELF_STRING_LENGTH - 4 #define SHSIZE (int)bin->shstrtab_size if (nidx < 0 || !bin->shstrtab_section || !bin->shstrtab_size || nidx > bin->shstrtab_size) { snprintf (invalid_s, sizeof (invalid_s) - 4, "invalid%d", invalid_c); strncpy (ret[i].name, invalid_s, SHNLEN); invalid_c++; } else { if (bin->shstrtab && (SHNAME > 0) && (SHNAME < SHSIZE)) { strncpy (ret[i].name, &bin->shstrtab[SHNAME], SHNLEN); } else { if (bin->shdr[i].sh_type == SHT_NULL) { //to follow the same behaviour as readelf strncpy (ret[i].name, "", sizeof (ret[i].name) - 4); } else { snprintf (unknown_s, sizeof (unknown_s)-4, "unknown%d", unknown_c); strncpy (ret[i].name, unknown_s, sizeof (ret[i].name)-4); unknown_c++; } } } ret[i].name[ELF_STRING_LENGTH-2] = '\0'; ret[i].last = 0; } ret[i].last = 1; return ret; } static void fill_symbol_bind_and_type (struct r_bin_elf_symbol_t *ret, Elf_(Sym) *sym) { #define s_bind(x) ret->bind = x #define s_type(x) ret->type = x switch (ELF_ST_BIND(sym->st_info)) { case STB_LOCAL: s_bind ("LOCAL"); break; case STB_GLOBAL: s_bind ("GLOBAL"); break; case STB_WEAK: s_bind ("WEAK"); break; case STB_NUM: s_bind ("NUM"); break; case STB_LOOS: s_bind ("LOOS"); break; case STB_HIOS: s_bind ("HIOS"); break; case STB_LOPROC: s_bind ("LOPROC"); break; case STB_HIPROC: s_bind ("HIPROC"); break; default: s_bind ("UNKNOWN"); } switch (ELF_ST_TYPE (sym->st_info)) { case STT_NOTYPE: s_type ("NOTYPE"); break; case STT_OBJECT: s_type ("OBJECT"); break; case STT_FUNC: s_type ("FUNC"); break; case STT_SECTION: s_type ("SECTION"); break; case STT_FILE: s_type ("FILE"); break; case STT_COMMON: s_type ("COMMON"); break; case STT_TLS: s_type ("TLS"); break; case STT_NUM: s_type ("NUM"); break; case STT_LOOS: s_type ("LOOS"); break; case STT_HIOS: s_type ("HIOS"); break; case STT_LOPROC: s_type ("LOPROC"); break; case STT_HIPROC: s_type ("HIPROC"); break; default: s_type ("UNKNOWN"); } } static RBinElfSymbol* get_symbols_from_phdr(ELFOBJ *bin, int type) { Elf_(Sym) *sym = NULL; Elf_(Addr) addr_sym_table = 0; ut8 s[sizeof (Elf_(Sym))] = {0}; RBinElfSymbol *ret = NULL; int i, j, r, tsize, nsym, ret_ctr; ut64 toffset = 0, tmp_offset; ut32 size, sym_size = 0; if (!bin || !bin->phdr || !bin->ehdr.e_phnum) { return NULL; } for (j = 0; j < bin->dyn_entries; j++) { switch (bin->dyn_buf[j].d_tag) { case (DT_SYMTAB): addr_sym_table = Elf_(r_bin_elf_v2p) (bin, bin->dyn_buf[j].d_un.d_ptr); break; case (DT_SYMENT): sym_size = bin->dyn_buf[j].d_un.d_val; break; default: break; } } if (!addr_sym_table) { return NULL; } if (!sym_size) { return NULL; } //since ELF doesn't specify the symbol table size we may read until the end of the buffer nsym = (bin->size - addr_sym_table) / sym_size; if (!UT32_MUL (&size, nsym, sizeof (Elf_ (Sym)))) { goto beach; } if (size < 1) { goto beach; } if 
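/*
 * ELF has no tag for the dynamic symbol table's length, so nsym above
 * is an upper bound derived from how many sym_size records fit between
 * DT_SYMTAB and the end of the file; the bounds check completed below
 * keeps every subsequent read inside the mapped buffer:
 */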
(addr_sym_table > bin->size || addr_sym_table + size > bin->size) { goto beach; } if (nsym < 1) { return NULL; } // we reserve room for 4096 and grow as needed. size_t capacity1 = 4096; size_t capacity2 = 4096; sym = (Elf_(Sym)*) calloc (capacity1, sym_size); ret = (RBinElfSymbol *) calloc (capacity2, sizeof (struct r_bin_elf_symbol_t)); if (!sym || !ret) { goto beach; } for (i = 1, ret_ctr = 0; i < nsym; i++) { if (i >= capacity1) { // maybe grow // You take what you want, but you eat what you take. Elf_(Sym)* temp_sym = (Elf_(Sym)*) realloc(sym, (capacity1 * GROWTH_FACTOR) * sym_size); if (!temp_sym) { goto beach; } sym = temp_sym; capacity1 *= GROWTH_FACTOR; } if (ret_ctr >= capacity2) { // maybe grow RBinElfSymbol *temp_ret = realloc (ret, capacity2 * GROWTH_FACTOR * sizeof (struct r_bin_elf_symbol_t)); if (!temp_ret) { goto beach; } ret = temp_ret; capacity2 *= GROWTH_FACTOR; } // read in one entry r = r_buf_read_at (bin->b, addr_sym_table + i * sizeof (Elf_ (Sym)), s, sizeof (Elf_ (Sym))); if (r < 1) { goto beach; } int j = 0; #if R_BIN_ELF64 sym[i].st_name = READ32 (s, j); sym[i].st_info = READ8 (s, j); sym[i].st_other = READ8 (s, j); sym[i].st_shndx = READ16 (s, j); sym[i].st_value = READ64 (s, j); sym[i].st_size = READ64 (s, j); #else sym[i].st_name = READ32 (s, j); sym[i].st_value = READ32 (s, j); sym[i].st_size = READ32 (s, j); sym[i].st_info = READ8 (s, j); sym[i].st_other = READ8 (s, j); sym[i].st_shndx = READ16 (s, j); #endif // zero symbol is always empty // Examine entry and maybe store if (type == R_BIN_ELF_IMPORTS && sym[i].st_shndx == STN_UNDEF) { if (sym[i].st_value) { toffset = sym[i].st_value; } else if ((toffset = get_import_addr (bin, i)) == -1){ toffset = 0; } tsize = 16; } else if (type == R_BIN_ELF_SYMBOLS && sym[i].st_shndx != STN_UNDEF && ELF_ST_TYPE (sym[i].st_info) != STT_SECTION && ELF_ST_TYPE (sym[i].st_info) != STT_FILE) { tsize = sym[i].st_size; toffset = (ut64) sym[i].st_value; } else { continue; } tmp_offset = Elf_(r_bin_elf_v2p) (bin, toffset); if (tmp_offset > bin->size) { goto done; } if (sym[i].st_name + 2 > bin->strtab_size) { // Since we are reading beyond the symbol table what's happening // is that some entry is trying to dereference the strtab beyond its capacity // is not a symbol so is the end goto done; } ret[ret_ctr].offset = tmp_offset; ret[ret_ctr].size = tsize; { int rest = ELF_STRING_LENGTH - 1; int st_name = sym[i].st_name; int maxsize = R_MIN (bin->size, bin->strtab_size); if (st_name < 0 || st_name >= maxsize) { ret[ret_ctr].name[0] = 0; } else { const int len = __strnlen (bin->strtab + st_name, rest); memcpy (ret[ret_ctr].name, &bin->strtab[st_name], len); } } ret[ret_ctr].ordinal = i; ret[ret_ctr].in_shdr = false; ret[ret_ctr].name[ELF_STRING_LENGTH - 2] = '\0'; fill_symbol_bind_and_type (&ret[ret_ctr], &sym[i]); ret[ret_ctr].last = 0; ret_ctr++; } done: ret[ret_ctr].last = 1; // Size everything down to only what is used { nsym = i > 0 ? i : 1; Elf_ (Sym) * temp_sym = (Elf_ (Sym)*) realloc (sym, (nsym * GROWTH_FACTOR) * sym_size); if (!temp_sym) { goto beach; } sym = temp_sym; } { ret_ctr = ret_ctr > 0 ? 
ret_ctr : 1; RBinElfSymbol *p = (RBinElfSymbol *) realloc (ret, (ret_ctr + 1) * sizeof (RBinElfSymbol)); if (!p) { goto beach; } ret = p; } if (type == R_BIN_ELF_IMPORTS && !bin->imports_by_ord_size) { bin->imports_by_ord_size = ret_ctr + 1; if (ret_ctr > 0) { bin->imports_by_ord = (RBinImport * *) calloc (ret_ctr + 1, sizeof (RBinImport*)); } else { bin->imports_by_ord = NULL; } } else if (type == R_BIN_ELF_SYMBOLS && !bin->symbols_by_ord_size && ret_ctr) { bin->symbols_by_ord_size = ret_ctr + 1; if (ret_ctr > 0) { bin->symbols_by_ord = (RBinSymbol * *) calloc (ret_ctr + 1, sizeof (RBinSymbol*)); }else { bin->symbols_by_ord = NULL; } } free (sym); return ret; beach: free (sym); free (ret); return NULL; } static RBinElfSymbol *Elf_(r_bin_elf_get_phdr_symbols)(ELFOBJ *bin) { if (!bin) { return NULL; } if (bin->phdr_symbols) { return bin->phdr_symbols; } bin->phdr_symbols = get_symbols_from_phdr (bin, R_BIN_ELF_SYMBOLS); return bin->phdr_symbols; } static RBinElfSymbol *Elf_(r_bin_elf_get_phdr_imports)(ELFOBJ *bin) { if (!bin) { return NULL; } if (bin->phdr_imports) { return bin->phdr_imports; } bin->phdr_imports = get_symbols_from_phdr (bin, R_BIN_ELF_IMPORTS); return bin->phdr_imports; } static int Elf_(fix_symbols)(ELFOBJ *bin, int nsym, int type, RBinElfSymbol **sym) { int count = 0; RBinElfSymbol *ret = *sym; RBinElfSymbol *phdr_symbols = (type == R_BIN_ELF_SYMBOLS) ? Elf_(r_bin_elf_get_phdr_symbols) (bin) : Elf_(r_bin_elf_get_phdr_imports) (bin); RBinElfSymbol *tmp, *p; if (phdr_symbols) { RBinElfSymbol *d = ret; while (!d->last) { /* find match in phdr */ p = phdr_symbols; while (!p->last) { if (p->offset && d->offset == p->offset) { p->in_shdr = true; if (*p->name && strcmp (d->name, p->name)) { strcpy (d->name, p->name); } } p++; } d++; } p = phdr_symbols; while (!p->last) { if (!p->in_shdr) { count++; } p++; } /*Take those symbols that are not present in the shdr but yes in phdr*/ /*This should only should happen with fucked up binaries*/ if (count > 0) { /*what happens if a shdr says it has only one symbol? we should look anyway into phdr*/ tmp = (RBinElfSymbol*)realloc (ret, (nsym + count + 1) * sizeof (RBinElfSymbol)); if (!tmp) { return -1; } ret = tmp; ret[nsym--].last = 0; p = phdr_symbols; while (!p->last) { if (!p->in_shdr) { memcpy (&ret[++nsym], p, sizeof (RBinElfSymbol)); } p++; } ret[nsym + 1].last = 1; } *sym = ret; return nsym + 1; } return nsym; } static RBinElfSymbol* Elf_(_r_bin_elf_get_symbols_imports)(ELFOBJ *bin, int type) { ut32 shdr_size; int tsize, nsym, ret_ctr = 0, i, j, r, k, newsize; ut64 toffset; ut32 size = 0; RBinElfSymbol *ret = NULL; Elf_(Shdr) *strtab_section = NULL; Elf_(Sym) *sym = NULL; ut8 s[sizeof (Elf_(Sym))] = { 0 }; char *strtab = NULL; if (!bin || !bin->shdr || !bin->ehdr.e_shnum || bin->ehdr.e_shnum == 0xffff) { return (type == R_BIN_ELF_SYMBOLS) ? Elf_(r_bin_elf_get_phdr_symbols) (bin) : Elf_(r_bin_elf_get_phdr_imports) (bin); } if (!UT32_MUL (&shdr_size, bin->ehdr.e_shnum, sizeof (Elf_(Shdr)))) { return false; } if (shdr_size + 8 > bin->size) { return false; } for (i = 0; i < bin->ehdr.e_shnum; i++) { if ((type == R_BIN_ELF_IMPORTS && bin->shdr[i].sh_type == (bin->ehdr.e_type == ET_REL ? SHT_SYMTAB : SHT_DYNSYM)) || (type == R_BIN_ELF_SYMBOLS && bin->shdr[i].sh_type == (Elf_(r_bin_elf_get_stripped) (bin) ? SHT_DYNSYM : SHT_SYMTAB))) { if (bin->shdr[i].sh_link < 1) { /* oops. fix out of range pointers */ continue; } // hack to avoid asan cry if ((bin->shdr[i].sh_link * sizeof(Elf_(Shdr))) >= shdr_size) { /* oops. 
fix out of range pointers */ continue; } strtab_section = &bin->shdr[bin->shdr[i].sh_link]; if (strtab_section->sh_size > ST32_MAX || strtab_section->sh_size+8 > bin->size) { bprintf ("size (syms strtab)"); free (ret); free (strtab); return NULL; } if (!strtab) { if (!(strtab = (char *)calloc (1, 8 + strtab_section->sh_size))) { bprintf ("malloc (syms strtab)"); goto beach; } if (strtab_section->sh_offset > bin->size || strtab_section->sh_offset + strtab_section->sh_size > bin->size) { goto beach; } if (r_buf_read_at (bin->b, strtab_section->sh_offset, (ut8*)strtab, strtab_section->sh_size) == -1) { bprintf ("Warning: read (syms strtab)\n"); goto beach; } } newsize = 1 + bin->shdr[i].sh_size; if (newsize < 0 || newsize > bin->size) { bprintf ("invalid shdr %d size\n", i); goto beach; } nsym = (int)(bin->shdr[i].sh_size / sizeof (Elf_(Sym))); if (nsym < 0) { goto beach; } if (!(sym = (Elf_(Sym) *)calloc (nsym, sizeof (Elf_(Sym))))) { bprintf ("calloc (syms)"); goto beach; } if (!UT32_MUL (&size, nsym, sizeof (Elf_(Sym)))) { goto beach; } if (size < 1 || size > bin->size) { goto beach; } if (bin->shdr[i].sh_offset > bin->size) { goto beach; } if (bin->shdr[i].sh_offset + size > bin->size) { goto beach; } for (j = 0; j < nsym; j++) { int k = 0; r = r_buf_read_at (bin->b, bin->shdr[i].sh_offset + j * sizeof (Elf_(Sym)), s, sizeof (Elf_(Sym))); if (r < 1) { bprintf ("Warning: read (sym)\n"); goto beach; } #if R_BIN_ELF64 sym[j].st_name = READ32 (s, k) sym[j].st_info = READ8 (s, k) sym[j].st_other = READ8 (s, k) sym[j].st_shndx = READ16 (s, k) sym[j].st_value = READ64 (s, k) sym[j].st_size = READ64 (s, k) #else sym[j].st_name = READ32 (s, k) sym[j].st_value = READ32 (s, k) sym[j].st_size = READ32 (s, k) sym[j].st_info = READ8 (s, k) sym[j].st_other = READ8 (s, k) sym[j].st_shndx = READ16 (s, k) #endif } free (ret); ret = calloc (nsym, sizeof (RBinElfSymbol)); if (!ret) { bprintf ("Cannot allocate %d symbols\n", nsym); goto beach; } for (k = 1, ret_ctr = 0; k < nsym; k++) { if (type == R_BIN_ELF_IMPORTS && sym[k].st_shndx == STN_UNDEF) { if (sym[k].st_value) { toffset = sym[k].st_value; } else if ((toffset = get_import_addr (bin, k)) == -1){ toffset = 0; } tsize = 16; } else if (type == R_BIN_ELF_SYMBOLS && sym[k].st_shndx != STN_UNDEF && ELF_ST_TYPE (sym[k].st_info) != STT_SECTION && ELF_ST_TYPE (sym[k].st_info) != STT_FILE) { //int idx = sym[k].st_shndx; tsize = sym[k].st_size; toffset = (ut64)sym[k].st_value; } else { continue; } if (bin->ehdr.e_type == ET_REL) { if (sym[k].st_shndx < bin->ehdr.e_shnum) ret[ret_ctr].offset = sym[k].st_value + bin->shdr[sym[k].st_shndx].sh_offset; } else { ret[ret_ctr].offset = Elf_(r_bin_elf_v2p) (bin, toffset); } ret[ret_ctr].size = tsize; if (sym[k].st_name + 2 > strtab_section->sh_size) { bprintf ("Warning: index out of strtab range\n"); goto beach; } { int rest = ELF_STRING_LENGTH - 1; int st_name = sym[k].st_name; int maxsize = R_MIN (bin->b->length, strtab_section->sh_size); if (st_name < 0 || st_name >= maxsize) { ret[ret_ctr].name[0] = 0; } else { const size_t len = __strnlen (strtab + sym[k].st_name, rest); memcpy (ret[ret_ctr].name, &strtab[sym[k].st_name], len); } } ret[ret_ctr].ordinal = k; ret[ret_ctr].name[ELF_STRING_LENGTH - 2] = '\0'; fill_symbol_bind_and_type (&ret[ret_ctr], &sym[k]); ret[ret_ctr].last = 0; ret_ctr++; } ret[ret_ctr].last = 1; // ugly dirty hack :D R_FREE (strtab); R_FREE (sym); } } if (!ret) { return (type == R_BIN_ELF_SYMBOLS) ? 
Elf_(r_bin_elf_get_phdr_symbols) (bin) : Elf_(r_bin_elf_get_phdr_imports) (bin); } int max = -1; RBinElfSymbol *aux = NULL; nsym = Elf_(fix_symbols) (bin, ret_ctr, type, &ret); if (nsym == -1) { goto beach; } aux = ret; while (!aux->last) { if ((int)aux->ordinal > max) { max = aux->ordinal; } aux++; } nsym = max; if (type == R_BIN_ELF_IMPORTS) { R_FREE (bin->imports_by_ord); bin->imports_by_ord_size = nsym + 1; bin->imports_by_ord = (RBinImport**)calloc (R_MAX (1, nsym + 1), sizeof (RBinImport*)); } else if (type == R_BIN_ELF_SYMBOLS) { R_FREE (bin->symbols_by_ord); bin->symbols_by_ord_size = nsym + 1; bin->symbols_by_ord = (RBinSymbol**)calloc (R_MAX (1, nsym + 1), sizeof (RBinSymbol*)); } return ret; beach: free (ret); free (sym); free (strtab); return NULL; } RBinElfSymbol *Elf_(r_bin_elf_get_symbols)(ELFOBJ *bin) { if (!bin->g_symbols) { bin->g_symbols = Elf_(_r_bin_elf_get_symbols_imports) (bin, R_BIN_ELF_SYMBOLS); } return bin->g_symbols; } RBinElfSymbol *Elf_(r_bin_elf_get_imports)(ELFOBJ *bin) { if (!bin->g_imports) { bin->g_imports = Elf_(_r_bin_elf_get_symbols_imports) (bin, R_BIN_ELF_IMPORTS); } return bin->g_imports; } RBinElfField* Elf_(r_bin_elf_get_fields)(ELFOBJ *bin) { RBinElfField *ret = NULL; int i = 0, j; if (!bin || !(ret = calloc ((bin->ehdr.e_phnum + 3 + 1), sizeof (RBinElfField)))) { return NULL; } strncpy (ret[i].name, "ehdr", ELF_STRING_LENGTH); ret[i].offset = 0; ret[i++].last = 0; strncpy (ret[i].name, "shoff", ELF_STRING_LENGTH); ret[i].offset = bin->ehdr.e_shoff; ret[i++].last = 0; strncpy (ret[i].name, "phoff", ELF_STRING_LENGTH); ret[i].offset = bin->ehdr.e_phoff; ret[i++].last = 0; for (j = 0; bin->phdr && j < bin->ehdr.e_phnum; i++, j++) { snprintf (ret[i].name, ELF_STRING_LENGTH, "phdr_%i", j); ret[i].offset = bin->phdr[j].p_offset; ret[i].last = 0; } ret[i].last = 1; return ret; } void* Elf_(r_bin_elf_free)(ELFOBJ* bin) { int i; if (!bin) { return NULL; } free (bin->phdr); free (bin->shdr); free (bin->strtab); free (bin->dyn_buf); free (bin->shstrtab); free (bin->dynstr); //free (bin->strtab_section); if (bin->imports_by_ord) { for (i = 0; i<bin->imports_by_ord_size; i++) { free (bin->imports_by_ord[i]); } free (bin->imports_by_ord); } if (bin->symbols_by_ord) { for (i = 0; i<bin->symbols_by_ord_size; i++) { free (bin->symbols_by_ord[i]); } free (bin->symbols_by_ord); } r_buf_free (bin->b); if (bin->g_symbols != bin->phdr_symbols) { R_FREE (bin->phdr_symbols); } if (bin->g_imports != bin->phdr_imports) { R_FREE (bin->phdr_imports); } R_FREE (bin->g_sections); R_FREE (bin->g_symbols); R_FREE (bin->g_imports); free (bin); return NULL; } ELFOBJ* Elf_(r_bin_elf_new)(const char* file, bool verbose) { ut8 *buf; int size; ELFOBJ *bin = R_NEW0 (ELFOBJ); if (!bin) { return NULL; } memset (bin, 0, sizeof (ELFOBJ)); bin->file = file; if (!(buf = (ut8*)r_file_slurp (file, &size))) { return Elf_(r_bin_elf_free) (bin); } bin->size = size; bin->verbose = verbose; bin->b = r_buf_new (); if (!r_buf_set_bytes (bin->b, buf, bin->size)) { free (buf); return Elf_(r_bin_elf_free) (bin); } if (!elf_init (bin)) { free (buf); return Elf_(r_bin_elf_free) (bin); } free (buf); return bin; } ELFOBJ* Elf_(r_bin_elf_new_buf)(RBuffer *buf, bool verbose) { ELFOBJ *bin = R_NEW0 (ELFOBJ); bin->kv = sdb_new0 (); bin->b = r_buf_new (); bin->size = (ut32)buf->length; bin->verbose = verbose; if (!r_buf_set_bytes (bin->b, buf->buf, buf->length)) { return Elf_(r_bin_elf_free) (bin); } if (!elf_init (bin)) { return Elf_(r_bin_elf_free) (bin); } return bin; } static int is_in_pphdr (Elf_(Phdr) 
*p, ut64 addr) { return addr >= p->p_offset && addr < p->p_offset + p->p_memsz; } static int is_in_vphdr (Elf_(Phdr) *p, ut64 addr) { return addr >= p->p_vaddr && addr < p->p_vaddr + p->p_memsz; } /* converts a physical address to the virtual address, looking * at the program headers in the binary bin */ ut64 Elf_(r_bin_elf_p2v) (ELFOBJ *bin, ut64 paddr) { int i; if (!bin) return 0; if (!bin->phdr) { if (bin->ehdr.e_type == ET_REL) { return bin->baddr + paddr; } return paddr; } for (i = 0; i < bin->ehdr.e_phnum; ++i) { Elf_(Phdr) *p = &bin->phdr[i]; if (!p) { break; } if (p->p_type == PT_LOAD && is_in_pphdr (p, paddr)) { if (!p->p_vaddr && !p->p_offset) { continue; } return p->p_vaddr + paddr - p->p_offset; } } return paddr; } /* converts a virtual address to the relative physical address, looking * at the program headers in the binary bin */ ut64 Elf_(r_bin_elf_v2p) (ELFOBJ *bin, ut64 vaddr) { int i; if (!bin) { return 0; } if (!bin->phdr) { if (bin->ehdr.e_type == ET_REL) { return vaddr - bin->baddr; } return vaddr; } for (i = 0; i < bin->ehdr.e_phnum; ++i) { Elf_(Phdr) *p = &bin->phdr[i]; if (!p) { break; } if (p->p_type == PT_LOAD && is_in_vphdr (p, vaddr)) { if (!p->p_offset && !p->p_vaddr) { continue; } return p->p_offset + vaddr - p->p_vaddr; } } return vaddr; }
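/* Example with a hypothetical layout: given a single PT_LOAD segment with
 * p_offset=0x1000, p_vaddr=0x401000 and a large enough p_memsz,
 * Elf_(r_bin_elf_v2p) (bin, 0x401234) returns 0x1234 and
 * Elf_(r_bin_elf_p2v) (bin, 0x1234) maps it back to 0x401234; an address
 * that falls inside no PT_LOAD segment is returned unchanged. */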
/* radare - LGPL - Copyright 2008-2017 - nibble, pancake, alvaro_fe */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <assert.h> #include <r_types.h> #include <r_util.h> #include "elf.h" #ifdef IFDBG #undef IFDBG #endif #define DO_THE_DBG 0 #define IFDBG if (DO_THE_DBG) #define IFINT if (0) #define ELF_PAGE_MASK 0xFFFFFFFFFFFFF000LL #define ELF_PAGE_SIZE 12 #define R_ELF_NO_RELRO 0 #define R_ELF_PART_RELRO 1 #define R_ELF_FULL_RELRO 2 #define bprintf if(bin->verbose)eprintf #define READ8(x, i) r_read_ble8(x + i); i += 1; #define READ16(x, i) r_read_ble16(x + i, bin->endian); i += 2; #define READ32(x, i) r_read_ble32(x + i, bin->endian); i += 4; #define READ64(x, i) r_read_ble64(x + i, bin->endian); i += 8; #define GROWTH_FACTOR (1.5) static inline int __strnlen(const char *str, int len) { int l = 0; while (IS_PRINTABLE (*str) && --len) { if (((ut8)*str) == 0xff) { break; } str++; l++; } return l + 1; } static int handle_e_ident(ELFOBJ *bin) { return !strncmp ((char *)bin->ehdr.e_ident, ELFMAG, SELFMAG) || !strncmp ((char *)bin->ehdr.e_ident, CGCMAG, SCGCMAG); } static int init_ehdr(ELFOBJ *bin) { ut8 e_ident[EI_NIDENT]; ut8 ehdr[sizeof (Elf_(Ehdr))] = {0}; int i, len; if (r_buf_read_at (bin->b, 0, e_ident, EI_NIDENT) == -1) { bprintf ("Warning: read (magic)\n"); return false; } sdb_set (bin->kv, "elf_type.cparse", "enum elf_type { ET_NONE=0, ET_REL=1," " ET_EXEC=2, ET_DYN=3, ET_CORE=4, ET_LOOS=0xfe00, ET_HIOS=0xfeff," " ET_LOPROC=0xff00, ET_HIPROC=0xffff };", 0); sdb_set (bin->kv, "elf_machine.cparse", "enum elf_machine{EM_NONE=0, EM_M32=1," " EM_SPARC=2, EM_386=3, EM_68K=4, EM_88K=5, EM_486=6, " " EM_860=7, EM_MIPS=8, EM_S370=9, EM_MIPS_RS3_LE=10, EM_RS6000=11," " EM_UNKNOWN12=12, EM_UNKNOWN13=13, EM_UNKNOWN14=14, " " EM_PA_RISC=15, EM_PARISC=EM_PA_RISC, EM_nCUBE=16, EM_VPP500=17," " EM_SPARC32PLUS=18, EM_960=19, EM_PPC=20, EM_PPC64=21, " " EM_S390=22, EM_UNKNOWN22=EM_S390, EM_UNKNOWN23=23, EM_UNKNOWN24=24," " EM_UNKNOWN25=25, EM_UNKNOWN26=26, EM_UNKNOWN27=27, EM_UNKNOWN28=28," " EM_UNKNOWN29=29, EM_UNKNOWN30=30, EM_UNKNOWN31=31, EM_UNKNOWN32=32," " EM_UNKNOWN33=33, EM_UNKNOWN34=34, EM_UNKNOWN35=35, EM_V800=36," " EM_FR20=37, EM_RH32=38, EM_RCE=39, EM_ARM=40, EM_ALPHA=41, EM_SH=42," " EM_SPARCV9=43, EM_TRICORE=44, EM_ARC=45, EM_H8_300=46, EM_H8_300H=47," " EM_H8S=48, EM_H8_500=49, EM_IA_64=50, EM_MIPS_X=51, EM_COLDFIRE=52," " EM_68HC12=53, EM_MMA=54, EM_PCP=55, EM_NCPU=56, EM_NDR1=57," " EM_STARCORE=58, EM_ME16=59, EM_ST100=60, EM_TINYJ=61, EM_AMD64=62," " EM_X86_64=EM_AMD64, EM_PDSP=63, EM_UNKNOWN64=64, EM_UNKNOWN65=65," " EM_FX66=66, EM_ST9PLUS=67, EM_ST7=68, EM_68HC16=69, EM_68HC11=70," " EM_68HC08=71, EM_68HC05=72, EM_SVX=73, EM_ST19=74, EM_VAX=75, " " EM_CRIS=76, EM_JAVELIN=77, EM_FIREPATH=78, EM_ZSP=79, EM_MMIX=80," " EM_HUANY=81, EM_PRISM=82, EM_AVR=83, EM_FR30=84, EM_D10V=85, EM_D30V=86," " EM_V850=87, EM_M32R=88, EM_MN10300=89, EM_MN10200=90, EM_PJ=91," " EM_OPENRISC=92, EM_ARC_A5=93, EM_XTENSA=94, EM_NUM=95};", 0); sdb_num_set (bin->kv, "elf_header.offset", 0, 0); sdb_num_set (bin->kv, "elf_header.size", sizeof (Elf_(Ehdr)), 0); #if R_BIN_ELF64 sdb_set (bin->kv, "elf_header.format", "[16]z[2]E[2]Exqqqxwwwwww" " ident (elf_type)type (elf_machine)machine version entry phoff shoff flags ehsize" " phentsize phnum shentsize shnum shstrndx", 0); #else sdb_set (bin->kv, "elf_header.format", "[16]z[2]E[2]Exxxxxwwwwww" " ident (elf_type)type (elf_machine)machine version entry phoff shoff flags ehsize" " phentsize phnum shentsize shnum shstrndx", 0); #endif 
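	/* e_ident[EI_DATA] declares the byte order of every multi-byte field in
	 * the headers: ELFDATA2LSB (1) is little endian, ELFDATA2MSB (2) is big
	 * endian. The endian flag computed below feeds the READ16/READ32/READ64
	 * macros used for all further header parsing. */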
bin->endian = (e_ident[EI_DATA] == ELFDATA2MSB)? 1: 0; memset (&bin->ehdr, 0, sizeof (Elf_(Ehdr))); len = r_buf_read_at (bin->b, 0, ehdr, sizeof (Elf_(Ehdr))); if (len < 1) { bprintf ("Warning: read (ehdr)\n"); return false; } memcpy (&bin->ehdr.e_ident, ehdr, 16); i = 16; bin->ehdr.e_type = READ16 (ehdr, i) bin->ehdr.e_machine = READ16 (ehdr, i) bin->ehdr.e_version = READ32 (ehdr, i) #if R_BIN_ELF64 bin->ehdr.e_entry = READ64 (ehdr, i) bin->ehdr.e_phoff = READ64 (ehdr, i) bin->ehdr.e_shoff = READ64 (ehdr, i) #else bin->ehdr.e_entry = READ32 (ehdr, i) bin->ehdr.e_phoff = READ32 (ehdr, i) bin->ehdr.e_shoff = READ32 (ehdr, i) #endif bin->ehdr.e_flags = READ32 (ehdr, i) bin->ehdr.e_ehsize = READ16 (ehdr, i) bin->ehdr.e_phentsize = READ16 (ehdr, i) bin->ehdr.e_phnum = READ16 (ehdr, i) bin->ehdr.e_shentsize = READ16 (ehdr, i) bin->ehdr.e_shnum = READ16 (ehdr, i) bin->ehdr.e_shstrndx = READ16 (ehdr, i) return handle_e_ident (bin); // Usage example: // > td `k bin/cur/info/elf_type.cparse`; td `k bin/cur/info/elf_machine.cparse` // > pf `k bin/cur/info/elf_header.format` @ `k bin/cur/info/elf_header.offset` } static int init_phdr(ELFOBJ *bin) { ut32 phdr_size; ut8 phdr[sizeof (Elf_(Phdr))] = {0}; int i, j, len; if (!bin->ehdr.e_phnum) { return false; } if (bin->phdr) { return true; } if (!UT32_MUL (&phdr_size, (ut32)bin->ehdr.e_phnum, sizeof (Elf_(Phdr)))) { return false; } if (!phdr_size) { return false; } if (phdr_size > bin->size) { return false; } if (phdr_size > (ut32)bin->size) { return false; } if (bin->ehdr.e_phoff > bin->size) { return false; } if (bin->ehdr.e_phoff + phdr_size > bin->size) { return false; } if (!(bin->phdr = calloc (phdr_size, 1))) { perror ("malloc (phdr)"); return false; } for (i = 0; i < bin->ehdr.e_phnum; i++) { j = 0; len = r_buf_read_at (bin->b, bin->ehdr.e_phoff + i * sizeof (Elf_(Phdr)), phdr, sizeof (Elf_(Phdr))); if (len < 1) { bprintf ("Warning: read (phdr)\n"); R_FREE (bin->phdr); return false; } bin->phdr[i].p_type = READ32 (phdr, j) #if R_BIN_ELF64 bin->phdr[i].p_flags = READ32 (phdr, j) bin->phdr[i].p_offset = READ64 (phdr, j) bin->phdr[i].p_vaddr = READ64 (phdr, j) bin->phdr[i].p_paddr = READ64 (phdr, j) bin->phdr[i].p_filesz = READ64 (phdr, j) bin->phdr[i].p_memsz = READ64 (phdr, j) bin->phdr[i].p_align = READ64 (phdr, j) #else bin->phdr[i].p_offset = READ32 (phdr, j) bin->phdr[i].p_vaddr = READ32 (phdr, j) bin->phdr[i].p_paddr = READ32 (phdr, j) bin->phdr[i].p_filesz = READ32 (phdr, j) bin->phdr[i].p_memsz = READ32 (phdr, j) bin->phdr[i].p_flags = READ32 (phdr, j) bin->phdr[i].p_align = READ32 (phdr, j) #endif } sdb_num_set (bin->kv, "elf_phdr.offset", bin->ehdr.e_phoff, 0); sdb_num_set (bin->kv, "elf_phdr.size", sizeof (Elf_(Phdr)), 0); sdb_set (bin->kv, "elf_p_type.cparse", "enum elf_p_type {PT_NULL=0,PT_LOAD=1,PT_DYNAMIC=2," "PT_INTERP=3,PT_NOTE=4,PT_SHLIB=5,PT_PHDR=6,PT_LOOS=0x60000000," "PT_HIOS=0x6fffffff,PT_LOPROC=0x70000000,PT_HIPROC=0x7fffffff};", 0); sdb_set (bin->kv, "elf_p_flags.cparse", "enum elf_p_flags {PF_None=0,PF_Exec=1," "PF_Write=2,PF_Write_Exec=3,PF_Read=4,PF_Read_Exec=5,PF_Read_Write=6," "PF_Read_Write_Exec=7};", 0); #if R_BIN_ELF64 sdb_set (bin->kv, "elf_phdr.format", "[4]E[4]Eqqqqqq (elf_p_type)type (elf_p_flags)flags" " offset vaddr paddr filesz memsz align", 0); #else sdb_set (bin->kv, "elf_phdr.format", "[4]Exxxxx[4]Ex (elf_p_type)type offset vaddr paddr" " filesz memsz (elf_p_flags)flags align", 0); #endif return true; // Usage example: // > td `k bin/cur/info/elf_p_type.cparse`; td `k bin/cur/info/elf_p_flags.cparse` // > 
pf `k bin/cur/info/elf_phdr.format` @ `k bin/cur/info/elf_phdr.offset` } static int init_shdr(ELFOBJ *bin) { ut32 shdr_size; ut8 shdr[sizeof (Elf_(Shdr))] = {0}; int i, j, len; if (!bin || bin->shdr) { return true; } if (!UT32_MUL (&shdr_size, bin->ehdr.e_shnum, sizeof (Elf_(Shdr)))) { return false; } if (shdr_size < 1) { return false; } if (shdr_size > bin->size) { return false; } if (bin->ehdr.e_shoff > bin->size) { return false; } if (bin->ehdr.e_shoff + shdr_size > bin->size) { return false; } if (!(bin->shdr = calloc (1, shdr_size + 1))) { perror ("malloc (shdr)"); return false; } sdb_num_set (bin->kv, "elf_shdr.offset", bin->ehdr.e_shoff, 0); sdb_num_set (bin->kv, "elf_shdr.size", sizeof (Elf_(Shdr)), 0); sdb_set (bin->kv, "elf_s_type.cparse", "enum elf_s_type {SHT_NULL=0,SHT_PROGBITS=1," "SHT_SYMTAB=2,SHT_STRTAB=3,SHT_RELA=4,SHT_HASH=5,SHT_DYNAMIC=6,SHT_NOTE=7," "SHT_NOBITS=8,SHT_REL=9,SHT_SHLIB=10,SHT_DYNSYM=11,SHT_LOOS=0x60000000," "SHT_HIOS=0x6fffffff,SHT_LOPROC=0x70000000,SHT_HIPROC=0x7fffffff};", 0); for (i = 0; i < bin->ehdr.e_shnum; i++) { j = 0; len = r_buf_read_at (bin->b, bin->ehdr.e_shoff + i * sizeof (Elf_(Shdr)), shdr, sizeof (Elf_(Shdr))); if (len < 1) { bprintf ("Warning: read (shdr) at 0x%"PFMT64x"\n", (ut64) bin->ehdr.e_shoff); R_FREE (bin->shdr); return false; } bin->shdr[i].sh_name = READ32 (shdr, j) bin->shdr[i].sh_type = READ32 (shdr, j) #if R_BIN_ELF64 bin->shdr[i].sh_flags = READ64 (shdr, j) bin->shdr[i].sh_addr = READ64 (shdr, j) bin->shdr[i].sh_offset = READ64 (shdr, j) bin->shdr[i].sh_size = READ64 (shdr, j) bin->shdr[i].sh_link = READ32 (shdr, j) bin->shdr[i].sh_info = READ32 (shdr, j) bin->shdr[i].sh_addralign = READ64 (shdr, j) bin->shdr[i].sh_entsize = READ64 (shdr, j) #else bin->shdr[i].sh_flags = READ32 (shdr, j) bin->shdr[i].sh_addr = READ32 (shdr, j) bin->shdr[i].sh_offset = READ32 (shdr, j) bin->shdr[i].sh_size = READ32 (shdr, j) bin->shdr[i].sh_link = READ32 (shdr, j) bin->shdr[i].sh_info = READ32 (shdr, j) bin->shdr[i].sh_addralign = READ32 (shdr, j) bin->shdr[i].sh_entsize = READ32 (shdr, j) #endif } #if R_BIN_ELF64 sdb_set (bin->kv, "elf_s_flags_64.cparse", "enum elf_s_flags_64 {SF64_None=0,SF64_Exec=1," "SF64_Alloc=2,SF64_Alloc_Exec=3,SF64_Write=4,SF64_Write_Exec=5," "SF64_Write_Alloc=6,SF64_Write_Alloc_Exec=7};", 0); sdb_set (bin->kv, "elf_shdr.format", "x[4]E[8]Eqqqxxqq name (elf_s_type)type" " (elf_s_flags_64)flags addr offset size link info addralign entsize", 0); #else sdb_set (bin->kv, "elf_s_flags_32.cparse", "enum elf_s_flags_32 {SF32_None=0,SF32_Exec=1," "SF32_Alloc=2,SF32_Alloc_Exec=3,SF32_Write=4,SF32_Write_Exec=5," "SF32_Write_Alloc=6,SF32_Write_Alloc_Exec=7};", 0); sdb_set (bin->kv, "elf_shdr.format", "x[4]E[4]Exxxxxxx name (elf_s_type)type" " (elf_s_flags_32)flags addr offset size link info addralign entsize", 0); #endif return true; // Usage example: // > td `k bin/cur/info/elf_s_type.cparse`; td `k bin/cur/info/elf_s_flags_64.cparse` // > pf `k bin/cur/info/elf_shdr.format` @ `k bin/cur/info/elf_shdr.offset` } static int init_strtab(ELFOBJ *bin) { if (bin->strtab || !bin->shdr) { return false; } if (bin->ehdr.e_shstrndx != SHN_UNDEF && (bin->ehdr.e_shstrndx >= bin->ehdr.e_shnum || (bin->ehdr.e_shstrndx >= SHN_LORESERVE && bin->ehdr.e_shstrndx < SHN_HIRESERVE))) return false; /* sh_size must be lower than UT32_MAX and not equal to zero, to avoid bugs on malloc() */ if (bin->shdr[bin->ehdr.e_shstrndx].sh_size > UT32_MAX) { return false; } if (!bin->shdr[bin->ehdr.e_shstrndx].sh_size) { return false; } bin->shstrtab_section = 
bin->strtab_section = &bin->shdr[bin->ehdr.e_shstrndx];
	bin->shstrtab_size = bin->strtab_section->sh_size;
	if (bin->shstrtab_size > bin->size) {
		return false;
	}
	if (!(bin->shstrtab = calloc (1, bin->shstrtab_size + 1))) {
		perror ("malloc");
		bin->shstrtab = NULL;
		return false;
	}
	if (bin->shstrtab_section->sh_offset > bin->size) {
		R_FREE (bin->shstrtab);
		return false;
	}
	if (bin->shstrtab_section->sh_offset + bin->shstrtab_section->sh_size > bin->size) {
		R_FREE (bin->shstrtab);
		return false;
	}
	if (r_buf_read_at (bin->b, bin->shstrtab_section->sh_offset, (ut8*)bin->shstrtab,
				bin->shstrtab_section->sh_size + 1) < 1) {
		bprintf ("Warning: read (shstrtab) at 0x%"PFMT64x"\n", (ut64) bin->shstrtab_section->sh_offset);
		R_FREE (bin->shstrtab);
		return false;
	}
	bin->shstrtab[bin->shstrtab_section->sh_size] = '\0';
	sdb_num_set (bin->kv, "elf_shstrtab.offset", bin->shstrtab_section->sh_offset, 0);
	sdb_num_set (bin->kv, "elf_shstrtab.size", bin->shstrtab_section->sh_size, 0);
	return true;
}

static int init_dynamic_section(struct Elf_(r_bin_elf_obj_t) *bin) {
	Elf_(Dyn) *dyn = NULL;
	Elf_(Dyn) d = {0};
	Elf_(Addr) strtabaddr = 0;
	ut64 offset = 0;
	char *strtab = NULL;
	size_t relentry = 0, strsize = 0;
	int entries;
	int i, j, len, r;
	ut8 sdyn[sizeof (Elf_(Dyn))] = {0};
	ut32 dyn_size = 0;
	if (!bin || !bin->phdr || !bin->ehdr.e_phnum) {
		return false;
	}
	for (i = 0; i < bin->ehdr.e_phnum; i++) {
		if (bin->phdr[i].p_type == PT_DYNAMIC) {
			dyn_size = bin->phdr[i].p_filesz;
			break;
		}
	}
	if (i == bin->ehdr.e_phnum) {
		return false;
	}
	if (bin->phdr[i].p_filesz > bin->size) {
		return false;
	}
	if (bin->phdr[i].p_offset > bin->size) {
		return false;
	}
	if (bin->phdr[i].p_offset + sizeof (Elf_(Dyn)) > bin->size) {
		return false;
	}
	for (entries = 0; entries < (dyn_size / sizeof (Elf_(Dyn))); entries++) {
		j = 0;
		len = r_buf_read_at (bin->b, bin->phdr[i].p_offset + entries * sizeof (Elf_(Dyn)), sdyn, sizeof (Elf_(Dyn)));
		if (len < 1) {
			goto beach;
		}
#if R_BIN_ELF64
		d.d_tag = READ64 (sdyn, j)
#else
		d.d_tag = READ32 (sdyn, j)
#endif
		if (d.d_tag == DT_NULL) {
			break;
		}
	}
	if (entries < 1) {
		return false;
	}
	dyn = (Elf_(Dyn)*)calloc (entries, sizeof (Elf_(Dyn)));
	if (!dyn) {
		return false;
	}
	if (!UT32_MUL (&dyn_size, entries, sizeof (Elf_(Dyn)))) {
		goto beach;
	}
	if (!dyn_size) {
		goto beach;
	}
	offset = Elf_(r_bin_elf_v2p) (bin, bin->phdr[i].p_vaddr);
	if (offset > bin->size || offset + dyn_size > bin->size) {
		goto beach;
	}
	for (i = 0; i < entries; i++) {
		j = 0;
		len = r_buf_read_at (bin->b, offset + i * sizeof (Elf_(Dyn)), sdyn, sizeof (Elf_(Dyn)));
		if (len < 1) {
			bprintf ("Warning: read (dyn)\n");
		}
#if R_BIN_ELF64
		dyn[i].d_tag = READ64 (sdyn, j)
		dyn[i].d_un.d_ptr = READ64 (sdyn, j)
#else
		dyn[i].d_tag = READ32 (sdyn, j)
		dyn[i].d_un.d_ptr = READ32 (sdyn, j)
#endif
		switch (dyn[i].d_tag) {
		case DT_STRTAB: strtabaddr = Elf_(r_bin_elf_v2p) (bin, dyn[i].d_un.d_ptr); break;
		case DT_STRSZ: strsize = dyn[i].d_un.d_val; break;
		case DT_PLTREL: bin->is_rela = dyn[i].d_un.d_val; break;
		case DT_RELAENT: relentry = dyn[i].d_un.d_val; break;
		default:
			if ((dyn[i].d_tag >= DT_VERSYM) && (dyn[i].d_tag <= DT_VERNEEDNUM)) {
				bin->version_info[DT_VERSIONTAGIDX (dyn[i].d_tag)] = dyn[i].d_un.d_val;
			}
			break;
		}
	}
	if (!bin->is_rela) {
		bin->is_rela = sizeof (Elf_(Rela)) == relentry?
DT_RELA : DT_REL; } if (!strtabaddr || strtabaddr > bin->size || strsize > ST32_MAX || !strsize || strsize > bin->size) { if (!strtabaddr) { bprintf ("Warning: section.shstrtab not found or invalid\n"); } goto beach; } strtab = (char *)calloc (1, strsize + 1); if (!strtab) { goto beach; } if (strtabaddr + strsize > bin->size) { free (strtab); goto beach; } r = r_buf_read_at (bin->b, strtabaddr, (ut8 *)strtab, strsize); if (r < 1) { free (strtab); goto beach; } bin->dyn_buf = dyn; bin->dyn_entries = entries; bin->strtab = strtab; bin->strtab_size = strsize; r = Elf_(r_bin_elf_has_relro)(bin); switch (r) { case R_ELF_FULL_RELRO: sdb_set (bin->kv, "elf.relro", "full", 0); break; case R_ELF_PART_RELRO: sdb_set (bin->kv, "elf.relro", "partial", 0); break; default: sdb_set (bin->kv, "elf.relro", "no", 0); break; } sdb_num_set (bin->kv, "elf_strtab.offset", strtabaddr, 0); sdb_num_set (bin->kv, "elf_strtab.size", strsize, 0); return true; beach: free (dyn); return false; } static RBinElfSection* get_section_by_name(ELFOBJ *bin, const char *section_name) { int i; if (!bin->g_sections) { return NULL; } for (i = 0; !bin->g_sections[i].last; i++) { if (!strncmp (bin->g_sections[i].name, section_name, ELF_STRING_LENGTH-1)) { return &bin->g_sections[i]; } } return NULL; } static char *get_ver_flags(ut32 flags) { static char buff[32]; buff[0] = 0; if (!flags) { return "none"; } if (flags & VER_FLG_BASE) { strcpy (buff, "BASE "); } if (flags & VER_FLG_WEAK) { if (flags & VER_FLG_BASE) { strcat (buff, "| "); } strcat (buff, "WEAK "); } if (flags & ~(VER_FLG_BASE | VER_FLG_WEAK)) { strcat (buff, "| <unknown>"); } return buff; } static Sdb *store_versioninfo_gnu_versym(ELFOBJ *bin, Elf_(Shdr) *shdr, int sz) { int i; const ut64 num_entries = sz / sizeof (Elf_(Versym)); const char *section_name = ""; const char *link_section_name = ""; Elf_(Shdr) *link_shdr = NULL; Sdb *sdb = sdb_new0(); if (!sdb) { return NULL; } if (!bin->version_info[DT_VERSIONTAGIDX (DT_VERSYM)]) { sdb_free (sdb); return NULL; } if (shdr->sh_link > bin->ehdr.e_shnum) { sdb_free (sdb); return NULL; } link_shdr = &bin->shdr[shdr->sh_link]; ut8 *edata = (ut8*) calloc (R_MAX (1, num_entries), sizeof (ut16)); if (!edata) { sdb_free (sdb); return NULL; } ut16 *data = (ut16*) calloc (R_MAX (1, num_entries), sizeof (ut16)); if (!data) { free (edata); sdb_free (sdb); return NULL; } ut64 off = Elf_(r_bin_elf_v2p) (bin, bin->version_info[DT_VERSIONTAGIDX (DT_VERSYM)]); if (bin->shstrtab && shdr->sh_name < bin->shstrtab_size) { section_name = &bin->shstrtab[shdr->sh_name]; } if (bin->shstrtab && link_shdr->sh_name < bin->shstrtab_size) { link_section_name = &bin->shstrtab[link_shdr->sh_name]; } r_buf_read_at (bin->b, off, edata, sizeof (ut16) * num_entries); sdb_set (sdb, "section_name", section_name, 0); sdb_num_set (sdb, "num_entries", num_entries, 0); sdb_num_set (sdb, "addr", shdr->sh_addr, 0); sdb_num_set (sdb, "offset", shdr->sh_offset, 0); sdb_num_set (sdb, "link", shdr->sh_link, 0); sdb_set (sdb, "link_section_name", link_section_name, 0); for (i = num_entries; i--;) { data[i] = r_read_ble16 (&edata[i * sizeof (ut16)], bin->endian); } R_FREE (edata); for (i = 0; i < num_entries; i += 4) { int j; int check_def; char key[32] = {0}; Sdb *sdb_entry = sdb_new0 (); snprintf (key, sizeof (key), "entry%d", i / 4); sdb_ns_set (sdb, key, sdb_entry); sdb_num_set (sdb_entry, "idx", i, 0); for (j = 0; (j < 4) && (i + j) < num_entries; ++j) { int k; char *tmp_val = NULL; snprintf (key, sizeof (key), "value%d", j); switch (data[i + j]) { case 0: sdb_set 
(sdb_entry, key, "0 (*local*)", 0); break; case 1: sdb_set (sdb_entry, key, "1 (*global*)", 0); break; default: tmp_val = sdb_fmt (0, "%x ", data[i+j] & 0x7FFF); check_def = true; if (bin->version_info[DT_VERSIONTAGIDX (DT_VERNEED)]) { Elf_(Verneed) vn; ut8 svn[sizeof (Elf_(Verneed))] = {0}; ut64 offset = Elf_(r_bin_elf_v2p) (bin, bin->version_info[DT_VERSIONTAGIDX (DT_VERNEED)]); do { Elf_(Vernaux) vna; ut8 svna[sizeof (Elf_(Vernaux))] = {0}; ut64 a_off; if (offset > bin->size || offset + sizeof (vn) > bin->size) { goto beach; } if (r_buf_read_at (bin->b, offset, svn, sizeof (svn)) < 0) { bprintf ("Warning: Cannot read Verneed for Versym\n"); goto beach; } k = 0; vn.vn_version = READ16 (svn, k) vn.vn_cnt = READ16 (svn, k) vn.vn_file = READ32 (svn, k) vn.vn_aux = READ32 (svn, k) vn.vn_next = READ32 (svn, k) a_off = offset + vn.vn_aux; do { if (a_off > bin->size || a_off + sizeof (vna) > bin->size) { goto beach; } if (r_buf_read_at (bin->b, a_off, svna, sizeof (svna)) < 0) { bprintf ("Warning: Cannot read Vernaux for Versym\n"); goto beach; } k = 0; vna.vna_hash = READ32 (svna, k) vna.vna_flags = READ16 (svna, k) vna.vna_other = READ16 (svna, k) vna.vna_name = READ32 (svna, k) vna.vna_next = READ32 (svna, k) a_off += vna.vna_next; } while (vna.vna_other != data[i + j] && vna.vna_next != 0); if (vna.vna_other == data[i + j]) { if (vna.vna_name > bin->strtab_size) { goto beach; } sdb_set (sdb_entry, key, sdb_fmt (0, "%s(%s)", tmp_val, bin->strtab + vna.vna_name), 0); check_def = false; break; } offset += vn.vn_next; } while (vn.vn_next); } ut64 vinfoaddr = bin->version_info[DT_VERSIONTAGIDX (DT_VERDEF)]; if (check_def && data[i + j] != 0x8001 && vinfoaddr) { Elf_(Verdef) vd; ut8 svd[sizeof (Elf_(Verdef))] = {0}; ut64 offset = Elf_(r_bin_elf_v2p) (bin, vinfoaddr); if (offset > bin->size || offset + sizeof (vd) > bin->size) { goto beach; } do { if (r_buf_read_at (bin->b, offset, svd, sizeof (svd)) < 0) { bprintf ("Warning: Cannot read Verdef for Versym\n"); goto beach; } k = 0; vd.vd_version = READ16 (svd, k) vd.vd_flags = READ16 (svd, k) vd.vd_ndx = READ16 (svd, k) vd.vd_cnt = READ16 (svd, k) vd.vd_hash = READ32 (svd, k) vd.vd_aux = READ32 (svd, k) vd.vd_next = READ32 (svd, k) offset += vd.vd_next; } while (vd.vd_ndx != (data[i + j] & 0x7FFF) && vd.vd_next != 0); if (vd.vd_ndx == (data[i + j] & 0x7FFF)) { Elf_(Verdaux) vda; ut8 svda[sizeof (Elf_(Verdaux))] = {0}; ut64 off_vda = offset - vd.vd_next + vd.vd_aux; if (off_vda > bin->size || off_vda + sizeof (vda) > bin->size) { goto beach; } if (r_buf_read_at (bin->b, off_vda, svda, sizeof (svda)) < 0) { bprintf ("Warning: Cannot read Verdaux for Versym\n"); goto beach; } k = 0; vda.vda_name = READ32 (svda, k) vda.vda_next = READ32 (svda, k) if (vda.vda_name > bin->strtab_size) { goto beach; } const char *name = bin->strtab + vda.vda_name; sdb_set (sdb_entry, key, sdb_fmt (0,"%s(%s%-*s)", tmp_val, name, (int)(12 - strlen (name)),")") , 0); } } } } } beach: free (data); return sdb; } static Sdb *store_versioninfo_gnu_verdef(ELFOBJ *bin, Elf_(Shdr) *shdr, int sz) { const char *section_name = ""; const char *link_section_name = ""; char *end = NULL; Elf_(Shdr) *link_shdr = NULL; ut8 dfs[sizeof (Elf_(Verdef))] = {0}; Sdb *sdb; int cnt, i; if (shdr->sh_link > bin->ehdr.e_shnum) { return false; } link_shdr = &bin->shdr[shdr->sh_link]; if (shdr->sh_size < 1) { return false; } Elf_(Verdef) *defs = calloc (shdr->sh_size, sizeof (char)); if (!defs) { return false; } if (bin->shstrtab && shdr->sh_name < bin->shstrtab_size) { section_name = 
&bin->shstrtab[shdr->sh_name]; } if (link_shdr && bin->shstrtab && link_shdr->sh_name < bin->shstrtab_size) { link_section_name = &bin->shstrtab[link_shdr->sh_name]; } if (!defs) { bprintf ("Warning: Cannot allocate memory (Check Elf_(Verdef))\n"); return NULL; } sdb = sdb_new0 (); end = (char *)defs + shdr->sh_size; sdb_set (sdb, "section_name", section_name, 0); sdb_num_set (sdb, "entries", shdr->sh_info, 0); sdb_num_set (sdb, "addr", shdr->sh_addr, 0); sdb_num_set (sdb, "offset", shdr->sh_offset, 0); sdb_num_set (sdb, "link", shdr->sh_link, 0); sdb_set (sdb, "link_section_name", link_section_name, 0); for (cnt = 0, i = 0; i >= 0 && cnt < shdr->sh_info && ((char *)defs + i < end); ++cnt) { Sdb *sdb_verdef = sdb_new0 (); char *vstart = ((char*)defs) + i; char key[32] = {0}; Elf_(Verdef) *verdef = (Elf_(Verdef)*)vstart; Elf_(Verdaux) aux = {0}; int j = 0; int isum = 0; r_buf_read_at (bin->b, shdr->sh_offset + i, dfs, sizeof (Elf_(Verdef))); verdef->vd_version = READ16 (dfs, j) verdef->vd_flags = READ16 (dfs, j) verdef->vd_ndx = READ16 (dfs, j) verdef->vd_cnt = READ16 (dfs, j) verdef->vd_hash = READ32 (dfs, j) verdef->vd_aux = READ32 (dfs, j) verdef->vd_next = READ32 (dfs, j) vstart += verdef->vd_aux; if (vstart > end || vstart + sizeof (Elf_(Verdaux)) > end) { sdb_free (sdb_verdef); goto out_error; } j = 0; aux.vda_name = READ32 (vstart, j) aux.vda_next = READ32 (vstart, j) isum = i + verdef->vd_aux; if (aux.vda_name > bin->dynstr_size) { sdb_free (sdb_verdef); goto out_error; } sdb_num_set (sdb_verdef, "idx", i, 0); sdb_num_set (sdb_verdef, "vd_version", verdef->vd_version, 0); sdb_num_set (sdb_verdef, "vd_ndx", verdef->vd_ndx, 0); sdb_num_set (sdb_verdef, "vd_cnt", verdef->vd_cnt, 0); sdb_set (sdb_verdef, "vda_name", &bin->dynstr[aux.vda_name], 0); sdb_set (sdb_verdef, "flags", get_ver_flags (verdef->vd_flags), 0); for (j = 1; j < verdef->vd_cnt; ++j) { int k; Sdb *sdb_parent = sdb_new0 (); isum += aux.vda_next; vstart += aux.vda_next; if (vstart > end || vstart + sizeof(Elf_(Verdaux)) > end) { sdb_free (sdb_verdef); sdb_free (sdb_parent); goto out_error; } k = 0; aux.vda_name = READ32 (vstart, k) aux.vda_next = READ32 (vstart, k) if (aux.vda_name > bin->dynstr_size) { sdb_free (sdb_verdef); sdb_free (sdb_parent); goto out_error; } sdb_num_set (sdb_parent, "idx", isum, 0); sdb_num_set (sdb_parent, "parent", j, 0); sdb_set (sdb_parent, "vda_name", &bin->dynstr[aux.vda_name], 0); snprintf (key, sizeof (key), "parent%d", j - 1); sdb_ns_set (sdb_verdef, key, sdb_parent); } snprintf (key, sizeof (key), "verdef%d", cnt); sdb_ns_set (sdb, key, sdb_verdef); if (!verdef->vd_next) { sdb_free (sdb_verdef); goto out_error; } if ((st32)verdef->vd_next < 1) { eprintf ("Warning: Invalid vd_next in the ELF version\n"); break; } i += verdef->vd_next; } free (defs); return sdb; out_error: free (defs); sdb_free (sdb); return NULL; } static Sdb *store_versioninfo_gnu_verneed(ELFOBJ *bin, Elf_(Shdr) *shdr, int sz) { ut8 *end, *need = NULL; const char *section_name = ""; Elf_(Shdr) *link_shdr = NULL; const char *link_section_name = ""; Sdb *sdb_vernaux = NULL; Sdb *sdb_version = NULL; Sdb *sdb = NULL; int i, cnt; if (!bin || !bin->dynstr) { return NULL; } if (shdr->sh_link > bin->ehdr.e_shnum) { return NULL; } if (shdr->sh_size < 1) { return NULL; } sdb = sdb_new0 (); if (!sdb) { return NULL; } link_shdr = &bin->shdr[shdr->sh_link]; if (bin->shstrtab && shdr->sh_name < bin->shstrtab_size) { section_name = &bin->shstrtab[shdr->sh_name]; } if (bin->shstrtab && link_shdr->sh_name < bin->shstrtab_size) { 
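		/* sh_name is a byte offset into .shstrtab; the check above keeps
		 * the lookup inside the mapped string table */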
		link_section_name = &bin->shstrtab[link_shdr->sh_name];
	}
	if (!(need = (ut8*) calloc (R_MAX (1, shdr->sh_size), sizeof (ut8)))) {
		bprintf ("Warning: Cannot allocate memory for Elf_(Verneed)\n");
		goto beach;
	}
	end = need + shdr->sh_size;
	sdb_set (sdb, "section_name", section_name, 0);
	sdb_num_set (sdb, "num_entries", shdr->sh_info, 0);
	sdb_num_set (sdb, "addr", shdr->sh_addr, 0);
	sdb_num_set (sdb, "offset", shdr->sh_offset, 0);
	sdb_num_set (sdb, "link", shdr->sh_link, 0);
	sdb_set (sdb, "link_section_name", link_section_name, 0);
	if (shdr->sh_offset > bin->size || shdr->sh_offset + shdr->sh_size > bin->size) {
		goto beach;
	}
	if (shdr->sh_offset + shdr->sh_size < shdr->sh_size) {
		goto beach;
	}
	i = r_buf_read_at (bin->b, shdr->sh_offset, need, shdr->sh_size);
	if (i < 0) {
		goto beach;
	}
	//XXX we should use DT_VERNEEDNUM instead of sh_info
	//TODO https://sourceware.org/ml/binutils/2014-11/msg00353.html
	for (i = 0, cnt = 0; cnt < shdr->sh_info; ++cnt) {
		int j, isum;
		ut8 *vstart = need + i;
		Elf_(Verneed) vvn = {0};
		if (vstart + sizeof (Elf_(Verneed)) > end) {
			goto beach;
		}
		Elf_(Verneed) *entry = &vvn;
		char key[32] = {0};
		sdb_version = sdb_new0 ();
		if (!sdb_version) {
			goto beach;
		}
		j = 0;
		vvn.vn_version = READ16 (vstart, j)
		vvn.vn_cnt = READ16 (vstart, j)
		vvn.vn_file = READ32 (vstart, j)
		vvn.vn_aux = READ32 (vstart, j)
		vvn.vn_next = READ32 (vstart, j)
		sdb_num_set (sdb_version, "vn_version", entry->vn_version, 0);
		sdb_num_set (sdb_version, "idx", i, 0);
		if (entry->vn_file > bin->dynstr_size) {
			goto beach;
		}
		{
			char *s = r_str_ndup (&bin->dynstr[entry->vn_file], 16);
			sdb_set (sdb_version, "file_name", s, 0);
			free (s);
		}
		sdb_num_set (sdb_version, "cnt", entry->vn_cnt, 0);
		vstart += entry->vn_aux;
		for (j = 0, isum = i + entry->vn_aux; j < entry->vn_cnt && vstart + sizeof (Elf_(Vernaux)) <= end; ++j) {
			int k;
			Elf_(Vernaux) *aux = NULL;
			Elf_(Vernaux) vaux = {0};
			sdb_vernaux = sdb_new0 ();
			if (!sdb_vernaux) {
				goto beach;
			}
			aux = (Elf_(Vernaux)*)&vaux;
			k = 0;
			vaux.vna_hash = READ32 (vstart, k)
			vaux.vna_flags = READ16 (vstart, k)
			vaux.vna_other = READ16 (vstart, k)
			vaux.vna_name = READ32 (vstart, k)
			vaux.vna_next = READ32 (vstart, k)
			if (aux->vna_name > bin->dynstr_size) {
				goto beach;
			}
			sdb_num_set (sdb_vernaux, "idx", isum, 0);
			if (aux->vna_name > 0 && aux->vna_name + 8 < bin->dynstr_size) {
				char name [16];
				strncpy (name, &bin->dynstr[aux->vna_name], sizeof (name)-1);
				name[sizeof (name)-1] = 0;
				sdb_set (sdb_vernaux, "name", name, 0);
			}
			sdb_set (sdb_vernaux, "flags", get_ver_flags (aux->vna_flags), 0);
			sdb_num_set (sdb_vernaux, "version", aux->vna_other, 0);
			isum += aux->vna_next;
			vstart += aux->vna_next;
			snprintf (key, sizeof (key), "vernaux%d", j);
			sdb_ns_set (sdb_version, key, sdb_vernaux);
		}
		if ((int)entry->vn_next < 0) {
			bprintf ("Invalid vn_next\n");
			break;
		}
		i += entry->vn_next;
		snprintf (key, sizeof (key), "version%d", cnt);
		sdb_ns_set (sdb, key, sdb_version);
		//if entry->vn_next is 0 it iterates infinitely
		if (!entry->vn_next) {
			break;
		}
	}
	free (need);
	return sdb;
beach:
	free (need);
	sdb_free (sdb_vernaux);
	sdb_free (sdb_version);
	sdb_free (sdb);
	return NULL;
}

static Sdb *store_versioninfo(ELFOBJ *bin) {
	Sdb *sdb_versioninfo = NULL;
	int num_verdef = 0;
	int num_verneed = 0;
	int num_versym = 0;
	int i;
	if (!bin || !bin->shdr) {
		return NULL;
	}
	if (!(sdb_versioninfo = sdb_new0 ())) {
		return NULL;
	}
	for (i = 0; i < bin->ehdr.e_shnum; i++) {
		Sdb *sdb = NULL;
		char key[32] = {0};
		int size = bin->shdr[i].sh_size;
		if (size - (i * sizeof (Elf_(Shdr))) > bin->size) {
			size = bin->size - (i * sizeof (Elf_(Shdr)));
		}
		int
left = size - (i * sizeof (Elf_(Shdr))); left = R_MIN (left, bin->shdr[i].sh_size); if (left < 0) { break; } switch (bin->shdr[i].sh_type) { case SHT_GNU_verdef: sdb = store_versioninfo_gnu_verdef (bin, &bin->shdr[i], left); snprintf (key, sizeof (key), "verdef%d", num_verdef++); sdb_ns_set (sdb_versioninfo, key, sdb); break; case SHT_GNU_verneed: sdb = store_versioninfo_gnu_verneed (bin, &bin->shdr[i], left); snprintf (key, sizeof (key), "verneed%d", num_verneed++); sdb_ns_set (sdb_versioninfo, key, sdb); break; case SHT_GNU_versym: sdb = store_versioninfo_gnu_versym (bin, &bin->shdr[i], left); snprintf (key, sizeof (key), "versym%d", num_versym++); sdb_ns_set (sdb_versioninfo, key, sdb); break; } } return sdb_versioninfo; } static bool init_dynstr(ELFOBJ *bin) { int i, r; const char *section_name = NULL; if (!bin || !bin->shdr) { return false; } if (!bin->shstrtab) { return false; } for (i = 0; i < bin->ehdr.e_shnum; ++i) { if (bin->shdr[i].sh_name > bin->shstrtab_size) { return false; } section_name = &bin->shstrtab[bin->shdr[i].sh_name]; if (bin->shdr[i].sh_type == SHT_STRTAB && !strcmp (section_name, ".dynstr")) { if (!(bin->dynstr = (char*) calloc (bin->shdr[i].sh_size + 1, sizeof (char)))) { bprintf("Warning: Cannot allocate memory for dynamic strings\n"); return false; } if (bin->shdr[i].sh_offset > bin->size) { return false; } if (bin->shdr[i].sh_offset + bin->shdr[i].sh_size > bin->size) { return false; } if (bin->shdr[i].sh_offset + bin->shdr[i].sh_size < bin->shdr[i].sh_size) { return false; } r = r_buf_read_at (bin->b, bin->shdr[i].sh_offset, (ut8*)bin->dynstr, bin->shdr[i].sh_size); if (r < 1) { R_FREE (bin->dynstr); bin->dynstr_size = 0; return false; } bin->dynstr_size = bin->shdr[i].sh_size; return true; } } return false; } static int elf_init(ELFOBJ *bin) { bin->phdr = NULL; bin->shdr = NULL; bin->strtab = NULL; bin->shstrtab = NULL; bin->strtab_size = 0; bin->strtab_section = NULL; bin->dyn_buf = NULL; bin->dynstr = NULL; ZERO_FILL (bin->version_info); bin->g_sections = NULL; bin->g_symbols = NULL; bin->g_imports = NULL; /* bin is not an ELF */ if (!init_ehdr (bin)) { return false; } if (!init_phdr (bin)) { bprintf ("Warning: Cannot initialize program headers\n"); } if (!init_shdr (bin)) { bprintf ("Warning: Cannot initialize section headers\n"); } if (!init_strtab (bin)) { bprintf ("Warning: Cannot initialize strings table\n"); } if (!init_dynstr (bin)) { bprintf ("Warning: Cannot initialize dynamic strings\n"); } bin->baddr = Elf_(r_bin_elf_get_baddr) (bin); if (!init_dynamic_section (bin) && !Elf_(r_bin_elf_get_static)(bin)) bprintf ("Warning: Cannot initialize dynamic section\n"); bin->imports_by_ord_size = 0; bin->imports_by_ord = NULL; bin->symbols_by_ord_size = 0; bin->symbols_by_ord = NULL; bin->g_sections = Elf_(r_bin_elf_get_sections) (bin); bin->boffset = Elf_(r_bin_elf_get_boffset) (bin); sdb_ns_set (bin->kv, "versioninfo", store_versioninfo (bin)); return true; } ut64 Elf_(r_bin_elf_get_section_offset)(ELFOBJ *bin, const char *section_name) { RBinElfSection *section = get_section_by_name (bin, section_name); if (!section) return UT64_MAX; return section->offset; } ut64 Elf_(r_bin_elf_get_section_addr)(ELFOBJ *bin, const char *section_name) { RBinElfSection *section = get_section_by_name (bin, section_name); return section? section->rva: UT64_MAX; } ut64 Elf_(r_bin_elf_get_section_addr_end)(ELFOBJ *bin, const char *section_name) { RBinElfSection *section = get_section_by_name (bin, section_name); return section? 
section->rva + section->size: UT64_MAX; } #define REL (is_rela ? (void*)rela : (void*)rel) #define REL_BUF is_rela ? (ut8*)(&rela[k]) : (ut8*)(&rel[k]) #define REL_OFFSET is_rela ? rela[k].r_offset : rel[k].r_offset #define REL_TYPE is_rela ? rela[k].r_info : rel[k].r_info static ut64 get_import_addr(ELFOBJ *bin, int sym) { Elf_(Rel) *rel = NULL; Elf_(Rela) *rela = NULL; ut8 rl[sizeof (Elf_(Rel))] = {0}; ut8 rla[sizeof (Elf_(Rela))] = {0}; RBinElfSection *rel_sec = NULL; Elf_(Addr) plt_sym_addr = -1; ut64 got_addr, got_offset; ut64 plt_addr; int j, k, tsize, len, nrel; bool is_rela = false; const char *rel_sect[] = { ".rel.plt", ".rela.plt", ".rel.dyn", ".rela.dyn", NULL }; const char *rela_sect[] = { ".rela.plt", ".rel.plt", ".rela.dyn", ".rel.dyn", NULL }; if ((!bin->shdr || !bin->strtab) && !bin->phdr) { return -1; } if ((got_offset = Elf_(r_bin_elf_get_section_offset) (bin, ".got")) == -1 && (got_offset = Elf_(r_bin_elf_get_section_offset) (bin, ".got.plt")) == -1) { return -1; } if ((got_addr = Elf_(r_bin_elf_get_section_addr) (bin, ".got")) == -1 && (got_addr = Elf_(r_bin_elf_get_section_addr) (bin, ".got.plt")) == -1) { return -1; } if (bin->is_rela == DT_REL) { j = 0; while (!rel_sec && rel_sect[j]) { rel_sec = get_section_by_name (bin, rel_sect[j++]); } tsize = sizeof (Elf_(Rel)); } else if (bin->is_rela == DT_RELA) { j = 0; while (!rel_sec && rela_sect[j]) { rel_sec = get_section_by_name (bin, rela_sect[j++]); } is_rela = true; tsize = sizeof (Elf_(Rela)); } if (!rel_sec) { return -1; } if (rel_sec->size < 1) { return -1; } nrel = (ut32)((int)rel_sec->size / (int)tsize); if (nrel < 1) { return -1; } if (is_rela) { rela = calloc (nrel, tsize); if (!rela) { return -1; } } else { rel = calloc (nrel, tsize); if (!rel) { return -1; } } for (j = k = 0; j < rel_sec->size && k < nrel; j += tsize, k++) { int l = 0; if (rel_sec->offset + j > bin->size) { goto out; } if (rel_sec->offset + j + tsize > bin->size) { goto out; } len = r_buf_read_at ( bin->b, rel_sec->offset + j, is_rela ? rla : rl, is_rela ? 
			sizeof (Elf_ (Rela)) : sizeof (Elf_ (Rel)));
		if (len < 1) {
			goto out;
		}
#if R_BIN_ELF64
		if (is_rela) {
			rela[k].r_offset = READ64 (rla, l)
			rela[k].r_info = READ64 (rla, l)
			rela[k].r_addend = READ64 (rla, l)
		} else {
			rel[k].r_offset = READ64 (rl, l)
			rel[k].r_info = READ64 (rl, l)
		}
#else
		if (is_rela) {
			rela[k].r_offset = READ32 (rla, l)
			rela[k].r_info = READ32 (rla, l)
			rela[k].r_addend = READ32 (rla, l)
		} else {
			rel[k].r_offset = READ32 (rl, l)
			rel[k].r_info = READ32 (rl, l)
		}
#endif
		int reloc_type = ELF_R_TYPE (REL_TYPE);
		int reloc_sym = ELF_R_SYM (REL_TYPE);
		if (reloc_sym == sym) {
			int of = REL_OFFSET;
			of = of - got_addr + got_offset;
			switch (bin->ehdr.e_machine) {
			case EM_PPC:
			case EM_PPC64:
				{
					RBinElfSection *s = get_section_by_name (bin, ".plt");
					if (s) {
						ut8 buf[4];
						ut64 base;
						len = r_buf_read_at (bin->b, s->offset, buf, sizeof (buf));
						if (len < 4) {
							goto out;
						}
						base = r_read_be32 (buf);
						base -= (nrel * 16);
						base += (k * 16);
						plt_addr = base;
						free (REL);
						return plt_addr;
					}
				}
				break;
			case EM_SPARC:
			case EM_SPARCV9:
			case EM_SPARC32PLUS:
				plt_addr = Elf_(r_bin_elf_get_section_addr) (bin, ".plt");
				if (plt_addr == -1) {
					// free whichever table was actually allocated
					free (REL);
					return -1;
				}
				if (reloc_type == R_386_PC16) {
					plt_addr += k * 12 + 20;
					// thumb symbol
					if (plt_addr & 1) {
						plt_addr--;
					}
					free (REL);
					return plt_addr;
				} else {
					bprintf ("Unknown sparc reloc type %d\n", reloc_type);
				}
				/* SPARC */
				break;
			case EM_ARM:
			case EM_AARCH64:
				plt_addr = Elf_(r_bin_elf_get_section_addr) (bin, ".plt");
				if (plt_addr == -1) {
					free (REL);
					return UT32_MAX;
				}
				switch (reloc_type) {
				case R_386_8:
					{
						plt_addr += k * 12 + 20;
						// thumb symbol
						if (plt_addr & 1) {
							plt_addr--;
						}
						free (REL);
						return plt_addr;
					}
					break;
				case 1026: // arm64 aarch64
					plt_sym_addr = plt_addr + k * 16 + 32;
					goto done;
				default:
					bprintf ("Unsupported relocation type for imports %d\n", reloc_type);
					break;
				}
				break;
			case EM_386:
			case EM_X86_64:
				switch (reloc_type) {
				case 1: // unknown relocs found in voidlinux for x86-64
					// break;
				case R_386_GLOB_DAT:
				case R_386_JMP_SLOT:
					{
						ut8 buf[8];
						if (of + sizeof (Elf_(Addr)) < bin->size) {
							// ONLY FOR X86
							if (of > bin->size || of + sizeof (Elf_(Addr)) > bin->size) {
								goto out;
							}
							len = r_buf_read_at (bin->b, of, buf, sizeof (Elf_(Addr)));
							if (len < 1) {
								goto out;
							}
							plt_sym_addr = sizeof (Elf_(Addr)) == 4 ? r_read_le32 (buf) : r_read_le64 (buf);
							if (!plt_sym_addr) {
								// XXX HACK ALERT: full relro? try to fix it
								// will there always be .plt.got? what happens if it is .got.plt?
								RBinElfSection *s = get_section_by_name (bin, ".plt.got");
								if (Elf_(r_bin_elf_has_relro)(bin) < R_ELF_PART_RELRO || !s) {
									goto done;
								}
								plt_addr = s->offset;
								of = of + got_addr - got_offset;
								while (plt_addr + 2 + 4 < s->offset + s->size) {
									/* We try to locate the plt entry that corresponds to the
									 * relocation, since the got does not point back to .plt.
									 * In this case it has the following form:
									 *   ff253a152000  JMP QWORD [RIP + 0x20153A]
									 *   6690          NOP
									 *   ----
									 *   ff25ec9f0408  JMP DWORD [reloc.puts_236]
									 * plt_addr + 2 skips the jmp opcode so we can read the
									 * 4-byte immediate; if RIP (plt_addr + 6) + imm equals
									 * rel->offset, plt_addr is our symbol address.
									 * Perhaps this hack doesn't work on 32 bits. */
									len = r_buf_read_at (bin->b, plt_addr + 2, buf, 4);
									if (len < 1) {
										goto out;
									}
									plt_sym_addr = sizeof (Elf_(Addr)) == 4 ?
r_read_le32 (buf) : r_read_le64 (buf); //relative address if ((plt_addr + 6 + Elf_(r_bin_elf_v2p) (bin, plt_sym_addr)) == of) { plt_sym_addr = plt_addr; goto done; } else if (plt_sym_addr == of) { plt_sym_addr = plt_addr; goto done; } plt_addr += 8; } } else { plt_sym_addr -= 6; } goto done; } break; } default: bprintf ("Unsupported relocation type for imports %d\n", reloc_type); free (REL); return of; break; } break; case 8: // MIPS32 BIG ENDIAN relocs { RBinElfSection *s = get_section_by_name (bin, ".rela.plt"); if (s) { ut8 buf[1024]; const ut8 *base; plt_addr = s->rva + s->size; len = r_buf_read_at (bin->b, s->offset + s->size, buf, sizeof (buf)); if (len != sizeof (buf)) { // oops } base = r_mem_mem_aligned (buf, sizeof (buf), (const ut8*)"\x3c\x0f\x00", 3, 4); if (base) { plt_addr += (int)(size_t)(base - buf); } else { plt_addr += 108 + 8; // HARDCODED HACK } plt_addr += k * 16; free (REL); return plt_addr; } } break; default: bprintf ("Unsupported relocs type %d for arch %d\n", reloc_type, bin->ehdr.e_machine); break; } } } done: free (REL); return plt_sym_addr; out: free (REL); return -1; } int Elf_(r_bin_elf_has_nx)(ELFOBJ *bin) { int i; if (bin && bin->phdr) { for (i = 0; i < bin->ehdr.e_phnum; i++) { if (bin->phdr[i].p_type == PT_GNU_STACK) { return (!(bin->phdr[i].p_flags & 1))? 1: 0; } } } return 0; } int Elf_(r_bin_elf_has_relro)(ELFOBJ *bin) { int i; bool haveBindNow = false; bool haveGnuRelro = false; if (bin && bin->dyn_buf) { for (i = 0; i < bin->dyn_entries; i++) { switch (bin->dyn_buf[i].d_tag) { case DT_BIND_NOW: haveBindNow = true; break; case DT_FLAGS: for (i++; i < bin->dyn_entries ; i++) { ut32 dTag = bin->dyn_buf[i].d_tag; if (!dTag) { break; } switch (dTag) { case DT_FLAGS_1: if (bin->dyn_buf[i].d_un.d_val & DF_1_NOW) { haveBindNow = true; break; } } } break; } } } if (bin && bin->phdr) { for (i = 0; i < bin->ehdr.e_phnum; i++) { if (bin->phdr[i].p_type == PT_GNU_RELRO) { haveGnuRelro = true; break; } } } if (haveGnuRelro) { if (haveBindNow) { return R_ELF_FULL_RELRO; } return R_ELF_PART_RELRO; } return R_ELF_NO_RELRO; } /* To compute the base address, one determines the memory address associated with the lowest p_vaddr value for a PT_LOAD segment. One then obtains the base address by truncating the memory address to the nearest multiple of the maximum page size */ ut64 Elf_(r_bin_elf_get_baddr)(ELFOBJ *bin) { int i; ut64 tmp, base = UT64_MAX; if (!bin) { return 0; } if (bin->phdr) { for (i = 0; i < bin->ehdr.e_phnum; i++) { if (bin->phdr[i].p_type == PT_LOAD) { tmp = (ut64)bin->phdr[i].p_vaddr & ELF_PAGE_MASK; tmp = tmp - (tmp % (1 << ELF_PAGE_SIZE)); if (tmp < base) { base = tmp; } } } } if (base == UT64_MAX && bin->ehdr.e_type == ET_REL) { //we return our own base address for ET_REL type //we act as a loader for ELF return 0x08000000; } return base == UT64_MAX ? 0 : base; } ut64 Elf_(r_bin_elf_get_boffset)(ELFOBJ *bin) { int i; ut64 tmp, base = UT64_MAX; if (bin && bin->phdr) { for (i = 0; i < bin->ehdr.e_phnum; i++) { if (bin->phdr[i].p_type == PT_LOAD) { tmp = (ut64)bin->phdr[i].p_offset & ELF_PAGE_MASK; tmp = tmp - (tmp % (1 << ELF_PAGE_SIZE)); if (tmp < base) { base = tmp; } } } } return base == UT64_MAX ? 
0 : base; } ut64 Elf_(r_bin_elf_get_init_offset)(ELFOBJ *bin) { ut64 entry = Elf_(r_bin_elf_get_entry_offset) (bin); ut8 buf[512]; if (!bin) { return 0LL; } if (r_buf_read_at (bin->b, entry + 16, buf, sizeof (buf)) < 1) { bprintf ("Warning: read (init_offset)\n"); return 0; } if (buf[0] == 0x68) { // push // x86 only ut64 addr; memmove (buf, buf+1, 4); addr = (ut64)r_read_le32 (buf); return Elf_(r_bin_elf_v2p) (bin, addr); } return 0; } ut64 Elf_(r_bin_elf_get_fini_offset)(ELFOBJ *bin) { ut64 entry = Elf_(r_bin_elf_get_entry_offset) (bin); ut8 buf[512]; if (!bin) { return 0LL; } if (r_buf_read_at (bin->b, entry+11, buf, sizeof (buf)) == -1) { bprintf ("Warning: read (get_fini)\n"); return 0; } if (*buf == 0x68) { // push // x86/32 only ut64 addr; memmove (buf, buf+1, 4); addr = (ut64)r_read_le32 (buf); return Elf_(r_bin_elf_v2p) (bin, addr); } return 0; } ut64 Elf_(r_bin_elf_get_entry_offset)(ELFOBJ *bin) { ut64 entry; if (!bin) { return 0LL; } entry = bin->ehdr.e_entry; if (!entry) { entry = Elf_(r_bin_elf_get_section_offset)(bin, ".init.text"); if (entry != UT64_MAX) { return entry; } entry = Elf_(r_bin_elf_get_section_offset)(bin, ".text"); if (entry != UT64_MAX) { return entry; } entry = Elf_(r_bin_elf_get_section_offset)(bin, ".init"); if (entry != UT64_MAX) { return entry; } if (entry == UT64_MAX) { return 0; } } return Elf_(r_bin_elf_v2p) (bin, entry); } static ut64 getmainsymbol(ELFOBJ *bin) { struct r_bin_elf_symbol_t *symbol; int i; if (!(symbol = Elf_(r_bin_elf_get_symbols) (bin))) { return UT64_MAX; } for (i = 0; !symbol[i].last; i++) { if (!strcmp (symbol[i].name, "main")) { ut64 paddr = symbol[i].offset; return Elf_(r_bin_elf_p2v) (bin, paddr); } } return UT64_MAX; } ut64 Elf_(r_bin_elf_get_main_offset)(ELFOBJ *bin) { ut64 entry = Elf_(r_bin_elf_get_entry_offset) (bin); ut8 buf[512]; if (!bin) { return 0LL; } if (entry > bin->size || (entry + sizeof (buf)) > bin->size) { return 0; } if (r_buf_read_at (bin->b, entry, buf, sizeof (buf)) < 1) { bprintf ("Warning: read (main)\n"); return 0; } // ARM64 if (buf[0x18+3] == 0x58 && buf[0x2f] == 0x00) { ut32 entry_vaddr = Elf_(r_bin_elf_p2v) (bin, entry); ut32 main_addr = r_read_le32 (&buf[0x30]); if ((main_addr >> 16) == (entry_vaddr >> 16)) { return Elf_(r_bin_elf_v2p) (bin, main_addr); } } // TODO: Use arch to identify arch before memcmp's // ARM ut64 text = Elf_(r_bin_elf_get_section_offset)(bin, ".text"); ut64 text_end = text + bin->size; // ARM-Thumb-Linux if (entry & 1 && !memcmp (buf, "\xf0\x00\x0b\x4f\xf0\x00", 6)) { ut32 * ptr = (ut32*)(buf+40-1); if (*ptr &1) { return Elf_(r_bin_elf_v2p) (bin, *ptr -1); } } if (!memcmp (buf, "\x00\xb0\xa0\xe3\x00\xe0\xa0\xe3", 8)) { // endian stuff here ut32 *addr = (ut32*)(buf+0x34); /* 0x00012000 00b0a0e3 mov fp, 0 0x00012004 00e0a0e3 mov lr, 0 */ if (*addr > text && *addr < (text_end)) { return Elf_(r_bin_elf_v2p) (bin, *addr); } } // MIPS /* get .got, calculate offset of main symbol */ if (!memcmp (buf, "\x21\x00\xe0\x03\x01\x00\x11\x04", 8)) { /* assuming the startup code looks like got = gp-0x7ff0 got[index__libc_start_main] ( got[index_main] ); looking for the instruction generating the first argument to find main lw a0, offset(gp) */ ut64 got_offset; if ((got_offset = Elf_(r_bin_elf_get_section_offset) (bin, ".got")) != -1 || (got_offset = Elf_(r_bin_elf_get_section_offset) (bin, ".got.plt")) != -1) { const ut64 gp = got_offset + 0x7ff0; unsigned i; for (i = 0; i < sizeof(buf) / sizeof(buf[0]); i += 4) { const ut32 instr = r_read_le32 (&buf[i]); if ((instr & 0xffff0000) == 
0x8f840000) { // lw a0, offset(gp) const short delta = instr & 0x0000ffff; r_buf_read_at (bin->b, /* got_entry_offset = */ gp + delta, buf, 4); return Elf_(r_bin_elf_v2p) (bin, r_read_le32 (&buf[0])); } } } return 0; } // ARM if (!memcmp (buf, "\x24\xc0\x9f\xe5\x00\xb0\xa0\xe3", 8)) { ut64 addr = r_read_le32 (&buf[48]); return Elf_(r_bin_elf_v2p) (bin, addr); } // X86-CGC if (buf[0] == 0xe8 && !memcmp (buf + 5, "\x50\xe8\x00\x00\x00\x00\xb8\x01\x00\x00\x00\x53", 12)) { size_t SIZEOF_CALL = 5; ut64 rel_addr = (ut64)((int)(buf[1] + (buf[2] << 8) + (buf[3] << 16) + (buf[4] << 24))); ut64 addr = Elf_(r_bin_elf_p2v)(bin, entry + SIZEOF_CALL); addr += rel_addr; return Elf_(r_bin_elf_v2p) (bin, addr); } // X86-PIE if (buf[0x00] == 0x48 && buf[0x1e] == 0x8d && buf[0x11] == 0xe8) { ut32 *pmain = (ut32*)(buf + 0x30); ut64 vmain = Elf_(r_bin_elf_p2v) (bin, (ut64)*pmain); ut64 ventry = Elf_(r_bin_elf_p2v) (bin, entry); if (vmain >> 16 == ventry >> 16) { return (ut64)vmain; } } // X86-PIE if (buf[0x1d] == 0x48 && buf[0x1e] == 0x8b) { if (!memcmp (buf, "\x31\xed\x49\x89", 4)) {// linux ut64 maddr, baddr; ut8 n32s[sizeof (ut32)] = {0}; maddr = entry + 0x24 + r_read_le32 (buf + 0x20); if (r_buf_read_at (bin->b, maddr, n32s, sizeof (ut32)) == -1) { bprintf ("Warning: read (maddr) 2\n"); return 0; } maddr = (ut64)r_read_le32 (&n32s[0]); baddr = (bin->ehdr.e_entry >> 16) << 16; if (bin->phdr) { baddr = Elf_(r_bin_elf_get_baddr) (bin); } maddr += baddr; return maddr; } } // X86-NONPIE #if R_BIN_ELF64 if (!memcmp (buf, "\x49\x89\xd9", 3) && buf[156] == 0xe8) { // openbsd return r_read_le32 (&buf[157]) + entry + 156 + 5; } if (!memcmp (buf+29, "\x48\xc7\xc7", 3)) { // linux ut64 addr = (ut64)r_read_le32 (&buf[29 + 3]); return Elf_(r_bin_elf_v2p) (bin, addr); } #else if (buf[23] == '\x68') { ut64 addr = (ut64)r_read_le32 (&buf[23 + 1]); return Elf_(r_bin_elf_v2p) (bin, addr); } #endif /* linux64 pie main -- probably buggy in some cases */ if (buf[29] == 0x48 && buf[30] == 0x8d) { // lea rdi, qword [rip-0x21c4] ut8 *p = buf + 32; st32 maindelta = (st32)r_read_le32 (p); ut64 vmain = (ut64)(entry + 29 + maindelta) + 7; ut64 ventry = Elf_(r_bin_elf_p2v) (bin, entry); if (vmain>>16 == ventry>>16) { return (ut64)vmain; } } /* find sym.main if possible */ { ut64 m = getmainsymbol (bin); if (m != UT64_MAX) return m; } return UT64_MAX; } int Elf_(r_bin_elf_get_stripped)(ELFOBJ *bin) { int i; if (!bin->shdr) { return false; } for (i = 0; i < bin->ehdr.e_shnum; i++) { if (bin->shdr[i].sh_type == SHT_SYMTAB) { return false; } } return true; } char *Elf_(r_bin_elf_intrp)(ELFOBJ *bin) { int i; if (!bin || !bin->phdr) { return NULL; } for (i = 0; i < bin->ehdr.e_phnum; i++) { if (bin->phdr[i].p_type == PT_INTERP) { char *str = NULL; ut64 addr = bin->phdr[i].p_offset; int sz = bin->phdr[i].p_memsz; sdb_num_set (bin->kv, "elf_header.intrp_addr", addr, 0); sdb_num_set (bin->kv, "elf_header.intrp_size", sz, 0); if (sz < 1) { return NULL; } str = malloc (sz + 1); if (!str) { return NULL; } if (r_buf_read_at (bin->b, addr, (ut8*)str, sz) < 1) { bprintf ("Warning: read (main)\n"); return 0; } str[sz] = 0; sdb_set (bin->kv, "elf_header.intrp", str, 0); return str; } } return NULL; } int Elf_(r_bin_elf_get_static)(ELFOBJ *bin) { int i; if (!bin->phdr) { return false; } for (i = 0; i < bin->ehdr.e_phnum; i++) { if (bin->phdr[i].p_type == PT_INTERP) { return false; } } return true; } char* Elf_(r_bin_elf_get_data_encoding)(ELFOBJ *bin) { switch (bin->ehdr.e_ident[EI_DATA]) { case ELFDATANONE: return strdup ("none"); case ELFDATA2LSB: 
return strdup ("2's complement, little endian"); case ELFDATA2MSB: return strdup ("2's complement, big endian"); default: return r_str_newf ("<unknown: %x>", bin->ehdr.e_ident[EI_DATA]); } } int Elf_(r_bin_elf_has_va)(ELFOBJ *bin) { return true; } char* Elf_(r_bin_elf_get_arch)(ELFOBJ *bin) { switch (bin->ehdr.e_machine) { case EM_ARC: case EM_ARC_A5: return strdup ("arc"); case EM_AVR: return strdup ("avr"); case EM_CRIS: return strdup ("cris"); case EM_68K: return strdup ("m68k"); case EM_MIPS: case EM_MIPS_RS3_LE: case EM_MIPS_X: return strdup ("mips"); case EM_MCST_ELBRUS: return strdup ("elbrus"); case EM_TRICORE: return strdup ("tricore"); case EM_ARM: case EM_AARCH64: return strdup ("arm"); case EM_HEXAGON: return strdup ("hexagon"); case EM_BLACKFIN: return strdup ("blackfin"); case EM_SPARC: case EM_SPARC32PLUS: case EM_SPARCV9: return strdup ("sparc"); case EM_PPC: case EM_PPC64: return strdup ("ppc"); case EM_PARISC: return strdup ("hppa"); case EM_PROPELLER: return strdup ("propeller"); case EM_MICROBLAZE: return strdup ("microblaze.gnu"); case EM_RISCV: return strdup ("riscv"); case EM_VAX: return strdup ("vax"); case EM_XTENSA: return strdup ("xtensa"); case EM_LANAI: return strdup ("lanai"); case EM_VIDEOCORE3: case EM_VIDEOCORE4: return strdup ("vc4"); case EM_SH: return strdup ("sh"); case EM_V850: return strdup ("v850"); case EM_IA_64: return strdup("ia64"); default: return strdup ("x86"); } } char* Elf_(r_bin_elf_get_machine_name)(ELFOBJ *bin) { switch (bin->ehdr.e_machine) { case EM_NONE: return strdup ("No machine"); case EM_M32: return strdup ("AT&T WE 32100"); case EM_SPARC: return strdup ("SUN SPARC"); case EM_386: return strdup ("Intel 80386"); case EM_68K: return strdup ("Motorola m68k family"); case EM_88K: return strdup ("Motorola m88k family"); case EM_860: return strdup ("Intel 80860"); case EM_MIPS: return strdup ("MIPS R3000"); case EM_S370: return strdup ("IBM System/370"); case EM_MIPS_RS3_LE: return strdup ("MIPS R3000 little-endian"); case EM_PARISC: return strdup ("HPPA"); case EM_VPP500: return strdup ("Fujitsu VPP500"); case EM_SPARC32PLUS: return strdup ("Sun's \"v8plus\""); case EM_960: return strdup ("Intel 80960"); case EM_PPC: return strdup ("PowerPC"); case EM_PPC64: return strdup ("PowerPC 64-bit"); case EM_S390: return strdup ("IBM S390"); case EM_V800: return strdup ("NEC V800 series"); case EM_FR20: return strdup ("Fujitsu FR20"); case EM_RH32: return strdup ("TRW RH-32"); case EM_RCE: return strdup ("Motorola RCE"); case EM_ARM: return strdup ("ARM"); case EM_BLACKFIN: return strdup ("Analog Devices Blackfin"); case EM_FAKE_ALPHA: return strdup ("Digital Alpha"); case EM_SH: return strdup ("Hitachi SH"); case EM_SPARCV9: return strdup ("SPARC v9 64-bit"); case EM_TRICORE: return strdup ("Siemens Tricore"); case EM_ARC: return strdup ("Argonaut RISC Core"); case EM_H8_300: return strdup ("Hitachi H8/300"); case EM_H8_300H: return strdup ("Hitachi H8/300H"); case EM_H8S: return strdup ("Hitachi H8S"); case EM_H8_500: return strdup ("Hitachi H8/500"); case EM_IA_64: return strdup ("Intel Merced"); case EM_MIPS_X: return strdup ("Stanford MIPS-X"); case EM_COLDFIRE: return strdup ("Motorola Coldfire"); case EM_68HC12: return strdup ("Motorola M68HC12"); case EM_MMA: return strdup ("Fujitsu MMA Multimedia Accelerator"); case EM_PCP: return strdup ("Siemens PCP"); case EM_NCPU: return strdup ("Sony nCPU embeeded RISC"); case EM_NDR1: return strdup ("Denso NDR1 microprocessor"); case EM_STARCORE: return strdup ("Motorola Start*Core processor"); 
case EM_ME16: return strdup ("Toyota ME16 processor"); case EM_ST100: return strdup ("STMicroelectronic ST100 processor"); case EM_TINYJ: return strdup ("Advanced Logic Corp. Tinyj emb.fam"); case EM_X86_64: return strdup ("AMD x86-64 architecture"); case EM_LANAI: return strdup ("32bit LANAI architecture"); case EM_PDSP: return strdup ("Sony DSP Processor"); case EM_FX66: return strdup ("Siemens FX66 microcontroller"); case EM_ST9PLUS: return strdup ("STMicroelectronics ST9+ 8/16 mc"); case EM_ST7: return strdup ("STmicroelectronics ST7 8 bit mc"); case EM_68HC16: return strdup ("Motorola MC68HC16 microcontroller"); case EM_68HC11: return strdup ("Motorola MC68HC11 microcontroller"); case EM_68HC08: return strdup ("Motorola MC68HC08 microcontroller"); case EM_68HC05: return strdup ("Motorola MC68HC05 microcontroller"); case EM_SVX: return strdup ("Silicon Graphics SVx"); case EM_ST19: return strdup ("STMicroelectronics ST19 8 bit mc"); case EM_VAX: return strdup ("Digital VAX"); case EM_CRIS: return strdup ("Axis Communications 32-bit embedded processor"); case EM_JAVELIN: return strdup ("Infineon Technologies 32-bit embedded processor"); case EM_FIREPATH: return strdup ("Element 14 64-bit DSP Processor"); case EM_ZSP: return strdup ("LSI Logic 16-bit DSP Processor"); case EM_MMIX: return strdup ("Donald Knuth's educational 64-bit processor"); case EM_HUANY: return strdup ("Harvard University machine-independent object files"); case EM_PRISM: return strdup ("SiTera Prism"); case EM_AVR: return strdup ("Atmel AVR 8-bit microcontroller"); case EM_FR30: return strdup ("Fujitsu FR30"); case EM_D10V: return strdup ("Mitsubishi D10V"); case EM_D30V: return strdup ("Mitsubishi D30V"); case EM_V850: return strdup ("NEC v850"); case EM_M32R: return strdup ("Mitsubishi M32R"); case EM_MN10300: return strdup ("Matsushita MN10300"); case EM_MN10200: return strdup ("Matsushita MN10200"); case EM_PJ: return strdup ("picoJava"); case EM_OPENRISC: return strdup ("OpenRISC 32-bit embedded processor"); case EM_ARC_A5: return strdup ("ARC Cores Tangent-A5"); case EM_XTENSA: return strdup ("Tensilica Xtensa Architecture"); case EM_AARCH64: return strdup ("ARM aarch64"); case EM_PROPELLER: return strdup ("Parallax Propeller"); case EM_MICROBLAZE: return strdup ("Xilinx MicroBlaze"); case EM_RISCV: return strdup ("RISC V"); case EM_VIDEOCORE3: return strdup ("VideoCore III"); case EM_VIDEOCORE4: return strdup ("VideoCore IV"); default: return r_str_newf ("<unknown>: 0x%x", bin->ehdr.e_machine); } } char* Elf_(r_bin_elf_get_file_type)(ELFOBJ *bin) { ut32 e_type; if (!bin) { return NULL; } e_type = (ut32)bin->ehdr.e_type; // cast to avoid warn in iphone-gcc, must be ut16 switch (e_type) { case ET_NONE: return strdup ("NONE (None)"); case ET_REL: return strdup ("REL (Relocatable file)"); case ET_EXEC: return strdup ("EXEC (Executable file)"); case ET_DYN: return strdup ("DYN (Shared object file)"); case ET_CORE: return strdup ("CORE (Core file)"); } if ((e_type >= ET_LOPROC) && (e_type <= ET_HIPROC)) { return r_str_newf ("Processor Specific: %x", e_type); } if ((e_type >= ET_LOOS) && (e_type <= ET_HIOS)) { return r_str_newf ("OS Specific: %x", e_type); } return r_str_newf ("<unknown>: %x", e_type); } char* Elf_(r_bin_elf_get_elf_class)(ELFOBJ *bin) { switch (bin->ehdr.e_ident[EI_CLASS]) { case ELFCLASSNONE: return strdup ("none"); case ELFCLASS32: return strdup ("ELF32"); case ELFCLASS64: return strdup ("ELF64"); default: return r_str_newf ("<unknown: %x>", bin->ehdr.e_ident[EI_CLASS]); } } int 
Elf_(r_bin_elf_get_bits)(ELFOBJ *bin) { /* Hack for ARCompact */ if (bin->ehdr.e_machine == EM_ARC_A5) { return 16; } /* Hack for Ps2 */ if (bin->phdr && bin->ehdr.e_machine == EM_MIPS) { const ut32 mipsType = bin->ehdr.e_flags & EF_MIPS_ARCH; if (bin->ehdr.e_type == ET_EXEC) { int i; bool haveInterp = false; for (i = 0; i < bin->ehdr.e_phnum; i++) { if (bin->phdr[i].p_type == PT_INTERP) { haveInterp = true; } } if (!haveInterp && mipsType == EF_MIPS_ARCH_3) { // Playstation2 Hack return 64; } } // TODO: show this specific asm.cpu somewhere in bininfo (mips1, mips2, mips3, mips32r2, ...) switch (mipsType) { case EF_MIPS_ARCH_1: case EF_MIPS_ARCH_2: case EF_MIPS_ARCH_3: case EF_MIPS_ARCH_4: case EF_MIPS_ARCH_5: case EF_MIPS_ARCH_32: return 32; case EF_MIPS_ARCH_64: return 64; case EF_MIPS_ARCH_32R2: return 32; case EF_MIPS_ARCH_64R2: return 64; break; } return 32; } /* Hack for Thumb */ if (bin->ehdr.e_machine == EM_ARM) { if (bin->ehdr.e_type != ET_EXEC) { struct r_bin_elf_symbol_t *symbol; if ((symbol = Elf_(r_bin_elf_get_symbols) (bin))) { int i = 0; for (i = 0; !symbol[i].last; i++) { ut64 paddr = symbol[i].offset; if (paddr & 1) { return 16; } } } } { ut64 entry = Elf_(r_bin_elf_get_entry_offset) (bin); if (entry & 1) { return 16; } } } switch (bin->ehdr.e_ident[EI_CLASS]) { case ELFCLASS32: return 32; case ELFCLASS64: return 64; case ELFCLASSNONE: default: return 32; // defaults } } static inline int noodle(ELFOBJ *bin, const char *s) { const ut8 *p = bin->b->buf; if (bin->b->length > 64) { p += bin->b->length - 64; } else { return 0; } return r_mem_mem (p, 64, (const ut8 *)s, strlen (s)) != NULL; } static inline int needle(ELFOBJ *bin, const char *s) { if (bin->shstrtab) { ut32 len = bin->shstrtab_size; if (len > 4096) { len = 4096; // avoid slow loading .. can be buggy? } return r_mem_mem ((const ut8*)bin->shstrtab, len, (const ut8*)s, strlen (s)) != NULL; } return 0; } // TODO: must return const char * all those strings must be const char os[LINUX] or so char* Elf_(r_bin_elf_get_osabi_name)(ELFOBJ *bin) { switch (bin->ehdr.e_ident[EI_OSABI]) { case ELFOSABI_LINUX: return strdup("linux"); case ELFOSABI_SOLARIS: return strdup("solaris"); case ELFOSABI_FREEBSD: return strdup("freebsd"); case ELFOSABI_HPUX: return strdup("hpux"); } /* Hack to identify OS */ if (needle (bin, "openbsd")) return strdup ("openbsd"); if (needle (bin, "netbsd")) return strdup ("netbsd"); if (needle (bin, "freebsd")) return strdup ("freebsd"); if (noodle (bin, "BEOS:APP_VERSION")) return strdup ("beos"); if (needle (bin, "GNU")) return strdup ("linux"); return strdup ("linux"); } ut8 *Elf_(r_bin_elf_grab_regstate)(ELFOBJ *bin, int *len) { if (bin->phdr) { int i; int num = bin->ehdr.e_phnum; for (i = 0; i < num; i++) { if (bin->phdr[i].p_type != PT_NOTE) { continue; } int bits = Elf_(r_bin_elf_get_bits)(bin); int regdelta = (bits == 64)? 0x84: 0x40; // x64 vs x32 int regsize = 160; // for x86-64 ut8 *buf = malloc (regsize); if (r_buf_read_at (bin->b, bin->phdr[i].p_offset + regdelta, buf, regsize) != regsize) { free (buf); bprintf ("Cannot read register state from CORE file\n"); return NULL; } if (len) { *len = regsize; } return buf; } } bprintf ("Cannot find NOTE section\n"); return NULL; } int Elf_(r_bin_elf_is_big_endian)(ELFOBJ *bin) { return (bin->ehdr.e_ident[EI_DATA] == ELFDATA2MSB); } /* XXX Init dt_strtab? 
*/ char *Elf_(r_bin_elf_get_rpath)(ELFOBJ *bin) { char *ret = NULL; int j; if (!bin || !bin->phdr || !bin->dyn_buf || !bin->strtab) { return NULL; } for (j = 0; j< bin->dyn_entries; j++) { if (bin->dyn_buf[j].d_tag == DT_RPATH || bin->dyn_buf[j].d_tag == DT_RUNPATH) { if (!(ret = calloc (1, ELF_STRING_LENGTH))) { perror ("malloc (rpath)"); return NULL; } if (bin->dyn_buf[j].d_un.d_val > bin->strtab_size) { free (ret); return NULL; } strncpy (ret, bin->strtab + bin->dyn_buf[j].d_un.d_val, ELF_STRING_LENGTH); ret[ELF_STRING_LENGTH - 1] = '\0'; break; } } return ret; } static size_t get_relocs_num(ELFOBJ *bin) { size_t i, size, ret = 0; /* we need to be careful here, in malformed files the section size might * not be a multiple of a Rel/Rela size; round up so we allocate enough * space. */ #define NUMENTRIES_ROUNDUP(sectionsize, entrysize) (((sectionsize)+(entrysize)-1)/(entrysize)) if (!bin->g_sections) { return 0; } size = bin->is_rela == DT_REL ? sizeof (Elf_(Rel)) : sizeof (Elf_(Rela)); for (i = 0; !bin->g_sections[i].last; i++) { if (!strncmp (bin->g_sections[i].name, ".rela.", strlen (".rela."))) { if (!bin->is_rela) { size = sizeof (Elf_(Rela)); } ret += NUMENTRIES_ROUNDUP (bin->g_sections[i].size, size); } else if (!strncmp (bin->g_sections[i].name, ".rel.", strlen (".rel."))){ if (!bin->is_rela) { size = sizeof (Elf_(Rel)); } ret += NUMENTRIES_ROUNDUP (bin->g_sections[i].size, size); } } return ret; #undef NUMENTRIES_ROUNDUP } static int read_reloc(ELFOBJ *bin, RBinElfReloc *r, int is_rela, ut64 offset) { ut8 *buf = bin->b->buf; int j = 0; if (offset + sizeof (Elf_ (Rela)) > bin->size || offset + sizeof (Elf_(Rela)) < offset) { return -1; } if (is_rela == DT_RELA) { Elf_(Rela) rela; #if R_BIN_ELF64 rela.r_offset = READ64 (buf + offset, j) rela.r_info = READ64 (buf + offset, j) rela.r_addend = READ64 (buf + offset, j) #else rela.r_offset = READ32 (buf + offset, j) rela.r_info = READ32 (buf + offset, j) rela.r_addend = READ32 (buf + offset, j) #endif r->is_rela = is_rela; r->offset = rela.r_offset; r->type = ELF_R_TYPE (rela.r_info); r->sym = ELF_R_SYM (rela.r_info); r->last = 0; r->addend = rela.r_addend; return sizeof (Elf_(Rela)); } else { Elf_(Rel) rel; #if R_BIN_ELF64 rel.r_offset = READ64 (buf + offset, j) rel.r_info = READ64 (buf + offset, j) #else rel.r_offset = READ32 (buf + offset, j) rel.r_info = READ32 (buf + offset, j) #endif r->is_rela = is_rela; r->offset = rel.r_offset; r->type = ELF_R_TYPE (rel.r_info); r->sym = ELF_R_SYM (rel.r_info); r->last = 0; return sizeof (Elf_(Rel)); } } RBinElfReloc* Elf_(r_bin_elf_get_relocs)(ELFOBJ *bin) { int res, rel, rela, i, j; size_t reloc_num = 0; RBinElfReloc *ret = NULL; if (!bin || !bin->g_sections) { return NULL; } reloc_num = get_relocs_num (bin); if (!reloc_num) { return NULL; } bin->reloc_num = reloc_num; ret = (RBinElfReloc*)calloc ((size_t)reloc_num + 1, sizeof(RBinElfReloc)); if (!ret) { return NULL; } #if DEAD_CODE ut64 section_text_offset = Elf_(r_bin_elf_get_section_offset) (bin, ".text"); if (section_text_offset == -1) { section_text_offset = 0; } #endif for (i = 0, rel = 0; !bin->g_sections[i].last && rel < reloc_num ; i++) { bool is_rela = 0 == strncmp (bin->g_sections[i].name, ".rela.", strlen (".rela.")); bool is_rel = 0 == strncmp (bin->g_sections[i].name, ".rel.", strlen (".rel.")); if (!is_rela && !is_rel) { continue; } for (j = 0; j < bin->g_sections[i].size; j += res) { if (bin->g_sections[i].size > bin->size) { break; } if (bin->g_sections[i].offset > bin->size) { break; } if (rel >= reloc_num) { bprintf 
("Internal error: ELF relocation buffer too small," "please file a bug report."); break; } if (!bin->is_rela) { rela = is_rela? DT_RELA : DT_REL; } else { rela = bin->is_rela; } res = read_reloc (bin, &ret[rel], rela, bin->g_sections[i].offset + j); if (j + res > bin->g_sections[i].size) { bprintf ("Warning: malformed file, relocation entry #%u is partially beyond the end of section %u.\n", rel, i); } if (bin->ehdr.e_type == ET_REL) { if (bin->g_sections[i].info < bin->ehdr.e_shnum && bin->shdr) { ret[rel].rva = bin->shdr[bin->g_sections[i].info].sh_offset + ret[rel].offset; ret[rel].rva = Elf_(r_bin_elf_p2v) (bin, ret[rel].rva); } else { ret[rel].rva = ret[rel].offset; } } else { ret[rel].rva = ret[rel].offset; ret[rel].offset = Elf_(r_bin_elf_v2p) (bin, ret[rel].offset); } ret[rel].last = 0; if (res < 0) { break; } rel++; } } ret[reloc_num].last = 1; return ret; } RBinElfLib* Elf_(r_bin_elf_get_libs)(ELFOBJ *bin) { RBinElfLib *ret = NULL; int j, k; if (!bin || !bin->phdr || !bin->dyn_buf || !bin->strtab || *(bin->strtab+1) == '0') { return NULL; } for (j = 0, k = 0; j < bin->dyn_entries; j++) if (bin->dyn_buf[j].d_tag == DT_NEEDED) { RBinElfLib *r = realloc (ret, (k + 1) * sizeof (RBinElfLib)); if (!r) { perror ("realloc (libs)"); free (ret); return NULL; } ret = r; if (bin->dyn_buf[j].d_un.d_val > bin->strtab_size) { free (ret); return NULL; } strncpy (ret[k].name, bin->strtab + bin->dyn_buf[j].d_un.d_val, ELF_STRING_LENGTH); ret[k].name[ELF_STRING_LENGTH - 1] = '\0'; ret[k].last = 0; if (ret[k].name[0]) { k++; } } RBinElfLib *r = realloc (ret, (k + 1) * sizeof (RBinElfLib)); if (!r) { perror ("realloc (libs)"); free (ret); return NULL; } ret = r; ret[k].last = 1; return ret; } static RBinElfSection* get_sections_from_phdr(ELFOBJ *bin) { RBinElfSection *ret; int i, num_sections = 0; ut64 reldyn = 0, relava = 0, pltgotva = 0, relva = 0; ut64 reldynsz = 0, relasz = 0, pltgotsz = 0; if (!bin || !bin->phdr || !bin->ehdr.e_phnum) return NULL; for (i = 0; i < bin->dyn_entries; i++) { switch (bin->dyn_buf[i].d_tag) { case DT_REL: reldyn = bin->dyn_buf[i].d_un.d_ptr; num_sections++; break; case DT_RELA: relva = bin->dyn_buf[i].d_un.d_ptr; num_sections++; break; case DT_RELSZ: reldynsz = bin->dyn_buf[i].d_un.d_val; break; case DT_RELASZ: relasz = bin->dyn_buf[i].d_un.d_val; break; case DT_PLTGOT: pltgotva = bin->dyn_buf[i].d_un.d_ptr; num_sections++; break; case DT_PLTRELSZ: pltgotsz = bin->dyn_buf[i].d_un.d_val; break; case DT_JMPREL: relava = bin->dyn_buf[i].d_un.d_ptr; num_sections++; break; default: break; } } ret = calloc (num_sections + 1, sizeof(RBinElfSection)); if (!ret) { return NULL; } i = 0; if (reldyn) { ret[i].offset = Elf_(r_bin_elf_v2p) (bin, reldyn); ret[i].rva = reldyn; ret[i].size = reldynsz; strcpy (ret[i].name, ".rel.dyn"); ret[i].last = 0; i++; } if (relava) { ret[i].offset = Elf_(r_bin_elf_v2p) (bin, relava); ret[i].rva = relava; ret[i].size = pltgotsz; strcpy (ret[i].name, ".rela.plt"); ret[i].last = 0; i++; } if (relva) { ret[i].offset = Elf_(r_bin_elf_v2p) (bin, relva); ret[i].rva = relva; ret[i].size = relasz; strcpy (ret[i].name, ".rel.plt"); ret[i].last = 0; i++; } if (pltgotva) { ret[i].offset = Elf_(r_bin_elf_v2p) (bin, pltgotva); ret[i].rva = pltgotva; ret[i].size = pltgotsz; strcpy (ret[i].name, ".got.plt"); ret[i].last = 0; i++; } ret[i].last = 1; return ret; } RBinElfSection* Elf_(r_bin_elf_get_sections)(ELFOBJ *bin) { RBinElfSection *ret = NULL; char unknown_s[20], invalid_s[20]; int i, nidx, unknown_c=0, invalid_c=0; if (!bin) { return NULL; } if 
(bin->g_sections) { return bin->g_sections; } if (!bin->shdr) { //we don't give up search in phdr section return get_sections_from_phdr (bin); } if (!(ret = calloc ((bin->ehdr.e_shnum + 1), sizeof (RBinElfSection)))) { return NULL; } for (i = 0; i < bin->ehdr.e_shnum; i++) { ret[i].offset = bin->shdr[i].sh_offset; ret[i].size = bin->shdr[i].sh_size; ret[i].align = bin->shdr[i].sh_addralign; ret[i].flags = bin->shdr[i].sh_flags; ret[i].link = bin->shdr[i].sh_link; ret[i].info = bin->shdr[i].sh_info; ret[i].type = bin->shdr[i].sh_type; if (bin->ehdr.e_type == ET_REL) { ret[i].rva = bin->baddr + bin->shdr[i].sh_offset; } else { ret[i].rva = bin->shdr[i].sh_addr; } nidx = bin->shdr[i].sh_name; #define SHNAME (int)bin->shdr[i].sh_name #define SHNLEN ELF_STRING_LENGTH - 4 #define SHSIZE (int)bin->shstrtab_size if (nidx < 0 || !bin->shstrtab_section || !bin->shstrtab_size || nidx > bin->shstrtab_size) { snprintf (invalid_s, sizeof (invalid_s) - 4, "invalid%d", invalid_c); strncpy (ret[i].name, invalid_s, SHNLEN); invalid_c++; } else { if (bin->shstrtab && (SHNAME > 0) && (SHNAME < SHSIZE)) { strncpy (ret[i].name, &bin->shstrtab[SHNAME], SHNLEN); } else { if (bin->shdr[i].sh_type == SHT_NULL) { //to follow the same behaviour as readelf strncpy (ret[i].name, "", sizeof (ret[i].name) - 4); } else { snprintf (unknown_s, sizeof (unknown_s)-4, "unknown%d", unknown_c); strncpy (ret[i].name, unknown_s, sizeof (ret[i].name)-4); unknown_c++; } } } ret[i].name[ELF_STRING_LENGTH-2] = '\0'; ret[i].last = 0; } ret[i].last = 1; return ret; } static void fill_symbol_bind_and_type (struct r_bin_elf_symbol_t *ret, Elf_(Sym) *sym) { #define s_bind(x) ret->bind = x #define s_type(x) ret->type = x switch (ELF_ST_BIND(sym->st_info)) { case STB_LOCAL: s_bind ("LOCAL"); break; case STB_GLOBAL: s_bind ("GLOBAL"); break; case STB_WEAK: s_bind ("WEAK"); break; case STB_NUM: s_bind ("NUM"); break; case STB_LOOS: s_bind ("LOOS"); break; case STB_HIOS: s_bind ("HIOS"); break; case STB_LOPROC: s_bind ("LOPROC"); break; case STB_HIPROC: s_bind ("HIPROC"); break; default: s_bind ("UNKNOWN"); } switch (ELF_ST_TYPE (sym->st_info)) { case STT_NOTYPE: s_type ("NOTYPE"); break; case STT_OBJECT: s_type ("OBJECT"); break; case STT_FUNC: s_type ("FUNC"); break; case STT_SECTION: s_type ("SECTION"); break; case STT_FILE: s_type ("FILE"); break; case STT_COMMON: s_type ("COMMON"); break; case STT_TLS: s_type ("TLS"); break; case STT_NUM: s_type ("NUM"); break; case STT_LOOS: s_type ("LOOS"); break; case STT_HIOS: s_type ("HIOS"); break; case STT_LOPROC: s_type ("LOPROC"); break; case STT_HIPROC: s_type ("HIPROC"); break; default: s_type ("UNKNOWN"); } } static RBinElfSymbol* get_symbols_from_phdr(ELFOBJ *bin, int type) { Elf_(Sym) *sym = NULL; Elf_(Addr) addr_sym_table = 0; ut8 s[sizeof (Elf_(Sym))] = {0}; RBinElfSymbol *ret = NULL; int i, j, r, tsize, nsym, ret_ctr; ut64 toffset = 0, tmp_offset; ut32 size, sym_size = 0; if (!bin || !bin->phdr || !bin->ehdr.e_phnum) { return NULL; } for (j = 0; j < bin->dyn_entries; j++) { switch (bin->dyn_buf[j].d_tag) { case (DT_SYMTAB): addr_sym_table = Elf_(r_bin_elf_v2p) (bin, bin->dyn_buf[j].d_un.d_ptr); break; case (DT_SYMENT): sym_size = bin->dyn_buf[j].d_un.d_val; break; default: break; } } if (!addr_sym_table) { return NULL; } if (!sym_size) { return NULL; } //since ELF doesn't specify the symbol table size we may read until the end of the buffer nsym = (bin->size - addr_sym_table) / sym_size; if (!UT32_MUL (&size, nsym, sizeof (Elf_ (Sym)))) { goto beach; } if (size < 1) { goto beach; } if 
(addr_sym_table > bin->size || addr_sym_table + size > bin->size) { goto beach; } if (nsym < 1) { return NULL; } // we reserve room for 4096 and grow as needed. size_t capacity1 = 4096; size_t capacity2 = 4096; sym = (Elf_(Sym)*) calloc (capacity1, sym_size); ret = (RBinElfSymbol *) calloc (capacity2, sizeof (struct r_bin_elf_symbol_t)); if (!sym || !ret) { goto beach; } for (i = 1, ret_ctr = 0; i < nsym; i++) { if (i >= capacity1) { // maybe grow // You take what you want, but you eat what you take. Elf_(Sym)* temp_sym = (Elf_(Sym)*) realloc(sym, (capacity1 * GROWTH_FACTOR) * sym_size); if (!temp_sym) { goto beach; } sym = temp_sym; capacity1 *= GROWTH_FACTOR; } if (ret_ctr >= capacity2) { // maybe grow RBinElfSymbol *temp_ret = realloc (ret, capacity2 * GROWTH_FACTOR * sizeof (struct r_bin_elf_symbol_t)); if (!temp_ret) { goto beach; } ret = temp_ret; capacity2 *= GROWTH_FACTOR; } // read in one entry r = r_buf_read_at (bin->b, addr_sym_table + i * sizeof (Elf_ (Sym)), s, sizeof (Elf_ (Sym))); if (r < 1) { goto beach; } int j = 0; #if R_BIN_ELF64 sym[i].st_name = READ32 (s, j); sym[i].st_info = READ8 (s, j); sym[i].st_other = READ8 (s, j); sym[i].st_shndx = READ16 (s, j); sym[i].st_value = READ64 (s, j); sym[i].st_size = READ64 (s, j); #else sym[i].st_name = READ32 (s, j); sym[i].st_value = READ32 (s, j); sym[i].st_size = READ32 (s, j); sym[i].st_info = READ8 (s, j); sym[i].st_other = READ8 (s, j); sym[i].st_shndx = READ16 (s, j); #endif // zero symbol is always empty // Examine entry and maybe store if (type == R_BIN_ELF_IMPORTS && sym[i].st_shndx == STN_UNDEF) { if (sym[i].st_value) { toffset = sym[i].st_value; } else if ((toffset = get_import_addr (bin, i)) == -1){ toffset = 0; } tsize = 16; } else if (type == R_BIN_ELF_SYMBOLS && sym[i].st_shndx != STN_UNDEF && ELF_ST_TYPE (sym[i].st_info) != STT_SECTION && ELF_ST_TYPE (sym[i].st_info) != STT_FILE) { tsize = sym[i].st_size; toffset = (ut64) sym[i].st_value; } else { continue; } tmp_offset = Elf_(r_bin_elf_v2p) (bin, toffset); if (tmp_offset > bin->size) { goto done; } if (sym[i].st_name + 2 > bin->strtab_size) { // Since we are reading beyond the symbol table what's happening // is that some entry is trying to dereference the strtab beyond its capacity // is not a symbol so is the end goto done; } ret[ret_ctr].offset = tmp_offset; ret[ret_ctr].size = tsize; { int rest = ELF_STRING_LENGTH - 1; int st_name = sym[i].st_name; int maxsize = R_MIN (bin->size, bin->strtab_size); if (st_name < 0 || st_name >= maxsize) { ret[ret_ctr].name[0] = 0; } else { const int len = __strnlen (bin->strtab + st_name, rest); memcpy (ret[ret_ctr].name, &bin->strtab[st_name], len); } } ret[ret_ctr].ordinal = i; ret[ret_ctr].in_shdr = false; ret[ret_ctr].name[ELF_STRING_LENGTH - 2] = '\0'; fill_symbol_bind_and_type (&ret[ret_ctr], &sym[i]); ret[ret_ctr].last = 0; ret_ctr++; } done: ret[ret_ctr].last = 1; // Size everything down to only what is used { nsym = i > 0 ? i : 1; Elf_ (Sym) * temp_sym = (Elf_ (Sym)*) realloc (sym, (nsym * GROWTH_FACTOR) * sym_size); if (!temp_sym) { goto beach; } sym = temp_sym; } { ret_ctr = ret_ctr > 0 ? 
ret_ctr : 1; RBinElfSymbol *p = (RBinElfSymbol *) realloc (ret, (ret_ctr + 1) * sizeof (RBinElfSymbol)); if (!p) { goto beach; } ret = p; } if (type == R_BIN_ELF_IMPORTS && !bin->imports_by_ord_size) { bin->imports_by_ord_size = ret_ctr + 1; if (ret_ctr > 0) { bin->imports_by_ord = (RBinImport * *) calloc (ret_ctr + 1, sizeof (RBinImport*)); } else { bin->imports_by_ord = NULL; } } else if (type == R_BIN_ELF_SYMBOLS && !bin->symbols_by_ord_size && ret_ctr) { bin->symbols_by_ord_size = ret_ctr + 1; if (ret_ctr > 0) { bin->symbols_by_ord = (RBinSymbol * *) calloc (ret_ctr + 1, sizeof (RBinSymbol*)); }else { bin->symbols_by_ord = NULL; } } free (sym); return ret; beach: free (sym); free (ret); return NULL; } static RBinElfSymbol *Elf_(r_bin_elf_get_phdr_symbols)(ELFOBJ *bin) { if (!bin) { return NULL; } if (bin->phdr_symbols) { return bin->phdr_symbols; } bin->phdr_symbols = get_symbols_from_phdr (bin, R_BIN_ELF_SYMBOLS); return bin->phdr_symbols; } static RBinElfSymbol *Elf_(r_bin_elf_get_phdr_imports)(ELFOBJ *bin) { if (!bin) { return NULL; } if (bin->phdr_imports) { return bin->phdr_imports; } bin->phdr_imports = get_symbols_from_phdr (bin, R_BIN_ELF_IMPORTS); return bin->phdr_imports; } static int Elf_(fix_symbols)(ELFOBJ *bin, int nsym, int type, RBinElfSymbol **sym) { int count = 0; RBinElfSymbol *ret = *sym; RBinElfSymbol *phdr_symbols = (type == R_BIN_ELF_SYMBOLS) ? Elf_(r_bin_elf_get_phdr_symbols) (bin) : Elf_(r_bin_elf_get_phdr_imports) (bin); RBinElfSymbol *tmp, *p; if (phdr_symbols) { RBinElfSymbol *d = ret; while (!d->last) { /* find match in phdr */ p = phdr_symbols; while (!p->last) { if (p->offset && d->offset == p->offset) { p->in_shdr = true; if (*p->name && strcmp (d->name, p->name)) { strcpy (d->name, p->name); } } p++; } d++; } p = phdr_symbols; while (!p->last) { if (!p->in_shdr) { count++; } p++; } /*Take those symbols that are not present in the shdr but yes in phdr*/ /*This should only should happen with fucked up binaries*/ if (count > 0) { /*what happens if a shdr says it has only one symbol? we should look anyway into phdr*/ tmp = (RBinElfSymbol*)realloc (ret, (nsym + count + 1) * sizeof (RBinElfSymbol)); if (!tmp) { return -1; } ret = tmp; ret[nsym--].last = 0; p = phdr_symbols; while (!p->last) { if (!p->in_shdr) { memcpy (&ret[++nsym], p, sizeof (RBinElfSymbol)); } p++; } ret[nsym + 1].last = 1; } *sym = ret; return nsym + 1; } return nsym; } static RBinElfSymbol* Elf_(_r_bin_elf_get_symbols_imports)(ELFOBJ *bin, int type) { ut32 shdr_size; int tsize, nsym, ret_ctr = 0, i, j, r, k, newsize; ut64 toffset; ut32 size = 0; RBinElfSymbol *ret = NULL; Elf_(Shdr) *strtab_section = NULL; Elf_(Sym) *sym = NULL; ut8 s[sizeof (Elf_(Sym))] = { 0 }; char *strtab = NULL; if (!bin || !bin->shdr || !bin->ehdr.e_shnum || bin->ehdr.e_shnum == 0xffff) { return (type == R_BIN_ELF_SYMBOLS) ? Elf_(r_bin_elf_get_phdr_symbols) (bin) : Elf_(r_bin_elf_get_phdr_imports) (bin); } if (!UT32_MUL (&shdr_size, bin->ehdr.e_shnum, sizeof (Elf_(Shdr)))) { return false; } if (shdr_size + 8 > bin->size) { return false; } for (i = 0; i < bin->ehdr.e_shnum; i++) { if ((type == R_BIN_ELF_IMPORTS && bin->shdr[i].sh_type == (bin->ehdr.e_type == ET_REL ? SHT_SYMTAB : SHT_DYNSYM)) || (type == R_BIN_ELF_SYMBOLS && bin->shdr[i].sh_type == (Elf_(r_bin_elf_get_stripped) (bin) ? SHT_DYNSYM : SHT_SYMTAB))) { if (bin->shdr[i].sh_link < 1) { /* oops. fix out of range pointers */ continue; } // hack to avoid asan cry if ((bin->shdr[i].sh_link * sizeof(Elf_(Shdr))) >= shdr_size) { /* oops. 
fix out of range pointers */ continue; } strtab_section = &bin->shdr[bin->shdr[i].sh_link]; if (strtab_section->sh_size > ST32_MAX || strtab_section->sh_size+8 > bin->size) { bprintf ("size (syms strtab)"); free (ret); free (strtab); return NULL; } if (!strtab) { if (!(strtab = (char *)calloc (1, 8 + strtab_section->sh_size))) { bprintf ("malloc (syms strtab)"); goto beach; } if (strtab_section->sh_offset > bin->size || strtab_section->sh_offset + strtab_section->sh_size > bin->size) { goto beach; } if (r_buf_read_at (bin->b, strtab_section->sh_offset, (ut8*)strtab, strtab_section->sh_size) == -1) { bprintf ("Warning: read (syms strtab)\n"); goto beach; } } newsize = 1 + bin->shdr[i].sh_size; if (newsize < 0 || newsize > bin->size) { bprintf ("invalid shdr %d size\n", i); goto beach; } nsym = (int)(bin->shdr[i].sh_size / sizeof (Elf_(Sym))); if (nsym < 0) { goto beach; } if (!(sym = (Elf_(Sym) *)calloc (nsym, sizeof (Elf_(Sym))))) { bprintf ("calloc (syms)"); goto beach; } if (!UT32_MUL (&size, nsym, sizeof (Elf_(Sym)))) { goto beach; } if (size < 1 || size > bin->size) { goto beach; } if (bin->shdr[i].sh_offset > bin->size) { goto beach; } if (bin->shdr[i].sh_offset + size > bin->size) { goto beach; } for (j = 0; j < nsym; j++) { int k = 0; r = r_buf_read_at (bin->b, bin->shdr[i].sh_offset + j * sizeof (Elf_(Sym)), s, sizeof (Elf_(Sym))); if (r < 1) { bprintf ("Warning: read (sym)\n"); goto beach; } #if R_BIN_ELF64 sym[j].st_name = READ32 (s, k) sym[j].st_info = READ8 (s, k) sym[j].st_other = READ8 (s, k) sym[j].st_shndx = READ16 (s, k) sym[j].st_value = READ64 (s, k) sym[j].st_size = READ64 (s, k) #else sym[j].st_name = READ32 (s, k) sym[j].st_value = READ32 (s, k) sym[j].st_size = READ32 (s, k) sym[j].st_info = READ8 (s, k) sym[j].st_other = READ8 (s, k) sym[j].st_shndx = READ16 (s, k) #endif } free (ret); ret = calloc (nsym, sizeof (RBinElfSymbol)); if (!ret) { bprintf ("Cannot allocate %d symbols\n", nsym); goto beach; } for (k = 1, ret_ctr = 0; k < nsym; k++) { if (type == R_BIN_ELF_IMPORTS && sym[k].st_shndx == STN_UNDEF) { if (sym[k].st_value) { toffset = sym[k].st_value; } else if ((toffset = get_import_addr (bin, k)) == -1){ toffset = 0; } tsize = 16; } else if (type == R_BIN_ELF_SYMBOLS && sym[k].st_shndx != STN_UNDEF && ELF_ST_TYPE (sym[k].st_info) != STT_SECTION && ELF_ST_TYPE (sym[k].st_info) != STT_FILE) { //int idx = sym[k].st_shndx; tsize = sym[k].st_size; toffset = (ut64)sym[k].st_value; } else { continue; } if (bin->ehdr.e_type == ET_REL) { if (sym[k].st_shndx < bin->ehdr.e_shnum) ret[ret_ctr].offset = sym[k].st_value + bin->shdr[sym[k].st_shndx].sh_offset; } else { ret[ret_ctr].offset = Elf_(r_bin_elf_v2p) (bin, toffset); } ret[ret_ctr].size = tsize; if (sym[k].st_name + 2 > strtab_section->sh_size) { bprintf ("Warning: index out of strtab range\n"); goto beach; } { int rest = ELF_STRING_LENGTH - 1; int st_name = sym[k].st_name; int maxsize = R_MIN (bin->b->length, strtab_section->sh_size); if (st_name < 0 || st_name >= maxsize) { ret[ret_ctr].name[0] = 0; } else { const size_t len = __strnlen (strtab + sym[k].st_name, rest); memcpy (ret[ret_ctr].name, &strtab[sym[k].st_name], len); } } ret[ret_ctr].ordinal = k; ret[ret_ctr].name[ELF_STRING_LENGTH - 2] = '\0'; fill_symbol_bind_and_type (&ret[ret_ctr], &sym[k]); ret[ret_ctr].last = 0; ret_ctr++; } ret[ret_ctr].last = 1; // ugly dirty hack :D R_FREE (strtab); R_FREE (sym); } } if (!ret) { return (type == R_BIN_ELF_SYMBOLS) ? 
Elf_(r_bin_elf_get_phdr_symbols) (bin) : Elf_(r_bin_elf_get_phdr_imports) (bin); } int max = -1; RBinElfSymbol *aux = NULL; nsym = Elf_(fix_symbols) (bin, ret_ctr, type, &ret); if (nsym == -1) { goto beach; } aux = ret; while (!aux->last) { if ((int)aux->ordinal > max) { max = aux->ordinal; } aux++; } nsym = max; if (type == R_BIN_ELF_IMPORTS) { R_FREE (bin->imports_by_ord); bin->imports_by_ord_size = nsym + 1; bin->imports_by_ord = (RBinImport**)calloc (R_MAX (1, nsym + 1), sizeof (RBinImport*)); } else if (type == R_BIN_ELF_SYMBOLS) { R_FREE (bin->symbols_by_ord); bin->symbols_by_ord_size = nsym + 1; bin->symbols_by_ord = (RBinSymbol**)calloc (R_MAX (1, nsym + 1), sizeof (RBinSymbol*)); } return ret; beach: free (ret); free (sym); free (strtab); return NULL; } RBinElfSymbol *Elf_(r_bin_elf_get_symbols)(ELFOBJ *bin) { if (!bin->g_symbols) { bin->g_symbols = Elf_(_r_bin_elf_get_symbols_imports) (bin, R_BIN_ELF_SYMBOLS); } return bin->g_symbols; } RBinElfSymbol *Elf_(r_bin_elf_get_imports)(ELFOBJ *bin) { if (!bin->g_imports) { bin->g_imports = Elf_(_r_bin_elf_get_symbols_imports) (bin, R_BIN_ELF_IMPORTS); } return bin->g_imports; } RBinElfField* Elf_(r_bin_elf_get_fields)(ELFOBJ *bin) { RBinElfField *ret = NULL; int i = 0, j; if (!bin || !(ret = calloc ((bin->ehdr.e_phnum + 3 + 1), sizeof (RBinElfField)))) { return NULL; } strncpy (ret[i].name, "ehdr", ELF_STRING_LENGTH); ret[i].offset = 0; ret[i++].last = 0; strncpy (ret[i].name, "shoff", ELF_STRING_LENGTH); ret[i].offset = bin->ehdr.e_shoff; ret[i++].last = 0; strncpy (ret[i].name, "phoff", ELF_STRING_LENGTH); ret[i].offset = bin->ehdr.e_phoff; ret[i++].last = 0; for (j = 0; bin->phdr && j < bin->ehdr.e_phnum; i++, j++) { snprintf (ret[i].name, ELF_STRING_LENGTH, "phdr_%i", j); ret[i].offset = bin->phdr[j].p_offset; ret[i].last = 0; } ret[i].last = 1; return ret; } void* Elf_(r_bin_elf_free)(ELFOBJ* bin) { int i; if (!bin) { return NULL; } free (bin->phdr); free (bin->shdr); free (bin->strtab); free (bin->dyn_buf); free (bin->shstrtab); free (bin->dynstr); //free (bin->strtab_section); if (bin->imports_by_ord) { for (i = 0; i<bin->imports_by_ord_size; i++) { free (bin->imports_by_ord[i]); } free (bin->imports_by_ord); } if (bin->symbols_by_ord) { for (i = 0; i<bin->symbols_by_ord_size; i++) { free (bin->symbols_by_ord[i]); } free (bin->symbols_by_ord); } r_buf_free (bin->b); if (bin->g_symbols != bin->phdr_symbols) { R_FREE (bin->phdr_symbols); } if (bin->g_imports != bin->phdr_imports) { R_FREE (bin->phdr_imports); } R_FREE (bin->g_sections); R_FREE (bin->g_symbols); R_FREE (bin->g_imports); free (bin); return NULL; } ELFOBJ* Elf_(r_bin_elf_new)(const char* file, bool verbose) { ut8 *buf; int size; ELFOBJ *bin = R_NEW0 (ELFOBJ); if (!bin) { return NULL; } memset (bin, 0, sizeof (ELFOBJ)); bin->file = file; if (!(buf = (ut8*)r_file_slurp (file, &size))) { return Elf_(r_bin_elf_free) (bin); } bin->size = size; bin->verbose = verbose; bin->b = r_buf_new (); if (!r_buf_set_bytes (bin->b, buf, bin->size)) { free (buf); return Elf_(r_bin_elf_free) (bin); } if (!elf_init (bin)) { free (buf); return Elf_(r_bin_elf_free) (bin); } free (buf); return bin; } ELFOBJ* Elf_(r_bin_elf_new_buf)(RBuffer *buf, bool verbose) { ELFOBJ *bin = R_NEW0 (ELFOBJ); bin->kv = sdb_new0 (); bin->b = r_buf_new (); bin->size = (ut32)buf->length; bin->verbose = verbose; if (!r_buf_set_bytes (bin->b, buf->buf, buf->length)) { return Elf_(r_bin_elf_free) (bin); } if (!elf_init (bin)) { return Elf_(r_bin_elf_free) (bin); } return bin; } static int is_in_pphdr (Elf_(Phdr) 
*p, ut64 addr) { return addr >= p->p_offset && addr < p->p_offset + p->p_memsz; } static int is_in_vphdr (Elf_(Phdr) *p, ut64 addr) { return addr >= p->p_vaddr && addr < p->p_vaddr + p->p_memsz; } /* converts a physical address to the virtual address, looking * at the program headers in the binary bin */ ut64 Elf_(r_bin_elf_p2v) (ELFOBJ *bin, ut64 paddr) { int i; if (!bin) return 0; if (!bin->phdr) { if (bin->ehdr.e_type == ET_REL) { return bin->baddr + paddr; } return paddr; } for (i = 0; i < bin->ehdr.e_phnum; ++i) { Elf_(Phdr) *p = &bin->phdr[i]; if (!p) { break; } if (p->p_type == PT_LOAD && is_in_pphdr (p, paddr)) { if (!p->p_vaddr && !p->p_offset) { continue; } return p->p_vaddr + paddr - p->p_offset; } } return paddr; } /* converts a virtual address to the relative physical address, looking * at the program headers in the binary bin */ ut64 Elf_(r_bin_elf_v2p) (ELFOBJ *bin, ut64 vaddr) { int i; if (!bin) { return 0; } if (!bin->phdr) { if (bin->ehdr.e_type == ET_REL) { return vaddr - bin->baddr; } return vaddr; } for (i = 0; i < bin->ehdr.e_phnum; ++i) { Elf_(Phdr) *p = &bin->phdr[i]; if (!p) { break; } if (p->p_type == PT_LOAD && is_in_vphdr (p, vaddr)) { if (!p->p_offset && !p->p_vaddr) { continue; } return p->p_offset + vaddr - p->p_vaddr; } } return vaddr; }
static Sdb *store_versioninfo_gnu_verdef(ELFOBJ *bin, Elf_(Shdr) *shdr, int sz) { const char *section_name = ""; const char *link_section_name = ""; char *end = NULL; Elf_(Shdr) *link_shdr = NULL; ut8 dfs[sizeof (Elf_(Verdef))] = {0}; Sdb *sdb; int cnt, i; if (shdr->sh_link > bin->ehdr.e_shnum) { return false; } link_shdr = &bin->shdr[shdr->sh_link]; if (shdr->sh_size < 1) { return false; } Elf_(Verdef) *defs = calloc (shdr->sh_size, sizeof (char)); if (!defs) { return false; } if (bin->shstrtab && shdr->sh_name < bin->shstrtab_size) { section_name = &bin->shstrtab[shdr->sh_name]; } if (link_shdr && bin->shstrtab && link_shdr->sh_name < bin->shstrtab_size) { link_section_name = &bin->shstrtab[link_shdr->sh_name]; } if (!defs) { bprintf ("Warning: Cannot allocate memory (Check Elf_(Verdef))\n"); return NULL; } sdb = sdb_new0 (); end = (char *)defs + shdr->sh_size; sdb_set (sdb, "section_name", section_name, 0); sdb_num_set (sdb, "entries", shdr->sh_info, 0); sdb_num_set (sdb, "addr", shdr->sh_addr, 0); sdb_num_set (sdb, "offset", shdr->sh_offset, 0); sdb_num_set (sdb, "link", shdr->sh_link, 0); sdb_set (sdb, "link_section_name", link_section_name, 0); for (cnt = 0, i = 0; cnt < shdr->sh_info && ((char *)defs + i < end); ++cnt) { Sdb *sdb_verdef = sdb_new0 (); char *vstart = ((char*)defs) + i; char key[32] = {0}; Elf_(Verdef) *verdef = (Elf_(Verdef)*)vstart; Elf_(Verdaux) aux = {0}; int j = 0; int isum = 0; r_buf_read_at (bin->b, shdr->sh_offset + i, dfs, sizeof (Elf_(Verdef))); verdef->vd_version = READ16 (dfs, j) verdef->vd_flags = READ16 (dfs, j) verdef->vd_ndx = READ16 (dfs, j) verdef->vd_cnt = READ16 (dfs, j) verdef->vd_hash = READ32 (dfs, j) verdef->vd_aux = READ32 (dfs, j) verdef->vd_next = READ32 (dfs, j) vstart += verdef->vd_aux; if (vstart > end || vstart + sizeof (Elf_(Verdaux)) > end) { sdb_free (sdb_verdef); goto out_error; } j = 0; aux.vda_name = READ32 (vstart, j) aux.vda_next = READ32 (vstart, j) isum = i + verdef->vd_aux; if (aux.vda_name > bin->dynstr_size) { sdb_free (sdb_verdef); goto out_error; } sdb_num_set (sdb_verdef, "idx", i, 0); sdb_num_set (sdb_verdef, "vd_version", verdef->vd_version, 0); sdb_num_set (sdb_verdef, "vd_ndx", verdef->vd_ndx, 0); sdb_num_set (sdb_verdef, "vd_cnt", verdef->vd_cnt, 0); sdb_set (sdb_verdef, "vda_name", &bin->dynstr[aux.vda_name], 0); sdb_set (sdb_verdef, "flags", get_ver_flags (verdef->vd_flags), 0); for (j = 1; j < verdef->vd_cnt; ++j) { int k; Sdb *sdb_parent = sdb_new0 (); isum += aux.vda_next; vstart += aux.vda_next; if (vstart > end || vstart + sizeof(Elf_(Verdaux)) > end) { sdb_free (sdb_verdef); sdb_free (sdb_parent); goto out_error; } k = 0; aux.vda_name = READ32 (vstart, k) aux.vda_next = READ32 (vstart, k) if (aux.vda_name > bin->dynstr_size) { sdb_free (sdb_verdef); sdb_free (sdb_parent); goto out_error; } sdb_num_set (sdb_parent, "idx", isum, 0); sdb_num_set (sdb_parent, "parent", j, 0); sdb_set (sdb_parent, "vda_name", &bin->dynstr[aux.vda_name], 0); snprintf (key, sizeof (key), "parent%d", j - 1); sdb_ns_set (sdb_verdef, key, sdb_parent); } snprintf (key, sizeof (key), "verdef%d", cnt); sdb_ns_set (sdb, key, sdb_verdef); if (!verdef->vd_next) { sdb_free (sdb_verdef); goto out_error; } i += verdef->vd_next; } free (defs); return sdb; out_error: free (defs); sdb_free (sdb); return NULL; }
static Sdb *store_versioninfo_gnu_verdef(ELFOBJ *bin, Elf_(Shdr) *shdr, int sz) { const char *section_name = ""; const char *link_section_name = ""; char *end = NULL; Elf_(Shdr) *link_shdr = NULL; ut8 dfs[sizeof (Elf_(Verdef))] = {0}; Sdb *sdb; int cnt, i; if (shdr->sh_link > bin->ehdr.e_shnum) { return false; } link_shdr = &bin->shdr[shdr->sh_link]; if (shdr->sh_size < 1) { return false; } Elf_(Verdef) *defs = calloc (shdr->sh_size, sizeof (char)); if (!defs) { return false; } if (bin->shstrtab && shdr->sh_name < bin->shstrtab_size) { section_name = &bin->shstrtab[shdr->sh_name]; } if (link_shdr && bin->shstrtab && link_shdr->sh_name < bin->shstrtab_size) { link_section_name = &bin->shstrtab[link_shdr->sh_name]; } if (!defs) { bprintf ("Warning: Cannot allocate memory (Check Elf_(Verdef))\n"); return NULL; } sdb = sdb_new0 (); end = (char *)defs + shdr->sh_size; sdb_set (sdb, "section_name", section_name, 0); sdb_num_set (sdb, "entries", shdr->sh_info, 0); sdb_num_set (sdb, "addr", shdr->sh_addr, 0); sdb_num_set (sdb, "offset", shdr->sh_offset, 0); sdb_num_set (sdb, "link", shdr->sh_link, 0); sdb_set (sdb, "link_section_name", link_section_name, 0); for (cnt = 0, i = 0; i >= 0 && cnt < shdr->sh_info && ((char *)defs + i < end); ++cnt) { Sdb *sdb_verdef = sdb_new0 (); char *vstart = ((char*)defs) + i; char key[32] = {0}; Elf_(Verdef) *verdef = (Elf_(Verdef)*)vstart; Elf_(Verdaux) aux = {0}; int j = 0; int isum = 0; r_buf_read_at (bin->b, shdr->sh_offset + i, dfs, sizeof (Elf_(Verdef))); verdef->vd_version = READ16 (dfs, j) verdef->vd_flags = READ16 (dfs, j) verdef->vd_ndx = READ16 (dfs, j) verdef->vd_cnt = READ16 (dfs, j) verdef->vd_hash = READ32 (dfs, j) verdef->vd_aux = READ32 (dfs, j) verdef->vd_next = READ32 (dfs, j) vstart += verdef->vd_aux; if (vstart > end || vstart + sizeof (Elf_(Verdaux)) > end) { sdb_free (sdb_verdef); goto out_error; } j = 0; aux.vda_name = READ32 (vstart, j) aux.vda_next = READ32 (vstart, j) isum = i + verdef->vd_aux; if (aux.vda_name > bin->dynstr_size) { sdb_free (sdb_verdef); goto out_error; } sdb_num_set (sdb_verdef, "idx", i, 0); sdb_num_set (sdb_verdef, "vd_version", verdef->vd_version, 0); sdb_num_set (sdb_verdef, "vd_ndx", verdef->vd_ndx, 0); sdb_num_set (sdb_verdef, "vd_cnt", verdef->vd_cnt, 0); sdb_set (sdb_verdef, "vda_name", &bin->dynstr[aux.vda_name], 0); sdb_set (sdb_verdef, "flags", get_ver_flags (verdef->vd_flags), 0); for (j = 1; j < verdef->vd_cnt; ++j) { int k; Sdb *sdb_parent = sdb_new0 (); isum += aux.vda_next; vstart += aux.vda_next; if (vstart > end || vstart + sizeof(Elf_(Verdaux)) > end) { sdb_free (sdb_verdef); sdb_free (sdb_parent); goto out_error; } k = 0; aux.vda_name = READ32 (vstart, k) aux.vda_next = READ32 (vstart, k) if (aux.vda_name > bin->dynstr_size) { sdb_free (sdb_verdef); sdb_free (sdb_parent); goto out_error; } sdb_num_set (sdb_parent, "idx", isum, 0); sdb_num_set (sdb_parent, "parent", j, 0); sdb_set (sdb_parent, "vda_name", &bin->dynstr[aux.vda_name], 0); snprintf (key, sizeof (key), "parent%d", j - 1); sdb_ns_set (sdb_verdef, key, sdb_parent); } snprintf (key, sizeof (key), "verdef%d", cnt); sdb_ns_set (sdb, key, sdb_verdef); if (!verdef->vd_next) { sdb_free (sdb_verdef); goto out_error; } if ((st32)verdef->vd_next < 1) { eprintf ("Warning: Invalid vd_next in the ELF version\n"); break; } i += verdef->vd_next; } free (defs); return sdb; out_error: free (defs); sdb_free (sdb); return NULL; }
{'added': [(734, '\tfor (cnt = 0, i = 0; i >= 0 && cnt < shdr->sh_info && ((char *)defs + i < end); ++cnt) {'), (805, '\t\tif ((st32)verdef->vd_next < 1) {'), (806, '\t\t\teprintf ("Warning: Invalid vd_next in the ELF version\\n");'), (807, '\t\t\tbreak;'), (808, '\t\t}')], 'deleted': [(734, '\tfor (cnt = 0, i = 0; cnt < shdr->sh_info && ((char *)defs + i < end); ++cnt) {')]}
5
1
2821
21420
https://github.com/radare/radare2
CVE-2017-15385
['CWE-119']
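The func_after and diff fields of this record show the CVE-2017-15385 fix: the Elf_(Verdef) walk now requires i >= 0 in the loop condition and breaks when vd_next, reinterpreted as signed 32-bit (st32), is less than 1, so a malformed stride can no longer push the index outside the buffer (CWE-119). Below is a minimal standalone sketch of that guard pattern; the FakeVerdef type, the buffer layout, and walk_verdefs are illustrative assumptions for this sketch, not the radare2 structures.

/* Minimal sketch of the guard pattern added by the CVE-2017-15385 patch.
 * FakeVerdef and the entry layout are assumptions for illustration only. */
#include <stdint.h>
#include <stdio.h>

typedef struct {
    uint32_t vd_next; /* byte offset to the next entry; attacker-controlled */
} FakeVerdef;

/* Walks up to `count` chained entries inside buf[0..size).
 * Returns how many entries were visited before the walk stopped. */
static int walk_verdefs(const uint8_t *buf, size_t size, int count) {
    int i = 0, cnt, visited = 0;
    /* i >= 0 mirrors the patched loop condition in func_after above. */
    for (cnt = 0; i >= 0 && cnt < count && (size_t)i + sizeof (FakeVerdef) <= size; cnt++) {
        const FakeVerdef *vd = (const FakeVerdef *)(buf + i);
        visited++;
        if (!vd->vd_next) {
            break; /* end of chain */
        }
        /* The added check: a huge unsigned stride becomes negative when
         * read as signed, which would otherwise move i backwards or wrap it. */
        if ((int32_t)vd->vd_next < 1) {
            fprintf (stderr, "Warning: invalid vd_next, stopping walk\n");
            break;
        }
        i += vd->vd_next;
    }
    return visited;
}

int main(void) {
    uint8_t buf[64] = {0};
    /* First entry claims its successor lives 0xfffffff0 bytes ahead;
     * as int32_t that is -16, so the guard fires on the first entry. */
    ((FakeVerdef *)buf)->vd_next = 0xfffffff0u;
    printf ("visited %d entries\n", walk_verdefs (buf, sizeof (buf), 8));
    return 0;
}

Compiled as-is, the crafted stride trips the signed check on the first iteration and the walk terminates, which is the behaviour the two added checks in the diff field provide.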
diutil.cc
DU_getStringDOElement
/* * * Copyright (C) 1994-2021, OFFIS e.V. * All rights reserved. See COPYRIGHT file for details. * * This software and supporting documentation were partly developed by * * OFFIS e.V. * R&D Division Health * Escherweg 2 * D-26121 Oldenburg, Germany * * For further copyrights, see the following paragraphs. * */ /* ** Copyright (C) 1993/1994, OFFIS, Oldenburg University and CERIUM ** ** This software and supporting documentation were ** developed by ** ** Institut OFFIS ** Bereich Kommunikationssysteme ** Westerstr. 10-12 ** 26121 Oldenburg, Germany ** ** Fachbereich Informatik ** Abteilung Prozessinformatik ** Carl von Ossietzky Universitaet Oldenburg ** Ammerlaender Heerstr. 114-118 ** 26111 Oldenburg, Germany ** ** CERIUM ** Laboratoire SIM ** Faculte de Medecine ** 2 Avenue du Pr. Leon Bernard ** 35043 Rennes Cedex, France ** ** for CEN/TC251/WG4 as a contribution to the Radiological ** Society of North America (RSNA) 1993 Digital Imaging and ** Communications in Medicine (DICOM) Demonstration. ** ** THIS SOFTWARE IS MADE AVAILABLE, AS IS, AND NEITHER OFFIS, ** OLDENBURG UNIVERSITY NOR CERIUM MAKE ANY WARRANTY REGARDING ** THE SOFTWARE, ITS PERFORMANCE, ITS MERCHANTABILITY OR ** FITNESS FOR ANY PARTICULAR USE, FREEDOM FROM ANY COMPUTER ** DISEASES OR ITS CONFORMITY TO ANY SPECIFICATION. THE ** ENTIRE RISK AS TO QUALITY AND PERFORMANCE OF THE SOFTWARE ** IS WITH THE USER. ** ** Copyright of the software and supporting documentation ** is, unless otherwise stated, jointly owned by OFFIS, ** Oldenburg University and CERIUM and free access is hereby ** granted as a license to use this software, copy this ** software and prepare derivative works based upon this ** software. However, any distribution of this software ** source code or supporting documentation or derivative ** works (source code and supporting documentation) must ** include the three paragraphs of this copyright notice. 
** */ /* ** ** Author: Andrew Hewett Created: 11-08-93 ** ** Module: diutil ** ** Purpose: ** This file contains the interface to ** some general useful dicom utility routines ** ** Module Prefix: DU_ */ #include "dcmtk/config/osconfig.h" /* make sure OS specific configuration is included first */ #ifdef HAVE_UNIX_H #if defined(macintosh) && defined (HAVE_WINSOCK_H) /* unix.h defines timeval incompatible with winsock.h */ #define timeval _UNWANTED_timeval #endif #include <unix.h> /* for unlink() under Metrowerks C++ (Macintosh) */ #undef timeval #endif #ifdef HAVE_SYS_TYPES_H #include <sys/types.h> #endif #ifdef HAVE_SYS_STAT_H #include <sys/stat.h> #endif #ifdef HAVE_STAT_H #include <stat.h> #endif #include "dcmtk/ofstd/ofstd.h" #include "dcmtk/dcmnet/diutil.h" #include "dcmtk/dcmdata/dcdatset.h" #include "dcmtk/dcmdata/dcfilefo.h" #include "dcmtk/dcmdata/dcmetinf.h" #include "dcmtk/dcmdata/dcdeftag.h" #include "dcmtk/dcmdata/dcuid.h" #include "dcmtk/dcmdata/dcbytstr.h" static char staticBuf[256]; OFLogger DCM_dcmnetLogger = OFLog::getLogger(DCMNET_LOGGER_NAME); #define TO_UCHAR(s) OFstatic_cast(unsigned char, (s)) void DU_stripTrailingSpaces(char *s) { int i, n; if (s) { n = OFstatic_cast(int, strlen(s)); for (i = n - 1; i >= 0 && isspace(TO_UCHAR(s[i])); i--) s[i] = '\0'; } } void DU_stripLeadingSpaces(char *s) { int i, j, n; if (s == NULL) return; n = OFstatic_cast(int, strlen(s)); if (n == 0) return; if (!isspace(TO_UCHAR(s[0]))) return; /* no leading space */ /* first non-space */ for (i=0; i<n && isspace(TO_UCHAR(s[i])); i++) /* do nothing, just iterate */ ; if (i<n) { /* found non-space, pull to front (inclusive '\0') */ for (j=i; j<=n; j++) { s[j-i] = s[j]; } } else { /* all spaces */ s[0] = '\0'; } } void DU_stripLeadingAndTrailingSpaces(char *s) { DU_stripLeadingSpaces(s); DU_stripTrailingSpaces(s); } #undef TO_UCHAR OFBool DU_getStringDOElement(DcmItem *obj, DcmTagKey t, char *s, size_t bufsize) { DcmByteString *elem; DcmStack stack; OFCondition ec = EC_Normal; char* aString; ec = obj->search(t, stack); elem = (DcmByteString*) stack.top(); if (ec == EC_Normal && elem != NULL) { if (elem->getLength() == 0) { s[0] = '\0'; } else { ec = elem->getString(aString); OFStandard::strlcpy(s, aString, bufsize); } } return (ec == EC_Normal); } OFBool DU_putStringDOElement(DcmItem *obj, DcmTagKey t, const char *s) { OFCondition ec = EC_Normal; DcmElement *e = NULL; DcmTag tag(t); ec = DcmItem::newDicomElement(e, tag); if (ec == EC_Normal && s != NULL) { ec = e->putString(s); } if (ec == EC_Normal) { ec = obj->insert(e, OFTrue); } return (ec == EC_Normal); } OFBool DU_getShortDOElement(DcmItem *obj, DcmTagKey t, Uint16 *us) { DcmElement *elem; DcmStack stack; OFCondition ec = EC_Normal; ec = obj->search(t, stack); elem = (DcmElement*) stack.top(); if (ec == EC_Normal && elem != NULL) { ec = elem->getUint16(*us, 0); } return (ec == EC_Normal); } OFBool DU_putShortDOElement(DcmItem *obj, DcmTagKey t, Uint16 us) { OFCondition ec = EC_Normal; DcmElement *e = NULL; DcmTag tag(t); ec = DcmItem::newDicomElement(e, tag); if (ec == EC_Normal) { ec = e->putUint16(us); } if (ec == EC_Normal) { ec = obj->insert(e, OFTrue); } return (ec == EC_Normal); } OFBool DU_findSOPClassAndInstanceInDataSet( DcmItem *obj, char* sopClass, size_t sopClassSize, char* sopInstance, size_t sopInstanceSize, OFBool tolerateSpacePaddedUIDs) { OFBool result = (DU_getStringDOElement(obj, DCM_SOPClassUID, sopClass, sopClassSize) && DU_getStringDOElement(obj, DCM_SOPInstanceUID, sopInstance, sopInstanceSize)); if 
(tolerateSpacePaddedUIDs) { /* gracefully correct space-padded UID strings */ int slength; if ((0 < (slength=OFstatic_cast(int, strlen(sopClass))))&&(sopClass[slength-1]==' ')) sopClass[slength-1]=0; if ((0 < (slength=OFstatic_cast(int, strlen(sopInstance))))&&(sopInstance[slength-1]==' ')) sopInstance[slength-1]=0; } return result; } OFBool DU_findSOPClassAndInstanceInFile( const char *fname, char* sopClass, size_t sopClassSize, char* sopInstance, size_t sopInstanceSize, OFBool tolerateSpacePaddedUIDs) { DcmFileFormat ff; if (! ff.loadFile(fname, EXS_Unknown, EGL_noChange).good()) return OFFalse; /* look in the meta-header first */ OFBool found = DU_findSOPClassAndInstanceInDataSet( ff.getMetaInfo(), sopClass, sopClassSize, sopInstance, sopInstanceSize, tolerateSpacePaddedUIDs); if (!found) { found = DU_findSOPClassAndInstanceInDataSet( ff.getDataset(), sopClass, sopClassSize, sopInstance, sopInstanceSize, tolerateSpacePaddedUIDs); } return found; } const char * DU_cechoStatusString(Uint16 statusCode) { const char *s = NULL; if (statusCode == STATUS_Success) s = "Success"; else { sprintf(staticBuf, "Unknown Status: 0x%x", (unsigned int)statusCode); s = staticBuf; } return s; } const char * DU_cstoreStatusString(Uint16 statusCode) { const char *s = NULL; switch (statusCode) { case STATUS_Success: s = "Success"; break; case STATUS_STORE_Refused_SOPClassNotSupported: s = "Refused: SOPClassNotSupported"; break; case STATUS_STORE_Warning_CoercionOfDataElements: s = "Warning: CoercionOfDataElements"; break; case STATUS_STORE_Warning_DataSetDoesNotMatchSOPClass: s = "Warning: DataSetDoesNotMatchSOPClass"; break; case STATUS_STORE_Warning_ElementsDiscarded: s = "Warning: ElementsDiscarded"; break; } if (s) return s; switch (statusCode & 0xff00) { /* high byte significant */ case STATUS_STORE_Refused_OutOfResources: /* high byte */ s = "Refused: OutOfResources"; break; case STATUS_STORE_Error_DataSetDoesNotMatchSOPClass: /* high byte */ s = "Error: DataSetDoesNotMatchSOPClass"; break; } if (s) return s; switch (statusCode & 0xf000) { /* high nibble significant */ case STATUS_STORE_Error_CannotUnderstand: /* high nibble */ s = "Error: CannotUnderstand"; break; } if (s == NULL) { sprintf(staticBuf, "Unknown Status: 0x%x", (unsigned int)statusCode); s = staticBuf; } return s; } const char * DU_cfindStatusString(Uint16 statusCode) { const char *s = NULL; switch (statusCode) { case STATUS_Success: s = "Success"; break; case STATUS_Pending: s = "Pending"; break; case STATUS_FIND_Refused_OutOfResources: s = "Refused: OutOfResources"; break; case STATUS_FIND_Refused_SOPClassNotSupported: s = "Refused: SOPClassNotSupported"; break; case STATUS_FIND_Failed_IdentifierDoesNotMatchSOPClass: s = "Failed: IdentifierDoesNotMatchSOPClass"; break; case STATUS_FIND_Cancel_MatchingTerminatedDueToCancelRequest: s = "Cancel: MatchingTerminatedDueToCancelRequest"; break; case STATUS_FIND_Pending_WarningUnsupportedOptionalKeys: s = "Pending: WarningUnsupportedOptionalKeys"; break; } if (s) return s; switch (statusCode & 0xf000) { /* high nibble significant */ case STATUS_FIND_Failed_UnableToProcess: /* high nibble */ s = "Failed: UnableToProcess"; break; } if (s == NULL) { sprintf(staticBuf, "Unknown Status: 0x%x", (unsigned int)statusCode); s = staticBuf; } return s; } const char * DU_cmoveStatusString(Uint16 statusCode) { const char *s = NULL; switch (statusCode) { case STATUS_Success: s = "Success"; break; case STATUS_Pending: s = "Pending"; break; case STATUS_MOVE_Refused_OutOfResourcesNumberOfMatches: s = "Refused: 
OutOfResourcesNumberOfMatches"; break; case STATUS_MOVE_Refused_OutOfResourcesSubOperations: s = "Refused: OutOfResourcesSubOperations"; break; case STATUS_MOVE_Failed_SOPClassNotSupported: s = "Failed: SOPClassNotSupported"; break; case STATUS_MOVE_Failed_MoveDestinationUnknown: s = "Failed: MoveDestinationUnknown"; break; case STATUS_MOVE_Failed_IdentifierDoesNotMatchSOPClass: s = "Failed: IdentifierDoesNotMatchSOPClass"; break; case STATUS_MOVE_Cancel_SubOperationsTerminatedDueToCancelIndication: s = "Cancel: SubOperationsTerminatedDueToCancelIndication"; break; case STATUS_MOVE_Warning_SubOperationsCompleteOneOrMoreFailures: s = "Warning: SubOperationsCompleteOneOrMoreFailures"; break; } if (s) return s; switch (statusCode & 0xf000) { /* high nibble significant */ case STATUS_MOVE_Failed_UnableToProcess: /* high nibble */ s = "Failed: UnableToProcess"; break; } if (s == NULL) { sprintf(staticBuf, "Unknown Status: 0x%x", (unsigned int)statusCode); s = staticBuf; } return s; } const char * DU_cgetStatusString(Uint16 statusCode) { const char *s = NULL; switch (statusCode) { case STATUS_Success: s = "Success"; break; case STATUS_Pending: s = "Pending"; break; case STATUS_GET_Refused_OutOfResourcesNumberOfMatches: s = "Refused: OutOfResourcesNumberOfMatches"; break; case STATUS_GET_Refused_OutOfResourcesSubOperations: s = "Refused: OutOfResourcesSubOperations"; break; case STATUS_GET_Failed_SOPClassNotSupported: s = "Failed: SOPClassNotSupported"; break; case STATUS_GET_Failed_IdentifierDoesNotMatchSOPClass: s = "Failed: IdentifierDoesNotMatchSOPClass"; break; case STATUS_GET_Cancel_SubOperationsTerminatedDueToCancelIndication: s = "Cancel: SubOperationsTerminatedDueToCancelIndication"; break; case STATUS_GET_Warning_SubOperationsCompleteOneOrMoreFailures: s = "Warning: SubOperationsCompleteOneOrMoreFailures"; break; } if (s) return s; switch (statusCode & 0xf000) { /* high nibble significant */ case STATUS_GET_Failed_UnableToProcess: /* high nibble */ s = "Failed: UnableToProcess"; break; } if (s == NULL) { sprintf(staticBuf, "Unknown Status: 0x%x", (unsigned int)statusCode); s = staticBuf; } return s; } const char * DU_ncreateStatusString(Uint16 statusCode) { const char *s = NULL; switch (statusCode) { case STATUS_Success: s = "Success"; break; case STATUS_N_ClassInstanceConflict: s = "Failure: ClassInstanceConflict"; break; case STATUS_N_DuplicateInvocation: s = "Failure: DuplicateInvocation"; break; case STATUS_N_DuplicateSOPInstance: s = "Failure: DuplicateSOPInstance"; break; case STATUS_N_InvalidAttributeValue: s = "Failure: InvalidAttributeValue"; break; case STATUS_N_InvalidObjectInstance: s = "Failure: InvalidObjectInstance"; break; case STATUS_N_MissingAttribute: s = "Failure: MissingAttribute"; break; case STATUS_N_MissingAttributeValue: s = "Failure: MissingAttributeValue"; break; case STATUS_N_MistypedArgument: s = "Failure: MistypedArgument"; break; case STATUS_N_NoSuchAttribute: s = "Failure: NoSuchAttribute"; break; case STATUS_N_NoSuchSOPClass: s = "Failure: NoSuchSOPClass"; break; case STATUS_N_NoSuchObjectInstance: s = "Failure: NoSuchObjectInstance"; break; case STATUS_N_ProcessingFailure: s = "Failure: ProcessingFailure"; break; case STATUS_N_ResourceLimitation: s = "Failure: ResourceLimitation"; break; case STATUS_N_UnrecognizedOperation: s = "Failure: UnrecognizedOperation"; break; case STATUS_N_AttributeValueOutOfRange: s = "Warning: AttributeValueOutOfRange"; break; } if (s) return s; switch (statusCode & 0xf000) { /* high nibble significant */ case 
STATUS_FIND_Failed_UnableToProcess: /* high nibble */ s = "Failed: UnableToProcess"; break; } if (s == NULL) { sprintf(staticBuf, "Unknown Status: 0x%x", (unsigned int)statusCode); s = staticBuf; } return s; } const char * DU_ngetStatusString(Uint16 statusCode) { const char *s = NULL; switch (statusCode) { case STATUS_Success: s = "Success"; break; case STATUS_N_ClassInstanceConflict: s = "Failure: ClassInstanceConflict"; break; case STATUS_N_DuplicateInvocation: s = "Failure: DuplicateInvocation"; break; case STATUS_N_InvalidObjectInstance: s = "Failure: InvalidObjectInstance"; break; case STATUS_N_MistypedArgument: s = "Failure: MistypedArgument"; break; case STATUS_N_NoSuchSOPClass: s = "Failure: NoSuchSOPClass"; break; case STATUS_N_NoSuchObjectInstance: s = "Failure: NoSuchObjectInstance"; break; case STATUS_N_ProcessingFailure: s = "Failure: ProcessingFailure"; break; case STATUS_N_ResourceLimitation: s = "Failure: ResourceLimitation"; break; case STATUS_N_AttributeListError: s = "Warning: AttributeListError"; break; case STATUS_N_AttributeValueOutOfRange: s = "Warning: AttributeValueOutOfRange"; break; } if (s) return s; switch (statusCode & 0xf000) { /* high nibble significant */ case STATUS_FIND_Failed_UnableToProcess: /* high nibble */ s = "Failed: UnableToProcess"; break; } if (s == NULL) { sprintf(staticBuf, "Unknown Status: 0x%x", (unsigned int)statusCode); s = staticBuf; } return s; } const char * DU_nsetStatusString(Uint16 statusCode) { const char *s = NULL; switch (statusCode) { case STATUS_Success: s = "Success"; break; case STATUS_N_ClassInstanceConflict: s = "Failure: ClassInstanceConflict"; break; case STATUS_N_DuplicateInvocation: s = "Failure: DuplicateInvocation"; break; case STATUS_N_InvalidAttributeValue: s = "Failure: InvalidAttributeValue"; break; case STATUS_N_MistypedArgument: s = "Failure: MistypedArgument"; break; case STATUS_N_InvalidObjectInstance: s = "Failure: InvalidObjectInstance"; break; case STATUS_N_MissingAttributeValue: s = "Failure: MissingAttributeValue"; break; case STATUS_N_NoSuchAttribute: s = "Failure: NoSuchAttribute"; break; case STATUS_N_NoSuchSOPClass: s = "Failure: NoSuchSOPClass"; break; case STATUS_N_NoSuchObjectInstance: s = "Failure: NoSuchObjectInstance"; break; case STATUS_N_ProcessingFailure: s = "Failure: ProcessingFailure"; break; case STATUS_N_ResourceLimitation: s = "Failure: ResourceLimitation"; break; case STATUS_N_UnrecognizedOperation: s = "Failure: UnrecognizedOperation"; break; case STATUS_N_AttributeValueOutOfRange: s = "Warning: AttributeValueOutOfRange"; break; } if (s) return s; switch (statusCode & 0xf000) { /* high nibble significant */ case STATUS_FIND_Failed_UnableToProcess: /* high nibble */ s = "Failed: UnableToProcess"; break; } if (s == NULL) { sprintf(staticBuf, "Unknown Status: 0x%x", (unsigned int)statusCode); s = staticBuf; } return s; } const char * DU_nactionStatusString(Uint16 statusCode) { const char *s = NULL; switch (statusCode) { case STATUS_Success: s = "Success"; break; case STATUS_N_ClassInstanceConflict: s = "Failure: ClassInstanceConflict"; break; case STATUS_N_DuplicateInvocation: s = "Failure: DuplicateInvocation"; break; case STATUS_N_InvalidArgumentValue: s = "Failure: InvalidArgumentValue"; break; case STATUS_N_InvalidObjectInstance: s = "Failure: InvalidObjectInstance"; break; case STATUS_N_MistypedArgument: s = "Failure: MistypedArgument"; break; case STATUS_N_NoSuchAction: s = "Failure: NoSuchAction"; break; case STATUS_N_NoSuchArgument: s = "Failure: NoSuchArgument"; break; case 
STATUS_N_NoSuchSOPClass: s = "Failure: NoSuchSOPClass"; break; case STATUS_N_NoSuchObjectInstance: s = "Failure: NoSuchObjectInstance"; break; case STATUS_N_ProcessingFailure: s = "Failure: ProcessingFailure"; break; case STATUS_N_ResourceLimitation: s = "Failure: ResourceLimitation"; break; case STATUS_N_UnrecognizedOperation: s = "Failure: UnrecognizedOperation"; break; } if (s) return s; switch (statusCode & 0xf000) { /* high nibble significant */ case STATUS_FIND_Failed_UnableToProcess: /* high nibble */ s = "Failed: UnableToProcess"; break; } if (s == NULL) { sprintf(staticBuf, "Unknown Status: 0x%x", (unsigned int)statusCode); s = staticBuf; } return s; } const char * DU_ndeleteStatusString(Uint16 statusCode) { const char *s = NULL; switch (statusCode) { case STATUS_Success: s = "Success"; break; case STATUS_N_ClassInstanceConflict: s = "Failure: ClassInstanceConflict"; break; case STATUS_N_DuplicateInvocation: s = "Failure: DuplicateInvocation"; break; case STATUS_N_InvalidObjectInstance: s = "Failure: InvalidObjectInstance"; break; case STATUS_N_MistypedArgument: s = "Failure: MistypedArgument"; break; case STATUS_N_NoSuchSOPClass: s = "Failure: NoSuchSOPClass"; break; case STATUS_N_NoSuchObjectInstance: s = "Failure: NoSuchObjectInstance"; break; case STATUS_N_ProcessingFailure: s = "Failure: ProcessingFailure"; break; case STATUS_N_ResourceLimitation: s = "Failure: ResourceLimitation"; break; case STATUS_N_UnrecognizedOperation: s = "Failure: UnrecognizedOperation"; break; } if (s) return s; switch (statusCode & 0xf000) { /* high nibble significant */ case STATUS_FIND_Failed_UnableToProcess: /* high nibble */ s = "Failed: UnableToProcess"; break; } if (s == NULL) { sprintf(staticBuf, "Unknown Status: 0x%x", (unsigned int)statusCode); s = staticBuf; } return s; } const char * DU_neventReportStatusString(Uint16 statusCode) { const char *s = NULL; switch (statusCode) { case STATUS_Success: s = "Success"; break; case STATUS_N_ClassInstanceConflict: s = "Failure: ClassInstanceConflict"; break; case STATUS_N_DuplicateInvocation: s = "Failure: DuplicateInvocation"; break; case STATUS_N_InvalidArgumentValue: s = "Failure: InvalidArgumentValue"; break; case STATUS_N_InvalidObjectInstance: s = "Failure: InvalidObjectInstance"; break; case STATUS_N_MistypedArgument: s = "Failure: MistypedArgument"; break; case STATUS_N_NoSuchArgument: s = "Failure: NoSuchArgument"; break; case STATUS_N_NoSuchEventType: s = "Failure: NoSuchEventType"; break; case STATUS_N_NoSuchSOPClass: s = "Failure: NoSuchSOPClass"; break; case STATUS_N_NoSuchObjectInstance: s = "Failure: NoSuchObjectInstance"; break; case STATUS_N_ProcessingFailure: s = "Failure: ProcessingFailure"; break; case STATUS_N_ResourceLimitation: s = "Failure: ResourceLimitation"; break; case STATUS_N_UnrecognizedOperation: s = "Failure: UnrecognizedOperation"; break; } if (s) return s; switch (statusCode & 0xf000) { /* high nibble significant */ case STATUS_FIND_Failed_UnableToProcess: /* high nibble */ s = "Failed: UnableToProcess"; break; } if (s == NULL) { sprintf(staticBuf, "Unknown Status: 0x%x", (unsigned int)statusCode); s = staticBuf; } return s; } void DU_logSelectResult(int selectReturnValue) { if (selectReturnValue < 0) { #ifdef HAVE_WINSOCK_H LPVOID errBuf = NULL; OFString err; // Obtain an error string from system error code if (FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS, NULL, GetLastError(), MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), OFreinterpret_cast(LPTSTR, 
&errBuf), 0, NULL) > 0) { err = (OFstatic_cast(const char *, errBuf)); } else err = "Unknown Winsock error code"; LocalFree(errBuf); DCMNET_DEBUG("Windows Socket error while waiting for incoming network data: " << err); #else // POSIX interface char buf[256]; DCMNET_DEBUG("Error while waiting for incoming network data: " << OFStandard::strerror(errno, buf, 256)); #endif } else if (selectReturnValue == 0) { DCMNET_TRACE("Timeout while waiting for incoming network data"); } else { // This function is only meant to be used for return values <= 0, handle // normal case anyway DCMNET_TRACE("Receiving data via select()"); } }
/* * * Copyright (C) 1994-2021, OFFIS e.V. * All rights reserved. See COPYRIGHT file for details. * * This software and supporting documentation were partly developed by * * OFFIS e.V. * R&D Division Health * Escherweg 2 * D-26121 Oldenburg, Germany * * For further copyrights, see the following paragraphs. * */ /* ** Copyright (C) 1993/1994, OFFIS, Oldenburg University and CERIUM ** ** This software and supporting documentation were ** developed by ** ** Institut OFFIS ** Bereich Kommunikationssysteme ** Westerstr. 10-12 ** 26121 Oldenburg, Germany ** ** Fachbereich Informatik ** Abteilung Prozessinformatik ** Carl von Ossietzky Universitaet Oldenburg ** Ammerlaender Heerstr. 114-118 ** 26111 Oldenburg, Germany ** ** CERIUM ** Laboratoire SIM ** Faculte de Medecine ** 2 Avenue du Pr. Leon Bernard ** 35043 Rennes Cedex, France ** ** for CEN/TC251/WG4 as a contribution to the Radiological ** Society of North America (RSNA) 1993 Digital Imaging and ** Communications in Medicine (DICOM) Demonstration. ** ** THIS SOFTWARE IS MADE AVAILABLE, AS IS, AND NEITHER OFFIS, ** OLDENBURG UNIVERSITY NOR CERIUM MAKE ANY WARRANTY REGARDING ** THE SOFTWARE, ITS PERFORMANCE, ITS MERCHANTABILITY OR ** FITNESS FOR ANY PARTICULAR USE, FREEDOM FROM ANY COMPUTER ** DISEASES OR ITS CONFORMITY TO ANY SPECIFICATION. THE ** ENTIRE RISK AS TO QUALITY AND PERFORMANCE OF THE SOFTWARE ** IS WITH THE USER. ** ** Copyright of the software and supporting documentation ** is, unless otherwise stated, jointly owned by OFFIS, ** Oldenburg University and CERIUM and free access is hereby ** granted as a license to use this software, copy this ** software and prepare derivative works based upon this ** software. However, any distribution of this software ** source code or supporting documentation or derivative ** works (source code and supporting documentation) must ** include the three paragraphs of this copyright notice. 
** */ /* ** ** Author: Andrew Hewett Created: 11-08-93 ** ** Module: diutil ** ** Purpose: ** This file contains the interface to ** some general useful dicom utility routines ** ** Module Prefix: DU_ */ #include "dcmtk/config/osconfig.h" /* make sure OS specific configuration is included first */ #ifdef HAVE_UNIX_H #if defined(macintosh) && defined (HAVE_WINSOCK_H) /* unix.h defines timeval incompatible with winsock.h */ #define timeval _UNWANTED_timeval #endif #include <unix.h> /* for unlink() under Metrowerks C++ (Macintosh) */ #undef timeval #endif #ifdef HAVE_SYS_TYPES_H #include <sys/types.h> #endif #ifdef HAVE_SYS_STAT_H #include <sys/stat.h> #endif #ifdef HAVE_STAT_H #include <stat.h> #endif #include "dcmtk/ofstd/ofstd.h" #include "dcmtk/dcmnet/diutil.h" #include "dcmtk/dcmdata/dcdatset.h" #include "dcmtk/dcmdata/dcfilefo.h" #include "dcmtk/dcmdata/dcmetinf.h" #include "dcmtk/dcmdata/dcdeftag.h" #include "dcmtk/dcmdata/dcuid.h" #include "dcmtk/dcmdata/dcbytstr.h" static char staticBuf[256]; OFLogger DCM_dcmnetLogger = OFLog::getLogger(DCMNET_LOGGER_NAME); #define TO_UCHAR(s) OFstatic_cast(unsigned char, (s)) void DU_stripTrailingSpaces(char *s) { int i, n; if (s) { n = OFstatic_cast(int, strlen(s)); for (i = n - 1; i >= 0 && isspace(TO_UCHAR(s[i])); i--) s[i] = '\0'; } } void DU_stripLeadingSpaces(char *s) { int i, j, n; if (s == NULL) return; n = OFstatic_cast(int, strlen(s)); if (n == 0) return; if (!isspace(TO_UCHAR(s[0]))) return; /* no leading space */ /* first non-space */ for (i=0; i<n && isspace(TO_UCHAR(s[i])); i++) /* do nothing, just iterate */ ; if (i<n) { /* found non-space, pull to front (inclusive '\0') */ for (j=i; j<=n; j++) { s[j-i] = s[j]; } } else { /* all spaces */ s[0] = '\0'; } } void DU_stripLeadingAndTrailingSpaces(char *s) { DU_stripLeadingSpaces(s); DU_stripTrailingSpaces(s); } #undef TO_UCHAR OFBool DU_getStringDOElement(DcmItem *obj, DcmTagKey t, char *s, size_t bufsize) { DcmByteString *elem; DcmStack stack; OFCondition ec = EC_Normal; char* aString; ec = obj->search(t, stack); elem = (DcmByteString*) stack.top(); if (ec == EC_Normal && elem != NULL) { if (elem->getLength() == 0) { s[0] = '\0'; } else { ec = elem->getString(aString); if (ec == EC_Normal) OFStandard::strlcpy(s, aString, bufsize); } } return (ec == EC_Normal); } OFBool DU_putStringDOElement(DcmItem *obj, DcmTagKey t, const char *s) { OFCondition ec = EC_Normal; DcmElement *e = NULL; DcmTag tag(t); ec = DcmItem::newDicomElement(e, tag); if (ec == EC_Normal && s != NULL) { ec = e->putString(s); } if (ec == EC_Normal) { ec = obj->insert(e, OFTrue); } return (ec == EC_Normal); } OFBool DU_getShortDOElement(DcmItem *obj, DcmTagKey t, Uint16 *us) { DcmElement *elem; DcmStack stack; OFCondition ec = EC_Normal; ec = obj->search(t, stack); elem = (DcmElement*) stack.top(); if (ec == EC_Normal && elem != NULL) { ec = elem->getUint16(*us, 0); } return (ec == EC_Normal); } OFBool DU_putShortDOElement(DcmItem *obj, DcmTagKey t, Uint16 us) { OFCondition ec = EC_Normal; DcmElement *e = NULL; DcmTag tag(t); ec = DcmItem::newDicomElement(e, tag); if (ec == EC_Normal) { ec = e->putUint16(us); } if (ec == EC_Normal) { ec = obj->insert(e, OFTrue); } return (ec == EC_Normal); } OFBool DU_findSOPClassAndInstanceInDataSet( DcmItem *obj, char* sopClass, size_t sopClassSize, char* sopInstance, size_t sopInstanceSize, OFBool tolerateSpacePaddedUIDs) { OFBool result = (DU_getStringDOElement(obj, DCM_SOPClassUID, sopClass, sopClassSize) && DU_getStringDOElement(obj, DCM_SOPInstanceUID, sopInstance, 
sopInstanceSize)); if (tolerateSpacePaddedUIDs) { /* gracefully correct space-padded UID strings */ int slength; if ((0 < (slength=OFstatic_cast(int, strlen(sopClass))))&&(sopClass[slength-1]==' ')) sopClass[slength-1]=0; if ((0 < (slength=OFstatic_cast(int, strlen(sopInstance))))&&(sopInstance[slength-1]==' ')) sopInstance[slength-1]=0; } return result; } OFBool DU_findSOPClassAndInstanceInFile( const char *fname, char* sopClass, size_t sopClassSize, char* sopInstance, size_t sopInstanceSize, OFBool tolerateSpacePaddedUIDs) { DcmFileFormat ff; if (! ff.loadFile(fname, EXS_Unknown, EGL_noChange).good()) return OFFalse; /* look in the meta-header first */ OFBool found = DU_findSOPClassAndInstanceInDataSet( ff.getMetaInfo(), sopClass, sopClassSize, sopInstance, sopInstanceSize, tolerateSpacePaddedUIDs); if (!found) { found = DU_findSOPClassAndInstanceInDataSet( ff.getDataset(), sopClass, sopClassSize, sopInstance, sopInstanceSize, tolerateSpacePaddedUIDs); } return found; } const char * DU_cechoStatusString(Uint16 statusCode) { const char *s = NULL; if (statusCode == STATUS_Success) s = "Success"; else { sprintf(staticBuf, "Unknown Status: 0x%x", (unsigned int)statusCode); s = staticBuf; } return s; } const char * DU_cstoreStatusString(Uint16 statusCode) { const char *s = NULL; switch (statusCode) { case STATUS_Success: s = "Success"; break; case STATUS_STORE_Refused_SOPClassNotSupported: s = "Refused: SOPClassNotSupported"; break; case STATUS_STORE_Warning_CoercionOfDataElements: s = "Warning: CoercionOfDataElements"; break; case STATUS_STORE_Warning_DataSetDoesNotMatchSOPClass: s = "Warning: DataSetDoesNotMatchSOPClass"; break; case STATUS_STORE_Warning_ElementsDiscarded: s = "Warning: ElementsDiscarded"; break; } if (s) return s; switch (statusCode & 0xff00) { /* high byte significant */ case STATUS_STORE_Refused_OutOfResources: /* high byte */ s = "Refused: OutOfResources"; break; case STATUS_STORE_Error_DataSetDoesNotMatchSOPClass: /* high byte */ s = "Error: DataSetDoesNotMatchSOPClass"; break; } if (s) return s; switch (statusCode & 0xf000) { /* high nibble significant */ case STATUS_STORE_Error_CannotUnderstand: /* high nibble */ s = "Error: CannotUnderstand"; break; } if (s == NULL) { sprintf(staticBuf, "Unknown Status: 0x%x", (unsigned int)statusCode); s = staticBuf; } return s; } const char * DU_cfindStatusString(Uint16 statusCode) { const char *s = NULL; switch (statusCode) { case STATUS_Success: s = "Success"; break; case STATUS_Pending: s = "Pending"; break; case STATUS_FIND_Refused_OutOfResources: s = "Refused: OutOfResources"; break; case STATUS_FIND_Refused_SOPClassNotSupported: s = "Refused: SOPClassNotSupported"; break; case STATUS_FIND_Failed_IdentifierDoesNotMatchSOPClass: s = "Failed: IdentifierDoesNotMatchSOPClass"; break; case STATUS_FIND_Cancel_MatchingTerminatedDueToCancelRequest: s = "Cancel: MatchingTerminatedDueToCancelRequest"; break; case STATUS_FIND_Pending_WarningUnsupportedOptionalKeys: s = "Pending: WarningUnsupportedOptionalKeys"; break; } if (s) return s; switch (statusCode & 0xf000) { /* high nibble significant */ case STATUS_FIND_Failed_UnableToProcess: /* high nibble */ s = "Failed: UnableToProcess"; break; } if (s == NULL) { sprintf(staticBuf, "Unknown Status: 0x%x", (unsigned int)statusCode); s = staticBuf; } return s; } const char * DU_cmoveStatusString(Uint16 statusCode) { const char *s = NULL; switch (statusCode) { case STATUS_Success: s = "Success"; break; case STATUS_Pending: s = "Pending"; break; case 
STATUS_MOVE_Refused_OutOfResourcesNumberOfMatches: s = "Refused: OutOfResourcesNumberOfMatches"; break; case STATUS_MOVE_Refused_OutOfResourcesSubOperations: s = "Refused: OutOfResourcesSubOperations"; break; case STATUS_MOVE_Failed_SOPClassNotSupported: s = "Failed: SOPClassNotSupported"; break; case STATUS_MOVE_Failed_MoveDestinationUnknown: s = "Failed: MoveDestinationUnknown"; break; case STATUS_MOVE_Failed_IdentifierDoesNotMatchSOPClass: s = "Failed: IdentifierDoesNotMatchSOPClass"; break; case STATUS_MOVE_Cancel_SubOperationsTerminatedDueToCancelIndication: s = "Cancel: SubOperationsTerminatedDueToCancelIndication"; break; case STATUS_MOVE_Warning_SubOperationsCompleteOneOrMoreFailures: s = "Warning: SubOperationsCompleteOneOrMoreFailures"; break; } if (s) return s; switch (statusCode & 0xf000) { /* high nibble significant */ case STATUS_MOVE_Failed_UnableToProcess: /* high nibble */ s = "Failed: UnableToProcess"; break; } if (s == NULL) { sprintf(staticBuf, "Unknown Status: 0x%x", (unsigned int)statusCode); s = staticBuf; } return s; } const char * DU_cgetStatusString(Uint16 statusCode) { const char *s = NULL; switch (statusCode) { case STATUS_Success: s = "Success"; break; case STATUS_Pending: s = "Pending"; break; case STATUS_GET_Refused_OutOfResourcesNumberOfMatches: s = "Refused: OutOfResourcesNumberOfMatches"; break; case STATUS_GET_Refused_OutOfResourcesSubOperations: s = "Refused: OutOfResourcesSubOperations"; break; case STATUS_GET_Failed_SOPClassNotSupported: s = "Failed: SOPClassNotSupported"; break; case STATUS_GET_Failed_IdentifierDoesNotMatchSOPClass: s = "Failed: IdentifierDoesNotMatchSOPClass"; break; case STATUS_GET_Cancel_SubOperationsTerminatedDueToCancelIndication: s = "Cancel: SubOperationsTerminatedDueToCancelIndication"; break; case STATUS_GET_Warning_SubOperationsCompleteOneOrMoreFailures: s = "Warning: SubOperationsCompleteOneOrMoreFailures"; break; } if (s) return s; switch (statusCode & 0xf000) { /* high nibble significant */ case STATUS_GET_Failed_UnableToProcess: /* high nibble */ s = "Failed: UnableToProcess"; break; } if (s == NULL) { sprintf(staticBuf, "Unknown Status: 0x%x", (unsigned int)statusCode); s = staticBuf; } return s; } const char * DU_ncreateStatusString(Uint16 statusCode) { const char *s = NULL; switch (statusCode) { case STATUS_Success: s = "Success"; break; case STATUS_N_ClassInstanceConflict: s = "Failure: ClassInstanceConflict"; break; case STATUS_N_DuplicateInvocation: s = "Failure: DuplicateInvocation"; break; case STATUS_N_DuplicateSOPInstance: s = "Failure: DuplicateSOPInstance"; break; case STATUS_N_InvalidAttributeValue: s = "Failure: InvalidAttributeValue"; break; case STATUS_N_InvalidObjectInstance: s = "Failure: InvalidObjectInstance"; break; case STATUS_N_MissingAttribute: s = "Failure: MissingAttribute"; break; case STATUS_N_MissingAttributeValue: s = "Failure: MissingAttributeValue"; break; case STATUS_N_MistypedArgument: s = "Failure: MistypedArgument"; break; case STATUS_N_NoSuchAttribute: s = "Failure: NoSuchAttribute"; break; case STATUS_N_NoSuchSOPClass: s = "Failure: NoSuchSOPClass"; break; case STATUS_N_NoSuchObjectInstance: s = "Failure: NoSuchObjectInstance"; break; case STATUS_N_ProcessingFailure: s = "Failure: ProcessingFailure"; break; case STATUS_N_ResourceLimitation: s = "Failure: ResourceLimitation"; break; case STATUS_N_UnrecognizedOperation: s = "Failure: UnrecognizedOperation"; break; case STATUS_N_AttributeValueOutOfRange: s = "Warning: AttributeValueOutOfRange"; break; } if (s) return s; switch 
(statusCode & 0xf000) { /* high nibble significant */ case STATUS_FIND_Failed_UnableToProcess: /* high nibble */ s = "Failed: UnableToProcess"; break; } if (s == NULL) { sprintf(staticBuf, "Unknown Status: 0x%x", (unsigned int)statusCode); s = staticBuf; } return s; } const char * DU_ngetStatusString(Uint16 statusCode) { const char *s = NULL; switch (statusCode) { case STATUS_Success: s = "Success"; break; case STATUS_N_ClassInstanceConflict: s = "Failure: ClassInstanceConflict"; break; case STATUS_N_DuplicateInvocation: s = "Failure: DuplicateInvocation"; break; case STATUS_N_InvalidObjectInstance: s = "Failure: InvalidObjectInstance"; break; case STATUS_N_MistypedArgument: s = "Failure: MistypedArgument"; break; case STATUS_N_NoSuchSOPClass: s = "Failure: NoSuchSOPClass"; break; case STATUS_N_NoSuchObjectInstance: s = "Failure: NoSuchObjectInstance"; break; case STATUS_N_ProcessingFailure: s = "Failure: ProcessingFailure"; break; case STATUS_N_ResourceLimitation: s = "Failure: ResourceLimitation"; break; case STATUS_N_AttributeListError: s = "Warning: AttributeListError"; break; case STATUS_N_AttributeValueOutOfRange: s = "Warning: AttributeValueOutOfRange"; break; } if (s) return s; switch (statusCode & 0xf000) { /* high nibble significant */ case STATUS_FIND_Failed_UnableToProcess: /* high nibble */ s = "Failed: UnableToProcess"; break; } if (s == NULL) { sprintf(staticBuf, "Unknown Status: 0x%x", (unsigned int)statusCode); s = staticBuf; } return s; } const char * DU_nsetStatusString(Uint16 statusCode) { const char *s = NULL; switch (statusCode) { case STATUS_Success: s = "Success"; break; case STATUS_N_ClassInstanceConflict: s = "Failure: ClassInstanceConflict"; break; case STATUS_N_DuplicateInvocation: s = "Failure: DuplicateInvocation"; break; case STATUS_N_InvalidAttributeValue: s = "Failure: InvalidAttributeValue"; break; case STATUS_N_MistypedArgument: s = "Failure: MistypedArgument"; break; case STATUS_N_InvalidObjectInstance: s = "Failure: InvalidObjectInstance"; break; case STATUS_N_MissingAttributeValue: s = "Failure: MissingAttributeValue"; break; case STATUS_N_NoSuchAttribute: s = "Failure: NoSuchAttribute"; break; case STATUS_N_NoSuchSOPClass: s = "Failure: NoSuchSOPClass"; break; case STATUS_N_NoSuchObjectInstance: s = "Failure: NoSuchObjectInstance"; break; case STATUS_N_ProcessingFailure: s = "Failure: ProcessingFailure"; break; case STATUS_N_ResourceLimitation: s = "Failure: ResourceLimitation"; break; case STATUS_N_UnrecognizedOperation: s = "Failure: UnrecognizedOperation"; break; case STATUS_N_AttributeValueOutOfRange: s = "Warning: AttributeValueOutOfRange"; break; } if (s) return s; switch (statusCode & 0xf000) { /* high nibble significant */ case STATUS_FIND_Failed_UnableToProcess: /* high nibble */ s = "Failed: UnableToProcess"; break; } if (s == NULL) { sprintf(staticBuf, "Unknown Status: 0x%x", (unsigned int)statusCode); s = staticBuf; } return s; } const char * DU_nactionStatusString(Uint16 statusCode) { const char *s = NULL; switch (statusCode) { case STATUS_Success: s = "Success"; break; case STATUS_N_ClassInstanceConflict: s = "Failure: ClassInstanceConflict"; break; case STATUS_N_DuplicateInvocation: s = "Failure: DuplicateInvocation"; break; case STATUS_N_InvalidArgumentValue: s = "Failure: InvalidArgumentValue"; break; case STATUS_N_InvalidObjectInstance: s = "Failure: InvalidObjectInstance"; break; case STATUS_N_MistypedArgument: s = "Failure: MistypedArgument"; break; case STATUS_N_NoSuchAction: s = "Failure: NoSuchAction"; break; case 
STATUS_N_NoSuchArgument: s = "Failure: NoSuchArgument"; break; case STATUS_N_NoSuchSOPClass: s = "Failure: NoSuchSOPClass"; break; case STATUS_N_NoSuchObjectInstance: s = "Failure: NoSuchObjectInstance"; break; case STATUS_N_ProcessingFailure: s = "Failure: ProcessingFailure"; break; case STATUS_N_ResourceLimitation: s = "Failure: ResourceLimitation"; break; case STATUS_N_UnrecognizedOperation: s = "Failure: UnrecognizedOperation"; break; } if (s) return s; switch (statusCode & 0xf000) { /* high nibble significant */ case STATUS_FIND_Failed_UnableToProcess: /* high nibble */ s = "Failed: UnableToProcess"; break; } if (s == NULL) { sprintf(staticBuf, "Unknown Status: 0x%x", (unsigned int)statusCode); s = staticBuf; } return s; } const char * DU_ndeleteStatusString(Uint16 statusCode) { const char *s = NULL; switch (statusCode) { case STATUS_Success: s = "Success"; break; case STATUS_N_ClassInstanceConflict: s = "Failure: ClassInstanceConflict"; break; case STATUS_N_DuplicateInvocation: s = "Failure: DuplicateInvocation"; break; case STATUS_N_InvalidObjectInstance: s = "Failure: InvalidObjectInstance"; break; case STATUS_N_MistypedArgument: s = "Failure: MistypedArgument"; break; case STATUS_N_NoSuchSOPClass: s = "Failure: NoSuchSOPClass"; break; case STATUS_N_NoSuchObjectInstance: s = "Failure: NoSuchObjectInstance"; break; case STATUS_N_ProcessingFailure: s = "Failure: ProcessingFailure"; break; case STATUS_N_ResourceLimitation: s = "Failure: ResourceLimitation"; break; case STATUS_N_UnrecognizedOperation: s = "Failure: UnrecognizedOperation"; break; } if (s) return s; switch (statusCode & 0xf000) { /* high nibble significant */ case STATUS_FIND_Failed_UnableToProcess: /* high nibble */ s = "Failed: UnableToProcess"; break; } if (s == NULL) { sprintf(staticBuf, "Unknown Status: 0x%x", (unsigned int)statusCode); s = staticBuf; } return s; } const char * DU_neventReportStatusString(Uint16 statusCode) { const char *s = NULL; switch (statusCode) { case STATUS_Success: s = "Success"; break; case STATUS_N_ClassInstanceConflict: s = "Failure: ClassInstanceConflict"; break; case STATUS_N_DuplicateInvocation: s = "Failure: DuplicateInvocation"; break; case STATUS_N_InvalidArgumentValue: s = "Failure: InvalidArgumentValue"; break; case STATUS_N_InvalidObjectInstance: s = "Failure: InvalidObjectInstance"; break; case STATUS_N_MistypedArgument: s = "Failure: MistypedArgument"; break; case STATUS_N_NoSuchArgument: s = "Failure: NoSuchArgument"; break; case STATUS_N_NoSuchEventType: s = "Failure: NoSuchEventType"; break; case STATUS_N_NoSuchSOPClass: s = "Failure: NoSuchSOPClass"; break; case STATUS_N_NoSuchObjectInstance: s = "Failure: NoSuchObjectInstance"; break; case STATUS_N_ProcessingFailure: s = "Failure: ProcessingFailure"; break; case STATUS_N_ResourceLimitation: s = "Failure: ResourceLimitation"; break; case STATUS_N_UnrecognizedOperation: s = "Failure: UnrecognizedOperation"; break; } if (s) return s; switch (statusCode & 0xf000) { /* high nibble significant */ case STATUS_FIND_Failed_UnableToProcess: /* high nibble */ s = "Failed: UnableToProcess"; break; } if (s == NULL) { sprintf(staticBuf, "Unknown Status: 0x%x", (unsigned int)statusCode); s = staticBuf; } return s; } void DU_logSelectResult(int selectReturnValue) { if (selectReturnValue < 0) { #ifdef HAVE_WINSOCK_H LPVOID errBuf = NULL; OFString err; // Obtain an error string from system error code if (FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS, NULL, GetLastError(), 
MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), OFreinterpret_cast(LPTSTR, &errBuf), 0, NULL) > 0) { err = (OFstatic_cast(const char *, errBuf)); } else err = "Unknown Winsock error code"; LocalFree(errBuf); DCMNET_DEBUG("Windows Socket error while waiting for incoming network data: " << err); #else // POSIX interface char buf[256]; DCMNET_DEBUG("Error while waiting for incoming network data: " << OFStandard::strerror(errno, buf, 256)); #endif } else if (selectReturnValue == 0) { DCMNET_TRACE("Timeout while waiting for incoming network data"); } else { // This function is only meant to be used for return values <= 0, handle // normal case anyway DCMNET_TRACE("Receiving data via select()"); } }
DU_getStringDOElement(DcmItem *obj, DcmTagKey t, char *s, size_t bufsize)
{
    DcmByteString *elem;
    DcmStack stack;
    OFCondition ec = EC_Normal;
    char* aString;

    ec = obj->search(t, stack);
    elem = (DcmByteString*) stack.top();
    if (ec == EC_Normal && elem != NULL) {
        if (elem->getLength() == 0) {
            s[0] = '\0';
        } else {
            ec = elem->getString(aString);
            OFStandard::strlcpy(s, aString, bufsize);
        }
    }
    return (ec == EC_Normal);
}
DU_getStringDOElement(DcmItem *obj, DcmTagKey t, char *s, size_t bufsize)
{
    DcmByteString *elem;
    DcmStack stack;
    OFCondition ec = EC_Normal;
    char* aString;

    ec = obj->search(t, stack);
    elem = (DcmByteString*) stack.top();
    if (ec == EC_Normal && elem != NULL) {
        if (elem->getLength() == 0) {
            s[0] = '\0';
        } else {
            ec = elem->getString(aString);
            if (ec == EC_Normal)
                OFStandard::strlcpy(s, aString, bufsize);
        }
    }
    return (ec == EC_Normal);
}
{'added': [(174, ' if (ec == EC_Normal)'), (175, ' OFStandard::strlcpy(s, aString, bufsize);')], 'deleted': [(174, ' OFStandard::strlcpy(s, aString, bufsize);')]}
2
1
710
2818
https://github.com/DCMTK/dcmtk
CVE-2021-41689
['CWE-476']
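The CWE-476 label matches what the func_before/func_after pair above shows: the unpatched DU_getStringDOElement calls elem->getString(aString) and then passes aString to OFStandard::strlcpy unconditionally, so a failed getString() can leave aString NULL and strlcpy dereferences it; the patched variant copies only when ec == EC_Normal. Below is a minimal self-contained C++ sketch of that guard pattern. The getter and names are hypothetical stand-ins, not the DCMTK API; the actual fix tests only the OFCondition, while the sketch also checks the pointer for clarity.

// Minimal sketch of the CVE-2021-41689 guard pattern. getStringMayFail() is a
// hypothetical stand-in for DcmByteString::getString(), which can fail and
// leave its out-parameter NULL; it is not the DCMTK API itself.
#include <cstddef>
#include <cstdio>

static bool getStringMayFail(char **out)
{
    *out = nullptr;        // simulate a failed getString(): out stays NULL
    return false;
}

static void copyGuarded(char *dst, std::size_t dstsize)
{
    char *src = nullptr;
    const bool ok = getStringMayFail(&src);
    if (ok && src != nullptr)
        std::snprintf(dst, dstsize, "%s", src);  // copy only on success
    else
        dst[0] = '\0';     // the pre-fix code would copy from NULL here
}

int main()
{
    char buf[16];
    copyGuarded(buf, sizeof buf);
    std::printf("result: '%s'\n", buf);
    return 0;
}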
nf_conntrack_reasm.c
nf_ct_frag6_gather
/* * IPv6 fragment reassembly for connection tracking * * Copyright (C)2004 USAGI/WIDE Project * * Author: * Yasuyuki Kozakai @USAGI <yasuyuki.kozakai@toshiba.co.jp> * * Based on: net/ipv6/reassembly.c * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #define pr_fmt(fmt) "IPv6-nf: " fmt #include <linux/errno.h> #include <linux/types.h> #include <linux/string.h> #include <linux/socket.h> #include <linux/sockios.h> #include <linux/jiffies.h> #include <linux/net.h> #include <linux/list.h> #include <linux/netdevice.h> #include <linux/in6.h> #include <linux/ipv6.h> #include <linux/icmpv6.h> #include <linux/random.h> #include <linux/slab.h> #include <net/sock.h> #include <net/snmp.h> #include <net/inet_frag.h> #include <net/ipv6.h> #include <net/protocol.h> #include <net/transp_v6.h> #include <net/rawv6.h> #include <net/ndisc.h> #include <net/addrconf.h> #include <net/inet_ecn.h> #include <net/netfilter/ipv6/nf_conntrack_ipv6.h> #include <linux/sysctl.h> #include <linux/netfilter.h> #include <linux/netfilter_ipv6.h> #include <linux/kernel.h> #include <linux/module.h> #include <net/netfilter/ipv6/nf_defrag_ipv6.h> static const char nf_frags_cache_name[] = "nf-frags"; struct nf_ct_frag6_skb_cb { struct inet6_skb_parm h; int offset; }; #define NFCT_FRAG6_CB(skb) ((struct nf_ct_frag6_skb_cb *)((skb)->cb)) static struct inet_frags nf_frags; #ifdef CONFIG_SYSCTL static int zero; static struct ctl_table nf_ct_frag6_sysctl_table[] = { { .procname = "nf_conntrack_frag6_timeout", .data = &init_net.nf_frag.frags.timeout, .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, { .procname = "nf_conntrack_frag6_low_thresh", .data = &init_net.nf_frag.frags.low_thresh, .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &zero, .extra2 = &init_net.nf_frag.frags.high_thresh }, { .procname = "nf_conntrack_frag6_high_thresh", .data = &init_net.nf_frag.frags.high_thresh, .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &init_net.nf_frag.frags.low_thresh }, { } }; static int nf_ct_frag6_sysctl_register(struct net *net) { struct ctl_table *table; struct ctl_table_header *hdr; table = nf_ct_frag6_sysctl_table; if (!net_eq(net, &init_net)) { table = kmemdup(table, sizeof(nf_ct_frag6_sysctl_table), GFP_KERNEL); if (table == NULL) goto err_alloc; table[0].data = &net->nf_frag.frags.timeout; table[1].data = &net->nf_frag.frags.low_thresh; table[1].extra2 = &net->nf_frag.frags.high_thresh; table[2].data = &net->nf_frag.frags.high_thresh; table[2].extra1 = &net->nf_frag.frags.low_thresh; table[2].extra2 = &init_net.nf_frag.frags.high_thresh; } hdr = register_net_sysctl(net, "net/netfilter", table); if (hdr == NULL) goto err_reg; net->nf_frag.sysctl.frags_hdr = hdr; return 0; err_reg: if (!net_eq(net, &init_net)) kfree(table); err_alloc: return -ENOMEM; } static void __net_exit nf_ct_frags6_sysctl_unregister(struct net *net) { struct ctl_table *table; table = net->nf_frag.sysctl.frags_hdr->ctl_table_arg; unregister_net_sysctl_table(net->nf_frag.sysctl.frags_hdr); if (!net_eq(net, &init_net)) kfree(table); } #else static int nf_ct_frag6_sysctl_register(struct net *net) { return 0; } static void __net_exit nf_ct_frags6_sysctl_unregister(struct net *net) { } #endif static inline u8 ip6_frag_ecn(const struct 
ipv6hdr *ipv6h) { return 1 << (ipv6_get_dsfield(ipv6h) & INET_ECN_MASK); } static unsigned int nf_hash_frag(__be32 id, const struct in6_addr *saddr, const struct in6_addr *daddr) { net_get_random_once(&nf_frags.rnd, sizeof(nf_frags.rnd)); return jhash_3words(ipv6_addr_hash(saddr), ipv6_addr_hash(daddr), (__force u32)id, nf_frags.rnd); } static unsigned int nf_hashfn(const struct inet_frag_queue *q) { const struct frag_queue *nq; nq = container_of(q, struct frag_queue, q); return nf_hash_frag(nq->id, &nq->saddr, &nq->daddr); } static void nf_ct_frag6_expire(unsigned long data) { struct frag_queue *fq; struct net *net; fq = container_of((struct inet_frag_queue *)data, struct frag_queue, q); net = container_of(fq->q.net, struct net, nf_frag.frags); ip6_expire_frag_queue(net, fq, &nf_frags); } /* Creation primitives. */ static inline struct frag_queue *fq_find(struct net *net, __be32 id, u32 user, struct in6_addr *src, struct in6_addr *dst, int iif, u8 ecn) { struct inet_frag_queue *q; struct ip6_create_arg arg; unsigned int hash; arg.id = id; arg.user = user; arg.src = src; arg.dst = dst; arg.iif = iif; arg.ecn = ecn; local_bh_disable(); hash = nf_hash_frag(id, src, dst); q = inet_frag_find(&net->nf_frag.frags, &nf_frags, &arg, hash); local_bh_enable(); if (IS_ERR_OR_NULL(q)) { inet_frag_maybe_warn_overflow(q, pr_fmt()); return NULL; } return container_of(q, struct frag_queue, q); } static int nf_ct_frag6_queue(struct frag_queue *fq, struct sk_buff *skb, const struct frag_hdr *fhdr, int nhoff) { struct sk_buff *prev, *next; unsigned int payload_len; int offset, end; u8 ecn; if (fq->q.flags & INET_FRAG_COMPLETE) { pr_debug("Already completed\n"); goto err; } payload_len = ntohs(ipv6_hdr(skb)->payload_len); offset = ntohs(fhdr->frag_off) & ~0x7; end = offset + (payload_len - ((u8 *)(fhdr + 1) - (u8 *)(ipv6_hdr(skb) + 1))); if ((unsigned int)end > IPV6_MAXPLEN) { pr_debug("offset is too large.\n"); return -1; } ecn = ip6_frag_ecn(ipv6_hdr(skb)); if (skb->ip_summed == CHECKSUM_COMPLETE) { const unsigned char *nh = skb_network_header(skb); skb->csum = csum_sub(skb->csum, csum_partial(nh, (u8 *)(fhdr + 1) - nh, 0)); } /* Is this the final fragment? */ if (!(fhdr->frag_off & htons(IP6_MF))) { /* If we already have some bits beyond end * or have different end, the segment is corrupted. */ if (end < fq->q.len || ((fq->q.flags & INET_FRAG_LAST_IN) && end != fq->q.len)) { pr_debug("already received last fragment\n"); goto err; } fq->q.flags |= INET_FRAG_LAST_IN; fq->q.len = end; } else { /* Check if the fragment is rounded to 8 bytes. * Required by the RFC. */ if (end & 0x7) { /* RFC2460 says always send parameter problem in * this case. -DaveM */ pr_debug("end of fragment not rounded to 8 bytes.\n"); return -1; } if (end > fq->q.len) { /* Some bits beyond end -> corruption. */ if (fq->q.flags & INET_FRAG_LAST_IN) { pr_debug("last packet already reached.\n"); goto err; } fq->q.len = end; } } if (end == offset) goto err; /* Point into the IP datagram 'data' part. */ if (!pskb_pull(skb, (u8 *) (fhdr + 1) - skb->data)) { pr_debug("queue: message is too short.\n"); goto err; } if (pskb_trim_rcsum(skb, end - offset)) { pr_debug("Can't trim\n"); goto err; } /* Find out which fragments are in front and at the back of us * in the chain of fragments so far. We must know where to put * this fragment, right? 
*/ prev = fq->q.fragments_tail; if (!prev || NFCT_FRAG6_CB(prev)->offset < offset) { next = NULL; goto found; } prev = NULL; for (next = fq->q.fragments; next != NULL; next = next->next) { if (NFCT_FRAG6_CB(next)->offset >= offset) break; /* bingo! */ prev = next; } found: /* RFC5722, Section 4: * When reassembling an IPv6 datagram, if * one or more its constituent fragments is determined to be an * overlapping fragment, the entire datagram (and any constituent * fragments, including those not yet received) MUST be silently * discarded. */ /* Check for overlap with preceding fragment. */ if (prev && (NFCT_FRAG6_CB(prev)->offset + prev->len) > offset) goto discard_fq; /* Look for overlap with succeeding segment. */ if (next && NFCT_FRAG6_CB(next)->offset < end) goto discard_fq; NFCT_FRAG6_CB(skb)->offset = offset; /* Insert this fragment in the chain of fragments. */ skb->next = next; if (!next) fq->q.fragments_tail = skb; if (prev) prev->next = skb; else fq->q.fragments = skb; if (skb->dev) { fq->iif = skb->dev->ifindex; skb->dev = NULL; } fq->q.stamp = skb->tstamp; fq->q.meat += skb->len; fq->ecn |= ecn; if (payload_len > fq->q.max_size) fq->q.max_size = payload_len; add_frag_mem_limit(fq->q.net, skb->truesize); /* The first fragment. * nhoffset is obtained from the first fragment, of course. */ if (offset == 0) { fq->nhoffset = nhoff; fq->q.flags |= INET_FRAG_FIRST_IN; } return 0; discard_fq: inet_frag_kill(&fq->q, &nf_frags); err: return -1; } /* * Check if this packet is complete. * * It is called with locked fq, and caller must check that * queue is eligible for reassembly i.e. it is not COMPLETE, * the last and the first frames arrived and all the bits are here. * * returns true if *prev skb has been transformed into the reassembled * skb, false otherwise. */ static bool nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *prev, struct net_device *dev) { struct sk_buff *fp, *head = fq->q.fragments; int payload_len; u8 ecn; inet_frag_kill(&fq->q, &nf_frags); WARN_ON(head == NULL); WARN_ON(NFCT_FRAG6_CB(head)->offset != 0); ecn = ip_frag_ecn_table[fq->ecn]; if (unlikely(ecn == 0xff)) return false; /* Unfragmented part is taken from the first segment. */ payload_len = ((head->data - skb_network_header(head)) - sizeof(struct ipv6hdr) + fq->q.len - sizeof(struct frag_hdr)); if (payload_len > IPV6_MAXPLEN) { net_dbg_ratelimited("nf_ct_frag6_reasm: payload len = %d\n", payload_len); return false; } /* Head of list must not be cloned. */ if (skb_unclone(head, GFP_ATOMIC)) return false; /* If the first fragment is fragmented itself, we split * it to two chunks: the first with data and paged part * and the second, holding only fragments. */ if (skb_has_frag_list(head)) { struct sk_buff *clone; int i, plen = 0; clone = alloc_skb(0, GFP_ATOMIC); if (clone == NULL) return false; clone->next = head->next; head->next = clone; skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list; skb_frag_list_init(head); for (i = 0; i < skb_shinfo(head)->nr_frags; i++) plen += skb_frag_size(&skb_shinfo(head)->frags[i]); clone->len = clone->data_len = head->data_len - plen; head->data_len -= clone->len; head->len -= clone->len; clone->csum = 0; clone->ip_summed = head->ip_summed; add_frag_mem_limit(fq->q.net, clone->truesize); } /* morph head into last received skb: prev. * * This allows callers of ipv6 conntrack defrag to continue * to use the last skb(frag) passed into the reasm engine. * The last skb frag 'silently' turns into the full reassembled skb. 
* * Since prev is also part of q->fragments we have to clone it first. */ if (head != prev) { struct sk_buff *iter; fp = skb_clone(prev, GFP_ATOMIC); if (!fp) return false; fp->next = prev->next; iter = head; while (iter) { if (iter->next == prev) { iter->next = fp; break; } iter = iter->next; } skb_morph(prev, head); prev->next = head->next; consume_skb(head); head = prev; } /* We have to remove fragment header from datagram and to relocate * header in order to calculate ICV correctly. */ skb_network_header(head)[fq->nhoffset] = skb_transport_header(head)[0]; memmove(head->head + sizeof(struct frag_hdr), head->head, (head->data - head->head) - sizeof(struct frag_hdr)); head->mac_header += sizeof(struct frag_hdr); head->network_header += sizeof(struct frag_hdr); skb_shinfo(head)->frag_list = head->next; skb_reset_transport_header(head); skb_push(head, head->data - skb_network_header(head)); for (fp = head->next; fp; fp = fp->next) { head->data_len += fp->len; head->len += fp->len; if (head->ip_summed != fp->ip_summed) head->ip_summed = CHECKSUM_NONE; else if (head->ip_summed == CHECKSUM_COMPLETE) head->csum = csum_add(head->csum, fp->csum); head->truesize += fp->truesize; } sub_frag_mem_limit(fq->q.net, head->truesize); head->ignore_df = 1; head->next = NULL; head->dev = dev; head->tstamp = fq->q.stamp; ipv6_hdr(head)->payload_len = htons(payload_len); ipv6_change_dsfield(ipv6_hdr(head), 0xff, ecn); IP6CB(head)->frag_max_size = sizeof(struct ipv6hdr) + fq->q.max_size; /* Yes, and fold redundant checksum back. 8) */ if (head->ip_summed == CHECKSUM_COMPLETE) head->csum = csum_partial(skb_network_header(head), skb_network_header_len(head), head->csum); fq->q.fragments = NULL; fq->q.fragments_tail = NULL; return true; } /* * find the header just before Fragment Header. * * if success return 0 and set ... * (*prevhdrp): the value of "Next Header Field" in the header * just before Fragment Header. * (*prevhoff): the offset of "Next Header Field" in the header * just before Fragment Header. * (*fhoff) : the offset of Fragment Header. * * Based on ipv6_skip_hdr() in net/ipv6/exthdr.c * */ static int find_prev_fhdr(struct sk_buff *skb, u8 *prevhdrp, int *prevhoff, int *fhoff) { u8 nexthdr = ipv6_hdr(skb)->nexthdr; const int netoff = skb_network_offset(skb); u8 prev_nhoff = netoff + offsetof(struct ipv6hdr, nexthdr); int start = netoff + sizeof(struct ipv6hdr); int len = skb->len - start; u8 prevhdr = NEXTHDR_IPV6; while (nexthdr != NEXTHDR_FRAGMENT) { struct ipv6_opt_hdr hdr; int hdrlen; if (!ipv6_ext_hdr(nexthdr)) { return -1; } if (nexthdr == NEXTHDR_NONE) { pr_debug("next header is none\n"); return -1; } if (len < (int)sizeof(struct ipv6_opt_hdr)) { pr_debug("too short\n"); return -1; } if (skb_copy_bits(skb, start, &hdr, sizeof(hdr))) BUG(); if (nexthdr == NEXTHDR_AUTH) hdrlen = (hdr.hdrlen+2)<<2; else hdrlen = ipv6_optlen(&hdr); prevhdr = nexthdr; prev_nhoff = start; nexthdr = hdr.nexthdr; len -= hdrlen; start += hdrlen; } if (len < 0) return -1; *prevhdrp = prevhdr; *prevhoff = prev_nhoff; *fhoff = start; return 0; } int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user) { struct net_device *dev = skb->dev; int fhoff, nhoff, ret; struct frag_hdr *fhdr; struct frag_queue *fq; struct ipv6hdr *hdr; u8 prevhdr; /* Jumbo payload inhibits frag. 
header */ if (ipv6_hdr(skb)->payload_len == 0) { pr_debug("payload len = 0\n"); return -EINVAL; } if (find_prev_fhdr(skb, &prevhdr, &nhoff, &fhoff) < 0) return -EINVAL; if (!pskb_may_pull(skb, fhoff + sizeof(*fhdr))) return -ENOMEM; skb_set_transport_header(skb, fhoff); hdr = ipv6_hdr(skb); fhdr = (struct frag_hdr *)skb_transport_header(skb); fq = fq_find(net, fhdr->identification, user, &hdr->saddr, &hdr->daddr, skb->dev ? skb->dev->ifindex : 0, ip6_frag_ecn(hdr)); if (fq == NULL) { pr_debug("Can't find and can't create new queue\n"); return -ENOMEM; } spin_lock_bh(&fq->q.lock); if (nf_ct_frag6_queue(fq, skb, fhdr, nhoff) < 0) { ret = -EINVAL; goto out_unlock; } /* after queue has assumed skb ownership, only 0 or -EINPROGRESS * must be returned. */ ret = -EINPROGRESS; if (fq->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) && fq->q.meat == fq->q.len && nf_ct_frag6_reasm(fq, skb, dev)) ret = 0; out_unlock: spin_unlock_bh(&fq->q.lock); inet_frag_put(&fq->q, &nf_frags); return ret; } EXPORT_SYMBOL_GPL(nf_ct_frag6_gather); static int nf_ct_net_init(struct net *net) { int res; net->nf_frag.frags.high_thresh = IPV6_FRAG_HIGH_THRESH; net->nf_frag.frags.low_thresh = IPV6_FRAG_LOW_THRESH; net->nf_frag.frags.timeout = IPV6_FRAG_TIMEOUT; res = inet_frags_init_net(&net->nf_frag.frags); if (res) return res; res = nf_ct_frag6_sysctl_register(net); if (res) inet_frags_uninit_net(&net->nf_frag.frags); return res; } static void nf_ct_net_exit(struct net *net) { nf_ct_frags6_sysctl_unregister(net); inet_frags_exit_net(&net->nf_frag.frags, &nf_frags); } static struct pernet_operations nf_ct_net_ops = { .init = nf_ct_net_init, .exit = nf_ct_net_exit, }; int nf_ct_frag6_init(void) { int ret = 0; nf_frags.hashfn = nf_hashfn; nf_frags.constructor = ip6_frag_init; nf_frags.destructor = NULL; nf_frags.qsize = sizeof(struct frag_queue); nf_frags.match = ip6_frag_match; nf_frags.frag_expire = nf_ct_frag6_expire; nf_frags.frags_cache_name = nf_frags_cache_name; ret = inet_frags_init(&nf_frags); if (ret) goto out; ret = register_pernet_subsys(&nf_ct_net_ops); if (ret) inet_frags_fini(&nf_frags); out: return ret; } void nf_ct_frag6_cleanup(void) { unregister_pernet_subsys(&nf_ct_net_ops); inet_frags_fini(&nf_frags); }
/* * IPv6 fragment reassembly for connection tracking * * Copyright (C)2004 USAGI/WIDE Project * * Author: * Yasuyuki Kozakai @USAGI <yasuyuki.kozakai@toshiba.co.jp> * * Based on: net/ipv6/reassembly.c * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #define pr_fmt(fmt) "IPv6-nf: " fmt #include <linux/errno.h> #include <linux/types.h> #include <linux/string.h> #include <linux/socket.h> #include <linux/sockios.h> #include <linux/jiffies.h> #include <linux/net.h> #include <linux/list.h> #include <linux/netdevice.h> #include <linux/in6.h> #include <linux/ipv6.h> #include <linux/icmpv6.h> #include <linux/random.h> #include <linux/slab.h> #include <net/sock.h> #include <net/snmp.h> #include <net/inet_frag.h> #include <net/ipv6.h> #include <net/protocol.h> #include <net/transp_v6.h> #include <net/rawv6.h> #include <net/ndisc.h> #include <net/addrconf.h> #include <net/inet_ecn.h> #include <net/netfilter/ipv6/nf_conntrack_ipv6.h> #include <linux/sysctl.h> #include <linux/netfilter.h> #include <linux/netfilter_ipv6.h> #include <linux/kernel.h> #include <linux/module.h> #include <net/netfilter/ipv6/nf_defrag_ipv6.h> static const char nf_frags_cache_name[] = "nf-frags"; struct nf_ct_frag6_skb_cb { struct inet6_skb_parm h; int offset; }; #define NFCT_FRAG6_CB(skb) ((struct nf_ct_frag6_skb_cb *)((skb)->cb)) static struct inet_frags nf_frags; #ifdef CONFIG_SYSCTL static int zero; static struct ctl_table nf_ct_frag6_sysctl_table[] = { { .procname = "nf_conntrack_frag6_timeout", .data = &init_net.nf_frag.frags.timeout, .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, { .procname = "nf_conntrack_frag6_low_thresh", .data = &init_net.nf_frag.frags.low_thresh, .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &zero, .extra2 = &init_net.nf_frag.frags.high_thresh }, { .procname = "nf_conntrack_frag6_high_thresh", .data = &init_net.nf_frag.frags.high_thresh, .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &init_net.nf_frag.frags.low_thresh }, { } }; static int nf_ct_frag6_sysctl_register(struct net *net) { struct ctl_table *table; struct ctl_table_header *hdr; table = nf_ct_frag6_sysctl_table; if (!net_eq(net, &init_net)) { table = kmemdup(table, sizeof(nf_ct_frag6_sysctl_table), GFP_KERNEL); if (table == NULL) goto err_alloc; table[0].data = &net->nf_frag.frags.timeout; table[1].data = &net->nf_frag.frags.low_thresh; table[1].extra2 = &net->nf_frag.frags.high_thresh; table[2].data = &net->nf_frag.frags.high_thresh; table[2].extra1 = &net->nf_frag.frags.low_thresh; table[2].extra2 = &init_net.nf_frag.frags.high_thresh; } hdr = register_net_sysctl(net, "net/netfilter", table); if (hdr == NULL) goto err_reg; net->nf_frag.sysctl.frags_hdr = hdr; return 0; err_reg: if (!net_eq(net, &init_net)) kfree(table); err_alloc: return -ENOMEM; } static void __net_exit nf_ct_frags6_sysctl_unregister(struct net *net) { struct ctl_table *table; table = net->nf_frag.sysctl.frags_hdr->ctl_table_arg; unregister_net_sysctl_table(net->nf_frag.sysctl.frags_hdr); if (!net_eq(net, &init_net)) kfree(table); } #else static int nf_ct_frag6_sysctl_register(struct net *net) { return 0; } static void __net_exit nf_ct_frags6_sysctl_unregister(struct net *net) { } #endif static inline u8 ip6_frag_ecn(const struct 
ipv6hdr *ipv6h) { return 1 << (ipv6_get_dsfield(ipv6h) & INET_ECN_MASK); } static unsigned int nf_hash_frag(__be32 id, const struct in6_addr *saddr, const struct in6_addr *daddr) { net_get_random_once(&nf_frags.rnd, sizeof(nf_frags.rnd)); return jhash_3words(ipv6_addr_hash(saddr), ipv6_addr_hash(daddr), (__force u32)id, nf_frags.rnd); } static unsigned int nf_hashfn(const struct inet_frag_queue *q) { const struct frag_queue *nq; nq = container_of(q, struct frag_queue, q); return nf_hash_frag(nq->id, &nq->saddr, &nq->daddr); } static void nf_ct_frag6_expire(unsigned long data) { struct frag_queue *fq; struct net *net; fq = container_of((struct inet_frag_queue *)data, struct frag_queue, q); net = container_of(fq->q.net, struct net, nf_frag.frags); ip6_expire_frag_queue(net, fq, &nf_frags); } /* Creation primitives. */ static inline struct frag_queue *fq_find(struct net *net, __be32 id, u32 user, struct in6_addr *src, struct in6_addr *dst, int iif, u8 ecn) { struct inet_frag_queue *q; struct ip6_create_arg arg; unsigned int hash; arg.id = id; arg.user = user; arg.src = src; arg.dst = dst; arg.iif = iif; arg.ecn = ecn; local_bh_disable(); hash = nf_hash_frag(id, src, dst); q = inet_frag_find(&net->nf_frag.frags, &nf_frags, &arg, hash); local_bh_enable(); if (IS_ERR_OR_NULL(q)) { inet_frag_maybe_warn_overflow(q, pr_fmt()); return NULL; } return container_of(q, struct frag_queue, q); } static int nf_ct_frag6_queue(struct frag_queue *fq, struct sk_buff *skb, const struct frag_hdr *fhdr, int nhoff) { struct sk_buff *prev, *next; unsigned int payload_len; int offset, end; u8 ecn; if (fq->q.flags & INET_FRAG_COMPLETE) { pr_debug("Already completed\n"); goto err; } payload_len = ntohs(ipv6_hdr(skb)->payload_len); offset = ntohs(fhdr->frag_off) & ~0x7; end = offset + (payload_len - ((u8 *)(fhdr + 1) - (u8 *)(ipv6_hdr(skb) + 1))); if ((unsigned int)end > IPV6_MAXPLEN) { pr_debug("offset is too large.\n"); return -1; } ecn = ip6_frag_ecn(ipv6_hdr(skb)); if (skb->ip_summed == CHECKSUM_COMPLETE) { const unsigned char *nh = skb_network_header(skb); skb->csum = csum_sub(skb->csum, csum_partial(nh, (u8 *)(fhdr + 1) - nh, 0)); } /* Is this the final fragment? */ if (!(fhdr->frag_off & htons(IP6_MF))) { /* If we already have some bits beyond end * or have different end, the segment is corrupted. */ if (end < fq->q.len || ((fq->q.flags & INET_FRAG_LAST_IN) && end != fq->q.len)) { pr_debug("already received last fragment\n"); goto err; } fq->q.flags |= INET_FRAG_LAST_IN; fq->q.len = end; } else { /* Check if the fragment is rounded to 8 bytes. * Required by the RFC. */ if (end & 0x7) { /* RFC2460 says always send parameter problem in * this case. -DaveM */ pr_debug("end of fragment not rounded to 8 bytes.\n"); return -1; } if (end > fq->q.len) { /* Some bits beyond end -> corruption. */ if (fq->q.flags & INET_FRAG_LAST_IN) { pr_debug("last packet already reached.\n"); goto err; } fq->q.len = end; } } if (end == offset) goto err; /* Point into the IP datagram 'data' part. */ if (!pskb_pull(skb, (u8 *) (fhdr + 1) - skb->data)) { pr_debug("queue: message is too short.\n"); goto err; } if (pskb_trim_rcsum(skb, end - offset)) { pr_debug("Can't trim\n"); goto err; } /* Find out which fragments are in front and at the back of us * in the chain of fragments so far. We must know where to put * this fragment, right? 
*/ prev = fq->q.fragments_tail; if (!prev || NFCT_FRAG6_CB(prev)->offset < offset) { next = NULL; goto found; } prev = NULL; for (next = fq->q.fragments; next != NULL; next = next->next) { if (NFCT_FRAG6_CB(next)->offset >= offset) break; /* bingo! */ prev = next; } found: /* RFC5722, Section 4: * When reassembling an IPv6 datagram, if * one or more its constituent fragments is determined to be an * overlapping fragment, the entire datagram (and any constituent * fragments, including those not yet received) MUST be silently * discarded. */ /* Check for overlap with preceding fragment. */ if (prev && (NFCT_FRAG6_CB(prev)->offset + prev->len) > offset) goto discard_fq; /* Look for overlap with succeeding segment. */ if (next && NFCT_FRAG6_CB(next)->offset < end) goto discard_fq; NFCT_FRAG6_CB(skb)->offset = offset; /* Insert this fragment in the chain of fragments. */ skb->next = next; if (!next) fq->q.fragments_tail = skb; if (prev) prev->next = skb; else fq->q.fragments = skb; if (skb->dev) { fq->iif = skb->dev->ifindex; skb->dev = NULL; } fq->q.stamp = skb->tstamp; fq->q.meat += skb->len; fq->ecn |= ecn; if (payload_len > fq->q.max_size) fq->q.max_size = payload_len; add_frag_mem_limit(fq->q.net, skb->truesize); /* The first fragment. * nhoffset is obtained from the first fragment, of course. */ if (offset == 0) { fq->nhoffset = nhoff; fq->q.flags |= INET_FRAG_FIRST_IN; } return 0; discard_fq: inet_frag_kill(&fq->q, &nf_frags); err: return -1; } /* * Check if this packet is complete. * * It is called with locked fq, and caller must check that * queue is eligible for reassembly i.e. it is not COMPLETE, * the last and the first frames arrived and all the bits are here. * * returns true if *prev skb has been transformed into the reassembled * skb, false otherwise. */ static bool nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *prev, struct net_device *dev) { struct sk_buff *fp, *head = fq->q.fragments; int payload_len; u8 ecn; inet_frag_kill(&fq->q, &nf_frags); WARN_ON(head == NULL); WARN_ON(NFCT_FRAG6_CB(head)->offset != 0); ecn = ip_frag_ecn_table[fq->ecn]; if (unlikely(ecn == 0xff)) return false; /* Unfragmented part is taken from the first segment. */ payload_len = ((head->data - skb_network_header(head)) - sizeof(struct ipv6hdr) + fq->q.len - sizeof(struct frag_hdr)); if (payload_len > IPV6_MAXPLEN) { net_dbg_ratelimited("nf_ct_frag6_reasm: payload len = %d\n", payload_len); return false; } /* Head of list must not be cloned. */ if (skb_unclone(head, GFP_ATOMIC)) return false; /* If the first fragment is fragmented itself, we split * it to two chunks: the first with data and paged part * and the second, holding only fragments. */ if (skb_has_frag_list(head)) { struct sk_buff *clone; int i, plen = 0; clone = alloc_skb(0, GFP_ATOMIC); if (clone == NULL) return false; clone->next = head->next; head->next = clone; skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list; skb_frag_list_init(head); for (i = 0; i < skb_shinfo(head)->nr_frags; i++) plen += skb_frag_size(&skb_shinfo(head)->frags[i]); clone->len = clone->data_len = head->data_len - plen; head->data_len -= clone->len; head->len -= clone->len; clone->csum = 0; clone->ip_summed = head->ip_summed; add_frag_mem_limit(fq->q.net, clone->truesize); } /* morph head into last received skb: prev. * * This allows callers of ipv6 conntrack defrag to continue * to use the last skb(frag) passed into the reasm engine. * The last skb frag 'silently' turns into the full reassembled skb. 
* * Since prev is also part of q->fragments we have to clone it first. */ if (head != prev) { struct sk_buff *iter; fp = skb_clone(prev, GFP_ATOMIC); if (!fp) return false; fp->next = prev->next; iter = head; while (iter) { if (iter->next == prev) { iter->next = fp; break; } iter = iter->next; } skb_morph(prev, head); prev->next = head->next; consume_skb(head); head = prev; } /* We have to remove fragment header from datagram and to relocate * header in order to calculate ICV correctly. */ skb_network_header(head)[fq->nhoffset] = skb_transport_header(head)[0]; memmove(head->head + sizeof(struct frag_hdr), head->head, (head->data - head->head) - sizeof(struct frag_hdr)); head->mac_header += sizeof(struct frag_hdr); head->network_header += sizeof(struct frag_hdr); skb_shinfo(head)->frag_list = head->next; skb_reset_transport_header(head); skb_push(head, head->data - skb_network_header(head)); for (fp = head->next; fp; fp = fp->next) { head->data_len += fp->len; head->len += fp->len; if (head->ip_summed != fp->ip_summed) head->ip_summed = CHECKSUM_NONE; else if (head->ip_summed == CHECKSUM_COMPLETE) head->csum = csum_add(head->csum, fp->csum); head->truesize += fp->truesize; } sub_frag_mem_limit(fq->q.net, head->truesize); head->ignore_df = 1; head->next = NULL; head->dev = dev; head->tstamp = fq->q.stamp; ipv6_hdr(head)->payload_len = htons(payload_len); ipv6_change_dsfield(ipv6_hdr(head), 0xff, ecn); IP6CB(head)->frag_max_size = sizeof(struct ipv6hdr) + fq->q.max_size; /* Yes, and fold redundant checksum back. 8) */ if (head->ip_summed == CHECKSUM_COMPLETE) head->csum = csum_partial(skb_network_header(head), skb_network_header_len(head), head->csum); fq->q.fragments = NULL; fq->q.fragments_tail = NULL; return true; } /* * find the header just before Fragment Header. * * if success return 0 and set ... * (*prevhdrp): the value of "Next Header Field" in the header * just before Fragment Header. * (*prevhoff): the offset of "Next Header Field" in the header * just before Fragment Header. * (*fhoff) : the offset of Fragment Header. * * Based on ipv6_skip_hdr() in net/ipv6/exthdr.c * */ static int find_prev_fhdr(struct sk_buff *skb, u8 *prevhdrp, int *prevhoff, int *fhoff) { u8 nexthdr = ipv6_hdr(skb)->nexthdr; const int netoff = skb_network_offset(skb); u8 prev_nhoff = netoff + offsetof(struct ipv6hdr, nexthdr); int start = netoff + sizeof(struct ipv6hdr); int len = skb->len - start; u8 prevhdr = NEXTHDR_IPV6; while (nexthdr != NEXTHDR_FRAGMENT) { struct ipv6_opt_hdr hdr; int hdrlen; if (!ipv6_ext_hdr(nexthdr)) { return -1; } if (nexthdr == NEXTHDR_NONE) { pr_debug("next header is none\n"); return -1; } if (len < (int)sizeof(struct ipv6_opt_hdr)) { pr_debug("too short\n"); return -1; } if (skb_copy_bits(skb, start, &hdr, sizeof(hdr))) BUG(); if (nexthdr == NEXTHDR_AUTH) hdrlen = (hdr.hdrlen+2)<<2; else hdrlen = ipv6_optlen(&hdr); prevhdr = nexthdr; prev_nhoff = start; nexthdr = hdr.nexthdr; len -= hdrlen; start += hdrlen; } if (len < 0) return -1; *prevhdrp = prevhdr; *prevhoff = prev_nhoff; *fhoff = start; return 0; } int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user) { struct net_device *dev = skb->dev; int fhoff, nhoff, ret; struct frag_hdr *fhdr; struct frag_queue *fq; struct ipv6hdr *hdr; u8 prevhdr; /* Jumbo payload inhibits frag. 
header */ if (ipv6_hdr(skb)->payload_len == 0) { pr_debug("payload len = 0\n"); return 0; } if (find_prev_fhdr(skb, &prevhdr, &nhoff, &fhoff) < 0) return 0; if (!pskb_may_pull(skb, fhoff + sizeof(*fhdr))) return -ENOMEM; skb_set_transport_header(skb, fhoff); hdr = ipv6_hdr(skb); fhdr = (struct frag_hdr *)skb_transport_header(skb); fq = fq_find(net, fhdr->identification, user, &hdr->saddr, &hdr->daddr, skb->dev ? skb->dev->ifindex : 0, ip6_frag_ecn(hdr)); if (fq == NULL) { pr_debug("Can't find and can't create new queue\n"); return -ENOMEM; } spin_lock_bh(&fq->q.lock); if (nf_ct_frag6_queue(fq, skb, fhdr, nhoff) < 0) { ret = -EINVAL; goto out_unlock; } /* after queue has assumed skb ownership, only 0 or -EINPROGRESS * must be returned. */ ret = -EINPROGRESS; if (fq->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) && fq->q.meat == fq->q.len && nf_ct_frag6_reasm(fq, skb, dev)) ret = 0; out_unlock: spin_unlock_bh(&fq->q.lock); inet_frag_put(&fq->q, &nf_frags); return ret; } EXPORT_SYMBOL_GPL(nf_ct_frag6_gather); static int nf_ct_net_init(struct net *net) { int res; net->nf_frag.frags.high_thresh = IPV6_FRAG_HIGH_THRESH; net->nf_frag.frags.low_thresh = IPV6_FRAG_LOW_THRESH; net->nf_frag.frags.timeout = IPV6_FRAG_TIMEOUT; res = inet_frags_init_net(&net->nf_frag.frags); if (res) return res; res = nf_ct_frag6_sysctl_register(net); if (res) inet_frags_uninit_net(&net->nf_frag.frags); return res; } static void nf_ct_net_exit(struct net *net) { nf_ct_frags6_sysctl_unregister(net); inet_frags_exit_net(&net->nf_frag.frags, &nf_frags); } static struct pernet_operations nf_ct_net_ops = { .init = nf_ct_net_init, .exit = nf_ct_net_exit, }; int nf_ct_frag6_init(void) { int ret = 0; nf_frags.hashfn = nf_hashfn; nf_frags.constructor = ip6_frag_init; nf_frags.destructor = NULL; nf_frags.qsize = sizeof(struct frag_queue); nf_frags.match = ip6_frag_match; nf_frags.frag_expire = nf_ct_frag6_expire; nf_frags.frags_cache_name = nf_frags_cache_name; ret = inet_frags_init(&nf_frags); if (ret) goto out; ret = register_pernet_subsys(&nf_ct_net_ops); if (ret) inet_frags_fini(&nf_frags); out: return ret; } void nf_ct_frag6_cleanup(void) { unregister_pernet_subsys(&nf_ct_net_ops); inet_frags_fini(&nf_frags); }
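A subtlety in find_prev_fhdr() above is worth spelling out: the walk over extension headers uses two different length formulas. Generic extension headers encode "Hdr Ext Len" in 8-octet units excluding the first 8 octets (the kernel's ipv6_optlen() macro expands to (hdrlen + 1) << 3), while AH (NEXTHDR_AUTH) encodes its length field in 4-octet units minus 2, hence the (hdr.hdrlen + 2) << 2 branch. A worked check as a small userspace sketch, with illustrative values not taken from any capture:

#include <assert.h>

/* Worked check of the two extension-header length formulas used in
 * find_prev_fhdr(). Values are illustrative. */
int main(void)
{
    unsigned generic_hdrlen = 1;   /* 8-octet units beyond the first 8  */
    unsigned ah_hdrlen = 4;        /* 4-octet units minus 2 (RFC 4302)  */

    assert(((generic_hdrlen + 1) << 3) == 16); /* 8 fixed + 8 more      */
    assert(((ah_hdrlen + 2) << 2) == 24);      /* 12 fixed + 12 of ICV  */
    return 0;
}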
int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user) { struct net_device *dev = skb->dev; int fhoff, nhoff, ret; struct frag_hdr *fhdr; struct frag_queue *fq; struct ipv6hdr *hdr; u8 prevhdr; /* Jumbo payload inhibits frag. header */ if (ipv6_hdr(skb)->payload_len == 0) { pr_debug("payload len = 0\n"); return -EINVAL; } if (find_prev_fhdr(skb, &prevhdr, &nhoff, &fhoff) < 0) return -EINVAL; if (!pskb_may_pull(skb, fhoff + sizeof(*fhdr))) return -ENOMEM; skb_set_transport_header(skb, fhoff); hdr = ipv6_hdr(skb); fhdr = (struct frag_hdr *)skb_transport_header(skb); fq = fq_find(net, fhdr->identification, user, &hdr->saddr, &hdr->daddr, skb->dev ? skb->dev->ifindex : 0, ip6_frag_ecn(hdr)); if (fq == NULL) { pr_debug("Can't find and can't create new queue\n"); return -ENOMEM; } spin_lock_bh(&fq->q.lock); if (nf_ct_frag6_queue(fq, skb, fhdr, nhoff) < 0) { ret = -EINVAL; goto out_unlock; } /* after queue has assumed skb ownership, only 0 or -EINPROGRESS * must be returned. */ ret = -EINPROGRESS; if (fq->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) && fq->q.meat == fq->q.len && nf_ct_frag6_reasm(fq, skb, dev)) ret = 0; out_unlock: spin_unlock_bh(&fq->q.lock); inet_frag_put(&fq->q, &nf_frags); return ret; }
int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user) { struct net_device *dev = skb->dev; int fhoff, nhoff, ret; struct frag_hdr *fhdr; struct frag_queue *fq; struct ipv6hdr *hdr; u8 prevhdr; /* Jumbo payload inhibits frag. header */ if (ipv6_hdr(skb)->payload_len == 0) { pr_debug("payload len = 0\n"); return 0; } if (find_prev_fhdr(skb, &prevhdr, &nhoff, &fhoff) < 0) return 0; if (!pskb_may_pull(skb, fhoff + sizeof(*fhdr))) return -ENOMEM; skb_set_transport_header(skb, fhoff); hdr = ipv6_hdr(skb); fhdr = (struct frag_hdr *)skb_transport_header(skb); fq = fq_find(net, fhdr->identification, user, &hdr->saddr, &hdr->daddr, skb->dev ? skb->dev->ifindex : 0, ip6_frag_ecn(hdr)); if (fq == NULL) { pr_debug("Can't find and can't create new queue\n"); return -ENOMEM; } spin_lock_bh(&fq->q.lock); if (nf_ct_frag6_queue(fq, skb, fhdr, nhoff) < 0) { ret = -EINVAL; goto out_unlock; } /* after queue has assumed skb ownership, only 0 or -EINPROGRESS * must be returned. */ ret = -EINPROGRESS; if (fq->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) && fq->q.meat == fq->q.len && nf_ct_frag6_reasm(fq, skb, dev)) ret = 0; out_unlock: spin_unlock_bh(&fq->q.lock); inet_frag_put(&fq->q, &nf_frags); return ret; }
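func_before and func_after differ only in what nf_ct_frag6_gather() returns when the packet turns out not to be fragmented at all (a zero payload_len, i.e. a jumbo payload, or no fragment header found): -EINVAL before the change, 0 after. To see why the distinction matters, here is a minimal caller sketch; the hook function and its name are hypothetical, and only the return-code contract ("only 0 or -EINPROGRESS must be returned" once the queue owns the skb) comes from the code above:

#include <linux/errno.h>
#include <linux/netfilter.h>
#include <linux/types.h>

/* Hypothetical caller (not from the kernel tree) mapping gather()
 * results onto standard netfilter verdicts. */
static unsigned int defrag_verdict_sketch(struct net *net,
                                          struct sk_buff *skb, u32 user)
{
    int err = nf_ct_frag6_gather(net, skb, user);

    if (err == -EINPROGRESS)
        return NF_STOLEN;   /* fragment queued; reassembly owns the skb */
    if (err < 0)
        return NF_DROP;     /* hard failure, e.g. -ENOMEM */
    return NF_ACCEPT;       /* 0: fully reassembled, or not fragmented */
}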
{'added': [(579, '\t\treturn 0;'), (583, '\t\treturn 0;')], 'deleted': [(579, '\t\treturn -EINVAL;'), (583, '\t\treturn -EINVAL;')]}
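The two hunks above (file lines 579 and 583) correspond to exactly those early returns. Under the old -EINVAL behaviour, a caller like the sketch above would drop every IPv6 packet that merely lacks a fragment header or carries a jumbo payload; returning 0 instead lets unfragmented traffic pass through the defrag path untouched.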
2
2
482
3119
https://github.com/torvalds/linux
CVE-2016-9755
['CWE-787']
cindent.c
skip_string
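The code_before listing for this record follows; the method of interest, skip_string(), scans past string literals, character literals, and C++11 raw strings. The raw-string case is the subtlest part of the scan, so here is a standalone sketch of just that delimiter check (plain C with invented names, for illustration only; the Vim original below uses char_u and vim_strchr()):

#include <string.h>

/* Sketch of the raw-string scan in skip_string(): a C++11 raw literal
 * R"tag(...)tag" terminates only at the first `)tag"` sequence, so the
 * bytes after each ')' are compared against the saved delimiter. */
static const char *skip_raw_string(const char *p)   /* p points at 'R' */
{
    const char *delim = p + 2;                  /* just past R"         */
    const char *paren = strchr(delim, '(');
    size_t dlen;

    if (paren == NULL)
        return p;                               /* malformed: unchanged */
    dlen = (size_t)(paren - delim);
    for (p = paren + 1; *p; ++p)
        if (p[0] == ')' && strncmp(p + 1, delim, dlen) == 0
                && p[dlen + 1] == '"')
            return p + dlen + 1;                /* at the closing '"'   */
    return p;                                   /* unterminated literal */
}

An unterminated delimiter simply runs to the end of the buffer, which is why the Vim code re-checks p[0] == '"' before continuing its outer loop.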
/* vi:set ts=8 sts=4 sw=4 noet: * * VIM - Vi IMproved by Bram Moolenaar * * Do ":help uganda" in Vim to read copying and usage conditions. * Do ":help credits" in Vim to see a list of people who contributed. * See README.txt for an overview of the Vim source code. */ /* * cindent.c: C indentation related functions * * Many of C-indenting functions originally come from Eric Fischer. * * Below "XXX" means that this function may unlock the current line. */ #include "vim.h" // values for the "lookfor" state #define LOOKFOR_INITIAL 0 #define LOOKFOR_IF 1 #define LOOKFOR_DO 2 #define LOOKFOR_CASE 3 #define LOOKFOR_ANY 4 #define LOOKFOR_TERM 5 #define LOOKFOR_UNTERM 6 #define LOOKFOR_SCOPEDECL 7 #define LOOKFOR_NOBREAK 8 #define LOOKFOR_CPP_BASECLASS 9 #define LOOKFOR_ENUM_OR_INIT 10 #define LOOKFOR_JS_KEY 11 #define LOOKFOR_COMMA 12 #if defined(FEAT_CINDENT) || defined(FEAT_SMARTINDENT) /* * Return TRUE if the string "line" starts with a word from 'cinwords'. */ int cin_is_cinword(char_u *line) { char_u *cinw; char_u *cinw_buf; int cinw_len; int retval = FALSE; int len; cinw_len = (int)STRLEN(curbuf->b_p_cinw) + 1; cinw_buf = alloc(cinw_len); if (cinw_buf != NULL) { line = skipwhite(line); for (cinw = curbuf->b_p_cinw; *cinw; ) { len = copy_option_part(&cinw, cinw_buf, cinw_len, ","); if (STRNCMP(line, cinw_buf, len) == 0 && (!vim_iswordc(line[len]) || !vim_iswordc(line[len - 1]))) { retval = TRUE; break; } } vim_free(cinw_buf); } return retval; } #endif /* * Skip to the end of a "string" and a 'c' character. * If there is no string or character, return argument unmodified. */ static char_u * skip_string(char_u *p) { int i; // We loop, because strings may be concatenated: "date""time". for ( ; ; ++p) { if (p[0] == '\'') // 'c' or '\n' or '\000' { if (p[1] == NUL) // ' at end of line break; i = 2; if (p[1] == '\\' && p[2] != NUL) // '\n' or '\000' { ++i; while (vim_isdigit(p[i - 1])) // '\000' ++i; } if (p[i] == '\'') // check for trailing ' { p += i; continue; } } else if (p[0] == '"') // start of string { for (++p; p[0]; ++p) { if (p[0] == '\\' && p[1] != NUL) ++p; else if (p[0] == '"') // end of string break; } if (p[0] == '"') continue; // continue for another string } else if (p[0] == 'R' && p[1] == '"') { // Raw string: R"[delim](...)[delim]" char_u *delim = p + 2; char_u *paren = vim_strchr(delim, '('); if (paren != NULL) { size_t delim_len = paren - delim; for (p += 3; *p; ++p) if (p[0] == ')' && STRNCMP(p + 1, delim, delim_len) == 0 && p[delim_len + 1] == '"') { p += delim_len + 1; break; } if (p[0] == '"') continue; // continue for another string } } break; // no string found } if (!*p) --p; // backup from NUL return p; } /* * Return TRUE if "line[col]" is inside a C string. */ int is_pos_in_string(char_u *line, colnr_T col) { char_u *p; for (p = line; *p && (colnr_T)(p - line) < col; ++p) p = skip_string(p); return !((colnr_T)(p - line) <= col); } #if defined(FEAT_CINDENT) || defined(FEAT_SYN_HL) /* * Find the start of a comment, not knowing if we are in a comment right now. * Search starts at w_cursor.lnum and goes backwards. * Return NULL when not inside a comment. */ static pos_T * ind_find_start_comment(void) // XXX { return find_start_comment(curbuf->b_ind_maxcomment); } pos_T * find_start_comment(int ind_maxcomment) // XXX { pos_T *pos; int cur_maxcomment = ind_maxcomment; for (;;) { pos = findmatchlimit(NULL, '*', FM_BACKWARD, cur_maxcomment); if (pos == NULL) break; // Check if the comment start we found is inside a string. 
// If it is then restrict the search to below this line and try again. if (!is_pos_in_string(ml_get(pos->lnum), pos->col)) break; cur_maxcomment = curwin->w_cursor.lnum - pos->lnum - 1; if (cur_maxcomment <= 0) { pos = NULL; break; } } return pos; } /* * Find the start of a raw string, not knowing if we are in one right now. * Search starts at w_cursor.lnum and goes backwards. * Return NULL when not inside a raw string. */ static pos_T * find_start_rawstring(int ind_maxcomment) // XXX { pos_T *pos; int cur_maxcomment = ind_maxcomment; for (;;) { pos = findmatchlimit(NULL, 'R', FM_BACKWARD, cur_maxcomment); if (pos == NULL) break; // Check if the raw string start we found is inside a string. // If it is then restrict the search to below this line and try again. if (!is_pos_in_string(ml_get(pos->lnum), pos->col)) break; cur_maxcomment = curwin->w_cursor.lnum - pos->lnum - 1; if (cur_maxcomment <= 0) { pos = NULL; break; } } return pos; } /* * Find the start of a comment or raw string, not knowing if we are in a * comment or raw string right now. * Search starts at w_cursor.lnum and goes backwards. * If is_raw is given and returns start of raw_string, sets it to true. * Return NULL when not inside a comment or raw string. * "CORS" -> Comment Or Raw String */ static pos_T * ind_find_start_CORS(linenr_T *is_raw) // XXX { static pos_T comment_pos_copy; pos_T *comment_pos; pos_T *rs_pos; comment_pos = find_start_comment(curbuf->b_ind_maxcomment); if (comment_pos != NULL) { // Need to make a copy of the static pos in findmatchlimit(), // calling find_start_rawstring() may change it. comment_pos_copy = *comment_pos; comment_pos = &comment_pos_copy; } rs_pos = find_start_rawstring(curbuf->b_ind_maxcomment); // If comment_pos is before rs_pos the raw string is inside the comment. // If rs_pos is before comment_pos the comment is inside the raw string. if (comment_pos == NULL || (rs_pos != NULL && LT_POS(*rs_pos, *comment_pos))) { if (is_raw != NULL && rs_pos != NULL) *is_raw = rs_pos->lnum; return rs_pos; } return comment_pos; } #endif // FEAT_CINDENT || FEAT_SYN_HL #if defined(FEAT_CINDENT) || defined(PROTO) /* * Return TRUE if C-indenting is on. */ int cindent_on(void) { return (!p_paste && (curbuf->b_p_cin # ifdef FEAT_EVAL || *curbuf->b_p_inde != NUL # endif )); } // Find result cache for cpp_baseclass typedef struct { int found; lpos_T lpos; } cpp_baseclass_cache_T; /* * Skip over white space and C comments within the line. * Also skip over Perl/shell comments if desired. */ static char_u * cin_skipcomment(char_u *s) { while (*s) { char_u *prev_s = s; s = skipwhite(s); // Perl/shell # comment comment continues until eol. Require a space // before # to avoid recognizing $#array. if (curbuf->b_ind_hash_comment != 0 && s != prev_s && *s == '#') { s += STRLEN(s); break; } if (*s != '/') break; ++s; if (*s == '/') // slash-slash comment continues till eol { s += STRLEN(s); break; } if (*s != '*') break; for (++s; *s; ++s) // skip slash-star comment if (s[0] == '*' && s[1] == '/') { s += 2; break; } } return s; } /* * Return TRUE if there is no code at *s. White space and comments are * not considered code. */ static int cin_nocode(char_u *s) { return *cin_skipcomment(s) == NUL; } /* * Recognize the start of a C or C++ comment. */ static int cin_iscomment(char_u *p) { return (p[0] == '/' && (p[1] == '*' || p[1] == '/')); } /* * Recognize the start of a "//" comment. 
*/ static int cin_islinecomment(char_u *p) { return (p[0] == '/' && p[1] == '/'); } /* * Check previous lines for a "//" line comment, skipping over blank lines. */ static pos_T * find_line_comment(void) // XXX { static pos_T pos; char_u *line; char_u *p; pos = curwin->w_cursor; while (--pos.lnum > 0) { line = ml_get(pos.lnum); p = skipwhite(line); if (cin_islinecomment(p)) { pos.col = (int)(p - line); return &pos; } if (*p != NUL) break; } return NULL; } /* * Return TRUE if "text" starts with "key:". */ static int cin_has_js_key(char_u *text) { char_u *s = skipwhite(text); int quote = -1; if (*s == '\'' || *s == '"') { // can be 'key': or "key": quote = *s; ++s; } if (!vim_isIDc(*s)) // need at least one ID character return FALSE; while (vim_isIDc(*s)) ++s; if (*s == quote) ++s; s = cin_skipcomment(s); // "::" is not a label, it's C++ return (*s == ':' && s[1] != ':'); } /* * Check if string matches "label:"; move to character after ':' if true. * "*s" must point to the start of the label, if there is one. */ static int cin_islabel_skip(char_u **s) { if (!vim_isIDc(**s)) // need at least one ID character return FALSE; while (vim_isIDc(**s)) (*s)++; *s = cin_skipcomment(*s); // "::" is not a label, it's C++ return (**s == ':' && *++*s != ':'); } /* * Recognize a scope declaration label from the 'cinscopedecls' option. */ static int cin_isscopedecl(char_u *p) { size_t cinsd_len; char_u *cinsd_buf; char_u *cinsd; size_t len; char_u *skip; char_u *s = cin_skipcomment(p); int found = FALSE; cinsd_len = STRLEN(curbuf->b_p_cinsd) + 1; cinsd_buf = alloc(cinsd_len); if (cinsd_buf == NULL) return FALSE; for (cinsd = curbuf->b_p_cinsd; *cinsd; ) { len = copy_option_part(&cinsd, cinsd_buf, (int)cinsd_len, ","); if (STRNCMP(s, cinsd_buf, len) == 0) { skip = cin_skipcomment(s + len); if (*skip == ':' && skip[1] != ':') { found = TRUE; break; } } } vim_free(cinsd_buf); return found; } /* * Recognize a preprocessor statement: Any line that starts with '#'. */ static int cin_ispreproc(char_u *s) { if (*skipwhite(s) == '#') return TRUE; return FALSE; } /* * Return TRUE if line "*pp" at "*lnump" is a preprocessor statement or a * continuation line of a preprocessor statement. Decrease "*lnump" to the * start and return the line in "*pp". * Put the amount of indent in "*amount". */ static int cin_ispreproc_cont(char_u **pp, linenr_T *lnump, int *amount) { char_u *line = *pp; linenr_T lnum = *lnump; int retval = FALSE; int candidate_amount = *amount; if (*line != NUL && line[STRLEN(line) - 1] == '\\') candidate_amount = get_indent_lnum(lnum); for (;;) { if (cin_ispreproc(line)) { retval = TRUE; *lnump = lnum; break; } if (lnum == 1) break; line = ml_get(--lnum); if (*line == NUL || line[STRLEN(line) - 1] != '\\') break; } if (lnum != *lnump) *pp = ml_get(*lnump); if (retval) *amount = candidate_amount; return retval; } static int cin_iselse( char_u *p) { if (*p == '}') // accept "} else" p = cin_skipcomment(p + 1); return (STRNCMP(p, "else", 4) == 0 && !vim_isIDc(p[4])); } /* * Recognize a line that starts with '{' or '}', or ends with ';', ',', '{' or * '}'. * Don't consider "} else" a terminated line. * If a line begins with an "else", only consider it terminated if no unmatched * opening braces follow (handle "else { foo();" correctly). * Return the character terminating the line (ending char's have precedence if * both apply in order to determine initializations). 
*/ static int cin_isterminated( char_u *s, int incl_open, // include '{' at the end as terminator int incl_comma) // recognize a trailing comma { char_u found_start = 0; unsigned n_open = 0; int is_else = FALSE; s = cin_skipcomment(s); if (*s == '{' || (*s == '}' && !cin_iselse(s))) found_start = *s; if (!found_start) is_else = cin_iselse(s); while (*s) { // skip over comments, "" strings and 'c'haracters s = skip_string(cin_skipcomment(s)); if (*s == '}' && n_open > 0) --n_open; if ((!is_else || n_open == 0) && (*s == ';' || *s == '}' || (incl_comma && *s == ',')) && cin_nocode(s + 1)) return *s; else if (*s == '{') { if (incl_open && cin_nocode(s + 1)) return *s; else ++n_open; } if (*s) s++; } return found_start; } /* * Return TRUE when "s" starts with "word" and then a non-ID character. */ static int cin_starts_with(char_u *s, char *word) { int l = (int)STRLEN(word); return (STRNCMP(s, word, l) == 0 && !vim_isIDc(s[l])); } /* * Recognize a "default" switch label. */ static int cin_isdefault(char_u *s) { return (STRNCMP(s, "default", 7) == 0 && *(s = cin_skipcomment(s + 7)) == ':' && s[1] != ':'); } /* * Recognize a switch label: "case .*:" or "default:". */ static int cin_iscase( char_u *s, int strict) // Allow relaxed check of case statement for JS { s = cin_skipcomment(s); if (cin_starts_with(s, "case")) { for (s += 4; *s; ++s) { s = cin_skipcomment(s); if (*s == NUL) break; if (*s == ':') { if (s[1] == ':') // skip over "::" for C++ ++s; else return TRUE; } if (*s == '\'' && s[1] && s[2] == '\'') s += 2; // skip over ':' else if (*s == '/' && (s[1] == '*' || s[1] == '/')) return FALSE; // stop at comment else if (*s == '"') { // JS etc. if (strict) return FALSE; // stop at string else return TRUE; } } return FALSE; } if (cin_isdefault(s)) return TRUE; return FALSE; } /* * Recognize a label: "label:". * Note: curwin->w_cursor must be where we are looking for the label. */ static int cin_islabel(void) // XXX { char_u *s; s = cin_skipcomment(ml_get_curline()); // Exclude "default" from labels, since it should be indented // like a switch label. Same for C++ scope declarations. if (cin_isdefault(s)) return FALSE; if (cin_isscopedecl(s)) return FALSE; if (cin_islabel_skip(&s)) { // Only accept a label if the previous line is terminated or is a case // label. pos_T cursor_save; pos_T *trypos; char_u *line; cursor_save = curwin->w_cursor; while (curwin->w_cursor.lnum > 1) { --curwin->w_cursor.lnum; // If we're in a comment or raw string now, skip to the start of // it. curwin->w_cursor.col = 0; if ((trypos = ind_find_start_CORS(NULL)) != NULL) // XXX curwin->w_cursor = *trypos; line = ml_get_curline(); if (cin_ispreproc(line)) // ignore #defines, #if, etc. continue; if (*(line = cin_skipcomment(line)) == NUL) continue; curwin->w_cursor = cursor_save; if (cin_isterminated(line, TRUE, FALSE) || cin_isscopedecl(line) || cin_iscase(line, TRUE) || (cin_islabel_skip(&line) && cin_nocode(line))) return TRUE; return FALSE; } curwin->w_cursor = cursor_save; return TRUE; // label at start of file??? } return FALSE; } /* * Return TRUE if string "s" ends with the string "find", possibly followed by * white space and comments. Skip strings and comments. * Ignore "ignore" after "find" if it's not NULL. 
*/ static int cin_ends_in(char_u *s, char_u *find, char_u *ignore) { char_u *p = s; char_u *r; int len = (int)STRLEN(find); while (*p != NUL) { p = cin_skipcomment(p); if (STRNCMP(p, find, len) == 0) { r = skipwhite(p + len); if (ignore != NULL && STRNCMP(r, ignore, STRLEN(ignore)) == 0) r = skipwhite(r + STRLEN(ignore)); if (cin_nocode(r)) return TRUE; } if (*p != NUL) ++p; } return FALSE; } /* * Recognize structure initialization and enumerations: * "[typedef] [static|public|protected|private] enum" * "[typedef] [static|public|protected|private] = {" */ static int cin_isinit(void) { char_u *s; static char *skip[] = {"static", "public", "protected", "private"}; s = cin_skipcomment(ml_get_curline()); if (cin_starts_with(s, "typedef")) s = cin_skipcomment(s + 7); for (;;) { int i, l; for (i = 0; i < (int)ARRAY_LENGTH(skip); ++i) { l = (int)strlen(skip[i]); if (cin_starts_with(s, skip[i])) { s = cin_skipcomment(s + l); l = 0; break; } } if (l != 0) break; } if (cin_starts_with(s, "enum")) return TRUE; if (cin_ends_in(s, (char_u *)"=", (char_u *)"{")) return TRUE; return FALSE; } // Maximum number of lines to search back for a "namespace" line. #define FIND_NAMESPACE_LIM 20 /* * Recognize a "namespace" scope declaration. */ static int cin_is_cpp_namespace(char_u *s) { char_u *p; int has_name = FALSE; int has_name_start = FALSE; s = cin_skipcomment(s); if (STRNCMP(s, "inline", 6) == 0 && (s[6] == NUL || !vim_iswordc(s[6]))) s = cin_skipcomment(skipwhite(s + 6)); if (STRNCMP(s, "namespace", 9) == 0 && (s[9] == NUL || !vim_iswordc(s[9]))) { p = cin_skipcomment(skipwhite(s + 9)); while (*p != NUL) { if (VIM_ISWHITE(*p)) { has_name = TRUE; // found end of a name p = cin_skipcomment(skipwhite(p)); } else if (*p == '{') { break; } else if (vim_iswordc(*p)) { has_name_start = TRUE; if (has_name) return FALSE; // word character after skipping past name ++p; } else if (p[0] == ':' && p[1] == ':' && vim_iswordc(p[2])) { if (!has_name_start || has_name) return FALSE; // C++ 17 nested namespace p += 3; } else { return FALSE; } } return TRUE; } return FALSE; } /* * Recognize a `extern "C"` or `extern "C++"` linkage specifications. */ static int cin_is_cpp_extern_c(char_u *s) { char_u *p; int has_string_literal = FALSE; s = cin_skipcomment(s); if (STRNCMP(s, "extern", 6) == 0 && (s[6] == NUL || !vim_iswordc(s[6]))) { p = cin_skipcomment(skipwhite(s + 6)); while (*p != NUL) { if (VIM_ISWHITE(*p)) { p = cin_skipcomment(skipwhite(p)); } else if (*p == '{') { break; } else if (p[0] == '"' && p[1] == 'C' && p[2] == '"') { if (has_string_literal) return FALSE; has_string_literal = TRUE; p += 3; } else if (p[0] == '"' && p[1] == 'C' && p[2] == '+' && p[3] == '+' && p[4] == '"') { if (has_string_literal) return FALSE; has_string_literal = TRUE; p += 5; } else { return FALSE; } } return has_string_literal ? TRUE : FALSE; } return FALSE; } /* * Return a pointer to the first non-empty non-comment character after a ':'. * Return NULL if not found. * case 234: a = b; * ^ */ static char_u * after_label(char_u *l) { for ( ; *l; ++l) { if (*l == ':') { if (l[1] == ':') // skip over "::" for C++ ++l; else if (!cin_iscase(l + 1, FALSE)) break; } else if (*l == '\'' && l[1] && l[2] == '\'') l += 2; // skip over 'x' } if (*l == NUL) return NULL; l = cin_skipcomment(l + 1); if (*l == NUL) return NULL; return l; } /* * Get indent of line "lnum", skipping a label. * Return 0 if there is nothing after the label. 
*/ static int get_indent_nolabel (linenr_T lnum) // XXX { char_u *l; pos_T fp; colnr_T col; char_u *p; l = ml_get(lnum); p = after_label(l); if (p == NULL) return 0; fp.col = (colnr_T)(p - l); fp.lnum = lnum; getvcol(curwin, &fp, &col, NULL, NULL); return (int)col; } /* * Find indent for line "lnum", ignoring any case or jump label. * Also return a pointer to the text (after the label) in "pp". * label: if (asdf && asdfasdf) * ^ */ static int skip_label(linenr_T lnum, char_u **pp) { char_u *l; int amount; pos_T cursor_save; cursor_save = curwin->w_cursor; curwin->w_cursor.lnum = lnum; l = ml_get_curline(); // XXX if (cin_iscase(l, FALSE) || cin_isscopedecl(l) || cin_islabel()) { amount = get_indent_nolabel(lnum); l = after_label(ml_get_curline()); if (l == NULL) // just in case l = ml_get_curline(); } else { amount = get_indent(); l = ml_get_curline(); } *pp = l; curwin->w_cursor = cursor_save; return amount; } /* * Return the indent of the first variable name after a type in a declaration. * int a, indent of "a" * static struct foo b, indent of "b" * enum bla c, indent of "c" * Returns zero when it doesn't look like a declaration. */ static int cin_first_id_amount(void) { char_u *line, *p, *s; int len; pos_T fp; colnr_T col; line = ml_get_curline(); p = skipwhite(line); len = (int)(skiptowhite(p) - p); if (len == 6 && STRNCMP(p, "static", 6) == 0) { p = skipwhite(p + 6); len = (int)(skiptowhite(p) - p); } if (len == 6 && STRNCMP(p, "struct", 6) == 0) p = skipwhite(p + 6); else if (len == 4 && STRNCMP(p, "enum", 4) == 0) p = skipwhite(p + 4); else if ((len == 8 && STRNCMP(p, "unsigned", 8) == 0) || (len == 6 && STRNCMP(p, "signed", 6) == 0)) { s = skipwhite(p + len); if ((STRNCMP(s, "int", 3) == 0 && VIM_ISWHITE(s[3])) || (STRNCMP(s, "long", 4) == 0 && VIM_ISWHITE(s[4])) || (STRNCMP(s, "short", 5) == 0 && VIM_ISWHITE(s[5])) || (STRNCMP(s, "char", 4) == 0 && VIM_ISWHITE(s[4]))) p = s; } for (len = 0; vim_isIDc(p[len]); ++len) ; if (len == 0 || !VIM_ISWHITE(p[len]) || cin_nocode(p)) return 0; p = skipwhite(p + len); fp.lnum = curwin->w_cursor.lnum; fp.col = (colnr_T)(p - line); getvcol(curwin, &fp, &col, NULL, NULL); return (int)col; } /* * Return the indent of the first non-blank after an equal sign. * char *foo = "here"; * Return zero if no (useful) equal sign found. * Return -1 if the line above "lnum" ends in a backslash. * foo = "asdf\ * asdf\ * here"; */ static int cin_get_equal_amount(linenr_T lnum) { char_u *line; char_u *s; colnr_T col; pos_T fp; if (lnum > 1) { line = ml_get(lnum - 1); if (*line != NUL && line[STRLEN(line) - 1] == '\\') return -1; } line = s = ml_get(lnum); while (*s != NUL && vim_strchr((char_u *)"=;{}\"'", *s) == NULL) { if (cin_iscomment(s)) // ignore comments s = cin_skipcomment(s); else ++s; } if (*s != '=') return 0; s = skipwhite(s + 1); if (cin_nocode(s)) return 0; if (*s == '"') // nice alignment for continued strings ++s; fp.lnum = lnum; fp.col = (colnr_T)(s - line); getvcol(curwin, &fp, &col, NULL, NULL); return (int)col; } /* * Skip strings, chars and comments until at or past "trypos". * Return the column found. 
*/ static int cin_skip2pos(pos_T *trypos) { char_u *line; char_u *p; char_u *new_p; p = line = ml_get(trypos->lnum); while (*p && (colnr_T)(p - line) < trypos->col) { if (cin_iscomment(p)) p = cin_skipcomment(p); else { new_p = skip_string(p); if (new_p == p) ++p; else p = new_p; } } return (int)(p - line); } static pos_T * find_match_char(int c, int ind_maxparen) // XXX { pos_T cursor_save; pos_T *trypos; static pos_T pos_copy; int ind_maxp_wk; cursor_save = curwin->w_cursor; ind_maxp_wk = ind_maxparen; retry: if ((trypos = findmatchlimit(NULL, c, 0, ind_maxp_wk)) != NULL) { // check if the ( is in a // comment if ((colnr_T)cin_skip2pos(trypos) > trypos->col) { ind_maxp_wk = ind_maxparen - (int)(cursor_save.lnum - trypos->lnum); if (ind_maxp_wk > 0) { curwin->w_cursor = *trypos; curwin->w_cursor.col = 0; // XXX goto retry; } trypos = NULL; } else { pos_T *trypos_wk; pos_copy = *trypos; // copy trypos, findmatch will change it trypos = &pos_copy; curwin->w_cursor = *trypos; if ((trypos_wk = ind_find_start_CORS(NULL)) != NULL) // XXX { ind_maxp_wk = ind_maxparen - (int)(cursor_save.lnum - trypos_wk->lnum); if (ind_maxp_wk > 0) { curwin->w_cursor = *trypos_wk; goto retry; } trypos = NULL; } } } curwin->w_cursor = cursor_save; return trypos; } /* * Find the matching '(', ignoring it if it is in a comment. * Return NULL if no match found. */ static pos_T * find_match_paren(int ind_maxparen) // XXX { return find_match_char('(', ind_maxparen); } /* * Set w_cursor.col to the column number of the last unmatched ')' or '{' in * line "l". "l" must point to the start of the line. */ static int find_last_paren(char_u *l, int start, int end) { int i; int retval = FALSE; int open_count = 0; curwin->w_cursor.col = 0; // default is start of line for (i = 0; l[i] != NUL; i++) { i = (int)(cin_skipcomment(l + i) - l); // ignore parens in comments i = (int)(skip_string(l + i) - l); // ignore parens in quotes if (l[i] == start) ++open_count; else if (l[i] == end) { if (open_count > 0) --open_count; else { curwin->w_cursor.col = i; retval = TRUE; } } } return retval; } /* * Recognize the basic picture of a function declaration -- it needs to * have an open paren somewhere and a close paren at the end of the line and * no semicolons anywhere. * When a line ends in a comma we continue looking in the next line. * "sp" points to a string with the line. When looking at other lines it must * be restored to the line. When it's NULL fetch lines here. * "first_lnum" is where we start looking. * "min_lnum" is the line before which we will not be looking. */ static int cin_isfuncdecl( char_u **sp, linenr_T first_lnum, linenr_T min_lnum) { char_u *s; linenr_T lnum = first_lnum; linenr_T save_lnum = curwin->w_cursor.lnum; int retval = FALSE; pos_T *trypos; int just_started = TRUE; if (sp == NULL) s = ml_get(lnum); else s = *sp; curwin->w_cursor.lnum = lnum; if (find_last_paren(s, '(', ')') && (trypos = find_match_paren(curbuf->b_ind_maxparen)) != NULL) { lnum = trypos->lnum; if (lnum < min_lnum) { curwin->w_cursor.lnum = save_lnum; return FALSE; } s = ml_get(lnum); } curwin->w_cursor.lnum = save_lnum; // Ignore line starting with #. if (cin_ispreproc(s)) return FALSE; while (*s && *s != '(' && *s != ';' && *s != '\'' && *s != '"') { if (cin_iscomment(s)) // ignore comments s = cin_skipcomment(s); else if (*s == ':') { if (*(s + 1) == ':') s += 2; else // To avoid a mistake in the following situation: // A::A(int a, int b) // : a(0) // <--not a function decl // , b(0) // {... 
return FALSE; } else ++s; } if (*s != '(') return FALSE; // ';', ' or " before any () or no '(' while (*s && *s != ';' && *s != '\'' && *s != '"') { if (*s == ')' && cin_nocode(s + 1)) { // ')' at the end: may have found a match // Check for the previous line not to end in a backslash: // #if defined(x) && {backslash} // defined(y) lnum = first_lnum - 1; s = ml_get(lnum); if (*s == NUL || s[STRLEN(s) - 1] != '\\') retval = TRUE; goto done; } if ((*s == ',' && cin_nocode(s + 1)) || s[1] == NUL || cin_nocode(s)) { int comma = (*s == ','); // ',' at the end: continue looking in the next line. // At the end: check for ',' in the next line, for this style: // func(arg1 // , arg2) for (;;) { if (lnum >= curbuf->b_ml.ml_line_count) break; s = ml_get(++lnum); if (!cin_ispreproc(s)) break; } if (lnum >= curbuf->b_ml.ml_line_count) break; // Require a comma at end of the line or a comma or ')' at the // start of next line. s = skipwhite(s); if (!just_started && (!comma && *s != ',' && *s != ')')) break; just_started = FALSE; } else if (cin_iscomment(s)) // ignore comments s = cin_skipcomment(s); else { ++s; just_started = FALSE; } } done: if (lnum != first_lnum && sp != NULL) *sp = ml_get(first_lnum); return retval; } static int cin_isif(char_u *p) { return (STRNCMP(p, "if", 2) == 0 && !vim_isIDc(p[2])); } static int cin_isdo(char_u *p) { return (STRNCMP(p, "do", 2) == 0 && !vim_isIDc(p[2])); } /* * Check if this is a "while" that should have a matching "do". * We only accept a "while (condition) ;", with only white space between the * ')' and ';'. The condition may be spread over several lines. */ static int cin_iswhileofdo (char_u *p, linenr_T lnum) // XXX { pos_T cursor_save; pos_T *trypos; int retval = FALSE; p = cin_skipcomment(p); if (*p == '}') // accept "} while (cond);" p = cin_skipcomment(p + 1); if (cin_starts_with(p, "while")) { cursor_save = curwin->w_cursor; curwin->w_cursor.lnum = lnum; curwin->w_cursor.col = 0; p = ml_get_curline(); while (*p && *p != 'w') // skip any '}', until the 'w' of the "while" { ++p; ++curwin->w_cursor.col; } if ((trypos = findmatchlimit(NULL, 0, 0, curbuf->b_ind_maxparen)) != NULL && *cin_skipcomment(ml_get_pos(trypos) + 1) == ';') retval = TRUE; curwin->w_cursor = cursor_save; } return retval; } /* * Check whether in "p" there is an "if", "for" or "while" before "*poffset". * Return 0 if there is none. * Otherwise return !0 and update "*poffset" to point to the place where the * string was found. */ static int cin_is_if_for_while_before_offset(char_u *line, int *poffset) { int offset = *poffset; if (offset-- < 2) return 0; while (offset > 2 && VIM_ISWHITE(line[offset])) --offset; offset -= 1; if (!STRNCMP(line + offset, "if", 2)) goto probablyFound; if (offset >= 1) { offset -= 1; if (!STRNCMP(line + offset, "for", 3)) goto probablyFound; if (offset >= 2) { offset -= 2; if (!STRNCMP(line + offset, "while", 5)) goto probablyFound; } } return 0; probablyFound: if (!offset || !vim_isIDc(line[offset - 1])) { *poffset = offset; return 1; } return 0; } /* * Return TRUE if we are at the end of a do-while. * do * nothing; * while (foo * && bar); <-- here * Adjust the cursor to the line with "while". 
*/ static int cin_iswhileofdo_end(int terminated) { char_u *line; char_u *p; char_u *s; pos_T *trypos; int i; if (terminated != ';') // there must be a ';' at the end return FALSE; p = line = ml_get_curline(); while (*p != NUL) { p = cin_skipcomment(p); if (*p == ')') { s = skipwhite(p + 1); if (*s == ';' && cin_nocode(s + 1)) { // Found ");" at end of the line, now check there is "while" // before the matching '('. XXX i = (int)(p - line); curwin->w_cursor.col = i; trypos = find_match_paren(curbuf->b_ind_maxparen); if (trypos != NULL) { s = cin_skipcomment(ml_get(trypos->lnum)); if (*s == '}') // accept "} while (cond);" s = cin_skipcomment(s + 1); if (cin_starts_with(s, "while")) { curwin->w_cursor.lnum = trypos->lnum; return TRUE; } } // Searching may have made "line" invalid, get it again. line = ml_get_curline(); p = line + i; } } if (*p != NUL) ++p; } return FALSE; } static int cin_isbreak(char_u *p) { return (STRNCMP(p, "break", 5) == 0 && !vim_isIDc(p[5])); } /* * Find the position of a C++ base-class declaration or * constructor-initialization. eg: * * class MyClass : * baseClass <-- here * class MyClass : public baseClass, * anotherBaseClass <-- here (should probably lineup ??) * MyClass::MyClass(...) : * baseClass(...) <-- here (constructor-initialization) * * This is a lot of guessing. Watch out for "cond ? func() : foo". */ static int cin_is_cpp_baseclass( cpp_baseclass_cache_T *cached) // input and output { lpos_T *pos = &cached->lpos; // find position char_u *s; int class_or_struct, lookfor_ctor_init, cpp_base_class; linenr_T lnum = curwin->w_cursor.lnum; char_u *line = ml_get_curline(); if (pos->lnum <= lnum) return cached->found; // Use the cached result pos->col = 0; s = skipwhite(line); if (*s == '#') // skip #define FOO x ? (x) : x return FALSE; s = cin_skipcomment(s); if (*s == NUL) return FALSE; cpp_base_class = lookfor_ctor_init = class_or_struct = FALSE; // Search for a line starting with '#', empty, ending in ';' or containing // '{' or '}' and start below it. This handles the following situations: // a = cond ? // func() : // asdf; // func::foo() // : something // {} // Foo::Foo (int one, int two) // : something(4), // somethingelse(3) // {} while (lnum > 1) { line = ml_get(lnum - 1); s = skipwhite(line); if (*s == '#' || *s == NUL) break; while (*s != NUL) { s = cin_skipcomment(s); if (*s == '{' || *s == '}' || (*s == ';' && cin_nocode(s + 1))) break; if (*s != NUL) ++s; } if (*s != NUL) break; --lnum; } pos->lnum = lnum; line = ml_get(lnum); s = line; for (;;) { if (*s == NUL) { if (lnum == curwin->w_cursor.lnum) break; // Continue in the cursor line. line = ml_get(++lnum); s = line; } if (s == line) { // don't recognize "case (foo):" as a baseclass if (cin_iscase(s, FALSE)) break; s = cin_skipcomment(line); if (*s == NUL) continue; } if (s[0] == '"' || (s[0] == 'R' && s[1] == '"')) s = skip_string(s) + 1; else if (s[0] == ':') { if (s[1] == ':') { // skip double colon. 
It can't be a constructor // initialization any more lookfor_ctor_init = FALSE; s = cin_skipcomment(s + 2); } else if (lookfor_ctor_init || class_or_struct) { // we have something found, that looks like the start of // cpp-base-class-declaration or constructor-initialization cpp_base_class = TRUE; lookfor_ctor_init = class_or_struct = FALSE; pos->col = 0; s = cin_skipcomment(s + 1); } else s = cin_skipcomment(s + 1); } else if ((STRNCMP(s, "class", 5) == 0 && !vim_isIDc(s[5])) || (STRNCMP(s, "struct", 6) == 0 && !vim_isIDc(s[6]))) { class_or_struct = TRUE; lookfor_ctor_init = FALSE; if (*s == 'c') s = cin_skipcomment(s + 5); else s = cin_skipcomment(s + 6); } else { if (s[0] == '{' || s[0] == '}' || s[0] == ';') { cpp_base_class = lookfor_ctor_init = class_or_struct = FALSE; } else if (s[0] == ')') { // Constructor-initialization is assumed if we come across // something like "):" class_or_struct = FALSE; lookfor_ctor_init = TRUE; } else if (s[0] == '?') { // Avoid seeing '() :' after '?' as constructor init. return FALSE; } else if (!vim_isIDc(s[0])) { // if it is not an identifier, we are wrong class_or_struct = FALSE; lookfor_ctor_init = FALSE; } else if (pos->col == 0) { // it can't be a constructor-initialization any more lookfor_ctor_init = FALSE; // the first statement starts here: lineup with this one... if (cpp_base_class) pos->col = (colnr_T)(s - line); } // When the line ends in a comma don't align with it. if (lnum == curwin->w_cursor.lnum && *s == ',' && cin_nocode(s + 1)) pos->col = 0; s = cin_skipcomment(s + 1); } } cached->found = cpp_base_class; if (cpp_base_class) pos->lnum = lnum; return cpp_base_class; } static int get_baseclass_amount(int col) { int amount; colnr_T vcol; pos_T *trypos; if (col == 0) { amount = get_indent(); if (find_last_paren(ml_get_curline(), '(', ')') && (trypos = find_match_paren(curbuf->b_ind_maxparen)) != NULL) amount = get_indent_lnum(trypos->lnum); // XXX if (!cin_ends_in(ml_get_curline(), (char_u *)",", NULL)) amount += curbuf->b_ind_cpp_baseclass; } else { curwin->w_cursor.col = col; getvcol(curwin, &curwin->w_cursor, &vcol, NULL, NULL); amount = (int)vcol; } if (amount < curbuf->b_ind_cpp_baseclass) amount = curbuf->b_ind_cpp_baseclass; return amount; } /* * Find the '{' at the start of the block we are in. * Return NULL if no match found. * Ignore a '{' that is in a comment, makes indenting the next three lines * work. */ // foo() // { // } static pos_T * find_start_brace(void) // XXX { pos_T cursor_save; pos_T *trypos; pos_T *pos; static pos_T pos_copy; cursor_save = curwin->w_cursor; while ((trypos = findmatchlimit(NULL, '{', FM_BLOCKSTOP, 0)) != NULL) { pos_copy = *trypos; // copy pos_T, next findmatch will change it trypos = &pos_copy; curwin->w_cursor = *trypos; pos = NULL; // ignore the { if it's in a // or / * * / comment if ((colnr_T)cin_skip2pos(trypos) == trypos->col && (pos = ind_find_start_CORS(NULL)) == NULL) // XXX break; if (pos != NULL) curwin->w_cursor = *pos; } curwin->w_cursor = cursor_save; return trypos; } /* * Find the matching '(', ignoring it if it is in a comment or before an * unmatched {. * Return NULL if no match found. */ static pos_T * find_match_paren_after_brace (int ind_maxparen) // XXX { pos_T *trypos = find_match_paren(ind_maxparen); if (trypos != NULL) { pos_T *tryposBrace = find_start_brace(); // If both an unmatched '(' and '{' is found. Ignore the '(' // position if the '{' is further down. if (tryposBrace != NULL && (trypos->lnum != tryposBrace->lnum ? 
trypos->lnum < tryposBrace->lnum : trypos->col < tryposBrace->col)) trypos = NULL; } return trypos; } /* * Return ind_maxparen corrected for the difference in line number between the * cursor position and "startpos". This makes sure that searching for a * matching paren above the cursor line doesn't find a match because of * looking a few lines further. */ static int corr_ind_maxparen(pos_T *startpos) { long n = (long)startpos->lnum - (long)curwin->w_cursor.lnum; if (n > 0 && n < curbuf->b_ind_maxparen / 2) return curbuf->b_ind_maxparen - (int)n; return curbuf->b_ind_maxparen; } /* * Parse 'cinoptions' and set the values in "curbuf". * Must be called when 'cinoptions', 'shiftwidth' and/or 'tabstop' changes. */ void parse_cino(buf_T *buf) { char_u *p; char_u *l; char_u *digits; int n; int divider; int fraction = 0; int sw = (int)get_sw_value(buf); // Set the default values. // Spaces from a block's opening brace the prevailing indent for that // block should be. buf->b_ind_level = sw; // Spaces from the edge of the line an open brace that's at the end of a // line is imagined to be. buf->b_ind_open_imag = 0; // Spaces from the prevailing indent for a line that is not preceded by // an opening brace. buf->b_ind_no_brace = 0; // Column where the first { of a function should be located }. buf->b_ind_first_open = 0; // Spaces from the prevailing indent a leftmost open brace should be // located. buf->b_ind_open_extra = 0; // Spaces from the matching open brace (real location for one at the left // edge; imaginary location from one that ends a line) the matching close // brace should be located. buf->b_ind_close_extra = 0; // Spaces from the edge of the line an open brace sitting in the leftmost // column is imagined to be. buf->b_ind_open_left_imag = 0; // Spaces jump labels should be shifted to the left if N is non-negative, // otherwise the jump label will be put to column 1. buf->b_ind_jump_label = -1; // Spaces from the switch() indent a "case xx" label should be located. buf->b_ind_case = sw; // Spaces from the "case xx:" code after a switch() should be located. buf->b_ind_case_code = sw; // Lineup break at end of case in switch() with case label. buf->b_ind_case_break = 0; // Spaces from the class declaration indent a scope declaration label // should be located. buf->b_ind_scopedecl = sw; // Spaces from the scope declaration label code should be located. buf->b_ind_scopedecl_code = sw; // Amount K&R-style parameters should be indented. buf->b_ind_param = sw; // Amount a function type spec should be indented. buf->b_ind_func_type = sw; // Amount a cpp base class declaration or constructor initialization // should be indented. buf->b_ind_cpp_baseclass = sw; // additional spaces beyond the prevailing indent a continuation line // should be located. buf->b_ind_continuation = sw; // Spaces from the indent of the line with an unclosed parenthesis. buf->b_ind_unclosed = sw * 2; // Spaces from the indent of the line with an unclosed parenthesis, which // itself is also unclosed. buf->b_ind_unclosed2 = sw; // Suppress ignoring spaces from the indent of a line starting with an // unclosed parenthesis. buf->b_ind_unclosed_noignore = 0; // If the opening paren is the last nonwhite character on the line, and // b_ind_unclosed_wrapped is nonzero, use this indent relative to the outer // context (for very long lines). buf->b_ind_unclosed_wrapped = 0; // Suppress ignoring white space when lining up with the character after // an unclosed parenthesis. 
buf->b_ind_unclosed_whiteok = 0; // Indent a closing parenthesis under the line start of the matching // opening parenthesis. buf->b_ind_matching_paren = 0; // Indent a closing parenthesis under the previous line. buf->b_ind_paren_prev = 0; // Extra indent for comments. buf->b_ind_comment = 0; // Spaces from the comment opener when there is nothing after it. buf->b_ind_in_comment = 3; // Boolean: if non-zero, use b_ind_in_comment even if there is something // after the comment opener. buf->b_ind_in_comment2 = 0; // Max lines to search for an open paren. buf->b_ind_maxparen = 20; // Max lines to search for an open comment. buf->b_ind_maxcomment = 70; // Handle braces for java code. buf->b_ind_java = 0; // Not to confuse JS object properties with labels. buf->b_ind_js = 0; // Handle blocked cases correctly. buf->b_ind_keep_case_label = 0; // Handle C++ namespace. buf->b_ind_cpp_namespace = 0; // Handle continuation lines containing conditions of if(), for() and // while(). buf->b_ind_if_for_while = 0; // indentation for # comments buf->b_ind_hash_comment = 0; // Handle C++ extern "C" or "C++" buf->b_ind_cpp_extern_c = 0; // Handle C #pragma directives buf->b_ind_pragma = 0; for (p = buf->b_p_cino; *p; ) { l = p++; if (*p == '-') ++p; digits = p; // remember where the digits start n = getdigits(&p); divider = 0; if (*p == '.') // ".5s" means a fraction { fraction = atol((char *)++p); while (VIM_ISDIGIT(*p)) { ++p; if (divider) divider *= 10; else divider = 10; } } if (*p == 's') // "2s" means two times 'shiftwidth' { if (p == digits) n = sw; // just "s" is one 'shiftwidth' else { n *= sw; if (divider) n += (sw * fraction + divider / 2) / divider; } ++p; } if (l[1] == '-') n = -n; // When adding an entry here, also update the default 'cinoptions' in // doc/indent.txt, and add explanation for it! 
switch (*l) { case '>': buf->b_ind_level = n; break; case 'e': buf->b_ind_open_imag = n; break; case 'n': buf->b_ind_no_brace = n; break; case 'f': buf->b_ind_first_open = n; break; case '{': buf->b_ind_open_extra = n; break; case '}': buf->b_ind_close_extra = n; break; case '^': buf->b_ind_open_left_imag = n; break; case 'L': buf->b_ind_jump_label = n; break; case ':': buf->b_ind_case = n; break; case '=': buf->b_ind_case_code = n; break; case 'b': buf->b_ind_case_break = n; break; case 'p': buf->b_ind_param = n; break; case 't': buf->b_ind_func_type = n; break; case '/': buf->b_ind_comment = n; break; case 'c': buf->b_ind_in_comment = n; break; case 'C': buf->b_ind_in_comment2 = n; break; case 'i': buf->b_ind_cpp_baseclass = n; break; case '+': buf->b_ind_continuation = n; break; case '(': buf->b_ind_unclosed = n; break; case 'u': buf->b_ind_unclosed2 = n; break; case 'U': buf->b_ind_unclosed_noignore = n; break; case 'W': buf->b_ind_unclosed_wrapped = n; break; case 'w': buf->b_ind_unclosed_whiteok = n; break; case 'm': buf->b_ind_matching_paren = n; break; case 'M': buf->b_ind_paren_prev = n; break; case ')': buf->b_ind_maxparen = n; break; case '*': buf->b_ind_maxcomment = n; break; case 'g': buf->b_ind_scopedecl = n; break; case 'h': buf->b_ind_scopedecl_code = n; break; case 'j': buf->b_ind_java = n; break; case 'J': buf->b_ind_js = n; break; case 'l': buf->b_ind_keep_case_label = n; break; case '#': buf->b_ind_hash_comment = n; break; case 'N': buf->b_ind_cpp_namespace = n; break; case 'k': buf->b_ind_if_for_while = n; break; case 'E': buf->b_ind_cpp_extern_c = n; break; case 'P': buf->b_ind_pragma = n; break; } if (*p == ',') ++p; } } static int find_match(int lookfor, linenr_T ourscope) { char_u *look; pos_T *theirscope; char_u *mightbeif; int elselevel; int whilelevel; if (lookfor == LOOKFOR_IF) { elselevel = 1; whilelevel = 0; } else { elselevel = 0; whilelevel = 1; } curwin->w_cursor.col = 0; while (curwin->w_cursor.lnum > ourscope + 1) { curwin->w_cursor.lnum--; curwin->w_cursor.col = 0; look = cin_skipcomment(ml_get_curline()); if (cin_iselse(look) || cin_isif(look) || cin_isdo(look) // XXX || cin_iswhileofdo(look, curwin->w_cursor.lnum)) { // if we've gone outside the braces entirely, // we must be out of scope... theirscope = find_start_brace(); // XXX if (theirscope == NULL) break; // and if the brace enclosing this is further // back than the one enclosing the else, we're // out of luck too. if (theirscope->lnum < ourscope) break; // and if they're enclosed in a *deeper* brace, // then we can ignore it because it's in a // different scope... if (theirscope->lnum > ourscope) continue; // if it was an "else" (that's not an "else if") // then we need to go back to another if, so // increment elselevel look = cin_skipcomment(ml_get_curline()); if (cin_iselse(look)) { mightbeif = cin_skipcomment(look + 4); if (!cin_isif(mightbeif)) ++elselevel; continue; } // if it was a "while" then we need to go back to // another "do", so increment whilelevel. XXX if (cin_iswhileofdo(look, curwin->w_cursor.lnum)) { ++whilelevel; continue; } // If it's an "if" decrement elselevel look = cin_skipcomment(ml_get_curline()); if (cin_isif(look)) { elselevel--; // When looking for an "if" ignore "while"s that // get in the way. if (elselevel == 0 && lookfor == LOOKFOR_IF) whilelevel = 0; } // If it's a "do" decrement whilelevel if (cin_isdo(look)) whilelevel--; // if we've used up all the elses, then // this must be the if that we want! // match the indent level of that if. 
if (elselevel <= 0 && whilelevel <= 0) return OK; } } return FAIL; } /* * Return the desired indent for C code. * Return -1 if the indent should be left alone (inside a raw string). */ int get_c_indent(void) { pos_T cur_curpos; int amount; int scope_amount; int cur_amount = MAXCOL; colnr_T col; char_u *theline; char_u *linecopy; pos_T *trypos; pos_T *comment_pos; pos_T *tryposBrace = NULL; pos_T tryposCopy; pos_T our_paren_pos; char_u *start; int start_brace; #define BRACE_IN_COL0 1 // '{' is in column 0 #define BRACE_AT_START 2 // '{' is at start of line #define BRACE_AT_END 3 // '{' is at end of line linenr_T ourscope; char_u *l; char_u *look; char_u terminated; int lookfor; int whilelevel; linenr_T lnum; int n; int iscase; int lookfor_break; int lookfor_cpp_namespace = FALSE; int cont_amount = 0; // amount for continuation line int original_line_islabel; int added_to_amount = 0; int js_cur_has_key = 0; linenr_T raw_string_start = 0; cpp_baseclass_cache_T cache_cpp_baseclass = { FALSE, { MAXLNUM, 0 } }; // make a copy, value is changed below int ind_continuation = curbuf->b_ind_continuation; // remember where the cursor was when we started cur_curpos = curwin->w_cursor; // if we are at line 1 zero indent is fine, right? if (cur_curpos.lnum == 1) return 0; // Get a copy of the current contents of the line. // This is required, because only the most recent line obtained with // ml_get is valid! linecopy = vim_strsave(ml_get(cur_curpos.lnum)); if (linecopy == NULL) return 0; // In insert mode and the cursor is on a ')' truncate the line at the // cursor position. We don't want to line up with the matching '(' when // inserting new stuff. // For unknown reasons the cursor might be past the end of the line, thus // check for that. if ((State & MODE_INSERT) && curwin->w_cursor.col < (colnr_T)STRLEN(linecopy) && linecopy[curwin->w_cursor.col] == ')') linecopy[curwin->w_cursor.col] = NUL; theline = skipwhite(linecopy); // move the cursor to the start of the line curwin->w_cursor.col = 0; original_line_islabel = cin_islabel(); // XXX // If we are inside a raw string don't change the indent. // Ignore a raw string inside a comment. comment_pos = ind_find_start_comment(); if (comment_pos != NULL) { // findmatchlimit() static pos is overwritten, make a copy tryposCopy = *comment_pos; comment_pos = &tryposCopy; } trypos = find_start_rawstring(curbuf->b_ind_maxcomment); if (trypos != NULL && (comment_pos == NULL || LT_POS(*trypos, *comment_pos))) { amount = -1; goto laterend; } // #defines and so on go at the left when included in 'cinkeys', // excluding pragmas when customized in 'cinoptions' if (*theline == '#' && (*linecopy == '#' || in_cinkeys('#', ' ', TRUE))) { char_u *directive = skipwhite(theline + 1); if (curbuf->b_ind_pragma == 0 || STRNCMP(directive, "pragma", 6) != 0) { amount = curbuf->b_ind_hash_comment; goto theend; } } // Is it a non-case label? Then that goes at the left margin too unless: // - JS flag is set. // - 'L' item has a positive value. if (original_line_islabel && !curbuf->b_ind_js && curbuf->b_ind_jump_label < 0) { amount = 0; goto theend; } // If we're inside a "//" comment and there is a "//" comment in a // previous line, lineup with that one. if (cin_islinecomment(theline)) { pos_T linecomment_pos; trypos = find_line_comment(); // XXX if (trypos == NULL && curwin->w_cursor.lnum > 1) { // There may be a statement before the comment, search from the end // of the line for a comment start. 
linecomment_pos.col = check_linecomment(ml_get(curwin->w_cursor.lnum - 1)); if (linecomment_pos.col != MAXCOL) { trypos = &linecomment_pos; trypos->lnum = curwin->w_cursor.lnum - 1; } } if (trypos != NULL) { // find how indented the line beginning the comment is getvcol(curwin, trypos, &col, NULL, NULL); amount = col; goto theend; } } // If we're inside a comment and not looking at the start of the // comment, try using the 'comments' option. if (!cin_iscomment(theline) && comment_pos != NULL) // XXX { int lead_start_len = 2; int lead_middle_len = 1; char_u lead_start[COM_MAX_LEN]; // start-comment string char_u lead_middle[COM_MAX_LEN]; // middle-comment string char_u lead_end[COM_MAX_LEN]; // end-comment string char_u *p; int start_align = 0; int start_off = 0; int done = FALSE; // find how indented the line beginning the comment is getvcol(curwin, comment_pos, &col, NULL, NULL); amount = col; *lead_start = NUL; *lead_middle = NUL; p = curbuf->b_p_com; while (*p != NUL) { int align = 0; int off = 0; int what = 0; while (*p != NUL && *p != ':') { if (*p == COM_START || *p == COM_END || *p == COM_MIDDLE) what = *p++; else if (*p == COM_LEFT || *p == COM_RIGHT) align = *p++; else if (VIM_ISDIGIT(*p) || *p == '-') off = getdigits(&p); else ++p; } if (*p == ':') ++p; (void)copy_option_part(&p, lead_end, COM_MAX_LEN, ","); if (what == COM_START) { STRCPY(lead_start, lead_end); lead_start_len = (int)STRLEN(lead_start); start_off = off; start_align = align; } else if (what == COM_MIDDLE) { STRCPY(lead_middle, lead_end); lead_middle_len = (int)STRLEN(lead_middle); } else if (what == COM_END) { // If our line starts with the middle comment string, line it // up with the comment opener per the 'comments' option. if (STRNCMP(theline, lead_middle, lead_middle_len) == 0 && STRNCMP(theline, lead_end, STRLEN(lead_end)) != 0) { done = TRUE; if (curwin->w_cursor.lnum > 1) { // If the start comment string matches in the previous // line, use the indent of that line plus offset. If // the middle comment string matches in the previous // line, use the indent of that line. XXX look = skipwhite(ml_get(curwin->w_cursor.lnum - 1)); if (STRNCMP(look, lead_start, lead_start_len) == 0) amount = get_indent_lnum(curwin->w_cursor.lnum - 1); else if (STRNCMP(look, lead_middle, lead_middle_len) == 0) { amount = get_indent_lnum(curwin->w_cursor.lnum - 1); break; } // If the start comment string doesn't match with the // start of the comment, skip this entry. XXX else if (STRNCMP(ml_get(comment_pos->lnum) + comment_pos->col, lead_start, lead_start_len) != 0) continue; } if (start_off != 0) amount += start_off; else if (start_align == COM_RIGHT) amount += vim_strsize(lead_start) - vim_strsize(lead_middle); break; } // If our line starts with the end comment string, line it up // with the middle comment if (STRNCMP(theline, lead_middle, lead_middle_len) != 0 && STRNCMP(theline, lead_end, STRLEN(lead_end)) == 0) { amount = get_indent_lnum(curwin->w_cursor.lnum - 1); // XXX if (off != 0) amount += off; else if (align == COM_RIGHT) amount += vim_strsize(lead_start) - vim_strsize(lead_middle); done = TRUE; break; } } } // If our line starts with an asterisk, line up with the // asterisk in the comment opener; otherwise, line up // with the first character of the comment text. if (done) ; else if (theline[0] == '*') amount += 1; else { // If we are more than one line away from the comment opener, take // the indent of the previous non-empty line. 
If 'cino' has "CO" // and we are just below the comment opener and there are any // white characters after it line up with the text after it; // otherwise, add the amount specified by "c" in 'cino' amount = -1; for (lnum = cur_curpos.lnum - 1; lnum > comment_pos->lnum; --lnum) { if (linewhite(lnum)) // skip blank lines continue; amount = get_indent_lnum(lnum); // XXX break; } if (amount == -1) // use the comment opener { if (!curbuf->b_ind_in_comment2) { start = ml_get(comment_pos->lnum); look = start + comment_pos->col + 2; // skip / and * if (*look != NUL) // if something after it comment_pos->col = (colnr_T)(skipwhite(look) - start); } getvcol(curwin, comment_pos, &col, NULL, NULL); amount = col; if (curbuf->b_ind_in_comment2 || *look == NUL) amount += curbuf->b_ind_in_comment; } } goto theend; } // Are we looking at a ']' that has a match? if (*skipwhite(theline) == ']' && (trypos = find_match_char('[', curbuf->b_ind_maxparen)) != NULL) { // align with the line containing the '['. amount = get_indent_lnum(trypos->lnum); goto theend; } // Are we inside parentheses or braces? XXX if (((trypos = find_match_paren(curbuf->b_ind_maxparen)) != NULL && curbuf->b_ind_java == 0) || (tryposBrace = find_start_brace()) != NULL || trypos != NULL) { if (trypos != NULL && tryposBrace != NULL) { // Both an unmatched '(' and '{' is found. Use the one which is // closer to the current cursor position, set the other to NULL. if (trypos->lnum != tryposBrace->lnum ? trypos->lnum < tryposBrace->lnum : trypos->col < tryposBrace->col) trypos = NULL; else tryposBrace = NULL; } if (trypos != NULL) { // If the matching paren is more than one line away, use the indent of // a previous non-empty line that matches the same paren. if (theline[0] == ')' && curbuf->b_ind_paren_prev) { // Line up with the start of the matching paren line. amount = get_indent_lnum(curwin->w_cursor.lnum - 1); // XXX } else { amount = -1; our_paren_pos = *trypos; for (lnum = cur_curpos.lnum - 1; lnum > our_paren_pos.lnum; --lnum) { l = skipwhite(ml_get(lnum)); if (cin_nocode(l)) // skip comment lines continue; if (cin_ispreproc_cont(&l, &lnum, &amount)) continue; // ignore #define, #if, etc. curwin->w_cursor.lnum = lnum; // Skip a comment or raw string. XXX if ((trypos = ind_find_start_CORS(NULL)) != NULL) { lnum = trypos->lnum + 1; continue; } // XXX if ((trypos = find_match_paren( corr_ind_maxparen(&cur_curpos))) != NULL && trypos->lnum == our_paren_pos.lnum && trypos->col == our_paren_pos.col) { amount = get_indent_lnum(lnum); // XXX if (theline[0] == ')') { if (our_paren_pos.lnum != lnum && cur_amount > amount) cur_amount = amount; amount = -1; } break; } } } // Line up with line where the matching paren is. XXX // If the line starts with a '(' or the indent for unclosed // parentheses is zero, line up with the unclosed parentheses. if (amount == -1) { int ignore_paren_col = 0; int is_if_for_while = 0; if (curbuf->b_ind_if_for_while) { // Look for the outermost opening parenthesis on this line // and check whether it belongs to an "if", "for" or "while". 
pos_T cursor_save = curwin->w_cursor; pos_T outermost; char_u *line; trypos = &our_paren_pos; do { outermost = *trypos; curwin->w_cursor.lnum = outermost.lnum; curwin->w_cursor.col = outermost.col; trypos = find_match_paren(curbuf->b_ind_maxparen); } while (trypos && trypos->lnum == outermost.lnum); curwin->w_cursor = cursor_save; line = ml_get(outermost.lnum); is_if_for_while = cin_is_if_for_while_before_offset(line, &outermost.col); } amount = skip_label(our_paren_pos.lnum, &look); look = skipwhite(look); if (*look == '(') { linenr_T save_lnum = curwin->w_cursor.lnum; char_u *line; int look_col; // Ignore a '(' in front of the line that has a match before // our matching '('. curwin->w_cursor.lnum = our_paren_pos.lnum; line = ml_get_curline(); look_col = (int)(look - line); curwin->w_cursor.col = look_col + 1; if ((trypos = findmatchlimit(NULL, ')', 0, curbuf->b_ind_maxparen)) != NULL && trypos->lnum == our_paren_pos.lnum && trypos->col < our_paren_pos.col) ignore_paren_col = trypos->col + 1; curwin->w_cursor.lnum = save_lnum; look = ml_get(our_paren_pos.lnum) + look_col; } if (theline[0] == ')' || (curbuf->b_ind_unclosed == 0 && is_if_for_while == 0) || (!curbuf->b_ind_unclosed_noignore && *look == '(' && ignore_paren_col == 0)) { // If we're looking at a close paren, line up right there; // otherwise, line up with the next (non-white) character. // When b_ind_unclosed_wrapped is set and the matching paren is // the last nonwhite character of the line, use either the // indent of the current line or the indentation of the next // outer paren and add b_ind_unclosed_wrapped (for very long // lines). if (theline[0] != ')') { cur_amount = MAXCOL; l = ml_get(our_paren_pos.lnum); if (curbuf->b_ind_unclosed_wrapped && cin_ends_in(l, (char_u *)"(", NULL)) { // look for opening unmatched paren, indent one level // for each additional level n = 1; for (col = 0; col < our_paren_pos.col; ++col) { switch (l[col]) { case '(': case '{': ++n; break; case ')': case '}': if (n > 1) --n; break; } } our_paren_pos.col = 0; amount += n * curbuf->b_ind_unclosed_wrapped; } else if (curbuf->b_ind_unclosed_whiteok) our_paren_pos.col++; else { col = our_paren_pos.col + 1; while (VIM_ISWHITE(l[col])) col++; if (l[col] != NUL) // In case of trailing space our_paren_pos.col = col; else our_paren_pos.col++; } } // Find how indented the paren is, or the character after it // if we did the above "if". if (our_paren_pos.col > 0) { getvcol(curwin, &our_paren_pos, &col, NULL, NULL); if (cur_amount > (int)col) cur_amount = col; } } if (theline[0] == ')' && curbuf->b_ind_matching_paren) { // Line up with the start of the matching paren line. } else if ((curbuf->b_ind_unclosed == 0 && is_if_for_while == 0) || (!curbuf->b_ind_unclosed_noignore && *look == '(' && ignore_paren_col == 0)) { if (cur_amount != MAXCOL) amount = cur_amount; } else { // Add b_ind_unclosed2 for each '(' before our matching one, // but ignore (void) before the line (ignore_paren_col). 
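                // Illustrative example: for
                //     x = foo(bar(asdf,
                //             here;
                // the '(' after "foo" lies before the matching '(' after
                // "bar", so one extra b_ind_unclosed2 is added; a ')'
                // passed on the way back cancels one out again.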
                col = our_paren_pos.col;
                while ((int)our_paren_pos.col > ignore_paren_col)
                {
                    --our_paren_pos.col;
                    switch (*ml_get_pos(&our_paren_pos))
                    {
                        case '(': amount += curbuf->b_ind_unclosed2;
                                  col = our_paren_pos.col;
                                  break;
                        case ')': amount -= curbuf->b_ind_unclosed2;
                                  col = MAXCOL;
                                  break;
                    }
                }

                // Use b_ind_unclosed once, when the first '(' is not inside
                // braces
                if (col == MAXCOL)
                    amount += curbuf->b_ind_unclosed;
                else
                {
                    curwin->w_cursor.lnum = our_paren_pos.lnum;
                    curwin->w_cursor.col = col;
                    if (find_match_paren_after_brace(curbuf->b_ind_maxparen)
                                                                     != NULL)
                        amount += curbuf->b_ind_unclosed2;
                    else
                    {
                        if (is_if_for_while)
                            amount += curbuf->b_ind_if_for_while;
                        else
                            amount += curbuf->b_ind_unclosed;
                    }
                }

                // For a line starting with ')' use the minimum of the two
                // positions, to avoid giving it more indent than the previous
                // lines:
                //     func_long_name(          if (x
                //         arg                      && yy
                //         ) ^ not here         ) ^ not here
                if (cur_amount < amount)
                    amount = cur_amount;
            }
        }

        // add extra indent for a comment
        if (cin_iscomment(theline))
            amount += curbuf->b_ind_comment;
    }
    else
    {
        // We are inside braces, there is a { before this line at the position
        // stored in tryposBrace.
        // Make a copy of tryposBrace, it may point to pos_copy inside
        // find_start_brace(), which may be changed somewhere.
        tryposCopy = *tryposBrace;
        tryposBrace = &tryposCopy;
        trypos = tryposBrace;
        ourscope = trypos->lnum;
        start = ml_get(ourscope);

        // Now figure out how indented the line is in general.
        // If the brace was at the start of the line, we use that;
        // otherwise, check out the indentation of the line as
        // a whole and then add the "imaginary indent" to that.
        look = skipwhite(start);
        if (*look == '{')
        {
            getvcol(curwin, trypos, &col, NULL, NULL);
            amount = col;
            if (*start == '{')
                start_brace = BRACE_IN_COL0;
            else
                start_brace = BRACE_AT_START;
        }
        else
        {
            // That opening brace might have been on a continuation
            // line.  If so, find the start of the line.
            curwin->w_cursor.lnum = ourscope;

            // Position the cursor over the rightmost paren, so that
            // matching it will take us back to the start of the line.
            lnum = ourscope;
            if (find_last_paren(start, '(', ')')
                    && (trypos = find_match_paren(
                                          curbuf->b_ind_maxparen)) != NULL)
                lnum = trypos->lnum;

            // It could have been something like
            //     case 1: if (asdf &&
            //                  ldfd) {
            //             }
            if ((curbuf->b_ind_js || curbuf->b_ind_keep_case_label)
                       && cin_iscase(skipwhite(ml_get_curline()), FALSE))
                amount = get_indent();
            else if (curbuf->b_ind_js)
                amount = get_indent_lnum(lnum);
            else
                amount = skip_label(lnum, &l);

            start_brace = BRACE_AT_END;
        }

        // For Javascript check if the line starts with "key:".
        if (curbuf->b_ind_js)
            js_cur_has_key = cin_has_js_key(theline);

        // If we're looking at a closing brace, that's where
        // we want to be.  Otherwise, add the amount of room
        // that an indent is supposed to be.
        if (theline[0] == '}')
        {
            // they may want closing braces to line up with something
            // other than the open brace.  Indulge them, if so.
            amount += curbuf->b_ind_close_extra;
        }
        else
        {
            // If we're looking at an "else", try to find an "if"
            // to match it with.
            // If we're looking at a "while", try to find a "do"
            // to match it with.
            lookfor = LOOKFOR_INITIAL;
            if (cin_iselse(theline))
                lookfor = LOOKFOR_IF;
            else if (cin_iswhileofdo(theline, cur_curpos.lnum)) // XXX
                lookfor = LOOKFOR_DO;
            if (lookfor != LOOKFOR_INITIAL)
            {
                curwin->w_cursor.lnum = cur_curpos.lnum;
                if (find_match(lookfor, ourscope) == OK)
                {
                    amount = get_indent();      // XXX
                    goto theend;
                }
            }

            // We get here if we are not on a "while-of-do" or "else" (or
            // failed to find a matching "if").
// Search backwards for something to line up with. // First set amount for when we don't find anything. // if the '{' is _really_ at the left margin, use the imaginary // location of a left-margin brace. Otherwise, correct the // location for b_ind_open_extra. if (start_brace == BRACE_IN_COL0) // '{' is in column 0 { amount = curbuf->b_ind_open_left_imag; lookfor_cpp_namespace = TRUE; } else if (start_brace == BRACE_AT_START && lookfor_cpp_namespace) // '{' is at start { lookfor_cpp_namespace = TRUE; } else { if (start_brace == BRACE_AT_END) // '{' is at end of line { amount += curbuf->b_ind_open_imag; l = skipwhite(ml_get_curline()); if (cin_is_cpp_namespace(l)) amount += curbuf->b_ind_cpp_namespace; else if (cin_is_cpp_extern_c(l)) amount += curbuf->b_ind_cpp_extern_c; } else { // Compensate for adding b_ind_open_extra later. amount -= curbuf->b_ind_open_extra; if (amount < 0) amount = 0; } } lookfor_break = FALSE; if (cin_iscase(theline, FALSE)) // it's a switch() label { lookfor = LOOKFOR_CASE; // find a previous switch() label amount += curbuf->b_ind_case; } else if (cin_isscopedecl(theline)) // private:, ... { lookfor = LOOKFOR_SCOPEDECL; // class decl is this block amount += curbuf->b_ind_scopedecl; } else { if (curbuf->b_ind_case_break && cin_isbreak(theline)) // break; ... lookfor_break = TRUE; lookfor = LOOKFOR_INITIAL; // b_ind_level from start of block amount += curbuf->b_ind_level; } scope_amount = amount; whilelevel = 0; // Search backwards. If we find something we recognize, line up // with that. // // If we're looking at an open brace, indent // the usual amount relative to the conditional // that opens the block. curwin->w_cursor = cur_curpos; for (;;) { curwin->w_cursor.lnum--; curwin->w_cursor.col = 0; // If we went all the way back to the start of our scope, line // up with it. if (curwin->w_cursor.lnum <= ourscope) { // We reached end of scope: // If looking for an enum or structure initialization // go further back: // If it is an initializer (enum xxx or xxx =), then // don't add ind_continuation, otherwise it is a variable // declaration: // int x, // here; <-- add ind_continuation if (lookfor == LOOKFOR_ENUM_OR_INIT) { if (curwin->w_cursor.lnum == 0 || curwin->w_cursor.lnum < ourscope - curbuf->b_ind_maxparen) { // nothing found (abuse curbuf->b_ind_maxparen as // limit) assume terminated line (i.e. a variable // initialization) if (cont_amount > 0) amount = cont_amount; else if (!curbuf->b_ind_js) amount += ind_continuation; break; } l = ml_get_curline(); // If we're in a comment or raw string now, skip to // the start of it. trypos = ind_find_start_CORS(NULL); if (trypos != NULL) { curwin->w_cursor.lnum = trypos->lnum + 1; curwin->w_cursor.col = 0; continue; } // Skip preprocessor directives and blank lines. if (cin_ispreproc_cont(&l, &curwin->w_cursor.lnum, &amount)) continue; if (cin_nocode(l)) continue; terminated = cin_isterminated(l, FALSE, TRUE); // If we are at top level and the line looks like a // function declaration, we are done // (it's a variable declaration). if (start_brace != BRACE_IN_COL0 || !cin_isfuncdecl(&l, curwin->w_cursor.lnum, 0)) { // if the line is terminated with another ',' // it is a continued variable initialization. // don't add extra indent. // TODO: does not work, if a function // declaration is split over multiple lines: // cin_isfuncdecl returns FALSE then. if (terminated == ',') break; // if it is an enum declaration or an assignment, // we are done. 
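                        // e.g. (illustrative) either of these ends the
                        // search right here:
                        //     enum xxx            foo_T xxx =
                        //     {                   {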
                        if (terminated != ';' && cin_isinit())
                            break;

                        // nothing useful found
                        if (terminated == 0 || terminated == '{')
                            continue;
                    }

                    if (terminated != ';')
                    {
                        // Skip parens and braces. Position the cursor
                        // over the rightmost paren, so that matching it
                        // will take us back to the start of the line.
                        // XXX
                        trypos = NULL;
                        if (find_last_paren(l, '(', ')'))
                            trypos = find_match_paren(
                                                    curbuf->b_ind_maxparen);

                        if (trypos == NULL && find_last_paren(l, '{', '}'))
                            trypos = find_start_brace();

                        if (trypos != NULL)
                        {
                            curwin->w_cursor.lnum = trypos->lnum + 1;
                            curwin->w_cursor.col = 0;
                            continue;
                        }
                    }

                    // it's a variable declaration, add indentation
                    // like in
                    //     int a,
                    //        b;
                    if (cont_amount > 0)
                        amount = cont_amount;
                    else
                        amount += ind_continuation;
                }
                else if (lookfor == LOOKFOR_UNTERM)
                {
                    if (cont_amount > 0)
                        amount = cont_amount;
                    else
                        amount += ind_continuation;
                }
                else
                {
                    if (lookfor != LOOKFOR_TERM
                            && lookfor != LOOKFOR_CPP_BASECLASS
                            && lookfor != LOOKFOR_COMMA)
                    {
                        amount = scope_amount;
                        if (theline[0] == '{')
                        {
                            amount += curbuf->b_ind_open_extra;
                            added_to_amount = curbuf->b_ind_open_extra;
                        }
                    }

                    if (lookfor_cpp_namespace)
                    {
                        // Looking for C++ namespace, need to look further
                        // back.
                        if (curwin->w_cursor.lnum == ourscope)
                            continue;

                        if (curwin->w_cursor.lnum == 0
                                || curwin->w_cursor.lnum
                                           < ourscope - FIND_NAMESPACE_LIM)
                            break;

                        l = ml_get_curline();

                        // If we're in a comment or raw string now, skip
                        // to the start of it.
                        trypos = ind_find_start_CORS(NULL);
                        if (trypos != NULL)
                        {
                            curwin->w_cursor.lnum = trypos->lnum + 1;
                            curwin->w_cursor.col = 0;
                            continue;
                        }

                        // Skip preprocessor directives and blank lines.
                        if (cin_ispreproc_cont(&l, &curwin->w_cursor.lnum,
                                                                   &amount))
                            continue;

                        // Finally the actual check for "namespace".
                        if (cin_is_cpp_namespace(l))
                        {
                            amount += curbuf->b_ind_cpp_namespace
                                                           - added_to_amount;
                            break;
                        }
                        else if (cin_is_cpp_extern_c(l))
                        {
                            amount += curbuf->b_ind_cpp_extern_c
                                                           - added_to_amount;
                            break;
                        }

                        if (cin_nocode(l))
                            continue;
                    }
                }
                break;
            }

            // If we're in a comment or raw string now, skip to the start
            // of it.  XXX
            if ((trypos = ind_find_start_CORS(&raw_string_start)) != NULL)
            {
                curwin->w_cursor.lnum = trypos->lnum + 1;
                curwin->w_cursor.col = 0;
                continue;
            }

            l = ml_get_curline();

            // If this is a switch() label, we may line up relative to that.
            // If this is a C++ scope declaration, do the same.
            iscase = cin_iscase(l, FALSE);
            if (iscase || cin_isscopedecl(l))
            {
                // from here on we are only looking for a cpp base class
                // declaration/initialization
                if (lookfor == LOOKFOR_CPP_BASECLASS)
                    break;

                // When looking for a "do" we are not interested in
                // labels.
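                // (Otherwise a "case x:" passed while searching for the
                // "do" of a "while" could be taken as the line to align
                // with.)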
if (whilelevel > 0) continue; // case xx: // c = 99 + <- this indent plus continuation //-> here; if (lookfor == LOOKFOR_UNTERM || lookfor == LOOKFOR_ENUM_OR_INIT) { if (cont_amount > 0) amount = cont_amount; else amount += ind_continuation; break; } // case xx: <- line up with this case // x = 333; // case yy: if ( (iscase && lookfor == LOOKFOR_CASE) || (iscase && lookfor_break) || (!iscase && lookfor == LOOKFOR_SCOPEDECL)) { // Check that this case label is not for another // switch() XXX if ((trypos = find_start_brace()) == NULL || trypos->lnum == ourscope) { amount = get_indent(); // XXX break; } continue; } n = get_indent_nolabel(curwin->w_cursor.lnum); // XXX // case xx: if (cond) <- line up with this if // y = y + 1; // -> s = 99; // // case xx: // if (cond) <- line up with this line // y = y + 1; // -> s = 99; if (lookfor == LOOKFOR_TERM) { if (n) amount = n; if (!lookfor_break) break; } // case xx: x = x + 1; <- line up with this x // -> y = y + 1; // // case xx: if (cond) <- line up with this if // -> y = y + 1; if (n) { amount = n; l = after_label(ml_get_curline()); if (l != NULL && cin_is_cinword(l)) { if (theline[0] == '{') amount += curbuf->b_ind_open_extra; else amount += curbuf->b_ind_level + curbuf->b_ind_no_brace; } break; } // Try to get the indent of a statement before the switch // label. If nothing is found, line up relative to the // switch label. // break; <- may line up with this line // case xx: // -> y = 1; scope_amount = get_indent() + (iscase // XXX ? curbuf->b_ind_case_code : curbuf->b_ind_scopedecl_code); lookfor = curbuf->b_ind_case_break ? LOOKFOR_NOBREAK : LOOKFOR_ANY; continue; } // Looking for a switch() label or C++ scope declaration, // ignore other lines, skip {}-blocks. if (lookfor == LOOKFOR_CASE || lookfor == LOOKFOR_SCOPEDECL) { if (find_last_paren(l, '{', '}') && (trypos = find_start_brace()) != NULL) { curwin->w_cursor.lnum = trypos->lnum + 1; curwin->w_cursor.col = 0; } continue; } // Ignore jump labels with nothing after them. if (!curbuf->b_ind_js && cin_islabel()) { l = after_label(ml_get_curline()); if (l == NULL || cin_nocode(l)) continue; } // Ignore #defines, #if, etc. // Ignore comment and empty lines. // (need to get the line again, cin_islabel() may have // unlocked it) l = ml_get_curline(); if (cin_ispreproc_cont(&l, &curwin->w_cursor.lnum, &amount) || cin_nocode(l)) continue; // Are we at the start of a cpp base class declaration or // constructor initialization? XXX n = FALSE; if (lookfor != LOOKFOR_TERM && curbuf->b_ind_cpp_baseclass > 0) { n = cin_is_cpp_baseclass(&cache_cpp_baseclass); l = ml_get_curline(); } if (n) { if (lookfor == LOOKFOR_UNTERM) { if (cont_amount > 0) amount = cont_amount; else amount += ind_continuation; } else if (theline[0] == '{') { // Need to find start of the declaration. lookfor = LOOKFOR_UNTERM; ind_continuation = 0; continue; } else // XXX amount = get_baseclass_amount( cache_cpp_baseclass.lpos.col); break; } else if (lookfor == LOOKFOR_CPP_BASECLASS) { // only look, whether there is a cpp base class // declaration or initialization before the opening brace. if (cin_isterminated(l, TRUE, FALSE)) break; else continue; } // What happens next depends on the line being terminated. // If terminated with a ',' only consider it terminating if // there is another unterminated statement behind, eg: // 123, // sizeof // here // Otherwise check whether it is an enumeration or structure // initialisation (not indented) or a variable declaration // (indented). 
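            // e.g. (illustrative):
            //     enum bla           <- initialisation: no extra indent
            //     { FOO,                for the continuation
            //     int foo,           <- declaration: the continuation
            //         bar;              line gets extra indent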
            terminated = cin_isterminated(l, FALSE, TRUE);

            if (js_cur_has_key)
            {
                js_cur_has_key = 0; // only check the first line
                if (curbuf->b_ind_js && terminated == ',')
                {
                    // For Javascript we might be inside an object:
                    //     key: something,  <- align with this
                    //     key: something
                    // or:
                    //     key: something + <- align with this
                    //          something,
                    //     key: something
                    lookfor = LOOKFOR_JS_KEY;
                }
            }
            if (lookfor == LOOKFOR_JS_KEY && cin_has_js_key(l))
            {
                amount = get_indent();
                break;
            }

            if (lookfor == LOOKFOR_COMMA)
            {
                if (tryposBrace != NULL
                            && tryposBrace->lnum >= curwin->w_cursor.lnum)
                    break;
                if (terminated == ',')
                    // line below current line is the one that starts a
                    // (possibly broken) line ending in a comma.
                    break;
                else
                {
                    amount = get_indent();
                    if (curwin->w_cursor.lnum - 1 == ourscope)
                        // line above is start of the scope, thus current
                        // line is the one that starts a (possibly broken)
                        // line ending in a comma.
                        break;
                }
            }

            if (terminated == 0
                    || (lookfor != LOOKFOR_UNTERM && terminated == ','))
            {
                if (lookfor != LOOKFOR_ENUM_OR_INIT
                        && (*skipwhite(l) == '[' || l[STRLEN(l) - 1] == '['))
                    amount += ind_continuation;

                // if we're in the middle of a paren thing,
                // go back to the line that starts it so
                // we can get the right prevailing indent
                //     if ( foo &&
                //          bar )
                // Position the cursor over the rightmost paren, so that
                // matching it will take us back to the start of the line.
                // Ignore a match before the start of the block.
                (void)find_last_paren(l, '(', ')');
                trypos = find_match_paren(corr_ind_maxparen(&cur_curpos));
                if (trypos != NULL && (trypos->lnum < tryposBrace->lnum
                            || (trypos->lnum == tryposBrace->lnum
                                && trypos->col < tryposBrace->col)))
                    trypos = NULL;

                // If we are looking for ',', we also look for matching
                // braces.
                if (trypos == NULL && terminated == ','
                                          && find_last_paren(l, '{', '}'))
                    trypos = find_start_brace();

                if (trypos != NULL)
                {
                    // Check if we are on a case label now.  This is
                    // handled above.
                    //     case xx:  if ( asdf &&
                    //                    asdf)
                    curwin->w_cursor = *trypos;
                    l = ml_get_curline();
                    if (cin_iscase(l, FALSE) || cin_isscopedecl(l))
                    {
                        ++curwin->w_cursor.lnum;
                        curwin->w_cursor.col = 0;
                        continue;
                    }
                }

                // Skip over continuation lines to find the one to get the
                // indent from
                //     char *usethis = "bla{backslash}
                //             bla",
                //         here;
                if (terminated == ',')
                {
                    while (curwin->w_cursor.lnum > 1)
                    {
                        l = ml_get(curwin->w_cursor.lnum - 1);
                        if (*l == NUL || l[STRLEN(l) - 1] != '\\')
                            break;
                        --curwin->w_cursor.lnum;
                        curwin->w_cursor.col = 0;
                    }
                }

                // Get indent and pointer to text for current line,
                // ignoring any jump label.  XXX
                if (curbuf->b_ind_js)
                    cur_amount = get_indent();
                else
                    cur_amount = skip_label(curwin->w_cursor.lnum, &l);

                // If this is just above the line we are indenting, and it
                // starts with a '{', line it up with this line.
                //         while (not)
                // ->      {
                //         }
                if (terminated != ',' && lookfor != LOOKFOR_TERM
                                                     && theline[0] == '{')
                {
                    amount = cur_amount;

                    // Only add b_ind_open_extra when the current line
                    // doesn't start with a '{', which must have a match
                    // in the same line (scope is the same).  Probably:
                    //     { 1, 2 },
                    // ->  { 3, 4 }
                    if (*skipwhite(l) != '{')
                        amount += curbuf->b_ind_open_extra;

                    if (curbuf->b_ind_cpp_baseclass && !curbuf->b_ind_js)
                    {
                        // have to look back to check whether it is a cpp
                        // base class declaration or initialization
                        lookfor = LOOKFOR_CPP_BASECLASS;
                        continue;
                    }
                    break;
                }

                // Check if we are after an "if", "while", etc.
                // Also allow " } else".
                if (cin_is_cinword(l) || cin_iselse(skipwhite(l)))
                {
                    // Found an unterminated line after an if (), line up
                    // with the last one.
                    //     if (cond)
                    //         100 +
                    // ->          here;
                    if (lookfor == LOOKFOR_UNTERM
                                       || lookfor == LOOKFOR_ENUM_OR_INIT)
                    {
                        if (cont_amount > 0)
                            amount = cont_amount;
                        else
                            amount += ind_continuation;
                        break;
                    }

                    // If this is just above the line we are indenting, we
                    // are finished.
                    //         while (not)
                    // ->          here;
                    // Otherwise this indent can be used when the line
                    // before this is terminated.
                    //     yyy;
                    //     if (stat)
                    //         while (not)
                    //             xxx;
                    // ->      here;
                    amount = cur_amount;
                    if (theline[0] == '{')
                        amount += curbuf->b_ind_open_extra;

                    if (lookfor != LOOKFOR_TERM)
                    {
                        amount += curbuf->b_ind_level
                                                + curbuf->b_ind_no_brace;
                        break;
                    }

                    // Special trick: when expecting the while () after a
                    // do, line up with the while()
                    //     do
                    //         x = 1;
                    // ->  here
                    l = skipwhite(ml_get_curline());
                    if (cin_isdo(l))
                    {
                        if (whilelevel == 0)
                            break;
                        --whilelevel;
                    }

                    // When searching for a terminated line, don't use the
                    // one between the "if" and the matching "else".
                    // Need to use the scope of this "else".  XXX
                    // If whilelevel != 0 continue looking for a "do {".
                    if (cin_iselse(l) && whilelevel == 0)
                    {
                        // If we're looking at "} else", let's make sure we
                        // find the opening brace of the enclosing scope,
                        // not the one from "if () {".
                        if (*l == '}')
                            curwin->w_cursor.col =
                                       (colnr_T)(l - ml_get_curline()) + 1;

                        if ((trypos = find_start_brace()) == NULL
                                || find_match(LOOKFOR_IF, trypos->lnum)
                                                                  == FAIL)
                            break;
                    }
                }

                // If we're below an unterminated line that is not an
                // "if" or something, we may line up with this line or
                // add something for a continuation line, depending on
                // the line before this one.
                else
                {
                    // Found two unterminated lines on a row, line up with
                    // the last one.
                    //     c = 99 +
                    //         100 +
                    // ->      here;
                    if (lookfor == LOOKFOR_UNTERM)
                    {
                        // When line ends in a comma add extra indent
                        if (terminated == ',')
                            amount += ind_continuation;
                        break;
                    }

                    if (lookfor == LOOKFOR_ENUM_OR_INIT)
                    {
                        // Found two lines ending in ',', line up with the
                        // lowest one, but check for cpp base class
                        // declaration/initialization, if it is an
                        // opening brace or we are looking just for
                        // enumerations/initializations.
                        if (terminated == ',')
                        {
                            if (curbuf->b_ind_cpp_baseclass == 0)
                                break;

                            lookfor = LOOKFOR_CPP_BASECLASS;
                            continue;
                        }

                        // Ignore unterminated lines in between, but
                        // reduce indent.
                        if (amount > cur_amount)
                            amount = cur_amount;
                    }
                    else
                    {
                        // Found first unterminated line on a row, may
                        // line up with this line, remember its indent
                        //         100 +
                        // ->          here;
                        l = ml_get_curline();
                        amount = cur_amount;

                        n = (int)STRLEN(l);
                        if (terminated == ',' && (*skipwhite(l) == ']'
                                       || (n >= 2 && l[n - 2] == ']')))
                            break;

                        // If previous line ends in ',', check whether we
                        // are in an initialization or enum
                        //     struct xxx =
                        //     {
                        //         sizeof a,
                        //         124 };
                        // or a normal possible continuation line.
                        // but only if no other statement has been found
                        // yet.
                        if (lookfor == LOOKFOR_INITIAL && terminated == ',')
                        {
                            if (curbuf->b_ind_js)
                            {
                                // Search for a line ending in a comma
                                // and line up with the line below it
                                // (could be the current line).
                                // some = [
                                //     1,     <- line up here
                                //     2,
                                // some = [
                                //     3 +    <- line up here
                                //     4 *
                                //     5,
                                //     6,
                                if (cin_iscomment(skipwhite(l)))
                                    break;
                                lookfor = LOOKFOR_COMMA;
                                trypos = find_match_char('[',
                                                    curbuf->b_ind_maxparen);
                                if (trypos != NULL)
                                {
                                    if (trypos->lnum
                                             == curwin->w_cursor.lnum - 1)
                                    {
                                        // Current line is first inside
                                        // [], line up with it.
break; } ourscope = trypos->lnum; } } else { lookfor = LOOKFOR_ENUM_OR_INIT; cont_amount = cin_first_id_amount(); } } else { if (lookfor == LOOKFOR_INITIAL && *l != NUL && l[STRLEN(l) - 1] == '\\') // XXX cont_amount = cin_get_equal_amount( curwin->w_cursor.lnum); if (lookfor != LOOKFOR_TERM && lookfor != LOOKFOR_JS_KEY && lookfor != LOOKFOR_COMMA && raw_string_start != curwin->w_cursor.lnum) lookfor = LOOKFOR_UNTERM; } } } } // Check if we are after a while (cond); // If so: Ignore until the matching "do". else if (cin_iswhileofdo_end(terminated)) // XXX { // Found an unterminated line after a while ();, line up // with the last one. // while (cond); // 100 + <- line up with this one // -> here; if (lookfor == LOOKFOR_UNTERM || lookfor == LOOKFOR_ENUM_OR_INIT) { if (cont_amount > 0) amount = cont_amount; else amount += ind_continuation; break; } if (whilelevel == 0) { lookfor = LOOKFOR_TERM; amount = get_indent(); // XXX if (theline[0] == '{') amount += curbuf->b_ind_open_extra; } ++whilelevel; } // We are after a "normal" statement. // If we had another statement we can stop now and use the // indent of that other statement. // Otherwise the indent of the current statement may be used, // search backwards for the next "normal" statement. else { // Skip single break line, if before a switch label. It // may be lined up with the case label. if (lookfor == LOOKFOR_NOBREAK && cin_isbreak(skipwhite(ml_get_curline()))) { lookfor = LOOKFOR_ANY; continue; } // Handle "do {" line. if (whilelevel > 0) { l = cin_skipcomment(ml_get_curline()); if (cin_isdo(l)) { amount = get_indent(); // XXX --whilelevel; continue; } } // Found a terminated line above an unterminated line. Add // the amount for a continuation line. // x = 1; // y = foo + // -> here; // or // int x = 1; // int foo, // -> here; if (lookfor == LOOKFOR_UNTERM || lookfor == LOOKFOR_ENUM_OR_INIT) { if (cont_amount > 0) amount = cont_amount; else amount += ind_continuation; break; } // Found a terminated line above a terminated line or "if" // etc. line. Use the amount of the line below us. // x = 1; x = 1; // if (asdf) y = 2; // while (asdf) ->here; // here; // ->foo; if (lookfor == LOOKFOR_TERM) { if (!lookfor_break && whilelevel == 0) break; } // First line above the one we're indenting is terminated. // To know what needs to be done look further backward for // a terminated line. else { // position the cursor over the rightmost paren, so // that matching it will take us back to the start of // the line. Helps for: // func(asdr, // asdfasdf); // here; term_again: l = ml_get_curline(); if (find_last_paren(l, '(', ')') && (trypos = find_match_paren( curbuf->b_ind_maxparen)) != NULL) { // Check if we are on a case label now. This is // handled above. // case xx: if ( asdf && // asdf) curwin->w_cursor = *trypos; l = ml_get_curline(); if (cin_iscase(l, FALSE) || cin_isscopedecl(l)) { ++curwin->w_cursor.lnum; curwin->w_cursor.col = 0; continue; } } // When aligning with the case statement, don't align // with a statement after it. // case 1: { <-- don't use this { position // stat; // } // case 2: // stat; // } iscase = (curbuf->b_ind_keep_case_label && cin_iscase(l, FALSE)); // Get indent and pointer to text for current line, // ignoring any jump label. amount = skip_label(curwin->w_cursor.lnum, &l); if (theline[0] == '{') amount += curbuf->b_ind_open_extra; // See remark above: "Only add b_ind_open_extra.." l = skipwhite(l); if (*l == '{') amount -= curbuf->b_ind_open_extra; lookfor = iscase ? 
LOOKFOR_ANY : LOOKFOR_TERM; // When a terminated line starts with "else" skip to // the matching "if": // else 3; // indent this; // Need to use the scope of this "else". XXX // If whilelevel != 0 continue looking for a "do {". if (lookfor == LOOKFOR_TERM && *l != '}' && cin_iselse(l) && whilelevel == 0) { if ((trypos = find_start_brace()) == NULL || find_match(LOOKFOR_IF, trypos->lnum) == FAIL) break; continue; } // If we're at the end of a block, skip to the start of // that block. l = ml_get_curline(); if (find_last_paren(l, '{', '}') // XXX && (trypos = find_start_brace()) != NULL) { curwin->w_cursor = *trypos; // if not "else {" check for terminated again // but skip block for "} else {" l = cin_skipcomment(ml_get_curline()); if (*l == '}' || !cin_iselse(l)) goto term_again; ++curwin->w_cursor.lnum; curwin->w_cursor.col = 0; } } } } } } // add extra indent for a comment if (cin_iscomment(theline)) amount += curbuf->b_ind_comment; // subtract extra left-shift for jump labels if (curbuf->b_ind_jump_label > 0 && original_line_islabel) amount -= curbuf->b_ind_jump_label; goto theend; } // ok -- we're not inside any sort of structure at all! // // This means we're at the top level, and everything should // basically just match where the previous line is, except // for the lines immediately following a function declaration, // which are K&R-style parameters and need to be indented. // // if our line starts with an open brace, forget about any // prevailing indent and make sure it looks like the start // of a function if (theline[0] == '{') { amount = curbuf->b_ind_first_open; goto theend; } // If the NEXT line is a function declaration, the current // line needs to be indented as a function type spec. // Don't do this if the current line looks like a comment or if the // current line is terminated, ie. ends in ';', or if the current line // contains { or }: "void f() {\n if (1)" if (cur_curpos.lnum < curbuf->b_ml.ml_line_count && !cin_nocode(theline) && vim_strchr(theline, '{') == NULL && vim_strchr(theline, '}') == NULL && !cin_ends_in(theline, (char_u *)":", NULL) && !cin_ends_in(theline, (char_u *)",", NULL) && cin_isfuncdecl(NULL, cur_curpos.lnum + 1, cur_curpos.lnum + 1) && !cin_isterminated(theline, FALSE, TRUE)) { amount = curbuf->b_ind_func_type; goto theend; } // search backwards until we find something we recognize amount = 0; curwin->w_cursor = cur_curpos; while (curwin->w_cursor.lnum > 1) { curwin->w_cursor.lnum--; curwin->w_cursor.col = 0; l = ml_get_curline(); // If we're in a comment or raw string now, skip to the start // of it. XXX if ((trypos = ind_find_start_CORS(NULL)) != NULL) { curwin->w_cursor.lnum = trypos->lnum + 1; curwin->w_cursor.col = 0; continue; } // Are we at the start of a cpp base class declaration or // constructor initialization? XXX n = FALSE; if (curbuf->b_ind_cpp_baseclass != 0 && theline[0] != '{') { n = cin_is_cpp_baseclass(&cache_cpp_baseclass); l = ml_get_curline(); } if (n) { // XXX amount = get_baseclass_amount(cache_cpp_baseclass.lpos.col); break; } // Skip preprocessor directives and blank lines. if (cin_ispreproc_cont(&l, &curwin->w_cursor.lnum, &amount)) continue; if (cin_nocode(l)) continue; // If the previous line ends in ',', use one level of // indentation: // int foo, // bar; // do this before checking for '}' in case of eg. // enum foobar // { // ... 
        //     } foo,
        //         bar;
        n = 0;
        if (cin_ends_in(l, (char_u *)",", NULL)
                     || (*l != NUL && (n = l[STRLEN(l) - 1]) == '\\'))
        {
            // take us back to opening paren
            if (find_last_paren(l, '(', ')')
                    && (trypos = find_match_paren(
                                          curbuf->b_ind_maxparen)) != NULL)
                curwin->w_cursor = *trypos;

            // For a line ending in ',' that is a continuation line go
            // back to the first line with a backslash:
            // char *foo = "bla{backslash}
            //           bla",
            //      here;
            while (n == 0 && curwin->w_cursor.lnum > 1)
            {
                l = ml_get(curwin->w_cursor.lnum - 1);
                if (*l == NUL || l[STRLEN(l) - 1] != '\\')
                    break;
                --curwin->w_cursor.lnum;
                curwin->w_cursor.col = 0;
            }

            amount = get_indent();          // XXX

            if (amount == 0)
                amount = cin_first_id_amount();
            if (amount == 0)
                amount = ind_continuation;
            break;
        }

        // If the line looks like a function declaration, and we're
        // not in a comment, put it at the left margin.
        if (cin_isfuncdecl(NULL, cur_curpos.lnum, 0))       // XXX
            break;
        l = ml_get_curline();

        // Found the closing '}' of a previous function: put the current
        // line at the left margin.  For when 'cino' has "fs".
        if (*skipwhite(l) == '}')
            break;                          // (matching {)

        // If the previous line ends on '};' (maybe followed by
        // comments) align at column 0.  For example:
        // char *string_array[] = { "foo",
        //     / * x * / "b};ar" };         / * foobar * /
        if (cin_ends_in(l, (char_u *)"};", NULL))
            break;

        // If the previous line ends on '[' we are probably in an
        // array constant:
        //     something = [
        //         234,                     <- extra indent
        if (cin_ends_in(l, (char_u *)"[", NULL))
        {
            amount = get_indent() + ind_continuation;
            break;
        }

        // Find a line that has only a semicolon, belonging to a previous
        // line ending in '}', e.g. before an #endif.  Don't increase
        // indent then.
        if (*(look = skipwhite(l)) == ';' && cin_nocode(look + 1))
        {
            pos_T curpos_save = curwin->w_cursor;

            while (curwin->w_cursor.lnum > 1)
            {
                look = ml_get(--curwin->w_cursor.lnum);
                if (!(cin_nocode(look) || cin_ispreproc_cont(
                                  &look, &curwin->w_cursor.lnum, &amount)))
                    break;
            }
            if (curwin->w_cursor.lnum > 0
                              && cin_ends_in(look, (char_u *)"}", NULL))
                break;

            curwin->w_cursor = curpos_save;
        }

        // If the PREVIOUS line is a function declaration, the current
        // line (and the ones that follow) need to be indented as
        // parameters.
        if (cin_isfuncdecl(&l, curwin->w_cursor.lnum, 0))
        {
            amount = curbuf->b_ind_param;
            break;
        }

        // If the previous line ends in ';' and the line before the
        // previous line ends in ',' or '\', indent to column zero:
        //     int foo,
        //         bar;
        //     indent_to_0 here;
        if (cin_ends_in(l, (char_u *)";", NULL))
        {
            l = ml_get(curwin->w_cursor.lnum - 1);
            if (cin_ends_in(l, (char_u *)",", NULL)
                    || (*l != NUL && l[STRLEN(l) - 1] == '\\'))
                break;
            l = ml_get_curline();
        }

        // Doesn't look like anything interesting -- so just
        // use the indent of this line.
        //
        // Position the cursor over the rightmost paren, so that
        // matching it will take us back to the start of the line.
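        // e.g. (illustrative) for
        //     func(arg1,
        //          arg2);
        // the cursor is put on the final ')'; matching it lands on the
        // "func(" line, whose indent is then used.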
find_last_paren(l, '(', ')'); if ((trypos = find_match_paren(curbuf->b_ind_maxparen)) != NULL) curwin->w_cursor = *trypos; amount = get_indent(); // XXX break; } // add extra indent for a comment if (cin_iscomment(theline)) amount += curbuf->b_ind_comment; // add extra indent if the previous line ended in a backslash: // "asdfasdf{backslash} // here"; // char *foo = "asdf{backslash} // here"; if (cur_curpos.lnum > 1) { l = ml_get(cur_curpos.lnum - 1); if (*l != NUL && l[STRLEN(l) - 1] == '\\') { cur_amount = cin_get_equal_amount(cur_curpos.lnum - 1); if (cur_amount > 0) amount = cur_amount; else if (cur_amount == 0) amount += ind_continuation; } } theend: if (amount < 0) amount = 0; laterend: // put the cursor back where it belongs curwin->w_cursor = cur_curpos; vim_free(linecopy); return amount; } /* * return TRUE if 'cinkeys' contains the key "keytyped", * when == '*': Only if key is preceded with '*' (indent before insert) * when == '!': Only if key is preceded with '!' (don't insert) * when == ' ': Only if key is not preceded with '*'(indent afterwards) * * "keytyped" can have a few special values: * KEY_OPEN_FORW * KEY_OPEN_BACK * KEY_COMPLETE just finished completion. * * If line_is_empty is TRUE accept keys with '0' before them. */ int in_cinkeys( int keytyped, int when, int line_is_empty) { char_u *look; int try_match; int try_match_word; char_u *p; char_u *line; int icase; int i; if (keytyped == NUL) // Can happen with CTRL-Y and CTRL-E on a short line. return FALSE; #ifdef FEAT_EVAL if (*curbuf->b_p_inde != NUL) look = curbuf->b_p_indk; // 'indentexpr' set: use 'indentkeys' else #endif look = curbuf->b_p_cink; // 'indentexpr' empty: use 'cinkeys' while (*look) { // Find out if we want to try a match with this key, depending on // 'when' and a '*' or '!' before the key. switch (when) { case '*': try_match = (*look == '*'); break; case '!': try_match = (*look == '!'); break; default: try_match = (*look != '*'); break; } if (*look == '*' || *look == '!') ++look; // If there is a '0', only accept a match if the line is empty. // But may still match when typing last char of a word. if (*look == '0') { try_match_word = try_match; if (!line_is_empty) try_match = FALSE; ++look; } else try_match_word = FALSE; // does it look like a control character? if (*look == '^' && look[1] >= '?' && look[1] <= '_') { if (try_match && keytyped == Ctrl_chr(look[1])) return TRUE; look += 2; } // 'o' means "o" command, open forward. // 'O' means "O" command, open backward. else if (*look == 'o') { if (try_match && keytyped == KEY_OPEN_FORW) return TRUE; ++look; } else if (*look == 'O') { if (try_match && keytyped == KEY_OPEN_BACK) return TRUE; ++look; } // 'e' means to check for "else" at start of line and just before the // cursor. else if (*look == 'e') { if (try_match && keytyped == 'e' && curwin->w_cursor.col >= 4) { p = ml_get_curline(); if (skipwhite(p) == p + curwin->w_cursor.col - 4 && STRNCMP(p + curwin->w_cursor.col - 4, "else", 4) == 0) return TRUE; } ++look; } // ':' only causes an indent if it is at the end of a label or case // statement, or when it was before typing the ':' (to fix // class::method for C++). else if (*look == ':') { if (try_match && keytyped == ':') { p = ml_get_curline(); if (cin_iscase(p, FALSE) || cin_isscopedecl(p) || cin_islabel()) return TRUE; // Need to get the line again after cin_islabel(). 
p = ml_get_curline(); if (curwin->w_cursor.col > 2 && p[curwin->w_cursor.col - 1] == ':' && p[curwin->w_cursor.col - 2] == ':') { p[curwin->w_cursor.col - 1] = ' '; i = (cin_iscase(p, FALSE) || cin_isscopedecl(p) || cin_islabel()); p = ml_get_curline(); p[curwin->w_cursor.col - 1] = ':'; if (i) return TRUE; } } ++look; } // Is it a key in <>, maybe? else if (*look == '<') { if (try_match) { // make up some named keys <o>, <O>, <e>, <0>, <>>, <<>, <*>, // <:> and <!> so that people can re-indent on o, O, e, 0, <, // >, *, : and ! keys if they really really want to. if (vim_strchr((char_u *)"<>!*oOe0:", look[1]) != NULL && keytyped == look[1]) return TRUE; if (keytyped == get_special_key_code(look + 1)) return TRUE; } while (*look && *look != '>') look++; while (*look == '>') look++; } // Is it a word: "=word"? else if (*look == '=' && look[1] != ',' && look[1] != NUL) { ++look; if (*look == '~') { icase = TRUE; ++look; } else icase = FALSE; p = vim_strchr(look, ','); if (p == NULL) p = look + STRLEN(look); if ((try_match || try_match_word) && curwin->w_cursor.col >= (colnr_T)(p - look)) { int match = FALSE; if (keytyped == KEY_COMPLETE) { char_u *s; // Just completed a word, check if it starts with "look". // search back for the start of a word. line = ml_get_curline(); if (has_mbyte) { char_u *n; for (s = line + curwin->w_cursor.col; s > line; s = n) { n = mb_prevptr(line, s); if (!vim_iswordp(n)) break; } } else for (s = line + curwin->w_cursor.col; s > line; --s) if (!vim_iswordc(s[-1])) break; if (s + (p - look) <= line + curwin->w_cursor.col && (icase ? MB_STRNICMP(s, look, p - look) : STRNCMP(s, look, p - look)) == 0) match = TRUE; } else // TODO: multi-byte if (keytyped == (int)p[-1] || (icase && keytyped < 256 && TOLOWER_LOC(keytyped) == TOLOWER_LOC((int)p[-1]))) { line = ml_get_cursor(); if ((curwin->w_cursor.col == (colnr_T)(p - look) || !vim_iswordc(line[-(p - look) - 1])) && (icase ? MB_STRNICMP(line - (p - look), look, p - look) : STRNCMP(line - (p - look), look, p - look)) == 0) match = TRUE; } if (match && try_match_word && !try_match) { // "0=word": Check if there are only blanks before the // word. if (getwhitecols_curline() != (int)(curwin->w_cursor.col - (p - look))) match = FALSE; } if (match) return TRUE; } look = p; } // ok, it's a boring generic character. else { if (try_match && *look == keytyped) return TRUE; if (*look != NUL) ++look; } // Skip over ", ". look = skip_to_option_part(look); } return FALSE; } /* * Do C or expression indenting on the current line. */ void do_c_expr_indent(void) { # ifdef FEAT_EVAL if (*curbuf->b_p_inde != NUL) fixthisline(get_expr_indent); else # endif fixthisline(get_c_indent); } #endif #if defined(FEAT_EVAL) || defined(PROTO) /* * "cindent(lnum)" function */ void f_cindent(typval_T *argvars UNUSED, typval_T *rettv) { # ifdef FEAT_CINDENT pos_T pos; linenr_T lnum; if (in_vim9script() && check_for_lnum_arg(argvars, 0) == FAIL) return; pos = curwin->w_cursor; lnum = tv_get_lnum(argvars); if (lnum >= 1 && lnum <= curbuf->b_ml.ml_line_count) { curwin->w_cursor.lnum = lnum; rettv->vval.v_number = get_c_indent(); curwin->w_cursor = pos; } else # endif rettv->vval.v_number = -1; } #endif
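
/*
 * Usage sketch (illustrative, not part of the original source): in a build
 * with the cindent feature,
 *	:echo cindent(10)
 * reaches f_cindent() above, which temporarily moves the cursor to line 10,
 * returns what get_c_indent() computes for that line, and yields -1 when the
 * line number is out of range (or the feature is missing).
 */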
/* vi:set ts=8 sts=4 sw=4 noet: * * VIM - Vi IMproved by Bram Moolenaar * * Do ":help uganda" in Vim to read copying and usage conditions. * Do ":help credits" in Vim to see a list of people who contributed. * See README.txt for an overview of the Vim source code. */ /* * cindent.c: C indentation related functions * * Many of C-indenting functions originally come from Eric Fischer. * * Below "XXX" means that this function may unlock the current line. */ #include "vim.h" // values for the "lookfor" state #define LOOKFOR_INITIAL 0 #define LOOKFOR_IF 1 #define LOOKFOR_DO 2 #define LOOKFOR_CASE 3 #define LOOKFOR_ANY 4 #define LOOKFOR_TERM 5 #define LOOKFOR_UNTERM 6 #define LOOKFOR_SCOPEDECL 7 #define LOOKFOR_NOBREAK 8 #define LOOKFOR_CPP_BASECLASS 9 #define LOOKFOR_ENUM_OR_INIT 10 #define LOOKFOR_JS_KEY 11 #define LOOKFOR_COMMA 12 #if defined(FEAT_CINDENT) || defined(FEAT_SMARTINDENT) /* * Return TRUE if the string "line" starts with a word from 'cinwords'. */ int cin_is_cinword(char_u *line) { char_u *cinw; char_u *cinw_buf; int cinw_len; int retval = FALSE; int len; cinw_len = (int)STRLEN(curbuf->b_p_cinw) + 1; cinw_buf = alloc(cinw_len); if (cinw_buf != NULL) { line = skipwhite(line); for (cinw = curbuf->b_p_cinw; *cinw; ) { len = copy_option_part(&cinw, cinw_buf, cinw_len, ","); if (STRNCMP(line, cinw_buf, len) == 0 && (!vim_iswordc(line[len]) || !vim_iswordc(line[len - 1]))) { retval = TRUE; break; } } vim_free(cinw_buf); } return retval; } #endif /* * Skip to the end of a "string" and a 'c' character. * If there is no string or character, return argument unmodified. */ static char_u * skip_string(char_u *p) { int i; // We loop, because strings may be concatenated: "date""time". for ( ; ; ++p) { if (p[0] == '\'') // 'c' or '\n' or '\000' { if (p[1] == NUL) // ' at end of line break; i = 2; if (p[1] == '\\' && p[2] != NUL) // '\n' or '\000' { ++i; while (vim_isdigit(p[i - 1])) // '\000' ++i; } if (p[i - 1] != NUL && p[i] == '\'') // check for trailing ' { p += i; continue; } } else if (p[0] == '"') // start of string { for (++p; p[0]; ++p) { if (p[0] == '\\' && p[1] != NUL) ++p; else if (p[0] == '"') // end of string break; } if (p[0] == '"') continue; // continue for another string } else if (p[0] == 'R' && p[1] == '"') { // Raw string: R"[delim](...)[delim]" char_u *delim = p + 2; char_u *paren = vim_strchr(delim, '('); if (paren != NULL) { size_t delim_len = paren - delim; for (p += 3; *p; ++p) if (p[0] == ')' && STRNCMP(p + 1, delim, delim_len) == 0 && p[delim_len + 1] == '"') { p += delim_len + 1; break; } if (p[0] == '"') continue; // continue for another string } } break; // no string found } if (!*p) --p; // backup from NUL return p; } /* * Return TRUE if "line[col]" is inside a C string. */ int is_pos_in_string(char_u *line, colnr_T col) { char_u *p; for (p = line; *p && (colnr_T)(p - line) < col; ++p) p = skip_string(p); return !((colnr_T)(p - line) <= col); } #if defined(FEAT_CINDENT) || defined(FEAT_SYN_HL) /* * Find the start of a comment, not knowing if we are in a comment right now. * Search starts at w_cursor.lnum and goes backwards. * Return NULL when not inside a comment. */ static pos_T * ind_find_start_comment(void) // XXX { return find_start_comment(curbuf->b_ind_maxcomment); } pos_T * find_start_comment(int ind_maxcomment) // XXX { pos_T *pos; int cur_maxcomment = ind_maxcomment; for (;;) { pos = findmatchlimit(NULL, '*', FM_BACKWARD, cur_maxcomment); if (pos == NULL) break; // Check if the comment start we found is inside a string. 
	// If it is then restrict the search to below this line and try again.
	if (!is_pos_in_string(ml_get(pos->lnum), pos->col))
	    break;
	cur_maxcomment = curwin->w_cursor.lnum - pos->lnum - 1;
	if (cur_maxcomment <= 0)
	{
	    pos = NULL;
	    break;
	}
    }
    return pos;
}

/*
 * Find the start of a raw string, not knowing if we are in one right now.
 * Search starts at w_cursor.lnum and goes backwards.
 * Return NULL when not inside a raw string.
 */
    static pos_T *
find_start_rawstring(int ind_maxcomment)	// XXX
{
    pos_T	*pos;
    int		cur_maxcomment = ind_maxcomment;

    for (;;)
    {
	pos = findmatchlimit(NULL, 'R', FM_BACKWARD, cur_maxcomment);
	if (pos == NULL)
	    break;

	// Check if the raw string start we found is inside a string.
	// If it is then restrict the search to below this line and try again.
	if (!is_pos_in_string(ml_get(pos->lnum), pos->col))
	    break;
	cur_maxcomment = curwin->w_cursor.lnum - pos->lnum - 1;
	if (cur_maxcomment <= 0)
	{
	    pos = NULL;
	    break;
	}
    }
    return pos;
}

/*
 * Find the start of a comment or raw string, not knowing if we are in a
 * comment or raw string right now.
 * Search starts at w_cursor.lnum and goes backwards.
 * If "is_raw" is given and the position returned is the start of a raw
 * string, it is set to the line number of that start.
 * Return NULL when not inside a comment or raw string.
 * "CORS" -> Comment Or Raw String
 */
    static pos_T *
ind_find_start_CORS(linenr_T *is_raw)		// XXX
{
    static pos_T comment_pos_copy;
    pos_T	*comment_pos;
    pos_T	*rs_pos;

    comment_pos = find_start_comment(curbuf->b_ind_maxcomment);
    if (comment_pos != NULL)
    {
	// Need to make a copy of the static pos in findmatchlimit(),
	// calling find_start_rawstring() may change it.
	comment_pos_copy = *comment_pos;
	comment_pos = &comment_pos_copy;
    }
    rs_pos = find_start_rawstring(curbuf->b_ind_maxcomment);

    // If comment_pos is before rs_pos the raw string is inside the comment.
    // If rs_pos is before comment_pos the comment is inside the raw string.
    if (comment_pos == NULL || (rs_pos != NULL
					&& LT_POS(*rs_pos, *comment_pos)))
    {
	if (is_raw != NULL && rs_pos != NULL)
	    *is_raw = rs_pos->lnum;
	return rs_pos;
    }
    return comment_pos;
}
#endif // FEAT_CINDENT || FEAT_SYN_HL

#if defined(FEAT_CINDENT) || defined(PROTO)

/*
 * Return TRUE if C-indenting is on.
 */
    int
cindent_on(void)
{
    return (!p_paste && (curbuf->b_p_cin
# ifdef FEAT_EVAL
		    || *curbuf->b_p_inde != NUL
# endif
		    ));
}

// Find result cache for cpp_baseclass
typedef struct {
    int		found;
    lpos_T	lpos;
} cpp_baseclass_cache_T;

/*
 * Skip over white space and C comments within the line.
 * Also skip over Perl/shell comments if desired.
 */
    static char_u *
cin_skipcomment(char_u *s)
{
    while (*s)
    {
	char_u *prev_s = s;

	s = skipwhite(s);

	// Perl/shell # comment continues until eol.  Require a space
	// before # to avoid recognizing $#array.
	if (curbuf->b_ind_hash_comment != 0 && s != prev_s && *s == '#')
	{
	    s += STRLEN(s);
	    break;
	}
	if (*s != '/')
	    break;
	++s;
	if (*s == '/')		// slash-slash comment continues till eol
	{
	    s += STRLEN(s);
	    break;
	}
	if (*s != '*')
	    break;
	for (++s; *s; ++s)	// skip slash-star comment
	    if (s[0] == '*' && s[1] == '/')
	    {
		s += 2;
		break;
	    }
    }
    return s;
}

/*
 * Return TRUE if there is no code at *s.  White space and comments are
 * not considered code.
 */
    static int
cin_nocode(char_u *s)
{
    return *cin_skipcomment(s) == NUL;
}

/*
 * Recognize the start of a C or C++ comment.
 */
    static int
cin_iscomment(char_u *p)
{
    return (p[0] == '/' && (p[1] == '*' || p[1] == '/'));
}

/*
 * Recognize the start of a "//" comment.
*/ static int cin_islinecomment(char_u *p) { return (p[0] == '/' && p[1] == '/'); } /* * Check previous lines for a "//" line comment, skipping over blank lines. */ static pos_T * find_line_comment(void) // XXX { static pos_T pos; char_u *line; char_u *p; pos = curwin->w_cursor; while (--pos.lnum > 0) { line = ml_get(pos.lnum); p = skipwhite(line); if (cin_islinecomment(p)) { pos.col = (int)(p - line); return &pos; } if (*p != NUL) break; } return NULL; } /* * Return TRUE if "text" starts with "key:". */ static int cin_has_js_key(char_u *text) { char_u *s = skipwhite(text); int quote = -1; if (*s == '\'' || *s == '"') { // can be 'key': or "key": quote = *s; ++s; } if (!vim_isIDc(*s)) // need at least one ID character return FALSE; while (vim_isIDc(*s)) ++s; if (*s == quote) ++s; s = cin_skipcomment(s); // "::" is not a label, it's C++ return (*s == ':' && s[1] != ':'); } /* * Check if string matches "label:"; move to character after ':' if true. * "*s" must point to the start of the label, if there is one. */ static int cin_islabel_skip(char_u **s) { if (!vim_isIDc(**s)) // need at least one ID character return FALSE; while (vim_isIDc(**s)) (*s)++; *s = cin_skipcomment(*s); // "::" is not a label, it's C++ return (**s == ':' && *++*s != ':'); } /* * Recognize a scope declaration label from the 'cinscopedecls' option. */ static int cin_isscopedecl(char_u *p) { size_t cinsd_len; char_u *cinsd_buf; char_u *cinsd; size_t len; char_u *skip; char_u *s = cin_skipcomment(p); int found = FALSE; cinsd_len = STRLEN(curbuf->b_p_cinsd) + 1; cinsd_buf = alloc(cinsd_len); if (cinsd_buf == NULL) return FALSE; for (cinsd = curbuf->b_p_cinsd; *cinsd; ) { len = copy_option_part(&cinsd, cinsd_buf, (int)cinsd_len, ","); if (STRNCMP(s, cinsd_buf, len) == 0) { skip = cin_skipcomment(s + len); if (*skip == ':' && skip[1] != ':') { found = TRUE; break; } } } vim_free(cinsd_buf); return found; } /* * Recognize a preprocessor statement: Any line that starts with '#'. */ static int cin_ispreproc(char_u *s) { if (*skipwhite(s) == '#') return TRUE; return FALSE; } /* * Return TRUE if line "*pp" at "*lnump" is a preprocessor statement or a * continuation line of a preprocessor statement. Decrease "*lnump" to the * start and return the line in "*pp". * Put the amount of indent in "*amount". */ static int cin_ispreproc_cont(char_u **pp, linenr_T *lnump, int *amount) { char_u *line = *pp; linenr_T lnum = *lnump; int retval = FALSE; int candidate_amount = *amount; if (*line != NUL && line[STRLEN(line) - 1] == '\\') candidate_amount = get_indent_lnum(lnum); for (;;) { if (cin_ispreproc(line)) { retval = TRUE; *lnump = lnum; break; } if (lnum == 1) break; line = ml_get(--lnum); if (*line == NUL || line[STRLEN(line) - 1] != '\\') break; } if (lnum != *lnump) *pp = ml_get(*lnump); if (retval) *amount = candidate_amount; return retval; } static int cin_iselse( char_u *p) { if (*p == '}') // accept "} else" p = cin_skipcomment(p + 1); return (STRNCMP(p, "else", 4) == 0 && !vim_isIDc(p[4])); } /* * Recognize a line that starts with '{' or '}', or ends with ';', ',', '{' or * '}'. * Don't consider "} else" a terminated line. * If a line begins with an "else", only consider it terminated if no unmatched * opening braces follow (handle "else { foo();" correctly). * Return the character terminating the line (ending char's have precedence if * both apply in order to determine initializations). 
 */
    static int
cin_isterminated(
    char_u	*s,
    int		incl_open,	// include '{' at the end as terminator
    int		incl_comma)	// recognize a trailing comma
{
    char_u	found_start = 0;
    unsigned	n_open = 0;
    int		is_else = FALSE;

    s = cin_skipcomment(s);

    if (*s == '{' || (*s == '}' && !cin_iselse(s)))
	found_start = *s;

    if (!found_start)
	is_else = cin_iselse(s);

    while (*s)
    {
	// skip over comments, "" strings and 'c'haracters
	s = skip_string(cin_skipcomment(s));
	if (*s == '}' && n_open > 0)
	    --n_open;
	if ((!is_else || n_open == 0)
		&& (*s == ';' || *s == '}' || (incl_comma && *s == ','))
		&& cin_nocode(s + 1))
	    return *s;
	else if (*s == '{')
	{
	    if (incl_open && cin_nocode(s + 1))
		return *s;
	    else
		++n_open;
	}

	if (*s)
	    s++;
    }
    return found_start;
}

/*
 * Return TRUE when "s" starts with "word" and then a non-ID character.
 */
    static int
cin_starts_with(char_u *s, char *word)
{
    int l = (int)STRLEN(word);

    return (STRNCMP(s, word, l) == 0 && !vim_isIDc(s[l]));
}

/*
 * Recognize a "default" switch label.
 */
    static int
cin_isdefault(char_u *s)
{
    return (STRNCMP(s, "default", 7) == 0
	    && *(s = cin_skipcomment(s + 7)) == ':'
	    && s[1] != ':');
}

/*
 * Recognize a switch label: "case .*:" or "default:".
 */
    static int
cin_iscase(
    char_u *s,
    int strict)	    // Allow relaxed check of case statement for JS
{
    s = cin_skipcomment(s);
    if (cin_starts_with(s, "case"))
    {
	for (s += 4; *s; ++s)
	{
	    s = cin_skipcomment(s);
	    if (*s == NUL)
		break;
	    if (*s == ':')
	    {
		if (s[1] == ':')	// skip over "::" for C++
		    ++s;
		else
		    return TRUE;
	    }
	    if (*s == '\'' && s[1] && s[2] == '\'')
		s += 2;			// skip over 'x'
	    else if (*s == '/' && (s[1] == '*' || s[1] == '/'))
		return FALSE;		// stop at comment
	    else if (*s == '"')
	    {
		// JS etc.
		if (strict)
		    return FALSE;	// stop at string
		else
		    return TRUE;
	    }
	}
	return FALSE;
    }

    if (cin_isdefault(s))
	return TRUE;
    return FALSE;
}

/*
 * Recognize a label: "label:".
 * Note: curwin->w_cursor must be where we are looking for the label.
 */
    static int
cin_islabel(void)	// XXX
{
    char_u	*s;

    s = cin_skipcomment(ml_get_curline());

    // Exclude "default" from labels, since it should be indented
    // like a switch label.  Same for C++ scope declarations.
    if (cin_isdefault(s))
	return FALSE;
    if (cin_isscopedecl(s))
	return FALSE;

    if (cin_islabel_skip(&s))
    {
	// Only accept a label if the previous line is terminated or is a case
	// label.
	pos_T	cursor_save;
	pos_T	*trypos;
	char_u	*line;

	cursor_save = curwin->w_cursor;
	while (curwin->w_cursor.lnum > 1)
	{
	    --curwin->w_cursor.lnum;

	    // If we're in a comment or raw string now, skip to the start of
	    // it.
	    curwin->w_cursor.col = 0;
	    if ((trypos = ind_find_start_CORS(NULL)) != NULL) // XXX
		curwin->w_cursor = *trypos;

	    line = ml_get_curline();
	    if (cin_ispreproc(line))	// ignore #defines, #if, etc.
		continue;
	    if (*(line = cin_skipcomment(line)) == NUL)
		continue;

	    curwin->w_cursor = cursor_save;
	    if (cin_isterminated(line, TRUE, FALSE)
		    || cin_isscopedecl(line)
		    || cin_iscase(line, TRUE)
		    || (cin_islabel_skip(&line) && cin_nocode(line)))
		return TRUE;
	    return FALSE;
	}
	curwin->w_cursor = cursor_save;
	return TRUE;		// label at start of file???
    }
    return FALSE;
}

/*
 * Return TRUE if string "s" ends with the string "find", possibly followed by
 * white space and comments.  Skip strings and comments.
 * Ignore "ignore" after "find" if it's not NULL.
*/ static int cin_ends_in(char_u *s, char_u *find, char_u *ignore) { char_u *p = s; char_u *r; int len = (int)STRLEN(find); while (*p != NUL) { p = cin_skipcomment(p); if (STRNCMP(p, find, len) == 0) { r = skipwhite(p + len); if (ignore != NULL && STRNCMP(r, ignore, STRLEN(ignore)) == 0) r = skipwhite(r + STRLEN(ignore)); if (cin_nocode(r)) return TRUE; } if (*p != NUL) ++p; } return FALSE; } /* * Recognize structure initialization and enumerations: * "[typedef] [static|public|protected|private] enum" * "[typedef] [static|public|protected|private] = {" */ static int cin_isinit(void) { char_u *s; static char *skip[] = {"static", "public", "protected", "private"}; s = cin_skipcomment(ml_get_curline()); if (cin_starts_with(s, "typedef")) s = cin_skipcomment(s + 7); for (;;) { int i, l; for (i = 0; i < (int)ARRAY_LENGTH(skip); ++i) { l = (int)strlen(skip[i]); if (cin_starts_with(s, skip[i])) { s = cin_skipcomment(s + l); l = 0; break; } } if (l != 0) break; } if (cin_starts_with(s, "enum")) return TRUE; if (cin_ends_in(s, (char_u *)"=", (char_u *)"{")) return TRUE; return FALSE; } // Maximum number of lines to search back for a "namespace" line. #define FIND_NAMESPACE_LIM 20 /* * Recognize a "namespace" scope declaration. */ static int cin_is_cpp_namespace(char_u *s) { char_u *p; int has_name = FALSE; int has_name_start = FALSE; s = cin_skipcomment(s); if (STRNCMP(s, "inline", 6) == 0 && (s[6] == NUL || !vim_iswordc(s[6]))) s = cin_skipcomment(skipwhite(s + 6)); if (STRNCMP(s, "namespace", 9) == 0 && (s[9] == NUL || !vim_iswordc(s[9]))) { p = cin_skipcomment(skipwhite(s + 9)); while (*p != NUL) { if (VIM_ISWHITE(*p)) { has_name = TRUE; // found end of a name p = cin_skipcomment(skipwhite(p)); } else if (*p == '{') { break; } else if (vim_iswordc(*p)) { has_name_start = TRUE; if (has_name) return FALSE; // word character after skipping past name ++p; } else if (p[0] == ':' && p[1] == ':' && vim_iswordc(p[2])) { if (!has_name_start || has_name) return FALSE; // C++ 17 nested namespace p += 3; } else { return FALSE; } } return TRUE; } return FALSE; } /* * Recognize a `extern "C"` or `extern "C++"` linkage specifications. */ static int cin_is_cpp_extern_c(char_u *s) { char_u *p; int has_string_literal = FALSE; s = cin_skipcomment(s); if (STRNCMP(s, "extern", 6) == 0 && (s[6] == NUL || !vim_iswordc(s[6]))) { p = cin_skipcomment(skipwhite(s + 6)); while (*p != NUL) { if (VIM_ISWHITE(*p)) { p = cin_skipcomment(skipwhite(p)); } else if (*p == '{') { break; } else if (p[0] == '"' && p[1] == 'C' && p[2] == '"') { if (has_string_literal) return FALSE; has_string_literal = TRUE; p += 3; } else if (p[0] == '"' && p[1] == 'C' && p[2] == '+' && p[3] == '+' && p[4] == '"') { if (has_string_literal) return FALSE; has_string_literal = TRUE; p += 5; } else { return FALSE; } } return has_string_literal ? TRUE : FALSE; } return FALSE; } /* * Return a pointer to the first non-empty non-comment character after a ':'. * Return NULL if not found. * case 234: a = b; * ^ */ static char_u * after_label(char_u *l) { for ( ; *l; ++l) { if (*l == ':') { if (l[1] == ':') // skip over "::" for C++ ++l; else if (!cin_iscase(l + 1, FALSE)) break; } else if (*l == '\'' && l[1] && l[2] == '\'') l += 2; // skip over 'x' } if (*l == NUL) return NULL; l = cin_skipcomment(l + 1); if (*l == NUL) return NULL; return l; } /* * Get indent of line "lnum", skipping a label. * Return 0 if there is nothing after the label. 
*/ static int get_indent_nolabel (linenr_T lnum) // XXX { char_u *l; pos_T fp; colnr_T col; char_u *p; l = ml_get(lnum); p = after_label(l); if (p == NULL) return 0; fp.col = (colnr_T)(p - l); fp.lnum = lnum; getvcol(curwin, &fp, &col, NULL, NULL); return (int)col; } /* * Find indent for line "lnum", ignoring any case or jump label. * Also return a pointer to the text (after the label) in "pp". * label: if (asdf && asdfasdf) * ^ */ static int skip_label(linenr_T lnum, char_u **pp) { char_u *l; int amount; pos_T cursor_save; cursor_save = curwin->w_cursor; curwin->w_cursor.lnum = lnum; l = ml_get_curline(); // XXX if (cin_iscase(l, FALSE) || cin_isscopedecl(l) || cin_islabel()) { amount = get_indent_nolabel(lnum); l = after_label(ml_get_curline()); if (l == NULL) // just in case l = ml_get_curline(); } else { amount = get_indent(); l = ml_get_curline(); } *pp = l; curwin->w_cursor = cursor_save; return amount; } /* * Return the indent of the first variable name after a type in a declaration. * int a, indent of "a" * static struct foo b, indent of "b" * enum bla c, indent of "c" * Returns zero when it doesn't look like a declaration. */ static int cin_first_id_amount(void) { char_u *line, *p, *s; int len; pos_T fp; colnr_T col; line = ml_get_curline(); p = skipwhite(line); len = (int)(skiptowhite(p) - p); if (len == 6 && STRNCMP(p, "static", 6) == 0) { p = skipwhite(p + 6); len = (int)(skiptowhite(p) - p); } if (len == 6 && STRNCMP(p, "struct", 6) == 0) p = skipwhite(p + 6); else if (len == 4 && STRNCMP(p, "enum", 4) == 0) p = skipwhite(p + 4); else if ((len == 8 && STRNCMP(p, "unsigned", 8) == 0) || (len == 6 && STRNCMP(p, "signed", 6) == 0)) { s = skipwhite(p + len); if ((STRNCMP(s, "int", 3) == 0 && VIM_ISWHITE(s[3])) || (STRNCMP(s, "long", 4) == 0 && VIM_ISWHITE(s[4])) || (STRNCMP(s, "short", 5) == 0 && VIM_ISWHITE(s[5])) || (STRNCMP(s, "char", 4) == 0 && VIM_ISWHITE(s[4]))) p = s; } for (len = 0; vim_isIDc(p[len]); ++len) ; if (len == 0 || !VIM_ISWHITE(p[len]) || cin_nocode(p)) return 0; p = skipwhite(p + len); fp.lnum = curwin->w_cursor.lnum; fp.col = (colnr_T)(p - line); getvcol(curwin, &fp, &col, NULL, NULL); return (int)col; } /* * Return the indent of the first non-blank after an equal sign. * char *foo = "here"; * Return zero if no (useful) equal sign found. * Return -1 if the line above "lnum" ends in a backslash. * foo = "asdf\ * asdf\ * here"; */ static int cin_get_equal_amount(linenr_T lnum) { char_u *line; char_u *s; colnr_T col; pos_T fp; if (lnum > 1) { line = ml_get(lnum - 1); if (*line != NUL && line[STRLEN(line) - 1] == '\\') return -1; } line = s = ml_get(lnum); while (*s != NUL && vim_strchr((char_u *)"=;{}\"'", *s) == NULL) { if (cin_iscomment(s)) // ignore comments s = cin_skipcomment(s); else ++s; } if (*s != '=') return 0; s = skipwhite(s + 1); if (cin_nocode(s)) return 0; if (*s == '"') // nice alignment for continued strings ++s; fp.lnum = lnum; fp.col = (colnr_T)(s - line); getvcol(curwin, &fp, &col, NULL, NULL); return (int)col; } /* * Skip strings, chars and comments until at or past "trypos". * Return the column found. 
*/ static int cin_skip2pos(pos_T *trypos) { char_u *line; char_u *p; char_u *new_p; p = line = ml_get(trypos->lnum); while (*p && (colnr_T)(p - line) < trypos->col) { if (cin_iscomment(p)) p = cin_skipcomment(p); else { new_p = skip_string(p); if (new_p == p) ++p; else p = new_p; } } return (int)(p - line); } static pos_T * find_match_char(int c, int ind_maxparen) // XXX { pos_T cursor_save; pos_T *trypos; static pos_T pos_copy; int ind_maxp_wk; cursor_save = curwin->w_cursor; ind_maxp_wk = ind_maxparen; retry: if ((trypos = findmatchlimit(NULL, c, 0, ind_maxp_wk)) != NULL) { // check if the ( is in a // comment if ((colnr_T)cin_skip2pos(trypos) > trypos->col) { ind_maxp_wk = ind_maxparen - (int)(cursor_save.lnum - trypos->lnum); if (ind_maxp_wk > 0) { curwin->w_cursor = *trypos; curwin->w_cursor.col = 0; // XXX goto retry; } trypos = NULL; } else { pos_T *trypos_wk; pos_copy = *trypos; // copy trypos, findmatch will change it trypos = &pos_copy; curwin->w_cursor = *trypos; if ((trypos_wk = ind_find_start_CORS(NULL)) != NULL) // XXX { ind_maxp_wk = ind_maxparen - (int)(cursor_save.lnum - trypos_wk->lnum); if (ind_maxp_wk > 0) { curwin->w_cursor = *trypos_wk; goto retry; } trypos = NULL; } } } curwin->w_cursor = cursor_save; return trypos; } /* * Find the matching '(', ignoring it if it is in a comment. * Return NULL if no match found. */ static pos_T * find_match_paren(int ind_maxparen) // XXX { return find_match_char('(', ind_maxparen); } /* * Set w_cursor.col to the column number of the last unmatched ')' or '{' in * line "l". "l" must point to the start of the line. */ static int find_last_paren(char_u *l, int start, int end) { int i; int retval = FALSE; int open_count = 0; curwin->w_cursor.col = 0; // default is start of line for (i = 0; l[i] != NUL; i++) { i = (int)(cin_skipcomment(l + i) - l); // ignore parens in comments i = (int)(skip_string(l + i) - l); // ignore parens in quotes if (l[i] == start) ++open_count; else if (l[i] == end) { if (open_count > 0) --open_count; else { curwin->w_cursor.col = i; retval = TRUE; } } } return retval; } /* * Recognize the basic picture of a function declaration -- it needs to * have an open paren somewhere and a close paren at the end of the line and * no semicolons anywhere. * When a line ends in a comma we continue looking in the next line. * "sp" points to a string with the line. When looking at other lines it must * be restored to the line. When it's NULL fetch lines here. * "first_lnum" is where we start looking. * "min_lnum" is the line before which we will not be looking. */ static int cin_isfuncdecl( char_u **sp, linenr_T first_lnum, linenr_T min_lnum) { char_u *s; linenr_T lnum = first_lnum; linenr_T save_lnum = curwin->w_cursor.lnum; int retval = FALSE; pos_T *trypos; int just_started = TRUE; if (sp == NULL) s = ml_get(lnum); else s = *sp; curwin->w_cursor.lnum = lnum; if (find_last_paren(s, '(', ')') && (trypos = find_match_paren(curbuf->b_ind_maxparen)) != NULL) { lnum = trypos->lnum; if (lnum < min_lnum) { curwin->w_cursor.lnum = save_lnum; return FALSE; } s = ml_get(lnum); } curwin->w_cursor.lnum = save_lnum; // Ignore line starting with #. if (cin_ispreproc(s)) return FALSE; while (*s && *s != '(' && *s != ';' && *s != '\'' && *s != '"') { if (cin_iscomment(s)) // ignore comments s = cin_skipcomment(s); else if (*s == ':') { if (*(s + 1) == ':') s += 2; else // To avoid a mistake in the following situation: // A::A(int a, int b) // : a(0) // <--not a function decl // , b(0) // {... 
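		// A single ':' (unlike "::") starts a constructor
		// initializer list here, so this is not a declaration.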
return FALSE; } else ++s; } if (*s != '(') return FALSE; // ';', ' or " before any () or no '(' while (*s && *s != ';' && *s != '\'' && *s != '"') { if (*s == ')' && cin_nocode(s + 1)) { // ')' at the end: may have found a match // Check for the previous line not to end in a backslash: // #if defined(x) && {backslash} // defined(y) lnum = first_lnum - 1; s = ml_get(lnum); if (*s == NUL || s[STRLEN(s) - 1] != '\\') retval = TRUE; goto done; } if ((*s == ',' && cin_nocode(s + 1)) || s[1] == NUL || cin_nocode(s)) { int comma = (*s == ','); // ',' at the end: continue looking in the next line. // At the end: check for ',' in the next line, for this style: // func(arg1 // , arg2) for (;;) { if (lnum >= curbuf->b_ml.ml_line_count) break; s = ml_get(++lnum); if (!cin_ispreproc(s)) break; } if (lnum >= curbuf->b_ml.ml_line_count) break; // Require a comma at end of the line or a comma or ')' at the // start of next line. s = skipwhite(s); if (!just_started && (!comma && *s != ',' && *s != ')')) break; just_started = FALSE; } else if (cin_iscomment(s)) // ignore comments s = cin_skipcomment(s); else { ++s; just_started = FALSE; } } done: if (lnum != first_lnum && sp != NULL) *sp = ml_get(first_lnum); return retval; } static int cin_isif(char_u *p) { return (STRNCMP(p, "if", 2) == 0 && !vim_isIDc(p[2])); } static int cin_isdo(char_u *p) { return (STRNCMP(p, "do", 2) == 0 && !vim_isIDc(p[2])); } /* * Check if this is a "while" that should have a matching "do". * We only accept a "while (condition) ;", with only white space between the * ')' and ';'. The condition may be spread over several lines. */ static int cin_iswhileofdo (char_u *p, linenr_T lnum) // XXX { pos_T cursor_save; pos_T *trypos; int retval = FALSE; p = cin_skipcomment(p); if (*p == '}') // accept "} while (cond);" p = cin_skipcomment(p + 1); if (cin_starts_with(p, "while")) { cursor_save = curwin->w_cursor; curwin->w_cursor.lnum = lnum; curwin->w_cursor.col = 0; p = ml_get_curline(); while (*p && *p != 'w') // skip any '}', until the 'w' of the "while" { ++p; ++curwin->w_cursor.col; } if ((trypos = findmatchlimit(NULL, 0, 0, curbuf->b_ind_maxparen)) != NULL && *cin_skipcomment(ml_get_pos(trypos) + 1) == ';') retval = TRUE; curwin->w_cursor = cursor_save; } return retval; } /* * Check whether in "p" there is an "if", "for" or "while" before "*poffset". * Return 0 if there is none. * Otherwise return !0 and update "*poffset" to point to the place where the * string was found. */ static int cin_is_if_for_while_before_offset(char_u *line, int *poffset) { int offset = *poffset; if (offset-- < 2) return 0; while (offset > 2 && VIM_ISWHITE(line[offset])) --offset; offset -= 1; if (!STRNCMP(line + offset, "if", 2)) goto probablyFound; if (offset >= 1) { offset -= 1; if (!STRNCMP(line + offset, "for", 3)) goto probablyFound; if (offset >= 2) { offset -= 2; if (!STRNCMP(line + offset, "while", 5)) goto probablyFound; } } return 0; probablyFound: if (!offset || !vim_isIDc(line[offset - 1])) { *poffset = offset; return 1; } return 0; } /* * Return TRUE if we are at the end of a do-while. * do * nothing; * while (foo * && bar); <-- here * Adjust the cursor to the line with "while". 
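 * "terminated" is the character that ended the line; only ';' can end a
 * do-while, so anything else returns FALSE right away.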
*/ static int cin_iswhileofdo_end(int terminated) { char_u *line; char_u *p; char_u *s; pos_T *trypos; int i; if (terminated != ';') // there must be a ';' at the end return FALSE; p = line = ml_get_curline(); while (*p != NUL) { p = cin_skipcomment(p); if (*p == ')') { s = skipwhite(p + 1); if (*s == ';' && cin_nocode(s + 1)) { // Found ");" at end of the line, now check there is "while" // before the matching '('. XXX i = (int)(p - line); curwin->w_cursor.col = i; trypos = find_match_paren(curbuf->b_ind_maxparen); if (trypos != NULL) { s = cin_skipcomment(ml_get(trypos->lnum)); if (*s == '}') // accept "} while (cond);" s = cin_skipcomment(s + 1); if (cin_starts_with(s, "while")) { curwin->w_cursor.lnum = trypos->lnum; return TRUE; } } // Searching may have made "line" invalid, get it again. line = ml_get_curline(); p = line + i; } } if (*p != NUL) ++p; } return FALSE; } static int cin_isbreak(char_u *p) { return (STRNCMP(p, "break", 5) == 0 && !vim_isIDc(p[5])); } /* * Find the position of a C++ base-class declaration or * constructor-initialization. eg: * * class MyClass : * baseClass <-- here * class MyClass : public baseClass, * anotherBaseClass <-- here (should probably lineup ??) * MyClass::MyClass(...) : * baseClass(...) <-- here (constructor-initialization) * * This is a lot of guessing. Watch out for "cond ? func() : foo". */ static int cin_is_cpp_baseclass( cpp_baseclass_cache_T *cached) // input and output { lpos_T *pos = &cached->lpos; // find position char_u *s; int class_or_struct, lookfor_ctor_init, cpp_base_class; linenr_T lnum = curwin->w_cursor.lnum; char_u *line = ml_get_curline(); if (pos->lnum <= lnum) return cached->found; // Use the cached result pos->col = 0; s = skipwhite(line); if (*s == '#') // skip #define FOO x ? (x) : x return FALSE; s = cin_skipcomment(s); if (*s == NUL) return FALSE; cpp_base_class = lookfor_ctor_init = class_or_struct = FALSE; // Search for a line starting with '#', empty, ending in ';' or containing // '{' or '}' and start below it. This handles the following situations: // a = cond ? // func() : // asdf; // func::foo() // : something // {} // Foo::Foo (int one, int two) // : something(4), // somethingelse(3) // {} while (lnum > 1) { line = ml_get(lnum - 1); s = skipwhite(line); if (*s == '#' || *s == NUL) break; while (*s != NUL) { s = cin_skipcomment(s); if (*s == '{' || *s == '}' || (*s == ';' && cin_nocode(s + 1))) break; if (*s != NUL) ++s; } if (*s != NUL) break; --lnum; } pos->lnum = lnum; line = ml_get(lnum); s = line; for (;;) { if (*s == NUL) { if (lnum == curwin->w_cursor.lnum) break; // Continue in the cursor line. line = ml_get(++lnum); s = line; } if (s == line) { // don't recognize "case (foo):" as a baseclass if (cin_iscase(s, FALSE)) break; s = cin_skipcomment(line); if (*s == NUL) continue; } if (s[0] == '"' || (s[0] == 'R' && s[1] == '"')) s = skip_string(s) + 1; else if (s[0] == ':') { if (s[1] == ':') { // skip double colon. 
It can't be a constructor // initialization any more lookfor_ctor_init = FALSE; s = cin_skipcomment(s + 2); } else if (lookfor_ctor_init || class_or_struct) { // we have something found, that looks like the start of // cpp-base-class-declaration or constructor-initialization cpp_base_class = TRUE; lookfor_ctor_init = class_or_struct = FALSE; pos->col = 0; s = cin_skipcomment(s + 1); } else s = cin_skipcomment(s + 1); } else if ((STRNCMP(s, "class", 5) == 0 && !vim_isIDc(s[5])) || (STRNCMP(s, "struct", 6) == 0 && !vim_isIDc(s[6]))) { class_or_struct = TRUE; lookfor_ctor_init = FALSE; if (*s == 'c') s = cin_skipcomment(s + 5); else s = cin_skipcomment(s + 6); } else { if (s[0] == '{' || s[0] == '}' || s[0] == ';') { cpp_base_class = lookfor_ctor_init = class_or_struct = FALSE; } else if (s[0] == ')') { // Constructor-initialization is assumed if we come across // something like "):" class_or_struct = FALSE; lookfor_ctor_init = TRUE; } else if (s[0] == '?') { // Avoid seeing '() :' after '?' as constructor init. return FALSE; } else if (!vim_isIDc(s[0])) { // if it is not an identifier, we are wrong class_or_struct = FALSE; lookfor_ctor_init = FALSE; } else if (pos->col == 0) { // it can't be a constructor-initialization any more lookfor_ctor_init = FALSE; // the first statement starts here: lineup with this one... if (cpp_base_class) pos->col = (colnr_T)(s - line); } // When the line ends in a comma don't align with it. if (lnum == curwin->w_cursor.lnum && *s == ',' && cin_nocode(s + 1)) pos->col = 0; s = cin_skipcomment(s + 1); } } cached->found = cpp_base_class; if (cpp_base_class) pos->lnum = lnum; return cpp_base_class; } static int get_baseclass_amount(int col) { int amount; colnr_T vcol; pos_T *trypos; if (col == 0) { amount = get_indent(); if (find_last_paren(ml_get_curline(), '(', ')') && (trypos = find_match_paren(curbuf->b_ind_maxparen)) != NULL) amount = get_indent_lnum(trypos->lnum); // XXX if (!cin_ends_in(ml_get_curline(), (char_u *)",", NULL)) amount += curbuf->b_ind_cpp_baseclass; } else { curwin->w_cursor.col = col; getvcol(curwin, &curwin->w_cursor, &vcol, NULL, NULL); amount = (int)vcol; } if (amount < curbuf->b_ind_cpp_baseclass) amount = curbuf->b_ind_cpp_baseclass; return amount; } /* * Find the '{' at the start of the block we are in. * Return NULL if no match found. * Ignore a '{' that is in a comment, makes indenting the next three lines * work. */ // foo() // { // } static pos_T * find_start_brace(void) // XXX { pos_T cursor_save; pos_T *trypos; pos_T *pos; static pos_T pos_copy; cursor_save = curwin->w_cursor; while ((trypos = findmatchlimit(NULL, '{', FM_BLOCKSTOP, 0)) != NULL) { pos_copy = *trypos; // copy pos_T, next findmatch will change it trypos = &pos_copy; curwin->w_cursor = *trypos; pos = NULL; // ignore the { if it's in a // or / * * / comment if ((colnr_T)cin_skip2pos(trypos) == trypos->col && (pos = ind_find_start_CORS(NULL)) == NULL) // XXX break; if (pos != NULL) curwin->w_cursor = *pos; } curwin->w_cursor = cursor_save; return trypos; } /* * Find the matching '(', ignoring it if it is in a comment or before an * unmatched {. * Return NULL if no match found. */ static pos_T * find_match_paren_after_brace (int ind_maxparen) // XXX { pos_T *trypos = find_match_paren(ind_maxparen); if (trypos != NULL) { pos_T *tryposBrace = find_start_brace(); // If both an unmatched '(' and '{' is found. Ignore the '(' // position if the '{' is further down. if (tryposBrace != NULL && (trypos->lnum != tryposBrace->lnum ? 
trypos->lnum < tryposBrace->lnum : trypos->col < tryposBrace->col)) trypos = NULL; } return trypos; } /* * Return ind_maxparen corrected for the difference in line number between the * cursor position and "startpos". This makes sure that searching for a * matching paren above the cursor line doesn't find a match because of * looking a few lines further. */ static int corr_ind_maxparen(pos_T *startpos) { long n = (long)startpos->lnum - (long)curwin->w_cursor.lnum; if (n > 0 && n < curbuf->b_ind_maxparen / 2) return curbuf->b_ind_maxparen - (int)n; return curbuf->b_ind_maxparen; } /* * Parse 'cinoptions' and set the values in "curbuf". * Must be called when 'cinoptions', 'shiftwidth' and/or 'tabstop' changes. */ void parse_cino(buf_T *buf) { char_u *p; char_u *l; char_u *digits; int n; int divider; int fraction = 0; int sw = (int)get_sw_value(buf); // Set the default values. // Spaces from a block's opening brace the prevailing indent for that // block should be. buf->b_ind_level = sw; // Spaces from the edge of the line an open brace that's at the end of a // line is imagined to be. buf->b_ind_open_imag = 0; // Spaces from the prevailing indent for a line that is not preceded by // an opening brace. buf->b_ind_no_brace = 0; // Column where the first { of a function should be located }. buf->b_ind_first_open = 0; // Spaces from the prevailing indent a leftmost open brace should be // located. buf->b_ind_open_extra = 0; // Spaces from the matching open brace (real location for one at the left // edge; imaginary location from one that ends a line) the matching close // brace should be located. buf->b_ind_close_extra = 0; // Spaces from the edge of the line an open brace sitting in the leftmost // column is imagined to be. buf->b_ind_open_left_imag = 0; // Spaces jump labels should be shifted to the left if N is non-negative, // otherwise the jump label will be put to column 1. buf->b_ind_jump_label = -1; // Spaces from the switch() indent a "case xx" label should be located. buf->b_ind_case = sw; // Spaces from the "case xx:" code after a switch() should be located. buf->b_ind_case_code = sw; // Lineup break at end of case in switch() with case label. buf->b_ind_case_break = 0; // Spaces from the class declaration indent a scope declaration label // should be located. buf->b_ind_scopedecl = sw; // Spaces from the scope declaration label code should be located. buf->b_ind_scopedecl_code = sw; // Amount K&R-style parameters should be indented. buf->b_ind_param = sw; // Amount a function type spec should be indented. buf->b_ind_func_type = sw; // Amount a cpp base class declaration or constructor initialization // should be indented. buf->b_ind_cpp_baseclass = sw; // additional spaces beyond the prevailing indent a continuation line // should be located. buf->b_ind_continuation = sw; // Spaces from the indent of the line with an unclosed parenthesis. buf->b_ind_unclosed = sw * 2; // Spaces from the indent of the line with an unclosed parenthesis, which // itself is also unclosed. buf->b_ind_unclosed2 = sw; // Suppress ignoring spaces from the indent of a line starting with an // unclosed parenthesis. buf->b_ind_unclosed_noignore = 0; // If the opening paren is the last nonwhite character on the line, and // b_ind_unclosed_wrapped is nonzero, use this indent relative to the outer // context (for very long lines). buf->b_ind_unclosed_wrapped = 0; // Suppress ignoring white space when lining up with the character after // an unclosed parenthesis. 
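    // When this is set the position directly after the '(' is used even
    // when only white space follows it (see the b_ind_unclosed_whiteok
    // check in get_c_indent() below).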
buf->b_ind_unclosed_whiteok = 0; // Indent a closing parenthesis under the line start of the matching // opening parenthesis. buf->b_ind_matching_paren = 0; // Indent a closing parenthesis under the previous line. buf->b_ind_paren_prev = 0; // Extra indent for comments. buf->b_ind_comment = 0; // Spaces from the comment opener when there is nothing after it. buf->b_ind_in_comment = 3; // Boolean: if non-zero, use b_ind_in_comment even if there is something // after the comment opener. buf->b_ind_in_comment2 = 0; // Max lines to search for an open paren. buf->b_ind_maxparen = 20; // Max lines to search for an open comment. buf->b_ind_maxcomment = 70; // Handle braces for java code. buf->b_ind_java = 0; // Not to confuse JS object properties with labels. buf->b_ind_js = 0; // Handle blocked cases correctly. buf->b_ind_keep_case_label = 0; // Handle C++ namespace. buf->b_ind_cpp_namespace = 0; // Handle continuation lines containing conditions of if(), for() and // while(). buf->b_ind_if_for_while = 0; // indentation for # comments buf->b_ind_hash_comment = 0; // Handle C++ extern "C" or "C++" buf->b_ind_cpp_extern_c = 0; // Handle C #pragma directives buf->b_ind_pragma = 0; for (p = buf->b_p_cino; *p; ) { l = p++; if (*p == '-') ++p; digits = p; // remember where the digits start n = getdigits(&p); divider = 0; if (*p == '.') // ".5s" means a fraction { fraction = atol((char *)++p); while (VIM_ISDIGIT(*p)) { ++p; if (divider) divider *= 10; else divider = 10; } } if (*p == 's') // "2s" means two times 'shiftwidth' { if (p == digits) n = sw; // just "s" is one 'shiftwidth' else { n *= sw; if (divider) n += (sw * fraction + divider / 2) / divider; } ++p; } if (l[1] == '-') n = -n; // When adding an entry here, also update the default 'cinoptions' in // doc/indent.txt, and add explanation for it! 
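    // Example: with 'shiftwidth' at 4, "cinoptions=>2,e.5s" produces
    // n = 2 for the '>' entry (a plain number) and n = 2 for the 'e'
    // entry (half a 'shiftwidth', rounded by the fraction code above).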
switch (*l) { case '>': buf->b_ind_level = n; break; case 'e': buf->b_ind_open_imag = n; break; case 'n': buf->b_ind_no_brace = n; break; case 'f': buf->b_ind_first_open = n; break; case '{': buf->b_ind_open_extra = n; break; case '}': buf->b_ind_close_extra = n; break; case '^': buf->b_ind_open_left_imag = n; break; case 'L': buf->b_ind_jump_label = n; break; case ':': buf->b_ind_case = n; break; case '=': buf->b_ind_case_code = n; break; case 'b': buf->b_ind_case_break = n; break; case 'p': buf->b_ind_param = n; break; case 't': buf->b_ind_func_type = n; break; case '/': buf->b_ind_comment = n; break; case 'c': buf->b_ind_in_comment = n; break; case 'C': buf->b_ind_in_comment2 = n; break; case 'i': buf->b_ind_cpp_baseclass = n; break; case '+': buf->b_ind_continuation = n; break; case '(': buf->b_ind_unclosed = n; break; case 'u': buf->b_ind_unclosed2 = n; break; case 'U': buf->b_ind_unclosed_noignore = n; break; case 'W': buf->b_ind_unclosed_wrapped = n; break; case 'w': buf->b_ind_unclosed_whiteok = n; break; case 'm': buf->b_ind_matching_paren = n; break; case 'M': buf->b_ind_paren_prev = n; break; case ')': buf->b_ind_maxparen = n; break; case '*': buf->b_ind_maxcomment = n; break; case 'g': buf->b_ind_scopedecl = n; break; case 'h': buf->b_ind_scopedecl_code = n; break; case 'j': buf->b_ind_java = n; break; case 'J': buf->b_ind_js = n; break; case 'l': buf->b_ind_keep_case_label = n; break; case '#': buf->b_ind_hash_comment = n; break; case 'N': buf->b_ind_cpp_namespace = n; break; case 'k': buf->b_ind_if_for_while = n; break; case 'E': buf->b_ind_cpp_extern_c = n; break; case 'P': buf->b_ind_pragma = n; break; } if (*p == ',') ++p; } } static int find_match(int lookfor, linenr_T ourscope) { char_u *look; pos_T *theirscope; char_u *mightbeif; int elselevel; int whilelevel; if (lookfor == LOOKFOR_IF) { elselevel = 1; whilelevel = 0; } else { elselevel = 0; whilelevel = 1; } curwin->w_cursor.col = 0; while (curwin->w_cursor.lnum > ourscope + 1) { curwin->w_cursor.lnum--; curwin->w_cursor.col = 0; look = cin_skipcomment(ml_get_curline()); if (cin_iselse(look) || cin_isif(look) || cin_isdo(look) // XXX || cin_iswhileofdo(look, curwin->w_cursor.lnum)) { // if we've gone outside the braces entirely, // we must be out of scope... theirscope = find_start_brace(); // XXX if (theirscope == NULL) break; // and if the brace enclosing this is further // back than the one enclosing the else, we're // out of luck too. if (theirscope->lnum < ourscope) break; // and if they're enclosed in a *deeper* brace, // then we can ignore it because it's in a // different scope... if (theirscope->lnum > ourscope) continue; // if it was an "else" (that's not an "else if") // then we need to go back to another if, so // increment elselevel look = cin_skipcomment(ml_get_curline()); if (cin_iselse(look)) { mightbeif = cin_skipcomment(look + 4); if (!cin_isif(mightbeif)) ++elselevel; continue; } // if it was a "while" then we need to go back to // another "do", so increment whilelevel. XXX if (cin_iswhileofdo(look, curwin->w_cursor.lnum)) { ++whilelevel; continue; } // If it's an "if" decrement elselevel look = cin_skipcomment(ml_get_curline()); if (cin_isif(look)) { elselevel--; // When looking for an "if" ignore "while"s that // get in the way. if (elselevel == 0 && lookfor == LOOKFOR_IF) whilelevel = 0; } // If it's a "do" decrement whilelevel if (cin_isdo(look)) whilelevel--; // if we've used up all the elses, then // this must be the if that we want! // match the indent level of that if. 
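	    // (When looking for a "do" the same countdown happens with
	    // whilelevel instead of elselevel.)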
if (elselevel <= 0 && whilelevel <= 0) return OK; } } return FAIL; } /* * Return the desired indent for C code. * Return -1 if the indent should be left alone (inside a raw string). */ int get_c_indent(void) { pos_T cur_curpos; int amount; int scope_amount; int cur_amount = MAXCOL; colnr_T col; char_u *theline; char_u *linecopy; pos_T *trypos; pos_T *comment_pos; pos_T *tryposBrace = NULL; pos_T tryposCopy; pos_T our_paren_pos; char_u *start; int start_brace; #define BRACE_IN_COL0 1 // '{' is in column 0 #define BRACE_AT_START 2 // '{' is at start of line #define BRACE_AT_END 3 // '{' is at end of line linenr_T ourscope; char_u *l; char_u *look; char_u terminated; int lookfor; int whilelevel; linenr_T lnum; int n; int iscase; int lookfor_break; int lookfor_cpp_namespace = FALSE; int cont_amount = 0; // amount for continuation line int original_line_islabel; int added_to_amount = 0; int js_cur_has_key = 0; linenr_T raw_string_start = 0; cpp_baseclass_cache_T cache_cpp_baseclass = { FALSE, { MAXLNUM, 0 } }; // make a copy, value is changed below int ind_continuation = curbuf->b_ind_continuation; // remember where the cursor was when we started cur_curpos = curwin->w_cursor; // if we are at line 1 zero indent is fine, right? if (cur_curpos.lnum == 1) return 0; // Get a copy of the current contents of the line. // This is required, because only the most recent line obtained with // ml_get is valid! linecopy = vim_strsave(ml_get(cur_curpos.lnum)); if (linecopy == NULL) return 0; // In insert mode and the cursor is on a ')' truncate the line at the // cursor position. We don't want to line up with the matching '(' when // inserting new stuff. // For unknown reasons the cursor might be past the end of the line, thus // check for that. if ((State & MODE_INSERT) && curwin->w_cursor.col < (colnr_T)STRLEN(linecopy) && linecopy[curwin->w_cursor.col] == ')') linecopy[curwin->w_cursor.col] = NUL; theline = skipwhite(linecopy); // move the cursor to the start of the line curwin->w_cursor.col = 0; original_line_islabel = cin_islabel(); // XXX // If we are inside a raw string don't change the indent. // Ignore a raw string inside a comment. comment_pos = ind_find_start_comment(); if (comment_pos != NULL) { // findmatchlimit() static pos is overwritten, make a copy tryposCopy = *comment_pos; comment_pos = &tryposCopy; } trypos = find_start_rawstring(curbuf->b_ind_maxcomment); if (trypos != NULL && (comment_pos == NULL || LT_POS(*trypos, *comment_pos))) { amount = -1; goto laterend; } // #defines and so on go at the left when included in 'cinkeys', // excluding pragmas when customized in 'cinoptions' if (*theline == '#' && (*linecopy == '#' || in_cinkeys('#', ' ', TRUE))) { char_u *directive = skipwhite(theline + 1); if (curbuf->b_ind_pragma == 0 || STRNCMP(directive, "pragma", 6) != 0) { amount = curbuf->b_ind_hash_comment; goto theend; } } // Is it a non-case label? Then that goes at the left margin too unless: // - JS flag is set. // - 'L' item has a positive value. if (original_line_islabel && !curbuf->b_ind_js && curbuf->b_ind_jump_label < 0) { amount = 0; goto theend; } // If we're inside a "//" comment and there is a "//" comment in a // previous line, lineup with that one. if (cin_islinecomment(theline)) { pos_T linecomment_pos; trypos = find_line_comment(); // XXX if (trypos == NULL && curwin->w_cursor.lnum > 1) { // There may be a statement before the comment, search from the end // of the line for a comment start. 
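	    // E.g.:
	    //	    foo();	// comment
	    //			// line up this comment with the one above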
linecomment_pos.col = check_linecomment(ml_get(curwin->w_cursor.lnum - 1)); if (linecomment_pos.col != MAXCOL) { trypos = &linecomment_pos; trypos->lnum = curwin->w_cursor.lnum - 1; } } if (trypos != NULL) { // find how indented the line beginning the comment is getvcol(curwin, trypos, &col, NULL, NULL); amount = col; goto theend; } } // If we're inside a comment and not looking at the start of the // comment, try using the 'comments' option. if (!cin_iscomment(theline) && comment_pos != NULL) // XXX { int lead_start_len = 2; int lead_middle_len = 1; char_u lead_start[COM_MAX_LEN]; // start-comment string char_u lead_middle[COM_MAX_LEN]; // middle-comment string char_u lead_end[COM_MAX_LEN]; // end-comment string char_u *p; int start_align = 0; int start_off = 0; int done = FALSE; // find how indented the line beginning the comment is getvcol(curwin, comment_pos, &col, NULL, NULL); amount = col; *lead_start = NUL; *lead_middle = NUL; p = curbuf->b_p_com; while (*p != NUL) { int align = 0; int off = 0; int what = 0; while (*p != NUL && *p != ':') { if (*p == COM_START || *p == COM_END || *p == COM_MIDDLE) what = *p++; else if (*p == COM_LEFT || *p == COM_RIGHT) align = *p++; else if (VIM_ISDIGIT(*p) || *p == '-') off = getdigits(&p); else ++p; } if (*p == ':') ++p; (void)copy_option_part(&p, lead_end, COM_MAX_LEN, ","); if (what == COM_START) { STRCPY(lead_start, lead_end); lead_start_len = (int)STRLEN(lead_start); start_off = off; start_align = align; } else if (what == COM_MIDDLE) { STRCPY(lead_middle, lead_end); lead_middle_len = (int)STRLEN(lead_middle); } else if (what == COM_END) { // If our line starts with the middle comment string, line it // up with the comment opener per the 'comments' option. if (STRNCMP(theline, lead_middle, lead_middle_len) == 0 && STRNCMP(theline, lead_end, STRLEN(lead_end)) != 0) { done = TRUE; if (curwin->w_cursor.lnum > 1) { // If the start comment string matches in the previous // line, use the indent of that line plus offset. If // the middle comment string matches in the previous // line, use the indent of that line. XXX look = skipwhite(ml_get(curwin->w_cursor.lnum - 1)); if (STRNCMP(look, lead_start, lead_start_len) == 0) amount = get_indent_lnum(curwin->w_cursor.lnum - 1); else if (STRNCMP(look, lead_middle, lead_middle_len) == 0) { amount = get_indent_lnum(curwin->w_cursor.lnum - 1); break; } // If the start comment string doesn't match with the // start of the comment, skip this entry. XXX else if (STRNCMP(ml_get(comment_pos->lnum) + comment_pos->col, lead_start, lead_start_len) != 0) continue; } if (start_off != 0) amount += start_off; else if (start_align == COM_RIGHT) amount += vim_strsize(lead_start) - vim_strsize(lead_middle); break; } // If our line starts with the end comment string, line it up // with the middle comment if (STRNCMP(theline, lead_middle, lead_middle_len) != 0 && STRNCMP(theline, lead_end, STRLEN(lead_end)) == 0) { amount = get_indent_lnum(curwin->w_cursor.lnum - 1); // XXX if (off != 0) amount += off; else if (align == COM_RIGHT) amount += vim_strsize(lead_start) - vim_strsize(lead_middle); done = TRUE; break; } } } // If our line starts with an asterisk, line up with the // asterisk in the comment opener; otherwise, line up // with the first character of the comment text. if (done) ; else if (theline[0] == '*') amount += 1; else { // If we are more than one line away from the comment opener, take // the indent of the previous non-empty line. 
If 'cino' has "CO" // and we are just below the comment opener and there are any // white characters after it line up with the text after it; // otherwise, add the amount specified by "c" in 'cino' amount = -1; for (lnum = cur_curpos.lnum - 1; lnum > comment_pos->lnum; --lnum) { if (linewhite(lnum)) // skip blank lines continue; amount = get_indent_lnum(lnum); // XXX break; } if (amount == -1) // use the comment opener { if (!curbuf->b_ind_in_comment2) { start = ml_get(comment_pos->lnum); look = start + comment_pos->col + 2; // skip / and * if (*look != NUL) // if something after it comment_pos->col = (colnr_T)(skipwhite(look) - start); } getvcol(curwin, comment_pos, &col, NULL, NULL); amount = col; if (curbuf->b_ind_in_comment2 || *look == NUL) amount += curbuf->b_ind_in_comment; } } goto theend; } // Are we looking at a ']' that has a match? if (*skipwhite(theline) == ']' && (trypos = find_match_char('[', curbuf->b_ind_maxparen)) != NULL) { // align with the line containing the '['. amount = get_indent_lnum(trypos->lnum); goto theend; } // Are we inside parentheses or braces? XXX if (((trypos = find_match_paren(curbuf->b_ind_maxparen)) != NULL && curbuf->b_ind_java == 0) || (tryposBrace = find_start_brace()) != NULL || trypos != NULL) { if (trypos != NULL && tryposBrace != NULL) { // Both an unmatched '(' and '{' is found. Use the one which is // closer to the current cursor position, set the other to NULL. if (trypos->lnum != tryposBrace->lnum ? trypos->lnum < tryposBrace->lnum : trypos->col < tryposBrace->col) trypos = NULL; else tryposBrace = NULL; } if (trypos != NULL) { // If the matching paren is more than one line away, use the indent of // a previous non-empty line that matches the same paren. if (theline[0] == ')' && curbuf->b_ind_paren_prev) { // Line up with the start of the matching paren line. amount = get_indent_lnum(curwin->w_cursor.lnum - 1); // XXX } else { amount = -1; our_paren_pos = *trypos; for (lnum = cur_curpos.lnum - 1; lnum > our_paren_pos.lnum; --lnum) { l = skipwhite(ml_get(lnum)); if (cin_nocode(l)) // skip comment lines continue; if (cin_ispreproc_cont(&l, &lnum, &amount)) continue; // ignore #define, #if, etc. curwin->w_cursor.lnum = lnum; // Skip a comment or raw string. XXX if ((trypos = ind_find_start_CORS(NULL)) != NULL) { lnum = trypos->lnum + 1; continue; } // XXX if ((trypos = find_match_paren( corr_ind_maxparen(&cur_curpos))) != NULL && trypos->lnum == our_paren_pos.lnum && trypos->col == our_paren_pos.col) { amount = get_indent_lnum(lnum); // XXX if (theline[0] == ')') { if (our_paren_pos.lnum != lnum && cur_amount > amount) cur_amount = amount; amount = -1; } break; } } } // Line up with line where the matching paren is. XXX // If the line starts with a '(' or the indent for unclosed // parentheses is zero, line up with the unclosed parentheses. if (amount == -1) { int ignore_paren_col = 0; int is_if_for_while = 0; if (curbuf->b_ind_if_for_while) { // Look for the outermost opening parenthesis on this line // and check whether it belongs to an "if", "for" or "while". 
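		// When it does belong to one, b_ind_if_for_while ("k" in
		// 'cinoptions') is added for the continuation line below
		// instead of b_ind_unclosed.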
pos_T cursor_save = curwin->w_cursor; pos_T outermost; char_u *line; trypos = &our_paren_pos; do { outermost = *trypos; curwin->w_cursor.lnum = outermost.lnum; curwin->w_cursor.col = outermost.col; trypos = find_match_paren(curbuf->b_ind_maxparen); } while (trypos && trypos->lnum == outermost.lnum); curwin->w_cursor = cursor_save; line = ml_get(outermost.lnum); is_if_for_while = cin_is_if_for_while_before_offset(line, &outermost.col); } amount = skip_label(our_paren_pos.lnum, &look); look = skipwhite(look); if (*look == '(') { linenr_T save_lnum = curwin->w_cursor.lnum; char_u *line; int look_col; // Ignore a '(' in front of the line that has a match before // our matching '('. curwin->w_cursor.lnum = our_paren_pos.lnum; line = ml_get_curline(); look_col = (int)(look - line); curwin->w_cursor.col = look_col + 1; if ((trypos = findmatchlimit(NULL, ')', 0, curbuf->b_ind_maxparen)) != NULL && trypos->lnum == our_paren_pos.lnum && trypos->col < our_paren_pos.col) ignore_paren_col = trypos->col + 1; curwin->w_cursor.lnum = save_lnum; look = ml_get(our_paren_pos.lnum) + look_col; } if (theline[0] == ')' || (curbuf->b_ind_unclosed == 0 && is_if_for_while == 0) || (!curbuf->b_ind_unclosed_noignore && *look == '(' && ignore_paren_col == 0)) { // If we're looking at a close paren, line up right there; // otherwise, line up with the next (non-white) character. // When b_ind_unclosed_wrapped is set and the matching paren is // the last nonwhite character of the line, use either the // indent of the current line or the indentation of the next // outer paren and add b_ind_unclosed_wrapped (for very long // lines). if (theline[0] != ')') { cur_amount = MAXCOL; l = ml_get(our_paren_pos.lnum); if (curbuf->b_ind_unclosed_wrapped && cin_ends_in(l, (char_u *)"(", NULL)) { // look for opening unmatched paren, indent one level // for each additional level n = 1; for (col = 0; col < our_paren_pos.col; ++col) { switch (l[col]) { case '(': case '{': ++n; break; case ')': case '}': if (n > 1) --n; break; } } our_paren_pos.col = 0; amount += n * curbuf->b_ind_unclosed_wrapped; } else if (curbuf->b_ind_unclosed_whiteok) our_paren_pos.col++; else { col = our_paren_pos.col + 1; while (VIM_ISWHITE(l[col])) col++; if (l[col] != NUL) // In case of trailing space our_paren_pos.col = col; else our_paren_pos.col++; } } // Find how indented the paren is, or the character after it // if we did the above "if". if (our_paren_pos.col > 0) { getvcol(curwin, &our_paren_pos, &col, NULL, NULL); if (cur_amount > (int)col) cur_amount = col; } } if (theline[0] == ')' && curbuf->b_ind_matching_paren) { // Line up with the start of the matching paren line. } else if ((curbuf->b_ind_unclosed == 0 && is_if_for_while == 0) || (!curbuf->b_ind_unclosed_noignore && *look == '(' && ignore_paren_col == 0)) { if (cur_amount != MAXCOL) amount = cur_amount; } else { // Add b_ind_unclosed2 for each '(' before our matching one, // but ignore (void) before the line (ignore_paren_col). 
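		    // E.g. for "if ((a &&" the extra unmatched '(' in
		    // front of the matching one adds one b_ind_unclosed2.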
col = our_paren_pos.col; while ((int)our_paren_pos.col > ignore_paren_col) { --our_paren_pos.col; switch (*ml_get_pos(&our_paren_pos)) { case '(': amount += curbuf->b_ind_unclosed2; col = our_paren_pos.col; break; case ')': amount -= curbuf->b_ind_unclosed2; col = MAXCOL; break; } } // Use b_ind_unclosed once, when the first '(' is not inside // braces if (col == MAXCOL) amount += curbuf->b_ind_unclosed; else { curwin->w_cursor.lnum = our_paren_pos.lnum; curwin->w_cursor.col = col; if (find_match_paren_after_brace(curbuf->b_ind_maxparen) != NULL) amount += curbuf->b_ind_unclosed2; else { if (is_if_for_while) amount += curbuf->b_ind_if_for_while; else amount += curbuf->b_ind_unclosed; } } // For a line starting with ')' use the minimum of the two // positions, to avoid giving it more indent than the previous // lines: // func_long_name( if (x // arg && yy // ) ^ not here ) ^ not here if (cur_amount < amount) amount = cur_amount; } } // add extra indent for a comment if (cin_iscomment(theline)) amount += curbuf->b_ind_comment; } else { // We are inside braces, there is a { before this line at the position // stored in tryposBrace. // Make a copy of tryposBrace, it may point to pos_copy inside // find_start_brace(), which may be changed somewhere. tryposCopy = *tryposBrace; tryposBrace = &tryposCopy; trypos = tryposBrace; ourscope = trypos->lnum; start = ml_get(ourscope); // Now figure out how indented the line is in general. // If the brace was at the start of the line, we use that; // otherwise, check out the indentation of the line as // a whole and then add the "imaginary indent" to that. look = skipwhite(start); if (*look == '{') { getvcol(curwin, trypos, &col, NULL, NULL); amount = col; if (*start == '{') start_brace = BRACE_IN_COL0; else start_brace = BRACE_AT_START; } else { // That opening brace might have been on a continuation // line. if so, find the start of the line. curwin->w_cursor.lnum = ourscope; // Position the cursor over the rightmost paren, so that // matching it will take us back to the start of the line. lnum = ourscope; if (find_last_paren(start, '(', ')') && (trypos = find_match_paren(curbuf->b_ind_maxparen)) != NULL) lnum = trypos->lnum; // It could have been something like // case 1: if (asdf && // ldfd) { // } if ((curbuf->b_ind_js || curbuf->b_ind_keep_case_label) && cin_iscase(skipwhite(ml_get_curline()), FALSE)) amount = get_indent(); else if (curbuf->b_ind_js) amount = get_indent_lnum(lnum); else amount = skip_label(lnum, &l); start_brace = BRACE_AT_END; } // For Javascript check if the line starts with "key:". if (curbuf->b_ind_js) js_cur_has_key = cin_has_js_key(theline); // If we're looking at a closing brace, that's where // we want to be. otherwise, add the amount of room // that an indent is supposed to be. if (theline[0] == '}') { // they may want closing braces to line up with something // other than the open brace. indulge them, if so. amount += curbuf->b_ind_close_extra; } else { // If we're looking at an "else", try to find an "if" // to match it with. // If we're looking at a "while", try to find a "do" // to match it with. lookfor = LOOKFOR_INITIAL; if (cin_iselse(theline)) lookfor = LOOKFOR_IF; else if (cin_iswhileofdo(theline, cur_curpos.lnum)) // XXX lookfor = LOOKFOR_DO; if (lookfor != LOOKFOR_INITIAL) { curwin->w_cursor.lnum = cur_curpos.lnum; if (find_match(lookfor, ourscope) == OK) { amount = get_indent(); // XXX goto theend; } } // We get here if we are not on an "while-of-do" or "else" (or // failed to find a matching "if"). 
// Search backwards for something to line up with. // First set amount for when we don't find anything. // if the '{' is _really_ at the left margin, use the imaginary // location of a left-margin brace. Otherwise, correct the // location for b_ind_open_extra. if (start_brace == BRACE_IN_COL0) // '{' is in column 0 { amount = curbuf->b_ind_open_left_imag; lookfor_cpp_namespace = TRUE; } else if (start_brace == BRACE_AT_START && lookfor_cpp_namespace) // '{' is at start { lookfor_cpp_namespace = TRUE; } else { if (start_brace == BRACE_AT_END) // '{' is at end of line { amount += curbuf->b_ind_open_imag; l = skipwhite(ml_get_curline()); if (cin_is_cpp_namespace(l)) amount += curbuf->b_ind_cpp_namespace; else if (cin_is_cpp_extern_c(l)) amount += curbuf->b_ind_cpp_extern_c; } else { // Compensate for adding b_ind_open_extra later. amount -= curbuf->b_ind_open_extra; if (amount < 0) amount = 0; } } lookfor_break = FALSE; if (cin_iscase(theline, FALSE)) // it's a switch() label { lookfor = LOOKFOR_CASE; // find a previous switch() label amount += curbuf->b_ind_case; } else if (cin_isscopedecl(theline)) // private:, ... { lookfor = LOOKFOR_SCOPEDECL; // class decl is this block amount += curbuf->b_ind_scopedecl; } else { if (curbuf->b_ind_case_break && cin_isbreak(theline)) // break; ... lookfor_break = TRUE; lookfor = LOOKFOR_INITIAL; // b_ind_level from start of block amount += curbuf->b_ind_level; } scope_amount = amount; whilelevel = 0; // Search backwards. If we find something we recognize, line up // with that. // // If we're looking at an open brace, indent // the usual amount relative to the conditional // that opens the block. curwin->w_cursor = cur_curpos; for (;;) { curwin->w_cursor.lnum--; curwin->w_cursor.col = 0; // If we went all the way back to the start of our scope, line // up with it. if (curwin->w_cursor.lnum <= ourscope) { // We reached end of scope: // If looking for an enum or structure initialization // go further back: // If it is an initializer (enum xxx or xxx =), then // don't add ind_continuation, otherwise it is a variable // declaration: // int x, // here; <-- add ind_continuation if (lookfor == LOOKFOR_ENUM_OR_INIT) { if (curwin->w_cursor.lnum == 0 || curwin->w_cursor.lnum < ourscope - curbuf->b_ind_maxparen) { // nothing found (abuse curbuf->b_ind_maxparen as // limit) assume terminated line (i.e. a variable // initialization) if (cont_amount > 0) amount = cont_amount; else if (!curbuf->b_ind_js) amount += ind_continuation; break; } l = ml_get_curline(); // If we're in a comment or raw string now, skip to // the start of it. trypos = ind_find_start_CORS(NULL); if (trypos != NULL) { curwin->w_cursor.lnum = trypos->lnum + 1; curwin->w_cursor.col = 0; continue; } // Skip preprocessor directives and blank lines. if (cin_ispreproc_cont(&l, &curwin->w_cursor.lnum, &amount)) continue; if (cin_nocode(l)) continue; terminated = cin_isterminated(l, FALSE, TRUE); // If we are at top level and the line looks like a // function declaration, we are done // (it's a variable declaration). if (start_brace != BRACE_IN_COL0 || !cin_isfuncdecl(&l, curwin->w_cursor.lnum, 0)) { // if the line is terminated with another ',' // it is a continued variable initialization. // don't add extra indent. // TODO: does not work, if a function // declaration is split over multiple lines: // cin_isfuncdecl returns FALSE then. if (terminated == ',') break; // if it is an enum declaration or an assignment, // we are done. 
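			// e.g. "enum color {" or a line ending in "= {"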
if (terminated != ';' && cin_isinit()) break; // nothing useful found if (terminated == 0 || terminated == '{') continue; } if (terminated != ';') { // Skip parens and braces. Position the cursor // over the rightmost paren, so that matching it // will take us back to the start of the line. // XXX trypos = NULL; if (find_last_paren(l, '(', ')')) trypos = find_match_paren( curbuf->b_ind_maxparen); if (trypos == NULL && find_last_paren(l, '{', '}')) trypos = find_start_brace(); if (trypos != NULL) { curwin->w_cursor.lnum = trypos->lnum + 1; curwin->w_cursor.col = 0; continue; } } // it's a variable declaration, add indentation // like in // int a, // b; if (cont_amount > 0) amount = cont_amount; else amount += ind_continuation; } else if (lookfor == LOOKFOR_UNTERM) { if (cont_amount > 0) amount = cont_amount; else amount += ind_continuation; } else { if (lookfor != LOOKFOR_TERM && lookfor != LOOKFOR_CPP_BASECLASS && lookfor != LOOKFOR_COMMA) { amount = scope_amount; if (theline[0] == '{') { amount += curbuf->b_ind_open_extra; added_to_amount = curbuf->b_ind_open_extra; } } if (lookfor_cpp_namespace) { // Looking for C++ namespace, need to look further // back. if (curwin->w_cursor.lnum == ourscope) continue; if (curwin->w_cursor.lnum == 0 || curwin->w_cursor.lnum < ourscope - FIND_NAMESPACE_LIM) break; l = ml_get_curline(); // If we're in a comment or raw string now, skip // to the start of it. trypos = ind_find_start_CORS(NULL); if (trypos != NULL) { curwin->w_cursor.lnum = trypos->lnum + 1; curwin->w_cursor.col = 0; continue; } // Skip preprocessor directives and blank lines. if (cin_ispreproc_cont(&l, &curwin->w_cursor.lnum, &amount)) continue; // Finally the actual check for "namespace". if (cin_is_cpp_namespace(l)) { amount += curbuf->b_ind_cpp_namespace - added_to_amount; break; } else if (cin_is_cpp_extern_c(l)) { amount += curbuf->b_ind_cpp_extern_c - added_to_amount; break; } if (cin_nocode(l)) continue; } } break; } // If we're in a comment or raw string now, skip to the start // of it. XXX if ((trypos = ind_find_start_CORS(&raw_string_start)) != NULL) { curwin->w_cursor.lnum = trypos->lnum + 1; curwin->w_cursor.col = 0; continue; } l = ml_get_curline(); // If this is a switch() label, may line up relative to that. // If this is a C++ scope declaration, do the same. iscase = cin_iscase(l, FALSE); if (iscase || cin_isscopedecl(l)) { // we are only looking for cpp base class // declaration/initialization any longer if (lookfor == LOOKFOR_CPP_BASECLASS) break; // When looking for a "do" we are not interested in // labels. 
if (whilelevel > 0) continue; // case xx: // c = 99 + <- this indent plus continuation //-> here; if (lookfor == LOOKFOR_UNTERM || lookfor == LOOKFOR_ENUM_OR_INIT) { if (cont_amount > 0) amount = cont_amount; else amount += ind_continuation; break; } // case xx: <- line up with this case // x = 333; // case yy: if ( (iscase && lookfor == LOOKFOR_CASE) || (iscase && lookfor_break) || (!iscase && lookfor == LOOKFOR_SCOPEDECL)) { // Check that this case label is not for another // switch() XXX if ((trypos = find_start_brace()) == NULL || trypos->lnum == ourscope) { amount = get_indent(); // XXX break; } continue; } n = get_indent_nolabel(curwin->w_cursor.lnum); // XXX // case xx: if (cond) <- line up with this if // y = y + 1; // -> s = 99; // // case xx: // if (cond) <- line up with this line // y = y + 1; // -> s = 99; if (lookfor == LOOKFOR_TERM) { if (n) amount = n; if (!lookfor_break) break; } // case xx: x = x + 1; <- line up with this x // -> y = y + 1; // // case xx: if (cond) <- line up with this if // -> y = y + 1; if (n) { amount = n; l = after_label(ml_get_curline()); if (l != NULL && cin_is_cinword(l)) { if (theline[0] == '{') amount += curbuf->b_ind_open_extra; else amount += curbuf->b_ind_level + curbuf->b_ind_no_brace; } break; } // Try to get the indent of a statement before the switch // label. If nothing is found, line up relative to the // switch label. // break; <- may line up with this line // case xx: // -> y = 1; scope_amount = get_indent() + (iscase // XXX ? curbuf->b_ind_case_code : curbuf->b_ind_scopedecl_code); lookfor = curbuf->b_ind_case_break ? LOOKFOR_NOBREAK : LOOKFOR_ANY; continue; } // Looking for a switch() label or C++ scope declaration, // ignore other lines, skip {}-blocks. if (lookfor == LOOKFOR_CASE || lookfor == LOOKFOR_SCOPEDECL) { if (find_last_paren(l, '{', '}') && (trypos = find_start_brace()) != NULL) { curwin->w_cursor.lnum = trypos->lnum + 1; curwin->w_cursor.col = 0; } continue; } // Ignore jump labels with nothing after them. if (!curbuf->b_ind_js && cin_islabel()) { l = after_label(ml_get_curline()); if (l == NULL || cin_nocode(l)) continue; } // Ignore #defines, #if, etc. // Ignore comment and empty lines. // (need to get the line again, cin_islabel() may have // unlocked it) l = ml_get_curline(); if (cin_ispreproc_cont(&l, &curwin->w_cursor.lnum, &amount) || cin_nocode(l)) continue; // Are we at the start of a cpp base class declaration or // constructor initialization? XXX n = FALSE; if (lookfor != LOOKFOR_TERM && curbuf->b_ind_cpp_baseclass > 0) { n = cin_is_cpp_baseclass(&cache_cpp_baseclass); l = ml_get_curline(); } if (n) { if (lookfor == LOOKFOR_UNTERM) { if (cont_amount > 0) amount = cont_amount; else amount += ind_continuation; } else if (theline[0] == '{') { // Need to find start of the declaration. lookfor = LOOKFOR_UNTERM; ind_continuation = 0; continue; } else // XXX amount = get_baseclass_amount( cache_cpp_baseclass.lpos.col); break; } else if (lookfor == LOOKFOR_CPP_BASECLASS) { // only look, whether there is a cpp base class // declaration or initialization before the opening brace. if (cin_isterminated(l, TRUE, FALSE)) break; else continue; } // What happens next depends on the line being terminated. // If terminated with a ',' only consider it terminating if // there is another unterminated statement behind, eg: // 123, // sizeof // here // Otherwise check whether it is an enumeration or structure // initialisation (not indented) or a variable declaration // (indented). 
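		// cin_isterminated() returns the terminating character
		// (';', ',', '{', ...) or zero for an unterminated line.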
terminated = cin_isterminated(l, FALSE, TRUE); if (js_cur_has_key) { js_cur_has_key = 0; // only check the first line if (curbuf->b_ind_js && terminated == ',') { // For Javascript we might be inside an object: // key: something, <- align with this // key: something // or: // key: something + <- align with this // something, // key: something lookfor = LOOKFOR_JS_KEY; } } if (lookfor == LOOKFOR_JS_KEY && cin_has_js_key(l)) { amount = get_indent(); break; } if (lookfor == LOOKFOR_COMMA) { if (tryposBrace != NULL && tryposBrace->lnum >= curwin->w_cursor.lnum) break; if (terminated == ',') // line below current line is the one that starts a // (possibly broken) line ending in a comma. break; else { amount = get_indent(); if (curwin->w_cursor.lnum - 1 == ourscope) // line above is start of the scope, thus current // line is the one that stars a (possibly broken) // line ending in a comma. break; } } if (terminated == 0 || (lookfor != LOOKFOR_UNTERM && terminated == ',')) { if (lookfor != LOOKFOR_ENUM_OR_INIT && (*skipwhite(l) == '[' || l[STRLEN(l) - 1] == '[')) amount += ind_continuation; // if we're in the middle of a paren thing, // go back to the line that starts it so // we can get the right prevailing indent // if ( foo && // bar ) // Position the cursor over the rightmost paren, so that // matching it will take us back to the start of the line. // Ignore a match before the start of the block. (void)find_last_paren(l, '(', ')'); trypos = find_match_paren(corr_ind_maxparen(&cur_curpos)); if (trypos != NULL && (trypos->lnum < tryposBrace->lnum || (trypos->lnum == tryposBrace->lnum && trypos->col < tryposBrace->col))) trypos = NULL; // If we are looking for ',', we also look for matching // braces. if (trypos == NULL && terminated == ',' && find_last_paren(l, '{', '}')) trypos = find_start_brace(); if (trypos != NULL) { // Check if we are on a case label now. This is // handled above. // case xx: if ( asdf && // asdf) curwin->w_cursor = *trypos; l = ml_get_curline(); if (cin_iscase(l, FALSE) || cin_isscopedecl(l)) { ++curwin->w_cursor.lnum; curwin->w_cursor.col = 0; continue; } } // Skip over continuation lines to find the one to get the // indent from // char *usethis = "bla{backslash} // bla", // here; if (terminated == ',') { while (curwin->w_cursor.lnum > 1) { l = ml_get(curwin->w_cursor.lnum - 1); if (*l == NUL || l[STRLEN(l) - 1] != '\\') break; --curwin->w_cursor.lnum; curwin->w_cursor.col = 0; } } // Get indent and pointer to text for current line, // ignoring any jump label. XXX if (curbuf->b_ind_js) cur_amount = get_indent(); else cur_amount = skip_label(curwin->w_cursor.lnum, &l); // If this is just above the line we are indenting, and it // starts with a '{', line it up with this line. // while (not) // -> { // } if (terminated != ',' && lookfor != LOOKFOR_TERM && theline[0] == '{') { amount = cur_amount; // Only add b_ind_open_extra when the current line // doesn't start with a '{', which must have a match // in the same line (scope is the same). Probably: // { 1, 2 }, // -> { 3, 4 } if (*skipwhite(l) != '{') amount += curbuf->b_ind_open_extra; if (curbuf->b_ind_cpp_baseclass && !curbuf->b_ind_js) { // have to look back, whether it is a cpp base // class declaration or initialization lookfor = LOOKFOR_CPP_BASECLASS; continue; } break; } // Check if we are after an "if", "while", etc. // Also allow " } else". if (cin_is_cinword(l) || cin_iselse(skipwhite(l))) { // Found an unterminated line after an if (), line up // with the last one. 
// if (cond) // 100 + // -> here; if (lookfor == LOOKFOR_UNTERM || lookfor == LOOKFOR_ENUM_OR_INIT) { if (cont_amount > 0) amount = cont_amount; else amount += ind_continuation; break; } // If this is just above the line we are indenting, we // are finished. // while (not) // -> here; // Otherwise this indent can be used when the line // before this is terminated. // yyy; // if (stat) // while (not) // xxx; // -> here; amount = cur_amount; if (theline[0] == '{') amount += curbuf->b_ind_open_extra; if (lookfor != LOOKFOR_TERM) { amount += curbuf->b_ind_level + curbuf->b_ind_no_brace; break; } // Special trick: when expecting the while () after a // do, line up with the while() // do // x = 1; // -> here l = skipwhite(ml_get_curline()); if (cin_isdo(l)) { if (whilelevel == 0) break; --whilelevel; } // When searching for a terminated line, don't use the // one between the "if" and the matching "else". // Need to use the scope of this "else". XXX // If whilelevel != 0 continue looking for a "do {". if (cin_iselse(l) && whilelevel == 0) { // If we're looking at "} else", let's make sure we // find the opening brace of the enclosing scope, // not the one from "if () {". if (*l == '}') curwin->w_cursor.col = (colnr_T)(l - ml_get_curline()) + 1; if ((trypos = find_start_brace()) == NULL || find_match(LOOKFOR_IF, trypos->lnum) == FAIL) break; } } // If we're below an unterminated line that is not an // "if" or something, we may line up with this line or // add something for a continuation line, depending on // the line before this one. else { // Found two unterminated lines on a row, line up with // the last one. // c = 99 + // 100 + // -> here; if (lookfor == LOOKFOR_UNTERM) { // When line ends in a comma add extra indent if (terminated == ',') amount += ind_continuation; break; } if (lookfor == LOOKFOR_ENUM_OR_INIT) { // Found two lines ending in ',', lineup with the // lowest one, but check for cpp base class // declaration/initialization, if it is an // opening brace or we are looking just for // enumerations/initializations. if (terminated == ',') { if (curbuf->b_ind_cpp_baseclass == 0) break; lookfor = LOOKFOR_CPP_BASECLASS; continue; } // Ignore unterminated lines in between, but // reduce indent. if (amount > cur_amount) amount = cur_amount; } else { // Found first unterminated line on a row, may // line up with this line, remember its indent // 100 + // -> here; l = ml_get_curline(); amount = cur_amount; n = (int)STRLEN(l); if (terminated == ',' && (*skipwhite(l) == ']' || (n >=2 && l[n - 2] == ']'))) break; // If previous line ends in ',', check whether we // are in an initialization or enum // struct xxx = // { // sizeof a, // 124 }; // or a normal possible continuation line. // but only, of no other statement has been found // yet. if (lookfor == LOOKFOR_INITIAL && terminated == ',') { if (curbuf->b_ind_js) { // Search for a line ending in a comma // and line up with the line below it // (could be the current line). // some = [ // 1, <- line up here // 2, // some = [ // 3 + <- line up here // 4 * // 5, // 6, if (cin_iscomment(skipwhite(l))) break; lookfor = LOOKFOR_COMMA; trypos = find_match_char('[', curbuf->b_ind_maxparen); if (trypos != NULL) { if (trypos->lnum == curwin->w_cursor.lnum - 1) { // Current line is first inside // [], line up with it. 
break; } ourscope = trypos->lnum; } } else { lookfor = LOOKFOR_ENUM_OR_INIT; cont_amount = cin_first_id_amount(); } } else { if (lookfor == LOOKFOR_INITIAL && *l != NUL && l[STRLEN(l) - 1] == '\\') // XXX cont_amount = cin_get_equal_amount( curwin->w_cursor.lnum); if (lookfor != LOOKFOR_TERM && lookfor != LOOKFOR_JS_KEY && lookfor != LOOKFOR_COMMA && raw_string_start != curwin->w_cursor.lnum) lookfor = LOOKFOR_UNTERM; } } } } // Check if we are after a while (cond); // If so: Ignore until the matching "do". else if (cin_iswhileofdo_end(terminated)) // XXX { // Found an unterminated line after a while ();, line up // with the last one. // while (cond); // 100 + <- line up with this one // -> here; if (lookfor == LOOKFOR_UNTERM || lookfor == LOOKFOR_ENUM_OR_INIT) { if (cont_amount > 0) amount = cont_amount; else amount += ind_continuation; break; } if (whilelevel == 0) { lookfor = LOOKFOR_TERM; amount = get_indent(); // XXX if (theline[0] == '{') amount += curbuf->b_ind_open_extra; } ++whilelevel; } // We are after a "normal" statement. // If we had another statement we can stop now and use the // indent of that other statement. // Otherwise the indent of the current statement may be used, // search backwards for the next "normal" statement. else { // Skip single break line, if before a switch label. It // may be lined up with the case label. if (lookfor == LOOKFOR_NOBREAK && cin_isbreak(skipwhite(ml_get_curline()))) { lookfor = LOOKFOR_ANY; continue; } // Handle "do {" line. if (whilelevel > 0) { l = cin_skipcomment(ml_get_curline()); if (cin_isdo(l)) { amount = get_indent(); // XXX --whilelevel; continue; } } // Found a terminated line above an unterminated line. Add // the amount for a continuation line. // x = 1; // y = foo + // -> here; // or // int x = 1; // int foo, // -> here; if (lookfor == LOOKFOR_UNTERM || lookfor == LOOKFOR_ENUM_OR_INIT) { if (cont_amount > 0) amount = cont_amount; else amount += ind_continuation; break; } // Found a terminated line above a terminated line or "if" // etc. line. Use the amount of the line below us. // x = 1; x = 1; // if (asdf) y = 2; // while (asdf) ->here; // here; // ->foo; if (lookfor == LOOKFOR_TERM) { if (!lookfor_break && whilelevel == 0) break; } // First line above the one we're indenting is terminated. // To know what needs to be done look further backward for // a terminated line. else { // position the cursor over the rightmost paren, so // that matching it will take us back to the start of // the line. Helps for: // func(asdr, // asdfasdf); // here; term_again: l = ml_get_curline(); if (find_last_paren(l, '(', ')') && (trypos = find_match_paren( curbuf->b_ind_maxparen)) != NULL) { // Check if we are on a case label now. This is // handled above. // case xx: if ( asdf && // asdf) curwin->w_cursor = *trypos; l = ml_get_curline(); if (cin_iscase(l, FALSE) || cin_isscopedecl(l)) { ++curwin->w_cursor.lnum; curwin->w_cursor.col = 0; continue; } } // When aligning with the case statement, don't align // with a statement after it. // case 1: { <-- don't use this { position // stat; // } // case 2: // stat; // } iscase = (curbuf->b_ind_keep_case_label && cin_iscase(l, FALSE)); // Get indent and pointer to text for current line, // ignoring any jump label. amount = skip_label(curwin->w_cursor.lnum, &l); if (theline[0] == '{') amount += curbuf->b_ind_open_extra; // See remark above: "Only add b_ind_open_extra.." l = skipwhite(l); if (*l == '{') amount -= curbuf->b_ind_open_extra; lookfor = iscase ? 
LOOKFOR_ANY : LOOKFOR_TERM; // When a terminated line starts with "else" skip to // the matching "if": // else 3; // indent this; // Need to use the scope of this "else". XXX // If whilelevel != 0 continue looking for a "do {". if (lookfor == LOOKFOR_TERM && *l != '}' && cin_iselse(l) && whilelevel == 0) { if ((trypos = find_start_brace()) == NULL || find_match(LOOKFOR_IF, trypos->lnum) == FAIL) break; continue; } // If we're at the end of a block, skip to the start of // that block. l = ml_get_curline(); if (find_last_paren(l, '{', '}') // XXX && (trypos = find_start_brace()) != NULL) { curwin->w_cursor = *trypos; // if not "else {" check for terminated again // but skip block for "} else {" l = cin_skipcomment(ml_get_curline()); if (*l == '}' || !cin_iselse(l)) goto term_again; ++curwin->w_cursor.lnum; curwin->w_cursor.col = 0; } } } } } } // add extra indent for a comment if (cin_iscomment(theline)) amount += curbuf->b_ind_comment; // subtract extra left-shift for jump labels if (curbuf->b_ind_jump_label > 0 && original_line_islabel) amount -= curbuf->b_ind_jump_label; goto theend; } // ok -- we're not inside any sort of structure at all! // // This means we're at the top level, and everything should // basically just match where the previous line is, except // for the lines immediately following a function declaration, // which are K&R-style parameters and need to be indented. // // if our line starts with an open brace, forget about any // prevailing indent and make sure it looks like the start // of a function if (theline[0] == '{') { amount = curbuf->b_ind_first_open; goto theend; } // If the NEXT line is a function declaration, the current // line needs to be indented as a function type spec. // Don't do this if the current line looks like a comment or if the // current line is terminated, ie. ends in ';', or if the current line // contains { or }: "void f() {\n if (1)" if (cur_curpos.lnum < curbuf->b_ml.ml_line_count && !cin_nocode(theline) && vim_strchr(theline, '{') == NULL && vim_strchr(theline, '}') == NULL && !cin_ends_in(theline, (char_u *)":", NULL) && !cin_ends_in(theline, (char_u *)",", NULL) && cin_isfuncdecl(NULL, cur_curpos.lnum + 1, cur_curpos.lnum + 1) && !cin_isterminated(theline, FALSE, TRUE)) { amount = curbuf->b_ind_func_type; goto theend; } // search backwards until we find something we recognize amount = 0; curwin->w_cursor = cur_curpos; while (curwin->w_cursor.lnum > 1) { curwin->w_cursor.lnum--; curwin->w_cursor.col = 0; l = ml_get_curline(); // If we're in a comment or raw string now, skip to the start // of it. XXX if ((trypos = ind_find_start_CORS(NULL)) != NULL) { curwin->w_cursor.lnum = trypos->lnum + 1; curwin->w_cursor.col = 0; continue; } // Are we at the start of a cpp base class declaration or // constructor initialization? XXX n = FALSE; if (curbuf->b_ind_cpp_baseclass != 0 && theline[0] != '{') { n = cin_is_cpp_baseclass(&cache_cpp_baseclass); l = ml_get_curline(); } if (n) { // XXX amount = get_baseclass_amount(cache_cpp_baseclass.lpos.col); break; } // Skip preprocessor directives and blank lines. if (cin_ispreproc_cont(&l, &curwin->w_cursor.lnum, &amount)) continue; if (cin_nocode(l)) continue; // If the previous line ends in ',', use one level of // indentation: // int foo, // bar; // do this before checking for '}' in case of eg. // enum foobar // { // ... 
// } foo, // bar; n = 0; if (cin_ends_in(l, (char_u *)",", NULL) || (*l != NUL && (n = l[STRLEN(l) - 1]) == '\\')) { // take us back to opening paren if (find_last_paren(l, '(', ')') && (trypos = find_match_paren( curbuf->b_ind_maxparen)) != NULL) curwin->w_cursor = *trypos; // For a line ending in ',' that is a continuation line go // back to the first line with a backslash: // char *foo = "bla{backslash} // bla", // here; while (n == 0 && curwin->w_cursor.lnum > 1) { l = ml_get(curwin->w_cursor.lnum - 1); if (*l == NUL || l[STRLEN(l) - 1] != '\\') break; --curwin->w_cursor.lnum; curwin->w_cursor.col = 0; } amount = get_indent(); // XXX if (amount == 0) amount = cin_first_id_amount(); if (amount == 0) amount = ind_continuation; break; } // If the line looks like a function declaration, and we're // not in a comment, put it at the left margin. if (cin_isfuncdecl(NULL, cur_curpos.lnum, 0)) // XXX break; l = ml_get_curline(); // Finding the closing '}' of a previous function. Put // current line at the left margin. For when 'cino' has "fs". if (*skipwhite(l) == '}') break; // (matching {) // If the previous line ends on '};' (maybe followed by // comments) align at column 0. For example: // char *string_array[] = { "foo", // / * x * / "b};ar" }; / * foobar * / if (cin_ends_in(l, (char_u *)"};", NULL)) break; // If the previous line ends on '[' we are probably in an // array constant: // something = [ // 234, <- extra indent if (cin_ends_in(l, (char_u *)"[", NULL)) { amount = get_indent() + ind_continuation; break; } // Find a line that only has a semicolon which belongs to a previous // line ending in '}', e.g. before an #endif. Don't increase // indent then. if (*(look = skipwhite(l)) == ';' && cin_nocode(look + 1)) { pos_T curpos_save = curwin->w_cursor; while (curwin->w_cursor.lnum > 1) { look = ml_get(--curwin->w_cursor.lnum); if (!(cin_nocode(look) || cin_ispreproc_cont( &look, &curwin->w_cursor.lnum, &amount))) break; } if (curwin->w_cursor.lnum > 0 && cin_ends_in(look, (char_u *)"}", NULL)) break; curwin->w_cursor = curpos_save; } // If the PREVIOUS line is a function declaration, the current // line (and the ones that follow) needs to be indented as // parameters. if (cin_isfuncdecl(&l, curwin->w_cursor.lnum, 0)) { amount = curbuf->b_ind_param; break; } // If the previous line ends in ';' and the line before the // previous line ends in ',' or '\', indent to column zero: // int foo, // bar; // indent_to_0 here; if (cin_ends_in(l, (char_u *)";", NULL)) { l = ml_get(curwin->w_cursor.lnum - 1); if (cin_ends_in(l, (char_u *)",", NULL) || (*l != NUL && l[STRLEN(l) - 1] == '\\')) break; l = ml_get_curline(); } // Doesn't look like anything interesting -- so just // use the indent of this line. // // Position the cursor over the rightmost paren, so that // matching it will take us back to the start of the line.
find_last_paren(l, '(', ')'); if ((trypos = find_match_paren(curbuf->b_ind_maxparen)) != NULL) curwin->w_cursor = *trypos; amount = get_indent(); // XXX break; } // add extra indent for a comment if (cin_iscomment(theline)) amount += curbuf->b_ind_comment; // add extra indent if the previous line ended in a backslash: // "asdfasdf{backslash} // here"; // char *foo = "asdf{backslash} // here"; if (cur_curpos.lnum > 1) { l = ml_get(cur_curpos.lnum - 1); if (*l != NUL && l[STRLEN(l) - 1] == '\\') { cur_amount = cin_get_equal_amount(cur_curpos.lnum - 1); if (cur_amount > 0) amount = cur_amount; else if (cur_amount == 0) amount += ind_continuation; } } theend: if (amount < 0) amount = 0; laterend: // put the cursor back where it belongs curwin->w_cursor = cur_curpos; vim_free(linecopy); return amount; } /* * return TRUE if 'cinkeys' contains the key "keytyped", * when == '*': Only if key is preceded with '*' (indent before insert) * when == '!': Only if key is preceded with '!' (don't insert) * when == ' ': Only if key is not preceded with '*'(indent afterwards) * * "keytyped" can have a few special values: * KEY_OPEN_FORW * KEY_OPEN_BACK * KEY_COMPLETE just finished completion. * * If line_is_empty is TRUE accept keys with '0' before them. */ int in_cinkeys( int keytyped, int when, int line_is_empty) { char_u *look; int try_match; int try_match_word; char_u *p; char_u *line; int icase; int i; if (keytyped == NUL) // Can happen with CTRL-Y and CTRL-E on a short line. return FALSE; #ifdef FEAT_EVAL if (*curbuf->b_p_inde != NUL) look = curbuf->b_p_indk; // 'indentexpr' set: use 'indentkeys' else #endif look = curbuf->b_p_cink; // 'indentexpr' empty: use 'cinkeys' while (*look) { // Find out if we want to try a match with this key, depending on // 'when' and a '*' or '!' before the key. switch (when) { case '*': try_match = (*look == '*'); break; case '!': try_match = (*look == '!'); break; default: try_match = (*look != '*'); break; } if (*look == '*' || *look == '!') ++look; // If there is a '0', only accept a match if the line is empty. // But may still match when typing last char of a word. if (*look == '0') { try_match_word = try_match; if (!line_is_empty) try_match = FALSE; ++look; } else try_match_word = FALSE; // does it look like a control character? if (*look == '^' && look[1] >= '?' && look[1] <= '_') { if (try_match && keytyped == Ctrl_chr(look[1])) return TRUE; look += 2; } // 'o' means "o" command, open forward. // 'O' means "O" command, open backward. else if (*look == 'o') { if (try_match && keytyped == KEY_OPEN_FORW) return TRUE; ++look; } else if (*look == 'O') { if (try_match && keytyped == KEY_OPEN_BACK) return TRUE; ++look; } // 'e' means to check for "else" at start of line and just before the // cursor. else if (*look == 'e') { if (try_match && keytyped == 'e' && curwin->w_cursor.col >= 4) { p = ml_get_curline(); if (skipwhite(p) == p + curwin->w_cursor.col - 4 && STRNCMP(p + curwin->w_cursor.col - 4, "else", 4) == 0) return TRUE; } ++look; } // ':' only causes an indent if it is at the end of a label or case // statement, or when it was before typing the ':' (to fix // class::method for C++). else if (*look == ':') { if (try_match && keytyped == ':') { p = ml_get_curline(); if (cin_iscase(p, FALSE) || cin_isscopedecl(p) || cin_islabel()) return TRUE; // Need to get the line again after cin_islabel(). 
p = ml_get_curline(); if (curwin->w_cursor.col > 2 && p[curwin->w_cursor.col - 1] == ':' && p[curwin->w_cursor.col - 2] == ':') { p[curwin->w_cursor.col - 1] = ' '; i = (cin_iscase(p, FALSE) || cin_isscopedecl(p) || cin_islabel()); p = ml_get_curline(); p[curwin->w_cursor.col - 1] = ':'; if (i) return TRUE; } } ++look; } // Is it a key in <>, maybe? else if (*look == '<') { if (try_match) { // make up some named keys <o>, <O>, <e>, <0>, <>>, <<>, <*>, // <:> and <!> so that people can re-indent on o, O, e, 0, <, // >, *, : and ! keys if they really really want to. if (vim_strchr((char_u *)"<>!*oOe0:", look[1]) != NULL && keytyped == look[1]) return TRUE; if (keytyped == get_special_key_code(look + 1)) return TRUE; } while (*look && *look != '>') look++; while (*look == '>') look++; } // Is it a word: "=word"? else if (*look == '=' && look[1] != ',' && look[1] != NUL) { ++look; if (*look == '~') { icase = TRUE; ++look; } else icase = FALSE; p = vim_strchr(look, ','); if (p == NULL) p = look + STRLEN(look); if ((try_match || try_match_word) && curwin->w_cursor.col >= (colnr_T)(p - look)) { int match = FALSE; if (keytyped == KEY_COMPLETE) { char_u *s; // Just completed a word, check if it starts with "look". // search back for the start of a word. line = ml_get_curline(); if (has_mbyte) { char_u *n; for (s = line + curwin->w_cursor.col; s > line; s = n) { n = mb_prevptr(line, s); if (!vim_iswordp(n)) break; } } else for (s = line + curwin->w_cursor.col; s > line; --s) if (!vim_iswordc(s[-1])) break; if (s + (p - look) <= line + curwin->w_cursor.col && (icase ? MB_STRNICMP(s, look, p - look) : STRNCMP(s, look, p - look)) == 0) match = TRUE; } else // TODO: multi-byte if (keytyped == (int)p[-1] || (icase && keytyped < 256 && TOLOWER_LOC(keytyped) == TOLOWER_LOC((int)p[-1]))) { line = ml_get_cursor(); if ((curwin->w_cursor.col == (colnr_T)(p - look) || !vim_iswordc(line[-(p - look) - 1])) && (icase ? MB_STRNICMP(line - (p - look), look, p - look) : STRNCMP(line - (p - look), look, p - look)) == 0) match = TRUE; } if (match && try_match_word && !try_match) { // "0=word": Check if there are only blanks before the // word. if (getwhitecols_curline() != (int)(curwin->w_cursor.col - (p - look))) match = FALSE; } if (match) return TRUE; } look = p; } // ok, it's a boring generic character. else { if (try_match && *look == keytyped) return TRUE; if (*look != NUL) ++look; } // Skip over ", ". look = skip_to_option_part(look); } return FALSE; } /* * Do C or expression indenting on the current line. */ void do_c_expr_indent(void) { # ifdef FEAT_EVAL if (*curbuf->b_p_inde != NUL) fixthisline(get_expr_indent); else # endif fixthisline(get_c_indent); } #endif #if defined(FEAT_EVAL) || defined(PROTO) /* * "cindent(lnum)" function */ void f_cindent(typval_T *argvars UNUSED, typval_T *rettv) { # ifdef FEAT_CINDENT pos_T pos; linenr_T lnum; if (in_vim9script() && check_for_lnum_arg(argvars, 0) == FAIL) return; pos = curwin->w_cursor; lnum = tv_get_lnum(argvars); if (lnum >= 1 && lnum <= curbuf->b_ml.ml_line_count) { curwin->w_cursor.lnum = lnum; rettv->vval.v_number = get_c_indent(); curwin->w_cursor = pos; } else # endif rettv->vval.v_number = -1; } #endif
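The "=word" branch of in_cinkeys() above reduces to a suffix test: the text just before the cursor must end in the configured word, and the character in front of that word must not itself be a word character. A minimal sketch of that test follows, with a hypothetical helper name and plain libc calls standing in for Vim's multi-byte-aware routines; it is an illustration under those assumptions, not Vim's implementation.

#include <ctype.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in for the "=word" check in in_cinkeys(): returns 1
 * when line[0..col) ends in "word" and the preceding character is not a
 * word character (mirroring the !vim_iswordc(...) test above). */
static int matches_eq_word(const char *line, size_t col, const char *word)
{
    size_t n = strlen(word);

    if (col < n || strncmp(line + col - n, word, n) != 0)
        return 0;
    return col == n || !isalnum((unsigned char)line[col - n - 1]);
}

int main(void)
{
    /* typing the final 'f' of "endif" matches "=endif" ... */
    printf("%d\n", matches_eq_word("    endif", 9, "endif"));   /* 1 */
    /* ... but "myendif" does not, because 'y' is a word character */
    printf("%d\n", matches_eq_word("myendif", 7, "endif"));     /* 0 */
    return 0;
}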
skip_string(char_u *p) { int i; // We loop, because strings may be concatenated: "date""time". for ( ; ; ++p) { if (p[0] == '\'') // 'c' or '\n' or '\000' { if (p[1] == NUL) // ' at end of line break; i = 2; if (p[1] == '\\' && p[2] != NUL) // '\n' or '\000' { ++i; while (vim_isdigit(p[i - 1])) // '\000' ++i; } if (p[i] == '\'') // check for trailing ' { p += i; continue; } } else if (p[0] == '"') // start of string { for (++p; p[0]; ++p) { if (p[0] == '\\' && p[1] != NUL) ++p; else if (p[0] == '"') // end of string break; } if (p[0] == '"') continue; // continue for another string } else if (p[0] == 'R' && p[1] == '"') { // Raw string: R"[delim](...)[delim]" char_u *delim = p + 2; char_u *paren = vim_strchr(delim, '('); if (paren != NULL) { size_t delim_len = paren - delim; for (p += 3; *p; ++p) if (p[0] == ')' && STRNCMP(p + 1, delim, delim_len) == 0 && p[delim_len + 1] == '"') { p += delim_len + 1; break; } if (p[0] == '"') continue; // continue for another string } } break; // no string found } if (!*p) --p; // backup from NUL return p; }
skip_string(char_u *p) { int i; // We loop, because strings may be concatenated: "date""time". for ( ; ; ++p) { if (p[0] == '\'') // 'c' or '\n' or '\000' { if (p[1] == NUL) // ' at end of line break; i = 2; if (p[1] == '\\' && p[2] != NUL) // '\n' or '\000' { ++i; while (vim_isdigit(p[i - 1])) // '\000' ++i; } if (p[i - 1] != NUL && p[i] == '\'') // check for trailing ' { p += i; continue; } } else if (p[0] == '"') // start of string { for (++p; p[0]; ++p) { if (p[0] == '\\' && p[1] != NUL) ++p; else if (p[0] == '"') // end of string break; } if (p[0] == '"') continue; // continue for another string } else if (p[0] == 'R' && p[1] == '"') { // Raw string: R"[delim](...)[delim]" char_u *delim = p + 2; char_u *paren = vim_strchr(delim, '('); if (paren != NULL) { size_t delim_len = paren - delim; for (p += 3; *p; ++p) if (p[0] == ')' && STRNCMP(p + 1, delim, delim_len) == 0 && p[delim_len + 1] == '"') { p += delim_len + 1; break; } if (p[0] == '"') continue; // continue for another string } } break; // no string found } if (!*p) --p; // backup from NUL return p; }
{'added': [(92, "\t if (p[i - 1] != NUL && p[i] == '\\'') // check for trailing '")], 'deleted': [(92, "\t if (p[i] == '\\'')\t\t // check for trailing '")]}
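The one-character guard recorded in the diff above is easiest to see on a concrete input. The sketch below is a hypothetical harness, not part of the record: it replays the quoted-character scan shared by func_before and func_after on a line that ends in an unterminated octal escape. The digit loop leaves i one slot past the terminating NUL, so the unguarded trailing-quote test p[i] == '\'' inspects memory outside the buffer, while the fixed test short-circuits at the NUL.

#include <ctype.h>
#include <stdio.h>
#include <string.h>

/* Returns the highest index the trailing-quote test would inspect;
 * "guarded" selects the func_after behaviour. */
static int highest_index_tested(const char *p, int guarded)
{
    int i = 2;

    if (p[1] == '\\' && p[2] != '\0')       /* '\n' or '\000' */
    {
        ++i;
        while (isdigit((unsigned char)p[i - 1]))
            ++i;
    }
    /* func_after first tests p[i - 1] and stops on the NUL;
     * func_before goes straight to p[i]. */
    if (guarded && p[i - 1] == '\0')
        return i - 1;
    return i;
}

int main(void)
{
    const char line[] = "'\\00";            /* line ends in an open escape  */
    int last = (int)strlen(line);           /* index of the terminating NUL */

    printf("func_before tests p[%d], buffer ends at p[%d]\n",
           highest_index_tested(line, 0), last);
    printf("func_after  tests p[%d]\n",
           highest_index_tested(line, 1));
    return 0;
}

On this input the unguarded scan tests p[5] of a buffer whose last valid byte is p[4], which is the out-of-bounds access behind the record's CWE-787 classification.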
1
1
2833
15176
https://github.com/vim/vim
CVE-2022-1733
['CWE-787']
mmap.c
expand_downwards
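The code_before below computes gaps with vm_start_gap()/vm_end_gap() throughout (see the brk path and vma_compute_subtree_gap), and expand_downwards relies on the same stack-guard arithmetic. The following is a minimal sketch of the growsdown side only, with simplified types and assumed constants (the real VM_GROWSDOWN bit and the stack_guard_gap default live in the kernel headers); it is an illustration, not the kernel implementation.

#include <stdio.h>

#define VM_GROWSDOWN 0x00000100UL                     /* assumed flag value */

static unsigned long stack_guard_gap = 256UL << 12;   /* assumed: 256 4K pages */

struct vma { unsigned long vm_start, vm_end, vm_flags; };

/* Sketch of vm_start_gap(): a growsdown stack claims a guard gap below
 * vm_start, clamped to 0 if the subtraction wraps. */
static unsigned long vm_start_gap(const struct vma *v)
{
    unsigned long start = v->vm_start;

    if (v->vm_flags & VM_GROWSDOWN) {
        start -= stack_guard_gap;
        if (start > v->vm_start)                      /* wrapped past zero */
            start = 0;
    }
    return start;
}

int main(void)
{
    struct vma stack = { 0x7ffffffde000UL, 0x7ffffffff000UL, VM_GROWSDOWN };

    /* A neighbour may only end at or below the adjusted start, which is
     * why the brk path below checks newbrk + PAGE_SIZE > vm_start_gap(next). */
    printf("vm_start %#lx, gap-adjusted start %#lx\n",
           stack.vm_start, vm_start_gap(&stack));
    return 0;
}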
/* * mm/mmap.c * * Written by obz. * * Address space accounting code <alan@lxorguk.ukuu.org.uk> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/slab.h> #include <linux/backing-dev.h> #include <linux/mm.h> #include <linux/vmacache.h> #include <linux/shm.h> #include <linux/mman.h> #include <linux/pagemap.h> #include <linux/swap.h> #include <linux/syscalls.h> #include <linux/capability.h> #include <linux/init.h> #include <linux/file.h> #include <linux/fs.h> #include <linux/personality.h> #include <linux/security.h> #include <linux/hugetlb.h> #include <linux/shmem_fs.h> #include <linux/profile.h> #include <linux/export.h> #include <linux/mount.h> #include <linux/mempolicy.h> #include <linux/rmap.h> #include <linux/mmu_notifier.h> #include <linux/mmdebug.h> #include <linux/perf_event.h> #include <linux/audit.h> #include <linux/khugepaged.h> #include <linux/uprobes.h> #include <linux/rbtree_augmented.h> #include <linux/notifier.h> #include <linux/memory.h> #include <linux/printk.h> #include <linux/userfaultfd_k.h> #include <linux/moduleparam.h> #include <linux/pkeys.h> #include <linux/oom.h> #include <linux/uaccess.h> #include <asm/cacheflush.h> #include <asm/tlb.h> #include <asm/mmu_context.h> #include "internal.h" #ifndef arch_mmap_check #define arch_mmap_check(addr, len, flags) (0) #endif #ifdef CONFIG_HAVE_ARCH_MMAP_RND_BITS const int mmap_rnd_bits_min = CONFIG_ARCH_MMAP_RND_BITS_MIN; const int mmap_rnd_bits_max = CONFIG_ARCH_MMAP_RND_BITS_MAX; int mmap_rnd_bits __read_mostly = CONFIG_ARCH_MMAP_RND_BITS; #endif #ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS const int mmap_rnd_compat_bits_min = CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN; const int mmap_rnd_compat_bits_max = CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX; int mmap_rnd_compat_bits __read_mostly = CONFIG_ARCH_MMAP_RND_COMPAT_BITS; #endif static bool ignore_rlimit_data; core_param(ignore_rlimit_data, ignore_rlimit_data, bool, 0644); static void unmap_region(struct mm_struct *mm, struct vm_area_struct *vma, struct vm_area_struct *prev, unsigned long start, unsigned long end); /* description of effects of mapping type and prot in current implementation. * this is due to the limited x86 page protection hardware. The expected * behavior is in parens: * * map_type prot * PROT_NONE PROT_READ PROT_WRITE PROT_EXEC * MAP_SHARED r: (no) no r: (yes) yes r: (no) yes r: (no) yes * w: (no) no w: (no) no w: (yes) yes w: (no) no * x: (no) no x: (no) yes x: (no) yes x: (yes) yes * * MAP_PRIVATE r: (no) no r: (yes) yes r: (no) yes r: (no) yes * w: (no) no w: (no) no w: (copy) copy w: (no) no * x: (no) no x: (no) yes x: (no) yes x: (yes) yes * * On arm64, PROT_EXEC has the following behaviour for both MAP_SHARED and * MAP_PRIVATE: * r: (no) no * w: (no) no * x: (yes) yes */ pgprot_t protection_map[16] __ro_after_init = { __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111, __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111 }; #ifndef CONFIG_ARCH_HAS_FILTER_PGPROT static inline pgprot_t arch_filter_pgprot(pgprot_t prot) { return prot; } #endif pgprot_t vm_get_page_prot(unsigned long vm_flags) { pgprot_t ret = __pgprot(pgprot_val(protection_map[vm_flags & (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) | pgprot_val(arch_vm_get_page_prot(vm_flags))); return arch_filter_pgprot(ret); } EXPORT_SYMBOL(vm_get_page_prot); static pgprot_t vm_pgprot_modify(pgprot_t oldprot, unsigned long vm_flags) { return pgprot_modify(oldprot, vm_get_page_prot(vm_flags)); } /* Update vma->vm_page_prot to reflect vma->vm_flags. 
*/ void vma_set_page_prot(struct vm_area_struct *vma) { unsigned long vm_flags = vma->vm_flags; pgprot_t vm_page_prot; vm_page_prot = vm_pgprot_modify(vma->vm_page_prot, vm_flags); if (vma_wants_writenotify(vma, vm_page_prot)) { vm_flags &= ~VM_SHARED; vm_page_prot = vm_pgprot_modify(vm_page_prot, vm_flags); } /* remove_protection_ptes reads vma->vm_page_prot without mmap_sem */ WRITE_ONCE(vma->vm_page_prot, vm_page_prot); } /* * Requires inode->i_mapping->i_mmap_rwsem */ static void __remove_shared_vm_struct(struct vm_area_struct *vma, struct file *file, struct address_space *mapping) { if (vma->vm_flags & VM_DENYWRITE) atomic_inc(&file_inode(file)->i_writecount); if (vma->vm_flags & VM_SHARED) mapping_unmap_writable(mapping); flush_dcache_mmap_lock(mapping); vma_interval_tree_remove(vma, &mapping->i_mmap); flush_dcache_mmap_unlock(mapping); } /* * Unlink a file-based vm structure from its interval tree, to hide * vma from rmap and vmtruncate before freeing its page tables. */ void unlink_file_vma(struct vm_area_struct *vma) { struct file *file = vma->vm_file; if (file) { struct address_space *mapping = file->f_mapping; i_mmap_lock_write(mapping); __remove_shared_vm_struct(vma, file, mapping); i_mmap_unlock_write(mapping); } } /* * Close a vm structure and free it, returning the next. */ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma) { struct vm_area_struct *next = vma->vm_next; might_sleep(); if (vma->vm_ops && vma->vm_ops->close) vma->vm_ops->close(vma); if (vma->vm_file) fput(vma->vm_file); mpol_put(vma_policy(vma)); vm_area_free(vma); return next; } static int do_brk_flags(unsigned long addr, unsigned long request, unsigned long flags, struct list_head *uf); SYSCALL_DEFINE1(brk, unsigned long, brk) { unsigned long retval; unsigned long newbrk, oldbrk, origbrk; struct mm_struct *mm = current->mm; struct vm_area_struct *next; unsigned long min_brk; bool populate; bool downgraded = false; LIST_HEAD(uf); if (down_write_killable(&mm->mmap_sem)) return -EINTR; origbrk = mm->brk; #ifdef CONFIG_COMPAT_BRK /* * CONFIG_COMPAT_BRK can still be overridden by setting * randomize_va_space to 2, which will still cause mm->start_brk * to be arbitrarily shifted */ if (current->brk_randomized) min_brk = mm->start_brk; else min_brk = mm->end_data; #else min_brk = mm->start_brk; #endif if (brk < min_brk) goto out; /* * Check against rlimit here. If this check is done later after the test * of oldbrk with newbrk then it can escape the test and let the data * segment grow beyond its set limit in the case where the limit is * not page aligned -Ram Gupta */ if (check_data_rlimit(rlimit(RLIMIT_DATA), brk, mm->start_brk, mm->end_data, mm->start_data)) goto out; newbrk = PAGE_ALIGN(brk); oldbrk = PAGE_ALIGN(mm->brk); if (oldbrk == newbrk) { mm->brk = brk; goto success; } /* * Always allow shrinking brk. * __do_munmap() may downgrade mmap_sem to read. */ if (brk <= mm->brk) { int ret; /* * mm->brk must be protected by write mmap_sem so update it * before downgrading mmap_sem. When __do_munmap() fails, * mm->brk will be restored from origbrk. */ mm->brk = brk; ret = __do_munmap(mm, newbrk, oldbrk-newbrk, &uf, true); if (ret < 0) { mm->brk = origbrk; goto out; } else if (ret == 1) { downgraded = true; } goto success; } /* Check against existing mmap mappings. */ next = find_vma(mm, oldbrk); if (next && newbrk + PAGE_SIZE > vm_start_gap(next)) goto out; /* Ok, looks good - let it rip.
*/ if (do_brk_flags(oldbrk, newbrk-oldbrk, 0, &uf) < 0) goto out; mm->brk = brk; success: populate = newbrk > oldbrk && (mm->def_flags & VM_LOCKED) != 0; if (downgraded) up_read(&mm->mmap_sem); else up_write(&mm->mmap_sem); userfaultfd_unmap_complete(mm, &uf); if (populate) mm_populate(oldbrk, newbrk - oldbrk); return brk; out: retval = origbrk; up_write(&mm->mmap_sem); return retval; } static long vma_compute_subtree_gap(struct vm_area_struct *vma) { unsigned long max, prev_end, subtree_gap; /* * Note: in the rare case of a VM_GROWSDOWN above a VM_GROWSUP, we * allow two stack_guard_gaps between them here, and when choosing * an unmapped area; whereas when expanding we only require one. * That's a little inconsistent, but keeps the code here simpler. */ max = vm_start_gap(vma); if (vma->vm_prev) { prev_end = vm_end_gap(vma->vm_prev); if (max > prev_end) max -= prev_end; else max = 0; } if (vma->vm_rb.rb_left) { subtree_gap = rb_entry(vma->vm_rb.rb_left, struct vm_area_struct, vm_rb)->rb_subtree_gap; if (subtree_gap > max) max = subtree_gap; } if (vma->vm_rb.rb_right) { subtree_gap = rb_entry(vma->vm_rb.rb_right, struct vm_area_struct, vm_rb)->rb_subtree_gap; if (subtree_gap > max) max = subtree_gap; } return max; } #ifdef CONFIG_DEBUG_VM_RB static int browse_rb(struct mm_struct *mm) { struct rb_root *root = &mm->mm_rb; int i = 0, j, bug = 0; struct rb_node *nd, *pn = NULL; unsigned long prev = 0, pend = 0; for (nd = rb_first(root); nd; nd = rb_next(nd)) { struct vm_area_struct *vma; vma = rb_entry(nd, struct vm_area_struct, vm_rb); if (vma->vm_start < prev) { pr_emerg("vm_start %lx < prev %lx\n", vma->vm_start, prev); bug = 1; } if (vma->vm_start < pend) { pr_emerg("vm_start %lx < pend %lx\n", vma->vm_start, pend); bug = 1; } if (vma->vm_start > vma->vm_end) { pr_emerg("vm_start %lx > vm_end %lx\n", vma->vm_start, vma->vm_end); bug = 1; } spin_lock(&mm->page_table_lock); if (vma->rb_subtree_gap != vma_compute_subtree_gap(vma)) { pr_emerg("free gap %lx, correct %lx\n", vma->rb_subtree_gap, vma_compute_subtree_gap(vma)); bug = 1; } spin_unlock(&mm->page_table_lock); i++; pn = nd; prev = vma->vm_start; pend = vma->vm_end; } j = 0; for (nd = pn; nd; nd = rb_prev(nd)) j++; if (i != j) { pr_emerg("backwards %d, forwards %d\n", j, i); bug = 1; } return bug ? 
-1 : i; } static void validate_mm_rb(struct rb_root *root, struct vm_area_struct *ignore) { struct rb_node *nd; for (nd = rb_first(root); nd; nd = rb_next(nd)) { struct vm_area_struct *vma; vma = rb_entry(nd, struct vm_area_struct, vm_rb); VM_BUG_ON_VMA(vma != ignore && vma->rb_subtree_gap != vma_compute_subtree_gap(vma), vma); } } static void validate_mm(struct mm_struct *mm) { int bug = 0; int i = 0; unsigned long highest_address = 0; struct vm_area_struct *vma = mm->mmap; while (vma) { struct anon_vma *anon_vma = vma->anon_vma; struct anon_vma_chain *avc; if (anon_vma) { anon_vma_lock_read(anon_vma); list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) anon_vma_interval_tree_verify(avc); anon_vma_unlock_read(anon_vma); } highest_address = vm_end_gap(vma); vma = vma->vm_next; i++; } if (i != mm->map_count) { pr_emerg("map_count %d vm_next %d\n", mm->map_count, i); bug = 1; } if (highest_address != mm->highest_vm_end) { pr_emerg("mm->highest_vm_end %lx, found %lx\n", mm->highest_vm_end, highest_address); bug = 1; } i = browse_rb(mm); if (i != mm->map_count) { if (i != -1) pr_emerg("map_count %d rb %d\n", mm->map_count, i); bug = 1; } VM_BUG_ON_MM(bug, mm); } #else #define validate_mm_rb(root, ignore) do { } while (0) #define validate_mm(mm) do { } while (0) #endif RB_DECLARE_CALLBACKS(static, vma_gap_callbacks, struct vm_area_struct, vm_rb, unsigned long, rb_subtree_gap, vma_compute_subtree_gap) /* * Update augmented rbtree rb_subtree_gap values after vma->vm_start or * vma->vm_prev->vm_end values changed, without modifying the vma's position * in the rbtree. */ static void vma_gap_update(struct vm_area_struct *vma) { /* * As it turns out, RB_DECLARE_CALLBACKS() already created a callback * function that does exactly what we want. */ vma_gap_callbacks_propagate(&vma->vm_rb, NULL); } static inline void vma_rb_insert(struct vm_area_struct *vma, struct rb_root *root) { /* All rb_subtree_gap values must be consistent prior to insertion */ validate_mm_rb(root, NULL); rb_insert_augmented(&vma->vm_rb, root, &vma_gap_callbacks); } static void __vma_rb_erase(struct vm_area_struct *vma, struct rb_root *root) { /* * Note rb_erase_augmented is a fairly large inline function, * so make sure we instantiate it only once with our desired * augmented rbtree callbacks. */ rb_erase_augmented(&vma->vm_rb, root, &vma_gap_callbacks); } static __always_inline void vma_rb_erase_ignore(struct vm_area_struct *vma, struct rb_root *root, struct vm_area_struct *ignore) { /* * All rb_subtree_gap values must be consistent prior to erase, * with the possible exception of the "next" vma being erased if * next->vm_start was reduced. */ validate_mm_rb(root, ignore); __vma_rb_erase(vma, root); } static __always_inline void vma_rb_erase(struct vm_area_struct *vma, struct rb_root *root) { /* * All rb_subtree_gap values must be consistent prior to erase, * with the possible exception of the vma being erased. */ validate_mm_rb(root, vma); __vma_rb_erase(vma, root); } /* * vma has some anon_vma assigned, and is already inserted on that * anon_vma's interval trees. * * Before updating the vma's vm_start / vm_end / vm_pgoff fields, the * vma must be removed from the anon_vma's interval trees using * anon_vma_interval_tree_pre_update_vma(). * * After the update, the vma will be reinserted using * anon_vma_interval_tree_post_update_vma(). * * The entire update must be protected by exclusive mmap_sem and by * the root anon_vma's mutex.
*/ static inline void anon_vma_interval_tree_pre_update_vma(struct vm_area_struct *vma) { struct anon_vma_chain *avc; list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) anon_vma_interval_tree_remove(avc, &avc->anon_vma->rb_root); } static inline void anon_vma_interval_tree_post_update_vma(struct vm_area_struct *vma) { struct anon_vma_chain *avc; list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) anon_vma_interval_tree_insert(avc, &avc->anon_vma->rb_root); } static int find_vma_links(struct mm_struct *mm, unsigned long addr, unsigned long end, struct vm_area_struct **pprev, struct rb_node ***rb_link, struct rb_node **rb_parent) { struct rb_node **__rb_link, *__rb_parent, *rb_prev; __rb_link = &mm->mm_rb.rb_node; rb_prev = __rb_parent = NULL; while (*__rb_link) { struct vm_area_struct *vma_tmp; __rb_parent = *__rb_link; vma_tmp = rb_entry(__rb_parent, struct vm_area_struct, vm_rb); if (vma_tmp->vm_end > addr) { /* Fail if an existing vma overlaps the area */ if (vma_tmp->vm_start < end) return -ENOMEM; __rb_link = &__rb_parent->rb_left; } else { rb_prev = __rb_parent; __rb_link = &__rb_parent->rb_right; } } *pprev = NULL; if (rb_prev) *pprev = rb_entry(rb_prev, struct vm_area_struct, vm_rb); *rb_link = __rb_link; *rb_parent = __rb_parent; return 0; } static unsigned long count_vma_pages_range(struct mm_struct *mm, unsigned long addr, unsigned long end) { unsigned long nr_pages = 0; struct vm_area_struct *vma; /* Find first overlapping mapping */ vma = find_vma_intersection(mm, addr, end); if (!vma) return 0; nr_pages = (min(end, vma->vm_end) - max(addr, vma->vm_start)) >> PAGE_SHIFT; /* Iterate over the rest of the overlaps */ for (vma = vma->vm_next; vma; vma = vma->vm_next) { unsigned long overlap_len; if (vma->vm_start > end) break; overlap_len = min(end, vma->vm_end) - vma->vm_start; nr_pages += overlap_len >> PAGE_SHIFT; } return nr_pages; } void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma, struct rb_node **rb_link, struct rb_node *rb_parent) { /* Update tracking information for the gap following the new vma. */ if (vma->vm_next) vma_gap_update(vma->vm_next); else mm->highest_vm_end = vm_end_gap(vma); /* * vma->vm_prev wasn't known when we followed the rbtree to find the * correct insertion point for that vma. As a result, we could not * update the vma vm_rb parents rb_subtree_gap values on the way down. * So, we first insert the vma with a zero rb_subtree_gap value * (to be consistent with what we did on the way down), and then * immediately update the gap to the correct value. Finally we * rebalance the rbtree after all augmented values have been set.
*/ rb_link_node(&vma->vm_rb, rb_parent, rb_link); vma->rb_subtree_gap = 0; vma_gap_update(vma); vma_rb_insert(vma, &mm->mm_rb); } static void __vma_link_file(struct vm_area_struct *vma) { struct file *file; file = vma->vm_file; if (file) { struct address_space *mapping = file->f_mapping; if (vma->vm_flags & VM_DENYWRITE) atomic_dec(&file_inode(file)->i_writecount); if (vma->vm_flags & VM_SHARED) atomic_inc(&mapping->i_mmap_writable); flush_dcache_mmap_lock(mapping); vma_interval_tree_insert(vma, &mapping->i_mmap); flush_dcache_mmap_unlock(mapping); } } static void __vma_link(struct mm_struct *mm, struct vm_area_struct *vma, struct vm_area_struct *prev, struct rb_node **rb_link, struct rb_node *rb_parent) { __vma_link_list(mm, vma, prev, rb_parent); __vma_link_rb(mm, vma, rb_link, rb_parent); } static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma, struct vm_area_struct *prev, struct rb_node **rb_link, struct rb_node *rb_parent) { struct address_space *mapping = NULL; if (vma->vm_file) { mapping = vma->vm_file->f_mapping; i_mmap_lock_write(mapping); } __vma_link(mm, vma, prev, rb_link, rb_parent); __vma_link_file(vma); if (mapping) i_mmap_unlock_write(mapping); mm->map_count++; validate_mm(mm); } /* * Helper for vma_adjust() in the split_vma insert case: insert a vma into the * mm's list and rbtree. It has already been inserted into the interval tree. */ static void __insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma) { struct vm_area_struct *prev; struct rb_node **rb_link, *rb_parent; if (find_vma_links(mm, vma->vm_start, vma->vm_end, &prev, &rb_link, &rb_parent)) BUG(); __vma_link(mm, vma, prev, rb_link, rb_parent); mm->map_count++; } static __always_inline void __vma_unlink_common(struct mm_struct *mm, struct vm_area_struct *vma, struct vm_area_struct *prev, bool has_prev, struct vm_area_struct *ignore) { struct vm_area_struct *next; vma_rb_erase_ignore(vma, &mm->mm_rb, ignore); next = vma->vm_next; if (has_prev) prev->vm_next = next; else { prev = vma->vm_prev; if (prev) prev->vm_next = next; else mm->mmap = next; } if (next) next->vm_prev = prev; /* Kill the cache */ vmacache_invalidate(mm); } static inline void __vma_unlink_prev(struct mm_struct *mm, struct vm_area_struct *vma, struct vm_area_struct *prev) { __vma_unlink_common(mm, vma, prev, true, vma); } /* * We cannot adjust vm_start, vm_end, vm_pgoff fields of a vma that * is already present in an i_mmap tree without adjusting the tree. * The following helper function should be used when such adjustments * are necessary. The "insert" vma (if any) is to be inserted * before we drop the necessary locks. */ int __vma_adjust(struct vm_area_struct *vma, unsigned long start, unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert, struct vm_area_struct *expand) { struct mm_struct *mm = vma->vm_mm; struct vm_area_struct *next = vma->vm_next, *orig_vma = vma; struct address_space *mapping = NULL; struct rb_root_cached *root = NULL; struct anon_vma *anon_vma = NULL; struct file *file = vma->vm_file; bool start_changed = false, end_changed = false; long adjust_next = 0; int remove_next = 0; if (next && !insert) { struct vm_area_struct *exporter = NULL, *importer = NULL; if (end >= next->vm_end) { /* * vma expands, overlapping all the next, and * perhaps the one after too (mprotect case 6). * The only other cases that gets here are * case 1, case 7 and case 8. */ if (next == expand) { /* * The only case where we don't expand "vma" * and we expand "next" instead is case 8. 
*/ VM_WARN_ON(end != next->vm_end); /* * remove_next == 3 means we're * removing "vma" and that to do so we * swapped "vma" and "next". */ remove_next = 3; VM_WARN_ON(file != next->vm_file); swap(vma, next); } else { VM_WARN_ON(expand != vma); /* * case 1, 6, 7, remove_next == 2 is case 6, * remove_next == 1 is case 1 or 7. */ remove_next = 1 + (end > next->vm_end); VM_WARN_ON(remove_next == 2 && end != next->vm_next->vm_end); VM_WARN_ON(remove_next == 1 && end != next->vm_end); /* trim end to next, for case 6 first pass */ end = next->vm_end; } exporter = next; importer = vma; /* * If next doesn't have anon_vma, import from vma after * next, if the vma overlaps with it. */ if (remove_next == 2 && !next->anon_vma) exporter = next->vm_next; } else if (end > next->vm_start) { /* * vma expands, overlapping part of the next: * mprotect case 5 shifting the boundary up. */ adjust_next = (end - next->vm_start) >> PAGE_SHIFT; exporter = next; importer = vma; VM_WARN_ON(expand != importer); } else if (end < vma->vm_end) { /* * vma shrinks, and !insert tells it's not * split_vma inserting another: so it must be * mprotect case 4 shifting the boundary down. */ adjust_next = -((vma->vm_end - end) >> PAGE_SHIFT); exporter = vma; importer = next; VM_WARN_ON(expand != importer); } /* * Easily overlooked: when mprotect shifts the boundary, * make sure the expanding vma has anon_vma set if the * shrinking vma had, to cover any anon pages imported. */ if (exporter && exporter->anon_vma && !importer->anon_vma) { int error; importer->anon_vma = exporter->anon_vma; error = anon_vma_clone(importer, exporter); if (error) return error; } } again: vma_adjust_trans_huge(orig_vma, start, end, adjust_next); if (file) { mapping = file->f_mapping; root = &mapping->i_mmap; uprobe_munmap(vma, vma->vm_start, vma->vm_end); if (adjust_next) uprobe_munmap(next, next->vm_start, next->vm_end); i_mmap_lock_write(mapping); if (insert) { /* * Put into interval tree now, so instantiated pages * are visible to arm/parisc __flush_dcache_page * throughout; but we cannot insert into address * space until vma start or end is updated. */ __vma_link_file(insert); } } anon_vma = vma->anon_vma; if (!anon_vma && adjust_next) anon_vma = next->anon_vma; if (anon_vma) { VM_WARN_ON(adjust_next && next->anon_vma && anon_vma != next->anon_vma); anon_vma_lock_write(anon_vma); anon_vma_interval_tree_pre_update_vma(vma); if (adjust_next) anon_vma_interval_tree_pre_update_vma(next); } if (root) { flush_dcache_mmap_lock(mapping); vma_interval_tree_remove(vma, root); if (adjust_next) vma_interval_tree_remove(next, root); } if (start != vma->vm_start) { vma->vm_start = start; start_changed = true; } if (end != vma->vm_end) { vma->vm_end = end; end_changed = true; } vma->vm_pgoff = pgoff; if (adjust_next) { next->vm_start += adjust_next << PAGE_SHIFT; next->vm_pgoff += adjust_next; } if (root) { if (adjust_next) vma_interval_tree_insert(next, root); vma_interval_tree_insert(vma, root); flush_dcache_mmap_unlock(mapping); } if (remove_next) { /* * vma_merge has merged next into vma, and needs * us to remove next before dropping the locks. */ if (remove_next != 3) __vma_unlink_prev(mm, next, vma); else /* * vma is not before next if they've been * swapped. * * pre-swap() next->vm_start was reduced so * tell validate_mm_rb to ignore pre-swap() * "next" (which is stored in post-swap() * "vma"). 
*/ __vma_unlink_common(mm, next, NULL, false, vma); if (file) __remove_shared_vm_struct(next, file, mapping); } else if (insert) { /* * split_vma has split insert from vma, and needs * us to insert it before dropping the locks * (it may either follow vma or precede it). */ __insert_vm_struct(mm, insert); } else { if (start_changed) vma_gap_update(vma); if (end_changed) { if (!next) mm->highest_vm_end = vm_end_gap(vma); else if (!adjust_next) vma_gap_update(next); } } if (anon_vma) { anon_vma_interval_tree_post_update_vma(vma); if (adjust_next) anon_vma_interval_tree_post_update_vma(next); anon_vma_unlock_write(anon_vma); } if (mapping) i_mmap_unlock_write(mapping); if (root) { uprobe_mmap(vma); if (adjust_next) uprobe_mmap(next); } if (remove_next) { if (file) { uprobe_munmap(next, next->vm_start, next->vm_end); fput(file); } if (next->anon_vma) anon_vma_merge(vma, next); mm->map_count--; mpol_put(vma_policy(next)); vm_area_free(next); /* * In mprotect's case 6 (see comments on vma_merge), * we must remove another next too. It would clutter * up the code too much to do both in one go. */ if (remove_next != 3) { /* * If "next" was removed and vma->vm_end was * expanded (up) over it, in turn * "next->vm_prev->vm_end" changed and the * "vma->vm_next" gap must be updated. */ next = vma->vm_next; } else { /* * For the scope of the comment "next" and * "vma" considered pre-swap(): if "vma" was * removed, next->vm_start was expanded (down) * over it and the "next" gap must be updated. * Because of the swap() the post-swap() "vma" * actually points to pre-swap() "next" * (post-swap() "next" as opposed is now a * dangling pointer). */ next = vma; } if (remove_next == 2) { remove_next = 1; end = next->vm_end; goto again; } else if (next) vma_gap_update(next); else { /* * If remove_next == 2 we obviously can't * reach this path. * * If remove_next == 3 we can't reach this * path because pre-swap() next is always not * NULL. pre-swap() "next" is not being * removed and its next->vm_end is not altered * (and furthermore "end" already matches * next->vm_end in remove_next == 3). * * We reach this only in the remove_next == 1 * case if the "next" vma that was removed was * the highest vma of the mm. However in such * case next->vm_end == "end" and the extended * "vma" has vma->vm_end == next->vm_end so * mm->highest_vm_end doesn't need any update * in remove_next == 1 case. */ VM_WARN_ON(mm->highest_vm_end != vm_end_gap(vma)); } } if (insert && file) uprobe_mmap(insert); validate_mm(mm); return 0; } /* * If the vma has a ->close operation then the driver probably needs to release * per-vma resources, so we don't attempt to merge those. */ static inline int is_mergeable_vma(struct vm_area_struct *vma, struct file *file, unsigned long vm_flags, struct vm_userfaultfd_ctx vm_userfaultfd_ctx) { /* * VM_SOFTDIRTY should not prevent from VMA merging, if we * match the flags but dirty bit -- the caller should mark * merged VMA as dirty. If dirty bit won't be excluded from * comparison, we increase pressure on the memory system forcing * the kernel to generate new VMAs when old one could be * extended instead.
*/ if ((vma->vm_flags ^ vm_flags) & ~VM_SOFTDIRTY) return 0; if (vma->vm_file != file) return 0; if (vma->vm_ops && vma->vm_ops->close) return 0; if (!is_mergeable_vm_userfaultfd_ctx(vma, vm_userfaultfd_ctx)) return 0; return 1; } static inline int is_mergeable_anon_vma(struct anon_vma *anon_vma1, struct anon_vma *anon_vma2, struct vm_area_struct *vma) { /* * The list_is_singular() test is to avoid merging VMA cloned from * parents. This can improve scalability caused by anon_vma lock. */ if ((!anon_vma1 || !anon_vma2) && (!vma || list_is_singular(&vma->anon_vma_chain))) return 1; return anon_vma1 == anon_vma2; } /* * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff) * in front of (at a lower virtual address and file offset than) the vma. * * We cannot merge two vmas if they have differently assigned (non-NULL) * anon_vmas, nor if same anon_vma is assigned but offsets incompatible. * * We don't check here for the merged mmap wrapping around the end of pagecache * indices (16TB on ia32) because do_mmap_pgoff() does not permit mmap's which * wrap, nor mmaps which cover the final page at index -1UL. */ static int can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags, struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff, struct vm_userfaultfd_ctx vm_userfaultfd_ctx) { if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx) && is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) { if (vma->vm_pgoff == vm_pgoff) return 1; } return 0; } /* * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff) * beyond (at a higher virtual address and file offset than) the vma. * * We cannot merge two vmas if they have differently assigned (non-NULL) * anon_vmas, nor if same anon_vma is assigned but offsets incompatible. */ static int can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags, struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff, struct vm_userfaultfd_ctx vm_userfaultfd_ctx) { if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx) && is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) { pgoff_t vm_pglen; vm_pglen = vma_pages(vma); if (vma->vm_pgoff + vm_pglen == vm_pgoff) return 1; } return 0; } /* * Given a mapping request (addr,end,vm_flags,file,pgoff), figure out * whether that can be merged with its predecessor or its successor. * Or both (it neatly fills a hole). * * In most cases - when called for mmap, brk or mremap - [addr,end) is * certain not to be mapped by the time vma_merge is called; but when * called for mprotect, it is certain to be already mapped (either at * an offset within prev, or at the start of next), and the flags of * this area are about to be changed to vm_flags - and the no-change * case has already been eliminated. * * The following mprotect cases have to be considered, where AAAA is * the area passed down from mprotect_fixup, never extending beyond one * vma, PPPPPP is the prev vma specified, and NNNNNN the next vma after: * * AAAA AAAA AAAA AAAA * PPPPPPNNNNNN PPPPPPNNNNNN PPPPPPNNNNNN PPPPNNNNXXXX * cannot merge might become might become might become * PPNNNNNNNNNN PPPPPPPPPPNN PPPPPPPPPPPP 6 or * mmap, brk or case 4 below case 5 below PPPPPPPPXXXX 7 or * mremap move: PPPPXXXXXXXX 8 * AAAA * PPPP NNNN PPPPPPPPPPPP PPPPPPPPNNNN PPPPNNNNNNNN * might become case 1 below case 2 below case 3 below * * It is important for case 8 that the vma NNNN overlapping the * region AAAA is never going to be extended over XXXX.
Instead XXXX must * be extended in region AAAA and NNNN must be removed. This way in * all cases where vma_merge succeeds, the moment vma_adjust drops the * rmap_locks, the properties of the merged vma will be already * correct for the whole merged range. Some of those properties like * vm_page_prot/vm_flags may be accessed by rmap_walks and they must * be correct for the whole merged range immediately after the * rmap_locks are released. Otherwise if XXXX would be removed and * NNNN would be extended over the XXXX range, remove_migration_ptes * or other rmap walkers (if working on addresses beyond the "end" * parameter) may establish ptes with the wrong permissions of NNNN * instead of the right permissions of XXXX. */ struct vm_area_struct *vma_merge(struct mm_struct *mm, struct vm_area_struct *prev, unsigned long addr, unsigned long end, unsigned long vm_flags, struct anon_vma *anon_vma, struct file *file, pgoff_t pgoff, struct mempolicy *policy, struct vm_userfaultfd_ctx vm_userfaultfd_ctx) { pgoff_t pglen = (end - addr) >> PAGE_SHIFT; struct vm_area_struct *area, *next; int err; /* * We later require that vma->vm_flags == vm_flags, * so this tests vma->vm_flags & VM_SPECIAL, too. */ if (vm_flags & VM_SPECIAL) return NULL; if (prev) next = prev->vm_next; else next = mm->mmap; area = next; if (area && area->vm_end == end) /* cases 6, 7, 8 */ next = next->vm_next; /* verify some invariant that must be enforced by the caller */ VM_WARN_ON(prev && addr <= prev->vm_start); VM_WARN_ON(area && end > area->vm_end); VM_WARN_ON(addr >= end); /* * Can it merge with the predecessor? */ if (prev && prev->vm_end == addr && mpol_equal(vma_policy(prev), policy) && can_vma_merge_after(prev, vm_flags, anon_vma, file, pgoff, vm_userfaultfd_ctx)) { /* * OK, it can. Can we now merge in the successor as well? */ if (next && end == next->vm_start && mpol_equal(policy, vma_policy(next)) && can_vma_merge_before(next, vm_flags, anon_vma, file, pgoff+pglen, vm_userfaultfd_ctx) && is_mergeable_anon_vma(prev->anon_vma, next->anon_vma, NULL)) { /* cases 1, 6 */ err = __vma_adjust(prev, prev->vm_start, next->vm_end, prev->vm_pgoff, NULL, prev); } else /* cases 2, 5, 7 */ err = __vma_adjust(prev, prev->vm_start, end, prev->vm_pgoff, NULL, prev); if (err) return NULL; khugepaged_enter_vma_merge(prev, vm_flags); return prev; } /* * Can this new request be merged in front of next? */ if (next && end == next->vm_start && mpol_equal(policy, vma_policy(next)) && can_vma_merge_before(next, vm_flags, anon_vma, file, pgoff+pglen, vm_userfaultfd_ctx)) { if (prev && addr < prev->vm_end) /* case 4 */ err = __vma_adjust(prev, prev->vm_start, addr, prev->vm_pgoff, NULL, next); else { /* cases 3, 8 */ err = __vma_adjust(area, addr, next->vm_end, next->vm_pgoff - pglen, NULL, next); /* * In case 3 area is already equal to next and * this is a noop, but in case 8 "area" has * been removed and next was expanded over it. */ area = next; } if (err) return NULL; khugepaged_enter_vma_merge(area, vm_flags); return area; } return NULL; } /* * Rough compatibility check to quickly see if it's even worth looking * at sharing an anon_vma. * * They need to have the same vm_file, and the flags can only differ * in things that mprotect may change. * * NOTE! The fact that we share an anon_vma doesn't _have_ to mean that * we can merge the two vma's. For example, we refuse to merge a vma if * there is a vm_ops->close() function, because that indicates that the * driver is doing some kind of reference counting.
But that doesn't * really matter for the anon_vma sharing case. */ static int anon_vma_compatible(struct vm_area_struct *a, struct vm_area_struct *b) { return a->vm_end == b->vm_start && mpol_equal(vma_policy(a), vma_policy(b)) && a->vm_file == b->vm_file && !((a->vm_flags ^ b->vm_flags) & ~(VM_READ|VM_WRITE|VM_EXEC|VM_SOFTDIRTY)) && b->vm_pgoff == a->vm_pgoff + ((b->vm_start - a->vm_start) >> PAGE_SHIFT); } /* * Do some basic sanity checking to see if we can re-use the anon_vma * from 'old'. The 'a'/'b' vma's are in VM order - one of them will be * the same as 'old', the other will be the new one that is trying * to share the anon_vma. * * NOTE! This runs with mm_sem held for reading, so it is possible that * the anon_vma of 'old' is concurrently in the process of being set up * by another page fault trying to merge _that_. But that's ok: if it * is being set up, that automatically means that it will be a singleton * acceptable for merging, so we can do all of this optimistically. But * we do that READ_ONCE() to make sure that we never re-load the pointer. * * IOW: that the "list_is_singular()" test on the anon_vma_chain only * matters for the 'stable anon_vma' case (ie the thing we want to avoid * is to return an anon_vma that is "complex" due to having gone through * a fork). * * We also make sure that the two vma's are compatible (adjacent, * and with the same memory policies). That's all stable, even with just * a read lock on the mm_sem. */ static struct anon_vma *reusable_anon_vma(struct vm_area_struct *old, struct vm_area_struct *a, struct vm_area_struct *b) { if (anon_vma_compatible(a, b)) { struct anon_vma *anon_vma = READ_ONCE(old->anon_vma); if (anon_vma && list_is_singular(&old->anon_vma_chain)) return anon_vma; } return NULL; } /* * find_mergeable_anon_vma is used by anon_vma_prepare, to check * neighbouring vmas for a suitable anon_vma, before it goes off * to allocate a new anon_vma. It checks because a repetitive * sequence of mprotects and faults may otherwise lead to distinct * anon_vmas being allocated, preventing vma merge in subsequent * mprotect. */ struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma) { struct anon_vma *anon_vma; struct vm_area_struct *near; near = vma->vm_next; if (!near) goto try_prev; anon_vma = reusable_anon_vma(near, vma, near); if (anon_vma) return anon_vma; try_prev: near = vma->vm_prev; if (!near) goto none; anon_vma = reusable_anon_vma(near, near, vma); if (anon_vma) return anon_vma; none: /* * There's no absolute need to look only at touching neighbours: * we could search further afield for "compatible" anon_vmas. * But it would probably just be a waste of time searching, * or lead to too many vmas hanging off the same anon_vma. * We're trying to allow mprotect remerging later on, * not trying to minimize memory used for anon_vmas. */ return NULL; } /* * If a hint addr is less than mmap_min_addr change hint to be as * low as possible but still greater than mmap_min_addr */ static inline unsigned long round_hint_to_min(unsigned long hint) { hint &= PAGE_MASK; if (((void *)hint != NULL) && (hint < mmap_min_addr)) return PAGE_ALIGN(mmap_min_addr); return hint; } static inline int mlock_future_check(struct mm_struct *mm, unsigned long flags, unsigned long len) { unsigned long locked, lock_limit; /* mlock MCL_FUTURE? 
*/ if (flags & VM_LOCKED) { locked = len >> PAGE_SHIFT; locked += mm->locked_vm; lock_limit = rlimit(RLIMIT_MEMLOCK); lock_limit >>= PAGE_SHIFT; if (locked > lock_limit && !capable(CAP_IPC_LOCK)) return -EAGAIN; } return 0; } static inline u64 file_mmap_size_max(struct file *file, struct inode *inode) { if (S_ISREG(inode->i_mode)) return MAX_LFS_FILESIZE; if (S_ISBLK(inode->i_mode)) return MAX_LFS_FILESIZE; /* Special "we do even unsigned file positions" case */ if (file->f_mode & FMODE_UNSIGNED_OFFSET) return 0; /* Yes, random drivers might want more. But I'm tired of buggy drivers */ return ULONG_MAX; } static inline bool file_mmap_ok(struct file *file, struct inode *inode, unsigned long pgoff, unsigned long len) { u64 maxsize = file_mmap_size_max(file, inode); if (maxsize && len > maxsize) return false; maxsize -= len; if (pgoff > maxsize >> PAGE_SHIFT) return false; return true; } /* * The caller must hold down_write(&current->mm->mmap_sem). */ unsigned long do_mmap(struct file *file, unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, vm_flags_t vm_flags, unsigned long pgoff, unsigned long *populate, struct list_head *uf) { struct mm_struct *mm = current->mm; int pkey = 0; *populate = 0; if (!len) return -EINVAL; /* * Does the application expect PROT_READ to imply PROT_EXEC? * * (the exception is when the underlying filesystem is noexec * mounted, in which case we dont add PROT_EXEC.) */ if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC)) if (!(file && path_noexec(&file->f_path))) prot |= PROT_EXEC; /* force arch specific MAP_FIXED handling in get_unmapped_area */ if (flags & MAP_FIXED_NOREPLACE) flags |= MAP_FIXED; if (!(flags & MAP_FIXED)) addr = round_hint_to_min(addr); /* Careful about overflows.. */ len = PAGE_ALIGN(len); if (!len) return -ENOMEM; /* offset overflow? */ if ((pgoff + (len >> PAGE_SHIFT)) < pgoff) return -EOVERFLOW; /* Too many mappings? */ if (mm->map_count > sysctl_max_map_count) return -ENOMEM; /* Obtain the address to map to. we verify (or select) it and ensure * that it represents a valid section of the address space. */ addr = get_unmapped_area(file, addr, len, pgoff, flags); if (offset_in_page(addr)) return addr; if (flags & MAP_FIXED_NOREPLACE) { struct vm_area_struct *vma = find_vma(mm, addr); if (vma && vma->vm_start < addr + len) return -EEXIST; } if (prot == PROT_EXEC) { pkey = execute_only_pkey(mm); if (pkey < 0) pkey = 0; } /* Do simple checking here so the lower-level routines won't have * to. we assume access permissions have been handled by the open * of the memory object, so we don't do any here. */ vm_flags |= calc_vm_prot_bits(prot, pkey) | calc_vm_flag_bits(flags) | mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC; if (flags & MAP_LOCKED) if (!can_do_mlock()) return -EPERM; if (mlock_future_check(mm, vm_flags, len)) return -EAGAIN; if (file) { struct inode *inode = file_inode(file); unsigned long flags_mask; if (!file_mmap_ok(file, inode, pgoff, len)) return -EOVERFLOW; flags_mask = LEGACY_MAP_MASK | file->f_op->mmap_supported_flags; switch (flags & MAP_TYPE) { case MAP_SHARED: /* * Force use of MAP_SHARED_VALIDATE with non-legacy * flags. E.g. MAP_SYNC is dangerous to use with * MAP_SHARED as you don't know which consistency model * you will get. We silently ignore unsupported flags * with MAP_SHARED to preserve backward compatibility. 
*/ flags &= LEGACY_MAP_MASK; /* fall through */ case MAP_SHARED_VALIDATE: if (flags & ~flags_mask) return -EOPNOTSUPP; if ((prot&PROT_WRITE) && !(file->f_mode&FMODE_WRITE)) return -EACCES; /* * Make sure we don't allow writing to an append-only * file.. */ if (IS_APPEND(inode) && (file->f_mode & FMODE_WRITE)) return -EACCES; /* * Make sure there are no mandatory locks on the file. */ if (locks_verify_locked(file)) return -EAGAIN; vm_flags |= VM_SHARED | VM_MAYSHARE; if (!(file->f_mode & FMODE_WRITE)) vm_flags &= ~(VM_MAYWRITE | VM_SHARED); /* fall through */ case MAP_PRIVATE: if (!(file->f_mode & FMODE_READ)) return -EACCES; if (path_noexec(&file->f_path)) { if (vm_flags & VM_EXEC) return -EPERM; vm_flags &= ~VM_MAYEXEC; } if (!file->f_op->mmap) return -ENODEV; if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP)) return -EINVAL; break; default: return -EINVAL; } } else { switch (flags & MAP_TYPE) { case MAP_SHARED: if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP)) return -EINVAL; /* * Ignore pgoff. */ pgoff = 0; vm_flags |= VM_SHARED | VM_MAYSHARE; break; case MAP_PRIVATE: /* * Set pgoff according to addr for anon_vma. */ pgoff = addr >> PAGE_SHIFT; break; default: return -EINVAL; } } /* * Set 'VM_NORESERVE' if we should not account for the * memory use of this mapping. */ if (flags & MAP_NORESERVE) { /* We honor MAP_NORESERVE if allowed to overcommit */ if (sysctl_overcommit_memory != OVERCOMMIT_NEVER) vm_flags |= VM_NORESERVE; /* hugetlb applies strict overcommit unless MAP_NORESERVE */ if (file && is_file_hugepages(file)) vm_flags |= VM_NORESERVE; } addr = mmap_region(file, addr, len, vm_flags, pgoff, uf); if (!IS_ERR_VALUE(addr) && ((vm_flags & VM_LOCKED) || (flags & (MAP_POPULATE | MAP_NONBLOCK)) == MAP_POPULATE)) *populate = len; return addr; } unsigned long ksys_mmap_pgoff(unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, unsigned long fd, unsigned long pgoff) { struct file *file = NULL; unsigned long retval; if (!(flags & MAP_ANONYMOUS)) { audit_mmap_fd(fd, flags); file = fget(fd); if (!file) return -EBADF; if (is_file_hugepages(file)) len = ALIGN(len, huge_page_size(hstate_file(file))); retval = -EINVAL; if (unlikely(flags & MAP_HUGETLB && !is_file_hugepages(file))) goto out_fput; } else if (flags & MAP_HUGETLB) { struct user_struct *user = NULL; struct hstate *hs; hs = hstate_sizelog((flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK); if (!hs) return -EINVAL; len = ALIGN(len, huge_page_size(hs)); /* * VM_NORESERVE is used because the reservations will be * taken when vm_ops->mmap() is called * A dummy user value is used because we are not locking * memory so no accounting is necessary */ file = hugetlb_file_setup(HUGETLB_ANON_FILE, len, VM_NORESERVE, &user, HUGETLB_ANONHUGE_INODE, (flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK); if (IS_ERR(file)) return PTR_ERR(file); } flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff); out_fput: if (file) fput(file); return retval; } SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len, unsigned long, prot, unsigned long, flags, unsigned long, fd, unsigned long, pgoff) { return ksys_mmap_pgoff(addr, len, prot, flags, fd, pgoff); } #ifdef __ARCH_WANT_SYS_OLD_MMAP struct mmap_arg_struct { unsigned long addr; unsigned long len; unsigned long prot; unsigned long flags; unsigned long fd; unsigned long offset; }; SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg) { struct mmap_arg_struct a; if (copy_from_user(&a, arg, sizeof(a))) return -EFAULT; if 
(offset_in_page(a.offset))
		return -EINVAL;

	return ksys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
			       a.offset >> PAGE_SHIFT);
}
#endif /* __ARCH_WANT_SYS_OLD_MMAP */

/*
 * Some shared mappings will want the pages marked read-only
 * to track write events. If so, we'll downgrade vm_page_prot
 * to the private version (using protection_map[] without the
 * VM_SHARED bit).
 */
int vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot)
{
	vm_flags_t vm_flags = vma->vm_flags;
	const struct vm_operations_struct *vm_ops = vma->vm_ops;

	/* If it was private or non-writable, the write bit is already clear */
	if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
		return 0;

	/* The backer wishes to know when pages are first written to? */
	if (vm_ops && (vm_ops->page_mkwrite || vm_ops->pfn_mkwrite))
		return 1;

	/* The open routine did something to the protections that pgprot_modify
	 * won't preserve? */
	if (pgprot_val(vm_page_prot) !=
	    pgprot_val(vm_pgprot_modify(vm_page_prot, vm_flags)))
		return 0;

	/* Do we need to track softdirty? */
	if (IS_ENABLED(CONFIG_MEM_SOFT_DIRTY) && !(vm_flags & VM_SOFTDIRTY))
		return 1;

	/* Specialty mapping? */
	if (vm_flags & VM_PFNMAP)
		return 0;

	/* Can the mapping track the dirty pages? */
	return vma->vm_file && vma->vm_file->f_mapping &&
		mapping_cap_account_dirty(vma->vm_file->f_mapping);
}

/*
 * We account for memory if it's a private writeable mapping,
 * not hugepages and VM_NORESERVE wasn't set.
 */
static inline int accountable_mapping(struct file *file, vm_flags_t vm_flags)
{
	/*
	 * hugetlb has its own accounting separate from the core VM.
	 * VM_HUGETLB may not be set yet so we cannot check for that flag.
	 */
	if (file && is_file_hugepages(file))
		return 0;

	return (vm_flags & (VM_NORESERVE | VM_SHARED | VM_WRITE)) == VM_WRITE;
}

unsigned long mmap_region(struct file *file, unsigned long addr,
		unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
		struct list_head *uf)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma, *prev;
	int error;
	struct rb_node **rb_link, *rb_parent;
	unsigned long charged = 0;

	/* Check against address space limit. */
	if (!may_expand_vm(mm, vm_flags, len >> PAGE_SHIFT)) {
		unsigned long nr_pages;

		/*
		 * MAP_FIXED may remove pages of mappings that intersect with
		 * the requested mapping. Account for the pages it would unmap.
		 */
		nr_pages = count_vma_pages_range(mm, addr, addr + len);

		if (!may_expand_vm(mm, vm_flags,
					(len >> PAGE_SHIFT) - nr_pages))
			return -ENOMEM;
	}

	/* Clear old maps */
	while (find_vma_links(mm, addr, addr + len, &prev, &rb_link,
			      &rb_parent)) {
		if (do_munmap(mm, addr, len, uf))
			return -ENOMEM;
	}

	/*
	 * Private writable mapping: check memory availability
	 */
	if (accountable_mapping(file, vm_flags)) {
		charged = len >> PAGE_SHIFT;
		if (security_vm_enough_memory_mm(mm, charged))
			return -ENOMEM;
		vm_flags |= VM_ACCOUNT;
	}

	/*
	 * Can we just expand an old mapping?
	 */
	vma = vma_merge(mm, prev, addr, addr + len, vm_flags,
			NULL, file, pgoff, NULL, NULL_VM_UFFD_CTX);
	if (vma)
		goto out;

	/*
	 * Determine the object being mapped and call the appropriate
	 * specific mapper. The address has already been validated, but
	 * not unmapped, but the maps are removed from the list.
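 *
 * Note the error unwinding below runs in reverse order of setup: a
 * failing ->mmap() drops the file reference and calls unmap_region()
 * (unmap_and_free_vma), then write access is re-allowed if it had
 * been denied (allow_write_and_free_vma), the vma itself is freed
 * (free_vma), and finally any remaining accounting charge is returned
 * (unacct_error).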
*/ vma = vm_area_alloc(mm); if (!vma) { error = -ENOMEM; goto unacct_error; } vma->vm_start = addr; vma->vm_end = addr + len; vma->vm_flags = vm_flags; vma->vm_page_prot = vm_get_page_prot(vm_flags); vma->vm_pgoff = pgoff; if (file) { if (vm_flags & VM_DENYWRITE) { error = deny_write_access(file); if (error) goto free_vma; } if (vm_flags & VM_SHARED) { error = mapping_map_writable(file->f_mapping); if (error) goto allow_write_and_free_vma; } /* ->mmap() can change vma->vm_file, but must guarantee that * vma_link() below can deny write-access if VM_DENYWRITE is set * and map writably if VM_SHARED is set. This usually means the * new file must not have been exposed to user-space, yet. */ vma->vm_file = get_file(file); error = call_mmap(file, vma); if (error) goto unmap_and_free_vma; /* Can addr have changed?? * * Answer: Yes, several device drivers can do it in their * f_op->mmap method. -DaveM * Bug: If addr is changed, prev, rb_link, rb_parent should * be updated for vma_link() */ WARN_ON_ONCE(addr != vma->vm_start); addr = vma->vm_start; vm_flags = vma->vm_flags; } else if (vm_flags & VM_SHARED) { error = shmem_zero_setup(vma); if (error) goto free_vma; } else { vma_set_anonymous(vma); } vma_link(mm, vma, prev, rb_link, rb_parent); /* Once vma denies write, undo our temporary denial count */ if (file) { if (vm_flags & VM_SHARED) mapping_unmap_writable(file->f_mapping); if (vm_flags & VM_DENYWRITE) allow_write_access(file); } file = vma->vm_file; out: perf_event_mmap(vma); vm_stat_account(mm, vm_flags, len >> PAGE_SHIFT); if (vm_flags & VM_LOCKED) { if ((vm_flags & VM_SPECIAL) || vma_is_dax(vma) || is_vm_hugetlb_page(vma) || vma == get_gate_vma(current->mm)) vma->vm_flags &= VM_LOCKED_CLEAR_MASK; else mm->locked_vm += (len >> PAGE_SHIFT); } if (file) uprobe_mmap(vma); /* * New (or expanded) vma always get soft dirty status. * Otherwise user-space soft-dirty page tracker won't * be able to distinguish situation when vma area unmapped, * then new mapped in-place (which must be aimed as * a completely new data area). */ vma->vm_flags |= VM_SOFTDIRTY; vma_set_page_prot(vma); return addr; unmap_and_free_vma: vma->vm_file = NULL; fput(file); /* Undo any partial mapping done by a device driver. */ unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end); charged = 0; if (vm_flags & VM_SHARED) mapping_unmap_writable(file->f_mapping); allow_write_and_free_vma: if (vm_flags & VM_DENYWRITE) allow_write_access(file); free_vma: vm_area_free(vma); unacct_error: if (charged) vm_unacct_memory(charged); return error; } unsigned long unmapped_area(struct vm_unmapped_area_info *info) { /* * We implement the search by looking for an rbtree node that * immediately follows a suitable gap. 
That is, * - gap_start = vma->vm_prev->vm_end <= info->high_limit - length; * - gap_end = vma->vm_start >= info->low_limit + length; * - gap_end - gap_start >= length */ struct mm_struct *mm = current->mm; struct vm_area_struct *vma; unsigned long length, low_limit, high_limit, gap_start, gap_end; /* Adjust search length to account for worst case alignment overhead */ length = info->length + info->align_mask; if (length < info->length) return -ENOMEM; /* Adjust search limits by the desired length */ if (info->high_limit < length) return -ENOMEM; high_limit = info->high_limit - length; if (info->low_limit > high_limit) return -ENOMEM; low_limit = info->low_limit + length; /* Check if rbtree root looks promising */ if (RB_EMPTY_ROOT(&mm->mm_rb)) goto check_highest; vma = rb_entry(mm->mm_rb.rb_node, struct vm_area_struct, vm_rb); if (vma->rb_subtree_gap < length) goto check_highest; while (true) { /* Visit left subtree if it looks promising */ gap_end = vm_start_gap(vma); if (gap_end >= low_limit && vma->vm_rb.rb_left) { struct vm_area_struct *left = rb_entry(vma->vm_rb.rb_left, struct vm_area_struct, vm_rb); if (left->rb_subtree_gap >= length) { vma = left; continue; } } gap_start = vma->vm_prev ? vm_end_gap(vma->vm_prev) : 0; check_current: /* Check if current node has a suitable gap */ if (gap_start > high_limit) return -ENOMEM; if (gap_end >= low_limit && gap_end > gap_start && gap_end - gap_start >= length) goto found; /* Visit right subtree if it looks promising */ if (vma->vm_rb.rb_right) { struct vm_area_struct *right = rb_entry(vma->vm_rb.rb_right, struct vm_area_struct, vm_rb); if (right->rb_subtree_gap >= length) { vma = right; continue; } } /* Go back up the rbtree to find next candidate node */ while (true) { struct rb_node *prev = &vma->vm_rb; if (!rb_parent(prev)) goto check_highest; vma = rb_entry(rb_parent(prev), struct vm_area_struct, vm_rb); if (prev == vma->vm_rb.rb_left) { gap_start = vm_end_gap(vma->vm_prev); gap_end = vm_start_gap(vma); goto check_current; } } } check_highest: /* Check highest gap, which does not precede any rbtree node */ gap_start = mm->highest_vm_end; gap_end = ULONG_MAX; /* Only for VM_BUG_ON below */ if (gap_start > high_limit) return -ENOMEM; found: /* We found a suitable gap. Clip it with the original low_limit. */ if (gap_start < info->low_limit) gap_start = info->low_limit; /* Adjust gap address to the desired alignment */ gap_start += (info->align_offset - gap_start) & info->align_mask; VM_BUG_ON(gap_start + info->length > info->high_limit); VM_BUG_ON(gap_start + info->length > gap_end); return gap_start; } unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info) { struct mm_struct *mm = current->mm; struct vm_area_struct *vma; unsigned long length, low_limit, high_limit, gap_start, gap_end; /* Adjust search length to account for worst case alignment overhead */ length = info->length + info->align_mask; if (length < info->length) return -ENOMEM; /* * Adjust search limits by the desired length. * See implementation comment at top of unmapped_area(). 
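 *
 * Worked example of the limit adjustment (illustrative numbers): with
 * info->length = 0x3000, info->align_mask = 0xfff,
 * info->low_limit = 0x10000 and info->high_limit = 0x20000:
 *
 *   length     = 0x3000  + 0xfff  = 0x3fff
 *   high_limit = 0x20000 - 0x3fff = 0x1c001  (max usable gap start)
 *   low_limit  = 0x10000 + 0x3fff = 0x13fff  (min usable gap end)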
*/ gap_end = info->high_limit; if (gap_end < length) return -ENOMEM; high_limit = gap_end - length; if (info->low_limit > high_limit) return -ENOMEM; low_limit = info->low_limit + length; /* Check highest gap, which does not precede any rbtree node */ gap_start = mm->highest_vm_end; if (gap_start <= high_limit) goto found_highest; /* Check if rbtree root looks promising */ if (RB_EMPTY_ROOT(&mm->mm_rb)) return -ENOMEM; vma = rb_entry(mm->mm_rb.rb_node, struct vm_area_struct, vm_rb); if (vma->rb_subtree_gap < length) return -ENOMEM; while (true) { /* Visit right subtree if it looks promising */ gap_start = vma->vm_prev ? vm_end_gap(vma->vm_prev) : 0; if (gap_start <= high_limit && vma->vm_rb.rb_right) { struct vm_area_struct *right = rb_entry(vma->vm_rb.rb_right, struct vm_area_struct, vm_rb); if (right->rb_subtree_gap >= length) { vma = right; continue; } } check_current: /* Check if current node has a suitable gap */ gap_end = vm_start_gap(vma); if (gap_end < low_limit) return -ENOMEM; if (gap_start <= high_limit && gap_end > gap_start && gap_end - gap_start >= length) goto found; /* Visit left subtree if it looks promising */ if (vma->vm_rb.rb_left) { struct vm_area_struct *left = rb_entry(vma->vm_rb.rb_left, struct vm_area_struct, vm_rb); if (left->rb_subtree_gap >= length) { vma = left; continue; } } /* Go back up the rbtree to find next candidate node */ while (true) { struct rb_node *prev = &vma->vm_rb; if (!rb_parent(prev)) return -ENOMEM; vma = rb_entry(rb_parent(prev), struct vm_area_struct, vm_rb); if (prev == vma->vm_rb.rb_right) { gap_start = vma->vm_prev ? vm_end_gap(vma->vm_prev) : 0; goto check_current; } } } found: /* We found a suitable gap. Clip it with the original high_limit. */ if (gap_end > info->high_limit) gap_end = info->high_limit; found_highest: /* Compute highest gap address at the desired alignment */ gap_end -= info->length; gap_end -= (gap_end - info->align_offset) & info->align_mask; VM_BUG_ON(gap_end < info->low_limit); VM_BUG_ON(gap_end < gap_start); return gap_end; } #ifndef arch_get_mmap_end #define arch_get_mmap_end(addr) (TASK_SIZE) #endif #ifndef arch_get_mmap_base #define arch_get_mmap_base(addr, base) (base) #endif /* Get an address range which is currently unmapped. * For shmat() with addr=0. * * Ugly calling convention alert: * Return value with the low bits set means error value, * ie * if (ret & ~PAGE_MASK) * error = ret; * * This function "knows" that -ENOMEM has the bits set. 
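 *
 * Caller-side sketch of that convention (do_mmap() above does exactly
 * this):
 *
 *   addr = get_unmapped_area(file, hint, len, pgoff, flags);
 *   if (offset_in_page(addr))      // low bits set: addr is an errno
 *           return addr;           // e.g. -ENOMEM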
*/ #ifndef HAVE_ARCH_UNMAPPED_AREA unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) { struct mm_struct *mm = current->mm; struct vm_area_struct *vma, *prev; struct vm_unmapped_area_info info; const unsigned long mmap_end = arch_get_mmap_end(addr); if (len > mmap_end - mmap_min_addr) return -ENOMEM; if (flags & MAP_FIXED) return addr; if (addr) { addr = PAGE_ALIGN(addr); vma = find_vma_prev(mm, addr, &prev); if (mmap_end - len >= addr && addr >= mmap_min_addr && (!vma || addr + len <= vm_start_gap(vma)) && (!prev || addr >= vm_end_gap(prev))) return addr; } info.flags = 0; info.length = len; info.low_limit = mm->mmap_base; info.high_limit = mmap_end; info.align_mask = 0; return vm_unmapped_area(&info); } #endif /* * This mmap-allocator allocates new areas top-down from below the * stack's low limit (the base): */ #ifndef HAVE_ARCH_UNMAPPED_AREA_TOPDOWN unsigned long arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, const unsigned long len, const unsigned long pgoff, const unsigned long flags) { struct vm_area_struct *vma, *prev; struct mm_struct *mm = current->mm; unsigned long addr = addr0; struct vm_unmapped_area_info info; const unsigned long mmap_end = arch_get_mmap_end(addr); /* requested length too big for entire address space */ if (len > mmap_end - mmap_min_addr) return -ENOMEM; if (flags & MAP_FIXED) return addr; /* requesting a specific address */ if (addr) { addr = PAGE_ALIGN(addr); vma = find_vma_prev(mm, addr, &prev); if (mmap_end - len >= addr && addr >= mmap_min_addr && (!vma || addr + len <= vm_start_gap(vma)) && (!prev || addr >= vm_end_gap(prev))) return addr; } info.flags = VM_UNMAPPED_AREA_TOPDOWN; info.length = len; info.low_limit = max(PAGE_SIZE, mmap_min_addr); info.high_limit = arch_get_mmap_base(addr, mm->mmap_base); info.align_mask = 0; addr = vm_unmapped_area(&info); /* * A failed mmap() very likely causes application failure, * so fall back to the bottom-up function here. This scenario * can happen with large stack limits and large mmap() * allocations. */ if (offset_in_page(addr)) { VM_BUG_ON(addr != -ENOMEM); info.flags = 0; info.low_limit = TASK_UNMAPPED_BASE; info.high_limit = mmap_end; addr = vm_unmapped_area(&info); } return addr; } #endif unsigned long get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) { unsigned long (*get_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); unsigned long error = arch_mmap_check(addr, len, flags); if (error) return error; /* Careful about overflows.. */ if (len > TASK_SIZE) return -ENOMEM; get_area = current->mm->get_unmapped_area; if (file) { if (file->f_op->get_unmapped_area) get_area = file->f_op->get_unmapped_area; } else if (flags & MAP_SHARED) { /* * mmap_region() will call shmem_zero_setup() to create a file, * so use shmem's get_unmapped_area in case it can be huge. * do_mmap_pgoff() will clear pgoff, so match alignment. */ pgoff = 0; get_area = shmem_get_unmapped_area; } addr = get_area(file, addr, len, pgoff, flags); if (IS_ERR_VALUE(addr)) return addr; if (addr > TASK_SIZE - len) return -ENOMEM; if (offset_in_page(addr)) return -EINVAL; error = security_mmap_addr(addr); return error ? error : addr; } EXPORT_SYMBOL(get_unmapped_area); /* Look up the first VMA which satisfies addr < vm_end, NULL if none. 
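 *
 * Note the returned vma only satisfies addr < vma->vm_end; addr may
 * still fall in the gap below it. Callers that need containment also
 * check vm_start, as elsewhere in this file:
 *
 *   vma = find_vma(mm, addr);
 *   if (vma && vma->vm_start <= addr)
 *           ... addr lies inside vma ...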
*/ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr) { struct rb_node *rb_node; struct vm_area_struct *vma; /* Check the cache first. */ vma = vmacache_find(mm, addr); if (likely(vma)) return vma; rb_node = mm->mm_rb.rb_node; while (rb_node) { struct vm_area_struct *tmp; tmp = rb_entry(rb_node, struct vm_area_struct, vm_rb); if (tmp->vm_end > addr) { vma = tmp; if (tmp->vm_start <= addr) break; rb_node = rb_node->rb_left; } else rb_node = rb_node->rb_right; } if (vma) vmacache_update(addr, vma); return vma; } EXPORT_SYMBOL(find_vma); /* * Same as find_vma, but also return a pointer to the previous VMA in *pprev. */ struct vm_area_struct * find_vma_prev(struct mm_struct *mm, unsigned long addr, struct vm_area_struct **pprev) { struct vm_area_struct *vma; vma = find_vma(mm, addr); if (vma) { *pprev = vma->vm_prev; } else { struct rb_node *rb_node = mm->mm_rb.rb_node; *pprev = NULL; while (rb_node) { *pprev = rb_entry(rb_node, struct vm_area_struct, vm_rb); rb_node = rb_node->rb_right; } } return vma; } /* * Verify that the stack growth is acceptable and * update accounting. This is shared with both the * grow-up and grow-down cases. */ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, unsigned long grow) { struct mm_struct *mm = vma->vm_mm; unsigned long new_start; /* address space limit tests */ if (!may_expand_vm(mm, vma->vm_flags, grow)) return -ENOMEM; /* Stack limit test */ if (size > rlimit(RLIMIT_STACK)) return -ENOMEM; /* mlock limit tests */ if (vma->vm_flags & VM_LOCKED) { unsigned long locked; unsigned long limit; locked = mm->locked_vm + grow; limit = rlimit(RLIMIT_MEMLOCK); limit >>= PAGE_SHIFT; if (locked > limit && !capable(CAP_IPC_LOCK)) return -ENOMEM; } /* Check to ensure the stack will not grow into a hugetlb-only region */ new_start = (vma->vm_flags & VM_GROWSUP) ? vma->vm_start : vma->vm_end - size; if (is_hugepage_only_range(vma->vm_mm, new_start, size)) return -EFAULT; /* * Overcommit.. This must be the final test, as it will * update security statistics. */ if (security_vm_enough_memory_mm(mm, grow)) return -ENOMEM; return 0; } #if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64) /* * PA-RISC uses this for its stack; IA64 for its Register Backing Store. * vma is the last one with address > vma->vm_end. Have to extend vma. */ int expand_upwards(struct vm_area_struct *vma, unsigned long address) { struct mm_struct *mm = vma->vm_mm; struct vm_area_struct *next; unsigned long gap_addr; int error = 0; if (!(vma->vm_flags & VM_GROWSUP)) return -EFAULT; /* Guard against exceeding limits of the address space. */ address &= PAGE_MASK; if (address >= (TASK_SIZE & PAGE_MASK)) return -ENOMEM; address += PAGE_SIZE; /* Enforce stack_guard_gap */ gap_addr = address + stack_guard_gap; /* Guard against overflow */ if (gap_addr < address || gap_addr > TASK_SIZE) gap_addr = TASK_SIZE; next = vma->vm_next; if (next && next->vm_start < gap_addr && (next->vm_flags & (VM_WRITE|VM_READ|VM_EXEC))) { if (!(next->vm_flags & VM_GROWSUP)) return -ENOMEM; /* Check that both stack segments have the same anon_vma? */ } /* We must make sure the anon_vma is allocated. */ if (unlikely(anon_vma_prepare(vma))) return -ENOMEM; /* * vma->vm_start/vm_end cannot change under us because the caller * is required to hold the mmap_sem in read mode. We need the * anon_vma lock to serialize against concurrent expand_stacks. 
*/ anon_vma_lock_write(vma->anon_vma); /* Somebody else might have raced and expanded it already */ if (address > vma->vm_end) { unsigned long size, grow; size = address - vma->vm_start; grow = (address - vma->vm_end) >> PAGE_SHIFT; error = -ENOMEM; if (vma->vm_pgoff + (size >> PAGE_SHIFT) >= vma->vm_pgoff) { error = acct_stack_growth(vma, size, grow); if (!error) { /* * vma_gap_update() doesn't support concurrent * updates, but we only hold a shared mmap_sem * lock here, so we need to protect against * concurrent vma expansions. * anon_vma_lock_write() doesn't help here, as * we don't guarantee that all growable vmas * in a mm share the same root anon vma. * So, we reuse mm->page_table_lock to guard * against concurrent vma expansions. */ spin_lock(&mm->page_table_lock); if (vma->vm_flags & VM_LOCKED) mm->locked_vm += grow; vm_stat_account(mm, vma->vm_flags, grow); anon_vma_interval_tree_pre_update_vma(vma); vma->vm_end = address; anon_vma_interval_tree_post_update_vma(vma); if (vma->vm_next) vma_gap_update(vma->vm_next); else mm->highest_vm_end = vm_end_gap(vma); spin_unlock(&mm->page_table_lock); perf_event_mmap(vma); } } } anon_vma_unlock_write(vma->anon_vma); khugepaged_enter_vma_merge(vma, vma->vm_flags); validate_mm(mm); return error; } #endif /* CONFIG_STACK_GROWSUP || CONFIG_IA64 */ /* * vma is the first one with address < vma->vm_start. Have to extend vma. */ int expand_downwards(struct vm_area_struct *vma, unsigned long address) { struct mm_struct *mm = vma->vm_mm; struct vm_area_struct *prev; int error; address &= PAGE_MASK; error = security_mmap_addr(address); if (error) return error; /* Enforce stack_guard_gap */ prev = vma->vm_prev; /* Check that both stack segments have the same anon_vma? */ if (prev && !(prev->vm_flags & VM_GROWSDOWN) && (prev->vm_flags & (VM_WRITE|VM_READ|VM_EXEC))) { if (address - prev->vm_end < stack_guard_gap) return -ENOMEM; } /* We must make sure the anon_vma is allocated. */ if (unlikely(anon_vma_prepare(vma))) return -ENOMEM; /* * vma->vm_start/vm_end cannot change under us because the caller * is required to hold the mmap_sem in read mode. We need the * anon_vma lock to serialize against concurrent expand_stacks. */ anon_vma_lock_write(vma->anon_vma); /* Somebody else might have raced and expanded it already */ if (address < vma->vm_start) { unsigned long size, grow; size = vma->vm_end - address; grow = (vma->vm_start - address) >> PAGE_SHIFT; error = -ENOMEM; if (grow <= vma->vm_pgoff) { error = acct_stack_growth(vma, size, grow); if (!error) { /* * vma_gap_update() doesn't support concurrent * updates, but we only hold a shared mmap_sem * lock here, so we need to protect against * concurrent vma expansions. * anon_vma_lock_write() doesn't help here, as * we don't guarantee that all growable vmas * in a mm share the same root anon vma. * So, we reuse mm->page_table_lock to guard * against concurrent vma expansions. */ spin_lock(&mm->page_table_lock); if (vma->vm_flags & VM_LOCKED) mm->locked_vm += grow; vm_stat_account(mm, vma->vm_flags, grow); anon_vma_interval_tree_pre_update_vma(vma); vma->vm_start = address; vma->vm_pgoff -= grow; anon_vma_interval_tree_post_update_vma(vma); vma_gap_update(vma); spin_unlock(&mm->page_table_lock); perf_event_mmap(vma); } } } anon_vma_unlock_write(vma->anon_vma); khugepaged_enter_vma_merge(vma, vma->vm_flags); validate_mm(mm); return error; } /* enforced gap between the expanding stack and other mappings. 
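 *
 * The default is 256 pages (1MB with 4kB pages). The boot parameter
 * below is likewise given in pages, e.g. (illustrative):
 *
 *   stack_guard_gap=512        // 2MB guard gap with 4kB pages
 *
 * since cmdline_parse_stack_guard_gap() shifts the value by PAGE_SHIFT.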
*/ unsigned long stack_guard_gap = 256UL<<PAGE_SHIFT; static int __init cmdline_parse_stack_guard_gap(char *p) { unsigned long val; char *endptr; val = simple_strtoul(p, &endptr, 10); if (!*endptr) stack_guard_gap = val << PAGE_SHIFT; return 0; } __setup("stack_guard_gap=", cmdline_parse_stack_guard_gap); #ifdef CONFIG_STACK_GROWSUP int expand_stack(struct vm_area_struct *vma, unsigned long address) { return expand_upwards(vma, address); } struct vm_area_struct * find_extend_vma(struct mm_struct *mm, unsigned long addr) { struct vm_area_struct *vma, *prev; addr &= PAGE_MASK; vma = find_vma_prev(mm, addr, &prev); if (vma && (vma->vm_start <= addr)) return vma; if (!prev || expand_stack(prev, addr)) return NULL; if (prev->vm_flags & VM_LOCKED) populate_vma_page_range(prev, addr, prev->vm_end, NULL); return prev; } #else int expand_stack(struct vm_area_struct *vma, unsigned long address) { return expand_downwards(vma, address); } struct vm_area_struct * find_extend_vma(struct mm_struct *mm, unsigned long addr) { struct vm_area_struct *vma; unsigned long start; addr &= PAGE_MASK; vma = find_vma(mm, addr); if (!vma) return NULL; if (vma->vm_start <= addr) return vma; if (!(vma->vm_flags & VM_GROWSDOWN)) return NULL; start = vma->vm_start; if (expand_stack(vma, addr)) return NULL; if (vma->vm_flags & VM_LOCKED) populate_vma_page_range(vma, addr, start, NULL); return vma; } #endif EXPORT_SYMBOL_GPL(find_extend_vma); /* * Ok - we have the memory areas we should free on the vma list, * so release them, and do the vma updates. * * Called with the mm semaphore held. */ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma) { unsigned long nr_accounted = 0; /* Update high watermark before we lower total_vm */ update_hiwater_vm(mm); do { long nrpages = vma_pages(vma); if (vma->vm_flags & VM_ACCOUNT) nr_accounted += nrpages; vm_stat_account(mm, vma->vm_flags, -nrpages); vma = remove_vma(vma); } while (vma); vm_unacct_memory(nr_accounted); validate_mm(mm); } /* * Get rid of page table information in the indicated region. * * Called with the mm semaphore held. */ static void unmap_region(struct mm_struct *mm, struct vm_area_struct *vma, struct vm_area_struct *prev, unsigned long start, unsigned long end) { struct vm_area_struct *next = prev ? prev->vm_next : mm->mmap; struct mmu_gather tlb; lru_add_drain(); tlb_gather_mmu(&tlb, mm, start, end); update_hiwater_rss(mm); unmap_vmas(&tlb, vma, start, end); free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS, next ? next->vm_start : USER_PGTABLES_CEILING); tlb_finish_mmu(&tlb, start, end); } /* * Create a list of vma's touched by the unmap, removing them from the mm's * vma list as we go.. */ static void detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma, struct vm_area_struct *prev, unsigned long end) { struct vm_area_struct **insertion_point; struct vm_area_struct *tail_vma = NULL; insertion_point = (prev ? &prev->vm_next : &mm->mmap); vma->vm_prev = NULL; do { vma_rb_erase(vma, &mm->mm_rb); mm->map_count--; tail_vma = vma; vma = vma->vm_next; } while (vma && vma->vm_start < end); *insertion_point = vma; if (vma) { vma->vm_prev = prev; vma_gap_update(vma); } else mm->highest_vm_end = prev ? vm_end_gap(prev) : 0; tail_vma->vm_next = NULL; /* Kill the cache */ vmacache_invalidate(mm); } /* * __split_vma() bypasses sysctl_max_map_count checking. We use this where it * has already been checked or doesn't make sense to fail. 
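 *
 * Caller pattern (as in __do_munmap() below): unmapping [start, end)
 * from the middle of one vma needs two splits:
 *
 *   error = __split_vma(mm, vma, start, 0);   // new vma above start
 *   ...
 *   error = __split_vma(mm, last, end, 1);    // new vma below end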
*/ int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, int new_below) { struct vm_area_struct *new; int err; if (vma->vm_ops && vma->vm_ops->split) { err = vma->vm_ops->split(vma, addr); if (err) return err; } new = vm_area_dup(vma); if (!new) return -ENOMEM; if (new_below) new->vm_end = addr; else { new->vm_start = addr; new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT); } err = vma_dup_policy(vma, new); if (err) goto out_free_vma; err = anon_vma_clone(new, vma); if (err) goto out_free_mpol; if (new->vm_file) get_file(new->vm_file); if (new->vm_ops && new->vm_ops->open) new->vm_ops->open(new); if (new_below) err = vma_adjust(vma, addr, vma->vm_end, vma->vm_pgoff + ((addr - new->vm_start) >> PAGE_SHIFT), new); else err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new); /* Success. */ if (!err) return 0; /* Clean everything up if vma_adjust failed. */ if (new->vm_ops && new->vm_ops->close) new->vm_ops->close(new); if (new->vm_file) fput(new->vm_file); unlink_anon_vmas(new); out_free_mpol: mpol_put(vma_policy(new)); out_free_vma: vm_area_free(new); return err; } /* * Split a vma into two pieces at address 'addr', a new vma is allocated * either for the first part or the tail. */ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, int new_below) { if (mm->map_count >= sysctl_max_map_count) return -ENOMEM; return __split_vma(mm, vma, addr, new_below); } /* Munmap is split into 2 main parts -- this part which finds * what needs doing, and the areas themselves, which do the * work. This now handles partial unmappings. * Jeremy Fitzhardinge <jeremy@goop.org> */ int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len, struct list_head *uf, bool downgrade) { unsigned long end; struct vm_area_struct *vma, *prev, *last; if ((offset_in_page(start)) || start > TASK_SIZE || len > TASK_SIZE-start) return -EINVAL; len = PAGE_ALIGN(len); if (len == 0) return -EINVAL; /* Find the first overlapping VMA */ vma = find_vma(mm, start); if (!vma) return 0; prev = vma->vm_prev; /* we have start < vma->vm_end */ /* if it doesn't overlap, we have nothing.. */ end = start + len; if (vma->vm_start >= end) return 0; /* * If we need to split any vma, do it now to save pain later. * * Note: mremap's move_vma VM_ACCOUNT handling assumes a partially * unmapped vm_area_struct will remain in use: so lower split_vma * places tmp vma above, and higher split_vma places tmp vma below. */ if (start > vma->vm_start) { int error; /* * Make sure that map_count on return from munmap() will * not exceed its limit; but let map_count go just above * its limit temporarily, to help free resources as expected. */ if (end < vma->vm_end && mm->map_count >= sysctl_max_map_count) return -ENOMEM; error = __split_vma(mm, vma, start, 0); if (error) return error; prev = vma; } /* Does it split the last one? */ last = find_vma(mm, end); if (last && end > last->vm_start) { int error = __split_vma(mm, last, end, 1); if (error) return error; } vma = prev ? prev->vm_next : mm->mmap; if (unlikely(uf)) { /* * If userfaultfd_unmap_prep returns an error the vmas * will remain splitted, but userland will get a * highly unexpected error anyway. This is no * different than the case where the first of the two * __split_vma fails, but we don't undo the first * split, despite we could. This is unlikely enough * failure that it's not worth optimizing it for. 
*/ int error = userfaultfd_unmap_prep(vma, start, end, uf); if (error) return error; } /* * unlock any mlock()ed ranges before detaching vmas */ if (mm->locked_vm) { struct vm_area_struct *tmp = vma; while (tmp && tmp->vm_start < end) { if (tmp->vm_flags & VM_LOCKED) { mm->locked_vm -= vma_pages(tmp); munlock_vma_pages_all(tmp); } tmp = tmp->vm_next; } } /* Detach vmas from rbtree */ detach_vmas_to_be_unmapped(mm, vma, prev, end); /* * mpx unmap needs to be called with mmap_sem held for write. * It is safe to call it before unmap_region(). */ arch_unmap(mm, vma, start, end); if (downgrade) downgrade_write(&mm->mmap_sem); unmap_region(mm, vma, prev, start, end); /* Fix up all other VM information */ remove_vma_list(mm, vma); return downgrade ? 1 : 0; } int do_munmap(struct mm_struct *mm, unsigned long start, size_t len, struct list_head *uf) { return __do_munmap(mm, start, len, uf, false); } static int __vm_munmap(unsigned long start, size_t len, bool downgrade) { int ret; struct mm_struct *mm = current->mm; LIST_HEAD(uf); if (down_write_killable(&mm->mmap_sem)) return -EINTR; ret = __do_munmap(mm, start, len, &uf, downgrade); /* * Returning 1 indicates mmap_sem is downgraded. * But 1 is not legal return value of vm_munmap() and munmap(), reset * it to 0 before return. */ if (ret == 1) { up_read(&mm->mmap_sem); ret = 0; } else up_write(&mm->mmap_sem); userfaultfd_unmap_complete(mm, &uf); return ret; } int vm_munmap(unsigned long start, size_t len) { return __vm_munmap(start, len, false); } EXPORT_SYMBOL(vm_munmap); SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len) { profile_munmap(addr); return __vm_munmap(addr, len, true); } /* * Emulation of deprecated remap_file_pages() syscall. */ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size, unsigned long, prot, unsigned long, pgoff, unsigned long, flags) { struct mm_struct *mm = current->mm; struct vm_area_struct *vma; unsigned long populate = 0; unsigned long ret = -EINVAL; struct file *file; pr_warn_once("%s (%d) uses deprecated remap_file_pages() syscall. See Documentation/vm/remap_file_pages.rst.\n", current->comm, current->pid); if (prot) return ret; start = start & PAGE_MASK; size = size & PAGE_MASK; if (start + size <= start) return ret; /* Does pgoff wrap? */ if (pgoff + (size >> PAGE_SHIFT) < pgoff) return ret; if (down_write_killable(&mm->mmap_sem)) return -EINTR; vma = find_vma(mm, start); if (!vma || !(vma->vm_flags & VM_SHARED)) goto out; if (start < vma->vm_start) goto out; if (start + size > vma->vm_end) { struct vm_area_struct *next; for (next = vma->vm_next; next; next = next->vm_next) { /* hole between vmas ? */ if (next->vm_start != next->vm_prev->vm_end) goto out; if (next->vm_file != vma->vm_file) goto out; if (next->vm_flags != vma->vm_flags) goto out; if (start + size <= next->vm_end) break; } if (!next) goto out; } prot |= vma->vm_flags & VM_READ ? PROT_READ : 0; prot |= vma->vm_flags & VM_WRITE ? PROT_WRITE : 0; prot |= vma->vm_flags & VM_EXEC ? PROT_EXEC : 0; flags &= MAP_NONBLOCK; flags |= MAP_SHARED | MAP_FIXED | MAP_POPULATE; if (vma->vm_flags & VM_LOCKED) { struct vm_area_struct *tmp; flags |= MAP_LOCKED; /* drop PG_Mlocked flag for over-mapped range */ for (tmp = vma; tmp->vm_start >= start + size; tmp = tmp->vm_next) { /* * Split pmd and munlock page on the border * of the range. 
*/ vma_adjust_trans_huge(tmp, start, start + size, 0); munlock_vma_pages_range(tmp, max(tmp->vm_start, start), min(tmp->vm_end, start + size)); } } file = get_file(vma->vm_file); ret = do_mmap_pgoff(vma->vm_file, start, size, prot, flags, pgoff, &populate, NULL); fput(file); out: up_write(&mm->mmap_sem); if (populate) mm_populate(ret, populate); if (!IS_ERR_VALUE(ret)) ret = 0; return ret; } /* * this is really a simplified "do_mmap". it only handles * anonymous maps. eventually we may be able to do some * brk-specific accounting here. */ static int do_brk_flags(unsigned long addr, unsigned long len, unsigned long flags, struct list_head *uf) { struct mm_struct *mm = current->mm; struct vm_area_struct *vma, *prev; struct rb_node **rb_link, *rb_parent; pgoff_t pgoff = addr >> PAGE_SHIFT; int error; /* Until we need other flags, refuse anything except VM_EXEC. */ if ((flags & (~VM_EXEC)) != 0) return -EINVAL; flags |= VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags; error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED); if (offset_in_page(error)) return error; error = mlock_future_check(mm, mm->def_flags, len); if (error) return error; /* * Clear old maps. this also does some error checking for us */ while (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) { if (do_munmap(mm, addr, len, uf)) return -ENOMEM; } /* Check against address space limits *after* clearing old maps... */ if (!may_expand_vm(mm, flags, len >> PAGE_SHIFT)) return -ENOMEM; if (mm->map_count > sysctl_max_map_count) return -ENOMEM; if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT)) return -ENOMEM; /* Can we just expand an old private anonymous mapping? */ vma = vma_merge(mm, prev, addr, addr + len, flags, NULL, NULL, pgoff, NULL, NULL_VM_UFFD_CTX); if (vma) goto out; /* * create a vma struct for an anonymous mapping */ vma = vm_area_alloc(mm); if (!vma) { vm_unacct_memory(len >> PAGE_SHIFT); return -ENOMEM; } vma_set_anonymous(vma); vma->vm_start = addr; vma->vm_end = addr + len; vma->vm_pgoff = pgoff; vma->vm_flags = flags; vma->vm_page_prot = vm_get_page_prot(flags); vma_link(mm, vma, prev, rb_link, rb_parent); out: perf_event_mmap(vma); mm->total_vm += len >> PAGE_SHIFT; mm->data_vm += len >> PAGE_SHIFT; if (flags & VM_LOCKED) mm->locked_vm += (len >> PAGE_SHIFT); vma->vm_flags |= VM_SOFTDIRTY; return 0; } int vm_brk_flags(unsigned long addr, unsigned long request, unsigned long flags) { struct mm_struct *mm = current->mm; unsigned long len; int ret; bool populate; LIST_HEAD(uf); len = PAGE_ALIGN(request); if (len < request) return -ENOMEM; if (!len) return 0; if (down_write_killable(&mm->mmap_sem)) return -EINTR; ret = do_brk_flags(addr, len, flags, &uf); populate = ((mm->def_flags & VM_LOCKED) != 0); up_write(&mm->mmap_sem); userfaultfd_unmap_complete(mm, &uf); if (populate && !ret) mm_populate(addr, len); return ret; } EXPORT_SYMBOL(vm_brk_flags); int vm_brk(unsigned long addr, unsigned long len) { return vm_brk_flags(addr, len, 0); } EXPORT_SYMBOL(vm_brk); /* Release all mmaps. */ void exit_mmap(struct mm_struct *mm) { struct mmu_gather tlb; struct vm_area_struct *vma; unsigned long nr_accounted = 0; /* mm's last user has gone, and its about to be pulled down */ mmu_notifier_release(mm); if (unlikely(mm_is_oom_victim(mm))) { /* * Manually reap the mm to free as much memory as possible. * Then, as the oom reaper does, set MMF_OOM_SKIP to disregard * this mm from further consideration. 
Taking mm->mmap_sem for * write after setting MMF_OOM_SKIP will guarantee that the oom * reaper will not run on this mm again after mmap_sem is * dropped. * * Nothing can be holding mm->mmap_sem here and the above call * to mmu_notifier_release(mm) ensures mmu notifier callbacks in * __oom_reap_task_mm() will not block. * * This needs to be done before calling munlock_vma_pages_all(), * which clears VM_LOCKED, otherwise the oom reaper cannot * reliably test it. */ (void)__oom_reap_task_mm(mm); set_bit(MMF_OOM_SKIP, &mm->flags); down_write(&mm->mmap_sem); up_write(&mm->mmap_sem); } if (mm->locked_vm) { vma = mm->mmap; while (vma) { if (vma->vm_flags & VM_LOCKED) munlock_vma_pages_all(vma); vma = vma->vm_next; } } arch_exit_mmap(mm); vma = mm->mmap; if (!vma) /* Can happen if dup_mmap() received an OOM */ return; lru_add_drain(); flush_cache_mm(mm); tlb_gather_mmu(&tlb, mm, 0, -1); /* update_hiwater_rss(mm) here? but nobody should be looking */ /* Use -1 here to ensure all VMAs in the mm are unmapped */ unmap_vmas(&tlb, vma, 0, -1); free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, USER_PGTABLES_CEILING); tlb_finish_mmu(&tlb, 0, -1); /* * Walk the list again, actually closing and freeing it, * with preemption enabled, without holding any MM locks. */ while (vma) { if (vma->vm_flags & VM_ACCOUNT) nr_accounted += vma_pages(vma); vma = remove_vma(vma); } vm_unacct_memory(nr_accounted); } /* Insert vm structure into process list sorted by address * and into the inode's i_mmap tree. If vm_file is non-NULL * then i_mmap_rwsem is taken here. */ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma) { struct vm_area_struct *prev; struct rb_node **rb_link, *rb_parent; if (find_vma_links(mm, vma->vm_start, vma->vm_end, &prev, &rb_link, &rb_parent)) return -ENOMEM; if ((vma->vm_flags & VM_ACCOUNT) && security_vm_enough_memory_mm(mm, vma_pages(vma))) return -ENOMEM; /* * The vm_pgoff of a purely anonymous vma should be irrelevant * until its first write fault, when page's anon_vma and index * are set. But now set the vm_pgoff it will almost certainly * end up with (unless mremap moves it elsewhere before that * first wfault), so /proc/pid/maps tells a consistent story. * * By setting it to reflect the virtual start address of the * vma, merges and splits can happen in a seamless way, just * using the existing file pgoff checks and manipulations. * Similarly in do_mmap_pgoff and in do_brk. */ if (vma_is_anonymous(vma)) { BUG_ON(vma->anon_vma); vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT; } vma_link(mm, vma, prev, rb_link, rb_parent); return 0; } /* * Copy the vma structure to a new location in the same mm, * prior to moving page table entries, to effect an mremap move. */ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap, unsigned long addr, unsigned long len, pgoff_t pgoff, bool *need_rmap_locks) { struct vm_area_struct *vma = *vmap; unsigned long vma_start = vma->vm_start; struct mm_struct *mm = vma->vm_mm; struct vm_area_struct *new_vma, *prev; struct rb_node **rb_link, *rb_parent; bool faulted_in_anon_vma = true; /* * If anonymous vma has not yet been faulted, update new pgoff * to match new location, to increase its chance of merging. 
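 *
 * E.g. (illustrative, 4kB pages): moving an unfaulted anonymous vma to
 * addr 0x7f0000400000 resets pgoff to addr >> PAGE_SHIFT = 0x7f0000400,
 * restoring the vm_pgoff linearity that vma_merge() requires at the
 * destination.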
*/ if (unlikely(vma_is_anonymous(vma) && !vma->anon_vma)) { pgoff = addr >> PAGE_SHIFT; faulted_in_anon_vma = false; } if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) return NULL; /* should never get here */ new_vma = vma_merge(mm, prev, addr, addr + len, vma->vm_flags, vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma), vma->vm_userfaultfd_ctx); if (new_vma) { /* * Source vma may have been merged into new_vma */ if (unlikely(vma_start >= new_vma->vm_start && vma_start < new_vma->vm_end)) { /* * The only way we can get a vma_merge with * self during an mremap is if the vma hasn't * been faulted in yet and we were allowed to * reset the dst vma->vm_pgoff to the * destination address of the mremap to allow * the merge to happen. mremap must change the * vm_pgoff linearity between src and dst vmas * (in turn preventing a vma_merge) to be * safe. It is only safe to keep the vm_pgoff * linear if there are no pages mapped yet. */ VM_BUG_ON_VMA(faulted_in_anon_vma, new_vma); *vmap = vma = new_vma; } *need_rmap_locks = (new_vma->vm_pgoff <= vma->vm_pgoff); } else { new_vma = vm_area_dup(vma); if (!new_vma) goto out; new_vma->vm_start = addr; new_vma->vm_end = addr + len; new_vma->vm_pgoff = pgoff; if (vma_dup_policy(vma, new_vma)) goto out_free_vma; if (anon_vma_clone(new_vma, vma)) goto out_free_mempol; if (new_vma->vm_file) get_file(new_vma->vm_file); if (new_vma->vm_ops && new_vma->vm_ops->open) new_vma->vm_ops->open(new_vma); vma_link(mm, new_vma, prev, rb_link, rb_parent); *need_rmap_locks = false; } return new_vma; out_free_mempol: mpol_put(vma_policy(new_vma)); out_free_vma: vm_area_free(new_vma); out: return NULL; } /* * Return true if the calling process may expand its vm space by the passed * number of pages */ bool may_expand_vm(struct mm_struct *mm, vm_flags_t flags, unsigned long npages) { if (mm->total_vm + npages > rlimit(RLIMIT_AS) >> PAGE_SHIFT) return false; if (is_data_mapping(flags) && mm->data_vm + npages > rlimit(RLIMIT_DATA) >> PAGE_SHIFT) { /* Workaround for Valgrind */ if (rlimit(RLIMIT_DATA) == 0 && mm->data_vm + npages <= rlimit_max(RLIMIT_DATA) >> PAGE_SHIFT) return true; pr_warn_once("%s (%d): VmData %lu exceed data ulimit %lu. Update limits%s.\n", current->comm, current->pid, (mm->data_vm + npages) << PAGE_SHIFT, rlimit(RLIMIT_DATA), ignore_rlimit_data ? "" : " or use boot option ignore_rlimit_data"); if (!ignore_rlimit_data) return false; } return true; } void vm_stat_account(struct mm_struct *mm, vm_flags_t flags, long npages) { mm->total_vm += npages; if (is_exec_mapping(flags)) mm->exec_vm += npages; else if (is_stack_mapping(flags)) mm->stack_vm += npages; else if (is_data_mapping(flags)) mm->data_vm += npages; } static vm_fault_t special_mapping_fault(struct vm_fault *vmf); /* * Having a close hook prevents vma merging regardless of flags. 
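 *
 * (vma_merge() refuses to merge any vma whose vm_ops provides ->close,
 * so installing this empty hook is enough to keep special mappings,
 * such as the vdso, as standalone vmas.)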
*/ static void special_mapping_close(struct vm_area_struct *vma) { } static const char *special_mapping_name(struct vm_area_struct *vma) { return ((struct vm_special_mapping *)vma->vm_private_data)->name; } static int special_mapping_mremap(struct vm_area_struct *new_vma) { struct vm_special_mapping *sm = new_vma->vm_private_data; if (WARN_ON_ONCE(current->mm != new_vma->vm_mm)) return -EFAULT; if (sm->mremap) return sm->mremap(sm, new_vma); return 0; } static const struct vm_operations_struct special_mapping_vmops = { .close = special_mapping_close, .fault = special_mapping_fault, .mremap = special_mapping_mremap, .name = special_mapping_name, }; static const struct vm_operations_struct legacy_special_mapping_vmops = { .close = special_mapping_close, .fault = special_mapping_fault, }; static vm_fault_t special_mapping_fault(struct vm_fault *vmf) { struct vm_area_struct *vma = vmf->vma; pgoff_t pgoff; struct page **pages; if (vma->vm_ops == &legacy_special_mapping_vmops) { pages = vma->vm_private_data; } else { struct vm_special_mapping *sm = vma->vm_private_data; if (sm->fault) return sm->fault(sm, vmf->vma, vmf); pages = sm->pages; } for (pgoff = vmf->pgoff; pgoff && *pages; ++pages) pgoff--; if (*pages) { struct page *page = *pages; get_page(page); vmf->page = page; return 0; } return VM_FAULT_SIGBUS; } static struct vm_area_struct *__install_special_mapping( struct mm_struct *mm, unsigned long addr, unsigned long len, unsigned long vm_flags, void *priv, const struct vm_operations_struct *ops) { int ret; struct vm_area_struct *vma; vma = vm_area_alloc(mm); if (unlikely(vma == NULL)) return ERR_PTR(-ENOMEM); vma->vm_start = addr; vma->vm_end = addr + len; vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND | VM_SOFTDIRTY; vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); vma->vm_ops = ops; vma->vm_private_data = priv; ret = insert_vm_struct(mm, vma); if (ret) goto out; vm_stat_account(mm, vma->vm_flags, len >> PAGE_SHIFT); perf_event_mmap(vma); return vma; out: vm_area_free(vma); return ERR_PTR(ret); } bool vma_is_special_mapping(const struct vm_area_struct *vma, const struct vm_special_mapping *sm) { return vma->vm_private_data == sm && (vma->vm_ops == &special_mapping_vmops || vma->vm_ops == &legacy_special_mapping_vmops); } /* * Called with mm->mmap_sem held for writing. * Insert a new vma covering the given region, with the given flags. * Its pages are supplied by the given array of struct page *. * The array can be shorter than len >> PAGE_SHIFT if it's null-terminated. * The region past the last page supplied will always produce SIGBUS. * The array pointer and the pages it points to are assumed to stay alive * for as long as this mapping might exist. 
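 *
 * Minimal caller sketch (hypothetical names, modeled on vdso-style
 * users of this API):
 *
 *   static struct page *my_pages[] = { page, NULL };  // NULL-terminated
 *   static const struct vm_special_mapping my_map = {
 *           .name  = "[my_special]",
 *           .pages = my_pages,
 *   };
 *
 *   vma = _install_special_mapping(mm, addr, PAGE_SIZE,
 *                                  VM_READ | VM_MAYREAD, &my_map);
 *   if (IS_ERR(vma))
 *           return PTR_ERR(vma);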
 */
struct vm_area_struct *_install_special_mapping(
	struct mm_struct *mm,
	unsigned long addr, unsigned long len,
	unsigned long vm_flags, const struct vm_special_mapping *spec)
{
	return __install_special_mapping(mm, addr, len, vm_flags, (void *)spec,
					&special_mapping_vmops);
}

int install_special_mapping(struct mm_struct *mm,
			    unsigned long addr, unsigned long len,
			    unsigned long vm_flags, struct page **pages)
{
	struct vm_area_struct *vma = __install_special_mapping(
		mm, addr, len, vm_flags, (void *)pages,
		&legacy_special_mapping_vmops);

	return PTR_ERR_OR_ZERO(vma);
}

static DEFINE_MUTEX(mm_all_locks_mutex);

static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma)
{
	if (!test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) {
		/*
		 * The LSB of head.next can't change from under us
		 * because we hold the mm_all_locks_mutex.
		 */
		down_write_nest_lock(&anon_vma->root->rwsem, &mm->mmap_sem);
		/*
		 * We can safely modify head.next after taking the
		 * anon_vma->root->rwsem. If some other vma in this mm shares
		 * the same anon_vma we won't take it again.
		 *
		 * No need of atomic instructions here, head.next
		 * can't change from under us thanks to the
		 * anon_vma->root->rwsem.
		 */
		if (__test_and_set_bit(0, (unsigned long *)
				       &anon_vma->root->rb_root.rb_root.rb_node))
			BUG();
	}
}

static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping)
{
	if (!test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
		/*
		 * AS_MM_ALL_LOCKS can't change from under us because
		 * we hold the mm_all_locks_mutex.
		 *
		 * Operations on ->flags have to be atomic because
		 * even if AS_MM_ALL_LOCKS is stable thanks to the
		 * mm_all_locks_mutex, there may be other cpus
		 * changing other bitflags in parallel to us.
		 */
		if (test_and_set_bit(AS_MM_ALL_LOCKS, &mapping->flags))
			BUG();
		down_write_nest_lock(&mapping->i_mmap_rwsem, &mm->mmap_sem);
	}
}

/*
 * This operation locks against the VM for all pte/vma/mm related
 * operations that could ever happen on a certain mm. This includes
 * vmtruncate, try_to_unmap, and all page faults.
 *
 * The caller must take the mmap_sem in write mode before calling
 * mm_take_all_locks(). The caller isn't allowed to release the
 * mmap_sem until mm_drop_all_locks() returns.
 *
 * mmap_sem in write mode is required in order to block all operations
 * that could modify pagetables and free pages without need of
 * altering the vma layout. It's also needed in write mode to avoid new
 * anon_vmas being associated with existing vmas.
 *
 * A single task can't take more than one mm_take_all_locks() in a row
 * or it would deadlock.
 *
 * The LSB in anon_vma->rb_root.rb_node and the AS_MM_ALL_LOCKS bitflag in
 * mapping->flags avoid taking the same lock twice, if more than one
 * vma in this mm is backed by the same anon_vma or address_space.
 *
 * We take locks in the following order, according to the comment at the
 * beginning of mm/rmap.c:
 * - all hugetlbfs_i_mmap_rwsem_key locks (aka mapping->i_mmap_rwsem for
 *   hugetlb mapping);
 * - all i_mmap_rwsem locks;
 * - all anon_vma->rwsems
 *
 * We can take all locks within these types randomly because the VM code
 * doesn't nest them and we're protected from parallel mm_take_all_locks() by
 * mm_all_locks_mutex.
 *
 * mm_take_all_locks() and mm_drop_all_locks() are expensive operations
 * that may have to take thousands of locks.
 *
 * mm_take_all_locks() can fail if it's interrupted by signals.
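 *
 * Caller sketch (hypothetical; mmu notifier registration is the
 * classic user of this interface):
 *
 *   down_write(&mm->mmap_sem);
 *   if (mm_take_all_locks(mm)) {          // -EINTR on pending signal
 *           up_write(&mm->mmap_sem);
 *           return -EINTR;
 *   }
 *   ... walk/modify structures no fault path may observe ...
 *   mm_drop_all_locks(mm);
 *   up_write(&mm->mmap_sem);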
*/ int mm_take_all_locks(struct mm_struct *mm) { struct vm_area_struct *vma; struct anon_vma_chain *avc; BUG_ON(down_read_trylock(&mm->mmap_sem)); mutex_lock(&mm_all_locks_mutex); for (vma = mm->mmap; vma; vma = vma->vm_next) { if (signal_pending(current)) goto out_unlock; if (vma->vm_file && vma->vm_file->f_mapping && is_vm_hugetlb_page(vma)) vm_lock_mapping(mm, vma->vm_file->f_mapping); } for (vma = mm->mmap; vma; vma = vma->vm_next) { if (signal_pending(current)) goto out_unlock; if (vma->vm_file && vma->vm_file->f_mapping && !is_vm_hugetlb_page(vma)) vm_lock_mapping(mm, vma->vm_file->f_mapping); } for (vma = mm->mmap; vma; vma = vma->vm_next) { if (signal_pending(current)) goto out_unlock; if (vma->anon_vma) list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) vm_lock_anon_vma(mm, avc->anon_vma); } return 0; out_unlock: mm_drop_all_locks(mm); return -EINTR; } static void vm_unlock_anon_vma(struct anon_vma *anon_vma) { if (test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) { /* * The LSB of head.next can't change to 0 from under * us because we hold the mm_all_locks_mutex. * * We must however clear the bitflag before unlocking * the vma so the users using the anon_vma->rb_root will * never see our bitflag. * * No need of atomic instructions here, head.next * can't change from under us until we release the * anon_vma->root->rwsem. */ if (!__test_and_clear_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) BUG(); anon_vma_unlock_write(anon_vma); } } static void vm_unlock_mapping(struct address_space *mapping) { if (test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) { /* * AS_MM_ALL_LOCKS can't change to 0 from under us * because we hold the mm_all_locks_mutex. */ i_mmap_unlock_write(mapping); if (!test_and_clear_bit(AS_MM_ALL_LOCKS, &mapping->flags)) BUG(); } } /* * The mmap_sem cannot be released by the caller until * mm_drop_all_locks() returns. */ void mm_drop_all_locks(struct mm_struct *mm) { struct vm_area_struct *vma; struct anon_vma_chain *avc; BUG_ON(down_read_trylock(&mm->mmap_sem)); BUG_ON(!mutex_is_locked(&mm_all_locks_mutex)); for (vma = mm->mmap; vma; vma = vma->vm_next) { if (vma->anon_vma) list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) vm_unlock_anon_vma(avc->anon_vma); if (vma->vm_file && vma->vm_file->f_mapping) vm_unlock_mapping(vma->vm_file->f_mapping); } mutex_unlock(&mm_all_locks_mutex); } /* * initialise the percpu counter for VM */ void __init mmap_init(void) { int ret; ret = percpu_counter_init(&vm_committed_as, 0, GFP_KERNEL); VM_BUG_ON(ret); } /* * Initialise sysctl_user_reserve_kbytes. * * This is intended to prevent a user from starting a single memory hogging * process, such that they cannot recover (kill the hog) in OVERCOMMIT_NEVER * mode. * * The default value is min(3% of free memory, 128MB) * 128MB is enough to recover with sshd/login, bash, and top/kill. */ static int init_user_reserve(void) { unsigned long free_kbytes; free_kbytes = global_zone_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10); sysctl_user_reserve_kbytes = min(free_kbytes / 32, 1UL << 17); return 0; } subsys_initcall(init_user_reserve); /* * Initialise sysctl_admin_reserve_kbytes. * * The purpose of sysctl_admin_reserve_kbytes is to allow the sys admin * to log in and kill a memory hogging process. * * Systems with more than 256MB will reserve 8MB, enough to recover * with sshd, bash, and top in OVERCOMMIT_GUESS. Smaller systems will * only reserve 3% of free pages by default. 
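 *
 * The arithmetic below: free_kbytes / 32 is ~3% of free memory, capped
 * at 1UL << 13 kB = 8MB. E.g. 128MB free gives min(4MB, 8MB) = 4MB,
 * while 1GB free gives min(32MB, 8MB) = 8MB.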
 */
static int init_admin_reserve(void)
{
	unsigned long free_kbytes;

	free_kbytes = global_zone_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);

	sysctl_admin_reserve_kbytes = min(free_kbytes / 32, 1UL << 13);
	return 0;
}
subsys_initcall(init_admin_reserve);

/*
 * Reinitialise user and admin reserves if memory is added or removed.
 *
 * The default user reserve max is 128MB, and the default max for the
 * admin reserve is 8MB. These are usually, but not always, enough to
 * enable recovery from a memory hogging process using login/sshd, a shell,
 * and tools like top. It may make sense to increase or even disable the
 * reserve depending on the existence of swap or variations in the recovery
 * tools. So, the admin may have changed them.
 *
 * If memory is added and the reserves have been eliminated or increased above
 * the default max, then we'll trust the admin.
 *
 * If memory is removed and there isn't enough free memory, then we
 * need to reset the reserves.
 *
 * Otherwise keep the reserve set by the admin.
 */
static int reserve_mem_notifier(struct notifier_block *nb,
			     unsigned long action, void *data)
{
	unsigned long tmp, free_kbytes;

	switch (action) {
	case MEM_ONLINE:
		/* Default max is 128MB. Leave alone if modified by operator. */
		tmp = sysctl_user_reserve_kbytes;
		if (0 < tmp && tmp < (1UL << 17))
			init_user_reserve();

		/* Default max is 8MB. Leave alone if modified by operator. */
		tmp = sysctl_admin_reserve_kbytes;
		if (0 < tmp && tmp < (1UL << 13))
			init_admin_reserve();

		break;
	case MEM_OFFLINE:
		free_kbytes = global_zone_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);

		if (sysctl_user_reserve_kbytes > free_kbytes) {
			init_user_reserve();
			pr_info("vm.user_reserve_kbytes reset to %lu\n",
				sysctl_user_reserve_kbytes);
		}

		if (sysctl_admin_reserve_kbytes > free_kbytes) {
			init_admin_reserve();
			pr_info("vm.admin_reserve_kbytes reset to %lu\n",
				sysctl_admin_reserve_kbytes);
		}
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block reserve_mem_nb = {
	.notifier_call = reserve_mem_notifier,
};

static int __meminit init_reserve_notifier(void)
{
	if (register_hotmemory_notifier(&reserve_mem_nb))
		pr_err("Failed registering memory add/remove notifier for admin reserve\n");

	return 0;
}
subsys_initcall(init_reserve_notifier);
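/*
 * Illustration only, excluded from the build: the reserve knobs
 * maintained above are exported as vm.user_reserve_kbytes and
 * vm.admin_reserve_kbytes, so their current values can be read from
 * user space. A sketch of a hypothetical helper, not kernel code:
 */
#if 0
#include <stdio.h>

static long read_kbytes(const char *path)
{
	long val = -1;
	FILE *f = fopen(path, "r");

	if (f) {
		if (fscanf(f, "%ld", &val) != 1)
			val = -1;
		fclose(f);
	}
	return val;
}

int main(void)
{
	printf("user reserve:  %ld kB\n",
	       read_kbytes("/proc/sys/vm/user_reserve_kbytes"));
	printf("admin reserve: %ld kB\n",
	       read_kbytes("/proc/sys/vm/admin_reserve_kbytes"));
	return 0;
}
#endif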
/* * mm/mmap.c * * Written by obz. * * Address space accounting code <alan@lxorguk.ukuu.org.uk> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/slab.h> #include <linux/backing-dev.h> #include <linux/mm.h> #include <linux/vmacache.h> #include <linux/shm.h> #include <linux/mman.h> #include <linux/pagemap.h> #include <linux/swap.h> #include <linux/syscalls.h> #include <linux/capability.h> #include <linux/init.h> #include <linux/file.h> #include <linux/fs.h> #include <linux/personality.h> #include <linux/security.h> #include <linux/hugetlb.h> #include <linux/shmem_fs.h> #include <linux/profile.h> #include <linux/export.h> #include <linux/mount.h> #include <linux/mempolicy.h> #include <linux/rmap.h> #include <linux/mmu_notifier.h> #include <linux/mmdebug.h> #include <linux/perf_event.h> #include <linux/audit.h> #include <linux/khugepaged.h> #include <linux/uprobes.h> #include <linux/rbtree_augmented.h> #include <linux/notifier.h> #include <linux/memory.h> #include <linux/printk.h> #include <linux/userfaultfd_k.h> #include <linux/moduleparam.h> #include <linux/pkeys.h> #include <linux/oom.h> #include <linux/uaccess.h> #include <asm/cacheflush.h> #include <asm/tlb.h> #include <asm/mmu_context.h> #include "internal.h" #ifndef arch_mmap_check #define arch_mmap_check(addr, len, flags) (0) #endif #ifdef CONFIG_HAVE_ARCH_MMAP_RND_BITS const int mmap_rnd_bits_min = CONFIG_ARCH_MMAP_RND_BITS_MIN; const int mmap_rnd_bits_max = CONFIG_ARCH_MMAP_RND_BITS_MAX; int mmap_rnd_bits __read_mostly = CONFIG_ARCH_MMAP_RND_BITS; #endif #ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS const int mmap_rnd_compat_bits_min = CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN; const int mmap_rnd_compat_bits_max = CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX; int mmap_rnd_compat_bits __read_mostly = CONFIG_ARCH_MMAP_RND_COMPAT_BITS; #endif static bool ignore_rlimit_data; core_param(ignore_rlimit_data, ignore_rlimit_data, bool, 0644); static void unmap_region(struct mm_struct *mm, struct vm_area_struct *vma, struct vm_area_struct *prev, unsigned long start, unsigned long end); /* description of effects of mapping type and prot in current implementation. * this is due to the limited x86 page protection hardware. The expected * behavior is in parens: * * map_type prot * PROT_NONE PROT_READ PROT_WRITE PROT_EXEC * MAP_SHARED r: (no) no r: (yes) yes r: (no) yes r: (no) yes * w: (no) no w: (no) no w: (yes) yes w: (no) no * x: (no) no x: (no) yes x: (no) yes x: (yes) yes * * MAP_PRIVATE r: (no) no r: (yes) yes r: (no) yes r: (no) yes * w: (no) no w: (no) no w: (copy) copy w: (no) no * x: (no) no x: (no) yes x: (no) yes x: (yes) yes * * On arm64, PROT_EXEC has the following behaviour for both MAP_SHARED and * MAP_PRIVATE: * r: (no) no * w: (no) no * x: (yes) yes */ pgprot_t protection_map[16] __ro_after_init = { __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111, __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111 }; #ifndef CONFIG_ARCH_HAS_FILTER_PGPROT static inline pgprot_t arch_filter_pgprot(pgprot_t prot) { return prot; } #endif pgprot_t vm_get_page_prot(unsigned long vm_flags) { pgprot_t ret = __pgprot(pgprot_val(protection_map[vm_flags & (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) | pgprot_val(arch_vm_get_page_prot(vm_flags))); return arch_filter_pgprot(ret); } EXPORT_SYMBOL(vm_get_page_prot); static pgprot_t vm_pgprot_modify(pgprot_t oldprot, unsigned long vm_flags) { return pgprot_modify(oldprot, vm_get_page_prot(vm_flags)); } /* Update vma->vm_page_prot to reflect vma->vm_flags. 
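 *
 * If vma_wants_writenotify() says so, the VM_SHARED bit is masked out
 * of the flags used for the protection lookup, so shared pages start
 * out write-protected and the first write faults into ->page_mkwrite.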
*/ void vma_set_page_prot(struct vm_area_struct *vma) { unsigned long vm_flags = vma->vm_flags; pgprot_t vm_page_prot; vm_page_prot = vm_pgprot_modify(vma->vm_page_prot, vm_flags); if (vma_wants_writenotify(vma, vm_page_prot)) { vm_flags &= ~VM_SHARED; vm_page_prot = vm_pgprot_modify(vm_page_prot, vm_flags); } /* remove_protection_ptes reads vma->vm_page_prot without mmap_sem */ WRITE_ONCE(vma->vm_page_prot, vm_page_prot); } /* * Requires inode->i_mapping->i_mmap_rwsem */ static void __remove_shared_vm_struct(struct vm_area_struct *vma, struct file *file, struct address_space *mapping) { if (vma->vm_flags & VM_DENYWRITE) atomic_inc(&file_inode(file)->i_writecount); if (vma->vm_flags & VM_SHARED) mapping_unmap_writable(mapping); flush_dcache_mmap_lock(mapping); vma_interval_tree_remove(vma, &mapping->i_mmap); flush_dcache_mmap_unlock(mapping); } /* * Unlink a file-based vm structure from its interval tree, to hide * vma from rmap and vmtruncate before freeing its page tables. */ void unlink_file_vma(struct vm_area_struct *vma) { struct file *file = vma->vm_file; if (file) { struct address_space *mapping = file->f_mapping; i_mmap_lock_write(mapping); __remove_shared_vm_struct(vma, file, mapping); i_mmap_unlock_write(mapping); } } /* * Close a vm structure and free it, returning the next. */ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma) { struct vm_area_struct *next = vma->vm_next; might_sleep(); if (vma->vm_ops && vma->vm_ops->close) vma->vm_ops->close(vma); if (vma->vm_file) fput(vma->vm_file); mpol_put(vma_policy(vma)); vm_area_free(vma); return next; } static int do_brk_flags(unsigned long addr, unsigned long request, unsigned long flags, struct list_head *uf); SYSCALL_DEFINE1(brk, unsigned long, brk) { unsigned long retval; unsigned long newbrk, oldbrk, origbrk; struct mm_struct *mm = current->mm; struct vm_area_struct *next; unsigned long min_brk; bool populate; bool downgraded = false; LIST_HEAD(uf); if (down_write_killable(&mm->mmap_sem)) return -EINTR; origbrk = mm->brk; #ifdef CONFIG_COMPAT_BRK /* * CONFIG_COMPAT_BRK can still be overridden by setting * randomize_va_space to 2, which will still cause mm->start_brk * to be arbitrarily shifted */ if (current->brk_randomized) min_brk = mm->start_brk; else min_brk = mm->end_data; #else min_brk = mm->start_brk; #endif if (brk < min_brk) goto out; /* * Check against rlimit here. If this check is done later after the test * of oldbrk with newbrk then it can escape the test and let the data * segment grow beyond its set limit the in case where the limit is * not page aligned -Ram Gupta */ if (check_data_rlimit(rlimit(RLIMIT_DATA), brk, mm->start_brk, mm->end_data, mm->start_data)) goto out; newbrk = PAGE_ALIGN(brk); oldbrk = PAGE_ALIGN(mm->brk); if (oldbrk == newbrk) { mm->brk = brk; goto success; } /* * Always allow shrinking brk. * __do_munmap() may downgrade mmap_sem to read. */ if (brk <= mm->brk) { int ret; /* * mm->brk must to be protected by write mmap_sem so update it * before downgrading mmap_sem. When __do_munmap() fails, * mm->brk will be restored from origbrk. */ mm->brk = brk; ret = __do_munmap(mm, newbrk, oldbrk-newbrk, &uf, true); if (ret < 0) { mm->brk = origbrk; goto out; } else if (ret == 1) { downgraded = true; } goto success; } /* Check against existing mmap mappings. */ next = find_vma(mm, oldbrk); if (next && newbrk + PAGE_SIZE > vm_start_gap(next)) goto out; /* Ok, looks good - let it rip. 
*/ if (do_brk_flags(oldbrk, newbrk-oldbrk, 0, &uf) < 0) goto out; mm->brk = brk; success: populate = newbrk > oldbrk && (mm->def_flags & VM_LOCKED) != 0; if (downgraded) up_read(&mm->mmap_sem); else up_write(&mm->mmap_sem); userfaultfd_unmap_complete(mm, &uf); if (populate) mm_populate(oldbrk, newbrk - oldbrk); return brk; out: retval = origbrk; up_write(&mm->mmap_sem); return retval; } static long vma_compute_subtree_gap(struct vm_area_struct *vma) { unsigned long max, prev_end, subtree_gap; /* * Note: in the rare case of a VM_GROWSDOWN above a VM_GROWSUP, we * allow two stack_guard_gaps between them here, and when choosing * an unmapped area; whereas when expanding we only require one. * That's a little inconsistent, but keeps the code here simpler. */ max = vm_start_gap(vma); if (vma->vm_prev) { prev_end = vm_end_gap(vma->vm_prev); if (max > prev_end) max -= prev_end; else max = 0; } if (vma->vm_rb.rb_left) { subtree_gap = rb_entry(vma->vm_rb.rb_left, struct vm_area_struct, vm_rb)->rb_subtree_gap; if (subtree_gap > max) max = subtree_gap; } if (vma->vm_rb.rb_right) { subtree_gap = rb_entry(vma->vm_rb.rb_right, struct vm_area_struct, vm_rb)->rb_subtree_gap; if (subtree_gap > max) max = subtree_gap; } return max; } #ifdef CONFIG_DEBUG_VM_RB static int browse_rb(struct mm_struct *mm) { struct rb_root *root = &mm->mm_rb; int i = 0, j, bug = 0; struct rb_node *nd, *pn = NULL; unsigned long prev = 0, pend = 0; for (nd = rb_first(root); nd; nd = rb_next(nd)) { struct vm_area_struct *vma; vma = rb_entry(nd, struct vm_area_struct, vm_rb); if (vma->vm_start < prev) { pr_emerg("vm_start %lx < prev %lx\n", vma->vm_start, prev); bug = 1; } if (vma->vm_start < pend) { pr_emerg("vm_start %lx < pend %lx\n", vma->vm_start, pend); bug = 1; } if (vma->vm_start > vma->vm_end) { pr_emerg("vm_start %lx > vm_end %lx\n", vma->vm_start, vma->vm_end); bug = 1; } spin_lock(&mm->page_table_lock); if (vma->rb_subtree_gap != vma_compute_subtree_gap(vma)) { pr_emerg("free gap %lx, correct %lx\n", vma->rb_subtree_gap, vma_compute_subtree_gap(vma)); bug = 1; } spin_unlock(&mm->page_table_lock); i++; pn = nd; prev = vma->vm_start; pend = vma->vm_end; } j = 0; for (nd = pn; nd; nd = rb_prev(nd)) j++; if (i != j) { pr_emerg("backwards %d, forwards %d\n", j, i); bug = 1; } return bug ? 
-1 : i; } static void validate_mm_rb(struct rb_root *root, struct vm_area_struct *ignore) { struct rb_node *nd; for (nd = rb_first(root); nd; nd = rb_next(nd)) { struct vm_area_struct *vma; vma = rb_entry(nd, struct vm_area_struct, vm_rb); VM_BUG_ON_VMA(vma != ignore && vma->rb_subtree_gap != vma_compute_subtree_gap(vma), vma); } } static void validate_mm(struct mm_struct *mm) { int bug = 0; int i = 0; unsigned long highest_address = 0; struct vm_area_struct *vma = mm->mmap; while (vma) { struct anon_vma *anon_vma = vma->anon_vma; struct anon_vma_chain *avc; if (anon_vma) { anon_vma_lock_read(anon_vma); list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) anon_vma_interval_tree_verify(avc); anon_vma_unlock_read(anon_vma); } highest_address = vm_end_gap(vma); vma = vma->vm_next; i++; } if (i != mm->map_count) { pr_emerg("map_count %d vm_next %d\n", mm->map_count, i); bug = 1; } if (highest_address != mm->highest_vm_end) { pr_emerg("mm->highest_vm_end %lx, found %lx\n", mm->highest_vm_end, highest_address); bug = 1; } i = browse_rb(mm); if (i != mm->map_count) { if (i != -1) pr_emerg("map_count %d rb %d\n", mm->map_count, i); bug = 1; } VM_BUG_ON_MM(bug, mm); } #else #define validate_mm_rb(root, ignore) do { } while (0) #define validate_mm(mm) do { } while (0) #endif RB_DECLARE_CALLBACKS(static, vma_gap_callbacks, struct vm_area_struct, vm_rb, unsigned long, rb_subtree_gap, vma_compute_subtree_gap) /* * Update augmented rbtree rb_subtree_gap values after vma->vm_start or * vma->vm_prev->vm_end values changed, without modifying the vma's position * in the rbtree. */ static void vma_gap_update(struct vm_area_struct *vma) { /* * As it turns out, RB_DECLARE_CALLBACKS() already created a callback * function that does exactly what we want. */ vma_gap_callbacks_propagate(&vma->vm_rb, NULL); } static inline void vma_rb_insert(struct vm_area_struct *vma, struct rb_root *root) { /* All rb_subtree_gap values must be consistent prior to insertion */ validate_mm_rb(root, NULL); rb_insert_augmented(&vma->vm_rb, root, &vma_gap_callbacks); } static void __vma_rb_erase(struct vm_area_struct *vma, struct rb_root *root) { /* * Note rb_erase_augmented is a fairly large inline function, * so make sure we instantiate it only once with our desired * augmented rbtree callbacks. */ rb_erase_augmented(&vma->vm_rb, root, &vma_gap_callbacks); } static __always_inline void vma_rb_erase_ignore(struct vm_area_struct *vma, struct rb_root *root, struct vm_area_struct *ignore) { /* * All rb_subtree_gap values must be consistent prior to erase, * with the possible exception of the "next" vma being erased if * next->vm_start was reduced. */ validate_mm_rb(root, ignore); __vma_rb_erase(vma, root); } static __always_inline void vma_rb_erase(struct vm_area_struct *vma, struct rb_root *root) { /* * All rb_subtree_gap values must be consistent prior to erase, * with the possible exception of the vma being erased. */ validate_mm_rb(root, vma); __vma_rb_erase(vma, root); } /* * vma has some anon_vma assigned, and is already inserted on that * anon_vma's interval trees. * * Before updating the vma's vm_start / vm_end / vm_pgoff fields, the * vma must be removed from the anon_vma's interval trees using * anon_vma_interval_tree_pre_update_vma(). * * After the update, the vma will be reinserted using * anon_vma_interval_tree_post_update_vma(). * * The entire update must be protected by exclusive mmap_sem and by * the root anon_vma's mutex. 
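 *
 * Concretely, expand_downwards() further below follows exactly this
 * protocol when it moves vm_start:
 *
 *	anon_vma_lock_write(vma->anon_vma);
 *	anon_vma_interval_tree_pre_update_vma(vma);
 *	vma->vm_start = address;	(fields may change only here)
 *	vma->vm_pgoff -= grow;
 *	anon_vma_interval_tree_post_update_vma(vma);
 *	anon_vma_unlock_write(vma->anon_vma);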
*/ static inline void anon_vma_interval_tree_pre_update_vma(struct vm_area_struct *vma) { struct anon_vma_chain *avc; list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) anon_vma_interval_tree_remove(avc, &avc->anon_vma->rb_root); } static inline void anon_vma_interval_tree_post_update_vma(struct vm_area_struct *vma) { struct anon_vma_chain *avc; list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) anon_vma_interval_tree_insert(avc, &avc->anon_vma->rb_root); } static int find_vma_links(struct mm_struct *mm, unsigned long addr, unsigned long end, struct vm_area_struct **pprev, struct rb_node ***rb_link, struct rb_node **rb_parent) { struct rb_node **__rb_link, *__rb_parent, *rb_prev; __rb_link = &mm->mm_rb.rb_node; rb_prev = __rb_parent = NULL; while (*__rb_link) { struct vm_area_struct *vma_tmp; __rb_parent = *__rb_link; vma_tmp = rb_entry(__rb_parent, struct vm_area_struct, vm_rb); if (vma_tmp->vm_end > addr) { /* Fail if an existing vma overlaps the area */ if (vma_tmp->vm_start < end) return -ENOMEM; __rb_link = &__rb_parent->rb_left; } else { rb_prev = __rb_parent; __rb_link = &__rb_parent->rb_right; } } *pprev = NULL; if (rb_prev) *pprev = rb_entry(rb_prev, struct vm_area_struct, vm_rb); *rb_link = __rb_link; *rb_parent = __rb_parent; return 0; } static unsigned long count_vma_pages_range(struct mm_struct *mm, unsigned long addr, unsigned long end) { unsigned long nr_pages = 0; struct vm_area_struct *vma; /* Find first overlapping mapping */ vma = find_vma_intersection(mm, addr, end); if (!vma) return 0; nr_pages = (min(end, vma->vm_end) - max(addr, vma->vm_start)) >> PAGE_SHIFT; /* Iterate over the rest of the overlaps */ for (vma = vma->vm_next; vma; vma = vma->vm_next) { unsigned long overlap_len; if (vma->vm_start > end) break; overlap_len = min(end, vma->vm_end) - vma->vm_start; nr_pages += overlap_len >> PAGE_SHIFT; } return nr_pages; } void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma, struct rb_node **rb_link, struct rb_node *rb_parent) { /* Update tracking information for the gap following the new vma. */ if (vma->vm_next) vma_gap_update(vma->vm_next); else mm->highest_vm_end = vm_end_gap(vma); /* * vma->vm_prev wasn't known when we followed the rbtree to find the * correct insertion point for that vma. As a result, we could not * update the vma vm_rb parents rb_subtree_gap values on the way down. * So, we first insert the vma with a zero rb_subtree_gap value * (to be consistent with what we did on the way down), and then * immediately update the gap to the correct value. Finally we * rebalance the rbtree after all augmented values have been set. 
*/ rb_link_node(&vma->vm_rb, rb_parent, rb_link); vma->rb_subtree_gap = 0; vma_gap_update(vma); vma_rb_insert(vma, &mm->mm_rb); } static void __vma_link_file(struct vm_area_struct *vma) { struct file *file; file = vma->vm_file; if (file) { struct address_space *mapping = file->f_mapping; if (vma->vm_flags & VM_DENYWRITE) atomic_dec(&file_inode(file)->i_writecount); if (vma->vm_flags & VM_SHARED) atomic_inc(&mapping->i_mmap_writable); flush_dcache_mmap_lock(mapping); vma_interval_tree_insert(vma, &mapping->i_mmap); flush_dcache_mmap_unlock(mapping); } } static void __vma_link(struct mm_struct *mm, struct vm_area_struct *vma, struct vm_area_struct *prev, struct rb_node **rb_link, struct rb_node *rb_parent) { __vma_link_list(mm, vma, prev, rb_parent); __vma_link_rb(mm, vma, rb_link, rb_parent); } static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma, struct vm_area_struct *prev, struct rb_node **rb_link, struct rb_node *rb_parent) { struct address_space *mapping = NULL; if (vma->vm_file) { mapping = vma->vm_file->f_mapping; i_mmap_lock_write(mapping); } __vma_link(mm, vma, prev, rb_link, rb_parent); __vma_link_file(vma); if (mapping) i_mmap_unlock_write(mapping); mm->map_count++; validate_mm(mm); } /* * Helper for vma_adjust() in the split_vma insert case: insert a vma into the * mm's list and rbtree. It has already been inserted into the interval tree. */ static void __insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma) { struct vm_area_struct *prev; struct rb_node **rb_link, *rb_parent; if (find_vma_links(mm, vma->vm_start, vma->vm_end, &prev, &rb_link, &rb_parent)) BUG(); __vma_link(mm, vma, prev, rb_link, rb_parent); mm->map_count++; } static __always_inline void __vma_unlink_common(struct mm_struct *mm, struct vm_area_struct *vma, struct vm_area_struct *prev, bool has_prev, struct vm_area_struct *ignore) { struct vm_area_struct *next; vma_rb_erase_ignore(vma, &mm->mm_rb, ignore); next = vma->vm_next; if (has_prev) prev->vm_next = next; else { prev = vma->vm_prev; if (prev) prev->vm_next = next; else mm->mmap = next; } if (next) next->vm_prev = prev; /* Kill the cache */ vmacache_invalidate(mm); } static inline void __vma_unlink_prev(struct mm_struct *mm, struct vm_area_struct *vma, struct vm_area_struct *prev) { __vma_unlink_common(mm, vma, prev, true, vma); } /* * We cannot adjust vm_start, vm_end, vm_pgoff fields of a vma that * is already present in an i_mmap tree without adjusting the tree. * The following helper function should be used when such adjustments * are necessary. The "insert" vma (if any) is to be inserted * before we drop the necessary locks. */ int __vma_adjust(struct vm_area_struct *vma, unsigned long start, unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert, struct vm_area_struct *expand) { struct mm_struct *mm = vma->vm_mm; struct vm_area_struct *next = vma->vm_next, *orig_vma = vma; struct address_space *mapping = NULL; struct rb_root_cached *root = NULL; struct anon_vma *anon_vma = NULL; struct file *file = vma->vm_file; bool start_changed = false, end_changed = false; long adjust_next = 0; int remove_next = 0; if (next && !insert) { struct vm_area_struct *exporter = NULL, *importer = NULL; if (end >= next->vm_end) { /* * vma expands, overlapping all the next, and * perhaps the one after too (mprotect case 6). * The only other cases that gets here are * case 1, case 7 and case 8. */ if (next == expand) { /* * The only case where we don't expand "vma" * and we expand "next" instead is case 8. 
*/ VM_WARN_ON(end != next->vm_end); /* * remove_next == 3 means we're * removing "vma" and that to do so we * swapped "vma" and "next". */ remove_next = 3; VM_WARN_ON(file != next->vm_file); swap(vma, next); } else { VM_WARN_ON(expand != vma); /* * case 1, 6, 7, remove_next == 2 is case 6, * remove_next == 1 is case 1 or 7. */ remove_next = 1 + (end > next->vm_end); VM_WARN_ON(remove_next == 2 && end != next->vm_next->vm_end); VM_WARN_ON(remove_next == 1 && end != next->vm_end); /* trim end to next, for case 6 first pass */ end = next->vm_end; } exporter = next; importer = vma; /* * If next doesn't have anon_vma, import from vma after * next, if the vma overlaps with it. */ if (remove_next == 2 && !next->anon_vma) exporter = next->vm_next; } else if (end > next->vm_start) { /* * vma expands, overlapping part of the next: * mprotect case 5 shifting the boundary up. */ adjust_next = (end - next->vm_start) >> PAGE_SHIFT; exporter = next; importer = vma; VM_WARN_ON(expand != importer); } else if (end < vma->vm_end) { /* * vma shrinks, and !insert tells it's not * split_vma inserting another: so it must be * mprotect case 4 shifting the boundary down. */ adjust_next = -((vma->vm_end - end) >> PAGE_SHIFT); exporter = vma; importer = next; VM_WARN_ON(expand != importer); } /* * Easily overlooked: when mprotect shifts the boundary, * make sure the expanding vma has anon_vma set if the * shrinking vma had, to cover any anon pages imported. */ if (exporter && exporter->anon_vma && !importer->anon_vma) { int error; importer->anon_vma = exporter->anon_vma; error = anon_vma_clone(importer, exporter); if (error) return error; } } again: vma_adjust_trans_huge(orig_vma, start, end, adjust_next); if (file) { mapping = file->f_mapping; root = &mapping->i_mmap; uprobe_munmap(vma, vma->vm_start, vma->vm_end); if (adjust_next) uprobe_munmap(next, next->vm_start, next->vm_end); i_mmap_lock_write(mapping); if (insert) { /* * Put into interval tree now, so instantiated pages * are visible to arm/parisc __flush_dcache_page * throughout; but we cannot insert into address * space until vma start or end is updated. */ __vma_link_file(insert); } } anon_vma = vma->anon_vma; if (!anon_vma && adjust_next) anon_vma = next->anon_vma; if (anon_vma) { VM_WARN_ON(adjust_next && next->anon_vma && anon_vma != next->anon_vma); anon_vma_lock_write(anon_vma); anon_vma_interval_tree_pre_update_vma(vma); if (adjust_next) anon_vma_interval_tree_pre_update_vma(next); } if (root) { flush_dcache_mmap_lock(mapping); vma_interval_tree_remove(vma, root); if (adjust_next) vma_interval_tree_remove(next, root); } if (start != vma->vm_start) { vma->vm_start = start; start_changed = true; } if (end != vma->vm_end) { vma->vm_end = end; end_changed = true; } vma->vm_pgoff = pgoff; if (adjust_next) { next->vm_start += adjust_next << PAGE_SHIFT; next->vm_pgoff += adjust_next; } if (root) { if (adjust_next) vma_interval_tree_insert(next, root); vma_interval_tree_insert(vma, root); flush_dcache_mmap_unlock(mapping); } if (remove_next) { /* * vma_merge has merged next into vma, and needs * us to remove next before dropping the locks. */ if (remove_next != 3) __vma_unlink_prev(mm, next, vma); else /* * vma is not before next if they've been * swapped. * * pre-swap() next->vm_start was reduced so * tell validate_mm_rb to ignore pre-swap() * "next" (which is stored in post-swap() * "vma"). 
*/ __vma_unlink_common(mm, next, NULL, false, vma); if (file) __remove_shared_vm_struct(next, file, mapping); } else if (insert) { /* * split_vma has split insert from vma, and needs * us to insert it before dropping the locks * (it may either follow vma or precede it). */ __insert_vm_struct(mm, insert); } else { if (start_changed) vma_gap_update(vma); if (end_changed) { if (!next) mm->highest_vm_end = vm_end_gap(vma); else if (!adjust_next) vma_gap_update(next); } } if (anon_vma) { anon_vma_interval_tree_post_update_vma(vma); if (adjust_next) anon_vma_interval_tree_post_update_vma(next); anon_vma_unlock_write(anon_vma); } if (mapping) i_mmap_unlock_write(mapping); if (root) { uprobe_mmap(vma); if (adjust_next) uprobe_mmap(next); } if (remove_next) { if (file) { uprobe_munmap(next, next->vm_start, next->vm_end); fput(file); } if (next->anon_vma) anon_vma_merge(vma, next); mm->map_count--; mpol_put(vma_policy(next)); vm_area_free(next); /* * In mprotect's case 6 (see comments on vma_merge), * we must remove another next too. It would clutter * up the code too much to do both in one go. */ if (remove_next != 3) { /* * If "next" was removed and vma->vm_end was * expanded (up) over it, in turn * "next->vm_prev->vm_end" changed and the * "vma->vm_next" gap must be updated. */ next = vma->vm_next; } else { /* * For the scope of the comment "next" and * "vma" considered pre-swap(): if "vma" was * removed, next->vm_start was expanded (down) * over it and the "next" gap must be updated. * Because of the swap() the post-swap() "vma" * actually points to pre-swap() "next" * (post-swap() "next", by contrast, is now a * dangling pointer). */ next = vma; } if (remove_next == 2) { remove_next = 1; end = next->vm_end; goto again; } else if (next) vma_gap_update(next); else { /* * If remove_next == 2 we obviously can't * reach this path. * * If remove_next == 3 we can't reach this * path because pre-swap() next is always not * NULL. pre-swap() "next" is not being * removed and its next->vm_end is not altered * (and furthermore "end" already matches * next->vm_end in remove_next == 3). * * We reach this only in the remove_next == 1 * case if the "next" vma that was removed was * the highest vma of the mm. However in such * case next->vm_end == "end" and the extended * "vma" has vma->vm_end == next->vm_end so * mm->highest_vm_end doesn't need any update * in remove_next == 1 case. */ VM_WARN_ON(mm->highest_vm_end != vm_end_gap(vma)); } } if (insert && file) uprobe_mmap(insert); validate_mm(mm); return 0; } /* * If the vma has a ->close operation then the driver probably needs to release * per-vma resources, so we don't attempt to merge those. */ static inline int is_mergeable_vma(struct vm_area_struct *vma, struct file *file, unsigned long vm_flags, struct vm_userfaultfd_ctx vm_userfaultfd_ctx) { /* * VM_SOFTDIRTY should not prevent VMA merging: if the flags match * except for the dirty bit, the caller should mark the merged VMA * as dirty. If the dirty bit were not excluded from the comparison, * we would increase pressure on the memory system, forcing the * kernel to generate new VMAs when an old one could be * extended instead. 
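 *
 * Worked example of the masked comparison below: flags that differ
 * only in the soft-dirty bit still merge, because
 *
 *	((VM_READ|VM_WRITE|VM_SOFTDIRTY) ^ (VM_READ|VM_WRITE))
 *		== VM_SOFTDIRTY, and VM_SOFTDIRTY & ~VM_SOFTDIRTY == 0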
*/ if ((vma->vm_flags ^ vm_flags) & ~VM_SOFTDIRTY) return 0; if (vma->vm_file != file) return 0; if (vma->vm_ops && vma->vm_ops->close) return 0; if (!is_mergeable_vm_userfaultfd_ctx(vma, vm_userfaultfd_ctx)) return 0; return 1; } static inline int is_mergeable_anon_vma(struct anon_vma *anon_vma1, struct anon_vma *anon_vma2, struct vm_area_struct *vma) { /* * The list_is_singular() test is to avoid merging VMA cloned from * parents. This avoids the scalability problems caused by the * anon_vma lock. */ if ((!anon_vma1 || !anon_vma2) && (!vma || list_is_singular(&vma->anon_vma_chain))) return 1; return anon_vma1 == anon_vma2; } /* * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff) * in front of (at a lower virtual address and file offset than) the vma. * * We cannot merge two vmas if they have differently assigned (non-NULL) * anon_vmas, nor if same anon_vma is assigned but offsets incompatible. * * We don't check here for the merged mmap wrapping around the end of pagecache * indices (16TB on ia32) because do_mmap_pgoff() does not permit mmap's which * wrap, nor mmaps which cover the final page at index -1UL. */ static int can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags, struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff, struct vm_userfaultfd_ctx vm_userfaultfd_ctx) { if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx) && is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) { if (vma->vm_pgoff == vm_pgoff) return 1; } return 0; } /* * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff) * beyond (at a higher virtual address and file offset than) the vma. * * We cannot merge two vmas if they have differently assigned (non-NULL) * anon_vmas, nor if same anon_vma is assigned but offsets incompatible. */ static int can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags, struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff, struct vm_userfaultfd_ctx vm_userfaultfd_ctx) { if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx) && is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) { pgoff_t vm_pglen; vm_pglen = vma_pages(vma); if (vma->vm_pgoff + vm_pglen == vm_pgoff) return 1; } return 0; } /* * Given a mapping request (addr,end,vm_flags,file,pgoff), figure out * whether that can be merged with its predecessor or its successor. * Or both (it neatly fills a hole). * * In most cases - when called for mmap, brk or mremap - [addr,end) is * certain not to be mapped by the time vma_merge is called; but when * called for mprotect, it is certain to be already mapped (either at * an offset within prev, or at the start of next), and the flags of * this area are about to be changed to vm_flags - and the no-change * case has already been eliminated. * * The following mprotect cases have to be considered, where AAAA is * the area passed down from mprotect_fixup, never extending beyond one * vma, PPPPPP is the prev vma specified, and NNNNNN the next vma after: * * AAAA AAAA AAAA AAAA * PPPPPPNNNNNN PPPPPPNNNNNN PPPPPPNNNNNN PPPPNNNNXXXX * cannot merge might become might become might become * PPNNNNNNNNNN PPPPPPPPPPNN PPPPPPPPPPPP 6 or * mmap, brk or case 4 below case 5 below PPPPPPPPXXXX 7 or * mremap move: PPPPXXXXXXXX 8 * AAAA * PPPP NNNN PPPPPPPPPPPP PPPPPPPPNNNN PPPPNNNNNNNN * might become case 1 below case 2 below case 3 below * * It is important for case 8 that the vma NNNN overlapping the * region AAAA is never going to be extended over XXXX. 
Instead XXXX must * be extended in region AAAA and NNNN must be removed. This way in * all cases where vma_merge succeeds, the moment vma_adjust drops the * rmap_locks, the properties of the merged vma will be already * correct for the whole merged range. Some of those properties like * vm_page_prot/vm_flags may be accessed by rmap_walks and they must * be correct for the whole merged range immediately after the * rmap_locks are released. Otherwise if XXXX would be removed and * NNNN would be extended over the XXXX range, remove_migration_ptes * or other rmap walkers (if working on addresses beyond the "end" * parameter) may establish ptes with the wrong permissions of NNNN * instead of the right permissions of XXXX. */ struct vm_area_struct *vma_merge(struct mm_struct *mm, struct vm_area_struct *prev, unsigned long addr, unsigned long end, unsigned long vm_flags, struct anon_vma *anon_vma, struct file *file, pgoff_t pgoff, struct mempolicy *policy, struct vm_userfaultfd_ctx vm_userfaultfd_ctx) { pgoff_t pglen = (end - addr) >> PAGE_SHIFT; struct vm_area_struct *area, *next; int err; /* * We later require that vma->vm_flags == vm_flags, * so this tests vma->vm_flags & VM_SPECIAL, too. */ if (vm_flags & VM_SPECIAL) return NULL; if (prev) next = prev->vm_next; else next = mm->mmap; area = next; if (area && area->vm_end == end) /* cases 6, 7, 8 */ next = next->vm_next; /* verify some invariant that must be enforced by the caller */ VM_WARN_ON(prev && addr <= prev->vm_start); VM_WARN_ON(area && end > area->vm_end); VM_WARN_ON(addr >= end); /* * Can it merge with the predecessor? */ if (prev && prev->vm_end == addr && mpol_equal(vma_policy(prev), policy) && can_vma_merge_after(prev, vm_flags, anon_vma, file, pgoff, vm_userfaultfd_ctx)) { /* * OK, it can. Can we now merge in the successor as well? */ if (next && end == next->vm_start && mpol_equal(policy, vma_policy(next)) && can_vma_merge_before(next, vm_flags, anon_vma, file, pgoff+pglen, vm_userfaultfd_ctx) && is_mergeable_anon_vma(prev->anon_vma, next->anon_vma, NULL)) { /* cases 1, 6 */ err = __vma_adjust(prev, prev->vm_start, next->vm_end, prev->vm_pgoff, NULL, prev); } else /* cases 2, 5, 7 */ err = __vma_adjust(prev, prev->vm_start, end, prev->vm_pgoff, NULL, prev); if (err) return NULL; khugepaged_enter_vma_merge(prev, vm_flags); return prev; } /* * Can this new request be merged in front of next? */ if (next && end == next->vm_start && mpol_equal(policy, vma_policy(next)) && can_vma_merge_before(next, vm_flags, anon_vma, file, pgoff+pglen, vm_userfaultfd_ctx)) { if (prev && addr < prev->vm_end) /* case 4 */ err = __vma_adjust(prev, prev->vm_start, addr, prev->vm_pgoff, NULL, next); else { /* cases 3, 8 */ err = __vma_adjust(area, addr, next->vm_end, next->vm_pgoff - pglen, NULL, next); /* * In case 3 area is already equal to next and * this is a noop, but in case 8 "area" has * been removed and next was expanded over it. */ area = next; } if (err) return NULL; khugepaged_enter_vma_merge(area, vm_flags); return area; } return NULL; } /* * Rough compatibility check to quickly see if it's even worth looking * at sharing an anon_vma. * * They need to have the same vm_file, and the flags can only differ * in things that mprotect may change. * * NOTE! The fact that we share an anon_vma doesn't _have_ to mean that * we can merge the two vma's. For example, we refuse to merge a vma if * there is a vm_ops->close() function, because that indicates that the * driver is doing some kind of reference counting. 
But that doesn't * really matter for the anon_vma sharing case. */ static int anon_vma_compatible(struct vm_area_struct *a, struct vm_area_struct *b) { return a->vm_end == b->vm_start && mpol_equal(vma_policy(a), vma_policy(b)) && a->vm_file == b->vm_file && !((a->vm_flags ^ b->vm_flags) & ~(VM_READ|VM_WRITE|VM_EXEC|VM_SOFTDIRTY)) && b->vm_pgoff == a->vm_pgoff + ((b->vm_start - a->vm_start) >> PAGE_SHIFT); } /* * Do some basic sanity checking to see if we can re-use the anon_vma * from 'old'. The 'a'/'b' vma's are in VM order - one of them will be * the same as 'old', the other will be the new one that is trying * to share the anon_vma. * * NOTE! This runs with mm_sem held for reading, so it is possible that * the anon_vma of 'old' is concurrently in the process of being set up * by another page fault trying to merge _that_. But that's ok: if it * is being set up, that automatically means that it will be a singleton * acceptable for merging, so we can do all of this optimistically. But * we do that READ_ONCE() to make sure that we never re-load the pointer. * * IOW: that the "list_is_singular()" test on the anon_vma_chain only * matters for the 'stable anon_vma' case (ie the thing we want to avoid * is to return an anon_vma that is "complex" due to having gone through * a fork). * * We also make sure that the two vma's are compatible (adjacent, * and with the same memory policies). That's all stable, even with just * a read lock on the mm_sem. */ static struct anon_vma *reusable_anon_vma(struct vm_area_struct *old, struct vm_area_struct *a, struct vm_area_struct *b) { if (anon_vma_compatible(a, b)) { struct anon_vma *anon_vma = READ_ONCE(old->anon_vma); if (anon_vma && list_is_singular(&old->anon_vma_chain)) return anon_vma; } return NULL; } /* * find_mergeable_anon_vma is used by anon_vma_prepare, to check * neighbouring vmas for a suitable anon_vma, before it goes off * to allocate a new anon_vma. It checks because a repetitive * sequence of mprotects and faults may otherwise lead to distinct * anon_vmas being allocated, preventing vma merge in subsequent * mprotect. */ struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma) { struct anon_vma *anon_vma; struct vm_area_struct *near; near = vma->vm_next; if (!near) goto try_prev; anon_vma = reusable_anon_vma(near, vma, near); if (anon_vma) return anon_vma; try_prev: near = vma->vm_prev; if (!near) goto none; anon_vma = reusable_anon_vma(near, near, vma); if (anon_vma) return anon_vma; none: /* * There's no absolute need to look only at touching neighbours: * we could search further afield for "compatible" anon_vmas. * But it would probably just be a waste of time searching, * or lead to too many vmas hanging off the same anon_vma. * We're trying to allow mprotect remerging later on, * not trying to minimize memory used for anon_vmas. */ return NULL; } /* * If a hint addr is less than mmap_min_addr change hint to be as * low as possible but still greater than mmap_min_addr */ static inline unsigned long round_hint_to_min(unsigned long hint) { hint &= PAGE_MASK; if (((void *)hint != NULL) && (hint < mmap_min_addr)) return PAGE_ALIGN(mmap_min_addr); return hint; } static inline int mlock_future_check(struct mm_struct *mm, unsigned long flags, unsigned long len) { unsigned long locked, lock_limit; /* mlock MCL_FUTURE? 
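 *
 * Illustrative arithmetic (example numbers, not from the source): with
 * RLIMIT_MEMLOCK at 64 MB and 4 KiB pages, lock_limit is 16384 pages;
 * a request that would push mm->locked_vm past that fails with -EAGAIN
 * unless the caller has CAP_IPC_LOCK.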
*/ if (flags & VM_LOCKED) { locked = len >> PAGE_SHIFT; locked += mm->locked_vm; lock_limit = rlimit(RLIMIT_MEMLOCK); lock_limit >>= PAGE_SHIFT; if (locked > lock_limit && !capable(CAP_IPC_LOCK)) return -EAGAIN; } return 0; } static inline u64 file_mmap_size_max(struct file *file, struct inode *inode) { if (S_ISREG(inode->i_mode)) return MAX_LFS_FILESIZE; if (S_ISBLK(inode->i_mode)) return MAX_LFS_FILESIZE; /* Special "we do even unsigned file positions" case */ if (file->f_mode & FMODE_UNSIGNED_OFFSET) return 0; /* Yes, random drivers might want more. But I'm tired of buggy drivers */ return ULONG_MAX; } static inline bool file_mmap_ok(struct file *file, struct inode *inode, unsigned long pgoff, unsigned long len) { u64 maxsize = file_mmap_size_max(file, inode); if (maxsize && len > maxsize) return false; maxsize -= len; if (pgoff > maxsize >> PAGE_SHIFT) return false; return true; } /* * The caller must hold down_write(&current->mm->mmap_sem). */ unsigned long do_mmap(struct file *file, unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, vm_flags_t vm_flags, unsigned long pgoff, unsigned long *populate, struct list_head *uf) { struct mm_struct *mm = current->mm; int pkey = 0; *populate = 0; if (!len) return -EINVAL; /* * Does the application expect PROT_READ to imply PROT_EXEC? * * (the exception is when the underlying filesystem is noexec * mounted, in which case we dont add PROT_EXEC.) */ if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC)) if (!(file && path_noexec(&file->f_path))) prot |= PROT_EXEC; /* force arch specific MAP_FIXED handling in get_unmapped_area */ if (flags & MAP_FIXED_NOREPLACE) flags |= MAP_FIXED; if (!(flags & MAP_FIXED)) addr = round_hint_to_min(addr); /* Careful about overflows.. */ len = PAGE_ALIGN(len); if (!len) return -ENOMEM; /* offset overflow? */ if ((pgoff + (len >> PAGE_SHIFT)) < pgoff) return -EOVERFLOW; /* Too many mappings? */ if (mm->map_count > sysctl_max_map_count) return -ENOMEM; /* Obtain the address to map to. we verify (or select) it and ensure * that it represents a valid section of the address space. */ addr = get_unmapped_area(file, addr, len, pgoff, flags); if (offset_in_page(addr)) return addr; if (flags & MAP_FIXED_NOREPLACE) { struct vm_area_struct *vma = find_vma(mm, addr); if (vma && vma->vm_start < addr + len) return -EEXIST; } if (prot == PROT_EXEC) { pkey = execute_only_pkey(mm); if (pkey < 0) pkey = 0; } /* Do simple checking here so the lower-level routines won't have * to. we assume access permissions have been handled by the open * of the memory object, so we don't do any here. */ vm_flags |= calc_vm_prot_bits(prot, pkey) | calc_vm_flag_bits(flags) | mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC; if (flags & MAP_LOCKED) if (!can_do_mlock()) return -EPERM; if (mlock_future_check(mm, vm_flags, len)) return -EAGAIN; if (file) { struct inode *inode = file_inode(file); unsigned long flags_mask; if (!file_mmap_ok(file, inode, pgoff, len)) return -EOVERFLOW; flags_mask = LEGACY_MAP_MASK | file->f_op->mmap_supported_flags; switch (flags & MAP_TYPE) { case MAP_SHARED: /* * Force use of MAP_SHARED_VALIDATE with non-legacy * flags. E.g. MAP_SYNC is dangerous to use with * MAP_SHARED as you don't know which consistency model * you will get. We silently ignore unsupported flags * with MAP_SHARED to preserve backward compatibility. 
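 *
 * User-space sketch of the difference (assumes a filesystem that does
 * not support MAP_SYNC):
 *
 *	mmap(NULL, len, PROT_READ|PROT_WRITE, MAP_SHARED | MAP_SYNC, fd, 0);
 *		- MAP_SYNC is silently dropped, the mapping succeeds
 *	mmap(NULL, len, PROT_READ|PROT_WRITE,
 *	     MAP_SHARED_VALIDATE | MAP_SYNC, fd, 0);
 *		- fails with EOPNOTSUPP instead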
*/ flags &= LEGACY_MAP_MASK; /* fall through */ case MAP_SHARED_VALIDATE: if (flags & ~flags_mask) return -EOPNOTSUPP; if ((prot&PROT_WRITE) && !(file->f_mode&FMODE_WRITE)) return -EACCES; /* * Make sure we don't allow writing to an append-only * file.. */ if (IS_APPEND(inode) && (file->f_mode & FMODE_WRITE)) return -EACCES; /* * Make sure there are no mandatory locks on the file. */ if (locks_verify_locked(file)) return -EAGAIN; vm_flags |= VM_SHARED | VM_MAYSHARE; if (!(file->f_mode & FMODE_WRITE)) vm_flags &= ~(VM_MAYWRITE | VM_SHARED); /* fall through */ case MAP_PRIVATE: if (!(file->f_mode & FMODE_READ)) return -EACCES; if (path_noexec(&file->f_path)) { if (vm_flags & VM_EXEC) return -EPERM; vm_flags &= ~VM_MAYEXEC; } if (!file->f_op->mmap) return -ENODEV; if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP)) return -EINVAL; break; default: return -EINVAL; } } else { switch (flags & MAP_TYPE) { case MAP_SHARED: if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP)) return -EINVAL; /* * Ignore pgoff. */ pgoff = 0; vm_flags |= VM_SHARED | VM_MAYSHARE; break; case MAP_PRIVATE: /* * Set pgoff according to addr for anon_vma. */ pgoff = addr >> PAGE_SHIFT; break; default: return -EINVAL; } } /* * Set 'VM_NORESERVE' if we should not account for the * memory use of this mapping. */ if (flags & MAP_NORESERVE) { /* We honor MAP_NORESERVE if allowed to overcommit */ if (sysctl_overcommit_memory != OVERCOMMIT_NEVER) vm_flags |= VM_NORESERVE; /* hugetlb applies strict overcommit unless MAP_NORESERVE */ if (file && is_file_hugepages(file)) vm_flags |= VM_NORESERVE; } addr = mmap_region(file, addr, len, vm_flags, pgoff, uf); if (!IS_ERR_VALUE(addr) && ((vm_flags & VM_LOCKED) || (flags & (MAP_POPULATE | MAP_NONBLOCK)) == MAP_POPULATE)) *populate = len; return addr; } unsigned long ksys_mmap_pgoff(unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, unsigned long fd, unsigned long pgoff) { struct file *file = NULL; unsigned long retval; if (!(flags & MAP_ANONYMOUS)) { audit_mmap_fd(fd, flags); file = fget(fd); if (!file) return -EBADF; if (is_file_hugepages(file)) len = ALIGN(len, huge_page_size(hstate_file(file))); retval = -EINVAL; if (unlikely(flags & MAP_HUGETLB && !is_file_hugepages(file))) goto out_fput; } else if (flags & MAP_HUGETLB) { struct user_struct *user = NULL; struct hstate *hs; hs = hstate_sizelog((flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK); if (!hs) return -EINVAL; len = ALIGN(len, huge_page_size(hs)); /* * VM_NORESERVE is used because the reservations will be * taken when vm_ops->mmap() is called * A dummy user value is used because we are not locking * memory so no accounting is necessary */ file = hugetlb_file_setup(HUGETLB_ANON_FILE, len, VM_NORESERVE, &user, HUGETLB_ANONHUGE_INODE, (flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK); if (IS_ERR(file)) return PTR_ERR(file); } flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff); out_fput: if (file) fput(file); return retval; } SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len, unsigned long, prot, unsigned long, flags, unsigned long, fd, unsigned long, pgoff) { return ksys_mmap_pgoff(addr, len, prot, flags, fd, pgoff); } #ifdef __ARCH_WANT_SYS_OLD_MMAP struct mmap_arg_struct { unsigned long addr; unsigned long len; unsigned long prot; unsigned long flags; unsigned long fd; unsigned long offset; }; SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg) { struct mmap_arg_struct a; if (copy_from_user(&a, arg, sizeof(a))) return -EFAULT; if 
(offset_in_page(a.offset)) return -EINVAL; return ksys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT); } #endif /* __ARCH_WANT_SYS_OLD_MMAP */ /* * Some shared mappings will want the pages marked read-only * to track write events. If so, we'll downgrade vm_page_prot * to the private version (using protection_map[] without the * VM_SHARED bit). */ int vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot) { vm_flags_t vm_flags = vma->vm_flags; const struct vm_operations_struct *vm_ops = vma->vm_ops; /* If it was private or non-writable, the write bit is already clear */ if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED))) return 0; /* The backer wishes to know when pages are first written to? */ if (vm_ops && (vm_ops->page_mkwrite || vm_ops->pfn_mkwrite)) return 1; /* The open routine did something to the protections that pgprot_modify * won't preserve? */ if (pgprot_val(vm_page_prot) != pgprot_val(vm_pgprot_modify(vm_page_prot, vm_flags))) return 0; /* Do we need to track softdirty? */ if (IS_ENABLED(CONFIG_MEM_SOFT_DIRTY) && !(vm_flags & VM_SOFTDIRTY)) return 1; /* Specialty mapping? */ if (vm_flags & VM_PFNMAP) return 0; /* Can the mapping track the dirty pages? */ return vma->vm_file && vma->vm_file->f_mapping && mapping_cap_account_dirty(vma->vm_file->f_mapping); } /* * We account for memory if it's a private writeable mapping, * not hugepages and VM_NORESERVE wasn't set. */ static inline int accountable_mapping(struct file *file, vm_flags_t vm_flags) { /* * hugetlb has its own accounting separate from the core VM * VM_HUGETLB may not be set yet so we cannot check for that flag. */ if (file && is_file_hugepages(file)) return 0; return (vm_flags & (VM_NORESERVE | VM_SHARED | VM_WRITE)) == VM_WRITE; } unsigned long mmap_region(struct file *file, unsigned long addr, unsigned long len, vm_flags_t vm_flags, unsigned long pgoff, struct list_head *uf) { struct mm_struct *mm = current->mm; struct vm_area_struct *vma, *prev; int error; struct rb_node **rb_link, *rb_parent; unsigned long charged = 0; /* Check against address space limit. */ if (!may_expand_vm(mm, vm_flags, len >> PAGE_SHIFT)) { unsigned long nr_pages; /* * MAP_FIXED may remove pages of mappings that intersect with * the requested mapping. Account for the pages it would unmap. */ nr_pages = count_vma_pages_range(mm, addr, addr + len); if (!may_expand_vm(mm, vm_flags, (len >> PAGE_SHIFT) - nr_pages)) return -ENOMEM; } /* Clear old maps */ while (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) { if (do_munmap(mm, addr, len, uf)) return -ENOMEM; } /* * Private writable mapping: check memory availability */ if (accountable_mapping(file, vm_flags)) { charged = len >> PAGE_SHIFT; if (security_vm_enough_memory_mm(mm, charged)) return -ENOMEM; vm_flags |= VM_ACCOUNT; } /* * Can we just expand an old mapping? */ vma = vma_merge(mm, prev, addr, addr + len, vm_flags, NULL, file, pgoff, NULL, NULL_VM_UFFD_CTX); if (vma) goto out; /* * Determine the object being mapped and call the appropriate * specific mapper. The address has already been validated, but * not unmapped; the overlapping maps, however, have already been * removed from the list. 
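 *
 * The call_mmap() step below hands the new vma to the backing object.
 * A minimal, hypothetical driver-side sketch of that contract (names
 * are illustrative only):
 *
 *	static int demo_mmap(struct file *filp, struct vm_area_struct *vma)
 *	{
 *		vma->vm_ops = &demo_vm_ops;	(install the fault handler)
 *		return 0;			(or a negative errno)
 *	}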
*/ vma = vm_area_alloc(mm); if (!vma) { error = -ENOMEM; goto unacct_error; } vma->vm_start = addr; vma->vm_end = addr + len; vma->vm_flags = vm_flags; vma->vm_page_prot = vm_get_page_prot(vm_flags); vma->vm_pgoff = pgoff; if (file) { if (vm_flags & VM_DENYWRITE) { error = deny_write_access(file); if (error) goto free_vma; } if (vm_flags & VM_SHARED) { error = mapping_map_writable(file->f_mapping); if (error) goto allow_write_and_free_vma; } /* ->mmap() can change vma->vm_file, but must guarantee that * vma_link() below can deny write-access if VM_DENYWRITE is set * and map writably if VM_SHARED is set. This usually means the * new file must not have been exposed to user-space, yet. */ vma->vm_file = get_file(file); error = call_mmap(file, vma); if (error) goto unmap_and_free_vma; /* Can addr have changed?? * * Answer: Yes, several device drivers can do it in their * f_op->mmap method. -DaveM * Bug: If addr is changed, prev, rb_link, rb_parent should * be updated for vma_link() */ WARN_ON_ONCE(addr != vma->vm_start); addr = vma->vm_start; vm_flags = vma->vm_flags; } else if (vm_flags & VM_SHARED) { error = shmem_zero_setup(vma); if (error) goto free_vma; } else { vma_set_anonymous(vma); } vma_link(mm, vma, prev, rb_link, rb_parent); /* Once vma denies write, undo our temporary denial count */ if (file) { if (vm_flags & VM_SHARED) mapping_unmap_writable(file->f_mapping); if (vm_flags & VM_DENYWRITE) allow_write_access(file); } file = vma->vm_file; out: perf_event_mmap(vma); vm_stat_account(mm, vm_flags, len >> PAGE_SHIFT); if (vm_flags & VM_LOCKED) { if ((vm_flags & VM_SPECIAL) || vma_is_dax(vma) || is_vm_hugetlb_page(vma) || vma == get_gate_vma(current->mm)) vma->vm_flags &= VM_LOCKED_CLEAR_MASK; else mm->locked_vm += (len >> PAGE_SHIFT); } if (file) uprobe_mmap(vma); /* * New (or expanded) vma always get soft dirty status. * Otherwise user-space soft-dirty page tracker won't * be able to distinguish situation when vma area unmapped, * then new mapped in-place (which must be aimed as * a completely new data area). */ vma->vm_flags |= VM_SOFTDIRTY; vma_set_page_prot(vma); return addr; unmap_and_free_vma: vma->vm_file = NULL; fput(file); /* Undo any partial mapping done by a device driver. */ unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end); charged = 0; if (vm_flags & VM_SHARED) mapping_unmap_writable(file->f_mapping); allow_write_and_free_vma: if (vm_flags & VM_DENYWRITE) allow_write_access(file); free_vma: vm_area_free(vma); unacct_error: if (charged) vm_unacct_memory(charged); return error; } unsigned long unmapped_area(struct vm_unmapped_area_info *info) { /* * We implement the search by looking for an rbtree node that * immediately follows a suitable gap. 
That is, * - gap_start = vma->vm_prev->vm_end <= info->high_limit - length; * - gap_end = vma->vm_start >= info->low_limit + length; * - gap_end - gap_start >= length */ struct mm_struct *mm = current->mm; struct vm_area_struct *vma; unsigned long length, low_limit, high_limit, gap_start, gap_end; /* Adjust search length to account for worst case alignment overhead */ length = info->length + info->align_mask; if (length < info->length) return -ENOMEM; /* Adjust search limits by the desired length */ if (info->high_limit < length) return -ENOMEM; high_limit = info->high_limit - length; if (info->low_limit > high_limit) return -ENOMEM; low_limit = info->low_limit + length; /* Check if rbtree root looks promising */ if (RB_EMPTY_ROOT(&mm->mm_rb)) goto check_highest; vma = rb_entry(mm->mm_rb.rb_node, struct vm_area_struct, vm_rb); if (vma->rb_subtree_gap < length) goto check_highest; while (true) { /* Visit left subtree if it looks promising */ gap_end = vm_start_gap(vma); if (gap_end >= low_limit && vma->vm_rb.rb_left) { struct vm_area_struct *left = rb_entry(vma->vm_rb.rb_left, struct vm_area_struct, vm_rb); if (left->rb_subtree_gap >= length) { vma = left; continue; } } gap_start = vma->vm_prev ? vm_end_gap(vma->vm_prev) : 0; check_current: /* Check if current node has a suitable gap */ if (gap_start > high_limit) return -ENOMEM; if (gap_end >= low_limit && gap_end > gap_start && gap_end - gap_start >= length) goto found; /* Visit right subtree if it looks promising */ if (vma->vm_rb.rb_right) { struct vm_area_struct *right = rb_entry(vma->vm_rb.rb_right, struct vm_area_struct, vm_rb); if (right->rb_subtree_gap >= length) { vma = right; continue; } } /* Go back up the rbtree to find next candidate node */ while (true) { struct rb_node *prev = &vma->vm_rb; if (!rb_parent(prev)) goto check_highest; vma = rb_entry(rb_parent(prev), struct vm_area_struct, vm_rb); if (prev == vma->vm_rb.rb_left) { gap_start = vm_end_gap(vma->vm_prev); gap_end = vm_start_gap(vma); goto check_current; } } } check_highest: /* Check highest gap, which does not precede any rbtree node */ gap_start = mm->highest_vm_end; gap_end = ULONG_MAX; /* Only for VM_BUG_ON below */ if (gap_start > high_limit) return -ENOMEM; found: /* We found a suitable gap. Clip it with the original low_limit. */ if (gap_start < info->low_limit) gap_start = info->low_limit; /* Adjust gap address to the desired alignment */ gap_start += (info->align_offset - gap_start) & info->align_mask; VM_BUG_ON(gap_start + info->length > info->high_limit); VM_BUG_ON(gap_start + info->length > gap_end); return gap_start; } unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info) { struct mm_struct *mm = current->mm; struct vm_area_struct *vma; unsigned long length, low_limit, high_limit, gap_start, gap_end; /* Adjust search length to account for worst case alignment overhead */ length = info->length + info->align_mask; if (length < info->length) return -ENOMEM; /* * Adjust search limits by the desired length. * See implementation comment at top of unmapped_area(). 
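 *
 * Caller-side setup, mirroring arch_get_unmapped_area_topdown()
 * further below (field values are the typical ones, shown here only
 * for orientation):
 *
 *	struct vm_unmapped_area_info info;
 *
 *	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
 *	info.length = len;
 *	info.low_limit = max(PAGE_SIZE, mmap_min_addr);
 *	info.high_limit = mm->mmap_base;
 *	info.align_mask = 0;
 *	addr = vm_unmapped_area(&info);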
*/ gap_end = info->high_limit; if (gap_end < length) return -ENOMEM; high_limit = gap_end - length; if (info->low_limit > high_limit) return -ENOMEM; low_limit = info->low_limit + length; /* Check highest gap, which does not precede any rbtree node */ gap_start = mm->highest_vm_end; if (gap_start <= high_limit) goto found_highest; /* Check if rbtree root looks promising */ if (RB_EMPTY_ROOT(&mm->mm_rb)) return -ENOMEM; vma = rb_entry(mm->mm_rb.rb_node, struct vm_area_struct, vm_rb); if (vma->rb_subtree_gap < length) return -ENOMEM; while (true) { /* Visit right subtree if it looks promising */ gap_start = vma->vm_prev ? vm_end_gap(vma->vm_prev) : 0; if (gap_start <= high_limit && vma->vm_rb.rb_right) { struct vm_area_struct *right = rb_entry(vma->vm_rb.rb_right, struct vm_area_struct, vm_rb); if (right->rb_subtree_gap >= length) { vma = right; continue; } } check_current: /* Check if current node has a suitable gap */ gap_end = vm_start_gap(vma); if (gap_end < low_limit) return -ENOMEM; if (gap_start <= high_limit && gap_end > gap_start && gap_end - gap_start >= length) goto found; /* Visit left subtree if it looks promising */ if (vma->vm_rb.rb_left) { struct vm_area_struct *left = rb_entry(vma->vm_rb.rb_left, struct vm_area_struct, vm_rb); if (left->rb_subtree_gap >= length) { vma = left; continue; } } /* Go back up the rbtree to find next candidate node */ while (true) { struct rb_node *prev = &vma->vm_rb; if (!rb_parent(prev)) return -ENOMEM; vma = rb_entry(rb_parent(prev), struct vm_area_struct, vm_rb); if (prev == vma->vm_rb.rb_right) { gap_start = vma->vm_prev ? vm_end_gap(vma->vm_prev) : 0; goto check_current; } } } found: /* We found a suitable gap. Clip it with the original high_limit. */ if (gap_end > info->high_limit) gap_end = info->high_limit; found_highest: /* Compute highest gap address at the desired alignment */ gap_end -= info->length; gap_end -= (gap_end - info->align_offset) & info->align_mask; VM_BUG_ON(gap_end < info->low_limit); VM_BUG_ON(gap_end < gap_start); return gap_end; } #ifndef arch_get_mmap_end #define arch_get_mmap_end(addr) (TASK_SIZE) #endif #ifndef arch_get_mmap_base #define arch_get_mmap_base(addr, base) (base) #endif /* Get an address range which is currently unmapped. * For shmat() with addr=0. * * Ugly calling convention alert: * Return value with the low bits set means error value, * ie * if (ret & ~PAGE_MASK) * error = ret; * * This function "knows" that -ENOMEM has the bits set. 
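 *
 * Caller-side check, exactly as do_mmap() above does it:
 *
 *	addr = get_unmapped_area(file, addr, len, pgoff, flags);
 *	if (offset_in_page(addr))	(low bits set => negative errno)
 *		return addr;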
*/ #ifndef HAVE_ARCH_UNMAPPED_AREA unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) { struct mm_struct *mm = current->mm; struct vm_area_struct *vma, *prev; struct vm_unmapped_area_info info; const unsigned long mmap_end = arch_get_mmap_end(addr); if (len > mmap_end - mmap_min_addr) return -ENOMEM; if (flags & MAP_FIXED) return addr; if (addr) { addr = PAGE_ALIGN(addr); vma = find_vma_prev(mm, addr, &prev); if (mmap_end - len >= addr && addr >= mmap_min_addr && (!vma || addr + len <= vm_start_gap(vma)) && (!prev || addr >= vm_end_gap(prev))) return addr; } info.flags = 0; info.length = len; info.low_limit = mm->mmap_base; info.high_limit = mmap_end; info.align_mask = 0; return vm_unmapped_area(&info); } #endif /* * This mmap-allocator allocates new areas top-down from below the * stack's low limit (the base): */ #ifndef HAVE_ARCH_UNMAPPED_AREA_TOPDOWN unsigned long arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, const unsigned long len, const unsigned long pgoff, const unsigned long flags) { struct vm_area_struct *vma, *prev; struct mm_struct *mm = current->mm; unsigned long addr = addr0; struct vm_unmapped_area_info info; const unsigned long mmap_end = arch_get_mmap_end(addr); /* requested length too big for entire address space */ if (len > mmap_end - mmap_min_addr) return -ENOMEM; if (flags & MAP_FIXED) return addr; /* requesting a specific address */ if (addr) { addr = PAGE_ALIGN(addr); vma = find_vma_prev(mm, addr, &prev); if (mmap_end - len >= addr && addr >= mmap_min_addr && (!vma || addr + len <= vm_start_gap(vma)) && (!prev || addr >= vm_end_gap(prev))) return addr; } info.flags = VM_UNMAPPED_AREA_TOPDOWN; info.length = len; info.low_limit = max(PAGE_SIZE, mmap_min_addr); info.high_limit = arch_get_mmap_base(addr, mm->mmap_base); info.align_mask = 0; addr = vm_unmapped_area(&info); /* * A failed mmap() very likely causes application failure, * so fall back to the bottom-up function here. This scenario * can happen with large stack limits and large mmap() * allocations. */ if (offset_in_page(addr)) { VM_BUG_ON(addr != -ENOMEM); info.flags = 0; info.low_limit = TASK_UNMAPPED_BASE; info.high_limit = mmap_end; addr = vm_unmapped_area(&info); } return addr; } #endif unsigned long get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) { unsigned long (*get_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); unsigned long error = arch_mmap_check(addr, len, flags); if (error) return error; /* Careful about overflows.. */ if (len > TASK_SIZE) return -ENOMEM; get_area = current->mm->get_unmapped_area; if (file) { if (file->f_op->get_unmapped_area) get_area = file->f_op->get_unmapped_area; } else if (flags & MAP_SHARED) { /* * mmap_region() will call shmem_zero_setup() to create a file, * so use shmem's get_unmapped_area in case it can be huge. * do_mmap_pgoff() will clear pgoff, so match alignment. */ pgoff = 0; get_area = shmem_get_unmapped_area; } addr = get_area(file, addr, len, pgoff, flags); if (IS_ERR_VALUE(addr)) return addr; if (addr > TASK_SIZE - len) return -ENOMEM; if (offset_in_page(addr)) return -EINVAL; error = security_mmap_addr(addr); return error ? error : addr; } EXPORT_SYMBOL(get_unmapped_area); /* Look up the first VMA which satisfies addr < vm_end, NULL if none. 
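 *
 * A hit does not imply containment, only addr < vm_end; callers that
 * need containment must test vm_start themselves:
 *
 *	vma = find_vma(mm, addr);
 *	if (vma && vma->vm_start <= addr)
 *		;	(addr falls inside vma)
 *	else if (vma)
 *		;	(vma is the first mapping wholly above addr)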
*/ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr) { struct rb_node *rb_node; struct vm_area_struct *vma; /* Check the cache first. */ vma = vmacache_find(mm, addr); if (likely(vma)) return vma; rb_node = mm->mm_rb.rb_node; while (rb_node) { struct vm_area_struct *tmp; tmp = rb_entry(rb_node, struct vm_area_struct, vm_rb); if (tmp->vm_end > addr) { vma = tmp; if (tmp->vm_start <= addr) break; rb_node = rb_node->rb_left; } else rb_node = rb_node->rb_right; } if (vma) vmacache_update(addr, vma); return vma; } EXPORT_SYMBOL(find_vma); /* * Same as find_vma, but also return a pointer to the previous VMA in *pprev. */ struct vm_area_struct * find_vma_prev(struct mm_struct *mm, unsigned long addr, struct vm_area_struct **pprev) { struct vm_area_struct *vma; vma = find_vma(mm, addr); if (vma) { *pprev = vma->vm_prev; } else { struct rb_node *rb_node = mm->mm_rb.rb_node; *pprev = NULL; while (rb_node) { *pprev = rb_entry(rb_node, struct vm_area_struct, vm_rb); rb_node = rb_node->rb_right; } } return vma; } /* * Verify that the stack growth is acceptable and * update accounting. This is shared with both the * grow-up and grow-down cases. */ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, unsigned long grow) { struct mm_struct *mm = vma->vm_mm; unsigned long new_start; /* address space limit tests */ if (!may_expand_vm(mm, vma->vm_flags, grow)) return -ENOMEM; /* Stack limit test */ if (size > rlimit(RLIMIT_STACK)) return -ENOMEM; /* mlock limit tests */ if (vma->vm_flags & VM_LOCKED) { unsigned long locked; unsigned long limit; locked = mm->locked_vm + grow; limit = rlimit(RLIMIT_MEMLOCK); limit >>= PAGE_SHIFT; if (locked > limit && !capable(CAP_IPC_LOCK)) return -ENOMEM; } /* Check to ensure the stack will not grow into a hugetlb-only region */ new_start = (vma->vm_flags & VM_GROWSUP) ? vma->vm_start : vma->vm_end - size; if (is_hugepage_only_range(vma->vm_mm, new_start, size)) return -EFAULT; /* * Overcommit.. This must be the final test, as it will * update security statistics. */ if (security_vm_enough_memory_mm(mm, grow)) return -ENOMEM; return 0; } #if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64) /* * PA-RISC uses this for its stack; IA64 for its Register Backing Store. * vma is the last one with address > vma->vm_end. Have to extend vma. */ int expand_upwards(struct vm_area_struct *vma, unsigned long address) { struct mm_struct *mm = vma->vm_mm; struct vm_area_struct *next; unsigned long gap_addr; int error = 0; if (!(vma->vm_flags & VM_GROWSUP)) return -EFAULT; /* Guard against exceeding limits of the address space. */ address &= PAGE_MASK; if (address >= (TASK_SIZE & PAGE_MASK)) return -ENOMEM; address += PAGE_SIZE; /* Enforce stack_guard_gap */ gap_addr = address + stack_guard_gap; /* Guard against overflow */ if (gap_addr < address || gap_addr > TASK_SIZE) gap_addr = TASK_SIZE; next = vma->vm_next; if (next && next->vm_start < gap_addr && (next->vm_flags & (VM_WRITE|VM_READ|VM_EXEC))) { if (!(next->vm_flags & VM_GROWSUP)) return -ENOMEM; /* Check that both stack segments have the same anon_vma? */ } /* We must make sure the anon_vma is allocated. */ if (unlikely(anon_vma_prepare(vma))) return -ENOMEM; /* * vma->vm_start/vm_end cannot change under us because the caller * is required to hold the mmap_sem in read mode. We need the * anon_vma lock to serialize against concurrent expand_stacks. 
*/ anon_vma_lock_write(vma->anon_vma); /* Somebody else might have raced and expanded it already */ if (address > vma->vm_end) { unsigned long size, grow; size = address - vma->vm_start; grow = (address - vma->vm_end) >> PAGE_SHIFT; error = -ENOMEM; if (vma->vm_pgoff + (size >> PAGE_SHIFT) >= vma->vm_pgoff) { error = acct_stack_growth(vma, size, grow); if (!error) { /* * vma_gap_update() doesn't support concurrent * updates, but we only hold a shared mmap_sem * lock here, so we need to protect against * concurrent vma expansions. * anon_vma_lock_write() doesn't help here, as * we don't guarantee that all growable vmas * in a mm share the same root anon vma. * So, we reuse mm->page_table_lock to guard * against concurrent vma expansions. */ spin_lock(&mm->page_table_lock); if (vma->vm_flags & VM_LOCKED) mm->locked_vm += grow; vm_stat_account(mm, vma->vm_flags, grow); anon_vma_interval_tree_pre_update_vma(vma); vma->vm_end = address; anon_vma_interval_tree_post_update_vma(vma); if (vma->vm_next) vma_gap_update(vma->vm_next); else mm->highest_vm_end = vm_end_gap(vma); spin_unlock(&mm->page_table_lock); perf_event_mmap(vma); } } } anon_vma_unlock_write(vma->anon_vma); khugepaged_enter_vma_merge(vma, vma->vm_flags); validate_mm(mm); return error; } #endif /* CONFIG_STACK_GROWSUP || CONFIG_IA64 */ /* * vma is the first one with address < vma->vm_start. Have to extend vma. */ int expand_downwards(struct vm_area_struct *vma, unsigned long address) { struct mm_struct *mm = vma->vm_mm; struct vm_area_struct *prev; int error = 0; address &= PAGE_MASK; if (address < mmap_min_addr) return -EPERM; /* Enforce stack_guard_gap */ prev = vma->vm_prev; /* Check that both stack segments have the same anon_vma? */ if (prev && !(prev->vm_flags & VM_GROWSDOWN) && (prev->vm_flags & (VM_WRITE|VM_READ|VM_EXEC))) { if (address - prev->vm_end < stack_guard_gap) return -ENOMEM; } /* We must make sure the anon_vma is allocated. */ if (unlikely(anon_vma_prepare(vma))) return -ENOMEM; /* * vma->vm_start/vm_end cannot change under us because the caller * is required to hold the mmap_sem in read mode. We need the * anon_vma lock to serialize against concurrent expand_stacks. */ anon_vma_lock_write(vma->anon_vma); /* Somebody else might have raced and expanded it already */ if (address < vma->vm_start) { unsigned long size, grow; size = vma->vm_end - address; grow = (vma->vm_start - address) >> PAGE_SHIFT; error = -ENOMEM; if (grow <= vma->vm_pgoff) { error = acct_stack_growth(vma, size, grow); if (!error) { /* * vma_gap_update() doesn't support concurrent * updates, but we only hold a shared mmap_sem * lock here, so we need to protect against * concurrent vma expansions. * anon_vma_lock_write() doesn't help here, as * we don't guarantee that all growable vmas * in a mm share the same root anon vma. * So, we reuse mm->page_table_lock to guard * against concurrent vma expansions. */ spin_lock(&mm->page_table_lock); if (vma->vm_flags & VM_LOCKED) mm->locked_vm += grow; vm_stat_account(mm, vma->vm_flags, grow); anon_vma_interval_tree_pre_update_vma(vma); vma->vm_start = address; vma->vm_pgoff -= grow; anon_vma_interval_tree_post_update_vma(vma); vma_gap_update(vma); spin_unlock(&mm->page_table_lock); perf_event_mmap(vma); } } } anon_vma_unlock_write(vma->anon_vma); khugepaged_enter_vma_merge(vma, vma->vm_flags); validate_mm(mm); return error; } /* enforced gap between the expanding stack and other mappings. 
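 *
 * The default set below is 256 pages (1 MB with 4 KiB pages); it can
 * be overridden from the kernel command line, in pages, e.g.:
 *
 *	stack_guard_gap=512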
*/ unsigned long stack_guard_gap = 256UL<<PAGE_SHIFT; static int __init cmdline_parse_stack_guard_gap(char *p) { unsigned long val; char *endptr; val = simple_strtoul(p, &endptr, 10); if (!*endptr) stack_guard_gap = val << PAGE_SHIFT; return 0; } __setup("stack_guard_gap=", cmdline_parse_stack_guard_gap); #ifdef CONFIG_STACK_GROWSUP int expand_stack(struct vm_area_struct *vma, unsigned long address) { return expand_upwards(vma, address); } struct vm_area_struct * find_extend_vma(struct mm_struct *mm, unsigned long addr) { struct vm_area_struct *vma, *prev; addr &= PAGE_MASK; vma = find_vma_prev(mm, addr, &prev); if (vma && (vma->vm_start <= addr)) return vma; if (!prev || expand_stack(prev, addr)) return NULL; if (prev->vm_flags & VM_LOCKED) populate_vma_page_range(prev, addr, prev->vm_end, NULL); return prev; } #else int expand_stack(struct vm_area_struct *vma, unsigned long address) { return expand_downwards(vma, address); } struct vm_area_struct * find_extend_vma(struct mm_struct *mm, unsigned long addr) { struct vm_area_struct *vma; unsigned long start; addr &= PAGE_MASK; vma = find_vma(mm, addr); if (!vma) return NULL; if (vma->vm_start <= addr) return vma; if (!(vma->vm_flags & VM_GROWSDOWN)) return NULL; start = vma->vm_start; if (expand_stack(vma, addr)) return NULL; if (vma->vm_flags & VM_LOCKED) populate_vma_page_range(vma, addr, start, NULL); return vma; } #endif EXPORT_SYMBOL_GPL(find_extend_vma); /* * Ok - we have the memory areas we should free on the vma list, * so release them, and do the vma updates. * * Called with the mm semaphore held. */ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma) { unsigned long nr_accounted = 0; /* Update high watermark before we lower total_vm */ update_hiwater_vm(mm); do { long nrpages = vma_pages(vma); if (vma->vm_flags & VM_ACCOUNT) nr_accounted += nrpages; vm_stat_account(mm, vma->vm_flags, -nrpages); vma = remove_vma(vma); } while (vma); vm_unacct_memory(nr_accounted); validate_mm(mm); } /* * Get rid of page table information in the indicated region. * * Called with the mm semaphore held. */ static void unmap_region(struct mm_struct *mm, struct vm_area_struct *vma, struct vm_area_struct *prev, unsigned long start, unsigned long end) { struct vm_area_struct *next = prev ? prev->vm_next : mm->mmap; struct mmu_gather tlb; lru_add_drain(); tlb_gather_mmu(&tlb, mm, start, end); update_hiwater_rss(mm); unmap_vmas(&tlb, vma, start, end); free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS, next ? next->vm_start : USER_PGTABLES_CEILING); tlb_finish_mmu(&tlb, start, end); } /* * Create a list of vma's touched by the unmap, removing them from the mm's * vma list as we go.. */ static void detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma, struct vm_area_struct *prev, unsigned long end) { struct vm_area_struct **insertion_point; struct vm_area_struct *tail_vma = NULL; insertion_point = (prev ? &prev->vm_next : &mm->mmap); vma->vm_prev = NULL; do { vma_rb_erase(vma, &mm->mm_rb); mm->map_count--; tail_vma = vma; vma = vma->vm_next; } while (vma && vma->vm_start < end); *insertion_point = vma; if (vma) { vma->vm_prev = prev; vma_gap_update(vma); } else mm->highest_vm_end = prev ? vm_end_gap(prev) : 0; tail_vma->vm_next = NULL; /* Kill the cache */ vmacache_invalidate(mm); } /* * __split_vma() bypasses sysctl_max_map_count checking. We use this where it * has already been checked or doesn't make sense to fail. 
*/ int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, int new_below) { struct vm_area_struct *new; int err; if (vma->vm_ops && vma->vm_ops->split) { err = vma->vm_ops->split(vma, addr); if (err) return err; } new = vm_area_dup(vma); if (!new) return -ENOMEM; if (new_below) new->vm_end = addr; else { new->vm_start = addr; new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT); } err = vma_dup_policy(vma, new); if (err) goto out_free_vma; err = anon_vma_clone(new, vma); if (err) goto out_free_mpol; if (new->vm_file) get_file(new->vm_file); if (new->vm_ops && new->vm_ops->open) new->vm_ops->open(new); if (new_below) err = vma_adjust(vma, addr, vma->vm_end, vma->vm_pgoff + ((addr - new->vm_start) >> PAGE_SHIFT), new); else err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new); /* Success. */ if (!err) return 0; /* Clean everything up if vma_adjust failed. */ if (new->vm_ops && new->vm_ops->close) new->vm_ops->close(new); if (new->vm_file) fput(new->vm_file); unlink_anon_vmas(new); out_free_mpol: mpol_put(vma_policy(new)); out_free_vma: vm_area_free(new); return err; } /* * Split a vma into two pieces at address 'addr', a new vma is allocated * either for the first part or the tail. */ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, int new_below) { if (mm->map_count >= sysctl_max_map_count) return -ENOMEM; return __split_vma(mm, vma, addr, new_below); } /* Munmap is split into 2 main parts -- this part which finds * what needs doing, and the areas themselves, which do the * work. This now handles partial unmappings. * Jeremy Fitzhardinge <jeremy@goop.org> */ int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len, struct list_head *uf, bool downgrade) { unsigned long end; struct vm_area_struct *vma, *prev, *last; if ((offset_in_page(start)) || start > TASK_SIZE || len > TASK_SIZE-start) return -EINVAL; len = PAGE_ALIGN(len); if (len == 0) return -EINVAL; /* Find the first overlapping VMA */ vma = find_vma(mm, start); if (!vma) return 0; prev = vma->vm_prev; /* we have start < vma->vm_end */ /* if it doesn't overlap, we have nothing.. */ end = start + len; if (vma->vm_start >= end) return 0; /* * If we need to split any vma, do it now to save pain later. * * Note: mremap's move_vma VM_ACCOUNT handling assumes a partially * unmapped vm_area_struct will remain in use: so lower split_vma * places tmp vma above, and higher split_vma places tmp vma below. */ if (start > vma->vm_start) { int error; /* * Make sure that map_count on return from munmap() will * not exceed its limit; but let map_count go just above * its limit temporarily, to help free resources as expected. */ if (end < vma->vm_end && mm->map_count >= sysctl_max_map_count) return -ENOMEM; error = __split_vma(mm, vma, start, 0); if (error) return error; prev = vma; } /* Does it split the last one? */ last = find_vma(mm, end); if (last && end > last->vm_start) { int error = __split_vma(mm, last, end, 1); if (error) return error; } vma = prev ? prev->vm_next : mm->mmap; if (unlikely(uf)) { /* * If userfaultfd_unmap_prep returns an error the vmas * will remain splitted, but userland will get a * highly unexpected error anyway. This is no * different than the case where the first of the two * __split_vma fails, but we don't undo the first * split, despite we could. This is unlikely enough * failure that it's not worth optimizing it for. 
*/ int error = userfaultfd_unmap_prep(vma, start, end, uf); if (error) return error; } /* * unlock any mlock()ed ranges before detaching vmas */ if (mm->locked_vm) { struct vm_area_struct *tmp = vma; while (tmp && tmp->vm_start < end) { if (tmp->vm_flags & VM_LOCKED) { mm->locked_vm -= vma_pages(tmp); munlock_vma_pages_all(tmp); } tmp = tmp->vm_next; } } /* Detach vmas from rbtree */ detach_vmas_to_be_unmapped(mm, vma, prev, end); /* * mpx unmap needs to be called with mmap_sem held for write. * It is safe to call it before unmap_region(). */ arch_unmap(mm, vma, start, end); if (downgrade) downgrade_write(&mm->mmap_sem); unmap_region(mm, vma, prev, start, end); /* Fix up all other VM information */ remove_vma_list(mm, vma); return downgrade ? 1 : 0; } int do_munmap(struct mm_struct *mm, unsigned long start, size_t len, struct list_head *uf) { return __do_munmap(mm, start, len, uf, false); } static int __vm_munmap(unsigned long start, size_t len, bool downgrade) { int ret; struct mm_struct *mm = current->mm; LIST_HEAD(uf); if (down_write_killable(&mm->mmap_sem)) return -EINTR; ret = __do_munmap(mm, start, len, &uf, downgrade); /* * Returning 1 indicates mmap_sem is downgraded. * But 1 is not legal return value of vm_munmap() and munmap(), reset * it to 0 before return. */ if (ret == 1) { up_read(&mm->mmap_sem); ret = 0; } else up_write(&mm->mmap_sem); userfaultfd_unmap_complete(mm, &uf); return ret; } int vm_munmap(unsigned long start, size_t len) { return __vm_munmap(start, len, false); } EXPORT_SYMBOL(vm_munmap); SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len) { profile_munmap(addr); return __vm_munmap(addr, len, true); } /* * Emulation of deprecated remap_file_pages() syscall. */ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size, unsigned long, prot, unsigned long, pgoff, unsigned long, flags) { struct mm_struct *mm = current->mm; struct vm_area_struct *vma; unsigned long populate = 0; unsigned long ret = -EINVAL; struct file *file; pr_warn_once("%s (%d) uses deprecated remap_file_pages() syscall. See Documentation/vm/remap_file_pages.rst.\n", current->comm, current->pid); if (prot) return ret; start = start & PAGE_MASK; size = size & PAGE_MASK; if (start + size <= start) return ret; /* Does pgoff wrap? */ if (pgoff + (size >> PAGE_SHIFT) < pgoff) return ret; if (down_write_killable(&mm->mmap_sem)) return -EINTR; vma = find_vma(mm, start); if (!vma || !(vma->vm_flags & VM_SHARED)) goto out; if (start < vma->vm_start) goto out; if (start + size > vma->vm_end) { struct vm_area_struct *next; for (next = vma->vm_next; next; next = next->vm_next) { /* hole between vmas ? */ if (next->vm_start != next->vm_prev->vm_end) goto out; if (next->vm_file != vma->vm_file) goto out; if (next->vm_flags != vma->vm_flags) goto out; if (start + size <= next->vm_end) break; } if (!next) goto out; } prot |= vma->vm_flags & VM_READ ? PROT_READ : 0; prot |= vma->vm_flags & VM_WRITE ? PROT_WRITE : 0; prot |= vma->vm_flags & VM_EXEC ? PROT_EXEC : 0; flags &= MAP_NONBLOCK; flags |= MAP_SHARED | MAP_FIXED | MAP_POPULATE; if (vma->vm_flags & VM_LOCKED) { struct vm_area_struct *tmp; flags |= MAP_LOCKED; /* drop PG_Mlocked flag for over-mapped range */ for (tmp = vma; tmp->vm_start >= start + size; tmp = tmp->vm_next) { /* * Split pmd and munlock page on the border * of the range. 
*/ vma_adjust_trans_huge(tmp, start, start + size, 0); munlock_vma_pages_range(tmp, max(tmp->vm_start, start), min(tmp->vm_end, start + size)); } } file = get_file(vma->vm_file); ret = do_mmap_pgoff(vma->vm_file, start, size, prot, flags, pgoff, &populate, NULL); fput(file); out: up_write(&mm->mmap_sem); if (populate) mm_populate(ret, populate); if (!IS_ERR_VALUE(ret)) ret = 0; return ret; } /* * this is really a simplified "do_mmap". it only handles * anonymous maps. eventually we may be able to do some * brk-specific accounting here. */ static int do_brk_flags(unsigned long addr, unsigned long len, unsigned long flags, struct list_head *uf) { struct mm_struct *mm = current->mm; struct vm_area_struct *vma, *prev; struct rb_node **rb_link, *rb_parent; pgoff_t pgoff = addr >> PAGE_SHIFT; int error; /* Until we need other flags, refuse anything except VM_EXEC. */ if ((flags & (~VM_EXEC)) != 0) return -EINVAL; flags |= VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags; error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED); if (offset_in_page(error)) return error; error = mlock_future_check(mm, mm->def_flags, len); if (error) return error; /* * Clear old maps. this also does some error checking for us */ while (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) { if (do_munmap(mm, addr, len, uf)) return -ENOMEM; } /* Check against address space limits *after* clearing old maps... */ if (!may_expand_vm(mm, flags, len >> PAGE_SHIFT)) return -ENOMEM; if (mm->map_count > sysctl_max_map_count) return -ENOMEM; if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT)) return -ENOMEM; /* Can we just expand an old private anonymous mapping? */ vma = vma_merge(mm, prev, addr, addr + len, flags, NULL, NULL, pgoff, NULL, NULL_VM_UFFD_CTX); if (vma) goto out; /* * create a vma struct for an anonymous mapping */ vma = vm_area_alloc(mm); if (!vma) { vm_unacct_memory(len >> PAGE_SHIFT); return -ENOMEM; } vma_set_anonymous(vma); vma->vm_start = addr; vma->vm_end = addr + len; vma->vm_pgoff = pgoff; vma->vm_flags = flags; vma->vm_page_prot = vm_get_page_prot(flags); vma_link(mm, vma, prev, rb_link, rb_parent); out: perf_event_mmap(vma); mm->total_vm += len >> PAGE_SHIFT; mm->data_vm += len >> PAGE_SHIFT; if (flags & VM_LOCKED) mm->locked_vm += (len >> PAGE_SHIFT); vma->vm_flags |= VM_SOFTDIRTY; return 0; } int vm_brk_flags(unsigned long addr, unsigned long request, unsigned long flags) { struct mm_struct *mm = current->mm; unsigned long len; int ret; bool populate; LIST_HEAD(uf); len = PAGE_ALIGN(request); if (len < request) return -ENOMEM; if (!len) return 0; if (down_write_killable(&mm->mmap_sem)) return -EINTR; ret = do_brk_flags(addr, len, flags, &uf); populate = ((mm->def_flags & VM_LOCKED) != 0); up_write(&mm->mmap_sem); userfaultfd_unmap_complete(mm, &uf); if (populate && !ret) mm_populate(addr, len); return ret; } EXPORT_SYMBOL(vm_brk_flags); int vm_brk(unsigned long addr, unsigned long len) { return vm_brk_flags(addr, len, 0); } EXPORT_SYMBOL(vm_brk); /* Release all mmaps. */ void exit_mmap(struct mm_struct *mm) { struct mmu_gather tlb; struct vm_area_struct *vma; unsigned long nr_accounted = 0; /* mm's last user has gone, and its about to be pulled down */ mmu_notifier_release(mm); if (unlikely(mm_is_oom_victim(mm))) { /* * Manually reap the mm to free as much memory as possible. * Then, as the oom reaper does, set MMF_OOM_SKIP to disregard * this mm from further consideration. 
Taking mm->mmap_sem for * write after setting MMF_OOM_SKIP will guarantee that the oom * reaper will not run on this mm again after mmap_sem is * dropped. * * Nothing can be holding mm->mmap_sem here and the above call * to mmu_notifier_release(mm) ensures mmu notifier callbacks in * __oom_reap_task_mm() will not block. * * This needs to be done before calling munlock_vma_pages_all(), * which clears VM_LOCKED, otherwise the oom reaper cannot * reliably test it. */ (void)__oom_reap_task_mm(mm); set_bit(MMF_OOM_SKIP, &mm->flags); down_write(&mm->mmap_sem); up_write(&mm->mmap_sem); } if (mm->locked_vm) { vma = mm->mmap; while (vma) { if (vma->vm_flags & VM_LOCKED) munlock_vma_pages_all(vma); vma = vma->vm_next; } } arch_exit_mmap(mm); vma = mm->mmap; if (!vma) /* Can happen if dup_mmap() received an OOM */ return; lru_add_drain(); flush_cache_mm(mm); tlb_gather_mmu(&tlb, mm, 0, -1); /* update_hiwater_rss(mm) here? but nobody should be looking */ /* Use -1 here to ensure all VMAs in the mm are unmapped */ unmap_vmas(&tlb, vma, 0, -1); free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, USER_PGTABLES_CEILING); tlb_finish_mmu(&tlb, 0, -1); /* * Walk the list again, actually closing and freeing it, * with preemption enabled, without holding any MM locks. */ while (vma) { if (vma->vm_flags & VM_ACCOUNT) nr_accounted += vma_pages(vma); vma = remove_vma(vma); } vm_unacct_memory(nr_accounted); } /* Insert vm structure into process list sorted by address * and into the inode's i_mmap tree. If vm_file is non-NULL * then i_mmap_rwsem is taken here. */ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma) { struct vm_area_struct *prev; struct rb_node **rb_link, *rb_parent; if (find_vma_links(mm, vma->vm_start, vma->vm_end, &prev, &rb_link, &rb_parent)) return -ENOMEM; if ((vma->vm_flags & VM_ACCOUNT) && security_vm_enough_memory_mm(mm, vma_pages(vma))) return -ENOMEM; /* * The vm_pgoff of a purely anonymous vma should be irrelevant * until its first write fault, when page's anon_vma and index * are set. But now set the vm_pgoff it will almost certainly * end up with (unless mremap moves it elsewhere before that * first wfault), so /proc/pid/maps tells a consistent story. * * By setting it to reflect the virtual start address of the * vma, merges and splits can happen in a seamless way, just * using the existing file pgoff checks and manipulations. * Similarly in do_mmap_pgoff and in do_brk. */ if (vma_is_anonymous(vma)) { BUG_ON(vma->anon_vma); vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT; } vma_link(mm, vma, prev, rb_link, rb_parent); return 0; } /* * Copy the vma structure to a new location in the same mm, * prior to moving page table entries, to effect an mremap move. */ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap, unsigned long addr, unsigned long len, pgoff_t pgoff, bool *need_rmap_locks) { struct vm_area_struct *vma = *vmap; unsigned long vma_start = vma->vm_start; struct mm_struct *mm = vma->vm_mm; struct vm_area_struct *new_vma, *prev; struct rb_node **rb_link, *rb_parent; bool faulted_in_anon_vma = true; /* * If anonymous vma has not yet been faulted, update new pgoff * to match new location, to increase its chance of merging. 
*/ if (unlikely(vma_is_anonymous(vma) && !vma->anon_vma)) { pgoff = addr >> PAGE_SHIFT; faulted_in_anon_vma = false; } if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) return NULL; /* should never get here */ new_vma = vma_merge(mm, prev, addr, addr + len, vma->vm_flags, vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma), vma->vm_userfaultfd_ctx); if (new_vma) { /* * Source vma may have been merged into new_vma */ if (unlikely(vma_start >= new_vma->vm_start && vma_start < new_vma->vm_end)) { /* * The only way we can get a vma_merge with * self during an mremap is if the vma hasn't * been faulted in yet and we were allowed to * reset the dst vma->vm_pgoff to the * destination address of the mremap to allow * the merge to happen. mremap must change the * vm_pgoff linearity between src and dst vmas * (in turn preventing a vma_merge) to be * safe. It is only safe to keep the vm_pgoff * linear if there are no pages mapped yet. */ VM_BUG_ON_VMA(faulted_in_anon_vma, new_vma); *vmap = vma = new_vma; } *need_rmap_locks = (new_vma->vm_pgoff <= vma->vm_pgoff); } else { new_vma = vm_area_dup(vma); if (!new_vma) goto out; new_vma->vm_start = addr; new_vma->vm_end = addr + len; new_vma->vm_pgoff = pgoff; if (vma_dup_policy(vma, new_vma)) goto out_free_vma; if (anon_vma_clone(new_vma, vma)) goto out_free_mempol; if (new_vma->vm_file) get_file(new_vma->vm_file); if (new_vma->vm_ops && new_vma->vm_ops->open) new_vma->vm_ops->open(new_vma); vma_link(mm, new_vma, prev, rb_link, rb_parent); *need_rmap_locks = false; } return new_vma; out_free_mempol: mpol_put(vma_policy(new_vma)); out_free_vma: vm_area_free(new_vma); out: return NULL; } /* * Return true if the calling process may expand its vm space by the passed * number of pages */ bool may_expand_vm(struct mm_struct *mm, vm_flags_t flags, unsigned long npages) { if (mm->total_vm + npages > rlimit(RLIMIT_AS) >> PAGE_SHIFT) return false; if (is_data_mapping(flags) && mm->data_vm + npages > rlimit(RLIMIT_DATA) >> PAGE_SHIFT) { /* Workaround for Valgrind */ if (rlimit(RLIMIT_DATA) == 0 && mm->data_vm + npages <= rlimit_max(RLIMIT_DATA) >> PAGE_SHIFT) return true; pr_warn_once("%s (%d): VmData %lu exceed data ulimit %lu. Update limits%s.\n", current->comm, current->pid, (mm->data_vm + npages) << PAGE_SHIFT, rlimit(RLIMIT_DATA), ignore_rlimit_data ? "" : " or use boot option ignore_rlimit_data"); if (!ignore_rlimit_data) return false; } return true; } void vm_stat_account(struct mm_struct *mm, vm_flags_t flags, long npages) { mm->total_vm += npages; if (is_exec_mapping(flags)) mm->exec_vm += npages; else if (is_stack_mapping(flags)) mm->stack_vm += npages; else if (is_data_mapping(flags)) mm->data_vm += npages; } static vm_fault_t special_mapping_fault(struct vm_fault *vmf); /* * Having a close hook prevents vma merging regardless of flags. 
*/ static void special_mapping_close(struct vm_area_struct *vma) { } static const char *special_mapping_name(struct vm_area_struct *vma) { return ((struct vm_special_mapping *)vma->vm_private_data)->name; } static int special_mapping_mremap(struct vm_area_struct *new_vma) { struct vm_special_mapping *sm = new_vma->vm_private_data; if (WARN_ON_ONCE(current->mm != new_vma->vm_mm)) return -EFAULT; if (sm->mremap) return sm->mremap(sm, new_vma); return 0; } static const struct vm_operations_struct special_mapping_vmops = { .close = special_mapping_close, .fault = special_mapping_fault, .mremap = special_mapping_mremap, .name = special_mapping_name, }; static const struct vm_operations_struct legacy_special_mapping_vmops = { .close = special_mapping_close, .fault = special_mapping_fault, }; static vm_fault_t special_mapping_fault(struct vm_fault *vmf) { struct vm_area_struct *vma = vmf->vma; pgoff_t pgoff; struct page **pages; if (vma->vm_ops == &legacy_special_mapping_vmops) { pages = vma->vm_private_data; } else { struct vm_special_mapping *sm = vma->vm_private_data; if (sm->fault) return sm->fault(sm, vmf->vma, vmf); pages = sm->pages; } for (pgoff = vmf->pgoff; pgoff && *pages; ++pages) pgoff--; if (*pages) { struct page *page = *pages; get_page(page); vmf->page = page; return 0; } return VM_FAULT_SIGBUS; } static struct vm_area_struct *__install_special_mapping( struct mm_struct *mm, unsigned long addr, unsigned long len, unsigned long vm_flags, void *priv, const struct vm_operations_struct *ops) { int ret; struct vm_area_struct *vma; vma = vm_area_alloc(mm); if (unlikely(vma == NULL)) return ERR_PTR(-ENOMEM); vma->vm_start = addr; vma->vm_end = addr + len; vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND | VM_SOFTDIRTY; vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); vma->vm_ops = ops; vma->vm_private_data = priv; ret = insert_vm_struct(mm, vma); if (ret) goto out; vm_stat_account(mm, vma->vm_flags, len >> PAGE_SHIFT); perf_event_mmap(vma); return vma; out: vm_area_free(vma); return ERR_PTR(ret); } bool vma_is_special_mapping(const struct vm_area_struct *vma, const struct vm_special_mapping *sm) { return vma->vm_private_data == sm && (vma->vm_ops == &special_mapping_vmops || vma->vm_ops == &legacy_special_mapping_vmops); } /* * Called with mm->mmap_sem held for writing. * Insert a new vma covering the given region, with the given flags. * Its pages are supplied by the given array of struct page *. * The array can be shorter than len >> PAGE_SHIFT if it's null-terminated. * The region past the last page supplied will always produce SIGBUS. * The array pointer and the pages it points to are assumed to stay alive * for as long as this mapping might exist. 
*/ struct vm_area_struct *_install_special_mapping( struct mm_struct *mm, unsigned long addr, unsigned long len, unsigned long vm_flags, const struct vm_special_mapping *spec) { return __install_special_mapping(mm, addr, len, vm_flags, (void *)spec, &special_mapping_vmops); } int install_special_mapping(struct mm_struct *mm, unsigned long addr, unsigned long len, unsigned long vm_flags, struct page **pages) { struct vm_area_struct *vma = __install_special_mapping( mm, addr, len, vm_flags, (void *)pages, &legacy_special_mapping_vmops); return PTR_ERR_OR_ZERO(vma); } static DEFINE_MUTEX(mm_all_locks_mutex); static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma) { if (!test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) { /* * The LSB of head.next can't change from under us * because we hold the mm_all_locks_mutex. */ down_write_nest_lock(&anon_vma->root->rwsem, &mm->mmap_sem); /* * We can safely modify head.next after taking the * anon_vma->root->rwsem. If some other vma in this mm shares * the same anon_vma we won't take it again. * * No need of atomic instructions here, head.next * can't change from under us thanks to the * anon_vma->root->rwsem. */ if (__test_and_set_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) BUG(); } } static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping) { if (!test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) { /* * AS_MM_ALL_LOCKS can't change from under us because * we hold the mm_all_locks_mutex. * * Operations on ->flags have to be atomic because * even if AS_MM_ALL_LOCKS is stable thanks to the * mm_all_locks_mutex, there may be other cpus * changing other bitflags in parallel to us. */ if (test_and_set_bit(AS_MM_ALL_LOCKS, &mapping->flags)) BUG(); down_write_nest_lock(&mapping->i_mmap_rwsem, &mm->mmap_sem); } } /* * This operation locks against the VM for all pte/vma/mm related * operations that could ever happen on a certain mm. This includes * vmtruncate, try_to_unmap, and all page faults. * * The caller must take the mmap_sem in write mode before calling * mm_take_all_locks(). The caller isn't allowed to release the * mmap_sem until mm_drop_all_locks() returns. * * mmap_sem in write mode is required in order to block all operations * that could modify pagetables and free pages without need of * altering the vma layout. It's also needed in write mode to avoid new * anon_vmas to be associated with existing vmas. * * A single task can't take more than one mm_take_all_locks() in a row * or it would deadlock. * * The LSB in anon_vma->rb_root.rb_node and the AS_MM_ALL_LOCKS bitflag in * mapping->flags avoid to take the same lock twice, if more than one * vma in this mm is backed by the same anon_vma or address_space. * * We take locks in following order, accordingly to comment at beginning * of mm/rmap.c: * - all hugetlbfs_i_mmap_rwsem_key locks (aka mapping->i_mmap_rwsem for * hugetlb mapping); * - all i_mmap_rwsem locks; * - all anon_vma->rwseml * * We can take all locks within these types randomly because the VM code * doesn't nest them and we protected from parallel mm_take_all_locks() by * mm_all_locks_mutex. * * mm_take_all_locks() and mm_drop_all_locks are expensive operations * that may have to take thousand of locks. * * mm_take_all_locks() can fail if it's interrupted by signals. 
*/ int mm_take_all_locks(struct mm_struct *mm) { struct vm_area_struct *vma; struct anon_vma_chain *avc; BUG_ON(down_read_trylock(&mm->mmap_sem)); mutex_lock(&mm_all_locks_mutex); for (vma = mm->mmap; vma; vma = vma->vm_next) { if (signal_pending(current)) goto out_unlock; if (vma->vm_file && vma->vm_file->f_mapping && is_vm_hugetlb_page(vma)) vm_lock_mapping(mm, vma->vm_file->f_mapping); } for (vma = mm->mmap; vma; vma = vma->vm_next) { if (signal_pending(current)) goto out_unlock; if (vma->vm_file && vma->vm_file->f_mapping && !is_vm_hugetlb_page(vma)) vm_lock_mapping(mm, vma->vm_file->f_mapping); } for (vma = mm->mmap; vma; vma = vma->vm_next) { if (signal_pending(current)) goto out_unlock; if (vma->anon_vma) list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) vm_lock_anon_vma(mm, avc->anon_vma); } return 0; out_unlock: mm_drop_all_locks(mm); return -EINTR; } static void vm_unlock_anon_vma(struct anon_vma *anon_vma) { if (test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) { /* * The LSB of head.next can't change to 0 from under * us because we hold the mm_all_locks_mutex. * * We must however clear the bitflag before unlocking * the vma so the users using the anon_vma->rb_root will * never see our bitflag. * * No need of atomic instructions here, head.next * can't change from under us until we release the * anon_vma->root->rwsem. */ if (!__test_and_clear_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) BUG(); anon_vma_unlock_write(anon_vma); } } static void vm_unlock_mapping(struct address_space *mapping) { if (test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) { /* * AS_MM_ALL_LOCKS can't change to 0 from under us * because we hold the mm_all_locks_mutex. */ i_mmap_unlock_write(mapping); if (!test_and_clear_bit(AS_MM_ALL_LOCKS, &mapping->flags)) BUG(); } } /* * The mmap_sem cannot be released by the caller until * mm_drop_all_locks() returns. */ void mm_drop_all_locks(struct mm_struct *mm) { struct vm_area_struct *vma; struct anon_vma_chain *avc; BUG_ON(down_read_trylock(&mm->mmap_sem)); BUG_ON(!mutex_is_locked(&mm_all_locks_mutex)); for (vma = mm->mmap; vma; vma = vma->vm_next) { if (vma->anon_vma) list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) vm_unlock_anon_vma(avc->anon_vma); if (vma->vm_file && vma->vm_file->f_mapping) vm_unlock_mapping(vma->vm_file->f_mapping); } mutex_unlock(&mm_all_locks_mutex); } /* * initialise the percpu counter for VM */ void __init mmap_init(void) { int ret; ret = percpu_counter_init(&vm_committed_as, 0, GFP_KERNEL); VM_BUG_ON(ret); } /* * Initialise sysctl_user_reserve_kbytes. * * This is intended to prevent a user from starting a single memory hogging * process, such that they cannot recover (kill the hog) in OVERCOMMIT_NEVER * mode. * * The default value is min(3% of free memory, 128MB) * 128MB is enough to recover with sshd/login, bash, and top/kill. */ static int init_user_reserve(void) { unsigned long free_kbytes; free_kbytes = global_zone_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10); sysctl_user_reserve_kbytes = min(free_kbytes / 32, 1UL << 17); return 0; } subsys_initcall(init_user_reserve); /* * Initialise sysctl_admin_reserve_kbytes. * * The purpose of sysctl_admin_reserve_kbytes is to allow the sys admin * to log in and kill a memory hogging process. * * Systems with more than 256MB will reserve 8MB, enough to recover * with sshd, bash, and top in OVERCOMMIT_GUESS. Smaller systems will * only reserve 3% of free pages by default. 
*/ static int init_admin_reserve(void) { unsigned long free_kbytes; free_kbytes = global_zone_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10); sysctl_admin_reserve_kbytes = min(free_kbytes / 32, 1UL << 13); return 0; } subsys_initcall(init_admin_reserve); /* * Reinititalise user and admin reserves if memory is added or removed. * * The default user reserve max is 128MB, and the default max for the * admin reserve is 8MB. These are usually, but not always, enough to * enable recovery from a memory hogging process using login/sshd, a shell, * and tools like top. It may make sense to increase or even disable the * reserve depending on the existence of swap or variations in the recovery * tools. So, the admin may have changed them. * * If memory is added and the reserves have been eliminated or increased above * the default max, then we'll trust the admin. * * If memory is removed and there isn't enough free memory, then we * need to reset the reserves. * * Otherwise keep the reserve set by the admin. */ static int reserve_mem_notifier(struct notifier_block *nb, unsigned long action, void *data) { unsigned long tmp, free_kbytes; switch (action) { case MEM_ONLINE: /* Default max is 128MB. Leave alone if modified by operator. */ tmp = sysctl_user_reserve_kbytes; if (0 < tmp && tmp < (1UL << 17)) init_user_reserve(); /* Default max is 8MB. Leave alone if modified by operator. */ tmp = sysctl_admin_reserve_kbytes; if (0 < tmp && tmp < (1UL << 13)) init_admin_reserve(); break; case MEM_OFFLINE: free_kbytes = global_zone_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10); if (sysctl_user_reserve_kbytes > free_kbytes) { init_user_reserve(); pr_info("vm.user_reserve_kbytes reset to %lu\n", sysctl_user_reserve_kbytes); } if (sysctl_admin_reserve_kbytes > free_kbytes) { init_admin_reserve(); pr_info("vm.admin_reserve_kbytes reset to %lu\n", sysctl_admin_reserve_kbytes); } break; default: break; } return NOTIFY_OK; } static struct notifier_block reserve_mem_nb = { .notifier_call = reserve_mem_notifier, }; static int __meminit init_reserve_notifier(void) { if (register_hotmemory_notifier(&reserve_mem_nb)) pr_err("Failed registering memory add/remove notifier for admin reserve\n"); return 0; } subsys_initcall(init_reserve_notifier);
int expand_downwards(struct vm_area_struct *vma, unsigned long address) { struct mm_struct *mm = vma->vm_mm; struct vm_area_struct *prev; int error; address &= PAGE_MASK; error = security_mmap_addr(address); if (error) return error; /* Enforce stack_guard_gap */ prev = vma->vm_prev; /* Check that both stack segments have the same anon_vma? */ if (prev && !(prev->vm_flags & VM_GROWSDOWN) && (prev->vm_flags & (VM_WRITE|VM_READ|VM_EXEC))) { if (address - prev->vm_end < stack_guard_gap) return -ENOMEM; } /* We must make sure the anon_vma is allocated. */ if (unlikely(anon_vma_prepare(vma))) return -ENOMEM; /* * vma->vm_start/vm_end cannot change under us because the caller * is required to hold the mmap_sem in read mode. We need the * anon_vma lock to serialize against concurrent expand_stacks. */ anon_vma_lock_write(vma->anon_vma); /* Somebody else might have raced and expanded it already */ if (address < vma->vm_start) { unsigned long size, grow; size = vma->vm_end - address; grow = (vma->vm_start - address) >> PAGE_SHIFT; error = -ENOMEM; if (grow <= vma->vm_pgoff) { error = acct_stack_growth(vma, size, grow); if (!error) { /* * vma_gap_update() doesn't support concurrent * updates, but we only hold a shared mmap_sem * lock here, so we need to protect against * concurrent vma expansions. * anon_vma_lock_write() doesn't help here, as * we don't guarantee that all growable vmas * in a mm share the same root anon vma. * So, we reuse mm->page_table_lock to guard * against concurrent vma expansions. */ spin_lock(&mm->page_table_lock); if (vma->vm_flags & VM_LOCKED) mm->locked_vm += grow; vm_stat_account(mm, vma->vm_flags, grow); anon_vma_interval_tree_pre_update_vma(vma); vma->vm_start = address; vma->vm_pgoff -= grow; anon_vma_interval_tree_post_update_vma(vma); vma_gap_update(vma); spin_unlock(&mm->page_table_lock); perf_event_mmap(vma); } } } anon_vma_unlock_write(vma->anon_vma); khugepaged_enter_vma_merge(vma, vma->vm_flags); validate_mm(mm); return error; }
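func_before above is the vulnerable expand_downwards(): downward stack growth is gated on security_mmap_addr(), whose default path (cap_mmap_addr()) waives the vm.mmap_min_addr floor for tasks holding CAP_SYS_RAWIO. Such a task could therefore grow its stack into page zero and stage attacker-controlled data there, which is what turns an otherwise harmless kernel NULL-pointer dereference into an exploitable primitive. The sketch below is a minimal userspace probe of that floor, an illustrative demo under stated assumptions, not kernel code and not part of the fix: on a stock configuration (vm.mmap_min_addr is typically 4096 or 65536) the MAP_FIXED mapping of page zero fails with EPERM.

/* Hypothetical userspace probe: attempt to map the zero page and
 * report whether the kernel's vm.mmap_min_addr floor rejected it.
 * Expected on default configs: EPERM ("Operation not permitted"). */
#define _GNU_SOURCE
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	void *p = mmap((void *)0, 4096, PROT_READ | PROT_WRITE,
	               MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
	if (p == MAP_FAILED) {
		printf("mmap(0): %s (mmap_min_addr floor held)\n",
		       strerror(errno));
		return 0;
	}
	printf("mapped page zero at %p: floor not enforced!\n", p);
	munmap(p, 4096);
	return 0;
}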
int expand_downwards(struct vm_area_struct *vma, unsigned long address) { struct mm_struct *mm = vma->vm_mm; struct vm_area_struct *prev; int error = 0; address &= PAGE_MASK; if (address < mmap_min_addr) return -EPERM; /* Enforce stack_guard_gap */ prev = vma->vm_prev; /* Check that both stack segments have the same anon_vma? */ if (prev && !(prev->vm_flags & VM_GROWSDOWN) && (prev->vm_flags & (VM_WRITE|VM_READ|VM_EXEC))) { if (address - prev->vm_end < stack_guard_gap) return -ENOMEM; } /* We must make sure the anon_vma is allocated. */ if (unlikely(anon_vma_prepare(vma))) return -ENOMEM; /* * vma->vm_start/vm_end cannot change under us because the caller * is required to hold the mmap_sem in read mode. We need the * anon_vma lock to serialize against concurrent expand_stacks. */ anon_vma_lock_write(vma->anon_vma); /* Somebody else might have raced and expanded it already */ if (address < vma->vm_start) { unsigned long size, grow; size = vma->vm_end - address; grow = (vma->vm_start - address) >> PAGE_SHIFT; error = -ENOMEM; if (grow <= vma->vm_pgoff) { error = acct_stack_growth(vma, size, grow); if (!error) { /* * vma_gap_update() doesn't support concurrent * updates, but we only hold a shared mmap_sem * lock here, so we need to protect against * concurrent vma expansions. * anon_vma_lock_write() doesn't help here, as * we don't guarantee that all growable vmas * in a mm share the same root anon vma. * So, we reuse mm->page_table_lock to guard * against concurrent vma expansions. */ spin_lock(&mm->page_table_lock); if (vma->vm_flags & VM_LOCKED) mm->locked_vm += grow; vm_stat_account(mm, vma->vm_flags, grow); anon_vma_interval_tree_pre_update_vma(vma); vma->vm_start = address; vma->vm_pgoff -= grow; anon_vma_interval_tree_post_update_vma(vma); vma_gap_update(vma); spin_unlock(&mm->page_table_lock); perf_event_mmap(vma); } } } anon_vma_unlock_write(vma->anon_vma); khugepaged_enter_vma_merge(vma, vma->vm_flags); validate_mm(mm); return error; }
{'added': [(2429, '\tint error = 0;'), (2432, '\tif (address < mmap_min_addr)'), (2433, '\t\treturn -EPERM;')], 'deleted': [(2429, '\tint error;'), (2432, '\terror = security_mmap_addr(address);'), (2433, '\tif (error)'), (2434, '\t\treturn error;')]}
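Read against the two functions above, the diff is three lines added and four removed around lines 2429 through 2434 of mm/mmap.c: error is now initialised to 0 at its declaration, and the security_mmap_addr(address) hook call is replaced by an unconditional test, if (address < mmap_min_addr) return -EPERM;. The record's metadata below ties the change to CVE-2019-9213 and CWE-476 (NULL pointer dereference): enforcing the floor directly closes the capability-based bypass without consulting the LSM hook.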
3
4
2323
14373
https://github.com/torvalds/linux
CVE-2019-9213
['CWE-476']
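One more sketch before the next record: the arithmetic in expand_downwards() is entirely unsigned, so both of its pre-growth tests are phrased to avoid wraparound. The toy model below mirrors the two checks quoted in the record, the guard-gap distance test against the previous mapping's end and the vm_pgoff underflow guard; the constants are assumptions chosen for illustration and the helper names (gap_ok, pgoff_ok) are hypothetical, not kernel API.

/* Userspace model of two checks from expand_downwards() above. */
#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT 12

static unsigned long stack_guard_gap = 256UL << PAGE_SHIFT; /* as in the record */

/* Mirrors: if (address - prev->vm_end < stack_guard_gap) return -ENOMEM;
 * In the caller's context address sits at or above prev->vm_end, so the
 * unsigned subtraction cannot wrap; the kernel relies on the same order. */
static int gap_ok(unsigned long address, unsigned long prev_vm_end)
{
	return address - prev_vm_end >= stack_guard_gap;
}

/* Mirrors: grow <= vma->vm_pgoff, guarding the later vm_pgoff -= grow. */
static int pgoff_ok(unsigned long vm_pgoff, unsigned long grow_pages)
{
	return grow_pages <= vm_pgoff;
}

int main(void)
{
	unsigned long prev_end = 0x7f0000000000UL;
	unsigned long addr_bad = prev_end + (255UL << PAGE_SHIFT); /* inside gap */
	unsigned long addr_ok  = prev_end + (257UL << PAGE_SHIFT); /* past gap  */

	assert(!gap_ok(addr_bad, prev_end));
	assert(gap_ok(addr_ok, prev_end));
	assert(!pgoff_ok(4, 5)); /* growth would underflow vm_pgoff */
	assert(pgoff_ok(5, 5));  /* growth exactly consumes the offset */
	puts("guard-gap and pgoff checks behave as in the source above");
	return 0;
}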
cryptoflex-tool.c
read_private_key
/* * cryptoflex-tool.c: Tool for doing various Cryptoflex related stuff * * Copyright (C) 2001 Juha Yrjölä <juha.yrjola@iki.fi> * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include "config.h" #include "libopensc/sc-ossl-compat.h" #include <openssl/bn.h> #include <openssl/rsa.h> #include <openssl/x509.h> #include <openssl/pem.h> #include <openssl/err.h> #include "libopensc/pkcs15.h" #include "common/compat_strlcpy.h" #include "common/compat_strlcat.h" #include "util.h" static const char *app_name = "cryptoflex-tool"; static char * opt_reader = NULL; static int opt_wait = 0; static int opt_key_num = 1, opt_pin_num = -1; static int verbose = 0; static int opt_exponent = 3; static int opt_mod_length = 1024; static int opt_key_count = 1; static int opt_pin_attempts = 10; static int opt_puk_attempts = 10; static const char *opt_appdf = NULL, *opt_prkeyf = NULL, *opt_pubkeyf = NULL; static u8 *pincode = NULL; static const struct option options[] = { { "list-keys", 0, NULL, 'l' }, { "create-key-files", 1, NULL, 'c' }, { "create-pin-file", 1, NULL, 'P' }, { "generate-key", 0, NULL, 'g' }, { "read-key", 0, NULL, 'R' }, { "verify-pin", 0, NULL, 'V' }, { "key-num", 1, NULL, 'k' }, { "app-df", 1, NULL, 'a' }, { "prkey-file", 1, NULL, 'p' }, { "pubkey-file", 1, NULL, 'u' }, { "exponent", 1, NULL, 'e' }, { "modulus-length", 1, NULL, 'm' }, { "reader", 1, NULL, 'r' }, { "wait", 0, NULL, 'w' }, { "verbose", 0, NULL, 'v' }, { NULL, 0, NULL, 0 } }; static const char *option_help[] = { "Lists all keys in a public key file", "Creates new RSA key files for <arg> keys", "Creates a new CHV<arg> file", "Generates a new RSA key pair", "Reads a public key from the card", "Verifies CHV1 before issuing commands", "Selects which key number to operate on [1]", "Selects the DF to operate in", "Private key file", "Public key file", "The RSA exponent to use in key generation [3]", "Modulus length to use in key generation [1024]", "Uses reader <arg>", "Wait for card insertion", "Verbose operation. 
Use several times to enable debug output.", }; static sc_context_t *ctx = NULL; static sc_card_t *card = NULL; static char *getpin(const char *prompt) { char *buf, pass[20]; int i; printf("%s", prompt); fflush(stdout); if (fgets(pass, 20, stdin) == NULL) return NULL; for (i = 0; i < 20; i++) if (pass[i] == '\n') pass[i] = 0; if (strlen(pass) == 0) return NULL; buf = malloc(8); if (buf == NULL) return NULL; if (strlen(pass) > 8) { fprintf(stderr, "PIN code too long.\n"); free(buf); return NULL; } memset(buf, 0, 8); strlcpy(buf, pass, 8); return buf; } static int verify_pin(int pin) { char prompt[50]; int r, tries_left = -1; if (pincode == NULL) { sprintf(prompt, "Please enter CHV%d: ", pin); pincode = (u8 *) getpin(prompt); if (pincode == NULL || strlen((char *) pincode) == 0) return -1; } if (pin != 1 && pin != 2) return -3; r = sc_verify(card, SC_AC_CHV, pin, pincode, 8, &tries_left); if (r) { memset(pincode, 0, 8); free(pincode); pincode = NULL; fprintf(stderr, "PIN code verification failed: %s\n", sc_strerror(r)); return -1; } return 0; } static int select_app_df(void) { sc_path_t path; sc_file_t *file; char str[80]; int r; strcpy(str, "3F00"); if (opt_appdf != NULL) strlcat(str, opt_appdf, sizeof str); sc_format_path(str, &path); r = sc_select_file(card, &path, &file); if (r) { fprintf(stderr, "Unable to select application DF: %s\n", sc_strerror(r)); return -1; } if (file->type != SC_FILE_TYPE_DF) { fprintf(stderr, "Selected application DF is not a DF.\n"); return -1; } sc_file_free(file); if (opt_pin_num >= 0) return verify_pin(opt_pin_num); else return 0; } static void invert_buf(u8 *dest, const u8 *src, size_t c) { size_t i; for (i = 0; i < c; i++) dest[i] = src[c-1-i]; } static BIGNUM * cf2bn(const u8 *buf, size_t bufsize, BIGNUM *num) { u8 tmp[512]; invert_buf(tmp, buf, bufsize); return BN_bin2bn(tmp, bufsize, num); } static int bn2cf(const BIGNUM *num, u8 *buf) { u8 tmp[512]; int r; r = BN_bn2bin(num, tmp); if (r <= 0) return r; invert_buf(buf, tmp, r); return r; } static int parse_public_key(const u8 *key, size_t keysize, RSA *rsa) { const u8 *p = key; BIGNUM *n, *e; int base; base = (keysize - 7) / 5; if (base != 32 && base != 48 && base != 64 && base != 128) { fprintf(stderr, "Invalid public key.\n"); return -1; } p += 3; n = BN_new(); if (n == NULL) return -1; cf2bn(p, 2 * base, n); p += 2 * base; p += base; p += 2 * base; e = BN_new(); if (e == NULL) return -1; cf2bn(p, 4, e); if (RSA_set0_key(rsa, n, e, NULL) != 1) return -1; return 0; } static int gen_d(RSA *rsa) { BN_CTX *bnctx; BIGNUM *r0, *r1, *r2; const BIGNUM *rsa_p, *rsa_q, *rsa_n, *rsa_e, *rsa_d; BIGNUM *rsa_n_new, *rsa_e_new, *rsa_d_new; bnctx = BN_CTX_new(); if (bnctx == NULL) return -1; BN_CTX_start(bnctx); r0 = BN_CTX_get(bnctx); r1 = BN_CTX_get(bnctx); r2 = BN_CTX_get(bnctx); RSA_get0_key(rsa, &rsa_n, &rsa_e, &rsa_d); RSA_get0_factors(rsa, &rsa_p, &rsa_q); BN_sub(r1, rsa_p, BN_value_one()); BN_sub(r2, rsa_q, BN_value_one()); BN_mul(r0, r1, r2, bnctx); if ((rsa_d_new = BN_mod_inverse(NULL, rsa_e, r0, bnctx)) == NULL) { fprintf(stderr, "BN_mod_inverse() failed.\n"); return -1; } /* RSA_set0_key will free previous value, and replace with new value * Thus the need to copy the contents of rsa_n and rsa_e */ rsa_n_new = BN_dup(rsa_n); rsa_e_new = BN_dup(rsa_e); if (RSA_set0_key(rsa, rsa_n_new, rsa_e_new, rsa_d_new) != 1) return -1; BN_CTX_end(bnctx); BN_CTX_free(bnctx); return 0; } static int parse_private_key(const u8 *key, size_t keysize, RSA *rsa) { const u8 *p = key; BIGNUM *bn_p, *q, *dmp1, *dmq1, *iqmp; int 
base; base = (keysize - 3) / 5; if (base != 32 && base != 48 && base != 64 && base != 128) { fprintf(stderr, "Invalid private key.\n"); return -1; } p += 3; bn_p = BN_new(); if (bn_p == NULL) return -1; cf2bn(p, base, bn_p); p += base; q = BN_new(); if (q == NULL) return -1; cf2bn(p, base, q); p += base; iqmp = BN_new(); if (iqmp == NULL) return -1; cf2bn(p, base, iqmp); p += base; dmp1 = BN_new(); if (dmp1 == NULL) return -1; cf2bn(p, base, dmp1); p += base; dmq1 = BN_new(); if (dmq1 == NULL) return -1; cf2bn(p, base, dmq1); p += base; if (RSA_set0_factors(rsa, bn_p, q) != 1) return -1; if (RSA_set0_crt_params(rsa, dmp1, dmq1, iqmp) != 1) return -1; if (gen_d(rsa)) return -1; return 0; } static int read_public_key(RSA *rsa) { int r; sc_path_t path; sc_file_t *file; u8 buf[2048], *p = buf; size_t bufsize, keysize; r = select_app_df(); if (r) return 1; sc_format_path("I1012", &path); r = sc_select_file(card, &path, &file); if (r) { fprintf(stderr, "Unable to select public key file: %s\n", sc_strerror(r)); return 2; } bufsize = file->size; sc_file_free(file); r = sc_read_binary(card, 0, buf, bufsize, 0); if (r < 0) { fprintf(stderr, "Unable to read public key file: %s\n", sc_strerror(r)); return 2; } bufsize = r; do { if (bufsize < 4) return 3; keysize = (p[0] << 8) | p[1]; if (keysize == 0) break; if (keysize < 3) return 3; if (p[2] == opt_key_num) break; p += keysize; bufsize -= keysize; } while (1); if (keysize == 0) { printf("Key number %d not found.\n", opt_key_num); return 2; } return parse_public_key(p, keysize, rsa); } static int read_private_key(RSA *rsa) { int r; sc_path_t path; sc_file_t *file; const sc_acl_entry_t *e; u8 buf[2048], *p = buf; size_t bufsize, keysize; r = select_app_df(); if (r) return 1; sc_format_path("I0012", &path); r = sc_select_file(card, &path, &file); if (r) { fprintf(stderr, "Unable to select private key file: %s\n", sc_strerror(r)); return 2; } e = sc_file_get_acl_entry(file, SC_AC_OP_READ); if (e == NULL || e->method == SC_AC_NEVER) return 10; bufsize = file->size; sc_file_free(file); r = sc_read_binary(card, 0, buf, bufsize, 0); if (r < 0) { fprintf(stderr, "Unable to read private key file: %s\n", sc_strerror(r)); return 2; } bufsize = r; do { if (bufsize < 4) return 3; keysize = (p[0] << 8) | p[1]; if (keysize == 0) break; if (keysize < 3) return 3; if (p[2] == opt_key_num) break; p += keysize; bufsize -= keysize; } while (1); if (keysize == 0) { printf("Key number %d not found.\n", opt_key_num); return 2; } return parse_private_key(p, keysize, rsa); } static int read_key(void) { RSA *rsa = RSA_new(); u8 buf[1024], *p = buf; u8 b64buf[2048]; int r; if (rsa == NULL) return -1; r = read_public_key(rsa); if (r) return r; r = i2d_RSA_PUBKEY(rsa, &p); if (r <= 0) { fprintf(stderr, "Error encoding public key.\n"); return -1; } r = sc_base64_encode(buf, r, b64buf, sizeof(b64buf), 64); if (r < 0) { fprintf(stderr, "Error in Base64 encoding: %s\n", sc_strerror(r)); return -1; } printf("-----BEGIN PUBLIC KEY-----\n%s-----END PUBLIC KEY-----\n", b64buf); r = read_private_key(rsa); if (r == 10) return 0; else if (r) return r; p = buf; r = i2d_RSAPrivateKey(rsa, &p); if (r <= 0) { fprintf(stderr, "Error encoding private key.\n"); return -1; } r = sc_base64_encode(buf, r, b64buf, sizeof(b64buf), 64); if (r < 0) { fprintf(stderr, "Error in Base64 encoding: %s\n", sc_strerror(r)); return -1; } printf("-----BEGIN RSA PRIVATE KEY-----\n%s-----END RSA PRIVATE KEY-----\n", b64buf); return 0; } static int list_keys(void) { int r, idx = 0; sc_path_t path; u8 buf[2048], *p = 
buf; size_t keysize, i; int mod_lens[] = { 512, 768, 1024, 2048 }; size_t sizes[] = { 167, 247, 327, 647 }; r = select_app_df(); if (r) return 1; sc_format_path("I1012", &path); r = sc_select_file(card, &path, NULL); if (r) { fprintf(stderr, "Unable to select public key file: %s\n", sc_strerror(r)); return 2; } do { int mod_len = -1; r = sc_read_binary(card, idx, buf, 3, 0); if (r < 0) { fprintf(stderr, "Unable to read public key file: %s\n", sc_strerror(r)); return 2; } keysize = (p[0] << 8) | p[1]; if (keysize == 0) break; idx += keysize; for (i = 0; i < sizeof(sizes)/sizeof(sizes[ 0]); i++) if (sizes[i] == keysize) mod_len = mod_lens[i]; if (mod_len < 0) printf("Key %d -- unknown modulus length\n", p[2] & 0x0F); else printf("Key %d -- Modulus length %d\n", p[2] & 0x0F, mod_len); } while (1); return 0; } static int generate_key(void) { sc_apdu_t apdu; u8 sbuf[4]; u8 p2; int r; switch (opt_mod_length) { case 512: p2 = 0x40; break; case 768: p2 = 0x60; break; case 1024: p2 = 0x80; break; case 2048: p2 = 0x00; break; default: fprintf(stderr, "Invalid modulus length.\n"); return 2; } sc_format_apdu(card, &apdu, SC_APDU_CASE_3_SHORT, 0x46, (u8) opt_key_num-1, p2); apdu.cla = 0xF0; apdu.lc = 4; apdu.datalen = 4; apdu.data = sbuf; sbuf[0] = opt_exponent & 0xFF; sbuf[1] = (opt_exponent >> 8) & 0xFF; sbuf[2] = (opt_exponent >> 16) & 0xFF; sbuf[3] = (opt_exponent >> 24) & 0xFF; r = select_app_df(); if (r) return 1; if (verbose) printf("Generating key...\n"); r = sc_transmit_apdu(card, &apdu); if (r) { fprintf(stderr, "APDU transmit failed: %s\n", sc_strerror(r)); if (r == SC_ERROR_TRANSMIT_FAILED) fprintf(stderr, "Reader has timed out. It is still possible that the key generation has\n" "succeeded.\n"); return 1; } if (apdu.sw1 == 0x90 && apdu.sw2 == 0x00) { printf("Key generation successful.\n"); return 0; } if (apdu.sw1 == 0x69 && apdu.sw2 == 0x82) fprintf(stderr, "CHV1 not verified or invalid exponent value.\n"); else fprintf(stderr, "Card returned SW1=%02X, SW2=%02X.\n", apdu.sw1, apdu.sw2); return 1; } static int create_key_files(void) { sc_file_t *file; int mod_lens[] = { 512, 768, 1024, 2048 }; int sizes[] = { 163, 243, 323, 643 }; int size = -1; int r; size_t i; for (i = 0; i < sizeof(mod_lens) / sizeof(int); i++) if (mod_lens[i] == opt_mod_length) { size = sizes[i]; break; } if (size == -1) { fprintf(stderr, "Invalid modulus length.\n"); return 1; } if (verbose) printf("Creating key files for %d keys.\n", opt_key_count); file = sc_file_new(); if (!file) { fprintf(stderr, "out of memory.\n"); return 1; } file->type = SC_FILE_TYPE_WORKING_EF; file->ef_structure = SC_FILE_EF_TRANSPARENT; file->id = 0x0012; file->size = opt_key_count * size + 3; sc_file_add_acl_entry(file, SC_AC_OP_READ, SC_AC_NEVER, SC_AC_KEY_REF_NONE); sc_file_add_acl_entry(file, SC_AC_OP_UPDATE, SC_AC_CHV, 1); sc_file_add_acl_entry(file, SC_AC_OP_INVALIDATE, SC_AC_CHV, 1); sc_file_add_acl_entry(file, SC_AC_OP_REHABILITATE, SC_AC_CHV, 1); if (select_app_df()) { sc_file_free(file); return 1; } r = sc_create_file(card, file); sc_file_free(file); if (r) { fprintf(stderr, "Unable to create private key file: %s\n", sc_strerror(r)); return 1; } file = sc_file_new(); if (!file) { fprintf(stderr, "out of memory.\n"); return 1; } file->type = SC_FILE_TYPE_WORKING_EF; file->ef_structure = SC_FILE_EF_TRANSPARENT; file->id = 0x1012; file->size = opt_key_count * (size + 4) + 3; sc_file_add_acl_entry(file, SC_AC_OP_READ, SC_AC_NONE, SC_AC_KEY_REF_NONE); sc_file_add_acl_entry(file, SC_AC_OP_UPDATE, SC_AC_CHV, 1); 
sc_file_add_acl_entry(file, SC_AC_OP_INVALIDATE, SC_AC_CHV, 1); sc_file_add_acl_entry(file, SC_AC_OP_REHABILITATE, SC_AC_CHV, 1); if (select_app_df()) { sc_file_free(file); return 1; } r = sc_create_file(card, file); sc_file_free(file); if (r) { fprintf(stderr, "Unable to create public key file: %s\n", sc_strerror(r)); return 1; } if (verbose) printf("Key files generated successfully.\n"); return 0; } static int read_rsa_privkey(RSA **rsa_out) { RSA *rsa = NULL; BIO *in = NULL; int r; in = BIO_new(BIO_s_file()); if (opt_prkeyf == NULL) { fprintf(stderr, "Private key file must be set.\n"); return 2; } r = BIO_read_filename(in, opt_prkeyf); if (r <= 0) { perror(opt_prkeyf); return 2; } rsa = PEM_read_bio_RSAPrivateKey(in, NULL, NULL, NULL); if (rsa == NULL) { fprintf(stderr, "Unable to load private key.\n"); return 2; } BIO_free(in); *rsa_out = rsa; return 0; } static int encode_private_key(RSA *rsa, u8 *key, size_t *keysize) { u8 buf[1024], *p = buf; u8 bnbuf[256]; int base = 0; int r; const BIGNUM *rsa_p, *rsa_q, *rsa_dmp1, *rsa_dmq1, *rsa_iqmp; switch (RSA_bits(rsa)) { case 512: base = 32; break; case 768: base = 48; break; case 1024: base = 64; break; case 2048: base = 128; break; } if (base == 0) { fprintf(stderr, "Key length invalid.\n"); return 2; } *p++ = (5 * base + 3) >> 8; *p++ = (5 * base + 3) & 0xFF; *p++ = opt_key_num; RSA_get0_factors(rsa, &rsa_p, &rsa_q); r = bn2cf(rsa_p, bnbuf); if (r != base) { fprintf(stderr, "Invalid private key.\n"); return 2; } memcpy(p, bnbuf, base); p += base; r = bn2cf(rsa_q, bnbuf); if (r != base) { fprintf(stderr, "Invalid private key.\n"); return 2; } memcpy(p, bnbuf, base); p += base; RSA_get0_crt_params(rsa, &rsa_dmp1, &rsa_dmq1, &rsa_iqmp); r = bn2cf(rsa_iqmp, bnbuf); if (r != base) { fprintf(stderr, "Invalid private key.\n"); return 2; } memcpy(p, bnbuf, base); p += base; r = bn2cf(rsa_dmp1, bnbuf); if (r != base) { fprintf(stderr, "Invalid private key.\n"); return 2; } memcpy(p, bnbuf, base); p += base; r = bn2cf(rsa_dmq1, bnbuf); if (r != base) { fprintf(stderr, "Invalid private key.\n"); return 2; } memcpy(p, bnbuf, base); p += base; memcpy(key, buf, p - buf); *keysize = p - buf; return 0; } static int encode_public_key(RSA *rsa, u8 *key, size_t *keysize) { u8 buf[1024], *p = buf; u8 bnbuf[256]; int base = 0; int r; const BIGNUM *rsa_n, *rsa_e; switch (RSA_bits(rsa)) { case 512: base = 32; break; case 768: base = 48; break; case 1024: base = 64; break; case 2048: base = 128; break; } if (base == 0) { fprintf(stderr, "Key length invalid.\n"); return 2; } *p++ = (5 * base + 7) >> 8; *p++ = (5 * base + 7) & 0xFF; *p++ = opt_key_num; RSA_get0_key(rsa, &rsa_n, &rsa_e, NULL); r = bn2cf(rsa_n, bnbuf); if (r != 2*base) { fprintf(stderr, "Invalid public key.\n"); return 2; } memcpy(p, bnbuf, 2*base); p += 2*base; memset(p, 0, base); p += base; memset(bnbuf, 0, 2*base); memcpy(p, bnbuf, 2*base); p += 2*base; r = bn2cf(rsa_e, bnbuf); memcpy(p, bnbuf, 4); p += 4; memcpy(key, buf, p - buf); *keysize = p - buf; return 0; } static int update_public_key(const u8 *key, size_t keysize) { int r, idx = 0; sc_path_t path; r = select_app_df(); if (r) return 1; sc_format_path("I1012", &path); r = sc_select_file(card, &path, NULL); if (r) { fprintf(stderr, "Unable to select public key file: %s\n", sc_strerror(r)); return 2; } idx = keysize * (opt_key_num-1); r = sc_update_binary(card, idx, key, keysize, 0); if (r < 0) { fprintf(stderr, "Unable to write public key: %s\n", sc_strerror(r)); return 2; } return 0; } static int update_private_key(const u8 *key, size_t 
keysize) { int r, idx = 0; sc_path_t path; r = select_app_df(); if (r) return 1; sc_format_path("I0012", &path); r = sc_select_file(card, &path, NULL); if (r) { fprintf(stderr, "Unable to select private key file: %s\n", sc_strerror(r)); return 2; } idx = keysize * (opt_key_num-1); r = sc_update_binary(card, idx, key, keysize, 0); if (r < 0) { fprintf(stderr, "Unable to write private key: %s\n", sc_strerror(r)); return 2; } return 0; } static int store_key(void) { u8 prv[1024], pub[1024]; size_t prvsize, pubsize; int r; RSA *rsa; r = read_rsa_privkey(&rsa); if (r) return r; r = encode_private_key(rsa, prv, &prvsize); if (r) return r; r = encode_public_key(rsa, pub, &pubsize); if (r) return r; if (verbose) printf("Storing private key...\n"); r = select_app_df(); if (r) return r; r = update_private_key(prv, prvsize); if (r) return r; if (verbose) printf("Storing public key...\n"); r = select_app_df(); if (r) return r; r = update_public_key(pub, pubsize); if (r) return r; return 0; } static int create_pin_file(const sc_path_t *inpath, int chv, const char *key_id) { char prompt[40], *pin, *puk; char buf[30], *p = buf; sc_path_t file_id, path; sc_file_t *file; size_t len; int r; file_id = *inpath; if (file_id.len < 2) return -1; if (chv == 1) sc_format_path("I0000", &file_id); else if (chv == 2) sc_format_path("I0100", &file_id); else return -1; r = sc_select_file(card, inpath, NULL); if (r) return -1; r = sc_select_file(card, &file_id, NULL); if (r == 0) return 0; sprintf(prompt, "Please enter CHV%d%s: ", chv, key_id); pin = getpin(prompt); if (pin == NULL) return -1; sprintf(prompt, "Please enter PUK for CHV%d%s: ", chv, key_id); puk = getpin(prompt); if (puk == NULL) { free(pin); return -1; } memset(p, 0xFF, 3); p += 3; memcpy(p, pin, 8); p += 8; *p++ = opt_pin_attempts; *p++ = opt_pin_attempts; memcpy(p, puk, 8); p += 8; *p++ = opt_puk_attempts; *p++ = opt_puk_attempts; len = p - buf; free(pin); free(puk); file = sc_file_new(); file->type = SC_FILE_TYPE_WORKING_EF; file->ef_structure = SC_FILE_EF_TRANSPARENT; sc_file_add_acl_entry(file, SC_AC_OP_READ, SC_AC_NEVER, SC_AC_KEY_REF_NONE); if (inpath->len == 2 && inpath->value[0] == 0x3F && inpath->value[1] == 0x00) sc_file_add_acl_entry(file, SC_AC_OP_UPDATE, SC_AC_AUT, 1); else sc_file_add_acl_entry(file, SC_AC_OP_UPDATE, SC_AC_CHV, 2); sc_file_add_acl_entry(file, SC_AC_OP_INVALIDATE, SC_AC_AUT, 1); sc_file_add_acl_entry(file, SC_AC_OP_REHABILITATE, SC_AC_AUT, 1); file->size = len; file->id = (file_id.value[0] << 8) | file_id.value[1]; r = sc_create_file(card, file); sc_file_free(file); if (r) { fprintf(stderr, "PIN file creation failed: %s\n", sc_strerror(r)); return r; } path = *inpath; sc_append_path(&path, &file_id); r = sc_select_file(card, &path, NULL); if (r) { fprintf(stderr, "Unable to select created PIN file: %s\n", sc_strerror(r)); return r; } r = sc_update_binary(card, 0, (const u8 *) buf, len, 0); if (r < 0) { fprintf(stderr, "Unable to update created PIN file: %s\n", sc_strerror(r)); return r; } return 0; } static int create_pin(void) { sc_path_t path; char buf[80]; if (opt_pin_num != 1 && opt_pin_num != 2) { fprintf(stderr, "Invalid PIN number. 
Possible values: 1, 2.\n"); return 2; } strcpy(buf, "3F00"); if (opt_appdf != NULL) strlcat(buf, opt_appdf, sizeof buf); sc_format_path(buf, &path); return create_pin_file(&path, opt_pin_num, ""); } int main(int argc, char *argv[]) { int err = 0, r, c, long_optind = 0; int action_count = 0; int do_read_key = 0; int do_generate_key = 0; int do_create_key_files = 0; int do_list_keys = 0; int do_store_key = 0; int do_create_pin_file = 0; sc_context_param_t ctx_param; while (1) { c = getopt_long(argc, argv, "P:Vslgc:Rk:r:p:u:e:m:vwa:", options, &long_optind); if (c == -1) break; if (c == '?') util_print_usage_and_die(app_name, options, option_help, NULL); switch (c) { case 'l': do_list_keys = 1; action_count++; break; case 'P': do_create_pin_file = 1; opt_pin_num = atoi(optarg); action_count++; break; case 'R': do_read_key = 1; action_count++; break; case 'g': do_generate_key = 1; action_count++; break; case 'c': do_create_key_files = 1; opt_key_count = atoi(optarg); action_count++; break; case 's': do_store_key = 1; action_count++; break; case 'k': opt_key_num = atoi(optarg); if (opt_key_num < 1 || opt_key_num > 15) { fprintf(stderr, "Key number invalid.\n"); exit(2); } break; case 'V': opt_pin_num = 1; break; case 'e': opt_exponent = atoi(optarg); break; case 'm': opt_mod_length = atoi(optarg); break; case 'p': opt_prkeyf = optarg; break; case 'u': opt_pubkeyf = optarg; break; case 'r': opt_reader = optarg; break; case 'v': verbose++; break; case 'w': opt_wait = 1; break; case 'a': opt_appdf = optarg; break; } } if (action_count == 0) util_print_usage_and_die(app_name, options, option_help, NULL); memset(&ctx_param, 0, sizeof(ctx_param)); ctx_param.ver = 0; ctx_param.app_name = app_name; r = sc_context_create(&ctx, &ctx_param); if (r) { fprintf(stderr, "Failed to establish context: %s\n", sc_strerror(r)); return 1; } if (verbose > 1) { ctx->debug = verbose; sc_ctx_log_to_file(ctx, "stderr"); } err = util_connect_card(ctx, &card, opt_reader, opt_wait, verbose); printf("Using card driver: %s\n", card->driver->name); if (do_create_pin_file) { if ((err = create_pin()) != 0) goto end; action_count--; } if (do_create_key_files) { if ((err = create_key_files()) != 0) goto end; action_count--; } if (do_generate_key) { if ((err = generate_key()) != 0) goto end; action_count--; } if (do_store_key) { if ((err = store_key()) != 0) goto end; action_count--; } if (do_list_keys) { if ((err = list_keys()) != 0) goto end; action_count--; } if (do_read_key) { if ((err = read_key()) != 0) goto end; action_count--; } if (pincode != NULL) { memset(pincode, 0, 8); free(pincode); } end: if (card) { sc_unlock(card); sc_disconnect_card(card); } if (ctx) sc_release_context(ctx); return err; }
/* * cryptoflex-tool.c: Tool for doing various Cryptoflex related stuff * * Copyright (C) 2001 Juha Yrjölä <juha.yrjola@iki.fi> * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include "config.h" #include "libopensc/sc-ossl-compat.h" #include "libopensc/internal.h" #include <openssl/bn.h> #include <openssl/rsa.h> #include <openssl/x509.h> #include <openssl/pem.h> #include <openssl/err.h> #include "libopensc/pkcs15.h" #include "common/compat_strlcpy.h" #include "common/compat_strlcat.h" #include "util.h" static const char *app_name = "cryptoflex-tool"; static char * opt_reader = NULL; static int opt_wait = 0; static int opt_key_num = 1, opt_pin_num = -1; static int verbose = 0; static int opt_exponent = 3; static int opt_mod_length = 1024; static int opt_key_count = 1; static int opt_pin_attempts = 10; static int opt_puk_attempts = 10; static const char *opt_appdf = NULL, *opt_prkeyf = NULL, *opt_pubkeyf = NULL; static u8 *pincode = NULL; static const struct option options[] = { { "list-keys", 0, NULL, 'l' }, { "create-key-files", 1, NULL, 'c' }, { "create-pin-file", 1, NULL, 'P' }, { "generate-key", 0, NULL, 'g' }, { "read-key", 0, NULL, 'R' }, { "verify-pin", 0, NULL, 'V' }, { "key-num", 1, NULL, 'k' }, { "app-df", 1, NULL, 'a' }, { "prkey-file", 1, NULL, 'p' }, { "pubkey-file", 1, NULL, 'u' }, { "exponent", 1, NULL, 'e' }, { "modulus-length", 1, NULL, 'm' }, { "reader", 1, NULL, 'r' }, { "wait", 0, NULL, 'w' }, { "verbose", 0, NULL, 'v' }, { NULL, 0, NULL, 0 } }; static const char *option_help[] = { "Lists all keys in a public key file", "Creates new RSA key files for <arg> keys", "Creates a new CHV<arg> file", "Generates a new RSA key pair", "Reads a public key from the card", "Verifies CHV1 before issuing commands", "Selects which key number to operate on [1]", "Selects the DF to operate in", "Private key file", "Public key file", "The RSA exponent to use in key generation [3]", "Modulus length to use in key generation [1024]", "Uses reader <arg>", "Wait for card insertion", "Verbose operation. 
Use several times to enable debug output.", }; static sc_context_t *ctx = NULL; static sc_card_t *card = NULL; static char *getpin(const char *prompt) { char *buf, pass[20]; int i; printf("%s", prompt); fflush(stdout); if (fgets(pass, 20, stdin) == NULL) return NULL; for (i = 0; i < 20; i++) if (pass[i] == '\n') pass[i] = 0; if (strlen(pass) == 0) return NULL; buf = malloc(8); if (buf == NULL) return NULL; if (strlen(pass) > 8) { fprintf(stderr, "PIN code too long.\n"); free(buf); return NULL; } memset(buf, 0, 8); strlcpy(buf, pass, 8); return buf; } static int verify_pin(int pin) { char prompt[50]; int r, tries_left = -1; if (pincode == NULL) { sprintf(prompt, "Please enter CHV%d: ", pin); pincode = (u8 *) getpin(prompt); if (pincode == NULL || strlen((char *) pincode) == 0) return -1; } if (pin != 1 && pin != 2) return -3; r = sc_verify(card, SC_AC_CHV, pin, pincode, 8, &tries_left); if (r) { memset(pincode, 0, 8); free(pincode); pincode = NULL; fprintf(stderr, "PIN code verification failed: %s\n", sc_strerror(r)); return -1; } return 0; } static int select_app_df(void) { sc_path_t path; sc_file_t *file; char str[80]; int r; strcpy(str, "3F00"); if (opt_appdf != NULL) strlcat(str, opt_appdf, sizeof str); sc_format_path(str, &path); r = sc_select_file(card, &path, &file); if (r) { fprintf(stderr, "Unable to select application DF: %s\n", sc_strerror(r)); return -1; } if (file->type != SC_FILE_TYPE_DF) { fprintf(stderr, "Selected application DF is not a DF.\n"); return -1; } sc_file_free(file); if (opt_pin_num >= 0) return verify_pin(opt_pin_num); else return 0; } static void invert_buf(u8 *dest, const u8 *src, size_t c) { size_t i; for (i = 0; i < c; i++) dest[i] = src[c-1-i]; } static BIGNUM * cf2bn(const u8 *buf, size_t bufsize, BIGNUM *num) { u8 tmp[512]; invert_buf(tmp, buf, bufsize); return BN_bin2bn(tmp, bufsize, num); } static int bn2cf(const BIGNUM *num, u8 *buf) { u8 tmp[512]; int r; r = BN_bn2bin(num, tmp); if (r <= 0) return r; invert_buf(buf, tmp, r); return r; } static int parse_public_key(const u8 *key, size_t keysize, RSA *rsa) { const u8 *p = key; BIGNUM *n, *e; int base; base = (keysize - 7) / 5; if (base != 32 && base != 48 && base != 64 && base != 128) { fprintf(stderr, "Invalid public key.\n"); return -1; } p += 3; n = BN_new(); if (n == NULL) return -1; cf2bn(p, 2 * base, n); p += 2 * base; p += base; p += 2 * base; e = BN_new(); if (e == NULL) return -1; cf2bn(p, 4, e); if (RSA_set0_key(rsa, n, e, NULL) != 1) return -1; return 0; } static int gen_d(RSA *rsa) { BN_CTX *bnctx; BIGNUM *r0, *r1, *r2; const BIGNUM *rsa_p, *rsa_q, *rsa_n, *rsa_e, *rsa_d; BIGNUM *rsa_n_new, *rsa_e_new, *rsa_d_new; bnctx = BN_CTX_new(); if (bnctx == NULL) return -1; BN_CTX_start(bnctx); r0 = BN_CTX_get(bnctx); r1 = BN_CTX_get(bnctx); r2 = BN_CTX_get(bnctx); RSA_get0_key(rsa, &rsa_n, &rsa_e, &rsa_d); RSA_get0_factors(rsa, &rsa_p, &rsa_q); BN_sub(r1, rsa_p, BN_value_one()); BN_sub(r2, rsa_q, BN_value_one()); BN_mul(r0, r1, r2, bnctx); if ((rsa_d_new = BN_mod_inverse(NULL, rsa_e, r0, bnctx)) == NULL) { fprintf(stderr, "BN_mod_inverse() failed.\n"); return -1; } /* RSA_set0_key will free previous value, and replace with new value * Thus the need to copy the contents of rsa_n and rsa_e */ rsa_n_new = BN_dup(rsa_n); rsa_e_new = BN_dup(rsa_e); if (RSA_set0_key(rsa, rsa_n_new, rsa_e_new, rsa_d_new) != 1) return -1; BN_CTX_end(bnctx); BN_CTX_free(bnctx); return 0; } static int parse_private_key(const u8 *key, size_t keysize, RSA *rsa) { const u8 *p = key; BIGNUM *bn_p, *q, *dmp1, *dmq1, *iqmp; int 
base; base = (keysize - 3) / 5; if (base != 32 && base != 48 && base != 64 && base != 128) { fprintf(stderr, "Invalid private key.\n"); return -1; } p += 3; bn_p = BN_new(); if (bn_p == NULL) return -1; cf2bn(p, base, bn_p); p += base; q = BN_new(); if (q == NULL) return -1; cf2bn(p, base, q); p += base; iqmp = BN_new(); if (iqmp == NULL) return -1; cf2bn(p, base, iqmp); p += base; dmp1 = BN_new(); if (dmp1 == NULL) return -1; cf2bn(p, base, dmp1); p += base; dmq1 = BN_new(); if (dmq1 == NULL) return -1; cf2bn(p, base, dmq1); p += base; if (RSA_set0_factors(rsa, bn_p, q) != 1) return -1; if (RSA_set0_crt_params(rsa, dmp1, dmq1, iqmp) != 1) return -1; if (gen_d(rsa)) return -1; return 0; } static int read_public_key(RSA *rsa) { int r; sc_path_t path; sc_file_t *file; u8 buf[2048], *p = buf; size_t bufsize, keysize; r = select_app_df(); if (r) return 1; sc_format_path("I1012", &path); r = sc_select_file(card, &path, &file); if (r) { fprintf(stderr, "Unable to select public key file: %s\n", sc_strerror(r)); return 2; } bufsize = MIN(file->size, sizeof buf); sc_file_free(file); r = sc_read_binary(card, 0, buf, bufsize, 0); if (r < 0) { fprintf(stderr, "Unable to read public key file: %s\n", sc_strerror(r)); return 2; } bufsize = r; do { if (bufsize < 4) return 3; keysize = (p[0] << 8) | p[1]; if (keysize == 0) break; if (keysize < 3) return 3; if (p[2] == opt_key_num) break; p += keysize; bufsize -= keysize; } while (1); if (keysize == 0) { printf("Key number %d not found.\n", opt_key_num); return 2; } return parse_public_key(p, keysize, rsa); } static int read_private_key(RSA *rsa) { int r; sc_path_t path; sc_file_t *file; const sc_acl_entry_t *e; u8 buf[2048], *p = buf; size_t bufsize, keysize; r = select_app_df(); if (r) return 1; sc_format_path("I0012", &path); r = sc_select_file(card, &path, &file); if (r) { fprintf(stderr, "Unable to select private key file: %s\n", sc_strerror(r)); return 2; } e = sc_file_get_acl_entry(file, SC_AC_OP_READ); if (e == NULL || e->method == SC_AC_NEVER) return 10; bufsize = MIN(file->size, sizeof buf); sc_file_free(file); r = sc_read_binary(card, 0, buf, bufsize, 0); if (r < 0) { fprintf(stderr, "Unable to read private key file: %s\n", sc_strerror(r)); return 2; } bufsize = r; do { if (bufsize < 4) return 3; keysize = (p[0] << 8) | p[1]; if (keysize == 0) break; if (keysize < 3) return 3; if (p[2] == opt_key_num) break; p += keysize; bufsize -= keysize; } while (1); if (keysize == 0) { printf("Key number %d not found.\n", opt_key_num); return 2; } return parse_private_key(p, keysize, rsa); } static int read_key(void) { RSA *rsa = RSA_new(); u8 buf[1024], *p = buf; u8 b64buf[2048]; int r; if (rsa == NULL) return -1; r = read_public_key(rsa); if (r) return r; r = i2d_RSA_PUBKEY(rsa, &p); if (r <= 0) { fprintf(stderr, "Error encoding public key.\n"); return -1; } r = sc_base64_encode(buf, r, b64buf, sizeof(b64buf), 64); if (r < 0) { fprintf(stderr, "Error in Base64 encoding: %s\n", sc_strerror(r)); return -1; } printf("-----BEGIN PUBLIC KEY-----\n%s-----END PUBLIC KEY-----\n", b64buf); r = read_private_key(rsa); if (r == 10) return 0; else if (r) return r; p = buf; r = i2d_RSAPrivateKey(rsa, &p); if (r <= 0) { fprintf(stderr, "Error encoding private key.\n"); return -1; } r = sc_base64_encode(buf, r, b64buf, sizeof(b64buf), 64); if (r < 0) { fprintf(stderr, "Error in Base64 encoding: %s\n", sc_strerror(r)); return -1; } printf("-----BEGIN RSA PRIVATE KEY-----\n%s-----END RSA PRIVATE KEY-----\n", b64buf); return 0; } static int list_keys(void) { int r, idx = 0; 
sc_path_t path; u8 buf[2048], *p = buf; size_t keysize, i; int mod_lens[] = { 512, 768, 1024, 2048 }; size_t sizes[] = { 167, 247, 327, 647 }; r = select_app_df(); if (r) return 1; sc_format_path("I1012", &path); r = sc_select_file(card, &path, NULL); if (r) { fprintf(stderr, "Unable to select public key file: %s\n", sc_strerror(r)); return 2; } do { int mod_len = -1; r = sc_read_binary(card, idx, buf, 3, 0); if (r < 0) { fprintf(stderr, "Unable to read public key file: %s\n", sc_strerror(r)); return 2; } keysize = (p[0] << 8) | p[1]; if (keysize == 0) break; idx += keysize; for (i = 0; i < sizeof(sizes)/sizeof(sizes[ 0]); i++) if (sizes[i] == keysize) mod_len = mod_lens[i]; if (mod_len < 0) printf("Key %d -- unknown modulus length\n", p[2] & 0x0F); else printf("Key %d -- Modulus length %d\n", p[2] & 0x0F, mod_len); } while (1); return 0; } static int generate_key(void) { sc_apdu_t apdu; u8 sbuf[4]; u8 p2; int r; switch (opt_mod_length) { case 512: p2 = 0x40; break; case 768: p2 = 0x60; break; case 1024: p2 = 0x80; break; case 2048: p2 = 0x00; break; default: fprintf(stderr, "Invalid modulus length.\n"); return 2; } sc_format_apdu(card, &apdu, SC_APDU_CASE_3_SHORT, 0x46, (u8) opt_key_num-1, p2); apdu.cla = 0xF0; apdu.lc = 4; apdu.datalen = 4; apdu.data = sbuf; sbuf[0] = opt_exponent & 0xFF; sbuf[1] = (opt_exponent >> 8) & 0xFF; sbuf[2] = (opt_exponent >> 16) & 0xFF; sbuf[3] = (opt_exponent >> 24) & 0xFF; r = select_app_df(); if (r) return 1; if (verbose) printf("Generating key...\n"); r = sc_transmit_apdu(card, &apdu); if (r) { fprintf(stderr, "APDU transmit failed: %s\n", sc_strerror(r)); if (r == SC_ERROR_TRANSMIT_FAILED) fprintf(stderr, "Reader has timed out. It is still possible that the key generation has\n" "succeeded.\n"); return 1; } if (apdu.sw1 == 0x90 && apdu.sw2 == 0x00) { printf("Key generation successful.\n"); return 0; } if (apdu.sw1 == 0x69 && apdu.sw2 == 0x82) fprintf(stderr, "CHV1 not verified or invalid exponent value.\n"); else fprintf(stderr, "Card returned SW1=%02X, SW2=%02X.\n", apdu.sw1, apdu.sw2); return 1; } static int create_key_files(void) { sc_file_t *file; int mod_lens[] = { 512, 768, 1024, 2048 }; int sizes[] = { 163, 243, 323, 643 }; int size = -1; int r; size_t i; for (i = 0; i < sizeof(mod_lens) / sizeof(int); i++) if (mod_lens[i] == opt_mod_length) { size = sizes[i]; break; } if (size == -1) { fprintf(stderr, "Invalid modulus length.\n"); return 1; } if (verbose) printf("Creating key files for %d keys.\n", opt_key_count); file = sc_file_new(); if (!file) { fprintf(stderr, "out of memory.\n"); return 1; } file->type = SC_FILE_TYPE_WORKING_EF; file->ef_structure = SC_FILE_EF_TRANSPARENT; file->id = 0x0012; file->size = opt_key_count * size + 3; sc_file_add_acl_entry(file, SC_AC_OP_READ, SC_AC_NEVER, SC_AC_KEY_REF_NONE); sc_file_add_acl_entry(file, SC_AC_OP_UPDATE, SC_AC_CHV, 1); sc_file_add_acl_entry(file, SC_AC_OP_INVALIDATE, SC_AC_CHV, 1); sc_file_add_acl_entry(file, SC_AC_OP_REHABILITATE, SC_AC_CHV, 1); if (select_app_df()) { sc_file_free(file); return 1; } r = sc_create_file(card, file); sc_file_free(file); if (r) { fprintf(stderr, "Unable to create private key file: %s\n", sc_strerror(r)); return 1; } file = sc_file_new(); if (!file) { fprintf(stderr, "out of memory.\n"); return 1; } file->type = SC_FILE_TYPE_WORKING_EF; file->ef_structure = SC_FILE_EF_TRANSPARENT; file->id = 0x1012; file->size = opt_key_count * (size + 4) + 3; sc_file_add_acl_entry(file, SC_AC_OP_READ, SC_AC_NONE, SC_AC_KEY_REF_NONE); sc_file_add_acl_entry(file, SC_AC_OP_UPDATE, 
SC_AC_CHV, 1); sc_file_add_acl_entry(file, SC_AC_OP_INVALIDATE, SC_AC_CHV, 1); sc_file_add_acl_entry(file, SC_AC_OP_REHABILITATE, SC_AC_CHV, 1); if (select_app_df()) { sc_file_free(file); return 1; } r = sc_create_file(card, file); sc_file_free(file); if (r) { fprintf(stderr, "Unable to create public key file: %s\n", sc_strerror(r)); return 1; } if (verbose) printf("Key files generated successfully.\n"); return 0; } static int read_rsa_privkey(RSA **rsa_out) { RSA *rsa = NULL; BIO *in = NULL; int r; in = BIO_new(BIO_s_file()); if (opt_prkeyf == NULL) { fprintf(stderr, "Private key file must be set.\n"); return 2; } r = BIO_read_filename(in, opt_prkeyf); if (r <= 0) { perror(opt_prkeyf); return 2; } rsa = PEM_read_bio_RSAPrivateKey(in, NULL, NULL, NULL); if (rsa == NULL) { fprintf(stderr, "Unable to load private key.\n"); return 2; } BIO_free(in); *rsa_out = rsa; return 0; } static int encode_private_key(RSA *rsa, u8 *key, size_t *keysize) { u8 buf[1024], *p = buf; u8 bnbuf[256]; int base = 0; int r; const BIGNUM *rsa_p, *rsa_q, *rsa_dmp1, *rsa_dmq1, *rsa_iqmp; switch (RSA_bits(rsa)) { case 512: base = 32; break; case 768: base = 48; break; case 1024: base = 64; break; case 2048: base = 128; break; } if (base == 0) { fprintf(stderr, "Key length invalid.\n"); return 2; } *p++ = (5 * base + 3) >> 8; *p++ = (5 * base + 3) & 0xFF; *p++ = opt_key_num; RSA_get0_factors(rsa, &rsa_p, &rsa_q); r = bn2cf(rsa_p, bnbuf); if (r != base) { fprintf(stderr, "Invalid private key.\n"); return 2; } memcpy(p, bnbuf, base); p += base; r = bn2cf(rsa_q, bnbuf); if (r != base) { fprintf(stderr, "Invalid private key.\n"); return 2; } memcpy(p, bnbuf, base); p += base; RSA_get0_crt_params(rsa, &rsa_dmp1, &rsa_dmq1, &rsa_iqmp); r = bn2cf(rsa_iqmp, bnbuf); if (r != base) { fprintf(stderr, "Invalid private key.\n"); return 2; } memcpy(p, bnbuf, base); p += base; r = bn2cf(rsa_dmp1, bnbuf); if (r != base) { fprintf(stderr, "Invalid private key.\n"); return 2; } memcpy(p, bnbuf, base); p += base; r = bn2cf(rsa_dmq1, bnbuf); if (r != base) { fprintf(stderr, "Invalid private key.\n"); return 2; } memcpy(p, bnbuf, base); p += base; memcpy(key, buf, p - buf); *keysize = p - buf; return 0; } static int encode_public_key(RSA *rsa, u8 *key, size_t *keysize) { u8 buf[1024], *p = buf; u8 bnbuf[256]; int base = 0; int r; const BIGNUM *rsa_n, *rsa_e; switch (RSA_bits(rsa)) { case 512: base = 32; break; case 768: base = 48; break; case 1024: base = 64; break; case 2048: base = 128; break; } if (base == 0) { fprintf(stderr, "Key length invalid.\n"); return 2; } *p++ = (5 * base + 7) >> 8; *p++ = (5 * base + 7) & 0xFF; *p++ = opt_key_num; RSA_get0_key(rsa, &rsa_n, &rsa_e, NULL); r = bn2cf(rsa_n, bnbuf); if (r != 2*base) { fprintf(stderr, "Invalid public key.\n"); return 2; } memcpy(p, bnbuf, 2*base); p += 2*base; memset(p, 0, base); p += base; memset(bnbuf, 0, 2*base); memcpy(p, bnbuf, 2*base); p += 2*base; r = bn2cf(rsa_e, bnbuf); memcpy(p, bnbuf, 4); p += 4; memcpy(key, buf, p - buf); *keysize = p - buf; return 0; } static int update_public_key(const u8 *key, size_t keysize) { int r, idx = 0; sc_path_t path; r = select_app_df(); if (r) return 1; sc_format_path("I1012", &path); r = sc_select_file(card, &path, NULL); if (r) { fprintf(stderr, "Unable to select public key file: %s\n", sc_strerror(r)); return 2; } idx = keysize * (opt_key_num-1); r = sc_update_binary(card, idx, key, keysize, 0); if (r < 0) { fprintf(stderr, "Unable to write public key: %s\n", sc_strerror(r)); return 2; } return 0; } static int update_private_key(const u8 
*key, size_t keysize) { int r, idx = 0; sc_path_t path; r = select_app_df(); if (r) return 1; sc_format_path("I0012", &path); r = sc_select_file(card, &path, NULL); if (r) { fprintf(stderr, "Unable to select private key file: %s\n", sc_strerror(r)); return 2; } idx = keysize * (opt_key_num-1); r = sc_update_binary(card, idx, key, keysize, 0); if (r < 0) { fprintf(stderr, "Unable to write private key: %s\n", sc_strerror(r)); return 2; } return 0; } static int store_key(void) { u8 prv[1024], pub[1024]; size_t prvsize, pubsize; int r; RSA *rsa; r = read_rsa_privkey(&rsa); if (r) return r; r = encode_private_key(rsa, prv, &prvsize); if (r) return r; r = encode_public_key(rsa, pub, &pubsize); if (r) return r; if (verbose) printf("Storing private key...\n"); r = select_app_df(); if (r) return r; r = update_private_key(prv, prvsize); if (r) return r; if (verbose) printf("Storing public key...\n"); r = select_app_df(); if (r) return r; r = update_public_key(pub, pubsize); if (r) return r; return 0; } static int create_pin_file(const sc_path_t *inpath, int chv, const char *key_id) { char prompt[40], *pin, *puk; char buf[30], *p = buf; sc_path_t file_id, path; sc_file_t *file; size_t len; int r; file_id = *inpath; if (file_id.len < 2) return -1; if (chv == 1) sc_format_path("I0000", &file_id); else if (chv == 2) sc_format_path("I0100", &file_id); else return -1; r = sc_select_file(card, inpath, NULL); if (r) return -1; r = sc_select_file(card, &file_id, NULL); if (r == 0) return 0; sprintf(prompt, "Please enter CHV%d%s: ", chv, key_id); pin = getpin(prompt); if (pin == NULL) return -1; sprintf(prompt, "Please enter PUK for CHV%d%s: ", chv, key_id); puk = getpin(prompt); if (puk == NULL) { free(pin); return -1; } memset(p, 0xFF, 3); p += 3; memcpy(p, pin, 8); p += 8; *p++ = opt_pin_attempts; *p++ = opt_pin_attempts; memcpy(p, puk, 8); p += 8; *p++ = opt_puk_attempts; *p++ = opt_puk_attempts; len = p - buf; free(pin); free(puk); file = sc_file_new(); file->type = SC_FILE_TYPE_WORKING_EF; file->ef_structure = SC_FILE_EF_TRANSPARENT; sc_file_add_acl_entry(file, SC_AC_OP_READ, SC_AC_NEVER, SC_AC_KEY_REF_NONE); if (inpath->len == 2 && inpath->value[0] == 0x3F && inpath->value[1] == 0x00) sc_file_add_acl_entry(file, SC_AC_OP_UPDATE, SC_AC_AUT, 1); else sc_file_add_acl_entry(file, SC_AC_OP_UPDATE, SC_AC_CHV, 2); sc_file_add_acl_entry(file, SC_AC_OP_INVALIDATE, SC_AC_AUT, 1); sc_file_add_acl_entry(file, SC_AC_OP_REHABILITATE, SC_AC_AUT, 1); file->size = len; file->id = (file_id.value[0] << 8) | file_id.value[1]; r = sc_create_file(card, file); sc_file_free(file); if (r) { fprintf(stderr, "PIN file creation failed: %s\n", sc_strerror(r)); return r; } path = *inpath; sc_append_path(&path, &file_id); r = sc_select_file(card, &path, NULL); if (r) { fprintf(stderr, "Unable to select created PIN file: %s\n", sc_strerror(r)); return r; } r = sc_update_binary(card, 0, (const u8 *) buf, len, 0); if (r < 0) { fprintf(stderr, "Unable to update created PIN file: %s\n", sc_strerror(r)); return r; } return 0; } static int create_pin(void) { sc_path_t path; char buf[80]; if (opt_pin_num != 1 && opt_pin_num != 2) { fprintf(stderr, "Invalid PIN number. 
Possible values: 1, 2.\n"); return 2; } strcpy(buf, "3F00"); if (opt_appdf != NULL) strlcat(buf, opt_appdf, sizeof buf); sc_format_path(buf, &path); return create_pin_file(&path, opt_pin_num, ""); } int main(int argc, char *argv[]) { int err = 0, r, c, long_optind = 0; int action_count = 0; int do_read_key = 0; int do_generate_key = 0; int do_create_key_files = 0; int do_list_keys = 0; int do_store_key = 0; int do_create_pin_file = 0; sc_context_param_t ctx_param; while (1) { c = getopt_long(argc, argv, "P:Vslgc:Rk:r:p:u:e:m:vwa:", options, &long_optind); if (c == -1) break; if (c == '?') util_print_usage_and_die(app_name, options, option_help, NULL); switch (c) { case 'l': do_list_keys = 1; action_count++; break; case 'P': do_create_pin_file = 1; opt_pin_num = atoi(optarg); action_count++; break; case 'R': do_read_key = 1; action_count++; break; case 'g': do_generate_key = 1; action_count++; break; case 'c': do_create_key_files = 1; opt_key_count = atoi(optarg); action_count++; break; case 's': do_store_key = 1; action_count++; break; case 'k': opt_key_num = atoi(optarg); if (opt_key_num < 1 || opt_key_num > 15) { fprintf(stderr, "Key number invalid.\n"); exit(2); } break; case 'V': opt_pin_num = 1; break; case 'e': opt_exponent = atoi(optarg); break; case 'm': opt_mod_length = atoi(optarg); break; case 'p': opt_prkeyf = optarg; break; case 'u': opt_pubkeyf = optarg; break; case 'r': opt_reader = optarg; break; case 'v': verbose++; break; case 'w': opt_wait = 1; break; case 'a': opt_appdf = optarg; break; } } if (action_count == 0) util_print_usage_and_die(app_name, options, option_help, NULL); memset(&ctx_param, 0, sizeof(ctx_param)); ctx_param.ver = 0; ctx_param.app_name = app_name; r = sc_context_create(&ctx, &ctx_param); if (r) { fprintf(stderr, "Failed to establish context: %s\n", sc_strerror(r)); return 1; } if (verbose > 1) { ctx->debug = verbose; sc_ctx_log_to_file(ctx, "stderr"); } err = util_connect_card(ctx, &card, opt_reader, opt_wait, verbose); printf("Using card driver: %s\n", card->driver->name); if (do_create_pin_file) { if ((err = create_pin()) != 0) goto end; action_count--; } if (do_create_key_files) { if ((err = create_key_files()) != 0) goto end; action_count--; } if (do_generate_key) { if ((err = generate_key()) != 0) goto end; action_count--; } if (do_store_key) { if ((err = store_key()) != 0) goto end; action_count--; } if (do_list_keys) { if ((err = list_keys()) != 0) goto end; action_count--; } if (do_read_key) { if ((err = read_key()) != 0) goto end; action_count--; } if (pincode != NULL) { memset(pincode, 0, 8); free(pincode); } end: if (card) { sc_unlock(card); sc_disconnect_card(card); } if (ctx) sc_release_context(ctx); return err; }
static int read_private_key(RSA *rsa) { int r; sc_path_t path; sc_file_t *file; const sc_acl_entry_t *e; u8 buf[2048], *p = buf; size_t bufsize, keysize; r = select_app_df(); if (r) return 1; sc_format_path("I0012", &path); r = sc_select_file(card, &path, &file); if (r) { fprintf(stderr, "Unable to select private key file: %s\n", sc_strerror(r)); return 2; } e = sc_file_get_acl_entry(file, SC_AC_OP_READ); if (e == NULL || e->method == SC_AC_NEVER) return 10; bufsize = file->size; sc_file_free(file); r = sc_read_binary(card, 0, buf, bufsize, 0); if (r < 0) { fprintf(stderr, "Unable to read private key file: %s\n", sc_strerror(r)); return 2; } bufsize = r; do { if (bufsize < 4) return 3; keysize = (p[0] << 8) | p[1]; if (keysize == 0) break; if (keysize < 3) return 3; if (p[2] == opt_key_num) break; p += keysize; bufsize -= keysize; } while (1); if (keysize == 0) { printf("Key number %d not found.\n", opt_key_num); return 2; } return parse_private_key(p, keysize, rsa); }
static int read_private_key(RSA *rsa) { int r; sc_path_t path; sc_file_t *file; const sc_acl_entry_t *e; u8 buf[2048], *p = buf; size_t bufsize, keysize; r = select_app_df(); if (r) return 1; sc_format_path("I0012", &path); r = sc_select_file(card, &path, &file); if (r) { fprintf(stderr, "Unable to select private key file: %s\n", sc_strerror(r)); return 2; } e = sc_file_get_acl_entry(file, SC_AC_OP_READ); if (e == NULL || e->method == SC_AC_NEVER) return 10; bufsize = MIN(file->size, sizeof buf); sc_file_free(file); r = sc_read_binary(card, 0, buf, bufsize, 0); if (r < 0) { fprintf(stderr, "Unable to read private key file: %s\n", sc_strerror(r)); return 2; } bufsize = r; do { if (bufsize < 4) return 3; keysize = (p[0] << 8) | p[1]; if (keysize == 0) break; if (keysize < 3) return 3; if (p[2] == opt_key_num) break; p += keysize; bufsize -= keysize; } while (1); if (keysize == 0) { printf("Key number %d not found.\n", opt_key_num); return 2; } return parse_private_key(p, keysize, rsa); }
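The two long lines above are this record's func_before and func_after fields. They isolate the actual CVE-2018-16391 fix: read_private_key reads the key file into a fixed u8 buf[2048], but before the patch it passed the card-reported file->size straight to sc_read_binary, so a card claiming a size above 2048 could write past the stack buffer (CWE-119). The patch clamps the length with MIN(file->size, sizeof buf). Below is a minimal, self-contained sketch of the pattern; it is not OpenSC code, and stub_read_binary is a hypothetical stand-in for sc_read_binary that simply fills the requested number of bytes:

#include <stdio.h>
#include <string.h>

#define MIN(a, b) (((a) < (b)) ? (a) : (b))

/* Hypothetical stand-in for sc_read_binary(): the card decides how many
 * bytes come back; here it just fills the requested length. */
static int stub_read_binary(unsigned char *out, size_t len)
{
	memset(out, 0xAA, len);
	return (int)len;
}

int main(void)
{
	unsigned char buf[2048];
	size_t card_reported_size = 4096;	/* attacker-controlled, > sizeof buf */

	/* func_before pattern: trusts the reported size outright. */
	size_t bufsize_before = card_reported_size;

	/* func_after pattern: clamps the read to the real buffer size. */
	size_t bufsize_after = MIN(card_reported_size, sizeof buf);

	printf("before: would copy %zu bytes into a %zu-byte buffer (overflow)\n",
	       bufsize_before, sizeof buf);
	printf("after:  copies %d bytes (clamped)\n",
	       stub_read_binary(buf, bufsize_after));
	return 0;
}

Run as-is, the sketch only prints the two lengths; the "before" length is reported but deliberately never used for the copy, since actually performing the oversized read would be undefined behavior.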
diff: {'added': [(24, '#include "libopensc/internal.h"'), (335, '\tbufsize = MIN(file->size, sizeof buf);'), (386, '\tbufsize = MIN(file->size, sizeof buf);')], 'deleted': [(334, '\tbufsize = file->size;'), (385, '\tbufsize = file->size;')]}
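Reading the diff field: the two deleted lines (334 and 385 in the file) are the unclamped assignments in read_public_key and read_private_key, and the two added lines replace them with the MIN() clamp; the third addition, at line 24, pulls in libopensc/internal.h. Presumably that header is what supplies MIN for these tools; the conventional definition would be the following (an assumption here, not quoted from the actual header):

/* Assumed MIN definition; libopensc/internal.h may differ in detail. */
#define MIN(a, b) (((a) < (b)) ? (a) : (b))

With u8 buf[2048], MIN(file->size, sizeof buf) can never exceed 2048, so the subsequent sc_read_binary(card, 0, buf, bufsize, 0) stays within the buffer regardless of what size the card reports.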
num_lines_added: 3
num_lines_deleted: 2
num_lines_in_file: 996
num_tokens_in_file: 5,901
repo: https://github.com/OpenSC/OpenSC
cve_id: CVE-2018-16391
cwe_id: ['CWE-119']
file_name: cryptoflex-tool.c
method_name: read_public_key
/* * cryptoflex-tool.c: Tool for doing various Cryptoflex related stuff * * Copyright (C) 2001 Juha Yrjölä <juha.yrjola@iki.fi> * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include "config.h" #include "libopensc/sc-ossl-compat.h" #include <openssl/bn.h> #include <openssl/rsa.h> #include <openssl/x509.h> #include <openssl/pem.h> #include <openssl/err.h> #include "libopensc/pkcs15.h" #include "common/compat_strlcpy.h" #include "common/compat_strlcat.h" #include "util.h" static const char *app_name = "cryptoflex-tool"; static char * opt_reader = NULL; static int opt_wait = 0; static int opt_key_num = 1, opt_pin_num = -1; static int verbose = 0; static int opt_exponent = 3; static int opt_mod_length = 1024; static int opt_key_count = 1; static int opt_pin_attempts = 10; static int opt_puk_attempts = 10; static const char *opt_appdf = NULL, *opt_prkeyf = NULL, *opt_pubkeyf = NULL; static u8 *pincode = NULL; static const struct option options[] = { { "list-keys", 0, NULL, 'l' }, { "create-key-files", 1, NULL, 'c' }, { "create-pin-file", 1, NULL, 'P' }, { "generate-key", 0, NULL, 'g' }, { "read-key", 0, NULL, 'R' }, { "verify-pin", 0, NULL, 'V' }, { "key-num", 1, NULL, 'k' }, { "app-df", 1, NULL, 'a' }, { "prkey-file", 1, NULL, 'p' }, { "pubkey-file", 1, NULL, 'u' }, { "exponent", 1, NULL, 'e' }, { "modulus-length", 1, NULL, 'm' }, { "reader", 1, NULL, 'r' }, { "wait", 0, NULL, 'w' }, { "verbose", 0, NULL, 'v' }, { NULL, 0, NULL, 0 } }; static const char *option_help[] = { "Lists all keys in a public key file", "Creates new RSA key files for <arg> keys", "Creates a new CHV<arg> file", "Generates a new RSA key pair", "Reads a public key from the card", "Verifies CHV1 before issuing commands", "Selects which key number to operate on [1]", "Selects the DF to operate in", "Private key file", "Public key file", "The RSA exponent to use in key generation [3]", "Modulus length to use in key generation [1024]", "Uses reader <arg>", "Wait for card insertion", "Verbose operation. 
Use several times to enable debug output.", }; static sc_context_t *ctx = NULL; static sc_card_t *card = NULL; static char *getpin(const char *prompt) { char *buf, pass[20]; int i; printf("%s", prompt); fflush(stdout); if (fgets(pass, 20, stdin) == NULL) return NULL; for (i = 0; i < 20; i++) if (pass[i] == '\n') pass[i] = 0; if (strlen(pass) == 0) return NULL; buf = malloc(8); if (buf == NULL) return NULL; if (strlen(pass) > 8) { fprintf(stderr, "PIN code too long.\n"); free(buf); return NULL; } memset(buf, 0, 8); strlcpy(buf, pass, 8); return buf; } static int verify_pin(int pin) { char prompt[50]; int r, tries_left = -1; if (pincode == NULL) { sprintf(prompt, "Please enter CHV%d: ", pin); pincode = (u8 *) getpin(prompt); if (pincode == NULL || strlen((char *) pincode) == 0) return -1; } if (pin != 1 && pin != 2) return -3; r = sc_verify(card, SC_AC_CHV, pin, pincode, 8, &tries_left); if (r) { memset(pincode, 0, 8); free(pincode); pincode = NULL; fprintf(stderr, "PIN code verification failed: %s\n", sc_strerror(r)); return -1; } return 0; } static int select_app_df(void) { sc_path_t path; sc_file_t *file; char str[80]; int r; strcpy(str, "3F00"); if (opt_appdf != NULL) strlcat(str, opt_appdf, sizeof str); sc_format_path(str, &path); r = sc_select_file(card, &path, &file); if (r) { fprintf(stderr, "Unable to select application DF: %s\n", sc_strerror(r)); return -1; } if (file->type != SC_FILE_TYPE_DF) { fprintf(stderr, "Selected application DF is not a DF.\n"); return -1; } sc_file_free(file); if (opt_pin_num >= 0) return verify_pin(opt_pin_num); else return 0; } static void invert_buf(u8 *dest, const u8 *src, size_t c) { size_t i; for (i = 0; i < c; i++) dest[i] = src[c-1-i]; } static BIGNUM * cf2bn(const u8 *buf, size_t bufsize, BIGNUM *num) { u8 tmp[512]; invert_buf(tmp, buf, bufsize); return BN_bin2bn(tmp, bufsize, num); } static int bn2cf(const BIGNUM *num, u8 *buf) { u8 tmp[512]; int r; r = BN_bn2bin(num, tmp); if (r <= 0) return r; invert_buf(buf, tmp, r); return r; } static int parse_public_key(const u8 *key, size_t keysize, RSA *rsa) { const u8 *p = key; BIGNUM *n, *e; int base; base = (keysize - 7) / 5; if (base != 32 && base != 48 && base != 64 && base != 128) { fprintf(stderr, "Invalid public key.\n"); return -1; } p += 3; n = BN_new(); if (n == NULL) return -1; cf2bn(p, 2 * base, n); p += 2 * base; p += base; p += 2 * base; e = BN_new(); if (e == NULL) return -1; cf2bn(p, 4, e); if (RSA_set0_key(rsa, n, e, NULL) != 1) return -1; return 0; } static int gen_d(RSA *rsa) { BN_CTX *bnctx; BIGNUM *r0, *r1, *r2; const BIGNUM *rsa_p, *rsa_q, *rsa_n, *rsa_e, *rsa_d; BIGNUM *rsa_n_new, *rsa_e_new, *rsa_d_new; bnctx = BN_CTX_new(); if (bnctx == NULL) return -1; BN_CTX_start(bnctx); r0 = BN_CTX_get(bnctx); r1 = BN_CTX_get(bnctx); r2 = BN_CTX_get(bnctx); RSA_get0_key(rsa, &rsa_n, &rsa_e, &rsa_d); RSA_get0_factors(rsa, &rsa_p, &rsa_q); BN_sub(r1, rsa_p, BN_value_one()); BN_sub(r2, rsa_q, BN_value_one()); BN_mul(r0, r1, r2, bnctx); if ((rsa_d_new = BN_mod_inverse(NULL, rsa_e, r0, bnctx)) == NULL) { fprintf(stderr, "BN_mod_inverse() failed.\n"); return -1; } /* RSA_set0_key will free previous value, and replace with new value * Thus the need to copy the contents of rsa_n and rsa_e */ rsa_n_new = BN_dup(rsa_n); rsa_e_new = BN_dup(rsa_e); if (RSA_set0_key(rsa, rsa_n_new, rsa_e_new, rsa_d_new) != 1) return -1; BN_CTX_end(bnctx); BN_CTX_free(bnctx); return 0; } static int parse_private_key(const u8 *key, size_t keysize, RSA *rsa) { const u8 *p = key; BIGNUM *bn_p, *q, *dmp1, *dmq1, *iqmp; int 
base; base = (keysize - 3) / 5; if (base != 32 && base != 48 && base != 64 && base != 128) { fprintf(stderr, "Invalid private key.\n"); return -1; } p += 3; bn_p = BN_new(); if (bn_p == NULL) return -1; cf2bn(p, base, bn_p); p += base; q = BN_new(); if (q == NULL) return -1; cf2bn(p, base, q); p += base; iqmp = BN_new(); if (iqmp == NULL) return -1; cf2bn(p, base, iqmp); p += base; dmp1 = BN_new(); if (dmp1 == NULL) return -1; cf2bn(p, base, dmp1); p += base; dmq1 = BN_new(); if (dmq1 == NULL) return -1; cf2bn(p, base, dmq1); p += base; if (RSA_set0_factors(rsa, bn_p, q) != 1) return -1; if (RSA_set0_crt_params(rsa, dmp1, dmq1, iqmp) != 1) return -1; if (gen_d(rsa)) return -1; return 0; } static int read_public_key(RSA *rsa) { int r; sc_path_t path; sc_file_t *file; u8 buf[2048], *p = buf; size_t bufsize, keysize; r = select_app_df(); if (r) return 1; sc_format_path("I1012", &path); r = sc_select_file(card, &path, &file); if (r) { fprintf(stderr, "Unable to select public key file: %s\n", sc_strerror(r)); return 2; } bufsize = file->size; sc_file_free(file); r = sc_read_binary(card, 0, buf, bufsize, 0); if (r < 0) { fprintf(stderr, "Unable to read public key file: %s\n", sc_strerror(r)); return 2; } bufsize = r; do { if (bufsize < 4) return 3; keysize = (p[0] << 8) | p[1]; if (keysize == 0) break; if (keysize < 3) return 3; if (p[2] == opt_key_num) break; p += keysize; bufsize -= keysize; } while (1); if (keysize == 0) { printf("Key number %d not found.\n", opt_key_num); return 2; } return parse_public_key(p, keysize, rsa); } static int read_private_key(RSA *rsa) { int r; sc_path_t path; sc_file_t *file; const sc_acl_entry_t *e; u8 buf[2048], *p = buf; size_t bufsize, keysize; r = select_app_df(); if (r) return 1; sc_format_path("I0012", &path); r = sc_select_file(card, &path, &file); if (r) { fprintf(stderr, "Unable to select private key file: %s\n", sc_strerror(r)); return 2; } e = sc_file_get_acl_entry(file, SC_AC_OP_READ); if (e == NULL || e->method == SC_AC_NEVER) return 10; bufsize = file->size; sc_file_free(file); r = sc_read_binary(card, 0, buf, bufsize, 0); if (r < 0) { fprintf(stderr, "Unable to read private key file: %s\n", sc_strerror(r)); return 2; } bufsize = r; do { if (bufsize < 4) return 3; keysize = (p[0] << 8) | p[1]; if (keysize == 0) break; if (keysize < 3) return 3; if (p[2] == opt_key_num) break; p += keysize; bufsize -= keysize; } while (1); if (keysize == 0) { printf("Key number %d not found.\n", opt_key_num); return 2; } return parse_private_key(p, keysize, rsa); } static int read_key(void) { RSA *rsa = RSA_new(); u8 buf[1024], *p = buf; u8 b64buf[2048]; int r; if (rsa == NULL) return -1; r = read_public_key(rsa); if (r) return r; r = i2d_RSA_PUBKEY(rsa, &p); if (r <= 0) { fprintf(stderr, "Error encoding public key.\n"); return -1; } r = sc_base64_encode(buf, r, b64buf, sizeof(b64buf), 64); if (r < 0) { fprintf(stderr, "Error in Base64 encoding: %s\n", sc_strerror(r)); return -1; } printf("-----BEGIN PUBLIC KEY-----\n%s-----END PUBLIC KEY-----\n", b64buf); r = read_private_key(rsa); if (r == 10) return 0; else if (r) return r; p = buf; r = i2d_RSAPrivateKey(rsa, &p); if (r <= 0) { fprintf(stderr, "Error encoding private key.\n"); return -1; } r = sc_base64_encode(buf, r, b64buf, sizeof(b64buf), 64); if (r < 0) { fprintf(stderr, "Error in Base64 encoding: %s\n", sc_strerror(r)); return -1; } printf("-----BEGIN RSA PRIVATE KEY-----\n%s-----END RSA PRIVATE KEY-----\n", b64buf); return 0; } static int list_keys(void) { int r, idx = 0; sc_path_t path; u8 buf[2048], *p = 
buf; size_t keysize, i; int mod_lens[] = { 512, 768, 1024, 2048 }; size_t sizes[] = { 167, 247, 327, 647 }; r = select_app_df(); if (r) return 1; sc_format_path("I1012", &path); r = sc_select_file(card, &path, NULL); if (r) { fprintf(stderr, "Unable to select public key file: %s\n", sc_strerror(r)); return 2; } do { int mod_len = -1; r = sc_read_binary(card, idx, buf, 3, 0); if (r < 0) { fprintf(stderr, "Unable to read public key file: %s\n", sc_strerror(r)); return 2; } keysize = (p[0] << 8) | p[1]; if (keysize == 0) break; idx += keysize; for (i = 0; i < sizeof(sizes)/sizeof(sizes[ 0]); i++) if (sizes[i] == keysize) mod_len = mod_lens[i]; if (mod_len < 0) printf("Key %d -- unknown modulus length\n", p[2] & 0x0F); else printf("Key %d -- Modulus length %d\n", p[2] & 0x0F, mod_len); } while (1); return 0; } static int generate_key(void) { sc_apdu_t apdu; u8 sbuf[4]; u8 p2; int r; switch (opt_mod_length) { case 512: p2 = 0x40; break; case 768: p2 = 0x60; break; case 1024: p2 = 0x80; break; case 2048: p2 = 0x00; break; default: fprintf(stderr, "Invalid modulus length.\n"); return 2; } sc_format_apdu(card, &apdu, SC_APDU_CASE_3_SHORT, 0x46, (u8) opt_key_num-1, p2); apdu.cla = 0xF0; apdu.lc = 4; apdu.datalen = 4; apdu.data = sbuf; sbuf[0] = opt_exponent & 0xFF; sbuf[1] = (opt_exponent >> 8) & 0xFF; sbuf[2] = (opt_exponent >> 16) & 0xFF; sbuf[3] = (opt_exponent >> 24) & 0xFF; r = select_app_df(); if (r) return 1; if (verbose) printf("Generating key...\n"); r = sc_transmit_apdu(card, &apdu); if (r) { fprintf(stderr, "APDU transmit failed: %s\n", sc_strerror(r)); if (r == SC_ERROR_TRANSMIT_FAILED) fprintf(stderr, "Reader has timed out. It is still possible that the key generation has\n" "succeeded.\n"); return 1; } if (apdu.sw1 == 0x90 && apdu.sw2 == 0x00) { printf("Key generation successful.\n"); return 0; } if (apdu.sw1 == 0x69 && apdu.sw2 == 0x82) fprintf(stderr, "CHV1 not verified or invalid exponent value.\n"); else fprintf(stderr, "Card returned SW1=%02X, SW2=%02X.\n", apdu.sw1, apdu.sw2); return 1; } static int create_key_files(void) { sc_file_t *file; int mod_lens[] = { 512, 768, 1024, 2048 }; int sizes[] = { 163, 243, 323, 643 }; int size = -1; int r; size_t i; for (i = 0; i < sizeof(mod_lens) / sizeof(int); i++) if (mod_lens[i] == opt_mod_length) { size = sizes[i]; break; } if (size == -1) { fprintf(stderr, "Invalid modulus length.\n"); return 1; } if (verbose) printf("Creating key files for %d keys.\n", opt_key_count); file = sc_file_new(); if (!file) { fprintf(stderr, "out of memory.\n"); return 1; } file->type = SC_FILE_TYPE_WORKING_EF; file->ef_structure = SC_FILE_EF_TRANSPARENT; file->id = 0x0012; file->size = opt_key_count * size + 3; sc_file_add_acl_entry(file, SC_AC_OP_READ, SC_AC_NEVER, SC_AC_KEY_REF_NONE); sc_file_add_acl_entry(file, SC_AC_OP_UPDATE, SC_AC_CHV, 1); sc_file_add_acl_entry(file, SC_AC_OP_INVALIDATE, SC_AC_CHV, 1); sc_file_add_acl_entry(file, SC_AC_OP_REHABILITATE, SC_AC_CHV, 1); if (select_app_df()) { sc_file_free(file); return 1; } r = sc_create_file(card, file); sc_file_free(file); if (r) { fprintf(stderr, "Unable to create private key file: %s\n", sc_strerror(r)); return 1; } file = sc_file_new(); if (!file) { fprintf(stderr, "out of memory.\n"); return 1; } file->type = SC_FILE_TYPE_WORKING_EF; file->ef_structure = SC_FILE_EF_TRANSPARENT; file->id = 0x1012; file->size = opt_key_count * (size + 4) + 3; sc_file_add_acl_entry(file, SC_AC_OP_READ, SC_AC_NONE, SC_AC_KEY_REF_NONE); sc_file_add_acl_entry(file, SC_AC_OP_UPDATE, SC_AC_CHV, 1); 
sc_file_add_acl_entry(file, SC_AC_OP_INVALIDATE, SC_AC_CHV, 1); sc_file_add_acl_entry(file, SC_AC_OP_REHABILITATE, SC_AC_CHV, 1); if (select_app_df()) { sc_file_free(file); return 1; } r = sc_create_file(card, file); sc_file_free(file); if (r) { fprintf(stderr, "Unable to create public key file: %s\n", sc_strerror(r)); return 1; } if (verbose) printf("Key files generated successfully.\n"); return 0; } static int read_rsa_privkey(RSA **rsa_out) { RSA *rsa = NULL; BIO *in = NULL; int r; in = BIO_new(BIO_s_file()); if (opt_prkeyf == NULL) { fprintf(stderr, "Private key file must be set.\n"); return 2; } r = BIO_read_filename(in, opt_prkeyf); if (r <= 0) { perror(opt_prkeyf); return 2; } rsa = PEM_read_bio_RSAPrivateKey(in, NULL, NULL, NULL); if (rsa == NULL) { fprintf(stderr, "Unable to load private key.\n"); return 2; } BIO_free(in); *rsa_out = rsa; return 0; } static int encode_private_key(RSA *rsa, u8 *key, size_t *keysize) { u8 buf[1024], *p = buf; u8 bnbuf[256]; int base = 0; int r; const BIGNUM *rsa_p, *rsa_q, *rsa_dmp1, *rsa_dmq1, *rsa_iqmp; switch (RSA_bits(rsa)) { case 512: base = 32; break; case 768: base = 48; break; case 1024: base = 64; break; case 2048: base = 128; break; } if (base == 0) { fprintf(stderr, "Key length invalid.\n"); return 2; } *p++ = (5 * base + 3) >> 8; *p++ = (5 * base + 3) & 0xFF; *p++ = opt_key_num; RSA_get0_factors(rsa, &rsa_p, &rsa_q); r = bn2cf(rsa_p, bnbuf); if (r != base) { fprintf(stderr, "Invalid private key.\n"); return 2; } memcpy(p, bnbuf, base); p += base; r = bn2cf(rsa_q, bnbuf); if (r != base) { fprintf(stderr, "Invalid private key.\n"); return 2; } memcpy(p, bnbuf, base); p += base; RSA_get0_crt_params(rsa, &rsa_dmp1, &rsa_dmq1, &rsa_iqmp); r = bn2cf(rsa_iqmp, bnbuf); if (r != base) { fprintf(stderr, "Invalid private key.\n"); return 2; } memcpy(p, bnbuf, base); p += base; r = bn2cf(rsa_dmp1, bnbuf); if (r != base) { fprintf(stderr, "Invalid private key.\n"); return 2; } memcpy(p, bnbuf, base); p += base; r = bn2cf(rsa_dmq1, bnbuf); if (r != base) { fprintf(stderr, "Invalid private key.\n"); return 2; } memcpy(p, bnbuf, base); p += base; memcpy(key, buf, p - buf); *keysize = p - buf; return 0; } static int encode_public_key(RSA *rsa, u8 *key, size_t *keysize) { u8 buf[1024], *p = buf; u8 bnbuf[256]; int base = 0; int r; const BIGNUM *rsa_n, *rsa_e; switch (RSA_bits(rsa)) { case 512: base = 32; break; case 768: base = 48; break; case 1024: base = 64; break; case 2048: base = 128; break; } if (base == 0) { fprintf(stderr, "Key length invalid.\n"); return 2; } *p++ = (5 * base + 7) >> 8; *p++ = (5 * base + 7) & 0xFF; *p++ = opt_key_num; RSA_get0_key(rsa, &rsa_n, &rsa_e, NULL); r = bn2cf(rsa_n, bnbuf); if (r != 2*base) { fprintf(stderr, "Invalid public key.\n"); return 2; } memcpy(p, bnbuf, 2*base); p += 2*base; memset(p, 0, base); p += base; memset(bnbuf, 0, 2*base); memcpy(p, bnbuf, 2*base); p += 2*base; r = bn2cf(rsa_e, bnbuf); memcpy(p, bnbuf, 4); p += 4; memcpy(key, buf, p - buf); *keysize = p - buf; return 0; } static int update_public_key(const u8 *key, size_t keysize) { int r, idx = 0; sc_path_t path; r = select_app_df(); if (r) return 1; sc_format_path("I1012", &path); r = sc_select_file(card, &path, NULL); if (r) { fprintf(stderr, "Unable to select public key file: %s\n", sc_strerror(r)); return 2; } idx = keysize * (opt_key_num-1); r = sc_update_binary(card, idx, key, keysize, 0); if (r < 0) { fprintf(stderr, "Unable to write public key: %s\n", sc_strerror(r)); return 2; } return 0; } static int update_private_key(const u8 *key, size_t 
keysize) { int r, idx = 0; sc_path_t path; r = select_app_df(); if (r) return 1; sc_format_path("I0012", &path); r = sc_select_file(card, &path, NULL); if (r) { fprintf(stderr, "Unable to select private key file: %s\n", sc_strerror(r)); return 2; } idx = keysize * (opt_key_num-1); r = sc_update_binary(card, idx, key, keysize, 0); if (r < 0) { fprintf(stderr, "Unable to write private key: %s\n", sc_strerror(r)); return 2; } return 0; } static int store_key(void) { u8 prv[1024], pub[1024]; size_t prvsize, pubsize; int r; RSA *rsa; r = read_rsa_privkey(&rsa); if (r) return r; r = encode_private_key(rsa, prv, &prvsize); if (r) return r; r = encode_public_key(rsa, pub, &pubsize); if (r) return r; if (verbose) printf("Storing private key...\n"); r = select_app_df(); if (r) return r; r = update_private_key(prv, prvsize); if (r) return r; if (verbose) printf("Storing public key...\n"); r = select_app_df(); if (r) return r; r = update_public_key(pub, pubsize); if (r) return r; return 0; } static int create_pin_file(const sc_path_t *inpath, int chv, const char *key_id) { char prompt[40], *pin, *puk; char buf[30], *p = buf; sc_path_t file_id, path; sc_file_t *file; size_t len; int r; file_id = *inpath; if (file_id.len < 2) return -1; if (chv == 1) sc_format_path("I0000", &file_id); else if (chv == 2) sc_format_path("I0100", &file_id); else return -1; r = sc_select_file(card, inpath, NULL); if (r) return -1; r = sc_select_file(card, &file_id, NULL); if (r == 0) return 0; sprintf(prompt, "Please enter CHV%d%s: ", chv, key_id); pin = getpin(prompt); if (pin == NULL) return -1; sprintf(prompt, "Please enter PUK for CHV%d%s: ", chv, key_id); puk = getpin(prompt); if (puk == NULL) { free(pin); return -1; } memset(p, 0xFF, 3); p += 3; memcpy(p, pin, 8); p += 8; *p++ = opt_pin_attempts; *p++ = opt_pin_attempts; memcpy(p, puk, 8); p += 8; *p++ = opt_puk_attempts; *p++ = opt_puk_attempts; len = p - buf; free(pin); free(puk); file = sc_file_new(); file->type = SC_FILE_TYPE_WORKING_EF; file->ef_structure = SC_FILE_EF_TRANSPARENT; sc_file_add_acl_entry(file, SC_AC_OP_READ, SC_AC_NEVER, SC_AC_KEY_REF_NONE); if (inpath->len == 2 && inpath->value[0] == 0x3F && inpath->value[1] == 0x00) sc_file_add_acl_entry(file, SC_AC_OP_UPDATE, SC_AC_AUT, 1); else sc_file_add_acl_entry(file, SC_AC_OP_UPDATE, SC_AC_CHV, 2); sc_file_add_acl_entry(file, SC_AC_OP_INVALIDATE, SC_AC_AUT, 1); sc_file_add_acl_entry(file, SC_AC_OP_REHABILITATE, SC_AC_AUT, 1); file->size = len; file->id = (file_id.value[0] << 8) | file_id.value[1]; r = sc_create_file(card, file); sc_file_free(file); if (r) { fprintf(stderr, "PIN file creation failed: %s\n", sc_strerror(r)); return r; } path = *inpath; sc_append_path(&path, &file_id); r = sc_select_file(card, &path, NULL); if (r) { fprintf(stderr, "Unable to select created PIN file: %s\n", sc_strerror(r)); return r; } r = sc_update_binary(card, 0, (const u8 *) buf, len, 0); if (r < 0) { fprintf(stderr, "Unable to update created PIN file: %s\n", sc_strerror(r)); return r; } return 0; } static int create_pin(void) { sc_path_t path; char buf[80]; if (opt_pin_num != 1 && opt_pin_num != 2) { fprintf(stderr, "Invalid PIN number. 
Possible values: 1, 2.\n"); return 2; } strcpy(buf, "3F00"); if (opt_appdf != NULL) strlcat(buf, opt_appdf, sizeof buf); sc_format_path(buf, &path); return create_pin_file(&path, opt_pin_num, ""); } int main(int argc, char *argv[]) { int err = 0, r, c, long_optind = 0; int action_count = 0; int do_read_key = 0; int do_generate_key = 0; int do_create_key_files = 0; int do_list_keys = 0; int do_store_key = 0; int do_create_pin_file = 0; sc_context_param_t ctx_param; while (1) { c = getopt_long(argc, argv, "P:Vslgc:Rk:r:p:u:e:m:vwa:", options, &long_optind); if (c == -1) break; if (c == '?') util_print_usage_and_die(app_name, options, option_help, NULL); switch (c) { case 'l': do_list_keys = 1; action_count++; break; case 'P': do_create_pin_file = 1; opt_pin_num = atoi(optarg); action_count++; break; case 'R': do_read_key = 1; action_count++; break; case 'g': do_generate_key = 1; action_count++; break; case 'c': do_create_key_files = 1; opt_key_count = atoi(optarg); action_count++; break; case 's': do_store_key = 1; action_count++; break; case 'k': opt_key_num = atoi(optarg); if (opt_key_num < 1 || opt_key_num > 15) { fprintf(stderr, "Key number invalid.\n"); exit(2); } break; case 'V': opt_pin_num = 1; break; case 'e': opt_exponent = atoi(optarg); break; case 'm': opt_mod_length = atoi(optarg); break; case 'p': opt_prkeyf = optarg; break; case 'u': opt_pubkeyf = optarg; break; case 'r': opt_reader = optarg; break; case 'v': verbose++; break; case 'w': opt_wait = 1; break; case 'a': opt_appdf = optarg; break; } } if (action_count == 0) util_print_usage_and_die(app_name, options, option_help, NULL); memset(&ctx_param, 0, sizeof(ctx_param)); ctx_param.ver = 0; ctx_param.app_name = app_name; r = sc_context_create(&ctx, &ctx_param); if (r) { fprintf(stderr, "Failed to establish context: %s\n", sc_strerror(r)); return 1; } if (verbose > 1) { ctx->debug = verbose; sc_ctx_log_to_file(ctx, "stderr"); } err = util_connect_card(ctx, &card, opt_reader, opt_wait, verbose); printf("Using card driver: %s\n", card->driver->name); if (do_create_pin_file) { if ((err = create_pin()) != 0) goto end; action_count--; } if (do_create_key_files) { if ((err = create_key_files()) != 0) goto end; action_count--; } if (do_generate_key) { if ((err = generate_key()) != 0) goto end; action_count--; } if (do_store_key) { if ((err = store_key()) != 0) goto end; action_count--; } if (do_list_keys) { if ((err = list_keys()) != 0) goto end; action_count--; } if (do_read_key) { if ((err = read_key()) != 0) goto end; action_count--; } if (pincode != NULL) { memset(pincode, 0, 8); free(pincode); } end: if (card) { sc_unlock(card); sc_disconnect_card(card); } if (ctx) sc_release_context(ctx); return err; }
/* * cryptoflex-tool.c: Tool for doing various Cryptoflex related stuff * * Copyright (C) 2001 Juha Yrjölä <juha.yrjola@iki.fi> * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include "config.h" #include "libopensc/sc-ossl-compat.h" #include "libopensc/internal.h" #include <openssl/bn.h> #include <openssl/rsa.h> #include <openssl/x509.h> #include <openssl/pem.h> #include <openssl/err.h> #include "libopensc/pkcs15.h" #include "common/compat_strlcpy.h" #include "common/compat_strlcat.h" #include "util.h" static const char *app_name = "cryptoflex-tool"; static char * opt_reader = NULL; static int opt_wait = 0; static int opt_key_num = 1, opt_pin_num = -1; static int verbose = 0; static int opt_exponent = 3; static int opt_mod_length = 1024; static int opt_key_count = 1; static int opt_pin_attempts = 10; static int opt_puk_attempts = 10; static const char *opt_appdf = NULL, *opt_prkeyf = NULL, *opt_pubkeyf = NULL; static u8 *pincode = NULL; static const struct option options[] = { { "list-keys", 0, NULL, 'l' }, { "create-key-files", 1, NULL, 'c' }, { "create-pin-file", 1, NULL, 'P' }, { "generate-key", 0, NULL, 'g' }, { "read-key", 0, NULL, 'R' }, { "verify-pin", 0, NULL, 'V' }, { "key-num", 1, NULL, 'k' }, { "app-df", 1, NULL, 'a' }, { "prkey-file", 1, NULL, 'p' }, { "pubkey-file", 1, NULL, 'u' }, { "exponent", 1, NULL, 'e' }, { "modulus-length", 1, NULL, 'm' }, { "reader", 1, NULL, 'r' }, { "wait", 0, NULL, 'w' }, { "verbose", 0, NULL, 'v' }, { NULL, 0, NULL, 0 } }; static const char *option_help[] = { "Lists all keys in a public key file", "Creates new RSA key files for <arg> keys", "Creates a new CHV<arg> file", "Generates a new RSA key pair", "Reads a public key from the card", "Verifies CHV1 before issuing commands", "Selects which key number to operate on [1]", "Selects the DF to operate in", "Private key file", "Public key file", "The RSA exponent to use in key generation [3]", "Modulus length to use in key generation [1024]", "Uses reader <arg>", "Wait for card insertion", "Verbose operation. 
Use several times to enable debug output.", }; static sc_context_t *ctx = NULL; static sc_card_t *card = NULL; static char *getpin(const char *prompt) { char *buf, pass[20]; int i; printf("%s", prompt); fflush(stdout); if (fgets(pass, 20, stdin) == NULL) return NULL; for (i = 0; i < 20; i++) if (pass[i] == '\n') pass[i] = 0; if (strlen(pass) == 0) return NULL; buf = malloc(8); if (buf == NULL) return NULL; if (strlen(pass) > 8) { fprintf(stderr, "PIN code too long.\n"); free(buf); return NULL; } memset(buf, 0, 8); strlcpy(buf, pass, 8); return buf; } static int verify_pin(int pin) { char prompt[50]; int r, tries_left = -1; if (pincode == NULL) { sprintf(prompt, "Please enter CHV%d: ", pin); pincode = (u8 *) getpin(prompt); if (pincode == NULL || strlen((char *) pincode) == 0) return -1; } if (pin != 1 && pin != 2) return -3; r = sc_verify(card, SC_AC_CHV, pin, pincode, 8, &tries_left); if (r) { memset(pincode, 0, 8); free(pincode); pincode = NULL; fprintf(stderr, "PIN code verification failed: %s\n", sc_strerror(r)); return -1; } return 0; } static int select_app_df(void) { sc_path_t path; sc_file_t *file; char str[80]; int r; strcpy(str, "3F00"); if (opt_appdf != NULL) strlcat(str, opt_appdf, sizeof str); sc_format_path(str, &path); r = sc_select_file(card, &path, &file); if (r) { fprintf(stderr, "Unable to select application DF: %s\n", sc_strerror(r)); return -1; } if (file->type != SC_FILE_TYPE_DF) { fprintf(stderr, "Selected application DF is not a DF.\n"); return -1; } sc_file_free(file); if (opt_pin_num >= 0) return verify_pin(opt_pin_num); else return 0; } static void invert_buf(u8 *dest, const u8 *src, size_t c) { size_t i; for (i = 0; i < c; i++) dest[i] = src[c-1-i]; } static BIGNUM * cf2bn(const u8 *buf, size_t bufsize, BIGNUM *num) { u8 tmp[512]; invert_buf(tmp, buf, bufsize); return BN_bin2bn(tmp, bufsize, num); } static int bn2cf(const BIGNUM *num, u8 *buf) { u8 tmp[512]; int r; r = BN_bn2bin(num, tmp); if (r <= 0) return r; invert_buf(buf, tmp, r); return r; } static int parse_public_key(const u8 *key, size_t keysize, RSA *rsa) { const u8 *p = key; BIGNUM *n, *e; int base; base = (keysize - 7) / 5; if (base != 32 && base != 48 && base != 64 && base != 128) { fprintf(stderr, "Invalid public key.\n"); return -1; } p += 3; n = BN_new(); if (n == NULL) return -1; cf2bn(p, 2 * base, n); p += 2 * base; p += base; p += 2 * base; e = BN_new(); if (e == NULL) return -1; cf2bn(p, 4, e); if (RSA_set0_key(rsa, n, e, NULL) != 1) return -1; return 0; } static int gen_d(RSA *rsa) { BN_CTX *bnctx; BIGNUM *r0, *r1, *r2; const BIGNUM *rsa_p, *rsa_q, *rsa_n, *rsa_e, *rsa_d; BIGNUM *rsa_n_new, *rsa_e_new, *rsa_d_new; bnctx = BN_CTX_new(); if (bnctx == NULL) return -1; BN_CTX_start(bnctx); r0 = BN_CTX_get(bnctx); r1 = BN_CTX_get(bnctx); r2 = BN_CTX_get(bnctx); RSA_get0_key(rsa, &rsa_n, &rsa_e, &rsa_d); RSA_get0_factors(rsa, &rsa_p, &rsa_q); BN_sub(r1, rsa_p, BN_value_one()); BN_sub(r2, rsa_q, BN_value_one()); BN_mul(r0, r1, r2, bnctx); if ((rsa_d_new = BN_mod_inverse(NULL, rsa_e, r0, bnctx)) == NULL) { fprintf(stderr, "BN_mod_inverse() failed.\n"); return -1; } /* RSA_set0_key will free previous value, and replace with new value * Thus the need to copy the contents of rsa_n and rsa_e */ rsa_n_new = BN_dup(rsa_n); rsa_e_new = BN_dup(rsa_e); if (RSA_set0_key(rsa, rsa_n_new, rsa_e_new, rsa_d_new) != 1) return -1; BN_CTX_end(bnctx); BN_CTX_free(bnctx); return 0; } static int parse_private_key(const u8 *key, size_t keysize, RSA *rsa) { const u8 *p = key; BIGNUM *bn_p, *q, *dmp1, *dmq1, *iqmp; int 
base; base = (keysize - 3) / 5; if (base != 32 && base != 48 && base != 64 && base != 128) { fprintf(stderr, "Invalid private key.\n"); return -1; } p += 3; bn_p = BN_new(); if (bn_p == NULL) return -1; cf2bn(p, base, bn_p); p += base; q = BN_new(); if (q == NULL) return -1; cf2bn(p, base, q); p += base; iqmp = BN_new(); if (iqmp == NULL) return -1; cf2bn(p, base, iqmp); p += base; dmp1 = BN_new(); if (dmp1 == NULL) return -1; cf2bn(p, base, dmp1); p += base; dmq1 = BN_new(); if (dmq1 == NULL) return -1; cf2bn(p, base, dmq1); p += base; if (RSA_set0_factors(rsa, bn_p, q) != 1) return -1; if (RSA_set0_crt_params(rsa, dmp1, dmq1, iqmp) != 1) return -1; if (gen_d(rsa)) return -1; return 0; } static int read_public_key(RSA *rsa) { int r; sc_path_t path; sc_file_t *file; u8 buf[2048], *p = buf; size_t bufsize, keysize; r = select_app_df(); if (r) return 1; sc_format_path("I1012", &path); r = sc_select_file(card, &path, &file); if (r) { fprintf(stderr, "Unable to select public key file: %s\n", sc_strerror(r)); return 2; } bufsize = MIN(file->size, sizeof buf); sc_file_free(file); r = sc_read_binary(card, 0, buf, bufsize, 0); if (r < 0) { fprintf(stderr, "Unable to read public key file: %s\n", sc_strerror(r)); return 2; } bufsize = r; do { if (bufsize < 4) return 3; keysize = (p[0] << 8) | p[1]; if (keysize == 0) break; if (keysize < 3) return 3; if (p[2] == opt_key_num) break; p += keysize; bufsize -= keysize; } while (1); if (keysize == 0) { printf("Key number %d not found.\n", opt_key_num); return 2; } return parse_public_key(p, keysize, rsa); } static int read_private_key(RSA *rsa) { int r; sc_path_t path; sc_file_t *file; const sc_acl_entry_t *e; u8 buf[2048], *p = buf; size_t bufsize, keysize; r = select_app_df(); if (r) return 1; sc_format_path("I0012", &path); r = sc_select_file(card, &path, &file); if (r) { fprintf(stderr, "Unable to select private key file: %s\n", sc_strerror(r)); return 2; } e = sc_file_get_acl_entry(file, SC_AC_OP_READ); if (e == NULL || e->method == SC_AC_NEVER) return 10; bufsize = MIN(file->size, sizeof buf); sc_file_free(file); r = sc_read_binary(card, 0, buf, bufsize, 0); if (r < 0) { fprintf(stderr, "Unable to read private key file: %s\n", sc_strerror(r)); return 2; } bufsize = r; do { if (bufsize < 4) return 3; keysize = (p[0] << 8) | p[1]; if (keysize == 0) break; if (keysize < 3) return 3; if (p[2] == opt_key_num) break; p += keysize; bufsize -= keysize; } while (1); if (keysize == 0) { printf("Key number %d not found.\n", opt_key_num); return 2; } return parse_private_key(p, keysize, rsa); } static int read_key(void) { RSA *rsa = RSA_new(); u8 buf[1024], *p = buf; u8 b64buf[2048]; int r; if (rsa == NULL) return -1; r = read_public_key(rsa); if (r) return r; r = i2d_RSA_PUBKEY(rsa, &p); if (r <= 0) { fprintf(stderr, "Error encoding public key.\n"); return -1; } r = sc_base64_encode(buf, r, b64buf, sizeof(b64buf), 64); if (r < 0) { fprintf(stderr, "Error in Base64 encoding: %s\n", sc_strerror(r)); return -1; } printf("-----BEGIN PUBLIC KEY-----\n%s-----END PUBLIC KEY-----\n", b64buf); r = read_private_key(rsa); if (r == 10) return 0; else if (r) return r; p = buf; r = i2d_RSAPrivateKey(rsa, &p); if (r <= 0) { fprintf(stderr, "Error encoding private key.\n"); return -1; } r = sc_base64_encode(buf, r, b64buf, sizeof(b64buf), 64); if (r < 0) { fprintf(stderr, "Error in Base64 encoding: %s\n", sc_strerror(r)); return -1; } printf("-----BEGIN RSA PRIVATE KEY-----\n%s-----END RSA PRIVATE KEY-----\n", b64buf); return 0; } static int list_keys(void) { int r, idx = 0; 
sc_path_t path; u8 buf[2048], *p = buf; size_t keysize, i; int mod_lens[] = { 512, 768, 1024, 2048 }; size_t sizes[] = { 167, 247, 327, 647 }; r = select_app_df(); if (r) return 1; sc_format_path("I1012", &path); r = sc_select_file(card, &path, NULL); if (r) { fprintf(stderr, "Unable to select public key file: %s\n", sc_strerror(r)); return 2; } do { int mod_len = -1; r = sc_read_binary(card, idx, buf, 3, 0); if (r < 0) { fprintf(stderr, "Unable to read public key file: %s\n", sc_strerror(r)); return 2; } keysize = (p[0] << 8) | p[1]; if (keysize == 0) break; idx += keysize; for (i = 0; i < sizeof(sizes)/sizeof(sizes[ 0]); i++) if (sizes[i] == keysize) mod_len = mod_lens[i]; if (mod_len < 0) printf("Key %d -- unknown modulus length\n", p[2] & 0x0F); else printf("Key %d -- Modulus length %d\n", p[2] & 0x0F, mod_len); } while (1); return 0; } static int generate_key(void) { sc_apdu_t apdu; u8 sbuf[4]; u8 p2; int r; switch (opt_mod_length) { case 512: p2 = 0x40; break; case 768: p2 = 0x60; break; case 1024: p2 = 0x80; break; case 2048: p2 = 0x00; break; default: fprintf(stderr, "Invalid modulus length.\n"); return 2; } sc_format_apdu(card, &apdu, SC_APDU_CASE_3_SHORT, 0x46, (u8) opt_key_num-1, p2); apdu.cla = 0xF0; apdu.lc = 4; apdu.datalen = 4; apdu.data = sbuf; sbuf[0] = opt_exponent & 0xFF; sbuf[1] = (opt_exponent >> 8) & 0xFF; sbuf[2] = (opt_exponent >> 16) & 0xFF; sbuf[3] = (opt_exponent >> 24) & 0xFF; r = select_app_df(); if (r) return 1; if (verbose) printf("Generating key...\n"); r = sc_transmit_apdu(card, &apdu); if (r) { fprintf(stderr, "APDU transmit failed: %s\n", sc_strerror(r)); if (r == SC_ERROR_TRANSMIT_FAILED) fprintf(stderr, "Reader has timed out. It is still possible that the key generation has\n" "succeeded.\n"); return 1; } if (apdu.sw1 == 0x90 && apdu.sw2 == 0x00) { printf("Key generation successful.\n"); return 0; } if (apdu.sw1 == 0x69 && apdu.sw2 == 0x82) fprintf(stderr, "CHV1 not verified or invalid exponent value.\n"); else fprintf(stderr, "Card returned SW1=%02X, SW2=%02X.\n", apdu.sw1, apdu.sw2); return 1; } static int create_key_files(void) { sc_file_t *file; int mod_lens[] = { 512, 768, 1024, 2048 }; int sizes[] = { 163, 243, 323, 643 }; int size = -1; int r; size_t i; for (i = 0; i < sizeof(mod_lens) / sizeof(int); i++) if (mod_lens[i] == opt_mod_length) { size = sizes[i]; break; } if (size == -1) { fprintf(stderr, "Invalid modulus length.\n"); return 1; } if (verbose) printf("Creating key files for %d keys.\n", opt_key_count); file = sc_file_new(); if (!file) { fprintf(stderr, "out of memory.\n"); return 1; } file->type = SC_FILE_TYPE_WORKING_EF; file->ef_structure = SC_FILE_EF_TRANSPARENT; file->id = 0x0012; file->size = opt_key_count * size + 3; sc_file_add_acl_entry(file, SC_AC_OP_READ, SC_AC_NEVER, SC_AC_KEY_REF_NONE); sc_file_add_acl_entry(file, SC_AC_OP_UPDATE, SC_AC_CHV, 1); sc_file_add_acl_entry(file, SC_AC_OP_INVALIDATE, SC_AC_CHV, 1); sc_file_add_acl_entry(file, SC_AC_OP_REHABILITATE, SC_AC_CHV, 1); if (select_app_df()) { sc_file_free(file); return 1; } r = sc_create_file(card, file); sc_file_free(file); if (r) { fprintf(stderr, "Unable to create private key file: %s\n", sc_strerror(r)); return 1; } file = sc_file_new(); if (!file) { fprintf(stderr, "out of memory.\n"); return 1; } file->type = SC_FILE_TYPE_WORKING_EF; file->ef_structure = SC_FILE_EF_TRANSPARENT; file->id = 0x1012; file->size = opt_key_count * (size + 4) + 3; sc_file_add_acl_entry(file, SC_AC_OP_READ, SC_AC_NONE, SC_AC_KEY_REF_NONE); sc_file_add_acl_entry(file, SC_AC_OP_UPDATE, 
SC_AC_CHV, 1); sc_file_add_acl_entry(file, SC_AC_OP_INVALIDATE, SC_AC_CHV, 1); sc_file_add_acl_entry(file, SC_AC_OP_REHABILITATE, SC_AC_CHV, 1); if (select_app_df()) { sc_file_free(file); return 1; } r = sc_create_file(card, file); sc_file_free(file); if (r) { fprintf(stderr, "Unable to create public key file: %s\n", sc_strerror(r)); return 1; } if (verbose) printf("Key files generated successfully.\n"); return 0; } static int read_rsa_privkey(RSA **rsa_out) { RSA *rsa = NULL; BIO *in = NULL; int r; in = BIO_new(BIO_s_file()); if (opt_prkeyf == NULL) { fprintf(stderr, "Private key file must be set.\n"); return 2; } r = BIO_read_filename(in, opt_prkeyf); if (r <= 0) { perror(opt_prkeyf); return 2; } rsa = PEM_read_bio_RSAPrivateKey(in, NULL, NULL, NULL); if (rsa == NULL) { fprintf(stderr, "Unable to load private key.\n"); return 2; } BIO_free(in); *rsa_out = rsa; return 0; } static int encode_private_key(RSA *rsa, u8 *key, size_t *keysize) { u8 buf[1024], *p = buf; u8 bnbuf[256]; int base = 0; int r; const BIGNUM *rsa_p, *rsa_q, *rsa_dmp1, *rsa_dmq1, *rsa_iqmp; switch (RSA_bits(rsa)) { case 512: base = 32; break; case 768: base = 48; break; case 1024: base = 64; break; case 2048: base = 128; break; } if (base == 0) { fprintf(stderr, "Key length invalid.\n"); return 2; } *p++ = (5 * base + 3) >> 8; *p++ = (5 * base + 3) & 0xFF; *p++ = opt_key_num; RSA_get0_factors(rsa, &rsa_p, &rsa_q); r = bn2cf(rsa_p, bnbuf); if (r != base) { fprintf(stderr, "Invalid private key.\n"); return 2; } memcpy(p, bnbuf, base); p += base; r = bn2cf(rsa_q, bnbuf); if (r != base) { fprintf(stderr, "Invalid private key.\n"); return 2; } memcpy(p, bnbuf, base); p += base; RSA_get0_crt_params(rsa, &rsa_dmp1, &rsa_dmq1, &rsa_iqmp); r = bn2cf(rsa_iqmp, bnbuf); if (r != base) { fprintf(stderr, "Invalid private key.\n"); return 2; } memcpy(p, bnbuf, base); p += base; r = bn2cf(rsa_dmp1, bnbuf); if (r != base) { fprintf(stderr, "Invalid private key.\n"); return 2; } memcpy(p, bnbuf, base); p += base; r = bn2cf(rsa_dmq1, bnbuf); if (r != base) { fprintf(stderr, "Invalid private key.\n"); return 2; } memcpy(p, bnbuf, base); p += base; memcpy(key, buf, p - buf); *keysize = p - buf; return 0; } static int encode_public_key(RSA *rsa, u8 *key, size_t *keysize) { u8 buf[1024], *p = buf; u8 bnbuf[256]; int base = 0; int r; const BIGNUM *rsa_n, *rsa_e; switch (RSA_bits(rsa)) { case 512: base = 32; break; case 768: base = 48; break; case 1024: base = 64; break; case 2048: base = 128; break; } if (base == 0) { fprintf(stderr, "Key length invalid.\n"); return 2; } *p++ = (5 * base + 7) >> 8; *p++ = (5 * base + 7) & 0xFF; *p++ = opt_key_num; RSA_get0_key(rsa, &rsa_n, &rsa_e, NULL); r = bn2cf(rsa_n, bnbuf); if (r != 2*base) { fprintf(stderr, "Invalid public key.\n"); return 2; } memcpy(p, bnbuf, 2*base); p += 2*base; memset(p, 0, base); p += base; memset(bnbuf, 0, 2*base); memcpy(p, bnbuf, 2*base); p += 2*base; r = bn2cf(rsa_e, bnbuf); memcpy(p, bnbuf, 4); p += 4; memcpy(key, buf, p - buf); *keysize = p - buf; return 0; } static int update_public_key(const u8 *key, size_t keysize) { int r, idx = 0; sc_path_t path; r = select_app_df(); if (r) return 1; sc_format_path("I1012", &path); r = sc_select_file(card, &path, NULL); if (r) { fprintf(stderr, "Unable to select public key file: %s\n", sc_strerror(r)); return 2; } idx = keysize * (opt_key_num-1); r = sc_update_binary(card, idx, key, keysize, 0); if (r < 0) { fprintf(stderr, "Unable to write public key: %s\n", sc_strerror(r)); return 2; } return 0; } static int update_private_key(const u8 
*key, size_t keysize) { int r, idx = 0; sc_path_t path; r = select_app_df(); if (r) return 1; sc_format_path("I0012", &path); r = sc_select_file(card, &path, NULL); if (r) { fprintf(stderr, "Unable to select private key file: %s\n", sc_strerror(r)); return 2; } idx = keysize * (opt_key_num-1); r = sc_update_binary(card, idx, key, keysize, 0); if (r < 0) { fprintf(stderr, "Unable to write private key: %s\n", sc_strerror(r)); return 2; } return 0; } static int store_key(void) { u8 prv[1024], pub[1024]; size_t prvsize, pubsize; int r; RSA *rsa; r = read_rsa_privkey(&rsa); if (r) return r; r = encode_private_key(rsa, prv, &prvsize); if (r) return r; r = encode_public_key(rsa, pub, &pubsize); if (r) return r; if (verbose) printf("Storing private key...\n"); r = select_app_df(); if (r) return r; r = update_private_key(prv, prvsize); if (r) return r; if (verbose) printf("Storing public key...\n"); r = select_app_df(); if (r) return r; r = update_public_key(pub, pubsize); if (r) return r; return 0; } static int create_pin_file(const sc_path_t *inpath, int chv, const char *key_id) { char prompt[40], *pin, *puk; char buf[30], *p = buf; sc_path_t file_id, path; sc_file_t *file; size_t len; int r; file_id = *inpath; if (file_id.len < 2) return -1; if (chv == 1) sc_format_path("I0000", &file_id); else if (chv == 2) sc_format_path("I0100", &file_id); else return -1; r = sc_select_file(card, inpath, NULL); if (r) return -1; r = sc_select_file(card, &file_id, NULL); if (r == 0) return 0; sprintf(prompt, "Please enter CHV%d%s: ", chv, key_id); pin = getpin(prompt); if (pin == NULL) return -1; sprintf(prompt, "Please enter PUK for CHV%d%s: ", chv, key_id); puk = getpin(prompt); if (puk == NULL) { free(pin); return -1; } memset(p, 0xFF, 3); p += 3; memcpy(p, pin, 8); p += 8; *p++ = opt_pin_attempts; *p++ = opt_pin_attempts; memcpy(p, puk, 8); p += 8; *p++ = opt_puk_attempts; *p++ = opt_puk_attempts; len = p - buf; free(pin); free(puk); file = sc_file_new(); file->type = SC_FILE_TYPE_WORKING_EF; file->ef_structure = SC_FILE_EF_TRANSPARENT; sc_file_add_acl_entry(file, SC_AC_OP_READ, SC_AC_NEVER, SC_AC_KEY_REF_NONE); if (inpath->len == 2 && inpath->value[0] == 0x3F && inpath->value[1] == 0x00) sc_file_add_acl_entry(file, SC_AC_OP_UPDATE, SC_AC_AUT, 1); else sc_file_add_acl_entry(file, SC_AC_OP_UPDATE, SC_AC_CHV, 2); sc_file_add_acl_entry(file, SC_AC_OP_INVALIDATE, SC_AC_AUT, 1); sc_file_add_acl_entry(file, SC_AC_OP_REHABILITATE, SC_AC_AUT, 1); file->size = len; file->id = (file_id.value[0] << 8) | file_id.value[1]; r = sc_create_file(card, file); sc_file_free(file); if (r) { fprintf(stderr, "PIN file creation failed: %s\n", sc_strerror(r)); return r; } path = *inpath; sc_append_path(&path, &file_id); r = sc_select_file(card, &path, NULL); if (r) { fprintf(stderr, "Unable to select created PIN file: %s\n", sc_strerror(r)); return r; } r = sc_update_binary(card, 0, (const u8 *) buf, len, 0); if (r < 0) { fprintf(stderr, "Unable to update created PIN file: %s\n", sc_strerror(r)); return r; } return 0; } static int create_pin(void) { sc_path_t path; char buf[80]; if (opt_pin_num != 1 && opt_pin_num != 2) { fprintf(stderr, "Invalid PIN number. 
Possible values: 1, 2.\n"); return 2; } strcpy(buf, "3F00"); if (opt_appdf != NULL) strlcat(buf, opt_appdf, sizeof buf); sc_format_path(buf, &path); return create_pin_file(&path, opt_pin_num, ""); } int main(int argc, char *argv[]) { int err = 0, r, c, long_optind = 0; int action_count = 0; int do_read_key = 0; int do_generate_key = 0; int do_create_key_files = 0; int do_list_keys = 0; int do_store_key = 0; int do_create_pin_file = 0; sc_context_param_t ctx_param; while (1) { c = getopt_long(argc, argv, "P:Vslgc:Rk:r:p:u:e:m:vwa:", options, &long_optind); if (c == -1) break; if (c == '?') util_print_usage_and_die(app_name, options, option_help, NULL); switch (c) { case 'l': do_list_keys = 1; action_count++; break; case 'P': do_create_pin_file = 1; opt_pin_num = atoi(optarg); action_count++; break; case 'R': do_read_key = 1; action_count++; break; case 'g': do_generate_key = 1; action_count++; break; case 'c': do_create_key_files = 1; opt_key_count = atoi(optarg); action_count++; break; case 's': do_store_key = 1; action_count++; break; case 'k': opt_key_num = atoi(optarg); if (opt_key_num < 1 || opt_key_num > 15) { fprintf(stderr, "Key number invalid.\n"); exit(2); } break; case 'V': opt_pin_num = 1; break; case 'e': opt_exponent = atoi(optarg); break; case 'm': opt_mod_length = atoi(optarg); break; case 'p': opt_prkeyf = optarg; break; case 'u': opt_pubkeyf = optarg; break; case 'r': opt_reader = optarg; break; case 'v': verbose++; break; case 'w': opt_wait = 1; break; case 'a': opt_appdf = optarg; break; } } if (action_count == 0) util_print_usage_and_die(app_name, options, option_help, NULL); memset(&ctx_param, 0, sizeof(ctx_param)); ctx_param.ver = 0; ctx_param.app_name = app_name; r = sc_context_create(&ctx, &ctx_param); if (r) { fprintf(stderr, "Failed to establish context: %s\n", sc_strerror(r)); return 1; } if (verbose > 1) { ctx->debug = verbose; sc_ctx_log_to_file(ctx, "stderr"); } err = util_connect_card(ctx, &card, opt_reader, opt_wait, verbose); printf("Using card driver: %s\n", card->driver->name); if (do_create_pin_file) { if ((err = create_pin()) != 0) goto end; action_count--; } if (do_create_key_files) { if ((err = create_key_files()) != 0) goto end; action_count--; } if (do_generate_key) { if ((err = generate_key()) != 0) goto end; action_count--; } if (do_store_key) { if ((err = store_key()) != 0) goto end; action_count--; } if (do_list_keys) { if ((err = list_keys()) != 0) goto end; action_count--; } if (do_read_key) { if ((err = read_key()) != 0) goto end; action_count--; } if (pincode != NULL) { memset(pincode, 0, 8); free(pincode); } end: if (card) { sc_unlock(card); sc_disconnect_card(card); } if (ctx) sc_release_context(ctx); return err; }
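Note on the on-card key file format handled above: parse_private_key() and encode_private_key() agree on a private-key record of 5*base + 3 bytes (2-byte big-endian length, 1-byte key number, then the five CRT components p, q, iqmp, dmp1, dmq1 at base bytes each), while encode_public_key() emits 5*base + 7. A minimal reading-aid sketch of that arithmetic; the helper name cf_private_record_size is illustrative, not part of cryptoflex-tool:

#include <stddef.h>

/* base is the CRT component size in bytes: 32/48/64/128 for
 * 512/768/1024/2048-bit moduli, i.e. modulus_bits / 16. */
static size_t cf_private_record_size(int modulus_bits)
{
	size_t base = (size_t)modulus_bits / 16;
	return 5 * base + 3;	/* five CRT components + 3-byte header */
}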
static int read_public_key(RSA *rsa)
{
	int r;
	sc_path_t path;
	sc_file_t *file;
	u8 buf[2048], *p = buf;
	size_t bufsize, keysize;

	r = select_app_df();
	if (r)
		return 1;
	sc_format_path("I1012", &path);
	r = sc_select_file(card, &path, &file);
	if (r) {
		fprintf(stderr, "Unable to select public key file: %s\n", sc_strerror(r));
		return 2;
	}
	bufsize = file->size;
	sc_file_free(file);
	r = sc_read_binary(card, 0, buf, bufsize, 0);
	if (r < 0) {
		fprintf(stderr, "Unable to read public key file: %s\n", sc_strerror(r));
		return 2;
	}
	bufsize = r;
	do {
		if (bufsize < 4)
			return 3;
		keysize = (p[0] << 8) | p[1];
		if (keysize == 0)
			break;
		if (keysize < 3)
			return 3;
		if (p[2] == opt_key_num)
			break;
		p += keysize;
		bufsize -= keysize;
	} while (1);
	if (keysize == 0) {
		printf("Key number %d not found.\n", opt_key_num);
		return 2;
	}
	return parse_public_key(p, keysize, rsa);
}
static int read_public_key(RSA *rsa)
{
	int r;
	sc_path_t path;
	sc_file_t *file;
	u8 buf[2048], *p = buf;
	size_t bufsize, keysize;

	r = select_app_df();
	if (r)
		return 1;
	sc_format_path("I1012", &path);
	r = sc_select_file(card, &path, &file);
	if (r) {
		fprintf(stderr, "Unable to select public key file: %s\n", sc_strerror(r));
		return 2;
	}
	bufsize = MIN(file->size, sizeof buf);
	sc_file_free(file);
	r = sc_read_binary(card, 0, buf, bufsize, 0);
	if (r < 0) {
		fprintf(stderr, "Unable to read public key file: %s\n", sc_strerror(r));
		return 2;
	}
	bufsize = r;
	do {
		if (bufsize < 4)
			return 3;
		keysize = (p[0] << 8) | p[1];
		if (keysize == 0)
			break;
		if (keysize < 3)
			return 3;
		if (p[2] == opt_key_num)
			break;
		p += keysize;
		bufsize -= keysize;
	} while (1);
	if (keysize == 0) {
		printf("Key number %d not found.\n", opt_key_num);
		return 2;
	}
	return parse_public_key(p, keysize, rsa);
}
{'added': [(24, '#include "libopensc/internal.h"'), (335, '\tbufsize = MIN(file->size, sizeof buf);'), (386, '\tbufsize = MIN(file->size, sizeof buf);')], 'deleted': [(334, '\tbufsize = file->size;'), (385, '\tbufsize = file->size;')]}
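The recorded diff above is the entire CVE-2018-16391 fix: read_public_key() and read_private_key() previously passed the card-reported file->size straight to sc_read_binary() as the length for a fixed 2048-byte stack buffer, and the patch clamps that length with the MIN macro from libopensc/internal.h. A minimal sketch of the bounded-read pattern, assuming a plain MIN macro; bounded_read_len is an illustrative name, not OpenSC API:

#include <stddef.h>

#ifndef MIN
#define MIN(a, b) (((a) < (b)) ? (a) : (b))
#endif

/* Never let an attacker-controlled (card-supplied) size drive a read
 * into a fixed destination buffer (CWE-119). */
static size_t bounded_read_len(size_t file_size, size_t dst_size)
{
	/* before the fix: len = file_size;                      */
	/* after the fix:  len = MIN(file_size, dst_size);       */
	return MIN(file_size, dst_size);
}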
3
2
996
5901
https://github.com/OpenSC/OpenSC
CVE-2018-16391
['CWE-119']
nedmalloc.c
nedalloc::strdup
/* Alternative malloc implementation for multiple threads without lock contention based on dlmalloc. (C) 2005-2006 Niall Douglas Boost Software License - Version 1.0 - August 17th, 2003 Permission is hereby granted, free of charge, to any person or organization obtaining a copy of the software and accompanying documentation covered by this license (the "Software") to use, reproduce, display, distribute, execute, and transmit the Software, and to prepare derivative works of the Software, and to permit third-parties to whom the Software is furnished to do so, all subject to the following: The copyright notices in the Software and this entire statement, including the above license grant, this restriction and the following disclaimer, must be included in all copies of the Software, in whole or in part, and all derivative works of the Software, unless such copies or derivative works are solely in the form of machine-executable object code generated by a source language processor. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #ifdef _MSC_VER /* Enable full aliasing on MSVC */ /*#pragma optimize("a", on)*/ #endif /*#define FULLSANITYCHECKS*/ #include "nedmalloc.h" #if defined(WIN32) #include <malloc.h> #endif #define MSPACES 1 #define ONLY_MSPACES 1 #ifndef USE_LOCKS #define USE_LOCKS 1 #endif #define FOOTERS 1 /* Need to enable footers so frees lock the right mspace */ #undef DEBUG /* dlmalloc wants DEBUG either 0 or 1 */ #ifdef _DEBUG #define DEBUG 1 #else #define DEBUG 0 #endif #ifdef NDEBUG /* Disable assert checking on release builds */ #undef DEBUG #endif /* The default of 64Kb means we spend too much time kernel-side */ #ifndef DEFAULT_GRANULARITY #define DEFAULT_GRANULARITY (1*1024*1024) #endif /*#define USE_SPIN_LOCKS 0*/ /*#define FORCEINLINE*/ #include "malloc.c.h" #ifdef NDEBUG /* Disable assert checking on release builds */ #undef DEBUG #endif /* The maximum concurrent threads in a pool possible */ #ifndef MAXTHREADSINPOOL #define MAXTHREADSINPOOL 16 #endif /* The maximum number of threadcaches which can be allocated */ #ifndef THREADCACHEMAXCACHES #define THREADCACHEMAXCACHES 256 #endif /* The maximum size to be allocated from the thread cache */ #ifndef THREADCACHEMAX #define THREADCACHEMAX 8192 #endif #if 0 /* The number of cache entries for finer grained bins. This is (topbitpos(THREADCACHEMAX)-4)*2 */ #define THREADCACHEMAXBINS ((13-4)*2) #else /* The number of cache entries. 
This is (topbitpos(THREADCACHEMAX)-4) */ #define THREADCACHEMAXBINS (13-4) #endif /* Point at which the free space in a thread cache is garbage collected */ #ifndef THREADCACHEMAXFREESPACE #define THREADCACHEMAXFREESPACE (512*1024) #endif #ifdef WIN32 #define TLSVAR DWORD #define TLSALLOC(k) (*(k)=TlsAlloc(), TLS_OUT_OF_INDEXES==*(k)) #define TLSFREE(k) (!TlsFree(k)) #define TLSGET(k) TlsGetValue(k) #define TLSSET(k, a) (!TlsSetValue(k, a)) #ifdef DEBUG static LPVOID ChkedTlsGetValue(DWORD idx) { LPVOID ret=TlsGetValue(idx); assert(S_OK==GetLastError()); return ret; } #undef TLSGET #define TLSGET(k) ChkedTlsGetValue(k) #endif #else #define TLSVAR pthread_key_t #define TLSALLOC(k) pthread_key_create(k, 0) #define TLSFREE(k) pthread_key_delete(k) #define TLSGET(k) pthread_getspecific(k) #define TLSSET(k, a) pthread_setspecific(k, a) #endif #if 0 /* Only enable if testing with valgrind. Causes misoperation */ #define mspace_malloc(p, s) malloc(s) #define mspace_realloc(p, m, s) realloc(m, s) #define mspace_calloc(p, n, s) calloc(n, s) #define mspace_free(p, m) free(m) #endif #if defined(__cplusplus) #if !defined(NO_NED_NAMESPACE) namespace nedalloc { #else extern "C" { #endif #endif size_t nedblksize(void *mem) THROWSPEC { #if 0 /* Only enable if testing with valgrind. Causes misoperation */ return THREADCACHEMAX; #else if(mem) { mchunkptr p=mem2chunk(mem); assert(cinuse(p)); /* If this fails, someone tried to free a block twice */ if(cinuse(p)) return chunksize(p)-overhead_for(p); } return 0; #endif } void nedsetvalue(void *v) THROWSPEC { nedpsetvalue(0, v); } void * nedmalloc(size_t size) THROWSPEC { return nedpmalloc(0, size); } void * nedcalloc(size_t no, size_t size) THROWSPEC { return nedpcalloc(0, no, size); } void * nedrealloc(void *mem, size_t size) THROWSPEC { return nedprealloc(0, mem, size); } void nedfree(void *mem) THROWSPEC { nedpfree(0, mem); } void * nedmemalign(size_t alignment, size_t bytes) THROWSPEC { return nedpmemalign(0, alignment, bytes); } #if !NO_MALLINFO struct mallinfo nedmallinfo(void) THROWSPEC { return nedpmallinfo(0); } #endif int nedmallopt(int parno, int value) THROWSPEC { return nedpmallopt(0, parno, value); } int nedmalloc_trim(size_t pad) THROWSPEC { return nedpmalloc_trim(0, pad); } void nedmalloc_stats(void) THROWSPEC { nedpmalloc_stats(0); } size_t nedmalloc_footprint(void) THROWSPEC { return nedpmalloc_footprint(0); } void **nedindependent_calloc(size_t elemsno, size_t elemsize, void **chunks) THROWSPEC { return nedpindependent_calloc(0, elemsno, elemsize, chunks); } void **nedindependent_comalloc(size_t elems, size_t *sizes, void **chunks) THROWSPEC { return nedpindependent_comalloc(0, elems, sizes, chunks); } struct threadcacheblk_t; typedef struct threadcacheblk_t threadcacheblk; struct threadcacheblk_t { /* Keep less than 16 bytes on 32 bit systems and 32 bytes on 64 bit systems */ #ifdef FULLSANITYCHECKS unsigned int magic; #endif unsigned int lastUsed, size; threadcacheblk *next, *prev; }; typedef struct threadcache_t { #ifdef FULLSANITYCHECKS unsigned int magic1; #endif int mymspace; /* Last mspace entry this thread used */ long threadid; unsigned int mallocs, frees, successes; size_t freeInCache; /* How much free space is stored in this cache */ threadcacheblk *bins[(THREADCACHEMAXBINS+1)*2]; #ifdef FULLSANITYCHECKS unsigned int magic2; #endif } threadcache; struct nedpool_t { MLOCK_T mutex; void *uservalue; int threads; /* Max entries in m to use */ threadcache *caches[THREADCACHEMAXCACHES]; TLSVAR mycache; /* Thread cache for this thread. 
0 for unset, negative for use mspace-1 directly, otherwise is cache-1 */ mstate m[MAXTHREADSINPOOL+1]; /* mspace entries for this pool */ }; static nedpool syspool; static FORCEINLINE unsigned int size2binidx(size_t _size) THROWSPEC { /* 8=1000 16=10000 20=10100 24=11000 32=100000 48=110000 4096=1000000000000 */ unsigned int topbit, size=(unsigned int)(_size>>4); /* 16=1 20=1 24=1 32=10 48=11 64=100 96=110 128=1000 4096=100000000 */ #if defined(__GNUC__) topbit = sizeof(size)*__CHAR_BIT__ - 1 - __builtin_clz(size); #elif defined(_MSC_VER) && _MSC_VER>=1300 { unsigned long bsrTopBit; _BitScanReverse(&bsrTopBit, size); topbit = bsrTopBit; } #else #if 0 union { unsigned asInt[2]; double asDouble; }; int n; asDouble = (double)size + 0.5; topbit = (asInt[!FOX_BIGENDIAN] >> 20) - 1023; #else { unsigned int x=size; x = x | (x >> 1); x = x | (x >> 2); x = x | (x >> 4); x = x | (x >> 8); x = x | (x >>16); x = ~x; x = x - ((x >> 1) & 0x55555555); x = (x & 0x33333333) + ((x >> 2) & 0x33333333); x = (x + (x >> 4)) & 0x0F0F0F0F; x = x + (x << 8); x = x + (x << 16); topbit=31 - (x >> 24); } #endif #endif return topbit; } #ifdef FULLSANITYCHECKS static void tcsanitycheck(threadcacheblk **ptr) THROWSPEC { assert((ptr[0] && ptr[1]) || (!ptr[0] && !ptr[1])); if(ptr[0] && ptr[1]) { assert(nedblksize(ptr[0])>=sizeof(threadcacheblk)); assert(nedblksize(ptr[1])>=sizeof(threadcacheblk)); assert(*(unsigned int *) "NEDN"==ptr[0]->magic); assert(*(unsigned int *) "NEDN"==ptr[1]->magic); assert(!ptr[0]->prev); assert(!ptr[1]->next); if(ptr[0]==ptr[1]) { assert(!ptr[0]->next); assert(!ptr[1]->prev); } } } static void tcfullsanitycheck(threadcache *tc) THROWSPEC { threadcacheblk **tcbptr=tc->bins; int n; for(n=0; n<=THREADCACHEMAXBINS; n++, tcbptr+=2) { threadcacheblk *b, *ob=0; tcsanitycheck(tcbptr); for(b=tcbptr[0]; b; ob=b, b=b->next) { assert(*(unsigned int *) "NEDN"==b->magic); assert(!ob || ob->next==b); assert(!ob || b->prev==ob); } } } #endif static NOINLINE void RemoveCacheEntries(nedpool *p, threadcache *tc, unsigned int age) THROWSPEC { #ifdef FULLSANITYCHECKS tcfullsanitycheck(tc); #endif if(tc->freeInCache) { threadcacheblk **tcbptr=tc->bins; int n; for(n=0; n<=THREADCACHEMAXBINS; n++, tcbptr+=2) { threadcacheblk **tcb=tcbptr+1; /* come from oldest end of list */ /*tcsanitycheck(tcbptr);*/ for(; *tcb && tc->frees-(*tcb)->lastUsed>=age; ) { threadcacheblk *f=*tcb; size_t blksize=f->size; /*nedblksize(f);*/ assert(blksize<=nedblksize(f)); assert(blksize); #ifdef FULLSANITYCHECKS assert(*(unsigned int *) "NEDN"==(*tcb)->magic); #endif *tcb=(*tcb)->prev; if(*tcb) (*tcb)->next=0; else *tcbptr=0; tc->freeInCache-=blksize; assert((long) tc->freeInCache>=0); mspace_free(0, f); /*tcsanitycheck(tcbptr);*/ } } } #ifdef FULLSANITYCHECKS tcfullsanitycheck(tc); #endif } static void DestroyCaches(nedpool *p) THROWSPEC { if(p->caches) { threadcache *tc; int n; for(n=0; n<THREADCACHEMAXCACHES; n++) { if((tc=p->caches[n])) { tc->frees++; RemoveCacheEntries(p, tc, 0); assert(!tc->freeInCache); tc->mymspace=-1; tc->threadid=0; mspace_free(0, tc); p->caches[n]=0; } } } } static NOINLINE threadcache *AllocCache(nedpool *p) THROWSPEC { threadcache *tc=0; int n, end; ACQUIRE_LOCK(&p->mutex); for(n=0; n<THREADCACHEMAXCACHES && p->caches[n]; n++); if(THREADCACHEMAXCACHES==n) { /* List exhausted, so disable for this thread */ RELEASE_LOCK(&p->mutex); return 0; } tc=p->caches[n]=(threadcache *) mspace_calloc(p->m[0], 1, sizeof(threadcache)); if(!tc) { RELEASE_LOCK(&p->mutex); return 0; } #ifdef FULLSANITYCHECKS tc->magic1=*(unsigned 
int *)"NEDMALC1"; tc->magic2=*(unsigned int *)"NEDMALC2"; #endif tc->threadid=(long)(size_t)CURRENT_THREAD; for(end=0; p->m[end]; end++); tc->mymspace=tc->threadid % end; RELEASE_LOCK(&p->mutex); if(TLSSET(p->mycache, (void *)(size_t)(n+1))) abort(); return tc; } static void *threadcache_malloc(nedpool *p, threadcache *tc, size_t *size) THROWSPEC { void *ret=0; unsigned int bestsize; unsigned int idx=size2binidx(*size); size_t blksize=0; threadcacheblk *blk, **binsptr; #ifdef FULLSANITYCHECKS tcfullsanitycheck(tc); #endif /* Calculate best fit bin size */ bestsize=1<<(idx+4); #if 0 /* Finer grained bin fit */ idx<<=1; if(*size>bestsize) { idx++; bestsize+=bestsize>>1; } if(*size>bestsize) { idx++; bestsize=1<<(4+(idx>>1)); } #else if(*size>bestsize) { idx++; bestsize<<=1; } #endif assert(bestsize>=*size); if(*size<bestsize) *size=bestsize; assert(*size<=THREADCACHEMAX); assert(idx<=THREADCACHEMAXBINS); binsptr=&tc->bins[idx*2]; /* Try to match close, but move up a bin if necessary */ blk=*binsptr; if(!blk || blk->size<*size) { /* Bump it up a bin */ if(idx<THREADCACHEMAXBINS) { idx++; binsptr+=2; blk=*binsptr; } } if(blk) { blksize=blk->size; /*nedblksize(blk);*/ assert(nedblksize(blk)>=blksize); assert(blksize>=*size); if(blk->next) blk->next->prev=0; *binsptr=blk->next; if(!*binsptr) binsptr[1]=0; #ifdef FULLSANITYCHECKS blk->magic=0; #endif assert(binsptr[0]!=blk && binsptr[1]!=blk); assert(nedblksize(blk)>=sizeof(threadcacheblk) && nedblksize(blk)<=THREADCACHEMAX+CHUNK_OVERHEAD); /*printf("malloc: %p, %p, %p, %lu\n", p, tc, blk, (long) size);*/ ret=(void *) blk; } ++tc->mallocs; if(ret) { assert(blksize>=*size); ++tc->successes; tc->freeInCache-=blksize; assert((long) tc->freeInCache>=0); } #if defined(DEBUG) && 0 if(!(tc->mallocs & 0xfff)) { printf("*** threadcache=%u, mallocs=%u (%f), free=%u (%f), freeInCache=%u\n", (unsigned int) tc->threadid, tc->mallocs, (float) tc->successes/tc->mallocs, tc->frees, (float) tc->successes/tc->frees, (unsigned int) tc->freeInCache); } #endif #ifdef FULLSANITYCHECKS tcfullsanitycheck(tc); #endif return ret; } static NOINLINE void ReleaseFreeInCache(nedpool *p, threadcache *tc, int mymspace) THROWSPEC { unsigned int age=THREADCACHEMAXFREESPACE/8192; /*ACQUIRE_LOCK(&p->m[mymspace]->mutex);*/ while(age && tc->freeInCache>=THREADCACHEMAXFREESPACE) { RemoveCacheEntries(p, tc, age); /*printf("*** Removing cache entries older than %u (%u)\n", age, (unsigned int) tc->freeInCache);*/ age>>=1; } /*RELEASE_LOCK(&p->m[mymspace]->mutex);*/ } static void threadcache_free(nedpool *p, threadcache *tc, int mymspace, void *mem, size_t size) THROWSPEC { unsigned int bestsize; unsigned int idx=size2binidx(size); threadcacheblk **binsptr, *tck=(threadcacheblk *) mem; assert(size>=sizeof(threadcacheblk) && size<=THREADCACHEMAX+CHUNK_OVERHEAD); #ifdef DEBUG { /* Make sure this is a valid memory block */ mchunkptr p = mem2chunk(mem); mstate fm = get_mstate_for(p); if (!ok_magic(fm)) { USAGE_ERROR_ACTION(fm, p); return; } } #endif #ifdef FULLSANITYCHECKS tcfullsanitycheck(tc); #endif /* Calculate best fit bin size */ bestsize=1<<(idx+4); #if 0 /* Finer grained bin fit */ idx<<=1; if(size>bestsize) { unsigned int biggerbestsize=bestsize+bestsize<<1; if(size>=biggerbestsize) { idx++; bestsize=biggerbestsize; } } #endif if(bestsize!=size) /* dlmalloc can round up, so we round down to preserve indexing */ size=bestsize; binsptr=&tc->bins[idx*2]; assert(idx<=THREADCACHEMAXBINS); if(tck==*binsptr) { fprintf(stderr, "Attempt to free already freed memory block %p - aborting!\n", 
tck); abort(); } #ifdef FULLSANITYCHECKS tck->magic=*(unsigned int *) "NEDN"; #endif tck->lastUsed=++tc->frees; tck->size=(unsigned int) size; tck->next=*binsptr; tck->prev=0; if(tck->next) tck->next->prev=tck; else binsptr[1]=tck; assert(!*binsptr || (*binsptr)->size==tck->size); *binsptr=tck; assert(tck==tc->bins[idx*2]); assert(tc->bins[idx*2+1]==tck || binsptr[0]->next->prev==tck); /*printf("free: %p, %p, %p, %lu\n", p, tc, mem, (long) size);*/ tc->freeInCache+=size; #ifdef FULLSANITYCHECKS tcfullsanitycheck(tc); #endif #if 1 if(tc->freeInCache>=THREADCACHEMAXFREESPACE) ReleaseFreeInCache(p, tc, mymspace); #endif } static NOINLINE int InitPool(nedpool *p, size_t capacity, int threads) THROWSPEC { /* threads is -1 for system pool */ ensure_initialization(); ACQUIRE_MALLOC_GLOBAL_LOCK(); if(p->threads) goto done; if(INITIAL_LOCK(&p->mutex)) goto err; if(TLSALLOC(&p->mycache)) goto err; if(!(p->m[0]=(mstate) create_mspace(capacity, 1))) goto err; p->m[0]->extp=p; p->threads=(threads<1 || threads>MAXTHREADSINPOOL) ? MAXTHREADSINPOOL : threads; done: RELEASE_MALLOC_GLOBAL_LOCK(); return 1; err: if(threads<0) abort(); /* If you can't allocate for system pool, we're screwed */ DestroyCaches(p); if(p->m[0]) { destroy_mspace(p->m[0]); p->m[0]=0; } if(p->mycache) { if(TLSFREE(p->mycache)) abort(); p->mycache=0; } RELEASE_MALLOC_GLOBAL_LOCK(); return 0; } static NOINLINE mstate FindMSpace(nedpool *p, threadcache *tc, int *lastUsed, size_t size) THROWSPEC { /* Gets called when thread's last used mspace is in use. The strategy is to run through the list of all available mspaces looking for an unlocked one and if we fail, we create a new one so long as we don't exceed p->threads */ int n, end; for(n=end=*lastUsed+1; p->m[n]; end=++n) { if(TRY_LOCK(&p->m[n]->mutex)) goto found; } for(n=0; n<*lastUsed && p->m[n]; n++) { if(TRY_LOCK(&p->m[n]->mutex)) goto found; } if(end<p->threads) { mstate temp; if(!(temp=(mstate) create_mspace(size, 1))) goto badexit; /* Now we're ready to modify the lists, we lock */ ACQUIRE_LOCK(&p->mutex); while(p->m[end] && end<p->threads) end++; if(end>=p->threads) { /* Drat, must destroy it now */ RELEASE_LOCK(&p->mutex); destroy_mspace((mspace) temp); goto badexit; } /* We really want to make sure this goes into memory now but we have to be careful of breaking aliasing rules, so write it twice */ { volatile struct malloc_state **_m=(volatile struct malloc_state **) &p->m[end]; *_m=(p->m[end]=temp); } ACQUIRE_LOCK(&p->m[end]->mutex); /*printf("Created mspace idx %d\n", end);*/ RELEASE_LOCK(&p->mutex); n=end; goto found; } /* Let it lock on the last one it used */ badexit: ACQUIRE_LOCK(&p->m[*lastUsed]->mutex); return p->m[*lastUsed]; found: *lastUsed=n; if(tc) tc->mymspace=n; else { if(TLSSET(p->mycache, (void *)(size_t)(-(n+1)))) abort(); } return p->m[n]; } nedpool *nedcreatepool(size_t capacity, int threads) THROWSPEC { nedpool *ret; if(!(ret=(nedpool *) nedpcalloc(0, 1, sizeof(nedpool)))) return 0; if(!InitPool(ret, capacity, threads)) { nedpfree(0, ret); return 0; } return ret; } void neddestroypool(nedpool *p) THROWSPEC { int n; ACQUIRE_LOCK(&p->mutex); DestroyCaches(p); for(n=0; p->m[n]; n++) { destroy_mspace(p->m[n]); p->m[n]=0; } RELEASE_LOCK(&p->mutex); if(TLSFREE(p->mycache)) abort(); nedpfree(0, p); } void nedpsetvalue(nedpool *p, void *v) THROWSPEC { if(!p) { p=&syspool; if(!syspool.threads) InitPool(&syspool, 0, -1); } p->uservalue=v; } void *nedgetvalue(nedpool **p, void *mem) THROWSPEC { nedpool *np=0; mchunkptr mcp=mem2chunk(mem); mstate fm; 
if(!(is_aligned(chunk2mem(mcp))) && mcp->head != FENCEPOST_HEAD) return 0; if(!cinuse(mcp)) return 0; if(!next_pinuse(mcp)) return 0; if(!is_mmapped(mcp) && !pinuse(mcp)) { if(next_chunk(prev_chunk(mcp))!=mcp) return 0; } fm=get_mstate_for(mcp); if(!ok_magic(fm)) return 0; if(!ok_address(fm, mcp)) return 0; if(!fm->extp) return 0; np=(nedpool *) fm->extp; if(p) *p=np; return np->uservalue; } void neddisablethreadcache(nedpool *p) THROWSPEC { int mycache; if(!p) { p=&syspool; if(!syspool.threads) InitPool(&syspool, 0, -1); } mycache=(int)(size_t) TLSGET(p->mycache); if(!mycache) { /* Set to mspace 0 */ if(TLSSET(p->mycache, (void *)-1)) abort(); } else if(mycache>0) { /* Set to last used mspace */ threadcache *tc=p->caches[mycache-1]; #if defined(DEBUG) printf("Threadcache utilisation: %lf%% in cache with %lf%% lost to other threads\n", 100.0*tc->successes/tc->mallocs, 100.0*((double) tc->mallocs-tc->frees)/tc->mallocs); #endif if(TLSSET(p->mycache, (void *)(size_t)(-tc->mymspace))) abort(); tc->frees++; RemoveCacheEntries(p, tc, 0); assert(!tc->freeInCache); tc->mymspace=-1; tc->threadid=0; mspace_free(0, p->caches[mycache-1]); p->caches[mycache-1]=0; } } #define GETMSPACE(m,p,tc,ms,s,action) \ do \ { \ mstate m = GetMSpace((p),(tc),(ms),(s)); \ action; \ RELEASE_LOCK(&m->mutex); \ } while (0) static FORCEINLINE mstate GetMSpace(nedpool *p, threadcache *tc, int mymspace, size_t size) THROWSPEC { /* Returns a locked and ready for use mspace */ mstate m=p->m[mymspace]; assert(m); if(!TRY_LOCK(&p->m[mymspace]->mutex)) m=FindMSpace(p, tc, &mymspace, size);\ /*assert(IS_LOCKED(&p->m[mymspace]->mutex));*/ return m; } static FORCEINLINE void GetThreadCache(nedpool **p, threadcache **tc, int *mymspace, size_t *size) THROWSPEC { int mycache; if(size && *size<sizeof(threadcacheblk)) *size=sizeof(threadcacheblk); if(!*p) { *p=&syspool; if(!syspool.threads) InitPool(&syspool, 0, -1); } mycache=(int)(size_t) TLSGET((*p)->mycache); if(mycache>0) { *tc=(*p)->caches[mycache-1]; *mymspace=(*tc)->mymspace; } else if(!mycache) { *tc=AllocCache(*p); if(!*tc) { /* Disable */ if(TLSSET((*p)->mycache, (void *)-1)) abort(); *mymspace=0; } else *mymspace=(*tc)->mymspace; } else { *tc=0; *mymspace=-mycache-1; } assert(*mymspace>=0); assert((long)(size_t)CURRENT_THREAD==(*tc)->threadid); #ifdef FULLSANITYCHECKS if(*tc) { if(*(unsigned int *)"NEDMALC1"!=(*tc)->magic1 || *(unsigned int *)"NEDMALC2"!=(*tc)->magic2) { abort(); } } #endif } void * nedpmalloc(nedpool *p, size_t size) THROWSPEC { void *ret=0; threadcache *tc; int mymspace; GetThreadCache(&p, &tc, &mymspace, &size); #if THREADCACHEMAX if(tc && size<=THREADCACHEMAX) { /* Use the thread cache */ ret=threadcache_malloc(p, tc, &size); } #endif if(!ret) { /* Use this thread's mspace */ GETMSPACE(m, p, tc, mymspace, size, ret=mspace_malloc(m, size)); } return ret; } void * nedpcalloc(nedpool *p, size_t no, size_t size) THROWSPEC { size_t rsize=size*no; void *ret=0; threadcache *tc; int mymspace; GetThreadCache(&p, &tc, &mymspace, &rsize); #if THREADCACHEMAX if(tc && rsize<=THREADCACHEMAX) { /* Use the thread cache */ if((ret=threadcache_malloc(p, tc, &rsize))) memset(ret, 0, rsize); } #endif if(!ret) { /* Use this thread's mspace */ GETMSPACE(m, p, tc, mymspace, rsize, ret=mspace_calloc(m, 1, rsize)); } return ret; } void * nedprealloc(nedpool *p, void *mem, size_t size) THROWSPEC { void *ret=0; threadcache *tc; int mymspace; if(!mem) return nedpmalloc(p, size); GetThreadCache(&p, &tc, &mymspace, &size); #if THREADCACHEMAX if(tc && size && size<=THREADCACHEMAX) { 
/* Use the thread cache */ size_t memsize=nedblksize(mem); assert(memsize); if((ret=threadcache_malloc(p, tc, &size))) { memcpy(ret, mem, memsize<size ? memsize : size); if(memsize<=THREADCACHEMAX) threadcache_free(p, tc, mymspace, mem, memsize); else mspace_free(0, mem); } } #endif if(!ret) { /* Reallocs always happen in the mspace they happened in, so skip locking the preferred mspace for this thread */ ret=mspace_realloc(0, mem, size); } return ret; } void nedpfree(nedpool *p, void *mem) THROWSPEC { /* Frees always happen in the mspace they happened in, so skip locking the preferred mspace for this thread */ threadcache *tc; int mymspace; size_t memsize; assert(mem); GetThreadCache(&p, &tc, &mymspace, 0); #if THREADCACHEMAX memsize=nedblksize(mem); assert(memsize); if(mem && tc && memsize<=(THREADCACHEMAX+CHUNK_OVERHEAD)) threadcache_free(p, tc, mymspace, mem, memsize); else #endif mspace_free(0, mem); } void * nedpmemalign(nedpool *p, size_t alignment, size_t bytes) THROWSPEC { void *ret; threadcache *tc; int mymspace; GetThreadCache(&p, &tc, &mymspace, &bytes); { /* Use this thread's mspace */ GETMSPACE(m, p, tc, mymspace, bytes, ret=mspace_memalign(m, alignment, bytes)); } return ret; } #if !NO_MALLINFO struct mallinfo nedpmallinfo(nedpool *p) THROWSPEC { int n; struct mallinfo ret={0}; if(!p) { p=&syspool; if(!syspool.threads) InitPool(&syspool, 0, -1); } for(n=0; p->m[n]; n++) { struct mallinfo t=mspace_mallinfo(p->m[n]); ret.arena+=t.arena; ret.ordblks+=t.ordblks; ret.hblkhd+=t.hblkhd; ret.usmblks+=t.usmblks; ret.uordblks+=t.uordblks; ret.fordblks+=t.fordblks; ret.keepcost+=t.keepcost; } return ret; } #endif int nedpmallopt(nedpool *p, int parno, int value) THROWSPEC { return mspace_mallopt(parno, value); } int nedpmalloc_trim(nedpool *p, size_t pad) THROWSPEC { int n, ret=0; if(!p) { p=&syspool; if(!syspool.threads) InitPool(&syspool, 0, -1); } for(n=0; p->m[n]; n++) { ret+=mspace_trim(p->m[n], pad); } return ret; } void nedpmalloc_stats(nedpool *p) THROWSPEC { int n; if(!p) { p=&syspool; if(!syspool.threads) InitPool(&syspool, 0, -1); } for(n=0; p->m[n]; n++) { mspace_malloc_stats(p->m[n]); } } size_t nedpmalloc_footprint(nedpool *p) THROWSPEC { size_t ret=0; int n; if(!p) { p=&syspool; if(!syspool.threads) InitPool(&syspool, 0, -1); } for(n=0; p->m[n]; n++) { ret+=mspace_footprint(p->m[n]); } return ret; } void **nedpindependent_calloc(nedpool *p, size_t elemsno, size_t elemsize, void **chunks) THROWSPEC { void **ret; threadcache *tc; int mymspace; GetThreadCache(&p, &tc, &mymspace, &elemsize); GETMSPACE(m, p, tc, mymspace, elemsno*elemsize, ret=mspace_independent_calloc(m, elemsno, elemsize, chunks)); return ret; } void **nedpindependent_comalloc(nedpool *p, size_t elems, size_t *sizes, void **chunks) THROWSPEC { void **ret; threadcache *tc; int mymspace; size_t i, *adjustedsizes=(size_t *) alloca(elems*sizeof(size_t)); if(!adjustedsizes) return 0; for(i=0; i<elems; i++) adjustedsizes[i]=sizes[i]<sizeof(threadcacheblk) ? sizeof(threadcacheblk) : sizes[i]; GetThreadCache(&p, &tc, &mymspace, 0); GETMSPACE(m, p, tc, mymspace, 0, ret=mspace_independent_comalloc(m, elems, adjustedsizes, chunks)); return ret; } #ifdef OVERRIDE_STRDUP /* * This implementation is purely there to override the libc version, to * avoid a crash due to allocation and free on different 'heaps'. */ char *strdup(const char *s1) { char *s2 = 0; if (s1) { s2 = malloc(strlen(s1) + 1); strcpy(s2, s1); } return s2; } #endif #if defined(__cplusplus) } #endif
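nedmalloc's threadcache above sizes its bins by the position of the top set bit of (size >> 4): size2binidx() computes exactly that index, and threadcache_malloc() then bumps it by one when the request exceeds the bin's base size of 1 << (idx + 4). A standalone restatement of the top-bit mapping; topbit_index and the demo are illustrative, not nedmalloc API:

#include <stdio.h>

/* floor(log2(size >> 4)), matching size2binidx():
 * 16 -> 0, 32 -> 1, 48 -> 1, 4096 -> 8, 8192 -> 9. */
static unsigned int topbit_index(unsigned int size)
{
	unsigned int x = size >> 4, topbit = 0;
	while (x >>= 1)
		topbit++;
	return topbit;
}

int main(void)
{
	unsigned int sizes[] = { 16, 32, 48, 4096, 8192 };
	unsigned int i;
	for (i = 0; i < sizeof sizes / sizeof sizes[0]; i++)
		printf("size %5u -> raw bin index %u\n", sizes[i], topbit_index(sizes[i]));
	return 0;
}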
/* Alternative malloc implementation for multiple threads without lock contention based on dlmalloc. (C) 2005-2006 Niall Douglas Boost Software License - Version 1.0 - August 17th, 2003 Permission is hereby granted, free of charge, to any person or organization obtaining a copy of the software and accompanying documentation covered by this license (the "Software") to use, reproduce, display, distribute, execute, and transmit the Software, and to prepare derivative works of the Software, and to permit third-parties to whom the Software is furnished to do so, all subject to the following: The copyright notices in the Software and this entire statement, including the above license grant, this restriction and the following disclaimer, must be included in all copies of the Software, in whole or in part, and all derivative works of the Software, unless such copies or derivative works are solely in the form of machine-executable object code generated by a source language processor. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #ifdef _MSC_VER /* Enable full aliasing on MSVC */ /*#pragma optimize("a", on)*/ #endif /*#define FULLSANITYCHECKS*/ #include "nedmalloc.h" #if defined(WIN32) #include <malloc.h> #endif #define MSPACES 1 #define ONLY_MSPACES 1 #ifndef USE_LOCKS #define USE_LOCKS 1 #endif #define FOOTERS 1 /* Need to enable footers so frees lock the right mspace */ #undef DEBUG /* dlmalloc wants DEBUG either 0 or 1 */ #ifdef _DEBUG #define DEBUG 1 #else #define DEBUG 0 #endif #ifdef NDEBUG /* Disable assert checking on release builds */ #undef DEBUG #endif /* The default of 64Kb means we spend too much time kernel-side */ #ifndef DEFAULT_GRANULARITY #define DEFAULT_GRANULARITY (1*1024*1024) #endif /*#define USE_SPIN_LOCKS 0*/ /*#define FORCEINLINE*/ #include "malloc.c.h" #ifdef NDEBUG /* Disable assert checking on release builds */ #undef DEBUG #endif /* The maximum concurrent threads in a pool possible */ #ifndef MAXTHREADSINPOOL #define MAXTHREADSINPOOL 16 #endif /* The maximum number of threadcaches which can be allocated */ #ifndef THREADCACHEMAXCACHES #define THREADCACHEMAXCACHES 256 #endif /* The maximum size to be allocated from the thread cache */ #ifndef THREADCACHEMAX #define THREADCACHEMAX 8192 #endif #if 0 /* The number of cache entries for finer grained bins. This is (topbitpos(THREADCACHEMAX)-4)*2 */ #define THREADCACHEMAXBINS ((13-4)*2) #else /* The number of cache entries. 
This is (topbitpos(THREADCACHEMAX)-4) */ #define THREADCACHEMAXBINS (13-4) #endif /* Point at which the free space in a thread cache is garbage collected */ #ifndef THREADCACHEMAXFREESPACE #define THREADCACHEMAXFREESPACE (512*1024) #endif #ifdef WIN32 #define TLSVAR DWORD #define TLSALLOC(k) (*(k)=TlsAlloc(), TLS_OUT_OF_INDEXES==*(k)) #define TLSFREE(k) (!TlsFree(k)) #define TLSGET(k) TlsGetValue(k) #define TLSSET(k, a) (!TlsSetValue(k, a)) #ifdef DEBUG static LPVOID ChkedTlsGetValue(DWORD idx) { LPVOID ret=TlsGetValue(idx); assert(S_OK==GetLastError()); return ret; } #undef TLSGET #define TLSGET(k) ChkedTlsGetValue(k) #endif #else #define TLSVAR pthread_key_t #define TLSALLOC(k) pthread_key_create(k, 0) #define TLSFREE(k) pthread_key_delete(k) #define TLSGET(k) pthread_getspecific(k) #define TLSSET(k, a) pthread_setspecific(k, a) #endif #if 0 /* Only enable if testing with valgrind. Causes misoperation */ #define mspace_malloc(p, s) malloc(s) #define mspace_realloc(p, m, s) realloc(m, s) #define mspace_calloc(p, n, s) calloc(n, s) #define mspace_free(p, m) free(m) #endif #if defined(__cplusplus) #if !defined(NO_NED_NAMESPACE) namespace nedalloc { #else extern "C" { #endif #endif size_t nedblksize(void *mem) THROWSPEC { #if 0 /* Only enable if testing with valgrind. Causes misoperation */ return THREADCACHEMAX; #else if(mem) { mchunkptr p=mem2chunk(mem); assert(cinuse(p)); /* If this fails, someone tried to free a block twice */ if(cinuse(p)) return chunksize(p)-overhead_for(p); } return 0; #endif } void nedsetvalue(void *v) THROWSPEC { nedpsetvalue(0, v); } void * nedmalloc(size_t size) THROWSPEC { return nedpmalloc(0, size); } void * nedcalloc(size_t no, size_t size) THROWSPEC { return nedpcalloc(0, no, size); } void * nedrealloc(void *mem, size_t size) THROWSPEC { return nedprealloc(0, mem, size); } void nedfree(void *mem) THROWSPEC { nedpfree(0, mem); } void * nedmemalign(size_t alignment, size_t bytes) THROWSPEC { return nedpmemalign(0, alignment, bytes); } #if !NO_MALLINFO struct mallinfo nedmallinfo(void) THROWSPEC { return nedpmallinfo(0); } #endif int nedmallopt(int parno, int value) THROWSPEC { return nedpmallopt(0, parno, value); } int nedmalloc_trim(size_t pad) THROWSPEC { return nedpmalloc_trim(0, pad); } void nedmalloc_stats(void) THROWSPEC { nedpmalloc_stats(0); } size_t nedmalloc_footprint(void) THROWSPEC { return nedpmalloc_footprint(0); } void **nedindependent_calloc(size_t elemsno, size_t elemsize, void **chunks) THROWSPEC { return nedpindependent_calloc(0, elemsno, elemsize, chunks); } void **nedindependent_comalloc(size_t elems, size_t *sizes, void **chunks) THROWSPEC { return nedpindependent_comalloc(0, elems, sizes, chunks); } struct threadcacheblk_t; typedef struct threadcacheblk_t threadcacheblk; struct threadcacheblk_t { /* Keep less than 16 bytes on 32 bit systems and 32 bytes on 64 bit systems */ #ifdef FULLSANITYCHECKS unsigned int magic; #endif unsigned int lastUsed, size; threadcacheblk *next, *prev; }; typedef struct threadcache_t { #ifdef FULLSANITYCHECKS unsigned int magic1; #endif int mymspace; /* Last mspace entry this thread used */ long threadid; unsigned int mallocs, frees, successes; size_t freeInCache; /* How much free space is stored in this cache */ threadcacheblk *bins[(THREADCACHEMAXBINS+1)*2]; #ifdef FULLSANITYCHECKS unsigned int magic2; #endif } threadcache; struct nedpool_t { MLOCK_T mutex; void *uservalue; int threads; /* Max entries in m to use */ threadcache *caches[THREADCACHEMAXCACHES]; TLSVAR mycache; /* Thread cache for this thread. 
0 for unset, negative for use mspace-1 directly, otherwise is cache-1 */ mstate m[MAXTHREADSINPOOL+1]; /* mspace entries for this pool */ }; static nedpool syspool; static FORCEINLINE unsigned int size2binidx(size_t _size) THROWSPEC { /* 8=1000 16=10000 20=10100 24=11000 32=100000 48=110000 4096=1000000000000 */ unsigned int topbit, size=(unsigned int)(_size>>4); /* 16=1 20=1 24=1 32=10 48=11 64=100 96=110 128=1000 4096=100000000 */ #if defined(__GNUC__) topbit = sizeof(size)*__CHAR_BIT__ - 1 - __builtin_clz(size); #elif defined(_MSC_VER) && _MSC_VER>=1300 { unsigned long bsrTopBit; _BitScanReverse(&bsrTopBit, size); topbit = bsrTopBit; } #else #if 0 union { unsigned asInt[2]; double asDouble; }; int n; asDouble = (double)size + 0.5; topbit = (asInt[!FOX_BIGENDIAN] >> 20) - 1023; #else { unsigned int x=size; x = x | (x >> 1); x = x | (x >> 2); x = x | (x >> 4); x = x | (x >> 8); x = x | (x >>16); x = ~x; x = x - ((x >> 1) & 0x55555555); x = (x & 0x33333333) + ((x >> 2) & 0x33333333); x = (x + (x >> 4)) & 0x0F0F0F0F; x = x + (x << 8); x = x + (x << 16); topbit=31 - (x >> 24); } #endif #endif return topbit; } #ifdef FULLSANITYCHECKS static void tcsanitycheck(threadcacheblk **ptr) THROWSPEC { assert((ptr[0] && ptr[1]) || (!ptr[0] && !ptr[1])); if(ptr[0] && ptr[1]) { assert(nedblksize(ptr[0])>=sizeof(threadcacheblk)); assert(nedblksize(ptr[1])>=sizeof(threadcacheblk)); assert(*(unsigned int *) "NEDN"==ptr[0]->magic); assert(*(unsigned int *) "NEDN"==ptr[1]->magic); assert(!ptr[0]->prev); assert(!ptr[1]->next); if(ptr[0]==ptr[1]) { assert(!ptr[0]->next); assert(!ptr[1]->prev); } } } static void tcfullsanitycheck(threadcache *tc) THROWSPEC { threadcacheblk **tcbptr=tc->bins; int n; for(n=0; n<=THREADCACHEMAXBINS; n++, tcbptr+=2) { threadcacheblk *b, *ob=0; tcsanitycheck(tcbptr); for(b=tcbptr[0]; b; ob=b, b=b->next) { assert(*(unsigned int *) "NEDN"==b->magic); assert(!ob || ob->next==b); assert(!ob || b->prev==ob); } } } #endif static NOINLINE void RemoveCacheEntries(nedpool *p, threadcache *tc, unsigned int age) THROWSPEC { #ifdef FULLSANITYCHECKS tcfullsanitycheck(tc); #endif if(tc->freeInCache) { threadcacheblk **tcbptr=tc->bins; int n; for(n=0; n<=THREADCACHEMAXBINS; n++, tcbptr+=2) { threadcacheblk **tcb=tcbptr+1; /* come from oldest end of list */ /*tcsanitycheck(tcbptr);*/ for(; *tcb && tc->frees-(*tcb)->lastUsed>=age; ) { threadcacheblk *f=*tcb; size_t blksize=f->size; /*nedblksize(f);*/ assert(blksize<=nedblksize(f)); assert(blksize); #ifdef FULLSANITYCHECKS assert(*(unsigned int *) "NEDN"==(*tcb)->magic); #endif *tcb=(*tcb)->prev; if(*tcb) (*tcb)->next=0; else *tcbptr=0; tc->freeInCache-=blksize; assert((long) tc->freeInCache>=0); mspace_free(0, f); /*tcsanitycheck(tcbptr);*/ } } } #ifdef FULLSANITYCHECKS tcfullsanitycheck(tc); #endif } static void DestroyCaches(nedpool *p) THROWSPEC { if(p->caches) { threadcache *tc; int n; for(n=0; n<THREADCACHEMAXCACHES; n++) { if((tc=p->caches[n])) { tc->frees++; RemoveCacheEntries(p, tc, 0); assert(!tc->freeInCache); tc->mymspace=-1; tc->threadid=0; mspace_free(0, tc); p->caches[n]=0; } } } } static NOINLINE threadcache *AllocCache(nedpool *p) THROWSPEC { threadcache *tc=0; int n, end; ACQUIRE_LOCK(&p->mutex); for(n=0; n<THREADCACHEMAXCACHES && p->caches[n]; n++); if(THREADCACHEMAXCACHES==n) { /* List exhausted, so disable for this thread */ RELEASE_LOCK(&p->mutex); return 0; } tc=p->caches[n]=(threadcache *) mspace_calloc(p->m[0], 1, sizeof(threadcache)); if(!tc) { RELEASE_LOCK(&p->mutex); return 0; } #ifdef FULLSANITYCHECKS tc->magic1=*(unsigned 
int *)"NEDMALC1"; tc->magic2=*(unsigned int *)"NEDMALC2"; #endif tc->threadid=(long)(size_t)CURRENT_THREAD; for(end=0; p->m[end]; end++); tc->mymspace=tc->threadid % end; RELEASE_LOCK(&p->mutex); if(TLSSET(p->mycache, (void *)(size_t)(n+1))) abort(); return tc; } static void *threadcache_malloc(nedpool *p, threadcache *tc, size_t *size) THROWSPEC { void *ret=0; unsigned int bestsize; unsigned int idx=size2binidx(*size); size_t blksize=0; threadcacheblk *blk, **binsptr; #ifdef FULLSANITYCHECKS tcfullsanitycheck(tc); #endif /* Calculate best fit bin size */ bestsize=1<<(idx+4); #if 0 /* Finer grained bin fit */ idx<<=1; if(*size>bestsize) { idx++; bestsize+=bestsize>>1; } if(*size>bestsize) { idx++; bestsize=1<<(4+(idx>>1)); } #else if(*size>bestsize) { idx++; bestsize<<=1; } #endif assert(bestsize>=*size); if(*size<bestsize) *size=bestsize; assert(*size<=THREADCACHEMAX); assert(idx<=THREADCACHEMAXBINS); binsptr=&tc->bins[idx*2]; /* Try to match close, but move up a bin if necessary */ blk=*binsptr; if(!blk || blk->size<*size) { /* Bump it up a bin */ if(idx<THREADCACHEMAXBINS) { idx++; binsptr+=2; blk=*binsptr; } } if(blk) { blksize=blk->size; /*nedblksize(blk);*/ assert(nedblksize(blk)>=blksize); assert(blksize>=*size); if(blk->next) blk->next->prev=0; *binsptr=blk->next; if(!*binsptr) binsptr[1]=0; #ifdef FULLSANITYCHECKS blk->magic=0; #endif assert(binsptr[0]!=blk && binsptr[1]!=blk); assert(nedblksize(blk)>=sizeof(threadcacheblk) && nedblksize(blk)<=THREADCACHEMAX+CHUNK_OVERHEAD); /*printf("malloc: %p, %p, %p, %lu\n", p, tc, blk, (long) size);*/ ret=(void *) blk; } ++tc->mallocs; if(ret) { assert(blksize>=*size); ++tc->successes; tc->freeInCache-=blksize; assert((long) tc->freeInCache>=0); } #if defined(DEBUG) && 0 if(!(tc->mallocs & 0xfff)) { printf("*** threadcache=%u, mallocs=%u (%f), free=%u (%f), freeInCache=%u\n", (unsigned int) tc->threadid, tc->mallocs, (float) tc->successes/tc->mallocs, tc->frees, (float) tc->successes/tc->frees, (unsigned int) tc->freeInCache); } #endif #ifdef FULLSANITYCHECKS tcfullsanitycheck(tc); #endif return ret; } static NOINLINE void ReleaseFreeInCache(nedpool *p, threadcache *tc, int mymspace) THROWSPEC { unsigned int age=THREADCACHEMAXFREESPACE/8192; /*ACQUIRE_LOCK(&p->m[mymspace]->mutex);*/ while(age && tc->freeInCache>=THREADCACHEMAXFREESPACE) { RemoveCacheEntries(p, tc, age); /*printf("*** Removing cache entries older than %u (%u)\n", age, (unsigned int) tc->freeInCache);*/ age>>=1; } /*RELEASE_LOCK(&p->m[mymspace]->mutex);*/ } static void threadcache_free(nedpool *p, threadcache *tc, int mymspace, void *mem, size_t size) THROWSPEC { unsigned int bestsize; unsigned int idx=size2binidx(size); threadcacheblk **binsptr, *tck=(threadcacheblk *) mem; assert(size>=sizeof(threadcacheblk) && size<=THREADCACHEMAX+CHUNK_OVERHEAD); #ifdef DEBUG { /* Make sure this is a valid memory block */ mchunkptr p = mem2chunk(mem); mstate fm = get_mstate_for(p); if (!ok_magic(fm)) { USAGE_ERROR_ACTION(fm, p); return; } } #endif #ifdef FULLSANITYCHECKS tcfullsanitycheck(tc); #endif /* Calculate best fit bin size */ bestsize=1<<(idx+4); #if 0 /* Finer grained bin fit */ idx<<=1; if(size>bestsize) { unsigned int biggerbestsize=bestsize+bestsize<<1; if(size>=biggerbestsize) { idx++; bestsize=biggerbestsize; } } #endif if(bestsize!=size) /* dlmalloc can round up, so we round down to preserve indexing */ size=bestsize; binsptr=&tc->bins[idx*2]; assert(idx<=THREADCACHEMAXBINS); if(tck==*binsptr) { fprintf(stderr, "Attempt to free already freed memory block %p - aborting!\n", 
tck); abort(); } #ifdef FULLSANITYCHECKS tck->magic=*(unsigned int *) "NEDN"; #endif tck->lastUsed=++tc->frees; tck->size=(unsigned int) size; tck->next=*binsptr; tck->prev=0; if(tck->next) tck->next->prev=tck; else binsptr[1]=tck; assert(!*binsptr || (*binsptr)->size==tck->size); *binsptr=tck; assert(tck==tc->bins[idx*2]); assert(tc->bins[idx*2+1]==tck || binsptr[0]->next->prev==tck); /*printf("free: %p, %p, %p, %lu\n", p, tc, mem, (long) size);*/ tc->freeInCache+=size; #ifdef FULLSANITYCHECKS tcfullsanitycheck(tc); #endif #if 1 if(tc->freeInCache>=THREADCACHEMAXFREESPACE) ReleaseFreeInCache(p, tc, mymspace); #endif } static NOINLINE int InitPool(nedpool *p, size_t capacity, int threads) THROWSPEC { /* threads is -1 for system pool */ ensure_initialization(); ACQUIRE_MALLOC_GLOBAL_LOCK(); if(p->threads) goto done; if(INITIAL_LOCK(&p->mutex)) goto err; if(TLSALLOC(&p->mycache)) goto err; if(!(p->m[0]=(mstate) create_mspace(capacity, 1))) goto err; p->m[0]->extp=p; p->threads=(threads<1 || threads>MAXTHREADSINPOOL) ? MAXTHREADSINPOOL : threads; done: RELEASE_MALLOC_GLOBAL_LOCK(); return 1; err: if(threads<0) abort(); /* If you can't allocate for system pool, we're screwed */ DestroyCaches(p); if(p->m[0]) { destroy_mspace(p->m[0]); p->m[0]=0; } if(p->mycache) { if(TLSFREE(p->mycache)) abort(); p->mycache=0; } RELEASE_MALLOC_GLOBAL_LOCK(); return 0; } static NOINLINE mstate FindMSpace(nedpool *p, threadcache *tc, int *lastUsed, size_t size) THROWSPEC { /* Gets called when thread's last used mspace is in use. The strategy is to run through the list of all available mspaces looking for an unlocked one and if we fail, we create a new one so long as we don't exceed p->threads */ int n, end; for(n=end=*lastUsed+1; p->m[n]; end=++n) { if(TRY_LOCK(&p->m[n]->mutex)) goto found; } for(n=0; n<*lastUsed && p->m[n]; n++) { if(TRY_LOCK(&p->m[n]->mutex)) goto found; } if(end<p->threads) { mstate temp; if(!(temp=(mstate) create_mspace(size, 1))) goto badexit; /* Now we're ready to modify the lists, we lock */ ACQUIRE_LOCK(&p->mutex); while(p->m[end] && end<p->threads) end++; if(end>=p->threads) { /* Drat, must destroy it now */ RELEASE_LOCK(&p->mutex); destroy_mspace((mspace) temp); goto badexit; } /* We really want to make sure this goes into memory now but we have to be careful of breaking aliasing rules, so write it twice */ { volatile struct malloc_state **_m=(volatile struct malloc_state **) &p->m[end]; *_m=(p->m[end]=temp); } ACQUIRE_LOCK(&p->m[end]->mutex); /*printf("Created mspace idx %d\n", end);*/ RELEASE_LOCK(&p->mutex); n=end; goto found; } /* Let it lock on the last one it used */ badexit: ACQUIRE_LOCK(&p->m[*lastUsed]->mutex); return p->m[*lastUsed]; found: *lastUsed=n; if(tc) tc->mymspace=n; else { if(TLSSET(p->mycache, (void *)(size_t)(-(n+1)))) abort(); } return p->m[n]; } nedpool *nedcreatepool(size_t capacity, int threads) THROWSPEC { nedpool *ret; if(!(ret=(nedpool *) nedpcalloc(0, 1, sizeof(nedpool)))) return 0; if(!InitPool(ret, capacity, threads)) { nedpfree(0, ret); return 0; } return ret; } void neddestroypool(nedpool *p) THROWSPEC { int n; ACQUIRE_LOCK(&p->mutex); DestroyCaches(p); for(n=0; p->m[n]; n++) { destroy_mspace(p->m[n]); p->m[n]=0; } RELEASE_LOCK(&p->mutex); if(TLSFREE(p->mycache)) abort(); nedpfree(0, p); } void nedpsetvalue(nedpool *p, void *v) THROWSPEC { if(!p) { p=&syspool; if(!syspool.threads) InitPool(&syspool, 0, -1); } p->uservalue=v; } void *nedgetvalue(nedpool **p, void *mem) THROWSPEC { nedpool *np=0; mchunkptr mcp=mem2chunk(mem); mstate fm; 
if(!(is_aligned(chunk2mem(mcp))) && mcp->head != FENCEPOST_HEAD) return 0; if(!cinuse(mcp)) return 0; if(!next_pinuse(mcp)) return 0; if(!is_mmapped(mcp) && !pinuse(mcp)) { if(next_chunk(prev_chunk(mcp))!=mcp) return 0; } fm=get_mstate_for(mcp); if(!ok_magic(fm)) return 0; if(!ok_address(fm, mcp)) return 0; if(!fm->extp) return 0; np=(nedpool *) fm->extp; if(p) *p=np; return np->uservalue; } void neddisablethreadcache(nedpool *p) THROWSPEC { int mycache; if(!p) { p=&syspool; if(!syspool.threads) InitPool(&syspool, 0, -1); } mycache=(int)(size_t) TLSGET(p->mycache); if(!mycache) { /* Set to mspace 0 */ if(TLSSET(p->mycache, (void *)-1)) abort(); } else if(mycache>0) { /* Set to last used mspace */ threadcache *tc=p->caches[mycache-1]; #if defined(DEBUG) printf("Threadcache utilisation: %lf%% in cache with %lf%% lost to other threads\n", 100.0*tc->successes/tc->mallocs, 100.0*((double) tc->mallocs-tc->frees)/tc->mallocs); #endif if(TLSSET(p->mycache, (void *)(size_t)(-tc->mymspace))) abort(); tc->frees++; RemoveCacheEntries(p, tc, 0); assert(!tc->freeInCache); tc->mymspace=-1; tc->threadid=0; mspace_free(0, p->caches[mycache-1]); p->caches[mycache-1]=0; } } #define GETMSPACE(m,p,tc,ms,s,action) \ do \ { \ mstate m = GetMSpace((p),(tc),(ms),(s)); \ action; \ RELEASE_LOCK(&m->mutex); \ } while (0) static FORCEINLINE mstate GetMSpace(nedpool *p, threadcache *tc, int mymspace, size_t size) THROWSPEC { /* Returns a locked and ready for use mspace */ mstate m=p->m[mymspace]; assert(m); if(!TRY_LOCK(&p->m[mymspace]->mutex)) m=FindMSpace(p, tc, &mymspace, size);\ /*assert(IS_LOCKED(&p->m[mymspace]->mutex));*/ return m; } static FORCEINLINE void GetThreadCache(nedpool **p, threadcache **tc, int *mymspace, size_t *size) THROWSPEC { int mycache; if(size && *size<sizeof(threadcacheblk)) *size=sizeof(threadcacheblk); if(!*p) { *p=&syspool; if(!syspool.threads) InitPool(&syspool, 0, -1); } mycache=(int)(size_t) TLSGET((*p)->mycache); if(mycache>0) { *tc=(*p)->caches[mycache-1]; *mymspace=(*tc)->mymspace; } else if(!mycache) { *tc=AllocCache(*p); if(!*tc) { /* Disable */ if(TLSSET((*p)->mycache, (void *)-1)) abort(); *mymspace=0; } else *mymspace=(*tc)->mymspace; } else { *tc=0; *mymspace=-mycache-1; } assert(*mymspace>=0); assert((long)(size_t)CURRENT_THREAD==(*tc)->threadid); #ifdef FULLSANITYCHECKS if(*tc) { if(*(unsigned int *)"NEDMALC1"!=(*tc)->magic1 || *(unsigned int *)"NEDMALC2"!=(*tc)->magic2) { abort(); } } #endif } void * nedpmalloc(nedpool *p, size_t size) THROWSPEC { void *ret=0; threadcache *tc; int mymspace; GetThreadCache(&p, &tc, &mymspace, &size); #if THREADCACHEMAX if(tc && size<=THREADCACHEMAX) { /* Use the thread cache */ ret=threadcache_malloc(p, tc, &size); } #endif if(!ret) { /* Use this thread's mspace */ GETMSPACE(m, p, tc, mymspace, size, ret=mspace_malloc(m, size)); } return ret; } void * nedpcalloc(nedpool *p, size_t no, size_t size) THROWSPEC { size_t rsize=size*no; void *ret=0; threadcache *tc; int mymspace; GetThreadCache(&p, &tc, &mymspace, &rsize); #if THREADCACHEMAX if(tc && rsize<=THREADCACHEMAX) { /* Use the thread cache */ if((ret=threadcache_malloc(p, tc, &rsize))) memset(ret, 0, rsize); } #endif if(!ret) { /* Use this thread's mspace */ GETMSPACE(m, p, tc, mymspace, rsize, ret=mspace_calloc(m, 1, rsize)); } return ret; } void * nedprealloc(nedpool *p, void *mem, size_t size) THROWSPEC { void *ret=0; threadcache *tc; int mymspace; if(!mem) return nedpmalloc(p, size); GetThreadCache(&p, &tc, &mymspace, &size); #if THREADCACHEMAX if(tc && size && size<=THREADCACHEMAX) { 
/* Use the thread cache */ size_t memsize=nedblksize(mem); assert(memsize); if((ret=threadcache_malloc(p, tc, &size))) { memcpy(ret, mem, memsize<size ? memsize : size); if(memsize<=THREADCACHEMAX) threadcache_free(p, tc, mymspace, mem, memsize); else mspace_free(0, mem); } } #endif if(!ret) { /* Reallocs always happen in the mspace they happened in, so skip locking the preferred mspace for this thread */ ret=mspace_realloc(0, mem, size); } return ret; } void nedpfree(nedpool *p, void *mem) THROWSPEC { /* Frees always happen in the mspace they happened in, so skip locking the preferred mspace for this thread */ threadcache *tc; int mymspace; size_t memsize; assert(mem); GetThreadCache(&p, &tc, &mymspace, 0); #if THREADCACHEMAX memsize=nedblksize(mem); assert(memsize); if(mem && tc && memsize<=(THREADCACHEMAX+CHUNK_OVERHEAD)) threadcache_free(p, tc, mymspace, mem, memsize); else #endif mspace_free(0, mem); } void * nedpmemalign(nedpool *p, size_t alignment, size_t bytes) THROWSPEC { void *ret; threadcache *tc; int mymspace; GetThreadCache(&p, &tc, &mymspace, &bytes); { /* Use this thread's mspace */ GETMSPACE(m, p, tc, mymspace, bytes, ret=mspace_memalign(m, alignment, bytes)); } return ret; } #if !NO_MALLINFO struct mallinfo nedpmallinfo(nedpool *p) THROWSPEC { int n; struct mallinfo ret={0}; if(!p) { p=&syspool; if(!syspool.threads) InitPool(&syspool, 0, -1); } for(n=0; p->m[n]; n++) { struct mallinfo t=mspace_mallinfo(p->m[n]); ret.arena+=t.arena; ret.ordblks+=t.ordblks; ret.hblkhd+=t.hblkhd; ret.usmblks+=t.usmblks; ret.uordblks+=t.uordblks; ret.fordblks+=t.fordblks; ret.keepcost+=t.keepcost; } return ret; } #endif int nedpmallopt(nedpool *p, int parno, int value) THROWSPEC { return mspace_mallopt(parno, value); } int nedpmalloc_trim(nedpool *p, size_t pad) THROWSPEC { int n, ret=0; if(!p) { p=&syspool; if(!syspool.threads) InitPool(&syspool, 0, -1); } for(n=0; p->m[n]; n++) { ret+=mspace_trim(p->m[n], pad); } return ret; } void nedpmalloc_stats(nedpool *p) THROWSPEC { int n; if(!p) { p=&syspool; if(!syspool.threads) InitPool(&syspool, 0, -1); } for(n=0; p->m[n]; n++) { mspace_malloc_stats(p->m[n]); } } size_t nedpmalloc_footprint(nedpool *p) THROWSPEC { size_t ret=0; int n; if(!p) { p=&syspool; if(!syspool.threads) InitPool(&syspool, 0, -1); } for(n=0; p->m[n]; n++) { ret+=mspace_footprint(p->m[n]); } return ret; } void **nedpindependent_calloc(nedpool *p, size_t elemsno, size_t elemsize, void **chunks) THROWSPEC { void **ret; threadcache *tc; int mymspace; GetThreadCache(&p, &tc, &mymspace, &elemsize); GETMSPACE(m, p, tc, mymspace, elemsno*elemsize, ret=mspace_independent_calloc(m, elemsno, elemsize, chunks)); return ret; } void **nedpindependent_comalloc(nedpool *p, size_t elems, size_t *sizes, void **chunks) THROWSPEC { void **ret; threadcache *tc; int mymspace; size_t i, *adjustedsizes=(size_t *) alloca(elems*sizeof(size_t)); if(!adjustedsizes) return 0; for(i=0; i<elems; i++) adjustedsizes[i]=sizes[i]<sizeof(threadcacheblk) ? sizeof(threadcacheblk) : sizes[i]; GetThreadCache(&p, &tc, &mymspace, 0); GETMSPACE(m, p, tc, mymspace, 0, ret=mspace_independent_comalloc(m, elems, adjustedsizes, chunks)); return ret; } #ifdef OVERRIDE_STRDUP /* * This implementation is purely there to override the libc version, to * avoid a crash due to allocation and free on different 'heaps'. */ char *strdup(const char *s1) { char *s2 = 0; if (s1) { size_t len = strlen(s1) + 1; s2 = malloc(len); memcpy(s2, s1, len); } return s2; } #endif #if defined(__cplusplus) } #endif
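The nedpmalloc/nedpfree pair in the code field above implements a two-level dispatch: requests at or below THREADCACHEMAX first try the per-thread cache (threadcache_malloc), and only on a miss fall through to the thread's mspace. The fragment below is a minimal sketch of that dispatch shape under simplified assumptions (a single size class, C11 _Thread_local); the names TC_MAX, tc_bin, tc_malloc, tc_free, and pool_malloc are hypothetical stand-ins, not nedmalloc's API.

#include <stdlib.h>
#include <string.h>

#define TC_MAX 256                        /* stand-in for THREADCACHEMAX */
static _Thread_local void *tc_bin;        /* per-thread free list, one size class */

static void *tc_malloc(void)              /* pop a cached block, if any */
{
    void *blk = tc_bin;
    if (blk)
        memcpy(&tc_bin, blk, sizeof tc_bin);  /* next pointer lives in the block */
    return blk;
}

static void tc_free(void *blk)            /* push a block onto this thread's list */
{
    memcpy(blk, &tc_bin, sizeof tc_bin);
    tc_bin = blk;
}

static void *pool_malloc(size_t size)
{
    void *ret = (size <= TC_MAX) ? tc_malloc() : NULL;  /* small: try cache first */
    if (!ret)
        ret = malloc(size < TC_MAX ? TC_MAX : size);    /* miss: general allocator */
    return ret;
}

Blocks released through tc_free must have come from pool_malloc, so every cached block is at least TC_MAX bytes and can satisfy any request the cache accepts — the same invariant the real thread cache maintains per size bin.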
char *strdup(const char *s1) { char *s2 = 0; if (s1) { s2 = malloc(strlen(s1) + 1); strcpy(s2, s1); } return s2; }
char *strdup(const char *s1) { char *s2 = 0; if (s1) { size_t len = strlen(s1) + 1; s2 = malloc(len); memcpy(s2, s1, len); } return s2; }
{'added': [(960, '\t\tsize_t len = strlen(s1) + 1;'), (961, '\t\ts2 = malloc(len);'), (962, '\t\tmemcpy(s2, s1, len);')], 'deleted': [(960, '\t\ts2 = malloc(strlen(s1) + 1);'), (961, '\t\tstrcpy(s2, s1);')]}
3
2
720
5162
https://github.com/git/git
CVE-2016-2315
['CWE-119']
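The record above pairs this strdup override fix with CVE-2016-2315 and CWE-119: per the diff field, the patched version computes the length once, allocates exactly that many bytes, and copies with memcpy instead of strcpy, so the copy length is pinned to the allocation size. A standalone sketch of the same pattern — with the hypothetical name dup_string and an added malloc check that the original omits:

#include <stdlib.h>
#include <string.h>

static char *dup_string(const char *s1)
{
    char *s2 = NULL;
    if (s1) {
        size_t len = strlen(s1) + 1;  /* measured once, includes the NUL */
        s2 = malloc(len);
        if (s2)                       /* added here; the original does not check */
            memcpy(s2, s1, len);      /* copy bounded by the allocation size */
    }
    return s2;
}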
tcd.c
opj_tcd_init_tile
/* * The copyright in this software is being made available under the 2-clauses * BSD License, included below. This software may be subject to other third * party and contributor rights, including patent rights, and no such rights * are granted under this license. * * Copyright (c) 2002-2014, Universite catholique de Louvain (UCL), Belgium * Copyright (c) 2002-2014, Professor Benoit Macq * Copyright (c) 2001-2003, David Janssens * Copyright (c) 2002-2003, Yannick Verschueren * Copyright (c) 2003-2007, Francois-Olivier Devaux * Copyright (c) 2003-2014, Antonin Descampe * Copyright (c) 2005, Herve Drolon, FreeImage Team * Copyright (c) 2006-2007, Parvatha Elangovan * Copyright (c) 2008, 2011-2012, Centre National d'Etudes Spatiales (CNES), FR * Copyright (c) 2012, CS Systemes d'Information, France * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS `AS IS' * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ #include "opj_includes.h" /* ----------------------------------------------------------------------- */ /* TODO MSD: */ #ifdef TODO_MSD void tcd_dump(FILE *fd, opj_tcd_t *tcd, opj_tcd_image_t * img) { int tileno, compno, resno, bandno, precno;/*, cblkno;*/ fprintf(fd, "image {\n"); fprintf(fd, " tw=%d, th=%d x0=%d x1=%d y0=%d y1=%d\n", img->tw, img->th, tcd->image->x0, tcd->image->x1, tcd->image->y0, tcd->image->y1); for (tileno = 0; tileno < img->th * img->tw; tileno++) { opj_tcd_tile_t *tile = &tcd->tcd_image->tiles[tileno]; fprintf(fd, " tile {\n"); fprintf(fd, " x0=%d, y0=%d, x1=%d, y1=%d, numcomps=%d\n", tile->x0, tile->y0, tile->x1, tile->y1, tile->numcomps); for (compno = 0; compno < tile->numcomps; compno++) { opj_tcd_tilecomp_t *tilec = &tile->comps[compno]; fprintf(fd, " tilec {\n"); fprintf(fd, " x0=%d, y0=%d, x1=%d, y1=%d, numresolutions=%d\n", tilec->x0, tilec->y0, tilec->x1, tilec->y1, tilec->numresolutions); for (resno = 0; resno < tilec->numresolutions; resno++) { opj_tcd_resolution_t *res = &tilec->resolutions[resno]; fprintf(fd, "\n res {\n"); fprintf(fd, " x0=%d, y0=%d, x1=%d, y1=%d, pw=%d, ph=%d, numbands=%d\n", res->x0, res->y0, res->x1, res->y1, res->pw, res->ph, res->numbands); for (bandno = 0; bandno < res->numbands; bandno++) { opj_tcd_band_t *band = &res->bands[bandno]; fprintf(fd, " band {\n"); fprintf(fd, " x0=%d, y0=%d, x1=%d, y1=%d, stepsize=%f, numbps=%d\n", band->x0, band->y0, band->x1, band->y1, band->stepsize, band->numbps); for (precno = 0; precno < res->pw * res->ph; precno++) { opj_tcd_precinct_t *prec = &band->precincts[precno]; fprintf(fd, " prec {\n"); fprintf(fd, " x0=%d, y0=%d, x1=%d, y1=%d, cw=%d, ch=%d\n", prec->x0, prec->y0, prec->x1, prec->y1, prec->cw, prec->ch); /* for (cblkno = 0; cblkno < prec->cw * prec->ch; cblkno++) { opj_tcd_cblk_t *cblk = &prec->cblks[cblkno]; fprintf(fd, " cblk {\n"); fprintf(fd, " x0=%d, y0=%d, x1=%d, y1=%d\n", cblk->x0, cblk->y0, cblk->x1, cblk->y1); fprintf(fd, " }\n"); } */ fprintf(fd, " }\n"); } fprintf(fd, " }\n"); } fprintf(fd, " }\n"); } fprintf(fd, " }\n"); } fprintf(fd, " }\n"); } fprintf(fd, "}\n"); } #endif /** * Initializes tile coding/decoding */ static INLINE OPJ_BOOL opj_tcd_init_tile(opj_tcd_t *p_tcd, OPJ_UINT32 p_tile_no, OPJ_BOOL isEncoder, OPJ_FLOAT32 fraction, OPJ_SIZE_T sizeof_block); /** * Allocates memory for a decoding code block. */ static OPJ_BOOL opj_tcd_code_block_dec_allocate (opj_tcd_cblk_dec_t * p_code_block); /** * Deallocates the decoding data of the given precinct. */ static void opj_tcd_code_block_dec_deallocate (opj_tcd_precinct_t * p_precinct); /** * Allocates memory for an encoding code block (but not data). */ static OPJ_BOOL opj_tcd_code_block_enc_allocate (opj_tcd_cblk_enc_t * p_code_block); /** * Allocates data for an encoding code block */ static OPJ_BOOL opj_tcd_code_block_enc_allocate_data (opj_tcd_cblk_enc_t * p_code_block); /** * Deallocates the encoding data of the given precinct. 
*/ static void opj_tcd_code_block_enc_deallocate (opj_tcd_precinct_t * p_precinct); /** Free the memory allocated for encoding @param tcd TCD handle */ static void opj_tcd_free_tile(opj_tcd_t *tcd); static OPJ_BOOL opj_tcd_t2_decode ( opj_tcd_t *p_tcd, OPJ_BYTE * p_src_data, OPJ_UINT32 * p_data_read, OPJ_UINT32 p_max_src_size, opj_codestream_index_t *p_cstr_index ); static OPJ_BOOL opj_tcd_t1_decode (opj_tcd_t *p_tcd); static OPJ_BOOL opj_tcd_dwt_decode (opj_tcd_t *p_tcd); static OPJ_BOOL opj_tcd_mct_decode (opj_tcd_t *p_tcd); static OPJ_BOOL opj_tcd_dc_level_shift_decode (opj_tcd_t *p_tcd); static OPJ_BOOL opj_tcd_dc_level_shift_encode ( opj_tcd_t *p_tcd ); static OPJ_BOOL opj_tcd_mct_encode ( opj_tcd_t *p_tcd ); static OPJ_BOOL opj_tcd_dwt_encode ( opj_tcd_t *p_tcd ); static OPJ_BOOL opj_tcd_t1_encode ( opj_tcd_t *p_tcd ); static OPJ_BOOL opj_tcd_t2_encode ( opj_tcd_t *p_tcd, OPJ_BYTE * p_dest_data, OPJ_UINT32 * p_data_written, OPJ_UINT32 p_max_dest_size, opj_codestream_info_t *p_cstr_info ); static OPJ_BOOL opj_tcd_rate_allocate_encode( opj_tcd_t *p_tcd, OPJ_BYTE * p_dest_data, OPJ_UINT32 p_max_dest_size, opj_codestream_info_t *p_cstr_info ); /* ----------------------------------------------------------------------- */ /** Create a new TCD handle */ opj_tcd_t* opj_tcd_create(OPJ_BOOL p_is_decoder) { opj_tcd_t *l_tcd = 00; /* create the tcd structure */ l_tcd = (opj_tcd_t*) opj_calloc(1,sizeof(opj_tcd_t)); if (!l_tcd) { return 00; } l_tcd->m_is_decoder = p_is_decoder ? 1 : 0; l_tcd->tcd_image = (opj_tcd_image_t*)opj_calloc(1,sizeof(opj_tcd_image_t)); if (!l_tcd->tcd_image) { opj_free(l_tcd); return 00; } return l_tcd; } /* ----------------------------------------------------------------------- */ void opj_tcd_rateallocate_fixed(opj_tcd_t *tcd) { OPJ_UINT32 layno; for (layno = 0; layno < tcd->tcp->numlayers; layno++) { opj_tcd_makelayer_fixed(tcd, layno, 1); } } void opj_tcd_makelayer( opj_tcd_t *tcd, OPJ_UINT32 layno, OPJ_FLOAT64 thresh, OPJ_UINT32 final) { OPJ_UINT32 compno, resno, bandno, precno, cblkno; OPJ_UINT32 passno; opj_tcd_tile_t *tcd_tile = tcd->tcd_image->tiles; tcd_tile->distolayer[layno] = 0; /* fixed_quality */ for (compno = 0; compno < tcd_tile->numcomps; compno++) { opj_tcd_tilecomp_t *tilec = &tcd_tile->comps[compno]; for (resno = 0; resno < tilec->numresolutions; resno++) { opj_tcd_resolution_t *res = &tilec->resolutions[resno]; for (bandno = 0; bandno < res->numbands; bandno++) { opj_tcd_band_t *band = &res->bands[bandno]; for (precno = 0; precno < res->pw * res->ph; precno++) { opj_tcd_precinct_t *prc = &band->precincts[precno]; for (cblkno = 0; cblkno < prc->cw * prc->ch; cblkno++) { opj_tcd_cblk_enc_t *cblk = &prc->cblks.enc[cblkno]; opj_tcd_layer_t *layer = &cblk->layers[layno]; OPJ_UINT32 n; if (layno == 0) { cblk->numpassesinlayers = 0; } n = cblk->numpassesinlayers; for (passno = cblk->numpassesinlayers; passno < cblk->totalpasses; passno++) { OPJ_UINT32 dr; OPJ_FLOAT64 dd; opj_tcd_pass_t *pass = &cblk->passes[passno]; if (n == 0) { dr = pass->rate; dd = pass->distortiondec; } else { dr = pass->rate - cblk->passes[n - 1].rate; dd = pass->distortiondec - cblk->passes[n - 1].distortiondec; } if (!dr) { if (dd != 0) n = passno + 1; continue; } if (dd / dr >= thresh) n = passno + 1; } layer->numpasses = n - cblk->numpassesinlayers; if (!layer->numpasses) { layer->disto = 0; continue; } if (cblk->numpassesinlayers == 0) { layer->len = cblk->passes[n - 1].rate; layer->data = cblk->data; layer->disto = cblk->passes[n - 1].distortiondec; } else { layer->len = 
cblk->passes[n - 1].rate - cblk->passes[cblk->numpassesinlayers - 1].rate; layer->data = cblk->data + cblk->passes[cblk->numpassesinlayers - 1].rate; layer->disto = cblk->passes[n - 1].distortiondec - cblk->passes[cblk->numpassesinlayers - 1].distortiondec; } tcd_tile->distolayer[layno] += layer->disto; /* fixed_quality */ if (final) cblk->numpassesinlayers = n; } } } } } } void opj_tcd_makelayer_fixed(opj_tcd_t *tcd, OPJ_UINT32 layno, OPJ_UINT32 final) { OPJ_UINT32 compno, resno, bandno, precno, cblkno; OPJ_INT32 value; /*, matrice[tcd_tcp->numlayers][tcd_tile->comps[0].numresolutions][3]; */ OPJ_INT32 matrice[10][10][3]; OPJ_UINT32 i, j, k; opj_cp_t *cp = tcd->cp; opj_tcd_tile_t *tcd_tile = tcd->tcd_image->tiles; opj_tcp_t *tcd_tcp = tcd->tcp; for (compno = 0; compno < tcd_tile->numcomps; compno++) { opj_tcd_tilecomp_t *tilec = &tcd_tile->comps[compno]; for (i = 0; i < tcd_tcp->numlayers; i++) { for (j = 0; j < tilec->numresolutions; j++) { for (k = 0; k < 3; k++) { matrice[i][j][k] = (OPJ_INT32) ((OPJ_FLOAT32)cp->m_specific_param.m_enc.m_matrice[i * tilec->numresolutions * 3 + j * 3 + k] * (OPJ_FLOAT32) (tcd->image->comps[compno].prec / 16.0)); } } } for (resno = 0; resno < tilec->numresolutions; resno++) { opj_tcd_resolution_t *res = &tilec->resolutions[resno]; for (bandno = 0; bandno < res->numbands; bandno++) { opj_tcd_band_t *band = &res->bands[bandno]; for (precno = 0; precno < res->pw * res->ph; precno++) { opj_tcd_precinct_t *prc = &band->precincts[precno]; for (cblkno = 0; cblkno < prc->cw * prc->ch; cblkno++) { opj_tcd_cblk_enc_t *cblk = &prc->cblks.enc[cblkno]; opj_tcd_layer_t *layer = &cblk->layers[layno]; OPJ_UINT32 n; OPJ_INT32 imsb = (OPJ_INT32)(tcd->image->comps[compno].prec - cblk->numbps); /* number of bit-plan equal to zero */ /* Correction of the matrix of coefficient to include the IMSB information */ if (layno == 0) { value = matrice[layno][resno][bandno]; if (imsb >= value) { value = 0; } else { value -= imsb; } } else { value = matrice[layno][resno][bandno] - matrice[layno - 1][resno][bandno]; if (imsb >= matrice[layno - 1][resno][bandno]) { value -= (imsb - matrice[layno - 1][resno][bandno]); if (value < 0) { value = 0; } } } if (layno == 0) { cblk->numpassesinlayers = 0; } n = cblk->numpassesinlayers; if (cblk->numpassesinlayers == 0) { if (value != 0) { n = 3 * (OPJ_UINT32)value - 2 + cblk->numpassesinlayers; } else { n = cblk->numpassesinlayers; } } else { n = 3 * (OPJ_UINT32)value + cblk->numpassesinlayers; } layer->numpasses = n - cblk->numpassesinlayers; if (!layer->numpasses) continue; if (cblk->numpassesinlayers == 0) { layer->len = cblk->passes[n - 1].rate; layer->data = cblk->data; } else { layer->len = cblk->passes[n - 1].rate - cblk->passes[cblk->numpassesinlayers - 1].rate; layer->data = cblk->data + cblk->passes[cblk->numpassesinlayers - 1].rate; } if (final) cblk->numpassesinlayers = n; } } } } } } OPJ_BOOL opj_tcd_rateallocate( opj_tcd_t *tcd, OPJ_BYTE *dest, OPJ_UINT32 * p_data_written, OPJ_UINT32 len, opj_codestream_info_t *cstr_info) { OPJ_UINT32 compno, resno, bandno, precno, cblkno, layno; OPJ_UINT32 passno; OPJ_FLOAT64 min, max; OPJ_FLOAT64 cumdisto[100]; /* fixed_quality */ const OPJ_FLOAT64 K = 1; /* 1.1; fixed_quality */ OPJ_FLOAT64 maxSE = 0; opj_cp_t *cp = tcd->cp; opj_tcd_tile_t *tcd_tile = tcd->tcd_image->tiles; opj_tcp_t *tcd_tcp = tcd->tcp; min = DBL_MAX; max = 0; tcd_tile->numpix = 0; /* fixed_quality */ for (compno = 0; compno < tcd_tile->numcomps; compno++) { opj_tcd_tilecomp_t *tilec = &tcd_tile->comps[compno]; tilec->numpix = 
0; for (resno = 0; resno < tilec->numresolutions; resno++) { opj_tcd_resolution_t *res = &tilec->resolutions[resno]; for (bandno = 0; bandno < res->numbands; bandno++) { opj_tcd_band_t *band = &res->bands[bandno]; for (precno = 0; precno < res->pw * res->ph; precno++) { opj_tcd_precinct_t *prc = &band->precincts[precno]; for (cblkno = 0; cblkno < prc->cw * prc->ch; cblkno++) { opj_tcd_cblk_enc_t *cblk = &prc->cblks.enc[cblkno]; for (passno = 0; passno < cblk->totalpasses; passno++) { opj_tcd_pass_t *pass = &cblk->passes[passno]; OPJ_INT32 dr; OPJ_FLOAT64 dd, rdslope; if (passno == 0) { dr = (OPJ_INT32)pass->rate; dd = pass->distortiondec; } else { dr = (OPJ_INT32)(pass->rate - cblk->passes[passno - 1].rate); dd = pass->distortiondec - cblk->passes[passno - 1].distortiondec; } if (dr == 0) { continue; } rdslope = dd / dr; if (rdslope < min) { min = rdslope; } if (rdslope > max) { max = rdslope; } } /* passno */ /* fixed_quality */ tcd_tile->numpix += ((cblk->x1 - cblk->x0) * (cblk->y1 - cblk->y0)); tilec->numpix += ((cblk->x1 - cblk->x0) * (cblk->y1 - cblk->y0)); } /* cbklno */ } /* precno */ } /* bandno */ } /* resno */ maxSE += (((OPJ_FLOAT64)(1 << tcd->image->comps[compno].prec) - 1.0) * ((OPJ_FLOAT64)(1 << tcd->image->comps[compno].prec) -1.0)) * ((OPJ_FLOAT64)(tilec->numpix)); } /* compno */ /* index file */ if(cstr_info) { opj_tile_info_t *tile_info = &cstr_info->tile[tcd->tcd_tileno]; tile_info->numpix = tcd_tile->numpix; tile_info->distotile = tcd_tile->distotile; tile_info->thresh = (OPJ_FLOAT64 *) opj_malloc(tcd_tcp->numlayers * sizeof(OPJ_FLOAT64)); if (!tile_info->thresh) { /* FIXME event manager error callback */ return OPJ_FALSE; } } for (layno = 0; layno < tcd_tcp->numlayers; layno++) { OPJ_FLOAT64 lo = min; OPJ_FLOAT64 hi = max; OPJ_BOOL success = OPJ_FALSE; OPJ_UINT32 maxlen = tcd_tcp->rates[layno] ? opj_uint_min(((OPJ_UINT32) ceil(tcd_tcp->rates[layno])), len) : len; OPJ_FLOAT64 goodthresh = 0; OPJ_FLOAT64 stable_thresh = 0; OPJ_UINT32 i; OPJ_FLOAT64 distotarget; /* fixed_quality */ /* fixed_quality */ distotarget = tcd_tile->distotile - ((K * maxSE) / pow((OPJ_FLOAT32)10, tcd_tcp->distoratio[layno] / 10)); /* Don't try to find an optimal threshold but rather take everything not included yet, if -r xx,yy,zz,0 (disto_alloc == 1 and rates == 0) -q xx,yy,zz,0 (fixed_quality == 1 and distoratio == 0) ==> possible to have some lossy layers and the last layer for sure lossless */ if ( ((cp->m_specific_param.m_enc.m_disto_alloc==1) && (tcd_tcp->rates[layno]>0)) || ((cp->m_specific_param.m_enc.m_fixed_quality==1) && (tcd_tcp->distoratio[layno]>0))) { opj_t2_t*t2 = opj_t2_create(tcd->image, cp); OPJ_FLOAT64 thresh = 0; if (t2 == 00) { return OPJ_FALSE; } for (i = 0; i < 128; ++i) { OPJ_FLOAT64 distoachieved = 0; /* fixed_quality */ thresh = (lo + hi) / 2; opj_tcd_makelayer(tcd, layno, thresh, 0); if (cp->m_specific_param.m_enc.m_fixed_quality) { /* fixed_quality */ if(OPJ_IS_CINEMA(cp->rsiz)){ if (! opj_t2_encode_packets(t2,tcd->tcd_tileno, tcd_tile, layno + 1, dest, p_data_written, maxlen, cstr_info,tcd->cur_tp_num,tcd->tp_pos,tcd->cur_pino,THRESH_CALC)) { lo = thresh; continue; } else { distoachieved = layno == 0 ? tcd_tile->distolayer[0] : cumdisto[layno - 1] + tcd_tile->distolayer[layno]; if (distoachieved < distotarget) { hi=thresh; stable_thresh = thresh; continue; }else{ lo=thresh; } } }else{ distoachieved = (layno == 0) ? 
tcd_tile->distolayer[0] : (cumdisto[layno - 1] + tcd_tile->distolayer[layno]); if (distoachieved < distotarget) { hi = thresh; stable_thresh = thresh; continue; } lo = thresh; } } else { if (! opj_t2_encode_packets(t2, tcd->tcd_tileno, tcd_tile, layno + 1, dest,p_data_written, maxlen, cstr_info,tcd->cur_tp_num,tcd->tp_pos,tcd->cur_pino,THRESH_CALC)) { /* TODO: what to do with l ??? seek / tell ??? */ /* opj_event_msg(tcd->cinfo, EVT_INFO, "rate alloc: len=%d, max=%d\n", l, maxlen); */ lo = thresh; continue; } hi = thresh; stable_thresh = thresh; } } success = OPJ_TRUE; goodthresh = stable_thresh == 0? thresh : stable_thresh; opj_t2_destroy(t2); } else { success = OPJ_TRUE; goodthresh = min; } if (!success) { return OPJ_FALSE; } if(cstr_info) { /* Threshold for Marcela Index */ cstr_info->tile[tcd->tcd_tileno].thresh[layno] = goodthresh; } opj_tcd_makelayer(tcd, layno, goodthresh, 1); /* fixed_quality */ cumdisto[layno] = (layno == 0) ? tcd_tile->distolayer[0] : (cumdisto[layno - 1] + tcd_tile->distolayer[layno]); } return OPJ_TRUE; } OPJ_BOOL opj_tcd_init( opj_tcd_t *p_tcd, opj_image_t * p_image, opj_cp_t * p_cp ) { p_tcd->image = p_image; p_tcd->cp = p_cp; p_tcd->tcd_image->tiles = (opj_tcd_tile_t *) opj_calloc(1,sizeof(opj_tcd_tile_t)); if (! p_tcd->tcd_image->tiles) { return OPJ_FALSE; } p_tcd->tcd_image->tiles->comps = (opj_tcd_tilecomp_t *) opj_calloc(p_image->numcomps,sizeof(opj_tcd_tilecomp_t)); if (! p_tcd->tcd_image->tiles->comps ) { return OPJ_FALSE; } p_tcd->tcd_image->tiles->numcomps = p_image->numcomps; p_tcd->tp_pos = p_cp->m_specific_param.m_enc.m_tp_pos; return OPJ_TRUE; } /** Destroy a previously created TCD handle */ void opj_tcd_destroy(opj_tcd_t *tcd) { if (tcd) { opj_tcd_free_tile(tcd); if (tcd->tcd_image) { opj_free(tcd->tcd_image); tcd->tcd_image = 00; } opj_free(tcd); } } OPJ_BOOL opj_alloc_tile_component_data(opj_tcd_tilecomp_t *l_tilec) { if ((l_tilec->data == 00) || ((l_tilec->data_size_needed > l_tilec->data_size) && (l_tilec->ownsData == OPJ_FALSE))) { l_tilec->data = (OPJ_INT32 *) opj_malloc(l_tilec->data_size_needed); if (! l_tilec->data ) { return OPJ_FALSE; } /*fprintf(stderr, "tAllocate data of tilec (int): %d x OPJ_UINT32n",l_data_size);*/ l_tilec->data_size = l_tilec->data_size_needed; l_tilec->ownsData = OPJ_TRUE; } else if (l_tilec->data_size_needed > l_tilec->data_size) { OPJ_INT32 * new_data = (OPJ_INT32 *) opj_realloc(l_tilec->data, l_tilec->data_size_needed); /* opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to handle tile datan"); */ /* fprintf(stderr, "Not enough memory to handle tile data"); */ if (! 
new_data) { opj_free(l_tilec->data); l_tilec->data = NULL; l_tilec->data_size = 0; l_tilec->data_size_needed = 0; l_tilec->ownsData = OPJ_FALSE; return OPJ_FALSE; } l_tilec->data = new_data; /*fprintf(stderr, "tReallocate data of tilec (int): from %d to %d x OPJ_UINT32n", l_tilec->data_size, l_data_size);*/ l_tilec->data_size = l_tilec->data_size_needed; l_tilec->ownsData = OPJ_TRUE; } return OPJ_TRUE; } /* ----------------------------------------------------------------------- */ static INLINE OPJ_BOOL opj_tcd_init_tile(opj_tcd_t *p_tcd, OPJ_UINT32 p_tile_no, OPJ_BOOL isEncoder, OPJ_FLOAT32 fraction, OPJ_SIZE_T sizeof_block) { OPJ_UINT32 (*l_gain_ptr)(OPJ_UINT32) = 00; OPJ_UINT32 compno, resno, bandno, precno, cblkno; opj_tcp_t * l_tcp = 00; opj_cp_t * l_cp = 00; opj_tcd_tile_t * l_tile = 00; opj_tccp_t *l_tccp = 00; opj_tcd_tilecomp_t *l_tilec = 00; opj_image_comp_t * l_image_comp = 00; opj_tcd_resolution_t *l_res = 00; opj_tcd_band_t *l_band = 00; opj_stepsize_t * l_step_size = 00; opj_tcd_precinct_t *l_current_precinct = 00; opj_image_t *l_image = 00; OPJ_UINT32 p,q; OPJ_UINT32 l_level_no; OPJ_UINT32 l_pdx, l_pdy; OPJ_UINT32 l_gain; OPJ_INT32 l_x0b, l_y0b; /* extent of precincts , top left, bottom right**/ OPJ_INT32 l_tl_prc_x_start, l_tl_prc_y_start, l_br_prc_x_end, l_br_prc_y_end; /* number of precinct for a resolution */ OPJ_UINT32 l_nb_precincts; /* room needed to store l_nb_precinct precinct for a resolution */ OPJ_UINT32 l_nb_precinct_size; /* number of code blocks for a precinct*/ OPJ_UINT32 l_nb_code_blocks; /* room needed to store l_nb_code_blocks code blocks for a precinct*/ OPJ_UINT32 l_nb_code_blocks_size; /* size of data for a tile */ OPJ_UINT32 l_data_size; l_cp = p_tcd->cp; l_tcp = &(l_cp->tcps[p_tile_no]); l_tile = p_tcd->tcd_image->tiles; l_tccp = l_tcp->tccps; l_tilec = l_tile->comps; l_image = p_tcd->image; l_image_comp = p_tcd->image->comps; p = p_tile_no % l_cp->tw; /* tile coordinates */ q = p_tile_no / l_cp->tw; /*fprintf(stderr, "Tile coordinate = %d,%d\n", p, q);*/ /* 4 borders of the tile rescale on the image if necessary */ l_tile->x0 = opj_int_max((OPJ_INT32)(l_cp->tx0 + p * l_cp->tdx), (OPJ_INT32)l_image->x0); l_tile->y0 = opj_int_max((OPJ_INT32)(l_cp->ty0 + q * l_cp->tdy), (OPJ_INT32)l_image->y0); l_tile->x1 = opj_int_min((OPJ_INT32)(l_cp->tx0 + (p + 1) * l_cp->tdx), (OPJ_INT32)l_image->x1); l_tile->y1 = opj_int_min((OPJ_INT32)(l_cp->ty0 + (q + 1) * l_cp->tdy), (OPJ_INT32)l_image->y1); /* testcase 1888.pdf.asan.35.988 */ if (l_tccp->numresolutions == 0) { fprintf(stderr, "tiles require at least one resolution\n"); return OPJ_FALSE; } /*fprintf(stderr, "Tile border = %d,%d,%d,%d\n", l_tile->x0, l_tile->y0,l_tile->x1,l_tile->y1);*/ /*tile->numcomps = image->numcomps; */ for (compno = 0; compno < l_tile->numcomps; ++compno) { /*fprintf(stderr, "compno = %d/%d\n", compno, l_tile->numcomps);*/ l_image_comp->resno_decoded = 0; /* border of each l_tile component (global) */ l_tilec->x0 = opj_int_ceildiv(l_tile->x0, (OPJ_INT32)l_image_comp->dx); l_tilec->y0 = opj_int_ceildiv(l_tile->y0, (OPJ_INT32)l_image_comp->dy); l_tilec->x1 = opj_int_ceildiv(l_tile->x1, (OPJ_INT32)l_image_comp->dx); l_tilec->y1 = opj_int_ceildiv(l_tile->y1, (OPJ_INT32)l_image_comp->dy); /*fprintf(stderr, "\tTile compo border = %d,%d,%d,%d\n", l_tilec->x0, l_tilec->y0,l_tilec->x1,l_tilec->y1);*/ /* compute l_data_size with overflow check */ l_data_size = (OPJ_UINT32)(l_tilec->x1 - l_tilec->x0); if ((((OPJ_UINT32)-1) / l_data_size) < (OPJ_UINT32)(l_tilec->y1 - l_tilec->y0)) { /* TODO event */ 
return OPJ_FALSE; } l_data_size = l_data_size * (OPJ_UINT32)(l_tilec->y1 - l_tilec->y0); if ((((OPJ_UINT32)-1) / (OPJ_UINT32)sizeof(OPJ_UINT32)) < l_data_size) { /* TODO event */ return OPJ_FALSE; } l_data_size = l_data_size * (OPJ_UINT32)sizeof(OPJ_UINT32); l_tilec->numresolutions = l_tccp->numresolutions; if (l_tccp->numresolutions < l_cp->m_specific_param.m_dec.m_reduce) { l_tilec->minimum_num_resolutions = 1; } else { l_tilec->minimum_num_resolutions = l_tccp->numresolutions - l_cp->m_specific_param.m_dec.m_reduce; } l_tilec->data_size_needed = l_data_size; if (p_tcd->m_is_decoder && !opj_alloc_tile_component_data(l_tilec)) { return OPJ_FALSE; } l_data_size = l_tilec->numresolutions * (OPJ_UINT32)sizeof(opj_tcd_resolution_t); if (l_tilec->resolutions == 00) { l_tilec->resolutions = (opj_tcd_resolution_t *) opj_malloc(l_data_size); if (! l_tilec->resolutions ) { return OPJ_FALSE; } /*fprintf(stderr, "\tAllocate resolutions of tilec (opj_tcd_resolution_t): %d\n",l_data_size);*/ l_tilec->resolutions_size = l_data_size; memset(l_tilec->resolutions,0,l_data_size); } else if (l_data_size > l_tilec->resolutions_size) { opj_tcd_resolution_t* new_resolutions = (opj_tcd_resolution_t *) opj_realloc(l_tilec->resolutions, l_data_size); if (! new_resolutions) { /* opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to tile resolutions\n"); */ fprintf(stderr, "Not enough memory to tile resolutions\n"); opj_free(l_tilec->resolutions); l_tilec->resolutions = NULL; l_tilec->resolutions_size = 0; return OPJ_FALSE; } l_tilec->resolutions = new_resolutions; /*fprintf(stderr, "\tReallocate data of tilec (int): from %d to %d x OPJ_UINT32\n", l_tilec->resolutions_size, l_data_size);*/ memset(((OPJ_BYTE*) l_tilec->resolutions)+l_tilec->resolutions_size,0,l_data_size - l_tilec->resolutions_size); l_tilec->resolutions_size = l_data_size; } l_level_no = l_tilec->numresolutions - 1; l_res = l_tilec->resolutions; l_step_size = l_tccp->stepsizes; if (l_tccp->qmfbid == 0) { l_gain_ptr = &opj_dwt_getgain_real; } else { l_gain_ptr = &opj_dwt_getgain; } /*fprintf(stderr, "\tlevel_no=%d\n",l_level_no);*/ for (resno = 0; resno < l_tilec->numresolutions; ++resno) { /*fprintf(stderr, "\t\tresno = %d/%d\n", resno, l_tilec->numresolutions);*/ OPJ_INT32 tlcbgxstart, tlcbgystart /*, brcbgxend, brcbgyend*/; OPJ_UINT32 cbgwidthexpn, cbgheightexpn; OPJ_UINT32 cblkwidthexpn, cblkheightexpn; /* border for each resolution level (global) */ l_res->x0 = opj_int_ceildivpow2(l_tilec->x0, (OPJ_INT32)l_level_no); l_res->y0 = opj_int_ceildivpow2(l_tilec->y0, (OPJ_INT32)l_level_no); l_res->x1 = opj_int_ceildivpow2(l_tilec->x1, (OPJ_INT32)l_level_no); l_res->y1 = opj_int_ceildivpow2(l_tilec->y1, (OPJ_INT32)l_level_no); /*fprintf(stderr, "\t\t\tres_x0= %d, res_y0 =%d, res_x1=%d, res_y1=%d\n", l_res->x0, l_res->y0, l_res->x1, l_res->y1);*/ /* p. 35, table A-23, ISO/IEC FDIS154444-1 : 2000 (18 august 2000) */ l_pdx = l_tccp->prcw[resno]; l_pdy = l_tccp->prch[resno]; /*fprintf(stderr, "\t\t\tpdx=%d, pdy=%d\n", l_pdx, l_pdy);*/ /* p. 
64, B.6, ISO/IEC FDIS15444-1 : 2000 (18 august 2000) */ l_tl_prc_x_start = opj_int_floordivpow2(l_res->x0, (OPJ_INT32)l_pdx) << l_pdx; l_tl_prc_y_start = opj_int_floordivpow2(l_res->y0, (OPJ_INT32)l_pdy) << l_pdy; l_br_prc_x_end = opj_int_ceildivpow2(l_res->x1, (OPJ_INT32)l_pdx) << l_pdx; l_br_prc_y_end = opj_int_ceildivpow2(l_res->y1, (OPJ_INT32)l_pdy) << l_pdy; /*fprintf(stderr, "\t\t\tprc_x_start=%d, prc_y_start=%d, br_prc_x_end=%d, br_prc_y_end=%d \n", l_tl_prc_x_start, l_tl_prc_y_start, l_br_prc_x_end ,l_br_prc_y_end );*/ l_res->pw = (l_res->x0 == l_res->x1) ? 0 : (OPJ_UINT32)((l_br_prc_x_end - l_tl_prc_x_start) >> l_pdx); l_res->ph = (l_res->y0 == l_res->y1) ? 0 : (OPJ_UINT32)((l_br_prc_y_end - l_tl_prc_y_start) >> l_pdy); /*fprintf(stderr, "\t\t\tres_pw=%d, res_ph=%d\n", l_res->pw, l_res->ph );*/ l_nb_precincts = l_res->pw * l_res->ph; l_nb_precinct_size = l_nb_precincts * (OPJ_UINT32)sizeof(opj_tcd_precinct_t); if (resno == 0) { tlcbgxstart = l_tl_prc_x_start; tlcbgystart = l_tl_prc_y_start; /*brcbgxend = l_br_prc_x_end;*/ /* brcbgyend = l_br_prc_y_end;*/ cbgwidthexpn = l_pdx; cbgheightexpn = l_pdy; l_res->numbands = 1; } else { tlcbgxstart = opj_int_ceildivpow2(l_tl_prc_x_start, 1); tlcbgystart = opj_int_ceildivpow2(l_tl_prc_y_start, 1); /*brcbgxend = opj_int_ceildivpow2(l_br_prc_x_end, 1);*/ /*brcbgyend = opj_int_ceildivpow2(l_br_prc_y_end, 1);*/ cbgwidthexpn = l_pdx - 1; cbgheightexpn = l_pdy - 1; l_res->numbands = 3; } cblkwidthexpn = opj_uint_min(l_tccp->cblkw, cbgwidthexpn); cblkheightexpn = opj_uint_min(l_tccp->cblkh, cbgheightexpn); l_band = l_res->bands; for (bandno = 0; bandno < l_res->numbands; ++bandno) { OPJ_INT32 numbps; /*fprintf(stderr, "\t\t\tband_no=%d/%d\n", bandno, l_res->numbands );*/ if (resno == 0) { l_band->bandno = 0 ; l_band->x0 = opj_int_ceildivpow2(l_tilec->x0, (OPJ_INT32)l_level_no); l_band->y0 = opj_int_ceildivpow2(l_tilec->y0, (OPJ_INT32)l_level_no); l_band->x1 = opj_int_ceildivpow2(l_tilec->x1, (OPJ_INT32)l_level_no); l_band->y1 = opj_int_ceildivpow2(l_tilec->y1, (OPJ_INT32)l_level_no); } else { l_band->bandno = bandno + 1; /* x0b = 1 if bandno = 1 or 3 */ l_x0b = l_band->bandno&1; /* y0b = 1 if bandno = 2 or 3 */ l_y0b = (OPJ_INT32)((l_band->bandno)>>1); /* l_band border (global) */ l_band->x0 = opj_int_ceildivpow2(l_tilec->x0 - (1 << l_level_no) * l_x0b, (OPJ_INT32)(l_level_no + 1)); l_band->y0 = opj_int_ceildivpow2(l_tilec->y0 - (1 << l_level_no) * l_y0b, (OPJ_INT32)(l_level_no + 1)); l_band->x1 = opj_int_ceildivpow2(l_tilec->x1 - (1 << l_level_no) * l_x0b, (OPJ_INT32)(l_level_no + 1)); l_band->y1 = opj_int_ceildivpow2(l_tilec->y1 - (1 << l_level_no) * l_y0b, (OPJ_INT32)(l_level_no + 1)); } /** avoid an if with storing function pointer */ l_gain = (*l_gain_ptr) (l_band->bandno); numbps = (OPJ_INT32)(l_image_comp->prec + l_gain); l_band->stepsize = (OPJ_FLOAT32)(((1.0 + l_step_size->mant / 2048.0) * pow(2.0, (OPJ_INT32) (numbps - l_step_size->expn)))) * fraction; l_band->numbps = l_step_size->expn + (OPJ_INT32)l_tccp->numgbits - 1; /* WHY -1 ? */ if (! l_band->precincts) { l_band->precincts = (opj_tcd_precinct_t *) opj_malloc( /*3 * */ l_nb_precinct_size); if (! 
l_band->precincts) { return OPJ_FALSE; } /*fprintf(stderr, "\t\t\t\tAllocate precincts of a band (opj_tcd_precinct_t): %d\n",l_nb_precinct_size); */ memset(l_band->precincts,0,l_nb_precinct_size); l_band->precincts_data_size = l_nb_precinct_size; } else if (l_band->precincts_data_size < l_nb_precinct_size) { opj_tcd_precinct_t * new_precincts = (opj_tcd_precinct_t *) opj_realloc(l_band->precincts,/*3 * */ l_nb_precinct_size); if (! new_precincts) { /* opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to handle band precints\n"); */ fprintf(stderr, "Not enough memory to handle band precints\n"); opj_free(l_band->precincts); l_band->precincts = NULL; l_band->precincts_data_size = 0; return OPJ_FALSE; } l_band->precincts = new_precincts; /*fprintf(stderr, "\t\t\t\tReallocate precincts of a band (opj_tcd_precinct_t): from %d to %d\n",l_band->precincts_data_size, l_nb_precinct_size);*/ memset(((OPJ_BYTE *) l_band->precincts) + l_band->precincts_data_size,0,l_nb_precinct_size - l_band->precincts_data_size); l_band->precincts_data_size = l_nb_precinct_size; } l_current_precinct = l_band->precincts; for (precno = 0; precno < l_nb_precincts; ++precno) { OPJ_INT32 tlcblkxstart, tlcblkystart, brcblkxend, brcblkyend; OPJ_INT32 cbgxstart = tlcbgxstart + (OPJ_INT32)(precno % l_res->pw) * (1 << cbgwidthexpn); OPJ_INT32 cbgystart = tlcbgystart + (OPJ_INT32)(precno / l_res->pw) * (1 << cbgheightexpn); OPJ_INT32 cbgxend = cbgxstart + (1 << cbgwidthexpn); OPJ_INT32 cbgyend = cbgystart + (1 << cbgheightexpn); /*fprintf(stderr, "\t precno=%d; bandno=%d, resno=%d; compno=%d\n", precno, bandno , resno, compno);*/ /*fprintf(stderr, "\t tlcbgxstart(=%d) + (precno(=%d) percent res->pw(=%d)) * (1 << cbgwidthexpn(=%d)) \n",tlcbgxstart,precno,l_res->pw,cbgwidthexpn);*/ /* precinct size (global) */ /*fprintf(stderr, "\t cbgxstart=%d, l_band->x0 = %d \n",cbgxstart, l_band->x0);*/ l_current_precinct->x0 = opj_int_max(cbgxstart, l_band->x0); l_current_precinct->y0 = opj_int_max(cbgystart, l_band->y0); l_current_precinct->x1 = opj_int_min(cbgxend, l_band->x1); l_current_precinct->y1 = opj_int_min(cbgyend, l_band->y1); /*fprintf(stderr, "\t prc_x0=%d; prc_y0=%d, prc_x1=%d; prc_y1=%d\n",l_current_precinct->x0, l_current_precinct->y0 ,l_current_precinct->x1, l_current_precinct->y1);*/ tlcblkxstart = opj_int_floordivpow2(l_current_precinct->x0, (OPJ_INT32)cblkwidthexpn) << cblkwidthexpn; /*fprintf(stderr, "\t tlcblkxstart =%d\n",tlcblkxstart );*/ tlcblkystart = opj_int_floordivpow2(l_current_precinct->y0, (OPJ_INT32)cblkheightexpn) << cblkheightexpn; /*fprintf(stderr, "\t tlcblkystart =%d\n",tlcblkystart );*/ brcblkxend = opj_int_ceildivpow2(l_current_precinct->x1, (OPJ_INT32)cblkwidthexpn) << cblkwidthexpn; /*fprintf(stderr, "\t brcblkxend =%d\n",brcblkxend );*/ brcblkyend = opj_int_ceildivpow2(l_current_precinct->y1, (OPJ_INT32)cblkheightexpn) << cblkheightexpn; /*fprintf(stderr, "\t brcblkyend =%d\n",brcblkyend );*/ l_current_precinct->cw = (OPJ_UINT32)((brcblkxend - tlcblkxstart) >> cblkwidthexpn); l_current_precinct->ch = (OPJ_UINT32)((brcblkyend - tlcblkystart) >> cblkheightexpn); l_nb_code_blocks = l_current_precinct->cw * l_current_precinct->ch; /*fprintf(stderr, "\t\t\t\t precinct_cw = %d x recinct_ch = %d\n",l_current_precinct->cw, l_current_precinct->ch); */ l_nb_code_blocks_size = l_nb_code_blocks * (OPJ_UINT32)sizeof_block; if (! l_current_precinct->cblks.blocks) { l_current_precinct->cblks.blocks = opj_malloc(l_nb_code_blocks_size); if (! 
l_current_precinct->cblks.blocks ) { return OPJ_FALSE; } /*fprintf(stderr, "\t\t\t\tAllocate cblks of a precinct (opj_tcd_cblk_dec_t): %d\n",l_nb_code_blocks_size);*/ memset(l_current_precinct->cblks.blocks,0,l_nb_code_blocks_size); l_current_precinct->block_size = l_nb_code_blocks_size; } else if (l_nb_code_blocks_size > l_current_precinct->block_size) { void *new_blocks = opj_realloc(l_current_precinct->cblks.blocks, l_nb_code_blocks_size); if (! new_blocks) { opj_free(l_current_precinct->cblks.blocks); l_current_precinct->cblks.blocks = NULL; l_current_precinct->block_size = 0; /* opj_event_msg(p_manager, EVT_ERROR, "Not enough memory for current precinct codeblock element\n"); */ fprintf(stderr, "Not enough memory for current precinct codeblock element\n"); return OPJ_FALSE; } l_current_precinct->cblks.blocks = new_blocks; /*fprintf(stderr, "\t\t\t\tReallocate cblks of a precinct (opj_tcd_cblk_dec_t): from %d to %d\n",l_current_precinct->block_size, l_nb_code_blocks_size); */ memset(((OPJ_BYTE *) l_current_precinct->cblks.blocks) + l_current_precinct->block_size ,0 ,l_nb_code_blocks_size - l_current_precinct->block_size); l_current_precinct->block_size = l_nb_code_blocks_size; } if (! l_current_precinct->incltree) { l_current_precinct->incltree = opj_tgt_create(l_current_precinct->cw, l_current_precinct->ch); } else{ l_current_precinct->incltree = opj_tgt_init(l_current_precinct->incltree, l_current_precinct->cw, l_current_precinct->ch); } if (! l_current_precinct->incltree) { fprintf(stderr, "WARNING: No incltree created.\n"); /*return OPJ_FALSE;*/ } if (! l_current_precinct->imsbtree) { l_current_precinct->imsbtree = opj_tgt_create( l_current_precinct->cw, l_current_precinct->ch); } else { l_current_precinct->imsbtree = opj_tgt_init( l_current_precinct->imsbtree, l_current_precinct->cw, l_current_precinct->ch); } if (! l_current_precinct->imsbtree) { fprintf(stderr, "WARNING: No imsbtree created.\n"); /*return OPJ_FALSE;*/ } for (cblkno = 0; cblkno < l_nb_code_blocks; ++cblkno) { OPJ_INT32 cblkxstart = tlcblkxstart + (OPJ_INT32)(cblkno % l_current_precinct->cw) * (1 << cblkwidthexpn); OPJ_INT32 cblkystart = tlcblkystart + (OPJ_INT32)(cblkno / l_current_precinct->cw) * (1 << cblkheightexpn); OPJ_INT32 cblkxend = cblkxstart + (1 << cblkwidthexpn); OPJ_INT32 cblkyend = cblkystart + (1 << cblkheightexpn); if (isEncoder) { opj_tcd_cblk_enc_t* l_code_block = l_current_precinct->cblks.enc + cblkno; if (! opj_tcd_code_block_enc_allocate(l_code_block)) { return OPJ_FALSE; } /* code-block size (global) */ l_code_block->x0 = opj_int_max(cblkxstart, l_current_precinct->x0); l_code_block->y0 = opj_int_max(cblkystart, l_current_precinct->y0); l_code_block->x1 = opj_int_min(cblkxend, l_current_precinct->x1); l_code_block->y1 = opj_int_min(cblkyend, l_current_precinct->y1); if (! opj_tcd_code_block_enc_allocate_data(l_code_block)) { return OPJ_FALSE; } } else { opj_tcd_cblk_dec_t* l_code_block = l_current_precinct->cblks.dec + cblkno; if (! 
opj_tcd_code_block_dec_allocate(l_code_block)) { return OPJ_FALSE; } /* code-block size (global) */ l_code_block->x0 = opj_int_max(cblkxstart, l_current_precinct->x0); l_code_block->y0 = opj_int_max(cblkystart, l_current_precinct->y0); l_code_block->x1 = opj_int_min(cblkxend, l_current_precinct->x1); l_code_block->y1 = opj_int_min(cblkyend, l_current_precinct->y1); } } ++l_current_precinct; } /* precno */ ++l_band; ++l_step_size; } /* bandno */ ++l_res; --l_level_no; } /* resno */ ++l_tccp; ++l_tilec; ++l_image_comp; } /* compno */ return OPJ_TRUE; } OPJ_BOOL opj_tcd_init_encode_tile (opj_tcd_t *p_tcd, OPJ_UINT32 p_tile_no) { return opj_tcd_init_tile(p_tcd, p_tile_no, OPJ_TRUE, 1.0F, sizeof(opj_tcd_cblk_enc_t)); } OPJ_BOOL opj_tcd_init_decode_tile (opj_tcd_t *p_tcd, OPJ_UINT32 p_tile_no) { return opj_tcd_init_tile(p_tcd, p_tile_no, OPJ_FALSE, 0.5F, sizeof(opj_tcd_cblk_dec_t)); } /** * Allocates memory for an encoding code block (but not data memory). */ static OPJ_BOOL opj_tcd_code_block_enc_allocate (opj_tcd_cblk_enc_t * p_code_block) { if (! p_code_block->layers) { /* no memset since data */ p_code_block->layers = (opj_tcd_layer_t*) opj_calloc(100, sizeof(opj_tcd_layer_t)); if (! p_code_block->layers) { return OPJ_FALSE; } } if (! p_code_block->passes) { p_code_block->passes = (opj_tcd_pass_t*) opj_calloc(100, sizeof(opj_tcd_pass_t)); if (! p_code_block->passes) { return OPJ_FALSE; } } return OPJ_TRUE; } /** * Allocates data memory for an encoding code block. */ static OPJ_BOOL opj_tcd_code_block_enc_allocate_data (opj_tcd_cblk_enc_t * p_code_block) { OPJ_UINT32 l_data_size; l_data_size = (OPJ_UINT32)((p_code_block->x1 - p_code_block->x0) * (p_code_block->y1 - p_code_block->y0) * (OPJ_INT32)sizeof(OPJ_UINT32)); if (l_data_size > p_code_block->data_size) { if (p_code_block->data) { opj_free(p_code_block->data - 1); /* again, why -1 */ } p_code_block->data = (OPJ_BYTE*) opj_malloc(l_data_size); if(! p_code_block->data) { p_code_block->data_size = 0U; return OPJ_FALSE; } p_code_block->data_size = l_data_size; p_code_block->data[0] = 0; p_code_block->data+=1; /*why +1 ?*/ } return OPJ_TRUE; } /** * Allocates memory for a decoding code block. */ static OPJ_BOOL opj_tcd_code_block_dec_allocate (opj_tcd_cblk_dec_t * p_code_block) { if (! p_code_block->data) { p_code_block->data = (OPJ_BYTE*) opj_malloc(OPJ_J2K_DEFAULT_CBLK_DATA_SIZE); if (! p_code_block->data) { return OPJ_FALSE; } p_code_block->data_max_size = OPJ_J2K_DEFAULT_CBLK_DATA_SIZE; /*fprintf(stderr, "Allocate 8192 elements of code_block->data\n");*/ p_code_block->segs = (opj_tcd_seg_t *) opj_calloc(OPJ_J2K_DEFAULT_NB_SEGS,sizeof(opj_tcd_seg_t)); if (! 
p_code_block->segs) { return OPJ_FALSE; } /*fprintf(stderr, "Allocate %d elements of code_block->data\n", OPJ_J2K_DEFAULT_NB_SEGS * sizeof(opj_tcd_seg_t));*/ p_code_block->m_current_max_segs = OPJ_J2K_DEFAULT_NB_SEGS; /*fprintf(stderr, "m_current_max_segs of code_block->data = %d\n", p_code_block->m_current_max_segs);*/ } else { /* sanitize */ OPJ_BYTE* l_data = p_code_block->data; OPJ_UINT32 l_data_max_size = p_code_block->data_max_size; opj_tcd_seg_t * l_segs = p_code_block->segs; OPJ_UINT32 l_current_max_segs = p_code_block->m_current_max_segs; memset(p_code_block, 0, sizeof(opj_tcd_cblk_dec_t)); p_code_block->data = l_data; p_code_block->data_max_size = l_data_max_size; p_code_block->segs = l_segs; p_code_block->m_current_max_segs = l_current_max_segs; } return OPJ_TRUE; } OPJ_UINT32 opj_tcd_get_decoded_tile_size ( opj_tcd_t *p_tcd ) { OPJ_UINT32 i; OPJ_UINT32 l_data_size = 0; opj_image_comp_t * l_img_comp = 00; opj_tcd_tilecomp_t * l_tile_comp = 00; opj_tcd_resolution_t * l_res = 00; OPJ_UINT32 l_size_comp, l_remaining; l_tile_comp = p_tcd->tcd_image->tiles->comps; l_img_comp = p_tcd->image->comps; for (i=0;i<p_tcd->image->numcomps;++i) { l_size_comp = l_img_comp->prec >> 3; /*(/ 8)*/ l_remaining = l_img_comp->prec & 7; /* (%8) */ if(l_remaining) { ++l_size_comp; } if (l_size_comp == 3) { l_size_comp = 4; } l_res = l_tile_comp->resolutions + l_tile_comp->minimum_num_resolutions - 1; l_data_size += l_size_comp * (OPJ_UINT32)((l_res->x1 - l_res->x0) * (l_res->y1 - l_res->y0)); ++l_img_comp; ++l_tile_comp; } return l_data_size; } OPJ_BOOL opj_tcd_encode_tile( opj_tcd_t *p_tcd, OPJ_UINT32 p_tile_no, OPJ_BYTE *p_dest, OPJ_UINT32 * p_data_written, OPJ_UINT32 p_max_length, opj_codestream_info_t *p_cstr_info) { if (p_tcd->cur_tp_num == 0) { p_tcd->tcd_tileno = p_tile_no; p_tcd->tcp = &p_tcd->cp->tcps[p_tile_no]; /* INDEX >> "Precinct_nb_X et Precinct_nb_Y" */ if(p_cstr_info) { OPJ_UINT32 l_num_packs = 0; OPJ_UINT32 i; opj_tcd_tilecomp_t *l_tilec_idx = &p_tcd->tcd_image->tiles->comps[0]; /* based on component 0 */ opj_tccp_t *l_tccp = p_tcd->tcp->tccps; /* based on component 0 */ for (i = 0; i < l_tilec_idx->numresolutions; i++) { opj_tcd_resolution_t *l_res_idx = &l_tilec_idx->resolutions[i]; p_cstr_info->tile[p_tile_no].pw[i] = (int)l_res_idx->pw; p_cstr_info->tile[p_tile_no].ph[i] = (int)l_res_idx->ph; l_num_packs += l_res_idx->pw * l_res_idx->ph; p_cstr_info->tile[p_tile_no].pdx[i] = (int)l_tccp->prcw[i]; p_cstr_info->tile[p_tile_no].pdy[i] = (int)l_tccp->prch[i]; } p_cstr_info->tile[p_tile_no].packet = (opj_packet_info_t*) opj_calloc((size_t)p_cstr_info->numcomps * (size_t)p_cstr_info->numlayers * l_num_packs, sizeof(opj_packet_info_t)); if (!p_cstr_info->tile[p_tile_no].packet) { /* FIXME event manager error callback */ return OPJ_FALSE; } } /* << INDEX */ /* FIXME _ProfStart(PGROUP_DC_SHIFT); */ /*---------------TILE-------------------*/ if (! opj_tcd_dc_level_shift_encode(p_tcd)) { return OPJ_FALSE; } /* FIXME _ProfStop(PGROUP_DC_SHIFT); */ /* FIXME _ProfStart(PGROUP_MCT); */ if (! opj_tcd_mct_encode(p_tcd)) { return OPJ_FALSE; } /* FIXME _ProfStop(PGROUP_MCT); */ /* FIXME _ProfStart(PGROUP_DWT); */ if (! opj_tcd_dwt_encode(p_tcd)) { return OPJ_FALSE; } /* FIXME _ProfStop(PGROUP_DWT); */ /* FIXME _ProfStart(PGROUP_T1); */ if (! opj_tcd_t1_encode(p_tcd)) { return OPJ_FALSE; } /* FIXME _ProfStop(PGROUP_T1); */ /* FIXME _ProfStart(PGROUP_RATE); */ if (! 
opj_tcd_rate_allocate_encode(p_tcd,p_dest,p_max_length,p_cstr_info)) { return OPJ_FALSE; } /* FIXME _ProfStop(PGROUP_RATE); */ } /*--------------TIER2------------------*/ /* INDEX */ if (p_cstr_info) { p_cstr_info->index_write = 1; } /* FIXME _ProfStart(PGROUP_T2); */ if (! opj_tcd_t2_encode(p_tcd,p_dest,p_data_written,p_max_length,p_cstr_info)) { return OPJ_FALSE; } /* FIXME _ProfStop(PGROUP_T2); */ /*---------------CLEAN-------------------*/ return OPJ_TRUE; } OPJ_BOOL opj_tcd_decode_tile( opj_tcd_t *p_tcd, OPJ_BYTE *p_src, OPJ_UINT32 p_max_length, OPJ_UINT32 p_tile_no, opj_codestream_index_t *p_cstr_index ) { OPJ_UINT32 l_data_read; p_tcd->tcd_tileno = p_tile_no; p_tcd->tcp = &(p_tcd->cp->tcps[p_tile_no]); #ifdef TODO_MSD /* FIXME */ /* INDEX >> */ if(p_cstr_info) { OPJ_UINT32 resno, compno, numprec = 0; for (compno = 0; compno < (OPJ_UINT32) p_cstr_info->numcomps; compno++) { opj_tcp_t *tcp = &p_tcd->cp->tcps[0]; opj_tccp_t *tccp = &tcp->tccps[compno]; opj_tcd_tilecomp_t *tilec_idx = &p_tcd->tcd_image->tiles->comps[compno]; for (resno = 0; resno < tilec_idx->numresolutions; resno++) { opj_tcd_resolution_t *res_idx = &tilec_idx->resolutions[resno]; p_cstr_info->tile[p_tile_no].pw[resno] = res_idx->pw; p_cstr_info->tile[p_tile_no].ph[resno] = res_idx->ph; numprec += res_idx->pw * res_idx->ph; p_cstr_info->tile[p_tile_no].pdx[resno] = tccp->prcw[resno]; p_cstr_info->tile[p_tile_no].pdy[resno] = tccp->prch[resno]; } } p_cstr_info->tile[p_tile_no].packet = (opj_packet_info_t *) opj_malloc(p_cstr_info->numlayers * numprec * sizeof(opj_packet_info_t)); p_cstr_info->packno = 0; } /* << INDEX */ #endif /*--------------TIER2------------------*/ /* FIXME _ProfStart(PGROUP_T2); */ l_data_read = 0; if (! opj_tcd_t2_decode(p_tcd, p_src, &l_data_read, p_max_length, p_cstr_index)) { return OPJ_FALSE; } /* FIXME _ProfStop(PGROUP_T2); */ /*------------------TIER1-----------------*/ /* FIXME _ProfStart(PGROUP_T1); */ if (! opj_tcd_t1_decode(p_tcd)) { return OPJ_FALSE; } /* FIXME _ProfStop(PGROUP_T1); */ /*----------------DWT---------------------*/ /* FIXME _ProfStart(PGROUP_DWT); */ if (! opj_tcd_dwt_decode(p_tcd)) { return OPJ_FALSE; } /* FIXME _ProfStop(PGROUP_DWT); */ /*----------------MCT-------------------*/ /* FIXME _ProfStart(PGROUP_MCT); */ if (! opj_tcd_mct_decode(p_tcd)) { return OPJ_FALSE; } /* FIXME _ProfStop(PGROUP_MCT); */ /* FIXME _ProfStart(PGROUP_DC_SHIFT); */ if (! 
opj_tcd_dc_level_shift_decode(p_tcd)) { return OPJ_FALSE; } /* FIXME _ProfStop(PGROUP_DC_SHIFT); */ /*---------------TILE-------------------*/ return OPJ_TRUE; } OPJ_BOOL opj_tcd_update_tile_data ( opj_tcd_t *p_tcd, OPJ_BYTE * p_dest, OPJ_UINT32 p_dest_length ) { OPJ_UINT32 i,j,k,l_data_size = 0; opj_image_comp_t * l_img_comp = 00; opj_tcd_tilecomp_t * l_tilec = 00; opj_tcd_resolution_t * l_res; OPJ_UINT32 l_size_comp, l_remaining; OPJ_UINT32 l_stride, l_width,l_height; l_data_size = opj_tcd_get_decoded_tile_size(p_tcd); if (l_data_size > p_dest_length) { return OPJ_FALSE; } l_tilec = p_tcd->tcd_image->tiles->comps; l_img_comp = p_tcd->image->comps; for (i=0;i<p_tcd->image->numcomps;++i) { l_size_comp = l_img_comp->prec >> 3; /*(/ 8)*/ l_remaining = l_img_comp->prec & 7; /* (%8) */ l_res = l_tilec->resolutions + l_img_comp->resno_decoded; l_width = (OPJ_UINT32)(l_res->x1 - l_res->x0); l_height = (OPJ_UINT32)(l_res->y1 - l_res->y0); l_stride = (OPJ_UINT32)(l_tilec->x1 - l_tilec->x0) - l_width; if (l_remaining) { ++l_size_comp; } if (l_size_comp == 3) { l_size_comp = 4; } switch (l_size_comp) { case 1: { OPJ_CHAR * l_dest_ptr = (OPJ_CHAR *) p_dest; const OPJ_INT32 * l_src_ptr = l_tilec->data; if (l_img_comp->sgnd) { for (j=0;j<l_height;++j) { for (k=0;k<l_width;++k) { *(l_dest_ptr++) = (OPJ_CHAR) (*(l_src_ptr++)); } l_src_ptr += l_stride; } } else { for (j=0;j<l_height;++j) { for (k=0;k<l_width;++k) { *(l_dest_ptr++) = (OPJ_CHAR) ((*(l_src_ptr++))&0xff); } l_src_ptr += l_stride; } } p_dest = (OPJ_BYTE *)l_dest_ptr; } break; case 2: { const OPJ_INT32 * l_src_ptr = l_tilec->data; OPJ_INT16 * l_dest_ptr = (OPJ_INT16 *) p_dest; if (l_img_comp->sgnd) { for (j=0;j<l_height;++j) { for (k=0;k<l_width;++k) { *(l_dest_ptr++) = (OPJ_INT16) (*(l_src_ptr++)); } l_src_ptr += l_stride; } } else { for (j=0;j<l_height;++j) { for (k=0;k<l_width;++k) { *(l_dest_ptr++) = (OPJ_INT16) ((*(l_src_ptr++))&0xffff); } l_src_ptr += l_stride; } } p_dest = (OPJ_BYTE*) l_dest_ptr; } break; case 4: { OPJ_INT32 * l_dest_ptr = (OPJ_INT32 *) p_dest; OPJ_INT32 * l_src_ptr = l_tilec->data; for (j=0;j<l_height;++j) { for (k=0;k<l_width;++k) { *(l_dest_ptr++) = (*(l_src_ptr++)); } l_src_ptr += l_stride; } p_dest = (OPJ_BYTE*) l_dest_ptr; } break; } ++l_img_comp; ++l_tilec; } return OPJ_TRUE; } void opj_tcd_free_tile(opj_tcd_t *p_tcd) { OPJ_UINT32 compno, resno, bandno, precno; opj_tcd_tile_t *l_tile = 00; opj_tcd_tilecomp_t *l_tile_comp = 00; opj_tcd_resolution_t *l_res = 00; opj_tcd_band_t *l_band = 00; opj_tcd_precinct_t *l_precinct = 00; OPJ_UINT32 l_nb_resolutions, l_nb_precincts; void (* l_tcd_code_block_deallocate) (opj_tcd_precinct_t *) = 00; if (! p_tcd) { return; } if (! p_tcd->tcd_image) { return; } if (p_tcd->m_is_decoder) { l_tcd_code_block_deallocate = opj_tcd_code_block_dec_deallocate; } else { l_tcd_code_block_deallocate = opj_tcd_code_block_enc_deallocate; } l_tile = p_tcd->tcd_image->tiles; if (! 
l_tile) { return; } l_tile_comp = l_tile->comps; for (compno = 0; compno < l_tile->numcomps; ++compno) { l_res = l_tile_comp->resolutions; if (l_res) { l_nb_resolutions = l_tile_comp->resolutions_size / sizeof(opj_tcd_resolution_t); for (resno = 0; resno < l_nb_resolutions; ++resno) { l_band = l_res->bands; for (bandno = 0; bandno < 3; ++bandno) { l_precinct = l_band->precincts; if (l_precinct) { l_nb_precincts = l_band->precincts_data_size / sizeof(opj_tcd_precinct_t); for (precno = 0; precno < l_nb_precincts; ++precno) { opj_tgt_destroy(l_precinct->incltree); l_precinct->incltree = 00; opj_tgt_destroy(l_precinct->imsbtree); l_precinct->imsbtree = 00; (*l_tcd_code_block_deallocate) (l_precinct); ++l_precinct; } opj_free(l_band->precincts); l_band->precincts = 00; } ++l_band; } /* for (resno */ ++l_res; } opj_free(l_tile_comp->resolutions); l_tile_comp->resolutions = 00; } if (l_tile_comp->ownsData && l_tile_comp->data) { opj_free(l_tile_comp->data); l_tile_comp->data = 00; l_tile_comp->ownsData = 0; l_tile_comp->data_size = 0; l_tile_comp->data_size_needed = 0; } ++l_tile_comp; } opj_free(l_tile->comps); l_tile->comps = 00; opj_free(p_tcd->tcd_image->tiles); p_tcd->tcd_image->tiles = 00; } OPJ_BOOL opj_tcd_t2_decode (opj_tcd_t *p_tcd, OPJ_BYTE * p_src_data, OPJ_UINT32 * p_data_read, OPJ_UINT32 p_max_src_size, opj_codestream_index_t *p_cstr_index ) { opj_t2_t * l_t2; l_t2 = opj_t2_create(p_tcd->image, p_tcd->cp); if (l_t2 == 00) { return OPJ_FALSE; } if (! opj_t2_decode_packets( l_t2, p_tcd->tcd_tileno, p_tcd->tcd_image->tiles, p_src_data, p_data_read, p_max_src_size, p_cstr_index)) { opj_t2_destroy(l_t2); return OPJ_FALSE; } opj_t2_destroy(l_t2); /*---------------CLEAN-------------------*/ return OPJ_TRUE; } OPJ_BOOL opj_tcd_t1_decode ( opj_tcd_t *p_tcd ) { OPJ_UINT32 compno; opj_t1_t * l_t1; opj_tcd_tile_t * l_tile = p_tcd->tcd_image->tiles; opj_tcd_tilecomp_t* l_tile_comp = l_tile->comps; opj_tccp_t * l_tccp = p_tcd->tcp->tccps; l_t1 = opj_t1_create(OPJ_FALSE); if (l_t1 == 00) { return OPJ_FALSE; } for (compno = 0; compno < l_tile->numcomps; ++compno) { /* The +3 is headroom required by the vectorized DWT */ if (OPJ_FALSE == opj_t1_decode_cblks(l_t1, l_tile_comp, l_tccp)) { opj_t1_destroy(l_t1); return OPJ_FALSE; } ++l_tile_comp; ++l_tccp; } opj_t1_destroy(l_t1); return OPJ_TRUE; } OPJ_BOOL opj_tcd_dwt_decode ( opj_tcd_t *p_tcd ) { OPJ_UINT32 compno; opj_tcd_tile_t * l_tile = p_tcd->tcd_image->tiles; opj_tcd_tilecomp_t * l_tile_comp = l_tile->comps; opj_tccp_t * l_tccp = p_tcd->tcp->tccps; opj_image_comp_t * l_img_comp = p_tcd->image->comps; for (compno = 0; compno < l_tile->numcomps; compno++) { /* if (tcd->cp->reduce != 0) { tcd->image->comps[compno].resno_decoded = tile->comps[compno].numresolutions - tcd->cp->reduce - 1; if (tcd->image->comps[compno].resno_decoded < 0) { return false; } } numres2decode = tcd->image->comps[compno].resno_decoded + 1; if(numres2decode > 0){ */ if (l_tccp->qmfbid == 1) { if (! opj_dwt_decode(l_tile_comp, l_img_comp->resno_decoded+1)) { return OPJ_FALSE; } } else { if (! opj_dwt_decode_real(l_tile_comp, l_img_comp->resno_decoded+1)) { return OPJ_FALSE; } } ++l_tile_comp; ++l_img_comp; ++l_tccp; } return OPJ_TRUE; } OPJ_BOOL opj_tcd_mct_decode ( opj_tcd_t *p_tcd ) { opj_tcd_tile_t * l_tile = p_tcd->tcd_image->tiles; opj_tcp_t * l_tcp = p_tcd->tcp; opj_tcd_tilecomp_t * l_tile_comp = l_tile->comps; OPJ_UINT32 l_samples,i; if (! 
l_tcp->mct) { return OPJ_TRUE; } l_samples = (OPJ_UINT32)((l_tile_comp->x1 - l_tile_comp->x0) * (l_tile_comp->y1 - l_tile_comp->y0)); if (l_tile->numcomps >= 3 ){ /* testcase 1336.pdf.asan.47.376 */ if ((l_tile->comps[0].x1 - l_tile->comps[0].x0) * (l_tile->comps[0].y1 - l_tile->comps[0].y0) < (OPJ_INT32)l_samples || (l_tile->comps[1].x1 - l_tile->comps[1].x0) * (l_tile->comps[1].y1 - l_tile->comps[1].y0) < (OPJ_INT32)l_samples || (l_tile->comps[2].x1 - l_tile->comps[2].x0) * (l_tile->comps[2].y1 - l_tile->comps[2].y0) < (OPJ_INT32)l_samples) { fprintf(stderr, "Tiles don't all have the same dimension. Skip the MCT step.\n"); return OPJ_FALSE; } else if (l_tcp->mct == 2) { OPJ_BYTE ** l_data; if (! l_tcp->m_mct_decoding_matrix) { return OPJ_TRUE; } l_data = (OPJ_BYTE **) opj_malloc(l_tile->numcomps*sizeof(OPJ_BYTE*)); if (! l_data) { return OPJ_FALSE; } for (i=0;i<l_tile->numcomps;++i) { l_data[i] = (OPJ_BYTE*) l_tile_comp->data; ++l_tile_comp; } if (! opj_mct_decode_custom(/* MCT data */ (OPJ_BYTE*) l_tcp->m_mct_decoding_matrix, /* size of components */ l_samples, /* components */ l_data, /* nb of components (i.e. size of pData) */ l_tile->numcomps, /* tells if the data is signed */ p_tcd->image->comps->sgnd)) { opj_free(l_data); return OPJ_FALSE; } opj_free(l_data); } else { if (l_tcp->tccps->qmfbid == 1) { opj_mct_decode( l_tile->comps[0].data, l_tile->comps[1].data, l_tile->comps[2].data, l_samples); } else { opj_mct_decode_real((OPJ_FLOAT32*)l_tile->comps[0].data, (OPJ_FLOAT32*)l_tile->comps[1].data, (OPJ_FLOAT32*)l_tile->comps[2].data, l_samples); } } } else { /* FIXME need to use opj_event_msg function */ fprintf(stderr,"Number of components (%d) is inconsistent with a MCT. Skip the MCT step.\n",l_tile->numcomps); } return OPJ_TRUE; } OPJ_BOOL opj_tcd_dc_level_shift_decode ( opj_tcd_t *p_tcd ) { OPJ_UINT32 compno; opj_tcd_tilecomp_t * l_tile_comp = 00; opj_tccp_t * l_tccp = 00; opj_image_comp_t * l_img_comp = 00; opj_tcd_resolution_t* l_res = 00; opj_tcd_tile_t * l_tile; OPJ_UINT32 l_width,l_height,i,j; OPJ_INT32 * l_current_ptr; OPJ_INT32 l_min, l_max; OPJ_UINT32 l_stride; l_tile = p_tcd->tcd_image->tiles; l_tile_comp = l_tile->comps; l_tccp = p_tcd->tcp->tccps; l_img_comp = p_tcd->image->comps; for (compno = 0; compno < l_tile->numcomps; compno++) { l_res = l_tile_comp->resolutions + l_img_comp->resno_decoded; l_width = (OPJ_UINT32)(l_res->x1 - l_res->x0); l_height = (OPJ_UINT32)(l_res->y1 - l_res->y0); l_stride = (OPJ_UINT32)(l_tile_comp->x1 - l_tile_comp->x0) - l_width; assert(l_height == 0 || l_width + l_stride <= l_tile_comp->data_size / l_height); /*MUPDF*/ if (l_img_comp->sgnd) { l_min = -(1 << (l_img_comp->prec - 1)); l_max = (1 << (l_img_comp->prec - 1)) - 1; } else { l_min = 0; l_max = (1 << l_img_comp->prec) - 1; } l_current_ptr = l_tile_comp->data; if (l_tccp->qmfbid == 1) { for (j=0;j<l_height;++j) { for (i = 0; i < l_width; ++i) { *l_current_ptr = opj_int_clamp(*l_current_ptr + l_tccp->m_dc_level_shift, l_min, l_max); ++l_current_ptr; } l_current_ptr += l_stride; } } else { for (j=0;j<l_height;++j) { for (i = 0; i < l_width; ++i) { OPJ_FLOAT32 l_value = *((OPJ_FLOAT32 *) l_current_ptr); *l_current_ptr = opj_int_clamp((OPJ_INT32)lrintf(l_value) + l_tccp->m_dc_level_shift, l_min, l_max); ; ++l_current_ptr; } l_current_ptr += l_stride; } } ++l_img_comp; ++l_tccp; ++l_tile_comp; } return OPJ_TRUE; } /** * Deallocates the encoding data of the given precinct. 
*/ void opj_tcd_code_block_dec_deallocate (opj_tcd_precinct_t * p_precinct) { OPJ_UINT32 cblkno , l_nb_code_blocks; opj_tcd_cblk_dec_t * l_code_block = p_precinct->cblks.dec; if (l_code_block) { /*fprintf(stderr,"deallocate codeblock:{\n");*/ /*fprintf(stderr,"\t x0=%d, y0=%d, x1=%d, y1=%d\n",l_code_block->x0, l_code_block->y0, l_code_block->x1, l_code_block->y1);*/ /*fprintf(stderr,"\t numbps=%d, numlenbits=%d, len=%d, numnewpasses=%d, real_num_segs=%d, m_current_max_segs=%d\n ", l_code_block->numbps, l_code_block->numlenbits, l_code_block->len, l_code_block->numnewpasses, l_code_block->real_num_segs, l_code_block->m_current_max_segs );*/ l_nb_code_blocks = p_precinct->block_size / sizeof(opj_tcd_cblk_dec_t); /*fprintf(stderr,"nb_code_blocks =%d\t}\n", l_nb_code_blocks);*/ for (cblkno = 0; cblkno < l_nb_code_blocks; ++cblkno) { if (l_code_block->data) { opj_free(l_code_block->data); l_code_block->data = 00; } if (l_code_block->segs) { opj_free(l_code_block->segs ); l_code_block->segs = 00; } ++l_code_block; } opj_free(p_precinct->cblks.dec); p_precinct->cblks.dec = 00; } } /** * Deallocates the encoding data of the given precinct. */ void opj_tcd_code_block_enc_deallocate (opj_tcd_precinct_t * p_precinct) { OPJ_UINT32 cblkno , l_nb_code_blocks; opj_tcd_cblk_enc_t * l_code_block = p_precinct->cblks.enc; if (l_code_block) { l_nb_code_blocks = p_precinct->block_size / sizeof(opj_tcd_cblk_enc_t); for (cblkno = 0; cblkno < l_nb_code_blocks; ++cblkno) { if (l_code_block->data) { opj_free(l_code_block->data - 1); l_code_block->data = 00; } if (l_code_block->layers) { opj_free(l_code_block->layers ); l_code_block->layers = 00; } if (l_code_block->passes) { opj_free(l_code_block->passes ); l_code_block->passes = 00; } ++l_code_block; } opj_free(p_precinct->cblks.enc); p_precinct->cblks.enc = 00; } } OPJ_UINT32 opj_tcd_get_encoded_tile_size ( opj_tcd_t *p_tcd ) { OPJ_UINT32 i,l_data_size = 0; opj_image_comp_t * l_img_comp = 00; opj_tcd_tilecomp_t * l_tilec = 00; OPJ_UINT32 l_size_comp, l_remaining; l_tilec = p_tcd->tcd_image->tiles->comps; l_img_comp = p_tcd->image->comps; for (i=0;i<p_tcd->image->numcomps;++i) { l_size_comp = l_img_comp->prec >> 3; /*(/ 8)*/ l_remaining = l_img_comp->prec & 7; /* (%8) */ if (l_remaining) { ++l_size_comp; } if (l_size_comp == 3) { l_size_comp = 4; } l_data_size += l_size_comp * (OPJ_UINT32)((l_tilec->x1 - l_tilec->x0) * (l_tilec->y1 - l_tilec->y0)); ++l_img_comp; ++l_tilec; } return l_data_size; } OPJ_BOOL opj_tcd_dc_level_shift_encode ( opj_tcd_t *p_tcd ) { OPJ_UINT32 compno; opj_tcd_tilecomp_t * l_tile_comp = 00; opj_tccp_t * l_tccp = 00; opj_image_comp_t * l_img_comp = 00; opj_tcd_tile_t * l_tile; OPJ_UINT32 l_nb_elem,i; OPJ_INT32 * l_current_ptr; l_tile = p_tcd->tcd_image->tiles; l_tile_comp = l_tile->comps; l_tccp = p_tcd->tcp->tccps; l_img_comp = p_tcd->image->comps; for (compno = 0; compno < l_tile->numcomps; compno++) { l_current_ptr = l_tile_comp->data; l_nb_elem = (OPJ_UINT32)((l_tile_comp->x1 - l_tile_comp->x0) * (l_tile_comp->y1 - l_tile_comp->y0)); if (l_tccp->qmfbid == 1) { for (i = 0; i < l_nb_elem; ++i) { *l_current_ptr -= l_tccp->m_dc_level_shift ; ++l_current_ptr; } } else { for (i = 0; i < l_nb_elem; ++i) { *l_current_ptr = (*l_current_ptr - l_tccp->m_dc_level_shift) << 11 ; ++l_current_ptr; } } ++l_img_comp; ++l_tccp; ++l_tile_comp; } return OPJ_TRUE; } OPJ_BOOL opj_tcd_mct_encode ( opj_tcd_t *p_tcd ) { opj_tcd_tile_t * l_tile = p_tcd->tcd_image->tiles; opj_tcd_tilecomp_t * l_tile_comp = p_tcd->tcd_image->tiles->comps; OPJ_UINT32 samples = 
(OPJ_UINT32)((l_tile_comp->x1 - l_tile_comp->x0) * (l_tile_comp->y1 - l_tile_comp->y0)); OPJ_UINT32 i; OPJ_BYTE ** l_data = 00; opj_tcp_t * l_tcp = p_tcd->tcp; if(!p_tcd->tcp->mct) { return OPJ_TRUE; } if (p_tcd->tcp->mct == 2) { if (! p_tcd->tcp->m_mct_coding_matrix) { return OPJ_TRUE; } l_data = (OPJ_BYTE **) opj_malloc(l_tile->numcomps*sizeof(OPJ_BYTE*)); if (! l_data) { return OPJ_FALSE; } for (i=0;i<l_tile->numcomps;++i) { l_data[i] = (OPJ_BYTE*) l_tile_comp->data; ++l_tile_comp; } if (! opj_mct_encode_custom(/* MCT data */ (OPJ_BYTE*) p_tcd->tcp->m_mct_coding_matrix, /* size of components */ samples, /* components */ l_data, /* nb of components (i.e. size of pData) */ l_tile->numcomps, /* tells if the data is signed */ p_tcd->image->comps->sgnd) ) { opj_free(l_data); return OPJ_FALSE; } opj_free(l_data); } else if (l_tcp->tccps->qmfbid == 0) { opj_mct_encode_real(l_tile->comps[0].data, l_tile->comps[1].data, l_tile->comps[2].data, samples); } else { opj_mct_encode(l_tile->comps[0].data, l_tile->comps[1].data, l_tile->comps[2].data, samples); } return OPJ_TRUE; } OPJ_BOOL opj_tcd_dwt_encode ( opj_tcd_t *p_tcd ) { opj_tcd_tile_t * l_tile = p_tcd->tcd_image->tiles; opj_tcd_tilecomp_t * l_tile_comp = p_tcd->tcd_image->tiles->comps; opj_tccp_t * l_tccp = p_tcd->tcp->tccps; OPJ_UINT32 compno; for (compno = 0; compno < l_tile->numcomps; ++compno) { if (l_tccp->qmfbid == 1) { if (! opj_dwt_encode(l_tile_comp)) { return OPJ_FALSE; } } else if (l_tccp->qmfbid == 0) { if (! opj_dwt_encode_real(l_tile_comp)) { return OPJ_FALSE; } } ++l_tile_comp; ++l_tccp; } return OPJ_TRUE; } OPJ_BOOL opj_tcd_t1_encode ( opj_tcd_t *p_tcd ) { opj_t1_t * l_t1; const OPJ_FLOAT64 * l_mct_norms; OPJ_UINT32 l_mct_numcomps = 0U; opj_tcp_t * l_tcp = p_tcd->tcp; l_t1 = opj_t1_create(OPJ_TRUE); if (l_t1 == 00) { return OPJ_FALSE; } if (l_tcp->mct == 1) { l_mct_numcomps = 3U; /* irreversible encoding */ if (l_tcp->tccps->qmfbid == 0) { l_mct_norms = opj_mct_get_mct_norms_real(); } else { l_mct_norms = opj_mct_get_mct_norms(); } } else { l_mct_numcomps = p_tcd->image->numcomps; l_mct_norms = (const OPJ_FLOAT64 *) (l_tcp->mct_norms); } if (! opj_t1_encode_cblks(l_t1, p_tcd->tcd_image->tiles , l_tcp, l_mct_norms, l_mct_numcomps)) { opj_t1_destroy(l_t1); return OPJ_FALSE; } opj_t1_destroy(l_t1); return OPJ_TRUE; } OPJ_BOOL opj_tcd_t2_encode (opj_tcd_t *p_tcd, OPJ_BYTE * p_dest_data, OPJ_UINT32 * p_data_written, OPJ_UINT32 p_max_dest_size, opj_codestream_info_t *p_cstr_info ) { opj_t2_t * l_t2; l_t2 = opj_t2_create(p_tcd->image, p_tcd->cp); if (l_t2 == 00) { return OPJ_FALSE; } if (! opj_t2_encode_packets( l_t2, p_tcd->tcd_tileno, p_tcd->tcd_image->tiles, p_tcd->tcp->numlayers, p_dest_data, p_data_written, p_max_dest_size, p_cstr_info, p_tcd->tp_num, p_tcd->tp_pos, p_tcd->cur_pino, FINAL_PASS)) { opj_t2_destroy(l_t2); return OPJ_FALSE; } opj_t2_destroy(l_t2); /*---------------CLEAN-------------------*/ return OPJ_TRUE; } OPJ_BOOL opj_tcd_rate_allocate_encode( opj_tcd_t *p_tcd, OPJ_BYTE * p_dest_data, OPJ_UINT32 p_max_dest_size, opj_codestream_info_t *p_cstr_info ) { opj_cp_t * l_cp = p_tcd->cp; OPJ_UINT32 l_nb_written = 0; if (p_cstr_info) { p_cstr_info->index_write = 0; } if (l_cp->m_specific_param.m_enc.m_disto_alloc|| l_cp->m_specific_param.m_enc.m_fixed_quality) { /* fixed_quality */ /* Normal Rate/distortion allocation */ if (! 
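/* Two rate-control paths meet here (a summary, no behavior added): with
   disto_alloc or fixed_quality set, opj_tcd_rateallocate, invoked below,
   searches a distortion/rate threshold per layer so the packets fit the
   byte budget; otherwise opj_tcd_rateallocate_fixed simply assigns a fixed
   number of coding passes per layer from the user-supplied matrix. */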
opj_tcd_rateallocate(p_tcd, p_dest_data,&l_nb_written, p_max_dest_size, p_cstr_info)) { return OPJ_FALSE; } } else { /* Fixed layer allocation */ opj_tcd_rateallocate_fixed(p_tcd); } return OPJ_TRUE; } OPJ_BOOL opj_tcd_copy_tile_data ( opj_tcd_t *p_tcd, OPJ_BYTE * p_src, OPJ_UINT32 p_src_length ) { OPJ_UINT32 i,j,l_data_size = 0; opj_image_comp_t * l_img_comp = 00; opj_tcd_tilecomp_t * l_tilec = 00; OPJ_UINT32 l_size_comp, l_remaining; OPJ_UINT32 l_nb_elem; l_data_size = opj_tcd_get_encoded_tile_size(p_tcd); if (l_data_size != p_src_length) { return OPJ_FALSE; } l_tilec = p_tcd->tcd_image->tiles->comps; l_img_comp = p_tcd->image->comps; for (i=0;i<p_tcd->image->numcomps;++i) { l_size_comp = l_img_comp->prec >> 3; /*(/ 8)*/ l_remaining = l_img_comp->prec & 7; /* (%8) */ l_nb_elem = (OPJ_UINT32)((l_tilec->x1 - l_tilec->x0) * (l_tilec->y1 - l_tilec->y0)); if (l_remaining) { ++l_size_comp; } if (l_size_comp == 3) { l_size_comp = 4; } switch (l_size_comp) { case 1: { OPJ_CHAR * l_src_ptr = (OPJ_CHAR *) p_src; OPJ_INT32 * l_dest_ptr = l_tilec->data; if (l_img_comp->sgnd) { for (j=0;j<l_nb_elem;++j) { *(l_dest_ptr++) = (OPJ_INT32) (*(l_src_ptr++)); } } else { for (j=0;j<l_nb_elem;++j) { *(l_dest_ptr++) = (*(l_src_ptr++))&0xff; } } p_src = (OPJ_BYTE*) l_src_ptr; } break; case 2: { OPJ_INT32 * l_dest_ptr = l_tilec->data; OPJ_INT16 * l_src_ptr = (OPJ_INT16 *) p_src; if (l_img_comp->sgnd) { for (j=0;j<l_nb_elem;++j) { *(l_dest_ptr++) = (OPJ_INT32) (*(l_src_ptr++)); } } else { for (j=0;j<l_nb_elem;++j) { *(l_dest_ptr++) = (*(l_src_ptr++))&0xffff; } } p_src = (OPJ_BYTE*) l_src_ptr; } break; case 4: { OPJ_INT32 * l_src_ptr = (OPJ_INT32 *) p_src; OPJ_INT32 * l_dest_ptr = l_tilec->data; for (j=0;j<l_nb_elem;++j) { *(l_dest_ptr++) = (OPJ_INT32) (*(l_src_ptr++)); } p_src = (OPJ_BYTE*) l_src_ptr; } break; } ++l_img_comp; ++l_tilec; } return OPJ_TRUE; }
/* * The copyright in this software is being made available under the 2-clauses * BSD License, included below. This software may be subject to other third * party and contributor rights, including patent rights, and no such rights * are granted under this license. * * Copyright (c) 2002-2014, Universite catholique de Louvain (UCL), Belgium * Copyright (c) 2002-2014, Professor Benoit Macq * Copyright (c) 2001-2003, David Janssens * Copyright (c) 2002-2003, Yannick Verschueren * Copyright (c) 2003-2007, Francois-Olivier Devaux * Copyright (c) 2003-2014, Antonin Descampe * Copyright (c) 2005, Herve Drolon, FreeImage Team * Copyright (c) 2006-2007, Parvatha Elangovan * Copyright (c) 2008, 2011-2012, Centre National d'Etudes Spatiales (CNES), FR * Copyright (c) 2012, CS Systemes d'Information, France * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS `AS IS' * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ #include "opj_includes.h" /* ----------------------------------------------------------------------- */ /* TODO MSD: */ #ifdef TODO_MSD void tcd_dump(FILE *fd, opj_tcd_t *tcd, opj_tcd_image_t * img) { int tileno, compno, resno, bandno, precno;/*, cblkno;*/ fprintf(fd, "image {\n"); fprintf(fd, " tw=%d, th=%d x0=%d x1=%d y0=%d y1=%d\n", img->tw, img->th, tcd->image->x0, tcd->image->x1, tcd->image->y0, tcd->image->y1); for (tileno = 0; tileno < img->th * img->tw; tileno++) { opj_tcd_tile_t *tile = &tcd->tcd_image->tiles[tileno]; fprintf(fd, " tile {\n"); fprintf(fd, " x0=%d, y0=%d, x1=%d, y1=%d, numcomps=%d\n", tile->x0, tile->y0, tile->x1, tile->y1, tile->numcomps); for (compno = 0; compno < tile->numcomps; compno++) { opj_tcd_tilecomp_t *tilec = &tile->comps[compno]; fprintf(fd, " tilec {\n"); fprintf(fd, " x0=%d, y0=%d, x1=%d, y1=%d, numresolutions=%d\n", tilec->x0, tilec->y0, tilec->x1, tilec->y1, tilec->numresolutions); for (resno = 0; resno < tilec->numresolutions; resno++) { opj_tcd_resolution_t *res = &tilec->resolutions[resno]; fprintf(fd, "\n res {\n"); fprintf(fd, " x0=%d, y0=%d, x1=%d, y1=%d, pw=%d, ph=%d, numbands=%d\n", res->x0, res->y0, res->x1, res->y1, res->pw, res->ph, res->numbands); for (bandno = 0; bandno < res->numbands; bandno++) { opj_tcd_band_t *band = &res->bands[bandno]; fprintf(fd, " band {\n"); fprintf(fd, " x0=%d, y0=%d, x1=%d, y1=%d, stepsize=%f, numbps=%d\n", band->x0, band->y0, band->x1, band->y1, band->stepsize, band->numbps); for (precno = 0; precno < res->pw * res->ph; precno++) { opj_tcd_precinct_t *prec = &band->precincts[precno]; fprintf(fd, " prec {\n"); fprintf(fd, " x0=%d, y0=%d, x1=%d, y1=%d, cw=%d, ch=%d\n", prec->x0, prec->y0, prec->x1, prec->y1, prec->cw, prec->ch); /* for (cblkno = 0; cblkno < prec->cw * prec->ch; cblkno++) { opj_tcd_cblk_t *cblk = &prec->cblks[cblkno]; fprintf(fd, " cblk {\n"); fprintf(fd, " x0=%d, y0=%d, x1=%d, y1=%d\n", cblk->x0, cblk->y0, cblk->x1, cblk->y1); fprintf(fd, " }\n"); } */ fprintf(fd, " }\n"); } fprintf(fd, " }\n"); } fprintf(fd, " }\n"); } fprintf(fd, " }\n"); } fprintf(fd, " }\n"); } fprintf(fd, "}\n"); } #endif /** * Initializes tile coding/decoding */ static INLINE OPJ_BOOL opj_tcd_init_tile(opj_tcd_t *p_tcd, OPJ_UINT32 p_tile_no, OPJ_BOOL isEncoder, OPJ_FLOAT32 fraction, OPJ_SIZE_T sizeof_block); /** * Allocates memory for a decoding code block. */ static OPJ_BOOL opj_tcd_code_block_dec_allocate (opj_tcd_cblk_dec_t * p_code_block); /** * Deallocates the decoding data of the given precinct. */ static void opj_tcd_code_block_dec_deallocate (opj_tcd_precinct_t * p_precinct); /** * Allocates memory for an encoding code block (but not data). */ static OPJ_BOOL opj_tcd_code_block_enc_allocate (opj_tcd_cblk_enc_t * p_code_block); /** * Allocates data for an encoding code block */ static OPJ_BOOL opj_tcd_code_block_enc_allocate_data (opj_tcd_cblk_enc_t * p_code_block); /** * Deallocates the encoding data of the given precinct. 
*/ static void opj_tcd_code_block_enc_deallocate (opj_tcd_precinct_t * p_precinct); /** Free the memory allocated for encoding @param tcd TCD handle */ static void opj_tcd_free_tile(opj_tcd_t *tcd); static OPJ_BOOL opj_tcd_t2_decode ( opj_tcd_t *p_tcd, OPJ_BYTE * p_src_data, OPJ_UINT32 * p_data_read, OPJ_UINT32 p_max_src_size, opj_codestream_index_t *p_cstr_index ); static OPJ_BOOL opj_tcd_t1_decode (opj_tcd_t *p_tcd); static OPJ_BOOL opj_tcd_dwt_decode (opj_tcd_t *p_tcd); static OPJ_BOOL opj_tcd_mct_decode (opj_tcd_t *p_tcd); static OPJ_BOOL opj_tcd_dc_level_shift_decode (opj_tcd_t *p_tcd); static OPJ_BOOL opj_tcd_dc_level_shift_encode ( opj_tcd_t *p_tcd ); static OPJ_BOOL opj_tcd_mct_encode ( opj_tcd_t *p_tcd ); static OPJ_BOOL opj_tcd_dwt_encode ( opj_tcd_t *p_tcd ); static OPJ_BOOL opj_tcd_t1_encode ( opj_tcd_t *p_tcd ); static OPJ_BOOL opj_tcd_t2_encode ( opj_tcd_t *p_tcd, OPJ_BYTE * p_dest_data, OPJ_UINT32 * p_data_written, OPJ_UINT32 p_max_dest_size, opj_codestream_info_t *p_cstr_info ); static OPJ_BOOL opj_tcd_rate_allocate_encode( opj_tcd_t *p_tcd, OPJ_BYTE * p_dest_data, OPJ_UINT32 p_max_dest_size, opj_codestream_info_t *p_cstr_info ); /* ----------------------------------------------------------------------- */ /** Create a new TCD handle */ opj_tcd_t* opj_tcd_create(OPJ_BOOL p_is_decoder) { opj_tcd_t *l_tcd = 00; /* create the tcd structure */ l_tcd = (opj_tcd_t*) opj_calloc(1,sizeof(opj_tcd_t)); if (!l_tcd) { return 00; } l_tcd->m_is_decoder = p_is_decoder ? 1 : 0; l_tcd->tcd_image = (opj_tcd_image_t*)opj_calloc(1,sizeof(opj_tcd_image_t)); if (!l_tcd->tcd_image) { opj_free(l_tcd); return 00; } return l_tcd; } /* ----------------------------------------------------------------------- */ void opj_tcd_rateallocate_fixed(opj_tcd_t *tcd) { OPJ_UINT32 layno; for (layno = 0; layno < tcd->tcp->numlayers; layno++) { opj_tcd_makelayer_fixed(tcd, layno, 1); } } void opj_tcd_makelayer( opj_tcd_t *tcd, OPJ_UINT32 layno, OPJ_FLOAT64 thresh, OPJ_UINT32 final) { OPJ_UINT32 compno, resno, bandno, precno, cblkno; OPJ_UINT32 passno; opj_tcd_tile_t *tcd_tile = tcd->tcd_image->tiles; tcd_tile->distolayer[layno] = 0; /* fixed_quality */ for (compno = 0; compno < tcd_tile->numcomps; compno++) { opj_tcd_tilecomp_t *tilec = &tcd_tile->comps[compno]; for (resno = 0; resno < tilec->numresolutions; resno++) { opj_tcd_resolution_t *res = &tilec->resolutions[resno]; for (bandno = 0; bandno < res->numbands; bandno++) { opj_tcd_band_t *band = &res->bands[bandno]; for (precno = 0; precno < res->pw * res->ph; precno++) { opj_tcd_precinct_t *prc = &band->precincts[precno]; for (cblkno = 0; cblkno < prc->cw * prc->ch; cblkno++) { opj_tcd_cblk_enc_t *cblk = &prc->cblks.enc[cblkno]; opj_tcd_layer_t *layer = &cblk->layers[layno]; OPJ_UINT32 n; if (layno == 0) { cblk->numpassesinlayers = 0; } n = cblk->numpassesinlayers; for (passno = cblk->numpassesinlayers; passno < cblk->totalpasses; passno++) { OPJ_UINT32 dr; OPJ_FLOAT64 dd; opj_tcd_pass_t *pass = &cblk->passes[passno]; if (n == 0) { dr = pass->rate; dd = pass->distortiondec; } else { dr = pass->rate - cblk->passes[n - 1].rate; dd = pass->distortiondec - cblk->passes[n - 1].distortiondec; } if (!dr) { if (dd != 0) n = passno + 1; continue; } if (dd / dr >= thresh) n = passno + 1; } layer->numpasses = n - cblk->numpassesinlayers; if (!layer->numpasses) { layer->disto = 0; continue; } if (cblk->numpassesinlayers == 0) { layer->len = cblk->passes[n - 1].rate; layer->data = cblk->data; layer->disto = cblk->passes[n - 1].distortiondec; } else { layer->len = 
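/* The expression completed below is the byte count this layer adds: passes
   already committed to earlier layers end at cumulative rate
   passes[numpassesinlayers - 1].rate, and this layer extends that to
   passes[n - 1].rate. With illustrative cumulative rates 10, 25, 40, two
   passes already layered and n = 3, len = 40 - 25 = 15 bytes at data + 25. */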
cblk->passes[n - 1].rate - cblk->passes[cblk->numpassesinlayers - 1].rate; layer->data = cblk->data + cblk->passes[cblk->numpassesinlayers - 1].rate; layer->disto = cblk->passes[n - 1].distortiondec - cblk->passes[cblk->numpassesinlayers - 1].distortiondec; } tcd_tile->distolayer[layno] += layer->disto; /* fixed_quality */ if (final) cblk->numpassesinlayers = n; } } } } } } void opj_tcd_makelayer_fixed(opj_tcd_t *tcd, OPJ_UINT32 layno, OPJ_UINT32 final) { OPJ_UINT32 compno, resno, bandno, precno, cblkno; OPJ_INT32 value; /*, matrice[tcd_tcp->numlayers][tcd_tile->comps[0].numresolutions][3]; */ OPJ_INT32 matrice[10][10][3]; OPJ_UINT32 i, j, k; opj_cp_t *cp = tcd->cp; opj_tcd_tile_t *tcd_tile = tcd->tcd_image->tiles; opj_tcp_t *tcd_tcp = tcd->tcp; for (compno = 0; compno < tcd_tile->numcomps; compno++) { opj_tcd_tilecomp_t *tilec = &tcd_tile->comps[compno]; for (i = 0; i < tcd_tcp->numlayers; i++) { for (j = 0; j < tilec->numresolutions; j++) { for (k = 0; k < 3; k++) { matrice[i][j][k] = (OPJ_INT32) ((OPJ_FLOAT32)cp->m_specific_param.m_enc.m_matrice[i * tilec->numresolutions * 3 + j * 3 + k] * (OPJ_FLOAT32) (tcd->image->comps[compno].prec / 16.0)); } } } for (resno = 0; resno < tilec->numresolutions; resno++) { opj_tcd_resolution_t *res = &tilec->resolutions[resno]; for (bandno = 0; bandno < res->numbands; bandno++) { opj_tcd_band_t *band = &res->bands[bandno]; for (precno = 0; precno < res->pw * res->ph; precno++) { opj_tcd_precinct_t *prc = &band->precincts[precno]; for (cblkno = 0; cblkno < prc->cw * prc->ch; cblkno++) { opj_tcd_cblk_enc_t *cblk = &prc->cblks.enc[cblkno]; opj_tcd_layer_t *layer = &cblk->layers[layno]; OPJ_UINT32 n; OPJ_INT32 imsb = (OPJ_INT32)(tcd->image->comps[compno].prec - cblk->numbps); /* number of bit-planes equal to zero */ /* Correct the coefficient matrix to include the IMSB information */ if (layno == 0) { value = matrice[layno][resno][bandno]; if (imsb >= value) { value = 0; } else { value -= imsb; } } else { value = matrice[layno][resno][bandno] - matrice[layno - 1][resno][bandno]; if (imsb >= matrice[layno - 1][resno][bandno]) { value -= (imsb - matrice[layno - 1][resno][bandno]); if (value < 0) { value = 0; } } } if (layno == 0) { cblk->numpassesinlayers = 0; } n = cblk->numpassesinlayers; if (cblk->numpassesinlayers == 0) { if (value != 0) { n = 3 * (OPJ_UINT32)value - 2 + cblk->numpassesinlayers; } else { n = cblk->numpassesinlayers; } } else { n = 3 * (OPJ_UINT32)value + cblk->numpassesinlayers; } layer->numpasses = n - cblk->numpassesinlayers; if (!layer->numpasses) continue; if (cblk->numpassesinlayers == 0) { layer->len = cblk->passes[n - 1].rate; layer->data = cblk->data; } else { layer->len = cblk->passes[n - 1].rate - cblk->passes[cblk->numpassesinlayers - 1].rate; layer->data = cblk->data + cblk->passes[cblk->numpassesinlayers - 1].rate; } if (final) cblk->numpassesinlayers = n; } } } } } } OPJ_BOOL opj_tcd_rateallocate( opj_tcd_t *tcd, OPJ_BYTE *dest, OPJ_UINT32 * p_data_written, OPJ_UINT32 len, opj_codestream_info_t *cstr_info) { OPJ_UINT32 compno, resno, bandno, precno, cblkno, layno; OPJ_UINT32 passno; OPJ_FLOAT64 min, max; OPJ_FLOAT64 cumdisto[100]; /* fixed_quality */ const OPJ_FLOAT64 K = 1; /* 1.1; fixed_quality */ OPJ_FLOAT64 maxSE = 0; opj_cp_t *cp = tcd->cp; opj_tcd_tile_t *tcd_tile = tcd->tcd_image->tiles; opj_tcp_t *tcd_tcp = tcd->tcp; min = DBL_MAX; max = 0; tcd_tile->numpix = 0; /* fixed_quality */ for (compno = 0; compno < tcd_tile->numcomps; compno++) { opj_tcd_tilecomp_t *tilec = &tcd_tile->comps[compno]; tilec->numpix =
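/* The nested loops below make one scan over every coding pass to (a) bound
   the rate-distortion slope dd/dr between min and max, seeding the
   per-layer bisection further down, and (b) accumulate numpix per tile and
   per component, from which maxSE, the largest possible squared error, is
   derived. */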
0; for (resno = 0; resno < tilec->numresolutions; resno++) { opj_tcd_resolution_t *res = &tilec->resolutions[resno]; for (bandno = 0; bandno < res->numbands; bandno++) { opj_tcd_band_t *band = &res->bands[bandno]; for (precno = 0; precno < res->pw * res->ph; precno++) { opj_tcd_precinct_t *prc = &band->precincts[precno]; for (cblkno = 0; cblkno < prc->cw * prc->ch; cblkno++) { opj_tcd_cblk_enc_t *cblk = &prc->cblks.enc[cblkno]; for (passno = 0; passno < cblk->totalpasses; passno++) { opj_tcd_pass_t *pass = &cblk->passes[passno]; OPJ_INT32 dr; OPJ_FLOAT64 dd, rdslope; if (passno == 0) { dr = (OPJ_INT32)pass->rate; dd = pass->distortiondec; } else { dr = (OPJ_INT32)(pass->rate - cblk->passes[passno - 1].rate); dd = pass->distortiondec - cblk->passes[passno - 1].distortiondec; } if (dr == 0) { continue; } rdslope = dd / dr; if (rdslope < min) { min = rdslope; } if (rdslope > max) { max = rdslope; } } /* passno */ /* fixed_quality */ tcd_tile->numpix += ((cblk->x1 - cblk->x0) * (cblk->y1 - cblk->y0)); tilec->numpix += ((cblk->x1 - cblk->x0) * (cblk->y1 - cblk->y0)); } /* cbklno */ } /* precno */ } /* bandno */ } /* resno */ maxSE += (((OPJ_FLOAT64)(1 << tcd->image->comps[compno].prec) - 1.0) * ((OPJ_FLOAT64)(1 << tcd->image->comps[compno].prec) -1.0)) * ((OPJ_FLOAT64)(tilec->numpix)); } /* compno */ /* index file */ if(cstr_info) { opj_tile_info_t *tile_info = &cstr_info->tile[tcd->tcd_tileno]; tile_info->numpix = tcd_tile->numpix; tile_info->distotile = tcd_tile->distotile; tile_info->thresh = (OPJ_FLOAT64 *) opj_malloc(tcd_tcp->numlayers * sizeof(OPJ_FLOAT64)); if (!tile_info->thresh) { /* FIXME event manager error callback */ return OPJ_FALSE; } } for (layno = 0; layno < tcd_tcp->numlayers; layno++) { OPJ_FLOAT64 lo = min; OPJ_FLOAT64 hi = max; OPJ_BOOL success = OPJ_FALSE; OPJ_UINT32 maxlen = tcd_tcp->rates[layno] ? opj_uint_min(((OPJ_UINT32) ceil(tcd_tcp->rates[layno])), len) : len; OPJ_FLOAT64 goodthresh = 0; OPJ_FLOAT64 stable_thresh = 0; OPJ_UINT32 i; OPJ_FLOAT64 distotarget; /* fixed_quality */ /* fixed_quality */ distotarget = tcd_tile->distotile - ((K * maxSE) / pow((OPJ_FLOAT32)10, tcd_tcp->distoratio[layno] / 10)); /* Don't try to find an optimal threshold but rather take everything not included yet, if -r xx,yy,zz,0 (disto_alloc == 1 and rates == 0) -q xx,yy,zz,0 (fixed_quality == 1 and distoratio == 0) ==> possible to have some lossy layers and the last layer for sure lossless */ if ( ((cp->m_specific_param.m_enc.m_disto_alloc==1) && (tcd_tcp->rates[layno]>0)) || ((cp->m_specific_param.m_enc.m_fixed_quality==1) && (tcd_tcp->distoratio[layno]>0))) { opj_t2_t*t2 = opj_t2_create(tcd->image, cp); OPJ_FLOAT64 thresh = 0; if (t2 == 00) { return OPJ_FALSE; } for (i = 0; i < 128; ++i) { OPJ_FLOAT64 distoachieved = 0; /* fixed_quality */ thresh = (lo + hi) / 2; opj_tcd_makelayer(tcd, layno, thresh, 0); if (cp->m_specific_param.m_enc.m_fixed_quality) { /* fixed_quality */ if(OPJ_IS_CINEMA(cp->rsiz)){ if (! opj_t2_encode_packets(t2,tcd->tcd_tileno, tcd_tile, layno + 1, dest, p_data_written, maxlen, cstr_info,tcd->cur_tp_num,tcd->tp_pos,tcd->cur_pino,THRESH_CALC)) { lo = thresh; continue; } else { distoachieved = layno == 0 ? tcd_tile->distolayer[0] : cumdisto[layno - 1] + tcd_tile->distolayer[layno]; if (distoachieved < distotarget) { hi=thresh; stable_thresh = thresh; continue; }else{ lo=thresh; } } }else{ distoachieved = (layno == 0) ? 
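/* Threshold search, in brief: each of the 128 iterations tries
   thresh = (lo + hi) / 2, builds trial layers with opj_tcd_makelayer, and
   simulates packet emission in THRESH_CALC mode. When the packets overflow
   maxlen, lo is raised to prune more passes; otherwise hi is lowered and
   the last feasible value is remembered in stable_thresh: a plain bisection
   over the slope threshold. */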
tcd_tile->distolayer[0] : (cumdisto[layno - 1] + tcd_tile->distolayer[layno]); if (distoachieved < distotarget) { hi = thresh; stable_thresh = thresh; continue; } lo = thresh; } } else { if (! opj_t2_encode_packets(t2, tcd->tcd_tileno, tcd_tile, layno + 1, dest,p_data_written, maxlen, cstr_info,tcd->cur_tp_num,tcd->tp_pos,tcd->cur_pino,THRESH_CALC)) { /* TODO: what to do with l ??? seek / tell ??? */ /* opj_event_msg(tcd->cinfo, EVT_INFO, "rate alloc: len=%d, max=%d\n", l, maxlen); */ lo = thresh; continue; } hi = thresh; stable_thresh = thresh; } } success = OPJ_TRUE; goodthresh = stable_thresh == 0? thresh : stable_thresh; opj_t2_destroy(t2); } else { success = OPJ_TRUE; goodthresh = min; } if (!success) { return OPJ_FALSE; } if(cstr_info) { /* Threshold for Marcela Index */ cstr_info->tile[tcd->tcd_tileno].thresh[layno] = goodthresh; } opj_tcd_makelayer(tcd, layno, goodthresh, 1); /* fixed_quality */ cumdisto[layno] = (layno == 0) ? tcd_tile->distolayer[0] : (cumdisto[layno - 1] + tcd_tile->distolayer[layno]); } return OPJ_TRUE; } OPJ_BOOL opj_tcd_init( opj_tcd_t *p_tcd, opj_image_t * p_image, opj_cp_t * p_cp ) { p_tcd->image = p_image; p_tcd->cp = p_cp; p_tcd->tcd_image->tiles = (opj_tcd_tile_t *) opj_calloc(1,sizeof(opj_tcd_tile_t)); if (! p_tcd->tcd_image->tiles) { return OPJ_FALSE; } p_tcd->tcd_image->tiles->comps = (opj_tcd_tilecomp_t *) opj_calloc(p_image->numcomps,sizeof(opj_tcd_tilecomp_t)); if (! p_tcd->tcd_image->tiles->comps ) { return OPJ_FALSE; } p_tcd->tcd_image->tiles->numcomps = p_image->numcomps; p_tcd->tp_pos = p_cp->m_specific_param.m_enc.m_tp_pos; return OPJ_TRUE; } /** Destroy a previously created TCD handle */ void opj_tcd_destroy(opj_tcd_t *tcd) { if (tcd) { opj_tcd_free_tile(tcd); if (tcd->tcd_image) { opj_free(tcd->tcd_image); tcd->tcd_image = 00; } opj_free(tcd); } } OPJ_BOOL opj_alloc_tile_component_data(opj_tcd_tilecomp_t *l_tilec) { if ((l_tilec->data == 00) || ((l_tilec->data_size_needed > l_tilec->data_size) && (l_tilec->ownsData == OPJ_FALSE))) { l_tilec->data = (OPJ_INT32 *) opj_malloc(l_tilec->data_size_needed); if (! l_tilec->data ) { return OPJ_FALSE; } /*fprintf(stderr, "tAllocate data of tilec (int): %d x OPJ_UINT32n",l_data_size);*/ l_tilec->data_size = l_tilec->data_size_needed; l_tilec->ownsData = OPJ_TRUE; } else if (l_tilec->data_size_needed > l_tilec->data_size) { OPJ_INT32 * new_data = (OPJ_INT32 *) opj_realloc(l_tilec->data, l_tilec->data_size_needed); /* opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to handle tile datan"); */ /* fprintf(stderr, "Not enough memory to handle tile data"); */ if (! 
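/* The realloc result goes into new_data first so that, completed below, a
   failure can still free the original buffer. The general pattern, with
   hypothetical names:
       tmp = realloc(p, n);
       if (tmp == NULL) { free(p); p = NULL; return OPJ_FALSE; }
       p = tmp;
   Writing p = realloc(p, n) directly would leak p when realloc fails. */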
new_data) { opj_free(l_tilec->data); l_tilec->data = NULL; l_tilec->data_size = 0; l_tilec->data_size_needed = 0; l_tilec->ownsData = OPJ_FALSE; return OPJ_FALSE; } l_tilec->data = new_data; /*fprintf(stderr, "tReallocate data of tilec (int): from %d to %d x OPJ_UINT32n", l_tilec->data_size, l_data_size);*/ l_tilec->data_size = l_tilec->data_size_needed; l_tilec->ownsData = OPJ_TRUE; } return OPJ_TRUE; } /* ----------------------------------------------------------------------- */ static INLINE OPJ_BOOL opj_tcd_init_tile(opj_tcd_t *p_tcd, OPJ_UINT32 p_tile_no, OPJ_BOOL isEncoder, OPJ_FLOAT32 fraction, OPJ_SIZE_T sizeof_block) { OPJ_UINT32 (*l_gain_ptr)(OPJ_UINT32) = 00; OPJ_UINT32 compno, resno, bandno, precno, cblkno; opj_tcp_t * l_tcp = 00; opj_cp_t * l_cp = 00; opj_tcd_tile_t * l_tile = 00; opj_tccp_t *l_tccp = 00; opj_tcd_tilecomp_t *l_tilec = 00; opj_image_comp_t * l_image_comp = 00; opj_tcd_resolution_t *l_res = 00; opj_tcd_band_t *l_band = 00; opj_stepsize_t * l_step_size = 00; opj_tcd_precinct_t *l_current_precinct = 00; opj_image_t *l_image = 00; OPJ_UINT32 p,q; OPJ_UINT32 l_level_no; OPJ_UINT32 l_pdx, l_pdy; OPJ_UINT32 l_gain; OPJ_INT32 l_x0b, l_y0b; /* extent of precincts , top left, bottom right**/ OPJ_INT32 l_tl_prc_x_start, l_tl_prc_y_start, l_br_prc_x_end, l_br_prc_y_end; /* number of precinct for a resolution */ OPJ_UINT32 l_nb_precincts; /* room needed to store l_nb_precinct precinct for a resolution */ OPJ_UINT32 l_nb_precinct_size; /* number of code blocks for a precinct*/ OPJ_UINT32 l_nb_code_blocks; /* room needed to store l_nb_code_blocks code blocks for a precinct*/ OPJ_UINT32 l_nb_code_blocks_size; /* size of data for a tile */ OPJ_UINT32 l_data_size; l_cp = p_tcd->cp; l_tcp = &(l_cp->tcps[p_tile_no]); l_tile = p_tcd->tcd_image->tiles; l_tccp = l_tcp->tccps; l_tilec = l_tile->comps; l_image = p_tcd->image; l_image_comp = p_tcd->image->comps; p = p_tile_no % l_cp->tw; /* tile coordinates */ q = p_tile_no / l_cp->tw; /*fprintf(stderr, "Tile coordinate = %d,%d\n", p, q);*/ /* 4 borders of the tile rescale on the image if necessary */ l_tile->x0 = (OPJ_INT32)opj_uint_max(l_cp->tx0 + p * l_cp->tdx, l_image->x0); l_tile->y0 = (OPJ_INT32)opj_uint_max(l_cp->ty0 + q * l_cp->tdy, l_image->y0); l_tile->x1 = (OPJ_INT32)opj_uint_min(l_cp->tx0 + (p + 1) * l_cp->tdx, l_image->x1); l_tile->y1 = (OPJ_INT32)opj_uint_min(l_cp->ty0 + (q + 1) * l_cp->tdy, l_image->y1); /* testcase 1888.pdf.asan.35.988 */ if (l_tccp->numresolutions == 0) { fprintf(stderr, "tiles require at least one resolution\n"); return OPJ_FALSE; } /*fprintf(stderr, "Tile border = %d,%d,%d,%d\n", l_tile->x0, l_tile->y0,l_tile->x1,l_tile->y1);*/ /*tile->numcomps = image->numcomps; */ for (compno = 0; compno < l_tile->numcomps; ++compno) { /*fprintf(stderr, "compno = %d/%d\n", compno, l_tile->numcomps);*/ l_image_comp->resno_decoded = 0; /* border of each l_tile component (global) */ l_tilec->x0 = opj_int_ceildiv(l_tile->x0, (OPJ_INT32)l_image_comp->dx); l_tilec->y0 = opj_int_ceildiv(l_tile->y0, (OPJ_INT32)l_image_comp->dy); l_tilec->x1 = opj_int_ceildiv(l_tile->x1, (OPJ_INT32)l_image_comp->dx); l_tilec->y1 = opj_int_ceildiv(l_tile->y1, (OPJ_INT32)l_image_comp->dy); /*fprintf(stderr, "\tTile compo border = %d,%d,%d,%d\n", l_tilec->x0, l_tilec->y0,l_tilec->x1,l_tilec->y1);*/ /* compute l_data_size with overflow check */ l_data_size = (OPJ_UINT32)(l_tilec->x1 - l_tilec->x0); if ((((OPJ_UINT32)-1) / l_data_size) < (OPJ_UINT32)(l_tilec->y1 - l_tilec->y0)) { /* TODO event */ return OPJ_FALSE; } l_data_size = l_data_size * 
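/* Same overflow guard as the width check just above: before multiplying
   unsigned a by b in 32 bits, the code tests ((OPJ_UINT32)-1) / a < b,
   i.e. whether a * b would exceed UINT32_MAX, and bails out rather than
   wrap around to an undersized allocation. */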
(OPJ_UINT32)(l_tilec->y1 - l_tilec->y0); if ((((OPJ_UINT32)-1) / (OPJ_UINT32)sizeof(OPJ_UINT32)) < l_data_size) { /* TODO event */ return OPJ_FALSE; } l_data_size = l_data_size * (OPJ_UINT32)sizeof(OPJ_UINT32); l_tilec->numresolutions = l_tccp->numresolutions; if (l_tccp->numresolutions < l_cp->m_specific_param.m_dec.m_reduce) { l_tilec->minimum_num_resolutions = 1; } else { l_tilec->minimum_num_resolutions = l_tccp->numresolutions - l_cp->m_specific_param.m_dec.m_reduce; } l_tilec->data_size_needed = l_data_size; if (p_tcd->m_is_decoder && !opj_alloc_tile_component_data(l_tilec)) { return OPJ_FALSE; } l_data_size = l_tilec->numresolutions * (OPJ_UINT32)sizeof(opj_tcd_resolution_t); if (l_tilec->resolutions == 00) { l_tilec->resolutions = (opj_tcd_resolution_t *) opj_malloc(l_data_size); if (! l_tilec->resolutions ) { return OPJ_FALSE; } /*fprintf(stderr, "\tAllocate resolutions of tilec (opj_tcd_resolution_t): %d\n",l_data_size);*/ l_tilec->resolutions_size = l_data_size; memset(l_tilec->resolutions,0,l_data_size); } else if (l_data_size > l_tilec->resolutions_size) { opj_tcd_resolution_t* new_resolutions = (opj_tcd_resolution_t *) opj_realloc(l_tilec->resolutions, l_data_size); if (! new_resolutions) { /* opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to tile resolutions\n"); */ fprintf(stderr, "Not enough memory to tile resolutions\n"); opj_free(l_tilec->resolutions); l_tilec->resolutions = NULL; l_tilec->resolutions_size = 0; return OPJ_FALSE; } l_tilec->resolutions = new_resolutions; /*fprintf(stderr, "\tReallocate data of tilec (int): from %d to %d x OPJ_UINT32\n", l_tilec->resolutions_size, l_data_size);*/ memset(((OPJ_BYTE*) l_tilec->resolutions)+l_tilec->resolutions_size,0,l_data_size - l_tilec->resolutions_size); l_tilec->resolutions_size = l_data_size; } l_level_no = l_tilec->numresolutions - 1; l_res = l_tilec->resolutions; l_step_size = l_tccp->stepsizes; if (l_tccp->qmfbid == 0) { l_gain_ptr = &opj_dwt_getgain_real; } else { l_gain_ptr = &opj_dwt_getgain; } /*fprintf(stderr, "\tlevel_no=%d\n",l_level_no);*/ for (resno = 0; resno < l_tilec->numresolutions; ++resno) { /*fprintf(stderr, "\t\tresno = %d/%d\n", resno, l_tilec->numresolutions);*/ OPJ_INT32 tlcbgxstart, tlcbgystart /*, brcbgxend, brcbgyend*/; OPJ_UINT32 cbgwidthexpn, cbgheightexpn; OPJ_UINT32 cblkwidthexpn, cblkheightexpn; /* border for each resolution level (global) */ l_res->x0 = opj_int_ceildivpow2(l_tilec->x0, (OPJ_INT32)l_level_no); l_res->y0 = opj_int_ceildivpow2(l_tilec->y0, (OPJ_INT32)l_level_no); l_res->x1 = opj_int_ceildivpow2(l_tilec->x1, (OPJ_INT32)l_level_no); l_res->y1 = opj_int_ceildivpow2(l_tilec->y1, (OPJ_INT32)l_level_no); /*fprintf(stderr, "\t\t\tres_x0= %d, res_y0 =%d, res_x1=%d, res_y1=%d\n", l_res->x0, l_res->y0, l_res->x1, l_res->y1);*/ /* p. 35, table A-23, ISO/IEC FDIS154444-1 : 2000 (18 august 2000) */ l_pdx = l_tccp->prcw[resno]; l_pdy = l_tccp->prch[resno]; /*fprintf(stderr, "\t\t\tpdx=%d, pdy=%d\n", l_pdx, l_pdy);*/ /* p. 
64, B.6, ISO/IEC FDIS15444-1 : 2000 (18 august 2000) */ l_tl_prc_x_start = opj_int_floordivpow2(l_res->x0, (OPJ_INT32)l_pdx) << l_pdx; l_tl_prc_y_start = opj_int_floordivpow2(l_res->y0, (OPJ_INT32)l_pdy) << l_pdy; l_br_prc_x_end = opj_int_ceildivpow2(l_res->x1, (OPJ_INT32)l_pdx) << l_pdx; l_br_prc_y_end = opj_int_ceildivpow2(l_res->y1, (OPJ_INT32)l_pdy) << l_pdy; /*fprintf(stderr, "\t\t\tprc_x_start=%d, prc_y_start=%d, br_prc_x_end=%d, br_prc_y_end=%d \n", l_tl_prc_x_start, l_tl_prc_y_start, l_br_prc_x_end ,l_br_prc_y_end );*/ l_res->pw = (l_res->x0 == l_res->x1) ? 0 : (OPJ_UINT32)((l_br_prc_x_end - l_tl_prc_x_start) >> l_pdx); l_res->ph = (l_res->y0 == l_res->y1) ? 0 : (OPJ_UINT32)((l_br_prc_y_end - l_tl_prc_y_start) >> l_pdy); /*fprintf(stderr, "\t\t\tres_pw=%d, res_ph=%d\n", l_res->pw, l_res->ph );*/ l_nb_precincts = l_res->pw * l_res->ph; l_nb_precinct_size = l_nb_precincts * (OPJ_UINT32)sizeof(opj_tcd_precinct_t); if (resno == 0) { tlcbgxstart = l_tl_prc_x_start; tlcbgystart = l_tl_prc_y_start; /*brcbgxend = l_br_prc_x_end;*/ /* brcbgyend = l_br_prc_y_end;*/ cbgwidthexpn = l_pdx; cbgheightexpn = l_pdy; l_res->numbands = 1; } else { tlcbgxstart = opj_int_ceildivpow2(l_tl_prc_x_start, 1); tlcbgystart = opj_int_ceildivpow2(l_tl_prc_y_start, 1); /*brcbgxend = opj_int_ceildivpow2(l_br_prc_x_end, 1);*/ /*brcbgyend = opj_int_ceildivpow2(l_br_prc_y_end, 1);*/ cbgwidthexpn = l_pdx - 1; cbgheightexpn = l_pdy - 1; l_res->numbands = 3; } cblkwidthexpn = opj_uint_min(l_tccp->cblkw, cbgwidthexpn); cblkheightexpn = opj_uint_min(l_tccp->cblkh, cbgheightexpn); l_band = l_res->bands; for (bandno = 0; bandno < l_res->numbands; ++bandno) { OPJ_INT32 numbps; /*fprintf(stderr, "\t\t\tband_no=%d/%d\n", bandno, l_res->numbands );*/ if (resno == 0) { l_band->bandno = 0 ; l_band->x0 = opj_int_ceildivpow2(l_tilec->x0, (OPJ_INT32)l_level_no); l_band->y0 = opj_int_ceildivpow2(l_tilec->y0, (OPJ_INT32)l_level_no); l_band->x1 = opj_int_ceildivpow2(l_tilec->x1, (OPJ_INT32)l_level_no); l_band->y1 = opj_int_ceildivpow2(l_tilec->y1, (OPJ_INT32)l_level_no); } else { l_band->bandno = bandno + 1; /* x0b = 1 if bandno = 1 or 3 */ l_x0b = l_band->bandno&1; /* y0b = 1 if bandno = 2 or 3 */ l_y0b = (OPJ_INT32)((l_band->bandno)>>1); /* l_band border (global) */ l_band->x0 = opj_int_ceildivpow2(l_tilec->x0 - (1 << l_level_no) * l_x0b, (OPJ_INT32)(l_level_no + 1)); l_band->y0 = opj_int_ceildivpow2(l_tilec->y0 - (1 << l_level_no) * l_y0b, (OPJ_INT32)(l_level_no + 1)); l_band->x1 = opj_int_ceildivpow2(l_tilec->x1 - (1 << l_level_no) * l_x0b, (OPJ_INT32)(l_level_no + 1)); l_band->y1 = opj_int_ceildivpow2(l_tilec->y1 - (1 << l_level_no) * l_y0b, (OPJ_INT32)(l_level_no + 1)); } /** avoid an if with storing function pointer */ l_gain = (*l_gain_ptr) (l_band->bandno); numbps = (OPJ_INT32)(l_image_comp->prec + l_gain); l_band->stepsize = (OPJ_FLOAT32)(((1.0 + l_step_size->mant / 2048.0) * pow(2.0, (OPJ_INT32) (numbps - l_step_size->expn)))) * fraction; l_band->numbps = l_step_size->expn + (OPJ_INT32)l_tccp->numgbits - 1; /* WHY -1 ? */ if (! l_band->precincts) { l_band->precincts = (opj_tcd_precinct_t *) opj_malloc( /*3 * */ l_nb_precinct_size); if (! 
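/* The stepsize set above follows the JPEG 2000 quantization formula:
   delta_b = 2^(R_b - expn) * (1 + mant / 2^11), with R_b the component
   precision plus the band gain. The extra "fraction" factor is 1.0 when
   encoding and 0.5 when decoding (see the opj_tcd_init_*_tile wrappers),
   which appears to implement the decoder's half-step reconstruction bias. */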
l_band->precincts) { return OPJ_FALSE; } /*fprintf(stderr, "\t\t\t\tAllocate precincts of a band (opj_tcd_precinct_t): %d\n",l_nb_precinct_size); */ memset(l_band->precincts,0,l_nb_precinct_size); l_band->precincts_data_size = l_nb_precinct_size; } else if (l_band->precincts_data_size < l_nb_precinct_size) { opj_tcd_precinct_t * new_precincts = (opj_tcd_precinct_t *) opj_realloc(l_band->precincts,/*3 * */ l_nb_precinct_size); if (! new_precincts) { /* opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to handle band precincts\n"); */ fprintf(stderr, "Not enough memory to handle band precincts\n"); opj_free(l_band->precincts); l_band->precincts = NULL; l_band->precincts_data_size = 0; return OPJ_FALSE; } l_band->precincts = new_precincts; /*fprintf(stderr, "\t\t\t\tReallocate precincts of a band (opj_tcd_precinct_t): from %d to %d\n",l_band->precincts_data_size, l_nb_precinct_size);*/ memset(((OPJ_BYTE *) l_band->precincts) + l_band->precincts_data_size,0,l_nb_precinct_size - l_band->precincts_data_size); l_band->precincts_data_size = l_nb_precinct_size; } l_current_precinct = l_band->precincts; for (precno = 0; precno < l_nb_precincts; ++precno) { OPJ_INT32 tlcblkxstart, tlcblkystart, brcblkxend, brcblkyend; OPJ_INT32 cbgxstart = tlcbgxstart + (OPJ_INT32)(precno % l_res->pw) * (1 << cbgwidthexpn); OPJ_INT32 cbgystart = tlcbgystart + (OPJ_INT32)(precno / l_res->pw) * (1 << cbgheightexpn); OPJ_INT32 cbgxend = cbgxstart + (1 << cbgwidthexpn); OPJ_INT32 cbgyend = cbgystart + (1 << cbgheightexpn); /*fprintf(stderr, "\t precno=%d; bandno=%d, resno=%d; compno=%d\n", precno, bandno , resno, compno);*/ /*fprintf(stderr, "\t tlcbgxstart(=%d) + (precno(=%d) percent res->pw(=%d)) * (1 << cbgwidthexpn(=%d)) \n",tlcbgxstart,precno,l_res->pw,cbgwidthexpn);*/ /* precinct size (global) */ /*fprintf(stderr, "\t cbgxstart=%d, l_band->x0 = %d \n",cbgxstart, l_band->x0);*/ l_current_precinct->x0 = opj_int_max(cbgxstart, l_band->x0); l_current_precinct->y0 = opj_int_max(cbgystart, l_band->y0); l_current_precinct->x1 = opj_int_min(cbgxend, l_band->x1); l_current_precinct->y1 = opj_int_min(cbgyend, l_band->y1); /*fprintf(stderr, "\t prc_x0=%d; prc_y0=%d, prc_x1=%d; prc_y1=%d\n",l_current_precinct->x0, l_current_precinct->y0 ,l_current_precinct->x1, l_current_precinct->y1);*/ tlcblkxstart = opj_int_floordivpow2(l_current_precinct->x0, (OPJ_INT32)cblkwidthexpn) << cblkwidthexpn; /*fprintf(stderr, "\t tlcblkxstart =%d\n",tlcblkxstart );*/ tlcblkystart = opj_int_floordivpow2(l_current_precinct->y0, (OPJ_INT32)cblkheightexpn) << cblkheightexpn; /*fprintf(stderr, "\t tlcblkystart =%d\n",tlcblkystart );*/ brcblkxend = opj_int_ceildivpow2(l_current_precinct->x1, (OPJ_INT32)cblkwidthexpn) << cblkwidthexpn; /*fprintf(stderr, "\t brcblkxend =%d\n",brcblkxend );*/ brcblkyend = opj_int_ceildivpow2(l_current_precinct->y1, (OPJ_INT32)cblkheightexpn) << cblkheightexpn; /*fprintf(stderr, "\t brcblkyend =%d\n",brcblkyend );*/ l_current_precinct->cw = (OPJ_UINT32)((brcblkxend - tlcblkxstart) >> cblkwidthexpn); l_current_precinct->ch = (OPJ_UINT32)((brcblkyend - tlcblkystart) >> cblkheightexpn); l_nb_code_blocks = l_current_precinct->cw * l_current_precinct->ch; /*fprintf(stderr, "\t\t\t\t precinct_cw = %d x precinct_ch = %d\n",l_current_precinct->cw, l_current_precinct->ch); */ l_nb_code_blocks_size = l_nb_code_blocks * (OPJ_UINT32)sizeof_block; if (! l_current_precinct->cblks.blocks) { l_current_precinct->cblks.blocks = opj_malloc(l_nb_code_blocks_size); if (!
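/* Code-block grid of the precinct, computed above: the precinct rectangle
   is snapped outward to multiples of 2^cblkwidthexpn and 2^cblkheightexpn,
   so cw and ch count the whole blocks covering it. The cblks union is sized
   via the sizeof_block parameter, letting encoder and decoder share this
   initializer with different per-block structs. */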
l_current_precinct->cblks.blocks ) { return OPJ_FALSE; } /*fprintf(stderr, "\t\t\t\tAllocate cblks of a precinct (opj_tcd_cblk_dec_t): %d\n",l_nb_code_blocks_size);*/ memset(l_current_precinct->cblks.blocks,0,l_nb_code_blocks_size); l_current_precinct->block_size = l_nb_code_blocks_size; } else if (l_nb_code_blocks_size > l_current_precinct->block_size) { void *new_blocks = opj_realloc(l_current_precinct->cblks.blocks, l_nb_code_blocks_size); if (! new_blocks) { opj_free(l_current_precinct->cblks.blocks); l_current_precinct->cblks.blocks = NULL; l_current_precinct->block_size = 0; /* opj_event_msg(p_manager, EVT_ERROR, "Not enough memory for current precinct codeblock element\n"); */ fprintf(stderr, "Not enough memory for current precinct codeblock element\n"); return OPJ_FALSE; } l_current_precinct->cblks.blocks = new_blocks; /*fprintf(stderr, "\t\t\t\tReallocate cblks of a precinct (opj_tcd_cblk_dec_t): from %d to %d\n",l_current_precinct->block_size, l_nb_code_blocks_size); */ memset(((OPJ_BYTE *) l_current_precinct->cblks.blocks) + l_current_precinct->block_size ,0 ,l_nb_code_blocks_size - l_current_precinct->block_size); l_current_precinct->block_size = l_nb_code_blocks_size; } if (! l_current_precinct->incltree) { l_current_precinct->incltree = opj_tgt_create(l_current_precinct->cw, l_current_precinct->ch); } else{ l_current_precinct->incltree = opj_tgt_init(l_current_precinct->incltree, l_current_precinct->cw, l_current_precinct->ch); } if (! l_current_precinct->incltree) { fprintf(stderr, "WARNING: No incltree created.\n"); /*return OPJ_FALSE;*/ } if (! l_current_precinct->imsbtree) { l_current_precinct->imsbtree = opj_tgt_create( l_current_precinct->cw, l_current_precinct->ch); } else { l_current_precinct->imsbtree = opj_tgt_init( l_current_precinct->imsbtree, l_current_precinct->cw, l_current_precinct->ch); } if (! l_current_precinct->imsbtree) { fprintf(stderr, "WARNING: No imsbtree created.\n"); /*return OPJ_FALSE;*/ } for (cblkno = 0; cblkno < l_nb_code_blocks; ++cblkno) { OPJ_INT32 cblkxstart = tlcblkxstart + (OPJ_INT32)(cblkno % l_current_precinct->cw) * (1 << cblkwidthexpn); OPJ_INT32 cblkystart = tlcblkystart + (OPJ_INT32)(cblkno / l_current_precinct->cw) * (1 << cblkheightexpn); OPJ_INT32 cblkxend = cblkxstart + (1 << cblkwidthexpn); OPJ_INT32 cblkyend = cblkystart + (1 << cblkheightexpn); if (isEncoder) { opj_tcd_cblk_enc_t* l_code_block = l_current_precinct->cblks.enc + cblkno; if (! opj_tcd_code_block_enc_allocate(l_code_block)) { return OPJ_FALSE; } /* code-block size (global) */ l_code_block->x0 = opj_int_max(cblkxstart, l_current_precinct->x0); l_code_block->y0 = opj_int_max(cblkystart, l_current_precinct->y0); l_code_block->x1 = opj_int_min(cblkxend, l_current_precinct->x1); l_code_block->y1 = opj_int_min(cblkyend, l_current_precinct->y1); if (! opj_tcd_code_block_enc_allocate_data(l_code_block)) { return OPJ_FALSE; } } else { opj_tcd_cblk_dec_t* l_code_block = l_current_precinct->cblks.dec + cblkno; if (! 
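/* Tag-tree note: incltree and imsbtree, (re)built above with opj_tgt_create
   or opj_tgt_init, are the standard JPEG 2000 packet-header helpers, one
   leaf per code block: incltree codes the layer of first inclusion,
   imsbtree the number of all-zero leading bit planes. A creation failure is
   tolerated here with only a warning. */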
opj_tcd_code_block_dec_allocate(l_code_block)) { return OPJ_FALSE; } /* code-block size (global) */ l_code_block->x0 = opj_int_max(cblkxstart, l_current_precinct->x0); l_code_block->y0 = opj_int_max(cblkystart, l_current_precinct->y0); l_code_block->x1 = opj_int_min(cblkxend, l_current_precinct->x1); l_code_block->y1 = opj_int_min(cblkyend, l_current_precinct->y1); } } ++l_current_precinct; } /* precno */ ++l_band; ++l_step_size; } /* bandno */ ++l_res; --l_level_no; } /* resno */ ++l_tccp; ++l_tilec; ++l_image_comp; } /* compno */ return OPJ_TRUE; } OPJ_BOOL opj_tcd_init_encode_tile (opj_tcd_t *p_tcd, OPJ_UINT32 p_tile_no) { return opj_tcd_init_tile(p_tcd, p_tile_no, OPJ_TRUE, 1.0F, sizeof(opj_tcd_cblk_enc_t)); } OPJ_BOOL opj_tcd_init_decode_tile (opj_tcd_t *p_tcd, OPJ_UINT32 p_tile_no) { return opj_tcd_init_tile(p_tcd, p_tile_no, OPJ_FALSE, 0.5F, sizeof(opj_tcd_cblk_dec_t)); } /** * Allocates memory for an encoding code block (but not data memory). */ static OPJ_BOOL opj_tcd_code_block_enc_allocate (opj_tcd_cblk_enc_t * p_code_block) { if (! p_code_block->layers) { /* no memset since data */ p_code_block->layers = (opj_tcd_layer_t*) opj_calloc(100, sizeof(opj_tcd_layer_t)); if (! p_code_block->layers) { return OPJ_FALSE; } } if (! p_code_block->passes) { p_code_block->passes = (opj_tcd_pass_t*) opj_calloc(100, sizeof(opj_tcd_pass_t)); if (! p_code_block->passes) { return OPJ_FALSE; } } return OPJ_TRUE; } /** * Allocates data memory for an encoding code block. */ static OPJ_BOOL opj_tcd_code_block_enc_allocate_data (opj_tcd_cblk_enc_t * p_code_block) { OPJ_UINT32 l_data_size; l_data_size = (OPJ_UINT32)((p_code_block->x1 - p_code_block->x0) * (p_code_block->y1 - p_code_block->y0) * (OPJ_INT32)sizeof(OPJ_UINT32)); if (l_data_size > p_code_block->data_size) { if (p_code_block->data) { opj_free(p_code_block->data - 1); /* again, why -1 */ } p_code_block->data = (OPJ_BYTE*) opj_malloc(l_data_size); if(! p_code_block->data) { p_code_block->data_size = 0U; return OPJ_FALSE; } p_code_block->data_size = l_data_size; p_code_block->data[0] = 0; p_code_block->data+=1; /*why +1 ?*/ } return OPJ_TRUE; } /** * Allocates memory for a decoding code block. */ static OPJ_BOOL opj_tcd_code_block_dec_allocate (opj_tcd_cblk_dec_t * p_code_block) { if (! p_code_block->data) { p_code_block->data = (OPJ_BYTE*) opj_malloc(OPJ_J2K_DEFAULT_CBLK_DATA_SIZE); if (! p_code_block->data) { return OPJ_FALSE; } p_code_block->data_max_size = OPJ_J2K_DEFAULT_CBLK_DATA_SIZE; /*fprintf(stderr, "Allocate 8192 elements of code_block->data\n");*/ p_code_block->segs = (opj_tcd_seg_t *) opj_calloc(OPJ_J2K_DEFAULT_NB_SEGS,sizeof(opj_tcd_seg_t)); if (! 
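/* On the data+1 and data-1 pairing in the encoder paths above: one byte is
   reserved in front of each code-block buffer (data[0] is zeroed, then the
   pointer is advanced), presumably so the MQ coder may reference the byte
   just before the current output position without underrunning the
   allocation. That reading is an inference from the "why +1" question left
   in the source, not a documented guarantee; it does explain why the
   encoder frees data - 1. */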
p_code_block->segs) { return OPJ_FALSE; } /*fprintf(stderr, "Allocate %d elements of code_block->data\n", OPJ_J2K_DEFAULT_NB_SEGS * sizeof(opj_tcd_seg_t));*/ p_code_block->m_current_max_segs = OPJ_J2K_DEFAULT_NB_SEGS; /*fprintf(stderr, "m_current_max_segs of code_block->data = %d\n", p_code_block->m_current_max_segs);*/ } else { /* sanitize */ OPJ_BYTE* l_data = p_code_block->data; OPJ_UINT32 l_data_max_size = p_code_block->data_max_size; opj_tcd_seg_t * l_segs = p_code_block->segs; OPJ_UINT32 l_current_max_segs = p_code_block->m_current_max_segs; memset(p_code_block, 0, sizeof(opj_tcd_cblk_dec_t)); p_code_block->data = l_data; p_code_block->data_max_size = l_data_max_size; p_code_block->segs = l_segs; p_code_block->m_current_max_segs = l_current_max_segs; } return OPJ_TRUE; } OPJ_UINT32 opj_tcd_get_decoded_tile_size ( opj_tcd_t *p_tcd ) { OPJ_UINT32 i; OPJ_UINT32 l_data_size = 0; opj_image_comp_t * l_img_comp = 00; opj_tcd_tilecomp_t * l_tile_comp = 00; opj_tcd_resolution_t * l_res = 00; OPJ_UINT32 l_size_comp, l_remaining; l_tile_comp = p_tcd->tcd_image->tiles->comps; l_img_comp = p_tcd->image->comps; for (i=0;i<p_tcd->image->numcomps;++i) { l_size_comp = l_img_comp->prec >> 3; /*(/ 8)*/ l_remaining = l_img_comp->prec & 7; /* (%8) */ if(l_remaining) { ++l_size_comp; } if (l_size_comp == 3) { l_size_comp = 4; } l_res = l_tile_comp->resolutions + l_tile_comp->minimum_num_resolutions - 1; l_data_size += l_size_comp * (OPJ_UINT32)((l_res->x1 - l_res->x0) * (l_res->y1 - l_res->y0)); ++l_img_comp; ++l_tile_comp; } return l_data_size; } OPJ_BOOL opj_tcd_encode_tile( opj_tcd_t *p_tcd, OPJ_UINT32 p_tile_no, OPJ_BYTE *p_dest, OPJ_UINT32 * p_data_written, OPJ_UINT32 p_max_length, opj_codestream_info_t *p_cstr_info) { if (p_tcd->cur_tp_num == 0) { p_tcd->tcd_tileno = p_tile_no; p_tcd->tcp = &p_tcd->cp->tcps[p_tile_no]; /* INDEX >> "Precinct_nb_X et Precinct_nb_Y" */ if(p_cstr_info) { OPJ_UINT32 l_num_packs = 0; OPJ_UINT32 i; opj_tcd_tilecomp_t *l_tilec_idx = &p_tcd->tcd_image->tiles->comps[0]; /* based on component 0 */ opj_tccp_t *l_tccp = p_tcd->tcp->tccps; /* based on component 0 */ for (i = 0; i < l_tilec_idx->numresolutions; i++) { opj_tcd_resolution_t *l_res_idx = &l_tilec_idx->resolutions[i]; p_cstr_info->tile[p_tile_no].pw[i] = (int)l_res_idx->pw; p_cstr_info->tile[p_tile_no].ph[i] = (int)l_res_idx->ph; l_num_packs += l_res_idx->pw * l_res_idx->ph; p_cstr_info->tile[p_tile_no].pdx[i] = (int)l_tccp->prcw[i]; p_cstr_info->tile[p_tile_no].pdy[i] = (int)l_tccp->prch[i]; } p_cstr_info->tile[p_tile_no].packet = (opj_packet_info_t*) opj_calloc((size_t)p_cstr_info->numcomps * (size_t)p_cstr_info->numlayers * l_num_packs, sizeof(opj_packet_info_t)); if (!p_cstr_info->tile[p_tile_no].packet) { /* FIXME event manager error callback */ return OPJ_FALSE; } } /* << INDEX */ /* FIXME _ProfStart(PGROUP_DC_SHIFT); */ /*---------------TILE-------------------*/ if (! opj_tcd_dc_level_shift_encode(p_tcd)) { return OPJ_FALSE; } /* FIXME _ProfStop(PGROUP_DC_SHIFT); */ /* FIXME _ProfStart(PGROUP_MCT); */ if (! opj_tcd_mct_encode(p_tcd)) { return OPJ_FALSE; } /* FIXME _ProfStop(PGROUP_MCT); */ /* FIXME _ProfStart(PGROUP_DWT); */ if (! opj_tcd_dwt_encode(p_tcd)) { return OPJ_FALSE; } /* FIXME _ProfStop(PGROUP_DWT); */ /* FIXME _ProfStart(PGROUP_T1); */ if (! opj_tcd_t1_encode(p_tcd)) { return OPJ_FALSE; } /* FIXME _ProfStop(PGROUP_T1); */ /* FIXME _ProfStart(PGROUP_RATE); */ if (! 
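/* Everything inside the cur_tp_num == 0 block runs once per tile, on its
   first tile part: DC level shift, MCT, DWT, T1 coding and the rate
   allocation completed below. Later tile parts of the same tile skip
   straight to the T2 packet-writing stage after this block. */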
opj_tcd_rate_allocate_encode(p_tcd,p_dest,p_max_length,p_cstr_info)) { return OPJ_FALSE; } /* FIXME _ProfStop(PGROUP_RATE); */ } /*--------------TIER2------------------*/ /* INDEX */ if (p_cstr_info) { p_cstr_info->index_write = 1; } /* FIXME _ProfStart(PGROUP_T2); */ if (! opj_tcd_t2_encode(p_tcd,p_dest,p_data_written,p_max_length,p_cstr_info)) { return OPJ_FALSE; } /* FIXME _ProfStop(PGROUP_T2); */ /*---------------CLEAN-------------------*/ return OPJ_TRUE; } OPJ_BOOL opj_tcd_decode_tile( opj_tcd_t *p_tcd, OPJ_BYTE *p_src, OPJ_UINT32 p_max_length, OPJ_UINT32 p_tile_no, opj_codestream_index_t *p_cstr_index ) { OPJ_UINT32 l_data_read; p_tcd->tcd_tileno = p_tile_no; p_tcd->tcp = &(p_tcd->cp->tcps[p_tile_no]); #ifdef TODO_MSD /* FIXME */ /* INDEX >> */ if(p_cstr_info) { OPJ_UINT32 resno, compno, numprec = 0; for (compno = 0; compno < (OPJ_UINT32) p_cstr_info->numcomps; compno++) { opj_tcp_t *tcp = &p_tcd->cp->tcps[0]; opj_tccp_t *tccp = &tcp->tccps[compno]; opj_tcd_tilecomp_t *tilec_idx = &p_tcd->tcd_image->tiles->comps[compno]; for (resno = 0; resno < tilec_idx->numresolutions; resno++) { opj_tcd_resolution_t *res_idx = &tilec_idx->resolutions[resno]; p_cstr_info->tile[p_tile_no].pw[resno] = res_idx->pw; p_cstr_info->tile[p_tile_no].ph[resno] = res_idx->ph; numprec += res_idx->pw * res_idx->ph; p_cstr_info->tile[p_tile_no].pdx[resno] = tccp->prcw[resno]; p_cstr_info->tile[p_tile_no].pdy[resno] = tccp->prch[resno]; } } p_cstr_info->tile[p_tile_no].packet = (opj_packet_info_t *) opj_malloc(p_cstr_info->numlayers * numprec * sizeof(opj_packet_info_t)); p_cstr_info->packno = 0; } /* << INDEX */ #endif /*--------------TIER2------------------*/ /* FIXME _ProfStart(PGROUP_T2); */ l_data_read = 0; if (! opj_tcd_t2_decode(p_tcd, p_src, &l_data_read, p_max_length, p_cstr_index)) { return OPJ_FALSE; } /* FIXME _ProfStop(PGROUP_T2); */ /*------------------TIER1-----------------*/ /* FIXME _ProfStart(PGROUP_T1); */ if (! opj_tcd_t1_decode(p_tcd)) { return OPJ_FALSE; } /* FIXME _ProfStop(PGROUP_T1); */ /*----------------DWT---------------------*/ /* FIXME _ProfStart(PGROUP_DWT); */ if (! opj_tcd_dwt_decode(p_tcd)) { return OPJ_FALSE; } /* FIXME _ProfStop(PGROUP_DWT); */ /*----------------MCT-------------------*/ /* FIXME _ProfStart(PGROUP_MCT); */ if (! opj_tcd_mct_decode(p_tcd)) { return OPJ_FALSE; } /* FIXME _ProfStop(PGROUP_MCT); */ /* FIXME _ProfStart(PGROUP_DC_SHIFT); */ if (! 
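/* Decode pipeline order, the mirror of encoding: T2 packet decoding, T1
   coefficient decoding, inverse DWT, inverse MCT, and finally the DC level
   shift completed below; the stages run in reverse of the encoder's
   shift, MCT, DWT, T1, T2 sequence. */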
opj_tcd_dc_level_shift_decode(p_tcd)) { return OPJ_FALSE; } /* FIXME _ProfStop(PGROUP_DC_SHIFT); */ /*---------------TILE-------------------*/ return OPJ_TRUE; } OPJ_BOOL opj_tcd_update_tile_data ( opj_tcd_t *p_tcd, OPJ_BYTE * p_dest, OPJ_UINT32 p_dest_length ) { OPJ_UINT32 i,j,k,l_data_size = 0; opj_image_comp_t * l_img_comp = 00; opj_tcd_tilecomp_t * l_tilec = 00; opj_tcd_resolution_t * l_res; OPJ_UINT32 l_size_comp, l_remaining; OPJ_UINT32 l_stride, l_width,l_height; l_data_size = opj_tcd_get_decoded_tile_size(p_tcd); if (l_data_size > p_dest_length) { return OPJ_FALSE; } l_tilec = p_tcd->tcd_image->tiles->comps; l_img_comp = p_tcd->image->comps; for (i=0;i<p_tcd->image->numcomps;++i) { l_size_comp = l_img_comp->prec >> 3; /*(/ 8)*/ l_remaining = l_img_comp->prec & 7; /* (%8) */ l_res = l_tilec->resolutions + l_img_comp->resno_decoded; l_width = (OPJ_UINT32)(l_res->x1 - l_res->x0); l_height = (OPJ_UINT32)(l_res->y1 - l_res->y0); l_stride = (OPJ_UINT32)(l_tilec->x1 - l_tilec->x0) - l_width; if (l_remaining) { ++l_size_comp; } if (l_size_comp == 3) { l_size_comp = 4; } switch (l_size_comp) { case 1: { OPJ_CHAR * l_dest_ptr = (OPJ_CHAR *) p_dest; const OPJ_INT32 * l_src_ptr = l_tilec->data; if (l_img_comp->sgnd) { for (j=0;j<l_height;++j) { for (k=0;k<l_width;++k) { *(l_dest_ptr++) = (OPJ_CHAR) (*(l_src_ptr++)); } l_src_ptr += l_stride; } } else { for (j=0;j<l_height;++j) { for (k=0;k<l_width;++k) { *(l_dest_ptr++) = (OPJ_CHAR) ((*(l_src_ptr++))&0xff); } l_src_ptr += l_stride; } } p_dest = (OPJ_BYTE *)l_dest_ptr; } break; case 2: { const OPJ_INT32 * l_src_ptr = l_tilec->data; OPJ_INT16 * l_dest_ptr = (OPJ_INT16 *) p_dest; if (l_img_comp->sgnd) { for (j=0;j<l_height;++j) { for (k=0;k<l_width;++k) { *(l_dest_ptr++) = (OPJ_INT16) (*(l_src_ptr++)); } l_src_ptr += l_stride; } } else { for (j=0;j<l_height;++j) { for (k=0;k<l_width;++k) { *(l_dest_ptr++) = (OPJ_INT16) ((*(l_src_ptr++))&0xffff); } l_src_ptr += l_stride; } } p_dest = (OPJ_BYTE*) l_dest_ptr; } break; case 4: { OPJ_INT32 * l_dest_ptr = (OPJ_INT32 *) p_dest; OPJ_INT32 * l_src_ptr = l_tilec->data; for (j=0;j<l_height;++j) { for (k=0;k<l_width;++k) { *(l_dest_ptr++) = (*(l_src_ptr++)); } l_src_ptr += l_stride; } p_dest = (OPJ_BYTE*) l_dest_ptr; } break; } ++l_img_comp; ++l_tilec; } return OPJ_TRUE; } void opj_tcd_free_tile(opj_tcd_t *p_tcd) { OPJ_UINT32 compno, resno, bandno, precno; opj_tcd_tile_t *l_tile = 00; opj_tcd_tilecomp_t *l_tile_comp = 00; opj_tcd_resolution_t *l_res = 00; opj_tcd_band_t *l_band = 00; opj_tcd_precinct_t *l_precinct = 00; OPJ_UINT32 l_nb_resolutions, l_nb_precincts; void (* l_tcd_code_block_deallocate) (opj_tcd_precinct_t *) = 00; if (! p_tcd) { return; } if (! p_tcd->tcd_image) { return; } if (p_tcd->m_is_decoder) { l_tcd_code_block_deallocate = opj_tcd_code_block_dec_deallocate; } else { l_tcd_code_block_deallocate = opj_tcd_code_block_enc_deallocate; } l_tile = p_tcd->tcd_image->tiles; if (! 
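/* opj_tcd_free_tile, entered here, walks the component, resolution, band
   and precinct levels, destroying both tag trees and dispatching code-block
   cleanup through the enc- or dec-specific deallocator selected above from
   m_is_decoder. Component sample data is released further below only when
   ownsData is set, so borrowed buffers are left alone. */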
l_tile) { return; } l_tile_comp = l_tile->comps; for (compno = 0; compno < l_tile->numcomps; ++compno) { l_res = l_tile_comp->resolutions; if (l_res) { l_nb_resolutions = l_tile_comp->resolutions_size / sizeof(opj_tcd_resolution_t); for (resno = 0; resno < l_nb_resolutions; ++resno) { l_band = l_res->bands; for (bandno = 0; bandno < 3; ++bandno) { l_precinct = l_band->precincts; if (l_precinct) { l_nb_precincts = l_band->precincts_data_size / sizeof(opj_tcd_precinct_t); for (precno = 0; precno < l_nb_precincts; ++precno) { opj_tgt_destroy(l_precinct->incltree); l_precinct->incltree = 00; opj_tgt_destroy(l_precinct->imsbtree); l_precinct->imsbtree = 00; (*l_tcd_code_block_deallocate) (l_precinct); ++l_precinct; } opj_free(l_band->precincts); l_band->precincts = 00; } ++l_band; } /* for (resno */ ++l_res; } opj_free(l_tile_comp->resolutions); l_tile_comp->resolutions = 00; } if (l_tile_comp->ownsData && l_tile_comp->data) { opj_free(l_tile_comp->data); l_tile_comp->data = 00; l_tile_comp->ownsData = 0; l_tile_comp->data_size = 0; l_tile_comp->data_size_needed = 0; } ++l_tile_comp; } opj_free(l_tile->comps); l_tile->comps = 00; opj_free(p_tcd->tcd_image->tiles); p_tcd->tcd_image->tiles = 00; } OPJ_BOOL opj_tcd_t2_decode (opj_tcd_t *p_tcd, OPJ_BYTE * p_src_data, OPJ_UINT32 * p_data_read, OPJ_UINT32 p_max_src_size, opj_codestream_index_t *p_cstr_index ) { opj_t2_t * l_t2; l_t2 = opj_t2_create(p_tcd->image, p_tcd->cp); if (l_t2 == 00) { return OPJ_FALSE; } if (! opj_t2_decode_packets( l_t2, p_tcd->tcd_tileno, p_tcd->tcd_image->tiles, p_src_data, p_data_read, p_max_src_size, p_cstr_index)) { opj_t2_destroy(l_t2); return OPJ_FALSE; } opj_t2_destroy(l_t2); /*---------------CLEAN-------------------*/ return OPJ_TRUE; } OPJ_BOOL opj_tcd_t1_decode ( opj_tcd_t *p_tcd ) { OPJ_UINT32 compno; opj_t1_t * l_t1; opj_tcd_tile_t * l_tile = p_tcd->tcd_image->tiles; opj_tcd_tilecomp_t* l_tile_comp = l_tile->comps; opj_tccp_t * l_tccp = p_tcd->tcp->tccps; l_t1 = opj_t1_create(OPJ_FALSE); if (l_t1 == 00) { return OPJ_FALSE; } for (compno = 0; compno < l_tile->numcomps; ++compno) { /* The +3 is headroom required by the vectorized DWT */ if (OPJ_FALSE == opj_t1_decode_cblks(l_t1, l_tile_comp, l_tccp)) { opj_t1_destroy(l_t1); return OPJ_FALSE; } ++l_tile_comp; ++l_tccp; } opj_t1_destroy(l_t1); return OPJ_TRUE; } OPJ_BOOL opj_tcd_dwt_decode ( opj_tcd_t *p_tcd ) { OPJ_UINT32 compno; opj_tcd_tile_t * l_tile = p_tcd->tcd_image->tiles; opj_tcd_tilecomp_t * l_tile_comp = l_tile->comps; opj_tccp_t * l_tccp = p_tcd->tcp->tccps; opj_image_comp_t * l_img_comp = p_tcd->image->comps; for (compno = 0; compno < l_tile->numcomps; compno++) { /* if (tcd->cp->reduce != 0) { tcd->image->comps[compno].resno_decoded = tile->comps[compno].numresolutions - tcd->cp->reduce - 1; if (tcd->image->comps[compno].resno_decoded < 0) { return false; } } numres2decode = tcd->image->comps[compno].resno_decoded + 1; if(numres2decode > 0){ */ if (l_tccp->qmfbid == 1) { if (! opj_dwt_decode(l_tile_comp, l_img_comp->resno_decoded+1)) { return OPJ_FALSE; } } else { if (! opj_dwt_decode_real(l_tile_comp, l_img_comp->resno_decoded+1)) { return OPJ_FALSE; } } ++l_tile_comp; ++l_img_comp; ++l_tccp; } return OPJ_TRUE; } OPJ_BOOL opj_tcd_mct_decode ( opj_tcd_t *p_tcd ) { opj_tcd_tile_t * l_tile = p_tcd->tcd_image->tiles; opj_tcp_t * l_tcp = p_tcd->tcp; opj_tcd_tilecomp_t * l_tile_comp = l_tile->comps; OPJ_UINT32 l_samples,i; if (! 
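/* Entry to opj_tcd_mct_decode, completed below: mct == 0 is a no-op,
   mct == 2 applies the custom decoding matrix (and quietly succeeds when
   none was signalled), and any other value selects the fixed transform,
   the reversible RCT when qmfbid == 1 or the irreversible ICT otherwise.
   Either way, at least three components of identical dimensions are
   required. */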
l_tcp->mct) { return OPJ_TRUE; } l_samples = (OPJ_UINT32)((l_tile_comp->x1 - l_tile_comp->x0) * (l_tile_comp->y1 - l_tile_comp->y0)); if (l_tile->numcomps >= 3 ){ /* testcase 1336.pdf.asan.47.376 */ if ((l_tile->comps[0].x1 - l_tile->comps[0].x0) * (l_tile->comps[0].y1 - l_tile->comps[0].y0) < (OPJ_INT32)l_samples || (l_tile->comps[1].x1 - l_tile->comps[1].x0) * (l_tile->comps[1].y1 - l_tile->comps[1].y0) < (OPJ_INT32)l_samples || (l_tile->comps[2].x1 - l_tile->comps[2].x0) * (l_tile->comps[2].y1 - l_tile->comps[2].y0) < (OPJ_INT32)l_samples) { fprintf(stderr, "Tiles don't all have the same dimension. Skip the MCT step.\n"); return OPJ_FALSE; } else if (l_tcp->mct == 2) { OPJ_BYTE ** l_data; if (! l_tcp->m_mct_decoding_matrix) { return OPJ_TRUE; } l_data = (OPJ_BYTE **) opj_malloc(l_tile->numcomps*sizeof(OPJ_BYTE*)); if (! l_data) { return OPJ_FALSE; } for (i=0;i<l_tile->numcomps;++i) { l_data[i] = (OPJ_BYTE*) l_tile_comp->data; ++l_tile_comp; } if (! opj_mct_decode_custom(/* MCT data */ (OPJ_BYTE*) l_tcp->m_mct_decoding_matrix, /* size of components */ l_samples, /* components */ l_data, /* nb of components (i.e. size of pData) */ l_tile->numcomps, /* tells if the data is signed */ p_tcd->image->comps->sgnd)) { opj_free(l_data); return OPJ_FALSE; } opj_free(l_data); } else { if (l_tcp->tccps->qmfbid == 1) { opj_mct_decode( l_tile->comps[0].data, l_tile->comps[1].data, l_tile->comps[2].data, l_samples); } else { opj_mct_decode_real((OPJ_FLOAT32*)l_tile->comps[0].data, (OPJ_FLOAT32*)l_tile->comps[1].data, (OPJ_FLOAT32*)l_tile->comps[2].data, l_samples); } } } else { /* FIXME need to use opj_event_msg function */ fprintf(stderr,"Number of components (%u) is inconsistent with an MCT. Skip the MCT step.\n",l_tile->numcomps); } return OPJ_TRUE; } OPJ_BOOL opj_tcd_dc_level_shift_decode ( opj_tcd_t *p_tcd ) { OPJ_UINT32 compno; opj_tcd_tilecomp_t * l_tile_comp = 00; opj_tccp_t * l_tccp = 00; opj_image_comp_t * l_img_comp = 00; opj_tcd_resolution_t* l_res = 00; opj_tcd_tile_t * l_tile; OPJ_UINT32 l_width,l_height,i,j; OPJ_INT32 * l_current_ptr; OPJ_INT32 l_min, l_max; OPJ_UINT32 l_stride; l_tile = p_tcd->tcd_image->tiles; l_tile_comp = l_tile->comps; l_tccp = p_tcd->tcp->tccps; l_img_comp = p_tcd->image->comps; for (compno = 0; compno < l_tile->numcomps; compno++) { l_res = l_tile_comp->resolutions + l_img_comp->resno_decoded; l_width = (OPJ_UINT32)(l_res->x1 - l_res->x0); l_height = (OPJ_UINT32)(l_res->y1 - l_res->y0); l_stride = (OPJ_UINT32)(l_tile_comp->x1 - l_tile_comp->x0) - l_width; assert(l_height == 0 || l_width + l_stride <= l_tile_comp->data_size / l_height); /*MUPDF*/ if (l_img_comp->sgnd) { l_min = -(1 << (l_img_comp->prec - 1)); l_max = (1 << (l_img_comp->prec - 1)) - 1; } else { l_min = 0; l_max = (1 << l_img_comp->prec) - 1; } l_current_ptr = l_tile_comp->data; if (l_tccp->qmfbid == 1) { for (j=0;j<l_height;++j) { for (i = 0; i < l_width; ++i) { *l_current_ptr = opj_int_clamp(*l_current_ptr + l_tccp->m_dc_level_shift, l_min, l_max); ++l_current_ptr; } l_current_ptr += l_stride; } } else { for (j=0;j<l_height;++j) { for (i = 0; i < l_width; ++i) { OPJ_FLOAT32 l_value = *((OPJ_FLOAT32 *) l_current_ptr); *l_current_ptr = opj_int_clamp((OPJ_INT32)lrintf(l_value) + l_tccp->m_dc_level_shift, l_min, l_max); ++l_current_ptr; } l_current_ptr += l_stride; } } ++l_img_comp; ++l_tccp; ++l_tile_comp; } return OPJ_TRUE; } /** * Deallocates the decoding data of the given precinct.
*/ void opj_tcd_code_block_dec_deallocate (opj_tcd_precinct_t * p_precinct) { OPJ_UINT32 cblkno , l_nb_code_blocks; opj_tcd_cblk_dec_t * l_code_block = p_precinct->cblks.dec; if (l_code_block) { /*fprintf(stderr,"deallocate codeblock:{\n");*/ /*fprintf(stderr,"\t x0=%d, y0=%d, x1=%d, y1=%d\n",l_code_block->x0, l_code_block->y0, l_code_block->x1, l_code_block->y1);*/ /*fprintf(stderr,"\t numbps=%d, numlenbits=%d, len=%d, numnewpasses=%d, real_num_segs=%d, m_current_max_segs=%d\n ", l_code_block->numbps, l_code_block->numlenbits, l_code_block->len, l_code_block->numnewpasses, l_code_block->real_num_segs, l_code_block->m_current_max_segs );*/ l_nb_code_blocks = p_precinct->block_size / sizeof(opj_tcd_cblk_dec_t); /*fprintf(stderr,"nb_code_blocks =%d\t}\n", l_nb_code_blocks);*/ for (cblkno = 0; cblkno < l_nb_code_blocks; ++cblkno) { if (l_code_block->data) { opj_free(l_code_block->data); l_code_block->data = 00; } if (l_code_block->segs) { opj_free(l_code_block->segs ); l_code_block->segs = 00; } ++l_code_block; } opj_free(p_precinct->cblks.dec); p_precinct->cblks.dec = 00; } } /** * Deallocates the encoding data of the given precinct. */ void opj_tcd_code_block_enc_deallocate (opj_tcd_precinct_t * p_precinct) { OPJ_UINT32 cblkno , l_nb_code_blocks; opj_tcd_cblk_enc_t * l_code_block = p_precinct->cblks.enc; if (l_code_block) { l_nb_code_blocks = p_precinct->block_size / sizeof(opj_tcd_cblk_enc_t); for (cblkno = 0; cblkno < l_nb_code_blocks; ++cblkno) { if (l_code_block->data) { opj_free(l_code_block->data - 1); l_code_block->data = 00; } if (l_code_block->layers) { opj_free(l_code_block->layers ); l_code_block->layers = 00; } if (l_code_block->passes) { opj_free(l_code_block->passes ); l_code_block->passes = 00; } ++l_code_block; } opj_free(p_precinct->cblks.enc); p_precinct->cblks.enc = 00; } } OPJ_UINT32 opj_tcd_get_encoded_tile_size ( opj_tcd_t *p_tcd ) { OPJ_UINT32 i,l_data_size = 0; opj_image_comp_t * l_img_comp = 00; opj_tcd_tilecomp_t * l_tilec = 00; OPJ_UINT32 l_size_comp, l_remaining; l_tilec = p_tcd->tcd_image->tiles->comps; l_img_comp = p_tcd->image->comps; for (i=0;i<p_tcd->image->numcomps;++i) { l_size_comp = l_img_comp->prec >> 3; /*(/ 8)*/ l_remaining = l_img_comp->prec & 7; /* (%8) */ if (l_remaining) { ++l_size_comp; } if (l_size_comp == 3) { l_size_comp = 4; } l_data_size += l_size_comp * (OPJ_UINT32)((l_tilec->x1 - l_tilec->x0) * (l_tilec->y1 - l_tilec->y0)); ++l_img_comp; ++l_tilec; } return l_data_size; } OPJ_BOOL opj_tcd_dc_level_shift_encode ( opj_tcd_t *p_tcd ) { OPJ_UINT32 compno; opj_tcd_tilecomp_t * l_tile_comp = 00; opj_tccp_t * l_tccp = 00; opj_image_comp_t * l_img_comp = 00; opj_tcd_tile_t * l_tile; OPJ_UINT32 l_nb_elem,i; OPJ_INT32 * l_current_ptr; l_tile = p_tcd->tcd_image->tiles; l_tile_comp = l_tile->comps; l_tccp = p_tcd->tcp->tccps; l_img_comp = p_tcd->image->comps; for (compno = 0; compno < l_tile->numcomps; compno++) { l_current_ptr = l_tile_comp->data; l_nb_elem = (OPJ_UINT32)((l_tile_comp->x1 - l_tile_comp->x0) * (l_tile_comp->y1 - l_tile_comp->y0)); if (l_tccp->qmfbid == 1) { for (i = 0; i < l_nb_elem; ++i) { *l_current_ptr -= l_tccp->m_dc_level_shift ; ++l_current_ptr; } } else { for (i = 0; i < l_nb_elem; ++i) { *l_current_ptr = (*l_current_ptr - l_tccp->m_dc_level_shift) << 11 ; ++l_current_ptr; } } ++l_img_comp; ++l_tccp; ++l_tile_comp; } return OPJ_TRUE; } OPJ_BOOL opj_tcd_mct_encode ( opj_tcd_t *p_tcd ) { opj_tcd_tile_t * l_tile = p_tcd->tcd_image->tiles; opj_tcd_tilecomp_t * l_tile_comp = p_tcd->tcd_image->tiles->comps; OPJ_UINT32 samples = 
(OPJ_UINT32)((l_tile_comp->x1 - l_tile_comp->x0) * (l_tile_comp->y1 - l_tile_comp->y0)); OPJ_UINT32 i; OPJ_BYTE ** l_data = 00; opj_tcp_t * l_tcp = p_tcd->tcp; if(!p_tcd->tcp->mct) { return OPJ_TRUE; } if (p_tcd->tcp->mct == 2) { if (! p_tcd->tcp->m_mct_coding_matrix) { return OPJ_TRUE; } l_data = (OPJ_BYTE **) opj_malloc(l_tile->numcomps*sizeof(OPJ_BYTE*)); if (! l_data) { return OPJ_FALSE; } for (i=0;i<l_tile->numcomps;++i) { l_data[i] = (OPJ_BYTE*) l_tile_comp->data; ++l_tile_comp; } if (! opj_mct_encode_custom(/* MCT data */ (OPJ_BYTE*) p_tcd->tcp->m_mct_coding_matrix, /* size of components */ samples, /* components */ l_data, /* nb of components (i.e. size of pData) */ l_tile->numcomps, /* tells if the data is signed */ p_tcd->image->comps->sgnd) ) { opj_free(l_data); return OPJ_FALSE; } opj_free(l_data); } else if (l_tcp->tccps->qmfbid == 0) { opj_mct_encode_real(l_tile->comps[0].data, l_tile->comps[1].data, l_tile->comps[2].data, samples); } else { opj_mct_encode(l_tile->comps[0].data, l_tile->comps[1].data, l_tile->comps[2].data, samples); } return OPJ_TRUE; } OPJ_BOOL opj_tcd_dwt_encode ( opj_tcd_t *p_tcd ) { opj_tcd_tile_t * l_tile = p_tcd->tcd_image->tiles; opj_tcd_tilecomp_t * l_tile_comp = p_tcd->tcd_image->tiles->comps; opj_tccp_t * l_tccp = p_tcd->tcp->tccps; OPJ_UINT32 compno; for (compno = 0; compno < l_tile->numcomps; ++compno) { if (l_tccp->qmfbid == 1) { if (! opj_dwt_encode(l_tile_comp)) { return OPJ_FALSE; } } else if (l_tccp->qmfbid == 0) { if (! opj_dwt_encode_real(l_tile_comp)) { return OPJ_FALSE; } } ++l_tile_comp; ++l_tccp; } return OPJ_TRUE; } OPJ_BOOL opj_tcd_t1_encode ( opj_tcd_t *p_tcd ) { opj_t1_t * l_t1; const OPJ_FLOAT64 * l_mct_norms; OPJ_UINT32 l_mct_numcomps = 0U; opj_tcp_t * l_tcp = p_tcd->tcp; l_t1 = opj_t1_create(OPJ_TRUE); if (l_t1 == 00) { return OPJ_FALSE; } if (l_tcp->mct == 1) { l_mct_numcomps = 3U; /* irreversible encoding */ if (l_tcp->tccps->qmfbid == 0) { l_mct_norms = opj_mct_get_mct_norms_real(); } else { l_mct_norms = opj_mct_get_mct_norms(); } } else { l_mct_numcomps = p_tcd->image->numcomps; l_mct_norms = (const OPJ_FLOAT64 *) (l_tcp->mct_norms); } if (! opj_t1_encode_cblks(l_t1, p_tcd->tcd_image->tiles , l_tcp, l_mct_norms, l_mct_numcomps)) { opj_t1_destroy(l_t1); return OPJ_FALSE; } opj_t1_destroy(l_t1); return OPJ_TRUE; } OPJ_BOOL opj_tcd_t2_encode (opj_tcd_t *p_tcd, OPJ_BYTE * p_dest_data, OPJ_UINT32 * p_data_written, OPJ_UINT32 p_max_dest_size, opj_codestream_info_t *p_cstr_info ) { opj_t2_t * l_t2; l_t2 = opj_t2_create(p_tcd->image, p_tcd->cp); if (l_t2 == 00) { return OPJ_FALSE; } if (! opj_t2_encode_packets( l_t2, p_tcd->tcd_tileno, p_tcd->tcd_image->tiles, p_tcd->tcp->numlayers, p_dest_data, p_data_written, p_max_dest_size, p_cstr_info, p_tcd->tp_num, p_tcd->tp_pos, p_tcd->cur_pino, FINAL_PASS)) { opj_t2_destroy(l_t2); return OPJ_FALSE; } opj_t2_destroy(l_t2); /*---------------CLEAN-------------------*/ return OPJ_TRUE; } OPJ_BOOL opj_tcd_rate_allocate_encode( opj_tcd_t *p_tcd, OPJ_BYTE * p_dest_data, OPJ_UINT32 p_max_dest_size, opj_codestream_info_t *p_cstr_info ) { opj_cp_t * l_cp = p_tcd->cp; OPJ_UINT32 l_nb_written = 0; if (p_cstr_info) { p_cstr_info->index_write = 0; } if (l_cp->m_specific_param.m_enc.m_disto_alloc|| l_cp->m_specific_param.m_enc.m_fixed_quality) { /* fixed_quality */ /* Normal Rate/distortion allocation */ if (! 
opj_tcd_rateallocate(p_tcd, p_dest_data,&l_nb_written, p_max_dest_size, p_cstr_info)) { return OPJ_FALSE; } } else { /* Fixed layer allocation */ opj_tcd_rateallocate_fixed(p_tcd); } return OPJ_TRUE; } OPJ_BOOL opj_tcd_copy_tile_data ( opj_tcd_t *p_tcd, OPJ_BYTE * p_src, OPJ_UINT32 p_src_length ) { OPJ_UINT32 i,j,l_data_size = 0; opj_image_comp_t * l_img_comp = 00; opj_tcd_tilecomp_t * l_tilec = 00; OPJ_UINT32 l_size_comp, l_remaining; OPJ_UINT32 l_nb_elem; l_data_size = opj_tcd_get_encoded_tile_size(p_tcd); if (l_data_size != p_src_length) { return OPJ_FALSE; } l_tilec = p_tcd->tcd_image->tiles->comps; l_img_comp = p_tcd->image->comps; for (i=0;i<p_tcd->image->numcomps;++i) { l_size_comp = l_img_comp->prec >> 3; /*(/ 8)*/ l_remaining = l_img_comp->prec & 7; /* (%8) */ l_nb_elem = (OPJ_UINT32)((l_tilec->x1 - l_tilec->x0) * (l_tilec->y1 - l_tilec->y0)); if (l_remaining) { ++l_size_comp; } if (l_size_comp == 3) { l_size_comp = 4; } switch (l_size_comp) { case 1: { OPJ_CHAR * l_src_ptr = (OPJ_CHAR *) p_src; OPJ_INT32 * l_dest_ptr = l_tilec->data; if (l_img_comp->sgnd) { for (j=0;j<l_nb_elem;++j) { *(l_dest_ptr++) = (OPJ_INT32) (*(l_src_ptr++)); } } else { for (j=0;j<l_nb_elem;++j) { *(l_dest_ptr++) = (*(l_src_ptr++))&0xff; } } p_src = (OPJ_BYTE*) l_src_ptr; } break; case 2: { OPJ_INT32 * l_dest_ptr = l_tilec->data; OPJ_INT16 * l_src_ptr = (OPJ_INT16 *) p_src; if (l_img_comp->sgnd) { for (j=0;j<l_nb_elem;++j) { *(l_dest_ptr++) = (OPJ_INT32) (*(l_src_ptr++)); } } else { for (j=0;j<l_nb_elem;++j) { *(l_dest_ptr++) = (*(l_src_ptr++))&0xffff; } } p_src = (OPJ_BYTE*) l_src_ptr; } break; case 4: { OPJ_INT32 * l_src_ptr = (OPJ_INT32 *) p_src; OPJ_INT32 * l_dest_ptr = l_tilec->data; for (j=0;j<l_nb_elem;++j) { *(l_dest_ptr++) = (OPJ_INT32) (*(l_src_ptr++)); } p_src = (OPJ_BYTE*) l_src_ptr; } break; } ++l_img_comp; ++l_tilec; } return OPJ_TRUE; }
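The opj_tcd_get_encoded_tile_size and opj_tcd_copy_tile_data routines above both derive a per-component storage size from the precision: bytes = prec / 8, rounded up, with 3-byte components promoted to 4 so samples can be copied as 8-, 16- or 32-bit integers. A standalone sketch of that derivation (plain C, no OpenJPEG dependencies):

#include <stdio.h>
#include <stdint.h>

/* Mirrors the l_size_comp computation in opj_tcd_get_encoded_tile_size /
 * opj_tcd_copy_tile_data: ceil(prec / 8), with 3 promoted to 4 because
 * there is no native 24-bit integer type to copy through. */
static uint32_t size_comp_for_prec(uint32_t prec)
{
    uint32_t size = prec >> 3;      /* prec / 8 */
    if ((prec & 7u) != 0u) {        /* prec % 8 */
        ++size;
    }
    if (size == 3u) {
        size = 4u;
    }
    return size;
}

int main(void)
{
    for (uint32_t prec = 1; prec <= 32; prec += 7) {
        printf("prec=%2u -> %u byte(s) per sample\n", prec, size_comp_for_prec(prec));
    }
    return 0;
}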
static INLINE OPJ_BOOL opj_tcd_init_tile(opj_tcd_t *p_tcd, OPJ_UINT32 p_tile_no, OPJ_BOOL isEncoder, OPJ_FLOAT32 fraction, OPJ_SIZE_T sizeof_block) { OPJ_UINT32 (*l_gain_ptr)(OPJ_UINT32) = 00; OPJ_UINT32 compno, resno, bandno, precno, cblkno; opj_tcp_t * l_tcp = 00; opj_cp_t * l_cp = 00; opj_tcd_tile_t * l_tile = 00; opj_tccp_t *l_tccp = 00; opj_tcd_tilecomp_t *l_tilec = 00; opj_image_comp_t * l_image_comp = 00; opj_tcd_resolution_t *l_res = 00; opj_tcd_band_t *l_band = 00; opj_stepsize_t * l_step_size = 00; opj_tcd_precinct_t *l_current_precinct = 00; opj_image_t *l_image = 00; OPJ_UINT32 p,q; OPJ_UINT32 l_level_no; OPJ_UINT32 l_pdx, l_pdy; OPJ_UINT32 l_gain; OPJ_INT32 l_x0b, l_y0b; /* extent of precincts , top left, bottom right**/ OPJ_INT32 l_tl_prc_x_start, l_tl_prc_y_start, l_br_prc_x_end, l_br_prc_y_end; /* number of precinct for a resolution */ OPJ_UINT32 l_nb_precincts; /* room needed to store l_nb_precinct precinct for a resolution */ OPJ_UINT32 l_nb_precinct_size; /* number of code blocks for a precinct*/ OPJ_UINT32 l_nb_code_blocks; /* room needed to store l_nb_code_blocks code blocks for a precinct*/ OPJ_UINT32 l_nb_code_blocks_size; /* size of data for a tile */ OPJ_UINT32 l_data_size; l_cp = p_tcd->cp; l_tcp = &(l_cp->tcps[p_tile_no]); l_tile = p_tcd->tcd_image->tiles; l_tccp = l_tcp->tccps; l_tilec = l_tile->comps; l_image = p_tcd->image; l_image_comp = p_tcd->image->comps; p = p_tile_no % l_cp->tw; /* tile coordinates */ q = p_tile_no / l_cp->tw; /*fprintf(stderr, "Tile coordinate = %d,%d\n", p, q);*/ /* 4 borders of the tile rescale on the image if necessary */ l_tile->x0 = opj_int_max((OPJ_INT32)(l_cp->tx0 + p * l_cp->tdx), (OPJ_INT32)l_image->x0); l_tile->y0 = opj_int_max((OPJ_INT32)(l_cp->ty0 + q * l_cp->tdy), (OPJ_INT32)l_image->y0); l_tile->x1 = opj_int_min((OPJ_INT32)(l_cp->tx0 + (p + 1) * l_cp->tdx), (OPJ_INT32)l_image->x1); l_tile->y1 = opj_int_min((OPJ_INT32)(l_cp->ty0 + (q + 1) * l_cp->tdy), (OPJ_INT32)l_image->y1); /* testcase 1888.pdf.asan.35.988 */ if (l_tccp->numresolutions == 0) { fprintf(stderr, "tiles require at least one resolution\n"); return OPJ_FALSE; } /*fprintf(stderr, "Tile border = %d,%d,%d,%d\n", l_tile->x0, l_tile->y0,l_tile->x1,l_tile->y1);*/ /*tile->numcomps = image->numcomps; */ for (compno = 0; compno < l_tile->numcomps; ++compno) { /*fprintf(stderr, "compno = %d/%d\n", compno, l_tile->numcomps);*/ l_image_comp->resno_decoded = 0; /* border of each l_tile component (global) */ l_tilec->x0 = opj_int_ceildiv(l_tile->x0, (OPJ_INT32)l_image_comp->dx); l_tilec->y0 = opj_int_ceildiv(l_tile->y0, (OPJ_INT32)l_image_comp->dy); l_tilec->x1 = opj_int_ceildiv(l_tile->x1, (OPJ_INT32)l_image_comp->dx); l_tilec->y1 = opj_int_ceildiv(l_tile->y1, (OPJ_INT32)l_image_comp->dy); /*fprintf(stderr, "\tTile compo border = %d,%d,%d,%d\n", l_tilec->x0, l_tilec->y0,l_tilec->x1,l_tilec->y1);*/ /* compute l_data_size with overflow check */ l_data_size = (OPJ_UINT32)(l_tilec->x1 - l_tilec->x0); if ((((OPJ_UINT32)-1) / l_data_size) < (OPJ_UINT32)(l_tilec->y1 - l_tilec->y0)) { /* TODO event */ return OPJ_FALSE; } l_data_size = l_data_size * (OPJ_UINT32)(l_tilec->y1 - l_tilec->y0); if ((((OPJ_UINT32)-1) / (OPJ_UINT32)sizeof(OPJ_UINT32)) < l_data_size) { /* TODO event */ return OPJ_FALSE; } l_data_size = l_data_size * (OPJ_UINT32)sizeof(OPJ_UINT32); l_tilec->numresolutions = l_tccp->numresolutions; if (l_tccp->numresolutions < l_cp->m_specific_param.m_dec.m_reduce) { l_tilec->minimum_num_resolutions = 1; } else { l_tilec->minimum_num_resolutions = l_tccp->numresolutions - 
l_cp->m_specific_param.m_dec.m_reduce; } l_tilec->data_size_needed = l_data_size; if (p_tcd->m_is_decoder && !opj_alloc_tile_component_data(l_tilec)) { return OPJ_FALSE; } l_data_size = l_tilec->numresolutions * (OPJ_UINT32)sizeof(opj_tcd_resolution_t); if (l_tilec->resolutions == 00) { l_tilec->resolutions = (opj_tcd_resolution_t *) opj_malloc(l_data_size); if (! l_tilec->resolutions ) { return OPJ_FALSE; } /*fprintf(stderr, "\tAllocate resolutions of tilec (opj_tcd_resolution_t): %d\n",l_data_size);*/ l_tilec->resolutions_size = l_data_size; memset(l_tilec->resolutions,0,l_data_size); } else if (l_data_size > l_tilec->resolutions_size) { opj_tcd_resolution_t* new_resolutions = (opj_tcd_resolution_t *) opj_realloc(l_tilec->resolutions, l_data_size); if (! new_resolutions) { /* opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to tile resolutions\n"); */ fprintf(stderr, "Not enough memory to tile resolutions\n"); opj_free(l_tilec->resolutions); l_tilec->resolutions = NULL; l_tilec->resolutions_size = 0; return OPJ_FALSE; } l_tilec->resolutions = new_resolutions; /*fprintf(stderr, "\tReallocate data of tilec (int): from %d to %d x OPJ_UINT32\n", l_tilec->resolutions_size, l_data_size);*/ memset(((OPJ_BYTE*) l_tilec->resolutions)+l_tilec->resolutions_size,0,l_data_size - l_tilec->resolutions_size); l_tilec->resolutions_size = l_data_size; } l_level_no = l_tilec->numresolutions - 1; l_res = l_tilec->resolutions; l_step_size = l_tccp->stepsizes; if (l_tccp->qmfbid == 0) { l_gain_ptr = &opj_dwt_getgain_real; } else { l_gain_ptr = &opj_dwt_getgain; } /*fprintf(stderr, "\tlevel_no=%d\n",l_level_no);*/ for (resno = 0; resno < l_tilec->numresolutions; ++resno) { /*fprintf(stderr, "\t\tresno = %d/%d\n", resno, l_tilec->numresolutions);*/ OPJ_INT32 tlcbgxstart, tlcbgystart /*, brcbgxend, brcbgyend*/; OPJ_UINT32 cbgwidthexpn, cbgheightexpn; OPJ_UINT32 cblkwidthexpn, cblkheightexpn; /* border for each resolution level (global) */ l_res->x0 = opj_int_ceildivpow2(l_tilec->x0, (OPJ_INT32)l_level_no); l_res->y0 = opj_int_ceildivpow2(l_tilec->y0, (OPJ_INT32)l_level_no); l_res->x1 = opj_int_ceildivpow2(l_tilec->x1, (OPJ_INT32)l_level_no); l_res->y1 = opj_int_ceildivpow2(l_tilec->y1, (OPJ_INT32)l_level_no); /*fprintf(stderr, "\t\t\tres_x0= %d, res_y0 =%d, res_x1=%d, res_y1=%d\n", l_res->x0, l_res->y0, l_res->x1, l_res->y1);*/ /* p. 35, table A-23, ISO/IEC FDIS154444-1 : 2000 (18 august 2000) */ l_pdx = l_tccp->prcw[resno]; l_pdy = l_tccp->prch[resno]; /*fprintf(stderr, "\t\t\tpdx=%d, pdy=%d\n", l_pdx, l_pdy);*/ /* p. 64, B.6, ISO/IEC FDIS15444-1 : 2000 (18 august 2000) */ l_tl_prc_x_start = opj_int_floordivpow2(l_res->x0, (OPJ_INT32)l_pdx) << l_pdx; l_tl_prc_y_start = opj_int_floordivpow2(l_res->y0, (OPJ_INT32)l_pdy) << l_pdy; l_br_prc_x_end = opj_int_ceildivpow2(l_res->x1, (OPJ_INT32)l_pdx) << l_pdx; l_br_prc_y_end = opj_int_ceildivpow2(l_res->y1, (OPJ_INT32)l_pdy) << l_pdy; /*fprintf(stderr, "\t\t\tprc_x_start=%d, prc_y_start=%d, br_prc_x_end=%d, br_prc_y_end=%d \n", l_tl_prc_x_start, l_tl_prc_y_start, l_br_prc_x_end ,l_br_prc_y_end );*/ l_res->pw = (l_res->x0 == l_res->x1) ? 0 : (OPJ_UINT32)((l_br_prc_x_end - l_tl_prc_x_start) >> l_pdx); l_res->ph = (l_res->y0 == l_res->y1) ? 
0 : (OPJ_UINT32)((l_br_prc_y_end - l_tl_prc_y_start) >> l_pdy); /*fprintf(stderr, "\t\t\tres_pw=%d, res_ph=%d\n", l_res->pw, l_res->ph );*/ l_nb_precincts = l_res->pw * l_res->ph; l_nb_precinct_size = l_nb_precincts * (OPJ_UINT32)sizeof(opj_tcd_precinct_t); if (resno == 0) { tlcbgxstart = l_tl_prc_x_start; tlcbgystart = l_tl_prc_y_start; /*brcbgxend = l_br_prc_x_end;*/ /* brcbgyend = l_br_prc_y_end;*/ cbgwidthexpn = l_pdx; cbgheightexpn = l_pdy; l_res->numbands = 1; } else { tlcbgxstart = opj_int_ceildivpow2(l_tl_prc_x_start, 1); tlcbgystart = opj_int_ceildivpow2(l_tl_prc_y_start, 1); /*brcbgxend = opj_int_ceildivpow2(l_br_prc_x_end, 1);*/ /*brcbgyend = opj_int_ceildivpow2(l_br_prc_y_end, 1);*/ cbgwidthexpn = l_pdx - 1; cbgheightexpn = l_pdy - 1; l_res->numbands = 3; } cblkwidthexpn = opj_uint_min(l_tccp->cblkw, cbgwidthexpn); cblkheightexpn = opj_uint_min(l_tccp->cblkh, cbgheightexpn); l_band = l_res->bands; for (bandno = 0; bandno < l_res->numbands; ++bandno) { OPJ_INT32 numbps; /*fprintf(stderr, "\t\t\tband_no=%d/%d\n", bandno, l_res->numbands );*/ if (resno == 0) { l_band->bandno = 0 ; l_band->x0 = opj_int_ceildivpow2(l_tilec->x0, (OPJ_INT32)l_level_no); l_band->y0 = opj_int_ceildivpow2(l_tilec->y0, (OPJ_INT32)l_level_no); l_band->x1 = opj_int_ceildivpow2(l_tilec->x1, (OPJ_INT32)l_level_no); l_band->y1 = opj_int_ceildivpow2(l_tilec->y1, (OPJ_INT32)l_level_no); } else { l_band->bandno = bandno + 1; /* x0b = 1 if bandno = 1 or 3 */ l_x0b = l_band->bandno&1; /* y0b = 1 if bandno = 2 or 3 */ l_y0b = (OPJ_INT32)((l_band->bandno)>>1); /* l_band border (global) */ l_band->x0 = opj_int_ceildivpow2(l_tilec->x0 - (1 << l_level_no) * l_x0b, (OPJ_INT32)(l_level_no + 1)); l_band->y0 = opj_int_ceildivpow2(l_tilec->y0 - (1 << l_level_no) * l_y0b, (OPJ_INT32)(l_level_no + 1)); l_band->x1 = opj_int_ceildivpow2(l_tilec->x1 - (1 << l_level_no) * l_x0b, (OPJ_INT32)(l_level_no + 1)); l_band->y1 = opj_int_ceildivpow2(l_tilec->y1 - (1 << l_level_no) * l_y0b, (OPJ_INT32)(l_level_no + 1)); } /** avoid an if with storing function pointer */ l_gain = (*l_gain_ptr) (l_band->bandno); numbps = (OPJ_INT32)(l_image_comp->prec + l_gain); l_band->stepsize = (OPJ_FLOAT32)(((1.0 + l_step_size->mant / 2048.0) * pow(2.0, (OPJ_INT32) (numbps - l_step_size->expn)))) * fraction; l_band->numbps = l_step_size->expn + (OPJ_INT32)l_tccp->numgbits - 1; /* WHY -1 ? */ if (! l_band->precincts) { l_band->precincts = (opj_tcd_precinct_t *) opj_malloc( /*3 * */ l_nb_precinct_size); if (! l_band->precincts) { return OPJ_FALSE; } /*fprintf(stderr, "\t\t\t\tAllocate precincts of a band (opj_tcd_precinct_t): %d\n",l_nb_precinct_size); */ memset(l_band->precincts,0,l_nb_precinct_size); l_band->precincts_data_size = l_nb_precinct_size; } else if (l_band->precincts_data_size < l_nb_precinct_size) { opj_tcd_precinct_t * new_precincts = (opj_tcd_precinct_t *) opj_realloc(l_band->precincts,/*3 * */ l_nb_precinct_size); if (! 
new_precincts) { /* opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to handle band precints\n"); */ fprintf(stderr, "Not enough memory to handle band precints\n"); opj_free(l_band->precincts); l_band->precincts = NULL; l_band->precincts_data_size = 0; return OPJ_FALSE; } l_band->precincts = new_precincts; /*fprintf(stderr, "\t\t\t\tReallocate precincts of a band (opj_tcd_precinct_t): from %d to %d\n",l_band->precincts_data_size, l_nb_precinct_size);*/ memset(((OPJ_BYTE *) l_band->precincts) + l_band->precincts_data_size,0,l_nb_precinct_size - l_band->precincts_data_size); l_band->precincts_data_size = l_nb_precinct_size; } l_current_precinct = l_band->precincts; for (precno = 0; precno < l_nb_precincts; ++precno) { OPJ_INT32 tlcblkxstart, tlcblkystart, brcblkxend, brcblkyend; OPJ_INT32 cbgxstart = tlcbgxstart + (OPJ_INT32)(precno % l_res->pw) * (1 << cbgwidthexpn); OPJ_INT32 cbgystart = tlcbgystart + (OPJ_INT32)(precno / l_res->pw) * (1 << cbgheightexpn); OPJ_INT32 cbgxend = cbgxstart + (1 << cbgwidthexpn); OPJ_INT32 cbgyend = cbgystart + (1 << cbgheightexpn); /*fprintf(stderr, "\t precno=%d; bandno=%d, resno=%d; compno=%d\n", precno, bandno , resno, compno);*/ /*fprintf(stderr, "\t tlcbgxstart(=%d) + (precno(=%d) percent res->pw(=%d)) * (1 << cbgwidthexpn(=%d)) \n",tlcbgxstart,precno,l_res->pw,cbgwidthexpn);*/ /* precinct size (global) */ /*fprintf(stderr, "\t cbgxstart=%d, l_band->x0 = %d \n",cbgxstart, l_band->x0);*/ l_current_precinct->x0 = opj_int_max(cbgxstart, l_band->x0); l_current_precinct->y0 = opj_int_max(cbgystart, l_band->y0); l_current_precinct->x1 = opj_int_min(cbgxend, l_band->x1); l_current_precinct->y1 = opj_int_min(cbgyend, l_band->y1); /*fprintf(stderr, "\t prc_x0=%d; prc_y0=%d, prc_x1=%d; prc_y1=%d\n",l_current_precinct->x0, l_current_precinct->y0 ,l_current_precinct->x1, l_current_precinct->y1);*/ tlcblkxstart = opj_int_floordivpow2(l_current_precinct->x0, (OPJ_INT32)cblkwidthexpn) << cblkwidthexpn; /*fprintf(stderr, "\t tlcblkxstart =%d\n",tlcblkxstart );*/ tlcblkystart = opj_int_floordivpow2(l_current_precinct->y0, (OPJ_INT32)cblkheightexpn) << cblkheightexpn; /*fprintf(stderr, "\t tlcblkystart =%d\n",tlcblkystart );*/ brcblkxend = opj_int_ceildivpow2(l_current_precinct->x1, (OPJ_INT32)cblkwidthexpn) << cblkwidthexpn; /*fprintf(stderr, "\t brcblkxend =%d\n",brcblkxend );*/ brcblkyend = opj_int_ceildivpow2(l_current_precinct->y1, (OPJ_INT32)cblkheightexpn) << cblkheightexpn; /*fprintf(stderr, "\t brcblkyend =%d\n",brcblkyend );*/ l_current_precinct->cw = (OPJ_UINT32)((brcblkxend - tlcblkxstart) >> cblkwidthexpn); l_current_precinct->ch = (OPJ_UINT32)((brcblkyend - tlcblkystart) >> cblkheightexpn); l_nb_code_blocks = l_current_precinct->cw * l_current_precinct->ch; /*fprintf(stderr, "\t\t\t\t precinct_cw = %d x recinct_ch = %d\n",l_current_precinct->cw, l_current_precinct->ch); */ l_nb_code_blocks_size = l_nb_code_blocks * (OPJ_UINT32)sizeof_block; if (! l_current_precinct->cblks.blocks) { l_current_precinct->cblks.blocks = opj_malloc(l_nb_code_blocks_size); if (! l_current_precinct->cblks.blocks ) { return OPJ_FALSE; } /*fprintf(stderr, "\t\t\t\tAllocate cblks of a precinct (opj_tcd_cblk_dec_t): %d\n",l_nb_code_blocks_size);*/ memset(l_current_precinct->cblks.blocks,0,l_nb_code_blocks_size); l_current_precinct->block_size = l_nb_code_blocks_size; } else if (l_nb_code_blocks_size > l_current_precinct->block_size) { void *new_blocks = opj_realloc(l_current_precinct->cblks.blocks, l_nb_code_blocks_size); if (! 
new_blocks) { opj_free(l_current_precinct->cblks.blocks); l_current_precinct->cblks.blocks = NULL; l_current_precinct->block_size = 0; /* opj_event_msg(p_manager, EVT_ERROR, "Not enough memory for current precinct codeblock element\n"); */ fprintf(stderr, "Not enough memory for current precinct codeblock element\n"); return OPJ_FALSE; } l_current_precinct->cblks.blocks = new_blocks; /*fprintf(stderr, "\t\t\t\tReallocate cblks of a precinct (opj_tcd_cblk_dec_t): from %d to %d\n",l_current_precinct->block_size, l_nb_code_blocks_size); */ memset(((OPJ_BYTE *) l_current_precinct->cblks.blocks) + l_current_precinct->block_size ,0 ,l_nb_code_blocks_size - l_current_precinct->block_size); l_current_precinct->block_size = l_nb_code_blocks_size; } if (! l_current_precinct->incltree) { l_current_precinct->incltree = opj_tgt_create(l_current_precinct->cw, l_current_precinct->ch); } else{ l_current_precinct->incltree = opj_tgt_init(l_current_precinct->incltree, l_current_precinct->cw, l_current_precinct->ch); } if (! l_current_precinct->incltree) { fprintf(stderr, "WARNING: No incltree created.\n"); /*return OPJ_FALSE;*/ } if (! l_current_precinct->imsbtree) { l_current_precinct->imsbtree = opj_tgt_create( l_current_precinct->cw, l_current_precinct->ch); } else { l_current_precinct->imsbtree = opj_tgt_init( l_current_precinct->imsbtree, l_current_precinct->cw, l_current_precinct->ch); } if (! l_current_precinct->imsbtree) { fprintf(stderr, "WARNING: No imsbtree created.\n"); /*return OPJ_FALSE;*/ } for (cblkno = 0; cblkno < l_nb_code_blocks; ++cblkno) { OPJ_INT32 cblkxstart = tlcblkxstart + (OPJ_INT32)(cblkno % l_current_precinct->cw) * (1 << cblkwidthexpn); OPJ_INT32 cblkystart = tlcblkystart + (OPJ_INT32)(cblkno / l_current_precinct->cw) * (1 << cblkheightexpn); OPJ_INT32 cblkxend = cblkxstart + (1 << cblkwidthexpn); OPJ_INT32 cblkyend = cblkystart + (1 << cblkheightexpn); if (isEncoder) { opj_tcd_cblk_enc_t* l_code_block = l_current_precinct->cblks.enc + cblkno; if (! opj_tcd_code_block_enc_allocate(l_code_block)) { return OPJ_FALSE; } /* code-block size (global) */ l_code_block->x0 = opj_int_max(cblkxstart, l_current_precinct->x0); l_code_block->y0 = opj_int_max(cblkystart, l_current_precinct->y0); l_code_block->x1 = opj_int_min(cblkxend, l_current_precinct->x1); l_code_block->y1 = opj_int_min(cblkyend, l_current_precinct->y1); if (! opj_tcd_code_block_enc_allocate_data(l_code_block)) { return OPJ_FALSE; } } else { opj_tcd_cblk_dec_t* l_code_block = l_current_precinct->cblks.dec + cblkno; if (! opj_tcd_code_block_dec_allocate(l_code_block)) { return OPJ_FALSE; } /* code-block size (global) */ l_code_block->x0 = opj_int_max(cblkxstart, l_current_precinct->x0); l_code_block->y0 = opj_int_max(cblkystart, l_current_precinct->y0); l_code_block->x1 = opj_int_min(cblkxend, l_current_precinct->x1); l_code_block->y1 = opj_int_min(cblkyend, l_current_precinct->y1); } } ++l_current_precinct; } /* precno */ ++l_band; ++l_step_size; } /* bandno */ ++l_res; --l_level_no; } /* resno */ ++l_tccp; ++l_tilec; ++l_image_comp; } /* compno */ return OPJ_TRUE; }
static INLINE OPJ_BOOL opj_tcd_init_tile(opj_tcd_t *p_tcd, OPJ_UINT32 p_tile_no, OPJ_BOOL isEncoder, OPJ_FLOAT32 fraction, OPJ_SIZE_T sizeof_block) { OPJ_UINT32 (*l_gain_ptr)(OPJ_UINT32) = 00; OPJ_UINT32 compno, resno, bandno, precno, cblkno; opj_tcp_t * l_tcp = 00; opj_cp_t * l_cp = 00; opj_tcd_tile_t * l_tile = 00; opj_tccp_t *l_tccp = 00; opj_tcd_tilecomp_t *l_tilec = 00; opj_image_comp_t * l_image_comp = 00; opj_tcd_resolution_t *l_res = 00; opj_tcd_band_t *l_band = 00; opj_stepsize_t * l_step_size = 00; opj_tcd_precinct_t *l_current_precinct = 00; opj_image_t *l_image = 00; OPJ_UINT32 p,q; OPJ_UINT32 l_level_no; OPJ_UINT32 l_pdx, l_pdy; OPJ_UINT32 l_gain; OPJ_INT32 l_x0b, l_y0b; /* extent of precincts , top left, bottom right**/ OPJ_INT32 l_tl_prc_x_start, l_tl_prc_y_start, l_br_prc_x_end, l_br_prc_y_end; /* number of precinct for a resolution */ OPJ_UINT32 l_nb_precincts; /* room needed to store l_nb_precinct precinct for a resolution */ OPJ_UINT32 l_nb_precinct_size; /* number of code blocks for a precinct*/ OPJ_UINT32 l_nb_code_blocks; /* room needed to store l_nb_code_blocks code blocks for a precinct*/ OPJ_UINT32 l_nb_code_blocks_size; /* size of data for a tile */ OPJ_UINT32 l_data_size; l_cp = p_tcd->cp; l_tcp = &(l_cp->tcps[p_tile_no]); l_tile = p_tcd->tcd_image->tiles; l_tccp = l_tcp->tccps; l_tilec = l_tile->comps; l_image = p_tcd->image; l_image_comp = p_tcd->image->comps; p = p_tile_no % l_cp->tw; /* tile coordinates */ q = p_tile_no / l_cp->tw; /*fprintf(stderr, "Tile coordinate = %d,%d\n", p, q);*/ /* 4 borders of the tile rescale on the image if necessary */ l_tile->x0 = (OPJ_INT32)opj_uint_max(l_cp->tx0 + p * l_cp->tdx, l_image->x0); l_tile->y0 = (OPJ_INT32)opj_uint_max(l_cp->ty0 + q * l_cp->tdy, l_image->y0); l_tile->x1 = (OPJ_INT32)opj_uint_min(l_cp->tx0 + (p + 1) * l_cp->tdx, l_image->x1); l_tile->y1 = (OPJ_INT32)opj_uint_min(l_cp->ty0 + (q + 1) * l_cp->tdy, l_image->y1); /* testcase 1888.pdf.asan.35.988 */ if (l_tccp->numresolutions == 0) { fprintf(stderr, "tiles require at least one resolution\n"); return OPJ_FALSE; } /*fprintf(stderr, "Tile border = %d,%d,%d,%d\n", l_tile->x0, l_tile->y0,l_tile->x1,l_tile->y1);*/ /*tile->numcomps = image->numcomps; */ for (compno = 0; compno < l_tile->numcomps; ++compno) { /*fprintf(stderr, "compno = %d/%d\n", compno, l_tile->numcomps);*/ l_image_comp->resno_decoded = 0; /* border of each l_tile component (global) */ l_tilec->x0 = opj_int_ceildiv(l_tile->x0, (OPJ_INT32)l_image_comp->dx); l_tilec->y0 = opj_int_ceildiv(l_tile->y0, (OPJ_INT32)l_image_comp->dy); l_tilec->x1 = opj_int_ceildiv(l_tile->x1, (OPJ_INT32)l_image_comp->dx); l_tilec->y1 = opj_int_ceildiv(l_tile->y1, (OPJ_INT32)l_image_comp->dy); /*fprintf(stderr, "\tTile compo border = %d,%d,%d,%d\n", l_tilec->x0, l_tilec->y0,l_tilec->x1,l_tilec->y1);*/ /* compute l_data_size with overflow check */ l_data_size = (OPJ_UINT32)(l_tilec->x1 - l_tilec->x0); if ((((OPJ_UINT32)-1) / l_data_size) < (OPJ_UINT32)(l_tilec->y1 - l_tilec->y0)) { /* TODO event */ return OPJ_FALSE; } l_data_size = l_data_size * (OPJ_UINT32)(l_tilec->y1 - l_tilec->y0); if ((((OPJ_UINT32)-1) / (OPJ_UINT32)sizeof(OPJ_UINT32)) < l_data_size) { /* TODO event */ return OPJ_FALSE; } l_data_size = l_data_size * (OPJ_UINT32)sizeof(OPJ_UINT32); l_tilec->numresolutions = l_tccp->numresolutions; if (l_tccp->numresolutions < l_cp->m_specific_param.m_dec.m_reduce) { l_tilec->minimum_num_resolutions = 1; } else { l_tilec->minimum_num_resolutions = l_tccp->numresolutions - l_cp->m_specific_param.m_dec.m_reduce; } 
l_tilec->data_size_needed = l_data_size; if (p_tcd->m_is_decoder && !opj_alloc_tile_component_data(l_tilec)) { return OPJ_FALSE; } l_data_size = l_tilec->numresolutions * (OPJ_UINT32)sizeof(opj_tcd_resolution_t); if (l_tilec->resolutions == 00) { l_tilec->resolutions = (opj_tcd_resolution_t *) opj_malloc(l_data_size); if (! l_tilec->resolutions ) { return OPJ_FALSE; } /*fprintf(stderr, "\tAllocate resolutions of tilec (opj_tcd_resolution_t): %d\n",l_data_size);*/ l_tilec->resolutions_size = l_data_size; memset(l_tilec->resolutions,0,l_data_size); } else if (l_data_size > l_tilec->resolutions_size) { opj_tcd_resolution_t* new_resolutions = (opj_tcd_resolution_t *) opj_realloc(l_tilec->resolutions, l_data_size); if (! new_resolutions) { /* opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to tile resolutions\n"); */ fprintf(stderr, "Not enough memory to tile resolutions\n"); opj_free(l_tilec->resolutions); l_tilec->resolutions = NULL; l_tilec->resolutions_size = 0; return OPJ_FALSE; } l_tilec->resolutions = new_resolutions; /*fprintf(stderr, "\tReallocate data of tilec (int): from %d to %d x OPJ_UINT32\n", l_tilec->resolutions_size, l_data_size);*/ memset(((OPJ_BYTE*) l_tilec->resolutions)+l_tilec->resolutions_size,0,l_data_size - l_tilec->resolutions_size); l_tilec->resolutions_size = l_data_size; } l_level_no = l_tilec->numresolutions - 1; l_res = l_tilec->resolutions; l_step_size = l_tccp->stepsizes; if (l_tccp->qmfbid == 0) { l_gain_ptr = &opj_dwt_getgain_real; } else { l_gain_ptr = &opj_dwt_getgain; } /*fprintf(stderr, "\tlevel_no=%d\n",l_level_no);*/ for (resno = 0; resno < l_tilec->numresolutions; ++resno) { /*fprintf(stderr, "\t\tresno = %d/%d\n", resno, l_tilec->numresolutions);*/ OPJ_INT32 tlcbgxstart, tlcbgystart /*, brcbgxend, brcbgyend*/; OPJ_UINT32 cbgwidthexpn, cbgheightexpn; OPJ_UINT32 cblkwidthexpn, cblkheightexpn; /* border for each resolution level (global) */ l_res->x0 = opj_int_ceildivpow2(l_tilec->x0, (OPJ_INT32)l_level_no); l_res->y0 = opj_int_ceildivpow2(l_tilec->y0, (OPJ_INT32)l_level_no); l_res->x1 = opj_int_ceildivpow2(l_tilec->x1, (OPJ_INT32)l_level_no); l_res->y1 = opj_int_ceildivpow2(l_tilec->y1, (OPJ_INT32)l_level_no); /*fprintf(stderr, "\t\t\tres_x0= %d, res_y0 =%d, res_x1=%d, res_y1=%d\n", l_res->x0, l_res->y0, l_res->x1, l_res->y1);*/ /* p. 35, table A-23, ISO/IEC FDIS154444-1 : 2000 (18 august 2000) */ l_pdx = l_tccp->prcw[resno]; l_pdy = l_tccp->prch[resno]; /*fprintf(stderr, "\t\t\tpdx=%d, pdy=%d\n", l_pdx, l_pdy);*/ /* p. 64, B.6, ISO/IEC FDIS15444-1 : 2000 (18 august 2000) */ l_tl_prc_x_start = opj_int_floordivpow2(l_res->x0, (OPJ_INT32)l_pdx) << l_pdx; l_tl_prc_y_start = opj_int_floordivpow2(l_res->y0, (OPJ_INT32)l_pdy) << l_pdy; l_br_prc_x_end = opj_int_ceildivpow2(l_res->x1, (OPJ_INT32)l_pdx) << l_pdx; l_br_prc_y_end = opj_int_ceildivpow2(l_res->y1, (OPJ_INT32)l_pdy) << l_pdy; /*fprintf(stderr, "\t\t\tprc_x_start=%d, prc_y_start=%d, br_prc_x_end=%d, br_prc_y_end=%d \n", l_tl_prc_x_start, l_tl_prc_y_start, l_br_prc_x_end ,l_br_prc_y_end );*/ l_res->pw = (l_res->x0 == l_res->x1) ? 0 : (OPJ_UINT32)((l_br_prc_x_end - l_tl_prc_x_start) >> l_pdx); l_res->ph = (l_res->y0 == l_res->y1) ? 
0 : (OPJ_UINT32)((l_br_prc_y_end - l_tl_prc_y_start) >> l_pdy); /*fprintf(stderr, "\t\t\tres_pw=%d, res_ph=%d\n", l_res->pw, l_res->ph );*/ l_nb_precincts = l_res->pw * l_res->ph; l_nb_precinct_size = l_nb_precincts * (OPJ_UINT32)sizeof(opj_tcd_precinct_t); if (resno == 0) { tlcbgxstart = l_tl_prc_x_start; tlcbgystart = l_tl_prc_y_start; /*brcbgxend = l_br_prc_x_end;*/ /* brcbgyend = l_br_prc_y_end;*/ cbgwidthexpn = l_pdx; cbgheightexpn = l_pdy; l_res->numbands = 1; } else { tlcbgxstart = opj_int_ceildivpow2(l_tl_prc_x_start, 1); tlcbgystart = opj_int_ceildivpow2(l_tl_prc_y_start, 1); /*brcbgxend = opj_int_ceildivpow2(l_br_prc_x_end, 1);*/ /*brcbgyend = opj_int_ceildivpow2(l_br_prc_y_end, 1);*/ cbgwidthexpn = l_pdx - 1; cbgheightexpn = l_pdy - 1; l_res->numbands = 3; } cblkwidthexpn = opj_uint_min(l_tccp->cblkw, cbgwidthexpn); cblkheightexpn = opj_uint_min(l_tccp->cblkh, cbgheightexpn); l_band = l_res->bands; for (bandno = 0; bandno < l_res->numbands; ++bandno) { OPJ_INT32 numbps; /*fprintf(stderr, "\t\t\tband_no=%d/%d\n", bandno, l_res->numbands );*/ if (resno == 0) { l_band->bandno = 0 ; l_band->x0 = opj_int_ceildivpow2(l_tilec->x0, (OPJ_INT32)l_level_no); l_band->y0 = opj_int_ceildivpow2(l_tilec->y0, (OPJ_INT32)l_level_no); l_band->x1 = opj_int_ceildivpow2(l_tilec->x1, (OPJ_INT32)l_level_no); l_band->y1 = opj_int_ceildivpow2(l_tilec->y1, (OPJ_INT32)l_level_no); } else { l_band->bandno = bandno + 1; /* x0b = 1 if bandno = 1 or 3 */ l_x0b = l_band->bandno&1; /* y0b = 1 if bandno = 2 or 3 */ l_y0b = (OPJ_INT32)((l_band->bandno)>>1); /* l_band border (global) */ l_band->x0 = opj_int_ceildivpow2(l_tilec->x0 - (1 << l_level_no) * l_x0b, (OPJ_INT32)(l_level_no + 1)); l_band->y0 = opj_int_ceildivpow2(l_tilec->y0 - (1 << l_level_no) * l_y0b, (OPJ_INT32)(l_level_no + 1)); l_band->x1 = opj_int_ceildivpow2(l_tilec->x1 - (1 << l_level_no) * l_x0b, (OPJ_INT32)(l_level_no + 1)); l_band->y1 = opj_int_ceildivpow2(l_tilec->y1 - (1 << l_level_no) * l_y0b, (OPJ_INT32)(l_level_no + 1)); } /** avoid an if with storing function pointer */ l_gain = (*l_gain_ptr) (l_band->bandno); numbps = (OPJ_INT32)(l_image_comp->prec + l_gain); l_band->stepsize = (OPJ_FLOAT32)(((1.0 + l_step_size->mant / 2048.0) * pow(2.0, (OPJ_INT32) (numbps - l_step_size->expn)))) * fraction; l_band->numbps = l_step_size->expn + (OPJ_INT32)l_tccp->numgbits - 1; /* WHY -1 ? */ if (! l_band->precincts) { l_band->precincts = (opj_tcd_precinct_t *) opj_malloc( /*3 * */ l_nb_precinct_size); if (! l_band->precincts) { return OPJ_FALSE; } /*fprintf(stderr, "\t\t\t\tAllocate precincts of a band (opj_tcd_precinct_t): %d\n",l_nb_precinct_size); */ memset(l_band->precincts,0,l_nb_precinct_size); l_band->precincts_data_size = l_nb_precinct_size; } else if (l_band->precincts_data_size < l_nb_precinct_size) { opj_tcd_precinct_t * new_precincts = (opj_tcd_precinct_t *) opj_realloc(l_band->precincts,/*3 * */ l_nb_precinct_size); if (! 
new_precincts) { /* opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to handle band precints\n"); */ fprintf(stderr, "Not enough memory to handle band precints\n"); opj_free(l_band->precincts); l_band->precincts = NULL; l_band->precincts_data_size = 0; return OPJ_FALSE; } l_band->precincts = new_precincts; /*fprintf(stderr, "\t\t\t\tReallocate precincts of a band (opj_tcd_precinct_t): from %d to %d\n",l_band->precincts_data_size, l_nb_precinct_size);*/ memset(((OPJ_BYTE *) l_band->precincts) + l_band->precincts_data_size,0,l_nb_precinct_size - l_band->precincts_data_size); l_band->precincts_data_size = l_nb_precinct_size; } l_current_precinct = l_band->precincts; for (precno = 0; precno < l_nb_precincts; ++precno) { OPJ_INT32 tlcblkxstart, tlcblkystart, brcblkxend, brcblkyend; OPJ_INT32 cbgxstart = tlcbgxstart + (OPJ_INT32)(precno % l_res->pw) * (1 << cbgwidthexpn); OPJ_INT32 cbgystart = tlcbgystart + (OPJ_INT32)(precno / l_res->pw) * (1 << cbgheightexpn); OPJ_INT32 cbgxend = cbgxstart + (1 << cbgwidthexpn); OPJ_INT32 cbgyend = cbgystart + (1 << cbgheightexpn); /*fprintf(stderr, "\t precno=%d; bandno=%d, resno=%d; compno=%d\n", precno, bandno , resno, compno);*/ /*fprintf(stderr, "\t tlcbgxstart(=%d) + (precno(=%d) percent res->pw(=%d)) * (1 << cbgwidthexpn(=%d)) \n",tlcbgxstart,precno,l_res->pw,cbgwidthexpn);*/ /* precinct size (global) */ /*fprintf(stderr, "\t cbgxstart=%d, l_band->x0 = %d \n",cbgxstart, l_band->x0);*/ l_current_precinct->x0 = opj_int_max(cbgxstart, l_band->x0); l_current_precinct->y0 = opj_int_max(cbgystart, l_band->y0); l_current_precinct->x1 = opj_int_min(cbgxend, l_band->x1); l_current_precinct->y1 = opj_int_min(cbgyend, l_band->y1); /*fprintf(stderr, "\t prc_x0=%d; prc_y0=%d, prc_x1=%d; prc_y1=%d\n",l_current_precinct->x0, l_current_precinct->y0 ,l_current_precinct->x1, l_current_precinct->y1);*/ tlcblkxstart = opj_int_floordivpow2(l_current_precinct->x0, (OPJ_INT32)cblkwidthexpn) << cblkwidthexpn; /*fprintf(stderr, "\t tlcblkxstart =%d\n",tlcblkxstart );*/ tlcblkystart = opj_int_floordivpow2(l_current_precinct->y0, (OPJ_INT32)cblkheightexpn) << cblkheightexpn; /*fprintf(stderr, "\t tlcblkystart =%d\n",tlcblkystart );*/ brcblkxend = opj_int_ceildivpow2(l_current_precinct->x1, (OPJ_INT32)cblkwidthexpn) << cblkwidthexpn; /*fprintf(stderr, "\t brcblkxend =%d\n",brcblkxend );*/ brcblkyend = opj_int_ceildivpow2(l_current_precinct->y1, (OPJ_INT32)cblkheightexpn) << cblkheightexpn; /*fprintf(stderr, "\t brcblkyend =%d\n",brcblkyend );*/ l_current_precinct->cw = (OPJ_UINT32)((brcblkxend - tlcblkxstart) >> cblkwidthexpn); l_current_precinct->ch = (OPJ_UINT32)((brcblkyend - tlcblkystart) >> cblkheightexpn); l_nb_code_blocks = l_current_precinct->cw * l_current_precinct->ch; /*fprintf(stderr, "\t\t\t\t precinct_cw = %d x recinct_ch = %d\n",l_current_precinct->cw, l_current_precinct->ch); */ l_nb_code_blocks_size = l_nb_code_blocks * (OPJ_UINT32)sizeof_block; if (! l_current_precinct->cblks.blocks) { l_current_precinct->cblks.blocks = opj_malloc(l_nb_code_blocks_size); if (! l_current_precinct->cblks.blocks ) { return OPJ_FALSE; } /*fprintf(stderr, "\t\t\t\tAllocate cblks of a precinct (opj_tcd_cblk_dec_t): %d\n",l_nb_code_blocks_size);*/ memset(l_current_precinct->cblks.blocks,0,l_nb_code_blocks_size); l_current_precinct->block_size = l_nb_code_blocks_size; } else if (l_nb_code_blocks_size > l_current_precinct->block_size) { void *new_blocks = opj_realloc(l_current_precinct->cblks.blocks, l_nb_code_blocks_size); if (! 
new_blocks) { opj_free(l_current_precinct->cblks.blocks); l_current_precinct->cblks.blocks = NULL; l_current_precinct->block_size = 0; /* opj_event_msg(p_manager, EVT_ERROR, "Not enough memory for current precinct codeblock element\n"); */ fprintf(stderr, "Not enough memory for current precinct codeblock element\n"); return OPJ_FALSE; } l_current_precinct->cblks.blocks = new_blocks; /*fprintf(stderr, "\t\t\t\tReallocate cblks of a precinct (opj_tcd_cblk_dec_t): from %d to %d\n",l_current_precinct->block_size, l_nb_code_blocks_size); */ memset(((OPJ_BYTE *) l_current_precinct->cblks.blocks) + l_current_precinct->block_size ,0 ,l_nb_code_blocks_size - l_current_precinct->block_size); l_current_precinct->block_size = l_nb_code_blocks_size; } if (! l_current_precinct->incltree) { l_current_precinct->incltree = opj_tgt_create(l_current_precinct->cw, l_current_precinct->ch); } else{ l_current_precinct->incltree = opj_tgt_init(l_current_precinct->incltree, l_current_precinct->cw, l_current_precinct->ch); } if (! l_current_precinct->incltree) { fprintf(stderr, "WARNING: No incltree created.\n"); /*return OPJ_FALSE;*/ } if (! l_current_precinct->imsbtree) { l_current_precinct->imsbtree = opj_tgt_create( l_current_precinct->cw, l_current_precinct->ch); } else { l_current_precinct->imsbtree = opj_tgt_init( l_current_precinct->imsbtree, l_current_precinct->cw, l_current_precinct->ch); } if (! l_current_precinct->imsbtree) { fprintf(stderr, "WARNING: No imsbtree created.\n"); /*return OPJ_FALSE;*/ } for (cblkno = 0; cblkno < l_nb_code_blocks; ++cblkno) { OPJ_INT32 cblkxstart = tlcblkxstart + (OPJ_INT32)(cblkno % l_current_precinct->cw) * (1 << cblkwidthexpn); OPJ_INT32 cblkystart = tlcblkystart + (OPJ_INT32)(cblkno / l_current_precinct->cw) * (1 << cblkheightexpn); OPJ_INT32 cblkxend = cblkxstart + (1 << cblkwidthexpn); OPJ_INT32 cblkyend = cblkystart + (1 << cblkheightexpn); if (isEncoder) { opj_tcd_cblk_enc_t* l_code_block = l_current_precinct->cblks.enc + cblkno; if (! opj_tcd_code_block_enc_allocate(l_code_block)) { return OPJ_FALSE; } /* code-block size (global) */ l_code_block->x0 = opj_int_max(cblkxstart, l_current_precinct->x0); l_code_block->y0 = opj_int_max(cblkystart, l_current_precinct->y0); l_code_block->x1 = opj_int_min(cblkxend, l_current_precinct->x1); l_code_block->y1 = opj_int_min(cblkyend, l_current_precinct->y1); if (! opj_tcd_code_block_enc_allocate_data(l_code_block)) { return OPJ_FALSE; } } else { opj_tcd_cblk_dec_t* l_code_block = l_current_precinct->cblks.dec + cblkno; if (! opj_tcd_code_block_dec_allocate(l_code_block)) { return OPJ_FALSE; } /* code-block size (global) */ l_code_block->x0 = opj_int_max(cblkxstart, l_current_precinct->x0); l_code_block->y0 = opj_int_max(cblkystart, l_current_precinct->y0); l_code_block->x1 = opj_int_min(cblkxend, l_current_precinct->x1); l_code_block->y1 = opj_int_min(cblkyend, l_current_precinct->y1); } } ++l_current_precinct; } /* precno */ ++l_band; ++l_step_size; } /* bandno */ ++l_res; --l_level_no; } /* resno */ ++l_tccp; ++l_tilec; ++l_image_comp; } /* compno */ return OPJ_TRUE; }
{'added': [(704, '\tl_tile->x0 = (OPJ_INT32)opj_uint_max(l_cp->tx0 + p * l_cp->tdx, l_image->x0);'), (705, '\tl_tile->y0 = (OPJ_INT32)opj_uint_max(l_cp->ty0 + q * l_cp->tdy, l_image->y0);'), (706, '\tl_tile->x1 = (OPJ_INT32)opj_uint_min(l_cp->tx0 + (p + 1) * l_cp->tdx, l_image->x1);'), (707, '\tl_tile->y1 = (OPJ_INT32)opj_uint_min(l_cp->ty0 + (q + 1) * l_cp->tdy, l_image->y1);'), (708, '')], 'deleted': [(704, '\tl_tile->x0 = opj_int_max((OPJ_INT32)(l_cp->tx0 + p * l_cp->tdx), (OPJ_INT32)l_image->x0);'), (705, '\tl_tile->y0 = opj_int_max((OPJ_INT32)(l_cp->ty0 + q * l_cp->tdy), (OPJ_INT32)l_image->y0);'), (706, '\tl_tile->x1 = opj_int_min((OPJ_INT32)(l_cp->tx0 + (p + 1) * l_cp->tdx), (OPJ_INT32)l_image->x1);'), (707, '\tl_tile->y1 = opj_int_min((OPJ_INT32)(l_cp->ty0 + (q + 1) * l_cp->tdy), (OPJ_INT32)l_image->y1);')]}
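The diff above is the whole of the fix: the four tile borders are now clamped with unsigned max/min and cast to OPJ_INT32 once at the end, instead of casting each unsigned operand to signed before comparing. A minimal sketch of why the original pattern is a CWE-190 hazard; int_max/uint_max are hypothetical stand-ins for the opj_* helpers, and the trigger values are purely illustrative:

#include <stdint.h>
#include <stdio.h>

static int32_t  int_max(int32_t a, int32_t b)    { return a > b ? a : b; }
static uint32_t uint_max(uint32_t a, uint32_t b) { return a > b ? a : b; }

int main(void)
{
    /* Chosen so that tx0 + p * tdx exceeds INT32_MAX. */
    uint32_t tx0 = 0x80000000u, p = 1u, tdx = 0x10u, image_x0 = 16u;

    /* Vulnerable pattern: the unsigned sum typically wraps to a negative
     * value in the cast on two's-complement targets, so the signed max
     * wrongly selects image_x0 as the tile border. */
    int32_t before = int_max((int32_t)(tx0 + p * tdx), (int32_t)image_x0);

    /* Patched pattern: compare in unsigned space, cast once at the end,
     * mirroring the opj_uint_max/opj_uint_min lines added by the diff. */
    int32_t after = (int32_t)uint_max(tx0 + p * tdx, image_x0);

    printf("before=%d after=%d\n", before, after);
    return 0;
}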
5
4
1,610
11,503
https://github.com/uclouvain/openjpeg
CVE-2018-20847
['CWE-190']
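The CWE-190 tag for this record also matches the guards already present in opj_tcd_init_tile, which use a division test (`((OPJ_UINT32)-1) / a < b`) before multiplying tile dimensions. A self-contained version of that idiom:

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

/* Same idea as the l_data_size checks in opj_tcd_init_tile:
 * a * b overflows uint32_t exactly when a != 0 and b > UINT32_MAX / a. */
static bool mul_u32_checked(uint32_t a, uint32_t b, uint32_t *out)
{
    if (a != 0u && b > UINT32_MAX / a) {
        return false;               /* product would wrap */
    }
    *out = a * b;
    return true;
}

int main(void)
{
    uint32_t r = 0;
    printf("65536*65536 ok? %d\n", mul_u32_checked(65536u, 65536u, &r));
    printf("65535*65535 ok? %d (r=%u)\n", mul_u32_checked(65535u, 65535u, &r), r);
    return 0;
}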
heap_2.c
pvPortMalloc
/* * FreeRTOS Kernel V10.4.2 * Copyright (C) 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a copy of * this software and associated documentation files (the "Software"), to deal in * the Software without restriction, including without limitation the rights to * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of * the Software, and to permit persons to whom the Software is furnished to do so, * subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. * * https://www.FreeRTOS.org * https://github.com/FreeRTOS * * 1 tab == 4 spaces! */ /* * A sample implementation of pvPortMalloc() and vPortFree() that permits * allocated blocks to be freed, but does not combine adjacent free blocks * into a single larger block (and so will fragment memory). See heap_4.c for * an equivalent that does combine adjacent blocks into single larger blocks. * * See heap_1.c, heap_3.c and heap_4.c for alternative implementations, and the * memory management pages of https://www.FreeRTOS.org for more information. */ #include <stdlib.h> /* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining * all the API functions to use the MPU wrappers. That should only be done when * task.h is included from an application file. */ #define MPU_WRAPPERS_INCLUDED_FROM_API_FILE #include "FreeRTOS.h" #include "task.h" #undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE #if ( configSUPPORT_DYNAMIC_ALLOCATION == 0 ) #error This file must not be used if configSUPPORT_DYNAMIC_ALLOCATION is 0 #endif /* A few bytes might be lost to byte aligning the heap start address. */ #define configADJUSTED_HEAP_SIZE ( configTOTAL_HEAP_SIZE - portBYTE_ALIGNMENT ) /* * Initialises the heap structures before their first use. */ static void prvHeapInit( void ); /* Allocate the memory for the heap. */ #if ( configAPPLICATION_ALLOCATED_HEAP == 1 ) /* The application writer has already defined the array used for the RTOS * heap - probably so it can be placed in a special segment or address. */ extern uint8_t ucHeap[ configTOTAL_HEAP_SIZE ]; #else static uint8_t ucHeap[ configTOTAL_HEAP_SIZE ]; #endif /* configAPPLICATION_ALLOCATED_HEAP */ /* Define the linked list structure. This is used to link free blocks in order * of their size. */ typedef struct A_BLOCK_LINK { struct A_BLOCK_LINK * pxNextFreeBlock; /*<< The next free block in the list. */ size_t xBlockSize; /*<< The size of the free block. */ } BlockLink_t; static const uint16_t heapSTRUCT_SIZE = ( ( sizeof( BlockLink_t ) + ( portBYTE_ALIGNMENT - 1 ) ) & ~portBYTE_ALIGNMENT_MASK ); #define heapMINIMUM_BLOCK_SIZE ( ( size_t ) ( heapSTRUCT_SIZE * 2 ) ) /* Create a couple of list links to mark the start and end of the list. */ static BlockLink_t xStart, xEnd; /* Keeps track of the number of free bytes remaining, but says nothing about * fragmentation. 
*/ static size_t xFreeBytesRemaining = configADJUSTED_HEAP_SIZE; /* STATIC FUNCTIONS ARE DEFINED AS MACROS TO MINIMIZE THE FUNCTION CALL DEPTH. */ /* * Insert a block into the list of free blocks - which is ordered by size of * the block. Small blocks at the start of the list and large blocks at the end * of the list. */ #define prvInsertBlockIntoFreeList( pxBlockToInsert ) \ { \ BlockLink_t * pxIterator; \ size_t xBlockSize; \ \ xBlockSize = pxBlockToInsert->xBlockSize; \ \ /* Iterate through the list until a block is found that has a larger size */ \ /* than the block we are inserting. */ \ for( pxIterator = &xStart; pxIterator->pxNextFreeBlock->xBlockSize < xBlockSize; pxIterator = pxIterator->pxNextFreeBlock ) \ { \ /* There is nothing to do here - just iterate to the correct position. */ \ } \ \ /* Update the list to include the block being inserted in the correct */ \ /* position. */ \ pxBlockToInsert->pxNextFreeBlock = pxIterator->pxNextFreeBlock; \ pxIterator->pxNextFreeBlock = pxBlockToInsert; \ } /*-----------------------------------------------------------*/ void * pvPortMalloc( size_t xWantedSize ) { BlockLink_t * pxBlock, * pxPreviousBlock, * pxNewBlockLink; static BaseType_t xHeapHasBeenInitialised = pdFALSE; void * pvReturn = NULL; vTaskSuspendAll(); { /* If this is the first call to malloc then the heap will require * initialisation to setup the list of free blocks. */ if( xHeapHasBeenInitialised == pdFALSE ) { prvHeapInit(); xHeapHasBeenInitialised = pdTRUE; } /* The wanted size is increased so it can contain a BlockLink_t * structure in addition to the requested amount of bytes. */ if( xWantedSize > 0 ) { xWantedSize += heapSTRUCT_SIZE; /* Ensure that blocks are always aligned to the required number of bytes. */ if( ( xWantedSize & portBYTE_ALIGNMENT_MASK ) != 0 ) { /* Byte alignment required. */ xWantedSize += ( portBYTE_ALIGNMENT - ( xWantedSize & portBYTE_ALIGNMENT_MASK ) ); } } if( ( xWantedSize > 0 ) && ( xWantedSize < configADJUSTED_HEAP_SIZE ) ) { /* Blocks are stored in byte order - traverse the list from the start * (smallest) block until one of adequate size is found. */ pxPreviousBlock = &xStart; pxBlock = xStart.pxNextFreeBlock; while( ( pxBlock->xBlockSize < xWantedSize ) && ( pxBlock->pxNextFreeBlock != NULL ) ) { pxPreviousBlock = pxBlock; pxBlock = pxBlock->pxNextFreeBlock; } /* If we found the end marker then a block of adequate size was not found. */ if( pxBlock != &xEnd ) { /* Return the memory space - jumping over the BlockLink_t structure * at its start. */ pvReturn = ( void * ) ( ( ( uint8_t * ) pxPreviousBlock->pxNextFreeBlock ) + heapSTRUCT_SIZE ); /* This block is being returned for use so must be taken out of the * list of free blocks. */ pxPreviousBlock->pxNextFreeBlock = pxBlock->pxNextFreeBlock; /* If the block is larger than required it can be split into two. */ if( ( pxBlock->xBlockSize - xWantedSize ) > heapMINIMUM_BLOCK_SIZE ) { /* This block is to be split into two. Create a new block * following the number of bytes requested. The void cast is * used to prevent byte alignment warnings from the compiler. */ pxNewBlockLink = ( void * ) ( ( ( uint8_t * ) pxBlock ) + xWantedSize ); /* Calculate the sizes of two blocks split from the single * block. */ pxNewBlockLink->xBlockSize = pxBlock->xBlockSize - xWantedSize; pxBlock->xBlockSize = xWantedSize; /* Insert the new block into the list of free blocks. 
*/ prvInsertBlockIntoFreeList( ( pxNewBlockLink ) ); } xFreeBytesRemaining -= pxBlock->xBlockSize; } } traceMALLOC( pvReturn, xWantedSize ); } ( void ) xTaskResumeAll(); #if ( configUSE_MALLOC_FAILED_HOOK == 1 ) { if( pvReturn == NULL ) { extern void vApplicationMallocFailedHook( void ); vApplicationMallocFailedHook(); } } #endif return pvReturn; } /*-----------------------------------------------------------*/ void vPortFree( void * pv ) { uint8_t * puc = ( uint8_t * ) pv; BlockLink_t * pxLink; if( pv != NULL ) { /* The memory being freed will have an BlockLink_t structure immediately * before it. */ puc -= heapSTRUCT_SIZE; /* This unexpected casting is to keep some compilers from issuing * byte alignment warnings. */ pxLink = ( void * ) puc; vTaskSuspendAll(); { /* Add this block to the list of free blocks. */ prvInsertBlockIntoFreeList( ( ( BlockLink_t * ) pxLink ) ); xFreeBytesRemaining += pxLink->xBlockSize; traceFREE( pv, pxLink->xBlockSize ); } ( void ) xTaskResumeAll(); } } /*-----------------------------------------------------------*/ size_t xPortGetFreeHeapSize( void ) { return xFreeBytesRemaining; } /*-----------------------------------------------------------*/ void vPortInitialiseBlocks( void ) { /* This just exists to keep the linker quiet. */ } /*-----------------------------------------------------------*/ static void prvHeapInit( void ) { BlockLink_t * pxFirstFreeBlock; uint8_t * pucAlignedHeap; /* Ensure the heap starts on a correctly aligned boundary. */ pucAlignedHeap = ( uint8_t * ) ( ( ( portPOINTER_SIZE_TYPE ) & ucHeap[ portBYTE_ALIGNMENT ] ) & ( ~( ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK ) ) ); /* xStart is used to hold a pointer to the first item in the list of free * blocks. The void cast is used to prevent compiler warnings. */ xStart.pxNextFreeBlock = ( void * ) pucAlignedHeap; xStart.xBlockSize = ( size_t ) 0; /* xEnd is used to mark the end of the list of free blocks. */ xEnd.xBlockSize = configADJUSTED_HEAP_SIZE; xEnd.pxNextFreeBlock = NULL; /* To start with there is a single free block that is sized to take up the * entire heap space. */ pxFirstFreeBlock = ( void * ) pucAlignedHeap; pxFirstFreeBlock->xBlockSize = configADJUSTED_HEAP_SIZE; pxFirstFreeBlock->pxNextFreeBlock = &xEnd; } /*-----------------------------------------------------------*/
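In the pvPortMalloc above, `xWantedSize += heapSTRUCT_SIZE` and the subsequent alignment bump are unchecked, so a near-SIZE_MAX request wraps to a small value and the allocator can hand back a block far smaller than asked for; the patched code_after that follows adds explicit wrap checks. A tiny demonstration of the wraparound itself, with an assumed 8-byte header standing in for heapSTRUCT_SIZE:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* On a 32-bit size_t, adding an 8-byte header to a huge request
     * wraps modulo 2^32 to a tiny value. */
    uint32_t wanted = 0xFFFFFFF9u;  /* attacker-influenced request  */
    uint32_t header = 8u;           /* stand-in for heapSTRUCT_SIZE */
    printf("%u + %u wraps to %u\n", wanted, header, wanted + header);
    return 0;
}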
/* * FreeRTOS Kernel V10.4.2 * Copyright (C) 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a copy of * this software and associated documentation files (the "Software"), to deal in * the Software without restriction, including without limitation the rights to * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of * the Software, and to permit persons to whom the Software is furnished to do so, * subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. * * https://www.FreeRTOS.org * https://github.com/FreeRTOS * */ /* * A sample implementation of pvPortMalloc() and vPortFree() that permits * allocated blocks to be freed, but does not combine adjacent free blocks * into a single larger block (and so will fragment memory). See heap_4.c for * an equivalent that does combine adjacent blocks into single larger blocks. * * See heap_1.c, heap_3.c and heap_4.c for alternative implementations, and the * memory management pages of https://www.FreeRTOS.org for more information. */ #include <stdlib.h> /* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining * all the API functions to use the MPU wrappers. That should only be done when * task.h is included from an application file. */ #define MPU_WRAPPERS_INCLUDED_FROM_API_FILE #include "FreeRTOS.h" #include "task.h" #undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE #if ( configSUPPORT_DYNAMIC_ALLOCATION == 0 ) #error This file must not be used if configSUPPORT_DYNAMIC_ALLOCATION is 0 #endif /* A few bytes might be lost to byte aligning the heap start address. */ #define configADJUSTED_HEAP_SIZE ( configTOTAL_HEAP_SIZE - portBYTE_ALIGNMENT ) /* * Initialises the heap structures before their first use. */ static void prvHeapInit( void ); /* Allocate the memory for the heap. */ #if ( configAPPLICATION_ALLOCATED_HEAP == 1 ) /* The application writer has already defined the array used for the RTOS * heap - probably so it can be placed in a special segment or address. */ extern uint8_t ucHeap[ configTOTAL_HEAP_SIZE ]; #else static uint8_t ucHeap[ configTOTAL_HEAP_SIZE ]; #endif /* configAPPLICATION_ALLOCATED_HEAP */ /* Define the linked list structure. This is used to link free blocks in order * of their size. */ typedef struct A_BLOCK_LINK { struct A_BLOCK_LINK * pxNextFreeBlock; /*<< The next free block in the list. */ size_t xBlockSize; /*<< The size of the free block. */ } BlockLink_t; static const uint16_t heapSTRUCT_SIZE = ( ( sizeof( BlockLink_t ) + ( portBYTE_ALIGNMENT - 1 ) ) & ~portBYTE_ALIGNMENT_MASK ); #define heapMINIMUM_BLOCK_SIZE ( ( size_t ) ( heapSTRUCT_SIZE * 2 ) ) /* Create a couple of list links to mark the start and end of the list. */ static BlockLink_t xStart, xEnd; /* Keeps track of the number of free bytes remaining, but says nothing about * fragmentation. 
*/ static size_t xFreeBytesRemaining = configADJUSTED_HEAP_SIZE; /* STATIC FUNCTIONS ARE DEFINED AS MACROS TO MINIMIZE THE FUNCTION CALL DEPTH. */ /* * Insert a block into the list of free blocks - which is ordered by size of * the block. Small blocks at the start of the list and large blocks at the end * of the list. */ #define prvInsertBlockIntoFreeList( pxBlockToInsert ) \ { \ BlockLink_t * pxIterator; \ size_t xBlockSize; \ \ xBlockSize = pxBlockToInsert->xBlockSize; \ \ /* Iterate through the list until a block is found that has a larger size */ \ /* than the block we are inserting. */ \ for( pxIterator = &xStart; pxIterator->pxNextFreeBlock->xBlockSize < xBlockSize; pxIterator = pxIterator->pxNextFreeBlock ) \ { \ /* There is nothing to do here - just iterate to the correct position. */ \ } \ \ /* Update the list to include the block being inserted in the correct */ \ /* position. */ \ pxBlockToInsert->pxNextFreeBlock = pxIterator->pxNextFreeBlock; \ pxIterator->pxNextFreeBlock = pxBlockToInsert; \ } /*-----------------------------------------------------------*/ void * pvPortMalloc( size_t xWantedSize ) { BlockLink_t * pxBlock, * pxPreviousBlock, * pxNewBlockLink; static BaseType_t xHeapHasBeenInitialised = pdFALSE; void * pvReturn = NULL; vTaskSuspendAll(); { /* If this is the first call to malloc then the heap will require * initialisation to setup the list of free blocks. */ if( xHeapHasBeenInitialised == pdFALSE ) { prvHeapInit(); xHeapHasBeenInitialised = pdTRUE; } /* The wanted size must be increased so it can contain a BlockLink_t * structure in addition to the requested amount of bytes. */ if( ( xWantedSize > 0 ) && ( ( xWantedSize + heapSTRUCT_SIZE ) > xWantedSize ) ) /* Overflow check */ { xWantedSize += heapSTRUCT_SIZE; /* Byte alignment required. Check for overflow. */ if( ( xWantedSize + ( portBYTE_ALIGNMENT - ( xWantedSize & portBYTE_ALIGNMENT_MASK ) ) ) > xWantedSize ) { xWantedSize += ( portBYTE_ALIGNMENT - ( xWantedSize & portBYTE_ALIGNMENT_MASK ) ); configASSERT( ( xWantedSize & portBYTE_ALIGNMENT_MASK ) == 0 ); } else { xWantedSize = 0; } } else { xWantedSize = 0; } if( ( xWantedSize > 0 ) && ( xWantedSize <= xFreeBytesRemaining ) ) { /* Blocks are stored in byte order - traverse the list from the start * (smallest) block until one of adequate size is found. */ pxPreviousBlock = &xStart; pxBlock = xStart.pxNextFreeBlock; while( ( pxBlock->xBlockSize < xWantedSize ) && ( pxBlock->pxNextFreeBlock != NULL ) ) { pxPreviousBlock = pxBlock; pxBlock = pxBlock->pxNextFreeBlock; } /* If we found the end marker then a block of adequate size was not found. */ if( pxBlock != &xEnd ) { /* Return the memory space - jumping over the BlockLink_t structure * at its start. */ pvReturn = ( void * ) ( ( ( uint8_t * ) pxPreviousBlock->pxNextFreeBlock ) + heapSTRUCT_SIZE ); /* This block is being returned for use so must be taken out of the * list of free blocks. */ pxPreviousBlock->pxNextFreeBlock = pxBlock->pxNextFreeBlock; /* If the block is larger than required it can be split into two. */ if( ( pxBlock->xBlockSize - xWantedSize ) > heapMINIMUM_BLOCK_SIZE ) { /* This block is to be split into two. Create a new block * following the number of bytes requested. The void cast is * used to prevent byte alignment warnings from the compiler. */ pxNewBlockLink = ( void * ) ( ( ( uint8_t * ) pxBlock ) + xWantedSize ); /* Calculate the sizes of two blocks split from the single * block. 
*/ pxNewBlockLink->xBlockSize = pxBlock->xBlockSize - xWantedSize; pxBlock->xBlockSize = xWantedSize; /* Insert the new block into the list of free blocks. */ prvInsertBlockIntoFreeList( ( pxNewBlockLink ) ); } xFreeBytesRemaining -= pxBlock->xBlockSize; } } traceMALLOC( pvReturn, xWantedSize ); } ( void ) xTaskResumeAll(); #if ( configUSE_MALLOC_FAILED_HOOK == 1 ) { if( pvReturn == NULL ) { extern void vApplicationMallocFailedHook( void ); vApplicationMallocFailedHook(); } } #endif return pvReturn; } /*-----------------------------------------------------------*/ void vPortFree( void * pv ) { uint8_t * puc = ( uint8_t * ) pv; BlockLink_t * pxLink; if( pv != NULL ) { /* The memory being freed will have an BlockLink_t structure immediately * before it. */ puc -= heapSTRUCT_SIZE; /* This unexpected casting is to keep some compilers from issuing * byte alignment warnings. */ pxLink = ( void * ) puc; vTaskSuspendAll(); { /* Add this block to the list of free blocks. */ prvInsertBlockIntoFreeList( ( ( BlockLink_t * ) pxLink ) ); xFreeBytesRemaining += pxLink->xBlockSize; traceFREE( pv, pxLink->xBlockSize ); } ( void ) xTaskResumeAll(); } } /*-----------------------------------------------------------*/ size_t xPortGetFreeHeapSize( void ) { return xFreeBytesRemaining; } /*-----------------------------------------------------------*/ void vPortInitialiseBlocks( void ) { /* This just exists to keep the linker quiet. */ } /*-----------------------------------------------------------*/ static void prvHeapInit( void ) { BlockLink_t * pxFirstFreeBlock; uint8_t * pucAlignedHeap; /* Ensure the heap starts on a correctly aligned boundary. */ pucAlignedHeap = ( uint8_t * ) ( ( ( portPOINTER_SIZE_TYPE ) & ucHeap[ portBYTE_ALIGNMENT ] ) & ( ~( ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK ) ) ); /* xStart is used to hold a pointer to the first item in the list of free * blocks. The void cast is used to prevent compiler warnings. */ xStart.pxNextFreeBlock = ( void * ) pucAlignedHeap; xStart.xBlockSize = ( size_t ) 0; /* xEnd is used to mark the end of the list of free blocks. */ xEnd.xBlockSize = configADJUSTED_HEAP_SIZE; xEnd.pxNextFreeBlock = NULL; /* To start with there is a single free block that is sized to take up the * entire heap space. */ pxFirstFreeBlock = ( void * ) pucAlignedHeap; pxFirstFreeBlock->xBlockSize = configADJUSTED_HEAP_SIZE; pxFirstFreeBlock->pxNextFreeBlock = &xEnd; } /*-----------------------------------------------------------*/
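The fixed file above closes with prvHeapInit(), which derives an aligned heap start by stepping one alignment unit into ucHeap and masking off the low address bits. Below is a minimal standalone sketch of that masking idiom, assuming an 8-byte alignment; ALIGNMENT, ALIGNMENT_MASK and ucBuffer are illustrative stand-ins for the portBYTE_ALIGNMENT macros and ucHeap, not FreeRTOS symbols.

#include <stdint.h>
#include <stdio.h>

#define ALIGNMENT      ( ( uintptr_t ) 8 )  /* stand-in for portBYTE_ALIGNMENT */
#define ALIGNMENT_MASK ( ALIGNMENT - 1 )    /* stand-in for portBYTE_ALIGNMENT_MASK */

static uint8_t ucBuffer[ 64 ];              /* stand-in for ucHeap */

int main( void )
{
    /* Step ALIGNMENT bytes into the buffer, then clear the low bits: the
     * result is aligned and lies no more than ALIGNMENT bytes past the raw
     * start, which is the margin configADJUSTED_HEAP_SIZE sacrifices. */
    uintptr_t xRaw = ( uintptr_t ) &ucBuffer[ ALIGNMENT ];
    uint8_t * pucAligned = ( uint8_t * ) ( xRaw & ~ALIGNMENT_MASK );

    printf( "raw start %p, aligned start %p\n",
            ( void * ) ucBuffer, ( void * ) pucAligned );
    return 0;
}

The two copies of pvPortMalloc() that follow are the function before and after the fix.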
void * pvPortMalloc( size_t xWantedSize ) { BlockLink_t * pxBlock, * pxPreviousBlock, * pxNewBlockLink; static BaseType_t xHeapHasBeenInitialised = pdFALSE; void * pvReturn = NULL; vTaskSuspendAll(); { /* If this is the first call to malloc then the heap will require * initialisation to setup the list of free blocks. */ if( xHeapHasBeenInitialised == pdFALSE ) { prvHeapInit(); xHeapHasBeenInitialised = pdTRUE; } /* The wanted size is increased so it can contain a BlockLink_t * structure in addition to the requested amount of bytes. */ if( xWantedSize > 0 ) { xWantedSize += heapSTRUCT_SIZE; /* Ensure that blocks are always aligned to the required number of bytes. */ if( ( xWantedSize & portBYTE_ALIGNMENT_MASK ) != 0 ) { /* Byte alignment required. */ xWantedSize += ( portBYTE_ALIGNMENT - ( xWantedSize & portBYTE_ALIGNMENT_MASK ) ); } } if( ( xWantedSize > 0 ) && ( xWantedSize < configADJUSTED_HEAP_SIZE ) ) { /* Blocks are stored in byte order - traverse the list from the start * (smallest) block until one of adequate size is found. */ pxPreviousBlock = &xStart; pxBlock = xStart.pxNextFreeBlock; while( ( pxBlock->xBlockSize < xWantedSize ) && ( pxBlock->pxNextFreeBlock != NULL ) ) { pxPreviousBlock = pxBlock; pxBlock = pxBlock->pxNextFreeBlock; } /* If we found the end marker then a block of adequate size was not found. */ if( pxBlock != &xEnd ) { /* Return the memory space - jumping over the BlockLink_t structure * at its start. */ pvReturn = ( void * ) ( ( ( uint8_t * ) pxPreviousBlock->pxNextFreeBlock ) + heapSTRUCT_SIZE ); /* This block is being returned for use so must be taken out of the * list of free blocks. */ pxPreviousBlock->pxNextFreeBlock = pxBlock->pxNextFreeBlock; /* If the block is larger than required it can be split into two. */ if( ( pxBlock->xBlockSize - xWantedSize ) > heapMINIMUM_BLOCK_SIZE ) { /* This block is to be split into two. Create a new block * following the number of bytes requested. The void cast is * used to prevent byte alignment warnings from the compiler. */ pxNewBlockLink = ( void * ) ( ( ( uint8_t * ) pxBlock ) + xWantedSize ); /* Calculate the sizes of two blocks split from the single * block. */ pxNewBlockLink->xBlockSize = pxBlock->xBlockSize - xWantedSize; pxBlock->xBlockSize = xWantedSize; /* Insert the new block into the list of free blocks. */ prvInsertBlockIntoFreeList( ( pxNewBlockLink ) ); } xFreeBytesRemaining -= pxBlock->xBlockSize; } } traceMALLOC( pvReturn, xWantedSize ); } ( void ) xTaskResumeAll(); #if ( configUSE_MALLOC_FAILED_HOOK == 1 ) { if( pvReturn == NULL ) { extern void vApplicationMallocFailedHook( void ); vApplicationMallocFailedHook(); } } #endif return pvReturn; }
void * pvPortMalloc( size_t xWantedSize ) { BlockLink_t * pxBlock, * pxPreviousBlock, * pxNewBlockLink; static BaseType_t xHeapHasBeenInitialised = pdFALSE; void * pvReturn = NULL; vTaskSuspendAll(); { /* If this is the first call to malloc then the heap will require * initialisation to setup the list of free blocks. */ if( xHeapHasBeenInitialised == pdFALSE ) { prvHeapInit(); xHeapHasBeenInitialised = pdTRUE; } /* The wanted size must be increased so it can contain a BlockLink_t * structure in addition to the requested amount of bytes. */ if( ( xWantedSize > 0 ) && ( ( xWantedSize + heapSTRUCT_SIZE ) > xWantedSize ) ) /* Overflow check */ { xWantedSize += heapSTRUCT_SIZE; /* Byte alignment required. Check for overflow. */ if( ( xWantedSize + ( portBYTE_ALIGNMENT - ( xWantedSize & portBYTE_ALIGNMENT_MASK ) ) ) > xWantedSize ) { xWantedSize += ( portBYTE_ALIGNMENT - ( xWantedSize & portBYTE_ALIGNMENT_MASK ) ); configASSERT( ( xWantedSize & portBYTE_ALIGNMENT_MASK ) == 0 ); } else { xWantedSize = 0; } } else { xWantedSize = 0; } if( ( xWantedSize > 0 ) && ( xWantedSize <= xFreeBytesRemaining ) ) { /* Blocks are stored in byte order - traverse the list from the start * (smallest) block until one of adequate size is found. */ pxPreviousBlock = &xStart; pxBlock = xStart.pxNextFreeBlock; while( ( pxBlock->xBlockSize < xWantedSize ) && ( pxBlock->pxNextFreeBlock != NULL ) ) { pxPreviousBlock = pxBlock; pxBlock = pxBlock->pxNextFreeBlock; } /* If we found the end marker then a block of adequate size was not found. */ if( pxBlock != &xEnd ) { /* Return the memory space - jumping over the BlockLink_t structure * at its start. */ pvReturn = ( void * ) ( ( ( uint8_t * ) pxPreviousBlock->pxNextFreeBlock ) + heapSTRUCT_SIZE ); /* This block is being returned for use so must be taken out of the * list of free blocks. */ pxPreviousBlock->pxNextFreeBlock = pxBlock->pxNextFreeBlock; /* If the block is larger than required it can be split into two. */ if( ( pxBlock->xBlockSize - xWantedSize ) > heapMINIMUM_BLOCK_SIZE ) { /* This block is to be split into two. Create a new block * following the number of bytes requested. The void cast is * used to prevent byte alignment warnings from the compiler. */ pxNewBlockLink = ( void * ) ( ( ( uint8_t * ) pxBlock ) + xWantedSize ); /* Calculate the sizes of two blocks split from the single * block. */ pxNewBlockLink->xBlockSize = pxBlock->xBlockSize - xWantedSize; pxBlock->xBlockSize = xWantedSize; /* Insert the new block into the list of free blocks. */ prvInsertBlockIntoFreeList( ( pxNewBlockLink ) ); } xFreeBytesRemaining -= pxBlock->xBlockSize; } } traceMALLOC( pvReturn, xWantedSize ); } ( void ) xTaskResumeAll(); #if ( configUSE_MALLOC_FAILED_HOOK == 1 ) { if( pvReturn == NULL ) { extern void vApplicationMallocFailedHook( void ); vApplicationMallocFailedHook(); } } #endif return pvReturn; }
{'added': [(134, ' /* The wanted size must be increased so it can contain a BlockLink_t'), (136, ' if( ( xWantedSize > 0 ) &&'), (137, ' ( ( xWantedSize + heapSTRUCT_SIZE ) > xWantedSize ) ) /* Overflow check */'), (141, ' /* Byte alignment required. Check for overflow. */'), (142, ' if( ( xWantedSize + ( portBYTE_ALIGNMENT - ( xWantedSize & portBYTE_ALIGNMENT_MASK ) ) )'), (143, ' > xWantedSize )'), (146, ' configASSERT( ( xWantedSize & portBYTE_ALIGNMENT_MASK ) == 0 );'), (148, ' else'), (149, ' {'), (150, ' xWantedSize = 0;'), (151, ' }'), (152, ' }'), (153, ' else'), (154, ' {'), (155, ' xWantedSize = 0;'), (158, ''), (159, ' if( ( xWantedSize > 0 ) && ( xWantedSize <= xFreeBytesRemaining ) )')], 'deleted': [(25, ' * 1 tab == 4 spaces!'), (135, ' /* The wanted size is increased so it can contain a BlockLink_t'), (137, ' if( xWantedSize > 0 )'), (141, ' /* Ensure that blocks are always aligned to the required number of bytes. */'), (142, ' if( ( xWantedSize & portBYTE_ALIGNMENT_MASK ) != 0 )'), (144, ' /* Byte alignment required. */'), (149, ' if( ( xWantedSize > 0 ) && ( xWantedSize < configADJUSTED_HEAP_SIZE ) )')]}
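The diff field above shows the essence of the fix: before adding the heapSTRUCT_SIZE header and the alignment padding, the patched pvPortMalloc() now verifies that each addition does not wrap around. For unsigned types such as size_t, a wrap can be detected by checking that the sum is still strictly larger than one operand. A minimal sketch of that pattern, with HEADER_SIZE as an illustrative stand-in for heapSTRUCT_SIZE:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define HEADER_SIZE ( ( size_t ) 8 )  /* illustrative stand-in for heapSTRUCT_SIZE */

static size_t add_header_checked( size_t xWantedSize )
{
    /* Since HEADER_SIZE > 0, the sum must be strictly greater than the
     * original value; a smaller or equal result means the addition wrapped
     * around SIZE_MAX. */
    if( ( xWantedSize > 0 ) &&
        ( ( xWantedSize + HEADER_SIZE ) > xWantedSize ) )
    {
        return xWantedSize + HEADER_SIZE;
    }

    return 0; /* reject zero-sized and wrapping requests, as the patch does */
}

int main( void )
{
    printf( "%zu\n", add_header_checked( 100 ) );           /* 108 */
    printf( "%zu\n", add_header_checked( SIZE_MAX - 4 ) );  /* 0: would wrap */
    printf( "%zu\n", add_header_checked( 0 ) );             /* 0 */
    return 0;
}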
num_lines_added: 17
num_lines_deleted: 7
num_lines_in_file: 135
num_tokens_in_file: 667
repo: https://github.com/FreeRTOS/FreeRTOS-Kernel
cve_id: CVE-2021-32020
cwe_id: ['CWE-119']
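The CWE-119 tag (improper restriction of operations within the bounds of a memory buffer) follows from what the pre-fix arithmetic allows. The sketch below models the old, unchecked size adjustment in isolation to show how a near-SIZE_MAX request collapses into a tiny adjusted size; HEADER_SIZE and the alignment constants are illustrative stand-ins for heapSTRUCT_SIZE and the portBYTE_ALIGNMENT macros.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define HEADER_SIZE    ( ( size_t ) 8 )
#define ALIGNMENT      ( ( size_t ) 8 )
#define ALIGNMENT_MASK ( ALIGNMENT - 1 )

int main( void )
{
    size_t xWantedSize = SIZE_MAX - 3;  /* attacker-controlled request */

    /* Pre-fix logic: unconditional adjustment, no overflow checks. */
    xWantedSize += HEADER_SIZE;         /* wraps around to 4 */
    if( ( xWantedSize & ALIGNMENT_MASK ) != 0 )
    {
        xWantedSize += ALIGNMENT - ( xWantedSize & ALIGNMENT_MASK ); /* now 8 */
    }

    /* The wrapped size easily passes the old "< configADJUSTED_HEAP_SIZE"
     * test, so the allocator hands back a tiny block for a request of
     * nearly SIZE_MAX bytes; any use of the "granted" size then writes far
     * outside the buffer. */
    printf( "adjusted size: %zu\n", xWantedSize );
    return 0;
}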
file_name: key.c
method_name: key_create_or_update
/* Basic authentication token and access key management * * Copyright (C) 2004-2008 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/module.h> #include <linux/init.h> #include <linux/poison.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/security.h> #include <linux/workqueue.h> #include <linux/random.h> #include <linux/err.h> #include "internal.h" struct kmem_cache *key_jar; struct rb_root key_serial_tree; /* tree of keys indexed by serial */ DEFINE_SPINLOCK(key_serial_lock); struct rb_root key_user_tree; /* tree of quota records indexed by UID */ DEFINE_SPINLOCK(key_user_lock); unsigned int key_quota_root_maxkeys = 200; /* root's key count quota */ unsigned int key_quota_root_maxbytes = 20000; /* root's key space quota */ unsigned int key_quota_maxkeys = 200; /* general key count quota */ unsigned int key_quota_maxbytes = 20000; /* general key space quota */ static LIST_HEAD(key_types_list); static DECLARE_RWSEM(key_types_sem); /* We serialise key instantiation and link */ DEFINE_MUTEX(key_construction_mutex); #ifdef KEY_DEBUGGING void __key_check(const struct key *key) { printk("__key_check: key %p {%08x} should be {%08x}\n", key, key->magic, KEY_DEBUG_MAGIC); BUG(); } #endif /* * Get the key quota record for a user, allocating a new record if one doesn't * already exist. */ struct key_user *key_user_lookup(kuid_t uid) { struct key_user *candidate = NULL, *user; struct rb_node *parent = NULL; struct rb_node **p; try_again: p = &key_user_tree.rb_node; spin_lock(&key_user_lock); /* search the tree for a user record with a matching UID */ while (*p) { parent = *p; user = rb_entry(parent, struct key_user, node); if (uid_lt(uid, user->uid)) p = &(*p)->rb_left; else if (uid_gt(uid, user->uid)) p = &(*p)->rb_right; else goto found; } /* if we get here, we failed to find a match in the tree */ if (!candidate) { /* allocate a candidate user record if we don't already have * one */ spin_unlock(&key_user_lock); user = NULL; candidate = kmalloc(sizeof(struct key_user), GFP_KERNEL); if (unlikely(!candidate)) goto out; /* the allocation may have scheduled, so we need to repeat the * search lest someone else added the record whilst we were * asleep */ goto try_again; } /* if we get here, then the user record still hadn't appeared on the * second pass - so we use the candidate record */ atomic_set(&candidate->usage, 1); atomic_set(&candidate->nkeys, 0); atomic_set(&candidate->nikeys, 0); candidate->uid = uid; candidate->qnkeys = 0; candidate->qnbytes = 0; spin_lock_init(&candidate->lock); mutex_init(&candidate->cons_lock); rb_link_node(&candidate->node, parent, p); rb_insert_color(&candidate->node, &key_user_tree); spin_unlock(&key_user_lock); user = candidate; goto out; /* okay - we found a user record for this UID */ found: atomic_inc(&user->usage); spin_unlock(&key_user_lock); kfree(candidate); out: return user; } /* * Dispose of a user structure */ void key_user_put(struct key_user *user) { if (atomic_dec_and_lock(&user->usage, &key_user_lock)) { rb_erase(&user->node, &key_user_tree); spin_unlock(&key_user_lock); kfree(user); } } /* * Allocate a serial number for a key. These are assigned randomly to avoid * security issues through covert channel problems. 
*/ static inline void key_alloc_serial(struct key *key) { struct rb_node *parent, **p; struct key *xkey; /* propose a random serial number and look for a hole for it in the * serial number tree */ do { get_random_bytes(&key->serial, sizeof(key->serial)); key->serial >>= 1; /* negative numbers are not permitted */ } while (key->serial < 3); spin_lock(&key_serial_lock); attempt_insertion: parent = NULL; p = &key_serial_tree.rb_node; while (*p) { parent = *p; xkey = rb_entry(parent, struct key, serial_node); if (key->serial < xkey->serial) p = &(*p)->rb_left; else if (key->serial > xkey->serial) p = &(*p)->rb_right; else goto serial_exists; } /* we've found a suitable hole - arrange for this key to occupy it */ rb_link_node(&key->serial_node, parent, p); rb_insert_color(&key->serial_node, &key_serial_tree); spin_unlock(&key_serial_lock); return; /* we found a key with the proposed serial number - walk the tree from * that point looking for the next unused serial number */ serial_exists: for (;;) { key->serial++; if (key->serial < 3) { key->serial = 3; goto attempt_insertion; } parent = rb_next(parent); if (!parent) goto attempt_insertion; xkey = rb_entry(parent, struct key, serial_node); if (key->serial < xkey->serial) goto attempt_insertion; } } /** * key_alloc - Allocate a key of the specified type. * @type: The type of key to allocate. * @desc: The key description to allow the key to be searched out. * @uid: The owner of the new key. * @gid: The group ID for the new key's group permissions. * @cred: The credentials specifying UID namespace. * @perm: The permissions mask of the new key. * @flags: Flags specifying quota properties. * * Allocate a key of the specified type with the attributes given. The key is * returned in an uninstantiated state and the caller needs to instantiate the * key before returning. * * The user's key count quota is updated to reflect the creation of the key and * the user's key data quota has the default for the key type reserved. The * instantiation function should amend this as necessary. If insufficient * quota is available, -EDQUOT will be returned. * * The LSM security modules can prevent a key being created, in which case * -EACCES will be returned. * * Returns a pointer to the new key if successful and an error code otherwise. * * Note that the caller needs to ensure the key type isn't uninstantiated. * Internally this can be done by locking key_types_sem. Externally, this can * be done by either never unregistering the key type, or making sure * key_alloc() calls don't race with module unloading. */ struct key *key_alloc(struct key_type *type, const char *desc, kuid_t uid, kgid_t gid, const struct cred *cred, key_perm_t perm, unsigned long flags) { struct key_user *user = NULL; struct key *key; size_t desclen, quotalen; int ret; key = ERR_PTR(-EINVAL); if (!desc || !*desc) goto error; if (type->vet_description) { ret = type->vet_description(desc); if (ret < 0) { key = ERR_PTR(ret); goto error; } } desclen = strlen(desc); quotalen = desclen + 1 + type->def_datalen; /* get hold of the key tracking for this user */ user = key_user_lookup(uid); if (!user) goto no_memory_1; /* check that the user's quota permits allocation of another key and * its description */ if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) { unsigned maxkeys = uid_eq(uid, GLOBAL_ROOT_UID) ? key_quota_root_maxkeys : key_quota_maxkeys; unsigned maxbytes = uid_eq(uid, GLOBAL_ROOT_UID) ? 
key_quota_root_maxbytes : key_quota_maxbytes; spin_lock(&user->lock); if (!(flags & KEY_ALLOC_QUOTA_OVERRUN)) { if (user->qnkeys + 1 >= maxkeys || user->qnbytes + quotalen >= maxbytes || user->qnbytes + quotalen < user->qnbytes) goto no_quota; } user->qnkeys++; user->qnbytes += quotalen; spin_unlock(&user->lock); } /* allocate and initialise the key and its description */ key = kmem_cache_zalloc(key_jar, GFP_KERNEL); if (!key) goto no_memory_2; if (desc) { key->index_key.desc_len = desclen; key->index_key.description = kmemdup(desc, desclen + 1, GFP_KERNEL); if (!key->description) goto no_memory_3; } atomic_set(&key->usage, 1); init_rwsem(&key->sem); lockdep_set_class(&key->sem, &type->lock_class); key->index_key.type = type; key->user = user; key->quotalen = quotalen; key->datalen = type->def_datalen; key->uid = uid; key->gid = gid; key->perm = perm; if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) key->flags |= 1 << KEY_FLAG_IN_QUOTA; if (flags & KEY_ALLOC_TRUSTED) key->flags |= 1 << KEY_FLAG_TRUSTED; #ifdef KEY_DEBUGGING key->magic = KEY_DEBUG_MAGIC; #endif /* let the security module know about the key */ ret = security_key_alloc(key, cred, flags); if (ret < 0) goto security_error; /* publish the key by giving it a serial number */ atomic_inc(&user->nkeys); key_alloc_serial(key); error: return key; security_error: kfree(key->description); kmem_cache_free(key_jar, key); if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) { spin_lock(&user->lock); user->qnkeys--; user->qnbytes -= quotalen; spin_unlock(&user->lock); } key_user_put(user); key = ERR_PTR(ret); goto error; no_memory_3: kmem_cache_free(key_jar, key); no_memory_2: if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) { spin_lock(&user->lock); user->qnkeys--; user->qnbytes -= quotalen; spin_unlock(&user->lock); } key_user_put(user); no_memory_1: key = ERR_PTR(-ENOMEM); goto error; no_quota: spin_unlock(&user->lock); key_user_put(user); key = ERR_PTR(-EDQUOT); goto error; } EXPORT_SYMBOL(key_alloc); /** * key_payload_reserve - Adjust data quota reservation for the key's payload * @key: The key to make the reservation for. * @datalen: The amount of data payload the caller now wants. * * Adjust the amount of the owning user's key data quota that a key reserves. * If the amount is increased, then -EDQUOT may be returned if there isn't * enough free quota available. * * If successful, 0 is returned. */ int key_payload_reserve(struct key *key, size_t datalen) { int delta = (int)datalen - key->datalen; int ret = 0; key_check(key); /* contemplate the quota adjustment */ if (delta != 0 && test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) { unsigned maxbytes = uid_eq(key->user->uid, GLOBAL_ROOT_UID) ? key_quota_root_maxbytes : key_quota_maxbytes; spin_lock(&key->user->lock); if (delta > 0 && (key->user->qnbytes + delta >= maxbytes || key->user->qnbytes + delta < key->user->qnbytes)) { ret = -EDQUOT; } else { key->user->qnbytes += delta; key->quotalen += delta; } spin_unlock(&key->user->lock); } /* change the recorded data length if that didn't generate an error */ if (ret == 0) key->datalen = datalen; return ret; } EXPORT_SYMBOL(key_payload_reserve); /* * Instantiate a key and link it into the target keyring atomically. Must be * called with the target keyring's semaphore writelocked. The target key's * semaphore need not be locked as instantiation is serialised by * key_construction_mutex. 
*/ static int __key_instantiate_and_link(struct key *key, struct key_preparsed_payload *prep, struct key *keyring, struct key *authkey, struct assoc_array_edit **_edit) { int ret, awaken; key_check(key); key_check(keyring); awaken = 0; ret = -EBUSY; mutex_lock(&key_construction_mutex); /* can't instantiate twice */ if (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) { /* instantiate the key */ ret = key->type->instantiate(key, prep); if (ret == 0) { /* mark the key as being instantiated */ atomic_inc(&key->user->nikeys); set_bit(KEY_FLAG_INSTANTIATED, &key->flags); if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags)) awaken = 1; /* and link it into the destination keyring */ if (keyring) __key_link(key, _edit); /* disable the authorisation key */ if (authkey) key_revoke(authkey); if (prep->expiry != TIME_T_MAX) { key->expiry = prep->expiry; key_schedule_gc(prep->expiry + key_gc_delay); } } } mutex_unlock(&key_construction_mutex); /* wake up anyone waiting for a key to be constructed */ if (awaken) wake_up_bit(&key->flags, KEY_FLAG_USER_CONSTRUCT); return ret; } /** * key_instantiate_and_link - Instantiate a key and link it into the keyring. * @key: The key to instantiate. * @data: The data to use to instantiate the keyring. * @datalen: The length of @data. * @keyring: Keyring to create a link in on success (or NULL). * @authkey: The authorisation token permitting instantiation. * * Instantiate a key that's in the uninstantiated state using the provided data * and, if successful, link it in to the destination keyring if one is * supplied. * * If successful, 0 is returned, the authorisation token is revoked and anyone * waiting for the key is woken up. If the key was already instantiated, * -EBUSY will be returned. */ int key_instantiate_and_link(struct key *key, const void *data, size_t datalen, struct key *keyring, struct key *authkey) { struct key_preparsed_payload prep; struct assoc_array_edit *edit; int ret; memset(&prep, 0, sizeof(prep)); prep.data = data; prep.datalen = datalen; prep.quotalen = key->type->def_datalen; prep.expiry = TIME_T_MAX; if (key->type->preparse) { ret = key->type->preparse(&prep); if (ret < 0) goto error; } if (keyring) { ret = __key_link_begin(keyring, &key->index_key, &edit); if (ret < 0) goto error; } ret = __key_instantiate_and_link(key, &prep, keyring, authkey, &edit); if (keyring) __key_link_end(keyring, &key->index_key, edit); error: if (key->type->preparse) key->type->free_preparse(&prep); return ret; } EXPORT_SYMBOL(key_instantiate_and_link); /** * key_reject_and_link - Negatively instantiate a key and link it into the keyring. * @key: The key to instantiate. * @timeout: The timeout on the negative key. * @error: The error to return when the key is hit. * @keyring: Keyring to create a link in on success (or NULL). * @authkey: The authorisation token permitting instantiation. * * Negatively instantiate a key that's in the uninstantiated state and, if * successful, set its timeout and stored error and link it in to the * destination keyring if one is supplied. The key and any links to the key * will be automatically garbage collected after the timeout expires. * * Negative keys are used to rate limit repeated request_key() calls by causing * them to return the stored error code (typically ENOKEY) until the negative * key expires. * * If successful, 0 is returned, the authorisation token is revoked and anyone * waiting for the key is woken up. If the key was already instantiated, * -EBUSY will be returned. 
*/ int key_reject_and_link(struct key *key, unsigned timeout, unsigned error, struct key *keyring, struct key *authkey) { struct assoc_array_edit *edit; struct timespec now; int ret, awaken, link_ret = 0; key_check(key); key_check(keyring); awaken = 0; ret = -EBUSY; if (keyring) link_ret = __key_link_begin(keyring, &key->index_key, &edit); mutex_lock(&key_construction_mutex); /* can't instantiate twice */ if (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) { /* mark the key as being negatively instantiated */ atomic_inc(&key->user->nikeys); key->type_data.reject_error = -error; smp_wmb(); set_bit(KEY_FLAG_NEGATIVE, &key->flags); set_bit(KEY_FLAG_INSTANTIATED, &key->flags); now = current_kernel_time(); key->expiry = now.tv_sec + timeout; key_schedule_gc(key->expiry + key_gc_delay); if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags)) awaken = 1; ret = 0; /* and link it into the destination keyring */ if (keyring && link_ret == 0) __key_link(key, &edit); /* disable the authorisation key */ if (authkey) key_revoke(authkey); } mutex_unlock(&key_construction_mutex); if (keyring) __key_link_end(keyring, &key->index_key, edit); /* wake up anyone waiting for a key to be constructed */ if (awaken) wake_up_bit(&key->flags, KEY_FLAG_USER_CONSTRUCT); return ret == 0 ? link_ret : ret; } EXPORT_SYMBOL(key_reject_and_link); /** * key_put - Discard a reference to a key. * @key: The key to discard a reference from. * * Discard a reference to a key, and when all the references are gone, we * schedule the cleanup task to come and pull it out of the tree in process * context at some later time. */ void key_put(struct key *key) { if (key) { key_check(key); if (atomic_dec_and_test(&key->usage)) schedule_work(&key_gc_work); } } EXPORT_SYMBOL(key_put); /* * Find a key by its serial number. */ struct key *key_lookup(key_serial_t id) { struct rb_node *n; struct key *key; spin_lock(&key_serial_lock); /* search the tree for the specified key */ n = key_serial_tree.rb_node; while (n) { key = rb_entry(n, struct key, serial_node); if (id < key->serial) n = n->rb_left; else if (id > key->serial) n = n->rb_right; else goto found; } not_found: key = ERR_PTR(-ENOKEY); goto error; found: /* pretend it doesn't exist if it is awaiting deletion */ if (atomic_read(&key->usage) == 0) goto not_found; /* this races with key_put(), but that doesn't matter since key_put() * doesn't actually change the key */ __key_get(key); error: spin_unlock(&key_serial_lock); return key; } /* * Find and lock the specified key type against removal. * * We return with the sem read-locked if successful. If the type wasn't * available -ENOKEY is returned instead. */ struct key_type *key_type_lookup(const char *type) { struct key_type *ktype; down_read(&key_types_sem); /* look up the key type to see if it's one of the registered kernel * types */ list_for_each_entry(ktype, &key_types_list, link) { if (strcmp(ktype->name, type) == 0) goto found_kernel_type; } up_read(&key_types_sem); ktype = ERR_PTR(-ENOKEY); found_kernel_type: return ktype; } void key_set_timeout(struct key *key, unsigned timeout) { struct timespec now; time_t expiry = 0; /* make the changes with the locks held to prevent races */ down_write(&key->sem); if (timeout > 0) { now = current_kernel_time(); expiry = now.tv_sec + timeout; } key->expiry = expiry; key_schedule_gc(key->expiry + key_gc_delay); up_write(&key->sem); } EXPORT_SYMBOL_GPL(key_set_timeout); /* * Unlock a key type locked by key_type_lookup(). 
*/ void key_type_put(struct key_type *ktype) { up_read(&key_types_sem); } /* * Attempt to update an existing key. * * The key is given to us with an incremented refcount that we need to discard * if we get an error. */ static inline key_ref_t __key_update(key_ref_t key_ref, struct key_preparsed_payload *prep) { struct key *key = key_ref_to_ptr(key_ref); int ret; /* need write permission on the key to update it */ ret = key_permission(key_ref, KEY_NEED_WRITE); if (ret < 0) goto error; ret = -EEXIST; if (!key->type->update) goto error; down_write(&key->sem); ret = key->type->update(key, prep); if (ret == 0) /* updating a negative key instantiates it */ clear_bit(KEY_FLAG_NEGATIVE, &key->flags); up_write(&key->sem); if (ret < 0) goto error; out: return key_ref; error: key_put(key); key_ref = ERR_PTR(ret); goto out; } /** * key_create_or_update - Update or create and instantiate a key. * @keyring_ref: A pointer to the destination keyring with possession flag. * @type: The type of key. * @description: The searchable description for the key. * @payload: The data to use to instantiate or update the key. * @plen: The length of @payload. * @perm: The permissions mask for a new key. * @flags: The quota flags for a new key. * * Search the destination keyring for a key of the same description and if one * is found, update it, otherwise create and instantiate a new one and create a * link to it from that keyring. * * If perm is KEY_PERM_UNDEF then an appropriate key permissions mask will be * concocted. * * Returns a pointer to the new key if successful, -ENODEV if the key type * wasn't available, -ENOTDIR if the keyring wasn't a keyring, -EACCES if the * caller isn't permitted to modify the keyring or the LSM did not permit * creation of the key. * * On success, the possession flag from the keyring ref will be tacked on to * the key ref before it is returned. */ key_ref_t key_create_or_update(key_ref_t keyring_ref, const char *type, const char *description, const void *payload, size_t plen, key_perm_t perm, unsigned long flags) { struct keyring_index_key index_key = { .description = description, }; struct key_preparsed_payload prep; struct assoc_array_edit *edit; const struct cred *cred = current_cred(); struct key *keyring, *key = NULL; key_ref_t key_ref; int ret; /* look up the key type to see if it's one of the registered kernel * types */ index_key.type = key_type_lookup(type); if (IS_ERR(index_key.type)) { key_ref = ERR_PTR(-ENODEV); goto error; } key_ref = ERR_PTR(-EINVAL); if (!index_key.type->match || !index_key.type->instantiate || (!index_key.description && !index_key.type->preparse)) goto error_put_type; keyring = key_ref_to_ptr(keyring_ref); key_check(keyring); key_ref = ERR_PTR(-ENOTDIR); if (keyring->type != &key_type_keyring) goto error_put_type; memset(&prep, 0, sizeof(prep)); prep.data = payload; prep.datalen = plen; prep.quotalen = index_key.type->def_datalen; prep.trusted = flags & KEY_ALLOC_TRUSTED; prep.expiry = TIME_T_MAX; if (index_key.type->preparse) { ret = index_key.type->preparse(&prep); if (ret < 0) { key_ref = ERR_PTR(ret); goto error_free_prep; } if (!index_key.description) index_key.description = prep.description; key_ref = ERR_PTR(-EINVAL); if (!index_key.description) goto error_free_prep; } index_key.desc_len = strlen(index_key.description); key_ref = ERR_PTR(-EPERM); if (!prep.trusted && test_bit(KEY_FLAG_TRUSTED_ONLY, &keyring->flags)) goto error_free_prep; flags |= prep.trusted ? 
KEY_ALLOC_TRUSTED : 0; ret = __key_link_begin(keyring, &index_key, &edit); if (ret < 0) { key_ref = ERR_PTR(ret); goto error_free_prep; } /* if we're going to allocate a new key, we're going to have * to modify the keyring */ ret = key_permission(keyring_ref, KEY_NEED_WRITE); if (ret < 0) { key_ref = ERR_PTR(ret); goto error_link_end; } /* if it's possible to update this type of key, search for an existing * key of the same type and description in the destination keyring and * update that instead if possible */ if (index_key.type->update) { key_ref = find_key_to_update(keyring_ref, &index_key); if (key_ref) goto found_matching_key; } /* if the client doesn't provide, decide on the permissions we want */ if (perm == KEY_PERM_UNDEF) { perm = KEY_POS_VIEW | KEY_POS_SEARCH | KEY_POS_LINK | KEY_POS_SETATTR; perm |= KEY_USR_VIEW; if (index_key.type->read) perm |= KEY_POS_READ; if (index_key.type == &key_type_keyring || index_key.type->update) perm |= KEY_POS_WRITE; } /* allocate a new key */ key = key_alloc(index_key.type, index_key.description, cred->fsuid, cred->fsgid, cred, perm, flags); if (IS_ERR(key)) { key_ref = ERR_CAST(key); goto error_link_end; } /* instantiate it and link it into the target keyring */ ret = __key_instantiate_and_link(key, &prep, keyring, NULL, &edit); if (ret < 0) { key_put(key); key_ref = ERR_PTR(ret); goto error_link_end; } key_ref = make_key_ref(key, is_key_possessed(keyring_ref)); error_link_end: __key_link_end(keyring, &index_key, edit); error_free_prep: if (index_key.type->preparse) index_key.type->free_preparse(&prep); error_put_type: key_type_put(index_key.type); error: return key_ref; found_matching_key: /* we found a matching key, so we're going to try to update it * - we can drop the locks first as we have the key pinned */ __key_link_end(keyring, &index_key, edit); key_ref = __key_update(key_ref, &prep); goto error_free_prep; } EXPORT_SYMBOL(key_create_or_update); /** * key_update - Update a key's contents. * @key_ref: The pointer (plus possession flag) to the key. * @payload: The data to be used to update the key. * @plen: The length of @payload. * * Attempt to update the contents of a key with the given payload data. The * caller must be granted Write permission on the key. Negative keys can be * instantiated by this method. * * Returns 0 on success, -EACCES if not permitted and -EOPNOTSUPP if the key * type does not support updating. The key type may return other errors. */ int key_update(key_ref_t key_ref, const void *payload, size_t plen) { struct key_preparsed_payload prep; struct key *key = key_ref_to_ptr(key_ref); int ret; key_check(key); /* the key must be writable */ ret = key_permission(key_ref, KEY_NEED_WRITE); if (ret < 0) goto error; /* attempt to update it if supported */ ret = -EOPNOTSUPP; if (!key->type->update) goto error; memset(&prep, 0, sizeof(prep)); prep.data = payload; prep.datalen = plen; prep.quotalen = key->type->def_datalen; prep.expiry = TIME_T_MAX; if (key->type->preparse) { ret = key->type->preparse(&prep); if (ret < 0) goto error; } down_write(&key->sem); ret = key->type->update(key, &prep); if (ret == 0) /* updating a negative key instantiates it */ clear_bit(KEY_FLAG_NEGATIVE, &key->flags); up_write(&key->sem); error: if (key->type->preparse) key->type->free_preparse(&prep); return ret; } EXPORT_SYMBOL(key_update); /** * key_revoke - Revoke a key. * @key: The key to be revoked. * * Mark a key as being revoked and ask the type to free up its resources. 
The * revocation timeout is set and the key and all its links will be * automatically garbage collected after key_gc_delay amount of time if they * are not manually dealt with first. */ void key_revoke(struct key *key) { struct timespec now; time_t time; key_check(key); /* make sure no one's trying to change or use the key when we mark it * - we tell lockdep that we might nest because we might be revoking an * authorisation key whilst holding the sem on a key we've just * instantiated */ down_write_nested(&key->sem, 1); if (!test_and_set_bit(KEY_FLAG_REVOKED, &key->flags) && key->type->revoke) key->type->revoke(key); /* set the death time to no more than the expiry time */ now = current_kernel_time(); time = now.tv_sec; if (key->revoked_at == 0 || key->revoked_at > time) { key->revoked_at = time; key_schedule_gc(key->revoked_at + key_gc_delay); } up_write(&key->sem); } EXPORT_SYMBOL(key_revoke); /** * key_invalidate - Invalidate a key. * @key: The key to be invalidated. * * Mark a key as being invalidated and have it cleaned up immediately. The key * is ignored by all searches and other operations from this point. */ void key_invalidate(struct key *key) { kenter("%d", key_serial(key)); key_check(key); if (!test_bit(KEY_FLAG_INVALIDATED, &key->flags)) { down_write_nested(&key->sem, 1); if (!test_and_set_bit(KEY_FLAG_INVALIDATED, &key->flags)) key_schedule_gc_links(); up_write(&key->sem); } } EXPORT_SYMBOL(key_invalidate); /** * generic_key_instantiate - Simple instantiation of a key from preparsed data * @key: The key to be instantiated * @prep: The preparsed data to load. * * Instantiate a key from preparsed data. We assume we can just copy the data * in directly and clear the old pointers. * * This can be pointed to directly by the key type instantiate op pointer. */ int generic_key_instantiate(struct key *key, struct key_preparsed_payload *prep) { int ret; pr_devel("==>%s()\n", __func__); ret = key_payload_reserve(key, prep->quotalen); if (ret == 0) { key->type_data.p[0] = prep->type_data[0]; key->type_data.p[1] = prep->type_data[1]; rcu_assign_keypointer(key, prep->payload[0]); key->payload.data2[1] = prep->payload[1]; prep->type_data[0] = NULL; prep->type_data[1] = NULL; prep->payload[0] = NULL; prep->payload[1] = NULL; } pr_devel("<==%s() = %d\n", __func__, ret); return ret; } EXPORT_SYMBOL(generic_key_instantiate); /** * register_key_type - Register a type of key. * @ktype: The new key type. * * Register a new key type. * * Returns 0 on success or -EEXIST if a type of this name already exists. */ int register_key_type(struct key_type *ktype) { struct key_type *p; int ret; memset(&ktype->lock_class, 0, sizeof(ktype->lock_class)); ret = -EEXIST; down_write(&key_types_sem); /* disallow key types with the same name */ list_for_each_entry(p, &key_types_list, link) { if (strcmp(p->name, ktype->name) == 0) goto out; } /* store the type */ list_add(&ktype->link, &key_types_list); pr_notice("Key type %s registered\n", ktype->name); ret = 0; out: up_write(&key_types_sem); return ret; } EXPORT_SYMBOL(register_key_type); /** * unregister_key_type - Unregister a type of key. * @ktype: The key type. * * Unregister a key type and mark all the extant keys of this type as dead. * Those keys of this type are then destroyed to get rid of their payloads and * they and their links will be garbage collected as soon as possible. 
*/ void unregister_key_type(struct key_type *ktype) { down_write(&key_types_sem); list_del_init(&ktype->link); downgrade_write(&key_types_sem); key_gc_keytype(ktype); pr_notice("Key type %s unregistered\n", ktype->name); up_read(&key_types_sem); } EXPORT_SYMBOL(unregister_key_type); /* * Initialise the key management state. */ void __init key_init(void) { /* allocate a slab in which we can store keys */ key_jar = kmem_cache_create("key_jar", sizeof(struct key), 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); /* add the special key types */ list_add_tail(&key_type_keyring.link, &key_types_list); list_add_tail(&key_type_dead.link, &key_types_list); list_add_tail(&key_type_user.link, &key_types_list); list_add_tail(&key_type_logon.link, &key_types_list); /* record the root user tracking */ rb_link_node(&root_key_user.node, NULL, &key_user_tree.rb_node); rb_insert_color(&root_key_user.node, &key_user_tree); }
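The pre-fix copy of key.c above and the post-fix copy below differ in one guard inside key_create_or_update(): the old code also rejected key types that lack a ->match operation, while the new code requires only ->instantiate plus either a caller-supplied description or a ->preparse operation. For orientation, the sketch below shows the userspace path that ends in this function: the add_key(2) wrapper from libkeyutils creates a "user" key in the session keyring, or updates it in place when a key with the same description already exists. The key description and payload are illustrative; build with -lkeyutils.

#include <stdio.h>
#include <string.h>
#include <keyutils.h>

int main( void )
{
    const char *payload = "s3cret";

    /* add_key(type, description, payload, plen, keyring) lands in
     * key_create_or_update() on the kernel side. */
    key_serial_t id = add_key( "user", "example:token",
                               payload, strlen( payload ),
                               KEY_SPEC_SESSION_KEYRING );
    if ( id == -1 ) {
        perror( "add_key" );
        return 1;
    }

    printf( "created/updated key id: %d\n", id );
    return 0;
}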
/* Basic authentication token and access key management * * Copyright (C) 2004-2008 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/module.h> #include <linux/init.h> #include <linux/poison.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/security.h> #include <linux/workqueue.h> #include <linux/random.h> #include <linux/err.h> #include "internal.h" struct kmem_cache *key_jar; struct rb_root key_serial_tree; /* tree of keys indexed by serial */ DEFINE_SPINLOCK(key_serial_lock); struct rb_root key_user_tree; /* tree of quota records indexed by UID */ DEFINE_SPINLOCK(key_user_lock); unsigned int key_quota_root_maxkeys = 200; /* root's key count quota */ unsigned int key_quota_root_maxbytes = 20000; /* root's key space quota */ unsigned int key_quota_maxkeys = 200; /* general key count quota */ unsigned int key_quota_maxbytes = 20000; /* general key space quota */ static LIST_HEAD(key_types_list); static DECLARE_RWSEM(key_types_sem); /* We serialise key instantiation and link */ DEFINE_MUTEX(key_construction_mutex); #ifdef KEY_DEBUGGING void __key_check(const struct key *key) { printk("__key_check: key %p {%08x} should be {%08x}\n", key, key->magic, KEY_DEBUG_MAGIC); BUG(); } #endif /* * Get the key quota record for a user, allocating a new record if one doesn't * already exist. */ struct key_user *key_user_lookup(kuid_t uid) { struct key_user *candidate = NULL, *user; struct rb_node *parent = NULL; struct rb_node **p; try_again: p = &key_user_tree.rb_node; spin_lock(&key_user_lock); /* search the tree for a user record with a matching UID */ while (*p) { parent = *p; user = rb_entry(parent, struct key_user, node); if (uid_lt(uid, user->uid)) p = &(*p)->rb_left; else if (uid_gt(uid, user->uid)) p = &(*p)->rb_right; else goto found; } /* if we get here, we failed to find a match in the tree */ if (!candidate) { /* allocate a candidate user record if we don't already have * one */ spin_unlock(&key_user_lock); user = NULL; candidate = kmalloc(sizeof(struct key_user), GFP_KERNEL); if (unlikely(!candidate)) goto out; /* the allocation may have scheduled, so we need to repeat the * search lest someone else added the record whilst we were * asleep */ goto try_again; } /* if we get here, then the user record still hadn't appeared on the * second pass - so we use the candidate record */ atomic_set(&candidate->usage, 1); atomic_set(&candidate->nkeys, 0); atomic_set(&candidate->nikeys, 0); candidate->uid = uid; candidate->qnkeys = 0; candidate->qnbytes = 0; spin_lock_init(&candidate->lock); mutex_init(&candidate->cons_lock); rb_link_node(&candidate->node, parent, p); rb_insert_color(&candidate->node, &key_user_tree); spin_unlock(&key_user_lock); user = candidate; goto out; /* okay - we found a user record for this UID */ found: atomic_inc(&user->usage); spin_unlock(&key_user_lock); kfree(candidate); out: return user; } /* * Dispose of a user structure */ void key_user_put(struct key_user *user) { if (atomic_dec_and_lock(&user->usage, &key_user_lock)) { rb_erase(&user->node, &key_user_tree); spin_unlock(&key_user_lock); kfree(user); } } /* * Allocate a serial number for a key. These are assigned randomly to avoid * security issues through covert channel problems. 
*/ static inline void key_alloc_serial(struct key *key) { struct rb_node *parent, **p; struct key *xkey; /* propose a random serial number and look for a hole for it in the * serial number tree */ do { get_random_bytes(&key->serial, sizeof(key->serial)); key->serial >>= 1; /* negative numbers are not permitted */ } while (key->serial < 3); spin_lock(&key_serial_lock); attempt_insertion: parent = NULL; p = &key_serial_tree.rb_node; while (*p) { parent = *p; xkey = rb_entry(parent, struct key, serial_node); if (key->serial < xkey->serial) p = &(*p)->rb_left; else if (key->serial > xkey->serial) p = &(*p)->rb_right; else goto serial_exists; } /* we've found a suitable hole - arrange for this key to occupy it */ rb_link_node(&key->serial_node, parent, p); rb_insert_color(&key->serial_node, &key_serial_tree); spin_unlock(&key_serial_lock); return; /* we found a key with the proposed serial number - walk the tree from * that point looking for the next unused serial number */ serial_exists: for (;;) { key->serial++; if (key->serial < 3) { key->serial = 3; goto attempt_insertion; } parent = rb_next(parent); if (!parent) goto attempt_insertion; xkey = rb_entry(parent, struct key, serial_node); if (key->serial < xkey->serial) goto attempt_insertion; } } /** * key_alloc - Allocate a key of the specified type. * @type: The type of key to allocate. * @desc: The key description to allow the key to be searched out. * @uid: The owner of the new key. * @gid: The group ID for the new key's group permissions. * @cred: The credentials specifying UID namespace. * @perm: The permissions mask of the new key. * @flags: Flags specifying quota properties. * * Allocate a key of the specified type with the attributes given. The key is * returned in an uninstantiated state and the caller needs to instantiate the * key before returning. * * The user's key count quota is updated to reflect the creation of the key and * the user's key data quota has the default for the key type reserved. The * instantiation function should amend this as necessary. If insufficient * quota is available, -EDQUOT will be returned. * * The LSM security modules can prevent a key being created, in which case * -EACCES will be returned. * * Returns a pointer to the new key if successful and an error code otherwise. * * Note that the caller needs to ensure the key type isn't uninstantiated. * Internally this can be done by locking key_types_sem. Externally, this can * be done by either never unregistering the key type, or making sure * key_alloc() calls don't race with module unloading. */ struct key *key_alloc(struct key_type *type, const char *desc, kuid_t uid, kgid_t gid, const struct cred *cred, key_perm_t perm, unsigned long flags) { struct key_user *user = NULL; struct key *key; size_t desclen, quotalen; int ret; key = ERR_PTR(-EINVAL); if (!desc || !*desc) goto error; if (type->vet_description) { ret = type->vet_description(desc); if (ret < 0) { key = ERR_PTR(ret); goto error; } } desclen = strlen(desc); quotalen = desclen + 1 + type->def_datalen; /* get hold of the key tracking for this user */ user = key_user_lookup(uid); if (!user) goto no_memory_1; /* check that the user's quota permits allocation of another key and * its description */ if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) { unsigned maxkeys = uid_eq(uid, GLOBAL_ROOT_UID) ? key_quota_root_maxkeys : key_quota_maxkeys; unsigned maxbytes = uid_eq(uid, GLOBAL_ROOT_UID) ? 
key_quota_root_maxbytes : key_quota_maxbytes; spin_lock(&user->lock); if (!(flags & KEY_ALLOC_QUOTA_OVERRUN)) { if (user->qnkeys + 1 >= maxkeys || user->qnbytes + quotalen >= maxbytes || user->qnbytes + quotalen < user->qnbytes) goto no_quota; } user->qnkeys++; user->qnbytes += quotalen; spin_unlock(&user->lock); } /* allocate and initialise the key and its description */ key = kmem_cache_zalloc(key_jar, GFP_KERNEL); if (!key) goto no_memory_2; if (desc) { key->index_key.desc_len = desclen; key->index_key.description = kmemdup(desc, desclen + 1, GFP_KERNEL); if (!key->description) goto no_memory_3; } atomic_set(&key->usage, 1); init_rwsem(&key->sem); lockdep_set_class(&key->sem, &type->lock_class); key->index_key.type = type; key->user = user; key->quotalen = quotalen; key->datalen = type->def_datalen; key->uid = uid; key->gid = gid; key->perm = perm; if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) key->flags |= 1 << KEY_FLAG_IN_QUOTA; if (flags & KEY_ALLOC_TRUSTED) key->flags |= 1 << KEY_FLAG_TRUSTED; #ifdef KEY_DEBUGGING key->magic = KEY_DEBUG_MAGIC; #endif /* let the security module know about the key */ ret = security_key_alloc(key, cred, flags); if (ret < 0) goto security_error; /* publish the key by giving it a serial number */ atomic_inc(&user->nkeys); key_alloc_serial(key); error: return key; security_error: kfree(key->description); kmem_cache_free(key_jar, key); if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) { spin_lock(&user->lock); user->qnkeys--; user->qnbytes -= quotalen; spin_unlock(&user->lock); } key_user_put(user); key = ERR_PTR(ret); goto error; no_memory_3: kmem_cache_free(key_jar, key); no_memory_2: if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) { spin_lock(&user->lock); user->qnkeys--; user->qnbytes -= quotalen; spin_unlock(&user->lock); } key_user_put(user); no_memory_1: key = ERR_PTR(-ENOMEM); goto error; no_quota: spin_unlock(&user->lock); key_user_put(user); key = ERR_PTR(-EDQUOT); goto error; } EXPORT_SYMBOL(key_alloc); /** * key_payload_reserve - Adjust data quota reservation for the key's payload * @key: The key to make the reservation for. * @datalen: The amount of data payload the caller now wants. * * Adjust the amount of the owning user's key data quota that a key reserves. * If the amount is increased, then -EDQUOT may be returned if there isn't * enough free quota available. * * If successful, 0 is returned. */ int key_payload_reserve(struct key *key, size_t datalen) { int delta = (int)datalen - key->datalen; int ret = 0; key_check(key); /* contemplate the quota adjustment */ if (delta != 0 && test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) { unsigned maxbytes = uid_eq(key->user->uid, GLOBAL_ROOT_UID) ? key_quota_root_maxbytes : key_quota_maxbytes; spin_lock(&key->user->lock); if (delta > 0 && (key->user->qnbytes + delta >= maxbytes || key->user->qnbytes + delta < key->user->qnbytes)) { ret = -EDQUOT; } else { key->user->qnbytes += delta; key->quotalen += delta; } spin_unlock(&key->user->lock); } /* change the recorded data length if that didn't generate an error */ if (ret == 0) key->datalen = datalen; return ret; } EXPORT_SYMBOL(key_payload_reserve); /* * Instantiate a key and link it into the target keyring atomically. Must be * called with the target keyring's semaphore writelocked. The target key's * semaphore need not be locked as instantiation is serialised by * key_construction_mutex. 
*/ static int __key_instantiate_and_link(struct key *key, struct key_preparsed_payload *prep, struct key *keyring, struct key *authkey, struct assoc_array_edit **_edit) { int ret, awaken; key_check(key); key_check(keyring); awaken = 0; ret = -EBUSY; mutex_lock(&key_construction_mutex); /* can't instantiate twice */ if (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) { /* instantiate the key */ ret = key->type->instantiate(key, prep); if (ret == 0) { /* mark the key as being instantiated */ atomic_inc(&key->user->nikeys); set_bit(KEY_FLAG_INSTANTIATED, &key->flags); if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags)) awaken = 1; /* and link it into the destination keyring */ if (keyring) __key_link(key, _edit); /* disable the authorisation key */ if (authkey) key_revoke(authkey); if (prep->expiry != TIME_T_MAX) { key->expiry = prep->expiry; key_schedule_gc(prep->expiry + key_gc_delay); } } } mutex_unlock(&key_construction_mutex); /* wake up anyone waiting for a key to be constructed */ if (awaken) wake_up_bit(&key->flags, KEY_FLAG_USER_CONSTRUCT); return ret; } /** * key_instantiate_and_link - Instantiate a key and link it into the keyring. * @key: The key to instantiate. * @data: The data to use to instantiate the keyring. * @datalen: The length of @data. * @keyring: Keyring to create a link in on success (or NULL). * @authkey: The authorisation token permitting instantiation. * * Instantiate a key that's in the uninstantiated state using the provided data * and, if successful, link it in to the destination keyring if one is * supplied. * * If successful, 0 is returned, the authorisation token is revoked and anyone * waiting for the key is woken up. If the key was already instantiated, * -EBUSY will be returned. */ int key_instantiate_and_link(struct key *key, const void *data, size_t datalen, struct key *keyring, struct key *authkey) { struct key_preparsed_payload prep; struct assoc_array_edit *edit; int ret; memset(&prep, 0, sizeof(prep)); prep.data = data; prep.datalen = datalen; prep.quotalen = key->type->def_datalen; prep.expiry = TIME_T_MAX; if (key->type->preparse) { ret = key->type->preparse(&prep); if (ret < 0) goto error; } if (keyring) { ret = __key_link_begin(keyring, &key->index_key, &edit); if (ret < 0) goto error; } ret = __key_instantiate_and_link(key, &prep, keyring, authkey, &edit); if (keyring) __key_link_end(keyring, &key->index_key, edit); error: if (key->type->preparse) key->type->free_preparse(&prep); return ret; } EXPORT_SYMBOL(key_instantiate_and_link); /** * key_reject_and_link - Negatively instantiate a key and link it into the keyring. * @key: The key to instantiate. * @timeout: The timeout on the negative key. * @error: The error to return when the key is hit. * @keyring: Keyring to create a link in on success (or NULL). * @authkey: The authorisation token permitting instantiation. * * Negatively instantiate a key that's in the uninstantiated state and, if * successful, set its timeout and stored error and link it in to the * destination keyring if one is supplied. The key and any links to the key * will be automatically garbage collected after the timeout expires. * * Negative keys are used to rate limit repeated request_key() calls by causing * them to return the stored error code (typically ENOKEY) until the negative * key expires. * * If successful, 0 is returned, the authorisation token is revoked and anyone * waiting for the key is woken up. If the key was already instantiated, * -EBUSY will be returned. 
*/ int key_reject_and_link(struct key *key, unsigned timeout, unsigned error, struct key *keyring, struct key *authkey) { struct assoc_array_edit *edit; struct timespec now; int ret, awaken, link_ret = 0; key_check(key); key_check(keyring); awaken = 0; ret = -EBUSY; if (keyring) link_ret = __key_link_begin(keyring, &key->index_key, &edit); mutex_lock(&key_construction_mutex); /* can't instantiate twice */ if (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) { /* mark the key as being negatively instantiated */ atomic_inc(&key->user->nikeys); key->type_data.reject_error = -error; smp_wmb(); set_bit(KEY_FLAG_NEGATIVE, &key->flags); set_bit(KEY_FLAG_INSTANTIATED, &key->flags); now = current_kernel_time(); key->expiry = now.tv_sec + timeout; key_schedule_gc(key->expiry + key_gc_delay); if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags)) awaken = 1; ret = 0; /* and link it into the destination keyring */ if (keyring && link_ret == 0) __key_link(key, &edit); /* disable the authorisation key */ if (authkey) key_revoke(authkey); } mutex_unlock(&key_construction_mutex); if (keyring) __key_link_end(keyring, &key->index_key, edit); /* wake up anyone waiting for a key to be constructed */ if (awaken) wake_up_bit(&key->flags, KEY_FLAG_USER_CONSTRUCT); return ret == 0 ? link_ret : ret; } EXPORT_SYMBOL(key_reject_and_link); /** * key_put - Discard a reference to a key. * @key: The key to discard a reference from. * * Discard a reference to a key, and when all the references are gone, we * schedule the cleanup task to come and pull it out of the tree in process * context at some later time. */ void key_put(struct key *key) { if (key) { key_check(key); if (atomic_dec_and_test(&key->usage)) schedule_work(&key_gc_work); } } EXPORT_SYMBOL(key_put); /* * Find a key by its serial number. */ struct key *key_lookup(key_serial_t id) { struct rb_node *n; struct key *key; spin_lock(&key_serial_lock); /* search the tree for the specified key */ n = key_serial_tree.rb_node; while (n) { key = rb_entry(n, struct key, serial_node); if (id < key->serial) n = n->rb_left; else if (id > key->serial) n = n->rb_right; else goto found; } not_found: key = ERR_PTR(-ENOKEY); goto error; found: /* pretend it doesn't exist if it is awaiting deletion */ if (atomic_read(&key->usage) == 0) goto not_found; /* this races with key_put(), but that doesn't matter since key_put() * doesn't actually change the key */ __key_get(key); error: spin_unlock(&key_serial_lock); return key; } /* * Find and lock the specified key type against removal. * * We return with the sem read-locked if successful. If the type wasn't * available -ENOKEY is returned instead. */ struct key_type *key_type_lookup(const char *type) { struct key_type *ktype; down_read(&key_types_sem); /* look up the key type to see if it's one of the registered kernel * types */ list_for_each_entry(ktype, &key_types_list, link) { if (strcmp(ktype->name, type) == 0) goto found_kernel_type; } up_read(&key_types_sem); ktype = ERR_PTR(-ENOKEY); found_kernel_type: return ktype; } void key_set_timeout(struct key *key, unsigned timeout) { struct timespec now; time_t expiry = 0; /* make the changes with the locks held to prevent races */ down_write(&key->sem); if (timeout > 0) { now = current_kernel_time(); expiry = now.tv_sec + timeout; } key->expiry = expiry; key_schedule_gc(key->expiry + key_gc_delay); up_write(&key->sem); } EXPORT_SYMBOL_GPL(key_set_timeout); /* * Unlock a key type locked by key_type_lookup(). 
*/ void key_type_put(struct key_type *ktype) { up_read(&key_types_sem); } /* * Attempt to update an existing key. * * The key is given to us with an incremented refcount that we need to discard * if we get an error. */ static inline key_ref_t __key_update(key_ref_t key_ref, struct key_preparsed_payload *prep) { struct key *key = key_ref_to_ptr(key_ref); int ret; /* need write permission on the key to update it */ ret = key_permission(key_ref, KEY_NEED_WRITE); if (ret < 0) goto error; ret = -EEXIST; if (!key->type->update) goto error; down_write(&key->sem); ret = key->type->update(key, prep); if (ret == 0) /* updating a negative key instantiates it */ clear_bit(KEY_FLAG_NEGATIVE, &key->flags); up_write(&key->sem); if (ret < 0) goto error; out: return key_ref; error: key_put(key); key_ref = ERR_PTR(ret); goto out; } /** * key_create_or_update - Update or create and instantiate a key. * @keyring_ref: A pointer to the destination keyring with possession flag. * @type: The type of key. * @description: The searchable description for the key. * @payload: The data to use to instantiate or update the key. * @plen: The length of @payload. * @perm: The permissions mask for a new key. * @flags: The quota flags for a new key. * * Search the destination keyring for a key of the same description and if one * is found, update it, otherwise create and instantiate a new one and create a * link to it from that keyring. * * If perm is KEY_PERM_UNDEF then an appropriate key permissions mask will be * concocted. * * Returns a pointer to the new key if successful, -ENODEV if the key type * wasn't available, -ENOTDIR if the keyring wasn't a keyring, -EACCES if the * caller isn't permitted to modify the keyring or the LSM did not permit * creation of the key. * * On success, the possession flag from the keyring ref will be tacked on to * the key ref before it is returned. */ key_ref_t key_create_or_update(key_ref_t keyring_ref, const char *type, const char *description, const void *payload, size_t plen, key_perm_t perm, unsigned long flags) { struct keyring_index_key index_key = { .description = description, }; struct key_preparsed_payload prep; struct assoc_array_edit *edit; const struct cred *cred = current_cred(); struct key *keyring, *key = NULL; key_ref_t key_ref; int ret; /* look up the key type to see if it's one of the registered kernel * types */ index_key.type = key_type_lookup(type); if (IS_ERR(index_key.type)) { key_ref = ERR_PTR(-ENODEV); goto error; } key_ref = ERR_PTR(-EINVAL); if (!index_key.type->instantiate || (!index_key.description && !index_key.type->preparse)) goto error_put_type; keyring = key_ref_to_ptr(keyring_ref); key_check(keyring); key_ref = ERR_PTR(-ENOTDIR); if (keyring->type != &key_type_keyring) goto error_put_type; memset(&prep, 0, sizeof(prep)); prep.data = payload; prep.datalen = plen; prep.quotalen = index_key.type->def_datalen; prep.trusted = flags & KEY_ALLOC_TRUSTED; prep.expiry = TIME_T_MAX; if (index_key.type->preparse) { ret = index_key.type->preparse(&prep); if (ret < 0) { key_ref = ERR_PTR(ret); goto error_free_prep; } if (!index_key.description) index_key.description = prep.description; key_ref = ERR_PTR(-EINVAL); if (!index_key.description) goto error_free_prep; } index_key.desc_len = strlen(index_key.description); key_ref = ERR_PTR(-EPERM); if (!prep.trusted && test_bit(KEY_FLAG_TRUSTED_ONLY, &keyring->flags)) goto error_free_prep; flags |= prep.trusted ? 
KEY_ALLOC_TRUSTED : 0; ret = __key_link_begin(keyring, &index_key, &edit); if (ret < 0) { key_ref = ERR_PTR(ret); goto error_free_prep; } /* if we're going to allocate a new key, we're going to have * to modify the keyring */ ret = key_permission(keyring_ref, KEY_NEED_WRITE); if (ret < 0) { key_ref = ERR_PTR(ret); goto error_link_end; } /* if it's possible to update this type of key, search for an existing * key of the same type and description in the destination keyring and * update that instead if possible */ if (index_key.type->update) { key_ref = find_key_to_update(keyring_ref, &index_key); if (key_ref) goto found_matching_key; } /* if the client doesn't provide, decide on the permissions we want */ if (perm == KEY_PERM_UNDEF) { perm = KEY_POS_VIEW | KEY_POS_SEARCH | KEY_POS_LINK | KEY_POS_SETATTR; perm |= KEY_USR_VIEW; if (index_key.type->read) perm |= KEY_POS_READ; if (index_key.type == &key_type_keyring || index_key.type->update) perm |= KEY_POS_WRITE; } /* allocate a new key */ key = key_alloc(index_key.type, index_key.description, cred->fsuid, cred->fsgid, cred, perm, flags); if (IS_ERR(key)) { key_ref = ERR_CAST(key); goto error_link_end; } /* instantiate it and link it into the target keyring */ ret = __key_instantiate_and_link(key, &prep, keyring, NULL, &edit); if (ret < 0) { key_put(key); key_ref = ERR_PTR(ret); goto error_link_end; } key_ref = make_key_ref(key, is_key_possessed(keyring_ref)); error_link_end: __key_link_end(keyring, &index_key, edit); error_free_prep: if (index_key.type->preparse) index_key.type->free_preparse(&prep); error_put_type: key_type_put(index_key.type); error: return key_ref; found_matching_key: /* we found a matching key, so we're going to try to update it * - we can drop the locks first as we have the key pinned */ __key_link_end(keyring, &index_key, edit); key_ref = __key_update(key_ref, &prep); goto error_free_prep; } EXPORT_SYMBOL(key_create_or_update); /** * key_update - Update a key's contents. * @key_ref: The pointer (plus possession flag) to the key. * @payload: The data to be used to update the key. * @plen: The length of @payload. * * Attempt to update the contents of a key with the given payload data. The * caller must be granted Write permission on the key. Negative keys can be * instantiated by this method. * * Returns 0 on success, -EACCES if not permitted and -EOPNOTSUPP if the key * type does not support updating. The key type may return other errors. */ int key_update(key_ref_t key_ref, const void *payload, size_t plen) { struct key_preparsed_payload prep; struct key *key = key_ref_to_ptr(key_ref); int ret; key_check(key); /* the key must be writable */ ret = key_permission(key_ref, KEY_NEED_WRITE); if (ret < 0) goto error; /* attempt to update it if supported */ ret = -EOPNOTSUPP; if (!key->type->update) goto error; memset(&prep, 0, sizeof(prep)); prep.data = payload; prep.datalen = plen; prep.quotalen = key->type->def_datalen; prep.expiry = TIME_T_MAX; if (key->type->preparse) { ret = key->type->preparse(&prep); if (ret < 0) goto error; } down_write(&key->sem); ret = key->type->update(key, &prep); if (ret == 0) /* updating a negative key instantiates it */ clear_bit(KEY_FLAG_NEGATIVE, &key->flags); up_write(&key->sem); error: if (key->type->preparse) key->type->free_preparse(&prep); return ret; } EXPORT_SYMBOL(key_update); /** * key_revoke - Revoke a key. * @key: The key to be revoked. * * Mark a key as being revoked and ask the type to free up its resources. 
The * revocation timeout is set and the key and all its links will be * automatically garbage collected after key_gc_delay amount of time if they * are not manually dealt with first. */ void key_revoke(struct key *key) { struct timespec now; time_t time; key_check(key); /* make sure no one's trying to change or use the key when we mark it * - we tell lockdep that we might nest because we might be revoking an * authorisation key whilst holding the sem on a key we've just * instantiated */ down_write_nested(&key->sem, 1); if (!test_and_set_bit(KEY_FLAG_REVOKED, &key->flags) && key->type->revoke) key->type->revoke(key); /* set the death time to no more than the expiry time */ now = current_kernel_time(); time = now.tv_sec; if (key->revoked_at == 0 || key->revoked_at > time) { key->revoked_at = time; key_schedule_gc(key->revoked_at + key_gc_delay); } up_write(&key->sem); } EXPORT_SYMBOL(key_revoke); /** * key_invalidate - Invalidate a key. * @key: The key to be invalidated. * * Mark a key as being invalidated and have it cleaned up immediately. The key * is ignored by all searches and other operations from this point. */ void key_invalidate(struct key *key) { kenter("%d", key_serial(key)); key_check(key); if (!test_bit(KEY_FLAG_INVALIDATED, &key->flags)) { down_write_nested(&key->sem, 1); if (!test_and_set_bit(KEY_FLAG_INVALIDATED, &key->flags)) key_schedule_gc_links(); up_write(&key->sem); } } EXPORT_SYMBOL(key_invalidate); /** * generic_key_instantiate - Simple instantiation of a key from preparsed data * @key: The key to be instantiated * @prep: The preparsed data to load. * * Instantiate a key from preparsed data. We assume we can just copy the data * in directly and clear the old pointers. * * This can be pointed to directly by the key type instantiate op pointer. */ int generic_key_instantiate(struct key *key, struct key_preparsed_payload *prep) { int ret; pr_devel("==>%s()\n", __func__); ret = key_payload_reserve(key, prep->quotalen); if (ret == 0) { key->type_data.p[0] = prep->type_data[0]; key->type_data.p[1] = prep->type_data[1]; rcu_assign_keypointer(key, prep->payload[0]); key->payload.data2[1] = prep->payload[1]; prep->type_data[0] = NULL; prep->type_data[1] = NULL; prep->payload[0] = NULL; prep->payload[1] = NULL; } pr_devel("<==%s() = %d\n", __func__, ret); return ret; } EXPORT_SYMBOL(generic_key_instantiate); /** * register_key_type - Register a type of key. * @ktype: The new key type. * * Register a new key type. * * Returns 0 on success or -EEXIST if a type of this name already exists. */ int register_key_type(struct key_type *ktype) { struct key_type *p; int ret; memset(&ktype->lock_class, 0, sizeof(ktype->lock_class)); ret = -EEXIST; down_write(&key_types_sem); /* disallow key types with the same name */ list_for_each_entry(p, &key_types_list, link) { if (strcmp(p->name, ktype->name) == 0) goto out; } /* store the type */ list_add(&ktype->link, &key_types_list); pr_notice("Key type %s registered\n", ktype->name); ret = 0; out: up_write(&key_types_sem); return ret; } EXPORT_SYMBOL(register_key_type); /** * unregister_key_type - Unregister a type of key. * @ktype: The key type. * * Unregister a key type and mark all the extant keys of this type as dead. * Those keys of this type are then destroyed to get rid of their payloads and * they and their links will be garbage collected as soon as possible. 
*/ void unregister_key_type(struct key_type *ktype) { down_write(&key_types_sem); list_del_init(&ktype->link); downgrade_write(&key_types_sem); key_gc_keytype(ktype); pr_notice("Key type %s unregistered\n", ktype->name); up_read(&key_types_sem); } EXPORT_SYMBOL(unregister_key_type); /* * Initialise the key management state. */ void __init key_init(void) { /* allocate a slab in which we can store keys */ key_jar = kmem_cache_create("key_jar", sizeof(struct key), 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); /* add the special key types */ list_add_tail(&key_type_keyring.link, &key_types_list); list_add_tail(&key_type_dead.link, &key_types_list); list_add_tail(&key_type_user.link, &key_types_list); list_add_tail(&key_type_logon.link, &key_types_list); /* record the root user tracking */ rb_link_node(&root_key_user.node, NULL, &key_user_tree.rb_node); rb_insert_color(&root_key_user.node, &key_user_tree); }
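Before the extracted before/after snapshots of the patched function, a short orientation example. The following user-space sketch is illustrative only: it assumes the keyutils development headers and a kernel exposing the standard "user" key type, and the description and payload strings are made up. It exercises the key_create_or_update() path shown above via the add_key(2) system call:

#include <keyutils.h> /* add_key() wrapper from the keyutils package */
#include <stdio.h>

int main(void)
{
    /* add_key(2) resolves the "user" key type and lands in
     * key_create_or_update() on the session keyring */
    key_serial_t id = add_key("user", "example:demo", "payload", 7,
                              KEY_SPEC_SESSION_KEYRING);
    if (id == -1) {
        perror("add_key");
        return 1;
    }
    printf("key serial: %d\n", id);
    return 0;
}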
key_ref_t key_create_or_update(key_ref_t keyring_ref, const char *type, const char *description, const void *payload, size_t plen, key_perm_t perm, unsigned long flags) { struct keyring_index_key index_key = { .description = description, }; struct key_preparsed_payload prep; struct assoc_array_edit *edit; const struct cred *cred = current_cred(); struct key *keyring, *key = NULL; key_ref_t key_ref; int ret; /* look up the key type to see if it's one of the registered kernel * types */ index_key.type = key_type_lookup(type); if (IS_ERR(index_key.type)) { key_ref = ERR_PTR(-ENODEV); goto error; } key_ref = ERR_PTR(-EINVAL); if (!index_key.type->match || !index_key.type->instantiate || (!index_key.description && !index_key.type->preparse)) goto error_put_type; keyring = key_ref_to_ptr(keyring_ref); key_check(keyring); key_ref = ERR_PTR(-ENOTDIR); if (keyring->type != &key_type_keyring) goto error_put_type; memset(&prep, 0, sizeof(prep)); prep.data = payload; prep.datalen = plen; prep.quotalen = index_key.type->def_datalen; prep.trusted = flags & KEY_ALLOC_TRUSTED; prep.expiry = TIME_T_MAX; if (index_key.type->preparse) { ret = index_key.type->preparse(&prep); if (ret < 0) { key_ref = ERR_PTR(ret); goto error_free_prep; } if (!index_key.description) index_key.description = prep.description; key_ref = ERR_PTR(-EINVAL); if (!index_key.description) goto error_free_prep; } index_key.desc_len = strlen(index_key.description); key_ref = ERR_PTR(-EPERM); if (!prep.trusted && test_bit(KEY_FLAG_TRUSTED_ONLY, &keyring->flags)) goto error_free_prep; flags |= prep.trusted ? KEY_ALLOC_TRUSTED : 0; ret = __key_link_begin(keyring, &index_key, &edit); if (ret < 0) { key_ref = ERR_PTR(ret); goto error_free_prep; } /* if we're going to allocate a new key, we're going to have * to modify the keyring */ ret = key_permission(keyring_ref, KEY_NEED_WRITE); if (ret < 0) { key_ref = ERR_PTR(ret); goto error_link_end; } /* if it's possible to update this type of key, search for an existing * key of the same type and description in the destination keyring and * update that instead if possible */ if (index_key.type->update) { key_ref = find_key_to_update(keyring_ref, &index_key); if (key_ref) goto found_matching_key; } /* if the client doesn't provide, decide on the permissions we want */ if (perm == KEY_PERM_UNDEF) { perm = KEY_POS_VIEW | KEY_POS_SEARCH | KEY_POS_LINK | KEY_POS_SETATTR; perm |= KEY_USR_VIEW; if (index_key.type->read) perm |= KEY_POS_READ; if (index_key.type == &key_type_keyring || index_key.type->update) perm |= KEY_POS_WRITE; } /* allocate a new key */ key = key_alloc(index_key.type, index_key.description, cred->fsuid, cred->fsgid, cred, perm, flags); if (IS_ERR(key)) { key_ref = ERR_CAST(key); goto error_link_end; } /* instantiate it and link it into the target keyring */ ret = __key_instantiate_and_link(key, &prep, keyring, NULL, &edit); if (ret < 0) { key_put(key); key_ref = ERR_PTR(ret); goto error_link_end; } key_ref = make_key_ref(key, is_key_possessed(keyring_ref)); error_link_end: __key_link_end(keyring, &index_key, edit); error_free_prep: if (index_key.type->preparse) index_key.type->free_preparse(&prep); error_put_type: key_type_put(index_key.type); error: return key_ref; found_matching_key: /* we found a matching key, so we're going to try to update it * - we can drop the locks first as we have the key pinned */ __key_link_end(keyring, &index_key, edit); key_ref = __key_update(key_ref, &prep); goto error_free_prep; }
key_ref_t key_create_or_update(key_ref_t keyring_ref, const char *type, const char *description, const void *payload, size_t plen, key_perm_t perm, unsigned long flags) { struct keyring_index_key index_key = { .description = description, }; struct key_preparsed_payload prep; struct assoc_array_edit *edit; const struct cred *cred = current_cred(); struct key *keyring, *key = NULL; key_ref_t key_ref; int ret; /* look up the key type to see if it's one of the registered kernel * types */ index_key.type = key_type_lookup(type); if (IS_ERR(index_key.type)) { key_ref = ERR_PTR(-ENODEV); goto error; } key_ref = ERR_PTR(-EINVAL); if (!index_key.type->instantiate || (!index_key.description && !index_key.type->preparse)) goto error_put_type; keyring = key_ref_to_ptr(keyring_ref); key_check(keyring); key_ref = ERR_PTR(-ENOTDIR); if (keyring->type != &key_type_keyring) goto error_put_type; memset(&prep, 0, sizeof(prep)); prep.data = payload; prep.datalen = plen; prep.quotalen = index_key.type->def_datalen; prep.trusted = flags & KEY_ALLOC_TRUSTED; prep.expiry = TIME_T_MAX; if (index_key.type->preparse) { ret = index_key.type->preparse(&prep); if (ret < 0) { key_ref = ERR_PTR(ret); goto error_free_prep; } if (!index_key.description) index_key.description = prep.description; key_ref = ERR_PTR(-EINVAL); if (!index_key.description) goto error_free_prep; } index_key.desc_len = strlen(index_key.description); key_ref = ERR_PTR(-EPERM); if (!prep.trusted && test_bit(KEY_FLAG_TRUSTED_ONLY, &keyring->flags)) goto error_free_prep; flags |= prep.trusted ? KEY_ALLOC_TRUSTED : 0; ret = __key_link_begin(keyring, &index_key, &edit); if (ret < 0) { key_ref = ERR_PTR(ret); goto error_free_prep; } /* if we're going to allocate a new key, we're going to have * to modify the keyring */ ret = key_permission(keyring_ref, KEY_NEED_WRITE); if (ret < 0) { key_ref = ERR_PTR(ret); goto error_link_end; } /* if it's possible to update this type of key, search for an existing * key of the same type and description in the destination keyring and * update that instead if possible */ if (index_key.type->update) { key_ref = find_key_to_update(keyring_ref, &index_key); if (key_ref) goto found_matching_key; } /* if the client doesn't provide, decide on the permissions we want */ if (perm == KEY_PERM_UNDEF) { perm = KEY_POS_VIEW | KEY_POS_SEARCH | KEY_POS_LINK | KEY_POS_SETATTR; perm |= KEY_USR_VIEW; if (index_key.type->read) perm |= KEY_POS_READ; if (index_key.type == &key_type_keyring || index_key.type->update) perm |= KEY_POS_WRITE; } /* allocate a new key */ key = key_alloc(index_key.type, index_key.description, cred->fsuid, cred->fsgid, cred, perm, flags); if (IS_ERR(key)) { key_ref = ERR_CAST(key); goto error_link_end; } /* instantiate it and link it into the target keyring */ ret = __key_instantiate_and_link(key, &prep, keyring, NULL, &edit); if (ret < 0) { key_put(key); key_ref = ERR_PTR(ret); goto error_link_end; } key_ref = make_key_ref(key, is_key_possessed(keyring_ref)); error_link_end: __key_link_end(keyring, &index_key, edit); error_free_prep: if (index_key.type->preparse) index_key.type->free_preparse(&prep); error_put_type: key_type_put(index_key.type); error: return key_ref; found_matching_key: /* we found a matching key, so we're going to try to update it * - we can drop the locks first as we have the key pinned */ __key_link_end(keyring, &index_key, edit); key_ref = __key_update(key_ref, &prep); goto error_free_prep; }
{'added': [(802, '\tif (!index_key.type->instantiate ||')], 'deleted': [(802, '\tif (!index_key.type->match || !index_key.type->instantiate ||')]}
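Rendered as a conventional unified diff (reconstructed purely from the added/deleted tuples above; the hunk header is inferred, the two lines are verbatim), the recorded one-line fix at line 802 of the file is:

@@ -802 +802 @@ key_create_or_update
-	if (!index_key.type->match || !index_key.type->instantiate ||
+	if (!index_key.type->instantiate ||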
1
1
680
4142
https://github.com/torvalds/linux
CVE-2017-2647
['CWE-476']
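This record is tagged CWE-476 (NULL pointer dereference): once ->match is no longer a mandatory operation, any unguarded call through that pointer would dereference NULL. A minimal C sketch of the guard pattern, using hypothetical names rather than the kernel API:

/* Illustrative only: calling an optional operation pointer safely */
struct ops_stub {
    int (*match)(const void *a, const void *b); /* may legitimately be NULL */
};

static int stub_compare(const struct ops_stub *ops, const void *a, const void *b)
{
    if (ops->match) /* guard prevents the CWE-476 dereference */
        return ops->match(a, b);
    return a == b; /* hedged default: exact-pointer comparison */
}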
am335x_eth_driver.c
am335xEthAddVlanAddrEntry
/** * @file am335x_eth_driver.c * @brief Sitara AM335x Gigabit Ethernet MAC driver * * @section License * * SPDX-License-Identifier: GPL-2.0-or-later * * Copyright (C) 2010-2020 Oryx Embedded SARL. All rights reserved. * * This file is part of CycloneTCP Open. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. * * @author Oryx Embedded SARL (www.oryx-embedded.com) * @version 2.0.0 **/ //Switch to the appropriate trace level #define TRACE_LEVEL NIC_TRACE_LEVEL //Dependencies #include "soc_am335x.h" #include "hw_types.h" #include "hw_cm_per.h" #include "hw_control_am335x.h" #include "hw_cpsw_ale.h" #include "hw_cpsw_cpdma.h" #include "hw_cpsw_port.h" #include "hw_cpsw_sl.h" #include "hw_cpsw_ss.h" #include "hw_cpsw_wr.h" #include "hw_mdio.h" #include "interrupt.h" #include "core/net.h" #include "drivers/mac/am335x_eth_driver.h" #include "debug.h" //MDIO input clock frequency #define MDIO_INPUT_CLK 125000000 //MDIO output clock frequency #define MDIO_OUTPUT_CLK 1000000 //Underlying network interface (port 1) static NetInterface *nicDriverInterface1 = NULL; //Underlying network interface (port 2) static NetInterface *nicDriverInterface2 = NULL; //IAR EWARM compiler? #if defined(__ICCARM__) //Transmit buffer (port 1) #pragma data_alignment = 4 #pragma location = AM335X_ETH_RAM_SECTION static uint8_t txBuffer1[AM335X_ETH_TX_BUFFER_COUNT][AM335X_ETH_TX_BUFFER_SIZE]; //Transmit buffer (port 2) #pragma data_alignment = 4 #pragma location = AM335X_ETH_RAM_SECTION static uint8_t txBuffer2[AM335X_ETH_TX_BUFFER_COUNT][AM335X_ETH_TX_BUFFER_SIZE]; //Receive buffer #pragma data_alignment = 4 #pragma location = AM335X_ETH_RAM_SECTION static uint8_t rxBuffer[AM335X_ETH_RX_BUFFER_COUNT][AM335X_ETH_RX_BUFFER_SIZE]; //Transmit buffer descriptors (port 1) #pragma data_alignment = 4 #pragma location = AM335X_ETH_RAM_CPPI_SECTION static Am335xTxBufferDesc txBufferDesc1[AM335X_ETH_TX_BUFFER_COUNT]; //Transmit buffer descriptors (port 2) #pragma data_alignment = 4 #pragma location = AM335X_ETH_RAM_CPPI_SECTION static Am335xTxBufferDesc txBufferDesc2[AM335X_ETH_TX_BUFFER_COUNT]; //Receive buffer descriptors #pragma data_alignment = 4 #pragma location = AM335X_ETH_RAM_CPPI_SECTION static Am335xRxBufferDesc rxBufferDesc[AM335X_ETH_RX_BUFFER_COUNT]; //Keil MDK-ARM or GCC compiler? 
#else //Transmit buffer (port 1) static uint8_t txBuffer1[AM335X_ETH_TX_BUFFER_COUNT][AM335X_ETH_TX_BUFFER_SIZE] __attribute__((aligned(4), __section__(AM335X_ETH_RAM_SECTION))); //Transmit buffer (port 2) static uint8_t txBuffer2[AM335X_ETH_TX_BUFFER_COUNT][AM335X_ETH_TX_BUFFER_SIZE] __attribute__((aligned(4), __section__(AM335X_ETH_RAM_SECTION))); //Receive buffer static uint8_t rxBuffer[AM335X_ETH_RX_BUFFER_COUNT][AM335X_ETH_RX_BUFFER_SIZE] __attribute__((aligned(4), __section__(AM335X_ETH_RAM_SECTION))); //Transmit buffer descriptors (port 1) static Am335xTxBufferDesc txBufferDesc1[AM335X_ETH_TX_BUFFER_COUNT] __attribute__((aligned(4), __section__(AM335X_ETH_RAM_CPPI_SECTION))); //Transmit buffer descriptors (port 2) static Am335xTxBufferDesc txBufferDesc2[AM335X_ETH_TX_BUFFER_COUNT] __attribute__((aligned(4), __section__(AM335X_ETH_RAM_CPPI_SECTION))); //Receive buffer descriptors static Am335xRxBufferDesc rxBufferDesc[AM335X_ETH_RX_BUFFER_COUNT] __attribute__((aligned(4), __section__(AM335X_ETH_RAM_CPPI_SECTION))); #endif //Pointer to the current TX buffer descriptor (port 1) static Am335xTxBufferDesc *txCurBufferDesc1; //Pointer to the current TX buffer descriptor (port 2) static Am335xTxBufferDesc *txCurBufferDesc2; //Pointer to the current RX buffer descriptor static Am335xRxBufferDesc *rxCurBufferDesc; /** * @brief AM335x Ethernet MAC driver (port 1) **/ const NicDriver am335xEthPort1Driver = { NIC_TYPE_ETHERNET, ETH_MTU, am335xEthInitPort1, am335xEthTick, am335xEthEnableIrq, am335xEthDisableIrq, am335xEthEventHandler, am335xEthSendPacketPort1, am335xEthUpdateMacAddrFilter, am335xEthUpdateMacConfig, am335xEthWritePhyReg, am335xEthReadPhyReg, FALSE, TRUE, TRUE, FALSE }; /** * @brief AM335x Ethernet MAC driver (port 2) **/ const NicDriver am335xEthPort2Driver = { NIC_TYPE_ETHERNET, ETH_MTU, am335xEthInitPort2, am335xEthTick, am335xEthEnableIrq, am335xEthDisableIrq, am335xEthEventHandler, am335xEthSendPacketPort2, am335xEthUpdateMacAddrFilter, am335xEthUpdateMacConfig, am335xEthWritePhyReg, am335xEthReadPhyReg, FALSE, TRUE, TRUE, FALSE }; /** * @brief AM335x Ethernet MAC initialization (port 1) * @param[in] interface Underlying network interface * @return Error code **/ error_t am335xEthInitPort1(NetInterface *interface) { error_t error; uint32_t temp; //Debug message TRACE_INFO("Initializing AM335x Ethernet MAC (port 1)...\r\n"); //Initialize CPSW instance am335xEthInitInstance(interface); //Save underlying network interface nicDriverInterface1 = interface; //Valid Ethernet PHY or switch driver? if(interface->phyDriver != NULL) { //Ethernet PHY initialization error = interface->phyDriver->init(interface); } else if(interface->switchDriver != NULL) { //Ethernet switch initialization error = interface->switchDriver->init(interface); } else { //The interface is not properly configured error = ERROR_FAILURE; } //Any error to report? if(error) { return error; } //Unspecified MAC address? 
if(macCompAddr(&interface->macAddr, &MAC_UNSPECIFIED_ADDR)) { //Use the factory preprogrammed MAC address interface->macAddr.b[0] = CONTROL_MAC_ID_HI_R(0) >> CONTROL_MAC_ID0_HI_MACADDR_47_40_SHIFT; interface->macAddr.b[1] = CONTROL_MAC_ID_HI_R(0) >> CONTROL_MAC_ID0_HI_MACADDR_39_32_SHIFT; interface->macAddr.b[2] = CONTROL_MAC_ID_HI_R(0) >> CONTROL_MAC_ID0_HI_MACADDR_31_24_SHIFT; interface->macAddr.b[3] = CONTROL_MAC_ID_HI_R(0) >> CONTROL_MAC_ID0_HI_MACADDR_23_16_SHIFT; interface->macAddr.b[4] = CONTROL_MAC_ID_LO_R(0) >> CONTROL_MAC_ID0_LO_MACADDR_15_8_SHIFT; interface->macAddr.b[5] = CONTROL_MAC_ID_LO_R(0) >> CONTROL_MAC_ID0_LO_MACADDR_7_0_SHIFT; //Generate the 64-bit interface identifier macAddrToEui64(&interface->macAddr, &interface->eui64); } //Set port state to forward temp = CPSW_ALE_PORTCTL_R(1) & ~CPSW_ALE_PORTCTL1_PORT_STATE; CPSW_ALE_PORTCTL_R(1) = temp | CPSW_ALE_PORTCTL_PORT_STATE_FORWARD; //Set the MAC address of the station CPSW_PORT1_SA_HI_R = interface->macAddr.w[0] | (interface->macAddr.w[1] << 16); CPSW_PORT1_SA_LO_R = interface->macAddr.w[2]; //Configure VLAN identifier and VLAN priority CPSW_PORT1_PORT_VLAN_R = (0 << CPSW_PORT_P1_PORT_VLAN_PORT_PRI_SHIFT) | (CPSW_PORT1 << CPSW_PORT_P1_PORT_VLAN_PORT_VID_SHIFT); //Add a VLAN entry in the ALE table am335xEthAddVlanEntry(CPSW_PORT1, CPSW_PORT1); //Add a VLAN/unicast address entry in the ALE table am335xEthAddVlanAddrEntry(CPSW_PORT1, CPSW_PORT1, &interface->macAddr); //Enable CPSW statistics CPSW_SS_STAT_PORT_EN_R |= CPSW_SS_STAT_PORT_EN_P1_STAT_EN; //Enable TX and RX CPSW_SL1_MACCONTROL_R = CPSW_SL_MACCONTROL_GMII_EN; //Accept any packets from the upper layer osSetEvent(&interface->nicTxEvent); //Successful initialization return NO_ERROR; } /** * @brief AM335x Ethernet MAC initialization (port 2) * @param[in] interface Underlying network interface * @return Error code **/ error_t am335xEthInitPort2(NetInterface *interface) { error_t error; uint32_t temp; //Debug message TRACE_INFO("Initializing AM335x Ethernet MAC (port 2)...\r\n"); //Initialize CPSW instance am335xEthInitInstance(interface); //Save underlying network interface nicDriverInterface2 = interface; //PHY transceiver initialization error = interface->phyDriver->init(interface); //Failed to initialize PHY transceiver? if(error) { return error; } //Unspecified MAC address? 
if(macCompAddr(&interface->macAddr, &MAC_UNSPECIFIED_ADDR)) { //Use the factory preprogrammed MAC address interface->macAddr.b[0] = CONTROL_MAC_ID_HI_R(1) >> CONTROL_MAC_ID1_HI_MACADDR_47_40_SHIFT; interface->macAddr.b[1] = CONTROL_MAC_ID_HI_R(1) >> CONTROL_MAC_ID1_HI_MACADDR_39_32_SHIFT; interface->macAddr.b[2] = CONTROL_MAC_ID_HI_R(1) >> CONTROL_MAC_ID1_HI_MACADDR_31_24_SHIFT; interface->macAddr.b[3] = CONTROL_MAC_ID_HI_R(1) >> CONTROL_MAC_ID1_HI_MACADDR_23_16_SHIFT; interface->macAddr.b[4] = CONTROL_MAC_ID_LO_R(1) >> CONTROL_MAC_ID1_LO_MACADDR_15_8_SHIFT; interface->macAddr.b[5] = CONTROL_MAC_ID_LO_R(1) >> CONTROL_MAC_ID1_LO_MACADDR_7_0_SHIFT; //Generate the 64-bit interface identifier macAddrToEui64(&interface->macAddr, &interface->eui64); } //Set port state to forward temp = CPSW_ALE_PORTCTL_R(2) & ~CPSW_ALE_PORTCTL2_PORT_STATE; CPSW_ALE_PORTCTL_R(2) = temp | CPSW_ALE_PORTCTL_PORT_STATE_FORWARD; //Set the MAC address of the station CPSW_PORT2_SA_HI_R = interface->macAddr.w[0] | (interface->macAddr.w[1] << 16); CPSW_PORT2_SA_LO_R = interface->macAddr.w[2]; //Configure VLAN identifier and VLAN priority CPSW_PORT2_PORT_VLAN_R = (0 << CPSW_PORT_P2_PORT_VLAN_PORT_PRI_SHIFT) | (CPSW_PORT2 << CPSW_PORT_P2_PORT_VLAN_PORT_VID_SHIFT); //Add a VLAN entry in the ALE table am335xEthAddVlanEntry(CPSW_PORT2, CPSW_PORT2); //Add a VLAN/unicast address entry in the ALE table am335xEthAddVlanAddrEntry(CPSW_PORT2, CPSW_PORT2, &interface->macAddr); //Enable CPSW statistics CPSW_SS_STAT_PORT_EN_R |= CPSW_SS_STAT_PORT_EN_P2_STAT_EN; //Enable TX and RX CPSW_SL2_MACCONTROL_R = CPSW_SL_MACCONTROL_GMII_EN; //Accept any packets from the upper layer osSetEvent(&interface->nicTxEvent); //Successful initialization return NO_ERROR; } /** * @brief Initialize CPSW instance * @param[in] interface Underlying network interface **/ void am335xEthInitInstance(NetInterface *interface) { uint_t i; uint32_t temp; #ifdef ti_sysbios_BIOS___VERS Hwi_Params hwiParams; #endif //Initialization sequence is performed once if(nicDriverInterface1 == NULL && nicDriverInterface2 == NULL) { //Select the interface mode (MII/RMII/RGMII) and configure pin muxing am335xEthInitGpio(interface); //Enable the CPSW subsystem clocks CM_PER_CPGMAC0_CLKCTRL_R = CM_PER_CPGMAC0_CLKCTRL_MODULEMODE_ENABLE; //Wait for the CPSW module to be fully functional do { //Get module idle status temp = (CM_PER_CPGMAC0_CLKCTRL_R & CM_PER_CPGMAC0_CLKCTRL_IDLEST) >> CM_PER_CPGMAC0_CLKCTRL_IDLEST_SHIFT; //Keep looping as long as the module is not fully functional } while(temp != CM_PER_CPGMAC0_CLKCTRL_IDLEST_FUNC); //Start a software forced wake-up transition CM_PER_CPSW_CLKSTCTRL_R = CM_PER_CPSW_CLKSTCTRL_CLKTRCTRL_SW_WKUP; //Wait for the CPSW 125 MHz OCP clock to be active do { //Get the state of the CPSW 125 MHz OCP clock temp = (CM_PER_CPSW_CLKSTCTRL_R & CM_PER_CPSW_CLKSTCTRL_CLKACTIVITY_CPSW_125MHZ_GCLK) >> CM_PER_CPSW_CLKSTCTRL_CLKACTIVITY_CPSW_125MHZ_GCLK_SHIFT; //Keep looping as long as the clock is inactive } while(temp != CM_PER_CPSW_CLKSTCTRL_CLKACTIVITY_CPSW_125MHZ_GCLK_ACT); //Reset CPSW subsystem CPSW_SS_SOFT_RESET_R = CPSW_SS_SOFT_RESET_SOFT_RESET; //Wait for the reset to complete while((CPSW_SS_SOFT_RESET_R & CPSW_SS_SOFT_RESET_SOFT_RESET) != 0) { } //Reset CPSW wrapper module CPSW_WR_SOFT_RESET_R = CPSW_WR_SOFT_RESET_SOFT_RESET; //Wait for the reset to complete while((CPSW_WR_SOFT_RESET_R & CPSW_WR_SOFT_RESET_SOFT_RESET) != 0) { } //Reset CPSW sliver 1 logic CPSW_SL1_SOFT_RESET_R = CPSW_SL_SOFT_RESET_SOFT_RESET; //Wait for the reset to complete 
while((CPSW_SL1_SOFT_RESET_R & CPSW_SL_SOFT_RESET_SOFT_RESET) != 0) { } //Reset CPSW sliver 2 logic CPSW_SL2_SOFT_RESET_R = CPSW_SL_SOFT_RESET_SOFT_RESET; //Wait for the reset to complete while((CPSW_SL2_SOFT_RESET_R & CPSW_SL_SOFT_RESET_SOFT_RESET) != 0) { } //Reset CPSW CPDMA module CPSW_CPDMA_CPDMA_SOFT_RESET_R = CPSW_CPDMA_CPDMA_SOFT_RESET_SOFT_RESET; //Wait for the reset to complete while((CPSW_CPDMA_CPDMA_SOFT_RESET_R & CPSW_CPDMA_CPDMA_SOFT_RESET_SOFT_RESET) != 0) { } //Initialize the HDPs and the CPs to NULL for(i = CPSW_CH0; i <= CPSW_CH7; i++) { //TX head descriptor pointer CPSW_CPDMA_TX_HDP_R(i) = 0; //TX completion pointer CPSW_CPDMA_TX_CP_R(i) = 0; //RX head descriptor pointer CPSW_CPDMA_RX_HDP_R(i) = 0; //RX completion pointer CPSW_CPDMA_RX_CP_R(i) = 0; } //Enable ALE and clear ALE address table CPSW_ALE_CONTROL_R = CPSW_ALE_CONTROL_ENABLE_ALE | CPSW_ALE_CONTROL_CLEAR_TABLE; //For dual MAC mode, configure VLAN aware mode CPSW_ALE_CONTROL_R |= CPSW_ALE_CONTROL_ALE_VLAN_AWARE; //Set dual MAC mode for port 0 temp = CPSW_PORT0_TX_IN_CTL_R & ~CPSW_PORT_P0_TX_IN_CTL_TX_IN_SEL; CPSW_PORT0_TX_IN_CTL_R = temp | CPSW_PORT_P0_TX_IN_CTL_TX_IN_DUAL_MAC; //Set port 0 state to forward temp = CPSW_ALE_PORTCTL_R(0) & ~CPSW_ALE_PORTCTL0_PORT_STATE; CPSW_ALE_PORTCTL_R(0) = temp | CPSW_ALE_PORTCTL_PORT_STATE_FORWARD; //Enable CPSW statistics CPSW_SS_STAT_PORT_EN_R = CPSW_SS_STAT_PORT_EN_P0_STAT_EN; //Configure TX and RX buffer descriptors am335xEthInitBufferDesc(interface); //Acknowledge TX and RX interrupts for proper interrupt pulsing CPSW_CPDMA_CPDMA_EOI_VECTOR_R = CPSW_CPDMA_EOI_VECTOR_TX_PULSE; CPSW_CPDMA_CPDMA_EOI_VECTOR_R = CPSW_CPDMA_EOI_VECTOR_RX_PULSE; //Enable channel 1 and 2 interrupts of the DMA engine CPSW_CPDMA_TX_INTMASK_SET_R = (1 << CPSW_CH1) | (1 << CPSW_CH2); //Enable TX completion interrupts CPSW_WR_C_TX_EN_R(CPSW_CORE0) |= (1 << CPSW_CH1) | (1 << CPSW_CH2); //Enable channel 0 interrupts of the DMA engine CPSW_CPDMA_RX_INTMASK_SET_R = (1 << CPSW_CH0); //Enable RX completion interrupts CPSW_WR_C_RX_EN_R(CPSW_CORE0) |= (1 << CPSW_CH0); #ifdef ti_sysbios_BIOS___VERS //Configure TX interrupt Hwi_Params_init(&hwiParams); hwiParams.enableInt = FALSE; hwiParams.priority = AM335X_ETH_IRQ_PRIORITY; //Register TX interrupt handler Hwi_create(SYS_INT_3PGSWTXINT0, (Hwi_FuncPtr) am335xEthTxIrqHandler, &hwiParams, NULL); //Configure RX interrupt Hwi_Params_init(&hwiParams); hwiParams.enableInt = FALSE; hwiParams.priority = AM335X_ETH_IRQ_PRIORITY; //Register RX interrupt handler Hwi_create(SYS_INT_3PGSWRXINT0, (Hwi_FuncPtr) am335xEthRxIrqHandler, &hwiParams, NULL); #else //Register interrupt handlers IntRegister(SYS_INT_3PGSWTXINT0, am335xEthTxIrqHandler); IntRegister(SYS_INT_3PGSWRXINT0, am335xEthRxIrqHandler); //Configure TX interrupt priority IntPrioritySet(SYS_INT_3PGSWTXINT0, AM335X_ETH_IRQ_PRIORITY, AINTC_HOSTINT_ROUTE_IRQ); //Configure RX interrupt priority IntPrioritySet(SYS_INT_3PGSWRXINT0, AM335X_ETH_IRQ_PRIORITY, AINTC_HOSTINT_ROUTE_IRQ); #endif //Enable the transmission and reception CPSW_CPDMA_TX_CONTROL_R = CPSW_CPDMA_TX_CONTROL_TX_EN; CPSW_CPDMA_RX_CONTROL_R = CPSW_CPDMA_RX_CONTROL_RX_EN; //Calculate the MDC clock divider to be used temp = (MDIO_INPUT_CLK / MDIO_OUTPUT_CLK) - 1; //Initialize MDIO interface MDIO_CONTROL_R = MDIO_CONTROL_ENABLE | MDIO_CONTROL_FAULTENB | (temp & MDIO_CONTROL_CLKDIV); } } //BeagleBone Black, TMDSSK3358, OSD3358-SM-RED or SBC DIVA board? 
#if defined(USE_BEAGLEBONE_BLACK) || defined(USE_TMDSSK3358) || \ defined(USE_OSD3358_SM_RED) || defined(USE_SBC_DIVA) /** * @brief GPIO configuration * @param[in] interface Underlying network interface **/ void am335xEthInitGpio(NetInterface *interface) { //BeagleBone Black board? #if defined(USE_BEAGLEBONE_BLACK) //Select MII interface mode for port 1 CONTROL_GMII_SEL_R = CONTROL_GMII_SEL_GMII1_SEL_MII; //Configure MII1_TX_CLK (GPIO3_9) CONTROL_CONF_MII1_TXCLK_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(0); //Configure MII1_TX_EN (GPIO3_3) CONTROL_CONF_MII1_TXEN_R = CONTROL_CONF_MUXMODE(0); //Configure MII1_TXD0 (GPIO0_28) CONTROL_CONF_MII1_TXD0_R = CONTROL_CONF_MUXMODE(0); //Configure MII1_TXD1 (GPIO0_21) CONTROL_CONF_MII1_TXD1_R = CONTROL_CONF_MUXMODE(0); //Configure MII1_TXD2 (GPIO0_17) CONTROL_CONF_MII1_TXD2_R = CONTROL_CONF_MUXMODE(0); //Configure MII1_TXD3 (GPIO0_16) CONTROL_CONF_MII1_TXD3_R = CONTROL_CONF_MUXMODE(0); //Configure MII1_RX_CLK (GPIO3_10) CONTROL_CONF_MII1_RXCLK_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(0); //Configure MII1_RXD0 (GPIO2_21) CONTROL_CONF_MII1_RXD0_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(0); //Configure MII1_RXD1 (GPIO2_20) CONTROL_CONF_MII1_RXD1_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(0); //Configure MII1_RXD2 (GPIO2_19) CONTROL_CONF_MII1_RXD2_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(0); //Configure MII1_RXD3 (GPIO2_18) CONTROL_CONF_MII1_RXD3_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(0); //Configure MII1_COL (GPIO3_0) CONTROL_CONF_MII1_COL_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(0); //Configure MII1_CRS (GPIO3_1) CONTROL_CONF_MII1_CRS_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(0); //Configure MII1_RX_ER (GPIO3_2) CONTROL_CONF_MII1_RXERR_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(0); //Configure MII1_RX_DV (GPIO3_4) CONTROL_CONF_MII1_RXDV_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(0); //Configure MDIO (GPIO0_0) CONTROL_CONF_MDIO_DATA_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_PULLUPSEL | CONTROL_CONF_MUXMODE(0); //Configure MDC (GPIO0_1) CONTROL_CONF_MDIO_CLK_R = CONTROL_CONF_PULLUPSEL | CONTROL_CONF_MUXMODE(0); //TMDSSK3358 board? 
#elif defined(USE_TMDSSK3358) //Select RGMII interface mode for both port 1 and port 2 CONTROL_GMII_SEL_R = CONTROL_GMII_SEL_GMII1_SEL_RGMII | CONTROL_GMII_SEL_GMII2_SEL_RGMII; //Configure RGMII1_TCLK (GPIO3_9) CONTROL_CONF_MII1_TXCLK_R = CONTROL_CONF_MUXMODE(2); //Configure RGMII1_TCTL (GPIO3_3) CONTROL_CONF_MII1_TXEN_R = CONTROL_CONF_MUXMODE(2); //Configure RGMII1_TD0 (GPIO0_28) CONTROL_CONF_MII1_TXD0_R = CONTROL_CONF_MUXMODE(2); //Configure RGMII1_TD1 (GPIO0_21) CONTROL_CONF_MII1_TXD1_R = CONTROL_CONF_MUXMODE(2); //Configure RGMII1_TD2 (GPIO0_17) CONTROL_CONF_MII1_TXD2_R = CONTROL_CONF_MUXMODE(2); //Configure RGMII1_TD3 (GPIO0_16) CONTROL_CONF_MII1_TXD3_R = CONTROL_CONF_MUXMODE(2); //Configure RGMII1_RCLK (GPIO3_10) CONTROL_CONF_MII1_RXCLK_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure RGMII1_RCTL (GPIO3_4) CONTROL_CONF_MII1_RXDV_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure RGMII1_RD0 (GPIO2_21) CONTROL_CONF_MII1_RXD0_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure /RGMII1_RD1 (GPIO2_20) CONTROL_CONF_MII1_RXD1_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure RGMII1_RD2 (GPIO2_19) CONTROL_CONF_MII1_RXD2_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure RGMII1_RD3 (GPIO2_18) CONTROL_CONF_MII1_RXD3_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure RGMII2_TCLK (GPIO1_22/GPMC_A6) CONTROL_CONF_GPMC_A_R(6) = CONTROL_CONF_MUXMODE(2); //Configure RGMII2_TCTL (GPIO1_16/GPMC_A0) CONTROL_CONF_GPMC_A_R(0) = CONTROL_CONF_MUXMODE(2); //Configure RGMII2_TD0 (GPIO1_21/GPMC_A5) CONTROL_CONF_GPMC_A_R(5) = CONTROL_CONF_MUXMODE(2); //Configure RGMII2_TD1 (GPIO1_20/GPMC_A4) CONTROL_CONF_GPMC_A_R(4) = CONTROL_CONF_MUXMODE(2); //Configure RGMII2_TD2 (GPIO1_19/GPMC_A3) CONTROL_CONF_GPMC_A_R(3) = CONTROL_CONF_MUXMODE(2); //Configure RGMII2_TD3 (GPIO1_18/GPMC_A2) CONTROL_CONF_GPMC_A_R(2) = CONTROL_CONF_MUXMODE(2); //Configure RGMII2_RCLK (GPIO1_23/GPMC_A7) CONTROL_CONF_GPMC_A_R(7) = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure RGMII2_RCTL (GPIO1_17/GPMC_A1) CONTROL_CONF_GPMC_A_R(1) = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure RGMII2_RD0 (GPIO1_27/GPMC_A11) CONTROL_CONF_GPMC_A_R(11) = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure RGMII2_RD1 (GPIO1_26/GPMC_A10) CONTROL_CONF_GPMC_A_R(10) = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure RGMII2_RD2 (GPIO1_25/GPMC_A9) CONTROL_CONF_GPMC_A_R(9) = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure RGMII2_RD3 (GPIO1_24/GPMC_A8) CONTROL_CONF_GPMC_A_R(8) = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure MDIO (GPIO0_0) CONTROL_CONF_MDIO_DATA_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_PULLUPSEL | CONTROL_CONF_MUXMODE(0); //Configure MDC (GPIO0_1) CONTROL_CONF_MDIO_CLK_R = CONTROL_CONF_PULLUPSEL | CONTROL_CONF_MUXMODE(0); //OSD3358-SM-RED board? 
#elif defined(USE_OSD3358_SM_RED) //Select RGMII interface mode for both port 1 CONTROL_GMII_SEL_R = CONTROL_GMII_SEL_GMII1_SEL_RGMII; //Configure RGMII1_TCLK (GPIO3_9) CONTROL_CONF_MII1_TXCLK_R = CONTROL_CONF_MUXMODE(2); //Configure RGMII1_TCTL (GPIO3_3) CONTROL_CONF_MII1_TXEN_R = CONTROL_CONF_MUXMODE(2); //Configure RGMII1_TD0 (GPIO0_28) CONTROL_CONF_MII1_TXD0_R = CONTROL_CONF_MUXMODE(2); //Configure RGMII1_TD1 (GPIO0_21) CONTROL_CONF_MII1_TXD1_R = CONTROL_CONF_MUXMODE(2); //Configure RGMII1_TD2 (GPIO0_17) CONTROL_CONF_MII1_TXD2_R = CONTROL_CONF_MUXMODE(2); //Configure RGMII1_TD3 (GPIO0_16) CONTROL_CONF_MII1_TXD3_R = CONTROL_CONF_MUXMODE(2); //Configure RGMII1_RCLK (GPIO3_10) CONTROL_CONF_MII1_RXCLK_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure RGMII1_RCTL (GPIO3_4) CONTROL_CONF_MII1_RXDV_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure RGMII1_RD0 (GPIO2_21) CONTROL_CONF_MII1_RXD0_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure /RGMII1_RD1 (GPIO2_20) CONTROL_CONF_MII1_RXD1_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure RGMII1_RD2 (GPIO2_19) CONTROL_CONF_MII1_RXD2_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure RGMII1_RD3 (GPIO2_18) CONTROL_CONF_MII1_RXD3_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure MDIO (GPIO0_0) CONTROL_CONF_MDIO_DATA_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_PULLUPSEL | CONTROL_CONF_MUXMODE(0); //Configure MDC (GPIO0_1) CONTROL_CONF_MDIO_CLK_R = CONTROL_CONF_PULLUPSEL | CONTROL_CONF_MUXMODE(0); //SBC DIVA board? #elif defined(USE_SBC_DIVA) //Select RMII interface mode for port 1 and RGMII interface mode for port 2 CONTROL_GMII_SEL_R = CONTROL_GMII_SEL_RMII1_IO_CLK_EN | CONTROL_GMII_SEL_GMII1_SEL_RMII | CONTROL_GMII_SEL_GMII2_SEL_RGMII; //Configure RMII1_REF_CLK (GPIO0_29) CONTROL_CONF_RMII1_REFCLK_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(0); //Configure RMII1_TX_EN (GPIO3_3) CONTROL_CONF_MII1_TXEN_R = CONTROL_CONF_MUXMODE(1); //Configure RMII1_TXD0 (GPIO0_28) CONTROL_CONF_MII1_TXD0_R = CONTROL_CONF_MUXMODE(1); //Configure RMII1_TXD1 (GPIO0_21) CONTROL_CONF_MII1_TXD1_R = CONTROL_CONF_MUXMODE(1); //Configure RMII1_RXD0 (GPIO2.21) CONTROL_CONF_MII1_RXD0_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(1); //Configure RMII1_RXD1 (GPIO2.20) CONTROL_CONF_MII1_RXD1_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(1); //Configure RMII1_CRS_DV (GPIO3_1) CONTROL_CONF_MII1_CRS_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(1); //Configure RMII1_RX_ER (GPIO3_2) CONTROL_CONF_MII1_RXERR_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(1); //Configure RGMII2_TCLK (GPIO1_22/GPMC_A6) CONTROL_CONF_GPMC_A_R(6) = CONTROL_CONF_MUXMODE(2); //Configure RGMII2_TCTL (GPIO1_16/GPMC_A0) CONTROL_CONF_GPMC_A_R(0) = CONTROL_CONF_MUXMODE(2); //Configure RGMII2_TD0 (GPIO1_21/GPMC_A5) CONTROL_CONF_GPMC_A_R(5) = CONTROL_CONF_MUXMODE(2); //Configure RGMII2_TD1 (GPIO1_20/GPMC_A4) CONTROL_CONF_GPMC_A_R(4) = CONTROL_CONF_MUXMODE(2); //Configure RGMII2_TD2 (GPIO1_19/GPMC_A3) CONTROL_CONF_GPMC_A_R(3) = CONTROL_CONF_MUXMODE(2); //Configure RGMII2_TD3 (GPIO1_18/GPMC_A2) CONTROL_CONF_GPMC_A_R(2) = CONTROL_CONF_MUXMODE(2); //Configure RGMII2_RCLK (GPIO1_23/GPMC_A7) CONTROL_CONF_GPMC_A_R(7) = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure RGMII2_RCTL (GPIO1_17/GPMC_A1) CONTROL_CONF_GPMC_A_R(1) = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure RGMII2_RD0 (GPIO1_27/GPMC_A11) CONTROL_CONF_GPMC_A_R(11) = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure RGMII2_RD1 
(GPIO1_26/GPMC_A10) CONTROL_CONF_GPMC_A_R(10) = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure RGMII2_RD2 (GPIO1_25/GPMC_A9) CONTROL_CONF_GPMC_A_R(9) = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure RGMII2_RD3 (GPIO1_24/GPMC_A8) CONTROL_CONF_GPMC_A_R(8) = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure MDIO (GPIO0_0) CONTROL_CONF_MDIO_DATA_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_PULLUPSEL | CONTROL_CONF_MUXMODE(0); //Configure MDC (GPIO0_1) CONTROL_CONF_MDIO_CLK_R = CONTROL_CONF_PULLUPSEL | CONTROL_CONF_MUXMODE(0); #endif } #endif /** * @brief Initialize buffer descriptor lists * @param[in] interface Underlying network interface **/ void am335xEthInitBufferDesc(NetInterface *interface) { uint_t i; uint_t nextIndex; uint_t prevIndex; //Initialize TX buffer descriptor list (port 1) for(i = 0; i < AM335X_ETH_TX_BUFFER_COUNT; i++) { //Index of the next buffer nextIndex = (i + 1) % AM335X_ETH_TX_BUFFER_COUNT; //Index of the previous buffer prevIndex = (i + AM335X_ETH_TX_BUFFER_COUNT - 1) % AM335X_ETH_TX_BUFFER_COUNT; //Next descriptor pointer txBufferDesc1[i].word0 = (uint32_t) NULL; //Buffer pointer txBufferDesc1[i].word1 = (uint32_t) txBuffer1[i]; //Buffer offset and buffer length txBufferDesc1[i].word2 = 0; //Status flags and packet length txBufferDesc1[i].word3 = 0; //Form a doubly linked list txBufferDesc1[i].next = &txBufferDesc1[nextIndex]; txBufferDesc1[i].prev = &txBufferDesc1[prevIndex]; } //Point to the very first descriptor txCurBufferDesc1 = &txBufferDesc1[0]; //Mark the end of the queue txCurBufferDesc1->prev->word3 = CPSW_TX_WORD3_SOP | CPSW_TX_WORD3_EOP | CPSW_TX_WORD3_EOQ; //Initialize TX buffer descriptor list (port 2) for(i = 0; i < AM335X_ETH_TX_BUFFER_COUNT; i++) { //Index of the next buffer nextIndex = (i + 1) % AM335X_ETH_TX_BUFFER_COUNT; //Index of the previous buffer prevIndex = (i + AM335X_ETH_TX_BUFFER_COUNT - 1) % AM335X_ETH_TX_BUFFER_COUNT; //Next descriptor pointer txBufferDesc2[i].word0 = (uint32_t) NULL; //Buffer pointer txBufferDesc2[i].word1 = (uint32_t) txBuffer2[i]; //Buffer offset and buffer length txBufferDesc2[i].word2 = 0; //Status flags and packet length txBufferDesc2[i].word3 = 0; //Form a doubly linked list txBufferDesc2[i].next = &txBufferDesc2[nextIndex]; txBufferDesc2[i].prev = &txBufferDesc2[prevIndex]; } //Point to the very first descriptor txCurBufferDesc2 = &txBufferDesc2[0]; //Mark the end of the queue txCurBufferDesc2->prev->word3 = CPSW_TX_WORD3_SOP | CPSW_TX_WORD3_EOP | CPSW_TX_WORD3_EOQ; //Initialize RX buffer descriptor list for(i = 0; i < AM335X_ETH_RX_BUFFER_COUNT; i++) { //Index of the next buffer nextIndex = (i + 1) % AM335X_ETH_RX_BUFFER_COUNT; //Index of the previous buffer prevIndex = (i + AM335X_ETH_RX_BUFFER_COUNT - 1) % AM335X_ETH_RX_BUFFER_COUNT; //Next descriptor pointer rxBufferDesc[i].word0 = (uint32_t) &rxBufferDesc[nextIndex]; //Buffer pointer rxBufferDesc[i].word1 = (uint32_t) rxBuffer[i]; //Buffer offset and buffer length rxBufferDesc[i].word2 = AM335X_ETH_RX_BUFFER_SIZE; //Status flags and packet length rxBufferDesc[i].word3 = CPSW_RX_WORD3_OWNER; //Form a doubly linked list rxBufferDesc[i].next = &rxBufferDesc[nextIndex]; rxBufferDesc[i].prev = &rxBufferDesc[prevIndex]; } //Point to the very first descriptor rxCurBufferDesc = &rxBufferDesc[0]; //Mark the end of the queue rxCurBufferDesc->prev->word0 = (uint32_t) NULL; //Write the RX DMA head descriptor pointer CPSW_CPDMA_RX_HDP_R(CPSW_CH0) = (uint32_t) rxCurBufferDesc; } /** * @brief AM335x Ethernet MAC timer handler * * This 
routine is periodically called by the TCP/IP stack to handle periodic * operations such as polling the link state * * @param[in] interface Underlying network interface **/ void am335xEthTick(NetInterface *interface) { //Valid Ethernet PHY or switch driver? if(interface->phyDriver != NULL) { //Handle periodic operations interface->phyDriver->tick(interface); } else if(interface->switchDriver != NULL) { //Handle periodic operations interface->switchDriver->tick(interface); } else { //Just for sanity } //Misqueued buffer condition? if((rxCurBufferDesc->word3 & CPSW_RX_WORD3_OWNER) != 0) { if(CPSW_CPDMA_RX_HDP_R(CPSW_CH0) == 0) { //The host acts on the misqueued buffer condition by writing the added //buffer descriptor address to the appropriate RX DMA head descriptor //pointer CPSW_CPDMA_RX_HDP_R(CPSW_CH0) = (uint32_t) rxCurBufferDesc; } } } /** * @brief Enable interrupts * @param[in] interface Underlying network interface **/ void am335xEthEnableIrq(NetInterface *interface) { #ifdef ti_sysbios_BIOS___VERS //Enable Ethernet MAC interrupts Hwi_enableInterrupt(SYS_INT_3PGSWTXINT0); Hwi_enableInterrupt(SYS_INT_3PGSWRXINT0); #else //Enable Ethernet MAC interrupts IntSystemEnable(SYS_INT_3PGSWTXINT0); IntSystemEnable(SYS_INT_3PGSWRXINT0); #endif //Valid Ethernet PHY or switch driver? if(interface->phyDriver != NULL) { //Enable Ethernet PHY interrupts interface->phyDriver->enableIrq(interface); } else if(interface->switchDriver != NULL) { //Enable Ethernet switch interrupts interface->switchDriver->enableIrq(interface); } else { //Just for sanity } } /** * @brief Disable interrupts * @param[in] interface Underlying network interface **/ void am335xEthDisableIrq(NetInterface *interface) { #ifdef ti_sysbios_BIOS___VERS //Disable Ethernet MAC interrupts Hwi_disableInterrupt(SYS_INT_3PGSWTXINT0); Hwi_disableInterrupt(SYS_INT_3PGSWRXINT0); #else //Disable Ethernet MAC interrupts IntSystemDisable(SYS_INT_3PGSWTXINT0); IntSystemDisable(SYS_INT_3PGSWRXINT0); #endif //Valid Ethernet PHY or switch driver? if(interface->phyDriver != NULL) { //Disable Ethernet PHY interrupts interface->phyDriver->disableIrq(interface); } else if(interface->switchDriver != NULL) { //Disable Ethernet switch interrupts interface->switchDriver->disableIrq(interface); } else { //Just for sanity } } /** * @brief Ethernet MAC transmit interrupt **/ void am335xEthTxIrqHandler(void) { bool_t flag; uint32_t status; uint32_t temp; Am335xTxBufferDesc *p; //Interrupt service routine prologue osEnterIsr(); //This flag will be set if a higher priority task must be woken flag = FALSE; //Read the TX_STAT register to determine which channels caused the interrupt status = CPSW_WR_C_TX_STAT_R(CPSW_CORE0); //Packet transmitted on channel 1? if(status & (1 << CPSW_CH1)) { //Point to the buffer descriptor p = (Am335xTxBufferDesc *) CPSW_CPDMA_TX_CP_R(CPSW_CH1); //Read the status flags temp = p->word3 & (CPSW_TX_WORD3_SOP | CPSW_TX_WORD3_EOP | CPSW_TX_WORD3_OWNER | CPSW_TX_WORD3_EOQ); //Misqueued buffer condition? 
if(temp == (CPSW_TX_WORD3_SOP | CPSW_TX_WORD3_EOP | CPSW_TX_WORD3_EOQ)) { //Check whether the next descriptor pointer is non-zero if(p->word0 != 0) { //The host corrects the misqueued buffer condition by writing the //misqueued packets buffer descriptor address to the appropriate //TX DMA head descriptor pointer CPSW_CPDMA_TX_HDP_R(CPSW_CH1) = (uint32_t) p->word0; } } //Write the TX completion pointer CPSW_CPDMA_TX_CP_R(CPSW_CH1) = (uint32_t) p; //Check whether the TX buffer is available for writing if((txCurBufferDesc1->word3 & CPSW_TX_WORD3_OWNER) == 0) { //Notify the TCP/IP stack that the transmitter is ready to send flag |= osSetEventFromIsr(&nicDriverInterface1->nicTxEvent); } } //Packet transmitted on channel 2? if(status & (1 << CPSW_CH2)) { //Point to the buffer descriptor p = (Am335xTxBufferDesc *) CPSW_CPDMA_TX_CP_R(CPSW_CH2); //Read the status flags temp = p->word3 & (CPSW_TX_WORD3_SOP | CPSW_TX_WORD3_EOP | CPSW_TX_WORD3_OWNER | CPSW_TX_WORD3_EOQ); //Misqueued buffer condition? if(temp == (CPSW_TX_WORD3_SOP | CPSW_TX_WORD3_EOP | CPSW_TX_WORD3_EOQ)) { //Check whether the next descriptor pointer is non-zero if(p->word0 != 0) { //The host corrects the misqueued buffer condition by writing the //misqueued packets buffer descriptor address to the appropriate //TX DMA head descriptor pointer CPSW_CPDMA_TX_HDP_R(CPSW_CH2) = (uint32_t) p->word0; } } //Write the TX completion pointer CPSW_CPDMA_TX_CP_R(CPSW_CH2) = (uint32_t) p; //Check whether the TX buffer is available for writing if((txCurBufferDesc2->word3 & CPSW_TX_WORD3_OWNER) == 0) { //Notify the TCP/IP stack that the transmitter is ready to send flag |= osSetEventFromIsr(&nicDriverInterface2->nicTxEvent); } } //Write the DMA end of interrupt vector CPSW_CPDMA_CPDMA_EOI_VECTOR_R = CPSW_CPDMA_EOI_VECTOR_TX_PULSE; //Interrupt service routine epilogue osExitIsr(flag); } /** * @brief Ethernet MAC receive interrupt **/ void am335xEthRxIrqHandler(void) { bool_t flag; uint32_t status; //Interrupt service routine prologue osEnterIsr(); //This flag will be set if a higher priority task must be woken flag = FALSE; //Read the RX_STAT register to determine which channels caused the interrupt status = CPSW_WR_C_RX_STAT_R(CPSW_CORE0); //Packet received on channel 0? if(status & (1 << CPSW_CH0)) { //Disable RX interrupts CPSW_WR_C_RX_EN_R(CPSW_CORE0) &= ~(1 << CPSW_CH0); //Set event flag if(nicDriverInterface1 != NULL) { nicDriverInterface1->nicEvent = TRUE; } else if(nicDriverInterface2 != NULL) { nicDriverInterface2->nicEvent = TRUE; } //Notify the TCP/IP stack of the event flag |= osSetEventFromIsr(&netEvent); } //Write the DMA end of interrupt vector CPSW_CPDMA_CPDMA_EOI_VECTOR_R = CPSW_CPDMA_EOI_VECTOR_RX_PULSE; //Interrupt service routine epilogue osExitIsr(flag); } /** * @brief AM335x Ethernet MAC event handler * @param[in] interface Underlying network interface **/ void am335xEthEventHandler(NetInterface *interface) { static uint8_t buffer[AM335X_ETH_RX_BUFFER_SIZE]; error_t error; size_t n; uint32_t temp; //Process all pending packets do { //The current buffer is available for reading? if((rxCurBufferDesc->word3 & CPSW_RX_WORD3_OWNER) == 0) { //SOP and EOP flags should be set if((rxCurBufferDesc->word3 & CPSW_RX_WORD3_SOP) != 0 && (rxCurBufferDesc->word3 & CPSW_RX_WORD3_EOP) != 0) { //Make sure no error occurred if((rxCurBufferDesc->word3 & CPSW_RX_WORD3_PKT_ERROR) == 0) { //Check the port on which the packet was received switch(rxCurBufferDesc->word3 & CPSW_RX_WORD3_FROM_PORT) { //Port 1? 
case CPSW_RX_WORD3_FROM_PORT_1: interface = nicDriverInterface1; break; //Port 2? case CPSW_RX_WORD3_FROM_PORT_2: interface = nicDriverInterface2; break; //Invalid port number? default: interface = NULL; break; } //Retrieve the length of the frame n = rxCurBufferDesc->word3 & CPSW_RX_WORD3_PACKET_LENGTH; //Limit the amount of data to read n = MIN(n, AM335X_ETH_RX_BUFFER_SIZE); //Sanity check if(interface != NULL) { //Copy data from the receive buffer osMemcpy(buffer, (uint8_t *) rxCurBufferDesc->word1, (n + 3) & ~3UL); //Packet successfully received error = NO_ERROR; } else { //The port number is invalid error = ERROR_INVALID_PACKET; } } else { //The received packet contains an error error = ERROR_INVALID_PACKET; } } else { //The packet is not valid error = ERROR_INVALID_PACKET; } //Mark the end of the queue with a NULL pointer rxCurBufferDesc->word0 = (uint32_t) NULL; //Restore the length of the buffer rxCurBufferDesc->word2 = AM335X_ETH_RX_BUFFER_SIZE; //Give the ownership of the descriptor back to the DMA rxCurBufferDesc->word3 = CPSW_RX_WORD3_OWNER; //Link the current descriptor to the previous descriptor rxCurBufferDesc->prev->word0 = (uint32_t) rxCurBufferDesc; //Read the status flags of the previous descriptor temp = rxCurBufferDesc->prev->word3 & (CPSW_RX_WORD3_SOP | CPSW_RX_WORD3_EOP | CPSW_RX_WORD3_OWNER | CPSW_RX_WORD3_EOQ); //Misqueued buffer condition? if(temp == (CPSW_RX_WORD3_SOP | CPSW_RX_WORD3_EOP | CPSW_RX_WORD3_EOQ)) { //The host acts on the misqueued buffer condition by writing the added //buffer descriptor address to the appropriate RX DMA head descriptor //pointer CPSW_CPDMA_RX_HDP_R(CPSW_CH0) = (uint32_t) rxCurBufferDesc; } //Write the RX completion pointer CPSW_CPDMA_RX_CP_R(CPSW_CH0) = (uint32_t) rxCurBufferDesc; //Point to the next descriptor in the list rxCurBufferDesc = rxCurBufferDesc->next; } else { //No more data in the receive buffer error = ERROR_BUFFER_EMPTY; } //Check whether a valid packet has been received if(!error) { NetRxAncillary ancillary; //Additional options can be passed to the stack along with the packet ancillary = NET_DEFAULT_RX_ANCILLARY; //Pass the packet to the upper layer nicProcessPacket(interface, buffer, n, &ancillary); } //No more data in the receive buffer? 
} while(error != ERROR_BUFFER_EMPTY); //Re-enable RX interrupts CPSW_WR_C_RX_EN_R(CPSW_CORE0) |= (1 << CPSW_CH0); } /** * @brief Send a packet (port 1) * @param[in] interface Underlying network interface * @param[in] buffer Multi-part buffer containing the data to send * @param[in] offset Offset to the first data byte * @param[in] ancillary Additional options passed to the stack along with * the packet * @return Error code **/ error_t am335xEthSendPacketPort1(NetInterface *interface, const NetBuffer *buffer, size_t offset, NetTxAncillary *ancillary) { static uint8_t temp[AM335X_ETH_TX_BUFFER_SIZE]; size_t length; uint32_t value; //Retrieve the length of the packet length = netBufferGetLength(buffer) - offset; //Check the frame length if(length > AM335X_ETH_TX_BUFFER_SIZE) { //The transmitter can accept another packet osSetEvent(&interface->nicTxEvent); //Report an error return ERROR_INVALID_LENGTH; } //Make sure the current buffer is available for writing if((txCurBufferDesc1->word3 & CPSW_TX_WORD3_OWNER) != 0) { return ERROR_FAILURE; } //Mark the end of the queue with a NULL pointer txCurBufferDesc1->word0 = (uint32_t) NULL; //Copy user data to the transmit buffer netBufferRead(temp, buffer, offset, length); osMemcpy((uint8_t *) txCurBufferDesc1->word1, temp, (length + 3) & ~3UL); //Set the length of the buffer txCurBufferDesc1->word2 = length & CPSW_TX_WORD2_BUFFER_LENGTH; //Set the length of the packet value = length & CPSW_TX_WORD3_PACKET_LENGTH; //Set SOP and EOP flags as the data fits in a single buffer value |= CPSW_TX_WORD3_SOP | CPSW_TX_WORD3_EOP; //Redirect the packet to the relevant port number value |= CPSW_TX_WORD3_TO_PORT_EN | CPSW_TX_WORD3_TO_PORT_1; //Give the ownership of the descriptor to the DMA txCurBufferDesc1->word3 = CPSW_TX_WORD3_OWNER | value; //Link the current descriptor to the previous descriptor txCurBufferDesc1->prev->word0 = (uint32_t) txCurBufferDesc1; //Read the status flags of the previous descriptor value = txCurBufferDesc1->prev->word3 & (CPSW_TX_WORD3_SOP | CPSW_TX_WORD3_EOP | CPSW_TX_WORD3_OWNER | CPSW_TX_WORD3_EOQ); //Misqueued buffer condition? 
/** * @file am335x_eth_driver.c * @brief Sitara AM335x Gigabit Ethernet MAC driver * * @section License * * SPDX-License-Identifier: GPL-2.0-or-later * * Copyright (C) 2010-2021 Oryx Embedded SARL. All rights reserved. * * This file is part of CycloneTCP Open. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. * * @author Oryx Embedded SARL (www.oryx-embedded.com) * @version 2.0.2 **/ //Switch to the appropriate trace level #define TRACE_LEVEL NIC_TRACE_LEVEL //Dependencies #include "soc_am335x.h" #include "hw_types.h" #include "hw_cm_per.h" #include "hw_control_am335x.h" #include "hw_cpsw_ale.h" #include "hw_cpsw_cpdma.h" #include "hw_cpsw_port.h" #include "hw_cpsw_sl.h" #include "hw_cpsw_ss.h" #include "hw_cpsw_wr.h" #include "hw_mdio.h" #include "interrupt.h" #include "core/net.h" #include "drivers/mac/am335x_eth_driver.h" #include "debug.h" //MDIO input clock frequency #define MDIO_INPUT_CLK 125000000 //MDIO output clock frequency #define MDIO_OUTPUT_CLK 1000000 //Underlying network interface (port 1) static NetInterface *nicDriverInterface1 = NULL; //Underlying network interface (port 2) static NetInterface *nicDriverInterface2 = NULL; //IAR EWARM compiler? #if defined(__ICCARM__) //Transmit buffer (port 1) #pragma data_alignment = 4 #pragma location = AM335X_ETH_RAM_SECTION static uint8_t txBuffer1[AM335X_ETH_TX_BUFFER_COUNT][AM335X_ETH_TX_BUFFER_SIZE]; //Transmit buffer (port 2) #pragma data_alignment = 4 #pragma location = AM335X_ETH_RAM_SECTION static uint8_t txBuffer2[AM335X_ETH_TX_BUFFER_COUNT][AM335X_ETH_TX_BUFFER_SIZE]; //Receive buffer #pragma data_alignment = 4 #pragma location = AM335X_ETH_RAM_SECTION static uint8_t rxBuffer[AM335X_ETH_RX_BUFFER_COUNT][AM335X_ETH_RX_BUFFER_SIZE]; //Transmit buffer descriptors (port 1) #pragma data_alignment = 4 #pragma location = AM335X_ETH_RAM_CPPI_SECTION static Am335xTxBufferDesc txBufferDesc1[AM335X_ETH_TX_BUFFER_COUNT]; //Transmit buffer descriptors (port 2) #pragma data_alignment = 4 #pragma location = AM335X_ETH_RAM_CPPI_SECTION static Am335xTxBufferDesc txBufferDesc2[AM335X_ETH_TX_BUFFER_COUNT]; //Receive buffer descriptors #pragma data_alignment = 4 #pragma location = AM335X_ETH_RAM_CPPI_SECTION static Am335xRxBufferDesc rxBufferDesc[AM335X_ETH_RX_BUFFER_COUNT]; //GCC compiler? 
#else //Transmit buffer (port 1) static uint8_t txBuffer1[AM335X_ETH_TX_BUFFER_COUNT][AM335X_ETH_TX_BUFFER_SIZE] __attribute__((aligned(4), __section__(AM335X_ETH_RAM_SECTION))); //Transmit buffer (port 2) static uint8_t txBuffer2[AM335X_ETH_TX_BUFFER_COUNT][AM335X_ETH_TX_BUFFER_SIZE] __attribute__((aligned(4), __section__(AM335X_ETH_RAM_SECTION))); //Receive buffer static uint8_t rxBuffer[AM335X_ETH_RX_BUFFER_COUNT][AM335X_ETH_RX_BUFFER_SIZE] __attribute__((aligned(4), __section__(AM335X_ETH_RAM_SECTION))); //Transmit buffer descriptors (port 1) static Am335xTxBufferDesc txBufferDesc1[AM335X_ETH_TX_BUFFER_COUNT] __attribute__((aligned(4), __section__(AM335X_ETH_RAM_CPPI_SECTION))); //Transmit buffer descriptors (port 2) static Am335xTxBufferDesc txBufferDesc2[AM335X_ETH_TX_BUFFER_COUNT] __attribute__((aligned(4), __section__(AM335X_ETH_RAM_CPPI_SECTION))); //Receive buffer descriptors static Am335xRxBufferDesc rxBufferDesc[AM335X_ETH_RX_BUFFER_COUNT] __attribute__((aligned(4), __section__(AM335X_ETH_RAM_CPPI_SECTION))); #endif //Pointer to the current TX buffer descriptor (port 1) static Am335xTxBufferDesc *txCurBufferDesc1; //Pointer to the current TX buffer descriptor (port 2) static Am335xTxBufferDesc *txCurBufferDesc2; //Pointer to the current RX buffer descriptor static Am335xRxBufferDesc *rxCurBufferDesc; /** * @brief AM335x Ethernet MAC driver (port 1) **/ const NicDriver am335xEthPort1Driver = { NIC_TYPE_ETHERNET, ETH_MTU, am335xEthInitPort1, am335xEthTick, am335xEthEnableIrq, am335xEthDisableIrq, am335xEthEventHandler, am335xEthSendPacketPort1, am335xEthUpdateMacAddrFilter, am335xEthUpdateMacConfig, am335xEthWritePhyReg, am335xEthReadPhyReg, FALSE, TRUE, TRUE, FALSE }; /** * @brief AM335x Ethernet MAC driver (port 2) **/ const NicDriver am335xEthPort2Driver = { NIC_TYPE_ETHERNET, ETH_MTU, am335xEthInitPort2, am335xEthTick, am335xEthEnableIrq, am335xEthDisableIrq, am335xEthEventHandler, am335xEthSendPacketPort2, am335xEthUpdateMacAddrFilter, am335xEthUpdateMacConfig, am335xEthWritePhyReg, am335xEthReadPhyReg, FALSE, TRUE, TRUE, FALSE }; /** * @brief AM335x Ethernet MAC initialization (port 1) * @param[in] interface Underlying network interface * @return Error code **/ error_t am335xEthInitPort1(NetInterface *interface) { error_t error; uint32_t temp; //Debug message TRACE_INFO("Initializing AM335x Ethernet MAC (port 1)...\r\n"); //Initialize CPSW instance am335xEthInitInstance(interface); //Save underlying network interface nicDriverInterface1 = interface; //Valid Ethernet PHY or switch driver? if(interface->phyDriver != NULL) { //Ethernet PHY initialization error = interface->phyDriver->init(interface); } else if(interface->switchDriver != NULL) { //Ethernet switch initialization error = interface->switchDriver->init(interface); } else { //The interface is not properly configured error = ERROR_FAILURE; } //Any error to report? if(error) { return error; } //Unspecified MAC address?
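/*
 * When the stack did not assign a MAC address, the driver falls back to the
 * factory-programmed address stored in the AM335x control module
 * (CONTROL_MAC_ID0 HI/LO registers). Each register packs several address
 * bytes, which are recovered with the shift constants used below.
 */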
if(macCompAddr(&interface->macAddr, &MAC_UNSPECIFIED_ADDR)) { //Use the factory preprogrammed MAC address interface->macAddr.b[0] = CONTROL_MAC_ID_HI_R(0) >> CONTROL_MAC_ID0_HI_MACADDR_47_40_SHIFT; interface->macAddr.b[1] = CONTROL_MAC_ID_HI_R(0) >> CONTROL_MAC_ID0_HI_MACADDR_39_32_SHIFT; interface->macAddr.b[2] = CONTROL_MAC_ID_HI_R(0) >> CONTROL_MAC_ID0_HI_MACADDR_31_24_SHIFT; interface->macAddr.b[3] = CONTROL_MAC_ID_HI_R(0) >> CONTROL_MAC_ID0_HI_MACADDR_23_16_SHIFT; interface->macAddr.b[4] = CONTROL_MAC_ID_LO_R(0) >> CONTROL_MAC_ID0_LO_MACADDR_15_8_SHIFT; interface->macAddr.b[5] = CONTROL_MAC_ID_LO_R(0) >> CONTROL_MAC_ID0_LO_MACADDR_7_0_SHIFT; //Generate the 64-bit interface identifier macAddrToEui64(&interface->macAddr, &interface->eui64); } //Set port state to forward temp = CPSW_ALE_PORTCTL_R(1) & ~CPSW_ALE_PORTCTL1_PORT_STATE; CPSW_ALE_PORTCTL_R(1) = temp | CPSW_ALE_PORTCTL_PORT_STATE_FORWARD; //Set the MAC address of the station CPSW_PORT1_SA_HI_R = interface->macAddr.w[0] | (interface->macAddr.w[1] << 16); CPSW_PORT1_SA_LO_R = interface->macAddr.w[2]; //Configure VLAN identifier and VLAN priority CPSW_PORT1_PORT_VLAN_R = (0 << CPSW_PORT_P1_PORT_VLAN_PORT_PRI_SHIFT) | (CPSW_PORT1 << CPSW_PORT_P1_PORT_VLAN_PORT_VID_SHIFT); //Add a VLAN entry in the ALE table am335xEthAddVlanEntry(CPSW_PORT1, CPSW_PORT1); //Add a VLAN/unicast address entry in the ALE table am335xEthAddVlanAddrEntry(CPSW_PORT1, CPSW_PORT1, &interface->macAddr); //Enable CPSW statistics CPSW_SS_STAT_PORT_EN_R |= CPSW_SS_STAT_PORT_EN_P1_STAT_EN; //Enable TX and RX CPSW_SL1_MACCONTROL_R = CPSW_SL_MACCONTROL_GMII_EN; //Accept any packets from the upper layer osSetEvent(&interface->nicTxEvent); //Successful initialization return NO_ERROR; } /** * @brief AM335x Ethernet MAC initialization (port 2) * @param[in] interface Underlying network interface * @return Error code **/ error_t am335xEthInitPort2(NetInterface *interface) { error_t error; uint32_t temp; //Debug message TRACE_INFO("Initializing AM335x Ethernet MAC (port 2)...\r\n"); //Initialize CPSW instance am335xEthInitInstance(interface); //Save underlying network interface nicDriverInterface2 = interface; //PHY transceiver initialization error = interface->phyDriver->init(interface); //Failed to initialize PHY transceiver? if(error) { return error; } //Unspecified MAC address?
if(macCompAddr(&interface->macAddr, &MAC_UNSPECIFIED_ADDR)) { //Use the factory preprogrammed MAC address interface->macAddr.b[0] = CONTROL_MAC_ID_HI_R(1) >> CONTROL_MAC_ID1_HI_MACADDR_47_40_SHIFT; interface->macAddr.b[1] = CONTROL_MAC_ID_HI_R(1) >> CONTROL_MAC_ID1_HI_MACADDR_39_32_SHIFT; interface->macAddr.b[2] = CONTROL_MAC_ID_HI_R(1) >> CONTROL_MAC_ID1_HI_MACADDR_31_24_SHIFT; interface->macAddr.b[3] = CONTROL_MAC_ID_HI_R(1) >> CONTROL_MAC_ID1_HI_MACADDR_23_16_SHIFT; interface->macAddr.b[4] = CONTROL_MAC_ID_LO_R(1) >> CONTROL_MAC_ID1_LO_MACADDR_15_8_SHIFT; interface->macAddr.b[5] = CONTROL_MAC_ID_LO_R(1) >> CONTROL_MAC_ID1_LO_MACADDR_7_0_SHIFT; //Generate the 64-bit interface identifier macAddrToEui64(&interface->macAddr, &interface->eui64); } //Set port state to forward temp = CPSW_ALE_PORTCTL_R(2) & ~CPSW_ALE_PORTCTL2_PORT_STATE; CPSW_ALE_PORTCTL_R(2) = temp | CPSW_ALE_PORTCTL_PORT_STATE_FORWARD; //Set the MAC address of the station CPSW_PORT2_SA_HI_R = interface->macAddr.w[0] | (interface->macAddr.w[1] << 16); CPSW_PORT2_SA_LO_R = interface->macAddr.w[2]; //Configure VLAN identifier and VLAN priority CPSW_PORT2_PORT_VLAN_R = (0 << CPSW_PORT_P2_PORT_VLAN_PORT_PRI_SHIFT) | (CPSW_PORT2 << CPSW_PORT_P2_PORT_VLAN_PORT_VID_SHIFT); //Add a VLAN entry in the ALE table am335xEthAddVlanEntry(CPSW_PORT2, CPSW_PORT2); //Add a VLAN/unicast address entry in the ALE table am335xEthAddVlanAddrEntry(CPSW_PORT2, CPSW_PORT2, &interface->macAddr); //Enable CPSW statistics CPSW_SS_STAT_PORT_EN_R |= CPSW_SS_STAT_PORT_EN_P2_STAT_EN; //Enable TX and RX CPSW_SL2_MACCONTROL_R = CPSW_SL_MACCONTROL_GMII_EN; //Accept any packets from the upper layer osSetEvent(&interface->nicTxEvent); //Successful initialization return NO_ERROR; } /** * @brief Initialize CPSW instance * @param[in] interface Underlying network interface **/ void am335xEthInitInstance(NetInterface *interface) { uint_t i; uint32_t temp; #ifdef ti_sysbios_BIOS___VERS Hwi_Params hwiParams; #endif //Initialization sequence is performed once if(nicDriverInterface1 == NULL && nicDriverInterface2 == NULL) { //Select the interface mode (MII/RMII/RGMII) and configure pin muxing am335xEthInitGpio(interface); //Enable the CPSW subsystem clocks CM_PER_CPGMAC0_CLKCTRL_R = CM_PER_CPGMAC0_CLKCTRL_MODULEMODE_ENABLE; //Wait for the CPSW module to be fully functional do { //Get module idle status temp = (CM_PER_CPGMAC0_CLKCTRL_R & CM_PER_CPGMAC0_CLKCTRL_IDLEST) >> CM_PER_CPGMAC0_CLKCTRL_IDLEST_SHIFT; //Keep looping as long as the module is not fully functional } while(temp != CM_PER_CPGMAC0_CLKCTRL_IDLEST_FUNC); //Start a software forced wake-up transition CM_PER_CPSW_CLKSTCTRL_R = CM_PER_CPSW_CLKSTCTRL_CLKTRCTRL_SW_WKUP; //Wait for the CPSW 125 MHz OCP clock to be active do { //Get the state of the CPSW 125 MHz OCP clock temp = (CM_PER_CPSW_CLKSTCTRL_R & CM_PER_CPSW_CLKSTCTRL_CLKACTIVITY_CPSW_125MHZ_GCLK) >> CM_PER_CPSW_CLKSTCTRL_CLKACTIVITY_CPSW_125MHZ_GCLK_SHIFT; //Keep looping as long as the clock is inactive } while(temp != CM_PER_CPSW_CLKSTCTRL_CLKACTIVITY_CPSW_125MHZ_GCLK_ACT); //Reset CPSW subsystem CPSW_SS_SOFT_RESET_R = CPSW_SS_SOFT_RESET_SOFT_RESET; //Wait for the reset to complete while((CPSW_SS_SOFT_RESET_R & CPSW_SS_SOFT_RESET_SOFT_RESET) != 0) { } //Reset CPSW wrapper module CPSW_WR_SOFT_RESET_R = CPSW_WR_SOFT_RESET_SOFT_RESET; //Wait for the reset to complete while((CPSW_WR_SOFT_RESET_R & CPSW_WR_SOFT_RESET_SOFT_RESET) != 0) { } //Reset CPSW sliver 1 logic CPSW_SL1_SOFT_RESET_R = CPSW_SL_SOFT_RESET_SOFT_RESET; //Wait for the reset to complete 
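/*
 * The SOFT_RESET bits written above and below are self-clearing: hardware
 * deasserts them once the corresponding block has left reset, so each
 * polling loop simply spins until the bit reads zero.
 */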
while((CPSW_SL1_SOFT_RESET_R & CPSW_SL_SOFT_RESET_SOFT_RESET) != 0) { } //Reset CPSW sliver 2 logic CPSW_SL2_SOFT_RESET_R = CPSW_SL_SOFT_RESET_SOFT_RESET; //Wait for the reset to complete while((CPSW_SL2_SOFT_RESET_R & CPSW_SL_SOFT_RESET_SOFT_RESET) != 0) { } //Reset CPSW CPDMA module CPSW_CPDMA_CPDMA_SOFT_RESET_R = CPSW_CPDMA_CPDMA_SOFT_RESET_SOFT_RESET; //Wait for the reset to complete while((CPSW_CPDMA_CPDMA_SOFT_RESET_R & CPSW_CPDMA_CPDMA_SOFT_RESET_SOFT_RESET) != 0) { } //Initialize the HDPs and the CPs to NULL for(i = CPSW_CH0; i <= CPSW_CH7; i++) { //TX head descriptor pointer CPSW_CPDMA_TX_HDP_R(i) = 0; //TX completion pointer CPSW_CPDMA_TX_CP_R(i) = 0; //RX head descriptor pointer CPSW_CPDMA_RX_HDP_R(i) = 0; //RX completion pointer CPSW_CPDMA_RX_CP_R(i) = 0; } //Enable ALE and clear ALE address table CPSW_ALE_CONTROL_R = CPSW_ALE_CONTROL_ENABLE_ALE | CPSW_ALE_CONTROL_CLEAR_TABLE; //For dual MAC mode, configure VLAN aware mode CPSW_ALE_CONTROL_R |= CPSW_ALE_CONTROL_ALE_VLAN_AWARE; //Set dual MAC mode for port 0 temp = CPSW_PORT0_TX_IN_CTL_R & ~CPSW_PORT_P0_TX_IN_CTL_TX_IN_SEL; CPSW_PORT0_TX_IN_CTL_R = temp | CPSW_PORT_P0_TX_IN_CTL_TX_IN_DUAL_MAC; //Set port 0 state to forward temp = CPSW_ALE_PORTCTL_R(0) & ~CPSW_ALE_PORTCTL0_PORT_STATE; CPSW_ALE_PORTCTL_R(0) = temp | CPSW_ALE_PORTCTL_PORT_STATE_FORWARD; //Enable CPSW statistics CPSW_SS_STAT_PORT_EN_R = CPSW_SS_STAT_PORT_EN_P0_STAT_EN; //Configure TX and RX buffer descriptors am335xEthInitBufferDesc(interface); //Acknowledge TX and RX interrupts for proper interrupt pulsing CPSW_CPDMA_CPDMA_EOI_VECTOR_R = CPSW_CPDMA_EOI_VECTOR_TX_PULSE; CPSW_CPDMA_CPDMA_EOI_VECTOR_R = CPSW_CPDMA_EOI_VECTOR_RX_PULSE; //Enable channel 1 and 2 interrupts of the DMA engine CPSW_CPDMA_TX_INTMASK_SET_R = (1 << CPSW_CH1) | (1 << CPSW_CH2); //Enable TX completion interrupts CPSW_WR_C_TX_EN_R(CPSW_CORE0) |= (1 << CPSW_CH1) | (1 << CPSW_CH2); //Enable channel 0 interrupts of the DMA engine CPSW_CPDMA_RX_INTMASK_SET_R = (1 << CPSW_CH0); //Enable RX completion interrupts CPSW_WR_C_RX_EN_R(CPSW_CORE0) |= (1 << CPSW_CH0); #ifdef ti_sysbios_BIOS___VERS //Configure TX interrupt Hwi_Params_init(&hwiParams); hwiParams.enableInt = FALSE; hwiParams.priority = AM335X_ETH_IRQ_PRIORITY; //Register TX interrupt handler Hwi_create(SYS_INT_3PGSWTXINT0, (Hwi_FuncPtr) am335xEthTxIrqHandler, &hwiParams, NULL); //Configure RX interrupt Hwi_Params_init(&hwiParams); hwiParams.enableInt = FALSE; hwiParams.priority = AM335X_ETH_IRQ_PRIORITY; //Register RX interrupt handler Hwi_create(SYS_INT_3PGSWRXINT0, (Hwi_FuncPtr) am335xEthRxIrqHandler, &hwiParams, NULL); #else //Register interrupt handlers IntRegister(SYS_INT_3PGSWTXINT0, am335xEthTxIrqHandler); IntRegister(SYS_INT_3PGSWRXINT0, am335xEthRxIrqHandler); //Configure TX interrupt priority IntPrioritySet(SYS_INT_3PGSWTXINT0, AM335X_ETH_IRQ_PRIORITY, AINTC_HOSTINT_ROUTE_IRQ); //Configure RX interrupt priority IntPrioritySet(SYS_INT_3PGSWRXINT0, AM335X_ETH_IRQ_PRIORITY, AINTC_HOSTINT_ROUTE_IRQ); #endif //Enable the transmission and reception CPSW_CPDMA_TX_CONTROL_R = CPSW_CPDMA_TX_CONTROL_TX_EN; CPSW_CPDMA_RX_CONTROL_R = CPSW_CPDMA_RX_CONTROL_RX_EN; //Calculate the MDC clock divider to be used temp = (MDIO_INPUT_CLK / MDIO_OUTPUT_CLK) - 1; //Initialize MDIO interface MDIO_CONTROL_R = MDIO_CONTROL_ENABLE | MDIO_CONTROL_FAULTENB | (temp & MDIO_CONTROL_CLKDIV); } } //BeagleBone Black, TMDSSK3358, OSD3358-SM-RED or SBC DIVA board?
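/*
 * The pin multiplexing performed below is entirely board-specific: each
 * branch of the following conditional selects the interface mode (MII, RMII
 * or RGMII) and the pad mux mode matching the PHY wiring of the target
 * board. Porting the driver to another board requires an equivalent
 * am335xEthInitGpio() implementation.
 */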
#if defined(USE_BEAGLEBONE_BLACK) || defined(USE_TMDSSK3358) || \ defined(USE_OSD3358_SM_RED) || defined(USE_SBC_DIVA) /** * @brief GPIO configuration * @param[in] interface Underlying network interface **/ void am335xEthInitGpio(NetInterface *interface) { //BeagleBone Black board? #if defined(USE_BEAGLEBONE_BLACK) //Select MII interface mode for port 1 CONTROL_GMII_SEL_R = CONTROL_GMII_SEL_GMII1_SEL_MII; //Configure MII1_TX_CLK (GPIO3_9) CONTROL_CONF_MII1_TXCLK_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(0); //Configure MII1_TX_EN (GPIO3_3) CONTROL_CONF_MII1_TXEN_R = CONTROL_CONF_MUXMODE(0); //Configure MII1_TXD0 (GPIO0_28) CONTROL_CONF_MII1_TXD0_R = CONTROL_CONF_MUXMODE(0); //Configure MII1_TXD1 (GPIO0_21) CONTROL_CONF_MII1_TXD1_R = CONTROL_CONF_MUXMODE(0); //Configure MII1_TXD2 (GPIO0_17) CONTROL_CONF_MII1_TXD2_R = CONTROL_CONF_MUXMODE(0); //Configure MII1_TXD3 (GPIO0_16) CONTROL_CONF_MII1_TXD3_R = CONTROL_CONF_MUXMODE(0); //Configure MII1_RX_CLK (GPIO3_10) CONTROL_CONF_MII1_RXCLK_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(0); //Configure MII1_RXD0 (GPIO2_21) CONTROL_CONF_MII1_RXD0_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(0); //Configure MII1_RXD1 (GPIO2_20) CONTROL_CONF_MII1_RXD1_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(0); //Configure MII1_RXD2 (GPIO2_19) CONTROL_CONF_MII1_RXD2_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(0); //Configure MII1_RXD3 (GPIO2_18) CONTROL_CONF_MII1_RXD3_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(0); //Configure MII1_COL (GPIO3_0) CONTROL_CONF_MII1_COL_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(0); //Configure MII1_CRS (GPIO3_1) CONTROL_CONF_MII1_CRS_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(0); //Configure MII1_RX_ER (GPIO3_2) CONTROL_CONF_MII1_RXERR_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(0); //Configure MII1_RX_DV (GPIO3_4) CONTROL_CONF_MII1_RXDV_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(0); //Configure MDIO (GPIO0_0) CONTROL_CONF_MDIO_DATA_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_PULLUPSEL | CONTROL_CONF_MUXMODE(0); //Configure MDC (GPIO0_1) CONTROL_CONF_MDIO_CLK_R = CONTROL_CONF_PULLUPSEL | CONTROL_CONF_MUXMODE(0); //TMDSSK3358 board? 
#elif defined(USE_TMDSSK3358) //Select RGMII interface mode for both port 1 and port 2 CONTROL_GMII_SEL_R = CONTROL_GMII_SEL_GMII1_SEL_RGMII | CONTROL_GMII_SEL_GMII2_SEL_RGMII; //Configure RGMII1_TCLK (GPIO3_9) CONTROL_CONF_MII1_TXCLK_R = CONTROL_CONF_MUXMODE(2); //Configure RGMII1_TCTL (GPIO3_3) CONTROL_CONF_MII1_TXEN_R = CONTROL_CONF_MUXMODE(2); //Configure RGMII1_TD0 (GPIO0_28) CONTROL_CONF_MII1_TXD0_R = CONTROL_CONF_MUXMODE(2); //Configure RGMII1_TD1 (GPIO0_21) CONTROL_CONF_MII1_TXD1_R = CONTROL_CONF_MUXMODE(2); //Configure RGMII1_TD2 (GPIO0_17) CONTROL_CONF_MII1_TXD2_R = CONTROL_CONF_MUXMODE(2); //Configure RGMII1_TD3 (GPIO0_16) CONTROL_CONF_MII1_TXD3_R = CONTROL_CONF_MUXMODE(2); //Configure RGMII1_RCLK (GPIO3_10) CONTROL_CONF_MII1_RXCLK_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure RGMII1_RCTL (GPIO3_4) CONTROL_CONF_MII1_RXDV_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure RGMII1_RD0 (GPIO2_21) CONTROL_CONF_MII1_RXD0_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure RGMII1_RD1 (GPIO2_20) CONTROL_CONF_MII1_RXD1_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure RGMII1_RD2 (GPIO2_19) CONTROL_CONF_MII1_RXD2_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure RGMII1_RD3 (GPIO2_18) CONTROL_CONF_MII1_RXD3_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure RGMII2_TCLK (GPIO1_22/GPMC_A6) CONTROL_CONF_GPMC_A_R(6) = CONTROL_CONF_MUXMODE(2); //Configure RGMII2_TCTL (GPIO1_16/GPMC_A0) CONTROL_CONF_GPMC_A_R(0) = CONTROL_CONF_MUXMODE(2); //Configure RGMII2_TD0 (GPIO1_21/GPMC_A5) CONTROL_CONF_GPMC_A_R(5) = CONTROL_CONF_MUXMODE(2); //Configure RGMII2_TD1 (GPIO1_20/GPMC_A4) CONTROL_CONF_GPMC_A_R(4) = CONTROL_CONF_MUXMODE(2); //Configure RGMII2_TD2 (GPIO1_19/GPMC_A3) CONTROL_CONF_GPMC_A_R(3) = CONTROL_CONF_MUXMODE(2); //Configure RGMII2_TD3 (GPIO1_18/GPMC_A2) CONTROL_CONF_GPMC_A_R(2) = CONTROL_CONF_MUXMODE(2); //Configure RGMII2_RCLK (GPIO1_23/GPMC_A7) CONTROL_CONF_GPMC_A_R(7) = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure RGMII2_RCTL (GPIO1_17/GPMC_A1) CONTROL_CONF_GPMC_A_R(1) = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure RGMII2_RD0 (GPIO1_27/GPMC_A11) CONTROL_CONF_GPMC_A_R(11) = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure RGMII2_RD1 (GPIO1_26/GPMC_A10) CONTROL_CONF_GPMC_A_R(10) = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure RGMII2_RD2 (GPIO1_25/GPMC_A9) CONTROL_CONF_GPMC_A_R(9) = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure RGMII2_RD3 (GPIO1_24/GPMC_A8) CONTROL_CONF_GPMC_A_R(8) = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure MDIO (GPIO0_0) CONTROL_CONF_MDIO_DATA_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_PULLUPSEL | CONTROL_CONF_MUXMODE(0); //Configure MDC (GPIO0_1) CONTROL_CONF_MDIO_CLK_R = CONTROL_CONF_PULLUPSEL | CONTROL_CONF_MUXMODE(0); //OSD3358-SM-RED board?
#elif defined(USE_OSD3358_SM_RED) //Select RGMII interface mode for port 1 CONTROL_GMII_SEL_R = CONTROL_GMII_SEL_GMII1_SEL_RGMII; //Configure RGMII1_TCLK (GPIO3_9) CONTROL_CONF_MII1_TXCLK_R = CONTROL_CONF_MUXMODE(2); //Configure RGMII1_TCTL (GPIO3_3) CONTROL_CONF_MII1_TXEN_R = CONTROL_CONF_MUXMODE(2); //Configure RGMII1_TD0 (GPIO0_28) CONTROL_CONF_MII1_TXD0_R = CONTROL_CONF_MUXMODE(2); //Configure RGMII1_TD1 (GPIO0_21) CONTROL_CONF_MII1_TXD1_R = CONTROL_CONF_MUXMODE(2); //Configure RGMII1_TD2 (GPIO0_17) CONTROL_CONF_MII1_TXD2_R = CONTROL_CONF_MUXMODE(2); //Configure RGMII1_TD3 (GPIO0_16) CONTROL_CONF_MII1_TXD3_R = CONTROL_CONF_MUXMODE(2); //Configure RGMII1_RCLK (GPIO3_10) CONTROL_CONF_MII1_RXCLK_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure RGMII1_RCTL (GPIO3_4) CONTROL_CONF_MII1_RXDV_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure RGMII1_RD0 (GPIO2_21) CONTROL_CONF_MII1_RXD0_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure RGMII1_RD1 (GPIO2_20) CONTROL_CONF_MII1_RXD1_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure RGMII1_RD2 (GPIO2_19) CONTROL_CONF_MII1_RXD2_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure RGMII1_RD3 (GPIO2_18) CONTROL_CONF_MII1_RXD3_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure MDIO (GPIO0_0) CONTROL_CONF_MDIO_DATA_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_PULLUPSEL | CONTROL_CONF_MUXMODE(0); //Configure MDC (GPIO0_1) CONTROL_CONF_MDIO_CLK_R = CONTROL_CONF_PULLUPSEL | CONTROL_CONF_MUXMODE(0); //SBC DIVA board? #elif defined(USE_SBC_DIVA) //Select RMII interface mode for port 1 and RGMII interface mode for port 2 CONTROL_GMII_SEL_R = CONTROL_GMII_SEL_RMII1_IO_CLK_EN | CONTROL_GMII_SEL_GMII1_SEL_RMII | CONTROL_GMII_SEL_GMII2_SEL_RGMII; //Configure RMII1_REF_CLK (GPIO0_29) CONTROL_CONF_RMII1_REFCLK_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(0); //Configure RMII1_TX_EN (GPIO3_3) CONTROL_CONF_MII1_TXEN_R = CONTROL_CONF_MUXMODE(1); //Configure RMII1_TXD0 (GPIO0_28) CONTROL_CONF_MII1_TXD0_R = CONTROL_CONF_MUXMODE(1); //Configure RMII1_TXD1 (GPIO0_21) CONTROL_CONF_MII1_TXD1_R = CONTROL_CONF_MUXMODE(1); //Configure RMII1_RXD0 (GPIO2_21) CONTROL_CONF_MII1_RXD0_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(1); //Configure RMII1_RXD1 (GPIO2_20) CONTROL_CONF_MII1_RXD1_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(1); //Configure RMII1_CRS_DV (GPIO3_1) CONTROL_CONF_MII1_CRS_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(1); //Configure RMII1_RX_ER (GPIO3_2) CONTROL_CONF_MII1_RXERR_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(1); //Configure RGMII2_TCLK (GPIO1_22/GPMC_A6) CONTROL_CONF_GPMC_A_R(6) = CONTROL_CONF_MUXMODE(2); //Configure RGMII2_TCTL (GPIO1_16/GPMC_A0) CONTROL_CONF_GPMC_A_R(0) = CONTROL_CONF_MUXMODE(2); //Configure RGMII2_TD0 (GPIO1_21/GPMC_A5) CONTROL_CONF_GPMC_A_R(5) = CONTROL_CONF_MUXMODE(2); //Configure RGMII2_TD1 (GPIO1_20/GPMC_A4) CONTROL_CONF_GPMC_A_R(4) = CONTROL_CONF_MUXMODE(2); //Configure RGMII2_TD2 (GPIO1_19/GPMC_A3) CONTROL_CONF_GPMC_A_R(3) = CONTROL_CONF_MUXMODE(2); //Configure RGMII2_TD3 (GPIO1_18/GPMC_A2) CONTROL_CONF_GPMC_A_R(2) = CONTROL_CONF_MUXMODE(2); //Configure RGMII2_RCLK (GPIO1_23/GPMC_A7) CONTROL_CONF_GPMC_A_R(7) = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure RGMII2_RCTL (GPIO1_17/GPMC_A1) CONTROL_CONF_GPMC_A_R(1) = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure RGMII2_RD0 (GPIO1_27/GPMC_A11) CONTROL_CONF_GPMC_A_R(11) = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure RGMII2_RD1
(GPIO1_26/GPMC_A10) CONTROL_CONF_GPMC_A_R(10) = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure RGMII2_RD2 (GPIO1_25/GPMC_A9) CONTROL_CONF_GPMC_A_R(9) = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure RGMII2_RD3 (GPIO1_24/GPMC_A8) CONTROL_CONF_GPMC_A_R(8) = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure MDIO (GPIO0_0) CONTROL_CONF_MDIO_DATA_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_PULLUPSEL | CONTROL_CONF_MUXMODE(0); //Configure MDC (GPIO0_1) CONTROL_CONF_MDIO_CLK_R = CONTROL_CONF_PULLUPSEL | CONTROL_CONF_MUXMODE(0); #endif } #endif /** * @brief Initialize buffer descriptor lists * @param[in] interface Underlying network interface **/ void am335xEthInitBufferDesc(NetInterface *interface) { uint_t i; uint_t nextIndex; uint_t prevIndex; //Initialize TX buffer descriptor list (port 1) for(i = 0; i < AM335X_ETH_TX_BUFFER_COUNT; i++) { //Index of the next buffer nextIndex = (i + 1) % AM335X_ETH_TX_BUFFER_COUNT; //Index of the previous buffer prevIndex = (i + AM335X_ETH_TX_BUFFER_COUNT - 1) % AM335X_ETH_TX_BUFFER_COUNT; //Next descriptor pointer txBufferDesc1[i].word0 = (uint32_t) NULL; //Buffer pointer txBufferDesc1[i].word1 = (uint32_t) txBuffer1[i]; //Buffer offset and buffer length txBufferDesc1[i].word2 = 0; //Status flags and packet length txBufferDesc1[i].word3 = 0; //Form a doubly linked list txBufferDesc1[i].next = &txBufferDesc1[nextIndex]; txBufferDesc1[i].prev = &txBufferDesc1[prevIndex]; } //Point to the very first descriptor txCurBufferDesc1 = &txBufferDesc1[0]; //Mark the end of the queue txCurBufferDesc1->prev->word3 = CPSW_TX_WORD3_SOP | CPSW_TX_WORD3_EOP | CPSW_TX_WORD3_EOQ; //Initialize TX buffer descriptor list (port 2) for(i = 0; i < AM335X_ETH_TX_BUFFER_COUNT; i++) { //Index of the next buffer nextIndex = (i + 1) % AM335X_ETH_TX_BUFFER_COUNT; //Index of the previous buffer prevIndex = (i + AM335X_ETH_TX_BUFFER_COUNT - 1) % AM335X_ETH_TX_BUFFER_COUNT; //Next descriptor pointer txBufferDesc2[i].word0 = (uint32_t) NULL; //Buffer pointer txBufferDesc2[i].word1 = (uint32_t) txBuffer2[i]; //Buffer offset and buffer length txBufferDesc2[i].word2 = 0; //Status flags and packet length txBufferDesc2[i].word3 = 0; //Form a doubly linked list txBufferDesc2[i].next = &txBufferDesc2[nextIndex]; txBufferDesc2[i].prev = &txBufferDesc2[prevIndex]; } //Point to the very first descriptor txCurBufferDesc2 = &txBufferDesc2[0]; //Mark the end of the queue txCurBufferDesc2->prev->word3 = CPSW_TX_WORD3_SOP | CPSW_TX_WORD3_EOP | CPSW_TX_WORD3_EOQ; //Initialize RX buffer descriptor list for(i = 0; i < AM335X_ETH_RX_BUFFER_COUNT; i++) { //Index of the next buffer nextIndex = (i + 1) % AM335X_ETH_RX_BUFFER_COUNT; //Index of the previous buffer prevIndex = (i + AM335X_ETH_RX_BUFFER_COUNT - 1) % AM335X_ETH_RX_BUFFER_COUNT; //Next descriptor pointer rxBufferDesc[i].word0 = (uint32_t) &rxBufferDesc[nextIndex]; //Buffer pointer rxBufferDesc[i].word1 = (uint32_t) rxBuffer[i]; //Buffer offset and buffer length rxBufferDesc[i].word2 = AM335X_ETH_RX_BUFFER_SIZE; //Status flags and packet length rxBufferDesc[i].word3 = CPSW_RX_WORD3_OWNER; //Form a doubly linked list rxBufferDesc[i].next = &rxBufferDesc[nextIndex]; rxBufferDesc[i].prev = &rxBufferDesc[prevIndex]; } //Point to the very first descriptor rxCurBufferDesc = &rxBufferDesc[0]; //Mark the end of the queue rxCurBufferDesc->prev->word0 = (uint32_t) NULL; //Write the RX DMA head descriptor pointer CPSW_CPDMA_RX_HDP_R(CPSW_CH0) = (uint32_t) rxCurBufferDesc; } /** * @brief AM335x Ethernet MAC timer handler * * This 
routine is periodically called by the TCP/IP stack to handle periodic * operations such as polling the link state * * @param[in] interface Underlying network interface **/ void am335xEthTick(NetInterface *interface) { //Valid Ethernet PHY or switch driver? if(interface->phyDriver != NULL) { //Handle periodic operations interface->phyDriver->tick(interface); } else if(interface->switchDriver != NULL) { //Handle periodic operations interface->switchDriver->tick(interface); } else { //Just for sanity } //Misqueued buffer condition? if((rxCurBufferDesc->word3 & CPSW_RX_WORD3_OWNER) != 0) { if(CPSW_CPDMA_RX_HDP_R(CPSW_CH0) == 0) { //The host acts on the misqueued buffer condition by writing the added //buffer descriptor address to the appropriate RX DMA head descriptor //pointer CPSW_CPDMA_RX_HDP_R(CPSW_CH0) = (uint32_t) rxCurBufferDesc; } } } /** * @brief Enable interrupts * @param[in] interface Underlying network interface **/ void am335xEthEnableIrq(NetInterface *interface) { #ifdef ti_sysbios_BIOS___VERS //Enable Ethernet MAC interrupts Hwi_enableInterrupt(SYS_INT_3PGSWTXINT0); Hwi_enableInterrupt(SYS_INT_3PGSWRXINT0); #else //Enable Ethernet MAC interrupts IntSystemEnable(SYS_INT_3PGSWTXINT0); IntSystemEnable(SYS_INT_3PGSWRXINT0); #endif //Valid Ethernet PHY or switch driver? if(interface->phyDriver != NULL) { //Enable Ethernet PHY interrupts interface->phyDriver->enableIrq(interface); } else if(interface->switchDriver != NULL) { //Enable Ethernet switch interrupts interface->switchDriver->enableIrq(interface); } else { //Just for sanity } } /** * @brief Disable interrupts * @param[in] interface Underlying network interface **/ void am335xEthDisableIrq(NetInterface *interface) { #ifdef ti_sysbios_BIOS___VERS //Disable Ethernet MAC interrupts Hwi_disableInterrupt(SYS_INT_3PGSWTXINT0); Hwi_disableInterrupt(SYS_INT_3PGSWRXINT0); #else //Disable Ethernet MAC interrupts IntSystemDisable(SYS_INT_3PGSWTXINT0); IntSystemDisable(SYS_INT_3PGSWRXINT0); #endif //Valid Ethernet PHY or switch driver? if(interface->phyDriver != NULL) { //Disable Ethernet PHY interrupts interface->phyDriver->disableIrq(interface); } else if(interface->switchDriver != NULL) { //Disable Ethernet switch interrupts interface->switchDriver->disableIrq(interface); } else { //Just for sanity } } /** * @brief Ethernet MAC transmit interrupt **/ void am335xEthTxIrqHandler(void) { bool_t flag; uint32_t status; uint32_t temp; Am335xTxBufferDesc *p; //Interrupt service routine prologue osEnterIsr(); //This flag will be set if a higher priority task must be woken flag = FALSE; //Read the TX_STAT register to determine which channels caused the interrupt status = CPSW_WR_C_TX_STAT_R(CPSW_CORE0); //Packet transmitted on channel 1? if(status & (1 << CPSW_CH1)) { //Point to the buffer descriptor p = (Am335xTxBufferDesc *) CPSW_CPDMA_TX_CP_R(CPSW_CH1); //Read the status flags temp = p->word3 & (CPSW_TX_WORD3_SOP | CPSW_TX_WORD3_EOP | CPSW_TX_WORD3_OWNER | CPSW_TX_WORD3_EOQ); //Misqueued buffer condition? 
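/*
 * A buffer is misqueued when the DMA reads the NULL next-descriptor pointer
 * (and sets the EOQ flag) just before the host links a fresh descriptor to
 * the chain. The appended packet would then never be processed, so the host
 * must restart the channel by writing the descriptor address back to the TX
 * DMA head descriptor pointer, as done below.
 */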
if(temp == (CPSW_TX_WORD3_SOP | CPSW_TX_WORD3_EOP | CPSW_TX_WORD3_EOQ)) { //Check whether the next descriptor pointer is non-zero if(p->word0 != 0) { //The host corrects the misqueued buffer condition by writing the //misqueued packets buffer descriptor address to the appropriate //TX DMA head descriptor pointer CPSW_CPDMA_TX_HDP_R(CPSW_CH1) = (uint32_t) p->word0; } } //Write the TX completion pointer CPSW_CPDMA_TX_CP_R(CPSW_CH1) = (uint32_t) p; //Check whether the TX buffer is available for writing if((txCurBufferDesc1->word3 & CPSW_TX_WORD3_OWNER) == 0) { //Notify the TCP/IP stack that the transmitter is ready to send flag |= osSetEventFromIsr(&nicDriverInterface1->nicTxEvent); } } //Packet transmitted on channel 2? if(status & (1 << CPSW_CH2)) { //Point to the buffer descriptor p = (Am335xTxBufferDesc *) CPSW_CPDMA_TX_CP_R(CPSW_CH2); //Read the status flags temp = p->word3 & (CPSW_TX_WORD3_SOP | CPSW_TX_WORD3_EOP | CPSW_TX_WORD3_OWNER | CPSW_TX_WORD3_EOQ); //Misqueued buffer condition? if(temp == (CPSW_TX_WORD3_SOP | CPSW_TX_WORD3_EOP | CPSW_TX_WORD3_EOQ)) { //Check whether the next descriptor pointer is non-zero if(p->word0 != 0) { //The host corrects the misqueued buffer condition by writing the //misqueued packets buffer descriptor address to the appropriate //TX DMA head descriptor pointer CPSW_CPDMA_TX_HDP_R(CPSW_CH2) = (uint32_t) p->word0; } } //Write the TX completion pointer CPSW_CPDMA_TX_CP_R(CPSW_CH2) = (uint32_t) p; //Check whether the TX buffer is available for writing if((txCurBufferDesc2->word3 & CPSW_TX_WORD3_OWNER) == 0) { //Notify the TCP/IP stack that the transmitter is ready to send flag |= osSetEventFromIsr(&nicDriverInterface2->nicTxEvent); } } //Write the DMA end of interrupt vector CPSW_CPDMA_CPDMA_EOI_VECTOR_R = CPSW_CPDMA_EOI_VECTOR_TX_PULSE; //Interrupt service routine epilogue osExitIsr(flag); } /** * @brief Ethernet MAC receive interrupt **/ void am335xEthRxIrqHandler(void) { bool_t flag; uint32_t status; //Interrupt service routine prologue osEnterIsr(); //This flag will be set if a higher priority task must be woken flag = FALSE; //Read the RX_STAT register to determine which channels caused the interrupt status = CPSW_WR_C_RX_STAT_R(CPSW_CORE0); //Packet received on channel 0? if(status & (1 << CPSW_CH0)) { //Disable RX interrupts CPSW_WR_C_RX_EN_R(CPSW_CORE0) &= ~(1 << CPSW_CH0); //Set event flag if(nicDriverInterface1 != NULL) { nicDriverInterface1->nicEvent = TRUE; } else if(nicDriverInterface2 != NULL) { nicDriverInterface2->nicEvent = TRUE; } //Notify the TCP/IP stack of the event flag |= osSetEventFromIsr(&netEvent); } //Write the DMA end of interrupt vector CPSW_CPDMA_CPDMA_EOI_VECTOR_R = CPSW_CPDMA_EOI_VECTOR_RX_PULSE; //Interrupt service routine epilogue osExitIsr(flag); } /** * @brief AM335x Ethernet MAC event handler * @param[in] interface Underlying network interface **/ void am335xEthEventHandler(NetInterface *interface) { static uint8_t buffer[AM335X_ETH_RX_BUFFER_SIZE]; error_t error; size_t n; uint32_t temp; //Process all pending packets do { //The current buffer is available for reading? if((rxCurBufferDesc->word3 & CPSW_RX_WORD3_OWNER) == 0) { //SOP and EOP flags should be set if((rxCurBufferDesc->word3 & CPSW_RX_WORD3_SOP) != 0 && (rxCurBufferDesc->word3 & CPSW_RX_WORD3_EOP) != 0) { //Make sure no error occurred if((rxCurBufferDesc->word3 & CPSW_RX_WORD3_PKT_ERROR) == 0) { //Check the port on which the packet was received switch(rxCurBufferDesc->word3 & CPSW_RX_WORD3_FROM_PORT) { //Port 1? 
case CPSW_RX_WORD3_FROM_PORT_1: interface = nicDriverInterface1; break; //Port 2? case CPSW_RX_WORD3_FROM_PORT_2: interface = nicDriverInterface2; break; //Invalid port number? default: interface = NULL; break; } //Retrieve the length of the frame n = rxCurBufferDesc->word3 & CPSW_RX_WORD3_PACKET_LENGTH; //Limit the amount of data to read n = MIN(n, AM335X_ETH_RX_BUFFER_SIZE); //Sanity check if(interface != NULL) { //Copy data from the receive buffer osMemcpy(buffer, (uint8_t *) rxCurBufferDesc->word1, (n + 3) & ~3UL); //Packet successfully received error = NO_ERROR; } else { //The port number is invalid error = ERROR_INVALID_PACKET; } } else { //The received packet contains an error error = ERROR_INVALID_PACKET; } } else { //The packet is not valid error = ERROR_INVALID_PACKET; } //Mark the end of the queue with a NULL pointer rxCurBufferDesc->word0 = (uint32_t) NULL; //Restore the length of the buffer rxCurBufferDesc->word2 = AM335X_ETH_RX_BUFFER_SIZE; //Give the ownership of the descriptor back to the DMA rxCurBufferDesc->word3 = CPSW_RX_WORD3_OWNER; //Link the current descriptor to the previous descriptor rxCurBufferDesc->prev->word0 = (uint32_t) rxCurBufferDesc; //Read the status flags of the previous descriptor temp = rxCurBufferDesc->prev->word3 & (CPSW_RX_WORD3_SOP | CPSW_RX_WORD3_EOP | CPSW_RX_WORD3_OWNER | CPSW_RX_WORD3_EOQ); //Misqueued buffer condition? if(temp == (CPSW_RX_WORD3_SOP | CPSW_RX_WORD3_EOP | CPSW_RX_WORD3_EOQ)) { //The host acts on the misqueued buffer condition by writing the added //buffer descriptor address to the appropriate RX DMA head descriptor //pointer CPSW_CPDMA_RX_HDP_R(CPSW_CH0) = (uint32_t) rxCurBufferDesc; } //Write the RX completion pointer CPSW_CPDMA_RX_CP_R(CPSW_CH0) = (uint32_t) rxCurBufferDesc; //Point to the next descriptor in the list rxCurBufferDesc = rxCurBufferDesc->next; } else { //No more data in the receive buffer error = ERROR_BUFFER_EMPTY; } //Check whether a valid packet has been received if(!error) { NetRxAncillary ancillary; //Additional options can be passed to the stack along with the packet ancillary = NET_DEFAULT_RX_ANCILLARY; //Pass the packet to the upper layer nicProcessPacket(interface, buffer, n, &ancillary); } //No more data in the receive buffer?
} while(error != ERROR_BUFFER_EMPTY); //Re-enable RX interrupts CPSW_WR_C_RX_EN_R(CPSW_CORE0) |= (1 << CPSW_CH0); } /** * @brief Send a packet (port 1) * @param[in] interface Underlying network interface * @param[in] buffer Multi-part buffer containing the data to send * @param[in] offset Offset to the first data byte * @param[in] ancillary Additional options passed to the stack along with * the packet * @return Error code **/ error_t am335xEthSendPacketPort1(NetInterface *interface, const NetBuffer *buffer, size_t offset, NetTxAncillary *ancillary) { static uint8_t temp[AM335X_ETH_TX_BUFFER_SIZE]; size_t length; uint32_t value; //Retrieve the length of the packet length = netBufferGetLength(buffer) - offset; //Check the frame length if(length > AM335X_ETH_TX_BUFFER_SIZE) { //The transmitter can accept another packet osSetEvent(&interface->nicTxEvent); //Report an error return ERROR_INVALID_LENGTH; } //Make sure the current buffer is available for writing if((txCurBufferDesc1->word3 & CPSW_TX_WORD3_OWNER) != 0) { return ERROR_FAILURE; } //Mark the end of the queue with a NULL pointer txCurBufferDesc1->word0 = (uint32_t) NULL; //Copy user data to the transmit buffer netBufferRead(temp, buffer, offset, length); osMemcpy((uint8_t *) txCurBufferDesc1->word1, temp, (length + 3) & ~3UL); //Set the length of the buffer txCurBufferDesc1->word2 = length & CPSW_TX_WORD2_BUFFER_LENGTH; //Set the length of the packet value = length & CPSW_TX_WORD3_PACKET_LENGTH; //Set SOP and EOP flags as the data fits in a single buffer value |= CPSW_TX_WORD3_SOP | CPSW_TX_WORD3_EOP; //Redirect the packet to the relevant port number value |= CPSW_TX_WORD3_TO_PORT_EN | CPSW_TX_WORD3_TO_PORT_1; //Give the ownership of the descriptor to the DMA txCurBufferDesc1->word3 = CPSW_TX_WORD3_OWNER | value; //Link the current descriptor to the previous descriptor txCurBufferDesc1->prev->word0 = (uint32_t) txCurBufferDesc1; //Read the status flags of the previous descriptor value = txCurBufferDesc1->prev->word3 & (CPSW_TX_WORD3_SOP | CPSW_TX_WORD3_EOP | CPSW_TX_WORD3_OWNER | CPSW_TX_WORD3_EOQ); //Misqueued buffer condition? 
if(value == (CPSW_TX_WORD3_SOP | CPSW_TX_WORD3_EOP | CPSW_TX_WORD3_EOQ)) { //Clear the misqueued buffer condition txCurBufferDesc1->prev->word3 = 0; //The host corrects the misqueued buffer condition by writing the //misqueued packets buffer descriptor address to the appropriate //TX DMA head descriptor pointer CPSW_CPDMA_TX_HDP_R(CPSW_CH1) = (uint32_t) txCurBufferDesc1; } //Point to the next descriptor in the list txCurBufferDesc1 = txCurBufferDesc1->next; //Check whether the next buffer is available for writing if((txCurBufferDesc1->word3 & CPSW_TX_WORD3_OWNER) == 0) { //The transmitter can accept another packet osSetEvent(&interface->nicTxEvent); } //Data successfully written return NO_ERROR; } /** * @brief Send a packet (port 2) * @param[in] interface Underlying network interface * @param[in] buffer Multi-part buffer containing the data to send * @param[in] offset Offset to the first data byte * @param[in] ancillary Additional options passed to the stack along with * the packet * @return Error code **/ error_t am335xEthSendPacketPort2(NetInterface *interface, const NetBuffer *buffer, size_t offset, NetTxAncillary *ancillary) { static uint8_t temp[AM335X_ETH_TX_BUFFER_SIZE]; size_t length; uint32_t value; //Retrieve the length of the packet length = netBufferGetLength(buffer) - offset; //Check the frame length if(length > AM335X_ETH_TX_BUFFER_SIZE) { //The transmitter can accept another packet osSetEvent(&interface->nicTxEvent); //Report an error return ERROR_INVALID_LENGTH; } //Make sure the current buffer is available for writing if((txCurBufferDesc2->word3 & CPSW_TX_WORD3_OWNER) != 0) { return ERROR_FAILURE; } //Mark the end of the queue with a NULL pointer txCurBufferDesc2->word0 = (uint32_t) NULL; //Copy user data to the transmit buffer netBufferRead(temp, buffer, offset, length); osMemcpy((uint8_t *) txCurBufferDesc2->word1, temp, (length + 3) & ~3UL); //Set the length of the buffer txCurBufferDesc2->word2 = length & CPSW_TX_WORD2_BUFFER_LENGTH; //Set the length of the packet value = length & CPSW_TX_WORD3_PACKET_LENGTH; //Set SOP and EOP flags as the data fits in a single buffer value |= CPSW_TX_WORD3_SOP | CPSW_TX_WORD3_EOP; //Redirect the packet to the relevant port number value |= CPSW_TX_WORD3_TO_PORT_EN | CPSW_TX_WORD3_TO_PORT_2; //Give the ownership of the descriptor to the DMA txCurBufferDesc2->word3 = CPSW_TX_WORD3_OWNER | value; //Link the current descriptor to the previous descriptor txCurBufferDesc2->prev->word0 = (uint32_t) txCurBufferDesc2; //Read the status flags of the previous descriptor value = txCurBufferDesc2->prev->word3 & (CPSW_TX_WORD3_SOP | CPSW_TX_WORD3_EOP | CPSW_TX_WORD3_OWNER | CPSW_TX_WORD3_EOQ); //Misqueued buffer condition? 
if(value == (CPSW_TX_WORD3_SOP | CPSW_TX_WORD3_EOP | CPSW_TX_WORD3_EOQ)) { //Clear the misqueued buffer condition txCurBufferDesc2->prev->word3 = 0; //The host corrects the misqueued buffer condition by writing the //misqueued packets buffer descriptor address to the appropriate //TX DMA head descriptor pointer CPSW_CPDMA_TX_HDP_R(CPSW_CH2) = (uint32_t) txCurBufferDesc2; } //Point to the next descriptor in the list txCurBufferDesc2 = txCurBufferDesc2->next; //Check whether the next buffer is available for writing if((txCurBufferDesc2->word3 & CPSW_TX_WORD3_OWNER) == 0) { //The transmitter can accept another packet osSetEvent(&interface->nicTxEvent); } //Data successfully written return NO_ERROR; } /** * @brief Configure MAC address filtering * @param[in] interface Underlying network interface * @return Error code **/ error_t am335xEthUpdateMacAddrFilter(NetInterface *interface) { uint_t i; uint_t port; MacFilterEntry *entry; //Debug message TRACE_DEBUG("Updating AM335x ALE table...\r\n"); //Select the relevant port number if(interface == nicDriverInterface1) { port = CPSW_PORT1; } else if(interface == nicDriverInterface2) { port = CPSW_PORT2; } else { port = CPSW_PORT0; } //The MAC address filter contains the list of MAC addresses to accept //when receiving an Ethernet frame for(i = 0; i < MAC_ADDR_FILTER_SIZE; i++) { //Point to the current entry entry = &interface->macAddrFilter[i]; //Check whether the ALE table should be updated for the //current multicast address if(!macCompAddr(&entry->addr, &MAC_UNSPECIFIED_ADDR)) { if(entry->addFlag) { //Add VLAN/multicast address entry to the ALE table am335xEthAddVlanAddrEntry(port, port, &entry->addr); } else if(entry->deleteFlag) { //Remove VLAN/multicast address entry from the ALE table am335xEthDeleteVlanAddrEntry(port, port, &entry->addr); } } } //Successful processing return NO_ERROR; } /** * @brief Adjust MAC configuration parameters for proper operation * @param[in] interface Underlying network interface * @return Error code **/ error_t am335xEthUpdateMacConfig(NetInterface *interface) { uint32_t config = 0; //Read MAC control register if(interface == nicDriverInterface1) { config = CPSW_SL1_MACCONTROL_R; } else if(interface == nicDriverInterface2) { config = CPSW_SL2_MACCONTROL_R; } //1000BASE-T operation mode? if(interface->linkSpeed == NIC_LINK_SPEED_1GBPS) { config |= CPSW_SL_MACCONTROL_GIG; config &= ~(CPSW_SL_MACCONTROL_IFCTL_A | CPSW_SL_MACCONTROL_IFCTL_B); } //100BASE-TX operation mode? else if(interface->linkSpeed == NIC_LINK_SPEED_100MBPS) { config &= ~CPSW_SL_MACCONTROL_GIG; config |= CPSW_SL_MACCONTROL_IFCTL_A | CPSW_SL_MACCONTROL_IFCTL_B; } //10BASE-T operation mode? else { config &= ~CPSW_SL_MACCONTROL_GIG; config &= ~(CPSW_SL_MACCONTROL_IFCTL_A | CPSW_SL_MACCONTROL_IFCTL_B); } //Half-duplex or full-duplex mode? if(interface->duplexMode == NIC_FULL_DUPLEX_MODE) { config |= CPSW_SL_MACCONTROL_FULLDUPLEX; } else { config &= ~CPSW_SL_MACCONTROL_FULLDUPLEX; } //Update MAC control register if(interface == nicDriverInterface1) { CPSW_SL1_MACCONTROL_R = config; } else if(interface == nicDriverInterface2) { CPSW_SL2_MACCONTROL_R = config; } //Successful processing return NO_ERROR; } /** * @brief Write PHY register * @param[in] opcode Access type (2 bits) * @param[in] phyAddr PHY address (5 bits) * @param[in] regAddr Register address (5 bits) * @param[in] data Register value **/ void am335xEthWritePhyReg(uint8_t opcode, uint8_t phyAddr, uint8_t regAddr, uint16_t data) { uint32_t temp; //Valid opcode? 
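/*
 * The MDIO controller only generates IEEE 802.3 Clause 22 management frames,
 * so any other opcode (e.g. a Clause 45 access) is ignored.
 */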
if(opcode == SMI_OPCODE_WRITE) { //Set up a write operation temp = MDIO_USERACCESS0_GO | MDIO_USERACCESS0_WRITE; //PHY address temp |= (phyAddr << MDIO_USERACCESS0_PHYADR_SHIFT) & MDIO_USERACCESS0_PHYADR; //Register address temp |= (regAddr << MDIO_USERACCESS0_REGADR_SHIFT) & MDIO_USERACCESS0_REGADR; //Register value temp |= data & MDIO_USERACCESS0_DATA; //Start a write operation MDIO_USERACCESS0_R = temp; //Wait for the write to complete while((MDIO_USERACCESS0_R & MDIO_USERACCESS0_GO) != 0) { } } else { //The MAC peripheral only supports standard Clause 22 opcodes } } /** * @brief Read PHY register * @param[in] opcode Access type (2 bits) * @param[in] phyAddr PHY address (5 bits) * @param[in] regAddr Register address (5 bits) * @return Register value **/ uint16_t am335xEthReadPhyReg(uint8_t opcode, uint8_t phyAddr, uint8_t regAddr) { uint16_t data; uint32_t temp; //Valid opcode? if(opcode == SMI_OPCODE_READ) { //Set up a read operation temp = MDIO_USERACCESS0_GO | MDIO_USERACCESS0_READ; //PHY address temp |= (phyAddr << MDIO_USERACCESS0_PHYADR_SHIFT) & MDIO_USERACCESS0_PHYADR; //Register address temp |= (regAddr << MDIO_USERACCESS0_REGADR_SHIFT) & MDIO_USERACCESS0_REGADR; //Start a read operation MDIO_USERACCESS0_R = temp; //Wait for the read to complete while((MDIO_USERACCESS0_R & MDIO_USERACCESS0_GO) != 0) { } //Get register value data = MDIO_USERACCESS0_R & MDIO_USERACCESS0_DATA; } else { //The MAC peripheral only supports standard Clause 22 opcodes data = 0; } //Return the value of the PHY register return data; } /** * @brief Write an ALE table entry * @param[in] index Entry index * @param[in] entry Pointer to the ALE table entry **/ void am335xEthWriteEntry(uint_t index, const Am335xAleEntry *entry) { //Copy the content of the entry to be written CPSW_ALE_TBLW_R(2) = entry->word2; CPSW_ALE_TBLW_R(1) = entry->word1; CPSW_ALE_TBLW_R(0) = entry->word0; //Write the ALE entry at the specified index CPSW_ALE_TBLCTL_R = CPSW_ALE_TBLCTL_WRITE_RDZ | index; } /** * @brief Read an ALE table entry * @param[in] index Entry index * @param[out] entry Pointer to the ALE table entry **/ void am335xEthReadEntry(uint_t index, Am335xAleEntry *entry) { //Read the ALE entry at the specified index CPSW_ALE_TBLCTL_R = index; //Copy the content of the entry entry->word2 = CPSW_ALE_TBLW_R(2); entry->word1 = CPSW_ALE_TBLW_R(1); entry->word0 = CPSW_ALE_TBLW_R(0); } /** * @brief Find a free entry in the ALE table * @return Index of the first free entry **/ uint_t am335xEthFindFreeEntry(void) { uint_t index; uint32_t type; Am335xAleEntry entry; //Loop through the ALE table entries for(index = 0; index < CPSW_ALE_MAX_ENTRIES; index++) { //Read the current entry am335xEthReadEntry(index, &entry); //Retrieve the type of the ALE entry type = entry.word1 & CPSW_ALE_WORD1_ENTRY_TYPE_MASK; //Free entry? 
if(type == CPSW_ALE_WORD1_ENTRY_TYPE_FREE) { //Exit immediately break; } } //Return the index of the entry return index; } /** * @brief Search the ALE table for the specified VLAN entry * @param[in] vlanId VLAN identifier * @return Index of the matching entry **/ uint_t am335xEthFindVlanEntry(uint_t vlanId) { uint_t index; uint32_t value; Am335xAleEntry entry; //Loop through the ALE table entries for(index = 0; index < CPSW_ALE_MAX_ENTRIES; index++) { //Read the current entry am335xEthReadEntry(index, &entry); //Retrieve the type of the ALE entry value = entry.word1 & CPSW_ALE_WORD1_ENTRY_TYPE_MASK; //Check the type of the ALE entry if(value == CPSW_ALE_WORD1_ENTRY_TYPE_VLAN_ADDR) { //Get the VLAN identifier value = entry.word1 & CPSW_ALE_WORD1_VLAN_ID_MASK; //Compare the VLAN identifier if(value == CPSW_ALE_WORD1_VLAN_ID(vlanId)) { //Matching ALE entry found break; } } } //Return the index of the entry return index; } /** * @brief Search the ALE table for the specified VLAN/address entry * @param[in] vlanId VLAN identifier * @param[in] macAddr MAC address * @return Index of the matching entry **/ uint_t am335xEthFindVlanAddrEntry(uint_t vlanId, MacAddr *macAddr) { uint_t index; uint32_t value; Am335xAleEntry entry; //Loop through the ALE table entries for(index = 0; index < CPSW_ALE_MAX_ENTRIES; index++) { //Read the current entry am335xEthReadEntry(index, &entry); //Retrieve the type of the ALE entry value = entry.word1 & CPSW_ALE_WORD1_ENTRY_TYPE_MASK; //Check the type of the ALE entry if(value == CPSW_ALE_WORD1_ENTRY_TYPE_VLAN_ADDR) { //Get the VLAN identifier value = entry.word1 & CPSW_ALE_WORD1_VLAN_ID_MASK; //Compare the VLAN identifier if(value == CPSW_ALE_WORD1_VLAN_ID(vlanId)) { //Compare the MAC address if(macAddr->b[0] == (uint8_t) (entry.word1 >> 8) && macAddr->b[1] == (uint8_t) (entry.word1 >> 0) && macAddr->b[2] == (uint8_t) (entry.word0 >> 24) && macAddr->b[3] == (uint8_t) (entry.word0 >> 16) && macAddr->b[4] == (uint8_t) (entry.word0 >> 8) && macAddr->b[5] == (uint8_t) (entry.word0 >> 0)) { //Matching ALE entry found break; } } } } //Return the index of the entry return index; } /** * @brief Add a VLAN entry in the ALE table * @param[in] port Port number * @param[in] vlanId VLAN identifier * @return Error code **/ error_t am335xEthAddVlanEntry(uint_t port, uint_t vlanId) { error_t error; uint_t index; Am335xAleEntry entry; //Ensure that there are no duplicate address entries in the ALE table index = am335xEthFindVlanEntry(vlanId); //No matching entry found? 
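/*
 * Find-or-allocate pattern: when the lookup above fails, the first free slot
 * of the ALE table is claimed instead; if no slot is free, the function
 * reports that the table is full.
 */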
if(index >= CPSW_ALE_MAX_ENTRIES) { //Find a free entry in the ALE table index = am335xEthFindFreeEntry(); } //Sanity check if(index < CPSW_ALE_MAX_ENTRIES) { //Set up a VLAN table entry entry.word2 = 0; entry.word1 = CPSW_ALE_WORD1_ENTRY_TYPE_VLAN; entry.word0 = 0; //Set VLAN identifier entry.word1 |= CPSW_ALE_WORD1_VLAN_ID(vlanId); //Force the packet VLAN tag to be removed on egress entry.word0 |= CPSW_ALE_WORD0_FORCE_UNTAG_EGRESS(1 << port) | CPSW_ALE_WORD0_FORCE_UNTAG_EGRESS(1 << CPSW_PORT0); //Set VLAN member list entry.word0 |= CPSW_ALE_WORD0_VLAN_MEMBER_LIST(1 << port) | CPSW_ALE_WORD0_VLAN_MEMBER_LIST(1 << CPSW_PORT0); //Add a new entry to the ALE table am335xEthWriteEntry(index, &entry); //Successful processing error = NO_ERROR; } else { //The ALE table is full error = ERROR_FAILURE; } //Return status code return error; } /** * @brief Add a VLAN/address entry in the ALE table * @param[in] port Port number * @param[in] vlanId VLAN identifier * @param[in] macAddr MAC address * @return Error code **/ error_t am335xEthAddVlanAddrEntry(uint_t port, uint_t vlanId, MacAddr *macAddr) { error_t error; uint_t index; Am335xAleEntry entry; //Ensure that there are no duplicate address entries in the ALE table index = am335xEthFindVlanAddrEntry(vlanId, macAddr); //No matching entry found? if(index >= CPSW_ALE_MAX_ENTRIES) { //Find a free entry in the ALE table index = am335xEthFindFreeEntry(); } //Sanity check if(index < CPSW_ALE_MAX_ENTRIES) { //Set up a VLAN/address table entry entry.word2 = 0; entry.word1 = CPSW_ALE_WORD1_ENTRY_TYPE_VLAN_ADDR; entry.word0 = 0; //Multicast address? if(macIsMulticastAddr(macAddr)) { //Set port mask entry.word2 |= CPSW_ALE_WORD2_SUPER | CPSW_ALE_WORD2_PORT_LIST(1 << port) | CPSW_ALE_WORD2_PORT_LIST(1 << CPSW_CH0); //Set multicast forward state entry.word1 |= CPSW_ALE_WORD1_MCAST_FWD_STATE(0); } //Set VLAN identifier entry.word1 |= CPSW_ALE_WORD1_VLAN_ID(vlanId); //Copy the upper 16 bits of the unicast address entry.word1 |= (macAddr->b[0] << 8) | macAddr->b[1]; //Copy the lower 32 bits of the unicast address entry.word0 |= (macAddr->b[2] << 24) | (macAddr->b[3] << 16) | (macAddr->b[4] << 8) | macAddr->b[5]; //Add a new entry to the ALE table am335xEthWriteEntry(index, &entry); //Successful processing error = NO_ERROR; } else { //The ALE table is full error = ERROR_FAILURE; } //Return status code return error; } /** * @brief Remove a VLAN/address entry from the ALE table * @param[in] port Port number * @param[in] vlanId VLAN identifier * @param[in] macAddr MAC address * @return Error code **/ error_t am335xEthDeleteVlanAddrEntry(uint_t port, uint_t vlanId, MacAddr *macAddr) { error_t error; uint_t index; Am335xAleEntry entry; //Search the ALE table for the specified VLAN/address entry index = am335xEthFindVlanAddrEntry(vlanId, macAddr); //Matching ALE entry found? if(index < CPSW_ALE_MAX_ENTRIES) { //Clear the contents of the entry entry.word2 = 0; entry.word1 = 0; entry.word0 = 0; //Update the ALE table am335xEthWriteEntry(index, &entry); //Successful processing error = NO_ERROR; } else { //Entry not found error = ERROR_NOT_FOUND; } //Return status code return error; }
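Note on the MDIO access pattern in the cell above: both am335xEthWritePhyReg and am335xEthReadPhyReg spin on the MDIO_USERACCESS0_GO bit with no upper bound, so an unclocked or misconfigured MDIO block would hang the caller forever. Below is a minimal bounded-wait sketch that reuses only register names already defined by this driver; the helper name and the MDIO_TIMEOUT_ITERATIONS constant are illustrative assumptions, not part of the source.

//Illustrative constant, not defined by the driver
#define MDIO_TIMEOUT_ITERATIONS 100000

//Poll the GO bit a bounded number of times instead of spinning forever
static int mdioWaitIdle(void)
{
   uint32_t i;

   for(i = 0; i < MDIO_TIMEOUT_ITERATIONS; i++)
   {
      //A cleared GO bit means the MDIO transaction has completed
      if((MDIO_USERACCESS0_R & MDIO_USERACCESS0_GO) == 0)
      {
         return 0;
      }
   }

   //The MDIO block never became idle
   return -1;
}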
error_t am335xEthAddVlanAddrEntry(uint_t port, uint_t vlanId, MacAddr *macAddr) { error_t error; uint_t index; Am335xAleEntry entry; //Ensure that there are no duplicate address entries in the ALE table index = am335xEthFindVlanAddrEntry(vlanId, macAddr); //No matching entry found? if(index >= CPSW_ALE_MAX_ENTRIES) { //Find a free entry in the ALE table index = am335xEthFindFreeEntry(); } //Sanity check if(index < CPSW_ALE_MAX_ENTRIES) { //Set up a VLAN/address table entry entry.word2 = 0; entry.word1 = CPSW_ALE_WORD1_ENTRY_TYPE_VLAN_ADDR; entry.word0 = 0; //Multicast address? if(macIsMulticastAddr(macAddr)) { //Set port mask entry.word2 |= CPSW_ALE_WORD2_SUPER | CPSW_ALE_WORD2_PORT_LIST(1 << port) | CPSW_ALE_WORD2_PORT_LIST(1 << CPSW_CH0); //Set multicast forward state entry.word1 |= CPSW_ALE_WORD1_MCAST_FWD_STATE(0); } //Set VLAN identifier entry.word1 |= CPSW_ALE_WORD1_VLAN_ID(vlanId); //Copy the upper 16 bits of the unicast address entry.word1 |= (macAddr->b[0] << 8) | macAddr->b[1]; //Copy the lower 32 bits of the unicast address entry.word0 |= (macAddr->b[2] << 24) | (macAddr->b[3] << 16) | (macAddr->b[4] << 8) | macAddr->b[5]; //Add a new entry to the ALE table am335xEthWriteEntry(index, &entry); //Sucessful processing error = NO_ERROR; } else { //The ALE table is full error = ERROR_FAILURE; } //Return status code return error; }
error_t am335xEthAddVlanAddrEntry(uint_t port, uint_t vlanId, MacAddr *macAddr) { error_t error; uint_t index; Am335xAleEntry entry; //Ensure that there are no duplicate address entries in the ALE table index = am335xEthFindVlanAddrEntry(vlanId, macAddr); //No matching entry found? if(index >= CPSW_ALE_MAX_ENTRIES) { //Find a free entry in the ALE table index = am335xEthFindFreeEntry(); } //Sanity check if(index < CPSW_ALE_MAX_ENTRIES) { //Set up a VLAN/address table entry entry.word2 = 0; entry.word1 = CPSW_ALE_WORD1_ENTRY_TYPE_VLAN_ADDR; entry.word0 = 0; //Multicast address? if(macIsMulticastAddr(macAddr)) { //Set port mask entry.word2 |= CPSW_ALE_WORD2_SUPER | CPSW_ALE_WORD2_PORT_LIST(1 << port) | CPSW_ALE_WORD2_PORT_LIST(1 << CPSW_CH0); //Set multicast forward state entry.word1 |= CPSW_ALE_WORD1_MCAST_FWD_STATE(0); } //Set VLAN identifier entry.word1 |= CPSW_ALE_WORD1_VLAN_ID(vlanId); //Copy the upper 16 bits of the unicast address entry.word1 |= (macAddr->b[0] << 8) | macAddr->b[1]; //Copy the lower 32 bits of the unicast address entry.word0 |= (macAddr->b[2] << 24) | (macAddr->b[3] << 16) | (macAddr->b[4] << 8) | macAddr->b[5]; //Add a new entry to the ALE table am335xEthWriteEntry(index, &entry); //Successful processing error = NO_ERROR; } else { //The ALE table is full error = ERROR_FAILURE; } //Return status code return error; }
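The func_before/func_after pair above differs only in the comment spelling recorded by the diff; the packing logic is unchanged. Both spread the 48-bit MAC address across two ALE words, with the upper 16 bits in word1 and the lower 32 bits in word0, which is exactly the layout am335xEthFindVlanAddrEntry compares byte by byte. A hedged sketch of the inverse unpacking follows; the helper name is illustrative and does not exist in the driver.

//Unpack the MAC address stored in an ALE VLAN/address entry
static void aleEntryGetMacAddr(const Am335xAleEntry *entry, MacAddr *macAddr)
{
   //The upper 16 bits of the address are held in word1
   macAddr->b[0] = (uint8_t) (entry->word1 >> 8);
   macAddr->b[1] = (uint8_t) (entry->word1 >> 0);

   //The lower 32 bits of the address are held in word0
   macAddr->b[2] = (uint8_t) (entry->word0 >> 24);
   macAddr->b[3] = (uint8_t) (entry->word0 >> 16);
   macAddr->b[4] = (uint8_t) (entry->word0 >> 8);
   macAddr->b[5] = (uint8_t) (entry->word0 >> 0);
}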
{'added': [(9, ' * Copyright (C) 2010-2021 Oryx Embedded SARL. All rights reserved.'), (28, ' * @version 2.0.2'), (89, '//GCC compiler?'), (1807, ' //Successful processing'), (1878, ' //Successful processing'), (1920, ' //Successful processing')], 'deleted': [(9, ' * Copyright (C) 2010-2020 Oryx Embedded SARL. All rights reserved.'), (28, ' * @version 2.0.0'), (89, '//Keil MDK-ARM or GCC compiler?'), (1807, ' //Sucessful processing'), (1878, ' //Sucessful processing'), (1920, ' //Sucessful processing')]}
6
6
944
5,288
https://github.com/Oryx-Embedded/CycloneTCP
CVE-2021-26788
['CWE-20']
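This row is tagged CWE-20 (improper input validation). The defensive pattern relevant to that class in this file is the RX event handler's clamping of the DMA-reported packet length before copying into a fixed-size stack buffer (n = MIN(n, AM335X_ETH_RX_BUFFER_SIZE) in am335xEthEventHandler). Below is a minimal sketch of the same check using only constants from the driver; the helper name is illustrative.

//Never trust a hardware-reported frame length beyond the buffer capacity
static size_t clampRxFrameLength(uint32_t word3)
{
   size_t n;

   //Extract the packet length reported by the CPDMA engine
   n = word3 & CPSW_RX_WORD3_PACKET_LENGTH;

   //Clamp to the size of the receive buffer before any copy
   if(n > AM335X_ETH_RX_BUFFER_SIZE)
   {
      n = AM335X_ETH_RX_BUFFER_SIZE;
   }

   return n;
}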
am335x_eth_driver.c
am335xEthAddVlanEntry
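am335xEthAddVlanEntry, the method named above, is invoked during port initialization with the port number doubling as the port VLAN identifier (the dual MAC configuration set up in am335xEthInitInstance), immediately followed by the matching VLAN/unicast address entry. A short usage sketch mirroring the call sequence in am335xEthInitPort1; the wrapper function is illustrative and the returned error codes are ignored for brevity.

//Illustrative wrapper reproducing the ALE setup done for port 1
void exampleSetupPort1Vlan(NetInterface *interface)
{
   //Dual MAC mode reuses the port number as the port VLAN identifier
   am335xEthAddVlanEntry(CPSW_PORT1, CPSW_PORT1);

   //Bind the station MAC address to the same VLAN
   am335xEthAddVlanAddrEntry(CPSW_PORT1, CPSW_PORT1, &interface->macAddr);
}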
/** * @file am335x_eth_driver.c * @brief Sitara AM335x Gigabit Ethernet MAC driver * * @section License * * SPDX-License-Identifier: GPL-2.0-or-later * * Copyright (C) 2010-2020 Oryx Embedded SARL. All rights reserved. * * This file is part of CycloneTCP Open. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. * * @author Oryx Embedded SARL (www.oryx-embedded.com) * @version 2.0.0 **/ //Switch to the appropriate trace level #define TRACE_LEVEL NIC_TRACE_LEVEL //Dependencies #include "soc_am335x.h" #include "hw_types.h" #include "hw_cm_per.h" #include "hw_control_am335x.h" #include "hw_cpsw_ale.h" #include "hw_cpsw_cpdma.h" #include "hw_cpsw_port.h" #include "hw_cpsw_sl.h" #include "hw_cpsw_ss.h" #include "hw_cpsw_wr.h" #include "hw_mdio.h" #include "interrupt.h" #include "core/net.h" #include "drivers/mac/am335x_eth_driver.h" #include "debug.h" //MDIO input clock frequency #define MDIO_INPUT_CLK 125000000 //MDIO output clock frequency #define MDIO_OUTPUT_CLK 1000000 //Underlying network interface (port 1) static NetInterface *nicDriverInterface1 = NULL; //Underlying network interface (port 2) static NetInterface *nicDriverInterface2 = NULL; //IAR EWARM compiler? #if defined(__ICCARM__) //Transmit buffer (port 1) #pragma data_alignment = 4 #pragma location = AM335X_ETH_RAM_SECTION static uint8_t txBuffer1[AM335X_ETH_TX_BUFFER_COUNT][AM335X_ETH_TX_BUFFER_SIZE]; //Transmit buffer (port 2) #pragma data_alignment = 4 #pragma location = AM335X_ETH_RAM_SECTION static uint8_t txBuffer2[AM335X_ETH_TX_BUFFER_COUNT][AM335X_ETH_TX_BUFFER_SIZE]; //Receive buffer #pragma data_alignment = 4 #pragma location = AM335X_ETH_RAM_SECTION static uint8_t rxBuffer[AM335X_ETH_RX_BUFFER_COUNT][AM335X_ETH_RX_BUFFER_SIZE]; //Transmit buffer descriptors (port 1) #pragma data_alignment = 4 #pragma location = AM335X_ETH_RAM_CPPI_SECTION static Am335xTxBufferDesc txBufferDesc1[AM335X_ETH_TX_BUFFER_COUNT]; //Transmit buffer descriptors (port 2) #pragma data_alignment = 4 #pragma location = AM335X_ETH_RAM_CPPI_SECTION static Am335xTxBufferDesc txBufferDesc2[AM335X_ETH_TX_BUFFER_COUNT]; //Receive buffer descriptors #pragma data_alignment = 4 #pragma location = AM335X_ETH_RAM_CPPI_SECTION static Am335xRxBufferDesc rxBufferDesc[AM335X_ETH_RX_BUFFER_COUNT]; //Keil MDK-ARM or GCC compiler? 
#else //Transmit buffer (port 1) static uint8_t txBuffer1[AM335X_ETH_TX_BUFFER_COUNT][AM335X_ETH_TX_BUFFER_SIZE] __attribute__((aligned(4), __section__(AM335X_ETH_RAM_SECTION))); //Transmit buffer (port 2) static uint8_t txBuffer2[AM335X_ETH_TX_BUFFER_COUNT][AM335X_ETH_TX_BUFFER_SIZE] __attribute__((aligned(4), __section__(AM335X_ETH_RAM_SECTION))); //Receive buffer static uint8_t rxBuffer[AM335X_ETH_RX_BUFFER_COUNT][AM335X_ETH_RX_BUFFER_SIZE] __attribute__((aligned(4), __section__(AM335X_ETH_RAM_SECTION))); //Transmit buffer descriptors (port 1) static Am335xTxBufferDesc txBufferDesc1[AM335X_ETH_TX_BUFFER_COUNT] __attribute__((aligned(4), __section__(AM335X_ETH_RAM_CPPI_SECTION))); //Transmit buffer descriptors (port 2) static Am335xTxBufferDesc txBufferDesc2[AM335X_ETH_TX_BUFFER_COUNT] __attribute__((aligned(4), __section__(AM335X_ETH_RAM_CPPI_SECTION))); //Receive buffer descriptors static Am335xRxBufferDesc rxBufferDesc[AM335X_ETH_RX_BUFFER_COUNT] __attribute__((aligned(4), __section__(AM335X_ETH_RAM_CPPI_SECTION))); #endif //Pointer to the current TX buffer descriptor (port1) static Am335xTxBufferDesc *txCurBufferDesc1; //Pointer to the current TX buffer descriptor (port 2) static Am335xTxBufferDesc *txCurBufferDesc2; //Pointer to the current RX buffer descriptor static Am335xRxBufferDesc *rxCurBufferDesc; /** * @brief AM335x Ethernet MAC driver (port1) **/ const NicDriver am335xEthPort1Driver = { NIC_TYPE_ETHERNET, ETH_MTU, am335xEthInitPort1, am335xEthTick, am335xEthEnableIrq, am335xEthDisableIrq, am335xEthEventHandler, am335xEthSendPacketPort1, am335xEthUpdateMacAddrFilter, am335xEthUpdateMacConfig, am335xEthWritePhyReg, am335xEthReadPhyReg, FALSE, TRUE, TRUE, FALSE }; /** * @brief AM335x Ethernet MAC driver (port2) **/ const NicDriver am335xEthPort2Driver = { NIC_TYPE_ETHERNET, ETH_MTU, am335xEthInitPort2, am335xEthTick, am335xEthEnableIrq, am335xEthDisableIrq, am335xEthEventHandler, am335xEthSendPacketPort2, am335xEthUpdateMacAddrFilter, am335xEthUpdateMacConfig, am335xEthWritePhyReg, am335xEthReadPhyReg, FALSE, TRUE, TRUE, FALSE }; /** * @brief AM335x Ethernet MAC initialization (port 1) * @param[in] interface Underlying network interface * @return Error code **/ error_t am335xEthInitPort1(NetInterface *interface) { error_t error; uint32_t temp; //Debug message TRACE_INFO("Initializing AM335x Ethernet MAC (port 1)...\r\n"); //Initialize CPSW instance am335xEthInitInstance(interface); //Save underlying network interface nicDriverInterface1 = interface; //Valid Ethernet PHY or switch driver? if(interface->phyDriver != NULL) { //Ethernet PHY initialization error = interface->phyDriver->init(interface); } else if(interface->switchDriver != NULL) { //Ethernet switch initialization error = interface->switchDriver->init(interface); } else { //The interface is not properly configured error = ERROR_FAILURE; } //Any error to report? if(error) { return error; } //Unspecified MAC address?
if(macCompAddr(&interface->macAddr, &MAC_UNSPECIFIED_ADDR)) { //Use the factory preprogrammed MAC address interface->macAddr.b[0] = CONTROL_MAC_ID_HI_R(0) >> CONTROL_MAC_ID0_HI_MACADDR_47_40_SHIFT; interface->macAddr.b[1] = CONTROL_MAC_ID_HI_R(0) >> CONTROL_MAC_ID0_HI_MACADDR_39_32_SHIFT; interface->macAddr.b[2] = CONTROL_MAC_ID_HI_R(0) >> CONTROL_MAC_ID0_HI_MACADDR_31_24_SHIFT; interface->macAddr.b[3] = CONTROL_MAC_ID_HI_R(0) >> CONTROL_MAC_ID0_HI_MACADDR_23_16_SHIFT; interface->macAddr.b[4] = CONTROL_MAC_ID_LO_R(0) >> CONTROL_MAC_ID0_LO_MACADDR_15_8_SHIFT; interface->macAddr.b[5] = CONTROL_MAC_ID_LO_R(0) >> CONTROL_MAC_ID0_LO_MACADDR_7_0_SHIFT; //Generate the 64-bit interface identifier macAddrToEui64(&interface->macAddr, &interface->eui64); } //Set port state to forward temp = CPSW_ALE_PORTCTL_R(1) & ~CPSW_ALE_PORTCTL1_PORT_STATE; CPSW_ALE_PORTCTL_R(1) = temp | CPSW_ALE_PORTCTL_PORT_STATE_FORWARD; //Set the MAC address of the station CPSW_PORT1_SA_HI_R = interface->macAddr.w[0] | (interface->macAddr.w[1] << 16); CPSW_PORT1_SA_LO_R = interface->macAddr.w[2]; //Configure VLAN identifier and VLAN priority CPSW_PORT1_PORT_VLAN_R = (0 << CPSW_PORT_P1_PORT_VLAN_PORT_PRI_SHIFT) | (CPSW_PORT1 << CPSW_PORT_P1_PORT_VLAN_PORT_VID_SHIFT); //Add a VLAN entry in the ALE table am335xEthAddVlanEntry(CPSW_PORT1, CPSW_PORT1); //Add a VLAN/unicast address entry in the ALE table am335xEthAddVlanAddrEntry(CPSW_PORT1, CPSW_PORT1, &interface->macAddr); //Enable CPSW statistics CPSW_SS_STAT_PORT_EN_R |= CPSW_SS_STAT_PORT_EN_P1_STAT_EN; //Enable TX and RX CPSW_SL1_MACCONTROL_R = CPSW_SL_MACCONTROL_GMII_EN; //Accept any packets from the upper layer osSetEvent(&interface->nicTxEvent); //Successful initialization return NO_ERROR; } /** * @brief AM335x Ethernet MAC initialization (port 2) * @param[in] interface Underlying network interface * @return Error code **/ error_t am335xEthInitPort2(NetInterface *interface) { error_t error; uint32_t temp; //Debug message TRACE_INFO("Initializing AM335x Ethernet MAC (port 2)...\r\n"); //Initialize CPSW instance am335xEthInitInstance(interface); //Save underlying network interface nicDriverInterface2 = interface; //PHY transceiver initialization error = interface->phyDriver->init(interface); //Failed to initialize PHY transceiver? if(error) { return error; } //Unspecified MAC address?
if(macCompAddr(&interface->macAddr, &MAC_UNSPECIFIED_ADDR)) { //Use the factory preprogrammed MAC address interface->macAddr.b[0] = CONTROL_MAC_ID_HI_R(1) >> CONTROL_MAC_ID1_HI_MACADDR_47_40_SHIFT; interface->macAddr.b[1] = CONTROL_MAC_ID_HI_R(1) >> CONTROL_MAC_ID1_HI_MACADDR_39_32_SHIFT; interface->macAddr.b[2] = CONTROL_MAC_ID_HI_R(1) >> CONTROL_MAC_ID1_HI_MACADDR_31_24_SHIFT; interface->macAddr.b[3] = CONTROL_MAC_ID_HI_R(1) >> CONTROL_MAC_ID1_HI_MACADDR_23_16_SHIFT; interface->macAddr.b[4] = CONTROL_MAC_ID_LO_R(1) >> CONTROL_MAC_ID1_LO_MACADDR_15_8_SHIFT; interface->macAddr.b[5] = CONTROL_MAC_ID_LO_R(1) >> CONTROL_MAC_ID1_LO_MACADDR_7_0_SHIFT; //Generate the 64-bit interface identifier macAddrToEui64(&interface->macAddr, &interface->eui64); } //Set port state to forward temp = CPSW_ALE_PORTCTL_R(2) & ~CPSW_ALE_PORTCTL2_PORT_STATE; CPSW_ALE_PORTCTL_R(2) = temp | CPSW_ALE_PORTCTL_PORT_STATE_FORWARD; //Set the MAC address of the station CPSW_PORT2_SA_HI_R = interface->macAddr.w[0] | (interface->macAddr.w[1] << 16); CPSW_PORT2_SA_LO_R = interface->macAddr.w[2]; //Configure VLAN identifier and VLAN priority CPSW_PORT2_PORT_VLAN_R = (0 << CPSW_PORT_P2_PORT_VLAN_PORT_PRI_SHIFT) | (CPSW_PORT2 << CPSW_PORT_P2_PORT_VLAN_PORT_VID_SHIFT); //Add a VLAN entry in the ALE table am335xEthAddVlanEntry(CPSW_PORT2, CPSW_PORT2); //Add a VLAN/unicast address entry in the ALE table am335xEthAddVlanAddrEntry(CPSW_PORT2, CPSW_PORT2, &interface->macAddr); //Enable CPSW statistics CPSW_SS_STAT_PORT_EN_R |= CPSW_SS_STAT_PORT_EN_P2_STAT_EN; //Enable TX and RX CPSW_SL2_MACCONTROL_R = CPSW_SL_MACCONTROL_GMII_EN; //Accept any packets from the upper layer osSetEvent(&interface->nicTxEvent); //Successful initialization return NO_ERROR; } /** * @brief Initialize CPSW instance * @param[in] interface Underlying network interface **/ void am335xEthInitInstance(NetInterface *interface) { uint_t i; uint32_t temp; #ifdef ti_sysbios_BIOS___VERS Hwi_Params hwiParams; #endif //Initialization sequence is performed once if(nicDriverInterface1 == NULL && nicDriverInterface2 == NULL) { //Select the interface mode (MII/RMII/RGMII) and configure pin muxing am335xEthInitGpio(interface); //Enable the CPSW subsystem clocks CM_PER_CPGMAC0_CLKCTRL_R = CM_PER_CPGMAC0_CLKCTRL_MODULEMODE_ENABLE; //Wait for the CPSW module to be fully functional do { //Get module idle status temp = (CM_PER_CPGMAC0_CLKCTRL_R & CM_PER_CPGMAC0_CLKCTRL_IDLEST) >> CM_PER_CPGMAC0_CLKCTRL_IDLEST_SHIFT; //Keep looping as long as the module is not fully functional } while(temp != CM_PER_CPGMAC0_CLKCTRL_IDLEST_FUNC); //Start a software forced wake-up transition CM_PER_CPSW_CLKSTCTRL_R = CM_PER_CPSW_CLKSTCTRL_CLKTRCTRL_SW_WKUP; //Wait for the CPSW 125 MHz OCP clock to be active do { //Get the state of the CPSW 125 MHz OCP clock temp = (CM_PER_CPSW_CLKSTCTRL_R & CM_PER_CPSW_CLKSTCTRL_CLKACTIVITY_CPSW_125MHZ_GCLK) >> CM_PER_CPSW_CLKSTCTRL_CLKACTIVITY_CPSW_125MHZ_GCLK_SHIFT; //Keep looping as long as the clock is inactive } while(temp != CM_PER_CPSW_CLKSTCTRL_CLKACTIVITY_CPSW_125MHZ_GCLK_ACT); //Reset CPSW subsystem CPSW_SS_SOFT_RESET_R = CPSW_SS_SOFT_RESET_SOFT_RESET; //Wait for the reset to complete while((CPSW_SS_SOFT_RESET_R & CPSW_SS_SOFT_RESET_SOFT_RESET) != 0) { } //Reset CPSW wrapper module CPSW_WR_SOFT_RESET_R = CPSW_WR_SOFT_RESET_SOFT_RESET; //Wait for the reset to complete while((CPSW_WR_SOFT_RESET_R & CPSW_WR_SOFT_RESET_SOFT_RESET) != 0) { } //Reset CPSW sliver 1 logic CPSW_SL1_SOFT_RESET_R = CPSW_SL_SOFT_RESET_SOFT_RESET; //Wait for the reset to complete 
while((CPSW_SL1_SOFT_RESET_R & CPSW_SL_SOFT_RESET_SOFT_RESET) != 0) { } //Reset CPSW sliver 2 logic CPSW_SL2_SOFT_RESET_R = CPSW_SL_SOFT_RESET_SOFT_RESET; //Wait for the reset to complete while((CPSW_SL2_SOFT_RESET_R & CPSW_SL_SOFT_RESET_SOFT_RESET) != 0) { } //Reset CPSW CPDMA module CPSW_CPDMA_CPDMA_SOFT_RESET_R = CPSW_CPDMA_CPDMA_SOFT_RESET_SOFT_RESET; //Wait for the reset to complete while((CPSW_CPDMA_CPDMA_SOFT_RESET_R & CPSW_CPDMA_CPDMA_SOFT_RESET_SOFT_RESET) != 0) { } //Initialize the HDPs and the CPs to NULL for(i = CPSW_CH0; i <= CPSW_CH7; i++) { //TX head descriptor pointer CPSW_CPDMA_TX_HDP_R(i) = 0; //TX completion pointer CPSW_CPDMA_TX_CP_R(i) = 0; //RX head descriptor pointer CPSW_CPDMA_RX_HDP_R(i) = 0; //RX completion pointer CPSW_CPDMA_RX_CP_R(i) = 0; } //Enable ALE and clear ALE address table CPSW_ALE_CONTROL_R = CPSW_ALE_CONTROL_ENABLE_ALE | CPSW_ALE_CONTROL_CLEAR_TABLE; //For dual MAC mode, configure VLAN aware mode CPSW_ALE_CONTROL_R |= CPSW_ALE_CONTROL_ALE_VLAN_AWARE; //Set dual MAC mode for port 0 temp = CPSW_PORT0_TX_IN_CTL_R & ~CPSW_PORT_P0_TX_IN_CTL_TX_IN_SEL; CPSW_PORT0_TX_IN_CTL_R = temp | CPSW_PORT_P0_TX_IN_CTL_TX_IN_DUAL_MAC; //Set port 0 state to forward temp = CPSW_ALE_PORTCTL_R(0) & ~CPSW_ALE_PORTCTL0_PORT_STATE; CPSW_ALE_PORTCTL_R(0) = temp | CPSW_ALE_PORTCTL_PORT_STATE_FORWARD; //Enable CPSW statistics CPSW_SS_STAT_PORT_EN_R = CPSW_SS_STAT_PORT_EN_P0_STAT_EN; //Configure TX and RX buffer descriptors am335xEthInitBufferDesc(interface); //Acknowledge TX and RX interrupts for proper interrupt pulsing CPSW_CPDMA_CPDMA_EOI_VECTOR_R = CPSW_CPDMA_EOI_VECTOR_TX_PULSE; CPSW_CPDMA_CPDMA_EOI_VECTOR_R = CPSW_CPDMA_EOI_VECTOR_RX_PULSE; //Enable channel 1 and 2 interrupts of the DMA engine CPSW_CPDMA_TX_INTMASK_SET_R = (1 << CPSW_CH1) | (1 << CPSW_CH2); //Enable TX completion interrupts CPSW_WR_C_TX_EN_R(CPSW_CORE0) |= (1 << CPSW_CH1) | (1 << CPSW_CH2); //Enable channel 0 interrupts of the DMA engine CPSW_CPDMA_RX_INTMASK_SET_R = (1 << CPSW_CH0); //Enable RX completion interrupts CPSW_WR_C_RX_EN_R(CPSW_CORE0) |= (1 << CPSW_CH0); #ifdef ti_sysbios_BIOS___VERS //Configure TX interrupt Hwi_Params_init(&hwiParams); hwiParams.enableInt = FALSE; hwiParams.priority = AM335X_ETH_IRQ_PRIORITY; //Register TX interrupt handler Hwi_create(SYS_INT_3PGSWTXINT0, (Hwi_FuncPtr) am335xEthTxIrqHandler, &hwiParams, NULL); //Configure RX interrupt Hwi_Params_init(&hwiParams); hwiParams.enableInt = FALSE; hwiParams.priority = AM335X_ETH_IRQ_PRIORITY; //Register RX interrupt handler Hwi_create(SYS_INT_3PGSWRXINT0, (Hwi_FuncPtr) am335xEthRxIrqHandler, &hwiParams, NULL); #else //Register interrupt handlers IntRegister(SYS_INT_3PGSWTXINT0, am335xEthTxIrqHandler); IntRegister(SYS_INT_3PGSWRXINT0, am335xEthRxIrqHandler); //Configure TX interrupt priority IntPrioritySet(SYS_INT_3PGSWTXINT0, AM335X_ETH_IRQ_PRIORITY, AINTC_HOSTINT_ROUTE_IRQ); //Configure RX interrupt priority IntPrioritySet(SYS_INT_3PGSWRXINT0, AM335X_ETH_IRQ_PRIORITY, AINTC_HOSTINT_ROUTE_IRQ); #endif //Enable the transmission and reception CPSW_CPDMA_TX_CONTROL_R = CPSW_CPDMA_TX_CONTROL_TX_EN; CPSW_CPDMA_RX_CONTROL_R = CPSW_CPDMA_RX_CONTROL_RX_EN; //Calculate the MDC clock divider to be used temp = (MDIO_INPUT_CLK / MDIO_OUTPUT_CLK) - 1; //Initialize MDIO interface MDIO_CONTROL_R = MDIO_CONTROL_ENABLE | MDIO_CONTROL_FAULTENB | (temp & MDIO_CONTROL_CLKDIV); } } //BeagleBone Black, TMDSSK3358, OSD3358-SM-RED or SBC DIVA board?
#if defined(USE_BEAGLEBONE_BLACK) || defined(USE_TMDSSK3358) || \ defined(USE_OSD3358_SM_RED) || defined(USE_SBC_DIVA) /** * @brief GPIO configuration * @param[in] interface Underlying network interface **/ void am335xEthInitGpio(NetInterface *interface) { //BeagleBone Black board? #if defined(USE_BEAGLEBONE_BLACK) //Select MII interface mode for port 1 CONTROL_GMII_SEL_R = CONTROL_GMII_SEL_GMII1_SEL_MII; //Configure MII1_TX_CLK (GPIO3_9) CONTROL_CONF_MII1_TXCLK_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(0); //Configure MII1_TX_EN (GPIO3_3) CONTROL_CONF_MII1_TXEN_R = CONTROL_CONF_MUXMODE(0); //Configure MII1_TXD0 (GPIO0_28) CONTROL_CONF_MII1_TXD0_R = CONTROL_CONF_MUXMODE(0); //Configure MII1_TXD1 (GPIO0_21) CONTROL_CONF_MII1_TXD1_R = CONTROL_CONF_MUXMODE(0); //Configure MII1_TXD2 (GPIO0_17) CONTROL_CONF_MII1_TXD2_R = CONTROL_CONF_MUXMODE(0); //Configure MII1_TXD3 (GPIO0_16) CONTROL_CONF_MII1_TXD3_R = CONTROL_CONF_MUXMODE(0); //Configure MII1_RX_CLK (GPIO3_10) CONTROL_CONF_MII1_RXCLK_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(0); //Configure MII1_RXD0 (GPIO2_21) CONTROL_CONF_MII1_RXD0_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(0); //Configure MII1_RXD1 (GPIO2_20) CONTROL_CONF_MII1_RXD1_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(0); //Configure MII1_RXD2 (GPIO2_19) CONTROL_CONF_MII1_RXD2_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(0); //Configure MII1_RXD3 (GPIO2_18) CONTROL_CONF_MII1_RXD3_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(0); //Configure MII1_COL (GPIO3_0) CONTROL_CONF_MII1_COL_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(0); //Configure MII1_CRS (GPIO3_1) CONTROL_CONF_MII1_CRS_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(0); //Configure MII1_RX_ER (GPIO3_2) CONTROL_CONF_MII1_RXERR_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(0); //Configure MII1_RX_DV (GPIO3_4) CONTROL_CONF_MII1_RXDV_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(0); //Configure MDIO (GPIO0_0) CONTROL_CONF_MDIO_DATA_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_PULLUPSEL | CONTROL_CONF_MUXMODE(0); //Configure MDC (GPIO0_1) CONTROL_CONF_MDIO_CLK_R = CONTROL_CONF_PULLUPSEL | CONTROL_CONF_MUXMODE(0); //TMDSSK3358 board? 
#elif defined(USE_TMDSSK3358) //Select RGMII interface mode for both port 1 and port 2 CONTROL_GMII_SEL_R = CONTROL_GMII_SEL_GMII1_SEL_RGMII | CONTROL_GMII_SEL_GMII2_SEL_RGMII; //Configure RGMII1_TCLK (GPIO3_9) CONTROL_CONF_MII1_TXCLK_R = CONTROL_CONF_MUXMODE(2); //Configure RGMII1_TCTL (GPIO3_3) CONTROL_CONF_MII1_TXEN_R = CONTROL_CONF_MUXMODE(2); //Configure RGMII1_TD0 (GPIO0_28) CONTROL_CONF_MII1_TXD0_R = CONTROL_CONF_MUXMODE(2); //Configure RGMII1_TD1 (GPIO0_21) CONTROL_CONF_MII1_TXD1_R = CONTROL_CONF_MUXMODE(2); //Configure RGMII1_TD2 (GPIO0_17) CONTROL_CONF_MII1_TXD2_R = CONTROL_CONF_MUXMODE(2); //Configure RGMII1_TD3 (GPIO0_16) CONTROL_CONF_MII1_TXD3_R = CONTROL_CONF_MUXMODE(2); //Configure RGMII1_RCLK (GPIO3_10) CONTROL_CONF_MII1_RXCLK_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure RGMII1_RCTL (GPIO3_4) CONTROL_CONF_MII1_RXDV_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure RGMII1_RD0 (GPIO2_21) CONTROL_CONF_MII1_RXD0_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure RGMII1_RD1 (GPIO2_20) CONTROL_CONF_MII1_RXD1_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure RGMII1_RD2 (GPIO2_19) CONTROL_CONF_MII1_RXD2_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure RGMII1_RD3 (GPIO2_18) CONTROL_CONF_MII1_RXD3_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure RGMII2_TCLK (GPIO1_22/GPMC_A6) CONTROL_CONF_GPMC_A_R(6) = CONTROL_CONF_MUXMODE(2); //Configure RGMII2_TCTL (GPIO1_16/GPMC_A0) CONTROL_CONF_GPMC_A_R(0) = CONTROL_CONF_MUXMODE(2); //Configure RGMII2_TD0 (GPIO1_21/GPMC_A5) CONTROL_CONF_GPMC_A_R(5) = CONTROL_CONF_MUXMODE(2); //Configure RGMII2_TD1 (GPIO1_20/GPMC_A4) CONTROL_CONF_GPMC_A_R(4) = CONTROL_CONF_MUXMODE(2); //Configure RGMII2_TD2 (GPIO1_19/GPMC_A3) CONTROL_CONF_GPMC_A_R(3) = CONTROL_CONF_MUXMODE(2); //Configure RGMII2_TD3 (GPIO1_18/GPMC_A2) CONTROL_CONF_GPMC_A_R(2) = CONTROL_CONF_MUXMODE(2); //Configure RGMII2_RCLK (GPIO1_23/GPMC_A7) CONTROL_CONF_GPMC_A_R(7) = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure RGMII2_RCTL (GPIO1_17/GPMC_A1) CONTROL_CONF_GPMC_A_R(1) = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure RGMII2_RD0 (GPIO1_27/GPMC_A11) CONTROL_CONF_GPMC_A_R(11) = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure RGMII2_RD1 (GPIO1_26/GPMC_A10) CONTROL_CONF_GPMC_A_R(10) = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure RGMII2_RD2 (GPIO1_25/GPMC_A9) CONTROL_CONF_GPMC_A_R(9) = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure RGMII2_RD3 (GPIO1_24/GPMC_A8) CONTROL_CONF_GPMC_A_R(8) = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure MDIO (GPIO0_0) CONTROL_CONF_MDIO_DATA_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_PULLUPSEL | CONTROL_CONF_MUXMODE(0); //Configure MDC (GPIO0_1) CONTROL_CONF_MDIO_CLK_R = CONTROL_CONF_PULLUPSEL | CONTROL_CONF_MUXMODE(0); //OSD3358-SM-RED board?
#elif defined(USE_OSD3358_SM_RED) //Select RGMII interface mode for port 1 CONTROL_GMII_SEL_R = CONTROL_GMII_SEL_GMII1_SEL_RGMII; //Configure RGMII1_TCLK (GPIO3_9) CONTROL_CONF_MII1_TXCLK_R = CONTROL_CONF_MUXMODE(2); //Configure RGMII1_TCTL (GPIO3_3) CONTROL_CONF_MII1_TXEN_R = CONTROL_CONF_MUXMODE(2); //Configure RGMII1_TD0 (GPIO0_28) CONTROL_CONF_MII1_TXD0_R = CONTROL_CONF_MUXMODE(2); //Configure RGMII1_TD1 (GPIO0_21) CONTROL_CONF_MII1_TXD1_R = CONTROL_CONF_MUXMODE(2); //Configure RGMII1_TD2 (GPIO0_17) CONTROL_CONF_MII1_TXD2_R = CONTROL_CONF_MUXMODE(2); //Configure RGMII1_TD3 (GPIO0_16) CONTROL_CONF_MII1_TXD3_R = CONTROL_CONF_MUXMODE(2); //Configure RGMII1_RCLK (GPIO3_10) CONTROL_CONF_MII1_RXCLK_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure RGMII1_RCTL (GPIO3_4) CONTROL_CONF_MII1_RXDV_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure RGMII1_RD0 (GPIO2_21) CONTROL_CONF_MII1_RXD0_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure RGMII1_RD1 (GPIO2_20) CONTROL_CONF_MII1_RXD1_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure RGMII1_RD2 (GPIO2_19) CONTROL_CONF_MII1_RXD2_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure RGMII1_RD3 (GPIO2_18) CONTROL_CONF_MII1_RXD3_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure MDIO (GPIO0_0) CONTROL_CONF_MDIO_DATA_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_PULLUPSEL | CONTROL_CONF_MUXMODE(0); //Configure MDC (GPIO0_1) CONTROL_CONF_MDIO_CLK_R = CONTROL_CONF_PULLUPSEL | CONTROL_CONF_MUXMODE(0); //SBC DIVA board? #elif defined(USE_SBC_DIVA) //Select RMII interface mode for port 1 and RGMII interface mode for port 2 CONTROL_GMII_SEL_R = CONTROL_GMII_SEL_RMII1_IO_CLK_EN | CONTROL_GMII_SEL_GMII1_SEL_RMII | CONTROL_GMII_SEL_GMII2_SEL_RGMII; //Configure RMII1_REF_CLK (GPIO0_29) CONTROL_CONF_RMII1_REFCLK_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(0); //Configure RMII1_TX_EN (GPIO3_3) CONTROL_CONF_MII1_TXEN_R = CONTROL_CONF_MUXMODE(1); //Configure RMII1_TXD0 (GPIO0_28) CONTROL_CONF_MII1_TXD0_R = CONTROL_CONF_MUXMODE(1); //Configure RMII1_TXD1 (GPIO0_21) CONTROL_CONF_MII1_TXD1_R = CONTROL_CONF_MUXMODE(1); //Configure RMII1_RXD0 (GPIO2_21) CONTROL_CONF_MII1_RXD0_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(1); //Configure RMII1_RXD1 (GPIO2_20) CONTROL_CONF_MII1_RXD1_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(1); //Configure RMII1_CRS_DV (GPIO3_1) CONTROL_CONF_MII1_CRS_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(1); //Configure RMII1_RX_ER (GPIO3_2) CONTROL_CONF_MII1_RXERR_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(1); //Configure RGMII2_TCLK (GPIO1_22/GPMC_A6) CONTROL_CONF_GPMC_A_R(6) = CONTROL_CONF_MUXMODE(2); //Configure RGMII2_TCTL (GPIO1_16/GPMC_A0) CONTROL_CONF_GPMC_A_R(0) = CONTROL_CONF_MUXMODE(2); //Configure RGMII2_TD0 (GPIO1_21/GPMC_A5) CONTROL_CONF_GPMC_A_R(5) = CONTROL_CONF_MUXMODE(2); //Configure RGMII2_TD1 (GPIO1_20/GPMC_A4) CONTROL_CONF_GPMC_A_R(4) = CONTROL_CONF_MUXMODE(2); //Configure RGMII2_TD2 (GPIO1_19/GPMC_A3) CONTROL_CONF_GPMC_A_R(3) = CONTROL_CONF_MUXMODE(2); //Configure RGMII2_TD3 (GPIO1_18/GPMC_A2) CONTROL_CONF_GPMC_A_R(2) = CONTROL_CONF_MUXMODE(2); //Configure RGMII2_RCLK (GPIO1_23/GPMC_A7) CONTROL_CONF_GPMC_A_R(7) = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure RGMII2_RCTL (GPIO1_17/GPMC_A1) CONTROL_CONF_GPMC_A_R(1) = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure RGMII2_RD0 (GPIO1_27/GPMC_A11) CONTROL_CONF_GPMC_A_R(11) = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure RGMII2_RD1
(GPIO1_26/GPMC_A10) CONTROL_CONF_GPMC_A_R(10) = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure RGMII2_RD2 (GPIO1_25/GPMC_A9) CONTROL_CONF_GPMC_A_R(9) = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure RGMII2_RD3 (GPIO1_24/GPMC_A8) CONTROL_CONF_GPMC_A_R(8) = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure MDIO (GPIO0_0) CONTROL_CONF_MDIO_DATA_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_PULLUPSEL | CONTROL_CONF_MUXMODE(0); //Configure MDC (GPIO0_1) CONTROL_CONF_MDIO_CLK_R = CONTROL_CONF_PULLUPSEL | CONTROL_CONF_MUXMODE(0); #endif } #endif /** * @brief Initialize buffer descriptor lists * @param[in] interface Underlying network interface **/ void am335xEthInitBufferDesc(NetInterface *interface) { uint_t i; uint_t nextIndex; uint_t prevIndex; //Initialize TX buffer descriptor list (port 1) for(i = 0; i < AM335X_ETH_TX_BUFFER_COUNT; i++) { //Index of the next buffer nextIndex = (i + 1) % AM335X_ETH_TX_BUFFER_COUNT; //Index of the previous buffer prevIndex = (i + AM335X_ETH_TX_BUFFER_COUNT - 1) % AM335X_ETH_TX_BUFFER_COUNT; //Next descriptor pointer txBufferDesc1[i].word0 = (uint32_t) NULL; //Buffer pointer txBufferDesc1[i].word1 = (uint32_t) txBuffer1[i]; //Buffer offset and buffer length txBufferDesc1[i].word2 = 0; //Status flags and packet length txBufferDesc1[i].word3 = 0; //Form a doubly linked list txBufferDesc1[i].next = &txBufferDesc1[nextIndex]; txBufferDesc1[i].prev = &txBufferDesc1[prevIndex]; } //Point to the very first descriptor txCurBufferDesc1 = &txBufferDesc1[0]; //Mark the end of the queue txCurBufferDesc1->prev->word3 = CPSW_TX_WORD3_SOP | CPSW_TX_WORD3_EOP | CPSW_TX_WORD3_EOQ; //Initialize TX buffer descriptor list (port 2) for(i = 0; i < AM335X_ETH_TX_BUFFER_COUNT; i++) { //Index of the next buffer nextIndex = (i + 1) % AM335X_ETH_TX_BUFFER_COUNT; //Index of the previous buffer prevIndex = (i + AM335X_ETH_TX_BUFFER_COUNT - 1) % AM335X_ETH_TX_BUFFER_COUNT; //Next descriptor pointer txBufferDesc2[i].word0 = (uint32_t) NULL; //Buffer pointer txBufferDesc2[i].word1 = (uint32_t) txBuffer2[i]; //Buffer offset and buffer length txBufferDesc2[i].word2 = 0; //Status flags and packet length txBufferDesc2[i].word3 = 0; //Form a doubly linked list txBufferDesc2[i].next = &txBufferDesc2[nextIndex]; txBufferDesc2[i].prev = &txBufferDesc2[prevIndex]; } //Point to the very first descriptor txCurBufferDesc2 = &txBufferDesc2[0]; //Mark the end of the queue txCurBufferDesc2->prev->word3 = CPSW_TX_WORD3_SOP | CPSW_TX_WORD3_EOP | CPSW_TX_WORD3_EOQ; //Initialize RX buffer descriptor list for(i = 0; i < AM335X_ETH_RX_BUFFER_COUNT; i++) { //Index of the next buffer nextIndex = (i + 1) % AM335X_ETH_RX_BUFFER_COUNT; //Index of the previous buffer prevIndex = (i + AM335X_ETH_RX_BUFFER_COUNT - 1) % AM335X_ETH_RX_BUFFER_COUNT; //Next descriptor pointer rxBufferDesc[i].word0 = (uint32_t) &rxBufferDesc[nextIndex]; //Buffer pointer rxBufferDesc[i].word1 = (uint32_t) rxBuffer[i]; //Buffer offset and buffer length rxBufferDesc[i].word2 = AM335X_ETH_RX_BUFFER_SIZE; //Status flags and packet length rxBufferDesc[i].word3 = CPSW_RX_WORD3_OWNER; //Form a doubly linked list rxBufferDesc[i].next = &rxBufferDesc[nextIndex]; rxBufferDesc[i].prev = &rxBufferDesc[prevIndex]; } //Point to the very first descriptor rxCurBufferDesc = &rxBufferDesc[0]; //Mark the end of the queue rxCurBufferDesc->prev->word0 = (uint32_t) NULL; //Write the RX DMA head descriptor pointer CPSW_CPDMA_RX_HDP_R(CPSW_CH0) = (uint32_t) rxCurBufferDesc; } /** * @brief AM335x Ethernet MAC timer handler * * This 
routine is periodically called by the TCP/IP stack to handle periodic * operations such as polling the link state * * @param[in] interface Underlying network interface **/ void am335xEthTick(NetInterface *interface) { //Valid Ethernet PHY or switch driver? if(interface->phyDriver != NULL) { //Handle periodic operations interface->phyDriver->tick(interface); } else if(interface->switchDriver != NULL) { //Handle periodic operations interface->switchDriver->tick(interface); } else { //Just for sanity } //Misqueued buffer condition? if((rxCurBufferDesc->word3 & CPSW_RX_WORD3_OWNER) != 0) { if(CPSW_CPDMA_RX_HDP_R(CPSW_CH0) == 0) { //The host acts on the misqueued buffer condition by writing the added //buffer descriptor address to the appropriate RX DMA head descriptor //pointer CPSW_CPDMA_RX_HDP_R(CPSW_CH0) = (uint32_t) rxCurBufferDesc; } } } /** * @brief Enable interrupts * @param[in] interface Underlying network interface **/ void am335xEthEnableIrq(NetInterface *interface) { #ifdef ti_sysbios_BIOS___VERS //Enable Ethernet MAC interrupts Hwi_enableInterrupt(SYS_INT_3PGSWTXINT0); Hwi_enableInterrupt(SYS_INT_3PGSWRXINT0); #else //Enable Ethernet MAC interrupts IntSystemEnable(SYS_INT_3PGSWTXINT0); IntSystemEnable(SYS_INT_3PGSWRXINT0); #endif //Valid Ethernet PHY or switch driver? if(interface->phyDriver != NULL) { //Enable Ethernet PHY interrupts interface->phyDriver->enableIrq(interface); } else if(interface->switchDriver != NULL) { //Enable Ethernet switch interrupts interface->switchDriver->enableIrq(interface); } else { //Just for sanity } } /** * @brief Disable interrupts * @param[in] interface Underlying network interface **/ void am335xEthDisableIrq(NetInterface *interface) { #ifdef ti_sysbios_BIOS___VERS //Disable Ethernet MAC interrupts Hwi_disableInterrupt(SYS_INT_3PGSWTXINT0); Hwi_disableInterrupt(SYS_INT_3PGSWRXINT0); #else //Disable Ethernet MAC interrupts IntSystemDisable(SYS_INT_3PGSWTXINT0); IntSystemDisable(SYS_INT_3PGSWRXINT0); #endif //Valid Ethernet PHY or switch driver? if(interface->phyDriver != NULL) { //Disable Ethernet PHY interrupts interface->phyDriver->disableIrq(interface); } else if(interface->switchDriver != NULL) { //Disable Ethernet switch interrupts interface->switchDriver->disableIrq(interface); } else { //Just for sanity } } /** * @brief Ethernet MAC transmit interrupt **/ void am335xEthTxIrqHandler(void) { bool_t flag; uint32_t status; uint32_t temp; Am335xTxBufferDesc *p; //Interrupt service routine prologue osEnterIsr(); //This flag will be set if a higher priority task must be woken flag = FALSE; //Read the TX_STAT register to determine which channels caused the interrupt status = CPSW_WR_C_TX_STAT_R(CPSW_CORE0); //Packet transmitted on channel 1? if(status & (1 << CPSW_CH1)) { //Point to the buffer descriptor p = (Am335xTxBufferDesc *) CPSW_CPDMA_TX_CP_R(CPSW_CH1); //Read the status flags temp = p->word3 & (CPSW_TX_WORD3_SOP | CPSW_TX_WORD3_EOP | CPSW_TX_WORD3_OWNER | CPSW_TX_WORD3_EOQ); //Misqueued buffer condition? 
if(temp == (CPSW_TX_WORD3_SOP | CPSW_TX_WORD3_EOP | CPSW_TX_WORD3_EOQ)) { //Check whether the next descriptor pointer is non-zero if(p->word0 != 0) { //The host corrects the misqueued buffer condition by writing the //misqueued packets buffer descriptor address to the appropriate //TX DMA head descriptor pointer CPSW_CPDMA_TX_HDP_R(CPSW_CH1) = (uint32_t) p->word0; } } //Write the TX completion pointer CPSW_CPDMA_TX_CP_R(CPSW_CH1) = (uint32_t) p; //Check whether the TX buffer is available for writing if((txCurBufferDesc1->word3 & CPSW_TX_WORD3_OWNER) == 0) { //Notify the TCP/IP stack that the transmitter is ready to send flag |= osSetEventFromIsr(&nicDriverInterface1->nicTxEvent); } } //Packet transmitted on channel 2? if(status & (1 << CPSW_CH2)) { //Point to the buffer descriptor p = (Am335xTxBufferDesc *) CPSW_CPDMA_TX_CP_R(CPSW_CH2); //Read the status flags temp = p->word3 & (CPSW_TX_WORD3_SOP | CPSW_TX_WORD3_EOP | CPSW_TX_WORD3_OWNER | CPSW_TX_WORD3_EOQ); //Misqueued buffer condition? if(temp == (CPSW_TX_WORD3_SOP | CPSW_TX_WORD3_EOP | CPSW_TX_WORD3_EOQ)) { //Check whether the next descriptor pointer is non-zero if(p->word0 != 0) { //The host corrects the misqueued buffer condition by writing the //misqueued packets buffer descriptor address to the appropriate //TX DMA head descriptor pointer CPSW_CPDMA_TX_HDP_R(CPSW_CH2) = (uint32_t) p->word0; } } //Write the TX completion pointer CPSW_CPDMA_TX_CP_R(CPSW_CH2) = (uint32_t) p; //Check whether the TX buffer is available for writing if((txCurBufferDesc2->word3 & CPSW_TX_WORD3_OWNER) == 0) { //Notify the TCP/IP stack that the transmitter is ready to send flag |= osSetEventFromIsr(&nicDriverInterface2->nicTxEvent); } } //Write the DMA end of interrupt vector CPSW_CPDMA_CPDMA_EOI_VECTOR_R = CPSW_CPDMA_EOI_VECTOR_TX_PULSE; //Interrupt service routine epilogue osExitIsr(flag); } /** * @brief Ethernet MAC receive interrupt **/ void am335xEthRxIrqHandler(void) { bool_t flag; uint32_t status; //Interrupt service routine prologue osEnterIsr(); //This flag will be set if a higher priority task must be woken flag = FALSE; //Read the RX_STAT register to determine which channels caused the interrupt status = CPSW_WR_C_RX_STAT_R(CPSW_CORE0); //Packet received on channel 0? if(status & (1 << CPSW_CH0)) { //Disable RX interrupts CPSW_WR_C_RX_EN_R(CPSW_CORE0) &= ~(1 << CPSW_CH0); //Set event flag if(nicDriverInterface1 != NULL) { nicDriverInterface1->nicEvent = TRUE; } else if(nicDriverInterface2 != NULL) { nicDriverInterface2->nicEvent = TRUE; } //Notify the TCP/IP stack of the event flag |= osSetEventFromIsr(&netEvent); } //Write the DMA end of interrupt vector CPSW_CPDMA_CPDMA_EOI_VECTOR_R = CPSW_CPDMA_EOI_VECTOR_RX_PULSE; //Interrupt service routine epilogue osExitIsr(flag); } /** * @brief AM335x Ethernet MAC event handler * @param[in] interface Underlying network interface **/ void am335xEthEventHandler(NetInterface *interface) { static uint8_t buffer[AM335X_ETH_RX_BUFFER_SIZE]; error_t error; size_t n; uint32_t temp; //Process all pending packets do { //The current buffer is available for reading? if((rxCurBufferDesc->word3 & CPSW_RX_WORD3_OWNER) == 0) { //SOP and EOP flags should be set if((rxCurBufferDesc->word3 & CPSW_RX_WORD3_SOP) != 0 && (rxCurBufferDesc->word3 & CPSW_RX_WORD3_EOP) != 0) { //Make sure no error occurred if((rxCurBufferDesc->word3 & CPSW_RX_WORD3_PKT_ERROR) == 0) { //Check the port on which the packet was received switch(rxCurBufferDesc->word3 & CPSW_RX_WORD3_FROM_PORT) { //Port 1? 
case CPSW_RX_WORD3_FROM_PORT_1: interface = nicDriverInterface1; break; //Port 2? case CPSW_RX_WORD3_FROM_PORT_2: interface = nicDriverInterface2; break; //Invalid port number? default: interface = NULL; break; } //Retrieve the length of the frame n = rxCurBufferDesc->word3 & CPSW_RX_WORD3_PACKET_LENGTH; //Limit the amount of data to read n = MIN(n, AM335X_ETH_RX_BUFFER_SIZE); //Sanity check if(interface != NULL) { //Copy data from the receive buffer osMemcpy(buffer, (uint8_t *) rxCurBufferDesc->word1, (n + 3) & ~3UL); //Packet successfully received error = NO_ERROR; } else { //The port number is invalid error = ERROR_INVALID_PACKET; } } else { //The received packet contains an error error = ERROR_INVALID_PACKET; } } else { //The packet is not valid error = ERROR_INVALID_PACKET; } //Mark the end of the queue with a NULL pointer rxCurBufferDesc->word0 = (uint32_t) NULL; //Restore the length of the buffer rxCurBufferDesc->word2 = AM335X_ETH_RX_BUFFER_SIZE; //Give the ownership of the descriptor back to the DMA rxCurBufferDesc->word3 = CPSW_RX_WORD3_OWNER; //Link the current descriptor to the previous descriptor rxCurBufferDesc->prev->word0 = (uint32_t) rxCurBufferDesc; //Read the status flags of the previous descriptor temp = rxCurBufferDesc->prev->word3 & (CPSW_RX_WORD3_SOP | CPSW_RX_WORD3_EOP | CPSW_RX_WORD3_OWNER | CPSW_RX_WORD3_EOQ); //Misqueued buffer condition? if(temp == (CPSW_RX_WORD3_SOP | CPSW_RX_WORD3_EOP | CPSW_RX_WORD3_EOQ)) { //The host acts on the misqueued buffer condition by writing the added //buffer descriptor address to the appropriate RX DMA head descriptor //pointer CPSW_CPDMA_RX_HDP_R(CPSW_CH0) = (uint32_t) rxCurBufferDesc; } //Write the RX completion pointer CPSW_CPDMA_RX_CP_R(CPSW_CH0) = (uint32_t) rxCurBufferDesc; //Point to the next descriptor in the list rxCurBufferDesc = rxCurBufferDesc->next; } else { //No more data in the receive buffer error = ERROR_BUFFER_EMPTY; } //Check whether a valid packet has been received if(!error) { NetRxAncillary ancillary; //Additional options can be passed to the stack along with the packet ancillary = NET_DEFAULT_RX_ANCILLARY; //Pass the packet to the upper layer nicProcessPacket(interface, buffer, n, &ancillary); } //No more data in the receive buffer?
} while(error != ERROR_BUFFER_EMPTY); //Re-enable RX interrupts CPSW_WR_C_RX_EN_R(CPSW_CORE0) |= (1 << CPSW_CH0); } /** * @brief Send a packet (port 1) * @param[in] interface Underlying network interface * @param[in] buffer Multi-part buffer containing the data to send * @param[in] offset Offset to the first data byte * @param[in] ancillary Additional options passed to the stack along with * the packet * @return Error code **/ error_t am335xEthSendPacketPort1(NetInterface *interface, const NetBuffer *buffer, size_t offset, NetTxAncillary *ancillary) { static uint8_t temp[AM335X_ETH_TX_BUFFER_SIZE]; size_t length; uint32_t value; //Retrieve the length of the packet length = netBufferGetLength(buffer) - offset; //Check the frame length if(length > AM335X_ETH_TX_BUFFER_SIZE) { //The transmitter can accept another packet osSetEvent(&interface->nicTxEvent); //Report an error return ERROR_INVALID_LENGTH; } //Make sure the current buffer is available for writing if((txCurBufferDesc1->word3 & CPSW_TX_WORD3_OWNER) != 0) { return ERROR_FAILURE; } //Mark the end of the queue with a NULL pointer txCurBufferDesc1->word0 = (uint32_t) NULL; //Copy user data to the transmit buffer netBufferRead(temp, buffer, offset, length); osMemcpy((uint8_t *) txCurBufferDesc1->word1, temp, (length + 3) & ~3UL); //Set the length of the buffer txCurBufferDesc1->word2 = length & CPSW_TX_WORD2_BUFFER_LENGTH; //Set the length of the packet value = length & CPSW_TX_WORD3_PACKET_LENGTH; //Set SOP and EOP flags as the data fits in a single buffer value |= CPSW_TX_WORD3_SOP | CPSW_TX_WORD3_EOP; //Redirect the packet to the relevant port number value |= CPSW_TX_WORD3_TO_PORT_EN | CPSW_TX_WORD3_TO_PORT_1; //Give the ownership of the descriptor to the DMA txCurBufferDesc1->word3 = CPSW_TX_WORD3_OWNER | value; //Link the current descriptor to the previous descriptor txCurBufferDesc1->prev->word0 = (uint32_t) txCurBufferDesc1; //Read the status flags of the previous descriptor value = txCurBufferDesc1->prev->word3 & (CPSW_TX_WORD3_SOP | CPSW_TX_WORD3_EOP | CPSW_TX_WORD3_OWNER | CPSW_TX_WORD3_EOQ); //Misqueued buffer condition? 
if(value == (CPSW_TX_WORD3_SOP | CPSW_TX_WORD3_EOP | CPSW_TX_WORD3_EOQ)) { //Clear the misqueued buffer condition txCurBufferDesc1->prev->word3 = 0; //The host corrects the misqueued buffer condition by writing the //misqueued packets buffer descriptor address to the appropriate //TX DMA head descriptor pointer CPSW_CPDMA_TX_HDP_R(CPSW_CH1) = (uint32_t) txCurBufferDesc1; } //Point to the next descriptor in the list txCurBufferDesc1 = txCurBufferDesc1->next; //Check whether the next buffer is available for writing if((txCurBufferDesc1->word3 & CPSW_TX_WORD3_OWNER) == 0) { //The transmitter can accept another packet osSetEvent(&interface->nicTxEvent); } //Data successfully written return NO_ERROR; } /** * @brief Send a packet (port 2) * @param[in] interface Underlying network interface * @param[in] buffer Multi-part buffer containing the data to send * @param[in] offset Offset to the first data byte * @param[in] ancillary Additional options passed to the stack along with * the packet * @return Error code **/ error_t am335xEthSendPacketPort2(NetInterface *interface, const NetBuffer *buffer, size_t offset, NetTxAncillary *ancillary) { static uint8_t temp[AM335X_ETH_TX_BUFFER_SIZE]; size_t length; uint32_t value; //Retrieve the length of the packet length = netBufferGetLength(buffer) - offset; //Check the frame length if(length > AM335X_ETH_TX_BUFFER_SIZE) { //The transmitter can accept another packet osSetEvent(&interface->nicTxEvent); //Report an error return ERROR_INVALID_LENGTH; } //Make sure the current buffer is available for writing if((txCurBufferDesc2->word3 & CPSW_TX_WORD3_OWNER) != 0) { return ERROR_FAILURE; } //Mark the end of the queue with a NULL pointer txCurBufferDesc2->word0 = (uint32_t) NULL; //Copy user data to the transmit buffer netBufferRead(temp, buffer, offset, length); osMemcpy((uint8_t *) txCurBufferDesc2->word1, temp, (length + 3) & ~3UL); //Set the length of the buffer txCurBufferDesc2->word2 = length & CPSW_TX_WORD2_BUFFER_LENGTH; //Set the length of the packet value = length & CPSW_TX_WORD3_PACKET_LENGTH; //Set SOP and EOP flags as the data fits in a single buffer value |= CPSW_TX_WORD3_SOP | CPSW_TX_WORD3_EOP; //Redirect the packet to the relevant port number value |= CPSW_TX_WORD3_TO_PORT_EN | CPSW_TX_WORD3_TO_PORT_2; //Give the ownership of the descriptor to the DMA txCurBufferDesc2->word3 = CPSW_TX_WORD3_OWNER | value; //Link the current descriptor to the previous descriptor txCurBufferDesc2->prev->word0 = (uint32_t) txCurBufferDesc2; //Read the status flags of the previous descriptor value = txCurBufferDesc2->prev->word3 & (CPSW_TX_WORD3_SOP | CPSW_TX_WORD3_EOP | CPSW_TX_WORD3_OWNER | CPSW_TX_WORD3_EOQ); //Misqueued buffer condition? 
if(value == (CPSW_TX_WORD3_SOP | CPSW_TX_WORD3_EOP | CPSW_TX_WORD3_EOQ)) { //Clear the misqueued buffer condition txCurBufferDesc2->prev->word3 = 0; //The host corrects the misqueued buffer condition by writing the //misqueued packets buffer descriptor address to the appropriate //TX DMA head descriptor pointer CPSW_CPDMA_TX_HDP_R(CPSW_CH2) = (uint32_t) txCurBufferDesc2; } //Point to the next descriptor in the list txCurBufferDesc2 = txCurBufferDesc2->next; //Check whether the next buffer is available for writing if((txCurBufferDesc2->word3 & CPSW_TX_WORD3_OWNER) == 0) { //The transmitter can accept another packet osSetEvent(&interface->nicTxEvent); } //Data successfully written return NO_ERROR; } /** * @brief Configure MAC address filtering * @param[in] interface Underlying network interface * @return Error code **/ error_t am335xEthUpdateMacAddrFilter(NetInterface *interface) { uint_t i; uint_t port; MacFilterEntry *entry; //Debug message TRACE_DEBUG("Updating AM335x ALE table...\r\n"); //Select the relevant port number if(interface == nicDriverInterface1) { port = CPSW_PORT1; } else if(interface == nicDriverInterface2) { port = CPSW_PORT2; } else { port = CPSW_PORT0; } //The MAC address filter contains the list of MAC addresses to accept //when receiving an Ethernet frame for(i = 0; i < MAC_ADDR_FILTER_SIZE; i++) { //Point to the current entry entry = &interface->macAddrFilter[i]; //Check whether the ALE table should be updated for the //current multicast address if(!macCompAddr(&entry->addr, &MAC_UNSPECIFIED_ADDR)) { if(entry->addFlag) { //Add VLAN/multicast address entry to the ALE table am335xEthAddVlanAddrEntry(port, port, &entry->addr); } else if(entry->deleteFlag) { //Remove VLAN/multicast address entry from the ALE table am335xEthDeleteVlanAddrEntry(port, port, &entry->addr); } } } //Successful processing return NO_ERROR; } /** * @brief Adjust MAC configuration parameters for proper operation * @param[in] interface Underlying network interface * @return Error code **/ error_t am335xEthUpdateMacConfig(NetInterface *interface) { uint32_t config = 0; //Read MAC control register if(interface == nicDriverInterface1) { config = CPSW_SL1_MACCONTROL_R; } else if(interface == nicDriverInterface2) { config = CPSW_SL2_MACCONTROL_R; } //1000BASE-T operation mode? if(interface->linkSpeed == NIC_LINK_SPEED_1GBPS) { config |= CPSW_SL_MACCONTROL_GIG; config &= ~(CPSW_SL_MACCONTROL_IFCTL_A | CPSW_SL_MACCONTROL_IFCTL_B); } //100BASE-TX operation mode? else if(interface->linkSpeed == NIC_LINK_SPEED_100MBPS) { config &= ~CPSW_SL_MACCONTROL_GIG; config |= CPSW_SL_MACCONTROL_IFCTL_A | CPSW_SL_MACCONTROL_IFCTL_B; } //10BASE-T operation mode? else { config &= ~CPSW_SL_MACCONTROL_GIG; config &= ~(CPSW_SL_MACCONTROL_IFCTL_A | CPSW_SL_MACCONTROL_IFCTL_B); } //Half-duplex or full-duplex mode? if(interface->duplexMode == NIC_FULL_DUPLEX_MODE) { config |= CPSW_SL_MACCONTROL_FULLDUPLEX; } else { config &= ~CPSW_SL_MACCONTROL_FULLDUPLEX; } //Update MAC control register if(interface == nicDriverInterface1) { CPSW_SL1_MACCONTROL_R = config; } else if(interface == nicDriverInterface2) { CPSW_SL2_MACCONTROL_R = config; } //Successful processing return NO_ERROR; } /** * @brief Write PHY register * @param[in] opcode Access type (2 bits) * @param[in] phyAddr PHY address (5 bits) * @param[in] regAddr Register address (5 bits) * @param[in] data Register value **/ void am335xEthWritePhyReg(uint8_t opcode, uint8_t phyAddr, uint8_t regAddr, uint16_t data) { uint32_t temp; //Valid opcode? 
if(opcode == SMI_OPCODE_WRITE) { //Set up a write operation temp = MDIO_USERACCESS0_GO | MDIO_USERACCESS0_WRITE; //PHY address temp |= (phyAddr << MDIO_USERACCESS0_PHYADR_SHIFT) & MDIO_USERACCESS0_PHYADR; //Register address temp |= (regAddr << MDIO_USERACCESS0_REGADR_SHIFT) & MDIO_USERACCESS0_REGADR; //Register value temp |= data & MDIO_USERACCESS0_DATA; //Start a write operation MDIO_USERACCESS0_R = temp; //Wait for the write to complete while((MDIO_USERACCESS0_R & MDIO_USERACCESS0_GO) != 0) { } } else { //The MAC peripheral only supports standard Clause 22 opcodes } } /** * @brief Read PHY register * @param[in] opcode Access type (2 bits) * @param[in] phyAddr PHY address (5 bits) * @param[in] regAddr Register address (5 bits) * @return Register value **/ uint16_t am335xEthReadPhyReg(uint8_t opcode, uint8_t phyAddr, uint8_t regAddr) { uint16_t data; uint32_t temp; //Valid opcode? if(opcode == SMI_OPCODE_READ) { //Set up a read operation temp = MDIO_USERACCESS0_GO | MDIO_USERACCESS0_READ; //PHY address temp |= (phyAddr << MDIO_USERACCESS0_PHYADR_SHIFT) & MDIO_USERACCESS0_PHYADR; //Register address temp |= (regAddr << MDIO_USERACCESS0_REGADR_SHIFT) & MDIO_USERACCESS0_REGADR; //Start a read operation MDIO_USERACCESS0_R = temp; //Wait for the read to complete while((MDIO_USERACCESS0_R & MDIO_USERACCESS0_GO) != 0) { } //Get register value data = MDIO_USERACCESS0_R & MDIO_USERACCESS0_DATA; } else { //The MAC peripheral only supports standard Clause 22 opcodes data = 0; } //Return the value of the PHY register return data; } /** * @brief Write an ALE table entry * @param[in] index Entry index * @param[in] entry Pointer to the ALE table entry **/ void am335xEthWriteEntry(uint_t index, const Am335xAleEntry *entry) { //Copy the content of the entry to be written CPSW_ALE_TBLW_R(2) = entry->word2; CPSW_ALE_TBLW_R(1) = entry->word1; CPSW_ALE_TBLW_R(0) = entry->word0; //Write the ALE entry at the specified index CPSW_ALE_TBLCTL_R = CPSW_ALE_TBLCTL_WRITE_RDZ | index; } /** * @brief Read an ALE table entry * @param[in] index Entry index * @param[out] entry Pointer to the ALE table entry **/ void am335xEthReadEntry(uint_t index, Am335xAleEntry *entry) { //Read the ALE entry at the specified index CPSW_ALE_TBLCTL_R = index; //Copy the content of the entry entry->word2 = CPSW_ALE_TBLW_R(2); entry->word1 = CPSW_ALE_TBLW_R(1); entry->word0 = CPSW_ALE_TBLW_R(0); } /** * @brief Find a free entry in the ALE table * @return Index of the first free entry **/ uint_t am335xEthFindFreeEntry(void) { uint_t index; uint32_t type; Am335xAleEntry entry; //Loop through the ALE table entries for(index = 0; index < CPSW_ALE_MAX_ENTRIES; index++) { //Read the current entry am335xEthReadEntry(index, &entry); //Retrieve the type of the ALE entry type = entry.word1 & CPSW_ALE_WORD1_ENTRY_TYPE_MASK; //Free entry? 
if(type == CPSW_ALE_WORD1_ENTRY_TYPE_FREE) { //Exit immediately break; } } //Return the index of the entry return index; } /** * @brief Search the ALE table for the specified VLAN entry * @param[in] vlanId VLAN identifier * @return Index of the matching entry **/ uint_t am335xEthFindVlanEntry(uint_t vlanId) { uint_t index; uint32_t value; Am335xAleEntry entry; //Loop through the ALE table entries for(index = 0; index < CPSW_ALE_MAX_ENTRIES; index++) { //Read the current entry am335xEthReadEntry(index, &entry); //Retrieve the type of the ALE entry value = entry.word1 & CPSW_ALE_WORD1_ENTRY_TYPE_MASK; //Check the type of the ALE entry if(value == CPSW_ALE_WORD1_ENTRY_TYPE_VLAN_ADDR) { //Get the VLAN identifier value = entry.word1 & CPSW_ALE_WORD1_VLAN_ID_MASK; //Compare the VLAN identifier if(value == CPSW_ALE_WORD1_VLAN_ID(vlanId)) { //Matching ALE entry found break; } } } //Return the index of the entry return index; } /** * @brief Search the ALE table for the specified VLAN/address entry * @param[in] vlanId VLAN identifier * @param[in] macAddr MAC address * @return Index of the matching entry **/ uint_t am335xEthFindVlanAddrEntry(uint_t vlanId, MacAddr *macAddr) { uint_t index; uint32_t value; Am335xAleEntry entry; //Loop through the ALE table entries for(index = 0; index < CPSW_ALE_MAX_ENTRIES; index++) { //Read the current entry am335xEthReadEntry(index, &entry); //Retrieve the type of the ALE entry value = entry.word1 & CPSW_ALE_WORD1_ENTRY_TYPE_MASK; //Check the type of the ALE entry if(value == CPSW_ALE_WORD1_ENTRY_TYPE_VLAN_ADDR) { //Get the VLAN identifier value = entry.word1 & CPSW_ALE_WORD1_VLAN_ID_MASK; //Compare the VLAN identifier if(value == CPSW_ALE_WORD1_VLAN_ID(vlanId)) { //Compare the MAC address if(macAddr->b[0] == (uint8_t) (entry.word1 >> 8) && macAddr->b[1] == (uint8_t) (entry.word1 >> 0) && macAddr->b[2] == (uint8_t) (entry.word0 >> 24) && macAddr->b[3] == (uint8_t) (entry.word0 >> 16) && macAddr->b[4] == (uint8_t) (entry.word0 >> 8) && macAddr->b[5] == (uint8_t) (entry.word0 >> 0)) { //Matching ALE entry found break; } } } } //Return the index of the entry return index; } /** * @brief Add a VLAN entry in the ALE table * @param[in] port Port number * @param[in] vlanId VLAN identifier * @return Error code **/ error_t am335xEthAddVlanEntry(uint_t port, uint_t vlanId) { error_t error; uint_t index; Am335xAleEntry entry; //Ensure that there are no duplicate address entries in the ALE table index = am335xEthFindVlanEntry(vlanId); //No matching entry found? 
if(index >= CPSW_ALE_MAX_ENTRIES) { //Find a free entry in the ALE table index = am335xEthFindFreeEntry(); } //Sanity check if(index < CPSW_ALE_MAX_ENTRIES) { //Set up a VLAN table entry entry.word2 = 0; entry.word1 = CPSW_ALE_WORD1_ENTRY_TYPE_VLAN; entry.word0 = 0; //Set VLAN identifier entry.word1 |= CPSW_ALE_WORD1_VLAN_ID(vlanId); //Force the packet VLAN tag to be removed on egress entry.word0 |= CPSW_ALE_WORD0_FORCE_UNTAG_EGRESS(1 << port) | CPSW_ALE_WORD0_FORCE_UNTAG_EGRESS(1 << CPSW_PORT0); //Set VLAN member list entry.word0 |= CPSW_ALE_WORD0_VLAN_MEMBER_LIST(1 << port) | CPSW_ALE_WORD0_VLAN_MEMBER_LIST(1 << CPSW_PORT0); //Add a new entry to the ALE table am335xEthWriteEntry(index, &entry); //Sucessful processing error = NO_ERROR; } else { //The ALE table is full error = ERROR_FAILURE; } //Return status code return error; } /** * @brief Add a VLAN/address entry in the ALE table * @param[in] port Port number * @param[in] vlanId VLAN identifier * @param[in] macAddr MAC address * @return Error code **/ error_t am335xEthAddVlanAddrEntry(uint_t port, uint_t vlanId, MacAddr *macAddr) { error_t error; uint_t index; Am335xAleEntry entry; //Ensure that there are no duplicate address entries in the ALE table index = am335xEthFindVlanAddrEntry(vlanId, macAddr); //No matching entry found? if(index >= CPSW_ALE_MAX_ENTRIES) { //Find a free entry in the ALE table index = am335xEthFindFreeEntry(); } //Sanity check if(index < CPSW_ALE_MAX_ENTRIES) { //Set up a VLAN/address table entry entry.word2 = 0; entry.word1 = CPSW_ALE_WORD1_ENTRY_TYPE_VLAN_ADDR; entry.word0 = 0; //Multicast address? if(macIsMulticastAddr(macAddr)) { //Set port mask entry.word2 |= CPSW_ALE_WORD2_SUPER | CPSW_ALE_WORD2_PORT_LIST(1 << port) | CPSW_ALE_WORD2_PORT_LIST(1 << CPSW_CH0); //Set multicast forward state entry.word1 |= CPSW_ALE_WORD1_MCAST_FWD_STATE(0); } //Set VLAN identifier entry.word1 |= CPSW_ALE_WORD1_VLAN_ID(vlanId); //Copy the upper 16 bits of the unicast address entry.word1 |= (macAddr->b[0] << 8) | macAddr->b[1]; //Copy the lower 32 bits of the unicast address entry.word0 |= (macAddr->b[2] << 24) | (macAddr->b[3] << 16) | (macAddr->b[4] << 8) | macAddr->b[5]; //Add a new entry to the ALE table am335xEthWriteEntry(index, &entry); //Sucessful processing error = NO_ERROR; } else { //The ALE table is full error = ERROR_FAILURE; } //Return status code return error; } /** * @brief Remove a VLAN/address entry from the ALE table * @param[in] port Port number * @param[in] vlanId VLAN identifier * @param[in] macAddr MAC address * @return Error code **/ error_t am335xEthDeleteVlanAddrEntry(uint_t port, uint_t vlanId, MacAddr *macAddr) { error_t error; uint_t index; Am335xAleEntry entry; //Search the ALE table for the specified VLAN/address entry index = am335xEthFindVlanAddrEntry(vlanId, macAddr); //Matching ALE entry found? if(index < CPSW_ALE_MAX_ENTRIES) { //Clear the contents of the entry entry.word2 = 0; entry.word1 = 0; entry.word0 = 0; //Update the ALE table am335xEthWriteEntry(index, &entry); //Sucessful processing error = NO_ERROR; } else { //Entry not found error = ERROR_NOT_FOUND; } //Return status code return error; }
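A short usage sketch showing how the add and delete helpers above are paired when a multicast filter entry changes state, mirroring the loop body of am335xEthUpdateMacAddrFilter in this same file; the wrapper function name is illustrative.

//Apply one MAC filter change to the ALE table, as the filter update loop does
void exampleUpdateFilterEntry(uint_t port, MacFilterEntry *entry)
{
   if(entry->addFlag)
   {
      //Install a VLAN/multicast address entry for this port
      am335xEthAddVlanAddrEntry(port, port, &entry->addr);
   }
   else if(entry->deleteFlag)
   {
      //Remove the stale VLAN/multicast address entry
      am335xEthDeleteVlanAddrEntry(port, port, &entry->addr);
   }
}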
/** * @file am335x_eth_driver.c * @brief Sitara AM335x Gigabit Ethernet MAC driver * * @section License * * SPDX-License-Identifier: GPL-2.0-or-later * * Copyright (C) 2010-2021 Oryx Embedded SARL. All rights reserved. * * This file is part of CycloneTCP Open. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. * * @author Oryx Embedded SARL (www.oryx-embedded.com) * @version 2.0.2 **/ //Switch to the appropriate trace level #define TRACE_LEVEL NIC_TRACE_LEVEL //Dependencies #include "soc_am335x.h" #include "hw_types.h" #include "hw_cm_per.h" #include "hw_control_am335x.h" #include "hw_cpsw_ale.h" #include "hw_cpsw_cpdma.h" #include "hw_cpsw_port.h" #include "hw_cpsw_sl.h" #include "hw_cpsw_ss.h" #include "hw_cpsw_wr.h" #include "hw_mdio.h" #include "interrupt.h" #include "core/net.h" #include "drivers/mac/am335x_eth_driver.h" #include "debug.h" //MDIO input clock frequency #define MDIO_INPUT_CLK 125000000 //MDIO output clock frequency #define MDIO_OUTPUT_CLK 1000000 //Underlying network interface (port 1) static NetInterface *nicDriverInterface1 = NULL; //Underlying network interface (port 2) static NetInterface *nicDriverInterface2 = NULL; //IAR EWARM compiler? #if defined(__ICCARM__) //Transmit buffer (port 1) #pragma data_alignment = 4 #pragma location = AM335X_ETH_RAM_SECTION static uint8_t txBuffer1[AM335X_ETH_TX_BUFFER_COUNT][AM335X_ETH_TX_BUFFER_SIZE]; //Transmit buffer (port 2) #pragma data_alignment = 4 #pragma location = AM335X_ETH_RAM_SECTION static uint8_t txBuffer2[AM335X_ETH_TX_BUFFER_COUNT][AM335X_ETH_TX_BUFFER_SIZE]; //Receive buffer #pragma data_alignment = 4 #pragma location = AM335X_ETH_RAM_SECTION static uint8_t rxBuffer[AM335X_ETH_RX_BUFFER_COUNT][AM335X_ETH_RX_BUFFER_SIZE]; //Transmit buffer descriptors (port 1) #pragma data_alignment = 4 #pragma location = AM335X_ETH_RAM_CPPI_SECTION static Am335xTxBufferDesc txBufferDesc1[AM335X_ETH_TX_BUFFER_COUNT]; //Transmit buffer descriptors (port 2) #pragma data_alignment = 4 #pragma location = AM335X_ETH_RAM_CPPI_SECTION static Am335xTxBufferDesc txBufferDesc2[AM335X_ETH_TX_BUFFER_COUNT]; //Receive buffer descriptors #pragma data_alignment = 4 #pragma location = AM335X_ETH_RAM_CPPI_SECTION static Am335xRxBufferDesc rxBufferDesc[AM335X_ETH_RX_BUFFER_COUNT]; //GCC compiler? 
#else //Transmit buffer (port 1) static uint8_t txBuffer1[AM335X_ETH_TX_BUFFER_COUNT][AM335X_ETH_TX_BUFFER_SIZE] __attribute__((aligned(4), __section__(AM335X_ETH_RAM_SECTION))); //Transmit buffer (port 2) static uint8_t txBuffer2[AM335X_ETH_TX_BUFFER_COUNT][AM335X_ETH_TX_BUFFER_SIZE] __attribute__((aligned(4), __section__(AM335X_ETH_RAM_SECTION))); //Receive buffer static uint8_t rxBuffer[AM335X_ETH_RX_BUFFER_COUNT][AM335X_ETH_RX_BUFFER_SIZE] __attribute__((aligned(4), __section__(AM335X_ETH_RAM_SECTION))); //Transmit buffer descriptors (port 1) static Am335xTxBufferDesc txBufferDesc1[AM335X_ETH_TX_BUFFER_COUNT] __attribute__((aligned(4), __section__(AM335X_ETH_RAM_CPPI_SECTION))); //Transmit buffer descriptors (port 2) static Am335xTxBufferDesc txBufferDesc2[AM335X_ETH_TX_BUFFER_COUNT] __attribute__((aligned(4), __section__(AM335X_ETH_RAM_CPPI_SECTION))); //Receive buffer descriptors static Am335xRxBufferDesc rxBufferDesc[AM335X_ETH_RX_BUFFER_COUNT] __attribute__((aligned(4), __section__(AM335X_ETH_RAM_CPPI_SECTION))); #endif //Pointer to the current TX buffer descriptor (port1) static Am335xTxBufferDesc *txCurBufferDesc1; //Pointer to the current TX buffer descriptor (port 2) static Am335xTxBufferDesc *txCurBufferDesc2; //Pointer to the current RX buffer descriptor static Am335xRxBufferDesc *rxCurBufferDesc; /** * @brief AM335x Ethernet MAC driver (port1) **/ const NicDriver am335xEthPort1Driver = { NIC_TYPE_ETHERNET, ETH_MTU, am335xEthInitPort1, am335xEthTick, am335xEthEnableIrq, am335xEthDisableIrq, am335xEthEventHandler, am335xEthSendPacketPort1, am335xEthUpdateMacAddrFilter, am335xEthUpdateMacConfig, am335xEthWritePhyReg, am335xEthReadPhyReg, FALSE, TRUE, TRUE, FALSE }; /** * @brief AM335x Ethernet MAC driver (port2) **/ const NicDriver am335xEthPort2Driver = { NIC_TYPE_ETHERNET, ETH_MTU, am335xEthInitPort2, am335xEthTick, am335xEthEnableIrq, am335xEthDisableIrq, am335xEthEventHandler, am335xEthSendPacketPort2, am335xEthUpdateMacAddrFilter, am335xEthUpdateMacConfig, am335xEthWritePhyReg, am335xEthReadPhyReg, FALSE, TRUE, TRUE, FALSE }; /** * @brief AM335x Ethernet MAC initialization (port 1) * @param[in] interface Underlying network interface * @return Error code **/ error_t am335xEthInitPort1(NetInterface *interface) { error_t error; uint32_t temp; //Debug message TRACE_INFO("Initializing AM335x Ethernet MAC (port 1)...\r\n"); //Initialize CPSW instance am335xEthInitInstance(interface); //Save underlying network interface nicDriverInterface1 = interface; //Valid Ethernet PHY or switch driver? if(interface->phyDriver != NULL) { //Ethernet PHY initialization error = interface->phyDriver->init(interface); } else if(interface->switchDriver != NULL) { //Ethernet switch initialization error = interface->switchDriver->init(interface); } else { //The interface is not properly configured error = ERROR_FAILURE; } //Any error to report? if(error) { return error; } //Unspecifield MAC address? 
if(macCompAddr(&interface->macAddr, &MAC_UNSPECIFIED_ADDR)) { //Use the factory preprogrammed MAC address interface->macAddr.b[0] = CONTROL_MAC_ID_HI_R(0) >> CONTROL_MAC_ID0_HI_MACADDR_47_40_SHIFT; interface->macAddr.b[1] = CONTROL_MAC_ID_HI_R(0) >> CONTROL_MAC_ID0_HI_MACADDR_39_32_SHIFT; interface->macAddr.b[2] = CONTROL_MAC_ID_HI_R(0) >> CONTROL_MAC_ID0_HI_MACADDR_31_24_SHIFT; interface->macAddr.b[3] = CONTROL_MAC_ID_HI_R(0) >> CONTROL_MAC_ID0_HI_MACADDR_23_16_SHIFT; interface->macAddr.b[4] = CONTROL_MAC_ID_LO_R(0) >> CONTROL_MAC_ID0_LO_MACADDR_15_8_SHIFT; interface->macAddr.b[5] = CONTROL_MAC_ID_LO_R(0) >> CONTROL_MAC_ID0_LO_MACADDR_7_0_SHIFT; //Generate the 64-bit interface identifier macAddrToEui64(&interface->macAddr, &interface->eui64); } //Set port state to forward temp = CPSW_ALE_PORTCTL_R(1) & ~CPSW_ALE_PORTCTL1_PORT_STATE; CPSW_ALE_PORTCTL_R(1) = temp | CPSW_ALE_PORTCTL_PORT_STATE_FORWARD; //Set the MAC address of the station CPSW_PORT1_SA_HI_R = interface->macAddr.w[0] | (interface->macAddr.w[1] << 16); CPSW_PORT1_SA_LO_R = interface->macAddr.w[2]; //Configure VLAN identifier and VLAN priority CPSW_PORT1_PORT_VLAN_R = (0 << CPSW_PORT_P1_PORT_VLAN_PORT_PRI_SHIFT) | (CPSW_PORT1 << CPSW_PORT_P1_PORT_VLAN_PORT_VID_SHIFT); //Add a VLAN entry in the ALE table am335xEthAddVlanEntry(CPSW_PORT1, CPSW_PORT1); //Add a VLAN/unicast address entry in the ALE table am335xEthAddVlanAddrEntry(CPSW_PORT1, CPSW_PORT1, &interface->macAddr); //Enable CPSW statistics CPSW_SS_STAT_PORT_EN_R |= CPSW_SS_STAT_PORT_EN_P1_STAT_EN; //Enable TX and RX CPSW_SL1_MACCONTROL_R = CPSW_SL_MACCONTROL_GMII_EN; //Accept any packets from the upper layer osSetEvent(&interface->nicTxEvent); //Successful initialization return NO_ERROR; } /** * @brief AM335x Ethernet MAC initialization (port 2) * @param[in] interface Underlying network interface * @return Error code **/ error_t am335xEthInitPort2(NetInterface *interface) { error_t error; uint32_t temp; //Debug message TRACE_INFO("Initializing AM335x Ethernet MAC (port 2)...\r\n"); //Initialize CPSW instance am335xEthInitInstance(interface); //Save underlying network interface nicDriverInterface2 = interface; //PHY transceiver initialization error = interface->phyDriver->init(interface); //Failed to initialize PHY transceiver? if(error) { return error; } //Unspecifield MAC address? 
if(macCompAddr(&interface->macAddr, &MAC_UNSPECIFIED_ADDR)) { //Use the factory preprogrammed MAC address interface->macAddr.b[0] = CONTROL_MAC_ID_HI_R(1) >> CONTROL_MAC_ID1_HI_MACADDR_47_40_SHIFT; interface->macAddr.b[1] = CONTROL_MAC_ID_HI_R(1) >> CONTROL_MAC_ID1_HI_MACADDR_39_32_SHIFT; interface->macAddr.b[2] = CONTROL_MAC_ID_HI_R(1) >> CONTROL_MAC_ID1_HI_MACADDR_31_24_SHIFT; interface->macAddr.b[3] = CONTROL_MAC_ID_HI_R(1) >> CONTROL_MAC_ID1_HI_MACADDR_23_16_SHIFT; interface->macAddr.b[4] = CONTROL_MAC_ID_LO_R(1) >> CONTROL_MAC_ID1_LO_MACADDR_15_8_SHIFT; interface->macAddr.b[5] = CONTROL_MAC_ID_LO_R(1) >> CONTROL_MAC_ID1_LO_MACADDR_7_0_SHIFT; //Generate the 64-bit interface identifier macAddrToEui64(&interface->macAddr, &interface->eui64); } //Set port state to forward temp = CPSW_ALE_PORTCTL_R(2) & ~CPSW_ALE_PORTCTL2_PORT_STATE; CPSW_ALE_PORTCTL_R(2) = temp | CPSW_ALE_PORTCTL_PORT_STATE_FORWARD; //Set the MAC address of the station CPSW_PORT2_SA_HI_R = interface->macAddr.w[0] | (interface->macAddr.w[1] << 16); CPSW_PORT2_SA_LO_R = interface->macAddr.w[2]; //Configure VLAN identifier and VLAN priority CPSW_PORT2_PORT_VLAN_R = (0 << CPSW_PORT_P2_PORT_VLAN_PORT_PRI_SHIFT) | (CPSW_PORT2 << CPSW_PORT_P2_PORT_VLAN_PORT_VID_SHIFT); //Add a VLAN entry in the ALE table am335xEthAddVlanEntry(CPSW_PORT2, CPSW_PORT2); //Add a VLAN/unicast address entry in the ALE table am335xEthAddVlanAddrEntry(CPSW_PORT2, CPSW_PORT2, &interface->macAddr); //Enable CPSW statistics CPSW_SS_STAT_PORT_EN_R |= CPSW_SS_STAT_PORT_EN_P2_STAT_EN; //Enable TX and RX CPSW_SL2_MACCONTROL_R = CPSW_SL_MACCONTROL_GMII_EN; //Accept any packets from the upper layer osSetEvent(&interface->nicTxEvent); //Successful initialization return NO_ERROR; } /** * @brief Initialize CPSW instance * @param[in] interface Underlying network interface **/ void am335xEthInitInstance(NetInterface *interface) { uint_t i; uint32_t temp; #ifdef ti_sysbios_BIOS___VERS Hwi_Params hwiParams; #endif //Initialization sequence is performed once if(nicDriverInterface1 == NULL && nicDriverInterface2 == NULL) { //Select the interface mode (MII/RMII/RGMII) and configure pin muxing am335xEthInitGpio(interface); //Enable the CPSW subsystem clocks CM_PER_CPGMAC0_CLKCTRL_R = CM_PER_CPGMAC0_CLKCTRL_MODULEMODE_ENABLE; //Wait for the CPSW module to be fully functional do { //Get module idle status temp = (CM_PER_CPGMAC0_CLKCTRL_R & CM_PER_CPGMAC0_CLKCTRL_IDLEST) >> CM_PER_CPGMAC0_CLKCTRL_IDLEST_SHIFT; //Keep looping as long as the module is not fully functional } while(temp != CM_PER_CPGMAC0_CLKCTRL_IDLEST_FUNC); //Start a software forced wake-up transition CM_PER_CPSW_CLKSTCTRL_R = CM_PER_CPSW_CLKSTCTRL_CLKTRCTRL_SW_WKUP; //Wait for the CPSW 125 MHz OCP clock to be active do { //Get the state of the CPSW 125 MHz OCP clock temp = (CM_PER_CPSW_CLKSTCTRL_R & CM_PER_CPSW_CLKSTCTRL_CLKACTIVITY_CPSW_125MHZ_GCLK) >> CM_PER_CPSW_CLKSTCTRL_CLKACTIVITY_CPSW_125MHZ_GCLK_SHIFT; //Keep looping as long as the clock is inactive } while(temp != CM_PER_CPSW_CLKSTCTRL_CLKACTIVITY_CPSW_125MHZ_GCLK_ACT); //Reset CPSW subsystem CPSW_SS_SOFT_RESET_R = CPSW_SS_SOFT_RESET_SOFT_RESET; //Wait for the reset to complete while((CPSW_SS_SOFT_RESET_R & CPSW_SS_SOFT_RESET_SOFT_RESET) != 0) { } //Reset CPSW wrapper module CPSW_WR_SOFT_RESET_R = CPSW_WR_SOFT_RESET_SOFT_RESET; //Wait for the reset to complete while((CPSW_WR_SOFT_RESET_R & CPSW_WR_SOFT_RESET_SOFT_RESET) != 0) { } //Reset CPSW sliver 1 logic CPSW_SL1_SOFT_RESET_R = CPSW_SL_SOFT_RESET_SOFT_RESET; //Wait for the reset to complete 
while((CPSW_SL1_SOFT_RESET_R & CPSW_SL_SOFT_RESET_SOFT_RESET) != 0) { } //Reset CPSW sliver 2 logic CPSW_SL2_SOFT_RESET_R = CPSW_SL_SOFT_RESET_SOFT_RESET; //Wait for the reset to complete while((CPSW_SL2_SOFT_RESET_R & CPSW_SL_SOFT_RESET_SOFT_RESET) != 0) { } //Reset CPSW CPDMA module CPSW_CPDMA_CPDMA_SOFT_RESET_R = CPSW_CPDMA_CPDMA_SOFT_RESET_SOFT_RESET; //Wait for the reset to complete while((CPSW_CPDMA_CPDMA_SOFT_RESET_R & CPSW_CPDMA_CPDMA_SOFT_RESET_SOFT_RESET) != 0) { } //Initialize the HDPs and the CPs to NULL for(i = CPSW_CH0; i <= CPSW_CH7; i++) { //TX head descriptor pointer CPSW_CPDMA_TX_HDP_R(i) = 0; //TX completion pointer CPSW_CPDMA_TX_CP_R(i) = 0; //RX head descriptor pointer CPSW_CPDMA_RX_HDP_R(i) = 0; //RX completion pointer CPSW_CPDMA_RX_CP_R(i) = 0; } //Enable ALE and clear ALE address table CPSW_ALE_CONTROL_R = CPSW_ALE_CONTROL_ENABLE_ALE | CPSW_ALE_CONTROL_CLEAR_TABLE; //For dual MAC mode, configure VLAN aware mode CPSW_ALE_CONTROL_R |= CPSW_ALE_CONTROL_ALE_VLAN_AWARE; //Set dual MAC mode for port 0 temp = CPSW_PORT0_TX_IN_CTL_R & ~CPSW_PORT_P0_TX_IN_CTL_TX_IN_SEL; CPSW_PORT0_TX_IN_CTL_R = temp | CPSW_PORT_P0_TX_IN_CTL_TX_IN_DUAL_MAC; //Set port 0 state to forward temp = CPSW_ALE_PORTCTL_R(0) & ~CPSW_ALE_PORTCTL0_PORT_STATE; CPSW_ALE_PORTCTL_R(0) = temp | CPSW_ALE_PORTCTL_PORT_STATE_FORWARD; //Enable CPSW statistics CPSW_SS_STAT_PORT_EN_R = CPSW_SS_STAT_PORT_EN_P0_STAT_EN; //Configure TX and RX buffer descriptors am335xEthInitBufferDesc(interface); //Acknowledge TX and interrupts for proper interrupt pulsing CPSW_CPDMA_CPDMA_EOI_VECTOR_R = CPSW_CPDMA_EOI_VECTOR_TX_PULSE; CPSW_CPDMA_CPDMA_EOI_VECTOR_R = CPSW_CPDMA_EOI_VECTOR_RX_PULSE; //Enable channel 1 and 2 interrupts of the DMA engine CPSW_CPDMA_TX_INTMASK_SET_R = (1 << CPSW_CH1) | (1 << CPSW_CH2); //Enable TX completion interrupts CPSW_WR_C_TX_EN_R(CPSW_CORE0) |= (1 << CPSW_CH1) | (1 << CPSW_CH2); //Enable channel 0 interrupts of the DMA engine CPSW_CPDMA_RX_INTMASK_SET_R = (1 << CPSW_CH0); //Enable RX completion interrupts CPSW_WR_C_RX_EN_R(CPSW_CORE0) |= (1 << CPSW_CH0); #ifdef ti_sysbios_BIOS___VERS //Configure TX interrupt Hwi_Params_init(&hwiParams); hwiParams.enableInt = FALSE; hwiParams.priority = AM335X_ETH_IRQ_PRIORITY; //Register TX interrupt handler Hwi_create(SYS_INT_3PGSWTXINT0, (Hwi_FuncPtr) am335xEthTxIrqHandler, &hwiParams, NULL); //Configure RX interrupt Hwi_Params_init(&hwiParams); hwiParams.enableInt = FALSE; hwiParams.priority = AM335X_ETH_IRQ_PRIORITY; //Register RX interrupt handler Hwi_create(SYS_INT_3PGSWRXINT0, (Hwi_FuncPtr) am335xEthRxIrqHandler, &hwiParams, NULL); #else //Register interrupt handlers IntRegister(SYS_INT_3PGSWTXINT0, am335xEthTxIrqHandler); IntRegister(SYS_INT_3PGSWRXINT0, am335xEthRxIrqHandler); //Configure TX interrupt priority IntPrioritySet(SYS_INT_3PGSWTXINT0, AM335X_ETH_IRQ_PRIORITY, AINTC_HOSTINT_ROUTE_IRQ); //Configure RX interrupt priority IntPrioritySet(SYS_INT_3PGSWRXINT0, AM335X_ETH_IRQ_PRIORITY, AINTC_HOSTINT_ROUTE_IRQ); #endif //Enable the transmission and reception CPSW_CPDMA_TX_CONTROL_R = CPSW_CPDMA_TX_CONTROL_TX_EN; CPSW_CPDMA_RX_CONTROL_R = CPSW_CPDMA_RX_CONTROL_RX_EN; //Calculate the MDC clock divider to be used temp = (MDIO_INPUT_CLK / MDIO_OUTPUT_CLK) - 1; //Initialize MDIO interface MDIO_CONTROL_R = MDIO_CONTROL_ENABLE | MDIO_CONTROL_FAULTENB | (temp & MDIO_CONTROL_CLKDIV); } } //BeagleBone Black, TMDSSK3358, OSD3358-SM-RED or SBC DIVA board? 
#if defined(USE_BEAGLEBONE_BLACK) || defined(USE_TMDSSK3358) || \ defined(USE_OSD3358_SM_RED) || defined(USE_SBC_DIVA) /** * @brief GPIO configuration * @param[in] interface Underlying network interface **/ void am335xEthInitGpio(NetInterface *interface) { //BeagleBone Black board? #if defined(USE_BEAGLEBONE_BLACK) //Select MII interface mode for port 1 CONTROL_GMII_SEL_R = CONTROL_GMII_SEL_GMII1_SEL_MII; //Configure MII1_TX_CLK (GPIO3_9) CONTROL_CONF_MII1_TXCLK_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(0); //Configure MII1_TX_EN (GPIO3_3) CONTROL_CONF_MII1_TXEN_R = CONTROL_CONF_MUXMODE(0); //Configure MII1_TXD0 (GPIO0_28) CONTROL_CONF_MII1_TXD0_R = CONTROL_CONF_MUXMODE(0); //Configure MII1_TXD1 (GPIO0_21) CONTROL_CONF_MII1_TXD1_R = CONTROL_CONF_MUXMODE(0); //Configure MII1_TXD2 (GPIO0_17) CONTROL_CONF_MII1_TXD2_R = CONTROL_CONF_MUXMODE(0); //Configure MII1_TXD3 (GPIO0_16) CONTROL_CONF_MII1_TXD3_R = CONTROL_CONF_MUXMODE(0); //Configure MII1_RX_CLK (GPIO3_10) CONTROL_CONF_MII1_RXCLK_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(0); //Configure MII1_RXD0 (GPIO2_21) CONTROL_CONF_MII1_RXD0_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(0); //Configure MII1_RXD1 (GPIO2_20) CONTROL_CONF_MII1_RXD1_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(0); //Configure MII1_RXD2 (GPIO2_19) CONTROL_CONF_MII1_RXD2_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(0); //Configure MII1_RXD3 (GPIO2_18) CONTROL_CONF_MII1_RXD3_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(0); //Configure MII1_COL (GPIO3_0) CONTROL_CONF_MII1_COL_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(0); //Configure MII1_CRS (GPIO3_1) CONTROL_CONF_MII1_CRS_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(0); //Configure MII1_RX_ER (GPIO3_2) CONTROL_CONF_MII1_RXERR_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(0); //Configure MII1_RX_DV (GPIO3_4) CONTROL_CONF_MII1_RXDV_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(0); //Configure MDIO (GPIO0_0) CONTROL_CONF_MDIO_DATA_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_PULLUPSEL | CONTROL_CONF_MUXMODE(0); //Configure MDC (GPIO0_1) CONTROL_CONF_MDIO_CLK_R = CONTROL_CONF_PULLUPSEL | CONTROL_CONF_MUXMODE(0); //TMDSSK3358 board? 
#elif defined(USE_TMDSSK3358) //Select RGMII interface mode for both port 1 and port 2 CONTROL_GMII_SEL_R = CONTROL_GMII_SEL_GMII1_SEL_RGMII | CONTROL_GMII_SEL_GMII2_SEL_RGMII; //Configure RGMII1_TCLK (GPIO3_9) CONTROL_CONF_MII1_TXCLK_R = CONTROL_CONF_MUXMODE(2); //Configure RGMII1_TCTL (GPIO3_3) CONTROL_CONF_MII1_TXEN_R = CONTROL_CONF_MUXMODE(2); //Configure RGMII1_TD0 (GPIO0_28) CONTROL_CONF_MII1_TXD0_R = CONTROL_CONF_MUXMODE(2); //Configure RGMII1_TD1 (GPIO0_21) CONTROL_CONF_MII1_TXD1_R = CONTROL_CONF_MUXMODE(2); //Configure RGMII1_TD2 (GPIO0_17) CONTROL_CONF_MII1_TXD2_R = CONTROL_CONF_MUXMODE(2); //Configure RGMII1_TD3 (GPIO0_16) CONTROL_CONF_MII1_TXD3_R = CONTROL_CONF_MUXMODE(2); //Configure RGMII1_RCLK (GPIO3_10) CONTROL_CONF_MII1_RXCLK_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure RGMII1_RCTL (GPIO3_4) CONTROL_CONF_MII1_RXDV_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure RGMII1_RD0 (GPIO2_21) CONTROL_CONF_MII1_RXD0_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure /RGMII1_RD1 (GPIO2_20) CONTROL_CONF_MII1_RXD1_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure RGMII1_RD2 (GPIO2_19) CONTROL_CONF_MII1_RXD2_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure RGMII1_RD3 (GPIO2_18) CONTROL_CONF_MII1_RXD3_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure RGMII2_TCLK (GPIO1_22/GPMC_A6) CONTROL_CONF_GPMC_A_R(6) = CONTROL_CONF_MUXMODE(2); //Configure RGMII2_TCTL (GPIO1_16/GPMC_A0) CONTROL_CONF_GPMC_A_R(0) = CONTROL_CONF_MUXMODE(2); //Configure RGMII2_TD0 (GPIO1_21/GPMC_A5) CONTROL_CONF_GPMC_A_R(5) = CONTROL_CONF_MUXMODE(2); //Configure RGMII2_TD1 (GPIO1_20/GPMC_A4) CONTROL_CONF_GPMC_A_R(4) = CONTROL_CONF_MUXMODE(2); //Configure RGMII2_TD2 (GPIO1_19/GPMC_A3) CONTROL_CONF_GPMC_A_R(3) = CONTROL_CONF_MUXMODE(2); //Configure RGMII2_TD3 (GPIO1_18/GPMC_A2) CONTROL_CONF_GPMC_A_R(2) = CONTROL_CONF_MUXMODE(2); //Configure RGMII2_RCLK (GPIO1_23/GPMC_A7) CONTROL_CONF_GPMC_A_R(7) = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure RGMII2_RCTL (GPIO1_17/GPMC_A1) CONTROL_CONF_GPMC_A_R(1) = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure RGMII2_RD0 (GPIO1_27/GPMC_A11) CONTROL_CONF_GPMC_A_R(11) = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure RGMII2_RD1 (GPIO1_26/GPMC_A10) CONTROL_CONF_GPMC_A_R(10) = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure RGMII2_RD2 (GPIO1_25/GPMC_A9) CONTROL_CONF_GPMC_A_R(9) = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure RGMII2_RD3 (GPIO1_24/GPMC_A8) CONTROL_CONF_GPMC_A_R(8) = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure MDIO (GPIO0_0) CONTROL_CONF_MDIO_DATA_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_PULLUPSEL | CONTROL_CONF_MUXMODE(0); //Configure MDC (GPIO0_1) CONTROL_CONF_MDIO_CLK_R = CONTROL_CONF_PULLUPSEL | CONTROL_CONF_MUXMODE(0); //OSD3358-SM-RED board? 
#elif defined(USE_OSD3358_SM_RED) //Select RGMII interface mode for both port 1 CONTROL_GMII_SEL_R = CONTROL_GMII_SEL_GMII1_SEL_RGMII; //Configure RGMII1_TCLK (GPIO3_9) CONTROL_CONF_MII1_TXCLK_R = CONTROL_CONF_MUXMODE(2); //Configure RGMII1_TCTL (GPIO3_3) CONTROL_CONF_MII1_TXEN_R = CONTROL_CONF_MUXMODE(2); //Configure RGMII1_TD0 (GPIO0_28) CONTROL_CONF_MII1_TXD0_R = CONTROL_CONF_MUXMODE(2); //Configure RGMII1_TD1 (GPIO0_21) CONTROL_CONF_MII1_TXD1_R = CONTROL_CONF_MUXMODE(2); //Configure RGMII1_TD2 (GPIO0_17) CONTROL_CONF_MII1_TXD2_R = CONTROL_CONF_MUXMODE(2); //Configure RGMII1_TD3 (GPIO0_16) CONTROL_CONF_MII1_TXD3_R = CONTROL_CONF_MUXMODE(2); //Configure RGMII1_RCLK (GPIO3_10) CONTROL_CONF_MII1_RXCLK_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure RGMII1_RCTL (GPIO3_4) CONTROL_CONF_MII1_RXDV_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure RGMII1_RD0 (GPIO2_21) CONTROL_CONF_MII1_RXD0_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure /RGMII1_RD1 (GPIO2_20) CONTROL_CONF_MII1_RXD1_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure RGMII1_RD2 (GPIO2_19) CONTROL_CONF_MII1_RXD2_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure RGMII1_RD3 (GPIO2_18) CONTROL_CONF_MII1_RXD3_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure MDIO (GPIO0_0) CONTROL_CONF_MDIO_DATA_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_PULLUPSEL | CONTROL_CONF_MUXMODE(0); //Configure MDC (GPIO0_1) CONTROL_CONF_MDIO_CLK_R = CONTROL_CONF_PULLUPSEL | CONTROL_CONF_MUXMODE(0); //SBC DIVA board? #elif defined(USE_SBC_DIVA) //Select RMII interface mode for port 1 and RGMII interface mode for port 2 CONTROL_GMII_SEL_R = CONTROL_GMII_SEL_RMII1_IO_CLK_EN | CONTROL_GMII_SEL_GMII1_SEL_RMII | CONTROL_GMII_SEL_GMII2_SEL_RGMII; //Configure RMII1_REF_CLK (GPIO0_29) CONTROL_CONF_RMII1_REFCLK_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(0); //Configure RMII1_TX_EN (GPIO3_3) CONTROL_CONF_MII1_TXEN_R = CONTROL_CONF_MUXMODE(1); //Configure RMII1_TXD0 (GPIO0_28) CONTROL_CONF_MII1_TXD0_R = CONTROL_CONF_MUXMODE(1); //Configure RMII1_TXD1 (GPIO0_21) CONTROL_CONF_MII1_TXD1_R = CONTROL_CONF_MUXMODE(1); //Configure RMII1_RXD0 (GPIO2.21) CONTROL_CONF_MII1_RXD0_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(1); //Configure RMII1_RXD1 (GPIO2.20) CONTROL_CONF_MII1_RXD1_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(1); //Configure RMII1_CRS_DV (GPIO3_1) CONTROL_CONF_MII1_CRS_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(1); //Configure RMII1_RX_ER (GPIO3_2) CONTROL_CONF_MII1_RXERR_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(1); //Configure RGMII2_TCLK (GPIO1_22/GPMC_A6) CONTROL_CONF_GPMC_A_R(6) = CONTROL_CONF_MUXMODE(2); //Configure RGMII2_TCTL (GPIO1_16/GPMC_A0) CONTROL_CONF_GPMC_A_R(0) = CONTROL_CONF_MUXMODE(2); //Configure RGMII2_TD0 (GPIO1_21/GPMC_A5) CONTROL_CONF_GPMC_A_R(5) = CONTROL_CONF_MUXMODE(2); //Configure RGMII2_TD1 (GPIO1_20/GPMC_A4) CONTROL_CONF_GPMC_A_R(4) = CONTROL_CONF_MUXMODE(2); //Configure RGMII2_TD2 (GPIO1_19/GPMC_A3) CONTROL_CONF_GPMC_A_R(3) = CONTROL_CONF_MUXMODE(2); //Configure RGMII2_TD3 (GPIO1_18/GPMC_A2) CONTROL_CONF_GPMC_A_R(2) = CONTROL_CONF_MUXMODE(2); //Configure RGMII2_RCLK (GPIO1_23/GPMC_A7) CONTROL_CONF_GPMC_A_R(7) = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure RGMII2_RCTL (GPIO1_17/GPMC_A1) CONTROL_CONF_GPMC_A_R(1) = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure RGMII2_RD0 (GPIO1_27/GPMC_A11) CONTROL_CONF_GPMC_A_R(11) = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure RGMII2_RD1 
(GPIO1_26/GPMC_A10) CONTROL_CONF_GPMC_A_R(10) = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure RGMII2_RD2 (GPIO1_25/GPMC_A9) CONTROL_CONF_GPMC_A_R(9) = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure RGMII2_RD3 (GPIO1_24/GPMC_A8) CONTROL_CONF_GPMC_A_R(8) = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure MDIO (GPIO0_0) CONTROL_CONF_MDIO_DATA_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_PULLUPSEL | CONTROL_CONF_MUXMODE(0); //Configure MDC (GPIO0_1) CONTROL_CONF_MDIO_CLK_R = CONTROL_CONF_PULLUPSEL | CONTROL_CONF_MUXMODE(0); #endif } #endif /** * @brief Initialize buffer descriptor lists * @param[in] interface Underlying network interface **/ void am335xEthInitBufferDesc(NetInterface *interface) { uint_t i; uint_t nextIndex; uint_t prevIndex; //Initialize TX buffer descriptor list (port 1) for(i = 0; i < AM335X_ETH_TX_BUFFER_COUNT; i++) { //Index of the next buffer nextIndex = (i + 1) % AM335X_ETH_TX_BUFFER_COUNT; //Index of the previous buffer prevIndex = (i + AM335X_ETH_TX_BUFFER_COUNT - 1) % AM335X_ETH_TX_BUFFER_COUNT; //Next descriptor pointer txBufferDesc1[i].word0 = (uint32_t) NULL; //Buffer pointer txBufferDesc1[i].word1 = (uint32_t) txBuffer1[i]; //Buffer offset and buffer length txBufferDesc1[i].word2 = 0; //Status flags and packet length txBufferDesc1[i].word3 = 0; //Form a doubly linked list txBufferDesc1[i].next = &txBufferDesc1[nextIndex]; txBufferDesc1[i].prev = &txBufferDesc1[prevIndex]; } //Point to the very first descriptor txCurBufferDesc1 = &txBufferDesc1[0]; //Mark the end of the queue txCurBufferDesc1->prev->word3 = CPSW_TX_WORD3_SOP | CPSW_TX_WORD3_EOP | CPSW_TX_WORD3_EOQ; //Initialize TX buffer descriptor list (port 2) for(i = 0; i < AM335X_ETH_TX_BUFFER_COUNT; i++) { //Index of the next buffer nextIndex = (i + 1) % AM335X_ETH_TX_BUFFER_COUNT; //Index of the previous buffer prevIndex = (i + AM335X_ETH_TX_BUFFER_COUNT - 1) % AM335X_ETH_TX_BUFFER_COUNT; //Next descriptor pointer txBufferDesc2[i].word0 = (uint32_t) NULL; //Buffer pointer txBufferDesc2[i].word1 = (uint32_t) txBuffer2[i]; //Buffer offset and buffer length txBufferDesc2[i].word2 = 0; //Status flags and packet length txBufferDesc2[i].word3 = 0; //Form a doubly linked list txBufferDesc2[i].next = &txBufferDesc2[nextIndex]; txBufferDesc2[i].prev = &txBufferDesc2[prevIndex]; } //Point to the very first descriptor txCurBufferDesc2 = &txBufferDesc2[0]; //Mark the end of the queue txCurBufferDesc2->prev->word3 = CPSW_TX_WORD3_SOP | CPSW_TX_WORD3_EOP | CPSW_TX_WORD3_EOQ; //Initialize RX buffer descriptor list for(i = 0; i < AM335X_ETH_RX_BUFFER_COUNT; i++) { //Index of the next buffer nextIndex = (i + 1) % AM335X_ETH_RX_BUFFER_COUNT; //Index of the previous buffer prevIndex = (i + AM335X_ETH_RX_BUFFER_COUNT - 1) % AM335X_ETH_RX_BUFFER_COUNT; //Next descriptor pointer rxBufferDesc[i].word0 = (uint32_t) &rxBufferDesc[nextIndex]; //Buffer pointer rxBufferDesc[i].word1 = (uint32_t) rxBuffer[i]; //Buffer offset and buffer length rxBufferDesc[i].word2 = AM335X_ETH_RX_BUFFER_SIZE; //Status flags and packet length rxBufferDesc[i].word3 = CPSW_RX_WORD3_OWNER; //Form a doubly linked list rxBufferDesc[i].next = &rxBufferDesc[nextIndex]; rxBufferDesc[i].prev = &rxBufferDesc[prevIndex]; } //Point to the very first descriptor rxCurBufferDesc = &rxBufferDesc[0]; //Mark the end of the queue rxCurBufferDesc->prev->word0 = (uint32_t) NULL; //Write the RX DMA head descriptor pointer CPSW_CPDMA_RX_HDP_R(CPSW_CH0) = (uint32_t) rxCurBufferDesc; } /** * @brief AM335x Ethernet MAC timer handler * * This 
routine is periodically called by the TCP/IP stack to handle periodic * operations such as polling the link state * * @param[in] interface Underlying network interface **/ void am335xEthTick(NetInterface *interface) { //Valid Ethernet PHY or switch driver? if(interface->phyDriver != NULL) { //Handle periodic operations interface->phyDriver->tick(interface); } else if(interface->switchDriver != NULL) { //Handle periodic operations interface->switchDriver->tick(interface); } else { //Just for sanity } //Misqueued buffer condition? if((rxCurBufferDesc->word3 & CPSW_RX_WORD3_OWNER) != 0) { if(CPSW_CPDMA_RX_HDP_R(CPSW_CH0) == 0) { //The host acts on the misqueued buffer condition by writing the added //buffer descriptor address to the appropriate RX DMA head descriptor //pointer CPSW_CPDMA_RX_HDP_R(CPSW_CH0) = (uint32_t) rxCurBufferDesc; } } } /** * @brief Enable interrupts * @param[in] interface Underlying network interface **/ void am335xEthEnableIrq(NetInterface *interface) { #ifdef ti_sysbios_BIOS___VERS //Enable Ethernet MAC interrupts Hwi_enableInterrupt(SYS_INT_3PGSWTXINT0); Hwi_enableInterrupt(SYS_INT_3PGSWRXINT0); #else //Enable Ethernet MAC interrupts IntSystemEnable(SYS_INT_3PGSWTXINT0); IntSystemEnable(SYS_INT_3PGSWRXINT0); #endif //Valid Ethernet PHY or switch driver? if(interface->phyDriver != NULL) { //Enable Ethernet PHY interrupts interface->phyDriver->enableIrq(interface); } else if(interface->switchDriver != NULL) { //Enable Ethernet switch interrupts interface->switchDriver->enableIrq(interface); } else { //Just for sanity } } /** * @brief Disable interrupts * @param[in] interface Underlying network interface **/ void am335xEthDisableIrq(NetInterface *interface) { #ifdef ti_sysbios_BIOS___VERS //Disable Ethernet MAC interrupts Hwi_disableInterrupt(SYS_INT_3PGSWTXINT0); Hwi_disableInterrupt(SYS_INT_3PGSWRXINT0); #else //Disable Ethernet MAC interrupts IntSystemDisable(SYS_INT_3PGSWTXINT0); IntSystemDisable(SYS_INT_3PGSWRXINT0); #endif //Valid Ethernet PHY or switch driver? if(interface->phyDriver != NULL) { //Disable Ethernet PHY interrupts interface->phyDriver->disableIrq(interface); } else if(interface->switchDriver != NULL) { //Disable Ethernet switch interrupts interface->switchDriver->disableIrq(interface); } else { //Just for sanity } } /** * @brief Ethernet MAC transmit interrupt **/ void am335xEthTxIrqHandler(void) { bool_t flag; uint32_t status; uint32_t temp; Am335xTxBufferDesc *p; //Interrupt service routine prologue osEnterIsr(); //This flag will be set if a higher priority task must be woken flag = FALSE; //Read the TX_STAT register to determine which channels caused the interrupt status = CPSW_WR_C_TX_STAT_R(CPSW_CORE0); //Packet transmitted on channel 1? if(status & (1 << CPSW_CH1)) { //Point to the buffer descriptor p = (Am335xTxBufferDesc *) CPSW_CPDMA_TX_CP_R(CPSW_CH1); //Read the status flags temp = p->word3 & (CPSW_TX_WORD3_SOP | CPSW_TX_WORD3_EOP | CPSW_TX_WORD3_OWNER | CPSW_TX_WORD3_EOQ); //Misqueued buffer condition? 
if(temp == (CPSW_TX_WORD3_SOP | CPSW_TX_WORD3_EOP | CPSW_TX_WORD3_EOQ)) { //Check whether the next descriptor pointer is non-zero if(p->word0 != 0) { //The host corrects the misqueued buffer condition by writing the //misqueued packets buffer descriptor address to the appropriate //TX DMA head descriptor pointer CPSW_CPDMA_TX_HDP_R(CPSW_CH1) = (uint32_t) p->word0; } } //Write the TX completion pointer CPSW_CPDMA_TX_CP_R(CPSW_CH1) = (uint32_t) p; //Check whether the TX buffer is available for writing if((txCurBufferDesc1->word3 & CPSW_TX_WORD3_OWNER) == 0) { //Notify the TCP/IP stack that the transmitter is ready to send flag |= osSetEventFromIsr(&nicDriverInterface1->nicTxEvent); } } //Packet transmitted on channel 2? if(status & (1 << CPSW_CH2)) { //Point to the buffer descriptor p = (Am335xTxBufferDesc *) CPSW_CPDMA_TX_CP_R(CPSW_CH2); //Read the status flags temp = p->word3 & (CPSW_TX_WORD3_SOP | CPSW_TX_WORD3_EOP | CPSW_TX_WORD3_OWNER | CPSW_TX_WORD3_EOQ); //Misqueued buffer condition? if(temp == (CPSW_TX_WORD3_SOP | CPSW_TX_WORD3_EOP | CPSW_TX_WORD3_EOQ)) { //Check whether the next descriptor pointer is non-zero if(p->word0 != 0) { //The host corrects the misqueued buffer condition by writing the //misqueued packets buffer descriptor address to the appropriate //TX DMA head descriptor pointer CPSW_CPDMA_TX_HDP_R(CPSW_CH2) = (uint32_t) p->word0; } } //Write the TX completion pointer CPSW_CPDMA_TX_CP_R(CPSW_CH2) = (uint32_t) p; //Check whether the TX buffer is available for writing if((txCurBufferDesc2->word3 & CPSW_TX_WORD3_OWNER) == 0) { //Notify the TCP/IP stack that the transmitter is ready to send flag |= osSetEventFromIsr(&nicDriverInterface2->nicTxEvent); } } //Write the DMA end of interrupt vector CPSW_CPDMA_CPDMA_EOI_VECTOR_R = CPSW_CPDMA_EOI_VECTOR_TX_PULSE; //Interrupt service routine epilogue osExitIsr(flag); } /** * @brief Ethernet MAC receive interrupt **/ void am335xEthRxIrqHandler(void) { bool_t flag; uint32_t status; //Interrupt service routine prologue osEnterIsr(); //This flag will be set if a higher priority task must be woken flag = FALSE; //Read the RX_STAT register to determine which channels caused the interrupt status = CPSW_WR_C_RX_STAT_R(CPSW_CORE0); //Packet received on channel 0? if(status & (1 << CPSW_CH0)) { //Disable RX interrupts CPSW_WR_C_RX_EN_R(CPSW_CORE0) &= ~(1 << CPSW_CH0); //Set event flag if(nicDriverInterface1 != NULL) { nicDriverInterface1->nicEvent = TRUE; } else if(nicDriverInterface2 != NULL) { nicDriverInterface2->nicEvent = TRUE; } //Notify the TCP/IP stack of the event flag |= osSetEventFromIsr(&netEvent); } //Write the DMA end of interrupt vector CPSW_CPDMA_CPDMA_EOI_VECTOR_R = CPSW_CPDMA_EOI_VECTOR_RX_PULSE; //Interrupt service routine epilogue osExitIsr(flag); } /** * @brief AM335x Ethernet MAC event handler * @param[in] interface Underlying network interface **/ void am335xEthEventHandler(NetInterface *interface) { static uint8_t buffer[AM335X_ETH_RX_BUFFER_SIZE]; error_t error; size_t n; uint32_t temp; //Process all pending packets do { //The current buffer is available for reading? if((rxCurBufferDesc->word3 & CPSW_RX_WORD3_OWNER) == 0) { //SOP and EOP flags should be set if((rxCurBufferDesc->word3 & CPSW_RX_WORD3_SOP) != 0 && (rxCurBufferDesc->word3 & CPSW_RX_WORD3_EOP) != 0) { //Make sure no error occurred if((rxCurBufferDesc->word3 & CPSW_RX_WORD3_PKT_ERROR) == 0) { //Check the port on which the packet was received switch(rxCurBufferDesc->word3 & CPSW_RX_WORD3_FROM_PORT) { //Port 1? 
case CPSW_RX_WORD3_FROM_PORT_1: interface = nicDriverInterface1; break; //Port 2? case CPSW_RX_WORD3_FROM_PORT_2: interface = nicDriverInterface2; break; //Invalid port number? default: interface = NULL; break; } //Retrieve the length of the frame n = rxCurBufferDesc->word3 & CPSW_RX_WORD3_PACKET_LENGTH; //Limit the number of data to read n = MIN(n, AM335X_ETH_RX_BUFFER_SIZE); //Sanity check if(interface != NULL) { //Copy data from the receive buffer osMemcpy(buffer, (uint8_t *) rxCurBufferDesc->word1, (n + 3) & ~3UL); //Packet successfully received error = NO_ERROR; } else { //The port number is invalid error = ERROR_INVALID_PACKET; } } else { //The received packet contains an error error = ERROR_INVALID_PACKET; } } else { //The packet is not valid error = ERROR_INVALID_PACKET; } //Mark the end of the queue with a NULL pointer rxCurBufferDesc->word0 = (uint32_t) NULL; //Restore the length of the buffer rxCurBufferDesc->word2 = AM335X_ETH_RX_BUFFER_SIZE; //Give the ownership of the descriptor back to the DMA rxCurBufferDesc->word3 = CPSW_RX_WORD3_OWNER; //Link the current descriptor to the previous descriptor rxCurBufferDesc->prev->word0 = (uint32_t) rxCurBufferDesc; //Read the status flags of the previous descriptor temp = rxCurBufferDesc->prev->word3 & (CPSW_RX_WORD3_SOP | CPSW_RX_WORD3_EOP | CPSW_RX_WORD3_OWNER | CPSW_RX_WORD3_EOQ); //Misqueued buffer condition? if(temp == (CPSW_RX_WORD3_SOP | CPSW_RX_WORD3_EOP | CPSW_RX_WORD3_EOQ)) { //The host acts on the misqueued buffer condition by writing the added //buffer descriptor address to the appropriate RX DMA head descriptor //pointer CPSW_CPDMA_RX_HDP_R(CPSW_CH0) = (uint32_t) rxCurBufferDesc; } //Write the RX completion pointer CPSW_CPDMA_RX_CP_R(CPSW_CH0) = (uint32_t) rxCurBufferDesc; //Point to the next descriptor in the list rxCurBufferDesc = rxCurBufferDesc->next; } else { //No more data in the receive buffer error = ERROR_BUFFER_EMPTY; } //Check whether a valid packet has been received if(!error) { NetRxAncillary ancillary; //Additional options can be passed to the stack along with the packet ancillary = NET_DEFAULT_RX_ANCILLARY; //Pass the packet to the upper layer nicProcessPacket(interface, buffer, n, &ancillary); } //No more data in the receive buffer?
} while(error != ERROR_BUFFER_EMPTY); //Re-enable RX interrupts CPSW_WR_C_RX_EN_R(CPSW_CORE0) |= (1 << CPSW_CH0); } /** * @brief Send a packet (port 1) * @param[in] interface Underlying network interface * @param[in] buffer Multi-part buffer containing the data to send * @param[in] offset Offset to the first data byte * @param[in] ancillary Additional options passed to the stack along with * the packet * @return Error code **/ error_t am335xEthSendPacketPort1(NetInterface *interface, const NetBuffer *buffer, size_t offset, NetTxAncillary *ancillary) { static uint8_t temp[AM335X_ETH_TX_BUFFER_SIZE]; size_t length; uint32_t value; //Retrieve the length of the packet length = netBufferGetLength(buffer) - offset; //Check the frame length if(length > AM335X_ETH_TX_BUFFER_SIZE) { //The transmitter can accept another packet osSetEvent(&interface->nicTxEvent); //Report an error return ERROR_INVALID_LENGTH; } //Make sure the current buffer is available for writing if((txCurBufferDesc1->word3 & CPSW_TX_WORD3_OWNER) != 0) { return ERROR_FAILURE; } //Mark the end of the queue with a NULL pointer txCurBufferDesc1->word0 = (uint32_t) NULL; //Copy user data to the transmit buffer netBufferRead(temp, buffer, offset, length); osMemcpy((uint8_t *) txCurBufferDesc1->word1, temp, (length + 3) & ~3UL); //Set the length of the buffer txCurBufferDesc1->word2 = length & CPSW_TX_WORD2_BUFFER_LENGTH; //Set the length of the packet value = length & CPSW_TX_WORD3_PACKET_LENGTH; //Set SOP and EOP flags as the data fits in a single buffer value |= CPSW_TX_WORD3_SOP | CPSW_TX_WORD3_EOP; //Redirect the packet to the relevant port number value |= CPSW_TX_WORD3_TO_PORT_EN | CPSW_TX_WORD3_TO_PORT_1; //Give the ownership of the descriptor to the DMA txCurBufferDesc1->word3 = CPSW_TX_WORD3_OWNER | value; //Link the current descriptor to the previous descriptor txCurBufferDesc1->prev->word0 = (uint32_t) txCurBufferDesc1; //Read the status flags of the previous descriptor value = txCurBufferDesc1->prev->word3 & (CPSW_TX_WORD3_SOP | CPSW_TX_WORD3_EOP | CPSW_TX_WORD3_OWNER | CPSW_TX_WORD3_EOQ); //Misqueued buffer condition? 
if(value == (CPSW_TX_WORD3_SOP | CPSW_TX_WORD3_EOP | CPSW_TX_WORD3_EOQ)) { //Clear the misqueued buffer condition txCurBufferDesc1->prev->word3 = 0; //The host corrects the misqueued buffer condition by writing the //misqueued packets buffer descriptor address to the appropriate //TX DMA head descriptor pointer CPSW_CPDMA_TX_HDP_R(CPSW_CH1) = (uint32_t) txCurBufferDesc1; } //Point to the next descriptor in the list txCurBufferDesc1 = txCurBufferDesc1->next; //Check whether the next buffer is available for writing if((txCurBufferDesc1->word3 & CPSW_TX_WORD3_OWNER) == 0) { //The transmitter can accept another packet osSetEvent(&interface->nicTxEvent); } //Data successfully written return NO_ERROR; } /** * @brief Send a packet (port 2) * @param[in] interface Underlying network interface * @param[in] buffer Multi-part buffer containing the data to send * @param[in] offset Offset to the first data byte * @param[in] ancillary Additional options passed to the stack along with * the packet * @return Error code **/ error_t am335xEthSendPacketPort2(NetInterface *interface, const NetBuffer *buffer, size_t offset, NetTxAncillary *ancillary) { static uint8_t temp[AM335X_ETH_TX_BUFFER_SIZE]; size_t length; uint32_t value; //Retrieve the length of the packet length = netBufferGetLength(buffer) - offset; //Check the frame length if(length > AM335X_ETH_TX_BUFFER_SIZE) { //The transmitter can accept another packet osSetEvent(&interface->nicTxEvent); //Report an error return ERROR_INVALID_LENGTH; } //Make sure the current buffer is available for writing if((txCurBufferDesc2->word3 & CPSW_TX_WORD3_OWNER) != 0) { return ERROR_FAILURE; } //Mark the end of the queue with a NULL pointer txCurBufferDesc2->word0 = (uint32_t) NULL; //Copy user data to the transmit buffer netBufferRead(temp, buffer, offset, length); osMemcpy((uint8_t *) txCurBufferDesc2->word1, temp, (length + 3) & ~3UL); //Set the length of the buffer txCurBufferDesc2->word2 = length & CPSW_TX_WORD2_BUFFER_LENGTH; //Set the length of the packet value = length & CPSW_TX_WORD3_PACKET_LENGTH; //Set SOP and EOP flags as the data fits in a single buffer value |= CPSW_TX_WORD3_SOP | CPSW_TX_WORD3_EOP; //Redirect the packet to the relevant port number value |= CPSW_TX_WORD3_TO_PORT_EN | CPSW_TX_WORD3_TO_PORT_2; //Give the ownership of the descriptor to the DMA txCurBufferDesc2->word3 = CPSW_TX_WORD3_OWNER | value; //Link the current descriptor to the previous descriptor txCurBufferDesc2->prev->word0 = (uint32_t) txCurBufferDesc2; //Read the status flags of the previous descriptor value = txCurBufferDesc2->prev->word3 & (CPSW_TX_WORD3_SOP | CPSW_TX_WORD3_EOP | CPSW_TX_WORD3_OWNER | CPSW_TX_WORD3_EOQ); //Misqueued buffer condition? 
if(value == (CPSW_TX_WORD3_SOP | CPSW_TX_WORD3_EOP | CPSW_TX_WORD3_EOQ)) { //Clear the misqueued buffer condition txCurBufferDesc2->prev->word3 = 0; //The host corrects the misqueued buffer condition by writing the //misqueued packets buffer descriptor address to the appropriate //TX DMA head descriptor pointer CPSW_CPDMA_TX_HDP_R(CPSW_CH2) = (uint32_t) txCurBufferDesc2; } //Point to the next descriptor in the list txCurBufferDesc2 = txCurBufferDesc2->next; //Check whether the next buffer is available for writing if((txCurBufferDesc2->word3 & CPSW_TX_WORD3_OWNER) == 0) { //The transmitter can accept another packet osSetEvent(&interface->nicTxEvent); } //Data successfully written return NO_ERROR; } /** * @brief Configure MAC address filtering * @param[in] interface Underlying network interface * @return Error code **/ error_t am335xEthUpdateMacAddrFilter(NetInterface *interface) { uint_t i; uint_t port; MacFilterEntry *entry; //Debug message TRACE_DEBUG("Updating AM335x ALE table...\r\n"); //Select the relevant port number if(interface == nicDriverInterface1) { port = CPSW_PORT1; } else if(interface == nicDriverInterface2) { port = CPSW_PORT2; } else { port = CPSW_PORT0; } //The MAC address filter contains the list of MAC addresses to accept //when receiving an Ethernet frame for(i = 0; i < MAC_ADDR_FILTER_SIZE; i++) { //Point to the current entry entry = &interface->macAddrFilter[i]; //Check whether the ALE table should be updated for the //current multicast address if(!macCompAddr(&entry->addr, &MAC_UNSPECIFIED_ADDR)) { if(entry->addFlag) { //Add VLAN/multicast address entry to the ALE table am335xEthAddVlanAddrEntry(port, port, &entry->addr); } else if(entry->deleteFlag) { //Remove VLAN/multicast address entry from the ALE table am335xEthDeleteVlanAddrEntry(port, port, &entry->addr); } } } //Successful processing return NO_ERROR; } /** * @brief Adjust MAC configuration parameters for proper operation * @param[in] interface Underlying network interface * @return Error code **/ error_t am335xEthUpdateMacConfig(NetInterface *interface) { uint32_t config = 0; //Read MAC control register if(interface == nicDriverInterface1) { config = CPSW_SL1_MACCONTROL_R; } else if(interface == nicDriverInterface2) { config = CPSW_SL2_MACCONTROL_R; } //1000BASE-T operation mode? if(interface->linkSpeed == NIC_LINK_SPEED_1GBPS) { config |= CPSW_SL_MACCONTROL_GIG; config &= ~(CPSW_SL_MACCONTROL_IFCTL_A | CPSW_SL_MACCONTROL_IFCTL_B); } //100BASE-TX operation mode? else if(interface->linkSpeed == NIC_LINK_SPEED_100MBPS) { config &= ~CPSW_SL_MACCONTROL_GIG; config |= CPSW_SL_MACCONTROL_IFCTL_A | CPSW_SL_MACCONTROL_IFCTL_B; } //10BASE-T operation mode? else { config &= ~CPSW_SL_MACCONTROL_GIG; config &= ~(CPSW_SL_MACCONTROL_IFCTL_A | CPSW_SL_MACCONTROL_IFCTL_B); } //Half-duplex or full-duplex mode? if(interface->duplexMode == NIC_FULL_DUPLEX_MODE) { config |= CPSW_SL_MACCONTROL_FULLDUPLEX; } else { config &= ~CPSW_SL_MACCONTROL_FULLDUPLEX; } //Update MAC control register if(interface == nicDriverInterface1) { CPSW_SL1_MACCONTROL_R = config; } else if(interface == nicDriverInterface2) { CPSW_SL2_MACCONTROL_R = config; } //Successful processing return NO_ERROR; } /** * @brief Write PHY register * @param[in] opcode Access type (2 bits) * @param[in] phyAddr PHY address (5 bits) * @param[in] regAddr Register address (5 bits) * @param[in] data Register value **/ void am335xEthWritePhyReg(uint8_t opcode, uint8_t phyAddr, uint8_t regAddr, uint16_t data) { uint32_t temp; //Valid opcode? 
if(opcode == SMI_OPCODE_WRITE) { //Set up a write operation temp = MDIO_USERACCESS0_GO | MDIO_USERACCESS0_WRITE; //PHY address temp |= (phyAddr << MDIO_USERACCESS0_PHYADR_SHIFT) & MDIO_USERACCESS0_PHYADR; //Register address temp |= (regAddr << MDIO_USERACCESS0_REGADR_SHIFT) & MDIO_USERACCESS0_REGADR; //Register value temp |= data & MDIO_USERACCESS0_DATA; //Start a write operation MDIO_USERACCESS0_R = temp; //Wait for the write to complete while((MDIO_USERACCESS0_R & MDIO_USERACCESS0_GO) != 0) { } } else { //The MAC peripheral only supports standard Clause 22 opcodes } } /** * @brief Read PHY register * @param[in] opcode Access type (2 bits) * @param[in] phyAddr PHY address (5 bits) * @param[in] regAddr Register address (5 bits) * @return Register value **/ uint16_t am335xEthReadPhyReg(uint8_t opcode, uint8_t phyAddr, uint8_t regAddr) { uint16_t data; uint32_t temp; //Valid opcode? if(opcode == SMI_OPCODE_READ) { //Set up a read operation temp = MDIO_USERACCESS0_GO | MDIO_USERACCESS0_READ; //PHY address temp |= (phyAddr << MDIO_USERACCESS0_PHYADR_SHIFT) & MDIO_USERACCESS0_PHYADR; //Register address temp |= (regAddr << MDIO_USERACCESS0_REGADR_SHIFT) & MDIO_USERACCESS0_REGADR; //Start a read operation MDIO_USERACCESS0_R = temp; //Wait for the read to complete while((MDIO_USERACCESS0_R & MDIO_USERACCESS0_GO) != 0) { } //Get register value data = MDIO_USERACCESS0_R & MDIO_USERACCESS0_DATA; } else { //The MAC peripheral only supports standard Clause 22 opcodes data = 0; } //Return the value of the PHY register return data; } /** * @brief Write an ALE table entry * @param[in] index Entry index * @param[in] entry Pointer to the ALE table entry **/ void am335xEthWriteEntry(uint_t index, const Am335xAleEntry *entry) { //Copy the content of the entry to be written CPSW_ALE_TBLW_R(2) = entry->word2; CPSW_ALE_TBLW_R(1) = entry->word1; CPSW_ALE_TBLW_R(0) = entry->word0; //Write the ALE entry at the specified index CPSW_ALE_TBLCTL_R = CPSW_ALE_TBLCTL_WRITE_RDZ | index; } /** * @brief Read an ALE table entry * @param[in] index Entry index * @param[out] entry Pointer to the ALE table entry **/ void am335xEthReadEntry(uint_t index, Am335xAleEntry *entry) { //Read the ALE entry at the specified index CPSW_ALE_TBLCTL_R = index; //Copy the content of the entry entry->word2 = CPSW_ALE_TBLW_R(2); entry->word1 = CPSW_ALE_TBLW_R(1); entry->word0 = CPSW_ALE_TBLW_R(0); } /** * @brief Find a free entry in the ALE table * @return Index of the first free entry **/ uint_t am335xEthFindFreeEntry(void) { uint_t index; uint32_t type; Am335xAleEntry entry; //Loop through the ALE table entries for(index = 0; index < CPSW_ALE_MAX_ENTRIES; index++) { //Read the current entry am335xEthReadEntry(index, &entry); //Retrieve the type of the ALE entry type = entry.word1 & CPSW_ALE_WORD1_ENTRY_TYPE_MASK; //Free entry? 
if(type == CPSW_ALE_WORD1_ENTRY_TYPE_FREE) { //Exit immediately break; } } //Return the index of the entry return index; } /** * @brief Search the ALE table for the specified VLAN entry * @param[in] vlanId VLAN identifier * @return Index of the matching entry **/ uint_t am335xEthFindVlanEntry(uint_t vlanId) { uint_t index; uint32_t value; Am335xAleEntry entry; //Loop through the ALE table entries for(index = 0; index < CPSW_ALE_MAX_ENTRIES; index++) { //Read the current entry am335xEthReadEntry(index, &entry); //Retrieve the type of the ALE entry value = entry.word1 & CPSW_ALE_WORD1_ENTRY_TYPE_MASK; //Check the type of the ALE entry if(value == CPSW_ALE_WORD1_ENTRY_TYPE_VLAN_ADDR) { //Get the VLAN identifier value = entry.word1 & CPSW_ALE_WORD1_VLAN_ID_MASK; //Compare the VLAN identifier if(value == CPSW_ALE_WORD1_VLAN_ID(vlanId)) { //Matching ALE entry found break; } } } //Return the index of the entry return index; } /** * @brief Search the ALE table for the specified VLAN/address entry * @param[in] vlanId VLAN identifier * @param[in] macAddr MAC address * @return Index of the matching entry **/ uint_t am335xEthFindVlanAddrEntry(uint_t vlanId, MacAddr *macAddr) { uint_t index; uint32_t value; Am335xAleEntry entry; //Loop through the ALE table entries for(index = 0; index < CPSW_ALE_MAX_ENTRIES; index++) { //Read the current entry am335xEthReadEntry(index, &entry); //Retrieve the type of the ALE entry value = entry.word1 & CPSW_ALE_WORD1_ENTRY_TYPE_MASK; //Check the type of the ALE entry if(value == CPSW_ALE_WORD1_ENTRY_TYPE_VLAN_ADDR) { //Get the VLAN identifier value = entry.word1 & CPSW_ALE_WORD1_VLAN_ID_MASK; //Compare the VLAN identifier if(value == CPSW_ALE_WORD1_VLAN_ID(vlanId)) { //Compare the MAC address if(macAddr->b[0] == (uint8_t) (entry.word1 >> 8) && macAddr->b[1] == (uint8_t) (entry.word1 >> 0) && macAddr->b[2] == (uint8_t) (entry.word0 >> 24) && macAddr->b[3] == (uint8_t) (entry.word0 >> 16) && macAddr->b[4] == (uint8_t) (entry.word0 >> 8) && macAddr->b[5] == (uint8_t) (entry.word0 >> 0)) { //Matching ALE entry found break; } } } } //Return the index of the entry return index; } /** * @brief Add a VLAN entry in the ALE table * @param[in] port Port number * @param[in] vlanId VLAN identifier * @return Error code **/ error_t am335xEthAddVlanEntry(uint_t port, uint_t vlanId) { error_t error; uint_t index; Am335xAleEntry entry; //Ensure that there are no duplicate address entries in the ALE table index = am335xEthFindVlanEntry(vlanId); //No matching entry found? 
if(index >= CPSW_ALE_MAX_ENTRIES) { //Find a free entry in the ALE table index = am335xEthFindFreeEntry(); } //Sanity check if(index < CPSW_ALE_MAX_ENTRIES) { //Set up a VLAN table entry entry.word2 = 0; entry.word1 = CPSW_ALE_WORD1_ENTRY_TYPE_VLAN; entry.word0 = 0; //Set VLAN identifier entry.word1 |= CPSW_ALE_WORD1_VLAN_ID(vlanId); //Force the packet VLAN tag to be removed on egress entry.word0 |= CPSW_ALE_WORD0_FORCE_UNTAG_EGRESS(1 << port) | CPSW_ALE_WORD0_FORCE_UNTAG_EGRESS(1 << CPSW_PORT0); //Set VLAN member list entry.word0 |= CPSW_ALE_WORD0_VLAN_MEMBER_LIST(1 << port) | CPSW_ALE_WORD0_VLAN_MEMBER_LIST(1 << CPSW_PORT0); //Add a new entry to the ALE table am335xEthWriteEntry(index, &entry); //Successful processing error = NO_ERROR; } else { //The ALE table is full error = ERROR_FAILURE; } //Return status code return error; } /** * @brief Add a VLAN/address entry in the ALE table * @param[in] port Port number * @param[in] vlanId VLAN identifier * @param[in] macAddr MAC address * @return Error code **/ error_t am335xEthAddVlanAddrEntry(uint_t port, uint_t vlanId, MacAddr *macAddr) { error_t error; uint_t index; Am335xAleEntry entry; //Ensure that there are no duplicate address entries in the ALE table index = am335xEthFindVlanAddrEntry(vlanId, macAddr); //No matching entry found? if(index >= CPSW_ALE_MAX_ENTRIES) { //Find a free entry in the ALE table index = am335xEthFindFreeEntry(); } //Sanity check if(index < CPSW_ALE_MAX_ENTRIES) { //Set up a VLAN/address table entry entry.word2 = 0; entry.word1 = CPSW_ALE_WORD1_ENTRY_TYPE_VLAN_ADDR; entry.word0 = 0; //Multicast address? if(macIsMulticastAddr(macAddr)) { //Set port mask entry.word2 |= CPSW_ALE_WORD2_SUPER | CPSW_ALE_WORD2_PORT_LIST(1 << port) | CPSW_ALE_WORD2_PORT_LIST(1 << CPSW_CH0); //Set multicast forward state entry.word1 |= CPSW_ALE_WORD1_MCAST_FWD_STATE(0); } //Set VLAN identifier entry.word1 |= CPSW_ALE_WORD1_VLAN_ID(vlanId); //Copy the upper 16 bits of the unicast address entry.word1 |= (macAddr->b[0] << 8) | macAddr->b[1]; //Copy the lower 32 bits of the unicast address entry.word0 |= (macAddr->b[2] << 24) | (macAddr->b[3] << 16) | (macAddr->b[4] << 8) | macAddr->b[5]; //Add a new entry to the ALE table am335xEthWriteEntry(index, &entry); //Successful processing error = NO_ERROR; } else { //The ALE table is full error = ERROR_FAILURE; } //Return status code return error; } /** * @brief Remove a VLAN/address entry from the ALE table * @param[in] port Port number * @param[in] vlanId VLAN identifier * @param[in] macAddr MAC address * @return Error code **/ error_t am335xEthDeleteVlanAddrEntry(uint_t port, uint_t vlanId, MacAddr *macAddr) { error_t error; uint_t index; Am335xAleEntry entry; //Search the ALE table for the specified VLAN/address entry index = am335xEthFindVlanAddrEntry(vlanId, macAddr); //Matching ALE entry found? if(index < CPSW_ALE_MAX_ENTRIES) { //Clear the contents of the entry entry.word2 = 0; entry.word1 = 0; entry.word0 = 0; //Update the ALE table am335xEthWriteEntry(index, &entry); //Successful processing error = NO_ERROR; } else { //Entry not found error = ERROR_NOT_FOUND; } //Return status code return error; }
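To make the bit layout in am335xEthAddVlanAddrEntry and am335xEthFindVlanAddrEntry above easier to follow: the 48-bit MAC address is split across two entry words, with the top two bytes in the low half of word1 and the remaining four bytes in word0. Below is a minimal sketch of that packing and its inverse; MacAddrSketch is a local stand-in for the stack's MacAddr type so the snippet compiles on its own.

#include <stdint.h>

typedef struct
{
   uint8_t b[6];
} MacAddrSketch;

//Pack a MAC address into an ALE entry, mirroring am335xEthAddVlanAddrEntry
static void alePackMacAddr(const MacAddrSketch *addr, uint32_t *word1,
   uint32_t *word0)
{
   //Copy the upper 16 bits of the address into the low half of word1
   *word1 |= ((uint32_t) addr->b[0] << 8) | addr->b[1];
   //Copy the lower 32 bits of the address into word0
   *word0 |= ((uint32_t) addr->b[2] << 24) | ((uint32_t) addr->b[3] << 16) |
      ((uint32_t) addr->b[4] << 8) | addr->b[5];
}

//Inverse of the packing, mirroring the byte-by-byte comparison performed
//by am335xEthFindVlanAddrEntry
static void aleUnpackMacAddr(uint32_t word1, uint32_t word0,
   MacAddrSketch *addr)
{
   addr->b[0] = (uint8_t) (word1 >> 8);
   addr->b[1] = (uint8_t) (word1 >> 0);
   addr->b[2] = (uint8_t) (word0 >> 24);
   addr->b[3] = (uint8_t) (word0 >> 16);
   addr->b[4] = (uint8_t) (word0 >> 8);
   addr->b[5] = (uint8_t) (word0 >> 0);
}

Round-tripping an address through these two helpers returns the original bytes, which is exactly the property the lookup relies on when it compares byte fields against shifted entry words.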
error_t am335xEthAddVlanEntry(uint_t port, uint_t vlanId) { error_t error; uint_t index; Am335xAleEntry entry; //Ensure that there are no duplicate address entries in the ALE table index = am335xEthFindVlanEntry(vlanId); //No matching entry found? if(index >= CPSW_ALE_MAX_ENTRIES) { //Find a free entry in the ALE table index = am335xEthFindFreeEntry(); } //Sanity check if(index < CPSW_ALE_MAX_ENTRIES) { //Set up a VLAN table entry entry.word2 = 0; entry.word1 = CPSW_ALE_WORD1_ENTRY_TYPE_VLAN; entry.word0 = 0; //Set VLAN identifier entry.word1 |= CPSW_ALE_WORD1_VLAN_ID(vlanId); //Force the packet VLAN tag to be removed on egress entry.word0 |= CPSW_ALE_WORD0_FORCE_UNTAG_EGRESS(1 << port) | CPSW_ALE_WORD0_FORCE_UNTAG_EGRESS(1 << CPSW_PORT0); //Set VLAN member list entry.word0 |= CPSW_ALE_WORD0_VLAN_MEMBER_LIST(1 << port) | CPSW_ALE_WORD0_VLAN_MEMBER_LIST(1 << CPSW_PORT0); //Add a new entry to the ALE table am335xEthWriteEntry(index, &entry); //Sucessful processing error = NO_ERROR; } else { //The ALE table is full error = ERROR_FAILURE; } //Return status code return error; }
error_t am335xEthAddVlanEntry(uint_t port, uint_t vlanId) { error_t error; uint_t index; Am335xAleEntry entry; //Ensure that there are no duplicate address entries in the ALE table index = am335xEthFindVlanEntry(vlanId); //No matching entry found? if(index >= CPSW_ALE_MAX_ENTRIES) { //Find a free entry in the ALE table index = am335xEthFindFreeEntry(); } //Sanity check if(index < CPSW_ALE_MAX_ENTRIES) { //Set up a VLAN table entry entry.word2 = 0; entry.word1 = CPSW_ALE_WORD1_ENTRY_TYPE_VLAN; entry.word0 = 0; //Set VLAN identifier entry.word1 |= CPSW_ALE_WORD1_VLAN_ID(vlanId); //Force the packet VLAN tag to be removed on egress entry.word0 |= CPSW_ALE_WORD0_FORCE_UNTAG_EGRESS(1 << port) | CPSW_ALE_WORD0_FORCE_UNTAG_EGRESS(1 << CPSW_PORT0); //Set VLAN member list entry.word0 |= CPSW_ALE_WORD0_VLAN_MEMBER_LIST(1 << port) | CPSW_ALE_WORD0_VLAN_MEMBER_LIST(1 << CPSW_PORT0); //Add a new entry to the ALE table am335xEthWriteEntry(index, &entry); //Successful processing error = NO_ERROR; } else { //The ALE table is full error = ERROR_FAILURE; } //Return status code return error; }
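Both versions of the function follow the same find-or-allocate idiom used throughout these ALE helpers: the lookup returns CPSW_ALE_MAX_ENTRIES as a not-found sentinel, the caller falls back to a free-slot scan, and the table is written only when a valid index came back, so an existing entry is updated in place and a full table fails cleanly. A stripped-down sketch of the idiom over a plain array; every name here is illustrative, and the 8-entry table stands in for the ALE's much larger one.

#include <stdint.h>

#define TABLE_MAX_ENTRIES 8

//0 marks a free slot, so real keys must be nonzero in this sketch
static uint32_t table[TABLE_MAX_ENTRIES];

//Return the index of the first matching slot, or TABLE_MAX_ENTRIES as a
//not-found sentinel, like am335xEthFindVlanEntry does
static unsigned tableLookup(uint32_t key)
{
   unsigned index;

   for(index = 0; index < TABLE_MAX_ENTRIES; index++)
   {
      if(table[index] == key)
         break;
   }

   return index;
}

static int tableAddOrUpdate(uint32_t key)
{
   //Ensure that there are no duplicate entries in the table
   unsigned index = tableLookup(key);

   //No matching entry found? Fall back to allocating a free slot
   if(index >= TABLE_MAX_ENTRIES)
   {
      index = tableLookup(0);
   }

   //Write only when a usable index was found
   if(index >= TABLE_MAX_ENTRIES)
      return -1; //Table full (ERROR_FAILURE in the driver)

   table[index] = key;
   return 0; //NO_ERROR in the driver
}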
{'added': [(9, ' * Copyright (C) 2010-2021 Oryx Embedded SARL. All rights reserved.'), (28, ' * @version 2.0.2'), (89, '//GCC compiler?'), (1807, ' //Successful processing'), (1878, ' //Successful processing'), (1920, ' //Successful processing')], 'deleted': [(9, ' * Copyright (C) 2010-2020 Oryx Embedded SARL. All rights reserved.'), (28, ' * @version 2.0.0'), (89, '//Keil MDK-ARM or GCC compiler?'), (1807, ' //Sucessful processing'), (1878, ' //Sucessful processing'), (1920, ' //Sucessful processing')]}
6
6
944
5288
https://github.com/Oryx-Embedded/CycloneTCP
CVE-2021-26788
['CWE-20']
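The CWE-20 tag above (improper input validation) maps naturally onto a NIC driver's receive path: the packet length that the DMA engine writes into a descriptor is derived from data a network peer controls, so it must be validated before use. The event handler in the listing does this with n = MIN(n, AM335X_ETH_RX_BUFFER_SIZE) ahead of its word-aligned copy. Below is a self-contained sketch of the same clamp-before-copy discipline; the function name and the 1536-byte size are illustrative, not the driver's API.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

//Multiple of 4, like the driver's AM335X_ETH_RX_BUFFER_SIZE
#define RX_BUFFER_SIZE 1536

//Copy a received frame whose length comes from a DMA descriptor, treating
//that length as untrusted input
static size_t rxCopyValidated(uint8_t *dst, const uint8_t *src,
   size_t descLen)
{
   //Clamp the descriptor-reported length to the real buffer size
   size_t n = (descLen < RX_BUFFER_SIZE) ? descLen : RX_BUFFER_SIZE;

   //Word-align the copy length, as the driver does with (n + 3) & ~3UL;
   //this stays in bounds only because RX_BUFFER_SIZE is a multiple of 4
   memcpy(dst, src, (n + 3) & ~(size_t) 3);

   //Hand the unpadded length to the upper layer
   return n;
}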
am335x_eth_driver.c
am335xEthDeleteVlanAddrEntry
/** * @file am335x_eth_driver.c * @brief Sitara AM335x Gigabit Ethernet MAC driver * * @section License * * SPDX-License-Identifier: GPL-2.0-or-later * * Copyright (C) 2010-2020 Oryx Embedded SARL. All rights reserved. * * This file is part of CycloneTCP Open. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. * * @author Oryx Embedded SARL (www.oryx-embedded.com) * @version 2.0.0 **/ //Switch to the appropriate trace level #define TRACE_LEVEL NIC_TRACE_LEVEL //Dependencies #include "soc_am335x.h" #include "hw_types.h" #include "hw_cm_per.h" #include "hw_control_am335x.h" #include "hw_cpsw_ale.h" #include "hw_cpsw_cpdma.h" #include "hw_cpsw_port.h" #include "hw_cpsw_sl.h" #include "hw_cpsw_ss.h" #include "hw_cpsw_wr.h" #include "hw_mdio.h" #include "interrupt.h" #include "core/net.h" #include "drivers/mac/am335x_eth_driver.h" #include "debug.h" //MDIO input clock frequency #define MDIO_INPUT_CLK 125000000 //MDIO output clock frequency #define MDIO_OUTPUT_CLK 1000000 //Underlying network interface (port 1) static NetInterface *nicDriverInterface1 = NULL; //Underlying network interface (port 2) static NetInterface *nicDriverInterface2 = NULL; //IAR EWARM compiler? #if defined(__ICCARM__) //Transmit buffer (port 1) #pragma data_alignment = 4 #pragma location = AM335X_ETH_RAM_SECTION static uint8_t txBuffer1[AM335X_ETH_TX_BUFFER_COUNT][AM335X_ETH_TX_BUFFER_SIZE]; //Transmit buffer (port 2) #pragma data_alignment = 4 #pragma location = AM335X_ETH_RAM_SECTION static uint8_t txBuffer2[AM335X_ETH_TX_BUFFER_COUNT][AM335X_ETH_TX_BUFFER_SIZE]; //Receive buffer #pragma data_alignment = 4 #pragma location = AM335X_ETH_RAM_SECTION static uint8_t rxBuffer[AM335X_ETH_RX_BUFFER_COUNT][AM335X_ETH_RX_BUFFER_SIZE]; //Transmit buffer descriptors (port 1) #pragma data_alignment = 4 #pragma location = AM335X_ETH_RAM_CPPI_SECTION static Am335xTxBufferDesc txBufferDesc1[AM335X_ETH_TX_BUFFER_COUNT]; //Transmit buffer descriptors (port 2) #pragma data_alignment = 4 #pragma location = AM335X_ETH_RAM_CPPI_SECTION static Am335xTxBufferDesc txBufferDesc2[AM335X_ETH_TX_BUFFER_COUNT]; //Receive buffer descriptors #pragma data_alignment = 4 #pragma location = AM335X_ETH_RAM_CPPI_SECTION static Am335xRxBufferDesc rxBufferDesc[AM335X_ETH_RX_BUFFER_COUNT]; //Keil MDK-ARM or GCC compiler? 
#else //Transmit buffer (port 1) static uint8_t txBuffer1[AM335X_ETH_TX_BUFFER_COUNT][AM335X_ETH_TX_BUFFER_SIZE] __attribute__((aligned(4), __section__(AM335X_ETH_RAM_SECTION))); //Transmit buffer (port 2) static uint8_t txBuffer2[AM335X_ETH_TX_BUFFER_COUNT][AM335X_ETH_TX_BUFFER_SIZE] __attribute__((aligned(4), __section__(AM335X_ETH_RAM_SECTION))); //Receive buffer static uint8_t rxBuffer[AM335X_ETH_RX_BUFFER_COUNT][AM335X_ETH_RX_BUFFER_SIZE] __attribute__((aligned(4), __section__(AM335X_ETH_RAM_SECTION))); //Transmit buffer descriptors (port 1) static Am335xTxBufferDesc txBufferDesc1[AM335X_ETH_TX_BUFFER_COUNT] __attribute__((aligned(4), __section__(AM335X_ETH_RAM_CPPI_SECTION))); //Transmit buffer descriptors (port 2) static Am335xTxBufferDesc txBufferDesc2[AM335X_ETH_TX_BUFFER_COUNT] __attribute__((aligned(4), __section__(AM335X_ETH_RAM_CPPI_SECTION))); //Receive buffer descriptors static Am335xRxBufferDesc rxBufferDesc[AM335X_ETH_RX_BUFFER_COUNT] __attribute__((aligned(4), __section__(AM335X_ETH_RAM_CPPI_SECTION))); #endif //Pointer to the current TX buffer descriptor (port1) static Am335xTxBufferDesc *txCurBufferDesc1; //Pointer to the current TX buffer descriptor (port 2) static Am335xTxBufferDesc *txCurBufferDesc2; //Pointer to the current RX buffer descriptor static Am335xRxBufferDesc *rxCurBufferDesc; /** * @brief AM335x Ethernet MAC driver (port1) **/ const NicDriver am335xEthPort1Driver = { NIC_TYPE_ETHERNET, ETH_MTU, am335xEthInitPort1, am335xEthTick, am335xEthEnableIrq, am335xEthDisableIrq, am335xEthEventHandler, am335xEthSendPacketPort1, am335xEthUpdateMacAddrFilter, am335xEthUpdateMacConfig, am335xEthWritePhyReg, am335xEthReadPhyReg, FALSE, TRUE, TRUE, FALSE }; /** * @brief AM335x Ethernet MAC driver (port2) **/ const NicDriver am335xEthPort2Driver = { NIC_TYPE_ETHERNET, ETH_MTU, am335xEthInitPort2, am335xEthTick, am335xEthEnableIrq, am335xEthDisableIrq, am335xEthEventHandler, am335xEthSendPacketPort2, am335xEthUpdateMacAddrFilter, am335xEthUpdateMacConfig, am335xEthWritePhyReg, am335xEthReadPhyReg, FALSE, TRUE, TRUE, FALSE }; /** * @brief AM335x Ethernet MAC initialization (port 1) * @param[in] interface Underlying network interface * @return Error code **/ error_t am335xEthInitPort1(NetInterface *interface) { error_t error; uint32_t temp; //Debug message TRACE_INFO("Initializing AM335x Ethernet MAC (port 1)...\r\n"); //Initialize CPSW instance am335xEthInitInstance(interface); //Save underlying network interface nicDriverInterface1 = interface; //Valid Ethernet PHY or switch driver? if(interface->phyDriver != NULL) { //Ethernet PHY initialization error = interface->phyDriver->init(interface); } else if(interface->switchDriver != NULL) { //Ethernet switch initialization error = interface->switchDriver->init(interface); } else { //The interface is not properly configured error = ERROR_FAILURE; } //Any error to report? if(error) { return error; } //Unspecified MAC address? 
if(macCompAddr(&interface->macAddr, &MAC_UNSPECIFIED_ADDR)) { //Use the factory preprogrammed MAC address interface->macAddr.b[0] = CONTROL_MAC_ID_HI_R(0) >> CONTROL_MAC_ID0_HI_MACADDR_47_40_SHIFT; interface->macAddr.b[1] = CONTROL_MAC_ID_HI_R(0) >> CONTROL_MAC_ID0_HI_MACADDR_39_32_SHIFT; interface->macAddr.b[2] = CONTROL_MAC_ID_HI_R(0) >> CONTROL_MAC_ID0_HI_MACADDR_31_24_SHIFT; interface->macAddr.b[3] = CONTROL_MAC_ID_HI_R(0) >> CONTROL_MAC_ID0_HI_MACADDR_23_16_SHIFT; interface->macAddr.b[4] = CONTROL_MAC_ID_LO_R(0) >> CONTROL_MAC_ID0_LO_MACADDR_15_8_SHIFT; interface->macAddr.b[5] = CONTROL_MAC_ID_LO_R(0) >> CONTROL_MAC_ID0_LO_MACADDR_7_0_SHIFT; //Generate the 64-bit interface identifier macAddrToEui64(&interface->macAddr, &interface->eui64); } //Set port state to forward temp = CPSW_ALE_PORTCTL_R(1) & ~CPSW_ALE_PORTCTL1_PORT_STATE; CPSW_ALE_PORTCTL_R(1) = temp | CPSW_ALE_PORTCTL_PORT_STATE_FORWARD; //Set the MAC address of the station CPSW_PORT1_SA_HI_R = interface->macAddr.w[0] | (interface->macAddr.w[1] << 16); CPSW_PORT1_SA_LO_R = interface->macAddr.w[2]; //Configure VLAN identifier and VLAN priority CPSW_PORT1_PORT_VLAN_R = (0 << CPSW_PORT_P1_PORT_VLAN_PORT_PRI_SHIFT) | (CPSW_PORT1 << CPSW_PORT_P1_PORT_VLAN_PORT_VID_SHIFT); //Add a VLAN entry in the ALE table am335xEthAddVlanEntry(CPSW_PORT1, CPSW_PORT1); //Add a VLAN/unicast address entry in the ALE table am335xEthAddVlanAddrEntry(CPSW_PORT1, CPSW_PORT1, &interface->macAddr); //Enable CPSW statistics CPSW_SS_STAT_PORT_EN_R |= CPSW_SS_STAT_PORT_EN_P1_STAT_EN; //Enable TX and RX CPSW_SL1_MACCONTROL_R = CPSW_SL_MACCONTROL_GMII_EN; //Accept any packets from the upper layer osSetEvent(&interface->nicTxEvent); //Successful initialization return NO_ERROR; } /** * @brief AM335x Ethernet MAC initialization (port 2) * @param[in] interface Underlying network interface * @return Error code **/ error_t am335xEthInitPort2(NetInterface *interface) { error_t error; uint32_t temp; //Debug message TRACE_INFO("Initializing AM335x Ethernet MAC (port 2)...\r\n"); //Initialize CPSW instance am335xEthInitInstance(interface); //Save underlying network interface nicDriverInterface2 = interface; //PHY transceiver initialization error = interface->phyDriver->init(interface); //Failed to initialize PHY transceiver? if(error) { return error; } //Unspecified MAC address? 
if(macCompAddr(&interface->macAddr, &MAC_UNSPECIFIED_ADDR)) { //Use the factory preprogrammed MAC address interface->macAddr.b[0] = CONTROL_MAC_ID_HI_R(1) >> CONTROL_MAC_ID1_HI_MACADDR_47_40_SHIFT; interface->macAddr.b[1] = CONTROL_MAC_ID_HI_R(1) >> CONTROL_MAC_ID1_HI_MACADDR_39_32_SHIFT; interface->macAddr.b[2] = CONTROL_MAC_ID_HI_R(1) >> CONTROL_MAC_ID1_HI_MACADDR_31_24_SHIFT; interface->macAddr.b[3] = CONTROL_MAC_ID_HI_R(1) >> CONTROL_MAC_ID1_HI_MACADDR_23_16_SHIFT; interface->macAddr.b[4] = CONTROL_MAC_ID_LO_R(1) >> CONTROL_MAC_ID1_LO_MACADDR_15_8_SHIFT; interface->macAddr.b[5] = CONTROL_MAC_ID_LO_R(1) >> CONTROL_MAC_ID1_LO_MACADDR_7_0_SHIFT; //Generate the 64-bit interface identifier macAddrToEui64(&interface->macAddr, &interface->eui64); } //Set port state to forward temp = CPSW_ALE_PORTCTL_R(2) & ~CPSW_ALE_PORTCTL2_PORT_STATE; CPSW_ALE_PORTCTL_R(2) = temp | CPSW_ALE_PORTCTL_PORT_STATE_FORWARD; //Set the MAC address of the station CPSW_PORT2_SA_HI_R = interface->macAddr.w[0] | (interface->macAddr.w[1] << 16); CPSW_PORT2_SA_LO_R = interface->macAddr.w[2]; //Configure VLAN identifier and VLAN priority CPSW_PORT2_PORT_VLAN_R = (0 << CPSW_PORT_P2_PORT_VLAN_PORT_PRI_SHIFT) | (CPSW_PORT2 << CPSW_PORT_P2_PORT_VLAN_PORT_VID_SHIFT); //Add a VLAN entry in the ALE table am335xEthAddVlanEntry(CPSW_PORT2, CPSW_PORT2); //Add a VLAN/unicast address entry in the ALE table am335xEthAddVlanAddrEntry(CPSW_PORT2, CPSW_PORT2, &interface->macAddr); //Enable CPSW statistics CPSW_SS_STAT_PORT_EN_R |= CPSW_SS_STAT_PORT_EN_P2_STAT_EN; //Enable TX and RX CPSW_SL2_MACCONTROL_R = CPSW_SL_MACCONTROL_GMII_EN; //Accept any packets from the upper layer osSetEvent(&interface->nicTxEvent); //Successful initialization return NO_ERROR; } /** * @brief Initialize CPSW instance * @param[in] interface Underlying network interface **/ void am335xEthInitInstance(NetInterface *interface) { uint_t i; uint32_t temp; #ifdef ti_sysbios_BIOS___VERS Hwi_Params hwiParams; #endif //Initialization sequence is performed once if(nicDriverInterface1 == NULL && nicDriverInterface2 == NULL) { //Select the interface mode (MII/RMII/RGMII) and configure pin muxing am335xEthInitGpio(interface); //Enable the CPSW subsystem clocks CM_PER_CPGMAC0_CLKCTRL_R = CM_PER_CPGMAC0_CLKCTRL_MODULEMODE_ENABLE; //Wait for the CPSW module to be fully functional do { //Get module idle status temp = (CM_PER_CPGMAC0_CLKCTRL_R & CM_PER_CPGMAC0_CLKCTRL_IDLEST) >> CM_PER_CPGMAC0_CLKCTRL_IDLEST_SHIFT; //Keep looping as long as the module is not fully functional } while(temp != CM_PER_CPGMAC0_CLKCTRL_IDLEST_FUNC); //Start a software forced wake-up transition CM_PER_CPSW_CLKSTCTRL_R = CM_PER_CPSW_CLKSTCTRL_CLKTRCTRL_SW_WKUP; //Wait for the CPSW 125 MHz OCP clock to be active do { //Get the state of the CPSW 125 MHz OCP clock temp = (CM_PER_CPSW_CLKSTCTRL_R & CM_PER_CPSW_CLKSTCTRL_CLKACTIVITY_CPSW_125MHZ_GCLK) >> CM_PER_CPSW_CLKSTCTRL_CLKACTIVITY_CPSW_125MHZ_GCLK_SHIFT; //Keep looping as long as the clock is inactive } while(temp != CM_PER_CPSW_CLKSTCTRL_CLKACTIVITY_CPSW_125MHZ_GCLK_ACT); //Reset CPSW subsystem CPSW_SS_SOFT_RESET_R = CPSW_SS_SOFT_RESET_SOFT_RESET; //Wait for the reset to complete while((CPSW_SS_SOFT_RESET_R & CPSW_SS_SOFT_RESET_SOFT_RESET) != 0) { } //Reset CPSW wrapper module CPSW_WR_SOFT_RESET_R = CPSW_WR_SOFT_RESET_SOFT_RESET; //Wait for the reset to complete while((CPSW_WR_SOFT_RESET_R & CPSW_WR_SOFT_RESET_SOFT_RESET) != 0) { } //Reset CPSW sliver 1 logic CPSW_SL1_SOFT_RESET_R = CPSW_SL_SOFT_RESET_SOFT_RESET; //Wait for the reset to complete 
while((CPSW_SL1_SOFT_RESET_R & CPSW_SL_SOFT_RESET_SOFT_RESET) != 0) { } //Reset CPSW sliver 2 logic CPSW_SL2_SOFT_RESET_R = CPSW_SL_SOFT_RESET_SOFT_RESET; //Wait for the reset to complete while((CPSW_SL2_SOFT_RESET_R & CPSW_SL_SOFT_RESET_SOFT_RESET) != 0) { } //Reset CPSW CPDMA module CPSW_CPDMA_CPDMA_SOFT_RESET_R = CPSW_CPDMA_CPDMA_SOFT_RESET_SOFT_RESET; //Wait for the reset to complete while((CPSW_CPDMA_CPDMA_SOFT_RESET_R & CPSW_CPDMA_CPDMA_SOFT_RESET_SOFT_RESET) != 0) { } //Initialize the HDPs and the CPs to NULL for(i = CPSW_CH0; i <= CPSW_CH7; i++) { //TX head descriptor pointer CPSW_CPDMA_TX_HDP_R(i) = 0; //TX completion pointer CPSW_CPDMA_TX_CP_R(i) = 0; //RX head descriptor pointer CPSW_CPDMA_RX_HDP_R(i) = 0; //RX completion pointer CPSW_CPDMA_RX_CP_R(i) = 0; } //Enable ALE and clear ALE address table CPSW_ALE_CONTROL_R = CPSW_ALE_CONTROL_ENABLE_ALE | CPSW_ALE_CONTROL_CLEAR_TABLE; //For dual MAC mode, configure VLAN aware mode CPSW_ALE_CONTROL_R |= CPSW_ALE_CONTROL_ALE_VLAN_AWARE; //Set dual MAC mode for port 0 temp = CPSW_PORT0_TX_IN_CTL_R & ~CPSW_PORT_P0_TX_IN_CTL_TX_IN_SEL; CPSW_PORT0_TX_IN_CTL_R = temp | CPSW_PORT_P0_TX_IN_CTL_TX_IN_DUAL_MAC; //Set port 0 state to forward temp = CPSW_ALE_PORTCTL_R(0) & ~CPSW_ALE_PORTCTL0_PORT_STATE; CPSW_ALE_PORTCTL_R(0) = temp | CPSW_ALE_PORTCTL_PORT_STATE_FORWARD; //Enable CPSW statistics CPSW_SS_STAT_PORT_EN_R = CPSW_SS_STAT_PORT_EN_P0_STAT_EN; //Configure TX and RX buffer descriptors am335xEthInitBufferDesc(interface); //Acknowledge TX and interrupts for proper interrupt pulsing CPSW_CPDMA_CPDMA_EOI_VECTOR_R = CPSW_CPDMA_EOI_VECTOR_TX_PULSE; CPSW_CPDMA_CPDMA_EOI_VECTOR_R = CPSW_CPDMA_EOI_VECTOR_RX_PULSE; //Enable channel 1 and 2 interrupts of the DMA engine CPSW_CPDMA_TX_INTMASK_SET_R = (1 << CPSW_CH1) | (1 << CPSW_CH2); //Enable TX completion interrupts CPSW_WR_C_TX_EN_R(CPSW_CORE0) |= (1 << CPSW_CH1) | (1 << CPSW_CH2); //Enable channel 0 interrupts of the DMA engine CPSW_CPDMA_RX_INTMASK_SET_R = (1 << CPSW_CH0); //Enable RX completion interrupts CPSW_WR_C_RX_EN_R(CPSW_CORE0) |= (1 << CPSW_CH0); #ifdef ti_sysbios_BIOS___VERS //Configure TX interrupt Hwi_Params_init(&hwiParams); hwiParams.enableInt = FALSE; hwiParams.priority = AM335X_ETH_IRQ_PRIORITY; //Register TX interrupt handler Hwi_create(SYS_INT_3PGSWTXINT0, (Hwi_FuncPtr) am335xEthTxIrqHandler, &hwiParams, NULL); //Configure RX interrupt Hwi_Params_init(&hwiParams); hwiParams.enableInt = FALSE; hwiParams.priority = AM335X_ETH_IRQ_PRIORITY; //Register RX interrupt handler Hwi_create(SYS_INT_3PGSWRXINT0, (Hwi_FuncPtr) am335xEthRxIrqHandler, &hwiParams, NULL); #else //Register interrupt handlers IntRegister(SYS_INT_3PGSWTXINT0, am335xEthTxIrqHandler); IntRegister(SYS_INT_3PGSWRXINT0, am335xEthRxIrqHandler); //Configure TX interrupt priority IntPrioritySet(SYS_INT_3PGSWTXINT0, AM335X_ETH_IRQ_PRIORITY, AINTC_HOSTINT_ROUTE_IRQ); //Configure RX interrupt priority IntPrioritySet(SYS_INT_3PGSWRXINT0, AM335X_ETH_IRQ_PRIORITY, AINTC_HOSTINT_ROUTE_IRQ); #endif //Enable the transmission and reception CPSW_CPDMA_TX_CONTROL_R = CPSW_CPDMA_TX_CONTROL_TX_EN; CPSW_CPDMA_RX_CONTROL_R = CPSW_CPDMA_RX_CONTROL_RX_EN; //Calculate the MDC clock divider to be used temp = (MDIO_INPUT_CLK / MDIO_OUTPUT_CLK) - 1; //Initialize MDIO interface MDIO_CONTROL_R = MDIO_CONTROL_ENABLE | MDIO_CONTROL_FAULTENB | (temp & MDIO_CONTROL_CLKDIV); } } //BeagleBone Black, TMDSSK3358, OSD3358-SM-RED or SBC DIVA board? 
#if defined(USE_BEAGLEBONE_BLACK) || defined(USE_TMDSSK3358) || \ defined(USE_OSD3358_SM_RED) || defined(USE_SBC_DIVA) /** * @brief GPIO configuration * @param[in] interface Underlying network interface **/ void am335xEthInitGpio(NetInterface *interface) { //BeagleBone Black board? #if defined(USE_BEAGLEBONE_BLACK) //Select MII interface mode for port 1 CONTROL_GMII_SEL_R = CONTROL_GMII_SEL_GMII1_SEL_MII; //Configure MII1_TX_CLK (GPIO3_9) CONTROL_CONF_MII1_TXCLK_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(0); //Configure MII1_TX_EN (GPIO3_3) CONTROL_CONF_MII1_TXEN_R = CONTROL_CONF_MUXMODE(0); //Configure MII1_TXD0 (GPIO0_28) CONTROL_CONF_MII1_TXD0_R = CONTROL_CONF_MUXMODE(0); //Configure MII1_TXD1 (GPIO0_21) CONTROL_CONF_MII1_TXD1_R = CONTROL_CONF_MUXMODE(0); //Configure MII1_TXD2 (GPIO0_17) CONTROL_CONF_MII1_TXD2_R = CONTROL_CONF_MUXMODE(0); //Configure MII1_TXD3 (GPIO0_16) CONTROL_CONF_MII1_TXD3_R = CONTROL_CONF_MUXMODE(0); //Configure MII1_RX_CLK (GPIO3_10) CONTROL_CONF_MII1_RXCLK_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(0); //Configure MII1_RXD0 (GPIO2_21) CONTROL_CONF_MII1_RXD0_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(0); //Configure MII1_RXD1 (GPIO2_20) CONTROL_CONF_MII1_RXD1_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(0); //Configure MII1_RXD2 (GPIO2_19) CONTROL_CONF_MII1_RXD2_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(0); //Configure MII1_RXD3 (GPIO2_18) CONTROL_CONF_MII1_RXD3_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(0); //Configure MII1_COL (GPIO3_0) CONTROL_CONF_MII1_COL_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(0); //Configure MII1_CRS (GPIO3_1) CONTROL_CONF_MII1_CRS_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(0); //Configure MII1_RX_ER (GPIO3_2) CONTROL_CONF_MII1_RXERR_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(0); //Configure MII1_RX_DV (GPIO3_4) CONTROL_CONF_MII1_RXDV_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(0); //Configure MDIO (GPIO0_0) CONTROL_CONF_MDIO_DATA_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_PULLUPSEL | CONTROL_CONF_MUXMODE(0); //Configure MDC (GPIO0_1) CONTROL_CONF_MDIO_CLK_R = CONTROL_CONF_PULLUPSEL | CONTROL_CONF_MUXMODE(0); //TMDSSK3358 board? 
#elif defined(USE_TMDSSK3358) //Select RGMII interface mode for both port 1 and port 2 CONTROL_GMII_SEL_R = CONTROL_GMII_SEL_GMII1_SEL_RGMII | CONTROL_GMII_SEL_GMII2_SEL_RGMII; //Configure RGMII1_TCLK (GPIO3_9) CONTROL_CONF_MII1_TXCLK_R = CONTROL_CONF_MUXMODE(2); //Configure RGMII1_TCTL (GPIO3_3) CONTROL_CONF_MII1_TXEN_R = CONTROL_CONF_MUXMODE(2); //Configure RGMII1_TD0 (GPIO0_28) CONTROL_CONF_MII1_TXD0_R = CONTROL_CONF_MUXMODE(2); //Configure RGMII1_TD1 (GPIO0_21) CONTROL_CONF_MII1_TXD1_R = CONTROL_CONF_MUXMODE(2); //Configure RGMII1_TD2 (GPIO0_17) CONTROL_CONF_MII1_TXD2_R = CONTROL_CONF_MUXMODE(2); //Configure RGMII1_TD3 (GPIO0_16) CONTROL_CONF_MII1_TXD3_R = CONTROL_CONF_MUXMODE(2); //Configure RGMII1_RCLK (GPIO3_10) CONTROL_CONF_MII1_RXCLK_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure RGMII1_RCTL (GPIO3_4) CONTROL_CONF_MII1_RXDV_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure RGMII1_RD0 (GPIO2_21) CONTROL_CONF_MII1_RXD0_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure /RGMII1_RD1 (GPIO2_20) CONTROL_CONF_MII1_RXD1_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure RGMII1_RD2 (GPIO2_19) CONTROL_CONF_MII1_RXD2_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure RGMII1_RD3 (GPIO2_18) CONTROL_CONF_MII1_RXD3_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure RGMII2_TCLK (GPIO1_22/GPMC_A6) CONTROL_CONF_GPMC_A_R(6) = CONTROL_CONF_MUXMODE(2); //Configure RGMII2_TCTL (GPIO1_16/GPMC_A0) CONTROL_CONF_GPMC_A_R(0) = CONTROL_CONF_MUXMODE(2); //Configure RGMII2_TD0 (GPIO1_21/GPMC_A5) CONTROL_CONF_GPMC_A_R(5) = CONTROL_CONF_MUXMODE(2); //Configure RGMII2_TD1 (GPIO1_20/GPMC_A4) CONTROL_CONF_GPMC_A_R(4) = CONTROL_CONF_MUXMODE(2); //Configure RGMII2_TD2 (GPIO1_19/GPMC_A3) CONTROL_CONF_GPMC_A_R(3) = CONTROL_CONF_MUXMODE(2); //Configure RGMII2_TD3 (GPIO1_18/GPMC_A2) CONTROL_CONF_GPMC_A_R(2) = CONTROL_CONF_MUXMODE(2); //Configure RGMII2_RCLK (GPIO1_23/GPMC_A7) CONTROL_CONF_GPMC_A_R(7) = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure RGMII2_RCTL (GPIO1_17/GPMC_A1) CONTROL_CONF_GPMC_A_R(1) = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure RGMII2_RD0 (GPIO1_27/GPMC_A11) CONTROL_CONF_GPMC_A_R(11) = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure RGMII2_RD1 (GPIO1_26/GPMC_A10) CONTROL_CONF_GPMC_A_R(10) = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure RGMII2_RD2 (GPIO1_25/GPMC_A9) CONTROL_CONF_GPMC_A_R(9) = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure RGMII2_RD3 (GPIO1_24/GPMC_A8) CONTROL_CONF_GPMC_A_R(8) = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure MDIO (GPIO0_0) CONTROL_CONF_MDIO_DATA_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_PULLUPSEL | CONTROL_CONF_MUXMODE(0); //Configure MDC (GPIO0_1) CONTROL_CONF_MDIO_CLK_R = CONTROL_CONF_PULLUPSEL | CONTROL_CONF_MUXMODE(0); //OSD3358-SM-RED board? 
#elif defined(USE_OSD3358_SM_RED) //Select RGMII interface mode for both port 1 CONTROL_GMII_SEL_R = CONTROL_GMII_SEL_GMII1_SEL_RGMII; //Configure RGMII1_TCLK (GPIO3_9) CONTROL_CONF_MII1_TXCLK_R = CONTROL_CONF_MUXMODE(2); //Configure RGMII1_TCTL (GPIO3_3) CONTROL_CONF_MII1_TXEN_R = CONTROL_CONF_MUXMODE(2); //Configure RGMII1_TD0 (GPIO0_28) CONTROL_CONF_MII1_TXD0_R = CONTROL_CONF_MUXMODE(2); //Configure RGMII1_TD1 (GPIO0_21) CONTROL_CONF_MII1_TXD1_R = CONTROL_CONF_MUXMODE(2); //Configure RGMII1_TD2 (GPIO0_17) CONTROL_CONF_MII1_TXD2_R = CONTROL_CONF_MUXMODE(2); //Configure RGMII1_TD3 (GPIO0_16) CONTROL_CONF_MII1_TXD3_R = CONTROL_CONF_MUXMODE(2); //Configure RGMII1_RCLK (GPIO3_10) CONTROL_CONF_MII1_RXCLK_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure RGMII1_RCTL (GPIO3_4) CONTROL_CONF_MII1_RXDV_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure RGMII1_RD0 (GPIO2_21) CONTROL_CONF_MII1_RXD0_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure /RGMII1_RD1 (GPIO2_20) CONTROL_CONF_MII1_RXD1_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure RGMII1_RD2 (GPIO2_19) CONTROL_CONF_MII1_RXD2_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure RGMII1_RD3 (GPIO2_18) CONTROL_CONF_MII1_RXD3_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure MDIO (GPIO0_0) CONTROL_CONF_MDIO_DATA_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_PULLUPSEL | CONTROL_CONF_MUXMODE(0); //Configure MDC (GPIO0_1) CONTROL_CONF_MDIO_CLK_R = CONTROL_CONF_PULLUPSEL | CONTROL_CONF_MUXMODE(0); //SBC DIVA board? #elif defined(USE_SBC_DIVA) //Select RMII interface mode for port 1 and RGMII interface mode for port 2 CONTROL_GMII_SEL_R = CONTROL_GMII_SEL_RMII1_IO_CLK_EN | CONTROL_GMII_SEL_GMII1_SEL_RMII | CONTROL_GMII_SEL_GMII2_SEL_RGMII; //Configure RMII1_REF_CLK (GPIO0_29) CONTROL_CONF_RMII1_REFCLK_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(0); //Configure RMII1_TX_EN (GPIO3_3) CONTROL_CONF_MII1_TXEN_R = CONTROL_CONF_MUXMODE(1); //Configure RMII1_TXD0 (GPIO0_28) CONTROL_CONF_MII1_TXD0_R = CONTROL_CONF_MUXMODE(1); //Configure RMII1_TXD1 (GPIO0_21) CONTROL_CONF_MII1_TXD1_R = CONTROL_CONF_MUXMODE(1); //Configure RMII1_RXD0 (GPIO2.21) CONTROL_CONF_MII1_RXD0_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(1); //Configure RMII1_RXD1 (GPIO2.20) CONTROL_CONF_MII1_RXD1_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(1); //Configure RMII1_CRS_DV (GPIO3_1) CONTROL_CONF_MII1_CRS_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(1); //Configure RMII1_RX_ER (GPIO3_2) CONTROL_CONF_MII1_RXERR_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(1); //Configure RGMII2_TCLK (GPIO1_22/GPMC_A6) CONTROL_CONF_GPMC_A_R(6) = CONTROL_CONF_MUXMODE(2); //Configure RGMII2_TCTL (GPIO1_16/GPMC_A0) CONTROL_CONF_GPMC_A_R(0) = CONTROL_CONF_MUXMODE(2); //Configure RGMII2_TD0 (GPIO1_21/GPMC_A5) CONTROL_CONF_GPMC_A_R(5) = CONTROL_CONF_MUXMODE(2); //Configure RGMII2_TD1 (GPIO1_20/GPMC_A4) CONTROL_CONF_GPMC_A_R(4) = CONTROL_CONF_MUXMODE(2); //Configure RGMII2_TD2 (GPIO1_19/GPMC_A3) CONTROL_CONF_GPMC_A_R(3) = CONTROL_CONF_MUXMODE(2); //Configure RGMII2_TD3 (GPIO1_18/GPMC_A2) CONTROL_CONF_GPMC_A_R(2) = CONTROL_CONF_MUXMODE(2); //Configure RGMII2_RCLK (GPIO1_23/GPMC_A7) CONTROL_CONF_GPMC_A_R(7) = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure RGMII2_RCTL (GPIO1_17/GPMC_A1) CONTROL_CONF_GPMC_A_R(1) = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure RGMII2_RD0 (GPIO1_27/GPMC_A11) CONTROL_CONF_GPMC_A_R(11) = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure RGMII2_RD1 
(GPIO1_26/GPMC_A10) CONTROL_CONF_GPMC_A_R(10) = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure RGMII2_RD2 (GPIO1_25/GPMC_A9) CONTROL_CONF_GPMC_A_R(9) = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure RGMII2_RD3 (GPIO1_24/GPMC_A8) CONTROL_CONF_GPMC_A_R(8) = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure MDIO (GPIO0_0) CONTROL_CONF_MDIO_DATA_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_PULLUPSEL | CONTROL_CONF_MUXMODE(0); //Configure MDC (GPIO0_1) CONTROL_CONF_MDIO_CLK_R = CONTROL_CONF_PULLUPSEL | CONTROL_CONF_MUXMODE(0); #endif } #endif /** * @brief Initialize buffer descriptor lists * @param[in] interface Underlying network interface **/ void am335xEthInitBufferDesc(NetInterface *interface) { uint_t i; uint_t nextIndex; uint_t prevIndex; //Initialize TX buffer descriptor list (port 1) for(i = 0; i < AM335X_ETH_TX_BUFFER_COUNT; i++) { //Index of the next buffer nextIndex = (i + 1) % AM335X_ETH_TX_BUFFER_COUNT; //Index of the previous buffer prevIndex = (i + AM335X_ETH_TX_BUFFER_COUNT - 1) % AM335X_ETH_TX_BUFFER_COUNT; //Next descriptor pointer txBufferDesc1[i].word0 = (uint32_t) NULL; //Buffer pointer txBufferDesc1[i].word1 = (uint32_t) txBuffer1[i]; //Buffer offset and buffer length txBufferDesc1[i].word2 = 0; //Status flags and packet length txBufferDesc1[i].word3 = 0; //Form a doubly linked list txBufferDesc1[i].next = &txBufferDesc1[nextIndex]; txBufferDesc1[i].prev = &txBufferDesc1[prevIndex]; } //Point to the very first descriptor txCurBufferDesc1 = &txBufferDesc1[0]; //Mark the end of the queue txCurBufferDesc1->prev->word3 = CPSW_TX_WORD3_SOP | CPSW_TX_WORD3_EOP | CPSW_TX_WORD3_EOQ; //Initialize TX buffer descriptor list (port 2) for(i = 0; i < AM335X_ETH_TX_BUFFER_COUNT; i++) { //Index of the next buffer nextIndex = (i + 1) % AM335X_ETH_TX_BUFFER_COUNT; //Index of the previous buffer prevIndex = (i + AM335X_ETH_TX_BUFFER_COUNT - 1) % AM335X_ETH_TX_BUFFER_COUNT; //Next descriptor pointer txBufferDesc2[i].word0 = (uint32_t) NULL; //Buffer pointer txBufferDesc2[i].word1 = (uint32_t) txBuffer2[i]; //Buffer offset and buffer length txBufferDesc2[i].word2 = 0; //Status flags and packet length txBufferDesc2[i].word3 = 0; //Form a doubly linked list txBufferDesc2[i].next = &txBufferDesc2[nextIndex]; txBufferDesc2[i].prev = &txBufferDesc2[prevIndex]; } //Point to the very first descriptor txCurBufferDesc2 = &txBufferDesc2[0]; //Mark the end of the queue txCurBufferDesc2->prev->word3 = CPSW_TX_WORD3_SOP | CPSW_TX_WORD3_EOP | CPSW_TX_WORD3_EOQ; //Initialize RX buffer descriptor list for(i = 0; i < AM335X_ETH_RX_BUFFER_COUNT; i++) { //Index of the next buffer nextIndex = (i + 1) % AM335X_ETH_RX_BUFFER_COUNT; //Index of the previous buffer prevIndex = (i + AM335X_ETH_RX_BUFFER_COUNT - 1) % AM335X_ETH_RX_BUFFER_COUNT; //Next descriptor pointer rxBufferDesc[i].word0 = (uint32_t) &rxBufferDesc[nextIndex]; //Buffer pointer rxBufferDesc[i].word1 = (uint32_t) rxBuffer[i]; //Buffer offset and buffer length rxBufferDesc[i].word2 = AM335X_ETH_RX_BUFFER_SIZE; //Status flags and packet length rxBufferDesc[i].word3 = CPSW_RX_WORD3_OWNER; //Form a doubly linked list rxBufferDesc[i].next = &rxBufferDesc[nextIndex]; rxBufferDesc[i].prev = &rxBufferDesc[prevIndex]; } //Point to the very first descriptor rxCurBufferDesc = &rxBufferDesc[0]; //Mark the end of the queue rxCurBufferDesc->prev->word0 = (uint32_t) NULL; //Write the RX DMA head descriptor pointer CPSW_CPDMA_RX_HDP_R(CPSW_CH0) = (uint32_t) rxCurBufferDesc; } /** * @brief AM335x Ethernet MAC timer handler * * This 
routine is periodically called by the TCP/IP stack to handle periodic * operations such as polling the link state * * @param[in] interface Underlying network interface **/ void am335xEthTick(NetInterface *interface) { //Valid Ethernet PHY or switch driver? if(interface->phyDriver != NULL) { //Handle periodic operations interface->phyDriver->tick(interface); } else if(interface->switchDriver != NULL) { //Handle periodic operations interface->switchDriver->tick(interface); } else { //Just for sanity } //Misqueued buffer condition? if((rxCurBufferDesc->word3 & CPSW_RX_WORD3_OWNER) != 0) { if(CPSW_CPDMA_RX_HDP_R(CPSW_CH0) == 0) { //The host acts on the misqueued buffer condition by writing the added //buffer descriptor address to the appropriate RX DMA head descriptor //pointer CPSW_CPDMA_RX_HDP_R(CPSW_CH0) = (uint32_t) rxCurBufferDesc; } } } /** * @brief Enable interrupts * @param[in] interface Underlying network interface **/ void am335xEthEnableIrq(NetInterface *interface) { #ifdef ti_sysbios_BIOS___VERS //Enable Ethernet MAC interrupts Hwi_enableInterrupt(SYS_INT_3PGSWTXINT0); Hwi_enableInterrupt(SYS_INT_3PGSWRXINT0); #else //Enable Ethernet MAC interrupts IntSystemEnable(SYS_INT_3PGSWTXINT0); IntSystemEnable(SYS_INT_3PGSWRXINT0); #endif //Valid Ethernet PHY or switch driver? if(interface->phyDriver != NULL) { //Enable Ethernet PHY interrupts interface->phyDriver->enableIrq(interface); } else if(interface->switchDriver != NULL) { //Enable Ethernet switch interrupts interface->switchDriver->enableIrq(interface); } else { //Just for sanity } } /** * @brief Disable interrupts * @param[in] interface Underlying network interface **/ void am335xEthDisableIrq(NetInterface *interface) { #ifdef ti_sysbios_BIOS___VERS //Disable Ethernet MAC interrupts Hwi_disableInterrupt(SYS_INT_3PGSWTXINT0); Hwi_disableInterrupt(SYS_INT_3PGSWRXINT0); #else //Disable Ethernet MAC interrupts IntSystemDisable(SYS_INT_3PGSWTXINT0); IntSystemDisable(SYS_INT_3PGSWRXINT0); #endif //Valid Ethernet PHY or switch driver? if(interface->phyDriver != NULL) { //Disable Ethernet PHY interrupts interface->phyDriver->disableIrq(interface); } else if(interface->switchDriver != NULL) { //Disable Ethernet switch interrupts interface->switchDriver->disableIrq(interface); } else { //Just for sanity } } /** * @brief Ethernet MAC transmit interrupt **/ void am335xEthTxIrqHandler(void) { bool_t flag; uint32_t status; uint32_t temp; Am335xTxBufferDesc *p; //Interrupt service routine prologue osEnterIsr(); //This flag will be set if a higher priority task must be woken flag = FALSE; //Read the TX_STAT register to determine which channels caused the interrupt status = CPSW_WR_C_TX_STAT_R(CPSW_CORE0); //Packet transmitted on channel 1? if(status & (1 << CPSW_CH1)) { //Point to the buffer descriptor p = (Am335xTxBufferDesc *) CPSW_CPDMA_TX_CP_R(CPSW_CH1); //Read the status flags temp = p->word3 & (CPSW_TX_WORD3_SOP | CPSW_TX_WORD3_EOP | CPSW_TX_WORD3_OWNER | CPSW_TX_WORD3_EOQ); //Misqueued buffer condition? 
if(temp == (CPSW_TX_WORD3_SOP | CPSW_TX_WORD3_EOP | CPSW_TX_WORD3_EOQ)) { //Check whether the next descriptor pointer is non-zero if(p->word0 != 0) { //The host corrects the misqueued buffer condition by writing the //misqueued packets buffer descriptor address to the appropriate //TX DMA head descriptor pointer CPSW_CPDMA_TX_HDP_R(CPSW_CH1) = (uint32_t) p->word0; } } //Write the TX completion pointer CPSW_CPDMA_TX_CP_R(CPSW_CH1) = (uint32_t) p; //Check whether the TX buffer is available for writing if((txCurBufferDesc1->word3 & CPSW_TX_WORD3_OWNER) == 0) { //Notify the TCP/IP stack that the transmitter is ready to send flag |= osSetEventFromIsr(&nicDriverInterface1->nicTxEvent); } } //Packet transmitted on channel 2? if(status & (1 << CPSW_CH2)) { //Point to the buffer descriptor p = (Am335xTxBufferDesc *) CPSW_CPDMA_TX_CP_R(CPSW_CH2); //Read the status flags temp = p->word3 & (CPSW_TX_WORD3_SOP | CPSW_TX_WORD3_EOP | CPSW_TX_WORD3_OWNER | CPSW_TX_WORD3_EOQ); //Misqueued buffer condition? if(temp == (CPSW_TX_WORD3_SOP | CPSW_TX_WORD3_EOP | CPSW_TX_WORD3_EOQ)) { //Check whether the next descriptor pointer is non-zero if(p->word0 != 0) { //The host corrects the misqueued buffer condition by writing the //misqueued packets buffer descriptor address to the appropriate //TX DMA head descriptor pointer CPSW_CPDMA_TX_HDP_R(CPSW_CH2) = (uint32_t) p->word0; } } //Write the TX completion pointer CPSW_CPDMA_TX_CP_R(CPSW_CH2) = (uint32_t) p; //Check whether the TX buffer is available for writing if((txCurBufferDesc2->word3 & CPSW_TX_WORD3_OWNER) == 0) { //Notify the TCP/IP stack that the transmitter is ready to send flag |= osSetEventFromIsr(&nicDriverInterface2->nicTxEvent); } } //Write the DMA end of interrupt vector CPSW_CPDMA_CPDMA_EOI_VECTOR_R = CPSW_CPDMA_EOI_VECTOR_TX_PULSE; //Interrupt service routine epilogue osExitIsr(flag); } /** * @brief Ethernet MAC receive interrupt **/ void am335xEthRxIrqHandler(void) { bool_t flag; uint32_t status; //Interrupt service routine prologue osEnterIsr(); //This flag will be set if a higher priority task must be woken flag = FALSE; //Read the RX_STAT register to determine which channels caused the interrupt status = CPSW_WR_C_RX_STAT_R(CPSW_CORE0); //Packet received on channel 0? if(status & (1 << CPSW_CH0)) { //Disable RX interrupts CPSW_WR_C_RX_EN_R(CPSW_CORE0) &= ~(1 << CPSW_CH0); //Set event flag if(nicDriverInterface1 != NULL) { nicDriverInterface1->nicEvent = TRUE; } else if(nicDriverInterface2 != NULL) { nicDriverInterface2->nicEvent = TRUE; } //Notify the TCP/IP stack of the event flag |= osSetEventFromIsr(&netEvent); } //Write the DMA end of interrupt vector CPSW_CPDMA_CPDMA_EOI_VECTOR_R = CPSW_CPDMA_EOI_VECTOR_RX_PULSE; //Interrupt service routine epilogue osExitIsr(flag); } /** * @brief AM335x Ethernet MAC event handler * @param[in] interface Underlying network interface **/ void am335xEthEventHandler(NetInterface *interface) { static uint8_t buffer[AM335X_ETH_RX_BUFFER_SIZE]; error_t error; size_t n; uint32_t temp; //Process all pending packets do { //The current buffer is available for reading? if((rxCurBufferDesc->word3 & CPSW_RX_WORD3_OWNER) == 0) { //SOP and EOP flags should be set if((rxCurBufferDesc->word3 & CPSW_RX_WORD3_SOP) != 0 && (rxCurBufferDesc->word3 & CPSW_RX_WORD3_EOP) != 0) { //Make sure no error occurred if((rxCurBufferDesc->word3 & CPSW_RX_WORD3_PKT_ERROR) == 0) { //Check the port on which the packet was received switch(rxCurBufferDesc->word3 & CPSW_RX_WORD3_FROM_PORT) { //Port 1? 
case CPSW_RX_WORD3_FROM_PORT_1: interface = nicDriverInterface1; break; //Port 2? case CPSW_RX_WORD3_FROM_PORT_2: interface = nicDriverInterface2; break; //Invalid port number? default: interface = NULL; break; } //Retrieve the length of the frame n = rxCurBufferDesc->word3 & CPSW_RX_WORD3_PACKET_LENGTH; //Limit the amount of data to read n = MIN(n, AM335X_ETH_RX_BUFFER_SIZE); //Sanity check if(interface != NULL) { //Copy data from the receive buffer osMemcpy(buffer, (uint8_t *) rxCurBufferDesc->word1, (n + 3) & ~3UL); //Packet successfully received error = NO_ERROR; } else { //The port number is invalid error = ERROR_INVALID_PACKET; } } else { //The received packet contains an error error = ERROR_INVALID_PACKET; } } else { //The packet is not valid error = ERROR_INVALID_PACKET; } //Mark the end of the queue with a NULL pointer rxCurBufferDesc->word0 = (uint32_t) NULL; //Restore the length of the buffer rxCurBufferDesc->word2 = AM335X_ETH_RX_BUFFER_SIZE; //Give the ownership of the descriptor back to the DMA rxCurBufferDesc->word3 = CPSW_RX_WORD3_OWNER; //Link the current descriptor to the previous descriptor rxCurBufferDesc->prev->word0 = (uint32_t) rxCurBufferDesc; //Read the status flags of the previous descriptor temp = rxCurBufferDesc->prev->word3 & (CPSW_RX_WORD3_SOP | CPSW_RX_WORD3_EOP | CPSW_RX_WORD3_OWNER | CPSW_RX_WORD3_EOQ); //Misqueued buffer condition? if(temp == (CPSW_RX_WORD3_SOP | CPSW_RX_WORD3_EOP | CPSW_RX_WORD3_EOQ)) { //The host acts on the misqueued buffer condition by writing the added //buffer descriptor address to the appropriate RX DMA head descriptor //pointer CPSW_CPDMA_RX_HDP_R(CPSW_CH0) = (uint32_t) rxCurBufferDesc; } //Write the RX completion pointer CPSW_CPDMA_RX_CP_R(CPSW_CH0) = (uint32_t) rxCurBufferDesc; //Point to the next descriptor in the list rxCurBufferDesc = rxCurBufferDesc->next; } else { //No more data in the receive buffer error = ERROR_BUFFER_EMPTY; } //Check whether a valid packet has been received if(!error) { NetRxAncillary ancillary; //Additional options can be passed to the stack along with the packet ancillary = NET_DEFAULT_RX_ANCILLARY; //Pass the packet to the upper layer nicProcessPacket(interface, buffer, n, &ancillary); } //No more data in the receive buffer? 
} while(error != ERROR_BUFFER_EMPTY); //Re-enable RX interrupts CPSW_WR_C_RX_EN_R(CPSW_CORE0) |= (1 << CPSW_CH0); } /** * @brief Send a packet (port 1) * @param[in] interface Underlying network interface * @param[in] buffer Multi-part buffer containing the data to send * @param[in] offset Offset to the first data byte * @param[in] ancillary Additional options passed to the stack along with * the packet * @return Error code **/ error_t am335xEthSendPacketPort1(NetInterface *interface, const NetBuffer *buffer, size_t offset, NetTxAncillary *ancillary) { static uint8_t temp[AM335X_ETH_TX_BUFFER_SIZE]; size_t length; uint32_t value; //Retrieve the length of the packet length = netBufferGetLength(buffer) - offset; //Check the frame length if(length > AM335X_ETH_TX_BUFFER_SIZE) { //The transmitter can accept another packet osSetEvent(&interface->nicTxEvent); //Report an error return ERROR_INVALID_LENGTH; } //Make sure the current buffer is available for writing if((txCurBufferDesc1->word3 & CPSW_TX_WORD3_OWNER) != 0) { return ERROR_FAILURE; } //Mark the end of the queue with a NULL pointer txCurBufferDesc1->word0 = (uint32_t) NULL; //Copy user data to the transmit buffer netBufferRead(temp, buffer, offset, length); osMemcpy((uint8_t *) txCurBufferDesc1->word1, temp, (length + 3) & ~3UL); //Set the length of the buffer txCurBufferDesc1->word2 = length & CPSW_TX_WORD2_BUFFER_LENGTH; //Set the length of the packet value = length & CPSW_TX_WORD3_PACKET_LENGTH; //Set SOP and EOP flags as the data fits in a single buffer value |= CPSW_TX_WORD3_SOP | CPSW_TX_WORD3_EOP; //Redirect the packet to the relevant port number value |= CPSW_TX_WORD3_TO_PORT_EN | CPSW_TX_WORD3_TO_PORT_1; //Give the ownership of the descriptor to the DMA txCurBufferDesc1->word3 = CPSW_TX_WORD3_OWNER | value; //Link the current descriptor to the previous descriptor txCurBufferDesc1->prev->word0 = (uint32_t) txCurBufferDesc1; //Read the status flags of the previous descriptor value = txCurBufferDesc1->prev->word3 & (CPSW_TX_WORD3_SOP | CPSW_TX_WORD3_EOP | CPSW_TX_WORD3_OWNER | CPSW_TX_WORD3_EOQ); //Misqueued buffer condition? 
if(value == (CPSW_TX_WORD3_SOP | CPSW_TX_WORD3_EOP | CPSW_TX_WORD3_EOQ)) { //Clear the misqueued buffer condition txCurBufferDesc1->prev->word3 = 0; //The host corrects the misqueued buffer condition by writing the //misqueued packets buffer descriptor address to the appropriate //TX DMA head descriptor pointer CPSW_CPDMA_TX_HDP_R(CPSW_CH1) = (uint32_t) txCurBufferDesc1; } //Point to the next descriptor in the list txCurBufferDesc1 = txCurBufferDesc1->next; //Check whether the next buffer is available for writing if((txCurBufferDesc1->word3 & CPSW_TX_WORD3_OWNER) == 0) { //The transmitter can accept another packet osSetEvent(&interface->nicTxEvent); } //Data successfully written return NO_ERROR; } /** * @brief Send a packet (port 2) * @param[in] interface Underlying network interface * @param[in] buffer Multi-part buffer containing the data to send * @param[in] offset Offset to the first data byte * @param[in] ancillary Additional options passed to the stack along with * the packet * @return Error code **/ error_t am335xEthSendPacketPort2(NetInterface *interface, const NetBuffer *buffer, size_t offset, NetTxAncillary *ancillary) { static uint8_t temp[AM335X_ETH_TX_BUFFER_SIZE]; size_t length; uint32_t value; //Retrieve the length of the packet length = netBufferGetLength(buffer) - offset; //Check the frame length if(length > AM335X_ETH_TX_BUFFER_SIZE) { //The transmitter can accept another packet osSetEvent(&interface->nicTxEvent); //Report an error return ERROR_INVALID_LENGTH; } //Make sure the current buffer is available for writing if((txCurBufferDesc2->word3 & CPSW_TX_WORD3_OWNER) != 0) { return ERROR_FAILURE; } //Mark the end of the queue with a NULL pointer txCurBufferDesc2->word0 = (uint32_t) NULL; //Copy user data to the transmit buffer netBufferRead(temp, buffer, offset, length); osMemcpy((uint8_t *) txCurBufferDesc2->word1, temp, (length + 3) & ~3UL); //Set the length of the buffer txCurBufferDesc2->word2 = length & CPSW_TX_WORD2_BUFFER_LENGTH; //Set the length of the packet value = length & CPSW_TX_WORD3_PACKET_LENGTH; //Set SOP and EOP flags as the data fits in a single buffer value |= CPSW_TX_WORD3_SOP | CPSW_TX_WORD3_EOP; //Redirect the packet to the relevant port number value |= CPSW_TX_WORD3_TO_PORT_EN | CPSW_TX_WORD3_TO_PORT_2; //Give the ownership of the descriptor to the DMA txCurBufferDesc2->word3 = CPSW_TX_WORD3_OWNER | value; //Link the current descriptor to the previous descriptor txCurBufferDesc2->prev->word0 = (uint32_t) txCurBufferDesc2; //Read the status flags of the previous descriptor value = txCurBufferDesc2->prev->word3 & (CPSW_TX_WORD3_SOP | CPSW_TX_WORD3_EOP | CPSW_TX_WORD3_OWNER | CPSW_TX_WORD3_EOQ); //Misqueued buffer condition? 
if(value == (CPSW_TX_WORD3_SOP | CPSW_TX_WORD3_EOP | CPSW_TX_WORD3_EOQ)) { //Clear the misqueued buffer condition txCurBufferDesc2->prev->word3 = 0; //The host corrects the misqueued buffer condition by writing the //misqueued packets buffer descriptor address to the appropriate //TX DMA head descriptor pointer CPSW_CPDMA_TX_HDP_R(CPSW_CH2) = (uint32_t) txCurBufferDesc2; } //Point to the next descriptor in the list txCurBufferDesc2 = txCurBufferDesc2->next; //Check whether the next buffer is available for writing if((txCurBufferDesc2->word3 & CPSW_TX_WORD3_OWNER) == 0) { //The transmitter can accept another packet osSetEvent(&interface->nicTxEvent); } //Data successfully written return NO_ERROR; } /** * @brief Configure MAC address filtering * @param[in] interface Underlying network interface * @return Error code **/ error_t am335xEthUpdateMacAddrFilter(NetInterface *interface) { uint_t i; uint_t port; MacFilterEntry *entry; //Debug message TRACE_DEBUG("Updating AM335x ALE table...\r\n"); //Select the relevant port number if(interface == nicDriverInterface1) { port = CPSW_PORT1; } else if(interface == nicDriverInterface2) { port = CPSW_PORT2; } else { port = CPSW_PORT0; } //The MAC address filter contains the list of MAC addresses to accept //when receiving an Ethernet frame for(i = 0; i < MAC_ADDR_FILTER_SIZE; i++) { //Point to the current entry entry = &interface->macAddrFilter[i]; //Check whether the ALE table should be updated for the //current multicast address if(!macCompAddr(&entry->addr, &MAC_UNSPECIFIED_ADDR)) { if(entry->addFlag) { //Add VLAN/multicast address entry to the ALE table am335xEthAddVlanAddrEntry(port, port, &entry->addr); } else if(entry->deleteFlag) { //Remove VLAN/multicast address entry from the ALE table am335xEthDeleteVlanAddrEntry(port, port, &entry->addr); } } } //Successful processing return NO_ERROR; } /** * @brief Adjust MAC configuration parameters for proper operation * @param[in] interface Underlying network interface * @return Error code **/ error_t am335xEthUpdateMacConfig(NetInterface *interface) { uint32_t config = 0; //Read MAC control register if(interface == nicDriverInterface1) { config = CPSW_SL1_MACCONTROL_R; } else if(interface == nicDriverInterface2) { config = CPSW_SL2_MACCONTROL_R; } //1000BASE-T operation mode? if(interface->linkSpeed == NIC_LINK_SPEED_1GBPS) { config |= CPSW_SL_MACCONTROL_GIG; config &= ~(CPSW_SL_MACCONTROL_IFCTL_A | CPSW_SL_MACCONTROL_IFCTL_B); } //100BASE-TX operation mode? else if(interface->linkSpeed == NIC_LINK_SPEED_100MBPS) { config &= ~CPSW_SL_MACCONTROL_GIG; config |= CPSW_SL_MACCONTROL_IFCTL_A | CPSW_SL_MACCONTROL_IFCTL_B; } //10BASE-T operation mode? else { config &= ~CPSW_SL_MACCONTROL_GIG; config &= ~(CPSW_SL_MACCONTROL_IFCTL_A | CPSW_SL_MACCONTROL_IFCTL_B); } //Half-duplex or full-duplex mode? if(interface->duplexMode == NIC_FULL_DUPLEX_MODE) { config |= CPSW_SL_MACCONTROL_FULLDUPLEX; } else { config &= ~CPSW_SL_MACCONTROL_FULLDUPLEX; } //Update MAC control register if(interface == nicDriverInterface1) { CPSW_SL1_MACCONTROL_R = config; } else if(interface == nicDriverInterface2) { CPSW_SL2_MACCONTROL_R = config; } //Successful processing return NO_ERROR; } /** * @brief Write PHY register * @param[in] opcode Access type (2 bits) * @param[in] phyAddr PHY address (5 bits) * @param[in] regAddr Register address (5 bits) * @param[in] data Register value **/ void am335xEthWritePhyReg(uint8_t opcode, uint8_t phyAddr, uint8_t regAddr, uint16_t data) { uint32_t temp; //Valid opcode? 
if(opcode == SMI_OPCODE_WRITE) { //Set up a write operation temp = MDIO_USERACCESS0_GO | MDIO_USERACCESS0_WRITE; //PHY address temp |= (phyAddr << MDIO_USERACCESS0_PHYADR_SHIFT) & MDIO_USERACCESS0_PHYADR; //Register address temp |= (regAddr << MDIO_USERACCESS0_REGADR_SHIFT) & MDIO_USERACCESS0_REGADR; //Register value temp |= data & MDIO_USERACCESS0_DATA; //Start a write operation MDIO_USERACCESS0_R = temp; //Wait for the write to complete while((MDIO_USERACCESS0_R & MDIO_USERACCESS0_GO) != 0) { } } else { //The MAC peripheral only supports standard Clause 22 opcodes } } /** * @brief Read PHY register * @param[in] opcode Access type (2 bits) * @param[in] phyAddr PHY address (5 bits) * @param[in] regAddr Register address (5 bits) * @return Register value **/ uint16_t am335xEthReadPhyReg(uint8_t opcode, uint8_t phyAddr, uint8_t regAddr) { uint16_t data; uint32_t temp; //Valid opcode? if(opcode == SMI_OPCODE_READ) { //Set up a read operation temp = MDIO_USERACCESS0_GO | MDIO_USERACCESS0_READ; //PHY address temp |= (phyAddr << MDIO_USERACCESS0_PHYADR_SHIFT) & MDIO_USERACCESS0_PHYADR; //Register address temp |= (regAddr << MDIO_USERACCESS0_REGADR_SHIFT) & MDIO_USERACCESS0_REGADR; //Start a read operation MDIO_USERACCESS0_R = temp; //Wait for the read to complete while((MDIO_USERACCESS0_R & MDIO_USERACCESS0_GO) != 0) { } //Get register value data = MDIO_USERACCESS0_R & MDIO_USERACCESS0_DATA; } else { //The MAC peripheral only supports standard Clause 22 opcodes data = 0; } //Return the value of the PHY register return data; } /** * @brief Write an ALE table entry * @param[in] index Entry index * @param[in] entry Pointer to the ALE table entry **/ void am335xEthWriteEntry(uint_t index, const Am335xAleEntry *entry) { //Copy the content of the entry to be written CPSW_ALE_TBLW_R(2) = entry->word2; CPSW_ALE_TBLW_R(1) = entry->word1; CPSW_ALE_TBLW_R(0) = entry->word0; //Write the ALE entry at the specified index CPSW_ALE_TBLCTL_R = CPSW_ALE_TBLCTL_WRITE_RDZ | index; } /** * @brief Read an ALE table entry * @param[in] index Entry index * @param[out] entry Pointer to the ALE table entry **/ void am335xEthReadEntry(uint_t index, Am335xAleEntry *entry) { //Read the ALE entry at the specified index CPSW_ALE_TBLCTL_R = index; //Copy the content of the entry entry->word2 = CPSW_ALE_TBLW_R(2); entry->word1 = CPSW_ALE_TBLW_R(1); entry->word0 = CPSW_ALE_TBLW_R(0); } /** * @brief Find a free entry in the ALE table * @return Index of the first free entry **/ uint_t am335xEthFindFreeEntry(void) { uint_t index; uint32_t type; Am335xAleEntry entry; //Loop through the ALE table entries for(index = 0; index < CPSW_ALE_MAX_ENTRIES; index++) { //Read the current entry am335xEthReadEntry(index, &entry); //Retrieve the type of the ALE entry type = entry.word1 & CPSW_ALE_WORD1_ENTRY_TYPE_MASK; //Free entry? 
if(type == CPSW_ALE_WORD1_ENTRY_TYPE_FREE) { //Exit immediately break; } } //Return the index of the entry return index; } /** * @brief Search the ALE table for the specified VLAN entry * @param[in] vlanId VLAN identifier * @return Index of the matching entry **/ uint_t am335xEthFindVlanEntry(uint_t vlanId) { uint_t index; uint32_t value; Am335xAleEntry entry; //Loop through the ALE table entries for(index = 0; index < CPSW_ALE_MAX_ENTRIES; index++) { //Read the current entry am335xEthReadEntry(index, &entry); //Retrieve the type of the ALE entry value = entry.word1 & CPSW_ALE_WORD1_ENTRY_TYPE_MASK; //Check the type of the ALE entry if(value == CPSW_ALE_WORD1_ENTRY_TYPE_VLAN_ADDR) { //Get the VLAN identifier value = entry.word1 & CPSW_ALE_WORD1_VLAN_ID_MASK; //Compare the VLAN identifier if(value == CPSW_ALE_WORD1_VLAN_ID(vlanId)) { //Matching ALE entry found break; } } } //Return the index of the entry return index; } /** * @brief Search the ALE table for the specified VLAN/address entry * @param[in] vlanId VLAN identifier * @param[in] macAddr MAC address * @return Index of the matching entry **/ uint_t am335xEthFindVlanAddrEntry(uint_t vlanId, MacAddr *macAddr) { uint_t index; uint32_t value; Am335xAleEntry entry; //Loop through the ALE table entries for(index = 0; index < CPSW_ALE_MAX_ENTRIES; index++) { //Read the current entry am335xEthReadEntry(index, &entry); //Retrieve the type of the ALE entry value = entry.word1 & CPSW_ALE_WORD1_ENTRY_TYPE_MASK; //Check the type of the ALE entry if(value == CPSW_ALE_WORD1_ENTRY_TYPE_VLAN_ADDR) { //Get the VLAN identifier value = entry.word1 & CPSW_ALE_WORD1_VLAN_ID_MASK; //Compare the VLAN identifier if(value == CPSW_ALE_WORD1_VLAN_ID(vlanId)) { //Compare the MAC address if(macAddr->b[0] == (uint8_t) (entry.word1 >> 8) && macAddr->b[1] == (uint8_t) (entry.word1 >> 0) && macAddr->b[2] == (uint8_t) (entry.word0 >> 24) && macAddr->b[3] == (uint8_t) (entry.word0 >> 16) && macAddr->b[4] == (uint8_t) (entry.word0 >> 8) && macAddr->b[5] == (uint8_t) (entry.word0 >> 0)) { //Matching ALE entry found break; } } } } //Return the index of the entry return index; } /** * @brief Add a VLAN entry in the ALE table * @param[in] port Port number * @param[in] vlanId VLAN identifier * @return Error code **/ error_t am335xEthAddVlanEntry(uint_t port, uint_t vlanId) { error_t error; uint_t index; Am335xAleEntry entry; //Ensure that there are no duplicate address entries in the ALE table index = am335xEthFindVlanEntry(vlanId); //No matching entry found? 
if(index >= CPSW_ALE_MAX_ENTRIES) { //Find a free entry in the ALE table index = am335xEthFindFreeEntry(); } //Sanity check if(index < CPSW_ALE_MAX_ENTRIES) { //Set up a VLAN table entry entry.word2 = 0; entry.word1 = CPSW_ALE_WORD1_ENTRY_TYPE_VLAN; entry.word0 = 0; //Set VLAN identifier entry.word1 |= CPSW_ALE_WORD1_VLAN_ID(vlanId); //Force the packet VLAN tag to be removed on egress entry.word0 |= CPSW_ALE_WORD0_FORCE_UNTAG_EGRESS(1 << port) | CPSW_ALE_WORD0_FORCE_UNTAG_EGRESS(1 << CPSW_PORT0); //Set VLAN member list entry.word0 |= CPSW_ALE_WORD0_VLAN_MEMBER_LIST(1 << port) | CPSW_ALE_WORD0_VLAN_MEMBER_LIST(1 << CPSW_PORT0); //Add a new entry to the ALE table am335xEthWriteEntry(index, &entry); //Sucessful processing error = NO_ERROR; } else { //The ALE table is full error = ERROR_FAILURE; } //Return status code return error; } /** * @brief Add a VLAN/address entry in the ALE table * @param[in] port Port number * @param[in] vlanId VLAN identifier * @param[in] macAddr MAC address * @return Error code **/ error_t am335xEthAddVlanAddrEntry(uint_t port, uint_t vlanId, MacAddr *macAddr) { error_t error; uint_t index; Am335xAleEntry entry; //Ensure that there are no duplicate address entries in the ALE table index = am335xEthFindVlanAddrEntry(vlanId, macAddr); //No matching entry found? if(index >= CPSW_ALE_MAX_ENTRIES) { //Find a free entry in the ALE table index = am335xEthFindFreeEntry(); } //Sanity check if(index < CPSW_ALE_MAX_ENTRIES) { //Set up a VLAN/address table entry entry.word2 = 0; entry.word1 = CPSW_ALE_WORD1_ENTRY_TYPE_VLAN_ADDR; entry.word0 = 0; //Multicast address? if(macIsMulticastAddr(macAddr)) { //Set port mask entry.word2 |= CPSW_ALE_WORD2_SUPER | CPSW_ALE_WORD2_PORT_LIST(1 << port) | CPSW_ALE_WORD2_PORT_LIST(1 << CPSW_CH0); //Set multicast forward state entry.word1 |= CPSW_ALE_WORD1_MCAST_FWD_STATE(0); } //Set VLAN identifier entry.word1 |= CPSW_ALE_WORD1_VLAN_ID(vlanId); //Copy the upper 16 bits of the unicast address entry.word1 |= (macAddr->b[0] << 8) | macAddr->b[1]; //Copy the lower 32 bits of the unicast address entry.word0 |= (macAddr->b[2] << 24) | (macAddr->b[3] << 16) | (macAddr->b[4] << 8) | macAddr->b[5]; //Add a new entry to the ALE table am335xEthWriteEntry(index, &entry); //Sucessful processing error = NO_ERROR; } else { //The ALE table is full error = ERROR_FAILURE; } //Return status code return error; } /** * @brief Remove a VLAN/address entry from the ALE table * @param[in] port Port number * @param[in] vlanId VLAN identifier * @param[in] macAddr MAC address * @return Error code **/ error_t am335xEthDeleteVlanAddrEntry(uint_t port, uint_t vlanId, MacAddr *macAddr) { error_t error; uint_t index; Am335xAleEntry entry; //Search the ALE table for the specified VLAN/address entry index = am335xEthFindVlanAddrEntry(vlanId, macAddr); //Matching ALE entry found? if(index < CPSW_ALE_MAX_ENTRIES) { //Clear the contents of the entry entry.word2 = 0; entry.word1 = 0; entry.word0 = 0; //Update the ALE table am335xEthWriteEntry(index, &entry); //Sucessful processing error = NO_ERROR; } else { //Entry not found error = ERROR_NOT_FOUND; } //Return status code return error; }
/** * @file am335x_eth_driver.c * @brief Sitara AM335x Gigabit Ethernet MAC driver * * @section License * * SPDX-License-Identifier: GPL-2.0-or-later * * Copyright (C) 2010-2021 Oryx Embedded SARL. All rights reserved. * * This file is part of CycloneTCP Open. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. * * @author Oryx Embedded SARL (www.oryx-embedded.com) * @version 2.0.2 **/ //Switch to the appropriate trace level #define TRACE_LEVEL NIC_TRACE_LEVEL //Dependencies #include "soc_am335x.h" #include "hw_types.h" #include "hw_cm_per.h" #include "hw_control_am335x.h" #include "hw_cpsw_ale.h" #include "hw_cpsw_cpdma.h" #include "hw_cpsw_port.h" #include "hw_cpsw_sl.h" #include "hw_cpsw_ss.h" #include "hw_cpsw_wr.h" #include "hw_mdio.h" #include "interrupt.h" #include "core/net.h" #include "drivers/mac/am335x_eth_driver.h" #include "debug.h" //MDIO input clock frequency #define MDIO_INPUT_CLK 125000000 //MDIO output clock frequency #define MDIO_OUTPUT_CLK 1000000 //Underlying network interface (port 1) static NetInterface *nicDriverInterface1 = NULL; //Underlying network interface (port 2) static NetInterface *nicDriverInterface2 = NULL; //IAR EWARM compiler? #if defined(__ICCARM__) //Transmit buffer (port 1) #pragma data_alignment = 4 #pragma location = AM335X_ETH_RAM_SECTION static uint8_t txBuffer1[AM335X_ETH_TX_BUFFER_COUNT][AM335X_ETH_TX_BUFFER_SIZE]; //Transmit buffer (port 2) #pragma data_alignment = 4 #pragma location = AM335X_ETH_RAM_SECTION static uint8_t txBuffer2[AM335X_ETH_TX_BUFFER_COUNT][AM335X_ETH_TX_BUFFER_SIZE]; //Receive buffer #pragma data_alignment = 4 #pragma location = AM335X_ETH_RAM_SECTION static uint8_t rxBuffer[AM335X_ETH_RX_BUFFER_COUNT][AM335X_ETH_RX_BUFFER_SIZE]; //Transmit buffer descriptors (port 1) #pragma data_alignment = 4 #pragma location = AM335X_ETH_RAM_CPPI_SECTION static Am335xTxBufferDesc txBufferDesc1[AM335X_ETH_TX_BUFFER_COUNT]; //Transmit buffer descriptors (port 2) #pragma data_alignment = 4 #pragma location = AM335X_ETH_RAM_CPPI_SECTION static Am335xTxBufferDesc txBufferDesc2[AM335X_ETH_TX_BUFFER_COUNT]; //Receive buffer descriptors #pragma data_alignment = 4 #pragma location = AM335X_ETH_RAM_CPPI_SECTION static Am335xRxBufferDesc rxBufferDesc[AM335X_ETH_RX_BUFFER_COUNT]; //GCC compiler? 
#else //Transmit buffer (port 1) static uint8_t txBuffer1[AM335X_ETH_TX_BUFFER_COUNT][AM335X_ETH_TX_BUFFER_SIZE] __attribute__((aligned(4), __section__(AM335X_ETH_RAM_SECTION))); //Transmit buffer (port 2) static uint8_t txBuffer2[AM335X_ETH_TX_BUFFER_COUNT][AM335X_ETH_TX_BUFFER_SIZE] __attribute__((aligned(4), __section__(AM335X_ETH_RAM_SECTION))); //Receive buffer static uint8_t rxBuffer[AM335X_ETH_RX_BUFFER_COUNT][AM335X_ETH_RX_BUFFER_SIZE] __attribute__((aligned(4), __section__(AM335X_ETH_RAM_SECTION))); //Transmit buffer descriptors (port 1) static Am335xTxBufferDesc txBufferDesc1[AM335X_ETH_TX_BUFFER_COUNT] __attribute__((aligned(4), __section__(AM335X_ETH_RAM_CPPI_SECTION))); //Transmit buffer descriptors (port 2) static Am335xTxBufferDesc txBufferDesc2[AM335X_ETH_TX_BUFFER_COUNT] __attribute__((aligned(4), __section__(AM335X_ETH_RAM_CPPI_SECTION))); //Receive buffer descriptors static Am335xRxBufferDesc rxBufferDesc[AM335X_ETH_RX_BUFFER_COUNT] __attribute__((aligned(4), __section__(AM335X_ETH_RAM_CPPI_SECTION))); #endif //Pointer to the current TX buffer descriptor (port1) static Am335xTxBufferDesc *txCurBufferDesc1; //Pointer to the current TX buffer descriptor (port 2) static Am335xTxBufferDesc *txCurBufferDesc2; //Pointer to the current RX buffer descriptor static Am335xRxBufferDesc *rxCurBufferDesc; /** * @brief AM335x Ethernet MAC driver (port1) **/ const NicDriver am335xEthPort1Driver = { NIC_TYPE_ETHERNET, ETH_MTU, am335xEthInitPort1, am335xEthTick, am335xEthEnableIrq, am335xEthDisableIrq, am335xEthEventHandler, am335xEthSendPacketPort1, am335xEthUpdateMacAddrFilter, am335xEthUpdateMacConfig, am335xEthWritePhyReg, am335xEthReadPhyReg, FALSE, TRUE, TRUE, FALSE }; /** * @brief AM335x Ethernet MAC driver (port2) **/ const NicDriver am335xEthPort2Driver = { NIC_TYPE_ETHERNET, ETH_MTU, am335xEthInitPort2, am335xEthTick, am335xEthEnableIrq, am335xEthDisableIrq, am335xEthEventHandler, am335xEthSendPacketPort2, am335xEthUpdateMacAddrFilter, am335xEthUpdateMacConfig, am335xEthWritePhyReg, am335xEthReadPhyReg, FALSE, TRUE, TRUE, FALSE }; /** * @brief AM335x Ethernet MAC initialization (port 1) * @param[in] interface Underlying network interface * @return Error code **/ error_t am335xEthInitPort1(NetInterface *interface) { error_t error; uint32_t temp; //Debug message TRACE_INFO("Initializing AM335x Ethernet MAC (port 1)...\r\n"); //Initialize CPSW instance am335xEthInitInstance(interface); //Save underlying network interface nicDriverInterface1 = interface; //Valid Ethernet PHY or switch driver? if(interface->phyDriver != NULL) { //Ethernet PHY initialization error = interface->phyDriver->init(interface); } else if(interface->switchDriver != NULL) { //Ethernet switch initialization error = interface->switchDriver->init(interface); } else { //The interface is not properly configured error = ERROR_FAILURE; } //Any error to report? if(error) { return error; } //Unspecified MAC address? 
if(macCompAddr(&interface->macAddr, &MAC_UNSPECIFIED_ADDR)) { //Use the factory preprogrammed MAC address interface->macAddr.b[0] = CONTROL_MAC_ID_HI_R(0) >> CONTROL_MAC_ID0_HI_MACADDR_47_40_SHIFT; interface->macAddr.b[1] = CONTROL_MAC_ID_HI_R(0) >> CONTROL_MAC_ID0_HI_MACADDR_39_32_SHIFT; interface->macAddr.b[2] = CONTROL_MAC_ID_HI_R(0) >> CONTROL_MAC_ID0_HI_MACADDR_31_24_SHIFT; interface->macAddr.b[3] = CONTROL_MAC_ID_HI_R(0) >> CONTROL_MAC_ID0_HI_MACADDR_23_16_SHIFT; interface->macAddr.b[4] = CONTROL_MAC_ID_LO_R(0) >> CONTROL_MAC_ID0_LO_MACADDR_15_8_SHIFT; interface->macAddr.b[5] = CONTROL_MAC_ID_LO_R(0) >> CONTROL_MAC_ID0_LO_MACADDR_7_0_SHIFT; //Generate the 64-bit interface identifier macAddrToEui64(&interface->macAddr, &interface->eui64); } //Set port state to forward temp = CPSW_ALE_PORTCTL_R(1) & ~CPSW_ALE_PORTCTL1_PORT_STATE; CPSW_ALE_PORTCTL_R(1) = temp | CPSW_ALE_PORTCTL_PORT_STATE_FORWARD; //Set the MAC address of the station CPSW_PORT1_SA_HI_R = interface->macAddr.w[0] | (interface->macAddr.w[1] << 16); CPSW_PORT1_SA_LO_R = interface->macAddr.w[2]; //Configure VLAN identifier and VLAN priority CPSW_PORT1_PORT_VLAN_R = (0 << CPSW_PORT_P1_PORT_VLAN_PORT_PRI_SHIFT) | (CPSW_PORT1 << CPSW_PORT_P1_PORT_VLAN_PORT_VID_SHIFT); //Add a VLAN entry in the ALE table am335xEthAddVlanEntry(CPSW_PORT1, CPSW_PORT1); //Add a VLAN/unicast address entry in the ALE table am335xEthAddVlanAddrEntry(CPSW_PORT1, CPSW_PORT1, &interface->macAddr); //Enable CPSW statistics CPSW_SS_STAT_PORT_EN_R |= CPSW_SS_STAT_PORT_EN_P1_STAT_EN; //Enable TX and RX CPSW_SL1_MACCONTROL_R = CPSW_SL_MACCONTROL_GMII_EN; //Accept any packets from the upper layer osSetEvent(&interface->nicTxEvent); //Successful initialization return NO_ERROR; } /** * @brief AM335x Ethernet MAC initialization (port 2) * @param[in] interface Underlying network interface * @return Error code **/ error_t am335xEthInitPort2(NetInterface *interface) { error_t error; uint32_t temp; //Debug message TRACE_INFO("Initializing AM335x Ethernet MAC (port 2)...\r\n"); //Initialize CPSW instance am335xEthInitInstance(interface); //Save underlying network interface nicDriverInterface2 = interface; //PHY transceiver initialization error = interface->phyDriver->init(interface); //Failed to initialize PHY transceiver? if(error) { return error; } //Unspecified MAC address? 
if(macCompAddr(&interface->macAddr, &MAC_UNSPECIFIED_ADDR)) { //Use the factory preprogrammed MAC address interface->macAddr.b[0] = CONTROL_MAC_ID_HI_R(1) >> CONTROL_MAC_ID1_HI_MACADDR_47_40_SHIFT; interface->macAddr.b[1] = CONTROL_MAC_ID_HI_R(1) >> CONTROL_MAC_ID1_HI_MACADDR_39_32_SHIFT; interface->macAddr.b[2] = CONTROL_MAC_ID_HI_R(1) >> CONTROL_MAC_ID1_HI_MACADDR_31_24_SHIFT; interface->macAddr.b[3] = CONTROL_MAC_ID_HI_R(1) >> CONTROL_MAC_ID1_HI_MACADDR_23_16_SHIFT; interface->macAddr.b[4] = CONTROL_MAC_ID_LO_R(1) >> CONTROL_MAC_ID1_LO_MACADDR_15_8_SHIFT; interface->macAddr.b[5] = CONTROL_MAC_ID_LO_R(1) >> CONTROL_MAC_ID1_LO_MACADDR_7_0_SHIFT; //Generate the 64-bit interface identifier macAddrToEui64(&interface->macAddr, &interface->eui64); } //Set port state to forward temp = CPSW_ALE_PORTCTL_R(2) & ~CPSW_ALE_PORTCTL2_PORT_STATE; CPSW_ALE_PORTCTL_R(2) = temp | CPSW_ALE_PORTCTL_PORT_STATE_FORWARD; //Set the MAC address of the station CPSW_PORT2_SA_HI_R = interface->macAddr.w[0] | (interface->macAddr.w[1] << 16); CPSW_PORT2_SA_LO_R = interface->macAddr.w[2]; //Configure VLAN identifier and VLAN priority CPSW_PORT2_PORT_VLAN_R = (0 << CPSW_PORT_P2_PORT_VLAN_PORT_PRI_SHIFT) | (CPSW_PORT2 << CPSW_PORT_P2_PORT_VLAN_PORT_VID_SHIFT); //Add a VLAN entry in the ALE table am335xEthAddVlanEntry(CPSW_PORT2, CPSW_PORT2); //Add a VLAN/unicast address entry in the ALE table am335xEthAddVlanAddrEntry(CPSW_PORT2, CPSW_PORT2, &interface->macAddr); //Enable CPSW statistics CPSW_SS_STAT_PORT_EN_R |= CPSW_SS_STAT_PORT_EN_P2_STAT_EN; //Enable TX and RX CPSW_SL2_MACCONTROL_R = CPSW_SL_MACCONTROL_GMII_EN; //Accept any packets from the upper layer osSetEvent(&interface->nicTxEvent); //Successful initialization return NO_ERROR; } /** * @brief Initialize CPSW instance * @param[in] interface Underlying network interface **/ void am335xEthInitInstance(NetInterface *interface) { uint_t i; uint32_t temp; #ifdef ti_sysbios_BIOS___VERS Hwi_Params hwiParams; #endif //Initialization sequence is performed once if(nicDriverInterface1 == NULL && nicDriverInterface2 == NULL) { //Select the interface mode (MII/RMII/RGMII) and configure pin muxing am335xEthInitGpio(interface); //Enable the CPSW subsystem clocks CM_PER_CPGMAC0_CLKCTRL_R = CM_PER_CPGMAC0_CLKCTRL_MODULEMODE_ENABLE; //Wait for the CPSW module to be fully functional do { //Get module idle status temp = (CM_PER_CPGMAC0_CLKCTRL_R & CM_PER_CPGMAC0_CLKCTRL_IDLEST) >> CM_PER_CPGMAC0_CLKCTRL_IDLEST_SHIFT; //Keep looping as long as the module is not fully functional } while(temp != CM_PER_CPGMAC0_CLKCTRL_IDLEST_FUNC); //Start a software forced wake-up transition CM_PER_CPSW_CLKSTCTRL_R = CM_PER_CPSW_CLKSTCTRL_CLKTRCTRL_SW_WKUP; //Wait for the CPSW 125 MHz OCP clock to be active do { //Get the state of the CPSW 125 MHz OCP clock temp = (CM_PER_CPSW_CLKSTCTRL_R & CM_PER_CPSW_CLKSTCTRL_CLKACTIVITY_CPSW_125MHZ_GCLK) >> CM_PER_CPSW_CLKSTCTRL_CLKACTIVITY_CPSW_125MHZ_GCLK_SHIFT; //Keep looping as long as the clock is inactive } while(temp != CM_PER_CPSW_CLKSTCTRL_CLKACTIVITY_CPSW_125MHZ_GCLK_ACT); //Reset CPSW subsystem CPSW_SS_SOFT_RESET_R = CPSW_SS_SOFT_RESET_SOFT_RESET; //Wait for the reset to complete while((CPSW_SS_SOFT_RESET_R & CPSW_SS_SOFT_RESET_SOFT_RESET) != 0) { } //Reset CPSW wrapper module CPSW_WR_SOFT_RESET_R = CPSW_WR_SOFT_RESET_SOFT_RESET; //Wait for the reset to complete while((CPSW_WR_SOFT_RESET_R & CPSW_WR_SOFT_RESET_SOFT_RESET) != 0) { } //Reset CPSW sliver 1 logic CPSW_SL1_SOFT_RESET_R = CPSW_SL_SOFT_RESET_SOFT_RESET; //Wait for the reset to complete 
while((CPSW_SL1_SOFT_RESET_R & CPSW_SL_SOFT_RESET_SOFT_RESET) != 0) { } //Reset CPSW sliver 2 logic CPSW_SL2_SOFT_RESET_R = CPSW_SL_SOFT_RESET_SOFT_RESET; //Wait for the reset to complete while((CPSW_SL2_SOFT_RESET_R & CPSW_SL_SOFT_RESET_SOFT_RESET) != 0) { } //Reset CPSW CPDMA module CPSW_CPDMA_CPDMA_SOFT_RESET_R = CPSW_CPDMA_CPDMA_SOFT_RESET_SOFT_RESET; //Wait for the reset to complete while((CPSW_CPDMA_CPDMA_SOFT_RESET_R & CPSW_CPDMA_CPDMA_SOFT_RESET_SOFT_RESET) != 0) { } //Initialize the HDPs and the CPs to NULL for(i = CPSW_CH0; i <= CPSW_CH7; i++) { //TX head descriptor pointer CPSW_CPDMA_TX_HDP_R(i) = 0; //TX completion pointer CPSW_CPDMA_TX_CP_R(i) = 0; //RX head descriptor pointer CPSW_CPDMA_RX_HDP_R(i) = 0; //RX completion pointer CPSW_CPDMA_RX_CP_R(i) = 0; } //Enable ALE and clear ALE address table CPSW_ALE_CONTROL_R = CPSW_ALE_CONTROL_ENABLE_ALE | CPSW_ALE_CONTROL_CLEAR_TABLE; //For dual MAC mode, configure VLAN aware mode CPSW_ALE_CONTROL_R |= CPSW_ALE_CONTROL_ALE_VLAN_AWARE; //Set dual MAC mode for port 0 temp = CPSW_PORT0_TX_IN_CTL_R & ~CPSW_PORT_P0_TX_IN_CTL_TX_IN_SEL; CPSW_PORT0_TX_IN_CTL_R = temp | CPSW_PORT_P0_TX_IN_CTL_TX_IN_DUAL_MAC; //Set port 0 state to forward temp = CPSW_ALE_PORTCTL_R(0) & ~CPSW_ALE_PORTCTL0_PORT_STATE; CPSW_ALE_PORTCTL_R(0) = temp | CPSW_ALE_PORTCTL_PORT_STATE_FORWARD; //Enable CPSW statistics CPSW_SS_STAT_PORT_EN_R = CPSW_SS_STAT_PORT_EN_P0_STAT_EN; //Configure TX and RX buffer descriptors am335xEthInitBufferDesc(interface); //Acknowledge TX and interrupts for proper interrupt pulsing CPSW_CPDMA_CPDMA_EOI_VECTOR_R = CPSW_CPDMA_EOI_VECTOR_TX_PULSE; CPSW_CPDMA_CPDMA_EOI_VECTOR_R = CPSW_CPDMA_EOI_VECTOR_RX_PULSE; //Enable channel 1 and 2 interrupts of the DMA engine CPSW_CPDMA_TX_INTMASK_SET_R = (1 << CPSW_CH1) | (1 << CPSW_CH2); //Enable TX completion interrupts CPSW_WR_C_TX_EN_R(CPSW_CORE0) |= (1 << CPSW_CH1) | (1 << CPSW_CH2); //Enable channel 0 interrupts of the DMA engine CPSW_CPDMA_RX_INTMASK_SET_R = (1 << CPSW_CH0); //Enable RX completion interrupts CPSW_WR_C_RX_EN_R(CPSW_CORE0) |= (1 << CPSW_CH0); #ifdef ti_sysbios_BIOS___VERS //Configure TX interrupt Hwi_Params_init(&hwiParams); hwiParams.enableInt = FALSE; hwiParams.priority = AM335X_ETH_IRQ_PRIORITY; //Register TX interrupt handler Hwi_create(SYS_INT_3PGSWTXINT0, (Hwi_FuncPtr) am335xEthTxIrqHandler, &hwiParams, NULL); //Configure RX interrupt Hwi_Params_init(&hwiParams); hwiParams.enableInt = FALSE; hwiParams.priority = AM335X_ETH_IRQ_PRIORITY; //Register RX interrupt handler Hwi_create(SYS_INT_3PGSWRXINT0, (Hwi_FuncPtr) am335xEthRxIrqHandler, &hwiParams, NULL); #else //Register interrupt handlers IntRegister(SYS_INT_3PGSWTXINT0, am335xEthTxIrqHandler); IntRegister(SYS_INT_3PGSWRXINT0, am335xEthRxIrqHandler); //Configure TX interrupt priority IntPrioritySet(SYS_INT_3PGSWTXINT0, AM335X_ETH_IRQ_PRIORITY, AINTC_HOSTINT_ROUTE_IRQ); //Configure RX interrupt priority IntPrioritySet(SYS_INT_3PGSWRXINT0, AM335X_ETH_IRQ_PRIORITY, AINTC_HOSTINT_ROUTE_IRQ); #endif //Enable the transmission and reception CPSW_CPDMA_TX_CONTROL_R = CPSW_CPDMA_TX_CONTROL_TX_EN; CPSW_CPDMA_RX_CONTROL_R = CPSW_CPDMA_RX_CONTROL_RX_EN; //Calculate the MDC clock divider to be used temp = (MDIO_INPUT_CLK / MDIO_OUTPUT_CLK) - 1; //Initialize MDIO interface MDIO_CONTROL_R = MDIO_CONTROL_ENABLE | MDIO_CONTROL_FAULTENB | (temp & MDIO_CONTROL_CLKDIV); } } //BeagleBone Black, TMDSSK3358, OSD3358-SM-RED or SBC DIVA board? 
#if defined(USE_BEAGLEBONE_BLACK) || defined(USE_TMDSSK3358) || \ defined(USE_OSD3358_SM_RED) || defined(USE_SBC_DIVA) /** * @brief GPIO configuration * @param[in] interface Underlying network interface **/ void am335xEthInitGpio(NetInterface *interface) { //BeagleBone Black board? #if defined(USE_BEAGLEBONE_BLACK) //Select MII interface mode for port 1 CONTROL_GMII_SEL_R = CONTROL_GMII_SEL_GMII1_SEL_MII; //Configure MII1_TX_CLK (GPIO3_9) CONTROL_CONF_MII1_TXCLK_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(0); //Configure MII1_TX_EN (GPIO3_3) CONTROL_CONF_MII1_TXEN_R = CONTROL_CONF_MUXMODE(0); //Configure MII1_TXD0 (GPIO0_28) CONTROL_CONF_MII1_TXD0_R = CONTROL_CONF_MUXMODE(0); //Configure MII1_TXD1 (GPIO0_21) CONTROL_CONF_MII1_TXD1_R = CONTROL_CONF_MUXMODE(0); //Configure MII1_TXD2 (GPIO0_17) CONTROL_CONF_MII1_TXD2_R = CONTROL_CONF_MUXMODE(0); //Configure MII1_TXD3 (GPIO0_16) CONTROL_CONF_MII1_TXD3_R = CONTROL_CONF_MUXMODE(0); //Configure MII1_RX_CLK (GPIO3_10) CONTROL_CONF_MII1_RXCLK_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(0); //Configure MII1_RXD0 (GPIO2_21) CONTROL_CONF_MII1_RXD0_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(0); //Configure MII1_RXD1 (GPIO2_20) CONTROL_CONF_MII1_RXD1_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(0); //Configure MII1_RXD2 (GPIO2_19) CONTROL_CONF_MII1_RXD2_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(0); //Configure MII1_RXD3 (GPIO2_18) CONTROL_CONF_MII1_RXD3_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(0); //Configure MII1_COL (GPIO3_0) CONTROL_CONF_MII1_COL_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(0); //Configure MII1_CRS (GPIO3_1) CONTROL_CONF_MII1_CRS_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(0); //Configure MII1_RX_ER (GPIO3_2) CONTROL_CONF_MII1_RXERR_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(0); //Configure MII1_RX_DV (GPIO3_4) CONTROL_CONF_MII1_RXDV_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(0); //Configure MDIO (GPIO0_0) CONTROL_CONF_MDIO_DATA_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_PULLUPSEL | CONTROL_CONF_MUXMODE(0); //Configure MDC (GPIO0_1) CONTROL_CONF_MDIO_CLK_R = CONTROL_CONF_PULLUPSEL | CONTROL_CONF_MUXMODE(0); //TMDSSK3358 board? 
#elif defined(USE_TMDSSK3358) //Select RGMII interface mode for both port 1 and port 2 CONTROL_GMII_SEL_R = CONTROL_GMII_SEL_GMII1_SEL_RGMII | CONTROL_GMII_SEL_GMII2_SEL_RGMII; //Configure RGMII1_TCLK (GPIO3_9) CONTROL_CONF_MII1_TXCLK_R = CONTROL_CONF_MUXMODE(2); //Configure RGMII1_TCTL (GPIO3_3) CONTROL_CONF_MII1_TXEN_R = CONTROL_CONF_MUXMODE(2); //Configure RGMII1_TD0 (GPIO0_28) CONTROL_CONF_MII1_TXD0_R = CONTROL_CONF_MUXMODE(2); //Configure RGMII1_TD1 (GPIO0_21) CONTROL_CONF_MII1_TXD1_R = CONTROL_CONF_MUXMODE(2); //Configure RGMII1_TD2 (GPIO0_17) CONTROL_CONF_MII1_TXD2_R = CONTROL_CONF_MUXMODE(2); //Configure RGMII1_TD3 (GPIO0_16) CONTROL_CONF_MII1_TXD3_R = CONTROL_CONF_MUXMODE(2); //Configure RGMII1_RCLK (GPIO3_10) CONTROL_CONF_MII1_RXCLK_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure RGMII1_RCTL (GPIO3_4) CONTROL_CONF_MII1_RXDV_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure RGMII1_RD0 (GPIO2_21) CONTROL_CONF_MII1_RXD0_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure /RGMII1_RD1 (GPIO2_20) CONTROL_CONF_MII1_RXD1_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure RGMII1_RD2 (GPIO2_19) CONTROL_CONF_MII1_RXD2_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure RGMII1_RD3 (GPIO2_18) CONTROL_CONF_MII1_RXD3_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure RGMII2_TCLK (GPIO1_22/GPMC_A6) CONTROL_CONF_GPMC_A_R(6) = CONTROL_CONF_MUXMODE(2); //Configure RGMII2_TCTL (GPIO1_16/GPMC_A0) CONTROL_CONF_GPMC_A_R(0) = CONTROL_CONF_MUXMODE(2); //Configure RGMII2_TD0 (GPIO1_21/GPMC_A5) CONTROL_CONF_GPMC_A_R(5) = CONTROL_CONF_MUXMODE(2); //Configure RGMII2_TD1 (GPIO1_20/GPMC_A4) CONTROL_CONF_GPMC_A_R(4) = CONTROL_CONF_MUXMODE(2); //Configure RGMII2_TD2 (GPIO1_19/GPMC_A3) CONTROL_CONF_GPMC_A_R(3) = CONTROL_CONF_MUXMODE(2); //Configure RGMII2_TD3 (GPIO1_18/GPMC_A2) CONTROL_CONF_GPMC_A_R(2) = CONTROL_CONF_MUXMODE(2); //Configure RGMII2_RCLK (GPIO1_23/GPMC_A7) CONTROL_CONF_GPMC_A_R(7) = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure RGMII2_RCTL (GPIO1_17/GPMC_A1) CONTROL_CONF_GPMC_A_R(1) = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure RGMII2_RD0 (GPIO1_27/GPMC_A11) CONTROL_CONF_GPMC_A_R(11) = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure RGMII2_RD1 (GPIO1_26/GPMC_A10) CONTROL_CONF_GPMC_A_R(10) = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure RGMII2_RD2 (GPIO1_25/GPMC_A9) CONTROL_CONF_GPMC_A_R(9) = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure RGMII2_RD3 (GPIO1_24/GPMC_A8) CONTROL_CONF_GPMC_A_R(8) = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure MDIO (GPIO0_0) CONTROL_CONF_MDIO_DATA_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_PULLUPSEL | CONTROL_CONF_MUXMODE(0); //Configure MDC (GPIO0_1) CONTROL_CONF_MDIO_CLK_R = CONTROL_CONF_PULLUPSEL | CONTROL_CONF_MUXMODE(0); //OSD3358-SM-RED board? 
#elif defined(USE_OSD3358_SM_RED) //Select RGMII interface mode for both port 1 CONTROL_GMII_SEL_R = CONTROL_GMII_SEL_GMII1_SEL_RGMII; //Configure RGMII1_TCLK (GPIO3_9) CONTROL_CONF_MII1_TXCLK_R = CONTROL_CONF_MUXMODE(2); //Configure RGMII1_TCTL (GPIO3_3) CONTROL_CONF_MII1_TXEN_R = CONTROL_CONF_MUXMODE(2); //Configure RGMII1_TD0 (GPIO0_28) CONTROL_CONF_MII1_TXD0_R = CONTROL_CONF_MUXMODE(2); //Configure RGMII1_TD1 (GPIO0_21) CONTROL_CONF_MII1_TXD1_R = CONTROL_CONF_MUXMODE(2); //Configure RGMII1_TD2 (GPIO0_17) CONTROL_CONF_MII1_TXD2_R = CONTROL_CONF_MUXMODE(2); //Configure RGMII1_TD3 (GPIO0_16) CONTROL_CONF_MII1_TXD3_R = CONTROL_CONF_MUXMODE(2); //Configure RGMII1_RCLK (GPIO3_10) CONTROL_CONF_MII1_RXCLK_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure RGMII1_RCTL (GPIO3_4) CONTROL_CONF_MII1_RXDV_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure RGMII1_RD0 (GPIO2_21) CONTROL_CONF_MII1_RXD0_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure /RGMII1_RD1 (GPIO2_20) CONTROL_CONF_MII1_RXD1_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure RGMII1_RD2 (GPIO2_19) CONTROL_CONF_MII1_RXD2_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure RGMII1_RD3 (GPIO2_18) CONTROL_CONF_MII1_RXD3_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure MDIO (GPIO0_0) CONTROL_CONF_MDIO_DATA_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_PULLUPSEL | CONTROL_CONF_MUXMODE(0); //Configure MDC (GPIO0_1) CONTROL_CONF_MDIO_CLK_R = CONTROL_CONF_PULLUPSEL | CONTROL_CONF_MUXMODE(0); //SBC DIVA board? #elif defined(USE_SBC_DIVA) //Select RMII interface mode for port 1 and RGMII interface mode for port 2 CONTROL_GMII_SEL_R = CONTROL_GMII_SEL_RMII1_IO_CLK_EN | CONTROL_GMII_SEL_GMII1_SEL_RMII | CONTROL_GMII_SEL_GMII2_SEL_RGMII; //Configure RMII1_REF_CLK (GPIO0_29) CONTROL_CONF_RMII1_REFCLK_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(0); //Configure RMII1_TX_EN (GPIO3_3) CONTROL_CONF_MII1_TXEN_R = CONTROL_CONF_MUXMODE(1); //Configure RMII1_TXD0 (GPIO0_28) CONTROL_CONF_MII1_TXD0_R = CONTROL_CONF_MUXMODE(1); //Configure RMII1_TXD1 (GPIO0_21) CONTROL_CONF_MII1_TXD1_R = CONTROL_CONF_MUXMODE(1); //Configure RMII1_RXD0 (GPIO2.21) CONTROL_CONF_MII1_RXD0_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(1); //Configure RMII1_RXD1 (GPIO2.20) CONTROL_CONF_MII1_RXD1_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(1); //Configure RMII1_CRS_DV (GPIO3_1) CONTROL_CONF_MII1_CRS_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(1); //Configure RMII1_RX_ER (GPIO3_2) CONTROL_CONF_MII1_RXERR_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(1); //Configure RGMII2_TCLK (GPIO1_22/GPMC_A6) CONTROL_CONF_GPMC_A_R(6) = CONTROL_CONF_MUXMODE(2); //Configure RGMII2_TCTL (GPIO1_16/GPMC_A0) CONTROL_CONF_GPMC_A_R(0) = CONTROL_CONF_MUXMODE(2); //Configure RGMII2_TD0 (GPIO1_21/GPMC_A5) CONTROL_CONF_GPMC_A_R(5) = CONTROL_CONF_MUXMODE(2); //Configure RGMII2_TD1 (GPIO1_20/GPMC_A4) CONTROL_CONF_GPMC_A_R(4) = CONTROL_CONF_MUXMODE(2); //Configure RGMII2_TD2 (GPIO1_19/GPMC_A3) CONTROL_CONF_GPMC_A_R(3) = CONTROL_CONF_MUXMODE(2); //Configure RGMII2_TD3 (GPIO1_18/GPMC_A2) CONTROL_CONF_GPMC_A_R(2) = CONTROL_CONF_MUXMODE(2); //Configure RGMII2_RCLK (GPIO1_23/GPMC_A7) CONTROL_CONF_GPMC_A_R(7) = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure RGMII2_RCTL (GPIO1_17/GPMC_A1) CONTROL_CONF_GPMC_A_R(1) = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure RGMII2_RD0 (GPIO1_27/GPMC_A11) CONTROL_CONF_GPMC_A_R(11) = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure RGMII2_RD1 
(GPIO1_26/GPMC_A10) CONTROL_CONF_GPMC_A_R(10) = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure RGMII2_RD2 (GPIO1_25/GPMC_A9) CONTROL_CONF_GPMC_A_R(9) = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure RGMII2_RD3 (GPIO1_24/GPMC_A8) CONTROL_CONF_GPMC_A_R(8) = CONTROL_CONF_RXACTIVE | CONTROL_CONF_MUXMODE(2); //Configure MDIO (GPIO0_0) CONTROL_CONF_MDIO_DATA_R = CONTROL_CONF_RXACTIVE | CONTROL_CONF_PULLUPSEL | CONTROL_CONF_MUXMODE(0); //Configure MDC (GPIO0_1) CONTROL_CONF_MDIO_CLK_R = CONTROL_CONF_PULLUPSEL | CONTROL_CONF_MUXMODE(0); #endif } #endif /** * @brief Initialize buffer descriptor lists * @param[in] interface Underlying network interface **/ void am335xEthInitBufferDesc(NetInterface *interface) { uint_t i; uint_t nextIndex; uint_t prevIndex; //Initialize TX buffer descriptor list (port 1) for(i = 0; i < AM335X_ETH_TX_BUFFER_COUNT; i++) { //Index of the next buffer nextIndex = (i + 1) % AM335X_ETH_TX_BUFFER_COUNT; //Index of the previous buffer prevIndex = (i + AM335X_ETH_TX_BUFFER_COUNT - 1) % AM335X_ETH_TX_BUFFER_COUNT; //Next descriptor pointer txBufferDesc1[i].word0 = (uint32_t) NULL; //Buffer pointer txBufferDesc1[i].word1 = (uint32_t) txBuffer1[i]; //Buffer offset and buffer length txBufferDesc1[i].word2 = 0; //Status flags and packet length txBufferDesc1[i].word3 = 0; //Form a doubly linked list txBufferDesc1[i].next = &txBufferDesc1[nextIndex]; txBufferDesc1[i].prev = &txBufferDesc1[prevIndex]; } //Point to the very first descriptor txCurBufferDesc1 = &txBufferDesc1[0]; //Mark the end of the queue txCurBufferDesc1->prev->word3 = CPSW_TX_WORD3_SOP | CPSW_TX_WORD3_EOP | CPSW_TX_WORD3_EOQ; //Initialize TX buffer descriptor list (port 2) for(i = 0; i < AM335X_ETH_TX_BUFFER_COUNT; i++) { //Index of the next buffer nextIndex = (i + 1) % AM335X_ETH_TX_BUFFER_COUNT; //Index of the previous buffer prevIndex = (i + AM335X_ETH_TX_BUFFER_COUNT - 1) % AM335X_ETH_TX_BUFFER_COUNT; //Next descriptor pointer txBufferDesc2[i].word0 = (uint32_t) NULL; //Buffer pointer txBufferDesc2[i].word1 = (uint32_t) txBuffer2[i]; //Buffer offset and buffer length txBufferDesc2[i].word2 = 0; //Status flags and packet length txBufferDesc2[i].word3 = 0; //Form a doubly linked list txBufferDesc2[i].next = &txBufferDesc2[nextIndex]; txBufferDesc2[i].prev = &txBufferDesc2[prevIndex]; } //Point to the very first descriptor txCurBufferDesc2 = &txBufferDesc2[0]; //Mark the end of the queue txCurBufferDesc2->prev->word3 = CPSW_TX_WORD3_SOP | CPSW_TX_WORD3_EOP | CPSW_TX_WORD3_EOQ; //Initialize RX buffer descriptor list for(i = 0; i < AM335X_ETH_RX_BUFFER_COUNT; i++) { //Index of the next buffer nextIndex = (i + 1) % AM335X_ETH_RX_BUFFER_COUNT; //Index of the previous buffer prevIndex = (i + AM335X_ETH_RX_BUFFER_COUNT - 1) % AM335X_ETH_RX_BUFFER_COUNT; //Next descriptor pointer rxBufferDesc[i].word0 = (uint32_t) &rxBufferDesc[nextIndex]; //Buffer pointer rxBufferDesc[i].word1 = (uint32_t) rxBuffer[i]; //Buffer offset and buffer length rxBufferDesc[i].word2 = AM335X_ETH_RX_BUFFER_SIZE; //Status flags and packet length rxBufferDesc[i].word3 = CPSW_RX_WORD3_OWNER; //Form a doubly linked list rxBufferDesc[i].next = &rxBufferDesc[nextIndex]; rxBufferDesc[i].prev = &rxBufferDesc[prevIndex]; } //Point to the very first descriptor rxCurBufferDesc = &rxBufferDesc[0]; //Mark the end of the queue rxCurBufferDesc->prev->word0 = (uint32_t) NULL; //Write the RX DMA head descriptor pointer CPSW_CPDMA_RX_HDP_R(CPSW_CH0) = (uint32_t) rxCurBufferDesc; } /** * @brief AM335x Ethernet MAC timer handler * * This 
routine is periodically called by the TCP/IP stack to handle periodic * operations such as polling the link state * * @param[in] interface Underlying network interface **/ void am335xEthTick(NetInterface *interface) { //Valid Ethernet PHY or switch driver? if(interface->phyDriver != NULL) { //Handle periodic operations interface->phyDriver->tick(interface); } else if(interface->switchDriver != NULL) { //Handle periodic operations interface->switchDriver->tick(interface); } else { //Just for sanity } //Misqueued buffer condition? if((rxCurBufferDesc->word3 & CPSW_RX_WORD3_OWNER) != 0) { if(CPSW_CPDMA_RX_HDP_R(CPSW_CH0) == 0) { //The host acts on the misqueued buffer condition by writing the added //buffer descriptor address to the appropriate RX DMA head descriptor //pointer CPSW_CPDMA_RX_HDP_R(CPSW_CH0) = (uint32_t) rxCurBufferDesc; } } } /** * @brief Enable interrupts * @param[in] interface Underlying network interface **/ void am335xEthEnableIrq(NetInterface *interface) { #ifdef ti_sysbios_BIOS___VERS //Enable Ethernet MAC interrupts Hwi_enableInterrupt(SYS_INT_3PGSWTXINT0); Hwi_enableInterrupt(SYS_INT_3PGSWRXINT0); #else //Enable Ethernet MAC interrupts IntSystemEnable(SYS_INT_3PGSWTXINT0); IntSystemEnable(SYS_INT_3PGSWRXINT0); #endif //Valid Ethernet PHY or switch driver? if(interface->phyDriver != NULL) { //Enable Ethernet PHY interrupts interface->phyDriver->enableIrq(interface); } else if(interface->switchDriver != NULL) { //Enable Ethernet switch interrupts interface->switchDriver->enableIrq(interface); } else { //Just for sanity } } /** * @brief Disable interrupts * @param[in] interface Underlying network interface **/ void am335xEthDisableIrq(NetInterface *interface) { #ifdef ti_sysbios_BIOS___VERS //Disable Ethernet MAC interrupts Hwi_disableInterrupt(SYS_INT_3PGSWTXINT0); Hwi_disableInterrupt(SYS_INT_3PGSWRXINT0); #else //Disable Ethernet MAC interrupts IntSystemDisable(SYS_INT_3PGSWTXINT0); IntSystemDisable(SYS_INT_3PGSWRXINT0); #endif //Valid Ethernet PHY or switch driver? if(interface->phyDriver != NULL) { //Disable Ethernet PHY interrupts interface->phyDriver->disableIrq(interface); } else if(interface->switchDriver != NULL) { //Disable Ethernet switch interrupts interface->switchDriver->disableIrq(interface); } else { //Just for sanity } } /** * @brief Ethernet MAC transmit interrupt **/ void am335xEthTxIrqHandler(void) { bool_t flag; uint32_t status; uint32_t temp; Am335xTxBufferDesc *p; //Interrupt service routine prologue osEnterIsr(); //This flag will be set if a higher priority task must be woken flag = FALSE; //Read the TX_STAT register to determine which channels caused the interrupt status = CPSW_WR_C_TX_STAT_R(CPSW_CORE0); //Packet transmitted on channel 1? if(status & (1 << CPSW_CH1)) { //Point to the buffer descriptor p = (Am335xTxBufferDesc *) CPSW_CPDMA_TX_CP_R(CPSW_CH1); //Read the status flags temp = p->word3 & (CPSW_TX_WORD3_SOP | CPSW_TX_WORD3_EOP | CPSW_TX_WORD3_OWNER | CPSW_TX_WORD3_EOQ); //Misqueued buffer condition? 
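//A note on the check that follows: the CPDMA sets the EOQ flag in word3 when
//it completes a descriptor whose next-descriptor pointer was still NULL, and
//then stops fetching. A descriptor linked in after that moment is misqueued;
//it is owned by the hardware but will never be processed. Observing
//SOP|EOP|EOQ with OWNER cleared therefore indicates a stalled queue, and the
//handler restarts it by writing the next descriptor address to the TX DMA
//head descriptor pointer, as the code below does.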
if(temp == (CPSW_TX_WORD3_SOP | CPSW_TX_WORD3_EOP | CPSW_TX_WORD3_EOQ)) { //Check whether the next descriptor pointer is non-zero if(p->word0 != 0) { //The host corrects the misqueued buffer condition by writing the //misqueued packets buffer descriptor address to the appropriate //TX DMA head descriptor pointer CPSW_CPDMA_TX_HDP_R(CPSW_CH1) = (uint32_t) p->word0; } } //Write the TX completion pointer CPSW_CPDMA_TX_CP_R(CPSW_CH1) = (uint32_t) p; //Check whether the TX buffer is available for writing if((txCurBufferDesc1->word3 & CPSW_TX_WORD3_OWNER) == 0) { //Notify the TCP/IP stack that the transmitter is ready to send flag |= osSetEventFromIsr(&nicDriverInterface1->nicTxEvent); } } //Packet transmitted on channel 2? if(status & (1 << CPSW_CH2)) { //Point to the buffer descriptor p = (Am335xTxBufferDesc *) CPSW_CPDMA_TX_CP_R(CPSW_CH2); //Read the status flags temp = p->word3 & (CPSW_TX_WORD3_SOP | CPSW_TX_WORD3_EOP | CPSW_TX_WORD3_OWNER | CPSW_TX_WORD3_EOQ); //Misqueued buffer condition? if(temp == (CPSW_TX_WORD3_SOP | CPSW_TX_WORD3_EOP | CPSW_TX_WORD3_EOQ)) { //Check whether the next descriptor pointer is non-zero if(p->word0 != 0) { //The host corrects the misqueued buffer condition by writing the //misqueued packets buffer descriptor address to the appropriate //TX DMA head descriptor pointer CPSW_CPDMA_TX_HDP_R(CPSW_CH2) = (uint32_t) p->word0; } } //Write the TX completion pointer CPSW_CPDMA_TX_CP_R(CPSW_CH2) = (uint32_t) p; //Check whether the TX buffer is available for writing if((txCurBufferDesc2->word3 & CPSW_TX_WORD3_OWNER) == 0) { //Notify the TCP/IP stack that the transmitter is ready to send flag |= osSetEventFromIsr(&nicDriverInterface2->nicTxEvent); } } //Write the DMA end of interrupt vector CPSW_CPDMA_CPDMA_EOI_VECTOR_R = CPSW_CPDMA_EOI_VECTOR_TX_PULSE; //Interrupt service routine epilogue osExitIsr(flag); } /** * @brief Ethernet MAC receive interrupt **/ void am335xEthRxIrqHandler(void) { bool_t flag; uint32_t status; //Interrupt service routine prologue osEnterIsr(); //This flag will be set if a higher priority task must be woken flag = FALSE; //Read the RX_STAT register to determine which channels caused the interrupt status = CPSW_WR_C_RX_STAT_R(CPSW_CORE0); //Packet received on channel 0? if(status & (1 << CPSW_CH0)) { //Disable RX interrupts CPSW_WR_C_RX_EN_R(CPSW_CORE0) &= ~(1 << CPSW_CH0); //Set event flag if(nicDriverInterface1 != NULL) { nicDriverInterface1->nicEvent = TRUE; } else if(nicDriverInterface2 != NULL) { nicDriverInterface2->nicEvent = TRUE; } //Notify the TCP/IP stack of the event flag |= osSetEventFromIsr(&netEvent); } //Write the DMA end of interrupt vector CPSW_CPDMA_CPDMA_EOI_VECTOR_R = CPSW_CPDMA_EOI_VECTOR_RX_PULSE; //Interrupt service routine epilogue osExitIsr(flag); } /** * @brief AM335x Ethernet MAC event handler * @param[in] interface Underlying network interface **/ void am335xEthEventHandler(NetInterface *interface) { static uint8_t buffer[AM335X_ETH_RX_BUFFER_SIZE]; error_t error; size_t n; uint32_t temp; //Process all pending packets do { //The current buffer is available for reading? if((rxCurBufferDesc->word3 & CPSW_RX_WORD3_OWNER) == 0) { //SOP and EOP flags should be set if((rxCurBufferDesc->word3 & CPSW_RX_WORD3_SOP) != 0 && (rxCurBufferDesc->word3 & CPSW_RX_WORD3_EOP) != 0) { //Make sure no error occurred if((rxCurBufferDesc->word3 & CPSW_RX_WORD3_PKT_ERROR) == 0) { //Check the port on which the packet was received switch(rxCurBufferDesc->word3 & CPSW_RX_WORD3_FROM_PORT) { //Port 1? 
case CPSW_RX_WORD3_FROM_PORT_1: interface = nicDriverInterface1; break; //Port 2? case CPSW_RX_WORD3_FROM_PORT_2: interface = nicDriverInterface2; break; //Invalid port number? default: interface = NULL; break; } //Retrieve the length of the frame n = rxCurBufferDesc->word3 & CPSW_RX_WORD3_PACKET_LENGTH; //Limit the number of data to read n = MIN(n, AM335X_ETH_RX_BUFFER_SIZE); //Sanity check if(interface != NULL) { //Copy data from the receive buffer osMemcpy(buffer, (uint8_t *) rxCurBufferDesc->word1, (n + 3) & ~3UL); //Packet successfully received error = NO_ERROR; } else { //The port number is invalid error = ERROR_INVALID_PACKET; } } else { //The received packet contains an error error = ERROR_INVALID_PACKET; } } else { //The packet is not valid error = ERROR_INVALID_PACKET; } //Mark the end of the queue with a NULL pointer rxCurBufferDesc->word0 = (uint32_t) NULL; //Restore the length of the buffer rxCurBufferDesc->word2 = AM335X_ETH_RX_BUFFER_SIZE; //Give the ownership of the descriptor back to the DMA rxCurBufferDesc->word3 = CPSW_RX_WORD3_OWNER; //Link the current descriptor to the previous descriptor rxCurBufferDesc->prev->word0 = (uint32_t) rxCurBufferDesc; //Read the status flags of the previous descriptor temp = rxCurBufferDesc->prev->word3 & (CPSW_RX_WORD3_SOP | CPSW_RX_WORD3_EOP | CPSW_RX_WORD3_OWNER | CPSW_RX_WORD3_EOQ); //Misqueued buffer condition? if(temp == (CPSW_RX_WORD3_SOP | CPSW_RX_WORD3_EOP | CPSW_RX_WORD3_EOQ)) { //The host acts on the misqueued buffer condition by writing the added //buffer descriptor address to the appropriate RX DMA head descriptor //pointer CPSW_CPDMA_RX_HDP_R(CPSW_CH0) = (uint32_t) rxCurBufferDesc; } //Write the RX completion pointer CPSW_CPDMA_RX_CP_R(CPSW_CH0) = (uint32_t) rxCurBufferDesc; //Point to the next descriptor in the list rxCurBufferDesc = rxCurBufferDesc->next; } else { //No more data in the receive buffer error = ERROR_BUFFER_EMPTY; } //Check whether a valid packet has been received if(!error) { NetRxAncillary ancillary; //Additional options can be passed to the stack along with the packet ancillary = NET_DEFAULT_RX_ANCILLARY; //Pass the packet to the upper layer nicProcessPacket(interface, buffer, n, &ancillary); } //No more data in the receive buffer?
} while(error != ERROR_BUFFER_EMPTY); //Re-enable RX interrupts CPSW_WR_C_RX_EN_R(CPSW_CORE0) |= (1 << CPSW_CH0); } /** * @brief Send a packet (port 1) * @param[in] interface Underlying network interface * @param[in] buffer Multi-part buffer containing the data to send * @param[in] offset Offset to the first data byte * @param[in] ancillary Additional options passed to the stack along with * the packet * @return Error code **/ error_t am335xEthSendPacketPort1(NetInterface *interface, const NetBuffer *buffer, size_t offset, NetTxAncillary *ancillary) { static uint8_t temp[AM335X_ETH_TX_BUFFER_SIZE]; size_t length; uint32_t value; //Retrieve the length of the packet length = netBufferGetLength(buffer) - offset; //Check the frame length if(length > AM335X_ETH_TX_BUFFER_SIZE) { //The transmitter can accept another packet osSetEvent(&interface->nicTxEvent); //Report an error return ERROR_INVALID_LENGTH; } //Make sure the current buffer is available for writing if((txCurBufferDesc1->word3 & CPSW_TX_WORD3_OWNER) != 0) { return ERROR_FAILURE; } //Mark the end of the queue with a NULL pointer txCurBufferDesc1->word0 = (uint32_t) NULL; //Copy user data to the transmit buffer netBufferRead(temp, buffer, offset, length); osMemcpy((uint8_t *) txCurBufferDesc1->word1, temp, (length + 3) & ~3UL); //Set the length of the buffer txCurBufferDesc1->word2 = length & CPSW_TX_WORD2_BUFFER_LENGTH; //Set the length of the packet value = length & CPSW_TX_WORD3_PACKET_LENGTH; //Set SOP and EOP flags as the data fits in a single buffer value |= CPSW_TX_WORD3_SOP | CPSW_TX_WORD3_EOP; //Redirect the packet to the relevant port number value |= CPSW_TX_WORD3_TO_PORT_EN | CPSW_TX_WORD3_TO_PORT_1; //Give the ownership of the descriptor to the DMA txCurBufferDesc1->word3 = CPSW_TX_WORD3_OWNER | value; //Link the current descriptor to the previous descriptor txCurBufferDesc1->prev->word0 = (uint32_t) txCurBufferDesc1; //Read the status flags of the previous descriptor value = txCurBufferDesc1->prev->word3 & (CPSW_TX_WORD3_SOP | CPSW_TX_WORD3_EOP | CPSW_TX_WORD3_OWNER | CPSW_TX_WORD3_EOQ); //Misqueued buffer condition? 
if(value == (CPSW_TX_WORD3_SOP | CPSW_TX_WORD3_EOP | CPSW_TX_WORD3_EOQ)) { //Clear the misqueued buffer condition txCurBufferDesc1->prev->word3 = 0; //The host corrects the misqueued buffer condition by writing the //misqueued packets buffer descriptor address to the appropriate //TX DMA head descriptor pointer CPSW_CPDMA_TX_HDP_R(CPSW_CH1) = (uint32_t) txCurBufferDesc1; } //Point to the next descriptor in the list txCurBufferDesc1 = txCurBufferDesc1->next; //Check whether the next buffer is available for writing if((txCurBufferDesc1->word3 & CPSW_TX_WORD3_OWNER) == 0) { //The transmitter can accept another packet osSetEvent(&interface->nicTxEvent); } //Data successfully written return NO_ERROR; } /** * @brief Send a packet (port 2) * @param[in] interface Underlying network interface * @param[in] buffer Multi-part buffer containing the data to send * @param[in] offset Offset to the first data byte * @param[in] ancillary Additional options passed to the stack along with * the packet * @return Error code **/ error_t am335xEthSendPacketPort2(NetInterface *interface, const NetBuffer *buffer, size_t offset, NetTxAncillary *ancillary) { static uint8_t temp[AM335X_ETH_TX_BUFFER_SIZE]; size_t length; uint32_t value; //Retrieve the length of the packet length = netBufferGetLength(buffer) - offset; //Check the frame length if(length > AM335X_ETH_TX_BUFFER_SIZE) { //The transmitter can accept another packet osSetEvent(&interface->nicTxEvent); //Report an error return ERROR_INVALID_LENGTH; } //Make sure the current buffer is available for writing if((txCurBufferDesc2->word3 & CPSW_TX_WORD3_OWNER) != 0) { return ERROR_FAILURE; } //Mark the end of the queue with a NULL pointer txCurBufferDesc2->word0 = (uint32_t) NULL; //Copy user data to the transmit buffer netBufferRead(temp, buffer, offset, length); osMemcpy((uint8_t *) txCurBufferDesc2->word1, temp, (length + 3) & ~3UL); //Set the length of the buffer txCurBufferDesc2->word2 = length & CPSW_TX_WORD2_BUFFER_LENGTH; //Set the length of the packet value = length & CPSW_TX_WORD3_PACKET_LENGTH; //Set SOP and EOP flags as the data fits in a single buffer value |= CPSW_TX_WORD3_SOP | CPSW_TX_WORD3_EOP; //Redirect the packet to the relevant port number value |= CPSW_TX_WORD3_TO_PORT_EN | CPSW_TX_WORD3_TO_PORT_2; //Give the ownership of the descriptor to the DMA txCurBufferDesc2->word3 = CPSW_TX_WORD3_OWNER | value; //Link the current descriptor to the previous descriptor txCurBufferDesc2->prev->word0 = (uint32_t) txCurBufferDesc2; //Read the status flags of the previous descriptor value = txCurBufferDesc2->prev->word3 & (CPSW_TX_WORD3_SOP | CPSW_TX_WORD3_EOP | CPSW_TX_WORD3_OWNER | CPSW_TX_WORD3_EOQ); //Misqueued buffer condition? 
if(value == (CPSW_TX_WORD3_SOP | CPSW_TX_WORD3_EOP | CPSW_TX_WORD3_EOQ)) { //Clear the misqueued buffer condition txCurBufferDesc2->prev->word3 = 0; //The host corrects the misqueued buffer condition by writing the //misqueued packets buffer descriptor address to the appropriate //TX DMA head descriptor pointer CPSW_CPDMA_TX_HDP_R(CPSW_CH2) = (uint32_t) txCurBufferDesc2; } //Point to the next descriptor in the list txCurBufferDesc2 = txCurBufferDesc2->next; //Check whether the next buffer is available for writing if((txCurBufferDesc2->word3 & CPSW_TX_WORD3_OWNER) == 0) { //The transmitter can accept another packet osSetEvent(&interface->nicTxEvent); } //Data successfully written return NO_ERROR; } /** * @brief Configure MAC address filtering * @param[in] interface Underlying network interface * @return Error code **/ error_t am335xEthUpdateMacAddrFilter(NetInterface *interface) { uint_t i; uint_t port; MacFilterEntry *entry; //Debug message TRACE_DEBUG("Updating AM335x ALE table...\r\n"); //Select the relevant port number if(interface == nicDriverInterface1) { port = CPSW_PORT1; } else if(interface == nicDriverInterface2) { port = CPSW_PORT2; } else { port = CPSW_PORT0; } //The MAC address filter contains the list of MAC addresses to accept //when receiving an Ethernet frame for(i = 0; i < MAC_ADDR_FILTER_SIZE; i++) { //Point to the current entry entry = &interface->macAddrFilter[i]; //Check whether the ALE table should be updated for the //current multicast address if(!macCompAddr(&entry->addr, &MAC_UNSPECIFIED_ADDR)) { if(entry->addFlag) { //Add VLAN/multicast address entry to the ALE table am335xEthAddVlanAddrEntry(port, port, &entry->addr); } else if(entry->deleteFlag) { //Remove VLAN/multicast address entry from the ALE table am335xEthDeleteVlanAddrEntry(port, port, &entry->addr); } } } //Successful processing return NO_ERROR; } /** * @brief Adjust MAC configuration parameters for proper operation * @param[in] interface Underlying network interface * @return Error code **/ error_t am335xEthUpdateMacConfig(NetInterface *interface) { uint32_t config = 0; //Read MAC control register if(interface == nicDriverInterface1) { config = CPSW_SL1_MACCONTROL_R; } else if(interface == nicDriverInterface2) { config = CPSW_SL2_MACCONTROL_R; } //1000BASE-T operation mode? if(interface->linkSpeed == NIC_LINK_SPEED_1GBPS) { config |= CPSW_SL_MACCONTROL_GIG; config &= ~(CPSW_SL_MACCONTROL_IFCTL_A | CPSW_SL_MACCONTROL_IFCTL_B); } //100BASE-TX operation mode? else if(interface->linkSpeed == NIC_LINK_SPEED_100MBPS) { config &= ~CPSW_SL_MACCONTROL_GIG; config |= CPSW_SL_MACCONTROL_IFCTL_A | CPSW_SL_MACCONTROL_IFCTL_B; } //10BASE-T operation mode? else { config &= ~CPSW_SL_MACCONTROL_GIG; config &= ~(CPSW_SL_MACCONTROL_IFCTL_A | CPSW_SL_MACCONTROL_IFCTL_B); } //Half-duplex or full-duplex mode? if(interface->duplexMode == NIC_FULL_DUPLEX_MODE) { config |= CPSW_SL_MACCONTROL_FULLDUPLEX; } else { config &= ~CPSW_SL_MACCONTROL_FULLDUPLEX; } //Update MAC control register if(interface == nicDriverInterface1) { CPSW_SL1_MACCONTROL_R = config; } else if(interface == nicDriverInterface2) { CPSW_SL2_MACCONTROL_R = config; } //Successful processing return NO_ERROR; } /** * @brief Write PHY register * @param[in] opcode Access type (2 bits) * @param[in] phyAddr PHY address (5 bits) * @param[in] regAddr Register address (5 bits) * @param[in] data Register value **/ void am335xEthWritePhyReg(uint8_t opcode, uint8_t phyAddr, uint8_t regAddr, uint16_t data) { uint32_t temp; //Valid opcode? 
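//The write path below performs a single Clause 22 MDIO cycle through
//MDIO_USERACCESS0: GO and WRITE are set together with the 5-bit PHY and
//register addresses and the 16-bit data, then GO is polled until the serial
//transfer finishes. The divider programmed earlier in am335xEthInitInstance
//follows CLKDIV = (MDIO_INPUT_CLK / MDIO_OUTPUT_CLK) - 1; purely as an
//illustration (the clock constants are defined outside this excerpt), a
//125 MHz input clock and a 1 MHz MDC target would give CLKDIV = 124.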
if(opcode == SMI_OPCODE_WRITE) { //Set up a write operation temp = MDIO_USERACCESS0_GO | MDIO_USERACCESS0_WRITE; //PHY address temp |= (phyAddr << MDIO_USERACCESS0_PHYADR_SHIFT) & MDIO_USERACCESS0_PHYADR; //Register address temp |= (regAddr << MDIO_USERACCESS0_REGADR_SHIFT) & MDIO_USERACCESS0_REGADR; //Register value temp |= data & MDIO_USERACCESS0_DATA; //Start a write operation MDIO_USERACCESS0_R = temp; //Wait for the write to complete while((MDIO_USERACCESS0_R & MDIO_USERACCESS0_GO) != 0) { } } else { //The MAC peripheral only supports standard Clause 22 opcodes } } /** * @brief Read PHY register * @param[in] opcode Access type (2 bits) * @param[in] phyAddr PHY address (5 bits) * @param[in] regAddr Register address (5 bits) * @return Register value **/ uint16_t am335xEthReadPhyReg(uint8_t opcode, uint8_t phyAddr, uint8_t regAddr) { uint16_t data; uint32_t temp; //Valid opcode? if(opcode == SMI_OPCODE_READ) { //Set up a read operation temp = MDIO_USERACCESS0_GO | MDIO_USERACCESS0_READ; //PHY address temp |= (phyAddr << MDIO_USERACCESS0_PHYADR_SHIFT) & MDIO_USERACCESS0_PHYADR; //Register address temp |= (regAddr << MDIO_USERACCESS0_REGADR_SHIFT) & MDIO_USERACCESS0_REGADR; //Start a read operation MDIO_USERACCESS0_R = temp; //Wait for the read to complete while((MDIO_USERACCESS0_R & MDIO_USERACCESS0_GO) != 0) { } //Get register value data = MDIO_USERACCESS0_R & MDIO_USERACCESS0_DATA; } else { //The MAC peripheral only supports standard Clause 22 opcodes data = 0; } //Return the value of the PHY register return data; } /** * @brief Write an ALE table entry * @param[in] index Entry index * @param[in] entry Pointer to the ALE table entry **/ void am335xEthWriteEntry(uint_t index, const Am335xAleEntry *entry) { //Copy the content of the entry to be written CPSW_ALE_TBLW_R(2) = entry->word2; CPSW_ALE_TBLW_R(1) = entry->word1; CPSW_ALE_TBLW_R(0) = entry->word0; //Write the ALE entry at the specified index CPSW_ALE_TBLCTL_R = CPSW_ALE_TBLCTL_WRITE_RDZ | index; } /** * @brief Read an ALE table entry * @param[in] index Entry index * @param[out] entry Pointer to the ALE table entry **/ void am335xEthReadEntry(uint_t index, Am335xAleEntry *entry) { //Read the ALE entry at the specified index CPSW_ALE_TBLCTL_R = index; //Copy the content of the entry entry->word2 = CPSW_ALE_TBLW_R(2); entry->word1 = CPSW_ALE_TBLW_R(1); entry->word0 = CPSW_ALE_TBLW_R(0); } /** * @brief Find a free entry in the ALE table * @return Index of the first free entry **/ uint_t am335xEthFindFreeEntry(void) { uint_t index; uint32_t type; Am335xAleEntry entry; //Loop through the ALE table entries for(index = 0; index < CPSW_ALE_MAX_ENTRIES; index++) { //Read the current entry am335xEthReadEntry(index, &entry); //Retrieve the type of the ALE entry type = entry.word1 & CPSW_ALE_WORD1_ENTRY_TYPE_MASK; //Free entry? 
if(type == CPSW_ALE_WORD1_ENTRY_TYPE_FREE) { //Exit immediately break; } } //Return the index of the entry return index; } /** * @brief Search the ALE table for the specified VLAN entry * @param[in] vlanId VLAN identifier * @return Index of the matching entry **/ uint_t am335xEthFindVlanEntry(uint_t vlanId) { uint_t index; uint32_t value; Am335xAleEntry entry; //Loop through the ALE table entries for(index = 0; index < CPSW_ALE_MAX_ENTRIES; index++) { //Read the current entry am335xEthReadEntry(index, &entry); //Retrieve the type of the ALE entry value = entry.word1 & CPSW_ALE_WORD1_ENTRY_TYPE_MASK; //Check the type of the ALE entry if(value == CPSW_ALE_WORD1_ENTRY_TYPE_VLAN_ADDR) { //Get the VLAN identifier value = entry.word1 & CPSW_ALE_WORD1_VLAN_ID_MASK; //Compare the VLAN identifier if(value == CPSW_ALE_WORD1_VLAN_ID(vlanId)) { //Matching ALE entry found break; } } } //Return the index of the entry return index; } /** * @brief Search the ALE table for the specified VLAN/address entry * @param[in] vlanId VLAN identifier * @param[in] macAddr MAC address * @return Index of the matching entry **/ uint_t am335xEthFindVlanAddrEntry(uint_t vlanId, MacAddr *macAddr) { uint_t index; uint32_t value; Am335xAleEntry entry; //Loop through the ALE table entries for(index = 0; index < CPSW_ALE_MAX_ENTRIES; index++) { //Read the current entry am335xEthReadEntry(index, &entry); //Retrieve the type of the ALE entry value = entry.word1 & CPSW_ALE_WORD1_ENTRY_TYPE_MASK; //Check the type of the ALE entry if(value == CPSW_ALE_WORD1_ENTRY_TYPE_VLAN_ADDR) { //Get the VLAN identifier value = entry.word1 & CPSW_ALE_WORD1_VLAN_ID_MASK; //Compare the VLAN identifier if(value == CPSW_ALE_WORD1_VLAN_ID(vlanId)) { //Compare the MAC address if(macAddr->b[0] == (uint8_t) (entry.word1 >> 8) && macAddr->b[1] == (uint8_t) (entry.word1 >> 0) && macAddr->b[2] == (uint8_t) (entry.word0 >> 24) && macAddr->b[3] == (uint8_t) (entry.word0 >> 16) && macAddr->b[4] == (uint8_t) (entry.word0 >> 8) && macAddr->b[5] == (uint8_t) (entry.word0 >> 0)) { //Matching ALE entry found break; } } } } //Return the index of the entry return index; } /** * @brief Add a VLAN entry in the ALE table * @param[in] port Port number * @param[in] vlanId VLAN identifier * @return Error code **/ error_t am335xEthAddVlanEntry(uint_t port, uint_t vlanId) { error_t error; uint_t index; Am335xAleEntry entry; //Ensure that there are no duplicate address entries in the ALE table index = am335xEthFindVlanEntry(vlanId); //No matching entry found? 
if(index >= CPSW_ALE_MAX_ENTRIES) { //Find a free entry in the ALE table index = am335xEthFindFreeEntry(); } //Sanity check if(index < CPSW_ALE_MAX_ENTRIES) { //Set up a VLAN table entry entry.word2 = 0; entry.word1 = CPSW_ALE_WORD1_ENTRY_TYPE_VLAN; entry.word0 = 0; //Set VLAN identifier entry.word1 |= CPSW_ALE_WORD1_VLAN_ID(vlanId); //Force the packet VLAN tag to be removed on egress entry.word0 |= CPSW_ALE_WORD0_FORCE_UNTAG_EGRESS(1 << port) | CPSW_ALE_WORD0_FORCE_UNTAG_EGRESS(1 << CPSW_PORT0); //Set VLAN member list entry.word0 |= CPSW_ALE_WORD0_VLAN_MEMBER_LIST(1 << port) | CPSW_ALE_WORD0_VLAN_MEMBER_LIST(1 << CPSW_PORT0); //Add a new entry to the ALE table am335xEthWriteEntry(index, &entry); //Successful processing error = NO_ERROR; } else { //The ALE table is full error = ERROR_FAILURE; } //Return status code return error; } /** * @brief Add a VLAN/address entry in the ALE table * @param[in] port Port number * @param[in] vlanId VLAN identifier * @param[in] macAddr MAC address * @return Error code **/ error_t am335xEthAddVlanAddrEntry(uint_t port, uint_t vlanId, MacAddr *macAddr) { error_t error; uint_t index; Am335xAleEntry entry; //Ensure that there are no duplicate address entries in the ALE table index = am335xEthFindVlanAddrEntry(vlanId, macAddr); //No matching entry found? if(index >= CPSW_ALE_MAX_ENTRIES) { //Find a free entry in the ALE table index = am335xEthFindFreeEntry(); } //Sanity check if(index < CPSW_ALE_MAX_ENTRIES) { //Set up a VLAN/address table entry entry.word2 = 0; entry.word1 = CPSW_ALE_WORD1_ENTRY_TYPE_VLAN_ADDR; entry.word0 = 0; //Multicast address? if(macIsMulticastAddr(macAddr)) { //Set port mask entry.word2 |= CPSW_ALE_WORD2_SUPER | CPSW_ALE_WORD2_PORT_LIST(1 << port) | CPSW_ALE_WORD2_PORT_LIST(1 << CPSW_CH0); //Set multicast forward state entry.word1 |= CPSW_ALE_WORD1_MCAST_FWD_STATE(0); } //Set VLAN identifier entry.word1 |= CPSW_ALE_WORD1_VLAN_ID(vlanId); //Copy the upper 16 bits of the unicast address entry.word1 |= (macAddr->b[0] << 8) | macAddr->b[1]; //Copy the lower 32 bits of the unicast address entry.word0 |= (macAddr->b[2] << 24) | (macAddr->b[3] << 16) | (macAddr->b[4] << 8) | macAddr->b[5]; //Add a new entry to the ALE table am335xEthWriteEntry(index, &entry); //Successful processing error = NO_ERROR; } else { //The ALE table is full error = ERROR_FAILURE; } //Return status code return error; } /** * @brief Remove a VLAN/address entry from the ALE table * @param[in] port Port number * @param[in] vlanId VLAN identifier * @param[in] macAddr MAC address * @return Error code **/ error_t am335xEthDeleteVlanAddrEntry(uint_t port, uint_t vlanId, MacAddr *macAddr) { error_t error; uint_t index; Am335xAleEntry entry; //Search the ALE table for the specified VLAN/address entry index = am335xEthFindVlanAddrEntry(vlanId, macAddr); //Matching ALE entry found? if(index < CPSW_ALE_MAX_ENTRIES) { //Clear the contents of the entry entry.word2 = 0; entry.word1 = 0; entry.word0 = 0; //Update the ALE table am335xEthWriteEntry(index, &entry); //Successful processing error = NO_ERROR; } else { //Entry not found error = ERROR_NOT_FOUND; } //Return status code return error; }
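The ALE helpers above split an EUI-48 across two table words: bytes 0-1 occupy the low 16 bits of word1 and bytes 2-5 fill word0 from the most significant byte down, which is why am335xEthFindVlanAddrEntry compares shifted byte lanes. A minimal, self-contained sketch of that packing (the type and function names are hypothetical; only the byte layout is taken from the driver above):

#include <stdint.h>

typedef struct
{
   uint8_t b[6];
} SketchMacAddr;

//Mirror the layout used by am335xEthAddVlanAddrEntry: bytes 0-1 go into
//the low 16 bits of word1, bytes 2-5 fill word0 from MSB to LSB
static void sketchPackAleAddr(const SketchMacAddr *addr, uint32_t *word0,
   uint32_t *word1)
{
   *word1 = ((uint32_t) addr->b[0] << 8) | addr->b[1];

   *word0 = ((uint32_t) addr->b[2] << 24) | ((uint32_t) addr->b[3] << 16) |
      ((uint32_t) addr->b[4] << 8) | addr->b[5];
}

Reversing the shifts reproduces, byte for byte, the comparisons performed in am335xEthFindVlanAddrEntry.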
error_t am335xEthDeleteVlanAddrEntry(uint_t port, uint_t vlanId, MacAddr *macAddr) { error_t error; uint_t index; Am335xAleEntry entry; //Search the ALE table for the specified VLAN/address entry index = am335xEthFindVlanAddrEntry(vlanId, macAddr); //Matching ALE entry found? if(index < CPSW_ALE_MAX_ENTRIES) { //Clear the contents of the entry entry.word2 = 0; entry.word1 = 0; entry.word0 = 0; //Update the ALE table am335xEthWriteEntry(index, &entry); //Sucessful processing error = NO_ERROR; } else { //Entry not found error = ERROR_NOT_FOUND; } //Return status code return error; }
error_t am335xEthDeleteVlanAddrEntry(uint_t port, uint_t vlanId, MacAddr *macAddr) { error_t error; uint_t index; Am335xAleEntry entry; //Search the ALE table for the specified VLAN/address entry index = am335xEthFindVlanAddrEntry(vlanId, macAddr); //Matching ALE entry found? if(index < CPSW_ALE_MAX_ENTRIES) { //Clear the contents of the entry entry.word2 = 0; entry.word1 = 0; entry.word0 = 0; //Update the ALE table am335xEthWriteEntry(index, &entry); //Successful processing error = NO_ERROR; } else { //Entry not found error = ERROR_NOT_FOUND; } //Return status code return error; }
{'added': [(9, ' * Copyright (C) 2010-2021 Oryx Embedded SARL. All rights reserved.'), (28, ' * @version 2.0.2'), (89, '//GCC compiler?'), (1807, ' //Successful processing'), (1878, ' //Successful processing'), (1920, ' //Successful processing')], 'deleted': [(9, ' * Copyright (C) 2010-2020 Oryx Embedded SARL. All rights reserved.'), (28, ' * @version 2.0.0'), (89, '//Keil MDK-ARM or GCC compiler?'), (1807, ' //Sucessful processing'), (1878, ' //Sucessful processing'), (1920, ' //Sucessful processing')]}
6
6
944
5,288
https://github.com/Oryx-Embedded/CycloneTCP
CVE-2021-26788
['CWE-20']
ws_decode.c
hybiReadAndDecode
#include "ws_decode.h" #include "base64.h" #include <string.h> #include <errno.h> #define WS_HYBI_MASK_LEN 4 #define WS_HYBI_HEADER_LEN_SHORT 2 + WS_HYBI_MASK_LEN #define WS_HYBI_HEADER_LEN_EXTENDED 4 + WS_HYBI_MASK_LEN #define WS_HYBI_HEADER_LEN_LONG 10 + WS_HYBI_MASK_LEN #undef WS_DECODE_DEBUG /* set to 1 to produce very fine debugging output */ #define WS_DECODE_DEBUG 0 #if WS_DECODE_DEBUG == 1 #define ws_dbg(fmt, ...) rfbLog((fmt), ##__VA_ARGS) #else #define ws_dbg(fmt, ...) #endif static inline int isControlFrame(ws_ctx_t *wsctx) { return 0 != (wsctx->header.opcode & 0x08); } static uint64_t hybiRemaining(ws_ctx_t *wsctx) { return wsctx->header.payloadLen - wsctx->nReadPayload; } static void hybiDecodeCleanupBasics(ws_ctx_t *wsctx) { /* keep opcode, cleanup rest */ wsctx->header.opcode = WS_OPCODE_INVALID; wsctx->header.payloadLen = 0; wsctx->header.mask.u = 0; wsctx->header.headerLen = 0; wsctx->header.data = NULL; wsctx->header.nRead = 0; wsctx->nReadPayload = 0; wsctx->carrylen = 0; wsctx->readPos = (unsigned char *)wsctx->codeBufDecode; wsctx->readlen = 0; wsctx->hybiDecodeState = WS_HYBI_STATE_HEADER_PENDING; wsctx->writePos = NULL; } static void hybiDecodeCleanupForContinuation(ws_ctx_t *wsctx) { hybiDecodeCleanupBasics(wsctx); ws_dbg("clean up frame, but expect continuation with opcode %d\n", wsctx->continuation_opcode); } void hybiDecodeCleanupComplete(ws_ctx_t *wsctx) { hybiDecodeCleanupBasics(wsctx); wsctx->continuation_opcode = WS_OPCODE_INVALID; ws_dbg("cleaned up wsctx completely\n"); } /** * Return payload data that has been decoded/unmasked from * a websocket frame. * * @param[out] dst destination buffer * @param[in] len bytes to copy to destination buffer * @param[in,out] wsctx internal state of decoding procedure * @param[out] number of bytes actually written to dst buffer * @return next hybi decoding state */ static int hybiReturnData(char *dst, int len, ws_ctx_t *wsctx, int *nWritten) { int nextState = WS_HYBI_STATE_ERR; /* if we have something already decoded copy and return */ if (wsctx->readlen > 0) { /* simply return what we have */ if (wsctx->readlen > len) { ws_dbg("copy to %d bytes to dst buffer; readPos=%p, readLen=%d\n", len, wsctx->readPos, wsctx->readlen); memcpy(dst, wsctx->readPos, len); *nWritten = len; wsctx->readlen -= len; wsctx->readPos += len; nextState = WS_HYBI_STATE_DATA_AVAILABLE; } else { ws_dbg("copy to %d bytes to dst buffer; readPos=%p, readLen=%d\n", wsctx->readlen, wsctx->readPos, wsctx->readlen); memcpy(dst, wsctx->readPos, wsctx->readlen); *nWritten = wsctx->readlen; wsctx->readlen = 0; wsctx->readPos = NULL; if (hybiRemaining(wsctx) == 0) { nextState = WS_HYBI_STATE_FRAME_COMPLETE; } else { nextState = WS_HYBI_STATE_DATA_NEEDED; } } ws_dbg("after copy: readPos=%p, readLen=%d\n", wsctx->readPos, wsctx->readlen); } else { /* it may happen that we read some bytes but could not decode them, * in that case, set errno to EAGAIN and return -1 */ nextState = wsctx->hybiDecodeState; errno = EAGAIN; *nWritten = -1; } return nextState; } /** * Read an RFC 6455 websocket frame (IETF hybi working group). * * Internal state is updated according to bytes received and the * decoding of header information. * * @param[in] cl client ptr with ptr to raw socket and ws_ctx_t ptr * @param[out] sockRet emulated recv return value * @param[out] nPayload number of payload bytes already read * @return next hybi decoding state; WS_HYBI_STATE_HEADER_PENDING indicates * that the header was not received completely. 
*/ static int hybiReadHeader(ws_ctx_t *wsctx, int *sockRet, int *nPayload) { int ret; char *headerDst = wsctx->codeBufDecode + wsctx->header.nRead; int n = ((uint64_t)WSHLENMAX) - wsctx->header.nRead; ws_dbg("header_read to %p with len=%d\n", headerDst, n); ret = wsctx->ctxInfo.readFunc(wsctx->ctxInfo.ctxPtr, headerDst, n); ws_dbg("read %d bytes from socket\n", ret); if (ret <= 0) { if (-1 == ret) { /* save errno because rfbErr() will tamper it */ int olderrno = errno; rfbErr("%s: read; %s\n", __func__, strerror(errno)); errno = olderrno; goto err_cleanup_state; } else { *sockRet = 0; goto err_cleanup_state_sock_closed; } } wsctx->header.nRead += ret; if (wsctx->header.nRead < 2) { /* cannot decode header with less than two bytes */ goto ret_header_pending; } /* first two header bytes received; interpret header data and get rest */ wsctx->header.data = (ws_header_t *)wsctx->codeBufDecode; wsctx->header.opcode = wsctx->header.data->b0 & 0x0f; wsctx->header.fin = (wsctx->header.data->b0 & 0x80) >> 7; if (isControlFrame(wsctx)) { ws_dbg("is control frame\n"); /* is a control frame, leave remembered continuation opcode unchanged; * just check if there is a wrong fragmentation */ if (wsctx->header.fin == 0) { /* we only accept text/binary continuation frames; RFC6455: * Control frames (see Section 5.5) MAY be injected in the middle of * a fragmented message. Control frames themselves MUST NOT be * fragmented. */ rfbErr("control frame with FIN bit cleared received, aborting\n"); errno = EPROTO; goto err_cleanup_state; } } else { ws_dbg("not a control frame\n"); /* not a control frame, check for continuation opcode */ if (wsctx->header.opcode == WS_OPCODE_CONTINUATION) { ws_dbg("cont_frame\n"); /* do we have state (i.e., opcode) for continuation frame? */ if (wsctx->continuation_opcode == WS_OPCODE_INVALID) { rfbErr("no continuation state\n"); errno = EPROTO; goto err_cleanup_state; } /* otherwise, set opcode = continuation_opcode */ wsctx->header.opcode = wsctx->continuation_opcode; ws_dbg("set opcode to continuation_opcode: %d\n", wsctx->header.opcode); } else { if (wsctx->header.fin == 0) { wsctx->continuation_opcode = wsctx->header.opcode; } else { wsctx->continuation_opcode = WS_OPCODE_INVALID; } ws_dbg("set continuation_opcode to %d\n", wsctx->continuation_opcode); } } wsctx->header.payloadLen = (uint64_t)(wsctx->header.data->b1 & 0x7f); ws_dbg("first header bytes received; opcode=%d lenbyte=%d fin=%d\n", wsctx->header.opcode, wsctx->header.payloadLen, wsctx->header.fin); /* * 4.3. Client-to-Server Masking * * The client MUST mask all frames sent to the server. A server MUST * close the connection upon receiving a frame with the MASK bit set to 0. 
**/ if (!(wsctx->header.data->b1 & 0x80)) { rfbErr("%s: got frame without mask; ret=%d\n", __func__, ret); errno = EPROTO; goto err_cleanup_state; } if (wsctx->header.payloadLen < 126 && wsctx->header.nRead >= 6) { wsctx->header.headerLen = WS_HYBI_HEADER_LEN_SHORT; wsctx->header.mask = wsctx->header.data->u.m; } else if (wsctx->header.payloadLen == 126 && 8 <= wsctx->header.nRead) { wsctx->header.headerLen = WS_HYBI_HEADER_LEN_EXTENDED; wsctx->header.payloadLen = WS_NTOH16(wsctx->header.data->u.s16.l16); wsctx->header.mask = wsctx->header.data->u.s16.m16; } else if (wsctx->header.payloadLen == 127 && 14 <= wsctx->header.nRead) { wsctx->header.headerLen = WS_HYBI_HEADER_LEN_LONG; wsctx->header.payloadLen = WS_NTOH64(wsctx->header.data->u.s64.l64); wsctx->header.mask = wsctx->header.data->u.s64.m64; } else { /* Incomplete frame header, try again */ rfbErr("%s: incomplete frame header; ret=%d\n", __func__, ret); goto ret_header_pending; } char *h = wsctx->codeBufDecode; int i; ws_dbg("Header:\n"); for (i=0; i <10; i++) { ws_dbg("0x%02X\n", (unsigned char)h[i]); } ws_dbg("\n"); /* while RFC 6455 mandates that lengths MUST be encoded with the minimum * number of bytes, it does not specify for the server how to react on * 'wrongly' encoded frames --- this implementation rejects them*/ if ((wsctx->header.headerLen > WS_HYBI_HEADER_LEN_SHORT && wsctx->header.payloadLen < (uint64_t)126) || (wsctx->header.headerLen > WS_HYBI_HEADER_LEN_EXTENDED && wsctx->header.payloadLen < (uint64_t)65536)) { rfbErr("%s: invalid length field; headerLen=%d payloadLen=%llu\n", __func__, wsctx->header.headerLen, wsctx->header.payloadLen); errno = EPROTO; goto err_cleanup_state; } /* update write position for next bytes */ wsctx->writePos = wsctx->codeBufDecode + wsctx->header.nRead; /* set payload pointer just after header */ wsctx->readPos = (unsigned char *)(wsctx->codeBufDecode + wsctx->header.headerLen); *nPayload = wsctx->header.nRead - wsctx->header.headerLen; wsctx->nReadPayload = *nPayload; ws_dbg("header complete: state=%d headerlen=%d payloadlen=%llu writeTo=%p nPayload=%d\n", wsctx->hybiDecodeState, wsctx->header.headerLen, wsctx->header.payloadLen, wsctx->writePos, *nPayload); return WS_HYBI_STATE_DATA_NEEDED; ret_header_pending: errno = EAGAIN; *sockRet = -1; return WS_HYBI_STATE_HEADER_PENDING; err_cleanup_state: *sockRet = -1; err_cleanup_state_sock_closed: hybiDecodeCleanupComplete(wsctx); return WS_HYBI_STATE_ERR; } static int hybiWsFrameComplete(ws_ctx_t *wsctx) { return wsctx != NULL && hybiRemaining(wsctx) == 0; } static char * hybiPayloadStart(ws_ctx_t *wsctx) { return wsctx->codeBufDecode + wsctx->header.headerLen; } /** * Read the remaining payload bytes from associated raw socket. * * - try to read remaining bytes from socket * - unmask all multiples of 4 * - if frame incomplete but some bytes are left, these are copied to * the carry buffer * - if opcode is TEXT: Base64-decode all unmasked received bytes * - set state for reading decoded data * - reset write position to begin of buffer (+ header) * --> before we retrieve more data we let the caller clear all bytes * from the reception buffer * - execute return data routine * * Sets errno corresponding to what it gets from the underlying * socket or EPROTO if some invalid data is in the received frame * or ECONNRESET if a close reason + message is received. EIO is used if * an internal sanity check fails. 
* * @param[in] cl client ptr with raw socket reference * @param[out] dst destination buffer * @param[in] len size of destination buffer * @param[out] sockRet emulated recv return value * @param[in] nInBuf number of undecoded bytes before writePos from header read * @return next hybi decode state */ static int hybiReadAndDecode(ws_ctx_t *wsctx, char *dst, int len, int *sockRet, int nInBuf) { int n; int i; int toReturn; /* number of data bytes to return */ int toDecode; /* number of bytes to decode starting at wsctx->writePos */ int bufsize; int nextRead; unsigned char *data; uint32_t *data32; /* if data was carried over, copy to start of buffer */ memcpy(wsctx->writePos, wsctx->carryBuf, wsctx->carrylen); wsctx->writePos += wsctx->carrylen; /* -1 accounts for potential '\0' terminator for base64 decoding */ bufsize = wsctx->codeBufDecode + ARRAYSIZE(wsctx->codeBufDecode) - wsctx->writePos - 1; ws_dbg("bufsize=%d\n", bufsize); if (hybiRemaining(wsctx) > bufsize) { nextRead = bufsize; } else { nextRead = hybiRemaining(wsctx); } ws_dbg("calling read with buf=%p and len=%d (decodebuf=%p headerLen=%d)\n", wsctx->writePos, nextRead, wsctx->codeBufDecode, wsctx->header.headerLen); if (nextRead > 0) { /* decode more data */ if (-1 == (n = wsctx->ctxInfo.readFunc(wsctx->ctxInfo.ctxPtr, wsctx->writePos, nextRead))) { int olderrno = errno; rfbErr("%s: read; %s", __func__, strerror(errno)); errno = olderrno; *sockRet = -1; return WS_HYBI_STATE_ERR; } else if (n == 0) { *sockRet = 0; return WS_HYBI_STATE_ERR; } else { ws_dbg("read %d bytes from socket; nRead=%d\n", n, wsctx->nReadPayload); } } else { n = 0; } wsctx->nReadPayload += n; wsctx->writePos += n; if (hybiRemaining(wsctx) == 0) { wsctx->hybiDecodeState = WS_HYBI_STATE_FRAME_COMPLETE; } /* number of not yet unmasked payload bytes: what we read here + what was * carried over + what was read with the header */ toDecode = n + wsctx->carrylen + nInBuf; ws_dbg("toDecode=%d from n=%d carrylen=%d headerLen=%d\n", toDecode, n, wsctx->carrylen, wsctx->header.headerLen); if (toDecode < 0) { rfbErr("%s: internal error; negative number of bytes to decode: %d", __func__, toDecode); errno=EIO; *sockRet = -1; return WS_HYBI_STATE_ERR; } /* for a possible base64 decoding, we decode multiples of 4 bytes until * the whole frame is received and carry over any remaining bytes in the carry buf*/ data = (unsigned char *)(wsctx->writePos - toDecode); data32= (uint32_t *)data; for (i = 0; i < (toDecode >> 2); i++) { data32[i] ^= wsctx->header.mask.u; } ws_dbg("mask decoding; i=%d toDecode=%d\n", i, toDecode); if (wsctx->hybiDecodeState == WS_HYBI_STATE_FRAME_COMPLETE) { /* process the remaining bytes (if any) */ for (i*=4; i < toDecode; i++) { data[i] ^= wsctx->header.mask.c[i % 4]; } /* all data is here, no carrying */ wsctx->carrylen = 0; } else { /* carry over remaining, non-multiple-of-four bytes */ wsctx->carrylen = toDecode - (i * 4); if (wsctx->carrylen < 0 || wsctx->carrylen > ARRAYSIZE(wsctx->carryBuf)) { rfbErr("%s: internal error, invalid carry over size: carrylen=%d, toDecode=%d, i=%d", __func__, wsctx->carrylen, toDecode, i); *sockRet = -1; errno = EIO; return WS_HYBI_STATE_ERR; } ws_dbg("carrying over %d bytes from %p to %p\n", wsctx->carrylen, wsctx->writePos + (i * 4), wsctx->carryBuf); memcpy(wsctx->carryBuf, data + (i * 4), wsctx->carrylen); wsctx->writePos -= wsctx->carrylen; } toReturn = toDecode - wsctx->carrylen; switch (wsctx->header.opcode) { case WS_OPCODE_CLOSE: /* this data is not returned as payload data */ if (hybiWsFrameComplete(wsctx)) { 
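/* Per RFC 6455, a close frame's payload begins with a 2-byte status code in
 * network byte order, optionally followed by a UTF-8 reason string; the NUL
 * terminator written below lets the reason be logged as a C string starting
 * at payload offset 2. */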
*(wsctx->writePos) = '\0'; ws_dbg("got close cmd %d, reason %d: %s\n", (int)(wsctx->writePos - hybiPayloadStart(wsctx)), WS_NTOH16(((uint16_t *)hybiPayloadStart(wsctx))[0]), &hybiPayloadStart(wsctx)[2]); errno = ECONNRESET; *sockRet = -1; return WS_HYBI_STATE_FRAME_COMPLETE; } else { ws_dbg("got close cmd; waiting for %d more bytes to arrive\n", hybiRemaining(wsctx)); *sockRet = -1; errno = EAGAIN; return WS_HYBI_STATE_CLOSE_REASON_PENDING; } break; case WS_OPCODE_TEXT_FRAME: data[toReturn] = '\0'; ws_dbg("Initiate Base64 decoding in %p with max size %d and '\\0' at %p\n", data, bufsize, data + toReturn); if (-1 == (wsctx->readlen = rfbBase64PtoN((char *)data, data, bufsize))) { rfbErr("%s: Base64 decode error; %s\n", __func__, strerror(errno)); } wsctx->writePos = hybiPayloadStart(wsctx); break; case WS_OPCODE_BINARY_FRAME: wsctx->readlen = toReturn; wsctx->writePos = hybiPayloadStart(wsctx); ws_dbg("set readlen=%d writePos=%p\n", wsctx->readlen, wsctx->writePos); break; default: rfbErr("%s: unhandled opcode %d, b0: %02x, b1: %02x\n", __func__, (int)wsctx->header.opcode, wsctx->header.data->b0, wsctx->header.data->b1); } wsctx->readPos = data; return hybiReturnData(dst, len, wsctx, sockRet); } /** * Read function for websocket-socket emulation. * * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-------+-+-------------+-------------------------------+ * |F|R|R|R| opcode|M| Payload len | Extended payload length | * |I|S|S|S| (4) |A| (7) | (16/64) | * |N|V|V|V| |S| | (if payload len==126/127) | * | |1|2|3| |K| | | * +-+-+-+-+-------+-+-------------+ - - - - - - - - - - - - - - - + * | Extended payload length continued, if payload len == 127 | * + - - - - - - - - - - - - - - - +-------------------------------+ * | |Masking-key, if MASK set to 1 | * +-------------------------------+-------------------------------+ * | Masking-key (continued) | Payload Data | * +-------------------------------- - - - - - - - - - - - - - - - + * : Payload Data continued ... : * + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + * | Payload Data continued ... | * +---------------------------------------------------------------+ * * Using the decode buffer, this function: * - reads the complete header from the underlying socket * - reads any remaining data bytes * - unmasks the payload data using the provided mask * - decodes Base64 encoded text data * - copies len bytes of decoded payload data into dst * * Emulates a read call on a socket. 
*/ int webSocketsDecodeHybi(ws_ctx_t *wsctx, char *dst, int len) { int result = -1; /* int fin; */ /* not used atm */ ws_dbg("%s_enter: len=%d; " "CTX: readlen=%d readPos=%p " "writeTo=%p " "state=%d payloadtoRead=%d payloadRemaining=%llu " " nReadPayload=%d carrylen=%d carryBuf=%p\n", __func__, len, wsctx->readlen, wsctx->readPos, wsctx->writePos, wsctx->hybiDecodeState, wsctx->header.payloadLen, hybiRemaining(wsctx), wsctx->nReadPayload, wsctx->carrylen, wsctx->carryBuf); switch (wsctx->hybiDecodeState){ int nInBuf; case WS_HYBI_STATE_HEADER_PENDING: wsctx->hybiDecodeState = hybiReadHeader(wsctx, &result, &nInBuf); if (wsctx->hybiDecodeState == WS_HYBI_STATE_ERR) { goto spor; } if (wsctx->hybiDecodeState != WS_HYBI_STATE_HEADER_PENDING) { /* when header is complete, try to read some more data */ wsctx->hybiDecodeState = hybiReadAndDecode(wsctx, dst, len, &result, nInBuf); } break; case WS_HYBI_STATE_DATA_AVAILABLE: wsctx->hybiDecodeState = hybiReturnData(dst, len, wsctx, &result); break; case WS_HYBI_STATE_DATA_NEEDED: wsctx->hybiDecodeState = hybiReadAndDecode(wsctx, dst, len, &result, 0); break; case WS_HYBI_STATE_CLOSE_REASON_PENDING: wsctx->hybiDecodeState = hybiReadAndDecode(wsctx, dst, len, &result, 0); break; default: /* invalid state */ rfbErr("%s: called with invalid state %d\n", wsctx->hybiDecodeState); result = -1; errno = EIO; wsctx->hybiDecodeState = WS_HYBI_STATE_ERR; } /* single point of return, if someone has questions :-) */ spor: if (wsctx->hybiDecodeState == WS_HYBI_STATE_FRAME_COMPLETE) { ws_dbg("frame received successfully, cleaning up: read=%d hlen=%d plen=%d\n", wsctx->header.nRead, wsctx->header.headerLen, wsctx->header.payloadLen); if (wsctx->header.fin && !isControlFrame(wsctx)) { /* frame finished, cleanup state */ hybiDecodeCleanupComplete(wsctx); } else { /* always retain continuation opcode for unfinished data frames * or control frames, which may interleave with data frames */ hybiDecodeCleanupForContinuation(wsctx); } } else if (wsctx->hybiDecodeState == WS_HYBI_STATE_ERR) { hybiDecodeCleanupComplete(wsctx); } ws_dbg("%s_exit: len=%d; " "CTX: readlen=%d readPos=%p " "writePos=%p " "state=%d payloadtoRead=%d payloadRemaining=%d " "nRead=%d carrylen=%d carryBuf=%p " "result=%d " "errno=%d\n", __func__, len, wsctx->readlen, wsctx->readPos, wsctx->writePos, wsctx->hybiDecodeState, wsctx->header.payloadLen, hybiRemaining(wsctx), wsctx->nReadPayload, wsctx->carrylen, wsctx->carryBuf, result, errno); return result; }
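The code_before field above ends with webSocketsDecodeHybi(), which the source comments describe as an emulated read call on a socket: it returns the number of decoded payload bytes, 0 on an orderly close, or -1 with errno set (EAGAIN while a header or payload is still incomplete, ECONNRESET once a complete close frame with reason arrives, EPROTO/EIO on protocol or internal errors). A minimal caller sketch under those documented semantics; the helper name and return-code policy are illustrative, not libvncserver API:

#include <errno.h>
#include "ws_decode.h"   /* assumed to declare ws_ctx_t and webSocketsDecodeHybi() */

/* Classify one emulated-recv call the way a plain recv() caller would. */
static int ws_read_chunk(ws_ctx_t *wsctx, char *buf, int len)
{
    int n = webSocketsDecodeHybi(wsctx, buf, len);
    if (n > 0)
        return n;        /* n decoded payload bytes now in buf */
    if (n == 0)
        return 0;        /* peer closed the websocket */
    if (errno == EAGAIN)
        return -2;       /* frame incomplete: wait for readability and retry */
    return -1;           /* hard error: EPROTO, ECONNRESET, EIO, ... */
}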
#include "ws_decode.h" #include "base64.h" #include <string.h> #include <errno.h> #define WS_HYBI_MASK_LEN 4 #define WS_HYBI_HEADER_LEN_SHORT 2 + WS_HYBI_MASK_LEN #define WS_HYBI_HEADER_LEN_EXTENDED 4 + WS_HYBI_MASK_LEN #define WS_HYBI_HEADER_LEN_LONG 10 + WS_HYBI_MASK_LEN #undef WS_DECODE_DEBUG /* set to 1 to produce very fine debugging output */ #define WS_DECODE_DEBUG 0 #if WS_DECODE_DEBUG == 1 #define ws_dbg(fmt, ...) rfbLog((fmt), ##__VA_ARGS) #else #define ws_dbg(fmt, ...) #endif static inline int isControlFrame(ws_ctx_t *wsctx) { return 0 != (wsctx->header.opcode & 0x08); } static uint64_t hybiRemaining(ws_ctx_t *wsctx) { return wsctx->header.payloadLen - wsctx->nReadPayload; } static void hybiDecodeCleanupBasics(ws_ctx_t *wsctx) { /* keep opcode, cleanup rest */ wsctx->header.opcode = WS_OPCODE_INVALID; wsctx->header.payloadLen = 0; wsctx->header.mask.u = 0; wsctx->header.headerLen = 0; wsctx->header.data = NULL; wsctx->header.nRead = 0; wsctx->nReadPayload = 0; wsctx->carrylen = 0; wsctx->readPos = (unsigned char *)wsctx->codeBufDecode; wsctx->readlen = 0; wsctx->hybiDecodeState = WS_HYBI_STATE_HEADER_PENDING; wsctx->writePos = NULL; } static void hybiDecodeCleanupForContinuation(ws_ctx_t *wsctx) { hybiDecodeCleanupBasics(wsctx); ws_dbg("clean up frame, but expect continuation with opcode %d\n", wsctx->continuation_opcode); } void hybiDecodeCleanupComplete(ws_ctx_t *wsctx) { hybiDecodeCleanupBasics(wsctx); wsctx->continuation_opcode = WS_OPCODE_INVALID; ws_dbg("cleaned up wsctx completely\n"); } /** * Return payload data that has been decoded/unmasked from * a websocket frame. * * @param[out] dst destination buffer * @param[in] len bytes to copy to destination buffer * @param[in,out] wsctx internal state of decoding procedure * @param[out] number of bytes actually written to dst buffer * @return next hybi decoding state */ static int hybiReturnData(char *dst, int len, ws_ctx_t *wsctx, int *nWritten) { int nextState = WS_HYBI_STATE_ERR; /* if we have something already decoded copy and return */ if (wsctx->readlen > 0) { /* simply return what we have */ if (wsctx->readlen > len) { ws_dbg("copy to %d bytes to dst buffer; readPos=%p, readLen=%d\n", len, wsctx->readPos, wsctx->readlen); memcpy(dst, wsctx->readPos, len); *nWritten = len; wsctx->readlen -= len; wsctx->readPos += len; nextState = WS_HYBI_STATE_DATA_AVAILABLE; } else { ws_dbg("copy to %d bytes to dst buffer; readPos=%p, readLen=%d\n", wsctx->readlen, wsctx->readPos, wsctx->readlen); memcpy(dst, wsctx->readPos, wsctx->readlen); *nWritten = wsctx->readlen; wsctx->readlen = 0; wsctx->readPos = NULL; if (hybiRemaining(wsctx) == 0) { nextState = WS_HYBI_STATE_FRAME_COMPLETE; } else { nextState = WS_HYBI_STATE_DATA_NEEDED; } } ws_dbg("after copy: readPos=%p, readLen=%d\n", wsctx->readPos, wsctx->readlen); } else { /* it may happen that we read some bytes but could not decode them, * in that case, set errno to EAGAIN and return -1 */ nextState = wsctx->hybiDecodeState; errno = EAGAIN; *nWritten = -1; } return nextState; } /** * Read an RFC 6455 websocket frame (IETF hybi working group). * * Internal state is updated according to bytes received and the * decoding of header information. * * @param[in] cl client ptr with ptr to raw socket and ws_ctx_t ptr * @param[out] sockRet emulated recv return value * @param[out] nPayload number of payload bytes already read * @return next hybi decoding state; WS_HYBI_STATE_HEADER_PENDING indicates * that the header was not received completely. 
*/ static int hybiReadHeader(ws_ctx_t *wsctx, int *sockRet, int *nPayload) { int ret; char *headerDst = wsctx->codeBufDecode + wsctx->header.nRead; int n = ((uint64_t)WSHLENMAX) - wsctx->header.nRead; ws_dbg("header_read to %p with len=%d\n", headerDst, n); ret = wsctx->ctxInfo.readFunc(wsctx->ctxInfo.ctxPtr, headerDst, n); ws_dbg("read %d bytes from socket\n", ret); if (ret <= 0) { if (-1 == ret) { /* save errno because rfbErr() will tamper it */ int olderrno = errno; rfbErr("%s: read; %s\n", __func__, strerror(errno)); errno = olderrno; goto err_cleanup_state; } else { *sockRet = 0; goto err_cleanup_state_sock_closed; } } wsctx->header.nRead += ret; if (wsctx->header.nRead < 2) { /* cannot decode header with less than two bytes */ goto ret_header_pending; } /* first two header bytes received; interpret header data and get rest */ wsctx->header.data = (ws_header_t *)wsctx->codeBufDecode; wsctx->header.opcode = wsctx->header.data->b0 & 0x0f; wsctx->header.fin = (wsctx->header.data->b0 & 0x80) >> 7; if (isControlFrame(wsctx)) { ws_dbg("is control frame\n"); /* is a control frame, leave remembered continuation opcode unchanged; * just check if there is a wrong fragmentation */ if (wsctx->header.fin == 0) { /* we only accept text/binary continuation frames; RFC6455: * Control frames (see Section 5.5) MAY be injected in the middle of * a fragmented message. Control frames themselves MUST NOT be * fragmented. */ rfbErr("control frame with FIN bit cleared received, aborting\n"); errno = EPROTO; goto err_cleanup_state; } } else { ws_dbg("not a control frame\n"); /* not a control frame, check for continuation opcode */ if (wsctx->header.opcode == WS_OPCODE_CONTINUATION) { ws_dbg("cont_frame\n"); /* do we have state (i.e., opcode) for continuation frame? */ if (wsctx->continuation_opcode == WS_OPCODE_INVALID) { rfbErr("no continuation state\n"); errno = EPROTO; goto err_cleanup_state; } /* otherwise, set opcode = continuation_opcode */ wsctx->header.opcode = wsctx->continuation_opcode; ws_dbg("set opcode to continuation_opcode: %d\n", wsctx->header.opcode); } else { if (wsctx->header.fin == 0) { wsctx->continuation_opcode = wsctx->header.opcode; } else { wsctx->continuation_opcode = WS_OPCODE_INVALID; } ws_dbg("set continuation_opcode to %d\n", wsctx->continuation_opcode); } } wsctx->header.payloadLen = (uint64_t)(wsctx->header.data->b1 & 0x7f); ws_dbg("first header bytes received; opcode=%d lenbyte=%d fin=%d\n", wsctx->header.opcode, wsctx->header.payloadLen, wsctx->header.fin); /* * 4.3. Client-to-Server Masking * * The client MUST mask all frames sent to the server. A server MUST * close the connection upon receiving a frame with the MASK bit set to 0. 
**/ if (!(wsctx->header.data->b1 & 0x80)) { rfbErr("%s: got frame without mask; ret=%d\n", __func__, ret); errno = EPROTO; goto err_cleanup_state; } if (wsctx->header.payloadLen < 126 && wsctx->header.nRead >= 6) { wsctx->header.headerLen = WS_HYBI_HEADER_LEN_SHORT; wsctx->header.mask = wsctx->header.data->u.m; } else if (wsctx->header.payloadLen == 126 && 8 <= wsctx->header.nRead) { wsctx->header.headerLen = WS_HYBI_HEADER_LEN_EXTENDED; wsctx->header.payloadLen = WS_NTOH16(wsctx->header.data->u.s16.l16); wsctx->header.mask = wsctx->header.data->u.s16.m16; } else if (wsctx->header.payloadLen == 127 && 14 <= wsctx->header.nRead) { wsctx->header.headerLen = WS_HYBI_HEADER_LEN_LONG; wsctx->header.payloadLen = WS_NTOH64(wsctx->header.data->u.s64.l64); wsctx->header.mask = wsctx->header.data->u.s64.m64; } else { /* Incomplete frame header, try again */ rfbErr("%s: incomplete frame header; ret=%d\n", __func__, ret); goto ret_header_pending; } char *h = wsctx->codeBufDecode; int i; ws_dbg("Header:\n"); for (i=0; i <10; i++) { ws_dbg("0x%02X\n", (unsigned char)h[i]); } ws_dbg("\n"); /* while RFC 6455 mandates that lengths MUST be encoded with the minimum * number of bytes, it does not specify for the server how to react on * 'wrongly' encoded frames --- this implementation rejects them*/ if ((wsctx->header.headerLen > WS_HYBI_HEADER_LEN_SHORT && wsctx->header.payloadLen < (uint64_t)126) || (wsctx->header.headerLen > WS_HYBI_HEADER_LEN_EXTENDED && wsctx->header.payloadLen < (uint64_t)65536)) { rfbErr("%s: invalid length field; headerLen=%d payloadLen=%llu\n", __func__, wsctx->header.headerLen, wsctx->header.payloadLen); errno = EPROTO; goto err_cleanup_state; } /* update write position for next bytes */ wsctx->writePos = wsctx->codeBufDecode + wsctx->header.nRead; /* set payload pointer just after header */ wsctx->readPos = (unsigned char *)(wsctx->codeBufDecode + wsctx->header.headerLen); *nPayload = wsctx->header.nRead - wsctx->header.headerLen; wsctx->nReadPayload = *nPayload; ws_dbg("header complete: state=%d headerlen=%d payloadlen=%llu writeTo=%p nPayload=%d\n", wsctx->hybiDecodeState, wsctx->header.headerLen, wsctx->header.payloadLen, wsctx->writePos, *nPayload); return WS_HYBI_STATE_DATA_NEEDED; ret_header_pending: errno = EAGAIN; *sockRet = -1; return WS_HYBI_STATE_HEADER_PENDING; err_cleanup_state: *sockRet = -1; err_cleanup_state_sock_closed: hybiDecodeCleanupComplete(wsctx); return WS_HYBI_STATE_ERR; } static int hybiWsFrameComplete(ws_ctx_t *wsctx) { return wsctx != NULL && hybiRemaining(wsctx) == 0; } static char * hybiPayloadStart(ws_ctx_t *wsctx) { return wsctx->codeBufDecode + wsctx->header.headerLen; } /** * Read the remaining payload bytes from associated raw socket. * * - try to read remaining bytes from socket * - unmask all multiples of 4 * - if frame incomplete but some bytes are left, these are copied to * the carry buffer * - if opcode is TEXT: Base64-decode all unmasked received bytes * - set state for reading decoded data * - reset write position to begin of buffer (+ header) * --> before we retrieve more data we let the caller clear all bytes * from the reception buffer * - execute return data routine * * Sets errno corresponding to what it gets from the underlying * socket or EPROTO if some invalid data is in the received frame * or ECONNRESET if a close reason + message is received. EIO is used if * an internal sanity check fails. 
* * @param[in] cl client ptr with raw socket reference * @param[out] dst destination buffer * @param[in] len size of destination buffer * @param[out] sockRet emulated recv return value * @param[in] nInBuf number of undecoded bytes before writePos from header read * @return next hybi decode state */ static int hybiReadAndDecode(ws_ctx_t *wsctx, char *dst, int len, int *sockRet, int nInBuf) { int n; int i; int toReturn; /* number of data bytes to return */ int toDecode; /* number of bytes to decode starting at wsctx->writePos */ int bufsize; int nextRead; unsigned char *data; /* if data was carried over, copy to start of buffer */ memcpy(wsctx->writePos, wsctx->carryBuf, wsctx->carrylen); wsctx->writePos += wsctx->carrylen; /* -1 accounts for potential '\0' terminator for base64 decoding */ bufsize = wsctx->codeBufDecode + ARRAYSIZE(wsctx->codeBufDecode) - wsctx->writePos - 1; ws_dbg("bufsize=%d\n", bufsize); if (hybiRemaining(wsctx) > bufsize) { nextRead = bufsize; } else { nextRead = hybiRemaining(wsctx); } ws_dbg("calling read with buf=%p and len=%d (decodebuf=%p headerLen=%d)\n", wsctx->writePos, nextRead, wsctx->codeBufDecode, wsctx->header.headerLen); if (nextRead > 0) { /* decode more data */ if (-1 == (n = wsctx->ctxInfo.readFunc(wsctx->ctxInfo.ctxPtr, wsctx->writePos, nextRead))) { int olderrno = errno; rfbErr("%s: read; %s", __func__, strerror(errno)); errno = olderrno; *sockRet = -1; return WS_HYBI_STATE_ERR; } else if (n == 0) { *sockRet = 0; return WS_HYBI_STATE_ERR; } else { ws_dbg("read %d bytes from socket; nRead=%d\n", n, wsctx->nReadPayload); } } else { n = 0; } wsctx->nReadPayload += n; wsctx->writePos += n; if (hybiRemaining(wsctx) == 0) { wsctx->hybiDecodeState = WS_HYBI_STATE_FRAME_COMPLETE; } /* number of not yet unmasked payload bytes: what we read here + what was * carried over + what was read with the header */ toDecode = n + wsctx->carrylen + nInBuf; ws_dbg("toDecode=%d from n=%d carrylen=%d headerLen=%d\n", toDecode, n, wsctx->carrylen, wsctx->header.headerLen); if (toDecode < 0) { rfbErr("%s: internal error; negative number of bytes to decode: %d", __func__, toDecode); errno=EIO; *sockRet = -1; return WS_HYBI_STATE_ERR; } /* for a possible base64 decoding, we decode multiples of 4 bytes until * the whole frame is received and carry over any remaining bytes in the carry buf*/ data = (unsigned char *)(wsctx->writePos - toDecode); for (i = 0; i < (toDecode >> 2); i++) { uint32_t tmp; memcpy(&tmp, data + i * sizeof(tmp), sizeof(tmp)); tmp ^= wsctx->header.mask.u; memcpy(data + i * sizeof(tmp), &tmp, sizeof(tmp)); } ws_dbg("mask decoding; i=%d toDecode=%d\n", i, toDecode); if (wsctx->hybiDecodeState == WS_HYBI_STATE_FRAME_COMPLETE) { /* process the remaining bytes (if any) */ for (i*=4; i < toDecode; i++) { data[i] ^= wsctx->header.mask.c[i % 4]; } /* all data is here, no carrying */ wsctx->carrylen = 0; } else { /* carry over remaining, non-multiple-of-four bytes */ wsctx->carrylen = toDecode - (i * 4); if (wsctx->carrylen < 0 || wsctx->carrylen > ARRAYSIZE(wsctx->carryBuf)) { rfbErr("%s: internal error, invalid carry over size: carrylen=%d, toDecode=%d, i=%d", __func__, wsctx->carrylen, toDecode, i); *sockRet = -1; errno = EIO; return WS_HYBI_STATE_ERR; } ws_dbg("carrying over %d bytes from %p to %p\n", wsctx->carrylen, wsctx->writePos + (i * 4), wsctx->carryBuf); memcpy(wsctx->carryBuf, data + (i * 4), wsctx->carrylen); wsctx->writePos -= wsctx->carrylen; } toReturn = toDecode - wsctx->carrylen; switch (wsctx->header.opcode) { case WS_OPCODE_CLOSE: /* this data is 
not returned as payload data */ if (hybiWsFrameComplete(wsctx)) { *(wsctx->writePos) = '\0'; ws_dbg("got close cmd %d, reason %d: %s\n", (int)(wsctx->writePos - hybiPayloadStart(wsctx)), WS_NTOH16(((uint16_t *)hybiPayloadStart(wsctx))[0]), &hybiPayloadStart(wsctx)[2]); errno = ECONNRESET; *sockRet = -1; return WS_HYBI_STATE_FRAME_COMPLETE; } else { ws_dbg("got close cmd; waiting for %d more bytes to arrive\n", hybiRemaining(wsctx)); *sockRet = -1; errno = EAGAIN; return WS_HYBI_STATE_CLOSE_REASON_PENDING; } break; case WS_OPCODE_TEXT_FRAME: data[toReturn] = '\0'; ws_dbg("Initiate Base64 decoding in %p with max size %d and '\\0' at %p\n", data, bufsize, data + toReturn); if (-1 == (wsctx->readlen = rfbBase64PtoN((char *)data, data, bufsize))) { rfbErr("%s: Base64 decode error; %s\n", __func__, strerror(errno)); } wsctx->writePos = hybiPayloadStart(wsctx); break; case WS_OPCODE_BINARY_FRAME: wsctx->readlen = toReturn; wsctx->writePos = hybiPayloadStart(wsctx); ws_dbg("set readlen=%d writePos=%p\n", wsctx->readlen, wsctx->writePos); break; default: rfbErr("%s: unhandled opcode %d, b0: %02x, b1: %02x\n", __func__, (int)wsctx->header.opcode, wsctx->header.data->b0, wsctx->header.data->b1); } wsctx->readPos = data; return hybiReturnData(dst, len, wsctx, sockRet); } /** * Read function for websocket-socket emulation. * * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-------+-+-------------+-------------------------------+ * |F|R|R|R| opcode|M| Payload len | Extended payload length | * |I|S|S|S| (4) |A| (7) | (16/64) | * |N|V|V|V| |S| | (if payload len==126/127) | * | |1|2|3| |K| | | * +-+-+-+-+-------+-+-------------+ - - - - - - - - - - - - - - - + * | Extended payload length continued, if payload len == 127 | * + - - - - - - - - - - - - - - - +-------------------------------+ * | |Masking-key, if MASK set to 1 | * +-------------------------------+-------------------------------+ * | Masking-key (continued) | Payload Data | * +-------------------------------- - - - - - - - - - - - - - - - + * : Payload Data continued ... : * + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + * | Payload Data continued ... | * +---------------------------------------------------------------+ * * Using the decode buffer, this function: * - reads the complete header from the underlying socket * - reads any remaining data bytes * - unmasks the payload data using the provided mask * - decodes Base64 encoded text data * - copies len bytes of decoded payload data into dst * * Emulates a read call on a socket. 
*/ int webSocketsDecodeHybi(ws_ctx_t *wsctx, char *dst, int len) { int result = -1; /* int fin; */ /* not used atm */ ws_dbg("%s_enter: len=%d; " "CTX: readlen=%d readPos=%p " "writeTo=%p " "state=%d payloadtoRead=%d payloadRemaining=%llu " " nReadPayload=%d carrylen=%d carryBuf=%p\n", __func__, len, wsctx->readlen, wsctx->readPos, wsctx->writePos, wsctx->hybiDecodeState, wsctx->header.payloadLen, hybiRemaining(wsctx), wsctx->nReadPayload, wsctx->carrylen, wsctx->carryBuf); switch (wsctx->hybiDecodeState){ int nInBuf; case WS_HYBI_STATE_HEADER_PENDING: wsctx->hybiDecodeState = hybiReadHeader(wsctx, &result, &nInBuf); if (wsctx->hybiDecodeState == WS_HYBI_STATE_ERR) { goto spor; } if (wsctx->hybiDecodeState != WS_HYBI_STATE_HEADER_PENDING) { /* when header is complete, try to read some more data */ wsctx->hybiDecodeState = hybiReadAndDecode(wsctx, dst, len, &result, nInBuf); } break; case WS_HYBI_STATE_DATA_AVAILABLE: wsctx->hybiDecodeState = hybiReturnData(dst, len, wsctx, &result); break; case WS_HYBI_STATE_DATA_NEEDED: wsctx->hybiDecodeState = hybiReadAndDecode(wsctx, dst, len, &result, 0); break; case WS_HYBI_STATE_CLOSE_REASON_PENDING: wsctx->hybiDecodeState = hybiReadAndDecode(wsctx, dst, len, &result, 0); break; default: /* invalid state */ rfbErr("%s: called with invalid state %d\n", wsctx->hybiDecodeState); result = -1; errno = EIO; wsctx->hybiDecodeState = WS_HYBI_STATE_ERR; } /* single point of return, if someone has questions :-) */ spor: if (wsctx->hybiDecodeState == WS_HYBI_STATE_FRAME_COMPLETE) { ws_dbg("frame received successfully, cleaning up: read=%d hlen=%d plen=%d\n", wsctx->header.nRead, wsctx->header.headerLen, wsctx->header.payloadLen); if (wsctx->header.fin && !isControlFrame(wsctx)) { /* frame finished, cleanup state */ hybiDecodeCleanupComplete(wsctx); } else { /* always retain continuation opcode for unfinished data frames * or control frames, which may interleave with data frames */ hybiDecodeCleanupForContinuation(wsctx); } } else if (wsctx->hybiDecodeState == WS_HYBI_STATE_ERR) { hybiDecodeCleanupComplete(wsctx); } ws_dbg("%s_exit: len=%d; " "CTX: readlen=%d readPos=%p " "writePos=%p " "state=%d payloadtoRead=%d payloadRemaining=%d " "nRead=%d carrylen=%d carryBuf=%p " "result=%d " "errno=%d\n", __func__, len, wsctx->readlen, wsctx->readPos, wsctx->writePos, wsctx->hybiDecodeState, wsctx->header.payloadLen, hybiRemaining(wsctx), wsctx->nReadPayload, wsctx->carrylen, wsctx->carryBuf, result, errno); return result; }
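Within the code_after field, hybiReadHeader() sizes the frame header from the 7-bit length field in the second header byte: values below 126 use the short form, 126 selects a 16-bit extended length, 127 a 64-bit one, and the 4-byte client mask always follows (the WS_HYBI_HEADER_LEN_SHORT/_EXTENDED/_LONG constants). The same arithmetic restated as a standalone helper, with a hypothetical name:

#include <stdint.h>

/* Total header bytes for a masked client-to-server frame, derived from
 * the second header byte; mirrors the WS_HYBI_HEADER_LEN_* macros above. */
static int hybi_header_len(uint8_t b1)
{
    uint8_t len7 = b1 & 0x7f;  /* 7-bit payload length field */
    if (len7 < 126)
        return 2 + 4;          /* two header bytes + four mask bytes */
    if (len7 == 126)
        return 4 + 4;          /* plus 16-bit extended payload length */
    return 10 + 4;             /* len7 == 127: plus 64-bit payload length */
}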
hybiReadAndDecode(ws_ctx_t *wsctx, char *dst, int len, int *sockRet, int nInBuf) { int n; int i; int toReturn; /* number of data bytes to return */ int toDecode; /* number of bytes to decode starting at wsctx->writePos */ int bufsize; int nextRead; unsigned char *data; uint32_t *data32; /* if data was carried over, copy to start of buffer */ memcpy(wsctx->writePos, wsctx->carryBuf, wsctx->carrylen); wsctx->writePos += wsctx->carrylen; /* -1 accounts for potential '\0' terminator for base64 decoding */ bufsize = wsctx->codeBufDecode + ARRAYSIZE(wsctx->codeBufDecode) - wsctx->writePos - 1; ws_dbg("bufsize=%d\n", bufsize); if (hybiRemaining(wsctx) > bufsize) { nextRead = bufsize; } else { nextRead = hybiRemaining(wsctx); } ws_dbg("calling read with buf=%p and len=%d (decodebuf=%p headerLen=%d)\n", wsctx->writePos, nextRead, wsctx->codeBufDecode, wsctx->header.headerLen); if (nextRead > 0) { /* decode more data */ if (-1 == (n = wsctx->ctxInfo.readFunc(wsctx->ctxInfo.ctxPtr, wsctx->writePos, nextRead))) { int olderrno = errno; rfbErr("%s: read; %s", __func__, strerror(errno)); errno = olderrno; *sockRet = -1; return WS_HYBI_STATE_ERR; } else if (n == 0) { *sockRet = 0; return WS_HYBI_STATE_ERR; } else { ws_dbg("read %d bytes from socket; nRead=%d\n", n, wsctx->nReadPayload); } } else { n = 0; } wsctx->nReadPayload += n; wsctx->writePos += n; if (hybiRemaining(wsctx) == 0) { wsctx->hybiDecodeState = WS_HYBI_STATE_FRAME_COMPLETE; } /* number of not yet unmasked payload bytes: what we read here + what was * carried over + what was read with the header */ toDecode = n + wsctx->carrylen + nInBuf; ws_dbg("toDecode=%d from n=%d carrylen=%d headerLen=%d\n", toDecode, n, wsctx->carrylen, wsctx->header.headerLen); if (toDecode < 0) { rfbErr("%s: internal error; negative number of bytes to decode: %d", __func__, toDecode); errno=EIO; *sockRet = -1; return WS_HYBI_STATE_ERR; } /* for a possible base64 decoding, we decode multiples of 4 bytes until * the whole frame is received and carry over any remaining bytes in the carry buf*/ data = (unsigned char *)(wsctx->writePos - toDecode); data32= (uint32_t *)data; for (i = 0; i < (toDecode >> 2); i++) { data32[i] ^= wsctx->header.mask.u; } ws_dbg("mask decoding; i=%d toDecode=%d\n", i, toDecode); if (wsctx->hybiDecodeState == WS_HYBI_STATE_FRAME_COMPLETE) { /* process the remaining bytes (if any) */ for (i*=4; i < toDecode; i++) { data[i] ^= wsctx->header.mask.c[i % 4]; } /* all data is here, no carrying */ wsctx->carrylen = 0; } else { /* carry over remaining, non-multiple-of-four bytes */ wsctx->carrylen = toDecode - (i * 4); if (wsctx->carrylen < 0 || wsctx->carrylen > ARRAYSIZE(wsctx->carryBuf)) { rfbErr("%s: internal error, invalid carry over size: carrylen=%d, toDecode=%d, i=%d", __func__, wsctx->carrylen, toDecode, i); *sockRet = -1; errno = EIO; return WS_HYBI_STATE_ERR; } ws_dbg("carrying over %d bytes from %p to %p\n", wsctx->carrylen, wsctx->writePos + (i * 4), wsctx->carryBuf); memcpy(wsctx->carryBuf, data + (i * 4), wsctx->carrylen); wsctx->writePos -= wsctx->carrylen; } toReturn = toDecode - wsctx->carrylen; switch (wsctx->header.opcode) { case WS_OPCODE_CLOSE: /* this data is not returned as payload data */ if (hybiWsFrameComplete(wsctx)) { *(wsctx->writePos) = '\0'; ws_dbg("got close cmd %d, reason %d: %s\n", (int)(wsctx->writePos - hybiPayloadStart(wsctx)), WS_NTOH16(((uint16_t *)hybiPayloadStart(wsctx))[0]), &hybiPayloadStart(wsctx)[2]); errno = ECONNRESET; *sockRet = -1; return WS_HYBI_STATE_FRAME_COMPLETE; } else { ws_dbg("got close cmd; 
waiting for %d more bytes to arrive\n", hybiRemaining(wsctx)); *sockRet = -1; errno = EAGAIN; return WS_HYBI_STATE_CLOSE_REASON_PENDING; } break; case WS_OPCODE_TEXT_FRAME: data[toReturn] = '\0'; ws_dbg("Initiate Base64 decoding in %p with max size %d and '\\0' at %p\n", data, bufsize, data + toReturn); if (-1 == (wsctx->readlen = rfbBase64PtoN((char *)data, data, bufsize))) { rfbErr("%s: Base64 decode error; %s\n", __func__, strerror(errno)); } wsctx->writePos = hybiPayloadStart(wsctx); break; case WS_OPCODE_BINARY_FRAME: wsctx->readlen = toReturn; wsctx->writePos = hybiPayloadStart(wsctx); ws_dbg("set readlen=%d writePos=%p\n", wsctx->readlen, wsctx->writePos); break; default: rfbErr("%s: unhandled opcode %d, b0: %02x, b1: %02x\n", __func__, (int)wsctx->header.opcode, wsctx->header.data->b0, wsctx->header.data->b1); } wsctx->readPos = data; return hybiReturnData(dst, len, wsctx, sockRet); }
hybiReadAndDecode(ws_ctx_t *wsctx, char *dst, int len, int *sockRet, int nInBuf) { int n; int i; int toReturn; /* number of data bytes to return */ int toDecode; /* number of bytes to decode starting at wsctx->writePos */ int bufsize; int nextRead; unsigned char *data; /* if data was carried over, copy to start of buffer */ memcpy(wsctx->writePos, wsctx->carryBuf, wsctx->carrylen); wsctx->writePos += wsctx->carrylen; /* -1 accounts for potential '\0' terminator for base64 decoding */ bufsize = wsctx->codeBufDecode + ARRAYSIZE(wsctx->codeBufDecode) - wsctx->writePos - 1; ws_dbg("bufsize=%d\n", bufsize); if (hybiRemaining(wsctx) > bufsize) { nextRead = bufsize; } else { nextRead = hybiRemaining(wsctx); } ws_dbg("calling read with buf=%p and len=%d (decodebuf=%p headerLen=%d)\n", wsctx->writePos, nextRead, wsctx->codeBufDecode, wsctx->header.headerLen); if (nextRead > 0) { /* decode more data */ if (-1 == (n = wsctx->ctxInfo.readFunc(wsctx->ctxInfo.ctxPtr, wsctx->writePos, nextRead))) { int olderrno = errno; rfbErr("%s: read; %s", __func__, strerror(errno)); errno = olderrno; *sockRet = -1; return WS_HYBI_STATE_ERR; } else if (n == 0) { *sockRet = 0; return WS_HYBI_STATE_ERR; } else { ws_dbg("read %d bytes from socket; nRead=%d\n", n, wsctx->nReadPayload); } } else { n = 0; } wsctx->nReadPayload += n; wsctx->writePos += n; if (hybiRemaining(wsctx) == 0) { wsctx->hybiDecodeState = WS_HYBI_STATE_FRAME_COMPLETE; } /* number of not yet unmasked payload bytes: what we read here + what was * carried over + what was read with the header */ toDecode = n + wsctx->carrylen + nInBuf; ws_dbg("toDecode=%d from n=%d carrylen=%d headerLen=%d\n", toDecode, n, wsctx->carrylen, wsctx->header.headerLen); if (toDecode < 0) { rfbErr("%s: internal error; negative number of bytes to decode: %d", __func__, toDecode); errno=EIO; *sockRet = -1; return WS_HYBI_STATE_ERR; } /* for a possible base64 decoding, we decode multiples of 4 bytes until * the whole frame is received and carry over any remaining bytes in the carry buf*/ data = (unsigned char *)(wsctx->writePos - toDecode); for (i = 0; i < (toDecode >> 2); i++) { uint32_t tmp; memcpy(&tmp, data + i * sizeof(tmp), sizeof(tmp)); tmp ^= wsctx->header.mask.u; memcpy(data + i * sizeof(tmp), &tmp, sizeof(tmp)); } ws_dbg("mask decoding; i=%d toDecode=%d\n", i, toDecode); if (wsctx->hybiDecodeState == WS_HYBI_STATE_FRAME_COMPLETE) { /* process the remaining bytes (if any) */ for (i*=4; i < toDecode; i++) { data[i] ^= wsctx->header.mask.c[i % 4]; } /* all data is here, no carrying */ wsctx->carrylen = 0; } else { /* carry over remaining, non-multiple-of-four bytes */ wsctx->carrylen = toDecode - (i * 4); if (wsctx->carrylen < 0 || wsctx->carrylen > ARRAYSIZE(wsctx->carryBuf)) { rfbErr("%s: internal error, invalid carry over size: carrylen=%d, toDecode=%d, i=%d", __func__, wsctx->carrylen, toDecode, i); *sockRet = -1; errno = EIO; return WS_HYBI_STATE_ERR; } ws_dbg("carrying over %d bytes from %p to %p\n", wsctx->carrylen, wsctx->writePos + (i * 4), wsctx->carryBuf); memcpy(wsctx->carryBuf, data + (i * 4), wsctx->carrylen); wsctx->writePos -= wsctx->carrylen; } toReturn = toDecode - wsctx->carrylen; switch (wsctx->header.opcode) { case WS_OPCODE_CLOSE: /* this data is not returned as payload data */ if (hybiWsFrameComplete(wsctx)) { *(wsctx->writePos) = '\0'; ws_dbg("got close cmd %d, reason %d: %s\n", (int)(wsctx->writePos - hybiPayloadStart(wsctx)), WS_NTOH16(((uint16_t *)hybiPayloadStart(wsctx))[0]), &hybiPayloadStart(wsctx)[2]); errno = ECONNRESET; *sockRet = -1; return 
WS_HYBI_STATE_FRAME_COMPLETE; } else { ws_dbg("got close cmd; waiting for %d more bytes to arrive\n", hybiRemaining(wsctx)); *sockRet = -1; errno = EAGAIN; return WS_HYBI_STATE_CLOSE_REASON_PENDING; } break; case WS_OPCODE_TEXT_FRAME: data[toReturn] = '\0'; ws_dbg("Initiate Base64 decoding in %p with max size %d and '\\0' at %p\n", data, bufsize, data + toReturn); if (-1 == (wsctx->readlen = rfbBase64PtoN((char *)data, data, bufsize))) { rfbErr("%s: Base64 decode error; %s\n", __func__, strerror(errno)); } wsctx->writePos = hybiPayloadStart(wsctx); break; case WS_OPCODE_BINARY_FRAME: wsctx->readlen = toReturn; wsctx->writePos = hybiPayloadStart(wsctx); ws_dbg("set readlen=%d writePos=%p\n", wsctx->readlen, wsctx->writePos); break; default: rfbErr("%s: unhandled opcode %d, b0: %02x, b1: %02x\n", __func__, (int)wsctx->header.opcode, wsctx->header.data->b0, wsctx->header.data->b1); } wsctx->readPos = data; return hybiReturnData(dst, len, wsctx, sockRet); }
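Both the func_before and func_after variants of hybiReadAndDecode() unmask in whole 32-bit words and park the trailing 0 to 3 bytes in carryBuf until the rest of the frame arrives; the carried length reduces to toDecode modulo 4. Restated as a hypothetical helper, for clarity only:

/* Bytes left over after XORing toDecode/4 whole 32-bit words; equivalent
 * to the toDecode - (i * 4) computation inside hybiReadAndDecode(). */
static int hybi_carry_len(int toDecode)
{
    return toDecode & 3;   /* same as toDecode % 4 for non-negative input */
}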
{'added': [(387, ' uint32_t tmp;'), (388, ' memcpy(&tmp, data + i * sizeof(tmp), sizeof(tmp));'), (389, ' tmp ^= wsctx->header.mask.u;'), (390, ' memcpy(data + i * sizeof(tmp), &tmp, sizeof(tmp));')], 'deleted': [(330, ' uint32_t *data32;'), (386, ' data32= (uint32_t *)data;'), (389, ' data32[i] ^= wsctx->header.mask.u;')]}
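The diff field above captures the entire CVE-2019-20840 change: the unmasking loop stops casting the payload pointer to uint32_t * (the payload position in the decode buffer is not guaranteed to be 4-byte aligned, so the direct XOR is undefined behavior and can fault on strict-alignment targets) and instead round-trips each word through an aligned temporary via memcpy. The two patterns side by side, with illustrative function names wrapping the exact loop bodies from the record:

#include <stdint.h>
#include <string.h>

/* Deleted pattern: dereferences a possibly misaligned uint32_t pointer. */
static void mask_words_before(unsigned char *data, int nwords, uint32_t mask)
{
    uint32_t *data32 = (uint32_t *)data;   /* alignment not guaranteed */
    int i;
    for (i = 0; i < nwords; i++)
        data32[i] ^= mask;
}

/* Added pattern: memcpy through an aligned temporary is always defined,
 * and compiles to plain loads/stores where unaligned access is legal. */
static void mask_words_after(unsigned char *data, int nwords, uint32_t mask)
{
    int i;
    for (i = 0; i < nwords; i++) {
        uint32_t tmp;
        memcpy(&tmp, data + i * sizeof(tmp), sizeof(tmp));
        tmp ^= mask;
        memcpy(data + i * sizeof(tmp), &tmp, sizeof(tmp));
    }
}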
num_lines_added: 4
num_lines_deleted: 3
num_lines_in_file: 368
num_tokens_in_file: 2586
repo: https://github.com/LibVNC/libvncserver
cve_id: CVE-2019-20840
cwe_id: ['CWE-787']
file_name: libevt_record_values.c
method_name: libevt_record_values_read_event
/* * Record values functions * * Copyright (C) 2011-2018, Joachim Metz <joachim.metz@gmail.com> * * Refer to AUTHORS for acknowledgements. * * This software is free software: you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with this software. If not, see <http://www.gnu.org/licenses/>. */ #include <common.h> #include <byte_stream.h> #include <memory.h> #include <system_string.h> #include <types.h> #include "libevt_debug.h" #include "libevt_io_handle.h" #include "libevt_libbfio.h" #include "libevt_libcerror.h" #include "libevt_libcnotify.h" #include "libevt_libfdatetime.h" #include "libevt_libfvalue.h" #include "libevt_libfwnt.h" #include "libevt_record_values.h" #include "libevt_unused.h" #include "evt_file_header.h" #include "evt_record.h" const uint8_t evt_end_of_file_record_signature1[ 4 ] = { 0x11, 0x11, 0x11, 0x11 }; const uint8_t evt_end_of_file_record_signature2[ 4 ] = { 0x22, 0x22, 0x22, 0x22 }; const uint8_t evt_end_of_file_record_signature3[ 4 ] = { 0x33, 0x33, 0x33, 0x33 }; const uint8_t evt_end_of_file_record_signature4[ 4 ] = { 0x44, 0x44, 0x44, 0x44 }; /* Creates record values * Make sure the value record_values is referencing, is set to NULL * Returns 1 if successful or -1 on error */ int libevt_record_values_initialize( libevt_record_values_t **record_values, libcerror_error_t **error ) { static char *function = "libevt_record_values_initialize"; if( record_values == NULL ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_ARGUMENTS, LIBCERROR_ARGUMENT_ERROR_INVALID_VALUE, "%s: invalid record values.", function ); return( -1 ); } if( *record_values != NULL ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_VALUE_ALREADY_SET, "%s: invalid record values value already set.", function ); return( -1 ); } *record_values = memory_allocate_structure( libevt_record_values_t ); if( *record_values == NULL ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_MEMORY, LIBCERROR_MEMORY_ERROR_INSUFFICIENT, "%s: unable to create record values.", function ); goto on_error; } if( memory_set( *record_values, 0, sizeof( libevt_record_values_t ) ) == NULL ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_MEMORY, LIBCERROR_MEMORY_ERROR_SET_FAILED, "%s: unable to clear record values.", function ); goto on_error; } return( 1 ); on_error: if( *record_values != NULL ) { memory_free( *record_values ); *record_values = NULL; } return( -1 ); } /* Frees record values * Returns 1 if successful or -1 on error */ int libevt_record_values_free( libevt_record_values_t **record_values, libcerror_error_t **error ) { static char *function = "libevt_record_values_free"; int result = 1; if( record_values == NULL ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_ARGUMENTS, LIBCERROR_ARGUMENT_ERROR_INVALID_VALUE, "%s: invalid record values.", function ); return( -1 ); } if( *record_values != NULL ) { if( ( *record_values )->source_name != NULL ) { if( libfvalue_value_free( &( ( *record_values )->source_name ), error ) != 1 ) { libcerror_error_set( error, 
LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_FINALIZE_FAILED, "%s: unable to free source name value.", function ); result = -1; } } if( ( *record_values )->computer_name != NULL ) { if( libfvalue_value_free( &( ( *record_values )->computer_name ), error ) != 1 ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_FINALIZE_FAILED, "%s: unable to free computer name value.", function ); result = -1; } } if( ( *record_values )->user_security_identifier != NULL ) { if( libfvalue_value_free( &( ( *record_values )->user_security_identifier ), error ) != 1 ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_FINALIZE_FAILED, "%s: unable to free user security identifier (SID).", function ); result = -1; } } if( ( *record_values )->strings != NULL ) { if( libfvalue_value_free( &( ( *record_values )->strings ), error ) != 1 ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_FINALIZE_FAILED, "%s: unable to free strings.", function ); result = -1; } } if( ( *record_values )->data != NULL ) { if( libfvalue_value_free( &( ( *record_values )->data ), error ) != 1 ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_FINALIZE_FAILED, "%s: unable to free data.", function ); result = -1; } } memory_free( *record_values ); *record_values = NULL; } return( result ); } /* Clones the record values * Returns 1 if successful or -1 on error */ int libevt_record_values_clone( libevt_record_values_t **destination_record_values, libevt_record_values_t *source_record_values, libcerror_error_t **error ) { static char *function = "libevt_record_values_clone"; if( destination_record_values == NULL ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_ARGUMENTS, LIBCERROR_ARGUMENT_ERROR_INVALID_VALUE, "%s: invalid destination record values.", function ); return( -1 ); } if( *destination_record_values != NULL ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_VALUE_ALREADY_SET, "%s: invalid destination record values value already set.", function ); return( -1 ); } if( source_record_values == NULL ) { *destination_record_values = NULL; return( 1 ); } *destination_record_values = memory_allocate_structure( libevt_record_values_t ); if( *destination_record_values == NULL ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_MEMORY, LIBCERROR_MEMORY_ERROR_INSUFFICIENT, "%s: unable to create destination record values.", function ); goto on_error; } if( memory_copy( *destination_record_values, source_record_values, sizeof( libevt_record_values_t ) ) == NULL ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_MEMORY, LIBCERROR_MEMORY_ERROR_COPY_FAILED, "%s: unable to copy source to destination record values.", function ); goto on_error; } return( 1 ); on_error: if( *destination_record_values != NULL ) { memory_free( *destination_record_values ); *destination_record_values = NULL; } return( -1 ); } /* Reads a record_values * Returns the number of bytes read if successful or -1 on error */ ssize_t libevt_record_values_read( libevt_record_values_t *record_values, libbfio_handle_t *file_io_handle, libevt_io_handle_t *io_handle, off64_t *file_offset, uint8_t strict_mode, libcerror_error_t **error ) { uint8_t record_size_data[ 4 ]; uint8_t *record_data = NULL; static char *function = "libevt_record_values_read"; size_t read_size = 0; size_t record_data_offset = 0; ssize_t read_count = 0; ssize_t total_read_count = 0; uint32_t record_data_size = 0; 
if( record_values == NULL ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_ARGUMENTS, LIBCERROR_ARGUMENT_ERROR_INVALID_VALUE, "%s: invalid record values.", function ); return( -1 ); } if( io_handle == NULL ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_ARGUMENTS, LIBCERROR_ARGUMENT_ERROR_INVALID_VALUE, "%s: invalid IO handle.", function ); return( -1 ); } if( file_offset == NULL ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_ARGUMENTS, LIBCERROR_ARGUMENT_ERROR_INVALID_VALUE, "%s: invalid file offset.", function ); return( -1 ); } record_values->offset = *file_offset; read_count = libbfio_handle_read_buffer( file_io_handle, record_size_data, sizeof( uint32_t ), error ); if( read_count != (ssize_t) sizeof( uint32_t ) ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_IO, LIBCERROR_IO_ERROR_READ_FAILED, "%s: unable to read record size data.", function ); goto on_error; } *file_offset += read_count; total_read_count = read_count; byte_stream_copy_to_uint32_little_endian( record_size_data, record_data_size ); if( record_data_size < 4 ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_VALUE_OUT_OF_BOUNDS, "%s: record data size value out of bounds.", function ); goto on_error; } #if SIZEOF_SIZE_T <= 4 if( (size_t) record_data_size > (size_t) SSIZE_MAX ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_VALUE_EXCEEDS_MAXIMUM, "%s: invalid record data size value exceeds maximum.", function ); goto on_error; } #endif /* Allocating record data as 4 bytes and then using realloc here * corrupts the memory */ record_data = (uint8_t *) memory_allocate( sizeof( uint8_t ) * record_data_size ); if( record_data == NULL ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_MEMORY, LIBCERROR_MEMORY_ERROR_INSUFFICIENT, "%s: unable to create record data.", function ); goto on_error; } byte_stream_copy_from_uint32_little_endian( record_data, record_data_size ); record_data_offset = 4; read_size = record_data_size - record_data_offset; if( ( (size64_t) *file_offset + read_size ) > io_handle->file_size ) { read_size = (size_t) ( io_handle->file_size - *file_offset ); } read_count = libbfio_handle_read_buffer( file_io_handle, &( record_data[ record_data_offset ] ), read_size, error ); if( read_count != (ssize_t) read_size ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_IO, LIBCERROR_IO_ERROR_READ_FAILED, "%s: unable to read record data.", function ); goto on_error; } *file_offset += read_count; record_data_offset += read_count; total_read_count += read_count; if( record_data_offset < (size_t) record_data_size ) { if( libbfio_handle_seek_offset( file_io_handle, (off64_t) sizeof( evt_file_header_t ), SEEK_SET, error ) == -1 ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_IO, LIBCERROR_IO_ERROR_SEEK_FAILED, "%s: unable to seek file header offset: %" PRIzd ".", function, sizeof( evt_file_header_t ) ); goto on_error; } *file_offset = (off64_t) sizeof( evt_file_header_t ); read_size = (size_t) record_data_size - record_data_offset; read_count = libbfio_handle_read_buffer( file_io_handle, &( record_data[ record_data_offset ] ), read_size, error ); if( read_count != (ssize_t) read_size ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_IO, LIBCERROR_IO_ERROR_READ_FAILED, "%s: unable to read record data.", function ); goto on_error; } *file_offset += read_count; total_read_count += read_count; } #if defined( HAVE_DEBUG_OUTPUT ) if( libcnotify_verbose != 0 ) { libcnotify_printf( "%s: record data:\n", function 
); libcnotify_print_data( record_data, (size_t) record_data_size, LIBCNOTIFY_PRINT_DATA_FLAG_GROUP_DATA ); } #endif if( memory_compare( &( record_data[ 4 ] ), evt_file_signature, 4 ) == 0 ) { record_values->type = LIBEVT_RECORD_TYPE_EVENT; } else if( memory_compare( &( record_data[ 4 ] ), evt_end_of_file_record_signature1, 4 ) == 0 ) { record_values->type = LIBEVT_RECORD_TYPE_END_OF_FILE; } else { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_UNSUPPORTED_VALUE, "%s: unsupported record values signature.", function ); goto on_error; } if( record_values->type == LIBEVT_RECORD_TYPE_EVENT ) { if( libevt_record_values_read_event( record_values, record_data, (size_t) record_data_size, strict_mode, error ) != 1 ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_IO, LIBCERROR_IO_ERROR_READ_FAILED, "%s: unable to read event record values.", function ); goto on_error; } } else if( record_values->type == LIBEVT_RECORD_TYPE_END_OF_FILE ) { if( libevt_record_values_read_end_of_file( record_values, record_data, (size_t) record_data_size, error ) != 1 ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_IO, LIBCERROR_IO_ERROR_READ_FAILED, "%s: unable to read end of file record values.", function ); goto on_error; } } memory_free( record_data ); return( total_read_count ); on_error: if( record_data != NULL ) { memory_free( record_data ); } return( -1 ); } /* Reads the event record values * Returns 1 if successful or -1 on error */ int libevt_record_values_read_event( libevt_record_values_t *record_values, uint8_t *record_data, size_t record_data_size, uint8_t strict_mode, libcerror_error_t **error ) { static char *function = "libevt_record_values_read_event"; size_t record_data_offset = 0; size_t strings_data_offset = 0; ssize_t value_data_size = 0; uint32_t data_offset = 0; uint32_t data_size = 0; uint32_t members_data_size = 0; uint32_t size = 0; uint32_t size_copy = 0; uint32_t strings_offset = 0; uint32_t strings_size = 0; uint32_t user_sid_offset = 0; uint32_t user_sid_size = 0; #if defined( HAVE_DEBUG_OUTPUT ) uint32_t value_32bit = 0; uint16_t value_16bit = 0; #endif if( record_values == NULL ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_ARGUMENTS, LIBCERROR_ARGUMENT_ERROR_INVALID_VALUE, "%s: invalid record values.", function ); return( -1 ); } if( record_data == NULL ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_ARGUMENTS, LIBCERROR_ARGUMENT_ERROR_INVALID_VALUE, "%s: invalid record data.", function ); return( -1 ); } if( record_data_size > (size_t) SSIZE_MAX ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_ARGUMENTS, LIBCERROR_ARGUMENT_ERROR_VALUE_EXCEEDS_MAXIMUM, "%s: invalid record data size value exceeds maximum.", function ); return( -1 ); } if( record_data_size < ( sizeof( evt_record_event_header_t ) + 4 ) ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_VALUE_OUT_OF_BOUNDS, "%s: record data size value out of bounds.", function ); return( -1 ); } byte_stream_copy_to_uint32_little_endian( ( (evt_record_event_header_t *) record_data )->size, size ); byte_stream_copy_to_uint32_little_endian( ( (evt_record_event_header_t *) record_data )->record_number, record_values->number ); byte_stream_copy_to_uint32_little_endian( ( (evt_record_event_header_t *) record_data )->creation_time, record_values->creation_time ); byte_stream_copy_to_uint32_little_endian( ( (evt_record_event_header_t *) record_data )->written_time, record_values->written_time ); byte_stream_copy_to_uint32_little_endian( ( 
(evt_record_event_header_t *) record_data )->event_identifier, record_values->event_identifier ); byte_stream_copy_to_uint16_little_endian( ( (evt_record_event_header_t *) record_data )->event_type, record_values->event_type ); byte_stream_copy_to_uint16_little_endian( ( (evt_record_event_header_t *) record_data )->event_category, record_values->event_category ); byte_stream_copy_to_uint32_little_endian( ( (evt_record_event_header_t *) record_data )->strings_offset, strings_offset ); byte_stream_copy_to_uint32_little_endian( ( (evt_record_event_header_t *) record_data )->user_sid_size, user_sid_size ); byte_stream_copy_to_uint32_little_endian( ( (evt_record_event_header_t *) record_data )->user_sid_offset, user_sid_offset ); byte_stream_copy_to_uint32_little_endian( ( (evt_record_event_header_t *) record_data )->data_size, data_size ); byte_stream_copy_to_uint32_little_endian( ( (evt_record_event_header_t *) record_data )->data_offset, data_offset ); byte_stream_copy_to_uint32_little_endian( &( record_data[ record_data_size - 4 ] ), size_copy ); #if defined( HAVE_DEBUG_OUTPUT ) if( libcnotify_verbose != 0 ) { libcnotify_printf( "%s: size\t\t\t\t\t: %" PRIu32 "\n", function, size ); libcnotify_printf( "%s: signature\t\t\t\t: %c%c%c%c\n", function, ( (evt_record_event_header_t *) record_data )->signature[ 0 ], ( (evt_record_event_header_t *) record_data )->signature[ 1 ], ( (evt_record_event_header_t *) record_data )->signature[ 2 ], ( (evt_record_event_header_t *) record_data )->signature[ 3 ] ); libcnotify_printf( "%s: record number\t\t\t\t: %" PRIu32 "\n", function, record_values->number ); if( libevt_debug_print_posix_time_value( function, "creation time\t\t\t\t", ( (evt_record_event_header_t *) record_data )->creation_time, 4, LIBFDATETIME_ENDIAN_LITTLE, LIBFDATETIME_POSIX_TIME_VALUE_TYPE_SECONDS_32BIT_SIGNED, LIBFDATETIME_STRING_FORMAT_TYPE_CTIME | LIBFDATETIME_STRING_FORMAT_FLAG_DATE_TIME, error ) != 1 ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_PRINT_FAILED, "%s: unable to print POSIX time value.", function ); goto on_error; } if( libevt_debug_print_posix_time_value( function, "written time\t\t\t\t", ( (evt_record_event_header_t *) record_data )->written_time, 4, LIBFDATETIME_ENDIAN_LITTLE, LIBFDATETIME_POSIX_TIME_VALUE_TYPE_SECONDS_32BIT_SIGNED, LIBFDATETIME_STRING_FORMAT_TYPE_CTIME | LIBFDATETIME_STRING_FORMAT_FLAG_DATE_TIME, error ) != 1 ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_PRINT_FAILED, "%s: unable to print POSIX time value.", function ); goto on_error; } libcnotify_printf( "%s: event identifier\t\t\t: 0x%08" PRIx32 "\n", function, record_values->event_identifier ); libcnotify_printf( "%s: event identifier: code\t\t\t: %" PRIu32 "\n", function, record_values->event_identifier & 0x0000ffffUL ); libcnotify_printf( "%s: event identifier: facility\t\t: %" PRIu32 "\n", function, ( record_values->event_identifier & 0x0fff0000UL ) >> 16 ); libcnotify_printf( "%s: event identifier: reserved\t\t: %" PRIu32 "\n", function, ( record_values->event_identifier & 0x10000000UL ) >> 28 ); libcnotify_printf( "%s: event identifier: customer flags\t: %" PRIu32 "\n", function, ( record_values->event_identifier & 0x20000000UL ) >> 29 ); libcnotify_printf( "%s: event identifier: severity\t\t: %" PRIu32 " (", function, ( record_values->event_identifier & 0xc0000000UL ) >> 30 ); libevt_debug_print_event_identifier_severity( record_values->event_identifier ); libcnotify_printf( ")\n" ); libcnotify_printf( "%s: 
event type\t\t\t\t: %" PRIu16 " (", function, record_values->event_type ); libevt_debug_print_event_type( record_values->event_type ); libcnotify_printf( ")\n" ); byte_stream_copy_to_uint16_little_endian( ( (evt_record_event_header_t *) record_data )->number_of_strings, value_16bit ); libcnotify_printf( "%s: number of strings\t\t\t: %" PRIu16 "\n", function, value_16bit ); libcnotify_printf( "%s: event category\t\t\t\t: %" PRIu16 "\n", function, record_values->event_category ); byte_stream_copy_to_uint16_little_endian( ( (evt_record_event_header_t *) record_data )->event_flags, value_16bit ); libcnotify_printf( "%s: event flags\t\t\t\t: 0x%04" PRIx16 "\n", function, value_16bit ); byte_stream_copy_to_uint32_little_endian( ( (evt_record_event_header_t *) record_data )->closing_record_number, value_32bit ); libcnotify_printf( "%s: closing record values number\t\t: %" PRIu32 "\n", function, value_32bit ); libcnotify_printf( "%s: strings offset\t\t\t\t: %" PRIu32 "\n", function, strings_offset ); libcnotify_printf( "%s: user security identifier (SID) size\t: %" PRIu32 "\n", function, user_sid_size ); libcnotify_printf( "%s: user security identifier (SID) offset\t: %" PRIu32 "\n", function, user_sid_offset ); libcnotify_printf( "%s: data size\t\t\t\t: %" PRIu32 "\n", function, data_size ); libcnotify_printf( "%s: data offset\t\t\t\t: %" PRIu32 "\n", function, data_offset ); } #endif record_data_offset = sizeof( evt_record_event_header_t ); if( ( user_sid_offset == 0 ) && ( user_sid_size != 0 ) ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_VALUE_OUT_OF_BOUNDS, "%s: user SID offset or size value out of bounds.", function ); goto on_error; } if( user_sid_offset != 0 ) { if( ( (size_t) user_sid_offset < record_data_offset ) || ( (size_t) user_sid_offset >= ( record_data_size - 4 ) ) ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_VALUE_OUT_OF_BOUNDS, "%s: user SID offset value out of bounds.", function ); goto on_error; } if( user_sid_size != 0 ) { if( (size_t) ( user_sid_offset + user_sid_size ) > ( record_data_size - 4 ) ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_VALUE_OUT_OF_BOUNDS, "%s: user SID size value out of bounds.", function ); goto on_error; } } } /* If the strings offset is points at the offset at record data size - 4 * the strings are empty. For this to be sane the data offset should * be the same as the strings offset or the data size 0. 
*/ if( ( (size_t) strings_offset < user_sid_offset ) || ( (size_t) strings_offset >= ( record_data_size - 4 ) ) ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_VALUE_OUT_OF_BOUNDS, "%s: strings offset value out of bounds.", function ); goto on_error; } if( ( (size_t) data_offset < strings_offset ) || ( (size_t) data_offset >= ( record_data_size - 4 ) ) ) { if( data_size != 0 ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_VALUE_OUT_OF_BOUNDS, "%s: data offset value out of bounds.", function ); goto on_error; } data_offset = (uint32_t) record_data_size - 4; } if( ( (size_t) strings_offset >= ( record_data_size - 4 ) ) && ( strings_offset != data_offset ) ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_VALUE_OUT_OF_BOUNDS, "%s: strings offset value out of bounds.", function ); goto on_error; } if( strings_offset != 0 ) { if( strings_offset < record_data_offset ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_VALUE_OUT_OF_BOUNDS, "%s: strings offset value out of bounds.", function ); goto on_error; } } if( user_sid_offset != 0 ) { members_data_size = user_sid_offset - (uint32_t) record_data_offset; } else if( strings_offset != 0 ) { members_data_size = strings_offset - (uint32_t) record_data_offset; } if( strings_offset != 0 ) { strings_size = data_offset - strings_offset; } if( data_size != 0 ) { if( (size_t) ( data_offset + data_size ) > ( record_data_size - 4 ) ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_VALUE_OUT_OF_BOUNDS, "%s: data size value out of bounds.", function ); goto on_error; } } if( members_data_size != 0 ) { #if defined( HAVE_DEBUG_OUTPUT ) if( libcnotify_verbose != 0 ) { libcnotify_printf( "%s: members data:\n", function ); libcnotify_print_data( &( record_data[ record_data_offset ] ), members_data_size, LIBCNOTIFY_PRINT_DATA_FLAG_GROUP_DATA ); } #endif if( libfvalue_value_type_initialize( &( record_values->source_name ), LIBFVALUE_VALUE_TYPE_STRING_UTF16, error ) != 1 ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_INITIALIZE_FAILED, "%s: unable to create source name value.", function ); goto on_error; } value_data_size = libfvalue_value_type_set_data_string( record_values->source_name, &( record_data[ record_data_offset ] ), members_data_size, LIBFVALUE_CODEPAGE_UTF16_LITTLE_ENDIAN, LIBFVALUE_VALUE_DATA_FLAG_MANAGED, error ); if( value_data_size == -1 ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_SET_FAILED, "%s: unable to set data of source name value.", function ); goto on_error; } #if defined( HAVE_DEBUG_OUTPUT ) if( libcnotify_verbose != 0 ) { libcnotify_printf( "%s: source name\t\t\t\t: ", function ); if( libfvalue_value_print( record_values->source_name, 0, 0, error ) != 1 ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_PRINT_FAILED, "%s: unable to print source name value.", function ); goto on_error; } libcnotify_printf( "\n" ); } #endif record_data_offset += value_data_size; members_data_size -= (uint32_t) value_data_size; if( libfvalue_value_type_initialize( &( record_values->computer_name ), LIBFVALUE_VALUE_TYPE_STRING_UTF16, error ) != 1 ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_INITIALIZE_FAILED, "%s: unable to create computer name value.", function ); goto on_error; } value_data_size = 
libfvalue_value_type_set_data_string( record_values->computer_name, &( record_data[ record_data_offset ] ), members_data_size, LIBFVALUE_CODEPAGE_UTF16_LITTLE_ENDIAN, LIBFVALUE_VALUE_DATA_FLAG_MANAGED, error ); if( value_data_size == -1 ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_SET_FAILED, "%s: unable to set data of computer name value.", function ); goto on_error; } #if defined( HAVE_DEBUG_OUTPUT ) if( libcnotify_verbose != 0 ) { libcnotify_printf( "%s: computer name\t\t\t\t: ", function ); if( libfvalue_value_print( record_values->computer_name, 0, 0, error ) != 1 ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_PRINT_FAILED, "%s: unable to print computer name value.", function ); goto on_error; } libcnotify_printf( "\n" ); } #endif record_data_offset += value_data_size; members_data_size -= (uint32_t) value_data_size; if( members_data_size > 0 ) { #if defined( HAVE_DEBUG_OUTPUT ) if( libcnotify_verbose != 0 ) { libcnotify_printf( "%s: members trailing data:\n", function ); libcnotify_print_data( &( record_data[ record_data_offset ] ), members_data_size, LIBCNOTIFY_PRINT_DATA_FLAG_GROUP_DATA ); } #endif record_data_offset += members_data_size; } } if( user_sid_size != 0 ) { if( libfvalue_value_type_initialize( &( record_values->user_security_identifier ), LIBFVALUE_VALUE_TYPE_NT_SECURITY_IDENTIFIER, error ) != 1 ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_INITIALIZE_FAILED, "%s: unable to create user security identifier (SID) value.", function ); goto on_error; } if( libfvalue_value_set_data( record_values->user_security_identifier, &( record_data[ user_sid_offset ] ), (size_t) user_sid_size, LIBFVALUE_ENDIAN_LITTLE, LIBFVALUE_VALUE_DATA_FLAG_MANAGED, error ) != 1 ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_SET_FAILED, "%s: unable to set data of user security identifier (SID) value.", function ); goto on_error; } #if defined( HAVE_DEBUG_OUTPUT ) if( libcnotify_verbose != 0 ) { libcnotify_printf( "%s: user security identifier (SID)\t\t: ", function ); if( libfvalue_value_print( record_values->user_security_identifier, 0, 0, error ) != 1 ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_PRINT_FAILED, "%s: unable to print user security identifier (SID) value.", function ); goto on_error; } libcnotify_printf( "\n" ); } #endif record_data_offset += user_sid_size; } if( strings_size != 0 ) { #if defined( HAVE_DEBUG_OUTPUT ) if( libcnotify_verbose != 0 ) { libcnotify_printf( "%s: strings data:\n", function ); libcnotify_print_data( &( record_data[ strings_offset ] ), strings_size, LIBCNOTIFY_PRINT_DATA_FLAG_GROUP_DATA ); } #endif if( size_copy == 0 ) { /* If the strings data is truncated */ strings_data_offset = strings_offset + strings_size - 2; while( strings_data_offset > strings_offset ) { if( ( record_data[ strings_data_offset ] != 0 ) || ( record_data[ strings_data_offset + 1 ] != 0 ) ) { strings_size += 2; break; } strings_data_offset -= 2; strings_size -= 2; } } if( libfvalue_value_type_initialize( &( record_values->strings ), LIBFVALUE_VALUE_TYPE_STRING_UTF16, error ) != 1 ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_INITIALIZE_FAILED, "%s: unable to create strings value.", function ); goto on_error; } value_data_size = libfvalue_value_type_set_data_strings_array( record_values->strings, &( record_data[ strings_offset ] ), strings_size, 
LIBFVALUE_CODEPAGE_UTF16_LITTLE_ENDIAN, error ); if( value_data_size == -1 ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_SET_FAILED, "%s: unable to set data of strings value.", function ); goto on_error; } record_data_offset += strings_size; } if( data_size != 0 ) { #if defined( HAVE_DEBUG_OUTPUT ) if( libcnotify_verbose != 0 ) { libcnotify_printf( "%s: data:\n", function ); libcnotify_print_data( &( record_data[ data_offset ] ), (size_t) data_size, LIBCNOTIFY_PRINT_DATA_FLAG_GROUP_DATA ); } #endif if( libfvalue_value_type_initialize( &( record_values->data ), LIBFVALUE_VALUE_TYPE_BINARY_DATA, error ) != 1 ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_INITIALIZE_FAILED, "%s: unable to create data value.", function ); goto on_error; } if( libfvalue_value_set_data( record_values->data, &( record_data[ record_data_offset ] ), (size_t) data_size, LIBFVALUE_ENDIAN_LITTLE, LIBFVALUE_VALUE_DATA_FLAG_MANAGED, error ) != 1 ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_SET_FAILED, "%s: unable to set data of data value.", function ); goto on_error; } #if defined( HAVE_DEBUG_OUTPUT ) record_data_offset += data_size; #endif } #if defined( HAVE_DEBUG_OUTPUT ) if( libcnotify_verbose != 0 ) { if( record_data_offset < ( record_data_size - 4 ) ) { libcnotify_printf( "%s: padding:\n", function ); libcnotify_print_data( &( record_data[ record_data_offset ] ), (size_t) record_data_size - record_data_offset - 4, LIBCNOTIFY_PRINT_DATA_FLAG_GROUP_DATA ); } libcnotify_printf( "%s: size copy\t\t\t\t: %" PRIu32 "\n", function, size_copy ); libcnotify_printf( "\n" ); } #endif if( ( strict_mode == 0 ) && ( size_copy == 0 ) ) { size_copy = size; } if( size != size_copy ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_INPUT, LIBCERROR_INPUT_ERROR_VALUE_MISMATCH, "%s: value mismatch for size and size copy.", function ); goto on_error; } if( record_data_size != (size_t) size ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_INPUT, LIBCERROR_INPUT_ERROR_VALUE_MISMATCH, "%s: value mismatch for record_values data size and size.", function ); goto on_error; } return( 1 ); on_error: if( record_values->data != NULL ) { libfvalue_value_free( &( record_values->data ), NULL ); } if( record_values->strings != NULL ) { libfvalue_value_free( &( record_values->strings ), NULL ); } if( record_values->user_security_identifier != NULL ) { libfvalue_value_free( &( record_values->user_security_identifier ), NULL ); } if( record_values->computer_name != NULL ) { libfvalue_value_free( &( record_values->computer_name ), NULL ); } if( record_values->source_name != NULL ) { libfvalue_value_free( &( record_values->source_name ), NULL ); } return( -1 ); } /* Reads the end of file record values * Returns 1 if successful or -1 on error */ int libevt_record_values_read_end_of_file( libevt_record_values_t *record_values, uint8_t *record_data, size_t record_data_size, libcerror_error_t **error ) { static char *function = "libevt_record_values_read_end_of_file"; uint32_t size = 0; uint32_t size_copy = 0; #if defined( HAVE_DEBUG_OUTPUT ) uint32_t value_32bit = 0; #endif if( record_values == NULL ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_ARGUMENTS, LIBCERROR_ARGUMENT_ERROR_INVALID_VALUE, "%s: invalid record values.", function ); return( -1 ); } if( record_data == NULL ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_ARGUMENTS, LIBCERROR_ARGUMENT_ERROR_INVALID_VALUE, "%s: invalid record data.", 
function ); return( -1 ); } if( record_data_size > (size_t) SSIZE_MAX ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_ARGUMENTS, LIBCERROR_ARGUMENT_ERROR_VALUE_EXCEEDS_MAXIMUM, "%s: invalid record data size value exceeds maximum.", function ); return( -1 ); } if( record_data_size < sizeof( evt_record_end_of_file_t ) ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_VALUE_OUT_OF_BOUNDS, "%s: record data size value out of bounds.", function ); return( -1 ); } byte_stream_copy_to_uint32_little_endian( ( (evt_record_end_of_file_t *) record_data )->size, size ); byte_stream_copy_to_uint32_little_endian( ( (evt_record_end_of_file_t *) record_data )->size_copy, size_copy ); #if defined( HAVE_DEBUG_OUTPUT ) if( libcnotify_verbose != 0 ) { libcnotify_printf( "%s: size\t\t\t\t: %" PRIu32 "\n", function, size ); byte_stream_copy_to_uint32_little_endian( ( (evt_record_end_of_file_t *) record_data )->signature1, value_32bit ); libcnotify_printf( "%s: signature1\t\t\t: 0x%08" PRIx32 "\n", function, value_32bit ); byte_stream_copy_to_uint32_little_endian( ( (evt_record_end_of_file_t *) record_data )->signature2, value_32bit ); libcnotify_printf( "%s: signature2\t\t\t: 0x%08" PRIx32 "\n", function, value_32bit ); byte_stream_copy_to_uint32_little_endian( ( (evt_record_end_of_file_t *) record_data )->signature3, value_32bit ); libcnotify_printf( "%s: signature3\t\t\t: 0x%08" PRIx32 "\n", function, value_32bit ); byte_stream_copy_to_uint32_little_endian( ( (evt_record_end_of_file_t *) record_data )->signature4, value_32bit ); libcnotify_printf( "%s: signature4\t\t\t: 0x%08" PRIx32 "\n", function, value_32bit ); byte_stream_copy_to_uint32_little_endian( ( (evt_record_end_of_file_t *) record_data )->first_record_offset, value_32bit ); libcnotify_printf( "%s: first record offset\t\t: 0x%08" PRIx32 "\n", function, value_32bit ); byte_stream_copy_to_uint32_little_endian( ( (evt_record_end_of_file_t *) record_data )->end_of_file_record_offset, value_32bit ); libcnotify_printf( "%s: end of file record offset\t: 0x%08" PRIx32 "\n", function, value_32bit ); byte_stream_copy_to_uint32_little_endian( ( (evt_record_end_of_file_t *) record_data )->last_record_number, value_32bit ); libcnotify_printf( "%s: last record number\t\t: %" PRIu32 "\n", function, value_32bit ); byte_stream_copy_to_uint32_little_endian( ( (evt_record_end_of_file_t *) record_data )->first_record_number, value_32bit ); libcnotify_printf( "%s: first record number\t\t: %" PRIu32 "\n", function, value_32bit ); libcnotify_printf( "%s: size copy\t\t\t: %" PRIu32 "\n", function, size_copy ); libcnotify_printf( "\n" ); } #endif if( size != size_copy ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_INPUT, LIBCERROR_INPUT_ERROR_VALUE_MISMATCH, "%s: value mismatch for size and size copy.", function ); return( -1 ); } if( record_data_size != (size_t) size ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_INPUT, LIBCERROR_INPUT_ERROR_VALUE_MISMATCH, "%s: value mismatch for record data size and size.", function ); return( -1 ); } /* TODO correct values in IO handle if necessary */ return( 1 ); } /* Retrieves the type * Returns 1 if successful or -1 on error */ int libevt_record_values_get_type( libevt_record_values_t *record_values, uint8_t *type, libcerror_error_t **error ) { static char *function = "libevt_record_values_get_type"; if( record_values == NULL ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_ARGUMENTS, LIBCERROR_ARGUMENT_ERROR_INVALID_VALUE, "%s: invalid record values.", function ); 
return( -1 ); } if( type == NULL ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_ARGUMENTS, LIBCERROR_ARGUMENT_ERROR_INVALID_VALUE, "%s: invalid type.", function ); return( -1 ); } *type = record_values->type; return( 1 ); } /* Reads record values * Callback for the (recovered) records list * Returns 1 if successful or -1 on error */ int libevt_record_values_read_element_data( libevt_io_handle_t *io_handle, libbfio_handle_t *file_io_handle, libfdata_list_element_t *element, libfcache_cache_t *cache, int element_file_index LIBEVT_ATTRIBUTE_UNUSED, off64_t element_offset, size64_t element_size LIBEVT_ATTRIBUTE_UNUSED, uint32_t element_flags LIBEVT_ATTRIBUTE_UNUSED, uint8_t read_flags LIBEVT_ATTRIBUTE_UNUSED, libcerror_error_t **error ) { libevt_record_values_t *record_values = NULL; static char *function = "libevt_record_values_read_element_data"; off64_t file_offset = 0; ssize_t read_count = 0; LIBEVT_UNREFERENCED_PARAMETER( element_size ) LIBEVT_UNREFERENCED_PARAMETER( element_file_index ) LIBEVT_UNREFERENCED_PARAMETER( element_flags ) LIBEVT_UNREFERENCED_PARAMETER( read_flags ) #if defined( HAVE_DEBUG_OUTPUT ) if( libcnotify_verbose != 0 ) { libcnotify_printf( "%s: reading record at offset: %" PRIi64 " (0x%08" PRIx64 ")\n", function, element_offset, element_offset ); } #endif if( libbfio_handle_seek_offset( file_io_handle, element_offset, SEEK_SET, error ) == -1 ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_IO, LIBCERROR_IO_ERROR_SEEK_FAILED, "%s: unable to seek record offset: %" PRIi64 ".", function, element_offset ); goto on_error; } if( libevt_record_values_initialize( &record_values, error ) != 1 ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_INITIALIZE_FAILED, "%s: unable to create record values.", function ); goto on_error; } /* File offset must be before being passed to libevt_record_values_read */ file_offset = element_offset; read_count = libevt_record_values_read( record_values, file_io_handle, io_handle, &file_offset, 0, error ); if( read_count == -1 ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_IO, LIBCERROR_IO_ERROR_READ_FAILED, "%s: unable to read record at offset: %" PRIi64 ".", function, element_offset ); goto on_error; } if( libfdata_list_element_set_element_value( element, (intptr_t *) file_io_handle, cache, (intptr_t *) record_values, (int (*)(intptr_t **, libcerror_error_t **)) &libevt_record_values_free, LIBFDATA_LIST_ELEMENT_VALUE_FLAG_MANAGED, error ) != 1 ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_SET_FAILED, "%s: unable to set record values as element value.", function ); goto on_error; } return( 1 ); on_error: if( record_values != NULL ) { libevt_record_values_free( &record_values, NULL ); } return( -1 ); }
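/* The fixed version of the file follows. It differs from the code above
 * mainly by validating the user SID, strings and data members against the
 * space remaining before the trailing 4-byte size copy, before any of that
 * member data is read. A minimal sketch of that idiom is given here; the
 * helper name evt_member_in_bounds is hypothetical and not part of libevt.
 * The check is written as a subtraction so that a large member size cannot
 * wrap around the way member_offset + member_size could for 32-bit values.
 */
#include <stddef.h>
#include <stdint.h>

static int evt_member_in_bounds(
            uint32_t member_offset,
            uint32_t member_size,
            size_t record_data_size )
{
	/* The record ends with a 4-byte size copy, so usable data stops there. */
	if( record_data_size < 4 )
	{
		return( 0 );
	}
	if( (size_t) member_offset > ( record_data_size - 4 ) )
	{
		return( 0 );
	}
	/* Equivalent to: member_offset + member_size <= record_data_size - 4,
	 * but without the risk of integer wrap-around in the addition. */
	if( (size_t) member_size > ( ( record_data_size - 4 ) - member_offset ) )
	{
		return( 0 );
	}
	return( 1 );
}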
/* * Record values functions * * Copyright (C) 2011-2018, Joachim Metz <joachim.metz@gmail.com> * * Refer to AUTHORS for acknowledgements. * * This software is free software: you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with this software. If not, see <http://www.gnu.org/licenses/>. */ #include <common.h> #include <byte_stream.h> #include <memory.h> #include <system_string.h> #include <types.h> #include "libevt_debug.h" #include "libevt_io_handle.h" #include "libevt_libbfio.h" #include "libevt_libcerror.h" #include "libevt_libcnotify.h" #include "libevt_libfdatetime.h" #include "libevt_libfvalue.h" #include "libevt_libfwnt.h" #include "libevt_record_values.h" #include "libevt_unused.h" #include "evt_file_header.h" #include "evt_record.h" const uint8_t evt_end_of_file_record_signature1[ 4 ] = { 0x11, 0x11, 0x11, 0x11 }; const uint8_t evt_end_of_file_record_signature2[ 4 ] = { 0x22, 0x22, 0x22, 0x22 }; const uint8_t evt_end_of_file_record_signature3[ 4 ] = { 0x33, 0x33, 0x33, 0x33 }; const uint8_t evt_end_of_file_record_signature4[ 4 ] = { 0x44, 0x44, 0x44, 0x44 }; /* Creates record values * Make sure the value record_values is referencing, is set to NULL * Returns 1 if successful or -1 on error */ int libevt_record_values_initialize( libevt_record_values_t **record_values, libcerror_error_t **error ) { static char *function = "libevt_record_values_initialize"; if( record_values == NULL ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_ARGUMENTS, LIBCERROR_ARGUMENT_ERROR_INVALID_VALUE, "%s: invalid record values.", function ); return( -1 ); } if( *record_values != NULL ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_VALUE_ALREADY_SET, "%s: invalid record values value already set.", function ); return( -1 ); } *record_values = memory_allocate_structure( libevt_record_values_t ); if( *record_values == NULL ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_MEMORY, LIBCERROR_MEMORY_ERROR_INSUFFICIENT, "%s: unable to create record values.", function ); goto on_error; } if( memory_set( *record_values, 0, sizeof( libevt_record_values_t ) ) == NULL ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_MEMORY, LIBCERROR_MEMORY_ERROR_SET_FAILED, "%s: unable to clear record values.", function ); goto on_error; } return( 1 ); on_error: if( *record_values != NULL ) { memory_free( *record_values ); *record_values = NULL; } return( -1 ); } /* Frees record values * Returns 1 if successful or -1 on error */ int libevt_record_values_free( libevt_record_values_t **record_values, libcerror_error_t **error ) { static char *function = "libevt_record_values_free"; int result = 1; if( record_values == NULL ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_ARGUMENTS, LIBCERROR_ARGUMENT_ERROR_INVALID_VALUE, "%s: invalid record values.", function ); return( -1 ); } if( *record_values != NULL ) { if( ( *record_values )->source_name != NULL ) { if( libfvalue_value_free( &( ( *record_values )->source_name ), error ) != 1 ) { libcerror_error_set( error, 
LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_FINALIZE_FAILED, "%s: unable to free source name value.", function ); result = -1; } } if( ( *record_values )->computer_name != NULL ) { if( libfvalue_value_free( &( ( *record_values )->computer_name ), error ) != 1 ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_FINALIZE_FAILED, "%s: unable to free computer name value.", function ); result = -1; } } if( ( *record_values )->user_security_identifier != NULL ) { if( libfvalue_value_free( &( ( *record_values )->user_security_identifier ), error ) != 1 ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_FINALIZE_FAILED, "%s: unable to free user security identifier (SID).", function ); result = -1; } } if( ( *record_values )->strings != NULL ) { if( libfvalue_value_free( &( ( *record_values )->strings ), error ) != 1 ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_FINALIZE_FAILED, "%s: unable to free strings.", function ); result = -1; } } if( ( *record_values )->data != NULL ) { if( libfvalue_value_free( &( ( *record_values )->data ), error ) != 1 ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_FINALIZE_FAILED, "%s: unable to free data.", function ); result = -1; } } memory_free( *record_values ); *record_values = NULL; } return( result ); } /* Clones the record values * Returns 1 if successful or -1 on error */ int libevt_record_values_clone( libevt_record_values_t **destination_record_values, libevt_record_values_t *source_record_values, libcerror_error_t **error ) { static char *function = "libevt_record_values_clone"; if( destination_record_values == NULL ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_ARGUMENTS, LIBCERROR_ARGUMENT_ERROR_INVALID_VALUE, "%s: invalid destination record values.", function ); return( -1 ); } if( *destination_record_values != NULL ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_VALUE_ALREADY_SET, "%s: invalid destination record values value already set.", function ); return( -1 ); } if( source_record_values == NULL ) { *destination_record_values = NULL; return( 1 ); } *destination_record_values = memory_allocate_structure( libevt_record_values_t ); if( *destination_record_values == NULL ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_MEMORY, LIBCERROR_MEMORY_ERROR_INSUFFICIENT, "%s: unable to create destination record values.", function ); goto on_error; } if( memory_copy( *destination_record_values, source_record_values, sizeof( libevt_record_values_t ) ) == NULL ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_MEMORY, LIBCERROR_MEMORY_ERROR_COPY_FAILED, "%s: unable to copy source to destination record values.", function ); goto on_error; } return( 1 ); on_error: if( *destination_record_values != NULL ) { memory_free( *destination_record_values ); *destination_record_values = NULL; } return( -1 ); } /* Reads a record_values * Returns the number of bytes read if successful or -1 on error */ ssize_t libevt_record_values_read( libevt_record_values_t *record_values, libbfio_handle_t *file_io_handle, libevt_io_handle_t *io_handle, off64_t *file_offset, uint8_t strict_mode, libcerror_error_t **error ) { uint8_t record_size_data[ 4 ]; uint8_t *record_data = NULL; static char *function = "libevt_record_values_read"; size_t read_size = 0; size_t record_data_offset = 0; ssize_t read_count = 0; ssize_t total_read_count = 0; uint32_t record_data_size = 0; 
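	/* Overview of the flow below: read the 4-byte size prefix, validate it,
	 * allocate a buffer of record_data_size bytes, then read the remainder
	 * of the record. If the record wraps around the end of the (circular)
	 * event log file, the read is split and continues directly after the
	 * file header. */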
if( record_values == NULL ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_ARGUMENTS, LIBCERROR_ARGUMENT_ERROR_INVALID_VALUE, "%s: invalid record values.", function ); return( -1 ); } if( io_handle == NULL ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_ARGUMENTS, LIBCERROR_ARGUMENT_ERROR_INVALID_VALUE, "%s: invalid IO handle.", function ); return( -1 ); } if( file_offset == NULL ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_ARGUMENTS, LIBCERROR_ARGUMENT_ERROR_INVALID_VALUE, "%s: invalid file offset.", function ); return( -1 ); } record_values->offset = *file_offset; read_count = libbfio_handle_read_buffer( file_io_handle, record_size_data, sizeof( uint32_t ), error ); if( read_count != (ssize_t) sizeof( uint32_t ) ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_IO, LIBCERROR_IO_ERROR_READ_FAILED, "%s: unable to read record size data.", function ); goto on_error; } *file_offset += read_count; total_read_count = read_count; byte_stream_copy_to_uint32_little_endian( record_size_data, record_data_size ); if( record_data_size < 4 ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_VALUE_OUT_OF_BOUNDS, "%s: record data size value out of bounds.", function ); goto on_error; } #if SIZEOF_SIZE_T <= 4 if( (size_t) record_data_size > (size_t) SSIZE_MAX ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_VALUE_EXCEEDS_MAXIMUM, "%s: invalid record data size value exceeds maximum.", function ); goto on_error; } #endif /* Allocating record data as 4 bytes and then using realloc here * corrupts the memory */ record_data = (uint8_t *) memory_allocate( sizeof( uint8_t ) * record_data_size ); if( record_data == NULL ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_MEMORY, LIBCERROR_MEMORY_ERROR_INSUFFICIENT, "%s: unable to create record data.", function ); goto on_error; } byte_stream_copy_from_uint32_little_endian( record_data, record_data_size ); record_data_offset = 4; read_size = record_data_size - record_data_offset; if( ( (size64_t) *file_offset + read_size ) > io_handle->file_size ) { read_size = (size_t) ( io_handle->file_size - *file_offset ); } read_count = libbfio_handle_read_buffer( file_io_handle, &( record_data[ record_data_offset ] ), read_size, error ); if( read_count != (ssize_t) read_size ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_IO, LIBCERROR_IO_ERROR_READ_FAILED, "%s: unable to read record data.", function ); goto on_error; } *file_offset += read_count; record_data_offset += read_count; total_read_count += read_count; if( record_data_offset < (size_t) record_data_size ) { if( libbfio_handle_seek_offset( file_io_handle, (off64_t) sizeof( evt_file_header_t ), SEEK_SET, error ) == -1 ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_IO, LIBCERROR_IO_ERROR_SEEK_FAILED, "%s: unable to seek file header offset: %" PRIzd ".", function, sizeof( evt_file_header_t ) ); goto on_error; } *file_offset = (off64_t) sizeof( evt_file_header_t ); read_size = (size_t) record_data_size - record_data_offset; read_count = libbfio_handle_read_buffer( file_io_handle, &( record_data[ record_data_offset ] ), read_size, error ); if( read_count != (ssize_t) read_size ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_IO, LIBCERROR_IO_ERROR_READ_FAILED, "%s: unable to read record data.", function ); goto on_error; } *file_offset += read_count; total_read_count += read_count; } #if defined( HAVE_DEBUG_OUTPUT ) if( libcnotify_verbose != 0 ) { libcnotify_printf( "%s: record data:\n", function 
); libcnotify_print_data( record_data, (size_t) record_data_size, LIBCNOTIFY_PRINT_DATA_FLAG_GROUP_DATA ); } #endif if( memory_compare( &( record_data[ 4 ] ), evt_file_signature, 4 ) == 0 ) { record_values->type = LIBEVT_RECORD_TYPE_EVENT; } else if( memory_compare( &( record_data[ 4 ] ), evt_end_of_file_record_signature1, 4 ) == 0 ) { record_values->type = LIBEVT_RECORD_TYPE_END_OF_FILE; } else { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_UNSUPPORTED_VALUE, "%s: unsupported record values signature.", function ); goto on_error; } if( record_values->type == LIBEVT_RECORD_TYPE_EVENT ) { if( libevt_record_values_read_event( record_values, record_data, (size_t) record_data_size, strict_mode, error ) != 1 ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_IO, LIBCERROR_IO_ERROR_READ_FAILED, "%s: unable to read event record values.", function ); goto on_error; } } else if( record_values->type == LIBEVT_RECORD_TYPE_END_OF_FILE ) { if( libevt_record_values_read_end_of_file( record_values, record_data, (size_t) record_data_size, error ) != 1 ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_IO, LIBCERROR_IO_ERROR_READ_FAILED, "%s: unable to read end of file record values.", function ); goto on_error; } } memory_free( record_data ); return( total_read_count ); on_error: if( record_data != NULL ) { memory_free( record_data ); } return( -1 ); } /* Reads the event record values * Returns 1 if successful or -1 on error */ int libevt_record_values_read_event( libevt_record_values_t *record_values, uint8_t *record_data, size_t record_data_size, uint8_t strict_mode, libcerror_error_t **error ) { static char *function = "libevt_record_values_read_event"; size_t record_data_offset = 0; size_t strings_data_offset = 0; ssize_t value_data_size = 0; uint32_t data_offset = 0; uint32_t data_size = 0; uint32_t members_data_size = 0; uint32_t size = 0; uint32_t size_copy = 0; uint32_t strings_offset = 0; uint32_t strings_size = 0; uint32_t user_sid_offset = 0; uint32_t user_sid_size = 0; #if defined( HAVE_DEBUG_OUTPUT ) uint32_t value_32bit = 0; uint16_t value_16bit = 0; #endif if( record_values == NULL ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_ARGUMENTS, LIBCERROR_ARGUMENT_ERROR_INVALID_VALUE, "%s: invalid record values.", function ); return( -1 ); } if( record_data == NULL ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_ARGUMENTS, LIBCERROR_ARGUMENT_ERROR_INVALID_VALUE, "%s: invalid record data.", function ); return( -1 ); } if( record_data_size > (size_t) SSIZE_MAX ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_ARGUMENTS, LIBCERROR_ARGUMENT_ERROR_VALUE_EXCEEDS_MAXIMUM, "%s: invalid record data size value exceeds maximum.", function ); return( -1 ); } if( record_data_size < ( sizeof( evt_record_event_header_t ) + 4 ) ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_VALUE_OUT_OF_BOUNDS, "%s: record data size value out of bounds.", function ); return( -1 ); } byte_stream_copy_to_uint32_little_endian( ( (evt_record_event_header_t *) record_data )->size, size ); byte_stream_copy_to_uint32_little_endian( ( (evt_record_event_header_t *) record_data )->record_number, record_values->number ); byte_stream_copy_to_uint32_little_endian( ( (evt_record_event_header_t *) record_data )->creation_time, record_values->creation_time ); byte_stream_copy_to_uint32_little_endian( ( (evt_record_event_header_t *) record_data )->written_time, record_values->written_time ); byte_stream_copy_to_uint32_little_endian( ( 
(evt_record_event_header_t *) record_data )->event_identifier, record_values->event_identifier ); byte_stream_copy_to_uint16_little_endian( ( (evt_record_event_header_t *) record_data )->event_type, record_values->event_type ); byte_stream_copy_to_uint16_little_endian( ( (evt_record_event_header_t *) record_data )->event_category, record_values->event_category ); byte_stream_copy_to_uint32_little_endian( ( (evt_record_event_header_t *) record_data )->strings_offset, strings_offset ); byte_stream_copy_to_uint32_little_endian( ( (evt_record_event_header_t *) record_data )->user_sid_size, user_sid_size ); byte_stream_copy_to_uint32_little_endian( ( (evt_record_event_header_t *) record_data )->user_sid_offset, user_sid_offset ); byte_stream_copy_to_uint32_little_endian( ( (evt_record_event_header_t *) record_data )->data_size, data_size ); byte_stream_copy_to_uint32_little_endian( ( (evt_record_event_header_t *) record_data )->data_offset, data_offset ); byte_stream_copy_to_uint32_little_endian( &( record_data[ record_data_size - 4 ] ), size_copy ); #if defined( HAVE_DEBUG_OUTPUT ) if( libcnotify_verbose != 0 ) { libcnotify_printf( "%s: size\t\t\t\t\t: %" PRIu32 "\n", function, size ); libcnotify_printf( "%s: signature\t\t\t\t: %c%c%c%c\n", function, ( (evt_record_event_header_t *) record_data )->signature[ 0 ], ( (evt_record_event_header_t *) record_data )->signature[ 1 ], ( (evt_record_event_header_t *) record_data )->signature[ 2 ], ( (evt_record_event_header_t *) record_data )->signature[ 3 ] ); libcnotify_printf( "%s: record number\t\t\t\t: %" PRIu32 "\n", function, record_values->number ); if( libevt_debug_print_posix_time_value( function, "creation time\t\t\t\t", ( (evt_record_event_header_t *) record_data )->creation_time, 4, LIBFDATETIME_ENDIAN_LITTLE, LIBFDATETIME_POSIX_TIME_VALUE_TYPE_SECONDS_32BIT_SIGNED, LIBFDATETIME_STRING_FORMAT_TYPE_CTIME | LIBFDATETIME_STRING_FORMAT_FLAG_DATE_TIME, error ) != 1 ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_PRINT_FAILED, "%s: unable to print POSIX time value.", function ); goto on_error; } if( libevt_debug_print_posix_time_value( function, "written time\t\t\t\t", ( (evt_record_event_header_t *) record_data )->written_time, 4, LIBFDATETIME_ENDIAN_LITTLE, LIBFDATETIME_POSIX_TIME_VALUE_TYPE_SECONDS_32BIT_SIGNED, LIBFDATETIME_STRING_FORMAT_TYPE_CTIME | LIBFDATETIME_STRING_FORMAT_FLAG_DATE_TIME, error ) != 1 ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_PRINT_FAILED, "%s: unable to print POSIX time value.", function ); goto on_error; } libcnotify_printf( "%s: event identifier\t\t\t: 0x%08" PRIx32 "\n", function, record_values->event_identifier ); libcnotify_printf( "%s: event identifier: code\t\t\t: %" PRIu32 "\n", function, record_values->event_identifier & 0x0000ffffUL ); libcnotify_printf( "%s: event identifier: facility\t\t: %" PRIu32 "\n", function, ( record_values->event_identifier & 0x0fff0000UL ) >> 16 ); libcnotify_printf( "%s: event identifier: reserved\t\t: %" PRIu32 "\n", function, ( record_values->event_identifier & 0x10000000UL ) >> 28 ); libcnotify_printf( "%s: event identifier: customer flags\t: %" PRIu32 "\n", function, ( record_values->event_identifier & 0x20000000UL ) >> 29 ); libcnotify_printf( "%s: event identifier: severity\t\t: %" PRIu32 " (", function, ( record_values->event_identifier & 0xc0000000UL ) >> 30 ); libevt_debug_print_event_identifier_severity( record_values->event_identifier ); libcnotify_printf( ")\n" ); libcnotify_printf( "%s: 
event type\t\t\t\t: %" PRIu16 " (", function, record_values->event_type ); libevt_debug_print_event_type( record_values->event_type ); libcnotify_printf( ")\n" ); byte_stream_copy_to_uint16_little_endian( ( (evt_record_event_header_t *) record_data )->number_of_strings, value_16bit ); libcnotify_printf( "%s: number of strings\t\t\t: %" PRIu16 "\n", function, value_16bit ); libcnotify_printf( "%s: event category\t\t\t\t: %" PRIu16 "\n", function, record_values->event_category ); byte_stream_copy_to_uint16_little_endian( ( (evt_record_event_header_t *) record_data )->event_flags, value_16bit ); libcnotify_printf( "%s: event flags\t\t\t\t: 0x%04" PRIx16 "\n", function, value_16bit ); byte_stream_copy_to_uint32_little_endian( ( (evt_record_event_header_t *) record_data )->closing_record_number, value_32bit ); libcnotify_printf( "%s: closing record values number\t\t: %" PRIu32 "\n", function, value_32bit ); libcnotify_printf( "%s: strings offset\t\t\t\t: %" PRIu32 "\n", function, strings_offset ); libcnotify_printf( "%s: user security identifier (SID) size\t: %" PRIu32 "\n", function, user_sid_size ); libcnotify_printf( "%s: user security identifier (SID) offset\t: %" PRIu32 "\n", function, user_sid_offset ); libcnotify_printf( "%s: data size\t\t\t\t: %" PRIu32 "\n", function, data_size ); libcnotify_printf( "%s: data offset\t\t\t\t: %" PRIu32 "\n", function, data_offset ); } #endif record_data_offset = sizeof( evt_record_event_header_t ); if( ( user_sid_offset == 0 ) && ( user_sid_size != 0 ) ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_VALUE_OUT_OF_BOUNDS, "%s: user SID offset or size value out of bounds.", function ); goto on_error; } if( user_sid_offset != 0 ) { if( ( (size_t) user_sid_offset < record_data_offset ) || ( (size_t) user_sid_offset >= ( record_data_size - 4 ) ) ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_VALUE_OUT_OF_BOUNDS, "%s: user SID offset value out of bounds.", function ); goto on_error; } if( user_sid_size != 0 ) { if( (size_t) ( user_sid_offset + user_sid_size ) > ( record_data_size - 4 ) ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_VALUE_OUT_OF_BOUNDS, "%s: user SID size value out of bounds.", function ); goto on_error; } } } /* If the strings offset is points at the offset at record data size - 4 * the strings are empty. For this to be sane the data offset should * be the same as the strings offset or the data size 0. 
*/ if( ( (size_t) strings_offset < user_sid_offset ) || ( (size_t) strings_offset >= ( record_data_size - 4 ) ) ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_VALUE_OUT_OF_BOUNDS, "%s: strings offset value out of bounds.", function ); goto on_error; } if( ( (size_t) data_offset < strings_offset ) || ( (size_t) data_offset >= ( record_data_size - 4 ) ) ) { if( data_size != 0 ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_VALUE_OUT_OF_BOUNDS, "%s: data offset value out of bounds.", function ); goto on_error; } data_offset = (uint32_t) record_data_size - 4; } if( ( (size_t) strings_offset >= ( record_data_size - 4 ) ) && ( strings_offset != data_offset ) ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_VALUE_OUT_OF_BOUNDS, "%s: strings offset value out of bounds.", function ); goto on_error; } if( strings_offset != 0 ) { if( strings_offset < record_data_offset ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_VALUE_OUT_OF_BOUNDS, "%s: strings offset value out of bounds.", function ); goto on_error; } } if( user_sid_offset != 0 ) { members_data_size = user_sid_offset - (uint32_t) record_data_offset; } else if( strings_offset != 0 ) { members_data_size = strings_offset - (uint32_t) record_data_offset; } if( strings_offset != 0 ) { strings_size = data_offset - strings_offset; } if( data_size != 0 ) { if( (size_t) ( data_offset + data_size ) > ( record_data_size - 4 ) ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_VALUE_OUT_OF_BOUNDS, "%s: data size value out of bounds.", function ); goto on_error; } } if( members_data_size != 0 ) { #if defined( HAVE_DEBUG_OUTPUT ) if( libcnotify_verbose != 0 ) { libcnotify_printf( "%s: members data:\n", function ); libcnotify_print_data( &( record_data[ record_data_offset ] ), members_data_size, LIBCNOTIFY_PRINT_DATA_FLAG_GROUP_DATA ); } #endif if( libfvalue_value_type_initialize( &( record_values->source_name ), LIBFVALUE_VALUE_TYPE_STRING_UTF16, error ) != 1 ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_INITIALIZE_FAILED, "%s: unable to create source name value.", function ); goto on_error; } value_data_size = libfvalue_value_type_set_data_string( record_values->source_name, &( record_data[ record_data_offset ] ), members_data_size, LIBFVALUE_CODEPAGE_UTF16_LITTLE_ENDIAN, LIBFVALUE_VALUE_DATA_FLAG_MANAGED, error ); if( value_data_size == -1 ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_SET_FAILED, "%s: unable to set data of source name value.", function ); goto on_error; } #if defined( HAVE_DEBUG_OUTPUT ) if( libcnotify_verbose != 0 ) { libcnotify_printf( "%s: source name\t\t\t\t: ", function ); if( libfvalue_value_print( record_values->source_name, 0, 0, error ) != 1 ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_PRINT_FAILED, "%s: unable to print source name value.", function ); goto on_error; } libcnotify_printf( "\n" ); } #endif record_data_offset += value_data_size; members_data_size -= (uint32_t) value_data_size; if( libfvalue_value_type_initialize( &( record_values->computer_name ), LIBFVALUE_VALUE_TYPE_STRING_UTF16, error ) != 1 ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_INITIALIZE_FAILED, "%s: unable to create computer name value.", function ); goto on_error; } value_data_size = 
libfvalue_value_type_set_data_string( record_values->computer_name, &( record_data[ record_data_offset ] ), members_data_size, LIBFVALUE_CODEPAGE_UTF16_LITTLE_ENDIAN, LIBFVALUE_VALUE_DATA_FLAG_MANAGED, error ); if( value_data_size == -1 ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_SET_FAILED, "%s: unable to set data of computer name value.", function ); goto on_error; } #if defined( HAVE_DEBUG_OUTPUT ) if( libcnotify_verbose != 0 ) { libcnotify_printf( "%s: computer name\t\t\t\t: ", function ); if( libfvalue_value_print( record_values->computer_name, 0, 0, error ) != 1 ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_PRINT_FAILED, "%s: unable to print computer name value.", function ); goto on_error; } libcnotify_printf( "\n" ); } #endif record_data_offset += value_data_size; members_data_size -= (uint32_t) value_data_size; if( members_data_size > 0 ) { #if defined( HAVE_DEBUG_OUTPUT ) if( libcnotify_verbose != 0 ) { libcnotify_printf( "%s: members trailing data:\n", function ); libcnotify_print_data( &( record_data[ record_data_offset ] ), members_data_size, LIBCNOTIFY_PRINT_DATA_FLAG_GROUP_DATA ); } #endif record_data_offset += members_data_size; } } if( user_sid_size != 0 ) { if( user_sid_size > ( ( record_data_size - 4 ) - user_sid_offset ) ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_VALUE_OUT_OF_BOUNDS, "%s: user SID data size value out of bounds.", function ); goto on_error; } if( libfvalue_value_type_initialize( &( record_values->user_security_identifier ), LIBFVALUE_VALUE_TYPE_NT_SECURITY_IDENTIFIER, error ) != 1 ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_INITIALIZE_FAILED, "%s: unable to create user security identifier (SID) value.", function ); goto on_error; } if( libfvalue_value_set_data( record_values->user_security_identifier, &( record_data[ user_sid_offset ] ), (size_t) user_sid_size, LIBFVALUE_ENDIAN_LITTLE, LIBFVALUE_VALUE_DATA_FLAG_MANAGED, error ) != 1 ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_SET_FAILED, "%s: unable to set data of user security identifier (SID) value.", function ); goto on_error; } #if defined( HAVE_DEBUG_OUTPUT ) if( libcnotify_verbose != 0 ) { libcnotify_printf( "%s: user security identifier (SID)\t\t: ", function ); if( libfvalue_value_print( record_values->user_security_identifier, 0, 0, error ) != 1 ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_PRINT_FAILED, "%s: unable to print user security identifier (SID) value.", function ); goto on_error; } libcnotify_printf( "\n" ); } #endif record_data_offset += user_sid_size; } if( strings_size != 0 ) { if( strings_size > ( ( record_data_size - 4 ) - strings_offset ) ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_VALUE_OUT_OF_BOUNDS, "%s: strings size value out of bounds.", function ); goto on_error; } #if defined( HAVE_DEBUG_OUTPUT ) if( libcnotify_verbose != 0 ) { libcnotify_printf( "%s: strings data:\n", function ); libcnotify_print_data( &( record_data[ strings_offset ] ), strings_size, LIBCNOTIFY_PRINT_DATA_FLAG_GROUP_DATA ); } #endif if( size_copy == 0 ) { /* If the strings data is truncated */ strings_data_offset = strings_offset + strings_size - 2; while( strings_data_offset > strings_offset ) { if( ( record_data[ strings_data_offset ] != 0 ) || ( record_data[ strings_data_offset + 1 ] != 0 ) ) { 
strings_size += 2; break; } strings_data_offset -= 2; strings_size -= 2; } } if( libfvalue_value_type_initialize( &( record_values->strings ), LIBFVALUE_VALUE_TYPE_STRING_UTF16, error ) != 1 ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_INITIALIZE_FAILED, "%s: unable to create strings value.", function ); goto on_error; } value_data_size = libfvalue_value_type_set_data_strings_array( record_values->strings, &( record_data[ strings_offset ] ), strings_size, LIBFVALUE_CODEPAGE_UTF16_LITTLE_ENDIAN, error ); if( value_data_size == -1 ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_SET_FAILED, "%s: unable to set data of strings value.", function ); goto on_error; } record_data_offset += strings_size; } if( data_size != 0 ) { if( data_size > ( ( record_data_size - 4 ) - record_data_offset ) ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_VALUE_OUT_OF_BOUNDS, "%s: data size value out of bounds.", function ); goto on_error; } #if defined( HAVE_DEBUG_OUTPUT ) if( libcnotify_verbose != 0 ) { libcnotify_printf( "%s: data:\n", function ); libcnotify_print_data( &( record_data[ record_data_offset ] ), (size_t) data_size, LIBCNOTIFY_PRINT_DATA_FLAG_GROUP_DATA ); } #endif if( libfvalue_value_type_initialize( &( record_values->data ), LIBFVALUE_VALUE_TYPE_BINARY_DATA, error ) != 1 ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_INITIALIZE_FAILED, "%s: unable to create data value.", function ); goto on_error; } if( libfvalue_value_set_data( record_values->data, &( record_data[ record_data_offset ] ), (size_t) data_size, LIBFVALUE_ENDIAN_LITTLE, LIBFVALUE_VALUE_DATA_FLAG_MANAGED, error ) != 1 ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_SET_FAILED, "%s: unable to set data of data value.", function ); goto on_error; } #if defined( HAVE_DEBUG_OUTPUT ) record_data_offset += data_size; #endif } #if defined( HAVE_DEBUG_OUTPUT ) if( libcnotify_verbose != 0 ) { if( record_data_offset < ( record_data_size - 4 ) ) { libcnotify_printf( "%s: padding:\n", function ); libcnotify_print_data( &( record_data[ record_data_offset ] ), (size_t) record_data_size - record_data_offset - 4, LIBCNOTIFY_PRINT_DATA_FLAG_GROUP_DATA ); } libcnotify_printf( "%s: size copy\t\t\t\t: %" PRIu32 "\n", function, size_copy ); libcnotify_printf( "\n" ); } #endif if( ( strict_mode == 0 ) && ( size_copy == 0 ) ) { size_copy = size; } if( size != size_copy ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_INPUT, LIBCERROR_INPUT_ERROR_VALUE_MISMATCH, "%s: value mismatch for size and size copy.", function ); goto on_error; } if( record_data_size != (size_t) size ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_INPUT, LIBCERROR_INPUT_ERROR_VALUE_MISMATCH, "%s: value mismatch for record_values data size and size.", function ); goto on_error; } return( 1 ); on_error: if( record_values->data != NULL ) { libfvalue_value_free( &( record_values->data ), NULL ); } if( record_values->strings != NULL ) { libfvalue_value_free( &( record_values->strings ), NULL ); } if( record_values->user_security_identifier != NULL ) { libfvalue_value_free( &( record_values->user_security_identifier ), NULL ); } if( record_values->computer_name != NULL ) { libfvalue_value_free( &( record_values->computer_name ), NULL ); } if( record_values->source_name != NULL ) { libfvalue_value_free( &( record_values->source_name ), NULL ); } return( -1 ); } /* Reads the end of 
file record values * Returns 1 if successful or -1 on error */ int libevt_record_values_read_end_of_file( libevt_record_values_t *record_values, uint8_t *record_data, size_t record_data_size, libcerror_error_t **error ) { static char *function = "libevt_record_values_read_end_of_file"; uint32_t size = 0; uint32_t size_copy = 0; #if defined( HAVE_DEBUG_OUTPUT ) uint32_t value_32bit = 0; #endif if( record_values == NULL ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_ARGUMENTS, LIBCERROR_ARGUMENT_ERROR_INVALID_VALUE, "%s: invalid record values.", function ); return( -1 ); } if( record_data == NULL ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_ARGUMENTS, LIBCERROR_ARGUMENT_ERROR_INVALID_VALUE, "%s: invalid record data.", function ); return( -1 ); } if( record_data_size > (size_t) SSIZE_MAX ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_ARGUMENTS, LIBCERROR_ARGUMENT_ERROR_VALUE_EXCEEDS_MAXIMUM, "%s: invalid record data size value exceeds maximum.", function ); return( -1 ); } if( record_data_size < sizeof( evt_record_end_of_file_t ) ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_VALUE_OUT_OF_BOUNDS, "%s: record data size value out of bounds.", function ); return( -1 ); } byte_stream_copy_to_uint32_little_endian( ( (evt_record_end_of_file_t *) record_data )->size, size ); byte_stream_copy_to_uint32_little_endian( ( (evt_record_end_of_file_t *) record_data )->size_copy, size_copy ); #if defined( HAVE_DEBUG_OUTPUT ) if( libcnotify_verbose != 0 ) { libcnotify_printf( "%s: size\t\t\t\t: %" PRIu32 "\n", function, size ); byte_stream_copy_to_uint32_little_endian( ( (evt_record_end_of_file_t *) record_data )->signature1, value_32bit ); libcnotify_printf( "%s: signature1\t\t\t: 0x%08" PRIx32 "\n", function, value_32bit ); byte_stream_copy_to_uint32_little_endian( ( (evt_record_end_of_file_t *) record_data )->signature2, value_32bit ); libcnotify_printf( "%s: signature2\t\t\t: 0x%08" PRIx32 "\n", function, value_32bit ); byte_stream_copy_to_uint32_little_endian( ( (evt_record_end_of_file_t *) record_data )->signature3, value_32bit ); libcnotify_printf( "%s: signature3\t\t\t: 0x%08" PRIx32 "\n", function, value_32bit ); byte_stream_copy_to_uint32_little_endian( ( (evt_record_end_of_file_t *) record_data )->signature4, value_32bit ); libcnotify_printf( "%s: signature4\t\t\t: 0x%08" PRIx32 "\n", function, value_32bit ); byte_stream_copy_to_uint32_little_endian( ( (evt_record_end_of_file_t *) record_data )->first_record_offset, value_32bit ); libcnotify_printf( "%s: first record offset\t\t: 0x%08" PRIx32 "\n", function, value_32bit ); byte_stream_copy_to_uint32_little_endian( ( (evt_record_end_of_file_t *) record_data )->end_of_file_record_offset, value_32bit ); libcnotify_printf( "%s: end of file record offset\t: 0x%08" PRIx32 "\n", function, value_32bit ); byte_stream_copy_to_uint32_little_endian( ( (evt_record_end_of_file_t *) record_data )->last_record_number, value_32bit ); libcnotify_printf( "%s: last record number\t\t: %" PRIu32 "\n", function, value_32bit ); byte_stream_copy_to_uint32_little_endian( ( (evt_record_end_of_file_t *) record_data )->first_record_number, value_32bit ); libcnotify_printf( "%s: first record number\t\t: %" PRIu32 "\n", function, value_32bit ); libcnotify_printf( "%s: size copy\t\t\t: %" PRIu32 "\n", function, size_copy ); libcnotify_printf( "\n" ); } #endif if( size != size_copy ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_INPUT, LIBCERROR_INPUT_ERROR_VALUE_MISMATCH, "%s: value mismatch for size and 
size copy.", function ); return( -1 ); } if( record_data_size != (size_t) size ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_INPUT, LIBCERROR_INPUT_ERROR_VALUE_MISMATCH, "%s: value mismatch for record data size and size.", function ); return( -1 ); } /* TODO correct values in IO handle if necessary */ return( 1 ); } /* Retrieves the type * Returns 1 if successful or -1 on error */ int libevt_record_values_get_type( libevt_record_values_t *record_values, uint8_t *type, libcerror_error_t **error ) { static char *function = "libevt_record_values_get_type"; if( record_values == NULL ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_ARGUMENTS, LIBCERROR_ARGUMENT_ERROR_INVALID_VALUE, "%s: invalid record values.", function ); return( -1 ); } if( type == NULL ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_ARGUMENTS, LIBCERROR_ARGUMENT_ERROR_INVALID_VALUE, "%s: invalid type.", function ); return( -1 ); } *type = record_values->type; return( 1 ); } /* Reads record values * Callback for the (recovered) records list * Returns 1 if successful or -1 on error */ int libevt_record_values_read_element_data( libevt_io_handle_t *io_handle, libbfio_handle_t *file_io_handle, libfdata_list_element_t *element, libfcache_cache_t *cache, int element_file_index LIBEVT_ATTRIBUTE_UNUSED, off64_t element_offset, size64_t element_size LIBEVT_ATTRIBUTE_UNUSED, uint32_t element_flags LIBEVT_ATTRIBUTE_UNUSED, uint8_t read_flags LIBEVT_ATTRIBUTE_UNUSED, libcerror_error_t **error ) { libevt_record_values_t *record_values = NULL; static char *function = "libevt_record_values_read_element_data"; off64_t file_offset = 0; ssize_t read_count = 0; LIBEVT_UNREFERENCED_PARAMETER( element_size ) LIBEVT_UNREFERENCED_PARAMETER( element_file_index ) LIBEVT_UNREFERENCED_PARAMETER( element_flags ) LIBEVT_UNREFERENCED_PARAMETER( read_flags ) #if defined( HAVE_DEBUG_OUTPUT ) if( libcnotify_verbose != 0 ) { libcnotify_printf( "%s: reading record at offset: %" PRIi64 " (0x%08" PRIx64 ")\n", function, element_offset, element_offset ); } #endif if( libbfio_handle_seek_offset( file_io_handle, element_offset, SEEK_SET, error ) == -1 ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_IO, LIBCERROR_IO_ERROR_SEEK_FAILED, "%s: unable to seek record offset: %" PRIi64 ".", function, element_offset ); goto on_error; } if( libevt_record_values_initialize( &record_values, error ) != 1 ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_INITIALIZE_FAILED, "%s: unable to create record values.", function ); goto on_error; } /* File offset must be before being passed to libevt_record_values_read */ file_offset = element_offset; read_count = libevt_record_values_read( record_values, file_io_handle, io_handle, &file_offset, 0, error ); if( read_count == -1 ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_IO, LIBCERROR_IO_ERROR_READ_FAILED, "%s: unable to read record at offset: %" PRIi64 ".", function, element_offset ); goto on_error; } if( libfdata_list_element_set_element_value( element, (intptr_t *) file_io_handle, cache, (intptr_t *) record_values, (int (*)(intptr_t **, libcerror_error_t **)) &libevt_record_values_free, LIBFDATA_LIST_ELEMENT_VALUE_FLAG_MANAGED, error ) != 1 ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_SET_FAILED, "%s: unable to set record values as element value.", function ); goto on_error; } return( 1 ); on_error: if( record_values != NULL ) { libevt_record_values_free( &record_values, NULL ); } return( -1 ); }
int libevt_record_values_read_event( libevt_record_values_t *record_values, uint8_t *record_data, size_t record_data_size, uint8_t strict_mode, libcerror_error_t **error ) { static char *function = "libevt_record_values_read_event"; size_t record_data_offset = 0; size_t strings_data_offset = 0; ssize_t value_data_size = 0; uint32_t data_offset = 0; uint32_t data_size = 0; uint32_t members_data_size = 0; uint32_t size = 0; uint32_t size_copy = 0; uint32_t strings_offset = 0; uint32_t strings_size = 0; uint32_t user_sid_offset = 0; uint32_t user_sid_size = 0; #if defined( HAVE_DEBUG_OUTPUT ) uint32_t value_32bit = 0; uint16_t value_16bit = 0; #endif if( record_values == NULL ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_ARGUMENTS, LIBCERROR_ARGUMENT_ERROR_INVALID_VALUE, "%s: invalid record values.", function ); return( -1 ); } if( record_data == NULL ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_ARGUMENTS, LIBCERROR_ARGUMENT_ERROR_INVALID_VALUE, "%s: invalid record data.", function ); return( -1 ); } if( record_data_size > (size_t) SSIZE_MAX ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_ARGUMENTS, LIBCERROR_ARGUMENT_ERROR_VALUE_EXCEEDS_MAXIMUM, "%s: invalid record data size value exceeds maximum.", function ); return( -1 ); } if( record_data_size < ( sizeof( evt_record_event_header_t ) + 4 ) ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_VALUE_OUT_OF_BOUNDS, "%s: record data size value out of bounds.", function ); return( -1 ); } byte_stream_copy_to_uint32_little_endian( ( (evt_record_event_header_t *) record_data )->size, size ); byte_stream_copy_to_uint32_little_endian( ( (evt_record_event_header_t *) record_data )->record_number, record_values->number ); byte_stream_copy_to_uint32_little_endian( ( (evt_record_event_header_t *) record_data )->creation_time, record_values->creation_time ); byte_stream_copy_to_uint32_little_endian( ( (evt_record_event_header_t *) record_data )->written_time, record_values->written_time ); byte_stream_copy_to_uint32_little_endian( ( (evt_record_event_header_t *) record_data )->event_identifier, record_values->event_identifier ); byte_stream_copy_to_uint16_little_endian( ( (evt_record_event_header_t *) record_data )->event_type, record_values->event_type ); byte_stream_copy_to_uint16_little_endian( ( (evt_record_event_header_t *) record_data )->event_category, record_values->event_category ); byte_stream_copy_to_uint32_little_endian( ( (evt_record_event_header_t *) record_data )->strings_offset, strings_offset ); byte_stream_copy_to_uint32_little_endian( ( (evt_record_event_header_t *) record_data )->user_sid_size, user_sid_size ); byte_stream_copy_to_uint32_little_endian( ( (evt_record_event_header_t *) record_data )->user_sid_offset, user_sid_offset ); byte_stream_copy_to_uint32_little_endian( ( (evt_record_event_header_t *) record_data )->data_size, data_size ); byte_stream_copy_to_uint32_little_endian( ( (evt_record_event_header_t *) record_data )->data_offset, data_offset ); byte_stream_copy_to_uint32_little_endian( &( record_data[ record_data_size - 4 ] ), size_copy ); #if defined( HAVE_DEBUG_OUTPUT ) if( libcnotify_verbose != 0 ) { libcnotify_printf( "%s: size\t\t\t\t\t: %" PRIu32 "\n", function, size ); libcnotify_printf( "%s: signature\t\t\t\t: %c%c%c%c\n", function, ( (evt_record_event_header_t *) record_data )->signature[ 0 ], ( (evt_record_event_header_t *) record_data )->signature[ 1 ], ( (evt_record_event_header_t *) record_data )->signature[ 2 ], ( (evt_record_event_header_t *) 
record_data )->signature[ 3 ] ); libcnotify_printf( "%s: record number\t\t\t\t: %" PRIu32 "\n", function, record_values->number ); if( libevt_debug_print_posix_time_value( function, "creation time\t\t\t\t", ( (evt_record_event_header_t *) record_data )->creation_time, 4, LIBFDATETIME_ENDIAN_LITTLE, LIBFDATETIME_POSIX_TIME_VALUE_TYPE_SECONDS_32BIT_SIGNED, LIBFDATETIME_STRING_FORMAT_TYPE_CTIME | LIBFDATETIME_STRING_FORMAT_FLAG_DATE_TIME, error ) != 1 ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_PRINT_FAILED, "%s: unable to print POSIX time value.", function ); goto on_error; } if( libevt_debug_print_posix_time_value( function, "written time\t\t\t\t", ( (evt_record_event_header_t *) record_data )->written_time, 4, LIBFDATETIME_ENDIAN_LITTLE, LIBFDATETIME_POSIX_TIME_VALUE_TYPE_SECONDS_32BIT_SIGNED, LIBFDATETIME_STRING_FORMAT_TYPE_CTIME | LIBFDATETIME_STRING_FORMAT_FLAG_DATE_TIME, error ) != 1 ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_PRINT_FAILED, "%s: unable to print POSIX time value.", function ); goto on_error; } libcnotify_printf( "%s: event identifier\t\t\t: 0x%08" PRIx32 "\n", function, record_values->event_identifier ); libcnotify_printf( "%s: event identifier: code\t\t\t: %" PRIu32 "\n", function, record_values->event_identifier & 0x0000ffffUL ); libcnotify_printf( "%s: event identifier: facility\t\t: %" PRIu32 "\n", function, ( record_values->event_identifier & 0x0fff0000UL ) >> 16 ); libcnotify_printf( "%s: event identifier: reserved\t\t: %" PRIu32 "\n", function, ( record_values->event_identifier & 0x10000000UL ) >> 28 ); libcnotify_printf( "%s: event identifier: customer flags\t: %" PRIu32 "\n", function, ( record_values->event_identifier & 0x20000000UL ) >> 29 ); libcnotify_printf( "%s: event identifier: severity\t\t: %" PRIu32 " (", function, ( record_values->event_identifier & 0xc0000000UL ) >> 30 ); libevt_debug_print_event_identifier_severity( record_values->event_identifier ); libcnotify_printf( ")\n" ); libcnotify_printf( "%s: event type\t\t\t\t: %" PRIu16 " (", function, record_values->event_type ); libevt_debug_print_event_type( record_values->event_type ); libcnotify_printf( ")\n" ); byte_stream_copy_to_uint16_little_endian( ( (evt_record_event_header_t *) record_data )->number_of_strings, value_16bit ); libcnotify_printf( "%s: number of strings\t\t\t: %" PRIu16 "\n", function, value_16bit ); libcnotify_printf( "%s: event category\t\t\t\t: %" PRIu16 "\n", function, record_values->event_category ); byte_stream_copy_to_uint16_little_endian( ( (evt_record_event_header_t *) record_data )->event_flags, value_16bit ); libcnotify_printf( "%s: event flags\t\t\t\t: 0x%04" PRIx16 "\n", function, value_16bit ); byte_stream_copy_to_uint32_little_endian( ( (evt_record_event_header_t *) record_data )->closing_record_number, value_32bit ); libcnotify_printf( "%s: closing record values number\t\t: %" PRIu32 "\n", function, value_32bit ); libcnotify_printf( "%s: strings offset\t\t\t\t: %" PRIu32 "\n", function, strings_offset ); libcnotify_printf( "%s: user security identifier (SID) size\t: %" PRIu32 "\n", function, user_sid_size ); libcnotify_printf( "%s: user security identifier (SID) offset\t: %" PRIu32 "\n", function, user_sid_offset ); libcnotify_printf( "%s: data size\t\t\t\t: %" PRIu32 "\n", function, data_size ); libcnotify_printf( "%s: data offset\t\t\t\t: %" PRIu32 "\n", function, data_offset ); } #endif record_data_offset = sizeof( evt_record_event_header_t ); if( ( user_sid_offset == 0 ) 
&& ( user_sid_size != 0 ) ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_VALUE_OUT_OF_BOUNDS, "%s: user SID offset or size value out of bounds.", function ); goto on_error; } if( user_sid_offset != 0 ) { if( ( (size_t) user_sid_offset < record_data_offset ) || ( (size_t) user_sid_offset >= ( record_data_size - 4 ) ) ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_VALUE_OUT_OF_BOUNDS, "%s: user SID offset value out of bounds.", function ); goto on_error; } if( user_sid_size != 0 ) { if( (size_t) ( user_sid_offset + user_sid_size ) > ( record_data_size - 4 ) ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_VALUE_OUT_OF_BOUNDS, "%s: user SID size value out of bounds.", function ); goto on_error; } } } /* If the strings offset is points at the offset at record data size - 4 * the strings are empty. For this to be sane the data offset should * be the same as the strings offset or the data size 0. */ if( ( (size_t) strings_offset < user_sid_offset ) || ( (size_t) strings_offset >= ( record_data_size - 4 ) ) ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_VALUE_OUT_OF_BOUNDS, "%s: strings offset value out of bounds.", function ); goto on_error; } if( ( (size_t) data_offset < strings_offset ) || ( (size_t) data_offset >= ( record_data_size - 4 ) ) ) { if( data_size != 0 ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_VALUE_OUT_OF_BOUNDS, "%s: data offset value out of bounds.", function ); goto on_error; } data_offset = (uint32_t) record_data_size - 4; } if( ( (size_t) strings_offset >= ( record_data_size - 4 ) ) && ( strings_offset != data_offset ) ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_VALUE_OUT_OF_BOUNDS, "%s: strings offset value out of bounds.", function ); goto on_error; } if( strings_offset != 0 ) { if( strings_offset < record_data_offset ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_VALUE_OUT_OF_BOUNDS, "%s: strings offset value out of bounds.", function ); goto on_error; } } if( user_sid_offset != 0 ) { members_data_size = user_sid_offset - (uint32_t) record_data_offset; } else if( strings_offset != 0 ) { members_data_size = strings_offset - (uint32_t) record_data_offset; } if( strings_offset != 0 ) { strings_size = data_offset - strings_offset; } if( data_size != 0 ) { if( (size_t) ( data_offset + data_size ) > ( record_data_size - 4 ) ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_VALUE_OUT_OF_BOUNDS, "%s: data size value out of bounds.", function ); goto on_error; } } if( members_data_size != 0 ) { #if defined( HAVE_DEBUG_OUTPUT ) if( libcnotify_verbose != 0 ) { libcnotify_printf( "%s: members data:\n", function ); libcnotify_print_data( &( record_data[ record_data_offset ] ), members_data_size, LIBCNOTIFY_PRINT_DATA_FLAG_GROUP_DATA ); } #endif if( libfvalue_value_type_initialize( &( record_values->source_name ), LIBFVALUE_VALUE_TYPE_STRING_UTF16, error ) != 1 ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_INITIALIZE_FAILED, "%s: unable to create source name value.", function ); goto on_error; } value_data_size = libfvalue_value_type_set_data_string( record_values->source_name, &( record_data[ record_data_offset ] ), members_data_size, LIBFVALUE_CODEPAGE_UTF16_LITTLE_ENDIAN, LIBFVALUE_VALUE_DATA_FLAG_MANAGED, error ); 
if( value_data_size == -1 ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_SET_FAILED, "%s: unable to set data of source name value.", function ); goto on_error; } #if defined( HAVE_DEBUG_OUTPUT ) if( libcnotify_verbose != 0 ) { libcnotify_printf( "%s: source name\t\t\t\t: ", function ); if( libfvalue_value_print( record_values->source_name, 0, 0, error ) != 1 ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_PRINT_FAILED, "%s: unable to print source name value.", function ); goto on_error; } libcnotify_printf( "\n" ); } #endif record_data_offset += value_data_size; members_data_size -= (uint32_t) value_data_size; if( libfvalue_value_type_initialize( &( record_values->computer_name ), LIBFVALUE_VALUE_TYPE_STRING_UTF16, error ) != 1 ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_INITIALIZE_FAILED, "%s: unable to create computer name value.", function ); goto on_error; } value_data_size = libfvalue_value_type_set_data_string( record_values->computer_name, &( record_data[ record_data_offset ] ), members_data_size, LIBFVALUE_CODEPAGE_UTF16_LITTLE_ENDIAN, LIBFVALUE_VALUE_DATA_FLAG_MANAGED, error ); if( value_data_size == -1 ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_SET_FAILED, "%s: unable to set data of computer name value.", function ); goto on_error; } #if defined( HAVE_DEBUG_OUTPUT ) if( libcnotify_verbose != 0 ) { libcnotify_printf( "%s: computer name\t\t\t\t: ", function ); if( libfvalue_value_print( record_values->computer_name, 0, 0, error ) != 1 ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_PRINT_FAILED, "%s: unable to print computer name value.", function ); goto on_error; } libcnotify_printf( "\n" ); } #endif record_data_offset += value_data_size; members_data_size -= (uint32_t) value_data_size; if( members_data_size > 0 ) { #if defined( HAVE_DEBUG_OUTPUT ) if( libcnotify_verbose != 0 ) { libcnotify_printf( "%s: members trailing data:\n", function ); libcnotify_print_data( &( record_data[ record_data_offset ] ), members_data_size, LIBCNOTIFY_PRINT_DATA_FLAG_GROUP_DATA ); } #endif record_data_offset += members_data_size; } } if( user_sid_size != 0 ) { if( libfvalue_value_type_initialize( &( record_values->user_security_identifier ), LIBFVALUE_VALUE_TYPE_NT_SECURITY_IDENTIFIER, error ) != 1 ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_INITIALIZE_FAILED, "%s: unable to create user security identifier (SID) value.", function ); goto on_error; } if( libfvalue_value_set_data( record_values->user_security_identifier, &( record_data[ user_sid_offset ] ), (size_t) user_sid_size, LIBFVALUE_ENDIAN_LITTLE, LIBFVALUE_VALUE_DATA_FLAG_MANAGED, error ) != 1 ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_SET_FAILED, "%s: unable to set data of user security identifier (SID) value.", function ); goto on_error; } #if defined( HAVE_DEBUG_OUTPUT ) if( libcnotify_verbose != 0 ) { libcnotify_printf( "%s: user security identifier (SID)\t\t: ", function ); if( libfvalue_value_print( record_values->user_security_identifier, 0, 0, error ) != 1 ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_PRINT_FAILED, "%s: unable to print user security identifier (SID) value.", function ); goto on_error; } libcnotify_printf( "\n" ); } #endif record_data_offset += user_sid_size; } if( strings_size != 0 
) { #if defined( HAVE_DEBUG_OUTPUT ) if( libcnotify_verbose != 0 ) { libcnotify_printf( "%s: strings data:\n", function ); libcnotify_print_data( &( record_data[ strings_offset ] ), strings_size, LIBCNOTIFY_PRINT_DATA_FLAG_GROUP_DATA ); } #endif if( size_copy == 0 ) { /* If the strings data is truncated */ strings_data_offset = strings_offset + strings_size - 2; while( strings_data_offset > strings_offset ) { if( ( record_data[ strings_data_offset ] != 0 ) || ( record_data[ strings_data_offset + 1 ] != 0 ) ) { strings_size += 2; break; } strings_data_offset -= 2; strings_size -= 2; } } if( libfvalue_value_type_initialize( &( record_values->strings ), LIBFVALUE_VALUE_TYPE_STRING_UTF16, error ) != 1 ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_INITIALIZE_FAILED, "%s: unable to create strings value.", function ); goto on_error; } value_data_size = libfvalue_value_type_set_data_strings_array( record_values->strings, &( record_data[ strings_offset ] ), strings_size, LIBFVALUE_CODEPAGE_UTF16_LITTLE_ENDIAN, error ); if( value_data_size == -1 ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_SET_FAILED, "%s: unable to set data of strings value.", function ); goto on_error; } record_data_offset += strings_size; } if( data_size != 0 ) { #if defined( HAVE_DEBUG_OUTPUT ) if( libcnotify_verbose != 0 ) { libcnotify_printf( "%s: data:\n", function ); libcnotify_print_data( &( record_data[ data_offset ] ), (size_t) data_size, LIBCNOTIFY_PRINT_DATA_FLAG_GROUP_DATA ); } #endif if( libfvalue_value_type_initialize( &( record_values->data ), LIBFVALUE_VALUE_TYPE_BINARY_DATA, error ) != 1 ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_INITIALIZE_FAILED, "%s: unable to create data value.", function ); goto on_error; } if( libfvalue_value_set_data( record_values->data, &( record_data[ record_data_offset ] ), (size_t) data_size, LIBFVALUE_ENDIAN_LITTLE, LIBFVALUE_VALUE_DATA_FLAG_MANAGED, error ) != 1 ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_SET_FAILED, "%s: unable to set data of data value.", function ); goto on_error; } #if defined( HAVE_DEBUG_OUTPUT ) record_data_offset += data_size; #endif } #if defined( HAVE_DEBUG_OUTPUT ) if( libcnotify_verbose != 0 ) { if( record_data_offset < ( record_data_size - 4 ) ) { libcnotify_printf( "%s: padding:\n", function ); libcnotify_print_data( &( record_data[ record_data_offset ] ), (size_t) record_data_size - record_data_offset - 4, LIBCNOTIFY_PRINT_DATA_FLAG_GROUP_DATA ); } libcnotify_printf( "%s: size copy\t\t\t\t: %" PRIu32 "\n", function, size_copy ); libcnotify_printf( "\n" ); } #endif if( ( strict_mode == 0 ) && ( size_copy == 0 ) ) { size_copy = size; } if( size != size_copy ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_INPUT, LIBCERROR_INPUT_ERROR_VALUE_MISMATCH, "%s: value mismatch for size and size copy.", function ); goto on_error; } if( record_data_size != (size_t) size ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_INPUT, LIBCERROR_INPUT_ERROR_VALUE_MISMATCH, "%s: value mismatch for record_values data size and size.", function ); goto on_error; } return( 1 ); on_error: if( record_values->data != NULL ) { libfvalue_value_free( &( record_values->data ), NULL ); } if( record_values->strings != NULL ) { libfvalue_value_free( &( record_values->strings ), NULL ); } if( record_values->user_security_identifier != NULL ) { libfvalue_value_free( &( record_values->user_security_identifier 
), NULL ); } if( record_values->computer_name != NULL ) { libfvalue_value_free( &( record_values->computer_name ), NULL ); } if( record_values->source_name != NULL ) { libfvalue_value_free( &( record_values->source_name ), NULL ); } return( -1 ); }
int libevt_record_values_read_event( libevt_record_values_t *record_values, uint8_t *record_data, size_t record_data_size, uint8_t strict_mode, libcerror_error_t **error ) { static char *function = "libevt_record_values_read_event"; size_t record_data_offset = 0; size_t strings_data_offset = 0; ssize_t value_data_size = 0; uint32_t data_offset = 0; uint32_t data_size = 0; uint32_t members_data_size = 0; uint32_t size = 0; uint32_t size_copy = 0; uint32_t strings_offset = 0; uint32_t strings_size = 0; uint32_t user_sid_offset = 0; uint32_t user_sid_size = 0; #if defined( HAVE_DEBUG_OUTPUT ) uint32_t value_32bit = 0; uint16_t value_16bit = 0; #endif if( record_values == NULL ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_ARGUMENTS, LIBCERROR_ARGUMENT_ERROR_INVALID_VALUE, "%s: invalid record values.", function ); return( -1 ); } if( record_data == NULL ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_ARGUMENTS, LIBCERROR_ARGUMENT_ERROR_INVALID_VALUE, "%s: invalid record data.", function ); return( -1 ); } if( record_data_size > (size_t) SSIZE_MAX ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_ARGUMENTS, LIBCERROR_ARGUMENT_ERROR_VALUE_EXCEEDS_MAXIMUM, "%s: invalid record data size value exceeds maximum.", function ); return( -1 ); } if( record_data_size < ( sizeof( evt_record_event_header_t ) + 4 ) ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_VALUE_OUT_OF_BOUNDS, "%s: record data size value out of bounds.", function ); return( -1 ); } byte_stream_copy_to_uint32_little_endian( ( (evt_record_event_header_t *) record_data )->size, size ); byte_stream_copy_to_uint32_little_endian( ( (evt_record_event_header_t *) record_data )->record_number, record_values->number ); byte_stream_copy_to_uint32_little_endian( ( (evt_record_event_header_t *) record_data )->creation_time, record_values->creation_time ); byte_stream_copy_to_uint32_little_endian( ( (evt_record_event_header_t *) record_data )->written_time, record_values->written_time ); byte_stream_copy_to_uint32_little_endian( ( (evt_record_event_header_t *) record_data )->event_identifier, record_values->event_identifier ); byte_stream_copy_to_uint16_little_endian( ( (evt_record_event_header_t *) record_data )->event_type, record_values->event_type ); byte_stream_copy_to_uint16_little_endian( ( (evt_record_event_header_t *) record_data )->event_category, record_values->event_category ); byte_stream_copy_to_uint32_little_endian( ( (evt_record_event_header_t *) record_data )->strings_offset, strings_offset ); byte_stream_copy_to_uint32_little_endian( ( (evt_record_event_header_t *) record_data )->user_sid_size, user_sid_size ); byte_stream_copy_to_uint32_little_endian( ( (evt_record_event_header_t *) record_data )->user_sid_offset, user_sid_offset ); byte_stream_copy_to_uint32_little_endian( ( (evt_record_event_header_t *) record_data )->data_size, data_size ); byte_stream_copy_to_uint32_little_endian( ( (evt_record_event_header_t *) record_data )->data_offset, data_offset ); byte_stream_copy_to_uint32_little_endian( &( record_data[ record_data_size - 4 ] ), size_copy ); #if defined( HAVE_DEBUG_OUTPUT ) if( libcnotify_verbose != 0 ) { libcnotify_printf( "%s: size\t\t\t\t\t: %" PRIu32 "\n", function, size ); libcnotify_printf( "%s: signature\t\t\t\t: %c%c%c%c\n", function, ( (evt_record_event_header_t *) record_data )->signature[ 0 ], ( (evt_record_event_header_t *) record_data )->signature[ 1 ], ( (evt_record_event_header_t *) record_data )->signature[ 2 ], ( (evt_record_event_header_t *) 
record_data )->signature[ 3 ] ); libcnotify_printf( "%s: record number\t\t\t\t: %" PRIu32 "\n", function, record_values->number ); if( libevt_debug_print_posix_time_value( function, "creation time\t\t\t\t", ( (evt_record_event_header_t *) record_data )->creation_time, 4, LIBFDATETIME_ENDIAN_LITTLE, LIBFDATETIME_POSIX_TIME_VALUE_TYPE_SECONDS_32BIT_SIGNED, LIBFDATETIME_STRING_FORMAT_TYPE_CTIME | LIBFDATETIME_STRING_FORMAT_FLAG_DATE_TIME, error ) != 1 ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_PRINT_FAILED, "%s: unable to print POSIX time value.", function ); goto on_error; } if( libevt_debug_print_posix_time_value( function, "written time\t\t\t\t", ( (evt_record_event_header_t *) record_data )->written_time, 4, LIBFDATETIME_ENDIAN_LITTLE, LIBFDATETIME_POSIX_TIME_VALUE_TYPE_SECONDS_32BIT_SIGNED, LIBFDATETIME_STRING_FORMAT_TYPE_CTIME | LIBFDATETIME_STRING_FORMAT_FLAG_DATE_TIME, error ) != 1 ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_PRINT_FAILED, "%s: unable to print POSIX time value.", function ); goto on_error; } libcnotify_printf( "%s: event identifier\t\t\t: 0x%08" PRIx32 "\n", function, record_values->event_identifier ); libcnotify_printf( "%s: event identifier: code\t\t\t: %" PRIu32 "\n", function, record_values->event_identifier & 0x0000ffffUL ); libcnotify_printf( "%s: event identifier: facility\t\t: %" PRIu32 "\n", function, ( record_values->event_identifier & 0x0fff0000UL ) >> 16 ); libcnotify_printf( "%s: event identifier: reserved\t\t: %" PRIu32 "\n", function, ( record_values->event_identifier & 0x10000000UL ) >> 28 ); libcnotify_printf( "%s: event identifier: customer flags\t: %" PRIu32 "\n", function, ( record_values->event_identifier & 0x20000000UL ) >> 29 ); libcnotify_printf( "%s: event identifier: severity\t\t: %" PRIu32 " (", function, ( record_values->event_identifier & 0xc0000000UL ) >> 30 ); libevt_debug_print_event_identifier_severity( record_values->event_identifier ); libcnotify_printf( ")\n" ); libcnotify_printf( "%s: event type\t\t\t\t: %" PRIu16 " (", function, record_values->event_type ); libevt_debug_print_event_type( record_values->event_type ); libcnotify_printf( ")\n" ); byte_stream_copy_to_uint16_little_endian( ( (evt_record_event_header_t *) record_data )->number_of_strings, value_16bit ); libcnotify_printf( "%s: number of strings\t\t\t: %" PRIu16 "\n", function, value_16bit ); libcnotify_printf( "%s: event category\t\t\t\t: %" PRIu16 "\n", function, record_values->event_category ); byte_stream_copy_to_uint16_little_endian( ( (evt_record_event_header_t *) record_data )->event_flags, value_16bit ); libcnotify_printf( "%s: event flags\t\t\t\t: 0x%04" PRIx16 "\n", function, value_16bit ); byte_stream_copy_to_uint32_little_endian( ( (evt_record_event_header_t *) record_data )->closing_record_number, value_32bit ); libcnotify_printf( "%s: closing record values number\t\t: %" PRIu32 "\n", function, value_32bit ); libcnotify_printf( "%s: strings offset\t\t\t\t: %" PRIu32 "\n", function, strings_offset ); libcnotify_printf( "%s: user security identifier (SID) size\t: %" PRIu32 "\n", function, user_sid_size ); libcnotify_printf( "%s: user security identifier (SID) offset\t: %" PRIu32 "\n", function, user_sid_offset ); libcnotify_printf( "%s: data size\t\t\t\t: %" PRIu32 "\n", function, data_size ); libcnotify_printf( "%s: data offset\t\t\t\t: %" PRIu32 "\n", function, data_offset ); } #endif record_data_offset = sizeof( evt_record_event_header_t ); if( ( user_sid_offset == 0 ) 
&& ( user_sid_size != 0 ) ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_VALUE_OUT_OF_BOUNDS, "%s: user SID offset or size value out of bounds.", function ); goto on_error; } if( user_sid_offset != 0 ) { if( ( (size_t) user_sid_offset < record_data_offset ) || ( (size_t) user_sid_offset >= ( record_data_size - 4 ) ) ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_VALUE_OUT_OF_BOUNDS, "%s: user SID offset value out of bounds.", function ); goto on_error; } if( user_sid_size != 0 ) { if( (size_t) ( user_sid_offset + user_sid_size ) > ( record_data_size - 4 ) ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_VALUE_OUT_OF_BOUNDS, "%s: user SID size value out of bounds.", function ); goto on_error; } } } /* If the strings offset is points at the offset at record data size - 4 * the strings are empty. For this to be sane the data offset should * be the same as the strings offset or the data size 0. */ if( ( (size_t) strings_offset < user_sid_offset ) || ( (size_t) strings_offset >= ( record_data_size - 4 ) ) ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_VALUE_OUT_OF_BOUNDS, "%s: strings offset value out of bounds.", function ); goto on_error; } if( ( (size_t) data_offset < strings_offset ) || ( (size_t) data_offset >= ( record_data_size - 4 ) ) ) { if( data_size != 0 ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_VALUE_OUT_OF_BOUNDS, "%s: data offset value out of bounds.", function ); goto on_error; } data_offset = (uint32_t) record_data_size - 4; } if( ( (size_t) strings_offset >= ( record_data_size - 4 ) ) && ( strings_offset != data_offset ) ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_VALUE_OUT_OF_BOUNDS, "%s: strings offset value out of bounds.", function ); goto on_error; } if( strings_offset != 0 ) { if( strings_offset < record_data_offset ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_VALUE_OUT_OF_BOUNDS, "%s: strings offset value out of bounds.", function ); goto on_error; } } if( user_sid_offset != 0 ) { members_data_size = user_sid_offset - (uint32_t) record_data_offset; } else if( strings_offset != 0 ) { members_data_size = strings_offset - (uint32_t) record_data_offset; } if( strings_offset != 0 ) { strings_size = data_offset - strings_offset; } if( data_size != 0 ) { if( (size_t) ( data_offset + data_size ) > ( record_data_size - 4 ) ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_VALUE_OUT_OF_BOUNDS, "%s: data size value out of bounds.", function ); goto on_error; } } if( members_data_size != 0 ) { #if defined( HAVE_DEBUG_OUTPUT ) if( libcnotify_verbose != 0 ) { libcnotify_printf( "%s: members data:\n", function ); libcnotify_print_data( &( record_data[ record_data_offset ] ), members_data_size, LIBCNOTIFY_PRINT_DATA_FLAG_GROUP_DATA ); } #endif if( libfvalue_value_type_initialize( &( record_values->source_name ), LIBFVALUE_VALUE_TYPE_STRING_UTF16, error ) != 1 ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_INITIALIZE_FAILED, "%s: unable to create source name value.", function ); goto on_error; } value_data_size = libfvalue_value_type_set_data_string( record_values->source_name, &( record_data[ record_data_offset ] ), members_data_size, LIBFVALUE_CODEPAGE_UTF16_LITTLE_ENDIAN, LIBFVALUE_VALUE_DATA_FLAG_MANAGED, error ); 
if( value_data_size == -1 ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_SET_FAILED, "%s: unable to set data of source name value.", function ); goto on_error; } #if defined( HAVE_DEBUG_OUTPUT ) if( libcnotify_verbose != 0 ) { libcnotify_printf( "%s: source name\t\t\t\t: ", function ); if( libfvalue_value_print( record_values->source_name, 0, 0, error ) != 1 ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_PRINT_FAILED, "%s: unable to print source name value.", function ); goto on_error; } libcnotify_printf( "\n" ); } #endif record_data_offset += value_data_size; members_data_size -= (uint32_t) value_data_size; if( libfvalue_value_type_initialize( &( record_values->computer_name ), LIBFVALUE_VALUE_TYPE_STRING_UTF16, error ) != 1 ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_INITIALIZE_FAILED, "%s: unable to create computer name value.", function ); goto on_error; } value_data_size = libfvalue_value_type_set_data_string( record_values->computer_name, &( record_data[ record_data_offset ] ), members_data_size, LIBFVALUE_CODEPAGE_UTF16_LITTLE_ENDIAN, LIBFVALUE_VALUE_DATA_FLAG_MANAGED, error ); if( value_data_size == -1 ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_SET_FAILED, "%s: unable to set data of computer name value.", function ); goto on_error; } #if defined( HAVE_DEBUG_OUTPUT ) if( libcnotify_verbose != 0 ) { libcnotify_printf( "%s: computer name\t\t\t\t: ", function ); if( libfvalue_value_print( record_values->computer_name, 0, 0, error ) != 1 ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_PRINT_FAILED, "%s: unable to print computer name value.", function ); goto on_error; } libcnotify_printf( "\n" ); } #endif record_data_offset += value_data_size; members_data_size -= (uint32_t) value_data_size; if( members_data_size > 0 ) { #if defined( HAVE_DEBUG_OUTPUT ) if( libcnotify_verbose != 0 ) { libcnotify_printf( "%s: members trailing data:\n", function ); libcnotify_print_data( &( record_data[ record_data_offset ] ), members_data_size, LIBCNOTIFY_PRINT_DATA_FLAG_GROUP_DATA ); } #endif record_data_offset += members_data_size; } } if( user_sid_size != 0 ) { if( user_sid_size > ( ( record_data_size - 4 ) - user_sid_offset ) ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_VALUE_OUT_OF_BOUNDS, "%s: user SID data size value out of bounds.", function ); goto on_error; } if( libfvalue_value_type_initialize( &( record_values->user_security_identifier ), LIBFVALUE_VALUE_TYPE_NT_SECURITY_IDENTIFIER, error ) != 1 ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_INITIALIZE_FAILED, "%s: unable to create user security identifier (SID) value.", function ); goto on_error; } if( libfvalue_value_set_data( record_values->user_security_identifier, &( record_data[ user_sid_offset ] ), (size_t) user_sid_size, LIBFVALUE_ENDIAN_LITTLE, LIBFVALUE_VALUE_DATA_FLAG_MANAGED, error ) != 1 ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_SET_FAILED, "%s: unable to set data of user security identifier (SID) value.", function ); goto on_error; } #if defined( HAVE_DEBUG_OUTPUT ) if( libcnotify_verbose != 0 ) { libcnotify_printf( "%s: user security identifier (SID)\t\t: ", function ); if( libfvalue_value_print( record_values->user_security_identifier, 0, 0, error ) != 1 ) { libcerror_error_set( error, 
LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_PRINT_FAILED, "%s: unable to print user security identifier (SID) value.", function ); goto on_error; } libcnotify_printf( "\n" ); } #endif record_data_offset += user_sid_size; } if( strings_size != 0 ) { if( strings_size > ( ( record_data_size - 4 ) - strings_offset ) ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_VALUE_OUT_OF_BOUNDS, "%s: strings size value out of bounds.", function ); goto on_error; } #if defined( HAVE_DEBUG_OUTPUT ) if( libcnotify_verbose != 0 ) { libcnotify_printf( "%s: strings data:\n", function ); libcnotify_print_data( &( record_data[ strings_offset ] ), strings_size, LIBCNOTIFY_PRINT_DATA_FLAG_GROUP_DATA ); } #endif if( size_copy == 0 ) { /* If the strings data is truncated */ strings_data_offset = strings_offset + strings_size - 2; while( strings_data_offset > strings_offset ) { if( ( record_data[ strings_data_offset ] != 0 ) || ( record_data[ strings_data_offset + 1 ] != 0 ) ) { strings_size += 2; break; } strings_data_offset -= 2; strings_size -= 2; } } if( libfvalue_value_type_initialize( &( record_values->strings ), LIBFVALUE_VALUE_TYPE_STRING_UTF16, error ) != 1 ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_INITIALIZE_FAILED, "%s: unable to create strings value.", function ); goto on_error; } value_data_size = libfvalue_value_type_set_data_strings_array( record_values->strings, &( record_data[ strings_offset ] ), strings_size, LIBFVALUE_CODEPAGE_UTF16_LITTLE_ENDIAN, error ); if( value_data_size == -1 ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_SET_FAILED, "%s: unable to set data of strings value.", function ); goto on_error; } record_data_offset += strings_size; } if( data_size != 0 ) { if( data_size > ( ( record_data_size - 4 ) - record_data_offset ) ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_VALUE_OUT_OF_BOUNDS, "%s: data size value out of bounds.", function ); goto on_error; } #if defined( HAVE_DEBUG_OUTPUT ) if( libcnotify_verbose != 0 ) { libcnotify_printf( "%s: data:\n", function ); libcnotify_print_data( &( record_data[ record_data_offset ] ), (size_t) data_size, LIBCNOTIFY_PRINT_DATA_FLAG_GROUP_DATA ); } #endif if( libfvalue_value_type_initialize( &( record_values->data ), LIBFVALUE_VALUE_TYPE_BINARY_DATA, error ) != 1 ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_INITIALIZE_FAILED, "%s: unable to create data value.", function ); goto on_error; } if( libfvalue_value_set_data( record_values->data, &( record_data[ record_data_offset ] ), (size_t) data_size, LIBFVALUE_ENDIAN_LITTLE, LIBFVALUE_VALUE_DATA_FLAG_MANAGED, error ) != 1 ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_RUNTIME, LIBCERROR_RUNTIME_ERROR_SET_FAILED, "%s: unable to set data of data value.", function ); goto on_error; } #if defined( HAVE_DEBUG_OUTPUT ) record_data_offset += data_size; #endif } #if defined( HAVE_DEBUG_OUTPUT ) if( libcnotify_verbose != 0 ) { if( record_data_offset < ( record_data_size - 4 ) ) { libcnotify_printf( "%s: padding:\n", function ); libcnotify_print_data( &( record_data[ record_data_offset ] ), (size_t) record_data_size - record_data_offset - 4, LIBCNOTIFY_PRINT_DATA_FLAG_GROUP_DATA ); } libcnotify_printf( "%s: size copy\t\t\t\t: %" PRIu32 "\n", function, size_copy ); libcnotify_printf( "\n" ); } #endif if( ( strict_mode == 0 ) && ( size_copy == 0 ) ) { size_copy = size; } if( size != 
size_copy ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_INPUT, LIBCERROR_INPUT_ERROR_VALUE_MISMATCH, "%s: value mismatch for size and size copy.", function ); goto on_error; } if( record_data_size != (size_t) size ) { libcerror_error_set( error, LIBCERROR_ERROR_DOMAIN_INPUT, LIBCERROR_INPUT_ERROR_VALUE_MISMATCH, "%s: value mismatch for record_values data size and size.", function ); goto on_error; } return( 1 ); on_error: if( record_values->data != NULL ) { libfvalue_value_free( &( record_values->data ), NULL ); } if( record_values->strings != NULL ) { libfvalue_value_free( &( record_values->strings ), NULL ); } if( record_values->user_security_identifier != NULL ) { libfvalue_value_free( &( record_values->user_security_identifier ), NULL ); } if( record_values->computer_name != NULL ) { libfvalue_value_free( &( record_values->computer_name ), NULL ); } if( record_values->source_name != NULL ) { libfvalue_value_free( &( record_values->source_name ), NULL ); } return( -1 ); }
{'added': [(1160, '\t\tif( user_sid_size > ( ( record_data_size - 4 ) - user_sid_offset ) )'), (1161, '\t\t{'), (1162, '\t\t\tlibcerror_error_set('), (1163, '\t\t\t error,'), (1164, '\t\t\t LIBCERROR_ERROR_DOMAIN_RUNTIME,'), (1165, '\t\t\t LIBCERROR_RUNTIME_ERROR_VALUE_OUT_OF_BOUNDS,'), (1166, '\t\t\t "%s: user SID data size value out of bounds.",'), (1167, '\t\t\t function );'), (1168, ''), (1169, '\t\t\tgoto on_error;'), (1170, '\t\t}'), (1232, '\t\tif( strings_size > ( ( record_data_size - 4 ) - strings_offset ) )'), (1233, '\t\t{'), (1234, '\t\t\tlibcerror_error_set('), (1235, '\t\t\t error,'), (1236, '\t\t\t LIBCERROR_ERROR_DOMAIN_RUNTIME,'), (1237, '\t\t\t LIBCERROR_RUNTIME_ERROR_VALUE_OUT_OF_BOUNDS,'), (1238, '\t\t\t "%s: strings size value out of bounds.",'), (1239, '\t\t\t function );'), (1240, ''), (1241, '\t\t\tgoto on_error;'), (1242, '\t\t}'), (1310, '\t\tif( data_size > ( ( record_data_size - 4 ) - record_data_offset ) )'), (1311, '\t\t{'), (1312, '\t\t\tlibcerror_error_set('), (1313, '\t\t\t error,'), (1314, '\t\t\t LIBCERROR_ERROR_DOMAIN_RUNTIME,'), (1315, '\t\t\t LIBCERROR_RUNTIME_ERROR_VALUE_OUT_OF_BOUNDS,'), (1316, '\t\t\t "%s: data size value out of bounds.",'), (1317, '\t\t\t function );'), (1318, ''), (1319, '\t\t\tgoto on_error;'), (1320, '\t\t}'), (1328, '\t\t\t &( record_data[ record_data_offset ] ),')], 'deleted': [(1295, '\t\t\t &( record_data[ data_offset ] ),')]}
34
1
1,515
5,005
https://github.com/libyal/libevt
CVE-2018-8754
['CWE-125']
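The row above records the fix for CVE-2018-8754, an out-of-bounds read (CWE-125) in libevt_record_values_read_event: the patch adds explicit checks that user_sid_size, strings_size and data_size fit inside the remaining record buffer before the data is handed to libfvalue. The checks are written in subtraction form (size > remaining - offset) rather than the pre-patch additive form ((size_t)( offset + size ) > remaining), because on the common platforms where int is 32 bits the sum of two uint32_t values is computed in 32-bit arithmetic and can wrap, letting a huge size slip past the test. A minimal standalone sketch of that pattern follows; check_region and the test values are illustrative, not part of libevt:

#include <assert.h>
#include <stddef.h>

/* Returns 1 when [offset, offset + size) lies inside a buffer of
 * data_size bytes. The bound is checked as "size > data_size - offset"
 * (after verifying offset < data_size), so no addition can wrap. */
static int check_region(size_t offset, size_t size, size_t data_size)
{
	if (offset >= data_size)
		return 0;
	if (size > data_size - offset)
		return 0;
	return 1;
}

int main(void)
{
	/* In 32-bit unsigned arithmetic 0x10 + 0xfffffff8 wraps to 0x8, so an
	 * additive check like the pre-patch one would have accepted this pair;
	 * the subtraction form rejects it. */
	assert(check_region(0x10, 0xfffffff8UL, 0x100) == 0);
	assert(check_region(8, 24, 0x100) == 1);
	return 0;
}

The same shape appears three times in the patched function, once each for the user SID, the strings array and the binary data, always against record_data_size - 4 (the last four bytes of the record hold the size copy).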
h323.c
ndpi_search_h323
/* * h323.c * * Copyright (C) 2015-18 ntop.org * Copyright (C) 2013 Remy Mudingay <mudingay@ill.fr> * */ #include "ndpi_protocol_ids.h" #define NDPI_CURRENT_PROTO NDPI_PROTOCOL_H323 #include "ndpi_api.h" struct tpkt { u_int8_t version, reserved; u_int16_t len; }; void ndpi_search_h323(struct ndpi_detection_module_struct *ndpi_struct, struct ndpi_flow_struct *flow) { struct ndpi_packet_struct *packet = &flow->packet; u_int16_t dport = 0, sport = 0; NDPI_LOG_DBG(ndpi_struct, "search H323\n"); /* The TPKT protocol is used by ISO 8072 (on port 102) and H.323. So this check below is to avoid ambiguities */ if((packet->tcp != NULL) && (packet->tcp->dest != ntohs(102))) { NDPI_LOG_DBG2(ndpi_struct, "calculated dport over tcp\n"); /* H323 */ if(packet->payload_packet_len >= 4 && (packet->payload[0] == 0x03) && (packet->payload[1] == 0x00)) { struct tpkt *t = (struct tpkt*)packet->payload; u_int16_t len = ntohs(t->len); if(packet->payload_packet_len == len) { /* We need to check if this packet is in reality a RDP (Remote Desktop) packet encapsulated on TPTK */ if(packet->payload[4] == (packet->payload_packet_len - sizeof(struct tpkt) - 1)) { /* ISO 8073/X.224 */ if((packet->payload[5] == 0xE0 /* CC Connect Request */) || (packet->payload[5] == 0xD0 /* CC Connect Confirm */)) { NDPI_LOG_INFO(ndpi_struct, "found RDP\n"); ndpi_set_detected_protocol(ndpi_struct, flow, NDPI_PROTOCOL_RDP, NDPI_PROTOCOL_UNKNOWN); return; } } flow->l4.tcp.h323_valid_packets++; if(flow->l4.tcp.h323_valid_packets >= 2) { NDPI_LOG_INFO(ndpi_struct, "found H323 broadcast\n"); ndpi_set_detected_protocol(ndpi_struct, flow, NDPI_PROTOCOL_H323, NDPI_PROTOCOL_UNKNOWN); } } else { /* This is not H.323 */ NDPI_EXCLUDE_PROTO(ndpi_struct, flow); return; } } } else if(packet->udp != NULL) { sport = ntohs(packet->udp->source), dport = ntohs(packet->udp->dest); NDPI_LOG_DBG2(ndpi_struct, "calculated dport over udp\n"); if(packet->payload_packet_len >= 6 && packet->payload[0] == 0x80 && packet->payload[1] == 0x08 && (packet->payload[2] == 0xe7 || packet->payload[2] == 0x26) && packet->payload[4] == 0x00 && packet->payload[5] == 0x00) { NDPI_LOG_INFO(ndpi_struct, "found H323 broadcast\n"); ndpi_set_detected_protocol(ndpi_struct, flow, NDPI_PROTOCOL_H323, NDPI_PROTOCOL_UNKNOWN); return; } /* H323 */ if(sport == 1719 || dport == 1719) { if(packet->payload[0] == 0x16 && packet->payload[1] == 0x80 && packet->payload[4] == 0x06 && packet->payload[5] == 0x00) { NDPI_LOG_INFO(ndpi_struct, "found H323 broadcast\n"); ndpi_set_detected_protocol(ndpi_struct, flow, NDPI_PROTOCOL_H323, NDPI_PROTOCOL_UNKNOWN); return; } else if(packet->payload_packet_len >= 20 && packet->payload_packet_len <= 117) { NDPI_LOG_INFO(ndpi_struct, "found H323 broadcast\n"); ndpi_set_detected_protocol(ndpi_struct, flow, NDPI_PROTOCOL_H323, NDPI_PROTOCOL_UNKNOWN); return; } else { NDPI_EXCLUDE_PROTO(ndpi_struct, flow); return; } } } } void init_h323_dissector(struct ndpi_detection_module_struct *ndpi_struct, u_int32_t *id, NDPI_PROTOCOL_BITMASK *detection_bitmask) { ndpi_set_bitmask_protocol_detection("H323", ndpi_struct, detection_bitmask, *id, NDPI_PROTOCOL_H323, ndpi_search_h323, NDPI_SELECTION_BITMASK_PROTOCOL_TCP_OR_UDP_WITH_PAYLOAD, SAVE_DETECTION_BITMASK_AS_UNKNOWN, ADD_TO_DETECTION_BITMASK); *id += 1; }
/* * h323.c * * Copyright (C) 2015-20 ntop.org * Copyright (C) 2013 Remy Mudingay <mudingay@ill.fr> * */ #include "ndpi_protocol_ids.h" #define NDPI_CURRENT_PROTO NDPI_PROTOCOL_H323 #include "ndpi_api.h" struct tpkt { u_int8_t version, reserved; u_int16_t len; }; void ndpi_search_h323(struct ndpi_detection_module_struct *ndpi_struct, struct ndpi_flow_struct *flow) { struct ndpi_packet_struct *packet = &flow->packet; u_int16_t dport = 0, sport = 0; NDPI_LOG_DBG(ndpi_struct, "search H323\n"); /* The TPKT protocol is used by ISO 8072 (on port 102) and H.323. So this check below is to avoid ambiguities */ if((packet->tcp != NULL) && (packet->tcp->dest != ntohs(102))) { NDPI_LOG_DBG2(ndpi_struct, "calculated dport over tcp\n"); /* H323 */ if(packet->payload_packet_len >= 4 && (packet->payload[0] == 0x03) && (packet->payload[1] == 0x00)) { struct tpkt *t = (struct tpkt*)packet->payload; u_int16_t len = ntohs(t->len); if(packet->payload_packet_len == len) { /* We need to check if this packet is in reality a RDP (Remote Desktop) packet encapsulated on TPTK */ if(packet->payload[4] == (packet->payload_packet_len - sizeof(struct tpkt) - 1)) { /* ISO 8073/X.224 */ if((packet->payload[5] == 0xE0 /* CC Connect Request */) || (packet->payload[5] == 0xD0 /* CC Connect Confirm */)) { NDPI_LOG_INFO(ndpi_struct, "found RDP\n"); ndpi_set_detected_protocol(ndpi_struct, flow, NDPI_PROTOCOL_RDP, NDPI_PROTOCOL_UNKNOWN); return; } } flow->l4.tcp.h323_valid_packets++; if(flow->l4.tcp.h323_valid_packets >= 2) { NDPI_LOG_INFO(ndpi_struct, "found H323 broadcast\n"); ndpi_set_detected_protocol(ndpi_struct, flow, NDPI_PROTOCOL_H323, NDPI_PROTOCOL_UNKNOWN); } } else { /* This is not H.323 */ NDPI_EXCLUDE_PROTO(ndpi_struct, flow); return; } } } else if(packet->udp != NULL) { sport = ntohs(packet->udp->source), dport = ntohs(packet->udp->dest); NDPI_LOG_DBG2(ndpi_struct, "calculated dport over udp\n"); if(packet->payload_packet_len >= 6 && packet->payload[0] == 0x80 && packet->payload[1] == 0x08 && (packet->payload[2] == 0xe7 || packet->payload[2] == 0x26) && packet->payload[4] == 0x00 && packet->payload[5] == 0x00) { NDPI_LOG_INFO(ndpi_struct, "found H323 broadcast\n"); ndpi_set_detected_protocol(ndpi_struct, flow, NDPI_PROTOCOL_H323, NDPI_PROTOCOL_UNKNOWN); return; } /* H323 */ if(sport == 1719 || dport == 1719) { if((packet->payload_packet_len >= 5) && (packet->payload[0] == 0x16) && (packet->payload[1] == 0x80) && (packet->payload[4] == 0x06) && (packet->payload[5] == 0x00)) { NDPI_LOG_INFO(ndpi_struct, "found H323 broadcast\n"); ndpi_set_detected_protocol(ndpi_struct, flow, NDPI_PROTOCOL_H323, NDPI_PROTOCOL_UNKNOWN); return; } else if(packet->payload_packet_len >= 20 && packet->payload_packet_len <= 117) { NDPI_LOG_INFO(ndpi_struct, "found H323 broadcast\n"); ndpi_set_detected_protocol(ndpi_struct, flow, NDPI_PROTOCOL_H323, NDPI_PROTOCOL_UNKNOWN); return; } else { NDPI_EXCLUDE_PROTO(ndpi_struct, flow); return; } } } } void init_h323_dissector(struct ndpi_detection_module_struct *ndpi_struct, u_int32_t *id, NDPI_PROTOCOL_BITMASK *detection_bitmask) { ndpi_set_bitmask_protocol_detection("H323", ndpi_struct, detection_bitmask, *id, NDPI_PROTOCOL_H323, ndpi_search_h323, NDPI_SELECTION_BITMASK_PROTOCOL_TCP_OR_UDP_WITH_PAYLOAD, SAVE_DETECTION_BITMASK_AS_UNKNOWN, ADD_TO_DETECTION_BITMASK); *id += 1; }
void ndpi_search_h323(struct ndpi_detection_module_struct *ndpi_struct, struct ndpi_flow_struct *flow) { struct ndpi_packet_struct *packet = &flow->packet; u_int16_t dport = 0, sport = 0; NDPI_LOG_DBG(ndpi_struct, "search H323\n"); /* The TPKT protocol is used by ISO 8072 (on port 102) and H.323. So this check below is to avoid ambiguities */ if((packet->tcp != NULL) && (packet->tcp->dest != ntohs(102))) { NDPI_LOG_DBG2(ndpi_struct, "calculated dport over tcp\n"); /* H323 */ if(packet->payload_packet_len >= 4 && (packet->payload[0] == 0x03) && (packet->payload[1] == 0x00)) { struct tpkt *t = (struct tpkt*)packet->payload; u_int16_t len = ntohs(t->len); if(packet->payload_packet_len == len) { /* We need to check if this packet is in reality a RDP (Remote Desktop) packet encapsulated on TPTK */ if(packet->payload[4] == (packet->payload_packet_len - sizeof(struct tpkt) - 1)) { /* ISO 8073/X.224 */ if((packet->payload[5] == 0xE0 /* CC Connect Request */) || (packet->payload[5] == 0xD0 /* CC Connect Confirm */)) { NDPI_LOG_INFO(ndpi_struct, "found RDP\n"); ndpi_set_detected_protocol(ndpi_struct, flow, NDPI_PROTOCOL_RDP, NDPI_PROTOCOL_UNKNOWN); return; } } flow->l4.tcp.h323_valid_packets++; if(flow->l4.tcp.h323_valid_packets >= 2) { NDPI_LOG_INFO(ndpi_struct, "found H323 broadcast\n"); ndpi_set_detected_protocol(ndpi_struct, flow, NDPI_PROTOCOL_H323, NDPI_PROTOCOL_UNKNOWN); } } else { /* This is not H.323 */ NDPI_EXCLUDE_PROTO(ndpi_struct, flow); return; } } } else if(packet->udp != NULL) { sport = ntohs(packet->udp->source), dport = ntohs(packet->udp->dest); NDPI_LOG_DBG2(ndpi_struct, "calculated dport over udp\n"); if(packet->payload_packet_len >= 6 && packet->payload[0] == 0x80 && packet->payload[1] == 0x08 && (packet->payload[2] == 0xe7 || packet->payload[2] == 0x26) && packet->payload[4] == 0x00 && packet->payload[5] == 0x00) { NDPI_LOG_INFO(ndpi_struct, "found H323 broadcast\n"); ndpi_set_detected_protocol(ndpi_struct, flow, NDPI_PROTOCOL_H323, NDPI_PROTOCOL_UNKNOWN); return; } /* H323 */ if(sport == 1719 || dport == 1719) { if(packet->payload[0] == 0x16 && packet->payload[1] == 0x80 && packet->payload[4] == 0x06 && packet->payload[5] == 0x00) { NDPI_LOG_INFO(ndpi_struct, "found H323 broadcast\n"); ndpi_set_detected_protocol(ndpi_struct, flow, NDPI_PROTOCOL_H323, NDPI_PROTOCOL_UNKNOWN); return; } else if(packet->payload_packet_len >= 20 && packet->payload_packet_len <= 117) { NDPI_LOG_INFO(ndpi_struct, "found H323 broadcast\n"); ndpi_set_detected_protocol(ndpi_struct, flow, NDPI_PROTOCOL_H323, NDPI_PROTOCOL_UNKNOWN); return; } else { NDPI_EXCLUDE_PROTO(ndpi_struct, flow); return; } } } }
void ndpi_search_h323(struct ndpi_detection_module_struct *ndpi_struct, struct ndpi_flow_struct *flow) { struct ndpi_packet_struct *packet = &flow->packet; u_int16_t dport = 0, sport = 0; NDPI_LOG_DBG(ndpi_struct, "search H323\n"); /* The TPKT protocol is used by ISO 8072 (on port 102) and H.323. So this check below is to avoid ambiguities */ if((packet->tcp != NULL) && (packet->tcp->dest != ntohs(102))) { NDPI_LOG_DBG2(ndpi_struct, "calculated dport over tcp\n"); /* H323 */ if(packet->payload_packet_len >= 4 && (packet->payload[0] == 0x03) && (packet->payload[1] == 0x00)) { struct tpkt *t = (struct tpkt*)packet->payload; u_int16_t len = ntohs(t->len); if(packet->payload_packet_len == len) { /* We need to check if this packet is in reality a RDP (Remote Desktop) packet encapsulated on TPTK */ if(packet->payload[4] == (packet->payload_packet_len - sizeof(struct tpkt) - 1)) { /* ISO 8073/X.224 */ if((packet->payload[5] == 0xE0 /* CC Connect Request */) || (packet->payload[5] == 0xD0 /* CC Connect Confirm */)) { NDPI_LOG_INFO(ndpi_struct, "found RDP\n"); ndpi_set_detected_protocol(ndpi_struct, flow, NDPI_PROTOCOL_RDP, NDPI_PROTOCOL_UNKNOWN); return; } } flow->l4.tcp.h323_valid_packets++; if(flow->l4.tcp.h323_valid_packets >= 2) { NDPI_LOG_INFO(ndpi_struct, "found H323 broadcast\n"); ndpi_set_detected_protocol(ndpi_struct, flow, NDPI_PROTOCOL_H323, NDPI_PROTOCOL_UNKNOWN); } } else { /* This is not H.323 */ NDPI_EXCLUDE_PROTO(ndpi_struct, flow); return; } } } else if(packet->udp != NULL) { sport = ntohs(packet->udp->source), dport = ntohs(packet->udp->dest); NDPI_LOG_DBG2(ndpi_struct, "calculated dport over udp\n"); if(packet->payload_packet_len >= 6 && packet->payload[0] == 0x80 && packet->payload[1] == 0x08 && (packet->payload[2] == 0xe7 || packet->payload[2] == 0x26) && packet->payload[4] == 0x00 && packet->payload[5] == 0x00) { NDPI_LOG_INFO(ndpi_struct, "found H323 broadcast\n"); ndpi_set_detected_protocol(ndpi_struct, flow, NDPI_PROTOCOL_H323, NDPI_PROTOCOL_UNKNOWN); return; } /* H323 */ if(sport == 1719 || dport == 1719) { if((packet->payload_packet_len >= 5) && (packet->payload[0] == 0x16) && (packet->payload[1] == 0x80) && (packet->payload[4] == 0x06) && (packet->payload[5] == 0x00)) { NDPI_LOG_INFO(ndpi_struct, "found H323 broadcast\n"); ndpi_set_detected_protocol(ndpi_struct, flow, NDPI_PROTOCOL_H323, NDPI_PROTOCOL_UNKNOWN); return; } else if(packet->payload_packet_len >= 20 && packet->payload_packet_len <= 117) { NDPI_LOG_INFO(ndpi_struct, "found H323 broadcast\n"); ndpi_set_detected_protocol(ndpi_struct, flow, NDPI_PROTOCOL_H323, NDPI_PROTOCOL_UNKNOWN); return; } else { NDPI_EXCLUDE_PROTO(ndpi_struct, flow); return; } } } }
{'added': [(4, ' * Copyright (C) 2015-20 ntop.org'), (39, ' struct tpkt *t = (struct tpkt*)packet->payload;'), (40, ' u_int16_t len = ntohs(t->len);'), (41, ''), (42, ' if(packet->payload_packet_len == len) {'), (43, '\t/*'), (44, '\t We need to check if this packet is in reality'), (45, '\t a RDP (Remote Desktop) packet encapsulated on TPTK'), (46, '\t*/'), (47, ''), (48, '\tif(packet->payload[4] == (packet->payload_packet_len - sizeof(struct tpkt) - 1)) {'), (49, '\t /* ISO 8073/X.224 */'), (50, '\t if((packet->payload[5] == 0xE0 /* CC Connect Request */)'), (51, '\t || (packet->payload[5] == 0xD0 /* CC Connect Confirm */)) {'), (52, '\t NDPI_LOG_INFO(ndpi_struct, "found RDP\\n");'), (53, '\t ndpi_set_detected_protocol(ndpi_struct, flow, NDPI_PROTOCOL_RDP, NDPI_PROTOCOL_UNKNOWN);'), (54, '\t return;'), (56, '\t}'), (58, '\tflow->l4.tcp.h323_valid_packets++;'), (60, '\tif(flow->l4.tcp.h323_valid_packets >= 2) {'), (61, '\t NDPI_LOG_INFO(ndpi_struct, "found H323 broadcast\\n");'), (62, '\t ndpi_set_detected_protocol(ndpi_struct, flow, NDPI_PROTOCOL_H323, NDPI_PROTOCOL_UNKNOWN);'), (64, ' } else {'), (65, '\t/* This is not H.323 */'), (66, '\tNDPI_EXCLUDE_PROTO(ndpi_struct, flow);'), (67, '\treturn;'), (69, ' }'), (83, ' if(sport == 1719 || dport == 1719) {'), (84, ' if((packet->payload_packet_len >= 5)'), (85, '\t && (packet->payload[0] == 0x16)'), (86, '\t && (packet->payload[1] == 0x80)'), (87, '\t && (packet->payload[4] == 0x06)'), (88, '\t && (packet->payload[5] == 0x00)) {'), (89, '\tNDPI_LOG_INFO(ndpi_struct, "found H323 broadcast\\n");'), (90, '\tndpi_set_detected_protocol(ndpi_struct, flow, NDPI_PROTOCOL_H323, NDPI_PROTOCOL_UNKNOWN);'), (91, '\treturn;'), (92, ' } else if(packet->payload_packet_len >= 20 && packet->payload_packet_len <= 117) {'), (93, '\tNDPI_LOG_INFO(ndpi_struct, "found H323 broadcast\\n");'), (94, '\tndpi_set_detected_protocol(ndpi_struct, flow, NDPI_PROTOCOL_H323, NDPI_PROTOCOL_UNKNOWN);'), (95, '\treturn;'), (96, ' } else {'), (97, '\tNDPI_EXCLUDE_PROTO(ndpi_struct, flow);'), (98, '\treturn;'), (100, ' }')], 'deleted': [(4, ' * Copyright (C) 2015-18 ntop.org'), (39, '\tstruct tpkt *t = (struct tpkt*)packet->payload;'), (40, '\tu_int16_t len = ntohs(t->len);'), (41, ''), (42, '\tif(packet->payload_packet_len == len) {'), (43, '\t /*'), (44, '\t We need to check if this packet is in reality'), (45, '\t a RDP (Remote Desktop) packet encapsulated on TPTK'), (46, '\t */'), (47, ''), (48, '\t if(packet->payload[4] == (packet->payload_packet_len - sizeof(struct tpkt) - 1)) {'), (49, '\t /* ISO 8073/X.224 */'), (50, '\t if((packet->payload[5] == 0xE0 /* CC Connect Request */)'), (51, '\t || (packet->payload[5] == 0xD0 /* CC Connect Confirm */)) {'), (52, '\t NDPI_LOG_INFO(ndpi_struct, "found RDP\\n");'), (53, '\t ndpi_set_detected_protocol(ndpi_struct, flow, NDPI_PROTOCOL_RDP, NDPI_PROTOCOL_UNKNOWN);'), (54, '\t return;'), (55, '\t }'), (58, '\t flow->l4.tcp.h323_valid_packets++;'), (60, '\t if(flow->l4.tcp.h323_valid_packets >= 2) {'), (61, '\t NDPI_LOG_INFO(ndpi_struct, "found H323 broadcast\\n");'), (62, '\t ndpi_set_detected_protocol(ndpi_struct, flow, NDPI_PROTOCOL_H323, NDPI_PROTOCOL_UNKNOWN);'), (63, '\t }'), (64, '\t} else {'), (65, '\t /* This is not H.323 */'), (66, '\t NDPI_EXCLUDE_PROTO(ndpi_struct, flow);'), (67, '\t return;'), (83, ' if(sport == 1719 || dport == 1719)'), (84, ' {'), (85, ' if(packet->payload[0] == 0x16 && packet->payload[1] == 0x80 && packet->payload[4] == 0x06 && packet->payload[5] == 0x00)'), (86, '\t {'), (87, '\t 
NDPI_LOG_INFO(ndpi_struct, "found H323 broadcast\\n");'), (88, '\t ndpi_set_detected_protocol(ndpi_struct, flow, NDPI_PROTOCOL_H323, NDPI_PROTOCOL_UNKNOWN);'), (89, '\t return;'), (90, '\t }'), (91, ' else if(packet->payload_packet_len >= 20 && packet->payload_packet_len <= 117)'), (92, '\t {'), (93, '\t NDPI_LOG_INFO(ndpi_struct, "found H323 broadcast\\n");'), (94, '\t ndpi_set_detected_protocol(ndpi_struct, flow, NDPI_PROTOCOL_H323, NDPI_PROTOCOL_UNKNOWN);'), (95, '\t return;'), (96, '\t }'), (97, ' else'), (98, '\t {'), (99, '\t NDPI_EXCLUDE_PROTO(ndpi_struct, flow);'), (100, '\t return;'), (101, '\t }'), (104, '')]}
44
47
78
588
https://github.com/ntop/nDPI
CVE-2020-15472
['CWE-125']
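This row records the fix for CVE-2020-15472, the same bug class (CWE-125) in a protocol dissector: on UDP port 1719 the pre-patch ndpi_search_h323 read packet->payload[0], [1], [4] and [5] without any length check, so a short RAS packet caused an out-of-bounds read; the patch guards those reads with packet->payload_packet_len >= 5. A hedged sketch of the guard pattern follows; looks_like_h225_ras is an illustrative name, not nDPI API. Note that the shipped check of >= 5 would still permit a five-byte payload while index 5 is read, so the sketch uses the stricter bound of six bytes:

#include <stddef.h>
#include <stdint.h>

/* Illustrative dissector guard: bound every payload[i] access by a length
 * check that covers the highest index read. Returns 1 on a match. */
static int looks_like_h225_ras(const uint8_t *payload, size_t payload_len)
{
	if (payload_len < 6)	/* payload[5] is read below */
		return 0;
	return payload[0] == 0x16 && payload[1] == 0x80 &&
	       payload[4] == 0x06 && payload[5] == 0x00;
}

int main(void)
{
	/* Five bytes: the pre-patch code would have read payload[5] past the
	 * end of the buffer; the guarded version simply reports no match. */
	const uint8_t short_pkt[5] = { 0x16, 0x80, 0x00, 0x00, 0x06 };
	return looks_like_h225_ras(short_pkt, sizeof(short_pkt));
}

The general rule shared by both rows: any offset or length taken from untrusted input must be validated against the actual buffer size, in a form that cannot overflow, before the first dereference.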
mlock.c
__munlock_pagevec
/* * linux/mm/mlock.c * * (C) Copyright 1995 Linus Torvalds * (C) Copyright 2002 Christoph Hellwig */ #include <linux/capability.h> #include <linux/mman.h> #include <linux/mm.h> #include <linux/sched/user.h> #include <linux/swap.h> #include <linux/swapops.h> #include <linux/pagemap.h> #include <linux/pagevec.h> #include <linux/mempolicy.h> #include <linux/syscalls.h> #include <linux/sched.h> #include <linux/export.h> #include <linux/rmap.h> #include <linux/mmzone.h> #include <linux/hugetlb.h> #include <linux/memcontrol.h> #include <linux/mm_inline.h> #include "internal.h" bool can_do_mlock(void) { if (rlimit(RLIMIT_MEMLOCK) != 0) return true; if (capable(CAP_IPC_LOCK)) return true; return false; } EXPORT_SYMBOL(can_do_mlock); /* * Mlocked pages are marked with PageMlocked() flag for efficient testing * in vmscan and, possibly, the fault path; and to support semi-accurate * statistics. * * An mlocked page [PageMlocked(page)] is unevictable. As such, it will * be placed on the LRU "unevictable" list, rather than the [in]active lists. * The unevictable list is an LRU sibling list to the [in]active lists. * PageUnevictable is set to indicate the unevictable state. * * When lazy mlocking via vmscan, it is important to ensure that the * vma's VM_LOCKED status is not concurrently being modified, otherwise we * may have mlocked a page that is being munlocked. So lazy mlock must take * the mmap_sem for read, and verify that the vma really is locked * (see mm/rmap.c). */ /* * LRU accounting for clear_page_mlock() */ void clear_page_mlock(struct page *page) { if (!TestClearPageMlocked(page)) return; mod_zone_page_state(page_zone(page), NR_MLOCK, -hpage_nr_pages(page)); count_vm_event(UNEVICTABLE_PGCLEARED); if (!isolate_lru_page(page)) { putback_lru_page(page); } else { /* * We lost the race. the page already moved to evictable list. */ if (PageUnevictable(page)) count_vm_event(UNEVICTABLE_PGSTRANDED); } } /* * Mark page as mlocked if not already. * If page on LRU, isolate and putback to move to unevictable list. */ void mlock_vma_page(struct page *page) { /* Serialize with page migration */ BUG_ON(!PageLocked(page)); VM_BUG_ON_PAGE(PageTail(page), page); VM_BUG_ON_PAGE(PageCompound(page) && PageDoubleMap(page), page); if (!TestSetPageMlocked(page)) { mod_zone_page_state(page_zone(page), NR_MLOCK, hpage_nr_pages(page)); count_vm_event(UNEVICTABLE_PGMLOCKED); if (!isolate_lru_page(page)) putback_lru_page(page); } } /* * Isolate a page from LRU with optional get_page() pin. * Assumes lru_lock already held and page already pinned. */ static bool __munlock_isolate_lru_page(struct page *page, bool getpage) { if (PageLRU(page)) { struct lruvec *lruvec; lruvec = mem_cgroup_page_lruvec(page, page_pgdat(page)); if (getpage) get_page(page); ClearPageLRU(page); del_page_from_lru_list(page, lruvec, page_lru(page)); return true; } return false; } /* * Finish munlock after successful page isolation * * Page must be locked. This is a wrapper for try_to_munlock() * and putback_lru_page() with munlock accounting. */ static void __munlock_isolated_page(struct page *page) { /* * Optimization: if the page was mapped just once, that's our mapping * and we don't need to check all the other vmas. */ if (page_mapcount(page) > 1) try_to_munlock(page); /* Did try_to_unlock() succeed or punt? */ if (!PageMlocked(page)) count_vm_event(UNEVICTABLE_PGMUNLOCKED); putback_lru_page(page); } /* * Accounting for page isolation fail during munlock * * Performs accounting when page isolation fails in munlock. 
There is nothing * else to do because it means some other task has already removed the page * from the LRU. putback_lru_page() will take care of removing the page from * the unevictable list, if necessary. vmscan [page_referenced()] will move * the page back to the unevictable list if some other vma has it mlocked. */ static void __munlock_isolation_failed(struct page *page) { if (PageUnevictable(page)) __count_vm_event(UNEVICTABLE_PGSTRANDED); else __count_vm_event(UNEVICTABLE_PGMUNLOCKED); } /** * munlock_vma_page - munlock a vma page * @page - page to be unlocked, either a normal page or THP page head * * returns the size of the page as a page mask (0 for normal page, * HPAGE_PMD_NR - 1 for THP head page) * * called from munlock()/munmap() path with page supposedly on the LRU. * When we munlock a page, because the vma where we found the page is being * munlock()ed or munmap()ed, we want to check whether other vmas hold the * page locked so that we can leave it on the unevictable lru list and not * bother vmscan with it. However, to walk the page's rmap list in * try_to_munlock() we must isolate the page from the LRU. If some other * task has removed the page from the LRU, we won't be able to do that. * So we clear the PageMlocked as we might not get another chance. If we * can't isolate the page, we leave it for putback_lru_page() and vmscan * [page_referenced()/try_to_unmap()] to deal with. */ unsigned int munlock_vma_page(struct page *page) { int nr_pages; struct zone *zone = page_zone(page); /* For try_to_munlock() and to serialize with page migration */ BUG_ON(!PageLocked(page)); VM_BUG_ON_PAGE(PageTail(page), page); /* * Serialize with any parallel __split_huge_page_refcount() which * might otherwise copy PageMlocked to part of the tail pages before * we clear it in the head page. It also stabilizes hpage_nr_pages(). */ spin_lock_irq(zone_lru_lock(zone)); if (!TestClearPageMlocked(page)) { /* Potentially, PTE-mapped THP: do not skip the rest PTEs */ nr_pages = 1; goto unlock_out; } nr_pages = hpage_nr_pages(page); __mod_zone_page_state(zone, NR_MLOCK, -nr_pages); if (__munlock_isolate_lru_page(page, true)) { spin_unlock_irq(zone_lru_lock(zone)); __munlock_isolated_page(page); goto out; } __munlock_isolation_failed(page); unlock_out: spin_unlock_irq(zone_lru_lock(zone)); out: return nr_pages - 1; } /* * convert get_user_pages() return value to posix mlock() error */ static int __mlock_posix_error_return(long retval) { if (retval == -EFAULT) retval = -ENOMEM; else if (retval == -ENOMEM) retval = -EAGAIN; return retval; } /* * Prepare page for fast batched LRU putback via putback_lru_evictable_pagevec() * * The fast path is available only for evictable pages with single mapping. * Then we can bypass the per-cpu pvec and get better performance. * when mapcount > 1 we need try_to_munlock() which can fail. * when !page_evictable(), we need the full redo logic of putback_lru_page to * avoid leaving evictable page in unevictable list. * * In case of success, @page is added to @pvec and @pgrescued is incremented * in case that the page was previously unevictable. @page is also unlocked. 
*/ static bool __putback_lru_fast_prepare(struct page *page, struct pagevec *pvec, int *pgrescued) { VM_BUG_ON_PAGE(PageLRU(page), page); VM_BUG_ON_PAGE(!PageLocked(page), page); if (page_mapcount(page) <= 1 && page_evictable(page)) { pagevec_add(pvec, page); if (TestClearPageUnevictable(page)) (*pgrescued)++; unlock_page(page); return true; } return false; } /* * Putback multiple evictable pages to the LRU * * Batched putback of evictable pages that bypasses the per-cpu pvec. Some of * the pages might have meanwhile become unevictable but that is OK. */ static void __putback_lru_fast(struct pagevec *pvec, int pgrescued) { count_vm_events(UNEVICTABLE_PGMUNLOCKED, pagevec_count(pvec)); /* *__pagevec_lru_add() calls release_pages() so we don't call * put_page() explicitly */ __pagevec_lru_add(pvec); count_vm_events(UNEVICTABLE_PGRESCUED, pgrescued); } /* * Munlock a batch of pages from the same zone * * The work is split to two main phases. First phase clears the Mlocked flag * and attempts to isolate the pages, all under a single zone lru lock. * The second phase finishes the munlock only for pages where isolation * succeeded. * * Note that the pagevec may be modified during the process. */ static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone) { int i; int nr = pagevec_count(pvec); int delta_munlocked; struct pagevec pvec_putback; int pgrescued = 0; pagevec_init(&pvec_putback, 0); /* Phase 1: page isolation */ spin_lock_irq(zone_lru_lock(zone)); for (i = 0; i < nr; i++) { struct page *page = pvec->pages[i]; if (TestClearPageMlocked(page)) { /* * We already have pin from follow_page_mask() * so we can spare the get_page() here. */ if (__munlock_isolate_lru_page(page, false)) continue; else __munlock_isolation_failed(page); } /* * We won't be munlocking this page in the next phase * but we still need to release the follow_page_mask() * pin. We cannot do it under lru_lock however. If it's * the last pin, __page_cache_release() would deadlock. */ pagevec_add(&pvec_putback, pvec->pages[i]); pvec->pages[i] = NULL; } delta_munlocked = -nr + pagevec_count(&pvec_putback); __mod_zone_page_state(zone, NR_MLOCK, delta_munlocked); spin_unlock_irq(zone_lru_lock(zone)); /* Now we can release pins of pages that we are not munlocking */ pagevec_release(&pvec_putback); /* Phase 2: page munlock */ for (i = 0; i < nr; i++) { struct page *page = pvec->pages[i]; if (page) { lock_page(page); if (!__putback_lru_fast_prepare(page, &pvec_putback, &pgrescued)) { /* * Slow path. We don't want to lose the last * pin before unlock_page() */ get_page(page); /* for putback_lru_page() */ __munlock_isolated_page(page); unlock_page(page); put_page(page); /* from follow_page_mask() */ } } } /* * Phase 3: page putback for pages that qualified for the fast path * This will also call put_page() to return pin from follow_page_mask() */ if (pagevec_count(&pvec_putback)) __putback_lru_fast(&pvec_putback, pgrescued); } /* * Fill up pagevec for __munlock_pagevec using pte walk * * The function expects that the struct page corresponding to @start address is * a non-TPH page already pinned and in the @pvec, and that it belongs to @zone. * * The rest of @pvec is filled by subsequent pages within the same pmd and same * zone, as long as the pte's are present and vm_normal_page() succeeds. These * pages also get pinned. * * Returns the address of the next page that should be scanned. This equals * @start + PAGE_SIZE when no page could be added by the pte walk. 
*/ static unsigned long __munlock_pagevec_fill(struct pagevec *pvec, struct vm_area_struct *vma, int zoneid, unsigned long start, unsigned long end) { pte_t *pte; spinlock_t *ptl; /* * Initialize pte walk starting at the already pinned page where we * are sure that there is a pte, as it was pinned under the same * mmap_sem write op. */ pte = get_locked_pte(vma->vm_mm, start, &ptl); /* Make sure we do not cross the page table boundary */ end = pgd_addr_end(start, end); end = p4d_addr_end(start, end); end = pud_addr_end(start, end); end = pmd_addr_end(start, end); /* The page next to the pinned page is the first we will try to get */ start += PAGE_SIZE; while (start < end) { struct page *page = NULL; pte++; if (pte_present(*pte)) page = vm_normal_page(vma, start, *pte); /* * Break if page could not be obtained or the page's node+zone does not * match */ if (!page || page_zone_id(page) != zoneid) break; /* * Do not use pagevec for PTE-mapped THP, * munlock_vma_pages_range() will handle them. */ if (PageTransCompound(page)) break; get_page(page); /* * Increase the address that will be returned *before* the * eventual break due to pvec becoming full by adding the page */ start += PAGE_SIZE; if (pagevec_add(pvec, page) == 0) break; } pte_unmap_unlock(pte, ptl); return start; } /* * munlock_vma_pages_range() - munlock all pages in the vma range.' * @vma - vma containing range to be munlock()ed. * @start - start address in @vma of the range * @end - end of range in @vma. * * For mremap(), munmap() and exit(). * * Called with @vma VM_LOCKED. * * Returns with VM_LOCKED cleared. Callers must be prepared to * deal with this. * * We don't save and restore VM_LOCKED here because pages are * still on lru. In unmap path, pages might be scanned by reclaim * and re-mlocked by try_to_{munlock|unmap} before we unmap and * free them. This will result in freeing mlocked pages. */ void munlock_vma_pages_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { vma->vm_flags &= VM_LOCKED_CLEAR_MASK; while (start < end) { struct page *page; unsigned int page_mask = 0; unsigned long page_increm; struct pagevec pvec; struct zone *zone; int zoneid; pagevec_init(&pvec, 0); /* * Although FOLL_DUMP is intended for get_dump_page(), * it just so happens that its special treatment of the * ZERO_PAGE (returning an error instead of doing get_page) * suits munlock very well (and if somehow an abnormal page * has sneaked into the range, we won't oops here: great). */ page = follow_page(vma, start, FOLL_GET | FOLL_DUMP); if (page && !IS_ERR(page)) { if (PageTransTail(page)) { VM_BUG_ON_PAGE(PageMlocked(page), page); put_page(page); /* follow_page_mask() */ } else if (PageTransHuge(page)) { lock_page(page); /* * Any THP page found by follow_page_mask() may * have gotten split before reaching * munlock_vma_page(), so we need to compute * the page_mask here instead. */ page_mask = munlock_vma_page(page); unlock_page(page); put_page(page); /* follow_page_mask() */ } else { /* * Non-huge pages are handled in batches via * pagevec. The pin from follow_page_mask() * prevents them from collapsing by THP. */ pagevec_add(&pvec, page); zone = page_zone(page); zoneid = page_zone_id(page); /* * Try to fill the rest of pagevec using fast * pte walk. This will also update start to * the next page to process. Then munlock the * pagevec. 
*/ start = __munlock_pagevec_fill(&pvec, vma, zoneid, start, end); __munlock_pagevec(&pvec, zone); goto next; } } page_increm = 1 + page_mask; start += page_increm * PAGE_SIZE; next: cond_resched(); } } /* * mlock_fixup - handle mlock[all]/munlock[all] requests. * * Filters out "special" vmas -- VM_LOCKED never gets set for these, and * munlock is a no-op. However, for some special vmas, we go ahead and * populate the ptes. * * For vmas that pass the filters, merge/split as appropriate. */ static int mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev, unsigned long start, unsigned long end, vm_flags_t newflags) { struct mm_struct *mm = vma->vm_mm; pgoff_t pgoff; int nr_pages; int ret = 0; int lock = !!(newflags & VM_LOCKED); vm_flags_t old_flags = vma->vm_flags; if (newflags == vma->vm_flags || (vma->vm_flags & VM_SPECIAL) || is_vm_hugetlb_page(vma) || vma == get_gate_vma(current->mm)) /* don't set VM_LOCKED or VM_LOCKONFAULT and don't count */ goto out; pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT); *prev = vma_merge(mm, *prev, start, end, newflags, vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma), vma->vm_userfaultfd_ctx); if (*prev) { vma = *prev; goto success; } if (start != vma->vm_start) { ret = split_vma(mm, vma, start, 1); if (ret) goto out; } if (end != vma->vm_end) { ret = split_vma(mm, vma, end, 0); if (ret) goto out; } success: /* * Keep track of amount of locked VM. */ nr_pages = (end - start) >> PAGE_SHIFT; if (!lock) nr_pages = -nr_pages; else if (old_flags & VM_LOCKED) nr_pages = 0; mm->locked_vm += nr_pages; /* * vm_flags is protected by the mmap_sem held in write mode. * It's okay if try_to_unmap_one unmaps a page just after we * set VM_LOCKED, populate_vma_page_range will bring it back. */ if (lock) vma->vm_flags = newflags; else munlock_vma_pages_range(vma, start, end); out: *prev = vma; return ret; } static int apply_vma_lock_flags(unsigned long start, size_t len, vm_flags_t flags) { unsigned long nstart, end, tmp; struct vm_area_struct * vma, * prev; int error; VM_BUG_ON(offset_in_page(start)); VM_BUG_ON(len != PAGE_ALIGN(len)); end = start + len; if (end < start) return -EINVAL; if (end == start) return 0; vma = find_vma(current->mm, start); if (!vma || vma->vm_start > start) return -ENOMEM; prev = vma->vm_prev; if (start > vma->vm_start) prev = vma; for (nstart = start ; ; ) { vm_flags_t newflags = vma->vm_flags & VM_LOCKED_CLEAR_MASK; newflags |= flags; /* Here we know that vma->vm_start <= nstart < vma->vm_end. */ tmp = vma->vm_end; if (tmp > end) tmp = end; error = mlock_fixup(vma, &prev, nstart, tmp, newflags); if (error) break; nstart = tmp; if (nstart < prev->vm_end) nstart = prev->vm_end; if (nstart >= end) break; vma = prev->vm_next; if (!vma || vma->vm_start != nstart) { error = -ENOMEM; break; } } return error; } /* * Go through vma areas and sum size of mlocked * vma pages, as return value. * Note deferred memory locking case(mlock2(,,MLOCK_ONFAULT) * is also counted. 
* Return value: previously mlocked page counts */ static int count_mm_mlocked_page_nr(struct mm_struct *mm, unsigned long start, size_t len) { struct vm_area_struct *vma; int count = 0; if (mm == NULL) mm = current->mm; vma = find_vma(mm, start); if (vma == NULL) vma = mm->mmap; for (; vma ; vma = vma->vm_next) { if (start >= vma->vm_end) continue; if (start + len <= vma->vm_start) break; if (vma->vm_flags & VM_LOCKED) { if (start > vma->vm_start) count -= (start - vma->vm_start); if (start + len < vma->vm_end) { count += start + len - vma->vm_start; break; } count += vma->vm_end - vma->vm_start; } } return count >> PAGE_SHIFT; } static __must_check int do_mlock(unsigned long start, size_t len, vm_flags_t flags) { unsigned long locked; unsigned long lock_limit; int error = -ENOMEM; if (!can_do_mlock()) return -EPERM; lru_add_drain_all(); /* flush pagevec */ len = PAGE_ALIGN(len + (offset_in_page(start))); start &= PAGE_MASK; lock_limit = rlimit(RLIMIT_MEMLOCK); lock_limit >>= PAGE_SHIFT; locked = len >> PAGE_SHIFT; if (down_write_killable(&current->mm->mmap_sem)) return -EINTR; locked += current->mm->locked_vm; if ((locked > lock_limit) && (!capable(CAP_IPC_LOCK))) { /* * It is possible that the regions requested intersect with * previously mlocked areas, that part area in "mm->locked_vm" * should not be counted to new mlock increment count. So check * and adjust locked count if necessary. */ locked -= count_mm_mlocked_page_nr(current->mm, start, len); } /* check against resource limits */ if ((locked <= lock_limit) || capable(CAP_IPC_LOCK)) error = apply_vma_lock_flags(start, len, flags); up_write(&current->mm->mmap_sem); if (error) return error; error = __mm_populate(start, len, 0); if (error) return __mlock_posix_error_return(error); return 0; } SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len) { return do_mlock(start, len, VM_LOCKED); } SYSCALL_DEFINE3(mlock2, unsigned long, start, size_t, len, int, flags) { vm_flags_t vm_flags = VM_LOCKED; if (flags & ~MLOCK_ONFAULT) return -EINVAL; if (flags & MLOCK_ONFAULT) vm_flags |= VM_LOCKONFAULT; return do_mlock(start, len, vm_flags); } SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len) { int ret; len = PAGE_ALIGN(len + (offset_in_page(start))); start &= PAGE_MASK; if (down_write_killable(&current->mm->mmap_sem)) return -EINTR; ret = apply_vma_lock_flags(start, len, 0); up_write(&current->mm->mmap_sem); return ret; } /* * Take the MCL_* flags passed into mlockall (or 0 if called from munlockall) * and translate into the appropriate modifications to mm->def_flags and/or the * flags for all current VMAs. * * There are a couple of subtleties with this. If mlockall() is called multiple * times with different flags, the values do not necessarily stack. If mlockall * is called once including the MCL_FUTURE flag and then a second time without * it, VM_LOCKED and VM_LOCKONFAULT will be cleared from mm->def_flags. 
*/ static int apply_mlockall_flags(int flags) { struct vm_area_struct * vma, * prev = NULL; vm_flags_t to_add = 0; current->mm->def_flags &= VM_LOCKED_CLEAR_MASK; if (flags & MCL_FUTURE) { current->mm->def_flags |= VM_LOCKED; if (flags & MCL_ONFAULT) current->mm->def_flags |= VM_LOCKONFAULT; if (!(flags & MCL_CURRENT)) goto out; } if (flags & MCL_CURRENT) { to_add |= VM_LOCKED; if (flags & MCL_ONFAULT) to_add |= VM_LOCKONFAULT; } for (vma = current->mm->mmap; vma ; vma = prev->vm_next) { vm_flags_t newflags; newflags = vma->vm_flags & VM_LOCKED_CLEAR_MASK; newflags |= to_add; /* Ignore errors */ mlock_fixup(vma, &prev, vma->vm_start, vma->vm_end, newflags); cond_resched_rcu_qs(); } out: return 0; } SYSCALL_DEFINE1(mlockall, int, flags) { unsigned long lock_limit; int ret; if (!flags || (flags & ~(MCL_CURRENT | MCL_FUTURE | MCL_ONFAULT))) return -EINVAL; if (!can_do_mlock()) return -EPERM; if (flags & MCL_CURRENT) lru_add_drain_all(); /* flush pagevec */ lock_limit = rlimit(RLIMIT_MEMLOCK); lock_limit >>= PAGE_SHIFT; if (down_write_killable(&current->mm->mmap_sem)) return -EINTR; ret = -ENOMEM; if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) || capable(CAP_IPC_LOCK)) ret = apply_mlockall_flags(flags); up_write(&current->mm->mmap_sem); if (!ret && (flags & MCL_CURRENT)) mm_populate(0, TASK_SIZE); return ret; } SYSCALL_DEFINE0(munlockall) { int ret; if (down_write_killable(&current->mm->mmap_sem)) return -EINTR; ret = apply_mlockall_flags(0); up_write(&current->mm->mmap_sem); return ret; } /* * Objects with different lifetime than processes (SHM_LOCK and SHM_HUGETLB * shm segments) get accounted against the user_struct instead. */ static DEFINE_SPINLOCK(shmlock_user_lock); int user_shm_lock(size_t size, struct user_struct *user) { unsigned long lock_limit, locked; int allowed = 0; locked = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; lock_limit = rlimit(RLIMIT_MEMLOCK); if (lock_limit == RLIM_INFINITY) allowed = 1; lock_limit >>= PAGE_SHIFT; spin_lock(&shmlock_user_lock); if (!allowed && locked + user->locked_shm > lock_limit && !capable(CAP_IPC_LOCK)) goto out; get_uid(user); user->locked_shm += locked; allowed = 1; out: spin_unlock(&shmlock_user_lock); return allowed; } void user_shm_unlock(size_t size, struct user_struct *user) { spin_lock(&shmlock_user_lock); user->locked_shm -= (size + PAGE_SIZE - 1) >> PAGE_SHIFT; spin_unlock(&shmlock_user_lock); free_uid(user); }
/* * linux/mm/mlock.c * * (C) Copyright 1995 Linus Torvalds * (C) Copyright 2002 Christoph Hellwig */ #include <linux/capability.h> #include <linux/mman.h> #include <linux/mm.h> #include <linux/sched/user.h> #include <linux/swap.h> #include <linux/swapops.h> #include <linux/pagemap.h> #include <linux/pagevec.h> #include <linux/mempolicy.h> #include <linux/syscalls.h> #include <linux/sched.h> #include <linux/export.h> #include <linux/rmap.h> #include <linux/mmzone.h> #include <linux/hugetlb.h> #include <linux/memcontrol.h> #include <linux/mm_inline.h> #include "internal.h" bool can_do_mlock(void) { if (rlimit(RLIMIT_MEMLOCK) != 0) return true; if (capable(CAP_IPC_LOCK)) return true; return false; } EXPORT_SYMBOL(can_do_mlock); /* * Mlocked pages are marked with PageMlocked() flag for efficient testing * in vmscan and, possibly, the fault path; and to support semi-accurate * statistics. * * An mlocked page [PageMlocked(page)] is unevictable. As such, it will * be placed on the LRU "unevictable" list, rather than the [in]active lists. * The unevictable list is an LRU sibling list to the [in]active lists. * PageUnevictable is set to indicate the unevictable state. * * When lazy mlocking via vmscan, it is important to ensure that the * vma's VM_LOCKED status is not concurrently being modified, otherwise we * may have mlocked a page that is being munlocked. So lazy mlock must take * the mmap_sem for read, and verify that the vma really is locked * (see mm/rmap.c). */ /* * LRU accounting for clear_page_mlock() */ void clear_page_mlock(struct page *page) { if (!TestClearPageMlocked(page)) return; mod_zone_page_state(page_zone(page), NR_MLOCK, -hpage_nr_pages(page)); count_vm_event(UNEVICTABLE_PGCLEARED); if (!isolate_lru_page(page)) { putback_lru_page(page); } else { /* * We lost the race. the page already moved to evictable list. */ if (PageUnevictable(page)) count_vm_event(UNEVICTABLE_PGSTRANDED); } } /* * Mark page as mlocked if not already. * If page on LRU, isolate and putback to move to unevictable list. */ void mlock_vma_page(struct page *page) { /* Serialize with page migration */ BUG_ON(!PageLocked(page)); VM_BUG_ON_PAGE(PageTail(page), page); VM_BUG_ON_PAGE(PageCompound(page) && PageDoubleMap(page), page); if (!TestSetPageMlocked(page)) { mod_zone_page_state(page_zone(page), NR_MLOCK, hpage_nr_pages(page)); count_vm_event(UNEVICTABLE_PGMLOCKED); if (!isolate_lru_page(page)) putback_lru_page(page); } } /* * Isolate a page from LRU with optional get_page() pin. * Assumes lru_lock already held and page already pinned. */ static bool __munlock_isolate_lru_page(struct page *page, bool getpage) { if (PageLRU(page)) { struct lruvec *lruvec; lruvec = mem_cgroup_page_lruvec(page, page_pgdat(page)); if (getpage) get_page(page); ClearPageLRU(page); del_page_from_lru_list(page, lruvec, page_lru(page)); return true; } return false; } /* * Finish munlock after successful page isolation * * Page must be locked. This is a wrapper for try_to_munlock() * and putback_lru_page() with munlock accounting. */ static void __munlock_isolated_page(struct page *page) { /* * Optimization: if the page was mapped just once, that's our mapping * and we don't need to check all the other vmas. */ if (page_mapcount(page) > 1) try_to_munlock(page); /* Did try_to_unlock() succeed or punt? */ if (!PageMlocked(page)) count_vm_event(UNEVICTABLE_PGMUNLOCKED); putback_lru_page(page); } /* * Accounting for page isolation fail during munlock * * Performs accounting when page isolation fails in munlock. 
There is nothing * else to do because it means some other task has already removed the page * from the LRU. putback_lru_page() will take care of removing the page from * the unevictable list, if necessary. vmscan [page_referenced()] will move * the page back to the unevictable list if some other vma has it mlocked. */ static void __munlock_isolation_failed(struct page *page) { if (PageUnevictable(page)) __count_vm_event(UNEVICTABLE_PGSTRANDED); else __count_vm_event(UNEVICTABLE_PGMUNLOCKED); } /** * munlock_vma_page - munlock a vma page * @page - page to be unlocked, either a normal page or THP page head * * returns the size of the page as a page mask (0 for normal page, * HPAGE_PMD_NR - 1 for THP head page) * * called from munlock()/munmap() path with page supposedly on the LRU. * When we munlock a page, because the vma where we found the page is being * munlock()ed or munmap()ed, we want to check whether other vmas hold the * page locked so that we can leave it on the unevictable lru list and not * bother vmscan with it. However, to walk the page's rmap list in * try_to_munlock() we must isolate the page from the LRU. If some other * task has removed the page from the LRU, we won't be able to do that. * So we clear the PageMlocked as we might not get another chance. If we * can't isolate the page, we leave it for putback_lru_page() and vmscan * [page_referenced()/try_to_unmap()] to deal with. */ unsigned int munlock_vma_page(struct page *page) { int nr_pages; struct zone *zone = page_zone(page); /* For try_to_munlock() and to serialize with page migration */ BUG_ON(!PageLocked(page)); VM_BUG_ON_PAGE(PageTail(page), page); /* * Serialize with any parallel __split_huge_page_refcount() which * might otherwise copy PageMlocked to part of the tail pages before * we clear it in the head page. It also stabilizes hpage_nr_pages(). */ spin_lock_irq(zone_lru_lock(zone)); if (!TestClearPageMlocked(page)) { /* Potentially, PTE-mapped THP: do not skip the rest PTEs */ nr_pages = 1; goto unlock_out; } nr_pages = hpage_nr_pages(page); __mod_zone_page_state(zone, NR_MLOCK, -nr_pages); if (__munlock_isolate_lru_page(page, true)) { spin_unlock_irq(zone_lru_lock(zone)); __munlock_isolated_page(page); goto out; } __munlock_isolation_failed(page); unlock_out: spin_unlock_irq(zone_lru_lock(zone)); out: return nr_pages - 1; } /* * convert get_user_pages() return value to posix mlock() error */ static int __mlock_posix_error_return(long retval) { if (retval == -EFAULT) retval = -ENOMEM; else if (retval == -ENOMEM) retval = -EAGAIN; return retval; } /* * Prepare page for fast batched LRU putback via putback_lru_evictable_pagevec() * * The fast path is available only for evictable pages with single mapping. * Then we can bypass the per-cpu pvec and get better performance. * when mapcount > 1 we need try_to_munlock() which can fail. * when !page_evictable(), we need the full redo logic of putback_lru_page to * avoid leaving evictable page in unevictable list. * * In case of success, @page is added to @pvec and @pgrescued is incremented * in case that the page was previously unevictable. @page is also unlocked. 
*/ static bool __putback_lru_fast_prepare(struct page *page, struct pagevec *pvec, int *pgrescued) { VM_BUG_ON_PAGE(PageLRU(page), page); VM_BUG_ON_PAGE(!PageLocked(page), page); if (page_mapcount(page) <= 1 && page_evictable(page)) { pagevec_add(pvec, page); if (TestClearPageUnevictable(page)) (*pgrescued)++; unlock_page(page); return true; } return false; } /* * Putback multiple evictable pages to the LRU * * Batched putback of evictable pages that bypasses the per-cpu pvec. Some of * the pages might have meanwhile become unevictable but that is OK. */ static void __putback_lru_fast(struct pagevec *pvec, int pgrescued) { count_vm_events(UNEVICTABLE_PGMUNLOCKED, pagevec_count(pvec)); /* *__pagevec_lru_add() calls release_pages() so we don't call * put_page() explicitly */ __pagevec_lru_add(pvec); count_vm_events(UNEVICTABLE_PGRESCUED, pgrescued); } /* * Munlock a batch of pages from the same zone * * The work is split to two main phases. First phase clears the Mlocked flag * and attempts to isolate the pages, all under a single zone lru lock. * The second phase finishes the munlock only for pages where isolation * succeeded. * * Note that the pagevec may be modified during the process. */ static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone) { int i; int nr = pagevec_count(pvec); int delta_munlocked = -nr; struct pagevec pvec_putback; int pgrescued = 0; pagevec_init(&pvec_putback, 0); /* Phase 1: page isolation */ spin_lock_irq(zone_lru_lock(zone)); for (i = 0; i < nr; i++) { struct page *page = pvec->pages[i]; if (TestClearPageMlocked(page)) { /* * We already have pin from follow_page_mask() * so we can spare the get_page() here. */ if (__munlock_isolate_lru_page(page, false)) continue; else __munlock_isolation_failed(page); } else { delta_munlocked++; } /* * We won't be munlocking this page in the next phase * but we still need to release the follow_page_mask() * pin. We cannot do it under lru_lock however. If it's * the last pin, __page_cache_release() would deadlock. */ pagevec_add(&pvec_putback, pvec->pages[i]); pvec->pages[i] = NULL; } __mod_zone_page_state(zone, NR_MLOCK, delta_munlocked); spin_unlock_irq(zone_lru_lock(zone)); /* Now we can release pins of pages that we are not munlocking */ pagevec_release(&pvec_putback); /* Phase 2: page munlock */ for (i = 0; i < nr; i++) { struct page *page = pvec->pages[i]; if (page) { lock_page(page); if (!__putback_lru_fast_prepare(page, &pvec_putback, &pgrescued)) { /* * Slow path. We don't want to lose the last * pin before unlock_page() */ get_page(page); /* for putback_lru_page() */ __munlock_isolated_page(page); unlock_page(page); put_page(page); /* from follow_page_mask() */ } } } /* * Phase 3: page putback for pages that qualified for the fast path * This will also call put_page() to return pin from follow_page_mask() */ if (pagevec_count(&pvec_putback)) __putback_lru_fast(&pvec_putback, pgrescued); } /* * Fill up pagevec for __munlock_pagevec using pte walk * * The function expects that the struct page corresponding to @start address is * a non-TPH page already pinned and in the @pvec, and that it belongs to @zone. * * The rest of @pvec is filled by subsequent pages within the same pmd and same * zone, as long as the pte's are present and vm_normal_page() succeeds. These * pages also get pinned. * * Returns the address of the next page that should be scanned. This equals * @start + PAGE_SIZE when no page could be added by the pte walk. 
*/ static unsigned long __munlock_pagevec_fill(struct pagevec *pvec, struct vm_area_struct *vma, int zoneid, unsigned long start, unsigned long end) { pte_t *pte; spinlock_t *ptl; /* * Initialize pte walk starting at the already pinned page where we * are sure that there is a pte, as it was pinned under the same * mmap_sem write op. */ pte = get_locked_pte(vma->vm_mm, start, &ptl); /* Make sure we do not cross the page table boundary */ end = pgd_addr_end(start, end); end = p4d_addr_end(start, end); end = pud_addr_end(start, end); end = pmd_addr_end(start, end); /* The page next to the pinned page is the first we will try to get */ start += PAGE_SIZE; while (start < end) { struct page *page = NULL; pte++; if (pte_present(*pte)) page = vm_normal_page(vma, start, *pte); /* * Break if page could not be obtained or the page's node+zone does not * match */ if (!page || page_zone_id(page) != zoneid) break; /* * Do not use pagevec for PTE-mapped THP, * munlock_vma_pages_range() will handle them. */ if (PageTransCompound(page)) break; get_page(page); /* * Increase the address that will be returned *before* the * eventual break due to pvec becoming full by adding the page */ start += PAGE_SIZE; if (pagevec_add(pvec, page) == 0) break; } pte_unmap_unlock(pte, ptl); return start; } /* * munlock_vma_pages_range() - munlock all pages in the vma range.' * @vma - vma containing range to be munlock()ed. * @start - start address in @vma of the range * @end - end of range in @vma. * * For mremap(), munmap() and exit(). * * Called with @vma VM_LOCKED. * * Returns with VM_LOCKED cleared. Callers must be prepared to * deal with this. * * We don't save and restore VM_LOCKED here because pages are * still on lru. In unmap path, pages might be scanned by reclaim * and re-mlocked by try_to_{munlock|unmap} before we unmap and * free them. This will result in freeing mlocked pages. */ void munlock_vma_pages_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { vma->vm_flags &= VM_LOCKED_CLEAR_MASK; while (start < end) { struct page *page; unsigned int page_mask = 0; unsigned long page_increm; struct pagevec pvec; struct zone *zone; int zoneid; pagevec_init(&pvec, 0); /* * Although FOLL_DUMP is intended for get_dump_page(), * it just so happens that its special treatment of the * ZERO_PAGE (returning an error instead of doing get_page) * suits munlock very well (and if somehow an abnormal page * has sneaked into the range, we won't oops here: great). */ page = follow_page(vma, start, FOLL_GET | FOLL_DUMP); if (page && !IS_ERR(page)) { if (PageTransTail(page)) { VM_BUG_ON_PAGE(PageMlocked(page), page); put_page(page); /* follow_page_mask() */ } else if (PageTransHuge(page)) { lock_page(page); /* * Any THP page found by follow_page_mask() may * have gotten split before reaching * munlock_vma_page(), so we need to compute * the page_mask here instead. */ page_mask = munlock_vma_page(page); unlock_page(page); put_page(page); /* follow_page_mask() */ } else { /* * Non-huge pages are handled in batches via * pagevec. The pin from follow_page_mask() * prevents them from collapsing by THP. */ pagevec_add(&pvec, page); zone = page_zone(page); zoneid = page_zone_id(page); /* * Try to fill the rest of pagevec using fast * pte walk. This will also update start to * the next page to process. Then munlock the * pagevec. 
*/ start = __munlock_pagevec_fill(&pvec, vma, zoneid, start, end); __munlock_pagevec(&pvec, zone); goto next; } } page_increm = 1 + page_mask; start += page_increm * PAGE_SIZE; next: cond_resched(); } } /* * mlock_fixup - handle mlock[all]/munlock[all] requests. * * Filters out "special" vmas -- VM_LOCKED never gets set for these, and * munlock is a no-op. However, for some special vmas, we go ahead and * populate the ptes. * * For vmas that pass the filters, merge/split as appropriate. */ static int mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev, unsigned long start, unsigned long end, vm_flags_t newflags) { struct mm_struct *mm = vma->vm_mm; pgoff_t pgoff; int nr_pages; int ret = 0; int lock = !!(newflags & VM_LOCKED); vm_flags_t old_flags = vma->vm_flags; if (newflags == vma->vm_flags || (vma->vm_flags & VM_SPECIAL) || is_vm_hugetlb_page(vma) || vma == get_gate_vma(current->mm)) /* don't set VM_LOCKED or VM_LOCKONFAULT and don't count */ goto out; pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT); *prev = vma_merge(mm, *prev, start, end, newflags, vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma), vma->vm_userfaultfd_ctx); if (*prev) { vma = *prev; goto success; } if (start != vma->vm_start) { ret = split_vma(mm, vma, start, 1); if (ret) goto out; } if (end != vma->vm_end) { ret = split_vma(mm, vma, end, 0); if (ret) goto out; } success: /* * Keep track of amount of locked VM. */ nr_pages = (end - start) >> PAGE_SHIFT; if (!lock) nr_pages = -nr_pages; else if (old_flags & VM_LOCKED) nr_pages = 0; mm->locked_vm += nr_pages; /* * vm_flags is protected by the mmap_sem held in write mode. * It's okay if try_to_unmap_one unmaps a page just after we * set VM_LOCKED, populate_vma_page_range will bring it back. */ if (lock) vma->vm_flags = newflags; else munlock_vma_pages_range(vma, start, end); out: *prev = vma; return ret; } static int apply_vma_lock_flags(unsigned long start, size_t len, vm_flags_t flags) { unsigned long nstart, end, tmp; struct vm_area_struct * vma, * prev; int error; VM_BUG_ON(offset_in_page(start)); VM_BUG_ON(len != PAGE_ALIGN(len)); end = start + len; if (end < start) return -EINVAL; if (end == start) return 0; vma = find_vma(current->mm, start); if (!vma || vma->vm_start > start) return -ENOMEM; prev = vma->vm_prev; if (start > vma->vm_start) prev = vma; for (nstart = start ; ; ) { vm_flags_t newflags = vma->vm_flags & VM_LOCKED_CLEAR_MASK; newflags |= flags; /* Here we know that vma->vm_start <= nstart < vma->vm_end. */ tmp = vma->vm_end; if (tmp > end) tmp = end; error = mlock_fixup(vma, &prev, nstart, tmp, newflags); if (error) break; nstart = tmp; if (nstart < prev->vm_end) nstart = prev->vm_end; if (nstart >= end) break; vma = prev->vm_next; if (!vma || vma->vm_start != nstart) { error = -ENOMEM; break; } } return error; } /* * Go through vma areas and sum size of mlocked * vma pages, as return value. * Note deferred memory locking case(mlock2(,,MLOCK_ONFAULT) * is also counted. 
* Return value: previously mlocked page counts */ static int count_mm_mlocked_page_nr(struct mm_struct *mm, unsigned long start, size_t len) { struct vm_area_struct *vma; int count = 0; if (mm == NULL) mm = current->mm; vma = find_vma(mm, start); if (vma == NULL) vma = mm->mmap; for (; vma ; vma = vma->vm_next) { if (start >= vma->vm_end) continue; if (start + len <= vma->vm_start) break; if (vma->vm_flags & VM_LOCKED) { if (start > vma->vm_start) count -= (start - vma->vm_start); if (start + len < vma->vm_end) { count += start + len - vma->vm_start; break; } count += vma->vm_end - vma->vm_start; } } return count >> PAGE_SHIFT; } static __must_check int do_mlock(unsigned long start, size_t len, vm_flags_t flags) { unsigned long locked; unsigned long lock_limit; int error = -ENOMEM; if (!can_do_mlock()) return -EPERM; lru_add_drain_all(); /* flush pagevec */ len = PAGE_ALIGN(len + (offset_in_page(start))); start &= PAGE_MASK; lock_limit = rlimit(RLIMIT_MEMLOCK); lock_limit >>= PAGE_SHIFT; locked = len >> PAGE_SHIFT; if (down_write_killable(&current->mm->mmap_sem)) return -EINTR; locked += current->mm->locked_vm; if ((locked > lock_limit) && (!capable(CAP_IPC_LOCK))) { /* * It is possible that the regions requested intersect with * previously mlocked areas, that part area in "mm->locked_vm" * should not be counted to new mlock increment count. So check * and adjust locked count if necessary. */ locked -= count_mm_mlocked_page_nr(current->mm, start, len); } /* check against resource limits */ if ((locked <= lock_limit) || capable(CAP_IPC_LOCK)) error = apply_vma_lock_flags(start, len, flags); up_write(&current->mm->mmap_sem); if (error) return error; error = __mm_populate(start, len, 0); if (error) return __mlock_posix_error_return(error); return 0; } SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len) { return do_mlock(start, len, VM_LOCKED); } SYSCALL_DEFINE3(mlock2, unsigned long, start, size_t, len, int, flags) { vm_flags_t vm_flags = VM_LOCKED; if (flags & ~MLOCK_ONFAULT) return -EINVAL; if (flags & MLOCK_ONFAULT) vm_flags |= VM_LOCKONFAULT; return do_mlock(start, len, vm_flags); } SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len) { int ret; len = PAGE_ALIGN(len + (offset_in_page(start))); start &= PAGE_MASK; if (down_write_killable(&current->mm->mmap_sem)) return -EINTR; ret = apply_vma_lock_flags(start, len, 0); up_write(&current->mm->mmap_sem); return ret; } /* * Take the MCL_* flags passed into mlockall (or 0 if called from munlockall) * and translate into the appropriate modifications to mm->def_flags and/or the * flags for all current VMAs. * * There are a couple of subtleties with this. If mlockall() is called multiple * times with different flags, the values do not necessarily stack. If mlockall * is called once including the MCL_FUTURE flag and then a second time without * it, VM_LOCKED and VM_LOCKONFAULT will be cleared from mm->def_flags. 
*/ static int apply_mlockall_flags(int flags) { struct vm_area_struct * vma, * prev = NULL; vm_flags_t to_add = 0; current->mm->def_flags &= VM_LOCKED_CLEAR_MASK; if (flags & MCL_FUTURE) { current->mm->def_flags |= VM_LOCKED; if (flags & MCL_ONFAULT) current->mm->def_flags |= VM_LOCKONFAULT; if (!(flags & MCL_CURRENT)) goto out; } if (flags & MCL_CURRENT) { to_add |= VM_LOCKED; if (flags & MCL_ONFAULT) to_add |= VM_LOCKONFAULT; } for (vma = current->mm->mmap; vma ; vma = prev->vm_next) { vm_flags_t newflags; newflags = vma->vm_flags & VM_LOCKED_CLEAR_MASK; newflags |= to_add; /* Ignore errors */ mlock_fixup(vma, &prev, vma->vm_start, vma->vm_end, newflags); cond_resched_rcu_qs(); } out: return 0; } SYSCALL_DEFINE1(mlockall, int, flags) { unsigned long lock_limit; int ret; if (!flags || (flags & ~(MCL_CURRENT | MCL_FUTURE | MCL_ONFAULT))) return -EINVAL; if (!can_do_mlock()) return -EPERM; if (flags & MCL_CURRENT) lru_add_drain_all(); /* flush pagevec */ lock_limit = rlimit(RLIMIT_MEMLOCK); lock_limit >>= PAGE_SHIFT; if (down_write_killable(&current->mm->mmap_sem)) return -EINTR; ret = -ENOMEM; if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) || capable(CAP_IPC_LOCK)) ret = apply_mlockall_flags(flags); up_write(&current->mm->mmap_sem); if (!ret && (flags & MCL_CURRENT)) mm_populate(0, TASK_SIZE); return ret; } SYSCALL_DEFINE0(munlockall) { int ret; if (down_write_killable(&current->mm->mmap_sem)) return -EINTR; ret = apply_mlockall_flags(0); up_write(&current->mm->mmap_sem); return ret; } /* * Objects with different lifetime than processes (SHM_LOCK and SHM_HUGETLB * shm segments) get accounted against the user_struct instead. */ static DEFINE_SPINLOCK(shmlock_user_lock); int user_shm_lock(size_t size, struct user_struct *user) { unsigned long lock_limit, locked; int allowed = 0; locked = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; lock_limit = rlimit(RLIMIT_MEMLOCK); if (lock_limit == RLIM_INFINITY) allowed = 1; lock_limit >>= PAGE_SHIFT; spin_lock(&shmlock_user_lock); if (!allowed && locked + user->locked_shm > lock_limit && !capable(CAP_IPC_LOCK)) goto out; get_uid(user); user->locked_shm += locked; allowed = 1; out: spin_unlock(&shmlock_user_lock); return allowed; } void user_shm_unlock(size_t size, struct user_struct *user) { spin_lock(&shmlock_user_lock); user->locked_shm -= (size + PAGE_SIZE - 1) >> PAGE_SHIFT; spin_unlock(&shmlock_user_lock); free_uid(user); }
static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone) { int i; int nr = pagevec_count(pvec); int delta_munlocked; struct pagevec pvec_putback; int pgrescued = 0; pagevec_init(&pvec_putback, 0); /* Phase 1: page isolation */ spin_lock_irq(zone_lru_lock(zone)); for (i = 0; i < nr; i++) { struct page *page = pvec->pages[i]; if (TestClearPageMlocked(page)) { /* * We already have pin from follow_page_mask() * so we can spare the get_page() here. */ if (__munlock_isolate_lru_page(page, false)) continue; else __munlock_isolation_failed(page); } /* * We won't be munlocking this page in the next phase * but we still need to release the follow_page_mask() * pin. We cannot do it under lru_lock however. If it's * the last pin, __page_cache_release() would deadlock. */ pagevec_add(&pvec_putback, pvec->pages[i]); pvec->pages[i] = NULL; } delta_munlocked = -nr + pagevec_count(&pvec_putback); __mod_zone_page_state(zone, NR_MLOCK, delta_munlocked); spin_unlock_irq(zone_lru_lock(zone)); /* Now we can release pins of pages that we are not munlocking */ pagevec_release(&pvec_putback); /* Phase 2: page munlock */ for (i = 0; i < nr; i++) { struct page *page = pvec->pages[i]; if (page) { lock_page(page); if (!__putback_lru_fast_prepare(page, &pvec_putback, &pgrescued)) { /* * Slow path. We don't want to lose the last * pin before unlock_page() */ get_page(page); /* for putback_lru_page() */ __munlock_isolated_page(page); unlock_page(page); put_page(page); /* from follow_page_mask() */ } } } /* * Phase 3: page putback for pages that qualified for the fast path * This will also call put_page() to return pin from follow_page_mask() */ if (pagevec_count(&pvec_putback)) __putback_lru_fast(&pvec_putback, pgrescued); }
static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone) { int i; int nr = pagevec_count(pvec); int delta_munlocked = -nr; struct pagevec pvec_putback; int pgrescued = 0; pagevec_init(&pvec_putback, 0); /* Phase 1: page isolation */ spin_lock_irq(zone_lru_lock(zone)); for (i = 0; i < nr; i++) { struct page *page = pvec->pages[i]; if (TestClearPageMlocked(page)) { /* * We already have pin from follow_page_mask() * so we can spare the get_page() here. */ if (__munlock_isolate_lru_page(page, false)) continue; else __munlock_isolation_failed(page); } else { delta_munlocked++; } /* * We won't be munlocking this page in the next phase * but we still need to release the follow_page_mask() * pin. We cannot do it under lru_lock however. If it's * the last pin, __page_cache_release() would deadlock. */ pagevec_add(&pvec_putback, pvec->pages[i]); pvec->pages[i] = NULL; } __mod_zone_page_state(zone, NR_MLOCK, delta_munlocked); spin_unlock_irq(zone_lru_lock(zone)); /* Now we can release pins of pages that we are not munlocking */ pagevec_release(&pvec_putback); /* Phase 2: page munlock */ for (i = 0; i < nr; i++) { struct page *page = pvec->pages[i]; if (page) { lock_page(page); if (!__putback_lru_fast_prepare(page, &pvec_putback, &pgrescued)) { /* * Slow path. We don't want to lose the last * pin before unlock_page() */ get_page(page); /* for putback_lru_page() */ __munlock_isolated_page(page); unlock_page(page); put_page(page); /* from follow_page_mask() */ } } } /* * Phase 3: page putback for pages that qualified for the fast path * This will also call put_page() to return pin from follow_page_mask() */ if (pagevec_count(&pvec_putback)) __putback_lru_fast(&pvec_putback, pgrescued); }
{'added': [(287, '\tint delta_munlocked = -nr;'), (307, '\t\t} else {'), (308, '\t\t\tdelta_munlocked++;')], 'deleted': [(287, '\tint delta_munlocked;'), (318, '\tdelta_munlocked = -nr + pagevec_count(&pvec_putback);')]}
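The diff above is terse, so here is a minimal, self-contained C simulation of the phase-1 accounting it changes. This is not kernel code: struct fake_page, delta_old and delta_new are invented stand-ins for illustration only. It shows that the old formula, -nr + pagevec_count(&pvec_putback), credits back every page placed on the putback pagevec — including pages whose Mlocked flag *was* cleared but whose LRU isolation failed — leaving NR_MLOCK inflated. The fixed version starts at -nr and adds back only pages for which TestClearPageMlocked() failed.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for a page in this simulation (not kernel code). */
struct fake_page {
	bool mlocked;  /* TestClearPageMlocked() would succeed */
	bool isolated; /* __munlock_isolate_lru_page() would succeed */
};

/* Old accounting: delta = -nr + count of pages put back. */
static int delta_old(const struct fake_page *pages, int nr)
{
	int putback = 0;

	for (int i = 0; i < nr; i++) {
		if (pages[i].mlocked && pages[i].isolated)
			continue;	/* isolated pages are not put back */
		putback++;		/* !mlocked and isolation-failed alike */
	}
	return -nr + putback;
}

/* Fixed accounting: only pages that were not Mlocked are added back. */
static int delta_new(const struct fake_page *pages, int nr)
{
	int delta = -nr;

	for (int i = 0; i < nr; i++)
		if (!pages[i].mlocked)	/* TestClearPageMlocked() failed */
			delta++;
	return delta;
}

int main(void)
{
	const struct fake_page pages[] = {
		{ .mlocked = true,  .isolated = true  }, /* normal case */
		{ .mlocked = true,  .isolated = false }, /* flag cleared, isolation failed */
		{ .mlocked = false, .isolated = false }, /* raced: flag already clear */
	};
	int nr = 3;

	/* Two pages had Mlocked cleared, so the correct delta is -2.
	 * Old formula yields -1 (misses the isolation-failed page). */
	printf("old delta: %d, fixed delta: %d\n",
	       delta_old(pages, nr), delta_new(pages, nr));
	return 0;
}

Running this prints "old delta: -1, fixed delta: -2": exactly the NR_MLOCK drift the patch removes for pages that lose the isolation race.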
3
2
496
2899
https://github.com/torvalds/linux
CVE-2017-18221
['CWE-20']
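For context on how the patched path is reached from userspace, here is an illustrative program (not part of the record) that locks and then unlocks an anonymous mapping; munlock() is what ultimately drives munlock_vma_pages_range() and the batched __munlock_pagevec() shown above. Whether a given page actually takes the batched pagevec path depends on kernel internals, so treat the inline comments as conceptual rather than guaranteed.

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	size_t len = 4 * (size_t)sysconf(_SC_PAGESIZE);

	/* Anonymous mapping so mlock() has ordinary pages to pin. */
	char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (buf == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	memset(buf, 0, len);		/* fault the pages in */

	if (mlock(buf, len) != 0) {	/* sets VM_LOCKED, raises NR_MLOCK */
		perror("mlock");
		return 1;
	}
	if (munlock(buf, len) != 0) {	/* munlock_vma_pages_range() path */
		perror("munlock");
		return 1;
	}

	munmap(buf, len);
	return 0;
}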
x86.c
handle_emulation_failure
/* * Kernel-based Virtual Machine driver for Linux * * derived from drivers/kvm/kvm_main.c * * Copyright (C) 2006 Qumranet, Inc. * Copyright (C) 2008 Qumranet, Inc. * Copyright IBM Corporation, 2008 * Copyright 2010 Red Hat, Inc. and/or its affiliates. * * Authors: * Avi Kivity <avi@qumranet.com> * Yaniv Kamay <yaniv@qumranet.com> * Amit Shah <amit.shah@qumranet.com> * Ben-Ami Yassour <benami@il.ibm.com> * * This work is licensed under the terms of the GNU GPL, version 2. See * the COPYING file in the top-level directory. * */ #include <linux/kvm_host.h> #include "irq.h" #include "mmu.h" #include "i8254.h" #include "tss.h" #include "kvm_cache_regs.h" #include "x86.h" #include "cpuid.h" #include <linux/clocksource.h> #include <linux/interrupt.h> #include <linux/kvm.h> #include <linux/fs.h> #include <linux/vmalloc.h> #include <linux/module.h> #include <linux/mman.h> #include <linux/highmem.h> #include <linux/iommu.h> #include <linux/intel-iommu.h> #include <linux/cpufreq.h> #include <linux/user-return-notifier.h> #include <linux/srcu.h> #include <linux/slab.h> #include <linux/perf_event.h> #include <linux/uaccess.h> #include <linux/hash.h> #include <linux/pci.h> #include <linux/timekeeper_internal.h> #include <linux/pvclock_gtod.h> #include <trace/events/kvm.h> #define CREATE_TRACE_POINTS #include "trace.h" #include <asm/debugreg.h> #include <asm/msr.h> #include <asm/desc.h> #include <asm/mtrr.h> #include <asm/mce.h> #include <asm/i387.h> #include <asm/fpu-internal.h> /* Ugh! */ #include <asm/xcr.h> #include <asm/pvclock.h> #include <asm/div64.h> #define MAX_IO_MSRS 256 #define KVM_MAX_MCE_BANKS 32 #define KVM_MCE_CAP_SUPPORTED (MCG_CTL_P | MCG_SER_P) #define emul_to_vcpu(ctxt) \ container_of(ctxt, struct kvm_vcpu, arch.emulate_ctxt) /* EFER defaults: * - enable syscall per default because its emulated by KVM * - enable LME and LMA per default on 64 bit KVM */ #ifdef CONFIG_X86_64 static u64 __read_mostly efer_reserved_bits = ~((u64)(EFER_SCE | EFER_LME | EFER_LMA)); #else static u64 __read_mostly efer_reserved_bits = ~((u64)EFER_SCE); #endif #define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU static void update_cr8_intercept(struct kvm_vcpu *vcpu); static void process_nmi(struct kvm_vcpu *vcpu); static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags); struct kvm_x86_ops *kvm_x86_ops; EXPORT_SYMBOL_GPL(kvm_x86_ops); static bool ignore_msrs = 0; module_param(ignore_msrs, bool, S_IRUGO | S_IWUSR); unsigned int min_timer_period_us = 500; module_param(min_timer_period_us, uint, S_IRUGO | S_IWUSR); bool kvm_has_tsc_control; EXPORT_SYMBOL_GPL(kvm_has_tsc_control); u32 kvm_max_guest_tsc_khz; EXPORT_SYMBOL_GPL(kvm_max_guest_tsc_khz); /* tsc tolerance in parts per million - default to 1/2 of the NTP threshold */ static u32 tsc_tolerance_ppm = 250; module_param(tsc_tolerance_ppm, uint, S_IRUGO | S_IWUSR); static bool backwards_tsc_observed = false; #define KVM_NR_SHARED_MSRS 16 struct kvm_shared_msrs_global { int nr; u32 msrs[KVM_NR_SHARED_MSRS]; }; struct kvm_shared_msrs { struct user_return_notifier urn; bool registered; struct kvm_shared_msr_values { u64 host; u64 curr; } values[KVM_NR_SHARED_MSRS]; }; static struct kvm_shared_msrs_global __read_mostly shared_msrs_global; static struct kvm_shared_msrs __percpu *shared_msrs; struct kvm_stats_debugfs_item debugfs_entries[] = { { "pf_fixed", VCPU_STAT(pf_fixed) }, { "pf_guest", VCPU_STAT(pf_guest) }, { "tlb_flush", VCPU_STAT(tlb_flush) }, { "invlpg", 
VCPU_STAT(invlpg) }, { "exits", VCPU_STAT(exits) }, { "io_exits", VCPU_STAT(io_exits) }, { "mmio_exits", VCPU_STAT(mmio_exits) }, { "signal_exits", VCPU_STAT(signal_exits) }, { "irq_window", VCPU_STAT(irq_window_exits) }, { "nmi_window", VCPU_STAT(nmi_window_exits) }, { "halt_exits", VCPU_STAT(halt_exits) }, { "halt_wakeup", VCPU_STAT(halt_wakeup) }, { "hypercalls", VCPU_STAT(hypercalls) }, { "request_irq", VCPU_STAT(request_irq_exits) }, { "irq_exits", VCPU_STAT(irq_exits) }, { "host_state_reload", VCPU_STAT(host_state_reload) }, { "efer_reload", VCPU_STAT(efer_reload) }, { "fpu_reload", VCPU_STAT(fpu_reload) }, { "insn_emulation", VCPU_STAT(insn_emulation) }, { "insn_emulation_fail", VCPU_STAT(insn_emulation_fail) }, { "irq_injections", VCPU_STAT(irq_injections) }, { "nmi_injections", VCPU_STAT(nmi_injections) }, { "mmu_shadow_zapped", VM_STAT(mmu_shadow_zapped) }, { "mmu_pte_write", VM_STAT(mmu_pte_write) }, { "mmu_pte_updated", VM_STAT(mmu_pte_updated) }, { "mmu_pde_zapped", VM_STAT(mmu_pde_zapped) }, { "mmu_flooded", VM_STAT(mmu_flooded) }, { "mmu_recycled", VM_STAT(mmu_recycled) }, { "mmu_cache_miss", VM_STAT(mmu_cache_miss) }, { "mmu_unsync", VM_STAT(mmu_unsync) }, { "remote_tlb_flush", VM_STAT(remote_tlb_flush) }, { "largepages", VM_STAT(lpages) }, { NULL } }; u64 __read_mostly host_xcr0; static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt); static inline void kvm_async_pf_hash_reset(struct kvm_vcpu *vcpu) { int i; for (i = 0; i < roundup_pow_of_two(ASYNC_PF_PER_VCPU); i++) vcpu->arch.apf.gfns[i] = ~0; } static void kvm_on_user_return(struct user_return_notifier *urn) { unsigned slot; struct kvm_shared_msrs *locals = container_of(urn, struct kvm_shared_msrs, urn); struct kvm_shared_msr_values *values; for (slot = 0; slot < shared_msrs_global.nr; ++slot) { values = &locals->values[slot]; if (values->host != values->curr) { wrmsrl(shared_msrs_global.msrs[slot], values->host); values->curr = values->host; } } locals->registered = false; user_return_notifier_unregister(urn); } static void shared_msr_update(unsigned slot, u32 msr) { u64 value; unsigned int cpu = smp_processor_id(); struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu); /* only read, and nobody should modify it at this time, * so don't need lock */ if (slot >= shared_msrs_global.nr) { printk(KERN_ERR "kvm: invalid MSR slot!"); return; } rdmsrl_safe(msr, &value); smsr->values[slot].host = value; smsr->values[slot].curr = value; } void kvm_define_shared_msr(unsigned slot, u32 msr) { BUG_ON(slot >= KVM_NR_SHARED_MSRS); if (slot >= shared_msrs_global.nr) shared_msrs_global.nr = slot + 1; shared_msrs_global.msrs[slot] = msr; /* we need ensured the shared_msr_global have been updated */ smp_wmb(); } EXPORT_SYMBOL_GPL(kvm_define_shared_msr); static void kvm_shared_msr_cpu_online(void) { unsigned i; for (i = 0; i < shared_msrs_global.nr; ++i) shared_msr_update(i, shared_msrs_global.msrs[i]); } void kvm_set_shared_msr(unsigned slot, u64 value, u64 mask) { unsigned int cpu = smp_processor_id(); struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu); if (((value ^ smsr->values[slot].curr) & mask) == 0) return; smsr->values[slot].curr = value; wrmsrl(shared_msrs_global.msrs[slot], value); if (!smsr->registered) { smsr->urn.on_user_return = kvm_on_user_return; user_return_notifier_register(&smsr->urn); smsr->registered = true; } } EXPORT_SYMBOL_GPL(kvm_set_shared_msr); static void drop_user_return_notifiers(void) { unsigned int cpu = smp_processor_id(); struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, 
cpu); if (smsr->registered) kvm_on_user_return(&smsr->urn); } u64 kvm_get_apic_base(struct kvm_vcpu *vcpu) { return vcpu->arch.apic_base; } EXPORT_SYMBOL_GPL(kvm_get_apic_base); int kvm_set_apic_base(struct kvm_vcpu *vcpu, struct msr_data *msr_info) { u64 old_state = vcpu->arch.apic_base & (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE); u64 new_state = msr_info->data & (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE); u64 reserved_bits = ((~0ULL) << cpuid_maxphyaddr(vcpu)) | 0x2ff | (guest_cpuid_has_x2apic(vcpu) ? 0 : X2APIC_ENABLE); if (!msr_info->host_initiated && ((msr_info->data & reserved_bits) != 0 || new_state == X2APIC_ENABLE || (new_state == MSR_IA32_APICBASE_ENABLE && old_state == (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE)) || (new_state == (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE) && old_state == 0))) return 1; kvm_lapic_set_base(vcpu, msr_info->data); return 0; } EXPORT_SYMBOL_GPL(kvm_set_apic_base); asmlinkage __visible void kvm_spurious_fault(void) { /* Fault while not rebooting. We want the trace. */ BUG(); } EXPORT_SYMBOL_GPL(kvm_spurious_fault); #define EXCPT_BENIGN 0 #define EXCPT_CONTRIBUTORY 1 #define EXCPT_PF 2 static int exception_class(int vector) { switch (vector) { case PF_VECTOR: return EXCPT_PF; case DE_VECTOR: case TS_VECTOR: case NP_VECTOR: case SS_VECTOR: case GP_VECTOR: return EXCPT_CONTRIBUTORY; default: break; } return EXCPT_BENIGN; } #define EXCPT_FAULT 0 #define EXCPT_TRAP 1 #define EXCPT_ABORT 2 #define EXCPT_INTERRUPT 3 static int exception_type(int vector) { unsigned int mask; if (WARN_ON(vector > 31 || vector == NMI_VECTOR)) return EXCPT_INTERRUPT; mask = 1 << vector; /* #DB is trap, as instruction watchpoints are handled elsewhere */ if (mask & ((1 << DB_VECTOR) | (1 << BP_VECTOR) | (1 << OF_VECTOR))) return EXCPT_TRAP; if (mask & ((1 << DF_VECTOR) | (1 << MC_VECTOR))) return EXCPT_ABORT; /* Reserved exceptions will result in fault */ return EXCPT_FAULT; } static void kvm_multiple_exception(struct kvm_vcpu *vcpu, unsigned nr, bool has_error, u32 error_code, bool reinject) { u32 prev_nr; int class1, class2; kvm_make_request(KVM_REQ_EVENT, vcpu); if (!vcpu->arch.exception.pending) { queue: vcpu->arch.exception.pending = true; vcpu->arch.exception.has_error_code = has_error; vcpu->arch.exception.nr = nr; vcpu->arch.exception.error_code = error_code; vcpu->arch.exception.reinject = reinject; return; } /* to check exception */ prev_nr = vcpu->arch.exception.nr; if (prev_nr == DF_VECTOR) { /* triple fault -> shutdown */ kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); return; } class1 = exception_class(prev_nr); class2 = exception_class(nr); if ((class1 == EXCPT_CONTRIBUTORY && class2 == EXCPT_CONTRIBUTORY) || (class1 == EXCPT_PF && class2 != EXCPT_BENIGN)) { /* generate double fault per SDM Table 5-5 */ vcpu->arch.exception.pending = true; vcpu->arch.exception.has_error_code = true; vcpu->arch.exception.nr = DF_VECTOR; vcpu->arch.exception.error_code = 0; } else /* replace previous exception with a new one in a hope that instruction re-execution will regenerate lost exception */ goto queue; } void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr) { kvm_multiple_exception(vcpu, nr, false, 0, false); } EXPORT_SYMBOL_GPL(kvm_queue_exception); void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr) { kvm_multiple_exception(vcpu, nr, false, 0, true); } EXPORT_SYMBOL_GPL(kvm_requeue_exception); void kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err) { if (err) kvm_inject_gp(vcpu, 0); else kvm_x86_ops->skip_emulated_instruction(vcpu); } 
EXPORT_SYMBOL_GPL(kvm_complete_insn_gp); void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault) { ++vcpu->stat.pf_guest; vcpu->arch.cr2 = fault->address; kvm_queue_exception_e(vcpu, PF_VECTOR, fault->error_code); } EXPORT_SYMBOL_GPL(kvm_inject_page_fault); static bool kvm_propagate_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault) { if (mmu_is_nested(vcpu) && !fault->nested_page_fault) vcpu->arch.nested_mmu.inject_page_fault(vcpu, fault); else vcpu->arch.mmu.inject_page_fault(vcpu, fault); return fault->nested_page_fault; } void kvm_inject_nmi(struct kvm_vcpu *vcpu) { atomic_inc(&vcpu->arch.nmi_queued); kvm_make_request(KVM_REQ_NMI, vcpu); } EXPORT_SYMBOL_GPL(kvm_inject_nmi); void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code) { kvm_multiple_exception(vcpu, nr, true, error_code, false); } EXPORT_SYMBOL_GPL(kvm_queue_exception_e); void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code) { kvm_multiple_exception(vcpu, nr, true, error_code, true); } EXPORT_SYMBOL_GPL(kvm_requeue_exception_e); /* * Checks if cpl <= required_cpl; if true, return true. Otherwise queue * a #GP and return false. */ bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl) { if (kvm_x86_ops->get_cpl(vcpu) <= required_cpl) return true; kvm_queue_exception_e(vcpu, GP_VECTOR, 0); return false; } EXPORT_SYMBOL_GPL(kvm_require_cpl); /* * This function will be used to read from the physical memory of the currently * running guest. The difference to kvm_read_guest_page is that this function * can read from guest physical or from the guest's guest physical memory. */ int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, gfn_t ngfn, void *data, int offset, int len, u32 access) { struct x86_exception exception; gfn_t real_gfn; gpa_t ngpa; ngpa = gfn_to_gpa(ngfn); real_gfn = mmu->translate_gpa(vcpu, ngpa, access, &exception); if (real_gfn == UNMAPPED_GVA) return -EFAULT; real_gfn = gpa_to_gfn(real_gfn); return kvm_read_guest_page(vcpu->kvm, real_gfn, data, offset, len); } EXPORT_SYMBOL_GPL(kvm_read_guest_page_mmu); int kvm_read_nested_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, int offset, int len, u32 access) { return kvm_read_guest_page_mmu(vcpu, vcpu->arch.walk_mmu, gfn, data, offset, len, access); } /* * Load the pae pdptrs. Return true is they are all valid. 
*/ int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3) { gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT; unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2; int i; int ret; u64 pdpte[ARRAY_SIZE(mmu->pdptrs)]; ret = kvm_read_guest_page_mmu(vcpu, mmu, pdpt_gfn, pdpte, offset * sizeof(u64), sizeof(pdpte), PFERR_USER_MASK|PFERR_WRITE_MASK); if (ret < 0) { ret = 0; goto out; } for (i = 0; i < ARRAY_SIZE(pdpte); ++i) { if (is_present_gpte(pdpte[i]) && (pdpte[i] & vcpu->arch.mmu.rsvd_bits_mask[0][2])) { ret = 0; goto out; } } ret = 1; memcpy(mmu->pdptrs, pdpte, sizeof(mmu->pdptrs)); __set_bit(VCPU_EXREG_PDPTR, (unsigned long *)&vcpu->arch.regs_avail); __set_bit(VCPU_EXREG_PDPTR, (unsigned long *)&vcpu->arch.regs_dirty); out: return ret; } EXPORT_SYMBOL_GPL(load_pdptrs); static bool pdptrs_changed(struct kvm_vcpu *vcpu) { u64 pdpte[ARRAY_SIZE(vcpu->arch.walk_mmu->pdptrs)]; bool changed = true; int offset; gfn_t gfn; int r; if (is_long_mode(vcpu) || !is_pae(vcpu)) return false; if (!test_bit(VCPU_EXREG_PDPTR, (unsigned long *)&vcpu->arch.regs_avail)) return true; gfn = (kvm_read_cr3(vcpu) & ~31u) >> PAGE_SHIFT; offset = (kvm_read_cr3(vcpu) & ~31u) & (PAGE_SIZE - 1); r = kvm_read_nested_guest_page(vcpu, gfn, pdpte, offset, sizeof(pdpte), PFERR_USER_MASK | PFERR_WRITE_MASK); if (r < 0) goto out; changed = memcmp(pdpte, vcpu->arch.walk_mmu->pdptrs, sizeof(pdpte)) != 0; out: return changed; } int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) { unsigned long old_cr0 = kvm_read_cr0(vcpu); unsigned long update_bits = X86_CR0_PG | X86_CR0_WP | X86_CR0_CD | X86_CR0_NW; cr0 |= X86_CR0_ET; #ifdef CONFIG_X86_64 if (cr0 & 0xffffffff00000000UL) return 1; #endif cr0 &= ~CR0_RESERVED_BITS; if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD)) return 1; if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE)) return 1; if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) { #ifdef CONFIG_X86_64 if ((vcpu->arch.efer & EFER_LME)) { int cs_db, cs_l; if (!is_pae(vcpu)) return 1; kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l); if (cs_l) return 1; } else #endif if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu))) return 1; } if (!(cr0 & X86_CR0_PG) && kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE)) return 1; kvm_x86_ops->set_cr0(vcpu, cr0); if ((cr0 ^ old_cr0) & X86_CR0_PG) { kvm_clear_async_pf_completion_queue(vcpu); kvm_async_pf_hash_reset(vcpu); } if ((cr0 ^ old_cr0) & update_bits) kvm_mmu_reset_context(vcpu); return 0; } EXPORT_SYMBOL_GPL(kvm_set_cr0); void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw) { (void)kvm_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~0x0eul) | (msw & 0x0f)); } EXPORT_SYMBOL_GPL(kvm_lmsw); static void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu) { if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE) && !vcpu->guest_xcr0_loaded) { /* kvm_set_xcr() also depends on this */ xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0); vcpu->guest_xcr0_loaded = 1; } } static void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu) { if (vcpu->guest_xcr0_loaded) { if (vcpu->arch.xcr0 != host_xcr0) xsetbv(XCR_XFEATURE_ENABLED_MASK, host_xcr0); vcpu->guest_xcr0_loaded = 0; } } int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr) { u64 xcr0 = xcr; u64 old_xcr0 = vcpu->arch.xcr0; u64 valid_bits; /* Only support XCR_XFEATURE_ENABLED_MASK(xcr0) now */ if (index != XCR_XFEATURE_ENABLED_MASK) return 1; if (!(xcr0 & XSTATE_FP)) return 1; if ((xcr0 & XSTATE_YMM) && !(xcr0 & XSTATE_SSE)) return 1; /* * Do not allow the guest to set bits that we do not support * saving. 
However, xcr0 bit 0 is always set, even if the * emulated CPU does not support XSAVE (see fx_init). */ valid_bits = vcpu->arch.guest_supported_xcr0 | XSTATE_FP; if (xcr0 & ~valid_bits) return 1; if ((!(xcr0 & XSTATE_BNDREGS)) != (!(xcr0 & XSTATE_BNDCSR))) return 1; kvm_put_guest_xcr0(vcpu); vcpu->arch.xcr0 = xcr0; if ((xcr0 ^ old_xcr0) & XSTATE_EXTEND_MASK) kvm_update_cpuid(vcpu); return 0; } int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr) { if (kvm_x86_ops->get_cpl(vcpu) != 0 || __kvm_set_xcr(vcpu, index, xcr)) { kvm_inject_gp(vcpu, 0); return 1; } return 0; } EXPORT_SYMBOL_GPL(kvm_set_xcr); int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) { unsigned long old_cr4 = kvm_read_cr4(vcpu); unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_SMEP; if (cr4 & CR4_RESERVED_BITS) return 1; if (!guest_cpuid_has_xsave(vcpu) && (cr4 & X86_CR4_OSXSAVE)) return 1; if (!guest_cpuid_has_smep(vcpu) && (cr4 & X86_CR4_SMEP)) return 1; if (!guest_cpuid_has_smap(vcpu) && (cr4 & X86_CR4_SMAP)) return 1; if (!guest_cpuid_has_fsgsbase(vcpu) && (cr4 & X86_CR4_FSGSBASE)) return 1; if (is_long_mode(vcpu)) { if (!(cr4 & X86_CR4_PAE)) return 1; } else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE) && ((cr4 ^ old_cr4) & pdptr_bits) && !load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu))) return 1; if ((cr4 & X86_CR4_PCIDE) && !(old_cr4 & X86_CR4_PCIDE)) { if (!guest_cpuid_has_pcid(vcpu)) return 1; /* PCID can not be enabled when cr3[11:0]!=000H or EFER.LMA=0 */ if ((kvm_read_cr3(vcpu) & X86_CR3_PCID_MASK) || !is_long_mode(vcpu)) return 1; } if (kvm_x86_ops->set_cr4(vcpu, cr4)) return 1; if (((cr4 ^ old_cr4) & pdptr_bits) || (!(cr4 & X86_CR4_PCIDE) && (old_cr4 & X86_CR4_PCIDE))) kvm_mmu_reset_context(vcpu); if ((cr4 ^ old_cr4) & X86_CR4_SMAP) update_permission_bitmask(vcpu, vcpu->arch.walk_mmu, false); if ((cr4 ^ old_cr4) & X86_CR4_OSXSAVE) kvm_update_cpuid(vcpu); return 0; } EXPORT_SYMBOL_GPL(kvm_set_cr4); int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3) { if (cr3 == kvm_read_cr3(vcpu) && !pdptrs_changed(vcpu)) { kvm_mmu_sync_roots(vcpu); kvm_mmu_flush_tlb(vcpu); return 0; } if (is_long_mode(vcpu)) { if (cr3 & CR3_L_MODE_RESERVED_BITS) return 1; } else if (is_pae(vcpu) && is_paging(vcpu) && !load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3)) return 1; vcpu->arch.cr3 = cr3; __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail); kvm_mmu_new_cr3(vcpu); return 0; } EXPORT_SYMBOL_GPL(kvm_set_cr3); int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8) { if (cr8 & CR8_RESERVED_BITS) return 1; if (irqchip_in_kernel(vcpu->kvm)) kvm_lapic_set_tpr(vcpu, cr8); else vcpu->arch.cr8 = cr8; return 0; } EXPORT_SYMBOL_GPL(kvm_set_cr8); unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu) { if (irqchip_in_kernel(vcpu->kvm)) return kvm_lapic_get_cr8(vcpu); else return vcpu->arch.cr8; } EXPORT_SYMBOL_GPL(kvm_get_cr8); static void kvm_update_dr6(struct kvm_vcpu *vcpu) { if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) kvm_x86_ops->set_dr6(vcpu, vcpu->arch.dr6); } static void kvm_update_dr7(struct kvm_vcpu *vcpu) { unsigned long dr7; if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) dr7 = vcpu->arch.guest_debug_dr7; else dr7 = vcpu->arch.dr7; kvm_x86_ops->set_dr7(vcpu, dr7); vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_BP_ENABLED; if (dr7 & DR7_BP_EN_MASK) vcpu->arch.switch_db_regs |= KVM_DEBUGREG_BP_ENABLED; } static u64 kvm_dr6_fixed(struct kvm_vcpu *vcpu) { u64 fixed = DR6_FIXED_1; if (!guest_cpuid_has_rtm(vcpu)) fixed |= DR6_RTM; return fixed; } static int __kvm_set_dr(struct kvm_vcpu 
*vcpu, int dr, unsigned long val) { switch (dr) { case 0 ... 3: vcpu->arch.db[dr] = val; if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) vcpu->arch.eff_db[dr] = val; break; case 4: if (kvm_read_cr4_bits(vcpu, X86_CR4_DE)) return 1; /* #UD */ /* fall through */ case 6: if (val & 0xffffffff00000000ULL) return -1; /* #GP */ vcpu->arch.dr6 = (val & DR6_VOLATILE) | kvm_dr6_fixed(vcpu); kvm_update_dr6(vcpu); break; case 5: if (kvm_read_cr4_bits(vcpu, X86_CR4_DE)) return 1; /* #UD */ /* fall through */ default: /* 7 */ if (val & 0xffffffff00000000ULL) return -1; /* #GP */ vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1; kvm_update_dr7(vcpu); break; } return 0; } int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val) { int res; res = __kvm_set_dr(vcpu, dr, val); if (res > 0) kvm_queue_exception(vcpu, UD_VECTOR); else if (res < 0) kvm_inject_gp(vcpu, 0); return res; } EXPORT_SYMBOL_GPL(kvm_set_dr); static int _kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val) { switch (dr) { case 0 ... 3: *val = vcpu->arch.db[dr]; break; case 4: if (kvm_read_cr4_bits(vcpu, X86_CR4_DE)) return 1; /* fall through */ case 6: if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) *val = vcpu->arch.dr6; else *val = kvm_x86_ops->get_dr6(vcpu); break; case 5: if (kvm_read_cr4_bits(vcpu, X86_CR4_DE)) return 1; /* fall through */ default: /* 7 */ *val = vcpu->arch.dr7; break; } return 0; } int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val) { if (_kvm_get_dr(vcpu, dr, val)) { kvm_queue_exception(vcpu, UD_VECTOR); return 1; } return 0; } EXPORT_SYMBOL_GPL(kvm_get_dr); bool kvm_rdpmc(struct kvm_vcpu *vcpu) { u32 ecx = kvm_register_read(vcpu, VCPU_REGS_RCX); u64 data; int err; err = kvm_pmu_read_pmc(vcpu, ecx, &data); if (err) return err; kvm_register_write(vcpu, VCPU_REGS_RAX, (u32)data); kvm_register_write(vcpu, VCPU_REGS_RDX, data >> 32); return err; } EXPORT_SYMBOL_GPL(kvm_rdpmc); /* * List of msr numbers which we expose to userspace through KVM_GET_MSRS * and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST. * * This list is modified at module load time to reflect the * capabilities of the host cpu. This capabilities test skips MSRs that are * kvm-specific. Those are put in the beginning of the list. 
*/ #define KVM_SAVE_MSRS_BEGIN 12 static u32 msrs_to_save[] = { MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK, MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW, HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL, HV_X64_MSR_TIME_REF_COUNT, HV_X64_MSR_REFERENCE_TSC, HV_X64_MSR_APIC_ASSIST_PAGE, MSR_KVM_ASYNC_PF_EN, MSR_KVM_STEAL_TIME, MSR_KVM_PV_EOI_EN, MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP, MSR_STAR, #ifdef CONFIG_X86_64 MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR, #endif MSR_IA32_TSC, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA, MSR_IA32_FEATURE_CONTROL, MSR_IA32_BNDCFGS }; static unsigned num_msrs_to_save; static const u32 emulated_msrs[] = { MSR_IA32_TSC_ADJUST, MSR_IA32_TSCDEADLINE, MSR_IA32_MISC_ENABLE, MSR_IA32_MCG_STATUS, MSR_IA32_MCG_CTL, }; bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer) { if (efer & efer_reserved_bits) return false; if (efer & EFER_FFXSR) { struct kvm_cpuid_entry2 *feat; feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0); if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT))) return false; } if (efer & EFER_SVME) { struct kvm_cpuid_entry2 *feat; feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0); if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM))) return false; } return true; } EXPORT_SYMBOL_GPL(kvm_valid_efer); static int set_efer(struct kvm_vcpu *vcpu, u64 efer) { u64 old_efer = vcpu->arch.efer; if (!kvm_valid_efer(vcpu, efer)) return 1; if (is_paging(vcpu) && (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME)) return 1; efer &= ~EFER_LMA; efer |= vcpu->arch.efer & EFER_LMA; kvm_x86_ops->set_efer(vcpu, efer); /* Update reserved bits */ if ((efer ^ old_efer) & EFER_NX) kvm_mmu_reset_context(vcpu); return 0; } void kvm_enable_efer_bits(u64 mask) { efer_reserved_bits &= ~mask; } EXPORT_SYMBOL_GPL(kvm_enable_efer_bits); /* * Writes msr value into into the appropriate "register". * Returns 0 on success, non-0 otherwise. * Assumes vcpu_load() was already called. 
*/ int kvm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) { return kvm_x86_ops->set_msr(vcpu, msr); } /* * Adapt set_msr() to msr_io()'s calling convention */ static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data) { struct msr_data msr; msr.data = *data; msr.index = index; msr.host_initiated = true; return kvm_set_msr(vcpu, &msr); } #ifdef CONFIG_X86_64 struct pvclock_gtod_data { seqcount_t seq; struct { /* extract of a clocksource struct */ int vclock_mode; cycle_t cycle_last; cycle_t mask; u32 mult; u32 shift; } clock; u64 boot_ns; u64 nsec_base; }; static struct pvclock_gtod_data pvclock_gtod_data; static void update_pvclock_gtod(struct timekeeper *tk) { struct pvclock_gtod_data *vdata = &pvclock_gtod_data; u64 boot_ns; boot_ns = ktime_to_ns(ktime_add(tk->tkr.base_mono, tk->offs_boot)); write_seqcount_begin(&vdata->seq); /* copy pvclock gtod data */ vdata->clock.vclock_mode = tk->tkr.clock->archdata.vclock_mode; vdata->clock.cycle_last = tk->tkr.cycle_last; vdata->clock.mask = tk->tkr.mask; vdata->clock.mult = tk->tkr.mult; vdata->clock.shift = tk->tkr.shift; vdata->boot_ns = boot_ns; vdata->nsec_base = tk->tkr.xtime_nsec; write_seqcount_end(&vdata->seq); } #endif static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock) { int version; int r; struct pvclock_wall_clock wc; struct timespec boot; if (!wall_clock) return; r = kvm_read_guest(kvm, wall_clock, &version, sizeof(version)); if (r) return; if (version & 1) ++version; /* first time write, random junk */ ++version; kvm_write_guest(kvm, wall_clock, &version, sizeof(version)); /* * The guest calculates current wall clock time by adding * system time (updated by kvm_guest_time_update below) to the * wall clock specified here. guest system time equals host * system time for us, thus we must fill in host boot time here. 
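	 *
	 * A sketch of the guest-side arithmetic (not the guest's actual
	 * code): with B the boot time written below and N the kvmclock
	 * system time, the guest computes roughly
	 * wall = (wc.sec * NSEC_PER_SEC + wc.nsec) + N, i.e. B + N, so
	 * any error in B shifts the guest's wall clock by that amount.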
*/ getboottime(&boot); if (kvm->arch.kvmclock_offset) { struct timespec ts = ns_to_timespec(kvm->arch.kvmclock_offset); boot = timespec_sub(boot, ts); } wc.sec = boot.tv_sec; wc.nsec = boot.tv_nsec; wc.version = version; kvm_write_guest(kvm, wall_clock, &wc, sizeof(wc)); version++; kvm_write_guest(kvm, wall_clock, &version, sizeof(version)); } static uint32_t div_frac(uint32_t dividend, uint32_t divisor) { uint32_t quotient, remainder; /* Don't try to replace with do_div(), this one calculates * "(dividend << 32) / divisor" */ __asm__ ( "divl %4" : "=a" (quotient), "=d" (remainder) : "0" (0), "1" (dividend), "r" (divisor) ); return quotient; } static void kvm_get_time_scale(uint32_t scaled_khz, uint32_t base_khz, s8 *pshift, u32 *pmultiplier) { uint64_t scaled64; int32_t shift = 0; uint64_t tps64; uint32_t tps32; tps64 = base_khz * 1000LL; scaled64 = scaled_khz * 1000LL; while (tps64 > scaled64*2 || tps64 & 0xffffffff00000000ULL) { tps64 >>= 1; shift--; } tps32 = (uint32_t)tps64; while (tps32 <= scaled64 || scaled64 & 0xffffffff00000000ULL) { if (scaled64 & 0xffffffff00000000ULL || tps32 & 0x80000000) scaled64 >>= 1; else tps32 <<= 1; shift++; } *pshift = shift; *pmultiplier = div_frac(scaled64, tps32); pr_debug("%s: base_khz %u => %u, shift %d, mul %u\n", __func__, base_khz, scaled_khz, shift, *pmultiplier); } static inline u64 get_kernel_ns(void) { return ktime_get_boot_ns(); } #ifdef CONFIG_X86_64 static atomic_t kvm_guest_has_master_clock = ATOMIC_INIT(0); #endif static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz); unsigned long max_tsc_khz; static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec) { return pvclock_scale_delta(nsec, vcpu->arch.virtual_tsc_mult, vcpu->arch.virtual_tsc_shift); } static u32 adjust_tsc_khz(u32 khz, s32 ppm) { u64 v = (u64)khz * (1000000 + ppm); do_div(v, 1000000); return v; } static void kvm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 this_tsc_khz) { u32 thresh_lo, thresh_hi; int use_scaling = 0; /* tsc_khz can be zero if TSC calibration fails */ if (this_tsc_khz == 0) return; /* Compute a scale to convert nanoseconds in TSC cycles */ kvm_get_time_scale(this_tsc_khz, NSEC_PER_SEC / 1000, &vcpu->arch.virtual_tsc_shift, &vcpu->arch.virtual_tsc_mult); vcpu->arch.virtual_tsc_khz = this_tsc_khz; /* * Compute the variation in TSC rate which is acceptable * within the range of tolerance and decide if the * rate being applied is within that bounds of the hardware * rate. If so, no scaling or compensation need be done. 
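	 *
	 * Worked example, assuming the default tsc_tolerance_ppm of 250:
	 * for a host tsc_khz of 1000000 (1 GHz), adjust_tsc_khz() yields
	 * thresh_lo = 999750 and thresh_hi = 1000250, so a guest
	 * requesting 999000 kHz falls outside the window and gets
	 * use_scaling = 1.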
*/ thresh_lo = adjust_tsc_khz(tsc_khz, -tsc_tolerance_ppm); thresh_hi = adjust_tsc_khz(tsc_khz, tsc_tolerance_ppm); if (this_tsc_khz < thresh_lo || this_tsc_khz > thresh_hi) { pr_debug("kvm: requested TSC rate %u falls outside tolerance [%u,%u]\n", this_tsc_khz, thresh_lo, thresh_hi); use_scaling = 1; } kvm_x86_ops->set_tsc_khz(vcpu, this_tsc_khz, use_scaling); } static u64 compute_guest_tsc(struct kvm_vcpu *vcpu, s64 kernel_ns) { u64 tsc = pvclock_scale_delta(kernel_ns-vcpu->arch.this_tsc_nsec, vcpu->arch.virtual_tsc_mult, vcpu->arch.virtual_tsc_shift); tsc += vcpu->arch.this_tsc_write; return tsc; } void kvm_track_tsc_matching(struct kvm_vcpu *vcpu) { #ifdef CONFIG_X86_64 bool vcpus_matched; bool do_request = false; struct kvm_arch *ka = &vcpu->kvm->arch; struct pvclock_gtod_data *gtod = &pvclock_gtod_data; vcpus_matched = (ka->nr_vcpus_matched_tsc + 1 == atomic_read(&vcpu->kvm->online_vcpus)); if (vcpus_matched && gtod->clock.vclock_mode == VCLOCK_TSC) if (!ka->use_master_clock) do_request = 1; if (!vcpus_matched && ka->use_master_clock) do_request = 1; if (do_request) kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu); trace_kvm_track_tsc(vcpu->vcpu_id, ka->nr_vcpus_matched_tsc, atomic_read(&vcpu->kvm->online_vcpus), ka->use_master_clock, gtod->clock.vclock_mode); #endif } static void update_ia32_tsc_adjust_msr(struct kvm_vcpu *vcpu, s64 offset) { u64 curr_offset = kvm_x86_ops->read_tsc_offset(vcpu); vcpu->arch.ia32_tsc_adjust_msr += offset - curr_offset; } void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr) { struct kvm *kvm = vcpu->kvm; u64 offset, ns, elapsed; unsigned long flags; s64 usdiff; bool matched; bool already_matched; u64 data = msr->data; raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags); offset = kvm_x86_ops->compute_tsc_offset(vcpu, data); ns = get_kernel_ns(); elapsed = ns - kvm->arch.last_tsc_nsec; if (vcpu->arch.virtual_tsc_khz) { int faulted = 0; /* n.b - signed multiplication and division required */ usdiff = data - kvm->arch.last_tsc_write; #ifdef CONFIG_X86_64 usdiff = (usdiff * 1000) / vcpu->arch.virtual_tsc_khz; #else /* do_div() only does unsigned */ asm("1: idivl %[divisor]\n" "2: xor %%edx, %%edx\n" " movl $0, %[faulted]\n" "3:\n" ".section .fixup,\"ax\"\n" "4: movl $1, %[faulted]\n" " jmp 3b\n" ".previous\n" _ASM_EXTABLE(1b, 4b) : "=A"(usdiff), [faulted] "=r" (faulted) : "A"(usdiff * 1000), [divisor] "rm"(vcpu->arch.virtual_tsc_khz)); #endif do_div(elapsed, 1000); usdiff -= elapsed; if (usdiff < 0) usdiff = -usdiff; /* idivl overflow => difference is larger than USEC_PER_SEC */ if (faulted) usdiff = USEC_PER_SEC; } else usdiff = USEC_PER_SEC; /* disable TSC match window below */ /* * Special case: TSC write with a small delta (1 second) of virtual * cycle time against real time is interpreted as an attempt to * synchronize the CPU. * * For a reliable TSC, we can match TSC offsets, and for an unstable * TSC, we add elapsed time in this computation. We could let the * compensation code attempt to catch up if we fall behind, but * it's better to try to match offsets from the beginning. 
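	 *
	 * As a rough example of the heuristic: at a virtual rate of
	 * 2000000 kHz, a write landing 1500000000 cycles after the
	 * previous one gives a usdiff of about 750000 us (before the
	 * elapsed host time is subtracted); anything under USEC_PER_SEC
	 * is treated as a synchronization attempt rather than a
	 * deliberate TSC jump.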
 */
	if (usdiff < USEC_PER_SEC &&
	    vcpu->arch.virtual_tsc_khz == kvm->arch.last_tsc_khz) {
		if (!check_tsc_unstable()) {
			offset = kvm->arch.cur_tsc_offset;
			pr_debug("kvm: matched tsc offset for %llu\n", data);
		} else {
			u64 delta = nsec_to_cycles(vcpu, elapsed);
			data += delta;
			offset = kvm_x86_ops->compute_tsc_offset(vcpu, data);
			pr_debug("kvm: adjusted tsc offset by %llu\n", delta);
		}
		matched = true;
		already_matched = (vcpu->arch.this_tsc_generation == kvm->arch.cur_tsc_generation);
	} else {
		/*
		 * We split periods of matched TSC writes into generations.
		 * For each generation, we track the original measured
		 * nanosecond time, offset, and write, so if TSCs are in
		 * sync, we can match exact offset, and if not, we can match
		 * exact software computation in compute_guest_tsc()
		 *
		 * These values are tracked in kvm->arch.cur_xxx variables.
		 */
		kvm->arch.cur_tsc_generation++;
		kvm->arch.cur_tsc_nsec = ns;
		kvm->arch.cur_tsc_write = data;
		kvm->arch.cur_tsc_offset = offset;
		matched = false;
		pr_debug("kvm: new tsc generation %llu, clock %llu\n",
			 kvm->arch.cur_tsc_generation, data);
	}

	/*
	 * We also track the most recent recorded KHZ, write and time to
	 * allow the matching interval to be extended at each write.
	 */
	kvm->arch.last_tsc_nsec = ns;
	kvm->arch.last_tsc_write = data;
	kvm->arch.last_tsc_khz = vcpu->arch.virtual_tsc_khz;

	vcpu->arch.last_guest_tsc = data;

	/* Keep track of which generation this VCPU has synchronized to */
	vcpu->arch.this_tsc_generation = kvm->arch.cur_tsc_generation;
	vcpu->arch.this_tsc_nsec = kvm->arch.cur_tsc_nsec;
	vcpu->arch.this_tsc_write = kvm->arch.cur_tsc_write;

	if (guest_cpuid_has_tsc_adjust(vcpu) && !msr->host_initiated)
		update_ia32_tsc_adjust_msr(vcpu, offset);
	kvm_x86_ops->write_tsc_offset(vcpu, offset);
	raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);

	spin_lock(&kvm->arch.pvclock_gtod_sync_lock);
	if (!matched) {
		kvm->arch.nr_vcpus_matched_tsc = 0;
	} else if (!already_matched) {
		kvm->arch.nr_vcpus_matched_tsc++;
	}

	kvm_track_tsc_matching(vcpu);
	spin_unlock(&kvm->arch.pvclock_gtod_sync_lock);
}
EXPORT_SYMBOL_GPL(kvm_write_tsc);

#ifdef CONFIG_X86_64

static cycle_t read_tsc(void)
{
	cycle_t ret;
	u64 last;

	/*
	 * Empirically, a fence (of type that depends on the CPU)
	 * before rdtsc is enough to ensure that rdtsc is ordered
	 * with respect to loads. The various CPU manuals are unclear
	 * as to whether rdtsc can be reordered with later loads,
	 * but no one has ever seen it happen.
	 */
	rdtsc_barrier();
	ret = (cycle_t)vget_cycles();

	last = pvclock_gtod_data.clock.cycle_last;

	if (likely(ret >= last))
		return ret;

	/*
	 * GCC likes to generate cmov here, but this branch is extremely
	 * predictable (it's just a function of time and the likely is
	 * very likely) and there's a data dependence, so force GCC
	 * to generate a branch instead. I don't barrier() because
	 * we don't actually need a barrier, and if this function
	 * ever gets inlined it will generate worse code.
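	 *
	 * Returning 'last' when the TSC appears to have gone backwards
	 * keeps this clock monotonic from the caller's point of view;
	 * the skew absorbed here is typically only a handful of cycles
	 * of cross-CPU TSC jitter.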
 */
	asm volatile ("");

	return last;
}

static inline u64 vgettsc(cycle_t *cycle_now)
{
	long v;
	struct pvclock_gtod_data *gtod = &pvclock_gtod_data;

	*cycle_now = read_tsc();

	v = (*cycle_now - gtod->clock.cycle_last) & gtod->clock.mask;
	return v * gtod->clock.mult;
}

static int do_monotonic_boot(s64 *t, cycle_t *cycle_now)
{
	struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
	unsigned long seq;
	int mode;
	u64 ns;

	do {
		seq = read_seqcount_begin(&gtod->seq);
		mode = gtod->clock.vclock_mode;
		ns = gtod->nsec_base;
		ns += vgettsc(cycle_now);
		ns >>= gtod->clock.shift;
		ns += gtod->boot_ns;
	} while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
	*t = ns;

	return mode;
}

/* returns true if host is using tsc clocksource */
static bool kvm_get_time_and_clockread(s64 *kernel_ns, cycle_t *cycle_now)
{
	/* checked again under seqlock below */
	if (pvclock_gtod_data.clock.vclock_mode != VCLOCK_TSC)
		return false;

	return do_monotonic_boot(kernel_ns, cycle_now) == VCLOCK_TSC;
}
#endif

/*
 *
 * Assuming a stable TSC across physical CPUS, and a stable TSC
 * across virtual CPUs, the following condition is possible.
 * Each numbered line represents an event visible to both
 * CPUs at the next numbered event.
 *
 * "timespecX" represents host monotonic time. "tscX" represents
 * RDTSC value.
 *
 *		VCPU0 on CPU0		|	VCPU1 on CPU1
 *
 * 1.  read timespec0,tsc0
 * 2.					| timespec1 = timespec0 + N
 *					| tsc1 = tsc0 + M
 * 3. transition to guest		| transition to guest
 * 4. ret0 = timespec0 + (rdtsc - tsc0)	|
 * 5.					| ret1 = timespec1 + (rdtsc - tsc1)
 *					| ret1 = timespec0 + N + (rdtsc - (tsc0 + M))
 *
 * Since ret0 update is visible to VCPU1 at time 5, to obey monotonicity:
 *
 *	- ret0 < ret1
 *	- timespec0 + (rdtsc - tsc0) < timespec0 + N + (rdtsc - (tsc0 + M))
 *		...
 *	- 0 < N - M => M < N
 *
 * That is, when timespec0 != timespec1, M < N. Unfortunately that is not
 * always the case (the difference between two distinct xtime instances
 * might be smaller than the difference between corresponding TSC reads,
 * when updating guest vcpus pvclock areas).
 *
 * To avoid that problem, do not allow visibility of distinct
 * system_timestamp/tsc_timestamp values simultaneously: use a master
 * copy of host monotonic time values. Update that master copy
 * in lockstep.
 *
 * Rely on synchronization of host TSCs and guest TSCs for monotonicity.
 *
 */

static void pvclock_update_vm_gtod_copy(struct kvm *kvm)
{
#ifdef CONFIG_X86_64
	struct kvm_arch *ka = &kvm->arch;
	int vclock_mode;
	bool host_tsc_clocksource, vcpus_matched;

	vcpus_matched = (ka->nr_vcpus_matched_tsc + 1 ==
			 atomic_read(&kvm->online_vcpus));

	/*
	 * If the host uses TSC clock, then passthrough TSC as stable
	 * to the guest.
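	 *
	 * "Stable" additionally requires that all vCPUs have matched the
	 * current TSC generation and that no backwards host TSC has ever
	 * been observed; if either check fails the master clock stays
	 * off.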
*/ host_tsc_clocksource = kvm_get_time_and_clockread( &ka->master_kernel_ns, &ka->master_cycle_now); ka->use_master_clock = host_tsc_clocksource && vcpus_matched && !backwards_tsc_observed; if (ka->use_master_clock) atomic_set(&kvm_guest_has_master_clock, 1); vclock_mode = pvclock_gtod_data.clock.vclock_mode; trace_kvm_update_master_clock(ka->use_master_clock, vclock_mode, vcpus_matched); #endif } static void kvm_gen_update_masterclock(struct kvm *kvm) { #ifdef CONFIG_X86_64 int i; struct kvm_vcpu *vcpu; struct kvm_arch *ka = &kvm->arch; spin_lock(&ka->pvclock_gtod_sync_lock); kvm_make_mclock_inprogress_request(kvm); /* no guest entries from this point */ pvclock_update_vm_gtod_copy(kvm); kvm_for_each_vcpu(i, vcpu, kvm) kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); /* guest entries allowed */ kvm_for_each_vcpu(i, vcpu, kvm) clear_bit(KVM_REQ_MCLOCK_INPROGRESS, &vcpu->requests); spin_unlock(&ka->pvclock_gtod_sync_lock); #endif } static int kvm_guest_time_update(struct kvm_vcpu *v) { unsigned long flags, this_tsc_khz; struct kvm_vcpu_arch *vcpu = &v->arch; struct kvm_arch *ka = &v->kvm->arch; s64 kernel_ns; u64 tsc_timestamp, host_tsc; struct pvclock_vcpu_time_info guest_hv_clock; u8 pvclock_flags; bool use_master_clock; kernel_ns = 0; host_tsc = 0; /* * If the host uses TSC clock, then passthrough TSC as stable * to the guest. */ spin_lock(&ka->pvclock_gtod_sync_lock); use_master_clock = ka->use_master_clock; if (use_master_clock) { host_tsc = ka->master_cycle_now; kernel_ns = ka->master_kernel_ns; } spin_unlock(&ka->pvclock_gtod_sync_lock); /* Keep irq disabled to prevent changes to the clock */ local_irq_save(flags); this_tsc_khz = __get_cpu_var(cpu_tsc_khz); if (unlikely(this_tsc_khz == 0)) { local_irq_restore(flags); kvm_make_request(KVM_REQ_CLOCK_UPDATE, v); return 1; } if (!use_master_clock) { host_tsc = native_read_tsc(); kernel_ns = get_kernel_ns(); } tsc_timestamp = kvm_x86_ops->read_l1_tsc(v, host_tsc); /* * We may have to catch up the TSC to match elapsed wall clock * time for two reasons, even if kvmclock is used. * 1) CPU could have been running below the maximum TSC rate * 2) Broken TSC compensation resets the base at each VCPU * entry to avoid unknown leaps of TSC even when running * again on the same CPU. This may cause apparent elapsed * time to disappear, and the guest to stand still or run * very slowly. */ if (vcpu->tsc_catchup) { u64 tsc = compute_guest_tsc(v, kernel_ns); if (tsc > tsc_timestamp) { adjust_tsc_offset_guest(v, tsc - tsc_timestamp); tsc_timestamp = tsc; } } local_irq_restore(flags); if (!vcpu->pv_time_enabled) return 0; if (unlikely(vcpu->hw_tsc_khz != this_tsc_khz)) { kvm_get_time_scale(NSEC_PER_SEC / 1000, this_tsc_khz, &vcpu->hv_clock.tsc_shift, &vcpu->hv_clock.tsc_to_system_mul); vcpu->hw_tsc_khz = this_tsc_khz; } /* With all the info we got, fill in the values */ vcpu->hv_clock.tsc_timestamp = tsc_timestamp; vcpu->hv_clock.system_time = kernel_ns + v->kvm->arch.kvmclock_offset; vcpu->last_guest_tsc = tsc_timestamp; /* * The interface expects us to write an even number signaling that the * update is finished. Since the guest won't see the intermediate * state, we just increase by 2 at the end. 
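	 *
	 * A guest-side reader has to pair with this, retrying while the
	 * version is odd or changes underneath it -- a minimal sketch,
	 * using a hypothetical read_fields() helper:
	 *
	 *	do {
	 *		version = hv_clock->version;
	 *		rmb();
	 *		read_fields(hv_clock, &t);
	 *		rmb();
	 *	} while ((version & 1) || (version != hv_clock->version));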
*/ vcpu->hv_clock.version += 2; if (unlikely(kvm_read_guest_cached(v->kvm, &vcpu->pv_time, &guest_hv_clock, sizeof(guest_hv_clock)))) return 0; /* retain PVCLOCK_GUEST_STOPPED if set in guest copy */ pvclock_flags = (guest_hv_clock.flags & PVCLOCK_GUEST_STOPPED); if (vcpu->pvclock_set_guest_stopped_request) { pvclock_flags |= PVCLOCK_GUEST_STOPPED; vcpu->pvclock_set_guest_stopped_request = false; } /* If the host uses TSC clocksource, then it is stable */ if (use_master_clock) pvclock_flags |= PVCLOCK_TSC_STABLE_BIT; vcpu->hv_clock.flags = pvclock_flags; kvm_write_guest_cached(v->kvm, &vcpu->pv_time, &vcpu->hv_clock, sizeof(vcpu->hv_clock)); return 0; } /* * kvmclock updates which are isolated to a given vcpu, such as * vcpu->cpu migration, should not allow system_timestamp from * the rest of the vcpus to remain static. Otherwise ntp frequency * correction applies to one vcpu's system_timestamp but not * the others. * * So in those cases, request a kvmclock update for all vcpus. * We need to rate-limit these requests though, as they can * considerably slow guests that have a large number of vcpus. * The time for a remote vcpu to update its kvmclock is bound * by the delay we use to rate-limit the updates. */ #define KVMCLOCK_UPDATE_DELAY msecs_to_jiffies(100) static void kvmclock_update_fn(struct work_struct *work) { int i; struct delayed_work *dwork = to_delayed_work(work); struct kvm_arch *ka = container_of(dwork, struct kvm_arch, kvmclock_update_work); struct kvm *kvm = container_of(ka, struct kvm, arch); struct kvm_vcpu *vcpu; kvm_for_each_vcpu(i, vcpu, kvm) { kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); kvm_vcpu_kick(vcpu); } } static void kvm_gen_kvmclock_update(struct kvm_vcpu *v) { struct kvm *kvm = v->kvm; kvm_make_request(KVM_REQ_CLOCK_UPDATE, v); schedule_delayed_work(&kvm->arch.kvmclock_update_work, KVMCLOCK_UPDATE_DELAY); } #define KVMCLOCK_SYNC_PERIOD (300 * HZ) static void kvmclock_sync_fn(struct work_struct *work) { struct delayed_work *dwork = to_delayed_work(work); struct kvm_arch *ka = container_of(dwork, struct kvm_arch, kvmclock_sync_work); struct kvm *kvm = container_of(ka, struct kvm, arch); schedule_delayed_work(&kvm->arch.kvmclock_update_work, 0); schedule_delayed_work(&kvm->arch.kvmclock_sync_work, KVMCLOCK_SYNC_PERIOD); } static bool msr_mtrr_valid(unsigned msr) { switch (msr) { case 0x200 ... 
0x200 + 2 * KVM_NR_VAR_MTRR - 1: case MSR_MTRRfix64K_00000: case MSR_MTRRfix16K_80000: case MSR_MTRRfix16K_A0000: case MSR_MTRRfix4K_C0000: case MSR_MTRRfix4K_C8000: case MSR_MTRRfix4K_D0000: case MSR_MTRRfix4K_D8000: case MSR_MTRRfix4K_E0000: case MSR_MTRRfix4K_E8000: case MSR_MTRRfix4K_F0000: case MSR_MTRRfix4K_F8000: case MSR_MTRRdefType: case MSR_IA32_CR_PAT: return true; case 0x2f8: return true; } return false; } static bool valid_pat_type(unsigned t) { return t < 8 && (1 << t) & 0xf3; /* 0, 1, 4, 5, 6, 7 */ } static bool valid_mtrr_type(unsigned t) { return t < 8 && (1 << t) & 0x73; /* 0, 1, 4, 5, 6 */ } static bool mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data) { int i; u64 mask; if (!msr_mtrr_valid(msr)) return false; if (msr == MSR_IA32_CR_PAT) { for (i = 0; i < 8; i++) if (!valid_pat_type((data >> (i * 8)) & 0xff)) return false; return true; } else if (msr == MSR_MTRRdefType) { if (data & ~0xcff) return false; return valid_mtrr_type(data & 0xff); } else if (msr >= MSR_MTRRfix64K_00000 && msr <= MSR_MTRRfix4K_F8000) { for (i = 0; i < 8 ; i++) if (!valid_mtrr_type((data >> (i * 8)) & 0xff)) return false; return true; } /* variable MTRRs */ WARN_ON(!(msr >= 0x200 && msr < 0x200 + 2 * KVM_NR_VAR_MTRR)); mask = (~0ULL) << cpuid_maxphyaddr(vcpu); if ((msr & 1) == 0) { /* MTRR base */ if (!valid_mtrr_type(data & 0xff)) return false; mask |= 0xf00; } else /* MTRR mask */ mask |= 0x7ff; if (data & mask) { kvm_inject_gp(vcpu, 0); return false; } return true; } static int set_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 data) { u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges; if (!mtrr_valid(vcpu, msr, data)) return 1; if (msr == MSR_MTRRdefType) { vcpu->arch.mtrr_state.def_type = data; vcpu->arch.mtrr_state.enabled = (data & 0xc00) >> 10; } else if (msr == MSR_MTRRfix64K_00000) p[0] = data; else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000) p[1 + msr - MSR_MTRRfix16K_80000] = data; else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000) p[3 + msr - MSR_MTRRfix4K_C0000] = data; else if (msr == MSR_IA32_CR_PAT) vcpu->arch.pat = data; else { /* Variable MTRRs */ int idx, is_mtrr_mask; u64 *pt; idx = (msr - 0x200) / 2; is_mtrr_mask = msr - 0x200 - 2 * idx; if (!is_mtrr_mask) pt = (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo; else pt = (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo; *pt = data; } kvm_mmu_reset_context(vcpu); return 0; } static int set_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 data) { u64 mcg_cap = vcpu->arch.mcg_cap; unsigned bank_num = mcg_cap & 0xff; switch (msr) { case MSR_IA32_MCG_STATUS: vcpu->arch.mcg_status = data; break; case MSR_IA32_MCG_CTL: if (!(mcg_cap & MCG_CTL_P)) return 1; if (data != 0 && data != ~(u64)0) return -1; vcpu->arch.mcg_ctl = data; break; default: if (msr >= MSR_IA32_MC0_CTL && msr < MSR_IA32_MC0_CTL + 4 * bank_num) { u32 offset = msr - MSR_IA32_MC0_CTL; /* only 0 or all 1s can be written to IA32_MCi_CTL * some Linux kernels though clear bit 10 in bank 4 to * workaround a BIOS/GART TBL issue on AMD K8s, ignore * this to avoid an uncatched #GP in the guest */ if ((offset & 0x3) == 0 && data != 0 && (data | (1 << 10)) != ~(u64)0) return -1; vcpu->arch.mce_banks[offset] = data; break; } return 1; } return 0; } static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data) { struct kvm *kvm = vcpu->kvm; int lm = is_long_mode(vcpu); u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64 : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32; u8 blob_size = lm ? 
kvm->arch.xen_hvm_config.blob_size_64 : kvm->arch.xen_hvm_config.blob_size_32; u32 page_num = data & ~PAGE_MASK; u64 page_addr = data & PAGE_MASK; u8 *page; int r; r = -E2BIG; if (page_num >= blob_size) goto out; r = -ENOMEM; page = memdup_user(blob_addr + (page_num * PAGE_SIZE), PAGE_SIZE); if (IS_ERR(page)) { r = PTR_ERR(page); goto out; } if (kvm_write_guest(kvm, page_addr, page, PAGE_SIZE)) goto out_free; r = 0; out_free: kfree(page); out: return r; } static bool kvm_hv_hypercall_enabled(struct kvm *kvm) { return kvm->arch.hv_hypercall & HV_X64_MSR_HYPERCALL_ENABLE; } static bool kvm_hv_msr_partition_wide(u32 msr) { bool r = false; switch (msr) { case HV_X64_MSR_GUEST_OS_ID: case HV_X64_MSR_HYPERCALL: case HV_X64_MSR_REFERENCE_TSC: case HV_X64_MSR_TIME_REF_COUNT: r = true; break; } return r; } static int set_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data) { struct kvm *kvm = vcpu->kvm; switch (msr) { case HV_X64_MSR_GUEST_OS_ID: kvm->arch.hv_guest_os_id = data; /* setting guest os id to zero disables hypercall page */ if (!kvm->arch.hv_guest_os_id) kvm->arch.hv_hypercall &= ~HV_X64_MSR_HYPERCALL_ENABLE; break; case HV_X64_MSR_HYPERCALL: { u64 gfn; unsigned long addr; u8 instructions[4]; /* if guest os id is not set hypercall should remain disabled */ if (!kvm->arch.hv_guest_os_id) break; if (!(data & HV_X64_MSR_HYPERCALL_ENABLE)) { kvm->arch.hv_hypercall = data; break; } gfn = data >> HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT; addr = gfn_to_hva(kvm, gfn); if (kvm_is_error_hva(addr)) return 1; kvm_x86_ops->patch_hypercall(vcpu, instructions); ((unsigned char *)instructions)[3] = 0xc3; /* ret */ if (__copy_to_user((void __user *)addr, instructions, 4)) return 1; kvm->arch.hv_hypercall = data; mark_page_dirty(kvm, gfn); break; } case HV_X64_MSR_REFERENCE_TSC: { u64 gfn; HV_REFERENCE_TSC_PAGE tsc_ref; memset(&tsc_ref, 0, sizeof(tsc_ref)); kvm->arch.hv_tsc_page = data; if (!(data & HV_X64_MSR_TSC_REFERENCE_ENABLE)) break; gfn = data >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT; if (kvm_write_guest(kvm, gfn << HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT, &tsc_ref, sizeof(tsc_ref))) return 1; mark_page_dirty(kvm, gfn); break; } default: vcpu_unimpl(vcpu, "HYPER-V unimplemented wrmsr: 0x%x " "data 0x%llx\n", msr, data); return 1; } return 0; } static int set_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 data) { switch (msr) { case HV_X64_MSR_APIC_ASSIST_PAGE: { u64 gfn; unsigned long addr; if (!(data & HV_X64_MSR_APIC_ASSIST_PAGE_ENABLE)) { vcpu->arch.hv_vapic = data; if (kvm_lapic_enable_pv_eoi(vcpu, 0)) return 1; break; } gfn = data >> HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT; addr = gfn_to_hva(vcpu->kvm, gfn); if (kvm_is_error_hva(addr)) return 1; if (__clear_user((void __user *)addr, PAGE_SIZE)) return 1; vcpu->arch.hv_vapic = data; mark_page_dirty(vcpu->kvm, gfn); if (kvm_lapic_enable_pv_eoi(vcpu, gfn_to_gpa(gfn) | KVM_MSR_ENABLED)) return 1; break; } case HV_X64_MSR_EOI: return kvm_hv_vapic_msr_write(vcpu, APIC_EOI, data); case HV_X64_MSR_ICR: return kvm_hv_vapic_msr_write(vcpu, APIC_ICR, data); case HV_X64_MSR_TPR: return kvm_hv_vapic_msr_write(vcpu, APIC_TASKPRI, data); default: vcpu_unimpl(vcpu, "HYPER-V unimplemented wrmsr: 0x%x " "data 0x%llx\n", msr, data); return 1; } return 0; } static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data) { gpa_t gpa = data & ~0x3f; /* Bits 2:5 are reserved, Should be zero */ if (data & 0x3c) return 1; vcpu->arch.apf.msr_val = data; if (!(data & KVM_ASYNC_PF_ENABLED)) { kvm_clear_async_pf_completion_queue(vcpu); 
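		/* Guest is turning async PF off: the completion queue was
		 * just flushed; the GFN wait hash is reset next before
		 * returning. */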
kvm_async_pf_hash_reset(vcpu); return 0; } if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa, sizeof(u32))) return 1; vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS); kvm_async_pf_wakeup_all(vcpu); return 0; } static void kvmclock_reset(struct kvm_vcpu *vcpu) { vcpu->arch.pv_time_enabled = false; } static void accumulate_steal_time(struct kvm_vcpu *vcpu) { u64 delta; if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED)) return; delta = current->sched_info.run_delay - vcpu->arch.st.last_steal; vcpu->arch.st.last_steal = current->sched_info.run_delay; vcpu->arch.st.accum_steal = delta; } static void record_steal_time(struct kvm_vcpu *vcpu) { if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED)) return; if (unlikely(kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.st.stime, &vcpu->arch.st.steal, sizeof(struct kvm_steal_time)))) return; vcpu->arch.st.steal.steal += vcpu->arch.st.accum_steal; vcpu->arch.st.steal.version += 2; vcpu->arch.st.accum_steal = 0; kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime, &vcpu->arch.st.steal, sizeof(struct kvm_steal_time)); } int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) { bool pr = false; u32 msr = msr_info->index; u64 data = msr_info->data; switch (msr) { case MSR_AMD64_NB_CFG: case MSR_IA32_UCODE_REV: case MSR_IA32_UCODE_WRITE: case MSR_VM_HSAVE_PA: case MSR_AMD64_PATCH_LOADER: case MSR_AMD64_BU_CFG2: break; case MSR_EFER: return set_efer(vcpu, data); case MSR_K7_HWCR: data &= ~(u64)0x40; /* ignore flush filter disable */ data &= ~(u64)0x100; /* ignore ignne emulation enable */ data &= ~(u64)0x8; /* ignore TLB cache disable */ data &= ~(u64)0x40000; /* ignore Mc status write enable */ if (data != 0) { vcpu_unimpl(vcpu, "unimplemented HWCR wrmsr: 0x%llx\n", data); return 1; } break; case MSR_FAM10H_MMIO_CONF_BASE: if (data != 0) { vcpu_unimpl(vcpu, "unimplemented MMIO_CONF_BASE wrmsr: " "0x%llx\n", data); return 1; } break; case MSR_IA32_DEBUGCTLMSR: if (!data) { /* We support the non-activated case already */ break; } else if (data & ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_BTF)) { /* Values other than LBR and BTF are vendor-specific, thus reserved and should throw a #GP */ return 1; } vcpu_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTLMSR 0x%llx, nop\n", __func__, data); break; case 0x200 ... 0x2ff: return set_msr_mtrr(vcpu, msr, data); case MSR_IA32_APICBASE: return kvm_set_apic_base(vcpu, msr_info); case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff: return kvm_x2apic_msr_write(vcpu, msr, data); case MSR_IA32_TSCDEADLINE: kvm_set_lapic_tscdeadline_msr(vcpu, data); break; case MSR_IA32_TSC_ADJUST: if (guest_cpuid_has_tsc_adjust(vcpu)) { if (!msr_info->host_initiated) { u64 adj = data - vcpu->arch.ia32_tsc_adjust_msr; kvm_x86_ops->adjust_tsc_offset(vcpu, adj, true); } vcpu->arch.ia32_tsc_adjust_msr = data; } break; case MSR_IA32_MISC_ENABLE: vcpu->arch.ia32_misc_enable_msr = data; break; case MSR_KVM_WALL_CLOCK_NEW: case MSR_KVM_WALL_CLOCK: vcpu->kvm->arch.wall_clock = data; kvm_write_wall_clock(vcpu->kvm, data); break; case MSR_KVM_SYSTEM_TIME_NEW: case MSR_KVM_SYSTEM_TIME: { u64 gpa_offset; kvmclock_reset(vcpu); vcpu->arch.time = data; kvm_make_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu); /* we verify if the enable bit is set... 
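		 * (bit 0 of the MSR value); bits 1-11 then give the offset
		 * of the pvclock area within its page.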
*/ if (!(data & 1)) break; gpa_offset = data & ~(PAGE_MASK | 1); if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.pv_time, data & ~1ULL, sizeof(struct pvclock_vcpu_time_info))) vcpu->arch.pv_time_enabled = false; else vcpu->arch.pv_time_enabled = true; break; } case MSR_KVM_ASYNC_PF_EN: if (kvm_pv_enable_async_pf(vcpu, data)) return 1; break; case MSR_KVM_STEAL_TIME: if (unlikely(!sched_info_on())) return 1; if (data & KVM_STEAL_RESERVED_MASK) return 1; if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.st.stime, data & KVM_STEAL_VALID_BITS, sizeof(struct kvm_steal_time))) return 1; vcpu->arch.st.msr_val = data; if (!(data & KVM_MSR_ENABLED)) break; vcpu->arch.st.last_steal = current->sched_info.run_delay; preempt_disable(); accumulate_steal_time(vcpu); preempt_enable(); kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu); break; case MSR_KVM_PV_EOI_EN: if (kvm_lapic_enable_pv_eoi(vcpu, data)) return 1; break; case MSR_IA32_MCG_CTL: case MSR_IA32_MCG_STATUS: case MSR_IA32_MC0_CTL ... MSR_IA32_MC0_CTL + 4 * KVM_MAX_MCE_BANKS - 1: return set_msr_mce(vcpu, msr, data); /* Performance counters are not protected by a CPUID bit, * so we should check all of them in the generic path for the sake of * cross vendor migration. * Writing a zero into the event select MSRs disables them, * which we perfectly emulate ;-). Any other value should be at least * reported, some guests depend on them. */ case MSR_K7_EVNTSEL0: case MSR_K7_EVNTSEL1: case MSR_K7_EVNTSEL2: case MSR_K7_EVNTSEL3: if (data != 0) vcpu_unimpl(vcpu, "unimplemented perfctr wrmsr: " "0x%x data 0x%llx\n", msr, data); break; /* at least RHEL 4 unconditionally writes to the perfctr registers, * so we ignore writes to make it happy. */ case MSR_K7_PERFCTR0: case MSR_K7_PERFCTR1: case MSR_K7_PERFCTR2: case MSR_K7_PERFCTR3: vcpu_unimpl(vcpu, "unimplemented perfctr wrmsr: " "0x%x data 0x%llx\n", msr, data); break; case MSR_P6_PERFCTR0: case MSR_P6_PERFCTR1: pr = true; case MSR_P6_EVNTSEL0: case MSR_P6_EVNTSEL1: if (kvm_pmu_msr(vcpu, msr)) return kvm_pmu_set_msr(vcpu, msr_info); if (pr || data != 0) vcpu_unimpl(vcpu, "disabled perfctr wrmsr: " "0x%x data 0x%llx\n", msr, data); break; case MSR_K7_CLK_CTL: /* * Ignore all writes to this no longer documented MSR. * Writes are only relevant for old K7 processors, * all pre-dating SVM, but a recommended workaround from * AMD for these chips. It is possible to specify the * affected processor models on the command line, hence * the need to ignore the workaround. */ break; case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15: if (kvm_hv_msr_partition_wide(msr)) { int r; mutex_lock(&vcpu->kvm->lock); r = set_msr_hyperv_pw(vcpu, msr, data); mutex_unlock(&vcpu->kvm->lock); return r; } else return set_msr_hyperv(vcpu, msr, data); break; case MSR_IA32_BBL_CR_CTL3: /* Drop writes to this legacy MSR -- see rdmsr * counterpart for further detail. 
*/ vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data %llx\n", msr, data); break; case MSR_AMD64_OSVW_ID_LENGTH: if (!guest_cpuid_has_osvw(vcpu)) return 1; vcpu->arch.osvw.length = data; break; case MSR_AMD64_OSVW_STATUS: if (!guest_cpuid_has_osvw(vcpu)) return 1; vcpu->arch.osvw.status = data; break; default: if (msr && (msr == vcpu->kvm->arch.xen_hvm_config.msr)) return xen_hvm_config(vcpu, data); if (kvm_pmu_msr(vcpu, msr)) return kvm_pmu_set_msr(vcpu, msr_info); if (!ignore_msrs) { vcpu_unimpl(vcpu, "unhandled wrmsr: 0x%x data %llx\n", msr, data); return 1; } else { vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data %llx\n", msr, data); break; } } return 0; } EXPORT_SYMBOL_GPL(kvm_set_msr_common); /* * Reads an msr value (of 'msr_index') into 'pdata'. * Returns 0 on success, non-0 otherwise. * Assumes vcpu_load() was already called. */ int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata) { return kvm_x86_ops->get_msr(vcpu, msr_index, pdata); } static int get_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) { u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges; if (!msr_mtrr_valid(msr)) return 1; if (msr == MSR_MTRRdefType) *pdata = vcpu->arch.mtrr_state.def_type + (vcpu->arch.mtrr_state.enabled << 10); else if (msr == MSR_MTRRfix64K_00000) *pdata = p[0]; else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000) *pdata = p[1 + msr - MSR_MTRRfix16K_80000]; else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000) *pdata = p[3 + msr - MSR_MTRRfix4K_C0000]; else if (msr == MSR_IA32_CR_PAT) *pdata = vcpu->arch.pat; else { /* Variable MTRRs */ int idx, is_mtrr_mask; u64 *pt; idx = (msr - 0x200) / 2; is_mtrr_mask = msr - 0x200 - 2 * idx; if (!is_mtrr_mask) pt = (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo; else pt = (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo; *pdata = *pt; } return 0; } static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) { u64 data; u64 mcg_cap = vcpu->arch.mcg_cap; unsigned bank_num = mcg_cap & 0xff; switch (msr) { case MSR_IA32_P5_MC_ADDR: case MSR_IA32_P5_MC_TYPE: data = 0; break; case MSR_IA32_MCG_CAP: data = vcpu->arch.mcg_cap; break; case MSR_IA32_MCG_CTL: if (!(mcg_cap & MCG_CTL_P)) return 1; data = vcpu->arch.mcg_ctl; break; case MSR_IA32_MCG_STATUS: data = vcpu->arch.mcg_status; break; default: if (msr >= MSR_IA32_MC0_CTL && msr < MSR_IA32_MC0_CTL + 4 * bank_num) { u32 offset = msr - MSR_IA32_MC0_CTL; data = vcpu->arch.mce_banks[offset]; break; } return 1; } *pdata = data; return 0; } static int get_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) { u64 data = 0; struct kvm *kvm = vcpu->kvm; switch (msr) { case HV_X64_MSR_GUEST_OS_ID: data = kvm->arch.hv_guest_os_id; break; case HV_X64_MSR_HYPERCALL: data = kvm->arch.hv_hypercall; break; case HV_X64_MSR_TIME_REF_COUNT: { data = div_u64(get_kernel_ns() + kvm->arch.kvmclock_offset, 100); break; } case HV_X64_MSR_REFERENCE_TSC: data = kvm->arch.hv_tsc_page; break; default: vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr); return 1; } *pdata = data; return 0; } static int get_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) { u64 data = 0; switch (msr) { case HV_X64_MSR_VP_INDEX: { int r; struct kvm_vcpu *v; kvm_for_each_vcpu(r, v, vcpu->kvm) { if (v == vcpu) { data = r; break; } } break; } case HV_X64_MSR_EOI: return kvm_hv_vapic_msr_read(vcpu, APIC_EOI, pdata); case HV_X64_MSR_ICR: return kvm_hv_vapic_msr_read(vcpu, APIC_ICR, pdata); case HV_X64_MSR_TPR: return kvm_hv_vapic_msr_read(vcpu, APIC_TASKPRI, pdata); case 
HV_X64_MSR_APIC_ASSIST_PAGE: data = vcpu->arch.hv_vapic; break; default: vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr); return 1; } *pdata = data; return 0; } int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) { u64 data; switch (msr) { case MSR_IA32_PLATFORM_ID: case MSR_IA32_EBL_CR_POWERON: case MSR_IA32_DEBUGCTLMSR: case MSR_IA32_LASTBRANCHFROMIP: case MSR_IA32_LASTBRANCHTOIP: case MSR_IA32_LASTINTFROMIP: case MSR_IA32_LASTINTTOIP: case MSR_K8_SYSCFG: case MSR_K7_HWCR: case MSR_VM_HSAVE_PA: case MSR_K7_EVNTSEL0: case MSR_K7_EVNTSEL1: case MSR_K7_EVNTSEL2: case MSR_K7_EVNTSEL3: case MSR_K7_PERFCTR0: case MSR_K7_PERFCTR1: case MSR_K7_PERFCTR2: case MSR_K7_PERFCTR3: case MSR_K8_INT_PENDING_MSG: case MSR_AMD64_NB_CFG: case MSR_FAM10H_MMIO_CONF_BASE: case MSR_AMD64_BU_CFG2: data = 0; break; case MSR_P6_PERFCTR0: case MSR_P6_PERFCTR1: case MSR_P6_EVNTSEL0: case MSR_P6_EVNTSEL1: if (kvm_pmu_msr(vcpu, msr)) return kvm_pmu_get_msr(vcpu, msr, pdata); data = 0; break; case MSR_IA32_UCODE_REV: data = 0x100000000ULL; break; case MSR_MTRRcap: data = 0x500 | KVM_NR_VAR_MTRR; break; case 0x200 ... 0x2ff: return get_msr_mtrr(vcpu, msr, pdata); case 0xcd: /* fsb frequency */ data = 3; break; /* * MSR_EBC_FREQUENCY_ID * Conservative value valid for even the basic CPU models. * Models 0,1: 000 in bits 23:21 indicating a bus speed of * 100MHz, model 2 000 in bits 18:16 indicating 100MHz, * and 266MHz for model 3, or 4. Set Core Clock * Frequency to System Bus Frequency Ratio to 1 (bits * 31:24) even though these are only valid for CPU * models > 2, however guests may end up dividing or * multiplying by zero otherwise. */ case MSR_EBC_FREQUENCY_ID: data = 1 << 24; break; case MSR_IA32_APICBASE: data = kvm_get_apic_base(vcpu); break; case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff: return kvm_x2apic_msr_read(vcpu, msr, pdata); break; case MSR_IA32_TSCDEADLINE: data = kvm_get_lapic_tscdeadline_msr(vcpu); break; case MSR_IA32_TSC_ADJUST: data = (u64)vcpu->arch.ia32_tsc_adjust_msr; break; case MSR_IA32_MISC_ENABLE: data = vcpu->arch.ia32_misc_enable_msr; break; case MSR_IA32_PERF_STATUS: /* TSC increment by tick */ data = 1000ULL; /* CPU multiplier */ data |= (((uint64_t)4ULL) << 40); break; case MSR_EFER: data = vcpu->arch.efer; break; case MSR_KVM_WALL_CLOCK: case MSR_KVM_WALL_CLOCK_NEW: data = vcpu->kvm->arch.wall_clock; break; case MSR_KVM_SYSTEM_TIME: case MSR_KVM_SYSTEM_TIME_NEW: data = vcpu->arch.time; break; case MSR_KVM_ASYNC_PF_EN: data = vcpu->arch.apf.msr_val; break; case MSR_KVM_STEAL_TIME: data = vcpu->arch.st.msr_val; break; case MSR_KVM_PV_EOI_EN: data = vcpu->arch.pv_eoi.msr_val; break; case MSR_IA32_P5_MC_ADDR: case MSR_IA32_P5_MC_TYPE: case MSR_IA32_MCG_CAP: case MSR_IA32_MCG_CTL: case MSR_IA32_MCG_STATUS: case MSR_IA32_MC0_CTL ... MSR_IA32_MC0_CTL + 4 * KVM_MAX_MCE_BANKS - 1: return get_msr_mce(vcpu, msr, pdata); case MSR_K7_CLK_CTL: /* * Provide expected ramp-up count for K7. All other * are set to zero, indicating minimum divisors for * every field. * * This prevents guest kernels on AMD host with CPU * type 6, model 8 and higher from exploding due to * the rdmsr failing. */ data = 0x20000000; break; case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15: if (kvm_hv_msr_partition_wide(msr)) { int r; mutex_lock(&vcpu->kvm->lock); r = get_msr_hyperv_pw(vcpu, msr, pdata); mutex_unlock(&vcpu->kvm->lock); return r; } else return get_msr_hyperv(vcpu, msr, pdata); break; case MSR_IA32_BBL_CR_CTL3: /* This legacy MSR exists but isn't fully documented in current * silicon. 
It is however accessed by winxp in very narrow * scenarios where it sets bit #19, itself documented as * a "reserved" bit. Best effort attempt to source coherent * read data here should the balance of the register be * interpreted by the guest: * * L2 cache control register 3: 64GB range, 256KB size, * enabled, latency 0x1, configured */ data = 0xbe702111; break; case MSR_AMD64_OSVW_ID_LENGTH: if (!guest_cpuid_has_osvw(vcpu)) return 1; data = vcpu->arch.osvw.length; break; case MSR_AMD64_OSVW_STATUS: if (!guest_cpuid_has_osvw(vcpu)) return 1; data = vcpu->arch.osvw.status; break; default: if (kvm_pmu_msr(vcpu, msr)) return kvm_pmu_get_msr(vcpu, msr, pdata); if (!ignore_msrs) { vcpu_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr); return 1; } else { vcpu_unimpl(vcpu, "ignored rdmsr: 0x%x\n", msr); data = 0; } break; } *pdata = data; return 0; } EXPORT_SYMBOL_GPL(kvm_get_msr_common); /* * Read or write a bunch of msrs. All parameters are kernel addresses. * * @return number of msrs set successfully. */ static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs, struct kvm_msr_entry *entries, int (*do_msr)(struct kvm_vcpu *vcpu, unsigned index, u64 *data)) { int i, idx; idx = srcu_read_lock(&vcpu->kvm->srcu); for (i = 0; i < msrs->nmsrs; ++i) if (do_msr(vcpu, entries[i].index, &entries[i].data)) break; srcu_read_unlock(&vcpu->kvm->srcu, idx); return i; } /* * Read or write a bunch of msrs. Parameters are user addresses. * * @return number of msrs set successfully. */ static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs, int (*do_msr)(struct kvm_vcpu *vcpu, unsigned index, u64 *data), int writeback) { struct kvm_msrs msrs; struct kvm_msr_entry *entries; int r, n; unsigned size; r = -EFAULT; if (copy_from_user(&msrs, user_msrs, sizeof msrs)) goto out; r = -E2BIG; if (msrs.nmsrs >= MAX_IO_MSRS) goto out; size = sizeof(struct kvm_msr_entry) * msrs.nmsrs; entries = memdup_user(user_msrs->entries, size); if (IS_ERR(entries)) { r = PTR_ERR(entries); goto out; } r = n = __msr_io(vcpu, &msrs, entries, do_msr); if (r < 0) goto out_free; r = -EFAULT; if (writeback && copy_to_user(user_msrs->entries, entries, size)) goto out_free; r = n; out_free: kfree(entries); out: return r; } int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) { int r; switch (ext) { case KVM_CAP_IRQCHIP: case KVM_CAP_HLT: case KVM_CAP_MMU_SHADOW_CACHE_CONTROL: case KVM_CAP_SET_TSS_ADDR: case KVM_CAP_EXT_CPUID: case KVM_CAP_EXT_EMUL_CPUID: case KVM_CAP_CLOCKSOURCE: case KVM_CAP_PIT: case KVM_CAP_NOP_IO_DELAY: case KVM_CAP_MP_STATE: case KVM_CAP_SYNC_MMU: case KVM_CAP_USER_NMI: case KVM_CAP_REINJECT_CONTROL: case KVM_CAP_IRQ_INJECT_STATUS: case KVM_CAP_IRQFD: case KVM_CAP_IOEVENTFD: case KVM_CAP_IOEVENTFD_NO_LENGTH: case KVM_CAP_PIT2: case KVM_CAP_PIT_STATE2: case KVM_CAP_SET_IDENTITY_MAP_ADDR: case KVM_CAP_XEN_HVM: case KVM_CAP_ADJUST_CLOCK: case KVM_CAP_VCPU_EVENTS: case KVM_CAP_HYPERV: case KVM_CAP_HYPERV_VAPIC: case KVM_CAP_HYPERV_SPIN: case KVM_CAP_PCI_SEGMENT: case KVM_CAP_DEBUGREGS: case KVM_CAP_X86_ROBUST_SINGLESTEP: case KVM_CAP_XSAVE: case KVM_CAP_ASYNC_PF: case KVM_CAP_GET_TSC_KHZ: case KVM_CAP_KVMCLOCK_CTRL: case KVM_CAP_READONLY_MEM: case KVM_CAP_HYPERV_TIME: case KVM_CAP_IOAPIC_POLARITY_IGNORED: #ifdef CONFIG_KVM_DEVICE_ASSIGNMENT case KVM_CAP_ASSIGN_DEV_IRQ: case KVM_CAP_PCI_2_3: #endif r = 1; break; case KVM_CAP_COALESCED_MMIO: r = KVM_COALESCED_MMIO_PAGE_OFFSET; break; case KVM_CAP_VAPIC: r = !kvm_x86_ops->cpu_has_accelerated_tpr(); break; case KVM_CAP_NR_VCPUS: r = KVM_SOFT_MAX_VCPUS; 
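		/* This is the recommended vCPU count; the hard limit is
		 * reported separately as KVM_CAP_MAX_VCPUS below. */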
break; case KVM_CAP_MAX_VCPUS: r = KVM_MAX_VCPUS; break; case KVM_CAP_NR_MEMSLOTS: r = KVM_USER_MEM_SLOTS; break; case KVM_CAP_PV_MMU: /* obsolete */ r = 0; break; #ifdef CONFIG_KVM_DEVICE_ASSIGNMENT case KVM_CAP_IOMMU: r = iommu_present(&pci_bus_type); break; #endif case KVM_CAP_MCE: r = KVM_MAX_MCE_BANKS; break; case KVM_CAP_XCRS: r = cpu_has_xsave; break; case KVM_CAP_TSC_CONTROL: r = kvm_has_tsc_control; break; case KVM_CAP_TSC_DEADLINE_TIMER: r = boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER); break; default: r = 0; break; } return r; } long kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) { void __user *argp = (void __user *)arg; long r; switch (ioctl) { case KVM_GET_MSR_INDEX_LIST: { struct kvm_msr_list __user *user_msr_list = argp; struct kvm_msr_list msr_list; unsigned n; r = -EFAULT; if (copy_from_user(&msr_list, user_msr_list, sizeof msr_list)) goto out; n = msr_list.nmsrs; msr_list.nmsrs = num_msrs_to_save + ARRAY_SIZE(emulated_msrs); if (copy_to_user(user_msr_list, &msr_list, sizeof msr_list)) goto out; r = -E2BIG; if (n < msr_list.nmsrs) goto out; r = -EFAULT; if (copy_to_user(user_msr_list->indices, &msrs_to_save, num_msrs_to_save * sizeof(u32))) goto out; if (copy_to_user(user_msr_list->indices + num_msrs_to_save, &emulated_msrs, ARRAY_SIZE(emulated_msrs) * sizeof(u32))) goto out; r = 0; break; } case KVM_GET_SUPPORTED_CPUID: case KVM_GET_EMULATED_CPUID: { struct kvm_cpuid2 __user *cpuid_arg = argp; struct kvm_cpuid2 cpuid; r = -EFAULT; if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid)) goto out; r = kvm_dev_ioctl_get_cpuid(&cpuid, cpuid_arg->entries, ioctl); if (r) goto out; r = -EFAULT; if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid)) goto out; r = 0; break; } case KVM_X86_GET_MCE_CAP_SUPPORTED: { u64 mce_cap; mce_cap = KVM_MCE_CAP_SUPPORTED; r = -EFAULT; if (copy_to_user(argp, &mce_cap, sizeof mce_cap)) goto out; r = 0; break; } default: r = -EINVAL; } out: return r; } static void wbinvd_ipi(void *garbage) { wbinvd(); } static bool need_emulate_wbinvd(struct kvm_vcpu *vcpu) { return kvm_arch_has_noncoherent_dma(vcpu->kvm); } void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) { /* Address WBINVD may be executed by guest */ if (need_emulate_wbinvd(vcpu)) { if (kvm_x86_ops->has_wbinvd_exit()) cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask); else if (vcpu->cpu != -1 && vcpu->cpu != cpu) smp_call_function_single(vcpu->cpu, wbinvd_ipi, NULL, 1); } kvm_x86_ops->vcpu_load(vcpu, cpu); /* Apply any externally detected TSC adjustments (due to suspend) */ if (unlikely(vcpu->arch.tsc_offset_adjustment)) { adjust_tsc_offset_host(vcpu, vcpu->arch.tsc_offset_adjustment); vcpu->arch.tsc_offset_adjustment = 0; kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); } if (unlikely(vcpu->cpu != cpu) || check_tsc_unstable()) { s64 tsc_delta = !vcpu->arch.last_host_tsc ? 
0 : native_read_tsc() - vcpu->arch.last_host_tsc; if (tsc_delta < 0) mark_tsc_unstable("KVM discovered backwards TSC"); if (check_tsc_unstable()) { u64 offset = kvm_x86_ops->compute_tsc_offset(vcpu, vcpu->arch.last_guest_tsc); kvm_x86_ops->write_tsc_offset(vcpu, offset); vcpu->arch.tsc_catchup = 1; } /* * On a host with synchronized TSC, there is no need to update * kvmclock on vcpu->cpu migration */ if (!vcpu->kvm->arch.use_master_clock || vcpu->cpu == -1) kvm_make_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu); if (vcpu->cpu != cpu) kvm_migrate_timers(vcpu); vcpu->cpu = cpu; } accumulate_steal_time(vcpu); kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu); } void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) { kvm_x86_ops->vcpu_put(vcpu); kvm_put_guest_fpu(vcpu); vcpu->arch.last_host_tsc = native_read_tsc(); } static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s) { kvm_x86_ops->sync_pir_to_irr(vcpu); memcpy(s->regs, vcpu->arch.apic->regs, sizeof *s); return 0; } static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s) { kvm_apic_post_state_restore(vcpu, s); update_cr8_intercept(vcpu); return 0; } static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq) { if (irq->irq >= KVM_NR_INTERRUPTS) return -EINVAL; if (irqchip_in_kernel(vcpu->kvm)) return -ENXIO; kvm_queue_interrupt(vcpu, irq->irq, false); kvm_make_request(KVM_REQ_EVENT, vcpu); return 0; } static int kvm_vcpu_ioctl_nmi(struct kvm_vcpu *vcpu) { kvm_inject_nmi(vcpu); return 0; } static int vcpu_ioctl_tpr_access_reporting(struct kvm_vcpu *vcpu, struct kvm_tpr_access_ctl *tac) { if (tac->flags) return -EINVAL; vcpu->arch.tpr_access_reporting = !!tac->enabled; return 0; } static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu, u64 mcg_cap) { int r; unsigned bank_num = mcg_cap & 0xff, bank; r = -EINVAL; if (!bank_num || bank_num >= KVM_MAX_MCE_BANKS) goto out; if (mcg_cap & ~(KVM_MCE_CAP_SUPPORTED | 0xff | 0xff0000)) goto out; r = 0; vcpu->arch.mcg_cap = mcg_cap; /* Init IA32_MCG_CTL to all 1s */ if (mcg_cap & MCG_CTL_P) vcpu->arch.mcg_ctl = ~(u64)0; /* Init IA32_MCi_CTL to all 1s */ for (bank = 0; bank < bank_num; bank++) vcpu->arch.mce_banks[bank*4] = ~(u64)0; out: return r; } static int kvm_vcpu_ioctl_x86_set_mce(struct kvm_vcpu *vcpu, struct kvm_x86_mce *mce) { u64 mcg_cap = vcpu->arch.mcg_cap; unsigned bank_num = mcg_cap & 0xff; u64 *banks = vcpu->arch.mce_banks; if (mce->bank >= bank_num || !(mce->status & MCI_STATUS_VAL)) return -EINVAL; /* * if IA32_MCG_CTL is not all 1s, the uncorrected error * reporting is disabled */ if ((mce->status & MCI_STATUS_UC) && (mcg_cap & MCG_CTL_P) && vcpu->arch.mcg_ctl != ~(u64)0) return 0; banks += 4 * mce->bank; /* * if IA32_MCi_CTL is not all 1s, the uncorrected error * reporting is disabled for the bank */ if ((mce->status & MCI_STATUS_UC) && banks[0] != ~(u64)0) return 0; if (mce->status & MCI_STATUS_UC) { if ((vcpu->arch.mcg_status & MCG_STATUS_MCIP) || !kvm_read_cr4_bits(vcpu, X86_CR4_MCE)) { kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); return 0; } if (banks[1] & MCI_STATUS_VAL) mce->status |= MCI_STATUS_OVER; banks[2] = mce->addr; banks[3] = mce->misc; vcpu->arch.mcg_status = mce->mcg_status; banks[1] = mce->status; kvm_queue_exception(vcpu, MC_VECTOR); } else if (!(banks[1] & MCI_STATUS_VAL) || !(banks[1] & MCI_STATUS_UC)) { if (banks[1] & MCI_STATUS_VAL) mce->status |= MCI_STATUS_OVER; banks[2] = mce->addr; banks[3] = mce->misc; banks[1] = mce->status; } else banks[1] |= MCI_STATUS_OVER; return 0; } static 
void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu, struct kvm_vcpu_events *events) { process_nmi(vcpu); events->exception.injected = vcpu->arch.exception.pending && !kvm_exception_is_soft(vcpu->arch.exception.nr); events->exception.nr = vcpu->arch.exception.nr; events->exception.has_error_code = vcpu->arch.exception.has_error_code; events->exception.pad = 0; events->exception.error_code = vcpu->arch.exception.error_code; events->interrupt.injected = vcpu->arch.interrupt.pending && !vcpu->arch.interrupt.soft; events->interrupt.nr = vcpu->arch.interrupt.nr; events->interrupt.soft = 0; events->interrupt.shadow = kvm_x86_ops->get_interrupt_shadow(vcpu); events->nmi.injected = vcpu->arch.nmi_injected; events->nmi.pending = vcpu->arch.nmi_pending != 0; events->nmi.masked = kvm_x86_ops->get_nmi_mask(vcpu); events->nmi.pad = 0; events->sipi_vector = 0; /* never valid when reporting to user space */ events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING | KVM_VCPUEVENT_VALID_SHADOW); memset(&events->reserved, 0, sizeof(events->reserved)); } static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu, struct kvm_vcpu_events *events) { if (events->flags & ~(KVM_VCPUEVENT_VALID_NMI_PENDING | KVM_VCPUEVENT_VALID_SIPI_VECTOR | KVM_VCPUEVENT_VALID_SHADOW)) return -EINVAL; process_nmi(vcpu); vcpu->arch.exception.pending = events->exception.injected; vcpu->arch.exception.nr = events->exception.nr; vcpu->arch.exception.has_error_code = events->exception.has_error_code; vcpu->arch.exception.error_code = events->exception.error_code; vcpu->arch.interrupt.pending = events->interrupt.injected; vcpu->arch.interrupt.nr = events->interrupt.nr; vcpu->arch.interrupt.soft = events->interrupt.soft; if (events->flags & KVM_VCPUEVENT_VALID_SHADOW) kvm_x86_ops->set_interrupt_shadow(vcpu, events->interrupt.shadow); vcpu->arch.nmi_injected = events->nmi.injected; if (events->flags & KVM_VCPUEVENT_VALID_NMI_PENDING) vcpu->arch.nmi_pending = events->nmi.pending; kvm_x86_ops->set_nmi_mask(vcpu, events->nmi.masked); if (events->flags & KVM_VCPUEVENT_VALID_SIPI_VECTOR && kvm_vcpu_has_lapic(vcpu)) vcpu->arch.apic->sipi_vector = events->sipi_vector; kvm_make_request(KVM_REQ_EVENT, vcpu); return 0; } static void kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu, struct kvm_debugregs *dbgregs) { unsigned long val; memcpy(dbgregs->db, vcpu->arch.db, sizeof(vcpu->arch.db)); _kvm_get_dr(vcpu, 6, &val); dbgregs->dr6 = val; dbgregs->dr7 = vcpu->arch.dr7; dbgregs->flags = 0; memset(&dbgregs->reserved, 0, sizeof(dbgregs->reserved)); } static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu, struct kvm_debugregs *dbgregs) { if (dbgregs->flags) return -EINVAL; memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db)); vcpu->arch.dr6 = dbgregs->dr6; kvm_update_dr6(vcpu); vcpu->arch.dr7 = dbgregs->dr7; kvm_update_dr7(vcpu); return 0; } static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu, struct kvm_xsave *guest_xsave) { if (cpu_has_xsave) { memcpy(guest_xsave->region, &vcpu->arch.guest_fpu.state->xsave, vcpu->arch.guest_xstate_size); *(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)] &= vcpu->arch.guest_supported_xcr0 | XSTATE_FPSSE; } else { memcpy(guest_xsave->region, &vcpu->arch.guest_fpu.state->fxsave, sizeof(struct i387_fxsave_struct)); *(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)] = XSTATE_FPSSE; } } static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu, struct kvm_xsave *guest_xsave) { u64 xstate_bv = *(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)]; 
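	/* xstate_bv is the set of xsave states userspace claims to pass
	 * in; it is validated below against what KVM can actually
	 * expose. */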
if (cpu_has_xsave) { /* * Here we allow setting states that are not present in * CPUID leaf 0xD, index 0, EDX:EAX. This is for compatibility * with old userspace. */ if (xstate_bv & ~kvm_supported_xcr0()) return -EINVAL; memcpy(&vcpu->arch.guest_fpu.state->xsave, guest_xsave->region, vcpu->arch.guest_xstate_size); } else { if (xstate_bv & ~XSTATE_FPSSE) return -EINVAL; memcpy(&vcpu->arch.guest_fpu.state->fxsave, guest_xsave->region, sizeof(struct i387_fxsave_struct)); } return 0; } static void kvm_vcpu_ioctl_x86_get_xcrs(struct kvm_vcpu *vcpu, struct kvm_xcrs *guest_xcrs) { if (!cpu_has_xsave) { guest_xcrs->nr_xcrs = 0; return; } guest_xcrs->nr_xcrs = 1; guest_xcrs->flags = 0; guest_xcrs->xcrs[0].xcr = XCR_XFEATURE_ENABLED_MASK; guest_xcrs->xcrs[0].value = vcpu->arch.xcr0; } static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu, struct kvm_xcrs *guest_xcrs) { int i, r = 0; if (!cpu_has_xsave) return -EINVAL; if (guest_xcrs->nr_xcrs > KVM_MAX_XCRS || guest_xcrs->flags) return -EINVAL; for (i = 0; i < guest_xcrs->nr_xcrs; i++) /* Only support XCR0 currently */ if (guest_xcrs->xcrs[i].xcr == XCR_XFEATURE_ENABLED_MASK) { r = __kvm_set_xcr(vcpu, XCR_XFEATURE_ENABLED_MASK, guest_xcrs->xcrs[i].value); break; } if (r) r = -EINVAL; return r; } /* * kvm_set_guest_paused() indicates to the guest kernel that it has been * stopped by the hypervisor. This function will be called from the host only. * EINVAL is returned when the host attempts to set the flag for a guest that * does not support pv clocks. */ static int kvm_set_guest_paused(struct kvm_vcpu *vcpu) { if (!vcpu->arch.pv_time_enabled) return -EINVAL; vcpu->arch.pvclock_set_guest_stopped_request = true; kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); return 0; } long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) { struct kvm_vcpu *vcpu = filp->private_data; void __user *argp = (void __user *)arg; int r; union { struct kvm_lapic_state *lapic; struct kvm_xsave *xsave; struct kvm_xcrs *xcrs; void *buffer; } u; u.buffer = NULL; switch (ioctl) { case KVM_GET_LAPIC: { r = -EINVAL; if (!vcpu->arch.apic) goto out; u.lapic = kzalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL); r = -ENOMEM; if (!u.lapic) goto out; r = kvm_vcpu_ioctl_get_lapic(vcpu, u.lapic); if (r) goto out; r = -EFAULT; if (copy_to_user(argp, u.lapic, sizeof(struct kvm_lapic_state))) goto out; r = 0; break; } case KVM_SET_LAPIC: { r = -EINVAL; if (!vcpu->arch.apic) goto out; u.lapic = memdup_user(argp, sizeof(*u.lapic)); if (IS_ERR(u.lapic)) return PTR_ERR(u.lapic); r = kvm_vcpu_ioctl_set_lapic(vcpu, u.lapic); break; } case KVM_INTERRUPT: { struct kvm_interrupt irq; r = -EFAULT; if (copy_from_user(&irq, argp, sizeof irq)) goto out; r = kvm_vcpu_ioctl_interrupt(vcpu, &irq); break; } case KVM_NMI: { r = kvm_vcpu_ioctl_nmi(vcpu); break; } case KVM_SET_CPUID: { struct kvm_cpuid __user *cpuid_arg = argp; struct kvm_cpuid cpuid; r = -EFAULT; if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid)) goto out; r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries); break; } case KVM_SET_CPUID2: { struct kvm_cpuid2 __user *cpuid_arg = argp; struct kvm_cpuid2 cpuid; r = -EFAULT; if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid)) goto out; r = kvm_vcpu_ioctl_set_cpuid2(vcpu, &cpuid, cpuid_arg->entries); break; } case KVM_GET_CPUID2: { struct kvm_cpuid2 __user *cpuid_arg = argp; struct kvm_cpuid2 cpuid; r = -EFAULT; if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid)) goto out; r = kvm_vcpu_ioctl_get_cpuid2(vcpu, &cpuid, cpuid_arg->entries); if (r) goto 
out; r = -EFAULT; if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid)) goto out; r = 0; break; } case KVM_GET_MSRS: r = msr_io(vcpu, argp, kvm_get_msr, 1); break; case KVM_SET_MSRS: r = msr_io(vcpu, argp, do_set_msr, 0); break; case KVM_TPR_ACCESS_REPORTING: { struct kvm_tpr_access_ctl tac; r = -EFAULT; if (copy_from_user(&tac, argp, sizeof tac)) goto out; r = vcpu_ioctl_tpr_access_reporting(vcpu, &tac); if (r) goto out; r = -EFAULT; if (copy_to_user(argp, &tac, sizeof tac)) goto out; r = 0; break; }; case KVM_SET_VAPIC_ADDR: { struct kvm_vapic_addr va; r = -EINVAL; if (!irqchip_in_kernel(vcpu->kvm)) goto out; r = -EFAULT; if (copy_from_user(&va, argp, sizeof va)) goto out; r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr); break; } case KVM_X86_SETUP_MCE: { u64 mcg_cap; r = -EFAULT; if (copy_from_user(&mcg_cap, argp, sizeof mcg_cap)) goto out; r = kvm_vcpu_ioctl_x86_setup_mce(vcpu, mcg_cap); break; } case KVM_X86_SET_MCE: { struct kvm_x86_mce mce; r = -EFAULT; if (copy_from_user(&mce, argp, sizeof mce)) goto out; r = kvm_vcpu_ioctl_x86_set_mce(vcpu, &mce); break; } case KVM_GET_VCPU_EVENTS: { struct kvm_vcpu_events events; kvm_vcpu_ioctl_x86_get_vcpu_events(vcpu, &events); r = -EFAULT; if (copy_to_user(argp, &events, sizeof(struct kvm_vcpu_events))) break; r = 0; break; } case KVM_SET_VCPU_EVENTS: { struct kvm_vcpu_events events; r = -EFAULT; if (copy_from_user(&events, argp, sizeof(struct kvm_vcpu_events))) break; r = kvm_vcpu_ioctl_x86_set_vcpu_events(vcpu, &events); break; } case KVM_GET_DEBUGREGS: { struct kvm_debugregs dbgregs; kvm_vcpu_ioctl_x86_get_debugregs(vcpu, &dbgregs); r = -EFAULT; if (copy_to_user(argp, &dbgregs, sizeof(struct kvm_debugregs))) break; r = 0; break; } case KVM_SET_DEBUGREGS: { struct kvm_debugregs dbgregs; r = -EFAULT; if (copy_from_user(&dbgregs, argp, sizeof(struct kvm_debugregs))) break; r = kvm_vcpu_ioctl_x86_set_debugregs(vcpu, &dbgregs); break; } case KVM_GET_XSAVE: { u.xsave = kzalloc(sizeof(struct kvm_xsave), GFP_KERNEL); r = -ENOMEM; if (!u.xsave) break; kvm_vcpu_ioctl_x86_get_xsave(vcpu, u.xsave); r = -EFAULT; if (copy_to_user(argp, u.xsave, sizeof(struct kvm_xsave))) break; r = 0; break; } case KVM_SET_XSAVE: { u.xsave = memdup_user(argp, sizeof(*u.xsave)); if (IS_ERR(u.xsave)) return PTR_ERR(u.xsave); r = kvm_vcpu_ioctl_x86_set_xsave(vcpu, u.xsave); break; } case KVM_GET_XCRS: { u.xcrs = kzalloc(sizeof(struct kvm_xcrs), GFP_KERNEL); r = -ENOMEM; if (!u.xcrs) break; kvm_vcpu_ioctl_x86_get_xcrs(vcpu, u.xcrs); r = -EFAULT; if (copy_to_user(argp, u.xcrs, sizeof(struct kvm_xcrs))) break; r = 0; break; } case KVM_SET_XCRS: { u.xcrs = memdup_user(argp, sizeof(*u.xcrs)); if (IS_ERR(u.xcrs)) return PTR_ERR(u.xcrs); r = kvm_vcpu_ioctl_x86_set_xcrs(vcpu, u.xcrs); break; } case KVM_SET_TSC_KHZ: { u32 user_tsc_khz; r = -EINVAL; user_tsc_khz = (u32)arg; if (user_tsc_khz >= kvm_max_guest_tsc_khz) goto out; if (user_tsc_khz == 0) user_tsc_khz = tsc_khz; kvm_set_tsc_khz(vcpu, user_tsc_khz); r = 0; goto out; } case KVM_GET_TSC_KHZ: { r = vcpu->arch.virtual_tsc_khz; goto out; } case KVM_KVMCLOCK_CTRL: { r = kvm_set_guest_paused(vcpu); goto out; } default: r = -EINVAL; } out: kfree(u.buffer); return r; } int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) { return VM_FAULT_SIGBUS; } static int kvm_vm_ioctl_set_tss_addr(struct kvm *kvm, unsigned long addr) { int ret; if (addr > (unsigned int)(-3 * PAGE_SIZE)) return -EINVAL; ret = kvm_x86_ops->set_tss_addr(kvm, addr); return ret; } static int kvm_vm_ioctl_set_identity_map_addr(struct kvm *kvm, u64 
ident_addr) { kvm->arch.ept_identity_map_addr = ident_addr; return 0; } static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm, u32 kvm_nr_mmu_pages) { if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES) return -EINVAL; mutex_lock(&kvm->slots_lock); kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages); kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages; mutex_unlock(&kvm->slots_lock); return 0; } static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm) { return kvm->arch.n_max_mmu_pages; } static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip) { int r; r = 0; switch (chip->chip_id) { case KVM_IRQCHIP_PIC_MASTER: memcpy(&chip->chip.pic, &pic_irqchip(kvm)->pics[0], sizeof(struct kvm_pic_state)); break; case KVM_IRQCHIP_PIC_SLAVE: memcpy(&chip->chip.pic, &pic_irqchip(kvm)->pics[1], sizeof(struct kvm_pic_state)); break; case KVM_IRQCHIP_IOAPIC: r = kvm_get_ioapic(kvm, &chip->chip.ioapic); break; default: r = -EINVAL; break; } return r; } static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip) { int r; r = 0; switch (chip->chip_id) { case KVM_IRQCHIP_PIC_MASTER: spin_lock(&pic_irqchip(kvm)->lock); memcpy(&pic_irqchip(kvm)->pics[0], &chip->chip.pic, sizeof(struct kvm_pic_state)); spin_unlock(&pic_irqchip(kvm)->lock); break; case KVM_IRQCHIP_PIC_SLAVE: spin_lock(&pic_irqchip(kvm)->lock); memcpy(&pic_irqchip(kvm)->pics[1], &chip->chip.pic, sizeof(struct kvm_pic_state)); spin_unlock(&pic_irqchip(kvm)->lock); break; case KVM_IRQCHIP_IOAPIC: r = kvm_set_ioapic(kvm, &chip->chip.ioapic); break; default: r = -EINVAL; break; } kvm_pic_update_irq(pic_irqchip(kvm)); return r; } static int kvm_vm_ioctl_get_pit(struct kvm *kvm, struct kvm_pit_state *ps) { int r = 0; mutex_lock(&kvm->arch.vpit->pit_state.lock); memcpy(ps, &kvm->arch.vpit->pit_state, sizeof(struct kvm_pit_state)); mutex_unlock(&kvm->arch.vpit->pit_state.lock); return r; } static int kvm_vm_ioctl_set_pit(struct kvm *kvm, struct kvm_pit_state *ps) { int r = 0; mutex_lock(&kvm->arch.vpit->pit_state.lock); memcpy(&kvm->arch.vpit->pit_state, ps, sizeof(struct kvm_pit_state)); kvm_pit_load_count(kvm, 0, ps->channels[0].count, 0); mutex_unlock(&kvm->arch.vpit->pit_state.lock); return r; } static int kvm_vm_ioctl_get_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps) { int r = 0; mutex_lock(&kvm->arch.vpit->pit_state.lock); memcpy(ps->channels, &kvm->arch.vpit->pit_state.channels, sizeof(ps->channels)); ps->flags = kvm->arch.vpit->pit_state.flags; mutex_unlock(&kvm->arch.vpit->pit_state.lock); memset(&ps->reserved, 0, sizeof(ps->reserved)); return r; } static int kvm_vm_ioctl_set_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps) { int r = 0, start = 0; u32 prev_legacy, cur_legacy; mutex_lock(&kvm->arch.vpit->pit_state.lock); prev_legacy = kvm->arch.vpit->pit_state.flags & KVM_PIT_FLAGS_HPET_LEGACY; cur_legacy = ps->flags & KVM_PIT_FLAGS_HPET_LEGACY; if (!prev_legacy && cur_legacy) start = 1; memcpy(&kvm->arch.vpit->pit_state.channels, &ps->channels, sizeof(kvm->arch.vpit->pit_state.channels)); kvm->arch.vpit->pit_state.flags = ps->flags; kvm_pit_load_count(kvm, 0, kvm->arch.vpit->pit_state.channels[0].count, start); mutex_unlock(&kvm->arch.vpit->pit_state.lock); return r; } static int kvm_vm_ioctl_reinject(struct kvm *kvm, struct kvm_reinject_control *control) { if (!kvm->arch.vpit) return -ENXIO; mutex_lock(&kvm->arch.vpit->pit_state.lock); kvm->arch.vpit->pit_state.reinject = control->pit_reinject; mutex_unlock(&kvm->arch.vpit->pit_state.lock); return 0; } /** * kvm_vm_ioctl_get_dirty_log - get and clear the log 
of dirty pages in a slot
 * @kvm:	kvm instance
 * @log:	slot id and address to which we copy the log
 *
 * We need to keep in mind that VCPU threads can write to the bitmap
 * concurrently.  So, to avoid losing data, we keep the following order for
 * each bit:
 *
 *   1. Take a snapshot of the bit and clear it if needed.
 *   2. Write protect the corresponding page.
 *   3. Flush TLBs if needed.
 *   4. Copy the snapshot to the userspace.
 *
 * Between 2 and 3, the guest may write to the page using the remaining TLB
 * entry.  This is not a problem because the page will be reported dirty at
 * step 4 using the snapshot taken before, and step 3 ensures that successive
 * writes will be logged for the next call.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
	int r;
	struct kvm_memory_slot *memslot;
	unsigned long n, i;
	unsigned long *dirty_bitmap;
	unsigned long *dirty_bitmap_buffer;
	bool is_dirty = false;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	memslot = id_to_memslot(kvm->memslots, log->slot);

	dirty_bitmap = memslot->dirty_bitmap;
	r = -ENOENT;
	if (!dirty_bitmap)
		goto out;

	n = kvm_dirty_bitmap_bytes(memslot);

	dirty_bitmap_buffer = dirty_bitmap + n / sizeof(long);
	memset(dirty_bitmap_buffer, 0, n);

	spin_lock(&kvm->mmu_lock);

	for (i = 0; i < n / sizeof(long); i++) {
		unsigned long mask;
		gfn_t offset;

		if (!dirty_bitmap[i])
			continue;

		is_dirty = true;

		mask = xchg(&dirty_bitmap[i], 0);
		dirty_bitmap_buffer[i] = mask;

		offset = i * BITS_PER_LONG;
		kvm_mmu_write_protect_pt_masked(kvm, memslot, offset, mask);
	}

	spin_unlock(&kvm->mmu_lock);

	/* See the comments in kvm_mmu_slot_remove_write_access(). */
	lockdep_assert_held(&kvm->slots_lock);

	/*
	 * All the TLBs can be flushed out of mmu lock, see the comments in
	 * kvm_mmu_slot_remove_write_access().
	 */
	if (is_dirty)
		kvm_flush_remote_tlbs(kvm);

	r = -EFAULT;
	if (copy_to_user(log->dirty_bitmap, dirty_bitmap_buffer, n))
		goto out;

	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,
			bool line_status)
{
	if (!irqchip_in_kernel(kvm))
		return -ENXIO;

	irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
					irq_event->irq, irq_event->level,
					line_status);
	return 0;
}

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r = -ENOTTY;
	/*
	 * This union makes it completely explicit to gcc-3.x
	 * that these two variables' stack usage should be
	 * combined, not added together.
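	 *
	 * (Editor addition) Stepping back to kvm_vm_ioctl_get_dirty_log()
	 * above: from userspace the handler is driven roughly as in this
	 * hedged sketch.  It assumes a vm_fd whose slot was registered with
	 * KVM_MEM_LOG_DIRTY_PAGES and 4 KiB guest pages; vm_fd, slot and
	 * mem_size are placeholders, and the bitmap sizing mirrors the
	 * kernel's ALIGN(npages, BITS_PER_LONG) / 8 on a 64-bit host:
	 *
	 *   #include <linux/kvm.h>
	 *   #include <stdlib.h>
	 *   #include <sys/ioctl.h>
	 *
	 *   static unsigned long *fetch_dirty_bitmap(int vm_fd, __u32 slot,
	 *                                            size_t mem_size)
	 *   {
	 *           size_t npages = mem_size / 4096;
	 *           size_t bytes  = ((npages + 63) / 64) * 8;
	 *           unsigned long *bitmap = calloc(1, bytes);
	 *           struct kvm_dirty_log log = { .slot = slot };
	 *
	 *           if (!bitmap)
	 *                   return NULL;
	 *           log.dirty_bitmap = bitmap;
	 *           // Kernel side: snapshot, clear, write-protect, copy out.
	 *           if (ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log) < 0) {
	 *                   free(bitmap);
	 *                   return NULL;
	 *           }
	 *           return bitmap;   // one bit per dirtied guest page
	 *   }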
*/ union { struct kvm_pit_state ps; struct kvm_pit_state2 ps2; struct kvm_pit_config pit_config; } u; switch (ioctl) { case KVM_SET_TSS_ADDR: r = kvm_vm_ioctl_set_tss_addr(kvm, arg); break; case KVM_SET_IDENTITY_MAP_ADDR: { u64 ident_addr; r = -EFAULT; if (copy_from_user(&ident_addr, argp, sizeof ident_addr)) goto out; r = kvm_vm_ioctl_set_identity_map_addr(kvm, ident_addr); break; } case KVM_SET_NR_MMU_PAGES: r = kvm_vm_ioctl_set_nr_mmu_pages(kvm, arg); break; case KVM_GET_NR_MMU_PAGES: r = kvm_vm_ioctl_get_nr_mmu_pages(kvm); break; case KVM_CREATE_IRQCHIP: { struct kvm_pic *vpic; mutex_lock(&kvm->lock); r = -EEXIST; if (kvm->arch.vpic) goto create_irqchip_unlock; r = -EINVAL; if (atomic_read(&kvm->online_vcpus)) goto create_irqchip_unlock; r = -ENOMEM; vpic = kvm_create_pic(kvm); if (vpic) { r = kvm_ioapic_init(kvm); if (r) { mutex_lock(&kvm->slots_lock); kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &vpic->dev_master); kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &vpic->dev_slave); kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &vpic->dev_eclr); mutex_unlock(&kvm->slots_lock); kfree(vpic); goto create_irqchip_unlock; } } else goto create_irqchip_unlock; smp_wmb(); kvm->arch.vpic = vpic; smp_wmb(); r = kvm_setup_default_irq_routing(kvm); if (r) { mutex_lock(&kvm->slots_lock); mutex_lock(&kvm->irq_lock); kvm_ioapic_destroy(kvm); kvm_destroy_pic(kvm); mutex_unlock(&kvm->irq_lock); mutex_unlock(&kvm->slots_lock); } create_irqchip_unlock: mutex_unlock(&kvm->lock); break; } case KVM_CREATE_PIT: u.pit_config.flags = KVM_PIT_SPEAKER_DUMMY; goto create_pit; case KVM_CREATE_PIT2: r = -EFAULT; if (copy_from_user(&u.pit_config, argp, sizeof(struct kvm_pit_config))) goto out; create_pit: mutex_lock(&kvm->slots_lock); r = -EEXIST; if (kvm->arch.vpit) goto create_pit_unlock; r = -ENOMEM; kvm->arch.vpit = kvm_create_pit(kvm, u.pit_config.flags); if (kvm->arch.vpit) r = 0; create_pit_unlock: mutex_unlock(&kvm->slots_lock); break; case KVM_GET_IRQCHIP: { /* 0: PIC master, 1: PIC slave, 2: IOAPIC */ struct kvm_irqchip *chip; chip = memdup_user(argp, sizeof(*chip)); if (IS_ERR(chip)) { r = PTR_ERR(chip); goto out; } r = -ENXIO; if (!irqchip_in_kernel(kvm)) goto get_irqchip_out; r = kvm_vm_ioctl_get_irqchip(kvm, chip); if (r) goto get_irqchip_out; r = -EFAULT; if (copy_to_user(argp, chip, sizeof *chip)) goto get_irqchip_out; r = 0; get_irqchip_out: kfree(chip); break; } case KVM_SET_IRQCHIP: { /* 0: PIC master, 1: PIC slave, 2: IOAPIC */ struct kvm_irqchip *chip; chip = memdup_user(argp, sizeof(*chip)); if (IS_ERR(chip)) { r = PTR_ERR(chip); goto out; } r = -ENXIO; if (!irqchip_in_kernel(kvm)) goto set_irqchip_out; r = kvm_vm_ioctl_set_irqchip(kvm, chip); if (r) goto set_irqchip_out; r = 0; set_irqchip_out: kfree(chip); break; } case KVM_GET_PIT: { r = -EFAULT; if (copy_from_user(&u.ps, argp, sizeof(struct kvm_pit_state))) goto out; r = -ENXIO; if (!kvm->arch.vpit) goto out; r = kvm_vm_ioctl_get_pit(kvm, &u.ps); if (r) goto out; r = -EFAULT; if (copy_to_user(argp, &u.ps, sizeof(struct kvm_pit_state))) goto out; r = 0; break; } case KVM_SET_PIT: { r = -EFAULT; if (copy_from_user(&u.ps, argp, sizeof u.ps)) goto out; r = -ENXIO; if (!kvm->arch.vpit) goto out; r = kvm_vm_ioctl_set_pit(kvm, &u.ps); break; } case KVM_GET_PIT2: { r = -ENXIO; if (!kvm->arch.vpit) goto out; r = kvm_vm_ioctl_get_pit2(kvm, &u.ps2); if (r) goto out; r = -EFAULT; if (copy_to_user(argp, &u.ps2, sizeof(u.ps2))) goto out; r = 0; break; } case KVM_SET_PIT2: { r = -EFAULT; if (copy_from_user(&u.ps2, argp, sizeof(u.ps2))) goto out; r = -ENXIO; if 
(!kvm->arch.vpit) goto out; r = kvm_vm_ioctl_set_pit2(kvm, &u.ps2); break; } case KVM_REINJECT_CONTROL: { struct kvm_reinject_control control; r = -EFAULT; if (copy_from_user(&control, argp, sizeof(control))) goto out; r = kvm_vm_ioctl_reinject(kvm, &control); break; } case KVM_XEN_HVM_CONFIG: { r = -EFAULT; if (copy_from_user(&kvm->arch.xen_hvm_config, argp, sizeof(struct kvm_xen_hvm_config))) goto out; r = -EINVAL; if (kvm->arch.xen_hvm_config.flags) goto out; r = 0; break; } case KVM_SET_CLOCK: { struct kvm_clock_data user_ns; u64 now_ns; s64 delta; r = -EFAULT; if (copy_from_user(&user_ns, argp, sizeof(user_ns))) goto out; r = -EINVAL; if (user_ns.flags) goto out; r = 0; local_irq_disable(); now_ns = get_kernel_ns(); delta = user_ns.clock - now_ns; local_irq_enable(); kvm->arch.kvmclock_offset = delta; kvm_gen_update_masterclock(kvm); break; } case KVM_GET_CLOCK: { struct kvm_clock_data user_ns; u64 now_ns; local_irq_disable(); now_ns = get_kernel_ns(); user_ns.clock = kvm->arch.kvmclock_offset + now_ns; local_irq_enable(); user_ns.flags = 0; memset(&user_ns.pad, 0, sizeof(user_ns.pad)); r = -EFAULT; if (copy_to_user(argp, &user_ns, sizeof(user_ns))) goto out; r = 0; break; } default: ; } out: return r; } static void kvm_init_msr_list(void) { u32 dummy[2]; unsigned i, j; /* skip the first msrs in the list. KVM-specific */ for (i = j = KVM_SAVE_MSRS_BEGIN; i < ARRAY_SIZE(msrs_to_save); i++) { if (rdmsr_safe(msrs_to_save[i], &dummy[0], &dummy[1]) < 0) continue; /* * Even MSRs that are valid in the host may not be exposed * to the guests in some cases. We could work around this * in VMX with the generic MSR save/load machinery, but it * is not really worthwhile since it will really only * happen with nested virtualization. */ switch (msrs_to_save[i]) { case MSR_IA32_BNDCFGS: if (!kvm_x86_ops->mpx_supported()) continue; break; default: break; } if (j < i) msrs_to_save[j] = msrs_to_save[i]; j++; } num_msrs_to_save = j; } static int vcpu_mmio_write(struct kvm_vcpu *vcpu, gpa_t addr, int len, const void *v) { int handled = 0; int n; do { n = min(len, 8); if (!(vcpu->arch.apic && !kvm_iodevice_write(&vcpu->arch.apic->dev, addr, n, v)) && kvm_io_bus_write(vcpu->kvm, KVM_MMIO_BUS, addr, n, v)) break; handled += n; addr += n; len -= n; v += n; } while (len); return handled; } static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v) { int handled = 0; int n; do { n = min(len, 8); if (!(vcpu->arch.apic && !kvm_iodevice_read(&vcpu->arch.apic->dev, addr, n, v)) && kvm_io_bus_read(vcpu->kvm, KVM_MMIO_BUS, addr, n, v)) break; trace_kvm_mmio(KVM_TRACE_MMIO_READ, n, addr, *(u64 *)v); handled += n; addr += n; len -= n; v += n; } while (len); return handled; } static void kvm_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg) { kvm_x86_ops->set_segment(vcpu, var, seg); } void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg) { kvm_x86_ops->get_segment(vcpu, var, seg); } gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access, struct x86_exception *exception) { gpa_t t_gpa; BUG_ON(!mmu_is_nested(vcpu)); /* NPT walks are always user-walks */ access |= PFERR_USER_MASK; t_gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gpa, access, exception); return t_gpa; } gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, struct x86_exception *exception) { u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? 
PFERR_USER_MASK : 0; return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception); } gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva, struct x86_exception *exception) { u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; access |= PFERR_FETCH_MASK; return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception); } gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva, struct x86_exception *exception) { u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; access |= PFERR_WRITE_MASK; return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception); } /* uses this to access any guest's mapped memory without checking CPL */ gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva, struct x86_exception *exception) { return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, 0, exception); } static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes, struct kvm_vcpu *vcpu, u32 access, struct x86_exception *exception) { void *data = val; int r = X86EMUL_CONTINUE; while (bytes) { gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, access, exception); unsigned offset = addr & (PAGE_SIZE-1); unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset); int ret; if (gpa == UNMAPPED_GVA) return X86EMUL_PROPAGATE_FAULT; ret = kvm_read_guest_page(vcpu->kvm, gpa >> PAGE_SHIFT, data, offset, toread); if (ret < 0) { r = X86EMUL_IO_NEEDED; goto out; } bytes -= toread; data += toread; addr += toread; } out: return r; } /* used for instruction fetching */ static int kvm_fetch_guest_virt(struct x86_emulate_ctxt *ctxt, gva_t addr, void *val, unsigned int bytes, struct x86_exception *exception) { struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; unsigned offset; int ret; /* Inline kvm_read_guest_virt_helper for speed. */ gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, access|PFERR_FETCH_MASK, exception); if (unlikely(gpa == UNMAPPED_GVA)) return X86EMUL_PROPAGATE_FAULT; offset = addr & (PAGE_SIZE-1); if (WARN_ON(offset + bytes > PAGE_SIZE)) bytes = (unsigned)PAGE_SIZE - offset; ret = kvm_read_guest_page(vcpu->kvm, gpa >> PAGE_SHIFT, val, offset, bytes); if (unlikely(ret < 0)) return X86EMUL_IO_NEEDED; return X86EMUL_CONTINUE; } int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt, gva_t addr, void *val, unsigned int bytes, struct x86_exception *exception) { struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? 
PFERR_USER_MASK : 0; return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access, exception); } EXPORT_SYMBOL_GPL(kvm_read_guest_virt); static int kvm_read_guest_virt_system(struct x86_emulate_ctxt *ctxt, gva_t addr, void *val, unsigned int bytes, struct x86_exception *exception) { struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, 0, exception); } int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt, gva_t addr, void *val, unsigned int bytes, struct x86_exception *exception) { struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); void *data = val; int r = X86EMUL_CONTINUE; while (bytes) { gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, PFERR_WRITE_MASK, exception); unsigned offset = addr & (PAGE_SIZE-1); unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset); int ret; if (gpa == UNMAPPED_GVA) return X86EMUL_PROPAGATE_FAULT; ret = kvm_write_guest(vcpu->kvm, gpa, data, towrite); if (ret < 0) { r = X86EMUL_IO_NEEDED; goto out; } bytes -= towrite; data += towrite; addr += towrite; } out: return r; } EXPORT_SYMBOL_GPL(kvm_write_guest_virt_system); static int vcpu_mmio_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva, gpa_t *gpa, struct x86_exception *exception, bool write) { u32 access = ((kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0) | (write ? PFERR_WRITE_MASK : 0); if (vcpu_match_mmio_gva(vcpu, gva) && !permission_fault(vcpu, vcpu->arch.walk_mmu, vcpu->arch.access, access)) { *gpa = vcpu->arch.mmio_gfn << PAGE_SHIFT | (gva & (PAGE_SIZE - 1)); trace_vcpu_match_mmio(gva, *gpa, write, false); return 1; } *gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception); if (*gpa == UNMAPPED_GVA) return -1; /* For APIC access vmexit */ if ((*gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE) return 1; if (vcpu_match_mmio_gpa(vcpu, *gpa)) { trace_vcpu_match_mmio(gva, *gpa, write, true); return 1; } return 0; } int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa, const void *val, int bytes) { int ret; ret = kvm_write_guest(vcpu->kvm, gpa, val, bytes); if (ret < 0) return 0; kvm_mmu_pte_write(vcpu, gpa, val, bytes); return 1; } struct read_write_emulator_ops { int (*read_write_prepare)(struct kvm_vcpu *vcpu, void *val, int bytes); int (*read_write_emulate)(struct kvm_vcpu *vcpu, gpa_t gpa, void *val, int bytes); int (*read_write_mmio)(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes, void *val); int (*read_write_exit_mmio)(struct kvm_vcpu *vcpu, gpa_t gpa, void *val, int bytes); bool write; }; static int read_prepare(struct kvm_vcpu *vcpu, void *val, int bytes) { if (vcpu->mmio_read_completed) { trace_kvm_mmio(KVM_TRACE_MMIO_READ, bytes, vcpu->mmio_fragments[0].gpa, *(u64 *)val); vcpu->mmio_read_completed = 0; return 1; } return 0; } static int read_emulate(struct kvm_vcpu *vcpu, gpa_t gpa, void *val, int bytes) { return !kvm_read_guest(vcpu->kvm, gpa, val, bytes); } static int write_emulate(struct kvm_vcpu *vcpu, gpa_t gpa, void *val, int bytes) { return emulator_write_phys(vcpu, gpa, val, bytes); } static int write_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes, void *val) { trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, bytes, gpa, *(u64 *)val); return vcpu_mmio_write(vcpu, gpa, bytes, val); } static int read_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, void *val, int bytes) { trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, bytes, gpa, 0); return X86EMUL_IO_NEEDED; } static int write_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, void *val, int bytes) { struct kvm_mmio_fragment *frag = &vcpu->mmio_fragments[0]; 
memcpy(vcpu->run->mmio.data, frag->data, min(8u, frag->len)); return X86EMUL_CONTINUE; } static const struct read_write_emulator_ops read_emultor = { .read_write_prepare = read_prepare, .read_write_emulate = read_emulate, .read_write_mmio = vcpu_mmio_read, .read_write_exit_mmio = read_exit_mmio, }; static const struct read_write_emulator_ops write_emultor = { .read_write_emulate = write_emulate, .read_write_mmio = write_mmio, .read_write_exit_mmio = write_exit_mmio, .write = true, }; static int emulator_read_write_onepage(unsigned long addr, void *val, unsigned int bytes, struct x86_exception *exception, struct kvm_vcpu *vcpu, const struct read_write_emulator_ops *ops) { gpa_t gpa; int handled, ret; bool write = ops->write; struct kvm_mmio_fragment *frag; ret = vcpu_mmio_gva_to_gpa(vcpu, addr, &gpa, exception, write); if (ret < 0) return X86EMUL_PROPAGATE_FAULT; /* For APIC access vmexit */ if (ret) goto mmio; if (ops->read_write_emulate(vcpu, gpa, val, bytes)) return X86EMUL_CONTINUE; mmio: /* * Is this MMIO handled locally? */ handled = ops->read_write_mmio(vcpu, gpa, bytes, val); if (handled == bytes) return X86EMUL_CONTINUE; gpa += handled; bytes -= handled; val += handled; WARN_ON(vcpu->mmio_nr_fragments >= KVM_MAX_MMIO_FRAGMENTS); frag = &vcpu->mmio_fragments[vcpu->mmio_nr_fragments++]; frag->gpa = gpa; frag->data = val; frag->len = bytes; return X86EMUL_CONTINUE; } int emulator_read_write(struct x86_emulate_ctxt *ctxt, unsigned long addr, void *val, unsigned int bytes, struct x86_exception *exception, const struct read_write_emulator_ops *ops) { struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); gpa_t gpa; int rc; if (ops->read_write_prepare && ops->read_write_prepare(vcpu, val, bytes)) return X86EMUL_CONTINUE; vcpu->mmio_nr_fragments = 0; /* Crossing a page boundary? 
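	 *
	 * (Editor addition) A hedged aside on the split just below:
	 * "-addr & ~PAGE_MASK" is the usual idiom for "bytes left until
	 * addr reaches the end of its page".  A self-contained sketch of
	 * the identity, with 4 KiB pages assumed as on x86:
	 *
	 *   // (-addr) mod PAGE_SIZE == PAGE_SIZE - (addr mod PAGE_SIZE),
	 *   // except that a page-aligned addr yields 0 -- which cannot
	 *   // happen here, as this branch runs only for crossing accesses.
	 *   static unsigned long bytes_to_page_end(unsigned long addr)
	 *   {
	 *           return -addr & 4095;   // == -addr & ~PAGE_MASK
	 *   }
	 *
	 *   // e.g. addr == 0x1ffe: 0x1000 - 0xffe == 2 bytes on this page.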
 */
	if (((addr + bytes - 1) ^ addr) & PAGE_MASK) {
		int now;

		now = -addr & ~PAGE_MASK;
		rc = emulator_read_write_onepage(addr, val, now, exception,
						 vcpu, ops);

		if (rc != X86EMUL_CONTINUE)
			return rc;
		addr += now;
		val += now;
		bytes -= now;
	}

	rc = emulator_read_write_onepage(addr, val, bytes, exception,
					 vcpu, ops);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	if (!vcpu->mmio_nr_fragments)
		return rc;

	gpa = vcpu->mmio_fragments[0].gpa;

	vcpu->mmio_needed = 1;
	vcpu->mmio_cur_fragment = 0;

	vcpu->run->mmio.len = min(8u, vcpu->mmio_fragments[0].len);
	vcpu->run->mmio.is_write = vcpu->mmio_is_write = ops->write;
	vcpu->run->exit_reason = KVM_EXIT_MMIO;
	vcpu->run->mmio.phys_addr = gpa;

	return ops->read_write_exit_mmio(vcpu, gpa, val, bytes);
}

static int emulator_read_emulated(struct x86_emulate_ctxt *ctxt,
				  unsigned long addr,
				  void *val,
				  unsigned int bytes,
				  struct x86_exception *exception)
{
	return emulator_read_write(ctxt, addr, val, bytes,
				   exception, &read_emultor);
}

int emulator_write_emulated(struct x86_emulate_ctxt *ctxt,
			    unsigned long addr,
			    const void *val,
			    unsigned int bytes,
			    struct x86_exception *exception)
{
	return emulator_read_write(ctxt, addr, (void *)val, bytes,
				   exception, &write_emultor);
}

#define CMPXCHG_TYPE(t, ptr, old, new) \
	(cmpxchg((t *)(ptr), *(t *)(old), *(t *)(new)) == *(t *)(old))

#ifdef CONFIG_X86_64
#  define CMPXCHG64(ptr, old, new) CMPXCHG_TYPE(u64, ptr, old, new)
#else
#  define CMPXCHG64(ptr, old, new) \
	(cmpxchg64((u64 *)(ptr), *(u64 *)(old), *(u64 *)(new)) == *(u64 *)(old))
#endif

static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt,
				     unsigned long addr,
				     const void *old,
				     const void *new,
				     unsigned int bytes,
				     struct x86_exception *exception)
{
	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
	gpa_t gpa;
	struct page *page;
	char *kaddr;
	bool exchanged;

	/* the guest's cmpxchg8b has to be emulated atomically */
	if (bytes > 8 || (bytes & (bytes - 1)))
		goto emul_write;

	gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, NULL);

	if (gpa == UNMAPPED_GVA ||
	    (gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
		goto emul_write;

	if (((gpa + bytes - 1) & PAGE_MASK) != (gpa & PAGE_MASK))
		goto emul_write;

	page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
	if (is_error_page(page))
		goto emul_write;

	kaddr = kmap_atomic(page);
	kaddr += offset_in_page(gpa);
	switch (bytes) {
	case 1:
		exchanged = CMPXCHG_TYPE(u8, kaddr, old, new);
		break;
	case 2:
		exchanged = CMPXCHG_TYPE(u16, kaddr, old, new);
		break;
	case 4:
		exchanged = CMPXCHG_TYPE(u32, kaddr, old, new);
		break;
	case 8:
		exchanged = CMPXCHG64(kaddr, old, new);
		break;
	default:
		BUG();
	}
	kunmap_atomic(kaddr);
	kvm_release_page_dirty(page);

	if (!exchanged)
		return X86EMUL_CMPXCHG_FAILED;

	mark_page_dirty(vcpu->kvm, gpa >> PAGE_SHIFT);
	kvm_mmu_pte_write(vcpu, gpa, new, bytes);

	return X86EMUL_CONTINUE;

emul_write:
	printk_once(KERN_WARNING "kvm: emulating exchange as write\n");

	return emulator_write_emulated(ctxt, addr, new, bytes, exception);
}

static int kernel_pio(struct kvm_vcpu *vcpu, void *pd)
{
	/* TODO: String I/O for in kernel device */
	int r;

	if (vcpu->arch.pio.in)
		r = kvm_io_bus_read(vcpu->kvm, KVM_PIO_BUS, vcpu->arch.pio.port,
				    vcpu->arch.pio.size, pd);
	else
		r = kvm_io_bus_write(vcpu->kvm, KVM_PIO_BUS,
				     vcpu->arch.pio.port, vcpu->arch.pio.size,
				     pd);
	return r;
}

static int emulator_pio_in_out(struct kvm_vcpu *vcpu, int size,
			       unsigned short port, void *val,
			       unsigned int count, bool in)
{
	vcpu->arch.pio.port = port;
	vcpu->arch.pio.in = in;
	vcpu->arch.pio.count = count;
	vcpu->arch.pio.size = size;

	if (!kernel_pio(vcpu, vcpu->arch.pio_data)) {
		vcpu->arch.pio.count = 0;
		return
1; } vcpu->run->exit_reason = KVM_EXIT_IO; vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT; vcpu->run->io.size = size; vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE; vcpu->run->io.count = count; vcpu->run->io.port = port; return 0; } static int emulator_pio_in_emulated(struct x86_emulate_ctxt *ctxt, int size, unsigned short port, void *val, unsigned int count) { struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); int ret; if (vcpu->arch.pio.count) goto data_avail; ret = emulator_pio_in_out(vcpu, size, port, val, count, true); if (ret) { data_avail: memcpy(val, vcpu->arch.pio_data, size * count); trace_kvm_pio(KVM_PIO_IN, port, size, count, vcpu->arch.pio_data); vcpu->arch.pio.count = 0; return 1; } return 0; } static int emulator_pio_out_emulated(struct x86_emulate_ctxt *ctxt, int size, unsigned short port, const void *val, unsigned int count) { struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); memcpy(vcpu->arch.pio_data, val, size * count); trace_kvm_pio(KVM_PIO_OUT, port, size, count, vcpu->arch.pio_data); return emulator_pio_in_out(vcpu, size, port, (void *)val, count, false); } static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg) { return kvm_x86_ops->get_segment_base(vcpu, seg); } static void emulator_invlpg(struct x86_emulate_ctxt *ctxt, ulong address) { kvm_mmu_invlpg(emul_to_vcpu(ctxt), address); } int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu) { if (!need_emulate_wbinvd(vcpu)) return X86EMUL_CONTINUE; if (kvm_x86_ops->has_wbinvd_exit()) { int cpu = get_cpu(); cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask); smp_call_function_many(vcpu->arch.wbinvd_dirty_mask, wbinvd_ipi, NULL, 1); put_cpu(); cpumask_clear(vcpu->arch.wbinvd_dirty_mask); } else wbinvd(); return X86EMUL_CONTINUE; } EXPORT_SYMBOL_GPL(kvm_emulate_wbinvd); static void emulator_wbinvd(struct x86_emulate_ctxt *ctxt) { kvm_emulate_wbinvd(emul_to_vcpu(ctxt)); } int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long *dest) { return _kvm_get_dr(emul_to_vcpu(ctxt), dr, dest); } int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value) { return __kvm_set_dr(emul_to_vcpu(ctxt), dr, value); } static u64 mk_cr_64(u64 curr_cr, u32 new_val) { return (curr_cr & ~((1ULL << 32) - 1)) | new_val; } static unsigned long emulator_get_cr(struct x86_emulate_ctxt *ctxt, int cr) { struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); unsigned long value; switch (cr) { case 0: value = kvm_read_cr0(vcpu); break; case 2: value = vcpu->arch.cr2; break; case 3: value = kvm_read_cr3(vcpu); break; case 4: value = kvm_read_cr4(vcpu); break; case 8: value = kvm_get_cr8(vcpu); break; default: kvm_err("%s: unexpected cr %u\n", __func__, cr); return 0; } return value; } static int emulator_set_cr(struct x86_emulate_ctxt *ctxt, int cr, ulong val) { struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); int res = 0; switch (cr) { case 0: res = kvm_set_cr0(vcpu, mk_cr_64(kvm_read_cr0(vcpu), val)); break; case 2: vcpu->arch.cr2 = val; break; case 3: res = kvm_set_cr3(vcpu, val); break; case 4: res = kvm_set_cr4(vcpu, mk_cr_64(kvm_read_cr4(vcpu), val)); break; case 8: res = kvm_set_cr8(vcpu, val); break; default: kvm_err("%s: unexpected cr %u\n", __func__, cr); res = -1; } return res; } static int emulator_get_cpl(struct x86_emulate_ctxt *ctxt) { return kvm_x86_ops->get_cpl(emul_to_vcpu(ctxt)); } static void emulator_get_gdt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt) { kvm_x86_ops->get_gdt(emul_to_vcpu(ctxt), dt); } static void emulator_get_idt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt) { 
kvm_x86_ops->get_idt(emul_to_vcpu(ctxt), dt); } static void emulator_set_gdt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt) { kvm_x86_ops->set_gdt(emul_to_vcpu(ctxt), dt); } static void emulator_set_idt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt) { kvm_x86_ops->set_idt(emul_to_vcpu(ctxt), dt); } static unsigned long emulator_get_cached_segment_base( struct x86_emulate_ctxt *ctxt, int seg) { return get_segment_base(emul_to_vcpu(ctxt), seg); } static bool emulator_get_segment(struct x86_emulate_ctxt *ctxt, u16 *selector, struct desc_struct *desc, u32 *base3, int seg) { struct kvm_segment var; kvm_get_segment(emul_to_vcpu(ctxt), &var, seg); *selector = var.selector; if (var.unusable) { memset(desc, 0, sizeof(*desc)); return false; } if (var.g) var.limit >>= 12; set_desc_limit(desc, var.limit); set_desc_base(desc, (unsigned long)var.base); #ifdef CONFIG_X86_64 if (base3) *base3 = var.base >> 32; #endif desc->type = var.type; desc->s = var.s; desc->dpl = var.dpl; desc->p = var.present; desc->avl = var.avl; desc->l = var.l; desc->d = var.db; desc->g = var.g; return true; } static void emulator_set_segment(struct x86_emulate_ctxt *ctxt, u16 selector, struct desc_struct *desc, u32 base3, int seg) { struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); struct kvm_segment var; var.selector = selector; var.base = get_desc_base(desc); #ifdef CONFIG_X86_64 var.base |= ((u64)base3) << 32; #endif var.limit = get_desc_limit(desc); if (desc->g) var.limit = (var.limit << 12) | 0xfff; var.type = desc->type; var.dpl = desc->dpl; var.db = desc->d; var.s = desc->s; var.l = desc->l; var.g = desc->g; var.avl = desc->avl; var.present = desc->p; var.unusable = !var.present; var.padding = 0; kvm_set_segment(vcpu, &var, seg); return; } static int emulator_get_msr(struct x86_emulate_ctxt *ctxt, u32 msr_index, u64 *pdata) { return kvm_get_msr(emul_to_vcpu(ctxt), msr_index, pdata); } static int emulator_set_msr(struct x86_emulate_ctxt *ctxt, u32 msr_index, u64 data) { struct msr_data msr; msr.data = data; msr.index = msr_index; msr.host_initiated = false; return kvm_set_msr(emul_to_vcpu(ctxt), &msr); } static int emulator_check_pmc(struct x86_emulate_ctxt *ctxt, u32 pmc) { return kvm_pmu_check_pmc(emul_to_vcpu(ctxt), pmc); } static int emulator_read_pmc(struct x86_emulate_ctxt *ctxt, u32 pmc, u64 *pdata) { return kvm_pmu_read_pmc(emul_to_vcpu(ctxt), pmc, pdata); } static void emulator_halt(struct x86_emulate_ctxt *ctxt) { emul_to_vcpu(ctxt)->arch.halt_request = 1; } static void emulator_get_fpu(struct x86_emulate_ctxt *ctxt) { preempt_disable(); kvm_load_guest_fpu(emul_to_vcpu(ctxt)); /* * CR0.TS may reference the host fpu state, not the guest fpu state, * so it may be clear at this point. 
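	 *
	 * (Editor addition) Usage note, hedged: emulator callbacks touching
	 * FPU/SSE state are expected to bracket the access with this pair,
	 * keeping preemption off while the guest FPU is resident:
	 *
	 *   ctxt->ops->get_fpu(ctxt);  // preempt_disable() + guest FPU load
	 *   // ... access x87/XMM state here ...
	 *   ctxt->ops->put_fpu(ctxt);  // preempt_enable()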
 */
	clts();
}

static void emulator_put_fpu(struct x86_emulate_ctxt *ctxt)
{
	preempt_enable();
}

static int emulator_intercept(struct x86_emulate_ctxt *ctxt,
			      struct x86_instruction_info *info,
			      enum x86_intercept_stage stage)
{
	return kvm_x86_ops->check_intercept(emul_to_vcpu(ctxt), info, stage);
}

static void emulator_get_cpuid(struct x86_emulate_ctxt *ctxt,
			       u32 *eax, u32 *ebx, u32 *ecx, u32 *edx)
{
	kvm_cpuid(emul_to_vcpu(ctxt), eax, ebx, ecx, edx);
}

static ulong emulator_read_gpr(struct x86_emulate_ctxt *ctxt, unsigned reg)
{
	return kvm_register_read(emul_to_vcpu(ctxt), reg);
}

static void emulator_write_gpr(struct x86_emulate_ctxt *ctxt, unsigned reg, ulong val)
{
	kvm_register_write(emul_to_vcpu(ctxt), reg, val);
}

static const struct x86_emulate_ops emulate_ops = {
	.read_gpr            = emulator_read_gpr,
	.write_gpr           = emulator_write_gpr,
	.read_std            = kvm_read_guest_virt_system,
	.write_std           = kvm_write_guest_virt_system,
	.fetch               = kvm_fetch_guest_virt,
	.read_emulated       = emulator_read_emulated,
	.write_emulated      = emulator_write_emulated,
	.cmpxchg_emulated    = emulator_cmpxchg_emulated,
	.invlpg              = emulator_invlpg,
	.pio_in_emulated     = emulator_pio_in_emulated,
	.pio_out_emulated    = emulator_pio_out_emulated,
	.get_segment         = emulator_get_segment,
	.set_segment         = emulator_set_segment,
	.get_cached_segment_base = emulator_get_cached_segment_base,
	.get_gdt             = emulator_get_gdt,
	.get_idt             = emulator_get_idt,
	.set_gdt             = emulator_set_gdt,
	.set_idt             = emulator_set_idt,
	.get_cr              = emulator_get_cr,
	.set_cr              = emulator_set_cr,
	.cpl                 = emulator_get_cpl,
	.get_dr              = emulator_get_dr,
	.set_dr              = emulator_set_dr,
	.set_msr             = emulator_set_msr,
	.get_msr             = emulator_get_msr,
	.check_pmc           = emulator_check_pmc,
	.read_pmc            = emulator_read_pmc,
	.halt                = emulator_halt,
	.wbinvd              = emulator_wbinvd,
	.fix_hypercall       = emulator_fix_hypercall,
	.get_fpu             = emulator_get_fpu,
	.put_fpu             = emulator_put_fpu,
	.intercept           = emulator_intercept,
	.get_cpuid           = emulator_get_cpuid,
};

static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask)
{
	u32 int_shadow = kvm_x86_ops->get_interrupt_shadow(vcpu);
	/*
	 * An "sti; sti" sequence only disables interrupts for the first
	 * instruction.  So, if the last instruction, be it emulated or
	 * not, left the system with the INT_STI flag enabled, it
	 * means that the last instruction is an sti.  We should not
	 * leave the flag on in this case.  The same goes for "mov ss".
	 */
	if (int_shadow & mask)
		mask = 0;
	if (unlikely(int_shadow || mask)) {
		kvm_x86_ops->set_interrupt_shadow(vcpu, mask);
		if (!mask)
			kvm_make_request(KVM_REQ_EVENT, vcpu);
	}
}

static bool inject_emulated_exception(struct kvm_vcpu *vcpu)
{
	struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
	if (ctxt->exception.vector == PF_VECTOR)
		return kvm_propagate_fault(vcpu, &ctxt->exception);

	if (ctxt->exception.error_code_valid)
		kvm_queue_exception_e(vcpu, ctxt->exception.vector,
				      ctxt->exception.error_code);
	else
		kvm_queue_exception(vcpu, ctxt->exception.vector);
	return false;
}

static void init_emulate_ctxt(struct kvm_vcpu *vcpu)
{
	struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
	int cs_db, cs_l;

	kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);

	ctxt->eflags = kvm_get_rflags(vcpu);
	ctxt->eip = kvm_rip_read(vcpu);
	ctxt->mode = (!is_protmode(vcpu))		? X86EMUL_MODE_REAL :
		     (ctxt->eflags & X86_EFLAGS_VM)	? X86EMUL_MODE_VM86 :
		     (cs_l && is_long_mode(vcpu))	? X86EMUL_MODE_PROT64 :
		     cs_db				?
X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
	ctxt->guest_mode = is_guest_mode(vcpu);

	init_decode_cache(ctxt);
	vcpu->arch.emulate_regs_need_sync_from_vcpu = false;
}

int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip)
{
	struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
	int ret;

	init_emulate_ctxt(vcpu);

	ctxt->op_bytes = 2;
	ctxt->ad_bytes = 2;
	ctxt->_eip = ctxt->eip + inc_eip;
	ret = emulate_int_real(ctxt, irq);

	if (ret != X86EMUL_CONTINUE)
		return EMULATE_FAIL;

	ctxt->eip = ctxt->_eip;
	kvm_rip_write(vcpu, ctxt->eip);
	kvm_set_rflags(vcpu, ctxt->eflags);

	if (irq == NMI_VECTOR)
		vcpu->arch.nmi_pending = 0;
	else
		vcpu->arch.interrupt.pending = false;

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvm_inject_realmode_interrupt);

static int handle_emulation_failure(struct kvm_vcpu *vcpu)
{
	int r = EMULATE_DONE;

	++vcpu->stat.insn_emulation_fail;
	trace_kvm_emulate_insn_failed(vcpu);
	if (!is_guest_mode(vcpu)) {
		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
		vcpu->run->internal.ndata = 0;
		r = EMULATE_FAIL;
	}
	kvm_queue_exception(vcpu, UD_VECTOR);

	return r;
}

static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t cr2,
				  bool write_fault_to_shadow_pgtable,
				  int emulation_type)
{
	gpa_t gpa = cr2;
	pfn_t pfn;

	if (emulation_type & EMULTYPE_NO_REEXECUTE)
		return false;

	if (!vcpu->arch.mmu.direct_map) {
		/*
		 * Write permission should be allowed since only
		 * write accesses need to be emulated.
		 */
		gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2, NULL);

		/*
		 * If the mapping is invalid in the guest, let the CPU retry
		 * it to generate the fault.
		 */
		if (gpa == UNMAPPED_GVA)
			return true;
	}

	/*
	 * Do not retry the unhandleable instruction if it faults on the
	 * readonly host memory, otherwise it will go into an infinite loop:
	 * retry instruction -> write #PF -> emulation fail -> retry
	 * instruction -> ...
	 */
	pfn = gfn_to_pfn(vcpu->kvm, gpa_to_gfn(gpa));

	/*
	 * If the instruction failed on the error pfn, it cannot be fixed;
	 * report the error to userspace.
	 */
	if (is_error_noslot_pfn(pfn))
		return false;

	kvm_release_pfn_clean(pfn);

	/* The instructions are well-emulated on direct mmu. */
	if (vcpu->arch.mmu.direct_map) {
		unsigned int indirect_shadow_pages;

		spin_lock(&vcpu->kvm->mmu_lock);
		indirect_shadow_pages = vcpu->kvm->arch.indirect_shadow_pages;
		spin_unlock(&vcpu->kvm->mmu_lock);

		if (indirect_shadow_pages)
			kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));

		return true;
	}

	/*
	 * If emulation was due to an access to a shadowed page table and it
	 * failed, try to unshadow the page and re-enter the guest to let the
	 * CPU execute the instruction.
	 */
	kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));

	/*
	 * If the access faults on its page table, it cannot be fixed by
	 * unprotecting the shadow page, and it should be reported to
	 * userspace.
	 */
	return !write_fault_to_shadow_pgtable;
}

static bool retry_instruction(struct x86_emulate_ctxt *ctxt,
			      unsigned long cr2,  int emulation_type)
{
	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
	unsigned long last_retry_eip, last_retry_addr, gpa = cr2;

	last_retry_eip = vcpu->arch.last_retry_eip;
	last_retry_addr = vcpu->arch.last_retry_addr;

	/*
	 * If the emulation is caused by #PF and it is a non-page_table
	 * writing instruction, it means the VM-EXIT is caused by the shadow
	 * page being protected; we can zap the shadow page and retry this
	 * instruction directly.
	 *
	 * Note: if the guest uses a non-page-table modifying instruction
	 * on the PDE that points to the instruction, then we will unmap
	 * the instruction and go to an infinite loop.
So, we cache the * last retried eip and the last fault address, if we meet the eip * and the address again, we can break out of the potential infinite * loop. */ vcpu->arch.last_retry_eip = vcpu->arch.last_retry_addr = 0; if (!(emulation_type & EMULTYPE_RETRY)) return false; if (x86_page_table_writing_insn(ctxt)) return false; if (ctxt->eip == last_retry_eip && last_retry_addr == cr2) return false; vcpu->arch.last_retry_eip = ctxt->eip; vcpu->arch.last_retry_addr = cr2; if (!vcpu->arch.mmu.direct_map) gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2, NULL); kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa)); return true; } static int complete_emulated_mmio(struct kvm_vcpu *vcpu); static int complete_emulated_pio(struct kvm_vcpu *vcpu); static int kvm_vcpu_check_hw_bp(unsigned long addr, u32 type, u32 dr7, unsigned long *db) { u32 dr6 = 0; int i; u32 enable, rwlen; enable = dr7; rwlen = dr7 >> 16; for (i = 0; i < 4; i++, enable >>= 2, rwlen >>= 4) if ((enable & 3) && (rwlen & 15) == type && db[i] == addr) dr6 |= (1 << i); return dr6; } static void kvm_vcpu_check_singlestep(struct kvm_vcpu *vcpu, unsigned long rflags, int *r) { struct kvm_run *kvm_run = vcpu->run; /* * rflags is the old, "raw" value of the flags. The new value has * not been saved yet. * * This is correct even for TF set by the guest, because "the * processor will not generate this exception after the instruction * that sets the TF flag". */ if (unlikely(rflags & X86_EFLAGS_TF)) { if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) { kvm_run->debug.arch.dr6 = DR6_BS | DR6_FIXED_1 | DR6_RTM; kvm_run->debug.arch.pc = vcpu->arch.singlestep_rip; kvm_run->debug.arch.exception = DB_VECTOR; kvm_run->exit_reason = KVM_EXIT_DEBUG; *r = EMULATE_USER_EXIT; } else { vcpu->arch.emulate_ctxt.eflags &= ~X86_EFLAGS_TF; /* * "Certain debug exceptions may clear bit 0-3. The * remaining contents of the DR6 register are never * cleared by the processor". */ vcpu->arch.dr6 &= ~15; vcpu->arch.dr6 |= DR6_BS | DR6_RTM; kvm_queue_exception(vcpu, DB_VECTOR); } } } static bool kvm_vcpu_check_breakpoint(struct kvm_vcpu *vcpu, int *r) { struct kvm_run *kvm_run = vcpu->run; unsigned long eip = vcpu->arch.emulate_ctxt.eip; u32 dr6 = 0; if (unlikely(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) && (vcpu->arch.guest_debug_dr7 & DR7_BP_EN_MASK)) { dr6 = kvm_vcpu_check_hw_bp(eip, 0, vcpu->arch.guest_debug_dr7, vcpu->arch.eff_db); if (dr6 != 0) { kvm_run->debug.arch.dr6 = dr6 | DR6_FIXED_1 | DR6_RTM; kvm_run->debug.arch.pc = kvm_rip_read(vcpu) + get_segment_base(vcpu, VCPU_SREG_CS); kvm_run->debug.arch.exception = DB_VECTOR; kvm_run->exit_reason = KVM_EXIT_DEBUG; *r = EMULATE_USER_EXIT; return true; } } if (unlikely(vcpu->arch.dr7 & DR7_BP_EN_MASK) && !(kvm_get_rflags(vcpu) & X86_EFLAGS_RF)) { dr6 = kvm_vcpu_check_hw_bp(eip, 0, vcpu->arch.dr7, vcpu->arch.db); if (dr6 != 0) { vcpu->arch.dr6 &= ~15; vcpu->arch.dr6 |= dr6 | DR6_RTM; kvm_queue_exception(vcpu, DB_VECTOR); *r = EMULATE_DONE; return true; } } return false; } int x86_emulate_instruction(struct kvm_vcpu *vcpu, unsigned long cr2, int emulation_type, void *insn, int insn_len) { int r; struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt; bool writeback = true; bool write_fault_to_spt = vcpu->arch.write_fault_to_shadow_pgtable; /* * Clear write_fault_to_shadow_pgtable here to ensure it is * never reused. 
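	 *
	 * (Aside, editor addition) A worked example for kvm_vcpu_check_hw_bp()
	 * above, with illustrative values: dr7 == 1 sets only L0, and
	 * R/W0 == LEN0 == 0 encodes a 1-byte execute breakpoint, so a fetch
	 * at db[0] matches slot 0:
	 *
	 *   unsigned long db[4] = { 0x401000, 0, 0, 0 };    // hypothetical
	 *   u32 hit = kvm_vcpu_check_hw_bp(0x401000, 0, 1, db);
	 *   // type 0 == execute; hit == 0x1, i.e. DR6 bit 0 raised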
*/ vcpu->arch.write_fault_to_shadow_pgtable = false; kvm_clear_exception_queue(vcpu); if (!(emulation_type & EMULTYPE_NO_DECODE)) { init_emulate_ctxt(vcpu); /* * We will reenter on the same instruction since * we do not set complete_userspace_io. This does not * handle watchpoints yet, those would be handled in * the emulate_ops. */ if (kvm_vcpu_check_breakpoint(vcpu, &r)) return r; ctxt->interruptibility = 0; ctxt->have_exception = false; ctxt->exception.vector = -1; ctxt->perm_ok = false; ctxt->ud = emulation_type & EMULTYPE_TRAP_UD; r = x86_decode_insn(ctxt, insn, insn_len); trace_kvm_emulate_insn_start(vcpu); ++vcpu->stat.insn_emulation; if (r != EMULATION_OK) { if (emulation_type & EMULTYPE_TRAP_UD) return EMULATE_FAIL; if (reexecute_instruction(vcpu, cr2, write_fault_to_spt, emulation_type)) return EMULATE_DONE; if (emulation_type & EMULTYPE_SKIP) return EMULATE_FAIL; return handle_emulation_failure(vcpu); } } if (emulation_type & EMULTYPE_SKIP) { kvm_rip_write(vcpu, ctxt->_eip); if (ctxt->eflags & X86_EFLAGS_RF) kvm_set_rflags(vcpu, ctxt->eflags & ~X86_EFLAGS_RF); return EMULATE_DONE; } if (retry_instruction(ctxt, cr2, emulation_type)) return EMULATE_DONE; /* this is needed for vmware backdoor interface to work since it changes registers values during IO operation */ if (vcpu->arch.emulate_regs_need_sync_from_vcpu) { vcpu->arch.emulate_regs_need_sync_from_vcpu = false; emulator_invalidate_register_cache(ctxt); } restart: r = x86_emulate_insn(ctxt); if (r == EMULATION_INTERCEPTED) return EMULATE_DONE; if (r == EMULATION_FAILED) { if (reexecute_instruction(vcpu, cr2, write_fault_to_spt, emulation_type)) return EMULATE_DONE; return handle_emulation_failure(vcpu); } if (ctxt->have_exception) { r = EMULATE_DONE; if (inject_emulated_exception(vcpu)) return r; } else if (vcpu->arch.pio.count) { if (!vcpu->arch.pio.in) { /* FIXME: return into emulator if single-stepping. */ vcpu->arch.pio.count = 0; } else { writeback = false; vcpu->arch.complete_userspace_io = complete_emulated_pio; } r = EMULATE_USER_EXIT; } else if (vcpu->mmio_needed) { if (!vcpu->mmio_is_write) writeback = false; r = EMULATE_USER_EXIT; vcpu->arch.complete_userspace_io = complete_emulated_mmio; } else if (r == EMULATION_RESTART) goto restart; else r = EMULATE_DONE; if (writeback) { unsigned long rflags = kvm_x86_ops->get_rflags(vcpu); toggle_interruptibility(vcpu, ctxt->interruptibility); vcpu->arch.emulate_regs_need_sync_to_vcpu = false; kvm_rip_write(vcpu, ctxt->eip); if (r == EMULATE_DONE) kvm_vcpu_check_singlestep(vcpu, rflags, &r); __kvm_set_rflags(vcpu, ctxt->eflags); /* * For STI, interrupts are shadowed; so KVM_REQ_EVENT will * do nothing, and it will be requested again as soon as * the shadow expires. But we still need to check here, * because POPF has no interrupt shadow. 
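	 *
	 * (Editor addition) The test just below fires on a rising edge of
	 * EFLAGS.IF across the emulated instruction; a minimal sketch of
	 * the same bit test, with nothing else assumed:
	 *
	 *   // True iff IF was clear before and set after (STI, or a POPF
	 *   // restoring IF) -- exactly the case with no interrupt shadow
	 *   // left to re-trigger event injection.
	 *   static bool if_just_enabled(unsigned long old_rflags,
	 *                               unsigned long new_rflags)
	 *   {
	 *           return (new_rflags & ~old_rflags) & X86_EFLAGS_IF;
	 *   }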
*/ if (unlikely((ctxt->eflags & ~rflags) & X86_EFLAGS_IF)) kvm_make_request(KVM_REQ_EVENT, vcpu); } else vcpu->arch.emulate_regs_need_sync_to_vcpu = true; return r; } EXPORT_SYMBOL_GPL(x86_emulate_instruction); int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, unsigned short port) { unsigned long val = kvm_register_read(vcpu, VCPU_REGS_RAX); int ret = emulator_pio_out_emulated(&vcpu->arch.emulate_ctxt, size, port, &val, 1); /* do not return to emulator after return from userspace */ vcpu->arch.pio.count = 0; return ret; } EXPORT_SYMBOL_GPL(kvm_fast_pio_out); static void tsc_bad(void *info) { __this_cpu_write(cpu_tsc_khz, 0); } static void tsc_khz_changed(void *data) { struct cpufreq_freqs *freq = data; unsigned long khz = 0; if (data) khz = freq->new; else if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) khz = cpufreq_quick_get(raw_smp_processor_id()); if (!khz) khz = tsc_khz; __this_cpu_write(cpu_tsc_khz, khz); } static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data) { struct cpufreq_freqs *freq = data; struct kvm *kvm; struct kvm_vcpu *vcpu; int i, send_ipi = 0; /* * We allow guests to temporarily run on slowing clocks, * provided we notify them after, or to run on accelerating * clocks, provided we notify them before. Thus time never * goes backwards. * * However, we have a problem. We can't atomically update * the frequency of a given CPU from this function; it is * merely a notifier, which can be called from any CPU. * Changing the TSC frequency at arbitrary points in time * requires a recomputation of local variables related to * the TSC for each VCPU. We must flag these local variables * to be updated and be sure the update takes place with the * new frequency before any guests proceed. * * Unfortunately, the combination of hotplug CPU and frequency * change creates an intractable locking scenario; the order * of when these callouts happen is undefined with respect to * CPU hotplug, and they can race with each other. As such, * merely setting per_cpu(cpu_tsc_khz) = X during a hotadd is * undefined; you can actually have a CPU frequency change take * place in between the computation of X and the setting of the * variable. To protect against this problem, all updates of * the per_cpu tsc_khz variable are done in an interrupt * protected IPI, and all callers wishing to update the value * must wait for a synchronous IPI to complete (which is trivial * if the caller is on the CPU already). This establishes the * necessary total order on variable updates. * * Note that because a guest time update may take place * anytime after the setting of the VCPU's request bit, the * correct TSC value must be set before the request. However, * to ensure the update actually makes it to any guest which * starts running in hardware virtualization between the set * and the acquisition of the spinlock, we must also ping the * CPU after setting the request bit. * */ if (val == CPUFREQ_PRECHANGE && freq->old > freq->new) return 0; if (val == CPUFREQ_POSTCHANGE && freq->old < freq->new) return 0; smp_call_function_single(freq->cpu, tsc_khz_changed, freq, 1); spin_lock(&kvm_lock); list_for_each_entry(kvm, &vm_list, vm_list) { kvm_for_each_vcpu(i, vcpu, kvm) { if (vcpu->cpu != freq->cpu) continue; kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); if (vcpu->cpu != smp_processor_id()) send_ipi = 1; } } spin_unlock(&kvm_lock); if (freq->old < freq->new && send_ipi) { /* * We upscale the frequency. 
We must make sure the guest
	 * doesn't see old kvmclock values while running with
	 * the new frequency, otherwise we risk the guest seeing
	 * time go backwards.
	 *
	 * In case we update the frequency for another cpu
	 * (which might be in guest context), send an interrupt
	 * to kick the cpu out of guest context.  Next time
	 * guest context is entered kvmclock will be updated,
	 * so the guest will not see stale values.
	 */
		smp_call_function_single(freq->cpu, tsc_khz_changed, freq, 1);
	}
	return 0;
}

static struct notifier_block kvmclock_cpufreq_notifier_block = {
	.notifier_call  = kvmclock_cpufreq_notifier
};

static int kvmclock_cpu_notifier(struct notifier_block *nfb,
					unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	switch (action) {
	case CPU_ONLINE:
	case CPU_DOWN_FAILED:
		smp_call_function_single(cpu, tsc_khz_changed, NULL, 1);
		break;
	case CPU_DOWN_PREPARE:
		smp_call_function_single(cpu, tsc_bad, NULL, 1);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block kvmclock_cpu_notifier_block = {
	.notifier_call  = kvmclock_cpu_notifier,
	.priority = -INT_MAX
};

static void kvm_timer_init(void)
{
	int cpu;

	max_tsc_khz = tsc_khz;

	cpu_notifier_register_begin();
	if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
#ifdef CONFIG_CPU_FREQ
		struct cpufreq_policy policy;
		memset(&policy, 0, sizeof(policy));
		cpu = get_cpu();
		cpufreq_get_policy(&policy, cpu);
		if (policy.cpuinfo.max_freq)
			max_tsc_khz = policy.cpuinfo.max_freq;
		put_cpu();
#endif
		cpufreq_register_notifier(&kvmclock_cpufreq_notifier_block,
					  CPUFREQ_TRANSITION_NOTIFIER);
	}
	pr_debug("kvm: max_tsc_khz = %ld\n", max_tsc_khz);
	for_each_online_cpu(cpu)
		smp_call_function_single(cpu, tsc_khz_changed, NULL, 1);

	__register_hotcpu_notifier(&kvmclock_cpu_notifier_block);
	cpu_notifier_register_done();
}

static DEFINE_PER_CPU(struct kvm_vcpu *, current_vcpu);

int kvm_is_in_guest(void)
{
	return __this_cpu_read(current_vcpu) != NULL;
}

static int kvm_is_user_mode(void)
{
	int user_mode = 3;

	if (__this_cpu_read(current_vcpu))
		user_mode = kvm_x86_ops->get_cpl(__this_cpu_read(current_vcpu));

	return user_mode != 0;
}

static unsigned long kvm_get_guest_ip(void)
{
	unsigned long ip = 0;

	if (__this_cpu_read(current_vcpu))
		ip = kvm_rip_read(__this_cpu_read(current_vcpu));

	return ip;
}

static struct perf_guest_info_callbacks kvm_guest_cbs = {
	.is_in_guest		= kvm_is_in_guest,
	.is_user_mode		= kvm_is_user_mode,
	.get_guest_ip		= kvm_get_guest_ip,
};

void kvm_before_handle_nmi(struct kvm_vcpu *vcpu)
{
	__this_cpu_write(current_vcpu, vcpu);
}
EXPORT_SYMBOL_GPL(kvm_before_handle_nmi);

void kvm_after_handle_nmi(struct kvm_vcpu *vcpu)
{
	__this_cpu_write(current_vcpu, NULL);
}
EXPORT_SYMBOL_GPL(kvm_after_handle_nmi);

static void kvm_set_mmio_spte_mask(void)
{
	u64 mask;
	int maxphyaddr = boot_cpu_data.x86_phys_bits;

	/*
	 * Set the reserved bits and the present bit of a paging-structure
	 * entry to generate a page fault with PFERR.RSV = 1.
	 */
	/* Mask the reserved physical address bits. */
	mask = rsvd_bits(maxphyaddr, 51);

	/* Bit 62 is always reserved for 32-bit hosts. */
	mask |= 0x3ull << 62;

	/* Set the present bit. */
	mask |= 1ull;

#ifdef CONFIG_X86_64
	/*
	 * If the reserved bit is not supported, clear the present bit to
	 * disable mmio page faults.
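	 *
	 * (Editor addition) For illustration only, the mask assembled above
	 * as a pure function; a hedged sketch that expands rsvd_bits()
	 * inline rather than using the kernel helper:
	 *
	 *   static u64 example_mmio_spte_mask(int maxphyaddr)
	 *   {
	 *           // reserved physical-address bits [maxphyaddr, 51]
	 *           u64 mask = ((1ULL << (52 - maxphyaddr)) - 1) << maxphyaddr;
	 *           mask |= 0x3ULL << 62;   // bits 62/63
	 *           mask |= 1ULL;           // present bit
	 *           return mask;            // maxphyaddr == 40 sets 40..51
	 *   }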
*/ if (maxphyaddr == 52) mask &= ~1ull; #endif kvm_mmu_set_mmio_spte_mask(mask); } #ifdef CONFIG_X86_64 static void pvclock_gtod_update_fn(struct work_struct *work) { struct kvm *kvm; struct kvm_vcpu *vcpu; int i; spin_lock(&kvm_lock); list_for_each_entry(kvm, &vm_list, vm_list) kvm_for_each_vcpu(i, vcpu, kvm) kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu); atomic_set(&kvm_guest_has_master_clock, 0); spin_unlock(&kvm_lock); } static DECLARE_WORK(pvclock_gtod_work, pvclock_gtod_update_fn); /* * Notification about pvclock gtod data update. */ static int pvclock_gtod_notify(struct notifier_block *nb, unsigned long unused, void *priv) { struct pvclock_gtod_data *gtod = &pvclock_gtod_data; struct timekeeper *tk = priv; update_pvclock_gtod(tk); /* disable master clock if host does not trust, or does not * use, TSC clocksource */ if (gtod->clock.vclock_mode != VCLOCK_TSC && atomic_read(&kvm_guest_has_master_clock) != 0) queue_work(system_long_wq, &pvclock_gtod_work); return 0; } static struct notifier_block pvclock_gtod_notifier = { .notifier_call = pvclock_gtod_notify, }; #endif int kvm_arch_init(void *opaque) { int r; struct kvm_x86_ops *ops = opaque; if (kvm_x86_ops) { printk(KERN_ERR "kvm: already loaded the other module\n"); r = -EEXIST; goto out; } if (!ops->cpu_has_kvm_support()) { printk(KERN_ERR "kvm: no hardware support\n"); r = -EOPNOTSUPP; goto out; } if (ops->disabled_by_bios()) { printk(KERN_ERR "kvm: disabled by bios\n"); r = -EOPNOTSUPP; goto out; } r = -ENOMEM; shared_msrs = alloc_percpu(struct kvm_shared_msrs); if (!shared_msrs) { printk(KERN_ERR "kvm: failed to allocate percpu kvm_shared_msrs\n"); goto out; } r = kvm_mmu_module_init(); if (r) goto out_free_percpu; kvm_set_mmio_spte_mask(); kvm_x86_ops = ops; kvm_init_msr_list(); kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK, PT_DIRTY_MASK, PT64_NX_MASK, 0); kvm_timer_init(); perf_register_guest_info_callbacks(&kvm_guest_cbs); if (cpu_has_xsave) host_xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK); kvm_lapic_init(); #ifdef CONFIG_X86_64 pvclock_gtod_register_notifier(&pvclock_gtod_notifier); #endif return 0; out_free_percpu: free_percpu(shared_msrs); out: return r; } void kvm_arch_exit(void) { perf_unregister_guest_info_callbacks(&kvm_guest_cbs); if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) cpufreq_unregister_notifier(&kvmclock_cpufreq_notifier_block, CPUFREQ_TRANSITION_NOTIFIER); unregister_hotcpu_notifier(&kvmclock_cpu_notifier_block); #ifdef CONFIG_X86_64 pvclock_gtod_unregister_notifier(&pvclock_gtod_notifier); #endif kvm_x86_ops = NULL; kvm_mmu_module_exit(); free_percpu(shared_msrs); } int kvm_emulate_halt(struct kvm_vcpu *vcpu) { ++vcpu->stat.halt_exits; if (irqchip_in_kernel(vcpu->kvm)) { vcpu->arch.mp_state = KVM_MP_STATE_HALTED; return 1; } else { vcpu->run->exit_reason = KVM_EXIT_HLT; return 0; } } EXPORT_SYMBOL_GPL(kvm_emulate_halt); int kvm_hv_hypercall(struct kvm_vcpu *vcpu) { u64 param, ingpa, outgpa, ret; uint16_t code, rep_idx, rep_cnt, res = HV_STATUS_SUCCESS, rep_done = 0; bool fast, longmode; /* * hypercall generates UD from non zero cpl and real mode * per HYPER-V spec */ if (kvm_x86_ops->get_cpl(vcpu) != 0 || !is_protmode(vcpu)) { kvm_queue_exception(vcpu, UD_VECTOR); return 0; } longmode = is_64_bit_mode(vcpu); if (!longmode) { param = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDX) << 32) | (kvm_register_read(vcpu, VCPU_REGS_RAX) & 0xffffffff); ingpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RBX) << 32) | (kvm_register_read(vcpu, VCPU_REGS_RCX) & 0xffffffff); outgpa = ((u64)kvm_register_read(vcpu, 
VCPU_REGS_RDI) << 32) | (kvm_register_read(vcpu, VCPU_REGS_RSI) & 0xffffffff); } #ifdef CONFIG_X86_64 else { param = kvm_register_read(vcpu, VCPU_REGS_RCX); ingpa = kvm_register_read(vcpu, VCPU_REGS_RDX); outgpa = kvm_register_read(vcpu, VCPU_REGS_R8); } #endif code = param & 0xffff; fast = (param >> 16) & 0x1; rep_cnt = (param >> 32) & 0xfff; rep_idx = (param >> 48) & 0xfff; trace_kvm_hv_hypercall(code, fast, rep_cnt, rep_idx, ingpa, outgpa); switch (code) { case HV_X64_HV_NOTIFY_LONG_SPIN_WAIT: kvm_vcpu_on_spin(vcpu); break; default: res = HV_STATUS_INVALID_HYPERCALL_CODE; break; } ret = res | (((u64)rep_done & 0xfff) << 32); if (longmode) { kvm_register_write(vcpu, VCPU_REGS_RAX, ret); } else { kvm_register_write(vcpu, VCPU_REGS_RDX, ret >> 32); kvm_register_write(vcpu, VCPU_REGS_RAX, ret & 0xffffffff); } return 1; } /* * kvm_pv_kick_cpu_op: Kick a vcpu. * * @apicid - apicid of vcpu to be kicked. */ static void kvm_pv_kick_cpu_op(struct kvm *kvm, unsigned long flags, int apicid) { struct kvm_lapic_irq lapic_irq; lapic_irq.shorthand = 0; lapic_irq.dest_mode = 0; lapic_irq.dest_id = apicid; lapic_irq.delivery_mode = APIC_DM_REMRD; kvm_irq_delivery_to_apic(kvm, 0, &lapic_irq, NULL); } int kvm_emulate_hypercall(struct kvm_vcpu *vcpu) { unsigned long nr, a0, a1, a2, a3, ret; int op_64_bit, r = 1; if (kvm_hv_hypercall_enabled(vcpu->kvm)) return kvm_hv_hypercall(vcpu); nr = kvm_register_read(vcpu, VCPU_REGS_RAX); a0 = kvm_register_read(vcpu, VCPU_REGS_RBX); a1 = kvm_register_read(vcpu, VCPU_REGS_RCX); a2 = kvm_register_read(vcpu, VCPU_REGS_RDX); a3 = kvm_register_read(vcpu, VCPU_REGS_RSI); trace_kvm_hypercall(nr, a0, a1, a2, a3); op_64_bit = is_64_bit_mode(vcpu); if (!op_64_bit) { nr &= 0xFFFFFFFF; a0 &= 0xFFFFFFFF; a1 &= 0xFFFFFFFF; a2 &= 0xFFFFFFFF; a3 &= 0xFFFFFFFF; } if (kvm_x86_ops->get_cpl(vcpu) != 0) { ret = -KVM_EPERM; goto out; } switch (nr) { case KVM_HC_VAPIC_POLL_IRQ: ret = 0; break; case KVM_HC_KICK_CPU: kvm_pv_kick_cpu_op(vcpu->kvm, a0, a1); ret = 0; break; default: ret = -KVM_ENOSYS; break; } out: if (!op_64_bit) ret = (u32)ret; kvm_register_write(vcpu, VCPU_REGS_RAX, ret); ++vcpu->stat.hypercalls; return r; } EXPORT_SYMBOL_GPL(kvm_emulate_hypercall); static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt) { struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); char instruction[3]; unsigned long rip = kvm_rip_read(vcpu); kvm_x86_ops->patch_hypercall(vcpu, instruction); return emulator_write_emulated(ctxt, rip, instruction, 3, NULL); } /* * Check if userspace requested an interrupt window, and that the * interrupt window is open. * * No need to exit to userspace if we already have an interrupt queued. 
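 *
 * (Editor addition, looking back at kvm_emulate_hypercall() above rather
 * than at the window check below.)  From the register reads there, the
 * guest-side ABI is: nr in RAX, args in RBX/RCX/RDX/RSI, result in RAX.
 * A hedged guest-code sketch; the real kvm_para.h helper selects
 * vmcall/vmmcall per vendor (cf. the patch_hypercall hook):
 *
 *   static long kvm_hypercall2_sketch(unsigned int nr,
 *                                     unsigned long p1, unsigned long p2)
 *   {
 *           long ret;
 *           asm volatile("vmcall"          // Intel encoding; AMD: vmmcall
 *                        : "=a"(ret)
 *                        : "a"(nr), "b"(p1), "c"(p2)
 *                        : "memory");
 *           return ret;
 *   }
 *
 *   // e.g. kvm_hypercall2_sketch(KVM_HC_KICK_CPU, 0, apicid);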
*/ static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu) { return (!irqchip_in_kernel(vcpu->kvm) && !kvm_cpu_has_interrupt(vcpu) && vcpu->run->request_interrupt_window && kvm_arch_interrupt_allowed(vcpu)); } static void post_kvm_run_save(struct kvm_vcpu *vcpu) { struct kvm_run *kvm_run = vcpu->run; kvm_run->if_flag = (kvm_get_rflags(vcpu) & X86_EFLAGS_IF) != 0; kvm_run->cr8 = kvm_get_cr8(vcpu); kvm_run->apic_base = kvm_get_apic_base(vcpu); if (irqchip_in_kernel(vcpu->kvm)) kvm_run->ready_for_interrupt_injection = 1; else kvm_run->ready_for_interrupt_injection = kvm_arch_interrupt_allowed(vcpu) && !kvm_cpu_has_interrupt(vcpu) && !kvm_event_needs_reinjection(vcpu); } static void update_cr8_intercept(struct kvm_vcpu *vcpu) { int max_irr, tpr; if (!kvm_x86_ops->update_cr8_intercept) return; if (!vcpu->arch.apic) return; if (!vcpu->arch.apic->vapic_addr) max_irr = kvm_lapic_find_highest_irr(vcpu); else max_irr = -1; if (max_irr != -1) max_irr >>= 4; tpr = kvm_lapic_get_cr8(vcpu); kvm_x86_ops->update_cr8_intercept(vcpu, tpr, max_irr); } static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win) { int r; /* try to reinject previous events if any */ if (vcpu->arch.exception.pending) { trace_kvm_inj_exception(vcpu->arch.exception.nr, vcpu->arch.exception.has_error_code, vcpu->arch.exception.error_code); if (exception_type(vcpu->arch.exception.nr) == EXCPT_FAULT) __kvm_set_rflags(vcpu, kvm_get_rflags(vcpu) | X86_EFLAGS_RF); kvm_x86_ops->queue_exception(vcpu, vcpu->arch.exception.nr, vcpu->arch.exception.has_error_code, vcpu->arch.exception.error_code, vcpu->arch.exception.reinject); return 0; } if (vcpu->arch.nmi_injected) { kvm_x86_ops->set_nmi(vcpu); return 0; } if (vcpu->arch.interrupt.pending) { kvm_x86_ops->set_irq(vcpu); return 0; } if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events) { r = kvm_x86_ops->check_nested_events(vcpu, req_int_win); if (r != 0) return r; } /* try to inject new event if pending */ if (vcpu->arch.nmi_pending) { if (kvm_x86_ops->nmi_allowed(vcpu)) { --vcpu->arch.nmi_pending; vcpu->arch.nmi_injected = true; kvm_x86_ops->set_nmi(vcpu); } } else if (kvm_cpu_has_injectable_intr(vcpu)) { /* * Because interrupts can be injected asynchronously, we are * calling check_nested_events again here to avoid a race condition. * See https://lkml.org/lkml/2014/7/2/60 for discussion about this * proposal and current concerns. Perhaps we should be setting * KVM_REQ_EVENT only on certain events and not unconditionally? */ if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events) { r = kvm_x86_ops->check_nested_events(vcpu, req_int_win); if (r != 0) return r; } if (kvm_x86_ops->interrupt_allowed(vcpu)) { kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu), false); kvm_x86_ops->set_irq(vcpu); } } return 0; } static void process_nmi(struct kvm_vcpu *vcpu) { unsigned limit = 2; /* * x86 is limited to one NMI running, and one NMI pending after it. * If an NMI is already in progress, limit further NMIs to just one. * Otherwise, allow two (and we'll inject the first one immediately). 
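	 *
	 * (Editor addition) The same rule as a pure function; a sketch only,
	 * mirroring the clamp below:
	 *
	 *   static unsigned int nmi_limit_sketch(bool masked_or_injected,
	 *                                        unsigned int pending,
	 *                                        unsigned int queued)
	 *   {
	 *           unsigned int limit = masked_or_injected ? 1 : 2;
	 *           unsigned int total = pending + queued;  // queue drained
	 *           return total < limit ? total : limit;   // min(total, limit)
	 *   }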
*/ if (kvm_x86_ops->get_nmi_mask(vcpu) || vcpu->arch.nmi_injected) limit = 1; vcpu->arch.nmi_pending += atomic_xchg(&vcpu->arch.nmi_queued, 0); vcpu->arch.nmi_pending = min(vcpu->arch.nmi_pending, limit); kvm_make_request(KVM_REQ_EVENT, vcpu); } static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu) { u64 eoi_exit_bitmap[4]; u32 tmr[8]; if (!kvm_apic_hw_enabled(vcpu->arch.apic)) return; memset(eoi_exit_bitmap, 0, 32); memset(tmr, 0, 32); kvm_ioapic_scan_entry(vcpu, eoi_exit_bitmap, tmr); kvm_x86_ops->load_eoi_exitmap(vcpu, eoi_exit_bitmap); kvm_apic_update_tmr(vcpu, tmr); } /* * Returns 1 to let __vcpu_run() continue the guest execution loop without * exiting to the userspace. Otherwise, the value will be returned to the * userspace. */ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) { int r; bool req_int_win = !irqchip_in_kernel(vcpu->kvm) && vcpu->run->request_interrupt_window; bool req_immediate_exit = false; if (vcpu->requests) { if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) kvm_mmu_unload(vcpu); if (kvm_check_request(KVM_REQ_MIGRATE_TIMER, vcpu)) __kvm_migrate_timers(vcpu); if (kvm_check_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu)) kvm_gen_update_masterclock(vcpu->kvm); if (kvm_check_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu)) kvm_gen_kvmclock_update(vcpu); if (kvm_check_request(KVM_REQ_CLOCK_UPDATE, vcpu)) { r = kvm_guest_time_update(vcpu); if (unlikely(r)) goto out; } if (kvm_check_request(KVM_REQ_MMU_SYNC, vcpu)) kvm_mmu_sync_roots(vcpu); if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) kvm_x86_ops->tlb_flush(vcpu); if (kvm_check_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu)) { vcpu->run->exit_reason = KVM_EXIT_TPR_ACCESS; r = 0; goto out; } if (kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu)) { vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN; r = 0; goto out; } if (kvm_check_request(KVM_REQ_DEACTIVATE_FPU, vcpu)) { vcpu->fpu_active = 0; kvm_x86_ops->fpu_deactivate(vcpu); } if (kvm_check_request(KVM_REQ_APF_HALT, vcpu)) { /* Page is swapped out. Do synthetic halt */ vcpu->arch.apf.halted = true; r = 1; goto out; } if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu)) record_steal_time(vcpu); if (kvm_check_request(KVM_REQ_NMI, vcpu)) process_nmi(vcpu); if (kvm_check_request(KVM_REQ_PMU, vcpu)) kvm_handle_pmu_event(vcpu); if (kvm_check_request(KVM_REQ_PMI, vcpu)) kvm_deliver_pmi(vcpu); if (kvm_check_request(KVM_REQ_SCAN_IOAPIC, vcpu)) vcpu_scan_ioapic(vcpu); } if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win) { kvm_apic_accept_events(vcpu); if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) { r = 1; goto out; } if (inject_pending_event(vcpu, req_int_win) != 0) req_immediate_exit = true; /* enable NMI/IRQ window open exits if needed */ else if (vcpu->arch.nmi_pending) kvm_x86_ops->enable_nmi_window(vcpu); else if (kvm_cpu_has_injectable_intr(vcpu) || req_int_win) kvm_x86_ops->enable_irq_window(vcpu); if (kvm_lapic_enabled(vcpu)) { /* * Update architecture specific hints for APIC * virtual interrupt delivery. */ if (kvm_x86_ops->hwapic_irr_update) kvm_x86_ops->hwapic_irr_update(vcpu, kvm_lapic_find_highest_irr(vcpu)); update_cr8_intercept(vcpu); kvm_lapic_sync_to_vapic(vcpu); } } r = kvm_mmu_reload(vcpu); if (unlikely(r)) { goto cancel_injection; } preempt_disable(); kvm_x86_ops->prepare_guest_switch(vcpu); if (vcpu->fpu_active) kvm_load_guest_fpu(vcpu); kvm_load_guest_xcr0(vcpu); vcpu->mode = IN_GUEST_MODE; srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); /* We should set ->mode before check ->requests, * see the comment in make_all_cpus_request. 
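 *
 * (Roughly: the barrier below orders the ->mode store against the
 * ->requests load, pairing with the barrier on the request side; a
 * remote CPU thus either sees IN_GUEST_MODE and sends a kick IPI, or
 * we see its request bit in the check that follows.)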
*/ smp_mb__after_srcu_read_unlock(); local_irq_disable(); if (vcpu->mode == EXITING_GUEST_MODE || vcpu->requests || need_resched() || signal_pending(current)) { vcpu->mode = OUTSIDE_GUEST_MODE; smp_wmb(); local_irq_enable(); preempt_enable(); vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); r = 1; goto cancel_injection; } if (req_immediate_exit) smp_send_reschedule(vcpu->cpu); kvm_guest_enter(); if (unlikely(vcpu->arch.switch_db_regs)) { set_debugreg(0, 7); set_debugreg(vcpu->arch.eff_db[0], 0); set_debugreg(vcpu->arch.eff_db[1], 1); set_debugreg(vcpu->arch.eff_db[2], 2); set_debugreg(vcpu->arch.eff_db[3], 3); set_debugreg(vcpu->arch.dr6, 6); } trace_kvm_entry(vcpu->vcpu_id); kvm_x86_ops->run(vcpu); /* * Do this here before restoring debug registers on the host. And * since we do this before handling the vmexit, a DR access vmexit * can (a) read the correct value of the debug registers, (b) set * KVM_DEBUGREG_WONT_EXIT again. */ if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)) { int i; WARN_ON(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP); kvm_x86_ops->sync_dirty_debug_regs(vcpu); for (i = 0; i < KVM_NR_DB_REGS; i++) vcpu->arch.eff_db[i] = vcpu->arch.db[i]; } /* * If the guest has used debug registers, at least dr7 * will be disabled while returning to the host. * If we don't have active breakpoints in the host, we don't * care about the messed up debug address registers. But if * we have some of them active, restore the old state. */ if (hw_breakpoint_active()) hw_breakpoint_restore(); vcpu->arch.last_guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu, native_read_tsc()); vcpu->mode = OUTSIDE_GUEST_MODE; smp_wmb(); /* Interrupt is enabled by handle_external_intr() */ kvm_x86_ops->handle_external_intr(vcpu); ++vcpu->stat.exits; /* * We must have an instruction between local_irq_enable() and * kvm_guest_exit(), so the timer interrupt isn't delayed by * the interrupt shadow. The stat.exits increment will do nicely. 
* But we need to prevent reordering, hence this barrier(): */ barrier(); kvm_guest_exit(); preempt_enable(); vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); /* * Profile KVM exit RIPs: */ if (unlikely(prof_on == KVM_PROFILING)) { unsigned long rip = kvm_rip_read(vcpu); profile_hit(KVM_PROFILING, (void *)rip); } if (unlikely(vcpu->arch.tsc_always_catchup)) kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); if (vcpu->arch.apic_attention) kvm_lapic_sync_from_vapic(vcpu); r = kvm_x86_ops->handle_exit(vcpu); return r; cancel_injection: kvm_x86_ops->cancel_injection(vcpu); if (unlikely(vcpu->arch.apic_attention)) kvm_lapic_sync_from_vapic(vcpu); out: return r; } static int __vcpu_run(struct kvm_vcpu *vcpu) { int r; struct kvm *kvm = vcpu->kvm; vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); r = 1; while (r > 0) { if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE && !vcpu->arch.apf.halted) r = vcpu_enter_guest(vcpu); else { srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); kvm_vcpu_block(vcpu); vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) { kvm_apic_accept_events(vcpu); switch(vcpu->arch.mp_state) { case KVM_MP_STATE_HALTED: vcpu->arch.pv.pv_unhalted = false; vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; case KVM_MP_STATE_RUNNABLE: vcpu->arch.apf.halted = false; break; case KVM_MP_STATE_INIT_RECEIVED: break; default: r = -EINTR; break; } } } if (r <= 0) break; clear_bit(KVM_REQ_PENDING_TIMER, &vcpu->requests); if (kvm_cpu_has_pending_timer(vcpu)) kvm_inject_pending_timer_irqs(vcpu); if (dm_request_for_irq_injection(vcpu)) { r = -EINTR; vcpu->run->exit_reason = KVM_EXIT_INTR; ++vcpu->stat.request_irq_exits; } kvm_check_async_pf_completion(vcpu); if (signal_pending(current)) { r = -EINTR; vcpu->run->exit_reason = KVM_EXIT_INTR; ++vcpu->stat.signal_exits; } if (need_resched()) { srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); cond_resched(); vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); } } srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); return r; } static inline int complete_emulated_io(struct kvm_vcpu *vcpu) { int r; vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); r = emulate_instruction(vcpu, EMULTYPE_NO_DECODE); srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); if (r != EMULATE_DONE) return 0; return 1; } static int complete_emulated_pio(struct kvm_vcpu *vcpu) { BUG_ON(!vcpu->arch.pio.count); return complete_emulated_io(vcpu); } /* * Implements the following, as a state machine: * * read: * for each fragment * for each mmio piece in the fragment * write gpa, len * exit * copy data * execute insn * * write: * for each fragment * for each mmio piece in the fragment * write gpa, len * copy data * exit */ static int complete_emulated_mmio(struct kvm_vcpu *vcpu) { struct kvm_run *run = vcpu->run; struct kvm_mmio_fragment *frag; unsigned len; BUG_ON(!vcpu->mmio_needed); /* Complete previous fragment */ frag = &vcpu->mmio_fragments[vcpu->mmio_cur_fragment]; len = min(8u, frag->len); if (!vcpu->mmio_is_write) memcpy(frag->data, run->mmio.data, len); if (frag->len <= 8) { /* Switch to the next fragment. */ frag++; vcpu->mmio_cur_fragment++; } else { /* Go forward to the next mmio piece. */ frag->data += len; frag->gpa += len; frag->len -= len; } if (vcpu->mmio_cur_fragment >= vcpu->mmio_nr_fragments) { vcpu->mmio_needed = 0; /* FIXME: return into emulator if single-stepping. 
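 *
 * (For reference: a fragment wider than 8 bytes is walked in 8-byte
 * pieces, one userspace exit per piece, so e.g. a 16-byte access
 * costs two KVM_EXIT_MMIO round trips before the insn completes.)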
*/ if (vcpu->mmio_is_write) return 1; vcpu->mmio_read_completed = 1; return complete_emulated_io(vcpu); } run->exit_reason = KVM_EXIT_MMIO; run->mmio.phys_addr = frag->gpa; if (vcpu->mmio_is_write) memcpy(run->mmio.data, frag->data, min(8u, frag->len)); run->mmio.len = min(8u, frag->len); run->mmio.is_write = vcpu->mmio_is_write; vcpu->arch.complete_userspace_io = complete_emulated_mmio; return 0; } int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) { int r; sigset_t sigsaved; if (!tsk_used_math(current) && init_fpu(current)) return -ENOMEM; if (vcpu->sigset_active) sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved); if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) { kvm_vcpu_block(vcpu); kvm_apic_accept_events(vcpu); clear_bit(KVM_REQ_UNHALT, &vcpu->requests); r = -EAGAIN; goto out; } /* re-sync apic's tpr */ if (!irqchip_in_kernel(vcpu->kvm)) { if (kvm_set_cr8(vcpu, kvm_run->cr8) != 0) { r = -EINVAL; goto out; } } if (unlikely(vcpu->arch.complete_userspace_io)) { int (*cui)(struct kvm_vcpu *) = vcpu->arch.complete_userspace_io; vcpu->arch.complete_userspace_io = NULL; r = cui(vcpu); if (r <= 0) goto out; } else WARN_ON(vcpu->arch.pio.count || vcpu->mmio_needed); r = __vcpu_run(vcpu); out: post_kvm_run_save(vcpu); if (vcpu->sigset_active) sigprocmask(SIG_SETMASK, &sigsaved, NULL); return r; } int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) { if (vcpu->arch.emulate_regs_need_sync_to_vcpu) { /* * We are here if userspace calls get_regs() in the middle of * instruction emulation. Registers state needs to be copied * back from emulation context to vcpu. Userspace shouldn't do * that usually, but some bad designed PV devices (vmware * backdoor interface) need this to work */ emulator_writeback_register_cache(&vcpu->arch.emulate_ctxt); vcpu->arch.emulate_regs_need_sync_to_vcpu = false; } regs->rax = kvm_register_read(vcpu, VCPU_REGS_RAX); regs->rbx = kvm_register_read(vcpu, VCPU_REGS_RBX); regs->rcx = kvm_register_read(vcpu, VCPU_REGS_RCX); regs->rdx = kvm_register_read(vcpu, VCPU_REGS_RDX); regs->rsi = kvm_register_read(vcpu, VCPU_REGS_RSI); regs->rdi = kvm_register_read(vcpu, VCPU_REGS_RDI); regs->rsp = kvm_register_read(vcpu, VCPU_REGS_RSP); regs->rbp = kvm_register_read(vcpu, VCPU_REGS_RBP); #ifdef CONFIG_X86_64 regs->r8 = kvm_register_read(vcpu, VCPU_REGS_R8); regs->r9 = kvm_register_read(vcpu, VCPU_REGS_R9); regs->r10 = kvm_register_read(vcpu, VCPU_REGS_R10); regs->r11 = kvm_register_read(vcpu, VCPU_REGS_R11); regs->r12 = kvm_register_read(vcpu, VCPU_REGS_R12); regs->r13 = kvm_register_read(vcpu, VCPU_REGS_R13); regs->r14 = kvm_register_read(vcpu, VCPU_REGS_R14); regs->r15 = kvm_register_read(vcpu, VCPU_REGS_R15); #endif regs->rip = kvm_rip_read(vcpu); regs->rflags = kvm_get_rflags(vcpu); return 0; } int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) { vcpu->arch.emulate_regs_need_sync_from_vcpu = true; vcpu->arch.emulate_regs_need_sync_to_vcpu = false; kvm_register_write(vcpu, VCPU_REGS_RAX, regs->rax); kvm_register_write(vcpu, VCPU_REGS_RBX, regs->rbx); kvm_register_write(vcpu, VCPU_REGS_RCX, regs->rcx); kvm_register_write(vcpu, VCPU_REGS_RDX, regs->rdx); kvm_register_write(vcpu, VCPU_REGS_RSI, regs->rsi); kvm_register_write(vcpu, VCPU_REGS_RDI, regs->rdi); kvm_register_write(vcpu, VCPU_REGS_RSP, regs->rsp); kvm_register_write(vcpu, VCPU_REGS_RBP, regs->rbp); #ifdef CONFIG_X86_64 kvm_register_write(vcpu, VCPU_REGS_R8, regs->r8); kvm_register_write(vcpu, VCPU_REGS_R9, regs->r9); 
kvm_register_write(vcpu, VCPU_REGS_R10, regs->r10); kvm_register_write(vcpu, VCPU_REGS_R11, regs->r11); kvm_register_write(vcpu, VCPU_REGS_R12, regs->r12); kvm_register_write(vcpu, VCPU_REGS_R13, regs->r13); kvm_register_write(vcpu, VCPU_REGS_R14, regs->r14); kvm_register_write(vcpu, VCPU_REGS_R15, regs->r15); #endif kvm_rip_write(vcpu, regs->rip); kvm_set_rflags(vcpu, regs->rflags); vcpu->arch.exception.pending = false; kvm_make_request(KVM_REQ_EVENT, vcpu); return 0; } void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l) { struct kvm_segment cs; kvm_get_segment(vcpu, &cs, VCPU_SREG_CS); *db = cs.db; *l = cs.l; } EXPORT_SYMBOL_GPL(kvm_get_cs_db_l_bits); int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) { struct desc_ptr dt; kvm_get_segment(vcpu, &sregs->cs, VCPU_SREG_CS); kvm_get_segment(vcpu, &sregs->ds, VCPU_SREG_DS); kvm_get_segment(vcpu, &sregs->es, VCPU_SREG_ES); kvm_get_segment(vcpu, &sregs->fs, VCPU_SREG_FS); kvm_get_segment(vcpu, &sregs->gs, VCPU_SREG_GS); kvm_get_segment(vcpu, &sregs->ss, VCPU_SREG_SS); kvm_get_segment(vcpu, &sregs->tr, VCPU_SREG_TR); kvm_get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR); kvm_x86_ops->get_idt(vcpu, &dt); sregs->idt.limit = dt.size; sregs->idt.base = dt.address; kvm_x86_ops->get_gdt(vcpu, &dt); sregs->gdt.limit = dt.size; sregs->gdt.base = dt.address; sregs->cr0 = kvm_read_cr0(vcpu); sregs->cr2 = vcpu->arch.cr2; sregs->cr3 = kvm_read_cr3(vcpu); sregs->cr4 = kvm_read_cr4(vcpu); sregs->cr8 = kvm_get_cr8(vcpu); sregs->efer = vcpu->arch.efer; sregs->apic_base = kvm_get_apic_base(vcpu); memset(sregs->interrupt_bitmap, 0, sizeof sregs->interrupt_bitmap); if (vcpu->arch.interrupt.pending && !vcpu->arch.interrupt.soft) set_bit(vcpu->arch.interrupt.nr, (unsigned long *)sregs->interrupt_bitmap); return 0; } int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, struct kvm_mp_state *mp_state) { kvm_apic_accept_events(vcpu); if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED && vcpu->arch.pv.pv_unhalted) mp_state->mp_state = KVM_MP_STATE_RUNNABLE; else mp_state->mp_state = vcpu->arch.mp_state; return 0; } int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, struct kvm_mp_state *mp_state) { if (!kvm_vcpu_has_lapic(vcpu) && mp_state->mp_state != KVM_MP_STATE_RUNNABLE) return -EINVAL; if (mp_state->mp_state == KVM_MP_STATE_SIPI_RECEIVED) { vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED; set_bit(KVM_APIC_SIPI, &vcpu->arch.apic->pending_events); } else vcpu->arch.mp_state = mp_state->mp_state; kvm_make_request(KVM_REQ_EVENT, vcpu); return 0; } int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index, int reason, bool has_error_code, u32 error_code) { struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt; int ret; init_emulate_ctxt(vcpu); ret = emulator_task_switch(ctxt, tss_selector, idt_index, reason, has_error_code, error_code); if (ret) return EMULATE_FAIL; kvm_rip_write(vcpu, ctxt->eip); kvm_set_rflags(vcpu, ctxt->eflags); kvm_make_request(KVM_REQ_EVENT, vcpu); return EMULATE_DONE; } EXPORT_SYMBOL_GPL(kvm_task_switch); int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) { struct msr_data apic_base_msr; int mmu_reset_needed = 0; int pending_vec, max_bits, idx; struct desc_ptr dt; if (!guest_cpuid_has_xsave(vcpu) && (sregs->cr4 & X86_CR4_OSXSAVE)) return -EINVAL; dt.size = sregs->idt.limit; dt.address = sregs->idt.base; kvm_x86_ops->set_idt(vcpu, &dt); dt.size = sregs->gdt.limit; dt.address = sregs->gdt.base; kvm_x86_ops->set_gdt(vcpu, &dt); vcpu->arch.cr2 = 
sregs->cr2; mmu_reset_needed |= kvm_read_cr3(vcpu) != sregs->cr3; vcpu->arch.cr3 = sregs->cr3; __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail); kvm_set_cr8(vcpu, sregs->cr8); mmu_reset_needed |= vcpu->arch.efer != sregs->efer; kvm_x86_ops->set_efer(vcpu, sregs->efer); apic_base_msr.data = sregs->apic_base; apic_base_msr.host_initiated = true; kvm_set_apic_base(vcpu, &apic_base_msr); mmu_reset_needed |= kvm_read_cr0(vcpu) != sregs->cr0; kvm_x86_ops->set_cr0(vcpu, sregs->cr0); vcpu->arch.cr0 = sregs->cr0; mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4; kvm_x86_ops->set_cr4(vcpu, sregs->cr4); if (sregs->cr4 & X86_CR4_OSXSAVE) kvm_update_cpuid(vcpu); idx = srcu_read_lock(&vcpu->kvm->srcu); if (!is_long_mode(vcpu) && is_pae(vcpu)) { load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu)); mmu_reset_needed = 1; } srcu_read_unlock(&vcpu->kvm->srcu, idx); if (mmu_reset_needed) kvm_mmu_reset_context(vcpu); max_bits = KVM_NR_INTERRUPTS; pending_vec = find_first_bit( (const unsigned long *)sregs->interrupt_bitmap, max_bits); if (pending_vec < max_bits) { kvm_queue_interrupt(vcpu, pending_vec, false); pr_debug("Set back pending irq %d\n", pending_vec); } kvm_set_segment(vcpu, &sregs->cs, VCPU_SREG_CS); kvm_set_segment(vcpu, &sregs->ds, VCPU_SREG_DS); kvm_set_segment(vcpu, &sregs->es, VCPU_SREG_ES); kvm_set_segment(vcpu, &sregs->fs, VCPU_SREG_FS); kvm_set_segment(vcpu, &sregs->gs, VCPU_SREG_GS); kvm_set_segment(vcpu, &sregs->ss, VCPU_SREG_SS); kvm_set_segment(vcpu, &sregs->tr, VCPU_SREG_TR); kvm_set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR); update_cr8_intercept(vcpu); /* Older userspace won't unhalt the vcpu on reset. */ if (kvm_vcpu_is_bsp(vcpu) && kvm_rip_read(vcpu) == 0xfff0 && sregs->cs.selector == 0xf000 && sregs->cs.base == 0xffff0000 && !is_protmode(vcpu)) vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; kvm_make_request(KVM_REQ_EVENT, vcpu); return 0; } int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_guest_debug *dbg) { unsigned long rflags; int i, r; if (dbg->control & (KVM_GUESTDBG_INJECT_DB | KVM_GUESTDBG_INJECT_BP)) { r = -EBUSY; if (vcpu->arch.exception.pending) goto out; if (dbg->control & KVM_GUESTDBG_INJECT_DB) kvm_queue_exception(vcpu, DB_VECTOR); else kvm_queue_exception(vcpu, BP_VECTOR); } /* * Read rflags as long as potentially injected trace flags are still * filtered out. */ rflags = kvm_get_rflags(vcpu); vcpu->guest_debug = dbg->control; if (!(vcpu->guest_debug & KVM_GUESTDBG_ENABLE)) vcpu->guest_debug = 0; if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) { for (i = 0; i < KVM_NR_DB_REGS; ++i) vcpu->arch.eff_db[i] = dbg->arch.debugreg[i]; vcpu->arch.guest_debug_dr7 = dbg->arch.debugreg[7]; } else { for (i = 0; i < KVM_NR_DB_REGS; i++) vcpu->arch.eff_db[i] = vcpu->arch.db[i]; } kvm_update_dr7(vcpu); if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) vcpu->arch.singlestep_rip = kvm_rip_read(vcpu) + get_segment_base(vcpu, VCPU_SREG_CS); /* * Trigger an rflags update that will inject or remove the trace * flags. */ kvm_set_rflags(vcpu, rflags); kvm_x86_ops->update_db_bp_intercept(vcpu); r = 0; out: return r; } /* * Translate a guest virtual address to a guest physical address. 
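 *
 * (The walk honors the vcpu's current paging mode; tr->valid is
 * cleared when kvm_mmu_gva_to_gpa_system() reports UNMAPPED_GVA.)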
*/ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, struct kvm_translation *tr) { unsigned long vaddr = tr->linear_address; gpa_t gpa; int idx; idx = srcu_read_lock(&vcpu->kvm->srcu); gpa = kvm_mmu_gva_to_gpa_system(vcpu, vaddr, NULL); srcu_read_unlock(&vcpu->kvm->srcu, idx); tr->physical_address = gpa; tr->valid = gpa != UNMAPPED_GVA; tr->writeable = 1; tr->usermode = 0; return 0; } int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) { struct i387_fxsave_struct *fxsave = &vcpu->arch.guest_fpu.state->fxsave; memcpy(fpu->fpr, fxsave->st_space, 128); fpu->fcw = fxsave->cwd; fpu->fsw = fxsave->swd; fpu->ftwx = fxsave->twd; fpu->last_opcode = fxsave->fop; fpu->last_ip = fxsave->rip; fpu->last_dp = fxsave->rdp; memcpy(fpu->xmm, fxsave->xmm_space, sizeof fxsave->xmm_space); return 0; } int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) { struct i387_fxsave_struct *fxsave = &vcpu->arch.guest_fpu.state->fxsave; memcpy(fxsave->st_space, fpu->fpr, 128); fxsave->cwd = fpu->fcw; fxsave->swd = fpu->fsw; fxsave->twd = fpu->ftwx; fxsave->fop = fpu->last_opcode; fxsave->rip = fpu->last_ip; fxsave->rdp = fpu->last_dp; memcpy(fxsave->xmm_space, fpu->xmm, sizeof fxsave->xmm_space); return 0; } int fx_init(struct kvm_vcpu *vcpu) { int err; err = fpu_alloc(&vcpu->arch.guest_fpu); if (err) return err; fpu_finit(&vcpu->arch.guest_fpu); /* * Ensure guest xcr0 is valid for loading */ vcpu->arch.xcr0 = XSTATE_FP; vcpu->arch.cr0 |= X86_CR0_ET; return 0; } EXPORT_SYMBOL_GPL(fx_init); static void fx_free(struct kvm_vcpu *vcpu) { fpu_free(&vcpu->arch.guest_fpu); } void kvm_load_guest_fpu(struct kvm_vcpu *vcpu) { if (vcpu->guest_fpu_loaded) return; /* * Restore all possible states in the guest, * and assume host would use all available bits. * Guest xcr0 would be loaded later. 
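 *
 * (kvm_put_guest_xcr0() below keeps the host xcr0 live while
 * fpu_restore_checking() runs; the guest value is reinstalled by
 * kvm_load_guest_xcr0() on the next guest entry.)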
*/ kvm_put_guest_xcr0(vcpu); vcpu->guest_fpu_loaded = 1; __kernel_fpu_begin(); fpu_restore_checking(&vcpu->arch.guest_fpu); trace_kvm_fpu(1); } void kvm_put_guest_fpu(struct kvm_vcpu *vcpu) { kvm_put_guest_xcr0(vcpu); if (!vcpu->guest_fpu_loaded) return; vcpu->guest_fpu_loaded = 0; fpu_save_init(&vcpu->arch.guest_fpu); __kernel_fpu_end(); ++vcpu->stat.fpu_reload; kvm_make_request(KVM_REQ_DEACTIVATE_FPU, vcpu); trace_kvm_fpu(0); } void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu) { kvmclock_reset(vcpu); free_cpumask_var(vcpu->arch.wbinvd_dirty_mask); fx_free(vcpu); kvm_x86_ops->vcpu_free(vcpu); } struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) { if (check_tsc_unstable() && atomic_read(&kvm->online_vcpus) != 0) printk_once(KERN_WARNING "kvm: SMP vm created on host with unstable TSC; " "guest TSC will not be reliable\n"); return kvm_x86_ops->vcpu_create(kvm, id); } int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) { int r; vcpu->arch.mtrr_state.have_fixed = 1; r = vcpu_load(vcpu); if (r) return r; kvm_vcpu_reset(vcpu); kvm_mmu_setup(vcpu); vcpu_put(vcpu); return r; } int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) { int r; struct msr_data msr; struct kvm *kvm = vcpu->kvm; r = vcpu_load(vcpu); if (r) return r; msr.data = 0x0; msr.index = MSR_IA32_TSC; msr.host_initiated = true; kvm_write_tsc(vcpu, &msr); vcpu_put(vcpu); schedule_delayed_work(&kvm->arch.kvmclock_sync_work, KVMCLOCK_SYNC_PERIOD); return r; } void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) { int r; vcpu->arch.apf.msr_val = 0; r = vcpu_load(vcpu); BUG_ON(r); kvm_mmu_unload(vcpu); vcpu_put(vcpu); fx_free(vcpu); kvm_x86_ops->vcpu_free(vcpu); } void kvm_vcpu_reset(struct kvm_vcpu *vcpu) { atomic_set(&vcpu->arch.nmi_queued, 0); vcpu->arch.nmi_pending = 0; vcpu->arch.nmi_injected = false; kvm_clear_interrupt_queue(vcpu); kvm_clear_exception_queue(vcpu); memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db)); vcpu->arch.dr6 = DR6_INIT; kvm_update_dr6(vcpu); vcpu->arch.dr7 = DR7_FIXED_1; kvm_update_dr7(vcpu); kvm_make_request(KVM_REQ_EVENT, vcpu); vcpu->arch.apf.msr_val = 0; vcpu->arch.st.msr_val = 0; kvmclock_reset(vcpu); kvm_clear_async_pf_completion_queue(vcpu); kvm_async_pf_hash_reset(vcpu); vcpu->arch.apf.halted = false; kvm_pmu_reset(vcpu); memset(vcpu->arch.regs, 0, sizeof(vcpu->arch.regs)); vcpu->arch.regs_avail = ~0; vcpu->arch.regs_dirty = ~0; kvm_x86_ops->vcpu_reset(vcpu); } void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, unsigned int vector) { struct kvm_segment cs; kvm_get_segment(vcpu, &cs, VCPU_SREG_CS); cs.selector = vector << 8; cs.base = vector << 12; kvm_set_segment(vcpu, &cs, VCPU_SREG_CS); kvm_rip_write(vcpu, 0); } int kvm_arch_hardware_enable(void) { struct kvm *kvm; struct kvm_vcpu *vcpu; int i; int ret; u64 local_tsc; u64 max_tsc = 0; bool stable, backwards_tsc = false; kvm_shared_msr_cpu_online(); ret = kvm_x86_ops->hardware_enable(); if (ret != 0) return ret; local_tsc = native_read_tsc(); stable = !check_tsc_unstable(); list_for_each_entry(kvm, &vm_list, vm_list) { kvm_for_each_vcpu(i, vcpu, kvm) { if (!stable && vcpu->cpu == smp_processor_id()) kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); if (stable && vcpu->arch.last_host_tsc > local_tsc) { backwards_tsc = true; if (vcpu->arch.last_host_tsc > max_tsc) max_tsc = vcpu->arch.last_host_tsc; } } } /* * Sometimes, even reliable TSCs go backwards. This happens on * platforms that reset TSC during suspend or hibernate actions, but * maintain synchronization. We must compensate. 
Fortunately, we can * detect that condition here, which happens early in CPU bringup, * before any KVM threads can be running. Unfortunately, we can't * bring the TSCs fully up to date with real time, as we aren't yet far * enough into CPU bringup that we know how much real time has actually * elapsed; our helper function, get_kernel_ns() will be using boot * variables that haven't been updated yet. * * So we simply find the maximum observed TSC above, then record the * adjustment to TSC in each VCPU. When the VCPU later gets loaded, * the adjustment will be applied. Note that we accumulate * adjustments, in case multiple suspend cycles happen before some VCPU * gets a chance to run again. In the event that no KVM threads get a * chance to run, we will miss the entire elapsed period, as we'll have * reset last_host_tsc, so VCPUs will not have the TSC adjusted and may * loose cycle time. This isn't too big a deal, since the loss will be * uniform across all VCPUs (not to mention the scenario is extremely * unlikely). It is possible that a second hibernate recovery happens * much faster than a first, causing the observed TSC here to be * smaller; this would require additional padding adjustment, which is * why we set last_host_tsc to the local tsc observed here. * * N.B. - this code below runs only on platforms with reliable TSC, * as that is the only way backwards_tsc is set above. Also note * that this runs for ALL vcpus, which is not a bug; all VCPUs should * have the same delta_cyc adjustment applied if backwards_tsc * is detected. Note further, this adjustment is only done once, * as we reset last_host_tsc on all VCPUs to stop this from being * called multiple times (one for each physical CPU bringup). * * Platforms with unreliable TSCs don't have to deal with this, they * will be compensated by the logic in vcpu_load, which sets the TSC to * catchup mode. This will catchup all VCPUs to real time, but cannot * guarantee that they stay in perfect synchronization. */ if (backwards_tsc) { u64 delta_cyc = max_tsc - local_tsc; backwards_tsc_observed = true; list_for_each_entry(kvm, &vm_list, vm_list) { kvm_for_each_vcpu(i, vcpu, kvm) { vcpu->arch.tsc_offset_adjustment += delta_cyc; vcpu->arch.last_host_tsc = local_tsc; kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu); } /* * We have to disable TSC offset matching.. if you were * booting a VM while issuing an S4 host suspend.... * you may have some problem. Solving this issue is * left as an exercise to the reader. 
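 *
 * (Concretely, zeroing last_tsc_nsec/last_tsc_write below should make
 * the next kvm_write_tsc() start a fresh sync generation instead of
 * matching against stale pre-suspend values.)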
*/ kvm->arch.last_tsc_nsec = 0; kvm->arch.last_tsc_write = 0; } } return 0; } void kvm_arch_hardware_disable(void) { kvm_x86_ops->hardware_disable(); drop_user_return_notifiers(); } int kvm_arch_hardware_setup(void) { return kvm_x86_ops->hardware_setup(); } void kvm_arch_hardware_unsetup(void) { kvm_x86_ops->hardware_unsetup(); } void kvm_arch_check_processor_compat(void *rtn) { kvm_x86_ops->check_processor_compatibility(rtn); } bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu) { return irqchip_in_kernel(vcpu->kvm) == (vcpu->arch.apic != NULL); } struct static_key kvm_no_apic_vcpu __read_mostly; int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) { struct page *page; struct kvm *kvm; int r; BUG_ON(vcpu->kvm == NULL); kvm = vcpu->kvm; vcpu->arch.pv.pv_unhalted = false; vcpu->arch.emulate_ctxt.ops = &emulate_ops; if (!irqchip_in_kernel(kvm) || kvm_vcpu_is_bsp(vcpu)) vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; else vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED; page = alloc_page(GFP_KERNEL | __GFP_ZERO); if (!page) { r = -ENOMEM; goto fail; } vcpu->arch.pio_data = page_address(page); kvm_set_tsc_khz(vcpu, max_tsc_khz); r = kvm_mmu_create(vcpu); if (r < 0) goto fail_free_pio_data; if (irqchip_in_kernel(kvm)) { r = kvm_create_lapic(vcpu); if (r < 0) goto fail_mmu_destroy; } else static_key_slow_inc(&kvm_no_apic_vcpu); vcpu->arch.mce_banks = kzalloc(KVM_MAX_MCE_BANKS * sizeof(u64) * 4, GFP_KERNEL); if (!vcpu->arch.mce_banks) { r = -ENOMEM; goto fail_free_lapic; } vcpu->arch.mcg_cap = KVM_MAX_MCE_BANKS; if (!zalloc_cpumask_var(&vcpu->arch.wbinvd_dirty_mask, GFP_KERNEL)) { r = -ENOMEM; goto fail_free_mce_banks; } r = fx_init(vcpu); if (r) goto fail_free_wbinvd_dirty_mask; vcpu->arch.ia32_tsc_adjust_msr = 0x0; vcpu->arch.pv_time_enabled = false; vcpu->arch.guest_supported_xcr0 = 0; vcpu->arch.guest_xstate_size = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET; kvm_async_pf_hash_reset(vcpu); kvm_pmu_init(vcpu); return 0; fail_free_wbinvd_dirty_mask: free_cpumask_var(vcpu->arch.wbinvd_dirty_mask); fail_free_mce_banks: kfree(vcpu->arch.mce_banks); fail_free_lapic: kvm_free_lapic(vcpu); fail_mmu_destroy: kvm_mmu_destroy(vcpu); fail_free_pio_data: free_page((unsigned long)vcpu->arch.pio_data); fail: return r; } void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) { int idx; kvm_pmu_destroy(vcpu); kfree(vcpu->arch.mce_banks); kvm_free_lapic(vcpu); idx = srcu_read_lock(&vcpu->kvm->srcu); kvm_mmu_destroy(vcpu); srcu_read_unlock(&vcpu->kvm->srcu, idx); free_page((unsigned long)vcpu->arch.pio_data); if (!irqchip_in_kernel(vcpu->kvm)) static_key_slow_dec(&kvm_no_apic_vcpu); } void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) { kvm_x86_ops->sched_in(vcpu, cpu); } int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) { if (type) return -EINVAL; INIT_LIST_HEAD(&kvm->arch.active_mmu_pages); INIT_LIST_HEAD(&kvm->arch.zapped_obsolete_pages); INIT_LIST_HEAD(&kvm->arch.assigned_dev_head); atomic_set(&kvm->arch.noncoherent_dma_count, 0); /* Reserve bit 0 of irq_sources_bitmap for userspace irq source */ set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap); /* Reserve bit 1 of irq_sources_bitmap for irqfd-resampler */ set_bit(KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap); raw_spin_lock_init(&kvm->arch.tsc_write_lock); mutex_init(&kvm->arch.apic_map_lock); spin_lock_init(&kvm->arch.pvclock_gtod_sync_lock); pvclock_update_vm_gtod_copy(kvm); INIT_DELAYED_WORK(&kvm->arch.kvmclock_update_work, kvmclock_update_fn); INIT_DELAYED_WORK(&kvm->arch.kvmclock_sync_work, kvmclock_sync_fn); return 0; } static 
void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu) { int r; r = vcpu_load(vcpu); BUG_ON(r); kvm_mmu_unload(vcpu); vcpu_put(vcpu); } static void kvm_free_vcpus(struct kvm *kvm) { unsigned int i; struct kvm_vcpu *vcpu; /* * Unpin any mmu pages first. */ kvm_for_each_vcpu(i, vcpu, kvm) { kvm_clear_async_pf_completion_queue(vcpu); kvm_unload_vcpu_mmu(vcpu); } kvm_for_each_vcpu(i, vcpu, kvm) kvm_arch_vcpu_free(vcpu); mutex_lock(&kvm->lock); for (i = 0; i < atomic_read(&kvm->online_vcpus); i++) kvm->vcpus[i] = NULL; atomic_set(&kvm->online_vcpus, 0); mutex_unlock(&kvm->lock); } void kvm_arch_sync_events(struct kvm *kvm) { cancel_delayed_work_sync(&kvm->arch.kvmclock_sync_work); cancel_delayed_work_sync(&kvm->arch.kvmclock_update_work); kvm_free_all_assigned_devices(kvm); kvm_free_pit(kvm); } void kvm_arch_destroy_vm(struct kvm *kvm) { if (current->mm == kvm->mm) { /* * Free memory regions allocated on behalf of userspace, * unless the the memory map has changed due to process exit * or fd copying. */ struct kvm_userspace_memory_region mem; memset(&mem, 0, sizeof(mem)); mem.slot = APIC_ACCESS_PAGE_PRIVATE_MEMSLOT; kvm_set_memory_region(kvm, &mem); mem.slot = IDENTITY_PAGETABLE_PRIVATE_MEMSLOT; kvm_set_memory_region(kvm, &mem); mem.slot = TSS_PRIVATE_MEMSLOT; kvm_set_memory_region(kvm, &mem); } kvm_iommu_unmap_guest(kvm); kfree(kvm->arch.vpic); kfree(kvm->arch.vioapic); kvm_free_vcpus(kvm); if (kvm->arch.apic_access_page) put_page(kvm->arch.apic_access_page); kfree(rcu_dereference_check(kvm->arch.apic_map, 1)); } void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free, struct kvm_memory_slot *dont) { int i; for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) { if (!dont || free->arch.rmap[i] != dont->arch.rmap[i]) { kvm_kvfree(free->arch.rmap[i]); free->arch.rmap[i] = NULL; } if (i == 0) continue; if (!dont || free->arch.lpage_info[i - 1] != dont->arch.lpage_info[i - 1]) { kvm_kvfree(free->arch.lpage_info[i - 1]); free->arch.lpage_info[i - 1] = NULL; } } } int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot, unsigned long npages) { int i; for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) { unsigned long ugfn; int lpages; int level = i + 1; lpages = gfn_to_index(slot->base_gfn + npages - 1, slot->base_gfn, level) + 1; slot->arch.rmap[i] = kvm_kvzalloc(lpages * sizeof(*slot->arch.rmap[i])); if (!slot->arch.rmap[i]) goto out_free; if (i == 0) continue; slot->arch.lpage_info[i - 1] = kvm_kvzalloc(lpages * sizeof(*slot->arch.lpage_info[i - 1])); if (!slot->arch.lpage_info[i - 1]) goto out_free; if (slot->base_gfn & (KVM_PAGES_PER_HPAGE(level) - 1)) slot->arch.lpage_info[i - 1][0].write_count = 1; if ((slot->base_gfn + npages) & (KVM_PAGES_PER_HPAGE(level) - 1)) slot->arch.lpage_info[i - 1][lpages - 1].write_count = 1; ugfn = slot->userspace_addr >> PAGE_SHIFT; /* * If the gfn and userspace address are not aligned wrt each * other, or if explicitly asked to, disable large page * support for this slot */ if ((slot->base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE(level) - 1) || !kvm_largepages_enabled()) { unsigned long j; for (j = 0; j < lpages; ++j) slot->arch.lpage_info[i - 1][j].write_count = 1; } } return 0; out_free: for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) { kvm_kvfree(slot->arch.rmap[i]); slot->arch.rmap[i] = NULL; if (i == 0) continue; kvm_kvfree(slot->arch.lpage_info[i - 1]); slot->arch.lpage_info[i - 1] = NULL; } return -ENOMEM; } void kvm_arch_memslots_updated(struct kvm *kvm) { /* * memslots->generation has been incremented. * mmio generation may have reached its maximum value. 
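 *
 * (mmio sptes encode the generation they were created in; zapping
 * them here keeps a wrapped generation counter from making stale
 * entries look current again.)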
*/ kvm_mmu_invalidate_mmio_sptes(kvm); } int kvm_arch_prepare_memory_region(struct kvm *kvm, struct kvm_memory_slot *memslot, struct kvm_userspace_memory_region *mem, enum kvm_mr_change change) { /* * Only private memory slots need to be mapped here since * KVM_SET_MEMORY_REGION ioctl is no longer supported. */ if ((memslot->id >= KVM_USER_MEM_SLOTS) && (change == KVM_MR_CREATE)) { unsigned long userspace_addr; /* * MAP_SHARED to prevent internal slot pages from being moved * by fork()/COW. */ userspace_addr = vm_mmap(NULL, 0, memslot->npages * PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, 0); if (IS_ERR((void *)userspace_addr)) return PTR_ERR((void *)userspace_addr); memslot->userspace_addr = userspace_addr; } return 0; } void kvm_arch_commit_memory_region(struct kvm *kvm, struct kvm_userspace_memory_region *mem, const struct kvm_memory_slot *old, enum kvm_mr_change change) { int nr_mmu_pages = 0; if ((mem->slot >= KVM_USER_MEM_SLOTS) && (change == KVM_MR_DELETE)) { int ret; ret = vm_munmap(old->userspace_addr, old->npages * PAGE_SIZE); if (ret < 0) printk(KERN_WARNING "kvm_vm_ioctl_set_memory_region: " "failed to munmap memory\n"); } if (!kvm->arch.n_requested_mmu_pages) nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm); if (nr_mmu_pages) kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages); /* * Write protect all pages for dirty logging. * * All the sptes including the large sptes which point to this * slot are set to readonly. We can not create any new large * spte on this slot until the end of the logging. * * See the comments in fast_page_fault(). */ if ((change != KVM_MR_DELETE) && (mem->flags & KVM_MEM_LOG_DIRTY_PAGES)) kvm_mmu_slot_remove_write_access(kvm, mem->slot); } void kvm_arch_flush_shadow_all(struct kvm *kvm) { kvm_mmu_invalidate_zap_all_pages(kvm); } void kvm_arch_flush_shadow_memslot(struct kvm *kvm, struct kvm_memory_slot *slot) { kvm_mmu_invalidate_zap_all_pages(kvm); } int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) { if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events) kvm_x86_ops->check_nested_events(vcpu, false); return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE && !vcpu->arch.apf.halted) || !list_empty_careful(&vcpu->async_pf.done) || kvm_apic_has_events(vcpu) || vcpu->arch.pv.pv_unhalted || atomic_read(&vcpu->arch.nmi_queued) || (kvm_arch_interrupt_allowed(vcpu) && kvm_cpu_has_interrupt(vcpu)); } int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu) { return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE; } int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu) { return kvm_x86_ops->interrupt_allowed(vcpu); } bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip) { unsigned long current_rip = kvm_rip_read(vcpu) + get_segment_base(vcpu, VCPU_SREG_CS); return current_rip == linear_rip; } EXPORT_SYMBOL_GPL(kvm_is_linear_rip); unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu) { unsigned long rflags; rflags = kvm_x86_ops->get_rflags(vcpu); if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) rflags &= ~X86_EFLAGS_TF; return rflags; } EXPORT_SYMBOL_GPL(kvm_get_rflags); static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags) { if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP && kvm_is_linear_rip(vcpu, vcpu->arch.singlestep_rip)) rflags |= X86_EFLAGS_TF; kvm_x86_ops->set_rflags(vcpu, rflags); } void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags) { __kvm_set_rflags(vcpu, rflags); kvm_make_request(KVM_REQ_EVENT, vcpu); } EXPORT_SYMBOL_GPL(kvm_set_rflags); void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, 
struct kvm_async_pf *work) { int r; if ((vcpu->arch.mmu.direct_map != work->arch.direct_map) || work->wakeup_all) return; r = kvm_mmu_reload(vcpu); if (unlikely(r)) return; if (!vcpu->arch.mmu.direct_map && work->arch.cr3 != vcpu->arch.mmu.get_cr3(vcpu)) return; vcpu->arch.mmu.page_fault(vcpu, work->gva, 0, true); } static inline u32 kvm_async_pf_hash_fn(gfn_t gfn) { return hash_32(gfn & 0xffffffff, order_base_2(ASYNC_PF_PER_VCPU)); } static inline u32 kvm_async_pf_next_probe(u32 key) { return (key + 1) & (roundup_pow_of_two(ASYNC_PF_PER_VCPU) - 1); } static void kvm_add_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn) { u32 key = kvm_async_pf_hash_fn(gfn); while (vcpu->arch.apf.gfns[key] != ~0) key = kvm_async_pf_next_probe(key); vcpu->arch.apf.gfns[key] = gfn; } static u32 kvm_async_pf_gfn_slot(struct kvm_vcpu *vcpu, gfn_t gfn) { int i; u32 key = kvm_async_pf_hash_fn(gfn); for (i = 0; i < roundup_pow_of_two(ASYNC_PF_PER_VCPU) && (vcpu->arch.apf.gfns[key] != gfn && vcpu->arch.apf.gfns[key] != ~0); i++) key = kvm_async_pf_next_probe(key); return key; } bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn) { return vcpu->arch.apf.gfns[kvm_async_pf_gfn_slot(vcpu, gfn)] == gfn; } static void kvm_del_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn) { u32 i, j, k; i = j = kvm_async_pf_gfn_slot(vcpu, gfn); while (true) { vcpu->arch.apf.gfns[i] = ~0; do { j = kvm_async_pf_next_probe(j); if (vcpu->arch.apf.gfns[j] == ~0) return; k = kvm_async_pf_hash_fn(vcpu->arch.apf.gfns[j]); /* * k lies cyclically in ]i,j] * | i.k.j | * |....j i.k.| or |.k..j i...| */ } while ((i <= j) ? (i < k && k <= j) : (i < k || k <= j)); vcpu->arch.apf.gfns[i] = vcpu->arch.apf.gfns[j]; i = j; } } static int apf_put_user(struct kvm_vcpu *vcpu, u32 val) { return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apf.data, &val, sizeof(val)); } void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu, struct kvm_async_pf *work) { struct x86_exception fault; trace_kvm_async_pf_not_present(work->arch.token, work->gva); kvm_add_async_pf_gfn(vcpu, work->arch.gfn); if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) || (vcpu->arch.apf.send_user_only && kvm_x86_ops->get_cpl(vcpu) == 0)) kvm_make_request(KVM_REQ_APF_HALT, vcpu); else if (!apf_put_user(vcpu, KVM_PV_REASON_PAGE_NOT_PRESENT)) { fault.vector = PF_VECTOR; fault.error_code_valid = true; fault.error_code = 0; fault.nested_page_fault = false; fault.address = work->arch.token; kvm_inject_page_fault(vcpu, &fault); } } void kvm_arch_async_page_present(struct kvm_vcpu *vcpu, struct kvm_async_pf *work) { struct x86_exception fault; trace_kvm_async_pf_ready(work->arch.token, work->gva); if (work->wakeup_all) work->arch.token = ~0; /* broadcast wakeup */ else kvm_del_async_pf_gfn(vcpu, work->arch.gfn); if ((vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) && !apf_put_user(vcpu, KVM_PV_REASON_PAGE_READY)) { fault.vector = PF_VECTOR; fault.error_code_valid = true; fault.error_code = 0; fault.nested_page_fault = false; fault.address = work->arch.token; kvm_inject_page_fault(vcpu, &fault); } vcpu->arch.apf.halted = false; vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; } bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu) { if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED)) return true; else return !kvm_event_needs_reinjection(vcpu) && kvm_x86_ops->interrupt_allowed(vcpu); } void kvm_arch_register_noncoherent_dma(struct kvm *kvm) { atomic_inc(&kvm->arch.noncoherent_dma_count); } EXPORT_SYMBOL_GPL(kvm_arch_register_noncoherent_dma); void 
kvm_arch_unregister_noncoherent_dma(struct kvm *kvm)
{
	atomic_dec(&kvm->arch.noncoherent_dma_count);
}
EXPORT_SYMBOL_GPL(kvm_arch_unregister_noncoherent_dma);

bool kvm_arch_has_noncoherent_dma(struct kvm *kvm)
{
	return atomic_read(&kvm->arch.noncoherent_dma_count);
}
EXPORT_SYMBOL_GPL(kvm_arch_has_noncoherent_dma);

EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_inj_virq);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_page_fault);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_msr);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_cr);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmrun);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit_inject);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intr_vmexit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_invlpga);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_skinit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intercepts);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_write_tsc_offset);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_ple_window);
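/*
 * The open-addressed gfn hash used by kvm_add_async_pf_gfn() and
 * kvm_del_async_pf_gfn() above deletes without tombstones by shifting
 * later chain members back into the hole. A minimal userspace sketch of
 * the same scheme follows; the table size, EMPTY marker and hash_fn()
 * are illustrative choices of mine, not kernel code.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define NSLOTS 16u		/* power of two, like ASYNC_PF_PER_VCPU */
#define EMPTY  UINT32_MAX	/* the kernel uses ~0 for free slots */

static uint32_t table[NSLOTS];

static uint32_t hash_fn(uint32_t key) { return key % NSLOTS; }
static uint32_t next_probe(uint32_t s) { return (s + 1) & (NSLOTS - 1); }

/* Linear-probing insert, as in kvm_add_async_pf_gfn(). */
static void insert(uint32_t key)
{
	uint32_t s = hash_fn(key);

	while (table[s] != EMPTY)
		s = next_probe(s);
	table[s] = key;
}

static int lookup(uint32_t key)
{
	uint32_t s = hash_fn(key);
	unsigned int n;

	for (n = 0; n < NSLOTS && table[s] != EMPTY; n++) {
		if (table[s] == key)
			return 1;
		s = next_probe(s);
	}
	return 0;
}

/*
 * Backward-shift delete: after emptying slot i, scan forward; an entry
 * whose home slot k does NOT lie cyclically in ]i, j] can be moved back
 * into the hole without breaking its own probe chain. The cyclic test
 * is the same expression as in kvm_del_async_pf_gfn().
 */
static void del(uint32_t key)
{
	uint32_t i, j, k;

	i = hash_fn(key);
	while (table[i] != key)		/* caller guarantees key is present */
		i = next_probe(i);
	j = i;
	for (;;) {
		table[i] = EMPTY;
		do {
			j = next_probe(j);
			if (table[j] == EMPTY)
				return;
			k = hash_fn(table[j]);
		} while ((i <= j) ? (i < k && k <= j) : (i < k || k <= j));
		table[i] = table[j];
		i = j;
	}
}

int main(void)
{
	unsigned int s;

	for (s = 0; s < NSLOTS; s++)
		table[s] = EMPTY;
	insert(3);	/* home slot 3 */
	insert(19);	/* 19 % 16 == 3, probes to slot 4 */
	insert(35);	/* 35 % 16 == 3, probes to slot 5 */
	del(19);	/* hole in the middle of the probe chain */
	assert(lookup(3) && lookup(35) && !lookup(19));
	puts("probe chain intact after backward-shift delete");
	return 0;
}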
/* * Kernel-based Virtual Machine driver for Linux * * derived from drivers/kvm/kvm_main.c * * Copyright (C) 2006 Qumranet, Inc. * Copyright (C) 2008 Qumranet, Inc. * Copyright IBM Corporation, 2008 * Copyright 2010 Red Hat, Inc. and/or its affiliates. * * Authors: * Avi Kivity <avi@qumranet.com> * Yaniv Kamay <yaniv@qumranet.com> * Amit Shah <amit.shah@qumranet.com> * Ben-Ami Yassour <benami@il.ibm.com> * * This work is licensed under the terms of the GNU GPL, version 2. See * the COPYING file in the top-level directory. * */ #include <linux/kvm_host.h> #include "irq.h" #include "mmu.h" #include "i8254.h" #include "tss.h" #include "kvm_cache_regs.h" #include "x86.h" #include "cpuid.h" #include <linux/clocksource.h> #include <linux/interrupt.h> #include <linux/kvm.h> #include <linux/fs.h> #include <linux/vmalloc.h> #include <linux/module.h> #include <linux/mman.h> #include <linux/highmem.h> #include <linux/iommu.h> #include <linux/intel-iommu.h> #include <linux/cpufreq.h> #include <linux/user-return-notifier.h> #include <linux/srcu.h> #include <linux/slab.h> #include <linux/perf_event.h> #include <linux/uaccess.h> #include <linux/hash.h> #include <linux/pci.h> #include <linux/timekeeper_internal.h> #include <linux/pvclock_gtod.h> #include <trace/events/kvm.h> #define CREATE_TRACE_POINTS #include "trace.h" #include <asm/debugreg.h> #include <asm/msr.h> #include <asm/desc.h> #include <asm/mtrr.h> #include <asm/mce.h> #include <asm/i387.h> #include <asm/fpu-internal.h> /* Ugh! */ #include <asm/xcr.h> #include <asm/pvclock.h> #include <asm/div64.h> #define MAX_IO_MSRS 256 #define KVM_MAX_MCE_BANKS 32 #define KVM_MCE_CAP_SUPPORTED (MCG_CTL_P | MCG_SER_P) #define emul_to_vcpu(ctxt) \ container_of(ctxt, struct kvm_vcpu, arch.emulate_ctxt) /* EFER defaults: * - enable syscall per default because its emulated by KVM * - enable LME and LMA per default on 64 bit KVM */ #ifdef CONFIG_X86_64 static u64 __read_mostly efer_reserved_bits = ~((u64)(EFER_SCE | EFER_LME | EFER_LMA)); #else static u64 __read_mostly efer_reserved_bits = ~((u64)EFER_SCE); #endif #define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU static void update_cr8_intercept(struct kvm_vcpu *vcpu); static void process_nmi(struct kvm_vcpu *vcpu); static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags); struct kvm_x86_ops *kvm_x86_ops; EXPORT_SYMBOL_GPL(kvm_x86_ops); static bool ignore_msrs = 0; module_param(ignore_msrs, bool, S_IRUGO | S_IWUSR); unsigned int min_timer_period_us = 500; module_param(min_timer_period_us, uint, S_IRUGO | S_IWUSR); bool kvm_has_tsc_control; EXPORT_SYMBOL_GPL(kvm_has_tsc_control); u32 kvm_max_guest_tsc_khz; EXPORT_SYMBOL_GPL(kvm_max_guest_tsc_khz); /* tsc tolerance in parts per million - default to 1/2 of the NTP threshold */ static u32 tsc_tolerance_ppm = 250; module_param(tsc_tolerance_ppm, uint, S_IRUGO | S_IWUSR); static bool backwards_tsc_observed = false; #define KVM_NR_SHARED_MSRS 16 struct kvm_shared_msrs_global { int nr; u32 msrs[KVM_NR_SHARED_MSRS]; }; struct kvm_shared_msrs { struct user_return_notifier urn; bool registered; struct kvm_shared_msr_values { u64 host; u64 curr; } values[KVM_NR_SHARED_MSRS]; }; static struct kvm_shared_msrs_global __read_mostly shared_msrs_global; static struct kvm_shared_msrs __percpu *shared_msrs; struct kvm_stats_debugfs_item debugfs_entries[] = { { "pf_fixed", VCPU_STAT(pf_fixed) }, { "pf_guest", VCPU_STAT(pf_guest) }, { "tlb_flush", VCPU_STAT(tlb_flush) }, { "invlpg", 
VCPU_STAT(invlpg) }, { "exits", VCPU_STAT(exits) }, { "io_exits", VCPU_STAT(io_exits) }, { "mmio_exits", VCPU_STAT(mmio_exits) }, { "signal_exits", VCPU_STAT(signal_exits) }, { "irq_window", VCPU_STAT(irq_window_exits) }, { "nmi_window", VCPU_STAT(nmi_window_exits) }, { "halt_exits", VCPU_STAT(halt_exits) }, { "halt_wakeup", VCPU_STAT(halt_wakeup) }, { "hypercalls", VCPU_STAT(hypercalls) }, { "request_irq", VCPU_STAT(request_irq_exits) }, { "irq_exits", VCPU_STAT(irq_exits) }, { "host_state_reload", VCPU_STAT(host_state_reload) }, { "efer_reload", VCPU_STAT(efer_reload) }, { "fpu_reload", VCPU_STAT(fpu_reload) }, { "insn_emulation", VCPU_STAT(insn_emulation) }, { "insn_emulation_fail", VCPU_STAT(insn_emulation_fail) }, { "irq_injections", VCPU_STAT(irq_injections) }, { "nmi_injections", VCPU_STAT(nmi_injections) }, { "mmu_shadow_zapped", VM_STAT(mmu_shadow_zapped) }, { "mmu_pte_write", VM_STAT(mmu_pte_write) }, { "mmu_pte_updated", VM_STAT(mmu_pte_updated) }, { "mmu_pde_zapped", VM_STAT(mmu_pde_zapped) }, { "mmu_flooded", VM_STAT(mmu_flooded) }, { "mmu_recycled", VM_STAT(mmu_recycled) }, { "mmu_cache_miss", VM_STAT(mmu_cache_miss) }, { "mmu_unsync", VM_STAT(mmu_unsync) }, { "remote_tlb_flush", VM_STAT(remote_tlb_flush) }, { "largepages", VM_STAT(lpages) }, { NULL } }; u64 __read_mostly host_xcr0; static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt); static inline void kvm_async_pf_hash_reset(struct kvm_vcpu *vcpu) { int i; for (i = 0; i < roundup_pow_of_two(ASYNC_PF_PER_VCPU); i++) vcpu->arch.apf.gfns[i] = ~0; } static void kvm_on_user_return(struct user_return_notifier *urn) { unsigned slot; struct kvm_shared_msrs *locals = container_of(urn, struct kvm_shared_msrs, urn); struct kvm_shared_msr_values *values; for (slot = 0; slot < shared_msrs_global.nr; ++slot) { values = &locals->values[slot]; if (values->host != values->curr) { wrmsrl(shared_msrs_global.msrs[slot], values->host); values->curr = values->host; } } locals->registered = false; user_return_notifier_unregister(urn); } static void shared_msr_update(unsigned slot, u32 msr) { u64 value; unsigned int cpu = smp_processor_id(); struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu); /* only read, and nobody should modify it at this time, * so don't need lock */ if (slot >= shared_msrs_global.nr) { printk(KERN_ERR "kvm: invalid MSR slot!"); return; } rdmsrl_safe(msr, &value); smsr->values[slot].host = value; smsr->values[slot].curr = value; } void kvm_define_shared_msr(unsigned slot, u32 msr) { BUG_ON(slot >= KVM_NR_SHARED_MSRS); if (slot >= shared_msrs_global.nr) shared_msrs_global.nr = slot + 1; shared_msrs_global.msrs[slot] = msr; /* we need ensured the shared_msr_global have been updated */ smp_wmb(); } EXPORT_SYMBOL_GPL(kvm_define_shared_msr); static void kvm_shared_msr_cpu_online(void) { unsigned i; for (i = 0; i < shared_msrs_global.nr; ++i) shared_msr_update(i, shared_msrs_global.msrs[i]); } void kvm_set_shared_msr(unsigned slot, u64 value, u64 mask) { unsigned int cpu = smp_processor_id(); struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu); if (((value ^ smsr->values[slot].curr) & mask) == 0) return; smsr->values[slot].curr = value; wrmsrl(shared_msrs_global.msrs[slot], value); if (!smsr->registered) { smsr->urn.on_user_return = kvm_on_user_return; user_return_notifier_register(&smsr->urn); smsr->registered = true; } } EXPORT_SYMBOL_GPL(kvm_set_shared_msr); static void drop_user_return_notifiers(void) { unsigned int cpu = smp_processor_id(); struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, 
cpu); if (smsr->registered) kvm_on_user_return(&smsr->urn); } u64 kvm_get_apic_base(struct kvm_vcpu *vcpu) { return vcpu->arch.apic_base; } EXPORT_SYMBOL_GPL(kvm_get_apic_base); int kvm_set_apic_base(struct kvm_vcpu *vcpu, struct msr_data *msr_info) { u64 old_state = vcpu->arch.apic_base & (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE); u64 new_state = msr_info->data & (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE); u64 reserved_bits = ((~0ULL) << cpuid_maxphyaddr(vcpu)) | 0x2ff | (guest_cpuid_has_x2apic(vcpu) ? 0 : X2APIC_ENABLE); if (!msr_info->host_initiated && ((msr_info->data & reserved_bits) != 0 || new_state == X2APIC_ENABLE || (new_state == MSR_IA32_APICBASE_ENABLE && old_state == (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE)) || (new_state == (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE) && old_state == 0))) return 1; kvm_lapic_set_base(vcpu, msr_info->data); return 0; } EXPORT_SYMBOL_GPL(kvm_set_apic_base); asmlinkage __visible void kvm_spurious_fault(void) { /* Fault while not rebooting. We want the trace. */ BUG(); } EXPORT_SYMBOL_GPL(kvm_spurious_fault); #define EXCPT_BENIGN 0 #define EXCPT_CONTRIBUTORY 1 #define EXCPT_PF 2 static int exception_class(int vector) { switch (vector) { case PF_VECTOR: return EXCPT_PF; case DE_VECTOR: case TS_VECTOR: case NP_VECTOR: case SS_VECTOR: case GP_VECTOR: return EXCPT_CONTRIBUTORY; default: break; } return EXCPT_BENIGN; } #define EXCPT_FAULT 0 #define EXCPT_TRAP 1 #define EXCPT_ABORT 2 #define EXCPT_INTERRUPT 3 static int exception_type(int vector) { unsigned int mask; if (WARN_ON(vector > 31 || vector == NMI_VECTOR)) return EXCPT_INTERRUPT; mask = 1 << vector; /* #DB is trap, as instruction watchpoints are handled elsewhere */ if (mask & ((1 << DB_VECTOR) | (1 << BP_VECTOR) | (1 << OF_VECTOR))) return EXCPT_TRAP; if (mask & ((1 << DF_VECTOR) | (1 << MC_VECTOR))) return EXCPT_ABORT; /* Reserved exceptions will result in fault */ return EXCPT_FAULT; } static void kvm_multiple_exception(struct kvm_vcpu *vcpu, unsigned nr, bool has_error, u32 error_code, bool reinject) { u32 prev_nr; int class1, class2; kvm_make_request(KVM_REQ_EVENT, vcpu); if (!vcpu->arch.exception.pending) { queue: vcpu->arch.exception.pending = true; vcpu->arch.exception.has_error_code = has_error; vcpu->arch.exception.nr = nr; vcpu->arch.exception.error_code = error_code; vcpu->arch.exception.reinject = reinject; return; } /* to check exception */ prev_nr = vcpu->arch.exception.nr; if (prev_nr == DF_VECTOR) { /* triple fault -> shutdown */ kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); return; } class1 = exception_class(prev_nr); class2 = exception_class(nr); if ((class1 == EXCPT_CONTRIBUTORY && class2 == EXCPT_CONTRIBUTORY) || (class1 == EXCPT_PF && class2 != EXCPT_BENIGN)) { /* generate double fault per SDM Table 5-5 */ vcpu->arch.exception.pending = true; vcpu->arch.exception.has_error_code = true; vcpu->arch.exception.nr = DF_VECTOR; vcpu->arch.exception.error_code = 0; } else /* replace previous exception with a new one in a hope that instruction re-execution will regenerate lost exception */ goto queue; } void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr) { kvm_multiple_exception(vcpu, nr, false, 0, false); } EXPORT_SYMBOL_GPL(kvm_queue_exception); void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr) { kvm_multiple_exception(vcpu, nr, false, 0, true); } EXPORT_SYMBOL_GPL(kvm_requeue_exception); void kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err) { if (err) kvm_inject_gp(vcpu, 0); else kvm_x86_ops->skip_emulated_instruction(vcpu); } 
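/*
 * Sketch of the pending-exception merging done by
 * kvm_multiple_exception() above (cf. SDM Table 5-5):
 *
 *   first \ second | benign     contributory  page fault
 *   ---------------+--------------------------------------
 *   benign         | replace    replace       replace
 *   contributory   | replace    #DF           replace
 *   page fault     | replace    #DF           #DF
 *   #DF            | any further exception -> triple fault
 *
 * "replace" means the new exception overwrites the pending one in the
 * hope that re-executing the instruction regenerates the lost event.
 */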
EXPORT_SYMBOL_GPL(kvm_complete_insn_gp); void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault) { ++vcpu->stat.pf_guest; vcpu->arch.cr2 = fault->address; kvm_queue_exception_e(vcpu, PF_VECTOR, fault->error_code); } EXPORT_SYMBOL_GPL(kvm_inject_page_fault); static bool kvm_propagate_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault) { if (mmu_is_nested(vcpu) && !fault->nested_page_fault) vcpu->arch.nested_mmu.inject_page_fault(vcpu, fault); else vcpu->arch.mmu.inject_page_fault(vcpu, fault); return fault->nested_page_fault; } void kvm_inject_nmi(struct kvm_vcpu *vcpu) { atomic_inc(&vcpu->arch.nmi_queued); kvm_make_request(KVM_REQ_NMI, vcpu); } EXPORT_SYMBOL_GPL(kvm_inject_nmi); void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code) { kvm_multiple_exception(vcpu, nr, true, error_code, false); } EXPORT_SYMBOL_GPL(kvm_queue_exception_e); void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code) { kvm_multiple_exception(vcpu, nr, true, error_code, true); } EXPORT_SYMBOL_GPL(kvm_requeue_exception_e); /* * Checks if cpl <= required_cpl; if true, return true. Otherwise queue * a #GP and return false. */ bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl) { if (kvm_x86_ops->get_cpl(vcpu) <= required_cpl) return true; kvm_queue_exception_e(vcpu, GP_VECTOR, 0); return false; } EXPORT_SYMBOL_GPL(kvm_require_cpl); /* * This function will be used to read from the physical memory of the currently * running guest. The difference to kvm_read_guest_page is that this function * can read from guest physical or from the guest's guest physical memory. */ int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, gfn_t ngfn, void *data, int offset, int len, u32 access) { struct x86_exception exception; gfn_t real_gfn; gpa_t ngpa; ngpa = gfn_to_gpa(ngfn); real_gfn = mmu->translate_gpa(vcpu, ngpa, access, &exception); if (real_gfn == UNMAPPED_GVA) return -EFAULT; real_gfn = gpa_to_gfn(real_gfn); return kvm_read_guest_page(vcpu->kvm, real_gfn, data, offset, len); } EXPORT_SYMBOL_GPL(kvm_read_guest_page_mmu); int kvm_read_nested_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, int offset, int len, u32 access) { return kvm_read_guest_page_mmu(vcpu, vcpu->arch.walk_mmu, gfn, data, offset, len, access); } /* * Load the pae pdptrs. Return true is they are all valid. 
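 *
 * (The offset math below: a PAE cr3 points at a 32-byte-aligned table
 * of four 8-byte entries, so ((cr3 & (PAGE_SIZE-1)) >> 5) << 2 turns
 * the low cr3 bits into a u64 index within the page.)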
*/ int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3) { gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT; unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2; int i; int ret; u64 pdpte[ARRAY_SIZE(mmu->pdptrs)]; ret = kvm_read_guest_page_mmu(vcpu, mmu, pdpt_gfn, pdpte, offset * sizeof(u64), sizeof(pdpte), PFERR_USER_MASK|PFERR_WRITE_MASK); if (ret < 0) { ret = 0; goto out; } for (i = 0; i < ARRAY_SIZE(pdpte); ++i) { if (is_present_gpte(pdpte[i]) && (pdpte[i] & vcpu->arch.mmu.rsvd_bits_mask[0][2])) { ret = 0; goto out; } } ret = 1; memcpy(mmu->pdptrs, pdpte, sizeof(mmu->pdptrs)); __set_bit(VCPU_EXREG_PDPTR, (unsigned long *)&vcpu->arch.regs_avail); __set_bit(VCPU_EXREG_PDPTR, (unsigned long *)&vcpu->arch.regs_dirty); out: return ret; } EXPORT_SYMBOL_GPL(load_pdptrs); static bool pdptrs_changed(struct kvm_vcpu *vcpu) { u64 pdpte[ARRAY_SIZE(vcpu->arch.walk_mmu->pdptrs)]; bool changed = true; int offset; gfn_t gfn; int r; if (is_long_mode(vcpu) || !is_pae(vcpu)) return false; if (!test_bit(VCPU_EXREG_PDPTR, (unsigned long *)&vcpu->arch.regs_avail)) return true; gfn = (kvm_read_cr3(vcpu) & ~31u) >> PAGE_SHIFT; offset = (kvm_read_cr3(vcpu) & ~31u) & (PAGE_SIZE - 1); r = kvm_read_nested_guest_page(vcpu, gfn, pdpte, offset, sizeof(pdpte), PFERR_USER_MASK | PFERR_WRITE_MASK); if (r < 0) goto out; changed = memcmp(pdpte, vcpu->arch.walk_mmu->pdptrs, sizeof(pdpte)) != 0; out: return changed; } int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) { unsigned long old_cr0 = kvm_read_cr0(vcpu); unsigned long update_bits = X86_CR0_PG | X86_CR0_WP | X86_CR0_CD | X86_CR0_NW; cr0 |= X86_CR0_ET; #ifdef CONFIG_X86_64 if (cr0 & 0xffffffff00000000UL) return 1; #endif cr0 &= ~CR0_RESERVED_BITS; if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD)) return 1; if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE)) return 1; if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) { #ifdef CONFIG_X86_64 if ((vcpu->arch.efer & EFER_LME)) { int cs_db, cs_l; if (!is_pae(vcpu)) return 1; kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l); if (cs_l) return 1; } else #endif if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu))) return 1; } if (!(cr0 & X86_CR0_PG) && kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE)) return 1; kvm_x86_ops->set_cr0(vcpu, cr0); if ((cr0 ^ old_cr0) & X86_CR0_PG) { kvm_clear_async_pf_completion_queue(vcpu); kvm_async_pf_hash_reset(vcpu); } if ((cr0 ^ old_cr0) & update_bits) kvm_mmu_reset_context(vcpu); return 0; } EXPORT_SYMBOL_GPL(kvm_set_cr0); void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw) { (void)kvm_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~0x0eul) | (msw & 0x0f)); } EXPORT_SYMBOL_GPL(kvm_lmsw); static void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu) { if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE) && !vcpu->guest_xcr0_loaded) { /* kvm_set_xcr() also depends on this */ xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0); vcpu->guest_xcr0_loaded = 1; } } static void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu) { if (vcpu->guest_xcr0_loaded) { if (vcpu->arch.xcr0 != host_xcr0) xsetbv(XCR_XFEATURE_ENABLED_MASK, host_xcr0); vcpu->guest_xcr0_loaded = 0; } } int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr) { u64 xcr0 = xcr; u64 old_xcr0 = vcpu->arch.xcr0; u64 valid_bits; /* Only support XCR_XFEATURE_ENABLED_MASK(xcr0) now */ if (index != XCR_XFEATURE_ENABLED_MASK) return 1; if (!(xcr0 & XSTATE_FP)) return 1; if ((xcr0 & XSTATE_YMM) && !(xcr0 & XSTATE_SSE)) return 1; /* * Do not allow the guest to set bits that we do not support * saving. 
However, xcr0 bit 0 is always set, even if the * emulated CPU does not support XSAVE (see fx_init). */ valid_bits = vcpu->arch.guest_supported_xcr0 | XSTATE_FP; if (xcr0 & ~valid_bits) return 1; if ((!(xcr0 & XSTATE_BNDREGS)) != (!(xcr0 & XSTATE_BNDCSR))) return 1; kvm_put_guest_xcr0(vcpu); vcpu->arch.xcr0 = xcr0; if ((xcr0 ^ old_xcr0) & XSTATE_EXTEND_MASK) kvm_update_cpuid(vcpu); return 0; } int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr) { if (kvm_x86_ops->get_cpl(vcpu) != 0 || __kvm_set_xcr(vcpu, index, xcr)) { kvm_inject_gp(vcpu, 0); return 1; } return 0; } EXPORT_SYMBOL_GPL(kvm_set_xcr); int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) { unsigned long old_cr4 = kvm_read_cr4(vcpu); unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_SMEP; if (cr4 & CR4_RESERVED_BITS) return 1; if (!guest_cpuid_has_xsave(vcpu) && (cr4 & X86_CR4_OSXSAVE)) return 1; if (!guest_cpuid_has_smep(vcpu) && (cr4 & X86_CR4_SMEP)) return 1; if (!guest_cpuid_has_smap(vcpu) && (cr4 & X86_CR4_SMAP)) return 1; if (!guest_cpuid_has_fsgsbase(vcpu) && (cr4 & X86_CR4_FSGSBASE)) return 1; if (is_long_mode(vcpu)) { if (!(cr4 & X86_CR4_PAE)) return 1; } else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE) && ((cr4 ^ old_cr4) & pdptr_bits) && !load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu))) return 1; if ((cr4 & X86_CR4_PCIDE) && !(old_cr4 & X86_CR4_PCIDE)) { if (!guest_cpuid_has_pcid(vcpu)) return 1; /* PCID can not be enabled when cr3[11:0]!=000H or EFER.LMA=0 */ if ((kvm_read_cr3(vcpu) & X86_CR3_PCID_MASK) || !is_long_mode(vcpu)) return 1; } if (kvm_x86_ops->set_cr4(vcpu, cr4)) return 1; if (((cr4 ^ old_cr4) & pdptr_bits) || (!(cr4 & X86_CR4_PCIDE) && (old_cr4 & X86_CR4_PCIDE))) kvm_mmu_reset_context(vcpu); if ((cr4 ^ old_cr4) & X86_CR4_SMAP) update_permission_bitmask(vcpu, vcpu->arch.walk_mmu, false); if ((cr4 ^ old_cr4) & X86_CR4_OSXSAVE) kvm_update_cpuid(vcpu); return 0; } EXPORT_SYMBOL_GPL(kvm_set_cr4); int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3) { if (cr3 == kvm_read_cr3(vcpu) && !pdptrs_changed(vcpu)) { kvm_mmu_sync_roots(vcpu); kvm_mmu_flush_tlb(vcpu); return 0; } if (is_long_mode(vcpu)) { if (cr3 & CR3_L_MODE_RESERVED_BITS) return 1; } else if (is_pae(vcpu) && is_paging(vcpu) && !load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3)) return 1; vcpu->arch.cr3 = cr3; __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail); kvm_mmu_new_cr3(vcpu); return 0; } EXPORT_SYMBOL_GPL(kvm_set_cr3); int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8) { if (cr8 & CR8_RESERVED_BITS) return 1; if (irqchip_in_kernel(vcpu->kvm)) kvm_lapic_set_tpr(vcpu, cr8); else vcpu->arch.cr8 = cr8; return 0; } EXPORT_SYMBOL_GPL(kvm_set_cr8); unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu) { if (irqchip_in_kernel(vcpu->kvm)) return kvm_lapic_get_cr8(vcpu); else return vcpu->arch.cr8; } EXPORT_SYMBOL_GPL(kvm_get_cr8); static void kvm_update_dr6(struct kvm_vcpu *vcpu) { if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) kvm_x86_ops->set_dr6(vcpu, vcpu->arch.dr6); } static void kvm_update_dr7(struct kvm_vcpu *vcpu) { unsigned long dr7; if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) dr7 = vcpu->arch.guest_debug_dr7; else dr7 = vcpu->arch.dr7; kvm_x86_ops->set_dr7(vcpu, dr7); vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_BP_ENABLED; if (dr7 & DR7_BP_EN_MASK) vcpu->arch.switch_db_regs |= KVM_DEBUGREG_BP_ENABLED; } static u64 kvm_dr6_fixed(struct kvm_vcpu *vcpu) { u64 fixed = DR6_FIXED_1; if (!guest_cpuid_has_rtm(vcpu)) fixed |= DR6_RTM; return fixed; } static int __kvm_set_dr(struct kvm_vcpu 
*vcpu, int dr, unsigned long val) { switch (dr) { case 0 ... 3: vcpu->arch.db[dr] = val; if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) vcpu->arch.eff_db[dr] = val; break; case 4: if (kvm_read_cr4_bits(vcpu, X86_CR4_DE)) return 1; /* #UD */ /* fall through */ case 6: if (val & 0xffffffff00000000ULL) return -1; /* #GP */ vcpu->arch.dr6 = (val & DR6_VOLATILE) | kvm_dr6_fixed(vcpu); kvm_update_dr6(vcpu); break; case 5: if (kvm_read_cr4_bits(vcpu, X86_CR4_DE)) return 1; /* #UD */ /* fall through */ default: /* 7 */ if (val & 0xffffffff00000000ULL) return -1; /* #GP */ vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1; kvm_update_dr7(vcpu); break; } return 0; } int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val) { int res; res = __kvm_set_dr(vcpu, dr, val); if (res > 0) kvm_queue_exception(vcpu, UD_VECTOR); else if (res < 0) kvm_inject_gp(vcpu, 0); return res; } EXPORT_SYMBOL_GPL(kvm_set_dr); static int _kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val) { switch (dr) { case 0 ... 3: *val = vcpu->arch.db[dr]; break; case 4: if (kvm_read_cr4_bits(vcpu, X86_CR4_DE)) return 1; /* fall through */ case 6: if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) *val = vcpu->arch.dr6; else *val = kvm_x86_ops->get_dr6(vcpu); break; case 5: if (kvm_read_cr4_bits(vcpu, X86_CR4_DE)) return 1; /* fall through */ default: /* 7 */ *val = vcpu->arch.dr7; break; } return 0; } int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val) { if (_kvm_get_dr(vcpu, dr, val)) { kvm_queue_exception(vcpu, UD_VECTOR); return 1; } return 0; } EXPORT_SYMBOL_GPL(kvm_get_dr); bool kvm_rdpmc(struct kvm_vcpu *vcpu) { u32 ecx = kvm_register_read(vcpu, VCPU_REGS_RCX); u64 data; int err; err = kvm_pmu_read_pmc(vcpu, ecx, &data); if (err) return err; kvm_register_write(vcpu, VCPU_REGS_RAX, (u32)data); kvm_register_write(vcpu, VCPU_REGS_RDX, data >> 32); return err; } EXPORT_SYMBOL_GPL(kvm_rdpmc); /* * List of msr numbers which we expose to userspace through KVM_GET_MSRS * and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST. * * This list is modified at module load time to reflect the * capabilities of the host cpu. This capabilities test skips MSRs that are * kvm-specific. Those are put in the beginning of the list. 
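 * KVM_SAVE_MSRS_BEGIN below counts those kvm-specific (and Hyper-V)
 * entries at the head of msrs_to_save -- twelve of them here -- so the
 * host capability probe starts only after them.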
 */

#define KVM_SAVE_MSRS_BEGIN	12
static u32 msrs_to_save[] = {
	MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
	MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW,
	HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL,
	HV_X64_MSR_TIME_REF_COUNT, HV_X64_MSR_REFERENCE_TSC,
	HV_X64_MSR_APIC_ASSIST_PAGE, MSR_KVM_ASYNC_PF_EN, MSR_KVM_STEAL_TIME,
	MSR_KVM_PV_EOI_EN,
	MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
	MSR_STAR,
#ifdef CONFIG_X86_64
	MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
#endif
	MSR_IA32_TSC, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA,
	MSR_IA32_FEATURE_CONTROL, MSR_IA32_BNDCFGS
};

static unsigned num_msrs_to_save;

static const u32 emulated_msrs[] = {
	MSR_IA32_TSC_ADJUST,
	MSR_IA32_TSCDEADLINE,
	MSR_IA32_MISC_ENABLE,
	MSR_IA32_MCG_STATUS,
	MSR_IA32_MCG_CTL,
};

bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
{
	if (efer & efer_reserved_bits)
		return false;

	if (efer & EFER_FFXSR) {
		struct kvm_cpuid_entry2 *feat;

		feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
		if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT)))
			return false;
	}

	if (efer & EFER_SVME) {
		struct kvm_cpuid_entry2 *feat;

		feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
		if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM)))
			return false;
	}

	return true;
}
EXPORT_SYMBOL_GPL(kvm_valid_efer);

static int set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
	u64 old_efer = vcpu->arch.efer;

	if (!kvm_valid_efer(vcpu, efer))
		return 1;

	if (is_paging(vcpu)
	    && (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME))
		return 1;

	efer &= ~EFER_LMA;
	efer |= vcpu->arch.efer & EFER_LMA;

	kvm_x86_ops->set_efer(vcpu, efer);

	/* Update reserved bits */
	if ((efer ^ old_efer) & EFER_NX)
		kvm_mmu_reset_context(vcpu);

	return 0;
}

void kvm_enable_efer_bits(u64 mask)
{
	efer_reserved_bits &= ~mask;
}
EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);

/*
 * Writes msr value into the appropriate "register".
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
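 * A minimal caller sketch (the values are only illustrative):
 *
 *	struct msr_data m = {
 *		.index = MSR_EFER,
 *		.data = efer,
 *		.host_initiated = true,
 *	};
 *	r = kvm_set_msr(vcpu, &m);
 *
 * do_set_msr() below packages userspace msr_io() requests this way.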
*/ int kvm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) { return kvm_x86_ops->set_msr(vcpu, msr); } /* * Adapt set_msr() to msr_io()'s calling convention */ static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data) { struct msr_data msr; msr.data = *data; msr.index = index; msr.host_initiated = true; return kvm_set_msr(vcpu, &msr); } #ifdef CONFIG_X86_64 struct pvclock_gtod_data { seqcount_t seq; struct { /* extract of a clocksource struct */ int vclock_mode; cycle_t cycle_last; cycle_t mask; u32 mult; u32 shift; } clock; u64 boot_ns; u64 nsec_base; }; static struct pvclock_gtod_data pvclock_gtod_data; static void update_pvclock_gtod(struct timekeeper *tk) { struct pvclock_gtod_data *vdata = &pvclock_gtod_data; u64 boot_ns; boot_ns = ktime_to_ns(ktime_add(tk->tkr.base_mono, tk->offs_boot)); write_seqcount_begin(&vdata->seq); /* copy pvclock gtod data */ vdata->clock.vclock_mode = tk->tkr.clock->archdata.vclock_mode; vdata->clock.cycle_last = tk->tkr.cycle_last; vdata->clock.mask = tk->tkr.mask; vdata->clock.mult = tk->tkr.mult; vdata->clock.shift = tk->tkr.shift; vdata->boot_ns = boot_ns; vdata->nsec_base = tk->tkr.xtime_nsec; write_seqcount_end(&vdata->seq); } #endif static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock) { int version; int r; struct pvclock_wall_clock wc; struct timespec boot; if (!wall_clock) return; r = kvm_read_guest(kvm, wall_clock, &version, sizeof(version)); if (r) return; if (version & 1) ++version; /* first time write, random junk */ ++version; kvm_write_guest(kvm, wall_clock, &version, sizeof(version)); /* * The guest calculates current wall clock time by adding * system time (updated by kvm_guest_time_update below) to the * wall clock specified here. guest system time equals host * system time for us, thus we must fill in host boot time here. 
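 * Put differently, the guest computes
 *	wall time = wc (host boot time, shifted by kvmclock_offset)
 *		  + pvclock system_time (nanoseconds since boot),
 * which is why boot is reduced by kvmclock_offset just below.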
 */
	getboottime(&boot);

	if (kvm->arch.kvmclock_offset) {
		struct timespec ts = ns_to_timespec(kvm->arch.kvmclock_offset);
		boot = timespec_sub(boot, ts);
	}

	wc.sec = boot.tv_sec;
	wc.nsec = boot.tv_nsec;
	wc.version = version;

	kvm_write_guest(kvm, wall_clock, &wc, sizeof(wc));

	version++;
	kvm_write_guest(kvm, wall_clock, &version, sizeof(version));
}

static uint32_t div_frac(uint32_t dividend, uint32_t divisor)
{
	uint32_t quotient, remainder;

	/* Don't try to replace with do_div(), this one calculates
	 * "(dividend << 32) / divisor" */
	__asm__ ( "divl %4"
		  : "=a" (quotient), "=d" (remainder)
		  : "0" (0), "1" (dividend), "r" (divisor) );
	return quotient;
}

static void kvm_get_time_scale(uint32_t scaled_khz, uint32_t base_khz,
			       s8 *pshift, u32 *pmultiplier)
{
	uint64_t scaled64;
	int32_t  shift = 0;
	uint64_t tps64;
	uint32_t tps32;

	tps64 = base_khz * 1000LL;
	scaled64 = scaled_khz * 1000LL;
	while (tps64 > scaled64*2 || tps64 & 0xffffffff00000000ULL) {
		tps64 >>= 1;
		shift--;
	}

	tps32 = (uint32_t)tps64;
	while (tps32 <= scaled64 || scaled64 & 0xffffffff00000000ULL) {
		if (scaled64 & 0xffffffff00000000ULL || tps32 & 0x80000000)
			scaled64 >>= 1;
		else
			tps32 <<= 1;
		shift++;
	}

	*pshift = shift;
	*pmultiplier = div_frac(scaled64, tps32);

	pr_debug("%s: base_khz %u => %u, shift %d, mul %u\n",
		 __func__, base_khz, scaled_khz, shift, *pmultiplier);
}

static inline u64 get_kernel_ns(void)
{
	return ktime_get_boot_ns();
}

#ifdef CONFIG_X86_64
static atomic_t kvm_guest_has_master_clock = ATOMIC_INIT(0);
#endif

static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz);
unsigned long max_tsc_khz;

static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec)
{
	return pvclock_scale_delta(nsec, vcpu->arch.virtual_tsc_mult,
				   vcpu->arch.virtual_tsc_shift);
}

static u32 adjust_tsc_khz(u32 khz, s32 ppm)
{
	u64 v = (u64)khz * (1000000 + ppm);
	do_div(v, 1000000);
	return v;
}

static void kvm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 this_tsc_khz)
{
	u32 thresh_lo, thresh_hi;
	int use_scaling = 0;

	/* tsc_khz can be zero if TSC calibration fails */
	if (this_tsc_khz == 0)
		return;

	/* Compute a scale to convert nanoseconds in TSC cycles */
	kvm_get_time_scale(this_tsc_khz, NSEC_PER_SEC / 1000,
			   &vcpu->arch.virtual_tsc_shift,
			   &vcpu->arch.virtual_tsc_mult);
	vcpu->arch.virtual_tsc_khz = this_tsc_khz;

	/*
	 * Compute the variation in TSC rate which is acceptable
	 * within the range of tolerance and decide if the
	 * rate being applied is within the bounds of the hardware
	 * rate.  If so, no scaling or compensation need be done.
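 * As a worked example (assuming the default tsc_tolerance_ppm of 250):
 * with tsc_khz = 2600000 the window is
 *	thresh_lo = 2600000 * (1000000 - 250) / 1000000 = 2599350
 *	thresh_hi = 2600000 * (1000000 + 250) / 1000000 = 2600650
 * so a requested 2.5 GHz guest TSC on that host turns on scaling.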
*/ thresh_lo = adjust_tsc_khz(tsc_khz, -tsc_tolerance_ppm); thresh_hi = adjust_tsc_khz(tsc_khz, tsc_tolerance_ppm); if (this_tsc_khz < thresh_lo || this_tsc_khz > thresh_hi) { pr_debug("kvm: requested TSC rate %u falls outside tolerance [%u,%u]\n", this_tsc_khz, thresh_lo, thresh_hi); use_scaling = 1; } kvm_x86_ops->set_tsc_khz(vcpu, this_tsc_khz, use_scaling); } static u64 compute_guest_tsc(struct kvm_vcpu *vcpu, s64 kernel_ns) { u64 tsc = pvclock_scale_delta(kernel_ns-vcpu->arch.this_tsc_nsec, vcpu->arch.virtual_tsc_mult, vcpu->arch.virtual_tsc_shift); tsc += vcpu->arch.this_tsc_write; return tsc; } void kvm_track_tsc_matching(struct kvm_vcpu *vcpu) { #ifdef CONFIG_X86_64 bool vcpus_matched; bool do_request = false; struct kvm_arch *ka = &vcpu->kvm->arch; struct pvclock_gtod_data *gtod = &pvclock_gtod_data; vcpus_matched = (ka->nr_vcpus_matched_tsc + 1 == atomic_read(&vcpu->kvm->online_vcpus)); if (vcpus_matched && gtod->clock.vclock_mode == VCLOCK_TSC) if (!ka->use_master_clock) do_request = 1; if (!vcpus_matched && ka->use_master_clock) do_request = 1; if (do_request) kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu); trace_kvm_track_tsc(vcpu->vcpu_id, ka->nr_vcpus_matched_tsc, atomic_read(&vcpu->kvm->online_vcpus), ka->use_master_clock, gtod->clock.vclock_mode); #endif } static void update_ia32_tsc_adjust_msr(struct kvm_vcpu *vcpu, s64 offset) { u64 curr_offset = kvm_x86_ops->read_tsc_offset(vcpu); vcpu->arch.ia32_tsc_adjust_msr += offset - curr_offset; } void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr) { struct kvm *kvm = vcpu->kvm; u64 offset, ns, elapsed; unsigned long flags; s64 usdiff; bool matched; bool already_matched; u64 data = msr->data; raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags); offset = kvm_x86_ops->compute_tsc_offset(vcpu, data); ns = get_kernel_ns(); elapsed = ns - kvm->arch.last_tsc_nsec; if (vcpu->arch.virtual_tsc_khz) { int faulted = 0; /* n.b - signed multiplication and division required */ usdiff = data - kvm->arch.last_tsc_write; #ifdef CONFIG_X86_64 usdiff = (usdiff * 1000) / vcpu->arch.virtual_tsc_khz; #else /* do_div() only does unsigned */ asm("1: idivl %[divisor]\n" "2: xor %%edx, %%edx\n" " movl $0, %[faulted]\n" "3:\n" ".section .fixup,\"ax\"\n" "4: movl $1, %[faulted]\n" " jmp 3b\n" ".previous\n" _ASM_EXTABLE(1b, 4b) : "=A"(usdiff), [faulted] "=r" (faulted) : "A"(usdiff * 1000), [divisor] "rm"(vcpu->arch.virtual_tsc_khz)); #endif do_div(elapsed, 1000); usdiff -= elapsed; if (usdiff < 0) usdiff = -usdiff; /* idivl overflow => difference is larger than USEC_PER_SEC */ if (faulted) usdiff = USEC_PER_SEC; } else usdiff = USEC_PER_SEC; /* disable TSC match window below */ /* * Special case: TSC write with a small delta (1 second) of virtual * cycle time against real time is interpreted as an attempt to * synchronize the CPU. * * For a reliable TSC, we can match TSC offsets, and for an unstable * TSC, we add elapsed time in this computation. We could let the * compensation code attempt to catch up if we fall behind, but * it's better to try to match offsets from the beginning. 
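 * Concretely, usdiff below is the written TSC delta converted to
 * microseconds (cycles * 1000 / virtual_tsc_khz) with the elapsed host
 * time subtracted, so only writes landing within about one second of
 * where the previous write would have advanced to count as a
 * synchronization attempt.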
 */
	if (usdiff < USEC_PER_SEC &&
	    vcpu->arch.virtual_tsc_khz == kvm->arch.last_tsc_khz) {
		if (!check_tsc_unstable()) {
			offset = kvm->arch.cur_tsc_offset;
			pr_debug("kvm: matched tsc offset for %llu\n", data);
		} else {
			u64 delta = nsec_to_cycles(vcpu, elapsed);
			data += delta;
			offset = kvm_x86_ops->compute_tsc_offset(vcpu, data);
			pr_debug("kvm: adjusted tsc offset by %llu\n", delta);
		}
		matched = true;
		already_matched = (vcpu->arch.this_tsc_generation == kvm->arch.cur_tsc_generation);
	} else {
		/*
		 * We split periods of matched TSC writes into generations.
		 * For each generation, we track the original measured
		 * nanosecond time, offset, and write, so if TSCs are in
		 * sync, we can match exact offset, and if not, we can match
		 * exact software computation in compute_guest_tsc()
		 *
		 * These values are tracked in kvm->arch.cur_xxx variables.
		 */
		kvm->arch.cur_tsc_generation++;
		kvm->arch.cur_tsc_nsec = ns;
		kvm->arch.cur_tsc_write = data;
		kvm->arch.cur_tsc_offset = offset;
		matched = false;
		pr_debug("kvm: new tsc generation %llu, clock %llu\n",
			 kvm->arch.cur_tsc_generation, data);
	}

	/*
	 * We also track the most recent recorded KHZ, write and time to
	 * allow the matching interval to be extended at each write.
	 */
	kvm->arch.last_tsc_nsec = ns;
	kvm->arch.last_tsc_write = data;
	kvm->arch.last_tsc_khz = vcpu->arch.virtual_tsc_khz;

	vcpu->arch.last_guest_tsc = data;

	/* Keep track of which generation this VCPU has synchronized to */
	vcpu->arch.this_tsc_generation = kvm->arch.cur_tsc_generation;
	vcpu->arch.this_tsc_nsec = kvm->arch.cur_tsc_nsec;
	vcpu->arch.this_tsc_write = kvm->arch.cur_tsc_write;

	if (guest_cpuid_has_tsc_adjust(vcpu) && !msr->host_initiated)
		update_ia32_tsc_adjust_msr(vcpu, offset);
	kvm_x86_ops->write_tsc_offset(vcpu, offset);
	raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);

	spin_lock(&kvm->arch.pvclock_gtod_sync_lock);
	if (!matched) {
		kvm->arch.nr_vcpus_matched_tsc = 0;
	} else if (!already_matched) {
		kvm->arch.nr_vcpus_matched_tsc++;
	}

	kvm_track_tsc_matching(vcpu);
	spin_unlock(&kvm->arch.pvclock_gtod_sync_lock);
}
EXPORT_SYMBOL_GPL(kvm_write_tsc);

#ifdef CONFIG_X86_64

static cycle_t read_tsc(void)
{
	cycle_t ret;
	u64 last;

	/*
	 * Empirically, a fence (of type that depends on the CPU)
	 * before rdtsc is enough to ensure that rdtsc is ordered
	 * with respect to loads.  The various CPU manuals are unclear
	 * as to whether rdtsc can be reordered with later loads,
	 * but no one has ever seen it happen.
	 */
	rdtsc_barrier();
	ret = (cycle_t)vget_cycles();

	last = pvclock_gtod_data.clock.cycle_last;

	if (likely(ret >= last))
		return ret;

	/*
	 * GCC likes to generate cmov here, but this branch is extremely
	 * predictable (it's just a function of time and the likely is
	 * very likely) and there's a data dependence, so force GCC
	 * to generate a branch instead.  I don't barrier() because
	 * we don't actually need a barrier, and if this function
	 * ever gets inlined it will generate worse code.
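 * (The empty asm volatile below is only an optimization barrier: it
 * plants an opaque statement on this path so the compiler cannot
 * if-convert the branch into a cmov.)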
 */
	asm volatile ("");
	return last;
}

static inline u64 vgettsc(cycle_t *cycle_now)
{
	long v;
	struct pvclock_gtod_data *gtod = &pvclock_gtod_data;

	*cycle_now = read_tsc();

	v = (*cycle_now - gtod->clock.cycle_last) & gtod->clock.mask;
	return v * gtod->clock.mult;
}

static int do_monotonic_boot(s64 *t, cycle_t *cycle_now)
{
	struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
	unsigned long seq;
	int mode;
	u64 ns;

	do {
		seq = read_seqcount_begin(&gtod->seq);
		mode = gtod->clock.vclock_mode;
		ns = gtod->nsec_base;
		ns += vgettsc(cycle_now);
		ns >>= gtod->clock.shift;
		ns += gtod->boot_ns;
	} while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
	*t = ns;

	return mode;
}

/* returns true if host is using tsc clocksource */
static bool kvm_get_time_and_clockread(s64 *kernel_ns, cycle_t *cycle_now)
{
	/* checked again under seqlock below */
	if (pvclock_gtod_data.clock.vclock_mode != VCLOCK_TSC)
		return false;

	return do_monotonic_boot(kernel_ns, cycle_now) == VCLOCK_TSC;
}
#endif

/*
 *
 * Assuming a stable TSC across physical CPUS, and a stable TSC
 * across virtual CPUs, the following condition is possible.
 * Each numbered line represents an event visible to both
 * CPUs at the next numbered event.
 *
 * "timespecX" represents host monotonic time. "tscX" represents
 * RDTSC value.
 *
 *		VCPU0 on CPU0		|	VCPU1 on CPU1
 *
 * 1.  read timespec0,tsc0
 * 2.					| timespec1 = timespec0 + N
 *					| tsc1 = tsc0 + M
 * 3. transition to guest		| transition to guest
 * 4. ret0 = timespec0 + (rdtsc - tsc0) |
 * 5.					| ret1 = timespec1 + (rdtsc - tsc1)
 *					| ret1 = timespec0 + N + (rdtsc - (tsc0 + M))
 *
 * Since ret0 update is visible to VCPU1 at time 5, to obey monotonicity:
 *
 *	- ret0 < ret1
 *	- timespec0 + (rdtsc - tsc0) < timespec0 + N + (rdtsc - (tsc0 + M))
 *		...
 *	- 0 < N - M => M < N
 *
 * That is, when timespec0 != timespec1, M < N. Unfortunately that is not
 * always the case (the difference between two distinct xtime instances
 * might be smaller than the difference between corresponding TSC reads,
 * when updating guest vcpus pvclock areas).
 *
 * To avoid that problem, do not allow visibility of distinct
 * system_timestamp/tsc_timestamp values simultaneously: use a master
 * copy of host monotonic time values. Update that master copy
 * in lockstep.
 *
 * Rely on synchronization of host TSCs and guest TSCs for monotonicity.
 *
 */

static void pvclock_update_vm_gtod_copy(struct kvm *kvm)
{
#ifdef CONFIG_X86_64
	struct kvm_arch *ka = &kvm->arch;
	int vclock_mode;
	bool host_tsc_clocksource, vcpus_matched;

	vcpus_matched = (ka->nr_vcpus_matched_tsc + 1 ==
			 atomic_read(&kvm->online_vcpus));

	/*
	 * If the host uses TSC clock, then passthrough TSC as stable
	 * to the guest.
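 * Concretely, use_master_clock is enabled below only when all three
 * conditions hold: the host clocksource is the TSC, every vCPU has
 * written a matching TSC, and no backwards TSC has ever been observed.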
*/ host_tsc_clocksource = kvm_get_time_and_clockread( &ka->master_kernel_ns, &ka->master_cycle_now); ka->use_master_clock = host_tsc_clocksource && vcpus_matched && !backwards_tsc_observed; if (ka->use_master_clock) atomic_set(&kvm_guest_has_master_clock, 1); vclock_mode = pvclock_gtod_data.clock.vclock_mode; trace_kvm_update_master_clock(ka->use_master_clock, vclock_mode, vcpus_matched); #endif } static void kvm_gen_update_masterclock(struct kvm *kvm) { #ifdef CONFIG_X86_64 int i; struct kvm_vcpu *vcpu; struct kvm_arch *ka = &kvm->arch; spin_lock(&ka->pvclock_gtod_sync_lock); kvm_make_mclock_inprogress_request(kvm); /* no guest entries from this point */ pvclock_update_vm_gtod_copy(kvm); kvm_for_each_vcpu(i, vcpu, kvm) kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); /* guest entries allowed */ kvm_for_each_vcpu(i, vcpu, kvm) clear_bit(KVM_REQ_MCLOCK_INPROGRESS, &vcpu->requests); spin_unlock(&ka->pvclock_gtod_sync_lock); #endif } static int kvm_guest_time_update(struct kvm_vcpu *v) { unsigned long flags, this_tsc_khz; struct kvm_vcpu_arch *vcpu = &v->arch; struct kvm_arch *ka = &v->kvm->arch; s64 kernel_ns; u64 tsc_timestamp, host_tsc; struct pvclock_vcpu_time_info guest_hv_clock; u8 pvclock_flags; bool use_master_clock; kernel_ns = 0; host_tsc = 0; /* * If the host uses TSC clock, then passthrough TSC as stable * to the guest. */ spin_lock(&ka->pvclock_gtod_sync_lock); use_master_clock = ka->use_master_clock; if (use_master_clock) { host_tsc = ka->master_cycle_now; kernel_ns = ka->master_kernel_ns; } spin_unlock(&ka->pvclock_gtod_sync_lock); /* Keep irq disabled to prevent changes to the clock */ local_irq_save(flags); this_tsc_khz = __get_cpu_var(cpu_tsc_khz); if (unlikely(this_tsc_khz == 0)) { local_irq_restore(flags); kvm_make_request(KVM_REQ_CLOCK_UPDATE, v); return 1; } if (!use_master_clock) { host_tsc = native_read_tsc(); kernel_ns = get_kernel_ns(); } tsc_timestamp = kvm_x86_ops->read_l1_tsc(v, host_tsc); /* * We may have to catch up the TSC to match elapsed wall clock * time for two reasons, even if kvmclock is used. * 1) CPU could have been running below the maximum TSC rate * 2) Broken TSC compensation resets the base at each VCPU * entry to avoid unknown leaps of TSC even when running * again on the same CPU. This may cause apparent elapsed * time to disappear, and the guest to stand still or run * very slowly. */ if (vcpu->tsc_catchup) { u64 tsc = compute_guest_tsc(v, kernel_ns); if (tsc > tsc_timestamp) { adjust_tsc_offset_guest(v, tsc - tsc_timestamp); tsc_timestamp = tsc; } } local_irq_restore(flags); if (!vcpu->pv_time_enabled) return 0; if (unlikely(vcpu->hw_tsc_khz != this_tsc_khz)) { kvm_get_time_scale(NSEC_PER_SEC / 1000, this_tsc_khz, &vcpu->hv_clock.tsc_shift, &vcpu->hv_clock.tsc_to_system_mul); vcpu->hw_tsc_khz = this_tsc_khz; } /* With all the info we got, fill in the values */ vcpu->hv_clock.tsc_timestamp = tsc_timestamp; vcpu->hv_clock.system_time = kernel_ns + v->kvm->arch.kvmclock_offset; vcpu->last_guest_tsc = tsc_timestamp; /* * The interface expects us to write an even number signaling that the * update is finished. Since the guest won't see the intermediate * state, we just increase by 2 at the end. 
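 * The version field acts like a seqcount; a guest-side reader is
 * expected to loop roughly like this (a sketch, not the exact pvclock
 * code):
 *
 *	do {
 *		v = hv_clock->version;
 *		rmb();
 *		... copy tsc_timestamp, system_time, flags ...
 *		rmb();
 *	} while ((v & 1) || v != hv_clock->version);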
*/ vcpu->hv_clock.version += 2; if (unlikely(kvm_read_guest_cached(v->kvm, &vcpu->pv_time, &guest_hv_clock, sizeof(guest_hv_clock)))) return 0; /* retain PVCLOCK_GUEST_STOPPED if set in guest copy */ pvclock_flags = (guest_hv_clock.flags & PVCLOCK_GUEST_STOPPED); if (vcpu->pvclock_set_guest_stopped_request) { pvclock_flags |= PVCLOCK_GUEST_STOPPED; vcpu->pvclock_set_guest_stopped_request = false; } /* If the host uses TSC clocksource, then it is stable */ if (use_master_clock) pvclock_flags |= PVCLOCK_TSC_STABLE_BIT; vcpu->hv_clock.flags = pvclock_flags; kvm_write_guest_cached(v->kvm, &vcpu->pv_time, &vcpu->hv_clock, sizeof(vcpu->hv_clock)); return 0; } /* * kvmclock updates which are isolated to a given vcpu, such as * vcpu->cpu migration, should not allow system_timestamp from * the rest of the vcpus to remain static. Otherwise ntp frequency * correction applies to one vcpu's system_timestamp but not * the others. * * So in those cases, request a kvmclock update for all vcpus. * We need to rate-limit these requests though, as they can * considerably slow guests that have a large number of vcpus. * The time for a remote vcpu to update its kvmclock is bound * by the delay we use to rate-limit the updates. */ #define KVMCLOCK_UPDATE_DELAY msecs_to_jiffies(100) static void kvmclock_update_fn(struct work_struct *work) { int i; struct delayed_work *dwork = to_delayed_work(work); struct kvm_arch *ka = container_of(dwork, struct kvm_arch, kvmclock_update_work); struct kvm *kvm = container_of(ka, struct kvm, arch); struct kvm_vcpu *vcpu; kvm_for_each_vcpu(i, vcpu, kvm) { kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); kvm_vcpu_kick(vcpu); } } static void kvm_gen_kvmclock_update(struct kvm_vcpu *v) { struct kvm *kvm = v->kvm; kvm_make_request(KVM_REQ_CLOCK_UPDATE, v); schedule_delayed_work(&kvm->arch.kvmclock_update_work, KVMCLOCK_UPDATE_DELAY); } #define KVMCLOCK_SYNC_PERIOD (300 * HZ) static void kvmclock_sync_fn(struct work_struct *work) { struct delayed_work *dwork = to_delayed_work(work); struct kvm_arch *ka = container_of(dwork, struct kvm_arch, kvmclock_sync_work); struct kvm *kvm = container_of(ka, struct kvm, arch); schedule_delayed_work(&kvm->arch.kvmclock_update_work, 0); schedule_delayed_work(&kvm->arch.kvmclock_sync_work, KVMCLOCK_SYNC_PERIOD); } static bool msr_mtrr_valid(unsigned msr) { switch (msr) { case 0x200 ... 
0x200 + 2 * KVM_NR_VAR_MTRR - 1: case MSR_MTRRfix64K_00000: case MSR_MTRRfix16K_80000: case MSR_MTRRfix16K_A0000: case MSR_MTRRfix4K_C0000: case MSR_MTRRfix4K_C8000: case MSR_MTRRfix4K_D0000: case MSR_MTRRfix4K_D8000: case MSR_MTRRfix4K_E0000: case MSR_MTRRfix4K_E8000: case MSR_MTRRfix4K_F0000: case MSR_MTRRfix4K_F8000: case MSR_MTRRdefType: case MSR_IA32_CR_PAT: return true; case 0x2f8: return true; } return false; } static bool valid_pat_type(unsigned t) { return t < 8 && (1 << t) & 0xf3; /* 0, 1, 4, 5, 6, 7 */ } static bool valid_mtrr_type(unsigned t) { return t < 8 && (1 << t) & 0x73; /* 0, 1, 4, 5, 6 */ } static bool mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data) { int i; u64 mask; if (!msr_mtrr_valid(msr)) return false; if (msr == MSR_IA32_CR_PAT) { for (i = 0; i < 8; i++) if (!valid_pat_type((data >> (i * 8)) & 0xff)) return false; return true; } else if (msr == MSR_MTRRdefType) { if (data & ~0xcff) return false; return valid_mtrr_type(data & 0xff); } else if (msr >= MSR_MTRRfix64K_00000 && msr <= MSR_MTRRfix4K_F8000) { for (i = 0; i < 8 ; i++) if (!valid_mtrr_type((data >> (i * 8)) & 0xff)) return false; return true; } /* variable MTRRs */ WARN_ON(!(msr >= 0x200 && msr < 0x200 + 2 * KVM_NR_VAR_MTRR)); mask = (~0ULL) << cpuid_maxphyaddr(vcpu); if ((msr & 1) == 0) { /* MTRR base */ if (!valid_mtrr_type(data & 0xff)) return false; mask |= 0xf00; } else /* MTRR mask */ mask |= 0x7ff; if (data & mask) { kvm_inject_gp(vcpu, 0); return false; } return true; } static int set_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 data) { u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges; if (!mtrr_valid(vcpu, msr, data)) return 1; if (msr == MSR_MTRRdefType) { vcpu->arch.mtrr_state.def_type = data; vcpu->arch.mtrr_state.enabled = (data & 0xc00) >> 10; } else if (msr == MSR_MTRRfix64K_00000) p[0] = data; else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000) p[1 + msr - MSR_MTRRfix16K_80000] = data; else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000) p[3 + msr - MSR_MTRRfix4K_C0000] = data; else if (msr == MSR_IA32_CR_PAT) vcpu->arch.pat = data; else { /* Variable MTRRs */ int idx, is_mtrr_mask; u64 *pt; idx = (msr - 0x200) / 2; is_mtrr_mask = msr - 0x200 - 2 * idx; if (!is_mtrr_mask) pt = (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo; else pt = (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo; *pt = data; } kvm_mmu_reset_context(vcpu); return 0; } static int set_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 data) { u64 mcg_cap = vcpu->arch.mcg_cap; unsigned bank_num = mcg_cap & 0xff; switch (msr) { case MSR_IA32_MCG_STATUS: vcpu->arch.mcg_status = data; break; case MSR_IA32_MCG_CTL: if (!(mcg_cap & MCG_CTL_P)) return 1; if (data != 0 && data != ~(u64)0) return -1; vcpu->arch.mcg_ctl = data; break; default: if (msr >= MSR_IA32_MC0_CTL && msr < MSR_IA32_MC0_CTL + 4 * bank_num) { u32 offset = msr - MSR_IA32_MC0_CTL; /* only 0 or all 1s can be written to IA32_MCi_CTL * some Linux kernels though clear bit 10 in bank 4 to * workaround a BIOS/GART TBL issue on AMD K8s, ignore * this to avoid an uncatched #GP in the guest */ if ((offset & 0x3) == 0 && data != 0 && (data | (1 << 10)) != ~(u64)0) return -1; vcpu->arch.mce_banks[offset] = data; break; } return 1; } return 0; } static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data) { struct kvm *kvm = vcpu->kvm; int lm = is_long_mode(vcpu); u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64 : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32; u8 blob_size = lm ? 
kvm->arch.xen_hvm_config.blob_size_64 : kvm->arch.xen_hvm_config.blob_size_32; u32 page_num = data & ~PAGE_MASK; u64 page_addr = data & PAGE_MASK; u8 *page; int r; r = -E2BIG; if (page_num >= blob_size) goto out; r = -ENOMEM; page = memdup_user(blob_addr + (page_num * PAGE_SIZE), PAGE_SIZE); if (IS_ERR(page)) { r = PTR_ERR(page); goto out; } if (kvm_write_guest(kvm, page_addr, page, PAGE_SIZE)) goto out_free; r = 0; out_free: kfree(page); out: return r; } static bool kvm_hv_hypercall_enabled(struct kvm *kvm) { return kvm->arch.hv_hypercall & HV_X64_MSR_HYPERCALL_ENABLE; } static bool kvm_hv_msr_partition_wide(u32 msr) { bool r = false; switch (msr) { case HV_X64_MSR_GUEST_OS_ID: case HV_X64_MSR_HYPERCALL: case HV_X64_MSR_REFERENCE_TSC: case HV_X64_MSR_TIME_REF_COUNT: r = true; break; } return r; } static int set_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data) { struct kvm *kvm = vcpu->kvm; switch (msr) { case HV_X64_MSR_GUEST_OS_ID: kvm->arch.hv_guest_os_id = data; /* setting guest os id to zero disables hypercall page */ if (!kvm->arch.hv_guest_os_id) kvm->arch.hv_hypercall &= ~HV_X64_MSR_HYPERCALL_ENABLE; break; case HV_X64_MSR_HYPERCALL: { u64 gfn; unsigned long addr; u8 instructions[4]; /* if guest os id is not set hypercall should remain disabled */ if (!kvm->arch.hv_guest_os_id) break; if (!(data & HV_X64_MSR_HYPERCALL_ENABLE)) { kvm->arch.hv_hypercall = data; break; } gfn = data >> HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT; addr = gfn_to_hva(kvm, gfn); if (kvm_is_error_hva(addr)) return 1; kvm_x86_ops->patch_hypercall(vcpu, instructions); ((unsigned char *)instructions)[3] = 0xc3; /* ret */ if (__copy_to_user((void __user *)addr, instructions, 4)) return 1; kvm->arch.hv_hypercall = data; mark_page_dirty(kvm, gfn); break; } case HV_X64_MSR_REFERENCE_TSC: { u64 gfn; HV_REFERENCE_TSC_PAGE tsc_ref; memset(&tsc_ref, 0, sizeof(tsc_ref)); kvm->arch.hv_tsc_page = data; if (!(data & HV_X64_MSR_TSC_REFERENCE_ENABLE)) break; gfn = data >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT; if (kvm_write_guest(kvm, gfn << HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT, &tsc_ref, sizeof(tsc_ref))) return 1; mark_page_dirty(kvm, gfn); break; } default: vcpu_unimpl(vcpu, "HYPER-V unimplemented wrmsr: 0x%x " "data 0x%llx\n", msr, data); return 1; } return 0; } static int set_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 data) { switch (msr) { case HV_X64_MSR_APIC_ASSIST_PAGE: { u64 gfn; unsigned long addr; if (!(data & HV_X64_MSR_APIC_ASSIST_PAGE_ENABLE)) { vcpu->arch.hv_vapic = data; if (kvm_lapic_enable_pv_eoi(vcpu, 0)) return 1; break; } gfn = data >> HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT; addr = gfn_to_hva(vcpu->kvm, gfn); if (kvm_is_error_hva(addr)) return 1; if (__clear_user((void __user *)addr, PAGE_SIZE)) return 1; vcpu->arch.hv_vapic = data; mark_page_dirty(vcpu->kvm, gfn); if (kvm_lapic_enable_pv_eoi(vcpu, gfn_to_gpa(gfn) | KVM_MSR_ENABLED)) return 1; break; } case HV_X64_MSR_EOI: return kvm_hv_vapic_msr_write(vcpu, APIC_EOI, data); case HV_X64_MSR_ICR: return kvm_hv_vapic_msr_write(vcpu, APIC_ICR, data); case HV_X64_MSR_TPR: return kvm_hv_vapic_msr_write(vcpu, APIC_TASKPRI, data); default: vcpu_unimpl(vcpu, "HYPER-V unimplemented wrmsr: 0x%x " "data 0x%llx\n", msr, data); return 1; } return 0; } static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data) { gpa_t gpa = data & ~0x3f; /* Bits 2:5 are reserved, Should be zero */ if (data & 0x3c) return 1; vcpu->arch.apf.msr_val = data; if (!(data & KVM_ASYNC_PF_ENABLED)) { kvm_clear_async_pf_completion_queue(vcpu); 
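		/*
		 * Disable path: the queue flush above drops completed async
		 * page faults, and the hash reset below forgets the GFNs
		 * that were still waiting on them.
		 */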
kvm_async_pf_hash_reset(vcpu); return 0; } if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa, sizeof(u32))) return 1; vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS); kvm_async_pf_wakeup_all(vcpu); return 0; } static void kvmclock_reset(struct kvm_vcpu *vcpu) { vcpu->arch.pv_time_enabled = false; } static void accumulate_steal_time(struct kvm_vcpu *vcpu) { u64 delta; if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED)) return; delta = current->sched_info.run_delay - vcpu->arch.st.last_steal; vcpu->arch.st.last_steal = current->sched_info.run_delay; vcpu->arch.st.accum_steal = delta; } static void record_steal_time(struct kvm_vcpu *vcpu) { if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED)) return; if (unlikely(kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.st.stime, &vcpu->arch.st.steal, sizeof(struct kvm_steal_time)))) return; vcpu->arch.st.steal.steal += vcpu->arch.st.accum_steal; vcpu->arch.st.steal.version += 2; vcpu->arch.st.accum_steal = 0; kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime, &vcpu->arch.st.steal, sizeof(struct kvm_steal_time)); } int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) { bool pr = false; u32 msr = msr_info->index; u64 data = msr_info->data; switch (msr) { case MSR_AMD64_NB_CFG: case MSR_IA32_UCODE_REV: case MSR_IA32_UCODE_WRITE: case MSR_VM_HSAVE_PA: case MSR_AMD64_PATCH_LOADER: case MSR_AMD64_BU_CFG2: break; case MSR_EFER: return set_efer(vcpu, data); case MSR_K7_HWCR: data &= ~(u64)0x40; /* ignore flush filter disable */ data &= ~(u64)0x100; /* ignore ignne emulation enable */ data &= ~(u64)0x8; /* ignore TLB cache disable */ data &= ~(u64)0x40000; /* ignore Mc status write enable */ if (data != 0) { vcpu_unimpl(vcpu, "unimplemented HWCR wrmsr: 0x%llx\n", data); return 1; } break; case MSR_FAM10H_MMIO_CONF_BASE: if (data != 0) { vcpu_unimpl(vcpu, "unimplemented MMIO_CONF_BASE wrmsr: " "0x%llx\n", data); return 1; } break; case MSR_IA32_DEBUGCTLMSR: if (!data) { /* We support the non-activated case already */ break; } else if (data & ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_BTF)) { /* Values other than LBR and BTF are vendor-specific, thus reserved and should throw a #GP */ return 1; } vcpu_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTLMSR 0x%llx, nop\n", __func__, data); break; case 0x200 ... 0x2ff: return set_msr_mtrr(vcpu, msr, data); case MSR_IA32_APICBASE: return kvm_set_apic_base(vcpu, msr_info); case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff: return kvm_x2apic_msr_write(vcpu, msr, data); case MSR_IA32_TSCDEADLINE: kvm_set_lapic_tscdeadline_msr(vcpu, data); break; case MSR_IA32_TSC_ADJUST: if (guest_cpuid_has_tsc_adjust(vcpu)) { if (!msr_info->host_initiated) { u64 adj = data - vcpu->arch.ia32_tsc_adjust_msr; kvm_x86_ops->adjust_tsc_offset(vcpu, adj, true); } vcpu->arch.ia32_tsc_adjust_msr = data; } break; case MSR_IA32_MISC_ENABLE: vcpu->arch.ia32_misc_enable_msr = data; break; case MSR_KVM_WALL_CLOCK_NEW: case MSR_KVM_WALL_CLOCK: vcpu->kvm->arch.wall_clock = data; kvm_write_wall_clock(vcpu->kvm, data); break; case MSR_KVM_SYSTEM_TIME_NEW: case MSR_KVM_SYSTEM_TIME: { u64 gpa_offset; kvmclock_reset(vcpu); vcpu->arch.time = data; kvm_make_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu); /* we verify if the enable bit is set... 
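 * (bit 0 of the MSR value; the remaining bits, masked off below, give
 * the guest physical address of the pvclock_vcpu_time_info area)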
*/ if (!(data & 1)) break; gpa_offset = data & ~(PAGE_MASK | 1); if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.pv_time, data & ~1ULL, sizeof(struct pvclock_vcpu_time_info))) vcpu->arch.pv_time_enabled = false; else vcpu->arch.pv_time_enabled = true; break; } case MSR_KVM_ASYNC_PF_EN: if (kvm_pv_enable_async_pf(vcpu, data)) return 1; break; case MSR_KVM_STEAL_TIME: if (unlikely(!sched_info_on())) return 1; if (data & KVM_STEAL_RESERVED_MASK) return 1; if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.st.stime, data & KVM_STEAL_VALID_BITS, sizeof(struct kvm_steal_time))) return 1; vcpu->arch.st.msr_val = data; if (!(data & KVM_MSR_ENABLED)) break; vcpu->arch.st.last_steal = current->sched_info.run_delay; preempt_disable(); accumulate_steal_time(vcpu); preempt_enable(); kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu); break; case MSR_KVM_PV_EOI_EN: if (kvm_lapic_enable_pv_eoi(vcpu, data)) return 1; break; case MSR_IA32_MCG_CTL: case MSR_IA32_MCG_STATUS: case MSR_IA32_MC0_CTL ... MSR_IA32_MC0_CTL + 4 * KVM_MAX_MCE_BANKS - 1: return set_msr_mce(vcpu, msr, data); /* Performance counters are not protected by a CPUID bit, * so we should check all of them in the generic path for the sake of * cross vendor migration. * Writing a zero into the event select MSRs disables them, * which we perfectly emulate ;-). Any other value should be at least * reported, some guests depend on them. */ case MSR_K7_EVNTSEL0: case MSR_K7_EVNTSEL1: case MSR_K7_EVNTSEL2: case MSR_K7_EVNTSEL3: if (data != 0) vcpu_unimpl(vcpu, "unimplemented perfctr wrmsr: " "0x%x data 0x%llx\n", msr, data); break; /* at least RHEL 4 unconditionally writes to the perfctr registers, * so we ignore writes to make it happy. */ case MSR_K7_PERFCTR0: case MSR_K7_PERFCTR1: case MSR_K7_PERFCTR2: case MSR_K7_PERFCTR3: vcpu_unimpl(vcpu, "unimplemented perfctr wrmsr: " "0x%x data 0x%llx\n", msr, data); break; case MSR_P6_PERFCTR0: case MSR_P6_PERFCTR1: pr = true; case MSR_P6_EVNTSEL0: case MSR_P6_EVNTSEL1: if (kvm_pmu_msr(vcpu, msr)) return kvm_pmu_set_msr(vcpu, msr_info); if (pr || data != 0) vcpu_unimpl(vcpu, "disabled perfctr wrmsr: " "0x%x data 0x%llx\n", msr, data); break; case MSR_K7_CLK_CTL: /* * Ignore all writes to this no longer documented MSR. * Writes are only relevant for old K7 processors, * all pre-dating SVM, but a recommended workaround from * AMD for these chips. It is possible to specify the * affected processor models on the command line, hence * the need to ignore the workaround. */ break; case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15: if (kvm_hv_msr_partition_wide(msr)) { int r; mutex_lock(&vcpu->kvm->lock); r = set_msr_hyperv_pw(vcpu, msr, data); mutex_unlock(&vcpu->kvm->lock); return r; } else return set_msr_hyperv(vcpu, msr, data); break; case MSR_IA32_BBL_CR_CTL3: /* Drop writes to this legacy MSR -- see rdmsr * counterpart for further detail. 
*/ vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data %llx\n", msr, data); break; case MSR_AMD64_OSVW_ID_LENGTH: if (!guest_cpuid_has_osvw(vcpu)) return 1; vcpu->arch.osvw.length = data; break; case MSR_AMD64_OSVW_STATUS: if (!guest_cpuid_has_osvw(vcpu)) return 1; vcpu->arch.osvw.status = data; break; default: if (msr && (msr == vcpu->kvm->arch.xen_hvm_config.msr)) return xen_hvm_config(vcpu, data); if (kvm_pmu_msr(vcpu, msr)) return kvm_pmu_set_msr(vcpu, msr_info); if (!ignore_msrs) { vcpu_unimpl(vcpu, "unhandled wrmsr: 0x%x data %llx\n", msr, data); return 1; } else { vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data %llx\n", msr, data); break; } } return 0; } EXPORT_SYMBOL_GPL(kvm_set_msr_common); /* * Reads an msr value (of 'msr_index') into 'pdata'. * Returns 0 on success, non-0 otherwise. * Assumes vcpu_load() was already called. */ int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata) { return kvm_x86_ops->get_msr(vcpu, msr_index, pdata); } static int get_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) { u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges; if (!msr_mtrr_valid(msr)) return 1; if (msr == MSR_MTRRdefType) *pdata = vcpu->arch.mtrr_state.def_type + (vcpu->arch.mtrr_state.enabled << 10); else if (msr == MSR_MTRRfix64K_00000) *pdata = p[0]; else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000) *pdata = p[1 + msr - MSR_MTRRfix16K_80000]; else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000) *pdata = p[3 + msr - MSR_MTRRfix4K_C0000]; else if (msr == MSR_IA32_CR_PAT) *pdata = vcpu->arch.pat; else { /* Variable MTRRs */ int idx, is_mtrr_mask; u64 *pt; idx = (msr - 0x200) / 2; is_mtrr_mask = msr - 0x200 - 2 * idx; if (!is_mtrr_mask) pt = (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo; else pt = (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo; *pdata = *pt; } return 0; } static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) { u64 data; u64 mcg_cap = vcpu->arch.mcg_cap; unsigned bank_num = mcg_cap & 0xff; switch (msr) { case MSR_IA32_P5_MC_ADDR: case MSR_IA32_P5_MC_TYPE: data = 0; break; case MSR_IA32_MCG_CAP: data = vcpu->arch.mcg_cap; break; case MSR_IA32_MCG_CTL: if (!(mcg_cap & MCG_CTL_P)) return 1; data = vcpu->arch.mcg_ctl; break; case MSR_IA32_MCG_STATUS: data = vcpu->arch.mcg_status; break; default: if (msr >= MSR_IA32_MC0_CTL && msr < MSR_IA32_MC0_CTL + 4 * bank_num) { u32 offset = msr - MSR_IA32_MC0_CTL; data = vcpu->arch.mce_banks[offset]; break; } return 1; } *pdata = data; return 0; } static int get_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) { u64 data = 0; struct kvm *kvm = vcpu->kvm; switch (msr) { case HV_X64_MSR_GUEST_OS_ID: data = kvm->arch.hv_guest_os_id; break; case HV_X64_MSR_HYPERCALL: data = kvm->arch.hv_hypercall; break; case HV_X64_MSR_TIME_REF_COUNT: { data = div_u64(get_kernel_ns() + kvm->arch.kvmclock_offset, 100); break; } case HV_X64_MSR_REFERENCE_TSC: data = kvm->arch.hv_tsc_page; break; default: vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr); return 1; } *pdata = data; return 0; } static int get_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) { u64 data = 0; switch (msr) { case HV_X64_MSR_VP_INDEX: { int r; struct kvm_vcpu *v; kvm_for_each_vcpu(r, v, vcpu->kvm) { if (v == vcpu) { data = r; break; } } break; } case HV_X64_MSR_EOI: return kvm_hv_vapic_msr_read(vcpu, APIC_EOI, pdata); case HV_X64_MSR_ICR: return kvm_hv_vapic_msr_read(vcpu, APIC_ICR, pdata); case HV_X64_MSR_TPR: return kvm_hv_vapic_msr_read(vcpu, APIC_TASKPRI, pdata); case 
HV_X64_MSR_APIC_ASSIST_PAGE: data = vcpu->arch.hv_vapic; break; default: vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr); return 1; } *pdata = data; return 0; } int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) { u64 data; switch (msr) { case MSR_IA32_PLATFORM_ID: case MSR_IA32_EBL_CR_POWERON: case MSR_IA32_DEBUGCTLMSR: case MSR_IA32_LASTBRANCHFROMIP: case MSR_IA32_LASTBRANCHTOIP: case MSR_IA32_LASTINTFROMIP: case MSR_IA32_LASTINTTOIP: case MSR_K8_SYSCFG: case MSR_K7_HWCR: case MSR_VM_HSAVE_PA: case MSR_K7_EVNTSEL0: case MSR_K7_EVNTSEL1: case MSR_K7_EVNTSEL2: case MSR_K7_EVNTSEL3: case MSR_K7_PERFCTR0: case MSR_K7_PERFCTR1: case MSR_K7_PERFCTR2: case MSR_K7_PERFCTR3: case MSR_K8_INT_PENDING_MSG: case MSR_AMD64_NB_CFG: case MSR_FAM10H_MMIO_CONF_BASE: case MSR_AMD64_BU_CFG2: data = 0; break; case MSR_P6_PERFCTR0: case MSR_P6_PERFCTR1: case MSR_P6_EVNTSEL0: case MSR_P6_EVNTSEL1: if (kvm_pmu_msr(vcpu, msr)) return kvm_pmu_get_msr(vcpu, msr, pdata); data = 0; break; case MSR_IA32_UCODE_REV: data = 0x100000000ULL; break; case MSR_MTRRcap: data = 0x500 | KVM_NR_VAR_MTRR; break; case 0x200 ... 0x2ff: return get_msr_mtrr(vcpu, msr, pdata); case 0xcd: /* fsb frequency */ data = 3; break; /* * MSR_EBC_FREQUENCY_ID * Conservative value valid for even the basic CPU models. * Models 0,1: 000 in bits 23:21 indicating a bus speed of * 100MHz, model 2 000 in bits 18:16 indicating 100MHz, * and 266MHz for model 3, or 4. Set Core Clock * Frequency to System Bus Frequency Ratio to 1 (bits * 31:24) even though these are only valid for CPU * models > 2, however guests may end up dividing or * multiplying by zero otherwise. */ case MSR_EBC_FREQUENCY_ID: data = 1 << 24; break; case MSR_IA32_APICBASE: data = kvm_get_apic_base(vcpu); break; case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff: return kvm_x2apic_msr_read(vcpu, msr, pdata); break; case MSR_IA32_TSCDEADLINE: data = kvm_get_lapic_tscdeadline_msr(vcpu); break; case MSR_IA32_TSC_ADJUST: data = (u64)vcpu->arch.ia32_tsc_adjust_msr; break; case MSR_IA32_MISC_ENABLE: data = vcpu->arch.ia32_misc_enable_msr; break; case MSR_IA32_PERF_STATUS: /* TSC increment by tick */ data = 1000ULL; /* CPU multiplier */ data |= (((uint64_t)4ULL) << 40); break; case MSR_EFER: data = vcpu->arch.efer; break; case MSR_KVM_WALL_CLOCK: case MSR_KVM_WALL_CLOCK_NEW: data = vcpu->kvm->arch.wall_clock; break; case MSR_KVM_SYSTEM_TIME: case MSR_KVM_SYSTEM_TIME_NEW: data = vcpu->arch.time; break; case MSR_KVM_ASYNC_PF_EN: data = vcpu->arch.apf.msr_val; break; case MSR_KVM_STEAL_TIME: data = vcpu->arch.st.msr_val; break; case MSR_KVM_PV_EOI_EN: data = vcpu->arch.pv_eoi.msr_val; break; case MSR_IA32_P5_MC_ADDR: case MSR_IA32_P5_MC_TYPE: case MSR_IA32_MCG_CAP: case MSR_IA32_MCG_CTL: case MSR_IA32_MCG_STATUS: case MSR_IA32_MC0_CTL ... MSR_IA32_MC0_CTL + 4 * KVM_MAX_MCE_BANKS - 1: return get_msr_mce(vcpu, msr, pdata); case MSR_K7_CLK_CTL: /* * Provide expected ramp-up count for K7. All other * are set to zero, indicating minimum divisors for * every field. * * This prevents guest kernels on AMD host with CPU * type 6, model 8 and higher from exploding due to * the rdmsr failing. */ data = 0x20000000; break; case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15: if (kvm_hv_msr_partition_wide(msr)) { int r; mutex_lock(&vcpu->kvm->lock); r = get_msr_hyperv_pw(vcpu, msr, pdata); mutex_unlock(&vcpu->kvm->lock); return r; } else return get_msr_hyperv(vcpu, msr, pdata); break; case MSR_IA32_BBL_CR_CTL3: /* This legacy MSR exists but isn't fully documented in current * silicon. 
It is however accessed by winxp in very narrow * scenarios where it sets bit #19, itself documented as * a "reserved" bit. Best effort attempt to source coherent * read data here should the balance of the register be * interpreted by the guest: * * L2 cache control register 3: 64GB range, 256KB size, * enabled, latency 0x1, configured */ data = 0xbe702111; break; case MSR_AMD64_OSVW_ID_LENGTH: if (!guest_cpuid_has_osvw(vcpu)) return 1; data = vcpu->arch.osvw.length; break; case MSR_AMD64_OSVW_STATUS: if (!guest_cpuid_has_osvw(vcpu)) return 1; data = vcpu->arch.osvw.status; break; default: if (kvm_pmu_msr(vcpu, msr)) return kvm_pmu_get_msr(vcpu, msr, pdata); if (!ignore_msrs) { vcpu_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr); return 1; } else { vcpu_unimpl(vcpu, "ignored rdmsr: 0x%x\n", msr); data = 0; } break; } *pdata = data; return 0; } EXPORT_SYMBOL_GPL(kvm_get_msr_common); /* * Read or write a bunch of msrs. All parameters are kernel addresses. * * @return number of msrs set successfully. */ static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs, struct kvm_msr_entry *entries, int (*do_msr)(struct kvm_vcpu *vcpu, unsigned index, u64 *data)) { int i, idx; idx = srcu_read_lock(&vcpu->kvm->srcu); for (i = 0; i < msrs->nmsrs; ++i) if (do_msr(vcpu, entries[i].index, &entries[i].data)) break; srcu_read_unlock(&vcpu->kvm->srcu, idx); return i; } /* * Read or write a bunch of msrs. Parameters are user addresses. * * @return number of msrs set successfully. */ static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs, int (*do_msr)(struct kvm_vcpu *vcpu, unsigned index, u64 *data), int writeback) { struct kvm_msrs msrs; struct kvm_msr_entry *entries; int r, n; unsigned size; r = -EFAULT; if (copy_from_user(&msrs, user_msrs, sizeof msrs)) goto out; r = -E2BIG; if (msrs.nmsrs >= MAX_IO_MSRS) goto out; size = sizeof(struct kvm_msr_entry) * msrs.nmsrs; entries = memdup_user(user_msrs->entries, size); if (IS_ERR(entries)) { r = PTR_ERR(entries); goto out; } r = n = __msr_io(vcpu, &msrs, entries, do_msr); if (r < 0) goto out_free; r = -EFAULT; if (writeback && copy_to_user(user_msrs->entries, entries, size)) goto out_free; r = n; out_free: kfree(entries); out: return r; } int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) { int r; switch (ext) { case KVM_CAP_IRQCHIP: case KVM_CAP_HLT: case KVM_CAP_MMU_SHADOW_CACHE_CONTROL: case KVM_CAP_SET_TSS_ADDR: case KVM_CAP_EXT_CPUID: case KVM_CAP_EXT_EMUL_CPUID: case KVM_CAP_CLOCKSOURCE: case KVM_CAP_PIT: case KVM_CAP_NOP_IO_DELAY: case KVM_CAP_MP_STATE: case KVM_CAP_SYNC_MMU: case KVM_CAP_USER_NMI: case KVM_CAP_REINJECT_CONTROL: case KVM_CAP_IRQ_INJECT_STATUS: case KVM_CAP_IRQFD: case KVM_CAP_IOEVENTFD: case KVM_CAP_IOEVENTFD_NO_LENGTH: case KVM_CAP_PIT2: case KVM_CAP_PIT_STATE2: case KVM_CAP_SET_IDENTITY_MAP_ADDR: case KVM_CAP_XEN_HVM: case KVM_CAP_ADJUST_CLOCK: case KVM_CAP_VCPU_EVENTS: case KVM_CAP_HYPERV: case KVM_CAP_HYPERV_VAPIC: case KVM_CAP_HYPERV_SPIN: case KVM_CAP_PCI_SEGMENT: case KVM_CAP_DEBUGREGS: case KVM_CAP_X86_ROBUST_SINGLESTEP: case KVM_CAP_XSAVE: case KVM_CAP_ASYNC_PF: case KVM_CAP_GET_TSC_KHZ: case KVM_CAP_KVMCLOCK_CTRL: case KVM_CAP_READONLY_MEM: case KVM_CAP_HYPERV_TIME: case KVM_CAP_IOAPIC_POLARITY_IGNORED: #ifdef CONFIG_KVM_DEVICE_ASSIGNMENT case KVM_CAP_ASSIGN_DEV_IRQ: case KVM_CAP_PCI_2_3: #endif r = 1; break; case KVM_CAP_COALESCED_MMIO: r = KVM_COALESCED_MMIO_PAGE_OFFSET; break; case KVM_CAP_VAPIC: r = !kvm_x86_ops->cpu_has_accelerated_tpr(); break; case KVM_CAP_NR_VCPUS: r = KVM_SOFT_MAX_VCPUS; 
break; case KVM_CAP_MAX_VCPUS: r = KVM_MAX_VCPUS; break; case KVM_CAP_NR_MEMSLOTS: r = KVM_USER_MEM_SLOTS; break; case KVM_CAP_PV_MMU: /* obsolete */ r = 0; break; #ifdef CONFIG_KVM_DEVICE_ASSIGNMENT case KVM_CAP_IOMMU: r = iommu_present(&pci_bus_type); break; #endif case KVM_CAP_MCE: r = KVM_MAX_MCE_BANKS; break; case KVM_CAP_XCRS: r = cpu_has_xsave; break; case KVM_CAP_TSC_CONTROL: r = kvm_has_tsc_control; break; case KVM_CAP_TSC_DEADLINE_TIMER: r = boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER); break; default: r = 0; break; } return r; } long kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) { void __user *argp = (void __user *)arg; long r; switch (ioctl) { case KVM_GET_MSR_INDEX_LIST: { struct kvm_msr_list __user *user_msr_list = argp; struct kvm_msr_list msr_list; unsigned n; r = -EFAULT; if (copy_from_user(&msr_list, user_msr_list, sizeof msr_list)) goto out; n = msr_list.nmsrs; msr_list.nmsrs = num_msrs_to_save + ARRAY_SIZE(emulated_msrs); if (copy_to_user(user_msr_list, &msr_list, sizeof msr_list)) goto out; r = -E2BIG; if (n < msr_list.nmsrs) goto out; r = -EFAULT; if (copy_to_user(user_msr_list->indices, &msrs_to_save, num_msrs_to_save * sizeof(u32))) goto out; if (copy_to_user(user_msr_list->indices + num_msrs_to_save, &emulated_msrs, ARRAY_SIZE(emulated_msrs) * sizeof(u32))) goto out; r = 0; break; } case KVM_GET_SUPPORTED_CPUID: case KVM_GET_EMULATED_CPUID: { struct kvm_cpuid2 __user *cpuid_arg = argp; struct kvm_cpuid2 cpuid; r = -EFAULT; if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid)) goto out; r = kvm_dev_ioctl_get_cpuid(&cpuid, cpuid_arg->entries, ioctl); if (r) goto out; r = -EFAULT; if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid)) goto out; r = 0; break; } case KVM_X86_GET_MCE_CAP_SUPPORTED: { u64 mce_cap; mce_cap = KVM_MCE_CAP_SUPPORTED; r = -EFAULT; if (copy_to_user(argp, &mce_cap, sizeof mce_cap)) goto out; r = 0; break; } default: r = -EINVAL; } out: return r; } static void wbinvd_ipi(void *garbage) { wbinvd(); } static bool need_emulate_wbinvd(struct kvm_vcpu *vcpu) { return kvm_arch_has_noncoherent_dma(vcpu->kvm); } void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) { /* Address WBINVD may be executed by guest */ if (need_emulate_wbinvd(vcpu)) { if (kvm_x86_ops->has_wbinvd_exit()) cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask); else if (vcpu->cpu != -1 && vcpu->cpu != cpu) smp_call_function_single(vcpu->cpu, wbinvd_ipi, NULL, 1); } kvm_x86_ops->vcpu_load(vcpu, cpu); /* Apply any externally detected TSC adjustments (due to suspend) */ if (unlikely(vcpu->arch.tsc_offset_adjustment)) { adjust_tsc_offset_host(vcpu, vcpu->arch.tsc_offset_adjustment); vcpu->arch.tsc_offset_adjustment = 0; kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); } if (unlikely(vcpu->cpu != cpu) || check_tsc_unstable()) { s64 tsc_delta = !vcpu->arch.last_host_tsc ? 
0 : native_read_tsc() - vcpu->arch.last_host_tsc; if (tsc_delta < 0) mark_tsc_unstable("KVM discovered backwards TSC"); if (check_tsc_unstable()) { u64 offset = kvm_x86_ops->compute_tsc_offset(vcpu, vcpu->arch.last_guest_tsc); kvm_x86_ops->write_tsc_offset(vcpu, offset); vcpu->arch.tsc_catchup = 1; } /* * On a host with synchronized TSC, there is no need to update * kvmclock on vcpu->cpu migration */ if (!vcpu->kvm->arch.use_master_clock || vcpu->cpu == -1) kvm_make_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu); if (vcpu->cpu != cpu) kvm_migrate_timers(vcpu); vcpu->cpu = cpu; } accumulate_steal_time(vcpu); kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu); } void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) { kvm_x86_ops->vcpu_put(vcpu); kvm_put_guest_fpu(vcpu); vcpu->arch.last_host_tsc = native_read_tsc(); } static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s) { kvm_x86_ops->sync_pir_to_irr(vcpu); memcpy(s->regs, vcpu->arch.apic->regs, sizeof *s); return 0; } static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s) { kvm_apic_post_state_restore(vcpu, s); update_cr8_intercept(vcpu); return 0; } static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq) { if (irq->irq >= KVM_NR_INTERRUPTS) return -EINVAL; if (irqchip_in_kernel(vcpu->kvm)) return -ENXIO; kvm_queue_interrupt(vcpu, irq->irq, false); kvm_make_request(KVM_REQ_EVENT, vcpu); return 0; } static int kvm_vcpu_ioctl_nmi(struct kvm_vcpu *vcpu) { kvm_inject_nmi(vcpu); return 0; } static int vcpu_ioctl_tpr_access_reporting(struct kvm_vcpu *vcpu, struct kvm_tpr_access_ctl *tac) { if (tac->flags) return -EINVAL; vcpu->arch.tpr_access_reporting = !!tac->enabled; return 0; } static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu, u64 mcg_cap) { int r; unsigned bank_num = mcg_cap & 0xff, bank; r = -EINVAL; if (!bank_num || bank_num >= KVM_MAX_MCE_BANKS) goto out; if (mcg_cap & ~(KVM_MCE_CAP_SUPPORTED | 0xff | 0xff0000)) goto out; r = 0; vcpu->arch.mcg_cap = mcg_cap; /* Init IA32_MCG_CTL to all 1s */ if (mcg_cap & MCG_CTL_P) vcpu->arch.mcg_ctl = ~(u64)0; /* Init IA32_MCi_CTL to all 1s */ for (bank = 0; bank < bank_num; bank++) vcpu->arch.mce_banks[bank*4] = ~(u64)0; out: return r; } static int kvm_vcpu_ioctl_x86_set_mce(struct kvm_vcpu *vcpu, struct kvm_x86_mce *mce) { u64 mcg_cap = vcpu->arch.mcg_cap; unsigned bank_num = mcg_cap & 0xff; u64 *banks = vcpu->arch.mce_banks; if (mce->bank >= bank_num || !(mce->status & MCI_STATUS_VAL)) return -EINVAL; /* * if IA32_MCG_CTL is not all 1s, the uncorrected error * reporting is disabled */ if ((mce->status & MCI_STATUS_UC) && (mcg_cap & MCG_CTL_P) && vcpu->arch.mcg_ctl != ~(u64)0) return 0; banks += 4 * mce->bank; /* * if IA32_MCi_CTL is not all 1s, the uncorrected error * reporting is disabled for the bank */ if ((mce->status & MCI_STATUS_UC) && banks[0] != ~(u64)0) return 0; if (mce->status & MCI_STATUS_UC) { if ((vcpu->arch.mcg_status & MCG_STATUS_MCIP) || !kvm_read_cr4_bits(vcpu, X86_CR4_MCE)) { kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); return 0; } if (banks[1] & MCI_STATUS_VAL) mce->status |= MCI_STATUS_OVER; banks[2] = mce->addr; banks[3] = mce->misc; vcpu->arch.mcg_status = mce->mcg_status; banks[1] = mce->status; kvm_queue_exception(vcpu, MC_VECTOR); } else if (!(banks[1] & MCI_STATUS_VAL) || !(banks[1] & MCI_STATUS_UC)) { if (banks[1] & MCI_STATUS_VAL) mce->status |= MCI_STATUS_OVER; banks[2] = mce->addr; banks[3] = mce->misc; banks[1] = mce->status; } else banks[1] |= MCI_STATUS_OVER; return 0; } static 
void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu, struct kvm_vcpu_events *events) { process_nmi(vcpu); events->exception.injected = vcpu->arch.exception.pending && !kvm_exception_is_soft(vcpu->arch.exception.nr); events->exception.nr = vcpu->arch.exception.nr; events->exception.has_error_code = vcpu->arch.exception.has_error_code; events->exception.pad = 0; events->exception.error_code = vcpu->arch.exception.error_code; events->interrupt.injected = vcpu->arch.interrupt.pending && !vcpu->arch.interrupt.soft; events->interrupt.nr = vcpu->arch.interrupt.nr; events->interrupt.soft = 0; events->interrupt.shadow = kvm_x86_ops->get_interrupt_shadow(vcpu); events->nmi.injected = vcpu->arch.nmi_injected; events->nmi.pending = vcpu->arch.nmi_pending != 0; events->nmi.masked = kvm_x86_ops->get_nmi_mask(vcpu); events->nmi.pad = 0; events->sipi_vector = 0; /* never valid when reporting to user space */ events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING | KVM_VCPUEVENT_VALID_SHADOW); memset(&events->reserved, 0, sizeof(events->reserved)); } static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu, struct kvm_vcpu_events *events) { if (events->flags & ~(KVM_VCPUEVENT_VALID_NMI_PENDING | KVM_VCPUEVENT_VALID_SIPI_VECTOR | KVM_VCPUEVENT_VALID_SHADOW)) return -EINVAL; process_nmi(vcpu); vcpu->arch.exception.pending = events->exception.injected; vcpu->arch.exception.nr = events->exception.nr; vcpu->arch.exception.has_error_code = events->exception.has_error_code; vcpu->arch.exception.error_code = events->exception.error_code; vcpu->arch.interrupt.pending = events->interrupt.injected; vcpu->arch.interrupt.nr = events->interrupt.nr; vcpu->arch.interrupt.soft = events->interrupt.soft; if (events->flags & KVM_VCPUEVENT_VALID_SHADOW) kvm_x86_ops->set_interrupt_shadow(vcpu, events->interrupt.shadow); vcpu->arch.nmi_injected = events->nmi.injected; if (events->flags & KVM_VCPUEVENT_VALID_NMI_PENDING) vcpu->arch.nmi_pending = events->nmi.pending; kvm_x86_ops->set_nmi_mask(vcpu, events->nmi.masked); if (events->flags & KVM_VCPUEVENT_VALID_SIPI_VECTOR && kvm_vcpu_has_lapic(vcpu)) vcpu->arch.apic->sipi_vector = events->sipi_vector; kvm_make_request(KVM_REQ_EVENT, vcpu); return 0; } static void kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu, struct kvm_debugregs *dbgregs) { unsigned long val; memcpy(dbgregs->db, vcpu->arch.db, sizeof(vcpu->arch.db)); _kvm_get_dr(vcpu, 6, &val); dbgregs->dr6 = val; dbgregs->dr7 = vcpu->arch.dr7; dbgregs->flags = 0; memset(&dbgregs->reserved, 0, sizeof(dbgregs->reserved)); } static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu, struct kvm_debugregs *dbgregs) { if (dbgregs->flags) return -EINVAL; memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db)); vcpu->arch.dr6 = dbgregs->dr6; kvm_update_dr6(vcpu); vcpu->arch.dr7 = dbgregs->dr7; kvm_update_dr7(vcpu); return 0; } static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu, struct kvm_xsave *guest_xsave) { if (cpu_has_xsave) { memcpy(guest_xsave->region, &vcpu->arch.guest_fpu.state->xsave, vcpu->arch.guest_xstate_size); *(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)] &= vcpu->arch.guest_supported_xcr0 | XSTATE_FPSSE; } else { memcpy(guest_xsave->region, &vcpu->arch.guest_fpu.state->fxsave, sizeof(struct i387_fxsave_struct)); *(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)] = XSTATE_FPSSE; } } static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu, struct kvm_xsave *guest_xsave) { u64 xstate_bv = *(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)]; 
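	/*
	 * xstate_bv is the XSTATE_BV word of the XSAVE header, which sits
	 * at byte offset 512 (XSAVE_HDR_OFFSET) in the area; region[] is a
	 * u32 array, hence the sizeof(u32) division in the index above.
	 */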
	if (cpu_has_xsave) {
		/*
		 * Here we allow setting states that are not present in
		 * CPUID leaf 0xD, index 0, EDX:EAX.  This is for compatibility
		 * with old userspace.
		 */
		if (xstate_bv & ~kvm_supported_xcr0())
			return -EINVAL;
		memcpy(&vcpu->arch.guest_fpu.state->xsave,
		       guest_xsave->region, vcpu->arch.guest_xstate_size);
	} else {
		if (xstate_bv & ~XSTATE_FPSSE)
			return -EINVAL;
		memcpy(&vcpu->arch.guest_fpu.state->fxsave,
		       guest_xsave->region, sizeof(struct i387_fxsave_struct));
	}
	return 0;
}

static void kvm_vcpu_ioctl_x86_get_xcrs(struct kvm_vcpu *vcpu,
					struct kvm_xcrs *guest_xcrs)
{
	if (!cpu_has_xsave) {
		guest_xcrs->nr_xcrs = 0;
		return;
	}

	guest_xcrs->nr_xcrs = 1;
	guest_xcrs->flags = 0;
	guest_xcrs->xcrs[0].xcr = XCR_XFEATURE_ENABLED_MASK;
	guest_xcrs->xcrs[0].value = vcpu->arch.xcr0;
}

static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu,
				       struct kvm_xcrs *guest_xcrs)
{
	int i, r = 0;

	if (!cpu_has_xsave)
		return -EINVAL;

	if (guest_xcrs->nr_xcrs > KVM_MAX_XCRS || guest_xcrs->flags)
		return -EINVAL;

	for (i = 0; i < guest_xcrs->nr_xcrs; i++)
		/* Only support XCR0 currently */
		if (guest_xcrs->xcrs[i].xcr == XCR_XFEATURE_ENABLED_MASK) {
			r = __kvm_set_xcr(vcpu, XCR_XFEATURE_ENABLED_MASK,
					  guest_xcrs->xcrs[i].value);
			break;
		}
	if (r)
		r = -EINVAL;
	return r;
}

/*
 * kvm_set_guest_paused() indicates to the guest kernel that it has been
 * stopped by the hypervisor.  This function will be called from the host only.
 * EINVAL is returned when the host attempts to set the flag for a guest that
 * does not support pv clocks.
 */
static int kvm_set_guest_paused(struct kvm_vcpu *vcpu)
{
	if (!vcpu->arch.pv_time_enabled)
		return -EINVAL;
	vcpu->arch.pvclock_set_guest_stopped_request = true;
	kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
	return 0;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;
	union {
		struct kvm_lapic_state *lapic;
		struct kvm_xsave *xsave;
		struct kvm_xcrs *xcrs;
		void *buffer;
	} u;

	u.buffer = NULL;
	switch (ioctl) {
	case KVM_GET_LAPIC: {
		r = -EINVAL;
		if (!vcpu->arch.apic)
			goto out;
		u.lapic = kzalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL);

		r = -ENOMEM;
		if (!u.lapic)
			goto out;
		r = kvm_vcpu_ioctl_get_lapic(vcpu, u.lapic);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, u.lapic, sizeof(struct kvm_lapic_state)))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_LAPIC: {
		r = -EINVAL;
		if (!vcpu->arch.apic)
			goto out;
		u.lapic = memdup_user(argp, sizeof(*u.lapic));
		if (IS_ERR(u.lapic))
			return PTR_ERR(u.lapic);

		r = kvm_vcpu_ioctl_set_lapic(vcpu, u.lapic);
		break;
	}
	case KVM_INTERRUPT: {
		struct kvm_interrupt irq;

		r = -EFAULT;
		if (copy_from_user(&irq, argp, sizeof irq))
			goto out;
		r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
		break;
	}
	case KVM_NMI: {
		r = kvm_vcpu_ioctl_nmi(vcpu);
		break;
	}
	case KVM_SET_CPUID: {
		struct kvm_cpuid __user *cpuid_arg = argp;
		struct kvm_cpuid cpuid;

		r = -EFAULT;
		if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
			goto out;
		r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries);
		break;
	}
	case KVM_SET_CPUID2: {
		struct kvm_cpuid2 __user *cpuid_arg = argp;
		struct kvm_cpuid2 cpuid;

		r = -EFAULT;
		if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
			goto out;
		r = kvm_vcpu_ioctl_set_cpuid2(vcpu, &cpuid,
					      cpuid_arg->entries);
		break;
	}
	case KVM_GET_CPUID2: {
		struct kvm_cpuid2 __user *cpuid_arg = argp;
		struct kvm_cpuid2 cpuid;

		r = -EFAULT;
		if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
			goto out;
		r = kvm_vcpu_ioctl_get_cpuid2(vcpu, &cpuid,
					      cpuid_arg->entries);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
			goto out;
		r = 0;
		break;
	}
	case KVM_GET_MSRS:
		r = msr_io(vcpu, argp, kvm_get_msr, 1);
		break;
	case KVM_SET_MSRS:
		r = msr_io(vcpu, argp, do_set_msr, 0);
		break;
	case KVM_TPR_ACCESS_REPORTING: {
		struct kvm_tpr_access_ctl tac;

		r = -EFAULT;
		if (copy_from_user(&tac, argp, sizeof tac))
			goto out;
		r = vcpu_ioctl_tpr_access_reporting(vcpu, &tac);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &tac, sizeof tac))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_VAPIC_ADDR: {
		struct kvm_vapic_addr va;

		r = -EINVAL;
		if (!irqchip_in_kernel(vcpu->kvm))
			goto out;
		r = -EFAULT;
		if (copy_from_user(&va, argp, sizeof va))
			goto out;
		r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
		break;
	}
	case KVM_X86_SETUP_MCE: {
		u64 mcg_cap;

		r = -EFAULT;
		if (copy_from_user(&mcg_cap, argp, sizeof mcg_cap))
			goto out;
		r = kvm_vcpu_ioctl_x86_setup_mce(vcpu, mcg_cap);
		break;
	}
	case KVM_X86_SET_MCE: {
		struct kvm_x86_mce mce;

		r = -EFAULT;
		if (copy_from_user(&mce, argp, sizeof mce))
			goto out;
		r = kvm_vcpu_ioctl_x86_set_mce(vcpu, &mce);
		break;
	}
	case KVM_GET_VCPU_EVENTS: {
		struct kvm_vcpu_events events;

		kvm_vcpu_ioctl_x86_get_vcpu_events(vcpu, &events);

		r = -EFAULT;
		if (copy_to_user(argp, &events, sizeof(struct kvm_vcpu_events)))
			break;
		r = 0;
		break;
	}
	case KVM_SET_VCPU_EVENTS: {
		struct kvm_vcpu_events events;

		r = -EFAULT;
		if (copy_from_user(&events, argp, sizeof(struct kvm_vcpu_events)))
			break;

		r = kvm_vcpu_ioctl_x86_set_vcpu_events(vcpu, &events);
		break;
	}
	case KVM_GET_DEBUGREGS: {
		struct kvm_debugregs dbgregs;

		kvm_vcpu_ioctl_x86_get_debugregs(vcpu, &dbgregs);

		r = -EFAULT;
		if (copy_to_user(argp, &dbgregs,
				 sizeof(struct kvm_debugregs)))
			break;
		r = 0;
		break;
	}
	case KVM_SET_DEBUGREGS: {
		struct kvm_debugregs dbgregs;

		r = -EFAULT;
		if (copy_from_user(&dbgregs, argp,
				   sizeof(struct kvm_debugregs)))
			break;

		r = kvm_vcpu_ioctl_x86_set_debugregs(vcpu, &dbgregs);
		break;
	}
	case KVM_GET_XSAVE: {
		u.xsave = kzalloc(sizeof(struct kvm_xsave), GFP_KERNEL);
		r = -ENOMEM;
		if (!u.xsave)
			break;

		kvm_vcpu_ioctl_x86_get_xsave(vcpu, u.xsave);

		r = -EFAULT;
		if (copy_to_user(argp, u.xsave, sizeof(struct kvm_xsave)))
			break;
		r = 0;
		break;
	}
	case KVM_SET_XSAVE: {
		u.xsave = memdup_user(argp, sizeof(*u.xsave));
		if (IS_ERR(u.xsave))
			return PTR_ERR(u.xsave);

		r = kvm_vcpu_ioctl_x86_set_xsave(vcpu, u.xsave);
		break;
	}
	case KVM_GET_XCRS: {
		u.xcrs = kzalloc(sizeof(struct kvm_xcrs), GFP_KERNEL);
		r = -ENOMEM;
		if (!u.xcrs)
			break;

		kvm_vcpu_ioctl_x86_get_xcrs(vcpu, u.xcrs);

		r = -EFAULT;
		if (copy_to_user(argp, u.xcrs,
				 sizeof(struct kvm_xcrs)))
			break;
		r = 0;
		break;
	}
	case KVM_SET_XCRS: {
		u.xcrs = memdup_user(argp, sizeof(*u.xcrs));
		if (IS_ERR(u.xcrs))
			return PTR_ERR(u.xcrs);

		r = kvm_vcpu_ioctl_x86_set_xcrs(vcpu, u.xcrs);
		break;
	}
	case KVM_SET_TSC_KHZ: {
		u32 user_tsc_khz;

		r = -EINVAL;
		user_tsc_khz = (u32)arg;

		if (user_tsc_khz >= kvm_max_guest_tsc_khz)
			goto out;

		if (user_tsc_khz == 0)
			user_tsc_khz = tsc_khz;

		kvm_set_tsc_khz(vcpu, user_tsc_khz);

		r = 0;
		goto out;
	}
	case KVM_GET_TSC_KHZ: {
		r = vcpu->arch.virtual_tsc_khz;
		goto out;
	}
	case KVM_KVMCLOCK_CTRL: {
		r = kvm_set_guest_paused(vcpu);
		goto out;
	}
	default:
		r = -EINVAL;
	}
out:
	kfree(u.buffer);
	return r;
}

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

static int kvm_vm_ioctl_set_tss_addr(struct kvm *kvm, unsigned long addr)
{
	int ret;

	if (addr > (unsigned int)(-3 * PAGE_SIZE))
		return -EINVAL;
	ret = kvm_x86_ops->set_tss_addr(kvm, addr);
	return ret;
}

static int kvm_vm_ioctl_set_identity_map_addr(struct kvm *kvm,
					      u64 ident_addr)
{
	kvm->arch.ept_identity_map_addr = ident_addr;
	return 0;
}

static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
					 u32 kvm_nr_mmu_pages)
{
	if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES)
		return -EINVAL;

	mutex_lock(&kvm->slots_lock);

	kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
	kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages;

	mutex_unlock(&kvm->slots_lock);
	return 0;
}

static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
{
	return kvm->arch.n_max_mmu_pages;
}

static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
{
	int r;

	r = 0;
	switch (chip->chip_id) {
	case KVM_IRQCHIP_PIC_MASTER:
		memcpy(&chip->chip.pic,
			&pic_irqchip(kvm)->pics[0],
			sizeof(struct kvm_pic_state));
		break;
	case KVM_IRQCHIP_PIC_SLAVE:
		memcpy(&chip->chip.pic,
			&pic_irqchip(kvm)->pics[1],
			sizeof(struct kvm_pic_state));
		break;
	case KVM_IRQCHIP_IOAPIC:
		r = kvm_get_ioapic(kvm, &chip->chip.ioapic);
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
{
	int r;

	r = 0;
	switch (chip->chip_id) {
	case KVM_IRQCHIP_PIC_MASTER:
		spin_lock(&pic_irqchip(kvm)->lock);
		memcpy(&pic_irqchip(kvm)->pics[0],
			&chip->chip.pic,
			sizeof(struct kvm_pic_state));
		spin_unlock(&pic_irqchip(kvm)->lock);
		break;
	case KVM_IRQCHIP_PIC_SLAVE:
		spin_lock(&pic_irqchip(kvm)->lock);
		memcpy(&pic_irqchip(kvm)->pics[1],
			&chip->chip.pic,
			sizeof(struct kvm_pic_state));
		spin_unlock(&pic_irqchip(kvm)->lock);
		break;
	case KVM_IRQCHIP_IOAPIC:
		r = kvm_set_ioapic(kvm, &chip->chip.ioapic);
		break;
	default:
		r = -EINVAL;
		break;
	}
	kvm_pic_update_irq(pic_irqchip(kvm));
	return r;
}

static int kvm_vm_ioctl_get_pit(struct kvm *kvm, struct kvm_pit_state *ps)
{
	int r = 0;

	mutex_lock(&kvm->arch.vpit->pit_state.lock);
	memcpy(ps, &kvm->arch.vpit->pit_state, sizeof(struct kvm_pit_state));
	mutex_unlock(&kvm->arch.vpit->pit_state.lock);
	return r;
}

static int kvm_vm_ioctl_set_pit(struct kvm *kvm, struct kvm_pit_state *ps)
{
	int r = 0;

	mutex_lock(&kvm->arch.vpit->pit_state.lock);
	memcpy(&kvm->arch.vpit->pit_state, ps, sizeof(struct kvm_pit_state));
	kvm_pit_load_count(kvm, 0, ps->channels[0].count, 0);
	mutex_unlock(&kvm->arch.vpit->pit_state.lock);
	return r;
}

static int kvm_vm_ioctl_get_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
{
	int r = 0;

	mutex_lock(&kvm->arch.vpit->pit_state.lock);
	memcpy(ps->channels, &kvm->arch.vpit->pit_state.channels,
		sizeof(ps->channels));
	ps->flags = kvm->arch.vpit->pit_state.flags;
	mutex_unlock(&kvm->arch.vpit->pit_state.lock);
	memset(&ps->reserved, 0, sizeof(ps->reserved));
	return r;
}

static int kvm_vm_ioctl_set_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
{
	int r = 0, start = 0;
	u32 prev_legacy, cur_legacy;

	mutex_lock(&kvm->arch.vpit->pit_state.lock);
	prev_legacy = kvm->arch.vpit->pit_state.flags & KVM_PIT_FLAGS_HPET_LEGACY;
	cur_legacy = ps->flags & KVM_PIT_FLAGS_HPET_LEGACY;
	if (!prev_legacy && cur_legacy)
		start = 1;
	memcpy(&kvm->arch.vpit->pit_state.channels, &ps->channels,
	       sizeof(kvm->arch.vpit->pit_state.channels));
	kvm->arch.vpit->pit_state.flags = ps->flags;
	kvm_pit_load_count(kvm, 0, kvm->arch.vpit->pit_state.channels[0].count,
			   start);
	mutex_unlock(&kvm->arch.vpit->pit_state.lock);
	return r;
}

static int kvm_vm_ioctl_reinject(struct kvm *kvm,
				 struct kvm_reinject_control *control)
{
	if (!kvm->arch.vpit)
		return -ENXIO;
	mutex_lock(&kvm->arch.vpit->pit_state.lock);
	kvm->arch.vpit->pit_state.reinject = control->pit_reinject;
	mutex_unlock(&kvm->arch.vpit->pit_state.lock);
	return 0;
}
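/*
 * Illustrative userspace usage (a sketch, not part of this file): the
 * dirty log implemented below is fetched per memory slot with the
 * KVM_GET_DIRTY_LOG vm ioctl, e.g.:
 *
 *	struct kvm_dirty_log log = {
 *		.slot = slot_id,
 *		.dirty_bitmap = bitmap_buf,
 *	};
 *	ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log);
 *
 * slot_id, bitmap_buf and vm_fd are assumed names; the buffer must be
 * at least kvm_dirty_bitmap_bytes() large for the slot (one bit per page).
 */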
/**
 * kvm_vm_ioctl_get_dirty_log - get and clear the log of dirty pages in a slot
 * @kvm: kvm instance
 * @log: slot id and address to which we copy the log
 *
 * We need to keep it in mind that VCPU threads can write to the bitmap
 * concurrently.  So, to avoid losing data, we keep the following order for
 * each bit:
 *
 *   1. Take a snapshot of the bit and clear it if needed.
 *   2. Write protect the corresponding page.
 *   3. Flush TLB's if needed.
 *   4. Copy the snapshot to the userspace.
 *
 * Between 2 and 3, the guest may write to the page using the remaining TLB
 * entry.  This is not a problem because the page will be reported dirty at
 * step 4 using the snapshot taken before and step 3 ensures that successive
 * writes will be logged for the next call.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
	int r;
	struct kvm_memory_slot *memslot;
	unsigned long n, i;
	unsigned long *dirty_bitmap;
	unsigned long *dirty_bitmap_buffer;
	bool is_dirty = false;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	memslot = id_to_memslot(kvm->memslots, log->slot);

	dirty_bitmap = memslot->dirty_bitmap;
	r = -ENOENT;
	if (!dirty_bitmap)
		goto out;

	n = kvm_dirty_bitmap_bytes(memslot);

	dirty_bitmap_buffer = dirty_bitmap + n / sizeof(long);
	memset(dirty_bitmap_buffer, 0, n);

	spin_lock(&kvm->mmu_lock);

	for (i = 0; i < n / sizeof(long); i++) {
		unsigned long mask;
		gfn_t offset;

		if (!dirty_bitmap[i])
			continue;

		is_dirty = true;

		mask = xchg(&dirty_bitmap[i], 0);
		dirty_bitmap_buffer[i] = mask;

		offset = i * BITS_PER_LONG;
		kvm_mmu_write_protect_pt_masked(kvm, memslot, offset, mask);
	}

	spin_unlock(&kvm->mmu_lock);

	/* See the comments in kvm_mmu_slot_remove_write_access(). */
	lockdep_assert_held(&kvm->slots_lock);

	/*
	 * All the TLBs can be flushed out of mmu lock, see the comments in
	 * kvm_mmu_slot_remove_write_access().
	 */
	if (is_dirty)
		kvm_flush_remote_tlbs(kvm);

	r = -EFAULT;
	if (copy_to_user(log->dirty_bitmap, dirty_bitmap_buffer, n))
		goto out;

	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,
			bool line_status)
{
	if (!irqchip_in_kernel(kvm))
		return -ENXIO;

	irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
					irq_event->irq, irq_event->level,
					line_status);
	return 0;
}

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r = -ENOTTY;
	/*
	 * This union makes it completely explicit to gcc-3.x
	 * that these two variables' stack usage should be
	 * combined, not added together.
	 */
	union {
		struct kvm_pit_state ps;
		struct kvm_pit_state2 ps2;
		struct kvm_pit_config pit_config;
	} u;

	switch (ioctl) {
	case KVM_SET_TSS_ADDR:
		r = kvm_vm_ioctl_set_tss_addr(kvm, arg);
		break;
	case KVM_SET_IDENTITY_MAP_ADDR: {
		u64 ident_addr;

		r = -EFAULT;
		if (copy_from_user(&ident_addr, argp, sizeof ident_addr))
			goto out;
		r = kvm_vm_ioctl_set_identity_map_addr(kvm, ident_addr);
		break;
	}
	case KVM_SET_NR_MMU_PAGES:
		r = kvm_vm_ioctl_set_nr_mmu_pages(kvm, arg);
		break;
	case KVM_GET_NR_MMU_PAGES:
		r = kvm_vm_ioctl_get_nr_mmu_pages(kvm);
		break;
	case KVM_CREATE_IRQCHIP: {
		struct kvm_pic *vpic;

		mutex_lock(&kvm->lock);
		r = -EEXIST;
		if (kvm->arch.vpic)
			goto create_irqchip_unlock;
		r = -EINVAL;
		if (atomic_read(&kvm->online_vcpus))
			goto create_irqchip_unlock;
		r = -ENOMEM;
		vpic = kvm_create_pic(kvm);
		if (vpic) {
			r = kvm_ioapic_init(kvm);
			if (r) {
				mutex_lock(&kvm->slots_lock);
				kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS,
							  &vpic->dev_master);
				kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS,
							  &vpic->dev_slave);
				kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS,
							  &vpic->dev_eclr);
				mutex_unlock(&kvm->slots_lock);
				kfree(vpic);
				goto create_irqchip_unlock;
			}
		} else
			goto create_irqchip_unlock;
		smp_wmb();
		kvm->arch.vpic = vpic;
		smp_wmb();
		r = kvm_setup_default_irq_routing(kvm);
		if (r) {
			mutex_lock(&kvm->slots_lock);
			mutex_lock(&kvm->irq_lock);
			kvm_ioapic_destroy(kvm);
			kvm_destroy_pic(kvm);
			mutex_unlock(&kvm->irq_lock);
			mutex_unlock(&kvm->slots_lock);
		}
	create_irqchip_unlock:
		mutex_unlock(&kvm->lock);
		break;
	}
	case KVM_CREATE_PIT:
		u.pit_config.flags = KVM_PIT_SPEAKER_DUMMY;
		goto create_pit;
	case KVM_CREATE_PIT2:
		r = -EFAULT;
		if (copy_from_user(&u.pit_config, argp,
				   sizeof(struct kvm_pit_config)))
			goto out;
	create_pit:
		mutex_lock(&kvm->slots_lock);
		r = -EEXIST;
		if (kvm->arch.vpit)
			goto create_pit_unlock;
		r = -ENOMEM;
		kvm->arch.vpit = kvm_create_pit(kvm, u.pit_config.flags);
		if (kvm->arch.vpit)
			r = 0;
	create_pit_unlock:
		mutex_unlock(&kvm->slots_lock);
		break;
	case KVM_GET_IRQCHIP: {
		/* 0: PIC master, 1: PIC slave, 2: IOAPIC */
		struct kvm_irqchip *chip;

		chip = memdup_user(argp, sizeof(*chip));
		if (IS_ERR(chip)) {
			r = PTR_ERR(chip);
			goto out;
		}

		r = -ENXIO;
		if (!irqchip_in_kernel(kvm))
			goto get_irqchip_out;
		r = kvm_vm_ioctl_get_irqchip(kvm, chip);
		if (r)
			goto get_irqchip_out;
		r = -EFAULT;
		if (copy_to_user(argp, chip, sizeof *chip))
			goto get_irqchip_out;
		r = 0;
	get_irqchip_out:
		kfree(chip);
		break;
	}
	case KVM_SET_IRQCHIP: {
		/* 0: PIC master, 1: PIC slave, 2: IOAPIC */
		struct kvm_irqchip *chip;

		chip = memdup_user(argp, sizeof(*chip));
		if (IS_ERR(chip)) {
			r = PTR_ERR(chip);
			goto out;
		}

		r = -ENXIO;
		if (!irqchip_in_kernel(kvm))
			goto set_irqchip_out;
		r = kvm_vm_ioctl_set_irqchip(kvm, chip);
		if (r)
			goto set_irqchip_out;
		r = 0;
	set_irqchip_out:
		kfree(chip);
		break;
	}
	case KVM_GET_PIT: {
		r = -EFAULT;
		if (copy_from_user(&u.ps, argp, sizeof(struct kvm_pit_state)))
			goto out;
		r = -ENXIO;
		if (!kvm->arch.vpit)
			goto out;
		r = kvm_vm_ioctl_get_pit(kvm, &u.ps);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &u.ps, sizeof(struct kvm_pit_state)))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_PIT: {
		r = -EFAULT;
		if (copy_from_user(&u.ps, argp, sizeof u.ps))
			goto out;
		r = -ENXIO;
		if (!kvm->arch.vpit)
			goto out;
		r = kvm_vm_ioctl_set_pit(kvm, &u.ps);
		break;
	}
	case KVM_GET_PIT2: {
		r = -ENXIO;
		if (!kvm->arch.vpit)
			goto out;
		r = kvm_vm_ioctl_get_pit2(kvm, &u.ps2);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &u.ps2, sizeof(u.ps2)))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_PIT2: {
		r = -EFAULT;
		if (copy_from_user(&u.ps2, argp, sizeof(u.ps2)))
			goto out;
		r = -ENXIO;
		if (!kvm->arch.vpit)
			goto out;
		r = kvm_vm_ioctl_set_pit2(kvm, &u.ps2);
		break;
	}
	case KVM_REINJECT_CONTROL: {
		struct kvm_reinject_control control;

		r = -EFAULT;
		if (copy_from_user(&control, argp, sizeof(control)))
			goto out;
		r = kvm_vm_ioctl_reinject(kvm, &control);
		break;
	}
	case KVM_XEN_HVM_CONFIG: {
		r = -EFAULT;
		if (copy_from_user(&kvm->arch.xen_hvm_config, argp,
				   sizeof(struct kvm_xen_hvm_config)))
			goto out;
		r = -EINVAL;
		if (kvm->arch.xen_hvm_config.flags)
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_CLOCK: {
		struct kvm_clock_data user_ns;
		u64 now_ns;
		s64 delta;

		r = -EFAULT;
		if (copy_from_user(&user_ns, argp, sizeof(user_ns)))
			goto out;

		r = -EINVAL;
		if (user_ns.flags)
			goto out;

		r = 0;
		local_irq_disable();
		now_ns = get_kernel_ns();
		delta = user_ns.clock - now_ns;
		local_irq_enable();
		kvm->arch.kvmclock_offset = delta;
		kvm_gen_update_masterclock(kvm);
		break;
	}
	case KVM_GET_CLOCK: {
		struct kvm_clock_data user_ns;
		u64 now_ns;

		local_irq_disable();
		now_ns = get_kernel_ns();
		user_ns.clock = kvm->arch.kvmclock_offset + now_ns;
		local_irq_enable();
		user_ns.flags = 0;
		memset(&user_ns.pad, 0, sizeof(user_ns.pad));

		r = -EFAULT;
		if (copy_to_user(argp, &user_ns, sizeof(user_ns)))
			goto out;
		r = 0;
		break;
	}
	default:
		;
	}
out:
	return r;
}

static void kvm_init_msr_list(void)
{
	u32 dummy[2];
	unsigned i, j;

	/* skip the first msrs in the list. KVM-specific */
	for (i = j = KVM_SAVE_MSRS_BEGIN; i < ARRAY_SIZE(msrs_to_save); i++) {
		if (rdmsr_safe(msrs_to_save[i], &dummy[0], &dummy[1]) < 0)
			continue;

		/*
		 * Even MSRs that are valid in the host may not be exposed
		 * to the guests in some cases.  We could work around this
		 * in VMX with the generic MSR save/load machinery, but it
		 * is not really worthwhile since it will really only
		 * happen with nested virtualization.
		 */
		switch (msrs_to_save[i]) {
		case MSR_IA32_BNDCFGS:
			if (!kvm_x86_ops->mpx_supported())
				continue;
			break;
		default:
			break;
		}

		if (j < i)
			msrs_to_save[j] = msrs_to_save[i];
		j++;
	}
	num_msrs_to_save = j;
}

static int vcpu_mmio_write(struct kvm_vcpu *vcpu, gpa_t addr, int len,
			   const void *v)
{
	int handled = 0;
	int n;

	do {
		n = min(len, 8);
		if (!(vcpu->arch.apic &&
		      !kvm_iodevice_write(&vcpu->arch.apic->dev, addr, n, v))
		    && kvm_io_bus_write(vcpu->kvm, KVM_MMIO_BUS, addr, n, v))
			break;
		handled += n;
		addr += n;
		len -= n;
		v += n;
	} while (len);

	return handled;
}

static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v)
{
	int handled = 0;
	int n;

	do {
		n = min(len, 8);
		if (!(vcpu->arch.apic &&
		      !kvm_iodevice_read(&vcpu->arch.apic->dev, addr, n, v))
		    && kvm_io_bus_read(vcpu->kvm, KVM_MMIO_BUS, addr, n, v))
			break;
		trace_kvm_mmio(KVM_TRACE_MMIO_READ, n, addr, *(u64 *)v);
		handled += n;
		addr += n;
		len -= n;
		v += n;
	} while (len);

	return handled;
}

static void kvm_set_segment(struct kvm_vcpu *vcpu,
			struct kvm_segment *var, int seg)
{
	kvm_x86_ops->set_segment(vcpu, var, seg);
}

void kvm_get_segment(struct kvm_vcpu *vcpu,
		     struct kvm_segment *var, int seg)
{
	kvm_x86_ops->get_segment(vcpu, var, seg);
}

gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
			   struct x86_exception *exception)
{
	gpa_t t_gpa;

	BUG_ON(!mmu_is_nested(vcpu));

	/* NPT walks are always user-walks */
	access |= PFERR_USER_MASK;
	t_gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gpa, access, exception);

	return t_gpa;
}

gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
			      struct x86_exception *exception)
{
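	/*
	 * A CPL 3 access is treated as a user-mode page walk so that
	 * supervisor-only mappings fault as they would on real hardware.
	 */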
	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;

	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
}

gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva,
			       struct x86_exception *exception)
{
	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;

	access |= PFERR_FETCH_MASK;
	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
}

gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva,
			       struct x86_exception *exception)
{
	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;

	access |= PFERR_WRITE_MASK;
	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
}

/* uses this to access any guest's mapped memory without checking CPL */
gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
				struct x86_exception *exception)
{
	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, 0, exception);
}

static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
				      struct kvm_vcpu *vcpu, u32 access,
				      struct x86_exception *exception)
{
	void *data = val;
	int r = X86EMUL_CONTINUE;

	while (bytes) {
		gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, access,
							    exception);
		unsigned offset = addr & (PAGE_SIZE-1);
		unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset);
		int ret;

		if (gpa == UNMAPPED_GVA)
			return X86EMUL_PROPAGATE_FAULT;
		ret = kvm_read_guest_page(vcpu->kvm, gpa >> PAGE_SHIFT, data,
					  offset, toread);
		if (ret < 0) {
			r = X86EMUL_IO_NEEDED;
			goto out;
		}

		bytes -= toread;
		data += toread;
		addr += toread;
	}
out:
	return r;
}

/* used for instruction fetching */
static int kvm_fetch_guest_virt(struct x86_emulate_ctxt *ctxt,
				gva_t addr, void *val, unsigned int bytes,
				struct x86_exception *exception)
{
	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
	unsigned offset;
	int ret;

	/* Inline kvm_read_guest_virt_helper for speed.  */
	gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr,
						    access|PFERR_FETCH_MASK,
						    exception);
	if (unlikely(gpa == UNMAPPED_GVA))
		return X86EMUL_PROPAGATE_FAULT;

	offset = addr & (PAGE_SIZE-1);
	if (WARN_ON(offset + bytes > PAGE_SIZE))
		bytes = (unsigned)PAGE_SIZE - offset;
	ret = kvm_read_guest_page(vcpu->kvm, gpa >> PAGE_SHIFT, val,
				  offset, bytes);
	if (unlikely(ret < 0))
		return X86EMUL_IO_NEEDED;

	return X86EMUL_CONTINUE;
}

int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt,
			gva_t addr, void *val, unsigned int bytes,
			struct x86_exception *exception)
{
	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;

	return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access,
					  exception);
}
EXPORT_SYMBOL_GPL(kvm_read_guest_virt);

static int kvm_read_guest_virt_system(struct x86_emulate_ctxt *ctxt,
				      gva_t addr, void *val, unsigned int bytes,
				      struct x86_exception *exception)
{
	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);

	return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, 0, exception);
}

int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt,
				gva_t addr, void *val,
				unsigned int bytes,
				struct x86_exception *exception)
{
	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
	void *data = val;
	int r = X86EMUL_CONTINUE;

	while (bytes) {
		gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr,
							    PFERR_WRITE_MASK,
							    exception);
		unsigned offset = addr & (PAGE_SIZE-1);
		unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset);
		int ret;

		if (gpa == UNMAPPED_GVA)
			return X86EMUL_PROPAGATE_FAULT;
		ret = kvm_write_guest(vcpu->kvm, gpa, data, towrite);
		if (ret < 0) {
			r = X86EMUL_IO_NEEDED;
			goto out;
		}

		bytes -= towrite;
		data += towrite;
		addr += towrite;
	}
out:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_write_guest_virt_system);

static int vcpu_mmio_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
				gpa_t *gpa, struct x86_exception *exception,
				bool write)
{
	u32 access = ((kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0)
		| (write ? PFERR_WRITE_MASK : 0);

	if (vcpu_match_mmio_gva(vcpu, gva)
	    && !permission_fault(vcpu, vcpu->arch.walk_mmu,
				 vcpu->arch.access, access)) {
		*gpa = vcpu->arch.mmio_gfn << PAGE_SHIFT |
					(gva & (PAGE_SIZE - 1));
		trace_vcpu_match_mmio(gva, *gpa, write, false);
		return 1;
	}

	*gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);

	if (*gpa == UNMAPPED_GVA)
		return -1;

	/* For APIC access vmexit */
	if ((*gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
		return 1;

	if (vcpu_match_mmio_gpa(vcpu, *gpa)) {
		trace_vcpu_match_mmio(gva, *gpa, write, true);
		return 1;
	}

	return 0;
}

int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
			const void *val, int bytes)
{
	int ret;

	ret = kvm_write_guest(vcpu->kvm, gpa, val, bytes);
	if (ret < 0)
		return 0;
	kvm_mmu_pte_write(vcpu, gpa, val, bytes);
	return 1;
}

struct read_write_emulator_ops {
	int (*read_write_prepare)(struct kvm_vcpu *vcpu, void *val,
				  int bytes);
	int (*read_write_emulate)(struct kvm_vcpu *vcpu, gpa_t gpa,
				  void *val, int bytes);
	int (*read_write_mmio)(struct kvm_vcpu *vcpu, gpa_t gpa,
			       int bytes, void *val);
	int (*read_write_exit_mmio)(struct kvm_vcpu *vcpu, gpa_t gpa,
				    void *val, int bytes);
	bool write;
};

static int read_prepare(struct kvm_vcpu *vcpu, void *val, int bytes)
{
	if (vcpu->mmio_read_completed) {
		trace_kvm_mmio(KVM_TRACE_MMIO_READ, bytes,
			       vcpu->mmio_fragments[0].gpa, *(u64 *)val);
		vcpu->mmio_read_completed = 0;
		return 1;
	}

	return 0;
}

static int read_emulate(struct kvm_vcpu *vcpu, gpa_t gpa,
			void *val, int bytes)
{
	return !kvm_read_guest(vcpu->kvm, gpa, val, bytes);
}

static int write_emulate(struct kvm_vcpu *vcpu, gpa_t gpa,
			 void *val, int bytes)
{
	return emulator_write_phys(vcpu, gpa, val, bytes);
}

static int write_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes, void *val)
{
	trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, bytes, gpa, *(u64 *)val);
	return vcpu_mmio_write(vcpu, gpa, bytes, val);
}

static int read_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa,
			  void *val, int bytes)
{
	trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, bytes, gpa, 0);
	return X86EMUL_IO_NEEDED;
}

static int write_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa,
			   void *val, int bytes)
{
	struct kvm_mmio_fragment *frag = &vcpu->mmio_fragments[0];
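	/*
	 * Only the first fragment's payload is handed to userspace here;
	 * the caller has already set up the KVM_EXIT_MMIO exit, and any
	 * remaining fragments are completed on re-entry.
	 */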
	memcpy(vcpu->run->mmio.data, frag->data, min(8u, frag->len));
	return X86EMUL_CONTINUE;
}

static const struct read_write_emulator_ops read_emultor = {
	.read_write_prepare = read_prepare,
	.read_write_emulate = read_emulate,
	.read_write_mmio = vcpu_mmio_read,
	.read_write_exit_mmio = read_exit_mmio,
};

static const struct read_write_emulator_ops write_emultor = {
	.read_write_emulate = write_emulate,
	.read_write_mmio = write_mmio,
	.read_write_exit_mmio = write_exit_mmio,
	.write = true,
};

static int emulator_read_write_onepage(unsigned long addr, void *val,
				       unsigned int bytes,
				       struct x86_exception *exception,
				       struct kvm_vcpu *vcpu,
				       const struct read_write_emulator_ops *ops)
{
	gpa_t gpa;
	int handled, ret;
	bool write = ops->write;
	struct kvm_mmio_fragment *frag;

	ret = vcpu_mmio_gva_to_gpa(vcpu, addr, &gpa, exception, write);

	if (ret < 0)
		return X86EMUL_PROPAGATE_FAULT;

	/* For APIC access vmexit */
	if (ret)
		goto mmio;

	if (ops->read_write_emulate(vcpu, gpa, val, bytes))
		return X86EMUL_CONTINUE;

mmio:
	/*
	 * Is this MMIO handled locally?
	 */
	handled = ops->read_write_mmio(vcpu, gpa, bytes, val);
	if (handled == bytes)
		return X86EMUL_CONTINUE;

	gpa += handled;
	bytes -= handled;
	val += handled;

	WARN_ON(vcpu->mmio_nr_fragments >= KVM_MAX_MMIO_FRAGMENTS);
	frag = &vcpu->mmio_fragments[vcpu->mmio_nr_fragments++];
	frag->gpa = gpa;
	frag->data = val;
	frag->len = bytes;
	return X86EMUL_CONTINUE;
}

int emulator_read_write(struct x86_emulate_ctxt *ctxt, unsigned long addr,
			void *val, unsigned int bytes,
			struct x86_exception *exception,
			const struct read_write_emulator_ops *ops)
{
	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
	gpa_t gpa;
	int rc;

	if (ops->read_write_prepare &&
		  ops->read_write_prepare(vcpu, val, bytes))
		return X86EMUL_CONTINUE;

	vcpu->mmio_nr_fragments = 0;

	/* Crossing a page boundary? */
	if (((addr + bytes - 1) ^ addr) & PAGE_MASK) {
		int now;

		now = -addr & ~PAGE_MASK;
		rc = emulator_read_write_onepage(addr, val, now, exception,
						 vcpu, ops);

		if (rc != X86EMUL_CONTINUE)
			return rc;
		addr += now;
		val += now;
		bytes -= now;
	}

	rc = emulator_read_write_onepage(addr, val, bytes, exception,
					 vcpu, ops);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	if (!vcpu->mmio_nr_fragments)
		return rc;

	gpa = vcpu->mmio_fragments[0].gpa;

	vcpu->mmio_needed = 1;
	vcpu->mmio_cur_fragment = 0;

	vcpu->run->mmio.len = min(8u, vcpu->mmio_fragments[0].len);
	vcpu->run->mmio.is_write = vcpu->mmio_is_write = ops->write;
	vcpu->run->exit_reason = KVM_EXIT_MMIO;
	vcpu->run->mmio.phys_addr = gpa;

	return ops->read_write_exit_mmio(vcpu, gpa, val, bytes);
}

static int emulator_read_emulated(struct x86_emulate_ctxt *ctxt,
				  unsigned long addr,
				  void *val,
				  unsigned int bytes,
				  struct x86_exception *exception)
{
	return emulator_read_write(ctxt, addr, val, bytes,
				   exception, &read_emultor);
}

int emulator_write_emulated(struct x86_emulate_ctxt *ctxt,
			    unsigned long addr,
			    const void *val,
			    unsigned int bytes,
			    struct x86_exception *exception)
{
	return emulator_read_write(ctxt, addr, (void *)val, bytes,
				   exception, &write_emultor);
}

#define CMPXCHG_TYPE(t, ptr, old, new) \
	(cmpxchg((t *)(ptr), *(t *)(old), *(t *)(new)) == *(t *)(old))

#ifdef CONFIG_X86_64
#  define CMPXCHG64(ptr, old, new) CMPXCHG_TYPE(u64, ptr, old, new)
#else
#  define CMPXCHG64(ptr, old, new) \
	(cmpxchg64((u64 *)(ptr), *(u64 *)(old), *(u64 *)(new)) == *(u64 *)(old))
#endif

static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt,
				     unsigned long addr,
				     const void *old,
				     const void *new,
				     unsigned int bytes,
				     struct x86_exception *exception)
{
	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
	gpa_t gpa;
	struct page *page;
	char *kaddr;
	bool exchanged;

	/* a guest's cmpxchg8b has to be emulated atomically */
	if (bytes > 8 || (bytes & (bytes - 1)))
		goto emul_write;

	gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, NULL);

	if (gpa == UNMAPPED_GVA ||
	    (gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
		goto emul_write;

	if (((gpa + bytes - 1) & PAGE_MASK) != (gpa & PAGE_MASK))
		goto emul_write;

	page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
	if (is_error_page(page))
		goto emul_write;

	kaddr = kmap_atomic(page);
	kaddr += offset_in_page(gpa);
	switch (bytes) {
	case 1:
		exchanged = CMPXCHG_TYPE(u8, kaddr, old, new);
		break;
	case 2:
		exchanged = CMPXCHG_TYPE(u16, kaddr, old, new);
		break;
	case 4:
		exchanged = CMPXCHG_TYPE(u32, kaddr, old, new);
		break;
	case 8:
		exchanged = CMPXCHG64(kaddr, old, new);
		break;
	default:
		BUG();
	}
	kunmap_atomic(kaddr);
	kvm_release_page_dirty(page);

	if (!exchanged)
		return X86EMUL_CMPXCHG_FAILED;

	mark_page_dirty(vcpu->kvm, gpa >> PAGE_SHIFT);
	kvm_mmu_pte_write(vcpu, gpa, new, bytes);

	return X86EMUL_CONTINUE;

emul_write:
	printk_once(KERN_WARNING "kvm: emulating exchange as write\n");

	return emulator_write_emulated(ctxt, addr, new, bytes, exception);
}

static int kernel_pio(struct kvm_vcpu *vcpu, void *pd)
{
	/* TODO: String I/O for in kernel device */
	int r;

	if (vcpu->arch.pio.in)
		r = kvm_io_bus_read(vcpu->kvm, KVM_PIO_BUS, vcpu->arch.pio.port,
				    vcpu->arch.pio.size, pd);
	else
		r = kvm_io_bus_write(vcpu->kvm, KVM_PIO_BUS,
				     vcpu->arch.pio.port, vcpu->arch.pio.size,
				     pd);
	return r;
}

static int emulator_pio_in_out(struct kvm_vcpu *vcpu, int size,
			       unsigned short port, void *val,
			       unsigned int count, bool in)
{
	vcpu->arch.pio.port = port;
	vcpu->arch.pio.in = in;
	vcpu->arch.pio.count = count;
	vcpu->arch.pio.size = size;

	if (!kernel_pio(vcpu, vcpu->arch.pio_data)) {
		vcpu->arch.pio.count = 0;
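		/* Handled entirely by an in-kernel device; no exit to userspace. */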
		return 1;
	}

	vcpu->run->exit_reason = KVM_EXIT_IO;
	vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
	vcpu->run->io.size = size;
	vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
	vcpu->run->io.count = count;
	vcpu->run->io.port = port;

	return 0;
}

static int emulator_pio_in_emulated(struct x86_emulate_ctxt *ctxt,
				    int size, unsigned short port, void *val,
				    unsigned int count)
{
	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
	int ret;

	if (vcpu->arch.pio.count)
		goto data_avail;

	ret = emulator_pio_in_out(vcpu, size, port, val, count, true);
	if (ret) {
data_avail:
		memcpy(val, vcpu->arch.pio_data, size * count);
		trace_kvm_pio(KVM_PIO_IN, port, size, count, vcpu->arch.pio_data);
		vcpu->arch.pio.count = 0;
		return 1;
	}

	return 0;
}

static int emulator_pio_out_emulated(struct x86_emulate_ctxt *ctxt,
				     int size, unsigned short port,
				     const void *val, unsigned int count)
{
	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);

	memcpy(vcpu->arch.pio_data, val, size * count);
	trace_kvm_pio(KVM_PIO_OUT, port, size, count, vcpu->arch.pio_data);
	return emulator_pio_in_out(vcpu, size, port, (void *)val, count, false);
}

static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg)
{
	return kvm_x86_ops->get_segment_base(vcpu, seg);
}

static void emulator_invlpg(struct x86_emulate_ctxt *ctxt, ulong address)
{
	kvm_mmu_invlpg(emul_to_vcpu(ctxt), address);
}

int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu)
{
	if (!need_emulate_wbinvd(vcpu))
		return X86EMUL_CONTINUE;

	if (kvm_x86_ops->has_wbinvd_exit()) {
		int cpu = get_cpu();

		cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask);
		smp_call_function_many(vcpu->arch.wbinvd_dirty_mask,
				       wbinvd_ipi, NULL, 1);
		put_cpu();
		cpumask_clear(vcpu->arch.wbinvd_dirty_mask);
	} else
		wbinvd();
	return X86EMUL_CONTINUE;
}
EXPORT_SYMBOL_GPL(kvm_emulate_wbinvd);

static void emulator_wbinvd(struct x86_emulate_ctxt *ctxt)
{
	kvm_emulate_wbinvd(emul_to_vcpu(ctxt));
}

int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long *dest)
{
	return _kvm_get_dr(emul_to_vcpu(ctxt), dr, dest);
}

int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value)
{
	return __kvm_set_dr(emul_to_vcpu(ctxt), dr, value);
}

static u64 mk_cr_64(u64 curr_cr, u32 new_val)
{
	return (curr_cr & ~((1ULL << 32) - 1)) | new_val;
}

static unsigned long emulator_get_cr(struct x86_emulate_ctxt *ctxt, int cr)
{
	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
	unsigned long value;

	switch (cr) {
	case 0:
		value = kvm_read_cr0(vcpu);
		break;
	case 2:
		value = vcpu->arch.cr2;
		break;
	case 3:
		value = kvm_read_cr3(vcpu);
		break;
	case 4:
		value = kvm_read_cr4(vcpu);
		break;
	case 8:
		value = kvm_get_cr8(vcpu);
		break;
	default:
		kvm_err("%s: unexpected cr %u\n", __func__, cr);
		return 0;
	}

	return value;
}

static int emulator_set_cr(struct x86_emulate_ctxt *ctxt, int cr, ulong val)
{
	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
	int res = 0;

	switch (cr) {
	case 0:
		res = kvm_set_cr0(vcpu, mk_cr_64(kvm_read_cr0(vcpu), val));
		break;
	case 2:
		vcpu->arch.cr2 = val;
		break;
	case 3:
		res = kvm_set_cr3(vcpu, val);
		break;
	case 4:
		res = kvm_set_cr4(vcpu, mk_cr_64(kvm_read_cr4(vcpu), val));
		break;
	case 8:
		res = kvm_set_cr8(vcpu, val);
		break;
	default:
		kvm_err("%s: unexpected cr %u\n", __func__, cr);
		res = -1;
	}

	return res;
}

static int emulator_get_cpl(struct x86_emulate_ctxt *ctxt)
{
	return kvm_x86_ops->get_cpl(emul_to_vcpu(ctxt));
}

static void emulator_get_gdt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
{
	kvm_x86_ops->get_gdt(emul_to_vcpu(ctxt), dt);
}

static void emulator_get_idt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
{
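	/* Descriptor-table state lives in vendor (VMX/SVM) code; pass through. */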
	kvm_x86_ops->get_idt(emul_to_vcpu(ctxt), dt);
}

static void emulator_set_gdt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
{
	kvm_x86_ops->set_gdt(emul_to_vcpu(ctxt), dt);
}

static void emulator_set_idt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
{
	kvm_x86_ops->set_idt(emul_to_vcpu(ctxt), dt);
}

static unsigned long emulator_get_cached_segment_base(
	struct x86_emulate_ctxt *ctxt, int seg)
{
	return get_segment_base(emul_to_vcpu(ctxt), seg);
}

static bool emulator_get_segment(struct x86_emulate_ctxt *ctxt, u16 *selector,
				 struct desc_struct *desc, u32 *base3,
				 int seg)
{
	struct kvm_segment var;

	kvm_get_segment(emul_to_vcpu(ctxt), &var, seg);
	*selector = var.selector;

	if (var.unusable) {
		memset(desc, 0, sizeof(*desc));
		return false;
	}

	if (var.g)
		var.limit >>= 12;
	set_desc_limit(desc, var.limit);
	set_desc_base(desc, (unsigned long)var.base);
#ifdef CONFIG_X86_64
	if (base3)
		*base3 = var.base >> 32;
#endif
	desc->type = var.type;
	desc->s = var.s;
	desc->dpl = var.dpl;
	desc->p = var.present;
	desc->avl = var.avl;
	desc->l = var.l;
	desc->d = var.db;
	desc->g = var.g;

	return true;
}

static void emulator_set_segment(struct x86_emulate_ctxt *ctxt, u16 selector,
				 struct desc_struct *desc, u32 base3,
				 int seg)
{
	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
	struct kvm_segment var;

	var.selector = selector;
	var.base = get_desc_base(desc);
#ifdef CONFIG_X86_64
	var.base |= ((u64)base3) << 32;
#endif
	var.limit = get_desc_limit(desc);
	if (desc->g)
		var.limit = (var.limit << 12) | 0xfff;
	var.type = desc->type;
	var.dpl = desc->dpl;
	var.db = desc->d;
	var.s = desc->s;
	var.l = desc->l;
	var.g = desc->g;
	var.avl = desc->avl;
	var.present = desc->p;
	var.unusable = !var.present;
	var.padding = 0;

	kvm_set_segment(vcpu, &var, seg);
	return;
}

static int emulator_get_msr(struct x86_emulate_ctxt *ctxt,
			    u32 msr_index, u64 *pdata)
{
	return kvm_get_msr(emul_to_vcpu(ctxt), msr_index, pdata);
}

static int emulator_set_msr(struct x86_emulate_ctxt *ctxt,
			    u32 msr_index, u64 data)
{
	struct msr_data msr;

	msr.data = data;
	msr.index = msr_index;
	msr.host_initiated = false;
	return kvm_set_msr(emul_to_vcpu(ctxt), &msr);
}

static int emulator_check_pmc(struct x86_emulate_ctxt *ctxt,
			      u32 pmc)
{
	return kvm_pmu_check_pmc(emul_to_vcpu(ctxt), pmc);
}

static int emulator_read_pmc(struct x86_emulate_ctxt *ctxt,
			     u32 pmc, u64 *pdata)
{
	return kvm_pmu_read_pmc(emul_to_vcpu(ctxt), pmc, pdata);
}

static void emulator_halt(struct x86_emulate_ctxt *ctxt)
{
	emul_to_vcpu(ctxt)->arch.halt_request = 1;
}

static void emulator_get_fpu(struct x86_emulate_ctxt *ctxt)
{
	preempt_disable();
	kvm_load_guest_fpu(emul_to_vcpu(ctxt));
	/*
	 * CR0.TS may reference the host fpu state, not the guest fpu state,
	 * so it may be clear at this point.
	 */
	clts();
}

static void emulator_put_fpu(struct x86_emulate_ctxt *ctxt)
{
	preempt_enable();
}

static int emulator_intercept(struct x86_emulate_ctxt *ctxt,
			      struct x86_instruction_info *info,
			      enum x86_intercept_stage stage)
{
	return kvm_x86_ops->check_intercept(emul_to_vcpu(ctxt), info, stage);
}

static void emulator_get_cpuid(struct x86_emulate_ctxt *ctxt,
			       u32 *eax, u32 *ebx, u32 *ecx, u32 *edx)
{
	kvm_cpuid(emul_to_vcpu(ctxt), eax, ebx, ecx, edx);
}

static ulong emulator_read_gpr(struct x86_emulate_ctxt *ctxt, unsigned reg)
{
	return kvm_register_read(emul_to_vcpu(ctxt), reg);
}

static void emulator_write_gpr(struct x86_emulate_ctxt *ctxt, unsigned reg, ulong val)
{
	kvm_register_write(emul_to_vcpu(ctxt), reg, val);
}

static const struct x86_emulate_ops emulate_ops = {
	.read_gpr            = emulator_read_gpr,
	.write_gpr           = emulator_write_gpr,
	.read_std            = kvm_read_guest_virt_system,
	.write_std           = kvm_write_guest_virt_system,
	.fetch               = kvm_fetch_guest_virt,
	.read_emulated       = emulator_read_emulated,
	.write_emulated      = emulator_write_emulated,
	.cmpxchg_emulated    = emulator_cmpxchg_emulated,
	.invlpg              = emulator_invlpg,
	.pio_in_emulated     = emulator_pio_in_emulated,
	.pio_out_emulated    = emulator_pio_out_emulated,
	.get_segment         = emulator_get_segment,
	.set_segment         = emulator_set_segment,
	.get_cached_segment_base = emulator_get_cached_segment_base,
	.get_gdt             = emulator_get_gdt,
	.get_idt             = emulator_get_idt,
	.set_gdt             = emulator_set_gdt,
	.set_idt             = emulator_set_idt,
	.get_cr              = emulator_get_cr,
	.set_cr              = emulator_set_cr,
	.cpl                 = emulator_get_cpl,
	.get_dr              = emulator_get_dr,
	.set_dr              = emulator_set_dr,
	.set_msr             = emulator_set_msr,
	.get_msr             = emulator_get_msr,
	.check_pmc           = emulator_check_pmc,
	.read_pmc            = emulator_read_pmc,
	.halt                = emulator_halt,
	.wbinvd              = emulator_wbinvd,
	.fix_hypercall       = emulator_fix_hypercall,
	.get_fpu             = emulator_get_fpu,
	.put_fpu             = emulator_put_fpu,
	.intercept           = emulator_intercept,
	.get_cpuid           = emulator_get_cpuid,
};

static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask)
{
	u32 int_shadow = kvm_x86_ops->get_interrupt_shadow(vcpu);
	/*
	 * an "sti; sti" sequence only disables interrupts for the first
	 * instruction.  So, if the last instruction, be it emulated or
	 * not, left the system with the INT_STI flag enabled, it
	 * means that the last instruction is an sti.  We should not
	 * leave the flag on in this case.  The same goes for mov ss.
	 */
	if (int_shadow & mask)
		mask = 0;
	if (unlikely(int_shadow || mask)) {
		kvm_x86_ops->set_interrupt_shadow(vcpu, mask);
		if (!mask)
			kvm_make_request(KVM_REQ_EVENT, vcpu);
	}
}

static bool inject_emulated_exception(struct kvm_vcpu *vcpu)
{
	struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;

	if (ctxt->exception.vector == PF_VECTOR)
		return kvm_propagate_fault(vcpu, &ctxt->exception);

	if (ctxt->exception.error_code_valid)
		kvm_queue_exception_e(vcpu, ctxt->exception.vector,
				      ctxt->exception.error_code);
	else
		kvm_queue_exception(vcpu, ctxt->exception.vector);
	return false;
}

static void init_emulate_ctxt(struct kvm_vcpu *vcpu)
{
	struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
	int cs_db, cs_l;

	kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);

	ctxt->eflags = kvm_get_rflags(vcpu);
	ctxt->eip = kvm_rip_read(vcpu);
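	/* Derive the emulator mode from CR0.PE, EFLAGS.VM, EFER/CS.L and CS.D. */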
	ctxt->mode = (!is_protmode(vcpu))		? X86EMUL_MODE_REAL :
		     (ctxt->eflags & X86_EFLAGS_VM)	? X86EMUL_MODE_VM86 :
		     (cs_l && is_long_mode(vcpu))	? X86EMUL_MODE_PROT64 :
		     cs_db				? X86EMUL_MODE_PROT32 :
							  X86EMUL_MODE_PROT16;
	ctxt->guest_mode = is_guest_mode(vcpu);

	init_decode_cache(ctxt);
	vcpu->arch.emulate_regs_need_sync_from_vcpu = false;
}

int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip)
{
	struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
	int ret;

	init_emulate_ctxt(vcpu);

	ctxt->op_bytes = 2;
	ctxt->ad_bytes = 2;
	ctxt->_eip = ctxt->eip + inc_eip;
	ret = emulate_int_real(ctxt, irq);

	if (ret != X86EMUL_CONTINUE)
		return EMULATE_FAIL;

	ctxt->eip = ctxt->_eip;
	kvm_rip_write(vcpu, ctxt->eip);
	kvm_set_rflags(vcpu, ctxt->eflags);

	if (irq == NMI_VECTOR)
		vcpu->arch.nmi_pending = 0;
	else
		vcpu->arch.interrupt.pending = false;

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvm_inject_realmode_interrupt);

static int handle_emulation_failure(struct kvm_vcpu *vcpu)
{
	int r = EMULATE_DONE;

	++vcpu->stat.insn_emulation_fail;
	trace_kvm_emulate_insn_failed(vcpu);
	if (!is_guest_mode(vcpu) && kvm_x86_ops->get_cpl(vcpu) == 0) {
		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
		vcpu->run->internal.ndata = 0;
		r = EMULATE_FAIL;
	}
	kvm_queue_exception(vcpu, UD_VECTOR);

	return r;
}

static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t cr2,
				  bool write_fault_to_shadow_pgtable,
				  int emulation_type)
{
	gpa_t gpa = cr2;
	pfn_t pfn;

	if (emulation_type & EMULTYPE_NO_REEXECUTE)
		return false;

	if (!vcpu->arch.mmu.direct_map) {
		/*
		 * Write permission should be allowed since only
		 * write access need to be emulated.
		 */
		gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2, NULL);

		/*
		 * If the mapping is invalid in guest, let cpu retry
		 * it to generate fault.
		 */
		if (gpa == UNMAPPED_GVA)
			return true;
	}

	/*
	 * Do not retry the unhandleable instruction if it faults on the
	 * readonly host memory, otherwise it will go into an infinite loop:
	 * retry instruction -> write #PF -> emulation fail -> retry
	 * instruction -> ...
	 */
	pfn = gfn_to_pfn(vcpu->kvm, gpa_to_gfn(gpa));

	/*
	 * If the instruction failed on the error pfn, it can not be fixed,
	 * report the error to userspace.
	 */
	if (is_error_noslot_pfn(pfn))
		return false;

	kvm_release_pfn_clean(pfn);

	/* The instructions are well-emulated on direct mmu. */
	if (vcpu->arch.mmu.direct_map) {
		unsigned int indirect_shadow_pages;

		spin_lock(&vcpu->kvm->mmu_lock);
		indirect_shadow_pages = vcpu->kvm->arch.indirect_shadow_pages;
		spin_unlock(&vcpu->kvm->mmu_lock);

		if (indirect_shadow_pages)
			kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));

		return true;
	}

	/*
	 * if emulation was due to access to shadowed page table
	 * and it failed try to unshadow page and re-enter the
	 * guest to let CPU execute the instruction.
	 */
	kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));

	/*
	 * If the access faults on its page table, it can not
	 * be fixed by unprotecting shadow page and it should
	 * be reported to userspace.
	 */
	return !write_fault_to_shadow_pgtable;
}

static bool retry_instruction(struct x86_emulate_ctxt *ctxt,
			      unsigned long cr2, int emulation_type)
{
	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
	unsigned long last_retry_eip, last_retry_addr, gpa = cr2;

	last_retry_eip = vcpu->arch.last_retry_eip;
	last_retry_addr = vcpu->arch.last_retry_addr;

	/*
	 * If the emulation is caused by #PF and it is non-page_table
	 * writing instruction, it means the VM-EXIT is caused by shadow
	 * page protected, we can zap the shadow page and retry this
	 * instruction directly.
	 *
	 * Note: if the guest uses a non-page-table modifying instruction
	 * on the PDE that points to the instruction, then we will unmap
	 * the instruction and go to an infinite loop.  So, we cache the
	 * last retried eip and the last fault address, if we meet the eip
	 * and the address again, we can break out of the potential infinite
	 * loop.
	 */
	vcpu->arch.last_retry_eip = vcpu->arch.last_retry_addr = 0;

	if (!(emulation_type & EMULTYPE_RETRY))
		return false;

	if (x86_page_table_writing_insn(ctxt))
		return false;

	if (ctxt->eip == last_retry_eip && last_retry_addr == cr2)
		return false;

	vcpu->arch.last_retry_eip = ctxt->eip;
	vcpu->arch.last_retry_addr = cr2;

	if (!vcpu->arch.mmu.direct_map)
		gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2, NULL);

	kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));

	return true;
}

static int complete_emulated_mmio(struct kvm_vcpu *vcpu);
static int complete_emulated_pio(struct kvm_vcpu *vcpu);

static int kvm_vcpu_check_hw_bp(unsigned long addr, u32 type, u32 dr7,
				unsigned long *db)
{
	u32 dr6 = 0;
	int i;
	u32 enable, rwlen;

	enable = dr7;
	rwlen = dr7 >> 16;
	for (i = 0; i < 4; i++, enable >>= 2, rwlen >>= 4)
		if ((enable & 3) && (rwlen & 15) == type && db[i] == addr)
			dr6 |= (1 << i);
	return dr6;
}

static void kvm_vcpu_check_singlestep(struct kvm_vcpu *vcpu, unsigned long rflags, int *r)
{
	struct kvm_run *kvm_run = vcpu->run;

	/*
	 * rflags is the old, "raw" value of the flags.  The new value has
	 * not been saved yet.
	 *
	 * This is correct even for TF set by the guest, because "the
	 * processor will not generate this exception after the instruction
	 * that sets the TF flag".
	 */
	if (unlikely(rflags & X86_EFLAGS_TF)) {
		if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
			kvm_run->debug.arch.dr6 = DR6_BS | DR6_FIXED_1 |
						  DR6_RTM;
			kvm_run->debug.arch.pc = vcpu->arch.singlestep_rip;
			kvm_run->debug.arch.exception = DB_VECTOR;
			kvm_run->exit_reason = KVM_EXIT_DEBUG;
			*r = EMULATE_USER_EXIT;
		} else {
			vcpu->arch.emulate_ctxt.eflags &= ~X86_EFLAGS_TF;
			/*
			 * "Certain debug exceptions may clear bit 0-3.  The
			 * remaining contents of the DR6 register are never
			 * cleared by the processor".
			 */
			vcpu->arch.dr6 &= ~15;
			vcpu->arch.dr6 |= DR6_BS | DR6_RTM;
			kvm_queue_exception(vcpu, DB_VECTOR);
		}
	}
}

static bool kvm_vcpu_check_breakpoint(struct kvm_vcpu *vcpu, int *r)
{
	struct kvm_run *kvm_run = vcpu->run;
	unsigned long eip = vcpu->arch.emulate_ctxt.eip;
	u32 dr6 = 0;

	if (unlikely(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) &&
	    (vcpu->arch.guest_debug_dr7 & DR7_BP_EN_MASK)) {
		dr6 = kvm_vcpu_check_hw_bp(eip, 0,
					   vcpu->arch.guest_debug_dr7,
					   vcpu->arch.eff_db);
		if (dr6 != 0) {
			kvm_run->debug.arch.dr6 = dr6 | DR6_FIXED_1 | DR6_RTM;
			kvm_run->debug.arch.pc = kvm_rip_read(vcpu) +
				get_segment_base(vcpu, VCPU_SREG_CS);
			kvm_run->debug.arch.exception = DB_VECTOR;
			kvm_run->exit_reason = KVM_EXIT_DEBUG;
			*r = EMULATE_USER_EXIT;
			return true;
		}
	}

	if (unlikely(vcpu->arch.dr7 & DR7_BP_EN_MASK) &&
	    !(kvm_get_rflags(vcpu) & X86_EFLAGS_RF)) {
		dr6 = kvm_vcpu_check_hw_bp(eip, 0,
					   vcpu->arch.dr7,
					   vcpu->arch.db);
		if (dr6 != 0) {
			vcpu->arch.dr6 &= ~15;
			vcpu->arch.dr6 |= dr6 | DR6_RTM;
			kvm_queue_exception(vcpu, DB_VECTOR);
			*r = EMULATE_DONE;
			return true;
		}
	}

	return false;
}

int x86_emulate_instruction(struct kvm_vcpu *vcpu,
			    unsigned long cr2,
			    int emulation_type,
			    void *insn,
			    int insn_len)
{
	int r;
	struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
	bool writeback = true;
	bool write_fault_to_spt = vcpu->arch.write_fault_to_shadow_pgtable;

	/*
	 * Clear write_fault_to_shadow_pgtable here to ensure it is
	 * never reused.
	 */
	vcpu->arch.write_fault_to_shadow_pgtable = false;
	kvm_clear_exception_queue(vcpu);

	if (!(emulation_type & EMULTYPE_NO_DECODE)) {
		init_emulate_ctxt(vcpu);

		/*
		 * We will reenter on the same instruction since
		 * we do not set complete_userspace_io.  This does not
		 * handle watchpoints yet, those would be handled in
		 * the emulate_ops.
		 */
		if (kvm_vcpu_check_breakpoint(vcpu, &r))
			return r;

		ctxt->interruptibility = 0;
		ctxt->have_exception = false;
		ctxt->exception.vector = -1;
		ctxt->perm_ok = false;

		ctxt->ud = emulation_type & EMULTYPE_TRAP_UD;

		r = x86_decode_insn(ctxt, insn, insn_len);

		trace_kvm_emulate_insn_start(vcpu);
		++vcpu->stat.insn_emulation;
		if (r != EMULATION_OK) {
			if (emulation_type & EMULTYPE_TRAP_UD)
				return EMULATE_FAIL;
			if (reexecute_instruction(vcpu, cr2, write_fault_to_spt,
						  emulation_type))
				return EMULATE_DONE;
			if (emulation_type & EMULTYPE_SKIP)
				return EMULATE_FAIL;
			return handle_emulation_failure(vcpu);
		}
	}

	if (emulation_type & EMULTYPE_SKIP) {
		kvm_rip_write(vcpu, ctxt->_eip);
		if (ctxt->eflags & X86_EFLAGS_RF)
			kvm_set_rflags(vcpu, ctxt->eflags & ~X86_EFLAGS_RF);
		return EMULATE_DONE;
	}

	if (retry_instruction(ctxt, cr2, emulation_type))
		return EMULATE_DONE;

	/*
	 * This is needed for the vmware backdoor interface to work since
	 * it changes register values during an IO operation.
	 */
	if (vcpu->arch.emulate_regs_need_sync_from_vcpu) {
		vcpu->arch.emulate_regs_need_sync_from_vcpu = false;
		emulator_invalidate_register_cache(ctxt);
	}

restart:
	r = x86_emulate_insn(ctxt);

	if (r == EMULATION_INTERCEPTED)
		return EMULATE_DONE;

	if (r == EMULATION_FAILED) {
		if (reexecute_instruction(vcpu, cr2, write_fault_to_spt,
					  emulation_type))
			return EMULATE_DONE;

		return handle_emulation_failure(vcpu);
	}

	if (ctxt->have_exception) {
		r = EMULATE_DONE;
		if (inject_emulated_exception(vcpu))
			return r;
	} else if (vcpu->arch.pio.count) {
		if (!vcpu->arch.pio.in) {
			/* FIXME: return into emulator if single-stepping.  */
			vcpu->arch.pio.count = 0;
		} else {
			writeback = false;
			vcpu->arch.complete_userspace_io = complete_emulated_pio;
		}
		r = EMULATE_USER_EXIT;
	} else if (vcpu->mmio_needed) {
		if (!vcpu->mmio_is_write)
			writeback = false;
		r = EMULATE_USER_EXIT;
		vcpu->arch.complete_userspace_io = complete_emulated_mmio;
	} else if (r == EMULATION_RESTART)
		goto restart;
	else
		r = EMULATE_DONE;

	if (writeback) {
		unsigned long rflags = kvm_x86_ops->get_rflags(vcpu);

		toggle_interruptibility(vcpu, ctxt->interruptibility);
		vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
		kvm_rip_write(vcpu, ctxt->eip);
		if (r == EMULATE_DONE)
			kvm_vcpu_check_singlestep(vcpu, rflags, &r);
		__kvm_set_rflags(vcpu, ctxt->eflags);

		/*
		 * For STI, interrupts are shadowed; so KVM_REQ_EVENT will
		 * do nothing, and it will be requested again as soon as
		 * the shadow expires.  But we still need to check here,
		 * because POPF has no interrupt shadow.
		 */
		if (unlikely((ctxt->eflags & ~rflags) & X86_EFLAGS_IF))
			kvm_make_request(KVM_REQ_EVENT, vcpu);
	} else
		vcpu->arch.emulate_regs_need_sync_to_vcpu = true;

	return r;
}
EXPORT_SYMBOL_GPL(x86_emulate_instruction);

int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, unsigned short port)
{
	unsigned long val = kvm_register_read(vcpu, VCPU_REGS_RAX);
	int ret = emulator_pio_out_emulated(&vcpu->arch.emulate_ctxt,
					    size, port, &val, 1);
	/* do not return to emulator after return from userspace */
	vcpu->arch.pio.count = 0;
	return ret;
}
EXPORT_SYMBOL_GPL(kvm_fast_pio_out);

static void tsc_bad(void *info)
{
	__this_cpu_write(cpu_tsc_khz, 0);
}

static void tsc_khz_changed(void *data)
{
	struct cpufreq_freqs *freq = data;
	unsigned long khz = 0;

	if (data)
		khz = freq->new;
	else if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
		khz = cpufreq_quick_get(raw_smp_processor_id());
	if (!khz)
		khz = tsc_khz;
	__this_cpu_write(cpu_tsc_khz, khz);
}

static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
				     void *data)
{
	struct cpufreq_freqs *freq = data;
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i, send_ipi = 0;

	/*
	 * We allow guests to temporarily run on slowing clocks,
	 * provided we notify them after, or to run on accelerating
	 * clocks, provided we notify them before.  Thus time never
	 * goes backwards.
	 *
	 * However, we have a problem.  We can't atomically update
	 * the frequency of a given CPU from this function; it is
	 * merely a notifier, which can be called from any CPU.
	 * Changing the TSC frequency at arbitrary points in time
	 * requires a recomputation of local variables related to
	 * the TSC for each VCPU.  We must flag these local variables
	 * to be updated and be sure the update takes place with the
	 * new frequency before any guests proceed.
	 *
	 * Unfortunately, the combination of hotplug CPU and frequency
	 * change creates an intractable locking scenario; the order
	 * of when these callouts happen is undefined with respect to
	 * CPU hotplug, and they can race with each other.  As such,
	 * merely setting per_cpu(cpu_tsc_khz) = X during a hotadd is
	 * undefined; you can actually have a CPU frequency change take
	 * place in between the computation of X and the setting of the
	 * variable.  To protect against this problem, all updates of
	 * the per_cpu tsc_khz variable are done in an interrupt
	 * protected IPI, and all callers wishing to update the value
	 * must wait for a synchronous IPI to complete (which is trivial
	 * if the caller is on the CPU already).  This establishes the
	 * necessary total order on variable updates.
	 *
	 * Note that because a guest time update may take place
	 * anytime after the setting of the VCPU's request bit, the
	 * correct TSC value must be set before the request.  However,
	 * to ensure the update actually makes it to any guest which
	 * starts running in hardware virtualization between the set
	 * and the acquisition of the spinlock, we must also ping the
	 * CPU after setting the request bit.
	 */
	if (val == CPUFREQ_PRECHANGE && freq->old > freq->new)
		return 0;
	if (val == CPUFREQ_POSTCHANGE && freq->old < freq->new)
		return 0;

	smp_call_function_single(freq->cpu, tsc_khz_changed, freq, 1);

	spin_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list) {
		kvm_for_each_vcpu(i, vcpu, kvm) {
			if (vcpu->cpu != freq->cpu)
				continue;
			kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
			if (vcpu->cpu != smp_processor_id())
				send_ipi = 1;
		}
	}
	spin_unlock(&kvm_lock);

	if (freq->old < freq->new && send_ipi) {
		/*
		 * We upscale the frequency.  We must make sure the guest
		 * doesn't see old kvmclock values while running with the
		 * new frequency; otherwise we risk the guest seeing time
		 * go backwards.
		 *
		 * In case we update the frequency for another cpu
		 * (which might be in guest context) send an interrupt
		 * to kick the cpu out of guest context.  Next time
		 * guest context is entered kvmclock will be updated,
		 * so the guest will not see stale values.
		 */
		smp_call_function_single(freq->cpu, tsc_khz_changed, freq, 1);
	}
	return 0;
}

static struct notifier_block kvmclock_cpufreq_notifier_block = {
	.notifier_call  = kvmclock_cpufreq_notifier
};

static int kvmclock_cpu_notifier(struct notifier_block *nfb,
					unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	switch (action) {
	case CPU_ONLINE:
	case CPU_DOWN_FAILED:
		smp_call_function_single(cpu, tsc_khz_changed, NULL, 1);
		break;
	case CPU_DOWN_PREPARE:
		smp_call_function_single(cpu, tsc_bad, NULL, 1);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block kvmclock_cpu_notifier_block = {
	.notifier_call  = kvmclock_cpu_notifier,
	.priority = -INT_MAX
};

static void kvm_timer_init(void)
{
	int cpu;

	max_tsc_khz = tsc_khz;

	cpu_notifier_register_begin();
	if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
#ifdef CONFIG_CPU_FREQ
		struct cpufreq_policy policy;
		memset(&policy, 0, sizeof(policy));
		cpu = get_cpu();
		cpufreq_get_policy(&policy, cpu);
		if (policy.cpuinfo.max_freq)
			max_tsc_khz = policy.cpuinfo.max_freq;
		put_cpu();
#endif
		cpufreq_register_notifier(&kvmclock_cpufreq_notifier_block,
					  CPUFREQ_TRANSITION_NOTIFIER);
	}
	pr_debug("kvm: max_tsc_khz = %ld\n", max_tsc_khz);
	for_each_online_cpu(cpu)
		smp_call_function_single(cpu, tsc_khz_changed, NULL, 1);

	__register_hotcpu_notifier(&kvmclock_cpu_notifier_block);
	cpu_notifier_register_done();
}

static DEFINE_PER_CPU(struct kvm_vcpu *, current_vcpu);

int kvm_is_in_guest(void)
{
	return __this_cpu_read(current_vcpu) != NULL;
}

static int kvm_is_user_mode(void)
{
	int user_mode = 3;

	if (__this_cpu_read(current_vcpu))
		user_mode = kvm_x86_ops->get_cpl(__this_cpu_read(current_vcpu));

	return user_mode != 0;
}

static unsigned long kvm_get_guest_ip(void)
{
	unsigned long ip = 0;

	if (__this_cpu_read(current_vcpu))
		ip = kvm_rip_read(__this_cpu_read(current_vcpu));

	return ip;
}

static struct perf_guest_info_callbacks kvm_guest_cbs = {
	.is_in_guest		= kvm_is_in_guest,
	.is_user_mode		= kvm_is_user_mode,
	.get_guest_ip		= kvm_get_guest_ip,
};

void kvm_before_handle_nmi(struct kvm_vcpu *vcpu)
{
	__this_cpu_write(current_vcpu, vcpu);
}
EXPORT_SYMBOL_GPL(kvm_before_handle_nmi);

void kvm_after_handle_nmi(struct kvm_vcpu *vcpu)
{
	__this_cpu_write(current_vcpu, NULL);
}
EXPORT_SYMBOL_GPL(kvm_after_handle_nmi);

static void kvm_set_mmio_spte_mask(void)
{
	u64 mask;
	int maxphyaddr = boot_cpu_data.x86_phys_bits;

	/*
	 * Set the reserved bits and the present bit of a paging-structure
	 * entry to generate page fault with PFER.RSV = 1.
	 */
	/* Mask the reserved physical address bits. */
	mask = rsvd_bits(maxphyaddr, 51);

	/* Bit 62 is always reserved for 32bit host. */
	mask |= 0x3ull << 62;

	/* Set the present bit. */
	mask |= 1ull;

#ifdef CONFIG_X86_64
	/*
	 * If reserved bit is not supported, clear the present bit to disable
	 * mmio page fault.
	 */
	if (maxphyaddr == 52)
		mask &= ~1ull;
#endif

	kvm_mmu_set_mmio_spte_mask(mask);
}

#ifdef CONFIG_X86_64
static void pvclock_gtod_update_fn(struct work_struct *work)
{
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i;

	spin_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list)
		kvm_for_each_vcpu(i, vcpu, kvm)
			kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
	atomic_set(&kvm_guest_has_master_clock, 0);
	spin_unlock(&kvm_lock);
}

static DECLARE_WORK(pvclock_gtod_work, pvclock_gtod_update_fn);

/*
 * Notification about pvclock gtod data update.
 */
static int pvclock_gtod_notify(struct notifier_block *nb, unsigned long unused,
			       void *priv)
{
	struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
	struct timekeeper *tk = priv;

	update_pvclock_gtod(tk);

	/*
	 * Disable the master clock if the host does not trust, or does not
	 * use, the TSC clocksource.
	 */
	if (gtod->clock.vclock_mode != VCLOCK_TSC &&
	    atomic_read(&kvm_guest_has_master_clock) != 0)
		queue_work(system_long_wq, &pvclock_gtod_work);

	return 0;
}

static struct notifier_block pvclock_gtod_notifier = {
	.notifier_call = pvclock_gtod_notify,
};
#endif

int kvm_arch_init(void *opaque)
{
	int r;
	struct kvm_x86_ops *ops = opaque;

	if (kvm_x86_ops) {
		printk(KERN_ERR "kvm: already loaded the other module\n");
		r = -EEXIST;
		goto out;
	}

	if (!ops->cpu_has_kvm_support()) {
		printk(KERN_ERR "kvm: no hardware support\n");
		r = -EOPNOTSUPP;
		goto out;
	}
	if (ops->disabled_by_bios()) {
		printk(KERN_ERR "kvm: disabled by bios\n");
		r = -EOPNOTSUPP;
		goto out;
	}

	r = -ENOMEM;
	shared_msrs = alloc_percpu(struct kvm_shared_msrs);
	if (!shared_msrs) {
		printk(KERN_ERR "kvm: failed to allocate percpu kvm_shared_msrs\n");
		goto out;
	}

	r = kvm_mmu_module_init();
	if (r)
		goto out_free_percpu;

	kvm_set_mmio_spte_mask();

	kvm_x86_ops = ops;
	kvm_init_msr_list();

	kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK,
			PT_DIRTY_MASK, PT64_NX_MASK, 0);

	kvm_timer_init();

	perf_register_guest_info_callbacks(&kvm_guest_cbs);

	if (cpu_has_xsave)
		host_xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);

	kvm_lapic_init();
#ifdef CONFIG_X86_64
	pvclock_gtod_register_notifier(&pvclock_gtod_notifier);
#endif

	return 0;

out_free_percpu:
	free_percpu(shared_msrs);
out:
	return r;
}

void kvm_arch_exit(void)
{
	perf_unregister_guest_info_callbacks(&kvm_guest_cbs);

	if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
		cpufreq_unregister_notifier(&kvmclock_cpufreq_notifier_block,
					    CPUFREQ_TRANSITION_NOTIFIER);
	unregister_hotcpu_notifier(&kvmclock_cpu_notifier_block);
#ifdef CONFIG_X86_64
	pvclock_gtod_unregister_notifier(&pvclock_gtod_notifier);
#endif
	kvm_x86_ops = NULL;
	kvm_mmu_module_exit();
	free_percpu(shared_msrs);
}

int kvm_emulate_halt(struct kvm_vcpu *vcpu)
{
	++vcpu->stat.halt_exits;
	if (irqchip_in_kernel(vcpu->kvm)) {
		vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
		return 1;
	} else {
		vcpu->run->exit_reason = KVM_EXIT_HLT;
		return 0;
	}
}
EXPORT_SYMBOL_GPL(kvm_emulate_halt);

int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
{
	u64 param, ingpa, outgpa, ret;
	uint16_t code, rep_idx, rep_cnt, res = HV_STATUS_SUCCESS, rep_done = 0;
	bool fast, longmode;

	/*
	 * hypercall generates UD from non zero cpl and
	 * real mode per HYPER-V spec
	 */
	if (kvm_x86_ops->get_cpl(vcpu) != 0 || !is_protmode(vcpu)) {
		kvm_queue_exception(vcpu, UD_VECTOR);
		return 0;
	}

	longmode = is_64_bit_mode(vcpu);

	if (!longmode) {
		param = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDX) << 32) |
			(kvm_register_read(vcpu, VCPU_REGS_RAX) & 0xffffffff);
		ingpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RBX) << 32) |
			(kvm_register_read(vcpu, VCPU_REGS_RCX) & 0xffffffff);
		outgpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDI) << 32) |
			(kvm_register_read(vcpu, VCPU_REGS_RSI) & 0xffffffff);
	}
#ifdef CONFIG_X86_64
	else {
		param = kvm_register_read(vcpu, VCPU_REGS_RCX);
		ingpa = kvm_register_read(vcpu, VCPU_REGS_RDX);
		outgpa = kvm_register_read(vcpu, VCPU_REGS_R8);
	}
#endif

	code = param & 0xffff;
	fast = (param >> 16) & 0x1;
	rep_cnt = (param >> 32) & 0xfff;
	rep_idx = (param >> 48) & 0xfff;

	trace_kvm_hv_hypercall(code, fast, rep_cnt, rep_idx, ingpa, outgpa);

	switch (code) {
	case HV_X64_HV_NOTIFY_LONG_SPIN_WAIT:
		kvm_vcpu_on_spin(vcpu);
		break;
	default:
		res = HV_STATUS_INVALID_HYPERCALL_CODE;
		break;
	}

	ret = res | (((u64)rep_done & 0xfff) << 32);
	if (longmode) {
		kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
	} else {
		kvm_register_write(vcpu, VCPU_REGS_RDX, ret >> 32);
		kvm_register_write(vcpu, VCPU_REGS_RAX, ret & 0xffffffff);
	}

	return 1;
}

/*
 * kvm_pv_kick_cpu_op:  Kick a vcpu.
 *
 * @apicid - apicid of vcpu to be kicked.
 */
static void kvm_pv_kick_cpu_op(struct kvm *kvm, unsigned long flags, int apicid)
{
	struct kvm_lapic_irq lapic_irq;

	lapic_irq.shorthand = 0;
	lapic_irq.dest_mode = 0;
	lapic_irq.dest_id = apicid;

	lapic_irq.delivery_mode = APIC_DM_REMRD;
	kvm_irq_delivery_to_apic(kvm, 0, &lapic_irq, NULL);
}

int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
{
	unsigned long nr, a0, a1, a2, a3, ret;
	int op_64_bit, r = 1;

	if (kvm_hv_hypercall_enabled(vcpu->kvm))
		return kvm_hv_hypercall(vcpu);

	nr = kvm_register_read(vcpu, VCPU_REGS_RAX);
	a0 = kvm_register_read(vcpu, VCPU_REGS_RBX);
	a1 = kvm_register_read(vcpu, VCPU_REGS_RCX);
	a2 = kvm_register_read(vcpu, VCPU_REGS_RDX);
	a3 = kvm_register_read(vcpu, VCPU_REGS_RSI);

	trace_kvm_hypercall(nr, a0, a1, a2, a3);

	op_64_bit = is_64_bit_mode(vcpu);
	if (!op_64_bit) {
		nr &= 0xFFFFFFFF;
		a0 &= 0xFFFFFFFF;
		a1 &= 0xFFFFFFFF;
		a2 &= 0xFFFFFFFF;
		a3 &= 0xFFFFFFFF;
	}

	if (kvm_x86_ops->get_cpl(vcpu) != 0) {
		ret = -KVM_EPERM;
		goto out;
	}

	switch (nr) {
	case KVM_HC_VAPIC_POLL_IRQ:
		ret = 0;
		break;
	case KVM_HC_KICK_CPU:
		kvm_pv_kick_cpu_op(vcpu->kvm, a0, a1);
		ret = 0;
		break;
	default:
		ret = -KVM_ENOSYS;
		break;
	}
out:
	if (!op_64_bit)
		ret = (u32)ret;
	kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
	++vcpu->stat.hypercalls;
	return r;
}
EXPORT_SYMBOL_GPL(kvm_emulate_hypercall);

static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt)
{
	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
	char instruction[3];
	unsigned long rip = kvm_rip_read(vcpu);

	kvm_x86_ops->patch_hypercall(vcpu, instruction);

	return emulator_write_emulated(ctxt, rip, instruction, 3, NULL);
}

/*
 * Check if userspace requested an interrupt window, and that the
 * interrupt window is open.
 *
 * No need to exit to userspace if we already have an interrupt queued.
*/ static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu) { return (!irqchip_in_kernel(vcpu->kvm) && !kvm_cpu_has_interrupt(vcpu) && vcpu->run->request_interrupt_window && kvm_arch_interrupt_allowed(vcpu)); } static void post_kvm_run_save(struct kvm_vcpu *vcpu) { struct kvm_run *kvm_run = vcpu->run; kvm_run->if_flag = (kvm_get_rflags(vcpu) & X86_EFLAGS_IF) != 0; kvm_run->cr8 = kvm_get_cr8(vcpu); kvm_run->apic_base = kvm_get_apic_base(vcpu); if (irqchip_in_kernel(vcpu->kvm)) kvm_run->ready_for_interrupt_injection = 1; else kvm_run->ready_for_interrupt_injection = kvm_arch_interrupt_allowed(vcpu) && !kvm_cpu_has_interrupt(vcpu) && !kvm_event_needs_reinjection(vcpu); } static void update_cr8_intercept(struct kvm_vcpu *vcpu) { int max_irr, tpr; if (!kvm_x86_ops->update_cr8_intercept) return; if (!vcpu->arch.apic) return; if (!vcpu->arch.apic->vapic_addr) max_irr = kvm_lapic_find_highest_irr(vcpu); else max_irr = -1; if (max_irr != -1) max_irr >>= 4; tpr = kvm_lapic_get_cr8(vcpu); kvm_x86_ops->update_cr8_intercept(vcpu, tpr, max_irr); } static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win) { int r; /* try to reinject previous events if any */ if (vcpu->arch.exception.pending) { trace_kvm_inj_exception(vcpu->arch.exception.nr, vcpu->arch.exception.has_error_code, vcpu->arch.exception.error_code); if (exception_type(vcpu->arch.exception.nr) == EXCPT_FAULT) __kvm_set_rflags(vcpu, kvm_get_rflags(vcpu) | X86_EFLAGS_RF); kvm_x86_ops->queue_exception(vcpu, vcpu->arch.exception.nr, vcpu->arch.exception.has_error_code, vcpu->arch.exception.error_code, vcpu->arch.exception.reinject); return 0; } if (vcpu->arch.nmi_injected) { kvm_x86_ops->set_nmi(vcpu); return 0; } if (vcpu->arch.interrupt.pending) { kvm_x86_ops->set_irq(vcpu); return 0; } if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events) { r = kvm_x86_ops->check_nested_events(vcpu, req_int_win); if (r != 0) return r; } /* try to inject new event if pending */ if (vcpu->arch.nmi_pending) { if (kvm_x86_ops->nmi_allowed(vcpu)) { --vcpu->arch.nmi_pending; vcpu->arch.nmi_injected = true; kvm_x86_ops->set_nmi(vcpu); } } else if (kvm_cpu_has_injectable_intr(vcpu)) { /* * Because interrupts can be injected asynchronously, we are * calling check_nested_events again here to avoid a race condition. * See https://lkml.org/lkml/2014/7/2/60 for discussion about this * proposal and current concerns. Perhaps we should be setting * KVM_REQ_EVENT only on certain events and not unconditionally? */ if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events) { r = kvm_x86_ops->check_nested_events(vcpu, req_int_win); if (r != 0) return r; } if (kvm_x86_ops->interrupt_allowed(vcpu)) { kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu), false); kvm_x86_ops->set_irq(vcpu); } } return 0; } static void process_nmi(struct kvm_vcpu *vcpu) { unsigned limit = 2; /* * x86 is limited to one NMI running, and one NMI pending after it. * If an NMI is already in progress, limit further NMIs to just one. * Otherwise, allow two (and we'll inject the first one immediately). 
*/ if (kvm_x86_ops->get_nmi_mask(vcpu) || vcpu->arch.nmi_injected) limit = 1; vcpu->arch.nmi_pending += atomic_xchg(&vcpu->arch.nmi_queued, 0); vcpu->arch.nmi_pending = min(vcpu->arch.nmi_pending, limit); kvm_make_request(KVM_REQ_EVENT, vcpu); } static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu) { u64 eoi_exit_bitmap[4]; u32 tmr[8]; if (!kvm_apic_hw_enabled(vcpu->arch.apic)) return; memset(eoi_exit_bitmap, 0, 32); memset(tmr, 0, 32); kvm_ioapic_scan_entry(vcpu, eoi_exit_bitmap, tmr); kvm_x86_ops->load_eoi_exitmap(vcpu, eoi_exit_bitmap); kvm_apic_update_tmr(vcpu, tmr); } /* * Returns 1 to let __vcpu_run() continue the guest execution loop without * exiting to the userspace. Otherwise, the value will be returned to the * userspace. */ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) { int r; bool req_int_win = !irqchip_in_kernel(vcpu->kvm) && vcpu->run->request_interrupt_window; bool req_immediate_exit = false; if (vcpu->requests) { if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) kvm_mmu_unload(vcpu); if (kvm_check_request(KVM_REQ_MIGRATE_TIMER, vcpu)) __kvm_migrate_timers(vcpu); if (kvm_check_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu)) kvm_gen_update_masterclock(vcpu->kvm); if (kvm_check_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu)) kvm_gen_kvmclock_update(vcpu); if (kvm_check_request(KVM_REQ_CLOCK_UPDATE, vcpu)) { r = kvm_guest_time_update(vcpu); if (unlikely(r)) goto out; } if (kvm_check_request(KVM_REQ_MMU_SYNC, vcpu)) kvm_mmu_sync_roots(vcpu); if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) kvm_x86_ops->tlb_flush(vcpu); if (kvm_check_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu)) { vcpu->run->exit_reason = KVM_EXIT_TPR_ACCESS; r = 0; goto out; } if (kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu)) { vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN; r = 0; goto out; } if (kvm_check_request(KVM_REQ_DEACTIVATE_FPU, vcpu)) { vcpu->fpu_active = 0; kvm_x86_ops->fpu_deactivate(vcpu); } if (kvm_check_request(KVM_REQ_APF_HALT, vcpu)) { /* Page is swapped out. Do synthetic halt */ vcpu->arch.apf.halted = true; r = 1; goto out; } if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu)) record_steal_time(vcpu); if (kvm_check_request(KVM_REQ_NMI, vcpu)) process_nmi(vcpu); if (kvm_check_request(KVM_REQ_PMU, vcpu)) kvm_handle_pmu_event(vcpu); if (kvm_check_request(KVM_REQ_PMI, vcpu)) kvm_deliver_pmi(vcpu); if (kvm_check_request(KVM_REQ_SCAN_IOAPIC, vcpu)) vcpu_scan_ioapic(vcpu); } if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win) { kvm_apic_accept_events(vcpu); if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) { r = 1; goto out; } if (inject_pending_event(vcpu, req_int_win) != 0) req_immediate_exit = true; /* enable NMI/IRQ window open exits if needed */ else if (vcpu->arch.nmi_pending) kvm_x86_ops->enable_nmi_window(vcpu); else if (kvm_cpu_has_injectable_intr(vcpu) || req_int_win) kvm_x86_ops->enable_irq_window(vcpu); if (kvm_lapic_enabled(vcpu)) { /* * Update architecture specific hints for APIC * virtual interrupt delivery. */ if (kvm_x86_ops->hwapic_irr_update) kvm_x86_ops->hwapic_irr_update(vcpu, kvm_lapic_find_highest_irr(vcpu)); update_cr8_intercept(vcpu); kvm_lapic_sync_to_vapic(vcpu); } } r = kvm_mmu_reload(vcpu); if (unlikely(r)) { goto cancel_injection; } preempt_disable(); kvm_x86_ops->prepare_guest_switch(vcpu); if (vcpu->fpu_active) kvm_load_guest_fpu(vcpu); kvm_load_guest_xcr0(vcpu); vcpu->mode = IN_GUEST_MODE; srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); /* We should set ->mode before check ->requests, * see the comment in make_all_cpus_request. 
*/ smp_mb__after_srcu_read_unlock(); local_irq_disable(); if (vcpu->mode == EXITING_GUEST_MODE || vcpu->requests || need_resched() || signal_pending(current)) { vcpu->mode = OUTSIDE_GUEST_MODE; smp_wmb(); local_irq_enable(); preempt_enable(); vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); r = 1; goto cancel_injection; } if (req_immediate_exit) smp_send_reschedule(vcpu->cpu); kvm_guest_enter(); if (unlikely(vcpu->arch.switch_db_regs)) { set_debugreg(0, 7); set_debugreg(vcpu->arch.eff_db[0], 0); set_debugreg(vcpu->arch.eff_db[1], 1); set_debugreg(vcpu->arch.eff_db[2], 2); set_debugreg(vcpu->arch.eff_db[3], 3); set_debugreg(vcpu->arch.dr6, 6); } trace_kvm_entry(vcpu->vcpu_id); kvm_x86_ops->run(vcpu); /* * Do this here before restoring debug registers on the host. And * since we do this before handling the vmexit, a DR access vmexit * can (a) read the correct value of the debug registers, (b) set * KVM_DEBUGREG_WONT_EXIT again. */ if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)) { int i; WARN_ON(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP); kvm_x86_ops->sync_dirty_debug_regs(vcpu); for (i = 0; i < KVM_NR_DB_REGS; i++) vcpu->arch.eff_db[i] = vcpu->arch.db[i]; } /* * If the guest has used debug registers, at least dr7 * will be disabled while returning to the host. * If we don't have active breakpoints in the host, we don't * care about the messed up debug address registers. But if * we have some of them active, restore the old state. */ if (hw_breakpoint_active()) hw_breakpoint_restore(); vcpu->arch.last_guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu, native_read_tsc()); vcpu->mode = OUTSIDE_GUEST_MODE; smp_wmb(); /* Interrupt is enabled by handle_external_intr() */ kvm_x86_ops->handle_external_intr(vcpu); ++vcpu->stat.exits; /* * We must have an instruction between local_irq_enable() and * kvm_guest_exit(), so the timer interrupt isn't delayed by * the interrupt shadow. The stat.exits increment will do nicely. 
* But we need to prevent reordering, hence this barrier(): */ barrier(); kvm_guest_exit(); preempt_enable(); vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); /* * Profile KVM exit RIPs: */ if (unlikely(prof_on == KVM_PROFILING)) { unsigned long rip = kvm_rip_read(vcpu); profile_hit(KVM_PROFILING, (void *)rip); } if (unlikely(vcpu->arch.tsc_always_catchup)) kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); if (vcpu->arch.apic_attention) kvm_lapic_sync_from_vapic(vcpu); r = kvm_x86_ops->handle_exit(vcpu); return r; cancel_injection: kvm_x86_ops->cancel_injection(vcpu); if (unlikely(vcpu->arch.apic_attention)) kvm_lapic_sync_from_vapic(vcpu); out: return r; } static int __vcpu_run(struct kvm_vcpu *vcpu) { int r; struct kvm *kvm = vcpu->kvm; vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); r = 1; while (r > 0) { if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE && !vcpu->arch.apf.halted) r = vcpu_enter_guest(vcpu); else { srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); kvm_vcpu_block(vcpu); vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) { kvm_apic_accept_events(vcpu); switch(vcpu->arch.mp_state) { case KVM_MP_STATE_HALTED: vcpu->arch.pv.pv_unhalted = false; vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; case KVM_MP_STATE_RUNNABLE: vcpu->arch.apf.halted = false; break; case KVM_MP_STATE_INIT_RECEIVED: break; default: r = -EINTR; break; } } } if (r <= 0) break; clear_bit(KVM_REQ_PENDING_TIMER, &vcpu->requests); if (kvm_cpu_has_pending_timer(vcpu)) kvm_inject_pending_timer_irqs(vcpu); if (dm_request_for_irq_injection(vcpu)) { r = -EINTR; vcpu->run->exit_reason = KVM_EXIT_INTR; ++vcpu->stat.request_irq_exits; } kvm_check_async_pf_completion(vcpu); if (signal_pending(current)) { r = -EINTR; vcpu->run->exit_reason = KVM_EXIT_INTR; ++vcpu->stat.signal_exits; } if (need_resched()) { srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); cond_resched(); vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); } } srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); return r; } static inline int complete_emulated_io(struct kvm_vcpu *vcpu) { int r; vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); r = emulate_instruction(vcpu, EMULTYPE_NO_DECODE); srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); if (r != EMULATE_DONE) return 0; return 1; } static int complete_emulated_pio(struct kvm_vcpu *vcpu) { BUG_ON(!vcpu->arch.pio.count); return complete_emulated_io(vcpu); } /* * Implements the following, as a state machine: * * read: * for each fragment * for each mmio piece in the fragment * write gpa, len * exit * copy data * execute insn * * write: * for each fragment * for each mmio piece in the fragment * write gpa, len * copy data * exit */ static int complete_emulated_mmio(struct kvm_vcpu *vcpu) { struct kvm_run *run = vcpu->run; struct kvm_mmio_fragment *frag; unsigned len; BUG_ON(!vcpu->mmio_needed); /* Complete previous fragment */ frag = &vcpu->mmio_fragments[vcpu->mmio_cur_fragment]; len = min(8u, frag->len); if (!vcpu->mmio_is_write) memcpy(frag->data, run->mmio.data, len); if (frag->len <= 8) { /* Switch to the next fragment. */ frag++; vcpu->mmio_cur_fragment++; } else { /* Go forward to the next mmio piece. */ frag->data += len; frag->gpa += len; frag->len -= len; } if (vcpu->mmio_cur_fragment >= vcpu->mmio_nr_fragments) { vcpu->mmio_needed = 0; /* FIXME: return into emulator if single-stepping. 
*/ if (vcpu->mmio_is_write) return 1; vcpu->mmio_read_completed = 1; return complete_emulated_io(vcpu); } run->exit_reason = KVM_EXIT_MMIO; run->mmio.phys_addr = frag->gpa; if (vcpu->mmio_is_write) memcpy(run->mmio.data, frag->data, min(8u, frag->len)); run->mmio.len = min(8u, frag->len); run->mmio.is_write = vcpu->mmio_is_write; vcpu->arch.complete_userspace_io = complete_emulated_mmio; return 0; } int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) { int r; sigset_t sigsaved; if (!tsk_used_math(current) && init_fpu(current)) return -ENOMEM; if (vcpu->sigset_active) sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved); if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) { kvm_vcpu_block(vcpu); kvm_apic_accept_events(vcpu); clear_bit(KVM_REQ_UNHALT, &vcpu->requests); r = -EAGAIN; goto out; } /* re-sync apic's tpr */ if (!irqchip_in_kernel(vcpu->kvm)) { if (kvm_set_cr8(vcpu, kvm_run->cr8) != 0) { r = -EINVAL; goto out; } } if (unlikely(vcpu->arch.complete_userspace_io)) { int (*cui)(struct kvm_vcpu *) = vcpu->arch.complete_userspace_io; vcpu->arch.complete_userspace_io = NULL; r = cui(vcpu); if (r <= 0) goto out; } else WARN_ON(vcpu->arch.pio.count || vcpu->mmio_needed); r = __vcpu_run(vcpu); out: post_kvm_run_save(vcpu); if (vcpu->sigset_active) sigprocmask(SIG_SETMASK, &sigsaved, NULL); return r; } int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) { if (vcpu->arch.emulate_regs_need_sync_to_vcpu) { /* * We are here if userspace calls get_regs() in the middle of * instruction emulation. Registers state needs to be copied * back from emulation context to vcpu. Userspace shouldn't do * that usually, but some bad designed PV devices (vmware * backdoor interface) need this to work */ emulator_writeback_register_cache(&vcpu->arch.emulate_ctxt); vcpu->arch.emulate_regs_need_sync_to_vcpu = false; } regs->rax = kvm_register_read(vcpu, VCPU_REGS_RAX); regs->rbx = kvm_register_read(vcpu, VCPU_REGS_RBX); regs->rcx = kvm_register_read(vcpu, VCPU_REGS_RCX); regs->rdx = kvm_register_read(vcpu, VCPU_REGS_RDX); regs->rsi = kvm_register_read(vcpu, VCPU_REGS_RSI); regs->rdi = kvm_register_read(vcpu, VCPU_REGS_RDI); regs->rsp = kvm_register_read(vcpu, VCPU_REGS_RSP); regs->rbp = kvm_register_read(vcpu, VCPU_REGS_RBP); #ifdef CONFIG_X86_64 regs->r8 = kvm_register_read(vcpu, VCPU_REGS_R8); regs->r9 = kvm_register_read(vcpu, VCPU_REGS_R9); regs->r10 = kvm_register_read(vcpu, VCPU_REGS_R10); regs->r11 = kvm_register_read(vcpu, VCPU_REGS_R11); regs->r12 = kvm_register_read(vcpu, VCPU_REGS_R12); regs->r13 = kvm_register_read(vcpu, VCPU_REGS_R13); regs->r14 = kvm_register_read(vcpu, VCPU_REGS_R14); regs->r15 = kvm_register_read(vcpu, VCPU_REGS_R15); #endif regs->rip = kvm_rip_read(vcpu); regs->rflags = kvm_get_rflags(vcpu); return 0; } int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) { vcpu->arch.emulate_regs_need_sync_from_vcpu = true; vcpu->arch.emulate_regs_need_sync_to_vcpu = false; kvm_register_write(vcpu, VCPU_REGS_RAX, regs->rax); kvm_register_write(vcpu, VCPU_REGS_RBX, regs->rbx); kvm_register_write(vcpu, VCPU_REGS_RCX, regs->rcx); kvm_register_write(vcpu, VCPU_REGS_RDX, regs->rdx); kvm_register_write(vcpu, VCPU_REGS_RSI, regs->rsi); kvm_register_write(vcpu, VCPU_REGS_RDI, regs->rdi); kvm_register_write(vcpu, VCPU_REGS_RSP, regs->rsp); kvm_register_write(vcpu, VCPU_REGS_RBP, regs->rbp); #ifdef CONFIG_X86_64 kvm_register_write(vcpu, VCPU_REGS_R8, regs->r8); kvm_register_write(vcpu, VCPU_REGS_R9, regs->r9); 
kvm_register_write(vcpu, VCPU_REGS_R10, regs->r10); kvm_register_write(vcpu, VCPU_REGS_R11, regs->r11); kvm_register_write(vcpu, VCPU_REGS_R12, regs->r12); kvm_register_write(vcpu, VCPU_REGS_R13, regs->r13); kvm_register_write(vcpu, VCPU_REGS_R14, regs->r14); kvm_register_write(vcpu, VCPU_REGS_R15, regs->r15); #endif kvm_rip_write(vcpu, regs->rip); kvm_set_rflags(vcpu, regs->rflags); vcpu->arch.exception.pending = false; kvm_make_request(KVM_REQ_EVENT, vcpu); return 0; } void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l) { struct kvm_segment cs; kvm_get_segment(vcpu, &cs, VCPU_SREG_CS); *db = cs.db; *l = cs.l; } EXPORT_SYMBOL_GPL(kvm_get_cs_db_l_bits); int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) { struct desc_ptr dt; kvm_get_segment(vcpu, &sregs->cs, VCPU_SREG_CS); kvm_get_segment(vcpu, &sregs->ds, VCPU_SREG_DS); kvm_get_segment(vcpu, &sregs->es, VCPU_SREG_ES); kvm_get_segment(vcpu, &sregs->fs, VCPU_SREG_FS); kvm_get_segment(vcpu, &sregs->gs, VCPU_SREG_GS); kvm_get_segment(vcpu, &sregs->ss, VCPU_SREG_SS); kvm_get_segment(vcpu, &sregs->tr, VCPU_SREG_TR); kvm_get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR); kvm_x86_ops->get_idt(vcpu, &dt); sregs->idt.limit = dt.size; sregs->idt.base = dt.address; kvm_x86_ops->get_gdt(vcpu, &dt); sregs->gdt.limit = dt.size; sregs->gdt.base = dt.address; sregs->cr0 = kvm_read_cr0(vcpu); sregs->cr2 = vcpu->arch.cr2; sregs->cr3 = kvm_read_cr3(vcpu); sregs->cr4 = kvm_read_cr4(vcpu); sregs->cr8 = kvm_get_cr8(vcpu); sregs->efer = vcpu->arch.efer; sregs->apic_base = kvm_get_apic_base(vcpu); memset(sregs->interrupt_bitmap, 0, sizeof sregs->interrupt_bitmap); if (vcpu->arch.interrupt.pending && !vcpu->arch.interrupt.soft) set_bit(vcpu->arch.interrupt.nr, (unsigned long *)sregs->interrupt_bitmap); return 0; } int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, struct kvm_mp_state *mp_state) { kvm_apic_accept_events(vcpu); if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED && vcpu->arch.pv.pv_unhalted) mp_state->mp_state = KVM_MP_STATE_RUNNABLE; else mp_state->mp_state = vcpu->arch.mp_state; return 0; } int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, struct kvm_mp_state *mp_state) { if (!kvm_vcpu_has_lapic(vcpu) && mp_state->mp_state != KVM_MP_STATE_RUNNABLE) return -EINVAL; if (mp_state->mp_state == KVM_MP_STATE_SIPI_RECEIVED) { vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED; set_bit(KVM_APIC_SIPI, &vcpu->arch.apic->pending_events); } else vcpu->arch.mp_state = mp_state->mp_state; kvm_make_request(KVM_REQ_EVENT, vcpu); return 0; } int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index, int reason, bool has_error_code, u32 error_code) { struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt; int ret; init_emulate_ctxt(vcpu); ret = emulator_task_switch(ctxt, tss_selector, idt_index, reason, has_error_code, error_code); if (ret) return EMULATE_FAIL; kvm_rip_write(vcpu, ctxt->eip); kvm_set_rflags(vcpu, ctxt->eflags); kvm_make_request(KVM_REQ_EVENT, vcpu); return EMULATE_DONE; } EXPORT_SYMBOL_GPL(kvm_task_switch); int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) { struct msr_data apic_base_msr; int mmu_reset_needed = 0; int pending_vec, max_bits, idx; struct desc_ptr dt; if (!guest_cpuid_has_xsave(vcpu) && (sregs->cr4 & X86_CR4_OSXSAVE)) return -EINVAL; dt.size = sregs->idt.limit; dt.address = sregs->idt.base; kvm_x86_ops->set_idt(vcpu, &dt); dt.size = sregs->gdt.limit; dt.address = sregs->gdt.base; kvm_x86_ops->set_gdt(vcpu, &dt); vcpu->arch.cr2 = 
sregs->cr2; mmu_reset_needed |= kvm_read_cr3(vcpu) != sregs->cr3; vcpu->arch.cr3 = sregs->cr3; __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail); kvm_set_cr8(vcpu, sregs->cr8); mmu_reset_needed |= vcpu->arch.efer != sregs->efer; kvm_x86_ops->set_efer(vcpu, sregs->efer); apic_base_msr.data = sregs->apic_base; apic_base_msr.host_initiated = true; kvm_set_apic_base(vcpu, &apic_base_msr); mmu_reset_needed |= kvm_read_cr0(vcpu) != sregs->cr0; kvm_x86_ops->set_cr0(vcpu, sregs->cr0); vcpu->arch.cr0 = sregs->cr0; mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4; kvm_x86_ops->set_cr4(vcpu, sregs->cr4); if (sregs->cr4 & X86_CR4_OSXSAVE) kvm_update_cpuid(vcpu); idx = srcu_read_lock(&vcpu->kvm->srcu); if (!is_long_mode(vcpu) && is_pae(vcpu)) { load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu)); mmu_reset_needed = 1; } srcu_read_unlock(&vcpu->kvm->srcu, idx); if (mmu_reset_needed) kvm_mmu_reset_context(vcpu); max_bits = KVM_NR_INTERRUPTS; pending_vec = find_first_bit( (const unsigned long *)sregs->interrupt_bitmap, max_bits); if (pending_vec < max_bits) { kvm_queue_interrupt(vcpu, pending_vec, false); pr_debug("Set back pending irq %d\n", pending_vec); } kvm_set_segment(vcpu, &sregs->cs, VCPU_SREG_CS); kvm_set_segment(vcpu, &sregs->ds, VCPU_SREG_DS); kvm_set_segment(vcpu, &sregs->es, VCPU_SREG_ES); kvm_set_segment(vcpu, &sregs->fs, VCPU_SREG_FS); kvm_set_segment(vcpu, &sregs->gs, VCPU_SREG_GS); kvm_set_segment(vcpu, &sregs->ss, VCPU_SREG_SS); kvm_set_segment(vcpu, &sregs->tr, VCPU_SREG_TR); kvm_set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR); update_cr8_intercept(vcpu); /* Older userspace won't unhalt the vcpu on reset. */ if (kvm_vcpu_is_bsp(vcpu) && kvm_rip_read(vcpu) == 0xfff0 && sregs->cs.selector == 0xf000 && sregs->cs.base == 0xffff0000 && !is_protmode(vcpu)) vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; kvm_make_request(KVM_REQ_EVENT, vcpu); return 0; } int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_guest_debug *dbg) { unsigned long rflags; int i, r; if (dbg->control & (KVM_GUESTDBG_INJECT_DB | KVM_GUESTDBG_INJECT_BP)) { r = -EBUSY; if (vcpu->arch.exception.pending) goto out; if (dbg->control & KVM_GUESTDBG_INJECT_DB) kvm_queue_exception(vcpu, DB_VECTOR); else kvm_queue_exception(vcpu, BP_VECTOR); } /* * Read rflags as long as potentially injected trace flags are still * filtered out. */ rflags = kvm_get_rflags(vcpu); vcpu->guest_debug = dbg->control; if (!(vcpu->guest_debug & KVM_GUESTDBG_ENABLE)) vcpu->guest_debug = 0; if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) { for (i = 0; i < KVM_NR_DB_REGS; ++i) vcpu->arch.eff_db[i] = dbg->arch.debugreg[i]; vcpu->arch.guest_debug_dr7 = dbg->arch.debugreg[7]; } else { for (i = 0; i < KVM_NR_DB_REGS; i++) vcpu->arch.eff_db[i] = vcpu->arch.db[i]; } kvm_update_dr7(vcpu); if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) vcpu->arch.singlestep_rip = kvm_rip_read(vcpu) + get_segment_base(vcpu, VCPU_SREG_CS); /* * Trigger an rflags update that will inject or remove the trace * flags. */ kvm_set_rflags(vcpu, rflags); kvm_x86_ops->update_db_bp_intercept(vcpu); r = 0; out: return r; } /* * Translate a guest virtual address to a guest physical address. 
*/ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, struct kvm_translation *tr) { unsigned long vaddr = tr->linear_address; gpa_t gpa; int idx; idx = srcu_read_lock(&vcpu->kvm->srcu); gpa = kvm_mmu_gva_to_gpa_system(vcpu, vaddr, NULL); srcu_read_unlock(&vcpu->kvm->srcu, idx); tr->physical_address = gpa; tr->valid = gpa != UNMAPPED_GVA; tr->writeable = 1; tr->usermode = 0; return 0; } int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) { struct i387_fxsave_struct *fxsave = &vcpu->arch.guest_fpu.state->fxsave; memcpy(fpu->fpr, fxsave->st_space, 128); fpu->fcw = fxsave->cwd; fpu->fsw = fxsave->swd; fpu->ftwx = fxsave->twd; fpu->last_opcode = fxsave->fop; fpu->last_ip = fxsave->rip; fpu->last_dp = fxsave->rdp; memcpy(fpu->xmm, fxsave->xmm_space, sizeof fxsave->xmm_space); return 0; } int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) { struct i387_fxsave_struct *fxsave = &vcpu->arch.guest_fpu.state->fxsave; memcpy(fxsave->st_space, fpu->fpr, 128); fxsave->cwd = fpu->fcw; fxsave->swd = fpu->fsw; fxsave->twd = fpu->ftwx; fxsave->fop = fpu->last_opcode; fxsave->rip = fpu->last_ip; fxsave->rdp = fpu->last_dp; memcpy(fxsave->xmm_space, fpu->xmm, sizeof fxsave->xmm_space); return 0; } int fx_init(struct kvm_vcpu *vcpu) { int err; err = fpu_alloc(&vcpu->arch.guest_fpu); if (err) return err; fpu_finit(&vcpu->arch.guest_fpu); /* * Ensure guest xcr0 is valid for loading */ vcpu->arch.xcr0 = XSTATE_FP; vcpu->arch.cr0 |= X86_CR0_ET; return 0; } EXPORT_SYMBOL_GPL(fx_init); static void fx_free(struct kvm_vcpu *vcpu) { fpu_free(&vcpu->arch.guest_fpu); } void kvm_load_guest_fpu(struct kvm_vcpu *vcpu) { if (vcpu->guest_fpu_loaded) return; /* * Restore all possible states in the guest, * and assume host would use all available bits. * Guest xcr0 would be loaded later. 
*/ kvm_put_guest_xcr0(vcpu); vcpu->guest_fpu_loaded = 1; __kernel_fpu_begin(); fpu_restore_checking(&vcpu->arch.guest_fpu); trace_kvm_fpu(1); } void kvm_put_guest_fpu(struct kvm_vcpu *vcpu) { kvm_put_guest_xcr0(vcpu); if (!vcpu->guest_fpu_loaded) return; vcpu->guest_fpu_loaded = 0; fpu_save_init(&vcpu->arch.guest_fpu); __kernel_fpu_end(); ++vcpu->stat.fpu_reload; kvm_make_request(KVM_REQ_DEACTIVATE_FPU, vcpu); trace_kvm_fpu(0); } void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu) { kvmclock_reset(vcpu); free_cpumask_var(vcpu->arch.wbinvd_dirty_mask); fx_free(vcpu); kvm_x86_ops->vcpu_free(vcpu); } struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) { if (check_tsc_unstable() && atomic_read(&kvm->online_vcpus) != 0) printk_once(KERN_WARNING "kvm: SMP vm created on host with unstable TSC; " "guest TSC will not be reliable\n"); return kvm_x86_ops->vcpu_create(kvm, id); } int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) { int r; vcpu->arch.mtrr_state.have_fixed = 1; r = vcpu_load(vcpu); if (r) return r; kvm_vcpu_reset(vcpu); kvm_mmu_setup(vcpu); vcpu_put(vcpu); return r; } int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) { int r; struct msr_data msr; struct kvm *kvm = vcpu->kvm; r = vcpu_load(vcpu); if (r) return r; msr.data = 0x0; msr.index = MSR_IA32_TSC; msr.host_initiated = true; kvm_write_tsc(vcpu, &msr); vcpu_put(vcpu); schedule_delayed_work(&kvm->arch.kvmclock_sync_work, KVMCLOCK_SYNC_PERIOD); return r; } void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) { int r; vcpu->arch.apf.msr_val = 0; r = vcpu_load(vcpu); BUG_ON(r); kvm_mmu_unload(vcpu); vcpu_put(vcpu); fx_free(vcpu); kvm_x86_ops->vcpu_free(vcpu); } void kvm_vcpu_reset(struct kvm_vcpu *vcpu) { atomic_set(&vcpu->arch.nmi_queued, 0); vcpu->arch.nmi_pending = 0; vcpu->arch.nmi_injected = false; kvm_clear_interrupt_queue(vcpu); kvm_clear_exception_queue(vcpu); memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db)); vcpu->arch.dr6 = DR6_INIT; kvm_update_dr6(vcpu); vcpu->arch.dr7 = DR7_FIXED_1; kvm_update_dr7(vcpu); kvm_make_request(KVM_REQ_EVENT, vcpu); vcpu->arch.apf.msr_val = 0; vcpu->arch.st.msr_val = 0; kvmclock_reset(vcpu); kvm_clear_async_pf_completion_queue(vcpu); kvm_async_pf_hash_reset(vcpu); vcpu->arch.apf.halted = false; kvm_pmu_reset(vcpu); memset(vcpu->arch.regs, 0, sizeof(vcpu->arch.regs)); vcpu->arch.regs_avail = ~0; vcpu->arch.regs_dirty = ~0; kvm_x86_ops->vcpu_reset(vcpu); } void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, unsigned int vector) { struct kvm_segment cs; kvm_get_segment(vcpu, &cs, VCPU_SREG_CS); cs.selector = vector << 8; cs.base = vector << 12; kvm_set_segment(vcpu, &cs, VCPU_SREG_CS); kvm_rip_write(vcpu, 0); } int kvm_arch_hardware_enable(void) { struct kvm *kvm; struct kvm_vcpu *vcpu; int i; int ret; u64 local_tsc; u64 max_tsc = 0; bool stable, backwards_tsc = false; kvm_shared_msr_cpu_online(); ret = kvm_x86_ops->hardware_enable(); if (ret != 0) return ret; local_tsc = native_read_tsc(); stable = !check_tsc_unstable(); list_for_each_entry(kvm, &vm_list, vm_list) { kvm_for_each_vcpu(i, vcpu, kvm) { if (!stable && vcpu->cpu == smp_processor_id()) kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); if (stable && vcpu->arch.last_host_tsc > local_tsc) { backwards_tsc = true; if (vcpu->arch.last_host_tsc > max_tsc) max_tsc = vcpu->arch.last_host_tsc; } } } /* * Sometimes, even reliable TSCs go backwards. This happens on * platforms that reset TSC during suspend or hibernate actions, but * maintain synchronization. We must compensate. 
Fortunately, we can * detect that condition here, which happens early in CPU bringup, * before any KVM threads can be running. Unfortunately, we can't * bring the TSCs fully up to date with real time, as we aren't yet far * enough into CPU bringup that we know how much real time has actually * elapsed; our helper function, get_kernel_ns() will be using boot * variables that haven't been updated yet. * * So we simply find the maximum observed TSC above, then record the * adjustment to TSC in each VCPU. When the VCPU later gets loaded, * the adjustment will be applied. Note that we accumulate * adjustments, in case multiple suspend cycles happen before some VCPU * gets a chance to run again. In the event that no KVM threads get a * chance to run, we will miss the entire elapsed period, as we'll have * reset last_host_tsc, so VCPUs will not have the TSC adjusted and may * loose cycle time. This isn't too big a deal, since the loss will be * uniform across all VCPUs (not to mention the scenario is extremely * unlikely). It is possible that a second hibernate recovery happens * much faster than a first, causing the observed TSC here to be * smaller; this would require additional padding adjustment, which is * why we set last_host_tsc to the local tsc observed here. * * N.B. - this code below runs only on platforms with reliable TSC, * as that is the only way backwards_tsc is set above. Also note * that this runs for ALL vcpus, which is not a bug; all VCPUs should * have the same delta_cyc adjustment applied if backwards_tsc * is detected. Note further, this adjustment is only done once, * as we reset last_host_tsc on all VCPUs to stop this from being * called multiple times (one for each physical CPU bringup). * * Platforms with unreliable TSCs don't have to deal with this, they * will be compensated by the logic in vcpu_load, which sets the TSC to * catchup mode. This will catchup all VCPUs to real time, but cannot * guarantee that they stay in perfect synchronization. */ if (backwards_tsc) { u64 delta_cyc = max_tsc - local_tsc; backwards_tsc_observed = true; list_for_each_entry(kvm, &vm_list, vm_list) { kvm_for_each_vcpu(i, vcpu, kvm) { vcpu->arch.tsc_offset_adjustment += delta_cyc; vcpu->arch.last_host_tsc = local_tsc; kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu); } /* * We have to disable TSC offset matching.. if you were * booting a VM while issuing an S4 host suspend.... * you may have some problem. Solving this issue is * left as an exercise to the reader. 
*/ kvm->arch.last_tsc_nsec = 0; kvm->arch.last_tsc_write = 0; } } return 0; } void kvm_arch_hardware_disable(void) { kvm_x86_ops->hardware_disable(); drop_user_return_notifiers(); } int kvm_arch_hardware_setup(void) { return kvm_x86_ops->hardware_setup(); } void kvm_arch_hardware_unsetup(void) { kvm_x86_ops->hardware_unsetup(); } void kvm_arch_check_processor_compat(void *rtn) { kvm_x86_ops->check_processor_compatibility(rtn); } bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu) { return irqchip_in_kernel(vcpu->kvm) == (vcpu->arch.apic != NULL); } struct static_key kvm_no_apic_vcpu __read_mostly; int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) { struct page *page; struct kvm *kvm; int r; BUG_ON(vcpu->kvm == NULL); kvm = vcpu->kvm; vcpu->arch.pv.pv_unhalted = false; vcpu->arch.emulate_ctxt.ops = &emulate_ops; if (!irqchip_in_kernel(kvm) || kvm_vcpu_is_bsp(vcpu)) vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; else vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED; page = alloc_page(GFP_KERNEL | __GFP_ZERO); if (!page) { r = -ENOMEM; goto fail; } vcpu->arch.pio_data = page_address(page); kvm_set_tsc_khz(vcpu, max_tsc_khz); r = kvm_mmu_create(vcpu); if (r < 0) goto fail_free_pio_data; if (irqchip_in_kernel(kvm)) { r = kvm_create_lapic(vcpu); if (r < 0) goto fail_mmu_destroy; } else static_key_slow_inc(&kvm_no_apic_vcpu); vcpu->arch.mce_banks = kzalloc(KVM_MAX_MCE_BANKS * sizeof(u64) * 4, GFP_KERNEL); if (!vcpu->arch.mce_banks) { r = -ENOMEM; goto fail_free_lapic; } vcpu->arch.mcg_cap = KVM_MAX_MCE_BANKS; if (!zalloc_cpumask_var(&vcpu->arch.wbinvd_dirty_mask, GFP_KERNEL)) { r = -ENOMEM; goto fail_free_mce_banks; } r = fx_init(vcpu); if (r) goto fail_free_wbinvd_dirty_mask; vcpu->arch.ia32_tsc_adjust_msr = 0x0; vcpu->arch.pv_time_enabled = false; vcpu->arch.guest_supported_xcr0 = 0; vcpu->arch.guest_xstate_size = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET; kvm_async_pf_hash_reset(vcpu); kvm_pmu_init(vcpu); return 0; fail_free_wbinvd_dirty_mask: free_cpumask_var(vcpu->arch.wbinvd_dirty_mask); fail_free_mce_banks: kfree(vcpu->arch.mce_banks); fail_free_lapic: kvm_free_lapic(vcpu); fail_mmu_destroy: kvm_mmu_destroy(vcpu); fail_free_pio_data: free_page((unsigned long)vcpu->arch.pio_data); fail: return r; } void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) { int idx; kvm_pmu_destroy(vcpu); kfree(vcpu->arch.mce_banks); kvm_free_lapic(vcpu); idx = srcu_read_lock(&vcpu->kvm->srcu); kvm_mmu_destroy(vcpu); srcu_read_unlock(&vcpu->kvm->srcu, idx); free_page((unsigned long)vcpu->arch.pio_data); if (!irqchip_in_kernel(vcpu->kvm)) static_key_slow_dec(&kvm_no_apic_vcpu); } void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) { kvm_x86_ops->sched_in(vcpu, cpu); } int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) { if (type) return -EINVAL; INIT_LIST_HEAD(&kvm->arch.active_mmu_pages); INIT_LIST_HEAD(&kvm->arch.zapped_obsolete_pages); INIT_LIST_HEAD(&kvm->arch.assigned_dev_head); atomic_set(&kvm->arch.noncoherent_dma_count, 0); /* Reserve bit 0 of irq_sources_bitmap for userspace irq source */ set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap); /* Reserve bit 1 of irq_sources_bitmap for irqfd-resampler */ set_bit(KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap); raw_spin_lock_init(&kvm->arch.tsc_write_lock); mutex_init(&kvm->arch.apic_map_lock); spin_lock_init(&kvm->arch.pvclock_gtod_sync_lock); pvclock_update_vm_gtod_copy(kvm); INIT_DELAYED_WORK(&kvm->arch.kvmclock_update_work, kvmclock_update_fn); INIT_DELAYED_WORK(&kvm->arch.kvmclock_sync_work, kvmclock_sync_fn); return 0; } static 
void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu) { int r; r = vcpu_load(vcpu); BUG_ON(r); kvm_mmu_unload(vcpu); vcpu_put(vcpu); } static void kvm_free_vcpus(struct kvm *kvm) { unsigned int i; struct kvm_vcpu *vcpu; /* * Unpin any mmu pages first. */ kvm_for_each_vcpu(i, vcpu, kvm) { kvm_clear_async_pf_completion_queue(vcpu); kvm_unload_vcpu_mmu(vcpu); } kvm_for_each_vcpu(i, vcpu, kvm) kvm_arch_vcpu_free(vcpu); mutex_lock(&kvm->lock); for (i = 0; i < atomic_read(&kvm->online_vcpus); i++) kvm->vcpus[i] = NULL; atomic_set(&kvm->online_vcpus, 0); mutex_unlock(&kvm->lock); } void kvm_arch_sync_events(struct kvm *kvm) { cancel_delayed_work_sync(&kvm->arch.kvmclock_sync_work); cancel_delayed_work_sync(&kvm->arch.kvmclock_update_work); kvm_free_all_assigned_devices(kvm); kvm_free_pit(kvm); } void kvm_arch_destroy_vm(struct kvm *kvm) { if (current->mm == kvm->mm) { /* * Free memory regions allocated on behalf of userspace, * unless the the memory map has changed due to process exit * or fd copying. */ struct kvm_userspace_memory_region mem; memset(&mem, 0, sizeof(mem)); mem.slot = APIC_ACCESS_PAGE_PRIVATE_MEMSLOT; kvm_set_memory_region(kvm, &mem); mem.slot = IDENTITY_PAGETABLE_PRIVATE_MEMSLOT; kvm_set_memory_region(kvm, &mem); mem.slot = TSS_PRIVATE_MEMSLOT; kvm_set_memory_region(kvm, &mem); } kvm_iommu_unmap_guest(kvm); kfree(kvm->arch.vpic); kfree(kvm->arch.vioapic); kvm_free_vcpus(kvm); if (kvm->arch.apic_access_page) put_page(kvm->arch.apic_access_page); kfree(rcu_dereference_check(kvm->arch.apic_map, 1)); } void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free, struct kvm_memory_slot *dont) { int i; for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) { if (!dont || free->arch.rmap[i] != dont->arch.rmap[i]) { kvm_kvfree(free->arch.rmap[i]); free->arch.rmap[i] = NULL; } if (i == 0) continue; if (!dont || free->arch.lpage_info[i - 1] != dont->arch.lpage_info[i - 1]) { kvm_kvfree(free->arch.lpage_info[i - 1]); free->arch.lpage_info[i - 1] = NULL; } } } int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot, unsigned long npages) { int i; for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) { unsigned long ugfn; int lpages; int level = i + 1; lpages = gfn_to_index(slot->base_gfn + npages - 1, slot->base_gfn, level) + 1; slot->arch.rmap[i] = kvm_kvzalloc(lpages * sizeof(*slot->arch.rmap[i])); if (!slot->arch.rmap[i]) goto out_free; if (i == 0) continue; slot->arch.lpage_info[i - 1] = kvm_kvzalloc(lpages * sizeof(*slot->arch.lpage_info[i - 1])); if (!slot->arch.lpage_info[i - 1]) goto out_free; if (slot->base_gfn & (KVM_PAGES_PER_HPAGE(level) - 1)) slot->arch.lpage_info[i - 1][0].write_count = 1; if ((slot->base_gfn + npages) & (KVM_PAGES_PER_HPAGE(level) - 1)) slot->arch.lpage_info[i - 1][lpages - 1].write_count = 1; ugfn = slot->userspace_addr >> PAGE_SHIFT; /* * If the gfn and userspace address are not aligned wrt each * other, or if explicitly asked to, disable large page * support for this slot */ if ((slot->base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE(level) - 1) || !kvm_largepages_enabled()) { unsigned long j; for (j = 0; j < lpages; ++j) slot->arch.lpage_info[i - 1][j].write_count = 1; } } return 0; out_free: for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) { kvm_kvfree(slot->arch.rmap[i]); slot->arch.rmap[i] = NULL; if (i == 0) continue; kvm_kvfree(slot->arch.lpage_info[i - 1]); slot->arch.lpage_info[i - 1] = NULL; } return -ENOMEM; } void kvm_arch_memslots_updated(struct kvm *kvm) { /* * memslots->generation has been incremented. * mmio generation may have reached its maximum value. 
*/ kvm_mmu_invalidate_mmio_sptes(kvm); } int kvm_arch_prepare_memory_region(struct kvm *kvm, struct kvm_memory_slot *memslot, struct kvm_userspace_memory_region *mem, enum kvm_mr_change change) { /* * Only private memory slots need to be mapped here since * KVM_SET_MEMORY_REGION ioctl is no longer supported. */ if ((memslot->id >= KVM_USER_MEM_SLOTS) && (change == KVM_MR_CREATE)) { unsigned long userspace_addr; /* * MAP_SHARED to prevent internal slot pages from being moved * by fork()/COW. */ userspace_addr = vm_mmap(NULL, 0, memslot->npages * PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, 0); if (IS_ERR((void *)userspace_addr)) return PTR_ERR((void *)userspace_addr); memslot->userspace_addr = userspace_addr; } return 0; } void kvm_arch_commit_memory_region(struct kvm *kvm, struct kvm_userspace_memory_region *mem, const struct kvm_memory_slot *old, enum kvm_mr_change change) { int nr_mmu_pages = 0; if ((mem->slot >= KVM_USER_MEM_SLOTS) && (change == KVM_MR_DELETE)) { int ret; ret = vm_munmap(old->userspace_addr, old->npages * PAGE_SIZE); if (ret < 0) printk(KERN_WARNING "kvm_vm_ioctl_set_memory_region: " "failed to munmap memory\n"); } if (!kvm->arch.n_requested_mmu_pages) nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm); if (nr_mmu_pages) kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages); /* * Write protect all pages for dirty logging. * * All the sptes including the large sptes which point to this * slot are set to readonly. We can not create any new large * spte on this slot until the end of the logging. * * See the comments in fast_page_fault(). */ if ((change != KVM_MR_DELETE) && (mem->flags & KVM_MEM_LOG_DIRTY_PAGES)) kvm_mmu_slot_remove_write_access(kvm, mem->slot); } void kvm_arch_flush_shadow_all(struct kvm *kvm) { kvm_mmu_invalidate_zap_all_pages(kvm); } void kvm_arch_flush_shadow_memslot(struct kvm *kvm, struct kvm_memory_slot *slot) { kvm_mmu_invalidate_zap_all_pages(kvm); } int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) { if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events) kvm_x86_ops->check_nested_events(vcpu, false); return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE && !vcpu->arch.apf.halted) || !list_empty_careful(&vcpu->async_pf.done) || kvm_apic_has_events(vcpu) || vcpu->arch.pv.pv_unhalted || atomic_read(&vcpu->arch.nmi_queued) || (kvm_arch_interrupt_allowed(vcpu) && kvm_cpu_has_interrupt(vcpu)); } int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu) { return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE; } int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu) { return kvm_x86_ops->interrupt_allowed(vcpu); } bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip) { unsigned long current_rip = kvm_rip_read(vcpu) + get_segment_base(vcpu, VCPU_SREG_CS); return current_rip == linear_rip; } EXPORT_SYMBOL_GPL(kvm_is_linear_rip); unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu) { unsigned long rflags; rflags = kvm_x86_ops->get_rflags(vcpu); if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) rflags &= ~X86_EFLAGS_TF; return rflags; } EXPORT_SYMBOL_GPL(kvm_get_rflags); static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags) { if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP && kvm_is_linear_rip(vcpu, vcpu->arch.singlestep_rip)) rflags |= X86_EFLAGS_TF; kvm_x86_ops->set_rflags(vcpu, rflags); } void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags) { __kvm_set_rflags(vcpu, rflags); kvm_make_request(KVM_REQ_EVENT, vcpu); } EXPORT_SYMBOL_GPL(kvm_set_rflags); void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, 
struct kvm_async_pf *work) { int r; if ((vcpu->arch.mmu.direct_map != work->arch.direct_map) || work->wakeup_all) return; r = kvm_mmu_reload(vcpu); if (unlikely(r)) return; if (!vcpu->arch.mmu.direct_map && work->arch.cr3 != vcpu->arch.mmu.get_cr3(vcpu)) return; vcpu->arch.mmu.page_fault(vcpu, work->gva, 0, true); } static inline u32 kvm_async_pf_hash_fn(gfn_t gfn) { return hash_32(gfn & 0xffffffff, order_base_2(ASYNC_PF_PER_VCPU)); } static inline u32 kvm_async_pf_next_probe(u32 key) { return (key + 1) & (roundup_pow_of_two(ASYNC_PF_PER_VCPU) - 1); } static void kvm_add_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn) { u32 key = kvm_async_pf_hash_fn(gfn); while (vcpu->arch.apf.gfns[key] != ~0) key = kvm_async_pf_next_probe(key); vcpu->arch.apf.gfns[key] = gfn; } static u32 kvm_async_pf_gfn_slot(struct kvm_vcpu *vcpu, gfn_t gfn) { int i; u32 key = kvm_async_pf_hash_fn(gfn); for (i = 0; i < roundup_pow_of_two(ASYNC_PF_PER_VCPU) && (vcpu->arch.apf.gfns[key] != gfn && vcpu->arch.apf.gfns[key] != ~0); i++) key = kvm_async_pf_next_probe(key); return key; } bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn) { return vcpu->arch.apf.gfns[kvm_async_pf_gfn_slot(vcpu, gfn)] == gfn; } static void kvm_del_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn) { u32 i, j, k; i = j = kvm_async_pf_gfn_slot(vcpu, gfn); while (true) { vcpu->arch.apf.gfns[i] = ~0; do { j = kvm_async_pf_next_probe(j); if (vcpu->arch.apf.gfns[j] == ~0) return; k = kvm_async_pf_hash_fn(vcpu->arch.apf.gfns[j]); /* * k lies cyclically in ]i,j] * | i.k.j | * |....j i.k.| or |.k..j i...| */ } while ((i <= j) ? (i < k && k <= j) : (i < k || k <= j)); vcpu->arch.apf.gfns[i] = vcpu->arch.apf.gfns[j]; i = j; } } static int apf_put_user(struct kvm_vcpu *vcpu, u32 val) { return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apf.data, &val, sizeof(val)); } void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu, struct kvm_async_pf *work) { struct x86_exception fault; trace_kvm_async_pf_not_present(work->arch.token, work->gva); kvm_add_async_pf_gfn(vcpu, work->arch.gfn); if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) || (vcpu->arch.apf.send_user_only && kvm_x86_ops->get_cpl(vcpu) == 0)) kvm_make_request(KVM_REQ_APF_HALT, vcpu); else if (!apf_put_user(vcpu, KVM_PV_REASON_PAGE_NOT_PRESENT)) { fault.vector = PF_VECTOR; fault.error_code_valid = true; fault.error_code = 0; fault.nested_page_fault = false; fault.address = work->arch.token; kvm_inject_page_fault(vcpu, &fault); } } void kvm_arch_async_page_present(struct kvm_vcpu *vcpu, struct kvm_async_pf *work) { struct x86_exception fault; trace_kvm_async_pf_ready(work->arch.token, work->gva); if (work->wakeup_all) work->arch.token = ~0; /* broadcast wakeup */ else kvm_del_async_pf_gfn(vcpu, work->arch.gfn); if ((vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) && !apf_put_user(vcpu, KVM_PV_REASON_PAGE_READY)) { fault.vector = PF_VECTOR; fault.error_code_valid = true; fault.error_code = 0; fault.nested_page_fault = false; fault.address = work->arch.token; kvm_inject_page_fault(vcpu, &fault); } vcpu->arch.apf.halted = false; vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; } bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu) { if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED)) return true; else return !kvm_event_needs_reinjection(vcpu) && kvm_x86_ops->interrupt_allowed(vcpu); } void kvm_arch_register_noncoherent_dma(struct kvm *kvm) { atomic_inc(&kvm->arch.noncoherent_dma_count); } EXPORT_SYMBOL_GPL(kvm_arch_register_noncoherent_dma); void 
kvm_arch_unregister_noncoherent_dma(struct kvm *kvm) { atomic_dec(&kvm->arch.noncoherent_dma_count); } EXPORT_SYMBOL_GPL(kvm_arch_unregister_noncoherent_dma); bool kvm_arch_has_noncoherent_dma(struct kvm *kvm) { return atomic_read(&kvm->arch.noncoherent_dma_count); } EXPORT_SYMBOL_GPL(kvm_arch_has_noncoherent_dma); EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit); EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_inj_virq); EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_page_fault); EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_msr); EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_cr); EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmrun); EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit); EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit_inject); EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intr_vmexit); EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_invlpga); EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_skinit); EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intercepts); EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_write_tsc_offset); EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_ple_window);
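The full x86.c listing for this record ends above; the two fields that follow isolate handle_emulation_failure() before and after the CVE-2014-7842 fix. For orientation, the comment on vcpu_enter_guest() in the listing states the loop contract: handlers return 1 to keep __vcpu_run() spinning, and anything else surfaces vcpu->run->exit_reason to userspace. A minimal self-contained model of that contract (the EMULATE_DONE/EMULATE_FAIL names match the kernel; the rest is ours):

	#include <stdio.h>

	enum emulation_result { EMULATE_DONE, EMULATE_FAIL };

	/* 1: resume the guest; 0: hand vcpu->run->exit_reason to the VMM */
	static int handle_exit(enum emulation_result r)
	{
		return r == EMULATE_DONE ? 1 : 0;
	}

	int main(void)
	{
		int r = 1;

		while (r > 0)				/* shape of __vcpu_run() */
			r = handle_exit(EMULATE_FAIL);	/* a failure ends the loop */
		puts("exited to the userspace VMM");
		return 0;
	}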
static int handle_emulation_failure(struct kvm_vcpu *vcpu)
{
	int r = EMULATE_DONE;

	++vcpu->stat.insn_emulation_fail;
	trace_kvm_emulate_insn_failed(vcpu);
	if (!is_guest_mode(vcpu)) {
		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
		vcpu->run->internal.ndata = 0;
		r = EMULATE_FAIL;
	}
	kvm_queue_exception(vcpu, UD_VECTOR);

	return r;
}
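In the pre-fix body above, the only guard on the userspace report is !is_guest_mode(vcpu); the privilege level of the faulting guest code is never consulted, so unprivileged (CPL 3) guest code that trips the emulator escalates to KVM_EXIT_INTERNAL_ERROR for the whole VM. A hedged guest-side sketch of the exposure (ours, not a verified reproducer from the dataset):

	/*
	 * With port access granted via ioperm() (requires in-guest
	 * CAP_SYS_RAWIO), an unprivileged guest process can take a PIO
	 * exit into KVM's instruction emulator. If a sibling thread
	 * rewrites the OUT instruction's bytes in that window (the
	 * CWE-362 race), decode fails at CPL 3 and, before the fix,
	 * the VMM is handed KVM_EXIT_INTERNAL_ERROR.
	 */
	#include <stdint.h>
	#include <sys/io.h>		/* ioperm() */

	static void pio_probe(uint16_t port, uint8_t val)
	{
		/* This OUT causes a VM exit that may be fed to the emulator. */
		__asm__ volatile("outb %0, %1" : : "a"(val), "Nd"(port));
	}

	int main(void)
	{
		if (ioperm(0xe9, 1, 1))	/* 0xe9: hypothetical target port */
			return 1;
		pio_probe(0xe9, 0x42);
		return 0;
	}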
static int handle_emulation_failure(struct kvm_vcpu *vcpu)
{
	int r = EMULATE_DONE;

	++vcpu->stat.insn_emulation_fail;
	trace_kvm_emulate_insn_failed(vcpu);
	if (!is_guest_mode(vcpu) && kvm_x86_ops->get_cpl(vcpu) == 0) {
		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
		vcpu->run->internal.ndata = 0;
		r = EMULATE_FAIL;
	}
	kvm_queue_exception(vcpu, UD_VECTOR);

	return r;
}
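The entire fix is the added CPL clause in the condition above. A hedged restatement (the helper name is ours, not in the patch): escalate to the userspace VMM only when the failed instruction ran in the guest's kernel mode.

	static bool report_to_userspace(struct kvm_vcpu *vcpu)
	{
		return !is_guest_mode(vcpu) &&		/* not emulating for an L2 guest */
		       kvm_x86_ops->get_cpl(vcpu) == 0;	/* failure happened at ring 0 */
	}

For CPL > 0 the function now skips the KVM_EXIT_INTERNAL_ERROR path entirely while still executing the unconditional kvm_queue_exception(vcpu, UD_VECTOR), so the offending guest process receives #UD and the guest kernel can contain the damage to that one process.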
{'added': [(5003, '\tif (!is_guest_mode(vcpu) && kvm_x86_ops->get_cpl(vcpu) == 0) {')], 'deleted': [(5003, '\tif (!is_guest_mode(vcpu)) {')]}
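The diff record above confirms the fix is a single changed line (5003 in x86.c). A hedged VMM-side sketch (ours) of why gating the report matters: userspace VMMs commonly treat KVM_EXIT_INTERNAL_ERROR as unrecoverable and tear down every vCPU. Setup and error handling are trimmed; the ioctls and kvm_run fields are real KVM API.

	#include <err.h>
	#include <linux/kvm.h>
	#include <sys/ioctl.h>
	#include <sys/mman.h>

	static void run_loop(int kvm_fd, int vcpu_fd)
	{
		int sz = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
		struct kvm_run *run = mmap(NULL, sz, PROT_READ | PROT_WRITE,
					   MAP_SHARED, vcpu_fd, 0);

		for (;;) {
			if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
				err(1, "KVM_RUN");
			switch (run->exit_reason) {
			case KVM_EXIT_INTERNAL_ERROR:
				/* Before the fix, guest ring-3 code could force
				 * this path; aborting here kills the whole VM. */
				errx(1, "emulation failed, suberror=%u",
				     run->internal.suberror);
			case KVM_EXIT_HLT:
				return;
			default:
				break;	/* MMIO/PIO dispatch elided */
			}
		}
	}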
1
1
5,766
35,758
https://github.com/torvalds/linux
CVE-2014-7842
['CWE-362']
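The CWE-362 tag above reflects the race that makes the bug reachable: the instruction bytes the emulator fetches can change between the VM exit and the re-fetch. A minimal self-contained illustration (ours) of that double-fetch shape:

	#include <stdint.h>
	#include <stdio.h>

	/* check-then-use on memory another thread may rewrite in between */
	static int fetches_agree(volatile const uint8_t *insn)
	{
		uint8_t first = insn[0];	/* "check": byte seen at VM exit */
		/* window: a sibling vCPU may rewrite insn[0] right here */
		uint8_t second = insn[0];	/* "use": byte the emulator refetches */
		return first == second;
	}

	int main(void)
	{
		uint8_t insn[1] = { 0xee };	/* 0xee: OUT %al,(%dx) opcode */
		printf("fetches agree: %d\n", fetches_agree(insn));
		return 0;
	}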
tee_svc_cryp.c
syscall_cryp_obj_populate
// SPDX-License-Identifier: BSD-2-Clause /* * Copyright (c) 2014, STMicroelectronics International N.V. */ #include <assert.h> #include <crypto/crypto.h> #include <kernel/tee_ta_manager.h> #include <mm/tee_mmu.h> #include <string_ext.h> #include <string.h> #include <sys/queue.h> #include <tee_api_types.h> #include <tee/tee_cryp_utl.h> #include <tee/tee_obj.h> #include <tee/tee_svc_cryp.h> #include <tee/tee_svc.h> #include <trace.h> #include <utee_defines.h> #include <util.h> #include <tee_api_defines_extensions.h> #if defined(CFG_CRYPTO_HKDF) #include <tee/tee_cryp_hkdf.h> #endif #if defined(CFG_CRYPTO_CONCAT_KDF) #include <tee/tee_cryp_concat_kdf.h> #endif #if defined(CFG_CRYPTO_PBKDF2) #include <tee/tee_cryp_pbkdf2.h> #endif typedef void (*tee_cryp_ctx_finalize_func_t) (void *ctx, uint32_t algo); struct tee_cryp_state { TAILQ_ENTRY(tee_cryp_state) link; uint32_t algo; uint32_t mode; vaddr_t key1; vaddr_t key2; void *ctx; tee_cryp_ctx_finalize_func_t ctx_finalize; }; struct tee_cryp_obj_secret { uint32_t key_size; uint32_t alloc_size; /* * Pseudo code visualize layout of structure * Next follows data, such as: * uint8_t data[alloc_size] * key_size must never exceed alloc_size */ }; #define TEE_TYPE_ATTR_OPTIONAL 0x0 #define TEE_TYPE_ATTR_REQUIRED 0x1 #define TEE_TYPE_ATTR_OPTIONAL_GROUP 0x2 #define TEE_TYPE_ATTR_SIZE_INDICATOR 0x4 #define TEE_TYPE_ATTR_GEN_KEY_OPT 0x8 #define TEE_TYPE_ATTR_GEN_KEY_REQ 0x10 /* Handle storing of generic secret keys of varying lengths */ #define ATTR_OPS_INDEX_SECRET 0 /* Convert to/from big-endian byte array and provider-specific bignum */ #define ATTR_OPS_INDEX_BIGNUM 1 /* Convert to/from value attribute depending on direction */ #define ATTR_OPS_INDEX_VALUE 2 struct tee_cryp_obj_type_attrs { uint32_t attr_id; uint16_t flags; uint16_t ops_index; uint16_t raw_offs; uint16_t raw_size; }; #define RAW_DATA(_x, _y) \ .raw_offs = offsetof(_x, _y), .raw_size = MEMBER_SIZE(_x, _y) static const struct tee_cryp_obj_type_attrs tee_cryp_obj_secret_value_attrs[] = { { .attr_id = TEE_ATTR_SECRET_VALUE, .flags = TEE_TYPE_ATTR_REQUIRED | TEE_TYPE_ATTR_SIZE_INDICATOR, .ops_index = ATTR_OPS_INDEX_SECRET, .raw_offs = 0, .raw_size = 0 }, }; static const struct tee_cryp_obj_type_attrs tee_cryp_obj_rsa_pub_key_attrs[] = { { .attr_id = TEE_ATTR_RSA_MODULUS, .flags = TEE_TYPE_ATTR_REQUIRED | TEE_TYPE_ATTR_SIZE_INDICATOR, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct rsa_public_key, n) }, { .attr_id = TEE_ATTR_RSA_PUBLIC_EXPONENT, .flags = TEE_TYPE_ATTR_REQUIRED, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct rsa_public_key, e) }, }; static const struct tee_cryp_obj_type_attrs tee_cryp_obj_rsa_keypair_attrs[] = { { .attr_id = TEE_ATTR_RSA_MODULUS, .flags = TEE_TYPE_ATTR_REQUIRED | TEE_TYPE_ATTR_SIZE_INDICATOR, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct rsa_keypair, n) }, { .attr_id = TEE_ATTR_RSA_PUBLIC_EXPONENT, .flags = TEE_TYPE_ATTR_REQUIRED, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct rsa_keypair, e) }, { .attr_id = TEE_ATTR_RSA_PRIVATE_EXPONENT, .flags = TEE_TYPE_ATTR_REQUIRED, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct rsa_keypair, d) }, { .attr_id = TEE_ATTR_RSA_PRIME1, .flags = TEE_TYPE_ATTR_OPTIONAL_GROUP, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct rsa_keypair, p) }, { .attr_id = TEE_ATTR_RSA_PRIME2, .flags = TEE_TYPE_ATTR_OPTIONAL_GROUP, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct rsa_keypair, q) }, { .attr_id = TEE_ATTR_RSA_EXPONENT1, .flags = TEE_TYPE_ATTR_OPTIONAL_GROUP, .ops_index = ATTR_OPS_INDEX_BIGNUM, 
RAW_DATA(struct rsa_keypair, dp) }, { .attr_id = TEE_ATTR_RSA_EXPONENT2, .flags = TEE_TYPE_ATTR_OPTIONAL_GROUP, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct rsa_keypair, dq) }, { .attr_id = TEE_ATTR_RSA_COEFFICIENT, .flags = TEE_TYPE_ATTR_OPTIONAL_GROUP, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct rsa_keypair, qp) }, }; static const struct tee_cryp_obj_type_attrs tee_cryp_obj_dsa_pub_key_attrs[] = { { .attr_id = TEE_ATTR_DSA_PRIME, .flags = TEE_TYPE_ATTR_REQUIRED, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct dsa_public_key, p) }, { .attr_id = TEE_ATTR_DSA_SUBPRIME, .flags = TEE_TYPE_ATTR_REQUIRED | TEE_TYPE_ATTR_SIZE_INDICATOR, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct dsa_public_key, q) }, { .attr_id = TEE_ATTR_DSA_BASE, .flags = TEE_TYPE_ATTR_REQUIRED, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct dsa_public_key, g) }, { .attr_id = TEE_ATTR_DSA_PUBLIC_VALUE, .flags = TEE_TYPE_ATTR_REQUIRED, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct dsa_public_key, y) }, }; static const struct tee_cryp_obj_type_attrs tee_cryp_obj_dsa_keypair_attrs[] = { { .attr_id = TEE_ATTR_DSA_PRIME, .flags = TEE_TYPE_ATTR_REQUIRED | TEE_TYPE_ATTR_GEN_KEY_REQ, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct dsa_keypair, p) }, { .attr_id = TEE_ATTR_DSA_SUBPRIME, .flags = TEE_TYPE_ATTR_REQUIRED | TEE_TYPE_ATTR_SIZE_INDICATOR | TEE_TYPE_ATTR_GEN_KEY_REQ, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct dsa_keypair, q) }, { .attr_id = TEE_ATTR_DSA_BASE, .flags = TEE_TYPE_ATTR_REQUIRED | TEE_TYPE_ATTR_GEN_KEY_REQ, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct dsa_keypair, g) }, { .attr_id = TEE_ATTR_DSA_PRIVATE_VALUE, .flags = TEE_TYPE_ATTR_REQUIRED, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct dsa_keypair, x) }, { .attr_id = TEE_ATTR_DSA_PUBLIC_VALUE, .flags = TEE_TYPE_ATTR_REQUIRED, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct dsa_keypair, y) }, }; static const struct tee_cryp_obj_type_attrs tee_cryp_obj_dh_keypair_attrs[] = { { .attr_id = TEE_ATTR_DH_PRIME, .flags = TEE_TYPE_ATTR_REQUIRED | TEE_TYPE_ATTR_SIZE_INDICATOR | TEE_TYPE_ATTR_GEN_KEY_REQ, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct dh_keypair, p) }, { .attr_id = TEE_ATTR_DH_BASE, .flags = TEE_TYPE_ATTR_REQUIRED | TEE_TYPE_ATTR_GEN_KEY_REQ, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct dh_keypair, g) }, { .attr_id = TEE_ATTR_DH_PUBLIC_VALUE, .flags = TEE_TYPE_ATTR_REQUIRED, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct dh_keypair, y) }, { .attr_id = TEE_ATTR_DH_PRIVATE_VALUE, .flags = TEE_TYPE_ATTR_REQUIRED, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct dh_keypair, x) }, { .attr_id = TEE_ATTR_DH_SUBPRIME, .flags = TEE_TYPE_ATTR_OPTIONAL_GROUP | TEE_TYPE_ATTR_GEN_KEY_OPT, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct dh_keypair, q) }, { .attr_id = TEE_ATTR_DH_X_BITS, .flags = TEE_TYPE_ATTR_GEN_KEY_OPT, .ops_index = ATTR_OPS_INDEX_VALUE, RAW_DATA(struct dh_keypair, xbits) }, }; #if defined(CFG_CRYPTO_HKDF) static const struct tee_cryp_obj_type_attrs tee_cryp_obj_hkdf_ikm_attrs[] = { { .attr_id = TEE_ATTR_HKDF_IKM, .flags = TEE_TYPE_ATTR_REQUIRED | TEE_TYPE_ATTR_SIZE_INDICATOR, .ops_index = ATTR_OPS_INDEX_SECRET, .raw_offs = 0, .raw_size = 0 }, }; #endif #if defined(CFG_CRYPTO_CONCAT_KDF) static const struct tee_cryp_obj_type_attrs tee_cryp_obj_concat_kdf_z_attrs[] = { { .attr_id = TEE_ATTR_CONCAT_KDF_Z, .flags = TEE_TYPE_ATTR_REQUIRED | TEE_TYPE_ATTR_SIZE_INDICATOR, .ops_index = ATTR_OPS_INDEX_SECRET, .raw_offs = 0, .raw_size = 0 }, }; #endif #if 
#if defined(CFG_CRYPTO_PBKDF2)
static const struct tee_cryp_obj_type_attrs tee_cryp_obj_pbkdf2_passwd_attrs[] = {
	{
	.attr_id = TEE_ATTR_PBKDF2_PASSWORD,
	.flags = TEE_TYPE_ATTR_REQUIRED | TEE_TYPE_ATTR_SIZE_INDICATOR,
	.ops_index = ATTR_OPS_INDEX_SECRET,
	.raw_offs = 0,
	.raw_size = 0
	},
};
#endif

static const struct tee_cryp_obj_type_attrs tee_cryp_obj_ecc_pub_key_attrs[] = {
	{
	.attr_id = TEE_ATTR_ECC_PUBLIC_VALUE_X,
	.flags = TEE_TYPE_ATTR_REQUIRED,
	.ops_index = ATTR_OPS_INDEX_BIGNUM,
	RAW_DATA(struct ecc_public_key, x)
	},
	{
	.attr_id = TEE_ATTR_ECC_PUBLIC_VALUE_Y,
	.flags = TEE_TYPE_ATTR_REQUIRED,
	.ops_index = ATTR_OPS_INDEX_BIGNUM,
	RAW_DATA(struct ecc_public_key, y)
	},
	{
	.attr_id = TEE_ATTR_ECC_CURVE,
	.flags = TEE_TYPE_ATTR_REQUIRED,
	.ops_index = ATTR_OPS_INDEX_VALUE,
	RAW_DATA(struct ecc_public_key, curve)
	},
};

static const struct tee_cryp_obj_type_attrs tee_cryp_obj_ecc_keypair_attrs[] = {
	{
	.attr_id = TEE_ATTR_ECC_PRIVATE_VALUE,
	.flags = TEE_TYPE_ATTR_REQUIRED,
	.ops_index = ATTR_OPS_INDEX_BIGNUM,
	RAW_DATA(struct ecc_keypair, d)
	},
	{
	.attr_id = TEE_ATTR_ECC_PUBLIC_VALUE_X,
	.flags = TEE_TYPE_ATTR_REQUIRED,
	.ops_index = ATTR_OPS_INDEX_BIGNUM,
	RAW_DATA(struct ecc_keypair, x)
	},
	{
	.attr_id = TEE_ATTR_ECC_PUBLIC_VALUE_Y,
	.flags = TEE_TYPE_ATTR_REQUIRED,
	.ops_index = ATTR_OPS_INDEX_BIGNUM,
	RAW_DATA(struct ecc_keypair, y)
	},
	{
	.attr_id = TEE_ATTR_ECC_CURVE,
	.flags = TEE_TYPE_ATTR_REQUIRED | TEE_TYPE_ATTR_SIZE_INDICATOR,
	.ops_index = ATTR_OPS_INDEX_VALUE,
	RAW_DATA(struct ecc_keypair, curve)
	},
};

struct tee_cryp_obj_type_props {
	TEE_ObjectType obj_type;
	uint16_t min_size;	/* may not be smaller than this */
	uint16_t max_size;	/* may not be larger than this */
	uint16_t alloc_size;	/* this many bytes are allocated to hold data */
	uint8_t quanta;		/* may only be a multiple of this */

	uint8_t num_type_attrs;
	const struct tee_cryp_obj_type_attrs *type_attrs;
};

#define PROP(obj_type, quanta, min_size, max_size, alloc_size, type_attrs) \
		{ (obj_type), (min_size), (max_size), (alloc_size), (quanta), \
		  ARRAY_SIZE(type_attrs), (type_attrs) }
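/*
 * Worked example (editor's addition): for the TEE_TYPE_AES entry in the
 * table below, quanta = 64 means that within [min_size, max_size] =
 * [128, 256] only 128-, 192- and 256-bit keys pass the
 * "key_size % quanta == 0" check, and the allocation is 256 / 8 = 32
 * bytes of key material plus the struct tee_cryp_obj_secret header.
 * The #if 0 sketch repeats the validation logic that tee_obj_set_type()
 * and syscall_obj_generate_key() apply; it is illustrative only and all
 * demo_* names are invented.
 */
#if 0
#include <stdbool.h>
#include <stdint.h>

struct demo_props {
	uint16_t min_size;
	uint16_t max_size;
	uint8_t quanta;
};

static bool demo_key_size_ok(const struct demo_props *p, uint32_t bits)
{
	/* The same three checks the syscalls apply, in the same order */
	if (bits % p->quanta != 0)
		return false;
	if (bits < p->min_size)
		return false;
	if (bits > p->max_size)
		return false;
	return true;
}

/* demo_key_size_ok(&(struct demo_props){128, 256, 64}, 192) -> true  */
/* demo_key_size_ok(&(struct demo_props){128, 256, 64}, 100) -> false */
#endif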
static const struct tee_cryp_obj_type_props tee_cryp_obj_props[] = {
	PROP(TEE_TYPE_AES, 64, 128, 256,	/* valid sizes 128, 192, 256 */
	     256 / 8 + sizeof(struct tee_cryp_obj_secret),
	     tee_cryp_obj_secret_value_attrs),
	PROP(TEE_TYPE_DES, 56, 56, 56,
	     /*
	      * Valid size 56 without parity, note that we still allocate
	      * for 64 bits since the key is supplied with parity.
	      */
	     64 / 8 + sizeof(struct tee_cryp_obj_secret),
	     tee_cryp_obj_secret_value_attrs),
	PROP(TEE_TYPE_DES3, 56, 112, 168,
	     /*
	      * Valid sizes 112, 168 without parity, note that we still
	      * allocate space for the parity since the key is supplied
	      * with parity.
	      */
	     192 / 8 + sizeof(struct tee_cryp_obj_secret),
	     tee_cryp_obj_secret_value_attrs),
	PROP(TEE_TYPE_HMAC_MD5, 8, 64, 512,
	     512 / 8 + sizeof(struct tee_cryp_obj_secret),
	     tee_cryp_obj_secret_value_attrs),
	PROP(TEE_TYPE_HMAC_SHA1, 8, 80, 512,
	     512 / 8 + sizeof(struct tee_cryp_obj_secret),
	     tee_cryp_obj_secret_value_attrs),
	PROP(TEE_TYPE_HMAC_SHA224, 8, 112, 512,
	     512 / 8 + sizeof(struct tee_cryp_obj_secret),
	     tee_cryp_obj_secret_value_attrs),
	PROP(TEE_TYPE_HMAC_SHA256, 8, 192, 1024,
	     1024 / 8 + sizeof(struct tee_cryp_obj_secret),
	     tee_cryp_obj_secret_value_attrs),
	PROP(TEE_TYPE_HMAC_SHA384, 8, 256, 1024,
	     1024 / 8 + sizeof(struct tee_cryp_obj_secret),
	     tee_cryp_obj_secret_value_attrs),
	PROP(TEE_TYPE_HMAC_SHA512, 8, 256, 1024,
	     1024 / 8 + sizeof(struct tee_cryp_obj_secret),
	     tee_cryp_obj_secret_value_attrs),
	PROP(TEE_TYPE_GENERIC_SECRET, 8, 0, 4096,
	     4096 / 8 + sizeof(struct tee_cryp_obj_secret),
	     tee_cryp_obj_secret_value_attrs),
#if defined(CFG_CRYPTO_HKDF)
	PROP(TEE_TYPE_HKDF_IKM, 8, 0, 4096,
	     4096 / 8 + sizeof(struct tee_cryp_obj_secret),
	     tee_cryp_obj_hkdf_ikm_attrs),
#endif
#if defined(CFG_CRYPTO_CONCAT_KDF)
	PROP(TEE_TYPE_CONCAT_KDF_Z, 8, 0, 4096,
	     4096 / 8 + sizeof(struct tee_cryp_obj_secret),
	     tee_cryp_obj_concat_kdf_z_attrs),
#endif
#if defined(CFG_CRYPTO_PBKDF2)
	PROP(TEE_TYPE_PBKDF2_PASSWORD, 8, 0, 4096,
	     4096 / 8 + sizeof(struct tee_cryp_obj_secret),
	     tee_cryp_obj_pbkdf2_passwd_attrs),
#endif
	PROP(TEE_TYPE_RSA_PUBLIC_KEY, 1, 256, CFG_CORE_BIGNUM_MAX_BITS,
	     sizeof(struct rsa_public_key),
	     tee_cryp_obj_rsa_pub_key_attrs),
	PROP(TEE_TYPE_RSA_KEYPAIR, 1, 256, CFG_CORE_BIGNUM_MAX_BITS,
	     sizeof(struct rsa_keypair),
	     tee_cryp_obj_rsa_keypair_attrs),
	PROP(TEE_TYPE_DSA_PUBLIC_KEY, 64, 512, 3072,
	     sizeof(struct dsa_public_key),
	     tee_cryp_obj_dsa_pub_key_attrs),
	PROP(TEE_TYPE_DSA_KEYPAIR, 64, 512, 3072,
	     sizeof(struct dsa_keypair),
	     tee_cryp_obj_dsa_keypair_attrs),
	PROP(TEE_TYPE_DH_KEYPAIR, 1, 256, 2048,
	     sizeof(struct dh_keypair),
	     tee_cryp_obj_dh_keypair_attrs),
	PROP(TEE_TYPE_ECDSA_PUBLIC_KEY, 1, 192, 521,
	     sizeof(struct ecc_public_key),
	     tee_cryp_obj_ecc_pub_key_attrs),
	PROP(TEE_TYPE_ECDSA_KEYPAIR, 1, 192, 521,
	     sizeof(struct ecc_keypair),
	     tee_cryp_obj_ecc_keypair_attrs),
	PROP(TEE_TYPE_ECDH_PUBLIC_KEY, 1, 192, 521,
	     sizeof(struct ecc_public_key),
	     tee_cryp_obj_ecc_pub_key_attrs),
	PROP(TEE_TYPE_ECDH_KEYPAIR, 1, 192, 521,
	     sizeof(struct ecc_keypair),
	     tee_cryp_obj_ecc_keypair_attrs),
};

struct attr_ops {
	TEE_Result (*from_user)(void *attr, const void *buffer, size_t size);
	TEE_Result (*to_user)(void *attr, struct tee_ta_session *sess,
			      void *buffer, uint64_t *size);
	TEE_Result (*to_binary)(void *attr, void *data, size_t data_len,
				size_t *offs);
	bool (*from_binary)(void *attr, const void *data, size_t data_len,
			    size_t *offs);
	TEE_Result (*from_obj)(void *attr, void *src_attr);
	void (*free)(void *attr);
	void (*clear)(void *attr);
};

static TEE_Result op_u32_to_binary_helper(uint32_t v, uint8_t *data,
					  size_t data_len, size_t *offs)
{
	uint32_t field;
	size_t next_offs;

	if (ADD_OVERFLOW(*offs, sizeof(field), &next_offs))
		return TEE_ERROR_OVERFLOW;

	if (data && next_offs <= data_len) {
		field = TEE_U32_TO_BIG_ENDIAN(v);
		memcpy(data + *offs, &field, sizeof(field));
	}
	(*offs) = next_offs;

	return TEE_SUCCESS;
}

static bool op_u32_from_binary_helper(uint32_t *v, const uint8_t *data,
				      size_t data_len, size_t *offs)
{
	uint32_t field;

	if (!data || (*offs + sizeof(field)) > data_len)
		return false;

	memcpy(&field, data + *offs, sizeof(field));
	*v = TEE_U32_FROM_BIG_ENDIAN(field);
	(*offs) += sizeof(field);
	return true;
}
static TEE_Result op_attr_secret_value_from_user(void *attr, const void *buffer,
						 size_t size)
{
	struct tee_cryp_obj_secret *key = attr;

	/* Data size has to fit in allocated buffer */
	if (size > key->alloc_size)
		return TEE_ERROR_SECURITY;
	memcpy(key + 1, buffer, size);
	key->key_size = size;
	return TEE_SUCCESS;
}

static TEE_Result op_attr_secret_value_to_user(void *attr,
					       struct tee_ta_session *sess __unused,
					       void *buffer, uint64_t *size)
{
	TEE_Result res;
	struct tee_cryp_obj_secret *key = attr;
	uint64_t s;
	uint64_t key_size;

	res = tee_svc_copy_from_user(&s, size, sizeof(s));
	if (res != TEE_SUCCESS)
		return res;

	key_size = key->key_size;
	res = tee_svc_copy_to_user(size, &key_size, sizeof(key_size));
	if (res != TEE_SUCCESS)
		return res;

	if (s < key->key_size || !buffer)
		return TEE_ERROR_SHORT_BUFFER;

	return tee_svc_copy_to_user(buffer, key + 1, key->key_size);
}

static TEE_Result op_attr_secret_value_to_binary(void *attr, void *data,
						 size_t data_len, size_t *offs)
{
	TEE_Result res;
	struct tee_cryp_obj_secret *key = attr;
	size_t next_offs;

	res = op_u32_to_binary_helper(key->key_size, data, data_len, offs);
	if (res != TEE_SUCCESS)
		return res;

	if (ADD_OVERFLOW(*offs, key->key_size, &next_offs))
		return TEE_ERROR_OVERFLOW;

	if (data && next_offs <= data_len)
		memcpy((uint8_t *)data + *offs, key + 1, key->key_size);
	(*offs) = next_offs;

	return TEE_SUCCESS;
}

static bool op_attr_secret_value_from_binary(void *attr, const void *data,
					     size_t data_len, size_t *offs)
{
	struct tee_cryp_obj_secret *key = attr;
	uint32_t s;

	if (!op_u32_from_binary_helper(&s, data, data_len, offs))
		return false;

	if ((*offs + s) > data_len)
		return false;

	/* Data size has to fit in allocated buffer */
	if (s > key->alloc_size)
		return false;
	key->key_size = s;
	memcpy(key + 1, (const uint8_t *)data + *offs, s);
	(*offs) += s;
	return true;
}

static TEE_Result op_attr_secret_value_from_obj(void *attr, void *src_attr)
{
	struct tee_cryp_obj_secret *key = attr;
	struct tee_cryp_obj_secret *src_key = src_attr;

	if (src_key->key_size > key->alloc_size)
		return TEE_ERROR_BAD_STATE;
	memcpy(key + 1, src_key + 1, src_key->key_size);
	key->key_size = src_key->key_size;
	return TEE_SUCCESS;
}

static void op_attr_secret_value_clear(void *attr)
{
	struct tee_cryp_obj_secret *key = attr;

	key->key_size = 0;
	memset(key + 1, 0, key->alloc_size);
}

static TEE_Result op_attr_bignum_from_user(void *attr, const void *buffer,
					   size_t size)
{
	struct bignum **bn = attr;

	return crypto_bignum_bin2bn(buffer, size, *bn);
}

static TEE_Result op_attr_bignum_to_user(void *attr,
					 struct tee_ta_session *sess,
					 void *buffer, uint64_t *size)
{
	TEE_Result res;
	struct bignum **bn = attr;
	uint64_t req_size;
	uint64_t s;

	res = tee_svc_copy_from_user(&s, size, sizeof(s));
	if (res != TEE_SUCCESS)
		return res;

	req_size = crypto_bignum_num_bytes(*bn);
	res = tee_svc_copy_to_user(size, &req_size, sizeof(req_size));
	if (res != TEE_SUCCESS)
		return res;
	if (!req_size)
		return TEE_SUCCESS;
	if (s < req_size || !buffer)
		return TEE_ERROR_SHORT_BUFFER;

	/* Check we can access data using supplied user mode pointer */
	res = tee_mmu_check_access_rights(to_user_ta_ctx(sess->ctx),
					  TEE_MEMORY_ACCESS_READ |
					  TEE_MEMORY_ACCESS_WRITE |
					  TEE_MEMORY_ACCESS_ANY_OWNER,
					  (uaddr_t)buffer, req_size);
	if (res != TEE_SUCCESS)
		return res;
	/*
	 * Write the bignum (which raw data points to) into an array of
	 * bytes (stored in buffer)
	 */
	crypto_bignum_bn2bin(*bn, buffer);
	return TEE_SUCCESS;
}
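/*
 * Illustrative sketch (editor's addition, kept under #if 0 so it is never
 * compiled): the op_u32_*_binary_helper() pair above serializes a uint32_t
 * as big-endian at a running offset, checking for offset overflow on write
 * and for truncation on read. The stand-alone program below mirrors that
 * round trip with portable byte shifts instead of
 * TEE_U32_TO_BIG_ENDIAN()/memcpy(); all demo_* names are invented.
 */
#if 0
#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static bool demo_put_u32(uint32_t v, uint8_t *buf, size_t len, size_t *offs)
{
	if (len < 4 || *offs > len - 4)	/* would overflow the buffer */
		return false;
	buf[*offs + 0] = v >> 24;	/* most significant byte first */
	buf[*offs + 1] = v >> 16;
	buf[*offs + 2] = v >> 8;
	buf[*offs + 3] = v;
	*offs += 4;
	return true;
}

static bool demo_get_u32(uint32_t *v, const uint8_t *buf, size_t len,
			 size_t *offs)
{
	if (len < 4 || *offs > len - 4)	/* would read past the buffer */
		return false;
	*v = ((uint32_t)buf[*offs] << 24) | ((uint32_t)buf[*offs + 1] << 16) |
	     ((uint32_t)buf[*offs + 2] << 8) | buf[*offs + 3];
	*offs += 4;
	return true;
}

int main(void)
{
	uint8_t buf[8] = { 0 };
	size_t w = 0;
	size_t r = 0;
	uint32_t v = 0;

	assert(demo_put_u32(0x11223344, buf, sizeof(buf), &w));
	assert(demo_get_u32(&v, buf, sizeof(buf), &r) && v == 0x11223344);
	return 0;
}
#endif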
static TEE_Result op_attr_bignum_to_binary(void *attr, void *data,
					   size_t data_len, size_t *offs)
{
	TEE_Result res;
	struct bignum **bn = attr;
	uint32_t n = crypto_bignum_num_bytes(*bn);
	size_t next_offs;

	res = op_u32_to_binary_helper(n, data, data_len, offs);
	if (res != TEE_SUCCESS)
		return res;

	if (ADD_OVERFLOW(*offs, n, &next_offs))
		return TEE_ERROR_OVERFLOW;

	if (data && next_offs <= data_len)
		crypto_bignum_bn2bin(*bn, (uint8_t *)data + *offs);
	(*offs) = next_offs;

	return TEE_SUCCESS;
}

static bool op_attr_bignum_from_binary(void *attr, const void *data,
				       size_t data_len, size_t *offs)
{
	struct bignum **bn = attr;
	uint32_t n;

	if (!op_u32_from_binary_helper(&n, data, data_len, offs))
		return false;

	if ((*offs + n) > data_len)
		return false;
	if (crypto_bignum_bin2bn((const uint8_t *)data + *offs, n, *bn))
		return false;
	(*offs) += n;
	return true;
}

static TEE_Result op_attr_bignum_from_obj(void *attr, void *src_attr)
{
	struct bignum **bn = attr;
	struct bignum **src_bn = src_attr;

	crypto_bignum_copy(*bn, *src_bn);
	return TEE_SUCCESS;
}

static void op_attr_bignum_clear(void *attr)
{
	struct bignum **bn = attr;

	crypto_bignum_clear(*bn);
}

static void op_attr_bignum_free(void *attr)
{
	struct bignum **bn = attr;

	crypto_bignum_free(*bn);
	*bn = NULL;
}

static TEE_Result op_attr_value_from_user(void *attr, const void *buffer,
					  size_t size)
{
	uint32_t *v = attr;

	if (size != sizeof(uint32_t) * 2)
		return TEE_ERROR_GENERIC; /* "can't happen" */

	/* Note that only the first value is copied */
	memcpy(v, buffer, sizeof(uint32_t));
	return TEE_SUCCESS;
}

static TEE_Result op_attr_value_to_user(void *attr,
					struct tee_ta_session *sess __unused,
					void *buffer, uint64_t *size)
{
	TEE_Result res;
	uint32_t *v = attr;
	uint64_t s;
	uint32_t value[2] = { *v };
	uint64_t req_size = sizeof(value);

	res = tee_svc_copy_from_user(&s, size, sizeof(s));
	if (res != TEE_SUCCESS)
		return res;

	if (s < req_size || !buffer)
		return TEE_ERROR_SHORT_BUFFER;

	return tee_svc_copy_to_user(buffer, value, req_size);
}

static TEE_Result op_attr_value_to_binary(void *attr, void *data,
					  size_t data_len, size_t *offs)
{
	uint32_t *v = attr;

	return op_u32_to_binary_helper(*v, data, data_len, offs);
}

static bool op_attr_value_from_binary(void *attr, const void *data,
				      size_t data_len, size_t *offs)
{
	uint32_t *v = attr;

	return op_u32_from_binary_helper(v, data, data_len, offs);
}

static TEE_Result op_attr_value_from_obj(void *attr, void *src_attr)
{
	uint32_t *v = attr;
	uint32_t *src_v = src_attr;

	*v = *src_v;
	return TEE_SUCCESS;
}

static void op_attr_value_clear(void *attr)
{
	uint32_t *v = attr;

	*v = 0;
}

static const struct attr_ops attr_ops[] = {
	[ATTR_OPS_INDEX_SECRET] = {
		.from_user = op_attr_secret_value_from_user,
		.to_user = op_attr_secret_value_to_user,
		.to_binary = op_attr_secret_value_to_binary,
		.from_binary = op_attr_secret_value_from_binary,
		.from_obj = op_attr_secret_value_from_obj,
		.free = op_attr_secret_value_clear, /* not a typo */
		.clear = op_attr_secret_value_clear,
	},
	[ATTR_OPS_INDEX_BIGNUM] = {
		.from_user = op_attr_bignum_from_user,
		.to_user = op_attr_bignum_to_user,
		.to_binary = op_attr_bignum_to_binary,
		.from_binary = op_attr_bignum_from_binary,
		.from_obj = op_attr_bignum_from_obj,
		.free = op_attr_bignum_free,
		.clear = op_attr_bignum_clear,
	},
	[ATTR_OPS_INDEX_VALUE] = {
		.from_user = op_attr_value_from_user,
		.to_user = op_attr_value_to_user,
		.to_binary = op_attr_value_to_binary,
		.from_binary = op_attr_value_from_binary,
		.from_obj = op_attr_value_from_obj,
		.free = op_attr_value_clear, /* not a typo */
		.clear = op_attr_value_clear,
	},
};

TEE_Result syscall_cryp_obj_get_info(unsigned long obj, TEE_ObjectInfo *info)
{
	TEE_Result res;
	struct tee_ta_session *sess;
	struct tee_obj *o;

	res =
tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) goto exit; res = tee_obj_get(to_user_ta_ctx(sess->ctx), tee_svc_uref_to_vaddr(obj), &o); if (res != TEE_SUCCESS) goto exit; res = tee_svc_copy_to_user(info, &o->info, sizeof(o->info)); exit: return res; } TEE_Result syscall_cryp_obj_restrict_usage(unsigned long obj, unsigned long usage) { TEE_Result res; struct tee_ta_session *sess; struct tee_obj *o; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) goto exit; res = tee_obj_get(to_user_ta_ctx(sess->ctx), tee_svc_uref_to_vaddr(obj), &o); if (res != TEE_SUCCESS) goto exit; o->info.objectUsage &= usage; exit: return res; } static int tee_svc_cryp_obj_find_type_attr_idx( uint32_t attr_id, const struct tee_cryp_obj_type_props *type_props) { size_t n; for (n = 0; n < type_props->num_type_attrs; n++) { if (attr_id == type_props->type_attrs[n].attr_id) return n; } return -1; } static const struct tee_cryp_obj_type_props *tee_svc_find_type_props( TEE_ObjectType obj_type) { size_t n; for (n = 0; n < ARRAY_SIZE(tee_cryp_obj_props); n++) { if (tee_cryp_obj_props[n].obj_type == obj_type) return tee_cryp_obj_props + n; } return NULL; } /* Set an attribute on an object */ static void set_attribute(struct tee_obj *o, const struct tee_cryp_obj_type_props *props, uint32_t attr) { int idx = tee_svc_cryp_obj_find_type_attr_idx(attr, props); if (idx < 0) return; o->have_attrs |= BIT(idx); } /* Get an attribute on an object */ static uint32_t get_attribute(const struct tee_obj *o, const struct tee_cryp_obj_type_props *props, uint32_t attr) { int idx = tee_svc_cryp_obj_find_type_attr_idx(attr, props); if (idx < 0) return 0; return o->have_attrs & BIT(idx); } TEE_Result syscall_cryp_obj_get_attr(unsigned long obj, unsigned long attr_id, void *buffer, uint64_t *size) { TEE_Result res; struct tee_ta_session *sess; struct tee_obj *o; const struct tee_cryp_obj_type_props *type_props; int idx; const struct attr_ops *ops; void *attr; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; res = tee_obj_get(to_user_ta_ctx(sess->ctx), tee_svc_uref_to_vaddr(obj), &o); if (res != TEE_SUCCESS) return TEE_ERROR_ITEM_NOT_FOUND; /* Check that the object is initialized */ if (!(o->info.handleFlags & TEE_HANDLE_FLAG_INITIALIZED)) return TEE_ERROR_BAD_PARAMETERS; /* Check that getting the attribute is allowed */ if (!(attr_id & TEE_ATTR_BIT_PROTECTED) && !(o->info.objectUsage & TEE_USAGE_EXTRACTABLE)) return TEE_ERROR_BAD_PARAMETERS; type_props = tee_svc_find_type_props(o->info.objectType); if (!type_props) { /* Unknown object type, "can't happen" */ return TEE_ERROR_BAD_STATE; } idx = tee_svc_cryp_obj_find_type_attr_idx(attr_id, type_props); if ((idx < 0) || ((o->have_attrs & (1 << idx)) == 0)) return TEE_ERROR_ITEM_NOT_FOUND; ops = attr_ops + type_props->type_attrs[idx].ops_index; attr = (uint8_t *)o->attr + type_props->type_attrs[idx].raw_offs; return ops->to_user(attr, sess, buffer, size); } void tee_obj_attr_free(struct tee_obj *o) { const struct tee_cryp_obj_type_props *tp; size_t n; if (!o->attr) return; tp = tee_svc_find_type_props(o->info.objectType); if (!tp) return; for (n = 0; n < tp->num_type_attrs; n++) { const struct tee_cryp_obj_type_attrs *ta = tp->type_attrs + n; attr_ops[ta->ops_index].free((uint8_t *)o->attr + ta->raw_offs); } } void tee_obj_attr_clear(struct tee_obj *o) { const struct tee_cryp_obj_type_props *tp; size_t n; if (!o->attr) return; tp = tee_svc_find_type_props(o->info.objectType); if (!tp) return; for (n = 0; n < tp->num_type_attrs; n++) { const struct 
tee_cryp_obj_type_attrs *ta = tp->type_attrs + n; attr_ops[ta->ops_index].clear((uint8_t *)o->attr + ta->raw_offs); } } TEE_Result tee_obj_attr_to_binary(struct tee_obj *o, void *data, size_t *data_len) { const struct tee_cryp_obj_type_props *tp; size_t n; size_t offs = 0; size_t len = data ? *data_len : 0; TEE_Result res; if (o->info.objectType == TEE_TYPE_DATA) { *data_len = 0; return TEE_SUCCESS; /* pure data object */ } if (!o->attr) return TEE_ERROR_BAD_STATE; tp = tee_svc_find_type_props(o->info.objectType); if (!tp) return TEE_ERROR_BAD_STATE; for (n = 0; n < tp->num_type_attrs; n++) { const struct tee_cryp_obj_type_attrs *ta = tp->type_attrs + n; void *attr = (uint8_t *)o->attr + ta->raw_offs; res = attr_ops[ta->ops_index].to_binary(attr, data, len, &offs); if (res != TEE_SUCCESS) return res; } *data_len = offs; if (data && offs > len) return TEE_ERROR_SHORT_BUFFER; return TEE_SUCCESS; } TEE_Result tee_obj_attr_from_binary(struct tee_obj *o, const void *data, size_t data_len) { const struct tee_cryp_obj_type_props *tp; size_t n; size_t offs = 0; if (o->info.objectType == TEE_TYPE_DATA) return TEE_SUCCESS; /* pure data object */ if (!o->attr) return TEE_ERROR_BAD_STATE; tp = tee_svc_find_type_props(o->info.objectType); if (!tp) return TEE_ERROR_BAD_STATE; for (n = 0; n < tp->num_type_attrs; n++) { const struct tee_cryp_obj_type_attrs *ta = tp->type_attrs + n; void *attr = (uint8_t *)o->attr + ta->raw_offs; if (!attr_ops[ta->ops_index].from_binary(attr, data, data_len, &offs)) return TEE_ERROR_CORRUPT_OBJECT; } return TEE_SUCCESS; } TEE_Result tee_obj_attr_copy_from(struct tee_obj *o, const struct tee_obj *src) { TEE_Result res; const struct tee_cryp_obj_type_props *tp; const struct tee_cryp_obj_type_attrs *ta; size_t n; uint32_t have_attrs = 0; void *attr; void *src_attr; if (o->info.objectType == TEE_TYPE_DATA) return TEE_SUCCESS; /* pure data object */ if (!o->attr) return TEE_ERROR_BAD_STATE; tp = tee_svc_find_type_props(o->info.objectType); if (!tp) return TEE_ERROR_BAD_STATE; if (o->info.objectType == src->info.objectType) { have_attrs = src->have_attrs; for (n = 0; n < tp->num_type_attrs; n++) { ta = tp->type_attrs + n; attr = (uint8_t *)o->attr + ta->raw_offs; src_attr = (uint8_t *)src->attr + ta->raw_offs; res = attr_ops[ta->ops_index].from_obj(attr, src_attr); if (res != TEE_SUCCESS) return res; } } else { const struct tee_cryp_obj_type_props *tp_src; int idx; if (o->info.objectType == TEE_TYPE_RSA_PUBLIC_KEY) { if (src->info.objectType != TEE_TYPE_RSA_KEYPAIR) return TEE_ERROR_BAD_PARAMETERS; } else if (o->info.objectType == TEE_TYPE_DSA_PUBLIC_KEY) { if (src->info.objectType != TEE_TYPE_DSA_KEYPAIR) return TEE_ERROR_BAD_PARAMETERS; } else if (o->info.objectType == TEE_TYPE_ECDSA_PUBLIC_KEY) { if (src->info.objectType != TEE_TYPE_ECDSA_KEYPAIR) return TEE_ERROR_BAD_PARAMETERS; } else if (o->info.objectType == TEE_TYPE_ECDH_PUBLIC_KEY) { if (src->info.objectType != TEE_TYPE_ECDH_KEYPAIR) return TEE_ERROR_BAD_PARAMETERS; } else { return TEE_ERROR_BAD_PARAMETERS; } tp_src = tee_svc_find_type_props(src->info.objectType); if (!tp_src) return TEE_ERROR_BAD_STATE; have_attrs = BIT32(tp->num_type_attrs) - 1; for (n = 0; n < tp->num_type_attrs; n++) { ta = tp->type_attrs + n; idx = tee_svc_cryp_obj_find_type_attr_idx(ta->attr_id, tp_src); if (idx < 0) return TEE_ERROR_BAD_STATE; attr = (uint8_t *)o->attr + ta->raw_offs; src_attr = (uint8_t *)src->attr + tp_src->type_attrs[idx].raw_offs; res = attr_ops[ta->ops_index].from_obj(attr, src_attr); if (res != TEE_SUCCESS) return res; } } 
o->have_attrs = have_attrs; return TEE_SUCCESS; } TEE_Result tee_obj_set_type(struct tee_obj *o, uint32_t obj_type, size_t max_key_size) { TEE_Result res = TEE_SUCCESS; const struct tee_cryp_obj_type_props *type_props; /* Can only set type for newly allocated objs */ if (o->attr) return TEE_ERROR_BAD_STATE; /* * Verify that maxKeySize is supported and find out how * much should be allocated. */ if (obj_type == TEE_TYPE_DATA) { if (max_key_size) return TEE_ERROR_NOT_SUPPORTED; } else { /* Find description of object */ type_props = tee_svc_find_type_props(obj_type); if (!type_props) return TEE_ERROR_NOT_SUPPORTED; /* Check that maxKeySize follows restrictions */ if (max_key_size % type_props->quanta != 0) return TEE_ERROR_NOT_SUPPORTED; if (max_key_size < type_props->min_size) return TEE_ERROR_NOT_SUPPORTED; if (max_key_size > type_props->max_size) return TEE_ERROR_NOT_SUPPORTED; o->attr = calloc(1, type_props->alloc_size); if (!o->attr) return TEE_ERROR_OUT_OF_MEMORY; } /* If we have a key structure, pre-allocate the bignums inside */ switch (obj_type) { case TEE_TYPE_RSA_PUBLIC_KEY: res = crypto_acipher_alloc_rsa_public_key(o->attr, max_key_size); break; case TEE_TYPE_RSA_KEYPAIR: res = crypto_acipher_alloc_rsa_keypair(o->attr, max_key_size); break; case TEE_TYPE_DSA_PUBLIC_KEY: res = crypto_acipher_alloc_dsa_public_key(o->attr, max_key_size); break; case TEE_TYPE_DSA_KEYPAIR: res = crypto_acipher_alloc_dsa_keypair(o->attr, max_key_size); break; case TEE_TYPE_DH_KEYPAIR: res = crypto_acipher_alloc_dh_keypair(o->attr, max_key_size); break; case TEE_TYPE_ECDSA_PUBLIC_KEY: case TEE_TYPE_ECDH_PUBLIC_KEY: res = crypto_acipher_alloc_ecc_public_key(o->attr, max_key_size); break; case TEE_TYPE_ECDSA_KEYPAIR: case TEE_TYPE_ECDH_KEYPAIR: res = crypto_acipher_alloc_ecc_keypair(o->attr, max_key_size); break; default: if (obj_type != TEE_TYPE_DATA) { struct tee_cryp_obj_secret *key = o->attr; key->alloc_size = type_props->alloc_size - sizeof(*key); } break; } if (res != TEE_SUCCESS) return res; o->info.objectType = obj_type; o->info.maxKeySize = max_key_size; o->info.objectUsage = TEE_USAGE_DEFAULT; return TEE_SUCCESS; } TEE_Result syscall_cryp_obj_alloc(unsigned long obj_type, unsigned long max_key_size, uint32_t *obj) { TEE_Result res; struct tee_ta_session *sess; struct tee_obj *o; if (obj_type == TEE_TYPE_DATA) return TEE_ERROR_NOT_SUPPORTED; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; o = tee_obj_alloc(); if (!o) return TEE_ERROR_OUT_OF_MEMORY; res = tee_obj_set_type(o, obj_type, max_key_size); if (res != TEE_SUCCESS) { tee_obj_free(o); return res; } tee_obj_add(to_user_ta_ctx(sess->ctx), o); res = tee_svc_copy_kaddr_to_uref(obj, o); if (res != TEE_SUCCESS) tee_obj_close(to_user_ta_ctx(sess->ctx), o); return res; } TEE_Result syscall_cryp_obj_close(unsigned long obj) { TEE_Result res; struct tee_ta_session *sess; struct tee_obj *o; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; res = tee_obj_get(to_user_ta_ctx(sess->ctx), tee_svc_uref_to_vaddr(obj), &o); if (res != TEE_SUCCESS) return res; /* * If it's busy it's used by an operation, a client should never have * this handle. 
 */
	if (o->busy)
		return TEE_ERROR_ITEM_NOT_FOUND;

	tee_obj_close(to_user_ta_ctx(sess->ctx), o);
	return TEE_SUCCESS;
}

TEE_Result syscall_cryp_obj_reset(unsigned long obj)
{
	TEE_Result res;
	struct tee_ta_session *sess;
	struct tee_obj *o;

	res = tee_ta_get_current_session(&sess);
	if (res != TEE_SUCCESS)
		return res;

	res = tee_obj_get(to_user_ta_ctx(sess->ctx),
			  tee_svc_uref_to_vaddr(obj), &o);
	if (res != TEE_SUCCESS)
		return res;

	if ((o->info.handleFlags & TEE_HANDLE_FLAG_PERSISTENT) == 0) {
		tee_obj_attr_clear(o);
		o->info.keySize = 0;
		o->info.objectUsage = TEE_USAGE_DEFAULT;
	} else {
		return TEE_ERROR_BAD_PARAMETERS;
	}

	/* the object is no longer initialized */
	o->info.handleFlags &= ~TEE_HANDLE_FLAG_INITIALIZED;

	return TEE_SUCCESS;
}

static TEE_Result copy_in_attrs(struct user_ta_ctx *utc,
				const struct utee_attribute *usr_attrs,
				uint32_t attr_count, TEE_Attribute *attrs)
{
	TEE_Result res;
	uint32_t n;

	res = tee_mmu_check_access_rights(utc,
					  TEE_MEMORY_ACCESS_READ |
					  TEE_MEMORY_ACCESS_ANY_OWNER,
					  (uaddr_t)usr_attrs,
					  attr_count *
					  sizeof(struct utee_attribute));
	if (res != TEE_SUCCESS)
		return res;

	for (n = 0; n < attr_count; n++) {
		attrs[n].attributeID = usr_attrs[n].attribute_id;
		if (attrs[n].attributeID & TEE_ATTR_BIT_VALUE) {
			attrs[n].content.value.a = usr_attrs[n].a;
			attrs[n].content.value.b = usr_attrs[n].b;
		} else {
			uintptr_t buf = usr_attrs[n].a;
			size_t len = usr_attrs[n].b;

			res = tee_mmu_check_access_rights(utc,
				TEE_MEMORY_ACCESS_READ |
				TEE_MEMORY_ACCESS_ANY_OWNER, buf, len);
			if (res != TEE_SUCCESS)
				return res;
			attrs[n].content.ref.buffer = (void *)buf;
			attrs[n].content.ref.length = len;
		}
	}

	return TEE_SUCCESS;
}

enum attr_usage {
	ATTR_USAGE_POPULATE,
	ATTR_USAGE_GENERATE_KEY
};

static TEE_Result tee_svc_cryp_check_attr(enum attr_usage usage,
		const struct tee_cryp_obj_type_props *type_props,
		const TEE_Attribute *attrs, uint32_t attr_count)
{
	uint32_t required_flag;
	uint32_t opt_flag;
	bool all_opt_needed;
	uint32_t req_attrs = 0;
	uint32_t opt_grp_attrs = 0;
	uint32_t attrs_found = 0;
	size_t n;
	uint32_t bit;
	uint32_t flags;
	int idx;

	if (usage == ATTR_USAGE_POPULATE) {
		required_flag = TEE_TYPE_ATTR_REQUIRED;
		opt_flag = TEE_TYPE_ATTR_OPTIONAL_GROUP;
		all_opt_needed = true;
	} else {
		required_flag = TEE_TYPE_ATTR_GEN_KEY_REQ;
		opt_flag = TEE_TYPE_ATTR_GEN_KEY_OPT;
		all_opt_needed = false;
	}

	/*
	 * First find out which attributes are required and which belong to
	 * the optional group
	 */
	for (n = 0; n < type_props->num_type_attrs; n++) {
		bit = 1 << n;
		flags = type_props->type_attrs[n].flags;

		if (flags & required_flag)
			req_attrs |= bit;
		else if (flags & opt_flag)
			opt_grp_attrs |= bit;
	}

	/*
	 * Verify that all required attributes are in place and
	 * that the same attribute isn't repeated.
	 */
	for (n = 0; n < attr_count; n++) {
		idx = tee_svc_cryp_obj_find_type_attr_idx(
						attrs[n].attributeID,
						type_props);

		/* attribute not defined in current object type */
		if (idx < 0)
			return TEE_ERROR_ITEM_NOT_FOUND;

		bit = 1 << idx;

		/* attribute not repeated */
		if ((attrs_found & bit) != 0)
			return TEE_ERROR_ITEM_NOT_FOUND;

		attrs_found |= bit;
	}

	/* Required attribute missing */
	if ((attrs_found & req_attrs) != req_attrs)
		return TEE_ERROR_ITEM_NOT_FOUND;

	/*
	 * If the flag says that "if one of the optional attributes is
	 * included, all of them have to be included", this must be checked.
	 */
	if (all_opt_needed &&
	    (attrs_found & opt_grp_attrs) != 0 &&
	    (attrs_found & opt_grp_attrs) != opt_grp_attrs)
		return TEE_ERROR_ITEM_NOT_FOUND;

	return TEE_SUCCESS;
}
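/*
 * Worked example (editor's addition): for TEE_TYPE_RSA_KEYPAIR the five
 * CRT attributes (p, q, dp, dq, qp) carry TEE_TYPE_ATTR_OPTIONAL_GROUP,
 * so with populate usage opt_grp_attrs covers exactly those bit
 * positions (bits 3..7 given the table order n, e, d, p, q, dp, dq, qp).
 * Supplying only TEE_ATTR_RSA_PRIME1 makes (attrs_found & opt_grp_attrs)
 * non-zero yet unequal to opt_grp_attrs, which tee_svc_cryp_check_attr()
 * above rejects. The #if 0 sketch (never compiled, invented names)
 * restates that all-or-nothing test.
 */
#if 0
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static bool demo_opt_group_ok(uint32_t found, uint32_t opt_grp)
{
	/* Either none of the group is present, or all of it is */
	return (found & opt_grp) == 0 || (found & opt_grp) == opt_grp;
}

int main(void)
{
	uint32_t opt_grp = 0x1f << 3;	/* bits 3..7: p, q, dp, dq, qp */

	assert(demo_opt_group_ok(0x07, opt_grp));		/* n, e, d only */
	assert(demo_opt_group_ok(0x07 | opt_grp, opt_grp));	/* full CRT */
	assert(!demo_opt_group_ok(0x07 | (1u << 3), opt_grp));	/* p alone */
	return 0;
}
#endif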
static TEE_Result get_ec_key_size(uint32_t curve, size_t *key_size)
{
	switch (curve) {
	case TEE_ECC_CURVE_NIST_P192:
		*key_size = 192;
		break;
	case TEE_ECC_CURVE_NIST_P224:
		*key_size = 224;
		break;
	case TEE_ECC_CURVE_NIST_P256:
		*key_size = 256;
		break;
	case TEE_ECC_CURVE_NIST_P384:
		*key_size = 384;
		break;
	case TEE_ECC_CURVE_NIST_P521:
		*key_size = 521;
		break;
	default:
		return TEE_ERROR_NOT_SUPPORTED;
	}

	return TEE_SUCCESS;
}

static TEE_Result tee_svc_cryp_obj_populate_type(struct tee_obj *o,
		const struct tee_cryp_obj_type_props *type_props,
		const TEE_Attribute *attrs, uint32_t attr_count)
{
	TEE_Result res;
	uint32_t have_attrs = 0;
	size_t obj_size = 0;
	size_t n;
	int idx;
	const struct attr_ops *ops;
	void *attr;

	for (n = 0; n < attr_count; n++) {
		idx = tee_svc_cryp_obj_find_type_attr_idx(
						attrs[n].attributeID,
						type_props);

		/* attribute not defined in current object type */
		if (idx < 0)
			return TEE_ERROR_ITEM_NOT_FOUND;

		have_attrs |= BIT32(idx);
		ops = attr_ops + type_props->type_attrs[idx].ops_index;
		attr = (uint8_t *)o->attr +
		       type_props->type_attrs[idx].raw_offs;
		if (attrs[n].attributeID & TEE_ATTR_BIT_VALUE)
			res = ops->from_user(attr, &attrs[n].content.value,
					     sizeof(attrs[n].content.value));
		else
			res = ops->from_user(attr, attrs[n].content.ref.buffer,
					     attrs[n].content.ref.length);
		if (res != TEE_SUCCESS)
			return res;

		/*
		 * First attr_idx signifies the attribute that gives the
		 * size of the object
		 */
		if (type_props->type_attrs[idx].flags &
		    TEE_TYPE_ATTR_SIZE_INDICATOR) {
			/*
			 * For ECDSA/ECDH we need to translate curve into
			 * object size
			 */
			if (attrs[n].attributeID == TEE_ATTR_ECC_CURVE) {
				res = get_ec_key_size(attrs[n].content.value.a,
						      &obj_size);
				if (res != TEE_SUCCESS)
					return res;
			} else {
				obj_size += (attrs[n].content.ref.length * 8);
			}
		}
	}

	/*
	 * We have to do it like this because the parity bits aren't
	 * counted when telling the size of the key in bits.
*/ if (o->info.objectType == TEE_TYPE_DES || o->info.objectType == TEE_TYPE_DES3) obj_size -= obj_size / 8; /* Exclude parity in size of key */ o->have_attrs = have_attrs; o->info.keySize = obj_size; return TEE_SUCCESS; } TEE_Result syscall_cryp_obj_populate(unsigned long obj, struct utee_attribute *usr_attrs, unsigned long attr_count) { TEE_Result res; struct tee_ta_session *sess; struct tee_obj *o; const struct tee_cryp_obj_type_props *type_props; TEE_Attribute *attrs = NULL; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; res = tee_obj_get(to_user_ta_ctx(sess->ctx), tee_svc_uref_to_vaddr(obj), &o); if (res != TEE_SUCCESS) return res; /* Must be a transient object */ if ((o->info.handleFlags & TEE_HANDLE_FLAG_PERSISTENT) != 0) return TEE_ERROR_BAD_PARAMETERS; /* Must not be initialized already */ if ((o->info.handleFlags & TEE_HANDLE_FLAG_INITIALIZED) != 0) return TEE_ERROR_BAD_PARAMETERS; type_props = tee_svc_find_type_props(o->info.objectType); if (!type_props) return TEE_ERROR_NOT_IMPLEMENTED; attrs = malloc(sizeof(TEE_Attribute) * attr_count); if (!attrs) return TEE_ERROR_OUT_OF_MEMORY; res = copy_in_attrs(to_user_ta_ctx(sess->ctx), usr_attrs, attr_count, attrs); if (res != TEE_SUCCESS) goto out; res = tee_svc_cryp_check_attr(ATTR_USAGE_POPULATE, type_props, attrs, attr_count); if (res != TEE_SUCCESS) goto out; res = tee_svc_cryp_obj_populate_type(o, type_props, attrs, attr_count); if (res == TEE_SUCCESS) o->info.handleFlags |= TEE_HANDLE_FLAG_INITIALIZED; out: free(attrs); return res; } TEE_Result syscall_cryp_obj_copy(unsigned long dst, unsigned long src) { TEE_Result res; struct tee_ta_session *sess; struct tee_obj *dst_o; struct tee_obj *src_o; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; res = tee_obj_get(to_user_ta_ctx(sess->ctx), tee_svc_uref_to_vaddr(dst), &dst_o); if (res != TEE_SUCCESS) return res; res = tee_obj_get(to_user_ta_ctx(sess->ctx), tee_svc_uref_to_vaddr(src), &src_o); if (res != TEE_SUCCESS) return res; if ((src_o->info.handleFlags & TEE_HANDLE_FLAG_INITIALIZED) == 0) return TEE_ERROR_BAD_PARAMETERS; if ((dst_o->info.handleFlags & TEE_HANDLE_FLAG_PERSISTENT) != 0) return TEE_ERROR_BAD_PARAMETERS; if ((dst_o->info.handleFlags & TEE_HANDLE_FLAG_INITIALIZED) != 0) return TEE_ERROR_BAD_PARAMETERS; res = tee_obj_attr_copy_from(dst_o, src_o); if (res != TEE_SUCCESS) return res; dst_o->info.handleFlags |= TEE_HANDLE_FLAG_INITIALIZED; dst_o->info.keySize = src_o->info.keySize; dst_o->info.objectUsage = src_o->info.objectUsage; return TEE_SUCCESS; } static TEE_Result tee_svc_obj_generate_key_rsa( struct tee_obj *o, const struct tee_cryp_obj_type_props *type_props, uint32_t key_size, const TEE_Attribute *params, uint32_t param_count) { TEE_Result res; struct rsa_keypair *key = o->attr; uint32_t e = TEE_U32_TO_BIG_ENDIAN(65537); /* Copy the present attributes into the obj before starting */ res = tee_svc_cryp_obj_populate_type(o, type_props, params, param_count); if (res != TEE_SUCCESS) return res; if (!get_attribute(o, type_props, TEE_ATTR_RSA_PUBLIC_EXPONENT)) crypto_bignum_bin2bn((const uint8_t *)&e, sizeof(e), key->e); res = crypto_acipher_gen_rsa_key(key, key_size); if (res != TEE_SUCCESS) return res; /* Set bits for all known attributes for this object type */ o->have_attrs = (1 << type_props->num_type_attrs) - 1; return TEE_SUCCESS; } static TEE_Result tee_svc_obj_generate_key_dsa( struct tee_obj *o, const struct tee_cryp_obj_type_props *type_props, uint32_t key_size) { TEE_Result res; res = 
crypto_acipher_gen_dsa_key(o->attr, key_size); if (res != TEE_SUCCESS) return res; /* Set bits for all known attributes for this object type */ o->have_attrs = (1 << type_props->num_type_attrs) - 1; return TEE_SUCCESS; } static TEE_Result tee_svc_obj_generate_key_dh( struct tee_obj *o, const struct tee_cryp_obj_type_props *type_props, uint32_t key_size __unused, const TEE_Attribute *params, uint32_t param_count) { TEE_Result res; struct dh_keypair *tee_dh_key; struct bignum *dh_q = NULL; uint32_t dh_xbits = 0; /* Copy the present attributes into the obj before starting */ res = tee_svc_cryp_obj_populate_type(o, type_props, params, param_count); if (res != TEE_SUCCESS) return res; tee_dh_key = (struct dh_keypair *)o->attr; if (get_attribute(o, type_props, TEE_ATTR_DH_SUBPRIME)) dh_q = tee_dh_key->q; if (get_attribute(o, type_props, TEE_ATTR_DH_X_BITS)) dh_xbits = tee_dh_key->xbits; res = crypto_acipher_gen_dh_key(tee_dh_key, dh_q, dh_xbits); if (res != TEE_SUCCESS) return res; /* Set bits for the generated public and private key */ set_attribute(o, type_props, TEE_ATTR_DH_PUBLIC_VALUE); set_attribute(o, type_props, TEE_ATTR_DH_PRIVATE_VALUE); set_attribute(o, type_props, TEE_ATTR_DH_X_BITS); return TEE_SUCCESS; } static TEE_Result tee_svc_obj_generate_key_ecc( struct tee_obj *o, const struct tee_cryp_obj_type_props *type_props, uint32_t key_size __unused, const TEE_Attribute *params, uint32_t param_count) { TEE_Result res; struct ecc_keypair *tee_ecc_key; /* Copy the present attributes into the obj before starting */ res = tee_svc_cryp_obj_populate_type(o, type_props, params, param_count); if (res != TEE_SUCCESS) return res; tee_ecc_key = (struct ecc_keypair *)o->attr; res = crypto_acipher_gen_ecc_key(tee_ecc_key); if (res != TEE_SUCCESS) return res; /* Set bits for the generated public and private key */ set_attribute(o, type_props, TEE_ATTR_ECC_PRIVATE_VALUE); set_attribute(o, type_props, TEE_ATTR_ECC_PUBLIC_VALUE_X); set_attribute(o, type_props, TEE_ATTR_ECC_PUBLIC_VALUE_Y); set_attribute(o, type_props, TEE_ATTR_ECC_CURVE); return TEE_SUCCESS; } TEE_Result syscall_obj_generate_key(unsigned long obj, unsigned long key_size, const struct utee_attribute *usr_params, unsigned long param_count) { TEE_Result res; struct tee_ta_session *sess; const struct tee_cryp_obj_type_props *type_props; struct tee_obj *o; struct tee_cryp_obj_secret *key; size_t byte_size; TEE_Attribute *params = NULL; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; res = tee_obj_get(to_user_ta_ctx(sess->ctx), tee_svc_uref_to_vaddr(obj), &o); if (res != TEE_SUCCESS) return res; /* Must be a transient object */ if ((o->info.handleFlags & TEE_HANDLE_FLAG_PERSISTENT) != 0) return TEE_ERROR_BAD_STATE; /* Must not be initialized already */ if ((o->info.handleFlags & TEE_HANDLE_FLAG_INITIALIZED) != 0) return TEE_ERROR_BAD_STATE; /* Find description of object */ type_props = tee_svc_find_type_props(o->info.objectType); if (!type_props) return TEE_ERROR_NOT_SUPPORTED; /* Check that maxKeySize follows restrictions */ if (key_size % type_props->quanta != 0) return TEE_ERROR_NOT_SUPPORTED; if (key_size < type_props->min_size) return TEE_ERROR_NOT_SUPPORTED; if (key_size > type_props->max_size) return TEE_ERROR_NOT_SUPPORTED; params = malloc(sizeof(TEE_Attribute) * param_count); if (!params) return TEE_ERROR_OUT_OF_MEMORY; res = copy_in_attrs(to_user_ta_ctx(sess->ctx), usr_params, param_count, params); if (res != TEE_SUCCESS) goto out; res = tee_svc_cryp_check_attr(ATTR_USAGE_GENERATE_KEY, type_props, 
params, param_count); if (res != TEE_SUCCESS) goto out; switch (o->info.objectType) { case TEE_TYPE_AES: case TEE_TYPE_DES: case TEE_TYPE_DES3: case TEE_TYPE_HMAC_MD5: case TEE_TYPE_HMAC_SHA1: case TEE_TYPE_HMAC_SHA224: case TEE_TYPE_HMAC_SHA256: case TEE_TYPE_HMAC_SHA384: case TEE_TYPE_HMAC_SHA512: case TEE_TYPE_GENERIC_SECRET: byte_size = key_size / 8; /* * We have to do it like this because the parity bits aren't * counted when telling the size of the key in bits. */ if (o->info.objectType == TEE_TYPE_DES || o->info.objectType == TEE_TYPE_DES3) { byte_size = (key_size + key_size / 7) / 8; } key = (struct tee_cryp_obj_secret *)o->attr; if (byte_size > key->alloc_size) { res = TEE_ERROR_EXCESS_DATA; goto out; } res = crypto_rng_read((void *)(key + 1), byte_size); if (res != TEE_SUCCESS) goto out; key->key_size = byte_size; /* Set bits for all known attributes for this object type */ o->have_attrs = (1 << type_props->num_type_attrs) - 1; break; case TEE_TYPE_RSA_KEYPAIR: res = tee_svc_obj_generate_key_rsa(o, type_props, key_size, params, param_count); if (res != TEE_SUCCESS) goto out; break; case TEE_TYPE_DSA_KEYPAIR: res = tee_svc_obj_generate_key_dsa(o, type_props, key_size); if (res != TEE_SUCCESS) goto out; break; case TEE_TYPE_DH_KEYPAIR: res = tee_svc_obj_generate_key_dh(o, type_props, key_size, params, param_count); if (res != TEE_SUCCESS) goto out; break; case TEE_TYPE_ECDSA_KEYPAIR: case TEE_TYPE_ECDH_KEYPAIR: res = tee_svc_obj_generate_key_ecc(o, type_props, key_size, params, param_count); if (res != TEE_SUCCESS) goto out; break; default: res = TEE_ERROR_BAD_FORMAT; } out: free(params); if (res == TEE_SUCCESS) { o->info.keySize = key_size; o->info.handleFlags |= TEE_HANDLE_FLAG_INITIALIZED; } return res; } static TEE_Result tee_svc_cryp_get_state(struct tee_ta_session *sess, uint32_t state_id, struct tee_cryp_state **state) { struct tee_cryp_state *s; struct user_ta_ctx *utc = to_user_ta_ctx(sess->ctx); TAILQ_FOREACH(s, &utc->cryp_states, link) { if (state_id == (vaddr_t)s) { *state = s; return TEE_SUCCESS; } } return TEE_ERROR_BAD_PARAMETERS; } static void cryp_state_free(struct user_ta_ctx *utc, struct tee_cryp_state *cs) { struct tee_obj *o; if (tee_obj_get(utc, cs->key1, &o) == TEE_SUCCESS) tee_obj_close(utc, o); if (tee_obj_get(utc, cs->key2, &o) == TEE_SUCCESS) tee_obj_close(utc, o); TAILQ_REMOVE(&utc->cryp_states, cs, link); if (cs->ctx_finalize != NULL) cs->ctx_finalize(cs->ctx, cs->algo); switch (TEE_ALG_GET_CLASS(cs->algo)) { case TEE_OPERATION_CIPHER: crypto_cipher_free_ctx(cs->ctx, cs->algo); break; case TEE_OPERATION_AE: crypto_authenc_free_ctx(cs->ctx, cs->algo); break; case TEE_OPERATION_DIGEST: crypto_hash_free_ctx(cs->ctx, cs->algo); break; case TEE_OPERATION_MAC: crypto_mac_free_ctx(cs->ctx, cs->algo); break; default: assert(!cs->ctx); } free(cs); } static TEE_Result tee_svc_cryp_check_key_type(const struct tee_obj *o, uint32_t algo, TEE_OperationMode mode) { uint32_t req_key_type; uint32_t req_key_type2 = 0; switch (TEE_ALG_GET_MAIN_ALG(algo)) { case TEE_MAIN_ALGO_MD5: req_key_type = TEE_TYPE_HMAC_MD5; break; case TEE_MAIN_ALGO_SHA1: req_key_type = TEE_TYPE_HMAC_SHA1; break; case TEE_MAIN_ALGO_SHA224: req_key_type = TEE_TYPE_HMAC_SHA224; break; case TEE_MAIN_ALGO_SHA256: req_key_type = TEE_TYPE_HMAC_SHA256; break; case TEE_MAIN_ALGO_SHA384: req_key_type = TEE_TYPE_HMAC_SHA384; break; case TEE_MAIN_ALGO_SHA512: req_key_type = TEE_TYPE_HMAC_SHA512; break; case TEE_MAIN_ALGO_AES: req_key_type = TEE_TYPE_AES; break; case TEE_MAIN_ALGO_DES: req_key_type = 
TEE_TYPE_DES; break; case TEE_MAIN_ALGO_DES3: req_key_type = TEE_TYPE_DES3; break; case TEE_MAIN_ALGO_RSA: req_key_type = TEE_TYPE_RSA_KEYPAIR; if (mode == TEE_MODE_ENCRYPT || mode == TEE_MODE_VERIFY) req_key_type2 = TEE_TYPE_RSA_PUBLIC_KEY; break; case TEE_MAIN_ALGO_DSA: req_key_type = TEE_TYPE_DSA_KEYPAIR; if (mode == TEE_MODE_ENCRYPT || mode == TEE_MODE_VERIFY) req_key_type2 = TEE_TYPE_DSA_PUBLIC_KEY; break; case TEE_MAIN_ALGO_DH: req_key_type = TEE_TYPE_DH_KEYPAIR; break; case TEE_MAIN_ALGO_ECDSA: req_key_type = TEE_TYPE_ECDSA_KEYPAIR; if (mode == TEE_MODE_VERIFY) req_key_type2 = TEE_TYPE_ECDSA_PUBLIC_KEY; break; case TEE_MAIN_ALGO_ECDH: req_key_type = TEE_TYPE_ECDH_KEYPAIR; break; #if defined(CFG_CRYPTO_HKDF) case TEE_MAIN_ALGO_HKDF: req_key_type = TEE_TYPE_HKDF_IKM; break; #endif #if defined(CFG_CRYPTO_CONCAT_KDF) case TEE_MAIN_ALGO_CONCAT_KDF: req_key_type = TEE_TYPE_CONCAT_KDF_Z; break; #endif #if defined(CFG_CRYPTO_PBKDF2) case TEE_MAIN_ALGO_PBKDF2: req_key_type = TEE_TYPE_PBKDF2_PASSWORD; break; #endif default: return TEE_ERROR_BAD_PARAMETERS; } if (req_key_type != o->info.objectType && req_key_type2 != o->info.objectType) return TEE_ERROR_BAD_PARAMETERS; return TEE_SUCCESS; } TEE_Result syscall_cryp_state_alloc(unsigned long algo, unsigned long mode, unsigned long key1, unsigned long key2, uint32_t *state) { TEE_Result res; struct tee_cryp_state *cs; struct tee_ta_session *sess; struct tee_obj *o1 = NULL; struct tee_obj *o2 = NULL; struct user_ta_ctx *utc; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; utc = to_user_ta_ctx(sess->ctx); if (key1 != 0) { res = tee_obj_get(utc, tee_svc_uref_to_vaddr(key1), &o1); if (res != TEE_SUCCESS) return res; if (o1->busy) return TEE_ERROR_BAD_PARAMETERS; res = tee_svc_cryp_check_key_type(o1, algo, mode); if (res != TEE_SUCCESS) return res; } if (key2 != 0) { res = tee_obj_get(utc, tee_svc_uref_to_vaddr(key2), &o2); if (res != TEE_SUCCESS) return res; if (o2->busy) return TEE_ERROR_BAD_PARAMETERS; res = tee_svc_cryp_check_key_type(o2, algo, mode); if (res != TEE_SUCCESS) return res; } cs = calloc(1, sizeof(struct tee_cryp_state)); if (!cs) return TEE_ERROR_OUT_OF_MEMORY; TAILQ_INSERT_TAIL(&utc->cryp_states, cs, link); cs->algo = algo; cs->mode = mode; switch (TEE_ALG_GET_CLASS(algo)) { case TEE_OPERATION_EXTENSION: #ifdef CFG_CRYPTO_RSASSA_NA1 if (algo == TEE_ALG_RSASSA_PKCS1_V1_5) goto rsassa_na1; #endif res = TEE_ERROR_NOT_SUPPORTED; break; case TEE_OPERATION_CIPHER: if ((algo == TEE_ALG_AES_XTS && (key1 == 0 || key2 == 0)) || (algo != TEE_ALG_AES_XTS && (key1 == 0 || key2 != 0))) { res = TEE_ERROR_BAD_PARAMETERS; } else { res = crypto_cipher_alloc_ctx(&cs->ctx, algo); if (res != TEE_SUCCESS) break; } break; case TEE_OPERATION_AE: if (key1 == 0 || key2 != 0) { res = TEE_ERROR_BAD_PARAMETERS; } else { res = crypto_authenc_alloc_ctx(&cs->ctx, algo); if (res != TEE_SUCCESS) break; } break; case TEE_OPERATION_MAC: if (key1 == 0 || key2 != 0) { res = TEE_ERROR_BAD_PARAMETERS; } else { res = crypto_mac_alloc_ctx(&cs->ctx, algo); if (res != TEE_SUCCESS) break; } break; case TEE_OPERATION_DIGEST: if (key1 != 0 || key2 != 0) { res = TEE_ERROR_BAD_PARAMETERS; } else { res = crypto_hash_alloc_ctx(&cs->ctx, algo); if (res != TEE_SUCCESS) break; } break; case TEE_OPERATION_ASYMMETRIC_CIPHER: case TEE_OPERATION_ASYMMETRIC_SIGNATURE: rsassa_na1: __maybe_unused if (key1 == 0 || key2 != 0) res = TEE_ERROR_BAD_PARAMETERS; break; case TEE_OPERATION_KEY_DERIVATION: if (key1 == 0 || key2 != 0) res = TEE_ERROR_BAD_PARAMETERS; break; 
	default:
		res = TEE_ERROR_NOT_SUPPORTED;
		break;
	}

	if (res != TEE_SUCCESS)
		goto out;

	res = tee_svc_copy_kaddr_to_uref(state, cs);
	if (res != TEE_SUCCESS)
		goto out;

	/* Register keys */
	if (o1 != NULL) {
		o1->busy = true;
		cs->key1 = (vaddr_t)o1;
	}
	if (o2 != NULL) {
		o2->busy = true;
		cs->key2 = (vaddr_t)o2;
	}

out:
	if (res != TEE_SUCCESS)
		cryp_state_free(utc, cs);
	return res;
}

TEE_Result syscall_cryp_state_copy(unsigned long dst, unsigned long src)
{
	TEE_Result res;
	struct tee_cryp_state *cs_dst;
	struct tee_cryp_state *cs_src;
	struct tee_ta_session *sess;

	res = tee_ta_get_current_session(&sess);
	if (res != TEE_SUCCESS)
		return res;

	res = tee_svc_cryp_get_state(sess, tee_svc_uref_to_vaddr(dst),
				     &cs_dst);
	if (res != TEE_SUCCESS)
		return res;

	res = tee_svc_cryp_get_state(sess, tee_svc_uref_to_vaddr(src),
				     &cs_src);
	if (res != TEE_SUCCESS)
		return res;

	if (cs_dst->algo != cs_src->algo || cs_dst->mode != cs_src->mode)
		return TEE_ERROR_BAD_PARAMETERS;

	switch (TEE_ALG_GET_CLASS(cs_src->algo)) {
	case TEE_OPERATION_CIPHER:
		crypto_cipher_copy_state(cs_dst->ctx, cs_src->ctx,
					 cs_src->algo);
		break;
	case TEE_OPERATION_AE:
		crypto_authenc_copy_state(cs_dst->ctx, cs_src->ctx,
					  cs_src->algo);
		break;
	case TEE_OPERATION_DIGEST:
		crypto_hash_copy_state(cs_dst->ctx, cs_src->ctx,
				       cs_src->algo);
		break;
	case TEE_OPERATION_MAC:
		crypto_mac_copy_state(cs_dst->ctx, cs_src->ctx, cs_src->algo);
		break;
	default:
		return TEE_ERROR_BAD_STATE;
	}

	return TEE_SUCCESS;
}

void tee_svc_cryp_free_states(struct user_ta_ctx *utc)
{
	struct tee_cryp_state_head *states = &utc->cryp_states;

	while (!TAILQ_EMPTY(states))
		cryp_state_free(utc, TAILQ_FIRST(states));
}

TEE_Result syscall_cryp_state_free(unsigned long state)
{
	TEE_Result res;
	struct tee_cryp_state *cs;
	struct tee_ta_session *sess;

	res = tee_ta_get_current_session(&sess);
	if (res != TEE_SUCCESS)
		return res;

	res = tee_svc_cryp_get_state(sess, tee_svc_uref_to_vaddr(state), &cs);
	if (res != TEE_SUCCESS)
		return res;
	cryp_state_free(to_user_ta_ctx(sess->ctx), cs);
	return TEE_SUCCESS;
}

TEE_Result syscall_hash_init(unsigned long state,
			     const void *iv __maybe_unused,
			     size_t iv_len __maybe_unused)
{
	TEE_Result res;
	struct tee_cryp_state *cs;
	struct tee_ta_session *sess;

	res = tee_ta_get_current_session(&sess);
	if (res != TEE_SUCCESS)
		return res;

	res = tee_svc_cryp_get_state(sess, tee_svc_uref_to_vaddr(state), &cs);
	if (res != TEE_SUCCESS)
		return res;

	switch (TEE_ALG_GET_CLASS(cs->algo)) {
	case TEE_OPERATION_DIGEST:
		res = crypto_hash_init(cs->ctx, cs->algo);
		if (res != TEE_SUCCESS)
			return res;
		break;
	case TEE_OPERATION_MAC:
		{
			struct tee_obj *o;
			struct tee_cryp_obj_secret *key;

			res = tee_obj_get(to_user_ta_ctx(sess->ctx),
					  cs->key1, &o);
			if (res != TEE_SUCCESS)
				return res;
			if ((o->info.handleFlags &
			     TEE_HANDLE_FLAG_INITIALIZED) == 0)
				return TEE_ERROR_BAD_PARAMETERS;

			key = (struct tee_cryp_obj_secret *)o->attr;
			res = crypto_mac_init(cs->ctx, cs->algo,
					      (void *)(key + 1),
					      key->key_size);
			if (res != TEE_SUCCESS)
				return res;
			break;
		}
	default:
		return TEE_ERROR_BAD_PARAMETERS;
	}

	return TEE_SUCCESS;
}
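/*
 * Illustrative sketch (editor's addition, kept under #if 0 so it is never
 * compiled): tee_svc_cryp_get_state() above validates a state handle by
 * walking the session's TAILQ and comparing each element's kernel virtual
 * address against the cookie the TA passed in. The stand-alone program
 * below shows the same "pointer as handle, validated by list membership"
 * pattern; all demo_* names are invented.
 */
#if 0
#include <assert.h>
#include <stdint.h>
#include <stdlib.h>
#include <sys/queue.h>

struct demo_state {
	TAILQ_ENTRY(demo_state) link;
	uint32_t algo;
};

TAILQ_HEAD(demo_state_head, demo_state);

/* Accept a handle only if it names a state currently on the list */
static struct demo_state *demo_get_state(struct demo_state_head *h,
					 uintptr_t handle)
{
	struct demo_state *s;

	TAILQ_FOREACH(s, h, link)
		if ((uintptr_t)s == handle)
			return s;
	return NULL;
}

int main(void)
{
	struct demo_state_head head = TAILQ_HEAD_INITIALIZER(head);
	struct demo_state *s = calloc(1, sizeof(*s));

	assert(s);
	TAILQ_INSERT_TAIL(&head, s, link);
	assert(demo_get_state(&head, (uintptr_t)s) == s);
	assert(demo_get_state(&head, 0xdead) == NULL);
	TAILQ_REMOVE(&head, s, link);
	free(s);
	return 0;
}
#endif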
TEE_Result syscall_hash_update(unsigned long state, const void *chunk,
			       size_t chunk_size)
{
	TEE_Result res;
	struct tee_cryp_state *cs;
	struct tee_ta_session *sess;

	/* Providing a data size but no data is a bad parameter combination */
	if (!chunk && chunk_size)
		return TEE_ERROR_BAD_PARAMETERS;

	/* A zero-length update is valid, but there is nothing to do */
	if (!chunk_size)
		return TEE_SUCCESS;

	res = tee_ta_get_current_session(&sess);
	if (res != TEE_SUCCESS)
		return res;

	res = tee_mmu_check_access_rights(to_user_ta_ctx(sess->ctx),
					  TEE_MEMORY_ACCESS_READ |
					  TEE_MEMORY_ACCESS_ANY_OWNER,
					  (uaddr_t)chunk, chunk_size);
	if (res != TEE_SUCCESS)
		return res;

	res = tee_svc_cryp_get_state(sess, tee_svc_uref_to_vaddr(state), &cs);
	if (res != TEE_SUCCESS)
		return res;

	switch (TEE_ALG_GET_CLASS(cs->algo)) {
	case TEE_OPERATION_DIGEST:
		res = crypto_hash_update(cs->ctx, cs->algo, chunk, chunk_size);
		if (res != TEE_SUCCESS)
			return res;
		break;
	case TEE_OPERATION_MAC:
		res = crypto_mac_update(cs->ctx, cs->algo, chunk, chunk_size);
		if (res != TEE_SUCCESS)
			return res;
		break;
	default:
		return TEE_ERROR_BAD_PARAMETERS;
	}

	return TEE_SUCCESS;
}

TEE_Result syscall_hash_final(unsigned long state, const void *chunk,
			      size_t chunk_size, void *hash,
			      uint64_t *hash_len)
{
	TEE_Result res, res2;
	size_t hash_size;
	uint64_t hlen;
	struct tee_cryp_state *cs;
	struct tee_ta_session *sess;

	/* Providing a data size but no data is a bad parameter combination */
	if (!chunk && chunk_size)
		return TEE_ERROR_BAD_PARAMETERS;

	res = tee_ta_get_current_session(&sess);
	if (res != TEE_SUCCESS)
		return res;

	res = tee_mmu_check_access_rights(to_user_ta_ctx(sess->ctx),
					  TEE_MEMORY_ACCESS_READ |
					  TEE_MEMORY_ACCESS_ANY_OWNER,
					  (uaddr_t)chunk, chunk_size);
	if (res != TEE_SUCCESS)
		return res;

	res = tee_svc_copy_from_user(&hlen, hash_len, sizeof(hlen));
	if (res != TEE_SUCCESS)
		return res;

	res = tee_mmu_check_access_rights(to_user_ta_ctx(sess->ctx),
					  TEE_MEMORY_ACCESS_READ |
					  TEE_MEMORY_ACCESS_WRITE |
					  TEE_MEMORY_ACCESS_ANY_OWNER,
					  (uaddr_t)hash, hlen);
	if (res != TEE_SUCCESS)
		return res;

	res = tee_svc_cryp_get_state(sess, tee_svc_uref_to_vaddr(state), &cs);
	if (res != TEE_SUCCESS)
		return res;

	switch (TEE_ALG_GET_CLASS(cs->algo)) {
	case TEE_OPERATION_DIGEST:
		res = tee_hash_get_digest_size(cs->algo, &hash_size);
		if (res != TEE_SUCCESS)
			return res;
		/* Use the already copied-in length to avoid a double fetch */
		if (hlen < hash_size) {
			res = TEE_ERROR_SHORT_BUFFER;
			goto out;
		}

		if (chunk_size) {
			res = crypto_hash_update(cs->ctx, cs->algo, chunk,
						 chunk_size);
			if (res != TEE_SUCCESS)
				return res;
		}

		res = crypto_hash_final(cs->ctx, cs->algo, hash, hash_size);
		if (res != TEE_SUCCESS)
			return res;
		break;

	case TEE_OPERATION_MAC:
		res = tee_mac_get_digest_size(cs->algo, &hash_size);
		if (res != TEE_SUCCESS)
			return res;
		/* Use the already copied-in length to avoid a double fetch */
		if (hlen < hash_size) {
			res = TEE_ERROR_SHORT_BUFFER;
			goto out;
		}

		if (chunk_size) {
			res = crypto_mac_update(cs->ctx, cs->algo, chunk,
						chunk_size);
			if (res != TEE_SUCCESS)
				return res;
		}

		res = crypto_mac_final(cs->ctx, cs->algo, hash, hash_size);
		if (res != TEE_SUCCESS)
			return res;
		break;

	default:
		return TEE_ERROR_BAD_PARAMETERS;
	}

out:
	hlen = hash_size;
	res2 = tee_svc_copy_to_user(hash_len, &hlen, sizeof(*hash_len));
	if (res2 != TEE_SUCCESS)
		return res2;
	return res;
}

TEE_Result syscall_cipher_init(unsigned long state, const void *iv,
			       size_t iv_len)
{
	TEE_Result res;
	struct tee_cryp_state *cs;
	struct tee_ta_session *sess;
	struct tee_obj *o;
	struct tee_cryp_obj_secret *key1;
	struct user_ta_ctx *utc;

	res = tee_ta_get_current_session(&sess);
	if (res != TEE_SUCCESS)
		return res;
	utc = to_user_ta_ctx(sess->ctx);

	res = tee_svc_cryp_get_state(sess, tee_svc_uref_to_vaddr(state), &cs);
	if (res != TEE_SUCCESS)
		return res;

	res = tee_mmu_check_access_rights(utc,
					  TEE_MEMORY_ACCESS_READ |
					  TEE_MEMORY_ACCESS_ANY_OWNER,
					  (uaddr_t)iv, iv_len);
	if (res != TEE_SUCCESS)
		return res;

	res = tee_obj_get(utc, cs->key1, &o);
	if (res != TEE_SUCCESS)
		return res;
	if ((o->info.handleFlags &
TEE_HANDLE_FLAG_INITIALIZED) == 0) return TEE_ERROR_BAD_PARAMETERS; key1 = o->attr; if (tee_obj_get(utc, cs->key2, &o) == TEE_SUCCESS) { struct tee_cryp_obj_secret *key2 = o->attr; if ((o->info.handleFlags & TEE_HANDLE_FLAG_INITIALIZED) == 0) return TEE_ERROR_BAD_PARAMETERS; res = crypto_cipher_init(cs->ctx, cs->algo, cs->mode, (uint8_t *)(key1 + 1), key1->key_size, (uint8_t *)(key2 + 1), key2->key_size, iv, iv_len); } else { res = crypto_cipher_init(cs->ctx, cs->algo, cs->mode, (uint8_t *)(key1 + 1), key1->key_size, NULL, 0, iv, iv_len); } if (res != TEE_SUCCESS) return res; cs->ctx_finalize = crypto_cipher_final; return TEE_SUCCESS; } static TEE_Result tee_svc_cipher_update_helper(unsigned long state, bool last_block, const void *src, size_t src_len, void *dst, uint64_t *dst_len) { TEE_Result res; struct tee_cryp_state *cs; struct tee_ta_session *sess; uint64_t dlen; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; res = tee_svc_cryp_get_state(sess, tee_svc_uref_to_vaddr(state), &cs); if (res != TEE_SUCCESS) return res; res = tee_mmu_check_access_rights(to_user_ta_ctx(sess->ctx), TEE_MEMORY_ACCESS_READ | TEE_MEMORY_ACCESS_ANY_OWNER, (uaddr_t)src, src_len); if (res != TEE_SUCCESS) return res; if (!dst_len) { dlen = 0; } else { res = tee_svc_copy_from_user(&dlen, dst_len, sizeof(dlen)); if (res != TEE_SUCCESS) return res; res = tee_mmu_check_access_rights(to_user_ta_ctx(sess->ctx), TEE_MEMORY_ACCESS_READ | TEE_MEMORY_ACCESS_WRITE | TEE_MEMORY_ACCESS_ANY_OWNER, (uaddr_t)dst, dlen); if (res != TEE_SUCCESS) return res; } if (dlen < src_len) { res = TEE_ERROR_SHORT_BUFFER; goto out; } if (src_len > 0) { /* Permit src_len == 0 to finalize the operation */ res = tee_do_cipher_update(cs->ctx, cs->algo, cs->mode, last_block, src, src_len, dst); } if (last_block && cs->ctx_finalize != NULL) { cs->ctx_finalize(cs->ctx, cs->algo); cs->ctx_finalize = NULL; } out: if ((res == TEE_SUCCESS || res == TEE_ERROR_SHORT_BUFFER) && dst_len != NULL) { TEE_Result res2; dlen = src_len; res2 = tee_svc_copy_to_user(dst_len, &dlen, sizeof(*dst_len)); if (res2 != TEE_SUCCESS) res = res2; } return res; } TEE_Result syscall_cipher_update(unsigned long state, const void *src, size_t src_len, void *dst, uint64_t *dst_len) { return tee_svc_cipher_update_helper(state, false /* last_block */, src, src_len, dst, dst_len); } TEE_Result syscall_cipher_final(unsigned long state, const void *src, size_t src_len, void *dst, uint64_t *dst_len) { return tee_svc_cipher_update_helper(state, true /* last_block */, src, src_len, dst, dst_len); } #if defined(CFG_CRYPTO_HKDF) static TEE_Result get_hkdf_params(const TEE_Attribute *params, uint32_t param_count, void **salt, size_t *salt_len, void **info, size_t *info_len, size_t *okm_len) { size_t n; enum { SALT = 0x1, LENGTH = 0x2, INFO = 0x4 }; uint8_t found = 0; *salt = *info = NULL; *salt_len = *info_len = *okm_len = 0; for (n = 0; n < param_count; n++) { switch (params[n].attributeID) { case TEE_ATTR_HKDF_SALT: if (!(found & SALT)) { *salt = params[n].content.ref.buffer; *salt_len = params[n].content.ref.length; found |= SALT; } break; case TEE_ATTR_HKDF_OKM_LENGTH: if (!(found & LENGTH)) { *okm_len = params[n].content.value.a; found |= LENGTH; } break; case TEE_ATTR_HKDF_INFO: if (!(found & INFO)) { *info = params[n].content.ref.buffer; *info_len = params[n].content.ref.length; found |= INFO; } break; default: /* Unexpected attribute */ return TEE_ERROR_BAD_PARAMETERS; } } if (!(found & LENGTH)) return TEE_ERROR_BAD_PARAMETERS; return TEE_SUCCESS; } #endif 
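/*
 * Illustrative sketch (editor's addition, kept under #if 0 so it is never
 * compiled): get_hkdf_params() above and its concat-KDF/PBKDF2 siblings
 * below all follow the same single-pass pattern: scan the attribute array
 * once, record each attribute in a "found" bitmask, keep only the first
 * occurrence of a duplicate, reject unknown attributes, and fail if a
 * mandatory attribute is absent. All demo_* names below are invented.
 */
#if 0
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct demo_param {
	uint32_t id;
	uint32_t value;
};

enum { DEMO_SALT = 0x1, DEMO_LENGTH = 0x2 };

static bool demo_parse(const struct demo_param *p, size_t n, uint32_t *len)
{
	uint8_t found = 0;
	size_t i;

	for (i = 0; i < n; i++) {
		switch (p[i].id) {
		case 1:	/* salt: optional */
			found |= DEMO_SALT;
			break;
		case 2:	/* length: mandatory, first occurrence wins */
			if (!(found & DEMO_LENGTH))
				*len = p[i].value;
			found |= DEMO_LENGTH;
			break;
		default:
			return false;	/* unexpected attribute */
		}
	}
	return (found & DEMO_LENGTH) != 0;
}
#endif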
#if defined(CFG_CRYPTO_CONCAT_KDF) static TEE_Result get_concat_kdf_params(const TEE_Attribute *params, uint32_t param_count, void **other_info, size_t *other_info_len, size_t *derived_key_len) { size_t n; enum { LENGTH = 0x1, INFO = 0x2 }; uint8_t found = 0; *other_info = NULL; *other_info_len = *derived_key_len = 0; for (n = 0; n < param_count; n++) { switch (params[n].attributeID) { case TEE_ATTR_CONCAT_KDF_OTHER_INFO: if (!(found & INFO)) { *other_info = params[n].content.ref.buffer; *other_info_len = params[n].content.ref.length; found |= INFO; } break; case TEE_ATTR_CONCAT_KDF_DKM_LENGTH: if (!(found & LENGTH)) { *derived_key_len = params[n].content.value.a; found |= LENGTH; } break; default: /* Unexpected attribute */ return TEE_ERROR_BAD_PARAMETERS; } } if (!(found & LENGTH)) return TEE_ERROR_BAD_PARAMETERS; return TEE_SUCCESS; } #endif #if defined(CFG_CRYPTO_PBKDF2) static TEE_Result get_pbkdf2_params(const TEE_Attribute *params, uint32_t param_count, void **salt, size_t *salt_len, size_t *derived_key_len, size_t *iteration_count) { size_t n; enum { SALT = 0x1, LENGTH = 0x2, COUNT = 0x4 }; uint8_t found = 0; *salt = NULL; *salt_len = *derived_key_len = *iteration_count = 0; for (n = 0; n < param_count; n++) { switch (params[n].attributeID) { case TEE_ATTR_PBKDF2_SALT: if (!(found & SALT)) { *salt = params[n].content.ref.buffer; *salt_len = params[n].content.ref.length; found |= SALT; } break; case TEE_ATTR_PBKDF2_DKM_LENGTH: if (!(found & LENGTH)) { *derived_key_len = params[n].content.value.a; found |= LENGTH; } break; case TEE_ATTR_PBKDF2_ITERATION_COUNT: if (!(found & COUNT)) { *iteration_count = params[n].content.value.a; found |= COUNT; } break; default: /* Unexpected attribute */ return TEE_ERROR_BAD_PARAMETERS; } } if ((found & (LENGTH|COUNT)) != (LENGTH|COUNT)) return TEE_ERROR_BAD_PARAMETERS; return TEE_SUCCESS; } #endif TEE_Result syscall_cryp_derive_key(unsigned long state, const struct utee_attribute *usr_params, unsigned long param_count, unsigned long derived_key) { TEE_Result res = TEE_ERROR_NOT_SUPPORTED; struct tee_ta_session *sess; struct tee_obj *ko; struct tee_obj *so; struct tee_cryp_state *cs; struct tee_cryp_obj_secret *sk; const struct tee_cryp_obj_type_props *type_props; TEE_Attribute *params = NULL; struct user_ta_ctx *utc; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; utc = to_user_ta_ctx(sess->ctx); res = tee_svc_cryp_get_state(sess, tee_svc_uref_to_vaddr(state), &cs); if (res != TEE_SUCCESS) return res; params = malloc(sizeof(TEE_Attribute) * param_count); if (!params) return TEE_ERROR_OUT_OF_MEMORY; res = copy_in_attrs(utc, usr_params, param_count, params); if (res != TEE_SUCCESS) goto out; /* Get key set in operation */ res = tee_obj_get(utc, cs->key1, &ko); if (res != TEE_SUCCESS) goto out; res = tee_obj_get(utc, tee_svc_uref_to_vaddr(derived_key), &so); if (res != TEE_SUCCESS) goto out; /* Find information needed about the object to initialize */ sk = so->attr; /* Find description of object */ type_props = tee_svc_find_type_props(so->info.objectType); if (!type_props) { res = TEE_ERROR_NOT_SUPPORTED; goto out; } if (cs->algo == TEE_ALG_DH_DERIVE_SHARED_SECRET) { size_t alloc_size; struct bignum *pub; struct bignum *ss; if (param_count != 1 || params[0].attributeID != TEE_ATTR_DH_PUBLIC_VALUE) { res = TEE_ERROR_BAD_PARAMETERS; goto out; } alloc_size = params[0].content.ref.length * 8; pub = crypto_bignum_allocate(alloc_size); ss = crypto_bignum_allocate(alloc_size); if (pub && ss) { 
crypto_bignum_bin2bn(params[0].content.ref.buffer, params[0].content.ref.length, pub); res = crypto_acipher_dh_shared_secret(ko->attr, pub, ss); if (res == TEE_SUCCESS) { sk->key_size = crypto_bignum_num_bytes(ss); crypto_bignum_bn2bin(ss, (uint8_t *)(sk + 1)); so->info.handleFlags |= TEE_HANDLE_FLAG_INITIALIZED; set_attribute(so, type_props, TEE_ATTR_SECRET_VALUE); } } else { res = TEE_ERROR_OUT_OF_MEMORY; } crypto_bignum_free(pub); crypto_bignum_free(ss); } else if (TEE_ALG_GET_MAIN_ALG(cs->algo) == TEE_MAIN_ALGO_ECDH) { size_t alloc_size; struct ecc_public_key key_public; uint8_t *pt_secret; unsigned long pt_secret_len; if (param_count != 2 || params[0].attributeID != TEE_ATTR_ECC_PUBLIC_VALUE_X || params[1].attributeID != TEE_ATTR_ECC_PUBLIC_VALUE_Y) { res = TEE_ERROR_BAD_PARAMETERS; goto out; } switch (cs->algo) { case TEE_ALG_ECDH_P192: alloc_size = 192; break; case TEE_ALG_ECDH_P224: alloc_size = 224; break; case TEE_ALG_ECDH_P256: alloc_size = 256; break; case TEE_ALG_ECDH_P384: alloc_size = 384; break; case TEE_ALG_ECDH_P521: alloc_size = 521; break; default: res = TEE_ERROR_NOT_IMPLEMENTED; goto out; } /* Create the public key */ res = crypto_acipher_alloc_ecc_public_key(&key_public, alloc_size); if (res != TEE_SUCCESS) goto out; key_public.curve = ((struct ecc_keypair *)ko->attr)->curve; crypto_bignum_bin2bn(params[0].content.ref.buffer, params[0].content.ref.length, key_public.x); crypto_bignum_bin2bn(params[1].content.ref.buffer, params[1].content.ref.length, key_public.y); pt_secret = (uint8_t *)(sk + 1); pt_secret_len = sk->alloc_size; res = crypto_acipher_ecc_shared_secret(ko->attr, &key_public, pt_secret, &pt_secret_len); if (res == TEE_SUCCESS) { sk->key_size = pt_secret_len; so->info.handleFlags |= TEE_HANDLE_FLAG_INITIALIZED; set_attribute(so, type_props, TEE_ATTR_SECRET_VALUE); } /* free the public key */ crypto_acipher_free_ecc_public_key(&key_public); } #if defined(CFG_CRYPTO_HKDF) else if (TEE_ALG_GET_MAIN_ALG(cs->algo) == TEE_MAIN_ALGO_HKDF) { void *salt, *info; size_t salt_len, info_len, okm_len; uint32_t hash_id = TEE_ALG_GET_DIGEST_HASH(cs->algo); struct tee_cryp_obj_secret *ik = ko->attr; const uint8_t *ikm = (const uint8_t *)(ik + 1); res = get_hkdf_params(params, param_count, &salt, &salt_len, &info, &info_len, &okm_len); if (res != TEE_SUCCESS) goto out; /* Requested size must fit into the output object's buffer */ if (okm_len > ik->alloc_size) { res = TEE_ERROR_BAD_PARAMETERS; goto out; } res = tee_cryp_hkdf(hash_id, ikm, ik->key_size, salt, salt_len, info, info_len, (uint8_t *)(sk + 1), okm_len); if (res == TEE_SUCCESS) { sk->key_size = okm_len; so->info.handleFlags |= TEE_HANDLE_FLAG_INITIALIZED; set_attribute(so, type_props, TEE_ATTR_SECRET_VALUE); } } #endif #if defined(CFG_CRYPTO_CONCAT_KDF) else if (TEE_ALG_GET_MAIN_ALG(cs->algo) == TEE_MAIN_ALGO_CONCAT_KDF) { void *info; size_t info_len, derived_key_len; uint32_t hash_id = TEE_ALG_GET_DIGEST_HASH(cs->algo); struct tee_cryp_obj_secret *ss = ko->attr; const uint8_t *shared_secret = (const uint8_t *)(ss + 1); res = get_concat_kdf_params(params, param_count, &info, &info_len, &derived_key_len); if (res != TEE_SUCCESS) goto out; /* Requested size must fit into the output object's buffer */ if (derived_key_len > ss->alloc_size) { res = TEE_ERROR_BAD_PARAMETERS; goto out; } res = tee_cryp_concat_kdf(hash_id, shared_secret, ss->key_size, info, info_len, (uint8_t *)(sk + 1), derived_key_len); if (res == TEE_SUCCESS) { sk->key_size = derived_key_len; so->info.handleFlags |= TEE_HANDLE_FLAG_INITIALIZED; 
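/* Mark TEE_ATTR_SECRET_VALUE as present so the derived key can be read back */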
set_attribute(so, type_props, TEE_ATTR_SECRET_VALUE); } } #endif #if defined(CFG_CRYPTO_PBKDF2) else if (TEE_ALG_GET_MAIN_ALG(cs->algo) == TEE_MAIN_ALGO_PBKDF2) { void *salt; size_t salt_len, iteration_count, derived_key_len; uint32_t hash_id = TEE_ALG_GET_DIGEST_HASH(cs->algo); struct tee_cryp_obj_secret *ss = ko->attr; const uint8_t *password = (const uint8_t *)(ss + 1); res = get_pbkdf2_params(params, param_count, &salt, &salt_len, &derived_key_len, &iteration_count); if (res != TEE_SUCCESS) goto out; /* Requested size must fit into the output object's buffer */ if (derived_key_len > ss->alloc_size) { res = TEE_ERROR_BAD_PARAMETERS; goto out; } res = tee_cryp_pbkdf2(hash_id, password, ss->key_size, salt, salt_len, iteration_count, (uint8_t *)(sk + 1), derived_key_len); if (res == TEE_SUCCESS) { sk->key_size = derived_key_len; so->info.handleFlags |= TEE_HANDLE_FLAG_INITIALIZED; set_attribute(so, type_props, TEE_ATTR_SECRET_VALUE); } } #endif else res = TEE_ERROR_NOT_SUPPORTED; out: free(params); return res; } TEE_Result syscall_cryp_random_number_generate(void *buf, size_t blen) { TEE_Result res; struct tee_ta_session *sess; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; res = tee_mmu_check_access_rights(to_user_ta_ctx(sess->ctx), TEE_MEMORY_ACCESS_WRITE | TEE_MEMORY_ACCESS_ANY_OWNER, (uaddr_t)buf, blen); if (res != TEE_SUCCESS) return res; res = crypto_rng_read(buf, blen); if (res != TEE_SUCCESS) return res; return res; } TEE_Result syscall_authenc_init(unsigned long state, const void *nonce, size_t nonce_len, size_t tag_len, size_t aad_len, size_t payload_len) { TEE_Result res; struct tee_cryp_state *cs; struct tee_ta_session *sess; struct tee_obj *o; struct tee_cryp_obj_secret *key; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; res = tee_svc_cryp_get_state(sess, tee_svc_uref_to_vaddr(state), &cs); if (res != TEE_SUCCESS) return res; res = tee_obj_get(to_user_ta_ctx(sess->ctx), cs->key1, &o); if (res != TEE_SUCCESS) return res; if ((o->info.handleFlags & TEE_HANDLE_FLAG_INITIALIZED) == 0) return TEE_ERROR_BAD_PARAMETERS; key = o->attr; res = crypto_authenc_init(cs->ctx, cs->algo, cs->mode, (uint8_t *)(key + 1), key->key_size, nonce, nonce_len, tag_len, aad_len, payload_len); if (res != TEE_SUCCESS) return res; cs->ctx_finalize = (tee_cryp_ctx_finalize_func_t)crypto_authenc_final; return TEE_SUCCESS; } TEE_Result syscall_authenc_update_aad(unsigned long state, const void *aad_data, size_t aad_data_len) { TEE_Result res; struct tee_cryp_state *cs; struct tee_ta_session *sess; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; res = tee_mmu_check_access_rights(to_user_ta_ctx(sess->ctx), TEE_MEMORY_ACCESS_READ | TEE_MEMORY_ACCESS_ANY_OWNER, (uaddr_t) aad_data, aad_data_len); if (res != TEE_SUCCESS) return res; res = tee_svc_cryp_get_state(sess, tee_svc_uref_to_vaddr(state), &cs); if (res != TEE_SUCCESS) return res; res = crypto_authenc_update_aad(cs->ctx, cs->algo, cs->mode, aad_data, aad_data_len); if (res != TEE_SUCCESS) return res; return TEE_SUCCESS; } TEE_Result syscall_authenc_update_payload(unsigned long state, const void *src_data, size_t src_len, void *dst_data, uint64_t *dst_len) { TEE_Result res; struct tee_cryp_state *cs; struct tee_ta_session *sess; uint64_t dlen; size_t tmp_dlen; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; res = tee_svc_cryp_get_state(sess, tee_svc_uref_to_vaddr(state), &cs); if (res != TEE_SUCCESS) return res; res = 
tee_mmu_check_access_rights(to_user_ta_ctx(sess->ctx), TEE_MEMORY_ACCESS_READ | TEE_MEMORY_ACCESS_ANY_OWNER, (uaddr_t) src_data, src_len); if (res != TEE_SUCCESS) return res; res = tee_svc_copy_from_user(&dlen, dst_len, sizeof(dlen)); if (res != TEE_SUCCESS) return res; res = tee_mmu_check_access_rights(to_user_ta_ctx(sess->ctx), TEE_MEMORY_ACCESS_READ | TEE_MEMORY_ACCESS_WRITE | TEE_MEMORY_ACCESS_ANY_OWNER, (uaddr_t)dst_data, dlen); if (res != TEE_SUCCESS) return res; if (dlen < src_len) { res = TEE_ERROR_SHORT_BUFFER; goto out; } tmp_dlen = dlen; res = crypto_authenc_update_payload(cs->ctx, cs->algo, cs->mode, src_data, src_len, dst_data, &tmp_dlen); dlen = tmp_dlen; out: if (res == TEE_SUCCESS || res == TEE_ERROR_SHORT_BUFFER) { TEE_Result res2 = tee_svc_copy_to_user(dst_len, &dlen, sizeof(*dst_len)); if (res2 != TEE_SUCCESS) res = res2; } return res; } TEE_Result syscall_authenc_enc_final(unsigned long state, const void *src_data, size_t src_len, void *dst_data, uint64_t *dst_len, void *tag, uint64_t *tag_len) { TEE_Result res; struct tee_cryp_state *cs; struct tee_ta_session *sess; uint64_t dlen; uint64_t tlen = 0; size_t tmp_dlen; size_t tmp_tlen; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; res = tee_svc_cryp_get_state(sess, tee_svc_uref_to_vaddr(state), &cs); if (res != TEE_SUCCESS) return res; if (cs->mode != TEE_MODE_ENCRYPT) return TEE_ERROR_BAD_PARAMETERS; res = tee_mmu_check_access_rights(to_user_ta_ctx(sess->ctx), TEE_MEMORY_ACCESS_READ | TEE_MEMORY_ACCESS_ANY_OWNER, (uaddr_t)src_data, src_len); if (res != TEE_SUCCESS) return res; if (!dst_len) { dlen = 0; } else { res = tee_svc_copy_from_user(&dlen, dst_len, sizeof(dlen)); if (res != TEE_SUCCESS) return res; res = tee_mmu_check_access_rights(to_user_ta_ctx(sess->ctx), TEE_MEMORY_ACCESS_READ | TEE_MEMORY_ACCESS_WRITE | TEE_MEMORY_ACCESS_ANY_OWNER, (uaddr_t)dst_data, dlen); if (res != TEE_SUCCESS) return res; } if (dlen < src_len) { res = TEE_ERROR_SHORT_BUFFER; goto out; } res = tee_svc_copy_from_user(&tlen, tag_len, sizeof(tlen)); if (res != TEE_SUCCESS) return res; res = tee_mmu_check_access_rights(to_user_ta_ctx(sess->ctx), TEE_MEMORY_ACCESS_READ | TEE_MEMORY_ACCESS_WRITE | TEE_MEMORY_ACCESS_ANY_OWNER, (uaddr_t)tag, tlen); if (res != TEE_SUCCESS) return res; tmp_dlen = dlen; tmp_tlen = tlen; res = crypto_authenc_enc_final(cs->ctx, cs->algo, src_data, src_len, dst_data, &tmp_dlen, tag, &tmp_tlen); dlen = tmp_dlen; tlen = tmp_tlen; out: if (res == TEE_SUCCESS || res == TEE_ERROR_SHORT_BUFFER) { TEE_Result res2; if (dst_len != NULL) { res2 = tee_svc_copy_to_user(dst_len, &dlen, sizeof(*dst_len)); if (res2 != TEE_SUCCESS) return res2; } res2 = tee_svc_copy_to_user(tag_len, &tlen, sizeof(*tag_len)); if (res2 != TEE_SUCCESS) return res2; } return res; } TEE_Result syscall_authenc_dec_final(unsigned long state, const void *src_data, size_t src_len, void *dst_data, uint64_t *dst_len, const void *tag, size_t tag_len) { TEE_Result res; struct tee_cryp_state *cs; struct tee_ta_session *sess; uint64_t dlen; size_t tmp_dlen; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; res = tee_svc_cryp_get_state(sess, tee_svc_uref_to_vaddr(state), &cs); if (res != TEE_SUCCESS) return res; if (cs->mode != TEE_MODE_DECRYPT) return TEE_ERROR_BAD_PARAMETERS; res = tee_mmu_check_access_rights(to_user_ta_ctx(sess->ctx), TEE_MEMORY_ACCESS_READ | TEE_MEMORY_ACCESS_ANY_OWNER, (uaddr_t)src_data, src_len); if (res != TEE_SUCCESS) return res; if (!dst_len) { dlen = 0; } else { res = 
tee_svc_copy_from_user(&dlen, dst_len, sizeof(dlen)); if (res != TEE_SUCCESS) return res; res = tee_mmu_check_access_rights(to_user_ta_ctx(sess->ctx), TEE_MEMORY_ACCESS_READ | TEE_MEMORY_ACCESS_WRITE | TEE_MEMORY_ACCESS_ANY_OWNER, (uaddr_t)dst_data, dlen); if (res != TEE_SUCCESS) return res; } if (dlen < src_len) { res = TEE_ERROR_SHORT_BUFFER; goto out; } res = tee_mmu_check_access_rights(to_user_ta_ctx(sess->ctx), TEE_MEMORY_ACCESS_READ | TEE_MEMORY_ACCESS_ANY_OWNER, (uaddr_t)tag, tag_len); if (res != TEE_SUCCESS) return res; tmp_dlen = dlen; res = crypto_authenc_dec_final(cs->ctx, cs->algo, src_data, src_len, dst_data, &tmp_dlen, tag, tag_len); dlen = tmp_dlen; out: if ((res == TEE_SUCCESS || res == TEE_ERROR_SHORT_BUFFER) && dst_len != NULL) { TEE_Result res2; res2 = tee_svc_copy_to_user(dst_len, &dlen, sizeof(*dst_len)); if (res2 != TEE_SUCCESS) return res2; } return res; } static int pkcs1_get_salt_len(const TEE_Attribute *params, uint32_t num_params, size_t default_len) { size_t n; assert(default_len < INT_MAX); for (n = 0; n < num_params; n++) { if (params[n].attributeID == TEE_ATTR_RSA_PSS_SALT_LENGTH) { if (params[n].content.value.a < INT_MAX) return params[n].content.value.a; break; } } /* * If salt length isn't provided use the default value which is * the length of the digest. */ return default_len; } TEE_Result syscall_asymm_operate(unsigned long state, const struct utee_attribute *usr_params, size_t num_params, const void *src_data, size_t src_len, void *dst_data, uint64_t *dst_len) { TEE_Result res; struct tee_cryp_state *cs; struct tee_ta_session *sess; uint64_t dlen64; size_t dlen; struct tee_obj *o; void *label = NULL; size_t label_len = 0; size_t n; int salt_len; TEE_Attribute *params = NULL; struct user_ta_ctx *utc; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; utc = to_user_ta_ctx(sess->ctx); res = tee_svc_cryp_get_state(sess, tee_svc_uref_to_vaddr(state), &cs); if (res != TEE_SUCCESS) return res; res = tee_mmu_check_access_rights( utc, TEE_MEMORY_ACCESS_READ | TEE_MEMORY_ACCESS_ANY_OWNER, (uaddr_t) src_data, src_len); if (res != TEE_SUCCESS) return res; res = tee_svc_copy_from_user(&dlen64, dst_len, sizeof(dlen64)); if (res != TEE_SUCCESS) return res; dlen = dlen64; res = tee_mmu_check_access_rights( utc, TEE_MEMORY_ACCESS_READ | TEE_MEMORY_ACCESS_WRITE | TEE_MEMORY_ACCESS_ANY_OWNER, (uaddr_t) dst_data, dlen); if (res != TEE_SUCCESS) return res; params = malloc(sizeof(TEE_Attribute) * num_params); if (!params) return TEE_ERROR_OUT_OF_MEMORY; res = copy_in_attrs(utc, usr_params, num_params, params); if (res != TEE_SUCCESS) goto out; res = tee_obj_get(utc, cs->key1, &o); if (res != TEE_SUCCESS) goto out; if ((o->info.handleFlags & TEE_HANDLE_FLAG_INITIALIZED) == 0) { res = TEE_ERROR_GENERIC; goto out; } switch (cs->algo) { case TEE_ALG_RSA_NOPAD: if (cs->mode == TEE_MODE_ENCRYPT) { res = crypto_acipher_rsanopad_encrypt(o->attr, src_data, src_len, dst_data, &dlen); } else if (cs->mode == TEE_MODE_DECRYPT) { res = crypto_acipher_rsanopad_decrypt(o->attr, src_data, src_len, dst_data, &dlen); } else { /* * We will panic because "the mode is not compatible * with the function" */ res = TEE_ERROR_GENERIC; } break; case TEE_ALG_RSAES_PKCS1_V1_5: case TEE_ALG_RSAES_PKCS1_OAEP_MGF1_SHA1: case TEE_ALG_RSAES_PKCS1_OAEP_MGF1_SHA224: case TEE_ALG_RSAES_PKCS1_OAEP_MGF1_SHA256: case TEE_ALG_RSAES_PKCS1_OAEP_MGF1_SHA384: case TEE_ALG_RSAES_PKCS1_OAEP_MGF1_SHA512: for (n = 0; n < num_params; n++) { if (params[n].attributeID == TEE_ATTR_RSA_OAEP_LABEL) { 
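/* The first OAEP label attribute wins; it is passed through to the RSAES primitives below */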
label = params[n].content.ref.buffer; label_len = params[n].content.ref.length; break; } } if (cs->mode == TEE_MODE_ENCRYPT) { res = crypto_acipher_rsaes_encrypt(cs->algo, o->attr, label, label_len, src_data, src_len, dst_data, &dlen); } else if (cs->mode == TEE_MODE_DECRYPT) { res = crypto_acipher_rsaes_decrypt( cs->algo, o->attr, label, label_len, src_data, src_len, dst_data, &dlen); } else { res = TEE_ERROR_BAD_PARAMETERS; } break; #if defined(CFG_CRYPTO_RSASSA_NA1) case TEE_ALG_RSASSA_PKCS1_V1_5: #endif case TEE_ALG_RSASSA_PKCS1_V1_5_MD5: case TEE_ALG_RSASSA_PKCS1_V1_5_SHA1: case TEE_ALG_RSASSA_PKCS1_V1_5_SHA224: case TEE_ALG_RSASSA_PKCS1_V1_5_SHA256: case TEE_ALG_RSASSA_PKCS1_V1_5_SHA384: case TEE_ALG_RSASSA_PKCS1_V1_5_SHA512: case TEE_ALG_RSASSA_PKCS1_PSS_MGF1_SHA1: case TEE_ALG_RSASSA_PKCS1_PSS_MGF1_SHA224: case TEE_ALG_RSASSA_PKCS1_PSS_MGF1_SHA256: case TEE_ALG_RSASSA_PKCS1_PSS_MGF1_SHA384: case TEE_ALG_RSASSA_PKCS1_PSS_MGF1_SHA512: if (cs->mode != TEE_MODE_SIGN) { res = TEE_ERROR_BAD_PARAMETERS; break; } salt_len = pkcs1_get_salt_len(params, num_params, src_len); res = crypto_acipher_rsassa_sign(cs->algo, o->attr, salt_len, src_data, src_len, dst_data, &dlen); break; case TEE_ALG_DSA_SHA1: case TEE_ALG_DSA_SHA224: case TEE_ALG_DSA_SHA256: res = crypto_acipher_dsa_sign(cs->algo, o->attr, src_data, src_len, dst_data, &dlen); break; case TEE_ALG_ECDSA_P192: case TEE_ALG_ECDSA_P224: case TEE_ALG_ECDSA_P256: case TEE_ALG_ECDSA_P384: case TEE_ALG_ECDSA_P521: res = crypto_acipher_ecc_sign(cs->algo, o->attr, src_data, src_len, dst_data, &dlen); break; default: res = TEE_ERROR_BAD_PARAMETERS; break; } out: free(params); if (res == TEE_SUCCESS || res == TEE_ERROR_SHORT_BUFFER) { TEE_Result res2; dlen64 = dlen; res2 = tee_svc_copy_to_user(dst_len, &dlen64, sizeof(*dst_len)); if (res2 != TEE_SUCCESS) return res2; } return res; } TEE_Result syscall_asymm_verify(unsigned long state, const struct utee_attribute *usr_params, size_t num_params, const void *data, size_t data_len, const void *sig, size_t sig_len) { TEE_Result res; struct tee_cryp_state *cs; struct tee_ta_session *sess; struct tee_obj *o; size_t hash_size; int salt_len = 0; TEE_Attribute *params = NULL; uint32_t hash_algo; struct user_ta_ctx *utc; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; utc = to_user_ta_ctx(sess->ctx); res = tee_svc_cryp_get_state(sess, tee_svc_uref_to_vaddr(state), &cs); if (res != TEE_SUCCESS) return res; if (cs->mode != TEE_MODE_VERIFY) return TEE_ERROR_BAD_PARAMETERS; res = tee_mmu_check_access_rights(utc, TEE_MEMORY_ACCESS_READ | TEE_MEMORY_ACCESS_ANY_OWNER, (uaddr_t)data, data_len); if (res != TEE_SUCCESS) return res; res = tee_mmu_check_access_rights(utc, TEE_MEMORY_ACCESS_READ | TEE_MEMORY_ACCESS_ANY_OWNER, (uaddr_t)sig, sig_len); if (res != TEE_SUCCESS) return res; params = malloc(sizeof(TEE_Attribute) * num_params); if (!params) return TEE_ERROR_OUT_OF_MEMORY; res = copy_in_attrs(utc, usr_params, num_params, params); if (res != TEE_SUCCESS) goto out; res = tee_obj_get(utc, cs->key1, &o); if (res != TEE_SUCCESS) goto out; if ((o->info.handleFlags & TEE_HANDLE_FLAG_INITIALIZED) == 0) { res = TEE_ERROR_BAD_PARAMETERS; goto out; } switch (TEE_ALG_GET_MAIN_ALG(cs->algo)) { case TEE_MAIN_ALGO_RSA: if (cs->algo != TEE_ALG_RSASSA_PKCS1_V1_5) { hash_algo = TEE_DIGEST_HASH_TO_ALGO(cs->algo); res = tee_hash_get_digest_size(hash_algo, &hash_size); if (res != TEE_SUCCESS) break; if (data_len != hash_size) { res = TEE_ERROR_BAD_PARAMETERS; break; } salt_len = 
pkcs1_get_salt_len(params, num_params, hash_size); } res = crypto_acipher_rsassa_verify(cs->algo, o->attr, salt_len, data, data_len, sig, sig_len); break; case TEE_MAIN_ALGO_DSA: hash_algo = TEE_DIGEST_HASH_TO_ALGO(cs->algo); res = tee_hash_get_digest_size(hash_algo, &hash_size); if (res != TEE_SUCCESS) break; /* * Depending on the DSA algorithm (NIST), the digital signature * output size may be truncated to the size of a key pair * (Q prime size). Q prime size must be less or equal than the * hash output length of the hash algorithm involved. */ if (data_len > hash_size) { res = TEE_ERROR_BAD_PARAMETERS; break; } res = crypto_acipher_dsa_verify(cs->algo, o->attr, data, data_len, sig, sig_len); break; case TEE_MAIN_ALGO_ECDSA: res = crypto_acipher_ecc_verify(cs->algo, o->attr, data, data_len, sig, sig_len); break; default: res = TEE_ERROR_NOT_SUPPORTED; } out: free(params); return res; }
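/*
 * Editor's sketch, not part of the original file: the PSS salt-length
 * convention implemented by pkcs1_get_salt_len() and used by both
 * syscall_asymm_operate() and syscall_asymm_verify() above. A caller may
 * override the salt length with TEE_ATTR_RSA_PSS_SALT_LENGTH; otherwise it
 * defaults to the digest length, e.g. 32 bytes for the SHA-256 variants.
 * The wrapper name below is hypothetical.
 */
static inline int pss_salt_len_sha256(const TEE_Attribute *params,
				      uint32_t num_params)
{
	const size_t sha256_digest_len = 32;	/* digest size in bytes */

	return pkcs1_get_salt_len(params, num_params, sha256_digest_len);
}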
// SPDX-License-Identifier: BSD-2-Clause /* * Copyright (c) 2014, STMicroelectronics International N.V. */ #include <assert.h> #include <compiler.h> #include <crypto/crypto.h> #include <kernel/tee_ta_manager.h> #include <mm/tee_mmu.h> #include <string_ext.h> #include <string.h> #include <sys/queue.h> #include <tee_api_types.h> #include <tee/tee_cryp_utl.h> #include <tee/tee_obj.h> #include <tee/tee_svc_cryp.h> #include <tee/tee_svc.h> #include <trace.h> #include <utee_defines.h> #include <util.h> #include <tee_api_defines_extensions.h> #if defined(CFG_CRYPTO_HKDF) #include <tee/tee_cryp_hkdf.h> #endif #if defined(CFG_CRYPTO_CONCAT_KDF) #include <tee/tee_cryp_concat_kdf.h> #endif #if defined(CFG_CRYPTO_PBKDF2) #include <tee/tee_cryp_pbkdf2.h> #endif typedef void (*tee_cryp_ctx_finalize_func_t) (void *ctx, uint32_t algo); struct tee_cryp_state { TAILQ_ENTRY(tee_cryp_state) link; uint32_t algo; uint32_t mode; vaddr_t key1; vaddr_t key2; void *ctx; tee_cryp_ctx_finalize_func_t ctx_finalize; }; struct tee_cryp_obj_secret { uint32_t key_size; uint32_t alloc_size; /* * Pseudo code to visualize the layout of the structure. * Next follows data, such as: * uint8_t data[alloc_size] * key_size must never exceed alloc_size */ }; #define TEE_TYPE_ATTR_OPTIONAL 0x0 #define TEE_TYPE_ATTR_REQUIRED 0x1 #define TEE_TYPE_ATTR_OPTIONAL_GROUP 0x2 #define TEE_TYPE_ATTR_SIZE_INDICATOR 0x4 #define TEE_TYPE_ATTR_GEN_KEY_OPT 0x8 #define TEE_TYPE_ATTR_GEN_KEY_REQ 0x10 /* Handle storing of generic secret keys of varying lengths */ #define ATTR_OPS_INDEX_SECRET 0 /* Convert to/from big-endian byte array and provider-specific bignum */ #define ATTR_OPS_INDEX_BIGNUM 1 /* Convert to/from value attribute depending on direction */ #define ATTR_OPS_INDEX_VALUE 2 struct tee_cryp_obj_type_attrs { uint32_t attr_id; uint16_t flags; uint16_t ops_index; uint16_t raw_offs; uint16_t raw_size; }; #define RAW_DATA(_x, _y) \ .raw_offs = offsetof(_x, _y), .raw_size = MEMBER_SIZE(_x, _y) static const struct tee_cryp_obj_type_attrs tee_cryp_obj_secret_value_attrs[] = { { .attr_id = TEE_ATTR_SECRET_VALUE, .flags = TEE_TYPE_ATTR_REQUIRED | TEE_TYPE_ATTR_SIZE_INDICATOR, .ops_index = ATTR_OPS_INDEX_SECRET, .raw_offs = 0, .raw_size = 0 }, }; static const struct tee_cryp_obj_type_attrs tee_cryp_obj_rsa_pub_key_attrs[] = { { .attr_id = TEE_ATTR_RSA_MODULUS, .flags = TEE_TYPE_ATTR_REQUIRED | TEE_TYPE_ATTR_SIZE_INDICATOR, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct rsa_public_key, n) }, { .attr_id = TEE_ATTR_RSA_PUBLIC_EXPONENT, .flags = TEE_TYPE_ATTR_REQUIRED, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct rsa_public_key, e) }, }; static const struct tee_cryp_obj_type_attrs tee_cryp_obj_rsa_keypair_attrs[] = { { .attr_id = TEE_ATTR_RSA_MODULUS, .flags = TEE_TYPE_ATTR_REQUIRED | TEE_TYPE_ATTR_SIZE_INDICATOR, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct rsa_keypair, n) }, { .attr_id = TEE_ATTR_RSA_PUBLIC_EXPONENT, .flags = TEE_TYPE_ATTR_REQUIRED, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct rsa_keypair, e) }, { .attr_id = TEE_ATTR_RSA_PRIVATE_EXPONENT, .flags = TEE_TYPE_ATTR_REQUIRED, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct rsa_keypair, d) }, { .attr_id = TEE_ATTR_RSA_PRIME1, .flags = TEE_TYPE_ATTR_OPTIONAL_GROUP, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct rsa_keypair, p) }, { .attr_id = TEE_ATTR_RSA_PRIME2, .flags = TEE_TYPE_ATTR_OPTIONAL_GROUP, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct rsa_keypair, q) }, { .attr_id = TEE_ATTR_RSA_EXPONENT1, .flags = TEE_TYPE_ATTR_OPTIONAL_GROUP, .ops_index = 
ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct rsa_keypair, dp) }, { .attr_id = TEE_ATTR_RSA_EXPONENT2, .flags = TEE_TYPE_ATTR_OPTIONAL_GROUP, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct rsa_keypair, dq) }, { .attr_id = TEE_ATTR_RSA_COEFFICIENT, .flags = TEE_TYPE_ATTR_OPTIONAL_GROUP, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct rsa_keypair, qp) }, }; static const struct tee_cryp_obj_type_attrs tee_cryp_obj_dsa_pub_key_attrs[] = { { .attr_id = TEE_ATTR_DSA_PRIME, .flags = TEE_TYPE_ATTR_REQUIRED, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct dsa_public_key, p) }, { .attr_id = TEE_ATTR_DSA_SUBPRIME, .flags = TEE_TYPE_ATTR_REQUIRED | TEE_TYPE_ATTR_SIZE_INDICATOR, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct dsa_public_key, q) }, { .attr_id = TEE_ATTR_DSA_BASE, .flags = TEE_TYPE_ATTR_REQUIRED, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct dsa_public_key, g) }, { .attr_id = TEE_ATTR_DSA_PUBLIC_VALUE, .flags = TEE_TYPE_ATTR_REQUIRED, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct dsa_public_key, y) }, }; static const struct tee_cryp_obj_type_attrs tee_cryp_obj_dsa_keypair_attrs[] = { { .attr_id = TEE_ATTR_DSA_PRIME, .flags = TEE_TYPE_ATTR_REQUIRED | TEE_TYPE_ATTR_GEN_KEY_REQ, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct dsa_keypair, p) }, { .attr_id = TEE_ATTR_DSA_SUBPRIME, .flags = TEE_TYPE_ATTR_REQUIRED | TEE_TYPE_ATTR_SIZE_INDICATOR | TEE_TYPE_ATTR_GEN_KEY_REQ, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct dsa_keypair, q) }, { .attr_id = TEE_ATTR_DSA_BASE, .flags = TEE_TYPE_ATTR_REQUIRED | TEE_TYPE_ATTR_GEN_KEY_REQ, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct dsa_keypair, g) }, { .attr_id = TEE_ATTR_DSA_PRIVATE_VALUE, .flags = TEE_TYPE_ATTR_REQUIRED, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct dsa_keypair, x) }, { .attr_id = TEE_ATTR_DSA_PUBLIC_VALUE, .flags = TEE_TYPE_ATTR_REQUIRED, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct dsa_keypair, y) }, }; static const struct tee_cryp_obj_type_attrs tee_cryp_obj_dh_keypair_attrs[] = { { .attr_id = TEE_ATTR_DH_PRIME, .flags = TEE_TYPE_ATTR_REQUIRED | TEE_TYPE_ATTR_SIZE_INDICATOR | TEE_TYPE_ATTR_GEN_KEY_REQ, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct dh_keypair, p) }, { .attr_id = TEE_ATTR_DH_BASE, .flags = TEE_TYPE_ATTR_REQUIRED | TEE_TYPE_ATTR_GEN_KEY_REQ, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct dh_keypair, g) }, { .attr_id = TEE_ATTR_DH_PUBLIC_VALUE, .flags = TEE_TYPE_ATTR_REQUIRED, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct dh_keypair, y) }, { .attr_id = TEE_ATTR_DH_PRIVATE_VALUE, .flags = TEE_TYPE_ATTR_REQUIRED, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct dh_keypair, x) }, { .attr_id = TEE_ATTR_DH_SUBPRIME, .flags = TEE_TYPE_ATTR_OPTIONAL_GROUP | TEE_TYPE_ATTR_GEN_KEY_OPT, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct dh_keypair, q) }, { .attr_id = TEE_ATTR_DH_X_BITS, .flags = TEE_TYPE_ATTR_GEN_KEY_OPT, .ops_index = ATTR_OPS_INDEX_VALUE, RAW_DATA(struct dh_keypair, xbits) }, }; #if defined(CFG_CRYPTO_HKDF) static const struct tee_cryp_obj_type_attrs tee_cryp_obj_hkdf_ikm_attrs[] = { { .attr_id = TEE_ATTR_HKDF_IKM, .flags = TEE_TYPE_ATTR_REQUIRED | TEE_TYPE_ATTR_SIZE_INDICATOR, .ops_index = ATTR_OPS_INDEX_SECRET, .raw_offs = 0, .raw_size = 0 }, }; #endif #if defined(CFG_CRYPTO_CONCAT_KDF) static const struct tee_cryp_obj_type_attrs tee_cryp_obj_concat_kdf_z_attrs[] = { { .attr_id = TEE_ATTR_CONCAT_KDF_Z, .flags = TEE_TYPE_ATTR_REQUIRED | TEE_TYPE_ATTR_SIZE_INDICATOR, .ops_index = ATTR_OPS_INDEX_SECRET, .raw_offs = 0, .raw_size = 0 }, }; 
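/*
 * Like the other KDF input secrets, Z reuses the generic secret layout:
 * raw_offs/raw_size are unused by ATTR_OPS_INDEX_SECRET, which operates
 * on the whole tee_cryp_obj_secret and its trailing data.
 */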
#endif #if defined(CFG_CRYPTO_PBKDF2) static const struct tee_cryp_obj_type_attrs tee_cryp_obj_pbkdf2_passwd_attrs[] = { { .attr_id = TEE_ATTR_PBKDF2_PASSWORD, .flags = TEE_TYPE_ATTR_REQUIRED | TEE_TYPE_ATTR_SIZE_INDICATOR, .ops_index = ATTR_OPS_INDEX_SECRET, .raw_offs = 0, .raw_size = 0 }, }; #endif static const struct tee_cryp_obj_type_attrs tee_cryp_obj_ecc_pub_key_attrs[] = { { .attr_id = TEE_ATTR_ECC_PUBLIC_VALUE_X, .flags = TEE_TYPE_ATTR_REQUIRED, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct ecc_public_key, x) }, { .attr_id = TEE_ATTR_ECC_PUBLIC_VALUE_Y, .flags = TEE_TYPE_ATTR_REQUIRED, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct ecc_public_key, y) }, { .attr_id = TEE_ATTR_ECC_CURVE, .flags = TEE_TYPE_ATTR_REQUIRED, .ops_index = ATTR_OPS_INDEX_VALUE, RAW_DATA(struct ecc_public_key, curve) }, }; static const struct tee_cryp_obj_type_attrs tee_cryp_obj_ecc_keypair_attrs[] = { { .attr_id = TEE_ATTR_ECC_PRIVATE_VALUE, .flags = TEE_TYPE_ATTR_REQUIRED, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct ecc_keypair, d) }, { .attr_id = TEE_ATTR_ECC_PUBLIC_VALUE_X, .flags = TEE_TYPE_ATTR_REQUIRED, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct ecc_keypair, x) }, { .attr_id = TEE_ATTR_ECC_PUBLIC_VALUE_Y, .flags = TEE_TYPE_ATTR_REQUIRED, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct ecc_keypair, y) }, { .attr_id = TEE_ATTR_ECC_CURVE, .flags = TEE_TYPE_ATTR_REQUIRED | TEE_TYPE_ATTR_SIZE_INDICATOR, .ops_index = ATTR_OPS_INDEX_VALUE, RAW_DATA(struct ecc_keypair, curve) }, }; struct tee_cryp_obj_type_props { TEE_ObjectType obj_type; uint16_t min_size; /* may not be smaller than this */ uint16_t max_size; /* may not be larger than this */ uint16_t alloc_size; /* this many bytes are allocated to hold data */ uint8_t quanta; /* may only be a multiple of this */ uint8_t num_type_attrs; const struct tee_cryp_obj_type_attrs *type_attrs; }; #define PROP(obj_type, quanta, min_size, max_size, alloc_size, type_attrs) \ { (obj_type), (min_size), (max_size), (alloc_size), (quanta), \ ARRAY_SIZE(type_attrs), (type_attrs) } static const struct tee_cryp_obj_type_props tee_cryp_obj_props[] = { PROP(TEE_TYPE_AES, 64, 128, 256, /* valid sizes 128, 192, 256 */ 256 / 8 + sizeof(struct tee_cryp_obj_secret), tee_cryp_obj_secret_value_attrs), PROP(TEE_TYPE_DES, 56, 56, 56, /* * Valid size 56 without parity, note that we still allocate * for 64 bits since the key is supplied with parity. */ 64 / 8 + sizeof(struct tee_cryp_obj_secret), tee_cryp_obj_secret_value_attrs), PROP(TEE_TYPE_DES3, 56, 112, 168, /* * Valid sizes 112, 168 without parity, note that we still * allocate space for the parity since the key is * supplied with parity. 
*/ 192 / 8 + sizeof(struct tee_cryp_obj_secret), tee_cryp_obj_secret_value_attrs), PROP(TEE_TYPE_HMAC_MD5, 8, 64, 512, 512 / 8 + sizeof(struct tee_cryp_obj_secret), tee_cryp_obj_secret_value_attrs), PROP(TEE_TYPE_HMAC_SHA1, 8, 80, 512, 512 / 8 + sizeof(struct tee_cryp_obj_secret), tee_cryp_obj_secret_value_attrs), PROP(TEE_TYPE_HMAC_SHA224, 8, 112, 512, 512 / 8 + sizeof(struct tee_cryp_obj_secret), tee_cryp_obj_secret_value_attrs), PROP(TEE_TYPE_HMAC_SHA256, 8, 192, 1024, 1024 / 8 + sizeof(struct tee_cryp_obj_secret), tee_cryp_obj_secret_value_attrs), PROP(TEE_TYPE_HMAC_SHA384, 8, 256, 1024, 1024 / 8 + sizeof(struct tee_cryp_obj_secret), tee_cryp_obj_secret_value_attrs), PROP(TEE_TYPE_HMAC_SHA512, 8, 256, 1024, 1024 / 8 + sizeof(struct tee_cryp_obj_secret), tee_cryp_obj_secret_value_attrs), PROP(TEE_TYPE_GENERIC_SECRET, 8, 0, 4096, 4096 / 8 + sizeof(struct tee_cryp_obj_secret), tee_cryp_obj_secret_value_attrs), #if defined(CFG_CRYPTO_HKDF) PROP(TEE_TYPE_HKDF_IKM, 8, 0, 4096, 4096 / 8 + sizeof(struct tee_cryp_obj_secret), tee_cryp_obj_hkdf_ikm_attrs), #endif #if defined(CFG_CRYPTO_CONCAT_KDF) PROP(TEE_TYPE_CONCAT_KDF_Z, 8, 0, 4096, 4096 / 8 + sizeof(struct tee_cryp_obj_secret), tee_cryp_obj_concat_kdf_z_attrs), #endif #if defined(CFG_CRYPTO_PBKDF2) PROP(TEE_TYPE_PBKDF2_PASSWORD, 8, 0, 4096, 4096 / 8 + sizeof(struct tee_cryp_obj_secret), tee_cryp_obj_pbkdf2_passwd_attrs), #endif PROP(TEE_TYPE_RSA_PUBLIC_KEY, 1, 256, CFG_CORE_BIGNUM_MAX_BITS, sizeof(struct rsa_public_key), tee_cryp_obj_rsa_pub_key_attrs), PROP(TEE_TYPE_RSA_KEYPAIR, 1, 256, CFG_CORE_BIGNUM_MAX_BITS, sizeof(struct rsa_keypair), tee_cryp_obj_rsa_keypair_attrs), PROP(TEE_TYPE_DSA_PUBLIC_KEY, 64, 512, 3072, sizeof(struct dsa_public_key), tee_cryp_obj_dsa_pub_key_attrs), PROP(TEE_TYPE_DSA_KEYPAIR, 64, 512, 3072, sizeof(struct dsa_keypair), tee_cryp_obj_dsa_keypair_attrs), PROP(TEE_TYPE_DH_KEYPAIR, 1, 256, 2048, sizeof(struct dh_keypair), tee_cryp_obj_dh_keypair_attrs), PROP(TEE_TYPE_ECDSA_PUBLIC_KEY, 1, 192, 521, sizeof(struct ecc_public_key), tee_cryp_obj_ecc_pub_key_attrs), PROP(TEE_TYPE_ECDSA_KEYPAIR, 1, 192, 521, sizeof(struct ecc_keypair), tee_cryp_obj_ecc_keypair_attrs), PROP(TEE_TYPE_ECDH_PUBLIC_KEY, 1, 192, 521, sizeof(struct ecc_public_key), tee_cryp_obj_ecc_pub_key_attrs), PROP(TEE_TYPE_ECDH_KEYPAIR, 1, 192, 521, sizeof(struct ecc_keypair), tee_cryp_obj_ecc_keypair_attrs), }; struct attr_ops { TEE_Result (*from_user)(void *attr, const void *buffer, size_t size); TEE_Result (*to_user)(void *attr, struct tee_ta_session *sess, void *buffer, uint64_t *size); TEE_Result (*to_binary)(void *attr, void *data, size_t data_len, size_t *offs); bool (*from_binary)(void *attr, const void *data, size_t data_len, size_t *offs); TEE_Result (*from_obj)(void *attr, void *src_attr); void (*free)(void *attr); void (*clear)(void *attr); }; static TEE_Result op_u32_to_binary_helper(uint32_t v, uint8_t *data, size_t data_len, size_t *offs) { uint32_t field; size_t next_offs; if (ADD_OVERFLOW(*offs, sizeof(field), &next_offs)) return TEE_ERROR_OVERFLOW; if (data && next_offs <= data_len) { field = TEE_U32_TO_BIG_ENDIAN(v); memcpy(data + *offs, &field, sizeof(field)); } (*offs) = next_offs; return TEE_SUCCESS; } static bool op_u32_from_binary_helper(uint32_t *v, const uint8_t *data, size_t data_len, size_t *offs) { uint32_t field; if (!data || (*offs + sizeof(field)) > data_len) return false; memcpy(&field, data + *offs, sizeof(field)); *v = TEE_U32_FROM_BIG_ENDIAN(field); (*offs) += sizeof(field); return true; } static TEE_Result 
op_attr_secret_value_from_user(void *attr, const void *buffer, size_t size) { struct tee_cryp_obj_secret *key = attr; /* Data size has to fit in allocated buffer */ if (size > key->alloc_size) return TEE_ERROR_SECURITY; memcpy(key + 1, buffer, size); key->key_size = size; return TEE_SUCCESS; } static TEE_Result op_attr_secret_value_to_user(void *attr, struct tee_ta_session *sess __unused, void *buffer, uint64_t *size) { TEE_Result res; struct tee_cryp_obj_secret *key = attr; uint64_t s; uint64_t key_size; res = tee_svc_copy_from_user(&s, size, sizeof(s)); if (res != TEE_SUCCESS) return res; key_size = key->key_size; res = tee_svc_copy_to_user(size, &key_size, sizeof(key_size)); if (res != TEE_SUCCESS) return res; if (s < key->key_size || !buffer) return TEE_ERROR_SHORT_BUFFER; return tee_svc_copy_to_user(buffer, key + 1, key->key_size); } static TEE_Result op_attr_secret_value_to_binary(void *attr, void *data, size_t data_len, size_t *offs) { TEE_Result res; struct tee_cryp_obj_secret *key = attr; size_t next_offs; res = op_u32_to_binary_helper(key->key_size, data, data_len, offs); if (res != TEE_SUCCESS) return res; if (ADD_OVERFLOW(*offs, key->key_size, &next_offs)) return TEE_ERROR_OVERFLOW; if (data && next_offs <= data_len) memcpy((uint8_t *)data + *offs, key + 1, key->key_size); (*offs) = next_offs; return TEE_SUCCESS; } static bool op_attr_secret_value_from_binary(void *attr, const void *data, size_t data_len, size_t *offs) { struct tee_cryp_obj_secret *key = attr; uint32_t s; if (!op_u32_from_binary_helper(&s, data, data_len, offs)) return false; if ((*offs + s) > data_len) return false; /* Data size has to fit in allocated buffer */ if (s > key->alloc_size) return false; key->key_size = s; memcpy(key + 1, (const uint8_t *)data + *offs, s); (*offs) += s; return true; } static TEE_Result op_attr_secret_value_from_obj(void *attr, void *src_attr) { struct tee_cryp_obj_secret *key = attr; struct tee_cryp_obj_secret *src_key = src_attr; if (src_key->key_size > key->alloc_size) return TEE_ERROR_BAD_STATE; memcpy(key + 1, src_key + 1, src_key->key_size); key->key_size = src_key->key_size; return TEE_SUCCESS; } static void op_attr_secret_value_clear(void *attr) { struct tee_cryp_obj_secret *key = attr; key->key_size = 0; memset(key + 1, 0, key->alloc_size); } static TEE_Result op_attr_bignum_from_user(void *attr, const void *buffer, size_t size) { struct bignum **bn = attr; return crypto_bignum_bin2bn(buffer, size, *bn); } static TEE_Result op_attr_bignum_to_user(void *attr, struct tee_ta_session *sess, void *buffer, uint64_t *size) { TEE_Result res; struct bignum **bn = attr; uint64_t req_size; uint64_t s; res = tee_svc_copy_from_user(&s, size, sizeof(s)); if (res != TEE_SUCCESS) return res; req_size = crypto_bignum_num_bytes(*bn); res = tee_svc_copy_to_user(size, &req_size, sizeof(req_size)); if (res != TEE_SUCCESS) return res; if (!req_size) return TEE_SUCCESS; if (s < req_size || !buffer) return TEE_ERROR_SHORT_BUFFER; /* Check we can access data using supplied user mode pointer */ res = tee_mmu_check_access_rights(to_user_ta_ctx(sess->ctx), TEE_MEMORY_ACCESS_READ | TEE_MEMORY_ACCESS_WRITE | TEE_MEMORY_ACCESS_ANY_OWNER, (uaddr_t)buffer, req_size); if (res != TEE_SUCCESS) return res; /* * Write the bignum (which raw data points to) into an array of * bytes (stored in buffer) */ crypto_bignum_bn2bin(*bn, buffer); return TEE_SUCCESS; } static TEE_Result op_attr_bignum_to_binary(void *attr, void *data, size_t data_len, size_t *offs) { TEE_Result res; struct bignum **bn = attr; uint32_t n = 
crypto_bignum_num_bytes(*bn); size_t next_offs; res = op_u32_to_binary_helper(n, data, data_len, offs); if (res != TEE_SUCCESS) return res; if (ADD_OVERFLOW(*offs, n, &next_offs)) return TEE_ERROR_OVERFLOW; if (data && next_offs <= data_len) crypto_bignum_bn2bin(*bn, (uint8_t *)data + *offs); (*offs) = next_offs; return TEE_SUCCESS; } static bool op_attr_bignum_from_binary(void *attr, const void *data, size_t data_len, size_t *offs) { struct bignum **bn = attr; uint32_t n; if (!op_u32_from_binary_helper(&n, data, data_len, offs)) return false; if ((*offs + n) > data_len) return false; if (crypto_bignum_bin2bn((const uint8_t *)data + *offs, n, *bn)) return false; (*offs) += n; return true; } static TEE_Result op_attr_bignum_from_obj(void *attr, void *src_attr) { struct bignum **bn = attr; struct bignum **src_bn = src_attr; crypto_bignum_copy(*bn, *src_bn); return TEE_SUCCESS; } static void op_attr_bignum_clear(void *attr) { struct bignum **bn = attr; crypto_bignum_clear(*bn); } static void op_attr_bignum_free(void *attr) { struct bignum **bn = attr; crypto_bignum_free(*bn); *bn = NULL; } static TEE_Result op_attr_value_from_user(void *attr, const void *buffer, size_t size) { uint32_t *v = attr; if (size != sizeof(uint32_t) * 2) return TEE_ERROR_GENERIC; /* "can't happen" */ /* Note that only the first value is copied */ memcpy(v, buffer, sizeof(uint32_t)); return TEE_SUCCESS; } static TEE_Result op_attr_value_to_user(void *attr, struct tee_ta_session *sess __unused, void *buffer, uint64_t *size) { TEE_Result res; uint32_t *v = attr; uint64_t s; uint32_t value[2] = { *v }; uint64_t req_size = sizeof(value); res = tee_svc_copy_from_user(&s, size, sizeof(s)); if (res != TEE_SUCCESS) return res; if (s < req_size || !buffer) return TEE_ERROR_SHORT_BUFFER; return tee_svc_copy_to_user(buffer, value, req_size); } static TEE_Result op_attr_value_to_binary(void *attr, void *data, size_t data_len, size_t *offs) { uint32_t *v = attr; return op_u32_to_binary_helper(*v, data, data_len, offs); } static bool op_attr_value_from_binary(void *attr, const void *data, size_t data_len, size_t *offs) { uint32_t *v = attr; return op_u32_from_binary_helper(v, data, data_len, offs); } static TEE_Result op_attr_value_from_obj(void *attr, void *src_attr) { uint32_t *v = attr; uint32_t *src_v = src_attr; *v = *src_v; return TEE_SUCCESS; } static void op_attr_value_clear(void *attr) { uint32_t *v = attr; *v = 0; } static const struct attr_ops attr_ops[] = { [ATTR_OPS_INDEX_SECRET] = { .from_user = op_attr_secret_value_from_user, .to_user = op_attr_secret_value_to_user, .to_binary = op_attr_secret_value_to_binary, .from_binary = op_attr_secret_value_from_binary, .from_obj = op_attr_secret_value_from_obj, .free = op_attr_secret_value_clear, /* not a typo */ .clear = op_attr_secret_value_clear, }, [ATTR_OPS_INDEX_BIGNUM] = { .from_user = op_attr_bignum_from_user, .to_user = op_attr_bignum_to_user, .to_binary = op_attr_bignum_to_binary, .from_binary = op_attr_bignum_from_binary, .from_obj = op_attr_bignum_from_obj, .free = op_attr_bignum_free, .clear = op_attr_bignum_clear, }, [ATTR_OPS_INDEX_VALUE] = { .from_user = op_attr_value_from_user, .to_user = op_attr_value_to_user, .to_binary = op_attr_value_to_binary, .from_binary = op_attr_value_from_binary, .from_obj = op_attr_value_from_obj, .free = op_attr_value_clear, /* not a typo */ .clear = op_attr_value_clear, }, }; TEE_Result syscall_cryp_obj_get_info(unsigned long obj, TEE_ObjectInfo *info) { TEE_Result res; struct tee_ta_session *sess; struct tee_obj *o; res = 
tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) goto exit; res = tee_obj_get(to_user_ta_ctx(sess->ctx), tee_svc_uref_to_vaddr(obj), &o); if (res != TEE_SUCCESS) goto exit; res = tee_svc_copy_to_user(info, &o->info, sizeof(o->info)); exit: return res; } TEE_Result syscall_cryp_obj_restrict_usage(unsigned long obj, unsigned long usage) { TEE_Result res; struct tee_ta_session *sess; struct tee_obj *o; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) goto exit; res = tee_obj_get(to_user_ta_ctx(sess->ctx), tee_svc_uref_to_vaddr(obj), &o); if (res != TEE_SUCCESS) goto exit; o->info.objectUsage &= usage; exit: return res; } static int tee_svc_cryp_obj_find_type_attr_idx( uint32_t attr_id, const struct tee_cryp_obj_type_props *type_props) { size_t n; for (n = 0; n < type_props->num_type_attrs; n++) { if (attr_id == type_props->type_attrs[n].attr_id) return n; } return -1; } static const struct tee_cryp_obj_type_props *tee_svc_find_type_props( TEE_ObjectType obj_type) { size_t n; for (n = 0; n < ARRAY_SIZE(tee_cryp_obj_props); n++) { if (tee_cryp_obj_props[n].obj_type == obj_type) return tee_cryp_obj_props + n; } return NULL; } /* Set an attribute on an object */ static void set_attribute(struct tee_obj *o, const struct tee_cryp_obj_type_props *props, uint32_t attr) { int idx = tee_svc_cryp_obj_find_type_attr_idx(attr, props); if (idx < 0) return; o->have_attrs |= BIT(idx); } /* Get an attribute on an object */ static uint32_t get_attribute(const struct tee_obj *o, const struct tee_cryp_obj_type_props *props, uint32_t attr) { int idx = tee_svc_cryp_obj_find_type_attr_idx(attr, props); if (idx < 0) return 0; return o->have_attrs & BIT(idx); } TEE_Result syscall_cryp_obj_get_attr(unsigned long obj, unsigned long attr_id, void *buffer, uint64_t *size) { TEE_Result res; struct tee_ta_session *sess; struct tee_obj *o; const struct tee_cryp_obj_type_props *type_props; int idx; const struct attr_ops *ops; void *attr; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; res = tee_obj_get(to_user_ta_ctx(sess->ctx), tee_svc_uref_to_vaddr(obj), &o); if (res != TEE_SUCCESS) return TEE_ERROR_ITEM_NOT_FOUND; /* Check that the object is initialized */ if (!(o->info.handleFlags & TEE_HANDLE_FLAG_INITIALIZED)) return TEE_ERROR_BAD_PARAMETERS; /* Check that getting the attribute is allowed */ if (!(attr_id & TEE_ATTR_BIT_PROTECTED) && !(o->info.objectUsage & TEE_USAGE_EXTRACTABLE)) return TEE_ERROR_BAD_PARAMETERS; type_props = tee_svc_find_type_props(o->info.objectType); if (!type_props) { /* Unknown object type, "can't happen" */ return TEE_ERROR_BAD_STATE; } idx = tee_svc_cryp_obj_find_type_attr_idx(attr_id, type_props); if ((idx < 0) || ((o->have_attrs & (1 << idx)) == 0)) return TEE_ERROR_ITEM_NOT_FOUND; ops = attr_ops + type_props->type_attrs[idx].ops_index; attr = (uint8_t *)o->attr + type_props->type_attrs[idx].raw_offs; return ops->to_user(attr, sess, buffer, size); } void tee_obj_attr_free(struct tee_obj *o) { const struct tee_cryp_obj_type_props *tp; size_t n; if (!o->attr) return; tp = tee_svc_find_type_props(o->info.objectType); if (!tp) return; for (n = 0; n < tp->num_type_attrs; n++) { const struct tee_cryp_obj_type_attrs *ta = tp->type_attrs + n; attr_ops[ta->ops_index].free((uint8_t *)o->attr + ta->raw_offs); } } void tee_obj_attr_clear(struct tee_obj *o) { const struct tee_cryp_obj_type_props *tp; size_t n; if (!o->attr) return; tp = tee_svc_find_type_props(o->info.objectType); if (!tp) return; for (n = 0; n < tp->num_type_attrs; n++) { const struct 
tee_cryp_obj_type_attrs *ta = tp->type_attrs + n; attr_ops[ta->ops_index].clear((uint8_t *)o->attr + ta->raw_offs); } } TEE_Result tee_obj_attr_to_binary(struct tee_obj *o, void *data, size_t *data_len) { const struct tee_cryp_obj_type_props *tp; size_t n; size_t offs = 0; size_t len = data ? *data_len : 0; TEE_Result res; if (o->info.objectType == TEE_TYPE_DATA) { *data_len = 0; return TEE_SUCCESS; /* pure data object */ } if (!o->attr) return TEE_ERROR_BAD_STATE; tp = tee_svc_find_type_props(o->info.objectType); if (!tp) return TEE_ERROR_BAD_STATE; for (n = 0; n < tp->num_type_attrs; n++) { const struct tee_cryp_obj_type_attrs *ta = tp->type_attrs + n; void *attr = (uint8_t *)o->attr + ta->raw_offs; res = attr_ops[ta->ops_index].to_binary(attr, data, len, &offs); if (res != TEE_SUCCESS) return res; } *data_len = offs; if (data && offs > len) return TEE_ERROR_SHORT_BUFFER; return TEE_SUCCESS; } TEE_Result tee_obj_attr_from_binary(struct tee_obj *o, const void *data, size_t data_len) { const struct tee_cryp_obj_type_props *tp; size_t n; size_t offs = 0; if (o->info.objectType == TEE_TYPE_DATA) return TEE_SUCCESS; /* pure data object */ if (!o->attr) return TEE_ERROR_BAD_STATE; tp = tee_svc_find_type_props(o->info.objectType); if (!tp) return TEE_ERROR_BAD_STATE; for (n = 0; n < tp->num_type_attrs; n++) { const struct tee_cryp_obj_type_attrs *ta = tp->type_attrs + n; void *attr = (uint8_t *)o->attr + ta->raw_offs; if (!attr_ops[ta->ops_index].from_binary(attr, data, data_len, &offs)) return TEE_ERROR_CORRUPT_OBJECT; } return TEE_SUCCESS; } TEE_Result tee_obj_attr_copy_from(struct tee_obj *o, const struct tee_obj *src) { TEE_Result res; const struct tee_cryp_obj_type_props *tp; const struct tee_cryp_obj_type_attrs *ta; size_t n; uint32_t have_attrs = 0; void *attr; void *src_attr; if (o->info.objectType == TEE_TYPE_DATA) return TEE_SUCCESS; /* pure data object */ if (!o->attr) return TEE_ERROR_BAD_STATE; tp = tee_svc_find_type_props(o->info.objectType); if (!tp) return TEE_ERROR_BAD_STATE; if (o->info.objectType == src->info.objectType) { have_attrs = src->have_attrs; for (n = 0; n < tp->num_type_attrs; n++) { ta = tp->type_attrs + n; attr = (uint8_t *)o->attr + ta->raw_offs; src_attr = (uint8_t *)src->attr + ta->raw_offs; res = attr_ops[ta->ops_index].from_obj(attr, src_attr); if (res != TEE_SUCCESS) return res; } } else { const struct tee_cryp_obj_type_props *tp_src; int idx; if (o->info.objectType == TEE_TYPE_RSA_PUBLIC_KEY) { if (src->info.objectType != TEE_TYPE_RSA_KEYPAIR) return TEE_ERROR_BAD_PARAMETERS; } else if (o->info.objectType == TEE_TYPE_DSA_PUBLIC_KEY) { if (src->info.objectType != TEE_TYPE_DSA_KEYPAIR) return TEE_ERROR_BAD_PARAMETERS; } else if (o->info.objectType == TEE_TYPE_ECDSA_PUBLIC_KEY) { if (src->info.objectType != TEE_TYPE_ECDSA_KEYPAIR) return TEE_ERROR_BAD_PARAMETERS; } else if (o->info.objectType == TEE_TYPE_ECDH_PUBLIC_KEY) { if (src->info.objectType != TEE_TYPE_ECDH_KEYPAIR) return TEE_ERROR_BAD_PARAMETERS; } else { return TEE_ERROR_BAD_PARAMETERS; } tp_src = tee_svc_find_type_props(src->info.objectType); if (!tp_src) return TEE_ERROR_BAD_STATE; have_attrs = BIT32(tp->num_type_attrs) - 1; for (n = 0; n < tp->num_type_attrs; n++) { ta = tp->type_attrs + n; idx = tee_svc_cryp_obj_find_type_attr_idx(ta->attr_id, tp_src); if (idx < 0) return TEE_ERROR_BAD_STATE; attr = (uint8_t *)o->attr + ta->raw_offs; src_attr = (uint8_t *)src->attr + tp_src->type_attrs[idx].raw_offs; res = attr_ops[ta->ops_index].from_obj(attr, src_attr); if (res != TEE_SUCCESS) return res; } } 
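/*
 * Record which attributes the destination now holds; in the
 * keypair-to-public-key case above this is every attribute of the
 * public object type.
 */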
o->have_attrs = have_attrs; return TEE_SUCCESS; } TEE_Result tee_obj_set_type(struct tee_obj *o, uint32_t obj_type, size_t max_key_size) { TEE_Result res = TEE_SUCCESS; const struct tee_cryp_obj_type_props *type_props; /* Can only set type for newly allocated objs */ if (o->attr) return TEE_ERROR_BAD_STATE; /* * Verify that maxKeySize is supported and find out how * much should be allocated. */ if (obj_type == TEE_TYPE_DATA) { if (max_key_size) return TEE_ERROR_NOT_SUPPORTED; } else { /* Find description of object */ type_props = tee_svc_find_type_props(obj_type); if (!type_props) return TEE_ERROR_NOT_SUPPORTED; /* Check that maxKeySize follows restrictions */ if (max_key_size % type_props->quanta != 0) return TEE_ERROR_NOT_SUPPORTED; if (max_key_size < type_props->min_size) return TEE_ERROR_NOT_SUPPORTED; if (max_key_size > type_props->max_size) return TEE_ERROR_NOT_SUPPORTED; o->attr = calloc(1, type_props->alloc_size); if (!o->attr) return TEE_ERROR_OUT_OF_MEMORY; } /* If we have a key structure, pre-allocate the bignums inside */ switch (obj_type) { case TEE_TYPE_RSA_PUBLIC_KEY: res = crypto_acipher_alloc_rsa_public_key(o->attr, max_key_size); break; case TEE_TYPE_RSA_KEYPAIR: res = crypto_acipher_alloc_rsa_keypair(o->attr, max_key_size); break; case TEE_TYPE_DSA_PUBLIC_KEY: res = crypto_acipher_alloc_dsa_public_key(o->attr, max_key_size); break; case TEE_TYPE_DSA_KEYPAIR: res = crypto_acipher_alloc_dsa_keypair(o->attr, max_key_size); break; case TEE_TYPE_DH_KEYPAIR: res = crypto_acipher_alloc_dh_keypair(o->attr, max_key_size); break; case TEE_TYPE_ECDSA_PUBLIC_KEY: case TEE_TYPE_ECDH_PUBLIC_KEY: res = crypto_acipher_alloc_ecc_public_key(o->attr, max_key_size); break; case TEE_TYPE_ECDSA_KEYPAIR: case TEE_TYPE_ECDH_KEYPAIR: res = crypto_acipher_alloc_ecc_keypair(o->attr, max_key_size); break; default: if (obj_type != TEE_TYPE_DATA) { struct tee_cryp_obj_secret *key = o->attr; key->alloc_size = type_props->alloc_size - sizeof(*key); } break; } if (res != TEE_SUCCESS) return res; o->info.objectType = obj_type; o->info.maxKeySize = max_key_size; o->info.objectUsage = TEE_USAGE_DEFAULT; return TEE_SUCCESS; } TEE_Result syscall_cryp_obj_alloc(unsigned long obj_type, unsigned long max_key_size, uint32_t *obj) { TEE_Result res; struct tee_ta_session *sess; struct tee_obj *o; if (obj_type == TEE_TYPE_DATA) return TEE_ERROR_NOT_SUPPORTED; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; o = tee_obj_alloc(); if (!o) return TEE_ERROR_OUT_OF_MEMORY; res = tee_obj_set_type(o, obj_type, max_key_size); if (res != TEE_SUCCESS) { tee_obj_free(o); return res; } tee_obj_add(to_user_ta_ctx(sess->ctx), o); res = tee_svc_copy_kaddr_to_uref(obj, o); if (res != TEE_SUCCESS) tee_obj_close(to_user_ta_ctx(sess->ctx), o); return res; } TEE_Result syscall_cryp_obj_close(unsigned long obj) { TEE_Result res; struct tee_ta_session *sess; struct tee_obj *o; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; res = tee_obj_get(to_user_ta_ctx(sess->ctx), tee_svc_uref_to_vaddr(obj), &o); if (res != TEE_SUCCESS) return res; /* * If it's busy it's used by an operation, a client should never have * this handle. 
*/ if (o->busy) return TEE_ERROR_ITEM_NOT_FOUND; tee_obj_close(to_user_ta_ctx(sess->ctx), o); return TEE_SUCCESS; } TEE_Result syscall_cryp_obj_reset(unsigned long obj) { TEE_Result res; struct tee_ta_session *sess; struct tee_obj *o; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; res = tee_obj_get(to_user_ta_ctx(sess->ctx), tee_svc_uref_to_vaddr(obj), &o); if (res != TEE_SUCCESS) return res; if ((o->info.handleFlags & TEE_HANDLE_FLAG_PERSISTENT) == 0) { tee_obj_attr_clear(o); o->info.keySize = 0; o->info.objectUsage = TEE_USAGE_DEFAULT; } else { return TEE_ERROR_BAD_PARAMETERS; } /* the object is no longer initialized */ o->info.handleFlags &= ~TEE_HANDLE_FLAG_INITIALIZED; return TEE_SUCCESS; } static TEE_Result copy_in_attrs(struct user_ta_ctx *utc, const struct utee_attribute *usr_attrs, uint32_t attr_count, TEE_Attribute *attrs) { TEE_Result res; uint32_t n; res = tee_mmu_check_access_rights(utc, TEE_MEMORY_ACCESS_READ | TEE_MEMORY_ACCESS_ANY_OWNER, (uaddr_t)usr_attrs, attr_count * sizeof(struct utee_attribute)); if (res != TEE_SUCCESS) return res; for (n = 0; n < attr_count; n++) { attrs[n].attributeID = usr_attrs[n].attribute_id; if (attrs[n].attributeID & TEE_ATTR_BIT_VALUE) { attrs[n].content.value.a = usr_attrs[n].a; attrs[n].content.value.b = usr_attrs[n].b; } else { uintptr_t buf = usr_attrs[n].a; size_t len = usr_attrs[n].b; res = tee_mmu_check_access_rights(utc, TEE_MEMORY_ACCESS_READ | TEE_MEMORY_ACCESS_ANY_OWNER, buf, len); if (res != TEE_SUCCESS) return res; attrs[n].content.ref.buffer = (void *)buf; attrs[n].content.ref.length = len; } } return TEE_SUCCESS; } enum attr_usage { ATTR_USAGE_POPULATE, ATTR_USAGE_GENERATE_KEY }; static TEE_Result tee_svc_cryp_check_attr(enum attr_usage usage, const struct tee_cryp_obj_type_props *type_props, const TEE_Attribute *attrs, uint32_t attr_count) { uint32_t required_flag; uint32_t opt_flag; bool all_opt_needed; uint32_t req_attrs = 0; uint32_t opt_grp_attrs = 0; uint32_t attrs_found = 0; size_t n; uint32_t bit; uint32_t flags; int idx; if (usage == ATTR_USAGE_POPULATE) { required_flag = TEE_TYPE_ATTR_REQUIRED; opt_flag = TEE_TYPE_ATTR_OPTIONAL_GROUP; all_opt_needed = true; } else { required_flag = TEE_TYPE_ATTR_GEN_KEY_REQ; opt_flag = TEE_TYPE_ATTR_GEN_KEY_OPT; all_opt_needed = false; } /* * First find out which attributes are required and which belong to * the optional group */ for (n = 0; n < type_props->num_type_attrs; n++) { bit = 1 << n; flags = type_props->type_attrs[n].flags; if (flags & required_flag) req_attrs |= bit; else if (flags & opt_flag) opt_grp_attrs |= bit; } /* * Verify that all required attributes are in place and * that the same attribute isn't repeated. */ for (n = 0; n < attr_count; n++) { idx = tee_svc_cryp_obj_find_type_attr_idx( attrs[n].attributeID, type_props); /* attribute not defined in current object type */ if (idx < 0) return TEE_ERROR_ITEM_NOT_FOUND; bit = 1 << idx; /* attribute not repeated */ if ((attrs_found & bit) != 0) return TEE_ERROR_ITEM_NOT_FOUND; attrs_found |= bit; } /* Required attribute missing */ if ((attrs_found & req_attrs) != req_attrs) return TEE_ERROR_ITEM_NOT_FOUND; /* * If the flag says that "if one of the optional attributes is included, * all of them have to be included", this must be checked. 
*/ if (all_opt_needed && (attrs_found & opt_grp_attrs) != 0 && (attrs_found & opt_grp_attrs) != opt_grp_attrs) return TEE_ERROR_ITEM_NOT_FOUND; return TEE_SUCCESS; } static TEE_Result get_ec_key_size(uint32_t curve, size_t *key_size) { switch (curve) { case TEE_ECC_CURVE_NIST_P192: *key_size = 192; break; case TEE_ECC_CURVE_NIST_P224: *key_size = 224; break; case TEE_ECC_CURVE_NIST_P256: *key_size = 256; break; case TEE_ECC_CURVE_NIST_P384: *key_size = 384; break; case TEE_ECC_CURVE_NIST_P521: *key_size = 521; break; default: return TEE_ERROR_NOT_SUPPORTED; } return TEE_SUCCESS; } static TEE_Result tee_svc_cryp_obj_populate_type( struct tee_obj *o, const struct tee_cryp_obj_type_props *type_props, const TEE_Attribute *attrs, uint32_t attr_count) { TEE_Result res; uint32_t have_attrs = 0; size_t obj_size = 0; size_t n; int idx; const struct attr_ops *ops; void *attr; for (n = 0; n < attr_count; n++) { idx = tee_svc_cryp_obj_find_type_attr_idx( attrs[n].attributeID, type_props); /* attribute not defined in current object type */ if (idx < 0) return TEE_ERROR_ITEM_NOT_FOUND; have_attrs |= BIT32(idx); ops = attr_ops + type_props->type_attrs[idx].ops_index; attr = (uint8_t *)o->attr + type_props->type_attrs[idx].raw_offs; if (attrs[n].attributeID & TEE_ATTR_BIT_VALUE) res = ops->from_user(attr, &attrs[n].content.value, sizeof(attrs[n].content.value)); else res = ops->from_user(attr, attrs[n].content.ref.buffer, attrs[n].content.ref.length); if (res != TEE_SUCCESS) return res; /* * First attr_idx signifies the attribute that gives the size * of the object */ if (type_props->type_attrs[idx].flags & TEE_TYPE_ATTR_SIZE_INDICATOR) { /* * For ECDSA/ECDH we need to translate curve into * object size */ if (attrs[n].attributeID == TEE_ATTR_ECC_CURVE) { res = get_ec_key_size(attrs[n].content.value.a, &obj_size); if (res != TEE_SUCCESS) return res; } else { obj_size += (attrs[n].content.ref.length * 8); } } } /* * We have to do it like this because the parity bits aren't counted * when telling the size of the key in bits. 
*/ if (o->info.objectType == TEE_TYPE_DES || o->info.objectType == TEE_TYPE_DES3) obj_size -= obj_size / 8; /* Exclude parity in size of key */ o->have_attrs = have_attrs; o->info.keySize = obj_size; return TEE_SUCCESS; } TEE_Result syscall_cryp_obj_populate(unsigned long obj, struct utee_attribute *usr_attrs, unsigned long attr_count) { TEE_Result res; struct tee_ta_session *sess; struct tee_obj *o; const struct tee_cryp_obj_type_props *type_props; TEE_Attribute *attrs = NULL; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; res = tee_obj_get(to_user_ta_ctx(sess->ctx), tee_svc_uref_to_vaddr(obj), &o); if (res != TEE_SUCCESS) return res; /* Must be a transient object */ if ((o->info.handleFlags & TEE_HANDLE_FLAG_PERSISTENT) != 0) return TEE_ERROR_BAD_PARAMETERS; /* Must not be initialized already */ if ((o->info.handleFlags & TEE_HANDLE_FLAG_INITIALIZED) != 0) return TEE_ERROR_BAD_PARAMETERS; type_props = tee_svc_find_type_props(o->info.objectType); if (!type_props) return TEE_ERROR_NOT_IMPLEMENTED; size_t alloc_size = 0; if (MUL_OVERFLOW(sizeof(TEE_Attribute), attr_count, &alloc_size)) return TEE_ERROR_OVERFLOW; attrs = malloc(alloc_size); if (!attrs) return TEE_ERROR_OUT_OF_MEMORY; res = copy_in_attrs(to_user_ta_ctx(sess->ctx), usr_attrs, attr_count, attrs); if (res != TEE_SUCCESS) goto out; res = tee_svc_cryp_check_attr(ATTR_USAGE_POPULATE, type_props, attrs, attr_count); if (res != TEE_SUCCESS) goto out; res = tee_svc_cryp_obj_populate_type(o, type_props, attrs, attr_count); if (res == TEE_SUCCESS) o->info.handleFlags |= TEE_HANDLE_FLAG_INITIALIZED; out: free(attrs); return res; } TEE_Result syscall_cryp_obj_copy(unsigned long dst, unsigned long src) { TEE_Result res; struct tee_ta_session *sess; struct tee_obj *dst_o; struct tee_obj *src_o; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; res = tee_obj_get(to_user_ta_ctx(sess->ctx), tee_svc_uref_to_vaddr(dst), &dst_o); if (res != TEE_SUCCESS) return res; res = tee_obj_get(to_user_ta_ctx(sess->ctx), tee_svc_uref_to_vaddr(src), &src_o); if (res != TEE_SUCCESS) return res; if ((src_o->info.handleFlags & TEE_HANDLE_FLAG_INITIALIZED) == 0) return TEE_ERROR_BAD_PARAMETERS; if ((dst_o->info.handleFlags & TEE_HANDLE_FLAG_PERSISTENT) != 0) return TEE_ERROR_BAD_PARAMETERS; if ((dst_o->info.handleFlags & TEE_HANDLE_FLAG_INITIALIZED) != 0) return TEE_ERROR_BAD_PARAMETERS; res = tee_obj_attr_copy_from(dst_o, src_o); if (res != TEE_SUCCESS) return res; dst_o->info.handleFlags |= TEE_HANDLE_FLAG_INITIALIZED; dst_o->info.keySize = src_o->info.keySize; dst_o->info.objectUsage = src_o->info.objectUsage; return TEE_SUCCESS; } static TEE_Result tee_svc_obj_generate_key_rsa( struct tee_obj *o, const struct tee_cryp_obj_type_props *type_props, uint32_t key_size, const TEE_Attribute *params, uint32_t param_count) { TEE_Result res; struct rsa_keypair *key = o->attr; uint32_t e = TEE_U32_TO_BIG_ENDIAN(65537); /* Copy the present attributes into the obj before starting */ res = tee_svc_cryp_obj_populate_type(o, type_props, params, param_count); if (res != TEE_SUCCESS) return res; if (!get_attribute(o, type_props, TEE_ATTR_RSA_PUBLIC_EXPONENT)) crypto_bignum_bin2bn((const uint8_t *)&e, sizeof(e), key->e); res = crypto_acipher_gen_rsa_key(key, key_size); if (res != TEE_SUCCESS) return res; /* Set bits for all known attributes for this object type */ o->have_attrs = (1 << type_props->num_type_attrs) - 1; return TEE_SUCCESS; } static TEE_Result tee_svc_obj_generate_key_dsa( struct tee_obj *o, const struct 
tee_cryp_obj_type_props *type_props, uint32_t key_size) { TEE_Result res; res = crypto_acipher_gen_dsa_key(o->attr, key_size); if (res != TEE_SUCCESS) return res; /* Set bits for all known attributes for this object type */ o->have_attrs = (1 << type_props->num_type_attrs) - 1; return TEE_SUCCESS; } static TEE_Result tee_svc_obj_generate_key_dh( struct tee_obj *o, const struct tee_cryp_obj_type_props *type_props, uint32_t key_size __unused, const TEE_Attribute *params, uint32_t param_count) { TEE_Result res; struct dh_keypair *tee_dh_key; struct bignum *dh_q = NULL; uint32_t dh_xbits = 0; /* Copy the present attributes into the obj before starting */ res = tee_svc_cryp_obj_populate_type(o, type_props, params, param_count); if (res != TEE_SUCCESS) return res; tee_dh_key = (struct dh_keypair *)o->attr; if (get_attribute(o, type_props, TEE_ATTR_DH_SUBPRIME)) dh_q = tee_dh_key->q; if (get_attribute(o, type_props, TEE_ATTR_DH_X_BITS)) dh_xbits = tee_dh_key->xbits; res = crypto_acipher_gen_dh_key(tee_dh_key, dh_q, dh_xbits); if (res != TEE_SUCCESS) return res; /* Set bits for the generated public and private key */ set_attribute(o, type_props, TEE_ATTR_DH_PUBLIC_VALUE); set_attribute(o, type_props, TEE_ATTR_DH_PRIVATE_VALUE); set_attribute(o, type_props, TEE_ATTR_DH_X_BITS); return TEE_SUCCESS; } static TEE_Result tee_svc_obj_generate_key_ecc( struct tee_obj *o, const struct tee_cryp_obj_type_props *type_props, uint32_t key_size __unused, const TEE_Attribute *params, uint32_t param_count) { TEE_Result res; struct ecc_keypair *tee_ecc_key; /* Copy the present attributes into the obj before starting */ res = tee_svc_cryp_obj_populate_type(o, type_props, params, param_count); if (res != TEE_SUCCESS) return res; tee_ecc_key = (struct ecc_keypair *)o->attr; res = crypto_acipher_gen_ecc_key(tee_ecc_key); if (res != TEE_SUCCESS) return res; /* Set bits for the generated public and private key */ set_attribute(o, type_props, TEE_ATTR_ECC_PRIVATE_VALUE); set_attribute(o, type_props, TEE_ATTR_ECC_PUBLIC_VALUE_X); set_attribute(o, type_props, TEE_ATTR_ECC_PUBLIC_VALUE_Y); set_attribute(o, type_props, TEE_ATTR_ECC_CURVE); return TEE_SUCCESS; } TEE_Result syscall_obj_generate_key(unsigned long obj, unsigned long key_size, const struct utee_attribute *usr_params, unsigned long param_count) { TEE_Result res; struct tee_ta_session *sess; const struct tee_cryp_obj_type_props *type_props; struct tee_obj *o; struct tee_cryp_obj_secret *key; size_t byte_size; TEE_Attribute *params = NULL; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; res = tee_obj_get(to_user_ta_ctx(sess->ctx), tee_svc_uref_to_vaddr(obj), &o); if (res != TEE_SUCCESS) return res; /* Must be a transient object */ if ((o->info.handleFlags & TEE_HANDLE_FLAG_PERSISTENT) != 0) return TEE_ERROR_BAD_STATE; /* Must not be initialized already */ if ((o->info.handleFlags & TEE_HANDLE_FLAG_INITIALIZED) != 0) return TEE_ERROR_BAD_STATE; /* Find description of object */ type_props = tee_svc_find_type_props(o->info.objectType); if (!type_props) return TEE_ERROR_NOT_SUPPORTED; /* Check that maxKeySize follows restrictions */ if (key_size % type_props->quanta != 0) return TEE_ERROR_NOT_SUPPORTED; if (key_size < type_props->min_size) return TEE_ERROR_NOT_SUPPORTED; if (key_size > type_props->max_size) return TEE_ERROR_NOT_SUPPORTED; params = malloc(sizeof(TEE_Attribute) * param_count); if (!params) return TEE_ERROR_OUT_OF_MEMORY; res = copy_in_attrs(to_user_ta_ctx(sess->ctx), usr_params, param_count, params); if (res != TEE_SUCCESS) 
goto out; res = tee_svc_cryp_check_attr(ATTR_USAGE_GENERATE_KEY, type_props, params, param_count); if (res != TEE_SUCCESS) goto out; switch (o->info.objectType) { case TEE_TYPE_AES: case TEE_TYPE_DES: case TEE_TYPE_DES3: case TEE_TYPE_HMAC_MD5: case TEE_TYPE_HMAC_SHA1: case TEE_TYPE_HMAC_SHA224: case TEE_TYPE_HMAC_SHA256: case TEE_TYPE_HMAC_SHA384: case TEE_TYPE_HMAC_SHA512: case TEE_TYPE_GENERIC_SECRET: byte_size = key_size / 8; /* * We have to do it like this because the parity bits aren't * counted when telling the size of the key in bits. */ if (o->info.objectType == TEE_TYPE_DES || o->info.objectType == TEE_TYPE_DES3) { byte_size = (key_size + key_size / 7) / 8; } key = (struct tee_cryp_obj_secret *)o->attr; if (byte_size > key->alloc_size) { res = TEE_ERROR_EXCESS_DATA; goto out; } res = crypto_rng_read((void *)(key + 1), byte_size); if (res != TEE_SUCCESS) goto out; key->key_size = byte_size; /* Set bits for all known attributes for this object type */ o->have_attrs = (1 << type_props->num_type_attrs) - 1; break; case TEE_TYPE_RSA_KEYPAIR: res = tee_svc_obj_generate_key_rsa(o, type_props, key_size, params, param_count); if (res != TEE_SUCCESS) goto out; break; case TEE_TYPE_DSA_KEYPAIR: res = tee_svc_obj_generate_key_dsa(o, type_props, key_size); if (res != TEE_SUCCESS) goto out; break; case TEE_TYPE_DH_KEYPAIR: res = tee_svc_obj_generate_key_dh(o, type_props, key_size, params, param_count); if (res != TEE_SUCCESS) goto out; break; case TEE_TYPE_ECDSA_KEYPAIR: case TEE_TYPE_ECDH_KEYPAIR: res = tee_svc_obj_generate_key_ecc(o, type_props, key_size, params, param_count); if (res != TEE_SUCCESS) goto out; break; default: res = TEE_ERROR_BAD_FORMAT; } out: free(params); if (res == TEE_SUCCESS) { o->info.keySize = key_size; o->info.handleFlags |= TEE_HANDLE_FLAG_INITIALIZED; } return res; } static TEE_Result tee_svc_cryp_get_state(struct tee_ta_session *sess, uint32_t state_id, struct tee_cryp_state **state) { struct tee_cryp_state *s; struct user_ta_ctx *utc = to_user_ta_ctx(sess->ctx); TAILQ_FOREACH(s, &utc->cryp_states, link) { if (state_id == (vaddr_t)s) { *state = s; return TEE_SUCCESS; } } return TEE_ERROR_BAD_PARAMETERS; } static void cryp_state_free(struct user_ta_ctx *utc, struct tee_cryp_state *cs) { struct tee_obj *o; if (tee_obj_get(utc, cs->key1, &o) == TEE_SUCCESS) tee_obj_close(utc, o); if (tee_obj_get(utc, cs->key2, &o) == TEE_SUCCESS) tee_obj_close(utc, o); TAILQ_REMOVE(&utc->cryp_states, cs, link); if (cs->ctx_finalize != NULL) cs->ctx_finalize(cs->ctx, cs->algo); switch (TEE_ALG_GET_CLASS(cs->algo)) { case TEE_OPERATION_CIPHER: crypto_cipher_free_ctx(cs->ctx, cs->algo); break; case TEE_OPERATION_AE: crypto_authenc_free_ctx(cs->ctx, cs->algo); break; case TEE_OPERATION_DIGEST: crypto_hash_free_ctx(cs->ctx, cs->algo); break; case TEE_OPERATION_MAC: crypto_mac_free_ctx(cs->ctx, cs->algo); break; default: assert(!cs->ctx); } free(cs); } static TEE_Result tee_svc_cryp_check_key_type(const struct tee_obj *o, uint32_t algo, TEE_OperationMode mode) { uint32_t req_key_type; uint32_t req_key_type2 = 0; switch (TEE_ALG_GET_MAIN_ALG(algo)) { case TEE_MAIN_ALGO_MD5: req_key_type = TEE_TYPE_HMAC_MD5; break; case TEE_MAIN_ALGO_SHA1: req_key_type = TEE_TYPE_HMAC_SHA1; break; case TEE_MAIN_ALGO_SHA224: req_key_type = TEE_TYPE_HMAC_SHA224; break; case TEE_MAIN_ALGO_SHA256: req_key_type = TEE_TYPE_HMAC_SHA256; break; case TEE_MAIN_ALGO_SHA384: req_key_type = TEE_TYPE_HMAC_SHA384; break; case TEE_MAIN_ALGO_SHA512: req_key_type = TEE_TYPE_HMAC_SHA512; break; case TEE_MAIN_ALGO_AES: 
req_key_type = TEE_TYPE_AES; break; case TEE_MAIN_ALGO_DES: req_key_type = TEE_TYPE_DES; break; case TEE_MAIN_ALGO_DES3: req_key_type = TEE_TYPE_DES3; break; case TEE_MAIN_ALGO_RSA: req_key_type = TEE_TYPE_RSA_KEYPAIR; if (mode == TEE_MODE_ENCRYPT || mode == TEE_MODE_VERIFY) req_key_type2 = TEE_TYPE_RSA_PUBLIC_KEY; break; case TEE_MAIN_ALGO_DSA: req_key_type = TEE_TYPE_DSA_KEYPAIR; if (mode == TEE_MODE_ENCRYPT || mode == TEE_MODE_VERIFY) req_key_type2 = TEE_TYPE_DSA_PUBLIC_KEY; break; case TEE_MAIN_ALGO_DH: req_key_type = TEE_TYPE_DH_KEYPAIR; break; case TEE_MAIN_ALGO_ECDSA: req_key_type = TEE_TYPE_ECDSA_KEYPAIR; if (mode == TEE_MODE_VERIFY) req_key_type2 = TEE_TYPE_ECDSA_PUBLIC_KEY; break; case TEE_MAIN_ALGO_ECDH: req_key_type = TEE_TYPE_ECDH_KEYPAIR; break; #if defined(CFG_CRYPTO_HKDF) case TEE_MAIN_ALGO_HKDF: req_key_type = TEE_TYPE_HKDF_IKM; break; #endif #if defined(CFG_CRYPTO_CONCAT_KDF) case TEE_MAIN_ALGO_CONCAT_KDF: req_key_type = TEE_TYPE_CONCAT_KDF_Z; break; #endif #if defined(CFG_CRYPTO_PBKDF2) case TEE_MAIN_ALGO_PBKDF2: req_key_type = TEE_TYPE_PBKDF2_PASSWORD; break; #endif default: return TEE_ERROR_BAD_PARAMETERS; } if (req_key_type != o->info.objectType && req_key_type2 != o->info.objectType) return TEE_ERROR_BAD_PARAMETERS; return TEE_SUCCESS; } TEE_Result syscall_cryp_state_alloc(unsigned long algo, unsigned long mode, unsigned long key1, unsigned long key2, uint32_t *state) { TEE_Result res; struct tee_cryp_state *cs; struct tee_ta_session *sess; struct tee_obj *o1 = NULL; struct tee_obj *o2 = NULL; struct user_ta_ctx *utc; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; utc = to_user_ta_ctx(sess->ctx); if (key1 != 0) { res = tee_obj_get(utc, tee_svc_uref_to_vaddr(key1), &o1); if (res != TEE_SUCCESS) return res; if (o1->busy) return TEE_ERROR_BAD_PARAMETERS; res = tee_svc_cryp_check_key_type(o1, algo, mode); if (res != TEE_SUCCESS) return res; } if (key2 != 0) { res = tee_obj_get(utc, tee_svc_uref_to_vaddr(key2), &o2); if (res != TEE_SUCCESS) return res; if (o2->busy) return TEE_ERROR_BAD_PARAMETERS; res = tee_svc_cryp_check_key_type(o2, algo, mode); if (res != TEE_SUCCESS) return res; } cs = calloc(1, sizeof(struct tee_cryp_state)); if (!cs) return TEE_ERROR_OUT_OF_MEMORY; TAILQ_INSERT_TAIL(&utc->cryp_states, cs, link); cs->algo = algo; cs->mode = mode; switch (TEE_ALG_GET_CLASS(algo)) { case TEE_OPERATION_EXTENSION: #ifdef CFG_CRYPTO_RSASSA_NA1 if (algo == TEE_ALG_RSASSA_PKCS1_V1_5) goto rsassa_na1; #endif res = TEE_ERROR_NOT_SUPPORTED; break; case TEE_OPERATION_CIPHER: if ((algo == TEE_ALG_AES_XTS && (key1 == 0 || key2 == 0)) || (algo != TEE_ALG_AES_XTS && (key1 == 0 || key2 != 0))) { res = TEE_ERROR_BAD_PARAMETERS; } else { res = crypto_cipher_alloc_ctx(&cs->ctx, algo); if (res != TEE_SUCCESS) break; } break; case TEE_OPERATION_AE: if (key1 == 0 || key2 != 0) { res = TEE_ERROR_BAD_PARAMETERS; } else { res = crypto_authenc_alloc_ctx(&cs->ctx, algo); if (res != TEE_SUCCESS) break; } break; case TEE_OPERATION_MAC: if (key1 == 0 || key2 != 0) { res = TEE_ERROR_BAD_PARAMETERS; } else { res = crypto_mac_alloc_ctx(&cs->ctx, algo); if (res != TEE_SUCCESS) break; } break; case TEE_OPERATION_DIGEST: if (key1 != 0 || key2 != 0) { res = TEE_ERROR_BAD_PARAMETERS; } else { res = crypto_hash_alloc_ctx(&cs->ctx, algo); if (res != TEE_SUCCESS) break; } break; case TEE_OPERATION_ASYMMETRIC_CIPHER: case TEE_OPERATION_ASYMMETRIC_SIGNATURE: rsassa_na1: __maybe_unused if (key1 == 0 || key2 != 0) res = TEE_ERROR_BAD_PARAMETERS; break; case 
TEE_OPERATION_KEY_DERIVATION: if (key1 == 0 || key2 != 0) res = TEE_ERROR_BAD_PARAMETERS; break; default: res = TEE_ERROR_NOT_SUPPORTED; break; } if (res != TEE_SUCCESS) goto out; res = tee_svc_copy_kaddr_to_uref(state, cs); if (res != TEE_SUCCESS) goto out; /* Register keys */ if (o1 != NULL) { o1->busy = true; cs->key1 = (vaddr_t)o1; } if (o2 != NULL) { o2->busy = true; cs->key2 = (vaddr_t)o2; } out: if (res != TEE_SUCCESS) cryp_state_free(utc, cs); return res; } TEE_Result syscall_cryp_state_copy(unsigned long dst, unsigned long src) { TEE_Result res; struct tee_cryp_state *cs_dst; struct tee_cryp_state *cs_src; struct tee_ta_session *sess; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; res = tee_svc_cryp_get_state(sess, tee_svc_uref_to_vaddr(dst), &cs_dst); if (res != TEE_SUCCESS) return res; res = tee_svc_cryp_get_state(sess, tee_svc_uref_to_vaddr(src), &cs_src); if (res != TEE_SUCCESS) return res; if (cs_dst->algo != cs_src->algo || cs_dst->mode != cs_src->mode) return TEE_ERROR_BAD_PARAMETERS; switch (TEE_ALG_GET_CLASS(cs_src->algo)) { case TEE_OPERATION_CIPHER: crypto_cipher_copy_state(cs_dst->ctx, cs_src->ctx, cs_src->algo); break; case TEE_OPERATION_AE: crypto_authenc_copy_state(cs_dst->ctx, cs_src->ctx, cs_src->algo); break; case TEE_OPERATION_DIGEST: crypto_hash_copy_state(cs_dst->ctx, cs_src->ctx, cs_src->algo); break; case TEE_OPERATION_MAC: crypto_mac_copy_state(cs_dst->ctx, cs_src->ctx, cs_src->algo); break; default: return TEE_ERROR_BAD_STATE; } return TEE_SUCCESS; } void tee_svc_cryp_free_states(struct user_ta_ctx *utc) { struct tee_cryp_state_head *states = &utc->cryp_states; while (!TAILQ_EMPTY(states)) cryp_state_free(utc, TAILQ_FIRST(states)); } TEE_Result syscall_cryp_state_free(unsigned long state) { TEE_Result res; struct tee_cryp_state *cs; struct tee_ta_session *sess; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; res = tee_svc_cryp_get_state(sess, tee_svc_uref_to_vaddr(state), &cs); if (res != TEE_SUCCESS) return res; cryp_state_free(to_user_ta_ctx(sess->ctx), cs); return TEE_SUCCESS; } TEE_Result syscall_hash_init(unsigned long state, const void *iv __maybe_unused, size_t iv_len __maybe_unused) { TEE_Result res; struct tee_cryp_state *cs; struct tee_ta_session *sess; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; res = tee_svc_cryp_get_state(sess, tee_svc_uref_to_vaddr(state), &cs); if (res != TEE_SUCCESS) return res; switch (TEE_ALG_GET_CLASS(cs->algo)) { case TEE_OPERATION_DIGEST: res = crypto_hash_init(cs->ctx, cs->algo); if (res != TEE_SUCCESS) return res; break; case TEE_OPERATION_MAC: { struct tee_obj *o; struct tee_cryp_obj_secret *key; res = tee_obj_get(to_user_ta_ctx(sess->ctx), cs->key1, &o); if (res != TEE_SUCCESS) return res; if ((o->info.handleFlags & TEE_HANDLE_FLAG_INITIALIZED) == 0) return TEE_ERROR_BAD_PARAMETERS; key = (struct tee_cryp_obj_secret *)o->attr; res = crypto_mac_init(cs->ctx, cs->algo, (void *)(key + 1), key->key_size); if (res != TEE_SUCCESS) return res; break; } default: return TEE_ERROR_BAD_PARAMETERS; } return TEE_SUCCESS; } TEE_Result syscall_hash_update(unsigned long state, const void *chunk, size_t chunk_size) { TEE_Result res; struct tee_cryp_state *cs; struct tee_ta_session *sess; /* No data, but size provided isn't valid parameters. */ if (!chunk && chunk_size) return TEE_ERROR_BAD_PARAMETERS; /* Zero length hash is valid, but nothing we need to do. 
*/ if (!chunk_size) return TEE_SUCCESS; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; res = tee_mmu_check_access_rights(to_user_ta_ctx(sess->ctx), TEE_MEMORY_ACCESS_READ | TEE_MEMORY_ACCESS_ANY_OWNER, (uaddr_t)chunk, chunk_size); if (res != TEE_SUCCESS) return res; res = tee_svc_cryp_get_state(sess, tee_svc_uref_to_vaddr(state), &cs); if (res != TEE_SUCCESS) return res; switch (TEE_ALG_GET_CLASS(cs->algo)) { case TEE_OPERATION_DIGEST: res = crypto_hash_update(cs->ctx, cs->algo, chunk, chunk_size); if (res != TEE_SUCCESS) return res; break; case TEE_OPERATION_MAC: res = crypto_mac_update(cs->ctx, cs->algo, chunk, chunk_size); if (res != TEE_SUCCESS) return res; break; default: return TEE_ERROR_BAD_PARAMETERS; } return TEE_SUCCESS; } TEE_Result syscall_hash_final(unsigned long state, const void *chunk, size_t chunk_size, void *hash, uint64_t *hash_len) { TEE_Result res, res2; size_t hash_size; uint64_t hlen; struct tee_cryp_state *cs; struct tee_ta_session *sess; /* No data, but size provided isn't valid parameters. */ if (!chunk && chunk_size) return TEE_ERROR_BAD_PARAMETERS; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; res = tee_mmu_check_access_rights(to_user_ta_ctx(sess->ctx), TEE_MEMORY_ACCESS_READ | TEE_MEMORY_ACCESS_ANY_OWNER, (uaddr_t)chunk, chunk_size); if (res != TEE_SUCCESS) return res; res = tee_svc_copy_from_user(&hlen, hash_len, sizeof(hlen)); if (res != TEE_SUCCESS) return res; res = tee_mmu_check_access_rights(to_user_ta_ctx(sess->ctx), TEE_MEMORY_ACCESS_READ | TEE_MEMORY_ACCESS_WRITE | TEE_MEMORY_ACCESS_ANY_OWNER, (uaddr_t)hash, hlen); if (res != TEE_SUCCESS) return res; res = tee_svc_cryp_get_state(sess, tee_svc_uref_to_vaddr(state), &cs); if (res != TEE_SUCCESS) return res; switch (TEE_ALG_GET_CLASS(cs->algo)) { case TEE_OPERATION_DIGEST: res = tee_hash_get_digest_size(cs->algo, &hash_size); if (res != TEE_SUCCESS) return res; if (*hash_len < hash_size) { res = TEE_ERROR_SHORT_BUFFER; goto out; } if (chunk_size) { res = crypto_hash_update(cs->ctx, cs->algo, chunk, chunk_size); if (res != TEE_SUCCESS) return res; } res = crypto_hash_final(cs->ctx, cs->algo, hash, hash_size); if (res != TEE_SUCCESS) return res; break; case TEE_OPERATION_MAC: res = tee_mac_get_digest_size(cs->algo, &hash_size); if (res != TEE_SUCCESS) return res; if (*hash_len < hash_size) { res = TEE_ERROR_SHORT_BUFFER; goto out; } if (chunk_size) { res = crypto_mac_update(cs->ctx, cs->algo, chunk, chunk_size); if (res != TEE_SUCCESS) return res; } res = crypto_mac_final(cs->ctx, cs->algo, hash, hash_size); if (res != TEE_SUCCESS) return res; break; default: return TEE_ERROR_BAD_PARAMETERS; } out: hlen = hash_size; res2 = tee_svc_copy_to_user(hash_len, &hlen, sizeof(*hash_len)); if (res2 != TEE_SUCCESS) return res2; return res; } TEE_Result syscall_cipher_init(unsigned long state, const void *iv, size_t iv_len) { TEE_Result res; struct tee_cryp_state *cs; struct tee_ta_session *sess; struct tee_obj *o; struct tee_cryp_obj_secret *key1; struct user_ta_ctx *utc; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; utc = to_user_ta_ctx(sess->ctx); res = tee_svc_cryp_get_state(sess, tee_svc_uref_to_vaddr(state), &cs); if (res != TEE_SUCCESS) return res; res = tee_mmu_check_access_rights(utc, TEE_MEMORY_ACCESS_READ | TEE_MEMORY_ACCESS_ANY_OWNER, (uaddr_t) iv, iv_len); if (res != TEE_SUCCESS) return res; res = tee_obj_get(utc, cs->key1, &o); if (res != TEE_SUCCESS) return res; if ((o->info.handleFlags & 
TEE_HANDLE_FLAG_INITIALIZED) == 0) return TEE_ERROR_BAD_PARAMETERS; key1 = o->attr; if (tee_obj_get(utc, cs->key2, &o) == TEE_SUCCESS) { struct tee_cryp_obj_secret *key2 = o->attr; if ((o->info.handleFlags & TEE_HANDLE_FLAG_INITIALIZED) == 0) return TEE_ERROR_BAD_PARAMETERS; res = crypto_cipher_init(cs->ctx, cs->algo, cs->mode, (uint8_t *)(key1 + 1), key1->key_size, (uint8_t *)(key2 + 1), key2->key_size, iv, iv_len); } else { res = crypto_cipher_init(cs->ctx, cs->algo, cs->mode, (uint8_t *)(key1 + 1), key1->key_size, NULL, 0, iv, iv_len); } if (res != TEE_SUCCESS) return res; cs->ctx_finalize = crypto_cipher_final; return TEE_SUCCESS; } static TEE_Result tee_svc_cipher_update_helper(unsigned long state, bool last_block, const void *src, size_t src_len, void *dst, uint64_t *dst_len) { TEE_Result res; struct tee_cryp_state *cs; struct tee_ta_session *sess; uint64_t dlen; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; res = tee_svc_cryp_get_state(sess, tee_svc_uref_to_vaddr(state), &cs); if (res != TEE_SUCCESS) return res; res = tee_mmu_check_access_rights(to_user_ta_ctx(sess->ctx), TEE_MEMORY_ACCESS_READ | TEE_MEMORY_ACCESS_ANY_OWNER, (uaddr_t)src, src_len); if (res != TEE_SUCCESS) return res; if (!dst_len) { dlen = 0; } else { res = tee_svc_copy_from_user(&dlen, dst_len, sizeof(dlen)); if (res != TEE_SUCCESS) return res; res = tee_mmu_check_access_rights(to_user_ta_ctx(sess->ctx), TEE_MEMORY_ACCESS_READ | TEE_MEMORY_ACCESS_WRITE | TEE_MEMORY_ACCESS_ANY_OWNER, (uaddr_t)dst, dlen); if (res != TEE_SUCCESS) return res; } if (dlen < src_len) { res = TEE_ERROR_SHORT_BUFFER; goto out; } if (src_len > 0) { /* Permit src_len == 0 to finalize the operation */ res = tee_do_cipher_update(cs->ctx, cs->algo, cs->mode, last_block, src, src_len, dst); } if (last_block && cs->ctx_finalize != NULL) { cs->ctx_finalize(cs->ctx, cs->algo); cs->ctx_finalize = NULL; } out: if ((res == TEE_SUCCESS || res == TEE_ERROR_SHORT_BUFFER) && dst_len != NULL) { TEE_Result res2; dlen = src_len; res2 = tee_svc_copy_to_user(dst_len, &dlen, sizeof(*dst_len)); if (res2 != TEE_SUCCESS) res = res2; } return res; } TEE_Result syscall_cipher_update(unsigned long state, const void *src, size_t src_len, void *dst, uint64_t *dst_len) { return tee_svc_cipher_update_helper(state, false /* last_block */, src, src_len, dst, dst_len); } TEE_Result syscall_cipher_final(unsigned long state, const void *src, size_t src_len, void *dst, uint64_t *dst_len) { return tee_svc_cipher_update_helper(state, true /* last_block */, src, src_len, dst, dst_len); } #if defined(CFG_CRYPTO_HKDF) static TEE_Result get_hkdf_params(const TEE_Attribute *params, uint32_t param_count, void **salt, size_t *salt_len, void **info, size_t *info_len, size_t *okm_len) { size_t n; enum { SALT = 0x1, LENGTH = 0x2, INFO = 0x4 }; uint8_t found = 0; *salt = *info = NULL; *salt_len = *info_len = *okm_len = 0; for (n = 0; n < param_count; n++) { switch (params[n].attributeID) { case TEE_ATTR_HKDF_SALT: if (!(found & SALT)) { *salt = params[n].content.ref.buffer; *salt_len = params[n].content.ref.length; found |= SALT; } break; case TEE_ATTR_HKDF_OKM_LENGTH: if (!(found & LENGTH)) { *okm_len = params[n].content.value.a; found |= LENGTH; } break; case TEE_ATTR_HKDF_INFO: if (!(found & INFO)) { *info = params[n].content.ref.buffer; *info_len = params[n].content.ref.length; found |= INFO; } break; default: /* Unexpected attribute */ return TEE_ERROR_BAD_PARAMETERS; } } if (!(found & LENGTH)) return TEE_ERROR_BAD_PARAMETERS; return TEE_SUCCESS; } #endif 
#if defined(CFG_CRYPTO_CONCAT_KDF) static TEE_Result get_concat_kdf_params(const TEE_Attribute *params, uint32_t param_count, void **other_info, size_t *other_info_len, size_t *derived_key_len) { size_t n; enum { LENGTH = 0x1, INFO = 0x2 }; uint8_t found = 0; *other_info = NULL; *other_info_len = *derived_key_len = 0; for (n = 0; n < param_count; n++) { switch (params[n].attributeID) { case TEE_ATTR_CONCAT_KDF_OTHER_INFO: if (!(found & INFO)) { *other_info = params[n].content.ref.buffer; *other_info_len = params[n].content.ref.length; found |= INFO; } break; case TEE_ATTR_CONCAT_KDF_DKM_LENGTH: if (!(found & LENGTH)) { *derived_key_len = params[n].content.value.a; found |= LENGTH; } break; default: /* Unexpected attribute */ return TEE_ERROR_BAD_PARAMETERS; } } if (!(found & LENGTH)) return TEE_ERROR_BAD_PARAMETERS; return TEE_SUCCESS; } #endif #if defined(CFG_CRYPTO_PBKDF2) static TEE_Result get_pbkdf2_params(const TEE_Attribute *params, uint32_t param_count, void **salt, size_t *salt_len, size_t *derived_key_len, size_t *iteration_count) { size_t n; enum { SALT = 0x1, LENGTH = 0x2, COUNT = 0x4 }; uint8_t found = 0; *salt = NULL; *salt_len = *derived_key_len = *iteration_count = 0; for (n = 0; n < param_count; n++) { switch (params[n].attributeID) { case TEE_ATTR_PBKDF2_SALT: if (!(found & SALT)) { *salt = params[n].content.ref.buffer; *salt_len = params[n].content.ref.length; found |= SALT; } break; case TEE_ATTR_PBKDF2_DKM_LENGTH: if (!(found & LENGTH)) { *derived_key_len = params[n].content.value.a; found |= LENGTH; } break; case TEE_ATTR_PBKDF2_ITERATION_COUNT: if (!(found & COUNT)) { *iteration_count = params[n].content.value.a; found |= COUNT; } break; default: /* Unexpected attribute */ return TEE_ERROR_BAD_PARAMETERS; } } if ((found & (LENGTH|COUNT)) != (LENGTH|COUNT)) return TEE_ERROR_BAD_PARAMETERS; return TEE_SUCCESS; } #endif TEE_Result syscall_cryp_derive_key(unsigned long state, const struct utee_attribute *usr_params, unsigned long param_count, unsigned long derived_key) { TEE_Result res = TEE_ERROR_NOT_SUPPORTED; struct tee_ta_session *sess; struct tee_obj *ko; struct tee_obj *so; struct tee_cryp_state *cs; struct tee_cryp_obj_secret *sk; const struct tee_cryp_obj_type_props *type_props; TEE_Attribute *params = NULL; struct user_ta_ctx *utc; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; utc = to_user_ta_ctx(sess->ctx); res = tee_svc_cryp_get_state(sess, tee_svc_uref_to_vaddr(state), &cs); if (res != TEE_SUCCESS) return res; params = malloc(sizeof(TEE_Attribute) * param_count); if (!params) return TEE_ERROR_OUT_OF_MEMORY; res = copy_in_attrs(utc, usr_params, param_count, params); if (res != TEE_SUCCESS) goto out; /* Get key set in operation */ res = tee_obj_get(utc, cs->key1, &ko); if (res != TEE_SUCCESS) goto out; res = tee_obj_get(utc, tee_svc_uref_to_vaddr(derived_key), &so); if (res != TEE_SUCCESS) goto out; /* Find information needed about the object to initialize */ sk = so->attr; /* Find description of object */ type_props = tee_svc_find_type_props(so->info.objectType); if (!type_props) { res = TEE_ERROR_NOT_SUPPORTED; goto out; } if (cs->algo == TEE_ALG_DH_DERIVE_SHARED_SECRET) { size_t alloc_size; struct bignum *pub; struct bignum *ss; if (param_count != 1 || params[0].attributeID != TEE_ATTR_DH_PUBLIC_VALUE) { res = TEE_ERROR_BAD_PARAMETERS; goto out; } alloc_size = params[0].content.ref.length * 8; pub = crypto_bignum_allocate(alloc_size); ss = crypto_bignum_allocate(alloc_size); if (pub && ss) { 
crypto_bignum_bin2bn(params[0].content.ref.buffer, params[0].content.ref.length, pub); res = crypto_acipher_dh_shared_secret(ko->attr, pub, ss); if (res == TEE_SUCCESS) { sk->key_size = crypto_bignum_num_bytes(ss); crypto_bignum_bn2bin(ss, (uint8_t *)(sk + 1)); so->info.handleFlags |= TEE_HANDLE_FLAG_INITIALIZED; set_attribute(so, type_props, TEE_ATTR_SECRET_VALUE); } } else { res = TEE_ERROR_OUT_OF_MEMORY; } crypto_bignum_free(pub); crypto_bignum_free(ss); } else if (TEE_ALG_GET_MAIN_ALG(cs->algo) == TEE_MAIN_ALGO_ECDH) { size_t alloc_size; struct ecc_public_key key_public; uint8_t *pt_secret; unsigned long pt_secret_len; if (param_count != 2 || params[0].attributeID != TEE_ATTR_ECC_PUBLIC_VALUE_X || params[1].attributeID != TEE_ATTR_ECC_PUBLIC_VALUE_Y) { res = TEE_ERROR_BAD_PARAMETERS; goto out; } switch (cs->algo) { case TEE_ALG_ECDH_P192: alloc_size = 192; break; case TEE_ALG_ECDH_P224: alloc_size = 224; break; case TEE_ALG_ECDH_P256: alloc_size = 256; break; case TEE_ALG_ECDH_P384: alloc_size = 384; break; case TEE_ALG_ECDH_P521: alloc_size = 521; break; default: res = TEE_ERROR_NOT_IMPLEMENTED; goto out; } /* Create the public key */ res = crypto_acipher_alloc_ecc_public_key(&key_public, alloc_size); if (res != TEE_SUCCESS) goto out; key_public.curve = ((struct ecc_keypair *)ko->attr)->curve; crypto_bignum_bin2bn(params[0].content.ref.buffer, params[0].content.ref.length, key_public.x); crypto_bignum_bin2bn(params[1].content.ref.buffer, params[1].content.ref.length, key_public.y); pt_secret = (uint8_t *)(sk + 1); pt_secret_len = sk->alloc_size; res = crypto_acipher_ecc_shared_secret(ko->attr, &key_public, pt_secret, &pt_secret_len); if (res == TEE_SUCCESS) { sk->key_size = pt_secret_len; so->info.handleFlags |= TEE_HANDLE_FLAG_INITIALIZED; set_attribute(so, type_props, TEE_ATTR_SECRET_VALUE); } /* free the public key */ crypto_acipher_free_ecc_public_key(&key_public); } #if defined(CFG_CRYPTO_HKDF) else if (TEE_ALG_GET_MAIN_ALG(cs->algo) == TEE_MAIN_ALGO_HKDF) { void *salt, *info; size_t salt_len, info_len, okm_len; uint32_t hash_id = TEE_ALG_GET_DIGEST_HASH(cs->algo); struct tee_cryp_obj_secret *ik = ko->attr; const uint8_t *ikm = (const uint8_t *)(ik + 1); res = get_hkdf_params(params, param_count, &salt, &salt_len, &info, &info_len, &okm_len); if (res != TEE_SUCCESS) goto out; /* Requested size must fit into the output object's buffer */ if (okm_len > ik->alloc_size) { res = TEE_ERROR_BAD_PARAMETERS; goto out; } res = tee_cryp_hkdf(hash_id, ikm, ik->key_size, salt, salt_len, info, info_len, (uint8_t *)(sk + 1), okm_len); if (res == TEE_SUCCESS) { sk->key_size = okm_len; so->info.handleFlags |= TEE_HANDLE_FLAG_INITIALIZED; set_attribute(so, type_props, TEE_ATTR_SECRET_VALUE); } } #endif #if defined(CFG_CRYPTO_CONCAT_KDF) else if (TEE_ALG_GET_MAIN_ALG(cs->algo) == TEE_MAIN_ALGO_CONCAT_KDF) { void *info; size_t info_len, derived_key_len; uint32_t hash_id = TEE_ALG_GET_DIGEST_HASH(cs->algo); struct tee_cryp_obj_secret *ss = ko->attr; const uint8_t *shared_secret = (const uint8_t *)(ss + 1); res = get_concat_kdf_params(params, param_count, &info, &info_len, &derived_key_len); if (res != TEE_SUCCESS) goto out; /* Requested size must fit into the output object's buffer */ if (derived_key_len > ss->alloc_size) { res = TEE_ERROR_BAD_PARAMETERS; goto out; } res = tee_cryp_concat_kdf(hash_id, shared_secret, ss->key_size, info, info_len, (uint8_t *)(sk + 1), derived_key_len); if (res == TEE_SUCCESS) { sk->key_size = derived_key_len; so->info.handleFlags |= TEE_HANDLE_FLAG_INITIALIZED; 
set_attribute(so, type_props, TEE_ATTR_SECRET_VALUE); } } #endif #if defined(CFG_CRYPTO_PBKDF2) else if (TEE_ALG_GET_MAIN_ALG(cs->algo) == TEE_MAIN_ALGO_PBKDF2) { void *salt; size_t salt_len, iteration_count, derived_key_len; uint32_t hash_id = TEE_ALG_GET_DIGEST_HASH(cs->algo); struct tee_cryp_obj_secret *ss = ko->attr; const uint8_t *password = (const uint8_t *)(ss + 1); res = get_pbkdf2_params(params, param_count, &salt, &salt_len, &derived_key_len, &iteration_count); if (res != TEE_SUCCESS) goto out; /* Requested size must fit into the output object's buffer */ if (derived_key_len > ss->alloc_size) { res = TEE_ERROR_BAD_PARAMETERS; goto out; } res = tee_cryp_pbkdf2(hash_id, password, ss->key_size, salt, salt_len, iteration_count, (uint8_t *)(sk + 1), derived_key_len); if (res == TEE_SUCCESS) { sk->key_size = derived_key_len; so->info.handleFlags |= TEE_HANDLE_FLAG_INITIALIZED; set_attribute(so, type_props, TEE_ATTR_SECRET_VALUE); } } #endif else res = TEE_ERROR_NOT_SUPPORTED; out: free(params); return res; } TEE_Result syscall_cryp_random_number_generate(void *buf, size_t blen) { TEE_Result res; struct tee_ta_session *sess; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; res = tee_mmu_check_access_rights(to_user_ta_ctx(sess->ctx), TEE_MEMORY_ACCESS_WRITE | TEE_MEMORY_ACCESS_ANY_OWNER, (uaddr_t)buf, blen); if (res != TEE_SUCCESS) return res; res = crypto_rng_read(buf, blen); if (res != TEE_SUCCESS) return res; return res; } TEE_Result syscall_authenc_init(unsigned long state, const void *nonce, size_t nonce_len, size_t tag_len, size_t aad_len, size_t payload_len) { TEE_Result res; struct tee_cryp_state *cs; struct tee_ta_session *sess; struct tee_obj *o; struct tee_cryp_obj_secret *key; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; res = tee_svc_cryp_get_state(sess, tee_svc_uref_to_vaddr(state), &cs); if (res != TEE_SUCCESS) return res; res = tee_obj_get(to_user_ta_ctx(sess->ctx), cs->key1, &o); if (res != TEE_SUCCESS) return res; if ((o->info.handleFlags & TEE_HANDLE_FLAG_INITIALIZED) == 0) return TEE_ERROR_BAD_PARAMETERS; key = o->attr; res = crypto_authenc_init(cs->ctx, cs->algo, cs->mode, (uint8_t *)(key + 1), key->key_size, nonce, nonce_len, tag_len, aad_len, payload_len); if (res != TEE_SUCCESS) return res; cs->ctx_finalize = (tee_cryp_ctx_finalize_func_t)crypto_authenc_final; return TEE_SUCCESS; } TEE_Result syscall_authenc_update_aad(unsigned long state, const void *aad_data, size_t aad_data_len) { TEE_Result res; struct tee_cryp_state *cs; struct tee_ta_session *sess; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; res = tee_mmu_check_access_rights(to_user_ta_ctx(sess->ctx), TEE_MEMORY_ACCESS_READ | TEE_MEMORY_ACCESS_ANY_OWNER, (uaddr_t) aad_data, aad_data_len); if (res != TEE_SUCCESS) return res; res = tee_svc_cryp_get_state(sess, tee_svc_uref_to_vaddr(state), &cs); if (res != TEE_SUCCESS) return res; res = crypto_authenc_update_aad(cs->ctx, cs->algo, cs->mode, aad_data, aad_data_len); if (res != TEE_SUCCESS) return res; return TEE_SUCCESS; } TEE_Result syscall_authenc_update_payload(unsigned long state, const void *src_data, size_t src_len, void *dst_data, uint64_t *dst_len) { TEE_Result res; struct tee_cryp_state *cs; struct tee_ta_session *sess; uint64_t dlen; size_t tmp_dlen; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; res = tee_svc_cryp_get_state(sess, tee_svc_uref_to_vaddr(state), &cs); if (res != TEE_SUCCESS) return res; res = 
tee_mmu_check_access_rights(to_user_ta_ctx(sess->ctx), TEE_MEMORY_ACCESS_READ | TEE_MEMORY_ACCESS_ANY_OWNER, (uaddr_t) src_data, src_len); if (res != TEE_SUCCESS) return res; res = tee_svc_copy_from_user(&dlen, dst_len, sizeof(dlen)); if (res != TEE_SUCCESS) return res; res = tee_mmu_check_access_rights(to_user_ta_ctx(sess->ctx), TEE_MEMORY_ACCESS_READ | TEE_MEMORY_ACCESS_WRITE | TEE_MEMORY_ACCESS_ANY_OWNER, (uaddr_t)dst_data, dlen); if (res != TEE_SUCCESS) return res; if (dlen < src_len) { res = TEE_ERROR_SHORT_BUFFER; goto out; } tmp_dlen = dlen; res = crypto_authenc_update_payload(cs->ctx, cs->algo, cs->mode, src_data, src_len, dst_data, &tmp_dlen); dlen = tmp_dlen; out: if (res == TEE_SUCCESS || res == TEE_ERROR_SHORT_BUFFER) { TEE_Result res2 = tee_svc_copy_to_user(dst_len, &dlen, sizeof(*dst_len)); if (res2 != TEE_SUCCESS) res = res2; } return res; } TEE_Result syscall_authenc_enc_final(unsigned long state, const void *src_data, size_t src_len, void *dst_data, uint64_t *dst_len, void *tag, uint64_t *tag_len) { TEE_Result res; struct tee_cryp_state *cs; struct tee_ta_session *sess; uint64_t dlen; uint64_t tlen = 0; size_t tmp_dlen; size_t tmp_tlen; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; res = tee_svc_cryp_get_state(sess, tee_svc_uref_to_vaddr(state), &cs); if (res != TEE_SUCCESS) return res; if (cs->mode != TEE_MODE_ENCRYPT) return TEE_ERROR_BAD_PARAMETERS; res = tee_mmu_check_access_rights(to_user_ta_ctx(sess->ctx), TEE_MEMORY_ACCESS_READ | TEE_MEMORY_ACCESS_ANY_OWNER, (uaddr_t)src_data, src_len); if (res != TEE_SUCCESS) return res; if (!dst_len) { dlen = 0; } else { res = tee_svc_copy_from_user(&dlen, dst_len, sizeof(dlen)); if (res != TEE_SUCCESS) return res; res = tee_mmu_check_access_rights(to_user_ta_ctx(sess->ctx), TEE_MEMORY_ACCESS_READ | TEE_MEMORY_ACCESS_WRITE | TEE_MEMORY_ACCESS_ANY_OWNER, (uaddr_t)dst_data, dlen); if (res != TEE_SUCCESS) return res; } if (dlen < src_len) { res = TEE_ERROR_SHORT_BUFFER; goto out; } res = tee_svc_copy_from_user(&tlen, tag_len, sizeof(tlen)); if (res != TEE_SUCCESS) return res; res = tee_mmu_check_access_rights(to_user_ta_ctx(sess->ctx), TEE_MEMORY_ACCESS_READ | TEE_MEMORY_ACCESS_WRITE | TEE_MEMORY_ACCESS_ANY_OWNER, (uaddr_t)tag, tlen); if (res != TEE_SUCCESS) return res; tmp_dlen = dlen; tmp_tlen = tlen; res = crypto_authenc_enc_final(cs->ctx, cs->algo, src_data, src_len, dst_data, &tmp_dlen, tag, &tmp_tlen); dlen = tmp_dlen; tlen = tmp_tlen; out: if (res == TEE_SUCCESS || res == TEE_ERROR_SHORT_BUFFER) { TEE_Result res2; if (dst_len != NULL) { res2 = tee_svc_copy_to_user(dst_len, &dlen, sizeof(*dst_len)); if (res2 != TEE_SUCCESS) return res2; } res2 = tee_svc_copy_to_user(tag_len, &tlen, sizeof(*tag_len)); if (res2 != TEE_SUCCESS) return res2; } return res; } TEE_Result syscall_authenc_dec_final(unsigned long state, const void *src_data, size_t src_len, void *dst_data, uint64_t *dst_len, const void *tag, size_t tag_len) { TEE_Result res; struct tee_cryp_state *cs; struct tee_ta_session *sess; uint64_t dlen; size_t tmp_dlen; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; res = tee_svc_cryp_get_state(sess, tee_svc_uref_to_vaddr(state), &cs); if (res != TEE_SUCCESS) return res; if (cs->mode != TEE_MODE_DECRYPT) return TEE_ERROR_BAD_PARAMETERS; res = tee_mmu_check_access_rights(to_user_ta_ctx(sess->ctx), TEE_MEMORY_ACCESS_READ | TEE_MEMORY_ACCESS_ANY_OWNER, (uaddr_t)src_data, src_len); if (res != TEE_SUCCESS) return res; if (!dst_len) { dlen = 0; } else { res = 
tee_svc_copy_from_user(&dlen, dst_len, sizeof(dlen)); if (res != TEE_SUCCESS) return res; res = tee_mmu_check_access_rights(to_user_ta_ctx(sess->ctx), TEE_MEMORY_ACCESS_READ | TEE_MEMORY_ACCESS_WRITE | TEE_MEMORY_ACCESS_ANY_OWNER, (uaddr_t)dst_data, dlen); if (res != TEE_SUCCESS) return res; } if (dlen < src_len) { res = TEE_ERROR_SHORT_BUFFER; goto out; } res = tee_mmu_check_access_rights(to_user_ta_ctx(sess->ctx), TEE_MEMORY_ACCESS_READ | TEE_MEMORY_ACCESS_ANY_OWNER, (uaddr_t)tag, tag_len); if (res != TEE_SUCCESS) return res; tmp_dlen = dlen; res = crypto_authenc_dec_final(cs->ctx, cs->algo, src_data, src_len, dst_data, &tmp_dlen, tag, tag_len); dlen = tmp_dlen; out: if ((res == TEE_SUCCESS || res == TEE_ERROR_SHORT_BUFFER) && dst_len != NULL) { TEE_Result res2; res2 = tee_svc_copy_to_user(dst_len, &dlen, sizeof(*dst_len)); if (res2 != TEE_SUCCESS) return res2; } return res; } static int pkcs1_get_salt_len(const TEE_Attribute *params, uint32_t num_params, size_t default_len) { size_t n; assert(default_len < INT_MAX); for (n = 0; n < num_params; n++) { if (params[n].attributeID == TEE_ATTR_RSA_PSS_SALT_LENGTH) { if (params[n].content.value.a < INT_MAX) return params[n].content.value.a; break; } } /* * If salt length isn't provided use the default value which is * the length of the digest. */ return default_len; } TEE_Result syscall_asymm_operate(unsigned long state, const struct utee_attribute *usr_params, size_t num_params, const void *src_data, size_t src_len, void *dst_data, uint64_t *dst_len) { TEE_Result res; struct tee_cryp_state *cs; struct tee_ta_session *sess; uint64_t dlen64; size_t dlen; struct tee_obj *o; void *label = NULL; size_t label_len = 0; size_t n; int salt_len; TEE_Attribute *params = NULL; struct user_ta_ctx *utc; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; utc = to_user_ta_ctx(sess->ctx); res = tee_svc_cryp_get_state(sess, tee_svc_uref_to_vaddr(state), &cs); if (res != TEE_SUCCESS) return res; res = tee_mmu_check_access_rights( utc, TEE_MEMORY_ACCESS_READ | TEE_MEMORY_ACCESS_ANY_OWNER, (uaddr_t) src_data, src_len); if (res != TEE_SUCCESS) return res; res = tee_svc_copy_from_user(&dlen64, dst_len, sizeof(dlen64)); if (res != TEE_SUCCESS) return res; dlen = dlen64; res = tee_mmu_check_access_rights( utc, TEE_MEMORY_ACCESS_READ | TEE_MEMORY_ACCESS_WRITE | TEE_MEMORY_ACCESS_ANY_OWNER, (uaddr_t) dst_data, dlen); if (res != TEE_SUCCESS) return res; params = malloc(sizeof(TEE_Attribute) * num_params); if (!params) return TEE_ERROR_OUT_OF_MEMORY; res = copy_in_attrs(utc, usr_params, num_params, params); if (res != TEE_SUCCESS) goto out; res = tee_obj_get(utc, cs->key1, &o); if (res != TEE_SUCCESS) goto out; if ((o->info.handleFlags & TEE_HANDLE_FLAG_INITIALIZED) == 0) { res = TEE_ERROR_GENERIC; goto out; } switch (cs->algo) { case TEE_ALG_RSA_NOPAD: if (cs->mode == TEE_MODE_ENCRYPT) { res = crypto_acipher_rsanopad_encrypt(o->attr, src_data, src_len, dst_data, &dlen); } else if (cs->mode == TEE_MODE_DECRYPT) { res = crypto_acipher_rsanopad_decrypt(o->attr, src_data, src_len, dst_data, &dlen); } else { /* * We will panic because "the mode is not compatible * with the function" */ res = TEE_ERROR_GENERIC; } break; case TEE_ALG_RSAES_PKCS1_V1_5: case TEE_ALG_RSAES_PKCS1_OAEP_MGF1_SHA1: case TEE_ALG_RSAES_PKCS1_OAEP_MGF1_SHA224: case TEE_ALG_RSAES_PKCS1_OAEP_MGF1_SHA256: case TEE_ALG_RSAES_PKCS1_OAEP_MGF1_SHA384: case TEE_ALG_RSAES_PKCS1_OAEP_MGF1_SHA512: for (n = 0; n < num_params; n++) { if (params[n].attributeID == TEE_ATTR_RSA_OAEP_LABEL) { 
label = params[n].content.ref.buffer; label_len = params[n].content.ref.length; break; } } if (cs->mode == TEE_MODE_ENCRYPT) { res = crypto_acipher_rsaes_encrypt(cs->algo, o->attr, label, label_len, src_data, src_len, dst_data, &dlen); } else if (cs->mode == TEE_MODE_DECRYPT) { res = crypto_acipher_rsaes_decrypt( cs->algo, o->attr, label, label_len, src_data, src_len, dst_data, &dlen); } else { res = TEE_ERROR_BAD_PARAMETERS; } break; #if defined(CFG_CRYPTO_RSASSA_NA1) case TEE_ALG_RSASSA_PKCS1_V1_5: #endif case TEE_ALG_RSASSA_PKCS1_V1_5_MD5: case TEE_ALG_RSASSA_PKCS1_V1_5_SHA1: case TEE_ALG_RSASSA_PKCS1_V1_5_SHA224: case TEE_ALG_RSASSA_PKCS1_V1_5_SHA256: case TEE_ALG_RSASSA_PKCS1_V1_5_SHA384: case TEE_ALG_RSASSA_PKCS1_V1_5_SHA512: case TEE_ALG_RSASSA_PKCS1_PSS_MGF1_SHA1: case TEE_ALG_RSASSA_PKCS1_PSS_MGF1_SHA224: case TEE_ALG_RSASSA_PKCS1_PSS_MGF1_SHA256: case TEE_ALG_RSASSA_PKCS1_PSS_MGF1_SHA384: case TEE_ALG_RSASSA_PKCS1_PSS_MGF1_SHA512: if (cs->mode != TEE_MODE_SIGN) { res = TEE_ERROR_BAD_PARAMETERS; break; } salt_len = pkcs1_get_salt_len(params, num_params, src_len); res = crypto_acipher_rsassa_sign(cs->algo, o->attr, salt_len, src_data, src_len, dst_data, &dlen); break; case TEE_ALG_DSA_SHA1: case TEE_ALG_DSA_SHA224: case TEE_ALG_DSA_SHA256: res = crypto_acipher_dsa_sign(cs->algo, o->attr, src_data, src_len, dst_data, &dlen); break; case TEE_ALG_ECDSA_P192: case TEE_ALG_ECDSA_P224: case TEE_ALG_ECDSA_P256: case TEE_ALG_ECDSA_P384: case TEE_ALG_ECDSA_P521: res = crypto_acipher_ecc_sign(cs->algo, o->attr, src_data, src_len, dst_data, &dlen); break; default: res = TEE_ERROR_BAD_PARAMETERS; break; } out: free(params); if (res == TEE_SUCCESS || res == TEE_ERROR_SHORT_BUFFER) { TEE_Result res2; dlen64 = dlen; res2 = tee_svc_copy_to_user(dst_len, &dlen64, sizeof(*dst_len)); if (res2 != TEE_SUCCESS) return res2; } return res; } TEE_Result syscall_asymm_verify(unsigned long state, const struct utee_attribute *usr_params, size_t num_params, const void *data, size_t data_len, const void *sig, size_t sig_len) { TEE_Result res; struct tee_cryp_state *cs; struct tee_ta_session *sess; struct tee_obj *o; size_t hash_size; int salt_len = 0; TEE_Attribute *params = NULL; uint32_t hash_algo; struct user_ta_ctx *utc; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; utc = to_user_ta_ctx(sess->ctx); res = tee_svc_cryp_get_state(sess, tee_svc_uref_to_vaddr(state), &cs); if (res != TEE_SUCCESS) return res; if (cs->mode != TEE_MODE_VERIFY) return TEE_ERROR_BAD_PARAMETERS; res = tee_mmu_check_access_rights(utc, TEE_MEMORY_ACCESS_READ | TEE_MEMORY_ACCESS_ANY_OWNER, (uaddr_t)data, data_len); if (res != TEE_SUCCESS) return res; res = tee_mmu_check_access_rights(utc, TEE_MEMORY_ACCESS_READ | TEE_MEMORY_ACCESS_ANY_OWNER, (uaddr_t)sig, sig_len); if (res != TEE_SUCCESS) return res; params = malloc(sizeof(TEE_Attribute) * num_params); if (!params) return TEE_ERROR_OUT_OF_MEMORY; res = copy_in_attrs(utc, usr_params, num_params, params); if (res != TEE_SUCCESS) goto out; res = tee_obj_get(utc, cs->key1, &o); if (res != TEE_SUCCESS) goto out; if ((o->info.handleFlags & TEE_HANDLE_FLAG_INITIALIZED) == 0) { res = TEE_ERROR_BAD_PARAMETERS; goto out; } switch (TEE_ALG_GET_MAIN_ALG(cs->algo)) { case TEE_MAIN_ALGO_RSA: if (cs->algo != TEE_ALG_RSASSA_PKCS1_V1_5) { hash_algo = TEE_DIGEST_HASH_TO_ALGO(cs->algo); res = tee_hash_get_digest_size(hash_algo, &hash_size); if (res != TEE_SUCCESS) break; if (data_len != hash_size) { res = TEE_ERROR_BAD_PARAMETERS; break; } salt_len = 
pkcs1_get_salt_len(params, num_params, hash_size); } res = crypto_acipher_rsassa_verify(cs->algo, o->attr, salt_len, data, data_len, sig, sig_len); break; case TEE_MAIN_ALGO_DSA: hash_algo = TEE_DIGEST_HASH_TO_ALGO(cs->algo); res = tee_hash_get_digest_size(hash_algo, &hash_size); if (res != TEE_SUCCESS) break; /* * Depending on the DSA algorithm (NIST), the digital signature * output size may be truncated to the size of a key pair * (Q prime size). Q prime size must be less or equal than the * hash output length of the hash algorithm involved. */ if (data_len > hash_size) { res = TEE_ERROR_BAD_PARAMETERS; break; } res = crypto_acipher_dsa_verify(cs->algo, o->attr, data, data_len, sig, sig_len); break; case TEE_MAIN_ALGO_ECDSA: res = crypto_acipher_ecc_verify(cs->algo, o->attr, data, data_len, sig, sig_len); break; default: res = TEE_ERROR_NOT_SUPPORTED; } out: free(params); return res; }
TEE_Result syscall_cryp_obj_populate(unsigned long obj,
			struct utee_attribute *usr_attrs,
			unsigned long attr_count)
{
	TEE_Result res;
	struct tee_ta_session *sess;
	struct tee_obj *o;
	const struct tee_cryp_obj_type_props *type_props;
	TEE_Attribute *attrs = NULL;

	res = tee_ta_get_current_session(&sess);
	if (res != TEE_SUCCESS)
		return res;

	res = tee_obj_get(to_user_ta_ctx(sess->ctx),
			  tee_svc_uref_to_vaddr(obj), &o);
	if (res != TEE_SUCCESS)
		return res;

	/* Must be a transient object */
	if ((o->info.handleFlags & TEE_HANDLE_FLAG_PERSISTENT) != 0)
		return TEE_ERROR_BAD_PARAMETERS;

	/* Must not be initialized already */
	if ((o->info.handleFlags & TEE_HANDLE_FLAG_INITIALIZED) != 0)
		return TEE_ERROR_BAD_PARAMETERS;

	type_props = tee_svc_find_type_props(o->info.objectType);
	if (!type_props)
		return TEE_ERROR_NOT_IMPLEMENTED;

	attrs = malloc(sizeof(TEE_Attribute) * attr_count);
	if (!attrs)
		return TEE_ERROR_OUT_OF_MEMORY;

	res = copy_in_attrs(to_user_ta_ctx(sess->ctx), usr_attrs, attr_count,
			    attrs);
	if (res != TEE_SUCCESS)
		goto out;

	res = tee_svc_cryp_check_attr(ATTR_USAGE_POPULATE, type_props,
				      attrs, attr_count);
	if (res != TEE_SUCCESS)
		goto out;

	res = tee_svc_cryp_obj_populate_type(o, type_props, attrs, attr_count);
	if (res == TEE_SUCCESS)
		o->info.handleFlags |= TEE_HANDLE_FLAG_INITIALIZED;

out:
	free(attrs);
	return res;
}
TEE_Result syscall_cryp_obj_populate(unsigned long obj,
			struct utee_attribute *usr_attrs,
			unsigned long attr_count)
{
	TEE_Result res;
	struct tee_ta_session *sess;
	struct tee_obj *o;
	const struct tee_cryp_obj_type_props *type_props;
	TEE_Attribute *attrs = NULL;

	res = tee_ta_get_current_session(&sess);
	if (res != TEE_SUCCESS)
		return res;

	res = tee_obj_get(to_user_ta_ctx(sess->ctx),
			  tee_svc_uref_to_vaddr(obj), &o);
	if (res != TEE_SUCCESS)
		return res;

	/* Must be a transient object */
	if ((o->info.handleFlags & TEE_HANDLE_FLAG_PERSISTENT) != 0)
		return TEE_ERROR_BAD_PARAMETERS;

	/* Must not be initialized already */
	if ((o->info.handleFlags & TEE_HANDLE_FLAG_INITIALIZED) != 0)
		return TEE_ERROR_BAD_PARAMETERS;

	type_props = tee_svc_find_type_props(o->info.objectType);
	if (!type_props)
		return TEE_ERROR_NOT_IMPLEMENTED;

	size_t alloc_size = 0;

	if (MUL_OVERFLOW(sizeof(TEE_Attribute), attr_count, &alloc_size))
		return TEE_ERROR_OVERFLOW;

	attrs = malloc(alloc_size);
	if (!attrs)
		return TEE_ERROR_OUT_OF_MEMORY;

	res = copy_in_attrs(to_user_ta_ctx(sess->ctx), usr_attrs, attr_count,
			    attrs);
	if (res != TEE_SUCCESS)
		goto out;

	res = tee_svc_cryp_check_attr(ATTR_USAGE_POPULATE, type_props,
				      attrs, attr_count);
	if (res != TEE_SUCCESS)
		goto out;

	res = tee_svc_cryp_obj_populate_type(o, type_props, attrs, attr_count);
	if (res == TEE_SUCCESS)
		o->info.handleFlags |= TEE_HANDLE_FLAG_INITIALIZED;

out:
	free(attrs);
	return res;
}
{'added': [(7, '#include <compiler.h>'), (1551, '\tsize_t alloc_size = 0;'), (1552, ''), (1553, '\tif (MUL_OVERFLOW(sizeof(TEE_Attribute), attr_count, &alloc_size))'), (1554, '\t\treturn TEE_ERROR_OVERFLOW;'), (1555, ''), (1556, '\tattrs = malloc(alloc_size);'), (1559, '')], 'deleted': [(1550, '\tattrs = malloc(sizeof(TEE_Attribute) * attr_count);')]}
8
1
2,813
15,893
https://github.com/OP-TEE/optee_os
CVE-2019-1010296
['CWE-190', 'CWE-787']
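The fix in this record is a textbook CWE-190 repair: sizeof(TEE_Attribute) * attr_count is computed from a TA-controlled count, so the product can wrap around size_t and malloc() returns a buffer far smaller than copy_in_attrs() later writes into (the CWE-787 half of the pairing). Below is a minimal, self-contained sketch of the same overflow-checked allocation pattern. It assumes MUL_OVERFLOW maps to GCC/Clang's __builtin_mul_overflow (a common way to implement such a macro; the diff only shows that OP-TEE pulls it from <compiler.h>), and struct attr is a made-up element type for illustration, not an OP-TEE type.

	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>

	/*
	 * Stand-in for an overflow-checking multiply macro; on GCC/Clang,
	 * __builtin_mul_overflow() returns true when a * b does not fit
	 * in *res, and stores the (possibly wrapped) product otherwise.
	 */
	#define MUL_OVERFLOW(a, b, res) __builtin_mul_overflow((a), (b), (res))

	struct attr { uint32_t id; uint64_t a, b; };	/* hypothetical element */

	/* Allocate count elements, refusing sizes that wrap around SIZE_MAX. */
	static void *alloc_array_checked(size_t count)
	{
		size_t bytes = 0;

		if (MUL_OVERFLOW(sizeof(struct attr), count, &bytes))
			return NULL;	/* count * elem_size overflowed size_t */
		return malloc(bytes);
	}

	int main(void)
	{
		/* An attacker-chosen count whose byte size wraps size_t:
		 * unchecked, malloc() would get a tiny wrapped value and
		 * later writes would land out of bounds. */
		size_t evil = SIZE_MAX / sizeof(struct attr) + 1;
		void *ok = alloc_array_checked(4);

		printf("count 4    -> %s\n", ok ? "allocated" : "rejected");
		printf("count evil -> %s\n",
		       alloc_array_checked(evil) ? "allocated" : "rejected");
		free(ok);
		return 0;
	}

Note the fix rejects the request with TEE_ERROR_OVERFLOW before allocating, rather than clamping the count: refusing early keeps the attribute count and the buffer size in agreement for everything downstream.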
step.c
set_task_blockstep
/* * x86 single-step support code, common to 32-bit and 64-bit. */ #include <linux/sched.h> #include <linux/mm.h> #include <linux/ptrace.h> #include <asm/desc.h> unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs) { unsigned long addr, seg; addr = regs->ip; seg = regs->cs & 0xffff; if (v8086_mode(regs)) { addr = (addr & 0xffff) + (seg << 4); return addr; } /* * We'll assume that the code segments in the GDT * are all zero-based. That is largely true: the * TLS segments are used for data, and the PNPBIOS * and APM bios ones we just ignore here. */ if ((seg & SEGMENT_TI_MASK) == SEGMENT_LDT) { struct desc_struct *desc; unsigned long base; seg &= ~7UL; mutex_lock(&child->mm->context.lock); if (unlikely((seg >> 3) >= child->mm->context.size)) addr = -1L; /* bogus selector, access would fault */ else { desc = child->mm->context.ldt + seg; base = get_desc_base(desc); /* 16-bit code segment? */ if (!desc->d) addr &= 0xffff; addr += base; } mutex_unlock(&child->mm->context.lock); } return addr; } static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs) { int i, copied; unsigned char opcode[15]; unsigned long addr = convert_ip_to_linear(child, regs); copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0); for (i = 0; i < copied; i++) { switch (opcode[i]) { /* popf and iret */ case 0x9d: case 0xcf: return 1; /* CHECKME: 64 65 */ /* opcode and address size prefixes */ case 0x66: case 0x67: continue; /* irrelevant prefixes (segment overrides and repeats) */ case 0x26: case 0x2e: case 0x36: case 0x3e: case 0x64: case 0x65: case 0xf0: case 0xf2: case 0xf3: continue; #ifdef CONFIG_X86_64 case 0x40 ... 0x4f: if (!user_64bit_mode(regs)) /* 32-bit mode: register increment */ return 0; /* 64-bit mode: REX prefix */ continue; #endif /* CHECKME: f2, f3 */ /* * pushf: NOTE! We should probably not let * the user see the TF bit being set. But * it's more pain than it's worth to avoid * it, and a debugger could emulate this * all in user space if it _really_ cares. */ case 0x9c: default: return 0; } } return 0; } /* * Enable single-stepping. Return nonzero if user mode is not using TF itself. */ static int enable_single_step(struct task_struct *child) { struct pt_regs *regs = task_pt_regs(child); unsigned long oflags; /* * If we stepped into a sysenter/syscall insn, it trapped in * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP. * If user-mode had set TF itself, then it's still clear from * do_debug() and we need to set it again to restore the user * state so we don't wrongly set TIF_FORCED_TF below. * If enable_single_step() was used last and that is what * set TIF_SINGLESTEP, then both TF and TIF_FORCED_TF are * already set and our bookkeeping is fine. */ if (unlikely(test_tsk_thread_flag(child, TIF_SINGLESTEP))) regs->flags |= X86_EFLAGS_TF; /* * Always set TIF_SINGLESTEP - this guarantees that * we single-step system calls etc.. This will also * cause us to set TF when returning to user mode. */ set_tsk_thread_flag(child, TIF_SINGLESTEP); oflags = regs->flags; /* Set TF on the kernel stack.. */ regs->flags |= X86_EFLAGS_TF; /* * ..but if TF is changed by the instruction we will trace, * don't mark it as being "us" that set it, so that we * won't clear it by hand later. * * Note that if we don't actually execute the popf because * of a signal arriving right now or suchlike, we will lose * track of the fact that it really was "us" that set it. 
*/ if (is_setting_trap_flag(child, regs)) { clear_tsk_thread_flag(child, TIF_FORCED_TF); return 0; } /* * If TF was already set, check whether it was us who set it. * If not, we should never attempt a block step. */ if (oflags & X86_EFLAGS_TF) return test_tsk_thread_flag(child, TIF_FORCED_TF); set_tsk_thread_flag(child, TIF_FORCED_TF); return 1; } void set_task_blockstep(struct task_struct *task, bool on) { unsigned long debugctl; /* * Ensure irq/preemption can't change debugctl in between. * Note also that both TIF_BLOCKSTEP and debugctl should * be changed atomically wrt preemption. * FIXME: this means that set/clear TIF_BLOCKSTEP is simply * wrong if task != current, SIGKILL can wakeup the stopped * tracee and set/clear can play with the running task, this * can confuse the next __switch_to_xtra(). */ local_irq_disable(); debugctl = get_debugctlmsr(); if (on) { debugctl |= DEBUGCTLMSR_BTF; set_tsk_thread_flag(task, TIF_BLOCKSTEP); } else { debugctl &= ~DEBUGCTLMSR_BTF; clear_tsk_thread_flag(task, TIF_BLOCKSTEP); } if (task == current) update_debugctlmsr(debugctl); local_irq_enable(); } /* * Enable single or block step. */ static void enable_step(struct task_struct *child, bool block) { /* * Make sure block stepping (BTF) is not enabled unless it should be. * Note that we don't try to worry about any is_setting_trap_flag() * instructions after the first when using block stepping. * So no one should try to use debugger block stepping in a program * that uses user-mode single stepping itself. */ if (enable_single_step(child) && block) set_task_blockstep(child, true); else if (test_tsk_thread_flag(child, TIF_BLOCKSTEP)) set_task_blockstep(child, false); } void user_enable_single_step(struct task_struct *child) { enable_step(child, 0); } void user_enable_block_step(struct task_struct *child) { enable_step(child, 1); } void user_disable_single_step(struct task_struct *child) { /* * Make sure block stepping (BTF) is disabled. */ if (test_tsk_thread_flag(child, TIF_BLOCKSTEP)) set_task_blockstep(child, false); /* Always clear TIF_SINGLESTEP... */ clear_tsk_thread_flag(child, TIF_SINGLESTEP); /* But touch TF only if it was set by us.. */ if (test_and_clear_tsk_thread_flag(child, TIF_FORCED_TF)) task_pt_regs(child)->flags &= ~X86_EFLAGS_TF; }
/* * x86 single-step support code, common to 32-bit and 64-bit. */ #include <linux/sched.h> #include <linux/mm.h> #include <linux/ptrace.h> #include <asm/desc.h> unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs) { unsigned long addr, seg; addr = regs->ip; seg = regs->cs & 0xffff; if (v8086_mode(regs)) { addr = (addr & 0xffff) + (seg << 4); return addr; } /* * We'll assume that the code segments in the GDT * are all zero-based. That is largely true: the * TLS segments are used for data, and the PNPBIOS * and APM bios ones we just ignore here. */ if ((seg & SEGMENT_TI_MASK) == SEGMENT_LDT) { struct desc_struct *desc; unsigned long base; seg &= ~7UL; mutex_lock(&child->mm->context.lock); if (unlikely((seg >> 3) >= child->mm->context.size)) addr = -1L; /* bogus selector, access would fault */ else { desc = child->mm->context.ldt + seg; base = get_desc_base(desc); /* 16-bit code segment? */ if (!desc->d) addr &= 0xffff; addr += base; } mutex_unlock(&child->mm->context.lock); } return addr; } static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs) { int i, copied; unsigned char opcode[15]; unsigned long addr = convert_ip_to_linear(child, regs); copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0); for (i = 0; i < copied; i++) { switch (opcode[i]) { /* popf and iret */ case 0x9d: case 0xcf: return 1; /* CHECKME: 64 65 */ /* opcode and address size prefixes */ case 0x66: case 0x67: continue; /* irrelevant prefixes (segment overrides and repeats) */ case 0x26: case 0x2e: case 0x36: case 0x3e: case 0x64: case 0x65: case 0xf0: case 0xf2: case 0xf3: continue; #ifdef CONFIG_X86_64 case 0x40 ... 0x4f: if (!user_64bit_mode(regs)) /* 32-bit mode: register increment */ return 0; /* 64-bit mode: REX prefix */ continue; #endif /* CHECKME: f2, f3 */ /* * pushf: NOTE! We should probably not let * the user see the TF bit being set. But * it's more pain than it's worth to avoid * it, and a debugger could emulate this * all in user space if it _really_ cares. */ case 0x9c: default: return 0; } } return 0; } /* * Enable single-stepping. Return nonzero if user mode is not using TF itself. */ static int enable_single_step(struct task_struct *child) { struct pt_regs *regs = task_pt_regs(child); unsigned long oflags; /* * If we stepped into a sysenter/syscall insn, it trapped in * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP. * If user-mode had set TF itself, then it's still clear from * do_debug() and we need to set it again to restore the user * state so we don't wrongly set TIF_FORCED_TF below. * If enable_single_step() was used last and that is what * set TIF_SINGLESTEP, then both TF and TIF_FORCED_TF are * already set and our bookkeeping is fine. */ if (unlikely(test_tsk_thread_flag(child, TIF_SINGLESTEP))) regs->flags |= X86_EFLAGS_TF; /* * Always set TIF_SINGLESTEP - this guarantees that * we single-step system calls etc.. This will also * cause us to set TF when returning to user mode. */ set_tsk_thread_flag(child, TIF_SINGLESTEP); oflags = regs->flags; /* Set TF on the kernel stack.. */ regs->flags |= X86_EFLAGS_TF; /* * ..but if TF is changed by the instruction we will trace, * don't mark it as being "us" that set it, so that we * won't clear it by hand later. * * Note that if we don't actually execute the popf because * of a signal arriving right now or suchlike, we will lose * track of the fact that it really was "us" that set it. 
*/ if (is_setting_trap_flag(child, regs)) { clear_tsk_thread_flag(child, TIF_FORCED_TF); return 0; } /* * If TF was already set, check whether it was us who set it. * If not, we should never attempt a block step. */ if (oflags & X86_EFLAGS_TF) return test_tsk_thread_flag(child, TIF_FORCED_TF); set_tsk_thread_flag(child, TIF_FORCED_TF); return 1; } void set_task_blockstep(struct task_struct *task, bool on) { unsigned long debugctl; /* * Ensure irq/preemption can't change debugctl in between. * Note also that both TIF_BLOCKSTEP and debugctl should * be changed atomically wrt preemption. * * NOTE: this means that set/clear TIF_BLOCKSTEP is only safe if * task is current or it can't be running, otherwise we can race * with __switch_to_xtra(). We rely on ptrace_freeze_traced() but * PTRACE_KILL is not safe. */ local_irq_disable(); debugctl = get_debugctlmsr(); if (on) { debugctl |= DEBUGCTLMSR_BTF; set_tsk_thread_flag(task, TIF_BLOCKSTEP); } else { debugctl &= ~DEBUGCTLMSR_BTF; clear_tsk_thread_flag(task, TIF_BLOCKSTEP); } if (task == current) update_debugctlmsr(debugctl); local_irq_enable(); } /* * Enable single or block step. */ static void enable_step(struct task_struct *child, bool block) { /* * Make sure block stepping (BTF) is not enabled unless it should be. * Note that we don't try to worry about any is_setting_trap_flag() * instructions after the first when using block stepping. * So no one should try to use debugger block stepping in a program * that uses user-mode single stepping itself. */ if (enable_single_step(child) && block) set_task_blockstep(child, true); else if (test_tsk_thread_flag(child, TIF_BLOCKSTEP)) set_task_blockstep(child, false); } void user_enable_single_step(struct task_struct *child) { enable_step(child, 0); } void user_enable_block_step(struct task_struct *child) { enable_step(child, 1); } void user_disable_single_step(struct task_struct *child) { /* * Make sure block stepping (BTF) is disabled. */ if (test_tsk_thread_flag(child, TIF_BLOCKSTEP)) set_task_blockstep(child, false); /* Always clear TIF_SINGLESTEP... */ clear_tsk_thread_flag(child, TIF_SINGLESTEP); /* But touch TF only if it was set by us.. */ if (test_and_clear_tsk_thread_flag(child, TIF_FORCED_TF)) task_pt_regs(child)->flags &= ~X86_EFLAGS_TF; }
void set_task_blockstep(struct task_struct *task, bool on) { unsigned long debugctl; /* * Ensure irq/preemption can't change debugctl in between. * Note also that both TIF_BLOCKSTEP and debugctl should * be changed atomically wrt preemption. * FIXME: this means that set/clear TIF_BLOCKSTEP is simply * wrong if task != current, SIGKILL can wakeup the stopped * tracee and set/clear can play with the running task, this * can confuse the next __switch_to_xtra(). */ local_irq_disable(); debugctl = get_debugctlmsr(); if (on) { debugctl |= DEBUGCTLMSR_BTF; set_tsk_thread_flag(task, TIF_BLOCKSTEP); } else { debugctl &= ~DEBUGCTLMSR_BTF; clear_tsk_thread_flag(task, TIF_BLOCKSTEP); } if (task == current) update_debugctlmsr(debugctl); local_irq_enable(); }
void set_task_blockstep(struct task_struct *task, bool on) { unsigned long debugctl; /* * Ensure irq/preemption can't change debugctl in between. * Note also that both TIF_BLOCKSTEP and debugctl should * be changed atomically wrt preemption. * * NOTE: this means that set/clear TIF_BLOCKSTEP is only safe if * task is current or it can't be running, otherwise we can race * with __switch_to_xtra(). We rely on ptrace_freeze_traced() but * PTRACE_KILL is not safe. */ local_irq_disable(); debugctl = get_debugctlmsr(); if (on) { debugctl |= DEBUGCTLMSR_BTF; set_tsk_thread_flag(task, TIF_BLOCKSTEP); } else { debugctl &= ~DEBUGCTLMSR_BTF; clear_tsk_thread_flag(task, TIF_BLOCKSTEP); } if (task == current) update_debugctlmsr(debugctl); local_irq_enable(); }
{'added': [(168, '\t *'), (169, '\t * NOTE: this means that set/clear TIF_BLOCKSTEP is only safe if'), (170, "\t * task is current or it can't be running, otherwise we can race"), (171, '\t * with __switch_to_xtra(). We rely on ptrace_freeze_traced() but'), (172, '\t * PTRACE_KILL is not safe.')], 'deleted': [(168, '\t * FIXME: this means that set/clear TIF_BLOCKSTEP is simply'), (169, '\t * wrong if task != current, SIGKILL can wakeup the stopped'), (170, '\t * tracee and set/clear can play with the running task, this'), (171, '\t * can confuse the next __switch_to_xtra().')]}
5
4
116
653
https://github.com/torvalds/linux
CVE-2013-0871
['CWE-362']
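The CVE-2013-0871 record above is a comment-only change in arch/x86/kernel/step.c: the old FIXME about toggling TIF_BLOCKSTEP on a non-current task is rewritten as a NOTE stating the actual constraint — set/clear is safe only if the task is current or is guaranteed not to be running, because an update that races with __switch_to_xtra() can leave the DEBUGCTLMSR.BTF bit and the thread flag out of sync; the kernel relies on ptrace_freeze_traced() to provide that guarantee. Below is a minimal userspace C sketch of this CWE-362 pattern under stated assumptions: every name in it (struct task, set_blockstep_unsafe, set_blockstep_frozen, frozen) is hypothetical and merely stands in for the kernel's freeze-before-touch discipline — it is not kernel code.

```c
/*
 * Illustration only -- NOT kernel code; all names are hypothetical.
 * Mirrors the hazard the kernel comment describes: a read-modify-write
 * of a shared flags word while the target may be running, so a context
 * switch (__switch_to_xtra() in the real kernel) can observe or clobber
 * a half-applied state. The fix class is to quiesce the target first.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define TIF_BLOCKSTEP (1u << 0)

struct task {
    atomic_uint tif_flags;   /* stand-in for thread-info flags */
    atomic_bool frozen;      /* stand-in for ptrace_freeze_traced() */
};

/* Unsafe shape: non-atomic read-modify-write with a lost-update window. */
static void set_blockstep_unsafe(struct task *t, bool on)
{
    unsigned f = atomic_load(&t->tif_flags);   /* read...            */
    if (on)
        f |= TIF_BLOCKSTEP;                    /* modify...          */
    else
        f &= ~TIF_BLOCKSTEP;
    atomic_store(&t->tif_flags, f);            /* write: racy window */
}

/* Safe shape: refuse to touch the flag unless the target is quiescent. */
static int set_blockstep_frozen(struct task *t, bool on)
{
    if (!atomic_load(&t->frozen))
        return -1;                 /* the constraint the NOTE demands */
    if (on)
        atomic_fetch_or(&t->tif_flags, TIF_BLOCKSTEP);
    else
        atomic_fetch_and(&t->tif_flags, ~TIF_BLOCKSTEP);
    return 0;
}

int main(void)
{
    struct task t = { .tif_flags = 0, .frozen = true };
    set_blockstep_unsafe(&t, true);            /* racy if t were running */
    printf("unsafe set:   flags=%#x\n", atomic_load(&t.tif_flags));
    printf("frozen clear: rc=%d flags=%#x\n",
           set_blockstep_frozen(&t, false), atomic_load(&t.tif_flags));
    return 0;
}
```

The unsafe variant shows the lost-update window the rewritten comment warns about; the frozen variant only mutates the flag once the target cannot be scheduled, which is the role ptrace_freeze_traced() plays in the actual kernel fix.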
ext2.c
grub_ext2_read_block
/* ext2.c - Second Extended filesystem */ /* * GRUB -- GRand Unified Bootloader * Copyright (C) 2003,2004,2005,2007,2008,2009 Free Software Foundation, Inc. * * GRUB is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * GRUB is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with GRUB. If not, see <http://www.gnu.org/licenses/>. */ /* Magic value used to identify an ext2 filesystem. */ #define EXT2_MAGIC 0xEF53 /* Amount of indirect blocks in an inode. */ #define INDIRECT_BLOCKS 12 /* Maximum length of a pathname. */ #define EXT2_PATH_MAX 4096 /* Maximum nesting of symlinks, used to prevent a loop. */ #define EXT2_MAX_SYMLINKCNT 8 /* The good old revision and the default inode size. */ #define EXT2_GOOD_OLD_REVISION 0 #define EXT2_GOOD_OLD_INODE_SIZE 128 /* Filetype used in directory entry. */ #define FILETYPE_UNKNOWN 0 #define FILETYPE_REG 1 #define FILETYPE_DIRECTORY 2 #define FILETYPE_SYMLINK 7 /* Filetype information as used in inodes. */ #define FILETYPE_INO_MASK 0170000 #define FILETYPE_INO_REG 0100000 #define FILETYPE_INO_DIRECTORY 0040000 #define FILETYPE_INO_SYMLINK 0120000 #include <grub/err.h> #include <grub/file.h> #include <grub/mm.h> #include <grub/misc.h> #include <grub/disk.h> #include <grub/dl.h> #include <grub/types.h> #include <grub/fshelp.h> /* Log2 size of ext2 block in 512 blocks. */ #define LOG2_EXT2_BLOCK_SIZE(data) \ (grub_le_to_cpu32 (data->sblock.log2_block_size) + 1) /* Log2 size of ext2 block in bytes. */ #define LOG2_BLOCK_SIZE(data) \ (grub_le_to_cpu32 (data->sblock.log2_block_size) + 10) /* The size of an ext2 block in bytes. */ #define EXT2_BLOCK_SIZE(data) (1 << LOG2_BLOCK_SIZE (data)) /* The revision level. */ #define EXT2_REVISION(data) grub_le_to_cpu32 (data->sblock.revision_level) /* The inode size. */ #define EXT2_INODE_SIZE(data) \ (EXT2_REVISION (data) == EXT2_GOOD_OLD_REVISION \ ? EXT2_GOOD_OLD_INODE_SIZE \ : grub_le_to_cpu16 (data->sblock.inode_size)) /* Superblock filesystem feature flags (RW compatible) * A filesystem with any of these enabled can be read and written by a driver * that does not understand them without causing metadata/data corruption. */ #define EXT2_FEATURE_COMPAT_DIR_PREALLOC 0x0001 #define EXT2_FEATURE_COMPAT_IMAGIC_INODES 0x0002 #define EXT3_FEATURE_COMPAT_HAS_JOURNAL 0x0004 #define EXT2_FEATURE_COMPAT_EXT_ATTR 0x0008 #define EXT2_FEATURE_COMPAT_RESIZE_INODE 0x0010 #define EXT2_FEATURE_COMPAT_DIR_INDEX 0x0020 /* Superblock filesystem feature flags (RO compatible) * A filesystem with any of these enabled can be safely read by a driver that * does not understand them, but should not be written to, usually because * additional metadata is required. 
*/ #define EXT2_FEATURE_RO_COMPAT_SPARSE_SUPER 0x0001 #define EXT2_FEATURE_RO_COMPAT_LARGE_FILE 0x0002 #define EXT2_FEATURE_RO_COMPAT_BTREE_DIR 0x0004 #define EXT4_FEATURE_RO_COMPAT_GDT_CSUM 0x0010 #define EXT4_FEATURE_RO_COMPAT_DIR_NLINK 0x0020 #define EXT4_FEATURE_RO_COMPAT_EXTRA_ISIZE 0x0040 /* Superblock filesystem feature flags (back-incompatible) * A filesystem with any of these enabled should not be attempted to be read * by a driver that does not understand them, since they usually indicate * metadata format changes that might confuse the reader. */ #define EXT2_FEATURE_INCOMPAT_COMPRESSION 0x0001 #define EXT2_FEATURE_INCOMPAT_FILETYPE 0x0002 #define EXT3_FEATURE_INCOMPAT_RECOVER 0x0004 /* Needs recovery */ #define EXT3_FEATURE_INCOMPAT_JOURNAL_DEV 0x0008 /* Volume is journal device */ #define EXT2_FEATURE_INCOMPAT_META_BG 0x0010 #define EXT4_FEATURE_INCOMPAT_EXTENTS 0x0040 /* Extents used */ #define EXT4_FEATURE_INCOMPAT_64BIT 0x0080 #define EXT4_FEATURE_INCOMPAT_FLEX_BG 0x0200 /* The set of back-incompatible features this driver DOES support. Add (OR) * flags here as the related features are implemented into the driver. */ #define EXT2_DRIVER_SUPPORTED_INCOMPAT ( EXT2_FEATURE_INCOMPAT_FILETYPE \ | EXT4_FEATURE_INCOMPAT_EXTENTS \ | EXT4_FEATURE_INCOMPAT_FLEX_BG ) /* List of rationales for the ignored "incompatible" features: * needs_recovery: Not really back-incompatible - was added as such to forbid * ext2 drivers from mounting an ext3 volume with a dirty * journal because they will ignore the journal, but the next * ext3 driver to mount the volume will find the journal and * replay it, potentially corrupting the metadata written by * the ext2 drivers. Safe to ignore for this RO driver. */ #define EXT2_DRIVER_IGNORED_INCOMPAT ( EXT3_FEATURE_INCOMPAT_RECOVER ) #define EXT3_JOURNAL_MAGIC_NUMBER 0xc03b3998U #define EXT3_JOURNAL_DESCRIPTOR_BLOCK 1 #define EXT3_JOURNAL_COMMIT_BLOCK 2 #define EXT3_JOURNAL_SUPERBLOCK_V1 3 #define EXT3_JOURNAL_SUPERBLOCK_V2 4 #define EXT3_JOURNAL_REVOKE_BLOCK 5 #define EXT3_JOURNAL_FLAG_ESCAPE 1 #define EXT3_JOURNAL_FLAG_SAME_UUID 2 #define EXT3_JOURNAL_FLAG_DELETED 4 #define EXT3_JOURNAL_FLAG_LAST_TAG 8 #define EXT4_EXTENTS_FLAG 0x80000 /* The ext2 superblock. 
*/ struct grub_ext2_sblock { grub_uint32_t total_inodes; grub_uint32_t total_blocks; grub_uint32_t reserved_blocks; grub_uint32_t free_blocks; grub_uint32_t free_inodes; grub_uint32_t first_data_block; grub_uint32_t log2_block_size; grub_uint32_t log2_fragment_size; grub_uint32_t blocks_per_group; grub_uint32_t fragments_per_group; grub_uint32_t inodes_per_group; grub_uint32_t mtime; grub_uint32_t utime; grub_uint16_t mnt_count; grub_uint16_t max_mnt_count; grub_uint16_t magic; grub_uint16_t fs_state; grub_uint16_t error_handling; grub_uint16_t minor_revision_level; grub_uint32_t lastcheck; grub_uint32_t checkinterval; grub_uint32_t creator_os; grub_uint32_t revision_level; grub_uint16_t uid_reserved; grub_uint16_t gid_reserved; grub_uint32_t first_inode; grub_uint16_t inode_size; grub_uint16_t block_group_number; grub_uint32_t feature_compatibility; grub_uint32_t feature_incompat; grub_uint32_t feature_ro_compat; grub_uint16_t uuid[8]; char volume_name[16]; char last_mounted_on[64]; grub_uint32_t compression_info; grub_uint8_t prealloc_blocks; grub_uint8_t prealloc_dir_blocks; grub_uint16_t reserved_gdt_blocks; grub_uint8_t journal_uuid[16]; grub_uint32_t journal_inum; grub_uint32_t journal_dev; grub_uint32_t last_orphan; grub_uint32_t hash_seed[4]; grub_uint8_t def_hash_version; grub_uint8_t jnl_backup_type; grub_uint16_t reserved_word_pad; grub_uint32_t default_mount_opts; grub_uint32_t first_meta_bg; grub_uint32_t mkfs_time; grub_uint32_t jnl_blocks[17]; }; /* The ext2 blockgroup. */ struct grub_ext2_block_group { grub_uint32_t block_id; grub_uint32_t inode_id; grub_uint32_t inode_table_id; grub_uint16_t free_blocks; grub_uint16_t free_inodes; grub_uint16_t used_dirs; grub_uint16_t pad; grub_uint32_t reserved[3]; }; /* The ext2 inode. */ struct grub_ext2_inode { grub_uint16_t mode; grub_uint16_t uid; grub_uint32_t size; grub_uint32_t atime; grub_uint32_t ctime; grub_uint32_t mtime; grub_uint32_t dtime; grub_uint16_t gid; grub_uint16_t nlinks; grub_uint32_t blockcnt; /* Blocks of 512 bytes!! */ grub_uint32_t flags; grub_uint32_t osd1; union { struct datablocks { grub_uint32_t dir_blocks[INDIRECT_BLOCKS]; grub_uint32_t indir_block; grub_uint32_t double_indir_block; grub_uint32_t triple_indir_block; } blocks; char symlink[60]; }; grub_uint32_t version; grub_uint32_t acl; grub_uint32_t dir_acl; grub_uint32_t fragment_addr; grub_uint32_t osd2[3]; }; /* The header of an ext2 directory entry. 
*/ struct ext2_dirent { grub_uint32_t inode; grub_uint16_t direntlen; grub_uint8_t namelen; grub_uint8_t filetype; }; struct grub_ext3_journal_header { grub_uint32_t magic; grub_uint32_t block_type; grub_uint32_t sequence; }; struct grub_ext3_journal_revoke_header { struct grub_ext3_journal_header header; grub_uint32_t count; grub_uint32_t data[0]; }; struct grub_ext3_journal_block_tag { grub_uint32_t block; grub_uint32_t flags; }; struct grub_ext3_journal_sblock { struct grub_ext3_journal_header header; grub_uint32_t block_size; grub_uint32_t maxlen; grub_uint32_t first; grub_uint32_t sequence; grub_uint32_t start; }; #define EXT4_EXT_MAGIC 0xf30a struct grub_ext4_extent_header { grub_uint16_t magic; grub_uint16_t entries; grub_uint16_t max; grub_uint16_t depth; grub_uint32_t generation; }; struct grub_ext4_extent { grub_uint32_t block; grub_uint16_t len; grub_uint16_t start_hi; grub_uint32_t start; }; struct grub_ext4_extent_idx { grub_uint32_t block; grub_uint32_t leaf; grub_uint16_t leaf_hi; grub_uint16_t unused; }; struct grub_fshelp_node { struct grub_ext2_data *data; struct grub_ext2_inode inode; int ino; int inode_read; }; /* Information about a "mounted" ext2 filesystem. */ struct grub_ext2_data { struct grub_ext2_sblock sblock; grub_disk_t disk; struct grub_ext2_inode *inode; struct grub_fshelp_node diropen; }; static grub_dl_t my_mod; /* Read into BLKGRP the blockgroup descriptor of blockgroup GROUP of the mounted filesystem DATA. */ inline static grub_err_t grub_ext2_blockgroup (struct grub_ext2_data *data, int group, struct grub_ext2_block_group *blkgrp) { return grub_disk_read (data->disk, ((grub_le_to_cpu32 (data->sblock.first_data_block) + 1) << LOG2_EXT2_BLOCK_SIZE (data)), group * sizeof (struct grub_ext2_block_group), sizeof (struct grub_ext2_block_group), blkgrp); } static struct grub_ext4_extent_header * grub_ext4_find_leaf (struct grub_ext2_data *data, char *buf, struct grub_ext4_extent_header *ext_block, grub_uint32_t fileblock) { struct grub_ext4_extent_idx *index; while (1) { int i; grub_disk_addr_t block; index = (struct grub_ext4_extent_idx *) (ext_block + 1); if (grub_le_to_cpu16(ext_block->magic) != EXT4_EXT_MAGIC) return 0; if (ext_block->depth == 0) return ext_block; for (i = 0; i < grub_le_to_cpu16 (ext_block->entries); i++) { if (fileblock < grub_le_to_cpu32(index[i].block)) break; } if (--i < 0) return 0; block = grub_le_to_cpu16 (index[i].leaf_hi); block = (block << 32) + grub_le_to_cpu32 (index[i].leaf); if (grub_disk_read (data->disk, block << LOG2_EXT2_BLOCK_SIZE (data), 0, EXT2_BLOCK_SIZE(data), buf)) return 0; ext_block = (struct grub_ext4_extent_header *) buf; } } static grub_disk_addr_t grub_ext2_read_block (grub_fshelp_node_t node, grub_disk_addr_t fileblock) { struct grub_ext2_data *data = node->data; struct grub_ext2_inode *inode = &node->inode; int blknr = -1; unsigned int blksz = EXT2_BLOCK_SIZE (data); int log2_blksz = LOG2_EXT2_BLOCK_SIZE (data); if (grub_le_to_cpu32(inode->flags) & EXT4_EXTENTS_FLAG) { #ifndef _MSC_VER char buf[EXT2_BLOCK_SIZE (data)]; #else char * buf = grub_malloc (EXT2_BLOCK_SIZE(data)); #endif struct grub_ext4_extent_header *leaf; struct grub_ext4_extent *ext; int i; leaf = grub_ext4_find_leaf (data, buf, (struct grub_ext4_extent_header *) inode->blocks.dir_blocks, fileblock); if (! 
leaf) { grub_error (GRUB_ERR_BAD_FS, "invalid extent"); return -1; } ext = (struct grub_ext4_extent *) (leaf + 1); for (i = 0; i < grub_le_to_cpu16 (leaf->entries); i++) { if (fileblock < grub_le_to_cpu32 (ext[i].block)) break; } if (--i >= 0) { fileblock -= grub_le_to_cpu32 (ext[i].block); if (fileblock >= grub_le_to_cpu16 (ext[i].len)) return 0; else { grub_disk_addr_t start; start = grub_le_to_cpu16 (ext[i].start_hi); start = (start << 32) + grub_le_to_cpu32 (ext[i].start); return fileblock + start; } } else { grub_error (GRUB_ERR_BAD_FS, "something wrong with extent"); return -1; } } /* Direct blocks. */ if (fileblock < INDIRECT_BLOCKS) { blknr = grub_le_to_cpu32 (inode->blocks.dir_blocks[fileblock]); /* Indirect. */ } else if (fileblock < INDIRECT_BLOCKS + blksz / 4) { grub_uint32_t *indir; indir = grub_malloc (blksz); if (! indir) return grub_errno; if (grub_disk_read (data->disk, ((grub_disk_addr_t) grub_le_to_cpu32 (inode->blocks.indir_block)) << log2_blksz, 0, blksz, indir)) return grub_errno; blknr = grub_le_to_cpu32 (indir[fileblock - INDIRECT_BLOCKS]); grub_free (indir); } /* Double indirect. */ else if (fileblock < (grub_disk_addr_t)(INDIRECT_BLOCKS + blksz / 4) \ * (grub_disk_addr_t)(blksz / 4 + 1)) { unsigned int perblock = blksz / 4; unsigned int rblock = fileblock - (INDIRECT_BLOCKS + blksz / 4); grub_uint32_t *indir; indir = grub_malloc (blksz); if (! indir) return grub_errno; if (grub_disk_read (data->disk, ((grub_disk_addr_t) grub_le_to_cpu32 (inode->blocks.double_indir_block)) << log2_blksz, 0, blksz, indir)) return grub_errno; if (grub_disk_read (data->disk, ((grub_disk_addr_t) grub_le_to_cpu32 (indir[rblock / perblock])) << log2_blksz, 0, blksz, indir)) return grub_errno; blknr = grub_le_to_cpu32 (indir[rblock % perblock]); grub_free (indir); } /* triple indirect. */ else { grub_error (GRUB_ERR_NOT_IMPLEMENTED_YET, "ext2fs doesn't support triple indirect blocks"); } return blknr; } /* Read LEN bytes from the file described by DATA starting with byte POS. Return the amount of read bytes in READ. */ static grub_ssize_t grub_ext2_read_file (grub_fshelp_node_t node, void (*read_hook) (grub_disk_addr_t sector, unsigned offset, unsigned length, void *closure), void *closure, int flags, int pos, grub_size_t len, char *buf) { return grub_fshelp_read_file (node->data->disk, node, read_hook, closure, flags, pos, len, buf, grub_ext2_read_block, node->inode.size, LOG2_EXT2_BLOCK_SIZE (node->data)); } /* Read the inode INO for the file described by DATA into INODE. */ static grub_err_t grub_ext2_read_inode (struct grub_ext2_data *data, int ino, struct grub_ext2_inode *inode) { struct grub_ext2_block_group blkgrp; struct grub_ext2_sblock *sblock = &data->sblock; int inodes_per_block; unsigned int blkno; unsigned int blkoff; /* It is easier to calculate if the first inode is 0. */ ino--; int div = grub_le_to_cpu32 (sblock->inodes_per_group); if (div < 1) { return grub_errno = GRUB_ERR_BAD_FS; } grub_ext2_blockgroup (data, ino / div, &blkgrp); if (grub_errno) return grub_errno; int inode_size = EXT2_INODE_SIZE (data); if (inode_size < 1) { return grub_errno = GRUB_ERR_BAD_FS; } inodes_per_block = EXT2_BLOCK_SIZE (data) / inode_size; if (inodes_per_block < 1) { return grub_errno = GRUB_ERR_BAD_FS; } blkno = (ino % grub_le_to_cpu32 (sblock->inodes_per_group)) / inodes_per_block; blkoff = (ino % grub_le_to_cpu32 (sblock->inodes_per_group)) % inodes_per_block; /* Read the inode. 
*/ if (grub_disk_read (data->disk, ((grub_le_to_cpu32 (blkgrp.inode_table_id) + blkno) << LOG2_EXT2_BLOCK_SIZE (data)), EXT2_INODE_SIZE (data) * blkoff, sizeof (struct grub_ext2_inode), inode)) return grub_errno; return 0; } static struct grub_ext2_data * grub_ext2_mount (grub_disk_t disk) { struct grub_ext2_data *data; data = grub_malloc (sizeof (struct grub_ext2_data)); if (!data) return 0; /* Read the superblock. */ grub_disk_read (disk, 1 * 2, 0, sizeof (struct grub_ext2_sblock), &data->sblock); if (grub_errno) goto fail; /* Make sure this is an ext2 filesystem. */ if (grub_le_to_cpu16 (data->sblock.magic) != EXT2_MAGIC) { grub_error (GRUB_ERR_BAD_FS, "not an ext2 filesystem"); goto fail; } /* Check the FS doesn't have feature bits enabled that we don't support. */ if (grub_le_to_cpu32 (data->sblock.feature_incompat) & ~(EXT2_DRIVER_SUPPORTED_INCOMPAT | EXT2_DRIVER_IGNORED_INCOMPAT)) { grub_error (GRUB_ERR_BAD_FS, "filesystem has unsupported incompatible features"); goto fail; } data->disk = disk; data->diropen.data = data; data->diropen.ino = 2; data->diropen.inode_read = 1; data->inode = &data->diropen.inode; grub_ext2_read_inode (data, 2, data->inode); if (grub_errno) goto fail; return data; fail: if (grub_errno == GRUB_ERR_OUT_OF_RANGE) grub_error (GRUB_ERR_BAD_FS, "not an ext2 filesystem"); grub_free (data); return 0; } static char * grub_ext2_read_symlink (grub_fshelp_node_t node) { char *symlink; struct grub_fshelp_node *diro = node; if (! diro->inode_read) { grub_ext2_read_inode (diro->data, diro->ino, &diro->inode); if (grub_errno) return 0; } symlink = grub_malloc (grub_le_to_cpu32 (diro->inode.size) + 1); if (! symlink) return 0; /* If the filesize of the symlink is bigger than 60 the symlink is stored in a separate block, otherwise it is stored in the inode. */ if (grub_le_to_cpu32 (diro->inode.size) <= 60) grub_strncpy (symlink, diro->inode.symlink, grub_le_to_cpu32 (diro->inode.size)); else { grub_ext2_read_file (diro, 0, 0, 0, 0, grub_le_to_cpu32 (diro->inode.size), symlink); if (grub_errno) { grub_free (symlink); return 0; } } symlink[grub_le_to_cpu32 (diro->inode.size)] = '\0'; return symlink; } static int grub_ext2_iterate_dir (grub_fshelp_node_t dir, int (*hook) (const char *filename, enum grub_fshelp_filetype filetype, grub_fshelp_node_t node, void *closure), void *closure) { unsigned int fpos = 0; struct grub_fshelp_node *diro = (struct grub_fshelp_node *) dir; if (! diro->inode_read) { grub_ext2_read_inode (diro->data, diro->ino, &diro->inode); if (grub_errno) return 0; } /* Search the file. */ if (hook) while (fpos < grub_le_to_cpu32 (diro->inode.size)) { struct ext2_dirent dirent; grub_ext2_read_file (diro, NULL, NULL, 0, fpos, sizeof (dirent), (char *) &dirent); if (grub_errno) return 0; if (dirent.direntlen == 0) return 0; if (dirent.namelen != 0) { char * filename = grub_malloc (dirent.namelen + 1); struct grub_fshelp_node *fdiro; enum grub_fshelp_filetype type = GRUB_FSHELP_UNKNOWN; if (!filename) { break; } grub_ext2_read_file (diro, 0, 0, 0, fpos + sizeof (struct ext2_dirent), dirent.namelen, filename); if (grub_errno) { grub_free (filename); return 0; } fdiro = grub_malloc (sizeof (struct grub_fshelp_node)); if (! 
fdiro) { grub_free (filename); return 0; } fdiro->data = diro->data; fdiro->ino = grub_le_to_cpu32 (dirent.inode); filename[dirent.namelen] = '\0'; if (dirent.filetype != FILETYPE_UNKNOWN) { fdiro->inode_read = 0; if (dirent.filetype == FILETYPE_DIRECTORY) type = GRUB_FSHELP_DIR; else if (dirent.filetype == FILETYPE_SYMLINK) type = GRUB_FSHELP_SYMLINK; else if (dirent.filetype == FILETYPE_REG) type = GRUB_FSHELP_REG; } else { /* The filetype can not be read from the dirent, read the inode to get more information. */ grub_ext2_read_inode (diro->data, grub_le_to_cpu32 (dirent.inode), &fdiro->inode); if (grub_errno) { grub_free (filename); grub_free (fdiro); return 0; } fdiro->inode_read = 1; if ((grub_le_to_cpu16 (fdiro->inode.mode) & FILETYPE_INO_MASK) == FILETYPE_INO_DIRECTORY) type = GRUB_FSHELP_DIR; else if ((grub_le_to_cpu16 (fdiro->inode.mode) & FILETYPE_INO_MASK) == FILETYPE_INO_SYMLINK) type = GRUB_FSHELP_SYMLINK; else if ((grub_le_to_cpu16 (fdiro->inode.mode) & FILETYPE_INO_MASK) == FILETYPE_INO_REG) type = GRUB_FSHELP_REG; } if (hook (filename, type, fdiro, closure)) { grub_free (filename); return 1; } grub_free (filename); } fpos += grub_le_to_cpu16 (dirent.direntlen); } return 0; } /* Open a file named NAME and initialize FILE. */ static grub_err_t grub_ext2_open (struct grub_file *file, const char *name) { struct grub_ext2_data *data; struct grub_fshelp_node *fdiro = 0; grub_dl_ref (my_mod); data = grub_ext2_mount (file->device->disk); if (! data) goto fail; grub_fshelp_find_file (name, &data->diropen, &fdiro, grub_ext2_iterate_dir, 0, grub_ext2_read_symlink, GRUB_FSHELP_REG); if (grub_errno) goto fail; if (! fdiro->inode_read) { grub_ext2_read_inode (data, fdiro->ino, &fdiro->inode); if (grub_errno) goto fail; } grub_memcpy (data->inode, &fdiro->inode, sizeof (struct grub_ext2_inode)); grub_free (fdiro); file->size = grub_le_to_cpu32 (data->inode->size); file->data = data; file->offset = 0; return 0; fail: if (fdiro != &data->diropen) grub_free (fdiro); grub_free (data); grub_dl_unref (my_mod); return grub_errno; } static grub_err_t grub_ext2_close (grub_file_t file) { grub_free (file->data); grub_dl_unref (my_mod); return GRUB_ERR_NONE; } /* Read LEN bytes data from FILE into BUF. */ static grub_ssize_t grub_ext2_read (grub_file_t file, char *buf, grub_size_t len) { struct grub_ext2_data *data = (struct grub_ext2_data *) file->data; return grub_ext2_read_file (&data->diropen, file->read_hook, file->closure, file->flags, file->offset, len, buf); } struct grub_ext2_dir_closure { int (*hook) (const char *filename, const struct grub_dirhook_info *info, void *closure); void *closure; struct grub_ext2_data *data; }; static int iterate (const char *filename, enum grub_fshelp_filetype filetype, grub_fshelp_node_t node, void *closure) { struct grub_ext2_dir_closure *c = closure; struct grub_dirhook_info info; grub_memset (&info, 0, sizeof (info)); if (! node->inode_read) { grub_ext2_read_inode (c->data, node->ino, &node->inode); if (!grub_errno) node->inode_read = 1; grub_errno = GRUB_ERR_NONE; } if (node->inode_read) { info.mtimeset = 1; info.mtime = grub_le_to_cpu32 (node->inode.mtime); } info.dir = ((filetype & GRUB_FSHELP_TYPE_MASK) == GRUB_FSHELP_DIR); grub_free (node); return (c->hook != NULL)? 
c->hook (filename, &info, c->closure): 0; } static grub_err_t grub_ext2_dir (grub_device_t device, const char *path, int (*hook) (const char *filename, const struct grub_dirhook_info *info, void *closure), void *closure) { struct grub_ext2_data *data = 0; struct grub_fshelp_node *fdiro = 0; struct grub_ext2_dir_closure c; grub_dl_ref (my_mod); data = grub_ext2_mount (device->disk); if (! data) goto fail; grub_fshelp_find_file (path, &data->diropen, &fdiro, grub_ext2_iterate_dir, 0, grub_ext2_read_symlink, GRUB_FSHELP_DIR); if (grub_errno) goto fail; c.hook = hook; c.closure = closure; c.data = data; grub_ext2_iterate_dir (fdiro, iterate, &c); fail: if (fdiro != &data->diropen) grub_free (fdiro); grub_free (data); grub_dl_unref (my_mod); return grub_errno; } static grub_err_t grub_ext2_label (grub_device_t device, char **label) { struct grub_ext2_data *data; grub_disk_t disk = device->disk; grub_dl_ref (my_mod); data = grub_ext2_mount (disk); if (data) *label = grub_strndup (data->sblock.volume_name, 14); else *label = NULL; grub_dl_unref (my_mod); grub_free (data); return grub_errno; } static grub_err_t grub_ext2_uuid (grub_device_t device, char **uuid) { struct grub_ext2_data *data; grub_disk_t disk = device->disk; grub_dl_ref (my_mod); data = grub_ext2_mount (disk); if (data) { *uuid = grub_xasprintf ("%04x%04x-%04x-%04x-%04x-%04x%04x%04x", grub_be_to_cpu16 (data->sblock.uuid[0]), grub_be_to_cpu16 (data->sblock.uuid[1]), grub_be_to_cpu16 (data->sblock.uuid[2]), grub_be_to_cpu16 (data->sblock.uuid[3]), grub_be_to_cpu16 (data->sblock.uuid[4]), grub_be_to_cpu16 (data->sblock.uuid[5]), grub_be_to_cpu16 (data->sblock.uuid[6]), grub_be_to_cpu16 (data->sblock.uuid[7])); } else *uuid = NULL; grub_dl_unref (my_mod); grub_free (data); return grub_errno; } /* Get mtime. */ static grub_err_t grub_ext2_mtime (grub_device_t device, grub_int32_t *tm) { struct grub_ext2_data *data; grub_disk_t disk = device->disk; grub_dl_ref (my_mod); data = grub_ext2_mount (disk); if (!data) *tm = 0; else *tm = grub_le_to_cpu32 (data->sblock.utime); grub_dl_unref (my_mod); grub_free (data); return grub_errno; } struct grub_fs grub_ext2_fs = { .name = "ext2", .dir = grub_ext2_dir, .open = grub_ext2_open, .read = grub_ext2_read, .close = grub_ext2_close, .label = grub_ext2_label, .uuid = grub_ext2_uuid, .mtime = grub_ext2_mtime, #ifdef GRUB_UTIL .reserved_first_sector = 1, #endif .next = 0 };
/* ext2.c - Second Extended filesystem */ /* * GRUB -- GRand Unified Bootloader * Copyright (C) 2003,2004,2005,2007,2008,2009 Free Software Foundation, Inc. * * GRUB is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * GRUB is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with GRUB. If not, see <http://www.gnu.org/licenses/>. */ /* Magic value used to identify an ext2 filesystem. */ #define EXT2_MAGIC 0xEF53 /* Amount of indirect blocks in an inode. */ #define INDIRECT_BLOCKS 12 /* Maximum length of a pathname. */ #define EXT2_PATH_MAX 4096 /* Maximum nesting of symlinks, used to prevent a loop. */ #define EXT2_MAX_SYMLINKCNT 8 /* The good old revision and the default inode size. */ #define EXT2_GOOD_OLD_REVISION 0 #define EXT2_GOOD_OLD_INODE_SIZE 128 /* Filetype used in directory entry. */ #define FILETYPE_UNKNOWN 0 #define FILETYPE_REG 1 #define FILETYPE_DIRECTORY 2 #define FILETYPE_SYMLINK 7 /* Filetype information as used in inodes. */ #define FILETYPE_INO_MASK 0170000 #define FILETYPE_INO_REG 0100000 #define FILETYPE_INO_DIRECTORY 0040000 #define FILETYPE_INO_SYMLINK 0120000 #include <stdlib.h> #include <grub/err.h> #include <grub/file.h> #include <grub/mm.h> #include <grub/misc.h> #include <grub/disk.h> #include <grub/dl.h> #include <grub/types.h> #include <grub/fshelp.h> /* Log2 size of ext2 block in 512 blocks. */ #define LOG2_EXT2_BLOCK_SIZE(data) \ (grub_le_to_cpu32 (data->sblock.log2_block_size) + 1) /* Log2 size of ext2 block in bytes. */ #define LOG2_BLOCK_SIZE(data) \ (grub_le_to_cpu32 (data->sblock.log2_block_size) + 10) /* The size of an ext2 block in bytes. */ #define EXT2_BLOCK_SIZE(data) (1 << LOG2_BLOCK_SIZE (data)) /* The revision level. */ #define EXT2_REVISION(data) grub_le_to_cpu32 (data->sblock.revision_level) /* The inode size. */ #define EXT2_INODE_SIZE(data) \ (EXT2_REVISION (data) == EXT2_GOOD_OLD_REVISION \ ? EXT2_GOOD_OLD_INODE_SIZE \ : grub_le_to_cpu16 (data->sblock.inode_size)) /* Superblock filesystem feature flags (RW compatible) * A filesystem with any of these enabled can be read and written by a driver * that does not understand them without causing metadata/data corruption. */ #define EXT2_FEATURE_COMPAT_DIR_PREALLOC 0x0001 #define EXT2_FEATURE_COMPAT_IMAGIC_INODES 0x0002 #define EXT3_FEATURE_COMPAT_HAS_JOURNAL 0x0004 #define EXT2_FEATURE_COMPAT_EXT_ATTR 0x0008 #define EXT2_FEATURE_COMPAT_RESIZE_INODE 0x0010 #define EXT2_FEATURE_COMPAT_DIR_INDEX 0x0020 /* Superblock filesystem feature flags (RO compatible) * A filesystem with any of these enabled can be safely read by a driver that * does not understand them, but should not be written to, usually because * additional metadata is required. 
*/ #define EXT2_FEATURE_RO_COMPAT_SPARSE_SUPER 0x0001 #define EXT2_FEATURE_RO_COMPAT_LARGE_FILE 0x0002 #define EXT2_FEATURE_RO_COMPAT_BTREE_DIR 0x0004 #define EXT4_FEATURE_RO_COMPAT_GDT_CSUM 0x0010 #define EXT4_FEATURE_RO_COMPAT_DIR_NLINK 0x0020 #define EXT4_FEATURE_RO_COMPAT_EXTRA_ISIZE 0x0040 /* Superblock filesystem feature flags (back-incompatible) * A filesystem with any of these enabled should not be attempted to be read * by a driver that does not understand them, since they usually indicate * metadata format changes that might confuse the reader. */ #define EXT2_FEATURE_INCOMPAT_COMPRESSION 0x0001 #define EXT2_FEATURE_INCOMPAT_FILETYPE 0x0002 #define EXT3_FEATURE_INCOMPAT_RECOVER 0x0004 /* Needs recovery */ #define EXT3_FEATURE_INCOMPAT_JOURNAL_DEV 0x0008 /* Volume is journal device */ #define EXT2_FEATURE_INCOMPAT_META_BG 0x0010 #define EXT4_FEATURE_INCOMPAT_EXTENTS 0x0040 /* Extents used */ #define EXT4_FEATURE_INCOMPAT_64BIT 0x0080 #define EXT4_FEATURE_INCOMPAT_FLEX_BG 0x0200 /* The set of back-incompatible features this driver DOES support. Add (OR) * flags here as the related features are implemented into the driver. */ #define EXT2_DRIVER_SUPPORTED_INCOMPAT ( EXT2_FEATURE_INCOMPAT_FILETYPE \ | EXT4_FEATURE_INCOMPAT_EXTENTS \ | EXT4_FEATURE_INCOMPAT_FLEX_BG ) /* List of rationales for the ignored "incompatible" features: * needs_recovery: Not really back-incompatible - was added as such to forbid * ext2 drivers from mounting an ext3 volume with a dirty * journal because they will ignore the journal, but the next * ext3 driver to mount the volume will find the journal and * replay it, potentially corrupting the metadata written by * the ext2 drivers. Safe to ignore for this RO driver. */ #define EXT2_DRIVER_IGNORED_INCOMPAT ( EXT3_FEATURE_INCOMPAT_RECOVER ) #define EXT3_JOURNAL_MAGIC_NUMBER 0xc03b3998U #define EXT3_JOURNAL_DESCRIPTOR_BLOCK 1 #define EXT3_JOURNAL_COMMIT_BLOCK 2 #define EXT3_JOURNAL_SUPERBLOCK_V1 3 #define EXT3_JOURNAL_SUPERBLOCK_V2 4 #define EXT3_JOURNAL_REVOKE_BLOCK 5 #define EXT3_JOURNAL_FLAG_ESCAPE 1 #define EXT3_JOURNAL_FLAG_SAME_UUID 2 #define EXT3_JOURNAL_FLAG_DELETED 4 #define EXT3_JOURNAL_FLAG_LAST_TAG 8 #define EXT4_EXTENTS_FLAG 0x80000 /* The ext2 superblock. 
*/ struct grub_ext2_sblock { grub_uint32_t total_inodes; grub_uint32_t total_blocks; grub_uint32_t reserved_blocks; grub_uint32_t free_blocks; grub_uint32_t free_inodes; grub_uint32_t first_data_block; grub_uint32_t log2_block_size; grub_uint32_t log2_fragment_size; grub_uint32_t blocks_per_group; grub_uint32_t fragments_per_group; grub_uint32_t inodes_per_group; grub_uint32_t mtime; grub_uint32_t utime; grub_uint16_t mnt_count; grub_uint16_t max_mnt_count; grub_uint16_t magic; grub_uint16_t fs_state; grub_uint16_t error_handling; grub_uint16_t minor_revision_level; grub_uint32_t lastcheck; grub_uint32_t checkinterval; grub_uint32_t creator_os; grub_uint32_t revision_level; grub_uint16_t uid_reserved; grub_uint16_t gid_reserved; grub_uint32_t first_inode; grub_uint16_t inode_size; grub_uint16_t block_group_number; grub_uint32_t feature_compatibility; grub_uint32_t feature_incompat; grub_uint32_t feature_ro_compat; grub_uint16_t uuid[8]; char volume_name[16]; char last_mounted_on[64]; grub_uint32_t compression_info; grub_uint8_t prealloc_blocks; grub_uint8_t prealloc_dir_blocks; grub_uint16_t reserved_gdt_blocks; grub_uint8_t journal_uuid[16]; grub_uint32_t journal_inum; grub_uint32_t journal_dev; grub_uint32_t last_orphan; grub_uint32_t hash_seed[4]; grub_uint8_t def_hash_version; grub_uint8_t jnl_backup_type; grub_uint16_t reserved_word_pad; grub_uint32_t default_mount_opts; grub_uint32_t first_meta_bg; grub_uint32_t mkfs_time; grub_uint32_t jnl_blocks[17]; }; /* The ext2 blockgroup. */ struct grub_ext2_block_group { grub_uint32_t block_id; grub_uint32_t inode_id; grub_uint32_t inode_table_id; grub_uint16_t free_blocks; grub_uint16_t free_inodes; grub_uint16_t used_dirs; grub_uint16_t pad; grub_uint32_t reserved[3]; }; /* The ext2 inode. */ struct grub_ext2_inode { grub_uint16_t mode; grub_uint16_t uid; grub_uint32_t size; grub_uint32_t atime; grub_uint32_t ctime; grub_uint32_t mtime; grub_uint32_t dtime; grub_uint16_t gid; grub_uint16_t nlinks; grub_uint32_t blockcnt; /* Blocks of 512 bytes!! */ grub_uint32_t flags; grub_uint32_t osd1; union { struct datablocks { grub_uint32_t dir_blocks[INDIRECT_BLOCKS]; grub_uint32_t indir_block; grub_uint32_t double_indir_block; grub_uint32_t triple_indir_block; } blocks; char symlink[60]; }; grub_uint32_t version; grub_uint32_t acl; grub_uint32_t dir_acl; grub_uint32_t fragment_addr; grub_uint32_t osd2[3]; }; /* The header of an ext2 directory entry. 
*/ struct ext2_dirent { grub_uint32_t inode; grub_uint16_t direntlen; grub_uint8_t namelen; grub_uint8_t filetype; }; struct grub_ext3_journal_header { grub_uint32_t magic; grub_uint32_t block_type; grub_uint32_t sequence; }; struct grub_ext3_journal_revoke_header { struct grub_ext3_journal_header header; grub_uint32_t count; grub_uint32_t data[0]; }; struct grub_ext3_journal_block_tag { grub_uint32_t block; grub_uint32_t flags; }; struct grub_ext3_journal_sblock { struct grub_ext3_journal_header header; grub_uint32_t block_size; grub_uint32_t maxlen; grub_uint32_t first; grub_uint32_t sequence; grub_uint32_t start; }; #define EXT4_EXT_MAGIC 0xf30a struct grub_ext4_extent_header { grub_uint16_t magic; grub_uint16_t entries; grub_uint16_t max; grub_uint16_t depth; grub_uint32_t generation; }; struct grub_ext4_extent { grub_uint32_t block; grub_uint16_t len; grub_uint16_t start_hi; grub_uint32_t start; }; struct grub_ext4_extent_idx { grub_uint32_t block; grub_uint32_t leaf; grub_uint16_t leaf_hi; grub_uint16_t unused; }; struct grub_fshelp_node { struct grub_ext2_data *data; struct grub_ext2_inode inode; int ino; int inode_read; }; /* Information about a "mounted" ext2 filesystem. */ struct grub_ext2_data { struct grub_ext2_sblock sblock; grub_disk_t disk; struct grub_ext2_inode *inode; struct grub_fshelp_node diropen; }; static grub_dl_t my_mod; /* Read into BLKGRP the blockgroup descriptor of blockgroup GROUP of the mounted filesystem DATA. */ inline static grub_err_t grub_ext2_blockgroup (struct grub_ext2_data *data, int group, struct grub_ext2_block_group *blkgrp) { return grub_disk_read (data->disk, ((grub_le_to_cpu32 (data->sblock.first_data_block) + 1) << LOG2_EXT2_BLOCK_SIZE (data)), group * sizeof (struct grub_ext2_block_group), sizeof (struct grub_ext2_block_group), blkgrp); } static struct grub_ext4_extent_header * grub_ext4_find_leaf (struct grub_ext2_data *data, char *buf, struct grub_ext4_extent_header *ext_block, grub_uint32_t fileblock) { struct grub_ext4_extent_idx *index; while (1) { int i; grub_disk_addr_t block; index = (struct grub_ext4_extent_idx *) (ext_block + 1); if (grub_le_to_cpu16(ext_block->magic) != EXT4_EXT_MAGIC) return 0; if (ext_block->depth == 0) return ext_block; for (i = 0; i < grub_le_to_cpu16 (ext_block->entries); i++) { if (fileblock < grub_le_to_cpu32(index[i].block)) break; } if (--i < 0) return 0; block = grub_le_to_cpu16 (index[i].leaf_hi); block = (block << 32) + grub_le_to_cpu32 (index[i].leaf); if (grub_disk_read (data->disk, block << LOG2_EXT2_BLOCK_SIZE (data), 0, EXT2_BLOCK_SIZE(data), buf)) { return 0; } ext_block = (struct grub_ext4_extent_header *) buf; } } static grub_disk_addr_t grub_ext2_read_block (grub_fshelp_node_t node, grub_disk_addr_t fileblock) { struct grub_ext2_data *data = node->data; struct grub_ext2_inode *inode = &node->inode; int blknr = -1; unsigned int blksz = EXT2_BLOCK_SIZE (data); int log2_blksz = LOG2_EXT2_BLOCK_SIZE (data); if (grub_le_to_cpu32(inode->flags) & EXT4_EXTENTS_FLAG) { char * buf = grub_malloc (EXT2_BLOCK_SIZE (data)); if (!buf) { return -1; } struct grub_ext4_extent_header *leaf; struct grub_ext4_extent *ext; int i; leaf = grub_ext4_find_leaf (data, buf, (struct grub_ext4_extent_header *) inode->blocks.dir_blocks, fileblock); if (! 
leaf) { grub_error (GRUB_ERR_BAD_FS, "invalid extent"); free (buf); return -1; } ext = (struct grub_ext4_extent *) (leaf + 1); for (i = 0; i < grub_le_to_cpu16 (leaf->entries); i++) { if (fileblock < grub_le_to_cpu32 (ext[i].block)) break; } if (--i >= 0) { fileblock -= grub_le_to_cpu32 (ext[i].block); if (fileblock >= grub_le_to_cpu16 (ext[i].len)) { free (buf); return 0; } else { grub_disk_addr_t start; start = grub_le_to_cpu16 (ext[i].start_hi); start = (start << 32) + grub_le_to_cpu32 (ext[i].start); free (buf); return fileblock + start; } } else { grub_error (GRUB_ERR_BAD_FS, "something wrong with extent"); free (buf); return -1; } free (buf); } /* Direct blocks. */ if (fileblock < INDIRECT_BLOCKS) { blknr = grub_le_to_cpu32 (inode->blocks.dir_blocks[fileblock]); /* Indirect. */ } else if (fileblock < INDIRECT_BLOCKS + blksz / 4) { grub_uint32_t *indir; indir = grub_malloc (blksz); if (! indir) { return grub_errno; } if (grub_disk_read (data->disk, ((grub_disk_addr_t) grub_le_to_cpu32 (inode->blocks.indir_block)) << log2_blksz, 0, blksz, indir)) { return grub_errno; } blknr = grub_le_to_cpu32 (indir[fileblock - INDIRECT_BLOCKS]); grub_free (indir); } /* Double indirect. */ else if (fileblock < (grub_disk_addr_t)(INDIRECT_BLOCKS + blksz / 4) \ * (grub_disk_addr_t)(blksz / 4 + 1)) { unsigned int perblock = blksz / 4; unsigned int rblock = fileblock - (INDIRECT_BLOCKS + blksz / 4); grub_uint32_t *indir; indir = grub_malloc (blksz); if (! indir) { return grub_errno; } if (grub_disk_read (data->disk, ((grub_disk_addr_t) grub_le_to_cpu32 (inode->blocks.double_indir_block)) << log2_blksz, 0, blksz, indir)) { return grub_errno; } if (grub_disk_read (data->disk, ((grub_disk_addr_t) grub_le_to_cpu32 (indir[rblock / perblock])) << log2_blksz, 0, blksz, indir)) { return grub_errno; } blknr = grub_le_to_cpu32 (indir[rblock % perblock]); grub_free (indir); } /* triple indirect. */ else { grub_error (GRUB_ERR_NOT_IMPLEMENTED_YET, "ext2fs doesn't support triple indirect blocks"); } return blknr; } /* Read LEN bytes from the file described by DATA starting with byte POS. Return the amount of read bytes in READ. */ static grub_ssize_t grub_ext2_read_file (grub_fshelp_node_t node, void (*read_hook) (grub_disk_addr_t sector, unsigned offset, unsigned length, void *closure), void *closure, int flags, int pos, grub_size_t len, char *buf) { return grub_fshelp_read_file (node->data->disk, node, read_hook, closure, flags, pos, len, buf, grub_ext2_read_block, node->inode.size, LOG2_EXT2_BLOCK_SIZE (node->data)); } /* Read the inode INO for the file described by DATA into INODE. */ static grub_err_t grub_ext2_read_inode (struct grub_ext2_data *data, int ino, struct grub_ext2_inode *inode) { struct grub_ext2_block_group blkgrp; struct grub_ext2_sblock *sblock = &data->sblock; int inodes_per_block; unsigned int blkno; unsigned int blkoff; /* It is easier to calculate if the first inode is 0. */ ino--; int div = grub_le_to_cpu32 (sblock->inodes_per_group); if (div < 1) { return grub_errno = GRUB_ERR_BAD_FS; } grub_ext2_blockgroup (data, ino / div, &blkgrp); if (grub_errno) return grub_errno; int inode_size = EXT2_INODE_SIZE (data); if (inode_size < 1) { return grub_errno = GRUB_ERR_BAD_FS; } inodes_per_block = EXT2_BLOCK_SIZE (data) / inode_size; if (inodes_per_block < 1) { return grub_errno = GRUB_ERR_BAD_FS; } blkno = (ino % grub_le_to_cpu32 (sblock->inodes_per_group)) / inodes_per_block; blkoff = (ino % grub_le_to_cpu32 (sblock->inodes_per_group)) % inodes_per_block; /* Read the inode. 
*/ if (grub_disk_read (data->disk, ((grub_le_to_cpu32 (blkgrp.inode_table_id) + blkno) << LOG2_EXT2_BLOCK_SIZE (data)), EXT2_INODE_SIZE (data) * blkoff, sizeof (struct grub_ext2_inode), inode)) return grub_errno; return 0; } static struct grub_ext2_data * grub_ext2_mount (grub_disk_t disk) { struct grub_ext2_data *data; data = grub_malloc (sizeof (struct grub_ext2_data)); if (!data) return 0; /* Read the superblock. */ grub_disk_read (disk, 1 * 2, 0, sizeof (struct grub_ext2_sblock), &data->sblock); if (grub_errno) goto fail; /* Make sure this is an ext2 filesystem. */ if (grub_le_to_cpu16 (data->sblock.magic) != EXT2_MAGIC) { grub_error (GRUB_ERR_BAD_FS, "not an ext2 filesystem"); goto fail; } /* Check the FS doesn't have feature bits enabled that we don't support. */ if (grub_le_to_cpu32 (data->sblock.feature_incompat) & ~(EXT2_DRIVER_SUPPORTED_INCOMPAT | EXT2_DRIVER_IGNORED_INCOMPAT)) { grub_error (GRUB_ERR_BAD_FS, "filesystem has unsupported incompatible features"); goto fail; } data->disk = disk; data->diropen.data = data; data->diropen.ino = 2; data->diropen.inode_read = 1; data->inode = &data->diropen.inode; grub_ext2_read_inode (data, 2, data->inode); if (grub_errno) goto fail; return data; fail: if (grub_errno == GRUB_ERR_OUT_OF_RANGE) grub_error (GRUB_ERR_BAD_FS, "not an ext2 filesystem"); grub_free (data); return 0; } static char * grub_ext2_read_symlink (grub_fshelp_node_t node) { char *symlink; struct grub_fshelp_node *diro = node; if (! diro->inode_read) { grub_ext2_read_inode (diro->data, diro->ino, &diro->inode); if (grub_errno) return 0; } symlink = grub_malloc (grub_le_to_cpu32 (diro->inode.size) + 1); if (! symlink) return 0; /* If the filesize of the symlink is bigger than 60 the symlink is stored in a separate block, otherwise it is stored in the inode. */ if (grub_le_to_cpu32 (diro->inode.size) <= 60) grub_strncpy (symlink, diro->inode.symlink, grub_le_to_cpu32 (diro->inode.size)); else { grub_ext2_read_file (diro, 0, 0, 0, 0, grub_le_to_cpu32 (diro->inode.size), symlink); if (grub_errno) { grub_free (symlink); return 0; } } symlink[grub_le_to_cpu32 (diro->inode.size)] = '\0'; return symlink; } static int grub_ext2_iterate_dir (grub_fshelp_node_t dir, int (*hook) (const char *filename, enum grub_fshelp_filetype filetype, grub_fshelp_node_t node, void *closure), void *closure) { unsigned int fpos = 0; struct grub_fshelp_node *diro = (struct grub_fshelp_node *) dir; if (! diro->inode_read) { grub_ext2_read_inode (diro->data, diro->ino, &diro->inode); if (grub_errno) return 0; } /* Search the file. */ if (hook) while (fpos < grub_le_to_cpu32 (diro->inode.size)) { struct ext2_dirent dirent; grub_ext2_read_file (diro, NULL, NULL, 0, fpos, sizeof (dirent), (char *) &dirent); if (grub_errno) return 0; if (dirent.direntlen == 0) return 0; if (dirent.namelen != 0) { char * filename = grub_malloc (dirent.namelen + 1); struct grub_fshelp_node *fdiro; enum grub_fshelp_filetype type = GRUB_FSHELP_UNKNOWN; if (!filename) { break; } grub_ext2_read_file (diro, 0, 0, 0, fpos + sizeof (struct ext2_dirent), dirent.namelen, filename); if (grub_errno) { grub_free (filename); return 0; } fdiro = grub_malloc (sizeof (struct grub_fshelp_node)); if (! 
fdiro) { grub_free (filename); return 0; } fdiro->data = diro->data; fdiro->ino = grub_le_to_cpu32 (dirent.inode); filename[dirent.namelen] = '\0'; if (dirent.filetype != FILETYPE_UNKNOWN) { fdiro->inode_read = 0; if (dirent.filetype == FILETYPE_DIRECTORY) type = GRUB_FSHELP_DIR; else if (dirent.filetype == FILETYPE_SYMLINK) type = GRUB_FSHELP_SYMLINK; else if (dirent.filetype == FILETYPE_REG) type = GRUB_FSHELP_REG; } else { /* The filetype can not be read from the dirent, read the inode to get more information. */ grub_ext2_read_inode (diro->data, grub_le_to_cpu32 (dirent.inode), &fdiro->inode); if (grub_errno) { grub_free (filename); grub_free (fdiro); return 0; } fdiro->inode_read = 1; if ((grub_le_to_cpu16 (fdiro->inode.mode) & FILETYPE_INO_MASK) == FILETYPE_INO_DIRECTORY) type = GRUB_FSHELP_DIR; else if ((grub_le_to_cpu16 (fdiro->inode.mode) & FILETYPE_INO_MASK) == FILETYPE_INO_SYMLINK) type = GRUB_FSHELP_SYMLINK; else if ((grub_le_to_cpu16 (fdiro->inode.mode) & FILETYPE_INO_MASK) == FILETYPE_INO_REG) type = GRUB_FSHELP_REG; } if (hook (filename, type, fdiro, closure)) { grub_free (filename); return 1; } grub_free (filename); } fpos += grub_le_to_cpu16 (dirent.direntlen); } return 0; } /* Open a file named NAME and initialize FILE. */ static grub_err_t grub_ext2_open (struct grub_file *file, const char *name) { struct grub_ext2_data *data; struct grub_fshelp_node *fdiro = 0; grub_dl_ref (my_mod); data = grub_ext2_mount (file->device->disk); if (! data) goto fail; grub_fshelp_find_file (name, &data->diropen, &fdiro, grub_ext2_iterate_dir, 0, grub_ext2_read_symlink, GRUB_FSHELP_REG); if (grub_errno) goto fail; if (! fdiro->inode_read) { grub_ext2_read_inode (data, fdiro->ino, &fdiro->inode); if (grub_errno) goto fail; } grub_memcpy (data->inode, &fdiro->inode, sizeof (struct grub_ext2_inode)); grub_free (fdiro); file->size = grub_le_to_cpu32 (data->inode->size); file->data = data; file->offset = 0; return 0; fail: if (fdiro != &data->diropen) grub_free (fdiro); grub_free (data); grub_dl_unref (my_mod); return grub_errno; } static grub_err_t grub_ext2_close (grub_file_t file) { grub_free (file->data); grub_dl_unref (my_mod); return GRUB_ERR_NONE; } /* Read LEN bytes data from FILE into BUF. */ static grub_ssize_t grub_ext2_read (grub_file_t file, char *buf, grub_size_t len) { struct grub_ext2_data *data = (struct grub_ext2_data *) file->data; return grub_ext2_read_file (&data->diropen, file->read_hook, file->closure, file->flags, file->offset, len, buf); } struct grub_ext2_dir_closure { int (*hook) (const char *filename, const struct grub_dirhook_info *info, void *closure); void *closure; struct grub_ext2_data *data; }; static int iterate (const char *filename, enum grub_fshelp_filetype filetype, grub_fshelp_node_t node, void *closure) { struct grub_ext2_dir_closure *c = closure; struct grub_dirhook_info info; grub_memset (&info, 0, sizeof (info)); if (! node->inode_read) { grub_ext2_read_inode (c->data, node->ino, &node->inode); if (!grub_errno) node->inode_read = 1; grub_errno = GRUB_ERR_NONE; } if (node->inode_read) { info.mtimeset = 1; info.mtime = grub_le_to_cpu32 (node->inode.mtime); } info.dir = ((filetype & GRUB_FSHELP_TYPE_MASK) == GRUB_FSHELP_DIR); grub_free (node); return (c->hook != NULL)? 
c->hook (filename, &info, c->closure): 0; } static grub_err_t grub_ext2_dir (grub_device_t device, const char *path, int (*hook) (const char *filename, const struct grub_dirhook_info *info, void *closure), void *closure) { struct grub_ext2_data *data = 0; struct grub_fshelp_node *fdiro = 0; struct grub_ext2_dir_closure c; grub_dl_ref (my_mod); data = grub_ext2_mount (device->disk); if (! data) goto fail; grub_fshelp_find_file (path, &data->diropen, &fdiro, grub_ext2_iterate_dir, 0, grub_ext2_read_symlink, GRUB_FSHELP_DIR); if (grub_errno) goto fail; c.hook = hook; c.closure = closure; c.data = data; grub_ext2_iterate_dir (fdiro, iterate, &c); fail: if (fdiro != &data->diropen) grub_free (fdiro); grub_free (data); grub_dl_unref (my_mod); return grub_errno; } static grub_err_t grub_ext2_label (grub_device_t device, char **label) { struct grub_ext2_data *data; grub_disk_t disk = device->disk; grub_dl_ref (my_mod); data = grub_ext2_mount (disk); if (data) *label = grub_strndup (data->sblock.volume_name, 14); else *label = NULL; grub_dl_unref (my_mod); grub_free (data); return grub_errno; } static grub_err_t grub_ext2_uuid (grub_device_t device, char **uuid) { struct grub_ext2_data *data; grub_disk_t disk = device->disk; grub_dl_ref (my_mod); data = grub_ext2_mount (disk); if (data) { *uuid = grub_xasprintf ("%04x%04x-%04x-%04x-%04x-%04x%04x%04x", grub_be_to_cpu16 (data->sblock.uuid[0]), grub_be_to_cpu16 (data->sblock.uuid[1]), grub_be_to_cpu16 (data->sblock.uuid[2]), grub_be_to_cpu16 (data->sblock.uuid[3]), grub_be_to_cpu16 (data->sblock.uuid[4]), grub_be_to_cpu16 (data->sblock.uuid[5]), grub_be_to_cpu16 (data->sblock.uuid[6]), grub_be_to_cpu16 (data->sblock.uuid[7])); } else *uuid = NULL; grub_dl_unref (my_mod); grub_free (data); return grub_errno; } /* Get mtime. */ static grub_err_t grub_ext2_mtime (grub_device_t device, grub_int32_t *tm) { struct grub_ext2_data *data; grub_disk_t disk = device->disk; grub_dl_ref (my_mod); data = grub_ext2_mount (disk); if (!data) *tm = 0; else *tm = grub_le_to_cpu32 (data->sblock.utime); grub_dl_unref (my_mod); grub_free (data); return grub_errno; } struct grub_fs grub_ext2_fs = { .name = "ext2", .dir = grub_ext2_dir, .open = grub_ext2_open, .read = grub_ext2_read, .close = grub_ext2_close, .label = grub_ext2_label, .uuid = grub_ext2_uuid, .mtime = grub_ext2_mtime, #ifdef GRUB_UTIL .reserved_first_sector = 1, #endif .next = 0 };
grub_ext2_read_block (grub_fshelp_node_t node, grub_disk_addr_t fileblock) { struct grub_ext2_data *data = node->data; struct grub_ext2_inode *inode = &node->inode; int blknr = -1; unsigned int blksz = EXT2_BLOCK_SIZE (data); int log2_blksz = LOG2_EXT2_BLOCK_SIZE (data); if (grub_le_to_cpu32(inode->flags) & EXT4_EXTENTS_FLAG) { #ifndef _MSC_VER char buf[EXT2_BLOCK_SIZE (data)]; #else char * buf = grub_malloc (EXT2_BLOCK_SIZE(data)); #endif struct grub_ext4_extent_header *leaf; struct grub_ext4_extent *ext; int i; leaf = grub_ext4_find_leaf (data, buf, (struct grub_ext4_extent_header *) inode->blocks.dir_blocks, fileblock); if (! leaf) { grub_error (GRUB_ERR_BAD_FS, "invalid extent"); return -1; } ext = (struct grub_ext4_extent *) (leaf + 1); for (i = 0; i < grub_le_to_cpu16 (leaf->entries); i++) { if (fileblock < grub_le_to_cpu32 (ext[i].block)) break; } if (--i >= 0) { fileblock -= grub_le_to_cpu32 (ext[i].block); if (fileblock >= grub_le_to_cpu16 (ext[i].len)) return 0; else { grub_disk_addr_t start; start = grub_le_to_cpu16 (ext[i].start_hi); start = (start << 32) + grub_le_to_cpu32 (ext[i].start); return fileblock + start; } } else { grub_error (GRUB_ERR_BAD_FS, "something wrong with extent"); return -1; } } /* Direct blocks. */ if (fileblock < INDIRECT_BLOCKS) { blknr = grub_le_to_cpu32 (inode->blocks.dir_blocks[fileblock]); /* Indirect. */ } else if (fileblock < INDIRECT_BLOCKS + blksz / 4) { grub_uint32_t *indir; indir = grub_malloc (blksz); if (! indir) return grub_errno; if (grub_disk_read (data->disk, ((grub_disk_addr_t) grub_le_to_cpu32 (inode->blocks.indir_block)) << log2_blksz, 0, blksz, indir)) return grub_errno; blknr = grub_le_to_cpu32 (indir[fileblock - INDIRECT_BLOCKS]); grub_free (indir); } /* Double indirect. */ else if (fileblock < (grub_disk_addr_t)(INDIRECT_BLOCKS + blksz / 4) \ * (grub_disk_addr_t)(blksz / 4 + 1)) { unsigned int perblock = blksz / 4; unsigned int rblock = fileblock - (INDIRECT_BLOCKS + blksz / 4); grub_uint32_t *indir; indir = grub_malloc (blksz); if (! indir) return grub_errno; if (grub_disk_read (data->disk, ((grub_disk_addr_t) grub_le_to_cpu32 (inode->blocks.double_indir_block)) << log2_blksz, 0, blksz, indir)) return grub_errno; if (grub_disk_read (data->disk, ((grub_disk_addr_t) grub_le_to_cpu32 (indir[rblock / perblock])) << log2_blksz, 0, blksz, indir)) return grub_errno; blknr = grub_le_to_cpu32 (indir[rblock % perblock]); grub_free (indir); } /* triple indirect. */ else { grub_error (GRUB_ERR_NOT_IMPLEMENTED_YET, "ext2fs doesn't support triple indirect blocks"); } return blknr; }
grub_ext2_read_block (grub_fshelp_node_t node, grub_disk_addr_t fileblock) { struct grub_ext2_data *data = node->data; struct grub_ext2_inode *inode = &node->inode; int blknr = -1; unsigned int blksz = EXT2_BLOCK_SIZE (data); int log2_blksz = LOG2_EXT2_BLOCK_SIZE (data); if (grub_le_to_cpu32(inode->flags) & EXT4_EXTENTS_FLAG) { char * buf = grub_malloc (EXT2_BLOCK_SIZE (data)); if (!buf) { return -1; } struct grub_ext4_extent_header *leaf; struct grub_ext4_extent *ext; int i; leaf = grub_ext4_find_leaf (data, buf, (struct grub_ext4_extent_header *) inode->blocks.dir_blocks, fileblock); if (! leaf) { grub_error (GRUB_ERR_BAD_FS, "invalid extent"); free (buf); return -1; } ext = (struct grub_ext4_extent *) (leaf + 1); for (i = 0; i < grub_le_to_cpu16 (leaf->entries); i++) { if (fileblock < grub_le_to_cpu32 (ext[i].block)) break; } if (--i >= 0) { fileblock -= grub_le_to_cpu32 (ext[i].block); if (fileblock >= grub_le_to_cpu16 (ext[i].len)) { free (buf); return 0; } else { grub_disk_addr_t start; start = grub_le_to_cpu16 (ext[i].start_hi); start = (start << 32) + grub_le_to_cpu32 (ext[i].start); free (buf); return fileblock + start; } } else { grub_error (GRUB_ERR_BAD_FS, "something wrong with extent"); free (buf); return -1; } free (buf); } /* Direct blocks. */ if (fileblock < INDIRECT_BLOCKS) { blknr = grub_le_to_cpu32 (inode->blocks.dir_blocks[fileblock]); /* Indirect. */ } else if (fileblock < INDIRECT_BLOCKS + blksz / 4) { grub_uint32_t *indir; indir = grub_malloc (blksz); if (! indir) { return grub_errno; } if (grub_disk_read (data->disk, ((grub_disk_addr_t) grub_le_to_cpu32 (inode->blocks.indir_block)) << log2_blksz, 0, blksz, indir)) { return grub_errno; } blknr = grub_le_to_cpu32 (indir[fileblock - INDIRECT_BLOCKS]); grub_free (indir); } /* Double indirect. */ else if (fileblock < (grub_disk_addr_t)(INDIRECT_BLOCKS + blksz / 4) \ * (grub_disk_addr_t)(blksz / 4 + 1)) { unsigned int perblock = blksz / 4; unsigned int rblock = fileblock - (INDIRECT_BLOCKS + blksz / 4); grub_uint32_t *indir; indir = grub_malloc (blksz); if (! indir) { return grub_errno; } if (grub_disk_read (data->disk, ((grub_disk_addr_t) grub_le_to_cpu32 (inode->blocks.double_indir_block)) << log2_blksz, 0, blksz, indir)) { return grub_errno; } if (grub_disk_read (data->disk, ((grub_disk_addr_t) grub_le_to_cpu32 (indir[rblock / perblock])) << log2_blksz, 0, blksz, indir)) { return grub_errno; } blknr = grub_le_to_cpu32 (indir[rblock % perblock]); grub_free (indir); } /* triple indirect. */ else { grub_error (GRUB_ERR_NOT_IMPLEMENTED_YET, "ext2fs doesn't support triple indirect blocks"); } return blknr; }
{'added': [(45, '#include <stdlib.h>'), (371, ' 0, EXT2_BLOCK_SIZE(data), buf)) {'), (373, ' }'), (390, '\t char * buf = grub_malloc (EXT2_BLOCK_SIZE (data));'), (391, ' if (!buf) {'), (392, ' return -1;'), (393, ' }'), (404, '\t free (buf);'), (418, ' if (fileblock >= grub_le_to_cpu16 (ext[i].len)) {'), (419, ' \t free (buf);'), (421, ' } else'), (427, ' \t free (buf);'), (435, ' \t free (buf);'), (438, 'free (buf);'), (449, ' if (! indir) {'), (451, '}'), (457, '\t\t\t 0, blksz, indir)) {'), (459, '}'), (474, ' if (! indir) {'), (476, '}'), (482, '\t\t\t 0, blksz, indir)) {'), (484, '}'), (490, '\t\t\t 0, blksz, indir)) {'), (492, '}')], 'deleted': [(370, ' 0, EXT2_BLOCK_SIZE(data), buf))'), (388, '#ifndef _MSC_VER'), (389, '\t char buf[EXT2_BLOCK_SIZE (data)];'), (390, '#else'), (391, '\t char * buf = grub_malloc (EXT2_BLOCK_SIZE(data));'), (392, '#endif'), (416, ' if (fileblock >= grub_le_to_cpu16 (ext[i].len))'), (418, ' else'), (443, ' if (! indir)'), (450, '\t\t\t 0, blksz, indir))'), (466, ' if (! indir)'), (473, '\t\t\t 0, blksz, indir))'), (480, '\t\t\t 0, blksz, indir))')]}
24
13
707
3520
https://github.com/radare/radare2
CVE-2017-9763
['CWE-119']
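The CVE-2017-9763 record above covers radare2's bundled copy of the GRUB ext2 driver. Its diff replaces the variable-length stack buffer `char buf[EXT2_BLOCK_SIZE (data)]` in grub_ext2_read_block() with a heap allocation plus a NULL check, and releases the buffer on each extent-path exit. EXT2_BLOCK_SIZE() expands to 1 << (log2_block_size + 10), and log2_block_size comes straight from the untrusted on-disk superblock, so a crafted image controls the VLA size and can overflow the stack — the CWE-119 class. Below is a minimal self-contained C sketch of the pattern under stated assumptions; the names (read_block_vla, read_block_heap, MAX_BLOCK_SIZE) are hypothetical and do not appear in the GRUB or radare2 sources.

```c
/*
 * Sketch of the CWE-119 pattern fixed in the record above -- hypothetical
 * names, not the GRUB/radare2 code. An attacker-controlled size must not
 * feed a stack VLA; allocate on the heap, bound the size, check for NULL.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MAX_BLOCK_SIZE (1u << 16)   /* illustrative sanity bound */

/* Vulnerable shape: the size comes straight from untrusted metadata. */
static int read_block_vla(uint32_t log2_block_size)
{
    char buf[1u << (log2_block_size + 10)];  /* stack overflow if large */
    memset(buf, 0, sizeof buf);
    return 0;
}

/* Hardened shape, mirroring the recorded fix plus a range check. */
static int read_block_heap(uint32_t log2_block_size)
{
    uint64_t blksz = 1ull << (log2_block_size + 10);
    if (blksz > MAX_BLOCK_SIZE)
        return -1;                   /* reject absurd on-disk values */
    char *buf = malloc(blksz);
    if (!buf)
        return -1;                   /* the NULL check the fix added */
    memset(buf, 0, blksz);
    free(buf);                       /* exactly one release per path */
    return 0;
}

int main(void)
{
    /* Safe here only because the argument is small and trusted. */
    printf("vla path,  4 KiB block: %d\n", read_block_vla(2));
    printf("heap path, 4 KiB block: %d\n", read_block_heap(2));
    printf("heap path, absurd size: %d\n", read_block_heap(40));
    return 0;
}
```

Two caveats about the recorded fix itself, left verbatim in the data above: it releases the grub_malloc'd buffer with plain free() (benign in a hosted build where grub_malloc maps to malloc, but inconsistent with the grub_free() used elsewhere in the file), and the trailing `free (buf);` at the end of the extents branch is unreachable because every path before it returns. The sketch instead pairs each allocation with exactly one release and adds an explicit upper bound on the untrusted size.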
ext2.c
grub_ext4_find_leaf
/* ext2.c - Second Extended filesystem */ /* * GRUB -- GRand Unified Bootloader * Copyright (C) 2003,2004,2005,2007,2008,2009 Free Software Foundation, Inc. * * GRUB is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * GRUB is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with GRUB. If not, see <http://www.gnu.org/licenses/>. */ /* Magic value used to identify an ext2 filesystem. */ #define EXT2_MAGIC 0xEF53 /* Amount of indirect blocks in an inode. */ #define INDIRECT_BLOCKS 12 /* Maximum length of a pathname. */ #define EXT2_PATH_MAX 4096 /* Maximum nesting of symlinks, used to prevent a loop. */ #define EXT2_MAX_SYMLINKCNT 8 /* The good old revision and the default inode size. */ #define EXT2_GOOD_OLD_REVISION 0 #define EXT2_GOOD_OLD_INODE_SIZE 128 /* Filetype used in directory entry. */ #define FILETYPE_UNKNOWN 0 #define FILETYPE_REG 1 #define FILETYPE_DIRECTORY 2 #define FILETYPE_SYMLINK 7 /* Filetype information as used in inodes. */ #define FILETYPE_INO_MASK 0170000 #define FILETYPE_INO_REG 0100000 #define FILETYPE_INO_DIRECTORY 0040000 #define FILETYPE_INO_SYMLINK 0120000 #include <grub/err.h> #include <grub/file.h> #include <grub/mm.h> #include <grub/misc.h> #include <grub/disk.h> #include <grub/dl.h> #include <grub/types.h> #include <grub/fshelp.h> /* Log2 size of ext2 block in 512 blocks. */ #define LOG2_EXT2_BLOCK_SIZE(data) \ (grub_le_to_cpu32 (data->sblock.log2_block_size) + 1) /* Log2 size of ext2 block in bytes. */ #define LOG2_BLOCK_SIZE(data) \ (grub_le_to_cpu32 (data->sblock.log2_block_size) + 10) /* The size of an ext2 block in bytes. */ #define EXT2_BLOCK_SIZE(data) (1 << LOG2_BLOCK_SIZE (data)) /* The revision level. */ #define EXT2_REVISION(data) grub_le_to_cpu32 (data->sblock.revision_level) /* The inode size. */ #define EXT2_INODE_SIZE(data) \ (EXT2_REVISION (data) == EXT2_GOOD_OLD_REVISION \ ? EXT2_GOOD_OLD_INODE_SIZE \ : grub_le_to_cpu16 (data->sblock.inode_size)) /* Superblock filesystem feature flags (RW compatible) * A filesystem with any of these enabled can be read and written by a driver * that does not understand them without causing metadata/data corruption. */ #define EXT2_FEATURE_COMPAT_DIR_PREALLOC 0x0001 #define EXT2_FEATURE_COMPAT_IMAGIC_INODES 0x0002 #define EXT3_FEATURE_COMPAT_HAS_JOURNAL 0x0004 #define EXT2_FEATURE_COMPAT_EXT_ATTR 0x0008 #define EXT2_FEATURE_COMPAT_RESIZE_INODE 0x0010 #define EXT2_FEATURE_COMPAT_DIR_INDEX 0x0020 /* Superblock filesystem feature flags (RO compatible) * A filesystem with any of these enabled can be safely read by a driver that * does not understand them, but should not be written to, usually because * additional metadata is required. 
*/ #define EXT2_FEATURE_RO_COMPAT_SPARSE_SUPER 0x0001 #define EXT2_FEATURE_RO_COMPAT_LARGE_FILE 0x0002 #define EXT2_FEATURE_RO_COMPAT_BTREE_DIR 0x0004 #define EXT4_FEATURE_RO_COMPAT_GDT_CSUM 0x0010 #define EXT4_FEATURE_RO_COMPAT_DIR_NLINK 0x0020 #define EXT4_FEATURE_RO_COMPAT_EXTRA_ISIZE 0x0040 /* Superblock filesystem feature flags (back-incompatible) * A filesystem with any of these enabled should not be attempted to be read * by a driver that does not understand them, since they usually indicate * metadata format changes that might confuse the reader. */ #define EXT2_FEATURE_INCOMPAT_COMPRESSION 0x0001 #define EXT2_FEATURE_INCOMPAT_FILETYPE 0x0002 #define EXT3_FEATURE_INCOMPAT_RECOVER 0x0004 /* Needs recovery */ #define EXT3_FEATURE_INCOMPAT_JOURNAL_DEV 0x0008 /* Volume is journal device */ #define EXT2_FEATURE_INCOMPAT_META_BG 0x0010 #define EXT4_FEATURE_INCOMPAT_EXTENTS 0x0040 /* Extents used */ #define EXT4_FEATURE_INCOMPAT_64BIT 0x0080 #define EXT4_FEATURE_INCOMPAT_FLEX_BG 0x0200 /* The set of back-incompatible features this driver DOES support. Add (OR) * flags here as the related features are implemented into the driver. */ #define EXT2_DRIVER_SUPPORTED_INCOMPAT ( EXT2_FEATURE_INCOMPAT_FILETYPE \ | EXT4_FEATURE_INCOMPAT_EXTENTS \ | EXT4_FEATURE_INCOMPAT_FLEX_BG ) /* List of rationales for the ignored "incompatible" features: * needs_recovery: Not really back-incompatible - was added as such to forbid * ext2 drivers from mounting an ext3 volume with a dirty * journal because they will ignore the journal, but the next * ext3 driver to mount the volume will find the journal and * replay it, potentially corrupting the metadata written by * the ext2 drivers. Safe to ignore for this RO driver. */ #define EXT2_DRIVER_IGNORED_INCOMPAT ( EXT3_FEATURE_INCOMPAT_RECOVER ) #define EXT3_JOURNAL_MAGIC_NUMBER 0xc03b3998U #define EXT3_JOURNAL_DESCRIPTOR_BLOCK 1 #define EXT3_JOURNAL_COMMIT_BLOCK 2 #define EXT3_JOURNAL_SUPERBLOCK_V1 3 #define EXT3_JOURNAL_SUPERBLOCK_V2 4 #define EXT3_JOURNAL_REVOKE_BLOCK 5 #define EXT3_JOURNAL_FLAG_ESCAPE 1 #define EXT3_JOURNAL_FLAG_SAME_UUID 2 #define EXT3_JOURNAL_FLAG_DELETED 4 #define EXT3_JOURNAL_FLAG_LAST_TAG 8 #define EXT4_EXTENTS_FLAG 0x80000 /* The ext2 superblock. 
*/ struct grub_ext2_sblock { grub_uint32_t total_inodes; grub_uint32_t total_blocks; grub_uint32_t reserved_blocks; grub_uint32_t free_blocks; grub_uint32_t free_inodes; grub_uint32_t first_data_block; grub_uint32_t log2_block_size; grub_uint32_t log2_fragment_size; grub_uint32_t blocks_per_group; grub_uint32_t fragments_per_group; grub_uint32_t inodes_per_group; grub_uint32_t mtime; grub_uint32_t utime; grub_uint16_t mnt_count; grub_uint16_t max_mnt_count; grub_uint16_t magic; grub_uint16_t fs_state; grub_uint16_t error_handling; grub_uint16_t minor_revision_level; grub_uint32_t lastcheck; grub_uint32_t checkinterval; grub_uint32_t creator_os; grub_uint32_t revision_level; grub_uint16_t uid_reserved; grub_uint16_t gid_reserved; grub_uint32_t first_inode; grub_uint16_t inode_size; grub_uint16_t block_group_number; grub_uint32_t feature_compatibility; grub_uint32_t feature_incompat; grub_uint32_t feature_ro_compat; grub_uint16_t uuid[8]; char volume_name[16]; char last_mounted_on[64]; grub_uint32_t compression_info; grub_uint8_t prealloc_blocks; grub_uint8_t prealloc_dir_blocks; grub_uint16_t reserved_gdt_blocks; grub_uint8_t journal_uuid[16]; grub_uint32_t journal_inum; grub_uint32_t journal_dev; grub_uint32_t last_orphan; grub_uint32_t hash_seed[4]; grub_uint8_t def_hash_version; grub_uint8_t jnl_backup_type; grub_uint16_t reserved_word_pad; grub_uint32_t default_mount_opts; grub_uint32_t first_meta_bg; grub_uint32_t mkfs_time; grub_uint32_t jnl_blocks[17]; }; /* The ext2 blockgroup. */ struct grub_ext2_block_group { grub_uint32_t block_id; grub_uint32_t inode_id; grub_uint32_t inode_table_id; grub_uint16_t free_blocks; grub_uint16_t free_inodes; grub_uint16_t used_dirs; grub_uint16_t pad; grub_uint32_t reserved[3]; }; /* The ext2 inode. */ struct grub_ext2_inode { grub_uint16_t mode; grub_uint16_t uid; grub_uint32_t size; grub_uint32_t atime; grub_uint32_t ctime; grub_uint32_t mtime; grub_uint32_t dtime; grub_uint16_t gid; grub_uint16_t nlinks; grub_uint32_t blockcnt; /* Blocks of 512 bytes!! */ grub_uint32_t flags; grub_uint32_t osd1; union { struct datablocks { grub_uint32_t dir_blocks[INDIRECT_BLOCKS]; grub_uint32_t indir_block; grub_uint32_t double_indir_block; grub_uint32_t triple_indir_block; } blocks; char symlink[60]; }; grub_uint32_t version; grub_uint32_t acl; grub_uint32_t dir_acl; grub_uint32_t fragment_addr; grub_uint32_t osd2[3]; }; /* The header of an ext2 directory entry. 
*/ struct ext2_dirent { grub_uint32_t inode; grub_uint16_t direntlen; grub_uint8_t namelen; grub_uint8_t filetype; }; struct grub_ext3_journal_header { grub_uint32_t magic; grub_uint32_t block_type; grub_uint32_t sequence; }; struct grub_ext3_journal_revoke_header { struct grub_ext3_journal_header header; grub_uint32_t count; grub_uint32_t data[0]; }; struct grub_ext3_journal_block_tag { grub_uint32_t block; grub_uint32_t flags; }; struct grub_ext3_journal_sblock { struct grub_ext3_journal_header header; grub_uint32_t block_size; grub_uint32_t maxlen; grub_uint32_t first; grub_uint32_t sequence; grub_uint32_t start; }; #define EXT4_EXT_MAGIC 0xf30a struct grub_ext4_extent_header { grub_uint16_t magic; grub_uint16_t entries; grub_uint16_t max; grub_uint16_t depth; grub_uint32_t generation; }; struct grub_ext4_extent { grub_uint32_t block; grub_uint16_t len; grub_uint16_t start_hi; grub_uint32_t start; }; struct grub_ext4_extent_idx { grub_uint32_t block; grub_uint32_t leaf; grub_uint16_t leaf_hi; grub_uint16_t unused; }; struct grub_fshelp_node { struct grub_ext2_data *data; struct grub_ext2_inode inode; int ino; int inode_read; }; /* Information about a "mounted" ext2 filesystem. */ struct grub_ext2_data { struct grub_ext2_sblock sblock; grub_disk_t disk; struct grub_ext2_inode *inode; struct grub_fshelp_node diropen; }; static grub_dl_t my_mod; /* Read into BLKGRP the blockgroup descriptor of blockgroup GROUP of the mounted filesystem DATA. */ inline static grub_err_t grub_ext2_blockgroup (struct grub_ext2_data *data, int group, struct grub_ext2_block_group *blkgrp) { return grub_disk_read (data->disk, ((grub_le_to_cpu32 (data->sblock.first_data_block) + 1) << LOG2_EXT2_BLOCK_SIZE (data)), group * sizeof (struct grub_ext2_block_group), sizeof (struct grub_ext2_block_group), blkgrp); } static struct grub_ext4_extent_header * grub_ext4_find_leaf (struct grub_ext2_data *data, char *buf, struct grub_ext4_extent_header *ext_block, grub_uint32_t fileblock) { struct grub_ext4_extent_idx *index; while (1) { int i; grub_disk_addr_t block; index = (struct grub_ext4_extent_idx *) (ext_block + 1); if (grub_le_to_cpu16(ext_block->magic) != EXT4_EXT_MAGIC) return 0; if (ext_block->depth == 0) return ext_block; for (i = 0; i < grub_le_to_cpu16 (ext_block->entries); i++) { if (fileblock < grub_le_to_cpu32(index[i].block)) break; } if (--i < 0) return 0; block = grub_le_to_cpu16 (index[i].leaf_hi); block = (block << 32) + grub_le_to_cpu32 (index[i].leaf); if (grub_disk_read (data->disk, block << LOG2_EXT2_BLOCK_SIZE (data), 0, EXT2_BLOCK_SIZE(data), buf)) return 0; ext_block = (struct grub_ext4_extent_header *) buf; } } static grub_disk_addr_t grub_ext2_read_block (grub_fshelp_node_t node, grub_disk_addr_t fileblock) { struct grub_ext2_data *data = node->data; struct grub_ext2_inode *inode = &node->inode; int blknr = -1; unsigned int blksz = EXT2_BLOCK_SIZE (data); int log2_blksz = LOG2_EXT2_BLOCK_SIZE (data); if (grub_le_to_cpu32(inode->flags) & EXT4_EXTENTS_FLAG) { #ifndef _MSC_VER char buf[EXT2_BLOCK_SIZE (data)]; #else char * buf = grub_malloc (EXT2_BLOCK_SIZE(data)); #endif struct grub_ext4_extent_header *leaf; struct grub_ext4_extent *ext; int i; leaf = grub_ext4_find_leaf (data, buf, (struct grub_ext4_extent_header *) inode->blocks.dir_blocks, fileblock); if (! 
leaf) { grub_error (GRUB_ERR_BAD_FS, "invalid extent"); return -1; } ext = (struct grub_ext4_extent *) (leaf + 1); for (i = 0; i < grub_le_to_cpu16 (leaf->entries); i++) { if (fileblock < grub_le_to_cpu32 (ext[i].block)) break; } if (--i >= 0) { fileblock -= grub_le_to_cpu32 (ext[i].block); if (fileblock >= grub_le_to_cpu16 (ext[i].len)) return 0; else { grub_disk_addr_t start; start = grub_le_to_cpu16 (ext[i].start_hi); start = (start << 32) + grub_le_to_cpu32 (ext[i].start); return fileblock + start; } } else { grub_error (GRUB_ERR_BAD_FS, "something wrong with extent"); return -1; } } /* Direct blocks. */ if (fileblock < INDIRECT_BLOCKS) { blknr = grub_le_to_cpu32 (inode->blocks.dir_blocks[fileblock]); /* Indirect. */ } else if (fileblock < INDIRECT_BLOCKS + blksz / 4) { grub_uint32_t *indir; indir = grub_malloc (blksz); if (! indir) return grub_errno; if (grub_disk_read (data->disk, ((grub_disk_addr_t) grub_le_to_cpu32 (inode->blocks.indir_block)) << log2_blksz, 0, blksz, indir)) return grub_errno; blknr = grub_le_to_cpu32 (indir[fileblock - INDIRECT_BLOCKS]); grub_free (indir); } /* Double indirect. */ else if (fileblock < (grub_disk_addr_t)(INDIRECT_BLOCKS + blksz / 4) \ * (grub_disk_addr_t)(blksz / 4 + 1)) { unsigned int perblock = blksz / 4; unsigned int rblock = fileblock - (INDIRECT_BLOCKS + blksz / 4); grub_uint32_t *indir; indir = grub_malloc (blksz); if (! indir) return grub_errno; if (grub_disk_read (data->disk, ((grub_disk_addr_t) grub_le_to_cpu32 (inode->blocks.double_indir_block)) << log2_blksz, 0, blksz, indir)) return grub_errno; if (grub_disk_read (data->disk, ((grub_disk_addr_t) grub_le_to_cpu32 (indir[rblock / perblock])) << log2_blksz, 0, blksz, indir)) return grub_errno; blknr = grub_le_to_cpu32 (indir[rblock % perblock]); grub_free (indir); } /* triple indirect. */ else { grub_error (GRUB_ERR_NOT_IMPLEMENTED_YET, "ext2fs doesn't support triple indirect blocks"); } return blknr; } /* Read LEN bytes from the file described by DATA starting with byte POS. Return the amount of read bytes in READ. */ static grub_ssize_t grub_ext2_read_file (grub_fshelp_node_t node, void (*read_hook) (grub_disk_addr_t sector, unsigned offset, unsigned length, void *closure), void *closure, int flags, int pos, grub_size_t len, char *buf) { return grub_fshelp_read_file (node->data->disk, node, read_hook, closure, flags, pos, len, buf, grub_ext2_read_block, node->inode.size, LOG2_EXT2_BLOCK_SIZE (node->data)); } /* Read the inode INO for the file described by DATA into INODE. */ static grub_err_t grub_ext2_read_inode (struct grub_ext2_data *data, int ino, struct grub_ext2_inode *inode) { struct grub_ext2_block_group blkgrp; struct grub_ext2_sblock *sblock = &data->sblock; int inodes_per_block; unsigned int blkno; unsigned int blkoff; /* It is easier to calculate if the first inode is 0. */ ino--; int div = grub_le_to_cpu32 (sblock->inodes_per_group); if (div < 1) { return grub_errno = GRUB_ERR_BAD_FS; } grub_ext2_blockgroup (data, ino / div, &blkgrp); if (grub_errno) return grub_errno; int inode_size = EXT2_INODE_SIZE (data); if (inode_size < 1) { return grub_errno = GRUB_ERR_BAD_FS; } inodes_per_block = EXT2_BLOCK_SIZE (data) / inode_size; if (inodes_per_block < 1) { return grub_errno = GRUB_ERR_BAD_FS; } blkno = (ino % grub_le_to_cpu32 (sblock->inodes_per_group)) / inodes_per_block; blkoff = (ino % grub_le_to_cpu32 (sblock->inodes_per_group)) % inodes_per_block; /* Read the inode. 
*/ if (grub_disk_read (data->disk, ((grub_le_to_cpu32 (blkgrp.inode_table_id) + blkno) << LOG2_EXT2_BLOCK_SIZE (data)), EXT2_INODE_SIZE (data) * blkoff, sizeof (struct grub_ext2_inode), inode)) return grub_errno; return 0; } static struct grub_ext2_data * grub_ext2_mount (grub_disk_t disk) { struct grub_ext2_data *data; data = grub_malloc (sizeof (struct grub_ext2_data)); if (!data) return 0; /* Read the superblock. */ grub_disk_read (disk, 1 * 2, 0, sizeof (struct grub_ext2_sblock), &data->sblock); if (grub_errno) goto fail; /* Make sure this is an ext2 filesystem. */ if (grub_le_to_cpu16 (data->sblock.magic) != EXT2_MAGIC) { grub_error (GRUB_ERR_BAD_FS, "not an ext2 filesystem"); goto fail; } /* Check the FS doesn't have feature bits enabled that we don't support. */ if (grub_le_to_cpu32 (data->sblock.feature_incompat) & ~(EXT2_DRIVER_SUPPORTED_INCOMPAT | EXT2_DRIVER_IGNORED_INCOMPAT)) { grub_error (GRUB_ERR_BAD_FS, "filesystem has unsupported incompatible features"); goto fail; } data->disk = disk; data->diropen.data = data; data->diropen.ino = 2; data->diropen.inode_read = 1; data->inode = &data->diropen.inode; grub_ext2_read_inode (data, 2, data->inode); if (grub_errno) goto fail; return data; fail: if (grub_errno == GRUB_ERR_OUT_OF_RANGE) grub_error (GRUB_ERR_BAD_FS, "not an ext2 filesystem"); grub_free (data); return 0; } static char * grub_ext2_read_symlink (grub_fshelp_node_t node) { char *symlink; struct grub_fshelp_node *diro = node; if (! diro->inode_read) { grub_ext2_read_inode (diro->data, diro->ino, &diro->inode); if (grub_errno) return 0; } symlink = grub_malloc (grub_le_to_cpu32 (diro->inode.size) + 1); if (! symlink) return 0; /* If the filesize of the symlink is bigger than 60 the symlink is stored in a separate block, otherwise it is stored in the inode. */ if (grub_le_to_cpu32 (diro->inode.size) <= 60) grub_strncpy (symlink, diro->inode.symlink, grub_le_to_cpu32 (diro->inode.size)); else { grub_ext2_read_file (diro, 0, 0, 0, 0, grub_le_to_cpu32 (diro->inode.size), symlink); if (grub_errno) { grub_free (symlink); return 0; } } symlink[grub_le_to_cpu32 (diro->inode.size)] = '\0'; return symlink; } static int grub_ext2_iterate_dir (grub_fshelp_node_t dir, int (*hook) (const char *filename, enum grub_fshelp_filetype filetype, grub_fshelp_node_t node, void *closure), void *closure) { unsigned int fpos = 0; struct grub_fshelp_node *diro = (struct grub_fshelp_node *) dir; if (! diro->inode_read) { grub_ext2_read_inode (diro->data, diro->ino, &diro->inode); if (grub_errno) return 0; } /* Search the file. */ if (hook) while (fpos < grub_le_to_cpu32 (diro->inode.size)) { struct ext2_dirent dirent; grub_ext2_read_file (diro, NULL, NULL, 0, fpos, sizeof (dirent), (char *) &dirent); if (grub_errno) return 0; if (dirent.direntlen == 0) return 0; if (dirent.namelen != 0) { char * filename = grub_malloc (dirent.namelen + 1); struct grub_fshelp_node *fdiro; enum grub_fshelp_filetype type = GRUB_FSHELP_UNKNOWN; if (!filename) { break; } grub_ext2_read_file (diro, 0, 0, 0, fpos + sizeof (struct ext2_dirent), dirent.namelen, filename); if (grub_errno) { grub_free (filename); return 0; } fdiro = grub_malloc (sizeof (struct grub_fshelp_node)); if (! 
fdiro) { grub_free (filename); return 0; } fdiro->data = diro->data; fdiro->ino = grub_le_to_cpu32 (dirent.inode); filename[dirent.namelen] = '\0'; if (dirent.filetype != FILETYPE_UNKNOWN) { fdiro->inode_read = 0; if (dirent.filetype == FILETYPE_DIRECTORY) type = GRUB_FSHELP_DIR; else if (dirent.filetype == FILETYPE_SYMLINK) type = GRUB_FSHELP_SYMLINK; else if (dirent.filetype == FILETYPE_REG) type = GRUB_FSHELP_REG; } else { /* The filetype can not be read from the dirent, read the inode to get more information. */ grub_ext2_read_inode (diro->data, grub_le_to_cpu32 (dirent.inode), &fdiro->inode); if (grub_errno) { grub_free (filename); grub_free (fdiro); return 0; } fdiro->inode_read = 1; if ((grub_le_to_cpu16 (fdiro->inode.mode) & FILETYPE_INO_MASK) == FILETYPE_INO_DIRECTORY) type = GRUB_FSHELP_DIR; else if ((grub_le_to_cpu16 (fdiro->inode.mode) & FILETYPE_INO_MASK) == FILETYPE_INO_SYMLINK) type = GRUB_FSHELP_SYMLINK; else if ((grub_le_to_cpu16 (fdiro->inode.mode) & FILETYPE_INO_MASK) == FILETYPE_INO_REG) type = GRUB_FSHELP_REG; } if (hook (filename, type, fdiro, closure)) { grub_free (filename); return 1; } grub_free (filename); } fpos += grub_le_to_cpu16 (dirent.direntlen); } return 0; } /* Open a file named NAME and initialize FILE. */ static grub_err_t grub_ext2_open (struct grub_file *file, const char *name) { struct grub_ext2_data *data; struct grub_fshelp_node *fdiro = 0; grub_dl_ref (my_mod); data = grub_ext2_mount (file->device->disk); if (! data) goto fail; grub_fshelp_find_file (name, &data->diropen, &fdiro, grub_ext2_iterate_dir, 0, grub_ext2_read_symlink, GRUB_FSHELP_REG); if (grub_errno) goto fail; if (! fdiro->inode_read) { grub_ext2_read_inode (data, fdiro->ino, &fdiro->inode); if (grub_errno) goto fail; } grub_memcpy (data->inode, &fdiro->inode, sizeof (struct grub_ext2_inode)); grub_free (fdiro); file->size = grub_le_to_cpu32 (data->inode->size); file->data = data; file->offset = 0; return 0; fail: if (fdiro != &data->diropen) grub_free (fdiro); grub_free (data); grub_dl_unref (my_mod); return grub_errno; } static grub_err_t grub_ext2_close (grub_file_t file) { grub_free (file->data); grub_dl_unref (my_mod); return GRUB_ERR_NONE; } /* Read LEN bytes data from FILE into BUF. */ static grub_ssize_t grub_ext2_read (grub_file_t file, char *buf, grub_size_t len) { struct grub_ext2_data *data = (struct grub_ext2_data *) file->data; return grub_ext2_read_file (&data->diropen, file->read_hook, file->closure, file->flags, file->offset, len, buf); } struct grub_ext2_dir_closure { int (*hook) (const char *filename, const struct grub_dirhook_info *info, void *closure); void *closure; struct grub_ext2_data *data; }; static int iterate (const char *filename, enum grub_fshelp_filetype filetype, grub_fshelp_node_t node, void *closure) { struct grub_ext2_dir_closure *c = closure; struct grub_dirhook_info info; grub_memset (&info, 0, sizeof (info)); if (! node->inode_read) { grub_ext2_read_inode (c->data, node->ino, &node->inode); if (!grub_errno) node->inode_read = 1; grub_errno = GRUB_ERR_NONE; } if (node->inode_read) { info.mtimeset = 1; info.mtime = grub_le_to_cpu32 (node->inode.mtime); } info.dir = ((filetype & GRUB_FSHELP_TYPE_MASK) == GRUB_FSHELP_DIR); grub_free (node); return (c->hook != NULL)? 
c->hook (filename, &info, c->closure): 0; } static grub_err_t grub_ext2_dir (grub_device_t device, const char *path, int (*hook) (const char *filename, const struct grub_dirhook_info *info, void *closure), void *closure) { struct grub_ext2_data *data = 0; struct grub_fshelp_node *fdiro = 0; struct grub_ext2_dir_closure c; grub_dl_ref (my_mod); data = grub_ext2_mount (device->disk); if (! data) goto fail; grub_fshelp_find_file (path, &data->diropen, &fdiro, grub_ext2_iterate_dir, 0, grub_ext2_read_symlink, GRUB_FSHELP_DIR); if (grub_errno) goto fail; c.hook = hook; c.closure = closure; c.data = data; grub_ext2_iterate_dir (fdiro, iterate, &c); fail: if (fdiro != &data->diropen) grub_free (fdiro); grub_free (data); grub_dl_unref (my_mod); return grub_errno; } static grub_err_t grub_ext2_label (grub_device_t device, char **label) { struct grub_ext2_data *data; grub_disk_t disk = device->disk; grub_dl_ref (my_mod); data = grub_ext2_mount (disk); if (data) *label = grub_strndup (data->sblock.volume_name, 14); else *label = NULL; grub_dl_unref (my_mod); grub_free (data); return grub_errno; } static grub_err_t grub_ext2_uuid (grub_device_t device, char **uuid) { struct grub_ext2_data *data; grub_disk_t disk = device->disk; grub_dl_ref (my_mod); data = grub_ext2_mount (disk); if (data) { *uuid = grub_xasprintf ("%04x%04x-%04x-%04x-%04x-%04x%04x%04x", grub_be_to_cpu16 (data->sblock.uuid[0]), grub_be_to_cpu16 (data->sblock.uuid[1]), grub_be_to_cpu16 (data->sblock.uuid[2]), grub_be_to_cpu16 (data->sblock.uuid[3]), grub_be_to_cpu16 (data->sblock.uuid[4]), grub_be_to_cpu16 (data->sblock.uuid[5]), grub_be_to_cpu16 (data->sblock.uuid[6]), grub_be_to_cpu16 (data->sblock.uuid[7])); } else *uuid = NULL; grub_dl_unref (my_mod); grub_free (data); return grub_errno; } /* Get mtime. */ static grub_err_t grub_ext2_mtime (grub_device_t device, grub_int32_t *tm) { struct grub_ext2_data *data; grub_disk_t disk = device->disk; grub_dl_ref (my_mod); data = grub_ext2_mount (disk); if (!data) *tm = 0; else *tm = grub_le_to_cpu32 (data->sblock.utime); grub_dl_unref (my_mod); grub_free (data); return grub_errno; } struct grub_fs grub_ext2_fs = { .name = "ext2", .dir = grub_ext2_dir, .open = grub_ext2_open, .read = grub_ext2_read, .close = grub_ext2_close, .label = grub_ext2_label, .uuid = grub_ext2_uuid, .mtime = grub_ext2_mtime, #ifdef GRUB_UTIL .reserved_first_sector = 1, #endif .next = 0 };
/* ext2.c - Second Extended filesystem */ /* * GRUB -- GRand Unified Bootloader * Copyright (C) 2003,2004,2005,2007,2008,2009 Free Software Foundation, Inc. * * GRUB is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * GRUB is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with GRUB. If not, see <http://www.gnu.org/licenses/>. */ /* Magic value used to identify an ext2 filesystem. */ #define EXT2_MAGIC 0xEF53 /* Amount of indirect blocks in an inode. */ #define INDIRECT_BLOCKS 12 /* Maximum length of a pathname. */ #define EXT2_PATH_MAX 4096 /* Maximum nesting of symlinks, used to prevent a loop. */ #define EXT2_MAX_SYMLINKCNT 8 /* The good old revision and the default inode size. */ #define EXT2_GOOD_OLD_REVISION 0 #define EXT2_GOOD_OLD_INODE_SIZE 128 /* Filetype used in directory entry. */ #define FILETYPE_UNKNOWN 0 #define FILETYPE_REG 1 #define FILETYPE_DIRECTORY 2 #define FILETYPE_SYMLINK 7 /* Filetype information as used in inodes. */ #define FILETYPE_INO_MASK 0170000 #define FILETYPE_INO_REG 0100000 #define FILETYPE_INO_DIRECTORY 0040000 #define FILETYPE_INO_SYMLINK 0120000 #include <stdlib.h> #include <grub/err.h> #include <grub/file.h> #include <grub/mm.h> #include <grub/misc.h> #include <grub/disk.h> #include <grub/dl.h> #include <grub/types.h> #include <grub/fshelp.h> /* Log2 size of ext2 block in 512 blocks. */ #define LOG2_EXT2_BLOCK_SIZE(data) \ (grub_le_to_cpu32 (data->sblock.log2_block_size) + 1) /* Log2 size of ext2 block in bytes. */ #define LOG2_BLOCK_SIZE(data) \ (grub_le_to_cpu32 (data->sblock.log2_block_size) + 10) /* The size of an ext2 block in bytes. */ #define EXT2_BLOCK_SIZE(data) (1 << LOG2_BLOCK_SIZE (data)) /* The revision level. */ #define EXT2_REVISION(data) grub_le_to_cpu32 (data->sblock.revision_level) /* The inode size. */ #define EXT2_INODE_SIZE(data) \ (EXT2_REVISION (data) == EXT2_GOOD_OLD_REVISION \ ? EXT2_GOOD_OLD_INODE_SIZE \ : grub_le_to_cpu16 (data->sblock.inode_size)) /* Superblock filesystem feature flags (RW compatible) * A filesystem with any of these enabled can be read and written by a driver * that does not understand them without causing metadata/data corruption. */ #define EXT2_FEATURE_COMPAT_DIR_PREALLOC 0x0001 #define EXT2_FEATURE_COMPAT_IMAGIC_INODES 0x0002 #define EXT3_FEATURE_COMPAT_HAS_JOURNAL 0x0004 #define EXT2_FEATURE_COMPAT_EXT_ATTR 0x0008 #define EXT2_FEATURE_COMPAT_RESIZE_INODE 0x0010 #define EXT2_FEATURE_COMPAT_DIR_INDEX 0x0020 /* Superblock filesystem feature flags (RO compatible) * A filesystem with any of these enabled can be safely read by a driver that * does not understand them, but should not be written to, usually because * additional metadata is required. 
*/ #define EXT2_FEATURE_RO_COMPAT_SPARSE_SUPER 0x0001 #define EXT2_FEATURE_RO_COMPAT_LARGE_FILE 0x0002 #define EXT2_FEATURE_RO_COMPAT_BTREE_DIR 0x0004 #define EXT4_FEATURE_RO_COMPAT_GDT_CSUM 0x0010 #define EXT4_FEATURE_RO_COMPAT_DIR_NLINK 0x0020 #define EXT4_FEATURE_RO_COMPAT_EXTRA_ISIZE 0x0040 /* Superblock filesystem feature flags (back-incompatible) * A filesystem with any of these enabled should not be attempted to be read * by a driver that does not understand them, since they usually indicate * metadata format changes that might confuse the reader. */ #define EXT2_FEATURE_INCOMPAT_COMPRESSION 0x0001 #define EXT2_FEATURE_INCOMPAT_FILETYPE 0x0002 #define EXT3_FEATURE_INCOMPAT_RECOVER 0x0004 /* Needs recovery */ #define EXT3_FEATURE_INCOMPAT_JOURNAL_DEV 0x0008 /* Volume is journal device */ #define EXT2_FEATURE_INCOMPAT_META_BG 0x0010 #define EXT4_FEATURE_INCOMPAT_EXTENTS 0x0040 /* Extents used */ #define EXT4_FEATURE_INCOMPAT_64BIT 0x0080 #define EXT4_FEATURE_INCOMPAT_FLEX_BG 0x0200 /* The set of back-incompatible features this driver DOES support. Add (OR) * flags here as the related features are implemented into the driver. */ #define EXT2_DRIVER_SUPPORTED_INCOMPAT ( EXT2_FEATURE_INCOMPAT_FILETYPE \ | EXT4_FEATURE_INCOMPAT_EXTENTS \ | EXT4_FEATURE_INCOMPAT_FLEX_BG ) /* List of rationales for the ignored "incompatible" features: * needs_recovery: Not really back-incompatible - was added as such to forbid * ext2 drivers from mounting an ext3 volume with a dirty * journal because they will ignore the journal, but the next * ext3 driver to mount the volume will find the journal and * replay it, potentially corrupting the metadata written by * the ext2 drivers. Safe to ignore for this RO driver. */ #define EXT2_DRIVER_IGNORED_INCOMPAT ( EXT3_FEATURE_INCOMPAT_RECOVER ) #define EXT3_JOURNAL_MAGIC_NUMBER 0xc03b3998U #define EXT3_JOURNAL_DESCRIPTOR_BLOCK 1 #define EXT3_JOURNAL_COMMIT_BLOCK 2 #define EXT3_JOURNAL_SUPERBLOCK_V1 3 #define EXT3_JOURNAL_SUPERBLOCK_V2 4 #define EXT3_JOURNAL_REVOKE_BLOCK 5 #define EXT3_JOURNAL_FLAG_ESCAPE 1 #define EXT3_JOURNAL_FLAG_SAME_UUID 2 #define EXT3_JOURNAL_FLAG_DELETED 4 #define EXT3_JOURNAL_FLAG_LAST_TAG 8 #define EXT4_EXTENTS_FLAG 0x80000 /* The ext2 superblock. 
*/ struct grub_ext2_sblock { grub_uint32_t total_inodes; grub_uint32_t total_blocks; grub_uint32_t reserved_blocks; grub_uint32_t free_blocks; grub_uint32_t free_inodes; grub_uint32_t first_data_block; grub_uint32_t log2_block_size; grub_uint32_t log2_fragment_size; grub_uint32_t blocks_per_group; grub_uint32_t fragments_per_group; grub_uint32_t inodes_per_group; grub_uint32_t mtime; grub_uint32_t utime; grub_uint16_t mnt_count; grub_uint16_t max_mnt_count; grub_uint16_t magic; grub_uint16_t fs_state; grub_uint16_t error_handling; grub_uint16_t minor_revision_level; grub_uint32_t lastcheck; grub_uint32_t checkinterval; grub_uint32_t creator_os; grub_uint32_t revision_level; grub_uint16_t uid_reserved; grub_uint16_t gid_reserved; grub_uint32_t first_inode; grub_uint16_t inode_size; grub_uint16_t block_group_number; grub_uint32_t feature_compatibility; grub_uint32_t feature_incompat; grub_uint32_t feature_ro_compat; grub_uint16_t uuid[8]; char volume_name[16]; char last_mounted_on[64]; grub_uint32_t compression_info; grub_uint8_t prealloc_blocks; grub_uint8_t prealloc_dir_blocks; grub_uint16_t reserved_gdt_blocks; grub_uint8_t journal_uuid[16]; grub_uint32_t journal_inum; grub_uint32_t journal_dev; grub_uint32_t last_orphan; grub_uint32_t hash_seed[4]; grub_uint8_t def_hash_version; grub_uint8_t jnl_backup_type; grub_uint16_t reserved_word_pad; grub_uint32_t default_mount_opts; grub_uint32_t first_meta_bg; grub_uint32_t mkfs_time; grub_uint32_t jnl_blocks[17]; }; /* The ext2 blockgroup. */ struct grub_ext2_block_group { grub_uint32_t block_id; grub_uint32_t inode_id; grub_uint32_t inode_table_id; grub_uint16_t free_blocks; grub_uint16_t free_inodes; grub_uint16_t used_dirs; grub_uint16_t pad; grub_uint32_t reserved[3]; }; /* The ext2 inode. */ struct grub_ext2_inode { grub_uint16_t mode; grub_uint16_t uid; grub_uint32_t size; grub_uint32_t atime; grub_uint32_t ctime; grub_uint32_t mtime; grub_uint32_t dtime; grub_uint16_t gid; grub_uint16_t nlinks; grub_uint32_t blockcnt; /* Blocks of 512 bytes!! */ grub_uint32_t flags; grub_uint32_t osd1; union { struct datablocks { grub_uint32_t dir_blocks[INDIRECT_BLOCKS]; grub_uint32_t indir_block; grub_uint32_t double_indir_block; grub_uint32_t triple_indir_block; } blocks; char symlink[60]; }; grub_uint32_t version; grub_uint32_t acl; grub_uint32_t dir_acl; grub_uint32_t fragment_addr; grub_uint32_t osd2[3]; }; /* The header of an ext2 directory entry. 
*/ struct ext2_dirent { grub_uint32_t inode; grub_uint16_t direntlen; grub_uint8_t namelen; grub_uint8_t filetype; }; struct grub_ext3_journal_header { grub_uint32_t magic; grub_uint32_t block_type; grub_uint32_t sequence; }; struct grub_ext3_journal_revoke_header { struct grub_ext3_journal_header header; grub_uint32_t count; grub_uint32_t data[0]; }; struct grub_ext3_journal_block_tag { grub_uint32_t block; grub_uint32_t flags; }; struct grub_ext3_journal_sblock { struct grub_ext3_journal_header header; grub_uint32_t block_size; grub_uint32_t maxlen; grub_uint32_t first; grub_uint32_t sequence; grub_uint32_t start; }; #define EXT4_EXT_MAGIC 0xf30a struct grub_ext4_extent_header { grub_uint16_t magic; grub_uint16_t entries; grub_uint16_t max; grub_uint16_t depth; grub_uint32_t generation; }; struct grub_ext4_extent { grub_uint32_t block; grub_uint16_t len; grub_uint16_t start_hi; grub_uint32_t start; }; struct grub_ext4_extent_idx { grub_uint32_t block; grub_uint32_t leaf; grub_uint16_t leaf_hi; grub_uint16_t unused; }; struct grub_fshelp_node { struct grub_ext2_data *data; struct grub_ext2_inode inode; int ino; int inode_read; }; /* Information about a "mounted" ext2 filesystem. */ struct grub_ext2_data { struct grub_ext2_sblock sblock; grub_disk_t disk; struct grub_ext2_inode *inode; struct grub_fshelp_node diropen; }; static grub_dl_t my_mod; /* Read into BLKGRP the blockgroup descriptor of blockgroup GROUP of the mounted filesystem DATA. */ inline static grub_err_t grub_ext2_blockgroup (struct grub_ext2_data *data, int group, struct grub_ext2_block_group *blkgrp) { return grub_disk_read (data->disk, ((grub_le_to_cpu32 (data->sblock.first_data_block) + 1) << LOG2_EXT2_BLOCK_SIZE (data)), group * sizeof (struct grub_ext2_block_group), sizeof (struct grub_ext2_block_group), blkgrp); } static struct grub_ext4_extent_header * grub_ext4_find_leaf (struct grub_ext2_data *data, char *buf, struct grub_ext4_extent_header *ext_block, grub_uint32_t fileblock) { struct grub_ext4_extent_idx *index; while (1) { int i; grub_disk_addr_t block; index = (struct grub_ext4_extent_idx *) (ext_block + 1); if (grub_le_to_cpu16(ext_block->magic) != EXT4_EXT_MAGIC) return 0; if (ext_block->depth == 0) return ext_block; for (i = 0; i < grub_le_to_cpu16 (ext_block->entries); i++) { if (fileblock < grub_le_to_cpu32(index[i].block)) break; } if (--i < 0) return 0; block = grub_le_to_cpu16 (index[i].leaf_hi); block = (block << 32) + grub_le_to_cpu32 (index[i].leaf); if (grub_disk_read (data->disk, block << LOG2_EXT2_BLOCK_SIZE (data), 0, EXT2_BLOCK_SIZE(data), buf)) { return 0; } ext_block = (struct grub_ext4_extent_header *) buf; } } static grub_disk_addr_t grub_ext2_read_block (grub_fshelp_node_t node, grub_disk_addr_t fileblock) { struct grub_ext2_data *data = node->data; struct grub_ext2_inode *inode = &node->inode; int blknr = -1; unsigned int blksz = EXT2_BLOCK_SIZE (data); int log2_blksz = LOG2_EXT2_BLOCK_SIZE (data); if (grub_le_to_cpu32(inode->flags) & EXT4_EXTENTS_FLAG) { char * buf = grub_malloc (EXT2_BLOCK_SIZE (data)); if (!buf) { return -1; } struct grub_ext4_extent_header *leaf; struct grub_ext4_extent *ext; int i; leaf = grub_ext4_find_leaf (data, buf, (struct grub_ext4_extent_header *) inode->blocks.dir_blocks, fileblock); if (! 
leaf) { grub_error (GRUB_ERR_BAD_FS, "invalid extent"); free (buf); return -1; } ext = (struct grub_ext4_extent *) (leaf + 1); for (i = 0; i < grub_le_to_cpu16 (leaf->entries); i++) { if (fileblock < grub_le_to_cpu32 (ext[i].block)) break; } if (--i >= 0) { fileblock -= grub_le_to_cpu32 (ext[i].block); if (fileblock >= grub_le_to_cpu16 (ext[i].len)) { free (buf); return 0; } else { grub_disk_addr_t start; start = grub_le_to_cpu16 (ext[i].start_hi); start = (start << 32) + grub_le_to_cpu32 (ext[i].start); free (buf); return fileblock + start; } } else { grub_error (GRUB_ERR_BAD_FS, "something wrong with extent"); free (buf); return -1; } free (buf); } /* Direct blocks. */ if (fileblock < INDIRECT_BLOCKS) { blknr = grub_le_to_cpu32 (inode->blocks.dir_blocks[fileblock]); /* Indirect. */ } else if (fileblock < INDIRECT_BLOCKS + blksz / 4) { grub_uint32_t *indir; indir = grub_malloc (blksz); if (! indir) { return grub_errno; } if (grub_disk_read (data->disk, ((grub_disk_addr_t) grub_le_to_cpu32 (inode->blocks.indir_block)) << log2_blksz, 0, blksz, indir)) { return grub_errno; } blknr = grub_le_to_cpu32 (indir[fileblock - INDIRECT_BLOCKS]); grub_free (indir); } /* Double indirect. */ else if (fileblock < (grub_disk_addr_t)(INDIRECT_BLOCKS + blksz / 4) \ * (grub_disk_addr_t)(blksz / 4 + 1)) { unsigned int perblock = blksz / 4; unsigned int rblock = fileblock - (INDIRECT_BLOCKS + blksz / 4); grub_uint32_t *indir; indir = grub_malloc (blksz); if (! indir) { return grub_errno; } if (grub_disk_read (data->disk, ((grub_disk_addr_t) grub_le_to_cpu32 (inode->blocks.double_indir_block)) << log2_blksz, 0, blksz, indir)) { return grub_errno; } if (grub_disk_read (data->disk, ((grub_disk_addr_t) grub_le_to_cpu32 (indir[rblock / perblock])) << log2_blksz, 0, blksz, indir)) { return grub_errno; } blknr = grub_le_to_cpu32 (indir[rblock % perblock]); grub_free (indir); } /* triple indirect. */ else { grub_error (GRUB_ERR_NOT_IMPLEMENTED_YET, "ext2fs doesn't support triple indirect blocks"); } return blknr; } /* Read LEN bytes from the file described by DATA starting with byte POS. Return the amount of read bytes in READ. */ static grub_ssize_t grub_ext2_read_file (grub_fshelp_node_t node, void (*read_hook) (grub_disk_addr_t sector, unsigned offset, unsigned length, void *closure), void *closure, int flags, int pos, grub_size_t len, char *buf) { return grub_fshelp_read_file (node->data->disk, node, read_hook, closure, flags, pos, len, buf, grub_ext2_read_block, node->inode.size, LOG2_EXT2_BLOCK_SIZE (node->data)); } /* Read the inode INO for the file described by DATA into INODE. */ static grub_err_t grub_ext2_read_inode (struct grub_ext2_data *data, int ino, struct grub_ext2_inode *inode) { struct grub_ext2_block_group blkgrp; struct grub_ext2_sblock *sblock = &data->sblock; int inodes_per_block; unsigned int blkno; unsigned int blkoff; /* It is easier to calculate if the first inode is 0. */ ino--; int div = grub_le_to_cpu32 (sblock->inodes_per_group); if (div < 1) { return grub_errno = GRUB_ERR_BAD_FS; } grub_ext2_blockgroup (data, ino / div, &blkgrp); if (grub_errno) return grub_errno; int inode_size = EXT2_INODE_SIZE (data); if (inode_size < 1) { return grub_errno = GRUB_ERR_BAD_FS; } inodes_per_block = EXT2_BLOCK_SIZE (data) / inode_size; if (inodes_per_block < 1) { return grub_errno = GRUB_ERR_BAD_FS; } blkno = (ino % grub_le_to_cpu32 (sblock->inodes_per_group)) / inodes_per_block; blkoff = (ino % grub_le_to_cpu32 (sblock->inodes_per_group)) % inodes_per_block; /* Read the inode. 
*/ if (grub_disk_read (data->disk, ((grub_le_to_cpu32 (blkgrp.inode_table_id) + blkno) << LOG2_EXT2_BLOCK_SIZE (data)), EXT2_INODE_SIZE (data) * blkoff, sizeof (struct grub_ext2_inode), inode)) return grub_errno; return 0; } static struct grub_ext2_data * grub_ext2_mount (grub_disk_t disk) { struct grub_ext2_data *data; data = grub_malloc (sizeof (struct grub_ext2_data)); if (!data) return 0; /* Read the superblock. */ grub_disk_read (disk, 1 * 2, 0, sizeof (struct grub_ext2_sblock), &data->sblock); if (grub_errno) goto fail; /* Make sure this is an ext2 filesystem. */ if (grub_le_to_cpu16 (data->sblock.magic) != EXT2_MAGIC) { grub_error (GRUB_ERR_BAD_FS, "not an ext2 filesystem"); goto fail; } /* Check the FS doesn't have feature bits enabled that we don't support. */ if (grub_le_to_cpu32 (data->sblock.feature_incompat) & ~(EXT2_DRIVER_SUPPORTED_INCOMPAT | EXT2_DRIVER_IGNORED_INCOMPAT)) { grub_error (GRUB_ERR_BAD_FS, "filesystem has unsupported incompatible features"); goto fail; } data->disk = disk; data->diropen.data = data; data->diropen.ino = 2; data->diropen.inode_read = 1; data->inode = &data->diropen.inode; grub_ext2_read_inode (data, 2, data->inode); if (grub_errno) goto fail; return data; fail: if (grub_errno == GRUB_ERR_OUT_OF_RANGE) grub_error (GRUB_ERR_BAD_FS, "not an ext2 filesystem"); grub_free (data); return 0; } static char * grub_ext2_read_symlink (grub_fshelp_node_t node) { char *symlink; struct grub_fshelp_node *diro = node; if (! diro->inode_read) { grub_ext2_read_inode (diro->data, diro->ino, &diro->inode); if (grub_errno) return 0; } symlink = grub_malloc (grub_le_to_cpu32 (diro->inode.size) + 1); if (! symlink) return 0; /* If the filesize of the symlink is bigger than 60 the symlink is stored in a separate block, otherwise it is stored in the inode. */ if (grub_le_to_cpu32 (diro->inode.size) <= 60) grub_strncpy (symlink, diro->inode.symlink, grub_le_to_cpu32 (diro->inode.size)); else { grub_ext2_read_file (diro, 0, 0, 0, 0, grub_le_to_cpu32 (diro->inode.size), symlink); if (grub_errno) { grub_free (symlink); return 0; } } symlink[grub_le_to_cpu32 (diro->inode.size)] = '\0'; return symlink; } static int grub_ext2_iterate_dir (grub_fshelp_node_t dir, int (*hook) (const char *filename, enum grub_fshelp_filetype filetype, grub_fshelp_node_t node, void *closure), void *closure) { unsigned int fpos = 0; struct grub_fshelp_node *diro = (struct grub_fshelp_node *) dir; if (! diro->inode_read) { grub_ext2_read_inode (diro->data, diro->ino, &diro->inode); if (grub_errno) return 0; } /* Search the file. */ if (hook) while (fpos < grub_le_to_cpu32 (diro->inode.size)) { struct ext2_dirent dirent; grub_ext2_read_file (diro, NULL, NULL, 0, fpos, sizeof (dirent), (char *) &dirent); if (grub_errno) return 0; if (dirent.direntlen == 0) return 0; if (dirent.namelen != 0) { char * filename = grub_malloc (dirent.namelen + 1); struct grub_fshelp_node *fdiro; enum grub_fshelp_filetype type = GRUB_FSHELP_UNKNOWN; if (!filename) { break; } grub_ext2_read_file (diro, 0, 0, 0, fpos + sizeof (struct ext2_dirent), dirent.namelen, filename); if (grub_errno) { grub_free (filename); return 0; } fdiro = grub_malloc (sizeof (struct grub_fshelp_node)); if (! 
fdiro) { grub_free (filename); return 0; } fdiro->data = diro->data; fdiro->ino = grub_le_to_cpu32 (dirent.inode); filename[dirent.namelen] = '\0'; if (dirent.filetype != FILETYPE_UNKNOWN) { fdiro->inode_read = 0; if (dirent.filetype == FILETYPE_DIRECTORY) type = GRUB_FSHELP_DIR; else if (dirent.filetype == FILETYPE_SYMLINK) type = GRUB_FSHELP_SYMLINK; else if (dirent.filetype == FILETYPE_REG) type = GRUB_FSHELP_REG; } else { /* The filetype can not be read from the dirent, read the inode to get more information. */ grub_ext2_read_inode (diro->data, grub_le_to_cpu32 (dirent.inode), &fdiro->inode); if (grub_errno) { grub_free (filename); grub_free (fdiro); return 0; } fdiro->inode_read = 1; if ((grub_le_to_cpu16 (fdiro->inode.mode) & FILETYPE_INO_MASK) == FILETYPE_INO_DIRECTORY) type = GRUB_FSHELP_DIR; else if ((grub_le_to_cpu16 (fdiro->inode.mode) & FILETYPE_INO_MASK) == FILETYPE_INO_SYMLINK) type = GRUB_FSHELP_SYMLINK; else if ((grub_le_to_cpu16 (fdiro->inode.mode) & FILETYPE_INO_MASK) == FILETYPE_INO_REG) type = GRUB_FSHELP_REG; } if (hook (filename, type, fdiro, closure)) { grub_free (filename); return 1; } grub_free (filename); } fpos += grub_le_to_cpu16 (dirent.direntlen); } return 0; } /* Open a file named NAME and initialize FILE. */ static grub_err_t grub_ext2_open (struct grub_file *file, const char *name) { struct grub_ext2_data *data; struct grub_fshelp_node *fdiro = 0; grub_dl_ref (my_mod); data = grub_ext2_mount (file->device->disk); if (! data) goto fail; grub_fshelp_find_file (name, &data->diropen, &fdiro, grub_ext2_iterate_dir, 0, grub_ext2_read_symlink, GRUB_FSHELP_REG); if (grub_errno) goto fail; if (! fdiro->inode_read) { grub_ext2_read_inode (data, fdiro->ino, &fdiro->inode); if (grub_errno) goto fail; } grub_memcpy (data->inode, &fdiro->inode, sizeof (struct grub_ext2_inode)); grub_free (fdiro); file->size = grub_le_to_cpu32 (data->inode->size); file->data = data; file->offset = 0; return 0; fail: if (fdiro != &data->diropen) grub_free (fdiro); grub_free (data); grub_dl_unref (my_mod); return grub_errno; } static grub_err_t grub_ext2_close (grub_file_t file) { grub_free (file->data); grub_dl_unref (my_mod); return GRUB_ERR_NONE; } /* Read LEN bytes data from FILE into BUF. */ static grub_ssize_t grub_ext2_read (grub_file_t file, char *buf, grub_size_t len) { struct grub_ext2_data *data = (struct grub_ext2_data *) file->data; return grub_ext2_read_file (&data->diropen, file->read_hook, file->closure, file->flags, file->offset, len, buf); } struct grub_ext2_dir_closure { int (*hook) (const char *filename, const struct grub_dirhook_info *info, void *closure); void *closure; struct grub_ext2_data *data; }; static int iterate (const char *filename, enum grub_fshelp_filetype filetype, grub_fshelp_node_t node, void *closure) { struct grub_ext2_dir_closure *c = closure; struct grub_dirhook_info info; grub_memset (&info, 0, sizeof (info)); if (! node->inode_read) { grub_ext2_read_inode (c->data, node->ino, &node->inode); if (!grub_errno) node->inode_read = 1; grub_errno = GRUB_ERR_NONE; } if (node->inode_read) { info.mtimeset = 1; info.mtime = grub_le_to_cpu32 (node->inode.mtime); } info.dir = ((filetype & GRUB_FSHELP_TYPE_MASK) == GRUB_FSHELP_DIR); grub_free (node); return (c->hook != NULL)? 
c->hook (filename, &info, c->closure): 0; } static grub_err_t grub_ext2_dir (grub_device_t device, const char *path, int (*hook) (const char *filename, const struct grub_dirhook_info *info, void *closure), void *closure) { struct grub_ext2_data *data = 0; struct grub_fshelp_node *fdiro = 0; struct grub_ext2_dir_closure c; grub_dl_ref (my_mod); data = grub_ext2_mount (device->disk); if (! data) goto fail; grub_fshelp_find_file (path, &data->diropen, &fdiro, grub_ext2_iterate_dir, 0, grub_ext2_read_symlink, GRUB_FSHELP_DIR); if (grub_errno) goto fail; c.hook = hook; c.closure = closure; c.data = data; grub_ext2_iterate_dir (fdiro, iterate, &c); fail: if (fdiro != &data->diropen) grub_free (fdiro); grub_free (data); grub_dl_unref (my_mod); return grub_errno; } static grub_err_t grub_ext2_label (grub_device_t device, char **label) { struct grub_ext2_data *data; grub_disk_t disk = device->disk; grub_dl_ref (my_mod); data = grub_ext2_mount (disk); if (data) *label = grub_strndup (data->sblock.volume_name, 14); else *label = NULL; grub_dl_unref (my_mod); grub_free (data); return grub_errno; } static grub_err_t grub_ext2_uuid (grub_device_t device, char **uuid) { struct grub_ext2_data *data; grub_disk_t disk = device->disk; grub_dl_ref (my_mod); data = grub_ext2_mount (disk); if (data) { *uuid = grub_xasprintf ("%04x%04x-%04x-%04x-%04x-%04x%04x%04x", grub_be_to_cpu16 (data->sblock.uuid[0]), grub_be_to_cpu16 (data->sblock.uuid[1]), grub_be_to_cpu16 (data->sblock.uuid[2]), grub_be_to_cpu16 (data->sblock.uuid[3]), grub_be_to_cpu16 (data->sblock.uuid[4]), grub_be_to_cpu16 (data->sblock.uuid[5]), grub_be_to_cpu16 (data->sblock.uuid[6]), grub_be_to_cpu16 (data->sblock.uuid[7])); } else *uuid = NULL; grub_dl_unref (my_mod); grub_free (data); return grub_errno; } /* Get mtime. */ static grub_err_t grub_ext2_mtime (grub_device_t device, grub_int32_t *tm) { struct grub_ext2_data *data; grub_disk_t disk = device->disk; grub_dl_ref (my_mod); data = grub_ext2_mount (disk); if (!data) *tm = 0; else *tm = grub_le_to_cpu32 (data->sblock.utime); grub_dl_unref (my_mod); grub_free (data); return grub_errno; } struct grub_fs grub_ext2_fs = { .name = "ext2", .dir = grub_ext2_dir, .open = grub_ext2_open, .read = grub_ext2_read, .close = grub_ext2_close, .label = grub_ext2_label, .uuid = grub_ext2_uuid, .mtime = grub_ext2_mtime, #ifdef GRUB_UTIL .reserved_first_sector = 1, #endif .next = 0 };
grub_ext4_find_leaf (struct grub_ext2_data *data, char *buf, struct grub_ext4_extent_header *ext_block, grub_uint32_t fileblock) { struct grub_ext4_extent_idx *index; while (1) { int i; grub_disk_addr_t block; index = (struct grub_ext4_extent_idx *) (ext_block + 1); if (grub_le_to_cpu16(ext_block->magic) != EXT4_EXT_MAGIC) return 0; if (ext_block->depth == 0) return ext_block; for (i = 0; i < grub_le_to_cpu16 (ext_block->entries); i++) { if (fileblock < grub_le_to_cpu32(index[i].block)) break; } if (--i < 0) return 0; block = grub_le_to_cpu16 (index[i].leaf_hi); block = (block << 32) + grub_le_to_cpu32 (index[i].leaf); if (grub_disk_read (data->disk, block << LOG2_EXT2_BLOCK_SIZE (data), 0, EXT2_BLOCK_SIZE(data), buf)) return 0; ext_block = (struct grub_ext4_extent_header *) buf; } }
grub_ext4_find_leaf (struct grub_ext2_data *data, char *buf, struct grub_ext4_extent_header *ext_block, grub_uint32_t fileblock) { struct grub_ext4_extent_idx *index; while (1) { int i; grub_disk_addr_t block; index = (struct grub_ext4_extent_idx *) (ext_block + 1); if (grub_le_to_cpu16(ext_block->magic) != EXT4_EXT_MAGIC) return 0; if (ext_block->depth == 0) return ext_block; for (i = 0; i < grub_le_to_cpu16 (ext_block->entries); i++) { if (fileblock < grub_le_to_cpu32(index[i].block)) break; } if (--i < 0) return 0; block = grub_le_to_cpu16 (index[i].leaf_hi); block = (block << 32) + grub_le_to_cpu32 (index[i].leaf); if (grub_disk_read (data->disk, block << LOG2_EXT2_BLOCK_SIZE (data), 0, EXT2_BLOCK_SIZE(data), buf)) { return 0; } ext_block = (struct grub_ext4_extent_header *) buf; } }
{'added': [(45, '#include <stdlib.h>'), (371, ' 0, EXT2_BLOCK_SIZE(data), buf)) {'), (373, ' }'), (390, '\t char * buf = grub_malloc (EXT2_BLOCK_SIZE (data));'), (391, ' if (!buf) {'), (392, ' return -1;'), (393, ' }'), (404, '\t free (buf);'), (418, ' if (fileblock >= grub_le_to_cpu16 (ext[i].len)) {'), (419, ' \t free (buf);'), (421, ' } else'), (427, ' \t free (buf);'), (435, ' \t free (buf);'), (438, 'free (buf);'), (449, ' if (! indir) {'), (451, '}'), (457, '\t\t\t 0, blksz, indir)) {'), (459, '}'), (474, ' if (! indir) {'), (476, '}'), (482, '\t\t\t 0, blksz, indir)) {'), (484, '}'), (490, '\t\t\t 0, blksz, indir)) {'), (492, '}')], 'deleted': [(370, ' 0, EXT2_BLOCK_SIZE(data), buf))'), (388, '#ifndef _MSC_VER'), (389, '\t char buf[EXT2_BLOCK_SIZE (data)];'), (390, '#else'), (391, '\t char * buf = grub_malloc (EXT2_BLOCK_SIZE(data));'), (392, '#endif'), (416, ' if (fileblock >= grub_le_to_cpu16 (ext[i].len))'), (418, ' else'), (443, ' if (! indir)'), (450, '\t\t\t 0, blksz, indir))'), (466, ' if (! indir)'), (473, '\t\t\t 0, blksz, indir))'), (480, '\t\t\t 0, blksz, indir))')]}
24
13
707
3520
https://github.com/radare/radare2
CVE-2017-9763
['CWE-119']
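The change recorded in the record above is a classic CWE-119 remediation: a stack VLA sized by the untrusted on-disk field log2_block_size (via EXT2_BLOCK_SIZE) is replaced with a checked heap allocation that is released on every exit path, and each grub_disk_read error branch is braced so the buffer cannot leak. The following is a minimal sketch of that pattern only, assuming nothing beyond the C standard library; read_block_checked and block_size are illustrative names, not identifiers taken from the record.

    #include <stdlib.h>

    /* block_size would be EXT2_BLOCK_SIZE(data), i.e. derived from the
       superblock's log2_block_size and therefore attacker-controlled. */
    static int read_block_checked(size_t block_size)
    {
        char *buf = malloc(block_size);   /* was: char buf[block_size]; */
        if (buf == NULL)
            return -1;                    /* allocation failure is reported */
        /* ... fill buf via disk reads, freeing buf on every error path ... */
        free(buf);                        /* freed on the success path too */
        return 0;
    }

With the heap allocation, an oversized block size fails cleanly at malloc instead of overflowing the stack frame.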
ipcutils.c
get_sem_elements
#include <inttypes.h> #include "c.h" #include "nls.h" #include "xalloc.h" #include "path.h" #include "pathnames.h" #include "ipcutils.h" #include "strutils.h" #ifndef SEMVMX # define SEMVMX 32767 /* <= 32767 semaphore maximum value */ #endif #ifndef SHMMIN # define SHMMIN 1 /* min shared segment size in bytes */ #endif int ipc_msg_get_limits(struct ipc_limits *lim) { if (access(_PATH_PROC_IPC_MSGMNI, F_OK) == 0 && access(_PATH_PROC_IPC_MSGMNB, F_OK) == 0 && access(_PATH_PROC_IPC_MSGMAX, F_OK) == 0) { if (ul_path_read_s32(NULL, &lim->msgmni, _PATH_PROC_IPC_MSGMNI) != 0) return 1; if (ul_path_read_s32(NULL, &lim->msgmnb, _PATH_PROC_IPC_MSGMNB) != 0) return 1; if (ul_path_read_u64(NULL, &lim->msgmax, _PATH_PROC_IPC_MSGMAX) != 0) return 1; } else { struct msginfo msginfo; if (msgctl(0, IPC_INFO, (struct msqid_ds *) &msginfo) < 0) return 1; lim->msgmni = msginfo.msgmni; lim->msgmnb = msginfo.msgmnb; lim->msgmax = msginfo.msgmax; } return 0; } int ipc_sem_get_limits(struct ipc_limits *lim) { FILE *f; int rc = 0; lim->semvmx = SEMVMX; f = fopen(_PATH_PROC_IPC_SEM, "r"); if (f) { rc = fscanf(f, "%d\t%d\t%d\t%d", &lim->semmsl, &lim->semmns, &lim->semopm, &lim->semmni); fclose(f); } if (rc != 4) { struct seminfo seminfo = { .semmni = 0 }; union semun arg = { .array = (ushort *) &seminfo }; if (semctl(0, 0, IPC_INFO, arg) < 0) return 1; lim->semmni = seminfo.semmni; lim->semmsl = seminfo.semmsl; lim->semmns = seminfo.semmns; lim->semopm = seminfo.semopm; } return 0; } int ipc_shm_get_limits(struct ipc_limits *lim) { lim->shmmin = SHMMIN; if (access(_PATH_PROC_IPC_SHMALL, F_OK) == 0 && access(_PATH_PROC_IPC_SHMMAX, F_OK) == 0 && access(_PATH_PROC_IPC_SHMMNI, F_OK) == 0) { ul_path_read_u64(NULL, &lim->shmall, _PATH_PROC_IPC_SHMALL); ul_path_read_u64(NULL, &lim->shmmax, _PATH_PROC_IPC_SHMMAX); ul_path_read_u64(NULL, &lim->shmmni, _PATH_PROC_IPC_SHMMNI); } else { struct shminfo *shminfo; struct shmid_ds shmbuf; if (shmctl(0, IPC_INFO, &shmbuf) < 0) return 1; shminfo = (struct shminfo *) &shmbuf; lim->shmmni = shminfo->shmmni; lim->shmall = shminfo->shmall; lim->shmmax = shminfo->shmmax; } return 0; } int ipc_shm_get_info(int id, struct shm_data **shmds) { FILE *f; int i = 0, maxid, j; char buf[BUFSIZ]; struct shm_data *p; struct shmid_ds dummy; p = *shmds = xcalloc(1, sizeof(struct shm_data)); p->next = NULL; f = fopen(_PATH_PROC_SYSV_SHM, "r"); if (!f) goto shm_fallback; while (fgetc(f) != '\n'); /* skip header */ while (fgets(buf, sizeof(buf), f) != NULL) { /* scan for the first 14-16 columns (e.g. Linux 2.6.32 has 14) */ p->shm_rss = 0xdead; p->shm_swp = 0xdead; if (sscanf(buf, "%d %d %o %"SCNu64 " %u %u " "%"SCNu64 " %u %u %u %u %"SCNi64 " %"SCNi64 " %"SCNi64 " %"SCNu64 " %"SCNu64 "\n", &p->shm_perm.key, &p->shm_perm.id, &p->shm_perm.mode, &p->shm_segsz, &p->shm_cprid, &p->shm_lprid, &p->shm_nattch, &p->shm_perm.uid, &p->shm_perm.gid, &p->shm_perm.cuid, &p->shm_perm.cgid, &p->shm_atim, &p->shm_dtim, &p->shm_ctim, &p->shm_rss, &p->shm_swp) < 14) continue; /* invalid line, skipped */ if (id > -1) { /* ID specified */ if (id == p->shm_perm.id) { i = 1; break; } continue; } p->next = xcalloc(1, sizeof(struct shm_data)); p = p->next; p->next = NULL; i++; } if (i == 0) free(*shmds); fclose(f); return i; /* Fallback; /proc or /sys file(s) missing. 
*/ shm_fallback: maxid = shmctl(0, SHM_INFO, &dummy); for (j = 0; j <= maxid; j++) { int shmid; struct shmid_ds shmseg; struct ipc_perm *ipcp = &shmseg.shm_perm; shmid = shmctl(j, SHM_STAT, &shmseg); if (shmid < 0 || (id > -1 && shmid != id)) { continue; } i++; p->shm_perm.key = ipcp->KEY; p->shm_perm.id = shmid; p->shm_perm.mode = ipcp->mode; p->shm_segsz = shmseg.shm_segsz; p->shm_cprid = shmseg.shm_cpid; p->shm_lprid = shmseg.shm_lpid; p->shm_nattch = shmseg.shm_nattch; p->shm_perm.uid = ipcp->uid; p->shm_perm.gid = ipcp->gid; p->shm_perm.cuid = ipcp->cuid; p->shm_perm.cgid = ipcp->cuid; p->shm_atim = shmseg.shm_atime; p->shm_dtim = shmseg.shm_dtime; p->shm_ctim = shmseg.shm_ctime; p->shm_rss = 0xdead; p->shm_swp = 0xdead; if (id < 0) { p->next = xcalloc(1, sizeof(struct shm_data)); p = p->next; p->next = NULL; } else break; } if (i == 0) free(*shmds); return i; } void ipc_shm_free_info(struct shm_data *shmds) { while (shmds) { struct shm_data *next = shmds->next; free(shmds); shmds = next; } } static void get_sem_elements(struct sem_data *p) { size_t i; if (!p || !p->sem_nsems || p->sem_perm.id < 0) return; p->elements = xcalloc(p->sem_nsems, sizeof(struct sem_elem)); for (i = 0; i < p->sem_nsems; i++) { struct sem_elem *e = &p->elements[i]; union semun arg = { .val = 0 }; e->semval = semctl(p->sem_perm.id, i, GETVAL, arg); if (e->semval < 0) err(EXIT_FAILURE, _("%s failed"), "semctl(GETVAL)"); e->ncount = semctl(p->sem_perm.id, i, GETNCNT, arg); if (e->ncount < 0) err(EXIT_FAILURE, _("%s failed"), "semctl(GETNCNT)"); e->zcount = semctl(p->sem_perm.id, i, GETZCNT, arg); if (e->zcount < 0) err(EXIT_FAILURE, _("%s failed"), "semctl(GETZCNT)"); e->pid = semctl(p->sem_perm.id, i, GETPID, arg); if (e->pid < 0) err(EXIT_FAILURE, _("%s failed"), "semctl(GETPID)"); } } int ipc_sem_get_info(int id, struct sem_data **semds) { FILE *f; int i = 0, maxid, j; struct sem_data *p; struct seminfo dummy; union semun arg; p = *semds = xcalloc(1, sizeof(struct sem_data)); p->next = NULL; f = fopen(_PATH_PROC_SYSV_SEM, "r"); if (!f) goto sem_fallback; while (fgetc(f) != '\n') ; /* skip header */ while (feof(f) == 0) { if (fscanf(f, "%d %d %o %" SCNu64 " %u %u %u %u %" SCNi64 " %" SCNi64 "\n", &p->sem_perm.key, &p->sem_perm.id, &p->sem_perm.mode, &p->sem_nsems, &p->sem_perm.uid, &p->sem_perm.gid, &p->sem_perm.cuid, &p->sem_perm.cgid, &p->sem_otime, &p->sem_ctime) != 10) continue; if (id > -1) { /* ID specified */ if (id == p->sem_perm.id) { get_sem_elements(p); i = 1; break; } continue; } p->next = xcalloc(1, sizeof(struct sem_data)); p = p->next; p->next = NULL; i++; } if (i == 0) free(*semds); fclose(f); return i; /* Fallback; /proc or /sys file(s) missing. 
*/ sem_fallback: arg.array = (ushort *) (void *)&dummy; maxid = semctl(0, 0, SEM_INFO, arg); for (j = 0; j <= maxid; j++) { int semid; struct semid_ds semseg; struct ipc_perm *ipcp = &semseg.sem_perm; arg.buf = (struct semid_ds *)&semseg; semid = semctl(j, 0, SEM_STAT, arg); if (semid < 0 || (id > -1 && semid != id)) { continue; } i++; p->sem_perm.key = ipcp->KEY; p->sem_perm.id = semid; p->sem_perm.mode = ipcp->mode; p->sem_nsems = semseg.sem_nsems; p->sem_perm.uid = ipcp->uid; p->sem_perm.gid = ipcp->gid; p->sem_perm.cuid = ipcp->cuid; p->sem_perm.cgid = ipcp->cuid; p->sem_otime = semseg.sem_otime; p->sem_ctime = semseg.sem_ctime; if (id < 0) { p->next = xcalloc(1, sizeof(struct sem_data)); p = p->next; p->next = NULL; i++; } else { get_sem_elements(p); break; } } if (i == 0) free(*semds); return i; } void ipc_sem_free_info(struct sem_data *semds) { while (semds) { struct sem_data *next = semds->next; free(semds->elements); free(semds); semds = next; } } int ipc_msg_get_info(int id, struct msg_data **msgds) { FILE *f; int i = 0, maxid, j; struct msg_data *p; struct msqid_ds dummy; struct msqid_ds msgseg; p = *msgds = xcalloc(1, sizeof(struct msg_data)); p->next = NULL; f = fopen(_PATH_PROC_SYSV_MSG, "r"); if (!f) goto msg_fallback; while (fgetc(f) != '\n') ; /* skip header */ while (feof(f) == 0) { if (fscanf(f, "%d %d %o %" SCNu64 " %" SCNu64 " %u %u %u %u %u %u %" SCNi64 " %" SCNi64 " %" SCNi64 "\n", &p->msg_perm.key, &p->msg_perm.id, &p->msg_perm.mode, &p->q_cbytes, &p->q_qnum, &p->q_lspid, &p->q_lrpid, &p->msg_perm.uid, &p->msg_perm.gid, &p->msg_perm.cuid, &p->msg_perm.cgid, &p->q_stime, &p->q_rtime, &p->q_ctime) != 14) continue; if (id > -1) { /* ID specified */ if (id == p->msg_perm.id) { if (msgctl(id, IPC_STAT, &msgseg) != -1) p->q_qbytes = msgseg.msg_qbytes; i = 1; break; } continue; } p->next = xcalloc(1, sizeof(struct msg_data)); p = p->next; p->next = NULL; i++; } if (i == 0) free(*msgds); fclose(f); return i; /* Fallback; /proc or /sys file(s) missing. 
*/ msg_fallback: maxid = msgctl(0, MSG_INFO, &dummy); for (j = 0; j <= maxid; j++) { int msgid; struct ipc_perm *ipcp = &msgseg.msg_perm; msgid = msgctl(j, MSG_STAT, &msgseg); if (msgid < 0 || (id > -1 && msgid != id)) { continue; } i++; p->msg_perm.key = ipcp->KEY; p->msg_perm.id = msgid; p->msg_perm.mode = ipcp->mode; p->q_cbytes = msgseg.msg_cbytes; p->q_qnum = msgseg.msg_qnum; p->q_lspid = msgseg.msg_lspid; p->q_lrpid = msgseg.msg_lrpid; p->msg_perm.uid = ipcp->uid; p->msg_perm.gid = ipcp->gid; p->msg_perm.cuid = ipcp->cuid; p->msg_perm.cgid = ipcp->cgid; p->q_stime = msgseg.msg_stime; p->q_rtime = msgseg.msg_rtime; p->q_ctime = msgseg.msg_ctime; p->q_qbytes = msgseg.msg_qbytes; if (id < 0) { p->next = xcalloc(1, sizeof(struct msg_data)); p = p->next; p->next = NULL; } else break; } if (i == 0) free(*msgds); return i; } void ipc_msg_free_info(struct msg_data *msgds) { while (msgds) { struct msg_data *next = msgds->next; free(msgds); msgds = next; } } void ipc_print_perms(FILE *f, struct ipc_stat *is) { struct passwd *pw; struct group *gr; fprintf(f, "%-10d %-10o", is->id, is->mode & 0777); if ((pw = getpwuid(is->cuid))) fprintf(f, " %-10s", pw->pw_name); else fprintf(f, " %-10u", is->cuid); if ((gr = getgrgid(is->cgid))) fprintf(f, " %-10s", gr->gr_name); else fprintf(f, " %-10u", is->cgid); if ((pw = getpwuid(is->uid))) fprintf(f, " %-10s", pw->pw_name); else fprintf(f, " %-10u", is->uid); if ((gr = getgrgid(is->gid))) fprintf(f, " %-10s\n", gr->gr_name); else fprintf(f, " %-10u\n", is->gid); } void ipc_print_size(int unit, char *msg, uint64_t size, const char *end, int width) { char format[32]; if (!msg) /* NULL */ ; else if (msg[strlen(msg) - 1] == '=') printf("%s", msg); else if (unit == IPC_UNIT_BYTES) printf(_("%s (bytes) = "), msg); else if (unit == IPC_UNIT_KB) printf(_("%s (kbytes) = "), msg); else printf("%s = ", msg); switch (unit) { case IPC_UNIT_DEFAULT: case IPC_UNIT_BYTES: sprintf(format, "%%%dju", width); printf(format, size); break; case IPC_UNIT_KB: sprintf(format, "%%%dju", width); printf(format, size / 1024); break; case IPC_UNIT_HUMAN: { char *tmp; sprintf(format, "%%%ds", width); printf(format, (tmp = size_to_human_string(SIZE_SUFFIX_1LETTER, size))); free(tmp); break; } default: /* impossible occurred */ abort(); } if (end) printf("%s", end); }
#include <inttypes.h> #include "c.h" #include "nls.h" #include "xalloc.h" #include "path.h" #include "pathnames.h" #include "ipcutils.h" #include "strutils.h" #ifndef SEMVMX # define SEMVMX 32767 /* <= 32767 semaphore maximum value */ #endif #ifndef SHMMIN # define SHMMIN 1 /* min shared segment size in bytes */ #endif int ipc_msg_get_limits(struct ipc_limits *lim) { if (access(_PATH_PROC_IPC_MSGMNI, F_OK) == 0 && access(_PATH_PROC_IPC_MSGMNB, F_OK) == 0 && access(_PATH_PROC_IPC_MSGMAX, F_OK) == 0) { if (ul_path_read_s32(NULL, &lim->msgmni, _PATH_PROC_IPC_MSGMNI) != 0) return 1; if (ul_path_read_s32(NULL, &lim->msgmnb, _PATH_PROC_IPC_MSGMNB) != 0) return 1; if (ul_path_read_u64(NULL, &lim->msgmax, _PATH_PROC_IPC_MSGMAX) != 0) return 1; } else { struct msginfo msginfo; if (msgctl(0, IPC_INFO, (struct msqid_ds *) &msginfo) < 0) return 1; lim->msgmni = msginfo.msgmni; lim->msgmnb = msginfo.msgmnb; lim->msgmax = msginfo.msgmax; } return 0; } int ipc_sem_get_limits(struct ipc_limits *lim) { FILE *f; int rc = 0; lim->semvmx = SEMVMX; f = fopen(_PATH_PROC_IPC_SEM, "r"); if (f) { rc = fscanf(f, "%d\t%d\t%d\t%d", &lim->semmsl, &lim->semmns, &lim->semopm, &lim->semmni); fclose(f); } if (rc != 4) { struct seminfo seminfo = { .semmni = 0 }; union semun arg = { .array = (ushort *) &seminfo }; if (semctl(0, 0, IPC_INFO, arg) < 0) return 1; lim->semmni = seminfo.semmni; lim->semmsl = seminfo.semmsl; lim->semmns = seminfo.semmns; lim->semopm = seminfo.semopm; } return 0; } int ipc_shm_get_limits(struct ipc_limits *lim) { lim->shmmin = SHMMIN; if (access(_PATH_PROC_IPC_SHMALL, F_OK) == 0 && access(_PATH_PROC_IPC_SHMMAX, F_OK) == 0 && access(_PATH_PROC_IPC_SHMMNI, F_OK) == 0) { ul_path_read_u64(NULL, &lim->shmall, _PATH_PROC_IPC_SHMALL); ul_path_read_u64(NULL, &lim->shmmax, _PATH_PROC_IPC_SHMMAX); ul_path_read_u64(NULL, &lim->shmmni, _PATH_PROC_IPC_SHMMNI); } else { struct shminfo *shminfo; struct shmid_ds shmbuf; if (shmctl(0, IPC_INFO, &shmbuf) < 0) return 1; shminfo = (struct shminfo *) &shmbuf; lim->shmmni = shminfo->shmmni; lim->shmall = shminfo->shmall; lim->shmmax = shminfo->shmmax; } return 0; } int ipc_shm_get_info(int id, struct shm_data **shmds) { FILE *f; int i = 0, maxid, j; char buf[BUFSIZ]; struct shm_data *p; struct shmid_ds dummy; p = *shmds = xcalloc(1, sizeof(struct shm_data)); p->next = NULL; f = fopen(_PATH_PROC_SYSV_SHM, "r"); if (!f) goto shm_fallback; while (fgetc(f) != '\n'); /* skip header */ while (fgets(buf, sizeof(buf), f) != NULL) { /* scan for the first 14-16 columns (e.g. Linux 2.6.32 has 14) */ p->shm_rss = 0xdead; p->shm_swp = 0xdead; if (sscanf(buf, "%d %d %o %"SCNu64 " %u %u " "%"SCNu64 " %u %u %u %u %"SCNi64 " %"SCNi64 " %"SCNi64 " %"SCNu64 " %"SCNu64 "\n", &p->shm_perm.key, &p->shm_perm.id, &p->shm_perm.mode, &p->shm_segsz, &p->shm_cprid, &p->shm_lprid, &p->shm_nattch, &p->shm_perm.uid, &p->shm_perm.gid, &p->shm_perm.cuid, &p->shm_perm.cgid, &p->shm_atim, &p->shm_dtim, &p->shm_ctim, &p->shm_rss, &p->shm_swp) < 14) continue; /* invalid line, skipped */ if (id > -1) { /* ID specified */ if (id == p->shm_perm.id) { i = 1; break; } continue; } p->next = xcalloc(1, sizeof(struct shm_data)); p = p->next; p->next = NULL; i++; } if (i == 0) free(*shmds); fclose(f); return i; /* Fallback; /proc or /sys file(s) missing. 
*/ shm_fallback: maxid = shmctl(0, SHM_INFO, &dummy); for (j = 0; j <= maxid; j++) { int shmid; struct shmid_ds shmseg; struct ipc_perm *ipcp = &shmseg.shm_perm; shmid = shmctl(j, SHM_STAT, &shmseg); if (shmid < 0 || (id > -1 && shmid != id)) { continue; } i++; p->shm_perm.key = ipcp->KEY; p->shm_perm.id = shmid; p->shm_perm.mode = ipcp->mode; p->shm_segsz = shmseg.shm_segsz; p->shm_cprid = shmseg.shm_cpid; p->shm_lprid = shmseg.shm_lpid; p->shm_nattch = shmseg.shm_nattch; p->shm_perm.uid = ipcp->uid; p->shm_perm.gid = ipcp->gid; p->shm_perm.cuid = ipcp->cuid; p->shm_perm.cgid = ipcp->cuid; p->shm_atim = shmseg.shm_atime; p->shm_dtim = shmseg.shm_dtime; p->shm_ctim = shmseg.shm_ctime; p->shm_rss = 0xdead; p->shm_swp = 0xdead; if (id < 0) { p->next = xcalloc(1, sizeof(struct shm_data)); p = p->next; p->next = NULL; } else break; } if (i == 0) free(*shmds); return i; } void ipc_shm_free_info(struct shm_data *shmds) { while (shmds) { struct shm_data *next = shmds->next; free(shmds); shmds = next; } } static void get_sem_elements(struct sem_data *p) { size_t i; if (!p || !p->sem_nsems || p->sem_nsems > SIZE_MAX || p->sem_perm.id < 0) return; p->elements = xcalloc(p->sem_nsems, sizeof(struct sem_elem)); for (i = 0; i < p->sem_nsems; i++) { struct sem_elem *e = &p->elements[i]; union semun arg = { .val = 0 }; e->semval = semctl(p->sem_perm.id, i, GETVAL, arg); if (e->semval < 0) err(EXIT_FAILURE, _("%s failed"), "semctl(GETVAL)"); e->ncount = semctl(p->sem_perm.id, i, GETNCNT, arg); if (e->ncount < 0) err(EXIT_FAILURE, _("%s failed"), "semctl(GETNCNT)"); e->zcount = semctl(p->sem_perm.id, i, GETZCNT, arg); if (e->zcount < 0) err(EXIT_FAILURE, _("%s failed"), "semctl(GETZCNT)"); e->pid = semctl(p->sem_perm.id, i, GETPID, arg); if (e->pid < 0) err(EXIT_FAILURE, _("%s failed"), "semctl(GETPID)"); } } int ipc_sem_get_info(int id, struct sem_data **semds) { FILE *f; int i = 0, maxid, j; struct sem_data *p; struct seminfo dummy; union semun arg; p = *semds = xcalloc(1, sizeof(struct sem_data)); p->next = NULL; f = fopen(_PATH_PROC_SYSV_SEM, "r"); if (!f) goto sem_fallback; while (fgetc(f) != '\n') ; /* skip header */ while (feof(f) == 0) { if (fscanf(f, "%d %d %o %" SCNu64 " %u %u %u %u %" SCNi64 " %" SCNi64 "\n", &p->sem_perm.key, &p->sem_perm.id, &p->sem_perm.mode, &p->sem_nsems, &p->sem_perm.uid, &p->sem_perm.gid, &p->sem_perm.cuid, &p->sem_perm.cgid, &p->sem_otime, &p->sem_ctime) != 10) continue; if (id > -1) { /* ID specified */ if (id == p->sem_perm.id) { get_sem_elements(p); i = 1; break; } continue; } p->next = xcalloc(1, sizeof(struct sem_data)); p = p->next; p->next = NULL; i++; } if (i == 0) free(*semds); fclose(f); return i; /* Fallback; /proc or /sys file(s) missing. 
*/ sem_fallback: arg.array = (ushort *) (void *)&dummy; maxid = semctl(0, 0, SEM_INFO, arg); for (j = 0; j <= maxid; j++) { int semid; struct semid_ds semseg; struct ipc_perm *ipcp = &semseg.sem_perm; arg.buf = (struct semid_ds *)&semseg; semid = semctl(j, 0, SEM_STAT, arg); if (semid < 0 || (id > -1 && semid != id)) { continue; } i++; p->sem_perm.key = ipcp->KEY; p->sem_perm.id = semid; p->sem_perm.mode = ipcp->mode; p->sem_nsems = semseg.sem_nsems; p->sem_perm.uid = ipcp->uid; p->sem_perm.gid = ipcp->gid; p->sem_perm.cuid = ipcp->cuid; p->sem_perm.cgid = ipcp->cuid; p->sem_otime = semseg.sem_otime; p->sem_ctime = semseg.sem_ctime; if (id < 0) { p->next = xcalloc(1, sizeof(struct sem_data)); p = p->next; p->next = NULL; i++; } else { get_sem_elements(p); break; } } if (i == 0) free(*semds); return i; } void ipc_sem_free_info(struct sem_data *semds) { while (semds) { struct sem_data *next = semds->next; free(semds->elements); free(semds); semds = next; } } int ipc_msg_get_info(int id, struct msg_data **msgds) { FILE *f; int i = 0, maxid, j; struct msg_data *p; struct msqid_ds dummy; struct msqid_ds msgseg; p = *msgds = xcalloc(1, sizeof(struct msg_data)); p->next = NULL; f = fopen(_PATH_PROC_SYSV_MSG, "r"); if (!f) goto msg_fallback; while (fgetc(f) != '\n') ; /* skip header */ while (feof(f) == 0) { if (fscanf(f, "%d %d %o %" SCNu64 " %" SCNu64 " %u %u %u %u %u %u %" SCNi64 " %" SCNi64 " %" SCNi64 "\n", &p->msg_perm.key, &p->msg_perm.id, &p->msg_perm.mode, &p->q_cbytes, &p->q_qnum, &p->q_lspid, &p->q_lrpid, &p->msg_perm.uid, &p->msg_perm.gid, &p->msg_perm.cuid, &p->msg_perm.cgid, &p->q_stime, &p->q_rtime, &p->q_ctime) != 14) continue; if (id > -1) { /* ID specified */ if (id == p->msg_perm.id) { if (msgctl(id, IPC_STAT, &msgseg) != -1) p->q_qbytes = msgseg.msg_qbytes; i = 1; break; } continue; } p->next = xcalloc(1, sizeof(struct msg_data)); p = p->next; p->next = NULL; i++; } if (i == 0) free(*msgds); fclose(f); return i; /* Fallback; /proc or /sys file(s) missing. 
*/ msg_fallback: maxid = msgctl(0, MSG_INFO, &dummy); for (j = 0; j <= maxid; j++) { int msgid; struct ipc_perm *ipcp = &msgseg.msg_perm; msgid = msgctl(j, MSG_STAT, &msgseg); if (msgid < 0 || (id > -1 && msgid != id)) { continue; } i++; p->msg_perm.key = ipcp->KEY; p->msg_perm.id = msgid; p->msg_perm.mode = ipcp->mode; p->q_cbytes = msgseg.msg_cbytes; p->q_qnum = msgseg.msg_qnum; p->q_lspid = msgseg.msg_lspid; p->q_lrpid = msgseg.msg_lrpid; p->msg_perm.uid = ipcp->uid; p->msg_perm.gid = ipcp->gid; p->msg_perm.cuid = ipcp->cuid; p->msg_perm.cgid = ipcp->cgid; p->q_stime = msgseg.msg_stime; p->q_rtime = msgseg.msg_rtime; p->q_ctime = msgseg.msg_ctime; p->q_qbytes = msgseg.msg_qbytes; if (id < 0) { p->next = xcalloc(1, sizeof(struct msg_data)); p = p->next; p->next = NULL; } else break; } if (i == 0) free(*msgds); return i; } void ipc_msg_free_info(struct msg_data *msgds) { while (msgds) { struct msg_data *next = msgds->next; free(msgds); msgds = next; } } void ipc_print_perms(FILE *f, struct ipc_stat *is) { struct passwd *pw; struct group *gr; fprintf(f, "%-10d %-10o", is->id, is->mode & 0777); if ((pw = getpwuid(is->cuid))) fprintf(f, " %-10s", pw->pw_name); else fprintf(f, " %-10u", is->cuid); if ((gr = getgrgid(is->cgid))) fprintf(f, " %-10s", gr->gr_name); else fprintf(f, " %-10u", is->cgid); if ((pw = getpwuid(is->uid))) fprintf(f, " %-10s", pw->pw_name); else fprintf(f, " %-10u", is->uid); if ((gr = getgrgid(is->gid))) fprintf(f, " %-10s\n", gr->gr_name); else fprintf(f, " %-10u\n", is->gid); } void ipc_print_size(int unit, char *msg, uint64_t size, const char *end, int width) { char format[32]; if (!msg) /* NULL */ ; else if (msg[strlen(msg) - 1] == '=') printf("%s", msg); else if (unit == IPC_UNIT_BYTES) printf(_("%s (bytes) = "), msg); else if (unit == IPC_UNIT_KB) printf(_("%s (kbytes) = "), msg); else printf("%s = ", msg); switch (unit) { case IPC_UNIT_DEFAULT: case IPC_UNIT_BYTES: sprintf(format, "%%%dju", width); printf(format, size); break; case IPC_UNIT_KB: sprintf(format, "%%%dju", width); printf(format, size / 1024); break; case IPC_UNIT_HUMAN: { char *tmp; sprintf(format, "%%%ds", width); printf(format, (tmp = size_to_human_string(SIZE_SUFFIX_1LETTER, size))); free(tmp); break; } default: /* impossible occurred */ abort(); } if (end) printf("%s", end); }
static void get_sem_elements(struct sem_data *p) { size_t i; if (!p || !p->sem_nsems || p->sem_perm.id < 0) return; p->elements = xcalloc(p->sem_nsems, sizeof(struct sem_elem)); for (i = 0; i < p->sem_nsems; i++) { struct sem_elem *e = &p->elements[i]; union semun arg = { .val = 0 }; e->semval = semctl(p->sem_perm.id, i, GETVAL, arg); if (e->semval < 0) err(EXIT_FAILURE, _("%s failed"), "semctl(GETVAL)"); e->ncount = semctl(p->sem_perm.id, i, GETNCNT, arg); if (e->ncount < 0) err(EXIT_FAILURE, _("%s failed"), "semctl(GETNCNT)"); e->zcount = semctl(p->sem_perm.id, i, GETZCNT, arg); if (e->zcount < 0) err(EXIT_FAILURE, _("%s failed"), "semctl(GETZCNT)"); e->pid = semctl(p->sem_perm.id, i, GETPID, arg); if (e->pid < 0) err(EXIT_FAILURE, _("%s failed"), "semctl(GETPID)"); } }
static void get_sem_elements(struct sem_data *p) { size_t i; if (!p || !p->sem_nsems || p->sem_nsems > SIZE_MAX || p->sem_perm.id < 0) return; p->elements = xcalloc(p->sem_nsems, sizeof(struct sem_elem)); for (i = 0; i < p->sem_nsems; i++) { struct sem_elem *e = &p->elements[i]; union semun arg = { .val = 0 }; e->semval = semctl(p->sem_perm.id, i, GETVAL, arg); if (e->semval < 0) err(EXIT_FAILURE, _("%s failed"), "semctl(GETVAL)"); e->ncount = semctl(p->sem_perm.id, i, GETNCNT, arg); if (e->ncount < 0) err(EXIT_FAILURE, _("%s failed"), "semctl(GETNCNT)"); e->zcount = semctl(p->sem_perm.id, i, GETZCNT, arg); if (e->zcount < 0) err(EXIT_FAILURE, _("%s failed"), "semctl(GETZCNT)"); e->pid = semctl(p->sem_perm.id, i, GETPID, arg); if (e->pid < 0) err(EXIT_FAILURE, _("%s failed"), "semctl(GETPID)"); } }
{'added': [(221, '\tif (!p || !p->sem_nsems || p->sem_nsems > SIZE_MAX || p->sem_perm.id < 0)')], 'deleted': [(221, '\tif (!p || !p->sem_nsems || p->sem_perm.id < 0)')]}
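The one-line diff above is the entire fix: before allocating, the patched get_sem_elements rejects any sem_nsems value larger than SIZE_MAX, i.e. any 64-bit count that would be silently truncated when passed to xcalloc's size_t nmemb parameter on a 32-bit target. A minimal sketch of that guard as a reusable pattern, assuming only standard C; calloc64 is an illustrative name, not a util-linux function:

#include <stdint.h>
#include <stdlib.h>

/* Illustrative helper: refuse a 64-bit element count that size_t cannot
 * represent, before the allocator's size_t parameter truncates it. C11
 * calloc() does check nmemb * size for overflow, but it never sees the
 * original 64-bit value, so the truncation has to be caught first. */
static void *calloc64(uint64_t nmemb, size_t size)
{
    if (nmemb > SIZE_MAX)
        return NULL;                    /* would truncate on ILP32 */
    return calloc((size_t)nmemb, size); /* calloc guards nmemb*size itself */
}

Note that on an LP64 target the new test can never fire (the parsed count and size_t are both 64 bits wide), so the check only has teeth where size_t is narrower than uint64_t.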
1
1
440
2934
https://github.com/karelzak/util-linux
CVE-2021-37600
['CWE-190']
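For context on the CWE-190 tag: in the unpatched get_sem_elements, sem_nsems is parsed from /proc/sysvipc/sem as a 64-bit integer and handed straight to xcalloc. A minimal runnable sketch of the wrap-around, modelling a 32-bit size_t with uint32_t so it behaves the same on any host; the count value is hypothetical:

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
    uint64_t nsems = UINT64_C(0x100000001);  /* hypothetical hostile count */
    uint32_t n = (uint32_t)nsems;            /* models size_t on ILP32: wraps to 1 */
    printf("parsed %" PRIu64 " semaphores, allocator sees %" PRIu32 "\n",
           nsems, n);
    /* xcalloc(n, sizeof(struct sem_elem)) then under-allocates, while the
     * fill loop still walks the full 64-bit count: out-of-bounds writes. */
    return 0;
}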
rtadv.c
rtadv_read
/* Router advertisement * Copyright (C) 2005 6WIND <jean-mickael.guerin@6wind.com> * Copyright (C) 1999 Kunihiro Ishiguro * * This file is part of GNU Zebra. * * GNU Zebra is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2, or (at your option) any * later version. * * GNU Zebra is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with GNU Zebra; see the file COPYING. If not, write to the Free * Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA * 02111-1307, USA. */ #include <zebra.h> #include "memory.h" #include "sockopt.h" #include "thread.h" #include "if.h" #include "log.h" #include "prefix.h" #include "linklist.h" #include "command.h" #include "privs.h" #include "vrf.h" #include "zebra/interface.h" #include "zebra/rtadv.h" #include "zebra/debug.h" #include "zebra/rib.h" #include "zebra/zserv.h" extern struct zebra_privs_t zserv_privs; #if defined (HAVE_IPV6) && defined (HAVE_RTADV) #ifdef OPEN_BSD #include <netinet/icmp6.h> #endif /* If RFC2133 definition is used. */ #ifndef IPV6_JOIN_GROUP #define IPV6_JOIN_GROUP IPV6_ADD_MEMBERSHIP #endif #ifndef IPV6_LEAVE_GROUP #define IPV6_LEAVE_GROUP IPV6_DROP_MEMBERSHIP #endif #define ALLNODE "ff02::1" #define ALLROUTER "ff02::2" extern struct zebra_t zebrad; enum rtadv_event {RTADV_START, RTADV_STOP, RTADV_TIMER, RTADV_TIMER_MSEC, RTADV_READ}; static void rtadv_event (struct zebra_vrf *, enum rtadv_event, int); static int if_join_all_router (int, struct interface *); static int if_leave_all_router (int, struct interface *); static int rtadv_recv_packet (int sock, u_char *buf, int buflen, struct sockaddr_in6 *from, ifindex_t *ifindex, int *hoplimit) { int ret; struct msghdr msg; struct iovec iov; struct cmsghdr *cmsgptr; struct in6_addr dst; char adata[1024]; /* Fill in message and iovec. */ msg.msg_name = (void *) from; msg.msg_namelen = sizeof (struct sockaddr_in6); msg.msg_iov = &iov; msg.msg_iovlen = 1; msg.msg_control = (void *) adata; msg.msg_controllen = sizeof adata; iov.iov_base = buf; iov.iov_len = buflen; /* If recvmsg fail return minus value. */ ret = recvmsg (sock, &msg, 0); if (ret < 0) return ret; for (cmsgptr = ZCMSG_FIRSTHDR(&msg); cmsgptr != NULL; cmsgptr = CMSG_NXTHDR(&msg, cmsgptr)) { /* I want interface index which this packet comes from. */ if (cmsgptr->cmsg_level == IPPROTO_IPV6 && cmsgptr->cmsg_type == IPV6_PKTINFO) { struct in6_pktinfo *ptr; ptr = (struct in6_pktinfo *) CMSG_DATA (cmsgptr); *ifindex = ptr->ipi6_ifindex; memcpy(&dst, &ptr->ipi6_addr, sizeof(ptr->ipi6_addr)); } /* Incoming packet's hop limit. */ if (cmsgptr->cmsg_level == IPPROTO_IPV6 && cmsgptr->cmsg_type == IPV6_HOPLIMIT) { int *hoptr = (int *) CMSG_DATA (cmsgptr); *hoplimit = *hoptr; } } return ret; } #define RTADV_MSG_SIZE 4096 /* Send router advertisement packet. 
*/ static void rtadv_send_packet (int sock, struct interface *ifp) { struct msghdr msg; struct iovec iov; struct cmsghdr *cmsgptr; struct in6_pktinfo *pkt; struct sockaddr_in6 addr; #ifdef HAVE_STRUCT_SOCKADDR_DL struct sockaddr_dl *sdl; #endif /* HAVE_STRUCT_SOCKADDR_DL */ static void *adata = NULL; unsigned char buf[RTADV_MSG_SIZE]; struct nd_router_advert *rtadv; int ret; int len = 0; struct zebra_if *zif; struct rtadv_prefix *rprefix; u_char all_nodes_addr[] = {0xff,0x02,0,0,0,0,0,0,0,0,0,0,0,0,0,1}; struct listnode *node; u_int16_t pkt_RouterLifetime; /* * Allocate control message bufffer. This is dynamic because * CMSG_SPACE is not guaranteed not to call a function. Note that * the size will be different on different architectures due to * differing alignment rules. */ if (adata == NULL) { /* XXX Free on shutdown. */ adata = malloc(CMSG_SPACE(sizeof(struct in6_pktinfo))); if (adata == NULL) zlog_err("rtadv_send_packet: can't malloc control data\n"); } /* Logging of packet. */ if (IS_ZEBRA_DEBUG_PACKET) zlog_debug ("Router advertisement send to %s", ifp->name); /* Fill in sockaddr_in6. */ memset (&addr, 0, sizeof (struct sockaddr_in6)); addr.sin6_family = AF_INET6; #ifdef SIN6_LEN addr.sin6_len = sizeof (struct sockaddr_in6); #endif /* SIN6_LEN */ addr.sin6_port = htons (IPPROTO_ICMPV6); IPV6_ADDR_COPY (&addr.sin6_addr, all_nodes_addr); /* Fetch interface information. */ zif = ifp->info; /* Make router advertisement message. */ rtadv = (struct nd_router_advert *) buf; rtadv->nd_ra_type = ND_ROUTER_ADVERT; rtadv->nd_ra_code = 0; rtadv->nd_ra_cksum = 0; rtadv->nd_ra_curhoplimit = 64; /* RFC4191: Default Router Preference is 0 if Router Lifetime is 0. */ rtadv->nd_ra_flags_reserved = zif->rtadv.AdvDefaultLifetime == 0 ? 0 : zif->rtadv.DefaultPreference; rtadv->nd_ra_flags_reserved <<= 3; if (zif->rtadv.AdvManagedFlag) rtadv->nd_ra_flags_reserved |= ND_RA_FLAG_MANAGED; if (zif->rtadv.AdvOtherConfigFlag) rtadv->nd_ra_flags_reserved |= ND_RA_FLAG_OTHER; if (zif->rtadv.AdvHomeAgentFlag) rtadv->nd_ra_flags_reserved |= ND_RA_FLAG_HOME_AGENT; /* Note that according to Neighbor Discovery (RFC 4861 [18]), * AdvDefaultLifetime is by default based on the value of * MaxRtrAdvInterval. AdvDefaultLifetime is used in the Router Lifetime * field of Router Advertisements. Given that this field is expressed * in seconds, a small MaxRtrAdvInterval value can result in a zero * value for this field. To prevent this, routers SHOULD keep * AdvDefaultLifetime in at least one second, even if the use of * MaxRtrAdvInterval would result in a smaller value. -- RFC6275, 7.5 */ pkt_RouterLifetime = zif->rtadv.AdvDefaultLifetime != -1 ? zif->rtadv.AdvDefaultLifetime : MAX (1, 0.003 * zif->rtadv.MaxRtrAdvInterval); rtadv->nd_ra_router_lifetime = htons (pkt_RouterLifetime); rtadv->nd_ra_reachable = htonl (zif->rtadv.AdvReachableTime); rtadv->nd_ra_retransmit = htonl (0); len = sizeof (struct nd_router_advert); /* If both the Home Agent Preference and Home Agent Lifetime are set to * their default values specified above, this option SHOULD NOT be * included in the Router Advertisement messages sent by this home * agent. 
-- RFC6275, 7.4 */ if ( zif->rtadv.AdvHomeAgentFlag && (zif->rtadv.HomeAgentPreference || zif->rtadv.HomeAgentLifetime != -1) ) { struct nd_opt_homeagent_info *ndopt_hai = (struct nd_opt_homeagent_info *)(buf + len); ndopt_hai->nd_opt_hai_type = ND_OPT_HA_INFORMATION; ndopt_hai->nd_opt_hai_len = 1; ndopt_hai->nd_opt_hai_reserved = 0; ndopt_hai->nd_opt_hai_preference = htons(zif->rtadv.HomeAgentPreference); /* 16-bit unsigned integer. The lifetime associated with the home * agent in units of seconds. The default value is the same as the * Router Lifetime, as specified in the main body of the Router * Advertisement. The maximum value corresponds to 18.2 hours. A * value of 0 MUST NOT be used. -- RFC6275, 7.5 */ ndopt_hai->nd_opt_hai_lifetime = htons ( zif->rtadv.HomeAgentLifetime != -1 ? zif->rtadv.HomeAgentLifetime : MAX (1, pkt_RouterLifetime) /* 0 is OK for RL, but not for HAL*/ ); len += sizeof(struct nd_opt_homeagent_info); } if (zif->rtadv.AdvIntervalOption) { struct nd_opt_adv_interval *ndopt_adv = (struct nd_opt_adv_interval *)(buf + len); ndopt_adv->nd_opt_ai_type = ND_OPT_ADV_INTERVAL; ndopt_adv->nd_opt_ai_len = 1; ndopt_adv->nd_opt_ai_reserved = 0; ndopt_adv->nd_opt_ai_interval = htonl(zif->rtadv.MaxRtrAdvInterval); len += sizeof(struct nd_opt_adv_interval); } /* Fill in prefix. */ for (ALL_LIST_ELEMENTS_RO (zif->rtadv.AdvPrefixList, node, rprefix)) { struct nd_opt_prefix_info *pinfo; pinfo = (struct nd_opt_prefix_info *) (buf + len); pinfo->nd_opt_pi_type = ND_OPT_PREFIX_INFORMATION; pinfo->nd_opt_pi_len = 4; pinfo->nd_opt_pi_prefix_len = rprefix->prefix.prefixlen; pinfo->nd_opt_pi_flags_reserved = 0; if (rprefix->AdvOnLinkFlag) pinfo->nd_opt_pi_flags_reserved |= ND_OPT_PI_FLAG_ONLINK; if (rprefix->AdvAutonomousFlag) pinfo->nd_opt_pi_flags_reserved |= ND_OPT_PI_FLAG_AUTO; if (rprefix->AdvRouterAddressFlag) pinfo->nd_opt_pi_flags_reserved |= ND_OPT_PI_FLAG_RADDR; pinfo->nd_opt_pi_valid_time = htonl (rprefix->AdvValidLifetime); pinfo->nd_opt_pi_preferred_time = htonl (rprefix->AdvPreferredLifetime); pinfo->nd_opt_pi_reserved2 = 0; IPV6_ADDR_COPY (&pinfo->nd_opt_pi_prefix, &rprefix->prefix.prefix); #ifdef DEBUG { u_char buf[INET6_ADDRSTRLEN]; zlog_debug ("DEBUG %s", inet_ntop (AF_INET6, &pinfo->nd_opt_pi_prefix, buf, INET6_ADDRSTRLEN)); } #endif /* DEBUG */ len += sizeof (struct nd_opt_prefix_info); } /* Hardware address. */ if (ifp->hw_addr_len != 0) { buf[len++] = ND_OPT_SOURCE_LINKADDR; /* Option length should be rounded up to next octet if the link address does not end on an octet boundary. */ buf[len++] = (ifp->hw_addr_len + 9) >> 3; memcpy (buf + len, ifp->hw_addr, ifp->hw_addr_len); len += ifp->hw_addr_len; /* Pad option to end on an octet boundary. 
*/ memset (buf + len, 0, -(ifp->hw_addr_len + 2) & 0x7); len += -(ifp->hw_addr_len + 2) & 0x7; } /* MTU */ if (zif->rtadv.AdvLinkMTU) { struct nd_opt_mtu * opt = (struct nd_opt_mtu *) (buf + len); opt->nd_opt_mtu_type = ND_OPT_MTU; opt->nd_opt_mtu_len = 1; opt->nd_opt_mtu_reserved = 0; opt->nd_opt_mtu_mtu = htonl (zif->rtadv.AdvLinkMTU); len += sizeof (struct nd_opt_mtu); } msg.msg_name = (void *) &addr; msg.msg_namelen = sizeof (struct sockaddr_in6); msg.msg_iov = &iov; msg.msg_iovlen = 1; msg.msg_control = (void *) adata; msg.msg_controllen = CMSG_SPACE(sizeof(struct in6_pktinfo)); msg.msg_flags = 0; iov.iov_base = buf; iov.iov_len = len; cmsgptr = ZCMSG_FIRSTHDR(&msg); cmsgptr->cmsg_len = CMSG_LEN(sizeof(struct in6_pktinfo)); cmsgptr->cmsg_level = IPPROTO_IPV6; cmsgptr->cmsg_type = IPV6_PKTINFO; pkt = (struct in6_pktinfo *) CMSG_DATA (cmsgptr); memset (&pkt->ipi6_addr, 0, sizeof (struct in6_addr)); pkt->ipi6_ifindex = ifp->ifindex; ret = sendmsg (sock, &msg, 0); if (ret < 0) { zlog_err ("rtadv_send_packet: sendmsg %d (%s)\n", errno, safe_strerror(errno)); } } static int rtadv_timer (struct thread *thread) { struct zebra_vrf *zvrf = THREAD_ARG (thread); struct listnode *node, *nnode; struct interface *ifp; struct zebra_if *zif; int period; zvrf->rtadv.ra_timer = NULL; if (zvrf->rtadv.adv_msec_if_count == 0) { period = 1000; /* 1 s */ rtadv_event (zvrf, RTADV_TIMER, 1 /* 1 s */); } else { period = 10; /* 10 ms */ rtadv_event (zvrf, RTADV_TIMER_MSEC, 10 /* 10 ms */); } for (ALL_LIST_ELEMENTS (vrf_iflist (zvrf->vrf_id), node, nnode, ifp)) { if (if_is_loopback (ifp) || ! if_is_operative (ifp)) continue; zif = ifp->info; if (zif->rtadv.AdvSendAdvertisements) { zif->rtadv.AdvIntervalTimer -= period; if (zif->rtadv.AdvIntervalTimer <= 0) { /* FIXME: using MaxRtrAdvInterval each time isn't what section 6.2.4 of RFC4861 tells to do. */ zif->rtadv.AdvIntervalTimer = zif->rtadv.MaxRtrAdvInterval; rtadv_send_packet (zvrf->rtadv.sock, ifp); } } } return 0; } static void rtadv_process_solicit (struct interface *ifp) { struct zebra_vrf *zvrf = vrf_info_lookup (ifp->vrf_id); zlog_info ("Router solicitation received on %s vrf %u", ifp->name, zvrf->vrf_id); rtadv_send_packet (zvrf->rtadv.sock, ifp); } static void rtadv_process_advert (void) { zlog_info ("Router advertisement received"); } static void rtadv_process_packet (u_char *buf, unsigned int len, ifindex_t ifindex, int hoplimit, vrf_id_t vrf_id) { struct icmp6_hdr *icmph; struct interface *ifp; struct zebra_if *zif; /* Interface search. */ ifp = if_lookup_by_index_vrf (ifindex, vrf_id); if (ifp == NULL) { zlog_warn ("Unknown interface index: %d, vrf %u", ifindex, vrf_id); return; } if (if_is_loopback (ifp)) return; /* Check interface configuration. */ zif = ifp->info; if (! zif->rtadv.AdvSendAdvertisements) return; /* ICMP message length check. */ if (len < sizeof (struct icmp6_hdr)) { zlog_warn ("Invalid ICMPV6 packet length: %d", len); return; } icmph = (struct icmp6_hdr *) buf; /* ICMP message type check. */ if (icmph->icmp6_type != ND_ROUTER_SOLICIT && icmph->icmp6_type != ND_ROUTER_ADVERT) { zlog_warn ("Unwanted ICMPV6 message type: %d", icmph->icmp6_type); return; } /* Hoplimit check. */ if (hoplimit >= 0 && hoplimit != 255) { zlog_warn ("Invalid hoplimit %d for router advertisement ICMP packet", hoplimit); return; } /* Check ICMP message type. 
*/ if (icmph->icmp6_type == ND_ROUTER_SOLICIT) rtadv_process_solicit (ifp); else if (icmph->icmp6_type == ND_ROUTER_ADVERT) rtadv_process_advert (); return; } static int rtadv_read (struct thread *thread) { int sock; int len; u_char buf[RTADV_MSG_SIZE]; struct sockaddr_in6 from; ifindex_t ifindex = 0; int hoplimit = -1; struct zebra_vrf *zvrf = THREAD_ARG (thread); sock = THREAD_FD (thread); zvrf->rtadv.ra_read = NULL; /* Register myself. */ rtadv_event (zvrf, RTADV_READ, sock); len = rtadv_recv_packet (sock, buf, BUFSIZ, &from, &ifindex, &hoplimit); if (len < 0) { zlog_warn ("router solicitation recv failed: %s.", safe_strerror (errno)); return len; } rtadv_process_packet (buf, (unsigned)len, ifindex, hoplimit, zvrf->vrf_id); return 0; } static int rtadv_make_socket (vrf_id_t vrf_id) { int sock; int ret; struct icmp6_filter filter; if ( zserv_privs.change (ZPRIVS_RAISE) ) zlog_err ("rtadv_make_socket: could not raise privs, %s", safe_strerror (errno) ); sock = vrf_socket (AF_INET6, SOCK_RAW, IPPROTO_ICMPV6, vrf_id); if ( zserv_privs.change (ZPRIVS_LOWER) ) zlog_err ("rtadv_make_socket: could not lower privs, %s", safe_strerror (errno) ); /* When we can't make ICMPV6 socket simply back. Router advertisement feature will not be supported. */ if (sock < 0) { close (sock); return -1; } ret = setsockopt_ipv6_pktinfo (sock, 1); if (ret < 0) { close (sock); return ret; } ret = setsockopt_ipv6_multicast_loop (sock, 0); if (ret < 0) { close (sock); return ret; } ret = setsockopt_ipv6_unicast_hops (sock, 255); if (ret < 0) { close (sock); return ret; } ret = setsockopt_ipv6_multicast_hops (sock, 255); if (ret < 0) { close (sock); return ret; } ret = setsockopt_ipv6_hoplimit (sock, 1); if (ret < 0) { close (sock); return ret; } ICMP6_FILTER_SETBLOCKALL(&filter); ICMP6_FILTER_SETPASS (ND_ROUTER_SOLICIT, &filter); ICMP6_FILTER_SETPASS (ND_ROUTER_ADVERT, &filter); ret = setsockopt (sock, IPPROTO_ICMPV6, ICMP6_FILTER, &filter, sizeof (struct icmp6_filter)); if (ret < 0) { zlog_info ("ICMP6_FILTER set fail: %s", safe_strerror (errno)); return ret; } return sock; } static struct rtadv_prefix * rtadv_prefix_new (void) { return XCALLOC (MTYPE_RTADV_PREFIX, sizeof (struct rtadv_prefix)); } static void rtadv_prefix_free (struct rtadv_prefix *rtadv_prefix) { XFREE (MTYPE_RTADV_PREFIX, rtadv_prefix); } static struct rtadv_prefix * rtadv_prefix_lookup (struct list *rplist, struct prefix_ipv6 *p) { struct listnode *node; struct rtadv_prefix *rprefix; for (ALL_LIST_ELEMENTS_RO (rplist, node, rprefix)) if (prefix_same ((struct prefix *) &rprefix->prefix, (struct prefix *) p)) return rprefix; return NULL; } static struct rtadv_prefix * rtadv_prefix_get (struct list *rplist, struct prefix_ipv6 *p) { struct rtadv_prefix *rprefix; rprefix = rtadv_prefix_lookup (rplist, p); if (rprefix) return rprefix; rprefix = rtadv_prefix_new (); memcpy (&rprefix->prefix, p, sizeof (struct prefix_ipv6)); listnode_add (rplist, rprefix); return rprefix; } static void rtadv_prefix_set (struct zebra_if *zif, struct rtadv_prefix *rp) { struct rtadv_prefix *rprefix; rprefix = rtadv_prefix_get (zif->rtadv.AdvPrefixList, &rp->prefix); /* Set parameters. 
*/ rprefix->AdvValidLifetime = rp->AdvValidLifetime; rprefix->AdvPreferredLifetime = rp->AdvPreferredLifetime; rprefix->AdvOnLinkFlag = rp->AdvOnLinkFlag; rprefix->AdvAutonomousFlag = rp->AdvAutonomousFlag; rprefix->AdvRouterAddressFlag = rp->AdvRouterAddressFlag; } static int rtadv_prefix_reset (struct zebra_if *zif, struct rtadv_prefix *rp) { struct rtadv_prefix *rprefix; rprefix = rtadv_prefix_lookup (zif->rtadv.AdvPrefixList, &rp->prefix); if (rprefix != NULL) { listnode_delete (zif->rtadv.AdvPrefixList, (void *) rprefix); rtadv_prefix_free (rprefix); return 1; } else return 0; } DEFUN (ipv6_nd_suppress_ra, ipv6_nd_suppress_ra_cmd, "ipv6 nd suppress-ra", "Interface IPv6 config commands\n" "Neighbor discovery\n" "Suppress Router Advertisement\n") { struct interface *ifp; struct zebra_if *zif; struct zebra_vrf *zvrf; ifp = vty->index; zif = ifp->info; zvrf = vrf_info_lookup (ifp->vrf_id); if (if_is_loopback (ifp)) { vty_out (vty, "Invalid interface%s", VTY_NEWLINE); return CMD_WARNING; } if (zif->rtadv.AdvSendAdvertisements) { zif->rtadv.AdvSendAdvertisements = 0; zif->rtadv.AdvIntervalTimer = 0; zvrf->rtadv.adv_if_count--; if_leave_all_router (zvrf->rtadv.sock, ifp); if (zvrf->rtadv.adv_if_count == 0) rtadv_event (zvrf, RTADV_STOP, 0); } return CMD_SUCCESS; } DEFUN (no_ipv6_nd_suppress_ra, no_ipv6_nd_suppress_ra_cmd, "no ipv6 nd suppress-ra", NO_STR "Interface IPv6 config commands\n" "Neighbor discovery\n" "Suppress Router Advertisement\n") { struct interface *ifp; struct zebra_if *zif; struct zebra_vrf *zvrf; ifp = vty->index; zif = ifp->info; zvrf = vrf_info_lookup (ifp->vrf_id); if (if_is_loopback (ifp)) { vty_out (vty, "Invalid interface%s", VTY_NEWLINE); return CMD_WARNING; } if (! zif->rtadv.AdvSendAdvertisements) { zif->rtadv.AdvSendAdvertisements = 1; zif->rtadv.AdvIntervalTimer = 0; zvrf->rtadv.adv_if_count++; if_join_all_router (zvrf->rtadv.sock, ifp); if (zvrf->rtadv.adv_if_count == 1) rtadv_event (zvrf, RTADV_START, zvrf->rtadv.sock); } return CMD_SUCCESS; } DEFUN (ipv6_nd_ra_interval_msec, ipv6_nd_ra_interval_msec_cmd, "ipv6 nd ra-interval msec <70-1800000>", "Interface IPv6 config commands\n" "Neighbor discovery\n" "Router Advertisement interval\n" "Router Advertisement interval in milliseconds\n") { unsigned interval; struct interface *ifp = (struct interface *) vty->index; struct zebra_if *zif = ifp->info; struct zebra_vrf *zvrf = vrf_info_lookup (ifp->vrf_id); VTY_GET_INTEGER_RANGE ("router advertisement interval", interval, argv[0], 70, 1800000); if ((zif->rtadv.AdvDefaultLifetime != -1 && interval > (unsigned)zif->rtadv.AdvDefaultLifetime * 1000)) { vty_out (vty, "This ra-interval would conflict with configured ra-lifetime!%s", VTY_NEWLINE); return CMD_WARNING; } if (zif->rtadv.MaxRtrAdvInterval % 1000) zvrf->rtadv.adv_msec_if_count--; if (interval % 1000) zvrf->rtadv.adv_msec_if_count++; zif->rtadv.MaxRtrAdvInterval = interval; zif->rtadv.MinRtrAdvInterval = 0.33 * interval; zif->rtadv.AdvIntervalTimer = 0; return CMD_SUCCESS; } DEFUN (ipv6_nd_ra_interval, ipv6_nd_ra_interval_cmd, "ipv6 nd ra-interval <1-1800>", "Interface IPv6 config commands\n" "Neighbor discovery\n" "Router Advertisement interval\n" "Router Advertisement interval in seconds\n") { unsigned interval; struct interface *ifp = (struct interface *) vty->index; struct zebra_if *zif = ifp->info; struct zebra_vrf *zvrf = vrf_info_lookup (ifp->vrf_id); VTY_GET_INTEGER_RANGE ("router advertisement interval", interval, argv[0], 1, 1800); if ((zif->rtadv.AdvDefaultLifetime != -1 && interval > 
(unsigned)zif->rtadv.AdvDefaultLifetime)) { vty_out (vty, "This ra-interval would conflict with configured ra-lifetime!%s", VTY_NEWLINE); return CMD_WARNING; } if (zif->rtadv.MaxRtrAdvInterval % 1000) zvrf->rtadv.adv_msec_if_count--; /* convert to milliseconds */ interval = interval * 1000; zif->rtadv.MaxRtrAdvInterval = interval; zif->rtadv.MinRtrAdvInterval = 0.33 * interval; zif->rtadv.AdvIntervalTimer = 0; return CMD_SUCCESS; } DEFUN (no_ipv6_nd_ra_interval, no_ipv6_nd_ra_interval_cmd, "no ipv6 nd ra-interval", NO_STR "Interface IPv6 config commands\n" "Neighbor discovery\n" "Router Advertisement interval\n") { struct interface *ifp; struct zebra_if *zif; struct zebra_vrf *zvrf; ifp = (struct interface *) vty->index; zif = ifp->info; zvrf = vrf_info_lookup (ifp->vrf_id); if (zif->rtadv.MaxRtrAdvInterval % 1000) zvrf->rtadv.adv_msec_if_count--; zif->rtadv.MaxRtrAdvInterval = RTADV_MAX_RTR_ADV_INTERVAL; zif->rtadv.MinRtrAdvInterval = RTADV_MIN_RTR_ADV_INTERVAL; zif->rtadv.AdvIntervalTimer = zif->rtadv.MaxRtrAdvInterval; return CMD_SUCCESS; } ALIAS (no_ipv6_nd_ra_interval, no_ipv6_nd_ra_interval_val_cmd, "no ipv6 nd ra-interval <1-1800>", NO_STR "Interface IPv6 config commands\n" "Neighbor discovery\n" "Router Advertisement interval\n") ALIAS (no_ipv6_nd_ra_interval, no_ipv6_nd_ra_interval_msec_val_cmd, "no ipv6 nd ra-interval msec <1-1800000>", NO_STR "Interface IPv6 config commands\n" "Neighbor discovery\n" "Router Advertisement interval\n" "Router Advertisement interval in milliseconds\n") DEFUN (ipv6_nd_ra_lifetime, ipv6_nd_ra_lifetime_cmd, "ipv6 nd ra-lifetime <0-9000>", "Interface IPv6 config commands\n" "Neighbor discovery\n" "Router lifetime\n" "Router lifetime in seconds (0 stands for a non-default gw)\n") { int lifetime; struct interface *ifp; struct zebra_if *zif; ifp = (struct interface *) vty->index; zif = ifp->info; VTY_GET_INTEGER_RANGE ("router lifetime", lifetime, argv[0], 0, 9000); /* The value to be placed in the Router Lifetime field * of Router Advertisements sent from the interface, * in seconds. MUST be either zero or between * MaxRtrAdvInterval and 9000 seconds. 
-- RFC4861, 6.2.1 */ if ((lifetime != 0 && lifetime * 1000 < zif->rtadv.MaxRtrAdvInterval)) { vty_out (vty, "This ra-lifetime would conflict with configured ra-interval%s", VTY_NEWLINE); return CMD_WARNING; } zif->rtadv.AdvDefaultLifetime = lifetime; return CMD_SUCCESS; } DEFUN (no_ipv6_nd_ra_lifetime, no_ipv6_nd_ra_lifetime_cmd, "no ipv6 nd ra-lifetime", NO_STR "Interface IPv6 config commands\n" "Neighbor discovery\n" "Router lifetime\n") { struct interface *ifp; struct zebra_if *zif; ifp = (struct interface *) vty->index; zif = ifp->info; zif->rtadv.AdvDefaultLifetime = -1; return CMD_SUCCESS; } ALIAS (no_ipv6_nd_ra_lifetime, no_ipv6_nd_ra_lifetime_val_cmd, "no ipv6 nd ra-lifetime <0-9000>", NO_STR "Interface IPv6 config commands\n" "Neighbor discovery\n" "Router lifetime\n" "Router lifetime in seconds (0 stands for a non-default gw)\n") DEFUN (ipv6_nd_reachable_time, ipv6_nd_reachable_time_cmd, "ipv6 nd reachable-time <1-3600000>", "Interface IPv6 config commands\n" "Neighbor discovery\n" "Reachable time\n" "Reachable time in milliseconds\n") { struct interface *ifp = (struct interface *) vty->index; struct zebra_if *zif = ifp->info; VTY_GET_INTEGER_RANGE ("reachable time", zif->rtadv.AdvReachableTime, argv[0], 1, RTADV_MAX_REACHABLE_TIME); return CMD_SUCCESS; } DEFUN (no_ipv6_nd_reachable_time, no_ipv6_nd_reachable_time_cmd, "no ipv6 nd reachable-time", NO_STR "Interface IPv6 config commands\n" "Neighbor discovery\n" "Reachable time\n") { struct interface *ifp; struct zebra_if *zif; ifp = (struct interface *) vty->index; zif = ifp->info; zif->rtadv.AdvReachableTime = 0; return CMD_SUCCESS; } ALIAS (no_ipv6_nd_reachable_time, no_ipv6_nd_reachable_time_val_cmd, "no ipv6 nd reachable-time <1-3600000>", NO_STR "Interface IPv6 config commands\n" "Neighbor discovery\n" "Reachable time\n" "Reachable time in milliseconds\n") DEFUN (ipv6_nd_homeagent_preference, ipv6_nd_homeagent_preference_cmd, "ipv6 nd home-agent-preference <0-65535>", "Interface IPv6 config commands\n" "Neighbor discovery\n" "Home Agent preference\n" "preference value (default is 0, least preferred)\n") { struct interface *ifp = (struct interface *) vty->index; struct zebra_if *zif = ifp->info; VTY_GET_INTEGER_RANGE ("home agent preference", zif->rtadv.HomeAgentPreference, argv[0], 0, 65535); return CMD_SUCCESS; } DEFUN (no_ipv6_nd_homeagent_preference, no_ipv6_nd_homeagent_preference_cmd, "no ipv6 nd home-agent-preference", NO_STR "Interface IPv6 config commands\n" "Neighbor discovery\n" "Home Agent preference\n") { struct interface *ifp; struct zebra_if *zif; ifp = (struct interface *) vty->index; zif = ifp->info; zif->rtadv.HomeAgentPreference = 0; return CMD_SUCCESS; } ALIAS (no_ipv6_nd_homeagent_preference, no_ipv6_nd_homeagent_preference_val_cmd, "no ipv6 nd home-agent-preference <0-65535>", NO_STR "Interface IPv6 config commands\n" "Neighbor discovery\n" "Home Agent preference\n" "preference value (default is 0, least preferred)\n") DEFUN (ipv6_nd_homeagent_lifetime, ipv6_nd_homeagent_lifetime_cmd, "ipv6 nd home-agent-lifetime <0-65520>", "Interface IPv6 config commands\n" "Neighbor discovery\n" "Home Agent lifetime\n" "Home Agent lifetime in seconds (0 to track ra-lifetime)\n") { struct interface *ifp = (struct interface *) vty->index; struct zebra_if *zif = ifp->info; VTY_GET_INTEGER_RANGE ("home agent lifetime", zif->rtadv.HomeAgentLifetime, argv[0], 0, RTADV_MAX_HALIFETIME); return CMD_SUCCESS; } DEFUN (no_ipv6_nd_homeagent_lifetime, no_ipv6_nd_homeagent_lifetime_cmd, "no ipv6 nd home-agent-lifetime", NO_STR 
"Interface IPv6 config commands\n" "Neighbor discovery\n" "Home Agent lifetime\n") { struct interface *ifp; struct zebra_if *zif; ifp = (struct interface *) vty->index; zif = ifp->info; zif->rtadv.HomeAgentLifetime = -1; return CMD_SUCCESS; } ALIAS (no_ipv6_nd_homeagent_lifetime, no_ipv6_nd_homeagent_lifetime_val_cmd, "no ipv6 nd home-agent-lifetime <0-65520>", NO_STR "Interface IPv6 config commands\n" "Neighbor discovery\n" "Home Agent lifetime\n" "Home Agent lifetime in seconds (0 to track ra-lifetime)\n") DEFUN (ipv6_nd_managed_config_flag, ipv6_nd_managed_config_flag_cmd, "ipv6 nd managed-config-flag", "Interface IPv6 config commands\n" "Neighbor discovery\n" "Managed address configuration flag\n") { struct interface *ifp; struct zebra_if *zif; ifp = (struct interface *) vty->index; zif = ifp->info; zif->rtadv.AdvManagedFlag = 1; return CMD_SUCCESS; } DEFUN (no_ipv6_nd_managed_config_flag, no_ipv6_nd_managed_config_flag_cmd, "no ipv6 nd managed-config-flag", NO_STR "Interface IPv6 config commands\n" "Neighbor discovery\n" "Managed address configuration flag\n") { struct interface *ifp; struct zebra_if *zif; ifp = (struct interface *) vty->index; zif = ifp->info; zif->rtadv.AdvManagedFlag = 0; return CMD_SUCCESS; } DEFUN (ipv6_nd_homeagent_config_flag, ipv6_nd_homeagent_config_flag_cmd, "ipv6 nd home-agent-config-flag", "Interface IPv6 config commands\n" "Neighbor discovery\n" "Home Agent configuration flag\n") { struct interface *ifp; struct zebra_if *zif; ifp = (struct interface *) vty->index; zif = ifp->info; zif->rtadv.AdvHomeAgentFlag = 1; return CMD_SUCCESS; } DEFUN (no_ipv6_nd_homeagent_config_flag, no_ipv6_nd_homeagent_config_flag_cmd, "no ipv6 nd home-agent-config-flag", NO_STR "Interface IPv6 config commands\n" "Neighbor discovery\n" "Home Agent configuration flag\n") { struct interface *ifp; struct zebra_if *zif; ifp = (struct interface *) vty->index; zif = ifp->info; zif->rtadv.AdvHomeAgentFlag = 0; return CMD_SUCCESS; } DEFUN (ipv6_nd_adv_interval_config_option, ipv6_nd_adv_interval_config_option_cmd, "ipv6 nd adv-interval-option", "Interface IPv6 config commands\n" "Neighbor discovery\n" "Advertisement Interval Option\n") { struct interface *ifp; struct zebra_if *zif; ifp = (struct interface *) vty->index; zif = ifp->info; zif->rtadv.AdvIntervalOption = 1; return CMD_SUCCESS; } DEFUN (no_ipv6_nd_adv_interval_config_option, no_ipv6_nd_adv_interval_config_option_cmd, "no ipv6 nd adv-interval-option", NO_STR "Interface IPv6 config commands\n" "Neighbor discovery\n" "Advertisement Interval Option\n") { struct interface *ifp; struct zebra_if *zif; ifp = (struct interface *) vty->index; zif = ifp->info; zif->rtadv.AdvIntervalOption = 0; return CMD_SUCCESS; } DEFUN (ipv6_nd_other_config_flag, ipv6_nd_other_config_flag_cmd, "ipv6 nd other-config-flag", "Interface IPv6 config commands\n" "Neighbor discovery\n" "Other statefull configuration flag\n") { struct interface *ifp; struct zebra_if *zif; ifp = (struct interface *) vty->index; zif = ifp->info; zif->rtadv.AdvOtherConfigFlag = 1; return CMD_SUCCESS; } DEFUN (no_ipv6_nd_other_config_flag, no_ipv6_nd_other_config_flag_cmd, "no ipv6 nd other-config-flag", NO_STR "Interface IPv6 config commands\n" "Neighbor discovery\n" "Other statefull configuration flag\n") { struct interface *ifp; struct zebra_if *zif; ifp = (struct interface *) vty->index; zif = ifp->info; zif->rtadv.AdvOtherConfigFlag = 0; return CMD_SUCCESS; } DEFUN (ipv6_nd_prefix, ipv6_nd_prefix_cmd, "ipv6 nd prefix X:X::X:X/M (<0-4294967295>|infinite) " 
"(<0-4294967295>|infinite) (off-link|) (no-autoconfig|) (router-address|)", "Interface IPv6 config commands\n" "Neighbor discovery\n" "Prefix information\n" "IPv6 prefix\n" "Valid lifetime in seconds\n" "Infinite valid lifetime\n" "Preferred lifetime in seconds\n" "Infinite preferred lifetime\n" "Do not use prefix for onlink determination\n" "Do not use prefix for autoconfiguration\n" "Set Router Address flag\n") { int i; int ret; int cursor = 1; struct interface *ifp; struct zebra_if *zebra_if; struct rtadv_prefix rp; ifp = (struct interface *) vty->index; zebra_if = ifp->info; ret = str2prefix_ipv6 (argv[0], &rp.prefix); if (!ret) { vty_out (vty, "Malformed IPv6 prefix%s", VTY_NEWLINE); return CMD_WARNING; } apply_mask_ipv6 (&rp.prefix); /* RFC4861 4.6.2 */ rp.AdvOnLinkFlag = 1; rp.AdvAutonomousFlag = 1; rp.AdvRouterAddressFlag = 0; rp.AdvValidLifetime = RTADV_VALID_LIFETIME; rp.AdvPreferredLifetime = RTADV_PREFERRED_LIFETIME; if (argc > 1) { if ((isdigit((unsigned char)argv[1][0])) || strncmp (argv[1], "i", 1) == 0) { if ( strncmp (argv[1], "i", 1) == 0) rp.AdvValidLifetime = UINT32_MAX; else rp.AdvValidLifetime = (u_int32_t) strtoll (argv[1], (char **)NULL, 10); if ( strncmp (argv[2], "i", 1) == 0) rp.AdvPreferredLifetime = UINT32_MAX; else rp.AdvPreferredLifetime = (u_int32_t) strtoll (argv[2], (char **)NULL, 10); if (rp.AdvPreferredLifetime > rp.AdvValidLifetime) { vty_out (vty, "Invalid preferred lifetime%s", VTY_NEWLINE); return CMD_WARNING; } cursor = cursor + 2; } if (argc > cursor) { for (i = cursor; i < argc; i++) { if (strncmp (argv[i], "of", 2) == 0) rp.AdvOnLinkFlag = 0; if (strncmp (argv[i], "no", 2) == 0) rp.AdvAutonomousFlag = 0; if (strncmp (argv[i], "ro", 2) == 0) rp.AdvRouterAddressFlag = 1; } } } rtadv_prefix_set (zebra_if, &rp); return CMD_SUCCESS; } ALIAS (ipv6_nd_prefix, ipv6_nd_prefix_val_nortaddr_cmd, "ipv6 nd prefix X:X::X:X/M (<0-4294967295>|infinite) " "(<0-4294967295>|infinite) (off-link|) (no-autoconfig|)", "Interface IPv6 config commands\n" "Neighbor discovery\n" "Prefix information\n" "IPv6 prefix\n" "Valid lifetime in seconds\n" "Infinite valid lifetime\n" "Preferred lifetime in seconds\n" "Infinite preferred lifetime\n" "Do not use prefix for onlink determination\n" "Do not use prefix for autoconfiguration\n") ALIAS (ipv6_nd_prefix, ipv6_nd_prefix_val_rev_cmd, "ipv6 nd prefix X:X::X:X/M (<0-4294967295>|infinite) " "(<0-4294967295>|infinite) (no-autoconfig|) (off-link|)", "Interface IPv6 config commands\n" "Neighbor discovery\n" "Prefix information\n" "IPv6 prefix\n" "Valid lifetime in seconds\n" "Infinite valid lifetime\n" "Preferred lifetime in seconds\n" "Infinite preferred lifetime\n" "Do not use prefix for autoconfiguration\n" "Do not use prefix for onlink determination\n") ALIAS (ipv6_nd_prefix, ipv6_nd_prefix_val_rev_rtaddr_cmd, "ipv6 nd prefix X:X::X:X/M (<0-4294967295>|infinite) " "(<0-4294967295>|infinite) (no-autoconfig|) (off-link|) (router-address|)", "Interface IPv6 config commands\n" "Neighbor discovery\n" "Prefix information\n" "IPv6 prefix\n" "Valid lifetime in seconds\n" "Infinite valid lifetime\n" "Preferred lifetime in seconds\n" "Infinite preferred lifetime\n" "Do not use prefix for autoconfiguration\n" "Do not use prefix for onlink determination\n" "Set Router Address flag\n") ALIAS (ipv6_nd_prefix, ipv6_nd_prefix_val_noauto_cmd, "ipv6 nd prefix X:X::X:X/M (<0-4294967295>|infinite) " "(<0-4294967295>|infinite) (no-autoconfig|)", "Interface IPv6 config commands\n" "Neighbor discovery\n" "Prefix information\n" "IPv6 prefix\n" "Valid 
lifetime in seconds\n" "Infinite valid lifetime\n" "Preferred lifetime in seconds\n" "Infinite preferred lifetime\n" "Do not use prefix for autoconfiguration") ALIAS (ipv6_nd_prefix, ipv6_nd_prefix_val_offlink_cmd, "ipv6 nd prefix X:X::X:X/M (<0-4294967295>|infinite) " "(<0-4294967295>|infinite) (off-link|)", "Interface IPv6 config commands\n" "Neighbor discovery\n" "Prefix information\n" "IPv6 prefix\n" "Valid lifetime in seconds\n" "Infinite valid lifetime\n" "Preferred lifetime in seconds\n" "Infinite preferred lifetime\n" "Do not use prefix for onlink determination\n") ALIAS (ipv6_nd_prefix, ipv6_nd_prefix_val_rtaddr_cmd, "ipv6 nd prefix X:X::X:X/M (<0-4294967295>|infinite) " "(<0-4294967295>|infinite) (router-address|)", "Interface IPv6 config commands\n" "Neighbor discovery\n" "Prefix information\n" "IPv6 prefix\n" "Valid lifetime in seconds\n" "Infinite valid lifetime\n" "Preferred lifetime in seconds\n" "Infinite preferred lifetime\n" "Set Router Address flag\n") ALIAS (ipv6_nd_prefix, ipv6_nd_prefix_val_cmd, "ipv6 nd prefix X:X::X:X/M (<0-4294967295>|infinite) " "(<0-4294967295>|infinite)", "Interface IPv6 config commands\n" "Neighbor discovery\n" "Prefix information\n" "IPv6 prefix\n" "Valid lifetime in seconds\n" "Infinite valid lifetime\n" "Preferred lifetime in seconds\n" "Infinite preferred lifetime\n") ALIAS (ipv6_nd_prefix, ipv6_nd_prefix_noval_cmd, "ipv6 nd prefix X:X::X:X/M (no-autoconfig|) (off-link|)", "Interface IPv6 config commands\n" "Neighbor discovery\n" "Prefix information\n" "IPv6 prefix\n" "Do not use prefix for autoconfiguration\n" "Do not use prefix for onlink determination\n") ALIAS (ipv6_nd_prefix, ipv6_nd_prefix_noval_rev_cmd, "ipv6 nd prefix X:X::X:X/M (off-link|) (no-autoconfig|)", "Interface IPv6 config commands\n" "Neighbor discovery\n" "Prefix information\n" "IPv6 prefix\n" "Do not use prefix for onlink determination\n" "Do not use prefix for autoconfiguration\n") ALIAS (ipv6_nd_prefix, ipv6_nd_prefix_noval_noauto_cmd, "ipv6 nd prefix X:X::X:X/M (no-autoconfig|)", "Interface IPv6 config commands\n" "Neighbor discovery\n" "Prefix information\n" "IPv6 prefix\n" "Do not use prefix for autoconfiguration\n") ALIAS (ipv6_nd_prefix, ipv6_nd_prefix_noval_offlink_cmd, "ipv6 nd prefix X:X::X:X/M (off-link|)", "Interface IPv6 config commands\n" "Neighbor discovery\n" "Prefix information\n" "IPv6 prefix\n" "Do not use prefix for onlink determination\n") ALIAS (ipv6_nd_prefix, ipv6_nd_prefix_noval_rtaddr_cmd, "ipv6 nd prefix X:X::X:X/M (router-address|)", "Interface IPv6 config commands\n" "Neighbor discovery\n" "Prefix information\n" "IPv6 prefix\n" "Set Router Address flag\n") ALIAS (ipv6_nd_prefix, ipv6_nd_prefix_prefix_cmd, "ipv6 nd prefix X:X::X:X/M", "Interface IPv6 config commands\n" "Neighbor discovery\n" "Prefix information\n" "IPv6 prefix\n") DEFUN (no_ipv6_nd_prefix, no_ipv6_nd_prefix_cmd, "no ipv6 nd prefix IPV6PREFIX", NO_STR "Interface IPv6 config commands\n" "Neighbor discovery\n" "Prefix information\n" "IPv6 prefix\n") { int ret; struct interface *ifp; struct zebra_if *zebra_if; struct rtadv_prefix rp; ifp = (struct interface *) vty->index; zebra_if = ifp->info; ret = str2prefix_ipv6 (argv[0], &rp.prefix); if (!ret) { vty_out (vty, "Malformed IPv6 prefix%s", VTY_NEWLINE); return CMD_WARNING; } apply_mask_ipv6 (&rp.prefix); /* RFC4861 4.6.2 */ ret = rtadv_prefix_reset (zebra_if, &rp); if (!ret) { vty_out (vty, "Non-exist IPv6 prefix%s", VTY_NEWLINE); return CMD_WARNING; } return CMD_SUCCESS; } DEFUN (ipv6_nd_router_preference, 
ipv6_nd_router_preference_cmd, "ipv6 nd router-preference (high|medium|low)", "Interface IPv6 config commands\n" "Neighbor discovery\n" "Default router preference\n" "High default router preference\n" "Low default router preference\n" "Medium default router preference (default)\n") { struct interface *ifp; struct zebra_if *zif; int i = 0; ifp = (struct interface *) vty->index; zif = ifp->info; while (0 != rtadv_pref_strs[i]) { if (strncmp (argv[0], rtadv_pref_strs[i], 1) == 0) { zif->rtadv.DefaultPreference = i; return CMD_SUCCESS; } i++; } return CMD_ERR_NO_MATCH; } DEFUN (no_ipv6_nd_router_preference, no_ipv6_nd_router_preference_cmd, "no ipv6 nd router-preference", NO_STR "Interface IPv6 config commands\n" "Neighbor discovery\n" "Default router preference\n") { struct interface *ifp; struct zebra_if *zif; ifp = (struct interface *) vty->index; zif = ifp->info; zif->rtadv.DefaultPreference = RTADV_PREF_MEDIUM; /* Default per RFC4191. */ return CMD_SUCCESS; } ALIAS (no_ipv6_nd_router_preference, no_ipv6_nd_router_preference_val_cmd, "no ipv6 nd router-preference (high|medium|low)", NO_STR "Interface IPv6 config commands\n" "Neighbor discovery\n" "Default router preference\n" "High default router preference\n" "Low default router preference\n" "Medium default router preference (default)\n") DEFUN (ipv6_nd_mtu, ipv6_nd_mtu_cmd, "ipv6 nd mtu <1-65535>", "Interface IPv6 config commands\n" "Neighbor discovery\n" "Advertised MTU\n" "MTU in bytes\n") { struct interface *ifp = (struct interface *) vty->index; struct zebra_if *zif = ifp->info; VTY_GET_INTEGER_RANGE ("MTU", zif->rtadv.AdvLinkMTU, argv[0], 1, 65535); return CMD_SUCCESS; } DEFUN (no_ipv6_nd_mtu, no_ipv6_nd_mtu_cmd, "no ipv6 nd mtu", NO_STR "Interface IPv6 config commands\n" "Neighbor discovery\n" "Advertised MTU\n") { struct interface *ifp = (struct interface *) vty->index; struct zebra_if *zif = ifp->info; zif->rtadv.AdvLinkMTU = 0; return CMD_SUCCESS; } ALIAS (no_ipv6_nd_mtu, no_ipv6_nd_mtu_val_cmd, "no ipv6 nd mtu <1-65535>", NO_STR "Interface IPv6 config commands\n" "Neighbor discovery\n" "Advertised MTU\n" "MTU in bytes\n") /* Write configuration about router advertisement. */ void rtadv_config_write (struct vty *vty, struct interface *ifp) { struct zebra_if *zif; struct listnode *node; struct rtadv_prefix *rprefix; char buf[PREFIX_STRLEN]; int interval; zif = ifp->info; if (! 
if_is_loopback (ifp)) { if (zif->rtadv.AdvSendAdvertisements) vty_out (vty, " no ipv6 nd suppress-ra%s", VTY_NEWLINE); } interval = zif->rtadv.MaxRtrAdvInterval; if (interval % 1000) vty_out (vty, " ipv6 nd ra-interval msec %d%s", interval, VTY_NEWLINE); else if (interval != RTADV_MAX_RTR_ADV_INTERVAL) vty_out (vty, " ipv6 nd ra-interval %d%s", interval / 1000, VTY_NEWLINE); if (zif->rtadv.AdvIntervalOption) vty_out (vty, " ipv6 nd adv-interval-option%s", VTY_NEWLINE); if (zif->rtadv.AdvDefaultLifetime != -1) vty_out (vty, " ipv6 nd ra-lifetime %d%s", zif->rtadv.AdvDefaultLifetime, VTY_NEWLINE); if (zif->rtadv.HomeAgentPreference) vty_out (vty, " ipv6 nd home-agent-preference %u%s", zif->rtadv.HomeAgentPreference, VTY_NEWLINE); if (zif->rtadv.HomeAgentLifetime != -1) vty_out (vty, " ipv6 nd home-agent-lifetime %u%s", zif->rtadv.HomeAgentLifetime, VTY_NEWLINE); if (zif->rtadv.AdvHomeAgentFlag) vty_out (vty, " ipv6 nd home-agent-config-flag%s", VTY_NEWLINE); if (zif->rtadv.AdvReachableTime) vty_out (vty, " ipv6 nd reachable-time %d%s", zif->rtadv.AdvReachableTime, VTY_NEWLINE); if (zif->rtadv.AdvManagedFlag) vty_out (vty, " ipv6 nd managed-config-flag%s", VTY_NEWLINE); if (zif->rtadv.AdvOtherConfigFlag) vty_out (vty, " ipv6 nd other-config-flag%s", VTY_NEWLINE); if (zif->rtadv.DefaultPreference != RTADV_PREF_MEDIUM) vty_out (vty, " ipv6 nd router-preference %s%s", rtadv_pref_strs[zif->rtadv.DefaultPreference], VTY_NEWLINE); if (zif->rtadv.AdvLinkMTU) vty_out (vty, " ipv6 nd mtu %d%s", zif->rtadv.AdvLinkMTU, VTY_NEWLINE); for (ALL_LIST_ELEMENTS_RO (zif->rtadv.AdvPrefixList, node, rprefix)) { vty_out (vty, " ipv6 nd prefix %s", prefix2str (&rprefix->prefix, buf, sizeof(buf))); if ((rprefix->AdvValidLifetime != RTADV_VALID_LIFETIME) || (rprefix->AdvPreferredLifetime != RTADV_PREFERRED_LIFETIME)) { if (rprefix->AdvValidLifetime == UINT32_MAX) vty_out (vty, " infinite"); else vty_out (vty, " %u", rprefix->AdvValidLifetime); if (rprefix->AdvPreferredLifetime == UINT32_MAX) vty_out (vty, " infinite"); else vty_out (vty, " %u", rprefix->AdvPreferredLifetime); } if (!rprefix->AdvOnLinkFlag) vty_out (vty, " off-link"); if (!rprefix->AdvAutonomousFlag) vty_out (vty, " no-autoconfig"); if (rprefix->AdvRouterAddressFlag) vty_out (vty, " router-address"); vty_out (vty, "%s", VTY_NEWLINE); } } static void rtadv_event (struct zebra_vrf *zvrf, enum rtadv_event event, int val) { struct rtadv *rtadv = &zvrf->rtadv; switch (event) { case RTADV_START: if (! rtadv->ra_read) rtadv->ra_read = thread_add_read (zebrad.master, rtadv_read, zvrf, val); if (! rtadv->ra_timer) rtadv->ra_timer = thread_add_event (zebrad.master, rtadv_timer, zvrf, 0); break; case RTADV_STOP: if (rtadv->ra_timer) { thread_cancel (rtadv->ra_timer); rtadv->ra_timer = NULL; } if (rtadv->ra_read) { thread_cancel (rtadv->ra_read); rtadv->ra_read = NULL; } break; case RTADV_TIMER: if (! rtadv->ra_timer) rtadv->ra_timer = thread_add_timer (zebrad.master, rtadv_timer, zvrf, val); break; case RTADV_TIMER_MSEC: if (! rtadv->ra_timer) rtadv->ra_timer = thread_add_timer_msec (zebrad.master, rtadv_timer, zvrf, val); break; case RTADV_READ: if (! 
rtadv->ra_read) rtadv->ra_read = thread_add_read (zebrad.master, rtadv_read, zvrf, val); break; default: break; } return; } void rtadv_init (struct zebra_vrf *zvrf) { zvrf->rtadv.sock = rtadv_make_socket (zvrf->vrf_id); } void rtadv_terminate (struct zebra_vrf *zvrf) { rtadv_event (zvrf, RTADV_STOP, 0); if (zvrf->rtadv.sock >= 0) { close (zvrf->rtadv.sock); zvrf->rtadv.sock = -1; } zvrf->rtadv.adv_if_count = 0; zvrf->rtadv.adv_msec_if_count = 0; } void rtadv_cmd_init (void) { install_element (INTERFACE_NODE, &ipv6_nd_suppress_ra_cmd); install_element (INTERFACE_NODE, &no_ipv6_nd_suppress_ra_cmd); install_element (INTERFACE_NODE, &ipv6_nd_ra_interval_cmd); install_element (INTERFACE_NODE, &ipv6_nd_ra_interval_msec_cmd); install_element (INTERFACE_NODE, &no_ipv6_nd_ra_interval_cmd); install_element (INTERFACE_NODE, &no_ipv6_nd_ra_interval_val_cmd); install_element (INTERFACE_NODE, &no_ipv6_nd_ra_interval_msec_val_cmd); install_element (INTERFACE_NODE, &ipv6_nd_ra_lifetime_cmd); install_element (INTERFACE_NODE, &no_ipv6_nd_ra_lifetime_cmd); install_element (INTERFACE_NODE, &no_ipv6_nd_ra_lifetime_val_cmd); install_element (INTERFACE_NODE, &ipv6_nd_reachable_time_cmd); install_element (INTERFACE_NODE, &no_ipv6_nd_reachable_time_cmd); install_element (INTERFACE_NODE, &no_ipv6_nd_reachable_time_val_cmd); install_element (INTERFACE_NODE, &ipv6_nd_managed_config_flag_cmd); install_element (INTERFACE_NODE, &no_ipv6_nd_managed_config_flag_cmd); install_element (INTERFACE_NODE, &ipv6_nd_other_config_flag_cmd); install_element (INTERFACE_NODE, &no_ipv6_nd_other_config_flag_cmd); install_element (INTERFACE_NODE, &ipv6_nd_homeagent_config_flag_cmd); install_element (INTERFACE_NODE, &no_ipv6_nd_homeagent_config_flag_cmd); install_element (INTERFACE_NODE, &ipv6_nd_homeagent_preference_cmd); install_element (INTERFACE_NODE, &no_ipv6_nd_homeagent_preference_cmd); install_element (INTERFACE_NODE, &no_ipv6_nd_homeagent_preference_val_cmd); install_element (INTERFACE_NODE, &ipv6_nd_homeagent_lifetime_cmd); install_element (INTERFACE_NODE, &no_ipv6_nd_homeagent_lifetime_cmd); install_element (INTERFACE_NODE, &no_ipv6_nd_homeagent_lifetime_val_cmd); install_element (INTERFACE_NODE, &ipv6_nd_adv_interval_config_option_cmd); install_element (INTERFACE_NODE, &no_ipv6_nd_adv_interval_config_option_cmd); install_element (INTERFACE_NODE, &ipv6_nd_prefix_cmd); install_element (INTERFACE_NODE, &ipv6_nd_prefix_val_rev_rtaddr_cmd); install_element (INTERFACE_NODE, &ipv6_nd_prefix_val_nortaddr_cmd); install_element (INTERFACE_NODE, &ipv6_nd_prefix_val_rev_cmd); install_element (INTERFACE_NODE, &ipv6_nd_prefix_val_noauto_cmd); install_element (INTERFACE_NODE, &ipv6_nd_prefix_val_offlink_cmd); install_element (INTERFACE_NODE, &ipv6_nd_prefix_val_rtaddr_cmd); install_element (INTERFACE_NODE, &ipv6_nd_prefix_val_cmd); install_element (INTERFACE_NODE, &ipv6_nd_prefix_noval_cmd); install_element (INTERFACE_NODE, &ipv6_nd_prefix_noval_rev_cmd); install_element (INTERFACE_NODE, &ipv6_nd_prefix_noval_noauto_cmd); install_element (INTERFACE_NODE, &ipv6_nd_prefix_noval_offlink_cmd); install_element (INTERFACE_NODE, &ipv6_nd_prefix_noval_rtaddr_cmd); install_element (INTERFACE_NODE, &ipv6_nd_prefix_prefix_cmd); install_element (INTERFACE_NODE, &no_ipv6_nd_prefix_cmd); install_element (INTERFACE_NODE, &ipv6_nd_router_preference_cmd); install_element (INTERFACE_NODE, &no_ipv6_nd_router_preference_cmd); install_element (INTERFACE_NODE, &no_ipv6_nd_router_preference_val_cmd); install_element (INTERFACE_NODE, &ipv6_nd_mtu_cmd); 
install_element (INTERFACE_NODE, &no_ipv6_nd_mtu_cmd); install_element (INTERFACE_NODE, &no_ipv6_nd_mtu_val_cmd); } static int if_join_all_router (int sock, struct interface *ifp) { int ret; struct ipv6_mreq mreq; memset (&mreq, 0, sizeof (struct ipv6_mreq)); inet_pton (AF_INET6, ALLROUTER, &mreq.ipv6mr_multiaddr); mreq.ipv6mr_interface = ifp->ifindex; ret = setsockopt (sock, IPPROTO_IPV6, IPV6_JOIN_GROUP, (char *) &mreq, sizeof mreq); if (ret < 0) zlog_warn ("can't setsockopt IPV6_JOIN_GROUP: %s", safe_strerror (errno)); zlog_info ("rtadv: %s join to all-routers multicast group", ifp->name); return 0; } static int if_leave_all_router (int sock, struct interface *ifp) { int ret; struct ipv6_mreq mreq; memset (&mreq, 0, sizeof (struct ipv6_mreq)); inet_pton (AF_INET6, ALLROUTER, &mreq.ipv6mr_multiaddr); mreq.ipv6mr_interface = ifp->ifindex; ret = setsockopt (sock, IPPROTO_IPV6, IPV6_LEAVE_GROUP, (char *) &mreq, sizeof mreq); if (ret < 0) zlog_warn ("can't setsockopt IPV6_LEAVE_GROUP: %s", safe_strerror (errno)); zlog_info ("rtadv: %s leave from all-routers multicast group", ifp->name); return 0; } #else void rtadv_init (struct zebra_vrf *zvrf) { /* Empty.*/; } void rtadv_terminate (struct zebra_vrf *zvrf) { /* Empty.*/; } void rtadv_cmd_init (void) { /* Empty.*/; } #endif /* HAVE_RTADV && HAVE_IPV6 */
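One detail worth flagging in the rtadv_read function of the pre-patch source above: buf is declared as u_char buf[RTADV_MSG_SIZE] (4096 bytes), yet the receive call passes BUFSIZ as the buffer length, and glibc defines BUFSIZ as 8192, so a sufficiently large ICMPv6 datagram can overrun the stack buffer. The upstream Quagga fix for this class of bug passed sizeof (buf) instead; whether this record's code_after (truncated in this excerpt) does exactly that is an assumption. A small runnable sketch of the safe pattern, with recv_into standing in for rtadv_recv_packet:

#include <stdio.h>
#include <string.h>

#define RTADV_MSG_SIZE 4096

/* Stand-in for rtadv_recv_packet(): may write up to buflen bytes into buf.
 * Passing BUFSIZ (8192 with glibc) for a 4096-byte array overruns the
 * stack -- the pattern visible in the code_before above. */
static int recv_into(unsigned char *buf, int buflen)
{
    memset(buf, 0xAA, (size_t)buflen);   /* models an attacker-sized datagram */
    return buflen;
}

int main(void)
{
    unsigned char buf[RTADV_MSG_SIZE];
    int len = recv_into(buf, (int)sizeof(buf));  /* bound comes from the buffer */
    printf("read %d bytes into a %zu-byte buffer\n", len, sizeof(buf));
    return 0;
}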
/* Router advertisement * Copyright (C) 2005 6WIND <jean-mickael.guerin@6wind.com> * Copyright (C) 1999 Kunihiro Ishiguro * * This file is part of GNU Zebra. * * GNU Zebra is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2, or (at your option) any * later version. * * GNU Zebra is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with GNU Zebra; see the file COPYING. If not, write to the Free * Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA * 02111-1307, USA. */ #include <zebra.h> #include "memory.h" #include "sockopt.h" #include "thread.h" #include "if.h" #include "log.h" #include "prefix.h" #include "linklist.h" #include "command.h" #include "privs.h" #include "vrf.h" #include "zebra/interface.h" #include "zebra/rtadv.h" #include "zebra/debug.h" #include "zebra/rib.h" #include "zebra/zserv.h" extern struct zebra_privs_t zserv_privs; #if defined (HAVE_IPV6) && defined (HAVE_RTADV) #ifdef OPEN_BSD #include <netinet/icmp6.h> #endif /* If RFC2133 definition is used. */ #ifndef IPV6_JOIN_GROUP #define IPV6_JOIN_GROUP IPV6_ADD_MEMBERSHIP #endif #ifndef IPV6_LEAVE_GROUP #define IPV6_LEAVE_GROUP IPV6_DROP_MEMBERSHIP #endif #define ALLNODE "ff02::1" #define ALLROUTER "ff02::2" extern struct zebra_t zebrad; enum rtadv_event {RTADV_START, RTADV_STOP, RTADV_TIMER, RTADV_TIMER_MSEC, RTADV_READ}; static void rtadv_event (struct zebra_vrf *, enum rtadv_event, int); static int if_join_all_router (int, struct interface *); static int if_leave_all_router (int, struct interface *); static int rtadv_recv_packet (int sock, u_char *buf, int buflen, struct sockaddr_in6 *from, ifindex_t *ifindex, int *hoplimit) { int ret; struct msghdr msg; struct iovec iov; struct cmsghdr *cmsgptr; struct in6_addr dst; char adata[1024]; /* Fill in message and iovec. */ msg.msg_name = (void *) from; msg.msg_namelen = sizeof (struct sockaddr_in6); msg.msg_iov = &iov; msg.msg_iovlen = 1; msg.msg_control = (void *) adata; msg.msg_controllen = sizeof adata; iov.iov_base = buf; iov.iov_len = buflen; /* If recvmsg fail return minus value. */ ret = recvmsg (sock, &msg, 0); if (ret < 0) return ret; for (cmsgptr = ZCMSG_FIRSTHDR(&msg); cmsgptr != NULL; cmsgptr = CMSG_NXTHDR(&msg, cmsgptr)) { /* I want interface index which this packet comes from. */ if (cmsgptr->cmsg_level == IPPROTO_IPV6 && cmsgptr->cmsg_type == IPV6_PKTINFO) { struct in6_pktinfo *ptr; ptr = (struct in6_pktinfo *) CMSG_DATA (cmsgptr); *ifindex = ptr->ipi6_ifindex; memcpy(&dst, &ptr->ipi6_addr, sizeof(ptr->ipi6_addr)); } /* Incoming packet's hop limit. */ if (cmsgptr->cmsg_level == IPPROTO_IPV6 && cmsgptr->cmsg_type == IPV6_HOPLIMIT) { int *hoptr = (int *) CMSG_DATA (cmsgptr); *hoplimit = *hoptr; } } return ret; } #define RTADV_MSG_SIZE 4096 /* Send router advertisement packet. 
*/ static void rtadv_send_packet (int sock, struct interface *ifp) { struct msghdr msg; struct iovec iov; struct cmsghdr *cmsgptr; struct in6_pktinfo *pkt; struct sockaddr_in6 addr; #ifdef HAVE_STRUCT_SOCKADDR_DL struct sockaddr_dl *sdl; #endif /* HAVE_STRUCT_SOCKADDR_DL */ static void *adata = NULL; unsigned char buf[RTADV_MSG_SIZE]; struct nd_router_advert *rtadv; int ret; int len = 0; struct zebra_if *zif; struct rtadv_prefix *rprefix; u_char all_nodes_addr[] = {0xff,0x02,0,0,0,0,0,0,0,0,0,0,0,0,0,1}; struct listnode *node; u_int16_t pkt_RouterLifetime; /* * Allocate control message bufffer. This is dynamic because * CMSG_SPACE is not guaranteed not to call a function. Note that * the size will be different on different architectures due to * differing alignment rules. */ if (adata == NULL) { /* XXX Free on shutdown. */ adata = malloc(CMSG_SPACE(sizeof(struct in6_pktinfo))); if (adata == NULL) zlog_err("rtadv_send_packet: can't malloc control data\n"); } /* Logging of packet. */ if (IS_ZEBRA_DEBUG_PACKET) zlog_debug ("Router advertisement send to %s", ifp->name); /* Fill in sockaddr_in6. */ memset (&addr, 0, sizeof (struct sockaddr_in6)); addr.sin6_family = AF_INET6; #ifdef SIN6_LEN addr.sin6_len = sizeof (struct sockaddr_in6); #endif /* SIN6_LEN */ addr.sin6_port = htons (IPPROTO_ICMPV6); IPV6_ADDR_COPY (&addr.sin6_addr, all_nodes_addr); /* Fetch interface information. */ zif = ifp->info; /* Make router advertisement message. */ rtadv = (struct nd_router_advert *) buf; rtadv->nd_ra_type = ND_ROUTER_ADVERT; rtadv->nd_ra_code = 0; rtadv->nd_ra_cksum = 0; rtadv->nd_ra_curhoplimit = 64; /* RFC4191: Default Router Preference is 0 if Router Lifetime is 0. */ rtadv->nd_ra_flags_reserved = zif->rtadv.AdvDefaultLifetime == 0 ? 0 : zif->rtadv.DefaultPreference; rtadv->nd_ra_flags_reserved <<= 3; if (zif->rtadv.AdvManagedFlag) rtadv->nd_ra_flags_reserved |= ND_RA_FLAG_MANAGED; if (zif->rtadv.AdvOtherConfigFlag) rtadv->nd_ra_flags_reserved |= ND_RA_FLAG_OTHER; if (zif->rtadv.AdvHomeAgentFlag) rtadv->nd_ra_flags_reserved |= ND_RA_FLAG_HOME_AGENT; /* Note that according to Neighbor Discovery (RFC 4861 [18]), * AdvDefaultLifetime is by default based on the value of * MaxRtrAdvInterval. AdvDefaultLifetime is used in the Router Lifetime * field of Router Advertisements. Given that this field is expressed * in seconds, a small MaxRtrAdvInterval value can result in a zero * value for this field. To prevent this, routers SHOULD keep * AdvDefaultLifetime in at least one second, even if the use of * MaxRtrAdvInterval would result in a smaller value. -- RFC6275, 7.5 */ pkt_RouterLifetime = zif->rtadv.AdvDefaultLifetime != -1 ? zif->rtadv.AdvDefaultLifetime : MAX (1, 0.003 * zif->rtadv.MaxRtrAdvInterval); rtadv->nd_ra_router_lifetime = htons (pkt_RouterLifetime); rtadv->nd_ra_reachable = htonl (zif->rtadv.AdvReachableTime); rtadv->nd_ra_retransmit = htonl (0); len = sizeof (struct nd_router_advert); /* If both the Home Agent Preference and Home Agent Lifetime are set to * their default values specified above, this option SHOULD NOT be * included in the Router Advertisement messages sent by this home * agent. 
-- RFC6275, 7.4 */ if ( zif->rtadv.AdvHomeAgentFlag && (zif->rtadv.HomeAgentPreference || zif->rtadv.HomeAgentLifetime != -1) ) { struct nd_opt_homeagent_info *ndopt_hai = (struct nd_opt_homeagent_info *)(buf + len); ndopt_hai->nd_opt_hai_type = ND_OPT_HA_INFORMATION; ndopt_hai->nd_opt_hai_len = 1; ndopt_hai->nd_opt_hai_reserved = 0; ndopt_hai->nd_opt_hai_preference = htons(zif->rtadv.HomeAgentPreference); /* 16-bit unsigned integer. The lifetime associated with the home * agent in units of seconds. The default value is the same as the * Router Lifetime, as specified in the main body of the Router * Advertisement. The maximum value corresponds to 18.2 hours. A * value of 0 MUST NOT be used. -- RFC6275, 7.5 */ ndopt_hai->nd_opt_hai_lifetime = htons ( zif->rtadv.HomeAgentLifetime != -1 ? zif->rtadv.HomeAgentLifetime : MAX (1, pkt_RouterLifetime) /* 0 is OK for RL, but not for HAL*/ ); len += sizeof(struct nd_opt_homeagent_info); } if (zif->rtadv.AdvIntervalOption) { struct nd_opt_adv_interval *ndopt_adv = (struct nd_opt_adv_interval *)(buf + len); ndopt_adv->nd_opt_ai_type = ND_OPT_ADV_INTERVAL; ndopt_adv->nd_opt_ai_len = 1; ndopt_adv->nd_opt_ai_reserved = 0; ndopt_adv->nd_opt_ai_interval = htonl(zif->rtadv.MaxRtrAdvInterval); len += sizeof(struct nd_opt_adv_interval); } /* Fill in prefix. */ for (ALL_LIST_ELEMENTS_RO (zif->rtadv.AdvPrefixList, node, rprefix)) { struct nd_opt_prefix_info *pinfo; pinfo = (struct nd_opt_prefix_info *) (buf + len); pinfo->nd_opt_pi_type = ND_OPT_PREFIX_INFORMATION; pinfo->nd_opt_pi_len = 4; pinfo->nd_opt_pi_prefix_len = rprefix->prefix.prefixlen; pinfo->nd_opt_pi_flags_reserved = 0; if (rprefix->AdvOnLinkFlag) pinfo->nd_opt_pi_flags_reserved |= ND_OPT_PI_FLAG_ONLINK; if (rprefix->AdvAutonomousFlag) pinfo->nd_opt_pi_flags_reserved |= ND_OPT_PI_FLAG_AUTO; if (rprefix->AdvRouterAddressFlag) pinfo->nd_opt_pi_flags_reserved |= ND_OPT_PI_FLAG_RADDR; pinfo->nd_opt_pi_valid_time = htonl (rprefix->AdvValidLifetime); pinfo->nd_opt_pi_preferred_time = htonl (rprefix->AdvPreferredLifetime); pinfo->nd_opt_pi_reserved2 = 0; IPV6_ADDR_COPY (&pinfo->nd_opt_pi_prefix, &rprefix->prefix.prefix); #ifdef DEBUG { u_char buf[INET6_ADDRSTRLEN]; zlog_debug ("DEBUG %s", inet_ntop (AF_INET6, &pinfo->nd_opt_pi_prefix, buf, INET6_ADDRSTRLEN)); } #endif /* DEBUG */ len += sizeof (struct nd_opt_prefix_info); } /* Hardware address. */ if (ifp->hw_addr_len != 0) { buf[len++] = ND_OPT_SOURCE_LINKADDR; /* Option length should be rounded up to next octet if the link address does not end on an octet boundary. */ buf[len++] = (ifp->hw_addr_len + 9) >> 3; memcpy (buf + len, ifp->hw_addr, ifp->hw_addr_len); len += ifp->hw_addr_len; /* Pad option to end on an octet boundary. 
*/ memset (buf + len, 0, -(ifp->hw_addr_len + 2) & 0x7); len += -(ifp->hw_addr_len + 2) & 0x7; } /* MTU */ if (zif->rtadv.AdvLinkMTU) { struct nd_opt_mtu * opt = (struct nd_opt_mtu *) (buf + len); opt->nd_opt_mtu_type = ND_OPT_MTU; opt->nd_opt_mtu_len = 1; opt->nd_opt_mtu_reserved = 0; opt->nd_opt_mtu_mtu = htonl (zif->rtadv.AdvLinkMTU); len += sizeof (struct nd_opt_mtu); } msg.msg_name = (void *) &addr; msg.msg_namelen = sizeof (struct sockaddr_in6); msg.msg_iov = &iov; msg.msg_iovlen = 1; msg.msg_control = (void *) adata; msg.msg_controllen = CMSG_SPACE(sizeof(struct in6_pktinfo)); msg.msg_flags = 0; iov.iov_base = buf; iov.iov_len = len; cmsgptr = ZCMSG_FIRSTHDR(&msg); cmsgptr->cmsg_len = CMSG_LEN(sizeof(struct in6_pktinfo)); cmsgptr->cmsg_level = IPPROTO_IPV6; cmsgptr->cmsg_type = IPV6_PKTINFO; pkt = (struct in6_pktinfo *) CMSG_DATA (cmsgptr); memset (&pkt->ipi6_addr, 0, sizeof (struct in6_addr)); pkt->ipi6_ifindex = ifp->ifindex; ret = sendmsg (sock, &msg, 0); if (ret < 0) { zlog_err ("rtadv_send_packet: sendmsg %d (%s)\n", errno, safe_strerror(errno)); } } static int rtadv_timer (struct thread *thread) { struct zebra_vrf *zvrf = THREAD_ARG (thread); struct listnode *node, *nnode; struct interface *ifp; struct zebra_if *zif; int period; zvrf->rtadv.ra_timer = NULL; if (zvrf->rtadv.adv_msec_if_count == 0) { period = 1000; /* 1 s */ rtadv_event (zvrf, RTADV_TIMER, 1 /* 1 s */); } else { period = 10; /* 10 ms */ rtadv_event (zvrf, RTADV_TIMER_MSEC, 10 /* 10 ms */); } for (ALL_LIST_ELEMENTS (vrf_iflist (zvrf->vrf_id), node, nnode, ifp)) { if (if_is_loopback (ifp) || ! if_is_operative (ifp)) continue; zif = ifp->info; if (zif->rtadv.AdvSendAdvertisements) { zif->rtadv.AdvIntervalTimer -= period; if (zif->rtadv.AdvIntervalTimer <= 0) { /* FIXME: using MaxRtrAdvInterval each time isn't what section 6.2.4 of RFC4861 tells to do. */ zif->rtadv.AdvIntervalTimer = zif->rtadv.MaxRtrAdvInterval; rtadv_send_packet (zvrf->rtadv.sock, ifp); } } } return 0; } static void rtadv_process_solicit (struct interface *ifp) { struct zebra_vrf *zvrf = vrf_info_lookup (ifp->vrf_id); zlog_info ("Router solicitation received on %s vrf %u", ifp->name, zvrf->vrf_id); rtadv_send_packet (zvrf->rtadv.sock, ifp); } static void rtadv_process_advert (void) { zlog_info ("Router advertisement received"); } static void rtadv_process_packet (u_char *buf, unsigned int len, ifindex_t ifindex, int hoplimit, vrf_id_t vrf_id) { struct icmp6_hdr *icmph; struct interface *ifp; struct zebra_if *zif; /* Interface search. */ ifp = if_lookup_by_index_vrf (ifindex, vrf_id); if (ifp == NULL) { zlog_warn ("Unknown interface index: %d, vrf %u", ifindex, vrf_id); return; } if (if_is_loopback (ifp)) return; /* Check interface configuration. */ zif = ifp->info; if (! zif->rtadv.AdvSendAdvertisements) return; /* ICMP message length check. */ if (len < sizeof (struct icmp6_hdr)) { zlog_warn ("Invalid ICMPV6 packet length: %d", len); return; } icmph = (struct icmp6_hdr *) buf; /* ICMP message type check. */ if (icmph->icmp6_type != ND_ROUTER_SOLICIT && icmph->icmp6_type != ND_ROUTER_ADVERT) { zlog_warn ("Unwanted ICMPV6 message type: %d", icmph->icmp6_type); return; } /* Hoplimit check. */ if (hoplimit >= 0 && hoplimit != 255) { zlog_warn ("Invalid hoplimit %d for router advertisement ICMP packet", hoplimit); return; } /* Check ICMP message type. 
*/ if (icmph->icmp6_type == ND_ROUTER_SOLICIT) rtadv_process_solicit (ifp); else if (icmph->icmp6_type == ND_ROUTER_ADVERT) rtadv_process_advert (); return; } static int rtadv_read (struct thread *thread) { int sock; int len; u_char buf[RTADV_MSG_SIZE]; struct sockaddr_in6 from; ifindex_t ifindex = 0; int hoplimit = -1; struct zebra_vrf *zvrf = THREAD_ARG (thread); sock = THREAD_FD (thread); zvrf->rtadv.ra_read = NULL; /* Register myself. */ rtadv_event (zvrf, RTADV_READ, sock); len = rtadv_recv_packet (sock, buf, sizeof (buf), &from, &ifindex, &hoplimit); if (len < 0) { zlog_warn ("router solicitation recv failed: %s.", safe_strerror (errno)); return len; } rtadv_process_packet (buf, (unsigned)len, ifindex, hoplimit, zvrf->vrf_id); return 0; } static int rtadv_make_socket (vrf_id_t vrf_id) { int sock; int ret; struct icmp6_filter filter; if ( zserv_privs.change (ZPRIVS_RAISE) ) zlog_err ("rtadv_make_socket: could not raise privs, %s", safe_strerror (errno) ); sock = vrf_socket (AF_INET6, SOCK_RAW, IPPROTO_ICMPV6, vrf_id); if ( zserv_privs.change (ZPRIVS_LOWER) ) zlog_err ("rtadv_make_socket: could not lower privs, %s", safe_strerror (errno) ); /* When we can't make ICMPV6 socket simply back. Router advertisement feature will not be supported. */ if (sock < 0) { close (sock); return -1; } ret = setsockopt_ipv6_pktinfo (sock, 1); if (ret < 0) { close (sock); return ret; } ret = setsockopt_ipv6_multicast_loop (sock, 0); if (ret < 0) { close (sock); return ret; } ret = setsockopt_ipv6_unicast_hops (sock, 255); if (ret < 0) { close (sock); return ret; } ret = setsockopt_ipv6_multicast_hops (sock, 255); if (ret < 0) { close (sock); return ret; } ret = setsockopt_ipv6_hoplimit (sock, 1); if (ret < 0) { close (sock); return ret; } ICMP6_FILTER_SETBLOCKALL(&filter); ICMP6_FILTER_SETPASS (ND_ROUTER_SOLICIT, &filter); ICMP6_FILTER_SETPASS (ND_ROUTER_ADVERT, &filter); ret = setsockopt (sock, IPPROTO_ICMPV6, ICMP6_FILTER, &filter, sizeof (struct icmp6_filter)); if (ret < 0) { zlog_info ("ICMP6_FILTER set fail: %s", safe_strerror (errno)); return ret; } return sock; } static struct rtadv_prefix * rtadv_prefix_new (void) { return XCALLOC (MTYPE_RTADV_PREFIX, sizeof (struct rtadv_prefix)); } static void rtadv_prefix_free (struct rtadv_prefix *rtadv_prefix) { XFREE (MTYPE_RTADV_PREFIX, rtadv_prefix); } static struct rtadv_prefix * rtadv_prefix_lookup (struct list *rplist, struct prefix_ipv6 *p) { struct listnode *node; struct rtadv_prefix *rprefix; for (ALL_LIST_ELEMENTS_RO (rplist, node, rprefix)) if (prefix_same ((struct prefix *) &rprefix->prefix, (struct prefix *) p)) return rprefix; return NULL; } static struct rtadv_prefix * rtadv_prefix_get (struct list *rplist, struct prefix_ipv6 *p) { struct rtadv_prefix *rprefix; rprefix = rtadv_prefix_lookup (rplist, p); if (rprefix) return rprefix; rprefix = rtadv_prefix_new (); memcpy (&rprefix->prefix, p, sizeof (struct prefix_ipv6)); listnode_add (rplist, rprefix); return rprefix; } static void rtadv_prefix_set (struct zebra_if *zif, struct rtadv_prefix *rp) { struct rtadv_prefix *rprefix; rprefix = rtadv_prefix_get (zif->rtadv.AdvPrefixList, &rp->prefix); /* Set parameters. 
*/ rprefix->AdvValidLifetime = rp->AdvValidLifetime; rprefix->AdvPreferredLifetime = rp->AdvPreferredLifetime; rprefix->AdvOnLinkFlag = rp->AdvOnLinkFlag; rprefix->AdvAutonomousFlag = rp->AdvAutonomousFlag; rprefix->AdvRouterAddressFlag = rp->AdvRouterAddressFlag; } static int rtadv_prefix_reset (struct zebra_if *zif, struct rtadv_prefix *rp) { struct rtadv_prefix *rprefix; rprefix = rtadv_prefix_lookup (zif->rtadv.AdvPrefixList, &rp->prefix); if (rprefix != NULL) { listnode_delete (zif->rtadv.AdvPrefixList, (void *) rprefix); rtadv_prefix_free (rprefix); return 1; } else return 0; } DEFUN (ipv6_nd_suppress_ra, ipv6_nd_suppress_ra_cmd, "ipv6 nd suppress-ra", "Interface IPv6 config commands\n" "Neighbor discovery\n" "Suppress Router Advertisement\n") { struct interface *ifp; struct zebra_if *zif; struct zebra_vrf *zvrf; ifp = vty->index; zif = ifp->info; zvrf = vrf_info_lookup (ifp->vrf_id); if (if_is_loopback (ifp)) { vty_out (vty, "Invalid interface%s", VTY_NEWLINE); return CMD_WARNING; } if (zif->rtadv.AdvSendAdvertisements) { zif->rtadv.AdvSendAdvertisements = 0; zif->rtadv.AdvIntervalTimer = 0; zvrf->rtadv.adv_if_count--; if_leave_all_router (zvrf->rtadv.sock, ifp); if (zvrf->rtadv.adv_if_count == 0) rtadv_event (zvrf, RTADV_STOP, 0); } return CMD_SUCCESS; } DEFUN (no_ipv6_nd_suppress_ra, no_ipv6_nd_suppress_ra_cmd, "no ipv6 nd suppress-ra", NO_STR "Interface IPv6 config commands\n" "Neighbor discovery\n" "Suppress Router Advertisement\n") { struct interface *ifp; struct zebra_if *zif; struct zebra_vrf *zvrf; ifp = vty->index; zif = ifp->info; zvrf = vrf_info_lookup (ifp->vrf_id); if (if_is_loopback (ifp)) { vty_out (vty, "Invalid interface%s", VTY_NEWLINE); return CMD_WARNING; } if (! zif->rtadv.AdvSendAdvertisements) { zif->rtadv.AdvSendAdvertisements = 1; zif->rtadv.AdvIntervalTimer = 0; zvrf->rtadv.adv_if_count++; if_join_all_router (zvrf->rtadv.sock, ifp); if (zvrf->rtadv.adv_if_count == 1) rtadv_event (zvrf, RTADV_START, zvrf->rtadv.sock); } return CMD_SUCCESS; } DEFUN (ipv6_nd_ra_interval_msec, ipv6_nd_ra_interval_msec_cmd, "ipv6 nd ra-interval msec <70-1800000>", "Interface IPv6 config commands\n" "Neighbor discovery\n" "Router Advertisement interval\n" "Router Advertisement interval in milliseconds\n") { unsigned interval; struct interface *ifp = (struct interface *) vty->index; struct zebra_if *zif = ifp->info; struct zebra_vrf *zvrf = vrf_info_lookup (ifp->vrf_id); VTY_GET_INTEGER_RANGE ("router advertisement interval", interval, argv[0], 70, 1800000); if ((zif->rtadv.AdvDefaultLifetime != -1 && interval > (unsigned)zif->rtadv.AdvDefaultLifetime * 1000)) { vty_out (vty, "This ra-interval would conflict with configured ra-lifetime!%s", VTY_NEWLINE); return CMD_WARNING; } if (zif->rtadv.MaxRtrAdvInterval % 1000) zvrf->rtadv.adv_msec_if_count--; if (interval % 1000) zvrf->rtadv.adv_msec_if_count++; zif->rtadv.MaxRtrAdvInterval = interval; zif->rtadv.MinRtrAdvInterval = 0.33 * interval; zif->rtadv.AdvIntervalTimer = 0; return CMD_SUCCESS; } DEFUN (ipv6_nd_ra_interval, ipv6_nd_ra_interval_cmd, "ipv6 nd ra-interval <1-1800>", "Interface IPv6 config commands\n" "Neighbor discovery\n" "Router Advertisement interval\n" "Router Advertisement interval in seconds\n") { unsigned interval; struct interface *ifp = (struct interface *) vty->index; struct zebra_if *zif = ifp->info; struct zebra_vrf *zvrf = vrf_info_lookup (ifp->vrf_id); VTY_GET_INTEGER_RANGE ("router advertisement interval", interval, argv[0], 1, 1800); if ((zif->rtadv.AdvDefaultLifetime != -1 && interval > 
(unsigned)zif->rtadv.AdvDefaultLifetime)) { vty_out (vty, "This ra-interval would conflict with configured ra-lifetime!%s", VTY_NEWLINE); return CMD_WARNING; } if (zif->rtadv.MaxRtrAdvInterval % 1000) zvrf->rtadv.adv_msec_if_count--; /* convert to milliseconds */ interval = interval * 1000; zif->rtadv.MaxRtrAdvInterval = interval; zif->rtadv.MinRtrAdvInterval = 0.33 * interval; zif->rtadv.AdvIntervalTimer = 0; return CMD_SUCCESS; } DEFUN (no_ipv6_nd_ra_interval, no_ipv6_nd_ra_interval_cmd, "no ipv6 nd ra-interval", NO_STR "Interface IPv6 config commands\n" "Neighbor discovery\n" "Router Advertisement interval\n") { struct interface *ifp; struct zebra_if *zif; struct zebra_vrf *zvrf; ifp = (struct interface *) vty->index; zif = ifp->info; zvrf = vrf_info_lookup (ifp->vrf_id); if (zif->rtadv.MaxRtrAdvInterval % 1000) zvrf->rtadv.adv_msec_if_count--; zif->rtadv.MaxRtrAdvInterval = RTADV_MAX_RTR_ADV_INTERVAL; zif->rtadv.MinRtrAdvInterval = RTADV_MIN_RTR_ADV_INTERVAL; zif->rtadv.AdvIntervalTimer = zif->rtadv.MaxRtrAdvInterval; return CMD_SUCCESS; } ALIAS (no_ipv6_nd_ra_interval, no_ipv6_nd_ra_interval_val_cmd, "no ipv6 nd ra-interval <1-1800>", NO_STR "Interface IPv6 config commands\n" "Neighbor discovery\n" "Router Advertisement interval\n") ALIAS (no_ipv6_nd_ra_interval, no_ipv6_nd_ra_interval_msec_val_cmd, "no ipv6 nd ra-interval msec <1-1800000>", NO_STR "Interface IPv6 config commands\n" "Neighbor discovery\n" "Router Advertisement interval\n" "Router Advertisement interval in milliseconds\n") DEFUN (ipv6_nd_ra_lifetime, ipv6_nd_ra_lifetime_cmd, "ipv6 nd ra-lifetime <0-9000>", "Interface IPv6 config commands\n" "Neighbor discovery\n" "Router lifetime\n" "Router lifetime in seconds (0 stands for a non-default gw)\n") { int lifetime; struct interface *ifp; struct zebra_if *zif; ifp = (struct interface *) vty->index; zif = ifp->info; VTY_GET_INTEGER_RANGE ("router lifetime", lifetime, argv[0], 0, 9000); /* The value to be placed in the Router Lifetime field * of Router Advertisements sent from the interface, * in seconds. MUST be either zero or between * MaxRtrAdvInterval and 9000 seconds. 
-- RFC4861, 6.2.1 */ if ((lifetime != 0 && lifetime * 1000 < zif->rtadv.MaxRtrAdvInterval)) { vty_out (vty, "This ra-lifetime would conflict with configured ra-interval%s", VTY_NEWLINE); return CMD_WARNING; } zif->rtadv.AdvDefaultLifetime = lifetime; return CMD_SUCCESS; } DEFUN (no_ipv6_nd_ra_lifetime, no_ipv6_nd_ra_lifetime_cmd, "no ipv6 nd ra-lifetime", NO_STR "Interface IPv6 config commands\n" "Neighbor discovery\n" "Router lifetime\n") { struct interface *ifp; struct zebra_if *zif; ifp = (struct interface *) vty->index; zif = ifp->info; zif->rtadv.AdvDefaultLifetime = -1; return CMD_SUCCESS; } ALIAS (no_ipv6_nd_ra_lifetime, no_ipv6_nd_ra_lifetime_val_cmd, "no ipv6 nd ra-lifetime <0-9000>", NO_STR "Interface IPv6 config commands\n" "Neighbor discovery\n" "Router lifetime\n" "Router lifetime in seconds (0 stands for a non-default gw)\n") DEFUN (ipv6_nd_reachable_time, ipv6_nd_reachable_time_cmd, "ipv6 nd reachable-time <1-3600000>", "Interface IPv6 config commands\n" "Neighbor discovery\n" "Reachable time\n" "Reachable time in milliseconds\n") { struct interface *ifp = (struct interface *) vty->index; struct zebra_if *zif = ifp->info; VTY_GET_INTEGER_RANGE ("reachable time", zif->rtadv.AdvReachableTime, argv[0], 1, RTADV_MAX_REACHABLE_TIME); return CMD_SUCCESS; } DEFUN (no_ipv6_nd_reachable_time, no_ipv6_nd_reachable_time_cmd, "no ipv6 nd reachable-time", NO_STR "Interface IPv6 config commands\n" "Neighbor discovery\n" "Reachable time\n") { struct interface *ifp; struct zebra_if *zif; ifp = (struct interface *) vty->index; zif = ifp->info; zif->rtadv.AdvReachableTime = 0; return CMD_SUCCESS; } ALIAS (no_ipv6_nd_reachable_time, no_ipv6_nd_reachable_time_val_cmd, "no ipv6 nd reachable-time <1-3600000>", NO_STR "Interface IPv6 config commands\n" "Neighbor discovery\n" "Reachable time\n" "Reachable time in milliseconds\n") DEFUN (ipv6_nd_homeagent_preference, ipv6_nd_homeagent_preference_cmd, "ipv6 nd home-agent-preference <0-65535>", "Interface IPv6 config commands\n" "Neighbor discovery\n" "Home Agent preference\n" "preference value (default is 0, least preferred)\n") { struct interface *ifp = (struct interface *) vty->index; struct zebra_if *zif = ifp->info; VTY_GET_INTEGER_RANGE ("home agent preference", zif->rtadv.HomeAgentPreference, argv[0], 0, 65535); return CMD_SUCCESS; } DEFUN (no_ipv6_nd_homeagent_preference, no_ipv6_nd_homeagent_preference_cmd, "no ipv6 nd home-agent-preference", NO_STR "Interface IPv6 config commands\n" "Neighbor discovery\n" "Home Agent preference\n") { struct interface *ifp; struct zebra_if *zif; ifp = (struct interface *) vty->index; zif = ifp->info; zif->rtadv.HomeAgentPreference = 0; return CMD_SUCCESS; } ALIAS (no_ipv6_nd_homeagent_preference, no_ipv6_nd_homeagent_preference_val_cmd, "no ipv6 nd home-agent-preference <0-65535>", NO_STR "Interface IPv6 config commands\n" "Neighbor discovery\n" "Home Agent preference\n" "preference value (default is 0, least preferred)\n") DEFUN (ipv6_nd_homeagent_lifetime, ipv6_nd_homeagent_lifetime_cmd, "ipv6 nd home-agent-lifetime <0-65520>", "Interface IPv6 config commands\n" "Neighbor discovery\n" "Home Agent lifetime\n" "Home Agent lifetime in seconds (0 to track ra-lifetime)\n") { struct interface *ifp = (struct interface *) vty->index; struct zebra_if *zif = ifp->info; VTY_GET_INTEGER_RANGE ("home agent lifetime", zif->rtadv.HomeAgentLifetime, argv[0], 0, RTADV_MAX_HALIFETIME); return CMD_SUCCESS; } DEFUN (no_ipv6_nd_homeagent_lifetime, no_ipv6_nd_homeagent_lifetime_cmd, "no ipv6 nd home-agent-lifetime", NO_STR 
"Interface IPv6 config commands\n" "Neighbor discovery\n" "Home Agent lifetime\n") { struct interface *ifp; struct zebra_if *zif; ifp = (struct interface *) vty->index; zif = ifp->info; zif->rtadv.HomeAgentLifetime = -1; return CMD_SUCCESS; } ALIAS (no_ipv6_nd_homeagent_lifetime, no_ipv6_nd_homeagent_lifetime_val_cmd, "no ipv6 nd home-agent-lifetime <0-65520>", NO_STR "Interface IPv6 config commands\n" "Neighbor discovery\n" "Home Agent lifetime\n" "Home Agent lifetime in seconds (0 to track ra-lifetime)\n") DEFUN (ipv6_nd_managed_config_flag, ipv6_nd_managed_config_flag_cmd, "ipv6 nd managed-config-flag", "Interface IPv6 config commands\n" "Neighbor discovery\n" "Managed address configuration flag\n") { struct interface *ifp; struct zebra_if *zif; ifp = (struct interface *) vty->index; zif = ifp->info; zif->rtadv.AdvManagedFlag = 1; return CMD_SUCCESS; } DEFUN (no_ipv6_nd_managed_config_flag, no_ipv6_nd_managed_config_flag_cmd, "no ipv6 nd managed-config-flag", NO_STR "Interface IPv6 config commands\n" "Neighbor discovery\n" "Managed address configuration flag\n") { struct interface *ifp; struct zebra_if *zif; ifp = (struct interface *) vty->index; zif = ifp->info; zif->rtadv.AdvManagedFlag = 0; return CMD_SUCCESS; } DEFUN (ipv6_nd_homeagent_config_flag, ipv6_nd_homeagent_config_flag_cmd, "ipv6 nd home-agent-config-flag", "Interface IPv6 config commands\n" "Neighbor discovery\n" "Home Agent configuration flag\n") { struct interface *ifp; struct zebra_if *zif; ifp = (struct interface *) vty->index; zif = ifp->info; zif->rtadv.AdvHomeAgentFlag = 1; return CMD_SUCCESS; } DEFUN (no_ipv6_nd_homeagent_config_flag, no_ipv6_nd_homeagent_config_flag_cmd, "no ipv6 nd home-agent-config-flag", NO_STR "Interface IPv6 config commands\n" "Neighbor discovery\n" "Home Agent configuration flag\n") { struct interface *ifp; struct zebra_if *zif; ifp = (struct interface *) vty->index; zif = ifp->info; zif->rtadv.AdvHomeAgentFlag = 0; return CMD_SUCCESS; } DEFUN (ipv6_nd_adv_interval_config_option, ipv6_nd_adv_interval_config_option_cmd, "ipv6 nd adv-interval-option", "Interface IPv6 config commands\n" "Neighbor discovery\n" "Advertisement Interval Option\n") { struct interface *ifp; struct zebra_if *zif; ifp = (struct interface *) vty->index; zif = ifp->info; zif->rtadv.AdvIntervalOption = 1; return CMD_SUCCESS; } DEFUN (no_ipv6_nd_adv_interval_config_option, no_ipv6_nd_adv_interval_config_option_cmd, "no ipv6 nd adv-interval-option", NO_STR "Interface IPv6 config commands\n" "Neighbor discovery\n" "Advertisement Interval Option\n") { struct interface *ifp; struct zebra_if *zif; ifp = (struct interface *) vty->index; zif = ifp->info; zif->rtadv.AdvIntervalOption = 0; return CMD_SUCCESS; } DEFUN (ipv6_nd_other_config_flag, ipv6_nd_other_config_flag_cmd, "ipv6 nd other-config-flag", "Interface IPv6 config commands\n" "Neighbor discovery\n" "Other statefull configuration flag\n") { struct interface *ifp; struct zebra_if *zif; ifp = (struct interface *) vty->index; zif = ifp->info; zif->rtadv.AdvOtherConfigFlag = 1; return CMD_SUCCESS; } DEFUN (no_ipv6_nd_other_config_flag, no_ipv6_nd_other_config_flag_cmd, "no ipv6 nd other-config-flag", NO_STR "Interface IPv6 config commands\n" "Neighbor discovery\n" "Other statefull configuration flag\n") { struct interface *ifp; struct zebra_if *zif; ifp = (struct interface *) vty->index; zif = ifp->info; zif->rtadv.AdvOtherConfigFlag = 0; return CMD_SUCCESS; } DEFUN (ipv6_nd_prefix, ipv6_nd_prefix_cmd, "ipv6 nd prefix X:X::X:X/M (<0-4294967295>|infinite) " 
"(<0-4294967295>|infinite) (off-link|) (no-autoconfig|) (router-address|)", "Interface IPv6 config commands\n" "Neighbor discovery\n" "Prefix information\n" "IPv6 prefix\n" "Valid lifetime in seconds\n" "Infinite valid lifetime\n" "Preferred lifetime in seconds\n" "Infinite preferred lifetime\n" "Do not use prefix for onlink determination\n" "Do not use prefix for autoconfiguration\n" "Set Router Address flag\n") { int i; int ret; int cursor = 1; struct interface *ifp; struct zebra_if *zebra_if; struct rtadv_prefix rp; ifp = (struct interface *) vty->index; zebra_if = ifp->info; ret = str2prefix_ipv6 (argv[0], &rp.prefix); if (!ret) { vty_out (vty, "Malformed IPv6 prefix%s", VTY_NEWLINE); return CMD_WARNING; } apply_mask_ipv6 (&rp.prefix); /* RFC4861 4.6.2 */ rp.AdvOnLinkFlag = 1; rp.AdvAutonomousFlag = 1; rp.AdvRouterAddressFlag = 0; rp.AdvValidLifetime = RTADV_VALID_LIFETIME; rp.AdvPreferredLifetime = RTADV_PREFERRED_LIFETIME; if (argc > 1) { if ((isdigit((unsigned char)argv[1][0])) || strncmp (argv[1], "i", 1) == 0) { if ( strncmp (argv[1], "i", 1) == 0) rp.AdvValidLifetime = UINT32_MAX; else rp.AdvValidLifetime = (u_int32_t) strtoll (argv[1], (char **)NULL, 10); if ( strncmp (argv[2], "i", 1) == 0) rp.AdvPreferredLifetime = UINT32_MAX; else rp.AdvPreferredLifetime = (u_int32_t) strtoll (argv[2], (char **)NULL, 10); if (rp.AdvPreferredLifetime > rp.AdvValidLifetime) { vty_out (vty, "Invalid preferred lifetime%s", VTY_NEWLINE); return CMD_WARNING; } cursor = cursor + 2; } if (argc > cursor) { for (i = cursor; i < argc; i++) { if (strncmp (argv[i], "of", 2) == 0) rp.AdvOnLinkFlag = 0; if (strncmp (argv[i], "no", 2) == 0) rp.AdvAutonomousFlag = 0; if (strncmp (argv[i], "ro", 2) == 0) rp.AdvRouterAddressFlag = 1; } } } rtadv_prefix_set (zebra_if, &rp); return CMD_SUCCESS; } ALIAS (ipv6_nd_prefix, ipv6_nd_prefix_val_nortaddr_cmd, "ipv6 nd prefix X:X::X:X/M (<0-4294967295>|infinite) " "(<0-4294967295>|infinite) (off-link|) (no-autoconfig|)", "Interface IPv6 config commands\n" "Neighbor discovery\n" "Prefix information\n" "IPv6 prefix\n" "Valid lifetime in seconds\n" "Infinite valid lifetime\n" "Preferred lifetime in seconds\n" "Infinite preferred lifetime\n" "Do not use prefix for onlink determination\n" "Do not use prefix for autoconfiguration\n") ALIAS (ipv6_nd_prefix, ipv6_nd_prefix_val_rev_cmd, "ipv6 nd prefix X:X::X:X/M (<0-4294967295>|infinite) " "(<0-4294967295>|infinite) (no-autoconfig|) (off-link|)", "Interface IPv6 config commands\n" "Neighbor discovery\n" "Prefix information\n" "IPv6 prefix\n" "Valid lifetime in seconds\n" "Infinite valid lifetime\n" "Preferred lifetime in seconds\n" "Infinite preferred lifetime\n" "Do not use prefix for autoconfiguration\n" "Do not use prefix for onlink determination\n") ALIAS (ipv6_nd_prefix, ipv6_nd_prefix_val_rev_rtaddr_cmd, "ipv6 nd prefix X:X::X:X/M (<0-4294967295>|infinite) " "(<0-4294967295>|infinite) (no-autoconfig|) (off-link|) (router-address|)", "Interface IPv6 config commands\n" "Neighbor discovery\n" "Prefix information\n" "IPv6 prefix\n" "Valid lifetime in seconds\n" "Infinite valid lifetime\n" "Preferred lifetime in seconds\n" "Infinite preferred lifetime\n" "Do not use prefix for autoconfiguration\n" "Do not use prefix for onlink determination\n" "Set Router Address flag\n") ALIAS (ipv6_nd_prefix, ipv6_nd_prefix_val_noauto_cmd, "ipv6 nd prefix X:X::X:X/M (<0-4294967295>|infinite) " "(<0-4294967295>|infinite) (no-autoconfig|)", "Interface IPv6 config commands\n" "Neighbor discovery\n" "Prefix information\n" "IPv6 prefix\n" "Valid 
lifetime in seconds\n" "Infinite valid lifetime\n" "Preferred lifetime in seconds\n" "Infinite preferred lifetime\n" "Do not use prefix for autoconfiguration") ALIAS (ipv6_nd_prefix, ipv6_nd_prefix_val_offlink_cmd, "ipv6 nd prefix X:X::X:X/M (<0-4294967295>|infinite) " "(<0-4294967295>|infinite) (off-link|)", "Interface IPv6 config commands\n" "Neighbor discovery\n" "Prefix information\n" "IPv6 prefix\n" "Valid lifetime in seconds\n" "Infinite valid lifetime\n" "Preferred lifetime in seconds\n" "Infinite preferred lifetime\n" "Do not use prefix for onlink determination\n") ALIAS (ipv6_nd_prefix, ipv6_nd_prefix_val_rtaddr_cmd, "ipv6 nd prefix X:X::X:X/M (<0-4294967295>|infinite) " "(<0-4294967295>|infinite) (router-address|)", "Interface IPv6 config commands\n" "Neighbor discovery\n" "Prefix information\n" "IPv6 prefix\n" "Valid lifetime in seconds\n" "Infinite valid lifetime\n" "Preferred lifetime in seconds\n" "Infinite preferred lifetime\n" "Set Router Address flag\n") ALIAS (ipv6_nd_prefix, ipv6_nd_prefix_val_cmd, "ipv6 nd prefix X:X::X:X/M (<0-4294967295>|infinite) " "(<0-4294967295>|infinite)", "Interface IPv6 config commands\n" "Neighbor discovery\n" "Prefix information\n" "IPv6 prefix\n" "Valid lifetime in seconds\n" "Infinite valid lifetime\n" "Preferred lifetime in seconds\n" "Infinite preferred lifetime\n") ALIAS (ipv6_nd_prefix, ipv6_nd_prefix_noval_cmd, "ipv6 nd prefix X:X::X:X/M (no-autoconfig|) (off-link|)", "Interface IPv6 config commands\n" "Neighbor discovery\n" "Prefix information\n" "IPv6 prefix\n" "Do not use prefix for autoconfiguration\n" "Do not use prefix for onlink determination\n") ALIAS (ipv6_nd_prefix, ipv6_nd_prefix_noval_rev_cmd, "ipv6 nd prefix X:X::X:X/M (off-link|) (no-autoconfig|)", "Interface IPv6 config commands\n" "Neighbor discovery\n" "Prefix information\n" "IPv6 prefix\n" "Do not use prefix for onlink determination\n" "Do not use prefix for autoconfiguration\n") ALIAS (ipv6_nd_prefix, ipv6_nd_prefix_noval_noauto_cmd, "ipv6 nd prefix X:X::X:X/M (no-autoconfig|)", "Interface IPv6 config commands\n" "Neighbor discovery\n" "Prefix information\n" "IPv6 prefix\n" "Do not use prefix for autoconfiguration\n") ALIAS (ipv6_nd_prefix, ipv6_nd_prefix_noval_offlink_cmd, "ipv6 nd prefix X:X::X:X/M (off-link|)", "Interface IPv6 config commands\n" "Neighbor discovery\n" "Prefix information\n" "IPv6 prefix\n" "Do not use prefix for onlink determination\n") ALIAS (ipv6_nd_prefix, ipv6_nd_prefix_noval_rtaddr_cmd, "ipv6 nd prefix X:X::X:X/M (router-address|)", "Interface IPv6 config commands\n" "Neighbor discovery\n" "Prefix information\n" "IPv6 prefix\n" "Set Router Address flag\n") ALIAS (ipv6_nd_prefix, ipv6_nd_prefix_prefix_cmd, "ipv6 nd prefix X:X::X:X/M", "Interface IPv6 config commands\n" "Neighbor discovery\n" "Prefix information\n" "IPv6 prefix\n") DEFUN (no_ipv6_nd_prefix, no_ipv6_nd_prefix_cmd, "no ipv6 nd prefix IPV6PREFIX", NO_STR "Interface IPv6 config commands\n" "Neighbor discovery\n" "Prefix information\n" "IPv6 prefix\n") { int ret; struct interface *ifp; struct zebra_if *zebra_if; struct rtadv_prefix rp; ifp = (struct interface *) vty->index; zebra_if = ifp->info; ret = str2prefix_ipv6 (argv[0], &rp.prefix); if (!ret) { vty_out (vty, "Malformed IPv6 prefix%s", VTY_NEWLINE); return CMD_WARNING; } apply_mask_ipv6 (&rp.prefix); /* RFC4861 4.6.2 */ ret = rtadv_prefix_reset (zebra_if, &rp); if (!ret) { vty_out (vty, "Non-exist IPv6 prefix%s", VTY_NEWLINE); return CMD_WARNING; } return CMD_SUCCESS; } DEFUN (ipv6_nd_router_preference, 
ipv6_nd_router_preference_cmd, "ipv6 nd router-preference (high|medium|low)", "Interface IPv6 config commands\n" "Neighbor discovery\n" "Default router preference\n" "High default router preference\n" "Low default router preference\n" "Medium default router preference (default)\n") { struct interface *ifp; struct zebra_if *zif; int i = 0; ifp = (struct interface *) vty->index; zif = ifp->info; while (0 != rtadv_pref_strs[i]) { if (strncmp (argv[0], rtadv_pref_strs[i], 1) == 0) { zif->rtadv.DefaultPreference = i; return CMD_SUCCESS; } i++; } return CMD_ERR_NO_MATCH; } DEFUN (no_ipv6_nd_router_preference, no_ipv6_nd_router_preference_cmd, "no ipv6 nd router-preference", NO_STR "Interface IPv6 config commands\n" "Neighbor discovery\n" "Default router preference\n") { struct interface *ifp; struct zebra_if *zif; ifp = (struct interface *) vty->index; zif = ifp->info; zif->rtadv.DefaultPreference = RTADV_PREF_MEDIUM; /* Default per RFC4191. */ return CMD_SUCCESS; } ALIAS (no_ipv6_nd_router_preference, no_ipv6_nd_router_preference_val_cmd, "no ipv6 nd router-preference (high|medium|low)", NO_STR "Interface IPv6 config commands\n" "Neighbor discovery\n" "Default router preference\n" "High default router preference\n" "Low default router preference\n" "Medium default router preference (default)\n") DEFUN (ipv6_nd_mtu, ipv6_nd_mtu_cmd, "ipv6 nd mtu <1-65535>", "Interface IPv6 config commands\n" "Neighbor discovery\n" "Advertised MTU\n" "MTU in bytes\n") { struct interface *ifp = (struct interface *) vty->index; struct zebra_if *zif = ifp->info; VTY_GET_INTEGER_RANGE ("MTU", zif->rtadv.AdvLinkMTU, argv[0], 1, 65535); return CMD_SUCCESS; } DEFUN (no_ipv6_nd_mtu, no_ipv6_nd_mtu_cmd, "no ipv6 nd mtu", NO_STR "Interface IPv6 config commands\n" "Neighbor discovery\n" "Advertised MTU\n") { struct interface *ifp = (struct interface *) vty->index; struct zebra_if *zif = ifp->info; zif->rtadv.AdvLinkMTU = 0; return CMD_SUCCESS; } ALIAS (no_ipv6_nd_mtu, no_ipv6_nd_mtu_val_cmd, "no ipv6 nd mtu <1-65535>", NO_STR "Interface IPv6 config commands\n" "Neighbor discovery\n" "Advertised MTU\n" "MTU in bytes\n") /* Write configuration about router advertisement. */ void rtadv_config_write (struct vty *vty, struct interface *ifp) { struct zebra_if *zif; struct listnode *node; struct rtadv_prefix *rprefix; char buf[PREFIX_STRLEN]; int interval; zif = ifp->info; if (! 
if_is_loopback (ifp)) { if (zif->rtadv.AdvSendAdvertisements) vty_out (vty, " no ipv6 nd suppress-ra%s", VTY_NEWLINE); } interval = zif->rtadv.MaxRtrAdvInterval; if (interval % 1000) vty_out (vty, " ipv6 nd ra-interval msec %d%s", interval, VTY_NEWLINE); else if (interval != RTADV_MAX_RTR_ADV_INTERVAL) vty_out (vty, " ipv6 nd ra-interval %d%s", interval / 1000, VTY_NEWLINE); if (zif->rtadv.AdvIntervalOption) vty_out (vty, " ipv6 nd adv-interval-option%s", VTY_NEWLINE); if (zif->rtadv.AdvDefaultLifetime != -1) vty_out (vty, " ipv6 nd ra-lifetime %d%s", zif->rtadv.AdvDefaultLifetime, VTY_NEWLINE); if (zif->rtadv.HomeAgentPreference) vty_out (vty, " ipv6 nd home-agent-preference %u%s", zif->rtadv.HomeAgentPreference, VTY_NEWLINE); if (zif->rtadv.HomeAgentLifetime != -1) vty_out (vty, " ipv6 nd home-agent-lifetime %u%s", zif->rtadv.HomeAgentLifetime, VTY_NEWLINE); if (zif->rtadv.AdvHomeAgentFlag) vty_out (vty, " ipv6 nd home-agent-config-flag%s", VTY_NEWLINE); if (zif->rtadv.AdvReachableTime) vty_out (vty, " ipv6 nd reachable-time %d%s", zif->rtadv.AdvReachableTime, VTY_NEWLINE); if (zif->rtadv.AdvManagedFlag) vty_out (vty, " ipv6 nd managed-config-flag%s", VTY_NEWLINE); if (zif->rtadv.AdvOtherConfigFlag) vty_out (vty, " ipv6 nd other-config-flag%s", VTY_NEWLINE); if (zif->rtadv.DefaultPreference != RTADV_PREF_MEDIUM) vty_out (vty, " ipv6 nd router-preference %s%s", rtadv_pref_strs[zif->rtadv.DefaultPreference], VTY_NEWLINE); if (zif->rtadv.AdvLinkMTU) vty_out (vty, " ipv6 nd mtu %d%s", zif->rtadv.AdvLinkMTU, VTY_NEWLINE); for (ALL_LIST_ELEMENTS_RO (zif->rtadv.AdvPrefixList, node, rprefix)) { vty_out (vty, " ipv6 nd prefix %s", prefix2str (&rprefix->prefix, buf, sizeof(buf))); if ((rprefix->AdvValidLifetime != RTADV_VALID_LIFETIME) || (rprefix->AdvPreferredLifetime != RTADV_PREFERRED_LIFETIME)) { if (rprefix->AdvValidLifetime == UINT32_MAX) vty_out (vty, " infinite"); else vty_out (vty, " %u", rprefix->AdvValidLifetime); if (rprefix->AdvPreferredLifetime == UINT32_MAX) vty_out (vty, " infinite"); else vty_out (vty, " %u", rprefix->AdvPreferredLifetime); } if (!rprefix->AdvOnLinkFlag) vty_out (vty, " off-link"); if (!rprefix->AdvAutonomousFlag) vty_out (vty, " no-autoconfig"); if (rprefix->AdvRouterAddressFlag) vty_out (vty, " router-address"); vty_out (vty, "%s", VTY_NEWLINE); } } static void rtadv_event (struct zebra_vrf *zvrf, enum rtadv_event event, int val) { struct rtadv *rtadv = &zvrf->rtadv; switch (event) { case RTADV_START: if (! rtadv->ra_read) rtadv->ra_read = thread_add_read (zebrad.master, rtadv_read, zvrf, val); if (! rtadv->ra_timer) rtadv->ra_timer = thread_add_event (zebrad.master, rtadv_timer, zvrf, 0); break; case RTADV_STOP: if (rtadv->ra_timer) { thread_cancel (rtadv->ra_timer); rtadv->ra_timer = NULL; } if (rtadv->ra_read) { thread_cancel (rtadv->ra_read); rtadv->ra_read = NULL; } break; case RTADV_TIMER: if (! rtadv->ra_timer) rtadv->ra_timer = thread_add_timer (zebrad.master, rtadv_timer, zvrf, val); break; case RTADV_TIMER_MSEC: if (! rtadv->ra_timer) rtadv->ra_timer = thread_add_timer_msec (zebrad.master, rtadv_timer, zvrf, val); break; case RTADV_READ: if (! 
rtadv->ra_read) rtadv->ra_read = thread_add_read (zebrad.master, rtadv_read, zvrf, val); break; default: break; } return; } void rtadv_init (struct zebra_vrf *zvrf) { zvrf->rtadv.sock = rtadv_make_socket (zvrf->vrf_id); } void rtadv_terminate (struct zebra_vrf *zvrf) { rtadv_event (zvrf, RTADV_STOP, 0); if (zvrf->rtadv.sock >= 0) { close (zvrf->rtadv.sock); zvrf->rtadv.sock = -1; } zvrf->rtadv.adv_if_count = 0; zvrf->rtadv.adv_msec_if_count = 0; } void rtadv_cmd_init (void) { install_element (INTERFACE_NODE, &ipv6_nd_suppress_ra_cmd); install_element (INTERFACE_NODE, &no_ipv6_nd_suppress_ra_cmd); install_element (INTERFACE_NODE, &ipv6_nd_ra_interval_cmd); install_element (INTERFACE_NODE, &ipv6_nd_ra_interval_msec_cmd); install_element (INTERFACE_NODE, &no_ipv6_nd_ra_interval_cmd); install_element (INTERFACE_NODE, &no_ipv6_nd_ra_interval_val_cmd); install_element (INTERFACE_NODE, &no_ipv6_nd_ra_interval_msec_val_cmd); install_element (INTERFACE_NODE, &ipv6_nd_ra_lifetime_cmd); install_element (INTERFACE_NODE, &no_ipv6_nd_ra_lifetime_cmd); install_element (INTERFACE_NODE, &no_ipv6_nd_ra_lifetime_val_cmd); install_element (INTERFACE_NODE, &ipv6_nd_reachable_time_cmd); install_element (INTERFACE_NODE, &no_ipv6_nd_reachable_time_cmd); install_element (INTERFACE_NODE, &no_ipv6_nd_reachable_time_val_cmd); install_element (INTERFACE_NODE, &ipv6_nd_managed_config_flag_cmd); install_element (INTERFACE_NODE, &no_ipv6_nd_managed_config_flag_cmd); install_element (INTERFACE_NODE, &ipv6_nd_other_config_flag_cmd); install_element (INTERFACE_NODE, &no_ipv6_nd_other_config_flag_cmd); install_element (INTERFACE_NODE, &ipv6_nd_homeagent_config_flag_cmd); install_element (INTERFACE_NODE, &no_ipv6_nd_homeagent_config_flag_cmd); install_element (INTERFACE_NODE, &ipv6_nd_homeagent_preference_cmd); install_element (INTERFACE_NODE, &no_ipv6_nd_homeagent_preference_cmd); install_element (INTERFACE_NODE, &no_ipv6_nd_homeagent_preference_val_cmd); install_element (INTERFACE_NODE, &ipv6_nd_homeagent_lifetime_cmd); install_element (INTERFACE_NODE, &no_ipv6_nd_homeagent_lifetime_cmd); install_element (INTERFACE_NODE, &no_ipv6_nd_homeagent_lifetime_val_cmd); install_element (INTERFACE_NODE, &ipv6_nd_adv_interval_config_option_cmd); install_element (INTERFACE_NODE, &no_ipv6_nd_adv_interval_config_option_cmd); install_element (INTERFACE_NODE, &ipv6_nd_prefix_cmd); install_element (INTERFACE_NODE, &ipv6_nd_prefix_val_rev_rtaddr_cmd); install_element (INTERFACE_NODE, &ipv6_nd_prefix_val_nortaddr_cmd); install_element (INTERFACE_NODE, &ipv6_nd_prefix_val_rev_cmd); install_element (INTERFACE_NODE, &ipv6_nd_prefix_val_noauto_cmd); install_element (INTERFACE_NODE, &ipv6_nd_prefix_val_offlink_cmd); install_element (INTERFACE_NODE, &ipv6_nd_prefix_val_rtaddr_cmd); install_element (INTERFACE_NODE, &ipv6_nd_prefix_val_cmd); install_element (INTERFACE_NODE, &ipv6_nd_prefix_noval_cmd); install_element (INTERFACE_NODE, &ipv6_nd_prefix_noval_rev_cmd); install_element (INTERFACE_NODE, &ipv6_nd_prefix_noval_noauto_cmd); install_element (INTERFACE_NODE, &ipv6_nd_prefix_noval_offlink_cmd); install_element (INTERFACE_NODE, &ipv6_nd_prefix_noval_rtaddr_cmd); install_element (INTERFACE_NODE, &ipv6_nd_prefix_prefix_cmd); install_element (INTERFACE_NODE, &no_ipv6_nd_prefix_cmd); install_element (INTERFACE_NODE, &ipv6_nd_router_preference_cmd); install_element (INTERFACE_NODE, &no_ipv6_nd_router_preference_cmd); install_element (INTERFACE_NODE, &no_ipv6_nd_router_preference_val_cmd); install_element (INTERFACE_NODE, &ipv6_nd_mtu_cmd); 
install_element (INTERFACE_NODE, &no_ipv6_nd_mtu_cmd); install_element (INTERFACE_NODE, &no_ipv6_nd_mtu_val_cmd); } static int if_join_all_router (int sock, struct interface *ifp) { int ret; struct ipv6_mreq mreq; memset (&mreq, 0, sizeof (struct ipv6_mreq)); inet_pton (AF_INET6, ALLROUTER, &mreq.ipv6mr_multiaddr); mreq.ipv6mr_interface = ifp->ifindex; ret = setsockopt (sock, IPPROTO_IPV6, IPV6_JOIN_GROUP, (char *) &mreq, sizeof mreq); if (ret < 0) zlog_warn ("can't setsockopt IPV6_JOIN_GROUP: %s", safe_strerror (errno)); zlog_info ("rtadv: %s join to all-routers multicast group", ifp->name); return 0; } static int if_leave_all_router (int sock, struct interface *ifp) { int ret; struct ipv6_mreq mreq; memset (&mreq, 0, sizeof (struct ipv6_mreq)); inet_pton (AF_INET6, ALLROUTER, &mreq.ipv6mr_multiaddr); mreq.ipv6mr_interface = ifp->ifindex; ret = setsockopt (sock, IPPROTO_IPV6, IPV6_LEAVE_GROUP, (char *) &mreq, sizeof mreq); if (ret < 0) zlog_warn ("can't setsockopt IPV6_LEAVE_GROUP: %s", safe_strerror (errno)); zlog_info ("rtadv: %s leave from all-routers multicast group", ifp->name); return 0; } #else void rtadv_init (struct zebra_vrf *zvrf) { /* Empty.*/; } void rtadv_terminate (struct zebra_vrf *zvrf) { /* Empty.*/; } void rtadv_cmd_init (void) { /* Empty.*/; } #endif /* HAVE_RTADV && HAVE_IPV6 */
rtadv_read (struct thread *thread) { int sock; int len; u_char buf[RTADV_MSG_SIZE]; struct sockaddr_in6 from; ifindex_t ifindex = 0; int hoplimit = -1; struct zebra_vrf *zvrf = THREAD_ARG (thread); sock = THREAD_FD (thread); zvrf->rtadv.ra_read = NULL; /* Register myself. */ rtadv_event (zvrf, RTADV_READ, sock); len = rtadv_recv_packet (sock, buf, BUFSIZ, &from, &ifindex, &hoplimit); if (len < 0) { zlog_warn ("router solicitation recv failed: %s.", safe_strerror (errno)); return len; } rtadv_process_packet (buf, (unsigned)len, ifindex, hoplimit, zvrf->vrf_id); return 0; }
rtadv_read (struct thread *thread) { int sock; int len; u_char buf[RTADV_MSG_SIZE]; struct sockaddr_in6 from; ifindex_t ifindex = 0; int hoplimit = -1; struct zebra_vrf *zvrf = THREAD_ARG (thread); sock = THREAD_FD (thread); zvrf->rtadv.ra_read = NULL; /* Register myself. */ rtadv_event (zvrf, RTADV_READ, sock); len = rtadv_recv_packet (sock, buf, sizeof (buf), &from, &ifindex, &hoplimit); if (len < 0) { zlog_warn ("router solicitation recv failed: %s.", safe_strerror (errno)); return len; } rtadv_process_packet (buf, (unsigned)len, ifindex, hoplimit, zvrf->vrf_id); return 0; }
{'added': [(485, ' len = rtadv_recv_packet (sock, buf, sizeof (buf), &from, &ifindex, &hoplimit);')], 'deleted': [(485, ' len = rtadv_recv_packet (sock, buf, BUFSIZ, &from, &ifindex, &hoplimit);')]}
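The one-line diff above is the entire CVE-2016-1245 patch. In func_before, rtadv_read() declares buf as u_char buf[RTADV_MSG_SIZE] (4096 bytes) but tells rtadv_recv_packet() it may fill BUFSIZ bytes; on platforms where BUFSIZ is larger than 4096 (glibc defines it as 8192), recvmsg() can then write past the end of the stack buffer. func_after derives the length from the buffer itself with sizeof (buf), so the two can never disagree. A minimal sketch of the bug shape and of the fix, with a stand-in reader in place of rtadv_recv_packet() (recv_stub and MSG_SIZE are illustrative names, not taken from the Quagga sources):

/* Illustrative only: recv_stub() plays the role of rtadv_recv_packet(),
 * writing exactly as many bytes as the caller claims the buffer holds. */
#include <stdio.h>
#include <string.h>

#define MSG_SIZE 4096                 /* mirrors RTADV_MSG_SIZE */

static int recv_stub(unsigned char *buf, size_t buflen)
{
    memset(buf, 0xAA, buflen);        /* touches all 'buflen' bytes */
    return (int) buflen;
}

int main(void)
{
    unsigned char buf[MSG_SIZE];

    /* Vulnerable shape: BUFSIZ (8192 on glibc) exceeds sizeof buf, so
     * the callee would write 4096 bytes past the stack buffer. */
    /* recv_stub(buf, BUFSIZ); */

    /* Fixed shape: the length argument is derived from the buffer
     * itself, matching the sizeof (buf) call in func_after. */
    recv_stub(buf, sizeof buf);
    printf("filled %zu bytes\n", sizeof buf);
    return 0;
}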
1
1
1425
6821
https://github.com/Quagga/quagga
CVE-2016-1245
['CWE-119']
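The metadata rows are consistent with the patch: num_lines_added and num_lines_deleted are both 1 because the fix replaces a single call site (line 485 of the file, per the diff), and CWE-119 (improper restriction of operations within the bounds of a memory buffer) is the weakness class covering this stack buffer overflow.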
gd_tga.c
read_image_tga
/** * File: TGA Input * * Read TGA images. */ #ifdef HAVE_CONFIG_H #include "config.h" #endif /* HAVE_CONFIG_H */ #include <stdio.h> #include <stddef.h> #include <stdlib.h> #include <string.h> #include "gd_tga.h" #include "gd.h" #include "gd_errors.h" #include "gdhelpers.h" /* Function: gdImageCreateFromTga Creates a gdImage from a TGA file Parameters: infile - Pointer to TGA binary file */ BGD_DECLARE(gdImagePtr) gdImageCreateFromTga(FILE *fp) { gdImagePtr image; gdIOCtx* in = gdNewFileCtx(fp); if (in == NULL) return NULL; image = gdImageCreateFromTgaCtx(in); in->gd_free( in ); return image; } /* Function: gdImageCreateFromTgaPtr */ BGD_DECLARE(gdImagePtr) gdImageCreateFromTgaPtr(int size, void *data) { gdImagePtr im; gdIOCtx *in = gdNewDynamicCtxEx (size, data, 0); if (in == NULL) return NULL; im = gdImageCreateFromTgaCtx(in); in->gd_free(in); return im; } /* Function: gdImageCreateFromTgaCtx Creates a gdImage from a gdIOCtx referencing a TGA binary file. Parameters: ctx - Pointer to a gdIOCtx structure */ BGD_DECLARE(gdImagePtr) gdImageCreateFromTgaCtx(gdIOCtx* ctx) { int bitmap_caret = 0; oTga *tga = NULL; /* int pixel_block_size = 0; int image_block_size = 0; */ volatile gdImagePtr image = NULL; int x = 0; int y = 0; tga = (oTga *) gdMalloc(sizeof(oTga)); if (!tga) { return NULL; } tga->bitmap = NULL; tga->ident = NULL; if (read_header_tga(ctx, tga) < 0) { free_tga(tga); return NULL; } /*TODO: Will this be used? pixel_block_size = tga->bits / 8; image_block_size = (tga->width * tga->height) * pixel_block_size; */ if (read_image_tga(ctx, tga) < 0) { free_tga(tga); return NULL; } image = gdImageCreateTrueColor((int)tga->width, (int)tga->height ); if (image == 0) { free_tga( tga ); return NULL; } /*! \brief Populate GD image object * Copy the pixel data from our tga bitmap buffer into the GD image * Disable blending and save the alpha channel per default */ if (tga->alphabits) { gdImageAlphaBlending(image, 0); gdImageSaveAlpha(image, 1); } /* TODO: use alphabits as soon as we support 24bit and other alpha bps (ie != 8bits) */ for (y = 0; y < tga->height; y++) { register int *tpix = image->tpixels[y]; for ( x = 0; x < tga->width; x++, tpix++) { if (tga->bits == TGA_BPP_24) { *tpix = gdTrueColor(tga->bitmap[bitmap_caret + 2], tga->bitmap[bitmap_caret + 1], tga->bitmap[bitmap_caret]); bitmap_caret += 3; } else if (tga->bits == TGA_BPP_32 && tga->alphabits) { register int a = tga->bitmap[bitmap_caret + 3]; *tpix = gdTrueColorAlpha(tga->bitmap[bitmap_caret + 2], tga->bitmap[bitmap_caret + 1], tga->bitmap[bitmap_caret], gdAlphaMax - (a >> 1)); bitmap_caret += 4; } } } if (tga->flipv && tga->fliph) { gdImageFlipBoth(image); } else if (tga->flipv) { gdImageFlipVertical(image); } else if (tga->fliph) { gdImageFlipHorizontal(image); } free_tga(tga); return image; } /*! \brief Reads a TGA header. * Reads the header block from a binary TGA file populating the referenced TGA structure. 
* \param ctx Pointer to TGA binary file * \param tga Pointer to TGA structure * \return int 1 on sucess, -1 on failure */ int read_header_tga(gdIOCtx *ctx, oTga *tga) { unsigned char header[18]; if (gdGetBuf(header, sizeof(header), ctx) < 18) { gd_error("fail to read header"); return -1; } tga->identsize = header[0]; tga->colormaptype = header[1]; tga->imagetype = header[2]; tga->colormapstart = header[3] + (header[4] << 8); tga->colormaplength = header[5] + (header[6] << 8); tga->colormapbits = header[7]; tga->xstart = header[8] + (header[9] << 8); tga->ystart = header[10] + (header[11] << 8); tga->width = header[12] + (header[13] << 8); tga->height = header[14] + (header[15] << 8); tga->bits = header[16]; tga->alphabits = header[17] & 0x0f; tga->fliph = (header[17] & 0x10) ? 1 : 0; tga->flipv = (header[17] & 0x20) ? 0 : 1; #if DEBUG printf("format bps: %i\n", tga->bits); printf("flip h/v: %i / %i\n", tga->fliph, tga->flipv); printf("alpha: %i\n", tga->alphabits); printf("wxh: %i %i\n", tga->width, tga->height); #endif if (!((tga->bits == TGA_BPP_24 && tga->alphabits == 0) || (tga->bits == TGA_BPP_32 && tga->alphabits == 8))) { gd_error_ex(GD_WARNING, "gd-tga: %u bits per pixel with %u alpha bits not supported\n", tga->bits, tga->alphabits); return -1; } tga->ident = NULL; if (tga->identsize > 0) { tga->ident = (char *) gdMalloc(tga->identsize * sizeof(char)); if(tga->ident == NULL) { return -1; } gdGetBuf(tga->ident, tga->identsize, ctx); } return 1; } /*! \brief Reads a TGA image data into buffer. * Reads the image data block from a binary TGA file populating the referenced TGA structure. * \param ctx Pointer to TGA binary file * \param tga Pointer to TGA structure * \return int 0 on sucess, -1 on failure */ int read_image_tga( gdIOCtx *ctx, oTga *tga ) { int pixel_block_size = (tga->bits / 8); int image_block_size = (tga->width * tga->height) * pixel_block_size; int* decompression_buffer = NULL; unsigned char* conversion_buffer = NULL; int buffer_caret = 0; int bitmap_caret = 0; int i = 0; int encoded_pixels; int rle_size; if(overflow2(tga->width, tga->height)) { return -1; } if(overflow2(tga->width * tga->height, pixel_block_size)) { return -1; } if(overflow2(image_block_size, sizeof(int))) { return -1; } /*! \todo Add more image type support. */ if (tga->imagetype != TGA_TYPE_RGB && tga->imagetype != TGA_TYPE_RGB_RLE) return -1; /*! \brief Allocate memmory for image block * Allocate a chunk of memory for the image block to be passed into. */ tga->bitmap = (int *) gdMalloc(image_block_size * sizeof(int)); if (tga->bitmap == NULL) return -1; switch (tga->imagetype) { case TGA_TYPE_RGB: /*! \brief Read in uncompressed RGB TGA * Chunk load the pixel data from an uncompressed RGB type TGA. */ conversion_buffer = (unsigned char *) gdMalloc(image_block_size * sizeof(unsigned char)); if (conversion_buffer == NULL) { return -1; } if (gdGetBuf(conversion_buffer, image_block_size, ctx) != image_block_size) { gd_error("gd-tga: premature end of image data\n"); gdFree(conversion_buffer); return -1; } while (buffer_caret < image_block_size) { tga->bitmap[buffer_caret] = (int) conversion_buffer[buffer_caret]; buffer_caret++; } gdFree(conversion_buffer); break; case TGA_TYPE_RGB_RLE: /*! \brief Read in RLE compressed RGB TGA * Chunk load the pixel data from an RLE compressed RGB type TGA. 
*/ decompression_buffer = (int*) gdMalloc(image_block_size * sizeof(int)); if (decompression_buffer == NULL) { return -1; } conversion_buffer = (unsigned char *) gdMalloc(image_block_size * sizeof(unsigned char)); if (conversion_buffer == NULL) { gd_error("gd-tga: premature end of image data\n"); gdFree( decompression_buffer ); return -1; } rle_size = gdGetBuf(conversion_buffer, image_block_size, ctx); if (rle_size <= 0) { gdFree(conversion_buffer); gdFree(decompression_buffer); return -1; } buffer_caret = 0; while( buffer_caret < rle_size) { decompression_buffer[buffer_caret] = (int)conversion_buffer[buffer_caret]; buffer_caret++; } buffer_caret = 0; while( bitmap_caret < image_block_size ) { if ((decompression_buffer[buffer_caret] & TGA_RLE_FLAG) == TGA_RLE_FLAG) { encoded_pixels = ( ( decompression_buffer[ buffer_caret ] & ~TGA_RLE_FLAG ) + 1 ); buffer_caret++; if ((bitmap_caret + (encoded_pixels * pixel_block_size)) > image_block_size || buffer_caret + pixel_block_size > rle_size) { gdFree( decompression_buffer ); gdFree( conversion_buffer ); return -1; } for (i = 0; i < encoded_pixels; i++) { memcpy(tga->bitmap + bitmap_caret, decompression_buffer + buffer_caret, pixel_block_size * sizeof(int)); bitmap_caret += pixel_block_size; } buffer_caret += pixel_block_size; } else { encoded_pixels = decompression_buffer[ buffer_caret ] + 1; buffer_caret++; if ((bitmap_caret + (encoded_pixels * pixel_block_size)) > image_block_size || buffer_caret + (encoded_pixels * pixel_block_size) > rle_size) { gdFree( decompression_buffer ); gdFree( conversion_buffer ); return -1; } memcpy(tga->bitmap + bitmap_caret, decompression_buffer + buffer_caret, encoded_pixels * pixel_block_size * sizeof(int)); bitmap_caret += (encoded_pixels * pixel_block_size); buffer_caret += (encoded_pixels * pixel_block_size); } } gdFree( decompression_buffer ); gdFree( conversion_buffer ); break; } return 1; } /*! \brief Cleans up a TGA structure. * Dereferences the bitmap referenced in a TGA structure, then the structure itself * \param tga Pointer to TGA structure */ void free_tga(oTga * tga) { if (tga) { if (tga->ident) gdFree(tga->ident); if (tga->bitmap) gdFree(tga->bitmap); gdFree(tga); } }
/** * File: TGA Input * * Read TGA images. */ #ifdef HAVE_CONFIG_H #include "config.h" #endif /* HAVE_CONFIG_H */ #include <stdio.h> #include <stddef.h> #include <stdlib.h> #include <string.h> #include "gd_tga.h" #include "gd.h" #include "gd_errors.h" #include "gdhelpers.h" /* Function: gdImageCreateFromTga Creates a gdImage from a TGA file Parameters: infile - Pointer to TGA binary file */ BGD_DECLARE(gdImagePtr) gdImageCreateFromTga(FILE *fp) { gdImagePtr image; gdIOCtx* in = gdNewFileCtx(fp); if (in == NULL) return NULL; image = gdImageCreateFromTgaCtx(in); in->gd_free( in ); return image; } /* Function: gdImageCreateFromTgaPtr */ BGD_DECLARE(gdImagePtr) gdImageCreateFromTgaPtr(int size, void *data) { gdImagePtr im; gdIOCtx *in = gdNewDynamicCtxEx (size, data, 0); if (in == NULL) return NULL; im = gdImageCreateFromTgaCtx(in); in->gd_free(in); return im; } /* Function: gdImageCreateFromTgaCtx Creates a gdImage from a gdIOCtx referencing a TGA binary file. Parameters: ctx - Pointer to a gdIOCtx structure */ BGD_DECLARE(gdImagePtr) gdImageCreateFromTgaCtx(gdIOCtx* ctx) { int bitmap_caret = 0; oTga *tga = NULL; /* int pixel_block_size = 0; int image_block_size = 0; */ volatile gdImagePtr image = NULL; int x = 0; int y = 0; tga = (oTga *) gdMalloc(sizeof(oTga)); if (!tga) { return NULL; } tga->bitmap = NULL; tga->ident = NULL; if (read_header_tga(ctx, tga) < 0) { free_tga(tga); return NULL; } /*TODO: Will this be used? pixel_block_size = tga->bits / 8; image_block_size = (tga->width * tga->height) * pixel_block_size; */ if (read_image_tga(ctx, tga) < 0) { free_tga(tga); return NULL; } image = gdImageCreateTrueColor((int)tga->width, (int)tga->height ); if (image == 0) { free_tga( tga ); return NULL; } /*! \brief Populate GD image object * Copy the pixel data from our tga bitmap buffer into the GD image * Disable blending and save the alpha channel per default */ if (tga->alphabits) { gdImageAlphaBlending(image, 0); gdImageSaveAlpha(image, 1); } /* TODO: use alphabits as soon as we support 24bit and other alpha bps (ie != 8bits) */ for (y = 0; y < tga->height; y++) { register int *tpix = image->tpixels[y]; for ( x = 0; x < tga->width; x++, tpix++) { if (tga->bits == TGA_BPP_24) { *tpix = gdTrueColor(tga->bitmap[bitmap_caret + 2], tga->bitmap[bitmap_caret + 1], tga->bitmap[bitmap_caret]); bitmap_caret += 3; } else if (tga->bits == TGA_BPP_32 && tga->alphabits) { register int a = tga->bitmap[bitmap_caret + 3]; *tpix = gdTrueColorAlpha(tga->bitmap[bitmap_caret + 2], tga->bitmap[bitmap_caret + 1], tga->bitmap[bitmap_caret], gdAlphaMax - (a >> 1)); bitmap_caret += 4; } } } if (tga->flipv && tga->fliph) { gdImageFlipBoth(image); } else if (tga->flipv) { gdImageFlipVertical(image); } else if (tga->fliph) { gdImageFlipHorizontal(image); } free_tga(tga); return image; } /*! \brief Reads a TGA header. * Reads the header block from a binary TGA file populating the referenced TGA structure. 
* \param ctx Pointer to TGA binary file * \param tga Pointer to TGA structure * \return int 1 on sucess, -1 on failure */ int read_header_tga(gdIOCtx *ctx, oTga *tga) { unsigned char header[18]; if (gdGetBuf(header, sizeof(header), ctx) < 18) { gd_error("fail to read header"); return -1; } tga->identsize = header[0]; tga->colormaptype = header[1]; tga->imagetype = header[2]; tga->colormapstart = header[3] + (header[4] << 8); tga->colormaplength = header[5] + (header[6] << 8); tga->colormapbits = header[7]; tga->xstart = header[8] + (header[9] << 8); tga->ystart = header[10] + (header[11] << 8); tga->width = header[12] + (header[13] << 8); tga->height = header[14] + (header[15] << 8); tga->bits = header[16]; tga->alphabits = header[17] & 0x0f; tga->fliph = (header[17] & 0x10) ? 1 : 0; tga->flipv = (header[17] & 0x20) ? 0 : 1; #if DEBUG printf("format bps: %i\n", tga->bits); printf("flip h/v: %i / %i\n", tga->fliph, tga->flipv); printf("alpha: %i\n", tga->alphabits); printf("wxh: %i %i\n", tga->width, tga->height); #endif if (!((tga->bits == TGA_BPP_24 && tga->alphabits == 0) || (tga->bits == TGA_BPP_32 && tga->alphabits == 8))) { gd_error_ex(GD_WARNING, "gd-tga: %u bits per pixel with %u alpha bits not supported\n", tga->bits, tga->alphabits); return -1; } tga->ident = NULL; if (tga->identsize > 0) { tga->ident = (char *) gdMalloc(tga->identsize * sizeof(char)); if(tga->ident == NULL) { return -1; } gdGetBuf(tga->ident, tga->identsize, ctx); } return 1; } /*! \brief Reads a TGA image data into buffer. * Reads the image data block from a binary TGA file populating the referenced TGA structure. * \param ctx Pointer to TGA binary file * \param tga Pointer to TGA structure * \return int 0 on sucess, -1 on failure */ int read_image_tga( gdIOCtx *ctx, oTga *tga ) { int pixel_block_size = (tga->bits / 8); int image_block_size = (tga->width * tga->height) * pixel_block_size; int* decompression_buffer = NULL; unsigned char* conversion_buffer = NULL; int buffer_caret = 0; int bitmap_caret = 0; int i = 0; int encoded_pixels; int rle_size; if(overflow2(tga->width, tga->height)) { return -1; } if(overflow2(tga->width * tga->height, pixel_block_size)) { return -1; } if(overflow2(image_block_size, sizeof(int))) { return -1; } /*! \todo Add more image type support. */ if (tga->imagetype != TGA_TYPE_RGB && tga->imagetype != TGA_TYPE_RGB_RLE) return -1; /*! \brief Allocate memmory for image block * Allocate a chunk of memory for the image block to be passed into. */ tga->bitmap = (int *) gdMalloc(image_block_size * sizeof(int)); if (tga->bitmap == NULL) return -1; switch (tga->imagetype) { case TGA_TYPE_RGB: /*! \brief Read in uncompressed RGB TGA * Chunk load the pixel data from an uncompressed RGB type TGA. */ conversion_buffer = (unsigned char *) gdMalloc(image_block_size * sizeof(unsigned char)); if (conversion_buffer == NULL) { return -1; } if (gdGetBuf(conversion_buffer, image_block_size, ctx) != image_block_size) { gd_error("gd-tga: premature end of image data\n"); gdFree(conversion_buffer); return -1; } while (buffer_caret < image_block_size) { tga->bitmap[buffer_caret] = (int) conversion_buffer[buffer_caret]; buffer_caret++; } gdFree(conversion_buffer); break; case TGA_TYPE_RGB_RLE: /*! \brief Read in RLE compressed RGB TGA * Chunk load the pixel data from an RLE compressed RGB type TGA. 
*/ decompression_buffer = (int*) gdMalloc(image_block_size * sizeof(int)); if (decompression_buffer == NULL) { return -1; } conversion_buffer = (unsigned char *) gdMalloc(image_block_size * sizeof(unsigned char)); if (conversion_buffer == NULL) { gd_error("gd-tga: premature end of image data\n"); gdFree( decompression_buffer ); return -1; } rle_size = gdGetBuf(conversion_buffer, image_block_size, ctx); if (rle_size <= 0) { gdFree(conversion_buffer); gdFree(decompression_buffer); return -1; } buffer_caret = 0; while( buffer_caret < rle_size) { decompression_buffer[buffer_caret] = (int)conversion_buffer[buffer_caret]; buffer_caret++; } buffer_caret = 0; while( bitmap_caret < image_block_size ) { if (buffer_caret + pixel_block_size > rle_size) { gdFree( decompression_buffer ); gdFree( conversion_buffer ); return -1; } if ((decompression_buffer[buffer_caret] & TGA_RLE_FLAG) == TGA_RLE_FLAG) { encoded_pixels = ( ( decompression_buffer[ buffer_caret ] & ~TGA_RLE_FLAG ) + 1 ); buffer_caret++; if ((bitmap_caret + (encoded_pixels * pixel_block_size)) > image_block_size || buffer_caret + pixel_block_size > rle_size) { gdFree( decompression_buffer ); gdFree( conversion_buffer ); return -1; } for (i = 0; i < encoded_pixels; i++) { memcpy(tga->bitmap + bitmap_caret, decompression_buffer + buffer_caret, pixel_block_size * sizeof(int)); bitmap_caret += pixel_block_size; } buffer_caret += pixel_block_size; } else { encoded_pixels = decompression_buffer[ buffer_caret ] + 1; buffer_caret++; if ((bitmap_caret + (encoded_pixels * pixel_block_size)) > image_block_size || buffer_caret + (encoded_pixels * pixel_block_size) > rle_size) { gdFree( decompression_buffer ); gdFree( conversion_buffer ); return -1; } memcpy(tga->bitmap + bitmap_caret, decompression_buffer + buffer_caret, encoded_pixels * pixel_block_size * sizeof(int)); bitmap_caret += (encoded_pixels * pixel_block_size); buffer_caret += (encoded_pixels * pixel_block_size); } } gdFree( decompression_buffer ); gdFree( conversion_buffer ); break; } return 1; } /*! \brief Cleans up a TGA structure. * Dereferences the bitmap referenced in a TGA structure, then the structure itself * \param tga Pointer to TGA structure */ void free_tga(oTga * tga) { if (tga) { if (tga->ident) gdFree(tga->ident); if (tga->bitmap) gdFree(tga->bitmap); gdFree(tga); } }
int read_image_tga( gdIOCtx *ctx, oTga *tga ) { int pixel_block_size = (tga->bits / 8); int image_block_size = (tga->width * tga->height) * pixel_block_size; int* decompression_buffer = NULL; unsigned char* conversion_buffer = NULL; int buffer_caret = 0; int bitmap_caret = 0; int i = 0; int encoded_pixels; int rle_size; if(overflow2(tga->width, tga->height)) { return -1; } if(overflow2(tga->width * tga->height, pixel_block_size)) { return -1; } if(overflow2(image_block_size, sizeof(int))) { return -1; } /*! \todo Add more image type support. */ if (tga->imagetype != TGA_TYPE_RGB && tga->imagetype != TGA_TYPE_RGB_RLE) return -1; /*! \brief Allocate memmory for image block * Allocate a chunk of memory for the image block to be passed into. */ tga->bitmap = (int *) gdMalloc(image_block_size * sizeof(int)); if (tga->bitmap == NULL) return -1; switch (tga->imagetype) { case TGA_TYPE_RGB: /*! \brief Read in uncompressed RGB TGA * Chunk load the pixel data from an uncompressed RGB type TGA. */ conversion_buffer = (unsigned char *) gdMalloc(image_block_size * sizeof(unsigned char)); if (conversion_buffer == NULL) { return -1; } if (gdGetBuf(conversion_buffer, image_block_size, ctx) != image_block_size) { gd_error("gd-tga: premature end of image data\n"); gdFree(conversion_buffer); return -1; } while (buffer_caret < image_block_size) { tga->bitmap[buffer_caret] = (int) conversion_buffer[buffer_caret]; buffer_caret++; } gdFree(conversion_buffer); break; case TGA_TYPE_RGB_RLE: /*! \brief Read in RLE compressed RGB TGA * Chunk load the pixel data from an RLE compressed RGB type TGA. */ decompression_buffer = (int*) gdMalloc(image_block_size * sizeof(int)); if (decompression_buffer == NULL) { return -1; } conversion_buffer = (unsigned char *) gdMalloc(image_block_size * sizeof(unsigned char)); if (conversion_buffer == NULL) { gd_error("gd-tga: premature end of image data\n"); gdFree( decompression_buffer ); return -1; } rle_size = gdGetBuf(conversion_buffer, image_block_size, ctx); if (rle_size <= 0) { gdFree(conversion_buffer); gdFree(decompression_buffer); return -1; } buffer_caret = 0; while( buffer_caret < rle_size) { decompression_buffer[buffer_caret] = (int)conversion_buffer[buffer_caret]; buffer_caret++; } buffer_caret = 0; while( bitmap_caret < image_block_size ) { if ((decompression_buffer[buffer_caret] & TGA_RLE_FLAG) == TGA_RLE_FLAG) { encoded_pixels = ( ( decompression_buffer[ buffer_caret ] & ~TGA_RLE_FLAG ) + 1 ); buffer_caret++; if ((bitmap_caret + (encoded_pixels * pixel_block_size)) > image_block_size || buffer_caret + pixel_block_size > rle_size) { gdFree( decompression_buffer ); gdFree( conversion_buffer ); return -1; } for (i = 0; i < encoded_pixels; i++) { memcpy(tga->bitmap + bitmap_caret, decompression_buffer + buffer_caret, pixel_block_size * sizeof(int)); bitmap_caret += pixel_block_size; } buffer_caret += pixel_block_size; } else { encoded_pixels = decompression_buffer[ buffer_caret ] + 1; buffer_caret++; if ((bitmap_caret + (encoded_pixels * pixel_block_size)) > image_block_size || buffer_caret + (encoded_pixels * pixel_block_size) > rle_size) { gdFree( decompression_buffer ); gdFree( conversion_buffer ); return -1; } memcpy(tga->bitmap + bitmap_caret, decompression_buffer + buffer_caret, encoded_pixels * pixel_block_size * sizeof(int)); bitmap_caret += (encoded_pixels * pixel_block_size); buffer_caret += (encoded_pixels * pixel_block_size); } } gdFree( decompression_buffer ); gdFree( conversion_buffer ); break; } return 1; }
int read_image_tga( gdIOCtx *ctx, oTga *tga ) { int pixel_block_size = (tga->bits / 8); int image_block_size = (tga->width * tga->height) * pixel_block_size; int* decompression_buffer = NULL; unsigned char* conversion_buffer = NULL; int buffer_caret = 0; int bitmap_caret = 0; int i = 0; int encoded_pixels; int rle_size; if(overflow2(tga->width, tga->height)) { return -1; } if(overflow2(tga->width * tga->height, pixel_block_size)) { return -1; } if(overflow2(image_block_size, sizeof(int))) { return -1; } /*! \todo Add more image type support. */ if (tga->imagetype != TGA_TYPE_RGB && tga->imagetype != TGA_TYPE_RGB_RLE) return -1; /*! \brief Allocate memmory for image block * Allocate a chunk of memory for the image block to be passed into. */ tga->bitmap = (int *) gdMalloc(image_block_size * sizeof(int)); if (tga->bitmap == NULL) return -1; switch (tga->imagetype) { case TGA_TYPE_RGB: /*! \brief Read in uncompressed RGB TGA * Chunk load the pixel data from an uncompressed RGB type TGA. */ conversion_buffer = (unsigned char *) gdMalloc(image_block_size * sizeof(unsigned char)); if (conversion_buffer == NULL) { return -1; } if (gdGetBuf(conversion_buffer, image_block_size, ctx) != image_block_size) { gd_error("gd-tga: premature end of image data\n"); gdFree(conversion_buffer); return -1; } while (buffer_caret < image_block_size) { tga->bitmap[buffer_caret] = (int) conversion_buffer[buffer_caret]; buffer_caret++; } gdFree(conversion_buffer); break; case TGA_TYPE_RGB_RLE: /*! \brief Read in RLE compressed RGB TGA * Chunk load the pixel data from an RLE compressed RGB type TGA. */ decompression_buffer = (int*) gdMalloc(image_block_size * sizeof(int)); if (decompression_buffer == NULL) { return -1; } conversion_buffer = (unsigned char *) gdMalloc(image_block_size * sizeof(unsigned char)); if (conversion_buffer == NULL) { gd_error("gd-tga: premature end of image data\n"); gdFree( decompression_buffer ); return -1; } rle_size = gdGetBuf(conversion_buffer, image_block_size, ctx); if (rle_size <= 0) { gdFree(conversion_buffer); gdFree(decompression_buffer); return -1; } buffer_caret = 0; while( buffer_caret < rle_size) { decompression_buffer[buffer_caret] = (int)conversion_buffer[buffer_caret]; buffer_caret++; } buffer_caret = 0; while( bitmap_caret < image_block_size ) { if (buffer_caret + pixel_block_size > rle_size) { gdFree( decompression_buffer ); gdFree( conversion_buffer ); return -1; } if ((decompression_buffer[buffer_caret] & TGA_RLE_FLAG) == TGA_RLE_FLAG) { encoded_pixels = ( ( decompression_buffer[ buffer_caret ] & ~TGA_RLE_FLAG ) + 1 ); buffer_caret++; if ((bitmap_caret + (encoded_pixels * pixel_block_size)) > image_block_size || buffer_caret + pixel_block_size > rle_size) { gdFree( decompression_buffer ); gdFree( conversion_buffer ); return -1; } for (i = 0; i < encoded_pixels; i++) { memcpy(tga->bitmap + bitmap_caret, decompression_buffer + buffer_caret, pixel_block_size * sizeof(int)); bitmap_caret += pixel_block_size; } buffer_caret += pixel_block_size; } else { encoded_pixels = decompression_buffer[ buffer_caret ] + 1; buffer_caret++; if ((bitmap_caret + (encoded_pixels * pixel_block_size)) > image_block_size || buffer_caret + (encoded_pixels * pixel_block_size) > rle_size) { gdFree( decompression_buffer ); gdFree( conversion_buffer ); return -1; } memcpy(tga->bitmap + bitmap_caret, decompression_buffer + buffer_caret, encoded_pixels * pixel_block_size * sizeof(int)); bitmap_caret += (encoded_pixels * pixel_block_size); buffer_caret += (encoded_pixels * pixel_block_size); } } gdFree( 
decompression_buffer ); gdFree( conversion_buffer ); break; } return 1; }
{'added': [(298, ''), (299, '\t\t\tif (buffer_caret + pixel_block_size > rle_size) {'), (300, '\t\t\t\tgdFree( decompression_buffer );'), (301, '\t\t\t\tgdFree( conversion_buffer );'), (302, '\t\t\t\treturn -1;'), (303, '\t\t\t}'), (304, '')], 'deleted': [(298, '')]}
7
1
237
1,649
https://github.com/libgd/libgd
CVE-2016-6906
['CWE-125']
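The record above is the CVE-2016-6906 fix in libgd's TGA reader: read_image_tga() walked the RLE stream trusting the packet headers, so a file whose compressed data ended early let decompression_buffer[buffer_caret] be read past the bytes gdGetBuf() actually returned (out-of-bounds read, hence CWE-125). The added hunk bounds-checks the read cursor against rle_size before each packet header is consumed. Below is a minimal, self-contained sketch of that pattern with hypothetical names (it is not the libgd code): every input-cursor advance is validated against the byte count actually read, and every output write against the size the image header promised.

#include <stddef.h>
#include <string.h>

/*
 * Hypothetical sketch, not the libgd implementation: decode a TGA-style
 * RLE stream of `in_len` bytes (what was actually read from the file)
 * into an output buffer of `out_len` bytes (what the image header
 * promised), `bpp` bytes per pixel. Returns 0 on success, -1 on a
 * truncated or oversized stream.
 */
static int rle_decode(const unsigned char *in, size_t in_len,
                      unsigned char *out, size_t out_len, size_t bpp)
{
    size_t ip = 0, op = 0;

    while (op < out_len) {
        if (ip >= in_len)               /* packet header must be inside the input */
            return -1;

        unsigned char hdr = in[ip++];
        size_t count  = (size_t)(hdr & 0x7f) + 1;   /* 1..128 pixels */
        size_t nbytes = count * bpp;

        if (op + nbytes > out_len)      /* output must have room for the packet */
            return -1;

        if (hdr & 0x80) {               /* run packet: one literal pixel, repeated */
            if (ip + bpp > in_len)
                return -1;
            for (size_t i = 0; i < count; i++, op += bpp)
                memcpy(out + op, in + ip, bpp);
            ip += bpp;
        } else {                        /* raw packet: `count` literal pixels */
            if (ip + nbytes > in_len)
                return -1;
            memcpy(out + op, in + ip, nbytes);
            ip += nbytes;
            op += nbytes;
        }
    }
    return 0;
}

Checking both cursors on every packet keeps each memcpy trivially safe no matter what the headers claim, which is the essence of the one-hunk patch shown in the diff field.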
l2tp_ip.c
l2tp_ip_recvmsg
/* * L2TPv3 IP encapsulation support * * Copyright (c) 2008,2009,2010 Katalix Systems Ltd * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/icmp.h> #include <linux/module.h> #include <linux/skbuff.h> #include <linux/random.h> #include <linux/socket.h> #include <linux/l2tp.h> #include <linux/in.h> #include <net/sock.h> #include <net/ip.h> #include <net/icmp.h> #include <net/udp.h> #include <net/inet_common.h> #include <net/inet_hashtables.h> #include <net/tcp_states.h> #include <net/protocol.h> #include <net/xfrm.h> #include "l2tp_core.h" struct l2tp_ip_sock { /* inet_sock has to be the first member of l2tp_ip_sock */ struct inet_sock inet; u32 conn_id; u32 peer_conn_id; }; static DEFINE_RWLOCK(l2tp_ip_lock); static struct hlist_head l2tp_ip_table; static struct hlist_head l2tp_ip_bind_table; static inline struct l2tp_ip_sock *l2tp_ip_sk(const struct sock *sk) { return (struct l2tp_ip_sock *)sk; } static struct sock *__l2tp_ip_bind_lookup(struct net *net, __be32 laddr, int dif, u32 tunnel_id) { struct sock *sk; sk_for_each_bound(sk, &l2tp_ip_bind_table) { struct inet_sock *inet = inet_sk(sk); struct l2tp_ip_sock *l2tp = l2tp_ip_sk(sk); if (l2tp == NULL) continue; if ((l2tp->conn_id == tunnel_id) && net_eq(sock_net(sk), net) && !(inet->inet_rcv_saddr && inet->inet_rcv_saddr != laddr) && !(sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif)) goto found; } sk = NULL; found: return sk; } static inline struct sock *l2tp_ip_bind_lookup(struct net *net, __be32 laddr, int dif, u32 tunnel_id) { struct sock *sk = __l2tp_ip_bind_lookup(net, laddr, dif, tunnel_id); if (sk) sock_hold(sk); return sk; } /* When processing receive frames, there are two cases to * consider. Data frames consist of a non-zero session-id and an * optional cookie. Control frames consist of a regular L2TP header * preceded by 32-bits of zeros. * * L2TPv3 Session Header Over IP * * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Session ID | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Cookie (optional, maximum 64 bits)... * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * * L2TPv3 Control Message Header Over IP * * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | (32 bits of zeros) | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * |T|L|x|x|S|x|x|x|x|x|x|x| Ver | Length | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Control Connection ID | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Ns | Nr | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * * All control frames are passed to userspace. 
*/ static int l2tp_ip_recv(struct sk_buff *skb) { struct net *net = dev_net(skb->dev); struct sock *sk; u32 session_id; u32 tunnel_id; unsigned char *ptr, *optr; struct l2tp_session *session; struct l2tp_tunnel *tunnel = NULL; int length; /* Point to L2TP header */ optr = ptr = skb->data; if (!pskb_may_pull(skb, 4)) goto discard; session_id = ntohl(*((__be32 *) ptr)); ptr += 4; /* RFC3931: L2TP/IP packets have the first 4 bytes containing * the session_id. If it is 0, the packet is a L2TP control * frame and the session_id value can be discarded. */ if (session_id == 0) { __skb_pull(skb, 4); goto pass_up; } /* Ok, this is a data packet. Lookup the session. */ session = l2tp_session_find(net, NULL, session_id); if (session == NULL) goto discard; tunnel = session->tunnel; if (tunnel == NULL) goto discard; /* Trace packet contents, if enabled */ if (tunnel->debug & L2TP_MSG_DATA) { length = min(32u, skb->len); if (!pskb_may_pull(skb, length)) goto discard; pr_debug("%s: ip recv\n", tunnel->name); print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length); } l2tp_recv_common(session, skb, ptr, optr, 0, skb->len, tunnel->recv_payload_hook); return 0; pass_up: /* Get the tunnel_id from the L2TP header */ if (!pskb_may_pull(skb, 12)) goto discard; if ((skb->data[0] & 0xc0) != 0xc0) goto discard; tunnel_id = ntohl(*(__be32 *) &skb->data[4]); tunnel = l2tp_tunnel_find(net, tunnel_id); if (tunnel != NULL) sk = tunnel->sock; else { struct iphdr *iph = (struct iphdr *) skb_network_header(skb); read_lock_bh(&l2tp_ip_lock); sk = __l2tp_ip_bind_lookup(net, iph->daddr, 0, tunnel_id); read_unlock_bh(&l2tp_ip_lock); } if (sk == NULL) goto discard; sock_hold(sk); if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) goto discard_put; nf_reset(skb); return sk_receive_skb(sk, skb, 1); discard_put: sock_put(sk); discard: kfree_skb(skb); return 0; } static int l2tp_ip_open(struct sock *sk) { /* Prevent autobind. We don't have ports. 
*/ inet_sk(sk)->inet_num = IPPROTO_L2TP; write_lock_bh(&l2tp_ip_lock); sk_add_node(sk, &l2tp_ip_table); write_unlock_bh(&l2tp_ip_lock); return 0; } static void l2tp_ip_close(struct sock *sk, long timeout) { write_lock_bh(&l2tp_ip_lock); hlist_del_init(&sk->sk_bind_node); sk_del_node_init(sk); write_unlock_bh(&l2tp_ip_lock); sk_common_release(sk); } static void l2tp_ip_destroy_sock(struct sock *sk) { struct sk_buff *skb; struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk); while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL) kfree_skb(skb); if (tunnel) { l2tp_tunnel_closeall(tunnel); sock_put(sk); } sk_refcnt_debug_dec(sk); } static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len) { struct inet_sock *inet = inet_sk(sk); struct sockaddr_l2tpip *addr = (struct sockaddr_l2tpip *) uaddr; struct net *net = sock_net(sk); int ret; int chk_addr_ret; if (!sock_flag(sk, SOCK_ZAPPED)) return -EINVAL; if (addr_len < sizeof(struct sockaddr_l2tpip)) return -EINVAL; if (addr->l2tp_family != AF_INET) return -EINVAL; ret = -EADDRINUSE; read_lock_bh(&l2tp_ip_lock); if (__l2tp_ip_bind_lookup(net, addr->l2tp_addr.s_addr, sk->sk_bound_dev_if, addr->l2tp_conn_id)) goto out_in_use; read_unlock_bh(&l2tp_ip_lock); lock_sock(sk); if (sk->sk_state != TCP_CLOSE || addr_len < sizeof(struct sockaddr_l2tpip)) goto out; chk_addr_ret = inet_addr_type(net, addr->l2tp_addr.s_addr); ret = -EADDRNOTAVAIL; if (addr->l2tp_addr.s_addr && chk_addr_ret != RTN_LOCAL && chk_addr_ret != RTN_MULTICAST && chk_addr_ret != RTN_BROADCAST) goto out; if (addr->l2tp_addr.s_addr) inet->inet_rcv_saddr = inet->inet_saddr = addr->l2tp_addr.s_addr; if (chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST) inet->inet_saddr = 0; /* Use device */ sk_dst_reset(sk); l2tp_ip_sk(sk)->conn_id = addr->l2tp_conn_id; write_lock_bh(&l2tp_ip_lock); sk_add_bind_node(sk, &l2tp_ip_bind_table); sk_del_node_init(sk); write_unlock_bh(&l2tp_ip_lock); ret = 0; sock_reset_flag(sk, SOCK_ZAPPED); out: release_sock(sk); return ret; out_in_use: read_unlock_bh(&l2tp_ip_lock); return ret; } static int l2tp_ip_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) { struct sockaddr_l2tpip *lsa = (struct sockaddr_l2tpip *) uaddr; int rc; if (sock_flag(sk, SOCK_ZAPPED)) /* Must bind first - autobinding does not work */ return -EINVAL; if (addr_len < sizeof(*lsa)) return -EINVAL; if (ipv4_is_multicast(lsa->l2tp_addr.s_addr)) return -EINVAL; rc = ip4_datagram_connect(sk, uaddr, addr_len); if (rc < 0) return rc; lock_sock(sk); l2tp_ip_sk(sk)->peer_conn_id = lsa->l2tp_conn_id; write_lock_bh(&l2tp_ip_lock); hlist_del_init(&sk->sk_bind_node); sk_add_bind_node(sk, &l2tp_ip_bind_table); write_unlock_bh(&l2tp_ip_lock); release_sock(sk); return rc; } static int l2tp_ip_disconnect(struct sock *sk, int flags) { if (sock_flag(sk, SOCK_ZAPPED)) return 0; return udp_disconnect(sk, flags); } static int l2tp_ip_getname(struct socket *sock, struct sockaddr *uaddr, int *uaddr_len, int peer) { struct sock *sk = sock->sk; struct inet_sock *inet = inet_sk(sk); struct l2tp_ip_sock *lsk = l2tp_ip_sk(sk); struct sockaddr_l2tpip *lsa = (struct sockaddr_l2tpip *)uaddr; memset(lsa, 0, sizeof(*lsa)); lsa->l2tp_family = AF_INET; if (peer) { if (!inet->inet_dport) return -ENOTCONN; lsa->l2tp_conn_id = lsk->peer_conn_id; lsa->l2tp_addr.s_addr = inet->inet_daddr; } else { __be32 addr = inet->inet_rcv_saddr; if (!addr) addr = inet->inet_saddr; lsa->l2tp_conn_id = lsk->conn_id; lsa->l2tp_addr.s_addr = addr; } *uaddr_len = sizeof(*lsa); return 0; } static int 
l2tp_ip_backlog_recv(struct sock *sk, struct sk_buff *skb) { int rc; /* Charge it to the socket, dropping if the queue is full. */ rc = sock_queue_rcv_skb(sk, skb); if (rc < 0) goto drop; return 0; drop: IP_INC_STATS(sock_net(sk), IPSTATS_MIB_INDISCARDS); kfree_skb(skb); return -1; } /* Userspace will call sendmsg() on the tunnel socket to send L2TP * control frames. */ static int l2tp_ip_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, size_t len) { struct sk_buff *skb; int rc; struct inet_sock *inet = inet_sk(sk); struct rtable *rt = NULL; struct flowi4 *fl4; int connected = 0; __be32 daddr; lock_sock(sk); rc = -ENOTCONN; if (sock_flag(sk, SOCK_DEAD)) goto out; /* Get and verify the address. */ if (msg->msg_name) { struct sockaddr_l2tpip *lip = (struct sockaddr_l2tpip *) msg->msg_name; rc = -EINVAL; if (msg->msg_namelen < sizeof(*lip)) goto out; if (lip->l2tp_family != AF_INET) { rc = -EAFNOSUPPORT; if (lip->l2tp_family != AF_UNSPEC) goto out; } daddr = lip->l2tp_addr.s_addr; } else { rc = -EDESTADDRREQ; if (sk->sk_state != TCP_ESTABLISHED) goto out; daddr = inet->inet_daddr; connected = 1; } /* Allocate a socket buffer */ rc = -ENOMEM; skb = sock_wmalloc(sk, 2 + NET_SKB_PAD + sizeof(struct iphdr) + 4 + len, 0, GFP_KERNEL); if (!skb) goto error; /* Reserve space for headers, putting IP header on 4-byte boundary. */ skb_reserve(skb, 2 + NET_SKB_PAD); skb_reset_network_header(skb); skb_reserve(skb, sizeof(struct iphdr)); skb_reset_transport_header(skb); /* Insert 0 session_id */ *((__be32 *) skb_put(skb, 4)) = 0; /* Copy user data into skb */ rc = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len); if (rc < 0) { kfree_skb(skb); goto error; } fl4 = &inet->cork.fl.u.ip4; if (connected) rt = (struct rtable *) __sk_dst_check(sk, 0); rcu_read_lock(); if (rt == NULL) { const struct ip_options_rcu *inet_opt; inet_opt = rcu_dereference(inet->inet_opt); /* Use correct destination address if we have options. */ if (inet_opt && inet_opt->opt.srr) daddr = inet_opt->opt.faddr; /* If this fails, retransmit mechanism of transport layer will * keep trying until route appears or the connection times * itself out. */ rt = ip_route_output_ports(sock_net(sk), fl4, sk, daddr, inet->inet_saddr, inet->inet_dport, inet->inet_sport, sk->sk_protocol, RT_CONN_FLAGS(sk), sk->sk_bound_dev_if); if (IS_ERR(rt)) goto no_route; if (connected) { sk_setup_caps(sk, &rt->dst); } else { skb_dst_set(skb, &rt->dst); goto xmit; } } /* We dont need to clone dst here, it is guaranteed to not disappear. * __dev_xmit_skb() might force a refcount if needed. 
*/ skb_dst_set_noref(skb, &rt->dst); xmit: /* Queue the packet to IP for output */ rc = ip_queue_xmit(skb, &inet->cork.fl); rcu_read_unlock(); error: if (rc >= 0) rc = len; out: release_sock(sk); return rc; no_route: rcu_read_unlock(); IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES); kfree_skb(skb); rc = -EHOSTUNREACH; goto out; } static int l2tp_ip_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, size_t len, int noblock, int flags, int *addr_len) { struct inet_sock *inet = inet_sk(sk); size_t copied = 0; int err = -EOPNOTSUPP; struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name; struct sk_buff *skb; if (flags & MSG_OOB) goto out; if (addr_len) *addr_len = sizeof(*sin); skb = skb_recv_datagram(sk, flags, noblock, &err); if (!skb) goto out; copied = skb->len; if (len < copied) { msg->msg_flags |= MSG_TRUNC; copied = len; } err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); if (err) goto done; sock_recv_timestamp(msg, sk, skb); /* Copy the address. */ if (sin) { sin->sin_family = AF_INET; sin->sin_addr.s_addr = ip_hdr(skb)->saddr; sin->sin_port = 0; memset(&sin->sin_zero, 0, sizeof(sin->sin_zero)); } if (inet->cmsg_flags) ip_cmsg_recv(msg, skb); if (flags & MSG_TRUNC) copied = skb->len; done: skb_free_datagram(sk, skb); out: return err ? err : copied; } static struct proto l2tp_ip_prot = { .name = "L2TP/IP", .owner = THIS_MODULE, .init = l2tp_ip_open, .close = l2tp_ip_close, .bind = l2tp_ip_bind, .connect = l2tp_ip_connect, .disconnect = l2tp_ip_disconnect, .ioctl = udp_ioctl, .destroy = l2tp_ip_destroy_sock, .setsockopt = ip_setsockopt, .getsockopt = ip_getsockopt, .sendmsg = l2tp_ip_sendmsg, .recvmsg = l2tp_ip_recvmsg, .backlog_rcv = l2tp_ip_backlog_recv, .hash = inet_hash, .unhash = inet_unhash, .obj_size = sizeof(struct l2tp_ip_sock), #ifdef CONFIG_COMPAT .compat_setsockopt = compat_ip_setsockopt, .compat_getsockopt = compat_ip_getsockopt, #endif }; static const struct proto_ops l2tp_ip_ops = { .family = PF_INET, .owner = THIS_MODULE, .release = inet_release, .bind = inet_bind, .connect = inet_dgram_connect, .socketpair = sock_no_socketpair, .accept = sock_no_accept, .getname = l2tp_ip_getname, .poll = datagram_poll, .ioctl = inet_ioctl, .listen = sock_no_listen, .shutdown = inet_shutdown, .setsockopt = sock_common_setsockopt, .getsockopt = sock_common_getsockopt, .sendmsg = inet_sendmsg, .recvmsg = sock_common_recvmsg, .mmap = sock_no_mmap, .sendpage = sock_no_sendpage, #ifdef CONFIG_COMPAT .compat_setsockopt = compat_sock_common_setsockopt, .compat_getsockopt = compat_sock_common_getsockopt, #endif }; static struct inet_protosw l2tp_ip_protosw = { .type = SOCK_DGRAM, .protocol = IPPROTO_L2TP, .prot = &l2tp_ip_prot, .ops = &l2tp_ip_ops, .no_check = 0, }; static struct net_protocol l2tp_ip_protocol __read_mostly = { .handler = l2tp_ip_recv, .netns_ok = 1, }; static int __init l2tp_ip_init(void) { int err; pr_info("L2TP IP encapsulation support (L2TPv3)\n"); err = proto_register(&l2tp_ip_prot, 1); if (err != 0) goto out; err = inet_add_protocol(&l2tp_ip_protocol, IPPROTO_L2TP); if (err) goto out1; inet_register_protosw(&l2tp_ip_protosw); return 0; out1: proto_unregister(&l2tp_ip_prot); out: return err; } static void __exit l2tp_ip_exit(void) { inet_unregister_protosw(&l2tp_ip_protosw); inet_del_protocol(&l2tp_ip_protocol, IPPROTO_L2TP); proto_unregister(&l2tp_ip_prot); } module_init(l2tp_ip_init); module_exit(l2tp_ip_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("James Chapman <jchapman@katalix.com>"); MODULE_DESCRIPTION("L2TP over IP"); 
MODULE_VERSION("1.0"); /* Use the value of SOCK_DGRAM (2) directory, because __stringify doesn't like * enums */ MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET, 2, IPPROTO_L2TP);
/* * L2TPv3 IP encapsulation support * * Copyright (c) 2008,2009,2010 Katalix Systems Ltd * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/icmp.h> #include <linux/module.h> #include <linux/skbuff.h> #include <linux/random.h> #include <linux/socket.h> #include <linux/l2tp.h> #include <linux/in.h> #include <net/sock.h> #include <net/ip.h> #include <net/icmp.h> #include <net/udp.h> #include <net/inet_common.h> #include <net/inet_hashtables.h> #include <net/tcp_states.h> #include <net/protocol.h> #include <net/xfrm.h> #include "l2tp_core.h" struct l2tp_ip_sock { /* inet_sock has to be the first member of l2tp_ip_sock */ struct inet_sock inet; u32 conn_id; u32 peer_conn_id; }; static DEFINE_RWLOCK(l2tp_ip_lock); static struct hlist_head l2tp_ip_table; static struct hlist_head l2tp_ip_bind_table; static inline struct l2tp_ip_sock *l2tp_ip_sk(const struct sock *sk) { return (struct l2tp_ip_sock *)sk; } static struct sock *__l2tp_ip_bind_lookup(struct net *net, __be32 laddr, int dif, u32 tunnel_id) { struct sock *sk; sk_for_each_bound(sk, &l2tp_ip_bind_table) { struct inet_sock *inet = inet_sk(sk); struct l2tp_ip_sock *l2tp = l2tp_ip_sk(sk); if (l2tp == NULL) continue; if ((l2tp->conn_id == tunnel_id) && net_eq(sock_net(sk), net) && !(inet->inet_rcv_saddr && inet->inet_rcv_saddr != laddr) && !(sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif)) goto found; } sk = NULL; found: return sk; } static inline struct sock *l2tp_ip_bind_lookup(struct net *net, __be32 laddr, int dif, u32 tunnel_id) { struct sock *sk = __l2tp_ip_bind_lookup(net, laddr, dif, tunnel_id); if (sk) sock_hold(sk); return sk; } /* When processing receive frames, there are two cases to * consider. Data frames consist of a non-zero session-id and an * optional cookie. Control frames consist of a regular L2TP header * preceded by 32-bits of zeros. * * L2TPv3 Session Header Over IP * * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Session ID | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Cookie (optional, maximum 64 bits)... * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * * L2TPv3 Control Message Header Over IP * * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | (32 bits of zeros) | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * |T|L|x|x|S|x|x|x|x|x|x|x| Ver | Length | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Control Connection ID | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Ns | Nr | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * * All control frames are passed to userspace. 
*/ static int l2tp_ip_recv(struct sk_buff *skb) { struct net *net = dev_net(skb->dev); struct sock *sk; u32 session_id; u32 tunnel_id; unsigned char *ptr, *optr; struct l2tp_session *session; struct l2tp_tunnel *tunnel = NULL; int length; /* Point to L2TP header */ optr = ptr = skb->data; if (!pskb_may_pull(skb, 4)) goto discard; session_id = ntohl(*((__be32 *) ptr)); ptr += 4; /* RFC3931: L2TP/IP packets have the first 4 bytes containing * the session_id. If it is 0, the packet is a L2TP control * frame and the session_id value can be discarded. */ if (session_id == 0) { __skb_pull(skb, 4); goto pass_up; } /* Ok, this is a data packet. Lookup the session. */ session = l2tp_session_find(net, NULL, session_id); if (session == NULL) goto discard; tunnel = session->tunnel; if (tunnel == NULL) goto discard; /* Trace packet contents, if enabled */ if (tunnel->debug & L2TP_MSG_DATA) { length = min(32u, skb->len); if (!pskb_may_pull(skb, length)) goto discard; pr_debug("%s: ip recv\n", tunnel->name); print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length); } l2tp_recv_common(session, skb, ptr, optr, 0, skb->len, tunnel->recv_payload_hook); return 0; pass_up: /* Get the tunnel_id from the L2TP header */ if (!pskb_may_pull(skb, 12)) goto discard; if ((skb->data[0] & 0xc0) != 0xc0) goto discard; tunnel_id = ntohl(*(__be32 *) &skb->data[4]); tunnel = l2tp_tunnel_find(net, tunnel_id); if (tunnel != NULL) sk = tunnel->sock; else { struct iphdr *iph = (struct iphdr *) skb_network_header(skb); read_lock_bh(&l2tp_ip_lock); sk = __l2tp_ip_bind_lookup(net, iph->daddr, 0, tunnel_id); read_unlock_bh(&l2tp_ip_lock); } if (sk == NULL) goto discard; sock_hold(sk); if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) goto discard_put; nf_reset(skb); return sk_receive_skb(sk, skb, 1); discard_put: sock_put(sk); discard: kfree_skb(skb); return 0; } static int l2tp_ip_open(struct sock *sk) { /* Prevent autobind. We don't have ports. 
*/ inet_sk(sk)->inet_num = IPPROTO_L2TP; write_lock_bh(&l2tp_ip_lock); sk_add_node(sk, &l2tp_ip_table); write_unlock_bh(&l2tp_ip_lock); return 0; } static void l2tp_ip_close(struct sock *sk, long timeout) { write_lock_bh(&l2tp_ip_lock); hlist_del_init(&sk->sk_bind_node); sk_del_node_init(sk); write_unlock_bh(&l2tp_ip_lock); sk_common_release(sk); } static void l2tp_ip_destroy_sock(struct sock *sk) { struct sk_buff *skb; struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk); while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL) kfree_skb(skb); if (tunnel) { l2tp_tunnel_closeall(tunnel); sock_put(sk); } sk_refcnt_debug_dec(sk); } static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len) { struct inet_sock *inet = inet_sk(sk); struct sockaddr_l2tpip *addr = (struct sockaddr_l2tpip *) uaddr; struct net *net = sock_net(sk); int ret; int chk_addr_ret; if (!sock_flag(sk, SOCK_ZAPPED)) return -EINVAL; if (addr_len < sizeof(struct sockaddr_l2tpip)) return -EINVAL; if (addr->l2tp_family != AF_INET) return -EINVAL; ret = -EADDRINUSE; read_lock_bh(&l2tp_ip_lock); if (__l2tp_ip_bind_lookup(net, addr->l2tp_addr.s_addr, sk->sk_bound_dev_if, addr->l2tp_conn_id)) goto out_in_use; read_unlock_bh(&l2tp_ip_lock); lock_sock(sk); if (sk->sk_state != TCP_CLOSE || addr_len < sizeof(struct sockaddr_l2tpip)) goto out; chk_addr_ret = inet_addr_type(net, addr->l2tp_addr.s_addr); ret = -EADDRNOTAVAIL; if (addr->l2tp_addr.s_addr && chk_addr_ret != RTN_LOCAL && chk_addr_ret != RTN_MULTICAST && chk_addr_ret != RTN_BROADCAST) goto out; if (addr->l2tp_addr.s_addr) inet->inet_rcv_saddr = inet->inet_saddr = addr->l2tp_addr.s_addr; if (chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST) inet->inet_saddr = 0; /* Use device */ sk_dst_reset(sk); l2tp_ip_sk(sk)->conn_id = addr->l2tp_conn_id; write_lock_bh(&l2tp_ip_lock); sk_add_bind_node(sk, &l2tp_ip_bind_table); sk_del_node_init(sk); write_unlock_bh(&l2tp_ip_lock); ret = 0; sock_reset_flag(sk, SOCK_ZAPPED); out: release_sock(sk); return ret; out_in_use: read_unlock_bh(&l2tp_ip_lock); return ret; } static int l2tp_ip_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) { struct sockaddr_l2tpip *lsa = (struct sockaddr_l2tpip *) uaddr; int rc; if (sock_flag(sk, SOCK_ZAPPED)) /* Must bind first - autobinding does not work */ return -EINVAL; if (addr_len < sizeof(*lsa)) return -EINVAL; if (ipv4_is_multicast(lsa->l2tp_addr.s_addr)) return -EINVAL; rc = ip4_datagram_connect(sk, uaddr, addr_len); if (rc < 0) return rc; lock_sock(sk); l2tp_ip_sk(sk)->peer_conn_id = lsa->l2tp_conn_id; write_lock_bh(&l2tp_ip_lock); hlist_del_init(&sk->sk_bind_node); sk_add_bind_node(sk, &l2tp_ip_bind_table); write_unlock_bh(&l2tp_ip_lock); release_sock(sk); return rc; } static int l2tp_ip_disconnect(struct sock *sk, int flags) { if (sock_flag(sk, SOCK_ZAPPED)) return 0; return udp_disconnect(sk, flags); } static int l2tp_ip_getname(struct socket *sock, struct sockaddr *uaddr, int *uaddr_len, int peer) { struct sock *sk = sock->sk; struct inet_sock *inet = inet_sk(sk); struct l2tp_ip_sock *lsk = l2tp_ip_sk(sk); struct sockaddr_l2tpip *lsa = (struct sockaddr_l2tpip *)uaddr; memset(lsa, 0, sizeof(*lsa)); lsa->l2tp_family = AF_INET; if (peer) { if (!inet->inet_dport) return -ENOTCONN; lsa->l2tp_conn_id = lsk->peer_conn_id; lsa->l2tp_addr.s_addr = inet->inet_daddr; } else { __be32 addr = inet->inet_rcv_saddr; if (!addr) addr = inet->inet_saddr; lsa->l2tp_conn_id = lsk->conn_id; lsa->l2tp_addr.s_addr = addr; } *uaddr_len = sizeof(*lsa); return 0; } static int 
l2tp_ip_backlog_recv(struct sock *sk, struct sk_buff *skb) { int rc; /* Charge it to the socket, dropping if the queue is full. */ rc = sock_queue_rcv_skb(sk, skb); if (rc < 0) goto drop; return 0; drop: IP_INC_STATS(sock_net(sk), IPSTATS_MIB_INDISCARDS); kfree_skb(skb); return -1; } /* Userspace will call sendmsg() on the tunnel socket to send L2TP * control frames. */ static int l2tp_ip_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, size_t len) { struct sk_buff *skb; int rc; struct inet_sock *inet = inet_sk(sk); struct rtable *rt = NULL; struct flowi4 *fl4; int connected = 0; __be32 daddr; lock_sock(sk); rc = -ENOTCONN; if (sock_flag(sk, SOCK_DEAD)) goto out; /* Get and verify the address. */ if (msg->msg_name) { struct sockaddr_l2tpip *lip = (struct sockaddr_l2tpip *) msg->msg_name; rc = -EINVAL; if (msg->msg_namelen < sizeof(*lip)) goto out; if (lip->l2tp_family != AF_INET) { rc = -EAFNOSUPPORT; if (lip->l2tp_family != AF_UNSPEC) goto out; } daddr = lip->l2tp_addr.s_addr; } else { rc = -EDESTADDRREQ; if (sk->sk_state != TCP_ESTABLISHED) goto out; daddr = inet->inet_daddr; connected = 1; } /* Allocate a socket buffer */ rc = -ENOMEM; skb = sock_wmalloc(sk, 2 + NET_SKB_PAD + sizeof(struct iphdr) + 4 + len, 0, GFP_KERNEL); if (!skb) goto error; /* Reserve space for headers, putting IP header on 4-byte boundary. */ skb_reserve(skb, 2 + NET_SKB_PAD); skb_reset_network_header(skb); skb_reserve(skb, sizeof(struct iphdr)); skb_reset_transport_header(skb); /* Insert 0 session_id */ *((__be32 *) skb_put(skb, 4)) = 0; /* Copy user data into skb */ rc = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len); if (rc < 0) { kfree_skb(skb); goto error; } fl4 = &inet->cork.fl.u.ip4; if (connected) rt = (struct rtable *) __sk_dst_check(sk, 0); rcu_read_lock(); if (rt == NULL) { const struct ip_options_rcu *inet_opt; inet_opt = rcu_dereference(inet->inet_opt); /* Use correct destination address if we have options. */ if (inet_opt && inet_opt->opt.srr) daddr = inet_opt->opt.faddr; /* If this fails, retransmit mechanism of transport layer will * keep trying until route appears or the connection times * itself out. */ rt = ip_route_output_ports(sock_net(sk), fl4, sk, daddr, inet->inet_saddr, inet->inet_dport, inet->inet_sport, sk->sk_protocol, RT_CONN_FLAGS(sk), sk->sk_bound_dev_if); if (IS_ERR(rt)) goto no_route; if (connected) { sk_setup_caps(sk, &rt->dst); } else { skb_dst_set(skb, &rt->dst); goto xmit; } } /* We dont need to clone dst here, it is guaranteed to not disappear. * __dev_xmit_skb() might force a refcount if needed. */ skb_dst_set_noref(skb, &rt->dst); xmit: /* Queue the packet to IP for output */ rc = ip_queue_xmit(skb, &inet->cork.fl); rcu_read_unlock(); error: if (rc >= 0) rc = len; out: release_sock(sk); return rc; no_route: rcu_read_unlock(); IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES); kfree_skb(skb); rc = -EHOSTUNREACH; goto out; } static int l2tp_ip_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, size_t len, int noblock, int flags, int *addr_len) { struct inet_sock *inet = inet_sk(sk); size_t copied = 0; int err = -EOPNOTSUPP; struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name; struct sk_buff *skb; if (flags & MSG_OOB) goto out; skb = skb_recv_datagram(sk, flags, noblock, &err); if (!skb) goto out; copied = skb->len; if (len < copied) { msg->msg_flags |= MSG_TRUNC; copied = len; } err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); if (err) goto done; sock_recv_timestamp(msg, sk, skb); /* Copy the address. 
*/ if (sin) { sin->sin_family = AF_INET; sin->sin_addr.s_addr = ip_hdr(skb)->saddr; sin->sin_port = 0; memset(&sin->sin_zero, 0, sizeof(sin->sin_zero)); *addr_len = sizeof(*sin); } if (inet->cmsg_flags) ip_cmsg_recv(msg, skb); if (flags & MSG_TRUNC) copied = skb->len; done: skb_free_datagram(sk, skb); out: return err ? err : copied; } static struct proto l2tp_ip_prot = { .name = "L2TP/IP", .owner = THIS_MODULE, .init = l2tp_ip_open, .close = l2tp_ip_close, .bind = l2tp_ip_bind, .connect = l2tp_ip_connect, .disconnect = l2tp_ip_disconnect, .ioctl = udp_ioctl, .destroy = l2tp_ip_destroy_sock, .setsockopt = ip_setsockopt, .getsockopt = ip_getsockopt, .sendmsg = l2tp_ip_sendmsg, .recvmsg = l2tp_ip_recvmsg, .backlog_rcv = l2tp_ip_backlog_recv, .hash = inet_hash, .unhash = inet_unhash, .obj_size = sizeof(struct l2tp_ip_sock), #ifdef CONFIG_COMPAT .compat_setsockopt = compat_ip_setsockopt, .compat_getsockopt = compat_ip_getsockopt, #endif }; static const struct proto_ops l2tp_ip_ops = { .family = PF_INET, .owner = THIS_MODULE, .release = inet_release, .bind = inet_bind, .connect = inet_dgram_connect, .socketpair = sock_no_socketpair, .accept = sock_no_accept, .getname = l2tp_ip_getname, .poll = datagram_poll, .ioctl = inet_ioctl, .listen = sock_no_listen, .shutdown = inet_shutdown, .setsockopt = sock_common_setsockopt, .getsockopt = sock_common_getsockopt, .sendmsg = inet_sendmsg, .recvmsg = sock_common_recvmsg, .mmap = sock_no_mmap, .sendpage = sock_no_sendpage, #ifdef CONFIG_COMPAT .compat_setsockopt = compat_sock_common_setsockopt, .compat_getsockopt = compat_sock_common_getsockopt, #endif }; static struct inet_protosw l2tp_ip_protosw = { .type = SOCK_DGRAM, .protocol = IPPROTO_L2TP, .prot = &l2tp_ip_prot, .ops = &l2tp_ip_ops, .no_check = 0, }; static struct net_protocol l2tp_ip_protocol __read_mostly = { .handler = l2tp_ip_recv, .netns_ok = 1, }; static int __init l2tp_ip_init(void) { int err; pr_info("L2TP IP encapsulation support (L2TPv3)\n"); err = proto_register(&l2tp_ip_prot, 1); if (err != 0) goto out; err = inet_add_protocol(&l2tp_ip_protocol, IPPROTO_L2TP); if (err) goto out1; inet_register_protosw(&l2tp_ip_protosw); return 0; out1: proto_unregister(&l2tp_ip_prot); out: return err; } static void __exit l2tp_ip_exit(void) { inet_unregister_protosw(&l2tp_ip_protosw); inet_del_protocol(&l2tp_ip_protocol, IPPROTO_L2TP); proto_unregister(&l2tp_ip_prot); } module_init(l2tp_ip_init); module_exit(l2tp_ip_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("James Chapman <jchapman@katalix.com>"); MODULE_DESCRIPTION("L2TP over IP"); MODULE_VERSION("1.0"); /* Use the value of SOCK_DGRAM (2) directory, because __stringify doesn't like * enums */ MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET, 2, IPPROTO_L2TP);
static int l2tp_ip_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, size_t len, int noblock, int flags, int *addr_len) { struct inet_sock *inet = inet_sk(sk); size_t copied = 0; int err = -EOPNOTSUPP; struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name; struct sk_buff *skb; if (flags & MSG_OOB) goto out; if (addr_len) *addr_len = sizeof(*sin); skb = skb_recv_datagram(sk, flags, noblock, &err); if (!skb) goto out; copied = skb->len; if (len < copied) { msg->msg_flags |= MSG_TRUNC; copied = len; } err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); if (err) goto done; sock_recv_timestamp(msg, sk, skb); /* Copy the address. */ if (sin) { sin->sin_family = AF_INET; sin->sin_addr.s_addr = ip_hdr(skb)->saddr; sin->sin_port = 0; memset(&sin->sin_zero, 0, sizeof(sin->sin_zero)); } if (inet->cmsg_flags) ip_cmsg_recv(msg, skb); if (flags & MSG_TRUNC) copied = skb->len; done: skb_free_datagram(sk, skb); out: return err ? err : copied; }
static int l2tp_ip_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, size_t len, int noblock, int flags, int *addr_len) { struct inet_sock *inet = inet_sk(sk); size_t copied = 0; int err = -EOPNOTSUPP; struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name; struct sk_buff *skb; if (flags & MSG_OOB) goto out; skb = skb_recv_datagram(sk, flags, noblock, &err); if (!skb) goto out; copied = skb->len; if (len < copied) { msg->msg_flags |= MSG_TRUNC; copied = len; } err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); if (err) goto done; sock_recv_timestamp(msg, sk, skb); /* Copy the address. */ if (sin) { sin->sin_family = AF_INET; sin->sin_addr.s_addr = ip_hdr(skb)->saddr; sin->sin_port = 0; memset(&sin->sin_zero, 0, sizeof(sin->sin_zero)); *addr_len = sizeof(*sin); } if (inet->cmsg_flags) ip_cmsg_recv(msg, skb); if (flags & MSG_TRUNC) copied = skb->len; done: skb_free_datagram(sk, skb); out: return err ? err : copied; }
{'added': [(543, '\t\t*addr_len = sizeof(*sin);')], 'deleted': [(521, '\tif (addr_len)'), (522, '\t\t*addr_len = sizeof(*sin);'), (523, '')]}
1
3
462
2,765
https://github.com/torvalds/linux
CVE-2013-7263
['CWE-20']
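This record is the CVE-2013-7263 class of recvmsg fixes: l2tp_ip_recvmsg() used to set *addr_len = sizeof(*sin) unconditionally at the top of the function, before knowing whether the sockaddr_in would ever be filled in, so callers could treat never-written kernel stack bytes as a valid address (CWE-20, an infoleak). The one-line patch moves the assignment inside the if (sin) block, after the structure, including its sin_zero padding, has been initialized. A minimal user-space sketch of the rule, with a hypothetical helper rather than the kernel API:

#include <stdint.h>
#include <string.h>
#include <netinet/in.h>

/*
 * Hypothetical helper, not the kernel function: report a peer address to
 * the caller. The out-length is written only after *sin is fully
 * initialized; on the no-address path it is left untouched, so the
 * caller never sees a length describing uninitialized memory.
 */
static int fill_peer_addr(struct sockaddr_in *sin, int *addr_len,
                          uint32_t saddr_be, int have_addr)
{
    if (!have_addr)
        return 0;                    /* *addr_len deliberately not set */

    sin->sin_family = AF_INET;
    sin->sin_addr.s_addr = saddr_be; /* already in network byte order */
    sin->sin_port = 0;
    memset(&sin->sin_zero, 0, sizeof(sin->sin_zero)); /* no padding leaks */

    *addr_len = sizeof(*sin);        /* set last, once *sin is valid */
    return 1;
}

Writing the length last makes the invariant local and obvious: a nonzero *addr_len implies a fully populated structure, which is exactly what the diff field's moved assignment enforces.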
print-isoclns.c
isis_print_extd_ip_reach
/* * Copyright (c) 1992, 1993, 1994, 1995, 1996 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that: (1) source code distributions * retain the above copyright notice and this paragraph in its entirety, (2) * distributions including binary code include the above copyright notice and * this paragraph in its entirety in the documentation or other materials * provided with the distribution, and (3) all advertising materials mentioning * features or use of this software display the following acknowledgement: * ``This product includes software developed by the University of California, * Lawrence Berkeley Laboratory and its contributors.'' Neither the name of * the University nor the names of its contributors may be used to endorse * or promote products derived from this software without specific prior * written permission. * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. * * Original code by Matt Thomas, Digital Equipment Corporation * * Extensively modified by Hannes Gredler (hannes@gredler.at) for more * complete IS-IS & CLNP support. */ /* \summary: ISO CLNS, ESIS, and ISIS printer */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include <netdissect-stdinc.h> #include <string.h> #include "netdissect.h" #include "addrtoname.h" #include "ether.h" #include "nlpid.h" #include "extract.h" #include "gmpls.h" #include "oui.h" #include "signature.h" static const char tstr[] = " [|isis]"; /* * IS-IS is defined in ISO 10589. Look there for protocol definitions. */ #define SYSTEM_ID_LEN ETHER_ADDR_LEN #define NODE_ID_LEN SYSTEM_ID_LEN+1 #define LSP_ID_LEN SYSTEM_ID_LEN+2 #define ISIS_VERSION 1 #define ESIS_VERSION 1 #define CLNP_VERSION 1 #define ISIS_PDU_TYPE_MASK 0x1F #define ESIS_PDU_TYPE_MASK 0x1F #define CLNP_PDU_TYPE_MASK 0x1F #define CLNP_FLAG_MASK 0xE0 #define ISIS_LAN_PRIORITY_MASK 0x7F #define ISIS_PDU_L1_LAN_IIH 15 #define ISIS_PDU_L2_LAN_IIH 16 #define ISIS_PDU_PTP_IIH 17 #define ISIS_PDU_L1_LSP 18 #define ISIS_PDU_L2_LSP 20 #define ISIS_PDU_L1_CSNP 24 #define ISIS_PDU_L2_CSNP 25 #define ISIS_PDU_L1_PSNP 26 #define ISIS_PDU_L2_PSNP 27 static const struct tok isis_pdu_values[] = { { ISIS_PDU_L1_LAN_IIH, "L1 Lan IIH"}, { ISIS_PDU_L2_LAN_IIH, "L2 Lan IIH"}, { ISIS_PDU_PTP_IIH, "p2p IIH"}, { ISIS_PDU_L1_LSP, "L1 LSP"}, { ISIS_PDU_L2_LSP, "L2 LSP"}, { ISIS_PDU_L1_CSNP, "L1 CSNP"}, { ISIS_PDU_L2_CSNP, "L2 CSNP"}, { ISIS_PDU_L1_PSNP, "L1 PSNP"}, { ISIS_PDU_L2_PSNP, "L2 PSNP"}, { 0, NULL} }; /* * A TLV is a tuple of a type, length and a value and is normally used for * encoding information in all sorts of places. This is an enumeration of * the well known types. 
* * list taken from rfc3359 plus some memory from veterans ;-) */ #define ISIS_TLV_AREA_ADDR 1 /* iso10589 */ #define ISIS_TLV_IS_REACH 2 /* iso10589 */ #define ISIS_TLV_ESNEIGH 3 /* iso10589 */ #define ISIS_TLV_PART_DIS 4 /* iso10589 */ #define ISIS_TLV_PREFIX_NEIGH 5 /* iso10589 */ #define ISIS_TLV_ISNEIGH 6 /* iso10589 */ #define ISIS_TLV_ISNEIGH_VARLEN 7 /* iso10589 */ #define ISIS_TLV_PADDING 8 /* iso10589 */ #define ISIS_TLV_LSP 9 /* iso10589 */ #define ISIS_TLV_AUTH 10 /* iso10589, rfc3567 */ #define ISIS_TLV_CHECKSUM 12 /* rfc3358 */ #define ISIS_TLV_CHECKSUM_MINLEN 2 #define ISIS_TLV_POI 13 /* rfc6232 */ #define ISIS_TLV_LSP_BUFFERSIZE 14 /* iso10589 rev2 */ #define ISIS_TLV_LSP_BUFFERSIZE_MINLEN 2 #define ISIS_TLV_EXT_IS_REACH 22 /* draft-ietf-isis-traffic-05 */ #define ISIS_TLV_IS_ALIAS_ID 24 /* draft-ietf-isis-ext-lsp-frags-02 */ #define ISIS_TLV_DECNET_PHASE4 42 #define ISIS_TLV_LUCENT_PRIVATE 66 #define ISIS_TLV_INT_IP_REACH 128 /* rfc1195, rfc2966 */ #define ISIS_TLV_PROTOCOLS 129 /* rfc1195 */ #define ISIS_TLV_EXT_IP_REACH 130 /* rfc1195, rfc2966 */ #define ISIS_TLV_IDRP_INFO 131 /* rfc1195 */ #define ISIS_TLV_IDRP_INFO_MINLEN 1 #define ISIS_TLV_IPADDR 132 /* rfc1195 */ #define ISIS_TLV_IPAUTH 133 /* rfc1195 */ #define ISIS_TLV_TE_ROUTER_ID 134 /* draft-ietf-isis-traffic-05 */ #define ISIS_TLV_EXTD_IP_REACH 135 /* draft-ietf-isis-traffic-05 */ #define ISIS_TLV_HOSTNAME 137 /* rfc2763 */ #define ISIS_TLV_SHARED_RISK_GROUP 138 /* draft-ietf-isis-gmpls-extensions */ #define ISIS_TLV_MT_PORT_CAP 143 /* rfc6165 */ #define ISIS_TLV_MT_CAPABILITY 144 /* rfc6329 */ #define ISIS_TLV_NORTEL_PRIVATE1 176 #define ISIS_TLV_NORTEL_PRIVATE2 177 #define ISIS_TLV_RESTART_SIGNALING 211 /* rfc3847 */ #define ISIS_TLV_RESTART_SIGNALING_FLAGLEN 1 #define ISIS_TLV_RESTART_SIGNALING_HOLDTIMELEN 2 #define ISIS_TLV_MT_IS_REACH 222 /* draft-ietf-isis-wg-multi-topology-05 */ #define ISIS_TLV_MT_SUPPORTED 229 /* draft-ietf-isis-wg-multi-topology-05 */ #define ISIS_TLV_MT_SUPPORTED_MINLEN 2 #define ISIS_TLV_IP6ADDR 232 /* draft-ietf-isis-ipv6-02 */ #define ISIS_TLV_MT_IP_REACH 235 /* draft-ietf-isis-wg-multi-topology-05 */ #define ISIS_TLV_IP6_REACH 236 /* draft-ietf-isis-ipv6-02 */ #define ISIS_TLV_MT_IP6_REACH 237 /* draft-ietf-isis-wg-multi-topology-05 */ #define ISIS_TLV_PTP_ADJ 240 /* rfc3373 */ #define ISIS_TLV_IIH_SEQNR 241 /* draft-shen-isis-iih-sequence-00 */ #define ISIS_TLV_IIH_SEQNR_MINLEN 4 #define ISIS_TLV_VENDOR_PRIVATE 250 /* draft-ietf-isis-experimental-tlv-01 */ #define ISIS_TLV_VENDOR_PRIVATE_MINLEN 3 static const struct tok isis_tlv_values[] = { { ISIS_TLV_AREA_ADDR, "Area address(es)"}, { ISIS_TLV_IS_REACH, "IS Reachability"}, { ISIS_TLV_ESNEIGH, "ES Neighbor(s)"}, { ISIS_TLV_PART_DIS, "Partition DIS"}, { ISIS_TLV_PREFIX_NEIGH, "Prefix Neighbors"}, { ISIS_TLV_ISNEIGH, "IS Neighbor(s)"}, { ISIS_TLV_ISNEIGH_VARLEN, "IS Neighbor(s) (variable length)"}, { ISIS_TLV_PADDING, "Padding"}, { ISIS_TLV_LSP, "LSP entries"}, { ISIS_TLV_AUTH, "Authentication"}, { ISIS_TLV_CHECKSUM, "Checksum"}, { ISIS_TLV_POI, "Purge Originator Identifier"}, { ISIS_TLV_LSP_BUFFERSIZE, "LSP Buffersize"}, { ISIS_TLV_EXT_IS_REACH, "Extended IS Reachability"}, { ISIS_TLV_IS_ALIAS_ID, "IS Alias ID"}, { ISIS_TLV_DECNET_PHASE4, "DECnet Phase IV"}, { ISIS_TLV_LUCENT_PRIVATE, "Lucent Proprietary"}, { ISIS_TLV_INT_IP_REACH, "IPv4 Internal Reachability"}, { ISIS_TLV_PROTOCOLS, "Protocols supported"}, { ISIS_TLV_EXT_IP_REACH, "IPv4 External Reachability"}, { ISIS_TLV_IDRP_INFO, "Inter-Domain Information Type"}, { 
ISIS_TLV_IPADDR, "IPv4 Interface address(es)"}, { ISIS_TLV_IPAUTH, "IPv4 authentication (deprecated)"}, { ISIS_TLV_TE_ROUTER_ID, "Traffic Engineering Router ID"}, { ISIS_TLV_EXTD_IP_REACH, "Extended IPv4 Reachability"}, { ISIS_TLV_SHARED_RISK_GROUP, "Shared Risk Link Group"}, { ISIS_TLV_MT_PORT_CAP, "Multi-Topology-Aware Port Capability"}, { ISIS_TLV_MT_CAPABILITY, "Multi-Topology Capability"}, { ISIS_TLV_NORTEL_PRIVATE1, "Nortel Proprietary"}, { ISIS_TLV_NORTEL_PRIVATE2, "Nortel Proprietary"}, { ISIS_TLV_HOSTNAME, "Hostname"}, { ISIS_TLV_RESTART_SIGNALING, "Restart Signaling"}, { ISIS_TLV_MT_IS_REACH, "Multi Topology IS Reachability"}, { ISIS_TLV_MT_SUPPORTED, "Multi Topology"}, { ISIS_TLV_IP6ADDR, "IPv6 Interface address(es)"}, { ISIS_TLV_MT_IP_REACH, "Multi-Topology IPv4 Reachability"}, { ISIS_TLV_IP6_REACH, "IPv6 reachability"}, { ISIS_TLV_MT_IP6_REACH, "Multi-Topology IP6 Reachability"}, { ISIS_TLV_PTP_ADJ, "Point-to-point Adjacency State"}, { ISIS_TLV_IIH_SEQNR, "Hello PDU Sequence Number"}, { ISIS_TLV_VENDOR_PRIVATE, "Vendor Private"}, { 0, NULL } }; #define ESIS_OPTION_PROTOCOLS 129 #define ESIS_OPTION_QOS_MAINTENANCE 195 /* iso9542 */ #define ESIS_OPTION_SECURITY 197 /* iso9542 */ #define ESIS_OPTION_ES_CONF_TIME 198 /* iso9542 */ #define ESIS_OPTION_PRIORITY 205 /* iso9542 */ #define ESIS_OPTION_ADDRESS_MASK 225 /* iso9542 */ #define ESIS_OPTION_SNPA_MASK 226 /* iso9542 */ static const struct tok esis_option_values[] = { { ESIS_OPTION_PROTOCOLS, "Protocols supported"}, { ESIS_OPTION_QOS_MAINTENANCE, "QoS Maintenance" }, { ESIS_OPTION_SECURITY, "Security" }, { ESIS_OPTION_ES_CONF_TIME, "ES Configuration Time" }, { ESIS_OPTION_PRIORITY, "Priority" }, { ESIS_OPTION_ADDRESS_MASK, "Addressk Mask" }, { ESIS_OPTION_SNPA_MASK, "SNPA Mask" }, { 0, NULL } }; #define CLNP_OPTION_DISCARD_REASON 193 #define CLNP_OPTION_QOS_MAINTENANCE 195 /* iso8473 */ #define CLNP_OPTION_SECURITY 197 /* iso8473 */ #define CLNP_OPTION_SOURCE_ROUTING 200 /* iso8473 */ #define CLNP_OPTION_ROUTE_RECORDING 203 /* iso8473 */ #define CLNP_OPTION_PADDING 204 /* iso8473 */ #define CLNP_OPTION_PRIORITY 205 /* iso8473 */ static const struct tok clnp_option_values[] = { { CLNP_OPTION_DISCARD_REASON, "Discard Reason"}, { CLNP_OPTION_PRIORITY, "Priority"}, { CLNP_OPTION_QOS_MAINTENANCE, "QoS Maintenance"}, { CLNP_OPTION_SECURITY, "Security"}, { CLNP_OPTION_SOURCE_ROUTING, "Source Routing"}, { CLNP_OPTION_ROUTE_RECORDING, "Route Recording"}, { CLNP_OPTION_PADDING, "Padding"}, { 0, NULL } }; static const struct tok clnp_option_rfd_class_values[] = { { 0x0, "General"}, { 0x8, "Address"}, { 0x9, "Source Routeing"}, { 0xa, "Lifetime"}, { 0xb, "PDU Discarded"}, { 0xc, "Reassembly"}, { 0, NULL } }; static const struct tok clnp_option_rfd_general_values[] = { { 0x0, "Reason not specified"}, { 0x1, "Protocol procedure error"}, { 0x2, "Incorrect checksum"}, { 0x3, "PDU discarded due to congestion"}, { 0x4, "Header syntax error (cannot be parsed)"}, { 0x5, "Segmentation needed but not permitted"}, { 0x6, "Incomplete PDU received"}, { 0x7, "Duplicate option"}, { 0, NULL } }; static const struct tok clnp_option_rfd_address_values[] = { { 0x0, "Destination address unreachable"}, { 0x1, "Destination address unknown"}, { 0, NULL } }; static const struct tok clnp_option_rfd_source_routeing_values[] = { { 0x0, "Unspecified source routeing error"}, { 0x1, "Syntax error in source routeing field"}, { 0x2, "Unknown address in source routeing field"}, { 0x3, "Path not acceptable"}, { 0, NULL } }; static const struct tok 
clnp_option_rfd_lifetime_values[] = { { 0x0, "Lifetime expired while data unit in transit"}, { 0x1, "Lifetime expired during reassembly"}, { 0, NULL } }; static const struct tok clnp_option_rfd_pdu_discard_values[] = { { 0x0, "Unsupported option not specified"}, { 0x1, "Unsupported protocol version"}, { 0x2, "Unsupported security option"}, { 0x3, "Unsupported source routeing option"}, { 0x4, "Unsupported recording of route option"}, { 0, NULL } }; static const struct tok clnp_option_rfd_reassembly_values[] = { { 0x0, "Reassembly interference"}, { 0, NULL } }; /* array of 16 error-classes */ static const struct tok *clnp_option_rfd_error_class[] = { clnp_option_rfd_general_values, NULL, NULL, NULL, NULL, NULL, NULL, NULL, clnp_option_rfd_address_values, clnp_option_rfd_source_routeing_values, clnp_option_rfd_lifetime_values, clnp_option_rfd_pdu_discard_values, clnp_option_rfd_reassembly_values, NULL, NULL, NULL }; #define CLNP_OPTION_OPTION_QOS_MASK 0x3f #define CLNP_OPTION_SCOPE_MASK 0xc0 #define CLNP_OPTION_SCOPE_SA_SPEC 0x40 #define CLNP_OPTION_SCOPE_DA_SPEC 0x80 #define CLNP_OPTION_SCOPE_GLOBAL 0xc0 static const struct tok clnp_option_scope_values[] = { { CLNP_OPTION_SCOPE_SA_SPEC, "Source Address Specific"}, { CLNP_OPTION_SCOPE_DA_SPEC, "Destination Address Specific"}, { CLNP_OPTION_SCOPE_GLOBAL, "Globally unique"}, { 0, NULL } }; static const struct tok clnp_option_sr_rr_values[] = { { 0x0, "partial"}, { 0x1, "complete"}, { 0, NULL } }; static const struct tok clnp_option_sr_rr_string_values[] = { { CLNP_OPTION_SOURCE_ROUTING, "source routing"}, { CLNP_OPTION_ROUTE_RECORDING, "recording of route in progress"}, { 0, NULL } }; static const struct tok clnp_option_qos_global_values[] = { { 0x20, "reserved"}, { 0x10, "sequencing vs. delay"}, { 0x08, "congested"}, { 0x04, "delay vs. cost"}, { 0x02, "error vs. delay"}, { 0x01, "error vs. 
cost"}, { 0, NULL } }; #define ISIS_SUBTLV_EXT_IS_REACH_ADMIN_GROUP 3 /* draft-ietf-isis-traffic-05 */ #define ISIS_SUBTLV_EXT_IS_REACH_LINK_LOCAL_REMOTE_ID 4 /* rfc4205 */ #define ISIS_SUBTLV_EXT_IS_REACH_LINK_REMOTE_ID 5 /* draft-ietf-isis-traffic-05 */ #define ISIS_SUBTLV_EXT_IS_REACH_IPV4_INTF_ADDR 6 /* draft-ietf-isis-traffic-05 */ #define ISIS_SUBTLV_EXT_IS_REACH_IPV4_NEIGHBOR_ADDR 8 /* draft-ietf-isis-traffic-05 */ #define ISIS_SUBTLV_EXT_IS_REACH_MAX_LINK_BW 9 /* draft-ietf-isis-traffic-05 */ #define ISIS_SUBTLV_EXT_IS_REACH_RESERVABLE_BW 10 /* draft-ietf-isis-traffic-05 */ #define ISIS_SUBTLV_EXT_IS_REACH_UNRESERVED_BW 11 /* rfc4124 */ #define ISIS_SUBTLV_EXT_IS_REACH_BW_CONSTRAINTS_OLD 12 /* draft-ietf-tewg-diff-te-proto-06 */ #define ISIS_SUBTLV_EXT_IS_REACH_TE_METRIC 18 /* draft-ietf-isis-traffic-05 */ #define ISIS_SUBTLV_EXT_IS_REACH_LINK_ATTRIBUTE 19 /* draft-ietf-isis-link-attr-01 */ #define ISIS_SUBTLV_EXT_IS_REACH_LINK_PROTECTION_TYPE 20 /* rfc4205 */ #define ISIS_SUBTLV_EXT_IS_REACH_INTF_SW_CAP_DESCR 21 /* rfc4205 */ #define ISIS_SUBTLV_EXT_IS_REACH_BW_CONSTRAINTS 22 /* rfc4124 */ #define ISIS_SUBTLV_SPB_METRIC 29 /* rfc6329 */ static const struct tok isis_ext_is_reach_subtlv_values[] = { { ISIS_SUBTLV_EXT_IS_REACH_ADMIN_GROUP, "Administrative groups" }, { ISIS_SUBTLV_EXT_IS_REACH_LINK_LOCAL_REMOTE_ID, "Link Local/Remote Identifier" }, { ISIS_SUBTLV_EXT_IS_REACH_LINK_REMOTE_ID, "Link Remote Identifier" }, { ISIS_SUBTLV_EXT_IS_REACH_IPV4_INTF_ADDR, "IPv4 interface address" }, { ISIS_SUBTLV_EXT_IS_REACH_IPV4_NEIGHBOR_ADDR, "IPv4 neighbor address" }, { ISIS_SUBTLV_EXT_IS_REACH_MAX_LINK_BW, "Maximum link bandwidth" }, { ISIS_SUBTLV_EXT_IS_REACH_RESERVABLE_BW, "Reservable link bandwidth" }, { ISIS_SUBTLV_EXT_IS_REACH_UNRESERVED_BW, "Unreserved bandwidth" }, { ISIS_SUBTLV_EXT_IS_REACH_TE_METRIC, "Traffic Engineering Metric" }, { ISIS_SUBTLV_EXT_IS_REACH_LINK_ATTRIBUTE, "Link Attribute" }, { ISIS_SUBTLV_EXT_IS_REACH_LINK_PROTECTION_TYPE, "Link Protection Type" }, { ISIS_SUBTLV_EXT_IS_REACH_INTF_SW_CAP_DESCR, "Interface Switching Capability" }, { ISIS_SUBTLV_EXT_IS_REACH_BW_CONSTRAINTS_OLD, "Bandwidth Constraints (old)" }, { ISIS_SUBTLV_EXT_IS_REACH_BW_CONSTRAINTS, "Bandwidth Constraints" }, { ISIS_SUBTLV_SPB_METRIC, "SPB Metric" }, { 250, "Reserved for cisco specific extensions" }, { 251, "Reserved for cisco specific extensions" }, { 252, "Reserved for cisco specific extensions" }, { 253, "Reserved for cisco specific extensions" }, { 254, "Reserved for cisco specific extensions" }, { 255, "Reserved for future expansion" }, { 0, NULL } }; #define ISIS_SUBTLV_EXTD_IP_REACH_ADMIN_TAG32 1 /* draft-ietf-isis-admin-tags-01 */ #define ISIS_SUBTLV_EXTD_IP_REACH_ADMIN_TAG64 2 /* draft-ietf-isis-admin-tags-01 */ #define ISIS_SUBTLV_EXTD_IP_REACH_MGMT_PREFIX_COLOR 117 /* draft-ietf-isis-wg-multi-topology-05 */ static const struct tok isis_ext_ip_reach_subtlv_values[] = { { ISIS_SUBTLV_EXTD_IP_REACH_ADMIN_TAG32, "32-Bit Administrative tag" }, { ISIS_SUBTLV_EXTD_IP_REACH_ADMIN_TAG64, "64-Bit Administrative tag" }, { ISIS_SUBTLV_EXTD_IP_REACH_MGMT_PREFIX_COLOR, "Management Prefix Color" }, { 0, NULL } }; static const struct tok isis_subtlv_link_attribute_values[] = { { 0x01, "Local Protection Available" }, { 0x02, "Link excluded from local protection path" }, { 0x04, "Local maintenance required"}, { 0, NULL } }; #define ISIS_SUBTLV_AUTH_SIMPLE 1 #define ISIS_SUBTLV_AUTH_GENERIC 3 /* rfc 5310 */ #define ISIS_SUBTLV_AUTH_MD5 54 #define ISIS_SUBTLV_AUTH_MD5_LEN 16 #define 
ISIS_SUBTLV_AUTH_PRIVATE 255 static const struct tok isis_subtlv_auth_values[] = { { ISIS_SUBTLV_AUTH_SIMPLE, "simple text password"}, { ISIS_SUBTLV_AUTH_GENERIC, "Generic Crypto key-id"}, { ISIS_SUBTLV_AUTH_MD5, "HMAC-MD5 password"}, { ISIS_SUBTLV_AUTH_PRIVATE, "Routing Domain private password"}, { 0, NULL } }; #define ISIS_SUBTLV_IDRP_RES 0 #define ISIS_SUBTLV_IDRP_LOCAL 1 #define ISIS_SUBTLV_IDRP_ASN 2 static const struct tok isis_subtlv_idrp_values[] = { { ISIS_SUBTLV_IDRP_RES, "Reserved"}, { ISIS_SUBTLV_IDRP_LOCAL, "Routing-Domain Specific"}, { ISIS_SUBTLV_IDRP_ASN, "AS Number Tag"}, { 0, NULL} }; #define ISIS_SUBTLV_SPB_MCID 4 #define ISIS_SUBTLV_SPB_DIGEST 5 #define ISIS_SUBTLV_SPB_BVID 6 #define ISIS_SUBTLV_SPB_INSTANCE 1 #define ISIS_SUBTLV_SPBM_SI 3 #define ISIS_SPB_MCID_LEN 51 #define ISIS_SUBTLV_SPB_MCID_MIN_LEN 102 #define ISIS_SUBTLV_SPB_DIGEST_MIN_LEN 33 #define ISIS_SUBTLV_SPB_BVID_MIN_LEN 6 #define ISIS_SUBTLV_SPB_INSTANCE_MIN_LEN 19 #define ISIS_SUBTLV_SPB_INSTANCE_VLAN_TUPLE_LEN 8 static const struct tok isis_mt_port_cap_subtlv_values[] = { { ISIS_SUBTLV_SPB_MCID, "SPB MCID" }, { ISIS_SUBTLV_SPB_DIGEST, "SPB Digest" }, { ISIS_SUBTLV_SPB_BVID, "SPB BVID" }, { 0, NULL } }; static const struct tok isis_mt_capability_subtlv_values[] = { { ISIS_SUBTLV_SPB_INSTANCE, "SPB Instance" }, { ISIS_SUBTLV_SPBM_SI, "SPBM Service Identifier and Unicast Address" }, { 0, NULL } }; struct isis_spb_mcid { uint8_t format_id; uint8_t name[32]; uint8_t revision_lvl[2]; uint8_t digest[16]; }; struct isis_subtlv_spb_mcid { struct isis_spb_mcid mcid; struct isis_spb_mcid aux_mcid; }; struct isis_subtlv_spb_instance { uint8_t cist_root_id[8]; uint8_t cist_external_root_path_cost[4]; uint8_t bridge_priority[2]; uint8_t spsourceid[4]; uint8_t no_of_trees; }; #define CLNP_SEGMENT_PART 0x80 #define CLNP_MORE_SEGMENTS 0x40 #define CLNP_REQUEST_ER 0x20 static const struct tok clnp_flag_values[] = { { CLNP_SEGMENT_PART, "Segmentation permitted"}, { CLNP_MORE_SEGMENTS, "more Segments"}, { CLNP_REQUEST_ER, "request Error Report"}, { 0, NULL} }; #define ISIS_MASK_LSP_OL_BIT(x) ((x)&0x4) #define ISIS_MASK_LSP_ISTYPE_BITS(x) ((x)&0x3) #define ISIS_MASK_LSP_PARTITION_BIT(x) ((x)&0x80) #define ISIS_MASK_LSP_ATT_BITS(x) ((x)&0x78) #define ISIS_MASK_LSP_ATT_ERROR_BIT(x) ((x)&0x40) #define ISIS_MASK_LSP_ATT_EXPENSE_BIT(x) ((x)&0x20) #define ISIS_MASK_LSP_ATT_DELAY_BIT(x) ((x)&0x10) #define ISIS_MASK_LSP_ATT_DEFAULT_BIT(x) ((x)&0x8) #define ISIS_MASK_MTID(x) ((x)&0x0fff) #define ISIS_MASK_MTFLAGS(x) ((x)&0xf000) static const struct tok isis_mt_flag_values[] = { { 0x4000, "ATT bit set"}, { 0x8000, "Overload bit set"}, { 0, NULL} }; #define ISIS_MASK_TLV_EXTD_IP_UPDOWN(x) ((x)&0x80) #define ISIS_MASK_TLV_EXTD_IP_SUBTLV(x) ((x)&0x40) #define ISIS_MASK_TLV_EXTD_IP6_IE(x) ((x)&0x40) #define ISIS_MASK_TLV_EXTD_IP6_SUBTLV(x) ((x)&0x20) #define ISIS_LSP_TLV_METRIC_SUPPORTED(x) ((x)&0x80) #define ISIS_LSP_TLV_METRIC_IE(x) ((x)&0x40) #define ISIS_LSP_TLV_METRIC_UPDOWN(x) ((x)&0x80) #define ISIS_LSP_TLV_METRIC_VALUE(x) ((x)&0x3f) #define ISIS_MASK_TLV_SHARED_RISK_GROUP(x) ((x)&0x1) static const struct tok isis_mt_values[] = { { 0, "IPv4 unicast"}, { 1, "In-Band Management"}, { 2, "IPv6 unicast"}, { 3, "Multicast"}, { 4095, "Development, Experimental or Proprietary"}, { 0, NULL } }; static const struct tok isis_iih_circuit_type_values[] = { { 1, "Level 1 only"}, { 2, "Level 2 only"}, { 3, "Level 1, Level 2"}, { 0, NULL} }; #define ISIS_LSP_TYPE_UNUSED0 0 #define ISIS_LSP_TYPE_LEVEL_1 1 #define ISIS_LSP_TYPE_UNUSED2 2 #define 
ISIS_LSP_TYPE_LEVEL_2 3 static const struct tok isis_lsp_istype_values[] = { { ISIS_LSP_TYPE_UNUSED0, "Unused 0x0 (invalid)"}, { ISIS_LSP_TYPE_LEVEL_1, "L1 IS"}, { ISIS_LSP_TYPE_UNUSED2, "Unused 0x2 (invalid)"}, { ISIS_LSP_TYPE_LEVEL_2, "L2 IS"}, { 0, NULL } }; /* * Katz's point to point adjacency TLV uses codes to tell us the state of * the remote adjacency. Enumerate them. */ #define ISIS_PTP_ADJ_UP 0 #define ISIS_PTP_ADJ_INIT 1 #define ISIS_PTP_ADJ_DOWN 2 static const struct tok isis_ptp_adjancey_values[] = { { ISIS_PTP_ADJ_UP, "Up" }, { ISIS_PTP_ADJ_INIT, "Initializing" }, { ISIS_PTP_ADJ_DOWN, "Down" }, { 0, NULL} }; struct isis_tlv_ptp_adj { uint8_t adjacency_state; uint8_t extd_local_circuit_id[4]; uint8_t neighbor_sysid[SYSTEM_ID_LEN]; uint8_t neighbor_extd_local_circuit_id[4]; }; static void osi_print_cksum(netdissect_options *, const uint8_t *pptr, uint16_t checksum, int checksum_offset, u_int length); static int clnp_print(netdissect_options *, const uint8_t *, u_int); static void esis_print(netdissect_options *, const uint8_t *, u_int); static int isis_print(netdissect_options *, const uint8_t *, u_int); struct isis_metric_block { uint8_t metric_default; uint8_t metric_delay; uint8_t metric_expense; uint8_t metric_error; }; struct isis_tlv_is_reach { struct isis_metric_block isis_metric_block; uint8_t neighbor_nodeid[NODE_ID_LEN]; }; struct isis_tlv_es_reach { struct isis_metric_block isis_metric_block; uint8_t neighbor_sysid[SYSTEM_ID_LEN]; }; struct isis_tlv_ip_reach { struct isis_metric_block isis_metric_block; uint8_t prefix[4]; uint8_t mask[4]; }; static const struct tok isis_is_reach_virtual_values[] = { { 0, "IsNotVirtual"}, { 1, "IsVirtual"}, { 0, NULL } }; static const struct tok isis_restart_flag_values[] = { { 0x1, "Restart Request"}, { 0x2, "Restart Acknowledgement"}, { 0x4, "Suppress adjacency advertisement"}, { 0, NULL } }; struct isis_common_header { uint8_t nlpid; uint8_t fixed_len; uint8_t version; /* Protocol version */ uint8_t id_length; uint8_t pdu_type; /* 3 MSbits are reserved */ uint8_t pdu_version; /* Packet format version */ uint8_t reserved; uint8_t max_area; }; struct isis_iih_lan_header { uint8_t circuit_type; uint8_t source_id[SYSTEM_ID_LEN]; uint8_t holding_time[2]; uint8_t pdu_len[2]; uint8_t priority; uint8_t lan_id[NODE_ID_LEN]; }; struct isis_iih_ptp_header { uint8_t circuit_type; uint8_t source_id[SYSTEM_ID_LEN]; uint8_t holding_time[2]; uint8_t pdu_len[2]; uint8_t circuit_id; }; struct isis_lsp_header { uint8_t pdu_len[2]; uint8_t remaining_lifetime[2]; uint8_t lsp_id[LSP_ID_LEN]; uint8_t sequence_number[4]; uint8_t checksum[2]; uint8_t typeblock; }; struct isis_csnp_header { uint8_t pdu_len[2]; uint8_t source_id[NODE_ID_LEN]; uint8_t start_lsp_id[LSP_ID_LEN]; uint8_t end_lsp_id[LSP_ID_LEN]; }; struct isis_psnp_header { uint8_t pdu_len[2]; uint8_t source_id[NODE_ID_LEN]; }; struct isis_tlv_lsp { uint8_t remaining_lifetime[2]; uint8_t lsp_id[LSP_ID_LEN]; uint8_t sequence_number[4]; uint8_t checksum[2]; }; #define ISIS_COMMON_HEADER_SIZE (sizeof(struct isis_common_header)) #define ISIS_IIH_LAN_HEADER_SIZE (sizeof(struct isis_iih_lan_header)) #define ISIS_IIH_PTP_HEADER_SIZE (sizeof(struct isis_iih_ptp_header)) #define ISIS_LSP_HEADER_SIZE (sizeof(struct isis_lsp_header)) #define ISIS_CSNP_HEADER_SIZE (sizeof(struct isis_csnp_header)) #define ISIS_PSNP_HEADER_SIZE (sizeof(struct isis_psnp_header)) void isoclns_print(netdissect_options *ndo, const uint8_t *p, u_int length) { if (!ND_TTEST(*p)) { /* enough bytes on the wire ? 
*/ ND_PRINT((ndo, "|OSI")); return; } if (ndo->ndo_eflag) ND_PRINT((ndo, "OSI NLPID %s (0x%02x): ", tok2str(nlpid_values, "Unknown", *p), *p)); switch (*p) { case NLPID_CLNP: if (!clnp_print(ndo, p, length)) print_unknown_data(ndo, p, "\n\t", length); break; case NLPID_ESIS: esis_print(ndo, p, length); return; case NLPID_ISIS: if (!isis_print(ndo, p, length)) print_unknown_data(ndo, p, "\n\t", length); break; case NLPID_NULLNS: ND_PRINT((ndo, "%slength: %u", ndo->ndo_eflag ? "" : ", ", length)); break; case NLPID_Q933: q933_print(ndo, p + 1, length - 1); break; case NLPID_IP: ip_print(ndo, p + 1, length - 1); break; case NLPID_IP6: ip6_print(ndo, p + 1, length - 1); break; case NLPID_PPP: ppp_print(ndo, p + 1, length - 1); break; default: if (!ndo->ndo_eflag) ND_PRINT((ndo, "OSI NLPID 0x%02x unknown", *p)); ND_PRINT((ndo, "%slength: %u", ndo->ndo_eflag ? "" : ", ", length)); if (length > 1) print_unknown_data(ndo, p, "\n\t", length); break; } } #define CLNP_PDU_ER 1 #define CLNP_PDU_DT 28 #define CLNP_PDU_MD 29 #define CLNP_PDU_ERQ 30 #define CLNP_PDU_ERP 31 static const struct tok clnp_pdu_values[] = { { CLNP_PDU_ER, "Error Report"}, { CLNP_PDU_MD, "MD"}, { CLNP_PDU_DT, "Data"}, { CLNP_PDU_ERQ, "Echo Request"}, { CLNP_PDU_ERP, "Echo Response"}, { 0, NULL } }; struct clnp_header_t { uint8_t nlpid; uint8_t length_indicator; uint8_t version; uint8_t lifetime; /* units of 500ms */ uint8_t type; uint8_t segment_length[2]; uint8_t cksum[2]; }; struct clnp_segment_header_t { uint8_t data_unit_id[2]; uint8_t segment_offset[2]; uint8_t total_length[2]; }; /* * clnp_print * Decode CLNP packets. Return 0 on error. */ static int clnp_print(netdissect_options *ndo, const uint8_t *pptr, u_int length) { const uint8_t *optr,*source_address,*dest_address; u_int li,tlen,nsap_offset,source_address_length,dest_address_length, clnp_pdu_type, clnp_flags; const struct clnp_header_t *clnp_header; const struct clnp_segment_header_t *clnp_segment_header; uint8_t rfd_error_major,rfd_error_minor; clnp_header = (const struct clnp_header_t *) pptr; ND_TCHECK(*clnp_header); li = clnp_header->length_indicator; optr = pptr; if (!ndo->ndo_eflag) ND_PRINT((ndo, "CLNP")); /* * Sanity checking of the header. 
*/ if (clnp_header->version != CLNP_VERSION) { ND_PRINT((ndo, "version %d packet not supported", clnp_header->version)); return (0); } if (li > length) { ND_PRINT((ndo, " length indicator(%u) > PDU size (%u)!", li, length)); return (0); } if (li < sizeof(struct clnp_header_t)) { ND_PRINT((ndo, " length indicator %u < min PDU size:", li)); while (pptr < ndo->ndo_snapend) ND_PRINT((ndo, "%02X", *pptr++)); return (0); } /* FIXME further header sanity checking */ clnp_pdu_type = clnp_header->type & CLNP_PDU_TYPE_MASK; clnp_flags = clnp_header->type & CLNP_FLAG_MASK; pptr += sizeof(struct clnp_header_t); li -= sizeof(struct clnp_header_t); if (li < 1) { ND_PRINT((ndo, "li < size of fixed part of CLNP header and addresses")); return (0); } ND_TCHECK(*pptr); dest_address_length = *pptr; pptr += 1; li -= 1; if (li < dest_address_length) { ND_PRINT((ndo, "li < size of fixed part of CLNP header and addresses")); return (0); } ND_TCHECK2(*pptr, dest_address_length); dest_address = pptr; pptr += dest_address_length; li -= dest_address_length; if (li < 1) { ND_PRINT((ndo, "li < size of fixed part of CLNP header and addresses")); return (0); } ND_TCHECK(*pptr); source_address_length = *pptr; pptr += 1; li -= 1; if (li < source_address_length) { ND_PRINT((ndo, "li < size of fixed part of CLNP header and addresses")); return (0); } ND_TCHECK2(*pptr, source_address_length); source_address = pptr; pptr += source_address_length; li -= source_address_length; if (ndo->ndo_vflag < 1) { ND_PRINT((ndo, "%s%s > %s, %s, length %u", ndo->ndo_eflag ? "" : ", ", isonsap_string(ndo, source_address, source_address_length), isonsap_string(ndo, dest_address, dest_address_length), tok2str(clnp_pdu_values,"unknown (%u)",clnp_pdu_type), length)); return (1); } ND_PRINT((ndo, "%slength %u", ndo->ndo_eflag ? 
"" : ", ", length)); ND_PRINT((ndo, "\n\t%s PDU, hlen: %u, v: %u, lifetime: %u.%us, Segment PDU length: %u, checksum: 0x%04x", tok2str(clnp_pdu_values, "unknown (%u)",clnp_pdu_type), clnp_header->length_indicator, clnp_header->version, clnp_header->lifetime/2, (clnp_header->lifetime%2)*5, EXTRACT_16BITS(clnp_header->segment_length), EXTRACT_16BITS(clnp_header->cksum))); osi_print_cksum(ndo, optr, EXTRACT_16BITS(clnp_header->cksum), 7, clnp_header->length_indicator); ND_PRINT((ndo, "\n\tFlags [%s]", bittok2str(clnp_flag_values, "none", clnp_flags))); ND_PRINT((ndo, "\n\tsource address (length %u): %s\n\tdest address (length %u): %s", source_address_length, isonsap_string(ndo, source_address, source_address_length), dest_address_length, isonsap_string(ndo, dest_address, dest_address_length))); if (clnp_flags & CLNP_SEGMENT_PART) { if (li < sizeof(const struct clnp_segment_header_t)) { ND_PRINT((ndo, "li < size of fixed part of CLNP header, addresses, and segment part")); return (0); } clnp_segment_header = (const struct clnp_segment_header_t *) pptr; ND_TCHECK(*clnp_segment_header); ND_PRINT((ndo, "\n\tData Unit ID: 0x%04x, Segment Offset: %u, Total PDU Length: %u", EXTRACT_16BITS(clnp_segment_header->data_unit_id), EXTRACT_16BITS(clnp_segment_header->segment_offset), EXTRACT_16BITS(clnp_segment_header->total_length))); pptr+=sizeof(const struct clnp_segment_header_t); li-=sizeof(const struct clnp_segment_header_t); } /* now walk the options */ while (li >= 2) { u_int op, opli; const uint8_t *tptr; if (li < 2) { ND_PRINT((ndo, ", bad opts/li")); return (0); } ND_TCHECK2(*pptr, 2); op = *pptr++; opli = *pptr++; li -= 2; if (opli > li) { ND_PRINT((ndo, ", opt (%d) too long", op)); return (0); } ND_TCHECK2(*pptr, opli); li -= opli; tptr = pptr; tlen = opli; ND_PRINT((ndo, "\n\t %s Option #%u, length %u, value: ", tok2str(clnp_option_values,"Unknown",op), op, opli)); /* * We've already checked that the entire option is present * in the captured packet with the ND_TCHECK2() call. * Therefore, we don't need to do ND_TCHECK()/ND_TCHECK2() * checks. * We do, however, need to check tlen, to make sure we * don't run past the end of the option. 
*/ switch (op) { case CLNP_OPTION_ROUTE_RECORDING: /* those two options share the format */ case CLNP_OPTION_SOURCE_ROUTING: if (tlen < 2) { ND_PRINT((ndo, ", bad opt len")); return (0); } ND_PRINT((ndo, "%s %s", tok2str(clnp_option_sr_rr_values,"Unknown",*tptr), tok2str(clnp_option_sr_rr_string_values, "Unknown Option %u", op))); nsap_offset=*(tptr+1); if (nsap_offset == 0) { ND_PRINT((ndo, " Bad NSAP offset (0)")); break; } nsap_offset-=1; /* offset to nsap list */ if (nsap_offset > tlen) { ND_PRINT((ndo, " Bad NSAP offset (past end of option)")); break; } tptr+=nsap_offset; tlen-=nsap_offset; while (tlen > 0) { source_address_length=*tptr; if (tlen < source_address_length+1) { ND_PRINT((ndo, "\n\t NSAP address goes past end of option")); break; } if (source_address_length > 0) { source_address=(tptr+1); ND_TCHECK2(*source_address, source_address_length); ND_PRINT((ndo, "\n\t NSAP address (length %u): %s", source_address_length, isonsap_string(ndo, source_address, source_address_length))); } tlen-=source_address_length+1; } break; case CLNP_OPTION_PRIORITY: if (tlen < 1) { ND_PRINT((ndo, ", bad opt len")); return (0); } ND_PRINT((ndo, "0x%1x", *tptr&0x0f)); break; case CLNP_OPTION_QOS_MAINTENANCE: if (tlen < 1) { ND_PRINT((ndo, ", bad opt len")); return (0); } ND_PRINT((ndo, "\n\t Format Code: %s", tok2str(clnp_option_scope_values, "Reserved", *tptr&CLNP_OPTION_SCOPE_MASK))); if ((*tptr&CLNP_OPTION_SCOPE_MASK) == CLNP_OPTION_SCOPE_GLOBAL) ND_PRINT((ndo, "\n\t QoS Flags [%s]", bittok2str(clnp_option_qos_global_values, "none", *tptr&CLNP_OPTION_OPTION_QOS_MASK))); break; case CLNP_OPTION_SECURITY: if (tlen < 2) { ND_PRINT((ndo, ", bad opt len")); return (0); } ND_PRINT((ndo, "\n\t Format Code: %s, Security-Level %u", tok2str(clnp_option_scope_values,"Reserved",*tptr&CLNP_OPTION_SCOPE_MASK), *(tptr+1))); break; case CLNP_OPTION_DISCARD_REASON: if (tlen < 1) { ND_PRINT((ndo, ", bad opt len")); return (0); } rfd_error_major = (*tptr&0xf0) >> 4; rfd_error_minor = *tptr&0x0f; ND_PRINT((ndo, "\n\t Class: %s Error (0x%01x), %s (0x%01x)", tok2str(clnp_option_rfd_class_values,"Unknown",rfd_error_major), rfd_error_major, tok2str(clnp_option_rfd_error_class[rfd_error_major],"Unknown",rfd_error_minor), rfd_error_minor)); break; case CLNP_OPTION_PADDING: ND_PRINT((ndo, "padding data")); break; /* * FIXME those are the defined Options that lack a decoder * you are welcome to contribute code ;-) */ default: print_unknown_data(ndo, tptr, "\n\t ", opli); break; } if (ndo->ndo_vflag > 1) print_unknown_data(ndo, pptr, "\n\t ", opli); pptr += opli; } switch (clnp_pdu_type) { case CLNP_PDU_ER: /* fall through */ case CLNP_PDU_ERP: ND_TCHECK(*pptr); if (*(pptr) == NLPID_CLNP) { ND_PRINT((ndo, "\n\t-----original packet-----\n\t")); /* FIXME recursion protection */ clnp_print(ndo, pptr, length - clnp_header->length_indicator); break; } case CLNP_PDU_DT: case CLNP_PDU_MD: case CLNP_PDU_ERQ: default: /* dump the PDU specific data */ if (length-(pptr-optr) > 0) { ND_PRINT((ndo, "\n\t undecoded non-header data, length %u", length-clnp_header->length_indicator)); print_unknown_data(ndo, pptr, "\n\t ", length - (pptr - optr)); } } return (1); trunc: ND_PRINT((ndo, "[|clnp]")); return (1); } #define ESIS_PDU_REDIRECT 6 #define ESIS_PDU_ESH 2 #define ESIS_PDU_ISH 4 static const struct tok esis_pdu_values[] = { { ESIS_PDU_REDIRECT, "redirect"}, { ESIS_PDU_ESH, "ESH"}, { ESIS_PDU_ISH, "ISH"}, { 0, NULL } }; struct esis_header_t { uint8_t nlpid; uint8_t length_indicator; uint8_t version; uint8_t reserved; uint8_t type; 
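/* Both 16-bit fields below are carried big-endian on the wire; esis_print() reads them with EXTRACT_16BITS(). Only the low bits of 'type' above carry the PDU type (masked with ESIS_PDU_TYPE_MASK in esis_print()). */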
uint8_t holdtime[2]; uint8_t cksum[2]; }; static void esis_print(netdissect_options *ndo, const uint8_t *pptr, u_int length) { const uint8_t *optr; u_int li,esis_pdu_type,source_address_length, source_address_number; const struct esis_header_t *esis_header; if (!ndo->ndo_eflag) ND_PRINT((ndo, "ES-IS")); if (length <= 2) { ND_PRINT((ndo, ndo->ndo_qflag ? "bad pkt!" : "no header at all!")); return; } esis_header = (const struct esis_header_t *) pptr; ND_TCHECK(*esis_header); li = esis_header->length_indicator; optr = pptr; /* * Sanity checking of the header. */ if (esis_header->nlpid != NLPID_ESIS) { ND_PRINT((ndo, " nlpid 0x%02x packet not supported", esis_header->nlpid)); return; } if (esis_header->version != ESIS_VERSION) { ND_PRINT((ndo, " version %d packet not supported", esis_header->version)); return; } if (li > length) { ND_PRINT((ndo, " length indicator(%u) > PDU size (%u)!", li, length)); return; } if (li < sizeof(struct esis_header_t) + 2) { ND_PRINT((ndo, " length indicator %u < min PDU size:", li)); while (pptr < ndo->ndo_snapend) ND_PRINT((ndo, "%02X", *pptr++)); return; } esis_pdu_type = esis_header->type & ESIS_PDU_TYPE_MASK; if (ndo->ndo_vflag < 1) { ND_PRINT((ndo, "%s%s, length %u", ndo->ndo_eflag ? "" : ", ", tok2str(esis_pdu_values,"unknown type (%u)",esis_pdu_type), length)); return; } else ND_PRINT((ndo, "%slength %u\n\t%s (%u)", ndo->ndo_eflag ? "" : ", ", length, tok2str(esis_pdu_values,"unknown type: %u", esis_pdu_type), esis_pdu_type)); ND_PRINT((ndo, ", v: %u%s", esis_header->version, esis_header->version == ESIS_VERSION ? "" : "unsupported" )); ND_PRINT((ndo, ", checksum: 0x%04x", EXTRACT_16BITS(esis_header->cksum))); osi_print_cksum(ndo, pptr, EXTRACT_16BITS(esis_header->cksum), 7, li); ND_PRINT((ndo, ", holding time: %us, length indicator: %u", EXTRACT_16BITS(esis_header->holdtime), li)); if (ndo->ndo_vflag > 1) print_unknown_data(ndo, optr, "\n\t", sizeof(struct esis_header_t)); pptr += sizeof(struct esis_header_t); li -= sizeof(struct esis_header_t); switch (esis_pdu_type) { case ESIS_PDU_REDIRECT: { const uint8_t *dst, *snpa, *neta; u_int dstl, snpal, netal; ND_TCHECK(*pptr); if (li < 1) { ND_PRINT((ndo, ", bad redirect/li")); return; } dstl = *pptr; pptr++; li--; ND_TCHECK2(*pptr, dstl); if (li < dstl) { ND_PRINT((ndo, ", bad redirect/li")); return; } dst = pptr; pptr += dstl; li -= dstl; ND_PRINT((ndo, "\n\t %s", isonsap_string(ndo, dst, dstl))); ND_TCHECK(*pptr); if (li < 1) { ND_PRINT((ndo, ", bad redirect/li")); return; } snpal = *pptr; pptr++; li--; ND_TCHECK2(*pptr, snpal); if (li < snpal) { ND_PRINT((ndo, ", bad redirect/li")); return; } snpa = pptr; pptr += snpal; li -= snpal; ND_TCHECK(*pptr); if (li < 1) { ND_PRINT((ndo, ", bad redirect/li")); return; } netal = *pptr; pptr++; ND_TCHECK2(*pptr, netal); if (li < netal) { ND_PRINT((ndo, ", bad redirect/li")); return; } neta = pptr; pptr += netal; li -= netal; if (netal == 0) ND_PRINT((ndo, "\n\t %s", etheraddr_string(ndo, snpa))); else ND_PRINT((ndo, "\n\t %s", isonsap_string(ndo, neta, netal))); break; } case ESIS_PDU_ESH: ND_TCHECK(*pptr); if (li < 1) { ND_PRINT((ndo, ", bad esh/li")); return; } source_address_number = *pptr; pptr++; li--; ND_PRINT((ndo, "\n\t Number of Source Addresses: %u", source_address_number)); while (source_address_number > 0) { ND_TCHECK(*pptr); if (li < 1) { ND_PRINT((ndo, ", bad esh/li")); return; } source_address_length = *pptr; pptr++; li--; ND_TCHECK2(*pptr, source_address_length); if (li < source_address_length) { ND_PRINT((ndo, ", bad esh/li")); return; } 
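/* li and the captured length were both checked just above, so the NET printed next cannot run past the PDU or past the snap end. */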
ND_PRINT((ndo, "\n\t NET (length: %u): %s", source_address_length, isonsap_string(ndo, pptr, source_address_length))); pptr += source_address_length; li -= source_address_length; source_address_number--; } break; case ESIS_PDU_ISH: { ND_TCHECK(*pptr); if (li < 1) { ND_PRINT((ndo, ", bad ish/li")); return; } source_address_length = *pptr; pptr++; li--; ND_TCHECK2(*pptr, source_address_length); if (li < source_address_length) { ND_PRINT((ndo, ", bad ish/li")); return; } ND_PRINT((ndo, "\n\t NET (length: %u): %s", source_address_length, isonsap_string(ndo, pptr, source_address_length))); pptr += source_address_length; li -= source_address_length; break; } default: if (ndo->ndo_vflag <= 1) { if (pptr < ndo->ndo_snapend) print_unknown_data(ndo, pptr, "\n\t ", ndo->ndo_snapend - pptr); } return; } /* now walk the options */ while (li != 0) { u_int op, opli; const uint8_t *tptr; if (li < 2) { ND_PRINT((ndo, ", bad opts/li")); return; } ND_TCHECK2(*pptr, 2); op = *pptr++; opli = *pptr++; li -= 2; if (opli > li) { ND_PRINT((ndo, ", opt (%d) too long", op)); return; } li -= opli; tptr = pptr; ND_PRINT((ndo, "\n\t %s Option #%u, length %u, value: ", tok2str(esis_option_values,"Unknown",op), op, opli)); switch (op) { case ESIS_OPTION_ES_CONF_TIME: if (opli == 2) { ND_TCHECK2(*pptr, 2); ND_PRINT((ndo, "%us", EXTRACT_16BITS(tptr))); } else ND_PRINT((ndo, "(bad length)")); break; case ESIS_OPTION_PROTOCOLS: while (opli>0) { ND_TCHECK(*pptr); ND_PRINT((ndo, "%s (0x%02x)", tok2str(nlpid_values, "unknown", *tptr), *tptr)); if (opli>1) /* further NLPIDs ? - put comma */ ND_PRINT((ndo, ", ")); tptr++; opli--; } break; /* * FIXME those are the defined Options that lack a decoder * you are welcome to contribute code ;-) */ case ESIS_OPTION_QOS_MAINTENANCE: case ESIS_OPTION_SECURITY: case ESIS_OPTION_PRIORITY: case ESIS_OPTION_ADDRESS_MASK: case ESIS_OPTION_SNPA_MASK: default: print_unknown_data(ndo, tptr, "\n\t ", opli); break; } if (ndo->ndo_vflag > 1) print_unknown_data(ndo, pptr, "\n\t ", opli); pptr += opli; } trunc: return; } static void isis_print_mcid(netdissect_options *ndo, const struct isis_spb_mcid *mcid) { int i; ND_TCHECK(*mcid); ND_PRINT((ndo, "ID: %d, Name: ", mcid->format_id)); if (fn_printzp(ndo, mcid->name, 32, ndo->ndo_snapend)) goto trunc; ND_PRINT((ndo, "\n\t Lvl: %d", EXTRACT_16BITS(mcid->revision_lvl))); ND_PRINT((ndo, ", Digest: ")); for(i=0;i<16;i++) ND_PRINT((ndo, "%.2x ", mcid->digest[i])); return; /* success - do not fall through and print the truncation marker */ trunc: ND_PRINT((ndo, "%s", tstr)); } static int isis_print_mt_port_cap_subtlv(netdissect_options *ndo, const uint8_t *tptr, int len) { int stlv_type, stlv_len; const struct isis_subtlv_spb_mcid *subtlv_spb_mcid; int i; while (len > 2) { stlv_type = *(tptr++); stlv_len = *(tptr++); /* first lets see if we know the subTLVs name*/ ND_PRINT((ndo, "\n\t %s subTLV #%u, length: %u", tok2str(isis_mt_port_cap_subtlv_values, "unknown", stlv_type), stlv_type, stlv_len)); /*len -= TLV_TYPE_LEN_OFFSET;*/ len = len -2; switch (stlv_type) { case ISIS_SUBTLV_SPB_MCID: { ND_TCHECK2(*(tptr), ISIS_SUBTLV_SPB_MCID_MIN_LEN); subtlv_spb_mcid = (const struct isis_subtlv_spb_mcid *)tptr; ND_PRINT((ndo, "\n\t MCID: ")); isis_print_mcid(ndo, &(subtlv_spb_mcid->mcid)); /*tptr += SPB_MCID_MIN_LEN; len -= SPB_MCID_MIN_LEN; */ ND_PRINT((ndo, "\n\t AUX-MCID: ")); isis_print_mcid(ndo, &(subtlv_spb_mcid->aux_mcid)); /*tptr += SPB_MCID_MIN_LEN; len -= SPB_MCID_MIN_LEN; */ tptr = tptr + sizeof(struct isis_subtlv_spb_mcid); len = len - sizeof(struct isis_subtlv_spb_mcid); break; } case ISIS_SUBTLV_SPB_DIGEST: { ND_TCHECK2(*(tptr), 
ISIS_SUBTLV_SPB_DIGEST_MIN_LEN); ND_PRINT((ndo, "\n\t RES: %d V: %d A: %d D: %d", (*(tptr) >> 5), (((*tptr)>> 4) & 0x01), ((*(tptr) >> 2) & 0x03), ((*tptr) & 0x03))); tptr++; ND_PRINT((ndo, "\n\t Digest: ")); for(i=1;i<=8; i++) { ND_PRINT((ndo, "%08x ", EXTRACT_32BITS(tptr))); if (i%4 == 0 && i != 8) ND_PRINT((ndo, "\n\t ")); tptr = tptr + 4; } len = len - ISIS_SUBTLV_SPB_DIGEST_MIN_LEN; break; } case ISIS_SUBTLV_SPB_BVID: { ND_TCHECK2(*(tptr), stlv_len); while (len >= ISIS_SUBTLV_SPB_BVID_MIN_LEN) { ND_TCHECK2(*(tptr), ISIS_SUBTLV_SPB_BVID_MIN_LEN); ND_PRINT((ndo, "\n\t ECT: %08x", EXTRACT_32BITS(tptr))); tptr = tptr+4; ND_PRINT((ndo, " BVID: %d, U:%01x M:%01x ", (EXTRACT_16BITS (tptr) >> 4) , (EXTRACT_16BITS (tptr) >> 3) & 0x01, (EXTRACT_16BITS (tptr) >> 2) & 0x01)); tptr = tptr + 2; len = len - ISIS_SUBTLV_SPB_BVID_MIN_LEN; } break; } default: break; } } return 0; trunc: ND_PRINT((ndo, "\n\t\t")); ND_PRINT((ndo, "%s", tstr)); return(1); } static int isis_print_mt_capability_subtlv(netdissect_options *ndo, const uint8_t *tptr, int len) { int stlv_type, stlv_len, tmp; while (len > 2) { stlv_type = *(tptr++); stlv_len = *(tptr++); /* first lets see if we know the subTLVs name*/ ND_PRINT((ndo, "\n\t %s subTLV #%u, length: %u", tok2str(isis_mt_capability_subtlv_values, "unknown", stlv_type), stlv_type, stlv_len)); len = len - 2; switch (stlv_type) { case ISIS_SUBTLV_SPB_INSTANCE: ND_TCHECK2(*tptr, ISIS_SUBTLV_SPB_INSTANCE_MIN_LEN); ND_PRINT((ndo, "\n\t CIST Root-ID: %08x", EXTRACT_32BITS(tptr))); tptr = tptr+4; ND_PRINT((ndo, " %08x", EXTRACT_32BITS(tptr))); tptr = tptr+4; ND_PRINT((ndo, ", Path Cost: %08x", EXTRACT_32BITS(tptr))); tptr = tptr+4; ND_PRINT((ndo, ", Prio: %d", EXTRACT_16BITS(tptr))); tptr = tptr + 2; ND_PRINT((ndo, "\n\t RES: %d", EXTRACT_16BITS(tptr) >> 5)); ND_PRINT((ndo, ", V: %d", (EXTRACT_16BITS(tptr) >> 4) & 0x0001)); ND_PRINT((ndo, ", SPSource-ID: %d", (EXTRACT_32BITS(tptr) & 0x000fffff))); tptr = tptr+4; ND_PRINT((ndo, ", No of Trees: %x", *(tptr))); tmp = *(tptr++); len = len - ISIS_SUBTLV_SPB_INSTANCE_MIN_LEN; while (tmp) { ND_TCHECK2(*tptr, ISIS_SUBTLV_SPB_INSTANCE_VLAN_TUPLE_LEN); ND_PRINT((ndo, "\n\t U:%d, M:%d, A:%d, RES:%d", *(tptr) >> 7, (*(tptr) >> 6) & 0x01, (*(tptr) >> 5) & 0x01, (*(tptr) & 0x1f))); tptr++; ND_PRINT((ndo, ", ECT: %08x", EXTRACT_32BITS(tptr))); tptr = tptr + 4; ND_PRINT((ndo, ", BVID: %d, SPVID: %d", (EXTRACT_24BITS(tptr) >> 12) & 0x000fff, EXTRACT_24BITS(tptr) & 0x000fff)); tptr = tptr + 3; len = len - ISIS_SUBTLV_SPB_INSTANCE_VLAN_TUPLE_LEN; tmp--; } break; case ISIS_SUBTLV_SPBM_SI: ND_TCHECK2(*tptr, 8); ND_PRINT((ndo, "\n\t BMAC: %08x", EXTRACT_32BITS(tptr))); tptr = tptr+4; ND_PRINT((ndo, "%04x", EXTRACT_16BITS(tptr))); tptr = tptr+2; ND_PRINT((ndo, ", RES: %d, VID: %d", EXTRACT_16BITS(tptr) >> 12, (EXTRACT_16BITS(tptr)) & 0x0fff)); tptr = tptr+2; len = len - 8; stlv_len = stlv_len - 8; while (stlv_len >= 4) { ND_TCHECK2(*tptr, 4); ND_PRINT((ndo, "\n\t T: %d, R: %d, RES: %d, ISID: %d", (EXTRACT_32BITS(tptr) >> 31), (EXTRACT_32BITS(tptr) >> 30) & 0x01, (EXTRACT_32BITS(tptr) >> 24) & 0x03f, (EXTRACT_32BITS(tptr)) & 0x0ffffff)); tptr = tptr + 4; len = len - 4; stlv_len = stlv_len - 4; } break; default: break; } } return 0; trunc: ND_PRINT((ndo, "\n\t\t")); ND_PRINT((ndo, "%s", tstr)); return(1); } /* shared routine for printing system, node and lsp-ids */ static char * isis_print_id(const uint8_t *cp, int id_len) { int i; static char id[sizeof("xxxx.xxxx.xxxx.yy-zz")]; char *pos = id; for (i = 1; i <= SYSTEM_ID_LEN; i++) { snprintf(pos, 
sizeof(id) - (pos - id), "%02x", *cp++); pos += strlen(pos); if (i == 2 || i == 4) *pos++ = '.'; } if (id_len >= NODE_ID_LEN) { snprintf(pos, sizeof(id) - (pos - id), ".%02x", *cp++); pos += strlen(pos); } if (id_len == LSP_ID_LEN) snprintf(pos, sizeof(id) - (pos - id), "-%02x", *cp); return (id); } /* print the 4-byte metric block which is commonly found in the old-style TLVs */ static int isis_print_metric_block(netdissect_options *ndo, const struct isis_metric_block *isis_metric_block) { ND_PRINT((ndo, ", Default Metric: %d, %s", ISIS_LSP_TLV_METRIC_VALUE(isis_metric_block->metric_default), ISIS_LSP_TLV_METRIC_IE(isis_metric_block->metric_default) ? "External" : "Internal")); if (!ISIS_LSP_TLV_METRIC_SUPPORTED(isis_metric_block->metric_delay)) ND_PRINT((ndo, "\n\t\t Delay Metric: %d, %s", ISIS_LSP_TLV_METRIC_VALUE(isis_metric_block->metric_delay), ISIS_LSP_TLV_METRIC_IE(isis_metric_block->metric_delay) ? "External" : "Internal")); if (!ISIS_LSP_TLV_METRIC_SUPPORTED(isis_metric_block->metric_expense)) ND_PRINT((ndo, "\n\t\t Expense Metric: %d, %s", ISIS_LSP_TLV_METRIC_VALUE(isis_metric_block->metric_expense), ISIS_LSP_TLV_METRIC_IE(isis_metric_block->metric_expense) ? "External" : "Internal")); if (!ISIS_LSP_TLV_METRIC_SUPPORTED(isis_metric_block->metric_error)) ND_PRINT((ndo, "\n\t\t Error Metric: %d, %s", ISIS_LSP_TLV_METRIC_VALUE(isis_metric_block->metric_error), ISIS_LSP_TLV_METRIC_IE(isis_metric_block->metric_error) ? "External" : "Internal")); return(1); /* everything is ok */ } static int isis_print_tlv_ip_reach(netdissect_options *ndo, const uint8_t *cp, const char *ident, int length) { int prefix_len; const struct isis_tlv_ip_reach *tlv_ip_reach; tlv_ip_reach = (const struct isis_tlv_ip_reach *)cp; while (length > 0) { if ((size_t)length < sizeof(*tlv_ip_reach)) { ND_PRINT((ndo, "short IPv4 Reachability (%d vs %lu)", length, (unsigned long)sizeof(*tlv_ip_reach))); return (0); } if (!ND_TTEST(*tlv_ip_reach)) return (0); prefix_len = mask2plen(EXTRACT_32BITS(tlv_ip_reach->mask)); if (prefix_len == -1) ND_PRINT((ndo, "%sIPv4 prefix: %s mask %s", ident, ipaddr_string(ndo, (tlv_ip_reach->prefix)), ipaddr_string(ndo, (tlv_ip_reach->mask)))); else ND_PRINT((ndo, "%sIPv4 prefix: %15s/%u", ident, ipaddr_string(ndo, (tlv_ip_reach->prefix)), prefix_len)); ND_PRINT((ndo, ", Distribution: %s, Metric: %u, %s", ISIS_LSP_TLV_METRIC_UPDOWN(tlv_ip_reach->isis_metric_block.metric_default) ? "down" : "up", ISIS_LSP_TLV_METRIC_VALUE(tlv_ip_reach->isis_metric_block.metric_default), ISIS_LSP_TLV_METRIC_IE(tlv_ip_reach->isis_metric_block.metric_default) ? "External" : "Internal")); if (!ISIS_LSP_TLV_METRIC_SUPPORTED(tlv_ip_reach->isis_metric_block.metric_delay)) ND_PRINT((ndo, "%s Delay Metric: %u, %s", ident, ISIS_LSP_TLV_METRIC_VALUE(tlv_ip_reach->isis_metric_block.metric_delay), ISIS_LSP_TLV_METRIC_IE(tlv_ip_reach->isis_metric_block.metric_delay) ? "External" : "Internal")); if (!ISIS_LSP_TLV_METRIC_SUPPORTED(tlv_ip_reach->isis_metric_block.metric_expense)) ND_PRINT((ndo, "%s Expense Metric: %u, %s", ident, ISIS_LSP_TLV_METRIC_VALUE(tlv_ip_reach->isis_metric_block.metric_expense), ISIS_LSP_TLV_METRIC_IE(tlv_ip_reach->isis_metric_block.metric_expense) ? "External" : "Internal")); if (!ISIS_LSP_TLV_METRIC_SUPPORTED(tlv_ip_reach->isis_metric_block.metric_error)) ND_PRINT((ndo, "%s Error Metric: %u, %s", ident, ISIS_LSP_TLV_METRIC_VALUE(tlv_ip_reach->isis_metric_block.metric_error), ISIS_LSP_TLV_METRIC_IE(tlv_ip_reach->isis_metric_block.metric_error) ? "External" : "Internal")); length -= sizeof(struct isis_tlv_ip_reach); tlv_ip_reach++; } return (1); } /* * this is the common IP-REACH subTLV decoder; it is called * from various EXTD-IP REACH TLVs (135,235,236,237) */ static int isis_print_ip_reach_subtlv(netdissect_options *ndo, const uint8_t *tptr, int subt, int subl, const char *ident) { /* first lets see if we know the subTLVs name*/ ND_PRINT((ndo, "%s%s subTLV #%u, length: %u", ident, tok2str(isis_ext_ip_reach_subtlv_values, "unknown", subt), subt, subl)); ND_TCHECK2(*tptr,subl); switch(subt) { case ISIS_SUBTLV_EXTD_IP_REACH_MGMT_PREFIX_COLOR: /* fall through */ case ISIS_SUBTLV_EXTD_IP_REACH_ADMIN_TAG32: while (subl >= 4) { ND_PRINT((ndo, ", 0x%08x (=%u)", EXTRACT_32BITS(tptr), EXTRACT_32BITS(tptr))); tptr+=4; subl-=4; } break; case ISIS_SUBTLV_EXTD_IP_REACH_ADMIN_TAG64: while (subl >= 8) { ND_PRINT((ndo, ", 0x%08x%08x", EXTRACT_32BITS(tptr), EXTRACT_32BITS(tptr+4))); tptr+=8; subl-=8; } break; default: if (!print_unknown_data(ndo, tptr, "\n\t\t ", subl)) return(0); break; } return(1); trunc: ND_PRINT((ndo, "%s", ident)); ND_PRINT((ndo, "%s", tstr)); return(0); } /* * this is the common IS-REACH subTLV decoder; it is called * from isis_print_ext_is_reach() */ static int isis_print_is_reach_subtlv(netdissect_options *ndo, const uint8_t *tptr, u_int subt, u_int subl, const char *ident) { u_int te_class,priority_level,gmpls_switch_cap; union { /* int to float conversion buffer for several subTLVs - the bandwidth subTLVs carry an IEEE-754 float counting bytes/s, e.g. raw 0x4b3ebc20 is 12500000.0 bytes/s and prints below as 100.000 Mbps */ float f; uint32_t i; } bw; /* first lets see if we know the subTLVs name*/ ND_PRINT((ndo, "%s%s subTLV #%u, length: %u", ident, tok2str(isis_ext_is_reach_subtlv_values, "unknown", subt), subt, subl)); ND_TCHECK2(*tptr, subl); switch(subt) { case ISIS_SUBTLV_EXT_IS_REACH_ADMIN_GROUP: case ISIS_SUBTLV_EXT_IS_REACH_LINK_LOCAL_REMOTE_ID: case ISIS_SUBTLV_EXT_IS_REACH_LINK_REMOTE_ID: if (subl >= 4) { ND_PRINT((ndo, ", 0x%08x", EXTRACT_32BITS(tptr))); if (subl == 8) /* rfc4205 */ ND_PRINT((ndo, ", 0x%08x", EXTRACT_32BITS(tptr+4))); } break; case ISIS_SUBTLV_EXT_IS_REACH_IPV4_INTF_ADDR: case ISIS_SUBTLV_EXT_IS_REACH_IPV4_NEIGHBOR_ADDR: if (subl >= sizeof(struct in_addr)) ND_PRINT((ndo, ", %s", ipaddr_string(ndo, tptr))); break; case ISIS_SUBTLV_EXT_IS_REACH_MAX_LINK_BW : case ISIS_SUBTLV_EXT_IS_REACH_RESERVABLE_BW: if (subl >= 4) { bw.i = EXTRACT_32BITS(tptr); ND_PRINT((ndo, ", %.3f Mbps", bw.f * 8 / 1000000)); } break; case ISIS_SUBTLV_EXT_IS_REACH_UNRESERVED_BW : if (subl >= 32) { for (te_class = 0; te_class < 8; te_class++) { bw.i = EXTRACT_32BITS(tptr); ND_PRINT((ndo, "%s TE-Class %u: %.3f Mbps", ident, te_class, bw.f * 8 / 1000000)); tptr+=4; } } break; case ISIS_SUBTLV_EXT_IS_REACH_BW_CONSTRAINTS: /* fall through */ case ISIS_SUBTLV_EXT_IS_REACH_BW_CONSTRAINTS_OLD: ND_PRINT((ndo, "%sBandwidth Constraints Model ID: %s (%u)", ident, tok2str(diffserv_te_bc_values, "unknown", *tptr), *tptr)); tptr++; /* decode BCs until the subTLV ends */ for (te_class = 0; te_class < (subl-1)/4; te_class++) { ND_TCHECK2(*tptr, 4); bw.i = EXTRACT_32BITS(tptr); ND_PRINT((ndo, "%s Bandwidth constraint CT%u: %.3f Mbps", ident, te_class, bw.f * 8 / 1000000)); tptr+=4; } break; case ISIS_SUBTLV_EXT_IS_REACH_TE_METRIC: if (subl >= 3) ND_PRINT((ndo, ", %u", EXTRACT_24BITS(tptr))); break; case ISIS_SUBTLV_EXT_IS_REACH_LINK_ATTRIBUTE: if (subl == 2) { ND_PRINT((ndo, ", [ %s ] (0x%04x)", bittok2str(isis_subtlv_link_attribute_values, "Unknown", EXTRACT_16BITS(tptr)), EXTRACT_16BITS(tptr))); } break; case ISIS_SUBTLV_EXT_IS_REACH_LINK_PROTECTION_TYPE: if (subl >= 2) { /* two octets follow: a protection-capability bitmask, then the octet printed as Priority */ 
ND_PRINT((ndo, ", %s, Priority %u", bittok2str(gmpls_link_prot_values, "none", *tptr), *(tptr+1))); } break; case ISIS_SUBTLV_SPB_METRIC: if (subl >= 6) { ND_PRINT((ndo, ", LM: %u", EXTRACT_24BITS(tptr))); tptr=tptr+3; ND_PRINT((ndo, ", P: %u", *(tptr))); tptr++; ND_PRINT((ndo, ", P-ID: %u", EXTRACT_16BITS(tptr))); } break; case ISIS_SUBTLV_EXT_IS_REACH_INTF_SW_CAP_DESCR: if (subl >= 36) { gmpls_switch_cap = *tptr; ND_PRINT((ndo, "%s Interface Switching Capability:%s", ident, tok2str(gmpls_switch_cap_values, "Unknown", gmpls_switch_cap))); ND_PRINT((ndo, ", LSP Encoding: %s", tok2str(gmpls_encoding_values, "Unknown", *(tptr + 1)))); tptr+=4; ND_PRINT((ndo, "%s Max LSP Bandwidth:", ident)); for (priority_level = 0; priority_level < 8; priority_level++) { bw.i = EXTRACT_32BITS(tptr); ND_PRINT((ndo, "%s priority level %d: %.3f Mbps", ident, priority_level, bw.f * 8 / 1000000)); tptr+=4; } subl-=36; switch (gmpls_switch_cap) { case GMPLS_PSC1: case GMPLS_PSC2: case GMPLS_PSC3: case GMPLS_PSC4: ND_TCHECK2(*tptr, 6); bw.i = EXTRACT_32BITS(tptr); ND_PRINT((ndo, "%s Min LSP Bandwidth: %.3f Mbps", ident, bw.f * 8 / 1000000)); ND_PRINT((ndo, "%s Interface MTU: %u", ident, EXTRACT_16BITS(tptr + 4))); break; case GMPLS_TSC: ND_TCHECK2(*tptr, 8); bw.i = EXTRACT_32BITS(tptr); ND_PRINT((ndo, "%s Min LSP Bandwidth: %.3f Mbps", ident, bw.f * 8 / 1000000)); ND_PRINT((ndo, "%s Indication %s", ident, tok2str(gmpls_switch_cap_tsc_indication_values, "Unknown (%u)", *(tptr + 4)))); break; default: /* there is some optional stuff left to decode but this is as of yet not specified so just lets hexdump what is left */ if(subl>0){ if (!print_unknown_data(ndo, tptr, "\n\t\t ", subl)) return(0); } } } break; default: if (!print_unknown_data(ndo, tptr, "\n\t\t ", subl)) return(0); break; } return(1); trunc: return(0); } /* * this is the common IS-REACH decoder it is called * from various EXTD-IS REACH style TLVs (22,24,222) */ static int isis_print_ext_is_reach(netdissect_options *ndo, const uint8_t *tptr, const char *ident, int tlv_type) { char ident_buffer[20]; int subtlv_type,subtlv_len,subtlv_sum_len; int proc_bytes = 0; /* how many bytes did we process ? */ if (!ND_TTEST2(*tptr, NODE_ID_LEN)) return(0); ND_PRINT((ndo, "%sIS Neighbor: %s", ident, isis_print_id(tptr, NODE_ID_LEN))); tptr+=(NODE_ID_LEN); if (tlv_type != ISIS_TLV_IS_ALIAS_ID) { /* the Alias TLV Metric field is implicit 0 */ if (!ND_TTEST2(*tptr, 3)) /* and is therefore skipped */ return(0); ND_PRINT((ndo, ", Metric: %d", EXTRACT_24BITS(tptr))); tptr+=3; } if (!ND_TTEST2(*tptr, 1)) return(0); subtlv_sum_len=*(tptr++); /* read out subTLV length */ proc_bytes=NODE_ID_LEN+3+1; ND_PRINT((ndo, ", %ssub-TLVs present",subtlv_sum_len ? 
"" : "no ")); if (subtlv_sum_len) { ND_PRINT((ndo, " (%u)", subtlv_sum_len)); while (subtlv_sum_len>0) { if (!ND_TTEST2(*tptr,2)) return(0); subtlv_type=*(tptr++); subtlv_len=*(tptr++); /* prepend the indent string */ snprintf(ident_buffer, sizeof(ident_buffer), "%s ",ident); if (!isis_print_is_reach_subtlv(ndo, tptr, subtlv_type, subtlv_len, ident_buffer)) return(0); tptr+=subtlv_len; subtlv_sum_len-=(subtlv_len+2); proc_bytes+=(subtlv_len+2); } } return(proc_bytes); } /* * this is the common Multi Topology ID decoder * it is called from various MT-TLVs (222,229,235,237) */ static int isis_print_mtid(netdissect_options *ndo, const uint8_t *tptr, const char *ident) { if (!ND_TTEST2(*tptr, 2)) return(0); ND_PRINT((ndo, "%s%s", ident, tok2str(isis_mt_values, "Reserved for IETF Consensus", ISIS_MASK_MTID(EXTRACT_16BITS(tptr))))); ND_PRINT((ndo, " Topology (0x%03x), Flags: [%s]", ISIS_MASK_MTID(EXTRACT_16BITS(tptr)), bittok2str(isis_mt_flag_values, "none",ISIS_MASK_MTFLAGS(EXTRACT_16BITS(tptr))))); return(2); } /* * this is the common extended IP reach decoder * it is called from TLVs (135,235,236,237) * we process the TLV and optional subTLVs and return * the amount of processed bytes */ static int isis_print_extd_ip_reach(netdissect_options *ndo, const uint8_t *tptr, const char *ident, uint16_t afi) { char ident_buffer[20]; uint8_t prefix[sizeof(struct in6_addr)]; /* shared copy buffer for IPv4 and IPv6 prefixes */ u_int metric, status_byte, bit_length, byte_length, sublen, processed, subtlvtype, subtlvlen; if (!ND_TTEST2(*tptr, 4)) return (0); metric = EXTRACT_32BITS(tptr); processed=4; tptr+=4; if (afi == AF_INET) { if (!ND_TTEST2(*tptr, 1)) /* fetch status byte */ return (0); status_byte=*(tptr++); bit_length = status_byte&0x3f; if (bit_length > 32) { ND_PRINT((ndo, "%sIPv4 prefix: bad bit length %u", ident, bit_length)); return (0); } processed++; } else if (afi == AF_INET6) { if (!ND_TTEST2(*tptr, 1)) /* fetch status & prefix_len byte */ return (0); status_byte=*(tptr++); bit_length=*(tptr++); if (bit_length > 128) { ND_PRINT((ndo, "%sIPv6 prefix: bad bit length %u", ident, bit_length)); return (0); } processed+=2; } else return (0); /* somebody is fooling us */ byte_length = (bit_length + 7) / 8; /* prefix has variable length encoding */ if (!ND_TTEST2(*tptr, byte_length)) return (0); memset(prefix, 0, sizeof prefix); /* clear the copy buffer */ memcpy(prefix,tptr,byte_length); /* copy as much as is stored in the TLV */ tptr+=byte_length; processed+=byte_length; if (afi == AF_INET) ND_PRINT((ndo, "%sIPv4 prefix: %15s/%u", ident, ipaddr_string(ndo, prefix), bit_length)); else if (afi == AF_INET6) ND_PRINT((ndo, "%sIPv6 prefix: %s/%u", ident, ip6addr_string(ndo, prefix), bit_length)); ND_PRINT((ndo, ", Distribution: %s, Metric: %u", ISIS_MASK_TLV_EXTD_IP_UPDOWN(status_byte) ? "down" : "up", metric)); if (afi == AF_INET && ISIS_MASK_TLV_EXTD_IP_SUBTLV(status_byte)) ND_PRINT((ndo, ", sub-TLVs present")); else if (afi == AF_INET6) ND_PRINT((ndo, ", %s%s", ISIS_MASK_TLV_EXTD_IP6_IE(status_byte) ? "External" : "Internal", ISIS_MASK_TLV_EXTD_IP6_SUBTLV(status_byte) ? 
", sub-TLVs present" : "")); if ((afi == AF_INET && ISIS_MASK_TLV_EXTD_IP_SUBTLV(status_byte)) || (afi == AF_INET6 && ISIS_MASK_TLV_EXTD_IP6_SUBTLV(status_byte)) ) { /* assume that one prefix can hold more than one subTLV - therefore the first byte must reflect the aggregate bytecount of the subTLVs for this prefix */ if (!ND_TTEST2(*tptr, 1)) return (0); sublen=*(tptr++); processed+=sublen+1; ND_PRINT((ndo, " (%u)", sublen)); /* print out subTLV length */ while (sublen>0) { if (!ND_TTEST2(*tptr,2)) return (0); subtlvtype=*(tptr++); subtlvlen=*(tptr++); /* prepend the indent string */ snprintf(ident_buffer, sizeof(ident_buffer), "%s ",ident); if (!isis_print_ip_reach_subtlv(ndo, tptr, subtlvtype, subtlvlen, ident_buffer)) return(0); tptr+=subtlvlen; sublen-=(subtlvlen+2); } } return (processed); } /* * Clear checksum and lifetime prior to signature verification. */ static void isis_clear_checksum_lifetime(void *header) { struct isis_lsp_header *header_lsp = (struct isis_lsp_header *) header; header_lsp->checksum[0] = 0; header_lsp->checksum[1] = 0; header_lsp->remaining_lifetime[0] = 0; header_lsp->remaining_lifetime[1] = 0; } /* * isis_print * Decode IS-IS packets. Return 0 on error. */ static int isis_print(netdissect_options *ndo, const uint8_t *p, u_int length) { const struct isis_common_header *isis_header; const struct isis_iih_lan_header *header_iih_lan; const struct isis_iih_ptp_header *header_iih_ptp; const struct isis_lsp_header *header_lsp; const struct isis_csnp_header *header_csnp; const struct isis_psnp_header *header_psnp; const struct isis_tlv_lsp *tlv_lsp; const struct isis_tlv_ptp_adj *tlv_ptp_adj; const struct isis_tlv_is_reach *tlv_is_reach; const struct isis_tlv_es_reach *tlv_es_reach; uint8_t pdu_type, max_area, id_length, tlv_type, tlv_len, tmp, alen, lan_alen, prefix_len; uint8_t ext_is_len, ext_ip_len, mt_len; const uint8_t *optr, *pptr, *tptr; u_short packet_len,pdu_len, key_id; u_int i,vendor_id; int sigcheck; packet_len=length; optr = p; /* initialize the _o_riginal pointer to the packet start - need it for parsing the checksum TLV and authentication TLV verification */ isis_header = (const struct isis_common_header *)p; ND_TCHECK(*isis_header); if (length < ISIS_COMMON_HEADER_SIZE) goto trunc; pptr = p+(ISIS_COMMON_HEADER_SIZE); header_iih_lan = (const struct isis_iih_lan_header *)pptr; header_iih_ptp = (const struct isis_iih_ptp_header *)pptr; header_lsp = (const struct isis_lsp_header *)pptr; header_csnp = (const struct isis_csnp_header *)pptr; header_psnp = (const struct isis_psnp_header *)pptr; if (!ndo->ndo_eflag) ND_PRINT((ndo, "IS-IS")); /* * Sanity checking of the header. 
*/ if (isis_header->version != ISIS_VERSION) { ND_PRINT((ndo, "version %d packet not supported", isis_header->version)); return (0); } if ((isis_header->id_length != SYSTEM_ID_LEN) && (isis_header->id_length != 0)) { ND_PRINT((ndo, "system ID length of %d is not supported", isis_header->id_length)); return (0); } if (isis_header->pdu_version != ISIS_VERSION) { ND_PRINT((ndo, "version %d packet not supported", isis_header->pdu_version)); return (0); } if (length < isis_header->fixed_len) { ND_PRINT((ndo, "fixed header length %u > packet length %u", isis_header->fixed_len, length)); return (0); } if (isis_header->fixed_len < ISIS_COMMON_HEADER_SIZE) { ND_PRINT((ndo, "fixed header length %u < minimum header size %u", isis_header->fixed_len, (u_int)ISIS_COMMON_HEADER_SIZE)); return (0); } max_area = isis_header->max_area; switch(max_area) { case 0: max_area = 3; /* silly shit */ break; case 255: ND_PRINT((ndo, "bad packet -- 255 areas")); return (0); default: break; } id_length = isis_header->id_length; switch(id_length) { case 0: id_length = 6; /* silly shit again */ break; case 1: /* 1-8 are valid sys-ID lengths */ case 2: case 3: case 4: case 5: case 6: case 7: case 8: break; case 255: id_length = 0; /* entirely useless */ break; default: break; } /* toss any non 6-byte sys-ID len PDUs */ if (id_length != 6 ) { ND_PRINT((ndo, "bad packet -- illegal sys-ID length (%u)", id_length)); return (0); } pdu_type=isis_header->pdu_type; /* in non-verbose mode print the basic PDU Type plus PDU specific brief information*/ if (ndo->ndo_vflag == 0) { ND_PRINT((ndo, "%s%s", ndo->ndo_eflag ? "" : ", ", tok2str(isis_pdu_values, "unknown PDU-Type %u", pdu_type))); } else { /* ok they seem to want to know everything - lets fully decode it */ ND_PRINT((ndo, "%slength %u", ndo->ndo_eflag ? 
"" : ", ", length)); ND_PRINT((ndo, "\n\t%s, hlen: %u, v: %u, pdu-v: %u, sys-id-len: %u (%u), max-area: %u (%u)", tok2str(isis_pdu_values, "unknown, type %u", pdu_type), isis_header->fixed_len, isis_header->version, isis_header->pdu_version, id_length, isis_header->id_length, max_area, isis_header->max_area)); if (ndo->ndo_vflag > 1) { if (!print_unknown_data(ndo, optr, "\n\t", 8)) /* provide the _o_riginal pointer */ return (0); /* for optionally debugging the common header */ } } switch (pdu_type) { case ISIS_PDU_L1_LAN_IIH: case ISIS_PDU_L2_LAN_IIH: if (isis_header->fixed_len != (ISIS_COMMON_HEADER_SIZE+ISIS_IIH_LAN_HEADER_SIZE)) { ND_PRINT((ndo, ", bogus fixed header length %u should be %lu", isis_header->fixed_len, (unsigned long)(ISIS_COMMON_HEADER_SIZE+ISIS_IIH_LAN_HEADER_SIZE))); return (0); } ND_TCHECK(*header_iih_lan); if (length < ISIS_COMMON_HEADER_SIZE+ISIS_IIH_LAN_HEADER_SIZE) goto trunc; if (ndo->ndo_vflag == 0) { ND_PRINT((ndo, ", src-id %s", isis_print_id(header_iih_lan->source_id, SYSTEM_ID_LEN))); ND_PRINT((ndo, ", lan-id %s, prio %u", isis_print_id(header_iih_lan->lan_id,NODE_ID_LEN), header_iih_lan->priority)); ND_PRINT((ndo, ", length %u", length)); return (1); } pdu_len=EXTRACT_16BITS(header_iih_lan->pdu_len); if (packet_len>pdu_len) { packet_len=pdu_len; /* do TLV decoding as long as it makes sense */ length=pdu_len; } ND_PRINT((ndo, "\n\t source-id: %s, holding time: %us, Flags: [%s]", isis_print_id(header_iih_lan->source_id,SYSTEM_ID_LEN), EXTRACT_16BITS(header_iih_lan->holding_time), tok2str(isis_iih_circuit_type_values, "unknown circuit type 0x%02x", header_iih_lan->circuit_type))); ND_PRINT((ndo, "\n\t lan-id: %s, Priority: %u, PDU length: %u", isis_print_id(header_iih_lan->lan_id, NODE_ID_LEN), (header_iih_lan->priority) & ISIS_LAN_PRIORITY_MASK, pdu_len)); if (ndo->ndo_vflag > 1) { if (!print_unknown_data(ndo, pptr, "\n\t ", ISIS_IIH_LAN_HEADER_SIZE)) return (0); } packet_len -= (ISIS_COMMON_HEADER_SIZE+ISIS_IIH_LAN_HEADER_SIZE); pptr = p + (ISIS_COMMON_HEADER_SIZE+ISIS_IIH_LAN_HEADER_SIZE); break; case ISIS_PDU_PTP_IIH: if (isis_header->fixed_len != (ISIS_COMMON_HEADER_SIZE+ISIS_IIH_PTP_HEADER_SIZE)) { ND_PRINT((ndo, ", bogus fixed header length %u should be %lu", isis_header->fixed_len, (unsigned long)(ISIS_COMMON_HEADER_SIZE+ISIS_IIH_PTP_HEADER_SIZE))); return (0); } ND_TCHECK(*header_iih_ptp); if (length < ISIS_COMMON_HEADER_SIZE+ISIS_IIH_PTP_HEADER_SIZE) goto trunc; if (ndo->ndo_vflag == 0) { ND_PRINT((ndo, ", src-id %s", isis_print_id(header_iih_ptp->source_id, SYSTEM_ID_LEN))); ND_PRINT((ndo, ", length %u", length)); return (1); } pdu_len=EXTRACT_16BITS(header_iih_ptp->pdu_len); if (packet_len>pdu_len) { packet_len=pdu_len; /* do TLV decoding as long as it makes sense */ length=pdu_len; } ND_PRINT((ndo, "\n\t source-id: %s, holding time: %us, Flags: [%s]", isis_print_id(header_iih_ptp->source_id,SYSTEM_ID_LEN), EXTRACT_16BITS(header_iih_ptp->holding_time), tok2str(isis_iih_circuit_type_values, "unknown circuit type 0x%02x", header_iih_ptp->circuit_type))); ND_PRINT((ndo, "\n\t circuit-id: 0x%02x, PDU length: %u", header_iih_ptp->circuit_id, pdu_len)); if (ndo->ndo_vflag > 1) { if (!print_unknown_data(ndo, pptr, "\n\t ", ISIS_IIH_PTP_HEADER_SIZE)) return (0); } packet_len -= (ISIS_COMMON_HEADER_SIZE+ISIS_IIH_PTP_HEADER_SIZE); pptr = p + (ISIS_COMMON_HEADER_SIZE+ISIS_IIH_PTP_HEADER_SIZE); break; case ISIS_PDU_L1_LSP: case ISIS_PDU_L2_LSP: if (isis_header->fixed_len != (ISIS_COMMON_HEADER_SIZE+ISIS_LSP_HEADER_SIZE)) { ND_PRINT((ndo, ", bogus fixed header 
length %u should be %lu", isis_header->fixed_len, (unsigned long)ISIS_LSP_HEADER_SIZE)); return (0); } ND_TCHECK(*header_lsp); if (length < ISIS_COMMON_HEADER_SIZE+ISIS_LSP_HEADER_SIZE) goto trunc; if (ndo->ndo_vflag == 0) { ND_PRINT((ndo, ", lsp-id %s, seq 0x%08x, lifetime %5us", isis_print_id(header_lsp->lsp_id, LSP_ID_LEN), EXTRACT_32BITS(header_lsp->sequence_number), EXTRACT_16BITS(header_lsp->remaining_lifetime))); ND_PRINT((ndo, ", length %u", length)); return (1); } pdu_len=EXTRACT_16BITS(header_lsp->pdu_len); if (packet_len>pdu_len) { packet_len=pdu_len; /* do TLV decoding as long as it makes sense */ length=pdu_len; } ND_PRINT((ndo, "\n\t lsp-id: %s, seq: 0x%08x, lifetime: %5us\n\t chksum: 0x%04x", isis_print_id(header_lsp->lsp_id, LSP_ID_LEN), EXTRACT_32BITS(header_lsp->sequence_number), EXTRACT_16BITS(header_lsp->remaining_lifetime), EXTRACT_16BITS(header_lsp->checksum))); osi_print_cksum(ndo, (const uint8_t *)header_lsp->lsp_id, EXTRACT_16BITS(header_lsp->checksum), 12, length-12); ND_PRINT((ndo, ", PDU length: %u, Flags: [ %s", pdu_len, ISIS_MASK_LSP_OL_BIT(header_lsp->typeblock) ? "Overload bit set, " : "")); if (ISIS_MASK_LSP_ATT_BITS(header_lsp->typeblock)) { ND_PRINT((ndo, "%s", ISIS_MASK_LSP_ATT_DEFAULT_BIT(header_lsp->typeblock) ? "default " : "")); ND_PRINT((ndo, "%s", ISIS_MASK_LSP_ATT_DELAY_BIT(header_lsp->typeblock) ? "delay " : "")); ND_PRINT((ndo, "%s", ISIS_MASK_LSP_ATT_EXPENSE_BIT(header_lsp->typeblock) ? "expense " : "")); ND_PRINT((ndo, "%s", ISIS_MASK_LSP_ATT_ERROR_BIT(header_lsp->typeblock) ? "error " : "")); ND_PRINT((ndo, "ATT bit set, ")); } ND_PRINT((ndo, "%s", ISIS_MASK_LSP_PARTITION_BIT(header_lsp->typeblock) ? "P bit set, " : "")); ND_PRINT((ndo, "%s ]", tok2str(isis_lsp_istype_values, "Unknown(0x%x)", ISIS_MASK_LSP_ISTYPE_BITS(header_lsp->typeblock)))); if (ndo->ndo_vflag > 1) { if (!print_unknown_data(ndo, pptr, "\n\t ", ISIS_LSP_HEADER_SIZE)) return (0); } packet_len -= (ISIS_COMMON_HEADER_SIZE+ISIS_LSP_HEADER_SIZE); pptr = p + (ISIS_COMMON_HEADER_SIZE+ISIS_LSP_HEADER_SIZE); break; case ISIS_PDU_L1_CSNP: case ISIS_PDU_L2_CSNP: if (isis_header->fixed_len != (ISIS_COMMON_HEADER_SIZE+ISIS_CSNP_HEADER_SIZE)) { ND_PRINT((ndo, ", bogus fixed header length %u should be %lu", isis_header->fixed_len, (unsigned long)(ISIS_COMMON_HEADER_SIZE+ISIS_CSNP_HEADER_SIZE))); return (0); } ND_TCHECK(*header_csnp); if (length < ISIS_COMMON_HEADER_SIZE+ISIS_CSNP_HEADER_SIZE) goto trunc; if (ndo->ndo_vflag == 0) { ND_PRINT((ndo, ", src-id %s", isis_print_id(header_csnp->source_id, NODE_ID_LEN))); ND_PRINT((ndo, ", length %u", length)); return (1); } pdu_len=EXTRACT_16BITS(header_csnp->pdu_len); if (packet_len>pdu_len) { packet_len=pdu_len; /* do TLV decoding as long as it makes sense */ length=pdu_len; } ND_PRINT((ndo, "\n\t source-id: %s, PDU length: %u", isis_print_id(header_csnp->source_id, NODE_ID_LEN), pdu_len)); ND_PRINT((ndo, "\n\t start lsp-id: %s", isis_print_id(header_csnp->start_lsp_id, LSP_ID_LEN))); ND_PRINT((ndo, "\n\t end lsp-id: %s", isis_print_id(header_csnp->end_lsp_id, LSP_ID_LEN))); if (ndo->ndo_vflag > 1) { if (!print_unknown_data(ndo, pptr, "\n\t ", ISIS_CSNP_HEADER_SIZE)) return (0); } packet_len -= (ISIS_COMMON_HEADER_SIZE+ISIS_CSNP_HEADER_SIZE); pptr = p + (ISIS_COMMON_HEADER_SIZE+ISIS_CSNP_HEADER_SIZE); break; case ISIS_PDU_L1_PSNP: case ISIS_PDU_L2_PSNP: if (isis_header->fixed_len != (ISIS_COMMON_HEADER_SIZE+ISIS_PSNP_HEADER_SIZE)) { ND_PRINT((ndo, "- bogus fixed header length %u should be %lu", isis_header->fixed_len, (unsigned 
long)(ISIS_COMMON_HEADER_SIZE+ISIS_PSNP_HEADER_SIZE))); return (0); } ND_TCHECK(*header_psnp); if (length < ISIS_COMMON_HEADER_SIZE+ISIS_PSNP_HEADER_SIZE) goto trunc; if (ndo->ndo_vflag == 0) { ND_PRINT((ndo, ", src-id %s", isis_print_id(header_psnp->source_id, NODE_ID_LEN))); ND_PRINT((ndo, ", length %u", length)); return (1); } pdu_len=EXTRACT_16BITS(header_psnp->pdu_len); if (packet_len>pdu_len) { packet_len=pdu_len; /* do TLV decoding as long as it makes sense */ length=pdu_len; } ND_PRINT((ndo, "\n\t source-id: %s, PDU length: %u", isis_print_id(header_psnp->source_id, NODE_ID_LEN), pdu_len)); if (ndo->ndo_vflag > 1) { if (!print_unknown_data(ndo, pptr, "\n\t ", ISIS_PSNP_HEADER_SIZE)) return (0); } packet_len -= (ISIS_COMMON_HEADER_SIZE+ISIS_PSNP_HEADER_SIZE); pptr = p + (ISIS_COMMON_HEADER_SIZE+ISIS_PSNP_HEADER_SIZE); break; default: if (ndo->ndo_vflag == 0) { ND_PRINT((ndo, ", length %u", length)); return (1); } (void)print_unknown_data(ndo, pptr, "\n\t ", length); return (0); } /* * Now print the TLV's. */ while (packet_len > 0) { ND_TCHECK2(*pptr, 2); if (packet_len < 2) goto trunc; tlv_type = *pptr++; tlv_len = *pptr++; tmp =tlv_len; /* copy temporary len & pointer to packet data */ tptr = pptr; packet_len -= 2; /* first lets see if we know the TLVs name*/ ND_PRINT((ndo, "\n\t %s TLV #%u, length: %u", tok2str(isis_tlv_values, "unknown", tlv_type), tlv_type, tlv_len)); if (tlv_len == 0) /* something is invalid */ continue; if (packet_len < tlv_len) goto trunc; /* now check if we have a decoder otherwise do a hexdump at the end*/ switch (tlv_type) { case ISIS_TLV_AREA_ADDR: ND_TCHECK2(*tptr, 1); alen = *tptr++; while (tmp && alen < tmp) { ND_TCHECK2(*tptr, alen); ND_PRINT((ndo, "\n\t Area address (length: %u): %s", alen, isonsap_string(ndo, tptr, alen))); tptr += alen; tmp -= alen + 1; if (tmp==0) /* if this is the last area address do not attempt a boundary check */ break; ND_TCHECK2(*tptr, 1); alen = *tptr++; } break; case ISIS_TLV_ISNEIGH: while (tmp >= ETHER_ADDR_LEN) { ND_TCHECK2(*tptr, ETHER_ADDR_LEN); ND_PRINT((ndo, "\n\t SNPA: %s", isis_print_id(tptr, ETHER_ADDR_LEN))); tmp -= ETHER_ADDR_LEN; tptr += ETHER_ADDR_LEN; } break; case ISIS_TLV_ISNEIGH_VARLEN: if (!ND_TTEST2(*tptr, 1) || tmp < 3) /* min. TLV length */ goto trunctlv; lan_alen = *tptr++; /* LAN address length */ if (lan_alen == 0) { ND_PRINT((ndo, "\n\t LAN address length 0 bytes (invalid)")); break; } tmp --; ND_PRINT((ndo, "\n\t LAN address length %u bytes ", lan_alen)); while (tmp >= lan_alen) { ND_TCHECK2(*tptr, lan_alen); ND_PRINT((ndo, "\n\t\tIS Neighbor: %s", isis_print_id(tptr, lan_alen))); tmp -= lan_alen; tptr +=lan_alen; } break; case ISIS_TLV_PADDING: break; case ISIS_TLV_MT_IS_REACH: mt_len = isis_print_mtid(ndo, tptr, "\n\t "); if (mt_len == 0) /* did something go wrong ? */ goto trunctlv; tptr+=mt_len; tmp-=mt_len; while (tmp >= 2+NODE_ID_LEN+3+1) { ext_is_len = isis_print_ext_is_reach(ndo, tptr, "\n\t ", tlv_type); if (ext_is_len == 0) /* did something go wrong ? */ goto trunctlv; tmp-=ext_is_len; tptr+=ext_is_len; } break; case ISIS_TLV_IS_ALIAS_ID: while (tmp >= NODE_ID_LEN+1) { /* is it worth attempting a decode ? */ ext_is_len = isis_print_ext_is_reach(ndo, tptr, "\n\t ", tlv_type); if (ext_is_len == 0) /* did something go wrong ? */ goto trunctlv; tmp-=ext_is_len; tptr+=ext_is_len; } break; case ISIS_TLV_EXT_IS_REACH: while (tmp >= NODE_ID_LEN+3+1) { /* is it worth attempting a decode ? 
*/ ext_is_len = isis_print_ext_is_reach(ndo, tptr, "\n\t ", tlv_type); if (ext_is_len == 0) /* did something go wrong ? */ goto trunctlv; tmp-=ext_is_len; tptr+=ext_is_len; } break; case ISIS_TLV_IS_REACH: ND_TCHECK2(*tptr,1); /* check if there is one byte left to read out the virtual flag */ ND_PRINT((ndo, "\n\t %s", tok2str(isis_is_reach_virtual_values, "bogus virtual flag 0x%02x", *tptr++))); tlv_is_reach = (const struct isis_tlv_is_reach *)tptr; while (tmp >= sizeof(struct isis_tlv_is_reach)) { ND_TCHECK(*tlv_is_reach); ND_PRINT((ndo, "\n\t IS Neighbor: %s", isis_print_id(tlv_is_reach->neighbor_nodeid, NODE_ID_LEN))); isis_print_metric_block(ndo, &tlv_is_reach->isis_metric_block); tmp -= sizeof(struct isis_tlv_is_reach); tlv_is_reach++; } break; case ISIS_TLV_ESNEIGH: tlv_es_reach = (const struct isis_tlv_es_reach *)tptr; while (tmp >= sizeof(struct isis_tlv_es_reach)) { ND_TCHECK(*tlv_es_reach); ND_PRINT((ndo, "\n\t ES Neighbor: %s", isis_print_id(tlv_es_reach->neighbor_sysid, SYSTEM_ID_LEN))); isis_print_metric_block(ndo, &tlv_es_reach->isis_metric_block); tmp -= sizeof(struct isis_tlv_es_reach); tlv_es_reach++; } break; /* those two TLVs share the same format */ case ISIS_TLV_INT_IP_REACH: case ISIS_TLV_EXT_IP_REACH: if (!isis_print_tlv_ip_reach(ndo, pptr, "\n\t ", tlv_len)) return (1); break; case ISIS_TLV_EXTD_IP_REACH: while (tmp>0) { ext_ip_len = isis_print_extd_ip_reach(ndo, tptr, "\n\t ", AF_INET); if (ext_ip_len == 0) /* did something go wrong ? */ goto trunctlv; tptr+=ext_ip_len; tmp-=ext_ip_len; } break; case ISIS_TLV_MT_IP_REACH: mt_len = isis_print_mtid(ndo, tptr, "\n\t "); if (mt_len == 0) { /* did something go wrong ? */ goto trunctlv; } tptr+=mt_len; tmp-=mt_len; while (tmp>0) { ext_ip_len = isis_print_extd_ip_reach(ndo, tptr, "\n\t ", AF_INET); if (ext_ip_len == 0) /* did something go wrong ? */ goto trunctlv; tptr+=ext_ip_len; tmp-=ext_ip_len; } break; case ISIS_TLV_IP6_REACH: while (tmp>0) { ext_ip_len = isis_print_extd_ip_reach(ndo, tptr, "\n\t ", AF_INET6); if (ext_ip_len == 0) /* did something go wrong ? */ goto trunctlv; tptr+=ext_ip_len; tmp-=ext_ip_len; } break; case ISIS_TLV_MT_IP6_REACH: mt_len = isis_print_mtid(ndo, tptr, "\n\t "); if (mt_len == 0) { /* did something go wrong ? */ goto trunctlv; } tptr+=mt_len; tmp-=mt_len; while (tmp>0) { ext_ip_len = isis_print_extd_ip_reach(ndo, tptr, "\n\t ", AF_INET6); if (ext_ip_len == 0) /* did something go wrong ? 
*/ goto trunctlv; tptr+=ext_ip_len; tmp-=ext_ip_len; } break; case ISIS_TLV_IP6ADDR: while (tmp>=sizeof(struct in6_addr)) { ND_TCHECK2(*tptr, sizeof(struct in6_addr)); ND_PRINT((ndo, "\n\t IPv6 interface address: %s", ip6addr_string(ndo, tptr))); tptr += sizeof(struct in6_addr); tmp -= sizeof(struct in6_addr); } break; case ISIS_TLV_AUTH: ND_TCHECK2(*tptr, 1); ND_PRINT((ndo, "\n\t %s: ", tok2str(isis_subtlv_auth_values, "unknown Authentication type 0x%02x", *tptr))); switch (*tptr) { case ISIS_SUBTLV_AUTH_SIMPLE: if (fn_printzp(ndo, tptr + 1, tlv_len - 1, ndo->ndo_snapend)) goto trunctlv; break; case ISIS_SUBTLV_AUTH_MD5: for(i=1;i<tlv_len;i++) { ND_TCHECK2(*(tptr + i), 1); ND_PRINT((ndo, "%02x", *(tptr + i))); } if (tlv_len != ISIS_SUBTLV_AUTH_MD5_LEN+1) ND_PRINT((ndo, ", (invalid subTLV) ")); sigcheck = signature_verify(ndo, optr, length, tptr + 1, isis_clear_checksum_lifetime, header_lsp); ND_PRINT((ndo, " (%s)", tok2str(signature_check_values, "Unknown", sigcheck))); break; case ISIS_SUBTLV_AUTH_GENERIC: ND_TCHECK2(*(tptr + 1), 2); key_id = EXTRACT_16BITS((tptr+1)); ND_PRINT((ndo, "%u, password: ", key_id)); for(i=1 + sizeof(uint16_t);i<tlv_len;i++) { ND_TCHECK2(*(tptr + i), 1); ND_PRINT((ndo, "%02x", *(tptr + i))); } break; case ISIS_SUBTLV_AUTH_PRIVATE: default: if (!print_unknown_data(ndo, tptr + 1, "\n\t\t ", tlv_len - 1)) return(0); break; } break; case ISIS_TLV_PTP_ADJ: tlv_ptp_adj = (const struct isis_tlv_ptp_adj *)tptr; if(tmp>=1) { ND_TCHECK2(*tptr, 1); ND_PRINT((ndo, "\n\t Adjacency State: %s (%u)", tok2str(isis_ptp_adjancey_values, "unknown", *tptr), *tptr)); tmp--; } if(tmp>sizeof(tlv_ptp_adj->extd_local_circuit_id)) { ND_TCHECK(tlv_ptp_adj->extd_local_circuit_id); ND_PRINT((ndo, "\n\t Extended Local circuit-ID: 0x%08x", EXTRACT_32BITS(tlv_ptp_adj->extd_local_circuit_id))); tmp-=sizeof(tlv_ptp_adj->extd_local_circuit_id); } if(tmp>=SYSTEM_ID_LEN) { ND_TCHECK2(tlv_ptp_adj->neighbor_sysid, SYSTEM_ID_LEN); ND_PRINT((ndo, "\n\t Neighbor System-ID: %s", isis_print_id(tlv_ptp_adj->neighbor_sysid, SYSTEM_ID_LEN))); tmp-=SYSTEM_ID_LEN; } if(tmp>=sizeof(tlv_ptp_adj->neighbor_extd_local_circuit_id)) { ND_TCHECK(tlv_ptp_adj->neighbor_extd_local_circuit_id); ND_PRINT((ndo, "\n\t Neighbor Extended Local circuit-ID: 0x%08x", EXTRACT_32BITS(tlv_ptp_adj->neighbor_extd_local_circuit_id))); } break; case ISIS_TLV_PROTOCOLS: ND_PRINT((ndo, "\n\t NLPID(s): ")); while (tmp>0) { ND_TCHECK2(*(tptr), 1); ND_PRINT((ndo, "%s (0x%02x)", tok2str(nlpid_values, "unknown", *tptr), *tptr)); if (tmp>1) /* further NPLIDs ? 
- put comma */ ND_PRINT((ndo, ", ")); tptr++; tmp--; } break; case ISIS_TLV_MT_PORT_CAP: { ND_TCHECK2(*(tptr), 2); ND_PRINT((ndo, "\n\t RES: %d, MTID(s): %d", (EXTRACT_16BITS (tptr) >> 12), (EXTRACT_16BITS (tptr) & 0x0fff))); tmp = tmp-2; tptr = tptr+2; if (tmp) isis_print_mt_port_cap_subtlv(ndo, tptr, tmp); break; } case ISIS_TLV_MT_CAPABILITY: ND_TCHECK2(*(tptr), 2); ND_PRINT((ndo, "\n\t O: %d, RES: %d, MTID(s): %d", (EXTRACT_16BITS(tptr) >> 15) & 0x01, (EXTRACT_16BITS(tptr) >> 12) & 0x07, EXTRACT_16BITS(tptr) & 0x0fff)); tmp = tmp-2; tptr = tptr+2; if (tmp) isis_print_mt_capability_subtlv(ndo, tptr, tmp); break; case ISIS_TLV_TE_ROUTER_ID: ND_TCHECK2(*pptr, sizeof(struct in_addr)); ND_PRINT((ndo, "\n\t Traffic Engineering Router ID: %s", ipaddr_string(ndo, pptr))); break; case ISIS_TLV_IPADDR: while (tmp>=sizeof(struct in_addr)) { ND_TCHECK2(*tptr, sizeof(struct in_addr)); ND_PRINT((ndo, "\n\t IPv4 interface address: %s", ipaddr_string(ndo, tptr))); tptr += sizeof(struct in_addr); tmp -= sizeof(struct in_addr); } break; case ISIS_TLV_HOSTNAME: ND_PRINT((ndo, "\n\t Hostname: ")); if (fn_printzp(ndo, tptr, tmp, ndo->ndo_snapend)) goto trunctlv; break; case ISIS_TLV_SHARED_RISK_GROUP: if (tmp < NODE_ID_LEN) break; ND_TCHECK2(*tptr, NODE_ID_LEN); ND_PRINT((ndo, "\n\t IS Neighbor: %s", isis_print_id(tptr, NODE_ID_LEN))); tptr+=(NODE_ID_LEN); tmp-=(NODE_ID_LEN); if (tmp < 1) break; ND_TCHECK2(*tptr, 1); ND_PRINT((ndo, ", Flags: [%s]", ISIS_MASK_TLV_SHARED_RISK_GROUP(*tptr++) ? "numbered" : "unnumbered")); tmp--; if (tmp < sizeof(struct in_addr)) break; ND_TCHECK2(*tptr, sizeof(struct in_addr)); ND_PRINT((ndo, "\n\t IPv4 interface address: %s", ipaddr_string(ndo, tptr))); tptr+=sizeof(struct in_addr); tmp-=sizeof(struct in_addr); if (tmp < sizeof(struct in_addr)) break; ND_TCHECK2(*tptr, sizeof(struct in_addr)); ND_PRINT((ndo, "\n\t IPv4 neighbor address: %s", ipaddr_string(ndo, tptr))); tptr+=sizeof(struct in_addr); tmp-=sizeof(struct in_addr); while (tmp>=4) { ND_TCHECK2(*tptr, 4); ND_PRINT((ndo, "\n\t Link-ID: 0x%08x", EXTRACT_32BITS(tptr))); tptr+=4; tmp-=4; } break; case ISIS_TLV_LSP: tlv_lsp = (const struct isis_tlv_lsp *)tptr; while(tmp>=sizeof(struct isis_tlv_lsp)) { ND_TCHECK((tlv_lsp->lsp_id)[LSP_ID_LEN-1]); ND_PRINT((ndo, "\n\t lsp-id: %s", isis_print_id(tlv_lsp->lsp_id, LSP_ID_LEN))); ND_TCHECK2(tlv_lsp->sequence_number, 4); ND_PRINT((ndo, ", seq: 0x%08x", EXTRACT_32BITS(tlv_lsp->sequence_number))); ND_TCHECK2(tlv_lsp->remaining_lifetime, 2); ND_PRINT((ndo, ", lifetime: %5ds", EXTRACT_16BITS(tlv_lsp->remaining_lifetime))); ND_TCHECK2(tlv_lsp->checksum, 2); ND_PRINT((ndo, ", chksum: 0x%04x", EXTRACT_16BITS(tlv_lsp->checksum))); tmp-=sizeof(struct isis_tlv_lsp); tlv_lsp++; } break; case ISIS_TLV_CHECKSUM: if (tmp < ISIS_TLV_CHECKSUM_MINLEN) break; ND_TCHECK2(*tptr, ISIS_TLV_CHECKSUM_MINLEN); ND_PRINT((ndo, "\n\t checksum: 0x%04x ", EXTRACT_16BITS(tptr))); /* do not attempt to verify the checksum if it is zero * most likely a HMAC-MD5 TLV is also present and * to avoid conflicts the checksum TLV is zeroed. 
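* (editorial note: the signing router computes its HMAC-MD5 digest with this TLV set
* to zero, so a zero checksum alongside an authentication TLV is the expected
* state rather than a decoding error)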
* see rfc3358 for details */ osi_print_cksum(ndo, optr, EXTRACT_16BITS(tptr), tptr-optr, length); break; case ISIS_TLV_POI: if (tlv_len >= SYSTEM_ID_LEN + 1) { ND_TCHECK2(*tptr, SYSTEM_ID_LEN + 1); ND_PRINT((ndo, "\n\t Purge Originator System-ID: %s", isis_print_id(tptr + 1, SYSTEM_ID_LEN))); } if (tlv_len == 2 * SYSTEM_ID_LEN + 1) { ND_TCHECK2(*tptr, 2 * SYSTEM_ID_LEN + 1); ND_PRINT((ndo, "\n\t Received from System-ID: %s", isis_print_id(tptr + SYSTEM_ID_LEN + 1, SYSTEM_ID_LEN))); } break; case ISIS_TLV_MT_SUPPORTED: if (tmp < ISIS_TLV_MT_SUPPORTED_MINLEN) break; while (tmp>1) { /* length can only be a multiple of 2, otherwise there is something broken -> so decode down until length is 1 */ if (tmp!=1) { mt_len = isis_print_mtid(ndo, tptr, "\n\t "); if (mt_len == 0) /* did something go wrong ? */ goto trunctlv; tptr+=mt_len; tmp-=mt_len; } else { ND_PRINT((ndo, "\n\t invalid MT-ID")); break; } } break; case ISIS_TLV_RESTART_SIGNALING: /* first attempt to decode the flags */ if (tmp < ISIS_TLV_RESTART_SIGNALING_FLAGLEN) break; ND_TCHECK2(*tptr, ISIS_TLV_RESTART_SIGNALING_FLAGLEN); ND_PRINT((ndo, "\n\t Flags [%s]", bittok2str(isis_restart_flag_values, "none", *tptr))); tptr+=ISIS_TLV_RESTART_SIGNALING_FLAGLEN; tmp-=ISIS_TLV_RESTART_SIGNALING_FLAGLEN; /* is there anything other than the flags field? */ if (tmp == 0) break; if (tmp < ISIS_TLV_RESTART_SIGNALING_HOLDTIMELEN) break; ND_TCHECK2(*tptr, ISIS_TLV_RESTART_SIGNALING_HOLDTIMELEN); ND_PRINT((ndo, ", Remaining holding time %us", EXTRACT_16BITS(tptr))); tptr+=ISIS_TLV_RESTART_SIGNALING_HOLDTIMELEN; tmp-=ISIS_TLV_RESTART_SIGNALING_HOLDTIMELEN; /* is there an additional sysid field present ?*/ if (tmp == SYSTEM_ID_LEN) { ND_TCHECK2(*tptr, SYSTEM_ID_LEN); ND_PRINT((ndo, ", for %s", isis_print_id(tptr,SYSTEM_ID_LEN))); } break; case ISIS_TLV_IDRP_INFO: if (tmp < ISIS_TLV_IDRP_INFO_MINLEN) break; ND_TCHECK2(*tptr, ISIS_TLV_IDRP_INFO_MINLEN); ND_PRINT((ndo, "\n\t Inter-Domain Information Type: %s", tok2str(isis_subtlv_idrp_values, "Unknown (0x%02x)", *tptr))); switch (*tptr++) { case ISIS_SUBTLV_IDRP_ASN: ND_TCHECK2(*tptr, 2); /* fetch AS number */ ND_PRINT((ndo, "AS Number: %u", EXTRACT_16BITS(tptr))); break; case ISIS_SUBTLV_IDRP_LOCAL: case ISIS_SUBTLV_IDRP_RES: default: if (!print_unknown_data(ndo, tptr, "\n\t ", tlv_len - 1)) return(0); break; } break; case ISIS_TLV_LSP_BUFFERSIZE: if (tmp < ISIS_TLV_LSP_BUFFERSIZE_MINLEN) break; ND_TCHECK2(*tptr, ISIS_TLV_LSP_BUFFERSIZE_MINLEN); ND_PRINT((ndo, "\n\t LSP Buffersize: %u", EXTRACT_16BITS(tptr))); break; case ISIS_TLV_PART_DIS: while (tmp >= SYSTEM_ID_LEN) { ND_TCHECK2(*tptr, SYSTEM_ID_LEN); ND_PRINT((ndo, "\n\t %s", isis_print_id(tptr, SYSTEM_ID_LEN))); tptr+=SYSTEM_ID_LEN; tmp-=SYSTEM_ID_LEN; } break; case ISIS_TLV_PREFIX_NEIGH: if (tmp < sizeof(struct isis_metric_block)) break; ND_TCHECK2(*tptr, sizeof(struct isis_metric_block)); ND_PRINT((ndo, "\n\t Metric Block")); isis_print_metric_block(ndo, (const struct isis_metric_block *)tptr); tptr+=sizeof(struct isis_metric_block); tmp-=sizeof(struct isis_metric_block); while(tmp>0) { ND_TCHECK2(*tptr, 1); prefix_len=*tptr++; /* read out prefix length in semioctets*/ if (prefix_len < 2) { ND_PRINT((ndo, "\n\t\tAddress: prefix length %u < 2", prefix_len)); break; } tmp--; if (tmp < prefix_len/2) break; ND_TCHECK2(*tptr, prefix_len / 2); ND_PRINT((ndo, "\n\t\tAddress: %s/%u", isonsap_string(ndo, tptr, prefix_len / 2), prefix_len * 4)); tptr+=prefix_len/2; tmp-=prefix_len/2; } break; case ISIS_TLV_IIH_SEQNR: if (tmp < ISIS_TLV_IIH_SEQNR_MINLEN) 
break; ND_TCHECK2(*tptr, ISIS_TLV_IIH_SEQNR_MINLEN); /* check if four bytes are on the wire */ ND_PRINT((ndo, "\n\t Sequence number: %u", EXTRACT_32BITS(tptr))); break; case ISIS_TLV_VENDOR_PRIVATE: if (tmp < ISIS_TLV_VENDOR_PRIVATE_MINLEN) break; ND_TCHECK2(*tptr, ISIS_TLV_VENDOR_PRIVATE_MINLEN); /* check if enough byte for a full oui */ vendor_id = EXTRACT_24BITS(tptr); ND_PRINT((ndo, "\n\t Vendor: %s (%u)", tok2str(oui_values, "Unknown", vendor_id), vendor_id)); tptr+=3; tmp-=3; if (tmp > 0) /* hexdump the rest */ if (!print_unknown_data(ndo, tptr, "\n\t\t", tmp)) return(0); break; /* * FIXME those are the defined TLVs that lack a decoder * you are welcome to contribute code ;-) */ case ISIS_TLV_DECNET_PHASE4: case ISIS_TLV_LUCENT_PRIVATE: case ISIS_TLV_IPAUTH: case ISIS_TLV_NORTEL_PRIVATE1: case ISIS_TLV_NORTEL_PRIVATE2: default: if (ndo->ndo_vflag <= 1) { if (!print_unknown_data(ndo, pptr, "\n\t\t", tlv_len)) return(0); } break; } /* do we want to see an additionally hexdump ? */ if (ndo->ndo_vflag> 1) { if (!print_unknown_data(ndo, pptr, "\n\t ", tlv_len)) return(0); } pptr += tlv_len; packet_len -= tlv_len; } if (packet_len != 0) { ND_PRINT((ndo, "\n\t %u straggler bytes", packet_len)); } return (1); trunc: ND_PRINT((ndo, "%s", tstr)); return (1); trunctlv: ND_PRINT((ndo, "\n\t\t")); ND_PRINT((ndo, "%s", tstr)); return(1); } static void osi_print_cksum(netdissect_options *ndo, const uint8_t *pptr, uint16_t checksum, int checksum_offset, u_int length) { uint16_t calculated_checksum; /* do not attempt to verify the checksum if it is zero, * if the offset is nonsense, * or the base pointer is not sane */ if (!checksum || checksum_offset < 0 || !ND_TTEST2(*(pptr + checksum_offset), 2) || (u_int)checksum_offset > length || !ND_TTEST2(*pptr, length)) { ND_PRINT((ndo, " (unverified)")); } else { #if 0 printf("\nosi_print_cksum: %p %u %u\n", pptr, checksum_offset, length); #endif calculated_checksum = create_osi_cksum(pptr, checksum_offset, length); if (checksum == calculated_checksum) { ND_PRINT((ndo, " (correct)")); } else { ND_PRINT((ndo, " (incorrect should be 0x%04x)", calculated_checksum)); } } } /* * Local Variables: * c-style: whitesmith * c-basic-offset: 8 * End: */
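/*
 * Editorial side note on the checksum math used by osi_print_cksum() above:
 * CLNP, ES-IS and IS-IS all carry the ISO 8473 / RFC 1008 Fletcher checksum.
 * The helper below is a minimal independent sketch of the verification step
 * only -- the function name is made up for this example and it is NOT
 * tcpdump's create_osi_cksum(), which instead derives the expected checksum
 * octets for a given offset.  A received header verifies when both running
 * sums come out zero modulo 255 with the stored checksum octets included in
 * the scan; an all-zero checksum field means "checksum not used" and is
 * skipped by the caller, as osi_print_cksum() does.
 */
static int
osi_cksum_is_valid(const uint8_t *hdr, u_int len)
{
        u_int c0 = 0, c1 = 0, i;

        for (i = 0; i < len; i++) {
                c0 = (c0 + hdr[i]) % 255;   /* running sum of octets */
                c1 = (c1 + c0) % 255;       /* running sum of the partial sums */
        }
        return (c0 == 0 && c1 == 0);        /* both must vanish mod 255 */
}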
/* * Copyright (c) 1992, 1993, 1994, 1995, 1996 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that: (1) source code distributions * retain the above copyright notice and this paragraph in its entirety, (2) * distributions including binary code include the above copyright notice and * this paragraph in its entirety in the documentation or other materials * provided with the distribution, and (3) all advertising materials mentioning * features or use of this software display the following acknowledgement: * ``This product includes software developed by the University of California, * Lawrence Berkeley Laboratory and its contributors.'' Neither the name of * the University nor the names of its contributors may be used to endorse * or promote products derived from this software without specific prior * written permission. * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. * * Original code by Matt Thomas, Digital Equipment Corporation * * Extensively modified by Hannes Gredler (hannes@gredler.at) for more * complete IS-IS & CLNP support. */ /* \summary: ISO CLNS, ESIS, and ISIS printer */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include <netdissect-stdinc.h> #include <string.h> #include "netdissect.h" #include "addrtoname.h" #include "ether.h" #include "nlpid.h" #include "extract.h" #include "gmpls.h" #include "oui.h" #include "signature.h" static const char tstr[] = " [|isis]"; /* * IS-IS is defined in ISO 10589. Look there for protocol definitions. */ #define SYSTEM_ID_LEN ETHER_ADDR_LEN #define NODE_ID_LEN SYSTEM_ID_LEN+1 #define LSP_ID_LEN SYSTEM_ID_LEN+2 #define ISIS_VERSION 1 #define ESIS_VERSION 1 #define CLNP_VERSION 1 #define ISIS_PDU_TYPE_MASK 0x1F #define ESIS_PDU_TYPE_MASK 0x1F #define CLNP_PDU_TYPE_MASK 0x1F #define CLNP_FLAG_MASK 0xE0 #define ISIS_LAN_PRIORITY_MASK 0x7F #define ISIS_PDU_L1_LAN_IIH 15 #define ISIS_PDU_L2_LAN_IIH 16 #define ISIS_PDU_PTP_IIH 17 #define ISIS_PDU_L1_LSP 18 #define ISIS_PDU_L2_LSP 20 #define ISIS_PDU_L1_CSNP 24 #define ISIS_PDU_L2_CSNP 25 #define ISIS_PDU_L1_PSNP 26 #define ISIS_PDU_L2_PSNP 27 static const struct tok isis_pdu_values[] = { { ISIS_PDU_L1_LAN_IIH, "L1 Lan IIH"}, { ISIS_PDU_L2_LAN_IIH, "L2 Lan IIH"}, { ISIS_PDU_PTP_IIH, "p2p IIH"}, { ISIS_PDU_L1_LSP, "L1 LSP"}, { ISIS_PDU_L2_LSP, "L2 LSP"}, { ISIS_PDU_L1_CSNP, "L1 CSNP"}, { ISIS_PDU_L2_CSNP, "L2 CSNP"}, { ISIS_PDU_L1_PSNP, "L1 PSNP"}, { ISIS_PDU_L2_PSNP, "L2 PSNP"}, { 0, NULL} }; /* * A TLV is a tuple of a type, length and a value and is normally used for * encoding information in all sorts of places. This is an enumeration of * the well known types. 
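 *
 * (editorial reminder of the wire format: every TLV is a plain octet triple --
 *
 *    +---------+---------+--------//--------+
 *    |  type   | length  |      value       |
 *    | 1 octet | 1 octet | 'length' octets  |
 *    +---------+---------+--------//--------+
 *
 * which is why the TLV walk below always peels two octets off the PDU
 * before dispatching on the type.)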
* * list taken from rfc3359 plus some memory from veterans ;-) */ #define ISIS_TLV_AREA_ADDR 1 /* iso10589 */ #define ISIS_TLV_IS_REACH 2 /* iso10589 */ #define ISIS_TLV_ESNEIGH 3 /* iso10589 */ #define ISIS_TLV_PART_DIS 4 /* iso10589 */ #define ISIS_TLV_PREFIX_NEIGH 5 /* iso10589 */ #define ISIS_TLV_ISNEIGH 6 /* iso10589 */ #define ISIS_TLV_ISNEIGH_VARLEN 7 /* iso10589 */ #define ISIS_TLV_PADDING 8 /* iso10589 */ #define ISIS_TLV_LSP 9 /* iso10589 */ #define ISIS_TLV_AUTH 10 /* iso10589, rfc3567 */ #define ISIS_TLV_CHECKSUM 12 /* rfc3358 */ #define ISIS_TLV_CHECKSUM_MINLEN 2 #define ISIS_TLV_POI 13 /* rfc6232 */ #define ISIS_TLV_LSP_BUFFERSIZE 14 /* iso10589 rev2 */ #define ISIS_TLV_LSP_BUFFERSIZE_MINLEN 2 #define ISIS_TLV_EXT_IS_REACH 22 /* draft-ietf-isis-traffic-05 */ #define ISIS_TLV_IS_ALIAS_ID 24 /* draft-ietf-isis-ext-lsp-frags-02 */ #define ISIS_TLV_DECNET_PHASE4 42 #define ISIS_TLV_LUCENT_PRIVATE 66 #define ISIS_TLV_INT_IP_REACH 128 /* rfc1195, rfc2966 */ #define ISIS_TLV_PROTOCOLS 129 /* rfc1195 */ #define ISIS_TLV_EXT_IP_REACH 130 /* rfc1195, rfc2966 */ #define ISIS_TLV_IDRP_INFO 131 /* rfc1195 */ #define ISIS_TLV_IDRP_INFO_MINLEN 1 #define ISIS_TLV_IPADDR 132 /* rfc1195 */ #define ISIS_TLV_IPAUTH 133 /* rfc1195 */ #define ISIS_TLV_TE_ROUTER_ID 134 /* draft-ietf-isis-traffic-05 */ #define ISIS_TLV_EXTD_IP_REACH 135 /* draft-ietf-isis-traffic-05 */ #define ISIS_TLV_HOSTNAME 137 /* rfc2763 */ #define ISIS_TLV_SHARED_RISK_GROUP 138 /* draft-ietf-isis-gmpls-extensions */ #define ISIS_TLV_MT_PORT_CAP 143 /* rfc6165 */ #define ISIS_TLV_MT_CAPABILITY 144 /* rfc6329 */ #define ISIS_TLV_NORTEL_PRIVATE1 176 #define ISIS_TLV_NORTEL_PRIVATE2 177 #define ISIS_TLV_RESTART_SIGNALING 211 /* rfc3847 */ #define ISIS_TLV_RESTART_SIGNALING_FLAGLEN 1 #define ISIS_TLV_RESTART_SIGNALING_HOLDTIMELEN 2 #define ISIS_TLV_MT_IS_REACH 222 /* draft-ietf-isis-wg-multi-topology-05 */ #define ISIS_TLV_MT_SUPPORTED 229 /* draft-ietf-isis-wg-multi-topology-05 */ #define ISIS_TLV_MT_SUPPORTED_MINLEN 2 #define ISIS_TLV_IP6ADDR 232 /* draft-ietf-isis-ipv6-02 */ #define ISIS_TLV_MT_IP_REACH 235 /* draft-ietf-isis-wg-multi-topology-05 */ #define ISIS_TLV_IP6_REACH 236 /* draft-ietf-isis-ipv6-02 */ #define ISIS_TLV_MT_IP6_REACH 237 /* draft-ietf-isis-wg-multi-topology-05 */ #define ISIS_TLV_PTP_ADJ 240 /* rfc3373 */ #define ISIS_TLV_IIH_SEQNR 241 /* draft-shen-isis-iih-sequence-00 */ #define ISIS_TLV_IIH_SEQNR_MINLEN 4 #define ISIS_TLV_VENDOR_PRIVATE 250 /* draft-ietf-isis-experimental-tlv-01 */ #define ISIS_TLV_VENDOR_PRIVATE_MINLEN 3 static const struct tok isis_tlv_values[] = { { ISIS_TLV_AREA_ADDR, "Area address(es)"}, { ISIS_TLV_IS_REACH, "IS Reachability"}, { ISIS_TLV_ESNEIGH, "ES Neighbor(s)"}, { ISIS_TLV_PART_DIS, "Partition DIS"}, { ISIS_TLV_PREFIX_NEIGH, "Prefix Neighbors"}, { ISIS_TLV_ISNEIGH, "IS Neighbor(s)"}, { ISIS_TLV_ISNEIGH_VARLEN, "IS Neighbor(s) (variable length)"}, { ISIS_TLV_PADDING, "Padding"}, { ISIS_TLV_LSP, "LSP entries"}, { ISIS_TLV_AUTH, "Authentication"}, { ISIS_TLV_CHECKSUM, "Checksum"}, { ISIS_TLV_POI, "Purge Originator Identifier"}, { ISIS_TLV_LSP_BUFFERSIZE, "LSP Buffersize"}, { ISIS_TLV_EXT_IS_REACH, "Extended IS Reachability"}, { ISIS_TLV_IS_ALIAS_ID, "IS Alias ID"}, { ISIS_TLV_DECNET_PHASE4, "DECnet Phase IV"}, { ISIS_TLV_LUCENT_PRIVATE, "Lucent Proprietary"}, { ISIS_TLV_INT_IP_REACH, "IPv4 Internal Reachability"}, { ISIS_TLV_PROTOCOLS, "Protocols supported"}, { ISIS_TLV_EXT_IP_REACH, "IPv4 External Reachability"}, { ISIS_TLV_IDRP_INFO, "Inter-Domain Information Type"}, { 
ISIS_TLV_IPADDR, "IPv4 Interface address(es)"}, { ISIS_TLV_IPAUTH, "IPv4 authentication (deprecated)"}, { ISIS_TLV_TE_ROUTER_ID, "Traffic Engineering Router ID"}, { ISIS_TLV_EXTD_IP_REACH, "Extended IPv4 Reachability"}, { ISIS_TLV_SHARED_RISK_GROUP, "Shared Risk Link Group"}, { ISIS_TLV_MT_PORT_CAP, "Multi-Topology-Aware Port Capability"}, { ISIS_TLV_MT_CAPABILITY, "Multi-Topology Capability"}, { ISIS_TLV_NORTEL_PRIVATE1, "Nortel Proprietary"}, { ISIS_TLV_NORTEL_PRIVATE2, "Nortel Proprietary"}, { ISIS_TLV_HOSTNAME, "Hostname"}, { ISIS_TLV_RESTART_SIGNALING, "Restart Signaling"}, { ISIS_TLV_MT_IS_REACH, "Multi Topology IS Reachability"}, { ISIS_TLV_MT_SUPPORTED, "Multi Topology"}, { ISIS_TLV_IP6ADDR, "IPv6 Interface address(es)"}, { ISIS_TLV_MT_IP_REACH, "Multi-Topology IPv4 Reachability"}, { ISIS_TLV_IP6_REACH, "IPv6 reachability"}, { ISIS_TLV_MT_IP6_REACH, "Multi-Topology IP6 Reachability"}, { ISIS_TLV_PTP_ADJ, "Point-to-point Adjacency State"}, { ISIS_TLV_IIH_SEQNR, "Hello PDU Sequence Number"}, { ISIS_TLV_VENDOR_PRIVATE, "Vendor Private"}, { 0, NULL } }; #define ESIS_OPTION_PROTOCOLS 129 #define ESIS_OPTION_QOS_MAINTENANCE 195 /* iso9542 */ #define ESIS_OPTION_SECURITY 197 /* iso9542 */ #define ESIS_OPTION_ES_CONF_TIME 198 /* iso9542 */ #define ESIS_OPTION_PRIORITY 205 /* iso9542 */ #define ESIS_OPTION_ADDRESS_MASK 225 /* iso9542 */ #define ESIS_OPTION_SNPA_MASK 226 /* iso9542 */ static const struct tok esis_option_values[] = { { ESIS_OPTION_PROTOCOLS, "Protocols supported"}, { ESIS_OPTION_QOS_MAINTENANCE, "QoS Maintenance" }, { ESIS_OPTION_SECURITY, "Security" }, { ESIS_OPTION_ES_CONF_TIME, "ES Configuration Time" }, { ESIS_OPTION_PRIORITY, "Priority" }, { ESIS_OPTION_ADDRESS_MASK, "Address Mask" }, { ESIS_OPTION_SNPA_MASK, "SNPA Mask" }, { 0, NULL } }; #define CLNP_OPTION_DISCARD_REASON 193 #define CLNP_OPTION_QOS_MAINTENANCE 195 /* iso8473 */ #define CLNP_OPTION_SECURITY 197 /* iso8473 */ #define CLNP_OPTION_SOURCE_ROUTING 200 /* iso8473 */ #define CLNP_OPTION_ROUTE_RECORDING 203 /* iso8473 */ #define CLNP_OPTION_PADDING 204 /* iso8473 */ #define CLNP_OPTION_PRIORITY 205 /* iso8473 */ static const struct tok clnp_option_values[] = { { CLNP_OPTION_DISCARD_REASON, "Discard Reason"}, { CLNP_OPTION_PRIORITY, "Priority"}, { CLNP_OPTION_QOS_MAINTENANCE, "QoS Maintenance"}, { CLNP_OPTION_SECURITY, "Security"}, { CLNP_OPTION_SOURCE_ROUTING, "Source Routing"}, { CLNP_OPTION_ROUTE_RECORDING, "Route Recording"}, { CLNP_OPTION_PADDING, "Padding"}, { 0, NULL } }; static const struct tok clnp_option_rfd_class_values[] = { { 0x0, "General"}, { 0x8, "Address"}, { 0x9, "Source Routeing"}, { 0xa, "Lifetime"}, { 0xb, "PDU Discarded"}, { 0xc, "Reassembly"}, { 0, NULL } }; static const struct tok clnp_option_rfd_general_values[] = { { 0x0, "Reason not specified"}, { 0x1, "Protocol procedure error"}, { 0x2, "Incorrect checksum"}, { 0x3, "PDU discarded due to congestion"}, { 0x4, "Header syntax error (cannot be parsed)"}, { 0x5, "Segmentation needed but not permitted"}, { 0x6, "Incomplete PDU received"}, { 0x7, "Duplicate option"}, { 0, NULL } }; static const struct tok clnp_option_rfd_address_values[] = { { 0x0, "Destination address unreachable"}, { 0x1, "Destination address unknown"}, { 0, NULL } }; static const struct tok clnp_option_rfd_source_routeing_values[] = { { 0x0, "Unspecified source routeing error"}, { 0x1, "Syntax error in source routeing field"}, { 0x2, "Unknown address in source routeing field"}, { 0x3, "Path not acceptable"}, { 0, NULL } }; static const struct tok
clnp_option_rfd_lifetime_values[] = { { 0x0, "Lifetime expired while data unit in transit"}, { 0x1, "Lifetime expired during reassembly"}, { 0, NULL } }; static const struct tok clnp_option_rfd_pdu_discard_values[] = { { 0x0, "Unsupported option not specified"}, { 0x1, "Unsupported protocol version"}, { 0x2, "Unsupported security option"}, { 0x3, "Unsupported source routeing option"}, { 0x4, "Unsupported recording of route option"}, { 0, NULL } }; static const struct tok clnp_option_rfd_reassembly_values[] = { { 0x0, "Reassembly interference"}, { 0, NULL } }; /* array of 16 error-classes */ static const struct tok *clnp_option_rfd_error_class[] = { clnp_option_rfd_general_values, NULL, NULL, NULL, NULL, NULL, NULL, NULL, clnp_option_rfd_address_values, clnp_option_rfd_source_routeing_values, clnp_option_rfd_lifetime_values, clnp_option_rfd_pdu_discard_values, clnp_option_rfd_reassembly_values, NULL, NULL, NULL }; #define CLNP_OPTION_OPTION_QOS_MASK 0x3f #define CLNP_OPTION_SCOPE_MASK 0xc0 #define CLNP_OPTION_SCOPE_SA_SPEC 0x40 #define CLNP_OPTION_SCOPE_DA_SPEC 0x80 #define CLNP_OPTION_SCOPE_GLOBAL 0xc0 static const struct tok clnp_option_scope_values[] = { { CLNP_OPTION_SCOPE_SA_SPEC, "Source Address Specific"}, { CLNP_OPTION_SCOPE_DA_SPEC, "Destination Address Specific"}, { CLNP_OPTION_SCOPE_GLOBAL, "Globally unique"}, { 0, NULL } }; static const struct tok clnp_option_sr_rr_values[] = { { 0x0, "partial"}, { 0x1, "complete"}, { 0, NULL } }; static const struct tok clnp_option_sr_rr_string_values[] = { { CLNP_OPTION_SOURCE_ROUTING, "source routing"}, { CLNP_OPTION_ROUTE_RECORDING, "recording of route in progress"}, { 0, NULL } }; static const struct tok clnp_option_qos_global_values[] = { { 0x20, "reserved"}, { 0x10, "sequencing vs. delay"}, { 0x08, "congested"}, { 0x04, "delay vs. cost"}, { 0x02, "error vs. delay"}, { 0x01, "error vs. 
cost"}, { 0, NULL } }; #define ISIS_SUBTLV_EXT_IS_REACH_ADMIN_GROUP 3 /* draft-ietf-isis-traffic-05 */ #define ISIS_SUBTLV_EXT_IS_REACH_LINK_LOCAL_REMOTE_ID 4 /* rfc4205 */ #define ISIS_SUBTLV_EXT_IS_REACH_LINK_REMOTE_ID 5 /* draft-ietf-isis-traffic-05 */ #define ISIS_SUBTLV_EXT_IS_REACH_IPV4_INTF_ADDR 6 /* draft-ietf-isis-traffic-05 */ #define ISIS_SUBTLV_EXT_IS_REACH_IPV4_NEIGHBOR_ADDR 8 /* draft-ietf-isis-traffic-05 */ #define ISIS_SUBTLV_EXT_IS_REACH_MAX_LINK_BW 9 /* draft-ietf-isis-traffic-05 */ #define ISIS_SUBTLV_EXT_IS_REACH_RESERVABLE_BW 10 /* draft-ietf-isis-traffic-05 */ #define ISIS_SUBTLV_EXT_IS_REACH_UNRESERVED_BW 11 /* rfc4124 */ #define ISIS_SUBTLV_EXT_IS_REACH_BW_CONSTRAINTS_OLD 12 /* draft-ietf-tewg-diff-te-proto-06 */ #define ISIS_SUBTLV_EXT_IS_REACH_TE_METRIC 18 /* draft-ietf-isis-traffic-05 */ #define ISIS_SUBTLV_EXT_IS_REACH_LINK_ATTRIBUTE 19 /* draft-ietf-isis-link-attr-01 */ #define ISIS_SUBTLV_EXT_IS_REACH_LINK_PROTECTION_TYPE 20 /* rfc4205 */ #define ISIS_SUBTLV_EXT_IS_REACH_INTF_SW_CAP_DESCR 21 /* rfc4205 */ #define ISIS_SUBTLV_EXT_IS_REACH_BW_CONSTRAINTS 22 /* rfc4124 */ #define ISIS_SUBTLV_SPB_METRIC 29 /* rfc6329 */ static const struct tok isis_ext_is_reach_subtlv_values[] = { { ISIS_SUBTLV_EXT_IS_REACH_ADMIN_GROUP, "Administrative groups" }, { ISIS_SUBTLV_EXT_IS_REACH_LINK_LOCAL_REMOTE_ID, "Link Local/Remote Identifier" }, { ISIS_SUBTLV_EXT_IS_REACH_LINK_REMOTE_ID, "Link Remote Identifier" }, { ISIS_SUBTLV_EXT_IS_REACH_IPV4_INTF_ADDR, "IPv4 interface address" }, { ISIS_SUBTLV_EXT_IS_REACH_IPV4_NEIGHBOR_ADDR, "IPv4 neighbor address" }, { ISIS_SUBTLV_EXT_IS_REACH_MAX_LINK_BW, "Maximum link bandwidth" }, { ISIS_SUBTLV_EXT_IS_REACH_RESERVABLE_BW, "Reservable link bandwidth" }, { ISIS_SUBTLV_EXT_IS_REACH_UNRESERVED_BW, "Unreserved bandwidth" }, { ISIS_SUBTLV_EXT_IS_REACH_TE_METRIC, "Traffic Engineering Metric" }, { ISIS_SUBTLV_EXT_IS_REACH_LINK_ATTRIBUTE, "Link Attribute" }, { ISIS_SUBTLV_EXT_IS_REACH_LINK_PROTECTION_TYPE, "Link Protection Type" }, { ISIS_SUBTLV_EXT_IS_REACH_INTF_SW_CAP_DESCR, "Interface Switching Capability" }, { ISIS_SUBTLV_EXT_IS_REACH_BW_CONSTRAINTS_OLD, "Bandwidth Constraints (old)" }, { ISIS_SUBTLV_EXT_IS_REACH_BW_CONSTRAINTS, "Bandwidth Constraints" }, { ISIS_SUBTLV_SPB_METRIC, "SPB Metric" }, { 250, "Reserved for cisco specific extensions" }, { 251, "Reserved for cisco specific extensions" }, { 252, "Reserved for cisco specific extensions" }, { 253, "Reserved for cisco specific extensions" }, { 254, "Reserved for cisco specific extensions" }, { 255, "Reserved for future expansion" }, { 0, NULL } }; #define ISIS_SUBTLV_EXTD_IP_REACH_ADMIN_TAG32 1 /* draft-ietf-isis-admin-tags-01 */ #define ISIS_SUBTLV_EXTD_IP_REACH_ADMIN_TAG64 2 /* draft-ietf-isis-admin-tags-01 */ #define ISIS_SUBTLV_EXTD_IP_REACH_MGMT_PREFIX_COLOR 117 /* draft-ietf-isis-wg-multi-topology-05 */ static const struct tok isis_ext_ip_reach_subtlv_values[] = { { ISIS_SUBTLV_EXTD_IP_REACH_ADMIN_TAG32, "32-Bit Administrative tag" }, { ISIS_SUBTLV_EXTD_IP_REACH_ADMIN_TAG64, "64-Bit Administrative tag" }, { ISIS_SUBTLV_EXTD_IP_REACH_MGMT_PREFIX_COLOR, "Management Prefix Color" }, { 0, NULL } }; static const struct tok isis_subtlv_link_attribute_values[] = { { 0x01, "Local Protection Available" }, { 0x02, "Link excluded from local protection path" }, { 0x04, "Local maintenance required"}, { 0, NULL } }; #define ISIS_SUBTLV_AUTH_SIMPLE 1 #define ISIS_SUBTLV_AUTH_GENERIC 3 /* rfc 5310 */ #define ISIS_SUBTLV_AUTH_MD5 54 #define ISIS_SUBTLV_AUTH_MD5_LEN 16 #define 
ISIS_SUBTLV_AUTH_PRIVATE 255 static const struct tok isis_subtlv_auth_values[] = { { ISIS_SUBTLV_AUTH_SIMPLE, "simple text password"}, { ISIS_SUBTLV_AUTH_GENERIC, "Generic Crypto key-id"}, { ISIS_SUBTLV_AUTH_MD5, "HMAC-MD5 password"}, { ISIS_SUBTLV_AUTH_PRIVATE, "Routing Domain private password"}, { 0, NULL } }; #define ISIS_SUBTLV_IDRP_RES 0 #define ISIS_SUBTLV_IDRP_LOCAL 1 #define ISIS_SUBTLV_IDRP_ASN 2 static const struct tok isis_subtlv_idrp_values[] = { { ISIS_SUBTLV_IDRP_RES, "Reserved"}, { ISIS_SUBTLV_IDRP_LOCAL, "Routing-Domain Specific"}, { ISIS_SUBTLV_IDRP_ASN, "AS Number Tag"}, { 0, NULL} }; #define ISIS_SUBTLV_SPB_MCID 4 #define ISIS_SUBTLV_SPB_DIGEST 5 #define ISIS_SUBTLV_SPB_BVID 6 #define ISIS_SUBTLV_SPB_INSTANCE 1 #define ISIS_SUBTLV_SPBM_SI 3 #define ISIS_SPB_MCID_LEN 51 #define ISIS_SUBTLV_SPB_MCID_MIN_LEN 102 #define ISIS_SUBTLV_SPB_DIGEST_MIN_LEN 33 #define ISIS_SUBTLV_SPB_BVID_MIN_LEN 6 #define ISIS_SUBTLV_SPB_INSTANCE_MIN_LEN 19 #define ISIS_SUBTLV_SPB_INSTANCE_VLAN_TUPLE_LEN 8 static const struct tok isis_mt_port_cap_subtlv_values[] = { { ISIS_SUBTLV_SPB_MCID, "SPB MCID" }, { ISIS_SUBTLV_SPB_DIGEST, "SPB Digest" }, { ISIS_SUBTLV_SPB_BVID, "SPB BVID" }, { 0, NULL } }; static const struct tok isis_mt_capability_subtlv_values[] = { { ISIS_SUBTLV_SPB_INSTANCE, "SPB Instance" }, { ISIS_SUBTLV_SPBM_SI, "SPBM Service Identifier and Unicast Address" }, { 0, NULL } }; struct isis_spb_mcid { uint8_t format_id; uint8_t name[32]; uint8_t revision_lvl[2]; uint8_t digest[16]; }; struct isis_subtlv_spb_mcid { struct isis_spb_mcid mcid; struct isis_spb_mcid aux_mcid; }; struct isis_subtlv_spb_instance { uint8_t cist_root_id[8]; uint8_t cist_external_root_path_cost[4]; uint8_t bridge_priority[2]; uint8_t spsourceid[4]; uint8_t no_of_trees; }; #define CLNP_SEGMENT_PART 0x80 #define CLNP_MORE_SEGMENTS 0x40 #define CLNP_REQUEST_ER 0x20 static const struct tok clnp_flag_values[] = { { CLNP_SEGMENT_PART, "Segmentation permitted"}, { CLNP_MORE_SEGMENTS, "more Segments"}, { CLNP_REQUEST_ER, "request Error Report"}, { 0, NULL} }; #define ISIS_MASK_LSP_OL_BIT(x) ((x)&0x4) #define ISIS_MASK_LSP_ISTYPE_BITS(x) ((x)&0x3) #define ISIS_MASK_LSP_PARTITION_BIT(x) ((x)&0x80) #define ISIS_MASK_LSP_ATT_BITS(x) ((x)&0x78) #define ISIS_MASK_LSP_ATT_ERROR_BIT(x) ((x)&0x40) #define ISIS_MASK_LSP_ATT_EXPENSE_BIT(x) ((x)&0x20) #define ISIS_MASK_LSP_ATT_DELAY_BIT(x) ((x)&0x10) #define ISIS_MASK_LSP_ATT_DEFAULT_BIT(x) ((x)&0x8) #define ISIS_MASK_MTID(x) ((x)&0x0fff) #define ISIS_MASK_MTFLAGS(x) ((x)&0xf000) static const struct tok isis_mt_flag_values[] = { { 0x4000, "ATT bit set"}, { 0x8000, "Overload bit set"}, { 0, NULL} }; #define ISIS_MASK_TLV_EXTD_IP_UPDOWN(x) ((x)&0x80) #define ISIS_MASK_TLV_EXTD_IP_SUBTLV(x) ((x)&0x40) #define ISIS_MASK_TLV_EXTD_IP6_IE(x) ((x)&0x40) #define ISIS_MASK_TLV_EXTD_IP6_SUBTLV(x) ((x)&0x20) #define ISIS_LSP_TLV_METRIC_SUPPORTED(x) ((x)&0x80) #define ISIS_LSP_TLV_METRIC_IE(x) ((x)&0x40) #define ISIS_LSP_TLV_METRIC_UPDOWN(x) ((x)&0x80) #define ISIS_LSP_TLV_METRIC_VALUE(x) ((x)&0x3f) #define ISIS_MASK_TLV_SHARED_RISK_GROUP(x) ((x)&0x1) static const struct tok isis_mt_values[] = { { 0, "IPv4 unicast"}, { 1, "In-Band Management"}, { 2, "IPv6 unicast"}, { 3, "Multicast"}, { 4095, "Development, Experimental or Proprietary"}, { 0, NULL } }; static const struct tok isis_iih_circuit_type_values[] = { { 1, "Level 1 only"}, { 2, "Level 2 only"}, { 3, "Level 1, Level 2"}, { 0, NULL} }; #define ISIS_LSP_TYPE_UNUSED0 0 #define ISIS_LSP_TYPE_LEVEL_1 1 #define ISIS_LSP_TYPE_UNUSED2 2 #define 
ISIS_LSP_TYPE_LEVEL_2 3 static const struct tok isis_lsp_istype_values[] = { { ISIS_LSP_TYPE_UNUSED0, "Unused 0x0 (invalid)"}, { ISIS_LSP_TYPE_LEVEL_1, "L1 IS"}, { ISIS_LSP_TYPE_UNUSED2, "Unused 0x2 (invalid)"}, { ISIS_LSP_TYPE_LEVEL_2, "L2 IS"}, { 0, NULL } }; /* * Katz's point to point adjacency TLV uses codes to tell us the state of * the remote adjacency. Enumerate them. */ #define ISIS_PTP_ADJ_UP 0 #define ISIS_PTP_ADJ_INIT 1 #define ISIS_PTP_ADJ_DOWN 2 static const struct tok isis_ptp_adjancey_values[] = { { ISIS_PTP_ADJ_UP, "Up" }, { ISIS_PTP_ADJ_INIT, "Initializing" }, { ISIS_PTP_ADJ_DOWN, "Down" }, { 0, NULL} }; struct isis_tlv_ptp_adj { uint8_t adjacency_state; uint8_t extd_local_circuit_id[4]; uint8_t neighbor_sysid[SYSTEM_ID_LEN]; uint8_t neighbor_extd_local_circuit_id[4]; }; static void osi_print_cksum(netdissect_options *, const uint8_t *pptr, uint16_t checksum, int checksum_offset, u_int length); static int clnp_print(netdissect_options *, const uint8_t *, u_int); static void esis_print(netdissect_options *, const uint8_t *, u_int); static int isis_print(netdissect_options *, const uint8_t *, u_int); struct isis_metric_block { uint8_t metric_default; uint8_t metric_delay; uint8_t metric_expense; uint8_t metric_error; }; struct isis_tlv_is_reach { struct isis_metric_block isis_metric_block; uint8_t neighbor_nodeid[NODE_ID_LEN]; }; struct isis_tlv_es_reach { struct isis_metric_block isis_metric_block; uint8_t neighbor_sysid[SYSTEM_ID_LEN]; }; struct isis_tlv_ip_reach { struct isis_metric_block isis_metric_block; uint8_t prefix[4]; uint8_t mask[4]; }; static const struct tok isis_is_reach_virtual_values[] = { { 0, "IsNotVirtual"}, { 1, "IsVirtual"}, { 0, NULL } }; static const struct tok isis_restart_flag_values[] = { { 0x1, "Restart Request"}, { 0x2, "Restart Acknowledgement"}, { 0x4, "Suppress adjacency advertisement"}, { 0, NULL } }; struct isis_common_header { uint8_t nlpid; uint8_t fixed_len; uint8_t version; /* Protocol version */ uint8_t id_length; uint8_t pdu_type; /* 3 MSbits are reserved */ uint8_t pdu_version; /* Packet format version */ uint8_t reserved; uint8_t max_area; }; struct isis_iih_lan_header { uint8_t circuit_type; uint8_t source_id[SYSTEM_ID_LEN]; uint8_t holding_time[2]; uint8_t pdu_len[2]; uint8_t priority; uint8_t lan_id[NODE_ID_LEN]; }; struct isis_iih_ptp_header { uint8_t circuit_type; uint8_t source_id[SYSTEM_ID_LEN]; uint8_t holding_time[2]; uint8_t pdu_len[2]; uint8_t circuit_id; }; struct isis_lsp_header { uint8_t pdu_len[2]; uint8_t remaining_lifetime[2]; uint8_t lsp_id[LSP_ID_LEN]; uint8_t sequence_number[4]; uint8_t checksum[2]; uint8_t typeblock; }; struct isis_csnp_header { uint8_t pdu_len[2]; uint8_t source_id[NODE_ID_LEN]; uint8_t start_lsp_id[LSP_ID_LEN]; uint8_t end_lsp_id[LSP_ID_LEN]; }; struct isis_psnp_header { uint8_t pdu_len[2]; uint8_t source_id[NODE_ID_LEN]; }; struct isis_tlv_lsp { uint8_t remaining_lifetime[2]; uint8_t lsp_id[LSP_ID_LEN]; uint8_t sequence_number[4]; uint8_t checksum[2]; }; #define ISIS_COMMON_HEADER_SIZE (sizeof(struct isis_common_header)) #define ISIS_IIH_LAN_HEADER_SIZE (sizeof(struct isis_iih_lan_header)) #define ISIS_IIH_PTP_HEADER_SIZE (sizeof(struct isis_iih_ptp_header)) #define ISIS_LSP_HEADER_SIZE (sizeof(struct isis_lsp_header)) #define ISIS_CSNP_HEADER_SIZE (sizeof(struct isis_csnp_header)) #define ISIS_PSNP_HEADER_SIZE (sizeof(struct isis_psnp_header)) void isoclns_print(netdissect_options *ndo, const uint8_t *p, u_int length) { if (!ND_TTEST(*p)) { /* enough bytes on the wire ? 
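(editorial note: ND_TTEST() only asks whether the NLPID octet lies inside the captured snapshot before it is dereferenced; when it does not, the printer emits the |OSI truncation marker and gives up)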
*/ ND_PRINT((ndo, "|OSI")); return; } if (ndo->ndo_eflag) ND_PRINT((ndo, "OSI NLPID %s (0x%02x): ", tok2str(nlpid_values, "Unknown", *p), *p)); switch (*p) { case NLPID_CLNP: if (!clnp_print(ndo, p, length)) print_unknown_data(ndo, p, "\n\t", length); break; case NLPID_ESIS: esis_print(ndo, p, length); return; case NLPID_ISIS: if (!isis_print(ndo, p, length)) print_unknown_data(ndo, p, "\n\t", length); break; case NLPID_NULLNS: ND_PRINT((ndo, "%slength: %u", ndo->ndo_eflag ? "" : ", ", length)); break; case NLPID_Q933: q933_print(ndo, p + 1, length - 1); break; case NLPID_IP: ip_print(ndo, p + 1, length - 1); break; case NLPID_IP6: ip6_print(ndo, p + 1, length - 1); break; case NLPID_PPP: ppp_print(ndo, p + 1, length - 1); break; default: if (!ndo->ndo_eflag) ND_PRINT((ndo, "OSI NLPID 0x%02x unknown", *p)); ND_PRINT((ndo, "%slength: %u", ndo->ndo_eflag ? "" : ", ", length)); if (length > 1) print_unknown_data(ndo, p, "\n\t", length); break; } } #define CLNP_PDU_ER 1 #define CLNP_PDU_DT 28 #define CLNP_PDU_MD 29 #define CLNP_PDU_ERQ 30 #define CLNP_PDU_ERP 31 static const struct tok clnp_pdu_values[] = { { CLNP_PDU_ER, "Error Report"}, { CLNP_PDU_MD, "MD"}, { CLNP_PDU_DT, "Data"}, { CLNP_PDU_ERQ, "Echo Request"}, { CLNP_PDU_ERP, "Echo Response"}, { 0, NULL } }; struct clnp_header_t { uint8_t nlpid; uint8_t length_indicator; uint8_t version; uint8_t lifetime; /* units of 500ms */ uint8_t type; uint8_t segment_length[2]; uint8_t cksum[2]; }; struct clnp_segment_header_t { uint8_t data_unit_id[2]; uint8_t segment_offset[2]; uint8_t total_length[2]; }; /* * clnp_print * Decode CLNP packets. Return 0 on error. */ static int clnp_print(netdissect_options *ndo, const uint8_t *pptr, u_int length) { const uint8_t *optr,*source_address,*dest_address; u_int li,tlen,nsap_offset,source_address_length,dest_address_length, clnp_pdu_type, clnp_flags; const struct clnp_header_t *clnp_header; const struct clnp_segment_header_t *clnp_segment_header; uint8_t rfd_error_major,rfd_error_minor; clnp_header = (const struct clnp_header_t *) pptr; ND_TCHECK(*clnp_header); li = clnp_header->length_indicator; optr = pptr; if (!ndo->ndo_eflag) ND_PRINT((ndo, "CLNP")); /* * Sanity checking of the header. 
*/ if (clnp_header->version != CLNP_VERSION) { ND_PRINT((ndo, "version %d packet not supported", clnp_header->version)); return (0); } if (li > length) { ND_PRINT((ndo, " length indicator(%u) > PDU size (%u)!", li, length)); return (0); } if (li < sizeof(struct clnp_header_t)) { ND_PRINT((ndo, " length indicator %u < min PDU size:", li)); while (pptr < ndo->ndo_snapend) ND_PRINT((ndo, "%02X", *pptr++)); return (0); } /* FIXME further header sanity checking */ clnp_pdu_type = clnp_header->type & CLNP_PDU_TYPE_MASK; clnp_flags = clnp_header->type & CLNP_FLAG_MASK; pptr += sizeof(struct clnp_header_t); li -= sizeof(struct clnp_header_t); if (li < 1) { ND_PRINT((ndo, "li < size of fixed part of CLNP header and addresses")); return (0); } ND_TCHECK(*pptr); dest_address_length = *pptr; pptr += 1; li -= 1; if (li < dest_address_length) { ND_PRINT((ndo, "li < size of fixed part of CLNP header and addresses")); return (0); } ND_TCHECK2(*pptr, dest_address_length); dest_address = pptr; pptr += dest_address_length; li -= dest_address_length; if (li < 1) { ND_PRINT((ndo, "li < size of fixed part of CLNP header and addresses")); return (0); } ND_TCHECK(*pptr); source_address_length = *pptr; pptr += 1; li -= 1; if (li < source_address_length) { ND_PRINT((ndo, "li < size of fixed part of CLNP header and addresses")); return (0); } ND_TCHECK2(*pptr, source_address_length); source_address = pptr; pptr += source_address_length; li -= source_address_length; if (ndo->ndo_vflag < 1) { ND_PRINT((ndo, "%s%s > %s, %s, length %u", ndo->ndo_eflag ? "" : ", ", isonsap_string(ndo, source_address, source_address_length), isonsap_string(ndo, dest_address, dest_address_length), tok2str(clnp_pdu_values,"unknown (%u)",clnp_pdu_type), length)); return (1); } ND_PRINT((ndo, "%slength %u", ndo->ndo_eflag ? 
"" : ", ", length)); ND_PRINT((ndo, "\n\t%s PDU, hlen: %u, v: %u, lifetime: %u.%us, Segment PDU length: %u, checksum: 0x%04x", tok2str(clnp_pdu_values, "unknown (%u)",clnp_pdu_type), clnp_header->length_indicator, clnp_header->version, clnp_header->lifetime/2, (clnp_header->lifetime%2)*5, EXTRACT_16BITS(clnp_header->segment_length), EXTRACT_16BITS(clnp_header->cksum))); osi_print_cksum(ndo, optr, EXTRACT_16BITS(clnp_header->cksum), 7, clnp_header->length_indicator); ND_PRINT((ndo, "\n\tFlags [%s]", bittok2str(clnp_flag_values, "none", clnp_flags))); ND_PRINT((ndo, "\n\tsource address (length %u): %s\n\tdest address (length %u): %s", source_address_length, isonsap_string(ndo, source_address, source_address_length), dest_address_length, isonsap_string(ndo, dest_address, dest_address_length))); if (clnp_flags & CLNP_SEGMENT_PART) { if (li < sizeof(const struct clnp_segment_header_t)) { ND_PRINT((ndo, "li < size of fixed part of CLNP header, addresses, and segment part")); return (0); } clnp_segment_header = (const struct clnp_segment_header_t *) pptr; ND_TCHECK(*clnp_segment_header); ND_PRINT((ndo, "\n\tData Unit ID: 0x%04x, Segment Offset: %u, Total PDU Length: %u", EXTRACT_16BITS(clnp_segment_header->data_unit_id), EXTRACT_16BITS(clnp_segment_header->segment_offset), EXTRACT_16BITS(clnp_segment_header->total_length))); pptr+=sizeof(const struct clnp_segment_header_t); li-=sizeof(const struct clnp_segment_header_t); } /* now walk the options */ while (li >= 2) { u_int op, opli; const uint8_t *tptr; if (li < 2) { ND_PRINT((ndo, ", bad opts/li")); return (0); } ND_TCHECK2(*pptr, 2); op = *pptr++; opli = *pptr++; li -= 2; if (opli > li) { ND_PRINT((ndo, ", opt (%d) too long", op)); return (0); } ND_TCHECK2(*pptr, opli); li -= opli; tptr = pptr; tlen = opli; ND_PRINT((ndo, "\n\t %s Option #%u, length %u, value: ", tok2str(clnp_option_values,"Unknown",op), op, opli)); /* * We've already checked that the entire option is present * in the captured packet with the ND_TCHECK2() call. * Therefore, we don't need to do ND_TCHECK()/ND_TCHECK2() * checks. * We do, however, need to check tlen, to make sure we * don't run past the end of the option. 
*/ switch (op) { case CLNP_OPTION_ROUTE_RECORDING: /* those two options share the format */ case CLNP_OPTION_SOURCE_ROUTING: if (tlen < 2) { ND_PRINT((ndo, ", bad opt len")); return (0); } ND_PRINT((ndo, "%s %s", tok2str(clnp_option_sr_rr_values,"Unknown",*tptr), tok2str(clnp_option_sr_rr_string_values, "Unknown Option %u", op))); nsap_offset=*(tptr+1); if (nsap_offset == 0) { ND_PRINT((ndo, " Bad NSAP offset (0)")); break; } nsap_offset-=1; /* offset to nsap list */ if (nsap_offset > tlen) { ND_PRINT((ndo, " Bad NSAP offset (past end of option)")); break; } tptr+=nsap_offset; tlen-=nsap_offset; while (tlen > 0) { source_address_length=*tptr; if (tlen < source_address_length+1) { ND_PRINT((ndo, "\n\t NSAP address goes past end of option")); break; } if (source_address_length > 0) { source_address=(tptr+1); ND_TCHECK2(*source_address, source_address_length); ND_PRINT((ndo, "\n\t NSAP address (length %u): %s", source_address_length, isonsap_string(ndo, source_address, source_address_length))); } tlen-=source_address_length+1; } break; case CLNP_OPTION_PRIORITY: if (tlen < 1) { ND_PRINT((ndo, ", bad opt len")); return (0); } ND_PRINT((ndo, "0x%1x", *tptr&0x0f)); break; case CLNP_OPTION_QOS_MAINTENANCE: if (tlen < 1) { ND_PRINT((ndo, ", bad opt len")); return (0); } ND_PRINT((ndo, "\n\t Format Code: %s", tok2str(clnp_option_scope_values, "Reserved", *tptr&CLNP_OPTION_SCOPE_MASK))); if ((*tptr&CLNP_OPTION_SCOPE_MASK) == CLNP_OPTION_SCOPE_GLOBAL) ND_PRINT((ndo, "\n\t QoS Flags [%s]", bittok2str(clnp_option_qos_global_values, "none", *tptr&CLNP_OPTION_OPTION_QOS_MASK))); break; case CLNP_OPTION_SECURITY: if (tlen < 2) { ND_PRINT((ndo, ", bad opt len")); return (0); } ND_PRINT((ndo, "\n\t Format Code: %s, Security-Level %u", tok2str(clnp_option_scope_values,"Reserved",*tptr&CLNP_OPTION_SCOPE_MASK), *(tptr+1))); break; case CLNP_OPTION_DISCARD_REASON: if (tlen < 1) { ND_PRINT((ndo, ", bad opt len")); return (0); } rfd_error_major = (*tptr&0xf0) >> 4; rfd_error_minor = *tptr&0x0f; ND_PRINT((ndo, "\n\t Class: %s Error (0x%01x), %s (0x%01x)", tok2str(clnp_option_rfd_class_values,"Unknown",rfd_error_major), rfd_error_major, tok2str(clnp_option_rfd_error_class[rfd_error_major],"Unknown",rfd_error_minor), rfd_error_minor)); break; case CLNP_OPTION_PADDING: ND_PRINT((ndo, "padding data")); break; /* * FIXME those are the defined Options that lack a decoder * you are welcome to contribute code ;-) */ default: print_unknown_data(ndo, tptr, "\n\t ", opli); break; } if (ndo->ndo_vflag > 1) print_unknown_data(ndo, pptr, "\n\t ", opli); pptr += opli; } switch (clnp_pdu_type) { case CLNP_PDU_ER: /* fall through */ case CLNP_PDU_ERP: ND_TCHECK(*pptr); if (*(pptr) == NLPID_CLNP) { ND_PRINT((ndo, "\n\t-----original packet-----\n\t")); /* FIXME recursion protection */ clnp_print(ndo, pptr, length - clnp_header->length_indicator); break; } case CLNP_PDU_DT: case CLNP_PDU_MD: case CLNP_PDU_ERQ: default: /* dump the PDU specific data */ if (length-(pptr-optr) > 0) { ND_PRINT((ndo, "\n\t undecoded non-header data, length %u", length-clnp_header->length_indicator)); print_unknown_data(ndo, pptr, "\n\t ", length - (pptr - optr)); } } return (1); trunc: ND_PRINT((ndo, "[|clnp]")); return (1); } #define ESIS_PDU_REDIRECT 6 #define ESIS_PDU_ESH 2 #define ESIS_PDU_ISH 4 static const struct tok esis_pdu_values[] = { { ESIS_PDU_REDIRECT, "redirect"}, { ESIS_PDU_ESH, "ESH"}, { ESIS_PDU_ISH, "ISH"}, { 0, NULL } }; struct esis_header_t { uint8_t nlpid; uint8_t length_indicator; uint8_t version; uint8_t reserved; uint8_t type; 
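/* editorial note: the low 5 bits of this octet carry the PDU type, cf. ESIS_PDU_TYPE_MASK */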
uint8_t holdtime[2]; uint8_t cksum[2]; }; static void esis_print(netdissect_options *ndo, const uint8_t *pptr, u_int length) { const uint8_t *optr; u_int li,esis_pdu_type,source_address_length, source_address_number; const struct esis_header_t *esis_header; if (!ndo->ndo_eflag) ND_PRINT((ndo, "ES-IS")); if (length <= 2) { ND_PRINT((ndo, ndo->ndo_qflag ? "bad pkt!" : "no header at all!")); return; } esis_header = (const struct esis_header_t *) pptr; ND_TCHECK(*esis_header); li = esis_header->length_indicator; optr = pptr; /* * Sanity checking of the header. */ if (esis_header->nlpid != NLPID_ESIS) { ND_PRINT((ndo, " nlpid 0x%02x packet not supported", esis_header->nlpid)); return; } if (esis_header->version != ESIS_VERSION) { ND_PRINT((ndo, " version %d packet not supported", esis_header->version)); return; } if (li > length) { ND_PRINT((ndo, " length indicator(%u) > PDU size (%u)!", li, length)); return; } if (li < sizeof(struct esis_header_t) + 2) { ND_PRINT((ndo, " length indicator %u < min PDU size:", li)); while (pptr < ndo->ndo_snapend) ND_PRINT((ndo, "%02X", *pptr++)); return; } esis_pdu_type = esis_header->type & ESIS_PDU_TYPE_MASK; if (ndo->ndo_vflag < 1) { ND_PRINT((ndo, "%s%s, length %u", ndo->ndo_eflag ? "" : ", ", tok2str(esis_pdu_values,"unknown type (%u)",esis_pdu_type), length)); return; } else ND_PRINT((ndo, "%slength %u\n\t%s (%u)", ndo->ndo_eflag ? "" : ", ", length, tok2str(esis_pdu_values,"unknown type: %u", esis_pdu_type), esis_pdu_type)); ND_PRINT((ndo, ", v: %u%s", esis_header->version, esis_header->version == ESIS_VERSION ? "" : "unsupported" )); ND_PRINT((ndo, ", checksum: 0x%04x", EXTRACT_16BITS(esis_header->cksum))); osi_print_cksum(ndo, pptr, EXTRACT_16BITS(esis_header->cksum), 7, li); ND_PRINT((ndo, ", holding time: %us, length indicator: %u", EXTRACT_16BITS(esis_header->holdtime), li)); if (ndo->ndo_vflag > 1) print_unknown_data(ndo, optr, "\n\t", sizeof(struct esis_header_t)); pptr += sizeof(struct esis_header_t); li -= sizeof(struct esis_header_t); switch (esis_pdu_type) { case ESIS_PDU_REDIRECT: { const uint8_t *dst, *snpa, *neta; u_int dstl, snpal, netal; ND_TCHECK(*pptr); if (li < 1) { ND_PRINT((ndo, ", bad redirect/li")); return; } dstl = *pptr; pptr++; li--; ND_TCHECK2(*pptr, dstl); if (li < dstl) { ND_PRINT((ndo, ", bad redirect/li")); return; } dst = pptr; pptr += dstl; li -= dstl; ND_PRINT((ndo, "\n\t %s", isonsap_string(ndo, dst, dstl))); ND_TCHECK(*pptr); if (li < 1) { ND_PRINT((ndo, ", bad redirect/li")); return; } snpal = *pptr; pptr++; li--; ND_TCHECK2(*pptr, snpal); if (li < snpal) { ND_PRINT((ndo, ", bad redirect/li")); return; } snpa = pptr; pptr += snpal; li -= snpal; ND_TCHECK(*pptr); if (li < 1) { ND_PRINT((ndo, ", bad redirect/li")); return; } netal = *pptr; pptr++; ND_TCHECK2(*pptr, netal); if (li < netal) { ND_PRINT((ndo, ", bad redirect/li")); return; } neta = pptr; pptr += netal; li -= netal; if (netal == 0) ND_PRINT((ndo, "\n\t %s", etheraddr_string(ndo, snpa))); else ND_PRINT((ndo, "\n\t %s", isonsap_string(ndo, neta, netal))); break; } case ESIS_PDU_ESH: ND_TCHECK(*pptr); if (li < 1) { ND_PRINT((ndo, ", bad esh/li")); return; } source_address_number = *pptr; pptr++; li--; ND_PRINT((ndo, "\n\t Number of Source Addresses: %u", source_address_number)); while (source_address_number > 0) { ND_TCHECK(*pptr); if (li < 1) { ND_PRINT((ndo, ", bad esh/li")); return; } source_address_length = *pptr; pptr++; li--; ND_TCHECK2(*pptr, source_address_length); if (li < source_address_length) { ND_PRINT((ndo, ", bad esh/li")); return; } 
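/* editorial note on the bookkeeping discipline here: li always mirrors the bytes still owed by the length indicator and is decremented in lock step with every pptr advance, so the "bad esh/li" checks above catch a lying header before any over-read */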
ND_PRINT((ndo, "\n\t NET (length: %u): %s", source_address_length, isonsap_string(ndo, pptr, source_address_length))); pptr += source_address_length; li -= source_address_length; source_address_number--; } break; case ESIS_PDU_ISH: { ND_TCHECK(*pptr); if (li < 1) { ND_PRINT((ndo, ", bad ish/li")); return; } source_address_length = *pptr; pptr++; li--; ND_TCHECK2(*pptr, source_address_length); if (li < source_address_length) { ND_PRINT((ndo, ", bad ish/li")); return; } ND_PRINT((ndo, "\n\t NET (length: %u): %s", source_address_length, isonsap_string(ndo, pptr, source_address_length))); pptr += source_address_length; li -= source_address_length; break; } default: if (ndo->ndo_vflag <= 1) { if (pptr < ndo->ndo_snapend) print_unknown_data(ndo, pptr, "\n\t ", ndo->ndo_snapend - pptr); } return; } /* now walk the options */ while (li != 0) { u_int op, opli; const uint8_t *tptr; if (li < 2) { ND_PRINT((ndo, ", bad opts/li")); return; } ND_TCHECK2(*pptr, 2); op = *pptr++; opli = *pptr++; li -= 2; if (opli > li) { ND_PRINT((ndo, ", opt (%d) too long", op)); return; } li -= opli; tptr = pptr; ND_PRINT((ndo, "\n\t %s Option #%u, length %u, value: ", tok2str(esis_option_values,"Unknown",op), op, opli)); switch (op) { case ESIS_OPTION_ES_CONF_TIME: if (opli == 2) { ND_TCHECK2(*pptr, 2); ND_PRINT((ndo, "%us", EXTRACT_16BITS(tptr))); } else ND_PRINT((ndo, "(bad length)")); break; case ESIS_OPTION_PROTOCOLS: while (opli>0) { ND_TCHECK(*pptr); ND_PRINT((ndo, "%s (0x%02x)", tok2str(nlpid_values, "unknown", *tptr), *tptr)); if (opli>1) /* further NPLIDs ? - put comma */ ND_PRINT((ndo, ", ")); tptr++; opli--; } break; /* * FIXME those are the defined Options that lack a decoder * you are welcome to contribute code ;-) */ case ESIS_OPTION_QOS_MAINTENANCE: case ESIS_OPTION_SECURITY: case ESIS_OPTION_PRIORITY: case ESIS_OPTION_ADDRESS_MASK: case ESIS_OPTION_SNPA_MASK: default: print_unknown_data(ndo, tptr, "\n\t ", opli); break; } if (ndo->ndo_vflag > 1) print_unknown_data(ndo, pptr, "\n\t ", opli); pptr += opli; } trunc: return; } static void isis_print_mcid(netdissect_options *ndo, const struct isis_spb_mcid *mcid) { int i; ND_TCHECK(*mcid); ND_PRINT((ndo, "ID: %d, Name: ", mcid->format_id)); if (fn_printzp(ndo, mcid->name, 32, ndo->ndo_snapend)) goto trunc; ND_PRINT((ndo, "\n\t Lvl: %d", EXTRACT_16BITS(mcid->revision_lvl))); ND_PRINT((ndo, ", Digest: ")); for(i=0;i<16;i++) ND_PRINT((ndo, "%.2x ", mcid->digest[i])); trunc: ND_PRINT((ndo, "%s", tstr)); } static int isis_print_mt_port_cap_subtlv(netdissect_options *ndo, const uint8_t *tptr, int len) { int stlv_type, stlv_len; const struct isis_subtlv_spb_mcid *subtlv_spb_mcid; int i; while (len > 2) { stlv_type = *(tptr++); stlv_len = *(tptr++); /* first lets see if we know the subTLVs name*/ ND_PRINT((ndo, "\n\t %s subTLV #%u, length: %u", tok2str(isis_mt_port_cap_subtlv_values, "unknown", stlv_type), stlv_type, stlv_len)); /*len -= TLV_TYPE_LEN_OFFSET;*/ len = len -2; switch (stlv_type) { case ISIS_SUBTLV_SPB_MCID: { ND_TCHECK2(*(tptr), ISIS_SUBTLV_SPB_MCID_MIN_LEN); subtlv_spb_mcid = (const struct isis_subtlv_spb_mcid *)tptr; ND_PRINT((ndo, "\n\t MCID: ")); isis_print_mcid(ndo, &(subtlv_spb_mcid->mcid)); /*tptr += SPB_MCID_MIN_LEN; len -= SPB_MCID_MIN_LEN; */ ND_PRINT((ndo, "\n\t AUX-MCID: ")); isis_print_mcid(ndo, &(subtlv_spb_mcid->aux_mcid)); /*tptr += SPB_MCID_MIN_LEN; len -= SPB_MCID_MIN_LEN; */ tptr = tptr + sizeof(struct isis_subtlv_spb_mcid); len = len - sizeof(struct isis_subtlv_spb_mcid); break; } case ISIS_SUBTLV_SPB_DIGEST: { ND_TCHECK2(*(tptr), 
ISIS_SUBTLV_SPB_DIGEST_MIN_LEN); ND_PRINT((ndo, "\n\t RES: %d V: %d A: %d D: %d", (*(tptr) >> 5), (((*tptr)>> 4) & 0x01), ((*(tptr) >> 2) & 0x03), ((*tptr) & 0x03))); tptr++; ND_PRINT((ndo, "\n\t Digest: ")); for(i=1;i<=8; i++) { ND_PRINT((ndo, "%08x ", EXTRACT_32BITS(tptr))); if (i%4 == 0 && i != 8) ND_PRINT((ndo, "\n\t ")); tptr = tptr + 4; } len = len - ISIS_SUBTLV_SPB_DIGEST_MIN_LEN; break; } case ISIS_SUBTLV_SPB_BVID: { ND_TCHECK2(*(tptr), stlv_len); while (len >= ISIS_SUBTLV_SPB_BVID_MIN_LEN) { ND_TCHECK2(*(tptr), ISIS_SUBTLV_SPB_BVID_MIN_LEN); ND_PRINT((ndo, "\n\t ECT: %08x", EXTRACT_32BITS(tptr))); tptr = tptr+4; ND_PRINT((ndo, " BVID: %d, U:%01x M:%01x ", (EXTRACT_16BITS (tptr) >> 4) , (EXTRACT_16BITS (tptr) >> 3) & 0x01, (EXTRACT_16BITS (tptr) >> 2) & 0x01)); tptr = tptr + 2; len = len - ISIS_SUBTLV_SPB_BVID_MIN_LEN; } break; } default: break; } } return 0; trunc: ND_PRINT((ndo, "\n\t\t")); ND_PRINT((ndo, "%s", tstr)); return(1); } static int isis_print_mt_capability_subtlv(netdissect_options *ndo, const uint8_t *tptr, int len) { int stlv_type, stlv_len, tmp; while (len > 2) { stlv_type = *(tptr++); stlv_len = *(tptr++); /* first lets see if we know the subTLVs name*/ ND_PRINT((ndo, "\n\t %s subTLV #%u, length: %u", tok2str(isis_mt_capability_subtlv_values, "unknown", stlv_type), stlv_type, stlv_len)); len = len - 2; switch (stlv_type) { case ISIS_SUBTLV_SPB_INSTANCE: ND_TCHECK2(*tptr, ISIS_SUBTLV_SPB_INSTANCE_MIN_LEN); ND_PRINT((ndo, "\n\t CIST Root-ID: %08x", EXTRACT_32BITS(tptr))); tptr = tptr+4; ND_PRINT((ndo, " %08x", EXTRACT_32BITS(tptr))); tptr = tptr+4; ND_PRINT((ndo, ", Path Cost: %08x", EXTRACT_32BITS(tptr))); tptr = tptr+4; ND_PRINT((ndo, ", Prio: %d", EXTRACT_16BITS(tptr))); tptr = tptr + 2; ND_PRINT((ndo, "\n\t RES: %d", EXTRACT_16BITS(tptr) >> 5)); ND_PRINT((ndo, ", V: %d", (EXTRACT_16BITS(tptr) >> 4) & 0x0001)); ND_PRINT((ndo, ", SPSource-ID: %d", (EXTRACT_32BITS(tptr) & 0x000fffff))); tptr = tptr+4; ND_PRINT((ndo, ", No of Trees: %x", *(tptr))); tmp = *(tptr++); len = len - ISIS_SUBTLV_SPB_INSTANCE_MIN_LEN; while (tmp) { ND_TCHECK2(*tptr, ISIS_SUBTLV_SPB_INSTANCE_VLAN_TUPLE_LEN); ND_PRINT((ndo, "\n\t U:%d, M:%d, A:%d, RES:%d", *(tptr) >> 7, (*(tptr) >> 6) & 0x01, (*(tptr) >> 5) & 0x01, (*(tptr) & 0x1f))); tptr++; ND_PRINT((ndo, ", ECT: %08x", EXTRACT_32BITS(tptr))); tptr = tptr + 4; ND_PRINT((ndo, ", BVID: %d, SPVID: %d", (EXTRACT_24BITS(tptr) >> 12) & 0x000fff, EXTRACT_24BITS(tptr) & 0x000fff)); tptr = tptr + 3; len = len - ISIS_SUBTLV_SPB_INSTANCE_VLAN_TUPLE_LEN; tmp--; } break; case ISIS_SUBTLV_SPBM_SI: ND_TCHECK2(*tptr, 8); ND_PRINT((ndo, "\n\t BMAC: %08x", EXTRACT_32BITS(tptr))); tptr = tptr+4; ND_PRINT((ndo, "%04x", EXTRACT_16BITS(tptr))); tptr = tptr+2; ND_PRINT((ndo, ", RES: %d, VID: %d", EXTRACT_16BITS(tptr) >> 12, (EXTRACT_16BITS(tptr)) & 0x0fff)); tptr = tptr+2; len = len - 8; stlv_len = stlv_len - 8; while (stlv_len >= 4) { ND_TCHECK2(*tptr, 4); ND_PRINT((ndo, "\n\t T: %d, R: %d, RES: %d, ISID: %d", (EXTRACT_32BITS(tptr) >> 31), (EXTRACT_32BITS(tptr) >> 30) & 0x01, (EXTRACT_32BITS(tptr) >> 24) & 0x03f, (EXTRACT_32BITS(tptr)) & 0x0ffffff)); tptr = tptr + 4; len = len - 4; stlv_len = stlv_len - 4; } break; default: break; } } return 0; trunc: ND_PRINT((ndo, "\n\t\t")); ND_PRINT((ndo, "%s", tstr)); return(1); } /* shared routine for printing system, node and lsp-ids */ static char * isis_print_id(const uint8_t *cp, int id_len) { int i; static char id[sizeof("xxxx.xxxx.xxxx.yy-zz")]; char *pos = id; for (i = 1; i <= SYSTEM_ID_LEN; i++) { snprintf(pos, 
sizeof(id) - (pos - id), "%02x", *cp++); pos += strlen(pos); if (i == 2 || i == 4) *pos++ = '.'; } if (id_len >= NODE_ID_LEN) { snprintf(pos, sizeof(id) - (pos - id), ".%02x", *cp++); pos += strlen(pos); } if (id_len == LSP_ID_LEN) snprintf(pos, sizeof(id) - (pos - id), "-%02x", *cp); return (id); } /* print the 4-byte metric block which is common found in the old-style TLVs */ static int isis_print_metric_block(netdissect_options *ndo, const struct isis_metric_block *isis_metric_block) { ND_PRINT((ndo, ", Default Metric: %d, %s", ISIS_LSP_TLV_METRIC_VALUE(isis_metric_block->metric_default), ISIS_LSP_TLV_METRIC_IE(isis_metric_block->metric_default) ? "External" : "Internal")); if (!ISIS_LSP_TLV_METRIC_SUPPORTED(isis_metric_block->metric_delay)) ND_PRINT((ndo, "\n\t\t Delay Metric: %d, %s", ISIS_LSP_TLV_METRIC_VALUE(isis_metric_block->metric_delay), ISIS_LSP_TLV_METRIC_IE(isis_metric_block->metric_delay) ? "External" : "Internal")); if (!ISIS_LSP_TLV_METRIC_SUPPORTED(isis_metric_block->metric_expense)) ND_PRINT((ndo, "\n\t\t Expense Metric: %d, %s", ISIS_LSP_TLV_METRIC_VALUE(isis_metric_block->metric_expense), ISIS_LSP_TLV_METRIC_IE(isis_metric_block->metric_expense) ? "External" : "Internal")); if (!ISIS_LSP_TLV_METRIC_SUPPORTED(isis_metric_block->metric_error)) ND_PRINT((ndo, "\n\t\t Error Metric: %d, %s", ISIS_LSP_TLV_METRIC_VALUE(isis_metric_block->metric_error), ISIS_LSP_TLV_METRIC_IE(isis_metric_block->metric_error) ? "External" : "Internal")); return(1); /* everything is ok */ } static int isis_print_tlv_ip_reach(netdissect_options *ndo, const uint8_t *cp, const char *ident, int length) { int prefix_len; const struct isis_tlv_ip_reach *tlv_ip_reach; tlv_ip_reach = (const struct isis_tlv_ip_reach *)cp; while (length > 0) { if ((size_t)length < sizeof(*tlv_ip_reach)) { ND_PRINT((ndo, "short IPv4 Reachability (%d vs %lu)", length, (unsigned long)sizeof(*tlv_ip_reach))); return (0); } if (!ND_TTEST(*tlv_ip_reach)) return (0); prefix_len = mask2plen(EXTRACT_32BITS(tlv_ip_reach->mask)); if (prefix_len == -1) ND_PRINT((ndo, "%sIPv4 prefix: %s mask %s", ident, ipaddr_string(ndo, (tlv_ip_reach->prefix)), ipaddr_string(ndo, (tlv_ip_reach->mask)))); else ND_PRINT((ndo, "%sIPv4 prefix: %15s/%u", ident, ipaddr_string(ndo, (tlv_ip_reach->prefix)), prefix_len)); ND_PRINT((ndo, ", Distribution: %s, Metric: %u, %s", ISIS_LSP_TLV_METRIC_UPDOWN(tlv_ip_reach->isis_metric_block.metric_default) ? "down" : "up", ISIS_LSP_TLV_METRIC_VALUE(tlv_ip_reach->isis_metric_block.metric_default), ISIS_LSP_TLV_METRIC_IE(tlv_ip_reach->isis_metric_block.metric_default) ? "External" : "Internal")); if (!ISIS_LSP_TLV_METRIC_SUPPORTED(tlv_ip_reach->isis_metric_block.metric_delay)) ND_PRINT((ndo, "%s Delay Metric: %u, %s", ident, ISIS_LSP_TLV_METRIC_VALUE(tlv_ip_reach->isis_metric_block.metric_delay), ISIS_LSP_TLV_METRIC_IE(tlv_ip_reach->isis_metric_block.metric_delay) ? "External" : "Internal")); if (!ISIS_LSP_TLV_METRIC_SUPPORTED(tlv_ip_reach->isis_metric_block.metric_expense)) ND_PRINT((ndo, "%s Expense Metric: %u, %s", ident, ISIS_LSP_TLV_METRIC_VALUE(tlv_ip_reach->isis_metric_block.metric_expense), ISIS_LSP_TLV_METRIC_IE(tlv_ip_reach->isis_metric_block.metric_expense) ? "External" : "Internal")); if (!ISIS_LSP_TLV_METRIC_SUPPORTED(tlv_ip_reach->isis_metric_block.metric_error)) ND_PRINT((ndo, "%s Error Metric: %u, %s", ident, ISIS_LSP_TLV_METRIC_VALUE(tlv_ip_reach->isis_metric_block.metric_error), ISIS_LSP_TLV_METRIC_IE(tlv_ip_reach->isis_metric_block.metric_error) ? 
"External" : "Internal")); length -= sizeof(struct isis_tlv_ip_reach); tlv_ip_reach++; } return (1); } /* * this is the common IP-REACH subTLV decoder it is called * from various EXTD-IP REACH TLVs (135,235,236,237) */ static int isis_print_ip_reach_subtlv(netdissect_options *ndo, const uint8_t *tptr, int subt, int subl, const char *ident) { /* first lets see if we know the subTLVs name*/ ND_PRINT((ndo, "%s%s subTLV #%u, length: %u", ident, tok2str(isis_ext_ip_reach_subtlv_values, "unknown", subt), subt, subl)); ND_TCHECK2(*tptr,subl); switch(subt) { case ISIS_SUBTLV_EXTD_IP_REACH_MGMT_PREFIX_COLOR: /* fall through */ case ISIS_SUBTLV_EXTD_IP_REACH_ADMIN_TAG32: while (subl >= 4) { ND_PRINT((ndo, ", 0x%08x (=%u)", EXTRACT_32BITS(tptr), EXTRACT_32BITS(tptr))); tptr+=4; subl-=4; } break; case ISIS_SUBTLV_EXTD_IP_REACH_ADMIN_TAG64: while (subl >= 8) { ND_PRINT((ndo, ", 0x%08x%08x", EXTRACT_32BITS(tptr), EXTRACT_32BITS(tptr+4))); tptr+=8; subl-=8; } break; default: if (!print_unknown_data(ndo, tptr, "\n\t\t ", subl)) return(0); break; } return(1); trunc: ND_PRINT((ndo, "%s", ident)); ND_PRINT((ndo, "%s", tstr)); return(0); } /* * this is the common IS-REACH subTLV decoder it is called * from isis_print_ext_is_reach() */ static int isis_print_is_reach_subtlv(netdissect_options *ndo, const uint8_t *tptr, u_int subt, u_int subl, const char *ident) { u_int te_class,priority_level,gmpls_switch_cap; union { /* int to float conversion buffer for several subTLVs */ float f; uint32_t i; } bw; /* first lets see if we know the subTLVs name*/ ND_PRINT((ndo, "%s%s subTLV #%u, length: %u", ident, tok2str(isis_ext_is_reach_subtlv_values, "unknown", subt), subt, subl)); ND_TCHECK2(*tptr, subl); switch(subt) { case ISIS_SUBTLV_EXT_IS_REACH_ADMIN_GROUP: case ISIS_SUBTLV_EXT_IS_REACH_LINK_LOCAL_REMOTE_ID: case ISIS_SUBTLV_EXT_IS_REACH_LINK_REMOTE_ID: if (subl >= 4) { ND_PRINT((ndo, ", 0x%08x", EXTRACT_32BITS(tptr))); if (subl == 8) /* rfc4205 */ ND_PRINT((ndo, ", 0x%08x", EXTRACT_32BITS(tptr+4))); } break; case ISIS_SUBTLV_EXT_IS_REACH_IPV4_INTF_ADDR: case ISIS_SUBTLV_EXT_IS_REACH_IPV4_NEIGHBOR_ADDR: if (subl >= sizeof(struct in_addr)) ND_PRINT((ndo, ", %s", ipaddr_string(ndo, tptr))); break; case ISIS_SUBTLV_EXT_IS_REACH_MAX_LINK_BW : case ISIS_SUBTLV_EXT_IS_REACH_RESERVABLE_BW: if (subl >= 4) { bw.i = EXTRACT_32BITS(tptr); ND_PRINT((ndo, ", %.3f Mbps", bw.f * 8 / 1000000)); } break; case ISIS_SUBTLV_EXT_IS_REACH_UNRESERVED_BW : if (subl >= 32) { for (te_class = 0; te_class < 8; te_class++) { bw.i = EXTRACT_32BITS(tptr); ND_PRINT((ndo, "%s TE-Class %u: %.3f Mbps", ident, te_class, bw.f * 8 / 1000000)); tptr+=4; } } break; case ISIS_SUBTLV_EXT_IS_REACH_BW_CONSTRAINTS: /* fall through */ case ISIS_SUBTLV_EXT_IS_REACH_BW_CONSTRAINTS_OLD: ND_PRINT((ndo, "%sBandwidth Constraints Model ID: %s (%u)", ident, tok2str(diffserv_te_bc_values, "unknown", *tptr), *tptr)); tptr++; /* decode BCs until the subTLV ends */ for (te_class = 0; te_class < (subl-1)/4; te_class++) { ND_TCHECK2(*tptr, 4); bw.i = EXTRACT_32BITS(tptr); ND_PRINT((ndo, "%s Bandwidth constraint CT%u: %.3f Mbps", ident, te_class, bw.f * 8 / 1000000)); tptr+=4; } break; case ISIS_SUBTLV_EXT_IS_REACH_TE_METRIC: if (subl >= 3) ND_PRINT((ndo, ", %u", EXTRACT_24BITS(tptr))); break; case ISIS_SUBTLV_EXT_IS_REACH_LINK_ATTRIBUTE: if (subl == 2) { ND_PRINT((ndo, ", [ %s ] (0x%04x)", bittok2str(isis_subtlv_link_attribute_values, "Unknown", EXTRACT_16BITS(tptr)), EXTRACT_16BITS(tptr))); } break; case ISIS_SUBTLV_EXT_IS_REACH_LINK_PROTECTION_TYPE: if (subl >= 2) { 
ND_PRINT((ndo, ", %s, Priority %u", bittok2str(gmpls_link_prot_values, "none", *tptr), *(tptr+1))); } break; case ISIS_SUBTLV_SPB_METRIC: if (subl >= 6) { ND_PRINT((ndo, ", LM: %u", EXTRACT_24BITS(tptr))); tptr=tptr+3; ND_PRINT((ndo, ", P: %u", *(tptr))); tptr++; ND_PRINT((ndo, ", P-ID: %u", EXTRACT_16BITS(tptr))); } break; case ISIS_SUBTLV_EXT_IS_REACH_INTF_SW_CAP_DESCR: if (subl >= 36) { gmpls_switch_cap = *tptr; ND_PRINT((ndo, "%s Interface Switching Capability:%s", ident, tok2str(gmpls_switch_cap_values, "Unknown", gmpls_switch_cap))); ND_PRINT((ndo, ", LSP Encoding: %s", tok2str(gmpls_encoding_values, "Unknown", *(tptr + 1)))); tptr+=4; ND_PRINT((ndo, "%s Max LSP Bandwidth:", ident)); for (priority_level = 0; priority_level < 8; priority_level++) { bw.i = EXTRACT_32BITS(tptr); ND_PRINT((ndo, "%s priority level %d: %.3f Mbps", ident, priority_level, bw.f * 8 / 1000000)); tptr+=4; } subl-=36; switch (gmpls_switch_cap) { case GMPLS_PSC1: case GMPLS_PSC2: case GMPLS_PSC3: case GMPLS_PSC4: ND_TCHECK2(*tptr, 6); bw.i = EXTRACT_32BITS(tptr); ND_PRINT((ndo, "%s Min LSP Bandwidth: %.3f Mbps", ident, bw.f * 8 / 1000000)); ND_PRINT((ndo, "%s Interface MTU: %u", ident, EXTRACT_16BITS(tptr + 4))); break; case GMPLS_TSC: ND_TCHECK2(*tptr, 8); bw.i = EXTRACT_32BITS(tptr); ND_PRINT((ndo, "%s Min LSP Bandwidth: %.3f Mbps", ident, bw.f * 8 / 1000000)); ND_PRINT((ndo, "%s Indication %s", ident, tok2str(gmpls_switch_cap_tsc_indication_values, "Unknown (%u)", *(tptr + 4)))); break; default: /* there is some optional stuff left to decode but this is as of yet not specified so just lets hexdump what is left */ if(subl>0){ if (!print_unknown_data(ndo, tptr, "\n\t\t ", subl)) return(0); } } } break; default: if (!print_unknown_data(ndo, tptr, "\n\t\t ", subl)) return(0); break; } return(1); trunc: return(0); } /* * this is the common IS-REACH decoder it is called * from various EXTD-IS REACH style TLVs (22,24,222) */ static int isis_print_ext_is_reach(netdissect_options *ndo, const uint8_t *tptr, const char *ident, int tlv_type) { char ident_buffer[20]; int subtlv_type,subtlv_len,subtlv_sum_len; int proc_bytes = 0; /* how many bytes did we process ? */ if (!ND_TTEST2(*tptr, NODE_ID_LEN)) return(0); ND_PRINT((ndo, "%sIS Neighbor: %s", ident, isis_print_id(tptr, NODE_ID_LEN))); tptr+=(NODE_ID_LEN); if (tlv_type != ISIS_TLV_IS_ALIAS_ID) { /* the Alias TLV Metric field is implicit 0 */ if (!ND_TTEST2(*tptr, 3)) /* and is therefore skipped */ return(0); ND_PRINT((ndo, ", Metric: %d", EXTRACT_24BITS(tptr))); tptr+=3; } if (!ND_TTEST2(*tptr, 1)) return(0); subtlv_sum_len=*(tptr++); /* read out subTLV length */ proc_bytes=NODE_ID_LEN+3+1; ND_PRINT((ndo, ", %ssub-TLVs present",subtlv_sum_len ? 
"" : "no ")); if (subtlv_sum_len) { ND_PRINT((ndo, " (%u)", subtlv_sum_len)); while (subtlv_sum_len>0) { if (!ND_TTEST2(*tptr,2)) return(0); subtlv_type=*(tptr++); subtlv_len=*(tptr++); /* prepend the indent string */ snprintf(ident_buffer, sizeof(ident_buffer), "%s ",ident); if (!isis_print_is_reach_subtlv(ndo, tptr, subtlv_type, subtlv_len, ident_buffer)) return(0); tptr+=subtlv_len; subtlv_sum_len-=(subtlv_len+2); proc_bytes+=(subtlv_len+2); } } return(proc_bytes); } /* * this is the common Multi Topology ID decoder * it is called from various MT-TLVs (222,229,235,237) */ static int isis_print_mtid(netdissect_options *ndo, const uint8_t *tptr, const char *ident) { if (!ND_TTEST2(*tptr, 2)) return(0); ND_PRINT((ndo, "%s%s", ident, tok2str(isis_mt_values, "Reserved for IETF Consensus", ISIS_MASK_MTID(EXTRACT_16BITS(tptr))))); ND_PRINT((ndo, " Topology (0x%03x), Flags: [%s]", ISIS_MASK_MTID(EXTRACT_16BITS(tptr)), bittok2str(isis_mt_flag_values, "none",ISIS_MASK_MTFLAGS(EXTRACT_16BITS(tptr))))); return(2); } /* * this is the common extended IP reach decoder * it is called from TLVs (135,235,236,237) * we process the TLV and optional subTLVs and return * the amount of processed bytes */ static int isis_print_extd_ip_reach(netdissect_options *ndo, const uint8_t *tptr, const char *ident, uint16_t afi) { char ident_buffer[20]; uint8_t prefix[sizeof(struct in6_addr)]; /* shared copy buffer for IPv4 and IPv6 prefixes */ u_int metric, status_byte, bit_length, byte_length, sublen, processed, subtlvtype, subtlvlen; if (!ND_TTEST2(*tptr, 4)) return (0); metric = EXTRACT_32BITS(tptr); processed=4; tptr+=4; if (afi == AF_INET) { if (!ND_TTEST2(*tptr, 1)) /* fetch status byte */ return (0); status_byte=*(tptr++); bit_length = status_byte&0x3f; if (bit_length > 32) { ND_PRINT((ndo, "%sIPv4 prefix: bad bit length %u", ident, bit_length)); return (0); } processed++; } else if (afi == AF_INET6) { if (!ND_TTEST2(*tptr, 2)) /* fetch status & prefix_len byte */ return (0); status_byte=*(tptr++); bit_length=*(tptr++); if (bit_length > 128) { ND_PRINT((ndo, "%sIPv6 prefix: bad bit length %u", ident, bit_length)); return (0); } processed+=2; } else return (0); /* somebody is fooling us */ byte_length = (bit_length + 7) / 8; /* prefix has variable length encoding */ if (!ND_TTEST2(*tptr, byte_length)) return (0); memset(prefix, 0, sizeof prefix); /* clear the copy buffer */ memcpy(prefix,tptr,byte_length); /* copy as much as is stored in the TLV */ tptr+=byte_length; processed+=byte_length; if (afi == AF_INET) ND_PRINT((ndo, "%sIPv4 prefix: %15s/%u", ident, ipaddr_string(ndo, prefix), bit_length)); else if (afi == AF_INET6) ND_PRINT((ndo, "%sIPv6 prefix: %s/%u", ident, ip6addr_string(ndo, prefix), bit_length)); ND_PRINT((ndo, ", Distribution: %s, Metric: %u", ISIS_MASK_TLV_EXTD_IP_UPDOWN(status_byte) ? "down" : "up", metric)); if (afi == AF_INET && ISIS_MASK_TLV_EXTD_IP_SUBTLV(status_byte)) ND_PRINT((ndo, ", sub-TLVs present")); else if (afi == AF_INET6) ND_PRINT((ndo, ", %s%s", ISIS_MASK_TLV_EXTD_IP6_IE(status_byte) ? "External" : "Internal", ISIS_MASK_TLV_EXTD_IP6_SUBTLV(status_byte) ? 
", sub-TLVs present" : "")); if ((afi == AF_INET && ISIS_MASK_TLV_EXTD_IP_SUBTLV(status_byte)) || (afi == AF_INET6 && ISIS_MASK_TLV_EXTD_IP6_SUBTLV(status_byte)) ) { /* assume that one prefix can hold more than one subTLV - therefore the first byte must reflect the aggregate bytecount of the subTLVs for this prefix */ if (!ND_TTEST2(*tptr, 1)) return (0); sublen=*(tptr++); processed+=sublen+1; ND_PRINT((ndo, " (%u)", sublen)); /* print out subTLV length */ while (sublen>0) { if (!ND_TTEST2(*tptr,2)) return (0); subtlvtype=*(tptr++); subtlvlen=*(tptr++); /* prepend the indent string */ snprintf(ident_buffer, sizeof(ident_buffer), "%s ",ident); if (!isis_print_ip_reach_subtlv(ndo, tptr, subtlvtype, subtlvlen, ident_buffer)) return(0); tptr+=subtlvlen; sublen-=(subtlvlen+2); } } return (processed); } /* * Clear checksum and lifetime prior to signature verification. */ static void isis_clear_checksum_lifetime(void *header) { struct isis_lsp_header *header_lsp = (struct isis_lsp_header *) header; header_lsp->checksum[0] = 0; header_lsp->checksum[1] = 0; header_lsp->remaining_lifetime[0] = 0; header_lsp->remaining_lifetime[1] = 0; } /* * isis_print * Decode IS-IS packets. Return 0 on error. */ static int isis_print(netdissect_options *ndo, const uint8_t *p, u_int length) { const struct isis_common_header *isis_header; const struct isis_iih_lan_header *header_iih_lan; const struct isis_iih_ptp_header *header_iih_ptp; const struct isis_lsp_header *header_lsp; const struct isis_csnp_header *header_csnp; const struct isis_psnp_header *header_psnp; const struct isis_tlv_lsp *tlv_lsp; const struct isis_tlv_ptp_adj *tlv_ptp_adj; const struct isis_tlv_is_reach *tlv_is_reach; const struct isis_tlv_es_reach *tlv_es_reach; uint8_t pdu_type, max_area, id_length, tlv_type, tlv_len, tmp, alen, lan_alen, prefix_len; uint8_t ext_is_len, ext_ip_len, mt_len; const uint8_t *optr, *pptr, *tptr; u_short packet_len,pdu_len, key_id; u_int i,vendor_id; int sigcheck; packet_len=length; optr = p; /* initialize the _o_riginal pointer to the packet start - need it for parsing the checksum TLV and authentication TLV verification */ isis_header = (const struct isis_common_header *)p; ND_TCHECK(*isis_header); if (length < ISIS_COMMON_HEADER_SIZE) goto trunc; pptr = p+(ISIS_COMMON_HEADER_SIZE); header_iih_lan = (const struct isis_iih_lan_header *)pptr; header_iih_ptp = (const struct isis_iih_ptp_header *)pptr; header_lsp = (const struct isis_lsp_header *)pptr; header_csnp = (const struct isis_csnp_header *)pptr; header_psnp = (const struct isis_psnp_header *)pptr; if (!ndo->ndo_eflag) ND_PRINT((ndo, "IS-IS")); /* * Sanity checking of the header. 
*/ if (isis_header->version != ISIS_VERSION) { ND_PRINT((ndo, "version %d packet not supported", isis_header->version)); return (0); } if ((isis_header->id_length != SYSTEM_ID_LEN) && (isis_header->id_length != 0)) { ND_PRINT((ndo, "system ID length of %d is not supported", isis_header->id_length)); return (0); } if (isis_header->pdu_version != ISIS_VERSION) { ND_PRINT((ndo, "version %d packet not supported", isis_header->pdu_version)); return (0); } if (length < isis_header->fixed_len) { ND_PRINT((ndo, "fixed header length %u > packet length %u", isis_header->fixed_len, length)); return (0); } if (isis_header->fixed_len < ISIS_COMMON_HEADER_SIZE) { ND_PRINT((ndo, "fixed header length %u < minimum header size %u", isis_header->fixed_len, (u_int)ISIS_COMMON_HEADER_SIZE)); return (0); } max_area = isis_header->max_area; switch(max_area) { case 0: max_area = 3; /* silly shit */ break; case 255: ND_PRINT((ndo, "bad packet -- 255 areas")); return (0); default: break; } id_length = isis_header->id_length; switch(id_length) { case 0: id_length = 6; /* silly shit again */ break; case 1: /* 1-8 are valid sys-ID lenghts */ case 2: case 3: case 4: case 5: case 6: case 7: case 8: break; case 255: id_length = 0; /* entirely useless */ break; default: break; } /* toss any non 6-byte sys-ID len PDUs */ if (id_length != 6 ) { ND_PRINT((ndo, "bad packet -- illegal sys-ID length (%u)", id_length)); return (0); } pdu_type=isis_header->pdu_type; /* in non-verbose mode print the basic PDU Type plus PDU specific brief information*/ if (ndo->ndo_vflag == 0) { ND_PRINT((ndo, "%s%s", ndo->ndo_eflag ? "" : ", ", tok2str(isis_pdu_values, "unknown PDU-Type %u", pdu_type))); } else { /* ok they seem to want to know everything - lets fully decode it */ ND_PRINT((ndo, "%slength %u", ndo->ndo_eflag ? 
"" : ", ", length)); ND_PRINT((ndo, "\n\t%s, hlen: %u, v: %u, pdu-v: %u, sys-id-len: %u (%u), max-area: %u (%u)", tok2str(isis_pdu_values, "unknown, type %u", pdu_type), isis_header->fixed_len, isis_header->version, isis_header->pdu_version, id_length, isis_header->id_length, max_area, isis_header->max_area)); if (ndo->ndo_vflag > 1) { if (!print_unknown_data(ndo, optr, "\n\t", 8)) /* provide the _o_riginal pointer */ return (0); /* for optionally debugging the common header */ } } switch (pdu_type) { case ISIS_PDU_L1_LAN_IIH: case ISIS_PDU_L2_LAN_IIH: if (isis_header->fixed_len != (ISIS_COMMON_HEADER_SIZE+ISIS_IIH_LAN_HEADER_SIZE)) { ND_PRINT((ndo, ", bogus fixed header length %u should be %lu", isis_header->fixed_len, (unsigned long)(ISIS_COMMON_HEADER_SIZE+ISIS_IIH_LAN_HEADER_SIZE))); return (0); } ND_TCHECK(*header_iih_lan); if (length < ISIS_COMMON_HEADER_SIZE+ISIS_IIH_LAN_HEADER_SIZE) goto trunc; if (ndo->ndo_vflag == 0) { ND_PRINT((ndo, ", src-id %s", isis_print_id(header_iih_lan->source_id, SYSTEM_ID_LEN))); ND_PRINT((ndo, ", lan-id %s, prio %u", isis_print_id(header_iih_lan->lan_id,NODE_ID_LEN), header_iih_lan->priority)); ND_PRINT((ndo, ", length %u", length)); return (1); } pdu_len=EXTRACT_16BITS(header_iih_lan->pdu_len); if (packet_len>pdu_len) { packet_len=pdu_len; /* do TLV decoding as long as it makes sense */ length=pdu_len; } ND_PRINT((ndo, "\n\t source-id: %s, holding time: %us, Flags: [%s]", isis_print_id(header_iih_lan->source_id,SYSTEM_ID_LEN), EXTRACT_16BITS(header_iih_lan->holding_time), tok2str(isis_iih_circuit_type_values, "unknown circuit type 0x%02x", header_iih_lan->circuit_type))); ND_PRINT((ndo, "\n\t lan-id: %s, Priority: %u, PDU length: %u", isis_print_id(header_iih_lan->lan_id, NODE_ID_LEN), (header_iih_lan->priority) & ISIS_LAN_PRIORITY_MASK, pdu_len)); if (ndo->ndo_vflag > 1) { if (!print_unknown_data(ndo, pptr, "\n\t ", ISIS_IIH_LAN_HEADER_SIZE)) return (0); } packet_len -= (ISIS_COMMON_HEADER_SIZE+ISIS_IIH_LAN_HEADER_SIZE); pptr = p + (ISIS_COMMON_HEADER_SIZE+ISIS_IIH_LAN_HEADER_SIZE); break; case ISIS_PDU_PTP_IIH: if (isis_header->fixed_len != (ISIS_COMMON_HEADER_SIZE+ISIS_IIH_PTP_HEADER_SIZE)) { ND_PRINT((ndo, ", bogus fixed header length %u should be %lu", isis_header->fixed_len, (unsigned long)(ISIS_COMMON_HEADER_SIZE+ISIS_IIH_PTP_HEADER_SIZE))); return (0); } ND_TCHECK(*header_iih_ptp); if (length < ISIS_COMMON_HEADER_SIZE+ISIS_IIH_PTP_HEADER_SIZE) goto trunc; if (ndo->ndo_vflag == 0) { ND_PRINT((ndo, ", src-id %s", isis_print_id(header_iih_ptp->source_id, SYSTEM_ID_LEN))); ND_PRINT((ndo, ", length %u", length)); return (1); } pdu_len=EXTRACT_16BITS(header_iih_ptp->pdu_len); if (packet_len>pdu_len) { packet_len=pdu_len; /* do TLV decoding as long as it makes sense */ length=pdu_len; } ND_PRINT((ndo, "\n\t source-id: %s, holding time: %us, Flags: [%s]", isis_print_id(header_iih_ptp->source_id,SYSTEM_ID_LEN), EXTRACT_16BITS(header_iih_ptp->holding_time), tok2str(isis_iih_circuit_type_values, "unknown circuit type 0x%02x", header_iih_ptp->circuit_type))); ND_PRINT((ndo, "\n\t circuit-id: 0x%02x, PDU length: %u", header_iih_ptp->circuit_id, pdu_len)); if (ndo->ndo_vflag > 1) { if (!print_unknown_data(ndo, pptr, "\n\t ", ISIS_IIH_PTP_HEADER_SIZE)) return (0); } packet_len -= (ISIS_COMMON_HEADER_SIZE+ISIS_IIH_PTP_HEADER_SIZE); pptr = p + (ISIS_COMMON_HEADER_SIZE+ISIS_IIH_PTP_HEADER_SIZE); break; case ISIS_PDU_L1_LSP: case ISIS_PDU_L2_LSP: if (isis_header->fixed_len != (ISIS_COMMON_HEADER_SIZE+ISIS_LSP_HEADER_SIZE)) { ND_PRINT((ndo, ", bogus fixed header 
length %u should be %lu", isis_header->fixed_len, (unsigned long)ISIS_LSP_HEADER_SIZE)); return (0); } ND_TCHECK(*header_lsp); if (length < ISIS_COMMON_HEADER_SIZE+ISIS_LSP_HEADER_SIZE) goto trunc; if (ndo->ndo_vflag == 0) { ND_PRINT((ndo, ", lsp-id %s, seq 0x%08x, lifetime %5us", isis_print_id(header_lsp->lsp_id, LSP_ID_LEN), EXTRACT_32BITS(header_lsp->sequence_number), EXTRACT_16BITS(header_lsp->remaining_lifetime))); ND_PRINT((ndo, ", length %u", length)); return (1); } pdu_len=EXTRACT_16BITS(header_lsp->pdu_len); if (packet_len>pdu_len) { packet_len=pdu_len; /* do TLV decoding as long as it makes sense */ length=pdu_len; } ND_PRINT((ndo, "\n\t lsp-id: %s, seq: 0x%08x, lifetime: %5us\n\t chksum: 0x%04x", isis_print_id(header_lsp->lsp_id, LSP_ID_LEN), EXTRACT_32BITS(header_lsp->sequence_number), EXTRACT_16BITS(header_lsp->remaining_lifetime), EXTRACT_16BITS(header_lsp->checksum))); osi_print_cksum(ndo, (const uint8_t *)header_lsp->lsp_id, EXTRACT_16BITS(header_lsp->checksum), 12, length-12); ND_PRINT((ndo, ", PDU length: %u, Flags: [ %s", pdu_len, ISIS_MASK_LSP_OL_BIT(header_lsp->typeblock) ? "Overload bit set, " : "")); if (ISIS_MASK_LSP_ATT_BITS(header_lsp->typeblock)) { ND_PRINT((ndo, "%s", ISIS_MASK_LSP_ATT_DEFAULT_BIT(header_lsp->typeblock) ? "default " : "")); ND_PRINT((ndo, "%s", ISIS_MASK_LSP_ATT_DELAY_BIT(header_lsp->typeblock) ? "delay " : "")); ND_PRINT((ndo, "%s", ISIS_MASK_LSP_ATT_EXPENSE_BIT(header_lsp->typeblock) ? "expense " : "")); ND_PRINT((ndo, "%s", ISIS_MASK_LSP_ATT_ERROR_BIT(header_lsp->typeblock) ? "error " : "")); ND_PRINT((ndo, "ATT bit set, ")); } ND_PRINT((ndo, "%s", ISIS_MASK_LSP_PARTITION_BIT(header_lsp->typeblock) ? "P bit set, " : "")); ND_PRINT((ndo, "%s ]", tok2str(isis_lsp_istype_values, "Unknown(0x%x)", ISIS_MASK_LSP_ISTYPE_BITS(header_lsp->typeblock)))); if (ndo->ndo_vflag > 1) { if (!print_unknown_data(ndo, pptr, "\n\t ", ISIS_LSP_HEADER_SIZE)) return (0); } packet_len -= (ISIS_COMMON_HEADER_SIZE+ISIS_LSP_HEADER_SIZE); pptr = p + (ISIS_COMMON_HEADER_SIZE+ISIS_LSP_HEADER_SIZE); break; case ISIS_PDU_L1_CSNP: case ISIS_PDU_L2_CSNP: if (isis_header->fixed_len != (ISIS_COMMON_HEADER_SIZE+ISIS_CSNP_HEADER_SIZE)) { ND_PRINT((ndo, ", bogus fixed header length %u should be %lu", isis_header->fixed_len, (unsigned long)(ISIS_COMMON_HEADER_SIZE+ISIS_CSNP_HEADER_SIZE))); return (0); } ND_TCHECK(*header_csnp); if (length < ISIS_COMMON_HEADER_SIZE+ISIS_CSNP_HEADER_SIZE) goto trunc; if (ndo->ndo_vflag == 0) { ND_PRINT((ndo, ", src-id %s", isis_print_id(header_csnp->source_id, NODE_ID_LEN))); ND_PRINT((ndo, ", length %u", length)); return (1); } pdu_len=EXTRACT_16BITS(header_csnp->pdu_len); if (packet_len>pdu_len) { packet_len=pdu_len; /* do TLV decoding as long as it makes sense */ length=pdu_len; } ND_PRINT((ndo, "\n\t source-id: %s, PDU length: %u", isis_print_id(header_csnp->source_id, NODE_ID_LEN), pdu_len)); ND_PRINT((ndo, "\n\t start lsp-id: %s", isis_print_id(header_csnp->start_lsp_id, LSP_ID_LEN))); ND_PRINT((ndo, "\n\t end lsp-id: %s", isis_print_id(header_csnp->end_lsp_id, LSP_ID_LEN))); if (ndo->ndo_vflag > 1) { if (!print_unknown_data(ndo, pptr, "\n\t ", ISIS_CSNP_HEADER_SIZE)) return (0); } packet_len -= (ISIS_COMMON_HEADER_SIZE+ISIS_CSNP_HEADER_SIZE); pptr = p + (ISIS_COMMON_HEADER_SIZE+ISIS_CSNP_HEADER_SIZE); break; case ISIS_PDU_L1_PSNP: case ISIS_PDU_L2_PSNP: if (isis_header->fixed_len != (ISIS_COMMON_HEADER_SIZE+ISIS_PSNP_HEADER_SIZE)) { ND_PRINT((ndo, "- bogus fixed header length %u should be %lu", isis_header->fixed_len, (unsigned 
long)(ISIS_COMMON_HEADER_SIZE+ISIS_PSNP_HEADER_SIZE))); return (0); } ND_TCHECK(*header_psnp); if (length < ISIS_COMMON_HEADER_SIZE+ISIS_PSNP_HEADER_SIZE) goto trunc; if (ndo->ndo_vflag == 0) { ND_PRINT((ndo, ", src-id %s", isis_print_id(header_psnp->source_id, NODE_ID_LEN))); ND_PRINT((ndo, ", length %u", length)); return (1); } pdu_len=EXTRACT_16BITS(header_psnp->pdu_len); if (packet_len>pdu_len) { packet_len=pdu_len; /* do TLV decoding as long as it makes sense */ length=pdu_len; } ND_PRINT((ndo, "\n\t source-id: %s, PDU length: %u", isis_print_id(header_psnp->source_id, NODE_ID_LEN), pdu_len)); if (ndo->ndo_vflag > 1) { if (!print_unknown_data(ndo, pptr, "\n\t ", ISIS_PSNP_HEADER_SIZE)) return (0); } packet_len -= (ISIS_COMMON_HEADER_SIZE+ISIS_PSNP_HEADER_SIZE); pptr = p + (ISIS_COMMON_HEADER_SIZE+ISIS_PSNP_HEADER_SIZE); break; default: if (ndo->ndo_vflag == 0) { ND_PRINT((ndo, ", length %u", length)); return (1); } (void)print_unknown_data(ndo, pptr, "\n\t ", length); return (0); } /* * Now print the TLV's. */ while (packet_len > 0) { ND_TCHECK2(*pptr, 2); if (packet_len < 2) goto trunc; tlv_type = *pptr++; tlv_len = *pptr++; tmp =tlv_len; /* copy temporary len & pointer to packet data */ tptr = pptr; packet_len -= 2; /* first lets see if we know the TLVs name*/ ND_PRINT((ndo, "\n\t %s TLV #%u, length: %u", tok2str(isis_tlv_values, "unknown", tlv_type), tlv_type, tlv_len)); if (tlv_len == 0) /* something is invalid */ continue; if (packet_len < tlv_len) goto trunc; /* now check if we have a decoder otherwise do a hexdump at the end*/ switch (tlv_type) { case ISIS_TLV_AREA_ADDR: ND_TCHECK2(*tptr, 1); alen = *tptr++; while (tmp && alen < tmp) { ND_TCHECK2(*tptr, alen); ND_PRINT((ndo, "\n\t Area address (length: %u): %s", alen, isonsap_string(ndo, tptr, alen))); tptr += alen; tmp -= alen + 1; if (tmp==0) /* if this is the last area address do not attemt a boundary check */ break; ND_TCHECK2(*tptr, 1); alen = *tptr++; } break; case ISIS_TLV_ISNEIGH: while (tmp >= ETHER_ADDR_LEN) { ND_TCHECK2(*tptr, ETHER_ADDR_LEN); ND_PRINT((ndo, "\n\t SNPA: %s", isis_print_id(tptr, ETHER_ADDR_LEN))); tmp -= ETHER_ADDR_LEN; tptr += ETHER_ADDR_LEN; } break; case ISIS_TLV_ISNEIGH_VARLEN: if (!ND_TTEST2(*tptr, 1) || tmp < 3) /* min. TLV length */ goto trunctlv; lan_alen = *tptr++; /* LAN address length */ if (lan_alen == 0) { ND_PRINT((ndo, "\n\t LAN address length 0 bytes (invalid)")); break; } tmp --; ND_PRINT((ndo, "\n\t LAN address length %u bytes ", lan_alen)); while (tmp >= lan_alen) { ND_TCHECK2(*tptr, lan_alen); ND_PRINT((ndo, "\n\t\tIS Neighbor: %s", isis_print_id(tptr, lan_alen))); tmp -= lan_alen; tptr +=lan_alen; } break; case ISIS_TLV_PADDING: break; case ISIS_TLV_MT_IS_REACH: mt_len = isis_print_mtid(ndo, tptr, "\n\t "); if (mt_len == 0) /* did something go wrong ? */ goto trunctlv; tptr+=mt_len; tmp-=mt_len; while (tmp >= 2+NODE_ID_LEN+3+1) { ext_is_len = isis_print_ext_is_reach(ndo, tptr, "\n\t ", tlv_type); if (ext_is_len == 0) /* did something go wrong ? */ goto trunctlv; tmp-=ext_is_len; tptr+=ext_is_len; } break; case ISIS_TLV_IS_ALIAS_ID: while (tmp >= NODE_ID_LEN+1) { /* is it worth attempting a decode ? */ ext_is_len = isis_print_ext_is_reach(ndo, tptr, "\n\t ", tlv_type); if (ext_is_len == 0) /* did something go wrong ? */ goto trunctlv; tmp-=ext_is_len; tptr+=ext_is_len; } break; case ISIS_TLV_EXT_IS_REACH: while (tmp >= NODE_ID_LEN+3+1) { /* is it worth attempting a decode ? 
*/ ext_is_len = isis_print_ext_is_reach(ndo, tptr, "\n\t ", tlv_type); if (ext_is_len == 0) /* did something go wrong ? */ goto trunctlv; tmp-=ext_is_len; tptr+=ext_is_len; } break; case ISIS_TLV_IS_REACH: ND_TCHECK2(*tptr,1); /* check if there is one byte left to read out the virtual flag */ ND_PRINT((ndo, "\n\t %s", tok2str(isis_is_reach_virtual_values, "bogus virtual flag 0x%02x", *tptr++))); tlv_is_reach = (const struct isis_tlv_is_reach *)tptr; while (tmp >= sizeof(struct isis_tlv_is_reach)) { ND_TCHECK(*tlv_is_reach); ND_PRINT((ndo, "\n\t IS Neighbor: %s", isis_print_id(tlv_is_reach->neighbor_nodeid, NODE_ID_LEN))); isis_print_metric_block(ndo, &tlv_is_reach->isis_metric_block); tmp -= sizeof(struct isis_tlv_is_reach); tlv_is_reach++; } break; case ISIS_TLV_ESNEIGH: tlv_es_reach = (const struct isis_tlv_es_reach *)tptr; while (tmp >= sizeof(struct isis_tlv_es_reach)) { ND_TCHECK(*tlv_es_reach); ND_PRINT((ndo, "\n\t ES Neighbor: %s", isis_print_id(tlv_es_reach->neighbor_sysid, SYSTEM_ID_LEN))); isis_print_metric_block(ndo, &tlv_es_reach->isis_metric_block); tmp -= sizeof(struct isis_tlv_es_reach); tlv_es_reach++; } break; /* those two TLVs share the same format */ case ISIS_TLV_INT_IP_REACH: case ISIS_TLV_EXT_IP_REACH: if (!isis_print_tlv_ip_reach(ndo, pptr, "\n\t ", tlv_len)) return (1); break; case ISIS_TLV_EXTD_IP_REACH: while (tmp>0) { ext_ip_len = isis_print_extd_ip_reach(ndo, tptr, "\n\t ", AF_INET); if (ext_ip_len == 0) /* did something go wrong ? */ goto trunctlv; tptr+=ext_ip_len; tmp-=ext_ip_len; } break; case ISIS_TLV_MT_IP_REACH: mt_len = isis_print_mtid(ndo, tptr, "\n\t "); if (mt_len == 0) { /* did something go wrong ? */ goto trunctlv; } tptr+=mt_len; tmp-=mt_len; while (tmp>0) { ext_ip_len = isis_print_extd_ip_reach(ndo, tptr, "\n\t ", AF_INET); if (ext_ip_len == 0) /* did something go wrong ? */ goto trunctlv; tptr+=ext_ip_len; tmp-=ext_ip_len; } break; case ISIS_TLV_IP6_REACH: while (tmp>0) { ext_ip_len = isis_print_extd_ip_reach(ndo, tptr, "\n\t ", AF_INET6); if (ext_ip_len == 0) /* did something go wrong ? */ goto trunctlv; tptr+=ext_ip_len; tmp-=ext_ip_len; } break; case ISIS_TLV_MT_IP6_REACH: mt_len = isis_print_mtid(ndo, tptr, "\n\t "); if (mt_len == 0) { /* did something go wrong ? */ goto trunctlv; } tptr+=mt_len; tmp-=mt_len; while (tmp>0) { ext_ip_len = isis_print_extd_ip_reach(ndo, tptr, "\n\t ", AF_INET6); if (ext_ip_len == 0) /* did something go wrong ? 
*/ goto trunctlv; tptr+=ext_ip_len; tmp-=ext_ip_len; } break; case ISIS_TLV_IP6ADDR: while (tmp>=sizeof(struct in6_addr)) { ND_TCHECK2(*tptr, sizeof(struct in6_addr)); ND_PRINT((ndo, "\n\t IPv6 interface address: %s", ip6addr_string(ndo, tptr))); tptr += sizeof(struct in6_addr); tmp -= sizeof(struct in6_addr); } break; case ISIS_TLV_AUTH: ND_TCHECK2(*tptr, 1); ND_PRINT((ndo, "\n\t %s: ", tok2str(isis_subtlv_auth_values, "unknown Authentication type 0x%02x", *tptr))); switch (*tptr) { case ISIS_SUBTLV_AUTH_SIMPLE: if (fn_printzp(ndo, tptr + 1, tlv_len - 1, ndo->ndo_snapend)) goto trunctlv; break; case ISIS_SUBTLV_AUTH_MD5: for(i=1;i<tlv_len;i++) { ND_TCHECK2(*(tptr + i), 1); ND_PRINT((ndo, "%02x", *(tptr + i))); } if (tlv_len != ISIS_SUBTLV_AUTH_MD5_LEN+1) ND_PRINT((ndo, ", (invalid subTLV) ")); sigcheck = signature_verify(ndo, optr, length, tptr + 1, isis_clear_checksum_lifetime, header_lsp); ND_PRINT((ndo, " (%s)", tok2str(signature_check_values, "Unknown", sigcheck))); break; case ISIS_SUBTLV_AUTH_GENERIC: ND_TCHECK2(*(tptr + 1), 2); key_id = EXTRACT_16BITS((tptr+1)); ND_PRINT((ndo, "%u, password: ", key_id)); for(i=1 + sizeof(uint16_t);i<tlv_len;i++) { ND_TCHECK2(*(tptr + i), 1); ND_PRINT((ndo, "%02x", *(tptr + i))); } break; case ISIS_SUBTLV_AUTH_PRIVATE: default: if (!print_unknown_data(ndo, tptr + 1, "\n\t\t ", tlv_len - 1)) return(0); break; } break; case ISIS_TLV_PTP_ADJ: tlv_ptp_adj = (const struct isis_tlv_ptp_adj *)tptr; if(tmp>=1) { ND_TCHECK2(*tptr, 1); ND_PRINT((ndo, "\n\t Adjacency State: %s (%u)", tok2str(isis_ptp_adjancey_values, "unknown", *tptr), *tptr)); tmp--; } if(tmp>sizeof(tlv_ptp_adj->extd_local_circuit_id)) { ND_TCHECK(tlv_ptp_adj->extd_local_circuit_id); ND_PRINT((ndo, "\n\t Extended Local circuit-ID: 0x%08x", EXTRACT_32BITS(tlv_ptp_adj->extd_local_circuit_id))); tmp-=sizeof(tlv_ptp_adj->extd_local_circuit_id); } if(tmp>=SYSTEM_ID_LEN) { ND_TCHECK2(tlv_ptp_adj->neighbor_sysid, SYSTEM_ID_LEN); ND_PRINT((ndo, "\n\t Neighbor System-ID: %s", isis_print_id(tlv_ptp_adj->neighbor_sysid, SYSTEM_ID_LEN))); tmp-=SYSTEM_ID_LEN; } if(tmp>=sizeof(tlv_ptp_adj->neighbor_extd_local_circuit_id)) { ND_TCHECK(tlv_ptp_adj->neighbor_extd_local_circuit_id); ND_PRINT((ndo, "\n\t Neighbor Extended Local circuit-ID: 0x%08x", EXTRACT_32BITS(tlv_ptp_adj->neighbor_extd_local_circuit_id))); } break; case ISIS_TLV_PROTOCOLS: ND_PRINT((ndo, "\n\t NLPID(s): ")); while (tmp>0) { ND_TCHECK2(*(tptr), 1); ND_PRINT((ndo, "%s (0x%02x)", tok2str(nlpid_values, "unknown", *tptr), *tptr)); if (tmp>1) /* further NPLIDs ? 
- put comma */ ND_PRINT((ndo, ", ")); tptr++; tmp--; } break; case ISIS_TLV_MT_PORT_CAP: { ND_TCHECK2(*(tptr), 2); ND_PRINT((ndo, "\n\t RES: %d, MTID(s): %d", (EXTRACT_16BITS (tptr) >> 12), (EXTRACT_16BITS (tptr) & 0x0fff))); tmp = tmp-2; tptr = tptr+2; if (tmp) isis_print_mt_port_cap_subtlv(ndo, tptr, tmp); break; } case ISIS_TLV_MT_CAPABILITY: ND_TCHECK2(*(tptr), 2); ND_PRINT((ndo, "\n\t O: %d, RES: %d, MTID(s): %d", (EXTRACT_16BITS(tptr) >> 15) & 0x01, (EXTRACT_16BITS(tptr) >> 12) & 0x07, EXTRACT_16BITS(tptr) & 0x0fff)); tmp = tmp-2; tptr = tptr+2; if (tmp) isis_print_mt_capability_subtlv(ndo, tptr, tmp); break; case ISIS_TLV_TE_ROUTER_ID: ND_TCHECK2(*pptr, sizeof(struct in_addr)); ND_PRINT((ndo, "\n\t Traffic Engineering Router ID: %s", ipaddr_string(ndo, pptr))); break; case ISIS_TLV_IPADDR: while (tmp>=sizeof(struct in_addr)) { ND_TCHECK2(*tptr, sizeof(struct in_addr)); ND_PRINT((ndo, "\n\t IPv4 interface address: %s", ipaddr_string(ndo, tptr))); tptr += sizeof(struct in_addr); tmp -= sizeof(struct in_addr); } break; case ISIS_TLV_HOSTNAME: ND_PRINT((ndo, "\n\t Hostname: ")); if (fn_printzp(ndo, tptr, tmp, ndo->ndo_snapend)) goto trunctlv; break; case ISIS_TLV_SHARED_RISK_GROUP: if (tmp < NODE_ID_LEN) break; ND_TCHECK2(*tptr, NODE_ID_LEN); ND_PRINT((ndo, "\n\t IS Neighbor: %s", isis_print_id(tptr, NODE_ID_LEN))); tptr+=(NODE_ID_LEN); tmp-=(NODE_ID_LEN); if (tmp < 1) break; ND_TCHECK2(*tptr, 1); ND_PRINT((ndo, ", Flags: [%s]", ISIS_MASK_TLV_SHARED_RISK_GROUP(*tptr++) ? "numbered" : "unnumbered")); tmp--; if (tmp < sizeof(struct in_addr)) break; ND_TCHECK2(*tptr, sizeof(struct in_addr)); ND_PRINT((ndo, "\n\t IPv4 interface address: %s", ipaddr_string(ndo, tptr))); tptr+=sizeof(struct in_addr); tmp-=sizeof(struct in_addr); if (tmp < sizeof(struct in_addr)) break; ND_TCHECK2(*tptr, sizeof(struct in_addr)); ND_PRINT((ndo, "\n\t IPv4 neighbor address: %s", ipaddr_string(ndo, tptr))); tptr+=sizeof(struct in_addr); tmp-=sizeof(struct in_addr); while (tmp>=4) { ND_TCHECK2(*tptr, 4); ND_PRINT((ndo, "\n\t Link-ID: 0x%08x", EXTRACT_32BITS(tptr))); tptr+=4; tmp-=4; } break; case ISIS_TLV_LSP: tlv_lsp = (const struct isis_tlv_lsp *)tptr; while(tmp>=sizeof(struct isis_tlv_lsp)) { ND_TCHECK((tlv_lsp->lsp_id)[LSP_ID_LEN-1]); ND_PRINT((ndo, "\n\t lsp-id: %s", isis_print_id(tlv_lsp->lsp_id, LSP_ID_LEN))); ND_TCHECK2(tlv_lsp->sequence_number, 4); ND_PRINT((ndo, ", seq: 0x%08x", EXTRACT_32BITS(tlv_lsp->sequence_number))); ND_TCHECK2(tlv_lsp->remaining_lifetime, 2); ND_PRINT((ndo, ", lifetime: %5ds", EXTRACT_16BITS(tlv_lsp->remaining_lifetime))); ND_TCHECK2(tlv_lsp->checksum, 2); ND_PRINT((ndo, ", chksum: 0x%04x", EXTRACT_16BITS(tlv_lsp->checksum))); tmp-=sizeof(struct isis_tlv_lsp); tlv_lsp++; } break; case ISIS_TLV_CHECKSUM: if (tmp < ISIS_TLV_CHECKSUM_MINLEN) break; ND_TCHECK2(*tptr, ISIS_TLV_CHECKSUM_MINLEN); ND_PRINT((ndo, "\n\t checksum: 0x%04x ", EXTRACT_16BITS(tptr))); /* do not attempt to verify the checksum if it is zero * most likely a HMAC-MD5 TLV is also present and * to avoid conflicts the checksum TLV is zeroed. 
* see rfc3358 for details */ osi_print_cksum(ndo, optr, EXTRACT_16BITS(tptr), tptr-optr, length); break; case ISIS_TLV_POI: if (tlv_len >= SYSTEM_ID_LEN + 1) { ND_TCHECK2(*tptr, SYSTEM_ID_LEN + 1); ND_PRINT((ndo, "\n\t Purge Originator System-ID: %s", isis_print_id(tptr + 1, SYSTEM_ID_LEN))); } if (tlv_len == 2 * SYSTEM_ID_LEN + 1) { ND_TCHECK2(*tptr, 2 * SYSTEM_ID_LEN + 1); ND_PRINT((ndo, "\n\t Received from System-ID: %s", isis_print_id(tptr + SYSTEM_ID_LEN + 1, SYSTEM_ID_LEN))); } break; case ISIS_TLV_MT_SUPPORTED: if (tmp < ISIS_TLV_MT_SUPPORTED_MINLEN) break; while (tmp>1) { /* length can only be a multiple of 2, otherwise there is something broken -> so decode down until length is 1 */ if (tmp!=1) { mt_len = isis_print_mtid(ndo, tptr, "\n\t "); if (mt_len == 0) /* did something go wrong ? */ goto trunctlv; tptr+=mt_len; tmp-=mt_len; } else { ND_PRINT((ndo, "\n\t invalid MT-ID")); break; } } break; case ISIS_TLV_RESTART_SIGNALING: /* first attempt to decode the flags */ if (tmp < ISIS_TLV_RESTART_SIGNALING_FLAGLEN) break; ND_TCHECK2(*tptr, ISIS_TLV_RESTART_SIGNALING_FLAGLEN); ND_PRINT((ndo, "\n\t Flags [%s]", bittok2str(isis_restart_flag_values, "none", *tptr))); tptr+=ISIS_TLV_RESTART_SIGNALING_FLAGLEN; tmp-=ISIS_TLV_RESTART_SIGNALING_FLAGLEN; /* is there anything other than the flags field? */ if (tmp == 0) break; if (tmp < ISIS_TLV_RESTART_SIGNALING_HOLDTIMELEN) break; ND_TCHECK2(*tptr, ISIS_TLV_RESTART_SIGNALING_HOLDTIMELEN); ND_PRINT((ndo, ", Remaining holding time %us", EXTRACT_16BITS(tptr))); tptr+=ISIS_TLV_RESTART_SIGNALING_HOLDTIMELEN; tmp-=ISIS_TLV_RESTART_SIGNALING_HOLDTIMELEN; /* is there an additional sysid field present ?*/ if (tmp == SYSTEM_ID_LEN) { ND_TCHECK2(*tptr, SYSTEM_ID_LEN); ND_PRINT((ndo, ", for %s", isis_print_id(tptr,SYSTEM_ID_LEN))); } break; case ISIS_TLV_IDRP_INFO: if (tmp < ISIS_TLV_IDRP_INFO_MINLEN) break; ND_TCHECK2(*tptr, ISIS_TLV_IDRP_INFO_MINLEN); ND_PRINT((ndo, "\n\t Inter-Domain Information Type: %s", tok2str(isis_subtlv_idrp_values, "Unknown (0x%02x)", *tptr))); switch (*tptr++) { case ISIS_SUBTLV_IDRP_ASN: ND_TCHECK2(*tptr, 2); /* fetch AS number */ ND_PRINT((ndo, "AS Number: %u", EXTRACT_16BITS(tptr))); break; case ISIS_SUBTLV_IDRP_LOCAL: case ISIS_SUBTLV_IDRP_RES: default: if (!print_unknown_data(ndo, tptr, "\n\t ", tlv_len - 1)) return(0); break; } break; case ISIS_TLV_LSP_BUFFERSIZE: if (tmp < ISIS_TLV_LSP_BUFFERSIZE_MINLEN) break; ND_TCHECK2(*tptr, ISIS_TLV_LSP_BUFFERSIZE_MINLEN); ND_PRINT((ndo, "\n\t LSP Buffersize: %u", EXTRACT_16BITS(tptr))); break; case ISIS_TLV_PART_DIS: while (tmp >= SYSTEM_ID_LEN) { ND_TCHECK2(*tptr, SYSTEM_ID_LEN); ND_PRINT((ndo, "\n\t %s", isis_print_id(tptr, SYSTEM_ID_LEN))); tptr+=SYSTEM_ID_LEN; tmp-=SYSTEM_ID_LEN; } break; case ISIS_TLV_PREFIX_NEIGH: if (tmp < sizeof(struct isis_metric_block)) break; ND_TCHECK2(*tptr, sizeof(struct isis_metric_block)); ND_PRINT((ndo, "\n\t Metric Block")); isis_print_metric_block(ndo, (const struct isis_metric_block *)tptr); tptr+=sizeof(struct isis_metric_block); tmp-=sizeof(struct isis_metric_block); while(tmp>0) { ND_TCHECK2(*tptr, 1); prefix_len=*tptr++; /* read out prefix length in semioctets*/ if (prefix_len < 2) { ND_PRINT((ndo, "\n\t\tAddress: prefix length %u < 2", prefix_len)); break; } tmp--; if (tmp < prefix_len/2) break; ND_TCHECK2(*tptr, prefix_len / 2); ND_PRINT((ndo, "\n\t\tAddress: %s/%u", isonsap_string(ndo, tptr, prefix_len / 2), prefix_len * 4)); tptr+=prefix_len/2; tmp-=prefix_len/2; } break; case ISIS_TLV_IIH_SEQNR: if (tmp < ISIS_TLV_IIH_SEQNR_MINLEN) 
break; ND_TCHECK2(*tptr, ISIS_TLV_IIH_SEQNR_MINLEN); /* check if four bytes are on the wire */ ND_PRINT((ndo, "\n\t Sequence number: %u", EXTRACT_32BITS(tptr))); break; case ISIS_TLV_VENDOR_PRIVATE: if (tmp < ISIS_TLV_VENDOR_PRIVATE_MINLEN) break; ND_TCHECK2(*tptr, ISIS_TLV_VENDOR_PRIVATE_MINLEN); /* check if enough byte for a full oui */ vendor_id = EXTRACT_24BITS(tptr); ND_PRINT((ndo, "\n\t Vendor: %s (%u)", tok2str(oui_values, "Unknown", vendor_id), vendor_id)); tptr+=3; tmp-=3; if (tmp > 0) /* hexdump the rest */ if (!print_unknown_data(ndo, tptr, "\n\t\t", tmp)) return(0); break; /* * FIXME those are the defined TLVs that lack a decoder * you are welcome to contribute code ;-) */ case ISIS_TLV_DECNET_PHASE4: case ISIS_TLV_LUCENT_PRIVATE: case ISIS_TLV_IPAUTH: case ISIS_TLV_NORTEL_PRIVATE1: case ISIS_TLV_NORTEL_PRIVATE2: default: if (ndo->ndo_vflag <= 1) { if (!print_unknown_data(ndo, pptr, "\n\t\t", tlv_len)) return(0); } break; } /* do we want to see an additionally hexdump ? */ if (ndo->ndo_vflag> 1) { if (!print_unknown_data(ndo, pptr, "\n\t ", tlv_len)) return(0); } pptr += tlv_len; packet_len -= tlv_len; } if (packet_len != 0) { ND_PRINT((ndo, "\n\t %u straggler bytes", packet_len)); } return (1); trunc: ND_PRINT((ndo, "%s", tstr)); return (1); trunctlv: ND_PRINT((ndo, "\n\t\t")); ND_PRINT((ndo, "%s", tstr)); return(1); } static void osi_print_cksum(netdissect_options *ndo, const uint8_t *pptr, uint16_t checksum, int checksum_offset, u_int length) { uint16_t calculated_checksum; /* do not attempt to verify the checksum if it is zero, * if the offset is nonsense, * or the base pointer is not sane */ if (!checksum || checksum_offset < 0 || !ND_TTEST2(*(pptr + checksum_offset), 2) || (u_int)checksum_offset > length || !ND_TTEST2(*pptr, length)) { ND_PRINT((ndo, " (unverified)")); } else { #if 0 printf("\nosi_print_cksum: %p %u %u\n", pptr, checksum_offset, length); #endif calculated_checksum = create_osi_cksum(pptr, checksum_offset, length); if (checksum == calculated_checksum) { ND_PRINT((ndo, " (correct)")); } else { ND_PRINT((ndo, " (incorrect should be 0x%04x)", calculated_checksum)); } } } /* * Local Variables: * c-style: whitesmith * c-basic-offset: 8 * End: */
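For reference, osi_print_cksum() above delegates the arithmetic to create_osi_cksum(). A minimal, self-contained verifier for the ISO 8473 Fletcher checksum carried by IS-IS PDUs is sketched below; it demonstrates the defining property of the checksum (both running sums vanish mod 255 when taken over the checksummed region with the stored checksum left in place). It is not tcpdump's create_osi_cksum(), which instead recomputes the expected value with the checksum field treated as zero. A stored checksum of zero means "not computed" (RFC 3358), which is why the routine above skips verification in that case. /* Sketch only: verify an ISO 8473 / IS-IS Fletcher checksum. */ #include <stddef.h> #include <stdint.h> static int osi_fletcher_ok(const uint8_t *p, size_t len) { uint32_t c0 = 0, c1 = 0; size_t i; for (i = 0; i < len; i++) { c0 = (c0 + p[i]) % 255; /* plain byte sum */ c1 = (c1 + c0) % 255; /* position-weighted sum */ } return (c0 == 0 && c1 == 0); /* both must vanish for an intact PDU */ } int main(void) { const uint8_t pdu[] = { 0 }; /* degenerate all-zero PDU passes by construction */ return !osi_fletcher_ok(pdu, sizeof(pdu)); }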
isis_print_extd_ip_reach(netdissect_options *ndo, const uint8_t *tptr, const char *ident, uint16_t afi) { char ident_buffer[20]; uint8_t prefix[sizeof(struct in6_addr)]; /* shared copy buffer for IPv4 and IPv6 prefixes */ u_int metric, status_byte, bit_length, byte_length, sublen, processed, subtlvtype, subtlvlen; if (!ND_TTEST2(*tptr, 4)) return (0); metric = EXTRACT_32BITS(tptr); processed=4; tptr+=4; if (afi == AF_INET) { if (!ND_TTEST2(*tptr, 1)) /* fetch status byte */ return (0); status_byte=*(tptr++); bit_length = status_byte&0x3f; if (bit_length > 32) { ND_PRINT((ndo, "%sIPv4 prefix: bad bit length %u", ident, bit_length)); return (0); } processed++; } else if (afi == AF_INET6) { if (!ND_TTEST2(*tptr, 1)) /* fetch status & prefix_len byte */ return (0); status_byte=*(tptr++); bit_length=*(tptr++); if (bit_length > 128) { ND_PRINT((ndo, "%sIPv6 prefix: bad bit length %u", ident, bit_length)); return (0); } processed+=2; } else return (0); /* somebody is fooling us */ byte_length = (bit_length + 7) / 8; /* prefix has variable length encoding */ if (!ND_TTEST2(*tptr, byte_length)) return (0); memset(prefix, 0, sizeof prefix); /* clear the copy buffer */ memcpy(prefix,tptr,byte_length); /* copy as much as is stored in the TLV */ tptr+=byte_length; processed+=byte_length; if (afi == AF_INET) ND_PRINT((ndo, "%sIPv4 prefix: %15s/%u", ident, ipaddr_string(ndo, prefix), bit_length)); else if (afi == AF_INET6) ND_PRINT((ndo, "%sIPv6 prefix: %s/%u", ident, ip6addr_string(ndo, prefix), bit_length)); ND_PRINT((ndo, ", Distribution: %s, Metric: %u", ISIS_MASK_TLV_EXTD_IP_UPDOWN(status_byte) ? "down" : "up", metric)); if (afi == AF_INET && ISIS_MASK_TLV_EXTD_IP_SUBTLV(status_byte)) ND_PRINT((ndo, ", sub-TLVs present")); else if (afi == AF_INET6) ND_PRINT((ndo, ", %s%s", ISIS_MASK_TLV_EXTD_IP6_IE(status_byte) ? "External" : "Internal", ISIS_MASK_TLV_EXTD_IP6_SUBTLV(status_byte) ? ", sub-TLVs present" : "")); if ((afi == AF_INET && ISIS_MASK_TLV_EXTD_IP_SUBTLV(status_byte)) || (afi == AF_INET6 && ISIS_MASK_TLV_EXTD_IP6_SUBTLV(status_byte)) ) { /* assume that one prefix can hold more than one subTLV - therefore the first byte must reflect the aggregate bytecount of the subTLVs for this prefix */ if (!ND_TTEST2(*tptr, 1)) return (0); sublen=*(tptr++); processed+=sublen+1; ND_PRINT((ndo, " (%u)", sublen)); /* print out subTLV length */ while (sublen>0) { if (!ND_TTEST2(*tptr,2)) return (0); subtlvtype=*(tptr++); subtlvlen=*(tptr++); /* prepend the indent string */ snprintf(ident_buffer, sizeof(ident_buffer), "%s ",ident); if (!isis_print_ip_reach_subtlv(ndo, tptr, subtlvtype, subtlvlen, ident_buffer)) return(0); tptr+=subtlvlen; sublen-=(subtlvlen+2); } } return (processed); }
isis_print_extd_ip_reach(netdissect_options *ndo, const uint8_t *tptr, const char *ident, uint16_t afi) { char ident_buffer[20]; uint8_t prefix[sizeof(struct in6_addr)]; /* shared copy buffer for IPv4 and IPv6 prefixes */ u_int metric, status_byte, bit_length, byte_length, sublen, processed, subtlvtype, subtlvlen; if (!ND_TTEST2(*tptr, 4)) return (0); metric = EXTRACT_32BITS(tptr); processed=4; tptr+=4; if (afi == AF_INET) { if (!ND_TTEST2(*tptr, 1)) /* fetch status byte */ return (0); status_byte=*(tptr++); bit_length = status_byte&0x3f; if (bit_length > 32) { ND_PRINT((ndo, "%sIPv4 prefix: bad bit length %u", ident, bit_length)); return (0); } processed++; } else if (afi == AF_INET6) { if (!ND_TTEST2(*tptr, 2)) /* fetch status & prefix_len byte */ return (0); status_byte=*(tptr++); bit_length=*(tptr++); if (bit_length > 128) { ND_PRINT((ndo, "%sIPv6 prefix: bad bit length %u", ident, bit_length)); return (0); } processed+=2; } else return (0); /* somebody is fooling us */ byte_length = (bit_length + 7) / 8; /* prefix has variable length encoding */ if (!ND_TTEST2(*tptr, byte_length)) return (0); memset(prefix, 0, sizeof prefix); /* clear the copy buffer */ memcpy(prefix,tptr,byte_length); /* copy as much as is stored in the TLV */ tptr+=byte_length; processed+=byte_length; if (afi == AF_INET) ND_PRINT((ndo, "%sIPv4 prefix: %15s/%u", ident, ipaddr_string(ndo, prefix), bit_length)); else if (afi == AF_INET6) ND_PRINT((ndo, "%sIPv6 prefix: %s/%u", ident, ip6addr_string(ndo, prefix), bit_length)); ND_PRINT((ndo, ", Distribution: %s, Metric: %u", ISIS_MASK_TLV_EXTD_IP_UPDOWN(status_byte) ? "down" : "up", metric)); if (afi == AF_INET && ISIS_MASK_TLV_EXTD_IP_SUBTLV(status_byte)) ND_PRINT((ndo, ", sub-TLVs present")); else if (afi == AF_INET6) ND_PRINT((ndo, ", %s%s", ISIS_MASK_TLV_EXTD_IP6_IE(status_byte) ? "External" : "Internal", ISIS_MASK_TLV_EXTD_IP6_SUBTLV(status_byte) ? ", sub-TLVs present" : "")); if ((afi == AF_INET && ISIS_MASK_TLV_EXTD_IP_SUBTLV(status_byte)) || (afi == AF_INET6 && ISIS_MASK_TLV_EXTD_IP6_SUBTLV(status_byte)) ) { /* assume that one prefix can hold more than one subTLV - therefore the first byte must reflect the aggregate bytecount of the subTLVs for this prefix */ if (!ND_TTEST2(*tptr, 1)) return (0); sublen=*(tptr++); processed+=sublen+1; ND_PRINT((ndo, " (%u)", sublen)); /* print out subTLV length */ while (sublen>0) { if (!ND_TTEST2(*tptr,2)) return (0); subtlvtype=*(tptr++); subtlvlen=*(tptr++); /* prepend the indent string */ snprintf(ident_buffer, sizeof(ident_buffer), "%s ",ident); if (!isis_print_ip_reach_subtlv(ndo, tptr, subtlvtype, subtlvlen, ident_buffer)) return(0); tptr+=subtlvlen; sublen-=(subtlvlen+2); } } return (processed); }
{'added': [(2041, ' if (!ND_TTEST2(*tptr, 2)) /* fetch status & prefix_len byte */')], 'deleted': [(2041, ' if (!ND_TTEST2(*tptr, 1)) /* fetch status & prefix_len byte */')]}
1
1
2403
14614
https://github.com/the-tcpdump-group/tcpdump
CVE-2017-12998
['CWE-125']
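The one-line diff in the record above is the entire fix for CVE-2017-12998: in the AF_INET6 branch of isis_print_extd_ip_reach() the parser consumes two bytes (the status byte and the prefix length), but the pre-patch ND_TTEST2(*tptr, 1) only guaranteed that one of them lay inside the captured data, a textbook CWE-125 out-of-bounds read. A minimal, self-contained sketch of the pattern follows; the names are illustrative, not the tcpdump sources, and in_bounds() stands in for tcpdump's ND_TTEST2() check against ndo_snapend (which points one past the last captured byte). /* Sketch of the CVE-2017-12998 pattern (hypothetical names). */ #include <stdint.h> #include <stdio.h> static int in_bounds(const uint8_t *p, size_t len, const uint8_t *snapend) { return p <= snapend && (size_t)(snapend - p) >= len; } static int parse_v6_prefix_header(const uint8_t *tptr, const uint8_t *snapend) { /* the pre-patch code checked in_bounds(tptr, 1, snapend): one byte * short, because two bytes are consumed below */ if (!in_bounds(tptr, 2, snapend)) return -1; /* truncated packet: bail out */ uint8_t status_byte = *tptr++; /* 1st byte read */ uint8_t bit_length = *tptr++; /* 2nd read: out of bounds pre-patch */ printf("status=0x%02x, prefix-len=%u\n", status_byte, bit_length); return 0; } int main(void) { const uint8_t pkt[] = { 0x00, 0x40 }; /* status byte, prefix length 64 */ return parse_v6_prefix_header(pkt, pkt + sizeof(pkt)) != 0; } Note that func_before and func_after in the record differ only in this single length argument; the rest of the function is untouched.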
rx63n_eth_driver.c
rx63nEthInitGpio
/** * @file rx63n_eth_driver.c * @brief Renesas RX63N Ethernet MAC driver * * @section License * * SPDX-License-Identifier: GPL-2.0-or-later * * Copyright (C) 2010-2020 Oryx Embedded SARL. All rights reserved. * * This file is part of CycloneTCP Open. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. * * @author Oryx Embedded SARL (www.oryx-embedded.com) * @version 2.0.0 **/ //Switch to the appropriate trace level #define TRACE_LEVEL NIC_TRACE_LEVEL //Dependencies #include <iorx63n.h> #include <intrinsics.h> #include "core/net.h" #include "drivers/mac/rx63n_eth_driver.h" #include "debug.h" //Underlying network interface static NetInterface *nicDriverInterface; //IAR EWRX compiler? #if defined(__ICCRX__) //Transmit buffer #pragma data_alignment = 32 static uint8_t txBuffer[RX63N_ETH_TX_BUFFER_COUNT][RX63N_ETH_TX_BUFFER_SIZE]; //Receive buffer #pragma data_alignment = 32 static uint8_t rxBuffer[RX63N_ETH_RX_BUFFER_COUNT][RX63N_ETH_RX_BUFFER_SIZE]; //Transmit DMA descriptors #pragma data_alignment = 32 static Rx63nTxDmaDesc txDmaDesc[RX63N_ETH_TX_BUFFER_COUNT]; //Receive DMA descriptors #pragma data_alignment = 32 static Rx63nRxDmaDesc rxDmaDesc[RX63N_ETH_RX_BUFFER_COUNT]; //GCC compiler? #else //Transmit buffer static uint8_t txBuffer[RX63N_ETH_TX_BUFFER_COUNT][RX63N_ETH_TX_BUFFER_SIZE] __attribute__((aligned(32))); //Receive buffer static uint8_t rxBuffer[RX63N_ETH_RX_BUFFER_COUNT][RX63N_ETH_RX_BUFFER_SIZE] __attribute__((aligned(32))); //Transmit DMA descriptors static Rx63nTxDmaDesc txDmaDesc[RX63N_ETH_TX_BUFFER_COUNT] __attribute__((aligned(32))); //Receive DMA descriptors static Rx63nRxDmaDesc rxDmaDesc[RX63N_ETH_RX_BUFFER_COUNT] __attribute__((aligned(32))); #endif //Current transmit descriptor static uint_t txIndex; //Current receive descriptor static uint_t rxIndex; /** * @brief RX63N Ethernet MAC driver **/ const NicDriver rx63nEthDriver = { NIC_TYPE_ETHERNET, ETH_MTU, rx63nEthInit, rx63nEthTick, rx63nEthEnableIrq, rx63nEthDisableIrq, rx63nEthEventHandler, rx63nEthSendPacket, rx63nEthUpdateMacAddrFilter, rx63nEthUpdateMacConfig, rx63nEthWritePhyReg, rx63nEthReadPhyReg, TRUE, TRUE, TRUE, TRUE }; /** * @brief RX63N Ethernet MAC initialization * @param[in] interface Underlying network interface * @return Error code **/ error_t rx63nEthInit(NetInterface *interface) { error_t error; //Debug message TRACE_INFO("Initializing RX63N Ethernet MAC...\r\n"); //Save underlying network interface nicDriverInterface = interface; //Disable protection SYSTEM.PRCR.WORD = 0xA50B; //Cancel EDMAC module stop state MSTP(EDMAC) = 0; //Enable protection SYSTEM.PRCR.WORD = 0xA500; //GPIO configuration rx63nEthInitGpio(interface); //Reset EDMAC module EDMAC.EDMR.BIT.SWR = 1; sleep(10); //Valid Ethernet PHY or switch driver? 
if(interface->phyDriver != NULL) { //Ethernet PHY initialization error = interface->phyDriver->init(interface); } else if(interface->switchDriver != NULL) { //Ethernet switch initialization error = interface->switchDriver->init(interface); } else { //The interface is not properly configured error = ERROR_FAILURE; } //Any error to report? if(error) { return error; } //Initialize DMA descriptor lists rx63nEthInitDmaDesc(interface); //Maximum frame length that can be accepted ETHERC.RFLR.LONG = RX63N_ETH_RX_BUFFER_SIZE; //Set default inter packet gap (96-bit time) ETHERC.IPGR.LONG = 0x14; //Set the upper 32 bits of the MAC address ETHERC.MAHR = (interface->macAddr.b[0] << 24) | (interface->macAddr.b[1] << 16) | (interface->macAddr.b[2] << 8) | interface->macAddr.b[3]; //Set the lower 16 bits of the MAC address ETHERC.MALR.BIT.MA = (interface->macAddr.b[4] << 8) | interface->macAddr.b[5]; //Set descriptor length (16 bytes) EDMAC.EDMR.BIT.DL = 0; #ifdef _CPU_BIG_ENDIAN //Select big endian mode EDMAC.EDMR.BIT.DE = 0; #else //Select little endian mode EDMAC.EDMR.BIT.DE = 1; #endif //Use store and forward mode EDMAC.TFTR.BIT.TFT = 0; //Set transmit FIFO size (2048 bytes) EDMAC.FDR.BIT.TFD = 7; //Set receive FIFO size (2048 bytes) EDMAC.FDR.BIT.RFD = 7; //Enable continuous reception of multiple frames EDMAC.RMCR.BIT.RNR = 1; //Accept transmit interrupt notifications EDMAC.TRIMD.BIT.TIM = 0; EDMAC.TRIMD.BIT.TIS = 1; //Disable all EDMAC interrupts EDMAC.EESIPR.LONG = 0; //Enable only the desired EDMAC interrupts EDMAC.EESIPR.BIT.TWBIP = 1; EDMAC.EESIPR.BIT.FRIP = 1; //Configure EDMAC interrupt priority IPR(ETHER, EINT) = RX63N_ETH_IRQ_PRIORITY; //Enable transmission and reception ETHERC.ECMR.BIT.TE = 1; ETHERC.ECMR.BIT.RE = 1; //Instruct the DMA to poll the receive descriptor list EDMAC.EDRRR.BIT.RR = 1; //Accept any packets from the upper layer osSetEvent(&interface->nicTxEvent); //Successful initialization return NO_ERROR; } //RDK RX63N or RSK RX63N evaluation board? 
#if defined(USE_RDK_RX63N) || defined(USE_RSK_RX63N) /** * @brief GPIO configuration * @param[in] interface Underlying network interface **/ void rx63nEthInitGpio(NetInterface *interface) { //Unlock MPC registers MPC.PWPR.BIT.B0WI = 0; MPC.PWPR.BIT.PFSWE = 1; #if defined(USE_RDK_RX63N) //Select RMII interface mode MPC.PFENET.BIT.PHYMODE = 0; //Configure ET_MDIO (PA3) PORTA.PMR.BIT.B3 = 1; MPC.PA3PFS.BYTE = 0x11; //Configure ET_MDC (PA4) PORTA.PMR.BIT.B4 = 1; MPC.PA4PFS.BYTE = 0x11; //Configure ET_LINKSTA (PA5) PORTA.PMR.BIT.B5 = 1; MPC.PA5PFS.BYTE = 0x11; //Configure RMII_RXD1 (PB0) PORTB.PMR.BIT.B0 = 1; MPC.PB0PFS.BYTE = 0x12; //Configure RMII_RXD0 (PB1) PORTB.PMR.BIT.B1 = 1; MPC.PB1PFS.BYTE = 0x12; //Configure REF50CK (PB2) PORTB.PMR.BIT.B2 = 1; MPC.PB2PFS.BYTE = 0x12; //Configure RMII_RX_ER (PB3) PORTB.PMR.BIT.B3 = 1; MPC.PB3PFS.BYTE = 0x12; //Configure RMII_TXD_EN (PB4) PORTB.PMR.BIT.B4 = 1; MPC.PB4PFS.BYTE = 0x12; //Configure RMII_TXD0 (PB5) PORTB.PMR.BIT.B5 = 1; MPC.PB5PFS.BYTE = 0x12; //Configure RMII_TXD1 (PB6) PORTB.PMR.BIT.B6 = 1; MPC.PB6PFS.BYTE = 0x12; //Configure RMII_CRS_DV (PB7) PORTB.PMR.BIT.B7 = 1; MPC.PB7PFS.BYTE = 0x12; #elif defined(USE_RSK_RX63N) //Select MII interface mode MPC.PFENET.BIT.PHYMODE = 1; //Configure ET_MDIO (P71) PORT7.PMR.BIT.B1 = 1; MPC.P71PFS.BYTE = 0x11; //Configure ET_MDC (P72) PORT7.PMR.BIT.B2 = 1; MPC.P72PFS.BYTE = 0x11; //Configure ET_ERXD1 (P74) PORT7.PMR.BIT.B4 = 1; MPC.P74PFS.BYTE = 0x11; //Configure ET_ERXD0 P75) PORT7.PMR.BIT.B5 = 1; MPC.P75PFS.BYTE = 0x11; //Configure ET_RX_CLK (P76) PORT7.PMR.BIT.B6 = 1; MPC.P76PFS.BYTE = 0x11; //Configure ET_RX_ER (P77) PORT7.PMR.BIT.B7 = 1; MPC.P77PFS.BYTE = 0x11; //Configure ET_TX_EN (P80) PORT8.PMR.BIT.B0 = 1; MPC.P80PFS.BYTE = 0x11; //Configure ET_ETXD0 (P81) PORT8.PMR.BIT.B1 = 1; MPC.P81PFS.BYTE = 0x11; //Configure ET_ETXD1 (P82) PORT8.PMR.BIT.B2 = 1; MPC.P82PFS.BYTE = 0x11; //Configure ET_CRS (P83) PORT8.PMR.BIT.B3 = 1; MPC.P83PFS.BYTE = 0x11; //Configure ET_ERXD3 (PC0) PORTC.PMR.BIT.B0 = 1; MPC.PC0PFS.BYTE = 0x11; //Configure ET_ERXD2 (PC1) PORTC.PMR.BIT.B1 = 1; MPC.PC1PFS.BYTE = 0x11; //Configure ET_RX_DV (PC2) PORTC.PMR.BIT.B2 = 1; MPC.PC2PFS.BYTE = 0x11; //Configure ET_TX_ER (PC3) PORTC.PMR.BIT.B3 = 1; MPC.PC3PFS.BYTE = 0x11; //Configure ET_TX_CLK (PC4) PORTC.PMR.BIT.B4 = 1; MPC.PC4PFS.BYTE = 0x11; //Configure ET_ETXD2 (PC5) PORTC.PMR.BIT.B5 = 1; MPC.PC5PFS.BYTE = 0x11; //Configure ET_ETXD3 (PC6) PORTC.PMR.BIT.B6 = 1; MPC.PC6PFS.BYTE = 0x11; //Configure ET_COL (PC7) PORTC.PMR.BIT.B7 = 1; MPC.PC7PFS.BYTE = 0x11; #endif //Lock MPC registers MPC.PWPR.BIT.PFSWE = 0; MPC.PWPR.BIT.B0WI = 0; } #endif /** * @brief Initialize DMA descriptor lists * @param[in] interface Underlying network interface **/ void rx63nEthInitDmaDesc(NetInterface *interface) { uint_t i; //Initialize TX descriptors for(i = 0; i < RX63N_ETH_TX_BUFFER_COUNT; i++) { //The descriptor is initially owned by the application txDmaDesc[i].td0 = 0; //Transmit buffer length txDmaDesc[i].td1 = 0; //Transmit buffer address txDmaDesc[i].td2 = (uint32_t) txBuffer[i]; //Clear padding field txDmaDesc[i].padding = 0; } //Mark the last descriptor entry with the TDLE flag txDmaDesc[i - 1].td0 |= EDMAC_TD0_TDLE; //Initialize TX descriptor index txIndex = 0; //Initialize RX descriptors for(i = 0; i < RX63N_ETH_RX_BUFFER_COUNT; i++) { //The descriptor is initially owned by the DMA rxDmaDesc[i].rd0 = EDMAC_RD0_RACT; //Receive buffer length rxDmaDesc[i].rd1 = (RX63N_ETH_RX_BUFFER_SIZE << 16) & EDMAC_RD1_RBL; //Receive buffer address rxDmaDesc[i].rd2 = 
(uint32_t) rxBuffer[i]; //Clear padding field rxDmaDesc[i].padding = 0; } //Mark the last descriptor entry with the RDLE flag rxDmaDesc[i - 1].rd0 |= EDMAC_RD0_RDLE; //Initialize RX descriptor index rxIndex = 0; //Start address of the TX descriptor list EDMAC.TDLAR = txDmaDesc; //Start address of the RX descriptor list EDMAC.RDLAR = rxDmaDesc; } /** * @brief RX63N Ethernet MAC timer handler * * This routine is periodically called by the TCP/IP stack to handle periodic * operations such as polling the link state * * @param[in] interface Underlying network interface **/ void rx63nEthTick(NetInterface *interface) { //Valid Ethernet PHY or switch driver? if(interface->phyDriver != NULL) { //Handle periodic operations interface->phyDriver->tick(interface); } else if(interface->switchDriver != NULL) { //Handle periodic operations interface->switchDriver->tick(interface); } else { //Just for sanity } } /** * @brief Enable interrupts * @param[in] interface Underlying network interface **/ void rx63nEthEnableIrq(NetInterface *interface) { //Enable Ethernet MAC interrupts IEN(ETHER, EINT) = 1; //Valid Ethernet PHY or switch driver? if(interface->phyDriver != NULL) { //Enable Ethernet PHY interrupts interface->phyDriver->enableIrq(interface); } else if(interface->switchDriver != NULL) { //Enable Ethernet switch interrupts interface->switchDriver->enableIrq(interface); } else { //Just for sanity } } /** * @brief Disable interrupts * @param[in] interface Underlying network interface **/ void rx63nEthDisableIrq(NetInterface *interface) { //Disable Ethernet MAC interrupts IEN(ETHER, EINT) = 0; //Valid Ethernet PHY or switch driver? if(interface->phyDriver != NULL) { //Disable Ethernet PHY interrupts interface->phyDriver->disableIrq(interface); } else if(interface->switchDriver != NULL) { //Disable Ethernet switch interrupts interface->switchDriver->disableIrq(interface); } else { //Just for sanity } } /** * @brief RX63N Ethernet MAC interrupt service routine **/ #pragma vector = VECT_ETHER_EINT __interrupt void rx63nEthIrqHandler(void) { bool_t flag; uint32_t status; //Allow nested interrupts __enable_interrupt(); //This flag will be set if a higher priority task must be woken flag = FALSE; //Read interrupt status register status = EDMAC.EESR.LONG; //Packet transmitted? if((status & EDMAC_EESR_TWB) != 0) { //Clear TWB interrupt flag EDMAC.EESR.LONG = EDMAC_EESR_TWB; //Check whether the TX buffer is available for writing if((txDmaDesc[txIndex].td0 & EDMAC_TD0_TACT) == 0) { //Notify the TCP/IP stack that the transmitter is ready to send flag |= osSetEventFromIsr(&nicDriverInterface->nicTxEvent); } } //Packet received? if((status & EDMAC_EESR_FR) != 0) { //Disable FR interrupts EDMAC.EESIPR.BIT.FRIP = 0; //Set event flag nicDriverInterface->nicEvent = TRUE; //Notify the TCP/IP stack of the event flag |= osSetEventFromIsr(&netEvent); } //Interrupt service routine epilogue osExitIsr(flag); } /** * @brief RX63N Ethernet MAC event handler * @param[in] interface Underlying network interface **/ void rx63nEthEventHandler(NetInterface *interface) { error_t error; //Packet received? if((EDMAC.EESR.LONG & EDMAC_EESR_FR) != 0) { //Clear FR interrupt flag EDMAC.EESR.LONG = EDMAC_EESR_FR; //Process all pending packets do { //Read incoming packet error = rx63nEthReceivePacket(interface); //No more data in the receive buffer? 
} while(error != ERROR_BUFFER_EMPTY); } //Re-enable EDMAC interrupts EDMAC.EESIPR.BIT.TWBIP = 1; EDMAC.EESIPR.BIT.FRIP = 1; } /** * @brief Send a packet * @param[in] interface Underlying network interface * @param[in] buffer Multi-part buffer containing the data to send * @param[in] offset Offset to the first data byte * @param[in] ancillary Additional options passed to the stack along with * the packet * @return Error code **/ error_t rx63nEthSendPacket(NetInterface *interface, const NetBuffer *buffer, size_t offset, NetTxAncillary *ancillary) { //Retrieve the length of the packet size_t length = netBufferGetLength(buffer) - offset; //Check the frame length if(length > RX63N_ETH_TX_BUFFER_SIZE) { //The transmitter can accept another packet osSetEvent(&interface->nicTxEvent); //Report an error return ERROR_INVALID_LENGTH; } //Make sure the current buffer is available for writing if((txDmaDesc[txIndex].td0 & EDMAC_TD0_TACT) != 0) { return ERROR_FAILURE; } //Copy user data to the transmit buffer netBufferRead(txBuffer[txIndex], buffer, offset, length); //Write the number of bytes to send txDmaDesc[txIndex].td1 = (length << 16) & EDMAC_TD1_TBL; //Check current index if(txIndex < (RX63N_ETH_TX_BUFFER_COUNT - 1)) { //Give the ownership of the descriptor to the DMA engine txDmaDesc[txIndex].td0 = EDMAC_TD0_TACT | EDMAC_TD0_TFP_SOF | EDMAC_TD0_TFP_EOF | EDMAC_TD0_TWBI; //Point to the next descriptor txIndex++; } else { //Give the ownership of the descriptor to the DMA engine txDmaDesc[txIndex].td0 = EDMAC_TD0_TACT | EDMAC_TD0_TDLE | EDMAC_TD0_TFP_SOF | EDMAC_TD0_TFP_EOF | EDMAC_TD0_TWBI; //Wrap around txIndex = 0; } //Instruct the DMA to poll the transmit descriptor list EDMAC.EDTRR.BIT.TR = 1; //Check whether the next buffer is available for writing if((txDmaDesc[txIndex].td0 & EDMAC_TD0_TACT) == 0) { //The transmitter can accept another packet osSetEvent(&interface->nicTxEvent); } //Successful write operation return NO_ERROR; } /** * @brief Receive a packet * @param[in] interface Underlying network interface * @return Error code **/ error_t rx63nEthReceivePacket(NetInterface *interface) { error_t error; size_t n; NetRxAncillary ancillary; //The current buffer is available for reading? 
if((rxDmaDesc[rxIndex].rd0 & EDMAC_RD0_RACT) == 0) { //SOF and EOF flags should be set if((rxDmaDesc[rxIndex].rd0 & EDMAC_RD0_RFP_SOF) != 0 && (rxDmaDesc[rxIndex].rd0 & EDMAC_RD0_RFP_EOF) != 0) { //Make sure no error occurred if(!(rxDmaDesc[rxIndex].rd0 & (EDMAC_RD0_RFS_MASK & ~EDMAC_RD0_RFS_RMAF))) { //Retrieve the length of the frame n = rxDmaDesc[rxIndex].rd1 & EDMAC_RD1_RFL; //Limit the number of data to read n = MIN(n, RX63N_ETH_RX_BUFFER_SIZE); //Additional options can be passed to the stack along with the packet ancillary = NET_DEFAULT_RX_ANCILLARY; //Pass the packet to the upper layer nicProcessPacket(interface, rxBuffer[rxIndex], n, &ancillary); //Valid packet received error = NO_ERROR; } else { //The received packet contains an error error = ERROR_INVALID_PACKET; } } else { //The packet is not valid error = ERROR_INVALID_PACKET; } //Check current index if(rxIndex < (RX63N_ETH_RX_BUFFER_COUNT - 1)) { //Give the ownership of the descriptor back to the DMA rxDmaDesc[rxIndex].rd0 = EDMAC_RD0_RACT; //Point to the next descriptor rxIndex++; } else { //Give the ownership of the descriptor back to the DMA rxDmaDesc[rxIndex].rd0 = EDMAC_RD0_RACT | EDMAC_RD0_RDLE; //Wrap around rxIndex = 0; } //Instruct the DMA to poll the receive descriptor list EDMAC.EDRRR.BIT.RR = 1; } else { //No more data in the receive buffer error = ERROR_BUFFER_EMPTY; } //Return status code return error; } /** * @brief Configure MAC address filtering * @param[in] interface Underlying network interface * @return Error code **/ error_t rx63nEthUpdateMacAddrFilter(NetInterface *interface) { uint_t i; bool_t acceptMulticast; //Debug message TRACE_DEBUG("Updating MAC filter...\r\n"); //Set the upper 32 bits of the MAC address ETHERC.MAHR = (interface->macAddr.b[0] << 24) | (interface->macAddr.b[1] << 16) | (interface->macAddr.b[2] << 8) | interface->macAddr.b[3]; //Set the lower 16 bits of the MAC address ETHERC.MALR.BIT.MA = (interface->macAddr.b[4] << 8) | interface->macAddr.b[5]; //This flag will be set if multicast addresses should be accepted acceptMulticast = FALSE; //The MAC address filter contains the list of MAC addresses to accept //when receiving an Ethernet frame for(i = 0; i < MAC_ADDR_FILTER_SIZE; i++) { //Valid entry? if(interface->macAddrFilter[i].refCount > 0) { //Accept multicast addresses acceptMulticast = TRUE; //We are done break; } } //Enable the reception of multicast frames if necessary if(acceptMulticast) { EDMAC.EESR.BIT.RMAF = 1; } else { EDMAC.EESR.BIT.RMAF = 0; } //Successful processing return NO_ERROR; } /** * @brief Adjust MAC configuration parameters for proper operation * @param[in] interface Underlying network interface * @return Error code **/ error_t rx63nEthUpdateMacConfig(NetInterface *interface) { //10BASE-T or 100BASE-TX operation mode? if(interface->linkSpeed == NIC_LINK_SPEED_100MBPS) { ETHERC.ECMR.BIT.RTM = 1; } else { ETHERC.ECMR.BIT.RTM = 0; } //Half-duplex or full-duplex mode? 
if(interface->duplexMode == NIC_FULL_DUPLEX_MODE) { ETHERC.ECMR.BIT.DM = 1; } else { ETHERC.ECMR.BIT.DM = 0; } //Successful processing return NO_ERROR; } /** * @brief Write PHY register * @param[in] opcode Access type (2 bits) * @param[in] phyAddr PHY address (5 bits) * @param[in] regAddr Register address (5 bits) * @param[in] data Register value **/ void rx63nEthWritePhyReg(uint8_t opcode, uint8_t phyAddr, uint8_t regAddr, uint16_t data) { //Synchronization pattern rx63nEthWriteSmi(SMI_SYNC, 32); //Start of frame rx63nEthWriteSmi(SMI_START, 2); //Set up a write operation rx63nEthWriteSmi(opcode, 2); //Write PHY address rx63nEthWriteSmi(phyAddr, 5); //Write register address rx63nEthWriteSmi(regAddr, 5); //Turnaround rx63nEthWriteSmi(SMI_TA, 2); //Write register value rx63nEthWriteSmi(data, 16); //Release MDIO rx63nEthReadSmi(1); } /** * @brief Read PHY register * @param[in] opcode Access type (2 bits) * @param[in] phyAddr PHY address (5 bits) * @param[in] regAddr Register address (5 bits) * @return Register value **/ uint16_t rx63nEthReadPhyReg(uint8_t opcode, uint8_t phyAddr, uint8_t regAddr) { uint16_t data; //Synchronization pattern rx63nEthWriteSmi(SMI_SYNC, 32); //Start of frame rx63nEthWriteSmi(SMI_START, 2); //Set up a read operation rx63nEthWriteSmi(opcode, 2); //Write PHY address rx63nEthWriteSmi(phyAddr, 5); //Write register address rx63nEthWriteSmi(regAddr, 5); //Turnaround to avoid contention rx63nEthReadSmi(1); //Read register value data = rx63nEthReadSmi(16); //Force the PHY to release the MDIO pin rx63nEthReadSmi(1); //Return PHY register contents return data; } /** * @brief SMI write operation * @param[in] data Raw data to be written * @param[in] length Number of bits to be written **/ void rx63nEthWriteSmi(uint32_t data, uint_t length) { //Skip the most significant bits since they are meaningless data <<= 32 - length; //Configure MDIO as an output ETHERC.PIR.BIT.MMD = 1; //Write the specified number of bits while(length--) { //Write MDIO if((data & 0x80000000) != 0) { ETHERC.PIR.BIT.MDO = 1; } else { ETHERC.PIR.BIT.MDO = 0; } //Assert MDC usleep(1); ETHERC.PIR.BIT.MDC = 1; //Deassert MDC usleep(1); ETHERC.PIR.BIT.MDC = 0; //Rotate data data <<= 1; } } /** * @brief SMI read operation * @param[in] length Number of bits to be read * @return Data resulting from the MDIO read operation **/ uint32_t rx63nEthReadSmi(uint_t length) { uint32_t data = 0; //Configure MDIO as an input ETHERC.PIR.BIT.MMD = 0; //Read the specified number of bits while(length--) { //Rotate data data <<= 1; //Assert MDC ETHERC.PIR.BIT.MDC = 1; usleep(1); //Deassert MDC ETHERC.PIR.BIT.MDC = 0; usleep(1); //Check MDIO state if(ETHERC.PIR.BIT.MDI) { data |= 0x01; } } //Return the received data return data; }
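A minimal usage sketch for the SMI access layer above (not part of the driver): it polls the PHY's Basic Mode Status Register through rx63nEthReadPhyReg to test the link state. The Clause-22 read opcode (0b10) and the BMSR address (0x01) come from IEEE 802.3, not from this excerpt; a real build would use the named constants from the driver header.

/* Hypothetical helper, for illustration only */
bool_t phyLinkIsUp(uint8_t phyAddr)
{
   //0x02 = IEEE 802.3 Clause-22 read opcode, 0x01 = BMSR register
   uint16_t bmsr = rx63nEthReadPhyReg(0x02, phyAddr, 0x01);

   //Bit 2 of the BMSR reports link status
   return (bmsr & 0x0004) != 0;
}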
/** * @file rx63n_eth_driver.c * @brief Renesas RX63N Ethernet MAC driver * * @section License * * SPDX-License-Identifier: GPL-2.0-or-later * * Copyright (C) 2010-2021 Oryx Embedded SARL. All rights reserved. * * This file is part of CycloneTCP Open. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. * * @author Oryx Embedded SARL (www.oryx-embedded.com) * @version 2.0.2 **/ //Switch to the appropriate trace level #define TRACE_LEVEL NIC_TRACE_LEVEL //Dependencies #include <iorx63n.h> #include <intrinsics.h> #include "core/net.h" #include "drivers/mac/rx63n_eth_driver.h" #include "debug.h" //Underlying network interface static NetInterface *nicDriverInterface; //IAR EWRX compiler? #if defined(__ICCRX__) //Transmit buffer #pragma data_alignment = 32 static uint8_t txBuffer[RX63N_ETH_TX_BUFFER_COUNT][RX63N_ETH_TX_BUFFER_SIZE]; //Receive buffer #pragma data_alignment = 32 static uint8_t rxBuffer[RX63N_ETH_RX_BUFFER_COUNT][RX63N_ETH_RX_BUFFER_SIZE]; //Transmit DMA descriptors #pragma data_alignment = 32 static Rx63nTxDmaDesc txDmaDesc[RX63N_ETH_TX_BUFFER_COUNT]; //Receive DMA descriptors #pragma data_alignment = 32 static Rx63nRxDmaDesc rxDmaDesc[RX63N_ETH_RX_BUFFER_COUNT]; //GCC compiler? #else //Transmit buffer static uint8_t txBuffer[RX63N_ETH_TX_BUFFER_COUNT][RX63N_ETH_TX_BUFFER_SIZE] __attribute__((aligned(32))); //Receive buffer static uint8_t rxBuffer[RX63N_ETH_RX_BUFFER_COUNT][RX63N_ETH_RX_BUFFER_SIZE] __attribute__((aligned(32))); //Transmit DMA descriptors static Rx63nTxDmaDesc txDmaDesc[RX63N_ETH_TX_BUFFER_COUNT] __attribute__((aligned(32))); //Receive DMA descriptors static Rx63nRxDmaDesc rxDmaDesc[RX63N_ETH_RX_BUFFER_COUNT] __attribute__((aligned(32))); #endif //Current transmit descriptor static uint_t txIndex; //Current receive descriptor static uint_t rxIndex; /** * @brief RX63N Ethernet MAC driver **/ const NicDriver rx63nEthDriver = { NIC_TYPE_ETHERNET, ETH_MTU, rx63nEthInit, rx63nEthTick, rx63nEthEnableIrq, rx63nEthDisableIrq, rx63nEthEventHandler, rx63nEthSendPacket, rx63nEthUpdateMacAddrFilter, rx63nEthUpdateMacConfig, rx63nEthWritePhyReg, rx63nEthReadPhyReg, TRUE, TRUE, TRUE, TRUE }; /** * @brief RX63N Ethernet MAC initialization * @param[in] interface Underlying network interface * @return Error code **/ error_t rx63nEthInit(NetInterface *interface) { error_t error; //Debug message TRACE_INFO("Initializing RX63N Ethernet MAC...\r\n"); //Save underlying network interface nicDriverInterface = interface; //Disable protection SYSTEM.PRCR.WORD = 0xA50B; //Cancel EDMAC module stop state MSTP(EDMAC) = 0; //Enable protection SYSTEM.PRCR.WORD = 0xA500; //GPIO configuration rx63nEthInitGpio(interface); //Reset EDMAC module EDMAC.EDMR.BIT.SWR = 1; sleep(10); //Valid Ethernet PHY or switch driver? 
if(interface->phyDriver != NULL) { //Ethernet PHY initialization error = interface->phyDriver->init(interface); } else if(interface->switchDriver != NULL) { //Ethernet switch initialization error = interface->switchDriver->init(interface); } else { //The interface is not properly configured error = ERROR_FAILURE; } //Any error to report? if(error) { return error; } //Initialize DMA descriptor lists rx63nEthInitDmaDesc(interface); //Maximum frame length that can be accepted ETHERC.RFLR.LONG = RX63N_ETH_RX_BUFFER_SIZE; //Set default inter packet gap (96-bit time) ETHERC.IPGR.LONG = 0x14; //Set the upper 32 bits of the MAC address ETHERC.MAHR = (interface->macAddr.b[0] << 24) | (interface->macAddr.b[1] << 16) | (interface->macAddr.b[2] << 8) | interface->macAddr.b[3]; //Set the lower 16 bits of the MAC address ETHERC.MALR.BIT.MA = (interface->macAddr.b[4] << 8) | interface->macAddr.b[5]; //Set descriptor length (16 bytes) EDMAC.EDMR.BIT.DL = 0; #ifdef _CPU_BIG_ENDIAN //Select big endian mode EDMAC.EDMR.BIT.DE = 0; #else //Select little endian mode EDMAC.EDMR.BIT.DE = 1; #endif //Use store and forward mode EDMAC.TFTR.BIT.TFT = 0; //Set transmit FIFO size (2048 bytes) EDMAC.FDR.BIT.TFD = 7; //Set receive FIFO size (2048 bytes) EDMAC.FDR.BIT.RFD = 7; //Enable continuous reception of multiple frames EDMAC.RMCR.BIT.RNR = 1; //Accept transmit interrupt notifications EDMAC.TRIMD.BIT.TIM = 0; EDMAC.TRIMD.BIT.TIS = 1; //Disable all EDMAC interrupts EDMAC.EESIPR.LONG = 0; //Enable only the desired EDMAC interrupts EDMAC.EESIPR.BIT.TWBIP = 1; EDMAC.EESIPR.BIT.FRIP = 1; //Configure EDMAC interrupt priority IPR(ETHER, EINT) = RX63N_ETH_IRQ_PRIORITY; //Enable transmission and reception ETHERC.ECMR.BIT.TE = 1; ETHERC.ECMR.BIT.RE = 1; //Instruct the DMA to poll the receive descriptor list EDMAC.EDRRR.BIT.RR = 1; //Accept any packets from the upper layer osSetEvent(&interface->nicTxEvent); //Successful initialization return NO_ERROR; } //RDK-RX63N, RSK-RX63N or RSK-RX63N-256K evaluation board? 
#if defined(USE_RDK_RX63N) || defined(USE_RSK_RX63N) || \ defined(USE_RSK_RX63N_256K) /** * @brief GPIO configuration * @param[in] interface Underlying network interface **/ void rx63nEthInitGpio(NetInterface *interface) { //Unlock MPC registers MPC.PWPR.BIT.B0WI = 0; MPC.PWPR.BIT.PFSWE = 1; #if defined(USE_RDK_RX63N) //Select RMII interface mode MPC.PFENET.BIT.PHYMODE = 0; //Configure ET_MDIO (PA3) PORTA.PMR.BIT.B3 = 1; MPC.PA3PFS.BYTE = 0x11; //Configure ET_MDC (PA4) PORTA.PMR.BIT.B4 = 1; MPC.PA4PFS.BYTE = 0x11; //Configure ET_LINKSTA (PA5) PORTA.PMR.BIT.B5 = 1; MPC.PA5PFS.BYTE = 0x11; //Configure RMII_RXD1 (PB0) PORTB.PMR.BIT.B0 = 1; MPC.PB0PFS.BYTE = 0x12; //Configure RMII_RXD0 (PB1) PORTB.PMR.BIT.B1 = 1; MPC.PB1PFS.BYTE = 0x12; //Configure REF50CK (PB2) PORTB.PMR.BIT.B2 = 1; MPC.PB2PFS.BYTE = 0x12; //Configure RMII_RX_ER (PB3) PORTB.PMR.BIT.B3 = 1; MPC.PB3PFS.BYTE = 0x12; //Configure RMII_TXD_EN (PB4) PORTB.PMR.BIT.B4 = 1; MPC.PB4PFS.BYTE = 0x12; //Configure RMII_TXD0 (PB5) PORTB.PMR.BIT.B5 = 1; MPC.PB5PFS.BYTE = 0x12; //Configure RMII_TXD1 (PB6) PORTB.PMR.BIT.B6 = 1; MPC.PB6PFS.BYTE = 0x12; //Configure RMII_CRS_DV (PB7) PORTB.PMR.BIT.B7 = 1; MPC.PB7PFS.BYTE = 0x12; #elif defined(USE_RSK_RX63N) || defined(USE_RSK_RX63N_256K) //Select MII interface mode MPC.PFENET.BIT.PHYMODE = 1; //Configure ET_MDIO (P71) PORT7.PMR.BIT.B1 = 1; MPC.P71PFS.BYTE = 0x11; //Configure ET_MDC (P72) PORT7.PMR.BIT.B2 = 1; MPC.P72PFS.BYTE = 0x11; //Configure ET_ERXD1 (P74) PORT7.PMR.BIT.B4 = 1; MPC.P74PFS.BYTE = 0x11; //Configure ET_ERXD0 P75) PORT7.PMR.BIT.B5 = 1; MPC.P75PFS.BYTE = 0x11; //Configure ET_RX_CLK (P76) PORT7.PMR.BIT.B6 = 1; MPC.P76PFS.BYTE = 0x11; //Configure ET_RX_ER (P77) PORT7.PMR.BIT.B7 = 1; MPC.P77PFS.BYTE = 0x11; //Configure ET_TX_EN (P80) PORT8.PMR.BIT.B0 = 1; MPC.P80PFS.BYTE = 0x11; //Configure ET_ETXD0 (P81) PORT8.PMR.BIT.B1 = 1; MPC.P81PFS.BYTE = 0x11; //Configure ET_ETXD1 (P82) PORT8.PMR.BIT.B2 = 1; MPC.P82PFS.BYTE = 0x11; //Configure ET_CRS (P83) PORT8.PMR.BIT.B3 = 1; MPC.P83PFS.BYTE = 0x11; //Configure ET_ERXD3 (PC0) PORTC.PMR.BIT.B0 = 1; MPC.PC0PFS.BYTE = 0x11; //Configure ET_ERXD2 (PC1) PORTC.PMR.BIT.B1 = 1; MPC.PC1PFS.BYTE = 0x11; //Configure ET_RX_DV (PC2) PORTC.PMR.BIT.B2 = 1; MPC.PC2PFS.BYTE = 0x11; //Configure ET_TX_ER (PC3) PORTC.PMR.BIT.B3 = 1; MPC.PC3PFS.BYTE = 0x11; //Configure ET_TX_CLK (PC4) PORTC.PMR.BIT.B4 = 1; MPC.PC4PFS.BYTE = 0x11; //Configure ET_ETXD2 (PC5) PORTC.PMR.BIT.B5 = 1; MPC.PC5PFS.BYTE = 0x11; //Configure ET_ETXD3 (PC6) PORTC.PMR.BIT.B6 = 1; MPC.PC6PFS.BYTE = 0x11; //Configure ET_COL (PC7) PORTC.PMR.BIT.B7 = 1; MPC.PC7PFS.BYTE = 0x11; #endif //Lock MPC registers MPC.PWPR.BIT.PFSWE = 0; MPC.PWPR.BIT.B0WI = 0; } #endif /** * @brief Initialize DMA descriptor lists * @param[in] interface Underlying network interface **/ void rx63nEthInitDmaDesc(NetInterface *interface) { uint_t i; //Initialize TX descriptors for(i = 0; i < RX63N_ETH_TX_BUFFER_COUNT; i++) { //The descriptor is initially owned by the application txDmaDesc[i].td0 = 0; //Transmit buffer length txDmaDesc[i].td1 = 0; //Transmit buffer address txDmaDesc[i].td2 = (uint32_t) txBuffer[i]; //Clear padding field txDmaDesc[i].padding = 0; } //Mark the last descriptor entry with the TDLE flag txDmaDesc[i - 1].td0 |= EDMAC_TD0_TDLE; //Initialize TX descriptor index txIndex = 0; //Initialize RX descriptors for(i = 0; i < RX63N_ETH_RX_BUFFER_COUNT; i++) { //The descriptor is initially owned by the DMA rxDmaDesc[i].rd0 = EDMAC_RD0_RACT; //Receive buffer length rxDmaDesc[i].rd1 = (RX63N_ETH_RX_BUFFER_SIZE << 16) 
& EDMAC_RD1_RBL; //Receive buffer address rxDmaDesc[i].rd2 = (uint32_t) rxBuffer[i]; //Clear padding field rxDmaDesc[i].padding = 0; } //Mark the last descriptor entry with the RDLE flag rxDmaDesc[i - 1].rd0 |= EDMAC_RD0_RDLE; //Initialize RX descriptor index rxIndex = 0; //Start address of the TX descriptor list EDMAC.TDLAR = txDmaDesc; //Start address of the RX descriptor list EDMAC.RDLAR = rxDmaDesc; } /** * @brief RX63N Ethernet MAC timer handler * * This routine is periodically called by the TCP/IP stack to handle periodic * operations such as polling the link state * * @param[in] interface Underlying network interface **/ void rx63nEthTick(NetInterface *interface) { //Valid Ethernet PHY or switch driver? if(interface->phyDriver != NULL) { //Handle periodic operations interface->phyDriver->tick(interface); } else if(interface->switchDriver != NULL) { //Handle periodic operations interface->switchDriver->tick(interface); } else { //Just for sanity } } /** * @brief Enable interrupts * @param[in] interface Underlying network interface **/ void rx63nEthEnableIrq(NetInterface *interface) { //Enable Ethernet MAC interrupts IEN(ETHER, EINT) = 1; //Valid Ethernet PHY or switch driver? if(interface->phyDriver != NULL) { //Enable Ethernet PHY interrupts interface->phyDriver->enableIrq(interface); } else if(interface->switchDriver != NULL) { //Enable Ethernet switch interrupts interface->switchDriver->enableIrq(interface); } else { //Just for sanity } } /** * @brief Disable interrupts * @param[in] interface Underlying network interface **/ void rx63nEthDisableIrq(NetInterface *interface) { //Disable Ethernet MAC interrupts IEN(ETHER, EINT) = 0; //Valid Ethernet PHY or switch driver? if(interface->phyDriver != NULL) { //Disable Ethernet PHY interrupts interface->phyDriver->disableIrq(interface); } else if(interface->switchDriver != NULL) { //Disable Ethernet switch interrupts interface->switchDriver->disableIrq(interface); } else { //Just for sanity } } /** * @brief RX63N Ethernet MAC interrupt service routine **/ #pragma vector = VECT_ETHER_EINT __interrupt void rx63nEthIrqHandler(void) { bool_t flag; uint32_t status; //Allow nested interrupts __enable_interrupt(); //This flag will be set if a higher priority task must be woken flag = FALSE; //Read interrupt status register status = EDMAC.EESR.LONG; //Packet transmitted? if((status & EDMAC_EESR_TWB) != 0) { //Clear TWB interrupt flag EDMAC.EESR.LONG = EDMAC_EESR_TWB; //Check whether the TX buffer is available for writing if((txDmaDesc[txIndex].td0 & EDMAC_TD0_TACT) == 0) { //Notify the TCP/IP stack that the transmitter is ready to send flag |= osSetEventFromIsr(&nicDriverInterface->nicTxEvent); } } //Packet received? if((status & EDMAC_EESR_FR) != 0) { //Disable FR interrupts EDMAC.EESIPR.BIT.FRIP = 0; //Set event flag nicDriverInterface->nicEvent = TRUE; //Notify the TCP/IP stack of the event flag |= osSetEventFromIsr(&netEvent); } //Interrupt service routine epilogue osExitIsr(flag); } /** * @brief RX63N Ethernet MAC event handler * @param[in] interface Underlying network interface **/ void rx63nEthEventHandler(NetInterface *interface) { error_t error; //Packet received? if((EDMAC.EESR.LONG & EDMAC_EESR_FR) != 0) { //Clear FR interrupt flag EDMAC.EESR.LONG = EDMAC_EESR_FR; //Process all pending packets do { //Read incoming packet error = rx63nEthReceivePacket(interface); //No more data in the receive buffer? 
} while(error != ERROR_BUFFER_EMPTY); } //Re-enable EDMAC interrupts EDMAC.EESIPR.BIT.TWBIP = 1; EDMAC.EESIPR.BIT.FRIP = 1; } /** * @brief Send a packet * @param[in] interface Underlying network interface * @param[in] buffer Multi-part buffer containing the data to send * @param[in] offset Offset to the first data byte * @param[in] ancillary Additional options passed to the stack along with * the packet * @return Error code **/ error_t rx63nEthSendPacket(NetInterface *interface, const NetBuffer *buffer, size_t offset, NetTxAncillary *ancillary) { //Retrieve the length of the packet size_t length = netBufferGetLength(buffer) - offset; //Check the frame length if(length > RX63N_ETH_TX_BUFFER_SIZE) { //The transmitter can accept another packet osSetEvent(&interface->nicTxEvent); //Report an error return ERROR_INVALID_LENGTH; } //Make sure the current buffer is available for writing if((txDmaDesc[txIndex].td0 & EDMAC_TD0_TACT) != 0) { return ERROR_FAILURE; } //Copy user data to the transmit buffer netBufferRead(txBuffer[txIndex], buffer, offset, length); //Write the number of bytes to send txDmaDesc[txIndex].td1 = (length << 16) & EDMAC_TD1_TBL; //Check current index if(txIndex < (RX63N_ETH_TX_BUFFER_COUNT - 1)) { //Give the ownership of the descriptor to the DMA engine txDmaDesc[txIndex].td0 = EDMAC_TD0_TACT | EDMAC_TD0_TFP_SOF | EDMAC_TD0_TFP_EOF | EDMAC_TD0_TWBI; //Point to the next descriptor txIndex++; } else { //Give the ownership of the descriptor to the DMA engine txDmaDesc[txIndex].td0 = EDMAC_TD0_TACT | EDMAC_TD0_TDLE | EDMAC_TD0_TFP_SOF | EDMAC_TD0_TFP_EOF | EDMAC_TD0_TWBI; //Wrap around txIndex = 0; } //Instruct the DMA to poll the transmit descriptor list EDMAC.EDTRR.BIT.TR = 1; //Check whether the next buffer is available for writing if((txDmaDesc[txIndex].td0 & EDMAC_TD0_TACT) == 0) { //The transmitter can accept another packet osSetEvent(&interface->nicTxEvent); } //Successful write operation return NO_ERROR; } /** * @brief Receive a packet * @param[in] interface Underlying network interface * @return Error code **/ error_t rx63nEthReceivePacket(NetInterface *interface) { error_t error; size_t n; NetRxAncillary ancillary; //The current buffer is available for reading? 
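//Ownership protocol: the EDMAC clears the RACT bit of a descriptor once it
//has written a complete frame to the associated buffer, so RACT == 0 means
//the driver may safely read the buffer; the driver hands the descriptor
//back to the DMA by setting RACT again after processing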
if((rxDmaDesc[rxIndex].rd0 & EDMAC_RD0_RACT) == 0) { //SOF and EOF flags should be set if((rxDmaDesc[rxIndex].rd0 & EDMAC_RD0_RFP_SOF) != 0 && (rxDmaDesc[rxIndex].rd0 & EDMAC_RD0_RFP_EOF) != 0) { //Make sure no error occurred if(!(rxDmaDesc[rxIndex].rd0 & (EDMAC_RD0_RFS_MASK & ~EDMAC_RD0_RFS_RMAF))) { //Retrieve the length of the frame n = rxDmaDesc[rxIndex].rd1 & EDMAC_RD1_RFL; //Limit the number of data to read n = MIN(n, RX63N_ETH_RX_BUFFER_SIZE); //Additional options can be passed to the stack along with the packet ancillary = NET_DEFAULT_RX_ANCILLARY; //Pass the packet to the upper layer nicProcessPacket(interface, rxBuffer[rxIndex], n, &ancillary); //Valid packet received error = NO_ERROR; } else { //The received packet contains an error error = ERROR_INVALID_PACKET; } } else { //The packet is not valid error = ERROR_INVALID_PACKET; } //Check current index if(rxIndex < (RX63N_ETH_RX_BUFFER_COUNT - 1)) { //Give the ownership of the descriptor back to the DMA rxDmaDesc[rxIndex].rd0 = EDMAC_RD0_RACT; //Point to the next descriptor rxIndex++; } else { //Give the ownership of the descriptor back to the DMA rxDmaDesc[rxIndex].rd0 = EDMAC_RD0_RACT | EDMAC_RD0_RDLE; //Wrap around rxIndex = 0; } //Instruct the DMA to poll the receive descriptor list EDMAC.EDRRR.BIT.RR = 1; } else { //No more data in the receive buffer error = ERROR_BUFFER_EMPTY; } //Return status code return error; } /** * @brief Configure MAC address filtering * @param[in] interface Underlying network interface * @return Error code **/ error_t rx63nEthUpdateMacAddrFilter(NetInterface *interface) { uint_t i; bool_t acceptMulticast; //Debug message TRACE_DEBUG("Updating MAC filter...\r\n"); //Set the upper 32 bits of the MAC address ETHERC.MAHR = (interface->macAddr.b[0] << 24) | (interface->macAddr.b[1] << 16) | (interface->macAddr.b[2] << 8) | interface->macAddr.b[3]; //Set the lower 16 bits of the MAC address ETHERC.MALR.BIT.MA = (interface->macAddr.b[4] << 8) | interface->macAddr.b[5]; //This flag will be set if multicast addresses should be accepted acceptMulticast = FALSE; //The MAC address filter contains the list of MAC addresses to accept //when receiving an Ethernet frame for(i = 0; i < MAC_ADDR_FILTER_SIZE; i++) { //Valid entry? if(interface->macAddrFilter[i].refCount > 0) { //Accept multicast addresses acceptMulticast = TRUE; //We are done break; } } //Enable the reception of multicast frames if necessary if(acceptMulticast) { EDMAC.EESR.BIT.RMAF = 1; } else { EDMAC.EESR.BIT.RMAF = 0; } //Successful processing return NO_ERROR; } /** * @brief Adjust MAC configuration parameters for proper operation * @param[in] interface Underlying network interface * @return Error code **/ error_t rx63nEthUpdateMacConfig(NetInterface *interface) { //10BASE-T or 100BASE-TX operation mode? if(interface->linkSpeed == NIC_LINK_SPEED_100MBPS) { ETHERC.ECMR.BIT.RTM = 1; } else { ETHERC.ECMR.BIT.RTM = 0; } //Half-duplex or full-duplex mode? 
if(interface->duplexMode == NIC_FULL_DUPLEX_MODE) { ETHERC.ECMR.BIT.DM = 1; } else { ETHERC.ECMR.BIT.DM = 0; } //Successful processing return NO_ERROR; } /** * @brief Write PHY register * @param[in] opcode Access type (2 bits) * @param[in] phyAddr PHY address (5 bits) * @param[in] regAddr Register address (5 bits) * @param[in] data Register value **/ void rx63nEthWritePhyReg(uint8_t opcode, uint8_t phyAddr, uint8_t regAddr, uint16_t data) { //Synchronization pattern rx63nEthWriteSmi(SMI_SYNC, 32); //Start of frame rx63nEthWriteSmi(SMI_START, 2); //Set up a write operation rx63nEthWriteSmi(opcode, 2); //Write PHY address rx63nEthWriteSmi(phyAddr, 5); //Write register address rx63nEthWriteSmi(regAddr, 5); //Turnaround rx63nEthWriteSmi(SMI_TA, 2); //Write register value rx63nEthWriteSmi(data, 16); //Release MDIO rx63nEthReadSmi(1); } /** * @brief Read PHY register * @param[in] opcode Access type (2 bits) * @param[in] phyAddr PHY address (5 bits) * @param[in] regAddr Register address (5 bits) * @return Register value **/ uint16_t rx63nEthReadPhyReg(uint8_t opcode, uint8_t phyAddr, uint8_t regAddr) { uint16_t data; //Synchronization pattern rx63nEthWriteSmi(SMI_SYNC, 32); //Start of frame rx63nEthWriteSmi(SMI_START, 2); //Set up a read operation rx63nEthWriteSmi(opcode, 2); //Write PHY address rx63nEthWriteSmi(phyAddr, 5); //Write register address rx63nEthWriteSmi(regAddr, 5); //Turnaround to avoid contention rx63nEthReadSmi(1); //Read register value data = rx63nEthReadSmi(16); //Force the PHY to release the MDIO pin rx63nEthReadSmi(1); //Return PHY register contents return data; } /** * @brief SMI write operation * @param[in] data Raw data to be written * @param[in] length Number of bits to be written **/ void rx63nEthWriteSmi(uint32_t data, uint_t length) { //Skip the most significant bits since they are meaningless data <<= 32 - length; //Configure MDIO as an output ETHERC.PIR.BIT.MMD = 1; //Write the specified number of bits while(length--) { //Write MDIO if((data & 0x80000000) != 0) { ETHERC.PIR.BIT.MDO = 1; } else { ETHERC.PIR.BIT.MDO = 0; } //Assert MDC usleep(1); ETHERC.PIR.BIT.MDC = 1; //Deassert MDC usleep(1); ETHERC.PIR.BIT.MDC = 0; //Rotate data data <<= 1; } } /** * @brief SMI read operation * @param[in] length Number of bits to be read * @return Data resulting from the MDIO read operation **/ uint32_t rx63nEthReadSmi(uint_t length) { uint32_t data = 0; //Configure MDIO as an input ETHERC.PIR.BIT.MMD = 0; //Read the specified number of bits while(length--) { //Rotate data data <<= 1; //Assert MDC ETHERC.PIR.BIT.MDC = 1; usleep(1); //Deassert MDC ETHERC.PIR.BIT.MDC = 0; usleep(1); //Check MDIO state if(ETHERC.PIR.BIT.MDI) { data |= 0x01; } } //Return the received data return data; }
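The ISR/event-handler pair above uses a deferred-processing pattern: the interrupt handler masks the receive interrupt (EESIPR.FRIP = 0) and wakes the TCP/IP task, which drains the descriptor ring with rx63nEthReceivePacket() before re-arming the interrupt. A condensed, self-contained sketch of that pattern follows; every name in it is an illustrative stand-in, not a driver symbol.

#include <stdbool.h>

static volatile bool rxIrqMasked;     //Stand-in for EDMAC.EESIPR.BIT.FRIP == 0
static volatile bool rxEventPending;  //Stand-in for nicEvent/netEvent

//Placeholder for rx63nEthReceivePacket(); returns false when the ring is empty
static bool readOneFrame(void)
{
   return false;
}

//Interrupt context: mask the source and defer the real work
static void rxIsr(void)
{
   rxIrqMasked = true;
   rxEventPending = true;
}

//Task context: drain every pending frame, then re-arm the interrupt
static void rxEventHandler(void)
{
   while(readOneFrame())
   {
      //Each frame is processed outside interrupt context
   }

   rxIrqMasked = false;
}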
void rx63nEthInitGpio(NetInterface *interface) { //Unlock MPC registers MPC.PWPR.BIT.B0WI = 0; MPC.PWPR.BIT.PFSWE = 1; #if defined(USE_RDK_RX63N) //Select RMII interface mode MPC.PFENET.BIT.PHYMODE = 0; //Configure ET_MDIO (PA3) PORTA.PMR.BIT.B3 = 1; MPC.PA3PFS.BYTE = 0x11; //Configure ET_MDC (PA4) PORTA.PMR.BIT.B4 = 1; MPC.PA4PFS.BYTE = 0x11; //Configure ET_LINKSTA (PA5) PORTA.PMR.BIT.B5 = 1; MPC.PA5PFS.BYTE = 0x11; //Configure RMII_RXD1 (PB0) PORTB.PMR.BIT.B0 = 1; MPC.PB0PFS.BYTE = 0x12; //Configure RMII_RXD0 (PB1) PORTB.PMR.BIT.B1 = 1; MPC.PB1PFS.BYTE = 0x12; //Configure REF50CK (PB2) PORTB.PMR.BIT.B2 = 1; MPC.PB2PFS.BYTE = 0x12; //Configure RMII_RX_ER (PB3) PORTB.PMR.BIT.B3 = 1; MPC.PB3PFS.BYTE = 0x12; //Configure RMII_TXD_EN (PB4) PORTB.PMR.BIT.B4 = 1; MPC.PB4PFS.BYTE = 0x12; //Configure RMII_TXD0 (PB5) PORTB.PMR.BIT.B5 = 1; MPC.PB5PFS.BYTE = 0x12; //Configure RMII_TXD1 (PB6) PORTB.PMR.BIT.B6 = 1; MPC.PB6PFS.BYTE = 0x12; //Configure RMII_CRS_DV (PB7) PORTB.PMR.BIT.B7 = 1; MPC.PB7PFS.BYTE = 0x12; #elif defined(USE_RSK_RX63N) //Select MII interface mode MPC.PFENET.BIT.PHYMODE = 1; //Configure ET_MDIO (P71) PORT7.PMR.BIT.B1 = 1; MPC.P71PFS.BYTE = 0x11; //Configure ET_MDC (P72) PORT7.PMR.BIT.B2 = 1; MPC.P72PFS.BYTE = 0x11; //Configure ET_ERXD1 (P74) PORT7.PMR.BIT.B4 = 1; MPC.P74PFS.BYTE = 0x11; //Configure ET_ERXD0 P75) PORT7.PMR.BIT.B5 = 1; MPC.P75PFS.BYTE = 0x11; //Configure ET_RX_CLK (P76) PORT7.PMR.BIT.B6 = 1; MPC.P76PFS.BYTE = 0x11; //Configure ET_RX_ER (P77) PORT7.PMR.BIT.B7 = 1; MPC.P77PFS.BYTE = 0x11; //Configure ET_TX_EN (P80) PORT8.PMR.BIT.B0 = 1; MPC.P80PFS.BYTE = 0x11; //Configure ET_ETXD0 (P81) PORT8.PMR.BIT.B1 = 1; MPC.P81PFS.BYTE = 0x11; //Configure ET_ETXD1 (P82) PORT8.PMR.BIT.B2 = 1; MPC.P82PFS.BYTE = 0x11; //Configure ET_CRS (P83) PORT8.PMR.BIT.B3 = 1; MPC.P83PFS.BYTE = 0x11; //Configure ET_ERXD3 (PC0) PORTC.PMR.BIT.B0 = 1; MPC.PC0PFS.BYTE = 0x11; //Configure ET_ERXD2 (PC1) PORTC.PMR.BIT.B1 = 1; MPC.PC1PFS.BYTE = 0x11; //Configure ET_RX_DV (PC2) PORTC.PMR.BIT.B2 = 1; MPC.PC2PFS.BYTE = 0x11; //Configure ET_TX_ER (PC3) PORTC.PMR.BIT.B3 = 1; MPC.PC3PFS.BYTE = 0x11; //Configure ET_TX_CLK (PC4) PORTC.PMR.BIT.B4 = 1; MPC.PC4PFS.BYTE = 0x11; //Configure ET_ETXD2 (PC5) PORTC.PMR.BIT.B5 = 1; MPC.PC5PFS.BYTE = 0x11; //Configure ET_ETXD3 (PC6) PORTC.PMR.BIT.B6 = 1; MPC.PC6PFS.BYTE = 0x11; //Configure ET_COL (PC7) PORTC.PMR.BIT.B7 = 1; MPC.PC7PFS.BYTE = 0x11; #endif //Lock MPC registers MPC.PWPR.BIT.PFSWE = 0; MPC.PWPR.BIT.B0WI = 0; }
void rx63nEthInitGpio(NetInterface *interface) { //Unlock MPC registers MPC.PWPR.BIT.B0WI = 0; MPC.PWPR.BIT.PFSWE = 1; #if defined(USE_RDK_RX63N) //Select RMII interface mode MPC.PFENET.BIT.PHYMODE = 0; //Configure ET_MDIO (PA3) PORTA.PMR.BIT.B3 = 1; MPC.PA3PFS.BYTE = 0x11; //Configure ET_MDC (PA4) PORTA.PMR.BIT.B4 = 1; MPC.PA4PFS.BYTE = 0x11; //Configure ET_LINKSTA (PA5) PORTA.PMR.BIT.B5 = 1; MPC.PA5PFS.BYTE = 0x11; //Configure RMII_RXD1 (PB0) PORTB.PMR.BIT.B0 = 1; MPC.PB0PFS.BYTE = 0x12; //Configure RMII_RXD0 (PB1) PORTB.PMR.BIT.B1 = 1; MPC.PB1PFS.BYTE = 0x12; //Configure REF50CK (PB2) PORTB.PMR.BIT.B2 = 1; MPC.PB2PFS.BYTE = 0x12; //Configure RMII_RX_ER (PB3) PORTB.PMR.BIT.B3 = 1; MPC.PB3PFS.BYTE = 0x12; //Configure RMII_TXD_EN (PB4) PORTB.PMR.BIT.B4 = 1; MPC.PB4PFS.BYTE = 0x12; //Configure RMII_TXD0 (PB5) PORTB.PMR.BIT.B5 = 1; MPC.PB5PFS.BYTE = 0x12; //Configure RMII_TXD1 (PB6) PORTB.PMR.BIT.B6 = 1; MPC.PB6PFS.BYTE = 0x12; //Configure RMII_CRS_DV (PB7) PORTB.PMR.BIT.B7 = 1; MPC.PB7PFS.BYTE = 0x12; #elif defined(USE_RSK_RX63N) || defined(USE_RSK_RX63N_256K) //Select MII interface mode MPC.PFENET.BIT.PHYMODE = 1; //Configure ET_MDIO (P71) PORT7.PMR.BIT.B1 = 1; MPC.P71PFS.BYTE = 0x11; //Configure ET_MDC (P72) PORT7.PMR.BIT.B2 = 1; MPC.P72PFS.BYTE = 0x11; //Configure ET_ERXD1 (P74) PORT7.PMR.BIT.B4 = 1; MPC.P74PFS.BYTE = 0x11; //Configure ET_ERXD0 P75) PORT7.PMR.BIT.B5 = 1; MPC.P75PFS.BYTE = 0x11; //Configure ET_RX_CLK (P76) PORT7.PMR.BIT.B6 = 1; MPC.P76PFS.BYTE = 0x11; //Configure ET_RX_ER (P77) PORT7.PMR.BIT.B7 = 1; MPC.P77PFS.BYTE = 0x11; //Configure ET_TX_EN (P80) PORT8.PMR.BIT.B0 = 1; MPC.P80PFS.BYTE = 0x11; //Configure ET_ETXD0 (P81) PORT8.PMR.BIT.B1 = 1; MPC.P81PFS.BYTE = 0x11; //Configure ET_ETXD1 (P82) PORT8.PMR.BIT.B2 = 1; MPC.P82PFS.BYTE = 0x11; //Configure ET_CRS (P83) PORT8.PMR.BIT.B3 = 1; MPC.P83PFS.BYTE = 0x11; //Configure ET_ERXD3 (PC0) PORTC.PMR.BIT.B0 = 1; MPC.PC0PFS.BYTE = 0x11; //Configure ET_ERXD2 (PC1) PORTC.PMR.BIT.B1 = 1; MPC.PC1PFS.BYTE = 0x11; //Configure ET_RX_DV (PC2) PORTC.PMR.BIT.B2 = 1; MPC.PC2PFS.BYTE = 0x11; //Configure ET_TX_ER (PC3) PORTC.PMR.BIT.B3 = 1; MPC.PC3PFS.BYTE = 0x11; //Configure ET_TX_CLK (PC4) PORTC.PMR.BIT.B4 = 1; MPC.PC4PFS.BYTE = 0x11; //Configure ET_ETXD2 (PC5) PORTC.PMR.BIT.B5 = 1; MPC.PC5PFS.BYTE = 0x11; //Configure ET_ETXD3 (PC6) PORTC.PMR.BIT.B6 = 1; MPC.PC6PFS.BYTE = 0x11; //Configure ET_COL (PC7) PORTC.PMR.BIT.B7 = 1; MPC.PC7PFS.BYTE = 0x11; #endif //Lock MPC registers MPC.PWPR.BIT.PFSWE = 0; MPC.PWPR.BIT.B0WI = 0; }
{'added': [(9, ' * Copyright (C) 2010-2021 Oryx Embedded SARL. All rights reserved.'), (28, ' * @version 2.0.2'), (227, '//RDK-RX63N, RSK-RX63N or RSK-RX63N-256K evaluation board?'), (228, '#if defined(USE_RDK_RX63N) || defined(USE_RSK_RX63N) || \\'), (229, ' defined(USE_RSK_RX63N_256K)'), (290, '#elif defined(USE_RSK_RX63N) || defined(USE_RSK_RX63N_256K)')], 'deleted': [(9, ' * Copyright (C) 2010-2020 Oryx Embedded SARL. All rights reserved.'), (28, ' * @version 2.0.0'), (227, '//RDK RX63N or RSK RX63N evaluation board?'), (228, '#if defined(USE_RDK_RX63N) || defined(USE_RSK_RX63N)'), (289, '#elif defined(USE_RSK_RX63N)')]}
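Rendered as unified-style hunks (additions use line numbers in the fixed file, deletions in the original), the added/deleted tuples above read:

@@ line 9 @@
- * Copyright (C) 2010-2020 Oryx Embedded SARL. All rights reserved.
+ * Copyright (C) 2010-2021 Oryx Embedded SARL. All rights reserved.
@@ line 28 @@
- * @version 2.0.0
+ * @version 2.0.2
@@ lines 227-229 @@
-//RDK RX63N or RSK RX63N evaluation board?
-#if defined(USE_RDK_RX63N) || defined(USE_RSK_RX63N)
+//RDK-RX63N, RSK-RX63N or RSK-RX63N-256K evaluation board?
+#if defined(USE_RDK_RX63N) || defined(USE_RSK_RX63N) || \
+   defined(USE_RSK_RX63N_256K)
@@ line 289 (original) / 290 (fixed) @@
-#elif defined(USE_RSK_RX63N)
+#elif defined(USE_RSK_RX63N) || defined(USE_RSK_RX63N_256K)

The functional change simply extends the board-selection preprocessor guards to cover the RSK-RX63N-256K evaluation board; the remaining hunks bump the copyright year and the driver version.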
6
5
454
2672
https://github.com/Oryx-Embedded/CycloneTCP
CVE-2021-26788
['CWE-20']
cervlet.c
do_viewlog
/* * Copyright (C) Tildeslash Ltd. All rights reserved. * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License version 3. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. * * In addition, as a special exception, the copyright holders give * permission to link the code of portions of this program with the * OpenSSL library under certain conditions as described in each * individual source file, and distribute linked combinations * including the two. * * You must obey the GNU Affero General Public License in all respects * for all of the code used other than OpenSSL. */ #include "config.h" #ifdef HAVE_STDIO_H #include <stdio.h> #endif #ifdef HAVE_STDLIB_H #include <stdlib.h> #endif #ifdef HAVE_ERRNO_H #include <errno.h> #endif #ifdef HAVE_SYS_TYPES_H #include <sys/types.h> #endif #ifdef HAVE_SYS_SOCKET_H #include <sys/socket.h> #endif #ifdef HAVE_STRING_H #include <string.h> #endif #ifdef HAVE_STRINGS_H #include <strings.h> #endif #ifdef HAVE_UNISTD_H #include <unistd.h> #endif #ifdef HAVE_SYS_STAT_H #include <sys/stat.h> #endif #ifdef HAVE_SYS_TIME_H #include <sys/time.h> #endif #ifdef HAVE_CTYPE_H #include <ctype.h> #endif // libmonit #include "system/Time.h" #include "util/Fmt.h" #include "util/List.h" #include "monit.h" #include "cervlet.h" #include "engine.h" #include "processor.h" #include "base64.h" #include "event.h" #include "alert.h" #include "ProcessTree.h" #include "device.h" #include "protocol.h" #include "Color.h" #include "Box.h" #define ACTION(c) ! 
strncasecmp(req->url, c, sizeof(c)) /* URL Commands supported */ #define HOME "/" #define TEST "/_monit" #define ABOUT "/_about" #define PING "/_ping" #define GETID "/_getid" #define STATUS "/_status" #define STATUS2 "/_status2" #define SUMMARY "/_summary" #define REPORT "/_report" #define RUNTIME "/_runtime" #define VIEWLOG "/_viewlog" #define DOACTION "/_doaction" #define FAVICON "/favicon.ico" typedef enum { TXT = 0, HTML } __attribute__((__packed__)) Output_Type; /* Private prototypes */ static boolean_t is_readonly(HttpRequest); static void printFavicon(HttpResponse); static void doGet(HttpRequest, HttpResponse); static void doPost(HttpRequest, HttpResponse); static void do_head(HttpResponse res, const char *path, const char *name, int refresh); static void do_foot(HttpResponse res); static void do_home(HttpResponse); static void do_home_system(HttpResponse); static void do_home_filesystem(HttpResponse); static void do_home_directory(HttpResponse); static void do_home_file(HttpResponse); static void do_home_fifo(HttpResponse); static void do_home_net(HttpResponse); static void do_home_process(HttpResponse); static void do_home_program(HttpResponse); static void do_home_host(HttpResponse); static void do_about(HttpResponse); static void do_ping(HttpResponse); static void do_getid(HttpResponse); static void do_runtime(HttpRequest, HttpResponse); static void do_viewlog(HttpRequest, HttpResponse); static void handle_service(HttpRequest, HttpResponse); static void handle_service_action(HttpRequest, HttpResponse); static void handle_doaction(HttpRequest, HttpResponse); static void handle_runtime(HttpRequest, HttpResponse); static void handle_runtime_action(HttpRequest, HttpResponse); static void is_monit_running(HttpResponse); static void do_service(HttpRequest, HttpResponse, Service_T); static void print_alerts(HttpResponse, Mail_T); static void print_buttons(HttpRequest, HttpResponse, Service_T); static void print_service_rules_timeout(HttpResponse, Service_T); static void print_service_rules_nonexistence(HttpResponse, Service_T); static void print_service_rules_existence(HttpResponse, Service_T); static void print_service_rules_port(HttpResponse, Service_T); static void print_service_rules_socket(HttpResponse, Service_T); static void print_service_rules_icmp(HttpResponse, Service_T); static void print_service_rules_perm(HttpResponse, Service_T); static void print_service_rules_uid(HttpResponse, Service_T); static void print_service_rules_euid(HttpResponse, Service_T); static void print_service_rules_gid(HttpResponse, Service_T); static void print_service_rules_timestamp(HttpResponse, Service_T); static void print_service_rules_fsflags(HttpResponse, Service_T); static void print_service_rules_filesystem(HttpResponse, Service_T); static void print_service_rules_size(HttpResponse, Service_T); static void print_service_rules_linkstatus(HttpResponse, Service_T); static void print_service_rules_linkspeed(HttpResponse, Service_T); static void print_service_rules_linksaturation(HttpResponse, Service_T); static void print_service_rules_uploadbytes(HttpResponse, Service_T); static void print_service_rules_uploadpackets(HttpResponse, Service_T); static void print_service_rules_downloadbytes(HttpResponse, Service_T); static void print_service_rules_downloadpackets(HttpResponse, Service_T); static void print_service_rules_uptime(HttpResponse, Service_T); static void print_service_rules_content(HttpResponse, Service_T); static void print_service_rules_checksum(HttpResponse, Service_T); static void 
print_service_rules_pid(HttpResponse, Service_T);
static void print_service_rules_ppid(HttpResponse, Service_T);
static void print_service_rules_program(HttpResponse, Service_T);
static void print_service_rules_resource(HttpResponse, Service_T);
static void print_service_rules_secattr(HttpResponse, Service_T);
static void print_status(HttpRequest, HttpResponse, int);
static void print_summary(HttpRequest, HttpResponse);
static void _printReport(HttpRequest req, HttpResponse res);
static void status_service_txt(Service_T, HttpResponse);
static char *get_monitoring_status(Output_Type, Service_T s, char *, int);
static char *get_service_status(Output_Type, Service_T, char *, int);


/**
 *  Implementation of doGet and doPost routines used by the cervlet
 *  processor module. This particular cervlet will provide
 *  information about the monit daemon and programs monitored by
 *  monit.
 *
 *  @file
 */


/* ------------------------------------------------------------------ Public */


/**
 * Callback hook to the Processor module for registering this module's
 * doGet and doPost methods.
 */
void init_service() {
        add_Impl(doGet, doPost);
}


/* ----------------------------------------------------------------- Private */


static char *_getUptime(time_t delta, char s[256]) {
        static int min = 60;
        static int hour = 3600;
        static int day = 86400;
        long rest_d;
        long rest_h;
        long rest_m;
        char *p = s;
        if (delta < 0) {
                *s = 0;
        } else {
                if ((rest_d = delta / day) > 0) {
                        p += snprintf(p, 256 - (p - s), "%ldd ", rest_d);
                        delta -= rest_d * day;
                }
                if ((rest_h = delta / hour) > 0 || (rest_d > 0)) {
                        p += snprintf(p, 256 - (p - s), "%ldh ", rest_h);
                        delta -= rest_h * hour;
                }
                rest_m = delta / min;
                snprintf(p, 256 - (p - s), "%ldm", rest_m);
        }
        return s;
}


static void _formatStatus(const char *name, Event_Type errorType, Output_Type type, HttpResponse res, Service_T s, boolean_t validValue, const char *value, ...) {
        if (type == HTML) {
                StringBuffer_append(res->outputbuffer, "<tr><td>%c%s</td>", toupper(name[0]), name + 1);
        } else {
                StringBuffer_append(res->outputbuffer, " %-28s ", name);
        }
        if (! validValue) {
                StringBuffer_append(res->outputbuffer, type == HTML ? "<td class='gray-text'>-</td>" : COLOR_DARKGRAY "-" COLOR_RESET);
        } else {
                va_list ap;
                va_start(ap, value);
                char *_value = Str_vcat(value, ap);
                va_end(ap);
                if (errorType != Event_Null && s->error & errorType)
                        StringBuffer_append(res->outputbuffer, type == HTML ? "<td class='red-text'>" : COLOR_LIGHTRED);
                else
                        StringBuffer_append(res->outputbuffer, type == HTML ? "<td>" : COLOR_DEFAULT);
                if (type == HTML) {
                        // If the output contains multiple lines, wrap it in <pre>, otherwise keep as is
                        boolean_t multiline = strrchr(_value, '\n') ? true : false;
                        if (multiline)
                                StringBuffer_append(res->outputbuffer, "<pre>");
                        escapeHTML(res->outputbuffer, _value);
                        StringBuffer_append(res->outputbuffer, "%s</td>", multiline ? "</pre>" : "");
                } else {
                        int column = 0;
                        for (int i = 0; _value[i]; i++) {
                                if (_value[i] == '\r') {
                                        // Discard CR
                                        continue;
                                } else if (_value[i] == '\n') {
                                        // Indent 2nd+ line
                                        if (_value[i + 1])
                                                StringBuffer_append(res->outputbuffer, "\n ");
                                        column = 0;
                                        continue;
                                } else if (column <= 200) {
                                        StringBuffer_append(res->outputbuffer, "%c", _value[i]);
                                        column++;
                                }
                        }
                        StringBuffer_append(res->outputbuffer, COLOR_RESET);
                }
                FREE(_value);
        }
        StringBuffer_append(res->outputbuffer, type == HTML ?
"</tr>" : "\n"); } static void _printIOStatistics(Output_Type type, HttpResponse res, Service_T s, IOStatistics_T io, const char *header, const char *name) { boolean_t hasOps = Statistics_initialized(&(io->operations)); boolean_t hasBytes = Statistics_initialized(&(io->bytes)); if (hasOps && hasBytes) { double deltaBytesPerSec = Statistics_deltaNormalize(&(io->bytes)); double deltaOpsPerSec = Statistics_deltaNormalize(&(io->operations)); _formatStatus(header, Event_Resource, type, res, s, true, "%s/s [%s total], %.1f %ss/s [%"PRIu64" %ss total]", Fmt_bytes2str(deltaBytesPerSec, (char[10]){}), Fmt_bytes2str(Statistics_raw(&(io->bytes)), (char[10]){}), deltaOpsPerSec, name, Statistics_raw(&(io->operations)), name); } else if (hasOps) { double deltaOpsPerSec = Statistics_deltaNormalize(&(io->operations)); _formatStatus(header, Event_Resource, type, res, s, true, "%.1f %ss/s [%"PRIu64" %ss total]", deltaOpsPerSec, name, Statistics_raw(&(io->operations)), name); } else if (hasBytes) { double deltaBytesPerSec = Statistics_deltaNormalize(&(io->bytes)); _formatStatus(header, Event_Resource, type, res, s, true, "%s/s [%s total]", Fmt_bytes2str(deltaBytesPerSec, (char[10]){}), Fmt_bytes2str(Statistics_raw(&(io->bytes)), (char[10]){})); } } static void _printStatus(Output_Type type, HttpResponse res, Service_T s) { if (Util_hasServiceStatus(s)) { switch (s->type) { case Service_System: _formatStatus("load average", Event_Resource, type, res, s, true, "[%.2f] [%.2f] [%.2f]", systeminfo.loadavg[0], systeminfo.loadavg[1], systeminfo.loadavg[2]); _formatStatus("cpu", Event_Resource, type, res, s, true, "%.1f%%us %.1f%%sy" #ifdef HAVE_CPU_WAIT " %.1f%%wa" #endif , systeminfo.cpu.usage.user > 0. ? systeminfo.cpu.usage.user : 0., systeminfo.cpu.usage.system > 0. ? systeminfo.cpu.usage.system : 0. #ifdef HAVE_CPU_WAIT , systeminfo.cpu.usage.wait > 0. ? systeminfo.cpu.usage.wait : 0. 
#endif ); _formatStatus("memory usage", Event_Resource, type, res, s, true, "%s [%.1f%%]", Fmt_bytes2str(systeminfo.memory.usage.bytes, (char[10]){}), systeminfo.memory.usage.percent); _formatStatus("swap usage", Event_Resource, type, res, s, true, "%s [%.1f%%]", Fmt_bytes2str(systeminfo.swap.usage.bytes, (char[10]){}), systeminfo.swap.usage.percent); _formatStatus("uptime", Event_Uptime, type, res, s, systeminfo.booted > 0, "%s", _getUptime(Time_now() - systeminfo.booted, (char[256]){})); _formatStatus("boot time", Event_Null, type, res, s, true, "%s", Time_string(systeminfo.booted, (char[32]){})); break; case Service_File: _formatStatus("permission", Event_Permission, type, res, s, s->inf.file->mode >= 0, "%o", s->inf.file->mode & 07777); _formatStatus("uid", Event_Uid, type, res, s, s->inf.file->uid >= 0, "%d", s->inf.file->uid); _formatStatus("gid", Event_Gid, type, res, s, s->inf.file->gid >= 0, "%d", s->inf.file->gid); _formatStatus("size", Event_Size, type, res, s, s->inf.file->size >= 0, "%s", Fmt_bytes2str(s->inf.file->size, (char[10]){})); _formatStatus("access timestamp", Event_Timestamp, type, res, s, s->inf.file->timestamp.access > 0, "%s", Time_string(s->inf.file->timestamp.access, (char[32]){})); _formatStatus("change timestamp", Event_Timestamp, type, res, s, s->inf.file->timestamp.change > 0, "%s", Time_string(s->inf.file->timestamp.change, (char[32]){})); _formatStatus("modify timestamp", Event_Timestamp, type, res, s, s->inf.file->timestamp.modify > 0, "%s", Time_string(s->inf.file->timestamp.modify, (char[32]){})); if (s->matchlist) _formatStatus("content match", Event_Content, type, res, s, true, "%s", (s->error & Event_Content) ? "yes" : "no"); if (s->checksum) _formatStatus("checksum", Event_Checksum, type, res, s, *s->inf.file->cs_sum, "%s (%s)", s->inf.file->cs_sum, checksumnames[s->checksum->type]); break; case Service_Directory: _formatStatus("permission", Event_Permission, type, res, s, s->inf.directory->mode >= 0, "%o", s->inf.directory->mode & 07777); _formatStatus("uid", Event_Uid, type, res, s, s->inf.directory->uid >= 0, "%d", s->inf.directory->uid); _formatStatus("gid", Event_Gid, type, res, s, s->inf.directory->gid >= 0, "%d", s->inf.directory->gid); _formatStatus("access timestamp", Event_Timestamp, type, res, s, s->inf.directory->timestamp.access > 0, "%s", Time_string(s->inf.directory->timestamp.access, (char[32]){})); _formatStatus("change timestamp", Event_Timestamp, type, res, s, s->inf.directory->timestamp.change > 0, "%s", Time_string(s->inf.directory->timestamp.change, (char[32]){})); _formatStatus("modify timestamp", Event_Timestamp, type, res, s, s->inf.directory->timestamp.modify > 0, "%s", Time_string(s->inf.directory->timestamp.modify, (char[32]){})); break; case Service_Fifo: _formatStatus("permission", Event_Permission, type, res, s, s->inf.fifo->mode >= 0, "%o", s->inf.fifo->mode & 07777); _formatStatus("uid", Event_Uid, type, res, s, s->inf.fifo->uid >= 0, "%d", s->inf.fifo->uid); _formatStatus("gid", Event_Gid, type, res, s, s->inf.fifo->gid >= 0, "%d", s->inf.fifo->gid); _formatStatus("access timestamp", Event_Timestamp, type, res, s, s->inf.fifo->timestamp.access > 0, "%s", Time_string(s->inf.fifo->timestamp.access, (char[32]){})); _formatStatus("change timestamp", Event_Timestamp, type, res, s, s->inf.fifo->timestamp.change > 0, "%s", Time_string(s->inf.fifo->timestamp.change, (char[32]){})); _formatStatus("modify timestamp", Event_Timestamp, type, res, s, s->inf.fifo->timestamp.modify > 0, "%s", 
Time_string(s->inf.fifo->timestamp.modify, (char[32]){})); break; case Service_Net: { long long speed = Link_getSpeed(s->inf.net->stats); long long ibytes = Link_getBytesInPerSecond(s->inf.net->stats); long long obytes = Link_getBytesOutPerSecond(s->inf.net->stats); _formatStatus("link", Event_Link, type, res, s, Link_getState(s->inf.net->stats) == 1, "%d errors", Link_getErrorsInPerSecond(s->inf.net->stats) + Link_getErrorsOutPerSecond(s->inf.net->stats)); if (speed > 0) { _formatStatus("capacity", Event_Speed, type, res, s, Link_getState(s->inf.net->stats) == 1, "%.0lf Mb/s %s-duplex", (double)speed / 1000000., Link_getDuplex(s->inf.net->stats) == 1 ? "full" : "half"); _formatStatus("download bytes", Event_ByteIn, type, res, s, Link_getState(s->inf.net->stats) == 1, "%s/s (%.1f%% link saturation)", Fmt_bytes2str(ibytes, (char[10]){}), 100. * ibytes * 8 / (double)speed); _formatStatus("upload bytes", Event_ByteOut, type, res, s, Link_getState(s->inf.net->stats) == 1, "%s/s (%.1f%% link saturation)", Fmt_bytes2str(obytes, (char[10]){}), 100. * obytes * 8 / (double)speed); } else { _formatStatus("download bytes", Event_ByteIn, type, res, s, Link_getState(s->inf.net->stats) == 1, "%s/s", Fmt_bytes2str(ibytes, (char[10]){})); _formatStatus("upload bytes", Event_ByteOut, type, res, s, Link_getState(s->inf.net->stats) == 1, "%s/s", Fmt_bytes2str(obytes, (char[10]){})); } _formatStatus("download packets", Event_PacketIn, type, res, s, Link_getState(s->inf.net->stats) == 1, "%lld per second", Link_getPacketsInPerSecond(s->inf.net->stats)); _formatStatus("upload packets", Event_PacketOut, type, res, s, Link_getState(s->inf.net->stats) == 1, "%lld per second", Link_getPacketsOutPerSecond(s->inf.net->stats)); } break; case Service_Filesystem: _formatStatus("filesystem type", Event_Null, type, res, s, *(s->inf.filesystem->object.type), "%s", s->inf.filesystem->object.type); _formatStatus("filesystem flags", Event_FsFlag, type, res, s, *(s->inf.filesystem->flags), "%s", s->inf.filesystem->flags); _formatStatus("permission", Event_Permission, type, res, s, s->inf.filesystem->mode >= 0, "%o", s->inf.filesystem->mode & 07777); _formatStatus("uid", Event_Uid, type, res, s, s->inf.filesystem->uid >= 0, "%d", s->inf.filesystem->uid); _formatStatus("gid", Event_Gid, type, res, s, s->inf.filesystem->gid >= 0, "%d", s->inf.filesystem->gid); _formatStatus("block size", Event_Null, type, res, s, true, "%s", Fmt_bytes2str(s->inf.filesystem->f_bsize, (char[10]){})); _formatStatus("space total", Event_Null, type, res, s, true, "%s (of which %.1f%% is reserved for root user)", s->inf.filesystem->f_bsize > 0 ? Fmt_bytes2str(s->inf.filesystem->f_blocks * s->inf.filesystem->f_bsize, (char[10]){}) : "0 MB", s->inf.filesystem->f_blocks > 0 ? ((float)100 * (float)(s->inf.filesystem->f_blocksfreetotal - s->inf.filesystem->f_blocksfree) / (float)s->inf.filesystem->f_blocks) : 0); _formatStatus("space free for non superuser", Event_Null, type, res, s, true, "%s [%.1f%%]", s->inf.filesystem->f_bsize > 0 ? Fmt_bytes2str(s->inf.filesystem->f_blocksfree * s->inf.filesystem->f_bsize, (char[10]){}) : "0 MB", s->inf.filesystem->f_blocks > 0 ? ((float)100 * (float)s->inf.filesystem->f_blocksfree / (float)s->inf.filesystem->f_blocks) : 0); _formatStatus("space free total", Event_Resource, type, res, s, true, "%s [%.1f%%]", s->inf.filesystem->f_bsize > 0 ? Fmt_bytes2str(s->inf.filesystem->f_blocksfreetotal * s->inf.filesystem->f_bsize, (char[10]){}) : "0 MB", s->inf.filesystem->f_blocks > 0 ? 
((float)100 * (float)s->inf.filesystem->f_blocksfreetotal / (float)s->inf.filesystem->f_blocks) : 0); if (s->inf.filesystem->f_files > 0) { _formatStatus("inodes total", Event_Null, type, res, s, true, "%lld", s->inf.filesystem->f_files); _formatStatus("inodes free", Event_Resource, type, res, s, true, "%lld [%.1f%%]", s->inf.filesystem->f_filesfree, (float)100 * (float)s->inf.filesystem->f_filesfree / (float)s->inf.filesystem->f_files); } _printIOStatistics(type, res, s, &(s->inf.filesystem->read), "read", "read"); _printIOStatistics(type, res, s, &(s->inf.filesystem->write), "write", "write"); boolean_t hasReadTime = Statistics_initialized(&(s->inf.filesystem->time.read)); boolean_t hasWriteTime = Statistics_initialized(&(s->inf.filesystem->time.write)); boolean_t hasWaitTime = Statistics_initialized(&(s->inf.filesystem->time.wait)); boolean_t hasRunTime = Statistics_initialized(&(s->inf.filesystem->time.run)); double deltaOperations = Statistics_delta(&(s->inf.filesystem->read.operations)) + Statistics_delta(&(s->inf.filesystem->write.operations)); if (hasReadTime && hasWriteTime) { double readTime = deltaOperations > 0. ? Statistics_deltaNormalize(&(s->inf.filesystem->time.read)) / deltaOperations : 0.; double writeTime = deltaOperations > 0. ? Statistics_deltaNormalize(&(s->inf.filesystem->time.write)) / deltaOperations : 0.; _formatStatus("service time", Event_Null, type, res, s, true, "%.3fms/operation (of which read %.3fms, write %.3fms)", readTime + writeTime, readTime, writeTime); } else if (hasWaitTime && hasRunTime) { double waitTime = deltaOperations > 0. ? Statistics_deltaNormalize(&(s->inf.filesystem->time.wait)) / deltaOperations : 0.; double runTime = deltaOperations > 0. ? Statistics_deltaNormalize(&(s->inf.filesystem->time.run)) / deltaOperations : 0.; _formatStatus("service time", Event_Null, type, res, s, true, "%.3fms/operation (of which queue %.3fms, active %.3fms)", waitTime + runTime, waitTime, runTime); } else if (hasWaitTime) { double waitTime = deltaOperations > 0. ? Statistics_deltaNormalize(&(s->inf.filesystem->time.wait)) / deltaOperations : 0.; _formatStatus("service time", Event_Null, type, res, s, true, "%.3fms/operation", waitTime); } else if (hasRunTime) { double runTime = deltaOperations > 0. ? 
Statistics_deltaNormalize(&(s->inf.filesystem->time.run)) / deltaOperations : 0.; _formatStatus("service time", Event_Null, type, res, s, true, "%.3fms/operation", runTime); } break; case Service_Process: _formatStatus("pid", Event_Pid, type, res, s, s->inf.process->pid >= 0, "%d", s->inf.process->pid); _formatStatus("parent pid", Event_PPid, type, res, s, s->inf.process->ppid >= 0, "%d", s->inf.process->ppid); _formatStatus("uid", Event_Uid, type, res, s, s->inf.process->uid >= 0, "%d", s->inf.process->uid); _formatStatus("effective uid", Event_Uid, type, res, s, s->inf.process->euid >= 0, "%d", s->inf.process->euid); _formatStatus("gid", Event_Gid, type, res, s, s->inf.process->gid >= 0, "%d", s->inf.process->gid); _formatStatus("uptime", Event_Uptime, type, res, s, s->inf.process->uptime >= 0, "%s", _getUptime(s->inf.process->uptime, (char[256]){})); if (Run.flags & Run_ProcessEngineEnabled) { _formatStatus("threads", Event_Resource, type, res, s, s->inf.process->threads >= 0, "%d", s->inf.process->threads); _formatStatus("children", Event_Resource, type, res, s, s->inf.process->children >= 0, "%d", s->inf.process->children); _formatStatus("cpu", Event_Resource, type, res, s, s->inf.process->cpu_percent >= 0, "%.1f%%", s->inf.process->cpu_percent); _formatStatus("cpu total", Event_Resource, type, res, s, s->inf.process->total_cpu_percent >= 0, "%.1f%%", s->inf.process->total_cpu_percent); _formatStatus("memory", Event_Resource, type, res, s, s->inf.process->mem_percent >= 0, "%.1f%% [%s]", s->inf.process->mem_percent, Fmt_bytes2str(s->inf.process->mem, (char[10]){})); _formatStatus("memory total", Event_Resource, type, res, s, s->inf.process->total_mem_percent >= 0, "%.1f%% [%s]", s->inf.process->total_mem_percent, Fmt_bytes2str(s->inf.process->total_mem, (char[10]){})); #ifdef LINUX _formatStatus("security attribute", Event_Invalid, type, res, s, *(s->inf.process->secattr), "%s", s->inf.process->secattr); #endif } _printIOStatistics(type, res, s, &(s->inf.process->read), "disk read", "read"); _printIOStatistics(type, res, s, &(s->inf.process->write), "disk write", "write"); break; case Service_Program: if (s->program->started) { _formatStatus("last exit value", Event_Status, type, res, s, true, "%d", s->program->exitStatus); _formatStatus("last output", Event_Status, type, res, s, StringBuffer_length(s->program->lastOutput), "%s", StringBuffer_toString(s->program->lastOutput)); } break; default: break; } for (Icmp_T i = s->icmplist; i; i = i->next) { if (i->is_available == Connection_Failed) _formatStatus("ping response time", Event_Icmp, type, res, s, true, "connection failed"); else _formatStatus("ping response time", Event_Null, type, res, s, i->is_available != Connection_Init && i->response >= 0., "%s", Fmt_time2str(i->response, (char[11]){})); } for (Port_T p = s->portlist; p; p = p->next) { if (p->is_available == Connection_Failed) { _formatStatus("port response time", Event_Connection, type, res, s, true, "FAILED to [%s]:%d%s type %s/%s %sprotocol %s", p->hostname, p->target.net.port, Util_portRequestDescription(p), Util_portTypeDescription(p), Util_portIpDescription(p), p->target.net.ssl.options.flags ? "using TLS " : "", p->protocol->name); } else { char buf[STRLEN] = {}; if (p->target.net.ssl.options.flags) snprintf(buf, sizeof(buf), "using TLS (certificate valid for %d days) ", p->target.net.ssl.certificate.validDays); _formatStatus("port response time", p->target.net.ssl.certificate.validDays < p->target.net.ssl.certificate.minimumDays ? 
Event_Timestamp : Event_Null, type, res, s, p->is_available != Connection_Init, "%s to %s:%d%s type %s/%s %sprotocol %s", Fmt_time2str(p->response, (char[11]){}), p->hostname, p->target.net.port, Util_portRequestDescription(p), Util_portTypeDescription(p), Util_portIpDescription(p), buf, p->protocol->name); } } for (Port_T p = s->socketlist; p; p = p->next) { if (p->is_available == Connection_Failed) { _formatStatus("unix socket response time", Event_Connection, type, res, s, true, "FAILED to %s type %s protocol %s", p->target.unix.pathname, Util_portTypeDescription(p), p->protocol->name); } else { _formatStatus("unix socket response time", Event_Null, type, res, s, p->is_available != Connection_Init, "%s to %s type %s protocol %s", Fmt_time2str(p->response, (char[11]){}), p->target.unix.pathname, Util_portTypeDescription(p), p->protocol->name); } } } _formatStatus("data collected", Event_Null, type, res, s, true, "%s", Time_string(s->collected.tv_sec, (char[32]){})); } /** * Called by the Processor (via the service method) * to handle a POST request. */ static void doPost(HttpRequest req, HttpResponse res) { set_content_type(res, "text/html"); if (ACTION(RUNTIME)) handle_runtime_action(req, res); else if (ACTION(VIEWLOG)) do_viewlog(req, res); else if (ACTION(STATUS)) print_status(req, res, 1); else if (ACTION(STATUS2)) print_status(req, res, 2); else if (ACTION(SUMMARY)) print_summary(req, res); else if (ACTION(REPORT)) _printReport(req, res); else if (ACTION(DOACTION)) handle_doaction(req, res); else handle_service_action(req, res); } /** * Called by the Processor (via the service method) * to handle a GET request. */ static void doGet(HttpRequest req, HttpResponse res) { set_content_type(res, "text/html"); if (ACTION(HOME)) { LOCK(Run.mutex) do_home(res); END_LOCK; } else if (ACTION(RUNTIME)) { handle_runtime(req, res); } else if (ACTION(TEST)) { is_monit_running(res); } else if (ACTION(ABOUT)) { do_about(res); } else if (ACTION(FAVICON)) { printFavicon(res); } else if (ACTION(PING)) { do_ping(res); } else if (ACTION(GETID)) { do_getid(res); } else if (ACTION(STATUS)) { print_status(req, res, 1); } else if (ACTION(STATUS2)) { print_status(req, res, 2); } else if (ACTION(SUMMARY)) { print_summary(req, res); } else if (ACTION(REPORT)) { _printReport(req, res); } else { handle_service(req, res); } } /* ----------------------------------------------------------------- Helpers */ static void is_monit_running(HttpResponse res) { set_status(res, exist_daemon() ? SC_OK : SC_GONE); } static void printFavicon(HttpResponse res) { static size_t l; Socket_T S = res->S; static unsigned char *favicon = NULL; if (! 
favicon) { favicon = CALLOC(sizeof(unsigned char), strlen(FAVICON_ICO)); l = decode_base64(favicon, FAVICON_ICO); } if (l) { res->is_committed = true; Socket_print(S, "HTTP/1.0 200 OK\r\n"); Socket_print(S, "Content-length: %lu\r\n", (unsigned long)l); Socket_print(S, "Content-Type: image/x-icon\r\n"); Socket_print(S, "Connection: close\r\n\r\n"); if (Socket_write(S, favicon, l) < 0) { LogError("Error sending favicon data -- %s\n", STRERROR); } } } static void do_head(HttpResponse res, const char *path, const char *name, int refresh) { StringBuffer_append(res->outputbuffer, "<!DOCTYPE html>"\ "<html>"\ "<head>"\ "<title>Monit: %s</title> "\ "<style type=\"text/css\"> "\ " html, body {height: 100%%;margin: 0;} "\ " body {background-color: white;font: normal normal normal 16px/20px 'HelveticaNeue', Helvetica, Arial, sans-serif; color:#222;} "\ " h1 {padding:30px 0 10px 0; text-align:center;color:#222;font-size:28px;} "\ " h2 {padding:20px 0 10px 0; text-align:center;color:#555;font-size:22px;} "\ " a:hover {text-decoration: none;} "\ " a {text-decoration: underline;color:#222} "\ " table {border-collapse:collapse; border:0;} "\ " .stripe {background:#EDF5FF} "\ " .rule {background:#ddd} "\ " .red-text {color:#ff0000;} "\ " .green-text {color:#00ff00;} "\ " .gray-text {color:#999999;} "\ " .blue-text {color:#0000ff;} "\ " .yellow-text {color:#ffff00;} "\ " .orange-text {color:#ff8800;} "\ " .short {overflow: hidden; text-overflow: ellipsis; white-space: nowrap; max-width: 350px;}"\ " .column {min-width: 80px;} "\ " .left {text-align:left} "\ " .right {text-align:right} "\ " .center {text-align:center} "\ " #wrap {min-height: 100%%;} "\ " #main {overflow:auto; padding-bottom:50px;} "\ " /*Opera Fix*/body:before {content:\"\";height:100%%;float:left;width:0;margin-top:-32767px;} "\ " #footer {position: relative;margin-top: -50px; height: 50px; clear:both; font-size:11px;color:#777;text-align:center;} "\ " #footer a {color:#333;} #footer a:hover {text-decoration: none;} "\ " #nav {background:#ddd;font:normal normal normal 14px/0px 'HelveticaNeue', Helvetica;} "\ " #nav td {padding:5px 10px;} "\ " #header {margin-bottom:30px;background:#EFF7FF} "\ " #nav, #header {border-bottom:1px solid #ccc;} "\ " #header-row {width:95%%;} "\ " #header-row th {padding:30px 10px 10px 10px;font-size:120%%;} "\ " #header-row td {padding:3px 10px;} "\ " #header-row .first {min-width:200px;width:200px;white-space:nowrap;overflow:hidden;text-overflow:ellipsis;} "\ " #status-table {width:95%%;} "\ " #status-table th {text-align:left;background:#edf5ff;font-weight:normal;} "\ " #status-table th, #status-table td, #status-table tr {border:1px solid #ccc;padding:5px;} "\ " #buttons {font-size:20px; margin:40px 0 20px 0;} "\ " #buttons td {padding-right:50px;} "\ " #buttons input {font-size:18px;padding:5px;} "\ "</style>"\ "<meta HTTP-EQUIV='REFRESH' CONTENT=%d> "\ "<meta HTTP-EQUIV='Expires' Content=0> "\ "<meta HTTP-EQUIV='Pragma' CONTENT='no-cache'> "\ "<meta charset='UTF-8'>" \ "<link rel='shortcut icon' href='favicon.ico'>"\ "</head>"\ "<body><div id='wrap'><div id='main'>" \ "<table id='nav' width='100%%'>"\ " <tr>"\ " <td width='20%%'><a href='.'>Home</a>&nbsp;&gt;&nbsp;<a href='%s'>%s</a></td>"\ " <td width='60%%' style='text-align:center;'>Use <a href='https://mmonit.com/'>M/Monit</a> to manage all your Monit instances</td>"\ " <td width='20%%'><p class='right'><a href='_about'>Monit %s</a></td>"\ " </tr>"\ "</table>"\ "<center>", Run.system->name, refresh, path, name, VERSION); } static void 
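/* Close the content wrapper opened by do_head() and emit the shared page footer */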
do_foot(HttpResponse res) { StringBuffer_append(res->outputbuffer, "</center></div></div>" "<div id='footer'>" "Copyright &copy; 2001-2018 <a href=\"http://tildeslash.com/\">Tildeslash</a>. All rights reserved. " "<span style='margin-left:5px;'></span>" "<a href=\"http://mmonit.com/monit/\">Monit web site</a> | " "<a href=\"http://mmonit.com/wiki/\">Monit Wiki</a> | " "<a href=\"http://mmonit.com/\">M/Monit</a>" "</div></body></html>"); } static void do_home(HttpResponse res) { do_head(res, "", "", Run.polltime); StringBuffer_append(res->outputbuffer, "<table id='header' width='100%%'>" " <tr>" " <td colspan=2 valign='top' class='left' width='100%%'>" " <h1>Monit Service Manager</h1>" " <p class='center'>Monit is <a href='_runtime'>running</a> on %s and monitoring:</p><br>" " </td>" " </tr>" "</table>", Run.system->name); do_home_system(res); do_home_process(res); do_home_program(res); do_home_filesystem(res); do_home_file(res); do_home_fifo(res); do_home_directory(res); do_home_net(res); do_home_host(res); do_foot(res); } static void do_about(HttpResponse res) { StringBuffer_append(res->outputbuffer, "<html><head><title>about monit</title></head><body bgcolor=white>" "<br><h1><center><a href='http://mmonit.com/monit/'>" "monit " VERSION "</a></center></h1>"); StringBuffer_append(res->outputbuffer, "<ul>" "<li style='padding-bottom:10px;'>Copyright &copy; 2001-2018 <a " "href='http://tildeslash.com/'>Tildeslash Ltd" "</a>. All Rights Reserved.</li></ul>"); StringBuffer_append(res->outputbuffer, "<hr size='1'>"); StringBuffer_append(res->outputbuffer, "<p>This program is free software; you can redistribute it and/or " "modify it under the terms of the GNU Affero General Public License version 3</p>" "<p>This program is distributed in the hope that it will be useful, but " "WITHOUT ANY WARRANTY; without even the implied warranty of " "MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the " "<a href='http://www.gnu.org/licenses/agpl.html'>" "GNU AFFERO GENERAL PUBLIC LICENSE</a> for more details.</p>"); StringBuffer_append(res->outputbuffer, "<center><p style='padding-top:20px;'>[<a href='.'>Back to Monit</a>]</p></body></html>"); } static void do_ping(HttpResponse res) { StringBuffer_append(res->outputbuffer, "pong"); } static void do_getid(HttpResponse res) { StringBuffer_append(res->outputbuffer, "%s", Run.id); } static void do_runtime(HttpRequest req, HttpResponse res) { int pid = exist_daemon(); char buf[STRLEN]; do_head(res, "_runtime", "Runtime", 1000); StringBuffer_append(res->outputbuffer, "<h2>Monit runtime status</h2>"); StringBuffer_append(res->outputbuffer, "<table id='status-table'><tr>" "<th width='40%%'>Parameter</th>" "<th width='60%%'>Value</th></tr>"); StringBuffer_append(res->outputbuffer, "<tr><td>Monit ID</td><td>%s</td></tr>", Run.id); StringBuffer_append(res->outputbuffer, "<tr><td>Host</td><td>%s</td></tr>", Run.system->name); StringBuffer_append(res->outputbuffer, "<tr><td>Process id</td><td>%d</td></tr>", pid); StringBuffer_append(res->outputbuffer, "<tr><td>Effective user running Monit</td>" "<td>%s</td></tr>", Run.Env.user); StringBuffer_append(res->outputbuffer, "<tr><td>Controlfile</td><td>%s</td></tr>", Run.files.control); if (Run.files.log) StringBuffer_append(res->outputbuffer, "<tr><td>Logfile</td><td>%s</td></tr>", Run.files.log); StringBuffer_append(res->outputbuffer, "<tr><td>Pidfile</td><td>%s</td></tr>", Run.files.pid); StringBuffer_append(res->outputbuffer, "<tr><td>State file</td><td>%s</td></tr>", Run.files.state); StringBuffer_append(res->outputbuffer, "<tr><td>Debug</td><td>%s</td></tr>", Run.debug ? "True" : "False"); StringBuffer_append(res->outputbuffer, "<tr><td>Log</td><td>%s</td></tr>", (Run.flags & Run_Log) ? "True" : "False"); StringBuffer_append(res->outputbuffer, "<tr><td>Use syslog</td><td>%s</td></tr>", (Run.flags & Run_UseSyslog) ? 
"True" : "False"); if (Run.eventlist_dir) { if (Run.eventlist_slots < 0) snprintf(buf, STRLEN, "unlimited"); else snprintf(buf, STRLEN, "%d", Run.eventlist_slots); StringBuffer_append(res->outputbuffer, "<tr><td>Event queue</td>" "<td>base directory %s with %d slots</td></tr>", Run.eventlist_dir, Run.eventlist_slots); } #ifdef HAVE_OPENSSL { const char *options = Ssl_printOptions(&(Run.ssl), (char[STRLEN]){}, STRLEN); if (options && *options) StringBuffer_append(res->outputbuffer, "<tr><td>SSL options</td><td>%s</td></tr>", options); } #endif if (Run.mmonits) { StringBuffer_append(res->outputbuffer, "<tr><td>M/Monit server(s)</td><td>"); for (Mmonit_T c = Run.mmonits; c; c = c->next) { StringBuffer_append(res->outputbuffer, "%s with timeout %s", c->url->url, Fmt_time2str(c->timeout, (char[11]){})); #ifdef HAVE_OPENSSL if (c->ssl.flags) { StringBuffer_append(res->outputbuffer, " using TLS"); const char *options = Ssl_printOptions(&c->ssl, (char[STRLEN]){}, STRLEN); if (options && *options) StringBuffer_append(res->outputbuffer, " with options {%s}", options); if (c->ssl.checksum) StringBuffer_append(res->outputbuffer, " and certificate checksum %s equal to '%s'", checksumnames[c->ssl.checksumType], c->ssl.checksum); } #endif if (Run.flags & Run_MmonitCredentials && c->url->user) StringBuffer_append(res->outputbuffer, " with credentials"); if (c->next) StringBuffer_append(res->outputbuffer, "</td></tr><tr><td>&nbsp;</td><td>"); } StringBuffer_append(res->outputbuffer, "</td></tr>"); } if (Run.mailservers) { StringBuffer_append(res->outputbuffer, "<tr><td>Mail server(s)</td><td>"); for (MailServer_T mta = Run.mailservers; mta; mta = mta->next) { StringBuffer_append(res->outputbuffer, "%s:%d", mta->host, mta->port); #ifdef HAVE_OPENSSL if (mta->ssl.flags) { StringBuffer_append(res->outputbuffer, " using TLS"); const char *options = Ssl_printOptions(&mta->ssl, (char[STRLEN]){}, STRLEN); if (options && *options) StringBuffer_append(res->outputbuffer, " with options {%s}", options); if (mta->ssl.checksum) StringBuffer_append(res->outputbuffer, " and certificate checksum %s equal to '%s'", checksumnames[mta->ssl.checksumType], mta->ssl.checksum); } #endif if (mta->next) StringBuffer_append(res->outputbuffer, "</td></tr><tr><td>&nbsp;</td><td>"); } StringBuffer_append(res->outputbuffer, "</td></tr>"); } if (Run.MailFormat.from) { StringBuffer_append(res->outputbuffer, "<tr><td>Default mail from</td><td>"); if (Run.MailFormat.from->name) StringBuffer_append(res->outputbuffer, "%s &lt;%s&gt;", Run.MailFormat.from->name, Run.MailFormat.from->address); else StringBuffer_append(res->outputbuffer, "%s", Run.MailFormat.from->address); StringBuffer_append(res->outputbuffer, "</td></tr>"); } if (Run.MailFormat.replyto) { StringBuffer_append(res->outputbuffer, "<tr><td>Default mail reply to</td><td>"); if (Run.MailFormat.replyto->name) StringBuffer_append(res->outputbuffer, "%s &lt;%s&gt;", Run.MailFormat.replyto->name, Run.MailFormat.replyto->address); else StringBuffer_append(res->outputbuffer, "%s", Run.MailFormat.replyto->address); StringBuffer_append(res->outputbuffer, "</td></tr>"); } if (Run.MailFormat.subject) StringBuffer_append(res->outputbuffer, "<tr><td>Default mail subject</td><td>%s</td></tr>", Run.MailFormat.subject); if (Run.MailFormat.message) StringBuffer_append(res->outputbuffer, "<tr><td>Default mail message</td><td>%s</td></tr>", Run.MailFormat.message); StringBuffer_append(res->outputbuffer, "<tr><td>Limit for Send/Expect buffer</td><td>%s</td></tr>", 
Fmt_bytes2str(Run.limits.sendExpectBuffer, buf)); StringBuffer_append(res->outputbuffer, "<tr><td>Limit for file content buffer</td><td>%s</td></tr>", Fmt_bytes2str(Run.limits.fileContentBuffer, buf)); StringBuffer_append(res->outputbuffer, "<tr><td>Limit for HTTP content buffer</td><td>%s</td></tr>", Fmt_bytes2str(Run.limits.httpContentBuffer, buf)); StringBuffer_append(res->outputbuffer, "<tr><td>Limit for program output</td><td>%s</td></tr>", Fmt_bytes2str(Run.limits.programOutput, buf)); StringBuffer_append(res->outputbuffer, "<tr><td>Limit for network timeout</td><td>%s</td></tr>", Fmt_time2str(Run.limits.networkTimeout, (char[11]){})); StringBuffer_append(res->outputbuffer, "<tr><td>Limit for check program timeout</td><td>%s</td></tr>", Fmt_time2str(Run.limits.programTimeout, (char[11]){})); StringBuffer_append(res->outputbuffer, "<tr><td>Limit for service stop timeout</td><td>%s</td></tr>", Fmt_time2str(Run.limits.stopTimeout, (char[11]){})); StringBuffer_append(res->outputbuffer, "<tr><td>Limit for service start timeout</td><td>%s</td></tr>", Fmt_time2str(Run.limits.startTimeout, (char[11]){})); StringBuffer_append(res->outputbuffer, "<tr><td>Limit for service restart timeout</td><td>%s</td></tr>", Fmt_time2str(Run.limits.restartTimeout, (char[11]){})); StringBuffer_append(res->outputbuffer, "<tr><td>On reboot</td><td>%s</td></tr>", onrebootnames[Run.onreboot]); StringBuffer_append(res->outputbuffer, "<tr><td>Poll time</td><td>%d seconds with start delay %d seconds</td></tr>", Run.polltime, Run.startdelay); if (Run.httpd.flags & Httpd_Net) { StringBuffer_append(res->outputbuffer, "<tr><td>httpd bind address</td><td>%s</td></tr>", Run.httpd.socket.net.address ? Run.httpd.socket.net.address : "Any/All"); StringBuffer_append(res->outputbuffer, "<tr><td>httpd portnumber</td><td>%d</td></tr>", Run.httpd.socket.net.port); #ifdef HAVE_OPENSSL const char *options = Ssl_printOptions(&(Run.httpd.socket.net.ssl), (char[STRLEN]){}, STRLEN); if (options && *options) StringBuffer_append(res->outputbuffer, "<tr><td>httpd encryption</td><td>%s</td></tr>", options); #endif } if (Run.httpd.flags & Httpd_Unix) StringBuffer_append(res->outputbuffer, "<tr><td>httpd unix socket</td><td>%s</td></tr>", Run.httpd.socket.unix.path); StringBuffer_append(res->outputbuffer, "<tr><td>httpd signature</td><td>%s</td></tr>", Run.httpd.flags & Httpd_Signature ? "True" : "False"); StringBuffer_append(res->outputbuffer, "<tr><td>httpd auth. style</td><td>%s</td></tr>", Run.httpd.credentials && Engine_hasAllow() ? "Basic Authentication and Host/Net allow list" : Run.httpd.credentials ? "Basic Authentication" : Engine_hasAllow() ? "Host/Net allow list" : "No authentication"); print_alerts(res, Run.maillist); StringBuffer_append(res->outputbuffer, "</table>"); if (! is_readonly(req)) { StringBuffer_append(res->outputbuffer, "<table id='buttons'><tr>"); StringBuffer_append(res->outputbuffer, "<td style='color:red;'>" "<form method=POST action='_runtime'>Stop Monit http server? " "<input type=hidden name='securitytoken' value='%s'>" "<input type=hidden name='action' value='stop'>" "<input type=submit value='Go'>" "</form>" "</td>", res->token); StringBuffer_append(res->outputbuffer, "<td>" "<form method=POST action='_runtime'>Force validate now? " "<input type=hidden name='securitytoken' value='%s'>" "<input type=hidden name='action' value='validate'>" "<input type=submit value='Go'>" "</form>" "</td>", res->token); if ((Run.flags & Run_Log) && ! 
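// Offer the log viewer button only when Monit logs to its own file, i.e. logging is enabled and not redirected to syslog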
(Run.flags & Run_UseSyslog)) { StringBuffer_append(res->outputbuffer, "<td>" "<form method=POST action='_viewlog'>View Monit logfile? " "<input type=hidden name='securitytoken' value='%s'>" "<input type=submit value='Go'>" "</form>" "</td>", res->token); } StringBuffer_append(res->outputbuffer, "</tr></table>"); } do_foot(res); } static void do_viewlog(HttpRequest req, HttpResponse res) { if (is_readonly(req)) { send_error(req, res, SC_FORBIDDEN, "You do not have sufficient privileges to access this page"); return; } do_head(res, "_viewlog", "View log", 100); if ((Run.flags & Run_Log) && ! (Run.flags & Run_UseSyslog)) { FILE *f = fopen(Run.files.log, "r"); if (f) { size_t n; char buf[512]; StringBuffer_append(res->outputbuffer, "<br><p><form><textarea cols=120 rows=30 readonly>"); while ((n = fread(buf, sizeof(char), sizeof(buf) - 1, f)) > 0) { buf[n] = 0; StringBuffer_append(res->outputbuffer, "%s", buf); } fclose(f); StringBuffer_append(res->outputbuffer, "</textarea></form>"); } else { StringBuffer_append(res->outputbuffer, "Error opening logfile: %s", STRERROR); } } else { StringBuffer_append(res->outputbuffer, "<b>Cannot view logfile:</b><br>"); if (! (Run.flags & Run_Log)) StringBuffer_append(res->outputbuffer, "Monit was started without logging"); else StringBuffer_append(res->outputbuffer, "Monit uses syslog"); } do_foot(res); } static void handle_service(HttpRequest req, HttpResponse res) { char *name = req->url; if (! name) { send_error(req, res, SC_NOT_FOUND, "Service name required"); return; } Service_T s = Util_getService(++name); if (! s) { send_error(req, res, SC_NOT_FOUND, "There is no service named \"%s\"", name); return; } do_service(req, res, s); } static void handle_service_action(HttpRequest req, HttpResponse res) { char *name = req->url; if (! name) { send_error(req, res, SC_NOT_FOUND, "Service name required"); return; } Service_T s = Util_getService(++name); if (! s) { send_error(req, res, SC_NOT_FOUND, "There is no service named \"%s\"", name); return; } const char *action = get_parameter(req, "action"); if (action) { if (is_readonly(req)) { send_error(req, res, SC_FORBIDDEN, "You do not have sufficient privileges to access this page"); return; } Action_Type doaction = Util_getAction(action); if (doaction == Action_Ignored) { send_error(req, res, SC_BAD_REQUEST, "Invalid action \"%s\"", action); return; } s->doaction = doaction; const char *token = get_parameter(req, "token"); if (token) { FREE(s->token); s->token = Str_dup(token); } LogInfo("'%s' %s on user request\n", s->name, action); Run.flags |= Run_ActionPending; /* set the global flag */ do_wakeupcall(); } do_service(req, res, s); } static void handle_doaction(HttpRequest req, HttpResponse res) { Service_T s; Action_Type doaction = Action_Ignored; const char *action = get_parameter(req, "action"); const char *token = get_parameter(req, "token"); if (action) { if (is_readonly(req)) { send_error(req, res, SC_FORBIDDEN, "You do not have sufficient privileges to access this page"); return; } if ((doaction = Util_getAction(action)) == Action_Ignored) { send_error(req, res, SC_BAD_REQUEST, "Invalid action \"%s\"", action); return; } for (HttpParameter p = req->params; p; p = p->next) { if (IS(p->name, "service")) { s = Util_getService(p->value); if (! s) { send_error(req, res, SC_BAD_REQUEST, "There is no service named \"%s\"", p->value ? 
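// p->value may be NULL here; substitute an empty string in the error message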
p->value : ""); return; } s->doaction = doaction; LogInfo("'%s' %s on user request\n", s->name, action); } } /* Set token for last service only so we'll get it back after all services were handled */ if (token) { Service_T q = NULL; for (s = servicelist; s; s = s->next) if (s->doaction == doaction) q = s; if (q) { FREE(q->token); q->token = Str_dup(token); } } Run.flags |= Run_ActionPending; do_wakeupcall(); } } static void handle_runtime(HttpRequest req, HttpResponse res) { LOCK(Run.mutex) do_runtime(req, res); END_LOCK; } static void handle_runtime_action(HttpRequest req, HttpResponse res) { const char *action = get_parameter(req, "action"); if (action) { if (is_readonly(req)) { send_error(req, res, SC_FORBIDDEN, "You do not have sufficient privileges to access this page"); return; } if (IS(action, "validate")) { LogInfo("The Monit http server woke up on user request\n"); do_wakeupcall(); } else if (IS(action, "stop")) { LogInfo("The Monit http server stopped on user request\n"); send_error(req, res, SC_SERVICE_UNAVAILABLE, "The Monit http server is stopped"); Engine_stop(); return; } } handle_runtime(req, res); } static void do_service(HttpRequest req, HttpResponse res, Service_T s) { char buf[STRLEN]; ASSERT(s); do_head(res, s->name, s->name, Run.polltime); StringBuffer_append(res->outputbuffer, "<h2>%s status</h2>" "<table id='status-table'>" "<tr>" "<th width='30%%'>Parameter</th>" "<th width='70%%'>Value</th>" "</tr>" "<tr>" "<td>Name</td>" "<td>%s</td>" "</tr>", servicetypes[s->type], s->name); if (s->type == Service_Process) StringBuffer_append(res->outputbuffer, "<tr><td>%s</td><td>%s</td></tr>", s->matchlist ? "Match" : "Pid file", s->path); else if (s->type == Service_Host) StringBuffer_append(res->outputbuffer, "<tr><td>Address</td><td>%s</td></tr>", s->path); else if (s->type == Service_Net) StringBuffer_append(res->outputbuffer, "<tr><td>Interface</td><td>%s</td></tr>", s->path); else if (s->type != Service_System) StringBuffer_append(res->outputbuffer, "<tr><td>Path</td><td>%s</td></tr>", s->path); StringBuffer_append(res->outputbuffer, "<tr><td>Status</td><td>%s</td></tr>", get_service_status(HTML, s, buf, sizeof(buf))); for (ServiceGroup_T sg = servicegrouplist; sg; sg = sg->next) for (list_t m = sg->members->head; m; m = m->next) if (m->e == s) StringBuffer_append(res->outputbuffer, "<tr><td>Group</td><td class='blue-text'>%s</td></tr>", sg->name); StringBuffer_append(res->outputbuffer, "<tr><td>Monitoring status</td><td>%s</td></tr>", get_monitoring_status(HTML, s, buf, sizeof(buf))); StringBuffer_append(res->outputbuffer, "<tr><td>Monitoring mode</td><td>%s</td></tr>", modenames[s->mode]); StringBuffer_append(res->outputbuffer, "<tr><td>On reboot</td><td>%s</td></tr>", onrebootnames[s->onreboot]); for (Dependant_T d = s->dependantlist; d; d = d->next) { if (d->dependant != NULL) { StringBuffer_append(res->outputbuffer, "<tr><td>Depends on service </td><td> <a href=%s> %s </a></td></tr>", d->dependant, d->dependant); } } if (s->start) { StringBuffer_append(res->outputbuffer, "<tr><td>Start program</td><td>'%s'", Util_commandDescription(s->start, (char[STRLEN]){})); if (s->start->has_uid) StringBuffer_append(res->outputbuffer, " as uid %d", s->start->uid); if (s->start->has_gid) StringBuffer_append(res->outputbuffer, " as gid %d", s->start->gid); StringBuffer_append(res->outputbuffer, " timeout %s", Fmt_time2str(s->start->timeout, (char[11]){})); StringBuffer_append(res->outputbuffer, "</td></tr>"); } if (s->stop) { StringBuffer_append(res->outputbuffer, "<tr><td>Stop 
program</td><td>'%s'", Util_commandDescription(s->stop, (char[STRLEN]){})); if (s->stop->has_uid) StringBuffer_append(res->outputbuffer, " as uid %d", s->stop->uid); if (s->stop->has_gid) StringBuffer_append(res->outputbuffer, " as gid %d", s->stop->gid); StringBuffer_append(res->outputbuffer, " timeout %s", Fmt_time2str(s->stop->timeout, (char[11]){})); StringBuffer_append(res->outputbuffer, "</td></tr>"); } if (s->restart) { StringBuffer_append(res->outputbuffer, "<tr><td>Restart program</td><td>'%s'", Util_commandDescription(s->restart, (char[STRLEN]){})); if (s->restart->has_uid) StringBuffer_append(res->outputbuffer, " as uid %d", s->restart->uid); if (s->restart->has_gid) StringBuffer_append(res->outputbuffer, " as gid %d", s->restart->gid); StringBuffer_append(res->outputbuffer, " timeout %s", Fmt_time2str(s->restart->timeout, (char[11]){})); StringBuffer_append(res->outputbuffer, "</td></tr>"); } if (s->every.type != Every_Cycle) { StringBuffer_append(res->outputbuffer, "<tr><td>Check service</td><td>"); if (s->every.type == Every_SkipCycles) StringBuffer_append(res->outputbuffer, "every %d cycle", s->every.spec.cycle.number); else if (s->every.type == Every_Cron) StringBuffer_append(res->outputbuffer, "every <code>\"%s\"</code>", s->every.spec.cron); else if (s->every.type == Every_NotInCron) StringBuffer_append(res->outputbuffer, "not every <code>\"%s\"</code>", s->every.spec.cron); StringBuffer_append(res->outputbuffer, "</td></tr>"); } _printStatus(HTML, res, s); // Rules print_service_rules_timeout(res, s); print_service_rules_nonexistence(res, s); print_service_rules_existence(res, s); print_service_rules_icmp(res, s); print_service_rules_port(res, s); print_service_rules_socket(res, s); print_service_rules_perm(res, s); print_service_rules_uid(res, s); print_service_rules_euid(res, s); print_service_rules_secattr(res, s); print_service_rules_gid(res, s); print_service_rules_timestamp(res, s); print_service_rules_fsflags(res, s); print_service_rules_filesystem(res, s); print_service_rules_size(res, s); print_service_rules_linkstatus(res, s); print_service_rules_linkspeed(res, s); print_service_rules_linksaturation(res, s); print_service_rules_uploadbytes(res, s); print_service_rules_uploadpackets(res, s); print_service_rules_downloadbytes(res, s); print_service_rules_downloadpackets(res, s); print_service_rules_uptime(res, s); print_service_rules_content(res, s); print_service_rules_checksum(res, s); print_service_rules_pid(res, s); print_service_rules_ppid(res, s); print_service_rules_program(res, s); print_service_rules_resource(res, s); print_alerts(res, s->maillist); StringBuffer_append(res->outputbuffer, "</table>"); print_buttons(req, res, s); do_foot(res); } static void do_home_system(HttpResponse res) { Service_T s = Run.system; char buf[STRLEN]; StringBuffer_append(res->outputbuffer, "<table id='header-row'>" "<tr>" "<th class='left first'>System</th>" "<th class='left'>Status</th>" "<th class='right column'>Load</th>" "<th class='right column'>CPU</th>" "<th class='right column'>Memory</th>" "<th class='right column'>Swap</th>" "</tr>" "<tr class='stripe'>" "<td class='left'><a href='%s'>%s</a></td>" "<td class='left'>%s</td>" "<td class='right column'>[%.2f]&nbsp;[%.2f]&nbsp;[%.2f]</td>" "<td class='right column'>" "%.1f%%us,&nbsp;%.1f%%sy" #ifdef HAVE_CPU_WAIT ",&nbsp;%.1f%%wa" #endif "</td>", s->name, s->name, get_service_status(HTML, s, buf, sizeof(buf)), systeminfo.loadavg[0], systeminfo.loadavg[1], systeminfo.loadavg[2], systeminfo.cpu.usage.user > 0. ? 
systeminfo.cpu.usage.user : 0., systeminfo.cpu.usage.system > 0. ? systeminfo.cpu.usage.system : 0.
#ifdef HAVE_CPU_WAIT
                            , systeminfo.cpu.usage.wait > 0. ? systeminfo.cpu.usage.wait : 0.
#endif
                            );
        StringBuffer_append(res->outputbuffer, "<td class='right column'>%.1f%% [%s]</td>", systeminfo.memory.usage.percent, Fmt_bytes2str(systeminfo.memory.usage.bytes, buf));
        StringBuffer_append(res->outputbuffer, "<td class='right column'>%.1f%% [%s]</td>", systeminfo.swap.usage.percent, Fmt_bytes2str(systeminfo.swap.usage.bytes, buf));
        StringBuffer_append(res->outputbuffer, "</tr>" "</table>");
}


static void do_home_process(HttpResponse res) {
        char buf[STRLEN];
        boolean_t on = true;
        boolean_t header = true;
        for (Service_T s = servicelist_conf; s; s = s->next_conf) {
                if (s->type != Service_Process)
                        continue;
                if (header) {
                        StringBuffer_append(res->outputbuffer,
                                            "<table id='header-row'>"
                                            "<tr>"
                                            "<th class='left first'>Process</th>"
                                            "<th class='left'>Status</th>"
                                            "<th class='right'>Uptime</th>"
                                            "<th class='right'>CPU Total</th>"
                                            "<th class='right'>Memory Total</th>"
                                            "<th class='right column'>Read</th>"
                                            "<th class='right column'>Write</th>"
                                            "</tr>");
                        header = false;
                }
                StringBuffer_append(res->outputbuffer,
                                    "<tr%s>"
                                    "<td class='left'><a href='%s'>%s</a></td>"
                                    "<td class='left'>%s</td>",
                                    on ? " class='stripe'" : "", s->name, s->name, get_service_status(HTML, s, buf, sizeof(buf)));
                if (! (Run.flags & Run_ProcessEngineEnabled) || ! Util_hasServiceStatus(s) || s->inf.process->uptime < 0) {
                        StringBuffer_append(res->outputbuffer, "<td class='right'>-</td>");
                } else {
                        StringBuffer_append(res->outputbuffer, "<td class='right'>%s</td>", _getUptime(s->inf.process->uptime, (char[256]){}));
                }
                if (! (Run.flags & Run_ProcessEngineEnabled) || ! Util_hasServiceStatus(s) || s->inf.process->total_cpu_percent < 0) {
                        StringBuffer_append(res->outputbuffer, "<td class='right'>-</td>");
                } else {
                        StringBuffer_append(res->outputbuffer, "<td class='right%s'>%.1f%%</td>", (s->error & Event_Resource) ? " red-text" : "", s->inf.process->total_cpu_percent);
                }
                if (! (Run.flags & Run_ProcessEngineEnabled) || ! Util_hasServiceStatus(s) || s->inf.process->total_mem_percent < 0) {
                        StringBuffer_append(res->outputbuffer, "<td class='right'>-</td>");
                } else {
                        StringBuffer_append(res->outputbuffer, "<td class='right%s'>%.1f%% [%s]</td>", (s->error & Event_Resource) ? " red-text" : "", s->inf.process->total_mem_percent, Fmt_bytes2str(s->inf.process->total_mem, buf));
                }
                boolean_t hasReadBytes = Statistics_initialized(&(s->inf.process->read.bytes));
                boolean_t hasReadOperations = Statistics_initialized(&(s->inf.process->read.operations));
                if (! (Run.flags & Run_ProcessEngineEnabled) || ! Util_hasServiceStatus(s) || (! hasReadBytes && ! hasReadOperations)) {
                        StringBuffer_append(res->outputbuffer, "<td class='right column'>-</td>");
                } else if (hasReadBytes) {
                        StringBuffer_append(res->outputbuffer, "<td class='right column%s'>%s/s</td>", (s->error & Event_Resource) ? " red-text" : "", Fmt_bytes2str(Statistics_deltaNormalize(&(s->inf.process->read.bytes)), (char[10]){}));
                } else if (hasReadOperations) {
                        StringBuffer_append(res->outputbuffer, "<td class='right column%s'>%.1f/s</td>", (s->error & Event_Resource) ? " red-text" : "", Statistics_deltaNormalize(&(s->inf.process->read.operations)));
                }
                boolean_t hasWriteBytes = Statistics_initialized(&(s->inf.process->write.bytes));
                boolean_t hasWriteOperations = Statistics_initialized(&(s->inf.process->write.operations));
                if (! (Run.flags & Run_ProcessEngineEnabled) || ! Util_hasServiceStatus(s) || (! hasWriteBytes && ! hasWriteOperations)) {
                        StringBuffer_append(res->outputbuffer, "<td class='right column'>-</td>");
                } else if (hasWriteBytes) {
                        StringBuffer_append(res->outputbuffer, "<td class='right column%s'>%s/s</td>", (s->error & Event_Resource) ? " red-text" : "", Fmt_bytes2str(Statistics_deltaNormalize(&(s->inf.process->write.bytes)), (char[10]){}));
                } else if (hasWriteOperations) {
                        StringBuffer_append(res->outputbuffer, "<td class='right column%s'>%.1f/s</td>", (s->error & Event_Resource) ? " red-text" : "", Statistics_deltaNormalize(&(s->inf.process->write.operations)));
                }
                StringBuffer_append(res->outputbuffer, "</tr>");
                on = ! on;
        }
        if (! header)
                StringBuffer_append(res->outputbuffer, "</table>");
}


static void do_home_program(HttpResponse res) {
        char buf[STRLEN];
        boolean_t on = true;
        boolean_t header = true;
        for (Service_T s = servicelist_conf; s; s = s->next_conf) {
                if (s->type != Service_Program)
                        continue;
                if (header) {
                        StringBuffer_append(res->outputbuffer,
                                            "<table id='header-row'>"
                                            "<tr>"
                                            "<th class='left first'>Program</th>"
                                            "<th class='left'>Status</th>"
                                            "<th class='left'>Output</th>"
                                            "<th class='right'>Last started</th>"
                                            "<th class='right'>Exit value</th>"
                                            "</tr>");
                        header = false;
                }
                StringBuffer_append(res->outputbuffer,
                                    "<tr %s>"
                                    "<td class='left'><a href='%s'>%s</a></td>"
                                    "<td class='left'>%s</td>",
                                    on ? "class='stripe'" : "", s->name, s->name, get_service_status(HTML, s, buf, sizeof(buf)));
                if (! Util_hasServiceStatus(s)) {
                        StringBuffer_append(res->outputbuffer, "<td class='left'>-</td>");
                        StringBuffer_append(res->outputbuffer, "<td class='right'>-</td>");
                        StringBuffer_append(res->outputbuffer, "<td class='right'>-</td>");
                } else {
                        if (s->program->started) {
                                StringBuffer_append(res->outputbuffer, "<td class='left short'>");
                                if (StringBuffer_length(s->program->lastOutput)) {
                                        // Print first line only (escape HTML characters if any)
                                        const char *output = StringBuffer_toString(s->program->lastOutput);
                                        for (int i = 0; output[i]; i++) {
                                                if (output[i] == '<')
                                                        StringBuffer_append(res->outputbuffer, "&lt;");
                                                else if (output[i] == '>')
                                                        StringBuffer_append(res->outputbuffer, "&gt;");
                                                else if (output[i] == '&')
                                                        StringBuffer_append(res->outputbuffer, "&amp;");
                                                else if (output[i] == '\r' || output[i] == '\n')
                                                        break;
                                                else
                                                        StringBuffer_append(res->outputbuffer, "%c", output[i]);
                                        }
                                } else {
                                        StringBuffer_append(res->outputbuffer, "no output");
                                }
                                StringBuffer_append(res->outputbuffer, "</td>");
                                StringBuffer_append(res->outputbuffer, "<td class='right'>%s</td>", Time_fmt((char[32]){}, 32, "%d %b %Y %H:%M:%S", s->program->started));
                                StringBuffer_append(res->outputbuffer, "<td class='right'>%d</td>", s->program->exitStatus);
                        } else {
                                StringBuffer_append(res->outputbuffer, "<td class='right'>-</td>");
                                StringBuffer_append(res->outputbuffer, "<td class='right'>Not yet started</td>");
                                StringBuffer_append(res->outputbuffer, "<td class='right'>-</td>");
                        }
                }
                StringBuffer_append(res->outputbuffer, "</tr>");
                on = ! on;
        }
        if (! 
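// "header" was cleared if at least one matching service was rendered, i.e. a table was opened that must now be closed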
header) StringBuffer_append(res->outputbuffer, "</table>"); } static void do_home_net(HttpResponse res) { char buf[STRLEN]; boolean_t on = true; boolean_t header = true; for (Service_T s = servicelist_conf; s; s = s->next_conf) { if (s->type != Service_Net) continue; if (header) { StringBuffer_append(res->outputbuffer, "<table id='header-row'>" "<tr>" "<th class='left first'>Net</th>" "<th class='left'>Status</th>" "<th class='right'>Upload</th>" "<th class='right'>Download</th>" "</tr>"); header = false; } StringBuffer_append(res->outputbuffer, "<tr %s>" "<td class='left'><a href='%s'>%s</a></td>" "<td class='left'>%s</td>", on ? "class='stripe'" : "", s->name, s->name, get_service_status(HTML, s, buf, sizeof(buf))); if (! Util_hasServiceStatus(s) || Link_getState(s->inf.net->stats) != 1) { StringBuffer_append(res->outputbuffer, "<td class='right'>-</td>"); StringBuffer_append(res->outputbuffer, "<td class='right'>-</td>"); } else { StringBuffer_append(res->outputbuffer, "<td class='right'>%s&#47;s</td>", Fmt_bytes2str(Link_getBytesOutPerSecond(s->inf.net->stats), buf)); StringBuffer_append(res->outputbuffer, "<td class='right'>%s&#47;s</td>", Fmt_bytes2str(Link_getBytesInPerSecond(s->inf.net->stats), buf)); } StringBuffer_append(res->outputbuffer, "</tr>"); on = ! on; } if (! header) StringBuffer_append(res->outputbuffer, "</table>"); } static void do_home_filesystem(HttpResponse res) { char buf[STRLEN]; boolean_t on = true; boolean_t header = true; for (Service_T s = servicelist_conf; s; s = s->next_conf) { if (s->type != Service_Filesystem) continue; if (header) { StringBuffer_append(res->outputbuffer, "<table id='header-row'>" "<tr>" "<th class='left first'>Filesystem</th>" "<th class='left'>Status</th>" "<th class='right'>Space usage</th>" "<th class='right'>Inodes usage</th>" "<th class='right column'>Read</th>" "<th class='right column'>Write</th>" "</tr>"); header = false; } StringBuffer_append(res->outputbuffer, "<tr %s>" "<td class='left'><a href='%s'>%s</a></td>" "<td class='left'>%s</td>", on ? "class='stripe'" : "", s->name, s->name, get_service_status(HTML, s, buf, sizeof(buf))); if (! Util_hasServiceStatus(s)) { StringBuffer_append(res->outputbuffer, "<td class='right'>- [-]</td>" "<td class='right'>- [-]</td>" "<td class='right column'>- [-]</td>" "<td class='right column'>- [-]</td>"); } else { StringBuffer_append(res->outputbuffer, "<td class='right column%s'>%.1f%% [%s]</td>", (s->error & Event_Resource) ? " red-text" : "", s->inf.filesystem->space_percent, s->inf.filesystem->f_bsize > 0 ? Fmt_bytes2str(s->inf.filesystem->f_blocksused * s->inf.filesystem->f_bsize, buf) : "0 MB"); if (s->inf.filesystem->f_files > 0) { StringBuffer_append(res->outputbuffer, "<td class='right column%s'>%.1f%% [%lld objects]</td>", (s->error & Event_Resource) ? " red-text" : "", s->inf.filesystem->inode_percent, s->inf.filesystem->f_filesused); } else { StringBuffer_append(res->outputbuffer, "<td class='right column'>not supported by filesystem</td>"); } StringBuffer_append(res->outputbuffer, "<td class='right column%s'>%s/s</td>" "<td class='right column%s'>%s/s</td>", (s->error & Event_Resource) ? " red-text" : "", Fmt_bytes2str(Statistics_deltaNormalize(&(s->inf.filesystem->read.bytes)), (char[10]){}), (s->error & Event_Resource) ? " red-text" : "", Fmt_bytes2str(Statistics_deltaNormalize(&(s->inf.filesystem->write.bytes)), (char[10]){})); } StringBuffer_append(res->outputbuffer, "</tr>"); on = ! on; } if (! 
header) StringBuffer_append(res->outputbuffer, "</table>"); } static void do_home_file(HttpResponse res) { char buf[STRLEN]; boolean_t on = true; boolean_t header = true; for (Service_T s = servicelist_conf; s; s = s->next_conf) { if (s->type != Service_File) continue; if (header) { StringBuffer_append(res->outputbuffer, "<table id='header-row'>" "<tr>" "<th class='left first'>File</th>" "<th class='left'>Status</th>" "<th class='right'>Size</th>" "<th class='right'>Permission</th>" "<th class='right'>UID</th>" "<th class='right'>GID</th>" "</tr>"); header = false; } StringBuffer_append(res->outputbuffer, "<tr %s>" "<td class='left'><a href='%s'>%s</a></td>" "<td class='left'>%s</td>", on ? "class='stripe'" : "", s->name, s->name, get_service_status(HTML, s, buf, sizeof(buf))); if (! Util_hasServiceStatus(s) || s->inf.file->size < 0) StringBuffer_append(res->outputbuffer, "<td class='right'>-</td>"); else StringBuffer_append(res->outputbuffer, "<td class='right'>%s</td>", Fmt_bytes2str(s->inf.file->size, (char[10]){})); if (! Util_hasServiceStatus(s) || s->inf.file->mode < 0) StringBuffer_append(res->outputbuffer, "<td class='right'>-</td>"); else StringBuffer_append(res->outputbuffer, "<td class='right'>%04o</td>", s->inf.file->mode & 07777); if (! Util_hasServiceStatus(s) || s->inf.file->uid < 0) StringBuffer_append(res->outputbuffer, "<td class='right'>-</td>"); else StringBuffer_append(res->outputbuffer, "<td class='right'>%d</td>", s->inf.file->uid); if (! Util_hasServiceStatus(s) || s->inf.file->gid < 0) StringBuffer_append(res->outputbuffer, "<td class='right'>-</td>"); else StringBuffer_append(res->outputbuffer, "<td class='right'>%d</td>", s->inf.file->gid); StringBuffer_append(res->outputbuffer, "</tr>"); on = ! on; } if (! header) StringBuffer_append(res->outputbuffer, "</table>"); } static void do_home_fifo(HttpResponse res) { char buf[STRLEN]; boolean_t on = true; boolean_t header = true; for (Service_T s = servicelist_conf; s; s = s->next_conf) { if (s->type != Service_Fifo) continue; if (header) { StringBuffer_append(res->outputbuffer, "<table id='header-row'>" "<tr>" "<th class='left first'>Fifo</th>" "<th class='left'>Status</th>" "<th class='right'>Permission</th>" "<th class='right'>UID</th>" "<th class='right'>GID</th>" "</tr>"); header = false; } StringBuffer_append(res->outputbuffer, "<tr %s>" "<td class='left'><a href='%s'>%s</a></td>" "<td class='left'>%s</td>", on ? "class='stripe'" : "", s->name, s->name, get_service_status(HTML, s, buf, sizeof(buf))); if (! Util_hasServiceStatus(s) || s->inf.fifo->mode < 0) StringBuffer_append(res->outputbuffer, "<td class='right'>-</td>"); else StringBuffer_append(res->outputbuffer, "<td class='right'>%04o</td>", s->inf.fifo->mode & 07777); if (! Util_hasServiceStatus(s) || s->inf.fifo->uid < 0) StringBuffer_append(res->outputbuffer, "<td class='right'>-</td>"); else StringBuffer_append(res->outputbuffer, "<td class='right'>%d</td>", s->inf.fifo->uid); if (! Util_hasServiceStatus(s) || s->inf.fifo->gid < 0) StringBuffer_append(res->outputbuffer, "<td class='right'>-</td>"); else StringBuffer_append(res->outputbuffer, "<td class='right'>%d</td>", s->inf.fifo->gid); StringBuffer_append(res->outputbuffer, "</tr>"); on = ! on; } if (! 
header) StringBuffer_append(res->outputbuffer, "</table>"); } static void do_home_directory(HttpResponse res) { char buf[STRLEN]; boolean_t on = true; boolean_t header = true; for (Service_T s = servicelist_conf; s; s = s->next_conf) { if (s->type != Service_Directory) continue; if (header) { StringBuffer_append(res->outputbuffer, "<table id='header-row'>" "<tr>" "<th class='left first'>Directory</th>" "<th class='left'>Status</th>" "<th class='right'>Permission</th>" "<th class='right'>UID</th>" "<th class='right'>GID</th>" "</tr>"); header = false; } StringBuffer_append(res->outputbuffer, "<tr %s>" "<td class='left'><a href='%s'>%s</a></td>" "<td class='left'>%s</td>", on ? "class='stripe'" : "", s->name, s->name, get_service_status(HTML, s, buf, sizeof(buf))); if (! Util_hasServiceStatus(s) || s->inf.directory->mode < 0) StringBuffer_append(res->outputbuffer, "<td class='right'>-</td>"); else StringBuffer_append(res->outputbuffer, "<td class='right'>%04o</td>", s->inf.directory->mode & 07777); if (! Util_hasServiceStatus(s) || s->inf.directory->uid < 0) StringBuffer_append(res->outputbuffer, "<td class='right'>-</td>"); else StringBuffer_append(res->outputbuffer, "<td class='right'>%d</td>", s->inf.directory->uid); if (! Util_hasServiceStatus(s) || s->inf.directory->gid < 0) StringBuffer_append(res->outputbuffer, "<td class='right'>-</td>"); else StringBuffer_append(res->outputbuffer, "<td class='right'>%d</td>", s->inf.directory->gid); StringBuffer_append(res->outputbuffer, "</tr>"); on = ! on; } if (! header) StringBuffer_append(res->outputbuffer, "</table>"); } static void do_home_host(HttpResponse res) { char buf[STRLEN]; boolean_t on = true; boolean_t header = true; for (Service_T s = servicelist_conf; s; s = s->next_conf) { if (s->type != Service_Host) continue; if (header) { StringBuffer_append(res->outputbuffer, "<table id='header-row'>" "<tr>" "<th class='left first'>Host</th>" "<th class='left'>Status</th>" "<th class='right'>Protocol(s)</th>" "</tr>"); header = false; } StringBuffer_append(res->outputbuffer, "<tr %s>" "<td class='left'><a href='%s'>%s</a></td>" "<td class='left'>%s</td>", on ? "class='stripe'" : "", s->name, s->name, get_service_status(HTML, s, buf, sizeof(buf))); if (! 
Util_hasServiceStatus(s)) { StringBuffer_append(res->outputbuffer, "<td class='right'>-</td>"); } else { StringBuffer_append(res->outputbuffer, "<td class='right'>"); for (Icmp_T icmp = s->icmplist; icmp; icmp = icmp->next) { if (icmp != s->icmplist) StringBuffer_append(res->outputbuffer, "&nbsp;&nbsp;<b>|</b>&nbsp;&nbsp;"); switch (icmp->is_available) { case Connection_Init: StringBuffer_append(res->outputbuffer, "<span class='gray-text'>[Ping]</span>"); break; case Connection_Failed: StringBuffer_append(res->outputbuffer, "<span class='red-text'>[Ping]</span>"); break; default: StringBuffer_append(res->outputbuffer, "<span>[Ping]</span>"); break; } } if (s->icmplist && s->portlist) StringBuffer_append(res->outputbuffer, "&nbsp;&nbsp;<b>|</b>&nbsp;&nbsp;"); for (Port_T port = s->portlist; port; port = port->next) { if (port != s->portlist) StringBuffer_append(res->outputbuffer, "&nbsp;&nbsp;<b>|</b>&nbsp;&nbsp;"); switch (port->is_available) { case Connection_Init: StringBuffer_append(res->outputbuffer, "<span class='gray-text'>[%s] at port %d</span>", port->protocol->name, port->target.net.port); break; case Connection_Failed: StringBuffer_append(res->outputbuffer, "<span class='red-text'>[%s] at port %d</span>", port->protocol->name, port->target.net.port); break; default: if (port->target.net.ssl.options.flags && port->target.net.ssl.certificate.validDays < port->target.net.ssl.certificate.minimumDays) StringBuffer_append(res->outputbuffer, "<span class='red-text'>[%s] at port %d</span>", port->protocol->name, port->target.net.port); else StringBuffer_append(res->outputbuffer, "<span>[%s] at port %d</span>", port->protocol->name, port->target.net.port); break; } } StringBuffer_append(res->outputbuffer, "</td>"); } StringBuffer_append(res->outputbuffer, "</tr>"); on = ! on; } if (! header) StringBuffer_append(res->outputbuffer, "</table>"); } /* ------------------------------------------------------------------------- */ static void print_alerts(HttpResponse res, Mail_T s) { for (Mail_T r = s; r; r = r->next) { StringBuffer_append(res->outputbuffer, "<tr class='stripe'><td>Alert mail to</td>" "<td>%s</td></tr>", r->to ? 
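// The recipient address may be unset; print an empty string rather than passing NULL to the formatter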
r->to : ""); StringBuffer_append(res->outputbuffer, "<tr><td>Alert on</td><td>"); if (r->events == Event_Null) { StringBuffer_append(res->outputbuffer, "No events"); } else if (r->events == Event_All) { StringBuffer_append(res->outputbuffer, "All events"); } else { if (IS_EVENT_SET(r->events, Event_Action)) StringBuffer_append(res->outputbuffer, "Action "); if (IS_EVENT_SET(r->events, Event_ByteIn)) StringBuffer_append(res->outputbuffer, "ByteIn "); if (IS_EVENT_SET(r->events, Event_ByteOut)) StringBuffer_append(res->outputbuffer, "ByteOut "); if (IS_EVENT_SET(r->events, Event_Checksum)) StringBuffer_append(res->outputbuffer, "Checksum "); if (IS_EVENT_SET(r->events, Event_Connection)) StringBuffer_append(res->outputbuffer, "Connection "); if (IS_EVENT_SET(r->events, Event_Content)) StringBuffer_append(res->outputbuffer, "Content "); if (IS_EVENT_SET(r->events, Event_Data)) StringBuffer_append(res->outputbuffer, "Data "); if (IS_EVENT_SET(r->events, Event_Exec)) StringBuffer_append(res->outputbuffer, "Exec "); if (IS_EVENT_SET(r->events, Event_Exist)) StringBuffer_append(res->outputbuffer, "Exist "); if (IS_EVENT_SET(r->events, Event_FsFlag)) StringBuffer_append(res->outputbuffer, "Fsflags "); if (IS_EVENT_SET(r->events, Event_Gid)) StringBuffer_append(res->outputbuffer, "Gid "); if (IS_EVENT_SET(r->events, Event_Instance)) StringBuffer_append(res->outputbuffer, "Instance "); if (IS_EVENT_SET(r->events, Event_Invalid)) StringBuffer_append(res->outputbuffer, "Invalid "); if (IS_EVENT_SET(r->events, Event_Link)) StringBuffer_append(res->outputbuffer, "Link "); if (IS_EVENT_SET(r->events, Event_NonExist)) StringBuffer_append(res->outputbuffer, "Nonexist "); if (IS_EVENT_SET(r->events, Event_Permission)) StringBuffer_append(res->outputbuffer, "Permission "); if (IS_EVENT_SET(r->events, Event_PacketIn)) StringBuffer_append(res->outputbuffer, "PacketIn "); if (IS_EVENT_SET(r->events, Event_PacketOut)) StringBuffer_append(res->outputbuffer, "PacketOut "); if (IS_EVENT_SET(r->events, Event_Pid)) StringBuffer_append(res->outputbuffer, "PID "); if (IS_EVENT_SET(r->events, Event_Icmp)) StringBuffer_append(res->outputbuffer, "Ping "); if (IS_EVENT_SET(r->events, Event_PPid)) StringBuffer_append(res->outputbuffer, "PPID "); if (IS_EVENT_SET(r->events, Event_Resource)) StringBuffer_append(res->outputbuffer, "Resource "); if (IS_EVENT_SET(r->events, Event_Saturation)) StringBuffer_append(res->outputbuffer, "Saturation "); if (IS_EVENT_SET(r->events, Event_Size)) StringBuffer_append(res->outputbuffer, "Size "); if (IS_EVENT_SET(r->events, Event_Speed)) StringBuffer_append(res->outputbuffer, "Speed "); if (IS_EVENT_SET(r->events, Event_Status)) StringBuffer_append(res->outputbuffer, "Status "); if (IS_EVENT_SET(r->events, Event_Timeout)) StringBuffer_append(res->outputbuffer, "Timeout "); if (IS_EVENT_SET(r->events, Event_Timestamp)) StringBuffer_append(res->outputbuffer, "Timestamp "); if (IS_EVENT_SET(r->events, Event_Uid)) StringBuffer_append(res->outputbuffer, "Uid "); if (IS_EVENT_SET(r->events, Event_Uptime)) StringBuffer_append(res->outputbuffer, "Uptime "); } StringBuffer_append(res->outputbuffer, "</td></tr>"); if (r->reminder) { StringBuffer_append(res->outputbuffer, "<tr><td>Alert reminder</td><td>%u cycles</td></tr>", r->reminder); } } } static void print_buttons(HttpRequest req, HttpResponse res, Service_T s) { if (is_readonly(req)) { // A read-only REMOTE_USER does not get access to these buttons return; } StringBuffer_append(res->outputbuffer, "<table id='buttons'><tr>"); /* Start program */ 
if (s->start) StringBuffer_append(res->outputbuffer, "<td>" "<form method=POST action=%s>" "<input type=hidden name='securitytoken' value='%s'>" "<input type=hidden value='start' name=action>" "<input type=submit value='Start service'>" "</form>" "</td>", s->name, res->token); /* Stop program */ if (s->stop) StringBuffer_append(res->outputbuffer, "<td>" "<form method=POST action=%s>" "<input type=hidden name='securitytoken' value='%s'>" "<input type=hidden value='stop' name=action>" "<input type=submit value='Stop service'>" "</form>" "</td>", s->name, res->token); /* Restart program */ if ((s->start && s->stop) || s->restart) StringBuffer_append(res->outputbuffer, "<td>" "<form method=POST action=%s>" "<input type=hidden name='securitytoken' value='%s'>" "<input type=hidden value='restart' name=action>" "<input type=submit value='Restart service'>" "</form>" "</td>", s->name, res->token); /* (un)monitor */ StringBuffer_append(res->outputbuffer, "<td>" "<form method=POST action=%s>" "<input type=hidden name='securitytoken' value='%s'>" "<input type=hidden value='%s' name=action>" "<input type=submit value='%s'>" "</form>" "</td>", s->name, res->token, s->monitor ? "unmonitor" : "monitor", s->monitor ? "Disable monitoring" : "Enable monitoring"); StringBuffer_append(res->outputbuffer, "</tr></table>"); } static void print_service_rules_timeout(HttpResponse res, Service_T s) { for (ActionRate_T ar = s->actionratelist; ar; ar = ar->next) { StringBuffer_append(res->outputbuffer, "<tr class='rule'><td>Timeout</td><td>If restarted %d times within %d cycle(s) then ", ar->count, ar->cycle); Util_printAction(ar->action->failed, res->outputbuffer); StringBuffer_append(res->outputbuffer, "</td></tr>"); } } static void print_service_rules_nonexistence(HttpResponse res, Service_T s) { for (NonExist_T l = s->nonexistlist; l; l = l->next) { StringBuffer_append(res->outputbuffer, "<tr class='rule'><td>Existence</td><td>"); Util_printRule(res->outputbuffer, l->action, "If doesn't exist"); StringBuffer_append(res->outputbuffer, "</td></tr>"); } } static void print_service_rules_existence(HttpResponse res, Service_T s) { for (Exist_T l = s->existlist; l; l = l->next) { StringBuffer_append(res->outputbuffer, "<tr class='rule'><td>Non-Existence</td><td>"); Util_printRule(res->outputbuffer, l->action, "If exist"); StringBuffer_append(res->outputbuffer, "</td></tr>"); } } static void print_service_rules_port(HttpResponse res, Service_T s) { for (Port_T p = s->portlist; p; p = p->next) { StringBuffer_append(res->outputbuffer, "<tr class='rule'><td>Port</td><td>"); StringBuffer_T buf = StringBuffer_create(64); StringBuffer_append(buf, "If failed [%s]:%d%s", p->hostname, p->target.net.port, Util_portRequestDescription(p)); if (p->outgoing.ip) StringBuffer_append(buf, " via address %s", p->outgoing.ip); StringBuffer_append(buf, " type %s/%s protocol %s with timeout %s", Util_portTypeDescription(p), Util_portIpDescription(p), p->protocol->name, Fmt_time2str(p->timeout, (char[11]){})); if (p->retry > 1) StringBuffer_append(buf, " and retry %d times", p->retry); #ifdef HAVE_OPENSSL if (p->target.net.ssl.options.flags) { StringBuffer_append(buf, " using TLS"); const char *options = Ssl_printOptions(&p->target.net.ssl.options, (char[STRLEN]){}, STRLEN); if (options && *options) StringBuffer_append(buf, " with options {%s}", options); if (p->target.net.ssl.certificate.minimumDays > 0) StringBuffer_append(buf, " and certificate valid for at least %d days", p->target.net.ssl.certificate.minimumDays); if 
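// Mention the expected certificate checksum in the rule description when one is configured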
(p->target.net.ssl.options.checksum) StringBuffer_append(buf, " and certificate checksum %s equal to '%s'", checksumnames[p->target.net.ssl.options.checksumType], p->target.net.ssl.options.checksum); } #endif Util_printRule(res->outputbuffer, p->action, "%s", StringBuffer_toString(buf)); StringBuffer_free(&buf); StringBuffer_append(res->outputbuffer, "</td></tr>"); } } static void print_service_rules_socket(HttpResponse res, Service_T s) { for (Port_T p = s->socketlist; p; p = p->next) { StringBuffer_append(res->outputbuffer, "<tr class='rule'><td>Unix Socket</td><td>"); if (p->retry > 1) Util_printRule(res->outputbuffer, p->action, "If failed %s type %s protocol %s with timeout %s and retry %d time(s)", p->target.unix.pathname, Util_portTypeDescription(p), p->protocol->name, Fmt_time2str(p->timeout, (char[11]){}), p->retry); else Util_printRule(res->outputbuffer, p->action, "If failed %s type %s protocol %s with timeout %s", p->target.unix.pathname, Util_portTypeDescription(p), p->protocol->name, Fmt_time2str(p->timeout, (char[11]){})); StringBuffer_append(res->outputbuffer, "</td></tr>"); } } static void print_service_rules_icmp(HttpResponse res, Service_T s) { for (Icmp_T i = s->icmplist; i; i = i->next) { switch (i->family) { case Socket_Ip4: StringBuffer_append(res->outputbuffer, "<tr class='rule'><td>Ping4</td><td>"); break; case Socket_Ip6: StringBuffer_append(res->outputbuffer, "<tr class='rule'><td>Ping6</td><td>"); break; default: StringBuffer_append(res->outputbuffer, "<tr class='rule'><td>Ping</td><td>"); break; } Util_printRule(res->outputbuffer, i->action, "If failed [count %d size %d with timeout %s%s%s]", i->count, i->size, Fmt_time2str(i->timeout, (char[11]){}), i->outgoing.ip ? " via address " : "", i->outgoing.ip ? i->outgoing.ip : ""); StringBuffer_append(res->outputbuffer, "</td></tr>"); } } static void print_service_rules_perm(HttpResponse res, Service_T s) { if (s->perm) { StringBuffer_append(res->outputbuffer, "<tr class='rule'><td>Permissions</td><td>"); if (s->perm->test_changes) Util_printRule(res->outputbuffer, s->perm->action, "If changed"); else Util_printRule(res->outputbuffer, s->perm->action, "If failed %o", s->perm->perm); StringBuffer_append(res->outputbuffer, "</td></tr>"); } } static void print_service_rules_uid(HttpResponse res, Service_T s) { if (s->uid) { StringBuffer_append(res->outputbuffer, "<tr class='rule'><td>UID</td><td>"); Util_printRule(res->outputbuffer, s->uid->action, "If failed %d", s->uid->uid); StringBuffer_append(res->outputbuffer, "</td></tr>"); } } static void print_service_rules_euid(HttpResponse res, Service_T s) { if (s->euid) { StringBuffer_append(res->outputbuffer, "<tr class='rule'><td>EUID</td><td>"); Util_printRule(res->outputbuffer, s->euid->action, "If failed %d", s->euid->uid); StringBuffer_append(res->outputbuffer, "</td></tr>"); } } static void print_service_rules_secattr(HttpResponse res, Service_T s) { for (SecurityAttribute_T a = s->secattrlist; a; a = a->next) { StringBuffer_append(res->outputbuffer, "<tr class='rule'><td>Security attribute</td><td>"); Util_printRule(res->outputbuffer, a->action, "If failed %s", a->attribute); StringBuffer_append(res->outputbuffer, "</td></tr>"); } } static void print_service_rules_gid(HttpResponse res, Service_T s) { if (s->gid) { StringBuffer_append(res->outputbuffer, "<tr class='rule'><td>GID</td><td>"); Util_printRule(res->outputbuffer, s->gid->action, "If failed %d", s->gid->gid); StringBuffer_append(res->outputbuffer, "</td></tr>"); } } static void 
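/* Render one rule row per configured timestamp test; "changed" tests print no operator or limit */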
print_service_rules_timestamp(HttpResponse res, Service_T s) { for (Timestamp_T t = s->timestamplist; t; t = t->next) { StringBuffer_append(res->outputbuffer, "<tr class='rule'><td>%c%s</td><td>", toupper(timestampnames[t->type][0]), timestampnames[t->type] + 1); if (t->test_changes) Util_printRule(res->outputbuffer, t->action, "If changed"); else Util_printRule(res->outputbuffer, t->action, "If %s %s", operatornames[t->operator], Fmt_time2str(t->time * 1000., (char[11]){})); StringBuffer_append(res->outputbuffer, "</td></tr>"); } } static void print_service_rules_fsflags(HttpResponse res, Service_T s) { for (FsFlag_T l = s->fsflaglist; l; l = l->next) { StringBuffer_append(res->outputbuffer, "<tr class='rule'><td>Filesystem flags</td><td>"); Util_printRule(res->outputbuffer, l->action, "If changed"); StringBuffer_append(res->outputbuffer, "</td></tr>"); } } static void print_service_rules_filesystem(HttpResponse res, Service_T s) { for (FileSystem_T dl = s->filesystemlist; dl; dl = dl->next) { if (dl->resource == Resource_Inode) { StringBuffer_append(res->outputbuffer, "<tr class='rule'><td>Inodes usage limit</td><td>"); if (dl->limit_absolute > -1) Util_printRule(res->outputbuffer, dl->action, "If %s %lld", operatornames[dl->operator], dl->limit_absolute); else Util_printRule(res->outputbuffer, dl->action, "If %s %.1f%%", operatornames[dl->operator], dl->limit_percent); StringBuffer_append(res->outputbuffer, "</td></tr>"); } else if (dl->resource == Resource_InodeFree) { StringBuffer_append(res->outputbuffer, "<tr class='rule'><td>Inodes free limit</td><td>"); if (dl->limit_absolute > -1) Util_printRule(res->outputbuffer, dl->action, "If %s %lld", operatornames[dl->operator], dl->limit_absolute); else Util_printRule(res->outputbuffer, dl->action, "If %s %.1f%%", operatornames[dl->operator], dl->limit_percent); StringBuffer_append(res->outputbuffer, "</td></tr>"); } else if (dl->resource == Resource_Space) { StringBuffer_append(res->outputbuffer, "<tr class='rule'><td>Space usage limit</td><td>"); if (dl->limit_absolute > -1) { Util_printRule(res->outputbuffer, dl->action, "If %s %s", operatornames[dl->operator], Fmt_bytes2str(dl->limit_absolute, (char[10]){})); } else { Util_printRule(res->outputbuffer, dl->action, "If %s %.1f%%", operatornames[dl->operator], dl->limit_percent); } StringBuffer_append(res->outputbuffer, "</td></tr>"); } else if (dl->resource == Resource_SpaceFree) { StringBuffer_append(res->outputbuffer, "<tr class='rule'><td>Space free limit</td><td>"); if (dl->limit_absolute > -1) { Util_printRule(res->outputbuffer, dl->action, "If %s %s", operatornames[dl->operator], Fmt_bytes2str(dl->limit_absolute, (char[10]){})); } else { Util_printRule(res->outputbuffer, dl->action, "If %s %.1f%%", operatornames[dl->operator], dl->limit_percent); } StringBuffer_append(res->outputbuffer, "</td></tr>"); } else if (dl->resource == Resource_ReadBytes) { StringBuffer_append(res->outputbuffer, "<tr class='rule'><td>Read limit</td><td>"); Util_printRule(res->outputbuffer, dl->action, "If read %s %s/s", operatornames[dl->operator], Fmt_bytes2str(dl->limit_absolute, (char[10]){})); StringBuffer_append(res->outputbuffer, "</td></tr>"); } else if (dl->resource == Resource_ReadOperations) { StringBuffer_append(res->outputbuffer, "<tr class='rule'><td>Read limit</td><td>"); Util_printRule(res->outputbuffer, dl->action, "If read %s %llu operations/s", operatornames[dl->operator], dl->limit_absolute); StringBuffer_append(res->outputbuffer, "</td></tr>"); } else if (dl->resource == 
Resource_WriteBytes) { StringBuffer_append(res->outputbuffer, "<tr class='rule'><td>Write limit</td><td>"); Util_printRule(res->outputbuffer, dl->action, "If write %s %s/s", operatornames[dl->operator], Fmt_bytes2str(dl->limit_absolute, (char[10]){})); StringBuffer_append(res->outputbuffer, "</td></tr>"); } else if (dl->resource == Resource_WriteOperations) { StringBuffer_append(res->outputbuffer, "<tr class='rule'><td>Write limit</td><td>"); Util_printRule(res->outputbuffer, dl->action, "If write %s %llu operations/s", operatornames[dl->operator], dl->limit_absolute); StringBuffer_append(res->outputbuffer, "</td></tr>"); } else if (dl->resource == Resource_ServiceTime) { StringBuffer_append(res->outputbuffer, "<tr class='rule'><td>Service time limit</td><td>"); Util_printRule(res->outputbuffer, dl->action, "If service time %s %s/operation", operatornames[dl->operator], Fmt_time2str(dl->limit_absolute, (char[11]){})); StringBuffer_append(res->outputbuffer, "</td></tr>"); } } } static void print_service_rules_size(HttpResponse res, Service_T s) { for (Size_T sl = s->sizelist; sl; sl = sl->next) { StringBuffer_append(res->outputbuffer, "<tr class='rule'><td>Size</td><td>"); if (sl->test_changes) Util_printRule(res->outputbuffer, sl->action, "If changed"); else Util_printRule(res->outputbuffer, sl->action, "If %s %llu byte(s)", operatornames[sl->operator], sl->size); StringBuffer_append(res->outputbuffer, "</td></tr>"); } } static void print_service_rules_linkstatus(HttpResponse res, Service_T s) { for (LinkStatus_T l = s->linkstatuslist; l; l = l->next) { StringBuffer_append(res->outputbuffer, "<tr class='rule'><td>Link status</td><td>"); Util_printRule(res->outputbuffer, l->action, "If failed"); StringBuffer_append(res->outputbuffer, "</td></tr>"); } } static void print_service_rules_linkspeed(HttpResponse res, Service_T s) { for (LinkSpeed_T l = s->linkspeedlist; l; l = l->next) { StringBuffer_append(res->outputbuffer, "<tr class='rule'><td>Link capacity</td><td>"); Util_printRule(res->outputbuffer, l->action, "If changed"); StringBuffer_append(res->outputbuffer, "</td></tr>"); } } static void print_service_rules_linksaturation(HttpResponse res, Service_T s) { for (LinkSaturation_T l = s->linksaturationlist; l; l = l->next) { StringBuffer_append(res->outputbuffer, "<tr class='rule'><td>Link saturation</td><td>"); Util_printRule(res->outputbuffer, l->action, "If %s %.1f%%", operatornames[l->operator], l->limit); StringBuffer_append(res->outputbuffer, "</td></tr>"); } } static void print_service_rules_uploadbytes(HttpResponse res, Service_T s) { for (Bandwidth_T bl = s->uploadbyteslist; bl; bl = bl->next) { if (bl->range == Time_Second) { StringBuffer_append(res->outputbuffer, "<tr class='rule'><td>Upload bytes</td><td>"); Util_printRule(res->outputbuffer, bl->action, "If %s %s/s", operatornames[bl->operator], Fmt_bytes2str(bl->limit, (char[10]){})); } else { StringBuffer_append(res->outputbuffer, "<tr class='rule'><td>Total upload bytes</td><td>"); Util_printRule(res->outputbuffer, bl->action, "If %s %s in last %d %s(s)", operatornames[bl->operator], Fmt_bytes2str(bl->limit, (char[10]){}), bl->rangecount, Util_timestr(bl->range)); } StringBuffer_append(res->outputbuffer, "</td></tr>"); } } static void print_service_rules_uploadpackets(HttpResponse res, Service_T s) { for (Bandwidth_T bl = s->uploadpacketslist; bl; bl = bl->next) { if (bl->range == Time_Second) { StringBuffer_append(res->outputbuffer, "<tr class='rule'><td>Upload packets</td><td>"); Util_printRule(res->outputbuffer, 
bl->action, "If %s %lld packets/s", operatornames[bl->operator], bl->limit); } else { StringBuffer_append(res->outputbuffer, "<tr class='rule'><td>Total upload packets</td><td>"); Util_printRule(res->outputbuffer, bl->action, "If %s %lld packets in last %d %s(s)", operatornames[bl->operator], bl->limit, bl->rangecount, Util_timestr(bl->range)); } StringBuffer_append(res->outputbuffer, "</td></tr>"); } } static void print_service_rules_downloadbytes(HttpResponse res, Service_T s) { for (Bandwidth_T bl = s->downloadbyteslist; bl; bl = bl->next) { if (bl->range == Time_Second) { StringBuffer_append(res->outputbuffer, "<tr class='rule'><td>Download bytes</td><td>"); Util_printRule(res->outputbuffer, bl->action, "If %s %s/s", operatornames[bl->operator], Fmt_bytes2str(bl->limit, (char[10]){})); } else { StringBuffer_append(res->outputbuffer, "<tr class='rule'><td>Total download bytes</td><td>"); Util_printRule(res->outputbuffer, bl->action, "If %s %s in last %d %s(s)", operatornames[bl->operator], Fmt_bytes2str(bl->limit, (char[10]){}), bl->rangecount, Util_timestr(bl->range)); } StringBuffer_append(res->outputbuffer, "</td></tr>"); } } static void print_service_rules_downloadpackets(HttpResponse res, Service_T s) { for (Bandwidth_T bl = s->downloadpacketslist; bl; bl = bl->next) { if (bl->range == Time_Second) { StringBuffer_append(res->outputbuffer, "<tr class='rule'><td>Download packets</td><td>"); Util_printRule(res->outputbuffer, bl->action, "If %s %lld packets/s", operatornames[bl->operator], bl->limit); } else { StringBuffer_append(res->outputbuffer, "<tr class='rule'><td>Total download packets</td><td>"); Util_printRule(res->outputbuffer, bl->action, "If %s %lld packets in last %d %s(s)", operatornames[bl->operator], bl->limit, bl->rangecount, Util_timestr(bl->range)); } StringBuffer_append(res->outputbuffer, "</td></tr>"); } } static void print_service_rules_uptime(HttpResponse res, Service_T s) { for (Uptime_T ul = s->uptimelist; ul; ul = ul->next) { StringBuffer_append(res->outputbuffer, "<tr class='rule'><td>Uptime</td><td>"); Util_printRule(res->outputbuffer, ul->action, "If %s %s", operatornames[ul->operator], _getUptime(ul->uptime, (char[256]){})); StringBuffer_append(res->outputbuffer, "</td></tr>"); } } static void print_service_rules_content(HttpResponse res, Service_T s) { if (s->type != Service_Process) { for (Match_T ml = s->matchignorelist; ml; ml = ml->next) { StringBuffer_append(res->outputbuffer, "<tr class='rule'><td>Ignore content</td><td>"); Util_printRule(res->outputbuffer, ml->action, "If content %s \"%s\"", ml->not ? "!=" : "=", ml->match_string); StringBuffer_append(res->outputbuffer, "</td></tr>"); } for (Match_T ml = s->matchlist; ml; ml = ml->next) { StringBuffer_append(res->outputbuffer, "<tr class='rule'><td>Content match</td><td>"); Util_printRule(res->outputbuffer, ml->action, "If content %s \"%s\"", ml->not ? 
"!=" : "=", ml->match_string); StringBuffer_append(res->outputbuffer, "</td></tr>"); } } } static void print_service_rules_checksum(HttpResponse res, Service_T s) { if (s->checksum) { StringBuffer_append(res->outputbuffer, "<tr class='rule'><td>Checksum</td><td>"); if (s->checksum->test_changes) Util_printRule(res->outputbuffer, s->checksum->action, "If changed %s", checksumnames[s->checksum->type]); else Util_printRule(res->outputbuffer, s->checksum->action, "If failed %s(%s)", s->checksum->hash, checksumnames[s->checksum->type]); StringBuffer_append(res->outputbuffer, "</td></tr>"); } } static void print_service_rules_pid(HttpResponse res, Service_T s) { for (Pid_T l = s->pidlist; l; l = l->next) { StringBuffer_append(res->outputbuffer, "<tr class='rule'><td>PID</td><td>"); Util_printRule(res->outputbuffer, l->action, "If changed"); StringBuffer_append(res->outputbuffer, "</td></tr>"); } } static void print_service_rules_ppid(HttpResponse res, Service_T s) { for (Pid_T l = s->ppidlist; l; l = l->next) { StringBuffer_append(res->outputbuffer, "<tr class='rule'><td>PPID</td><td>"); Util_printRule(res->outputbuffer, l->action, "If changed"); StringBuffer_append(res->outputbuffer, "</td></tr>"); } } static void print_service_rules_program(HttpResponse res, Service_T s) { if (s->type == Service_Program) { StringBuffer_append(res->outputbuffer, "<tr class='rule'><td>Program timeout</td><td>Terminate the program if not finished within %s</td></tr>", Fmt_time2str(s->program->timeout, (char[11]){})); for (Status_T status = s->statuslist; status; status = status->next) { StringBuffer_append(res->outputbuffer, "<tr class='rule'><td>Test Exit value</td><td>"); if (status->operator == Operator_Changed) Util_printRule(res->outputbuffer, status->action, "If exit value changed"); else Util_printRule(res->outputbuffer, status->action, "If exit value %s %d", operatorshortnames[status->operator], status->return_value); StringBuffer_append(res->outputbuffer, "</td></tr>"); } } } static void print_service_rules_resource(HttpResponse res, Service_T s) { char buf[STRLEN]; for (Resource_T q = s->resourcelist; q; q = q->next) { StringBuffer_append(res->outputbuffer, "<tr class='rule'><td>"); switch (q->resource_id) { case Resource_CpuPercent: StringBuffer_append(res->outputbuffer, "CPU usage limit"); break; case Resource_CpuPercentTotal: StringBuffer_append(res->outputbuffer, "CPU usage limit (incl. 
children)"); break; case Resource_CpuUser: StringBuffer_append(res->outputbuffer, "CPU user limit"); break; case Resource_CpuSystem: StringBuffer_append(res->outputbuffer, "CPU system limit"); break; case Resource_CpuWait: StringBuffer_append(res->outputbuffer, "CPU wait limit"); break; case Resource_MemoryPercent: StringBuffer_append(res->outputbuffer, "Memory usage limit"); break; case Resource_MemoryKbyte: StringBuffer_append(res->outputbuffer, "Memory amount limit"); break; case Resource_SwapPercent: StringBuffer_append(res->outputbuffer, "Swap usage limit"); break; case Resource_SwapKbyte: StringBuffer_append(res->outputbuffer, "Swap amount limit"); break; case Resource_LoadAverage1m: StringBuffer_append(res->outputbuffer, "Load average (1min)"); break; case Resource_LoadAverage5m: StringBuffer_append(res->outputbuffer, "Load average (5min)"); break; case Resource_LoadAverage15m: StringBuffer_append(res->outputbuffer, "Load average (15min)"); break; case Resource_Threads: StringBuffer_append(res->outputbuffer, "Threads"); break; case Resource_Children: StringBuffer_append(res->outputbuffer, "Children"); break; case Resource_MemoryKbyteTotal: StringBuffer_append(res->outputbuffer, "Memory amount limit (incl. children)"); break; case Resource_MemoryPercentTotal: StringBuffer_append(res->outputbuffer, "Memory usage limit (incl. children)"); break; case Resource_ReadBytes: StringBuffer_append(res->outputbuffer, "Disk read limit"); break; case Resource_ReadOperations: StringBuffer_append(res->outputbuffer, "Disk read limit"); break; case Resource_WriteBytes: StringBuffer_append(res->outputbuffer, "Disk write limit"); break; case Resource_WriteOperations: StringBuffer_append(res->outputbuffer, "Disk write limit"); break; default: break; } StringBuffer_append(res->outputbuffer, "</td><td>"); switch (q->resource_id) { case Resource_CpuPercent: case Resource_CpuPercentTotal: case Resource_MemoryPercentTotal: case Resource_CpuUser: case Resource_CpuSystem: case Resource_CpuWait: case Resource_MemoryPercent: case Resource_SwapPercent: Util_printRule(res->outputbuffer, q->action, "If %s %.1f%%", operatornames[q->operator], q->limit); break; case Resource_MemoryKbyte: case Resource_SwapKbyte: case Resource_MemoryKbyteTotal: Util_printRule(res->outputbuffer, q->action, "If %s %s", operatornames[q->operator], Fmt_bytes2str(q->limit, buf)); break; case Resource_LoadAverage1m: case Resource_LoadAverage5m: case Resource_LoadAverage15m: Util_printRule(res->outputbuffer, q->action, "If %s %.1f", operatornames[q->operator], q->limit); break; case Resource_Threads: case Resource_Children: Util_printRule(res->outputbuffer, q->action, "If %s %.0f", operatornames[q->operator], q->limit); break; case Resource_ReadBytes: case Resource_WriteBytes: Util_printRule(res->outputbuffer, q->action, "if %s %s", operatornames[q->operator], Fmt_bytes2str(q->limit, (char[10]){})); break; case Resource_ReadOperations: case Resource_WriteOperations: Util_printRule(res->outputbuffer, q->action, "if %s %.0f operations/s", operatornames[q->operator], q->limit); break; default: break; } StringBuffer_append(res->outputbuffer, "</td></tr>"); } } static boolean_t is_readonly(HttpRequest req) { if (req->remote_user) { Auth_T user_creds = Util_getUserCredentials(req->remote_user); return (user_creds ? user_creds->is_readonly : true); } return false; } /* ----------------------------------------------------------- Status output */ /* Print status in the given format. Text status is default. 
*/ static void print_status(HttpRequest req, HttpResponse res, int version) { const char *stringFormat = get_parameter(req, "format"); if (stringFormat && Str_startsWith(stringFormat, "xml")) { char buf[STRLEN]; StringBuffer_T sb = StringBuffer_create(256); status_xml(sb, NULL, version, Socket_getLocalHost(req->S, buf, sizeof(buf))); StringBuffer_append(res->outputbuffer, "%s", StringBuffer_toString(sb)); StringBuffer_free(&sb); set_content_type(res, "text/xml"); } else { set_content_type(res, "text/plain"); StringBuffer_append(res->outputbuffer, "Monit %s uptime: %s\n\n", VERSION, _getUptime(ProcessTree_getProcessUptime(getpid()), (char[256]){})); int found = 0; const char *stringGroup = Util_urlDecode((char *)get_parameter(req, "group")); const char *stringService = Util_urlDecode((char *)get_parameter(req, "service")); if (stringGroup) { for (ServiceGroup_T sg = servicegrouplist; sg; sg = sg->next) { if (IS(stringGroup, sg->name)) { for (list_t m = sg->members->head; m; m = m->next) { status_service_txt(m->e, res); found++; } break; } } } else { for (Service_T s = servicelist_conf; s; s = s->next_conf) { if (! stringService || IS(stringService, s->name)) { status_service_txt(s, res); found++; } } } if (found == 0) { if (stringGroup) send_error(req, res, SC_BAD_REQUEST, "Service group '%s' not found", stringGroup); else if (stringService) send_error(req, res, SC_BAD_REQUEST, "Service '%s' not found", stringService); else send_error(req, res, SC_BAD_REQUEST, "No service found"); } } } static void _printServiceSummary(Box_T t, Service_T s) { Box_setColumn(t, 1, "%s", s->name); Box_setColumn(t, 2, "%s", get_service_status(TXT, s, (char[STRLEN]){}, STRLEN)); Box_setColumn(t, 3, "%s", servicetypes[s->type]); Box_printRow(t); } static int _printServiceSummaryByType(Box_T t, Service_Type type) { int found = 0; for (Service_T s = servicelist_conf; s; s = s->next_conf) { if (s->type == type) { _printServiceSummary(t, s); found++; } } return found; } static void print_summary(HttpRequest req, HttpResponse res) { set_content_type(res, "text/plain"); StringBuffer_append(res->outputbuffer, "Monit %s uptime: %s\n", VERSION, _getUptime(ProcessTree_getProcessUptime(getpid()), (char[256]){})); int found = 0; const char *stringGroup = Util_urlDecode((char *)get_parameter(req, "group")); const char *stringService = Util_urlDecode((char *)get_parameter(req, "service")); Box_T t = Box_new(res->outputbuffer, 3, (BoxColumn_T []){ {.name = "Service Name", .width = 31, .wrap = false, .align = BoxAlign_Left}, {.name = "Status", .width = 26, .wrap = false, .align = BoxAlign_Left}, {.name = "Type", .width = 13, .wrap = false, .align = BoxAlign_Left} }, true); if (stringGroup) { for (ServiceGroup_T sg = servicegrouplist; sg; sg = sg->next) { if (IS(stringGroup, sg->name)) { for (list_t m = sg->members->head; m; m = m->next) { _printServiceSummary(t, m->e); found++; } break; } } } else if (stringService) { for (Service_T s = servicelist_conf; s; s = s->next_conf) { if (IS(stringService, s->name)) { _printServiceSummary(t, s); found++; } } } else { found += _printServiceSummaryByType(t, Service_System); found += _printServiceSummaryByType(t, Service_Process); found += _printServiceSummaryByType(t, Service_File); found += _printServiceSummaryByType(t, Service_Fifo); found += _printServiceSummaryByType(t, Service_Directory); found += _printServiceSummaryByType(t, Service_Filesystem); found += _printServiceSummaryByType(t, Service_Host); found += _printServiceSummaryByType(t, Service_Net); found += 
_printServiceSummaryByType(t, Service_Program); } Box_free(&t); if (found == 0) { if (stringGroup) send_error(req, res, SC_BAD_REQUEST, "Service group '%s' not found", stringGroup); else if (stringService) send_error(req, res, SC_BAD_REQUEST, "Service '%s' not found", stringService); else send_error(req, res, SC_BAD_REQUEST, "No service found"); } } static void _printReport(HttpRequest req, HttpResponse res) { set_content_type(res, "text/plain"); const char *type = get_parameter(req, "type"); int count = 0; if (! type) { float up = 0, down = 0, init = 0, unmonitored = 0, total = 0; for (Service_T s = servicelist; s; s = s->next) { if (s->monitor == Monitor_Not) unmonitored++; else if (s->monitor & Monitor_Init) init++; else if (s->error) down++; else up++; total++; } StringBuffer_append(res->outputbuffer, "up: %*.0f (%.1f%%)\n" "down: %*.0f (%.1f%%)\n" "initialising: %*.0f (%.1f%%)\n" "unmonitored: %*.0f (%.1f%%)\n" "total: %*.0f services\n", 3, up, 100. * up / total, 3, down, 100. * down / total, 3, init, 100. * init / total, 3, unmonitored, 100. * unmonitored / total, 3, total); } else if (Str_isEqual(type, "up")) { for (Service_T s = servicelist; s; s = s->next) if (s->monitor != Monitor_Not && ! (s->monitor & Monitor_Init) && ! s->error) count++; StringBuffer_append(res->outputbuffer, "%d\n", count); } else if (Str_isEqual(type, "down")) { for (Service_T s = servicelist; s; s = s->next) if (s->monitor != Monitor_Not && ! (s->monitor & Monitor_Init) && s->error) count++; StringBuffer_append(res->outputbuffer, "%d\n", count); } else if (Str_startsWith(type, "initiali")) { // allow 'initiali(s|z)ing' for (Service_T s = servicelist; s; s = s->next) if (s->monitor & Monitor_Init) count++; StringBuffer_append(res->outputbuffer, "%d\n", count); } else if (Str_isEqual(type, "unmonitored")) { for (Service_T s = servicelist; s; s = s->next) if (s->monitor == Monitor_Not) count++; StringBuffer_append(res->outputbuffer, "%d\n", count); } else if (Str_isEqual(type, "total")) { for (Service_T s = servicelist; s; s = s->next) count++; StringBuffer_append(res->outputbuffer, "%d\n", count); } else { send_error(req, res, SC_BAD_REQUEST, "Invalid report type: '%s'", type); } } static void status_service_txt(Service_T s, HttpResponse res) { char buf[STRLEN]; StringBuffer_append(res->outputbuffer, COLOR_BOLDCYAN "%s '%s'" COLOR_RESET "\n" " %-28s %s\n", servicetypes[s->type], s->name, "status", get_service_status(TXT, s, buf, sizeof(buf))); StringBuffer_append(res->outputbuffer, " %-28s %s\n", "monitoring status", get_monitoring_status(TXT, s, buf, sizeof(buf))); StringBuffer_append(res->outputbuffer, " %-28s %s\n", "monitoring mode", modenames[s->mode]); StringBuffer_append(res->outputbuffer, " %-28s %s\n", "on reboot", onrebootnames[s->onreboot]); _printStatus(TXT, res, s); StringBuffer_append(res->outputbuffer, "\n"); } static char *get_monitoring_status(Output_Type type, Service_T s, char *buf, int buflen) { ASSERT(s); ASSERT(buf); if (s->monitor == Monitor_Not) { if (type == HTML) snprintf(buf, buflen, "<span class='gray-text'>Not monitored</span>"); else snprintf(buf, buflen, Color_lightYellow("Not monitored")); } else if (s->monitor & Monitor_Waiting) { if (type == HTML) snprintf(buf, buflen, "<span>Waiting</span>"); else snprintf(buf, buflen, Color_white("Waiting")); } else if (s->monitor & Monitor_Init) { if (type == HTML) snprintf(buf, buflen, "<span class='blue-text'>Initializing</span>"); else snprintf(buf, buflen, Color_lightBlue("Initializing")); } else if (s->monitor & Monitor_Yes) { if 
(type == HTML)
                        snprintf(buf, buflen, "<span>Monitored</span>");
                else
                        snprintf(buf, buflen, "Monitored");
        }
        return buf;
}


static char *get_service_status(Output_Type type, Service_T s, char *buf, int buflen) {
        ASSERT(s);
        ASSERT(buf);
        if (s->monitor == Monitor_Not || s->monitor & Monitor_Init) {
                get_monitoring_status(type, s, buf, buflen);
        } else if (s->error == 0) {
                snprintf(buf, buflen, type == HTML ? "<span class='green-text'>OK</span>" : Color_lightGreen("OK"));
        } else {
                // If the service actually has some failure, the error bitmap will be non-zero
                char *p = buf;
                EventTable_T *et = Event_Table;
                while ((*et).id) {
                        if (s->error & (*et).id) {
                                if (p > buf)
                                        p += snprintf(p, buflen - (p - buf), " | ");
                                if (s->error_hint & (*et).id) {
                                        if (type == HTML)
                                                p += snprintf(p, buflen - (p - buf), "<span class='orange-text'>%s</span>", (*et).description_changed);
                                        else
                                                p += snprintf(p, buflen - (p - buf), Color_lightYellow("%s", (*et).description_changed));
                                } else {
                                        if (type == HTML)
                                                p += snprintf(p, buflen - (p - buf), "<span class='red-text'>%s</span>", (*et).description_failed);
                                        else
                                                p += snprintf(p, buflen - (p - buf), Color_lightRed("%s", (*et).description_failed));
                                }
                        }
                        et++;
                }
        }
        if (s->doaction)
                snprintf(buf + strlen(buf), buflen - strlen(buf) - 1, " - %s pending", actionnames[s->doaction]);
        return buf;
}
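/*
 * Illustrative sketch: get_service_status() above turns the OR-ed error
 * bitmap into a readable string by walking the zero-terminated Event_Table
 * and joining the description of every set bit with " | ". The stand-alone
 * demo below shows the same technique with a simplified, hypothetical event
 * table (DemoEvent_T, demo_table and demo_status are illustrative names and
 * not part of Monit). It is fenced with #if 0 so it cannot interfere with a
 * build; compile it on its own to run it.
 */
#if 0
#include <stdio.h>

typedef struct {
        unsigned int id;         /* event bit, a power of two */
        const char *description; /* human readable failure text */
} DemoEvent_T;

static const DemoEvent_T demo_table[] = {
        {0x1, "Checksum failed"},
        {0x2, "Resource limit matched"},
        {0x4, "Timeout"},
        {0x0, NULL} /* a zero id terminates the table, like Event_Table */
};

/* Append the description of every bit set in 'error' to 'buf', "a | b" style */
static char *demo_status(unsigned int error, char *buf, int buflen) {
        char *p = buf;
        *buf = 0;
        for (const DemoEvent_T *et = demo_table; et->id; et++) {
                if (error & et->id) {
                        if (p > buf)
                                p += snprintf(p, buflen - (p - buf), " | ");
                        p += snprintf(p, buflen - (p - buf), "%s", et->description);
                }
        }
        return buf;
}

int main(void) {
        char buf[256];
        /* Two failure bits set -> prints "Checksum failed | Timeout" */
        printf("%s\n", demo_status(0x1 | 0x4, buf, (int)sizeof(buf)));
        return 0;
}
#endif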
/* * Copyright (C) Tildeslash Ltd. All rights reserved. * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License version 3. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. * * In addition, as a special exception, the copyright holders give * permission to link the code of portions of this program with the * OpenSSL library under certain conditions as described in each * individual source file, and distribute linked combinations * including the two. * * You must obey the GNU Affero General Public License in all respects * for all of the code used other than OpenSSL. */ #include "config.h" #ifdef HAVE_STDIO_H #include <stdio.h> #endif #ifdef HAVE_STDLIB_H #include <stdlib.h> #endif #ifdef HAVE_ERRNO_H #include <errno.h> #endif #ifdef HAVE_SYS_TYPES_H #include <sys/types.h> #endif #ifdef HAVE_SYS_SOCKET_H #include <sys/socket.h> #endif #ifdef HAVE_STRING_H #include <string.h> #endif #ifdef HAVE_STRINGS_H #include <strings.h> #endif #ifdef HAVE_UNISTD_H #include <unistd.h> #endif #ifdef HAVE_SYS_STAT_H #include <sys/stat.h> #endif #ifdef HAVE_SYS_TIME_H #include <sys/time.h> #endif #ifdef HAVE_CTYPE_H #include <ctype.h> #endif // libmonit #include "system/Time.h" #include "util/Fmt.h" #include "util/List.h" #include "monit.h" #include "cervlet.h" #include "engine.h" #include "processor.h" #include "base64.h" #include "event.h" #include "alert.h" #include "ProcessTree.h" #include "device.h" #include "protocol.h" #include "Color.h" #include "Box.h" #define ACTION(c) ! 
strncasecmp(req->url, c, sizeof(c)) /* URL Commands supported */ #define HOME "/" #define TEST "/_monit" #define ABOUT "/_about" #define PING "/_ping" #define GETID "/_getid" #define STATUS "/_status" #define STATUS2 "/_status2" #define SUMMARY "/_summary" #define REPORT "/_report" #define RUNTIME "/_runtime" #define VIEWLOG "/_viewlog" #define DOACTION "/_doaction" #define FAVICON "/favicon.ico" typedef enum { TXT = 0, HTML } __attribute__((__packed__)) Output_Type; /* Private prototypes */ static boolean_t is_readonly(HttpRequest); static void printFavicon(HttpResponse); static void doGet(HttpRequest, HttpResponse); static void doPost(HttpRequest, HttpResponse); static void do_head(HttpResponse res, const char *path, const char *name, int refresh); static void do_foot(HttpResponse res); static void do_home(HttpResponse); static void do_home_system(HttpResponse); static void do_home_filesystem(HttpResponse); static void do_home_directory(HttpResponse); static void do_home_file(HttpResponse); static void do_home_fifo(HttpResponse); static void do_home_net(HttpResponse); static void do_home_process(HttpResponse); static void do_home_program(HttpResponse); static void do_home_host(HttpResponse); static void do_about(HttpResponse); static void do_ping(HttpResponse); static void do_getid(HttpResponse); static void do_runtime(HttpRequest, HttpResponse); static void do_viewlog(HttpRequest, HttpResponse); static void handle_service(HttpRequest, HttpResponse); static void handle_service_action(HttpRequest, HttpResponse); static void handle_doaction(HttpRequest, HttpResponse); static void handle_runtime(HttpRequest, HttpResponse); static void handle_runtime_action(HttpRequest, HttpResponse); static void is_monit_running(HttpResponse); static void do_service(HttpRequest, HttpResponse, Service_T); static void print_alerts(HttpResponse, Mail_T); static void print_buttons(HttpRequest, HttpResponse, Service_T); static void print_service_rules_timeout(HttpResponse, Service_T); static void print_service_rules_nonexistence(HttpResponse, Service_T); static void print_service_rules_existence(HttpResponse, Service_T); static void print_service_rules_port(HttpResponse, Service_T); static void print_service_rules_socket(HttpResponse, Service_T); static void print_service_rules_icmp(HttpResponse, Service_T); static void print_service_rules_perm(HttpResponse, Service_T); static void print_service_rules_uid(HttpResponse, Service_T); static void print_service_rules_euid(HttpResponse, Service_T); static void print_service_rules_gid(HttpResponse, Service_T); static void print_service_rules_timestamp(HttpResponse, Service_T); static void print_service_rules_fsflags(HttpResponse, Service_T); static void print_service_rules_filesystem(HttpResponse, Service_T); static void print_service_rules_size(HttpResponse, Service_T); static void print_service_rules_linkstatus(HttpResponse, Service_T); static void print_service_rules_linkspeed(HttpResponse, Service_T); static void print_service_rules_linksaturation(HttpResponse, Service_T); static void print_service_rules_uploadbytes(HttpResponse, Service_T); static void print_service_rules_uploadpackets(HttpResponse, Service_T); static void print_service_rules_downloadbytes(HttpResponse, Service_T); static void print_service_rules_downloadpackets(HttpResponse, Service_T); static void print_service_rules_uptime(HttpResponse, Service_T); static void print_service_rules_content(HttpResponse, Service_T); static void print_service_rules_checksum(HttpResponse, Service_T); static void 
print_service_rules_pid(HttpResponse, Service_T);
static void print_service_rules_ppid(HttpResponse, Service_T);
static void print_service_rules_program(HttpResponse, Service_T);
static void print_service_rules_resource(HttpResponse, Service_T);
static void print_service_rules_secattr(HttpResponse, Service_T);
static void print_status(HttpRequest, HttpResponse, int);
static void print_summary(HttpRequest, HttpResponse);
static void _printReport(HttpRequest req, HttpResponse res);
static void status_service_txt(Service_T, HttpResponse);
static char *get_monitoring_status(Output_Type, Service_T s, char *, int);
static char *get_service_status(Output_Type, Service_T, char *, int);


/**
 * Implementation of the doGet and doPost routines used by the cervlet
 * processor module. This particular cervlet provides information about
 * the Monit daemon and the programs monitored by Monit.
 *
 * @file
 */


/* ------------------------------------------------------------------ Public */


/**
 * Callback hook to the Processor module for registering this module's
 * doGet and doPost methods.
 */
void init_service() {
        add_Impl(doGet, doPost);
}


/* ----------------------------------------------------------------- Private */


static char *_getUptime(time_t delta, char s[256]) {
        static int min = 60;
        static int hour = 3600;
        static int day = 86400;
        long rest_d;
        long rest_h;
        long rest_m;
        char *p = s;
        if (delta < 0) {
                *s = 0;
        } else {
                if ((rest_d = delta / day) > 0) {
                        p += snprintf(p, 256 - (p - s), "%ldd ", rest_d);
                        delta -= rest_d * day;
                }
                if ((rest_h = delta / hour) > 0 || (rest_d > 0)) {
                        p += snprintf(p, 256 - (p - s), "%ldh ", rest_h);
                        delta -= rest_h * hour;
                }
                rest_m = delta / min;
                snprintf(p, 256 - (p - s), "%ldm", rest_m);
        }
        return s;
}


static void _formatStatus(const char *name, Event_Type errorType, Output_Type type, HttpResponse res, Service_T s, boolean_t validValue, const char *value, ...) {
        if (type == HTML) {
                StringBuffer_append(res->outputbuffer, "<tr><td>%c%s</td>", toupper(name[0]), name + 1);
        } else {
                StringBuffer_append(res->outputbuffer, " %-28s ", name);
        }
        if (! validValue) {
                StringBuffer_append(res->outputbuffer, type == HTML ? "<td class='gray-text'>-</td>" : COLOR_DARKGRAY "-" COLOR_RESET);
        } else {
                va_list ap;
                va_start(ap, value);
                char *_value = Str_vcat(value, ap);
                va_end(ap);
                if (errorType != Event_Null && s->error & errorType)
                        StringBuffer_append(res->outputbuffer, type == HTML ? "<td class='red-text'>" : COLOR_LIGHTRED);
                else
                        StringBuffer_append(res->outputbuffer, type == HTML ? "<td>" : COLOR_DEFAULT);
                if (type == HTML) {
                        // If the output contains multiple lines, wrap it in <pre>, otherwise keep it as is
                        boolean_t multiline = strrchr(_value, '\n') ? true : false;
                        if (multiline)
                                StringBuffer_append(res->outputbuffer, "<pre>");
                        escapeHTML(res->outputbuffer, _value);
                        StringBuffer_append(res->outputbuffer, "%s</td>", multiline ? "</pre>" : "");
                } else {
                        int column = 0;
                        for (int i = 0; _value[i]; i++) {
                                if (_value[i] == '\r') {
                                        // Discard CR
                                        continue;
                                } else if (_value[i] == '\n') {
                                        // Indent 2nd+ line
                                        if (_value[i + 1])
                                                StringBuffer_append(res->outputbuffer, "\n ");
                                        column = 0;
                                        continue;
                                } else if (column <= 200) {
                                        StringBuffer_append(res->outputbuffer, "%c", _value[i]);
                                        column++;
                                }
                        }
                        StringBuffer_append(res->outputbuffer, COLOR_RESET);
                }
                FREE(_value);
        }
        StringBuffer_append(res->outputbuffer, type == HTML ?
"</tr>" : "\n"); } static void _printIOStatistics(Output_Type type, HttpResponse res, Service_T s, IOStatistics_T io, const char *header, const char *name) { boolean_t hasOps = Statistics_initialized(&(io->operations)); boolean_t hasBytes = Statistics_initialized(&(io->bytes)); if (hasOps && hasBytes) { double deltaBytesPerSec = Statistics_deltaNormalize(&(io->bytes)); double deltaOpsPerSec = Statistics_deltaNormalize(&(io->operations)); _formatStatus(header, Event_Resource, type, res, s, true, "%s/s [%s total], %.1f %ss/s [%"PRIu64" %ss total]", Fmt_bytes2str(deltaBytesPerSec, (char[10]){}), Fmt_bytes2str(Statistics_raw(&(io->bytes)), (char[10]){}), deltaOpsPerSec, name, Statistics_raw(&(io->operations)), name); } else if (hasOps) { double deltaOpsPerSec = Statistics_deltaNormalize(&(io->operations)); _formatStatus(header, Event_Resource, type, res, s, true, "%.1f %ss/s [%"PRIu64" %ss total]", deltaOpsPerSec, name, Statistics_raw(&(io->operations)), name); } else if (hasBytes) { double deltaBytesPerSec = Statistics_deltaNormalize(&(io->bytes)); _formatStatus(header, Event_Resource, type, res, s, true, "%s/s [%s total]", Fmt_bytes2str(deltaBytesPerSec, (char[10]){}), Fmt_bytes2str(Statistics_raw(&(io->bytes)), (char[10]){})); } } static void _printStatus(Output_Type type, HttpResponse res, Service_T s) { if (Util_hasServiceStatus(s)) { switch (s->type) { case Service_System: _formatStatus("load average", Event_Resource, type, res, s, true, "[%.2f] [%.2f] [%.2f]", systeminfo.loadavg[0], systeminfo.loadavg[1], systeminfo.loadavg[2]); _formatStatus("cpu", Event_Resource, type, res, s, true, "%.1f%%us %.1f%%sy" #ifdef HAVE_CPU_WAIT " %.1f%%wa" #endif , systeminfo.cpu.usage.user > 0. ? systeminfo.cpu.usage.user : 0., systeminfo.cpu.usage.system > 0. ? systeminfo.cpu.usage.system : 0. #ifdef HAVE_CPU_WAIT , systeminfo.cpu.usage.wait > 0. ? systeminfo.cpu.usage.wait : 0. 
#endif ); _formatStatus("memory usage", Event_Resource, type, res, s, true, "%s [%.1f%%]", Fmt_bytes2str(systeminfo.memory.usage.bytes, (char[10]){}), systeminfo.memory.usage.percent); _formatStatus("swap usage", Event_Resource, type, res, s, true, "%s [%.1f%%]", Fmt_bytes2str(systeminfo.swap.usage.bytes, (char[10]){}), systeminfo.swap.usage.percent); _formatStatus("uptime", Event_Uptime, type, res, s, systeminfo.booted > 0, "%s", _getUptime(Time_now() - systeminfo.booted, (char[256]){})); _formatStatus("boot time", Event_Null, type, res, s, true, "%s", Time_string(systeminfo.booted, (char[32]){})); break; case Service_File: _formatStatus("permission", Event_Permission, type, res, s, s->inf.file->mode >= 0, "%o", s->inf.file->mode & 07777); _formatStatus("uid", Event_Uid, type, res, s, s->inf.file->uid >= 0, "%d", s->inf.file->uid); _formatStatus("gid", Event_Gid, type, res, s, s->inf.file->gid >= 0, "%d", s->inf.file->gid); _formatStatus("size", Event_Size, type, res, s, s->inf.file->size >= 0, "%s", Fmt_bytes2str(s->inf.file->size, (char[10]){})); _formatStatus("access timestamp", Event_Timestamp, type, res, s, s->inf.file->timestamp.access > 0, "%s", Time_string(s->inf.file->timestamp.access, (char[32]){})); _formatStatus("change timestamp", Event_Timestamp, type, res, s, s->inf.file->timestamp.change > 0, "%s", Time_string(s->inf.file->timestamp.change, (char[32]){})); _formatStatus("modify timestamp", Event_Timestamp, type, res, s, s->inf.file->timestamp.modify > 0, "%s", Time_string(s->inf.file->timestamp.modify, (char[32]){})); if (s->matchlist) _formatStatus("content match", Event_Content, type, res, s, true, "%s", (s->error & Event_Content) ? "yes" : "no"); if (s->checksum) _formatStatus("checksum", Event_Checksum, type, res, s, *s->inf.file->cs_sum, "%s (%s)", s->inf.file->cs_sum, checksumnames[s->checksum->type]); break; case Service_Directory: _formatStatus("permission", Event_Permission, type, res, s, s->inf.directory->mode >= 0, "%o", s->inf.directory->mode & 07777); _formatStatus("uid", Event_Uid, type, res, s, s->inf.directory->uid >= 0, "%d", s->inf.directory->uid); _formatStatus("gid", Event_Gid, type, res, s, s->inf.directory->gid >= 0, "%d", s->inf.directory->gid); _formatStatus("access timestamp", Event_Timestamp, type, res, s, s->inf.directory->timestamp.access > 0, "%s", Time_string(s->inf.directory->timestamp.access, (char[32]){})); _formatStatus("change timestamp", Event_Timestamp, type, res, s, s->inf.directory->timestamp.change > 0, "%s", Time_string(s->inf.directory->timestamp.change, (char[32]){})); _formatStatus("modify timestamp", Event_Timestamp, type, res, s, s->inf.directory->timestamp.modify > 0, "%s", Time_string(s->inf.directory->timestamp.modify, (char[32]){})); break; case Service_Fifo: _formatStatus("permission", Event_Permission, type, res, s, s->inf.fifo->mode >= 0, "%o", s->inf.fifo->mode & 07777); _formatStatus("uid", Event_Uid, type, res, s, s->inf.fifo->uid >= 0, "%d", s->inf.fifo->uid); _formatStatus("gid", Event_Gid, type, res, s, s->inf.fifo->gid >= 0, "%d", s->inf.fifo->gid); _formatStatus("access timestamp", Event_Timestamp, type, res, s, s->inf.fifo->timestamp.access > 0, "%s", Time_string(s->inf.fifo->timestamp.access, (char[32]){})); _formatStatus("change timestamp", Event_Timestamp, type, res, s, s->inf.fifo->timestamp.change > 0, "%s", Time_string(s->inf.fifo->timestamp.change, (char[32]){})); _formatStatus("modify timestamp", Event_Timestamp, type, res, s, s->inf.fifo->timestamp.modify > 0, "%s", 
Time_string(s->inf.fifo->timestamp.modify, (char[32]){})); break; case Service_Net: { long long speed = Link_getSpeed(s->inf.net->stats); long long ibytes = Link_getBytesInPerSecond(s->inf.net->stats); long long obytes = Link_getBytesOutPerSecond(s->inf.net->stats); _formatStatus("link", Event_Link, type, res, s, Link_getState(s->inf.net->stats) == 1, "%d errors", Link_getErrorsInPerSecond(s->inf.net->stats) + Link_getErrorsOutPerSecond(s->inf.net->stats)); if (speed > 0) { _formatStatus("capacity", Event_Speed, type, res, s, Link_getState(s->inf.net->stats) == 1, "%.0lf Mb/s %s-duplex", (double)speed / 1000000., Link_getDuplex(s->inf.net->stats) == 1 ? "full" : "half"); _formatStatus("download bytes", Event_ByteIn, type, res, s, Link_getState(s->inf.net->stats) == 1, "%s/s (%.1f%% link saturation)", Fmt_bytes2str(ibytes, (char[10]){}), 100. * ibytes * 8 / (double)speed); _formatStatus("upload bytes", Event_ByteOut, type, res, s, Link_getState(s->inf.net->stats) == 1, "%s/s (%.1f%% link saturation)", Fmt_bytes2str(obytes, (char[10]){}), 100. * obytes * 8 / (double)speed); } else { _formatStatus("download bytes", Event_ByteIn, type, res, s, Link_getState(s->inf.net->stats) == 1, "%s/s", Fmt_bytes2str(ibytes, (char[10]){})); _formatStatus("upload bytes", Event_ByteOut, type, res, s, Link_getState(s->inf.net->stats) == 1, "%s/s", Fmt_bytes2str(obytes, (char[10]){})); } _formatStatus("download packets", Event_PacketIn, type, res, s, Link_getState(s->inf.net->stats) == 1, "%lld per second", Link_getPacketsInPerSecond(s->inf.net->stats)); _formatStatus("upload packets", Event_PacketOut, type, res, s, Link_getState(s->inf.net->stats) == 1, "%lld per second", Link_getPacketsOutPerSecond(s->inf.net->stats)); } break; case Service_Filesystem: _formatStatus("filesystem type", Event_Null, type, res, s, *(s->inf.filesystem->object.type), "%s", s->inf.filesystem->object.type); _formatStatus("filesystem flags", Event_FsFlag, type, res, s, *(s->inf.filesystem->flags), "%s", s->inf.filesystem->flags); _formatStatus("permission", Event_Permission, type, res, s, s->inf.filesystem->mode >= 0, "%o", s->inf.filesystem->mode & 07777); _formatStatus("uid", Event_Uid, type, res, s, s->inf.filesystem->uid >= 0, "%d", s->inf.filesystem->uid); _formatStatus("gid", Event_Gid, type, res, s, s->inf.filesystem->gid >= 0, "%d", s->inf.filesystem->gid); _formatStatus("block size", Event_Null, type, res, s, true, "%s", Fmt_bytes2str(s->inf.filesystem->f_bsize, (char[10]){})); _formatStatus("space total", Event_Null, type, res, s, true, "%s (of which %.1f%% is reserved for root user)", s->inf.filesystem->f_bsize > 0 ? Fmt_bytes2str(s->inf.filesystem->f_blocks * s->inf.filesystem->f_bsize, (char[10]){}) : "0 MB", s->inf.filesystem->f_blocks > 0 ? ((float)100 * (float)(s->inf.filesystem->f_blocksfreetotal - s->inf.filesystem->f_blocksfree) / (float)s->inf.filesystem->f_blocks) : 0); _formatStatus("space free for non superuser", Event_Null, type, res, s, true, "%s [%.1f%%]", s->inf.filesystem->f_bsize > 0 ? Fmt_bytes2str(s->inf.filesystem->f_blocksfree * s->inf.filesystem->f_bsize, (char[10]){}) : "0 MB", s->inf.filesystem->f_blocks > 0 ? ((float)100 * (float)s->inf.filesystem->f_blocksfree / (float)s->inf.filesystem->f_blocks) : 0); _formatStatus("space free total", Event_Resource, type, res, s, true, "%s [%.1f%%]", s->inf.filesystem->f_bsize > 0 ? Fmt_bytes2str(s->inf.filesystem->f_blocksfreetotal * s->inf.filesystem->f_bsize, (char[10]){}) : "0 MB", s->inf.filesystem->f_blocks > 0 ? 
((float)100 * (float)s->inf.filesystem->f_blocksfreetotal / (float)s->inf.filesystem->f_blocks) : 0); if (s->inf.filesystem->f_files > 0) { _formatStatus("inodes total", Event_Null, type, res, s, true, "%lld", s->inf.filesystem->f_files); _formatStatus("inodes free", Event_Resource, type, res, s, true, "%lld [%.1f%%]", s->inf.filesystem->f_filesfree, (float)100 * (float)s->inf.filesystem->f_filesfree / (float)s->inf.filesystem->f_files); } _printIOStatistics(type, res, s, &(s->inf.filesystem->read), "read", "read"); _printIOStatistics(type, res, s, &(s->inf.filesystem->write), "write", "write"); boolean_t hasReadTime = Statistics_initialized(&(s->inf.filesystem->time.read)); boolean_t hasWriteTime = Statistics_initialized(&(s->inf.filesystem->time.write)); boolean_t hasWaitTime = Statistics_initialized(&(s->inf.filesystem->time.wait)); boolean_t hasRunTime = Statistics_initialized(&(s->inf.filesystem->time.run)); double deltaOperations = Statistics_delta(&(s->inf.filesystem->read.operations)) + Statistics_delta(&(s->inf.filesystem->write.operations)); if (hasReadTime && hasWriteTime) { double readTime = deltaOperations > 0. ? Statistics_deltaNormalize(&(s->inf.filesystem->time.read)) / deltaOperations : 0.; double writeTime = deltaOperations > 0. ? Statistics_deltaNormalize(&(s->inf.filesystem->time.write)) / deltaOperations : 0.; _formatStatus("service time", Event_Null, type, res, s, true, "%.3fms/operation (of which read %.3fms, write %.3fms)", readTime + writeTime, readTime, writeTime); } else if (hasWaitTime && hasRunTime) { double waitTime = deltaOperations > 0. ? Statistics_deltaNormalize(&(s->inf.filesystem->time.wait)) / deltaOperations : 0.; double runTime = deltaOperations > 0. ? Statistics_deltaNormalize(&(s->inf.filesystem->time.run)) / deltaOperations : 0.; _formatStatus("service time", Event_Null, type, res, s, true, "%.3fms/operation (of which queue %.3fms, active %.3fms)", waitTime + runTime, waitTime, runTime); } else if (hasWaitTime) { double waitTime = deltaOperations > 0. ? Statistics_deltaNormalize(&(s->inf.filesystem->time.wait)) / deltaOperations : 0.; _formatStatus("service time", Event_Null, type, res, s, true, "%.3fms/operation", waitTime); } else if (hasRunTime) { double runTime = deltaOperations > 0. ? 
Statistics_deltaNormalize(&(s->inf.filesystem->time.run)) / deltaOperations : 0.; _formatStatus("service time", Event_Null, type, res, s, true, "%.3fms/operation", runTime); } break; case Service_Process: _formatStatus("pid", Event_Pid, type, res, s, s->inf.process->pid >= 0, "%d", s->inf.process->pid); _formatStatus("parent pid", Event_PPid, type, res, s, s->inf.process->ppid >= 0, "%d", s->inf.process->ppid); _formatStatus("uid", Event_Uid, type, res, s, s->inf.process->uid >= 0, "%d", s->inf.process->uid); _formatStatus("effective uid", Event_Uid, type, res, s, s->inf.process->euid >= 0, "%d", s->inf.process->euid); _formatStatus("gid", Event_Gid, type, res, s, s->inf.process->gid >= 0, "%d", s->inf.process->gid); _formatStatus("uptime", Event_Uptime, type, res, s, s->inf.process->uptime >= 0, "%s", _getUptime(s->inf.process->uptime, (char[256]){})); if (Run.flags & Run_ProcessEngineEnabled) { _formatStatus("threads", Event_Resource, type, res, s, s->inf.process->threads >= 0, "%d", s->inf.process->threads); _formatStatus("children", Event_Resource, type, res, s, s->inf.process->children >= 0, "%d", s->inf.process->children); _formatStatus("cpu", Event_Resource, type, res, s, s->inf.process->cpu_percent >= 0, "%.1f%%", s->inf.process->cpu_percent); _formatStatus("cpu total", Event_Resource, type, res, s, s->inf.process->total_cpu_percent >= 0, "%.1f%%", s->inf.process->total_cpu_percent); _formatStatus("memory", Event_Resource, type, res, s, s->inf.process->mem_percent >= 0, "%.1f%% [%s]", s->inf.process->mem_percent, Fmt_bytes2str(s->inf.process->mem, (char[10]){})); _formatStatus("memory total", Event_Resource, type, res, s, s->inf.process->total_mem_percent >= 0, "%.1f%% [%s]", s->inf.process->total_mem_percent, Fmt_bytes2str(s->inf.process->total_mem, (char[10]){})); #ifdef LINUX _formatStatus("security attribute", Event_Invalid, type, res, s, *(s->inf.process->secattr), "%s", s->inf.process->secattr); #endif } _printIOStatistics(type, res, s, &(s->inf.process->read), "disk read", "read"); _printIOStatistics(type, res, s, &(s->inf.process->write), "disk write", "write"); break; case Service_Program: if (s->program->started) { _formatStatus("last exit value", Event_Status, type, res, s, true, "%d", s->program->exitStatus); _formatStatus("last output", Event_Status, type, res, s, StringBuffer_length(s->program->lastOutput), "%s", StringBuffer_toString(s->program->lastOutput)); } break; default: break; } for (Icmp_T i = s->icmplist; i; i = i->next) { if (i->is_available == Connection_Failed) _formatStatus("ping response time", Event_Icmp, type, res, s, true, "connection failed"); else _formatStatus("ping response time", Event_Null, type, res, s, i->is_available != Connection_Init && i->response >= 0., "%s", Fmt_time2str(i->response, (char[11]){})); } for (Port_T p = s->portlist; p; p = p->next) { if (p->is_available == Connection_Failed) { _formatStatus("port response time", Event_Connection, type, res, s, true, "FAILED to [%s]:%d%s type %s/%s %sprotocol %s", p->hostname, p->target.net.port, Util_portRequestDescription(p), Util_portTypeDescription(p), Util_portIpDescription(p), p->target.net.ssl.options.flags ? "using TLS " : "", p->protocol->name); } else { char buf[STRLEN] = {}; if (p->target.net.ssl.options.flags) snprintf(buf, sizeof(buf), "using TLS (certificate valid for %d days) ", p->target.net.ssl.certificate.validDays); _formatStatus("port response time", p->target.net.ssl.certificate.validDays < p->target.net.ssl.certificate.minimumDays ? 
Event_Timestamp : Event_Null, type, res, s, p->is_available != Connection_Init, "%s to %s:%d%s type %s/%s %sprotocol %s", Fmt_time2str(p->response, (char[11]){}), p->hostname, p->target.net.port, Util_portRequestDescription(p), Util_portTypeDescription(p), Util_portIpDescription(p), buf, p->protocol->name); } } for (Port_T p = s->socketlist; p; p = p->next) { if (p->is_available == Connection_Failed) { _formatStatus("unix socket response time", Event_Connection, type, res, s, true, "FAILED to %s type %s protocol %s", p->target.unix.pathname, Util_portTypeDescription(p), p->protocol->name); } else { _formatStatus("unix socket response time", Event_Null, type, res, s, p->is_available != Connection_Init, "%s to %s type %s protocol %s", Fmt_time2str(p->response, (char[11]){}), p->target.unix.pathname, Util_portTypeDescription(p), p->protocol->name); } } } _formatStatus("data collected", Event_Null, type, res, s, true, "%s", Time_string(s->collected.tv_sec, (char[32]){})); } /** * Called by the Processor (via the service method) * to handle a POST request. */ static void doPost(HttpRequest req, HttpResponse res) { set_content_type(res, "text/html"); if (ACTION(RUNTIME)) handle_runtime_action(req, res); else if (ACTION(VIEWLOG)) do_viewlog(req, res); else if (ACTION(STATUS)) print_status(req, res, 1); else if (ACTION(STATUS2)) print_status(req, res, 2); else if (ACTION(SUMMARY)) print_summary(req, res); else if (ACTION(REPORT)) _printReport(req, res); else if (ACTION(DOACTION)) handle_doaction(req, res); else handle_service_action(req, res); } /** * Called by the Processor (via the service method) * to handle a GET request. */ static void doGet(HttpRequest req, HttpResponse res) { set_content_type(res, "text/html"); if (ACTION(HOME)) { LOCK(Run.mutex) do_home(res); END_LOCK; } else if (ACTION(RUNTIME)) { handle_runtime(req, res); } else if (ACTION(TEST)) { is_monit_running(res); } else if (ACTION(ABOUT)) { do_about(res); } else if (ACTION(FAVICON)) { printFavicon(res); } else if (ACTION(PING)) { do_ping(res); } else if (ACTION(GETID)) { do_getid(res); } else if (ACTION(STATUS)) { print_status(req, res, 1); } else if (ACTION(STATUS2)) { print_status(req, res, 2); } else if (ACTION(SUMMARY)) { print_summary(req, res); } else if (ACTION(REPORT)) { _printReport(req, res); } else { handle_service(req, res); } } /* ----------------------------------------------------------------- Helpers */ static void is_monit_running(HttpResponse res) { set_status(res, exist_daemon() ? SC_OK : SC_GONE); } static void printFavicon(HttpResponse res) { static size_t l; Socket_T S = res->S; static unsigned char *favicon = NULL; if (! 
favicon) { favicon = CALLOC(sizeof(unsigned char), strlen(FAVICON_ICO)); l = decode_base64(favicon, FAVICON_ICO); } if (l) { res->is_committed = true; Socket_print(S, "HTTP/1.0 200 OK\r\n"); Socket_print(S, "Content-length: %lu\r\n", (unsigned long)l); Socket_print(S, "Content-Type: image/x-icon\r\n"); Socket_print(S, "Connection: close\r\n\r\n"); if (Socket_write(S, favicon, l) < 0) { LogError("Error sending favicon data -- %s\n", STRERROR); } } } static void do_head(HttpResponse res, const char *path, const char *name, int refresh) { StringBuffer_append(res->outputbuffer, "<!DOCTYPE html>"\ "<html>"\ "<head>"\ "<title>Monit: %s</title> "\ "<style type=\"text/css\"> "\ " html, body {height: 100%%;margin: 0;} "\ " body {background-color: white;font: normal normal normal 16px/20px 'HelveticaNeue', Helvetica, Arial, sans-serif; color:#222;} "\ " h1 {padding:30px 0 10px 0; text-align:center;color:#222;font-size:28px;} "\ " h2 {padding:20px 0 10px 0; text-align:center;color:#555;font-size:22px;} "\ " a:hover {text-decoration: none;} "\ " a {text-decoration: underline;color:#222} "\ " table {border-collapse:collapse; border:0;} "\ " .stripe {background:#EDF5FF} "\ " .rule {background:#ddd} "\ " .red-text {color:#ff0000;} "\ " .green-text {color:#00ff00;} "\ " .gray-text {color:#999999;} "\ " .blue-text {color:#0000ff;} "\ " .yellow-text {color:#ffff00;} "\ " .orange-text {color:#ff8800;} "\ " .short {overflow: hidden; text-overflow: ellipsis; white-space: nowrap; max-width: 350px;}"\ " .column {min-width: 80px;} "\ " .left {text-align:left} "\ " .right {text-align:right} "\ " .center {text-align:center} "\ " #wrap {min-height: 100%%;} "\ " #main {overflow:auto; padding-bottom:50px;} "\ " /*Opera Fix*/body:before {content:\"\";height:100%%;float:left;width:0;margin-top:-32767px;} "\ " #footer {position: relative;margin-top: -50px; height: 50px; clear:both; font-size:11px;color:#777;text-align:center;} "\ " #footer a {color:#333;} #footer a:hover {text-decoration: none;} "\ " #nav {background:#ddd;font:normal normal normal 14px/0px 'HelveticaNeue', Helvetica;} "\ " #nav td {padding:5px 10px;} "\ " #header {margin-bottom:30px;background:#EFF7FF} "\ " #nav, #header {border-bottom:1px solid #ccc;} "\ " #header-row {width:95%%;} "\ " #header-row th {padding:30px 10px 10px 10px;font-size:120%%;} "\ " #header-row td {padding:3px 10px;} "\ " #header-row .first {min-width:200px;width:200px;white-space:nowrap;overflow:hidden;text-overflow:ellipsis;} "\ " #status-table {width:95%%;} "\ " #status-table th {text-align:left;background:#edf5ff;font-weight:normal;} "\ " #status-table th, #status-table td, #status-table tr {border:1px solid #ccc;padding:5px;} "\ " #buttons {font-size:20px; margin:40px 0 20px 0;} "\ " #buttons td {padding-right:50px;} "\ " #buttons input {font-size:18px;padding:5px;} "\ "</style>"\ "<meta HTTP-EQUIV='REFRESH' CONTENT=%d> "\ "<meta HTTP-EQUIV='Expires' Content=0> "\ "<meta HTTP-EQUIV='Pragma' CONTENT='no-cache'> "\ "<meta charset='UTF-8'>" \ "<link rel='shortcut icon' href='favicon.ico'>"\ "</head>"\ "<body><div id='wrap'><div id='main'>" \ "<table id='nav' width='100%%'>"\ " <tr>"\ " <td width='20%%'><a href='.'>Home</a>&nbsp;&gt;&nbsp;<a href='%s'>%s</a></td>"\ " <td width='60%%' style='text-align:center;'>Use <a href='https://mmonit.com/'>M/Monit</a> to manage all your Monit instances</td>"\ " <td width='20%%'><p class='right'><a href='_about'>Monit %s</a></td>"\ " </tr>"\ "</table>"\ "<center>", Run.system->name, refresh, path, name, VERSION); } static void 
do_foot(HttpResponse res) { StringBuffer_append(res->outputbuffer, "</center></div></div>" "<div id='footer'>" "Copyright &copy; 2001-2018 <a href=\"http://tildeslash.com/\">Tildeslash</a>. All rights reserved. " "<span style='margin-left:5px;'></span>" "<a href=\"http://mmonit.com/monit/\">Monit web site</a> | " "<a href=\"http://mmonit.com/wiki/\">Monit Wiki</a> | " "<a href=\"http://mmonit.com/\">M/Monit</a>" "</div></body></html>"); } static void do_home(HttpResponse res) { do_head(res, "", "", Run.polltime); StringBuffer_append(res->outputbuffer, "<table id='header' width='100%%'>" " <tr>" " <td colspan=2 valign='top' class='left' width='100%%'>" " <h1>Monit Service Manager</h1>" " <p class='center'>Monit is <a href='_runtime'>running</a> on %s and monitoring:</p><br>" " </td>" " </tr>" "</table>", Run.system->name); do_home_system(res); do_home_process(res); do_home_program(res); do_home_filesystem(res); do_home_file(res); do_home_fifo(res); do_home_directory(res); do_home_net(res); do_home_host(res); do_foot(res); } static void do_about(HttpResponse res) { StringBuffer_append(res->outputbuffer, "<html><head><title>about monit</title></head><body bgcolor=white>" "<br><h1><center><a href='http://mmonit.com/monit/'>" "monit " VERSION "</a></center></h1>"); StringBuffer_append(res->outputbuffer, "<ul>" "<li style='padding-bottom:10px;'>Copyright &copy; 2001-2018 <a " "href='http://tildeslash.com/'>Tildeslash Ltd" "</a>. All Rights Reserved.</li></ul>"); StringBuffer_append(res->outputbuffer, "<hr size='1'>"); StringBuffer_append(res->outputbuffer, "<p>This program is free software; you can redistribute it and/or " "modify it under the terms of the GNU Affero General Public License version 3</p>" "<p>This program is distributed in the hope that it will be useful, but " "WITHOUT ANY WARRANTY; without even the implied warranty of " "MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the " "<a href='http://www.gnu.org/licenses/agpl.html'>" "GNU AFFERO GENERAL PUBLIC LICENSE</a> for more details.</p>"); StringBuffer_append(res->outputbuffer, "<center><p style='padding-top:20px;'>[<a href='.'>Back to Monit</a>]</p></body></html>"); } static void do_ping(HttpResponse res) { StringBuffer_append(res->outputbuffer, "pong"); } static void do_getid(HttpResponse res) { StringBuffer_append(res->outputbuffer, "%s", Run.id); } static void do_runtime(HttpRequest req, HttpResponse res) { int pid = exist_daemon(); char buf[STRLEN]; do_head(res, "_runtime", "Runtime", 1000); StringBuffer_append(res->outputbuffer, "<h2>Monit runtime status</h2>"); StringBuffer_append(res->outputbuffer, "<table id='status-table'><tr>" "<th width='40%%'>Parameter</th>" "<th width='60%%'>Value</th></tr>"); StringBuffer_append(res->outputbuffer, "<tr><td>Monit ID</td><td>%s</td></tr>", Run.id); StringBuffer_append(res->outputbuffer, "<tr><td>Host</td><td>%s</td></tr>", Run.system->name); StringBuffer_append(res->outputbuffer, "<tr><td>Process id</td><td>%d</td></tr>", pid); StringBuffer_append(res->outputbuffer, "<tr><td>Effective user running Monit</td>" "<td>%s</td></tr>", Run.Env.user); StringBuffer_append(res->outputbuffer, "<tr><td>Controlfile</td><td>%s</td></tr>", Run.files.control); if (Run.files.log) StringBuffer_append(res->outputbuffer, "<tr><td>Logfile</td><td>%s</td></tr>", Run.files.log); StringBuffer_append(res->outputbuffer, "<tr><td>Pidfile</td><td>%s</td></tr>", Run.files.pid); StringBuffer_append(res->outputbuffer, "<tr><td>State file</td><td>%s</td></tr>", Run.files.state); StringBuffer_append(res->outputbuffer, "<tr><td>Debug</td><td>%s</td></tr>", Run.debug ? "True" : "False"); StringBuffer_append(res->outputbuffer, "<tr><td>Log</td><td>%s</td></tr>", (Run.flags & Run_Log) ? "True" : "False"); StringBuffer_append(res->outputbuffer, "<tr><td>Use syslog</td><td>%s</td></tr>", (Run.flags & Run_UseSyslog) ? 
"True" : "False"); if (Run.eventlist_dir) { if (Run.eventlist_slots < 0) snprintf(buf, STRLEN, "unlimited"); else snprintf(buf, STRLEN, "%d", Run.eventlist_slots); StringBuffer_append(res->outputbuffer, "<tr><td>Event queue</td>" "<td>base directory %s with %d slots</td></tr>", Run.eventlist_dir, Run.eventlist_slots); } #ifdef HAVE_OPENSSL { const char *options = Ssl_printOptions(&(Run.ssl), (char[STRLEN]){}, STRLEN); if (options && *options) StringBuffer_append(res->outputbuffer, "<tr><td>SSL options</td><td>%s</td></tr>", options); } #endif if (Run.mmonits) { StringBuffer_append(res->outputbuffer, "<tr><td>M/Monit server(s)</td><td>"); for (Mmonit_T c = Run.mmonits; c; c = c->next) { StringBuffer_append(res->outputbuffer, "%s with timeout %s", c->url->url, Fmt_time2str(c->timeout, (char[11]){})); #ifdef HAVE_OPENSSL if (c->ssl.flags) { StringBuffer_append(res->outputbuffer, " using TLS"); const char *options = Ssl_printOptions(&c->ssl, (char[STRLEN]){}, STRLEN); if (options && *options) StringBuffer_append(res->outputbuffer, " with options {%s}", options); if (c->ssl.checksum) StringBuffer_append(res->outputbuffer, " and certificate checksum %s equal to '%s'", checksumnames[c->ssl.checksumType], c->ssl.checksum); } #endif if (Run.flags & Run_MmonitCredentials && c->url->user) StringBuffer_append(res->outputbuffer, " with credentials"); if (c->next) StringBuffer_append(res->outputbuffer, "</td></tr><tr><td>&nbsp;</td><td>"); } StringBuffer_append(res->outputbuffer, "</td></tr>"); } if (Run.mailservers) { StringBuffer_append(res->outputbuffer, "<tr><td>Mail server(s)</td><td>"); for (MailServer_T mta = Run.mailservers; mta; mta = mta->next) { StringBuffer_append(res->outputbuffer, "%s:%d", mta->host, mta->port); #ifdef HAVE_OPENSSL if (mta->ssl.flags) { StringBuffer_append(res->outputbuffer, " using TLS"); const char *options = Ssl_printOptions(&mta->ssl, (char[STRLEN]){}, STRLEN); if (options && *options) StringBuffer_append(res->outputbuffer, " with options {%s}", options); if (mta->ssl.checksum) StringBuffer_append(res->outputbuffer, " and certificate checksum %s equal to '%s'", checksumnames[mta->ssl.checksumType], mta->ssl.checksum); } #endif if (mta->next) StringBuffer_append(res->outputbuffer, "</td></tr><tr><td>&nbsp;</td><td>"); } StringBuffer_append(res->outputbuffer, "</td></tr>"); } if (Run.MailFormat.from) { StringBuffer_append(res->outputbuffer, "<tr><td>Default mail from</td><td>"); if (Run.MailFormat.from->name) StringBuffer_append(res->outputbuffer, "%s &lt;%s&gt;", Run.MailFormat.from->name, Run.MailFormat.from->address); else StringBuffer_append(res->outputbuffer, "%s", Run.MailFormat.from->address); StringBuffer_append(res->outputbuffer, "</td></tr>"); } if (Run.MailFormat.replyto) { StringBuffer_append(res->outputbuffer, "<tr><td>Default mail reply to</td><td>"); if (Run.MailFormat.replyto->name) StringBuffer_append(res->outputbuffer, "%s &lt;%s&gt;", Run.MailFormat.replyto->name, Run.MailFormat.replyto->address); else StringBuffer_append(res->outputbuffer, "%s", Run.MailFormat.replyto->address); StringBuffer_append(res->outputbuffer, "</td></tr>"); } if (Run.MailFormat.subject) StringBuffer_append(res->outputbuffer, "<tr><td>Default mail subject</td><td>%s</td></tr>", Run.MailFormat.subject); if (Run.MailFormat.message) StringBuffer_append(res->outputbuffer, "<tr><td>Default mail message</td><td>%s</td></tr>", Run.MailFormat.message); StringBuffer_append(res->outputbuffer, "<tr><td>Limit for Send/Expect buffer</td><td>%s</td></tr>", 
Fmt_bytes2str(Run.limits.sendExpectBuffer, buf)); StringBuffer_append(res->outputbuffer, "<tr><td>Limit for file content buffer</td><td>%s</td></tr>", Fmt_bytes2str(Run.limits.fileContentBuffer, buf)); StringBuffer_append(res->outputbuffer, "<tr><td>Limit for HTTP content buffer</td><td>%s</td></tr>", Fmt_bytes2str(Run.limits.httpContentBuffer, buf)); StringBuffer_append(res->outputbuffer, "<tr><td>Limit for program output</td><td>%s</td></tr>", Fmt_bytes2str(Run.limits.programOutput, buf)); StringBuffer_append(res->outputbuffer, "<tr><td>Limit for network timeout</td><td>%s</td></tr>", Fmt_time2str(Run.limits.networkTimeout, (char[11]){})); StringBuffer_append(res->outputbuffer, "<tr><td>Limit for check program timeout</td><td>%s</td></tr>", Fmt_time2str(Run.limits.programTimeout, (char[11]){})); StringBuffer_append(res->outputbuffer, "<tr><td>Limit for service stop timeout</td><td>%s</td></tr>", Fmt_time2str(Run.limits.stopTimeout, (char[11]){})); StringBuffer_append(res->outputbuffer, "<tr><td>Limit for service start timeout</td><td>%s</td></tr>", Fmt_time2str(Run.limits.startTimeout, (char[11]){})); StringBuffer_append(res->outputbuffer, "<tr><td>Limit for service restart timeout</td><td>%s</td></tr>", Fmt_time2str(Run.limits.restartTimeout, (char[11]){})); StringBuffer_append(res->outputbuffer, "<tr><td>On reboot</td><td>%s</td></tr>", onrebootnames[Run.onreboot]); StringBuffer_append(res->outputbuffer, "<tr><td>Poll time</td><td>%d seconds with start delay %d seconds</td></tr>", Run.polltime, Run.startdelay); if (Run.httpd.flags & Httpd_Net) { StringBuffer_append(res->outputbuffer, "<tr><td>httpd bind address</td><td>%s</td></tr>", Run.httpd.socket.net.address ? Run.httpd.socket.net.address : "Any/All"); StringBuffer_append(res->outputbuffer, "<tr><td>httpd portnumber</td><td>%d</td></tr>", Run.httpd.socket.net.port); #ifdef HAVE_OPENSSL const char *options = Ssl_printOptions(&(Run.httpd.socket.net.ssl), (char[STRLEN]){}, STRLEN); if (options && *options) StringBuffer_append(res->outputbuffer, "<tr><td>httpd encryption</td><td>%s</td></tr>", options); #endif } if (Run.httpd.flags & Httpd_Unix) StringBuffer_append(res->outputbuffer, "<tr><td>httpd unix socket</td><td>%s</td></tr>", Run.httpd.socket.unix.path); StringBuffer_append(res->outputbuffer, "<tr><td>httpd signature</td><td>%s</td></tr>", Run.httpd.flags & Httpd_Signature ? "True" : "False"); StringBuffer_append(res->outputbuffer, "<tr><td>httpd auth. style</td><td>%s</td></tr>", Run.httpd.credentials && Engine_hasAllow() ? "Basic Authentication and Host/Net allow list" : Run.httpd.credentials ? "Basic Authentication" : Engine_hasAllow() ? "Host/Net allow list" : "No authentication"); print_alerts(res, Run.maillist); StringBuffer_append(res->outputbuffer, "</table>"); if (! is_readonly(req)) { StringBuffer_append(res->outputbuffer, "<table id='buttons'><tr>"); StringBuffer_append(res->outputbuffer, "<td style='color:red;'>" "<form method=POST action='_runtime'>Stop Monit http server? " "<input type=hidden name='securitytoken' value='%s'>" "<input type=hidden name='action' value='stop'>" "<input type=submit value='Go'>" "</form>" "</td>", res->token); StringBuffer_append(res->outputbuffer, "<td>" "<form method=POST action='_runtime'>Force validate now? " "<input type=hidden name='securitytoken' value='%s'>" "<input type=hidden name='action' value='validate'>" "<input type=submit value='Go'>" "</form>" "</td>", res->token); if ((Run.flags & Run_Log) && ! 
        if ((Run.flags & Run_Log) && ! (Run.flags & Run_UseSyslog)) {
            StringBuffer_append(res->outputbuffer,
                                "<td>"
                                "<form method=POST action='_viewlog'>View Monit logfile? "
                                "<input type=hidden name='securitytoken' value='%s'>"
                                "<input type=submit value='Go'>"
                                "</form>"
                                "</td>", res->token);
        }
        StringBuffer_append(res->outputbuffer, "</tr></table>");
    }
    do_foot(res);
}


static void do_viewlog(HttpRequest req, HttpResponse res) {
    if (is_readonly(req)) {
        send_error(req, res, SC_FORBIDDEN, "You do not have sufficient privileges to access this page");
        return;
    }
    do_head(res, "_viewlog", "View log", 100);
    if ((Run.flags & Run_Log) && ! (Run.flags & Run_UseSyslog)) {
        FILE *f = fopen(Run.files.log, "r");
        if (f) {
            size_t n;
            char buf[512];
            StringBuffer_append(res->outputbuffer, "<br><p><form><textarea cols=120 rows=30 readonly>");
            while ((n = fread(buf, sizeof(char), sizeof(buf) - 1, f)) > 0) {
                buf[n] = 0;
                escapeHTML(res->outputbuffer, buf); /* escape log data so it cannot inject markup into the page */
            }
            fclose(f);
            StringBuffer_append(res->outputbuffer, "</textarea></form>");
        } else {
            StringBuffer_append(res->outputbuffer, "Error opening logfile: %s", STRERROR);
        }
    } else {
        StringBuffer_append(res->outputbuffer, "<b>Cannot view logfile:</b><br>");
        if (! (Run.flags & Run_Log))
            StringBuffer_append(res->outputbuffer, "Monit was started without logging");
        else
            StringBuffer_append(res->outputbuffer, "Monit uses syslog");
    }
    do_foot(res);
}


static void handle_service(HttpRequest req, HttpResponse res) {
    char *name = req->url;
    if (! name) {
        send_error(req, res, SC_NOT_FOUND, "Service name required");
        return;
    }
    Service_T s = Util_getService(++name);
    if (! s) {
        send_error(req, res, SC_NOT_FOUND, "There is no service named \"%s\"", name);
        return;
    }
    do_service(req, res, s);
}

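/*
 * POST handler: validate and queue an action (start/stop/restart/monitor/
 * unmonitor) for the service named in the request URL, then render its
 * page. Actions are rejected for read-only users and unknown action names.
 */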
p->value : ""); return; } s->doaction = doaction; LogInfo("'%s' %s on user request\n", s->name, action); } } /* Set token for last service only so we'll get it back after all services were handled */ if (token) { Service_T q = NULL; for (s = servicelist; s; s = s->next) if (s->doaction == doaction) q = s; if (q) { FREE(q->token); q->token = Str_dup(token); } } Run.flags |= Run_ActionPending; do_wakeupcall(); } } static void handle_runtime(HttpRequest req, HttpResponse res) { LOCK(Run.mutex) do_runtime(req, res); END_LOCK; } static void handle_runtime_action(HttpRequest req, HttpResponse res) { const char *action = get_parameter(req, "action"); if (action) { if (is_readonly(req)) { send_error(req, res, SC_FORBIDDEN, "You do not have sufficient privileges to access this page"); return; } if (IS(action, "validate")) { LogInfo("The Monit http server woke up on user request\n"); do_wakeupcall(); } else if (IS(action, "stop")) { LogInfo("The Monit http server stopped on user request\n"); send_error(req, res, SC_SERVICE_UNAVAILABLE, "The Monit http server is stopped"); Engine_stop(); return; } } handle_runtime(req, res); } static void do_service(HttpRequest req, HttpResponse res, Service_T s) { char buf[STRLEN]; ASSERT(s); do_head(res, s->name, s->name, Run.polltime); StringBuffer_append(res->outputbuffer, "<h2>%s status</h2>" "<table id='status-table'>" "<tr>" "<th width='30%%'>Parameter</th>" "<th width='70%%'>Value</th>" "</tr>" "<tr>" "<td>Name</td>" "<td>%s</td>" "</tr>", servicetypes[s->type], s->name); if (s->type == Service_Process) StringBuffer_append(res->outputbuffer, "<tr><td>%s</td><td>%s</td></tr>", s->matchlist ? "Match" : "Pid file", s->path); else if (s->type == Service_Host) StringBuffer_append(res->outputbuffer, "<tr><td>Address</td><td>%s</td></tr>", s->path); else if (s->type == Service_Net) StringBuffer_append(res->outputbuffer, "<tr><td>Interface</td><td>%s</td></tr>", s->path); else if (s->type != Service_System) StringBuffer_append(res->outputbuffer, "<tr><td>Path</td><td>%s</td></tr>", s->path); StringBuffer_append(res->outputbuffer, "<tr><td>Status</td><td>%s</td></tr>", get_service_status(HTML, s, buf, sizeof(buf))); for (ServiceGroup_T sg = servicegrouplist; sg; sg = sg->next) for (list_t m = sg->members->head; m; m = m->next) if (m->e == s) StringBuffer_append(res->outputbuffer, "<tr><td>Group</td><td class='blue-text'>%s</td></tr>", sg->name); StringBuffer_append(res->outputbuffer, "<tr><td>Monitoring status</td><td>%s</td></tr>", get_monitoring_status(HTML, s, buf, sizeof(buf))); StringBuffer_append(res->outputbuffer, "<tr><td>Monitoring mode</td><td>%s</td></tr>", modenames[s->mode]); StringBuffer_append(res->outputbuffer, "<tr><td>On reboot</td><td>%s</td></tr>", onrebootnames[s->onreboot]); for (Dependant_T d = s->dependantlist; d; d = d->next) { if (d->dependant != NULL) { StringBuffer_append(res->outputbuffer, "<tr><td>Depends on service </td><td> <a href=%s> %s </a></td></tr>", d->dependant, d->dependant); } } if (s->start) { StringBuffer_append(res->outputbuffer, "<tr><td>Start program</td><td>'%s'", Util_commandDescription(s->start, (char[STRLEN]){})); if (s->start->has_uid) StringBuffer_append(res->outputbuffer, " as uid %d", s->start->uid); if (s->start->has_gid) StringBuffer_append(res->outputbuffer, " as gid %d", s->start->gid); StringBuffer_append(res->outputbuffer, " timeout %s", Fmt_time2str(s->start->timeout, (char[11]){})); StringBuffer_append(res->outputbuffer, "</td></tr>"); } if (s->stop) { StringBuffer_append(res->outputbuffer, "<tr><td>Stop 
program</td><td>'%s'", Util_commandDescription(s->stop, (char[STRLEN]){})); if (s->stop->has_uid) StringBuffer_append(res->outputbuffer, " as uid %d", s->stop->uid); if (s->stop->has_gid) StringBuffer_append(res->outputbuffer, " as gid %d", s->stop->gid); StringBuffer_append(res->outputbuffer, " timeout %s", Fmt_time2str(s->stop->timeout, (char[11]){})); StringBuffer_append(res->outputbuffer, "</td></tr>"); } if (s->restart) { StringBuffer_append(res->outputbuffer, "<tr><td>Restart program</td><td>'%s'", Util_commandDescription(s->restart, (char[STRLEN]){})); if (s->restart->has_uid) StringBuffer_append(res->outputbuffer, " as uid %d", s->restart->uid); if (s->restart->has_gid) StringBuffer_append(res->outputbuffer, " as gid %d", s->restart->gid); StringBuffer_append(res->outputbuffer, " timeout %s", Fmt_time2str(s->restart->timeout, (char[11]){})); StringBuffer_append(res->outputbuffer, "</td></tr>"); } if (s->every.type != Every_Cycle) { StringBuffer_append(res->outputbuffer, "<tr><td>Check service</td><td>"); if (s->every.type == Every_SkipCycles) StringBuffer_append(res->outputbuffer, "every %d cycle", s->every.spec.cycle.number); else if (s->every.type == Every_Cron) StringBuffer_append(res->outputbuffer, "every <code>\"%s\"</code>", s->every.spec.cron); else if (s->every.type == Every_NotInCron) StringBuffer_append(res->outputbuffer, "not every <code>\"%s\"</code>", s->every.spec.cron); StringBuffer_append(res->outputbuffer, "</td></tr>"); } _printStatus(HTML, res, s); // Rules print_service_rules_timeout(res, s); print_service_rules_nonexistence(res, s); print_service_rules_existence(res, s); print_service_rules_icmp(res, s); print_service_rules_port(res, s); print_service_rules_socket(res, s); print_service_rules_perm(res, s); print_service_rules_uid(res, s); print_service_rules_euid(res, s); print_service_rules_secattr(res, s); print_service_rules_gid(res, s); print_service_rules_timestamp(res, s); print_service_rules_fsflags(res, s); print_service_rules_filesystem(res, s); print_service_rules_size(res, s); print_service_rules_linkstatus(res, s); print_service_rules_linkspeed(res, s); print_service_rules_linksaturation(res, s); print_service_rules_uploadbytes(res, s); print_service_rules_uploadpackets(res, s); print_service_rules_downloadbytes(res, s); print_service_rules_downloadpackets(res, s); print_service_rules_uptime(res, s); print_service_rules_content(res, s); print_service_rules_checksum(res, s); print_service_rules_pid(res, s); print_service_rules_ppid(res, s); print_service_rules_program(res, s); print_service_rules_resource(res, s); print_alerts(res, s->maillist); StringBuffer_append(res->outputbuffer, "</table>"); print_buttons(req, res, s); do_foot(res); } static void do_home_system(HttpResponse res) { Service_T s = Run.system; char buf[STRLEN]; StringBuffer_append(res->outputbuffer, "<table id='header-row'>" "<tr>" "<th class='left first'>System</th>" "<th class='left'>Status</th>" "<th class='right column'>Load</th>" "<th class='right column'>CPU</th>" "<th class='right column'>Memory</th>" "<th class='right column'>Swap</th>" "</tr>" "<tr class='stripe'>" "<td class='left'><a href='%s'>%s</a></td>" "<td class='left'>%s</td>" "<td class='right column'>[%.2f]&nbsp;[%.2f]&nbsp;[%.2f]</td>" "<td class='right column'>" "%.1f%%us,&nbsp;%.1f%%sy" #ifdef HAVE_CPU_WAIT ",&nbsp;%.1f%%wa" #endif "</td>", s->name, s->name, get_service_status(HTML, s, buf, sizeof(buf)), systeminfo.loadavg[0], systeminfo.loadavg[1], systeminfo.loadavg[2], systeminfo.cpu.usage.user > 0. ? 
static void do_home_system(HttpResponse res) {
    Service_T s = Run.system;
    char buf[STRLEN];
    StringBuffer_append(res->outputbuffer,
                        "<table id='header-row'>"
                        "<tr>"
                        "<th class='left first'>System</th>"
                        "<th class='left'>Status</th>"
                        "<th class='right column'>Load</th>"
                        "<th class='right column'>CPU</th>"
                        "<th class='right column'>Memory</th>"
                        "<th class='right column'>Swap</th>"
                        "</tr>"
                        "<tr class='stripe'>"
                        "<td class='left'><a href='%s'>%s</a></td>"
                        "<td class='left'>%s</td>"
                        "<td class='right column'>[%.2f]&nbsp;[%.2f]&nbsp;[%.2f]</td>"
                        "<td class='right column'>"
                        "%.1f%%us,&nbsp;%.1f%%sy"
#ifdef HAVE_CPU_WAIT
                        ",&nbsp;%.1f%%wa"
#endif
                        "</td>",
                        s->name, s->name,
                        get_service_status(HTML, s, buf, sizeof(buf)),
                        systeminfo.loadavg[0], systeminfo.loadavg[1], systeminfo.loadavg[2],
                        systeminfo.cpu.usage.user > 0. ? systeminfo.cpu.usage.user : 0.,
                        systeminfo.cpu.usage.system > 0. ? systeminfo.cpu.usage.system : 0.
#ifdef HAVE_CPU_WAIT
                        , systeminfo.cpu.usage.wait > 0. ? systeminfo.cpu.usage.wait : 0.
#endif
                        );
    StringBuffer_append(res->outputbuffer, "<td class='right column'>%.1f%% [%s]</td>",
                        systeminfo.memory.usage.percent, Fmt_bytes2str(systeminfo.memory.usage.bytes, buf));
    StringBuffer_append(res->outputbuffer, "<td class='right column'>%.1f%% [%s]</td>",
                        systeminfo.swap.usage.percent, Fmt_bytes2str(systeminfo.swap.usage.bytes, buf));
    StringBuffer_append(res->outputbuffer, "</tr></table>");
}

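/*
 * Home page table of process services. Uptime, CPU, memory and disk I/O
 * columns degrade to "-" when the process engine is disabled or no sample
 * has been collected yet.
 */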
static void do_home_process(HttpResponse res) {
    char buf[STRLEN];
    boolean_t on = true;
    boolean_t header = true;
    for (Service_T s = servicelist_conf; s; s = s->next_conf) {
        if (s->type != Service_Process)
            continue;
        if (header) {
            /* Note: the original markup had a duplicated class attribute and a stray </b>; fixed here */
            StringBuffer_append(res->outputbuffer,
                                "<table id='header-row'>"
                                "<tr>"
                                "<th class='left first'>Process</th>"
                                "<th class='left'>Status</th>"
                                "<th class='right'>Uptime</th>"
                                "<th class='right'>CPU Total</th>"
                                "<th class='right'>Memory Total</th>"
                                "<th class='right column'>Read</th>"
                                "<th class='right column'>Write</th>"
                                "</tr>");
            header = false;
        }
        StringBuffer_append(res->outputbuffer,
                            "<tr%s>"
                            "<td class='left'><a href='%s'>%s</a></td>"
                            "<td class='left'>%s</td>",
                            on ? " class='stripe'" : "",
                            s->name, s->name,
                            get_service_status(HTML, s, buf, sizeof(buf)));
        if (! (Run.flags & Run_ProcessEngineEnabled) || ! Util_hasServiceStatus(s) || s->inf.process->uptime < 0) {
            StringBuffer_append(res->outputbuffer, "<td class='right'>-</td>");
        } else {
            StringBuffer_append(res->outputbuffer, "<td class='right'>%s</td>", _getUptime(s->inf.process->uptime, (char[256]){}));
        }
        if (! (Run.flags & Run_ProcessEngineEnabled) || ! Util_hasServiceStatus(s) || s->inf.process->total_cpu_percent < 0) {
            StringBuffer_append(res->outputbuffer, "<td class='right'>-</td>");
        } else {
            StringBuffer_append(res->outputbuffer, "<td class='right%s'>%.1f%%</td>",
                                (s->error & Event_Resource) ? " red-text" : "",
                                s->inf.process->total_cpu_percent);
        }
        if (! (Run.flags & Run_ProcessEngineEnabled) || ! Util_hasServiceStatus(s) || s->inf.process->total_mem_percent < 0) {
            StringBuffer_append(res->outputbuffer, "<td class='right'>-</td>");
        } else {
            StringBuffer_append(res->outputbuffer, "<td class='right%s'>%.1f%% [%s]</td>",
                                (s->error & Event_Resource) ? " red-text" : "",
                                s->inf.process->total_mem_percent,
                                Fmt_bytes2str(s->inf.process->total_mem, buf));
        }
        boolean_t hasReadBytes = Statistics_initialized(&(s->inf.process->read.bytes));
        boolean_t hasReadOperations = Statistics_initialized(&(s->inf.process->read.operations));
        if (! (Run.flags & Run_ProcessEngineEnabled) || ! Util_hasServiceStatus(s) || (! hasReadBytes && ! hasReadOperations)) {
            StringBuffer_append(res->outputbuffer, "<td class='right column'>-</td>");
        } else if (hasReadBytes) {
            StringBuffer_append(res->outputbuffer, "<td class='right column%s'>%s/s</td>",
                                (s->error & Event_Resource) ? " red-text" : "",
                                Fmt_bytes2str(Statistics_deltaNormalize(&(s->inf.process->read.bytes)), (char[10]){}));
        } else if (hasReadOperations) {
            StringBuffer_append(res->outputbuffer, "<td class='right column%s'>%.1f/s</td>",
                                (s->error & Event_Resource) ? " red-text" : "",
                                Statistics_deltaNormalize(&(s->inf.process->read.operations)));
        }
        boolean_t hasWriteBytes = Statistics_initialized(&(s->inf.process->write.bytes));
        boolean_t hasWriteOperations = Statistics_initialized(&(s->inf.process->write.operations));
        if (! (Run.flags & Run_ProcessEngineEnabled) || ! Util_hasServiceStatus(s) || (! hasWriteBytes && ! hasWriteOperations)) {
            StringBuffer_append(res->outputbuffer, "<td class='right column'>-</td>");
        } else if (hasWriteBytes) {
            StringBuffer_append(res->outputbuffer, "<td class='right column%s'>%s/s</td>",
                                (s->error & Event_Resource) ? " red-text" : "",
                                Fmt_bytes2str(Statistics_deltaNormalize(&(s->inf.process->write.bytes)), (char[10]){}));
        } else if (hasWriteOperations) {
            StringBuffer_append(res->outputbuffer, "<td class='right column%s'>%.1f/s</td>",
                                (s->error & Event_Resource) ? " red-text" : "",
                                Statistics_deltaNormalize(&(s->inf.process->write.operations)));
        }
        StringBuffer_append(res->outputbuffer, "</tr>");
        on = ! on;
    }
    if (! header)
        StringBuffer_append(res->outputbuffer, "</table>");
}

static void do_home_program(HttpResponse res) {
    char buf[STRLEN];
    boolean_t on = true;
    boolean_t header = true;
    for (Service_T s = servicelist_conf; s; s = s->next_conf) {
        if (s->type != Service_Program)
            continue;
        if (header) {
            StringBuffer_append(res->outputbuffer,
                                "<table id='header-row'>"
                                "<tr>"
                                "<th class='left first'>Program</th>"
                                "<th class='left'>Status</th>"
                                "<th class='left'>Output</th>"
                                "<th class='right'>Last started</th>"
                                "<th class='right'>Exit value</th>"
                                "</tr>");
            header = false;
        }
        StringBuffer_append(res->outputbuffer,
                            "<tr %s>"
                            "<td class='left'><a href='%s'>%s</a></td>"
                            "<td class='left'>%s</td>",
                            on ? "class='stripe'" : "",
                            s->name, s->name,
                            get_service_status(HTML, s, buf, sizeof(buf)));
        if (! Util_hasServiceStatus(s)) {
            StringBuffer_append(res->outputbuffer, "<td class='left'>-</td>");
            StringBuffer_append(res->outputbuffer, "<td class='right'>-</td>");
            StringBuffer_append(res->outputbuffer, "<td class='right'>-</td>");
        } else {
            if (s->program->started) {
                StringBuffer_append(res->outputbuffer, "<td class='left short'>");
                if (StringBuffer_length(s->program->lastOutput)) {
                    // Print first line only (escape HTML characters if any)
                    const char *output = StringBuffer_toString(s->program->lastOutput);
                    for (int i = 0; output[i]; i++) {
                        if (output[i] == '<')
                            StringBuffer_append(res->outputbuffer, "&lt;");
                        else if (output[i] == '>')
                            StringBuffer_append(res->outputbuffer, "&gt;");
                        else if (output[i] == '&')
                            StringBuffer_append(res->outputbuffer, "&amp;");
                        else if (output[i] == '\r' || output[i] == '\n')
                            break;
                        else
                            StringBuffer_append(res->outputbuffer, "%c", output[i]);
                    }
                } else {
                    StringBuffer_append(res->outputbuffer, "no output");
                }
                StringBuffer_append(res->outputbuffer, "</td>");
                StringBuffer_append(res->outputbuffer, "<td class='right'>%s</td>", Time_fmt((char[32]){}, 32, "%d %b %Y %H:%M:%S", s->program->started));
                StringBuffer_append(res->outputbuffer, "<td class='right'>%d</td>", s->program->exitStatus);
            } else {
                StringBuffer_append(res->outputbuffer, "<td class='right'>-</td>");
                StringBuffer_append(res->outputbuffer, "<td class='right'>Not yet started</td>");
                StringBuffer_append(res->outputbuffer, "<td class='right'>-</td>");
            }
        }
        StringBuffer_append(res->outputbuffer, "</tr>");
        on = ! on;
    }
    if (! header)
        StringBuffer_append(res->outputbuffer, "</table>");
}

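/*
 * The home page tables for the remaining service types all follow the same
 * pattern: emit the table header lazily on the first matching service,
 * stripe alternate rows, and fall back to "-" for missing data.
 */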
static void do_home_net(HttpResponse res) {
    char buf[STRLEN];
    boolean_t on = true;
    boolean_t header = true;
    for (Service_T s = servicelist_conf; s; s = s->next_conf) {
        if (s->type != Service_Net)
            continue;
        if (header) {
            StringBuffer_append(res->outputbuffer,
                                "<table id='header-row'>"
                                "<tr>"
                                "<th class='left first'>Net</th>"
                                "<th class='left'>Status</th>"
                                "<th class='right'>Upload</th>"
                                "<th class='right'>Download</th>"
                                "</tr>");
            header = false;
        }
        StringBuffer_append(res->outputbuffer,
                            "<tr %s>"
                            "<td class='left'><a href='%s'>%s</a></td>"
                            "<td class='left'>%s</td>",
                            on ? "class='stripe'" : "",
                            s->name, s->name,
                            get_service_status(HTML, s, buf, sizeof(buf)));
        if (! Util_hasServiceStatus(s) || Link_getState(s->inf.net->stats) != 1) {
            StringBuffer_append(res->outputbuffer, "<td class='right'>-</td>");
            StringBuffer_append(res->outputbuffer, "<td class='right'>-</td>");
        } else {
            StringBuffer_append(res->outputbuffer, "<td class='right'>%s&#47;s</td>", Fmt_bytes2str(Link_getBytesOutPerSecond(s->inf.net->stats), buf));
            StringBuffer_append(res->outputbuffer, "<td class='right'>%s&#47;s</td>", Fmt_bytes2str(Link_getBytesInPerSecond(s->inf.net->stats), buf));
        }
        StringBuffer_append(res->outputbuffer, "</tr>");
        on = ! on;
    }
    if (! header)
        StringBuffer_append(res->outputbuffer, "</table>");
}


static void do_home_filesystem(HttpResponse res) {
    char buf[STRLEN];
    boolean_t on = true;
    boolean_t header = true;
    for (Service_T s = servicelist_conf; s; s = s->next_conf) {
        if (s->type != Service_Filesystem)
            continue;
        if (header) {
            StringBuffer_append(res->outputbuffer,
                                "<table id='header-row'>"
                                "<tr>"
                                "<th class='left first'>Filesystem</th>"
                                "<th class='left'>Status</th>"
                                "<th class='right'>Space usage</th>"
                                "<th class='right'>Inodes usage</th>"
                                "<th class='right column'>Read</th>"
                                "<th class='right column'>Write</th>"
                                "</tr>");
            header = false;
        }
        StringBuffer_append(res->outputbuffer,
                            "<tr %s>"
                            "<td class='left'><a href='%s'>%s</a></td>"
                            "<td class='left'>%s</td>",
                            on ? "class='stripe'" : "",
                            s->name, s->name,
                            get_service_status(HTML, s, buf, sizeof(buf)));
        if (! Util_hasServiceStatus(s)) {
            StringBuffer_append(res->outputbuffer,
                                "<td class='right'>- [-]</td>"
                                "<td class='right'>- [-]</td>"
                                "<td class='right column'>- [-]</td>"
                                "<td class='right column'>- [-]</td>");
        } else {
            StringBuffer_append(res->outputbuffer,
                                "<td class='right column%s'>%.1f%% [%s]</td>",
                                (s->error & Event_Resource) ? " red-text" : "",
                                s->inf.filesystem->space_percent,
                                s->inf.filesystem->f_bsize > 0 ? Fmt_bytes2str(s->inf.filesystem->f_blocksused * s->inf.filesystem->f_bsize, buf) : "0 MB");
            if (s->inf.filesystem->f_files > 0) {
                StringBuffer_append(res->outputbuffer,
                                    "<td class='right column%s'>%.1f%% [%lld objects]</td>",
                                    (s->error & Event_Resource) ? " red-text" : "",
                                    s->inf.filesystem->inode_percent,
                                    s->inf.filesystem->f_filesused);
            } else {
                StringBuffer_append(res->outputbuffer, "<td class='right column'>not supported by filesystem</td>");
            }
            StringBuffer_append(res->outputbuffer,
                                "<td class='right column%s'>%s/s</td>"
                                "<td class='right column%s'>%s/s</td>",
                                (s->error & Event_Resource) ? " red-text" : "",
                                Fmt_bytes2str(Statistics_deltaNormalize(&(s->inf.filesystem->read.bytes)), (char[10]){}),
                                (s->error & Event_Resource) ? " red-text" : "",
                                Fmt_bytes2str(Statistics_deltaNormalize(&(s->inf.filesystem->write.bytes)), (char[10]){}));
        }
        StringBuffer_append(res->outputbuffer, "</tr>");
        on = ! on;
    }
    if (! header)
        StringBuffer_append(res->outputbuffer, "</table>");
}

static void do_home_file(HttpResponse res) {
    char buf[STRLEN];
    boolean_t on = true;
    boolean_t header = true;
    for (Service_T s = servicelist_conf; s; s = s->next_conf) {
        if (s->type != Service_File)
            continue;
        if (header) {
            StringBuffer_append(res->outputbuffer,
                                "<table id='header-row'>"
                                "<tr>"
                                "<th class='left first'>File</th>"
                                "<th class='left'>Status</th>"
                                "<th class='right'>Size</th>"
                                "<th class='right'>Permission</th>"
                                "<th class='right'>UID</th>"
                                "<th class='right'>GID</th>"
                                "</tr>");
            header = false;
        }
        StringBuffer_append(res->outputbuffer,
                            "<tr %s>"
                            "<td class='left'><a href='%s'>%s</a></td>"
                            "<td class='left'>%s</td>",
                            on ? "class='stripe'" : "",
                            s->name, s->name,
                            get_service_status(HTML, s, buf, sizeof(buf)));
        if (! Util_hasServiceStatus(s) || s->inf.file->size < 0)
            StringBuffer_append(res->outputbuffer, "<td class='right'>-</td>");
        else
            StringBuffer_append(res->outputbuffer, "<td class='right'>%s</td>", Fmt_bytes2str(s->inf.file->size, (char[10]){}));
        if (! Util_hasServiceStatus(s) || s->inf.file->mode < 0)
            StringBuffer_append(res->outputbuffer, "<td class='right'>-</td>");
        else
            StringBuffer_append(res->outputbuffer, "<td class='right'>%04o</td>", s->inf.file->mode & 07777);
        if (! Util_hasServiceStatus(s) || s->inf.file->uid < 0)
            StringBuffer_append(res->outputbuffer, "<td class='right'>-</td>");
        else
            StringBuffer_append(res->outputbuffer, "<td class='right'>%d</td>", s->inf.file->uid);
        if (! Util_hasServiceStatus(s) || s->inf.file->gid < 0)
            StringBuffer_append(res->outputbuffer, "<td class='right'>-</td>");
        else
            StringBuffer_append(res->outputbuffer, "<td class='right'>%d</td>", s->inf.file->gid);
        StringBuffer_append(res->outputbuffer, "</tr>");
        on = ! on;
    }
    if (! header)
        StringBuffer_append(res->outputbuffer, "</table>");
}


static void do_home_fifo(HttpResponse res) {
    char buf[STRLEN];
    boolean_t on = true;
    boolean_t header = true;
    for (Service_T s = servicelist_conf; s; s = s->next_conf) {
        if (s->type != Service_Fifo)
            continue;
        if (header) {
            StringBuffer_append(res->outputbuffer,
                                "<table id='header-row'>"
                                "<tr>"
                                "<th class='left first'>Fifo</th>"
                                "<th class='left'>Status</th>"
                                "<th class='right'>Permission</th>"
                                "<th class='right'>UID</th>"
                                "<th class='right'>GID</th>"
                                "</tr>");
            header = false;
        }
        StringBuffer_append(res->outputbuffer,
                            "<tr %s>"
                            "<td class='left'><a href='%s'>%s</a></td>"
                            "<td class='left'>%s</td>",
                            on ? "class='stripe'" : "",
                            s->name, s->name,
                            get_service_status(HTML, s, buf, sizeof(buf)));
        if (! Util_hasServiceStatus(s) || s->inf.fifo->mode < 0)
            StringBuffer_append(res->outputbuffer, "<td class='right'>-</td>");
        else
            StringBuffer_append(res->outputbuffer, "<td class='right'>%04o</td>", s->inf.fifo->mode & 07777);
        if (! Util_hasServiceStatus(s) || s->inf.fifo->uid < 0)
            StringBuffer_append(res->outputbuffer, "<td class='right'>-</td>");
        else
            StringBuffer_append(res->outputbuffer, "<td class='right'>%d</td>", s->inf.fifo->uid);
        if (! Util_hasServiceStatus(s) || s->inf.fifo->gid < 0)
            StringBuffer_append(res->outputbuffer, "<td class='right'>-</td>");
        else
            StringBuffer_append(res->outputbuffer, "<td class='right'>%d</td>", s->inf.fifo->gid);
        StringBuffer_append(res->outputbuffer, "</tr>");
        on = ! on;
    }
    if (! header)
        StringBuffer_append(res->outputbuffer, "</table>");
}

static void do_home_directory(HttpResponse res) {
    char buf[STRLEN];
    boolean_t on = true;
    boolean_t header = true;
    for (Service_T s = servicelist_conf; s; s = s->next_conf) {
        if (s->type != Service_Directory)
            continue;
        if (header) {
            StringBuffer_append(res->outputbuffer,
                                "<table id='header-row'>"
                                "<tr>"
                                "<th class='left first'>Directory</th>"
                                "<th class='left'>Status</th>"
                                "<th class='right'>Permission</th>"
                                "<th class='right'>UID</th>"
                                "<th class='right'>GID</th>"
                                "</tr>");
            header = false;
        }
        StringBuffer_append(res->outputbuffer,
                            "<tr %s>"
                            "<td class='left'><a href='%s'>%s</a></td>"
                            "<td class='left'>%s</td>",
                            on ? "class='stripe'" : "",
                            s->name, s->name,
                            get_service_status(HTML, s, buf, sizeof(buf)));
        if (! Util_hasServiceStatus(s) || s->inf.directory->mode < 0)
            StringBuffer_append(res->outputbuffer, "<td class='right'>-</td>");
        else
            StringBuffer_append(res->outputbuffer, "<td class='right'>%04o</td>", s->inf.directory->mode & 07777);
        if (! Util_hasServiceStatus(s) || s->inf.directory->uid < 0)
            StringBuffer_append(res->outputbuffer, "<td class='right'>-</td>");
        else
            StringBuffer_append(res->outputbuffer, "<td class='right'>%d</td>", s->inf.directory->uid);
        if (! Util_hasServiceStatus(s) || s->inf.directory->gid < 0)
            StringBuffer_append(res->outputbuffer, "<td class='right'>-</td>");
        else
            StringBuffer_append(res->outputbuffer, "<td class='right'>%d</td>", s->inf.directory->gid);
        StringBuffer_append(res->outputbuffer, "</tr>");
        on = ! on;
    }
    if (! header)
        StringBuffer_append(res->outputbuffer, "</table>");
}


static void do_home_host(HttpResponse res) {
    char buf[STRLEN];
    boolean_t on = true;
    boolean_t header = true;
    for (Service_T s = servicelist_conf; s; s = s->next_conf) {
        if (s->type != Service_Host)
            continue;
        if (header) {
            StringBuffer_append(res->outputbuffer,
                                "<table id='header-row'>"
                                "<tr>"
                                "<th class='left first'>Host</th>"
                                "<th class='left'>Status</th>"
                                "<th class='right'>Protocol(s)</th>"
                                "</tr>");
            header = false;
        }
        StringBuffer_append(res->outputbuffer,
                            "<tr %s>"
                            "<td class='left'><a href='%s'>%s</a></td>"
                            "<td class='left'>%s</td>",
                            on ? "class='stripe'" : "",
                            s->name, s->name,
                            get_service_status(HTML, s, buf, sizeof(buf)));
        if (! Util_hasServiceStatus(s)) {
            StringBuffer_append(res->outputbuffer, "<td class='right'>-</td>");
        } else {
            StringBuffer_append(res->outputbuffer, "<td class='right'>");
            for (Icmp_T icmp = s->icmplist; icmp; icmp = icmp->next) {
                if (icmp != s->icmplist)
                    StringBuffer_append(res->outputbuffer, "&nbsp;&nbsp;<b>|</b>&nbsp;&nbsp;");
                switch (icmp->is_available) {
                    case Connection_Init:
                        StringBuffer_append(res->outputbuffer, "<span class='gray-text'>[Ping]</span>");
                        break;
                    case Connection_Failed:
                        StringBuffer_append(res->outputbuffer, "<span class='red-text'>[Ping]</span>");
                        break;
                    default:
                        StringBuffer_append(res->outputbuffer, "<span>[Ping]</span>");
                        break;
                }
            }
            if (s->icmplist && s->portlist)
                StringBuffer_append(res->outputbuffer, "&nbsp;&nbsp;<b>|</b>&nbsp;&nbsp;");
            for (Port_T port = s->portlist; port; port = port->next) {
                if (port != s->portlist)
                    StringBuffer_append(res->outputbuffer, "&nbsp;&nbsp;<b>|</b>&nbsp;&nbsp;");
                switch (port->is_available) {
                    case Connection_Init:
                        StringBuffer_append(res->outputbuffer, "<span class='gray-text'>[%s] at port %d</span>", port->protocol->name, port->target.net.port);
                        break;
                    case Connection_Failed:
                        StringBuffer_append(res->outputbuffer, "<span class='red-text'>[%s] at port %d</span>", port->protocol->name, port->target.net.port);
                        break;
                    default:
                        if (port->target.net.ssl.options.flags && port->target.net.ssl.certificate.validDays < port->target.net.ssl.certificate.minimumDays)
                            StringBuffer_append(res->outputbuffer, "<span class='red-text'>[%s] at port %d</span>", port->protocol->name, port->target.net.port);
                        else
                            StringBuffer_append(res->outputbuffer, "<span>[%s] at port %d</span>", port->protocol->name, port->target.net.port);
                        break;
                }
            }
            StringBuffer_append(res->outputbuffer, "</td>");
        }
        StringBuffer_append(res->outputbuffer, "</tr>");
        on = ! on;
    }
    if (! header)
        StringBuffer_append(res->outputbuffer, "</table>");
}


/* ------------------------------------------------------------------------- */

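/*
 * List each alert recipient together with the event mask it subscribes to
 * and, if set, the reminder interval.
 */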
r->to : ""); StringBuffer_append(res->outputbuffer, "<tr><td>Alert on</td><td>"); if (r->events == Event_Null) { StringBuffer_append(res->outputbuffer, "No events"); } else if (r->events == Event_All) { StringBuffer_append(res->outputbuffer, "All events"); } else { if (IS_EVENT_SET(r->events, Event_Action)) StringBuffer_append(res->outputbuffer, "Action "); if (IS_EVENT_SET(r->events, Event_ByteIn)) StringBuffer_append(res->outputbuffer, "ByteIn "); if (IS_EVENT_SET(r->events, Event_ByteOut)) StringBuffer_append(res->outputbuffer, "ByteOut "); if (IS_EVENT_SET(r->events, Event_Checksum)) StringBuffer_append(res->outputbuffer, "Checksum "); if (IS_EVENT_SET(r->events, Event_Connection)) StringBuffer_append(res->outputbuffer, "Connection "); if (IS_EVENT_SET(r->events, Event_Content)) StringBuffer_append(res->outputbuffer, "Content "); if (IS_EVENT_SET(r->events, Event_Data)) StringBuffer_append(res->outputbuffer, "Data "); if (IS_EVENT_SET(r->events, Event_Exec)) StringBuffer_append(res->outputbuffer, "Exec "); if (IS_EVENT_SET(r->events, Event_Exist)) StringBuffer_append(res->outputbuffer, "Exist "); if (IS_EVENT_SET(r->events, Event_FsFlag)) StringBuffer_append(res->outputbuffer, "Fsflags "); if (IS_EVENT_SET(r->events, Event_Gid)) StringBuffer_append(res->outputbuffer, "Gid "); if (IS_EVENT_SET(r->events, Event_Instance)) StringBuffer_append(res->outputbuffer, "Instance "); if (IS_EVENT_SET(r->events, Event_Invalid)) StringBuffer_append(res->outputbuffer, "Invalid "); if (IS_EVENT_SET(r->events, Event_Link)) StringBuffer_append(res->outputbuffer, "Link "); if (IS_EVENT_SET(r->events, Event_NonExist)) StringBuffer_append(res->outputbuffer, "Nonexist "); if (IS_EVENT_SET(r->events, Event_Permission)) StringBuffer_append(res->outputbuffer, "Permission "); if (IS_EVENT_SET(r->events, Event_PacketIn)) StringBuffer_append(res->outputbuffer, "PacketIn "); if (IS_EVENT_SET(r->events, Event_PacketOut)) StringBuffer_append(res->outputbuffer, "PacketOut "); if (IS_EVENT_SET(r->events, Event_Pid)) StringBuffer_append(res->outputbuffer, "PID "); if (IS_EVENT_SET(r->events, Event_Icmp)) StringBuffer_append(res->outputbuffer, "Ping "); if (IS_EVENT_SET(r->events, Event_PPid)) StringBuffer_append(res->outputbuffer, "PPID "); if (IS_EVENT_SET(r->events, Event_Resource)) StringBuffer_append(res->outputbuffer, "Resource "); if (IS_EVENT_SET(r->events, Event_Saturation)) StringBuffer_append(res->outputbuffer, "Saturation "); if (IS_EVENT_SET(r->events, Event_Size)) StringBuffer_append(res->outputbuffer, "Size "); if (IS_EVENT_SET(r->events, Event_Speed)) StringBuffer_append(res->outputbuffer, "Speed "); if (IS_EVENT_SET(r->events, Event_Status)) StringBuffer_append(res->outputbuffer, "Status "); if (IS_EVENT_SET(r->events, Event_Timeout)) StringBuffer_append(res->outputbuffer, "Timeout "); if (IS_EVENT_SET(r->events, Event_Timestamp)) StringBuffer_append(res->outputbuffer, "Timestamp "); if (IS_EVENT_SET(r->events, Event_Uid)) StringBuffer_append(res->outputbuffer, "Uid "); if (IS_EVENT_SET(r->events, Event_Uptime)) StringBuffer_append(res->outputbuffer, "Uptime "); } StringBuffer_append(res->outputbuffer, "</td></tr>"); if (r->reminder) { StringBuffer_append(res->outputbuffer, "<tr><td>Alert reminder</td><td>%u cycles</td></tr>", r->reminder); } } } static void print_buttons(HttpRequest req, HttpResponse res, Service_T s) { if (is_readonly(req)) { // A read-only REMOTE_USER does not get access to these buttons return; } StringBuffer_append(res->outputbuffer, "<table id='buttons'><tr>"); /* Start program */ 
static void print_buttons(HttpRequest req, HttpResponse res, Service_T s) {
    if (is_readonly(req)) {
        // A read-only REMOTE_USER does not get access to these buttons
        return;
    }
    StringBuffer_append(res->outputbuffer, "<table id='buttons'><tr>");
    /* Start program */
    if (s->start)
        StringBuffer_append(res->outputbuffer,
                            "<td>"
                            "<form method=POST action=%s>"
                            "<input type=hidden name='securitytoken' value='%s'>"
                            "<input type=hidden value='start' name=action>"
                            "<input type=submit value='Start service'>"
                            "</form>"
                            "</td>", s->name, res->token);
    /* Stop program */
    if (s->stop)
        StringBuffer_append(res->outputbuffer,
                            "<td>"
                            "<form method=POST action=%s>"
                            "<input type=hidden name='securitytoken' value='%s'>"
                            "<input type=hidden value='stop' name=action>"
                            "<input type=submit value='Stop service'>"
                            "</form>"
                            "</td>", s->name, res->token);
    /* Restart program */
    if ((s->start && s->stop) || s->restart)
        StringBuffer_append(res->outputbuffer,
                            "<td>"
                            "<form method=POST action=%s>"
                            "<input type=hidden name='securitytoken' value='%s'>"
                            "<input type=hidden value='restart' name=action>"
                            "<input type=submit value='Restart service'>"
                            "</form>"
                            "</td>", s->name, res->token);
    /* (un)monitor */
    StringBuffer_append(res->outputbuffer,
                        "<td>"
                        "<form method=POST action=%s>"
                        "<input type=hidden name='securitytoken' value='%s'>"
                        "<input type=hidden value='%s' name=action>"
                        "<input type=submit value='%s'>"
                        "</form>"
                        "</td>",
                        s->name, res->token,
                        s->monitor ? "unmonitor" : "monitor",
                        s->monitor ? "Disable monitoring" : "Enable monitoring");
    StringBuffer_append(res->outputbuffer, "</tr></table>");
}


static void print_service_rules_timeout(HttpResponse res, Service_T s) {
    for (ActionRate_T ar = s->actionratelist; ar; ar = ar->next) {
        StringBuffer_append(res->outputbuffer, "<tr class='rule'><td>Timeout</td><td>If restarted %d times within %d cycle(s) then ", ar->count, ar->cycle);
        Util_printAction(ar->action->failed, res->outputbuffer);
        StringBuffer_append(res->outputbuffer, "</td></tr>");
    }
}


static void print_service_rules_nonexistence(HttpResponse res, Service_T s) {
    for (NonExist_T l = s->nonexistlist; l; l = l->next) {
        StringBuffer_append(res->outputbuffer, "<tr class='rule'><td>Existence</td><td>");
        Util_printRule(res->outputbuffer, l->action, "If doesn't exist");
        StringBuffer_append(res->outputbuffer, "</td></tr>");
    }
}


static void print_service_rules_existence(HttpResponse res, Service_T s) {
    for (Exist_T l = s->existlist; l; l = l->next) {
        StringBuffer_append(res->outputbuffer, "<tr class='rule'><td>Non-Existence</td><td>");
        Util_printRule(res->outputbuffer, l->action, "If exist");
        StringBuffer_append(res->outputbuffer, "</td></tr>");
    }
}

static void print_service_rules_port(HttpResponse res, Service_T s) {
    for (Port_T p = s->portlist; p; p = p->next) {
        StringBuffer_append(res->outputbuffer, "<tr class='rule'><td>Port</td><td>");
        StringBuffer_T buf = StringBuffer_create(64);
        StringBuffer_append(buf, "If failed [%s]:%d%s", p->hostname, p->target.net.port, Util_portRequestDescription(p));
        if (p->outgoing.ip)
            StringBuffer_append(buf, " via address %s", p->outgoing.ip);
        StringBuffer_append(buf, " type %s/%s protocol %s with timeout %s",
                            Util_portTypeDescription(p), Util_portIpDescription(p),
                            p->protocol->name, Fmt_time2str(p->timeout, (char[11]){}));
        if (p->retry > 1)
            StringBuffer_append(buf, " and retry %d times", p->retry);
#ifdef HAVE_OPENSSL
        if (p->target.net.ssl.options.flags) {
            StringBuffer_append(buf, " using TLS");
            const char *options = Ssl_printOptions(&p->target.net.ssl.options, (char[STRLEN]){}, STRLEN);
            if (options && *options)
                StringBuffer_append(buf, " with options {%s}", options);
            if (p->target.net.ssl.certificate.minimumDays > 0)
                StringBuffer_append(buf, " and certificate valid for at least %d days", p->target.net.ssl.certificate.minimumDays);
            if (p->target.net.ssl.options.checksum)
                StringBuffer_append(buf, " and certificate checksum %s equal to '%s'",
                                    checksumnames[p->target.net.ssl.options.checksumType],
                                    p->target.net.ssl.options.checksum);
        }
#endif
        Util_printRule(res->outputbuffer, p->action, "%s", StringBuffer_toString(buf));
        StringBuffer_free(&buf);
        StringBuffer_append(res->outputbuffer, "</td></tr>");
    }
}


static void print_service_rules_socket(HttpResponse res, Service_T s) {
    for (Port_T p = s->socketlist; p; p = p->next) {
        StringBuffer_append(res->outputbuffer, "<tr class='rule'><td>Unix Socket</td><td>");
        if (p->retry > 1)
            Util_printRule(res->outputbuffer, p->action, "If failed %s type %s protocol %s with timeout %s and retry %d time(s)",
                           p->target.unix.pathname, Util_portTypeDescription(p), p->protocol->name,
                           Fmt_time2str(p->timeout, (char[11]){}), p->retry);
        else
            Util_printRule(res->outputbuffer, p->action, "If failed %s type %s protocol %s with timeout %s",
                           p->target.unix.pathname, Util_portTypeDescription(p), p->protocol->name,
                           Fmt_time2str(p->timeout, (char[11]){}));
        StringBuffer_append(res->outputbuffer, "</td></tr>");
    }
}


static void print_service_rules_icmp(HttpResponse res, Service_T s) {
    for (Icmp_T i = s->icmplist; i; i = i->next) {
        switch (i->family) {
            case Socket_Ip4:
                StringBuffer_append(res->outputbuffer, "<tr class='rule'><td>Ping4</td><td>");
                break;
            case Socket_Ip6:
                StringBuffer_append(res->outputbuffer, "<tr class='rule'><td>Ping6</td><td>");
                break;
            default:
                StringBuffer_append(res->outputbuffer, "<tr class='rule'><td>Ping</td><td>");
                break;
        }
        Util_printRule(res->outputbuffer, i->action, "If failed [count %d size %d with timeout %s%s%s]",
                       i->count, i->size, Fmt_time2str(i->timeout, (char[11]){}),
                       i->outgoing.ip ? " via address " : "", i->outgoing.ip ? i->outgoing.ip : "");
        StringBuffer_append(res->outputbuffer, "</td></tr>");
    }
}


static void print_service_rules_perm(HttpResponse res, Service_T s) {
    if (s->perm) {
        StringBuffer_append(res->outputbuffer, "<tr class='rule'><td>Permissions</td><td>");
        if (s->perm->test_changes)
            Util_printRule(res->outputbuffer, s->perm->action, "If changed");
        else
            Util_printRule(res->outputbuffer, s->perm->action, "If failed %o", s->perm->perm);
        StringBuffer_append(res->outputbuffer, "</td></tr>");
    }
}


static void print_service_rules_uid(HttpResponse res, Service_T s) {
    if (s->uid) {
        StringBuffer_append(res->outputbuffer, "<tr class='rule'><td>UID</td><td>");
        Util_printRule(res->outputbuffer, s->uid->action, "If failed %d", s->uid->uid);
        StringBuffer_append(res->outputbuffer, "</td></tr>");
    }
}


static void print_service_rules_euid(HttpResponse res, Service_T s) {
    if (s->euid) {
        StringBuffer_append(res->outputbuffer, "<tr class='rule'><td>EUID</td><td>");
        Util_printRule(res->outputbuffer, s->euid->action, "If failed %d", s->euid->uid);
        StringBuffer_append(res->outputbuffer, "</td></tr>");
    }
}


static void print_service_rules_secattr(HttpResponse res, Service_T s) {
    for (SecurityAttribute_T a = s->secattrlist; a; a = a->next) {
        StringBuffer_append(res->outputbuffer, "<tr class='rule'><td>Security attribute</td><td>");
        Util_printRule(res->outputbuffer, a->action, "If failed %s", a->attribute);
        StringBuffer_append(res->outputbuffer, "</td></tr>");
    }
}


static void print_service_rules_gid(HttpResponse res, Service_T s) {
    if (s->gid) {
        StringBuffer_append(res->outputbuffer, "<tr class='rule'><td>GID</td><td>");
        Util_printRule(res->outputbuffer, s->gid->action, "If failed %d", s->gid->gid);
        StringBuffer_append(res->outputbuffer, "</td></tr>");
    }
}

static void print_service_rules_timestamp(HttpResponse res, Service_T s) {
    for (Timestamp_T t = s->timestamplist; t; t = t->next) {
        StringBuffer_append(res->outputbuffer, "<tr class='rule'><td>%c%s</td><td>", toupper(timestampnames[t->type][0]), timestampnames[t->type] + 1);
        if (t->test_changes)
            Util_printRule(res->outputbuffer, t->action, "If changed");
        else
            Util_printRule(res->outputbuffer, t->action, "If %s %s", operatornames[t->operator], Fmt_time2str(t->time * 1000., (char[11]){}));
        StringBuffer_append(res->outputbuffer, "</td></tr>");
    }
}


static void print_service_rules_fsflags(HttpResponse res, Service_T s) {
    for (FsFlag_T l = s->fsflaglist; l; l = l->next) {
        StringBuffer_append(res->outputbuffer, "<tr class='rule'><td>Filesystem flags</td><td>");
        Util_printRule(res->outputbuffer, l->action, "If changed");
        StringBuffer_append(res->outputbuffer, "</td></tr>");
    }
}

static void print_service_rules_filesystem(HttpResponse res, Service_T s) {
    for (FileSystem_T dl = s->filesystemlist; dl; dl = dl->next) {
        if (dl->resource == Resource_Inode) {
            StringBuffer_append(res->outputbuffer, "<tr class='rule'><td>Inodes usage limit</td><td>");
            if (dl->limit_absolute > -1)
                Util_printRule(res->outputbuffer, dl->action, "If %s %lld", operatornames[dl->operator], dl->limit_absolute);
            else
                Util_printRule(res->outputbuffer, dl->action, "If %s %.1f%%", operatornames[dl->operator], dl->limit_percent);
            StringBuffer_append(res->outputbuffer, "</td></tr>");
        } else if (dl->resource == Resource_InodeFree) {
            StringBuffer_append(res->outputbuffer, "<tr class='rule'><td>Inodes free limit</td><td>");
            if (dl->limit_absolute > -1)
                Util_printRule(res->outputbuffer, dl->action, "If %s %lld", operatornames[dl->operator], dl->limit_absolute);
            else
                Util_printRule(res->outputbuffer, dl->action, "If %s %.1f%%", operatornames[dl->operator], dl->limit_percent);
            StringBuffer_append(res->outputbuffer, "</td></tr>");
        } else if (dl->resource == Resource_Space) {
            StringBuffer_append(res->outputbuffer, "<tr class='rule'><td>Space usage limit</td><td>");
            if (dl->limit_absolute > -1) {
                Util_printRule(res->outputbuffer, dl->action, "If %s %s", operatornames[dl->operator], Fmt_bytes2str(dl->limit_absolute, (char[10]){}));
            } else {
                Util_printRule(res->outputbuffer, dl->action, "If %s %.1f%%", operatornames[dl->operator], dl->limit_percent);
            }
            StringBuffer_append(res->outputbuffer, "</td></tr>");
        } else if (dl->resource == Resource_SpaceFree) {
            StringBuffer_append(res->outputbuffer, "<tr class='rule'><td>Space free limit</td><td>");
            if (dl->limit_absolute > -1) {
                Util_printRule(res->outputbuffer, dl->action, "If %s %s", operatornames[dl->operator], Fmt_bytes2str(dl->limit_absolute, (char[10]){}));
            } else {
                Util_printRule(res->outputbuffer, dl->action, "If %s %.1f%%", operatornames[dl->operator], dl->limit_percent);
            }
            StringBuffer_append(res->outputbuffer, "</td></tr>");
        } else if (dl->resource == Resource_ReadBytes) {
            StringBuffer_append(res->outputbuffer, "<tr class='rule'><td>Read limit</td><td>");
            Util_printRule(res->outputbuffer, dl->action, "If read %s %s/s", operatornames[dl->operator], Fmt_bytes2str(dl->limit_absolute, (char[10]){}));
            StringBuffer_append(res->outputbuffer, "</td></tr>");
        } else if (dl->resource == Resource_ReadOperations) {
            StringBuffer_append(res->outputbuffer, "<tr class='rule'><td>Read limit</td><td>");
            Util_printRule(res->outputbuffer, dl->action, "If read %s %llu operations/s", operatornames[dl->operator], dl->limit_absolute);
            StringBuffer_append(res->outputbuffer, "</td></tr>");
        } else if (dl->resource == Resource_WriteBytes) {
            StringBuffer_append(res->outputbuffer, "<tr class='rule'><td>Write limit</td><td>");
            Util_printRule(res->outputbuffer, dl->action, "If write %s %s/s", operatornames[dl->operator], Fmt_bytes2str(dl->limit_absolute, (char[10]){}));
            StringBuffer_append(res->outputbuffer, "</td></tr>");
        } else if (dl->resource == Resource_WriteOperations) {
            StringBuffer_append(res->outputbuffer, "<tr class='rule'><td>Write limit</td><td>");
            Util_printRule(res->outputbuffer, dl->action, "If write %s %llu operations/s", operatornames[dl->operator], dl->limit_absolute);
            StringBuffer_append(res->outputbuffer, "</td></tr>");
        } else if (dl->resource == Resource_ServiceTime) {
            StringBuffer_append(res->outputbuffer, "<tr class='rule'><td>Service time limit</td><td>");
            Util_printRule(res->outputbuffer, dl->action, "If service time %s %s/operation", operatornames[dl->operator], Fmt_time2str(dl->limit_absolute, (char[11]){}));
            StringBuffer_append(res->outputbuffer, "</td></tr>");
        }
    }
}

bl->action, "If %s %lld packets/s", operatornames[bl->operator], bl->limit); } else { StringBuffer_append(res->outputbuffer, "<tr class='rule'><td>Total upload packets</td><td>"); Util_printRule(res->outputbuffer, bl->action, "If %s %lld packets in last %d %s(s)", operatornames[bl->operator], bl->limit, bl->rangecount, Util_timestr(bl->range)); } StringBuffer_append(res->outputbuffer, "</td></tr>"); } } static void print_service_rules_downloadbytes(HttpResponse res, Service_T s) { for (Bandwidth_T bl = s->downloadbyteslist; bl; bl = bl->next) { if (bl->range == Time_Second) { StringBuffer_append(res->outputbuffer, "<tr class='rule'><td>Download bytes</td><td>"); Util_printRule(res->outputbuffer, bl->action, "If %s %s/s", operatornames[bl->operator], Fmt_bytes2str(bl->limit, (char[10]){})); } else { StringBuffer_append(res->outputbuffer, "<tr class='rule'><td>Total download bytes</td><td>"); Util_printRule(res->outputbuffer, bl->action, "If %s %s in last %d %s(s)", operatornames[bl->operator], Fmt_bytes2str(bl->limit, (char[10]){}), bl->rangecount, Util_timestr(bl->range)); } StringBuffer_append(res->outputbuffer, "</td></tr>"); } } static void print_service_rules_downloadpackets(HttpResponse res, Service_T s) { for (Bandwidth_T bl = s->downloadpacketslist; bl; bl = bl->next) { if (bl->range == Time_Second) { StringBuffer_append(res->outputbuffer, "<tr class='rule'><td>Download packets</td><td>"); Util_printRule(res->outputbuffer, bl->action, "If %s %lld packets/s", operatornames[bl->operator], bl->limit); } else { StringBuffer_append(res->outputbuffer, "<tr class='rule'><td>Total download packets</td><td>"); Util_printRule(res->outputbuffer, bl->action, "If %s %lld packets in last %d %s(s)", operatornames[bl->operator], bl->limit, bl->rangecount, Util_timestr(bl->range)); } StringBuffer_append(res->outputbuffer, "</td></tr>"); } } static void print_service_rules_uptime(HttpResponse res, Service_T s) { for (Uptime_T ul = s->uptimelist; ul; ul = ul->next) { StringBuffer_append(res->outputbuffer, "<tr class='rule'><td>Uptime</td><td>"); Util_printRule(res->outputbuffer, ul->action, "If %s %s", operatornames[ul->operator], _getUptime(ul->uptime, (char[256]){})); StringBuffer_append(res->outputbuffer, "</td></tr>"); } } static void print_service_rules_content(HttpResponse res, Service_T s) { if (s->type != Service_Process) { for (Match_T ml = s->matchignorelist; ml; ml = ml->next) { StringBuffer_append(res->outputbuffer, "<tr class='rule'><td>Ignore content</td><td>"); Util_printRule(res->outputbuffer, ml->action, "If content %s \"%s\"", ml->not ? "!=" : "=", ml->match_string); StringBuffer_append(res->outputbuffer, "</td></tr>"); } for (Match_T ml = s->matchlist; ml; ml = ml->next) { StringBuffer_append(res->outputbuffer, "<tr class='rule'><td>Content match</td><td>"); Util_printRule(res->outputbuffer, ml->action, "If content %s \"%s\"", ml->not ? 
"!=" : "=", ml->match_string); StringBuffer_append(res->outputbuffer, "</td></tr>"); } } } static void print_service_rules_checksum(HttpResponse res, Service_T s) { if (s->checksum) { StringBuffer_append(res->outputbuffer, "<tr class='rule'><td>Checksum</td><td>"); if (s->checksum->test_changes) Util_printRule(res->outputbuffer, s->checksum->action, "If changed %s", checksumnames[s->checksum->type]); else Util_printRule(res->outputbuffer, s->checksum->action, "If failed %s(%s)", s->checksum->hash, checksumnames[s->checksum->type]); StringBuffer_append(res->outputbuffer, "</td></tr>"); } } static void print_service_rules_pid(HttpResponse res, Service_T s) { for (Pid_T l = s->pidlist; l; l = l->next) { StringBuffer_append(res->outputbuffer, "<tr class='rule'><td>PID</td><td>"); Util_printRule(res->outputbuffer, l->action, "If changed"); StringBuffer_append(res->outputbuffer, "</td></tr>"); } } static void print_service_rules_ppid(HttpResponse res, Service_T s) { for (Pid_T l = s->ppidlist; l; l = l->next) { StringBuffer_append(res->outputbuffer, "<tr class='rule'><td>PPID</td><td>"); Util_printRule(res->outputbuffer, l->action, "If changed"); StringBuffer_append(res->outputbuffer, "</td></tr>"); } } static void print_service_rules_program(HttpResponse res, Service_T s) { if (s->type == Service_Program) { StringBuffer_append(res->outputbuffer, "<tr class='rule'><td>Program timeout</td><td>Terminate the program if not finished within %s</td></tr>", Fmt_time2str(s->program->timeout, (char[11]){})); for (Status_T status = s->statuslist; status; status = status->next) { StringBuffer_append(res->outputbuffer, "<tr class='rule'><td>Test Exit value</td><td>"); if (status->operator == Operator_Changed) Util_printRule(res->outputbuffer, status->action, "If exit value changed"); else Util_printRule(res->outputbuffer, status->action, "If exit value %s %d", operatorshortnames[status->operator], status->return_value); StringBuffer_append(res->outputbuffer, "</td></tr>"); } } } static void print_service_rules_resource(HttpResponse res, Service_T s) { char buf[STRLEN]; for (Resource_T q = s->resourcelist; q; q = q->next) { StringBuffer_append(res->outputbuffer, "<tr class='rule'><td>"); switch (q->resource_id) { case Resource_CpuPercent: StringBuffer_append(res->outputbuffer, "CPU usage limit"); break; case Resource_CpuPercentTotal: StringBuffer_append(res->outputbuffer, "CPU usage limit (incl. 
children)"); break; case Resource_CpuUser: StringBuffer_append(res->outputbuffer, "CPU user limit"); break; case Resource_CpuSystem: StringBuffer_append(res->outputbuffer, "CPU system limit"); break; case Resource_CpuWait: StringBuffer_append(res->outputbuffer, "CPU wait limit"); break; case Resource_MemoryPercent: StringBuffer_append(res->outputbuffer, "Memory usage limit"); break; case Resource_MemoryKbyte: StringBuffer_append(res->outputbuffer, "Memory amount limit"); break; case Resource_SwapPercent: StringBuffer_append(res->outputbuffer, "Swap usage limit"); break; case Resource_SwapKbyte: StringBuffer_append(res->outputbuffer, "Swap amount limit"); break; case Resource_LoadAverage1m: StringBuffer_append(res->outputbuffer, "Load average (1min)"); break; case Resource_LoadAverage5m: StringBuffer_append(res->outputbuffer, "Load average (5min)"); break; case Resource_LoadAverage15m: StringBuffer_append(res->outputbuffer, "Load average (15min)"); break; case Resource_Threads: StringBuffer_append(res->outputbuffer, "Threads"); break; case Resource_Children: StringBuffer_append(res->outputbuffer, "Children"); break; case Resource_MemoryKbyteTotal: StringBuffer_append(res->outputbuffer, "Memory amount limit (incl. children)"); break; case Resource_MemoryPercentTotal: StringBuffer_append(res->outputbuffer, "Memory usage limit (incl. children)"); break; case Resource_ReadBytes: StringBuffer_append(res->outputbuffer, "Disk read limit"); break; case Resource_ReadOperations: StringBuffer_append(res->outputbuffer, "Disk read limit"); break; case Resource_WriteBytes: StringBuffer_append(res->outputbuffer, "Disk write limit"); break; case Resource_WriteOperations: StringBuffer_append(res->outputbuffer, "Disk write limit"); break; default: break; } StringBuffer_append(res->outputbuffer, "</td><td>"); switch (q->resource_id) { case Resource_CpuPercent: case Resource_CpuPercentTotal: case Resource_MemoryPercentTotal: case Resource_CpuUser: case Resource_CpuSystem: case Resource_CpuWait: case Resource_MemoryPercent: case Resource_SwapPercent: Util_printRule(res->outputbuffer, q->action, "If %s %.1f%%", operatornames[q->operator], q->limit); break; case Resource_MemoryKbyte: case Resource_SwapKbyte: case Resource_MemoryKbyteTotal: Util_printRule(res->outputbuffer, q->action, "If %s %s", operatornames[q->operator], Fmt_bytes2str(q->limit, buf)); break; case Resource_LoadAverage1m: case Resource_LoadAverage5m: case Resource_LoadAverage15m: Util_printRule(res->outputbuffer, q->action, "If %s %.1f", operatornames[q->operator], q->limit); break; case Resource_Threads: case Resource_Children: Util_printRule(res->outputbuffer, q->action, "If %s %.0f", operatornames[q->operator], q->limit); break; case Resource_ReadBytes: case Resource_WriteBytes: Util_printRule(res->outputbuffer, q->action, "if %s %s", operatornames[q->operator], Fmt_bytes2str(q->limit, (char[10]){})); break; case Resource_ReadOperations: case Resource_WriteOperations: Util_printRule(res->outputbuffer, q->action, "if %s %.0f operations/s", operatornames[q->operator], q->limit); break; default: break; } StringBuffer_append(res->outputbuffer, "</td></tr>"); } } static boolean_t is_readonly(HttpRequest req) { if (req->remote_user) { Auth_T user_creds = Util_getUserCredentials(req->remote_user); return (user_creds ? user_creds->is_readonly : true); } return false; } /* ----------------------------------------------------------- Status output */ /* Print status in the given format. Text status is default. 
/* Print status in the given format. Text status is default. */
static void print_status(HttpRequest req, HttpResponse res, int version) {
    const char *stringFormat = get_parameter(req, "format");
    if (stringFormat && Str_startsWith(stringFormat, "xml")) {
        char buf[STRLEN];
        StringBuffer_T sb = StringBuffer_create(256);
        status_xml(sb, NULL, version, Socket_getLocalHost(req->S, buf, sizeof(buf)));
        StringBuffer_append(res->outputbuffer, "%s", StringBuffer_toString(sb));
        StringBuffer_free(&sb);
        set_content_type(res, "text/xml");
    } else {
        set_content_type(res, "text/plain");
        StringBuffer_append(res->outputbuffer, "Monit %s uptime: %s\n\n", VERSION, _getUptime(ProcessTree_getProcessUptime(getpid()), (char[256]){}));
        int found = 0;
        const char *stringGroup = Util_urlDecode((char *)get_parameter(req, "group"));
        const char *stringService = Util_urlDecode((char *)get_parameter(req, "service"));
        if (stringGroup) {
            for (ServiceGroup_T sg = servicegrouplist; sg; sg = sg->next) {
                if (IS(stringGroup, sg->name)) {
                    for (list_t m = sg->members->head; m; m = m->next) {
                        status_service_txt(m->e, res);
                        found++;
                    }
                    break;
                }
            }
        } else {
            for (Service_T s = servicelist_conf; s; s = s->next_conf) {
                if (! stringService || IS(stringService, s->name)) {
                    status_service_txt(s, res);
                    found++;
                }
            }
        }
        if (found == 0) {
            if (stringGroup)
                send_error(req, res, SC_BAD_REQUEST, "Service group '%s' not found", stringGroup);
            else if (stringService)
                send_error(req, res, SC_BAD_REQUEST, "Service '%s' not found", stringService);
            else
                send_error(req, res, SC_BAD_REQUEST, "No service found");
        }
    }
}


static void _printServiceSummary(Box_T t, Service_T s) {
    Box_setColumn(t, 1, "%s", s->name);
    Box_setColumn(t, 2, "%s", get_service_status(TXT, s, (char[STRLEN]){}, STRLEN));
    Box_setColumn(t, 3, "%s", servicetypes[s->type]);
    Box_printRow(t);
}


static int _printServiceSummaryByType(Box_T t, Service_Type type) {
    int found = 0;
    for (Service_T s = servicelist_conf; s; s = s->next_conf) {
        if (s->type == type) {
            _printServiceSummary(t, s);
            found++;
        }
    }
    return found;
}

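/*
 * Plain-text summary: one boxed table row per service, optionally filtered
 * by service group or service name; grouped by service type when
 * unfiltered.
 */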
_printServiceSummaryByType(t, Service_Program); } Box_free(&t); if (found == 0) { if (stringGroup) send_error(req, res, SC_BAD_REQUEST, "Service group '%s' not found", stringGroup); else if (stringService) send_error(req, res, SC_BAD_REQUEST, "Service '%s' not found", stringService); else send_error(req, res, SC_BAD_REQUEST, "No service found"); } } static void _printReport(HttpRequest req, HttpResponse res) { set_content_type(res, "text/plain"); const char *type = get_parameter(req, "type"); int count = 0; if (! type) { float up = 0, down = 0, init = 0, unmonitored = 0, total = 0; for (Service_T s = servicelist; s; s = s->next) { if (s->monitor == Monitor_Not) unmonitored++; else if (s->monitor & Monitor_Init) init++; else if (s->error) down++; else up++; total++; } StringBuffer_append(res->outputbuffer, "up: %*.0f (%.1f%%)\n" "down: %*.0f (%.1f%%)\n" "initialising: %*.0f (%.1f%%)\n" "unmonitored: %*.0f (%.1f%%)\n" "total: %*.0f services\n", 3, up, 100. * up / total, 3, down, 100. * down / total, 3, init, 100. * init / total, 3, unmonitored, 100. * unmonitored / total, 3, total); } else if (Str_isEqual(type, "up")) { for (Service_T s = servicelist; s; s = s->next) if (s->monitor != Monitor_Not && ! (s->monitor & Monitor_Init) && ! s->error) count++; StringBuffer_append(res->outputbuffer, "%d\n", count); } else if (Str_isEqual(type, "down")) { for (Service_T s = servicelist; s; s = s->next) if (s->monitor != Monitor_Not && ! (s->monitor & Monitor_Init) && s->error) count++; StringBuffer_append(res->outputbuffer, "%d\n", count); } else if (Str_startsWith(type, "initiali")) { // allow 'initiali(s|z)ing' for (Service_T s = servicelist; s; s = s->next) if (s->monitor & Monitor_Init) count++; StringBuffer_append(res->outputbuffer, "%d\n", count); } else if (Str_isEqual(type, "unmonitored")) { for (Service_T s = servicelist; s; s = s->next) if (s->monitor == Monitor_Not) count++; StringBuffer_append(res->outputbuffer, "%d\n", count); } else if (Str_isEqual(type, "total")) { for (Service_T s = servicelist; s; s = s->next) count++; StringBuffer_append(res->outputbuffer, "%d\n", count); } else { send_error(req, res, SC_BAD_REQUEST, "Invalid report type: '%s'", type); } } static void status_service_txt(Service_T s, HttpResponse res) { char buf[STRLEN]; StringBuffer_append(res->outputbuffer, COLOR_BOLDCYAN "%s '%s'" COLOR_RESET "\n" " %-28s %s\n", servicetypes[s->type], s->name, "status", get_service_status(TXT, s, buf, sizeof(buf))); StringBuffer_append(res->outputbuffer, " %-28s %s\n", "monitoring status", get_monitoring_status(TXT, s, buf, sizeof(buf))); StringBuffer_append(res->outputbuffer, " %-28s %s\n", "monitoring mode", modenames[s->mode]); StringBuffer_append(res->outputbuffer, " %-28s %s\n", "on reboot", onrebootnames[s->onreboot]); _printStatus(TXT, res, s); StringBuffer_append(res->outputbuffer, "\n"); } static char *get_monitoring_status(Output_Type type, Service_T s, char *buf, int buflen) { ASSERT(s); ASSERT(buf); if (s->monitor == Monitor_Not) { if (type == HTML) snprintf(buf, buflen, "<span class='gray-text'>Not monitored</span>"); else snprintf(buf, buflen, Color_lightYellow("Not monitored")); } else if (s->monitor & Monitor_Waiting) { if (type == HTML) snprintf(buf, buflen, "<span>Waiting</span>"); else snprintf(buf, buflen, Color_white("Waiting")); } else if (s->monitor & Monitor_Init) { if (type == HTML) snprintf(buf, buflen, "<span class='blue-text'>Initializing</span>"); else snprintf(buf, buflen, Color_lightBlue("Initializing")); } else if (s->monitor & Monitor_Yes) { if 
(type == HTML) snprintf(buf, buflen, "<span>Monitored</span>"); else snprintf(buf, buflen, "Monitored"); } return buf; } static char *get_service_status(Output_Type type, Service_T s, char *buf, int buflen) { ASSERT(s); ASSERT(buf); if (s->monitor == Monitor_Not || s->monitor & Monitor_Init) { get_monitoring_status(type, s, buf, buflen); } else if (s->error == 0) { snprintf(buf, buflen, type == HTML ? "<span class='green-text'>OK</span>" : Color_lightGreen("OK")); } else { // In the case that the service has actualy some failure, the error bitmap will be non zero char *p = buf; EventTable_T *et = Event_Table; while ((*et).id) { if (s->error & (*et).id) { if (p > buf) p += snprintf(p, buflen - (p - buf), " | "); if (s->error_hint & (*et).id) { if (type == HTML) p += snprintf(p, buflen - (p - buf), "<span class='orange-text'>%s</span>", (*et).description_changed); else p += snprintf(p, buflen - (p - buf), Color_lightYellow("%s", (*et).description_changed)); } else { if (type == HTML) p += snprintf(p, buflen - (p - buf), "<span class='red-text'>%s</span>", (*et).description_failed); else p += snprintf(p, buflen - (p - buf), Color_lightRed("%s", (*et).description_failed)); } } et++; } } if (s->doaction) snprintf(buf + strlen(buf), buflen - strlen(buf) - 1, " - %s pending", actionnames[s->doaction]); return buf; }
static void do_viewlog(HttpRequest req, HttpResponse res) { if (is_readonly(req)) { send_error(req, res, SC_FORBIDDEN, "You do not have sufficient privileges to access this page"); return; } do_head(res, "_viewlog", "View log", 100); if ((Run.flags & Run_Log) && ! (Run.flags & Run_UseSyslog)) { FILE *f = fopen(Run.files.log, "r"); if (f) { size_t n; char buf[512]; StringBuffer_append(res->outputbuffer, "<br><p><form><textarea cols=120 rows=30 readonly>"); while ((n = fread(buf, sizeof(char), sizeof(buf) - 1, f)) > 0) { buf[n] = 0; StringBuffer_append(res->outputbuffer, "%s", buf); } fclose(f); StringBuffer_append(res->outputbuffer, "</textarea></form>"); } else { StringBuffer_append(res->outputbuffer, "Error opening logfile: %s", STRERROR); } } else { StringBuffer_append(res->outputbuffer, "<b>Cannot view logfile:</b><br>"); if (! (Run.flags & Run_Log)) StringBuffer_append(res->outputbuffer, "Monit was started without logging"); else StringBuffer_append(res->outputbuffer, "Monit uses syslog"); } do_foot(res); }
static void do_viewlog(HttpRequest req, HttpResponse res) { if (is_readonly(req)) { send_error(req, res, SC_FORBIDDEN, "You do not have sufficient privileges to access this page"); return; } do_head(res, "_viewlog", "View log", 100); if ((Run.flags & Run_Log) && ! (Run.flags & Run_UseSyslog)) { FILE *f = fopen(Run.files.log, "r"); if (f) { size_t n; char buf[512]; StringBuffer_append(res->outputbuffer, "<br><p><form><textarea cols=120 rows=30 readonly>"); while ((n = fread(buf, sizeof(char), sizeof(buf) - 1, f)) > 0) { buf[n] = 0; escapeHTML(res->outputbuffer, buf); } fclose(f); StringBuffer_append(res->outputbuffer, "</textarea></form>"); } else { StringBuffer_append(res->outputbuffer, "Error opening logfile: %s", STRERROR); } } else { StringBuffer_append(res->outputbuffer, "<b>Cannot view logfile:</b><br>"); if (! (Run.flags & Run_Log)) StringBuffer_append(res->outputbuffer, "Monit was started without logging"); else StringBuffer_append(res->outputbuffer, "Monit uses syslog"); } do_foot(res); }
{'added': [(910, ' escapeHTML(res->outputbuffer, buf);')], 'deleted': [(910, ' StringBuffer_append(res->outputbuffer, "%s", buf);')]}
1
1
2258
20152
https://bitbucket.org/tildeslash/monit
CVE-2019-11454
['CWE-79']
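Note on the record above: the one-line diff (line 910) replaces a raw StringBuffer_append of log-file contents with escapeHTML, closing the cross-site scripting issue (CWE-79, CVE-2019-11454) in Monit's "view log" page, where attacker-influenced log text was embedded into the HTML response unescaped. The sketch below shows only the escaping idea; it is a minimal, hypothetical stand-in that writes to a FILE* rather than Monit's StringBuffer_T, and the name escape_html_sketch is not Monit's actual API.

/* Minimal HTML-escaping sketch (assumed names, not Monit's escapeHTML).
 * Log data can contain attacker-influenced markup, so each significant
 * character is replaced by its HTML entity before being emitted. */
#include <stdio.h>

static void escape_html_sketch(FILE *out, const char *s)
{
        for (; *s; s++) {
                switch (*s) {
                case '<':  fputs("&lt;", out);   break;
                case '>':  fputs("&gt;", out);   break;
                case '&':  fputs("&amp;", out);  break;
                case '"':  fputs("&quot;", out); break;
                case '\'': fputs("&#39;", out);  break;
                default:   fputc(*s, out);       break;
                }
        }
}

int main(void)
{
        /* A log line containing markup is rendered inert in the page. */
        escape_html_sketch(stdout, "<script>alert(1)</script>\n");
        return 0;
}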
ttm_page_alloc.c
ttm_put_pages
/* * Copyright (c) Red Hat Inc. * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sub license, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the * next paragraph) shall be included in all copies or substantial portions * of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Authors: Dave Airlie <airlied@redhat.com> * Jerome Glisse <jglisse@redhat.com> * Pauli Nieminen <suokkos@gmail.com> */ /* simple list based uncached page pool * - Pool collects resently freed pages for reuse * - Use page->lru to keep a free list * - doesn't track currently in use pages */ #define pr_fmt(fmt) "[TTM] " fmt #include <linux/list.h> #include <linux/spinlock.h> #include <linux/highmem.h> #include <linux/mm_types.h> #include <linux/module.h> #include <linux/mm.h> #include <linux/seq_file.h> /* for seq_printf */ #include <linux/slab.h> #include <linux/dma-mapping.h> #include <linux/atomic.h> #include <drm/ttm/ttm_bo_driver.h> #include <drm/ttm/ttm_page_alloc.h> #include <drm/ttm/ttm_set_memory.h> #define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(struct page *)) #define SMALL_ALLOCATION 16 #define FREE_ALL_PAGES (~0U) /* times are in msecs */ #define PAGE_FREE_INTERVAL 1000 /** * struct ttm_page_pool - Pool to reuse recently allocated uc/wc pages. * * @lock: Protects the shared pool from concurrnet access. Must be used with * irqsave/irqrestore variants because pool allocator maybe called from * delayed work. * @fill_lock: Prevent concurrent calls to fill. * @list: Pool of free uc/wc pages for fast reuse. * @gfp_flags: Flags to pass for alloc_page. * @npages: Number of pages in pool. */ struct ttm_page_pool { spinlock_t lock; bool fill_lock; struct list_head list; gfp_t gfp_flags; unsigned npages; char *name; unsigned long nfrees; unsigned long nrefills; unsigned int order; }; /** * Limits for the pool. They are handled without locks because only place where * they may change is in sysfs store. They won't have immediate effect anyway * so forcing serialization to access them is pointless. */ struct ttm_pool_opts { unsigned alloc_size; unsigned max_size; unsigned small; }; #define NUM_POOLS 6 /** * struct ttm_pool_manager - Holds memory pools for fst allocation * * Manager is read only object for pool code so it doesn't need locking. * * @free_interval: minimum number of jiffies between freeing pages from pool. * @page_alloc_inited: reference counting for pool allocation. * @work: Work that is used to shrink the pool. Work is only run when there is * some pages to free. * @small_allocation: Limit in number of pages what is small allocation. * * @pools: All pool objects in use. 
**/ struct ttm_pool_manager { struct kobject kobj; struct shrinker mm_shrink; struct ttm_pool_opts options; union { struct ttm_page_pool pools[NUM_POOLS]; struct { struct ttm_page_pool wc_pool; struct ttm_page_pool uc_pool; struct ttm_page_pool wc_pool_dma32; struct ttm_page_pool uc_pool_dma32; struct ttm_page_pool wc_pool_huge; struct ttm_page_pool uc_pool_huge; } ; }; }; static struct attribute ttm_page_pool_max = { .name = "pool_max_size", .mode = S_IRUGO | S_IWUSR }; static struct attribute ttm_page_pool_small = { .name = "pool_small_allocation", .mode = S_IRUGO | S_IWUSR }; static struct attribute ttm_page_pool_alloc_size = { .name = "pool_allocation_size", .mode = S_IRUGO | S_IWUSR }; static struct attribute *ttm_pool_attrs[] = { &ttm_page_pool_max, &ttm_page_pool_small, &ttm_page_pool_alloc_size, NULL }; static void ttm_pool_kobj_release(struct kobject *kobj) { struct ttm_pool_manager *m = container_of(kobj, struct ttm_pool_manager, kobj); kfree(m); } static ssize_t ttm_pool_store(struct kobject *kobj, struct attribute *attr, const char *buffer, size_t size) { struct ttm_pool_manager *m = container_of(kobj, struct ttm_pool_manager, kobj); int chars; unsigned val; chars = sscanf(buffer, "%u", &val); if (chars == 0) return size; /* Convert kb to number of pages */ val = val / (PAGE_SIZE >> 10); if (attr == &ttm_page_pool_max) m->options.max_size = val; else if (attr == &ttm_page_pool_small) m->options.small = val; else if (attr == &ttm_page_pool_alloc_size) { if (val > NUM_PAGES_TO_ALLOC*8) { pr_err("Setting allocation size to %lu is not allowed. Recommended size is %lu\n", NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7), NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10)); return size; } else if (val > NUM_PAGES_TO_ALLOC) { pr_warn("Setting allocation size to larger than %lu is not recommended\n", NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10)); } m->options.alloc_size = val; } return size; } static ssize_t ttm_pool_show(struct kobject *kobj, struct attribute *attr, char *buffer) { struct ttm_pool_manager *m = container_of(kobj, struct ttm_pool_manager, kobj); unsigned val = 0; if (attr == &ttm_page_pool_max) val = m->options.max_size; else if (attr == &ttm_page_pool_small) val = m->options.small; else if (attr == &ttm_page_pool_alloc_size) val = m->options.alloc_size; val = val * (PAGE_SIZE >> 10); return snprintf(buffer, PAGE_SIZE, "%u\n", val); } static const struct sysfs_ops ttm_pool_sysfs_ops = { .show = &ttm_pool_show, .store = &ttm_pool_store, }; static struct kobj_type ttm_pool_kobj_type = { .release = &ttm_pool_kobj_release, .sysfs_ops = &ttm_pool_sysfs_ops, .default_attrs = ttm_pool_attrs, }; static struct ttm_pool_manager *_manager; /** * Select the right pool or requested caching state and ttm flags. */ static struct ttm_page_pool *ttm_get_pool(int flags, bool huge, enum ttm_caching_state cstate) { int pool_index; if (cstate == tt_cached) return NULL; if (cstate == tt_wc) pool_index = 0x0; else pool_index = 0x1; if (flags & TTM_PAGE_FLAG_DMA32) { if (huge) return NULL; pool_index |= 0x2; } else if (huge) { pool_index |= 0x4; } return &_manager->pools[pool_index]; } /* set memory back to wb and free the pages. 
*/ static void ttm_pages_put(struct page *pages[], unsigned npages, unsigned int order) { unsigned int i, pages_nr = (1 << order); if (order == 0) { if (ttm_set_pages_array_wb(pages, npages)) pr_err("Failed to set %d pages to wb!\n", npages); } for (i = 0; i < npages; ++i) { if (order > 0) { if (ttm_set_pages_wb(pages[i], pages_nr)) pr_err("Failed to set %d pages to wb!\n", pages_nr); } __free_pages(pages[i], order); } } static void ttm_pool_update_free_locked(struct ttm_page_pool *pool, unsigned freed_pages) { pool->npages -= freed_pages; pool->nfrees += freed_pages; } /** * Free pages from pool. * * To prevent hogging the ttm_swap process we only free NUM_PAGES_TO_ALLOC * number of pages in one go. * * @pool: to free the pages from * @free_all: If set to true will free all pages in pool * @use_static: Safe to use static buffer **/ static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free, bool use_static) { static struct page *static_buf[NUM_PAGES_TO_ALLOC]; unsigned long irq_flags; struct page *p; struct page **pages_to_free; unsigned freed_pages = 0, npages_to_free = nr_free; if (NUM_PAGES_TO_ALLOC < nr_free) npages_to_free = NUM_PAGES_TO_ALLOC; if (use_static) pages_to_free = static_buf; else pages_to_free = kmalloc_array(npages_to_free, sizeof(struct page *), GFP_KERNEL); if (!pages_to_free) { pr_debug("Failed to allocate memory for pool free operation\n"); return 0; } restart: spin_lock_irqsave(&pool->lock, irq_flags); list_for_each_entry_reverse(p, &pool->list, lru) { if (freed_pages >= npages_to_free) break; pages_to_free[freed_pages++] = p; /* We can only remove NUM_PAGES_TO_ALLOC at a time. */ if (freed_pages >= NUM_PAGES_TO_ALLOC) { /* remove range of pages from the pool */ __list_del(p->lru.prev, &pool->list); ttm_pool_update_free_locked(pool, freed_pages); /** * Because changing page caching is costly * we unlock the pool to prevent stalling. */ spin_unlock_irqrestore(&pool->lock, irq_flags); ttm_pages_put(pages_to_free, freed_pages, pool->order); if (likely(nr_free != FREE_ALL_PAGES)) nr_free -= freed_pages; if (NUM_PAGES_TO_ALLOC >= nr_free) npages_to_free = nr_free; else npages_to_free = NUM_PAGES_TO_ALLOC; freed_pages = 0; /* free all so restart the processing */ if (nr_free) goto restart; /* Not allowed to fall through or break because * following context is inside spinlock while we are * outside here. */ goto out; } } /* remove range of pages from the pool */ if (freed_pages) { __list_del(&p->lru, &pool->list); ttm_pool_update_free_locked(pool, freed_pages); nr_free -= freed_pages; } spin_unlock_irqrestore(&pool->lock, irq_flags); if (freed_pages) ttm_pages_put(pages_to_free, freed_pages, pool->order); out: if (pages_to_free != static_buf) kfree(pages_to_free); return nr_free; } /** * Callback for mm to request pool to reduce number of page held. * * XXX: (dchinner) Deadlock warning! * * This code is crying out for a shrinker per pool.... 
*/ static unsigned long ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc) { static DEFINE_MUTEX(lock); static unsigned start_pool; unsigned i; unsigned pool_offset; struct ttm_page_pool *pool; int shrink_pages = sc->nr_to_scan; unsigned long freed = 0; unsigned int nr_free_pool; if (!mutex_trylock(&lock)) return SHRINK_STOP; pool_offset = ++start_pool % NUM_POOLS; /* select start pool in round robin fashion */ for (i = 0; i < NUM_POOLS; ++i) { unsigned nr_free = shrink_pages; unsigned page_nr; if (shrink_pages == 0) break; pool = &_manager->pools[(i + pool_offset)%NUM_POOLS]; page_nr = (1 << pool->order); /* OK to use static buffer since global mutex is held. */ nr_free_pool = roundup(nr_free, page_nr) >> pool->order; shrink_pages = ttm_page_pool_free(pool, nr_free_pool, true); freed += (nr_free_pool - shrink_pages) << pool->order; if (freed >= sc->nr_to_scan) break; shrink_pages <<= pool->order; } mutex_unlock(&lock); return freed; } static unsigned long ttm_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc) { unsigned i; unsigned long count = 0; struct ttm_page_pool *pool; for (i = 0; i < NUM_POOLS; ++i) { pool = &_manager->pools[i]; count += (pool->npages << pool->order); } return count; } static int ttm_pool_mm_shrink_init(struct ttm_pool_manager *manager) { manager->mm_shrink.count_objects = ttm_pool_shrink_count; manager->mm_shrink.scan_objects = ttm_pool_shrink_scan; manager->mm_shrink.seeks = 1; return register_shrinker(&manager->mm_shrink); } static void ttm_pool_mm_shrink_fini(struct ttm_pool_manager *manager) { unregister_shrinker(&manager->mm_shrink); } static int ttm_set_pages_caching(struct page **pages, enum ttm_caching_state cstate, unsigned cpages) { int r = 0; /* Set page caching */ switch (cstate) { case tt_uncached: r = ttm_set_pages_array_uc(pages, cpages); if (r) pr_err("Failed to set %d pages to uc!\n", cpages); break; case tt_wc: r = ttm_set_pages_array_wc(pages, cpages); if (r) pr_err("Failed to set %d pages to wc!\n", cpages); break; default: break; } return r; } /** * Free pages the pages that failed to change the caching state. If there is * any pages that have changed their caching state already put them to the * pool. */ static void ttm_handle_caching_state_failure(struct list_head *pages, int ttm_flags, enum ttm_caching_state cstate, struct page **failed_pages, unsigned cpages) { unsigned i; /* Failed pages have to be freed */ for (i = 0; i < cpages; ++i) { list_del(&failed_pages[i]->lru); __free_page(failed_pages[i]); } } /** * Allocate new pages with correct caching. * * This function is reentrant if caller updates count depending on number of * pages returned in pages array. 
*/ static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags, int ttm_flags, enum ttm_caching_state cstate, unsigned count, unsigned order) { struct page **caching_array; struct page *p; int r = 0; unsigned i, j, cpages; unsigned npages = 1 << order; unsigned max_cpages = min(count << order, (unsigned)NUM_PAGES_TO_ALLOC); /* allocate array for page caching change */ caching_array = kmalloc_array(max_cpages, sizeof(struct page *), GFP_KERNEL); if (!caching_array) { pr_debug("Unable to allocate table for new pages\n"); return -ENOMEM; } for (i = 0, cpages = 0; i < count; ++i) { p = alloc_pages(gfp_flags, order); if (!p) { pr_debug("Unable to get page %u\n", i); /* store already allocated pages in the pool after * setting the caching state */ if (cpages) { r = ttm_set_pages_caching(caching_array, cstate, cpages); if (r) ttm_handle_caching_state_failure(pages, ttm_flags, cstate, caching_array, cpages); } r = -ENOMEM; goto out; } list_add(&p->lru, pages); #ifdef CONFIG_HIGHMEM /* gfp flags of highmem page should never be dma32 so we * we should be fine in such case */ if (PageHighMem(p)) continue; #endif for (j = 0; j < npages; ++j) { caching_array[cpages++] = p++; if (cpages == max_cpages) { r = ttm_set_pages_caching(caching_array, cstate, cpages); if (r) { ttm_handle_caching_state_failure(pages, ttm_flags, cstate, caching_array, cpages); goto out; } cpages = 0; } } } if (cpages) { r = ttm_set_pages_caching(caching_array, cstate, cpages); if (r) ttm_handle_caching_state_failure(pages, ttm_flags, cstate, caching_array, cpages); } out: kfree(caching_array); return r; } /** * Fill the given pool if there aren't enough pages and the requested number of * pages is small. */ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool, int ttm_flags, enum ttm_caching_state cstate, unsigned count, unsigned long *irq_flags) { struct page *p; int r; unsigned cpages = 0; /** * Only allow one pool fill operation at a time. * If pool doesn't have enough pages for the allocation new pages are * allocated from outside of pool. */ if (pool->fill_lock) return; pool->fill_lock = true; /* If allocation request is small and there are not enough * pages in a pool we fill the pool up first. */ if (count < _manager->options.small && count > pool->npages) { struct list_head new_pages; unsigned alloc_size = _manager->options.alloc_size; /** * Can't change page caching if in irqsave context. We have to * drop the pool->lock. */ spin_unlock_irqrestore(&pool->lock, *irq_flags); INIT_LIST_HEAD(&new_pages); r = ttm_alloc_new_pages(&new_pages, pool->gfp_flags, ttm_flags, cstate, alloc_size, 0); spin_lock_irqsave(&pool->lock, *irq_flags); if (!r) { list_splice(&new_pages, &pool->list); ++pool->nrefills; pool->npages += alloc_size; } else { pr_debug("Failed to fill pool (%p)\n", pool); /* If we have any pages left put them to the pool. */ list_for_each_entry(p, &new_pages, lru) { ++cpages; } list_splice(&new_pages, &pool->list); pool->npages += cpages; } } pool->fill_lock = false; } /** * Allocate pages from the pool and put them on the return list. * * @return zero for success or negative error code. 
*/ static int ttm_page_pool_get_pages(struct ttm_page_pool *pool, struct list_head *pages, int ttm_flags, enum ttm_caching_state cstate, unsigned count, unsigned order) { unsigned long irq_flags; struct list_head *p; unsigned i; int r = 0; spin_lock_irqsave(&pool->lock, irq_flags); if (!order) ttm_page_pool_fill_locked(pool, ttm_flags, cstate, count, &irq_flags); if (count >= pool->npages) { /* take all pages from the pool */ list_splice_init(&pool->list, pages); count -= pool->npages; pool->npages = 0; goto out; } /* find the last pages to include for requested number of pages. Split * pool to begin and halve it to reduce search space. */ if (count <= pool->npages/2) { i = 0; list_for_each(p, &pool->list) { if (++i == count) break; } } else { i = pool->npages + 1; list_for_each_prev(p, &pool->list) { if (--i == count) break; } } /* Cut 'count' number of pages from the pool */ list_cut_position(pages, &pool->list, p); pool->npages -= count; count = 0; out: spin_unlock_irqrestore(&pool->lock, irq_flags); /* clear the pages coming from the pool if requested */ if (ttm_flags & TTM_PAGE_FLAG_ZERO_ALLOC) { struct page *page; list_for_each_entry(page, pages, lru) { if (PageHighMem(page)) clear_highpage(page); else clear_page(page_address(page)); } } /* If pool didn't have enough pages allocate new one. */ if (count) { gfp_t gfp_flags = pool->gfp_flags; /* set zero flag for page allocation if required */ if (ttm_flags & TTM_PAGE_FLAG_ZERO_ALLOC) gfp_flags |= __GFP_ZERO; if (ttm_flags & TTM_PAGE_FLAG_NO_RETRY) gfp_flags |= __GFP_RETRY_MAYFAIL; /* ttm_alloc_new_pages doesn't reference pool so we can run * multiple requests in parallel. **/ r = ttm_alloc_new_pages(pages, gfp_flags, ttm_flags, cstate, count, order); } return r; } /* Put all pages in pages list to correct pool to wait for reuse */ static void ttm_put_pages(struct page **pages, unsigned npages, int flags, enum ttm_caching_state cstate) { struct ttm_page_pool *pool = ttm_get_pool(flags, false, cstate); #ifdef CONFIG_TRANSPARENT_HUGEPAGE struct ttm_page_pool *huge = ttm_get_pool(flags, true, cstate); #endif unsigned long irq_flags; unsigned i; if (pool == NULL) { /* No pool for this memory type so free the pages */ i = 0; while (i < npages) { #ifdef CONFIG_TRANSPARENT_HUGEPAGE struct page *p = pages[i]; #endif unsigned order = 0, j; if (!pages[i]) { ++i; continue; } #ifdef CONFIG_TRANSPARENT_HUGEPAGE if (!(flags & TTM_PAGE_FLAG_DMA32)) { for (j = 0; j < HPAGE_PMD_NR; ++j) if (p++ != pages[i + j]) break; if (j == HPAGE_PMD_NR) order = HPAGE_PMD_ORDER; } #endif if (page_count(pages[i]) != 1) pr_err("Erroneous page count. 
Leaking pages.\n"); __free_pages(pages[i], order); j = 1 << order; while (j) { pages[i++] = NULL; --j; } } return; } i = 0; #ifdef CONFIG_TRANSPARENT_HUGEPAGE if (huge) { unsigned max_size, n2free; spin_lock_irqsave(&huge->lock, irq_flags); while (i < npages) { struct page *p = pages[i]; unsigned j; if (!p) break; for (j = 0; j < HPAGE_PMD_NR; ++j) if (p++ != pages[i + j]) break; if (j != HPAGE_PMD_NR) break; list_add_tail(&pages[i]->lru, &huge->list); for (j = 0; j < HPAGE_PMD_NR; ++j) pages[i++] = NULL; huge->npages++; } /* Check that we don't go over the pool limit */ max_size = _manager->options.max_size; max_size /= HPAGE_PMD_NR; if (huge->npages > max_size) n2free = huge->npages - max_size; else n2free = 0; spin_unlock_irqrestore(&huge->lock, irq_flags); if (n2free) ttm_page_pool_free(huge, n2free, false); } #endif spin_lock_irqsave(&pool->lock, irq_flags); while (i < npages) { if (pages[i]) { if (page_count(pages[i]) != 1) pr_err("Erroneous page count. Leaking pages.\n"); list_add_tail(&pages[i]->lru, &pool->list); pages[i] = NULL; pool->npages++; } ++i; } /* Check that we don't go over the pool limit */ npages = 0; if (pool->npages > _manager->options.max_size) { npages = pool->npages - _manager->options.max_size; /* free at least NUM_PAGES_TO_ALLOC number of pages * to reduce calls to set_memory_wb */ if (npages < NUM_PAGES_TO_ALLOC) npages = NUM_PAGES_TO_ALLOC; } spin_unlock_irqrestore(&pool->lock, irq_flags); if (npages) ttm_page_pool_free(pool, npages, false); } /* * On success pages list will hold count number of correctly * cached pages. */ static int ttm_get_pages(struct page **pages, unsigned npages, int flags, enum ttm_caching_state cstate) { struct ttm_page_pool *pool = ttm_get_pool(flags, false, cstate); #ifdef CONFIG_TRANSPARENT_HUGEPAGE struct ttm_page_pool *huge = ttm_get_pool(flags, true, cstate); #endif struct list_head plist; struct page *p = NULL; unsigned count, first; int r; /* No pool for cached pages */ if (pool == NULL) { gfp_t gfp_flags = GFP_USER; unsigned i; #ifdef CONFIG_TRANSPARENT_HUGEPAGE unsigned j; #endif /* set zero flag for page allocation if required */ if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) gfp_flags |= __GFP_ZERO; if (flags & TTM_PAGE_FLAG_NO_RETRY) gfp_flags |= __GFP_RETRY_MAYFAIL; if (flags & TTM_PAGE_FLAG_DMA32) gfp_flags |= GFP_DMA32; else gfp_flags |= GFP_HIGHUSER; i = 0; #ifdef CONFIG_TRANSPARENT_HUGEPAGE if (!(gfp_flags & GFP_DMA32)) { while (npages >= HPAGE_PMD_NR) { gfp_t huge_flags = gfp_flags; huge_flags |= GFP_TRANSHUGE_LIGHT | __GFP_NORETRY | __GFP_KSWAPD_RECLAIM; huge_flags &= ~__GFP_MOVABLE; huge_flags &= ~__GFP_COMP; p = alloc_pages(huge_flags, HPAGE_PMD_ORDER); if (!p) break; for (j = 0; j < HPAGE_PMD_NR; ++j) pages[i++] = p++; npages -= HPAGE_PMD_NR; } } #endif first = i; while (npages) { p = alloc_page(gfp_flags); if (!p) { pr_debug("Unable to allocate page\n"); return -ENOMEM; } /* Swap the pages if we detect consecutive order */ if (i > first && pages[i - 1] == p - 1) swap(p, pages[i - 1]); pages[i++] = p; --npages; } return 0; } count = 0; #ifdef CONFIG_TRANSPARENT_HUGEPAGE if (huge && npages >= HPAGE_PMD_NR) { INIT_LIST_HEAD(&plist); ttm_page_pool_get_pages(huge, &plist, flags, cstate, npages / HPAGE_PMD_NR, HPAGE_PMD_ORDER); list_for_each_entry(p, &plist, lru) { unsigned j; for (j = 0; j < HPAGE_PMD_NR; ++j) pages[count++] = &p[j]; } } #endif INIT_LIST_HEAD(&plist); r = ttm_page_pool_get_pages(pool, &plist, flags, cstate, npages - count, 0); first = count; list_for_each_entry(p, &plist, lru) { struct page *tmp = p; /* Swap 
the pages if we detect consecutive order */ if (count > first && pages[count - 1] == tmp - 1) swap(tmp, pages[count - 1]); pages[count++] = tmp; } if (r) { /* If there is any pages in the list put them back to * the pool. */ pr_debug("Failed to allocate extra pages for large request\n"); ttm_put_pages(pages, count, flags, cstate); return r; } return 0; } static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, gfp_t flags, char *name, unsigned int order) { spin_lock_init(&pool->lock); pool->fill_lock = false; INIT_LIST_HEAD(&pool->list); pool->npages = pool->nfrees = 0; pool->gfp_flags = flags; pool->name = name; pool->order = order; } int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages) { int ret; #ifdef CONFIG_TRANSPARENT_HUGEPAGE unsigned order = HPAGE_PMD_ORDER; #else unsigned order = 0; #endif WARN_ON(_manager); pr_info("Initializing pool allocator\n"); _manager = kzalloc(sizeof(*_manager), GFP_KERNEL); if (!_manager) return -ENOMEM; ttm_page_pool_init_locked(&_manager->wc_pool, GFP_HIGHUSER, "wc", 0); ttm_page_pool_init_locked(&_manager->uc_pool, GFP_HIGHUSER, "uc", 0); ttm_page_pool_init_locked(&_manager->wc_pool_dma32, GFP_USER | GFP_DMA32, "wc dma", 0); ttm_page_pool_init_locked(&_manager->uc_pool_dma32, GFP_USER | GFP_DMA32, "uc dma", 0); ttm_page_pool_init_locked(&_manager->wc_pool_huge, (GFP_TRANSHUGE_LIGHT | __GFP_NORETRY | __GFP_KSWAPD_RECLAIM) & ~(__GFP_MOVABLE | __GFP_COMP), "wc huge", order); ttm_page_pool_init_locked(&_manager->uc_pool_huge, (GFP_TRANSHUGE_LIGHT | __GFP_NORETRY | __GFP_KSWAPD_RECLAIM) & ~(__GFP_MOVABLE | __GFP_COMP) , "uc huge", order); _manager->options.max_size = max_pages; _manager->options.small = SMALL_ALLOCATION; _manager->options.alloc_size = NUM_PAGES_TO_ALLOC; ret = kobject_init_and_add(&_manager->kobj, &ttm_pool_kobj_type, &glob->kobj, "pool"); if (unlikely(ret != 0)) goto error; ret = ttm_pool_mm_shrink_init(_manager); if (unlikely(ret != 0)) goto error; return 0; error: kobject_put(&_manager->kobj); _manager = NULL; return ret; } void ttm_page_alloc_fini(void) { int i; pr_info("Finalizing pool allocator\n"); ttm_pool_mm_shrink_fini(_manager); /* OK to use static buffer since global mutex is no longer used. 
*/ for (i = 0; i < NUM_POOLS; ++i) ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES, true); kobject_put(&_manager->kobj); _manager = NULL; } static void ttm_pool_unpopulate_helper(struct ttm_tt *ttm, unsigned mem_count_update) { struct ttm_mem_global *mem_glob = ttm->bdev->glob->mem_glob; unsigned i; if (mem_count_update == 0) goto put_pages; for (i = 0; i < mem_count_update; ++i) { if (!ttm->pages[i]) continue; ttm_mem_global_free_page(mem_glob, ttm->pages[i], PAGE_SIZE); } put_pages: ttm_put_pages(ttm->pages, ttm->num_pages, ttm->page_flags, ttm->caching_state); ttm->state = tt_unpopulated; } int ttm_pool_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx) { struct ttm_mem_global *mem_glob = ttm->bdev->glob->mem_glob; unsigned i; int ret; if (ttm->state != tt_unpopulated) return 0; if (ttm_check_under_lowerlimit(mem_glob, ttm->num_pages, ctx)) return -ENOMEM; ret = ttm_get_pages(ttm->pages, ttm->num_pages, ttm->page_flags, ttm->caching_state); if (unlikely(ret != 0)) { ttm_pool_unpopulate_helper(ttm, 0); return ret; } for (i = 0; i < ttm->num_pages; ++i) { ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i], PAGE_SIZE, ctx); if (unlikely(ret != 0)) { ttm_pool_unpopulate_helper(ttm, i); return -ENOMEM; } } if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) { ret = ttm_tt_swapin(ttm); if (unlikely(ret != 0)) { ttm_pool_unpopulate(ttm); return ret; } } ttm->state = tt_unbound; return 0; } EXPORT_SYMBOL(ttm_pool_populate); void ttm_pool_unpopulate(struct ttm_tt *ttm) { ttm_pool_unpopulate_helper(ttm, ttm->num_pages); } EXPORT_SYMBOL(ttm_pool_unpopulate); int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt, struct ttm_operation_ctx *ctx) { unsigned i, j; int r; r = ttm_pool_populate(&tt->ttm, ctx); if (r) return r; for (i = 0; i < tt->ttm.num_pages; ++i) { struct page *p = tt->ttm.pages[i]; size_t num_pages = 1; for (j = i + 1; j < tt->ttm.num_pages; ++j) { if (++p != tt->ttm.pages[j]) break; ++num_pages; } tt->dma_address[i] = dma_map_page(dev, tt->ttm.pages[i], 0, num_pages * PAGE_SIZE, DMA_BIDIRECTIONAL); if (dma_mapping_error(dev, tt->dma_address[i])) { while (i--) { dma_unmap_page(dev, tt->dma_address[i], PAGE_SIZE, DMA_BIDIRECTIONAL); tt->dma_address[i] = 0; } ttm_pool_unpopulate(&tt->ttm); return -EFAULT; } for (j = 1; j < num_pages; ++j) { tt->dma_address[i + 1] = tt->dma_address[i] + PAGE_SIZE; ++i; } } return 0; } EXPORT_SYMBOL(ttm_populate_and_map_pages); void ttm_unmap_and_unpopulate_pages(struct device *dev, struct ttm_dma_tt *tt) { unsigned i, j; for (i = 0; i < tt->ttm.num_pages;) { struct page *p = tt->ttm.pages[i]; size_t num_pages = 1; if (!tt->dma_address[i] || !tt->ttm.pages[i]) { ++i; continue; } for (j = i + 1; j < tt->ttm.num_pages; ++j) { if (++p != tt->ttm.pages[j]) break; ++num_pages; } dma_unmap_page(dev, tt->dma_address[i], num_pages * PAGE_SIZE, DMA_BIDIRECTIONAL); i += num_pages; } ttm_pool_unpopulate(&tt->ttm); } EXPORT_SYMBOL(ttm_unmap_and_unpopulate_pages); int ttm_page_alloc_debugfs(struct seq_file *m, void *data) { struct ttm_page_pool *p; unsigned i; char *h[] = {"pool", "refills", "pages freed", "size"}; if (!_manager) { seq_printf(m, "No pool allocator running.\n"); return 0; } seq_printf(m, "%7s %12s %13s %8s\n", h[0], h[1], h[2], h[3]); for (i = 0; i < NUM_POOLS; ++i) { p = &_manager->pools[i]; seq_printf(m, "%7s %12ld %13ld %8d\n", p->name, p->nrefills, p->nfrees, p->npages); } return 0; } EXPORT_SYMBOL(ttm_page_alloc_debugfs);
/* * Copyright (c) Red Hat Inc. * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sub license, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the * next paragraph) shall be included in all copies or substantial portions * of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Authors: Dave Airlie <airlied@redhat.com> * Jerome Glisse <jglisse@redhat.com> * Pauli Nieminen <suokkos@gmail.com> */ /* simple list based uncached page pool * - Pool collects resently freed pages for reuse * - Use page->lru to keep a free list * - doesn't track currently in use pages */ #define pr_fmt(fmt) "[TTM] " fmt #include <linux/list.h> #include <linux/spinlock.h> #include <linux/highmem.h> #include <linux/mm_types.h> #include <linux/module.h> #include <linux/mm.h> #include <linux/seq_file.h> /* for seq_printf */ #include <linux/slab.h> #include <linux/dma-mapping.h> #include <linux/atomic.h> #include <drm/ttm/ttm_bo_driver.h> #include <drm/ttm/ttm_page_alloc.h> #include <drm/ttm/ttm_set_memory.h> #define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(struct page *)) #define SMALL_ALLOCATION 16 #define FREE_ALL_PAGES (~0U) /* times are in msecs */ #define PAGE_FREE_INTERVAL 1000 /** * struct ttm_page_pool - Pool to reuse recently allocated uc/wc pages. * * @lock: Protects the shared pool from concurrnet access. Must be used with * irqsave/irqrestore variants because pool allocator maybe called from * delayed work. * @fill_lock: Prevent concurrent calls to fill. * @list: Pool of free uc/wc pages for fast reuse. * @gfp_flags: Flags to pass for alloc_page. * @npages: Number of pages in pool. */ struct ttm_page_pool { spinlock_t lock; bool fill_lock; struct list_head list; gfp_t gfp_flags; unsigned npages; char *name; unsigned long nfrees; unsigned long nrefills; unsigned int order; }; /** * Limits for the pool. They are handled without locks because only place where * they may change is in sysfs store. They won't have immediate effect anyway * so forcing serialization to access them is pointless. */ struct ttm_pool_opts { unsigned alloc_size; unsigned max_size; unsigned small; }; #define NUM_POOLS 6 /** * struct ttm_pool_manager - Holds memory pools for fst allocation * * Manager is read only object for pool code so it doesn't need locking. * * @free_interval: minimum number of jiffies between freeing pages from pool. * @page_alloc_inited: reference counting for pool allocation. * @work: Work that is used to shrink the pool. Work is only run when there is * some pages to free. * @small_allocation: Limit in number of pages what is small allocation. * * @pools: All pool objects in use. 
**/ struct ttm_pool_manager { struct kobject kobj; struct shrinker mm_shrink; struct ttm_pool_opts options; union { struct ttm_page_pool pools[NUM_POOLS]; struct { struct ttm_page_pool wc_pool; struct ttm_page_pool uc_pool; struct ttm_page_pool wc_pool_dma32; struct ttm_page_pool uc_pool_dma32; struct ttm_page_pool wc_pool_huge; struct ttm_page_pool uc_pool_huge; } ; }; }; static struct attribute ttm_page_pool_max = { .name = "pool_max_size", .mode = S_IRUGO | S_IWUSR }; static struct attribute ttm_page_pool_small = { .name = "pool_small_allocation", .mode = S_IRUGO | S_IWUSR }; static struct attribute ttm_page_pool_alloc_size = { .name = "pool_allocation_size", .mode = S_IRUGO | S_IWUSR }; static struct attribute *ttm_pool_attrs[] = { &ttm_page_pool_max, &ttm_page_pool_small, &ttm_page_pool_alloc_size, NULL }; static void ttm_pool_kobj_release(struct kobject *kobj) { struct ttm_pool_manager *m = container_of(kobj, struct ttm_pool_manager, kobj); kfree(m); } static ssize_t ttm_pool_store(struct kobject *kobj, struct attribute *attr, const char *buffer, size_t size) { struct ttm_pool_manager *m = container_of(kobj, struct ttm_pool_manager, kobj); int chars; unsigned val; chars = sscanf(buffer, "%u", &val); if (chars == 0) return size; /* Convert kb to number of pages */ val = val / (PAGE_SIZE >> 10); if (attr == &ttm_page_pool_max) m->options.max_size = val; else if (attr == &ttm_page_pool_small) m->options.small = val; else if (attr == &ttm_page_pool_alloc_size) { if (val > NUM_PAGES_TO_ALLOC*8) { pr_err("Setting allocation size to %lu is not allowed. Recommended size is %lu\n", NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7), NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10)); return size; } else if (val > NUM_PAGES_TO_ALLOC) { pr_warn("Setting allocation size to larger than %lu is not recommended\n", NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10)); } m->options.alloc_size = val; } return size; } static ssize_t ttm_pool_show(struct kobject *kobj, struct attribute *attr, char *buffer) { struct ttm_pool_manager *m = container_of(kobj, struct ttm_pool_manager, kobj); unsigned val = 0; if (attr == &ttm_page_pool_max) val = m->options.max_size; else if (attr == &ttm_page_pool_small) val = m->options.small; else if (attr == &ttm_page_pool_alloc_size) val = m->options.alloc_size; val = val * (PAGE_SIZE >> 10); return snprintf(buffer, PAGE_SIZE, "%u\n", val); } static const struct sysfs_ops ttm_pool_sysfs_ops = { .show = &ttm_pool_show, .store = &ttm_pool_store, }; static struct kobj_type ttm_pool_kobj_type = { .release = &ttm_pool_kobj_release, .sysfs_ops = &ttm_pool_sysfs_ops, .default_attrs = ttm_pool_attrs, }; static struct ttm_pool_manager *_manager; /** * Select the right pool or requested caching state and ttm flags. */ static struct ttm_page_pool *ttm_get_pool(int flags, bool huge, enum ttm_caching_state cstate) { int pool_index; if (cstate == tt_cached) return NULL; if (cstate == tt_wc) pool_index = 0x0; else pool_index = 0x1; if (flags & TTM_PAGE_FLAG_DMA32) { if (huge) return NULL; pool_index |= 0x2; } else if (huge) { pool_index |= 0x4; } return &_manager->pools[pool_index]; } /* set memory back to wb and free the pages. 
*/ static void ttm_pages_put(struct page *pages[], unsigned npages, unsigned int order) { unsigned int i, pages_nr = (1 << order); if (order == 0) { if (ttm_set_pages_array_wb(pages, npages)) pr_err("Failed to set %d pages to wb!\n", npages); } for (i = 0; i < npages; ++i) { if (order > 0) { if (ttm_set_pages_wb(pages[i], pages_nr)) pr_err("Failed to set %d pages to wb!\n", pages_nr); } __free_pages(pages[i], order); } } static void ttm_pool_update_free_locked(struct ttm_page_pool *pool, unsigned freed_pages) { pool->npages -= freed_pages; pool->nfrees += freed_pages; } /** * Free pages from pool. * * To prevent hogging the ttm_swap process we only free NUM_PAGES_TO_ALLOC * number of pages in one go. * * @pool: to free the pages from * @free_all: If set to true will free all pages in pool * @use_static: Safe to use static buffer **/ static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free, bool use_static) { static struct page *static_buf[NUM_PAGES_TO_ALLOC]; unsigned long irq_flags; struct page *p; struct page **pages_to_free; unsigned freed_pages = 0, npages_to_free = nr_free; if (NUM_PAGES_TO_ALLOC < nr_free) npages_to_free = NUM_PAGES_TO_ALLOC; if (use_static) pages_to_free = static_buf; else pages_to_free = kmalloc_array(npages_to_free, sizeof(struct page *), GFP_KERNEL); if (!pages_to_free) { pr_debug("Failed to allocate memory for pool free operation\n"); return 0; } restart: spin_lock_irqsave(&pool->lock, irq_flags); list_for_each_entry_reverse(p, &pool->list, lru) { if (freed_pages >= npages_to_free) break; pages_to_free[freed_pages++] = p; /* We can only remove NUM_PAGES_TO_ALLOC at a time. */ if (freed_pages >= NUM_PAGES_TO_ALLOC) { /* remove range of pages from the pool */ __list_del(p->lru.prev, &pool->list); ttm_pool_update_free_locked(pool, freed_pages); /** * Because changing page caching is costly * we unlock the pool to prevent stalling. */ spin_unlock_irqrestore(&pool->lock, irq_flags); ttm_pages_put(pages_to_free, freed_pages, pool->order); if (likely(nr_free != FREE_ALL_PAGES)) nr_free -= freed_pages; if (NUM_PAGES_TO_ALLOC >= nr_free) npages_to_free = nr_free; else npages_to_free = NUM_PAGES_TO_ALLOC; freed_pages = 0; /* free all so restart the processing */ if (nr_free) goto restart; /* Not allowed to fall through or break because * following context is inside spinlock while we are * outside here. */ goto out; } } /* remove range of pages from the pool */ if (freed_pages) { __list_del(&p->lru, &pool->list); ttm_pool_update_free_locked(pool, freed_pages); nr_free -= freed_pages; } spin_unlock_irqrestore(&pool->lock, irq_flags); if (freed_pages) ttm_pages_put(pages_to_free, freed_pages, pool->order); out: if (pages_to_free != static_buf) kfree(pages_to_free); return nr_free; } /** * Callback for mm to request pool to reduce number of page held. * * XXX: (dchinner) Deadlock warning! * * This code is crying out for a shrinker per pool.... 
*/ static unsigned long ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc) { static DEFINE_MUTEX(lock); static unsigned start_pool; unsigned i; unsigned pool_offset; struct ttm_page_pool *pool; int shrink_pages = sc->nr_to_scan; unsigned long freed = 0; unsigned int nr_free_pool; if (!mutex_trylock(&lock)) return SHRINK_STOP; pool_offset = ++start_pool % NUM_POOLS; /* select start pool in round robin fashion */ for (i = 0; i < NUM_POOLS; ++i) { unsigned nr_free = shrink_pages; unsigned page_nr; if (shrink_pages == 0) break; pool = &_manager->pools[(i + pool_offset)%NUM_POOLS]; page_nr = (1 << pool->order); /* OK to use static buffer since global mutex is held. */ nr_free_pool = roundup(nr_free, page_nr) >> pool->order; shrink_pages = ttm_page_pool_free(pool, nr_free_pool, true); freed += (nr_free_pool - shrink_pages) << pool->order; if (freed >= sc->nr_to_scan) break; shrink_pages <<= pool->order; } mutex_unlock(&lock); return freed; } static unsigned long ttm_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc) { unsigned i; unsigned long count = 0; struct ttm_page_pool *pool; for (i = 0; i < NUM_POOLS; ++i) { pool = &_manager->pools[i]; count += (pool->npages << pool->order); } return count; } static int ttm_pool_mm_shrink_init(struct ttm_pool_manager *manager) { manager->mm_shrink.count_objects = ttm_pool_shrink_count; manager->mm_shrink.scan_objects = ttm_pool_shrink_scan; manager->mm_shrink.seeks = 1; return register_shrinker(&manager->mm_shrink); } static void ttm_pool_mm_shrink_fini(struct ttm_pool_manager *manager) { unregister_shrinker(&manager->mm_shrink); } static int ttm_set_pages_caching(struct page **pages, enum ttm_caching_state cstate, unsigned cpages) { int r = 0; /* Set page caching */ switch (cstate) { case tt_uncached: r = ttm_set_pages_array_uc(pages, cpages); if (r) pr_err("Failed to set %d pages to uc!\n", cpages); break; case tt_wc: r = ttm_set_pages_array_wc(pages, cpages); if (r) pr_err("Failed to set %d pages to wc!\n", cpages); break; default: break; } return r; } /** * Free pages the pages that failed to change the caching state. If there is * any pages that have changed their caching state already put them to the * pool. */ static void ttm_handle_caching_state_failure(struct list_head *pages, int ttm_flags, enum ttm_caching_state cstate, struct page **failed_pages, unsigned cpages) { unsigned i; /* Failed pages have to be freed */ for (i = 0; i < cpages; ++i) { list_del(&failed_pages[i]->lru); __free_page(failed_pages[i]); } } /** * Allocate new pages with correct caching. * * This function is reentrant if caller updates count depending on number of * pages returned in pages array. 
*/ static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags, int ttm_flags, enum ttm_caching_state cstate, unsigned count, unsigned order) { struct page **caching_array; struct page *p; int r = 0; unsigned i, j, cpages; unsigned npages = 1 << order; unsigned max_cpages = min(count << order, (unsigned)NUM_PAGES_TO_ALLOC); /* allocate array for page caching change */ caching_array = kmalloc_array(max_cpages, sizeof(struct page *), GFP_KERNEL); if (!caching_array) { pr_debug("Unable to allocate table for new pages\n"); return -ENOMEM; } for (i = 0, cpages = 0; i < count; ++i) { p = alloc_pages(gfp_flags, order); if (!p) { pr_debug("Unable to get page %u\n", i); /* store already allocated pages in the pool after * setting the caching state */ if (cpages) { r = ttm_set_pages_caching(caching_array, cstate, cpages); if (r) ttm_handle_caching_state_failure(pages, ttm_flags, cstate, caching_array, cpages); } r = -ENOMEM; goto out; } list_add(&p->lru, pages); #ifdef CONFIG_HIGHMEM /* gfp flags of highmem page should never be dma32 so we * we should be fine in such case */ if (PageHighMem(p)) continue; #endif for (j = 0; j < npages; ++j) { caching_array[cpages++] = p++; if (cpages == max_cpages) { r = ttm_set_pages_caching(caching_array, cstate, cpages); if (r) { ttm_handle_caching_state_failure(pages, ttm_flags, cstate, caching_array, cpages); goto out; } cpages = 0; } } } if (cpages) { r = ttm_set_pages_caching(caching_array, cstate, cpages); if (r) ttm_handle_caching_state_failure(pages, ttm_flags, cstate, caching_array, cpages); } out: kfree(caching_array); return r; } /** * Fill the given pool if there aren't enough pages and the requested number of * pages is small. */ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool, int ttm_flags, enum ttm_caching_state cstate, unsigned count, unsigned long *irq_flags) { struct page *p; int r; unsigned cpages = 0; /** * Only allow one pool fill operation at a time. * If pool doesn't have enough pages for the allocation new pages are * allocated from outside of pool. */ if (pool->fill_lock) return; pool->fill_lock = true; /* If allocation request is small and there are not enough * pages in a pool we fill the pool up first. */ if (count < _manager->options.small && count > pool->npages) { struct list_head new_pages; unsigned alloc_size = _manager->options.alloc_size; /** * Can't change page caching if in irqsave context. We have to * drop the pool->lock. */ spin_unlock_irqrestore(&pool->lock, *irq_flags); INIT_LIST_HEAD(&new_pages); r = ttm_alloc_new_pages(&new_pages, pool->gfp_flags, ttm_flags, cstate, alloc_size, 0); spin_lock_irqsave(&pool->lock, *irq_flags); if (!r) { list_splice(&new_pages, &pool->list); ++pool->nrefills; pool->npages += alloc_size; } else { pr_debug("Failed to fill pool (%p)\n", pool); /* If we have any pages left put them to the pool. */ list_for_each_entry(p, &new_pages, lru) { ++cpages; } list_splice(&new_pages, &pool->list); pool->npages += cpages; } } pool->fill_lock = false; } /** * Allocate pages from the pool and put them on the return list. * * @return zero for success or negative error code. 
*/ static int ttm_page_pool_get_pages(struct ttm_page_pool *pool, struct list_head *pages, int ttm_flags, enum ttm_caching_state cstate, unsigned count, unsigned order) { unsigned long irq_flags; struct list_head *p; unsigned i; int r = 0; spin_lock_irqsave(&pool->lock, irq_flags); if (!order) ttm_page_pool_fill_locked(pool, ttm_flags, cstate, count, &irq_flags); if (count >= pool->npages) { /* take all pages from the pool */ list_splice_init(&pool->list, pages); count -= pool->npages; pool->npages = 0; goto out; } /* find the last pages to include for requested number of pages. Split * pool to begin and halve it to reduce search space. */ if (count <= pool->npages/2) { i = 0; list_for_each(p, &pool->list) { if (++i == count) break; } } else { i = pool->npages + 1; list_for_each_prev(p, &pool->list) { if (--i == count) break; } } /* Cut 'count' number of pages from the pool */ list_cut_position(pages, &pool->list, p); pool->npages -= count; count = 0; out: spin_unlock_irqrestore(&pool->lock, irq_flags); /* clear the pages coming from the pool if requested */ if (ttm_flags & TTM_PAGE_FLAG_ZERO_ALLOC) { struct page *page; list_for_each_entry(page, pages, lru) { if (PageHighMem(page)) clear_highpage(page); else clear_page(page_address(page)); } } /* If pool didn't have enough pages allocate new one. */ if (count) { gfp_t gfp_flags = pool->gfp_flags; /* set zero flag for page allocation if required */ if (ttm_flags & TTM_PAGE_FLAG_ZERO_ALLOC) gfp_flags |= __GFP_ZERO; if (ttm_flags & TTM_PAGE_FLAG_NO_RETRY) gfp_flags |= __GFP_RETRY_MAYFAIL; /* ttm_alloc_new_pages doesn't reference pool so we can run * multiple requests in parallel. **/ r = ttm_alloc_new_pages(pages, gfp_flags, ttm_flags, cstate, count, order); } return r; } /* Put all pages in pages list to correct pool to wait for reuse */ static void ttm_put_pages(struct page **pages, unsigned npages, int flags, enum ttm_caching_state cstate) { struct ttm_page_pool *pool = ttm_get_pool(flags, false, cstate); #ifdef CONFIG_TRANSPARENT_HUGEPAGE struct ttm_page_pool *huge = ttm_get_pool(flags, true, cstate); #endif unsigned long irq_flags; unsigned i; if (pool == NULL) { /* No pool for this memory type so free the pages */ i = 0; while (i < npages) { #ifdef CONFIG_TRANSPARENT_HUGEPAGE struct page *p = pages[i]; #endif unsigned order = 0, j; if (!pages[i]) { ++i; continue; } #ifdef CONFIG_TRANSPARENT_HUGEPAGE if (!(flags & TTM_PAGE_FLAG_DMA32) && (npages - i) >= HPAGE_PMD_NR) { for (j = 0; j < HPAGE_PMD_NR; ++j) if (p++ != pages[i + j]) break; if (j == HPAGE_PMD_NR) order = HPAGE_PMD_ORDER; } #endif if (page_count(pages[i]) != 1) pr_err("Erroneous page count. 
Leaking pages.\n"); __free_pages(pages[i], order); j = 1 << order; while (j) { pages[i++] = NULL; --j; } } return; } i = 0; #ifdef CONFIG_TRANSPARENT_HUGEPAGE if (huge) { unsigned max_size, n2free; spin_lock_irqsave(&huge->lock, irq_flags); while ((npages - i) >= HPAGE_PMD_NR) { struct page *p = pages[i]; unsigned j; if (!p) break; for (j = 0; j < HPAGE_PMD_NR; ++j) if (p++ != pages[i + j]) break; if (j != HPAGE_PMD_NR) break; list_add_tail(&pages[i]->lru, &huge->list); for (j = 0; j < HPAGE_PMD_NR; ++j) pages[i++] = NULL; huge->npages++; } /* Check that we don't go over the pool limit */ max_size = _manager->options.max_size; max_size /= HPAGE_PMD_NR; if (huge->npages > max_size) n2free = huge->npages - max_size; else n2free = 0; spin_unlock_irqrestore(&huge->lock, irq_flags); if (n2free) ttm_page_pool_free(huge, n2free, false); } #endif spin_lock_irqsave(&pool->lock, irq_flags); while (i < npages) { if (pages[i]) { if (page_count(pages[i]) != 1) pr_err("Erroneous page count. Leaking pages.\n"); list_add_tail(&pages[i]->lru, &pool->list); pages[i] = NULL; pool->npages++; } ++i; } /* Check that we don't go over the pool limit */ npages = 0; if (pool->npages > _manager->options.max_size) { npages = pool->npages - _manager->options.max_size; /* free at least NUM_PAGES_TO_ALLOC number of pages * to reduce calls to set_memory_wb */ if (npages < NUM_PAGES_TO_ALLOC) npages = NUM_PAGES_TO_ALLOC; } spin_unlock_irqrestore(&pool->lock, irq_flags); if (npages) ttm_page_pool_free(pool, npages, false); } /* * On success pages list will hold count number of correctly * cached pages. */ static int ttm_get_pages(struct page **pages, unsigned npages, int flags, enum ttm_caching_state cstate) { struct ttm_page_pool *pool = ttm_get_pool(flags, false, cstate); #ifdef CONFIG_TRANSPARENT_HUGEPAGE struct ttm_page_pool *huge = ttm_get_pool(flags, true, cstate); #endif struct list_head plist; struct page *p = NULL; unsigned count, first; int r; /* No pool for cached pages */ if (pool == NULL) { gfp_t gfp_flags = GFP_USER; unsigned i; #ifdef CONFIG_TRANSPARENT_HUGEPAGE unsigned j; #endif /* set zero flag for page allocation if required */ if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) gfp_flags |= __GFP_ZERO; if (flags & TTM_PAGE_FLAG_NO_RETRY) gfp_flags |= __GFP_RETRY_MAYFAIL; if (flags & TTM_PAGE_FLAG_DMA32) gfp_flags |= GFP_DMA32; else gfp_flags |= GFP_HIGHUSER; i = 0; #ifdef CONFIG_TRANSPARENT_HUGEPAGE if (!(gfp_flags & GFP_DMA32)) { while (npages >= HPAGE_PMD_NR) { gfp_t huge_flags = gfp_flags; huge_flags |= GFP_TRANSHUGE_LIGHT | __GFP_NORETRY | __GFP_KSWAPD_RECLAIM; huge_flags &= ~__GFP_MOVABLE; huge_flags &= ~__GFP_COMP; p = alloc_pages(huge_flags, HPAGE_PMD_ORDER); if (!p) break; for (j = 0; j < HPAGE_PMD_NR; ++j) pages[i++] = p++; npages -= HPAGE_PMD_NR; } } #endif first = i; while (npages) { p = alloc_page(gfp_flags); if (!p) { pr_debug("Unable to allocate page\n"); return -ENOMEM; } /* Swap the pages if we detect consecutive order */ if (i > first && pages[i - 1] == p - 1) swap(p, pages[i - 1]); pages[i++] = p; --npages; } return 0; } count = 0; #ifdef CONFIG_TRANSPARENT_HUGEPAGE if (huge && npages >= HPAGE_PMD_NR) { INIT_LIST_HEAD(&plist); ttm_page_pool_get_pages(huge, &plist, flags, cstate, npages / HPAGE_PMD_NR, HPAGE_PMD_ORDER); list_for_each_entry(p, &plist, lru) { unsigned j; for (j = 0; j < HPAGE_PMD_NR; ++j) pages[count++] = &p[j]; } } #endif INIT_LIST_HEAD(&plist); r = ttm_page_pool_get_pages(pool, &plist, flags, cstate, npages - count, 0); first = count; list_for_each_entry(p, &plist, lru) { struct page 
*tmp = p; /* Swap the pages if we detect consecutive order */ if (count > first && pages[count - 1] == tmp - 1) swap(tmp, pages[count - 1]); pages[count++] = tmp; } if (r) { /* If there is any pages in the list put them back to * the pool. */ pr_debug("Failed to allocate extra pages for large request\n"); ttm_put_pages(pages, count, flags, cstate); return r; } return 0; } static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, gfp_t flags, char *name, unsigned int order) { spin_lock_init(&pool->lock); pool->fill_lock = false; INIT_LIST_HEAD(&pool->list); pool->npages = pool->nfrees = 0; pool->gfp_flags = flags; pool->name = name; pool->order = order; } int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages) { int ret; #ifdef CONFIG_TRANSPARENT_HUGEPAGE unsigned order = HPAGE_PMD_ORDER; #else unsigned order = 0; #endif WARN_ON(_manager); pr_info("Initializing pool allocator\n"); _manager = kzalloc(sizeof(*_manager), GFP_KERNEL); if (!_manager) return -ENOMEM; ttm_page_pool_init_locked(&_manager->wc_pool, GFP_HIGHUSER, "wc", 0); ttm_page_pool_init_locked(&_manager->uc_pool, GFP_HIGHUSER, "uc", 0); ttm_page_pool_init_locked(&_manager->wc_pool_dma32, GFP_USER | GFP_DMA32, "wc dma", 0); ttm_page_pool_init_locked(&_manager->uc_pool_dma32, GFP_USER | GFP_DMA32, "uc dma", 0); ttm_page_pool_init_locked(&_manager->wc_pool_huge, (GFP_TRANSHUGE_LIGHT | __GFP_NORETRY | __GFP_KSWAPD_RECLAIM) & ~(__GFP_MOVABLE | __GFP_COMP), "wc huge", order); ttm_page_pool_init_locked(&_manager->uc_pool_huge, (GFP_TRANSHUGE_LIGHT | __GFP_NORETRY | __GFP_KSWAPD_RECLAIM) & ~(__GFP_MOVABLE | __GFP_COMP) , "uc huge", order); _manager->options.max_size = max_pages; _manager->options.small = SMALL_ALLOCATION; _manager->options.alloc_size = NUM_PAGES_TO_ALLOC; ret = kobject_init_and_add(&_manager->kobj, &ttm_pool_kobj_type, &glob->kobj, "pool"); if (unlikely(ret != 0)) goto error; ret = ttm_pool_mm_shrink_init(_manager); if (unlikely(ret != 0)) goto error; return 0; error: kobject_put(&_manager->kobj); _manager = NULL; return ret; } void ttm_page_alloc_fini(void) { int i; pr_info("Finalizing pool allocator\n"); ttm_pool_mm_shrink_fini(_manager); /* OK to use static buffer since global mutex is no longer used. 
*/ for (i = 0; i < NUM_POOLS; ++i) ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES, true); kobject_put(&_manager->kobj); _manager = NULL; } static void ttm_pool_unpopulate_helper(struct ttm_tt *ttm, unsigned mem_count_update) { struct ttm_mem_global *mem_glob = ttm->bdev->glob->mem_glob; unsigned i; if (mem_count_update == 0) goto put_pages; for (i = 0; i < mem_count_update; ++i) { if (!ttm->pages[i]) continue; ttm_mem_global_free_page(mem_glob, ttm->pages[i], PAGE_SIZE); } put_pages: ttm_put_pages(ttm->pages, ttm->num_pages, ttm->page_flags, ttm->caching_state); ttm->state = tt_unpopulated; } int ttm_pool_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx) { struct ttm_mem_global *mem_glob = ttm->bdev->glob->mem_glob; unsigned i; int ret; if (ttm->state != tt_unpopulated) return 0; if (ttm_check_under_lowerlimit(mem_glob, ttm->num_pages, ctx)) return -ENOMEM; ret = ttm_get_pages(ttm->pages, ttm->num_pages, ttm->page_flags, ttm->caching_state); if (unlikely(ret != 0)) { ttm_pool_unpopulate_helper(ttm, 0); return ret; } for (i = 0; i < ttm->num_pages; ++i) { ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i], PAGE_SIZE, ctx); if (unlikely(ret != 0)) { ttm_pool_unpopulate_helper(ttm, i); return -ENOMEM; } } if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) { ret = ttm_tt_swapin(ttm); if (unlikely(ret != 0)) { ttm_pool_unpopulate(ttm); return ret; } } ttm->state = tt_unbound; return 0; } EXPORT_SYMBOL(ttm_pool_populate); void ttm_pool_unpopulate(struct ttm_tt *ttm) { ttm_pool_unpopulate_helper(ttm, ttm->num_pages); } EXPORT_SYMBOL(ttm_pool_unpopulate); int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt, struct ttm_operation_ctx *ctx) { unsigned i, j; int r; r = ttm_pool_populate(&tt->ttm, ctx); if (r) return r; for (i = 0; i < tt->ttm.num_pages; ++i) { struct page *p = tt->ttm.pages[i]; size_t num_pages = 1; for (j = i + 1; j < tt->ttm.num_pages; ++j) { if (++p != tt->ttm.pages[j]) break; ++num_pages; } tt->dma_address[i] = dma_map_page(dev, tt->ttm.pages[i], 0, num_pages * PAGE_SIZE, DMA_BIDIRECTIONAL); if (dma_mapping_error(dev, tt->dma_address[i])) { while (i--) { dma_unmap_page(dev, tt->dma_address[i], PAGE_SIZE, DMA_BIDIRECTIONAL); tt->dma_address[i] = 0; } ttm_pool_unpopulate(&tt->ttm); return -EFAULT; } for (j = 1; j < num_pages; ++j) { tt->dma_address[i + 1] = tt->dma_address[i] + PAGE_SIZE; ++i; } } return 0; } EXPORT_SYMBOL(ttm_populate_and_map_pages); void ttm_unmap_and_unpopulate_pages(struct device *dev, struct ttm_dma_tt *tt) { unsigned i, j; for (i = 0; i < tt->ttm.num_pages;) { struct page *p = tt->ttm.pages[i]; size_t num_pages = 1; if (!tt->dma_address[i] || !tt->ttm.pages[i]) { ++i; continue; } for (j = i + 1; j < tt->ttm.num_pages; ++j) { if (++p != tt->ttm.pages[j]) break; ++num_pages; } dma_unmap_page(dev, tt->dma_address[i], num_pages * PAGE_SIZE, DMA_BIDIRECTIONAL); i += num_pages; } ttm_pool_unpopulate(&tt->ttm); } EXPORT_SYMBOL(ttm_unmap_and_unpopulate_pages); int ttm_page_alloc_debugfs(struct seq_file *m, void *data) { struct ttm_page_pool *p; unsigned i; char *h[] = {"pool", "refills", "pages freed", "size"}; if (!_manager) { seq_printf(m, "No pool allocator running.\n"); return 0; } seq_printf(m, "%7s %12s %13s %8s\n", h[0], h[1], h[2], h[3]); for (i = 0; i < NUM_POOLS; ++i) { p = &_manager->pools[i]; seq_printf(m, "%7s %12ld %13ld %8d\n", p->name, p->nrefills, p->nfrees, p->npages); } return 0; } EXPORT_SYMBOL(ttm_page_alloc_debugfs);
static void ttm_put_pages(struct page **pages, unsigned npages, int flags, enum ttm_caching_state cstate) { struct ttm_page_pool *pool = ttm_get_pool(flags, false, cstate); #ifdef CONFIG_TRANSPARENT_HUGEPAGE struct ttm_page_pool *huge = ttm_get_pool(flags, true, cstate); #endif unsigned long irq_flags; unsigned i; if (pool == NULL) { /* No pool for this memory type so free the pages */ i = 0; while (i < npages) { #ifdef CONFIG_TRANSPARENT_HUGEPAGE struct page *p = pages[i]; #endif unsigned order = 0, j; if (!pages[i]) { ++i; continue; } #ifdef CONFIG_TRANSPARENT_HUGEPAGE if (!(flags & TTM_PAGE_FLAG_DMA32)) { for (j = 0; j < HPAGE_PMD_NR; ++j) if (p++ != pages[i + j]) break; if (j == HPAGE_PMD_NR) order = HPAGE_PMD_ORDER; } #endif if (page_count(pages[i]) != 1) pr_err("Erroneous page count. Leaking pages.\n"); __free_pages(pages[i], order); j = 1 << order; while (j) { pages[i++] = NULL; --j; } } return; } i = 0; #ifdef CONFIG_TRANSPARENT_HUGEPAGE if (huge) { unsigned max_size, n2free; spin_lock_irqsave(&huge->lock, irq_flags); while (i < npages) { struct page *p = pages[i]; unsigned j; if (!p) break; for (j = 0; j < HPAGE_PMD_NR; ++j) if (p++ != pages[i + j]) break; if (j != HPAGE_PMD_NR) break; list_add_tail(&pages[i]->lru, &huge->list); for (j = 0; j < HPAGE_PMD_NR; ++j) pages[i++] = NULL; huge->npages++; } /* Check that we don't go over the pool limit */ max_size = _manager->options.max_size; max_size /= HPAGE_PMD_NR; if (huge->npages > max_size) n2free = huge->npages - max_size; else n2free = 0; spin_unlock_irqrestore(&huge->lock, irq_flags); if (n2free) ttm_page_pool_free(huge, n2free, false); } #endif spin_lock_irqsave(&pool->lock, irq_flags); while (i < npages) { if (pages[i]) { if (page_count(pages[i]) != 1) pr_err("Erroneous page count. Leaking pages.\n"); list_add_tail(&pages[i]->lru, &pool->list); pages[i] = NULL; pool->npages++; } ++i; } /* Check that we don't go over the pool limit */ npages = 0; if (pool->npages > _manager->options.max_size) { npages = pool->npages - _manager->options.max_size; /* free at least NUM_PAGES_TO_ALLOC number of pages * to reduce calls to set_memory_wb */ if (npages < NUM_PAGES_TO_ALLOC) npages = NUM_PAGES_TO_ALLOC; } spin_unlock_irqrestore(&pool->lock, irq_flags); if (npages) ttm_page_pool_free(pool, npages, false); }
static void ttm_put_pages(struct page **pages, unsigned npages, int flags, enum ttm_caching_state cstate) { struct ttm_page_pool *pool = ttm_get_pool(flags, false, cstate); #ifdef CONFIG_TRANSPARENT_HUGEPAGE struct ttm_page_pool *huge = ttm_get_pool(flags, true, cstate); #endif unsigned long irq_flags; unsigned i; if (pool == NULL) { /* No pool for this memory type so free the pages */ i = 0; while (i < npages) { #ifdef CONFIG_TRANSPARENT_HUGEPAGE struct page *p = pages[i]; #endif unsigned order = 0, j; if (!pages[i]) { ++i; continue; } #ifdef CONFIG_TRANSPARENT_HUGEPAGE if (!(flags & TTM_PAGE_FLAG_DMA32) && (npages - i) >= HPAGE_PMD_NR) { for (j = 0; j < HPAGE_PMD_NR; ++j) if (p++ != pages[i + j]) break; if (j == HPAGE_PMD_NR) order = HPAGE_PMD_ORDER; } #endif if (page_count(pages[i]) != 1) pr_err("Erroneous page count. Leaking pages.\n"); __free_pages(pages[i], order); j = 1 << order; while (j) { pages[i++] = NULL; --j; } } return; } i = 0; #ifdef CONFIG_TRANSPARENT_HUGEPAGE if (huge) { unsigned max_size, n2free; spin_lock_irqsave(&huge->lock, irq_flags); while ((npages - i) >= HPAGE_PMD_NR) { struct page *p = pages[i]; unsigned j; if (!p) break; for (j = 0; j < HPAGE_PMD_NR; ++j) if (p++ != pages[i + j]) break; if (j != HPAGE_PMD_NR) break; list_add_tail(&pages[i]->lru, &huge->list); for (j = 0; j < HPAGE_PMD_NR; ++j) pages[i++] = NULL; huge->npages++; } /* Check that we don't go over the pool limit */ max_size = _manager->options.max_size; max_size /= HPAGE_PMD_NR; if (huge->npages > max_size) n2free = huge->npages - max_size; else n2free = 0; spin_unlock_irqrestore(&huge->lock, irq_flags); if (n2free) ttm_page_pool_free(huge, n2free, false); } #endif spin_lock_irqsave(&pool->lock, irq_flags); while (i < npages) { if (pages[i]) { if (page_count(pages[i]) != 1) pr_err("Erroneous page count. Leaking pages.\n"); list_add_tail(&pages[i]->lru, &pool->list); pages[i] = NULL; pool->npages++; } ++i; } /* Check that we don't go over the pool limit */ npages = 0; if (pool->npages > _manager->options.max_size) { npages = pool->npages - _manager->options.max_size; /* free at least NUM_PAGES_TO_ALLOC number of pages * to reduce calls to set_memory_wb */ if (npages < NUM_PAGES_TO_ALLOC) npages = NUM_PAGES_TO_ALLOC; } spin_unlock_irqrestore(&pool->lock, irq_flags); if (npages) ttm_page_pool_free(pool, npages, false); }
{'added': [(733, '\t\t\tif (!(flags & TTM_PAGE_FLAG_DMA32) &&'), (734, '\t\t\t (npages - i) >= HPAGE_PMD_NR) {'), (763, '\t\twhile ((npages - i) >= HPAGE_PMD_NR) {')], 'deleted': [(733, '\t\t\tif (!(flags & TTM_PAGE_FLAG_DMA32)) {'), (762, '\t\twhile (i < npages) {')]}
3
2
819
4746
https://github.com/torvalds/linux
CVE-2019-19927
['CWE-125']
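The record above (CVE-2019-19927, CWE-125) fixes an out-of-bounds read in ttm_put_pages(): both huge-page paths scanned HPAGE_PMD_NR entries of pages[] whenever any entries remained, so pages[i + j] could index past the end of the array, and the patch adds a (npages - i) >= HPAGE_PMD_NR guard before each scan. Below is a minimal user-space sketch of that guard, not kernel code: RUN_LEN stands in for HPAGE_PMD_NR, char pointers stand in for struct page *, and is_contiguous_run() is a hypothetical helper.

/*
 * Sketch of the bounds check added for CVE-2019-19927 (CWE-125).
 * All names here are illustrative, not part of the TTM API.
 */
#include <stdbool.h>
#include <stddef.h>

#define RUN_LEN 512 /* stand-in for HPAGE_PMD_NR */

static bool is_contiguous_run(char **pages, size_t npages, size_t i)
{
        size_t j;

        /*
         * The pre-fix loops started this scan whenever i < npages, so
         * pages[i + j] could read up to RUN_LEN - 1 slots past the end
         * of the array. This guard is the patch's fix: only scan when
         * a full run remains (the caller guarantees i < npages, so the
         * subtraction cannot wrap).
         */
        if (npages - i < RUN_LEN)
                return false;

        /* A run is "contiguous" when each entry points one past the last. */
        for (j = 1; j < RUN_LEN; j++)
                if (pages[i + j] != pages[i] + j)
                        return false;
        return true;
}

int main(void)
{
        static char buf[2 * RUN_LEN];
        static char *pages[2 * RUN_LEN];
        size_t k;

        for (k = 0; k < 2 * RUN_LEN; k++)
                pages[k] = buf + k; /* one fully contiguous region */

        /* An in-bounds run is found; a start too near the end is rejected. */
        return (is_contiguous_run(pages, 2 * RUN_LEN, 0) &&
                !is_contiguous_run(pages, 2 * RUN_LEN, RUN_LEN + 1)) ? 0 : 1;
}

The point mirrored from the diff is that the length check moves in front of the inner scan, so the scan body never needs its own bounds test.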
nodemanager.c
o2nm_node_ipv4_address_store
/* -*- mode: c; c-basic-offset: 8; -*- * vim: noexpandtab sw=8 ts=8 sts=0: * * Copyright (C) 2004, 2005 Oracle. All rights reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this program; if not, write to the * Free Software Foundation, Inc., 59 Temple Place - Suite 330, * Boston, MA 021110-1307, USA. */ #include <linux/slab.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/configfs.h> #include "tcp.h" #include "nodemanager.h" #include "heartbeat.h" #include "masklog.h" #include "sys.h" /* for now we operate under the assertion that there can be only one * cluster active at a time. Changing this will require trickling * cluster references throughout where nodes are looked up */ struct o2nm_cluster *o2nm_single_cluster = NULL; char *o2nm_fence_method_desc[O2NM_FENCE_METHODS] = { "reset", /* O2NM_FENCE_RESET */ "panic", /* O2NM_FENCE_PANIC */ }; struct o2nm_node *o2nm_get_node_by_num(u8 node_num) { struct o2nm_node *node = NULL; if (node_num >= O2NM_MAX_NODES || o2nm_single_cluster == NULL) goto out; read_lock(&o2nm_single_cluster->cl_nodes_lock); node = o2nm_single_cluster->cl_nodes[node_num]; if (node) config_item_get(&node->nd_item); read_unlock(&o2nm_single_cluster->cl_nodes_lock); out: return node; } EXPORT_SYMBOL_GPL(o2nm_get_node_by_num); int o2nm_configured_node_map(unsigned long *map, unsigned bytes) { struct o2nm_cluster *cluster = o2nm_single_cluster; BUG_ON(bytes < (sizeof(cluster->cl_nodes_bitmap))); if (cluster == NULL) return -EINVAL; read_lock(&cluster->cl_nodes_lock); memcpy(map, cluster->cl_nodes_bitmap, sizeof(cluster->cl_nodes_bitmap)); read_unlock(&cluster->cl_nodes_lock); return 0; } EXPORT_SYMBOL_GPL(o2nm_configured_node_map); static struct o2nm_node *o2nm_node_ip_tree_lookup(struct o2nm_cluster *cluster, __be32 ip_needle, struct rb_node ***ret_p, struct rb_node **ret_parent) { struct rb_node **p = &cluster->cl_node_ip_tree.rb_node; struct rb_node *parent = NULL; struct o2nm_node *node, *ret = NULL; while (*p) { int cmp; parent = *p; node = rb_entry(parent, struct o2nm_node, nd_ip_node); cmp = memcmp(&ip_needle, &node->nd_ipv4_address, sizeof(ip_needle)); if (cmp < 0) p = &(*p)->rb_left; else if (cmp > 0) p = &(*p)->rb_right; else { ret = node; break; } } if (ret_p != NULL) *ret_p = p; if (ret_parent != NULL) *ret_parent = parent; return ret; } struct o2nm_node *o2nm_get_node_by_ip(__be32 addr) { struct o2nm_node *node = NULL; struct o2nm_cluster *cluster = o2nm_single_cluster; if (cluster == NULL) goto out; read_lock(&cluster->cl_nodes_lock); node = o2nm_node_ip_tree_lookup(cluster, addr, NULL, NULL); if (node) config_item_get(&node->nd_item); read_unlock(&cluster->cl_nodes_lock); out: return node; } EXPORT_SYMBOL_GPL(o2nm_get_node_by_ip); void o2nm_node_put(struct o2nm_node *node) { config_item_put(&node->nd_item); } EXPORT_SYMBOL_GPL(o2nm_node_put); void o2nm_node_get(struct o2nm_node *node) { config_item_get(&node->nd_item); } EXPORT_SYMBOL_GPL(o2nm_node_get); u8 o2nm_this_node(void) { u8 node_num = O2NM_MAX_NODES; if 
(o2nm_single_cluster && o2nm_single_cluster->cl_has_local) node_num = o2nm_single_cluster->cl_local_node; return node_num; } EXPORT_SYMBOL_GPL(o2nm_this_node); /* node configfs bits */ static struct o2nm_cluster *to_o2nm_cluster(struct config_item *item) { return item ? container_of(to_config_group(item), struct o2nm_cluster, cl_group) : NULL; } static struct o2nm_node *to_o2nm_node(struct config_item *item) { return item ? container_of(item, struct o2nm_node, nd_item) : NULL; } static void o2nm_node_release(struct config_item *item) { struct o2nm_node *node = to_o2nm_node(item); kfree(node); } static ssize_t o2nm_node_num_show(struct config_item *item, char *page) { return sprintf(page, "%d\n", to_o2nm_node(item)->nd_num); } static struct o2nm_cluster *to_o2nm_cluster_from_node(struct o2nm_node *node) { /* through the first node_set .parent * mycluster/nodes/mynode == o2nm_cluster->o2nm_node_group->o2nm_node */ return to_o2nm_cluster(node->nd_item.ci_parent->ci_parent); } enum { O2NM_NODE_ATTR_NUM = 0, O2NM_NODE_ATTR_PORT, O2NM_NODE_ATTR_ADDRESS, }; static ssize_t o2nm_node_num_store(struct config_item *item, const char *page, size_t count) { struct o2nm_node *node = to_o2nm_node(item); struct o2nm_cluster *cluster = to_o2nm_cluster_from_node(node); unsigned long tmp; char *p = (char *)page; int ret = 0; tmp = simple_strtoul(p, &p, 0); if (!p || (*p && (*p != '\n'))) return -EINVAL; if (tmp >= O2NM_MAX_NODES) return -ERANGE; /* once we're in the cl_nodes tree networking can look us up by * node number and try to use our address and port attributes * to connect to this node.. make sure that they've been set * before writing the node attribute? */ if (!test_bit(O2NM_NODE_ATTR_ADDRESS, &node->nd_set_attributes) || !test_bit(O2NM_NODE_ATTR_PORT, &node->nd_set_attributes)) return -EINVAL; /* XXX */ write_lock(&cluster->cl_nodes_lock); if (cluster->cl_nodes[tmp]) ret = -EEXIST; else if (test_and_set_bit(O2NM_NODE_ATTR_NUM, &node->nd_set_attributes)) ret = -EBUSY; else { cluster->cl_nodes[tmp] = node; node->nd_num = tmp; set_bit(tmp, cluster->cl_nodes_bitmap); } write_unlock(&cluster->cl_nodes_lock); if (ret) return ret; return count; } static ssize_t o2nm_node_ipv4_port_show(struct config_item *item, char *page) { return sprintf(page, "%u\n", ntohs(to_o2nm_node(item)->nd_ipv4_port)); } static ssize_t o2nm_node_ipv4_port_store(struct config_item *item, const char *page, size_t count) { struct o2nm_node *node = to_o2nm_node(item); unsigned long tmp; char *p = (char *)page; tmp = simple_strtoul(p, &p, 0); if (!p || (*p && (*p != '\n'))) return -EINVAL; if (tmp == 0) return -EINVAL; if (tmp >= (u16)-1) return -ERANGE; if (test_and_set_bit(O2NM_NODE_ATTR_PORT, &node->nd_set_attributes)) return -EBUSY; node->nd_ipv4_port = htons(tmp); return count; } static ssize_t o2nm_node_ipv4_address_show(struct config_item *item, char *page) { return sprintf(page, "%pI4\n", &to_o2nm_node(item)->nd_ipv4_address); } static ssize_t o2nm_node_ipv4_address_store(struct config_item *item, const char *page, size_t count) { struct o2nm_node *node = to_o2nm_node(item); struct o2nm_cluster *cluster = to_o2nm_cluster_from_node(node); int ret, i; struct rb_node **p, *parent; unsigned int octets[4]; __be32 ipv4_addr = 0; ret = sscanf(page, "%3u.%3u.%3u.%3u", &octets[3], &octets[2], &octets[1], &octets[0]); if (ret != 4) return -EINVAL; for (i = 0; i < ARRAY_SIZE(octets); i++) { if (octets[i] > 255) return -ERANGE; be32_add_cpu(&ipv4_addr, octets[i] << (i * 8)); } ret = 0; write_lock(&cluster->cl_nodes_lock); if 
(o2nm_node_ip_tree_lookup(cluster, ipv4_addr, &p, &parent)) ret = -EEXIST; else if (test_and_set_bit(O2NM_NODE_ATTR_ADDRESS, &node->nd_set_attributes)) ret = -EBUSY; else { rb_link_node(&node->nd_ip_node, parent, p); rb_insert_color(&node->nd_ip_node, &cluster->cl_node_ip_tree); } write_unlock(&cluster->cl_nodes_lock); if (ret) return ret; memcpy(&node->nd_ipv4_address, &ipv4_addr, sizeof(ipv4_addr)); return count; } static ssize_t o2nm_node_local_show(struct config_item *item, char *page) { return sprintf(page, "%d\n", to_o2nm_node(item)->nd_local); } static ssize_t o2nm_node_local_store(struct config_item *item, const char *page, size_t count) { struct o2nm_node *node = to_o2nm_node(item); struct o2nm_cluster *cluster = to_o2nm_cluster_from_node(node); unsigned long tmp; char *p = (char *)page; ssize_t ret; tmp = simple_strtoul(p, &p, 0); if (!p || (*p && (*p != '\n'))) return -EINVAL; tmp = !!tmp; /* boolean of whether this node wants to be local */ /* setting local turns on networking rx for now so we require having * set everything else first */ if (!test_bit(O2NM_NODE_ATTR_ADDRESS, &node->nd_set_attributes) || !test_bit(O2NM_NODE_ATTR_NUM, &node->nd_set_attributes) || !test_bit(O2NM_NODE_ATTR_PORT, &node->nd_set_attributes)) return -EINVAL; /* XXX */ /* the only failure case is trying to set a new local node * when a different one is already set */ if (tmp && tmp == cluster->cl_has_local && cluster->cl_local_node != node->nd_num) return -EBUSY; /* bring up the rx thread if we're setting the new local node. */ if (tmp && !cluster->cl_has_local) { ret = o2net_start_listening(node); if (ret) return ret; } if (!tmp && cluster->cl_has_local && cluster->cl_local_node == node->nd_num) { o2net_stop_listening(node); cluster->cl_local_node = O2NM_INVALID_NODE_NUM; } node->nd_local = tmp; if (node->nd_local) { cluster->cl_has_local = tmp; cluster->cl_local_node = node->nd_num; } return count; } CONFIGFS_ATTR(o2nm_node_, num); CONFIGFS_ATTR(o2nm_node_, ipv4_port); CONFIGFS_ATTR(o2nm_node_, ipv4_address); CONFIGFS_ATTR(o2nm_node_, local); static struct configfs_attribute *o2nm_node_attrs[] = { &o2nm_node_attr_num, &o2nm_node_attr_ipv4_port, &o2nm_node_attr_ipv4_address, &o2nm_node_attr_local, NULL, }; static struct configfs_item_operations o2nm_node_item_ops = { .release = o2nm_node_release, }; static const struct config_item_type o2nm_node_type = { .ct_item_ops = &o2nm_node_item_ops, .ct_attrs = o2nm_node_attrs, .ct_owner = THIS_MODULE, }; /* node set */ struct o2nm_node_group { struct config_group ns_group; /* some stuff? */ }; #if 0 static struct o2nm_node_group *to_o2nm_node_group(struct config_group *group) { return group ? 
container_of(group, struct o2nm_node_group, ns_group) : NULL; } #endif static ssize_t o2nm_cluster_attr_write(const char *page, ssize_t count, unsigned int *val) { unsigned long tmp; char *p = (char *)page; tmp = simple_strtoul(p, &p, 0); if (!p || (*p && (*p != '\n'))) return -EINVAL; if (tmp == 0) return -EINVAL; if (tmp >= (u32)-1) return -ERANGE; *val = tmp; return count; } static ssize_t o2nm_cluster_idle_timeout_ms_show(struct config_item *item, char *page) { return sprintf(page, "%u\n", to_o2nm_cluster(item)->cl_idle_timeout_ms); } static ssize_t o2nm_cluster_idle_timeout_ms_store(struct config_item *item, const char *page, size_t count) { struct o2nm_cluster *cluster = to_o2nm_cluster(item); ssize_t ret; unsigned int val; ret = o2nm_cluster_attr_write(page, count, &val); if (ret > 0) { if (cluster->cl_idle_timeout_ms != val && o2net_num_connected_peers()) { mlog(ML_NOTICE, "o2net: cannot change idle timeout after " "the first peer has agreed to it." " %d connected peers\n", o2net_num_connected_peers()); ret = -EINVAL; } else if (val <= cluster->cl_keepalive_delay_ms) { mlog(ML_NOTICE, "o2net: idle timeout must be larger " "than keepalive delay\n"); ret = -EINVAL; } else { cluster->cl_idle_timeout_ms = val; } } return ret; } static ssize_t o2nm_cluster_keepalive_delay_ms_show( struct config_item *item, char *page) { return sprintf(page, "%u\n", to_o2nm_cluster(item)->cl_keepalive_delay_ms); } static ssize_t o2nm_cluster_keepalive_delay_ms_store( struct config_item *item, const char *page, size_t count) { struct o2nm_cluster *cluster = to_o2nm_cluster(item); ssize_t ret; unsigned int val; ret = o2nm_cluster_attr_write(page, count, &val); if (ret > 0) { if (cluster->cl_keepalive_delay_ms != val && o2net_num_connected_peers()) { mlog(ML_NOTICE, "o2net: cannot change keepalive delay after" " the first peer has agreed to it." 
" %d connected peers\n", o2net_num_connected_peers()); ret = -EINVAL; } else if (val >= cluster->cl_idle_timeout_ms) { mlog(ML_NOTICE, "o2net: keepalive delay must be " "smaller than idle timeout\n"); ret = -EINVAL; } else { cluster->cl_keepalive_delay_ms = val; } } return ret; } static ssize_t o2nm_cluster_reconnect_delay_ms_show( struct config_item *item, char *page) { return sprintf(page, "%u\n", to_o2nm_cluster(item)->cl_reconnect_delay_ms); } static ssize_t o2nm_cluster_reconnect_delay_ms_store( struct config_item *item, const char *page, size_t count) { return o2nm_cluster_attr_write(page, count, &to_o2nm_cluster(item)->cl_reconnect_delay_ms); } static ssize_t o2nm_cluster_fence_method_show( struct config_item *item, char *page) { struct o2nm_cluster *cluster = to_o2nm_cluster(item); ssize_t ret = 0; if (cluster) ret = sprintf(page, "%s\n", o2nm_fence_method_desc[cluster->cl_fence_method]); return ret; } static ssize_t o2nm_cluster_fence_method_store( struct config_item *item, const char *page, size_t count) { unsigned int i; if (page[count - 1] != '\n') goto bail; for (i = 0; i < O2NM_FENCE_METHODS; ++i) { if (count != strlen(o2nm_fence_method_desc[i]) + 1) continue; if (strncasecmp(page, o2nm_fence_method_desc[i], count - 1)) continue; if (to_o2nm_cluster(item)->cl_fence_method != i) { printk(KERN_INFO "ocfs2: Changing fence method to %s\n", o2nm_fence_method_desc[i]); to_o2nm_cluster(item)->cl_fence_method = i; } return count; } bail: return -EINVAL; } CONFIGFS_ATTR(o2nm_cluster_, idle_timeout_ms); CONFIGFS_ATTR(o2nm_cluster_, keepalive_delay_ms); CONFIGFS_ATTR(o2nm_cluster_, reconnect_delay_ms); CONFIGFS_ATTR(o2nm_cluster_, fence_method); static struct configfs_attribute *o2nm_cluster_attrs[] = { &o2nm_cluster_attr_idle_timeout_ms, &o2nm_cluster_attr_keepalive_delay_ms, &o2nm_cluster_attr_reconnect_delay_ms, &o2nm_cluster_attr_fence_method, NULL, }; static struct config_item *o2nm_node_group_make_item(struct config_group *group, const char *name) { struct o2nm_node *node = NULL; if (strlen(name) > O2NM_MAX_NAME_LEN) return ERR_PTR(-ENAMETOOLONG); node = kzalloc(sizeof(struct o2nm_node), GFP_KERNEL); if (node == NULL) return ERR_PTR(-ENOMEM); strcpy(node->nd_name, name); /* use item.ci_namebuf instead? */ config_item_init_type_name(&node->nd_item, name, &o2nm_node_type); spin_lock_init(&node->nd_lock); mlog(ML_CLUSTER, "o2nm: Registering node %s\n", name); return &node->nd_item; } static void o2nm_node_group_drop_item(struct config_group *group, struct config_item *item) { struct o2nm_node *node = to_o2nm_node(item); struct o2nm_cluster *cluster = to_o2nm_cluster(group->cg_item.ci_parent); o2net_disconnect_node(node); if (cluster->cl_has_local && (cluster->cl_local_node == node->nd_num)) { cluster->cl_has_local = 0; cluster->cl_local_node = O2NM_INVALID_NODE_NUM; o2net_stop_listening(node); } /* XXX call into net to stop this node from trading messages */ write_lock(&cluster->cl_nodes_lock); /* XXX sloppy */ if (node->nd_ipv4_address) rb_erase(&node->nd_ip_node, &cluster->cl_node_ip_tree); /* nd_num might be 0 if the node number hasn't been set.. 
*/ if (cluster->cl_nodes[node->nd_num] == node) { cluster->cl_nodes[node->nd_num] = NULL; clear_bit(node->nd_num, cluster->cl_nodes_bitmap); } write_unlock(&cluster->cl_nodes_lock); mlog(ML_CLUSTER, "o2nm: Unregistered node %s\n", config_item_name(&node->nd_item)); config_item_put(item); } static struct configfs_group_operations o2nm_node_group_group_ops = { .make_item = o2nm_node_group_make_item, .drop_item = o2nm_node_group_drop_item, }; static const struct config_item_type o2nm_node_group_type = { .ct_group_ops = &o2nm_node_group_group_ops, .ct_owner = THIS_MODULE, }; /* cluster */ static void o2nm_cluster_release(struct config_item *item) { struct o2nm_cluster *cluster = to_o2nm_cluster(item); kfree(cluster); } static struct configfs_item_operations o2nm_cluster_item_ops = { .release = o2nm_cluster_release, }; static const struct config_item_type o2nm_cluster_type = { .ct_item_ops = &o2nm_cluster_item_ops, .ct_attrs = o2nm_cluster_attrs, .ct_owner = THIS_MODULE, }; /* cluster set */ struct o2nm_cluster_group { struct configfs_subsystem cs_subsys; /* some stuff? */ }; #if 0 static struct o2nm_cluster_group *to_o2nm_cluster_group(struct config_group *group) { return group ? container_of(to_configfs_subsystem(group), struct o2nm_cluster_group, cs_subsys) : NULL; } #endif static struct config_group *o2nm_cluster_group_make_group(struct config_group *group, const char *name) { struct o2nm_cluster *cluster = NULL; struct o2nm_node_group *ns = NULL; struct config_group *o2hb_group = NULL, *ret = NULL; /* this runs under the parent dir's i_mutex; there can be only * one caller in here at a time */ if (o2nm_single_cluster) return ERR_PTR(-ENOSPC); cluster = kzalloc(sizeof(struct o2nm_cluster), GFP_KERNEL); ns = kzalloc(sizeof(struct o2nm_node_group), GFP_KERNEL); o2hb_group = o2hb_alloc_hb_set(); if (cluster == NULL || ns == NULL || o2hb_group == NULL) goto out; config_group_init_type_name(&cluster->cl_group, name, &o2nm_cluster_type); configfs_add_default_group(&ns->ns_group, &cluster->cl_group); config_group_init_type_name(&ns->ns_group, "node", &o2nm_node_group_type); configfs_add_default_group(o2hb_group, &cluster->cl_group); rwlock_init(&cluster->cl_nodes_lock); cluster->cl_node_ip_tree = RB_ROOT; cluster->cl_reconnect_delay_ms = O2NET_RECONNECT_DELAY_MS_DEFAULT; cluster->cl_idle_timeout_ms = O2NET_IDLE_TIMEOUT_MS_DEFAULT; cluster->cl_keepalive_delay_ms = O2NET_KEEPALIVE_DELAY_MS_DEFAULT; cluster->cl_fence_method = O2NM_FENCE_RESET; ret = &cluster->cl_group; o2nm_single_cluster = cluster; out: if (ret == NULL) { kfree(cluster); kfree(ns); o2hb_free_hb_set(o2hb_group); ret = ERR_PTR(-ENOMEM); } return ret; } static void o2nm_cluster_group_drop_item(struct config_group *group, struct config_item *item) { struct o2nm_cluster *cluster = to_o2nm_cluster(item); BUG_ON(o2nm_single_cluster != cluster); o2nm_single_cluster = NULL; configfs_remove_default_groups(&cluster->cl_group); config_item_put(item); } static struct configfs_group_operations o2nm_cluster_group_group_ops = { .make_group = o2nm_cluster_group_make_group, .drop_item = o2nm_cluster_group_drop_item, }; static const struct config_item_type o2nm_cluster_group_type = { .ct_group_ops = &o2nm_cluster_group_group_ops, .ct_owner = THIS_MODULE, }; static struct o2nm_cluster_group o2nm_cluster_group = { .cs_subsys = { .su_group = { .cg_item = { .ci_namebuf = "cluster", .ci_type = &o2nm_cluster_group_type, }, }, }, }; int o2nm_depend_item(struct config_item *item) { return configfs_depend_item(&o2nm_cluster_group.cs_subsys, item); } void 
o2nm_undepend_item(struct config_item *item) { configfs_undepend_item(item); } int o2nm_depend_this_node(void) { int ret = 0; struct o2nm_node *local_node; local_node = o2nm_get_node_by_num(o2nm_this_node()); if (!local_node) { ret = -EINVAL; goto out; } ret = o2nm_depend_item(&local_node->nd_item); o2nm_node_put(local_node); out: return ret; } void o2nm_undepend_this_node(void) { struct o2nm_node *local_node; local_node = o2nm_get_node_by_num(o2nm_this_node()); BUG_ON(!local_node); o2nm_undepend_item(&local_node->nd_item); o2nm_node_put(local_node); } static void __exit exit_o2nm(void) { /* XXX sync with hb callbacks and shut down hb? */ o2net_unregister_hb_callbacks(); configfs_unregister_subsystem(&o2nm_cluster_group.cs_subsys); o2cb_sys_shutdown(); o2net_exit(); o2hb_exit(); } static int __init init_o2nm(void) { int ret = -1; ret = o2hb_init(); if (ret) goto out; ret = o2net_init(); if (ret) goto out_o2hb; ret = o2net_register_hb_callbacks(); if (ret) goto out_o2net; config_group_init(&o2nm_cluster_group.cs_subsys.su_group); mutex_init(&o2nm_cluster_group.cs_subsys.su_mutex); ret = configfs_register_subsystem(&o2nm_cluster_group.cs_subsys); if (ret) { printk(KERN_ERR "nodemanager: Registration returned %d\n", ret); goto out_callbacks; } ret = o2cb_sys_init(); if (!ret) goto out; configfs_unregister_subsystem(&o2nm_cluster_group.cs_subsys); out_callbacks: o2net_unregister_hb_callbacks(); out_o2net: o2net_exit(); out_o2hb: o2hb_exit(); out: return ret; } MODULE_AUTHOR("Oracle"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("OCFS2 cluster management"); module_init(init_o2nm) module_exit(exit_o2nm)
/* -*- mode: c; c-basic-offset: 8; -*- * vim: noexpandtab sw=8 ts=8 sts=0: * * Copyright (C) 2004, 2005 Oracle. All rights reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this program; if not, write to the * Free Software Foundation, Inc., 59 Temple Place - Suite 330, * Boston, MA 021110-1307, USA. */ #include <linux/slab.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/configfs.h> #include "tcp.h" #include "nodemanager.h" #include "heartbeat.h" #include "masklog.h" #include "sys.h" /* for now we operate under the assertion that there can be only one * cluster active at a time. Changing this will require trickling * cluster references throughout where nodes are looked up */ struct o2nm_cluster *o2nm_single_cluster = NULL; char *o2nm_fence_method_desc[O2NM_FENCE_METHODS] = { "reset", /* O2NM_FENCE_RESET */ "panic", /* O2NM_FENCE_PANIC */ }; static inline void o2nm_lock_subsystem(void); static inline void o2nm_unlock_subsystem(void); struct o2nm_node *o2nm_get_node_by_num(u8 node_num) { struct o2nm_node *node = NULL; if (node_num >= O2NM_MAX_NODES || o2nm_single_cluster == NULL) goto out; read_lock(&o2nm_single_cluster->cl_nodes_lock); node = o2nm_single_cluster->cl_nodes[node_num]; if (node) config_item_get(&node->nd_item); read_unlock(&o2nm_single_cluster->cl_nodes_lock); out: return node; } EXPORT_SYMBOL_GPL(o2nm_get_node_by_num); int o2nm_configured_node_map(unsigned long *map, unsigned bytes) { struct o2nm_cluster *cluster = o2nm_single_cluster; BUG_ON(bytes < (sizeof(cluster->cl_nodes_bitmap))); if (cluster == NULL) return -EINVAL; read_lock(&cluster->cl_nodes_lock); memcpy(map, cluster->cl_nodes_bitmap, sizeof(cluster->cl_nodes_bitmap)); read_unlock(&cluster->cl_nodes_lock); return 0; } EXPORT_SYMBOL_GPL(o2nm_configured_node_map); static struct o2nm_node *o2nm_node_ip_tree_lookup(struct o2nm_cluster *cluster, __be32 ip_needle, struct rb_node ***ret_p, struct rb_node **ret_parent) { struct rb_node **p = &cluster->cl_node_ip_tree.rb_node; struct rb_node *parent = NULL; struct o2nm_node *node, *ret = NULL; while (*p) { int cmp; parent = *p; node = rb_entry(parent, struct o2nm_node, nd_ip_node); cmp = memcmp(&ip_needle, &node->nd_ipv4_address, sizeof(ip_needle)); if (cmp < 0) p = &(*p)->rb_left; else if (cmp > 0) p = &(*p)->rb_right; else { ret = node; break; } } if (ret_p != NULL) *ret_p = p; if (ret_parent != NULL) *ret_parent = parent; return ret; } struct o2nm_node *o2nm_get_node_by_ip(__be32 addr) { struct o2nm_node *node = NULL; struct o2nm_cluster *cluster = o2nm_single_cluster; if (cluster == NULL) goto out; read_lock(&cluster->cl_nodes_lock); node = o2nm_node_ip_tree_lookup(cluster, addr, NULL, NULL); if (node) config_item_get(&node->nd_item); read_unlock(&cluster->cl_nodes_lock); out: return node; } EXPORT_SYMBOL_GPL(o2nm_get_node_by_ip); void o2nm_node_put(struct o2nm_node *node) { config_item_put(&node->nd_item); } EXPORT_SYMBOL_GPL(o2nm_node_put); void o2nm_node_get(struct o2nm_node *node) { config_item_get(&node->nd_item); } 
EXPORT_SYMBOL_GPL(o2nm_node_get); u8 o2nm_this_node(void) { u8 node_num = O2NM_MAX_NODES; if (o2nm_single_cluster && o2nm_single_cluster->cl_has_local) node_num = o2nm_single_cluster->cl_local_node; return node_num; } EXPORT_SYMBOL_GPL(o2nm_this_node); /* node configfs bits */ static struct o2nm_cluster *to_o2nm_cluster(struct config_item *item) { return item ? container_of(to_config_group(item), struct o2nm_cluster, cl_group) : NULL; } static struct o2nm_node *to_o2nm_node(struct config_item *item) { return item ? container_of(item, struct o2nm_node, nd_item) : NULL; } static void o2nm_node_release(struct config_item *item) { struct o2nm_node *node = to_o2nm_node(item); kfree(node); } static ssize_t o2nm_node_num_show(struct config_item *item, char *page) { return sprintf(page, "%d\n", to_o2nm_node(item)->nd_num); } static struct o2nm_cluster *to_o2nm_cluster_from_node(struct o2nm_node *node) { /* through the first node_set .parent * mycluster/nodes/mynode == o2nm_cluster->o2nm_node_group->o2nm_node */ if (node->nd_item.ci_parent) return to_o2nm_cluster(node->nd_item.ci_parent->ci_parent); else return NULL; } enum { O2NM_NODE_ATTR_NUM = 0, O2NM_NODE_ATTR_PORT, O2NM_NODE_ATTR_ADDRESS, }; static ssize_t o2nm_node_num_store(struct config_item *item, const char *page, size_t count) { struct o2nm_node *node = to_o2nm_node(item); struct o2nm_cluster *cluster; unsigned long tmp; char *p = (char *)page; int ret = 0; tmp = simple_strtoul(p, &p, 0); if (!p || (*p && (*p != '\n'))) return -EINVAL; if (tmp >= O2NM_MAX_NODES) return -ERANGE; /* once we're in the cl_nodes tree networking can look us up by * node number and try to use our address and port attributes * to connect to this node.. make sure that they've been set * before writing the node attribute? */ if (!test_bit(O2NM_NODE_ATTR_ADDRESS, &node->nd_set_attributes) || !test_bit(O2NM_NODE_ATTR_PORT, &node->nd_set_attributes)) return -EINVAL; /* XXX */ o2nm_lock_subsystem(); cluster = to_o2nm_cluster_from_node(node); if (!cluster) { o2nm_unlock_subsystem(); return -EINVAL; } write_lock(&cluster->cl_nodes_lock); if (cluster->cl_nodes[tmp]) ret = -EEXIST; else if (test_and_set_bit(O2NM_NODE_ATTR_NUM, &node->nd_set_attributes)) ret = -EBUSY; else { cluster->cl_nodes[tmp] = node; node->nd_num = tmp; set_bit(tmp, cluster->cl_nodes_bitmap); } write_unlock(&cluster->cl_nodes_lock); o2nm_unlock_subsystem(); if (ret) return ret; return count; } static ssize_t o2nm_node_ipv4_port_show(struct config_item *item, char *page) { return sprintf(page, "%u\n", ntohs(to_o2nm_node(item)->nd_ipv4_port)); } static ssize_t o2nm_node_ipv4_port_store(struct config_item *item, const char *page, size_t count) { struct o2nm_node *node = to_o2nm_node(item); unsigned long tmp; char *p = (char *)page; tmp = simple_strtoul(p, &p, 0); if (!p || (*p && (*p != '\n'))) return -EINVAL; if (tmp == 0) return -EINVAL; if (tmp >= (u16)-1) return -ERANGE; if (test_and_set_bit(O2NM_NODE_ATTR_PORT, &node->nd_set_attributes)) return -EBUSY; node->nd_ipv4_port = htons(tmp); return count; } static ssize_t o2nm_node_ipv4_address_show(struct config_item *item, char *page) { return sprintf(page, "%pI4\n", &to_o2nm_node(item)->nd_ipv4_address); } static ssize_t o2nm_node_ipv4_address_store(struct config_item *item, const char *page, size_t count) { struct o2nm_node *node = to_o2nm_node(item); struct o2nm_cluster *cluster; int ret, i; struct rb_node **p, *parent; unsigned int octets[4]; __be32 ipv4_addr = 0; ret = sscanf(page, "%3u.%3u.%3u.%3u", &octets[3], &octets[2], &octets[1], &octets[0]); 
if (ret != 4) return -EINVAL; for (i = 0; i < ARRAY_SIZE(octets); i++) { if (octets[i] > 255) return -ERANGE; be32_add_cpu(&ipv4_addr, octets[i] << (i * 8)); } o2nm_lock_subsystem(); cluster = to_o2nm_cluster_from_node(node); if (!cluster) { o2nm_unlock_subsystem(); return -EINVAL; } ret = 0; write_lock(&cluster->cl_nodes_lock); if (o2nm_node_ip_tree_lookup(cluster, ipv4_addr, &p, &parent)) ret = -EEXIST; else if (test_and_set_bit(O2NM_NODE_ATTR_ADDRESS, &node->nd_set_attributes)) ret = -EBUSY; else { rb_link_node(&node->nd_ip_node, parent, p); rb_insert_color(&node->nd_ip_node, &cluster->cl_node_ip_tree); } write_unlock(&cluster->cl_nodes_lock); o2nm_unlock_subsystem(); if (ret) return ret; memcpy(&node->nd_ipv4_address, &ipv4_addr, sizeof(ipv4_addr)); return count; } static ssize_t o2nm_node_local_show(struct config_item *item, char *page) { return sprintf(page, "%d\n", to_o2nm_node(item)->nd_local); } static ssize_t o2nm_node_local_store(struct config_item *item, const char *page, size_t count) { struct o2nm_node *node = to_o2nm_node(item); struct o2nm_cluster *cluster; unsigned long tmp; char *p = (char *)page; ssize_t ret; tmp = simple_strtoul(p, &p, 0); if (!p || (*p && (*p != '\n'))) return -EINVAL; tmp = !!tmp; /* boolean of whether this node wants to be local */ /* setting local turns on networking rx for now so we require having * set everything else first */ if (!test_bit(O2NM_NODE_ATTR_ADDRESS, &node->nd_set_attributes) || !test_bit(O2NM_NODE_ATTR_NUM, &node->nd_set_attributes) || !test_bit(O2NM_NODE_ATTR_PORT, &node->nd_set_attributes)) return -EINVAL; /* XXX */ o2nm_lock_subsystem(); cluster = to_o2nm_cluster_from_node(node); if (!cluster) { ret = -EINVAL; goto out; } /* the only failure case is trying to set a new local node * when a different one is already set */ if (tmp && tmp == cluster->cl_has_local && cluster->cl_local_node != node->nd_num) { ret = -EBUSY; goto out; } /* bring up the rx thread if we're setting the new local node. */ if (tmp && !cluster->cl_has_local) { ret = o2net_start_listening(node); if (ret) goto out; } if (!tmp && cluster->cl_has_local && cluster->cl_local_node == node->nd_num) { o2net_stop_listening(node); cluster->cl_local_node = O2NM_INVALID_NODE_NUM; } node->nd_local = tmp; if (node->nd_local) { cluster->cl_has_local = tmp; cluster->cl_local_node = node->nd_num; } ret = count; out: o2nm_unlock_subsystem(); return ret; } CONFIGFS_ATTR(o2nm_node_, num); CONFIGFS_ATTR(o2nm_node_, ipv4_port); CONFIGFS_ATTR(o2nm_node_, ipv4_address); CONFIGFS_ATTR(o2nm_node_, local); static struct configfs_attribute *o2nm_node_attrs[] = { &o2nm_node_attr_num, &o2nm_node_attr_ipv4_port, &o2nm_node_attr_ipv4_address, &o2nm_node_attr_local, NULL, }; static struct configfs_item_operations o2nm_node_item_ops = { .release = o2nm_node_release, }; static const struct config_item_type o2nm_node_type = { .ct_item_ops = &o2nm_node_item_ops, .ct_attrs = o2nm_node_attrs, .ct_owner = THIS_MODULE, }; /* node set */ struct o2nm_node_group { struct config_group ns_group; /* some stuff? */ }; #if 0 static struct o2nm_node_group *to_o2nm_node_group(struct config_group *group) { return group ? 
container_of(group, struct o2nm_node_group, ns_group) : NULL; } #endif static ssize_t o2nm_cluster_attr_write(const char *page, ssize_t count, unsigned int *val) { unsigned long tmp; char *p = (char *)page; tmp = simple_strtoul(p, &p, 0); if (!p || (*p && (*p != '\n'))) return -EINVAL; if (tmp == 0) return -EINVAL; if (tmp >= (u32)-1) return -ERANGE; *val = tmp; return count; } static ssize_t o2nm_cluster_idle_timeout_ms_show(struct config_item *item, char *page) { return sprintf(page, "%u\n", to_o2nm_cluster(item)->cl_idle_timeout_ms); } static ssize_t o2nm_cluster_idle_timeout_ms_store(struct config_item *item, const char *page, size_t count) { struct o2nm_cluster *cluster = to_o2nm_cluster(item); ssize_t ret; unsigned int val; ret = o2nm_cluster_attr_write(page, count, &val); if (ret > 0) { if (cluster->cl_idle_timeout_ms != val && o2net_num_connected_peers()) { mlog(ML_NOTICE, "o2net: cannot change idle timeout after " "the first peer has agreed to it." " %d connected peers\n", o2net_num_connected_peers()); ret = -EINVAL; } else if (val <= cluster->cl_keepalive_delay_ms) { mlog(ML_NOTICE, "o2net: idle timeout must be larger " "than keepalive delay\n"); ret = -EINVAL; } else { cluster->cl_idle_timeout_ms = val; } } return ret; } static ssize_t o2nm_cluster_keepalive_delay_ms_show( struct config_item *item, char *page) { return sprintf(page, "%u\n", to_o2nm_cluster(item)->cl_keepalive_delay_ms); } static ssize_t o2nm_cluster_keepalive_delay_ms_store( struct config_item *item, const char *page, size_t count) { struct o2nm_cluster *cluster = to_o2nm_cluster(item); ssize_t ret; unsigned int val; ret = o2nm_cluster_attr_write(page, count, &val); if (ret > 0) { if (cluster->cl_keepalive_delay_ms != val && o2net_num_connected_peers()) { mlog(ML_NOTICE, "o2net: cannot change keepalive delay after" " the first peer has agreed to it." 
" %d connected peers\n", o2net_num_connected_peers()); ret = -EINVAL; } else if (val >= cluster->cl_idle_timeout_ms) { mlog(ML_NOTICE, "o2net: keepalive delay must be " "smaller than idle timeout\n"); ret = -EINVAL; } else { cluster->cl_keepalive_delay_ms = val; } } return ret; } static ssize_t o2nm_cluster_reconnect_delay_ms_show( struct config_item *item, char *page) { return sprintf(page, "%u\n", to_o2nm_cluster(item)->cl_reconnect_delay_ms); } static ssize_t o2nm_cluster_reconnect_delay_ms_store( struct config_item *item, const char *page, size_t count) { return o2nm_cluster_attr_write(page, count, &to_o2nm_cluster(item)->cl_reconnect_delay_ms); } static ssize_t o2nm_cluster_fence_method_show( struct config_item *item, char *page) { struct o2nm_cluster *cluster = to_o2nm_cluster(item); ssize_t ret = 0; if (cluster) ret = sprintf(page, "%s\n", o2nm_fence_method_desc[cluster->cl_fence_method]); return ret; } static ssize_t o2nm_cluster_fence_method_store( struct config_item *item, const char *page, size_t count) { unsigned int i; if (page[count - 1] != '\n') goto bail; for (i = 0; i < O2NM_FENCE_METHODS; ++i) { if (count != strlen(o2nm_fence_method_desc[i]) + 1) continue; if (strncasecmp(page, o2nm_fence_method_desc[i], count - 1)) continue; if (to_o2nm_cluster(item)->cl_fence_method != i) { printk(KERN_INFO "ocfs2: Changing fence method to %s\n", o2nm_fence_method_desc[i]); to_o2nm_cluster(item)->cl_fence_method = i; } return count; } bail: return -EINVAL; } CONFIGFS_ATTR(o2nm_cluster_, idle_timeout_ms); CONFIGFS_ATTR(o2nm_cluster_, keepalive_delay_ms); CONFIGFS_ATTR(o2nm_cluster_, reconnect_delay_ms); CONFIGFS_ATTR(o2nm_cluster_, fence_method); static struct configfs_attribute *o2nm_cluster_attrs[] = { &o2nm_cluster_attr_idle_timeout_ms, &o2nm_cluster_attr_keepalive_delay_ms, &o2nm_cluster_attr_reconnect_delay_ms, &o2nm_cluster_attr_fence_method, NULL, }; static struct config_item *o2nm_node_group_make_item(struct config_group *group, const char *name) { struct o2nm_node *node = NULL; if (strlen(name) > O2NM_MAX_NAME_LEN) return ERR_PTR(-ENAMETOOLONG); node = kzalloc(sizeof(struct o2nm_node), GFP_KERNEL); if (node == NULL) return ERR_PTR(-ENOMEM); strcpy(node->nd_name, name); /* use item.ci_namebuf instead? */ config_item_init_type_name(&node->nd_item, name, &o2nm_node_type); spin_lock_init(&node->nd_lock); mlog(ML_CLUSTER, "o2nm: Registering node %s\n", name); return &node->nd_item; } static void o2nm_node_group_drop_item(struct config_group *group, struct config_item *item) { struct o2nm_node *node = to_o2nm_node(item); struct o2nm_cluster *cluster = to_o2nm_cluster(group->cg_item.ci_parent); o2net_disconnect_node(node); if (cluster->cl_has_local && (cluster->cl_local_node == node->nd_num)) { cluster->cl_has_local = 0; cluster->cl_local_node = O2NM_INVALID_NODE_NUM; o2net_stop_listening(node); } /* XXX call into net to stop this node from trading messages */ write_lock(&cluster->cl_nodes_lock); /* XXX sloppy */ if (node->nd_ipv4_address) rb_erase(&node->nd_ip_node, &cluster->cl_node_ip_tree); /* nd_num might be 0 if the node number hasn't been set.. 
*/ if (cluster->cl_nodes[node->nd_num] == node) { cluster->cl_nodes[node->nd_num] = NULL; clear_bit(node->nd_num, cluster->cl_nodes_bitmap); } write_unlock(&cluster->cl_nodes_lock); mlog(ML_CLUSTER, "o2nm: Unregistered node %s\n", config_item_name(&node->nd_item)); config_item_put(item); } static struct configfs_group_operations o2nm_node_group_group_ops = { .make_item = o2nm_node_group_make_item, .drop_item = o2nm_node_group_drop_item, }; static const struct config_item_type o2nm_node_group_type = { .ct_group_ops = &o2nm_node_group_group_ops, .ct_owner = THIS_MODULE, }; /* cluster */ static void o2nm_cluster_release(struct config_item *item) { struct o2nm_cluster *cluster = to_o2nm_cluster(item); kfree(cluster); } static struct configfs_item_operations o2nm_cluster_item_ops = { .release = o2nm_cluster_release, }; static const struct config_item_type o2nm_cluster_type = { .ct_item_ops = &o2nm_cluster_item_ops, .ct_attrs = o2nm_cluster_attrs, .ct_owner = THIS_MODULE, }; /* cluster set */ struct o2nm_cluster_group { struct configfs_subsystem cs_subsys; /* some stuff? */ }; #if 0 static struct o2nm_cluster_group *to_o2nm_cluster_group(struct config_group *group) { return group ? container_of(to_configfs_subsystem(group), struct o2nm_cluster_group, cs_subsys) : NULL; } #endif static struct config_group *o2nm_cluster_group_make_group(struct config_group *group, const char *name) { struct o2nm_cluster *cluster = NULL; struct o2nm_node_group *ns = NULL; struct config_group *o2hb_group = NULL, *ret = NULL; /* this runs under the parent dir's i_mutex; there can be only * one caller in here at a time */ if (o2nm_single_cluster) return ERR_PTR(-ENOSPC); cluster = kzalloc(sizeof(struct o2nm_cluster), GFP_KERNEL); ns = kzalloc(sizeof(struct o2nm_node_group), GFP_KERNEL); o2hb_group = o2hb_alloc_hb_set(); if (cluster == NULL || ns == NULL || o2hb_group == NULL) goto out; config_group_init_type_name(&cluster->cl_group, name, &o2nm_cluster_type); configfs_add_default_group(&ns->ns_group, &cluster->cl_group); config_group_init_type_name(&ns->ns_group, "node", &o2nm_node_group_type); configfs_add_default_group(o2hb_group, &cluster->cl_group); rwlock_init(&cluster->cl_nodes_lock); cluster->cl_node_ip_tree = RB_ROOT; cluster->cl_reconnect_delay_ms = O2NET_RECONNECT_DELAY_MS_DEFAULT; cluster->cl_idle_timeout_ms = O2NET_IDLE_TIMEOUT_MS_DEFAULT; cluster->cl_keepalive_delay_ms = O2NET_KEEPALIVE_DELAY_MS_DEFAULT; cluster->cl_fence_method = O2NM_FENCE_RESET; ret = &cluster->cl_group; o2nm_single_cluster = cluster; out: if (ret == NULL) { kfree(cluster); kfree(ns); o2hb_free_hb_set(o2hb_group); ret = ERR_PTR(-ENOMEM); } return ret; } static void o2nm_cluster_group_drop_item(struct config_group *group, struct config_item *item) { struct o2nm_cluster *cluster = to_o2nm_cluster(item); BUG_ON(o2nm_single_cluster != cluster); o2nm_single_cluster = NULL; configfs_remove_default_groups(&cluster->cl_group); config_item_put(item); } static struct configfs_group_operations o2nm_cluster_group_group_ops = { .make_group = o2nm_cluster_group_make_group, .drop_item = o2nm_cluster_group_drop_item, }; static const struct config_item_type o2nm_cluster_group_type = { .ct_group_ops = &o2nm_cluster_group_group_ops, .ct_owner = THIS_MODULE, }; static struct o2nm_cluster_group o2nm_cluster_group = { .cs_subsys = { .su_group = { .cg_item = { .ci_namebuf = "cluster", .ci_type = &o2nm_cluster_group_type, }, }, }, }; static inline void o2nm_lock_subsystem(void) { mutex_lock(&o2nm_cluster_group.cs_subsys.su_mutex); } static inline void 
o2nm_unlock_subsystem(void) { mutex_unlock(&o2nm_cluster_group.cs_subsys.su_mutex); } int o2nm_depend_item(struct config_item *item) { return configfs_depend_item(&o2nm_cluster_group.cs_subsys, item); } void o2nm_undepend_item(struct config_item *item) { configfs_undepend_item(item); } int o2nm_depend_this_node(void) { int ret = 0; struct o2nm_node *local_node; local_node = o2nm_get_node_by_num(o2nm_this_node()); if (!local_node) { ret = -EINVAL; goto out; } ret = o2nm_depend_item(&local_node->nd_item); o2nm_node_put(local_node); out: return ret; } void o2nm_undepend_this_node(void) { struct o2nm_node *local_node; local_node = o2nm_get_node_by_num(o2nm_this_node()); BUG_ON(!local_node); o2nm_undepend_item(&local_node->nd_item); o2nm_node_put(local_node); } static void __exit exit_o2nm(void) { /* XXX sync with hb callbacks and shut down hb? */ o2net_unregister_hb_callbacks(); configfs_unregister_subsystem(&o2nm_cluster_group.cs_subsys); o2cb_sys_shutdown(); o2net_exit(); o2hb_exit(); } static int __init init_o2nm(void) { int ret = -1; ret = o2hb_init(); if (ret) goto out; ret = o2net_init(); if (ret) goto out_o2hb; ret = o2net_register_hb_callbacks(); if (ret) goto out_o2net; config_group_init(&o2nm_cluster_group.cs_subsys.su_group); mutex_init(&o2nm_cluster_group.cs_subsys.su_mutex); ret = configfs_register_subsystem(&o2nm_cluster_group.cs_subsys); if (ret) { printk(KERN_ERR "nodemanager: Registration returned %d\n", ret); goto out_callbacks; } ret = o2cb_sys_init(); if (!ret) goto out; configfs_unregister_subsystem(&o2nm_cluster_group.cs_subsys); out_callbacks: o2net_unregister_hb_callbacks(); out_o2net: o2net_exit(); out_o2hb: o2hb_exit(); out: return ret; } MODULE_AUTHOR("Oracle"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("OCFS2 cluster management"); module_init(init_o2nm) module_exit(exit_o2nm)
static ssize_t o2nm_node_ipv4_address_store(struct config_item *item, const char *page, size_t count) { struct o2nm_node *node = to_o2nm_node(item); struct o2nm_cluster *cluster = to_o2nm_cluster_from_node(node); int ret, i; struct rb_node **p, *parent; unsigned int octets[4]; __be32 ipv4_addr = 0; ret = sscanf(page, "%3u.%3u.%3u.%3u", &octets[3], &octets[2], &octets[1], &octets[0]); if (ret != 4) return -EINVAL; for (i = 0; i < ARRAY_SIZE(octets); i++) { if (octets[i] > 255) return -ERANGE; be32_add_cpu(&ipv4_addr, octets[i] << (i * 8)); } ret = 0; write_lock(&cluster->cl_nodes_lock); if (o2nm_node_ip_tree_lookup(cluster, ipv4_addr, &p, &parent)) ret = -EEXIST; else if (test_and_set_bit(O2NM_NODE_ATTR_ADDRESS, &node->nd_set_attributes)) ret = -EBUSY; else { rb_link_node(&node->nd_ip_node, parent, p); rb_insert_color(&node->nd_ip_node, &cluster->cl_node_ip_tree); } write_unlock(&cluster->cl_nodes_lock); if (ret) return ret; memcpy(&node->nd_ipv4_address, &ipv4_addr, sizeof(ipv4_addr)); return count; }
static ssize_t o2nm_node_ipv4_address_store(struct config_item *item, const char *page, size_t count) { struct o2nm_node *node = to_o2nm_node(item); struct o2nm_cluster *cluster; int ret, i; struct rb_node **p, *parent; unsigned int octets[4]; __be32 ipv4_addr = 0; ret = sscanf(page, "%3u.%3u.%3u.%3u", &octets[3], &octets[2], &octets[1], &octets[0]); if (ret != 4) return -EINVAL; for (i = 0; i < ARRAY_SIZE(octets); i++) { if (octets[i] > 255) return -ERANGE; be32_add_cpu(&ipv4_addr, octets[i] << (i * 8)); } o2nm_lock_subsystem(); cluster = to_o2nm_cluster_from_node(node); if (!cluster) { o2nm_unlock_subsystem(); return -EINVAL; } ret = 0; write_lock(&cluster->cl_nodes_lock); if (o2nm_node_ip_tree_lookup(cluster, ipv4_addr, &p, &parent)) ret = -EEXIST; else if (test_and_set_bit(O2NM_NODE_ATTR_ADDRESS, &node->nd_set_attributes)) ret = -EBUSY; else { rb_link_node(&node->nd_ip_node, parent, p); rb_insert_color(&node->nd_ip_node, &cluster->cl_node_ip_tree); } write_unlock(&cluster->cl_nodes_lock); o2nm_unlock_subsystem(); if (ret) return ret; memcpy(&node->nd_ipv4_address, &ipv4_addr, sizeof(ipv4_addr)); return count; }
{'added': [(43, 'static inline void o2nm_lock_subsystem(void);'), (44, 'static inline void o2nm_unlock_subsystem(void);'), (45, ''), (187, '\tif (node->nd_item.ci_parent)'), (188, '\t\treturn to_o2nm_cluster(node->nd_item.ci_parent->ci_parent);'), (189, '\telse'), (190, '\t\treturn NULL;'), (203, '\tstruct o2nm_cluster *cluster;'), (223, '\to2nm_lock_subsystem();'), (224, '\tcluster = to_o2nm_cluster_from_node(node);'), (225, '\tif (!cluster) {'), (226, '\t\to2nm_unlock_subsystem();'), (227, '\t\treturn -EINVAL;'), (228, '\t}'), (229, ''), (242, '\to2nm_unlock_subsystem();'), (243, ''), (287, '\tstruct o2nm_cluster *cluster;'), (304, '\to2nm_lock_subsystem();'), (305, '\tcluster = to_o2nm_cluster_from_node(node);'), (306, '\tif (!cluster) {'), (307, '\t\to2nm_unlock_subsystem();'), (308, '\t\treturn -EINVAL;'), (309, '\t}'), (310, ''), (323, '\to2nm_unlock_subsystem();'), (324, ''), (342, '\tstruct o2nm_cluster *cluster;'), (360, '\to2nm_lock_subsystem();'), (361, '\tcluster = to_o2nm_cluster_from_node(node);'), (362, '\tif (!cluster) {'), (363, '\t\tret = -EINVAL;'), (364, '\t\tgoto out;'), (365, '\t}'), (366, ''), (370, '\t cluster->cl_local_node != node->nd_num) {'), (371, '\t\tret = -EBUSY;'), (372, '\t\tgoto out;'), (373, '\t}'), (379, '\t\t\tgoto out;'), (394, '\tret = count;'), (395, ''), (396, 'out:'), (397, '\to2nm_unlock_subsystem();'), (398, '\treturn ret;'), (778, 'static inline void o2nm_lock_subsystem(void)'), (779, '{'), (780, '\tmutex_lock(&o2nm_cluster_group.cs_subsys.su_mutex);'), (781, '}'), (782, ''), (783, 'static inline void o2nm_unlock_subsystem(void)'), (784, '{'), (785, '\tmutex_unlock(&o2nm_cluster_group.cs_subsys.su_mutex);'), (786, '}'), (787, '')], 'deleted': [(184, '\treturn to_o2nm_cluster(node->nd_item.ci_parent->ci_parent);'), (197, '\tstruct o2nm_cluster *cluster = to_o2nm_cluster_from_node(node);'), (272, '\tstruct o2nm_cluster *cluster = to_o2nm_cluster_from_node(node);'), (318, '\tstruct o2nm_cluster *cluster = to_o2nm_cluster_from_node(node);'), (339, '\t cluster->cl_local_node != node->nd_num)'), (340, '\t\treturn -EBUSY;'), (346, '\t\t\treturn ret;'), (361, '\treturn count;')]}
55
8
669
3744
https://github.com/torvalds/linux
CVE-2017-18216
['CWE-476']
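The record above (CVE-2017-18216, CWE-476) fixes a NULL-pointer dereference in the o2nm configfs store handlers: to_o2nm_cluster_from_node() followed node->nd_item.ci_parent unconditionally, and a concurrent configfs rmdir can clear that parent, so the patch takes the subsystem su_mutex via the new o2nm_lock_subsystem()/o2nm_unlock_subsystem() helpers, re-resolves the cluster under the lock, and bails out with -EINVAL when it is gone. Below is a minimal user-space sketch of that lock-then-revalidate pattern using pthreads; subsys_mutex, node_t, cluster_t, and node_value_store() are hypothetical names, not the kernel API.

/*
 * Sketch of the lock-then-revalidate pattern applied for
 * CVE-2017-18216 (CWE-476). Not the kernel implementation:
 * subsys_mutex plays the role of the configfs su_mutex.
 */
#include <errno.h>
#include <pthread.h>
#include <sys/types.h>

static pthread_mutex_t subsys_mutex = PTHREAD_MUTEX_INITIALIZER;

typedef struct cluster cluster_t;   /* opaque, like struct o2nm_cluster */
typedef struct node {
        cluster_t *parent;          /* cleared by concurrent teardown */
        unsigned int value;
} node_t;

/*
 * Pre-fix handlers resolved the parent with no lock and no NULL check,
 * so a racing rmdir left them dereferencing a NULL parent pointer.
 */
static ssize_t node_value_store(node_t *node, unsigned int val)
{
        ssize_t ret;

        pthread_mutex_lock(&subsys_mutex);   /* o2nm_lock_subsystem() */
        if (!node->parent) {                 /* parent already torn down */
                ret = -EINVAL;
                goto out;
        }
        node->value = val;                   /* parent pinned while locked */
        ret = 1;
out:
        pthread_mutex_unlock(&subsys_mutex); /* o2nm_unlock_subsystem() */
        return ret;
}

int main(void)
{
        node_t n = { .parent = NULL, .value = 0 };

        /* With the parent gone, the store fails cleanly instead of crashing. */
        return node_value_store(&n, 7) == -EINVAL ? 0 : 1;
}

The design choice mirrored from the diff is that the parent is re-resolved and checked under the same mutex that teardown holds, so each handler either sees a valid parent for the whole store or returns an error without touching it.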
nodemanager.c
o2nm_node_local_store
/* -*- mode: c; c-basic-offset: 8; -*- * vim: noexpandtab sw=8 ts=8 sts=0: * * Copyright (C) 2004, 2005 Oracle. All rights reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this program; if not, write to the * Free Software Foundation, Inc., 59 Temple Place - Suite 330, * Boston, MA 021110-1307, USA. */ #include <linux/slab.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/configfs.h> #include "tcp.h" #include "nodemanager.h" #include "heartbeat.h" #include "masklog.h" #include "sys.h" /* for now we operate under the assertion that there can be only one * cluster active at a time. Changing this will require trickling * cluster references throughout where nodes are looked up */ struct o2nm_cluster *o2nm_single_cluster = NULL; char *o2nm_fence_method_desc[O2NM_FENCE_METHODS] = { "reset", /* O2NM_FENCE_RESET */ "panic", /* O2NM_FENCE_PANIC */ }; struct o2nm_node *o2nm_get_node_by_num(u8 node_num) { struct o2nm_node *node = NULL; if (node_num >= O2NM_MAX_NODES || o2nm_single_cluster == NULL) goto out; read_lock(&o2nm_single_cluster->cl_nodes_lock); node = o2nm_single_cluster->cl_nodes[node_num]; if (node) config_item_get(&node->nd_item); read_unlock(&o2nm_single_cluster->cl_nodes_lock); out: return node; } EXPORT_SYMBOL_GPL(o2nm_get_node_by_num); int o2nm_configured_node_map(unsigned long *map, unsigned bytes) { struct o2nm_cluster *cluster = o2nm_single_cluster; BUG_ON(bytes < (sizeof(cluster->cl_nodes_bitmap))); if (cluster == NULL) return -EINVAL; read_lock(&cluster->cl_nodes_lock); memcpy(map, cluster->cl_nodes_bitmap, sizeof(cluster->cl_nodes_bitmap)); read_unlock(&cluster->cl_nodes_lock); return 0; } EXPORT_SYMBOL_GPL(o2nm_configured_node_map); static struct o2nm_node *o2nm_node_ip_tree_lookup(struct o2nm_cluster *cluster, __be32 ip_needle, struct rb_node ***ret_p, struct rb_node **ret_parent) { struct rb_node **p = &cluster->cl_node_ip_tree.rb_node; struct rb_node *parent = NULL; struct o2nm_node *node, *ret = NULL; while (*p) { int cmp; parent = *p; node = rb_entry(parent, struct o2nm_node, nd_ip_node); cmp = memcmp(&ip_needle, &node->nd_ipv4_address, sizeof(ip_needle)); if (cmp < 0) p = &(*p)->rb_left; else if (cmp > 0) p = &(*p)->rb_right; else { ret = node; break; } } if (ret_p != NULL) *ret_p = p; if (ret_parent != NULL) *ret_parent = parent; return ret; } struct o2nm_node *o2nm_get_node_by_ip(__be32 addr) { struct o2nm_node *node = NULL; struct o2nm_cluster *cluster = o2nm_single_cluster; if (cluster == NULL) goto out; read_lock(&cluster->cl_nodes_lock); node = o2nm_node_ip_tree_lookup(cluster, addr, NULL, NULL); if (node) config_item_get(&node->nd_item); read_unlock(&cluster->cl_nodes_lock); out: return node; } EXPORT_SYMBOL_GPL(o2nm_get_node_by_ip); void o2nm_node_put(struct o2nm_node *node) { config_item_put(&node->nd_item); } EXPORT_SYMBOL_GPL(o2nm_node_put); void o2nm_node_get(struct o2nm_node *node) { config_item_get(&node->nd_item); } EXPORT_SYMBOL_GPL(o2nm_node_get); u8 o2nm_this_node(void) { u8 node_num = O2NM_MAX_NODES; if 
(o2nm_single_cluster && o2nm_single_cluster->cl_has_local) node_num = o2nm_single_cluster->cl_local_node; return node_num; } EXPORT_SYMBOL_GPL(o2nm_this_node); /* node configfs bits */ static struct o2nm_cluster *to_o2nm_cluster(struct config_item *item) { return item ? container_of(to_config_group(item), struct o2nm_cluster, cl_group) : NULL; } static struct o2nm_node *to_o2nm_node(struct config_item *item) { return item ? container_of(item, struct o2nm_node, nd_item) : NULL; } static void o2nm_node_release(struct config_item *item) { struct o2nm_node *node = to_o2nm_node(item); kfree(node); } static ssize_t o2nm_node_num_show(struct config_item *item, char *page) { return sprintf(page, "%d\n", to_o2nm_node(item)->nd_num); } static struct o2nm_cluster *to_o2nm_cluster_from_node(struct o2nm_node *node) { /* through the first node_set .parent * mycluster/nodes/mynode == o2nm_cluster->o2nm_node_group->o2nm_node */ return to_o2nm_cluster(node->nd_item.ci_parent->ci_parent); } enum { O2NM_NODE_ATTR_NUM = 0, O2NM_NODE_ATTR_PORT, O2NM_NODE_ATTR_ADDRESS, }; static ssize_t o2nm_node_num_store(struct config_item *item, const char *page, size_t count) { struct o2nm_node *node = to_o2nm_node(item); struct o2nm_cluster *cluster = to_o2nm_cluster_from_node(node); unsigned long tmp; char *p = (char *)page; int ret = 0; tmp = simple_strtoul(p, &p, 0); if (!p || (*p && (*p != '\n'))) return -EINVAL; if (tmp >= O2NM_MAX_NODES) return -ERANGE; /* once we're in the cl_nodes tree networking can look us up by * node number and try to use our address and port attributes * to connect to this node.. make sure that they've been set * before writing the node attribute? */ if (!test_bit(O2NM_NODE_ATTR_ADDRESS, &node->nd_set_attributes) || !test_bit(O2NM_NODE_ATTR_PORT, &node->nd_set_attributes)) return -EINVAL; /* XXX */ write_lock(&cluster->cl_nodes_lock); if (cluster->cl_nodes[tmp]) ret = -EEXIST; else if (test_and_set_bit(O2NM_NODE_ATTR_NUM, &node->nd_set_attributes)) ret = -EBUSY; else { cluster->cl_nodes[tmp] = node; node->nd_num = tmp; set_bit(tmp, cluster->cl_nodes_bitmap); } write_unlock(&cluster->cl_nodes_lock); if (ret) return ret; return count; } static ssize_t o2nm_node_ipv4_port_show(struct config_item *item, char *page) { return sprintf(page, "%u\n", ntohs(to_o2nm_node(item)->nd_ipv4_port)); } static ssize_t o2nm_node_ipv4_port_store(struct config_item *item, const char *page, size_t count) { struct o2nm_node *node = to_o2nm_node(item); unsigned long tmp; char *p = (char *)page; tmp = simple_strtoul(p, &p, 0); if (!p || (*p && (*p != '\n'))) return -EINVAL; if (tmp == 0) return -EINVAL; if (tmp >= (u16)-1) return -ERANGE; if (test_and_set_bit(O2NM_NODE_ATTR_PORT, &node->nd_set_attributes)) return -EBUSY; node->nd_ipv4_port = htons(tmp); return count; } static ssize_t o2nm_node_ipv4_address_show(struct config_item *item, char *page) { return sprintf(page, "%pI4\n", &to_o2nm_node(item)->nd_ipv4_address); } static ssize_t o2nm_node_ipv4_address_store(struct config_item *item, const char *page, size_t count) { struct o2nm_node *node = to_o2nm_node(item); struct o2nm_cluster *cluster = to_o2nm_cluster_from_node(node); int ret, i; struct rb_node **p, *parent; unsigned int octets[4]; __be32 ipv4_addr = 0; ret = sscanf(page, "%3u.%3u.%3u.%3u", &octets[3], &octets[2], &octets[1], &octets[0]); if (ret != 4) return -EINVAL; for (i = 0; i < ARRAY_SIZE(octets); i++) { if (octets[i] > 255) return -ERANGE; be32_add_cpu(&ipv4_addr, octets[i] << (i * 8)); } ret = 0; write_lock(&cluster->cl_nodes_lock); if 
(o2nm_node_ip_tree_lookup(cluster, ipv4_addr, &p, &parent)) ret = -EEXIST; else if (test_and_set_bit(O2NM_NODE_ATTR_ADDRESS, &node->nd_set_attributes)) ret = -EBUSY; else { rb_link_node(&node->nd_ip_node, parent, p); rb_insert_color(&node->nd_ip_node, &cluster->cl_node_ip_tree); } write_unlock(&cluster->cl_nodes_lock); if (ret) return ret; memcpy(&node->nd_ipv4_address, &ipv4_addr, sizeof(ipv4_addr)); return count; } static ssize_t o2nm_node_local_show(struct config_item *item, char *page) { return sprintf(page, "%d\n", to_o2nm_node(item)->nd_local); } static ssize_t o2nm_node_local_store(struct config_item *item, const char *page, size_t count) { struct o2nm_node *node = to_o2nm_node(item); struct o2nm_cluster *cluster = to_o2nm_cluster_from_node(node); unsigned long tmp; char *p = (char *)page; ssize_t ret; tmp = simple_strtoul(p, &p, 0); if (!p || (*p && (*p != '\n'))) return -EINVAL; tmp = !!tmp; /* boolean of whether this node wants to be local */ /* setting local turns on networking rx for now so we require having * set everything else first */ if (!test_bit(O2NM_NODE_ATTR_ADDRESS, &node->nd_set_attributes) || !test_bit(O2NM_NODE_ATTR_NUM, &node->nd_set_attributes) || !test_bit(O2NM_NODE_ATTR_PORT, &node->nd_set_attributes)) return -EINVAL; /* XXX */ /* the only failure case is trying to set a new local node * when a different one is already set */ if (tmp && tmp == cluster->cl_has_local && cluster->cl_local_node != node->nd_num) return -EBUSY; /* bring up the rx thread if we're setting the new local node. */ if (tmp && !cluster->cl_has_local) { ret = o2net_start_listening(node); if (ret) return ret; } if (!tmp && cluster->cl_has_local && cluster->cl_local_node == node->nd_num) { o2net_stop_listening(node); cluster->cl_local_node = O2NM_INVALID_NODE_NUM; } node->nd_local = tmp; if (node->nd_local) { cluster->cl_has_local = tmp; cluster->cl_local_node = node->nd_num; } return count; } CONFIGFS_ATTR(o2nm_node_, num); CONFIGFS_ATTR(o2nm_node_, ipv4_port); CONFIGFS_ATTR(o2nm_node_, ipv4_address); CONFIGFS_ATTR(o2nm_node_, local); static struct configfs_attribute *o2nm_node_attrs[] = { &o2nm_node_attr_num, &o2nm_node_attr_ipv4_port, &o2nm_node_attr_ipv4_address, &o2nm_node_attr_local, NULL, }; static struct configfs_item_operations o2nm_node_item_ops = { .release = o2nm_node_release, }; static const struct config_item_type o2nm_node_type = { .ct_item_ops = &o2nm_node_item_ops, .ct_attrs = o2nm_node_attrs, .ct_owner = THIS_MODULE, }; /* node set */ struct o2nm_node_group { struct config_group ns_group; /* some stuff? */ }; #if 0 static struct o2nm_node_group *to_o2nm_node_group(struct config_group *group) { return group ? 
container_of(group, struct o2nm_node_group, ns_group) : NULL; } #endif static ssize_t o2nm_cluster_attr_write(const char *page, ssize_t count, unsigned int *val) { unsigned long tmp; char *p = (char *)page; tmp = simple_strtoul(p, &p, 0); if (!p || (*p && (*p != '\n'))) return -EINVAL; if (tmp == 0) return -EINVAL; if (tmp >= (u32)-1) return -ERANGE; *val = tmp; return count; } static ssize_t o2nm_cluster_idle_timeout_ms_show(struct config_item *item, char *page) { return sprintf(page, "%u\n", to_o2nm_cluster(item)->cl_idle_timeout_ms); } static ssize_t o2nm_cluster_idle_timeout_ms_store(struct config_item *item, const char *page, size_t count) { struct o2nm_cluster *cluster = to_o2nm_cluster(item); ssize_t ret; unsigned int val; ret = o2nm_cluster_attr_write(page, count, &val); if (ret > 0) { if (cluster->cl_idle_timeout_ms != val && o2net_num_connected_peers()) { mlog(ML_NOTICE, "o2net: cannot change idle timeout after " "the first peer has agreed to it." " %d connected peers\n", o2net_num_connected_peers()); ret = -EINVAL; } else if (val <= cluster->cl_keepalive_delay_ms) { mlog(ML_NOTICE, "o2net: idle timeout must be larger " "than keepalive delay\n"); ret = -EINVAL; } else { cluster->cl_idle_timeout_ms = val; } } return ret; } static ssize_t o2nm_cluster_keepalive_delay_ms_show( struct config_item *item, char *page) { return sprintf(page, "%u\n", to_o2nm_cluster(item)->cl_keepalive_delay_ms); } static ssize_t o2nm_cluster_keepalive_delay_ms_store( struct config_item *item, const char *page, size_t count) { struct o2nm_cluster *cluster = to_o2nm_cluster(item); ssize_t ret; unsigned int val; ret = o2nm_cluster_attr_write(page, count, &val); if (ret > 0) { if (cluster->cl_keepalive_delay_ms != val && o2net_num_connected_peers()) { mlog(ML_NOTICE, "o2net: cannot change keepalive delay after" " the first peer has agreed to it." 
" %d connected peers\n", o2net_num_connected_peers()); ret = -EINVAL; } else if (val >= cluster->cl_idle_timeout_ms) { mlog(ML_NOTICE, "o2net: keepalive delay must be " "smaller than idle timeout\n"); ret = -EINVAL; } else { cluster->cl_keepalive_delay_ms = val; } } return ret; } static ssize_t o2nm_cluster_reconnect_delay_ms_show( struct config_item *item, char *page) { return sprintf(page, "%u\n", to_o2nm_cluster(item)->cl_reconnect_delay_ms); } static ssize_t o2nm_cluster_reconnect_delay_ms_store( struct config_item *item, const char *page, size_t count) { return o2nm_cluster_attr_write(page, count, &to_o2nm_cluster(item)->cl_reconnect_delay_ms); } static ssize_t o2nm_cluster_fence_method_show( struct config_item *item, char *page) { struct o2nm_cluster *cluster = to_o2nm_cluster(item); ssize_t ret = 0; if (cluster) ret = sprintf(page, "%s\n", o2nm_fence_method_desc[cluster->cl_fence_method]); return ret; } static ssize_t o2nm_cluster_fence_method_store( struct config_item *item, const char *page, size_t count) { unsigned int i; if (page[count - 1] != '\n') goto bail; for (i = 0; i < O2NM_FENCE_METHODS; ++i) { if (count != strlen(o2nm_fence_method_desc[i]) + 1) continue; if (strncasecmp(page, o2nm_fence_method_desc[i], count - 1)) continue; if (to_o2nm_cluster(item)->cl_fence_method != i) { printk(KERN_INFO "ocfs2: Changing fence method to %s\n", o2nm_fence_method_desc[i]); to_o2nm_cluster(item)->cl_fence_method = i; } return count; } bail: return -EINVAL; } CONFIGFS_ATTR(o2nm_cluster_, idle_timeout_ms); CONFIGFS_ATTR(o2nm_cluster_, keepalive_delay_ms); CONFIGFS_ATTR(o2nm_cluster_, reconnect_delay_ms); CONFIGFS_ATTR(o2nm_cluster_, fence_method); static struct configfs_attribute *o2nm_cluster_attrs[] = { &o2nm_cluster_attr_idle_timeout_ms, &o2nm_cluster_attr_keepalive_delay_ms, &o2nm_cluster_attr_reconnect_delay_ms, &o2nm_cluster_attr_fence_method, NULL, }; static struct config_item *o2nm_node_group_make_item(struct config_group *group, const char *name) { struct o2nm_node *node = NULL; if (strlen(name) > O2NM_MAX_NAME_LEN) return ERR_PTR(-ENAMETOOLONG); node = kzalloc(sizeof(struct o2nm_node), GFP_KERNEL); if (node == NULL) return ERR_PTR(-ENOMEM); strcpy(node->nd_name, name); /* use item.ci_namebuf instead? */ config_item_init_type_name(&node->nd_item, name, &o2nm_node_type); spin_lock_init(&node->nd_lock); mlog(ML_CLUSTER, "o2nm: Registering node %s\n", name); return &node->nd_item; } static void o2nm_node_group_drop_item(struct config_group *group, struct config_item *item) { struct o2nm_node *node = to_o2nm_node(item); struct o2nm_cluster *cluster = to_o2nm_cluster(group->cg_item.ci_parent); o2net_disconnect_node(node); if (cluster->cl_has_local && (cluster->cl_local_node == node->nd_num)) { cluster->cl_has_local = 0; cluster->cl_local_node = O2NM_INVALID_NODE_NUM; o2net_stop_listening(node); } /* XXX call into net to stop this node from trading messages */ write_lock(&cluster->cl_nodes_lock); /* XXX sloppy */ if (node->nd_ipv4_address) rb_erase(&node->nd_ip_node, &cluster->cl_node_ip_tree); /* nd_num might be 0 if the node number hasn't been set.. 
*/ if (cluster->cl_nodes[node->nd_num] == node) { cluster->cl_nodes[node->nd_num] = NULL; clear_bit(node->nd_num, cluster->cl_nodes_bitmap); } write_unlock(&cluster->cl_nodes_lock); mlog(ML_CLUSTER, "o2nm: Unregistered node %s\n", config_item_name(&node->nd_item)); config_item_put(item); } static struct configfs_group_operations o2nm_node_group_group_ops = { .make_item = o2nm_node_group_make_item, .drop_item = o2nm_node_group_drop_item, }; static const struct config_item_type o2nm_node_group_type = { .ct_group_ops = &o2nm_node_group_group_ops, .ct_owner = THIS_MODULE, }; /* cluster */ static void o2nm_cluster_release(struct config_item *item) { struct o2nm_cluster *cluster = to_o2nm_cluster(item); kfree(cluster); } static struct configfs_item_operations o2nm_cluster_item_ops = { .release = o2nm_cluster_release, }; static const struct config_item_type o2nm_cluster_type = { .ct_item_ops = &o2nm_cluster_item_ops, .ct_attrs = o2nm_cluster_attrs, .ct_owner = THIS_MODULE, }; /* cluster set */ struct o2nm_cluster_group { struct configfs_subsystem cs_subsys; /* some stuff? */ }; #if 0 static struct o2nm_cluster_group *to_o2nm_cluster_group(struct config_group *group) { return group ? container_of(to_configfs_subsystem(group), struct o2nm_cluster_group, cs_subsys) : NULL; } #endif static struct config_group *o2nm_cluster_group_make_group(struct config_group *group, const char *name) { struct o2nm_cluster *cluster = NULL; struct o2nm_node_group *ns = NULL; struct config_group *o2hb_group = NULL, *ret = NULL; /* this runs under the parent dir's i_mutex; there can be only * one caller in here at a time */ if (o2nm_single_cluster) return ERR_PTR(-ENOSPC); cluster = kzalloc(sizeof(struct o2nm_cluster), GFP_KERNEL); ns = kzalloc(sizeof(struct o2nm_node_group), GFP_KERNEL); o2hb_group = o2hb_alloc_hb_set(); if (cluster == NULL || ns == NULL || o2hb_group == NULL) goto out; config_group_init_type_name(&cluster->cl_group, name, &o2nm_cluster_type); configfs_add_default_group(&ns->ns_group, &cluster->cl_group); config_group_init_type_name(&ns->ns_group, "node", &o2nm_node_group_type); configfs_add_default_group(o2hb_group, &cluster->cl_group); rwlock_init(&cluster->cl_nodes_lock); cluster->cl_node_ip_tree = RB_ROOT; cluster->cl_reconnect_delay_ms = O2NET_RECONNECT_DELAY_MS_DEFAULT; cluster->cl_idle_timeout_ms = O2NET_IDLE_TIMEOUT_MS_DEFAULT; cluster->cl_keepalive_delay_ms = O2NET_KEEPALIVE_DELAY_MS_DEFAULT; cluster->cl_fence_method = O2NM_FENCE_RESET; ret = &cluster->cl_group; o2nm_single_cluster = cluster; out: if (ret == NULL) { kfree(cluster); kfree(ns); o2hb_free_hb_set(o2hb_group); ret = ERR_PTR(-ENOMEM); } return ret; } static void o2nm_cluster_group_drop_item(struct config_group *group, struct config_item *item) { struct o2nm_cluster *cluster = to_o2nm_cluster(item); BUG_ON(o2nm_single_cluster != cluster); o2nm_single_cluster = NULL; configfs_remove_default_groups(&cluster->cl_group); config_item_put(item); } static struct configfs_group_operations o2nm_cluster_group_group_ops = { .make_group = o2nm_cluster_group_make_group, .drop_item = o2nm_cluster_group_drop_item, }; static const struct config_item_type o2nm_cluster_group_type = { .ct_group_ops = &o2nm_cluster_group_group_ops, .ct_owner = THIS_MODULE, }; static struct o2nm_cluster_group o2nm_cluster_group = { .cs_subsys = { .su_group = { .cg_item = { .ci_namebuf = "cluster", .ci_type = &o2nm_cluster_group_type, }, }, }, }; int o2nm_depend_item(struct config_item *item) { return configfs_depend_item(&o2nm_cluster_group.cs_subsys, item); } void 
o2nm_undepend_item(struct config_item *item) { configfs_undepend_item(item); } int o2nm_depend_this_node(void) { int ret = 0; struct o2nm_node *local_node; local_node = o2nm_get_node_by_num(o2nm_this_node()); if (!local_node) { ret = -EINVAL; goto out; } ret = o2nm_depend_item(&local_node->nd_item); o2nm_node_put(local_node); out: return ret; } void o2nm_undepend_this_node(void) { struct o2nm_node *local_node; local_node = o2nm_get_node_by_num(o2nm_this_node()); BUG_ON(!local_node); o2nm_undepend_item(&local_node->nd_item); o2nm_node_put(local_node); } static void __exit exit_o2nm(void) { /* XXX sync with hb callbacks and shut down hb? */ o2net_unregister_hb_callbacks(); configfs_unregister_subsystem(&o2nm_cluster_group.cs_subsys); o2cb_sys_shutdown(); o2net_exit(); o2hb_exit(); } static int __init init_o2nm(void) { int ret = -1; ret = o2hb_init(); if (ret) goto out; ret = o2net_init(); if (ret) goto out_o2hb; ret = o2net_register_hb_callbacks(); if (ret) goto out_o2net; config_group_init(&o2nm_cluster_group.cs_subsys.su_group); mutex_init(&o2nm_cluster_group.cs_subsys.su_mutex); ret = configfs_register_subsystem(&o2nm_cluster_group.cs_subsys); if (ret) { printk(KERN_ERR "nodemanager: Registration returned %d\n", ret); goto out_callbacks; } ret = o2cb_sys_init(); if (!ret) goto out; configfs_unregister_subsystem(&o2nm_cluster_group.cs_subsys); out_callbacks: o2net_unregister_hb_callbacks(); out_o2net: o2net_exit(); out_o2hb: o2hb_exit(); out: return ret; } MODULE_AUTHOR("Oracle"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("OCFS2 cluster management"); module_init(init_o2nm) module_exit(exit_o2nm)
/* -*- mode: c; c-basic-offset: 8; -*- * vim: noexpandtab sw=8 ts=8 sts=0: * * Copyright (C) 2004, 2005 Oracle. All rights reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this program; if not, write to the * Free Software Foundation, Inc., 59 Temple Place - Suite 330, * Boston, MA 021110-1307, USA. */ #include <linux/slab.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/configfs.h> #include "tcp.h" #include "nodemanager.h" #include "heartbeat.h" #include "masklog.h" #include "sys.h" /* for now we operate under the assertion that there can be only one * cluster active at a time. Changing this will require trickling * cluster references throughout where nodes are looked up */ struct o2nm_cluster *o2nm_single_cluster = NULL; char *o2nm_fence_method_desc[O2NM_FENCE_METHODS] = { "reset", /* O2NM_FENCE_RESET */ "panic", /* O2NM_FENCE_PANIC */ }; static inline void o2nm_lock_subsystem(void); static inline void o2nm_unlock_subsystem(void); struct o2nm_node *o2nm_get_node_by_num(u8 node_num) { struct o2nm_node *node = NULL; if (node_num >= O2NM_MAX_NODES || o2nm_single_cluster == NULL) goto out; read_lock(&o2nm_single_cluster->cl_nodes_lock); node = o2nm_single_cluster->cl_nodes[node_num]; if (node) config_item_get(&node->nd_item); read_unlock(&o2nm_single_cluster->cl_nodes_lock); out: return node; } EXPORT_SYMBOL_GPL(o2nm_get_node_by_num); int o2nm_configured_node_map(unsigned long *map, unsigned bytes) { struct o2nm_cluster *cluster = o2nm_single_cluster; BUG_ON(bytes < (sizeof(cluster->cl_nodes_bitmap))); if (cluster == NULL) return -EINVAL; read_lock(&cluster->cl_nodes_lock); memcpy(map, cluster->cl_nodes_bitmap, sizeof(cluster->cl_nodes_bitmap)); read_unlock(&cluster->cl_nodes_lock); return 0; } EXPORT_SYMBOL_GPL(o2nm_configured_node_map); static struct o2nm_node *o2nm_node_ip_tree_lookup(struct o2nm_cluster *cluster, __be32 ip_needle, struct rb_node ***ret_p, struct rb_node **ret_parent) { struct rb_node **p = &cluster->cl_node_ip_tree.rb_node; struct rb_node *parent = NULL; struct o2nm_node *node, *ret = NULL; while (*p) { int cmp; parent = *p; node = rb_entry(parent, struct o2nm_node, nd_ip_node); cmp = memcmp(&ip_needle, &node->nd_ipv4_address, sizeof(ip_needle)); if (cmp < 0) p = &(*p)->rb_left; else if (cmp > 0) p = &(*p)->rb_right; else { ret = node; break; } } if (ret_p != NULL) *ret_p = p; if (ret_parent != NULL) *ret_parent = parent; return ret; } struct o2nm_node *o2nm_get_node_by_ip(__be32 addr) { struct o2nm_node *node = NULL; struct o2nm_cluster *cluster = o2nm_single_cluster; if (cluster == NULL) goto out; read_lock(&cluster->cl_nodes_lock); node = o2nm_node_ip_tree_lookup(cluster, addr, NULL, NULL); if (node) config_item_get(&node->nd_item); read_unlock(&cluster->cl_nodes_lock); out: return node; } EXPORT_SYMBOL_GPL(o2nm_get_node_by_ip); void o2nm_node_put(struct o2nm_node *node) { config_item_put(&node->nd_item); } EXPORT_SYMBOL_GPL(o2nm_node_put); void o2nm_node_get(struct o2nm_node *node) { config_item_get(&node->nd_item); } 
EXPORT_SYMBOL_GPL(o2nm_node_get); u8 o2nm_this_node(void) { u8 node_num = O2NM_MAX_NODES; if (o2nm_single_cluster && o2nm_single_cluster->cl_has_local) node_num = o2nm_single_cluster->cl_local_node; return node_num; } EXPORT_SYMBOL_GPL(o2nm_this_node); /* node configfs bits */ static struct o2nm_cluster *to_o2nm_cluster(struct config_item *item) { return item ? container_of(to_config_group(item), struct o2nm_cluster, cl_group) : NULL; } static struct o2nm_node *to_o2nm_node(struct config_item *item) { return item ? container_of(item, struct o2nm_node, nd_item) : NULL; } static void o2nm_node_release(struct config_item *item) { struct o2nm_node *node = to_o2nm_node(item); kfree(node); } static ssize_t o2nm_node_num_show(struct config_item *item, char *page) { return sprintf(page, "%d\n", to_o2nm_node(item)->nd_num); } static struct o2nm_cluster *to_o2nm_cluster_from_node(struct o2nm_node *node) { /* through the first node_set .parent * mycluster/nodes/mynode == o2nm_cluster->o2nm_node_group->o2nm_node */ if (node->nd_item.ci_parent) return to_o2nm_cluster(node->nd_item.ci_parent->ci_parent); else return NULL; } enum { O2NM_NODE_ATTR_NUM = 0, O2NM_NODE_ATTR_PORT, O2NM_NODE_ATTR_ADDRESS, }; static ssize_t o2nm_node_num_store(struct config_item *item, const char *page, size_t count) { struct o2nm_node *node = to_o2nm_node(item); struct o2nm_cluster *cluster; unsigned long tmp; char *p = (char *)page; int ret = 0; tmp = simple_strtoul(p, &p, 0); if (!p || (*p && (*p != '\n'))) return -EINVAL; if (tmp >= O2NM_MAX_NODES) return -ERANGE; /* once we're in the cl_nodes tree networking can look us up by * node number and try to use our address and port attributes * to connect to this node.. make sure that they've been set * before writing the node attribute? */ if (!test_bit(O2NM_NODE_ATTR_ADDRESS, &node->nd_set_attributes) || !test_bit(O2NM_NODE_ATTR_PORT, &node->nd_set_attributes)) return -EINVAL; /* XXX */ o2nm_lock_subsystem(); cluster = to_o2nm_cluster_from_node(node); if (!cluster) { o2nm_unlock_subsystem(); return -EINVAL; } write_lock(&cluster->cl_nodes_lock); if (cluster->cl_nodes[tmp]) ret = -EEXIST; else if (test_and_set_bit(O2NM_NODE_ATTR_NUM, &node->nd_set_attributes)) ret = -EBUSY; else { cluster->cl_nodes[tmp] = node; node->nd_num = tmp; set_bit(tmp, cluster->cl_nodes_bitmap); } write_unlock(&cluster->cl_nodes_lock); o2nm_unlock_subsystem(); if (ret) return ret; return count; } static ssize_t o2nm_node_ipv4_port_show(struct config_item *item, char *page) { return sprintf(page, "%u\n", ntohs(to_o2nm_node(item)->nd_ipv4_port)); } static ssize_t o2nm_node_ipv4_port_store(struct config_item *item, const char *page, size_t count) { struct o2nm_node *node = to_o2nm_node(item); unsigned long tmp; char *p = (char *)page; tmp = simple_strtoul(p, &p, 0); if (!p || (*p && (*p != '\n'))) return -EINVAL; if (tmp == 0) return -EINVAL; if (tmp >= (u16)-1) return -ERANGE; if (test_and_set_bit(O2NM_NODE_ATTR_PORT, &node->nd_set_attributes)) return -EBUSY; node->nd_ipv4_port = htons(tmp); return count; } static ssize_t o2nm_node_ipv4_address_show(struct config_item *item, char *page) { return sprintf(page, "%pI4\n", &to_o2nm_node(item)->nd_ipv4_address); } static ssize_t o2nm_node_ipv4_address_store(struct config_item *item, const char *page, size_t count) { struct o2nm_node *node = to_o2nm_node(item); struct o2nm_cluster *cluster; int ret, i; struct rb_node **p, *parent; unsigned int octets[4]; __be32 ipv4_addr = 0; ret = sscanf(page, "%3u.%3u.%3u.%3u", &octets[3], &octets[2], &octets[1], &octets[0]); 
if (ret != 4) return -EINVAL; for (i = 0; i < ARRAY_SIZE(octets); i++) { if (octets[i] > 255) return -ERANGE; be32_add_cpu(&ipv4_addr, octets[i] << (i * 8)); } o2nm_lock_subsystem(); cluster = to_o2nm_cluster_from_node(node); if (!cluster) { o2nm_unlock_subsystem(); return -EINVAL; } ret = 0; write_lock(&cluster->cl_nodes_lock); if (o2nm_node_ip_tree_lookup(cluster, ipv4_addr, &p, &parent)) ret = -EEXIST; else if (test_and_set_bit(O2NM_NODE_ATTR_ADDRESS, &node->nd_set_attributes)) ret = -EBUSY; else { rb_link_node(&node->nd_ip_node, parent, p); rb_insert_color(&node->nd_ip_node, &cluster->cl_node_ip_tree); } write_unlock(&cluster->cl_nodes_lock); o2nm_unlock_subsystem(); if (ret) return ret; memcpy(&node->nd_ipv4_address, &ipv4_addr, sizeof(ipv4_addr)); return count; } static ssize_t o2nm_node_local_show(struct config_item *item, char *page) { return sprintf(page, "%d\n", to_o2nm_node(item)->nd_local); } static ssize_t o2nm_node_local_store(struct config_item *item, const char *page, size_t count) { struct o2nm_node *node = to_o2nm_node(item); struct o2nm_cluster *cluster; unsigned long tmp; char *p = (char *)page; ssize_t ret; tmp = simple_strtoul(p, &p, 0); if (!p || (*p && (*p != '\n'))) return -EINVAL; tmp = !!tmp; /* boolean of whether this node wants to be local */ /* setting local turns on networking rx for now so we require having * set everything else first */ if (!test_bit(O2NM_NODE_ATTR_ADDRESS, &node->nd_set_attributes) || !test_bit(O2NM_NODE_ATTR_NUM, &node->nd_set_attributes) || !test_bit(O2NM_NODE_ATTR_PORT, &node->nd_set_attributes)) return -EINVAL; /* XXX */ o2nm_lock_subsystem(); cluster = to_o2nm_cluster_from_node(node); if (!cluster) { ret = -EINVAL; goto out; } /* the only failure case is trying to set a new local node * when a different one is already set */ if (tmp && tmp == cluster->cl_has_local && cluster->cl_local_node != node->nd_num) { ret = -EBUSY; goto out; } /* bring up the rx thread if we're setting the new local node. */ if (tmp && !cluster->cl_has_local) { ret = o2net_start_listening(node); if (ret) goto out; } if (!tmp && cluster->cl_has_local && cluster->cl_local_node == node->nd_num) { o2net_stop_listening(node); cluster->cl_local_node = O2NM_INVALID_NODE_NUM; } node->nd_local = tmp; if (node->nd_local) { cluster->cl_has_local = tmp; cluster->cl_local_node = node->nd_num; } ret = count; out: o2nm_unlock_subsystem(); return ret; } CONFIGFS_ATTR(o2nm_node_, num); CONFIGFS_ATTR(o2nm_node_, ipv4_port); CONFIGFS_ATTR(o2nm_node_, ipv4_address); CONFIGFS_ATTR(o2nm_node_, local); static struct configfs_attribute *o2nm_node_attrs[] = { &o2nm_node_attr_num, &o2nm_node_attr_ipv4_port, &o2nm_node_attr_ipv4_address, &o2nm_node_attr_local, NULL, }; static struct configfs_item_operations o2nm_node_item_ops = { .release = o2nm_node_release, }; static const struct config_item_type o2nm_node_type = { .ct_item_ops = &o2nm_node_item_ops, .ct_attrs = o2nm_node_attrs, .ct_owner = THIS_MODULE, }; /* node set */ struct o2nm_node_group { struct config_group ns_group; /* some stuff? */ }; #if 0 static struct o2nm_node_group *to_o2nm_node_group(struct config_group *group) { return group ? 
container_of(group, struct o2nm_node_group, ns_group) : NULL; } #endif static ssize_t o2nm_cluster_attr_write(const char *page, ssize_t count, unsigned int *val) { unsigned long tmp; char *p = (char *)page; tmp = simple_strtoul(p, &p, 0); if (!p || (*p && (*p != '\n'))) return -EINVAL; if (tmp == 0) return -EINVAL; if (tmp >= (u32)-1) return -ERANGE; *val = tmp; return count; } static ssize_t o2nm_cluster_idle_timeout_ms_show(struct config_item *item, char *page) { return sprintf(page, "%u\n", to_o2nm_cluster(item)->cl_idle_timeout_ms); } static ssize_t o2nm_cluster_idle_timeout_ms_store(struct config_item *item, const char *page, size_t count) { struct o2nm_cluster *cluster = to_o2nm_cluster(item); ssize_t ret; unsigned int val; ret = o2nm_cluster_attr_write(page, count, &val); if (ret > 0) { if (cluster->cl_idle_timeout_ms != val && o2net_num_connected_peers()) { mlog(ML_NOTICE, "o2net: cannot change idle timeout after " "the first peer has agreed to it." " %d connected peers\n", o2net_num_connected_peers()); ret = -EINVAL; } else if (val <= cluster->cl_keepalive_delay_ms) { mlog(ML_NOTICE, "o2net: idle timeout must be larger " "than keepalive delay\n"); ret = -EINVAL; } else { cluster->cl_idle_timeout_ms = val; } } return ret; } static ssize_t o2nm_cluster_keepalive_delay_ms_show( struct config_item *item, char *page) { return sprintf(page, "%u\n", to_o2nm_cluster(item)->cl_keepalive_delay_ms); } static ssize_t o2nm_cluster_keepalive_delay_ms_store( struct config_item *item, const char *page, size_t count) { struct o2nm_cluster *cluster = to_o2nm_cluster(item); ssize_t ret; unsigned int val; ret = o2nm_cluster_attr_write(page, count, &val); if (ret > 0) { if (cluster->cl_keepalive_delay_ms != val && o2net_num_connected_peers()) { mlog(ML_NOTICE, "o2net: cannot change keepalive delay after" " the first peer has agreed to it." 
" %d connected peers\n", o2net_num_connected_peers()); ret = -EINVAL; } else if (val >= cluster->cl_idle_timeout_ms) { mlog(ML_NOTICE, "o2net: keepalive delay must be " "smaller than idle timeout\n"); ret = -EINVAL; } else { cluster->cl_keepalive_delay_ms = val; } } return ret; } static ssize_t o2nm_cluster_reconnect_delay_ms_show( struct config_item *item, char *page) { return sprintf(page, "%u\n", to_o2nm_cluster(item)->cl_reconnect_delay_ms); } static ssize_t o2nm_cluster_reconnect_delay_ms_store( struct config_item *item, const char *page, size_t count) { return o2nm_cluster_attr_write(page, count, &to_o2nm_cluster(item)->cl_reconnect_delay_ms); } static ssize_t o2nm_cluster_fence_method_show( struct config_item *item, char *page) { struct o2nm_cluster *cluster = to_o2nm_cluster(item); ssize_t ret = 0; if (cluster) ret = sprintf(page, "%s\n", o2nm_fence_method_desc[cluster->cl_fence_method]); return ret; } static ssize_t o2nm_cluster_fence_method_store( struct config_item *item, const char *page, size_t count) { unsigned int i; if (page[count - 1] != '\n') goto bail; for (i = 0; i < O2NM_FENCE_METHODS; ++i) { if (count != strlen(o2nm_fence_method_desc[i]) + 1) continue; if (strncasecmp(page, o2nm_fence_method_desc[i], count - 1)) continue; if (to_o2nm_cluster(item)->cl_fence_method != i) { printk(KERN_INFO "ocfs2: Changing fence method to %s\n", o2nm_fence_method_desc[i]); to_o2nm_cluster(item)->cl_fence_method = i; } return count; } bail: return -EINVAL; } CONFIGFS_ATTR(o2nm_cluster_, idle_timeout_ms); CONFIGFS_ATTR(o2nm_cluster_, keepalive_delay_ms); CONFIGFS_ATTR(o2nm_cluster_, reconnect_delay_ms); CONFIGFS_ATTR(o2nm_cluster_, fence_method); static struct configfs_attribute *o2nm_cluster_attrs[] = { &o2nm_cluster_attr_idle_timeout_ms, &o2nm_cluster_attr_keepalive_delay_ms, &o2nm_cluster_attr_reconnect_delay_ms, &o2nm_cluster_attr_fence_method, NULL, }; static struct config_item *o2nm_node_group_make_item(struct config_group *group, const char *name) { struct o2nm_node *node = NULL; if (strlen(name) > O2NM_MAX_NAME_LEN) return ERR_PTR(-ENAMETOOLONG); node = kzalloc(sizeof(struct o2nm_node), GFP_KERNEL); if (node == NULL) return ERR_PTR(-ENOMEM); strcpy(node->nd_name, name); /* use item.ci_namebuf instead? */ config_item_init_type_name(&node->nd_item, name, &o2nm_node_type); spin_lock_init(&node->nd_lock); mlog(ML_CLUSTER, "o2nm: Registering node %s\n", name); return &node->nd_item; } static void o2nm_node_group_drop_item(struct config_group *group, struct config_item *item) { struct o2nm_node *node = to_o2nm_node(item); struct o2nm_cluster *cluster = to_o2nm_cluster(group->cg_item.ci_parent); o2net_disconnect_node(node); if (cluster->cl_has_local && (cluster->cl_local_node == node->nd_num)) { cluster->cl_has_local = 0; cluster->cl_local_node = O2NM_INVALID_NODE_NUM; o2net_stop_listening(node); } /* XXX call into net to stop this node from trading messages */ write_lock(&cluster->cl_nodes_lock); /* XXX sloppy */ if (node->nd_ipv4_address) rb_erase(&node->nd_ip_node, &cluster->cl_node_ip_tree); /* nd_num might be 0 if the node number hasn't been set.. 
*/ if (cluster->cl_nodes[node->nd_num] == node) { cluster->cl_nodes[node->nd_num] = NULL; clear_bit(node->nd_num, cluster->cl_nodes_bitmap); } write_unlock(&cluster->cl_nodes_lock); mlog(ML_CLUSTER, "o2nm: Unregistered node %s\n", config_item_name(&node->nd_item)); config_item_put(item); } static struct configfs_group_operations o2nm_node_group_group_ops = { .make_item = o2nm_node_group_make_item, .drop_item = o2nm_node_group_drop_item, }; static const struct config_item_type o2nm_node_group_type = { .ct_group_ops = &o2nm_node_group_group_ops, .ct_owner = THIS_MODULE, }; /* cluster */ static void o2nm_cluster_release(struct config_item *item) { struct o2nm_cluster *cluster = to_o2nm_cluster(item); kfree(cluster); } static struct configfs_item_operations o2nm_cluster_item_ops = { .release = o2nm_cluster_release, }; static const struct config_item_type o2nm_cluster_type = { .ct_item_ops = &o2nm_cluster_item_ops, .ct_attrs = o2nm_cluster_attrs, .ct_owner = THIS_MODULE, }; /* cluster set */ struct o2nm_cluster_group { struct configfs_subsystem cs_subsys; /* some stuff? */ }; #if 0 static struct o2nm_cluster_group *to_o2nm_cluster_group(struct config_group *group) { return group ? container_of(to_configfs_subsystem(group), struct o2nm_cluster_group, cs_subsys) : NULL; } #endif static struct config_group *o2nm_cluster_group_make_group(struct config_group *group, const char *name) { struct o2nm_cluster *cluster = NULL; struct o2nm_node_group *ns = NULL; struct config_group *o2hb_group = NULL, *ret = NULL; /* this runs under the parent dir's i_mutex; there can be only * one caller in here at a time */ if (o2nm_single_cluster) return ERR_PTR(-ENOSPC); cluster = kzalloc(sizeof(struct o2nm_cluster), GFP_KERNEL); ns = kzalloc(sizeof(struct o2nm_node_group), GFP_KERNEL); o2hb_group = o2hb_alloc_hb_set(); if (cluster == NULL || ns == NULL || o2hb_group == NULL) goto out; config_group_init_type_name(&cluster->cl_group, name, &o2nm_cluster_type); configfs_add_default_group(&ns->ns_group, &cluster->cl_group); config_group_init_type_name(&ns->ns_group, "node", &o2nm_node_group_type); configfs_add_default_group(o2hb_group, &cluster->cl_group); rwlock_init(&cluster->cl_nodes_lock); cluster->cl_node_ip_tree = RB_ROOT; cluster->cl_reconnect_delay_ms = O2NET_RECONNECT_DELAY_MS_DEFAULT; cluster->cl_idle_timeout_ms = O2NET_IDLE_TIMEOUT_MS_DEFAULT; cluster->cl_keepalive_delay_ms = O2NET_KEEPALIVE_DELAY_MS_DEFAULT; cluster->cl_fence_method = O2NM_FENCE_RESET; ret = &cluster->cl_group; o2nm_single_cluster = cluster; out: if (ret == NULL) { kfree(cluster); kfree(ns); o2hb_free_hb_set(o2hb_group); ret = ERR_PTR(-ENOMEM); } return ret; } static void o2nm_cluster_group_drop_item(struct config_group *group, struct config_item *item) { struct o2nm_cluster *cluster = to_o2nm_cluster(item); BUG_ON(o2nm_single_cluster != cluster); o2nm_single_cluster = NULL; configfs_remove_default_groups(&cluster->cl_group); config_item_put(item); } static struct configfs_group_operations o2nm_cluster_group_group_ops = { .make_group = o2nm_cluster_group_make_group, .drop_item = o2nm_cluster_group_drop_item, }; static const struct config_item_type o2nm_cluster_group_type = { .ct_group_ops = &o2nm_cluster_group_group_ops, .ct_owner = THIS_MODULE, }; static struct o2nm_cluster_group o2nm_cluster_group = { .cs_subsys = { .su_group = { .cg_item = { .ci_namebuf = "cluster", .ci_type = &o2nm_cluster_group_type, }, }, }, }; static inline void o2nm_lock_subsystem(void) { mutex_lock(&o2nm_cluster_group.cs_subsys.su_mutex); } static inline void 
o2nm_unlock_subsystem(void) { mutex_unlock(&o2nm_cluster_group.cs_subsys.su_mutex); } int o2nm_depend_item(struct config_item *item) { return configfs_depend_item(&o2nm_cluster_group.cs_subsys, item); } void o2nm_undepend_item(struct config_item *item) { configfs_undepend_item(item); } int o2nm_depend_this_node(void) { int ret = 0; struct o2nm_node *local_node; local_node = o2nm_get_node_by_num(o2nm_this_node()); if (!local_node) { ret = -EINVAL; goto out; } ret = o2nm_depend_item(&local_node->nd_item); o2nm_node_put(local_node); out: return ret; } void o2nm_undepend_this_node(void) { struct o2nm_node *local_node; local_node = o2nm_get_node_by_num(o2nm_this_node()); BUG_ON(!local_node); o2nm_undepend_item(&local_node->nd_item); o2nm_node_put(local_node); } static void __exit exit_o2nm(void) { /* XXX sync with hb callbacks and shut down hb? */ o2net_unregister_hb_callbacks(); configfs_unregister_subsystem(&o2nm_cluster_group.cs_subsys); o2cb_sys_shutdown(); o2net_exit(); o2hb_exit(); } static int __init init_o2nm(void) { int ret = -1; ret = o2hb_init(); if (ret) goto out; ret = o2net_init(); if (ret) goto out_o2hb; ret = o2net_register_hb_callbacks(); if (ret) goto out_o2net; config_group_init(&o2nm_cluster_group.cs_subsys.su_group); mutex_init(&o2nm_cluster_group.cs_subsys.su_mutex); ret = configfs_register_subsystem(&o2nm_cluster_group.cs_subsys); if (ret) { printk(KERN_ERR "nodemanager: Registration returned %d\n", ret); goto out_callbacks; } ret = o2cb_sys_init(); if (!ret) goto out; configfs_unregister_subsystem(&o2nm_cluster_group.cs_subsys); out_callbacks: o2net_unregister_hb_callbacks(); out_o2net: o2net_exit(); out_o2hb: o2hb_exit(); out: return ret; } MODULE_AUTHOR("Oracle"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("OCFS2 cluster management"); module_init(init_o2nm) module_exit(exit_o2nm)
static ssize_t o2nm_node_local_store(struct config_item *item, const char *page, size_t count) { struct o2nm_node *node = to_o2nm_node(item); struct o2nm_cluster *cluster = to_o2nm_cluster_from_node(node); unsigned long tmp; char *p = (char *)page; ssize_t ret; tmp = simple_strtoul(p, &p, 0); if (!p || (*p && (*p != '\n'))) return -EINVAL; tmp = !!tmp; /* boolean of whether this node wants to be local */ /* setting local turns on networking rx for now so we require having * set everything else first */ if (!test_bit(O2NM_NODE_ATTR_ADDRESS, &node->nd_set_attributes) || !test_bit(O2NM_NODE_ATTR_NUM, &node->nd_set_attributes) || !test_bit(O2NM_NODE_ATTR_PORT, &node->nd_set_attributes)) return -EINVAL; /* XXX */ /* the only failure case is trying to set a new local node * when a different one is already set */ if (tmp && tmp == cluster->cl_has_local && cluster->cl_local_node != node->nd_num) return -EBUSY; /* bring up the rx thread if we're setting the new local node. */ if (tmp && !cluster->cl_has_local) { ret = o2net_start_listening(node); if (ret) return ret; } if (!tmp && cluster->cl_has_local && cluster->cl_local_node == node->nd_num) { o2net_stop_listening(node); cluster->cl_local_node = O2NM_INVALID_NODE_NUM; } node->nd_local = tmp; if (node->nd_local) { cluster->cl_has_local = tmp; cluster->cl_local_node = node->nd_num; } return count; }
static ssize_t o2nm_node_local_store(struct config_item *item, const char *page, size_t count) { struct o2nm_node *node = to_o2nm_node(item); struct o2nm_cluster *cluster; unsigned long tmp; char *p = (char *)page; ssize_t ret; tmp = simple_strtoul(p, &p, 0); if (!p || (*p && (*p != '\n'))) return -EINVAL; tmp = !!tmp; /* boolean of whether this node wants to be local */ /* setting local turns on networking rx for now so we require having * set everything else first */ if (!test_bit(O2NM_NODE_ATTR_ADDRESS, &node->nd_set_attributes) || !test_bit(O2NM_NODE_ATTR_NUM, &node->nd_set_attributes) || !test_bit(O2NM_NODE_ATTR_PORT, &node->nd_set_attributes)) return -EINVAL; /* XXX */ o2nm_lock_subsystem(); cluster = to_o2nm_cluster_from_node(node); if (!cluster) { ret = -EINVAL; goto out; } /* the only failure case is trying to set a new local node * when a different one is already set */ if (tmp && tmp == cluster->cl_has_local && cluster->cl_local_node != node->nd_num) { ret = -EBUSY; goto out; } /* bring up the rx thread if we're setting the new local node. */ if (tmp && !cluster->cl_has_local) { ret = o2net_start_listening(node); if (ret) goto out; } if (!tmp && cluster->cl_has_local && cluster->cl_local_node == node->nd_num) { o2net_stop_listening(node); cluster->cl_local_node = O2NM_INVALID_NODE_NUM; } node->nd_local = tmp; if (node->nd_local) { cluster->cl_has_local = tmp; cluster->cl_local_node = node->nd_num; } ret = count; out: o2nm_unlock_subsystem(); return ret; }
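Note on the func_before/func_after pair above: in the patched o2nm_node_local_store the cluster pointer is no longer resolved at declaration time from the configfs parent chain; it is looked up only after o2nm_lock_subsystem(), every failure between lock and unlock exits through a single out: label, and a NULL cluster now returns -EINVAL instead of being dereferenced. The diff field on the next line lists the same hunks for the whole file; a runnable sketch of the pattern follows it.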
{'added': [(43, 'static inline void o2nm_lock_subsystem(void);'), (44, 'static inline void o2nm_unlock_subsystem(void);'), (45, ''), (187, '\tif (node->nd_item.ci_parent)'), (188, '\t\treturn to_o2nm_cluster(node->nd_item.ci_parent->ci_parent);'), (189, '\telse'), (190, '\t\treturn NULL;'), (203, '\tstruct o2nm_cluster *cluster;'), (223, '\to2nm_lock_subsystem();'), (224, '\tcluster = to_o2nm_cluster_from_node(node);'), (225, '\tif (!cluster) {'), (226, '\t\to2nm_unlock_subsystem();'), (227, '\t\treturn -EINVAL;'), (228, '\t}'), (229, ''), (242, '\to2nm_unlock_subsystem();'), (243, ''), (287, '\tstruct o2nm_cluster *cluster;'), (304, '\to2nm_lock_subsystem();'), (305, '\tcluster = to_o2nm_cluster_from_node(node);'), (306, '\tif (!cluster) {'), (307, '\t\to2nm_unlock_subsystem();'), (308, '\t\treturn -EINVAL;'), (309, '\t}'), (310, ''), (323, '\to2nm_unlock_subsystem();'), (324, ''), (342, '\tstruct o2nm_cluster *cluster;'), (360, '\to2nm_lock_subsystem();'), (361, '\tcluster = to_o2nm_cluster_from_node(node);'), (362, '\tif (!cluster) {'), (363, '\t\tret = -EINVAL;'), (364, '\t\tgoto out;'), (365, '\t}'), (366, ''), (370, '\t cluster->cl_local_node != node->nd_num) {'), (371, '\t\tret = -EBUSY;'), (372, '\t\tgoto out;'), (373, '\t}'), (379, '\t\t\tgoto out;'), (394, '\tret = count;'), (395, ''), (396, 'out:'), (397, '\to2nm_unlock_subsystem();'), (398, '\treturn ret;'), (778, 'static inline void o2nm_lock_subsystem(void)'), (779, '{'), (780, '\tmutex_lock(&o2nm_cluster_group.cs_subsys.su_mutex);'), (781, '}'), (782, ''), (783, 'static inline void o2nm_unlock_subsystem(void)'), (784, '{'), (785, '\tmutex_unlock(&o2nm_cluster_group.cs_subsys.su_mutex);'), (786, '}'), (787, '')], 'deleted': [(184, '\treturn to_o2nm_cluster(node->nd_item.ci_parent->ci_parent);'), (197, '\tstruct o2nm_cluster *cluster = to_o2nm_cluster_from_node(node);'), (272, '\tstruct o2nm_cluster *cluster = to_o2nm_cluster_from_node(node);'), (318, '\tstruct o2nm_cluster *cluster = to_o2nm_cluster_from_node(node);'), (339, '\t cluster->cl_local_node != node->nd_num)'), (340, '\t\treturn -EBUSY;'), (346, '\t\t\treturn ret;'), (361, '\treturn count;')]}
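Every added hunk in the diff applies one pattern: take the configfs subsystem mutex before walking node->nd_item.ci_parent, tolerate a severed parent chain, and release the mutex on all exit paths. Below is a minimal userspace model of that pattern, a sketch rather than kernel code: the demo_* struct names, the pthread mutex, and the -1 error value are invented stand-ins for config_item, su_mutex, and -EINVAL.

/* Minimal userspace model of the fix (illustrative sketch, not the
 * kernel source). All names here are invented for the demo. */
#include <stdio.h>
#include <pthread.h>

struct demo_item {
	struct demo_item *parent;	/* plays the role of ci_parent */
	const char *name;
};

struct demo_cluster {
	struct demo_item item;		/* embedded first, so a cast works */
	int local_node;
};

static pthread_mutex_t subsys_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Post-fix shape of to_o2nm_cluster_from_node(): the pre-fix code did
 * the equivalent of `return (struct demo_cluster *)node->parent->parent;`
 * unconditionally, which dereferences NULL once teardown clears parent. */
static struct demo_cluster *cluster_of(struct demo_item *node)
{
	if (node->parent)
		return (struct demo_cluster *)node->parent->parent;
	return NULL;
}

/* Post-fix shape of a *_store() handler: lock, resolve, single exit. */
static int store_local(struct demo_item *node, int val)
{
	struct demo_cluster *cluster;
	int ret = 0;

	pthread_mutex_lock(&subsys_mutex);	/* o2nm_lock_subsystem() */
	cluster = cluster_of(node);
	if (!cluster) {
		ret = -1;			/* -EINVAL in the kernel */
		goto out;
	}
	cluster->local_node = val;
out:
	pthread_mutex_unlock(&subsys_mutex);	/* o2nm_unlock_subsystem() */
	return ret;
}

int main(void)
{
	struct demo_cluster cl = { { NULL, "cluster" }, -1 };
	struct demo_item group = { &cl.item, "node" };
	struct demo_item node = { &group, "node0" };

	printf("attached store: %d\n", store_local(&node, 3));
	node.parent = NULL;	/* simulate concurrent configfs teardown */
	printf("detached store: %d\n", store_local(&node, 4));
	return 0;
}

Built with cc -pthread, the first store succeeds (0) and the store against the detached item fails cleanly (-1) instead of crashing, which is exactly the behavioral change the patch makes.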
55
8
669
3744
https://github.com/torvalds/linux
CVE-2017-18216
['CWE-476']
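CWE-476 here is a NULL pointer dereference: to_o2nm_cluster_from_node() dereferences node->nd_item.ci_parent->ci_parent, and ci_parent can already be NULL if the node item is being detached from its group while an attribute store is in flight. The fix, visible in code_after above, serializes the store handlers against that teardown by holding o2nm_cluster_group.cs_subsys.su_mutex through the new o2nm_lock_subsystem()/o2nm_unlock_subsystem() helpers and fails the write with -EINVAL once the cluster can no longer be resolved.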
nodemanager.c
o2nm_node_num_store
/* -*- mode: c; c-basic-offset: 8; -*- * vim: noexpandtab sw=8 ts=8 sts=0: * * Copyright (C) 2004, 2005 Oracle. All rights reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this program; if not, write to the * Free Software Foundation, Inc., 59 Temple Place - Suite 330, * Boston, MA 021110-1307, USA. */ #include <linux/slab.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/configfs.h> #include "tcp.h" #include "nodemanager.h" #include "heartbeat.h" #include "masklog.h" #include "sys.h" /* for now we operate under the assertion that there can be only one * cluster active at a time. Changing this will require trickling * cluster references throughout where nodes are looked up */ struct o2nm_cluster *o2nm_single_cluster = NULL; char *o2nm_fence_method_desc[O2NM_FENCE_METHODS] = { "reset", /* O2NM_FENCE_RESET */ "panic", /* O2NM_FENCE_PANIC */ }; struct o2nm_node *o2nm_get_node_by_num(u8 node_num) { struct o2nm_node *node = NULL; if (node_num >= O2NM_MAX_NODES || o2nm_single_cluster == NULL) goto out; read_lock(&o2nm_single_cluster->cl_nodes_lock); node = o2nm_single_cluster->cl_nodes[node_num]; if (node) config_item_get(&node->nd_item); read_unlock(&o2nm_single_cluster->cl_nodes_lock); out: return node; } EXPORT_SYMBOL_GPL(o2nm_get_node_by_num); int o2nm_configured_node_map(unsigned long *map, unsigned bytes) { struct o2nm_cluster *cluster = o2nm_single_cluster; BUG_ON(bytes < (sizeof(cluster->cl_nodes_bitmap))); if (cluster == NULL) return -EINVAL; read_lock(&cluster->cl_nodes_lock); memcpy(map, cluster->cl_nodes_bitmap, sizeof(cluster->cl_nodes_bitmap)); read_unlock(&cluster->cl_nodes_lock); return 0; } EXPORT_SYMBOL_GPL(o2nm_configured_node_map); static struct o2nm_node *o2nm_node_ip_tree_lookup(struct o2nm_cluster *cluster, __be32 ip_needle, struct rb_node ***ret_p, struct rb_node **ret_parent) { struct rb_node **p = &cluster->cl_node_ip_tree.rb_node; struct rb_node *parent = NULL; struct o2nm_node *node, *ret = NULL; while (*p) { int cmp; parent = *p; node = rb_entry(parent, struct o2nm_node, nd_ip_node); cmp = memcmp(&ip_needle, &node->nd_ipv4_address, sizeof(ip_needle)); if (cmp < 0) p = &(*p)->rb_left; else if (cmp > 0) p = &(*p)->rb_right; else { ret = node; break; } } if (ret_p != NULL) *ret_p = p; if (ret_parent != NULL) *ret_parent = parent; return ret; } struct o2nm_node *o2nm_get_node_by_ip(__be32 addr) { struct o2nm_node *node = NULL; struct o2nm_cluster *cluster = o2nm_single_cluster; if (cluster == NULL) goto out; read_lock(&cluster->cl_nodes_lock); node = o2nm_node_ip_tree_lookup(cluster, addr, NULL, NULL); if (node) config_item_get(&node->nd_item); read_unlock(&cluster->cl_nodes_lock); out: return node; } EXPORT_SYMBOL_GPL(o2nm_get_node_by_ip); void o2nm_node_put(struct o2nm_node *node) { config_item_put(&node->nd_item); } EXPORT_SYMBOL_GPL(o2nm_node_put); void o2nm_node_get(struct o2nm_node *node) { config_item_get(&node->nd_item); } EXPORT_SYMBOL_GPL(o2nm_node_get); u8 o2nm_this_node(void) { u8 node_num = O2NM_MAX_NODES; if 
(o2nm_single_cluster && o2nm_single_cluster->cl_has_local) node_num = o2nm_single_cluster->cl_local_node; return node_num; } EXPORT_SYMBOL_GPL(o2nm_this_node); /* node configfs bits */ static struct o2nm_cluster *to_o2nm_cluster(struct config_item *item) { return item ? container_of(to_config_group(item), struct o2nm_cluster, cl_group) : NULL; } static struct o2nm_node *to_o2nm_node(struct config_item *item) { return item ? container_of(item, struct o2nm_node, nd_item) : NULL; } static void o2nm_node_release(struct config_item *item) { struct o2nm_node *node = to_o2nm_node(item); kfree(node); } static ssize_t o2nm_node_num_show(struct config_item *item, char *page) { return sprintf(page, "%d\n", to_o2nm_node(item)->nd_num); } static struct o2nm_cluster *to_o2nm_cluster_from_node(struct o2nm_node *node) { /* through the first node_set .parent * mycluster/nodes/mynode == o2nm_cluster->o2nm_node_group->o2nm_node */ return to_o2nm_cluster(node->nd_item.ci_parent->ci_parent); } enum { O2NM_NODE_ATTR_NUM = 0, O2NM_NODE_ATTR_PORT, O2NM_NODE_ATTR_ADDRESS, }; static ssize_t o2nm_node_num_store(struct config_item *item, const char *page, size_t count) { struct o2nm_node *node = to_o2nm_node(item); struct o2nm_cluster *cluster = to_o2nm_cluster_from_node(node); unsigned long tmp; char *p = (char *)page; int ret = 0; tmp = simple_strtoul(p, &p, 0); if (!p || (*p && (*p != '\n'))) return -EINVAL; if (tmp >= O2NM_MAX_NODES) return -ERANGE; /* once we're in the cl_nodes tree networking can look us up by * node number and try to use our address and port attributes * to connect to this node.. make sure that they've been set * before writing the node attribute? */ if (!test_bit(O2NM_NODE_ATTR_ADDRESS, &node->nd_set_attributes) || !test_bit(O2NM_NODE_ATTR_PORT, &node->nd_set_attributes)) return -EINVAL; /* XXX */ write_lock(&cluster->cl_nodes_lock); if (cluster->cl_nodes[tmp]) ret = -EEXIST; else if (test_and_set_bit(O2NM_NODE_ATTR_NUM, &node->nd_set_attributes)) ret = -EBUSY; else { cluster->cl_nodes[tmp] = node; node->nd_num = tmp; set_bit(tmp, cluster->cl_nodes_bitmap); } write_unlock(&cluster->cl_nodes_lock); if (ret) return ret; return count; } static ssize_t o2nm_node_ipv4_port_show(struct config_item *item, char *page) { return sprintf(page, "%u\n", ntohs(to_o2nm_node(item)->nd_ipv4_port)); } static ssize_t o2nm_node_ipv4_port_store(struct config_item *item, const char *page, size_t count) { struct o2nm_node *node = to_o2nm_node(item); unsigned long tmp; char *p = (char *)page; tmp = simple_strtoul(p, &p, 0); if (!p || (*p && (*p != '\n'))) return -EINVAL; if (tmp == 0) return -EINVAL; if (tmp >= (u16)-1) return -ERANGE; if (test_and_set_bit(O2NM_NODE_ATTR_PORT, &node->nd_set_attributes)) return -EBUSY; node->nd_ipv4_port = htons(tmp); return count; } static ssize_t o2nm_node_ipv4_address_show(struct config_item *item, char *page) { return sprintf(page, "%pI4\n", &to_o2nm_node(item)->nd_ipv4_address); } static ssize_t o2nm_node_ipv4_address_store(struct config_item *item, const char *page, size_t count) { struct o2nm_node *node = to_o2nm_node(item); struct o2nm_cluster *cluster = to_o2nm_cluster_from_node(node); int ret, i; struct rb_node **p, *parent; unsigned int octets[4]; __be32 ipv4_addr = 0; ret = sscanf(page, "%3u.%3u.%3u.%3u", &octets[3], &octets[2], &octets[1], &octets[0]); if (ret != 4) return -EINVAL; for (i = 0; i < ARRAY_SIZE(octets); i++) { if (octets[i] > 255) return -ERANGE; be32_add_cpu(&ipv4_addr, octets[i] << (i * 8)); } ret = 0; write_lock(&cluster->cl_nodes_lock); if 
(o2nm_node_ip_tree_lookup(cluster, ipv4_addr, &p, &parent)) ret = -EEXIST; else if (test_and_set_bit(O2NM_NODE_ATTR_ADDRESS, &node->nd_set_attributes)) ret = -EBUSY; else { rb_link_node(&node->nd_ip_node, parent, p); rb_insert_color(&node->nd_ip_node, &cluster->cl_node_ip_tree); } write_unlock(&cluster->cl_nodes_lock); if (ret) return ret; memcpy(&node->nd_ipv4_address, &ipv4_addr, sizeof(ipv4_addr)); return count; } static ssize_t o2nm_node_local_show(struct config_item *item, char *page) { return sprintf(page, "%d\n", to_o2nm_node(item)->nd_local); } static ssize_t o2nm_node_local_store(struct config_item *item, const char *page, size_t count) { struct o2nm_node *node = to_o2nm_node(item); struct o2nm_cluster *cluster = to_o2nm_cluster_from_node(node); unsigned long tmp; char *p = (char *)page; ssize_t ret; tmp = simple_strtoul(p, &p, 0); if (!p || (*p && (*p != '\n'))) return -EINVAL; tmp = !!tmp; /* boolean of whether this node wants to be local */ /* setting local turns on networking rx for now so we require having * set everything else first */ if (!test_bit(O2NM_NODE_ATTR_ADDRESS, &node->nd_set_attributes) || !test_bit(O2NM_NODE_ATTR_NUM, &node->nd_set_attributes) || !test_bit(O2NM_NODE_ATTR_PORT, &node->nd_set_attributes)) return -EINVAL; /* XXX */ /* the only failure case is trying to set a new local node * when a different one is already set */ if (tmp && tmp == cluster->cl_has_local && cluster->cl_local_node != node->nd_num) return -EBUSY; /* bring up the rx thread if we're setting the new local node. */ if (tmp && !cluster->cl_has_local) { ret = o2net_start_listening(node); if (ret) return ret; } if (!tmp && cluster->cl_has_local && cluster->cl_local_node == node->nd_num) { o2net_stop_listening(node); cluster->cl_local_node = O2NM_INVALID_NODE_NUM; } node->nd_local = tmp; if (node->nd_local) { cluster->cl_has_local = tmp; cluster->cl_local_node = node->nd_num; } return count; } CONFIGFS_ATTR(o2nm_node_, num); CONFIGFS_ATTR(o2nm_node_, ipv4_port); CONFIGFS_ATTR(o2nm_node_, ipv4_address); CONFIGFS_ATTR(o2nm_node_, local); static struct configfs_attribute *o2nm_node_attrs[] = { &o2nm_node_attr_num, &o2nm_node_attr_ipv4_port, &o2nm_node_attr_ipv4_address, &o2nm_node_attr_local, NULL, }; static struct configfs_item_operations o2nm_node_item_ops = { .release = o2nm_node_release, }; static const struct config_item_type o2nm_node_type = { .ct_item_ops = &o2nm_node_item_ops, .ct_attrs = o2nm_node_attrs, .ct_owner = THIS_MODULE, }; /* node set */ struct o2nm_node_group { struct config_group ns_group; /* some stuff? */ }; #if 0 static struct o2nm_node_group *to_o2nm_node_group(struct config_group *group) { return group ? 
container_of(group, struct o2nm_node_group, ns_group) : NULL; } #endif static ssize_t o2nm_cluster_attr_write(const char *page, ssize_t count, unsigned int *val) { unsigned long tmp; char *p = (char *)page; tmp = simple_strtoul(p, &p, 0); if (!p || (*p && (*p != '\n'))) return -EINVAL; if (tmp == 0) return -EINVAL; if (tmp >= (u32)-1) return -ERANGE; *val = tmp; return count; } static ssize_t o2nm_cluster_idle_timeout_ms_show(struct config_item *item, char *page) { return sprintf(page, "%u\n", to_o2nm_cluster(item)->cl_idle_timeout_ms); } static ssize_t o2nm_cluster_idle_timeout_ms_store(struct config_item *item, const char *page, size_t count) { struct o2nm_cluster *cluster = to_o2nm_cluster(item); ssize_t ret; unsigned int val; ret = o2nm_cluster_attr_write(page, count, &val); if (ret > 0) { if (cluster->cl_idle_timeout_ms != val && o2net_num_connected_peers()) { mlog(ML_NOTICE, "o2net: cannot change idle timeout after " "the first peer has agreed to it." " %d connected peers\n", o2net_num_connected_peers()); ret = -EINVAL; } else if (val <= cluster->cl_keepalive_delay_ms) { mlog(ML_NOTICE, "o2net: idle timeout must be larger " "than keepalive delay\n"); ret = -EINVAL; } else { cluster->cl_idle_timeout_ms = val; } } return ret; } static ssize_t o2nm_cluster_keepalive_delay_ms_show( struct config_item *item, char *page) { return sprintf(page, "%u\n", to_o2nm_cluster(item)->cl_keepalive_delay_ms); } static ssize_t o2nm_cluster_keepalive_delay_ms_store( struct config_item *item, const char *page, size_t count) { struct o2nm_cluster *cluster = to_o2nm_cluster(item); ssize_t ret; unsigned int val; ret = o2nm_cluster_attr_write(page, count, &val); if (ret > 0) { if (cluster->cl_keepalive_delay_ms != val && o2net_num_connected_peers()) { mlog(ML_NOTICE, "o2net: cannot change keepalive delay after" " the first peer has agreed to it." 
" %d connected peers\n", o2net_num_connected_peers()); ret = -EINVAL; } else if (val >= cluster->cl_idle_timeout_ms) { mlog(ML_NOTICE, "o2net: keepalive delay must be " "smaller than idle timeout\n"); ret = -EINVAL; } else { cluster->cl_keepalive_delay_ms = val; } } return ret; } static ssize_t o2nm_cluster_reconnect_delay_ms_show( struct config_item *item, char *page) { return sprintf(page, "%u\n", to_o2nm_cluster(item)->cl_reconnect_delay_ms); } static ssize_t o2nm_cluster_reconnect_delay_ms_store( struct config_item *item, const char *page, size_t count) { return o2nm_cluster_attr_write(page, count, &to_o2nm_cluster(item)->cl_reconnect_delay_ms); } static ssize_t o2nm_cluster_fence_method_show( struct config_item *item, char *page) { struct o2nm_cluster *cluster = to_o2nm_cluster(item); ssize_t ret = 0; if (cluster) ret = sprintf(page, "%s\n", o2nm_fence_method_desc[cluster->cl_fence_method]); return ret; } static ssize_t o2nm_cluster_fence_method_store( struct config_item *item, const char *page, size_t count) { unsigned int i; if (page[count - 1] != '\n') goto bail; for (i = 0; i < O2NM_FENCE_METHODS; ++i) { if (count != strlen(o2nm_fence_method_desc[i]) + 1) continue; if (strncasecmp(page, o2nm_fence_method_desc[i], count - 1)) continue; if (to_o2nm_cluster(item)->cl_fence_method != i) { printk(KERN_INFO "ocfs2: Changing fence method to %s\n", o2nm_fence_method_desc[i]); to_o2nm_cluster(item)->cl_fence_method = i; } return count; } bail: return -EINVAL; } CONFIGFS_ATTR(o2nm_cluster_, idle_timeout_ms); CONFIGFS_ATTR(o2nm_cluster_, keepalive_delay_ms); CONFIGFS_ATTR(o2nm_cluster_, reconnect_delay_ms); CONFIGFS_ATTR(o2nm_cluster_, fence_method); static struct configfs_attribute *o2nm_cluster_attrs[] = { &o2nm_cluster_attr_idle_timeout_ms, &o2nm_cluster_attr_keepalive_delay_ms, &o2nm_cluster_attr_reconnect_delay_ms, &o2nm_cluster_attr_fence_method, NULL, }; static struct config_item *o2nm_node_group_make_item(struct config_group *group, const char *name) { struct o2nm_node *node = NULL; if (strlen(name) > O2NM_MAX_NAME_LEN) return ERR_PTR(-ENAMETOOLONG); node = kzalloc(sizeof(struct o2nm_node), GFP_KERNEL); if (node == NULL) return ERR_PTR(-ENOMEM); strcpy(node->nd_name, name); /* use item.ci_namebuf instead? */ config_item_init_type_name(&node->nd_item, name, &o2nm_node_type); spin_lock_init(&node->nd_lock); mlog(ML_CLUSTER, "o2nm: Registering node %s\n", name); return &node->nd_item; } static void o2nm_node_group_drop_item(struct config_group *group, struct config_item *item) { struct o2nm_node *node = to_o2nm_node(item); struct o2nm_cluster *cluster = to_o2nm_cluster(group->cg_item.ci_parent); o2net_disconnect_node(node); if (cluster->cl_has_local && (cluster->cl_local_node == node->nd_num)) { cluster->cl_has_local = 0; cluster->cl_local_node = O2NM_INVALID_NODE_NUM; o2net_stop_listening(node); } /* XXX call into net to stop this node from trading messages */ write_lock(&cluster->cl_nodes_lock); /* XXX sloppy */ if (node->nd_ipv4_address) rb_erase(&node->nd_ip_node, &cluster->cl_node_ip_tree); /* nd_num might be 0 if the node number hasn't been set.. 
*/ if (cluster->cl_nodes[node->nd_num] == node) { cluster->cl_nodes[node->nd_num] = NULL; clear_bit(node->nd_num, cluster->cl_nodes_bitmap); } write_unlock(&cluster->cl_nodes_lock); mlog(ML_CLUSTER, "o2nm: Unregistered node %s\n", config_item_name(&node->nd_item)); config_item_put(item); } static struct configfs_group_operations o2nm_node_group_group_ops = { .make_item = o2nm_node_group_make_item, .drop_item = o2nm_node_group_drop_item, }; static const struct config_item_type o2nm_node_group_type = { .ct_group_ops = &o2nm_node_group_group_ops, .ct_owner = THIS_MODULE, }; /* cluster */ static void o2nm_cluster_release(struct config_item *item) { struct o2nm_cluster *cluster = to_o2nm_cluster(item); kfree(cluster); } static struct configfs_item_operations o2nm_cluster_item_ops = { .release = o2nm_cluster_release, }; static const struct config_item_type o2nm_cluster_type = { .ct_item_ops = &o2nm_cluster_item_ops, .ct_attrs = o2nm_cluster_attrs, .ct_owner = THIS_MODULE, }; /* cluster set */ struct o2nm_cluster_group { struct configfs_subsystem cs_subsys; /* some stuff? */ }; #if 0 static struct o2nm_cluster_group *to_o2nm_cluster_group(struct config_group *group) { return group ? container_of(to_configfs_subsystem(group), struct o2nm_cluster_group, cs_subsys) : NULL; } #endif static struct config_group *o2nm_cluster_group_make_group(struct config_group *group, const char *name) { struct o2nm_cluster *cluster = NULL; struct o2nm_node_group *ns = NULL; struct config_group *o2hb_group = NULL, *ret = NULL; /* this runs under the parent dir's i_mutex; there can be only * one caller in here at a time */ if (o2nm_single_cluster) return ERR_PTR(-ENOSPC); cluster = kzalloc(sizeof(struct o2nm_cluster), GFP_KERNEL); ns = kzalloc(sizeof(struct o2nm_node_group), GFP_KERNEL); o2hb_group = o2hb_alloc_hb_set(); if (cluster == NULL || ns == NULL || o2hb_group == NULL) goto out; config_group_init_type_name(&cluster->cl_group, name, &o2nm_cluster_type); configfs_add_default_group(&ns->ns_group, &cluster->cl_group); config_group_init_type_name(&ns->ns_group, "node", &o2nm_node_group_type); configfs_add_default_group(o2hb_group, &cluster->cl_group); rwlock_init(&cluster->cl_nodes_lock); cluster->cl_node_ip_tree = RB_ROOT; cluster->cl_reconnect_delay_ms = O2NET_RECONNECT_DELAY_MS_DEFAULT; cluster->cl_idle_timeout_ms = O2NET_IDLE_TIMEOUT_MS_DEFAULT; cluster->cl_keepalive_delay_ms = O2NET_KEEPALIVE_DELAY_MS_DEFAULT; cluster->cl_fence_method = O2NM_FENCE_RESET; ret = &cluster->cl_group; o2nm_single_cluster = cluster; out: if (ret == NULL) { kfree(cluster); kfree(ns); o2hb_free_hb_set(o2hb_group); ret = ERR_PTR(-ENOMEM); } return ret; } static void o2nm_cluster_group_drop_item(struct config_group *group, struct config_item *item) { struct o2nm_cluster *cluster = to_o2nm_cluster(item); BUG_ON(o2nm_single_cluster != cluster); o2nm_single_cluster = NULL; configfs_remove_default_groups(&cluster->cl_group); config_item_put(item); } static struct configfs_group_operations o2nm_cluster_group_group_ops = { .make_group = o2nm_cluster_group_make_group, .drop_item = o2nm_cluster_group_drop_item, }; static const struct config_item_type o2nm_cluster_group_type = { .ct_group_ops = &o2nm_cluster_group_group_ops, .ct_owner = THIS_MODULE, }; static struct o2nm_cluster_group o2nm_cluster_group = { .cs_subsys = { .su_group = { .cg_item = { .ci_namebuf = "cluster", .ci_type = &o2nm_cluster_group_type, }, }, }, }; int o2nm_depend_item(struct config_item *item) { return configfs_depend_item(&o2nm_cluster_group.cs_subsys, item); } void 
o2nm_undepend_item(struct config_item *item) { configfs_undepend_item(item); } int o2nm_depend_this_node(void) { int ret = 0; struct o2nm_node *local_node; local_node = o2nm_get_node_by_num(o2nm_this_node()); if (!local_node) { ret = -EINVAL; goto out; } ret = o2nm_depend_item(&local_node->nd_item); o2nm_node_put(local_node); out: return ret; } void o2nm_undepend_this_node(void) { struct o2nm_node *local_node; local_node = o2nm_get_node_by_num(o2nm_this_node()); BUG_ON(!local_node); o2nm_undepend_item(&local_node->nd_item); o2nm_node_put(local_node); } static void __exit exit_o2nm(void) { /* XXX sync with hb callbacks and shut down hb? */ o2net_unregister_hb_callbacks(); configfs_unregister_subsystem(&o2nm_cluster_group.cs_subsys); o2cb_sys_shutdown(); o2net_exit(); o2hb_exit(); } static int __init init_o2nm(void) { int ret = -1; ret = o2hb_init(); if (ret) goto out; ret = o2net_init(); if (ret) goto out_o2hb; ret = o2net_register_hb_callbacks(); if (ret) goto out_o2net; config_group_init(&o2nm_cluster_group.cs_subsys.su_group); mutex_init(&o2nm_cluster_group.cs_subsys.su_mutex); ret = configfs_register_subsystem(&o2nm_cluster_group.cs_subsys); if (ret) { printk(KERN_ERR "nodemanager: Registration returned %d\n", ret); goto out_callbacks; } ret = o2cb_sys_init(); if (!ret) goto out; configfs_unregister_subsystem(&o2nm_cluster_group.cs_subsys); out_callbacks: o2net_unregister_hb_callbacks(); out_o2net: o2net_exit(); out_o2hb: o2hb_exit(); out: return ret; } MODULE_AUTHOR("Oracle"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("OCFS2 cluster management"); module_init(init_o2nm) module_exit(exit_o2nm)
/* -*- mode: c; c-basic-offset: 8; -*- * vim: noexpandtab sw=8 ts=8 sts=0: * * Copyright (C) 2004, 2005 Oracle. All rights reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this program; if not, write to the * Free Software Foundation, Inc., 59 Temple Place - Suite 330, * Boston, MA 021110-1307, USA. */ #include <linux/slab.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/configfs.h> #include "tcp.h" #include "nodemanager.h" #include "heartbeat.h" #include "masklog.h" #include "sys.h" /* for now we operate under the assertion that there can be only one * cluster active at a time. Changing this will require trickling * cluster references throughout where nodes are looked up */ struct o2nm_cluster *o2nm_single_cluster = NULL; char *o2nm_fence_method_desc[O2NM_FENCE_METHODS] = { "reset", /* O2NM_FENCE_RESET */ "panic", /* O2NM_FENCE_PANIC */ }; static inline void o2nm_lock_subsystem(void); static inline void o2nm_unlock_subsystem(void); struct o2nm_node *o2nm_get_node_by_num(u8 node_num) { struct o2nm_node *node = NULL; if (node_num >= O2NM_MAX_NODES || o2nm_single_cluster == NULL) goto out; read_lock(&o2nm_single_cluster->cl_nodes_lock); node = o2nm_single_cluster->cl_nodes[node_num]; if (node) config_item_get(&node->nd_item); read_unlock(&o2nm_single_cluster->cl_nodes_lock); out: return node; } EXPORT_SYMBOL_GPL(o2nm_get_node_by_num); int o2nm_configured_node_map(unsigned long *map, unsigned bytes) { struct o2nm_cluster *cluster = o2nm_single_cluster; BUG_ON(bytes < (sizeof(cluster->cl_nodes_bitmap))); if (cluster == NULL) return -EINVAL; read_lock(&cluster->cl_nodes_lock); memcpy(map, cluster->cl_nodes_bitmap, sizeof(cluster->cl_nodes_bitmap)); read_unlock(&cluster->cl_nodes_lock); return 0; } EXPORT_SYMBOL_GPL(o2nm_configured_node_map); static struct o2nm_node *o2nm_node_ip_tree_lookup(struct o2nm_cluster *cluster, __be32 ip_needle, struct rb_node ***ret_p, struct rb_node **ret_parent) { struct rb_node **p = &cluster->cl_node_ip_tree.rb_node; struct rb_node *parent = NULL; struct o2nm_node *node, *ret = NULL; while (*p) { int cmp; parent = *p; node = rb_entry(parent, struct o2nm_node, nd_ip_node); cmp = memcmp(&ip_needle, &node->nd_ipv4_address, sizeof(ip_needle)); if (cmp < 0) p = &(*p)->rb_left; else if (cmp > 0) p = &(*p)->rb_right; else { ret = node; break; } } if (ret_p != NULL) *ret_p = p; if (ret_parent != NULL) *ret_parent = parent; return ret; } struct o2nm_node *o2nm_get_node_by_ip(__be32 addr) { struct o2nm_node *node = NULL; struct o2nm_cluster *cluster = o2nm_single_cluster; if (cluster == NULL) goto out; read_lock(&cluster->cl_nodes_lock); node = o2nm_node_ip_tree_lookup(cluster, addr, NULL, NULL); if (node) config_item_get(&node->nd_item); read_unlock(&cluster->cl_nodes_lock); out: return node; } EXPORT_SYMBOL_GPL(o2nm_get_node_by_ip); void o2nm_node_put(struct o2nm_node *node) { config_item_put(&node->nd_item); } EXPORT_SYMBOL_GPL(o2nm_node_put); void o2nm_node_get(struct o2nm_node *node) { config_item_get(&node->nd_item); } 
EXPORT_SYMBOL_GPL(o2nm_node_get); u8 o2nm_this_node(void) { u8 node_num = O2NM_MAX_NODES; if (o2nm_single_cluster && o2nm_single_cluster->cl_has_local) node_num = o2nm_single_cluster->cl_local_node; return node_num; } EXPORT_SYMBOL_GPL(o2nm_this_node); /* node configfs bits */ static struct o2nm_cluster *to_o2nm_cluster(struct config_item *item) { return item ? container_of(to_config_group(item), struct o2nm_cluster, cl_group) : NULL; } static struct o2nm_node *to_o2nm_node(struct config_item *item) { return item ? container_of(item, struct o2nm_node, nd_item) : NULL; } static void o2nm_node_release(struct config_item *item) { struct o2nm_node *node = to_o2nm_node(item); kfree(node); } static ssize_t o2nm_node_num_show(struct config_item *item, char *page) { return sprintf(page, "%d\n", to_o2nm_node(item)->nd_num); } static struct o2nm_cluster *to_o2nm_cluster_from_node(struct o2nm_node *node) { /* through the first node_set .parent * mycluster/nodes/mynode == o2nm_cluster->o2nm_node_group->o2nm_node */ if (node->nd_item.ci_parent) return to_o2nm_cluster(node->nd_item.ci_parent->ci_parent); else return NULL; } enum { O2NM_NODE_ATTR_NUM = 0, O2NM_NODE_ATTR_PORT, O2NM_NODE_ATTR_ADDRESS, }; static ssize_t o2nm_node_num_store(struct config_item *item, const char *page, size_t count) { struct o2nm_node *node = to_o2nm_node(item); struct o2nm_cluster *cluster; unsigned long tmp; char *p = (char *)page; int ret = 0; tmp = simple_strtoul(p, &p, 0); if (!p || (*p && (*p != '\n'))) return -EINVAL; if (tmp >= O2NM_MAX_NODES) return -ERANGE; /* once we're in the cl_nodes tree networking can look us up by * node number and try to use our address and port attributes * to connect to this node.. make sure that they've been set * before writing the node attribute? */ if (!test_bit(O2NM_NODE_ATTR_ADDRESS, &node->nd_set_attributes) || !test_bit(O2NM_NODE_ATTR_PORT, &node->nd_set_attributes)) return -EINVAL; /* XXX */ o2nm_lock_subsystem(); cluster = to_o2nm_cluster_from_node(node); if (!cluster) { o2nm_unlock_subsystem(); return -EINVAL; } write_lock(&cluster->cl_nodes_lock); if (cluster->cl_nodes[tmp]) ret = -EEXIST; else if (test_and_set_bit(O2NM_NODE_ATTR_NUM, &node->nd_set_attributes)) ret = -EBUSY; else { cluster->cl_nodes[tmp] = node; node->nd_num = tmp; set_bit(tmp, cluster->cl_nodes_bitmap); } write_unlock(&cluster->cl_nodes_lock); o2nm_unlock_subsystem(); if (ret) return ret; return count; } static ssize_t o2nm_node_ipv4_port_show(struct config_item *item, char *page) { return sprintf(page, "%u\n", ntohs(to_o2nm_node(item)->nd_ipv4_port)); } static ssize_t o2nm_node_ipv4_port_store(struct config_item *item, const char *page, size_t count) { struct o2nm_node *node = to_o2nm_node(item); unsigned long tmp; char *p = (char *)page; tmp = simple_strtoul(p, &p, 0); if (!p || (*p && (*p != '\n'))) return -EINVAL; if (tmp == 0) return -EINVAL; if (tmp >= (u16)-1) return -ERANGE; if (test_and_set_bit(O2NM_NODE_ATTR_PORT, &node->nd_set_attributes)) return -EBUSY; node->nd_ipv4_port = htons(tmp); return count; } static ssize_t o2nm_node_ipv4_address_show(struct config_item *item, char *page) { return sprintf(page, "%pI4\n", &to_o2nm_node(item)->nd_ipv4_address); } static ssize_t o2nm_node_ipv4_address_store(struct config_item *item, const char *page, size_t count) { struct o2nm_node *node = to_o2nm_node(item); struct o2nm_cluster *cluster; int ret, i; struct rb_node **p, *parent; unsigned int octets[4]; __be32 ipv4_addr = 0; ret = sscanf(page, "%3u.%3u.%3u.%3u", &octets[3], &octets[2], &octets[1], &octets[0]); 
if (ret != 4) return -EINVAL; for (i = 0; i < ARRAY_SIZE(octets); i++) { if (octets[i] > 255) return -ERANGE; be32_add_cpu(&ipv4_addr, octets[i] << (i * 8)); } o2nm_lock_subsystem(); cluster = to_o2nm_cluster_from_node(node); if (!cluster) { o2nm_unlock_subsystem(); return -EINVAL; } ret = 0; write_lock(&cluster->cl_nodes_lock); if (o2nm_node_ip_tree_lookup(cluster, ipv4_addr, &p, &parent)) ret = -EEXIST; else if (test_and_set_bit(O2NM_NODE_ATTR_ADDRESS, &node->nd_set_attributes)) ret = -EBUSY; else { rb_link_node(&node->nd_ip_node, parent, p); rb_insert_color(&node->nd_ip_node, &cluster->cl_node_ip_tree); } write_unlock(&cluster->cl_nodes_lock); o2nm_unlock_subsystem(); if (ret) return ret; memcpy(&node->nd_ipv4_address, &ipv4_addr, sizeof(ipv4_addr)); return count; } static ssize_t o2nm_node_local_show(struct config_item *item, char *page) { return sprintf(page, "%d\n", to_o2nm_node(item)->nd_local); } static ssize_t o2nm_node_local_store(struct config_item *item, const char *page, size_t count) { struct o2nm_node *node = to_o2nm_node(item); struct o2nm_cluster *cluster; unsigned long tmp; char *p = (char *)page; ssize_t ret; tmp = simple_strtoul(p, &p, 0); if (!p || (*p && (*p != '\n'))) return -EINVAL; tmp = !!tmp; /* boolean of whether this node wants to be local */ /* setting local turns on networking rx for now so we require having * set everything else first */ if (!test_bit(O2NM_NODE_ATTR_ADDRESS, &node->nd_set_attributes) || !test_bit(O2NM_NODE_ATTR_NUM, &node->nd_set_attributes) || !test_bit(O2NM_NODE_ATTR_PORT, &node->nd_set_attributes)) return -EINVAL; /* XXX */ o2nm_lock_subsystem(); cluster = to_o2nm_cluster_from_node(node); if (!cluster) { ret = -EINVAL; goto out; } /* the only failure case is trying to set a new local node * when a different one is already set */ if (tmp && tmp == cluster->cl_has_local && cluster->cl_local_node != node->nd_num) { ret = -EBUSY; goto out; } /* bring up the rx thread if we're setting the new local node. */ if (tmp && !cluster->cl_has_local) { ret = o2net_start_listening(node); if (ret) goto out; } if (!tmp && cluster->cl_has_local && cluster->cl_local_node == node->nd_num) { o2net_stop_listening(node); cluster->cl_local_node = O2NM_INVALID_NODE_NUM; } node->nd_local = tmp; if (node->nd_local) { cluster->cl_has_local = tmp; cluster->cl_local_node = node->nd_num; } ret = count; out: o2nm_unlock_subsystem(); return ret; } CONFIGFS_ATTR(o2nm_node_, num); CONFIGFS_ATTR(o2nm_node_, ipv4_port); CONFIGFS_ATTR(o2nm_node_, ipv4_address); CONFIGFS_ATTR(o2nm_node_, local); static struct configfs_attribute *o2nm_node_attrs[] = { &o2nm_node_attr_num, &o2nm_node_attr_ipv4_port, &o2nm_node_attr_ipv4_address, &o2nm_node_attr_local, NULL, }; static struct configfs_item_operations o2nm_node_item_ops = { .release = o2nm_node_release, }; static const struct config_item_type o2nm_node_type = { .ct_item_ops = &o2nm_node_item_ops, .ct_attrs = o2nm_node_attrs, .ct_owner = THIS_MODULE, }; /* node set */ struct o2nm_node_group { struct config_group ns_group; /* some stuff? */ }; #if 0 static struct o2nm_node_group *to_o2nm_node_group(struct config_group *group) { return group ? 
container_of(group, struct o2nm_node_group, ns_group) : NULL; } #endif static ssize_t o2nm_cluster_attr_write(const char *page, ssize_t count, unsigned int *val) { unsigned long tmp; char *p = (char *)page; tmp = simple_strtoul(p, &p, 0); if (!p || (*p && (*p != '\n'))) return -EINVAL; if (tmp == 0) return -EINVAL; if (tmp >= (u32)-1) return -ERANGE; *val = tmp; return count; } static ssize_t o2nm_cluster_idle_timeout_ms_show(struct config_item *item, char *page) { return sprintf(page, "%u\n", to_o2nm_cluster(item)->cl_idle_timeout_ms); } static ssize_t o2nm_cluster_idle_timeout_ms_store(struct config_item *item, const char *page, size_t count) { struct o2nm_cluster *cluster = to_o2nm_cluster(item); ssize_t ret; unsigned int val; ret = o2nm_cluster_attr_write(page, count, &val); if (ret > 0) { if (cluster->cl_idle_timeout_ms != val && o2net_num_connected_peers()) { mlog(ML_NOTICE, "o2net: cannot change idle timeout after " "the first peer has agreed to it." " %d connected peers\n", o2net_num_connected_peers()); ret = -EINVAL; } else if (val <= cluster->cl_keepalive_delay_ms) { mlog(ML_NOTICE, "o2net: idle timeout must be larger " "than keepalive delay\n"); ret = -EINVAL; } else { cluster->cl_idle_timeout_ms = val; } } return ret; } static ssize_t o2nm_cluster_keepalive_delay_ms_show( struct config_item *item, char *page) { return sprintf(page, "%u\n", to_o2nm_cluster(item)->cl_keepalive_delay_ms); } static ssize_t o2nm_cluster_keepalive_delay_ms_store( struct config_item *item, const char *page, size_t count) { struct o2nm_cluster *cluster = to_o2nm_cluster(item); ssize_t ret; unsigned int val; ret = o2nm_cluster_attr_write(page, count, &val); if (ret > 0) { if (cluster->cl_keepalive_delay_ms != val && o2net_num_connected_peers()) { mlog(ML_NOTICE, "o2net: cannot change keepalive delay after" " the first peer has agreed to it." 
" %d connected peers\n", o2net_num_connected_peers()); ret = -EINVAL; } else if (val >= cluster->cl_idle_timeout_ms) { mlog(ML_NOTICE, "o2net: keepalive delay must be " "smaller than idle timeout\n"); ret = -EINVAL; } else { cluster->cl_keepalive_delay_ms = val; } } return ret; } static ssize_t o2nm_cluster_reconnect_delay_ms_show( struct config_item *item, char *page) { return sprintf(page, "%u\n", to_o2nm_cluster(item)->cl_reconnect_delay_ms); } static ssize_t o2nm_cluster_reconnect_delay_ms_store( struct config_item *item, const char *page, size_t count) { return o2nm_cluster_attr_write(page, count, &to_o2nm_cluster(item)->cl_reconnect_delay_ms); } static ssize_t o2nm_cluster_fence_method_show( struct config_item *item, char *page) { struct o2nm_cluster *cluster = to_o2nm_cluster(item); ssize_t ret = 0; if (cluster) ret = sprintf(page, "%s\n", o2nm_fence_method_desc[cluster->cl_fence_method]); return ret; } static ssize_t o2nm_cluster_fence_method_store( struct config_item *item, const char *page, size_t count) { unsigned int i; if (page[count - 1] != '\n') goto bail; for (i = 0; i < O2NM_FENCE_METHODS; ++i) { if (count != strlen(o2nm_fence_method_desc[i]) + 1) continue; if (strncasecmp(page, o2nm_fence_method_desc[i], count - 1)) continue; if (to_o2nm_cluster(item)->cl_fence_method != i) { printk(KERN_INFO "ocfs2: Changing fence method to %s\n", o2nm_fence_method_desc[i]); to_o2nm_cluster(item)->cl_fence_method = i; } return count; } bail: return -EINVAL; } CONFIGFS_ATTR(o2nm_cluster_, idle_timeout_ms); CONFIGFS_ATTR(o2nm_cluster_, keepalive_delay_ms); CONFIGFS_ATTR(o2nm_cluster_, reconnect_delay_ms); CONFIGFS_ATTR(o2nm_cluster_, fence_method); static struct configfs_attribute *o2nm_cluster_attrs[] = { &o2nm_cluster_attr_idle_timeout_ms, &o2nm_cluster_attr_keepalive_delay_ms, &o2nm_cluster_attr_reconnect_delay_ms, &o2nm_cluster_attr_fence_method, NULL, }; static struct config_item *o2nm_node_group_make_item(struct config_group *group, const char *name) { struct o2nm_node *node = NULL; if (strlen(name) > O2NM_MAX_NAME_LEN) return ERR_PTR(-ENAMETOOLONG); node = kzalloc(sizeof(struct o2nm_node), GFP_KERNEL); if (node == NULL) return ERR_PTR(-ENOMEM); strcpy(node->nd_name, name); /* use item.ci_namebuf instead? */ config_item_init_type_name(&node->nd_item, name, &o2nm_node_type); spin_lock_init(&node->nd_lock); mlog(ML_CLUSTER, "o2nm: Registering node %s\n", name); return &node->nd_item; } static void o2nm_node_group_drop_item(struct config_group *group, struct config_item *item) { struct o2nm_node *node = to_o2nm_node(item); struct o2nm_cluster *cluster = to_o2nm_cluster(group->cg_item.ci_parent); o2net_disconnect_node(node); if (cluster->cl_has_local && (cluster->cl_local_node == node->nd_num)) { cluster->cl_has_local = 0; cluster->cl_local_node = O2NM_INVALID_NODE_NUM; o2net_stop_listening(node); } /* XXX call into net to stop this node from trading messages */ write_lock(&cluster->cl_nodes_lock); /* XXX sloppy */ if (node->nd_ipv4_address) rb_erase(&node->nd_ip_node, &cluster->cl_node_ip_tree); /* nd_num might be 0 if the node number hasn't been set.. 
*/ if (cluster->cl_nodes[node->nd_num] == node) { cluster->cl_nodes[node->nd_num] = NULL; clear_bit(node->nd_num, cluster->cl_nodes_bitmap); } write_unlock(&cluster->cl_nodes_lock); mlog(ML_CLUSTER, "o2nm: Unregistered node %s\n", config_item_name(&node->nd_item)); config_item_put(item); } static struct configfs_group_operations o2nm_node_group_group_ops = { .make_item = o2nm_node_group_make_item, .drop_item = o2nm_node_group_drop_item, }; static const struct config_item_type o2nm_node_group_type = { .ct_group_ops = &o2nm_node_group_group_ops, .ct_owner = THIS_MODULE, }; /* cluster */ static void o2nm_cluster_release(struct config_item *item) { struct o2nm_cluster *cluster = to_o2nm_cluster(item); kfree(cluster); } static struct configfs_item_operations o2nm_cluster_item_ops = { .release = o2nm_cluster_release, }; static const struct config_item_type o2nm_cluster_type = { .ct_item_ops = &o2nm_cluster_item_ops, .ct_attrs = o2nm_cluster_attrs, .ct_owner = THIS_MODULE, }; /* cluster set */ struct o2nm_cluster_group { struct configfs_subsystem cs_subsys; /* some stuff? */ }; #if 0 static struct o2nm_cluster_group *to_o2nm_cluster_group(struct config_group *group) { return group ? container_of(to_configfs_subsystem(group), struct o2nm_cluster_group, cs_subsys) : NULL; } #endif static struct config_group *o2nm_cluster_group_make_group(struct config_group *group, const char *name) { struct o2nm_cluster *cluster = NULL; struct o2nm_node_group *ns = NULL; struct config_group *o2hb_group = NULL, *ret = NULL; /* this runs under the parent dir's i_mutex; there can be only * one caller in here at a time */ if (o2nm_single_cluster) return ERR_PTR(-ENOSPC); cluster = kzalloc(sizeof(struct o2nm_cluster), GFP_KERNEL); ns = kzalloc(sizeof(struct o2nm_node_group), GFP_KERNEL); o2hb_group = o2hb_alloc_hb_set(); if (cluster == NULL || ns == NULL || o2hb_group == NULL) goto out; config_group_init_type_name(&cluster->cl_group, name, &o2nm_cluster_type); configfs_add_default_group(&ns->ns_group, &cluster->cl_group); config_group_init_type_name(&ns->ns_group, "node", &o2nm_node_group_type); configfs_add_default_group(o2hb_group, &cluster->cl_group); rwlock_init(&cluster->cl_nodes_lock); cluster->cl_node_ip_tree = RB_ROOT; cluster->cl_reconnect_delay_ms = O2NET_RECONNECT_DELAY_MS_DEFAULT; cluster->cl_idle_timeout_ms = O2NET_IDLE_TIMEOUT_MS_DEFAULT; cluster->cl_keepalive_delay_ms = O2NET_KEEPALIVE_DELAY_MS_DEFAULT; cluster->cl_fence_method = O2NM_FENCE_RESET; ret = &cluster->cl_group; o2nm_single_cluster = cluster; out: if (ret == NULL) { kfree(cluster); kfree(ns); o2hb_free_hb_set(o2hb_group); ret = ERR_PTR(-ENOMEM); } return ret; } static void o2nm_cluster_group_drop_item(struct config_group *group, struct config_item *item) { struct o2nm_cluster *cluster = to_o2nm_cluster(item); BUG_ON(o2nm_single_cluster != cluster); o2nm_single_cluster = NULL; configfs_remove_default_groups(&cluster->cl_group); config_item_put(item); } static struct configfs_group_operations o2nm_cluster_group_group_ops = { .make_group = o2nm_cluster_group_make_group, .drop_item = o2nm_cluster_group_drop_item, }; static const struct config_item_type o2nm_cluster_group_type = { .ct_group_ops = &o2nm_cluster_group_group_ops, .ct_owner = THIS_MODULE, }; static struct o2nm_cluster_group o2nm_cluster_group = { .cs_subsys = { .su_group = { .cg_item = { .ci_namebuf = "cluster", .ci_type = &o2nm_cluster_group_type, }, }, }, }; static inline void o2nm_lock_subsystem(void) { mutex_lock(&o2nm_cluster_group.cs_subsys.su_mutex); } static inline void 
o2nm_unlock_subsystem(void) { mutex_unlock(&o2nm_cluster_group.cs_subsys.su_mutex); } int o2nm_depend_item(struct config_item *item) { return configfs_depend_item(&o2nm_cluster_group.cs_subsys, item); } void o2nm_undepend_item(struct config_item *item) { configfs_undepend_item(item); } int o2nm_depend_this_node(void) { int ret = 0; struct o2nm_node *local_node; local_node = o2nm_get_node_by_num(o2nm_this_node()); if (!local_node) { ret = -EINVAL; goto out; } ret = o2nm_depend_item(&local_node->nd_item); o2nm_node_put(local_node); out: return ret; } void o2nm_undepend_this_node(void) { struct o2nm_node *local_node; local_node = o2nm_get_node_by_num(o2nm_this_node()); BUG_ON(!local_node); o2nm_undepend_item(&local_node->nd_item); o2nm_node_put(local_node); } static void __exit exit_o2nm(void) { /* XXX sync with hb callbacks and shut down hb? */ o2net_unregister_hb_callbacks(); configfs_unregister_subsystem(&o2nm_cluster_group.cs_subsys); o2cb_sys_shutdown(); o2net_exit(); o2hb_exit(); } static int __init init_o2nm(void) { int ret = -1; ret = o2hb_init(); if (ret) goto out; ret = o2net_init(); if (ret) goto out_o2hb; ret = o2net_register_hb_callbacks(); if (ret) goto out_o2net; config_group_init(&o2nm_cluster_group.cs_subsys.su_group); mutex_init(&o2nm_cluster_group.cs_subsys.su_mutex); ret = configfs_register_subsystem(&o2nm_cluster_group.cs_subsys); if (ret) { printk(KERN_ERR "nodemanager: Registration returned %d\n", ret); goto out_callbacks; } ret = o2cb_sys_init(); if (!ret) goto out; configfs_unregister_subsystem(&o2nm_cluster_group.cs_subsys); out_callbacks: o2net_unregister_hb_callbacks(); out_o2net: o2net_exit(); out_o2hb: o2hb_exit(); out: return ret; } MODULE_AUTHOR("Oracle"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("OCFS2 cluster management"); module_init(init_o2nm) module_exit(exit_o2nm)
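The two file bodies above are this row's code_before and code_after for nodemanager.c: the patched version declares the o2nm_lock_subsystem()/o2nm_unlock_subsystem() wrappers around the configfs subsystem's su_mutex and re-resolves the owning cluster under that mutex in every node attribute store handler, instead of caching it at declaration time. A condensed sketch of that discipline follows (o2nm_node_attr_store_pattern is an illustrative name, not a function from the source; the helpers it calls are the real ones defined above):

static ssize_t o2nm_node_attr_store_pattern(struct o2nm_node *node,
                                            const char *page, size_t count)
{
        struct o2nm_cluster *cluster;
        ssize_t ret = count;

        o2nm_lock_subsystem();                  /* pins nd_item.ci_parent */
        cluster = to_o2nm_cluster_from_node(node);
        if (!cluster) {                         /* node already unlinked */
                o2nm_unlock_subsystem();
                return -EINVAL;
        }

        /* ... parse 'page', update cluster state under cl_nodes_lock ... */

        o2nm_unlock_subsystem();
        return ret;
}

Holding su_mutex across the lookup excludes the configfs teardown path that detaches nd_item from its parent, and the explicit NULL check covers an item that is already orphaned. The next two fields isolate the same change at function granularity.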
static ssize_t o2nm_node_num_store(struct config_item *item, const char *page, size_t count) { struct o2nm_node *node = to_o2nm_node(item); struct o2nm_cluster *cluster = to_o2nm_cluster_from_node(node); unsigned long tmp; char *p = (char *)page; int ret = 0; tmp = simple_strtoul(p, &p, 0); if (!p || (*p && (*p != '\n'))) return -EINVAL; if (tmp >= O2NM_MAX_NODES) return -ERANGE; /* once we're in the cl_nodes tree networking can look us up by * node number and try to use our address and port attributes * to connect to this node.. make sure that they've been set * before writing the node attribute? */ if (!test_bit(O2NM_NODE_ATTR_ADDRESS, &node->nd_set_attributes) || !test_bit(O2NM_NODE_ATTR_PORT, &node->nd_set_attributes)) return -EINVAL; /* XXX */ write_lock(&cluster->cl_nodes_lock); if (cluster->cl_nodes[tmp]) ret = -EEXIST; else if (test_and_set_bit(O2NM_NODE_ATTR_NUM, &node->nd_set_attributes)) ret = -EBUSY; else { cluster->cl_nodes[tmp] = node; node->nd_num = tmp; set_bit(tmp, cluster->cl_nodes_bitmap); } write_unlock(&cluster->cl_nodes_lock); if (ret) return ret; return count; }
static ssize_t o2nm_node_num_store(struct config_item *item, const char *page, size_t count) { struct o2nm_node *node = to_o2nm_node(item); struct o2nm_cluster *cluster; unsigned long tmp; char *p = (char *)page; int ret = 0; tmp = simple_strtoul(p, &p, 0); if (!p || (*p && (*p != '\n'))) return -EINVAL; if (tmp >= O2NM_MAX_NODES) return -ERANGE; /* once we're in the cl_nodes tree networking can look us up by * node number and try to use our address and port attributes * to connect to this node.. make sure that they've been set * before writing the node attribute? */ if (!test_bit(O2NM_NODE_ATTR_ADDRESS, &node->nd_set_attributes) || !test_bit(O2NM_NODE_ATTR_PORT, &node->nd_set_attributes)) return -EINVAL; /* XXX */ o2nm_lock_subsystem(); cluster = to_o2nm_cluster_from_node(node); if (!cluster) { o2nm_unlock_subsystem(); return -EINVAL; } write_lock(&cluster->cl_nodes_lock); if (cluster->cl_nodes[tmp]) ret = -EEXIST; else if (test_and_set_bit(O2NM_NODE_ATTR_NUM, &node->nd_set_attributes)) ret = -EBUSY; else { cluster->cl_nodes[tmp] = node; node->nd_num = tmp; set_bit(tmp, cluster->cl_nodes_bitmap); } write_unlock(&cluster->cl_nodes_lock); o2nm_unlock_subsystem(); if (ret) return ret; return count; }
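func_before and func_after above show o2nm_node_num_store() before and after the fix. The vulnerable version resolved the cluster in its declaration, outside any lock and without a NULL check, so write_lock(&cluster->cl_nodes_lock) could dereference a NULL pointer once the node had been unlinked from its group; the fixed version defers the lookup until the attribute checks pass, performs it under the subsystem mutex, and releases that mutex on every exit path. As an interleaving timeline (an editorial sketch in C comment form, not text from the source):

/*
 *   task A: write to node's num attr        task B: rmdir the node item
 *   ---------------------------------       ---------------------------
 *   cluster = to_o2nm_cluster_from_node(node);
 *                                            configfs detaches nd_item,
 *                                            ci_parent goes away
 *   write_lock(&cluster->cl_nodes_lock);  <-- NULL dereference
 */

The diff field that follows records the same change with explicit pre- and post-fix line numbers.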
{'added': [(43, 'static inline void o2nm_lock_subsystem(void);'), (44, 'static inline void o2nm_unlock_subsystem(void);'), (45, ''), (187, '\tif (node->nd_item.ci_parent)'), (188, '\t\treturn to_o2nm_cluster(node->nd_item.ci_parent->ci_parent);'), (189, '\telse'), (190, '\t\treturn NULL;'), (203, '\tstruct o2nm_cluster *cluster;'), (223, '\to2nm_lock_subsystem();'), (224, '\tcluster = to_o2nm_cluster_from_node(node);'), (225, '\tif (!cluster) {'), (226, '\t\to2nm_unlock_subsystem();'), (227, '\t\treturn -EINVAL;'), (228, '\t}'), (229, ''), (242, '\to2nm_unlock_subsystem();'), (243, ''), (287, '\tstruct o2nm_cluster *cluster;'), (304, '\to2nm_lock_subsystem();'), (305, '\tcluster = to_o2nm_cluster_from_node(node);'), (306, '\tif (!cluster) {'), (307, '\t\to2nm_unlock_subsystem();'), (308, '\t\treturn -EINVAL;'), (309, '\t}'), (310, ''), (323, '\to2nm_unlock_subsystem();'), (324, ''), (342, '\tstruct o2nm_cluster *cluster;'), (360, '\to2nm_lock_subsystem();'), (361, '\tcluster = to_o2nm_cluster_from_node(node);'), (362, '\tif (!cluster) {'), (363, '\t\tret = -EINVAL;'), (364, '\t\tgoto out;'), (365, '\t}'), (366, ''), (370, '\t cluster->cl_local_node != node->nd_num) {'), (371, '\t\tret = -EBUSY;'), (372, '\t\tgoto out;'), (373, '\t}'), (379, '\t\t\tgoto out;'), (394, '\tret = count;'), (395, ''), (396, 'out:'), (397, '\to2nm_unlock_subsystem();'), (398, '\treturn ret;'), (778, 'static inline void o2nm_lock_subsystem(void)'), (779, '{'), (780, '\tmutex_lock(&o2nm_cluster_group.cs_subsys.su_mutex);'), (781, '}'), (782, ''), (783, 'static inline void o2nm_unlock_subsystem(void)'), (784, '{'), (785, '\tmutex_unlock(&o2nm_cluster_group.cs_subsys.su_mutex);'), (786, '}'), (787, '')], 'deleted': [(184, '\treturn to_o2nm_cluster(node->nd_item.ci_parent->ci_parent);'), (197, '\tstruct o2nm_cluster *cluster = to_o2nm_cluster_from_node(node);'), (272, '\tstruct o2nm_cluster *cluster = to_o2nm_cluster_from_node(node);'), (318, '\tstruct o2nm_cluster *cluster = to_o2nm_cluster_from_node(node);'), (339, '\t cluster->cl_local_node != node->nd_num)'), (340, '\t\treturn -EBUSY;'), (346, '\t\t\treturn ret;'), (361, '\treturn count;')]}
55
8
669
3744
https://github.com/torvalds/linux
CVE-2017-18216
['CWE-476']
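CVE-2017-18216 is classified as CWE-476, NULL pointer dereference: a local user able to manipulate the o2nm configfs tree can race a node-attribute write against removal of the node item, and the unguarded parent walk in to_o2nm_cluster_from_node() then dereferences NULL. The root-cause change, reconstructed verbatim from the diff above (deleted line 184, added lines 187-190; the #if 0 framing is editorial):

#if 0   /* before the fix: unconditional parent walk */
static struct o2nm_cluster *to_o2nm_cluster_from_node(struct o2nm_node *node)
{
        return to_o2nm_cluster(node->nd_item.ci_parent->ci_parent);
}
#else   /* after the fix: NULL-safe; callers must hold su_mutex */
static struct o2nm_cluster *to_o2nm_cluster_from_node(struct o2nm_node *node)
{
        if (node->nd_item.ci_parent)
                return to_o2nm_cluster(node->nd_item.ci_parent->ci_parent);
        else
                return NULL;
}
#endif

The next row names to_o2nm_cluster_from_node as its method_name for the same file, so the fields below carry the pre- and post-fix sources again.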
nodemanager.c
to_o2nm_cluster_from_node
/* -*- mode: c; c-basic-offset: 8; -*- * vim: noexpandtab sw=8 ts=8 sts=0: * * Copyright (C) 2004, 2005 Oracle. All rights reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this program; if not, write to the * Free Software Foundation, Inc., 59 Temple Place - Suite 330, * Boston, MA 021110-1307, USA. */ #include <linux/slab.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/configfs.h> #include "tcp.h" #include "nodemanager.h" #include "heartbeat.h" #include "masklog.h" #include "sys.h" /* for now we operate under the assertion that there can be only one * cluster active at a time. Changing this will require trickling * cluster references throughout where nodes are looked up */ struct o2nm_cluster *o2nm_single_cluster = NULL; char *o2nm_fence_method_desc[O2NM_FENCE_METHODS] = { "reset", /* O2NM_FENCE_RESET */ "panic", /* O2NM_FENCE_PANIC */ }; struct o2nm_node *o2nm_get_node_by_num(u8 node_num) { struct o2nm_node *node = NULL; if (node_num >= O2NM_MAX_NODES || o2nm_single_cluster == NULL) goto out; read_lock(&o2nm_single_cluster->cl_nodes_lock); node = o2nm_single_cluster->cl_nodes[node_num]; if (node) config_item_get(&node->nd_item); read_unlock(&o2nm_single_cluster->cl_nodes_lock); out: return node; } EXPORT_SYMBOL_GPL(o2nm_get_node_by_num); int o2nm_configured_node_map(unsigned long *map, unsigned bytes) { struct o2nm_cluster *cluster = o2nm_single_cluster; BUG_ON(bytes < (sizeof(cluster->cl_nodes_bitmap))); if (cluster == NULL) return -EINVAL; read_lock(&cluster->cl_nodes_lock); memcpy(map, cluster->cl_nodes_bitmap, sizeof(cluster->cl_nodes_bitmap)); read_unlock(&cluster->cl_nodes_lock); return 0; } EXPORT_SYMBOL_GPL(o2nm_configured_node_map); static struct o2nm_node *o2nm_node_ip_tree_lookup(struct o2nm_cluster *cluster, __be32 ip_needle, struct rb_node ***ret_p, struct rb_node **ret_parent) { struct rb_node **p = &cluster->cl_node_ip_tree.rb_node; struct rb_node *parent = NULL; struct o2nm_node *node, *ret = NULL; while (*p) { int cmp; parent = *p; node = rb_entry(parent, struct o2nm_node, nd_ip_node); cmp = memcmp(&ip_needle, &node->nd_ipv4_address, sizeof(ip_needle)); if (cmp < 0) p = &(*p)->rb_left; else if (cmp > 0) p = &(*p)->rb_right; else { ret = node; break; } } if (ret_p != NULL) *ret_p = p; if (ret_parent != NULL) *ret_parent = parent; return ret; } struct o2nm_node *o2nm_get_node_by_ip(__be32 addr) { struct o2nm_node *node = NULL; struct o2nm_cluster *cluster = o2nm_single_cluster; if (cluster == NULL) goto out; read_lock(&cluster->cl_nodes_lock); node = o2nm_node_ip_tree_lookup(cluster, addr, NULL, NULL); if (node) config_item_get(&node->nd_item); read_unlock(&cluster->cl_nodes_lock); out: return node; } EXPORT_SYMBOL_GPL(o2nm_get_node_by_ip); void o2nm_node_put(struct o2nm_node *node) { config_item_put(&node->nd_item); } EXPORT_SYMBOL_GPL(o2nm_node_put); void o2nm_node_get(struct o2nm_node *node) { config_item_get(&node->nd_item); } EXPORT_SYMBOL_GPL(o2nm_node_get); u8 o2nm_this_node(void) { u8 node_num = O2NM_MAX_NODES; if 
(o2nm_single_cluster && o2nm_single_cluster->cl_has_local) node_num = o2nm_single_cluster->cl_local_node; return node_num; } EXPORT_SYMBOL_GPL(o2nm_this_node); /* node configfs bits */ static struct o2nm_cluster *to_o2nm_cluster(struct config_item *item) { return item ? container_of(to_config_group(item), struct o2nm_cluster, cl_group) : NULL; } static struct o2nm_node *to_o2nm_node(struct config_item *item) { return item ? container_of(item, struct o2nm_node, nd_item) : NULL; } static void o2nm_node_release(struct config_item *item) { struct o2nm_node *node = to_o2nm_node(item); kfree(node); } static ssize_t o2nm_node_num_show(struct config_item *item, char *page) { return sprintf(page, "%d\n", to_o2nm_node(item)->nd_num); } static struct o2nm_cluster *to_o2nm_cluster_from_node(struct o2nm_node *node) { /* through the first node_set .parent * mycluster/nodes/mynode == o2nm_cluster->o2nm_node_group->o2nm_node */ return to_o2nm_cluster(node->nd_item.ci_parent->ci_parent); } enum { O2NM_NODE_ATTR_NUM = 0, O2NM_NODE_ATTR_PORT, O2NM_NODE_ATTR_ADDRESS, }; static ssize_t o2nm_node_num_store(struct config_item *item, const char *page, size_t count) { struct o2nm_node *node = to_o2nm_node(item); struct o2nm_cluster *cluster = to_o2nm_cluster_from_node(node); unsigned long tmp; char *p = (char *)page; int ret = 0; tmp = simple_strtoul(p, &p, 0); if (!p || (*p && (*p != '\n'))) return -EINVAL; if (tmp >= O2NM_MAX_NODES) return -ERANGE; /* once we're in the cl_nodes tree networking can look us up by * node number and try to use our address and port attributes * to connect to this node.. make sure that they've been set * before writing the node attribute? */ if (!test_bit(O2NM_NODE_ATTR_ADDRESS, &node->nd_set_attributes) || !test_bit(O2NM_NODE_ATTR_PORT, &node->nd_set_attributes)) return -EINVAL; /* XXX */ write_lock(&cluster->cl_nodes_lock); if (cluster->cl_nodes[tmp]) ret = -EEXIST; else if (test_and_set_bit(O2NM_NODE_ATTR_NUM, &node->nd_set_attributes)) ret = -EBUSY; else { cluster->cl_nodes[tmp] = node; node->nd_num = tmp; set_bit(tmp, cluster->cl_nodes_bitmap); } write_unlock(&cluster->cl_nodes_lock); if (ret) return ret; return count; } static ssize_t o2nm_node_ipv4_port_show(struct config_item *item, char *page) { return sprintf(page, "%u\n", ntohs(to_o2nm_node(item)->nd_ipv4_port)); } static ssize_t o2nm_node_ipv4_port_store(struct config_item *item, const char *page, size_t count) { struct o2nm_node *node = to_o2nm_node(item); unsigned long tmp; char *p = (char *)page; tmp = simple_strtoul(p, &p, 0); if (!p || (*p && (*p != '\n'))) return -EINVAL; if (tmp == 0) return -EINVAL; if (tmp >= (u16)-1) return -ERANGE; if (test_and_set_bit(O2NM_NODE_ATTR_PORT, &node->nd_set_attributes)) return -EBUSY; node->nd_ipv4_port = htons(tmp); return count; } static ssize_t o2nm_node_ipv4_address_show(struct config_item *item, char *page) { return sprintf(page, "%pI4\n", &to_o2nm_node(item)->nd_ipv4_address); } static ssize_t o2nm_node_ipv4_address_store(struct config_item *item, const char *page, size_t count) { struct o2nm_node *node = to_o2nm_node(item); struct o2nm_cluster *cluster = to_o2nm_cluster_from_node(node); int ret, i; struct rb_node **p, *parent; unsigned int octets[4]; __be32 ipv4_addr = 0; ret = sscanf(page, "%3u.%3u.%3u.%3u", &octets[3], &octets[2], &octets[1], &octets[0]); if (ret != 4) return -EINVAL; for (i = 0; i < ARRAY_SIZE(octets); i++) { if (octets[i] > 255) return -ERANGE; be32_add_cpu(&ipv4_addr, octets[i] << (i * 8)); } ret = 0; write_lock(&cluster->cl_nodes_lock); if 
(o2nm_node_ip_tree_lookup(cluster, ipv4_addr, &p, &parent)) ret = -EEXIST; else if (test_and_set_bit(O2NM_NODE_ATTR_ADDRESS, &node->nd_set_attributes)) ret = -EBUSY; else { rb_link_node(&node->nd_ip_node, parent, p); rb_insert_color(&node->nd_ip_node, &cluster->cl_node_ip_tree); } write_unlock(&cluster->cl_nodes_lock); if (ret) return ret; memcpy(&node->nd_ipv4_address, &ipv4_addr, sizeof(ipv4_addr)); return count; } static ssize_t o2nm_node_local_show(struct config_item *item, char *page) { return sprintf(page, "%d\n", to_o2nm_node(item)->nd_local); } static ssize_t o2nm_node_local_store(struct config_item *item, const char *page, size_t count) { struct o2nm_node *node = to_o2nm_node(item); struct o2nm_cluster *cluster = to_o2nm_cluster_from_node(node); unsigned long tmp; char *p = (char *)page; ssize_t ret; tmp = simple_strtoul(p, &p, 0); if (!p || (*p && (*p != '\n'))) return -EINVAL; tmp = !!tmp; /* boolean of whether this node wants to be local */ /* setting local turns on networking rx for now so we require having * set everything else first */ if (!test_bit(O2NM_NODE_ATTR_ADDRESS, &node->nd_set_attributes) || !test_bit(O2NM_NODE_ATTR_NUM, &node->nd_set_attributes) || !test_bit(O2NM_NODE_ATTR_PORT, &node->nd_set_attributes)) return -EINVAL; /* XXX */ /* the only failure case is trying to set a new local node * when a different one is already set */ if (tmp && tmp == cluster->cl_has_local && cluster->cl_local_node != node->nd_num) return -EBUSY; /* bring up the rx thread if we're setting the new local node. */ if (tmp && !cluster->cl_has_local) { ret = o2net_start_listening(node); if (ret) return ret; } if (!tmp && cluster->cl_has_local && cluster->cl_local_node == node->nd_num) { o2net_stop_listening(node); cluster->cl_local_node = O2NM_INVALID_NODE_NUM; } node->nd_local = tmp; if (node->nd_local) { cluster->cl_has_local = tmp; cluster->cl_local_node = node->nd_num; } return count; } CONFIGFS_ATTR(o2nm_node_, num); CONFIGFS_ATTR(o2nm_node_, ipv4_port); CONFIGFS_ATTR(o2nm_node_, ipv4_address); CONFIGFS_ATTR(o2nm_node_, local); static struct configfs_attribute *o2nm_node_attrs[] = { &o2nm_node_attr_num, &o2nm_node_attr_ipv4_port, &o2nm_node_attr_ipv4_address, &o2nm_node_attr_local, NULL, }; static struct configfs_item_operations o2nm_node_item_ops = { .release = o2nm_node_release, }; static const struct config_item_type o2nm_node_type = { .ct_item_ops = &o2nm_node_item_ops, .ct_attrs = o2nm_node_attrs, .ct_owner = THIS_MODULE, }; /* node set */ struct o2nm_node_group { struct config_group ns_group; /* some stuff? */ }; #if 0 static struct o2nm_node_group *to_o2nm_node_group(struct config_group *group) { return group ? 
container_of(group, struct o2nm_node_group, ns_group) : NULL; } #endif static ssize_t o2nm_cluster_attr_write(const char *page, ssize_t count, unsigned int *val) { unsigned long tmp; char *p = (char *)page; tmp = simple_strtoul(p, &p, 0); if (!p || (*p && (*p != '\n'))) return -EINVAL; if (tmp == 0) return -EINVAL; if (tmp >= (u32)-1) return -ERANGE; *val = tmp; return count; } static ssize_t o2nm_cluster_idle_timeout_ms_show(struct config_item *item, char *page) { return sprintf(page, "%u\n", to_o2nm_cluster(item)->cl_idle_timeout_ms); } static ssize_t o2nm_cluster_idle_timeout_ms_store(struct config_item *item, const char *page, size_t count) { struct o2nm_cluster *cluster = to_o2nm_cluster(item); ssize_t ret; unsigned int val; ret = o2nm_cluster_attr_write(page, count, &val); if (ret > 0) { if (cluster->cl_idle_timeout_ms != val && o2net_num_connected_peers()) { mlog(ML_NOTICE, "o2net: cannot change idle timeout after " "the first peer has agreed to it." " %d connected peers\n", o2net_num_connected_peers()); ret = -EINVAL; } else if (val <= cluster->cl_keepalive_delay_ms) { mlog(ML_NOTICE, "o2net: idle timeout must be larger " "than keepalive delay\n"); ret = -EINVAL; } else { cluster->cl_idle_timeout_ms = val; } } return ret; } static ssize_t o2nm_cluster_keepalive_delay_ms_show( struct config_item *item, char *page) { return sprintf(page, "%u\n", to_o2nm_cluster(item)->cl_keepalive_delay_ms); } static ssize_t o2nm_cluster_keepalive_delay_ms_store( struct config_item *item, const char *page, size_t count) { struct o2nm_cluster *cluster = to_o2nm_cluster(item); ssize_t ret; unsigned int val; ret = o2nm_cluster_attr_write(page, count, &val); if (ret > 0) { if (cluster->cl_keepalive_delay_ms != val && o2net_num_connected_peers()) { mlog(ML_NOTICE, "o2net: cannot change keepalive delay after" " the first peer has agreed to it." 
" %d connected peers\n", o2net_num_connected_peers()); ret = -EINVAL; } else if (val >= cluster->cl_idle_timeout_ms) { mlog(ML_NOTICE, "o2net: keepalive delay must be " "smaller than idle timeout\n"); ret = -EINVAL; } else { cluster->cl_keepalive_delay_ms = val; } } return ret; } static ssize_t o2nm_cluster_reconnect_delay_ms_show( struct config_item *item, char *page) { return sprintf(page, "%u\n", to_o2nm_cluster(item)->cl_reconnect_delay_ms); } static ssize_t o2nm_cluster_reconnect_delay_ms_store( struct config_item *item, const char *page, size_t count) { return o2nm_cluster_attr_write(page, count, &to_o2nm_cluster(item)->cl_reconnect_delay_ms); } static ssize_t o2nm_cluster_fence_method_show( struct config_item *item, char *page) { struct o2nm_cluster *cluster = to_o2nm_cluster(item); ssize_t ret = 0; if (cluster) ret = sprintf(page, "%s\n", o2nm_fence_method_desc[cluster->cl_fence_method]); return ret; } static ssize_t o2nm_cluster_fence_method_store( struct config_item *item, const char *page, size_t count) { unsigned int i; if (page[count - 1] != '\n') goto bail; for (i = 0; i < O2NM_FENCE_METHODS; ++i) { if (count != strlen(o2nm_fence_method_desc[i]) + 1) continue; if (strncasecmp(page, o2nm_fence_method_desc[i], count - 1)) continue; if (to_o2nm_cluster(item)->cl_fence_method != i) { printk(KERN_INFO "ocfs2: Changing fence method to %s\n", o2nm_fence_method_desc[i]); to_o2nm_cluster(item)->cl_fence_method = i; } return count; } bail: return -EINVAL; } CONFIGFS_ATTR(o2nm_cluster_, idle_timeout_ms); CONFIGFS_ATTR(o2nm_cluster_, keepalive_delay_ms); CONFIGFS_ATTR(o2nm_cluster_, reconnect_delay_ms); CONFIGFS_ATTR(o2nm_cluster_, fence_method); static struct configfs_attribute *o2nm_cluster_attrs[] = { &o2nm_cluster_attr_idle_timeout_ms, &o2nm_cluster_attr_keepalive_delay_ms, &o2nm_cluster_attr_reconnect_delay_ms, &o2nm_cluster_attr_fence_method, NULL, }; static struct config_item *o2nm_node_group_make_item(struct config_group *group, const char *name) { struct o2nm_node *node = NULL; if (strlen(name) > O2NM_MAX_NAME_LEN) return ERR_PTR(-ENAMETOOLONG); node = kzalloc(sizeof(struct o2nm_node), GFP_KERNEL); if (node == NULL) return ERR_PTR(-ENOMEM); strcpy(node->nd_name, name); /* use item.ci_namebuf instead? */ config_item_init_type_name(&node->nd_item, name, &o2nm_node_type); spin_lock_init(&node->nd_lock); mlog(ML_CLUSTER, "o2nm: Registering node %s\n", name); return &node->nd_item; } static void o2nm_node_group_drop_item(struct config_group *group, struct config_item *item) { struct o2nm_node *node = to_o2nm_node(item); struct o2nm_cluster *cluster = to_o2nm_cluster(group->cg_item.ci_parent); o2net_disconnect_node(node); if (cluster->cl_has_local && (cluster->cl_local_node == node->nd_num)) { cluster->cl_has_local = 0; cluster->cl_local_node = O2NM_INVALID_NODE_NUM; o2net_stop_listening(node); } /* XXX call into net to stop this node from trading messages */ write_lock(&cluster->cl_nodes_lock); /* XXX sloppy */ if (node->nd_ipv4_address) rb_erase(&node->nd_ip_node, &cluster->cl_node_ip_tree); /* nd_num might be 0 if the node number hasn't been set.. 
*/ if (cluster->cl_nodes[node->nd_num] == node) { cluster->cl_nodes[node->nd_num] = NULL; clear_bit(node->nd_num, cluster->cl_nodes_bitmap); } write_unlock(&cluster->cl_nodes_lock); mlog(ML_CLUSTER, "o2nm: Unregistered node %s\n", config_item_name(&node->nd_item)); config_item_put(item); } static struct configfs_group_operations o2nm_node_group_group_ops = { .make_item = o2nm_node_group_make_item, .drop_item = o2nm_node_group_drop_item, }; static const struct config_item_type o2nm_node_group_type = { .ct_group_ops = &o2nm_node_group_group_ops, .ct_owner = THIS_MODULE, }; /* cluster */ static void o2nm_cluster_release(struct config_item *item) { struct o2nm_cluster *cluster = to_o2nm_cluster(item); kfree(cluster); } static struct configfs_item_operations o2nm_cluster_item_ops = { .release = o2nm_cluster_release, }; static const struct config_item_type o2nm_cluster_type = { .ct_item_ops = &o2nm_cluster_item_ops, .ct_attrs = o2nm_cluster_attrs, .ct_owner = THIS_MODULE, }; /* cluster set */ struct o2nm_cluster_group { struct configfs_subsystem cs_subsys; /* some stuff? */ }; #if 0 static struct o2nm_cluster_group *to_o2nm_cluster_group(struct config_group *group) { return group ? container_of(to_configfs_subsystem(group), struct o2nm_cluster_group, cs_subsys) : NULL; } #endif static struct config_group *o2nm_cluster_group_make_group(struct config_group *group, const char *name) { struct o2nm_cluster *cluster = NULL; struct o2nm_node_group *ns = NULL; struct config_group *o2hb_group = NULL, *ret = NULL; /* this runs under the parent dir's i_mutex; there can be only * one caller in here at a time */ if (o2nm_single_cluster) return ERR_PTR(-ENOSPC); cluster = kzalloc(sizeof(struct o2nm_cluster), GFP_KERNEL); ns = kzalloc(sizeof(struct o2nm_node_group), GFP_KERNEL); o2hb_group = o2hb_alloc_hb_set(); if (cluster == NULL || ns == NULL || o2hb_group == NULL) goto out; config_group_init_type_name(&cluster->cl_group, name, &o2nm_cluster_type); configfs_add_default_group(&ns->ns_group, &cluster->cl_group); config_group_init_type_name(&ns->ns_group, "node", &o2nm_node_group_type); configfs_add_default_group(o2hb_group, &cluster->cl_group); rwlock_init(&cluster->cl_nodes_lock); cluster->cl_node_ip_tree = RB_ROOT; cluster->cl_reconnect_delay_ms = O2NET_RECONNECT_DELAY_MS_DEFAULT; cluster->cl_idle_timeout_ms = O2NET_IDLE_TIMEOUT_MS_DEFAULT; cluster->cl_keepalive_delay_ms = O2NET_KEEPALIVE_DELAY_MS_DEFAULT; cluster->cl_fence_method = O2NM_FENCE_RESET; ret = &cluster->cl_group; o2nm_single_cluster = cluster; out: if (ret == NULL) { kfree(cluster); kfree(ns); o2hb_free_hb_set(o2hb_group); ret = ERR_PTR(-ENOMEM); } return ret; } static void o2nm_cluster_group_drop_item(struct config_group *group, struct config_item *item) { struct o2nm_cluster *cluster = to_o2nm_cluster(item); BUG_ON(o2nm_single_cluster != cluster); o2nm_single_cluster = NULL; configfs_remove_default_groups(&cluster->cl_group); config_item_put(item); } static struct configfs_group_operations o2nm_cluster_group_group_ops = { .make_group = o2nm_cluster_group_make_group, .drop_item = o2nm_cluster_group_drop_item, }; static const struct config_item_type o2nm_cluster_group_type = { .ct_group_ops = &o2nm_cluster_group_group_ops, .ct_owner = THIS_MODULE, }; static struct o2nm_cluster_group o2nm_cluster_group = { .cs_subsys = { .su_group = { .cg_item = { .ci_namebuf = "cluster", .ci_type = &o2nm_cluster_group_type, }, }, }, }; int o2nm_depend_item(struct config_item *item) { return configfs_depend_item(&o2nm_cluster_group.cs_subsys, item); } void 
o2nm_undepend_item(struct config_item *item) { configfs_undepend_item(item); } int o2nm_depend_this_node(void) { int ret = 0; struct o2nm_node *local_node; local_node = o2nm_get_node_by_num(o2nm_this_node()); if (!local_node) { ret = -EINVAL; goto out; } ret = o2nm_depend_item(&local_node->nd_item); o2nm_node_put(local_node); out: return ret; } void o2nm_undepend_this_node(void) { struct o2nm_node *local_node; local_node = o2nm_get_node_by_num(o2nm_this_node()); BUG_ON(!local_node); o2nm_undepend_item(&local_node->nd_item); o2nm_node_put(local_node); } static void __exit exit_o2nm(void) { /* XXX sync with hb callbacks and shut down hb? */ o2net_unregister_hb_callbacks(); configfs_unregister_subsystem(&o2nm_cluster_group.cs_subsys); o2cb_sys_shutdown(); o2net_exit(); o2hb_exit(); } static int __init init_o2nm(void) { int ret = -1; ret = o2hb_init(); if (ret) goto out; ret = o2net_init(); if (ret) goto out_o2hb; ret = o2net_register_hb_callbacks(); if (ret) goto out_o2net; config_group_init(&o2nm_cluster_group.cs_subsys.su_group); mutex_init(&o2nm_cluster_group.cs_subsys.su_mutex); ret = configfs_register_subsystem(&o2nm_cluster_group.cs_subsys); if (ret) { printk(KERN_ERR "nodemanager: Registration returned %d\n", ret); goto out_callbacks; } ret = o2cb_sys_init(); if (!ret) goto out; configfs_unregister_subsystem(&o2nm_cluster_group.cs_subsys); out_callbacks: o2net_unregister_hb_callbacks(); out_o2net: o2net_exit(); out_o2hb: o2hb_exit(); out: return ret; } MODULE_AUTHOR("Oracle"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("OCFS2 cluster management"); module_init(init_o2nm) module_exit(exit_o2nm)
/* -*- mode: c; c-basic-offset: 8; -*- * vim: noexpandtab sw=8 ts=8 sts=0: * * Copyright (C) 2004, 2005 Oracle. All rights reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this program; if not, write to the * Free Software Foundation, Inc., 59 Temple Place - Suite 330, * Boston, MA 021110-1307, USA. */ #include <linux/slab.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/configfs.h> #include "tcp.h" #include "nodemanager.h" #include "heartbeat.h" #include "masklog.h" #include "sys.h" /* for now we operate under the assertion that there can be only one * cluster active at a time. Changing this will require trickling * cluster references throughout where nodes are looked up */ struct o2nm_cluster *o2nm_single_cluster = NULL; char *o2nm_fence_method_desc[O2NM_FENCE_METHODS] = { "reset", /* O2NM_FENCE_RESET */ "panic", /* O2NM_FENCE_PANIC */ }; static inline void o2nm_lock_subsystem(void); static inline void o2nm_unlock_subsystem(void); struct o2nm_node *o2nm_get_node_by_num(u8 node_num) { struct o2nm_node *node = NULL; if (node_num >= O2NM_MAX_NODES || o2nm_single_cluster == NULL) goto out; read_lock(&o2nm_single_cluster->cl_nodes_lock); node = o2nm_single_cluster->cl_nodes[node_num]; if (node) config_item_get(&node->nd_item); read_unlock(&o2nm_single_cluster->cl_nodes_lock); out: return node; } EXPORT_SYMBOL_GPL(o2nm_get_node_by_num); int o2nm_configured_node_map(unsigned long *map, unsigned bytes) { struct o2nm_cluster *cluster = o2nm_single_cluster; BUG_ON(bytes < (sizeof(cluster->cl_nodes_bitmap))); if (cluster == NULL) return -EINVAL; read_lock(&cluster->cl_nodes_lock); memcpy(map, cluster->cl_nodes_bitmap, sizeof(cluster->cl_nodes_bitmap)); read_unlock(&cluster->cl_nodes_lock); return 0; } EXPORT_SYMBOL_GPL(o2nm_configured_node_map); static struct o2nm_node *o2nm_node_ip_tree_lookup(struct o2nm_cluster *cluster, __be32 ip_needle, struct rb_node ***ret_p, struct rb_node **ret_parent) { struct rb_node **p = &cluster->cl_node_ip_tree.rb_node; struct rb_node *parent = NULL; struct o2nm_node *node, *ret = NULL; while (*p) { int cmp; parent = *p; node = rb_entry(parent, struct o2nm_node, nd_ip_node); cmp = memcmp(&ip_needle, &node->nd_ipv4_address, sizeof(ip_needle)); if (cmp < 0) p = &(*p)->rb_left; else if (cmp > 0) p = &(*p)->rb_right; else { ret = node; break; } } if (ret_p != NULL) *ret_p = p; if (ret_parent != NULL) *ret_parent = parent; return ret; } struct o2nm_node *o2nm_get_node_by_ip(__be32 addr) { struct o2nm_node *node = NULL; struct o2nm_cluster *cluster = o2nm_single_cluster; if (cluster == NULL) goto out; read_lock(&cluster->cl_nodes_lock); node = o2nm_node_ip_tree_lookup(cluster, addr, NULL, NULL); if (node) config_item_get(&node->nd_item); read_unlock(&cluster->cl_nodes_lock); out: return node; } EXPORT_SYMBOL_GPL(o2nm_get_node_by_ip); void o2nm_node_put(struct o2nm_node *node) { config_item_put(&node->nd_item); } EXPORT_SYMBOL_GPL(o2nm_node_put); void o2nm_node_get(struct o2nm_node *node) { config_item_get(&node->nd_item); } 
EXPORT_SYMBOL_GPL(o2nm_node_get); u8 o2nm_this_node(void) { u8 node_num = O2NM_MAX_NODES; if (o2nm_single_cluster && o2nm_single_cluster->cl_has_local) node_num = o2nm_single_cluster->cl_local_node; return node_num; } EXPORT_SYMBOL_GPL(o2nm_this_node); /* node configfs bits */ static struct o2nm_cluster *to_o2nm_cluster(struct config_item *item) { return item ? container_of(to_config_group(item), struct o2nm_cluster, cl_group) : NULL; } static struct o2nm_node *to_o2nm_node(struct config_item *item) { return item ? container_of(item, struct o2nm_node, nd_item) : NULL; } static void o2nm_node_release(struct config_item *item) { struct o2nm_node *node = to_o2nm_node(item); kfree(node); } static ssize_t o2nm_node_num_show(struct config_item *item, char *page) { return sprintf(page, "%d\n", to_o2nm_node(item)->nd_num); } static struct o2nm_cluster *to_o2nm_cluster_from_node(struct o2nm_node *node) { /* through the first node_set .parent * mycluster/nodes/mynode == o2nm_cluster->o2nm_node_group->o2nm_node */ if (node->nd_item.ci_parent) return to_o2nm_cluster(node->nd_item.ci_parent->ci_parent); else return NULL; } enum { O2NM_NODE_ATTR_NUM = 0, O2NM_NODE_ATTR_PORT, O2NM_NODE_ATTR_ADDRESS, }; static ssize_t o2nm_node_num_store(struct config_item *item, const char *page, size_t count) { struct o2nm_node *node = to_o2nm_node(item); struct o2nm_cluster *cluster; unsigned long tmp; char *p = (char *)page; int ret = 0; tmp = simple_strtoul(p, &p, 0); if (!p || (*p && (*p != '\n'))) return -EINVAL; if (tmp >= O2NM_MAX_NODES) return -ERANGE; /* once we're in the cl_nodes tree networking can look us up by * node number and try to use our address and port attributes * to connect to this node.. make sure that they've been set * before writing the node attribute? */ if (!test_bit(O2NM_NODE_ATTR_ADDRESS, &node->nd_set_attributes) || !test_bit(O2NM_NODE_ATTR_PORT, &node->nd_set_attributes)) return -EINVAL; /* XXX */ o2nm_lock_subsystem(); cluster = to_o2nm_cluster_from_node(node); if (!cluster) { o2nm_unlock_subsystem(); return -EINVAL; } write_lock(&cluster->cl_nodes_lock); if (cluster->cl_nodes[tmp]) ret = -EEXIST; else if (test_and_set_bit(O2NM_NODE_ATTR_NUM, &node->nd_set_attributes)) ret = -EBUSY; else { cluster->cl_nodes[tmp] = node; node->nd_num = tmp; set_bit(tmp, cluster->cl_nodes_bitmap); } write_unlock(&cluster->cl_nodes_lock); o2nm_unlock_subsystem(); if (ret) return ret; return count; } static ssize_t o2nm_node_ipv4_port_show(struct config_item *item, char *page) { return sprintf(page, "%u\n", ntohs(to_o2nm_node(item)->nd_ipv4_port)); } static ssize_t o2nm_node_ipv4_port_store(struct config_item *item, const char *page, size_t count) { struct o2nm_node *node = to_o2nm_node(item); unsigned long tmp; char *p = (char *)page; tmp = simple_strtoul(p, &p, 0); if (!p || (*p && (*p != '\n'))) return -EINVAL; if (tmp == 0) return -EINVAL; if (tmp >= (u16)-1) return -ERANGE; if (test_and_set_bit(O2NM_NODE_ATTR_PORT, &node->nd_set_attributes)) return -EBUSY; node->nd_ipv4_port = htons(tmp); return count; } static ssize_t o2nm_node_ipv4_address_show(struct config_item *item, char *page) { return sprintf(page, "%pI4\n", &to_o2nm_node(item)->nd_ipv4_address); } static ssize_t o2nm_node_ipv4_address_store(struct config_item *item, const char *page, size_t count) { struct o2nm_node *node = to_o2nm_node(item); struct o2nm_cluster *cluster; int ret, i; struct rb_node **p, *parent; unsigned int octets[4]; __be32 ipv4_addr = 0; ret = sscanf(page, "%3u.%3u.%3u.%3u", &octets[3], &octets[2], &octets[1], &octets[0]); 
if (ret != 4) return -EINVAL; for (i = 0; i < ARRAY_SIZE(octets); i++) { if (octets[i] > 255) return -ERANGE; be32_add_cpu(&ipv4_addr, octets[i] << (i * 8)); } o2nm_lock_subsystem(); cluster = to_o2nm_cluster_from_node(node); if (!cluster) { o2nm_unlock_subsystem(); return -EINVAL; } ret = 0; write_lock(&cluster->cl_nodes_lock); if (o2nm_node_ip_tree_lookup(cluster, ipv4_addr, &p, &parent)) ret = -EEXIST; else if (test_and_set_bit(O2NM_NODE_ATTR_ADDRESS, &node->nd_set_attributes)) ret = -EBUSY; else { rb_link_node(&node->nd_ip_node, parent, p); rb_insert_color(&node->nd_ip_node, &cluster->cl_node_ip_tree); } write_unlock(&cluster->cl_nodes_lock); o2nm_unlock_subsystem(); if (ret) return ret; memcpy(&node->nd_ipv4_address, &ipv4_addr, sizeof(ipv4_addr)); return count; } static ssize_t o2nm_node_local_show(struct config_item *item, char *page) { return sprintf(page, "%d\n", to_o2nm_node(item)->nd_local); } static ssize_t o2nm_node_local_store(struct config_item *item, const char *page, size_t count) { struct o2nm_node *node = to_o2nm_node(item); struct o2nm_cluster *cluster; unsigned long tmp; char *p = (char *)page; ssize_t ret; tmp = simple_strtoul(p, &p, 0); if (!p || (*p && (*p != '\n'))) return -EINVAL; tmp = !!tmp; /* boolean of whether this node wants to be local */ /* setting local turns on networking rx for now so we require having * set everything else first */ if (!test_bit(O2NM_NODE_ATTR_ADDRESS, &node->nd_set_attributes) || !test_bit(O2NM_NODE_ATTR_NUM, &node->nd_set_attributes) || !test_bit(O2NM_NODE_ATTR_PORT, &node->nd_set_attributes)) return -EINVAL; /* XXX */ o2nm_lock_subsystem(); cluster = to_o2nm_cluster_from_node(node); if (!cluster) { ret = -EINVAL; goto out; } /* the only failure case is trying to set a new local node * when a different one is already set */ if (tmp && tmp == cluster->cl_has_local && cluster->cl_local_node != node->nd_num) { ret = -EBUSY; goto out; } /* bring up the rx thread if we're setting the new local node. */ if (tmp && !cluster->cl_has_local) { ret = o2net_start_listening(node); if (ret) goto out; } if (!tmp && cluster->cl_has_local && cluster->cl_local_node == node->nd_num) { o2net_stop_listening(node); cluster->cl_local_node = O2NM_INVALID_NODE_NUM; } node->nd_local = tmp; if (node->nd_local) { cluster->cl_has_local = tmp; cluster->cl_local_node = node->nd_num; } ret = count; out: o2nm_unlock_subsystem(); return ret; } CONFIGFS_ATTR(o2nm_node_, num); CONFIGFS_ATTR(o2nm_node_, ipv4_port); CONFIGFS_ATTR(o2nm_node_, ipv4_address); CONFIGFS_ATTR(o2nm_node_, local); static struct configfs_attribute *o2nm_node_attrs[] = { &o2nm_node_attr_num, &o2nm_node_attr_ipv4_port, &o2nm_node_attr_ipv4_address, &o2nm_node_attr_local, NULL, }; static struct configfs_item_operations o2nm_node_item_ops = { .release = o2nm_node_release, }; static const struct config_item_type o2nm_node_type = { .ct_item_ops = &o2nm_node_item_ops, .ct_attrs = o2nm_node_attrs, .ct_owner = THIS_MODULE, }; /* node set */ struct o2nm_node_group { struct config_group ns_group; /* some stuff? */ }; #if 0 static struct o2nm_node_group *to_o2nm_node_group(struct config_group *group) { return group ? 
container_of(group, struct o2nm_node_group, ns_group) : NULL; } #endif static ssize_t o2nm_cluster_attr_write(const char *page, ssize_t count, unsigned int *val) { unsigned long tmp; char *p = (char *)page; tmp = simple_strtoul(p, &p, 0); if (!p || (*p && (*p != '\n'))) return -EINVAL; if (tmp == 0) return -EINVAL; if (tmp >= (u32)-1) return -ERANGE; *val = tmp; return count; } static ssize_t o2nm_cluster_idle_timeout_ms_show(struct config_item *item, char *page) { return sprintf(page, "%u\n", to_o2nm_cluster(item)->cl_idle_timeout_ms); } static ssize_t o2nm_cluster_idle_timeout_ms_store(struct config_item *item, const char *page, size_t count) { struct o2nm_cluster *cluster = to_o2nm_cluster(item); ssize_t ret; unsigned int val; ret = o2nm_cluster_attr_write(page, count, &val); if (ret > 0) { if (cluster->cl_idle_timeout_ms != val && o2net_num_connected_peers()) { mlog(ML_NOTICE, "o2net: cannot change idle timeout after " "the first peer has agreed to it." " %d connected peers\n", o2net_num_connected_peers()); ret = -EINVAL; } else if (val <= cluster->cl_keepalive_delay_ms) { mlog(ML_NOTICE, "o2net: idle timeout must be larger " "than keepalive delay\n"); ret = -EINVAL; } else { cluster->cl_idle_timeout_ms = val; } } return ret; } static ssize_t o2nm_cluster_keepalive_delay_ms_show( struct config_item *item, char *page) { return sprintf(page, "%u\n", to_o2nm_cluster(item)->cl_keepalive_delay_ms); } static ssize_t o2nm_cluster_keepalive_delay_ms_store( struct config_item *item, const char *page, size_t count) { struct o2nm_cluster *cluster = to_o2nm_cluster(item); ssize_t ret; unsigned int val; ret = o2nm_cluster_attr_write(page, count, &val); if (ret > 0) { if (cluster->cl_keepalive_delay_ms != val && o2net_num_connected_peers()) { mlog(ML_NOTICE, "o2net: cannot change keepalive delay after" " the first peer has agreed to it." 
" %d connected peers\n", o2net_num_connected_peers()); ret = -EINVAL; } else if (val >= cluster->cl_idle_timeout_ms) { mlog(ML_NOTICE, "o2net: keepalive delay must be " "smaller than idle timeout\n"); ret = -EINVAL; } else { cluster->cl_keepalive_delay_ms = val; } } return ret; } static ssize_t o2nm_cluster_reconnect_delay_ms_show( struct config_item *item, char *page) { return sprintf(page, "%u\n", to_o2nm_cluster(item)->cl_reconnect_delay_ms); } static ssize_t o2nm_cluster_reconnect_delay_ms_store( struct config_item *item, const char *page, size_t count) { return o2nm_cluster_attr_write(page, count, &to_o2nm_cluster(item)->cl_reconnect_delay_ms); } static ssize_t o2nm_cluster_fence_method_show( struct config_item *item, char *page) { struct o2nm_cluster *cluster = to_o2nm_cluster(item); ssize_t ret = 0; if (cluster) ret = sprintf(page, "%s\n", o2nm_fence_method_desc[cluster->cl_fence_method]); return ret; } static ssize_t o2nm_cluster_fence_method_store( struct config_item *item, const char *page, size_t count) { unsigned int i; if (page[count - 1] != '\n') goto bail; for (i = 0; i < O2NM_FENCE_METHODS; ++i) { if (count != strlen(o2nm_fence_method_desc[i]) + 1) continue; if (strncasecmp(page, o2nm_fence_method_desc[i], count - 1)) continue; if (to_o2nm_cluster(item)->cl_fence_method != i) { printk(KERN_INFO "ocfs2: Changing fence method to %s\n", o2nm_fence_method_desc[i]); to_o2nm_cluster(item)->cl_fence_method = i; } return count; } bail: return -EINVAL; } CONFIGFS_ATTR(o2nm_cluster_, idle_timeout_ms); CONFIGFS_ATTR(o2nm_cluster_, keepalive_delay_ms); CONFIGFS_ATTR(o2nm_cluster_, reconnect_delay_ms); CONFIGFS_ATTR(o2nm_cluster_, fence_method); static struct configfs_attribute *o2nm_cluster_attrs[] = { &o2nm_cluster_attr_idle_timeout_ms, &o2nm_cluster_attr_keepalive_delay_ms, &o2nm_cluster_attr_reconnect_delay_ms, &o2nm_cluster_attr_fence_method, NULL, }; static struct config_item *o2nm_node_group_make_item(struct config_group *group, const char *name) { struct o2nm_node *node = NULL; if (strlen(name) > O2NM_MAX_NAME_LEN) return ERR_PTR(-ENAMETOOLONG); node = kzalloc(sizeof(struct o2nm_node), GFP_KERNEL); if (node == NULL) return ERR_PTR(-ENOMEM); strcpy(node->nd_name, name); /* use item.ci_namebuf instead? */ config_item_init_type_name(&node->nd_item, name, &o2nm_node_type); spin_lock_init(&node->nd_lock); mlog(ML_CLUSTER, "o2nm: Registering node %s\n", name); return &node->nd_item; } static void o2nm_node_group_drop_item(struct config_group *group, struct config_item *item) { struct o2nm_node *node = to_o2nm_node(item); struct o2nm_cluster *cluster = to_o2nm_cluster(group->cg_item.ci_parent); o2net_disconnect_node(node); if (cluster->cl_has_local && (cluster->cl_local_node == node->nd_num)) { cluster->cl_has_local = 0; cluster->cl_local_node = O2NM_INVALID_NODE_NUM; o2net_stop_listening(node); } /* XXX call into net to stop this node from trading messages */ write_lock(&cluster->cl_nodes_lock); /* XXX sloppy */ if (node->nd_ipv4_address) rb_erase(&node->nd_ip_node, &cluster->cl_node_ip_tree); /* nd_num might be 0 if the node number hasn't been set.. 
*/ if (cluster->cl_nodes[node->nd_num] == node) { cluster->cl_nodes[node->nd_num] = NULL; clear_bit(node->nd_num, cluster->cl_nodes_bitmap); } write_unlock(&cluster->cl_nodes_lock); mlog(ML_CLUSTER, "o2nm: Unregistered node %s\n", config_item_name(&node->nd_item)); config_item_put(item); } static struct configfs_group_operations o2nm_node_group_group_ops = { .make_item = o2nm_node_group_make_item, .drop_item = o2nm_node_group_drop_item, }; static const struct config_item_type o2nm_node_group_type = { .ct_group_ops = &o2nm_node_group_group_ops, .ct_owner = THIS_MODULE, }; /* cluster */ static void o2nm_cluster_release(struct config_item *item) { struct o2nm_cluster *cluster = to_o2nm_cluster(item); kfree(cluster); } static struct configfs_item_operations o2nm_cluster_item_ops = { .release = o2nm_cluster_release, }; static const struct config_item_type o2nm_cluster_type = { .ct_item_ops = &o2nm_cluster_item_ops, .ct_attrs = o2nm_cluster_attrs, .ct_owner = THIS_MODULE, }; /* cluster set */ struct o2nm_cluster_group { struct configfs_subsystem cs_subsys; /* some stuff? */ }; #if 0 static struct o2nm_cluster_group *to_o2nm_cluster_group(struct config_group *group) { return group ? container_of(to_configfs_subsystem(group), struct o2nm_cluster_group, cs_subsys) : NULL; } #endif static struct config_group *o2nm_cluster_group_make_group(struct config_group *group, const char *name) { struct o2nm_cluster *cluster = NULL; struct o2nm_node_group *ns = NULL; struct config_group *o2hb_group = NULL, *ret = NULL; /* this runs under the parent dir's i_mutex; there can be only * one caller in here at a time */ if (o2nm_single_cluster) return ERR_PTR(-ENOSPC); cluster = kzalloc(sizeof(struct o2nm_cluster), GFP_KERNEL); ns = kzalloc(sizeof(struct o2nm_node_group), GFP_KERNEL); o2hb_group = o2hb_alloc_hb_set(); if (cluster == NULL || ns == NULL || o2hb_group == NULL) goto out; config_group_init_type_name(&cluster->cl_group, name, &o2nm_cluster_type); configfs_add_default_group(&ns->ns_group, &cluster->cl_group); config_group_init_type_name(&ns->ns_group, "node", &o2nm_node_group_type); configfs_add_default_group(o2hb_group, &cluster->cl_group); rwlock_init(&cluster->cl_nodes_lock); cluster->cl_node_ip_tree = RB_ROOT; cluster->cl_reconnect_delay_ms = O2NET_RECONNECT_DELAY_MS_DEFAULT; cluster->cl_idle_timeout_ms = O2NET_IDLE_TIMEOUT_MS_DEFAULT; cluster->cl_keepalive_delay_ms = O2NET_KEEPALIVE_DELAY_MS_DEFAULT; cluster->cl_fence_method = O2NM_FENCE_RESET; ret = &cluster->cl_group; o2nm_single_cluster = cluster; out: if (ret == NULL) { kfree(cluster); kfree(ns); o2hb_free_hb_set(o2hb_group); ret = ERR_PTR(-ENOMEM); } return ret; } static void o2nm_cluster_group_drop_item(struct config_group *group, struct config_item *item) { struct o2nm_cluster *cluster = to_o2nm_cluster(item); BUG_ON(o2nm_single_cluster != cluster); o2nm_single_cluster = NULL; configfs_remove_default_groups(&cluster->cl_group); config_item_put(item); } static struct configfs_group_operations o2nm_cluster_group_group_ops = { .make_group = o2nm_cluster_group_make_group, .drop_item = o2nm_cluster_group_drop_item, }; static const struct config_item_type o2nm_cluster_group_type = { .ct_group_ops = &o2nm_cluster_group_group_ops, .ct_owner = THIS_MODULE, }; static struct o2nm_cluster_group o2nm_cluster_group = { .cs_subsys = { .su_group = { .cg_item = { .ci_namebuf = "cluster", .ci_type = &o2nm_cluster_group_type, }, }, }, }; static inline void o2nm_lock_subsystem(void) { mutex_lock(&o2nm_cluster_group.cs_subsys.su_mutex); } static inline void 
o2nm_unlock_subsystem(void) { mutex_unlock(&o2nm_cluster_group.cs_subsys.su_mutex); } int o2nm_depend_item(struct config_item *item) { return configfs_depend_item(&o2nm_cluster_group.cs_subsys, item); } void o2nm_undepend_item(struct config_item *item) { configfs_undepend_item(item); } int o2nm_depend_this_node(void) { int ret = 0; struct o2nm_node *local_node; local_node = o2nm_get_node_by_num(o2nm_this_node()); if (!local_node) { ret = -EINVAL; goto out; } ret = o2nm_depend_item(&local_node->nd_item); o2nm_node_put(local_node); out: return ret; } void o2nm_undepend_this_node(void) { struct o2nm_node *local_node; local_node = o2nm_get_node_by_num(o2nm_this_node()); BUG_ON(!local_node); o2nm_undepend_item(&local_node->nd_item); o2nm_node_put(local_node); } static void __exit exit_o2nm(void) { /* XXX sync with hb callbacks and shut down hb? */ o2net_unregister_hb_callbacks(); configfs_unregister_subsystem(&o2nm_cluster_group.cs_subsys); o2cb_sys_shutdown(); o2net_exit(); o2hb_exit(); } static int __init init_o2nm(void) { int ret = -1; ret = o2hb_init(); if (ret) goto out; ret = o2net_init(); if (ret) goto out_o2hb; ret = o2net_register_hb_callbacks(); if (ret) goto out_o2net; config_group_init(&o2nm_cluster_group.cs_subsys.su_group); mutex_init(&o2nm_cluster_group.cs_subsys.su_mutex); ret = configfs_register_subsystem(&o2nm_cluster_group.cs_subsys); if (ret) { printk(KERN_ERR "nodemanager: Registration returned %d\n", ret); goto out_callbacks; } ret = o2cb_sys_init(); if (!ret) goto out; configfs_unregister_subsystem(&o2nm_cluster_group.cs_subsys); out_callbacks: o2net_unregister_hb_callbacks(); out_o2net: o2net_exit(); out_o2hb: o2hb_exit(); out: return ret; } MODULE_AUTHOR("Oracle"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("OCFS2 cluster management"); module_init(init_o2nm) module_exit(exit_o2nm)
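For reference, the two snippets below isolate to_o2nm_cluster_from_node() before and after the fix: the patched helper checks nd_item.ci_parent before dereferencing it and returns NULL once the configfs item has been unhooked from its parent, instead of chasing a NULL parent pointer.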
static struct o2nm_cluster *to_o2nm_cluster_from_node(struct o2nm_node *node) { /* through the first node_set .parent * mycluster/nodes/mynode == o2nm_cluster->o2nm_node_group->o2nm_node */ return to_o2nm_cluster(node->nd_item.ci_parent->ci_parent); }
static struct o2nm_cluster *to_o2nm_cluster_from_node(struct o2nm_node *node) { /* through the first node_set .parent * mycluster/nodes/mynode == o2nm_cluster->o2nm_node_group->o2nm_node */ if (node->nd_item.ci_parent) return to_o2nm_cluster(node->nd_item.ci_parent->ci_parent); else return NULL; }
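A minimal sketch of the caller-side pattern the fix introduces (example_store() is a hypothetical handler; the locking calls and the -EINVAL path mirror the store functions shown above):

	/* illustrative only: mirrors the locking pattern of the patched
	 * store handlers */
	static ssize_t example_store(struct o2nm_node *node)
	{
		struct o2nm_cluster *cluster;

		o2nm_lock_subsystem();            /* pin the configfs tree */
		cluster = to_o2nm_cluster_from_node(node); /* may now be NULL */
		if (!cluster) {
			o2nm_unlock_subsystem();
			return -EINVAL;           /* item being torn down */
		}
		/* ... operate on cluster while the subsystem mutex is held ... */
		o2nm_unlock_subsystem();
		return 0;
	}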
{'added': [(43, 'static inline void o2nm_lock_subsystem(void);'), (44, 'static inline void o2nm_unlock_subsystem(void);'), (45, ''), (187, '\tif (node->nd_item.ci_parent)'), (188, '\t\treturn to_o2nm_cluster(node->nd_item.ci_parent->ci_parent);'), (189, '\telse'), (190, '\t\treturn NULL;'), (203, '\tstruct o2nm_cluster *cluster;'), (223, '\to2nm_lock_subsystem();'), (224, '\tcluster = to_o2nm_cluster_from_node(node);'), (225, '\tif (!cluster) {'), (226, '\t\to2nm_unlock_subsystem();'), (227, '\t\treturn -EINVAL;'), (228, '\t}'), (229, ''), (242, '\to2nm_unlock_subsystem();'), (243, ''), (287, '\tstruct o2nm_cluster *cluster;'), (304, '\to2nm_lock_subsystem();'), (305, '\tcluster = to_o2nm_cluster_from_node(node);'), (306, '\tif (!cluster) {'), (307, '\t\to2nm_unlock_subsystem();'), (308, '\t\treturn -EINVAL;'), (309, '\t}'), (310, ''), (323, '\to2nm_unlock_subsystem();'), (324, ''), (342, '\tstruct o2nm_cluster *cluster;'), (360, '\to2nm_lock_subsystem();'), (361, '\tcluster = to_o2nm_cluster_from_node(node);'), (362, '\tif (!cluster) {'), (363, '\t\tret = -EINVAL;'), (364, '\t\tgoto out;'), (365, '\t}'), (366, ''), (370, '\t cluster->cl_local_node != node->nd_num) {'), (371, '\t\tret = -EBUSY;'), (372, '\t\tgoto out;'), (373, '\t}'), (379, '\t\t\tgoto out;'), (394, '\tret = count;'), (395, ''), (396, 'out:'), (397, '\to2nm_unlock_subsystem();'), (398, '\treturn ret;'), (778, 'static inline void o2nm_lock_subsystem(void)'), (779, '{'), (780, '\tmutex_lock(&o2nm_cluster_group.cs_subsys.su_mutex);'), (781, '}'), (782, ''), (783, 'static inline void o2nm_unlock_subsystem(void)'), (784, '{'), (785, '\tmutex_unlock(&o2nm_cluster_group.cs_subsys.su_mutex);'), (786, '}'), (787, '')], 'deleted': [(184, '\treturn to_o2nm_cluster(node->nd_item.ci_parent->ci_parent);'), (197, '\tstruct o2nm_cluster *cluster = to_o2nm_cluster_from_node(node);'), (272, '\tstruct o2nm_cluster *cluster = to_o2nm_cluster_from_node(node);'), (318, '\tstruct o2nm_cluster *cluster = to_o2nm_cluster_from_node(node);'), (339, '\t cluster->cl_local_node != node->nd_num)'), (340, '\t\treturn -EBUSY;'), (346, '\t\t\treturn ret;'), (361, '\treturn count;')]}
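The patch above is recorded as (line, text) pairs: 'added' holds the lines present only after the fix, 'deleted' those present only before; the counts and identifiers that follow complete the row's metadata.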
55
8
669
3744
https://github.com/torvalds/linux
CVE-2017-18216
['CWE-476']
rfbserver.c
rfbProcessFileTransferReadBuffer
/* * rfbserver.c - deal with server-side of the RFB protocol. */ /* * Copyright (C) 2011-2012 D. R. Commander * Copyright (C) 2005 Rohit Kumar, Johannes E. Schindelin * Copyright (C) 2002 RealVNC Ltd. * OSXvnc Copyright (C) 2001 Dan McGuirk <mcguirk@incompleteness.net>. * Original Xvnc code Copyright (C) 1999 AT&T Laboratories Cambridge. * All Rights Reserved. * * This is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this software; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, * USA. */ #ifdef __STRICT_ANSI__ #define _BSD_SOURCE #define _POSIX_SOURCE #define _XOPEN_SOURCE 600 #endif #include <stdio.h> #include <string.h> #include <rfb/rfb.h> #include <rfb/rfbregion.h> #include "private.h" #include "rfb/rfbconfig.h" #ifdef LIBVNCSERVER_HAVE_FCNTL_H #include <fcntl.h> #endif #ifdef WIN32 #include <winsock2.h> #include <ws2tcpip.h> #include <io.h> #define write(sock,buf,len) send(sock,buf,len,0) #else #ifdef LIBVNCSERVER_HAVE_UNISTD_H #include <unistd.h> #endif #include <pwd.h> #ifdef LIBVNCSERVER_HAVE_SYS_SOCKET_H #include <sys/socket.h> #endif #ifdef LIBVNCSERVER_HAVE_NETINET_IN_H #include <netinet/in.h> #include <netinet/tcp.h> #include <netdb.h> #include <arpa/inet.h> #endif #endif #ifdef DEBUGPROTO #undef DEBUGPROTO #define DEBUGPROTO(x) x #else #define DEBUGPROTO(x) #endif #include <stdarg.h> #include <scale.h> /* stst() */ #include <sys/types.h> #include <sys/stat.h> #if LIBVNCSERVER_HAVE_UNISTD_H #include <unistd.h> #endif #ifndef WIN32 /* readdir() */ #include <dirent.h> #endif /* errno */ #include <errno.h> /* strftime() */ #include <time.h> #ifdef LIBVNCSERVER_WITH_WEBSOCKETS #include "rfbssl.h" #endif #ifdef _MSC_VER #define snprintf _snprintf /* Missing in MSVC */ /* Prevent POSIX deprecation warnings */ #define close _close #define strdup _strdup #endif #ifdef WIN32 #include <direct.h> #ifdef __MINGW32__ #define mkdir(path, perms) mkdir(path) /* Omit the perms argument to match POSIX signature */ #else /* MSVC and other windows compilers */ #define mkdir(path, perms) _mkdir(path) /* Omit the perms argument to match POSIX signature */ #endif /* __MINGW32__ else... */ #ifndef S_ISDIR #define S_ISDIR(m) (((m) & S_IFDIR) == S_IFDIR) #endif #endif #ifdef LIBVNCSERVER_HAVE_LIBJPEG /* * Map of quality levels to provide compatibility with TightVNC/TigerVNC * clients. This emulates the behavior of the TigerVNC Server. 
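 * Both tables are indexed by the Tight quality level (0-9): the first
 * maps it to an equivalent JPEG quality value, the second to a chroma
 * subsampling selector.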
*/ static const int tight2turbo_qual[10] = { 15, 29, 41, 42, 62, 77, 79, 86, 92, 100 }; static const int tight2turbo_subsamp[10] = { 1, 1, 1, 2, 2, 2, 0, 0, 0, 0 }; #endif static void rfbProcessClientProtocolVersion(rfbClientPtr cl); static void rfbProcessClientNormalMessage(rfbClientPtr cl); static void rfbProcessClientInitMessage(rfbClientPtr cl); #ifdef LIBVNCSERVER_HAVE_LIBPTHREAD void rfbIncrClientRef(rfbClientPtr cl) { LOCK(cl->refCountMutex); cl->refCount++; UNLOCK(cl->refCountMutex); } void rfbDecrClientRef(rfbClientPtr cl) { LOCK(cl->refCountMutex); cl->refCount--; if(cl->refCount<=0) /* just to be sure also < 0 */ TSIGNAL(cl->deleteCond); UNLOCK(cl->refCountMutex); } #else void rfbIncrClientRef(rfbClientPtr cl) {} void rfbDecrClientRef(rfbClientPtr cl) {} #endif #ifdef LIBVNCSERVER_HAVE_LIBPTHREAD static MUTEX(rfbClientListMutex); #endif struct rfbClientIterator { rfbClientPtr next; rfbScreenInfoPtr screen; rfbBool closedToo; }; void rfbClientListInit(rfbScreenInfoPtr rfbScreen) { if(sizeof(rfbBool)!=1) { /* a sanity check */ fprintf(stderr,"rfbBool's size is not 1 (%d)!\n",(int)sizeof(rfbBool)); /* we cannot continue, because rfbBool is supposed to be char everywhere */ exit(1); } rfbScreen->clientHead = NULL; INIT_MUTEX(rfbClientListMutex); } rfbClientIteratorPtr rfbGetClientIterator(rfbScreenInfoPtr rfbScreen) { rfbClientIteratorPtr i = (rfbClientIteratorPtr)malloc(sizeof(struct rfbClientIterator)); i->next = NULL; i->screen = rfbScreen; i->closedToo = FALSE; return i; } rfbClientIteratorPtr rfbGetClientIteratorWithClosed(rfbScreenInfoPtr rfbScreen) { rfbClientIteratorPtr i = (rfbClientIteratorPtr)malloc(sizeof(struct rfbClientIterator)); i->next = NULL; i->screen = rfbScreen; i->closedToo = TRUE; return i; } rfbClientPtr rfbClientIteratorHead(rfbClientIteratorPtr i) { #ifdef LIBVNCSERVER_HAVE_LIBPTHREAD if(i->next != 0) { rfbDecrClientRef(i->next); rfbIncrClientRef(i->screen->clientHead); } #endif LOCK(rfbClientListMutex); i->next = i->screen->clientHead; UNLOCK(rfbClientListMutex); return i->next; } rfbClientPtr rfbClientIteratorNext(rfbClientIteratorPtr i) { if(i->next == 0) { LOCK(rfbClientListMutex); i->next = i->screen->clientHead; UNLOCK(rfbClientListMutex); } else { IF_PTHREADS(rfbClientPtr cl = i->next); i->next = i->next->next; IF_PTHREADS(rfbDecrClientRef(cl)); } #ifdef LIBVNCSERVER_HAVE_LIBPTHREAD if(!i->closedToo) while(i->next && i->next->sock<0) i->next = i->next->next; if(i->next) rfbIncrClientRef(i->next); #endif return i->next; } void rfbReleaseClientIterator(rfbClientIteratorPtr iterator) { IF_PTHREADS(if(iterator->next) rfbDecrClientRef(iterator->next)); free(iterator); } /* * rfbNewClientConnection is called from sockets.c when a new connection * comes in. */ void rfbNewClientConnection(rfbScreenInfoPtr rfbScreen, int sock) { rfbNewClient(rfbScreen,sock); } /* * rfbReverseConnection is called to make an outward * connection to a "listening" RFB client. 
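 * (the server dials out to a viewer that is waiting in listen mode)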
*/ rfbClientPtr rfbReverseConnection(rfbScreenInfoPtr rfbScreen, char *host, int port) { int sock; rfbClientPtr cl; if ((sock = rfbConnect(rfbScreen, host, port)) < 0) return (rfbClientPtr)NULL; cl = rfbNewClient(rfbScreen, sock); if (cl) { cl->reverseConnection = TRUE; } return cl; } void rfbSetProtocolVersion(rfbScreenInfoPtr rfbScreen, int major_, int minor_) { /* Permit the server to set the version to report */ /* TODO: sanity checking */ if ((major_==3) && (minor_ > 2 && minor_ < 9)) { rfbScreen->protocolMajorVersion = major_; rfbScreen->protocolMinorVersion = minor_; } else rfbLog("rfbSetProtocolVersion(%d,%d) set to invalid values\n", major_, minor_); } /* * rfbNewClient is called when a new connection has been made by whatever * means. */ static rfbClientPtr rfbNewTCPOrUDPClient(rfbScreenInfoPtr rfbScreen, int sock, rfbBool isUDP) { rfbProtocolVersionMsg pv; rfbClientIteratorPtr iterator; rfbClientPtr cl,cl_; #ifdef LIBVNCSERVER_IPv6 struct sockaddr_storage addr; #else struct sockaddr_in addr; #endif socklen_t addrlen = sizeof(addr); rfbProtocolExtension* extension; cl = (rfbClientPtr)calloc(sizeof(rfbClientRec),1); cl->screen = rfbScreen; cl->sock = sock; cl->viewOnly = FALSE; /* setup pseudo scaling */ cl->scaledScreen = rfbScreen; cl->scaledScreen->scaledScreenRefCount++; rfbResetStats(cl); cl->clientData = NULL; cl->clientGoneHook = rfbDoNothingWithClient; if(isUDP) { rfbLog(" accepted UDP client\n"); } else { #ifdef LIBVNCSERVER_IPv6 char host[1024]; #endif int one=1; getpeername(sock, (struct sockaddr *)&addr, &addrlen); #ifdef LIBVNCSERVER_IPv6 if(getnameinfo((struct sockaddr*)&addr, addrlen, host, sizeof(host), NULL, 0, NI_NUMERICHOST) != 0) { rfbLogPerror("rfbNewClient: error in getnameinfo"); cl->host = strdup(""); } else cl->host = strdup(host); #else cl->host = strdup(inet_ntoa(addr.sin_addr)); #endif rfbLog(" other clients:\n"); iterator = rfbGetClientIterator(rfbScreen); while ((cl_ = rfbClientIteratorNext(iterator)) != NULL) { rfbLog(" %s\n",cl_->host); } rfbReleaseClientIterator(iterator); if(!rfbSetNonBlocking(sock)) { close(sock); return NULL; } if (setsockopt(sock, IPPROTO_TCP, TCP_NODELAY, (char *)&one, sizeof(one)) < 0) { rfbLogPerror("setsockopt failed: can't set TCP_NODELAY flag, non TCP socket?"); } FD_SET(sock,&(rfbScreen->allFds)); rfbScreen->maxFd = rfbMax(sock,rfbScreen->maxFd); INIT_MUTEX(cl->outputMutex); INIT_MUTEX(cl->refCountMutex); INIT_MUTEX(cl->sendMutex); INIT_COND(cl->deleteCond); cl->state = RFB_PROTOCOL_VERSION; cl->reverseConnection = FALSE; cl->readyForSetColourMapEntries = FALSE; cl->useCopyRect = FALSE; cl->preferredEncoding = -1; cl->correMaxWidth = 48; cl->correMaxHeight = 48; #ifdef LIBVNCSERVER_HAVE_LIBZ cl->zrleData = NULL; #endif cl->copyRegion = sraRgnCreate(); cl->copyDX = 0; cl->copyDY = 0; cl->modifiedRegion = sraRgnCreateRect(0,0,rfbScreen->width,rfbScreen->height); INIT_MUTEX(cl->updateMutex); INIT_COND(cl->updateCond); cl->requestedRegion = sraRgnCreate(); cl->format = cl->screen->serverFormat; cl->translateFn = rfbTranslateNone; cl->translateLookupTable = NULL; LOCK(rfbClientListMutex); IF_PTHREADS(cl->refCount = 0); cl->next = rfbScreen->clientHead; cl->prev = NULL; if (rfbScreen->clientHead) rfbScreen->clientHead->prev = cl; rfbScreen->clientHead = cl; UNLOCK(rfbClientListMutex); #if defined(LIBVNCSERVER_HAVE_LIBZ) || defined(LIBVNCSERVER_HAVE_LIBPNG) cl->tightQualityLevel = -1; #ifdef LIBVNCSERVER_HAVE_LIBJPEG cl->tightCompressLevel = TIGHT_DEFAULT_COMPRESSION; cl->turboSubsampLevel = TURBO_DEFAULT_SUBSAMP; { int i; for 
(i = 0; i < 4; i++) cl->zsActive[i] = FALSE; } #endif #endif cl->fileTransfer.fd = -1; cl->enableCursorShapeUpdates = FALSE; cl->enableCursorPosUpdates = FALSE; cl->useRichCursorEncoding = FALSE; cl->enableLastRectEncoding = FALSE; cl->enableKeyboardLedState = FALSE; cl->enableSupportedMessages = FALSE; cl->enableSupportedEncodings = FALSE; cl->enableServerIdentity = FALSE; cl->lastKeyboardLedState = -1; cl->cursorX = rfbScreen->cursorX; cl->cursorY = rfbScreen->cursorY; cl->useNewFBSize = FALSE; #ifdef LIBVNCSERVER_HAVE_LIBZ cl->compStreamInited = FALSE; cl->compStream.total_in = 0; cl->compStream.total_out = 0; cl->compStream.zalloc = Z_NULL; cl->compStream.zfree = Z_NULL; cl->compStream.opaque = Z_NULL; cl->zlibCompressLevel = 5; #endif cl->progressiveSliceY = 0; cl->extensions = NULL; cl->lastPtrX = -1; #ifdef LIBVNCSERVER_WITH_WEBSOCKETS /* * Wait a few ms for the client to send WebSockets connection (TLS/SSL or plain) */ if (!webSocketsCheck(cl)) { /* Error reporting handled in webSocketsHandshake */ rfbCloseClient(cl); rfbClientConnectionGone(cl); return NULL; } #endif sprintf(pv,rfbProtocolVersionFormat,rfbScreen->protocolMajorVersion, rfbScreen->protocolMinorVersion); if (rfbWriteExact(cl, pv, sz_rfbProtocolVersionMsg) < 0) { rfbLogPerror("rfbNewClient: write"); rfbCloseClient(cl); rfbClientConnectionGone(cl); return NULL; } } for(extension = rfbGetExtensionIterator(); extension; extension=extension->next) { void* data = NULL; /* if the extension does not have a newClient method, it wants * to be initialized later. */ if(extension->newClient && extension->newClient(cl, &data)) rfbEnableExtension(cl, extension, data); } rfbReleaseExtensionIterator(); switch (cl->screen->newClientHook(cl)) { case RFB_CLIENT_ON_HOLD: cl->onHold = TRUE; break; case RFB_CLIENT_ACCEPT: cl->onHold = FALSE; break; case RFB_CLIENT_REFUSE: rfbCloseClient(cl); rfbClientConnectionGone(cl); cl = NULL; break; } return cl; } rfbClientPtr rfbNewClient(rfbScreenInfoPtr rfbScreen, int sock) { return(rfbNewTCPOrUDPClient(rfbScreen,sock,FALSE)); } rfbClientPtr rfbNewUDPClient(rfbScreenInfoPtr rfbScreen) { return((rfbScreen->udpClient= rfbNewTCPOrUDPClient(rfbScreen,rfbScreen->udpSock,TRUE))); } /* * rfbClientConnectionGone is called from sockets.c just after a connection * has gone away. */ void rfbClientConnectionGone(rfbClientPtr cl) { #if defined(LIBVNCSERVER_HAVE_LIBZ) && defined(LIBVNCSERVER_HAVE_LIBJPEG) int i; #endif LOCK(rfbClientListMutex); if (cl->prev) cl->prev->next = cl->next; else cl->screen->clientHead = cl->next; if (cl->next) cl->next->prev = cl->prev; UNLOCK(rfbClientListMutex); #ifdef LIBVNCSERVER_HAVE_LIBPTHREAD if(cl->screen->backgroundLoop != FALSE) { int i; do { LOCK(cl->refCountMutex); i=cl->refCount; if(i>0) WAIT(cl->deleteCond,cl->refCountMutex); UNLOCK(cl->refCountMutex); } while(i>0); } #endif if(cl->sock>=0) close(cl->sock); if (cl->scaledScreen!=NULL) cl->scaledScreen->scaledScreenRefCount--; #ifdef LIBVNCSERVER_HAVE_LIBZ rfbFreeZrleData(cl); #endif rfbFreeUltraData(cl); /* free buffers holding pixel data before and after encoding */ free(cl->beforeEncBuf); free(cl->afterEncBuf); if(cl->sock>=0) FD_CLR(cl->sock,&(cl->screen->allFds)); cl->clientGoneHook(cl); rfbLog("Client %s gone\n",cl->host); free(cl->host); #ifdef LIBVNCSERVER_HAVE_LIBZ /* Release the compression state structures if any. 
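Both the main zlib stream and any active Tight zlib streams are shut
   down with deflateEnd() so their internal state is not leaked.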
*/ if ( cl->compStreamInited ) { deflateEnd( &(cl->compStream) ); } #ifdef LIBVNCSERVER_HAVE_LIBJPEG for (i = 0; i < 4; i++) { if (cl->zsActive[i]) deflateEnd(&cl->zsStruct[i]); } #endif #endif if (cl->screen->pointerClient == cl) cl->screen->pointerClient = NULL; sraRgnDestroy(cl->modifiedRegion); sraRgnDestroy(cl->requestedRegion); sraRgnDestroy(cl->copyRegion); if (cl->translateLookupTable) free(cl->translateLookupTable); TINI_COND(cl->updateCond); TINI_MUTEX(cl->updateMutex); /* make sure outputMutex is unlocked before destroying */ LOCK(cl->outputMutex); UNLOCK(cl->outputMutex); TINI_MUTEX(cl->outputMutex); LOCK(cl->sendMutex); UNLOCK(cl->sendMutex); TINI_MUTEX(cl->sendMutex); #ifdef LIBVNCSERVER_HAVE_LIBPTHREAD close(cl->pipe_notify_client_thread[0]); close(cl->pipe_notify_client_thread[1]); #endif rfbPrintStats(cl); rfbResetStats(cl); free(cl); } /* * rfbProcessClientMessage is called when there is data to read from a client. */ void rfbProcessClientMessage(rfbClientPtr cl) { switch (cl->state) { case RFB_PROTOCOL_VERSION: rfbProcessClientProtocolVersion(cl); return; case RFB_SECURITY_TYPE: rfbProcessClientSecurityType(cl); return; case RFB_AUTHENTICATION: rfbAuthProcessClientMessage(cl); return; case RFB_INITIALISATION: case RFB_INITIALISATION_SHARED: rfbProcessClientInitMessage(cl); return; default: rfbProcessClientNormalMessage(cl); return; } } /* * rfbProcessClientProtocolVersion is called when the client sends its * protocol version. */ static void rfbProcessClientProtocolVersion(rfbClientPtr cl) { rfbProtocolVersionMsg pv; int n, major_, minor_; if ((n = rfbReadExact(cl, pv, sz_rfbProtocolVersionMsg)) <= 0) { if (n == 0) rfbLog("rfbProcessClientProtocolVersion: client gone\n"); else rfbLogPerror("rfbProcessClientProtocolVersion: read"); rfbCloseClient(cl); return; } pv[sz_rfbProtocolVersionMsg] = 0; if (sscanf(pv,rfbProtocolVersionFormat,&major_,&minor_) != 2) { rfbErr("rfbProcessClientProtocolVersion: not a valid RFB client: %s\n", pv); rfbCloseClient(cl); return; } rfbLog("Client Protocol Version %d.%d\n", major_, minor_); if (major_ != rfbProtocolMajorVersion) { rfbErr("RFB protocol version mismatch - server %d.%d, client %d.%d", cl->screen->protocolMajorVersion, cl->screen->protocolMinorVersion, major_,minor_); rfbCloseClient(cl); return; } /* Check for the minor version use either of the two standard version of RFB */ /* * UltraVNC Viewer detects FileTransfer compatible servers via rfb versions * 3.4, 3.6, 3.14, 3.16 * It's a bad method, but it is what they use to enable features... * maintaining RFB version compatibility across multiple servers is a pain * Should use something like ServerIdentity encoding */ cl->protocolMajorVersion = major_; cl->protocolMinorVersion = minor_; rfbLog("Protocol version sent %d.%d, using %d.%d\n", major_, minor_, rfbProtocolMajorVersion, cl->protocolMinorVersion); rfbAuthNewClient(cl); } void rfbClientSendString(rfbClientPtr cl, const char *reason) { char *buf; int len = strlen(reason); rfbLog("rfbClientSendString(\"%s\")\n", reason); buf = (char *)malloc(4 + len); ((uint32_t *)buf)[0] = Swap32IfLE(len); memcpy(buf + 4, reason, len); if (rfbWriteExact(cl, buf, 4 + len) < 0) rfbLogPerror("rfbClientSendString: write"); free(buf); rfbCloseClient(cl); } /* * rfbClientConnFailed is called when a client connection has failed either * because it talks the wrong protocol or it has failed authentication. 
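 * The reason string is written to the client before the socket is closed.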
*/ void rfbClientConnFailed(rfbClientPtr cl, const char *reason) { char *buf; int len = strlen(reason); rfbLog("rfbClientConnFailed(\"%s\")\n", reason); buf = (char *)malloc(8 + len); ((uint32_t *)buf)[0] = Swap32IfLE(rfbConnFailed); ((uint32_t *)buf)[1] = Swap32IfLE(len); memcpy(buf + 8, reason, len); if (rfbWriteExact(cl, buf, 8 + len) < 0) rfbLogPerror("rfbClientConnFailed: write"); free(buf); rfbCloseClient(cl); } /* * rfbProcessClientInitMessage is called when the client sends its * initialisation message. */ static void rfbProcessClientInitMessage(rfbClientPtr cl) { rfbClientInitMsg ci; union { char buf[256]; rfbServerInitMsg si; } u; int len, n; rfbClientIteratorPtr iterator; rfbClientPtr otherCl; rfbExtensionData* extension; if (cl->state == RFB_INITIALISATION_SHARED) { /* In this case behave as though an implicit ClientInit message has * already been received with a shared-flag of true. */ ci.shared = 1; /* Avoid the possibility of exposing the RFB_INITIALISATION_SHARED * state to calling software. */ cl->state = RFB_INITIALISATION; } else { if ((n = rfbReadExact(cl, (char *)&ci,sz_rfbClientInitMsg)) <= 0) { if (n == 0) rfbLog("rfbProcessClientInitMessage: client gone\n"); else rfbLogPerror("rfbProcessClientInitMessage: read"); rfbCloseClient(cl); return; } } memset(u.buf,0,sizeof(u.buf)); u.si.framebufferWidth = Swap16IfLE(cl->screen->width); u.si.framebufferHeight = Swap16IfLE(cl->screen->height); u.si.format = cl->screen->serverFormat; u.si.format.redMax = Swap16IfLE(u.si.format.redMax); u.si.format.greenMax = Swap16IfLE(u.si.format.greenMax); u.si.format.blueMax = Swap16IfLE(u.si.format.blueMax); strncpy(u.buf + sz_rfbServerInitMsg, cl->screen->desktopName, 127); len = strlen(u.buf + sz_rfbServerInitMsg); u.si.nameLength = Swap32IfLE(len); if (rfbWriteExact(cl, u.buf, sz_rfbServerInitMsg + len) < 0) { rfbLogPerror("rfbProcessClientInitMessage: write"); rfbCloseClient(cl); return; } for(extension = cl->extensions; extension;) { rfbExtensionData* next = extension->next; if(extension->extension->init && !extension->extension->init(cl, extension->data)) /* extension requested that it be removed */ rfbDisableExtension(cl, extension->extension); extension = next; } cl->state = RFB_NORMAL; if (!cl->reverseConnection && (cl->screen->neverShared || (!cl->screen->alwaysShared && !ci.shared))) { if (cl->screen->dontDisconnect) { iterator = rfbGetClientIterator(cl->screen); while ((otherCl = rfbClientIteratorNext(iterator)) != NULL) { if ((otherCl != cl) && (otherCl->state == RFB_NORMAL)) { rfbLog("-dontdisconnect: Not shared & existing client\n"); rfbLog(" refusing new client %s\n", cl->host); rfbCloseClient(cl); rfbReleaseClientIterator(iterator); return; } } rfbReleaseClientIterator(iterator); } else { iterator = rfbGetClientIterator(cl->screen); while ((otherCl = rfbClientIteratorNext(iterator)) != NULL) { if ((otherCl != cl) && (otherCl->state == RFB_NORMAL)) { rfbLog("Not shared - closing connection to client %s\n", otherCl->host); rfbCloseClient(otherCl); } } rfbReleaseClientIterator(iterator); } } } /* The values come in based on the scaled screen, we need to convert them to * values based on the man screen's coordinate system */ static rfbBool rectSwapIfLEAndClip(uint16_t* x,uint16_t* y,uint16_t* w,uint16_t* h, rfbClientPtr cl) { int x1=Swap16IfLE(*x); int y1=Swap16IfLE(*y); int w1=Swap16IfLE(*w); int h1=Swap16IfLE(*h); rfbScaledCorrection(cl->scaledScreen, cl->screen, &x1, &y1, &w1, &h1, "rectSwapIfLEAndClip"); *x = x1; *y = y1; *w = w1; *h = h1; if(*w>cl->screen->width-*x) 
*w=cl->screen->width-*x; /* possible underflow */ if(*w>cl->screen->width-*x) return FALSE; if(*h>cl->screen->height-*y) *h=cl->screen->height-*y; if(*h>cl->screen->height-*y) return FALSE; return TRUE; } /* * Send keyboard state (PointerPos pseudo-encoding). */ rfbBool rfbSendKeyboardLedState(rfbClientPtr cl) { rfbFramebufferUpdateRectHeader rect; if (cl->ublen + sz_rfbFramebufferUpdateRectHeader > UPDATE_BUF_SIZE) { if (!rfbSendUpdateBuf(cl)) return FALSE; } rect.encoding = Swap32IfLE(rfbEncodingKeyboardLedState); rect.r.x = Swap16IfLE(cl->lastKeyboardLedState); rect.r.y = 0; rect.r.w = 0; rect.r.h = 0; memcpy(&cl->updateBuf[cl->ublen], (char *)&rect, sz_rfbFramebufferUpdateRectHeader); cl->ublen += sz_rfbFramebufferUpdateRectHeader; rfbStatRecordEncodingSent(cl, rfbEncodingKeyboardLedState, sz_rfbFramebufferUpdateRectHeader, sz_rfbFramebufferUpdateRectHeader); if (!rfbSendUpdateBuf(cl)) return FALSE; return TRUE; } #define rfbSetBit(buffer, position) (buffer[(position & 255) / 8] |= (1 << (position % 8))) /* * Send rfbEncodingSupportedMessages. */ rfbBool rfbSendSupportedMessages(rfbClientPtr cl) { rfbFramebufferUpdateRectHeader rect; rfbSupportedMessages msgs; if (cl->ublen + sz_rfbFramebufferUpdateRectHeader + sz_rfbSupportedMessages > UPDATE_BUF_SIZE) { if (!rfbSendUpdateBuf(cl)) return FALSE; } rect.encoding = Swap32IfLE(rfbEncodingSupportedMessages); rect.r.x = 0; rect.r.y = 0; rect.r.w = Swap16IfLE(sz_rfbSupportedMessages); rect.r.h = 0; memcpy(&cl->updateBuf[cl->ublen], (char *)&rect, sz_rfbFramebufferUpdateRectHeader); cl->ublen += sz_rfbFramebufferUpdateRectHeader; memset((char *)&msgs, 0, sz_rfbSupportedMessages); rfbSetBit(msgs.client2server, rfbSetPixelFormat); rfbSetBit(msgs.client2server, rfbFixColourMapEntries); rfbSetBit(msgs.client2server, rfbSetEncodings); rfbSetBit(msgs.client2server, rfbFramebufferUpdateRequest); rfbSetBit(msgs.client2server, rfbKeyEvent); rfbSetBit(msgs.client2server, rfbPointerEvent); rfbSetBit(msgs.client2server, rfbClientCutText); rfbSetBit(msgs.client2server, rfbFileTransfer); rfbSetBit(msgs.client2server, rfbSetScale); /*rfbSetBit(msgs.client2server, rfbSetServerInput); */ /*rfbSetBit(msgs.client2server, rfbSetSW); */ /*rfbSetBit(msgs.client2server, rfbTextChat); */ rfbSetBit(msgs.client2server, rfbPalmVNCSetScaleFactor); rfbSetBit(msgs.server2client, rfbFramebufferUpdate); rfbSetBit(msgs.server2client, rfbSetColourMapEntries); rfbSetBit(msgs.server2client, rfbBell); rfbSetBit(msgs.server2client, rfbServerCutText); rfbSetBit(msgs.server2client, rfbResizeFrameBuffer); rfbSetBit(msgs.server2client, rfbPalmVNCReSizeFrameBuffer); if (cl->screen->xvpHook) { rfbSetBit(msgs.client2server, rfbXvp); rfbSetBit(msgs.server2client, rfbXvp); } memcpy(&cl->updateBuf[cl->ublen], (char *)&msgs, sz_rfbSupportedMessages); cl->ublen += sz_rfbSupportedMessages; rfbStatRecordEncodingSent(cl, rfbEncodingSupportedMessages, sz_rfbFramebufferUpdateRectHeader+sz_rfbSupportedMessages, sz_rfbFramebufferUpdateRectHeader+sz_rfbSupportedMessages); if (!rfbSendUpdateBuf(cl)) return FALSE; return TRUE; } /* * Send rfbEncodingSupportedEncodings. 
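 * The payload is a pseudo-rectangle listing every encoding this build
 * understands; rect.r.h carries the number of entries.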
*/ rfbBool rfbSendSupportedEncodings(rfbClientPtr cl) { rfbFramebufferUpdateRectHeader rect; static uint32_t supported[] = { rfbEncodingRaw, rfbEncodingCopyRect, rfbEncodingRRE, rfbEncodingCoRRE, rfbEncodingHextile, #ifdef LIBVNCSERVER_HAVE_LIBZ rfbEncodingZlib, rfbEncodingZRLE, rfbEncodingZYWRLE, #endif #ifdef LIBVNCSERVER_HAVE_LIBJPEG rfbEncodingTight, #endif #ifdef LIBVNCSERVER_HAVE_LIBPNG rfbEncodingTightPng, #endif rfbEncodingUltra, rfbEncodingUltraZip, rfbEncodingXCursor, rfbEncodingRichCursor, rfbEncodingPointerPos, rfbEncodingLastRect, rfbEncodingNewFBSize, rfbEncodingKeyboardLedState, rfbEncodingSupportedMessages, rfbEncodingSupportedEncodings, rfbEncodingServerIdentity, }; uint32_t nEncodings = sizeof(supported) / sizeof(supported[0]), i; /* think rfbSetEncodingsMsg */ if (cl->ublen + sz_rfbFramebufferUpdateRectHeader + (nEncodings * sizeof(uint32_t)) > UPDATE_BUF_SIZE) { if (!rfbSendUpdateBuf(cl)) return FALSE; } rect.encoding = Swap32IfLE(rfbEncodingSupportedEncodings); rect.r.x = 0; rect.r.y = 0; rect.r.w = Swap16IfLE(nEncodings * sizeof(uint32_t)); rect.r.h = Swap16IfLE(nEncodings); memcpy(&cl->updateBuf[cl->ublen], (char *)&rect, sz_rfbFramebufferUpdateRectHeader); cl->ublen += sz_rfbFramebufferUpdateRectHeader; for (i = 0; i < nEncodings; i++) { uint32_t encoding = Swap32IfLE(supported[i]); memcpy(&cl->updateBuf[cl->ublen], (char *)&encoding, sizeof(encoding)); cl->ublen += sizeof(encoding); } rfbStatRecordEncodingSent(cl, rfbEncodingSupportedEncodings, sz_rfbFramebufferUpdateRectHeader+(nEncodings * sizeof(uint32_t)), sz_rfbFramebufferUpdateRectHeader+(nEncodings * sizeof(uint32_t))); if (!rfbSendUpdateBuf(cl)) return FALSE; return TRUE; } void rfbSetServerVersionIdentity(rfbScreenInfoPtr screen, char *fmt, ...) { char buffer[256]; va_list ap; va_start(ap, fmt); vsnprintf(buffer, sizeof(buffer)-1, fmt, ap); va_end(ap); if (screen->versionString!=NULL) free(screen->versionString); screen->versionString = strdup(buffer); } /* * Send rfbEncodingServerIdentity. */ rfbBool rfbSendServerIdentity(rfbClientPtr cl) { rfbFramebufferUpdateRectHeader rect; char buffer[512]; /* tack on our library version */ snprintf(buffer,sizeof(buffer)-1, "%s (%s)", (cl->screen->versionString==NULL ? 
"unknown" : cl->screen->versionString), LIBVNCSERVER_PACKAGE_STRING); if (cl->ublen + sz_rfbFramebufferUpdateRectHeader + (strlen(buffer)+1) > UPDATE_BUF_SIZE) { if (!rfbSendUpdateBuf(cl)) return FALSE; } rect.encoding = Swap32IfLE(rfbEncodingServerIdentity); rect.r.x = 0; rect.r.y = 0; rect.r.w = Swap16IfLE(strlen(buffer)+1); rect.r.h = 0; memcpy(&cl->updateBuf[cl->ublen], (char *)&rect, sz_rfbFramebufferUpdateRectHeader); cl->ublen += sz_rfbFramebufferUpdateRectHeader; memcpy(&cl->updateBuf[cl->ublen], buffer, strlen(buffer)+1); cl->ublen += strlen(buffer)+1; rfbStatRecordEncodingSent(cl, rfbEncodingServerIdentity, sz_rfbFramebufferUpdateRectHeader+strlen(buffer)+1, sz_rfbFramebufferUpdateRectHeader+strlen(buffer)+1); if (!rfbSendUpdateBuf(cl)) return FALSE; return TRUE; } /* * Send an xvp server message */ rfbBool rfbSendXvp(rfbClientPtr cl, uint8_t version, uint8_t code) { rfbXvpMsg xvp; xvp.type = rfbXvp; xvp.pad = 0; xvp.version = version; xvp.code = code; LOCK(cl->sendMutex); if (rfbWriteExact(cl, (char *)&xvp, sz_rfbXvpMsg) < 0) { rfbLogPerror("rfbSendXvp: write"); rfbCloseClient(cl); } UNLOCK(cl->sendMutex); rfbStatRecordMessageSent(cl, rfbXvp, sz_rfbXvpMsg, sz_rfbXvpMsg); return TRUE; } rfbBool rfbSendTextChatMessage(rfbClientPtr cl, uint32_t length, char *buffer) { rfbTextChatMsg tc; int bytesToSend=0; memset((char *)&tc, 0, sizeof(tc)); tc.type = rfbTextChat; tc.length = Swap32IfLE(length); switch(length) { case rfbTextChatOpen: case rfbTextChatClose: case rfbTextChatFinished: bytesToSend=0; break; default: bytesToSend=length; if (bytesToSend>rfbTextMaxSize) bytesToSend=rfbTextMaxSize; } if (cl->ublen + sz_rfbTextChatMsg + bytesToSend > UPDATE_BUF_SIZE) { if (!rfbSendUpdateBuf(cl)) return FALSE; } memcpy(&cl->updateBuf[cl->ublen], (char *)&tc, sz_rfbTextChatMsg); cl->ublen += sz_rfbTextChatMsg; if (bytesToSend>0) { memcpy(&cl->updateBuf[cl->ublen], buffer, bytesToSend); cl->ublen += bytesToSend; } rfbStatRecordMessageSent(cl, rfbTextChat, sz_rfbTextChatMsg+bytesToSend, sz_rfbTextChatMsg+bytesToSend); if (!rfbSendUpdateBuf(cl)) return FALSE; return TRUE; } #define FILEXFER_ALLOWED_OR_CLOSE_AND_RETURN(msg, cl, ret) \ if ((cl->screen->getFileTransferPermission != NULL \ && cl->screen->getFileTransferPermission(cl) != TRUE) \ || cl->screen->permitFileTransfer != TRUE) { \ rfbLog("%sUltra File Transfer is disabled, dropping client: %s\n", msg, cl->host); \ rfbCloseClient(cl); \ return ret; \ } int DB = 1; rfbBool rfbSendFileTransferMessage(rfbClientPtr cl, uint8_t contentType, uint8_t contentParam, uint32_t size, uint32_t length, const char *buffer) { rfbFileTransferMsg ft; ft.type = rfbFileTransfer; ft.contentType = contentType; ft.contentParam = contentParam; ft.pad = 0; /* UltraVNC did not Swap16LE(ft.contentParam) (Looks like it might be BigEndian) */ ft.size = Swap32IfLE(size); ft.length = Swap32IfLE(length); FILEXFER_ALLOWED_OR_CLOSE_AND_RETURN("", cl, FALSE); /* rfbLog("rfbSendFileTransferMessage( %dtype, %dparam, %dsize, %dlen, %p)\n", contentType, contentParam, size, length, buffer); */ LOCK(cl->sendMutex); if (rfbWriteExact(cl, (char *)&ft, sz_rfbFileTransferMsg) < 0) { rfbLogPerror("rfbSendFileTransferMessage: write"); rfbCloseClient(cl); UNLOCK(cl->sendMutex); return FALSE; } if (length>0) { if (rfbWriteExact(cl, buffer, length) < 0) { rfbLogPerror("rfbSendFileTransferMessage: write"); rfbCloseClient(cl); UNLOCK(cl->sendMutex); return FALSE; } } UNLOCK(cl->sendMutex); rfbStatRecordMessageSent(cl, rfbFileTransfer, sz_rfbFileTransferMsg+length, 
sz_rfbFileTransferMsg+length); return TRUE; } /* * UltraVNC uses Windows Structures */ #define MAX_PATH 260 typedef struct { uint32_t dwLowDateTime; uint32_t dwHighDateTime; } RFB_FILETIME; typedef struct { uint32_t dwFileAttributes; RFB_FILETIME ftCreationTime; RFB_FILETIME ftLastAccessTime; RFB_FILETIME ftLastWriteTime; uint32_t nFileSizeHigh; uint32_t nFileSizeLow; uint32_t dwReserved0; uint32_t dwReserved1; uint8_t cFileName[ MAX_PATH ]; uint8_t cAlternateFileName[ 14 ]; } RFB_FIND_DATA; #define RFB_FILE_ATTRIBUTE_READONLY 0x1 #define RFB_FILE_ATTRIBUTE_HIDDEN 0x2 #define RFB_FILE_ATTRIBUTE_SYSTEM 0x4 #define RFB_FILE_ATTRIBUTE_DIRECTORY 0x10 #define RFB_FILE_ATTRIBUTE_ARCHIVE 0x20 #define RFB_FILE_ATTRIBUTE_NORMAL 0x80 #define RFB_FILE_ATTRIBUTE_TEMPORARY 0x100 #define RFB_FILE_ATTRIBUTE_COMPRESSED 0x800 rfbBool rfbFilenameTranslate2UNIX(rfbClientPtr cl, /* in */ char *path, /* out */ char *unixPath, size_t unixPathMaxLen) { int x; char *home=NULL; FILEXFER_ALLOWED_OR_CLOSE_AND_RETURN("", cl, FALSE); /* * Do not use strncpy() - truncating the file name would probably have undesirable side effects * Instead check if destination buffer is big enough */ if (strlen(path) >= unixPathMaxLen) return FALSE; /* C: */ if (path[0]=='C' && path[1]==':') strcpy(unixPath, &path[2]); else { home = getenv("HOME"); if (home!=NULL) { /* Re-check buffer size */ if ((strlen(path) + strlen(home) + 1) >= unixPathMaxLen) return FALSE; strcpy(unixPath, home); strcat(unixPath,"/"); strcat(unixPath, path); } else strcpy(unixPath, path); } for (x=0;x<strlen(unixPath);x++) if (unixPath[x]=='\\') unixPath[x]='/'; return TRUE; } rfbBool rfbFilenameTranslate2DOS(rfbClientPtr cl, char *unixPath, char *path) { int x; FILEXFER_ALLOWED_OR_CLOSE_AND_RETURN("", cl, FALSE); sprintf(path,"C:%s", unixPath); for (x=2;x<strlen(path);x++) if (path[x]=='/') path[x]='\\'; return TRUE; } rfbBool rfbSendDirContent(rfbClientPtr cl, int length, char *buffer) { char retfilename[MAX_PATH]; char path[MAX_PATH]; struct stat statbuf; RFB_FIND_DATA win32filename; int nOptLen = 0, retval=0; #ifdef WIN32 WIN32_FIND_DATAA winFindData; HANDLE findHandle; int pathLen, basePathLength; char *basePath; #else DIR *dirp=NULL; struct dirent *direntp=NULL; #endif FILEXFER_ALLOWED_OR_CLOSE_AND_RETURN("", cl, FALSE); /* Client thinks we are Winblows */ if (!rfbFilenameTranslate2UNIX(cl, buffer, path, sizeof(path))) return FALSE; if (DB) rfbLog("rfbProcessFileTransfer() rfbDirContentRequest: rfbRDirContent: \"%s\"->\"%s\"\n",buffer, path); #ifdef WIN32 // Create a search string, like C:\folder\* pathLen = strlen(path); basePath = malloc(pathLen + 3); memcpy(basePath, path, pathLen); basePathLength = pathLen; basePath[basePathLength] = '\\'; basePath[basePathLength + 1] = '*'; basePath[basePathLength + 2] = '\0'; // Start a search memset(&winFindData, 0, sizeof(winFindData)); findHandle = FindFirstFileA(path, &winFindData); free(basePath); if (findHandle == INVALID_HANDLE_VALUE) #else dirp=opendir(path); if (dirp==NULL) #endif return rfbSendFileTransferMessage(cl, rfbDirPacket, rfbADirectory, 0, 0, NULL); /* send back the path name (necessary for links) */ if (rfbSendFileTransferMessage(cl, rfbDirPacket, rfbADirectory, 0, length, buffer)==FALSE) return FALSE; #ifdef WIN32 while (findHandle != INVALID_HANDLE_VALUE) #else for (direntp=readdir(dirp); direntp!=NULL; direntp=readdir(dirp)) #endif { /* get stats */ #ifdef WIN32 snprintf(retfilename,sizeof(retfilename),"%s/%s", path, winFindData.cFileName); #else 
snprintf(retfilename,sizeof(retfilename),"%s/%s", path, direntp->d_name); #endif retval = stat(retfilename, &statbuf); if (retval==0) { memset((char *)&win32filename, 0, sizeof(win32filename)); #ifdef WIN32 win32filename.dwFileAttributes = winFindData.dwFileAttributes; win32filename.ftCreationTime.dwLowDateTime = winFindData.ftCreationTime.dwLowDateTime; win32filename.ftCreationTime.dwHighDateTime = winFindData.ftCreationTime.dwHighDateTime; win32filename.ftLastAccessTime.dwLowDateTime = winFindData.ftLastAccessTime.dwLowDateTime; win32filename.ftLastAccessTime.dwHighDateTime = winFindData.ftLastAccessTime.dwHighDateTime; win32filename.ftLastWriteTime.dwLowDateTime = winFindData.ftLastWriteTime.dwLowDateTime; win32filename.ftLastWriteTime.dwHighDateTime = winFindData.ftLastWriteTime.dwHighDateTime; win32filename.nFileSizeLow = winFindData.nFileSizeLow; win32filename.nFileSizeHigh = winFindData.nFileSizeHigh; win32filename.dwReserved0 = winFindData.dwReserved0; win32filename.dwReserved1 = winFindData.dwReserved1; strcpy((char *)win32filename.cFileName, winFindData.cFileName); strcpy((char *)win32filename.cAlternateFileName, winFindData.cAlternateFileName); #else win32filename.dwFileAttributes = Swap32IfBE(RFB_FILE_ATTRIBUTE_NORMAL); if (S_ISDIR(statbuf.st_mode)) win32filename.dwFileAttributes = Swap32IfBE(RFB_FILE_ATTRIBUTE_DIRECTORY); win32filename.ftCreationTime.dwLowDateTime = Swap32IfBE(statbuf.st_ctime); /* Intel Order */ win32filename.ftCreationTime.dwHighDateTime = 0; win32filename.ftLastAccessTime.dwLowDateTime = Swap32IfBE(statbuf.st_atime); /* Intel Order */ win32filename.ftLastAccessTime.dwHighDateTime = 0; win32filename.ftLastWriteTime.dwLowDateTime = Swap32IfBE(statbuf.st_mtime); /* Intel Order */ win32filename.ftLastWriteTime.dwHighDateTime = 0; win32filename.nFileSizeLow = Swap32IfBE(statbuf.st_size); /* Intel Order */ win32filename.nFileSizeHigh = 0; win32filename.dwReserved0 = 0; win32filename.dwReserved1 = 0; /* If this had the full path, we would need to translate to DOS format ("C:\") */ /* rfbFilenameTranslate2DOS(cl, retfilename, win32filename.cFileName); */ strcpy((char *)win32filename.cFileName, direntp->d_name); #endif /* Do not show hidden files (but show how to move up the tree) */ if ((strcmp((char *)win32filename.cFileName, "..")==0) || (win32filename.cFileName[0]!='.')) { nOptLen = sizeof(RFB_FIND_DATA) - MAX_PATH - 14 + strlen((char *)win32filename.cFileName); /* rfbLog("rfbProcessFileTransfer() rfbDirContentRequest: rfbRDirContent: Sending \"%s\"\n", (char *)win32filename.cFileName); */ if (rfbSendFileTransferMessage(cl, rfbDirPacket, rfbADirectory, 0, nOptLen, (char *)&win32filename)==FALSE) { #ifdef WIN32 FindClose(findHandle); #else closedir(dirp); #endif return FALSE; } } } #ifdef WIN32 if (FindNextFileA(findHandle, &winFindData) == 0) { FindClose(findHandle); findHandle = INVALID_HANDLE_VALUE; } #endif } #ifdef WIN32 if (findHandle != INVALID_HANDLE_VALUE) { FindClose(findHandle); } #else closedir(dirp); #endif /* End of the transfer */ return rfbSendFileTransferMessage(cl, rfbDirPacket, 0, 0, 0, NULL); } char *rfbProcessFileTransferReadBuffer(rfbClientPtr cl, uint32_t length) { char *buffer=NULL; int n=0; FILEXFER_ALLOWED_OR_CLOSE_AND_RETURN("", cl, NULL); /* We later alloc length+1, which might wrap around on 32-bit systems if length equals 0XFFFFFFFF, i.e. SIZE_MAX for 32-bit systems. 
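In that case malloc() below would be asked for length+1 == 0 bytes, and
   the subsequent rfbReadExact() of length bytes would overflow the tiny
   allocation.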
On 64-bit systems, a length of 0XFFFFFFFF will safely be allocated since this check will never trigger and malloc() can digest length+1 without problems as length is a uint32_t. */ if(length == SIZE_MAX) { rfbErr("rfbProcessFileTransferReadBuffer: too big file transfer length requested: %u", (unsigned int)length); rfbCloseClient(cl); return NULL; } if (length>0) { buffer=malloc((size_t)length+1); if (buffer!=NULL) { if ((n = rfbReadExact(cl, (char *)buffer, length)) <= 0) { if (n != 0) rfbLogPerror("rfbProcessFileTransferReadBuffer: read"); rfbCloseClient(cl); /* NOTE: don't forget to free(buffer) if you return early! */ if (buffer!=NULL) free(buffer); return NULL; } /* Null Terminate */ buffer[length]=0; } } return buffer; } rfbBool rfbSendFileTransferChunk(rfbClientPtr cl) { /* Allocate buffer for compression */ unsigned char readBuf[sz_rfbBlockSize]; int bytesRead=0; int retval=0; fd_set wfds; struct timeval tv; int n; #ifdef LIBVNCSERVER_HAVE_LIBZ unsigned char compBuf[sz_rfbBlockSize + 1024]; unsigned long nMaxCompSize = sizeof(compBuf); int nRetC = 0; #endif /* * Don't close the client if we get into this one because * it is called from many places to service file transfers. * Note that permitFileTransfer is checked first. */ if (cl->screen->permitFileTransfer != TRUE || (cl->screen->getFileTransferPermission != NULL && cl->screen->getFileTransferPermission(cl) != TRUE)) { return TRUE; } /* If not sending, or no file open... Return as if we sent something! */ if ((cl->fileTransfer.fd!=-1) && (cl->fileTransfer.sending==1)) { FD_ZERO(&wfds); FD_SET(cl->sock, &wfds); /* return immediately */ tv.tv_sec = 0; tv.tv_usec = 0; n = select(cl->sock + 1, NULL, &wfds, NULL, &tv); if (n<0) { #ifdef WIN32 errno=WSAGetLastError(); #endif rfbLog("rfbSendFileTransferChunk() select failed: %s\n", strerror(errno)); } /* We have space on the transmit queue */ if (n > 0) { bytesRead = read(cl->fileTransfer.fd, readBuf, sz_rfbBlockSize); switch (bytesRead) { case 0: /* rfbLog("rfbSendFileTransferChunk(): End-Of-File Encountered\n"); */ retval = rfbSendFileTransferMessage(cl, rfbEndOfFile, 0, 0, 0, NULL); close(cl->fileTransfer.fd); cl->fileTransfer.fd = -1; cl->fileTransfer.sending = 0; cl->fileTransfer.receiving = 0; return retval; case -1: /* TODO : send an error msg to the client... 
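(for now the error is logged and the transfer aborted below)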
*/ #ifdef WIN32 errno=WSAGetLastError(); #endif rfbLog("rfbSendFileTransferChunk(): %s\n",strerror(errno)); retval = rfbSendFileTransferMessage(cl, rfbAbortFileTransfer, 0, 0, 0, NULL); close(cl->fileTransfer.fd); cl->fileTransfer.fd = -1; cl->fileTransfer.sending = 0; cl->fileTransfer.receiving = 0; return retval; default: /* rfbLog("rfbSendFileTransferChunk(): Read %d bytes\n", bytesRead); */ if (!cl->fileTransfer.compressionEnabled) return rfbSendFileTransferMessage(cl, rfbFilePacket, 0, 0, bytesRead, (char *)readBuf); else { #ifdef LIBVNCSERVER_HAVE_LIBZ nRetC = compress(compBuf, &nMaxCompSize, readBuf, bytesRead); /* rfbLog("Compressed the packet from %d -> %d bytes\n", nMaxCompSize, bytesRead); */ if ((nRetC==0) && (nMaxCompSize<bytesRead)) return rfbSendFileTransferMessage(cl, rfbFilePacket, 0, 1, nMaxCompSize, (char *)compBuf); else return rfbSendFileTransferMessage(cl, rfbFilePacket, 0, 0, bytesRead, (char *)readBuf); #else /* We do not support compression of the data stream */ return rfbSendFileTransferMessage(cl, rfbFilePacket, 0, 0, bytesRead, (char *)readBuf); #endif } } } } return TRUE; } rfbBool rfbProcessFileTransfer(rfbClientPtr cl, uint8_t contentType, uint8_t contentParam, uint32_t size, uint32_t length) { char *buffer=NULL, *p=NULL; int retval=0; char filename1[MAX_PATH]; char filename2[MAX_PATH]; char szFileTime[MAX_PATH]; struct stat statbuf; uint32_t sizeHtmp=0; int n=0; char timespec[64]; #ifdef LIBVNCSERVER_HAVE_LIBZ unsigned char compBuff[sz_rfbBlockSize]; unsigned long nRawBytes = sz_rfbBlockSize; int nRet = 0; #endif FILEXFER_ALLOWED_OR_CLOSE_AND_RETURN("", cl, FALSE); /* rfbLog("rfbProcessFileTransfer(%dtype, %dparam, %dsize, %dlen)\n", contentType, contentParam, size, length); */ switch (contentType) { case rfbDirContentRequest: switch (contentParam) { case rfbRDrivesList: /* Client requests the List of Local Drives */ /* rfbLog("rfbProcessFileTransfer() rfbDirContentRequest: rfbRDrivesList:\n"); */ /* Format when filled : "C:\<NULL>D:\<NULL>....Z:\<NULL><NULL> * * We replace the "\" char following the drive letter and ":" * with a char corresponding to the type of drive * We obtain something like "C:l<NULL>D:c<NULL>....Z:n\<NULL><NULL>" * Isn't it ugly ? * DRIVE_FIXED = 'l' (local?) * DRIVE_REMOVABLE = 'f' (floppy?) 
* DRIVE_CDROM = 'c' * DRIVE_REMOTE = 'n' */ /* in unix, there are no 'drives' (We could list mount points though) * We fake the root as a "C:" for the Winblows users */ filename2[0]='C'; filename2[1]=':'; filename2[2]='l'; filename2[3]=0; filename2[4]=0; retval = rfbSendFileTransferMessage(cl, rfbDirPacket, rfbADrivesList, 0, 5, filename2); if (buffer!=NULL) free(buffer); return retval; break; case rfbRDirContent: /* Client requests the content of a directory */ /* rfbLog("rfbProcessFileTransfer() rfbDirContentRequest: rfbRDirContent\n"); */ if ((buffer = rfbProcessFileTransferReadBuffer(cl, length))==NULL) return FALSE; retval = rfbSendDirContent(cl, length, buffer); if (buffer!=NULL) free(buffer); return retval; } break; case rfbDirPacket: rfbLog("rfbProcessFileTransfer() rfbDirPacket\n"); break; case rfbFileAcceptHeader: rfbLog("rfbProcessFileTransfer() rfbFileAcceptHeader\n"); break; case rfbCommandReturn: rfbLog("rfbProcessFileTransfer() rfbCommandReturn\n"); break; case rfbFileChecksums: /* Destination file already exists - the viewer sends the checksums */ rfbLog("rfbProcessFileTransfer() rfbFileChecksums\n"); break; case rfbFileTransferAccess: rfbLog("rfbProcessFileTransfer() rfbFileTransferAccess\n"); break; /* * sending from the server to the viewer */ case rfbFileTransferRequest: /* rfbLog("rfbProcessFileTransfer() rfbFileTransferRequest:\n"); */ /* add some space to the end of the buffer as we will be adding a timespec to it */ if ((buffer = rfbProcessFileTransferReadBuffer(cl, length))==NULL) return FALSE; /* The client requests a File */ if (!rfbFilenameTranslate2UNIX(cl, buffer, filename1, sizeof(filename1))) goto fail; cl->fileTransfer.fd=open(filename1, O_RDONLY, 0744); /* */ if (DB) rfbLog("rfbProcessFileTransfer() rfbFileTransferRequest(\"%s\"->\"%s\") Open: %s fd=%d\n", buffer, filename1, (cl->fileTransfer.fd==-1?"Failed":"Success"), cl->fileTransfer.fd); if (cl->fileTransfer.fd!=-1) { if (fstat(cl->fileTransfer.fd, &statbuf)!=0) { close(cl->fileTransfer.fd); cl->fileTransfer.fd=-1; } else { /* Add the File Time Stamp to the filename */ strftime(timespec, sizeof(timespec), "%m/%d/%Y %H:%M",gmtime(&statbuf.st_ctime)); buffer=realloc(buffer, length + strlen(timespec) + 2); /* comma, and Null term */ if (buffer==NULL) { rfbLog("rfbProcessFileTransfer() rfbFileTransferRequest: Failed to malloc %d bytes\n", length + strlen(timespec) + 2); return FALSE; } strcat(buffer,","); strcat(buffer, timespec); length = strlen(buffer); if (DB) rfbLog("rfbProcessFileTransfer() buffer is now: \"%s\"\n", buffer); } } /* The viewer supports compression if size==1 */ cl->fileTransfer.compressionEnabled = (size==1); /* rfbLog("rfbProcessFileTransfer() rfbFileTransferRequest(\"%s\"->\"%s\")%s\n", buffer, filename1, (size==1?" <Compression Enabled>":"")); */ /* File Size in bytes, 0xFFFFFFFF (-1) means error */ retval = rfbSendFileTransferMessage(cl, rfbFileHeader, 0, (cl->fileTransfer.fd==-1 ? 
-1 : statbuf.st_size), length, buffer); if (cl->fileTransfer.fd==-1) { if (buffer!=NULL) free(buffer); return retval; } /* setup filetransfer stuff */ cl->fileTransfer.fileSize = statbuf.st_size; cl->fileTransfer.numPackets = statbuf.st_size / sz_rfbBlockSize; cl->fileTransfer.receiving = 0; cl->fileTransfer.sending = 0; /* set when we receive a rfbFileHeader: */ /* TODO: finish 64-bit file size support */ sizeHtmp = 0; LOCK(cl->sendMutex); if (rfbWriteExact(cl, (char *)&sizeHtmp, 4) < 0) { rfbLogPerror("rfbProcessFileTransfer: write"); rfbCloseClient(cl); UNLOCK(cl->sendMutex); if (buffer!=NULL) free(buffer); return FALSE; } UNLOCK(cl->sendMutex); break; case rfbFileHeader: /* Destination file (viewer side) is ready for reception (size > 0) or not (size = -1) */ if (size==-1) { rfbLog("rfbProcessFileTransfer() rfbFileHeader (error, aborting)\n"); close(cl->fileTransfer.fd); cl->fileTransfer.fd=-1; return TRUE; } /* rfbLog("rfbProcessFileTransfer() rfbFileHeader (%d bytes of a file)\n", size); */ /* Starts the transfer! */ cl->fileTransfer.sending=1; return rfbSendFileTransferChunk(cl); break; /* * sending from the viewer to the server */ case rfbFileTransferOffer: /* client is sending a file to us */ /* buffer contains full path name (plus FileTime) */ /* size contains size of the file */ /* rfbLog("rfbProcessFileTransfer() rfbFileTransferOffer:\n"); */ if ((buffer = rfbProcessFileTransferReadBuffer(cl, length))==NULL) return FALSE; /* Parse the FileTime */ p = strrchr(buffer, ','); if (p!=NULL) { *p = '\0'; strncpy(szFileTime, p+1, sizeof(szFileTime)); szFileTime[sizeof(szFileTime)-1] = '\x00'; /* ensure NULL terminating byte is present, even if copy overflowed */ } else szFileTime[0]=0; /* Need to read in sizeHtmp */ if ((n = rfbReadExact(cl, (char *)&sizeHtmp, 4)) <= 0) { if (n != 0) rfbLogPerror("rfbProcessFileTransfer: read sizeHtmp"); rfbCloseClient(cl); /* NOTE: don't forget to free(buffer) if you return early! */ if (buffer!=NULL) free(buffer); return FALSE; } sizeHtmp = Swap32IfLE(sizeHtmp); if (!rfbFilenameTranslate2UNIX(cl, buffer, filename1, sizeof(filename1))) goto fail; /* If the file exists... We can send a rfbFileChecksums back to the client before we send an rfbFileAcceptHeader */ /* TODO: Delta Transfer */ cl->fileTransfer.fd=open(filename1, O_CREAT|O_WRONLY|O_TRUNC, 0744); if (DB) rfbLog("rfbProcessFileTransfer() rfbFileTransferOffer(\"%s\"->\"%s\") %s %s fd=%d\n", buffer, filename1, (cl->fileTransfer.fd==-1?"Failed":"Success"), (cl->fileTransfer.fd==-1?strerror(errno):""), cl->fileTransfer.fd); /* */ /* File Size in bytes, 0xFFFFFFFF (-1) means error */ retval = rfbSendFileTransferMessage(cl, rfbFileAcceptHeader, 0, (cl->fileTransfer.fd==-1 ? 
-1 : 0), length, buffer); if (cl->fileTransfer.fd==-1) { free(buffer); return retval; } /* setup filetransfer stuff */ cl->fileTransfer.fileSize = size; cl->fileTransfer.numPackets = size / sz_rfbBlockSize; cl->fileTransfer.receiving = 1; cl->fileTransfer.sending = 0; break; case rfbFilePacket: /* rfbLog("rfbProcessFileTransfer() rfbFilePacket:\n"); */ if ((buffer = rfbProcessFileTransferReadBuffer(cl, length))==NULL) return FALSE; if (cl->fileTransfer.fd!=-1) { /* buffer contains the contents of the file */ if (size==0) retval=write(cl->fileTransfer.fd, buffer, length); else { #ifdef LIBVNCSERVER_HAVE_LIBZ /* compressed packet */ nRet = uncompress(compBuff,&nRawBytes,(const unsigned char*)buffer, length); if(nRet == Z_OK) retval=write(cl->fileTransfer.fd, (char*)compBuff, nRawBytes); else retval = -1; #else /* Write the file out as received... */ retval=write(cl->fileTransfer.fd, buffer, length); #endif } if (retval==-1) { close(cl->fileTransfer.fd); cl->fileTransfer.fd=-1; cl->fileTransfer.sending = 0; cl->fileTransfer.receiving = 0; } } break; case rfbEndOfFile: if (DB) rfbLog("rfbProcessFileTransfer() rfbEndOfFile\n"); /* */ if (cl->fileTransfer.fd!=-1) close(cl->fileTransfer.fd); cl->fileTransfer.fd=-1; cl->fileTransfer.sending = 0; cl->fileTransfer.receiving = 0; break; case rfbAbortFileTransfer: if (DB) rfbLog("rfbProcessFileTransfer() rfbAbortFileTransfer\n"); /* */ if (cl->fileTransfer.fd!=-1) { close(cl->fileTransfer.fd); cl->fileTransfer.fd=-1; cl->fileTransfer.sending = 0; cl->fileTransfer.receiving = 0; } else { /* We use this message for FileTransfer rights (<=RC18 versions) * The client asks for FileTransfer permission */ if (contentParam == 0) { rfbLog("rfbProcessFileTransfer() File Transfer Permission DENIED! (Client Version <=RC18)\n"); /* Old method for FileTransfer handshake permission (<=RC18) (Deny it) */ return rfbSendFileTransferMessage(cl, rfbAbortFileTransfer, 0, -1, 0, ""); } /* New method is allowed */ if (cl->screen->getFileTransferPermission!=NULL) { if (cl->screen->getFileTransferPermission(cl)==TRUE) { rfbLog("rfbProcessFileTransfer() File Transfer Permission Granted!\n"); return rfbSendFileTransferMessage(cl, rfbFileTransferAccess, 0, 1 , 0, ""); /* Permit */ } else { rfbLog("rfbProcessFileTransfer() File Transfer Permission DENIED!\n"); return rfbSendFileTransferMessage(cl, rfbFileTransferAccess, 0, -1 , 0, ""); /* Deny */ } } else { if (cl->screen->permitFileTransfer) { rfbLog("rfbProcessFileTransfer() File Transfer Permission Granted!\n"); return rfbSendFileTransferMessage(cl, rfbFileTransferAccess, 0, 1 , 0, ""); /* Permit */ } else { rfbLog("rfbProcessFileTransfer() File Transfer Permission DENIED by default!\n"); return rfbSendFileTransferMessage(cl, rfbFileTransferAccess, 0, -1 , 0, ""); /* DEFAULT: DENY (for security) */ } } } break; case rfbCommand: /* rfbLog("rfbProcessFileTransfer() rfbCommand:\n"); */ if ((buffer = rfbProcessFileTransferReadBuffer(cl, length))==NULL) return FALSE; switch (contentParam) { case rfbCDirCreate: /* Client requests the creation of a directory */ if (!rfbFilenameTranslate2UNIX(cl, buffer, filename1, sizeof(filename1))) goto fail; retval = mkdir(filename1, 0755); if (DB) rfbLog("rfbProcessFileTransfer() rfbCommand: rfbCDirCreate(\"%s\"->\"%s\") %s\n", buffer, filename1, (retval==-1?"Failed":"Success")); /* */ retval = rfbSendFileTransferMessage(cl, rfbCommandReturn, rfbADirCreate, retval, length, buffer); if (buffer!=NULL) free(buffer); return retval; case rfbCFileDelete: /* Client requests the deletion of a file */ if
(!rfbFilenameTranslate2UNIX(cl, buffer, filename1, sizeof(filename1))) goto fail; if (stat(filename1,&statbuf)==0) { if (S_ISDIR(statbuf.st_mode)) retval = rmdir(filename1); else retval = unlink(filename1); } else retval=-1; retval = rfbSendFileTransferMessage(cl, rfbCommandReturn, rfbAFileDelete, retval, length, buffer); if (buffer!=NULL) free(buffer); return retval; case rfbCFileRename: /* Client requests the Renaming of a file/directory */ p = strrchr(buffer, '*'); if (p != NULL) { /* Split into 2 filenames ('*' is a separator) */ *p = '\0'; if (!rfbFilenameTranslate2UNIX(cl, buffer, filename1, sizeof(filename1))) goto fail; if (!rfbFilenameTranslate2UNIX(cl, p+1, filename2, sizeof(filename2))) goto fail; retval = rename(filename1,filename2); if (DB) rfbLog("rfbProcessFileTransfer() rfbCommand: rfbCFileRename(\"%s\"->\"%s\" -->> \"%s\"->\"%s\") %s\n", buffer, filename1, p+1, filename2, (retval==-1?"Failed":"Success")); /* */ /* Restore the buffer so the reply is good */ *p = '*'; retval = rfbSendFileTransferMessage(cl, rfbCommandReturn, rfbAFileRename, retval, length, buffer); if (buffer!=NULL) free(buffer); return retval; } break; } break; } /* NOTE: don't forget to free(buffer) if you return early! */ if (buffer!=NULL) free(buffer); return TRUE; fail: if (buffer!=NULL) free(buffer); return FALSE; } /* * rfbProcessClientNormalMessage is called when the client has sent a normal * protocol message. */ static void rfbProcessClientNormalMessage(rfbClientPtr cl) { int n=0; rfbClientToServerMsg msg; char *str; int i; uint32_t enc=0; uint32_t lastPreferredEncoding = -1; char encBuf[64]; char encBuf2[64]; if ((n = rfbReadExact(cl, (char *)&msg, 1)) <= 0) { if (n != 0) rfbLogPerror("rfbProcessClientNormalMessage: read"); rfbCloseClient(cl); return; } switch (msg.type) { case rfbSetPixelFormat: if ((n = rfbReadExact(cl, ((char *)&msg) + 1, sz_rfbSetPixelFormatMsg - 1)) <= 0) { if (n != 0) rfbLogPerror("rfbProcessClientNormalMessage: read"); rfbCloseClient(cl); return; } cl->format.bitsPerPixel = msg.spf.format.bitsPerPixel; cl->format.depth = msg.spf.format.depth; cl->format.bigEndian = (msg.spf.format.bigEndian ? TRUE : FALSE); cl->format.trueColour = (msg.spf.format.trueColour ? TRUE : FALSE); cl->format.redMax = Swap16IfLE(msg.spf.format.redMax); cl->format.greenMax = Swap16IfLE(msg.spf.format.greenMax); cl->format.blueMax = Swap16IfLE(msg.spf.format.blueMax); cl->format.redShift = msg.spf.format.redShift; cl->format.greenShift = msg.spf.format.greenShift; cl->format.blueShift = msg.spf.format.blueShift; cl->readyForSetColourMapEntries = TRUE; cl->screen->setTranslateFunction(cl); rfbStatRecordMessageRcvd(cl, msg.type, sz_rfbSetPixelFormatMsg, sz_rfbSetPixelFormatMsg); return; case rfbFixColourMapEntries: if ((n = rfbReadExact(cl, ((char *)&msg) + 1, sz_rfbFixColourMapEntriesMsg - 1)) <= 0) { if (n != 0) rfbLogPerror("rfbProcessClientNormalMessage: read"); rfbCloseClient(cl); return; } rfbStatRecordMessageRcvd(cl, msg.type, sz_rfbSetPixelFormatMsg, sz_rfbSetPixelFormatMsg); rfbLog("rfbProcessClientNormalMessage: %s", "FixColourMapEntries unsupported\n"); rfbCloseClient(cl); return; /* NOTE: Some clients send us a set of encodings (i.e. PointerPos) designed to enable/disable features... * We may want to look into this... * Example: * case rfbEncodingXCursor: * cl->enableCursorShapeUpdates = TRUE; * * Currently: cl->enableCursorShapeUpdates can *never* be turned off...
*/ case rfbSetEncodings: { if ((n = rfbReadExact(cl, ((char *)&msg) + 1, sz_rfbSetEncodingsMsg - 1)) <= 0) { if (n != 0) rfbLogPerror("rfbProcessClientNormalMessage: read"); rfbCloseClient(cl); return; } msg.se.nEncodings = Swap16IfLE(msg.se.nEncodings); rfbStatRecordMessageRcvd(cl, msg.type, sz_rfbSetEncodingsMsg+(msg.se.nEncodings*4),sz_rfbSetEncodingsMsg+(msg.se.nEncodings*4)); /* * UltraVNC Client has the ability to adapt to changing network environments * So, let's give it a chance to tell us what it wants now! */ if (cl->preferredEncoding!=-1) lastPreferredEncoding = cl->preferredEncoding; /* Reset all flags to defaults (allows us to switch between PointerPos and Server Drawn Cursors) */ cl->preferredEncoding=-1; cl->useCopyRect = FALSE; cl->useNewFBSize = FALSE; cl->cursorWasChanged = FALSE; cl->useRichCursorEncoding = FALSE; cl->enableCursorPosUpdates = FALSE; cl->enableCursorShapeUpdates = FALSE; cl->enableLastRectEncoding = FALSE; cl->enableKeyboardLedState = FALSE; cl->enableSupportedMessages = FALSE; cl->enableSupportedEncodings = FALSE; cl->enableServerIdentity = FALSE; #if defined(LIBVNCSERVER_HAVE_LIBZ) || defined(LIBVNCSERVER_HAVE_LIBPNG) cl->tightQualityLevel = -1; #ifdef LIBVNCSERVER_HAVE_LIBJPEG cl->tightCompressLevel = TIGHT_DEFAULT_COMPRESSION; cl->turboSubsampLevel = TURBO_DEFAULT_SUBSAMP; cl->turboQualityLevel = -1; #endif #endif for (i = 0; i < msg.se.nEncodings; i++) { if ((n = rfbReadExact(cl, (char *)&enc, 4)) <= 0) { if (n != 0) rfbLogPerror("rfbProcessClientNormalMessage: read"); rfbCloseClient(cl); return; } enc = Swap32IfLE(enc); switch (enc) { case rfbEncodingCopyRect: cl->useCopyRect = TRUE; break; case rfbEncodingRaw: case rfbEncodingRRE: case rfbEncodingCoRRE: case rfbEncodingHextile: case rfbEncodingUltra: #ifdef LIBVNCSERVER_HAVE_LIBZ case rfbEncodingZlib: case rfbEncodingZRLE: case rfbEncodingZYWRLE: #ifdef LIBVNCSERVER_HAVE_LIBJPEG case rfbEncodingTight: #endif #endif #ifdef LIBVNCSERVER_HAVE_LIBPNG case rfbEncodingTightPng: #endif /* The first supported encoding is the 'preferred' encoding */ if (cl->preferredEncoding == -1) cl->preferredEncoding = enc; break; case rfbEncodingXCursor: if(!cl->screen->dontConvertRichCursorToXCursor) { rfbLog("Enabling X-style cursor updates for client %s\n", cl->host); /* if cursor was drawn, hide the cursor */ if(!cl->enableCursorShapeUpdates) rfbRedrawAfterHideCursor(cl,NULL); cl->enableCursorShapeUpdates = TRUE; cl->cursorWasChanged = TRUE; } break; case rfbEncodingRichCursor: rfbLog("Enabling full-color cursor updates for client %s\n", cl->host); /* if cursor was drawn, hide the cursor */ if(!cl->enableCursorShapeUpdates) rfbRedrawAfterHideCursor(cl,NULL); cl->enableCursorShapeUpdates = TRUE; cl->useRichCursorEncoding = TRUE; cl->cursorWasChanged = TRUE; break; case rfbEncodingPointerPos: if (!cl->enableCursorPosUpdates) { rfbLog("Enabling cursor position updates for client %s\n", cl->host); cl->enableCursorPosUpdates = TRUE; cl->cursorWasMoved = TRUE; } break; case rfbEncodingLastRect: if (!cl->enableLastRectEncoding) { rfbLog("Enabling LastRect protocol extension for client " "%s\n", cl->host); cl->enableLastRectEncoding = TRUE; } break; case rfbEncodingNewFBSize: if (!cl->useNewFBSize) { rfbLog("Enabling NewFBSize protocol extension for client " "%s\n", cl->host); cl->useNewFBSize = TRUE; } break; case rfbEncodingKeyboardLedState: if (!cl->enableKeyboardLedState) { rfbLog("Enabling KeyboardLedState protocol extension for client " "%s\n", cl->host);
cl->enableKeyboardLedState = TRUE; } break; case rfbEncodingSupportedMessages: if (!cl->enableSupportedMessages) { rfbLog("Enabling SupportedMessages protocol extension for client " "%s\n", cl->host); cl->enableSupportedMessages = TRUE; } break; case rfbEncodingSupportedEncodings: if (!cl->enableSupportedEncodings) { rfbLog("Enabling SupportedEncodings protocol extension for client " "%s\n", cl->host); cl->enableSupportedEncodings = TRUE; } break; case rfbEncodingServerIdentity: if (!cl->enableServerIdentity) { rfbLog("Enabling ServerIdentity protocol extension for client " "%s\n", cl->host); cl->enableServerIdentity = TRUE; } break; case rfbEncodingXvp: if (cl->screen->xvpHook) { rfbLog("Enabling Xvp protocol extension for client " "%s\n", cl->host); if (!rfbSendXvp(cl, 1, rfbXvp_Init)) { rfbCloseClient(cl); return; } } break; default: #if defined(LIBVNCSERVER_HAVE_LIBZ) || defined(LIBVNCSERVER_HAVE_LIBPNG) if ( enc >= (uint32_t)rfbEncodingCompressLevel0 && enc <= (uint32_t)rfbEncodingCompressLevel9 ) { cl->zlibCompressLevel = enc & 0x0F; #ifdef LIBVNCSERVER_HAVE_LIBJPEG cl->tightCompressLevel = enc & 0x0F; rfbLog("Using compression level %d for client %s\n", cl->tightCompressLevel, cl->host); #endif } else if ( enc >= (uint32_t)rfbEncodingQualityLevel0 && enc <= (uint32_t)rfbEncodingQualityLevel9 ) { cl->tightQualityLevel = enc & 0x0F; rfbLog("Using image quality level %d for client %s\n", cl->tightQualityLevel, cl->host); #ifdef LIBVNCSERVER_HAVE_LIBJPEG cl->turboQualityLevel = tight2turbo_qual[enc & 0x0F]; cl->turboSubsampLevel = tight2turbo_subsamp[enc & 0x0F]; rfbLog("Using JPEG subsampling %d, Q%d for client %s\n", cl->turboSubsampLevel, cl->turboQualityLevel, cl->host); } else if ( enc >= (uint32_t)rfbEncodingFineQualityLevel0 + 1 && enc <= (uint32_t)rfbEncodingFineQualityLevel100 ) { cl->turboQualityLevel = enc & 0xFF; rfbLog("Using fine quality level %d for client %s\n", cl->turboQualityLevel, cl->host); } else if ( enc >= (uint32_t)rfbEncodingSubsamp1X && enc <= (uint32_t)rfbEncodingSubsampGray ) { cl->turboSubsampLevel = enc & 0xFF; rfbLog("Using subsampling level %d for client %s\n", cl->turboSubsampLevel, cl->host); #endif } else #endif { rfbExtensionData* e; for(e = cl->extensions; e;) { rfbExtensionData* next = e->next; if(e->extension->enablePseudoEncoding && e->extension->enablePseudoEncoding(cl, &e->data, (int)enc)) /* ext handles this encoding */ break; e = next; } if(e == NULL) { rfbBool handled = FALSE; /* if the pseudo encoding is not handled by the enabled extensions, search through all extensions. 
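* (Illustrative sketch, not part of the original file: an extension opts into
* this dispatch by exporting a zero-terminated list of pseudo-encoding
* numbers plus an enable hook, roughly
*     static int myPseudoEncs[] = { (int)0xFFFE0100, 0 };   - the value here is hypothetical
*     ext.pseudoEncodings = myPseudoEncs;
*     ext.enablePseudoEncoding = myEnableHook;
* so that the while(encs && *encs!=0) loop below can match the client's request.)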
*/ rfbProtocolExtension* e; for(e = rfbGetExtensionIterator(); e;) { int* encs = e->pseudoEncodings; while(encs && *encs!=0) { if(*encs==(int)enc) { void* data = NULL; if(!e->enablePseudoEncoding(cl, &data, (int)enc)) { rfbLog("Installed extension pretends to handle pseudo encoding 0x%x, but does not!\n",(int)enc); } else { rfbEnableExtension(cl, e, data); handled = TRUE; e = NULL; break; } } encs++; } if(e) e = e->next; } rfbReleaseExtensionIterator(); if(!handled) rfbLog("rfbProcessClientNormalMessage: " "ignoring unsupported encoding type %s\n", encodingName(enc,encBuf,sizeof(encBuf))); } } } } if (cl->preferredEncoding == -1) { if (lastPreferredEncoding==-1) { cl->preferredEncoding = rfbEncodingRaw; rfbLog("Defaulting to %s encoding for client %s\n", encodingName(cl->preferredEncoding,encBuf,sizeof(encBuf)),cl->host); } else { cl->preferredEncoding = lastPreferredEncoding; rfbLog("Sticking with %s encoding for client %s\n", encodingName(cl->preferredEncoding,encBuf,sizeof(encBuf)),cl->host); } } else { if (lastPreferredEncoding==-1) { rfbLog("Using %s encoding for client %s\n", encodingName(cl->preferredEncoding,encBuf,sizeof(encBuf)),cl->host); } else { rfbLog("Switching from %s to %s Encoding for client %s\n", encodingName(lastPreferredEncoding,encBuf2,sizeof(encBuf2)), encodingName(cl->preferredEncoding,encBuf,sizeof(encBuf)), cl->host); } } if (cl->enableCursorPosUpdates && !cl->enableCursorShapeUpdates) { rfbLog("Disabling cursor position updates for client %s\n", cl->host); cl->enableCursorPosUpdates = FALSE; } return; } case rfbFramebufferUpdateRequest: { sraRegionPtr tmpRegion; if ((n = rfbReadExact(cl, ((char *)&msg) + 1, sz_rfbFramebufferUpdateRequestMsg-1)) <= 0) { if (n != 0) rfbLogPerror("rfbProcessClientNormalMessage: read"); rfbCloseClient(cl); return; } rfbStatRecordMessageRcvd(cl, msg.type, sz_rfbFramebufferUpdateRequestMsg,sz_rfbFramebufferUpdateRequestMsg); /* The values come in based on the scaled screen, we need to convert them to * values based on the main screen's coordinate system */ if(!rectSwapIfLEAndClip(&msg.fur.x,&msg.fur.y,&msg.fur.w,&msg.fur.h,cl)) { rfbLog("Warning, ignoring rfbFramebufferUpdateRequest: %dXx%dY-%dWx%dH\n",msg.fur.x, msg.fur.y, msg.fur.w, msg.fur.h); return; } tmpRegion = sraRgnCreateRect(msg.fur.x, msg.fur.y, msg.fur.x+msg.fur.w, msg.fur.y+msg.fur.h); LOCK(cl->updateMutex); sraRgnOr(cl->requestedRegion,tmpRegion); if (!cl->readyForSetColourMapEntries) { /* client hasn't sent a SetPixelFormat so is using server's */ cl->readyForSetColourMapEntries = TRUE; if (!cl->format.trueColour) { if (!rfbSetClientColourMap(cl, 0, 0)) { sraRgnDestroy(tmpRegion); TSIGNAL(cl->updateCond); UNLOCK(cl->updateMutex); return; } } } if (!msg.fur.incremental) { sraRgnOr(cl->modifiedRegion,tmpRegion); sraRgnSubtract(cl->copyRegion,tmpRegion); } TSIGNAL(cl->updateCond); UNLOCK(cl->updateMutex); sraRgnDestroy(tmpRegion); return; } case rfbKeyEvent: if ((n = rfbReadExact(cl, ((char *)&msg) + 1, sz_rfbKeyEventMsg - 1)) <= 0) { if (n != 0) rfbLogPerror("rfbProcessClientNormalMessage: read"); rfbCloseClient(cl); return; } rfbStatRecordMessageRcvd(cl, msg.type, sz_rfbKeyEventMsg, sz_rfbKeyEventMsg); if(!cl->viewOnly) { cl->screen->kbdAddEvent(msg.ke.down, (rfbKeySym)Swap32IfLE(msg.ke.key), cl); } return; case rfbPointerEvent: if ((n = rfbReadExact(cl, ((char *)&msg) + 1, sz_rfbPointerEventMsg - 1)) <= 0) { if (n != 0) rfbLogPerror("rfbProcessClientNormalMessage: read"); rfbCloseClient(cl); return; } rfbStatRecordMessageRcvd(cl, msg.type, sz_rfbPointerEventMsg, 
sz_rfbPointerEventMsg); if (cl->screen->pointerClient && cl->screen->pointerClient != cl) return; if (msg.pe.buttonMask == 0) cl->screen->pointerClient = NULL; else cl->screen->pointerClient = cl; if(!cl->viewOnly) { if (msg.pe.buttonMask != cl->lastPtrButtons || cl->screen->deferPtrUpdateTime == 0) { cl->screen->ptrAddEvent(msg.pe.buttonMask, ScaleX(cl->scaledScreen, cl->screen, Swap16IfLE(msg.pe.x)), ScaleY(cl->scaledScreen, cl->screen, Swap16IfLE(msg.pe.y)), cl); cl->lastPtrButtons = msg.pe.buttonMask; } else { cl->lastPtrX = ScaleX(cl->scaledScreen, cl->screen, Swap16IfLE(msg.pe.x)); cl->lastPtrY = ScaleY(cl->scaledScreen, cl->screen, Swap16IfLE(msg.pe.y)); cl->lastPtrButtons = msg.pe.buttonMask; } } return; case rfbFileTransfer: if ((n = rfbReadExact(cl, ((char *)&msg) + 1, sz_rfbFileTransferMsg - 1)) <= 0) { if (n != 0) rfbLogPerror("rfbProcessClientNormalMessage: read"); rfbCloseClient(cl); return; } msg.ft.size = Swap32IfLE(msg.ft.size); msg.ft.length = Swap32IfLE(msg.ft.length); /* record statistics in rfbProcessFileTransfer as length is filled with garbage when it is not valid */ rfbProcessFileTransfer(cl, msg.ft.contentType, msg.ft.contentParam, msg.ft.size, msg.ft.length); return; case rfbSetSW: if ((n = rfbReadExact(cl, ((char *)&msg) + 1, sz_rfbSetSWMsg - 1)) <= 0) { if (n != 0) rfbLogPerror("rfbProcessClientNormalMessage: read"); rfbCloseClient(cl); return; } msg.sw.x = Swap16IfLE(msg.sw.x); msg.sw.y = Swap16IfLE(msg.sw.y); rfbStatRecordMessageRcvd(cl, msg.type, sz_rfbSetSWMsg, sz_rfbSetSWMsg); /* msg.sw.status is not initialized in the ultraVNC viewer and contains random numbers (why???) */ rfbLog("Received a rfbSetSingleWindow(%d x, %d y)\n", msg.sw.x, msg.sw.y); if (cl->screen->setSingleWindow!=NULL) cl->screen->setSingleWindow(cl, msg.sw.x, msg.sw.y); return; case rfbSetServerInput: if ((n = rfbReadExact(cl, ((char *)&msg) + 1, sz_rfbSetServerInputMsg - 1)) <= 0) { if (n != 0) rfbLogPerror("rfbProcessClientNormalMessage: read"); rfbCloseClient(cl); return; } rfbStatRecordMessageRcvd(cl, msg.type, sz_rfbSetServerInputMsg, sz_rfbSetServerInputMsg); /* msg.sim.pad is not initialized in the ultraVNC viewer and contains random numbers (why???) */ /* msg.sim.pad = Swap16IfLE(msg.sim.pad); */ rfbLog("Received a rfbSetServerInput(%d status)\n", msg.sim.status); if (cl->screen->setServerInput!=NULL) cl->screen->setServerInput(cl, msg.sim.status); return; case rfbTextChat: if ((n = rfbReadExact(cl, ((char *)&msg) + 1, sz_rfbTextChatMsg - 1)) <= 0) { if (n != 0) rfbLogPerror("rfbProcessClientNormalMessage: read"); rfbCloseClient(cl); return; } msg.tc.pad2 = Swap16IfLE(msg.tc.pad2); msg.tc.length = Swap32IfLE(msg.tc.length); switch (msg.tc.length) { case rfbTextChatOpen: case rfbTextChatClose: case rfbTextChatFinished: /* commands do not have text following */ /* Why couldn't they have used the pad byte??? 
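* (For reference: the length field of rfbTextChatMsg is overloaded - the
* special constants rfbTextChatOpen, rfbTextChatClose and rfbTextChatFinished
* mark control messages that carry no text, while any other value below
* rfbTextMaxSize is the byte count of the chat text that follows.)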
*/ str=NULL; rfbStatRecordMessageRcvd(cl, msg.type, sz_rfbTextChatMsg, sz_rfbTextChatMsg); break; default: if ((msg.tc.length>0) && (msg.tc.length<rfbTextMaxSize)) { str = (char *)malloc(msg.tc.length); if (str==NULL) { rfbLog("Unable to malloc %d bytes for a TextChat Message\n", msg.tc.length); rfbCloseClient(cl); return; } if ((n = rfbReadExact(cl, str, msg.tc.length)) <= 0) { if (n != 0) rfbLogPerror("rfbProcessClientNormalMessage: read"); free(str); rfbCloseClient(cl); return; } rfbStatRecordMessageRcvd(cl, msg.type, sz_rfbTextChatMsg+msg.tc.length, sz_rfbTextChatMsg+msg.tc.length); } else { /* This should never happen */ rfbLog("client sent us a Text Message that is too big %d>%d\n", msg.tc.length, rfbTextMaxSize); rfbCloseClient(cl); return; } } /* Note: length can be commands: rfbTextChatOpen, rfbTextChatClose, and rfbTextChatFinished * at which point, the str is NULL (as it is not sent) */ if (cl->screen->setTextChat!=NULL) cl->screen->setTextChat(cl, msg.tc.length, str); free(str); return; case rfbClientCutText: if ((n = rfbReadExact(cl, ((char *)&msg) + 1, sz_rfbClientCutTextMsg - 1)) <= 0) { if (n != 0) rfbLogPerror("rfbProcessClientNormalMessage: read"); rfbCloseClient(cl); return; } msg.cct.length = Swap32IfLE(msg.cct.length); /* uint32_t input is passed to malloc()'s size_t argument, * to rfbReadExact()'s int argument, to rfbStatRecordMessageRcvd()'s int * argument increased of sz_rfbClientCutTextMsg, and to setXCutText()'s int * argument. Here we impose a limit of 1 MB so that the value fits * into all of the types to prevent from misinterpretation and thus * from accessing uninitialized memory (CVE-2018-7225) and also to * prevent from a denial-of-service by allocating too much memory in * the server. */ if (msg.cct.length > 1<<20) { rfbLog("rfbClientCutText: too big cut text length requested: %u B > 1 MB\n", (unsigned int)msg.cct.length); rfbCloseClient(cl); return; } /* Allow zero-length client cut text. */ str = (char *)calloc(msg.cct.length ? 
msg.cct.length : 1, 1); if (str == NULL) { rfbLogPerror("rfbProcessClientNormalMessage: not enough memory"); rfbCloseClient(cl); return; } if ((n = rfbReadExact(cl, str, msg.cct.length)) <= 0) { if (n != 0) rfbLogPerror("rfbProcessClientNormalMessage: read"); free(str); rfbCloseClient(cl); return; } rfbStatRecordMessageRcvd(cl, msg.type, sz_rfbClientCutTextMsg+msg.cct.length, sz_rfbClientCutTextMsg+msg.cct.length); if(!cl->viewOnly) { cl->screen->setXCutText(str, msg.cct.length, cl); } free(str); return; case rfbPalmVNCSetScaleFactor: cl->PalmVNC = TRUE; if ((n = rfbReadExact(cl, ((char *)&msg) + 1, sz_rfbSetScaleMsg - 1)) <= 0) { if (n != 0) rfbLogPerror("rfbProcessClientNormalMessage: read"); rfbCloseClient(cl); return; } if (msg.ssc.scale == 0) { rfbLogPerror("rfbProcessClientNormalMessage: will not accept a scale factor of zero"); rfbCloseClient(cl); return; } rfbStatRecordMessageRcvd(cl, msg.type, sz_rfbSetScaleMsg, sz_rfbSetScaleMsg); rfbLog("rfbSetScale(%d)\n", msg.ssc.scale); rfbScalingSetup(cl,cl->screen->width/msg.ssc.scale, cl->screen->height/msg.ssc.scale); rfbSendNewScaleSize(cl); return; case rfbSetScale: if ((n = rfbReadExact(cl, ((char *)&msg) + 1, sz_rfbSetScaleMsg - 1)) <= 0) { if (n != 0) rfbLogPerror("rfbProcessClientNormalMessage: read"); rfbCloseClient(cl); return; } if (msg.ssc.scale == 0) { rfbLogPerror("rfbProcessClientNormalMessage: will not accept a scale factor of zero"); rfbCloseClient(cl); return; } rfbStatRecordMessageRcvd(cl, msg.type, sz_rfbSetScaleMsg, sz_rfbSetScaleMsg); rfbLog("rfbSetScale(%d)\n", msg.ssc.scale); rfbScalingSetup(cl,cl->screen->width/msg.ssc.scale, cl->screen->height/msg.ssc.scale); rfbSendNewScaleSize(cl); return; case rfbXvp: if ((n = rfbReadExact(cl, ((char *)&msg) + 1, sz_rfbXvpMsg - 1)) <= 0) { if (n != 0) rfbLogPerror("rfbProcessClientNormalMessage: read"); rfbCloseClient(cl); return; } rfbStatRecordMessageRcvd(cl, msg.type, sz_rfbXvpMsg, sz_rfbXvpMsg); /* only version 1 is defined in the protocol, so echo back a fail for anything else */ if(msg.xvp.version != 1) { rfbSendXvp(cl, msg.xvp.version, rfbXvp_Fail); } else { /* if the hook exists and fails, send a fail msg */ if(cl->screen->xvpHook && !cl->screen->xvpHook(cl, msg.xvp.version, msg.xvp.code)) rfbSendXvp(cl, 1, rfbXvp_Fail); } return; default: { rfbExtensionData *e,*next; for(e=cl->extensions; e;) { next = e->next; if(e->extension->handleMessage && e->extension->handleMessage(cl, e->data, &msg)) { rfbStatRecordMessageRcvd(cl, msg.type, 0, 0); /* Extension should handle this */ return; } e = next; } rfbLog("rfbProcessClientNormalMessage: unknown message type %d\n", msg.type); rfbLog(" ... closing connection\n"); rfbCloseClient(cl); return; } } } /* * rfbSendFramebufferUpdate - send the currently pending framebuffer update to * the RFB client. * givenUpdateRegion is not changed. */ rfbBool rfbSendFramebufferUpdate(rfbClientPtr cl, sraRegionPtr givenUpdateRegion) { sraRectangleIterator* i=NULL; sraRect rect; int nUpdateRegionRects; rfbFramebufferUpdateMsg *fu = (rfbFramebufferUpdateMsg *)cl->updateBuf; sraRegionPtr updateRegion,updateCopyRegion,tmpRegion; int dx, dy; rfbBool sendCursorShape = FALSE; rfbBool sendCursorPos = FALSE; rfbBool sendKeyboardLedState = FALSE; rfbBool sendSupportedMessages = FALSE; rfbBool sendSupportedEncodings = FALSE; rfbBool sendServerIdentity = FALSE; rfbBool result = TRUE; if(cl->screen->displayHook) cl->screen->displayHook(cl); /* * If framebuffer size was changed and the client supports NewFBSize * encoding, just send NewFBSize marker and return.
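* (The marker is a pseudo-rectangle: rfbSendNewFBSize() further below puts
* the new width/height in the rect header's w/h fields and sends no pixel
* data at all.)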
*/ if (cl->useNewFBSize && cl->newFBSizePending) { LOCK(cl->updateMutex); cl->newFBSizePending = FALSE; UNLOCK(cl->updateMutex); fu->type = rfbFramebufferUpdate; fu->nRects = Swap16IfLE(1); cl->ublen = sz_rfbFramebufferUpdateMsg; if (!rfbSendNewFBSize(cl, cl->scaledScreen->width, cl->scaledScreen->height)) { if(cl->screen->displayFinishedHook) cl->screen->displayFinishedHook(cl, FALSE); return FALSE; } result = rfbSendUpdateBuf(cl); if(cl->screen->displayFinishedHook) cl->screen->displayFinishedHook(cl, result); return result; } /* * If this client understands cursor shape updates, cursor should be * removed from the framebuffer. Otherwise, make sure it's put up. */ if (cl->enableCursorShapeUpdates) { if (cl->cursorWasChanged && cl->readyForSetColourMapEntries) sendCursorShape = TRUE; } /* * Do we plan to send cursor position update? */ if (cl->enableCursorPosUpdates && cl->cursorWasMoved) sendCursorPos = TRUE; /* * Do we plan to send a keyboard state update? */ if ((cl->enableKeyboardLedState) && (cl->screen->getKeyboardLedStateHook!=NULL)) { int x; x=cl->screen->getKeyboardLedStateHook(cl->screen); if (x!=cl->lastKeyboardLedState) { sendKeyboardLedState = TRUE; cl->lastKeyboardLedState=x; } } /* * Do we plan to send a rfbEncodingSupportedMessages? */ if (cl->enableSupportedMessages) { sendSupportedMessages = TRUE; /* We only send this message ONCE <per setEncodings message received> * (We disable it here) */ cl->enableSupportedMessages = FALSE; } /* * Do we plan to send a rfbEncodingSupportedEncodings? */ if (cl->enableSupportedEncodings) { sendSupportedEncodings = TRUE; /* We only send this message ONCE <per setEncodings message received> * (We disable it here) */ cl->enableSupportedEncodings = FALSE; } /* * Do we plan to send a rfbEncodingServerIdentity? */ if (cl->enableServerIdentity) { sendServerIdentity = TRUE; /* We only send this message ONCE <per setEncodings message received> * (We disable it here) */ cl->enableServerIdentity = FALSE; } LOCK(cl->updateMutex); /* * The modifiedRegion may overlap the destination copyRegion. We remove * any overlapping bits from the copyRegion (since they'd only be * overwritten anyway). */ sraRgnSubtract(cl->copyRegion,cl->modifiedRegion); /* * The client is interested in the region requestedRegion. The region * which should be updated now is the intersection of requestedRegion * and the union of modifiedRegion and copyRegion. If it's empty then * no update is needed. 
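* For example, with requestedRegion covering [0,100)x[0,100), modifiedRegion
* covering [50,150)x[50,150) and an empty copyRegion, the update reduces to
* the overlapping square [50,100)x[50,100).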
*/ updateRegion = sraRgnCreateRgn(givenUpdateRegion); if(cl->screen->progressiveSliceHeight>0) { int height=cl->screen->progressiveSliceHeight, y=cl->progressiveSliceY; sraRegionPtr bbox=sraRgnBBox(updateRegion); sraRect rect; if(sraRgnPopRect(bbox,&rect,0)) { sraRegionPtr slice; if(y<rect.y1 || y>=rect.y2) y=rect.y1; slice=sraRgnCreateRect(0,y,cl->screen->width,y+height); sraRgnAnd(updateRegion,slice); sraRgnDestroy(slice); } sraRgnDestroy(bbox); y+=height; if(y>=cl->screen->height) y=0; cl->progressiveSliceY=y; } sraRgnOr(updateRegion,cl->copyRegion); if(!sraRgnAnd(updateRegion,cl->requestedRegion) && sraRgnEmpty(updateRegion) && (cl->enableCursorShapeUpdates || (cl->cursorX == cl->screen->cursorX && cl->cursorY == cl->screen->cursorY)) && !sendCursorShape && !sendCursorPos && !sendKeyboardLedState && !sendSupportedMessages && !sendSupportedEncodings && !sendServerIdentity) { sraRgnDestroy(updateRegion); UNLOCK(cl->updateMutex); if(cl->screen->displayFinishedHook) cl->screen->displayFinishedHook(cl, TRUE); return TRUE; } /* * We assume that the client doesn't have any pixel data outside the * requestedRegion. In other words, both the source and destination of a * copy must lie within requestedRegion. So the region we can send as a * copy is the intersection of the copyRegion with both the requestedRegion * and the requestedRegion translated by the amount of the copy. We set * updateCopyRegion to this. */ updateCopyRegion = sraRgnCreateRgn(cl->copyRegion); sraRgnAnd(updateCopyRegion,cl->requestedRegion); tmpRegion = sraRgnCreateRgn(cl->requestedRegion); sraRgnOffset(tmpRegion,cl->copyDX,cl->copyDY); sraRgnAnd(updateCopyRegion,tmpRegion); sraRgnDestroy(tmpRegion); dx = cl->copyDX; dy = cl->copyDY; /* * Next we remove updateCopyRegion from updateRegion so that updateRegion * is the part of this update which is sent as ordinary pixel data (i.e not * a copy). */ sraRgnSubtract(updateRegion,updateCopyRegion); /* * Finally we leave modifiedRegion to be the remainder (if any) of parts of * the screen which are modified but outside the requestedRegion. We also * empty both the requestedRegion and the copyRegion - note that we never * carry over a copyRegion for a future update. */ sraRgnOr(cl->modifiedRegion,cl->copyRegion); sraRgnSubtract(cl->modifiedRegion,updateRegion); sraRgnSubtract(cl->modifiedRegion,updateCopyRegion); sraRgnMakeEmpty(cl->requestedRegion); sraRgnMakeEmpty(cl->copyRegion); cl->copyDX = 0; cl->copyDY = 0; UNLOCK(cl->updateMutex); if (!cl->enableCursorShapeUpdates) { if(cl->cursorX != cl->screen->cursorX || cl->cursorY != cl->screen->cursorY) { rfbRedrawAfterHideCursor(cl,updateRegion); LOCK(cl->screen->cursorMutex); cl->cursorX = cl->screen->cursorX; cl->cursorY = cl->screen->cursorY; UNLOCK(cl->screen->cursorMutex); rfbRedrawAfterHideCursor(cl,updateRegion); } rfbShowCursor(cl); } /* * Now send the update. 
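* Several encodings split one dirty rectangle into multiple rectangles on
* the wire, so nRects has to be pre-counted before the update header goes
* out.  CoRRE, for instance, tiles a w x h rectangle into
* ceil(w/correMaxWidth) * ceil(h/correMaxHeight) pieces, which is what the
* (w-1)/max+1 integer arithmetic below computes.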
*/ rfbStatRecordMessageSent(cl, rfbFramebufferUpdate, 0, 0); if (cl->preferredEncoding == rfbEncodingCoRRE) { nUpdateRegionRects = 0; for(i = sraRgnGetIterator(updateRegion); sraRgnIteratorNext(i,&rect);){ int x = rect.x1; int y = rect.y1; int w = rect.x2 - x; int h = rect.y2 - y; int rectsPerRow, rows; /* We need to count the number of rects in the scaled screen */ if (cl->screen!=cl->scaledScreen) rfbScaledCorrection(cl->screen, cl->scaledScreen, &x, &y, &w, &h, "rfbSendFramebufferUpdate"); rectsPerRow = (w-1)/cl->correMaxWidth+1; rows = (h-1)/cl->correMaxHeight+1; nUpdateRegionRects += rectsPerRow*rows; } sraRgnReleaseIterator(i); i=NULL; } else if (cl->preferredEncoding == rfbEncodingUltra) { nUpdateRegionRects = 0; for(i = sraRgnGetIterator(updateRegion); sraRgnIteratorNext(i,&rect);){ int x = rect.x1; int y = rect.y1; int w = rect.x2 - x; int h = rect.y2 - y; /* We need to count the number of rects in the scaled screen */ if (cl->screen!=cl->scaledScreen) rfbScaledCorrection(cl->screen, cl->scaledScreen, &x, &y, &w, &h, "rfbSendFramebufferUpdate"); nUpdateRegionRects += (((h-1) / (ULTRA_MAX_SIZE( w ) / w)) + 1); } sraRgnReleaseIterator(i); i=NULL; #ifdef LIBVNCSERVER_HAVE_LIBZ } else if (cl->preferredEncoding == rfbEncodingZlib) { nUpdateRegionRects = 0; for(i = sraRgnGetIterator(updateRegion); sraRgnIteratorNext(i,&rect);){ int x = rect.x1; int y = rect.y1; int w = rect.x2 - x; int h = rect.y2 - y; /* We need to count the number of rects in the scaled screen */ if (cl->screen!=cl->scaledScreen) rfbScaledCorrection(cl->screen, cl->scaledScreen, &x, &y, &w, &h, "rfbSendFramebufferUpdate"); nUpdateRegionRects += (((h-1) / (ZLIB_MAX_SIZE( w ) / w)) + 1); } sraRgnReleaseIterator(i); i=NULL; #ifdef LIBVNCSERVER_HAVE_LIBJPEG } else if (cl->preferredEncoding == rfbEncodingTight) { nUpdateRegionRects = 0; for(i = sraRgnGetIterator(updateRegion); sraRgnIteratorNext(i,&rect);){ int x = rect.x1; int y = rect.y1; int w = rect.x2 - x; int h = rect.y2 - y; int n; /* We need to count the number of rects in the scaled screen */ if (cl->screen!=cl->scaledScreen) rfbScaledCorrection(cl->screen, cl->scaledScreen, &x, &y, &w, &h, "rfbSendFramebufferUpdate"); n = rfbNumCodedRectsTight(cl, x, y, w, h); if (n == 0) { nUpdateRegionRects = 0xFFFF; break; } nUpdateRegionRects += n; } sraRgnReleaseIterator(i); i=NULL; #endif #endif #if defined(LIBVNCSERVER_HAVE_LIBJPEG) && defined(LIBVNCSERVER_HAVE_LIBPNG) } else if (cl->preferredEncoding == rfbEncodingTightPng) { nUpdateRegionRects = 0; for(i = sraRgnGetIterator(updateRegion); sraRgnIteratorNext(i,&rect);){ int x = rect.x1; int y = rect.y1; int w = rect.x2 - x; int h = rect.y2 - y; int n; /* We need to count the number of rects in the scaled screen */ if (cl->screen!=cl->scaledScreen) rfbScaledCorrection(cl->screen, cl->scaledScreen, &x, &y, &w, &h, "rfbSendFramebufferUpdate"); n = rfbNumCodedRectsTight(cl, x, y, w, h); if (n == 0) { nUpdateRegionRects = 0xFFFF; break; } nUpdateRegionRects += n; } sraRgnReleaseIterator(i); i=NULL; #endif } else { nUpdateRegionRects = sraRgnCountRects(updateRegion); } fu->type = rfbFramebufferUpdate; if (nUpdateRegionRects != 0xFFFF) { if(cl->screen->maxRectsPerUpdate>0 /* CoRRE splits the screen into smaller squares */ && cl->preferredEncoding != rfbEncodingCoRRE /* Ultra encoding splits rectangles up into smaller chunks */ && cl->preferredEncoding != rfbEncodingUltra #ifdef LIBVNCSERVER_HAVE_LIBZ /* Zlib encoding splits rectangles up into smaller chunks */ && cl->preferredEncoding != rfbEncodingZlib #ifdef 
LIBVNCSERVER_HAVE_LIBJPEG /* Tight encoding counts the rectangles differently */ && cl->preferredEncoding != rfbEncodingTight #endif #endif #ifdef LIBVNCSERVER_HAVE_LIBPNG /* Tight encoding counts the rectangles differently */ && cl->preferredEncoding != rfbEncodingTightPng #endif && nUpdateRegionRects>cl->screen->maxRectsPerUpdate) { sraRegion* newUpdateRegion = sraRgnBBox(updateRegion); sraRgnDestroy(updateRegion); updateRegion = newUpdateRegion; nUpdateRegionRects = sraRgnCountRects(updateRegion); } fu->nRects = Swap16IfLE((uint16_t)(sraRgnCountRects(updateCopyRegion) + nUpdateRegionRects + !!sendCursorShape + !!sendCursorPos + !!sendKeyboardLedState + !!sendSupportedMessages + !!sendSupportedEncodings + !!sendServerIdentity)); } else { fu->nRects = 0xFFFF; } cl->ublen = sz_rfbFramebufferUpdateMsg; if (sendCursorShape) { cl->cursorWasChanged = FALSE; if (!rfbSendCursorShape(cl)) goto updateFailed; } if (sendCursorPos) { cl->cursorWasMoved = FALSE; if (!rfbSendCursorPos(cl)) goto updateFailed; } if (sendKeyboardLedState) { if (!rfbSendKeyboardLedState(cl)) goto updateFailed; } if (sendSupportedMessages) { if (!rfbSendSupportedMessages(cl)) goto updateFailed; } if (sendSupportedEncodings) { if (!rfbSendSupportedEncodings(cl)) goto updateFailed; } if (sendServerIdentity) { if (!rfbSendServerIdentity(cl)) goto updateFailed; } if (!sraRgnEmpty(updateCopyRegion)) { if (!rfbSendCopyRegion(cl,updateCopyRegion,dx,dy)) goto updateFailed; } for(i = sraRgnGetIterator(updateRegion); sraRgnIteratorNext(i,&rect);){ int x = rect.x1; int y = rect.y1; int w = rect.x2 - x; int h = rect.y2 - y; /* We need to count the number of rects in the scaled screen */ if (cl->screen!=cl->scaledScreen) rfbScaledCorrection(cl->screen, cl->scaledScreen, &x, &y, &w, &h, "rfbSendFramebufferUpdate"); switch (cl->preferredEncoding) { case -1: case rfbEncodingRaw: if (!rfbSendRectEncodingRaw(cl, x, y, w, h)) goto updateFailed; break; case rfbEncodingRRE: if (!rfbSendRectEncodingRRE(cl, x, y, w, h)) goto updateFailed; break; case rfbEncodingCoRRE: if (!rfbSendRectEncodingCoRRE(cl, x, y, w, h)) goto updateFailed; break; case rfbEncodingHextile: if (!rfbSendRectEncodingHextile(cl, x, y, w, h)) goto updateFailed; break; case rfbEncodingUltra: if (!rfbSendRectEncodingUltra(cl, x, y, w, h)) goto updateFailed; break; #ifdef LIBVNCSERVER_HAVE_LIBZ case rfbEncodingZlib: if (!rfbSendRectEncodingZlib(cl, x, y, w, h)) goto updateFailed; break; case rfbEncodingZRLE: case rfbEncodingZYWRLE: if (!rfbSendRectEncodingZRLE(cl, x, y, w, h)) goto updateFailed; break; #endif #if defined(LIBVNCSERVER_HAVE_LIBJPEG) && (defined(LIBVNCSERVER_HAVE_LIBZ) || defined(LIBVNCSERVER_HAVE_LIBPNG)) case rfbEncodingTight: if (!rfbSendRectEncodingTight(cl, x, y, w, h)) goto updateFailed; break; #ifdef LIBVNCSERVER_HAVE_LIBPNG case rfbEncodingTightPng: if (!rfbSendRectEncodingTightPng(cl, x, y, w, h)) goto updateFailed; break; #endif #endif } } if (i) { sraRgnReleaseIterator(i); i = NULL; } if ( nUpdateRegionRects == 0xFFFF && !rfbSendLastRectMarker(cl) ) goto updateFailed; if (!rfbSendUpdateBuf(cl)) { updateFailed: result = FALSE; } if (!cl->enableCursorShapeUpdates) { rfbHideCursor(cl); } if(i) sraRgnReleaseIterator(i); sraRgnDestroy(updateRegion); sraRgnDestroy(updateCopyRegion); if(cl->screen->displayFinishedHook) cl->screen->displayFinishedHook(cl, result); return result; } /* * Send the copy region as a string of CopyRect encoded rectangles. 
* The only slightly tricky thing is that we should send the messages in * the correct order so that an earlier CopyRect will not corrupt the source * of a later one. */ rfbBool rfbSendCopyRegion(rfbClientPtr cl, sraRegionPtr reg, int dx, int dy) { int x, y, w, h; rfbFramebufferUpdateRectHeader rect; rfbCopyRect cr; sraRectangleIterator* i; sraRect rect1; /* printf("copyrect: "); sraRgnPrint(reg); putchar('\n');fflush(stdout); */ i = sraRgnGetReverseIterator(reg,dx>0,dy>0); /* correct for the scale of the screen */ dx = ScaleX(cl->screen, cl->scaledScreen, dx); dy = ScaleX(cl->screen, cl->scaledScreen, dy); while(sraRgnIteratorNext(i,&rect1)) { x = rect1.x1; y = rect1.y1; w = rect1.x2 - x; h = rect1.y2 - y; /* correct for scaling (if necessary) */ rfbScaledCorrection(cl->screen, cl->scaledScreen, &x, &y, &w, &h, "copyrect"); rect.r.x = Swap16IfLE(x); rect.r.y = Swap16IfLE(y); rect.r.w = Swap16IfLE(w); rect.r.h = Swap16IfLE(h); rect.encoding = Swap32IfLE(rfbEncodingCopyRect); memcpy(&cl->updateBuf[cl->ublen], (char *)&rect, sz_rfbFramebufferUpdateRectHeader); cl->ublen += sz_rfbFramebufferUpdateRectHeader; cr.srcX = Swap16IfLE(x - dx); cr.srcY = Swap16IfLE(y - dy); memcpy(&cl->updateBuf[cl->ublen], (char *)&cr, sz_rfbCopyRect); cl->ublen += sz_rfbCopyRect; rfbStatRecordEncodingSent(cl, rfbEncodingCopyRect, sz_rfbFramebufferUpdateRectHeader + sz_rfbCopyRect, w * h * (cl->scaledScreen->bitsPerPixel / 8)); } sraRgnReleaseIterator(i); return TRUE; } /* * Send a given rectangle in raw encoding (rfbEncodingRaw). */ rfbBool rfbSendRectEncodingRaw(rfbClientPtr cl, int x, int y, int w, int h) { rfbFramebufferUpdateRectHeader rect; int nlines; int bytesPerLine = w * (cl->format.bitsPerPixel / 8); char *fbptr = (cl->scaledScreen->frameBuffer + (cl->scaledScreen->paddedWidthInBytes * y) + (x * (cl->scaledScreen->bitsPerPixel / 8))); /* Flush the buffer to guarantee correct alignment for translateFn(). */ if (cl->ublen > 0) { if (!rfbSendUpdateBuf(cl)) return FALSE; } rect.r.x = Swap16IfLE(x); rect.r.y = Swap16IfLE(y); rect.r.w = Swap16IfLE(w); rect.r.h = Swap16IfLE(h); rect.encoding = Swap32IfLE(rfbEncodingRaw); memcpy(&cl->updateBuf[cl->ublen], (char *)&rect,sz_rfbFramebufferUpdateRectHeader); cl->ublen += sz_rfbFramebufferUpdateRectHeader; rfbStatRecordEncodingSent(cl, rfbEncodingRaw, sz_rfbFramebufferUpdateRectHeader + bytesPerLine * h, sz_rfbFramebufferUpdateRectHeader + bytesPerLine * h); nlines = (UPDATE_BUF_SIZE - cl->ublen) / bytesPerLine; while (TRUE) { if (nlines > h) nlines = h; (*cl->translateFn)(cl->translateLookupTable, &(cl->screen->serverFormat), &cl->format, fbptr, &cl->updateBuf[cl->ublen], cl->scaledScreen->paddedWidthInBytes, w, nlines); cl->ublen += nlines * bytesPerLine; h -= nlines; if (h == 0) /* rect fitted in buffer, do next one */ return TRUE; /* buffer full - flush partial rect and do another nlines */ if (!rfbSendUpdateBuf(cl)) return FALSE; fbptr += (cl->scaledScreen->paddedWidthInBytes * nlines); nlines = (UPDATE_BUF_SIZE - cl->ublen) / bytesPerLine; if (nlines == 0) { rfbErr("rfbSendRectEncodingRaw: send buffer too small for %d " "bytes per line\n", bytesPerLine); rfbCloseClient(cl); return FALSE; } } } /* * Send an empty rectangle with encoding field set to value of * rfbEncodingLastRect to notify client that this is the last * rectangle in framebuffer update ("LastRect" extension of RFB * protocol). 
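* (This pairs with the nRects = 0xFFFF case in rfbSendFramebufferUpdate
* above: when the rectangle count is unknown up front, the client simply
* keeps reading rectangles until this marker arrives.)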
*/ rfbBool rfbSendLastRectMarker(rfbClientPtr cl) { rfbFramebufferUpdateRectHeader rect; if (cl->ublen + sz_rfbFramebufferUpdateRectHeader > UPDATE_BUF_SIZE) { if (!rfbSendUpdateBuf(cl)) return FALSE; } rect.encoding = Swap32IfLE(rfbEncodingLastRect); rect.r.x = 0; rect.r.y = 0; rect.r.w = 0; rect.r.h = 0; memcpy(&cl->updateBuf[cl->ublen], (char *)&rect,sz_rfbFramebufferUpdateRectHeader); cl->ublen += sz_rfbFramebufferUpdateRectHeader; rfbStatRecordEncodingSent(cl, rfbEncodingLastRect, sz_rfbFramebufferUpdateRectHeader, sz_rfbFramebufferUpdateRectHeader); return TRUE; } /* * Send NewFBSize pseudo-rectangle. This tells the client to change * its framebuffer size. */ rfbBool rfbSendNewFBSize(rfbClientPtr cl, int w, int h) { rfbFramebufferUpdateRectHeader rect; if (cl->ublen + sz_rfbFramebufferUpdateRectHeader > UPDATE_BUF_SIZE) { if (!rfbSendUpdateBuf(cl)) return FALSE; } if (cl->PalmVNC==TRUE) rfbLog("Sending rfbEncodingNewFBSize in response to a PalmVNC style framebuffer resize (%dx%d)\n", w, h); else rfbLog("Sending rfbEncodingNewFBSize for resize to (%dx%d)\n", w, h); rect.encoding = Swap32IfLE(rfbEncodingNewFBSize); rect.r.x = 0; rect.r.y = 0; rect.r.w = Swap16IfLE(w); rect.r.h = Swap16IfLE(h); memcpy(&cl->updateBuf[cl->ublen], (char *)&rect, sz_rfbFramebufferUpdateRectHeader); cl->ublen += sz_rfbFramebufferUpdateRectHeader; rfbStatRecordEncodingSent(cl, rfbEncodingNewFBSize, sz_rfbFramebufferUpdateRectHeader, sz_rfbFramebufferUpdateRectHeader); return TRUE; } /* * Send the contents of cl->updateBuf. Returns 1 if successful, -1 if * not (errno should be set). */ rfbBool rfbSendUpdateBuf(rfbClientPtr cl) { if(cl->sock<0) return FALSE; if (rfbWriteExact(cl, cl->updateBuf, cl->ublen) < 0) { rfbLogPerror("rfbSendUpdateBuf: write"); rfbCloseClient(cl); return FALSE; } cl->ublen = 0; return TRUE; } /* * rfbSendSetColourMapEntries sends a SetColourMapEntries message to the * client, using values from the currently installed colormap. */ rfbBool rfbSendSetColourMapEntries(rfbClientPtr cl, int firstColour, int nColours) { char buf[sz_rfbSetColourMapEntriesMsg + 256 * 3 * 2]; char *wbuf = buf; rfbSetColourMapEntriesMsg *scme; uint16_t *rgb; rfbColourMap* cm = &cl->screen->colourMap; int i, len; if (nColours > 256) { /* some rare hardware has, e.g., 4096 colors cells: PseudoColor:12 */ wbuf = (char *) malloc(sz_rfbSetColourMapEntriesMsg + nColours * 3 * 2); } scme = (rfbSetColourMapEntriesMsg *)wbuf; rgb = (uint16_t *)(&wbuf[sz_rfbSetColourMapEntriesMsg]); scme->type = rfbSetColourMapEntries; scme->firstColour = Swap16IfLE(firstColour); scme->nColours = Swap16IfLE(nColours); len = sz_rfbSetColourMapEntriesMsg; for (i = 0; i < nColours; i++) { if(i<(int)cm->count) { if(cm->is16) { rgb[i*3] = Swap16IfLE(cm->data.shorts[i*3]); rgb[i*3+1] = Swap16IfLE(cm->data.shorts[i*3+1]); rgb[i*3+2] = Swap16IfLE(cm->data.shorts[i*3+2]); } else { rgb[i*3] = Swap16IfLE((unsigned short)cm->data.bytes[i*3]); rgb[i*3+1] = Swap16IfLE((unsigned short)cm->data.bytes[i*3+1]); rgb[i*3+2] = Swap16IfLE((unsigned short)cm->data.bytes[i*3+2]); } } } len += nColours * 3 * 2; LOCK(cl->sendMutex); if (rfbWriteExact(cl, wbuf, len) < 0) { rfbLogPerror("rfbSendSetColourMapEntries: write"); rfbCloseClient(cl); if (wbuf != buf) free(wbuf); UNLOCK(cl->sendMutex); return FALSE; } UNLOCK(cl->sendMutex); rfbStatRecordMessageSent(cl, rfbSetColourMapEntries, len, len); if (wbuf != buf) free(wbuf); return TRUE; } /* * rfbSendBell sends a Bell message to all the clients. 
*/ void rfbSendBell(rfbScreenInfoPtr rfbScreen) { rfbClientIteratorPtr i; rfbClientPtr cl; rfbBellMsg b; i = rfbGetClientIterator(rfbScreen); while((cl=rfbClientIteratorNext(i))) { b.type = rfbBell; LOCK(cl->sendMutex); if (rfbWriteExact(cl, (char *)&b, sz_rfbBellMsg) < 0) { rfbLogPerror("rfbSendBell: write"); rfbCloseClient(cl); } UNLOCK(cl->sendMutex); } rfbStatRecordMessageSent(cl, rfbBell, sz_rfbBellMsg, sz_rfbBellMsg); rfbReleaseClientIterator(i); } /* * rfbSendServerCutText sends a ServerCutText message to all the clients. */ void rfbSendServerCutText(rfbScreenInfoPtr rfbScreen,char *str, int len) { rfbClientPtr cl; rfbServerCutTextMsg sct; rfbClientIteratorPtr iterator; iterator = rfbGetClientIterator(rfbScreen); while ((cl = rfbClientIteratorNext(iterator)) != NULL) { sct.type = rfbServerCutText; sct.length = Swap32IfLE(len); LOCK(cl->sendMutex); if (rfbWriteExact(cl, (char *)&sct, sz_rfbServerCutTextMsg) < 0) { rfbLogPerror("rfbSendServerCutText: write"); rfbCloseClient(cl); UNLOCK(cl->sendMutex); continue; } if (rfbWriteExact(cl, str, len) < 0) { rfbLogPerror("rfbSendServerCutText: write"); rfbCloseClient(cl); } UNLOCK(cl->sendMutex); rfbStatRecordMessageSent(cl, rfbServerCutText, sz_rfbServerCutTextMsg+len, sz_rfbServerCutTextMsg+len); } rfbReleaseClientIterator(iterator); } /***************************************************************************** * * UDP can be used for keyboard and pointer events when the underlying * network is highly reliable. This is really here to support ORL's * videotile, whose TCP implementation doesn't like sending lots of small * packets (such as 100s of pen readings per second!). */ static unsigned char ptrAcceleration = 50; void rfbNewUDPConnection(rfbScreenInfoPtr rfbScreen, int sock) { if (write(sock, (char*) &ptrAcceleration, 1) < 0) { rfbLogPerror("rfbNewUDPConnection: write"); } } /* * Because UDP is a message based service, we can't read the first byte and * then the rest of the packet separately like we do with TCP. We will always * get a whole packet delivered in one go, so we ask read() for the maximum * number of bytes we can possibly get. */ void rfbProcessUDPInput(rfbScreenInfoPtr rfbScreen) { int n; rfbClientPtr cl=rfbScreen->udpClient; rfbClientToServerMsg msg; if((!cl) || cl->onHold) return; if ((n = read(rfbScreen->udpSock, (char *)&msg, sizeof(msg))) <= 0) { if (n < 0) { rfbLogPerror("rfbProcessUDPInput: read"); } rfbDisconnectUDPSock(rfbScreen); return; } switch (msg.type) { case rfbKeyEvent: if (n != sz_rfbKeyEventMsg) { rfbErr("rfbProcessUDPInput: key event incorrect length\n"); rfbDisconnectUDPSock(rfbScreen); return; } cl->screen->kbdAddEvent(msg.ke.down, (rfbKeySym)Swap32IfLE(msg.ke.key), cl); break; case rfbPointerEvent: if (n != sz_rfbPointerEventMsg) { rfbErr("rfbProcessUDPInput: ptr event incorrect length\n"); rfbDisconnectUDPSock(rfbScreen); return; } cl->screen->ptrAddEvent(msg.pe.buttonMask, Swap16IfLE(msg.pe.x), Swap16IfLE(msg.pe.y), cl); break; default: rfbErr("rfbProcessUDPInput: unknown message type %d\n", msg.type); rfbDisconnectUDPSock(rfbScreen); } }
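/*
 * A minimal usage sketch, not part of the original file: one way an
 * application might drive the broadcast helpers above (rfbSendBell and
 * rfbSendServerCutText) from a polling loop.  The screen geometry, pixel
 * layout and the "hello" payload are arbitrary example values, and a real
 * program would send bell/cut-text on actual events rather than every pass.
 */
#include <stdlib.h>
#include <rfb/rfb.h>

int main(int argc, char **argv)
{
    rfbScreenInfoPtr screen = rfbGetScreen(&argc, argv, 640, 480, 8, 3, 4);
    if (!screen)
        return 1;
    screen->frameBuffer = (char *)calloc(640 * 480, 4); /* 32bpp backing store */
    rfbInitServer(screen);

    while (rfbIsActive(screen)) {
        rfbProcessEvents(screen, 100000);  /* handle client I/O, up to 100 ms */
        if (screen->clientHead) {          /* only with a client attached */
            rfbSendServerCutText(screen, "hello", 5); /* clipboard to all clients */
            rfbSendBell(screen);                      /* bell to all clients */
        }
    }
    return 0;
}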
/* * rfbserver.c - deal with server-side of the RFB protocol. */ /* * Copyright (C) 2011-2012 D. R. Commander * Copyright (C) 2005 Rohit Kumar, Johannes E. Schindelin * Copyright (C) 2002 RealVNC Ltd. * OSXvnc Copyright (C) 2001 Dan McGuirk <mcguirk@incompleteness.net>. * Original Xvnc code Copyright (C) 1999 AT&T Laboratories Cambridge. * All Rights Reserved. * * This is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this software; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, * USA. */ #ifdef __STRICT_ANSI__ #define _BSD_SOURCE #define _POSIX_SOURCE #define _XOPEN_SOURCE 600 #endif #include <stdio.h> #include <string.h> #include <rfb/rfb.h> #include <rfb/rfbregion.h> #include "private.h" #include "rfb/rfbconfig.h" #ifdef LIBVNCSERVER_HAVE_FCNTL_H #include <fcntl.h> #endif #ifdef WIN32 #include <winsock2.h> #include <ws2tcpip.h> #include <io.h> #define write(sock,buf,len) send(sock,buf,len,0) #else #ifdef LIBVNCSERVER_HAVE_UNISTD_H #include <unistd.h> #endif #include <pwd.h> #ifdef LIBVNCSERVER_HAVE_SYS_SOCKET_H #include <sys/socket.h> #endif #ifdef LIBVNCSERVER_HAVE_NETINET_IN_H #include <netinet/in.h> #include <netinet/tcp.h> #include <netdb.h> #include <arpa/inet.h> #endif #endif #ifdef DEBUGPROTO #undef DEBUGPROTO #define DEBUGPROTO(x) x #else #define DEBUGPROTO(x) #endif #include <stdarg.h> #include <scale.h> /* stst() */ #include <sys/types.h> #include <sys/stat.h> #if LIBVNCSERVER_HAVE_UNISTD_H #include <unistd.h> #endif #ifndef WIN32 /* readdir() */ #include <dirent.h> #endif /* errno */ #include <errno.h> /* strftime() */ #include <time.h> /* INT_MAX */ #include <limits.h> #ifdef LIBVNCSERVER_WITH_WEBSOCKETS #include "rfbssl.h" #endif #ifdef _MSC_VER #define snprintf _snprintf /* Missing in MSVC */ /* Prevent POSIX deprecation warnings */ #define close _close #define strdup _strdup #endif #ifdef WIN32 #include <direct.h> #ifdef __MINGW32__ #define mkdir(path, perms) mkdir(path) /* Omit the perms argument to match POSIX signature */ #else /* MSVC and other windows compilers */ #define mkdir(path, perms) _mkdir(path) /* Omit the perms argument to match POSIX signature */ #endif /* __MINGW32__ else... */ #ifndef S_ISDIR #define S_ISDIR(m) (((m) & S_IFDIR) == S_IFDIR) #endif #endif #ifdef LIBVNCSERVER_HAVE_LIBJPEG /* * Map of quality levels to provide compatibility with TightVNC/TigerVNC * clients. This emulates the behavior of the TigerVNC Server. 
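* For example, a client-requested Tight quality level of 5 is mapped by the
* tables below to JPEG quality 77 with subsampling level 2.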
*/ static const int tight2turbo_qual[10] = { 15, 29, 41, 42, 62, 77, 79, 86, 92, 100 }; static const int tight2turbo_subsamp[10] = { 1, 1, 1, 2, 2, 2, 0, 0, 0, 0 }; #endif static void rfbProcessClientProtocolVersion(rfbClientPtr cl); static void rfbProcessClientNormalMessage(rfbClientPtr cl); static void rfbProcessClientInitMessage(rfbClientPtr cl); #ifdef LIBVNCSERVER_HAVE_LIBPTHREAD void rfbIncrClientRef(rfbClientPtr cl) { LOCK(cl->refCountMutex); cl->refCount++; UNLOCK(cl->refCountMutex); } void rfbDecrClientRef(rfbClientPtr cl) { LOCK(cl->refCountMutex); cl->refCount--; if(cl->refCount<=0) /* just to be sure also < 0 */ TSIGNAL(cl->deleteCond); UNLOCK(cl->refCountMutex); } #else void rfbIncrClientRef(rfbClientPtr cl) {} void rfbDecrClientRef(rfbClientPtr cl) {} #endif #ifdef LIBVNCSERVER_HAVE_LIBPTHREAD static MUTEX(rfbClientListMutex); #endif struct rfbClientIterator { rfbClientPtr next; rfbScreenInfoPtr screen; rfbBool closedToo; }; void rfbClientListInit(rfbScreenInfoPtr rfbScreen) { if(sizeof(rfbBool)!=1) { /* a sanity check */ fprintf(stderr,"rfbBool's size is not 1 (%d)!\n",(int)sizeof(rfbBool)); /* we cannot continue, because rfbBool is supposed to be char everywhere */ exit(1); } rfbScreen->clientHead = NULL; INIT_MUTEX(rfbClientListMutex); } rfbClientIteratorPtr rfbGetClientIterator(rfbScreenInfoPtr rfbScreen) { rfbClientIteratorPtr i = (rfbClientIteratorPtr)malloc(sizeof(struct rfbClientIterator)); i->next = NULL; i->screen = rfbScreen; i->closedToo = FALSE; return i; } rfbClientIteratorPtr rfbGetClientIteratorWithClosed(rfbScreenInfoPtr rfbScreen) { rfbClientIteratorPtr i = (rfbClientIteratorPtr)malloc(sizeof(struct rfbClientIterator)); i->next = NULL; i->screen = rfbScreen; i->closedToo = TRUE; return i; } rfbClientPtr rfbClientIteratorHead(rfbClientIteratorPtr i) { #ifdef LIBVNCSERVER_HAVE_LIBPTHREAD if(i->next != 0) { rfbDecrClientRef(i->next); rfbIncrClientRef(i->screen->clientHead); } #endif LOCK(rfbClientListMutex); i->next = i->screen->clientHead; UNLOCK(rfbClientListMutex); return i->next; } rfbClientPtr rfbClientIteratorNext(rfbClientIteratorPtr i) { if(i->next == 0) { LOCK(rfbClientListMutex); i->next = i->screen->clientHead; UNLOCK(rfbClientListMutex); } else { IF_PTHREADS(rfbClientPtr cl = i->next); i->next = i->next->next; IF_PTHREADS(rfbDecrClientRef(cl)); } #ifdef LIBVNCSERVER_HAVE_LIBPTHREAD if(!i->closedToo) while(i->next && i->next->sock<0) i->next = i->next->next; if(i->next) rfbIncrClientRef(i->next); #endif return i->next; } void rfbReleaseClientIterator(rfbClientIteratorPtr iterator) { IF_PTHREADS(if(iterator->next) rfbDecrClientRef(iterator->next)); free(iterator); } /* * rfbNewClientConnection is called from sockets.c when a new connection * comes in. */ void rfbNewClientConnection(rfbScreenInfoPtr rfbScreen, int sock) { rfbNewClient(rfbScreen,sock); } /* * rfbReverseConnection is called to make an outward * connection to a "listening" RFB client. 
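* (Typical call, with an example address - VNC viewers conventionally listen
* on port 5500 for reverse connections:
*     rfbClientPtr cl = rfbReverseConnection(screen, "192.0.2.10", 5500);
*     if (!cl) rfbLog("reverse connection failed\n");
* )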
*/ rfbClientPtr rfbReverseConnection(rfbScreenInfoPtr rfbScreen, char *host, int port) { int sock; rfbClientPtr cl; if ((sock = rfbConnect(rfbScreen, host, port)) < 0) return (rfbClientPtr)NULL; cl = rfbNewClient(rfbScreen, sock); if (cl) { cl->reverseConnection = TRUE; } return cl; } void rfbSetProtocolVersion(rfbScreenInfoPtr rfbScreen, int major_, int minor_) { /* Permit the server to set the version to report */ /* TODO: sanity checking */ if ((major_==3) && (minor_ > 2 && minor_ < 9)) { rfbScreen->protocolMajorVersion = major_; rfbScreen->protocolMinorVersion = minor_; } else rfbLog("rfbSetProtocolVersion(%d,%d) set to invalid values\n", major_, minor_); } /* * rfbNewClient is called when a new connection has been made by whatever * means. */ static rfbClientPtr rfbNewTCPOrUDPClient(rfbScreenInfoPtr rfbScreen, int sock, rfbBool isUDP) { rfbProtocolVersionMsg pv; rfbClientIteratorPtr iterator; rfbClientPtr cl,cl_; #ifdef LIBVNCSERVER_IPv6 struct sockaddr_storage addr; #else struct sockaddr_in addr; #endif socklen_t addrlen = sizeof(addr); rfbProtocolExtension* extension; cl = (rfbClientPtr)calloc(sizeof(rfbClientRec),1); cl->screen = rfbScreen; cl->sock = sock; cl->viewOnly = FALSE; /* setup pseudo scaling */ cl->scaledScreen = rfbScreen; cl->scaledScreen->scaledScreenRefCount++; rfbResetStats(cl); cl->clientData = NULL; cl->clientGoneHook = rfbDoNothingWithClient; if(isUDP) { rfbLog(" accepted UDP client\n"); } else { #ifdef LIBVNCSERVER_IPv6 char host[1024]; #endif int one=1; getpeername(sock, (struct sockaddr *)&addr, &addrlen); #ifdef LIBVNCSERVER_IPv6 if(getnameinfo((struct sockaddr*)&addr, addrlen, host, sizeof(host), NULL, 0, NI_NUMERICHOST) != 0) { rfbLogPerror("rfbNewClient: error in getnameinfo"); cl->host = strdup(""); } else cl->host = strdup(host); #else cl->host = strdup(inet_ntoa(addr.sin_addr)); #endif rfbLog(" other clients:\n"); iterator = rfbGetClientIterator(rfbScreen); while ((cl_ = rfbClientIteratorNext(iterator)) != NULL) { rfbLog(" %s\n",cl_->host); } rfbReleaseClientIterator(iterator); if(!rfbSetNonBlocking(sock)) { close(sock); return NULL; } if (setsockopt(sock, IPPROTO_TCP, TCP_NODELAY, (char *)&one, sizeof(one)) < 0) { rfbLogPerror("setsockopt failed: can't set TCP_NODELAY flag, non TCP socket?"); } FD_SET(sock,&(rfbScreen->allFds)); rfbScreen->maxFd = rfbMax(sock,rfbScreen->maxFd); INIT_MUTEX(cl->outputMutex); INIT_MUTEX(cl->refCountMutex); INIT_MUTEX(cl->sendMutex); INIT_COND(cl->deleteCond); cl->state = RFB_PROTOCOL_VERSION; cl->reverseConnection = FALSE; cl->readyForSetColourMapEntries = FALSE; cl->useCopyRect = FALSE; cl->preferredEncoding = -1; cl->correMaxWidth = 48; cl->correMaxHeight = 48; #ifdef LIBVNCSERVER_HAVE_LIBZ cl->zrleData = NULL; #endif cl->copyRegion = sraRgnCreate(); cl->copyDX = 0; cl->copyDY = 0; cl->modifiedRegion = sraRgnCreateRect(0,0,rfbScreen->width,rfbScreen->height); INIT_MUTEX(cl->updateMutex); INIT_COND(cl->updateCond); cl->requestedRegion = sraRgnCreate(); cl->format = cl->screen->serverFormat; cl->translateFn = rfbTranslateNone; cl->translateLookupTable = NULL; LOCK(rfbClientListMutex); IF_PTHREADS(cl->refCount = 0); cl->next = rfbScreen->clientHead; cl->prev = NULL; if (rfbScreen->clientHead) rfbScreen->clientHead->prev = cl; rfbScreen->clientHead = cl; UNLOCK(rfbClientListMutex); #if defined(LIBVNCSERVER_HAVE_LIBZ) || defined(LIBVNCSERVER_HAVE_LIBPNG) cl->tightQualityLevel = -1; #ifdef LIBVNCSERVER_HAVE_LIBJPEG cl->tightCompressLevel = TIGHT_DEFAULT_COMPRESSION; cl->turboSubsampLevel = TURBO_DEFAULT_SUBSAMP; { int i; for 
(i = 0; i < 4; i++) cl->zsActive[i] = FALSE; } #endif #endif cl->fileTransfer.fd = -1; cl->enableCursorShapeUpdates = FALSE; cl->enableCursorPosUpdates = FALSE; cl->useRichCursorEncoding = FALSE; cl->enableLastRectEncoding = FALSE; cl->enableKeyboardLedState = FALSE; cl->enableSupportedMessages = FALSE; cl->enableSupportedEncodings = FALSE; cl->enableServerIdentity = FALSE; cl->lastKeyboardLedState = -1; cl->cursorX = rfbScreen->cursorX; cl->cursorY = rfbScreen->cursorY; cl->useNewFBSize = FALSE; #ifdef LIBVNCSERVER_HAVE_LIBZ cl->compStreamInited = FALSE; cl->compStream.total_in = 0; cl->compStream.total_out = 0; cl->compStream.zalloc = Z_NULL; cl->compStream.zfree = Z_NULL; cl->compStream.opaque = Z_NULL; cl->zlibCompressLevel = 5; #endif cl->progressiveSliceY = 0; cl->extensions = NULL; cl->lastPtrX = -1; #ifdef LIBVNCSERVER_WITH_WEBSOCKETS /* * Wait a few ms for the client to send WebSockets connection (TLS/SSL or plain) */ if (!webSocketsCheck(cl)) { /* Error reporting handled in webSocketsHandshake */ rfbCloseClient(cl); rfbClientConnectionGone(cl); return NULL; } #endif sprintf(pv,rfbProtocolVersionFormat,rfbScreen->protocolMajorVersion, rfbScreen->protocolMinorVersion); if (rfbWriteExact(cl, pv, sz_rfbProtocolVersionMsg) < 0) { rfbLogPerror("rfbNewClient: write"); rfbCloseClient(cl); rfbClientConnectionGone(cl); return NULL; } } for(extension = rfbGetExtensionIterator(); extension; extension=extension->next) { void* data = NULL; /* if the extension does not have a newClient method, it wants * to be initialized later. */ if(extension->newClient && extension->newClient(cl, &data)) rfbEnableExtension(cl, extension, data); } rfbReleaseExtensionIterator(); switch (cl->screen->newClientHook(cl)) { case RFB_CLIENT_ON_HOLD: cl->onHold = TRUE; break; case RFB_CLIENT_ACCEPT: cl->onHold = FALSE; break; case RFB_CLIENT_REFUSE: rfbCloseClient(cl); rfbClientConnectionGone(cl); cl = NULL; break; } return cl; } rfbClientPtr rfbNewClient(rfbScreenInfoPtr rfbScreen, int sock) { return(rfbNewTCPOrUDPClient(rfbScreen,sock,FALSE)); } rfbClientPtr rfbNewUDPClient(rfbScreenInfoPtr rfbScreen) { return((rfbScreen->udpClient= rfbNewTCPOrUDPClient(rfbScreen,rfbScreen->udpSock,TRUE))); } /* * rfbClientConnectionGone is called from sockets.c just after a connection * has gone away. */ void rfbClientConnectionGone(rfbClientPtr cl) { #if defined(LIBVNCSERVER_HAVE_LIBZ) && defined(LIBVNCSERVER_HAVE_LIBJPEG) int i; #endif LOCK(rfbClientListMutex); if (cl->prev) cl->prev->next = cl->next; else cl->screen->clientHead = cl->next; if (cl->next) cl->next->prev = cl->prev; UNLOCK(rfbClientListMutex); #ifdef LIBVNCSERVER_HAVE_LIBPTHREAD if(cl->screen->backgroundLoop != FALSE) { int i; do { LOCK(cl->refCountMutex); i=cl->refCount; if(i>0) WAIT(cl->deleteCond,cl->refCountMutex); UNLOCK(cl->refCountMutex); } while(i>0); } #endif if(cl->sock>=0) close(cl->sock); if (cl->scaledScreen!=NULL) cl->scaledScreen->scaledScreenRefCount--; #ifdef LIBVNCSERVER_HAVE_LIBZ rfbFreeZrleData(cl); #endif rfbFreeUltraData(cl); /* free buffers holding pixel data before and after encoding */ free(cl->beforeEncBuf); free(cl->afterEncBuf); if(cl->sock>=0) FD_CLR(cl->sock,&(cl->screen->allFds)); cl->clientGoneHook(cl); rfbLog("Client %s gone\n",cl->host); free(cl->host); #ifdef LIBVNCSERVER_HAVE_LIBZ /* Release the compression state structures if any. 
*/ if ( cl->compStreamInited ) { deflateEnd( &(cl->compStream) ); } #ifdef LIBVNCSERVER_HAVE_LIBJPEG for (i = 0; i < 4; i++) { if (cl->zsActive[i]) deflateEnd(&cl->zsStruct[i]); } #endif #endif if (cl->screen->pointerClient == cl) cl->screen->pointerClient = NULL; sraRgnDestroy(cl->modifiedRegion); sraRgnDestroy(cl->requestedRegion); sraRgnDestroy(cl->copyRegion); if (cl->translateLookupTable) free(cl->translateLookupTable); TINI_COND(cl->updateCond); TINI_MUTEX(cl->updateMutex); /* make sure outputMutex is unlocked before destroying */ LOCK(cl->outputMutex); UNLOCK(cl->outputMutex); TINI_MUTEX(cl->outputMutex); LOCK(cl->sendMutex); UNLOCK(cl->sendMutex); TINI_MUTEX(cl->sendMutex); #ifdef LIBVNCSERVER_HAVE_LIBPTHREAD close(cl->pipe_notify_client_thread[0]); close(cl->pipe_notify_client_thread[1]); #endif rfbPrintStats(cl); rfbResetStats(cl); free(cl); } /* * rfbProcessClientMessage is called when there is data to read from a client. */ void rfbProcessClientMessage(rfbClientPtr cl) { switch (cl->state) { case RFB_PROTOCOL_VERSION: rfbProcessClientProtocolVersion(cl); return; case RFB_SECURITY_TYPE: rfbProcessClientSecurityType(cl); return; case RFB_AUTHENTICATION: rfbAuthProcessClientMessage(cl); return; case RFB_INITIALISATION: case RFB_INITIALISATION_SHARED: rfbProcessClientInitMessage(cl); return; default: rfbProcessClientNormalMessage(cl); return; } } /* * rfbProcessClientProtocolVersion is called when the client sends its * protocol version. */ static void rfbProcessClientProtocolVersion(rfbClientPtr cl) { rfbProtocolVersionMsg pv; int n, major_, minor_; if ((n = rfbReadExact(cl, pv, sz_rfbProtocolVersionMsg)) <= 0) { if (n == 0) rfbLog("rfbProcessClientProtocolVersion: client gone\n"); else rfbLogPerror("rfbProcessClientProtocolVersion: read"); rfbCloseClient(cl); return; } pv[sz_rfbProtocolVersionMsg] = 0; if (sscanf(pv,rfbProtocolVersionFormat,&major_,&minor_) != 2) { rfbErr("rfbProcessClientProtocolVersion: not a valid RFB client: %s\n", pv); rfbCloseClient(cl); return; } rfbLog("Client Protocol Version %d.%d\n", major_, minor_); if (major_ != rfbProtocolMajorVersion) { rfbErr("RFB protocol version mismatch - server %d.%d, client %d.%d", cl->screen->protocolMajorVersion, cl->screen->protocolMinorVersion, major_,minor_); rfbCloseClient(cl); return; } /* Check for the minor version use either of the two standard version of RFB */ /* * UltraVNC Viewer detects FileTransfer compatible servers via rfb versions * 3.4, 3.6, 3.14, 3.16 * It's a bad method, but it is what they use to enable features... * maintaining RFB version compatibility across multiple servers is a pain * Should use something like ServerIdentity encoding */ cl->protocolMajorVersion = major_; cl->protocolMinorVersion = minor_; rfbLog("Protocol version sent %d.%d, using %d.%d\n", major_, minor_, rfbProtocolMajorVersion, cl->protocolMinorVersion); rfbAuthNewClient(cl); } void rfbClientSendString(rfbClientPtr cl, const char *reason) { char *buf; int len = strlen(reason); rfbLog("rfbClientSendString(\"%s\")\n", reason); buf = (char *)malloc(4 + len); ((uint32_t *)buf)[0] = Swap32IfLE(len); memcpy(buf + 4, reason, len); if (rfbWriteExact(cl, buf, 4 + len) < 0) rfbLogPerror("rfbClientSendString: write"); free(buf); rfbCloseClient(cl); } /* * rfbClientConnFailed is called when a client connection has failed either * because it talks the wrong protocol or it has failed authentication. 
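 *
 * On the wire this is the RFB 3.3 failure path: a 4-byte reason code
 * (rfbConnFailed == 0), a 4-byte reason length, then the reason text,
 * with the two numeric fields big-endian. Refusing a client with the
 * reason "Too many clients" would produce these 24 bytes (a sketch of
 * the layout, values in hex):
 *
 *   00 00 00 00                                       rfbConnFailed
 *   00 00 00 10                                       length = 16
 *   54 6f 6f 20 6d 61 6e 79 20 63 6c 69 65 6e 74 73   "Too many clients"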
*/
void
rfbClientConnFailed(rfbClientPtr cl,
                    const char *reason)
{
    char *buf;
    int len = strlen(reason);

    rfbLog("rfbClientConnFailed(\"%s\")\n", reason);

    buf = (char *)malloc(8 + len);
    ((uint32_t *)buf)[0] = Swap32IfLE(rfbConnFailed);
    ((uint32_t *)buf)[1] = Swap32IfLE(len);
    memcpy(buf + 8, reason, len);

    if (rfbWriteExact(cl, buf, 8 + len) < 0)
        rfbLogPerror("rfbClientConnFailed: write");

    free(buf);
    rfbCloseClient(cl);
}


/*
 * rfbProcessClientInitMessage is called when the client sends its
 * initialisation message.
 */

static void
rfbProcessClientInitMessage(rfbClientPtr cl)
{
    rfbClientInitMsg ci;
    union {
        char buf[256];
        rfbServerInitMsg si;
    } u;
    int len, n;
    rfbClientIteratorPtr iterator;
    rfbClientPtr otherCl;
    rfbExtensionData* extension;

    if (cl->state == RFB_INITIALISATION_SHARED) {
        /* In this case behave as though an implicit ClientInit message has
         * already been received with a shared-flag of true. */
        ci.shared = 1;
        /* Avoid the possibility of exposing the RFB_INITIALISATION_SHARED
         * state to calling software. */
        cl->state = RFB_INITIALISATION;
    } else {
        if ((n = rfbReadExact(cl, (char *)&ci,sz_rfbClientInitMsg)) <= 0) {
            if (n == 0)
                rfbLog("rfbProcessClientInitMessage: client gone\n");
            else
                rfbLogPerror("rfbProcessClientInitMessage: read");
            rfbCloseClient(cl);
            return;
        }
    }

    memset(u.buf,0,sizeof(u.buf));

    u.si.framebufferWidth = Swap16IfLE(cl->screen->width);
    u.si.framebufferHeight = Swap16IfLE(cl->screen->height);
    u.si.format = cl->screen->serverFormat;
    u.si.format.redMax = Swap16IfLE(u.si.format.redMax);
    u.si.format.greenMax = Swap16IfLE(u.si.format.greenMax);
    u.si.format.blueMax = Swap16IfLE(u.si.format.blueMax);

    strncpy(u.buf + sz_rfbServerInitMsg, cl->screen->desktopName, 127);
    len = strlen(u.buf + sz_rfbServerInitMsg);
    u.si.nameLength = Swap32IfLE(len);

    if (rfbWriteExact(cl, u.buf, sz_rfbServerInitMsg + len) < 0) {
        rfbLogPerror("rfbProcessClientInitMessage: write");
        rfbCloseClient(cl);
        return;
    }

    for(extension = cl->extensions; extension;) {
        rfbExtensionData* next = extension->next;
        if(extension->extension->init &&
           !extension->extension->init(cl, extension->data))
            /* extension requested that it be removed */
            rfbDisableExtension(cl, extension->extension);
        extension = next;
    }

    cl->state = RFB_NORMAL;

    if (!cl->reverseConnection &&
        (cl->screen->neverShared ||
         (!cl->screen->alwaysShared && !ci.shared))) {

        if (cl->screen->dontDisconnect) {
            iterator = rfbGetClientIterator(cl->screen);
            while ((otherCl = rfbClientIteratorNext(iterator)) != NULL) {
                if ((otherCl != cl) && (otherCl->state == RFB_NORMAL)) {
                    rfbLog("-dontdisconnect: Not shared & existing client\n");
                    rfbLog(" refusing new client %s\n", cl->host);
                    rfbCloseClient(cl);
                    rfbReleaseClientIterator(iterator);
                    return;
                }
            }
            rfbReleaseClientIterator(iterator);
        } else {
            iterator = rfbGetClientIterator(cl->screen);
            while ((otherCl = rfbClientIteratorNext(iterator)) != NULL) {
                if ((otherCl != cl) && (otherCl->state == RFB_NORMAL)) {
                    rfbLog("Not shared - closing connection to client %s\n",
                           otherCl->host);
                    rfbCloseClient(otherCl);
                }
            }
            rfbReleaseClientIterator(iterator);
        }
    }
}

/* The values come in based on the scaled screen, we need to convert them to
 * values based on the main screen's coordinate system
 */
static rfbBool
rectSwapIfLEAndClip(uint16_t* x,uint16_t* y,uint16_t* w,uint16_t* h,
                    rfbClientPtr cl)
{
    int x1=Swap16IfLE(*x);
    int y1=Swap16IfLE(*y);
    int w1=Swap16IfLE(*w);
    int h1=Swap16IfLE(*h);

    rfbScaledCorrection(cl->scaledScreen, cl->screen,
                        &x1, &y1, &w1, &h1, "rectSwapIfLEAndClip");
    *x = x1;
    *y = y1;
    *w = w1;
    *h = h1;

    if(*w>cl->screen->width-*x)
        *w=cl->screen->width-*x;
    /* possible underflow */
    if(*w>cl->screen->width-*x)
        return FALSE;
    if(*h>cl->screen->height-*y)
        *h=cl->screen->height-*y;
    if(*h>cl->screen->height-*y)
        return FALSE;

    return TRUE;
}

/*
 * Send the keyboard LED state (KeyboardLedState pseudo-encoding).
 */

rfbBool
rfbSendKeyboardLedState(rfbClientPtr cl)
{
    rfbFramebufferUpdateRectHeader rect;

    if (cl->ublen + sz_rfbFramebufferUpdateRectHeader > UPDATE_BUF_SIZE) {
        if (!rfbSendUpdateBuf(cl))
            return FALSE;
    }

    rect.encoding = Swap32IfLE(rfbEncodingKeyboardLedState);
    rect.r.x = Swap16IfLE(cl->lastKeyboardLedState);
    rect.r.y = 0;
    rect.r.w = 0;
    rect.r.h = 0;

    memcpy(&cl->updateBuf[cl->ublen], (char *)&rect,
           sz_rfbFramebufferUpdateRectHeader);
    cl->ublen += sz_rfbFramebufferUpdateRectHeader;

    rfbStatRecordEncodingSent(cl, rfbEncodingKeyboardLedState,
                              sz_rfbFramebufferUpdateRectHeader,
                              sz_rfbFramebufferUpdateRectHeader);

    if (!rfbSendUpdateBuf(cl))
        return FALSE;

    return TRUE;
}

#define rfbSetBit(buffer, position) (buffer[(position & 255) / 8] |= (1 << (position % 8)))

/*
 * Send rfbEncodingSupportedMessages.
 */

rfbBool
rfbSendSupportedMessages(rfbClientPtr cl)
{
    rfbFramebufferUpdateRectHeader rect;
    rfbSupportedMessages msgs;

    if (cl->ublen + sz_rfbFramebufferUpdateRectHeader
                  + sz_rfbSupportedMessages > UPDATE_BUF_SIZE) {
        if (!rfbSendUpdateBuf(cl))
            return FALSE;
    }

    rect.encoding = Swap32IfLE(rfbEncodingSupportedMessages);
    rect.r.x = 0;
    rect.r.y = 0;
    rect.r.w = Swap16IfLE(sz_rfbSupportedMessages);
    rect.r.h = 0;

    memcpy(&cl->updateBuf[cl->ublen], (char *)&rect,
           sz_rfbFramebufferUpdateRectHeader);
    cl->ublen += sz_rfbFramebufferUpdateRectHeader;

    memset((char *)&msgs, 0, sz_rfbSupportedMessages);
    rfbSetBit(msgs.client2server, rfbSetPixelFormat);
    rfbSetBit(msgs.client2server, rfbFixColourMapEntries);
    rfbSetBit(msgs.client2server, rfbSetEncodings);
    rfbSetBit(msgs.client2server, rfbFramebufferUpdateRequest);
    rfbSetBit(msgs.client2server, rfbKeyEvent);
    rfbSetBit(msgs.client2server, rfbPointerEvent);
    rfbSetBit(msgs.client2server, rfbClientCutText);
    rfbSetBit(msgs.client2server, rfbFileTransfer);
    rfbSetBit(msgs.client2server, rfbSetScale);
    /*rfbSetBit(msgs.client2server, rfbSetServerInput); */
    /*rfbSetBit(msgs.client2server, rfbSetSW); */
    /*rfbSetBit(msgs.client2server, rfbTextChat); */
    rfbSetBit(msgs.client2server, rfbPalmVNCSetScaleFactor);

    rfbSetBit(msgs.server2client, rfbFramebufferUpdate);
    rfbSetBit(msgs.server2client, rfbSetColourMapEntries);
    rfbSetBit(msgs.server2client, rfbBell);
    rfbSetBit(msgs.server2client, rfbServerCutText);
    rfbSetBit(msgs.server2client, rfbResizeFrameBuffer);
    rfbSetBit(msgs.server2client, rfbPalmVNCReSizeFrameBuffer);

    if (cl->screen->xvpHook) {
        rfbSetBit(msgs.client2server, rfbXvp);
        rfbSetBit(msgs.server2client, rfbXvp);
    }

    memcpy(&cl->updateBuf[cl->ublen], (char *)&msgs, sz_rfbSupportedMessages);
    cl->ublen += sz_rfbSupportedMessages;

    rfbStatRecordEncodingSent(cl, rfbEncodingSupportedMessages,
                              sz_rfbFramebufferUpdateRectHeader+sz_rfbSupportedMessages,
                              sz_rfbFramebufferUpdateRectHeader+sz_rfbSupportedMessages);

    if (!rfbSendUpdateBuf(cl))
        return FALSE;

    return TRUE;
}

/*
 * Send rfbEncodingSupportedEncodings.
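 *
 * Like the SupportedMessages rect assembled above, this rect is purely
 * informational. A client that kept the two 32-byte bitfields from that
 * rect could query them with the read-side mirror of rfbSetBit
 * (rfbTestBit is a sketch, not a library macro):
 *
 *   #define rfbTestBit(buffer, position) \
 *       (buffer[(position & 255) / 8] & (1 << (position % 8)))
 *
 *   if (rfbTestBit(msgs.client2server, rfbFileTransfer))
 *       ;   // server is willing to parse UltraVNC file-transfer messages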
*/ rfbBool rfbSendSupportedEncodings(rfbClientPtr cl) { rfbFramebufferUpdateRectHeader rect; static uint32_t supported[] = { rfbEncodingRaw, rfbEncodingCopyRect, rfbEncodingRRE, rfbEncodingCoRRE, rfbEncodingHextile, #ifdef LIBVNCSERVER_HAVE_LIBZ rfbEncodingZlib, rfbEncodingZRLE, rfbEncodingZYWRLE, #endif #ifdef LIBVNCSERVER_HAVE_LIBJPEG rfbEncodingTight, #endif #ifdef LIBVNCSERVER_HAVE_LIBPNG rfbEncodingTightPng, #endif rfbEncodingUltra, rfbEncodingUltraZip, rfbEncodingXCursor, rfbEncodingRichCursor, rfbEncodingPointerPos, rfbEncodingLastRect, rfbEncodingNewFBSize, rfbEncodingKeyboardLedState, rfbEncodingSupportedMessages, rfbEncodingSupportedEncodings, rfbEncodingServerIdentity, }; uint32_t nEncodings = sizeof(supported) / sizeof(supported[0]), i; /* think rfbSetEncodingsMsg */ if (cl->ublen + sz_rfbFramebufferUpdateRectHeader + (nEncodings * sizeof(uint32_t)) > UPDATE_BUF_SIZE) { if (!rfbSendUpdateBuf(cl)) return FALSE; } rect.encoding = Swap32IfLE(rfbEncodingSupportedEncodings); rect.r.x = 0; rect.r.y = 0; rect.r.w = Swap16IfLE(nEncodings * sizeof(uint32_t)); rect.r.h = Swap16IfLE(nEncodings); memcpy(&cl->updateBuf[cl->ublen], (char *)&rect, sz_rfbFramebufferUpdateRectHeader); cl->ublen += sz_rfbFramebufferUpdateRectHeader; for (i = 0; i < nEncodings; i++) { uint32_t encoding = Swap32IfLE(supported[i]); memcpy(&cl->updateBuf[cl->ublen], (char *)&encoding, sizeof(encoding)); cl->ublen += sizeof(encoding); } rfbStatRecordEncodingSent(cl, rfbEncodingSupportedEncodings, sz_rfbFramebufferUpdateRectHeader+(nEncodings * sizeof(uint32_t)), sz_rfbFramebufferUpdateRectHeader+(nEncodings * sizeof(uint32_t))); if (!rfbSendUpdateBuf(cl)) return FALSE; return TRUE; } void rfbSetServerVersionIdentity(rfbScreenInfoPtr screen, char *fmt, ...) { char buffer[256]; va_list ap; va_start(ap, fmt); vsnprintf(buffer, sizeof(buffer)-1, fmt, ap); va_end(ap); if (screen->versionString!=NULL) free(screen->versionString); screen->versionString = strdup(buffer); } /* * Send rfbEncodingServerIdentity. */ rfbBool rfbSendServerIdentity(rfbClientPtr cl) { rfbFramebufferUpdateRectHeader rect; char buffer[512]; /* tack on our library version */ snprintf(buffer,sizeof(buffer)-1, "%s (%s)", (cl->screen->versionString==NULL ? 
"unknown" : cl->screen->versionString), LIBVNCSERVER_PACKAGE_STRING); if (cl->ublen + sz_rfbFramebufferUpdateRectHeader + (strlen(buffer)+1) > UPDATE_BUF_SIZE) { if (!rfbSendUpdateBuf(cl)) return FALSE; } rect.encoding = Swap32IfLE(rfbEncodingServerIdentity); rect.r.x = 0; rect.r.y = 0; rect.r.w = Swap16IfLE(strlen(buffer)+1); rect.r.h = 0; memcpy(&cl->updateBuf[cl->ublen], (char *)&rect, sz_rfbFramebufferUpdateRectHeader); cl->ublen += sz_rfbFramebufferUpdateRectHeader; memcpy(&cl->updateBuf[cl->ublen], buffer, strlen(buffer)+1); cl->ublen += strlen(buffer)+1; rfbStatRecordEncodingSent(cl, rfbEncodingServerIdentity, sz_rfbFramebufferUpdateRectHeader+strlen(buffer)+1, sz_rfbFramebufferUpdateRectHeader+strlen(buffer)+1); if (!rfbSendUpdateBuf(cl)) return FALSE; return TRUE; } /* * Send an xvp server message */ rfbBool rfbSendXvp(rfbClientPtr cl, uint8_t version, uint8_t code) { rfbXvpMsg xvp; xvp.type = rfbXvp; xvp.pad = 0; xvp.version = version; xvp.code = code; LOCK(cl->sendMutex); if (rfbWriteExact(cl, (char *)&xvp, sz_rfbXvpMsg) < 0) { rfbLogPerror("rfbSendXvp: write"); rfbCloseClient(cl); } UNLOCK(cl->sendMutex); rfbStatRecordMessageSent(cl, rfbXvp, sz_rfbXvpMsg, sz_rfbXvpMsg); return TRUE; } rfbBool rfbSendTextChatMessage(rfbClientPtr cl, uint32_t length, char *buffer) { rfbTextChatMsg tc; int bytesToSend=0; memset((char *)&tc, 0, sizeof(tc)); tc.type = rfbTextChat; tc.length = Swap32IfLE(length); switch(length) { case rfbTextChatOpen: case rfbTextChatClose: case rfbTextChatFinished: bytesToSend=0; break; default: bytesToSend=length; if (bytesToSend>rfbTextMaxSize) bytesToSend=rfbTextMaxSize; } if (cl->ublen + sz_rfbTextChatMsg + bytesToSend > UPDATE_BUF_SIZE) { if (!rfbSendUpdateBuf(cl)) return FALSE; } memcpy(&cl->updateBuf[cl->ublen], (char *)&tc, sz_rfbTextChatMsg); cl->ublen += sz_rfbTextChatMsg; if (bytesToSend>0) { memcpy(&cl->updateBuf[cl->ublen], buffer, bytesToSend); cl->ublen += bytesToSend; } rfbStatRecordMessageSent(cl, rfbTextChat, sz_rfbTextChatMsg+bytesToSend, sz_rfbTextChatMsg+bytesToSend); if (!rfbSendUpdateBuf(cl)) return FALSE; return TRUE; } #define FILEXFER_ALLOWED_OR_CLOSE_AND_RETURN(msg, cl, ret) \ if ((cl->screen->getFileTransferPermission != NULL \ && cl->screen->getFileTransferPermission(cl) != TRUE) \ || cl->screen->permitFileTransfer != TRUE) { \ rfbLog("%sUltra File Transfer is disabled, dropping client: %s\n", msg, cl->host); \ rfbCloseClient(cl); \ return ret; \ } int DB = 1; rfbBool rfbSendFileTransferMessage(rfbClientPtr cl, uint8_t contentType, uint8_t contentParam, uint32_t size, uint32_t length, const char *buffer) { rfbFileTransferMsg ft; ft.type = rfbFileTransfer; ft.contentType = contentType; ft.contentParam = contentParam; ft.pad = 0; /* UltraVNC did not Swap16LE(ft.contentParam) (Looks like it might be BigEndian) */ ft.size = Swap32IfLE(size); ft.length = Swap32IfLE(length); FILEXFER_ALLOWED_OR_CLOSE_AND_RETURN("", cl, FALSE); /* rfbLog("rfbSendFileTransferMessage( %dtype, %dparam, %dsize, %dlen, %p)\n", contentType, contentParam, size, length, buffer); */ LOCK(cl->sendMutex); if (rfbWriteExact(cl, (char *)&ft, sz_rfbFileTransferMsg) < 0) { rfbLogPerror("rfbSendFileTransferMessage: write"); rfbCloseClient(cl); UNLOCK(cl->sendMutex); return FALSE; } if (length>0) { if (rfbWriteExact(cl, buffer, length) < 0) { rfbLogPerror("rfbSendFileTransferMessage: write"); rfbCloseClient(cl); UNLOCK(cl->sendMutex); return FALSE; } } UNLOCK(cl->sendMutex); rfbStatRecordMessageSent(cl, rfbFileTransfer, sz_rfbFileTransferMsg+length, 
sz_rfbFileTransferMsg+length); return TRUE; } /* * UltraVNC uses Windows Structures */ #define MAX_PATH 260 typedef struct { uint32_t dwLowDateTime; uint32_t dwHighDateTime; } RFB_FILETIME; typedef struct { uint32_t dwFileAttributes; RFB_FILETIME ftCreationTime; RFB_FILETIME ftLastAccessTime; RFB_FILETIME ftLastWriteTime; uint32_t nFileSizeHigh; uint32_t nFileSizeLow; uint32_t dwReserved0; uint32_t dwReserved1; uint8_t cFileName[ MAX_PATH ]; uint8_t cAlternateFileName[ 14 ]; } RFB_FIND_DATA; #define RFB_FILE_ATTRIBUTE_READONLY 0x1 #define RFB_FILE_ATTRIBUTE_HIDDEN 0x2 #define RFB_FILE_ATTRIBUTE_SYSTEM 0x4 #define RFB_FILE_ATTRIBUTE_DIRECTORY 0x10 #define RFB_FILE_ATTRIBUTE_ARCHIVE 0x20 #define RFB_FILE_ATTRIBUTE_NORMAL 0x80 #define RFB_FILE_ATTRIBUTE_TEMPORARY 0x100 #define RFB_FILE_ATTRIBUTE_COMPRESSED 0x800 rfbBool rfbFilenameTranslate2UNIX(rfbClientPtr cl, /* in */ char *path, /* out */ char *unixPath, size_t unixPathMaxLen) { int x; char *home=NULL; FILEXFER_ALLOWED_OR_CLOSE_AND_RETURN("", cl, FALSE); /* * Do not use strncpy() - truncating the file name would probably have undesirable side effects * Instead check if destination buffer is big enough */ if (strlen(path) >= unixPathMaxLen) return FALSE; /* C: */ if (path[0]=='C' && path[1]==':') strcpy(unixPath, &path[2]); else { home = getenv("HOME"); if (home!=NULL) { /* Re-check buffer size */ if ((strlen(path) + strlen(home) + 1) >= unixPathMaxLen) return FALSE; strcpy(unixPath, home); strcat(unixPath,"/"); strcat(unixPath, path); } else strcpy(unixPath, path); } for (x=0;x<strlen(unixPath);x++) if (unixPath[x]=='\\') unixPath[x]='/'; return TRUE; } rfbBool rfbFilenameTranslate2DOS(rfbClientPtr cl, char *unixPath, char *path) { int x; FILEXFER_ALLOWED_OR_CLOSE_AND_RETURN("", cl, FALSE); sprintf(path,"C:%s", unixPath); for (x=2;x<strlen(path);x++) if (path[x]=='/') path[x]='\\'; return TRUE; } rfbBool rfbSendDirContent(rfbClientPtr cl, int length, char *buffer) { char retfilename[MAX_PATH]; char path[MAX_PATH]; struct stat statbuf; RFB_FIND_DATA win32filename; int nOptLen = 0, retval=0; #ifdef WIN32 WIN32_FIND_DATAA winFindData; HANDLE findHandle; int pathLen, basePathLength; char *basePath; #else DIR *dirp=NULL; struct dirent *direntp=NULL; #endif FILEXFER_ALLOWED_OR_CLOSE_AND_RETURN("", cl, FALSE); /* Client thinks we are Winblows */ if (!rfbFilenameTranslate2UNIX(cl, buffer, path, sizeof(path))) return FALSE; if (DB) rfbLog("rfbProcessFileTransfer() rfbDirContentRequest: rfbRDirContent: \"%s\"->\"%s\"\n",buffer, path); #ifdef WIN32 // Create a search string, like C:\folder\* pathLen = strlen(path); basePath = malloc(pathLen + 3); memcpy(basePath, path, pathLen); basePathLength = pathLen; basePath[basePathLength] = '\\'; basePath[basePathLength + 1] = '*'; basePath[basePathLength + 2] = '\0'; // Start a search memset(&winFindData, 0, sizeof(winFindData)); findHandle = FindFirstFileA(path, &winFindData); free(basePath); if (findHandle == INVALID_HANDLE_VALUE) #else dirp=opendir(path); if (dirp==NULL) #endif return rfbSendFileTransferMessage(cl, rfbDirPacket, rfbADirectory, 0, 0, NULL); /* send back the path name (necessary for links) */ if (rfbSendFileTransferMessage(cl, rfbDirPacket, rfbADirectory, 0, length, buffer)==FALSE) return FALSE; #ifdef WIN32 while (findHandle != INVALID_HANDLE_VALUE) #else for (direntp=readdir(dirp); direntp!=NULL; direntp=readdir(dirp)) #endif { /* get stats */ #ifdef WIN32 snprintf(retfilename,sizeof(retfilename),"%s/%s", path, winFindData.cFileName); #else 
snprintf(retfilename,sizeof(retfilename),"%s/%s", path, direntp->d_name); #endif retval = stat(retfilename, &statbuf); if (retval==0) { memset((char *)&win32filename, 0, sizeof(win32filename)); #ifdef WIN32 win32filename.dwFileAttributes = winFindData.dwFileAttributes; win32filename.ftCreationTime.dwLowDateTime = winFindData.ftCreationTime.dwLowDateTime; win32filename.ftCreationTime.dwHighDateTime = winFindData.ftCreationTime.dwHighDateTime; win32filename.ftLastAccessTime.dwLowDateTime = winFindData.ftLastAccessTime.dwLowDateTime; win32filename.ftLastAccessTime.dwHighDateTime = winFindData.ftLastAccessTime.dwHighDateTime; win32filename.ftLastWriteTime.dwLowDateTime = winFindData.ftLastWriteTime.dwLowDateTime; win32filename.ftLastWriteTime.dwHighDateTime = winFindData.ftLastWriteTime.dwHighDateTime; win32filename.nFileSizeLow = winFindData.nFileSizeLow; win32filename.nFileSizeHigh = winFindData.nFileSizeHigh; win32filename.dwReserved0 = winFindData.dwReserved0; win32filename.dwReserved1 = winFindData.dwReserved1; strcpy((char *)win32filename.cFileName, winFindData.cFileName); strcpy((char *)win32filename.cAlternateFileName, winFindData.cAlternateFileName); #else win32filename.dwFileAttributes = Swap32IfBE(RFB_FILE_ATTRIBUTE_NORMAL); if (S_ISDIR(statbuf.st_mode)) win32filename.dwFileAttributes = Swap32IfBE(RFB_FILE_ATTRIBUTE_DIRECTORY); win32filename.ftCreationTime.dwLowDateTime = Swap32IfBE(statbuf.st_ctime); /* Intel Order */ win32filename.ftCreationTime.dwHighDateTime = 0; win32filename.ftLastAccessTime.dwLowDateTime = Swap32IfBE(statbuf.st_atime); /* Intel Order */ win32filename.ftLastAccessTime.dwHighDateTime = 0; win32filename.ftLastWriteTime.dwLowDateTime = Swap32IfBE(statbuf.st_mtime); /* Intel Order */ win32filename.ftLastWriteTime.dwHighDateTime = 0; win32filename.nFileSizeLow = Swap32IfBE(statbuf.st_size); /* Intel Order */ win32filename.nFileSizeHigh = 0; win32filename.dwReserved0 = 0; win32filename.dwReserved1 = 0; /* If this had the full path, we would need to translate to DOS format ("C:\") */ /* rfbFilenameTranslate2DOS(cl, retfilename, win32filename.cFileName); */ strcpy((char *)win32filename.cFileName, direntp->d_name); #endif /* Do not show hidden files (but show how to move up the tree) */ if ((strcmp((char *)win32filename.cFileName, "..")==0) || (win32filename.cFileName[0]!='.')) { nOptLen = sizeof(RFB_FIND_DATA) - MAX_PATH - 14 + strlen((char *)win32filename.cFileName); /* rfbLog("rfbProcessFileTransfer() rfbDirContentRequest: rfbRDirContent: Sending \"%s\"\n", (char *)win32filename.cFileName); */ if (rfbSendFileTransferMessage(cl, rfbDirPacket, rfbADirectory, 0, nOptLen, (char *)&win32filename)==FALSE) { #ifdef WIN32 FindClose(findHandle); #else closedir(dirp); #endif return FALSE; } } } #ifdef WIN32 if (FindNextFileA(findHandle, &winFindData) == 0) { FindClose(findHandle); findHandle = INVALID_HANDLE_VALUE; } #endif } #ifdef WIN32 if (findHandle != INVALID_HANDLE_VALUE) { FindClose(findHandle); } #else closedir(dirp); #endif /* End of the transfer */ return rfbSendFileTransferMessage(cl, rfbDirPacket, 0, 0, 0, NULL); } char *rfbProcessFileTransferReadBuffer(rfbClientPtr cl, uint32_t length) { char *buffer=NULL; int n=0; FILEXFER_ALLOWED_OR_CLOSE_AND_RETURN("", cl, NULL); /* We later alloc length+1, which might wrap around on 32-bit systems if length equals 0XFFFFFFFF, i.e. SIZE_MAX for 32-bit systems. 
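
   Spelled out for a 32-bit size_t, the wrap this guards against would be
   (a sketch of the broken path, not live code):

     uint32_t length = 0xFFFFFFFFu;
     size_t n = (size_t)length + 1;   // wraps to 0 when size_t is 32 bits
     char *buffer = malloc(n);        // malloc(0) may return a tiny block
     buffer[length] = 0;              // heap write ~4 GB out of bounds
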
On 64-bit systems, a length of 0XFFFFFFFF will safely be allocated since this check will never trigger and malloc() can digest length+1 without problems as length is a uint32_t. We also later pass length to rfbReadExact() that expects a signed int type and that might wrap on platforms with a 32-bit int type if length is bigger than 0X7FFFFFFF. */ if(length == SIZE_MAX || length > INT_MAX) { rfbErr("rfbProcessFileTransferReadBuffer: too big file transfer length requested: %u", (unsigned int)length); rfbCloseClient(cl); return NULL; } if (length>0) { buffer=malloc((size_t)length+1); if (buffer!=NULL) { if ((n = rfbReadExact(cl, (char *)buffer, length)) <= 0) { if (n != 0) rfbLogPerror("rfbProcessFileTransferReadBuffer: read"); rfbCloseClient(cl); /* NOTE: don't forget to free(buffer) if you return early! */ if (buffer!=NULL) free(buffer); return NULL; } /* Null Terminate */ buffer[length]=0; } } return buffer; } rfbBool rfbSendFileTransferChunk(rfbClientPtr cl) { /* Allocate buffer for compression */ unsigned char readBuf[sz_rfbBlockSize]; int bytesRead=0; int retval=0; fd_set wfds; struct timeval tv; int n; #ifdef LIBVNCSERVER_HAVE_LIBZ unsigned char compBuf[sz_rfbBlockSize + 1024]; unsigned long nMaxCompSize = sizeof(compBuf); int nRetC = 0; #endif /* * Don't close the client if we get into this one because * it is called from many places to service file transfers. * Note that permitFileTransfer is checked first. */ if (cl->screen->permitFileTransfer != TRUE || (cl->screen->getFileTransferPermission != NULL && cl->screen->getFileTransferPermission(cl) != TRUE)) { return TRUE; } /* If not sending, or no file open... Return as if we sent something! */ if ((cl->fileTransfer.fd!=-1) && (cl->fileTransfer.sending==1)) { FD_ZERO(&wfds); FD_SET(cl->sock, &wfds); /* return immediately */ tv.tv_sec = 0; tv.tv_usec = 0; n = select(cl->sock + 1, NULL, &wfds, NULL, &tv); if (n<0) { #ifdef WIN32 errno=WSAGetLastError(); #endif rfbLog("rfbSendFileTransferChunk() select failed: %s\n", strerror(errno)); } /* We have space on the transmit queue */ if (n > 0) { bytesRead = read(cl->fileTransfer.fd, readBuf, sz_rfbBlockSize); switch (bytesRead) { case 0: /* rfbLog("rfbSendFileTransferChunk(): End-Of-File Encountered\n"); */ retval = rfbSendFileTransferMessage(cl, rfbEndOfFile, 0, 0, 0, NULL); close(cl->fileTransfer.fd); cl->fileTransfer.fd = -1; cl->fileTransfer.sending = 0; cl->fileTransfer.receiving = 0; return retval; case -1: /* TODO : send an error msg to the client... 
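
   A sketch of what that error message could look like, reusing the framing
   helper defined above (the wording of err is made up):

     char err[64];
     snprintf(err, sizeof(err), "server read error: %s", strerror(errno));
     rfbSendFileTransferMessage(cl, rfbAbortFileTransfer, 0, 0,
                                strlen(err), err);

   For now the code below sends an empty rfbAbortFileTransfer instead.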
*/ #ifdef WIN32 errno=WSAGetLastError(); #endif rfbLog("rfbSendFileTransferChunk(): %s\n",strerror(errno)); retval = rfbSendFileTransferMessage(cl, rfbAbortFileTransfer, 0, 0, 0, NULL); close(cl->fileTransfer.fd); cl->fileTransfer.fd = -1; cl->fileTransfer.sending = 0; cl->fileTransfer.receiving = 0; return retval; default: /* rfbLog("rfbSendFileTransferChunk(): Read %d bytes\n", bytesRead); */ if (!cl->fileTransfer.compressionEnabled) return rfbSendFileTransferMessage(cl, rfbFilePacket, 0, 0, bytesRead, (char *)readBuf); else { #ifdef LIBVNCSERVER_HAVE_LIBZ nRetC = compress(compBuf, &nMaxCompSize, readBuf, bytesRead); /* rfbLog("Compressed the packet from %d -> %d bytes\n", nMaxCompSize, bytesRead); */ if ((nRetC==0) && (nMaxCompSize<bytesRead)) return rfbSendFileTransferMessage(cl, rfbFilePacket, 0, 1, nMaxCompSize, (char *)compBuf); else return rfbSendFileTransferMessage(cl, rfbFilePacket, 0, 0, bytesRead, (char *)readBuf); #else /* We do not support compression of the data stream */ return rfbSendFileTransferMessage(cl, rfbFilePacket, 0, 0, bytesRead, (char *)readBuf); #endif } } } } return TRUE; } rfbBool rfbProcessFileTransfer(rfbClientPtr cl, uint8_t contentType, uint8_t contentParam, uint32_t size, uint32_t length) { char *buffer=NULL, *p=NULL; int retval=0; char filename1[MAX_PATH]; char filename2[MAX_PATH]; char szFileTime[MAX_PATH]; struct stat statbuf; uint32_t sizeHtmp=0; int n=0; char timespec[64]; #ifdef LIBVNCSERVER_HAVE_LIBZ unsigned char compBuff[sz_rfbBlockSize]; unsigned long nRawBytes = sz_rfbBlockSize; int nRet = 0; #endif FILEXFER_ALLOWED_OR_CLOSE_AND_RETURN("", cl, FALSE); /* rfbLog("rfbProcessFileTransfer(%dtype, %dparam, %dsize, %dlen)\n", contentType, contentParam, size, length); */ switch (contentType) { case rfbDirContentRequest: switch (contentParam) { case rfbRDrivesList: /* Client requests the List of Local Drives */ /* rfbLog("rfbProcessFileTransfer() rfbDirContentRequest: rfbRDrivesList:\n"); */ /* Format when filled : "C:\<NULL>D:\<NULL>....Z:\<NULL><NULL> * * We replace the "\" char following the drive letter and ":" * with a char corresponding to the type of drive * We obtain something like "C:l<NULL>D:c<NULL>....Z:n\<NULL><NULL>" * Isn't it ugly ? * DRIVE_FIXED = 'l' (local?) * DRIVE_REMOVABLE = 'f' (floppy?) 
* DRIVE_CDROM = 'c' * DRIVE_REMOTE = 'n' */ /* in unix, there are no 'drives' (We could list mount points though) * We fake the root as a "C:" for the Winblows users */ filename2[0]='C'; filename2[1]=':'; filename2[2]='l'; filename2[3]=0; filename2[4]=0; retval = rfbSendFileTransferMessage(cl, rfbDirPacket, rfbADrivesList, 0, 5, filename2); if (buffer!=NULL) free(buffer); return retval; break; case rfbRDirContent: /* Client requests the content of a directory */ /* rfbLog("rfbProcessFileTransfer() rfbDirContentRequest: rfbRDirContent\n"); */ if ((buffer = rfbProcessFileTransferReadBuffer(cl, length))==NULL) return FALSE; retval = rfbSendDirContent(cl, length, buffer); if (buffer!=NULL) free(buffer); return retval; } break; case rfbDirPacket: rfbLog("rfbProcessFileTransfer() rfbDirPacket\n"); break; case rfbFileAcceptHeader: rfbLog("rfbProcessFileTransfer() rfbFileAcceptHeader\n"); break; case rfbCommandReturn: rfbLog("rfbProcessFileTransfer() rfbCommandReturn\n"); break; case rfbFileChecksums: /* Destination file already exists - the viewer sends the checksums */ rfbLog("rfbProcessFileTransfer() rfbFileChecksums\n"); break; case rfbFileTransferAccess: rfbLog("rfbProcessFileTransfer() rfbFileTransferAccess\n"); break; /* * sending from the server to the viewer */ case rfbFileTransferRequest: /* rfbLog("rfbProcessFileTransfer() rfbFileTransferRequest:\n"); */ /* add some space to the end of the buffer as we will be adding a timespec to it */ if ((buffer = rfbProcessFileTransferReadBuffer(cl, length))==NULL) return FALSE; /* The client requests a File */ if (!rfbFilenameTranslate2UNIX(cl, buffer, filename1, sizeof(filename1))) goto fail; cl->fileTransfer.fd=open(filename1, O_RDONLY, 0744); /* */ if (DB) rfbLog("rfbProcessFileTransfer() rfbFileTransferRequest(\"%s\"->\"%s\") Open: %s fd=%d\n", buffer, filename1, (cl->fileTransfer.fd==-1?"Failed":"Success"), cl->fileTransfer.fd); if (cl->fileTransfer.fd!=-1) { if (fstat(cl->fileTransfer.fd, &statbuf)!=0) { close(cl->fileTransfer.fd); cl->fileTransfer.fd=-1; } else { /* Add the File Time Stamp to the filename */ strftime(timespec, sizeof(timespec), "%m/%d/%Y %H:%M",gmtime(&statbuf.st_ctime)); buffer=realloc(buffer, length + strlen(timespec) + 2); /* comma, and Null term */ if (buffer==NULL) { rfbLog("rfbProcessFileTransfer() rfbFileTransferRequest: Failed to malloc %d bytes\n", length + strlen(timespec) + 2); return FALSE; } strcat(buffer,","); strcat(buffer, timespec); length = strlen(buffer); if (DB) rfbLog("rfbProcessFileTransfer() buffer is now: \"%s\"\n", buffer); } } /* The viewer supports compression if size==1 */ cl->fileTransfer.compressionEnabled = (size==1); /* rfbLog("rfbProcessFileTransfer() rfbFileTransferRequest(\"%s\"->\"%s\")%s\n", buffer, filename1, (size==1?" <Compression Enabled>":"")); */ /* File Size in bytes, 0xFFFFFFFF (-1) means error */ retval = rfbSendFileTransferMessage(cl, rfbFileHeader, 0, (cl->fileTransfer.fd==-1 ? 
-1 : statbuf.st_size), length, buffer); if (cl->fileTransfer.fd==-1) { if (buffer!=NULL) free(buffer); return retval; } /* setup filetransfer stuff */ cl->fileTransfer.fileSize = statbuf.st_size; cl->fileTransfer.numPackets = statbuf.st_size / sz_rfbBlockSize; cl->fileTransfer.receiving = 0; cl->fileTransfer.sending = 0; /* set when we receive a rfbFileHeader: */ /* TODO: finish 64-bit file size support */ sizeHtmp = 0; LOCK(cl->sendMutex); if (rfbWriteExact(cl, (char *)&sizeHtmp, 4) < 0) { rfbLogPerror("rfbProcessFileTransfer: write"); rfbCloseClient(cl); UNLOCK(cl->sendMutex); if (buffer!=NULL) free(buffer); return FALSE; } UNLOCK(cl->sendMutex); break; case rfbFileHeader: /* Destination file (viewer side) is ready for reception (size > 0) or not (size = -1) */ if (size==-1) { rfbLog("rfbProcessFileTransfer() rfbFileHeader (error, aborting)\n"); close(cl->fileTransfer.fd); cl->fileTransfer.fd=-1; return TRUE; } /* rfbLog("rfbProcessFileTransfer() rfbFileHeader (%d bytes of a file)\n", size); */ /* Starts the transfer! */ cl->fileTransfer.sending=1; return rfbSendFileTransferChunk(cl); break; /* * sending from the viewer to the server */ case rfbFileTransferOffer: /* client is sending a file to us */ /* buffer contains full path name (plus FileTime) */ /* size contains size of the file */ /* rfbLog("rfbProcessFileTransfer() rfbFileTransferOffer:\n"); */ if ((buffer = rfbProcessFileTransferReadBuffer(cl, length))==NULL) return FALSE; /* Parse the FileTime */ p = strrchr(buffer, ','); if (p!=NULL) { *p = '\0'; strncpy(szFileTime, p+1, sizeof(szFileTime)); szFileTime[sizeof(szFileTime)-1] = '\x00'; /* ensure NULL terminating byte is present, even if copy overflowed */ } else szFileTime[0]=0; /* Need to read in sizeHtmp */ if ((n = rfbReadExact(cl, (char *)&sizeHtmp, 4)) <= 0) { if (n != 0) rfbLogPerror("rfbProcessFileTransfer: read sizeHtmp"); rfbCloseClient(cl); /* NOTE: don't forget to free(buffer) if you return early! */ if (buffer!=NULL) free(buffer); return FALSE; } sizeHtmp = Swap32IfLE(sizeHtmp); if (!rfbFilenameTranslate2UNIX(cl, buffer, filename1, sizeof(filename1))) goto fail; /* If the file exists... We can send a rfbFileChecksums back to the client before we send an rfbFileAcceptHeader */ /* TODO: Delta Transfer */ cl->fileTransfer.fd=open(filename1, O_CREAT|O_WRONLY|O_TRUNC, 0744); if (DB) rfbLog("rfbProcessFileTransfer() rfbFileTransferOffer(\"%s\"->\"%s\") %s %s fd=%d\n", buffer, filename1, (cl->fileTransfer.fd==-1?"Failed":"Success"), (cl->fileTransfer.fd==-1?strerror(errno):""), cl->fileTransfer.fd); /* */ /* File Size in bytes, 0xFFFFFFFF (-1) means error */ retval = rfbSendFileTransferMessage(cl, rfbFileAcceptHeader, 0, (cl->fileTransfer.fd==-1 ? 
-1 : 0), length, buffer); if (cl->fileTransfer.fd==-1) { free(buffer); return retval; } /* setup filetransfer stuff */ cl->fileTransfer.fileSize = size; cl->fileTransfer.numPackets = size / sz_rfbBlockSize; cl->fileTransfer.receiving = 1; cl->fileTransfer.sending = 0; break; case rfbFilePacket: /* rfbLog("rfbProcessFileTransfer() rfbFilePacket:\n"); */ if ((buffer = rfbProcessFileTransferReadBuffer(cl, length))==NULL) return FALSE; if (cl->fileTransfer.fd!=-1) { /* buffer contains the contents of the file */ if (size==0) retval=write(cl->fileTransfer.fd, buffer, length); else { #ifdef LIBVNCSERVER_HAVE_LIBZ /* compressed packet */ nRet = uncompress(compBuff,&nRawBytes,(const unsigned char*)buffer, length); if(nRet == Z_OK) retval=write(cl->fileTransfer.fd, (char*)compBuff, nRawBytes); else retval = -1; #else /* Write the file out as received... */ retval=write(cl->fileTransfer.fd, buffer, length); #endif } if (retval==-1) { close(cl->fileTransfer.fd); cl->fileTransfer.fd=-1; cl->fileTransfer.sending = 0; cl->fileTransfer.receiving = 0; } } break; case rfbEndOfFile: if (DB) rfbLog("rfbProcessFileTransfer() rfbEndOfFile\n"); /* */ if (cl->fileTransfer.fd!=-1) close(cl->fileTransfer.fd); cl->fileTransfer.fd=-1; cl->fileTransfer.sending = 0; cl->fileTransfer.receiving = 0; break; case rfbAbortFileTransfer: if (DB) rfbLog("rfbProcessFileTransfer() rfbAbortFileTransfer\n"); /* */ if (cl->fileTransfer.fd!=-1) { close(cl->fileTransfer.fd); cl->fileTransfer.fd=-1; cl->fileTransfer.sending = 0; cl->fileTransfer.receiving = 0; } else { /* We use this message for FileTransfer rights (<=RC18 versions) * The client asks for FileTransfer permission */ if (contentParam == 0) { rfbLog("rfbProcessFileTransfer() File Transfer Permission DENIED! (Client Version <=RC18)\n"); /* Old method for FileTransfer handshake perimssion (<=RC18) (Deny it)*/ return rfbSendFileTransferMessage(cl, rfbAbortFileTransfer, 0, -1, 0, ""); } /* New method is allowed */ if (cl->screen->getFileTransferPermission!=NULL) { if (cl->screen->getFileTransferPermission(cl)==TRUE) { rfbLog("rfbProcessFileTransfer() File Transfer Permission Granted!\n"); return rfbSendFileTransferMessage(cl, rfbFileTransferAccess, 0, 1 , 0, ""); /* Permit */ } else { rfbLog("rfbProcessFileTransfer() File Transfer Permission DENIED!\n"); return rfbSendFileTransferMessage(cl, rfbFileTransferAccess, 0, -1 , 0, ""); /* Deny */ } } else { if (cl->screen->permitFileTransfer) { rfbLog("rfbProcessFileTransfer() File Transfer Permission Granted!\n"); return rfbSendFileTransferMessage(cl, rfbFileTransferAccess, 0, 1 , 0, ""); /* Permit */ } else { rfbLog("rfbProcessFileTransfer() File Transfer Permission DENIED by default!\n"); return rfbSendFileTransferMessage(cl, rfbFileTransferAccess, 0, -1 , 0, ""); /* DEFAULT: DENY (for security) */ } } } break; case rfbCommand: /* rfbLog("rfbProcessFileTransfer() rfbCommand:\n"); */ if ((buffer = rfbProcessFileTransferReadBuffer(cl, length))==NULL) return FALSE; switch (contentParam) { case rfbCDirCreate: /* Client requests the creation of a directory */ if (!rfbFilenameTranslate2UNIX(cl, buffer, filename1, sizeof(filename1))) goto fail; retval = mkdir(filename1, 0755); if (DB) rfbLog("rfbProcessFileTransfer() rfbCommand: rfbCDirCreate(\"%s\"->\"%s\") %s\n", buffer, filename1, (retval==-1?"Failed":"Success")); /* */ retval = rfbSendFileTransferMessage(cl, rfbCommandReturn, rfbADirCreate, retval, length, buffer); if (buffer!=NULL) free(buffer); return retval; case rfbCFileDelete: /* Client requests the deletion of a file */ if 
(!rfbFilenameTranslate2UNIX(cl, buffer, filename1, sizeof(filename1))) goto fail; if (stat(filename1,&statbuf)==0) { if (S_ISDIR(statbuf.st_mode)) retval = rmdir(filename1); else retval = unlink(filename1); } else retval=-1; retval = rfbSendFileTransferMessage(cl, rfbCommandReturn, rfbAFileDelete, retval, length, buffer); if (buffer!=NULL) free(buffer); return retval; case rfbCFileRename: /* Client requests the Renaming of a file/directory */ p = strrchr(buffer, '*'); if (p != NULL) { /* Split into 2 filenames ('*' is a seperator) */ *p = '\0'; if (!rfbFilenameTranslate2UNIX(cl, buffer, filename1, sizeof(filename1))) goto fail; if (!rfbFilenameTranslate2UNIX(cl, p+1, filename2, sizeof(filename2))) goto fail; retval = rename(filename1,filename2); if (DB) rfbLog("rfbProcessFileTransfer() rfbCommand: rfbCFileRename(\"%s\"->\"%s\" -->> \"%s\"->\"%s\") %s\n", buffer, filename1, p+1, filename2, (retval==-1?"Failed":"Success")); /* */ /* Restore the buffer so the reply is good */ *p = '*'; retval = rfbSendFileTransferMessage(cl, rfbCommandReturn, rfbAFileRename, retval, length, buffer); if (buffer!=NULL) free(buffer); return retval; } break; } break; } /* NOTE: don't forget to free(buffer) if you return early! */ if (buffer!=NULL) free(buffer); return TRUE; fail: if (buffer!=NULL) free(buffer); return FALSE; } /* * rfbProcessClientNormalMessage is called when the client has sent a normal * protocol message. */ static void rfbProcessClientNormalMessage(rfbClientPtr cl) { int n=0; rfbClientToServerMsg msg; char *str; int i; uint32_t enc=0; uint32_t lastPreferredEncoding = -1; char encBuf[64]; char encBuf2[64]; if ((n = rfbReadExact(cl, (char *)&msg, 1)) <= 0) { if (n != 0) rfbLogPerror("rfbProcessClientNormalMessage: read"); rfbCloseClient(cl); return; } switch (msg.type) { case rfbSetPixelFormat: if ((n = rfbReadExact(cl, ((char *)&msg) + 1, sz_rfbSetPixelFormatMsg - 1)) <= 0) { if (n != 0) rfbLogPerror("rfbProcessClientNormalMessage: read"); rfbCloseClient(cl); return; } cl->format.bitsPerPixel = msg.spf.format.bitsPerPixel; cl->format.depth = msg.spf.format.depth; cl->format.bigEndian = (msg.spf.format.bigEndian ? TRUE : FALSE); cl->format.trueColour = (msg.spf.format.trueColour ? TRUE : FALSE); cl->format.redMax = Swap16IfLE(msg.spf.format.redMax); cl->format.greenMax = Swap16IfLE(msg.spf.format.greenMax); cl->format.blueMax = Swap16IfLE(msg.spf.format.blueMax); cl->format.redShift = msg.spf.format.redShift; cl->format.greenShift = msg.spf.format.greenShift; cl->format.blueShift = msg.spf.format.blueShift; cl->readyForSetColourMapEntries = TRUE; cl->screen->setTranslateFunction(cl); rfbStatRecordMessageRcvd(cl, msg.type, sz_rfbSetPixelFormatMsg, sz_rfbSetPixelFormatMsg); return; case rfbFixColourMapEntries: if ((n = rfbReadExact(cl, ((char *)&msg) + 1, sz_rfbFixColourMapEntriesMsg - 1)) <= 0) { if (n != 0) rfbLogPerror("rfbProcessClientNormalMessage: read"); rfbCloseClient(cl); return; } rfbStatRecordMessageRcvd(cl, msg.type, sz_rfbSetPixelFormatMsg, sz_rfbSetPixelFormatMsg); rfbLog("rfbProcessClientNormalMessage: %s", "FixColourMapEntries unsupported\n"); rfbCloseClient(cl); return; /* NOTE: Some clients send us a set of encodings (ie: PointerPos) designed to enable/disable features... * We may want to look into this... * Example: * case rfbEncodingXCursor: * cl->enableCursorShapeUpdates = TRUE; * * Currently: cl->enableCursorShapeUpdates can *never* be turned off... 
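 *
 * For reference, the message parsed below has this layout (a sketch;
 * multi-byte fields arrive big-endian and are swapped on read):
 *
 *   uint8_t  type;                    rfbSetEncodings == 2
 *   uint8_t  pad;
 *   uint16_t nEncodings;
 *   int32_t  encodings[nEncodings];   real and pseudo encodings, most
 *                                     preferred first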
*/
    case rfbSetEncodings:
    {

        if ((n = rfbReadExact(cl, ((char *)&msg) + 1,
                              sz_rfbSetEncodingsMsg - 1)) <= 0) {
            if (n != 0)
                rfbLogPerror("rfbProcessClientNormalMessage: read");
            rfbCloseClient(cl);
            return;
        }

        msg.se.nEncodings = Swap16IfLE(msg.se.nEncodings);

        rfbStatRecordMessageRcvd(cl, msg.type,
                                 sz_rfbSetEncodingsMsg+(msg.se.nEncodings*4),
                                 sz_rfbSetEncodingsMsg+(msg.se.nEncodings*4));

        /*
         * UltraVNC Client has the ability to adapt to changing network environments
         * So, let's give it a chance to tell us what it wants now!
         */
        if (cl->preferredEncoding!=-1)
            lastPreferredEncoding = cl->preferredEncoding;

        /* Reset all flags to defaults (allows us to switch between PointerPos and Server Drawn Cursors) */
        cl->preferredEncoding=-1;
        cl->useCopyRect = FALSE;
        cl->useNewFBSize = FALSE;
        cl->cursorWasChanged = FALSE;
        cl->useRichCursorEncoding = FALSE;
        cl->enableCursorPosUpdates = FALSE;
        cl->enableCursorShapeUpdates = FALSE;
        cl->enableLastRectEncoding = FALSE;
        cl->enableKeyboardLedState = FALSE;
        cl->enableSupportedMessages = FALSE;
        cl->enableSupportedEncodings = FALSE;
        cl->enableServerIdentity = FALSE;
#if defined(LIBVNCSERVER_HAVE_LIBZ) || defined(LIBVNCSERVER_HAVE_LIBPNG)
        cl->tightQualityLevel = -1;
#ifdef LIBVNCSERVER_HAVE_LIBJPEG
        cl->tightCompressLevel = TIGHT_DEFAULT_COMPRESSION;
        cl->turboSubsampLevel = TURBO_DEFAULT_SUBSAMP;
        cl->turboQualityLevel = -1;
#endif
#endif

        for (i = 0; i < msg.se.nEncodings; i++) {
            if ((n = rfbReadExact(cl, (char *)&enc, 4)) <= 0) {
                if (n != 0)
                    rfbLogPerror("rfbProcessClientNormalMessage: read");
                rfbCloseClient(cl);
                return;
            }
            enc = Swap32IfLE(enc);

            switch (enc) {

            case rfbEncodingCopyRect:
                cl->useCopyRect = TRUE;
                break;
            case rfbEncodingRaw:
            case rfbEncodingRRE:
            case rfbEncodingCoRRE:
            case rfbEncodingHextile:
            case rfbEncodingUltra:
#ifdef LIBVNCSERVER_HAVE_LIBZ
            case rfbEncodingZlib:
            case rfbEncodingZRLE:
            case rfbEncodingZYWRLE:
#ifdef LIBVNCSERVER_HAVE_LIBJPEG
            case rfbEncodingTight:
#endif
#endif
#ifdef LIBVNCSERVER_HAVE_LIBPNG
            case rfbEncodingTightPng:
#endif
                /* The first supported encoding is the 'preferred' encoding */
                if (cl->preferredEncoding == -1)
                    cl->preferredEncoding = enc;
                break;
            case rfbEncodingXCursor:
                if(!cl->screen->dontConvertRichCursorToXCursor) {
                    rfbLog("Enabling X-style cursor updates for client %s\n",
                           cl->host);
                    /* if cursor was drawn, hide the cursor */
                    if(!cl->enableCursorShapeUpdates)
                        rfbRedrawAfterHideCursor(cl,NULL);

                    cl->enableCursorShapeUpdates = TRUE;
                    cl->cursorWasChanged = TRUE;
                }
                break;
            case rfbEncodingRichCursor:
                rfbLog("Enabling full-color cursor updates for client %s\n",
                       cl->host);
                /* if cursor was drawn, hide the cursor */
                if(!cl->enableCursorShapeUpdates)
                    rfbRedrawAfterHideCursor(cl,NULL);

                cl->enableCursorShapeUpdates = TRUE;
                cl->useRichCursorEncoding = TRUE;
                cl->cursorWasChanged = TRUE;
                break;
            case rfbEncodingPointerPos:
                if (!cl->enableCursorPosUpdates) {
                    rfbLog("Enabling cursor position updates for client %s\n",
                           cl->host);
                    cl->enableCursorPosUpdates = TRUE;
                    cl->cursorWasMoved = TRUE;
                }
                break;
            case rfbEncodingLastRect:
                if (!cl->enableLastRectEncoding) {
                    rfbLog("Enabling LastRect protocol extension for client "
                           "%s\n", cl->host);
                    cl->enableLastRectEncoding = TRUE;
                }
                break;
            case rfbEncodingNewFBSize:
                if (!cl->useNewFBSize) {
                    rfbLog("Enabling NewFBSize protocol extension for client "
                           "%s\n", cl->host);
                    cl->useNewFBSize = TRUE;
                }
                break;
            case rfbEncodingKeyboardLedState:
                if (!cl->enableKeyboardLedState) {
                    rfbLog("Enabling KeyboardLedState protocol extension for client "
                           "%s\n", cl->host);
cl->enableKeyboardLedState = TRUE; } break; case rfbEncodingSupportedMessages: if (!cl->enableSupportedMessages) { rfbLog("Enabling SupportedMessages protocol extension for client " "%s\n", cl->host); cl->enableSupportedMessages = TRUE; } break; case rfbEncodingSupportedEncodings: if (!cl->enableSupportedEncodings) { rfbLog("Enabling SupportedEncodings protocol extension for client " "%s\n", cl->host); cl->enableSupportedEncodings = TRUE; } break; case rfbEncodingServerIdentity: if (!cl->enableServerIdentity) { rfbLog("Enabling ServerIdentity protocol extension for client " "%s\n", cl->host); cl->enableServerIdentity = TRUE; } break; case rfbEncodingXvp: if (cl->screen->xvpHook) { rfbLog("Enabling Xvp protocol extension for client " "%s\n", cl->host); if (!rfbSendXvp(cl, 1, rfbXvp_Init)) { rfbCloseClient(cl); return; } } break; default: #if defined(LIBVNCSERVER_HAVE_LIBZ) || defined(LIBVNCSERVER_HAVE_LIBPNG) if ( enc >= (uint32_t)rfbEncodingCompressLevel0 && enc <= (uint32_t)rfbEncodingCompressLevel9 ) { cl->zlibCompressLevel = enc & 0x0F; #ifdef LIBVNCSERVER_HAVE_LIBJPEG cl->tightCompressLevel = enc & 0x0F; rfbLog("Using compression level %d for client %s\n", cl->tightCompressLevel, cl->host); #endif } else if ( enc >= (uint32_t)rfbEncodingQualityLevel0 && enc <= (uint32_t)rfbEncodingQualityLevel9 ) { cl->tightQualityLevel = enc & 0x0F; rfbLog("Using image quality level %d for client %s\n", cl->tightQualityLevel, cl->host); #ifdef LIBVNCSERVER_HAVE_LIBJPEG cl->turboQualityLevel = tight2turbo_qual[enc & 0x0F]; cl->turboSubsampLevel = tight2turbo_subsamp[enc & 0x0F]; rfbLog("Using JPEG subsampling %d, Q%d for client %s\n", cl->turboSubsampLevel, cl->turboQualityLevel, cl->host); } else if ( enc >= (uint32_t)rfbEncodingFineQualityLevel0 + 1 && enc <= (uint32_t)rfbEncodingFineQualityLevel100 ) { cl->turboQualityLevel = enc & 0xFF; rfbLog("Using fine quality level %d for client %s\n", cl->turboQualityLevel, cl->host); } else if ( enc >= (uint32_t)rfbEncodingSubsamp1X && enc <= (uint32_t)rfbEncodingSubsampGray ) { cl->turboSubsampLevel = enc & 0xFF; rfbLog("Using subsampling level %d for client %s\n", cl->turboSubsampLevel, cl->host); #endif } else #endif { rfbExtensionData* e; for(e = cl->extensions; e;) { rfbExtensionData* next = e->next; if(e->extension->enablePseudoEncoding && e->extension->enablePseudoEncoding(cl, &e->data, (int)enc)) /* ext handles this encoding */ break; e = next; } if(e == NULL) { rfbBool handled = FALSE; /* if the pseudo encoding is not handled by the enabled extensions, search through all extensions. 
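
   An extension opts into this search by exporting a zero-terminated list
   of pseudo encodings. A minimal sketch, assuming the extension is
   registered before the server starts (myExt, myPseudoEncodings and the
   encoding number are hypothetical):

     static int myPseudoEncodings[] = { 0x584D0001, 0 };

     static rfbBool myEnablePseudoEncoding(rfbClientPtr cl,
                                           void **data, int encoding)
     {
         *data = NULL;    // per-client state could be allocated here
         return TRUE;     // claim the encoding
     }

     // at startup: myExt.pseudoEncodings = myPseudoEncodings;
     //             myExt.enablePseudoEncoding = myEnablePseudoEncoding;
     //             rfbRegisterProtocolExtension(&myExt);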
*/ rfbProtocolExtension* e; for(e = rfbGetExtensionIterator(); e;) { int* encs = e->pseudoEncodings; while(encs && *encs!=0) { if(*encs==(int)enc) { void* data = NULL; if(!e->enablePseudoEncoding(cl, &data, (int)enc)) { rfbLog("Installed extension pretends to handle pseudo encoding 0x%x, but does not!\n",(int)enc); } else { rfbEnableExtension(cl, e, data); handled = TRUE; e = NULL; break; } } encs++; } if(e) e = e->next; } rfbReleaseExtensionIterator(); if(!handled) rfbLog("rfbProcessClientNormalMessage: " "ignoring unsupported encoding type %s\n", encodingName(enc,encBuf,sizeof(encBuf))); } } } } if (cl->preferredEncoding == -1) { if (lastPreferredEncoding==-1) { cl->preferredEncoding = rfbEncodingRaw; rfbLog("Defaulting to %s encoding for client %s\n", encodingName(cl->preferredEncoding,encBuf,sizeof(encBuf)),cl->host); } else { cl->preferredEncoding = lastPreferredEncoding; rfbLog("Sticking with %s encoding for client %s\n", encodingName(cl->preferredEncoding,encBuf,sizeof(encBuf)),cl->host); } } else { if (lastPreferredEncoding==-1) { rfbLog("Using %s encoding for client %s\n", encodingName(cl->preferredEncoding,encBuf,sizeof(encBuf)),cl->host); } else { rfbLog("Switching from %s to %s Encoding for client %s\n", encodingName(lastPreferredEncoding,encBuf2,sizeof(encBuf2)), encodingName(cl->preferredEncoding,encBuf,sizeof(encBuf)), cl->host); } } if (cl->enableCursorPosUpdates && !cl->enableCursorShapeUpdates) { rfbLog("Disabling cursor position updates for client %s\n", cl->host); cl->enableCursorPosUpdates = FALSE; } return; } case rfbFramebufferUpdateRequest: { sraRegionPtr tmpRegion; if ((n = rfbReadExact(cl, ((char *)&msg) + 1, sz_rfbFramebufferUpdateRequestMsg-1)) <= 0) { if (n != 0) rfbLogPerror("rfbProcessClientNormalMessage: read"); rfbCloseClient(cl); return; } rfbStatRecordMessageRcvd(cl, msg.type, sz_rfbFramebufferUpdateRequestMsg,sz_rfbFramebufferUpdateRequestMsg); /* The values come in based on the scaled screen, we need to convert them to * values based on the main screen's coordinate system */ if(!rectSwapIfLEAndClip(&msg.fur.x,&msg.fur.y,&msg.fur.w,&msg.fur.h,cl)) { rfbLog("Warning, ignoring rfbFramebufferUpdateRequest: %dXx%dY-%dWx%dH\n",msg.fur.x, msg.fur.y, msg.fur.w, msg.fur.h); return; } tmpRegion = sraRgnCreateRect(msg.fur.x, msg.fur.y, msg.fur.x+msg.fur.w, msg.fur.y+msg.fur.h); LOCK(cl->updateMutex); sraRgnOr(cl->requestedRegion,tmpRegion); if (!cl->readyForSetColourMapEntries) { /* client hasn't sent a SetPixelFormat so is using server's */ cl->readyForSetColourMapEntries = TRUE; if (!cl->format.trueColour) { if (!rfbSetClientColourMap(cl, 0, 0)) { sraRgnDestroy(tmpRegion); TSIGNAL(cl->updateCond); UNLOCK(cl->updateMutex); return; } } } if (!msg.fur.incremental) { sraRgnOr(cl->modifiedRegion,tmpRegion); sraRgnSubtract(cl->copyRegion,tmpRegion); } TSIGNAL(cl->updateCond); UNLOCK(cl->updateMutex); sraRgnDestroy(tmpRegion); return; } case rfbKeyEvent: if ((n = rfbReadExact(cl, ((char *)&msg) + 1, sz_rfbKeyEventMsg - 1)) <= 0) { if (n != 0) rfbLogPerror("rfbProcessClientNormalMessage: read"); rfbCloseClient(cl); return; } rfbStatRecordMessageRcvd(cl, msg.type, sz_rfbKeyEventMsg, sz_rfbKeyEventMsg); if(!cl->viewOnly) { cl->screen->kbdAddEvent(msg.ke.down, (rfbKeySym)Swap32IfLE(msg.ke.key), cl); } return; case rfbPointerEvent: if ((n = rfbReadExact(cl, ((char *)&msg) + 1, sz_rfbPointerEventMsg - 1)) <= 0) { if (n != 0) rfbLogPerror("rfbProcessClientNormalMessage: read"); rfbCloseClient(cl); return; } rfbStatRecordMessageRcvd(cl, msg.type, sz_rfbPointerEventMsg, 
sz_rfbPointerEventMsg); if (cl->screen->pointerClient && cl->screen->pointerClient != cl) return; if (msg.pe.buttonMask == 0) cl->screen->pointerClient = NULL; else cl->screen->pointerClient = cl; if(!cl->viewOnly) { if (msg.pe.buttonMask != cl->lastPtrButtons || cl->screen->deferPtrUpdateTime == 0) { cl->screen->ptrAddEvent(msg.pe.buttonMask, ScaleX(cl->scaledScreen, cl->screen, Swap16IfLE(msg.pe.x)), ScaleY(cl->scaledScreen, cl->screen, Swap16IfLE(msg.pe.y)), cl); cl->lastPtrButtons = msg.pe.buttonMask; } else { cl->lastPtrX = ScaleX(cl->scaledScreen, cl->screen, Swap16IfLE(msg.pe.x)); cl->lastPtrY = ScaleY(cl->scaledScreen, cl->screen, Swap16IfLE(msg.pe.y)); cl->lastPtrButtons = msg.pe.buttonMask; } } return; case rfbFileTransfer: if ((n = rfbReadExact(cl, ((char *)&msg) + 1, sz_rfbFileTransferMsg - 1)) <= 0) { if (n != 0) rfbLogPerror("rfbProcessClientNormalMessage: read"); rfbCloseClient(cl); return; } msg.ft.size = Swap32IfLE(msg.ft.size); msg.ft.length = Swap32IfLE(msg.ft.length); /* record statistics in rfbProcessFileTransfer as length is filled with garbage when it is not valid */ rfbProcessFileTransfer(cl, msg.ft.contentType, msg.ft.contentParam, msg.ft.size, msg.ft.length); return; case rfbSetSW: if ((n = rfbReadExact(cl, ((char *)&msg) + 1, sz_rfbSetSWMsg - 1)) <= 0) { if (n != 0) rfbLogPerror("rfbProcessClientNormalMessage: read"); rfbCloseClient(cl); return; } msg.sw.x = Swap16IfLE(msg.sw.x); msg.sw.y = Swap16IfLE(msg.sw.y); rfbStatRecordMessageRcvd(cl, msg.type, sz_rfbSetSWMsg, sz_rfbSetSWMsg); /* msg.sw.status is not initialized in the ultraVNC viewer and contains random numbers (why???) */ rfbLog("Received a rfbSetSingleWindow(%d x, %d y)\n", msg.sw.x, msg.sw.y); if (cl->screen->setSingleWindow!=NULL) cl->screen->setSingleWindow(cl, msg.sw.x, msg.sw.y); return; case rfbSetServerInput: if ((n = rfbReadExact(cl, ((char *)&msg) + 1, sz_rfbSetServerInputMsg - 1)) <= 0) { if (n != 0) rfbLogPerror("rfbProcessClientNormalMessage: read"); rfbCloseClient(cl); return; } rfbStatRecordMessageRcvd(cl, msg.type, sz_rfbSetServerInputMsg, sz_rfbSetServerInputMsg); /* msg.sim.pad is not initialized in the ultraVNC viewer and contains random numbers (why???) */ /* msg.sim.pad = Swap16IfLE(msg.sim.pad); */ rfbLog("Received a rfbSetServerInput(%d status)\n", msg.sim.status); if (cl->screen->setServerInput!=NULL) cl->screen->setServerInput(cl, msg.sim.status); return; case rfbTextChat: if ((n = rfbReadExact(cl, ((char *)&msg) + 1, sz_rfbTextChatMsg - 1)) <= 0) { if (n != 0) rfbLogPerror("rfbProcessClientNormalMessage: read"); rfbCloseClient(cl); return; } msg.tc.pad2 = Swap16IfLE(msg.tc.pad2); msg.tc.length = Swap32IfLE(msg.tc.length); switch (msg.tc.length) { case rfbTextChatOpen: case rfbTextChatClose: case rfbTextChatFinished: /* commands do not have text following */ /* Why couldn't they have used the pad byte??? 
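
   A setTextChat hook consequently has to treat length as either a command
   or a byte count; str is NULL for the three command values. A minimal
   sketch (textChatHook is a hypothetical name):

     static void textChatHook(rfbClientPtr cl, int length, char *text)
     {
         if (length == rfbTextChatOpen)
             rfbLog("chat opened by %s\n", cl->host);
         else if (length == rfbTextChatClose || length == rfbTextChatFinished)
             rfbLog("chat closed by %s\n", cl->host);
         else
             rfbSendTextChatMessage(cl, length, text);   // echo it back
     }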
*/ str=NULL; rfbStatRecordMessageRcvd(cl, msg.type, sz_rfbTextChatMsg, sz_rfbTextChatMsg); break; default: if ((msg.tc.length>0) && (msg.tc.length<rfbTextMaxSize)) { str = (char *)malloc(msg.tc.length); if (str==NULL) { rfbLog("Unable to malloc %d bytes for a TextChat Message\n", msg.tc.length); rfbCloseClient(cl); return; } if ((n = rfbReadExact(cl, str, msg.tc.length)) <= 0) { if (n != 0) rfbLogPerror("rfbProcessClientNormalMessage: read"); free(str); rfbCloseClient(cl); return; } rfbStatRecordMessageRcvd(cl, msg.type, sz_rfbTextChatMsg+msg.tc.length, sz_rfbTextChatMsg+msg.tc.length); } else { /* This should never happen */ rfbLog("client sent us a Text Message that is too big %d>%d\n", msg.tc.length, rfbTextMaxSize); rfbCloseClient(cl); return; } } /* Note: length can be commands: rfbTextChatOpen, rfbTextChatClose, and rfbTextChatFinished * at which point, the str is NULL (as it is not sent) */ if (cl->screen->setTextChat!=NULL) cl->screen->setTextChat(cl, msg.tc.length, str); free(str); return; case rfbClientCutText: if ((n = rfbReadExact(cl, ((char *)&msg) + 1, sz_rfbClientCutTextMsg - 1)) <= 0) { if (n != 0) rfbLogPerror("rfbProcessClientNormalMessage: read"); rfbCloseClient(cl); return; } msg.cct.length = Swap32IfLE(msg.cct.length); /* uint32_t input is passed to malloc()'s size_t argument, * to rfbReadExact()'s int argument, to rfbStatRecordMessageRcvd()'s int * argument increased of sz_rfbClientCutTextMsg, and to setXCutText()'s int * argument. Here we impose a limit of 1 MB so that the value fits * into all of the types to prevent from misinterpretation and thus * from accessing uninitialized memory (CVE-2018-7225) and also to * prevent from a denial-of-service by allocating too much memory in * the server. */ if (msg.cct.length > 1<<20) { rfbLog("rfbClientCutText: too big cut text length requested: %u B > 1 MB\n", (unsigned int)msg.cct.length); rfbCloseClient(cl); return; } /* Allow zero-length client cut text. */ str = (char *)calloc(msg.cct.length ? 
msg.cct.length : 1, 1); if (str == NULL) { rfbLogPerror("rfbProcessClientNormalMessage: not enough memory"); rfbCloseClient(cl); return; } if ((n = rfbReadExact(cl, str, msg.cct.length)) <= 0) { if (n != 0) rfbLogPerror("rfbProcessClientNormalMessage: read"); free(str); rfbCloseClient(cl); return; } rfbStatRecordMessageRcvd(cl, msg.type, sz_rfbClientCutTextMsg+msg.cct.length, sz_rfbClientCutTextMsg+msg.cct.length); if(!cl->viewOnly) { cl->screen->setXCutText(str, msg.cct.length, cl); } free(str); return; case rfbPalmVNCSetScaleFactor: cl->PalmVNC = TRUE; if ((n = rfbReadExact(cl, ((char *)&msg) + 1, sz_rfbSetScaleMsg - 1)) <= 0) { if (n != 0) rfbLogPerror("rfbProcessClientNormalMessage: read"); rfbCloseClient(cl); return; } if (msg.ssc.scale == 0) { rfbLogPerror("rfbProcessClientNormalMessage: will not accept a scale factor of zero"); rfbCloseClient(cl); return; } rfbStatRecordMessageRcvd(cl, msg.type, sz_rfbSetScaleMsg, sz_rfbSetScaleMsg); rfbLog("rfbSetScale(%d)\n", msg.ssc.scale); rfbScalingSetup(cl,cl->screen->width/msg.ssc.scale, cl->screen->height/msg.ssc.scale); rfbSendNewScaleSize(cl); return; case rfbSetScale: if ((n = rfbReadExact(cl, ((char *)&msg) + 1, sz_rfbSetScaleMsg - 1)) <= 0) { if (n != 0) rfbLogPerror("rfbProcessClientNormalMessage: read"); rfbCloseClient(cl); return; } if (msg.ssc.scale == 0) { rfbLogPerror("rfbProcessClientNormalMessage: will not accept a scale factor of zero"); rfbCloseClient(cl); return; } rfbStatRecordMessageRcvd(cl, msg.type, sz_rfbSetScaleMsg, sz_rfbSetScaleMsg); rfbLog("rfbSetScale(%d)\n", msg.ssc.scale); rfbScalingSetup(cl,cl->screen->width/msg.ssc.scale, cl->screen->height/msg.ssc.scale); rfbSendNewScaleSize(cl); return; case rfbXvp: if ((n = rfbReadExact(cl, ((char *)&msg) + 1, sz_rfbXvpMsg - 1)) <= 0) { if (n != 0) rfbLogPerror("rfbProcessClientNormalMessage: read"); rfbCloseClient(cl); return; } rfbStatRecordMessageRcvd(cl, msg.type, sz_rfbXvpMsg, sz_rfbXvpMsg); /* only version when is defined, so echo back a fail */ if(msg.xvp.version != 1) { rfbSendXvp(cl, msg.xvp.version, rfbXvp_Fail); } else { /* if the hook exists and fails, send a fail msg */ if(cl->screen->xvpHook && !cl->screen->xvpHook(cl, msg.xvp.version, msg.xvp.code)) rfbSendXvp(cl, 1, rfbXvp_Fail); } return; default: { rfbExtensionData *e,*next; for(e=cl->extensions; e;) { next = e->next; if(e->extension->handleMessage && e->extension->handleMessage(cl, e->data, &msg)) { rfbStatRecordMessageRcvd(cl, msg.type, 0, 0); /* Extension should handle this */ return; } e = next; } rfbLog("rfbProcessClientNormalMessage: unknown message type %d\n", msg.type); rfbLog(" ... closing connection\n"); rfbCloseClient(cl); return; } } } /* * rfbSendFramebufferUpdate - send the currently pending framebuffer update to * the RFB client. * givenUpdateRegion is not changed. */ rfbBool rfbSendFramebufferUpdate(rfbClientPtr cl, sraRegionPtr givenUpdateRegion) { sraRectangleIterator* i=NULL; sraRect rect; int nUpdateRegionRects; rfbFramebufferUpdateMsg *fu = (rfbFramebufferUpdateMsg *)cl->updateBuf; sraRegionPtr updateRegion,updateCopyRegion,tmpRegion; int dx, dy; rfbBool sendCursorShape = FALSE; rfbBool sendCursorPos = FALSE; rfbBool sendKeyboardLedState = FALSE; rfbBool sendSupportedMessages = FALSE; rfbBool sendSupportedEncodings = FALSE; rfbBool sendServerIdentity = FALSE; rfbBool result = TRUE; if(cl->screen->displayHook) cl->screen->displayHook(cl); /* * If framebuffer size was changed and the client supports NewFBSize * encoding, just send NewFBSize marker and return. 
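 *
 * newFBSizePending is normally raised for every connected client when the
 * application swaps in a new framebuffer, e.g. (a sketch; 8/3/4 describe
 * the usual 32bpp true-colour layout, and the old buffer stays owned by
 * the caller):
 *
 *   char *fb = malloc(newWidth * newHeight * 4);
 *   rfbNewFramebuffer(screen, fb, newWidth, newHeight, 8, 3, 4);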
*/ if (cl->useNewFBSize && cl->newFBSizePending) { LOCK(cl->updateMutex); cl->newFBSizePending = FALSE; UNLOCK(cl->updateMutex); fu->type = rfbFramebufferUpdate; fu->nRects = Swap16IfLE(1); cl->ublen = sz_rfbFramebufferUpdateMsg; if (!rfbSendNewFBSize(cl, cl->scaledScreen->width, cl->scaledScreen->height)) { if(cl->screen->displayFinishedHook) cl->screen->displayFinishedHook(cl, FALSE); return FALSE; } result = rfbSendUpdateBuf(cl); if(cl->screen->displayFinishedHook) cl->screen->displayFinishedHook(cl, result); return result; } /* * If this client understands cursor shape updates, cursor should be * removed from the framebuffer. Otherwise, make sure it's put up. */ if (cl->enableCursorShapeUpdates) { if (cl->cursorWasChanged && cl->readyForSetColourMapEntries) sendCursorShape = TRUE; } /* * Do we plan to send cursor position update? */ if (cl->enableCursorPosUpdates && cl->cursorWasMoved) sendCursorPos = TRUE; /* * Do we plan to send a keyboard state update? */ if ((cl->enableKeyboardLedState) && (cl->screen->getKeyboardLedStateHook!=NULL)) { int x; x=cl->screen->getKeyboardLedStateHook(cl->screen); if (x!=cl->lastKeyboardLedState) { sendKeyboardLedState = TRUE; cl->lastKeyboardLedState=x; } } /* * Do we plan to send a rfbEncodingSupportedMessages? */ if (cl->enableSupportedMessages) { sendSupportedMessages = TRUE; /* We only send this message ONCE <per setEncodings message received> * (We disable it here) */ cl->enableSupportedMessages = FALSE; } /* * Do we plan to send a rfbEncodingSupportedEncodings? */ if (cl->enableSupportedEncodings) { sendSupportedEncodings = TRUE; /* We only send this message ONCE <per setEncodings message received> * (We disable it here) */ cl->enableSupportedEncodings = FALSE; } /* * Do we plan to send a rfbEncodingServerIdentity? */ if (cl->enableServerIdentity) { sendServerIdentity = TRUE; /* We only send this message ONCE <per setEncodings message received> * (We disable it here) */ cl->enableServerIdentity = FALSE; } LOCK(cl->updateMutex); /* * The modifiedRegion may overlap the destination copyRegion. We remove * any overlapping bits from the copyRegion (since they'd only be * overwritten anyway). */ sraRgnSubtract(cl->copyRegion,cl->modifiedRegion); /* * The client is interested in the region requestedRegion. The region * which should be updated now is the intersection of requestedRegion * and the union of modifiedRegion and copyRegion. If it's empty then * no update is needed. 
*/ updateRegion = sraRgnCreateRgn(givenUpdateRegion); if(cl->screen->progressiveSliceHeight>0) { int height=cl->screen->progressiveSliceHeight, y=cl->progressiveSliceY; sraRegionPtr bbox=sraRgnBBox(updateRegion); sraRect rect; if(sraRgnPopRect(bbox,&rect,0)) { sraRegionPtr slice; if(y<rect.y1 || y>=rect.y2) y=rect.y1; slice=sraRgnCreateRect(0,y,cl->screen->width,y+height); sraRgnAnd(updateRegion,slice); sraRgnDestroy(slice); } sraRgnDestroy(bbox); y+=height; if(y>=cl->screen->height) y=0; cl->progressiveSliceY=y; } sraRgnOr(updateRegion,cl->copyRegion); if(!sraRgnAnd(updateRegion,cl->requestedRegion) && sraRgnEmpty(updateRegion) && (cl->enableCursorShapeUpdates || (cl->cursorX == cl->screen->cursorX && cl->cursorY == cl->screen->cursorY)) && !sendCursorShape && !sendCursorPos && !sendKeyboardLedState && !sendSupportedMessages && !sendSupportedEncodings && !sendServerIdentity) { sraRgnDestroy(updateRegion); UNLOCK(cl->updateMutex); if(cl->screen->displayFinishedHook) cl->screen->displayFinishedHook(cl, TRUE); return TRUE; } /* * We assume that the client doesn't have any pixel data outside the * requestedRegion. In other words, both the source and destination of a * copy must lie within requestedRegion. So the region we can send as a * copy is the intersection of the copyRegion with both the requestedRegion * and the requestedRegion translated by the amount of the copy. We set * updateCopyRegion to this. */ updateCopyRegion = sraRgnCreateRgn(cl->copyRegion); sraRgnAnd(updateCopyRegion,cl->requestedRegion); tmpRegion = sraRgnCreateRgn(cl->requestedRegion); sraRgnOffset(tmpRegion,cl->copyDX,cl->copyDY); sraRgnAnd(updateCopyRegion,tmpRegion); sraRgnDestroy(tmpRegion); dx = cl->copyDX; dy = cl->copyDY; /* * Next we remove updateCopyRegion from updateRegion so that updateRegion * is the part of this update which is sent as ordinary pixel data (i.e not * a copy). */ sraRgnSubtract(updateRegion,updateCopyRegion); /* * Finally we leave modifiedRegion to be the remainder (if any) of parts of * the screen which are modified but outside the requestedRegion. We also * empty both the requestedRegion and the copyRegion - note that we never * carry over a copyRegion for a future update. */ sraRgnOr(cl->modifiedRegion,cl->copyRegion); sraRgnSubtract(cl->modifiedRegion,updateRegion); sraRgnSubtract(cl->modifiedRegion,updateCopyRegion); sraRgnMakeEmpty(cl->requestedRegion); sraRgnMakeEmpty(cl->copyRegion); cl->copyDX = 0; cl->copyDY = 0; UNLOCK(cl->updateMutex); if (!cl->enableCursorShapeUpdates) { if(cl->cursorX != cl->screen->cursorX || cl->cursorY != cl->screen->cursorY) { rfbRedrawAfterHideCursor(cl,updateRegion); LOCK(cl->screen->cursorMutex); cl->cursorX = cl->screen->cursorX; cl->cursorY = cl->screen->cursorY; UNLOCK(cl->screen->cursorMutex); rfbRedrawAfterHideCursor(cl,updateRegion); } rfbShowCursor(cl); } /* * Now send the update. 
*/ rfbStatRecordMessageSent(cl, rfbFramebufferUpdate, 0, 0); if (cl->preferredEncoding == rfbEncodingCoRRE) { nUpdateRegionRects = 0; for(i = sraRgnGetIterator(updateRegion); sraRgnIteratorNext(i,&rect);){ int x = rect.x1; int y = rect.y1; int w = rect.x2 - x; int h = rect.y2 - y; int rectsPerRow, rows; /* We need to count the number of rects in the scaled screen */ if (cl->screen!=cl->scaledScreen) rfbScaledCorrection(cl->screen, cl->scaledScreen, &x, &y, &w, &h, "rfbSendFramebufferUpdate"); rectsPerRow = (w-1)/cl->correMaxWidth+1; rows = (h-1)/cl->correMaxHeight+1; nUpdateRegionRects += rectsPerRow*rows; } sraRgnReleaseIterator(i); i=NULL; } else if (cl->preferredEncoding == rfbEncodingUltra) { nUpdateRegionRects = 0; for(i = sraRgnGetIterator(updateRegion); sraRgnIteratorNext(i,&rect);){ int x = rect.x1; int y = rect.y1; int w = rect.x2 - x; int h = rect.y2 - y; /* We need to count the number of rects in the scaled screen */ if (cl->screen!=cl->scaledScreen) rfbScaledCorrection(cl->screen, cl->scaledScreen, &x, &y, &w, &h, "rfbSendFramebufferUpdate"); nUpdateRegionRects += (((h-1) / (ULTRA_MAX_SIZE( w ) / w)) + 1); } sraRgnReleaseIterator(i); i=NULL; #ifdef LIBVNCSERVER_HAVE_LIBZ } else if (cl->preferredEncoding == rfbEncodingZlib) { nUpdateRegionRects = 0; for(i = sraRgnGetIterator(updateRegion); sraRgnIteratorNext(i,&rect);){ int x = rect.x1; int y = rect.y1; int w = rect.x2 - x; int h = rect.y2 - y; /* We need to count the number of rects in the scaled screen */ if (cl->screen!=cl->scaledScreen) rfbScaledCorrection(cl->screen, cl->scaledScreen, &x, &y, &w, &h, "rfbSendFramebufferUpdate"); nUpdateRegionRects += (((h-1) / (ZLIB_MAX_SIZE( w ) / w)) + 1); } sraRgnReleaseIterator(i); i=NULL; #ifdef LIBVNCSERVER_HAVE_LIBJPEG } else if (cl->preferredEncoding == rfbEncodingTight) { nUpdateRegionRects = 0; for(i = sraRgnGetIterator(updateRegion); sraRgnIteratorNext(i,&rect);){ int x = rect.x1; int y = rect.y1; int w = rect.x2 - x; int h = rect.y2 - y; int n; /* We need to count the number of rects in the scaled screen */ if (cl->screen!=cl->scaledScreen) rfbScaledCorrection(cl->screen, cl->scaledScreen, &x, &y, &w, &h, "rfbSendFramebufferUpdate"); n = rfbNumCodedRectsTight(cl, x, y, w, h); if (n == 0) { nUpdateRegionRects = 0xFFFF; break; } nUpdateRegionRects += n; } sraRgnReleaseIterator(i); i=NULL; #endif #endif #if defined(LIBVNCSERVER_HAVE_LIBJPEG) && defined(LIBVNCSERVER_HAVE_LIBPNG) } else if (cl->preferredEncoding == rfbEncodingTightPng) { nUpdateRegionRects = 0; for(i = sraRgnGetIterator(updateRegion); sraRgnIteratorNext(i,&rect);){ int x = rect.x1; int y = rect.y1; int w = rect.x2 - x; int h = rect.y2 - y; int n; /* We need to count the number of rects in the scaled screen */ if (cl->screen!=cl->scaledScreen) rfbScaledCorrection(cl->screen, cl->scaledScreen, &x, &y, &w, &h, "rfbSendFramebufferUpdate"); n = rfbNumCodedRectsTight(cl, x, y, w, h); if (n == 0) { nUpdateRegionRects = 0xFFFF; break; } nUpdateRegionRects += n; } sraRgnReleaseIterator(i); i=NULL; #endif } else { nUpdateRegionRects = sraRgnCountRects(updateRegion); } fu->type = rfbFramebufferUpdate; if (nUpdateRegionRects != 0xFFFF) { if(cl->screen->maxRectsPerUpdate>0 /* CoRRE splits the screen into smaller squares */ && cl->preferredEncoding != rfbEncodingCoRRE /* Ultra encoding splits rectangles up into smaller chunks */ && cl->preferredEncoding != rfbEncodingUltra #ifdef LIBVNCSERVER_HAVE_LIBZ /* Zlib encoding splits rectangles up into smaller chunks */ && cl->preferredEncoding != rfbEncodingZlib #ifdef 
LIBVNCSERVER_HAVE_LIBJPEG /* Tight encoding counts the rectangles differently */ && cl->preferredEncoding != rfbEncodingTight #endif #endif #ifdef LIBVNCSERVER_HAVE_LIBPNG /* Tight encoding counts the rectangles differently */ && cl->preferredEncoding != rfbEncodingTightPng #endif && nUpdateRegionRects>cl->screen->maxRectsPerUpdate) { sraRegion* newUpdateRegion = sraRgnBBox(updateRegion); sraRgnDestroy(updateRegion); updateRegion = newUpdateRegion; nUpdateRegionRects = sraRgnCountRects(updateRegion); } fu->nRects = Swap16IfLE((uint16_t)(sraRgnCountRects(updateCopyRegion) + nUpdateRegionRects + !!sendCursorShape + !!sendCursorPos + !!sendKeyboardLedState + !!sendSupportedMessages + !!sendSupportedEncodings + !!sendServerIdentity)); } else { fu->nRects = 0xFFFF; } cl->ublen = sz_rfbFramebufferUpdateMsg; if (sendCursorShape) { cl->cursorWasChanged = FALSE; if (!rfbSendCursorShape(cl)) goto updateFailed; } if (sendCursorPos) { cl->cursorWasMoved = FALSE; if (!rfbSendCursorPos(cl)) goto updateFailed; } if (sendKeyboardLedState) { if (!rfbSendKeyboardLedState(cl)) goto updateFailed; } if (sendSupportedMessages) { if (!rfbSendSupportedMessages(cl)) goto updateFailed; } if (sendSupportedEncodings) { if (!rfbSendSupportedEncodings(cl)) goto updateFailed; } if (sendServerIdentity) { if (!rfbSendServerIdentity(cl)) goto updateFailed; } if (!sraRgnEmpty(updateCopyRegion)) { if (!rfbSendCopyRegion(cl,updateCopyRegion,dx,dy)) goto updateFailed; } for(i = sraRgnGetIterator(updateRegion); sraRgnIteratorNext(i,&rect);){ int x = rect.x1; int y = rect.y1; int w = rect.x2 - x; int h = rect.y2 - y; /* We need to count the number of rects in the scaled screen */ if (cl->screen!=cl->scaledScreen) rfbScaledCorrection(cl->screen, cl->scaledScreen, &x, &y, &w, &h, "rfbSendFramebufferUpdate"); switch (cl->preferredEncoding) { case -1: case rfbEncodingRaw: if (!rfbSendRectEncodingRaw(cl, x, y, w, h)) goto updateFailed; break; case rfbEncodingRRE: if (!rfbSendRectEncodingRRE(cl, x, y, w, h)) goto updateFailed; break; case rfbEncodingCoRRE: if (!rfbSendRectEncodingCoRRE(cl, x, y, w, h)) goto updateFailed; break; case rfbEncodingHextile: if (!rfbSendRectEncodingHextile(cl, x, y, w, h)) goto updateFailed; break; case rfbEncodingUltra: if (!rfbSendRectEncodingUltra(cl, x, y, w, h)) goto updateFailed; break; #ifdef LIBVNCSERVER_HAVE_LIBZ case rfbEncodingZlib: if (!rfbSendRectEncodingZlib(cl, x, y, w, h)) goto updateFailed; break; case rfbEncodingZRLE: case rfbEncodingZYWRLE: if (!rfbSendRectEncodingZRLE(cl, x, y, w, h)) goto updateFailed; break; #endif #if defined(LIBVNCSERVER_HAVE_LIBJPEG) && (defined(LIBVNCSERVER_HAVE_LIBZ) || defined(LIBVNCSERVER_HAVE_LIBPNG)) case rfbEncodingTight: if (!rfbSendRectEncodingTight(cl, x, y, w, h)) goto updateFailed; break; #ifdef LIBVNCSERVER_HAVE_LIBPNG case rfbEncodingTightPng: if (!rfbSendRectEncodingTightPng(cl, x, y, w, h)) goto updateFailed; break; #endif #endif } } if (i) { sraRgnReleaseIterator(i); i = NULL; } if ( nUpdateRegionRects == 0xFFFF && !rfbSendLastRectMarker(cl) ) goto updateFailed; if (!rfbSendUpdateBuf(cl)) { updateFailed: result = FALSE; } if (!cl->enableCursorShapeUpdates) { rfbHideCursor(cl); } if(i) sraRgnReleaseIterator(i); sraRgnDestroy(updateRegion); sraRgnDestroy(updateCopyRegion); if(cl->screen->displayFinishedHook) cl->screen->displayFinishedHook(cl, result); return result; } /* * Send the copy region as a string of CopyRect encoded rectangles. 
* The only slightly tricky thing is that we should send the messages in * the correct order so that an earlier CopyRect will not corrupt the source * of a later one. */ rfbBool rfbSendCopyRegion(rfbClientPtr cl, sraRegionPtr reg, int dx, int dy) { int x, y, w, h; rfbFramebufferUpdateRectHeader rect; rfbCopyRect cr; sraRectangleIterator* i; sraRect rect1; /* printf("copyrect: "); sraRgnPrint(reg); putchar('\n');fflush(stdout); */ i = sraRgnGetReverseIterator(reg,dx>0,dy>0); /* correct for the scale of the screen */ dx = ScaleX(cl->screen, cl->scaledScreen, dx); dy = ScaleX(cl->screen, cl->scaledScreen, dy); while(sraRgnIteratorNext(i,&rect1)) { x = rect1.x1; y = rect1.y1; w = rect1.x2 - x; h = rect1.y2 - y; /* correct for scaling (if necessary) */ rfbScaledCorrection(cl->screen, cl->scaledScreen, &x, &y, &w, &h, "copyrect"); rect.r.x = Swap16IfLE(x); rect.r.y = Swap16IfLE(y); rect.r.w = Swap16IfLE(w); rect.r.h = Swap16IfLE(h); rect.encoding = Swap32IfLE(rfbEncodingCopyRect); memcpy(&cl->updateBuf[cl->ublen], (char *)&rect, sz_rfbFramebufferUpdateRectHeader); cl->ublen += sz_rfbFramebufferUpdateRectHeader; cr.srcX = Swap16IfLE(x - dx); cr.srcY = Swap16IfLE(y - dy); memcpy(&cl->updateBuf[cl->ublen], (char *)&cr, sz_rfbCopyRect); cl->ublen += sz_rfbCopyRect; rfbStatRecordEncodingSent(cl, rfbEncodingCopyRect, sz_rfbFramebufferUpdateRectHeader + sz_rfbCopyRect, w * h * (cl->scaledScreen->bitsPerPixel / 8)); } sraRgnReleaseIterator(i); return TRUE; } /* * Send a given rectangle in raw encoding (rfbEncodingRaw). */ rfbBool rfbSendRectEncodingRaw(rfbClientPtr cl, int x, int y, int w, int h) { rfbFramebufferUpdateRectHeader rect; int nlines; int bytesPerLine = w * (cl->format.bitsPerPixel / 8); char *fbptr = (cl->scaledScreen->frameBuffer + (cl->scaledScreen->paddedWidthInBytes * y) + (x * (cl->scaledScreen->bitsPerPixel / 8))); /* Flush the buffer to guarantee correct alignment for translateFn(). */ if (cl->ublen > 0) { if (!rfbSendUpdateBuf(cl)) return FALSE; } rect.r.x = Swap16IfLE(x); rect.r.y = Swap16IfLE(y); rect.r.w = Swap16IfLE(w); rect.r.h = Swap16IfLE(h); rect.encoding = Swap32IfLE(rfbEncodingRaw); memcpy(&cl->updateBuf[cl->ublen], (char *)&rect,sz_rfbFramebufferUpdateRectHeader); cl->ublen += sz_rfbFramebufferUpdateRectHeader; rfbStatRecordEncodingSent(cl, rfbEncodingRaw, sz_rfbFramebufferUpdateRectHeader + bytesPerLine * h, sz_rfbFramebufferUpdateRectHeader + bytesPerLine * h); nlines = (UPDATE_BUF_SIZE - cl->ublen) / bytesPerLine; while (TRUE) { if (nlines > h) nlines = h; (*cl->translateFn)(cl->translateLookupTable, &(cl->screen->serverFormat), &cl->format, fbptr, &cl->updateBuf[cl->ublen], cl->scaledScreen->paddedWidthInBytes, w, nlines); cl->ublen += nlines * bytesPerLine; h -= nlines; if (h == 0) /* rect fitted in buffer, do next one */ return TRUE; /* buffer full - flush partial rect and do another nlines */ if (!rfbSendUpdateBuf(cl)) return FALSE; fbptr += (cl->scaledScreen->paddedWidthInBytes * nlines); nlines = (UPDATE_BUF_SIZE - cl->ublen) / bytesPerLine; if (nlines == 0) { rfbErr("rfbSendRectEncodingRaw: send buffer too small for %d " "bytes per line\n", bytesPerLine); rfbCloseClient(cl); return FALSE; } } } /* * Send an empty rectangle with encoding field set to value of * rfbEncodingLastRect to notify client that this is the last * rectangle in framebuffer update ("LastRect" extension of RFB * protocol). 
*/ rfbBool rfbSendLastRectMarker(rfbClientPtr cl) { rfbFramebufferUpdateRectHeader rect; if (cl->ublen + sz_rfbFramebufferUpdateRectHeader > UPDATE_BUF_SIZE) { if (!rfbSendUpdateBuf(cl)) return FALSE; } rect.encoding = Swap32IfLE(rfbEncodingLastRect); rect.r.x = 0; rect.r.y = 0; rect.r.w = 0; rect.r.h = 0; memcpy(&cl->updateBuf[cl->ublen], (char *)&rect,sz_rfbFramebufferUpdateRectHeader); cl->ublen += sz_rfbFramebufferUpdateRectHeader; rfbStatRecordEncodingSent(cl, rfbEncodingLastRect, sz_rfbFramebufferUpdateRectHeader, sz_rfbFramebufferUpdateRectHeader); return TRUE; } /* * Send NewFBSize pseudo-rectangle. This tells the client to change * its framebuffer size. */ rfbBool rfbSendNewFBSize(rfbClientPtr cl, int w, int h) { rfbFramebufferUpdateRectHeader rect; if (cl->ublen + sz_rfbFramebufferUpdateRectHeader > UPDATE_BUF_SIZE) { if (!rfbSendUpdateBuf(cl)) return FALSE; } if (cl->PalmVNC==TRUE) rfbLog("Sending rfbEncodingNewFBSize in response to a PalmVNC style framebuffer resize (%dx%d)\n", w, h); else rfbLog("Sending rfbEncodingNewFBSize for resize to (%dx%d)\n", w, h); rect.encoding = Swap32IfLE(rfbEncodingNewFBSize); rect.r.x = 0; rect.r.y = 0; rect.r.w = Swap16IfLE(w); rect.r.h = Swap16IfLE(h); memcpy(&cl->updateBuf[cl->ublen], (char *)&rect, sz_rfbFramebufferUpdateRectHeader); cl->ublen += sz_rfbFramebufferUpdateRectHeader; rfbStatRecordEncodingSent(cl, rfbEncodingNewFBSize, sz_rfbFramebufferUpdateRectHeader, sz_rfbFramebufferUpdateRectHeader); return TRUE; } /* * Send the contents of cl->updateBuf. Returns 1 if successful, -1 if * not (errno should be set). */ rfbBool rfbSendUpdateBuf(rfbClientPtr cl) { if(cl->sock<0) return FALSE; if (rfbWriteExact(cl, cl->updateBuf, cl->ublen) < 0) { rfbLogPerror("rfbSendUpdateBuf: write"); rfbCloseClient(cl); return FALSE; } cl->ublen = 0; return TRUE; } /* * rfbSendSetColourMapEntries sends a SetColourMapEntries message to the * client, using values from the currently installed colormap. */ rfbBool rfbSendSetColourMapEntries(rfbClientPtr cl, int firstColour, int nColours) { char buf[sz_rfbSetColourMapEntriesMsg + 256 * 3 * 2]; char *wbuf = buf; rfbSetColourMapEntriesMsg *scme; uint16_t *rgb; rfbColourMap* cm = &cl->screen->colourMap; int i, len; if (nColours > 256) { /* some rare hardware has, e.g., 4096 colors cells: PseudoColor:12 */ wbuf = (char *) malloc(sz_rfbSetColourMapEntriesMsg + nColours * 3 * 2); } scme = (rfbSetColourMapEntriesMsg *)wbuf; rgb = (uint16_t *)(&wbuf[sz_rfbSetColourMapEntriesMsg]); scme->type = rfbSetColourMapEntries; scme->firstColour = Swap16IfLE(firstColour); scme->nColours = Swap16IfLE(nColours); len = sz_rfbSetColourMapEntriesMsg; for (i = 0; i < nColours; i++) { if(i<(int)cm->count) { if(cm->is16) { rgb[i*3] = Swap16IfLE(cm->data.shorts[i*3]); rgb[i*3+1] = Swap16IfLE(cm->data.shorts[i*3+1]); rgb[i*3+2] = Swap16IfLE(cm->data.shorts[i*3+2]); } else { rgb[i*3] = Swap16IfLE((unsigned short)cm->data.bytes[i*3]); rgb[i*3+1] = Swap16IfLE((unsigned short)cm->data.bytes[i*3+1]); rgb[i*3+2] = Swap16IfLE((unsigned short)cm->data.bytes[i*3+2]); } } } len += nColours * 3 * 2; LOCK(cl->sendMutex); if (rfbWriteExact(cl, wbuf, len) < 0) { rfbLogPerror("rfbSendSetColourMapEntries: write"); rfbCloseClient(cl); if (wbuf != buf) free(wbuf); UNLOCK(cl->sendMutex); return FALSE; } UNLOCK(cl->sendMutex); rfbStatRecordMessageSent(cl, rfbSetColourMapEntries, len, len); if (wbuf != buf) free(wbuf); return TRUE; } /* * rfbSendBell sends a Bell message to all the clients. 
*/ void rfbSendBell(rfbScreenInfoPtr rfbScreen) { rfbClientIteratorPtr i; rfbClientPtr cl; rfbBellMsg b; i = rfbGetClientIterator(rfbScreen); while((cl=rfbClientIteratorNext(i))) { b.type = rfbBell; LOCK(cl->sendMutex); if (rfbWriteExact(cl, (char *)&b, sz_rfbBellMsg) < 0) { rfbLogPerror("rfbSendBell: write"); rfbCloseClient(cl); } UNLOCK(cl->sendMutex); } rfbStatRecordMessageSent(cl, rfbBell, sz_rfbBellMsg, sz_rfbBellMsg); rfbReleaseClientIterator(i); } /* * rfbSendServerCutText sends a ServerCutText message to all the clients. */ void rfbSendServerCutText(rfbScreenInfoPtr rfbScreen,char *str, int len) { rfbClientPtr cl; rfbServerCutTextMsg sct; rfbClientIteratorPtr iterator; iterator = rfbGetClientIterator(rfbScreen); while ((cl = rfbClientIteratorNext(iterator)) != NULL) { sct.type = rfbServerCutText; sct.length = Swap32IfLE(len); LOCK(cl->sendMutex); if (rfbWriteExact(cl, (char *)&sct, sz_rfbServerCutTextMsg) < 0) { rfbLogPerror("rfbSendServerCutText: write"); rfbCloseClient(cl); UNLOCK(cl->sendMutex); continue; } if (rfbWriteExact(cl, str, len) < 0) { rfbLogPerror("rfbSendServerCutText: write"); rfbCloseClient(cl); } UNLOCK(cl->sendMutex); rfbStatRecordMessageSent(cl, rfbServerCutText, sz_rfbServerCutTextMsg+len, sz_rfbServerCutTextMsg+len); } rfbReleaseClientIterator(iterator); } /***************************************************************************** * * UDP can be used for keyboard and pointer events when the underlying * network is highly reliable. This is really here to support ORL's * videotile, whose TCP implementation doesn't like sending lots of small * packets (such as 100s of pen readings per second!). */ static unsigned char ptrAcceleration = 50; void rfbNewUDPConnection(rfbScreenInfoPtr rfbScreen, int sock) { if (write(sock, (char*) &ptrAcceleration, 1) < 0) { rfbLogPerror("rfbNewUDPConnection: write"); } } /* * Because UDP is a message based service, we can't read the first byte and * then the rest of the packet separately like we do with TCP. We will always * get a whole packet delivered in one go, so we ask read() for the maximum * number of bytes we can possibly get. */ void rfbProcessUDPInput(rfbScreenInfoPtr rfbScreen) { int n; rfbClientPtr cl=rfbScreen->udpClient; rfbClientToServerMsg msg; if((!cl) || cl->onHold) return; if ((n = read(rfbScreen->udpSock, (char *)&msg, sizeof(msg))) <= 0) { if (n < 0) { rfbLogPerror("rfbProcessUDPInput: read"); } rfbDisconnectUDPSock(rfbScreen); return; } switch (msg.type) { case rfbKeyEvent: if (n != sz_rfbKeyEventMsg) { rfbErr("rfbProcessUDPInput: key event incorrect length\n"); rfbDisconnectUDPSock(rfbScreen); return; } cl->screen->kbdAddEvent(msg.ke.down, (rfbKeySym)Swap32IfLE(msg.ke.key), cl); break; case rfbPointerEvent: if (n != sz_rfbPointerEventMsg) { rfbErr("rfbProcessUDPInput: ptr event incorrect length\n"); rfbDisconnectUDPSock(rfbScreen); return; } cl->screen->ptrAddEvent(msg.pe.buttonMask, Swap16IfLE(msg.pe.x), Swap16IfLE(msg.pe.y), cl); break; default: rfbErr("rfbProcessUDPInput: unknown message type %d\n", msg.type); rfbDisconnectUDPSock(rfbScreen); } }
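/*
 * [Editorial sketch, not part of the recorded source.] The rfbClientCutText
 * handler above caps an attacker-controlled uint32_t length at 1 MB before it
 * ever reaches malloc()/rfbReadExact()/setXCutText() (see its CVE-2018-7225
 * comment). A minimal self-contained version of that bounded-read idiom
 * follows; read_exact() and CUT_TEXT_MAX are illustrative stand-ins, not
 * LibVNCServer API.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define CUT_TEXT_MAX (1u << 20)                 /* 1 MB cap, as in the handler */

static int read_exact(char *buf, uint32_t len)  /* stand-in for rfbReadExact() */
{
    memset(buf, 'x', len);                      /* pretend len bytes arrived */
    return (int)len;
}

static char *read_cut_text(uint32_t length)
{
    if (length > CUT_TEXT_MAX)                  /* reject before allocating */
        return NULL;
    char *buf = calloc(length ? length : 1, 1); /* zero-length text is legal */
    if (buf != NULL && length > 0 && read_exact(buf, length) <= 0) {
        free(buf);
        return NULL;
    }
    return buf;
}

int main(void)
{
    char *ok = read_cut_text(16);
    char *rejected = read_cut_text(UINT32_MAX);
    printf("ok=%d rejected=%d\n", ok != NULL, rejected != NULL);
    free(ok);
    return 0;
}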
char *rfbProcessFileTransferReadBuffer(rfbClientPtr cl, uint32_t length)
{
    char *buffer=NULL;
    int n=0;

    FILEXFER_ALLOWED_OR_CLOSE_AND_RETURN("", cl, NULL);

    /* We later alloc length+1, which might wrap around on 32-bit systems if
       length equals 0XFFFFFFFF, i.e. SIZE_MAX for 32-bit systems. On 64-bit
       systems, a length of 0XFFFFFFFF will safely be allocated since this
       check will never trigger and malloc() can digest length+1 without
       problems as length is a uint32_t. */
    if(length == SIZE_MAX) {
        rfbErr("rfbProcessFileTransferReadBuffer: too big file transfer length requested: %u", (unsigned int)length);
        rfbCloseClient(cl);
        return NULL;
    }

    if (length>0) {
        buffer=malloc((size_t)length+1);
        if (buffer!=NULL) {
            if ((n = rfbReadExact(cl, (char *)buffer, length)) <= 0) {
                if (n != 0)
                    rfbLogPerror("rfbProcessFileTransferReadBuffer: read");
                rfbCloseClient(cl);
                /* NOTE: don't forget to free(buffer) if you return early! */
                if (buffer!=NULL) free(buffer);
                return NULL;
            }
            /* Null Terminate */
            buffer[length]=0;
        }
    }

    return buffer;
}
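/*
 * [Editorial note.] The SIZE_MAX guard above closes the malloc(length+1)
 * wrap, but func_before still hands length to rfbReadExact(), whose length
 * parameter is a signed int: any value above INT_MAX narrows to a negative
 * count on the usual ILP32/LP64 platforms. A tiny safe demonstration of that
 * narrowing (values are hypothetical; the conversion is implementation-
 * defined in C, but wraps on two's-complement targets):
 */
#include <inttypes.h>
#include <limits.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t length = 0x80000000u;   /* > INT_MAX, yet != SIZE_MAX on 32-bit */
    int narrowed = (int)length;
    printf("uint32_t %" PRIu32 " -> int %d (INT_MAX = %d)\n",
           length, narrowed, INT_MAX);
    return 0;
}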
char *rfbProcessFileTransferReadBuffer(rfbClientPtr cl, uint32_t length)
{
    char *buffer=NULL;
    int n=0;

    FILEXFER_ALLOWED_OR_CLOSE_AND_RETURN("", cl, NULL);

    /* We later alloc length+1, which might wrap around on 32-bit systems if
       length equals 0XFFFFFFFF, i.e. SIZE_MAX for 32-bit systems. On 64-bit
       systems, a length of 0XFFFFFFFF will safely be allocated since this
       check will never trigger and malloc() can digest length+1 without
       problems as length is a uint32_t.
       We also later pass length to rfbReadExact() that expects a signed int
       type and that might wrap on platforms with a 32-bit int type if length
       is bigger than 0X7FFFFFFF. */
    if(length == SIZE_MAX || length > INT_MAX) {
        rfbErr("rfbProcessFileTransferReadBuffer: too big file transfer length requested: %u", (unsigned int)length);
        rfbCloseClient(cl);
        return NULL;
    }

    if (length>0) {
        buffer=malloc((size_t)length+1);
        if (buffer!=NULL) {
            if ((n = rfbReadExact(cl, (char *)buffer, length)) <= 0) {
                if (n != 0)
                    rfbLogPerror("rfbProcessFileTransferReadBuffer: read");
                rfbCloseClient(cl);
                /* NOTE: don't forget to free(buffer) if you return early! */
                if (buffer!=NULL) free(buffer);
                return NULL;
            }
            /* Null Terminate */
            buffer[length]=0;
        }
    }

    return buffer;
}
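/*
 * [Editorial follow-up.] The fixed version's two conditions collapse into one
 * reusable predicate, sketched below (the helper name is illustrative, not
 * LibVNCServer API). Design note: on a 32-bit size_t platform with 32-bit
 * int, the INT_MAX test already subsumes the SIZE_MAX test, and against a
 * 64-bit SIZE_MAX a uint32_t can never compare equal, so keeping both tests
 * is about stating the intent explicitly on every platform rather than
 * strict necessity.
 */
#include <limits.h>
#include <stdbool.h>
#include <stdint.h>

static bool xfer_length_ok(uint32_t length)
{
    /* length == SIZE_MAX would make malloc(length + 1) wrap to malloc(0);
     * length > INT_MAX would turn rfbReadExact()'s int count negative. */
    return length != SIZE_MAX && length <= (uint32_t)INT_MAX;
}

int main(void)
{
    return xfer_length_ok(1024u) ? 0 : 1;
}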
{'added': [(91, '/* INT_MAX */'), (92, '#include <limits.h>'), (1477, ' We also later pass length to rfbReadExact() that expects a signed int type and'), (1478, ' that might wrap on platforms with a 32-bit int type if length is bigger'), (1479, ' than 0X7FFFFFFF.'), (1481, ' if(length == SIZE_MAX || length > INT_MAX) {')], 'deleted': [(1476, ' if(length == SIZE_MAX) {')]}
6
1
2,482
16,646
https://github.com/LibVNC/libvncserver
CVE-2018-20750
['CWE-787']
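/*
 * [Editorial note on the CWE-787 tag above.] The out-of-bounds write in the
 * unpatched function is mechanical: on a 32-bit system, length == 0xFFFFFFFF
 * makes malloc((size_t)length + 1) wrap to malloc(0), after which
 * buffer[length] = 0 writes roughly 4 GB past the allocation. The demo below
 * models a 32-bit size_t with uint32_t so it is safe to run anywhere; it
 * performs the arithmetic only, never the bad write.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t length = 0xFFFFFFFFu;      /* SIZE_MAX when size_t is 32 bits */
    uint32_t alloc_size = length + 1u;  /* wraps to 0, i.e. malloc(0) */
    printf("allocation request: %" PRIu32 " bytes; later write at offset %" PRIu32 "\n",
           alloc_size, length);
    return 0;
}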
print-nfs.c
interp_reply
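/*
 * [Editorial preview of the print-nfs.c source below.] The dissector's safety
 * convention is visible throughout the file: each EXTRACT_32BITS/
 * EXTRACT_64BITS is meant to sit behind an ND_TCHECK-family test that jumps
 * to trunc on short captures (parse_wcc_attr, for one, performs no check of
 * its own and relies on its caller's ND_TCHECK2(*dp, 24)). A minimal sketch
 * of that guard idiom, with a hypothetical bounds_ok() standing in for the
 * netdissect macros:
 */
#include <stddef.h>
#include <stdint.h>

static const uint8_t *snapend;               /* one past the captured bytes */

static int bounds_ok(const void *p, size_t n)
{
    return (const uint8_t *)p + n <= snapend;   /* the ND_TCHECK step */
}

/* Returns 0 on truncation instead of reading past the capture. */
int parse_two_words(const uint32_t *dp, uint32_t *a, uint32_t *b)
{
    if (!bounds_ok(dp, 2 * sizeof(*dp)))
        return 0;
    *a = dp[0];
    *b = dp[1];
    return 1;
}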
/* * Copyright (c) 1988, 1989, 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that: (1) source code distributions * retain the above copyright notice and this paragraph in its entirety, (2) * distributions including binary code include the above copyright notice and * this paragraph in its entirety in the documentation or other materials * provided with the distribution, and (3) all advertising materials mentioning * features or use of this software display the following acknowledgement: * ``This product includes software developed by the University of California, * Lawrence Berkeley Laboratory and its contributors.'' Neither the name of * the University nor the names of its contributors may be used to endorse * or promote products derived from this software without specific prior * written permission. * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. */ /* \summary: Network File System (NFS) printer */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include <netdissect-stdinc.h> #include <stdio.h> #include <string.h> #include "netdissect.h" #include "addrtoname.h" #include "extract.h" #include "nfs.h" #include "nfsfh.h" #include "ip.h" #include "ip6.h" #include "rpc_auth.h" #include "rpc_msg.h" static const char tstr[] = " [|nfs]"; static void nfs_printfh(netdissect_options *, const uint32_t *, const u_int); static int xid_map_enter(netdissect_options *, const struct sunrpc_msg *, const u_char *); static int xid_map_find(const struct sunrpc_msg *, const u_char *, uint32_t *, uint32_t *); static void interp_reply(netdissect_options *, const struct sunrpc_msg *, uint32_t, uint32_t, int); static const uint32_t *parse_post_op_attr(netdissect_options *, const uint32_t *, int); /* * Mapping of old NFS Version 2 RPC numbers to generic numbers. */ static uint32_t nfsv3_procid[NFS_NPROCS] = { NFSPROC_NULL, NFSPROC_GETATTR, NFSPROC_SETATTR, NFSPROC_NOOP, NFSPROC_LOOKUP, NFSPROC_READLINK, NFSPROC_READ, NFSPROC_NOOP, NFSPROC_WRITE, NFSPROC_CREATE, NFSPROC_REMOVE, NFSPROC_RENAME, NFSPROC_LINK, NFSPROC_SYMLINK, NFSPROC_MKDIR, NFSPROC_RMDIR, NFSPROC_READDIR, NFSPROC_FSSTAT, NFSPROC_NOOP, NFSPROC_NOOP, NFSPROC_NOOP, NFSPROC_NOOP, NFSPROC_NOOP, NFSPROC_NOOP, NFSPROC_NOOP, NFSPROC_NOOP }; static const struct tok nfsproc_str[] = { { NFSPROC_NOOP, "nop" }, { NFSPROC_NULL, "null" }, { NFSPROC_GETATTR, "getattr" }, { NFSPROC_SETATTR, "setattr" }, { NFSPROC_LOOKUP, "lookup" }, { NFSPROC_ACCESS, "access" }, { NFSPROC_READLINK, "readlink" }, { NFSPROC_READ, "read" }, { NFSPROC_WRITE, "write" }, { NFSPROC_CREATE, "create" }, { NFSPROC_MKDIR, "mkdir" }, { NFSPROC_SYMLINK, "symlink" }, { NFSPROC_MKNOD, "mknod" }, { NFSPROC_REMOVE, "remove" }, { NFSPROC_RMDIR, "rmdir" }, { NFSPROC_RENAME, "rename" }, { NFSPROC_LINK, "link" }, { NFSPROC_READDIR, "readdir" }, { NFSPROC_READDIRPLUS, "readdirplus" }, { NFSPROC_FSSTAT, "fsstat" }, { NFSPROC_FSINFO, "fsinfo" }, { NFSPROC_PATHCONF, "pathconf" }, { NFSPROC_COMMIT, "commit" }, { 0, NULL } }; /* * NFS V2 and V3 status values. * * Some of these come from the RFCs for NFS V2 and V3, with the message * strings taken from the FreeBSD C library "errlst.c". 
* * Others are errors that are not in the RFC but that I suspect some * NFS servers could return; the values are FreeBSD errno values, as * the first NFS server was the SunOS 2.0 one, and until 5.0 SunOS * was primarily BSD-derived. */ static const struct tok status2str[] = { { 1, "Operation not permitted" }, /* EPERM */ { 2, "No such file or directory" }, /* ENOENT */ { 5, "Input/output error" }, /* EIO */ { 6, "Device not configured" }, /* ENXIO */ { 11, "Resource deadlock avoided" }, /* EDEADLK */ { 12, "Cannot allocate memory" }, /* ENOMEM */ { 13, "Permission denied" }, /* EACCES */ { 17, "File exists" }, /* EEXIST */ { 18, "Cross-device link" }, /* EXDEV */ { 19, "Operation not supported by device" }, /* ENODEV */ { 20, "Not a directory" }, /* ENOTDIR */ { 21, "Is a directory" }, /* EISDIR */ { 22, "Invalid argument" }, /* EINVAL */ { 26, "Text file busy" }, /* ETXTBSY */ { 27, "File too large" }, /* EFBIG */ { 28, "No space left on device" }, /* ENOSPC */ { 30, "Read-only file system" }, /* EROFS */ { 31, "Too many links" }, /* EMLINK */ { 45, "Operation not supported" }, /* EOPNOTSUPP */ { 62, "Too many levels of symbolic links" }, /* ELOOP */ { 63, "File name too long" }, /* ENAMETOOLONG */ { 66, "Directory not empty" }, /* ENOTEMPTY */ { 69, "Disc quota exceeded" }, /* EDQUOT */ { 70, "Stale NFS file handle" }, /* ESTALE */ { 71, "Too many levels of remote in path" }, /* EREMOTE */ { 99, "Write cache flushed to disk" }, /* NFSERR_WFLUSH (not used) */ { 10001, "Illegal NFS file handle" }, /* NFS3ERR_BADHANDLE */ { 10002, "Update synchronization mismatch" }, /* NFS3ERR_NOT_SYNC */ { 10003, "READDIR/READDIRPLUS cookie is stale" }, /* NFS3ERR_BAD_COOKIE */ { 10004, "Operation not supported" }, /* NFS3ERR_NOTSUPP */ { 10005, "Buffer or request is too small" }, /* NFS3ERR_TOOSMALL */ { 10006, "Unspecified error on server" }, /* NFS3ERR_SERVERFAULT */ { 10007, "Object of that type not supported" }, /* NFS3ERR_BADTYPE */ { 10008, "Request couldn't be completed in time" }, /* NFS3ERR_JUKEBOX */ { 0, NULL } }; static const struct tok nfsv3_writemodes[] = { { 0, "unstable" }, { 1, "datasync" }, { 2, "filesync" }, { 0, NULL } }; static const struct tok type2str[] = { { NFNON, "NON" }, { NFREG, "REG" }, { NFDIR, "DIR" }, { NFBLK, "BLK" }, { NFCHR, "CHR" }, { NFLNK, "LNK" }, { NFFIFO, "FIFO" }, { 0, NULL } }; static const struct tok sunrpc_auth_str[] = { { SUNRPC_AUTH_OK, "OK" }, { SUNRPC_AUTH_BADCRED, "Bogus Credentials (seal broken)" }, { SUNRPC_AUTH_REJECTEDCRED, "Rejected Credentials (client should begin new session)" }, { SUNRPC_AUTH_BADVERF, "Bogus Verifier (seal broken)" }, { SUNRPC_AUTH_REJECTEDVERF, "Verifier expired or was replayed" }, { SUNRPC_AUTH_TOOWEAK, "Credentials are too weak" }, { SUNRPC_AUTH_INVALIDRESP, "Bogus response verifier" }, { SUNRPC_AUTH_FAILED, "Unknown failure" }, { 0, NULL } }; static const struct tok sunrpc_str[] = { { SUNRPC_PROG_UNAVAIL, "PROG_UNAVAIL" }, { SUNRPC_PROG_MISMATCH, "PROG_MISMATCH" }, { SUNRPC_PROC_UNAVAIL, "PROC_UNAVAIL" }, { SUNRPC_GARBAGE_ARGS, "GARBAGE_ARGS" }, { SUNRPC_SYSTEM_ERR, "SYSTEM_ERR" }, { 0, NULL } }; static void print_nfsaddr(netdissect_options *ndo, const u_char *bp, const char *s, const char *d) { const struct ip *ip; const struct ip6_hdr *ip6; char srcaddr[INET6_ADDRSTRLEN], dstaddr[INET6_ADDRSTRLEN]; srcaddr[0] = dstaddr[0] = '\0'; switch (IP_V((const struct ip *)bp)) { case 4: ip = (const struct ip *)bp; strlcpy(srcaddr, ipaddr_string(ndo, &ip->ip_src), sizeof(srcaddr)); strlcpy(dstaddr, ipaddr_string(ndo, &ip->ip_dst), 
sizeof(dstaddr)); break; case 6: ip6 = (const struct ip6_hdr *)bp; strlcpy(srcaddr, ip6addr_string(ndo, &ip6->ip6_src), sizeof(srcaddr)); strlcpy(dstaddr, ip6addr_string(ndo, &ip6->ip6_dst), sizeof(dstaddr)); break; default: strlcpy(srcaddr, "?", sizeof(srcaddr)); strlcpy(dstaddr, "?", sizeof(dstaddr)); break; } ND_PRINT((ndo, "%s.%s > %s.%s: ", srcaddr, s, dstaddr, d)); } static const uint32_t * parse_sattr3(netdissect_options *ndo, const uint32_t *dp, struct nfsv3_sattr *sa3) { ND_TCHECK(dp[0]); sa3->sa_modeset = EXTRACT_32BITS(dp); dp++; if (sa3->sa_modeset) { ND_TCHECK(dp[0]); sa3->sa_mode = EXTRACT_32BITS(dp); dp++; } ND_TCHECK(dp[0]); sa3->sa_uidset = EXTRACT_32BITS(dp); dp++; if (sa3->sa_uidset) { ND_TCHECK(dp[0]); sa3->sa_uid = EXTRACT_32BITS(dp); dp++; } ND_TCHECK(dp[0]); sa3->sa_gidset = EXTRACT_32BITS(dp); dp++; if (sa3->sa_gidset) { ND_TCHECK(dp[0]); sa3->sa_gid = EXTRACT_32BITS(dp); dp++; } ND_TCHECK(dp[0]); sa3->sa_sizeset = EXTRACT_32BITS(dp); dp++; if (sa3->sa_sizeset) { ND_TCHECK(dp[0]); sa3->sa_size = EXTRACT_32BITS(dp); dp++; } ND_TCHECK(dp[0]); sa3->sa_atimetype = EXTRACT_32BITS(dp); dp++; if (sa3->sa_atimetype == NFSV3SATTRTIME_TOCLIENT) { ND_TCHECK(dp[1]); sa3->sa_atime.nfsv3_sec = EXTRACT_32BITS(dp); dp++; sa3->sa_atime.nfsv3_nsec = EXTRACT_32BITS(dp); dp++; } ND_TCHECK(dp[0]); sa3->sa_mtimetype = EXTRACT_32BITS(dp); dp++; if (sa3->sa_mtimetype == NFSV3SATTRTIME_TOCLIENT) { ND_TCHECK(dp[1]); sa3->sa_mtime.nfsv3_sec = EXTRACT_32BITS(dp); dp++; sa3->sa_mtime.nfsv3_nsec = EXTRACT_32BITS(dp); dp++; } return dp; trunc: return NULL; } static int nfserr; /* true if we error rather than trunc */ static void print_sattr3(netdissect_options *ndo, const struct nfsv3_sattr *sa3, int verbose) { if (sa3->sa_modeset) ND_PRINT((ndo, " mode %o", sa3->sa_mode)); if (sa3->sa_uidset) ND_PRINT((ndo, " uid %u", sa3->sa_uid)); if (sa3->sa_gidset) ND_PRINT((ndo, " gid %u", sa3->sa_gid)); if (verbose > 1) { if (sa3->sa_atimetype == NFSV3SATTRTIME_TOCLIENT) ND_PRINT((ndo, " atime %u.%06u", sa3->sa_atime.nfsv3_sec, sa3->sa_atime.nfsv3_nsec)); if (sa3->sa_mtimetype == NFSV3SATTRTIME_TOCLIENT) ND_PRINT((ndo, " mtime %u.%06u", sa3->sa_mtime.nfsv3_sec, sa3->sa_mtime.nfsv3_nsec)); } } void nfsreply_print(netdissect_options *ndo, register const u_char *bp, u_int length, register const u_char *bp2) { register const struct sunrpc_msg *rp; char srcid[20], dstid[20]; /*fits 32bit*/ nfserr = 0; /* assume no error */ rp = (const struct sunrpc_msg *)bp; ND_TCHECK(rp->rm_xid); if (!ndo->ndo_nflag) { strlcpy(srcid, "nfs", sizeof(srcid)); snprintf(dstid, sizeof(dstid), "%u", EXTRACT_32BITS(&rp->rm_xid)); } else { snprintf(srcid, sizeof(srcid), "%u", NFS_PORT); snprintf(dstid, sizeof(dstid), "%u", EXTRACT_32BITS(&rp->rm_xid)); } print_nfsaddr(ndo, bp2, srcid, dstid); nfsreply_print_noaddr(ndo, bp, length, bp2); return; trunc: if (!nfserr) ND_PRINT((ndo, "%s", tstr)); } void nfsreply_print_noaddr(netdissect_options *ndo, register const u_char *bp, u_int length, register const u_char *bp2) { register const struct sunrpc_msg *rp; uint32_t proc, vers, reply_stat; enum sunrpc_reject_stat rstat; uint32_t rlow; uint32_t rhigh; enum sunrpc_auth_stat rwhy; nfserr = 0; /* assume no error */ rp = (const struct sunrpc_msg *)bp; ND_TCHECK(rp->rm_reply.rp_stat); reply_stat = EXTRACT_32BITS(&rp->rm_reply.rp_stat); switch (reply_stat) { case SUNRPC_MSG_ACCEPTED: ND_PRINT((ndo, "reply ok %u", length)); if (xid_map_find(rp, bp2, &proc, &vers) >= 0) interp_reply(ndo, rp, proc, vers, length); break; case SUNRPC_MSG_DENIED: 
ND_PRINT((ndo, "reply ERR %u: ", length)); ND_TCHECK(rp->rm_reply.rp_reject.rj_stat); rstat = EXTRACT_32BITS(&rp->rm_reply.rp_reject.rj_stat); switch (rstat) { case SUNRPC_RPC_MISMATCH: ND_TCHECK(rp->rm_reply.rp_reject.rj_vers.high); rlow = EXTRACT_32BITS(&rp->rm_reply.rp_reject.rj_vers.low); rhigh = EXTRACT_32BITS(&rp->rm_reply.rp_reject.rj_vers.high); ND_PRINT((ndo, "RPC Version mismatch (%u-%u)", rlow, rhigh)); break; case SUNRPC_AUTH_ERROR: ND_TCHECK(rp->rm_reply.rp_reject.rj_why); rwhy = EXTRACT_32BITS(&rp->rm_reply.rp_reject.rj_why); ND_PRINT((ndo, "Auth %s", tok2str(sunrpc_auth_str, "Invalid failure code %u", rwhy))); break; default: ND_PRINT((ndo, "Unknown reason for rejecting rpc message %u", (unsigned int)rstat)); break; } break; default: ND_PRINT((ndo, "reply Unknown rpc response code=%u %u", reply_stat, length)); break; } return; trunc: if (!nfserr) ND_PRINT((ndo, "%s", tstr)); } /* * Return a pointer to the first file handle in the packet. * If the packet was truncated, return 0. */ static const uint32_t * parsereq(netdissect_options *ndo, register const struct sunrpc_msg *rp, register u_int length) { register const uint32_t *dp; register u_int len; /* * find the start of the req data (if we captured it) */ dp = (const uint32_t *)&rp->rm_call.cb_cred; ND_TCHECK(dp[1]); len = EXTRACT_32BITS(&dp[1]); if (len < length) { dp += (len + (2 * sizeof(*dp) + 3)) / sizeof(*dp); ND_TCHECK(dp[1]); len = EXTRACT_32BITS(&dp[1]); if (len < length) { dp += (len + (2 * sizeof(*dp) + 3)) / sizeof(*dp); ND_TCHECK2(dp[0], 0); return (dp); } } trunc: return (NULL); } /* * Print out an NFS file handle and return a pointer to following word. * If packet was truncated, return 0. */ static const uint32_t * parsefh(netdissect_options *ndo, register const uint32_t *dp, int v3) { u_int len; if (v3) { ND_TCHECK(dp[0]); len = EXTRACT_32BITS(dp) / 4; dp++; } else len = NFSX_V2FH / 4; if (ND_TTEST2(*dp, len * sizeof(*dp))) { nfs_printfh(ndo, dp, len); return (dp + len); } trunc: return (NULL); } /* * Print out a file name and return pointer to 32-bit word past it. * If packet was truncated, return 0. */ static const uint32_t * parsefn(netdissect_options *ndo, register const uint32_t *dp) { register uint32_t len; register const u_char *cp; /* Bail if we don't have the string length */ ND_TCHECK(*dp); /* Fetch string length; convert to host order */ len = *dp++; NTOHL(len); ND_TCHECK2(*dp, ((len + 3) & ~3)); cp = (const u_char *)dp; /* Update 32-bit pointer (NFS filenames padded to 32-bit boundaries) */ dp += ((len + 3) & ~3) / sizeof(*dp); ND_PRINT((ndo, "\"")); if (fn_printn(ndo, cp, len, ndo->ndo_snapend)) { ND_PRINT((ndo, "\"")); goto trunc; } ND_PRINT((ndo, "\"")); return (dp); trunc: return NULL; } /* * Print out file handle and file name. * Return pointer to 32-bit word past file name. * If packet was truncated (or there was some other error), return 0. 
*/ static const uint32_t * parsefhn(netdissect_options *ndo, register const uint32_t *dp, int v3) { dp = parsefh(ndo, dp, v3); if (dp == NULL) return (NULL); ND_PRINT((ndo, " ")); return (parsefn(ndo, dp)); } void nfsreq_print_noaddr(netdissect_options *ndo, register const u_char *bp, u_int length, register const u_char *bp2) { register const struct sunrpc_msg *rp; register const uint32_t *dp; nfs_type type; int v3; uint32_t proc; uint32_t access_flags; struct nfsv3_sattr sa3; ND_PRINT((ndo, "%d", length)); nfserr = 0; /* assume no error */ rp = (const struct sunrpc_msg *)bp; if (!xid_map_enter(ndo, rp, bp2)) /* record proc number for later on */ goto trunc; v3 = (EXTRACT_32BITS(&rp->rm_call.cb_vers) == NFS_VER3); proc = EXTRACT_32BITS(&rp->rm_call.cb_proc); if (!v3 && proc < NFS_NPROCS) proc = nfsv3_procid[proc]; ND_PRINT((ndo, " %s", tok2str(nfsproc_str, "proc-%u", proc))); switch (proc) { case NFSPROC_GETATTR: case NFSPROC_SETATTR: case NFSPROC_READLINK: case NFSPROC_FSSTAT: case NFSPROC_FSINFO: case NFSPROC_PATHCONF: if ((dp = parsereq(ndo, rp, length)) != NULL && parsefh(ndo, dp, v3) != NULL) return; break; case NFSPROC_LOOKUP: case NFSPROC_CREATE: case NFSPROC_MKDIR: case NFSPROC_REMOVE: case NFSPROC_RMDIR: if ((dp = parsereq(ndo, rp, length)) != NULL && parsefhn(ndo, dp, v3) != NULL) return; break; case NFSPROC_ACCESS: if ((dp = parsereq(ndo, rp, length)) != NULL && (dp = parsefh(ndo, dp, v3)) != NULL) { ND_TCHECK(dp[0]); access_flags = EXTRACT_32BITS(&dp[0]); if (access_flags & ~NFSV3ACCESS_FULL) { /* NFSV3ACCESS definitions aren't up to date */ ND_PRINT((ndo, " %04x", access_flags)); } else if ((access_flags & NFSV3ACCESS_FULL) == NFSV3ACCESS_FULL) { ND_PRINT((ndo, " NFS_ACCESS_FULL")); } else { char separator = ' '; if (access_flags & NFSV3ACCESS_READ) { ND_PRINT((ndo, " NFS_ACCESS_READ")); separator = '|'; } if (access_flags & NFSV3ACCESS_LOOKUP) { ND_PRINT((ndo, "%cNFS_ACCESS_LOOKUP", separator)); separator = '|'; } if (access_flags & NFSV3ACCESS_MODIFY) { ND_PRINT((ndo, "%cNFS_ACCESS_MODIFY", separator)); separator = '|'; } if (access_flags & NFSV3ACCESS_EXTEND) { ND_PRINT((ndo, "%cNFS_ACCESS_EXTEND", separator)); separator = '|'; } if (access_flags & NFSV3ACCESS_DELETE) { ND_PRINT((ndo, "%cNFS_ACCESS_DELETE", separator)); separator = '|'; } if (access_flags & NFSV3ACCESS_EXECUTE) ND_PRINT((ndo, "%cNFS_ACCESS_EXECUTE", separator)); } return; } break; case NFSPROC_READ: if ((dp = parsereq(ndo, rp, length)) != NULL && (dp = parsefh(ndo, dp, v3)) != NULL) { if (v3) { ND_TCHECK(dp[2]); ND_PRINT((ndo, " %u bytes @ %" PRIu64, EXTRACT_32BITS(&dp[2]), EXTRACT_64BITS(&dp[0]))); } else { ND_TCHECK(dp[1]); ND_PRINT((ndo, " %u bytes @ %u", EXTRACT_32BITS(&dp[1]), EXTRACT_32BITS(&dp[0]))); } return; } break; case NFSPROC_WRITE: if ((dp = parsereq(ndo, rp, length)) != NULL && (dp = parsefh(ndo, dp, v3)) != NULL) { if (v3) { ND_TCHECK(dp[2]); ND_PRINT((ndo, " %u (%u) bytes @ %" PRIu64, EXTRACT_32BITS(&dp[4]), EXTRACT_32BITS(&dp[2]), EXTRACT_64BITS(&dp[0]))); if (ndo->ndo_vflag) { dp += 3; ND_TCHECK(dp[0]); ND_PRINT((ndo, " <%s>", tok2str(nfsv3_writemodes, NULL, EXTRACT_32BITS(dp)))); } } else { ND_TCHECK(dp[3]); ND_PRINT((ndo, " %u (%u) bytes @ %u (%u)", EXTRACT_32BITS(&dp[3]), EXTRACT_32BITS(&dp[2]), EXTRACT_32BITS(&dp[1]), EXTRACT_32BITS(&dp[0]))); } return; } break; case NFSPROC_SYMLINK: if ((dp = parsereq(ndo, rp, length)) != NULL && (dp = parsefhn(ndo, dp, v3)) != NULL) { ND_PRINT((ndo, " ->")); if (v3 && (dp = parse_sattr3(ndo, dp, &sa3)) == NULL) break; if (parsefn(ndo, dp) == NULL) 
break; if (v3 && ndo->ndo_vflag) print_sattr3(ndo, &sa3, ndo->ndo_vflag); return; } break; case NFSPROC_MKNOD: if ((dp = parsereq(ndo, rp, length)) != NULL && (dp = parsefhn(ndo, dp, v3)) != NULL) { ND_TCHECK(*dp); type = (nfs_type)EXTRACT_32BITS(dp); dp++; if ((dp = parse_sattr3(ndo, dp, &sa3)) == NULL) break; ND_PRINT((ndo, " %s", tok2str(type2str, "unk-ft %d", type))); if (ndo->ndo_vflag && (type == NFCHR || type == NFBLK)) { ND_TCHECK(dp[1]); ND_PRINT((ndo, " %u/%u", EXTRACT_32BITS(&dp[0]), EXTRACT_32BITS(&dp[1]))); dp += 2; } if (ndo->ndo_vflag) print_sattr3(ndo, &sa3, ndo->ndo_vflag); return; } break; case NFSPROC_RENAME: if ((dp = parsereq(ndo, rp, length)) != NULL && (dp = parsefhn(ndo, dp, v3)) != NULL) { ND_PRINT((ndo, " ->")); if (parsefhn(ndo, dp, v3) != NULL) return; } break; case NFSPROC_LINK: if ((dp = parsereq(ndo, rp, length)) != NULL && (dp = parsefh(ndo, dp, v3)) != NULL) { ND_PRINT((ndo, " ->")); if (parsefhn(ndo, dp, v3) != NULL) return; } break; case NFSPROC_READDIR: if ((dp = parsereq(ndo, rp, length)) != NULL && (dp = parsefh(ndo, dp, v3)) != NULL) { if (v3) { ND_TCHECK(dp[4]); /* * We shouldn't really try to interpret the * offset cookie here. */ ND_PRINT((ndo, " %u bytes @ %" PRId64, EXTRACT_32BITS(&dp[4]), EXTRACT_64BITS(&dp[0]))); if (ndo->ndo_vflag) ND_PRINT((ndo, " verf %08x%08x", dp[2], dp[3])); } else { ND_TCHECK(dp[1]); /* * Print the offset as signed, since -1 is * common, but offsets > 2^31 aren't. */ ND_PRINT((ndo, " %u bytes @ %d", EXTRACT_32BITS(&dp[1]), EXTRACT_32BITS(&dp[0]))); } return; } break; case NFSPROC_READDIRPLUS: if ((dp = parsereq(ndo, rp, length)) != NULL && (dp = parsefh(ndo, dp, v3)) != NULL) { ND_TCHECK(dp[4]); /* * We don't try to interpret the offset * cookie here. */ ND_PRINT((ndo, " %u bytes @ %" PRId64, EXTRACT_32BITS(&dp[4]), EXTRACT_64BITS(&dp[0]))); if (ndo->ndo_vflag) { ND_TCHECK(dp[5]); ND_PRINT((ndo, " max %u verf %08x%08x", EXTRACT_32BITS(&dp[5]), dp[2], dp[3])); } return; } break; case NFSPROC_COMMIT: if ((dp = parsereq(ndo, rp, length)) != NULL && (dp = parsefh(ndo, dp, v3)) != NULL) { ND_TCHECK(dp[2]); ND_PRINT((ndo, " %u bytes @ %" PRIu64, EXTRACT_32BITS(&dp[2]), EXTRACT_64BITS(&dp[0]))); return; } break; default: return; } trunc: if (!nfserr) ND_PRINT((ndo, "%s", tstr)); } /* * Print out an NFS file handle. * We assume packet was not truncated before the end of the * file handle pointed to by dp. * * Note: new version (using portable file-handle parser) doesn't produce * generation number. It probably could be made to do that, with some * additional hacking on the parser code. 
*/ static void nfs_printfh(netdissect_options *ndo, register const uint32_t *dp, const u_int len) { my_fsid fsid; uint32_t ino; const char *sfsname = NULL; char *spacep; if (ndo->ndo_uflag) { u_int i; char const *sep = ""; ND_PRINT((ndo, " fh[")); for (i=0; i<len; i++) { ND_PRINT((ndo, "%s%x", sep, dp[i])); sep = ":"; } ND_PRINT((ndo, "]")); return; } Parse_fh((const u_char *)dp, len, &fsid, &ino, NULL, &sfsname, 0); if (sfsname) { /* file system ID is ASCII, not numeric, for this server OS */ static char temp[NFSX_V3FHMAX+1]; /* Make sure string is null-terminated */ strncpy(temp, sfsname, NFSX_V3FHMAX); temp[sizeof(temp) - 1] = '\0'; /* Remove trailing spaces */ spacep = strchr(temp, ' '); if (spacep) *spacep = '\0'; ND_PRINT((ndo, " fh %s/", temp)); } else { ND_PRINT((ndo, " fh %d,%d/", fsid.Fsid_dev.Major, fsid.Fsid_dev.Minor)); } if(fsid.Fsid_dev.Minor == 257) /* Print the undecoded handle */ ND_PRINT((ndo, "%s", fsid.Opaque_Handle)); else ND_PRINT((ndo, "%ld", (long) ino)); } /* * Maintain a small cache of recent client.XID.server/proc pairs, to allow * us to match up replies with requests and thus to know how to parse * the reply. */ struct xid_map_entry { uint32_t xid; /* transaction ID (net order) */ int ipver; /* IP version (4 or 6) */ struct in6_addr client; /* client IP address (net order) */ struct in6_addr server; /* server IP address (net order) */ uint32_t proc; /* call proc number (host order) */ uint32_t vers; /* program version (host order) */ }; /* * Map entries are kept in an array that we manage as a ring; * new entries are always added at the tail of the ring. Initially, * all the entries are zero and hence don't match anything. */ #define XIDMAPSIZE 64 static struct xid_map_entry xid_map[XIDMAPSIZE]; static int xid_map_next = 0; static int xid_map_hint = 0; static int xid_map_enter(netdissect_options *ndo, const struct sunrpc_msg *rp, const u_char *bp) { const struct ip *ip = NULL; const struct ip6_hdr *ip6 = NULL; struct xid_map_entry *xmep; if (!ND_TTEST(rp->rm_call.cb_vers)) return (0); switch (IP_V((const struct ip *)bp)) { case 4: ip = (const struct ip *)bp; break; case 6: ip6 = (const struct ip6_hdr *)bp; break; default: return (1); } xmep = &xid_map[xid_map_next]; if (++xid_map_next >= XIDMAPSIZE) xid_map_next = 0; UNALIGNED_MEMCPY(&xmep->xid, &rp->rm_xid, sizeof(xmep->xid)); if (ip) { xmep->ipver = 4; UNALIGNED_MEMCPY(&xmep->client, &ip->ip_src, sizeof(ip->ip_src)); UNALIGNED_MEMCPY(&xmep->server, &ip->ip_dst, sizeof(ip->ip_dst)); } else if (ip6) { xmep->ipver = 6; UNALIGNED_MEMCPY(&xmep->client, &ip6->ip6_src, sizeof(ip6->ip6_src)); UNALIGNED_MEMCPY(&xmep->server, &ip6->ip6_dst, sizeof(ip6->ip6_dst)); } xmep->proc = EXTRACT_32BITS(&rp->rm_call.cb_proc); xmep->vers = EXTRACT_32BITS(&rp->rm_call.cb_vers); return (1); } /* * Returns 0 and puts NFSPROC_xxx in proc return and * version in vers return, or returns -1 on failure */ static int xid_map_find(const struct sunrpc_msg *rp, const u_char *bp, uint32_t *proc, uint32_t *vers) { int i; struct xid_map_entry *xmep; uint32_t xid; const struct ip *ip = (const struct ip *)bp; const struct ip6_hdr *ip6 = (const struct ip6_hdr *)bp; int cmp; UNALIGNED_MEMCPY(&xid, &rp->rm_xid, sizeof(xmep->xid)); /* Start searching from where we last left off */ i = xid_map_hint; do { xmep = &xid_map[i]; cmp = 1; if (xmep->ipver != IP_V(ip) || xmep->xid != xid) goto nextitem; switch (xmep->ipver) { case 4: if (UNALIGNED_MEMCMP(&ip->ip_src, &xmep->server, sizeof(ip->ip_src)) != 0 || UNALIGNED_MEMCMP(&ip->ip_dst, &xmep->client, 
sizeof(ip->ip_dst)) != 0) { cmp = 0; } break; case 6: if (UNALIGNED_MEMCMP(&ip6->ip6_src, &xmep->server, sizeof(ip6->ip6_src)) != 0 || UNALIGNED_MEMCMP(&ip6->ip6_dst, &xmep->client, sizeof(ip6->ip6_dst)) != 0) { cmp = 0; } break; default: cmp = 0; break; } if (cmp) { /* match */ xid_map_hint = i; *proc = xmep->proc; *vers = xmep->vers; return 0; } nextitem: if (++i >= XIDMAPSIZE) i = 0; } while (i != xid_map_hint); /* search failed */ return (-1); } /* * Routines for parsing reply packets */ /* * Return a pointer to the beginning of the actual results. * If the packet was truncated, return 0. */ static const uint32_t * parserep(netdissect_options *ndo, register const struct sunrpc_msg *rp, register u_int length) { register const uint32_t *dp; u_int len; enum sunrpc_accept_stat astat; /* * Portability note: * Here we find the address of the ar_verf credentials. * Originally, this calculation was * dp = (uint32_t *)&rp->rm_reply.rp_acpt.ar_verf * On the wire, the rp_acpt field starts immediately after * the (32 bit) rp_stat field. However, rp_acpt (which is a * "struct accepted_reply") contains a "struct opaque_auth", * whose internal representation contains a pointer, so on a * 64-bit machine the compiler inserts 32 bits of padding * before rp->rm_reply.rp_acpt.ar_verf. So, we cannot use * the internal representation to parse the on-the-wire * representation. Instead, we skip past the rp_stat field, * which is an "enum" and so occupies one 32-bit word. */ dp = ((const uint32_t *)&rp->rm_reply) + 1; ND_TCHECK(dp[1]); len = EXTRACT_32BITS(&dp[1]); if (len >= length) return (NULL); /* * skip past the ar_verf credentials. */ dp += (len + (2*sizeof(uint32_t) + 3)) / sizeof(uint32_t); ND_TCHECK2(dp[0], 0); /* * now we can check the ar_stat field */ astat = (enum sunrpc_accept_stat) EXTRACT_32BITS(dp); if (astat != SUNRPC_SUCCESS) { ND_PRINT((ndo, " %s", tok2str(sunrpc_str, "ar_stat %d", astat))); nfserr = 1; /* suppress trunc string */ return (NULL); } /* successful return */ ND_TCHECK2(*dp, sizeof(astat)); return ((const uint32_t *) (sizeof(astat) + ((const char *)dp))); trunc: return (0); } static const uint32_t * parsestatus(netdissect_options *ndo, const uint32_t *dp, int *er) { int errnum; ND_TCHECK(dp[0]); errnum = EXTRACT_32BITS(&dp[0]); if (er) *er = errnum; if (errnum != 0) { if (!ndo->ndo_qflag) ND_PRINT((ndo, " ERROR: %s", tok2str(status2str, "unk %d", errnum))); nfserr = 1; } return (dp + 1); trunc: return NULL; } static const uint32_t * parsefattr(netdissect_options *ndo, const uint32_t *dp, int verbose, int v3) { const struct nfs_fattr *fap; fap = (const struct nfs_fattr *)dp; ND_TCHECK(fap->fa_gid); if (verbose) { ND_PRINT((ndo, " %s %o ids %d/%d", tok2str(type2str, "unk-ft %d ", EXTRACT_32BITS(&fap->fa_type)), EXTRACT_32BITS(&fap->fa_mode), EXTRACT_32BITS(&fap->fa_uid), EXTRACT_32BITS(&fap->fa_gid))); if (v3) { ND_TCHECK(fap->fa3_size); ND_PRINT((ndo, " sz %" PRIu64, EXTRACT_64BITS((const uint32_t *)&fap->fa3_size))); } else { ND_TCHECK(fap->fa2_size); ND_PRINT((ndo, " sz %d", EXTRACT_32BITS(&fap->fa2_size))); } } /* print lots more stuff */ if (verbose > 1) { if (v3) { ND_TCHECK(fap->fa3_ctime); ND_PRINT((ndo, " nlink %d rdev %d/%d", EXTRACT_32BITS(&fap->fa_nlink), EXTRACT_32BITS(&fap->fa3_rdev.specdata1), EXTRACT_32BITS(&fap->fa3_rdev.specdata2))); ND_PRINT((ndo, " fsid %" PRIx64, EXTRACT_64BITS((const uint32_t *)&fap->fa3_fsid))); ND_PRINT((ndo, " fileid %" PRIx64, EXTRACT_64BITS((const uint32_t *)&fap->fa3_fileid))); ND_PRINT((ndo, " a/m/ctime %u.%06u", 
EXTRACT_32BITS(&fap->fa3_atime.nfsv3_sec), EXTRACT_32BITS(&fap->fa3_atime.nfsv3_nsec))); ND_PRINT((ndo, " %u.%06u", EXTRACT_32BITS(&fap->fa3_mtime.nfsv3_sec), EXTRACT_32BITS(&fap->fa3_mtime.nfsv3_nsec))); ND_PRINT((ndo, " %u.%06u", EXTRACT_32BITS(&fap->fa3_ctime.nfsv3_sec), EXTRACT_32BITS(&fap->fa3_ctime.nfsv3_nsec))); } else { ND_TCHECK(fap->fa2_ctime); ND_PRINT((ndo, " nlink %d rdev 0x%x fsid 0x%x nodeid 0x%x a/m/ctime", EXTRACT_32BITS(&fap->fa_nlink), EXTRACT_32BITS(&fap->fa2_rdev), EXTRACT_32BITS(&fap->fa2_fsid), EXTRACT_32BITS(&fap->fa2_fileid))); ND_PRINT((ndo, " %u.%06u", EXTRACT_32BITS(&fap->fa2_atime.nfsv2_sec), EXTRACT_32BITS(&fap->fa2_atime.nfsv2_usec))); ND_PRINT((ndo, " %u.%06u", EXTRACT_32BITS(&fap->fa2_mtime.nfsv2_sec), EXTRACT_32BITS(&fap->fa2_mtime.nfsv2_usec))); ND_PRINT((ndo, " %u.%06u", EXTRACT_32BITS(&fap->fa2_ctime.nfsv2_sec), EXTRACT_32BITS(&fap->fa2_ctime.nfsv2_usec))); } } return ((const uint32_t *)((const unsigned char *)dp + (v3 ? NFSX_V3FATTR : NFSX_V2FATTR))); trunc: return (NULL); } static int parseattrstat(netdissect_options *ndo, const uint32_t *dp, int verbose, int v3) { int er; dp = parsestatus(ndo, dp, &er); if (dp == NULL) return (0); if (er) return (1); return (parsefattr(ndo, dp, verbose, v3) != NULL); } static int parsediropres(netdissect_options *ndo, const uint32_t *dp) { int er; if (!(dp = parsestatus(ndo, dp, &er))) return (0); if (er) return (1); dp = parsefh(ndo, dp, 0); if (dp == NULL) return (0); return (parsefattr(ndo, dp, ndo->ndo_vflag, 0) != NULL); } static int parselinkres(netdissect_options *ndo, const uint32_t *dp, int v3) { int er; dp = parsestatus(ndo, dp, &er); if (dp == NULL) return(0); if (er) return(1); if (v3 && !(dp = parse_post_op_attr(ndo, dp, ndo->ndo_vflag))) return (0); ND_PRINT((ndo, " ")); return (parsefn(ndo, dp) != NULL); } static int parsestatfs(netdissect_options *ndo, const uint32_t *dp, int v3) { const struct nfs_statfs *sfsp; int er; dp = parsestatus(ndo, dp, &er); if (dp == NULL) return (0); if (!v3 && er) return (1); if (ndo->ndo_qflag) return(1); if (v3) { if (ndo->ndo_vflag) ND_PRINT((ndo, " POST:")); if (!(dp = parse_post_op_attr(ndo, dp, ndo->ndo_vflag))) return (0); } ND_TCHECK2(*dp, (v3 ? 
NFSX_V3STATFS : NFSX_V2STATFS)); sfsp = (const struct nfs_statfs *)dp; if (v3) { ND_PRINT((ndo, " tbytes %" PRIu64 " fbytes %" PRIu64 " abytes %" PRIu64, EXTRACT_64BITS((const uint32_t *)&sfsp->sf_tbytes), EXTRACT_64BITS((const uint32_t *)&sfsp->sf_fbytes), EXTRACT_64BITS((const uint32_t *)&sfsp->sf_abytes))); if (ndo->ndo_vflag) { ND_PRINT((ndo, " tfiles %" PRIu64 " ffiles %" PRIu64 " afiles %" PRIu64 " invar %u", EXTRACT_64BITS((const uint32_t *)&sfsp->sf_tfiles), EXTRACT_64BITS((const uint32_t *)&sfsp->sf_ffiles), EXTRACT_64BITS((const uint32_t *)&sfsp->sf_afiles), EXTRACT_32BITS(&sfsp->sf_invarsec))); } } else { ND_PRINT((ndo, " tsize %d bsize %d blocks %d bfree %d bavail %d", EXTRACT_32BITS(&sfsp->sf_tsize), EXTRACT_32BITS(&sfsp->sf_bsize), EXTRACT_32BITS(&sfsp->sf_blocks), EXTRACT_32BITS(&sfsp->sf_bfree), EXTRACT_32BITS(&sfsp->sf_bavail))); } return (1); trunc: return (0); } static int parserddires(netdissect_options *ndo, const uint32_t *dp) { int er; dp = parsestatus(ndo, dp, &er); if (dp == NULL) return (0); if (er) return (1); if (ndo->ndo_qflag) return (1); ND_TCHECK(dp[2]); ND_PRINT((ndo, " offset 0x%x size %d ", EXTRACT_32BITS(&dp[0]), EXTRACT_32BITS(&dp[1]))); if (dp[2] != 0) ND_PRINT((ndo, " eof")); return (1); trunc: return (0); } static const uint32_t * parse_wcc_attr(netdissect_options *ndo, const uint32_t *dp) { ND_PRINT((ndo, " sz %" PRIu64, EXTRACT_64BITS(&dp[0]))); ND_PRINT((ndo, " mtime %u.%06u ctime %u.%06u", EXTRACT_32BITS(&dp[2]), EXTRACT_32BITS(&dp[3]), EXTRACT_32BITS(&dp[4]), EXTRACT_32BITS(&dp[5]))); return (dp + 6); } /* * Pre operation attributes. Print only if vflag > 1. */ static const uint32_t * parse_pre_op_attr(netdissect_options *ndo, const uint32_t *dp, int verbose) { ND_TCHECK(dp[0]); if (!EXTRACT_32BITS(&dp[0])) return (dp + 1); dp++; ND_TCHECK2(*dp, 24); if (verbose > 1) { return parse_wcc_attr(ndo, dp); } else { /* If not verbose enough, just skip over wcc_attr */ return (dp + 6); } trunc: return (NULL); } /* * Post operation attributes are printed if vflag >= 1 */ static const uint32_t * parse_post_op_attr(netdissect_options *ndo, const uint32_t *dp, int verbose) { ND_TCHECK(dp[0]); if (!EXTRACT_32BITS(&dp[0])) return (dp + 1); dp++; if (verbose) { return parsefattr(ndo, dp, verbose, 1); } else return (dp + (NFSX_V3FATTR / sizeof (uint32_t))); trunc: return (NULL); } static const uint32_t * parse_wcc_data(netdissect_options *ndo, const uint32_t *dp, int verbose) { if (verbose > 1) ND_PRINT((ndo, " PRE:")); if (!(dp = parse_pre_op_attr(ndo, dp, verbose))) return (0); if (verbose) ND_PRINT((ndo, " POST:")); return parse_post_op_attr(ndo, dp, verbose); } static const uint32_t * parsecreateopres(netdissect_options *ndo, const uint32_t *dp, int verbose) { int er; if (!(dp = parsestatus(ndo, dp, &er))) return (0); if (er) dp = parse_wcc_data(ndo, dp, verbose); else { ND_TCHECK(dp[0]); if (!EXTRACT_32BITS(&dp[0])) return (dp + 1); dp++; if (!(dp = parsefh(ndo, dp, 1))) return (0); if (verbose) { if (!(dp = parse_post_op_attr(ndo, dp, verbose))) return (0); if (ndo->ndo_vflag > 1) { ND_PRINT((ndo, " dir attr:")); dp = parse_wcc_data(ndo, dp, verbose); } } } return (dp); trunc: return (NULL); } static int parsewccres(netdissect_options *ndo, const uint32_t *dp, int verbose) { int er; if (!(dp = parsestatus(ndo, dp, &er))) return (0); return parse_wcc_data(ndo, dp, verbose) != NULL; } static const uint32_t * parsev3rddirres(netdissect_options *ndo, const uint32_t *dp, int verbose) { int er; if (!(dp = parsestatus(ndo, dp, &er))) return (0); if 
(ndo->ndo_vflag) ND_PRINT((ndo, " POST:")); if (!(dp = parse_post_op_attr(ndo, dp, verbose))) return (0); if (er) return dp; if (ndo->ndo_vflag) { ND_TCHECK(dp[1]); ND_PRINT((ndo, " verf %08x%08x", dp[0], dp[1])); dp += 2; } return dp; trunc: return (NULL); } static int parsefsinfo(netdissect_options *ndo, const uint32_t *dp) { const struct nfsv3_fsinfo *sfp; int er; if (!(dp = parsestatus(ndo, dp, &er))) return (0); if (ndo->ndo_vflag) ND_PRINT((ndo, " POST:")); if (!(dp = parse_post_op_attr(ndo, dp, ndo->ndo_vflag))) return (0); if (er) return (1); sfp = (const struct nfsv3_fsinfo *)dp; ND_TCHECK(*sfp); ND_PRINT((ndo, " rtmax %u rtpref %u wtmax %u wtpref %u dtpref %u", EXTRACT_32BITS(&sfp->fs_rtmax), EXTRACT_32BITS(&sfp->fs_rtpref), EXTRACT_32BITS(&sfp->fs_wtmax), EXTRACT_32BITS(&sfp->fs_wtpref), EXTRACT_32BITS(&sfp->fs_dtpref))); if (ndo->ndo_vflag) { ND_PRINT((ndo, " rtmult %u wtmult %u maxfsz %" PRIu64, EXTRACT_32BITS(&sfp->fs_rtmult), EXTRACT_32BITS(&sfp->fs_wtmult), EXTRACT_64BITS((const uint32_t *)&sfp->fs_maxfilesize))); ND_PRINT((ndo, " delta %u.%06u ", EXTRACT_32BITS(&sfp->fs_timedelta.nfsv3_sec), EXTRACT_32BITS(&sfp->fs_timedelta.nfsv3_nsec))); } return (1); trunc: return (0); } static int parsepathconf(netdissect_options *ndo, const uint32_t *dp) { int er; const struct nfsv3_pathconf *spp; if (!(dp = parsestatus(ndo, dp, &er))) return (0); if (ndo->ndo_vflag) ND_PRINT((ndo, " POST:")); if (!(dp = parse_post_op_attr(ndo, dp, ndo->ndo_vflag))) return (0); if (er) return (1); spp = (const struct nfsv3_pathconf *)dp; ND_TCHECK(*spp); ND_PRINT((ndo, " linkmax %u namemax %u %s %s %s %s", EXTRACT_32BITS(&spp->pc_linkmax), EXTRACT_32BITS(&spp->pc_namemax), EXTRACT_32BITS(&spp->pc_notrunc) ? "notrunc" : "", EXTRACT_32BITS(&spp->pc_chownrestricted) ? "chownres" : "", EXTRACT_32BITS(&spp->pc_caseinsensitive) ? "igncase" : "", EXTRACT_32BITS(&spp->pc_casepreserving) ? 
"keepcase" : "")); return (1); trunc: return (0); } static void interp_reply(netdissect_options *ndo, const struct sunrpc_msg *rp, uint32_t proc, uint32_t vers, int length) { register const uint32_t *dp; register int v3; int er; v3 = (vers == NFS_VER3); if (!v3 && proc < NFS_NPROCS) proc = nfsv3_procid[proc]; ND_PRINT((ndo, " %s", tok2str(nfsproc_str, "proc-%u", proc))); switch (proc) { case NFSPROC_GETATTR: dp = parserep(ndo, rp, length); if (dp != NULL && parseattrstat(ndo, dp, !ndo->ndo_qflag, v3) != 0) return; break; case NFSPROC_SETATTR: if (!(dp = parserep(ndo, rp, length))) return; if (v3) { if (parsewccres(ndo, dp, ndo->ndo_vflag)) return; } else { if (parseattrstat(ndo, dp, !ndo->ndo_qflag, 0) != 0) return; } break; case NFSPROC_LOOKUP: if (!(dp = parserep(ndo, rp, length))) break; if (v3) { if (!(dp = parsestatus(ndo, dp, &er))) break; if (er) { if (ndo->ndo_vflag > 1) { ND_PRINT((ndo, " post dattr:")); dp = parse_post_op_attr(ndo, dp, ndo->ndo_vflag); } } else { if (!(dp = parsefh(ndo, dp, v3))) break; if ((dp = parse_post_op_attr(ndo, dp, ndo->ndo_vflag)) && ndo->ndo_vflag > 1) { ND_PRINT((ndo, " post dattr:")); dp = parse_post_op_attr(ndo, dp, ndo->ndo_vflag); } } if (dp) return; } else { if (parsediropres(ndo, dp) != 0) return; } break; case NFSPROC_ACCESS: if (!(dp = parserep(ndo, rp, length))) break; if (!(dp = parsestatus(ndo, dp, &er))) break; if (ndo->ndo_vflag) ND_PRINT((ndo, " attr:")); if (!(dp = parse_post_op_attr(ndo, dp, ndo->ndo_vflag))) break; if (!er) ND_PRINT((ndo, " c %04x", EXTRACT_32BITS(&dp[0]))); return; case NFSPROC_READLINK: dp = parserep(ndo, rp, length); if (dp != NULL && parselinkres(ndo, dp, v3) != 0) return; break; case NFSPROC_READ: if (!(dp = parserep(ndo, rp, length))) break; if (v3) { if (!(dp = parsestatus(ndo, dp, &er))) break; if (!(dp = parse_post_op_attr(ndo, dp, ndo->ndo_vflag))) break; if (er) return; if (ndo->ndo_vflag) { ND_TCHECK(dp[1]); ND_PRINT((ndo, " %u bytes", EXTRACT_32BITS(&dp[0]))); if (EXTRACT_32BITS(&dp[1])) ND_PRINT((ndo, " EOF")); } return; } else { if (parseattrstat(ndo, dp, ndo->ndo_vflag, 0) != 0) return; } break; case NFSPROC_WRITE: if (!(dp = parserep(ndo, rp, length))) break; if (v3) { if (!(dp = parsestatus(ndo, dp, &er))) break; if (!(dp = parse_wcc_data(ndo, dp, ndo->ndo_vflag))) break; if (er) return; if (ndo->ndo_vflag) { ND_TCHECK(dp[0]); ND_PRINT((ndo, " %u bytes", EXTRACT_32BITS(&dp[0]))); if (ndo->ndo_vflag > 1) { ND_TCHECK(dp[1]); ND_PRINT((ndo, " <%s>", tok2str(nfsv3_writemodes, NULL, EXTRACT_32BITS(&dp[1])))); } return; } } else { if (parseattrstat(ndo, dp, ndo->ndo_vflag, v3) != 0) return; } break; case NFSPROC_CREATE: case NFSPROC_MKDIR: if (!(dp = parserep(ndo, rp, length))) break; if (v3) { if (parsecreateopres(ndo, dp, ndo->ndo_vflag) != NULL) return; } else { if (parsediropres(ndo, dp) != 0) return; } break; case NFSPROC_SYMLINK: if (!(dp = parserep(ndo, rp, length))) break; if (v3) { if (parsecreateopres(ndo, dp, ndo->ndo_vflag) != NULL) return; } else { if (parsestatus(ndo, dp, &er) != NULL) return; } break; case NFSPROC_MKNOD: if (!(dp = parserep(ndo, rp, length))) break; if (parsecreateopres(ndo, dp, ndo->ndo_vflag) != NULL) return; break; case NFSPROC_REMOVE: case NFSPROC_RMDIR: if (!(dp = parserep(ndo, rp, length))) break; if (v3) { if (parsewccres(ndo, dp, ndo->ndo_vflag)) return; } else { if (parsestatus(ndo, dp, &er) != NULL) return; } break; case NFSPROC_RENAME: if (!(dp = parserep(ndo, rp, length))) break; if (v3) { if (!(dp = parsestatus(ndo, dp, &er))) break; if (ndo->ndo_vflag) { 
ND_PRINT((ndo, " from:")); if (!(dp = parse_wcc_data(ndo, dp, ndo->ndo_vflag))) break; ND_PRINT((ndo, " to:")); if (!(dp = parse_wcc_data(ndo, dp, ndo->ndo_vflag))) break; } return; } else { if (parsestatus(ndo, dp, &er) != NULL) return; } break; case NFSPROC_LINK: if (!(dp = parserep(ndo, rp, length))) break; if (v3) { if (!(dp = parsestatus(ndo, dp, &er))) break; if (ndo->ndo_vflag) { ND_PRINT((ndo, " file POST:")); if (!(dp = parse_post_op_attr(ndo, dp, ndo->ndo_vflag))) break; ND_PRINT((ndo, " dir:")); if (!(dp = parse_wcc_data(ndo, dp, ndo->ndo_vflag))) break; return; } } else { if (parsestatus(ndo, dp, &er) != NULL) return; } break; case NFSPROC_READDIR: if (!(dp = parserep(ndo, rp, length))) break; if (v3) { if (parsev3rddirres(ndo, dp, ndo->ndo_vflag)) return; } else { if (parserddires(ndo, dp) != 0) return; } break; case NFSPROC_READDIRPLUS: if (!(dp = parserep(ndo, rp, length))) break; if (parsev3rddirres(ndo, dp, ndo->ndo_vflag)) return; break; case NFSPROC_FSSTAT: dp = parserep(ndo, rp, length); if (dp != NULL && parsestatfs(ndo, dp, v3) != 0) return; break; case NFSPROC_FSINFO: dp = parserep(ndo, rp, length); if (dp != NULL && parsefsinfo(ndo, dp) != 0) return; break; case NFSPROC_PATHCONF: dp = parserep(ndo, rp, length); if (dp != NULL && parsepathconf(ndo, dp) != 0) return; break; case NFSPROC_COMMIT: dp = parserep(ndo, rp, length); if (dp != NULL && parsewccres(ndo, dp, ndo->ndo_vflag) != 0) return; break; default: return; } trunc: if (!nfserr) ND_PRINT((ndo, "%s", tstr)); }
/* * Copyright (c) 1988, 1989, 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that: (1) source code distributions * retain the above copyright notice and this paragraph in its entirety, (2) * distributions including binary code include the above copyright notice and * this paragraph in its entirety in the documentation or other materials * provided with the distribution, and (3) all advertising materials mentioning * features or use of this software display the following acknowledgement: * ``This product includes software developed by the University of California, * Lawrence Berkeley Laboratory and its contributors.'' Neither the name of * the University nor the names of its contributors may be used to endorse * or promote products derived from this software without specific prior * written permission. * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. */ /* \summary: Network File System (NFS) printer */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include <netdissect-stdinc.h> #include <stdio.h> #include <string.h> #include "netdissect.h" #include "addrtoname.h" #include "extract.h" #include "nfs.h" #include "nfsfh.h" #include "ip.h" #include "ip6.h" #include "rpc_auth.h" #include "rpc_msg.h" static const char tstr[] = " [|nfs]"; static void nfs_printfh(netdissect_options *, const uint32_t *, const u_int); static int xid_map_enter(netdissect_options *, const struct sunrpc_msg *, const u_char *); static int xid_map_find(const struct sunrpc_msg *, const u_char *, uint32_t *, uint32_t *); static void interp_reply(netdissect_options *, const struct sunrpc_msg *, uint32_t, uint32_t, int); static const uint32_t *parse_post_op_attr(netdissect_options *, const uint32_t *, int); /* * Mapping of old NFS Version 2 RPC numbers to generic numbers. */ static uint32_t nfsv3_procid[NFS_NPROCS] = { NFSPROC_NULL, NFSPROC_GETATTR, NFSPROC_SETATTR, NFSPROC_NOOP, NFSPROC_LOOKUP, NFSPROC_READLINK, NFSPROC_READ, NFSPROC_NOOP, NFSPROC_WRITE, NFSPROC_CREATE, NFSPROC_REMOVE, NFSPROC_RENAME, NFSPROC_LINK, NFSPROC_SYMLINK, NFSPROC_MKDIR, NFSPROC_RMDIR, NFSPROC_READDIR, NFSPROC_FSSTAT, NFSPROC_NOOP, NFSPROC_NOOP, NFSPROC_NOOP, NFSPROC_NOOP, NFSPROC_NOOP, NFSPROC_NOOP, NFSPROC_NOOP, NFSPROC_NOOP }; static const struct tok nfsproc_str[] = { { NFSPROC_NOOP, "nop" }, { NFSPROC_NULL, "null" }, { NFSPROC_GETATTR, "getattr" }, { NFSPROC_SETATTR, "setattr" }, { NFSPROC_LOOKUP, "lookup" }, { NFSPROC_ACCESS, "access" }, { NFSPROC_READLINK, "readlink" }, { NFSPROC_READ, "read" }, { NFSPROC_WRITE, "write" }, { NFSPROC_CREATE, "create" }, { NFSPROC_MKDIR, "mkdir" }, { NFSPROC_SYMLINK, "symlink" }, { NFSPROC_MKNOD, "mknod" }, { NFSPROC_REMOVE, "remove" }, { NFSPROC_RMDIR, "rmdir" }, { NFSPROC_RENAME, "rename" }, { NFSPROC_LINK, "link" }, { NFSPROC_READDIR, "readdir" }, { NFSPROC_READDIRPLUS, "readdirplus" }, { NFSPROC_FSSTAT, "fsstat" }, { NFSPROC_FSINFO, "fsinfo" }, { NFSPROC_PATHCONF, "pathconf" }, { NFSPROC_COMMIT, "commit" }, { 0, NULL } }; /* * NFS V2 and V3 status values. * * Some of these come from the RFCs for NFS V2 and V3, with the message * strings taken from the FreeBSD C library "errlst.c". 
* * Others are errors that are not in the RFC but that I suspect some * NFS servers could return; the values are FreeBSD errno values, as * the first NFS server was the SunOS 2.0 one, and until 5.0 SunOS * was primarily BSD-derived. */ static const struct tok status2str[] = { { 1, "Operation not permitted" }, /* EPERM */ { 2, "No such file or directory" }, /* ENOENT */ { 5, "Input/output error" }, /* EIO */ { 6, "Device not configured" }, /* ENXIO */ { 11, "Resource deadlock avoided" }, /* EDEADLK */ { 12, "Cannot allocate memory" }, /* ENOMEM */ { 13, "Permission denied" }, /* EACCES */ { 17, "File exists" }, /* EEXIST */ { 18, "Cross-device link" }, /* EXDEV */ { 19, "Operation not supported by device" }, /* ENODEV */ { 20, "Not a directory" }, /* ENOTDIR */ { 21, "Is a directory" }, /* EISDIR */ { 22, "Invalid argument" }, /* EINVAL */ { 26, "Text file busy" }, /* ETXTBSY */ { 27, "File too large" }, /* EFBIG */ { 28, "No space left on device" }, /* ENOSPC */ { 30, "Read-only file system" }, /* EROFS */ { 31, "Too many links" }, /* EMLINK */ { 45, "Operation not supported" }, /* EOPNOTSUPP */ { 62, "Too many levels of symbolic links" }, /* ELOOP */ { 63, "File name too long" }, /* ENAMETOOLONG */ { 66, "Directory not empty" }, /* ENOTEMPTY */ { 69, "Disc quota exceeded" }, /* EDQUOT */ { 70, "Stale NFS file handle" }, /* ESTALE */ { 71, "Too many levels of remote in path" }, /* EREMOTE */ { 99, "Write cache flushed to disk" }, /* NFSERR_WFLUSH (not used) */ { 10001, "Illegal NFS file handle" }, /* NFS3ERR_BADHANDLE */ { 10002, "Update synchronization mismatch" }, /* NFS3ERR_NOT_SYNC */ { 10003, "READDIR/READDIRPLUS cookie is stale" }, /* NFS3ERR_BAD_COOKIE */ { 10004, "Operation not supported" }, /* NFS3ERR_NOTSUPP */ { 10005, "Buffer or request is too small" }, /* NFS3ERR_TOOSMALL */ { 10006, "Unspecified error on server" }, /* NFS3ERR_SERVERFAULT */ { 10007, "Object of that type not supported" }, /* NFS3ERR_BADTYPE */ { 10008, "Request couldn't be completed in time" }, /* NFS3ERR_JUKEBOX */ { 0, NULL } }; static const struct tok nfsv3_writemodes[] = { { 0, "unstable" }, { 1, "datasync" }, { 2, "filesync" }, { 0, NULL } }; static const struct tok type2str[] = { { NFNON, "NON" }, { NFREG, "REG" }, { NFDIR, "DIR" }, { NFBLK, "BLK" }, { NFCHR, "CHR" }, { NFLNK, "LNK" }, { NFFIFO, "FIFO" }, { 0, NULL } }; static const struct tok sunrpc_auth_str[] = { { SUNRPC_AUTH_OK, "OK" }, { SUNRPC_AUTH_BADCRED, "Bogus Credentials (seal broken)" }, { SUNRPC_AUTH_REJECTEDCRED, "Rejected Credentials (client should begin new session)" }, { SUNRPC_AUTH_BADVERF, "Bogus Verifier (seal broken)" }, { SUNRPC_AUTH_REJECTEDVERF, "Verifier expired or was replayed" }, { SUNRPC_AUTH_TOOWEAK, "Credentials are too weak" }, { SUNRPC_AUTH_INVALIDRESP, "Bogus response verifier" }, { SUNRPC_AUTH_FAILED, "Unknown failure" }, { 0, NULL } }; static const struct tok sunrpc_str[] = { { SUNRPC_PROG_UNAVAIL, "PROG_UNAVAIL" }, { SUNRPC_PROG_MISMATCH, "PROG_MISMATCH" }, { SUNRPC_PROC_UNAVAIL, "PROC_UNAVAIL" }, { SUNRPC_GARBAGE_ARGS, "GARBAGE_ARGS" }, { SUNRPC_SYSTEM_ERR, "SYSTEM_ERR" }, { 0, NULL } }; static void print_nfsaddr(netdissect_options *ndo, const u_char *bp, const char *s, const char *d) { const struct ip *ip; const struct ip6_hdr *ip6; char srcaddr[INET6_ADDRSTRLEN], dstaddr[INET6_ADDRSTRLEN]; srcaddr[0] = dstaddr[0] = '\0'; switch (IP_V((const struct ip *)bp)) { case 4: ip = (const struct ip *)bp; strlcpy(srcaddr, ipaddr_string(ndo, &ip->ip_src), sizeof(srcaddr)); strlcpy(dstaddr, ipaddr_string(ndo, &ip->ip_dst), 
sizeof(dstaddr)); break; case 6: ip6 = (const struct ip6_hdr *)bp; strlcpy(srcaddr, ip6addr_string(ndo, &ip6->ip6_src), sizeof(srcaddr)); strlcpy(dstaddr, ip6addr_string(ndo, &ip6->ip6_dst), sizeof(dstaddr)); break; default: strlcpy(srcaddr, "?", sizeof(srcaddr)); strlcpy(dstaddr, "?", sizeof(dstaddr)); break; } ND_PRINT((ndo, "%s.%s > %s.%s: ", srcaddr, s, dstaddr, d)); } static const uint32_t * parse_sattr3(netdissect_options *ndo, const uint32_t *dp, struct nfsv3_sattr *sa3) { ND_TCHECK(dp[0]); sa3->sa_modeset = EXTRACT_32BITS(dp); dp++; if (sa3->sa_modeset) { ND_TCHECK(dp[0]); sa3->sa_mode = EXTRACT_32BITS(dp); dp++; } ND_TCHECK(dp[0]); sa3->sa_uidset = EXTRACT_32BITS(dp); dp++; if (sa3->sa_uidset) { ND_TCHECK(dp[0]); sa3->sa_uid = EXTRACT_32BITS(dp); dp++; } ND_TCHECK(dp[0]); sa3->sa_gidset = EXTRACT_32BITS(dp); dp++; if (sa3->sa_gidset) { ND_TCHECK(dp[0]); sa3->sa_gid = EXTRACT_32BITS(dp); dp++; } ND_TCHECK(dp[0]); sa3->sa_sizeset = EXTRACT_32BITS(dp); dp++; if (sa3->sa_sizeset) { ND_TCHECK(dp[0]); sa3->sa_size = EXTRACT_32BITS(dp); dp++; } ND_TCHECK(dp[0]); sa3->sa_atimetype = EXTRACT_32BITS(dp); dp++; if (sa3->sa_atimetype == NFSV3SATTRTIME_TOCLIENT) { ND_TCHECK(dp[1]); sa3->sa_atime.nfsv3_sec = EXTRACT_32BITS(dp); dp++; sa3->sa_atime.nfsv3_nsec = EXTRACT_32BITS(dp); dp++; } ND_TCHECK(dp[0]); sa3->sa_mtimetype = EXTRACT_32BITS(dp); dp++; if (sa3->sa_mtimetype == NFSV3SATTRTIME_TOCLIENT) { ND_TCHECK(dp[1]); sa3->sa_mtime.nfsv3_sec = EXTRACT_32BITS(dp); dp++; sa3->sa_mtime.nfsv3_nsec = EXTRACT_32BITS(dp); dp++; } return dp; trunc: return NULL; } static int nfserr; /* true if we error rather than trunc */ static void print_sattr3(netdissect_options *ndo, const struct nfsv3_sattr *sa3, int verbose) { if (sa3->sa_modeset) ND_PRINT((ndo, " mode %o", sa3->sa_mode)); if (sa3->sa_uidset) ND_PRINT((ndo, " uid %u", sa3->sa_uid)); if (sa3->sa_gidset) ND_PRINT((ndo, " gid %u", sa3->sa_gid)); if (verbose > 1) { if (sa3->sa_atimetype == NFSV3SATTRTIME_TOCLIENT) ND_PRINT((ndo, " atime %u.%06u", sa3->sa_atime.nfsv3_sec, sa3->sa_atime.nfsv3_nsec)); if (sa3->sa_mtimetype == NFSV3SATTRTIME_TOCLIENT) ND_PRINT((ndo, " mtime %u.%06u", sa3->sa_mtime.nfsv3_sec, sa3->sa_mtime.nfsv3_nsec)); } } void nfsreply_print(netdissect_options *ndo, register const u_char *bp, u_int length, register const u_char *bp2) { register const struct sunrpc_msg *rp; char srcid[20], dstid[20]; /*fits 32bit*/ nfserr = 0; /* assume no error */ rp = (const struct sunrpc_msg *)bp; ND_TCHECK(rp->rm_xid); if (!ndo->ndo_nflag) { strlcpy(srcid, "nfs", sizeof(srcid)); snprintf(dstid, sizeof(dstid), "%u", EXTRACT_32BITS(&rp->rm_xid)); } else { snprintf(srcid, sizeof(srcid), "%u", NFS_PORT); snprintf(dstid, sizeof(dstid), "%u", EXTRACT_32BITS(&rp->rm_xid)); } print_nfsaddr(ndo, bp2, srcid, dstid); nfsreply_print_noaddr(ndo, bp, length, bp2); return; trunc: if (!nfserr) ND_PRINT((ndo, "%s", tstr)); } void nfsreply_print_noaddr(netdissect_options *ndo, register const u_char *bp, u_int length, register const u_char *bp2) { register const struct sunrpc_msg *rp; uint32_t proc, vers, reply_stat; enum sunrpc_reject_stat rstat; uint32_t rlow; uint32_t rhigh; enum sunrpc_auth_stat rwhy; nfserr = 0; /* assume no error */ rp = (const struct sunrpc_msg *)bp; ND_TCHECK(rp->rm_reply.rp_stat); reply_stat = EXTRACT_32BITS(&rp->rm_reply.rp_stat); switch (reply_stat) { case SUNRPC_MSG_ACCEPTED: ND_PRINT((ndo, "reply ok %u", length)); if (xid_map_find(rp, bp2, &proc, &vers) >= 0) interp_reply(ndo, rp, proc, vers, length); break; case SUNRPC_MSG_DENIED: 
ND_PRINT((ndo, "reply ERR %u: ", length)); ND_TCHECK(rp->rm_reply.rp_reject.rj_stat); rstat = EXTRACT_32BITS(&rp->rm_reply.rp_reject.rj_stat); switch (rstat) { case SUNRPC_RPC_MISMATCH: ND_TCHECK(rp->rm_reply.rp_reject.rj_vers.high); rlow = EXTRACT_32BITS(&rp->rm_reply.rp_reject.rj_vers.low); rhigh = EXTRACT_32BITS(&rp->rm_reply.rp_reject.rj_vers.high); ND_PRINT((ndo, "RPC Version mismatch (%u-%u)", rlow, rhigh)); break; case SUNRPC_AUTH_ERROR: ND_TCHECK(rp->rm_reply.rp_reject.rj_why); rwhy = EXTRACT_32BITS(&rp->rm_reply.rp_reject.rj_why); ND_PRINT((ndo, "Auth %s", tok2str(sunrpc_auth_str, "Invalid failure code %u", rwhy))); break; default: ND_PRINT((ndo, "Unknown reason for rejecting rpc message %u", (unsigned int)rstat)); break; } break; default: ND_PRINT((ndo, "reply Unknown rpc response code=%u %u", reply_stat, length)); break; } return; trunc: if (!nfserr) ND_PRINT((ndo, "%s", tstr)); } /* * Return a pointer to the first file handle in the packet. * If the packet was truncated, return 0. */ static const uint32_t * parsereq(netdissect_options *ndo, register const struct sunrpc_msg *rp, register u_int length) { register const uint32_t *dp; register u_int len; /* * find the start of the req data (if we captured it) */ dp = (const uint32_t *)&rp->rm_call.cb_cred; ND_TCHECK(dp[1]); len = EXTRACT_32BITS(&dp[1]); if (len < length) { dp += (len + (2 * sizeof(*dp) + 3)) / sizeof(*dp); ND_TCHECK(dp[1]); len = EXTRACT_32BITS(&dp[1]); if (len < length) { dp += (len + (2 * sizeof(*dp) + 3)) / sizeof(*dp); ND_TCHECK2(dp[0], 0); return (dp); } } trunc: return (NULL); } /* * Print out an NFS file handle and return a pointer to following word. * If packet was truncated, return 0. */ static const uint32_t * parsefh(netdissect_options *ndo, register const uint32_t *dp, int v3) { u_int len; if (v3) { ND_TCHECK(dp[0]); len = EXTRACT_32BITS(dp) / 4; dp++; } else len = NFSX_V2FH / 4; if (ND_TTEST2(*dp, len * sizeof(*dp))) { nfs_printfh(ndo, dp, len); return (dp + len); } trunc: return (NULL); } /* * Print out a file name and return pointer to 32-bit word past it. * If packet was truncated, return 0. */ static const uint32_t * parsefn(netdissect_options *ndo, register const uint32_t *dp) { register uint32_t len; register const u_char *cp; /* Bail if we don't have the string length */ ND_TCHECK(*dp); /* Fetch string length; convert to host order */ len = *dp++; NTOHL(len); ND_TCHECK2(*dp, ((len + 3) & ~3)); cp = (const u_char *)dp; /* Update 32-bit pointer (NFS filenames padded to 32-bit boundaries) */ dp += ((len + 3) & ~3) / sizeof(*dp); ND_PRINT((ndo, "\"")); if (fn_printn(ndo, cp, len, ndo->ndo_snapend)) { ND_PRINT((ndo, "\"")); goto trunc; } ND_PRINT((ndo, "\"")); return (dp); trunc: return NULL; } /* * Print out file handle and file name. * Return pointer to 32-bit word past file name. * If packet was truncated (or there was some other error), return 0. 
*/ static const uint32_t * parsefhn(netdissect_options *ndo, register const uint32_t *dp, int v3) { dp = parsefh(ndo, dp, v3); if (dp == NULL) return (NULL); ND_PRINT((ndo, " ")); return (parsefn(ndo, dp)); } void nfsreq_print_noaddr(netdissect_options *ndo, register const u_char *bp, u_int length, register const u_char *bp2) { register const struct sunrpc_msg *rp; register const uint32_t *dp; nfs_type type; int v3; uint32_t proc; uint32_t access_flags; struct nfsv3_sattr sa3; ND_PRINT((ndo, "%d", length)); nfserr = 0; /* assume no error */ rp = (const struct sunrpc_msg *)bp; if (!xid_map_enter(ndo, rp, bp2)) /* record proc number for later on */ goto trunc; v3 = (EXTRACT_32BITS(&rp->rm_call.cb_vers) == NFS_VER3); proc = EXTRACT_32BITS(&rp->rm_call.cb_proc); if (!v3 && proc < NFS_NPROCS) proc = nfsv3_procid[proc]; ND_PRINT((ndo, " %s", tok2str(nfsproc_str, "proc-%u", proc))); switch (proc) { case NFSPROC_GETATTR: case NFSPROC_SETATTR: case NFSPROC_READLINK: case NFSPROC_FSSTAT: case NFSPROC_FSINFO: case NFSPROC_PATHCONF: if ((dp = parsereq(ndo, rp, length)) != NULL && parsefh(ndo, dp, v3) != NULL) return; break; case NFSPROC_LOOKUP: case NFSPROC_CREATE: case NFSPROC_MKDIR: case NFSPROC_REMOVE: case NFSPROC_RMDIR: if ((dp = parsereq(ndo, rp, length)) != NULL && parsefhn(ndo, dp, v3) != NULL) return; break; case NFSPROC_ACCESS: if ((dp = parsereq(ndo, rp, length)) != NULL && (dp = parsefh(ndo, dp, v3)) != NULL) { ND_TCHECK(dp[0]); access_flags = EXTRACT_32BITS(&dp[0]); if (access_flags & ~NFSV3ACCESS_FULL) { /* NFSV3ACCESS definitions aren't up to date */ ND_PRINT((ndo, " %04x", access_flags)); } else if ((access_flags & NFSV3ACCESS_FULL) == NFSV3ACCESS_FULL) { ND_PRINT((ndo, " NFS_ACCESS_FULL")); } else { char separator = ' '; if (access_flags & NFSV3ACCESS_READ) { ND_PRINT((ndo, " NFS_ACCESS_READ")); separator = '|'; } if (access_flags & NFSV3ACCESS_LOOKUP) { ND_PRINT((ndo, "%cNFS_ACCESS_LOOKUP", separator)); separator = '|'; } if (access_flags & NFSV3ACCESS_MODIFY) { ND_PRINT((ndo, "%cNFS_ACCESS_MODIFY", separator)); separator = '|'; } if (access_flags & NFSV3ACCESS_EXTEND) { ND_PRINT((ndo, "%cNFS_ACCESS_EXTEND", separator)); separator = '|'; } if (access_flags & NFSV3ACCESS_DELETE) { ND_PRINT((ndo, "%cNFS_ACCESS_DELETE", separator)); separator = '|'; } if (access_flags & NFSV3ACCESS_EXECUTE) ND_PRINT((ndo, "%cNFS_ACCESS_EXECUTE", separator)); } return; } break; case NFSPROC_READ: if ((dp = parsereq(ndo, rp, length)) != NULL && (dp = parsefh(ndo, dp, v3)) != NULL) { if (v3) { ND_TCHECK(dp[2]); ND_PRINT((ndo, " %u bytes @ %" PRIu64, EXTRACT_32BITS(&dp[2]), EXTRACT_64BITS(&dp[0]))); } else { ND_TCHECK(dp[1]); ND_PRINT((ndo, " %u bytes @ %u", EXTRACT_32BITS(&dp[1]), EXTRACT_32BITS(&dp[0]))); } return; } break; case NFSPROC_WRITE: if ((dp = parsereq(ndo, rp, length)) != NULL && (dp = parsefh(ndo, dp, v3)) != NULL) { if (v3) { ND_TCHECK(dp[4]); ND_PRINT((ndo, " %u (%u) bytes @ %" PRIu64, EXTRACT_32BITS(&dp[4]), EXTRACT_32BITS(&dp[2]), EXTRACT_64BITS(&dp[0]))); if (ndo->ndo_vflag) { ND_PRINT((ndo, " <%s>", tok2str(nfsv3_writemodes, NULL, EXTRACT_32BITS(&dp[3])))); } } else { ND_TCHECK(dp[3]); ND_PRINT((ndo, " %u (%u) bytes @ %u (%u)", EXTRACT_32BITS(&dp[3]), EXTRACT_32BITS(&dp[2]), EXTRACT_32BITS(&dp[1]), EXTRACT_32BITS(&dp[0]))); } return; } break; case NFSPROC_SYMLINK: if ((dp = parsereq(ndo, rp, length)) != NULL && (dp = parsefhn(ndo, dp, v3)) != NULL) { ND_PRINT((ndo, " ->")); if (v3 && (dp = parse_sattr3(ndo, dp, &sa3)) == NULL) break; if (parsefn(ndo, dp) == NULL) break; if (v3 && 
ndo->ndo_vflag) print_sattr3(ndo, &sa3, ndo->ndo_vflag); return; } break; case NFSPROC_MKNOD: if ((dp = parsereq(ndo, rp, length)) != NULL && (dp = parsefhn(ndo, dp, v3)) != NULL) { ND_TCHECK(*dp); type = (nfs_type)EXTRACT_32BITS(dp); dp++; if ((dp = parse_sattr3(ndo, dp, &sa3)) == NULL) break; ND_PRINT((ndo, " %s", tok2str(type2str, "unk-ft %d", type))); if (ndo->ndo_vflag && (type == NFCHR || type == NFBLK)) { ND_TCHECK(dp[1]); ND_PRINT((ndo, " %u/%u", EXTRACT_32BITS(&dp[0]), EXTRACT_32BITS(&dp[1]))); dp += 2; } if (ndo->ndo_vflag) print_sattr3(ndo, &sa3, ndo->ndo_vflag); return; } break; case NFSPROC_RENAME: if ((dp = parsereq(ndo, rp, length)) != NULL && (dp = parsefhn(ndo, dp, v3)) != NULL) { ND_PRINT((ndo, " ->")); if (parsefhn(ndo, dp, v3) != NULL) return; } break; case NFSPROC_LINK: if ((dp = parsereq(ndo, rp, length)) != NULL && (dp = parsefh(ndo, dp, v3)) != NULL) { ND_PRINT((ndo, " ->")); if (parsefhn(ndo, dp, v3) != NULL) return; } break; case NFSPROC_READDIR: if ((dp = parsereq(ndo, rp, length)) != NULL && (dp = parsefh(ndo, dp, v3)) != NULL) { if (v3) { ND_TCHECK(dp[4]); /* * We shouldn't really try to interpret the * offset cookie here. */ ND_PRINT((ndo, " %u bytes @ %" PRId64, EXTRACT_32BITS(&dp[4]), EXTRACT_64BITS(&dp[0]))); if (ndo->ndo_vflag) ND_PRINT((ndo, " verf %08x%08x", dp[2], dp[3])); } else { ND_TCHECK(dp[1]); /* * Print the offset as signed, since -1 is * common, but offsets > 2^31 aren't. */ ND_PRINT((ndo, " %u bytes @ %d", EXTRACT_32BITS(&dp[1]), EXTRACT_32BITS(&dp[0]))); } return; } break; case NFSPROC_READDIRPLUS: if ((dp = parsereq(ndo, rp, length)) != NULL && (dp = parsefh(ndo, dp, v3)) != NULL) { ND_TCHECK(dp[4]); /* * We don't try to interpret the offset * cookie here. */ ND_PRINT((ndo, " %u bytes @ %" PRId64, EXTRACT_32BITS(&dp[4]), EXTRACT_64BITS(&dp[0]))); if (ndo->ndo_vflag) { ND_TCHECK(dp[5]); ND_PRINT((ndo, " max %u verf %08x%08x", EXTRACT_32BITS(&dp[5]), dp[2], dp[3])); } return; } break; case NFSPROC_COMMIT: if ((dp = parsereq(ndo, rp, length)) != NULL && (dp = parsefh(ndo, dp, v3)) != NULL) { ND_TCHECK(dp[2]); ND_PRINT((ndo, " %u bytes @ %" PRIu64, EXTRACT_32BITS(&dp[2]), EXTRACT_64BITS(&dp[0]))); return; } break; default: return; } trunc: if (!nfserr) ND_PRINT((ndo, "%s", tstr)); } /* * Print out an NFS file handle. * We assume packet was not truncated before the end of the * file handle pointed to by dp. * * Note: new version (using portable file-handle parser) doesn't produce * generation number. It probably could be made to do that, with some * additional hacking on the parser code. 
*/ static void nfs_printfh(netdissect_options *ndo, register const uint32_t *dp, const u_int len) { my_fsid fsid; uint32_t ino; const char *sfsname = NULL; char *spacep; if (ndo->ndo_uflag) { u_int i; char const *sep = ""; ND_PRINT((ndo, " fh[")); for (i=0; i<len; i++) { ND_PRINT((ndo, "%s%x", sep, dp[i])); sep = ":"; } ND_PRINT((ndo, "]")); return; } Parse_fh((const u_char *)dp, len, &fsid, &ino, NULL, &sfsname, 0); if (sfsname) { /* file system ID is ASCII, not numeric, for this server OS */ static char temp[NFSX_V3FHMAX+1]; /* Make sure string is null-terminated */ strncpy(temp, sfsname, NFSX_V3FHMAX); temp[sizeof(temp) - 1] = '\0'; /* Remove trailing spaces */ spacep = strchr(temp, ' '); if (spacep) *spacep = '\0'; ND_PRINT((ndo, " fh %s/", temp)); } else { ND_PRINT((ndo, " fh %d,%d/", fsid.Fsid_dev.Major, fsid.Fsid_dev.Minor)); } if(fsid.Fsid_dev.Minor == 257) /* Print the undecoded handle */ ND_PRINT((ndo, "%s", fsid.Opaque_Handle)); else ND_PRINT((ndo, "%ld", (long) ino)); } /* * Maintain a small cache of recent client.XID.server/proc pairs, to allow * us to match up replies with requests and thus to know how to parse * the reply. */ struct xid_map_entry { uint32_t xid; /* transaction ID (net order) */ int ipver; /* IP version (4 or 6) */ struct in6_addr client; /* client IP address (net order) */ struct in6_addr server; /* server IP address (net order) */ uint32_t proc; /* call proc number (host order) */ uint32_t vers; /* program version (host order) */ }; /* * Map entries are kept in an array that we manage as a ring; * new entries are always added at the tail of the ring. Initially, * all the entries are zero and hence don't match anything. */ #define XIDMAPSIZE 64 static struct xid_map_entry xid_map[XIDMAPSIZE]; static int xid_map_next = 0; static int xid_map_hint = 0; static int xid_map_enter(netdissect_options *ndo, const struct sunrpc_msg *rp, const u_char *bp) { const struct ip *ip = NULL; const struct ip6_hdr *ip6 = NULL; struct xid_map_entry *xmep; if (!ND_TTEST(rp->rm_call.cb_vers)) return (0); switch (IP_V((const struct ip *)bp)) { case 4: ip = (const struct ip *)bp; break; case 6: ip6 = (const struct ip6_hdr *)bp; break; default: return (1); } xmep = &xid_map[xid_map_next]; if (++xid_map_next >= XIDMAPSIZE) xid_map_next = 0; UNALIGNED_MEMCPY(&xmep->xid, &rp->rm_xid, sizeof(xmep->xid)); if (ip) { xmep->ipver = 4; UNALIGNED_MEMCPY(&xmep->client, &ip->ip_src, sizeof(ip->ip_src)); UNALIGNED_MEMCPY(&xmep->server, &ip->ip_dst, sizeof(ip->ip_dst)); } else if (ip6) { xmep->ipver = 6; UNALIGNED_MEMCPY(&xmep->client, &ip6->ip6_src, sizeof(ip6->ip6_src)); UNALIGNED_MEMCPY(&xmep->server, &ip6->ip6_dst, sizeof(ip6->ip6_dst)); } xmep->proc = EXTRACT_32BITS(&rp->rm_call.cb_proc); xmep->vers = EXTRACT_32BITS(&rp->rm_call.cb_vers); return (1); } /* * Returns 0 and puts NFSPROC_xxx in proc return and * version in vers return, or returns -1 on failure */ static int xid_map_find(const struct sunrpc_msg *rp, const u_char *bp, uint32_t *proc, uint32_t *vers) { int i; struct xid_map_entry *xmep; uint32_t xid; const struct ip *ip = (const struct ip *)bp; const struct ip6_hdr *ip6 = (const struct ip6_hdr *)bp; int cmp; UNALIGNED_MEMCPY(&xid, &rp->rm_xid, sizeof(xmep->xid)); /* Start searching from where we last left off */ i = xid_map_hint; do { xmep = &xid_map[i]; cmp = 1; if (xmep->ipver != IP_V(ip) || xmep->xid != xid) goto nextitem; switch (xmep->ipver) { case 4: if (UNALIGNED_MEMCMP(&ip->ip_src, &xmep->server, sizeof(ip->ip_src)) != 0 || UNALIGNED_MEMCMP(&ip->ip_dst, &xmep->client, 
sizeof(ip->ip_dst)) != 0) { cmp = 0; } break; case 6: if (UNALIGNED_MEMCMP(&ip6->ip6_src, &xmep->server, sizeof(ip6->ip6_src)) != 0 || UNALIGNED_MEMCMP(&ip6->ip6_dst, &xmep->client, sizeof(ip6->ip6_dst)) != 0) { cmp = 0; } break; default: cmp = 0; break; } if (cmp) { /* match */ xid_map_hint = i; *proc = xmep->proc; *vers = xmep->vers; return 0; } nextitem: if (++i >= XIDMAPSIZE) i = 0; } while (i != xid_map_hint); /* search failed */ return (-1); } /* * Routines for parsing reply packets */ /* * Return a pointer to the beginning of the actual results. * If the packet was truncated, return 0. */ static const uint32_t * parserep(netdissect_options *ndo, register const struct sunrpc_msg *rp, register u_int length) { register const uint32_t *dp; u_int len; enum sunrpc_accept_stat astat; /* * Portability note: * Here we find the address of the ar_verf credentials. * Originally, this calculation was * dp = (uint32_t *)&rp->rm_reply.rp_acpt.ar_verf * On the wire, the rp_acpt field starts immediately after * the (32 bit) rp_stat field. However, rp_acpt (which is a * "struct accepted_reply") contains a "struct opaque_auth", * whose internal representation contains a pointer, so on a * 64-bit machine the compiler inserts 32 bits of padding * before rp->rm_reply.rp_acpt.ar_verf. So, we cannot use * the internal representation to parse the on-the-wire * representation. Instead, we skip past the rp_stat field, * which is an "enum" and so occupies one 32-bit word. */ dp = ((const uint32_t *)&rp->rm_reply) + 1; ND_TCHECK(dp[1]); len = EXTRACT_32BITS(&dp[1]); if (len >= length) return (NULL); /* * skip past the ar_verf credentials. */ dp += (len + (2*sizeof(uint32_t) + 3)) / sizeof(uint32_t); /* * now we can check the ar_stat field */ ND_TCHECK(dp[0]); astat = (enum sunrpc_accept_stat) EXTRACT_32BITS(dp); if (astat != SUNRPC_SUCCESS) { ND_PRINT((ndo, " %s", tok2str(sunrpc_str, "ar_stat %d", astat))); nfserr = 1; /* suppress trunc string */ return (NULL); } /* successful return */ ND_TCHECK2(*dp, sizeof(astat)); return ((const uint32_t *) (sizeof(astat) + ((const char *)dp))); trunc: return (0); } static const uint32_t * parsestatus(netdissect_options *ndo, const uint32_t *dp, int *er) { int errnum; ND_TCHECK(dp[0]); errnum = EXTRACT_32BITS(&dp[0]); if (er) *er = errnum; if (errnum != 0) { if (!ndo->ndo_qflag) ND_PRINT((ndo, " ERROR: %s", tok2str(status2str, "unk %d", errnum))); nfserr = 1; } return (dp + 1); trunc: return NULL; } static const uint32_t * parsefattr(netdissect_options *ndo, const uint32_t *dp, int verbose, int v3) { const struct nfs_fattr *fap; fap = (const struct nfs_fattr *)dp; ND_TCHECK(fap->fa_gid); if (verbose) { ND_PRINT((ndo, " %s %o ids %d/%d", tok2str(type2str, "unk-ft %d ", EXTRACT_32BITS(&fap->fa_type)), EXTRACT_32BITS(&fap->fa_mode), EXTRACT_32BITS(&fap->fa_uid), EXTRACT_32BITS(&fap->fa_gid))); if (v3) { ND_TCHECK(fap->fa3_size); ND_PRINT((ndo, " sz %" PRIu64, EXTRACT_64BITS((const uint32_t *)&fap->fa3_size))); } else { ND_TCHECK(fap->fa2_size); ND_PRINT((ndo, " sz %d", EXTRACT_32BITS(&fap->fa2_size))); } } /* print lots more stuff */ if (verbose > 1) { if (v3) { ND_TCHECK(fap->fa3_ctime); ND_PRINT((ndo, " nlink %d rdev %d/%d", EXTRACT_32BITS(&fap->fa_nlink), EXTRACT_32BITS(&fap->fa3_rdev.specdata1), EXTRACT_32BITS(&fap->fa3_rdev.specdata2))); ND_PRINT((ndo, " fsid %" PRIx64, EXTRACT_64BITS((const uint32_t *)&fap->fa3_fsid))); ND_PRINT((ndo, " fileid %" PRIx64, EXTRACT_64BITS((const uint32_t *)&fap->fa3_fileid))); ND_PRINT((ndo, " a/m/ctime %u.%06u", 
EXTRACT_32BITS(&fap->fa3_atime.nfsv3_sec), EXTRACT_32BITS(&fap->fa3_atime.nfsv3_nsec))); ND_PRINT((ndo, " %u.%06u", EXTRACT_32BITS(&fap->fa3_mtime.nfsv3_sec), EXTRACT_32BITS(&fap->fa3_mtime.nfsv3_nsec))); ND_PRINT((ndo, " %u.%06u", EXTRACT_32BITS(&fap->fa3_ctime.nfsv3_sec), EXTRACT_32BITS(&fap->fa3_ctime.nfsv3_nsec))); } else { ND_TCHECK(fap->fa2_ctime); ND_PRINT((ndo, " nlink %d rdev 0x%x fsid 0x%x nodeid 0x%x a/m/ctime", EXTRACT_32BITS(&fap->fa_nlink), EXTRACT_32BITS(&fap->fa2_rdev), EXTRACT_32BITS(&fap->fa2_fsid), EXTRACT_32BITS(&fap->fa2_fileid))); ND_PRINT((ndo, " %u.%06u", EXTRACT_32BITS(&fap->fa2_atime.nfsv2_sec), EXTRACT_32BITS(&fap->fa2_atime.nfsv2_usec))); ND_PRINT((ndo, " %u.%06u", EXTRACT_32BITS(&fap->fa2_mtime.nfsv2_sec), EXTRACT_32BITS(&fap->fa2_mtime.nfsv2_usec))); ND_PRINT((ndo, " %u.%06u", EXTRACT_32BITS(&fap->fa2_ctime.nfsv2_sec), EXTRACT_32BITS(&fap->fa2_ctime.nfsv2_usec))); } } return ((const uint32_t *)((const unsigned char *)dp + (v3 ? NFSX_V3FATTR : NFSX_V2FATTR))); trunc: return (NULL); } static int parseattrstat(netdissect_options *ndo, const uint32_t *dp, int verbose, int v3) { int er; dp = parsestatus(ndo, dp, &er); if (dp == NULL) return (0); if (er) return (1); return (parsefattr(ndo, dp, verbose, v3) != NULL); } static int parsediropres(netdissect_options *ndo, const uint32_t *dp) { int er; if (!(dp = parsestatus(ndo, dp, &er))) return (0); if (er) return (1); dp = parsefh(ndo, dp, 0); if (dp == NULL) return (0); return (parsefattr(ndo, dp, ndo->ndo_vflag, 0) != NULL); } static int parselinkres(netdissect_options *ndo, const uint32_t *dp, int v3) { int er; dp = parsestatus(ndo, dp, &er); if (dp == NULL) return(0); if (er) return(1); if (v3 && !(dp = parse_post_op_attr(ndo, dp, ndo->ndo_vflag))) return (0); ND_PRINT((ndo, " ")); return (parsefn(ndo, dp) != NULL); } static int parsestatfs(netdissect_options *ndo, const uint32_t *dp, int v3) { const struct nfs_statfs *sfsp; int er; dp = parsestatus(ndo, dp, &er); if (dp == NULL) return (0); if (!v3 && er) return (1); if (ndo->ndo_qflag) return(1); if (v3) { if (ndo->ndo_vflag) ND_PRINT((ndo, " POST:")); if (!(dp = parse_post_op_attr(ndo, dp, ndo->ndo_vflag))) return (0); } ND_TCHECK2(*dp, (v3 ? 
NFSX_V3STATFS : NFSX_V2STATFS)); sfsp = (const struct nfs_statfs *)dp; if (v3) { ND_PRINT((ndo, " tbytes %" PRIu64 " fbytes %" PRIu64 " abytes %" PRIu64, EXTRACT_64BITS((const uint32_t *)&sfsp->sf_tbytes), EXTRACT_64BITS((const uint32_t *)&sfsp->sf_fbytes), EXTRACT_64BITS((const uint32_t *)&sfsp->sf_abytes))); if (ndo->ndo_vflag) { ND_PRINT((ndo, " tfiles %" PRIu64 " ffiles %" PRIu64 " afiles %" PRIu64 " invar %u", EXTRACT_64BITS((const uint32_t *)&sfsp->sf_tfiles), EXTRACT_64BITS((const uint32_t *)&sfsp->sf_ffiles), EXTRACT_64BITS((const uint32_t *)&sfsp->sf_afiles), EXTRACT_32BITS(&sfsp->sf_invarsec))); } } else { ND_PRINT((ndo, " tsize %d bsize %d blocks %d bfree %d bavail %d", EXTRACT_32BITS(&sfsp->sf_tsize), EXTRACT_32BITS(&sfsp->sf_bsize), EXTRACT_32BITS(&sfsp->sf_blocks), EXTRACT_32BITS(&sfsp->sf_bfree), EXTRACT_32BITS(&sfsp->sf_bavail))); } return (1); trunc: return (0); } static int parserddires(netdissect_options *ndo, const uint32_t *dp) { int er; dp = parsestatus(ndo, dp, &er); if (dp == NULL) return (0); if (er) return (1); if (ndo->ndo_qflag) return (1); ND_TCHECK(dp[2]); ND_PRINT((ndo, " offset 0x%x size %d ", EXTRACT_32BITS(&dp[0]), EXTRACT_32BITS(&dp[1]))); if (dp[2] != 0) ND_PRINT((ndo, " eof")); return (1); trunc: return (0); } static const uint32_t * parse_wcc_attr(netdissect_options *ndo, const uint32_t *dp) { /* Our caller has already checked this */ ND_PRINT((ndo, " sz %" PRIu64, EXTRACT_64BITS(&dp[0]))); ND_PRINT((ndo, " mtime %u.%06u ctime %u.%06u", EXTRACT_32BITS(&dp[2]), EXTRACT_32BITS(&dp[3]), EXTRACT_32BITS(&dp[4]), EXTRACT_32BITS(&dp[5]))); return (dp + 6); } /* * Pre operation attributes. Print only if vflag > 1. */ static const uint32_t * parse_pre_op_attr(netdissect_options *ndo, const uint32_t *dp, int verbose) { ND_TCHECK(dp[0]); if (!EXTRACT_32BITS(&dp[0])) return (dp + 1); dp++; ND_TCHECK2(*dp, 24); if (verbose > 1) { return parse_wcc_attr(ndo, dp); } else { /* If not verbose enough, just skip over wcc_attr */ return (dp + 6); } trunc: return (NULL); } /* * Post operation attributes are printed if vflag >= 1 */ static const uint32_t * parse_post_op_attr(netdissect_options *ndo, const uint32_t *dp, int verbose) { ND_TCHECK(dp[0]); if (!EXTRACT_32BITS(&dp[0])) return (dp + 1); dp++; if (verbose) { return parsefattr(ndo, dp, verbose, 1); } else return (dp + (NFSX_V3FATTR / sizeof (uint32_t))); trunc: return (NULL); } static const uint32_t * parse_wcc_data(netdissect_options *ndo, const uint32_t *dp, int verbose) { if (verbose > 1) ND_PRINT((ndo, " PRE:")); if (!(dp = parse_pre_op_attr(ndo, dp, verbose))) return (0); if (verbose) ND_PRINT((ndo, " POST:")); return parse_post_op_attr(ndo, dp, verbose); } static const uint32_t * parsecreateopres(netdissect_options *ndo, const uint32_t *dp, int verbose) { int er; if (!(dp = parsestatus(ndo, dp, &er))) return (0); if (er) dp = parse_wcc_data(ndo, dp, verbose); else { ND_TCHECK(dp[0]); if (!EXTRACT_32BITS(&dp[0])) return (dp + 1); dp++; if (!(dp = parsefh(ndo, dp, 1))) return (0); if (verbose) { if (!(dp = parse_post_op_attr(ndo, dp, verbose))) return (0); if (ndo->ndo_vflag > 1) { ND_PRINT((ndo, " dir attr:")); dp = parse_wcc_data(ndo, dp, verbose); } } } return (dp); trunc: return (NULL); } static int parsewccres(netdissect_options *ndo, const uint32_t *dp, int verbose) { int er; if (!(dp = parsestatus(ndo, dp, &er))) return (0); return parse_wcc_data(ndo, dp, verbose) != NULL; } static const uint32_t * parsev3rddirres(netdissect_options *ndo, const uint32_t *dp, int verbose) { int er; if (!(dp = 
parsestatus(ndo, dp, &er))) return (0); if (ndo->ndo_vflag) ND_PRINT((ndo, " POST:")); if (!(dp = parse_post_op_attr(ndo, dp, verbose))) return (0); if (er) return dp; if (ndo->ndo_vflag) { ND_TCHECK(dp[1]); ND_PRINT((ndo, " verf %08x%08x", dp[0], dp[1])); dp += 2; } return dp; trunc: return (NULL); } static int parsefsinfo(netdissect_options *ndo, const uint32_t *dp) { const struct nfsv3_fsinfo *sfp; int er; if (!(dp = parsestatus(ndo, dp, &er))) return (0); if (ndo->ndo_vflag) ND_PRINT((ndo, " POST:")); if (!(dp = parse_post_op_attr(ndo, dp, ndo->ndo_vflag))) return (0); if (er) return (1); sfp = (const struct nfsv3_fsinfo *)dp; ND_TCHECK(*sfp); ND_PRINT((ndo, " rtmax %u rtpref %u wtmax %u wtpref %u dtpref %u", EXTRACT_32BITS(&sfp->fs_rtmax), EXTRACT_32BITS(&sfp->fs_rtpref), EXTRACT_32BITS(&sfp->fs_wtmax), EXTRACT_32BITS(&sfp->fs_wtpref), EXTRACT_32BITS(&sfp->fs_dtpref))); if (ndo->ndo_vflag) { ND_PRINT((ndo, " rtmult %u wtmult %u maxfsz %" PRIu64, EXTRACT_32BITS(&sfp->fs_rtmult), EXTRACT_32BITS(&sfp->fs_wtmult), EXTRACT_64BITS((const uint32_t *)&sfp->fs_maxfilesize))); ND_PRINT((ndo, " delta %u.%06u ", EXTRACT_32BITS(&sfp->fs_timedelta.nfsv3_sec), EXTRACT_32BITS(&sfp->fs_timedelta.nfsv3_nsec))); } return (1); trunc: return (0); } static int parsepathconf(netdissect_options *ndo, const uint32_t *dp) { int er; const struct nfsv3_pathconf *spp; if (!(dp = parsestatus(ndo, dp, &er))) return (0); if (ndo->ndo_vflag) ND_PRINT((ndo, " POST:")); if (!(dp = parse_post_op_attr(ndo, dp, ndo->ndo_vflag))) return (0); if (er) return (1); spp = (const struct nfsv3_pathconf *)dp; ND_TCHECK(*spp); ND_PRINT((ndo, " linkmax %u namemax %u %s %s %s %s", EXTRACT_32BITS(&spp->pc_linkmax), EXTRACT_32BITS(&spp->pc_namemax), EXTRACT_32BITS(&spp->pc_notrunc) ? "notrunc" : "", EXTRACT_32BITS(&spp->pc_chownrestricted) ? "chownres" : "", EXTRACT_32BITS(&spp->pc_caseinsensitive) ? "igncase" : "", EXTRACT_32BITS(&spp->pc_casepreserving) ? 
"keepcase" : "")); return (1); trunc: return (0); } static void interp_reply(netdissect_options *ndo, const struct sunrpc_msg *rp, uint32_t proc, uint32_t vers, int length) { register const uint32_t *dp; register int v3; int er; v3 = (vers == NFS_VER3); if (!v3 && proc < NFS_NPROCS) proc = nfsv3_procid[proc]; ND_PRINT((ndo, " %s", tok2str(nfsproc_str, "proc-%u", proc))); switch (proc) { case NFSPROC_GETATTR: dp = parserep(ndo, rp, length); if (dp != NULL && parseattrstat(ndo, dp, !ndo->ndo_qflag, v3) != 0) return; break; case NFSPROC_SETATTR: if (!(dp = parserep(ndo, rp, length))) return; if (v3) { if (parsewccres(ndo, dp, ndo->ndo_vflag)) return; } else { if (parseattrstat(ndo, dp, !ndo->ndo_qflag, 0) != 0) return; } break; case NFSPROC_LOOKUP: if (!(dp = parserep(ndo, rp, length))) break; if (v3) { if (!(dp = parsestatus(ndo, dp, &er))) break; if (er) { if (ndo->ndo_vflag > 1) { ND_PRINT((ndo, " post dattr:")); dp = parse_post_op_attr(ndo, dp, ndo->ndo_vflag); } } else { if (!(dp = parsefh(ndo, dp, v3))) break; if ((dp = parse_post_op_attr(ndo, dp, ndo->ndo_vflag)) && ndo->ndo_vflag > 1) { ND_PRINT((ndo, " post dattr:")); dp = parse_post_op_attr(ndo, dp, ndo->ndo_vflag); } } if (dp) return; } else { if (parsediropres(ndo, dp) != 0) return; } break; case NFSPROC_ACCESS: if (!(dp = parserep(ndo, rp, length))) break; if (!(dp = parsestatus(ndo, dp, &er))) break; if (ndo->ndo_vflag) ND_PRINT((ndo, " attr:")); if (!(dp = parse_post_op_attr(ndo, dp, ndo->ndo_vflag))) break; if (!er) { ND_TCHECK(dp[0]); ND_PRINT((ndo, " c %04x", EXTRACT_32BITS(&dp[0]))); } return; case NFSPROC_READLINK: dp = parserep(ndo, rp, length); if (dp != NULL && parselinkres(ndo, dp, v3) != 0) return; break; case NFSPROC_READ: if (!(dp = parserep(ndo, rp, length))) break; if (v3) { if (!(dp = parsestatus(ndo, dp, &er))) break; if (!(dp = parse_post_op_attr(ndo, dp, ndo->ndo_vflag))) break; if (er) return; if (ndo->ndo_vflag) { ND_TCHECK(dp[1]); ND_PRINT((ndo, " %u bytes", EXTRACT_32BITS(&dp[0]))); if (EXTRACT_32BITS(&dp[1])) ND_PRINT((ndo, " EOF")); } return; } else { if (parseattrstat(ndo, dp, ndo->ndo_vflag, 0) != 0) return; } break; case NFSPROC_WRITE: if (!(dp = parserep(ndo, rp, length))) break; if (v3) { if (!(dp = parsestatus(ndo, dp, &er))) break; if (!(dp = parse_wcc_data(ndo, dp, ndo->ndo_vflag))) break; if (er) return; if (ndo->ndo_vflag) { ND_TCHECK(dp[0]); ND_PRINT((ndo, " %u bytes", EXTRACT_32BITS(&dp[0]))); if (ndo->ndo_vflag > 1) { ND_TCHECK(dp[1]); ND_PRINT((ndo, " <%s>", tok2str(nfsv3_writemodes, NULL, EXTRACT_32BITS(&dp[1])))); } return; } } else { if (parseattrstat(ndo, dp, ndo->ndo_vflag, v3) != 0) return; } break; case NFSPROC_CREATE: case NFSPROC_MKDIR: if (!(dp = parserep(ndo, rp, length))) break; if (v3) { if (parsecreateopres(ndo, dp, ndo->ndo_vflag) != NULL) return; } else { if (parsediropres(ndo, dp) != 0) return; } break; case NFSPROC_SYMLINK: if (!(dp = parserep(ndo, rp, length))) break; if (v3) { if (parsecreateopres(ndo, dp, ndo->ndo_vflag) != NULL) return; } else { if (parsestatus(ndo, dp, &er) != NULL) return; } break; case NFSPROC_MKNOD: if (!(dp = parserep(ndo, rp, length))) break; if (parsecreateopres(ndo, dp, ndo->ndo_vflag) != NULL) return; break; case NFSPROC_REMOVE: case NFSPROC_RMDIR: if (!(dp = parserep(ndo, rp, length))) break; if (v3) { if (parsewccres(ndo, dp, ndo->ndo_vflag)) return; } else { if (parsestatus(ndo, dp, &er) != NULL) return; } break; case NFSPROC_RENAME: if (!(dp = parserep(ndo, rp, length))) break; if (v3) { if (!(dp = parsestatus(ndo, dp, &er))) break; if 
(ndo->ndo_vflag) { ND_PRINT((ndo, " from:")); if (!(dp = parse_wcc_data(ndo, dp, ndo->ndo_vflag))) break; ND_PRINT((ndo, " to:")); if (!(dp = parse_wcc_data(ndo, dp, ndo->ndo_vflag))) break; } return; } else { if (parsestatus(ndo, dp, &er) != NULL) return; } break; case NFSPROC_LINK: if (!(dp = parserep(ndo, rp, length))) break; if (v3) { if (!(dp = parsestatus(ndo, dp, &er))) break; if (ndo->ndo_vflag) { ND_PRINT((ndo, " file POST:")); if (!(dp = parse_post_op_attr(ndo, dp, ndo->ndo_vflag))) break; ND_PRINT((ndo, " dir:")); if (!(dp = parse_wcc_data(ndo, dp, ndo->ndo_vflag))) break; return; } } else { if (parsestatus(ndo, dp, &er) != NULL) return; } break; case NFSPROC_READDIR: if (!(dp = parserep(ndo, rp, length))) break; if (v3) { if (parsev3rddirres(ndo, dp, ndo->ndo_vflag)) return; } else { if (parserddires(ndo, dp) != 0) return; } break; case NFSPROC_READDIRPLUS: if (!(dp = parserep(ndo, rp, length))) break; if (parsev3rddirres(ndo, dp, ndo->ndo_vflag)) return; break; case NFSPROC_FSSTAT: dp = parserep(ndo, rp, length); if (dp != NULL && parsestatfs(ndo, dp, v3) != 0) return; break; case NFSPROC_FSINFO: dp = parserep(ndo, rp, length); if (dp != NULL && parsefsinfo(ndo, dp) != 0) return; break; case NFSPROC_PATHCONF: dp = parserep(ndo, rp, length); if (dp != NULL && parsepathconf(ndo, dp) != 0) return; break; case NFSPROC_COMMIT: dp = parserep(ndo, rp, length); if (dp != NULL && parsewccres(ndo, dp, ndo->ndo_vflag) != 0) return; break; default: return; } trunc: if (!nfserr) ND_PRINT((ndo, "%s", tstr)); }
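[Editor's note] One repeated idiom in the parsereq()/parserep() functions above deserves a gloss: dp += (len + (2 * sizeof(*dp) + 3)) / sizeof(*dp) skips an XDR opaque_auth, that is, two 32-bit words (flavor and body length) plus the body rounded up to a 4-byte boundary. A stand-alone check of that arithmetic follows; auth_words_to_skip() is an illustrative helper name of mine, not a function in the file.

#include <assert.h>
#include <stdint.h>

/*
 * Words to advance past an XDR opaque_auth whose body is body_len
 * bytes: 2 header words plus the body padded to a 4-byte boundary.
 */
static unsigned
auth_words_to_skip(unsigned body_len)
{
	return (unsigned)((body_len + (2 * sizeof(uint32_t) + 3)) / sizeof(uint32_t));
}

int
main(void)
{
	assert(auth_words_to_skip(0)  == 2);	/* header only: (0+11)/4  */
	assert(auth_words_to_skip(10) == 5);	/* 8 + 12 bytes: (10+11)/4 */
	assert(auth_words_to_skip(12) == 5);	/* already aligned: (12+11)/4 */
	return 0;
}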
interp_reply(netdissect_options *ndo, const struct sunrpc_msg *rp, uint32_t proc, uint32_t vers, int length) { register const uint32_t *dp; register int v3; int er; v3 = (vers == NFS_VER3); if (!v3 && proc < NFS_NPROCS) proc = nfsv3_procid[proc]; ND_PRINT((ndo, " %s", tok2str(nfsproc_str, "proc-%u", proc))); switch (proc) { case NFSPROC_GETATTR: dp = parserep(ndo, rp, length); if (dp != NULL && parseattrstat(ndo, dp, !ndo->ndo_qflag, v3) != 0) return; break; case NFSPROC_SETATTR: if (!(dp = parserep(ndo, rp, length))) return; if (v3) { if (parsewccres(ndo, dp, ndo->ndo_vflag)) return; } else { if (parseattrstat(ndo, dp, !ndo->ndo_qflag, 0) != 0) return; } break; case NFSPROC_LOOKUP: if (!(dp = parserep(ndo, rp, length))) break; if (v3) { if (!(dp = parsestatus(ndo, dp, &er))) break; if (er) { if (ndo->ndo_vflag > 1) { ND_PRINT((ndo, " post dattr:")); dp = parse_post_op_attr(ndo, dp, ndo->ndo_vflag); } } else { if (!(dp = parsefh(ndo, dp, v3))) break; if ((dp = parse_post_op_attr(ndo, dp, ndo->ndo_vflag)) && ndo->ndo_vflag > 1) { ND_PRINT((ndo, " post dattr:")); dp = parse_post_op_attr(ndo, dp, ndo->ndo_vflag); } } if (dp) return; } else { if (parsediropres(ndo, dp) != 0) return; } break; case NFSPROC_ACCESS: if (!(dp = parserep(ndo, rp, length))) break; if (!(dp = parsestatus(ndo, dp, &er))) break; if (ndo->ndo_vflag) ND_PRINT((ndo, " attr:")); if (!(dp = parse_post_op_attr(ndo, dp, ndo->ndo_vflag))) break; if (!er) ND_PRINT((ndo, " c %04x", EXTRACT_32BITS(&dp[0]))); return; case NFSPROC_READLINK: dp = parserep(ndo, rp, length); if (dp != NULL && parselinkres(ndo, dp, v3) != 0) return; break; case NFSPROC_READ: if (!(dp = parserep(ndo, rp, length))) break; if (v3) { if (!(dp = parsestatus(ndo, dp, &er))) break; if (!(dp = parse_post_op_attr(ndo, dp, ndo->ndo_vflag))) break; if (er) return; if (ndo->ndo_vflag) { ND_TCHECK(dp[1]); ND_PRINT((ndo, " %u bytes", EXTRACT_32BITS(&dp[0]))); if (EXTRACT_32BITS(&dp[1])) ND_PRINT((ndo, " EOF")); } return; } else { if (parseattrstat(ndo, dp, ndo->ndo_vflag, 0) != 0) return; } break; case NFSPROC_WRITE: if (!(dp = parserep(ndo, rp, length))) break; if (v3) { if (!(dp = parsestatus(ndo, dp, &er))) break; if (!(dp = parse_wcc_data(ndo, dp, ndo->ndo_vflag))) break; if (er) return; if (ndo->ndo_vflag) { ND_TCHECK(dp[0]); ND_PRINT((ndo, " %u bytes", EXTRACT_32BITS(&dp[0]))); if (ndo->ndo_vflag > 1) { ND_TCHECK(dp[1]); ND_PRINT((ndo, " <%s>", tok2str(nfsv3_writemodes, NULL, EXTRACT_32BITS(&dp[1])))); } return; } } else { if (parseattrstat(ndo, dp, ndo->ndo_vflag, v3) != 0) return; } break; case NFSPROC_CREATE: case NFSPROC_MKDIR: if (!(dp = parserep(ndo, rp, length))) break; if (v3) { if (parsecreateopres(ndo, dp, ndo->ndo_vflag) != NULL) return; } else { if (parsediropres(ndo, dp) != 0) return; } break; case NFSPROC_SYMLINK: if (!(dp = parserep(ndo, rp, length))) break; if (v3) { if (parsecreateopres(ndo, dp, ndo->ndo_vflag) != NULL) return; } else { if (parsestatus(ndo, dp, &er) != NULL) return; } break; case NFSPROC_MKNOD: if (!(dp = parserep(ndo, rp, length))) break; if (parsecreateopres(ndo, dp, ndo->ndo_vflag) != NULL) return; break; case NFSPROC_REMOVE: case NFSPROC_RMDIR: if (!(dp = parserep(ndo, rp, length))) break; if (v3) { if (parsewccres(ndo, dp, ndo->ndo_vflag)) return; } else { if (parsestatus(ndo, dp, &er) != NULL) return; } break; case NFSPROC_RENAME: if (!(dp = parserep(ndo, rp, length))) break; if (v3) { if (!(dp = parsestatus(ndo, dp, &er))) break; if (ndo->ndo_vflag) { ND_PRINT((ndo, " from:")); if (!(dp = parse_wcc_data(ndo, dp, 
ndo->ndo_vflag))) break; ND_PRINT((ndo, " to:")); if (!(dp = parse_wcc_data(ndo, dp, ndo->ndo_vflag))) break; } return; } else { if (parsestatus(ndo, dp, &er) != NULL) return; } break; case NFSPROC_LINK: if (!(dp = parserep(ndo, rp, length))) break; if (v3) { if (!(dp = parsestatus(ndo, dp, &er))) break; if (ndo->ndo_vflag) { ND_PRINT((ndo, " file POST:")); if (!(dp = parse_post_op_attr(ndo, dp, ndo->ndo_vflag))) break; ND_PRINT((ndo, " dir:")); if (!(dp = parse_wcc_data(ndo, dp, ndo->ndo_vflag))) break; return; } } else { if (parsestatus(ndo, dp, &er) != NULL) return; } break; case NFSPROC_READDIR: if (!(dp = parserep(ndo, rp, length))) break; if (v3) { if (parsev3rddirres(ndo, dp, ndo->ndo_vflag)) return; } else { if (parserddires(ndo, dp) != 0) return; } break; case NFSPROC_READDIRPLUS: if (!(dp = parserep(ndo, rp, length))) break; if (parsev3rddirres(ndo, dp, ndo->ndo_vflag)) return; break; case NFSPROC_FSSTAT: dp = parserep(ndo, rp, length); if (dp != NULL && parsestatfs(ndo, dp, v3) != 0) return; break; case NFSPROC_FSINFO: dp = parserep(ndo, rp, length); if (dp != NULL && parsefsinfo(ndo, dp) != 0) return; break; case NFSPROC_PATHCONF: dp = parserep(ndo, rp, length); if (dp != NULL && parsepathconf(ndo, dp) != 0) return; break; case NFSPROC_COMMIT: dp = parserep(ndo, rp, length); if (dp != NULL && parsewccres(ndo, dp, ndo->ndo_vflag) != 0) return; break; default: return; } trunc: if (!nfserr) ND_PRINT((ndo, "%s", tstr)); }
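[Editor's note] Before comparing the pre- and post-patch copies of interp_reply, note the one hunk in this record that adds a comment instead of a check: parse_wcc_attr() gains "Our caller has already checked this", because its only caller, parse_pre_op_attr(), runs ND_TCHECK2(*dp, 24) before handing over the pointer, covering all six 32-bit words the callee reads. The sketch below models that caller-checks contract; captured_ok() and the function names are simplified stand-ins of mine, not tcpdump's API.

#include <stdint.h>
#include <stddef.h>

static const unsigned char *snapend;	/* one past the last captured byte */

static int
captured_ok(const void *p, size_t len)	/* stand-in for ND_TCHECK2's test */
{
	return (const unsigned char *)p + len <= snapend;
}

static const uint32_t *
wcc_attr(const uint32_t *dp)
{
	/* No check here: the caller already proved dp[0]..dp[5] exist. */
	volatile uint32_t size_hi = dp[0], ctime_nsec = dp[5];
	(void)size_hi; (void)ctime_nsec;
	return dp + 6;
}

static const uint32_t *
pre_op_attr(const uint32_t *dp)
{
	if (!captured_ok(dp, 24))	/* one 24-byte check covers the callee */
		return NULL;
	return wcc_attr(dp);
}

int
main(void)
{
	uint32_t six[6] = {0};
	snapend = (const unsigned char *)(six + 6);
	return pre_op_attr(six) ? 0 : 1;
}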
interp_reply(netdissect_options *ndo, const struct sunrpc_msg *rp, uint32_t proc, uint32_t vers, int length) { register const uint32_t *dp; register int v3; int er; v3 = (vers == NFS_VER3); if (!v3 && proc < NFS_NPROCS) proc = nfsv3_procid[proc]; ND_PRINT((ndo, " %s", tok2str(nfsproc_str, "proc-%u", proc))); switch (proc) { case NFSPROC_GETATTR: dp = parserep(ndo, rp, length); if (dp != NULL && parseattrstat(ndo, dp, !ndo->ndo_qflag, v3) != 0) return; break; case NFSPROC_SETATTR: if (!(dp = parserep(ndo, rp, length))) return; if (v3) { if (parsewccres(ndo, dp, ndo->ndo_vflag)) return; } else { if (parseattrstat(ndo, dp, !ndo->ndo_qflag, 0) != 0) return; } break; case NFSPROC_LOOKUP: if (!(dp = parserep(ndo, rp, length))) break; if (v3) { if (!(dp = parsestatus(ndo, dp, &er))) break; if (er) { if (ndo->ndo_vflag > 1) { ND_PRINT((ndo, " post dattr:")); dp = parse_post_op_attr(ndo, dp, ndo->ndo_vflag); } } else { if (!(dp = parsefh(ndo, dp, v3))) break; if ((dp = parse_post_op_attr(ndo, dp, ndo->ndo_vflag)) && ndo->ndo_vflag > 1) { ND_PRINT((ndo, " post dattr:")); dp = parse_post_op_attr(ndo, dp, ndo->ndo_vflag); } } if (dp) return; } else { if (parsediropres(ndo, dp) != 0) return; } break; case NFSPROC_ACCESS: if (!(dp = parserep(ndo, rp, length))) break; if (!(dp = parsestatus(ndo, dp, &er))) break; if (ndo->ndo_vflag) ND_PRINT((ndo, " attr:")); if (!(dp = parse_post_op_attr(ndo, dp, ndo->ndo_vflag))) break; if (!er) { ND_TCHECK(dp[0]); ND_PRINT((ndo, " c %04x", EXTRACT_32BITS(&dp[0]))); } return; case NFSPROC_READLINK: dp = parserep(ndo, rp, length); if (dp != NULL && parselinkres(ndo, dp, v3) != 0) return; break; case NFSPROC_READ: if (!(dp = parserep(ndo, rp, length))) break; if (v3) { if (!(dp = parsestatus(ndo, dp, &er))) break; if (!(dp = parse_post_op_attr(ndo, dp, ndo->ndo_vflag))) break; if (er) return; if (ndo->ndo_vflag) { ND_TCHECK(dp[1]); ND_PRINT((ndo, " %u bytes", EXTRACT_32BITS(&dp[0]))); if (EXTRACT_32BITS(&dp[1])) ND_PRINT((ndo, " EOF")); } return; } else { if (parseattrstat(ndo, dp, ndo->ndo_vflag, 0) != 0) return; } break; case NFSPROC_WRITE: if (!(dp = parserep(ndo, rp, length))) break; if (v3) { if (!(dp = parsestatus(ndo, dp, &er))) break; if (!(dp = parse_wcc_data(ndo, dp, ndo->ndo_vflag))) break; if (er) return; if (ndo->ndo_vflag) { ND_TCHECK(dp[0]); ND_PRINT((ndo, " %u bytes", EXTRACT_32BITS(&dp[0]))); if (ndo->ndo_vflag > 1) { ND_TCHECK(dp[1]); ND_PRINT((ndo, " <%s>", tok2str(nfsv3_writemodes, NULL, EXTRACT_32BITS(&dp[1])))); } return; } } else { if (parseattrstat(ndo, dp, ndo->ndo_vflag, v3) != 0) return; } break; case NFSPROC_CREATE: case NFSPROC_MKDIR: if (!(dp = parserep(ndo, rp, length))) break; if (v3) { if (parsecreateopres(ndo, dp, ndo->ndo_vflag) != NULL) return; } else { if (parsediropres(ndo, dp) != 0) return; } break; case NFSPROC_SYMLINK: if (!(dp = parserep(ndo, rp, length))) break; if (v3) { if (parsecreateopres(ndo, dp, ndo->ndo_vflag) != NULL) return; } else { if (parsestatus(ndo, dp, &er) != NULL) return; } break; case NFSPROC_MKNOD: if (!(dp = parserep(ndo, rp, length))) break; if (parsecreateopres(ndo, dp, ndo->ndo_vflag) != NULL) return; break; case NFSPROC_REMOVE: case NFSPROC_RMDIR: if (!(dp = parserep(ndo, rp, length))) break; if (v3) { if (parsewccres(ndo, dp, ndo->ndo_vflag)) return; } else { if (parsestatus(ndo, dp, &er) != NULL) return; } break; case NFSPROC_RENAME: if (!(dp = parserep(ndo, rp, length))) break; if (v3) { if (!(dp = parsestatus(ndo, dp, &er))) break; if (ndo->ndo_vflag) { ND_PRINT((ndo, " from:")); if (!(dp = 
parse_wcc_data(ndo, dp, ndo->ndo_vflag))) break; ND_PRINT((ndo, " to:")); if (!(dp = parse_wcc_data(ndo, dp, ndo->ndo_vflag))) break; } return; } else { if (parsestatus(ndo, dp, &er) != NULL) return; } break; case NFSPROC_LINK: if (!(dp = parserep(ndo, rp, length))) break; if (v3) { if (!(dp = parsestatus(ndo, dp, &er))) break; if (ndo->ndo_vflag) { ND_PRINT((ndo, " file POST:")); if (!(dp = parse_post_op_attr(ndo, dp, ndo->ndo_vflag))) break; ND_PRINT((ndo, " dir:")); if (!(dp = parse_wcc_data(ndo, dp, ndo->ndo_vflag))) break; return; } } else { if (parsestatus(ndo, dp, &er) != NULL) return; } break; case NFSPROC_READDIR: if (!(dp = parserep(ndo, rp, length))) break; if (v3) { if (parsev3rddirres(ndo, dp, ndo->ndo_vflag)) return; } else { if (parserddires(ndo, dp) != 0) return; } break; case NFSPROC_READDIRPLUS: if (!(dp = parserep(ndo, rp, length))) break; if (parsev3rddirres(ndo, dp, ndo->ndo_vflag)) return; break; case NFSPROC_FSSTAT: dp = parserep(ndo, rp, length); if (dp != NULL && parsestatfs(ndo, dp, v3) != 0) return; break; case NFSPROC_FSINFO: dp = parserep(ndo, rp, length); if (dp != NULL && parsefsinfo(ndo, dp) != 0) return; break; case NFSPROC_PATHCONF: dp = parserep(ndo, rp, length); if (dp != NULL && parsepathconf(ndo, dp) != 0) return; break; case NFSPROC_COMMIT: dp = parserep(ndo, rp, length); if (dp != NULL && parsewccres(ndo, dp, ndo->ndo_vflag) != 0) return; break; default: return; } trunc: if (!nfserr) ND_PRINT((ndo, "%s", tstr)); }
{'added': [(631, '\t\t\t\tND_TCHECK(dp[4]);'),
           (639, '\t\t\t\t\t\t\tNULL, EXTRACT_32BITS(&dp[3]))));'),
           (1007, '\tND_TCHECK(dp[0]);'),
           (1244, '\t/* Our caller has already checked this */'),
           (1513, '\t\tif (!er) {'),
           (1514, '\t\t\tND_TCHECK(dp[0]);'),
           (1516, '\t\t}')],
 'deleted': [(631, '\t\t\t\tND_TCHECK(dp[2]);'),
             (637, '\t\t\t\t\tdp += 3;'),
             (638, '\t\t\t\t\tND_TCHECK(dp[0]);'),
             (641, '\t\t\t\t\t\t\tNULL, EXTRACT_32BITS(dp))));'),
             (1005, '\tND_TCHECK2(dp[0], 0);'),
             (1514, '\t\tif (!er)')]}
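[Editor's note] Mapping those hunks onto the code: lines 631/639 tighten nfsreq_print_noaddr's v3 WRITE case (check dp[4], the highest word read, instead of dp[2], and drop the dp += 3 dance); 1005/1007 replace parserep's degenerate zero-length ND_TCHECK2(dp[0], 0) with a real ND_TCHECK(dp[0]) before the ar_stat extraction; 1244 documents why parse_wcc_attr needs no check of its own; and 1513-1516 are the interp_reply NFSPROC_ACCESS hunk, reproduced verbatim below from the func_before/func_after pair, since it is the shape of the whole patch.

	/* before: out-of-bounds read if dp[0] lies past the captured data */
	if (!er)
		ND_PRINT((ndo, " c %04x", EXTRACT_32BITS(&dp[0])));

	/* after: guard the word, falling through to the trunc label if short */
	if (!er) {
		ND_TCHECK(dp[0]);
		ND_PRINT((ndo, " c %04x", EXTRACT_32BITS(&dp[0])));
	}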
7
6
1400
9286

https://github.com/the-tcpdump-group/tcpdump
CVE-2017-12898
['CWE-125']
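CWE-125 is an out-of-bounds read: the printer trusted packet-derived offsets and lengths to stay inside the captured bytes. The same trust shows up wherever these NFS routines skip variable-length XDR data, e.g. parsereq advancing dp by a credential length taken from the packet itself, which is why every such skip needs a fresh bounds check afterwards. A sketch of length-validated skipping over a padded XDR opaque, assuming a simple pointer-plus-remaining cursor (names are illustrative, not tcpdump's):

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

struct cursor { const uint8_t *p; size_t left; };   /* captured region */

/* Skip an XDR opaque whose length came from the packet. XDR pads
 * opaques to a 4-byte boundary; refuse to move the cursor past the
 * end of the captured bytes (or past an arithmetic overflow). */
static int
skip_xdr_opaque(struct cursor *c, uint32_t len)
{
    size_t padded = ((size_t)len + 3) & ~(size_t)3;

    if (padded < len || padded > c->left)
        return 0;                       /* truncated: stop parsing */
    c->p += padded;
    c->left -= padded;
    return 1;
}

int
main(void)
{
    uint8_t buf[16] = {0};
    struct cursor c = { buf, sizeof(buf) };

    printf("%d\n", skip_xdr_opaque(&c, 5));   /* 1: 5 pads to 8 <= 16 */
    printf("%d\n", skip_xdr_opaque(&c, 9));   /* 0: 9 pads to 12 > 8  */
    return 0;
}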
print-nfs.c
nfsreq_print_noaddr
/* * Copyright (c) 1988, 1989, 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that: (1) source code distributions * retain the above copyright notice and this paragraph in its entirety, (2) * distributions including binary code include the above copyright notice and * this paragraph in its entirety in the documentation or other materials * provided with the distribution, and (3) all advertising materials mentioning * features or use of this software display the following acknowledgement: * ``This product includes software developed by the University of California, * Lawrence Berkeley Laboratory and its contributors.'' Neither the name of * the University nor the names of its contributors may be used to endorse * or promote products derived from this software without specific prior * written permission. * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. */ /* \summary: Network File System (NFS) printer */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include <netdissect-stdinc.h> #include <stdio.h> #include <string.h> #include "netdissect.h" #include "addrtoname.h" #include "extract.h" #include "nfs.h" #include "nfsfh.h" #include "ip.h" #include "ip6.h" #include "rpc_auth.h" #include "rpc_msg.h" static const char tstr[] = " [|nfs]"; static void nfs_printfh(netdissect_options *, const uint32_t *, const u_int); static int xid_map_enter(netdissect_options *, const struct sunrpc_msg *, const u_char *); static int xid_map_find(const struct sunrpc_msg *, const u_char *, uint32_t *, uint32_t *); static void interp_reply(netdissect_options *, const struct sunrpc_msg *, uint32_t, uint32_t, int); static const uint32_t *parse_post_op_attr(netdissect_options *, const uint32_t *, int); /* * Mapping of old NFS Version 2 RPC numbers to generic numbers. */ static uint32_t nfsv3_procid[NFS_NPROCS] = { NFSPROC_NULL, NFSPROC_GETATTR, NFSPROC_SETATTR, NFSPROC_NOOP, NFSPROC_LOOKUP, NFSPROC_READLINK, NFSPROC_READ, NFSPROC_NOOP, NFSPROC_WRITE, NFSPROC_CREATE, NFSPROC_REMOVE, NFSPROC_RENAME, NFSPROC_LINK, NFSPROC_SYMLINK, NFSPROC_MKDIR, NFSPROC_RMDIR, NFSPROC_READDIR, NFSPROC_FSSTAT, NFSPROC_NOOP, NFSPROC_NOOP, NFSPROC_NOOP, NFSPROC_NOOP, NFSPROC_NOOP, NFSPROC_NOOP, NFSPROC_NOOP, NFSPROC_NOOP }; static const struct tok nfsproc_str[] = { { NFSPROC_NOOP, "nop" }, { NFSPROC_NULL, "null" }, { NFSPROC_GETATTR, "getattr" }, { NFSPROC_SETATTR, "setattr" }, { NFSPROC_LOOKUP, "lookup" }, { NFSPROC_ACCESS, "access" }, { NFSPROC_READLINK, "readlink" }, { NFSPROC_READ, "read" }, { NFSPROC_WRITE, "write" }, { NFSPROC_CREATE, "create" }, { NFSPROC_MKDIR, "mkdir" }, { NFSPROC_SYMLINK, "symlink" }, { NFSPROC_MKNOD, "mknod" }, { NFSPROC_REMOVE, "remove" }, { NFSPROC_RMDIR, "rmdir" }, { NFSPROC_RENAME, "rename" }, { NFSPROC_LINK, "link" }, { NFSPROC_READDIR, "readdir" }, { NFSPROC_READDIRPLUS, "readdirplus" }, { NFSPROC_FSSTAT, "fsstat" }, { NFSPROC_FSINFO, "fsinfo" }, { NFSPROC_PATHCONF, "pathconf" }, { NFSPROC_COMMIT, "commit" }, { 0, NULL } }; /* * NFS V2 and V3 status values. * * Some of these come from the RFCs for NFS V2 and V3, with the message * strings taken from the FreeBSD C library "errlst.c". 
* * Others are errors that are not in the RFC but that I suspect some * NFS servers could return; the values are FreeBSD errno values, as * the first NFS server was the SunOS 2.0 one, and until 5.0 SunOS * was primarily BSD-derived. */ static const struct tok status2str[] = { { 1, "Operation not permitted" }, /* EPERM */ { 2, "No such file or directory" }, /* ENOENT */ { 5, "Input/output error" }, /* EIO */ { 6, "Device not configured" }, /* ENXIO */ { 11, "Resource deadlock avoided" }, /* EDEADLK */ { 12, "Cannot allocate memory" }, /* ENOMEM */ { 13, "Permission denied" }, /* EACCES */ { 17, "File exists" }, /* EEXIST */ { 18, "Cross-device link" }, /* EXDEV */ { 19, "Operation not supported by device" }, /* ENODEV */ { 20, "Not a directory" }, /* ENOTDIR */ { 21, "Is a directory" }, /* EISDIR */ { 22, "Invalid argument" }, /* EINVAL */ { 26, "Text file busy" }, /* ETXTBSY */ { 27, "File too large" }, /* EFBIG */ { 28, "No space left on device" }, /* ENOSPC */ { 30, "Read-only file system" }, /* EROFS */ { 31, "Too many links" }, /* EMLINK */ { 45, "Operation not supported" }, /* EOPNOTSUPP */ { 62, "Too many levels of symbolic links" }, /* ELOOP */ { 63, "File name too long" }, /* ENAMETOOLONG */ { 66, "Directory not empty" }, /* ENOTEMPTY */ { 69, "Disc quota exceeded" }, /* EDQUOT */ { 70, "Stale NFS file handle" }, /* ESTALE */ { 71, "Too many levels of remote in path" }, /* EREMOTE */ { 99, "Write cache flushed to disk" }, /* NFSERR_WFLUSH (not used) */ { 10001, "Illegal NFS file handle" }, /* NFS3ERR_BADHANDLE */ { 10002, "Update synchronization mismatch" }, /* NFS3ERR_NOT_SYNC */ { 10003, "READDIR/READDIRPLUS cookie is stale" }, /* NFS3ERR_BAD_COOKIE */ { 10004, "Operation not supported" }, /* NFS3ERR_NOTSUPP */ { 10005, "Buffer or request is too small" }, /* NFS3ERR_TOOSMALL */ { 10006, "Unspecified error on server" }, /* NFS3ERR_SERVERFAULT */ { 10007, "Object of that type not supported" }, /* NFS3ERR_BADTYPE */ { 10008, "Request couldn't be completed in time" }, /* NFS3ERR_JUKEBOX */ { 0, NULL } }; static const struct tok nfsv3_writemodes[] = { { 0, "unstable" }, { 1, "datasync" }, { 2, "filesync" }, { 0, NULL } }; static const struct tok type2str[] = { { NFNON, "NON" }, { NFREG, "REG" }, { NFDIR, "DIR" }, { NFBLK, "BLK" }, { NFCHR, "CHR" }, { NFLNK, "LNK" }, { NFFIFO, "FIFO" }, { 0, NULL } }; static const struct tok sunrpc_auth_str[] = { { SUNRPC_AUTH_OK, "OK" }, { SUNRPC_AUTH_BADCRED, "Bogus Credentials (seal broken)" }, { SUNRPC_AUTH_REJECTEDCRED, "Rejected Credentials (client should begin new session)" }, { SUNRPC_AUTH_BADVERF, "Bogus Verifier (seal broken)" }, { SUNRPC_AUTH_REJECTEDVERF, "Verifier expired or was replayed" }, { SUNRPC_AUTH_TOOWEAK, "Credentials are too weak" }, { SUNRPC_AUTH_INVALIDRESP, "Bogus response verifier" }, { SUNRPC_AUTH_FAILED, "Unknown failure" }, { 0, NULL } }; static const struct tok sunrpc_str[] = { { SUNRPC_PROG_UNAVAIL, "PROG_UNAVAIL" }, { SUNRPC_PROG_MISMATCH, "PROG_MISMATCH" }, { SUNRPC_PROC_UNAVAIL, "PROC_UNAVAIL" }, { SUNRPC_GARBAGE_ARGS, "GARBAGE_ARGS" }, { SUNRPC_SYSTEM_ERR, "SYSTEM_ERR" }, { 0, NULL } }; static void print_nfsaddr(netdissect_options *ndo, const u_char *bp, const char *s, const char *d) { const struct ip *ip; const struct ip6_hdr *ip6; char srcaddr[INET6_ADDRSTRLEN], dstaddr[INET6_ADDRSTRLEN]; srcaddr[0] = dstaddr[0] = '\0'; switch (IP_V((const struct ip *)bp)) { case 4: ip = (const struct ip *)bp; strlcpy(srcaddr, ipaddr_string(ndo, &ip->ip_src), sizeof(srcaddr)); strlcpy(dstaddr, ipaddr_string(ndo, &ip->ip_dst), 
sizeof(dstaddr)); break; case 6: ip6 = (const struct ip6_hdr *)bp; strlcpy(srcaddr, ip6addr_string(ndo, &ip6->ip6_src), sizeof(srcaddr)); strlcpy(dstaddr, ip6addr_string(ndo, &ip6->ip6_dst), sizeof(dstaddr)); break; default: strlcpy(srcaddr, "?", sizeof(srcaddr)); strlcpy(dstaddr, "?", sizeof(dstaddr)); break; } ND_PRINT((ndo, "%s.%s > %s.%s: ", srcaddr, s, dstaddr, d)); } static const uint32_t * parse_sattr3(netdissect_options *ndo, const uint32_t *dp, struct nfsv3_sattr *sa3) { ND_TCHECK(dp[0]); sa3->sa_modeset = EXTRACT_32BITS(dp); dp++; if (sa3->sa_modeset) { ND_TCHECK(dp[0]); sa3->sa_mode = EXTRACT_32BITS(dp); dp++; } ND_TCHECK(dp[0]); sa3->sa_uidset = EXTRACT_32BITS(dp); dp++; if (sa3->sa_uidset) { ND_TCHECK(dp[0]); sa3->sa_uid = EXTRACT_32BITS(dp); dp++; } ND_TCHECK(dp[0]); sa3->sa_gidset = EXTRACT_32BITS(dp); dp++; if (sa3->sa_gidset) { ND_TCHECK(dp[0]); sa3->sa_gid = EXTRACT_32BITS(dp); dp++; } ND_TCHECK(dp[0]); sa3->sa_sizeset = EXTRACT_32BITS(dp); dp++; if (sa3->sa_sizeset) { ND_TCHECK(dp[0]); sa3->sa_size = EXTRACT_32BITS(dp); dp++; } ND_TCHECK(dp[0]); sa3->sa_atimetype = EXTRACT_32BITS(dp); dp++; if (sa3->sa_atimetype == NFSV3SATTRTIME_TOCLIENT) { ND_TCHECK(dp[1]); sa3->sa_atime.nfsv3_sec = EXTRACT_32BITS(dp); dp++; sa3->sa_atime.nfsv3_nsec = EXTRACT_32BITS(dp); dp++; } ND_TCHECK(dp[0]); sa3->sa_mtimetype = EXTRACT_32BITS(dp); dp++; if (sa3->sa_mtimetype == NFSV3SATTRTIME_TOCLIENT) { ND_TCHECK(dp[1]); sa3->sa_mtime.nfsv3_sec = EXTRACT_32BITS(dp); dp++; sa3->sa_mtime.nfsv3_nsec = EXTRACT_32BITS(dp); dp++; } return dp; trunc: return NULL; } static int nfserr; /* true if we error rather than trunc */ static void print_sattr3(netdissect_options *ndo, const struct nfsv3_sattr *sa3, int verbose) { if (sa3->sa_modeset) ND_PRINT((ndo, " mode %o", sa3->sa_mode)); if (sa3->sa_uidset) ND_PRINT((ndo, " uid %u", sa3->sa_uid)); if (sa3->sa_gidset) ND_PRINT((ndo, " gid %u", sa3->sa_gid)); if (verbose > 1) { if (sa3->sa_atimetype == NFSV3SATTRTIME_TOCLIENT) ND_PRINT((ndo, " atime %u.%06u", sa3->sa_atime.nfsv3_sec, sa3->sa_atime.nfsv3_nsec)); if (sa3->sa_mtimetype == NFSV3SATTRTIME_TOCLIENT) ND_PRINT((ndo, " mtime %u.%06u", sa3->sa_mtime.nfsv3_sec, sa3->sa_mtime.nfsv3_nsec)); } } void nfsreply_print(netdissect_options *ndo, register const u_char *bp, u_int length, register const u_char *bp2) { register const struct sunrpc_msg *rp; char srcid[20], dstid[20]; /*fits 32bit*/ nfserr = 0; /* assume no error */ rp = (const struct sunrpc_msg *)bp; ND_TCHECK(rp->rm_xid); if (!ndo->ndo_nflag) { strlcpy(srcid, "nfs", sizeof(srcid)); snprintf(dstid, sizeof(dstid), "%u", EXTRACT_32BITS(&rp->rm_xid)); } else { snprintf(srcid, sizeof(srcid), "%u", NFS_PORT); snprintf(dstid, sizeof(dstid), "%u", EXTRACT_32BITS(&rp->rm_xid)); } print_nfsaddr(ndo, bp2, srcid, dstid); nfsreply_print_noaddr(ndo, bp, length, bp2); return; trunc: if (!nfserr) ND_PRINT((ndo, "%s", tstr)); } void nfsreply_print_noaddr(netdissect_options *ndo, register const u_char *bp, u_int length, register const u_char *bp2) { register const struct sunrpc_msg *rp; uint32_t proc, vers, reply_stat; enum sunrpc_reject_stat rstat; uint32_t rlow; uint32_t rhigh; enum sunrpc_auth_stat rwhy; nfserr = 0; /* assume no error */ rp = (const struct sunrpc_msg *)bp; ND_TCHECK(rp->rm_reply.rp_stat); reply_stat = EXTRACT_32BITS(&rp->rm_reply.rp_stat); switch (reply_stat) { case SUNRPC_MSG_ACCEPTED: ND_PRINT((ndo, "reply ok %u", length)); if (xid_map_find(rp, bp2, &proc, &vers) >= 0) interp_reply(ndo, rp, proc, vers, length); break; case SUNRPC_MSG_DENIED: 
ND_PRINT((ndo, "reply ERR %u: ", length)); ND_TCHECK(rp->rm_reply.rp_reject.rj_stat); rstat = EXTRACT_32BITS(&rp->rm_reply.rp_reject.rj_stat); switch (rstat) { case SUNRPC_RPC_MISMATCH: ND_TCHECK(rp->rm_reply.rp_reject.rj_vers.high); rlow = EXTRACT_32BITS(&rp->rm_reply.rp_reject.rj_vers.low); rhigh = EXTRACT_32BITS(&rp->rm_reply.rp_reject.rj_vers.high); ND_PRINT((ndo, "RPC Version mismatch (%u-%u)", rlow, rhigh)); break; case SUNRPC_AUTH_ERROR: ND_TCHECK(rp->rm_reply.rp_reject.rj_why); rwhy = EXTRACT_32BITS(&rp->rm_reply.rp_reject.rj_why); ND_PRINT((ndo, "Auth %s", tok2str(sunrpc_auth_str, "Invalid failure code %u", rwhy))); break; default: ND_PRINT((ndo, "Unknown reason for rejecting rpc message %u", (unsigned int)rstat)); break; } break; default: ND_PRINT((ndo, "reply Unknown rpc response code=%u %u", reply_stat, length)); break; } return; trunc: if (!nfserr) ND_PRINT((ndo, "%s", tstr)); } /* * Return a pointer to the first file handle in the packet. * If the packet was truncated, return 0. */ static const uint32_t * parsereq(netdissect_options *ndo, register const struct sunrpc_msg *rp, register u_int length) { register const uint32_t *dp; register u_int len; /* * find the start of the req data (if we captured it) */ dp = (const uint32_t *)&rp->rm_call.cb_cred; ND_TCHECK(dp[1]); len = EXTRACT_32BITS(&dp[1]); if (len < length) { dp += (len + (2 * sizeof(*dp) + 3)) / sizeof(*dp); ND_TCHECK(dp[1]); len = EXTRACT_32BITS(&dp[1]); if (len < length) { dp += (len + (2 * sizeof(*dp) + 3)) / sizeof(*dp); ND_TCHECK2(dp[0], 0); return (dp); } } trunc: return (NULL); } /* * Print out an NFS file handle and return a pointer to following word. * If packet was truncated, return 0. */ static const uint32_t * parsefh(netdissect_options *ndo, register const uint32_t *dp, int v3) { u_int len; if (v3) { ND_TCHECK(dp[0]); len = EXTRACT_32BITS(dp) / 4; dp++; } else len = NFSX_V2FH / 4; if (ND_TTEST2(*dp, len * sizeof(*dp))) { nfs_printfh(ndo, dp, len); return (dp + len); } trunc: return (NULL); } /* * Print out a file name and return pointer to 32-bit word past it. * If packet was truncated, return 0. */ static const uint32_t * parsefn(netdissect_options *ndo, register const uint32_t *dp) { register uint32_t len; register const u_char *cp; /* Bail if we don't have the string length */ ND_TCHECK(*dp); /* Fetch string length; convert to host order */ len = *dp++; NTOHL(len); ND_TCHECK2(*dp, ((len + 3) & ~3)); cp = (const u_char *)dp; /* Update 32-bit pointer (NFS filenames padded to 32-bit boundaries) */ dp += ((len + 3) & ~3) / sizeof(*dp); ND_PRINT((ndo, "\"")); if (fn_printn(ndo, cp, len, ndo->ndo_snapend)) { ND_PRINT((ndo, "\"")); goto trunc; } ND_PRINT((ndo, "\"")); return (dp); trunc: return NULL; } /* * Print out file handle and file name. * Return pointer to 32-bit word past file name. * If packet was truncated (or there was some other error), return 0. 
*/ static const uint32_t * parsefhn(netdissect_options *ndo, register const uint32_t *dp, int v3) { dp = parsefh(ndo, dp, v3); if (dp == NULL) return (NULL); ND_PRINT((ndo, " ")); return (parsefn(ndo, dp)); } void nfsreq_print_noaddr(netdissect_options *ndo, register const u_char *bp, u_int length, register const u_char *bp2) { register const struct sunrpc_msg *rp; register const uint32_t *dp; nfs_type type; int v3; uint32_t proc; uint32_t access_flags; struct nfsv3_sattr sa3; ND_PRINT((ndo, "%d", length)); nfserr = 0; /* assume no error */ rp = (const struct sunrpc_msg *)bp; if (!xid_map_enter(ndo, rp, bp2)) /* record proc number for later on */ goto trunc; v3 = (EXTRACT_32BITS(&rp->rm_call.cb_vers) == NFS_VER3); proc = EXTRACT_32BITS(&rp->rm_call.cb_proc); if (!v3 && proc < NFS_NPROCS) proc = nfsv3_procid[proc]; ND_PRINT((ndo, " %s", tok2str(nfsproc_str, "proc-%u", proc))); switch (proc) { case NFSPROC_GETATTR: case NFSPROC_SETATTR: case NFSPROC_READLINK: case NFSPROC_FSSTAT: case NFSPROC_FSINFO: case NFSPROC_PATHCONF: if ((dp = parsereq(ndo, rp, length)) != NULL && parsefh(ndo, dp, v3) != NULL) return; break; case NFSPROC_LOOKUP: case NFSPROC_CREATE: case NFSPROC_MKDIR: case NFSPROC_REMOVE: case NFSPROC_RMDIR: if ((dp = parsereq(ndo, rp, length)) != NULL && parsefhn(ndo, dp, v3) != NULL) return; break; case NFSPROC_ACCESS: if ((dp = parsereq(ndo, rp, length)) != NULL && (dp = parsefh(ndo, dp, v3)) != NULL) { ND_TCHECK(dp[0]); access_flags = EXTRACT_32BITS(&dp[0]); if (access_flags & ~NFSV3ACCESS_FULL) { /* NFSV3ACCESS definitions aren't up to date */ ND_PRINT((ndo, " %04x", access_flags)); } else if ((access_flags & NFSV3ACCESS_FULL) == NFSV3ACCESS_FULL) { ND_PRINT((ndo, " NFS_ACCESS_FULL")); } else { char separator = ' '; if (access_flags & NFSV3ACCESS_READ) { ND_PRINT((ndo, " NFS_ACCESS_READ")); separator = '|'; } if (access_flags & NFSV3ACCESS_LOOKUP) { ND_PRINT((ndo, "%cNFS_ACCESS_LOOKUP", separator)); separator = '|'; } if (access_flags & NFSV3ACCESS_MODIFY) { ND_PRINT((ndo, "%cNFS_ACCESS_MODIFY", separator)); separator = '|'; } if (access_flags & NFSV3ACCESS_EXTEND) { ND_PRINT((ndo, "%cNFS_ACCESS_EXTEND", separator)); separator = '|'; } if (access_flags & NFSV3ACCESS_DELETE) { ND_PRINT((ndo, "%cNFS_ACCESS_DELETE", separator)); separator = '|'; } if (access_flags & NFSV3ACCESS_EXECUTE) ND_PRINT((ndo, "%cNFS_ACCESS_EXECUTE", separator)); } return; } break; case NFSPROC_READ: if ((dp = parsereq(ndo, rp, length)) != NULL && (dp = parsefh(ndo, dp, v3)) != NULL) { if (v3) { ND_TCHECK(dp[2]); ND_PRINT((ndo, " %u bytes @ %" PRIu64, EXTRACT_32BITS(&dp[2]), EXTRACT_64BITS(&dp[0]))); } else { ND_TCHECK(dp[1]); ND_PRINT((ndo, " %u bytes @ %u", EXTRACT_32BITS(&dp[1]), EXTRACT_32BITS(&dp[0]))); } return; } break; case NFSPROC_WRITE: if ((dp = parsereq(ndo, rp, length)) != NULL && (dp = parsefh(ndo, dp, v3)) != NULL) { if (v3) { ND_TCHECK(dp[2]); ND_PRINT((ndo, " %u (%u) bytes @ %" PRIu64, EXTRACT_32BITS(&dp[4]), EXTRACT_32BITS(&dp[2]), EXTRACT_64BITS(&dp[0]))); if (ndo->ndo_vflag) { dp += 3; ND_TCHECK(dp[0]); ND_PRINT((ndo, " <%s>", tok2str(nfsv3_writemodes, NULL, EXTRACT_32BITS(dp)))); } } else { ND_TCHECK(dp[3]); ND_PRINT((ndo, " %u (%u) bytes @ %u (%u)", EXTRACT_32BITS(&dp[3]), EXTRACT_32BITS(&dp[2]), EXTRACT_32BITS(&dp[1]), EXTRACT_32BITS(&dp[0]))); } return; } break; case NFSPROC_SYMLINK: if ((dp = parsereq(ndo, rp, length)) != NULL && (dp = parsefhn(ndo, dp, v3)) != NULL) { ND_PRINT((ndo, " ->")); if (v3 && (dp = parse_sattr3(ndo, dp, &sa3)) == NULL) break; if (parsefn(ndo, dp) == NULL) 
break; if (v3 && ndo->ndo_vflag) print_sattr3(ndo, &sa3, ndo->ndo_vflag); return; } break; case NFSPROC_MKNOD: if ((dp = parsereq(ndo, rp, length)) != NULL && (dp = parsefhn(ndo, dp, v3)) != NULL) { ND_TCHECK(*dp); type = (nfs_type)EXTRACT_32BITS(dp); dp++; if ((dp = parse_sattr3(ndo, dp, &sa3)) == NULL) break; ND_PRINT((ndo, " %s", tok2str(type2str, "unk-ft %d", type))); if (ndo->ndo_vflag && (type == NFCHR || type == NFBLK)) { ND_TCHECK(dp[1]); ND_PRINT((ndo, " %u/%u", EXTRACT_32BITS(&dp[0]), EXTRACT_32BITS(&dp[1]))); dp += 2; } if (ndo->ndo_vflag) print_sattr3(ndo, &sa3, ndo->ndo_vflag); return; } break; case NFSPROC_RENAME: if ((dp = parsereq(ndo, rp, length)) != NULL && (dp = parsefhn(ndo, dp, v3)) != NULL) { ND_PRINT((ndo, " ->")); if (parsefhn(ndo, dp, v3) != NULL) return; } break; case NFSPROC_LINK: if ((dp = parsereq(ndo, rp, length)) != NULL && (dp = parsefh(ndo, dp, v3)) != NULL) { ND_PRINT((ndo, " ->")); if (parsefhn(ndo, dp, v3) != NULL) return; } break; case NFSPROC_READDIR: if ((dp = parsereq(ndo, rp, length)) != NULL && (dp = parsefh(ndo, dp, v3)) != NULL) { if (v3) { ND_TCHECK(dp[4]); /* * We shouldn't really try to interpret the * offset cookie here. */ ND_PRINT((ndo, " %u bytes @ %" PRId64, EXTRACT_32BITS(&dp[4]), EXTRACT_64BITS(&dp[0]))); if (ndo->ndo_vflag) ND_PRINT((ndo, " verf %08x%08x", dp[2], dp[3])); } else { ND_TCHECK(dp[1]); /* * Print the offset as signed, since -1 is * common, but offsets > 2^31 aren't. */ ND_PRINT((ndo, " %u bytes @ %d", EXTRACT_32BITS(&dp[1]), EXTRACT_32BITS(&dp[0]))); } return; } break; case NFSPROC_READDIRPLUS: if ((dp = parsereq(ndo, rp, length)) != NULL && (dp = parsefh(ndo, dp, v3)) != NULL) { ND_TCHECK(dp[4]); /* * We don't try to interpret the offset * cookie here. */ ND_PRINT((ndo, " %u bytes @ %" PRId64, EXTRACT_32BITS(&dp[4]), EXTRACT_64BITS(&dp[0]))); if (ndo->ndo_vflag) { ND_TCHECK(dp[5]); ND_PRINT((ndo, " max %u verf %08x%08x", EXTRACT_32BITS(&dp[5]), dp[2], dp[3])); } return; } break; case NFSPROC_COMMIT: if ((dp = parsereq(ndo, rp, length)) != NULL && (dp = parsefh(ndo, dp, v3)) != NULL) { ND_TCHECK(dp[2]); ND_PRINT((ndo, " %u bytes @ %" PRIu64, EXTRACT_32BITS(&dp[2]), EXTRACT_64BITS(&dp[0]))); return; } break; default: return; } trunc: if (!nfserr) ND_PRINT((ndo, "%s", tstr)); } /* * Print out an NFS file handle. * We assume packet was not truncated before the end of the * file handle pointed to by dp. * * Note: new version (using portable file-handle parser) doesn't produce * generation number. It probably could be made to do that, with some * additional hacking on the parser code. 
*/ static void nfs_printfh(netdissect_options *ndo, register const uint32_t *dp, const u_int len) { my_fsid fsid; uint32_t ino; const char *sfsname = NULL; char *spacep; if (ndo->ndo_uflag) { u_int i; char const *sep = ""; ND_PRINT((ndo, " fh[")); for (i=0; i<len; i++) { ND_PRINT((ndo, "%s%x", sep, dp[i])); sep = ":"; } ND_PRINT((ndo, "]")); return; } Parse_fh((const u_char *)dp, len, &fsid, &ino, NULL, &sfsname, 0); if (sfsname) { /* file system ID is ASCII, not numeric, for this server OS */ static char temp[NFSX_V3FHMAX+1]; /* Make sure string is null-terminated */ strncpy(temp, sfsname, NFSX_V3FHMAX); temp[sizeof(temp) - 1] = '\0'; /* Remove trailing spaces */ spacep = strchr(temp, ' '); if (spacep) *spacep = '\0'; ND_PRINT((ndo, " fh %s/", temp)); } else { ND_PRINT((ndo, " fh %d,%d/", fsid.Fsid_dev.Major, fsid.Fsid_dev.Minor)); } if(fsid.Fsid_dev.Minor == 257) /* Print the undecoded handle */ ND_PRINT((ndo, "%s", fsid.Opaque_Handle)); else ND_PRINT((ndo, "%ld", (long) ino)); } /* * Maintain a small cache of recent client.XID.server/proc pairs, to allow * us to match up replies with requests and thus to know how to parse * the reply. */ struct xid_map_entry { uint32_t xid; /* transaction ID (net order) */ int ipver; /* IP version (4 or 6) */ struct in6_addr client; /* client IP address (net order) */ struct in6_addr server; /* server IP address (net order) */ uint32_t proc; /* call proc number (host order) */ uint32_t vers; /* program version (host order) */ }; /* * Map entries are kept in an array that we manage as a ring; * new entries are always added at the tail of the ring. Initially, * all the entries are zero and hence don't match anything. */ #define XIDMAPSIZE 64 static struct xid_map_entry xid_map[XIDMAPSIZE]; static int xid_map_next = 0; static int xid_map_hint = 0; static int xid_map_enter(netdissect_options *ndo, const struct sunrpc_msg *rp, const u_char *bp) { const struct ip *ip = NULL; const struct ip6_hdr *ip6 = NULL; struct xid_map_entry *xmep; if (!ND_TTEST(rp->rm_call.cb_vers)) return (0); switch (IP_V((const struct ip *)bp)) { case 4: ip = (const struct ip *)bp; break; case 6: ip6 = (const struct ip6_hdr *)bp; break; default: return (1); } xmep = &xid_map[xid_map_next]; if (++xid_map_next >= XIDMAPSIZE) xid_map_next = 0; UNALIGNED_MEMCPY(&xmep->xid, &rp->rm_xid, sizeof(xmep->xid)); if (ip) { xmep->ipver = 4; UNALIGNED_MEMCPY(&xmep->client, &ip->ip_src, sizeof(ip->ip_src)); UNALIGNED_MEMCPY(&xmep->server, &ip->ip_dst, sizeof(ip->ip_dst)); } else if (ip6) { xmep->ipver = 6; UNALIGNED_MEMCPY(&xmep->client, &ip6->ip6_src, sizeof(ip6->ip6_src)); UNALIGNED_MEMCPY(&xmep->server, &ip6->ip6_dst, sizeof(ip6->ip6_dst)); } xmep->proc = EXTRACT_32BITS(&rp->rm_call.cb_proc); xmep->vers = EXTRACT_32BITS(&rp->rm_call.cb_vers); return (1); } /* * Returns 0 and puts NFSPROC_xxx in proc return and * version in vers return, or returns -1 on failure */ static int xid_map_find(const struct sunrpc_msg *rp, const u_char *bp, uint32_t *proc, uint32_t *vers) { int i; struct xid_map_entry *xmep; uint32_t xid; const struct ip *ip = (const struct ip *)bp; const struct ip6_hdr *ip6 = (const struct ip6_hdr *)bp; int cmp; UNALIGNED_MEMCPY(&xid, &rp->rm_xid, sizeof(xmep->xid)); /* Start searching from where we last left off */ i = xid_map_hint; do { xmep = &xid_map[i]; cmp = 1; if (xmep->ipver != IP_V(ip) || xmep->xid != xid) goto nextitem; switch (xmep->ipver) { case 4: if (UNALIGNED_MEMCMP(&ip->ip_src, &xmep->server, sizeof(ip->ip_src)) != 0 || UNALIGNED_MEMCMP(&ip->ip_dst, &xmep->client, 
sizeof(ip->ip_dst)) != 0) { cmp = 0; } break; case 6: if (UNALIGNED_MEMCMP(&ip6->ip6_src, &xmep->server, sizeof(ip6->ip6_src)) != 0 || UNALIGNED_MEMCMP(&ip6->ip6_dst, &xmep->client, sizeof(ip6->ip6_dst)) != 0) { cmp = 0; } break; default: cmp = 0; break; } if (cmp) { /* match */ xid_map_hint = i; *proc = xmep->proc; *vers = xmep->vers; return 0; } nextitem: if (++i >= XIDMAPSIZE) i = 0; } while (i != xid_map_hint); /* search failed */ return (-1); } /* * Routines for parsing reply packets */ /* * Return a pointer to the beginning of the actual results. * If the packet was truncated, return 0. */ static const uint32_t * parserep(netdissect_options *ndo, register const struct sunrpc_msg *rp, register u_int length) { register const uint32_t *dp; u_int len; enum sunrpc_accept_stat astat; /* * Portability note: * Here we find the address of the ar_verf credentials. * Originally, this calculation was * dp = (uint32_t *)&rp->rm_reply.rp_acpt.ar_verf * On the wire, the rp_acpt field starts immediately after * the (32 bit) rp_stat field. However, rp_acpt (which is a * "struct accepted_reply") contains a "struct opaque_auth", * whose internal representation contains a pointer, so on a * 64-bit machine the compiler inserts 32 bits of padding * before rp->rm_reply.rp_acpt.ar_verf. So, we cannot use * the internal representation to parse the on-the-wire * representation. Instead, we skip past the rp_stat field, * which is an "enum" and so occupies one 32-bit word. */ dp = ((const uint32_t *)&rp->rm_reply) + 1; ND_TCHECK(dp[1]); len = EXTRACT_32BITS(&dp[1]); if (len >= length) return (NULL); /* * skip past the ar_verf credentials. */ dp += (len + (2*sizeof(uint32_t) + 3)) / sizeof(uint32_t); ND_TCHECK2(dp[0], 0); /* * now we can check the ar_stat field */ astat = (enum sunrpc_accept_stat) EXTRACT_32BITS(dp); if (astat != SUNRPC_SUCCESS) { ND_PRINT((ndo, " %s", tok2str(sunrpc_str, "ar_stat %d", astat))); nfserr = 1; /* suppress trunc string */ return (NULL); } /* successful return */ ND_TCHECK2(*dp, sizeof(astat)); return ((const uint32_t *) (sizeof(astat) + ((const char *)dp))); trunc: return (0); } static const uint32_t * parsestatus(netdissect_options *ndo, const uint32_t *dp, int *er) { int errnum; ND_TCHECK(dp[0]); errnum = EXTRACT_32BITS(&dp[0]); if (er) *er = errnum; if (errnum != 0) { if (!ndo->ndo_qflag) ND_PRINT((ndo, " ERROR: %s", tok2str(status2str, "unk %d", errnum))); nfserr = 1; } return (dp + 1); trunc: return NULL; } static const uint32_t * parsefattr(netdissect_options *ndo, const uint32_t *dp, int verbose, int v3) { const struct nfs_fattr *fap; fap = (const struct nfs_fattr *)dp; ND_TCHECK(fap->fa_gid); if (verbose) { ND_PRINT((ndo, " %s %o ids %d/%d", tok2str(type2str, "unk-ft %d ", EXTRACT_32BITS(&fap->fa_type)), EXTRACT_32BITS(&fap->fa_mode), EXTRACT_32BITS(&fap->fa_uid), EXTRACT_32BITS(&fap->fa_gid))); if (v3) { ND_TCHECK(fap->fa3_size); ND_PRINT((ndo, " sz %" PRIu64, EXTRACT_64BITS((const uint32_t *)&fap->fa3_size))); } else { ND_TCHECK(fap->fa2_size); ND_PRINT((ndo, " sz %d", EXTRACT_32BITS(&fap->fa2_size))); } } /* print lots more stuff */ if (verbose > 1) { if (v3) { ND_TCHECK(fap->fa3_ctime); ND_PRINT((ndo, " nlink %d rdev %d/%d", EXTRACT_32BITS(&fap->fa_nlink), EXTRACT_32BITS(&fap->fa3_rdev.specdata1), EXTRACT_32BITS(&fap->fa3_rdev.specdata2))); ND_PRINT((ndo, " fsid %" PRIx64, EXTRACT_64BITS((const uint32_t *)&fap->fa3_fsid))); ND_PRINT((ndo, " fileid %" PRIx64, EXTRACT_64BITS((const uint32_t *)&fap->fa3_fileid))); ND_PRINT((ndo, " a/m/ctime %u.%06u", 
EXTRACT_32BITS(&fap->fa3_atime.nfsv3_sec), EXTRACT_32BITS(&fap->fa3_atime.nfsv3_nsec))); ND_PRINT((ndo, " %u.%06u", EXTRACT_32BITS(&fap->fa3_mtime.nfsv3_sec), EXTRACT_32BITS(&fap->fa3_mtime.nfsv3_nsec))); ND_PRINT((ndo, " %u.%06u", EXTRACT_32BITS(&fap->fa3_ctime.nfsv3_sec), EXTRACT_32BITS(&fap->fa3_ctime.nfsv3_nsec))); } else { ND_TCHECK(fap->fa2_ctime); ND_PRINT((ndo, " nlink %d rdev 0x%x fsid 0x%x nodeid 0x%x a/m/ctime", EXTRACT_32BITS(&fap->fa_nlink), EXTRACT_32BITS(&fap->fa2_rdev), EXTRACT_32BITS(&fap->fa2_fsid), EXTRACT_32BITS(&fap->fa2_fileid))); ND_PRINT((ndo, " %u.%06u", EXTRACT_32BITS(&fap->fa2_atime.nfsv2_sec), EXTRACT_32BITS(&fap->fa2_atime.nfsv2_usec))); ND_PRINT((ndo, " %u.%06u", EXTRACT_32BITS(&fap->fa2_mtime.nfsv2_sec), EXTRACT_32BITS(&fap->fa2_mtime.nfsv2_usec))); ND_PRINT((ndo, " %u.%06u", EXTRACT_32BITS(&fap->fa2_ctime.nfsv2_sec), EXTRACT_32BITS(&fap->fa2_ctime.nfsv2_usec))); } } return ((const uint32_t *)((const unsigned char *)dp + (v3 ? NFSX_V3FATTR : NFSX_V2FATTR))); trunc: return (NULL); } static int parseattrstat(netdissect_options *ndo, const uint32_t *dp, int verbose, int v3) { int er; dp = parsestatus(ndo, dp, &er); if (dp == NULL) return (0); if (er) return (1); return (parsefattr(ndo, dp, verbose, v3) != NULL); } static int parsediropres(netdissect_options *ndo, const uint32_t *dp) { int er; if (!(dp = parsestatus(ndo, dp, &er))) return (0); if (er) return (1); dp = parsefh(ndo, dp, 0); if (dp == NULL) return (0); return (parsefattr(ndo, dp, ndo->ndo_vflag, 0) != NULL); } static int parselinkres(netdissect_options *ndo, const uint32_t *dp, int v3) { int er; dp = parsestatus(ndo, dp, &er); if (dp == NULL) return(0); if (er) return(1); if (v3 && !(dp = parse_post_op_attr(ndo, dp, ndo->ndo_vflag))) return (0); ND_PRINT((ndo, " ")); return (parsefn(ndo, dp) != NULL); } static int parsestatfs(netdissect_options *ndo, const uint32_t *dp, int v3) { const struct nfs_statfs *sfsp; int er; dp = parsestatus(ndo, dp, &er); if (dp == NULL) return (0); if (!v3 && er) return (1); if (ndo->ndo_qflag) return(1); if (v3) { if (ndo->ndo_vflag) ND_PRINT((ndo, " POST:")); if (!(dp = parse_post_op_attr(ndo, dp, ndo->ndo_vflag))) return (0); } ND_TCHECK2(*dp, (v3 ? 
NFSX_V3STATFS : NFSX_V2STATFS)); sfsp = (const struct nfs_statfs *)dp; if (v3) { ND_PRINT((ndo, " tbytes %" PRIu64 " fbytes %" PRIu64 " abytes %" PRIu64, EXTRACT_64BITS((const uint32_t *)&sfsp->sf_tbytes), EXTRACT_64BITS((const uint32_t *)&sfsp->sf_fbytes), EXTRACT_64BITS((const uint32_t *)&sfsp->sf_abytes))); if (ndo->ndo_vflag) { ND_PRINT((ndo, " tfiles %" PRIu64 " ffiles %" PRIu64 " afiles %" PRIu64 " invar %u", EXTRACT_64BITS((const uint32_t *)&sfsp->sf_tfiles), EXTRACT_64BITS((const uint32_t *)&sfsp->sf_ffiles), EXTRACT_64BITS((const uint32_t *)&sfsp->sf_afiles), EXTRACT_32BITS(&sfsp->sf_invarsec))); } } else { ND_PRINT((ndo, " tsize %d bsize %d blocks %d bfree %d bavail %d", EXTRACT_32BITS(&sfsp->sf_tsize), EXTRACT_32BITS(&sfsp->sf_bsize), EXTRACT_32BITS(&sfsp->sf_blocks), EXTRACT_32BITS(&sfsp->sf_bfree), EXTRACT_32BITS(&sfsp->sf_bavail))); } return (1); trunc: return (0); } static int parserddires(netdissect_options *ndo, const uint32_t *dp) { int er; dp = parsestatus(ndo, dp, &er); if (dp == NULL) return (0); if (er) return (1); if (ndo->ndo_qflag) return (1); ND_TCHECK(dp[2]); ND_PRINT((ndo, " offset 0x%x size %d ", EXTRACT_32BITS(&dp[0]), EXTRACT_32BITS(&dp[1]))); if (dp[2] != 0) ND_PRINT((ndo, " eof")); return (1); trunc: return (0); } static const uint32_t * parse_wcc_attr(netdissect_options *ndo, const uint32_t *dp) { ND_PRINT((ndo, " sz %" PRIu64, EXTRACT_64BITS(&dp[0]))); ND_PRINT((ndo, " mtime %u.%06u ctime %u.%06u", EXTRACT_32BITS(&dp[2]), EXTRACT_32BITS(&dp[3]), EXTRACT_32BITS(&dp[4]), EXTRACT_32BITS(&dp[5]))); return (dp + 6); } /* * Pre operation attributes. Print only if vflag > 1. */ static const uint32_t * parse_pre_op_attr(netdissect_options *ndo, const uint32_t *dp, int verbose) { ND_TCHECK(dp[0]); if (!EXTRACT_32BITS(&dp[0])) return (dp + 1); dp++; ND_TCHECK2(*dp, 24); if (verbose > 1) { return parse_wcc_attr(ndo, dp); } else { /* If not verbose enough, just skip over wcc_attr */ return (dp + 6); } trunc: return (NULL); } /* * Post operation attributes are printed if vflag >= 1 */ static const uint32_t * parse_post_op_attr(netdissect_options *ndo, const uint32_t *dp, int verbose) { ND_TCHECK(dp[0]); if (!EXTRACT_32BITS(&dp[0])) return (dp + 1); dp++; if (verbose) { return parsefattr(ndo, dp, verbose, 1); } else return (dp + (NFSX_V3FATTR / sizeof (uint32_t))); trunc: return (NULL); } static const uint32_t * parse_wcc_data(netdissect_options *ndo, const uint32_t *dp, int verbose) { if (verbose > 1) ND_PRINT((ndo, " PRE:")); if (!(dp = parse_pre_op_attr(ndo, dp, verbose))) return (0); if (verbose) ND_PRINT((ndo, " POST:")); return parse_post_op_attr(ndo, dp, verbose); } static const uint32_t * parsecreateopres(netdissect_options *ndo, const uint32_t *dp, int verbose) { int er; if (!(dp = parsestatus(ndo, dp, &er))) return (0); if (er) dp = parse_wcc_data(ndo, dp, verbose); else { ND_TCHECK(dp[0]); if (!EXTRACT_32BITS(&dp[0])) return (dp + 1); dp++; if (!(dp = parsefh(ndo, dp, 1))) return (0); if (verbose) { if (!(dp = parse_post_op_attr(ndo, dp, verbose))) return (0); if (ndo->ndo_vflag > 1) { ND_PRINT((ndo, " dir attr:")); dp = parse_wcc_data(ndo, dp, verbose); } } } return (dp); trunc: return (NULL); } static int parsewccres(netdissect_options *ndo, const uint32_t *dp, int verbose) { int er; if (!(dp = parsestatus(ndo, dp, &er))) return (0); return parse_wcc_data(ndo, dp, verbose) != NULL; } static const uint32_t * parsev3rddirres(netdissect_options *ndo, const uint32_t *dp, int verbose) { int er; if (!(dp = parsestatus(ndo, dp, &er))) return (0); if 
(ndo->ndo_vflag) ND_PRINT((ndo, " POST:")); if (!(dp = parse_post_op_attr(ndo, dp, verbose))) return (0); if (er) return dp; if (ndo->ndo_vflag) { ND_TCHECK(dp[1]); ND_PRINT((ndo, " verf %08x%08x", dp[0], dp[1])); dp += 2; } return dp; trunc: return (NULL); } static int parsefsinfo(netdissect_options *ndo, const uint32_t *dp) { const struct nfsv3_fsinfo *sfp; int er; if (!(dp = parsestatus(ndo, dp, &er))) return (0); if (ndo->ndo_vflag) ND_PRINT((ndo, " POST:")); if (!(dp = parse_post_op_attr(ndo, dp, ndo->ndo_vflag))) return (0); if (er) return (1); sfp = (const struct nfsv3_fsinfo *)dp; ND_TCHECK(*sfp); ND_PRINT((ndo, " rtmax %u rtpref %u wtmax %u wtpref %u dtpref %u", EXTRACT_32BITS(&sfp->fs_rtmax), EXTRACT_32BITS(&sfp->fs_rtpref), EXTRACT_32BITS(&sfp->fs_wtmax), EXTRACT_32BITS(&sfp->fs_wtpref), EXTRACT_32BITS(&sfp->fs_dtpref))); if (ndo->ndo_vflag) { ND_PRINT((ndo, " rtmult %u wtmult %u maxfsz %" PRIu64, EXTRACT_32BITS(&sfp->fs_rtmult), EXTRACT_32BITS(&sfp->fs_wtmult), EXTRACT_64BITS((const uint32_t *)&sfp->fs_maxfilesize))); ND_PRINT((ndo, " delta %u.%06u ", EXTRACT_32BITS(&sfp->fs_timedelta.nfsv3_sec), EXTRACT_32BITS(&sfp->fs_timedelta.nfsv3_nsec))); } return (1); trunc: return (0); } static int parsepathconf(netdissect_options *ndo, const uint32_t *dp) { int er; const struct nfsv3_pathconf *spp; if (!(dp = parsestatus(ndo, dp, &er))) return (0); if (ndo->ndo_vflag) ND_PRINT((ndo, " POST:")); if (!(dp = parse_post_op_attr(ndo, dp, ndo->ndo_vflag))) return (0); if (er) return (1); spp = (const struct nfsv3_pathconf *)dp; ND_TCHECK(*spp); ND_PRINT((ndo, " linkmax %u namemax %u %s %s %s %s", EXTRACT_32BITS(&spp->pc_linkmax), EXTRACT_32BITS(&spp->pc_namemax), EXTRACT_32BITS(&spp->pc_notrunc) ? "notrunc" : "", EXTRACT_32BITS(&spp->pc_chownrestricted) ? "chownres" : "", EXTRACT_32BITS(&spp->pc_caseinsensitive) ? "igncase" : "", EXTRACT_32BITS(&spp->pc_casepreserving) ? 
"keepcase" : "")); return (1); trunc: return (0); } static void interp_reply(netdissect_options *ndo, const struct sunrpc_msg *rp, uint32_t proc, uint32_t vers, int length) { register const uint32_t *dp; register int v3; int er; v3 = (vers == NFS_VER3); if (!v3 && proc < NFS_NPROCS) proc = nfsv3_procid[proc]; ND_PRINT((ndo, " %s", tok2str(nfsproc_str, "proc-%u", proc))); switch (proc) { case NFSPROC_GETATTR: dp = parserep(ndo, rp, length); if (dp != NULL && parseattrstat(ndo, dp, !ndo->ndo_qflag, v3) != 0) return; break; case NFSPROC_SETATTR: if (!(dp = parserep(ndo, rp, length))) return; if (v3) { if (parsewccres(ndo, dp, ndo->ndo_vflag)) return; } else { if (parseattrstat(ndo, dp, !ndo->ndo_qflag, 0) != 0) return; } break; case NFSPROC_LOOKUP: if (!(dp = parserep(ndo, rp, length))) break; if (v3) { if (!(dp = parsestatus(ndo, dp, &er))) break; if (er) { if (ndo->ndo_vflag > 1) { ND_PRINT((ndo, " post dattr:")); dp = parse_post_op_attr(ndo, dp, ndo->ndo_vflag); } } else { if (!(dp = parsefh(ndo, dp, v3))) break; if ((dp = parse_post_op_attr(ndo, dp, ndo->ndo_vflag)) && ndo->ndo_vflag > 1) { ND_PRINT((ndo, " post dattr:")); dp = parse_post_op_attr(ndo, dp, ndo->ndo_vflag); } } if (dp) return; } else { if (parsediropres(ndo, dp) != 0) return; } break; case NFSPROC_ACCESS: if (!(dp = parserep(ndo, rp, length))) break; if (!(dp = parsestatus(ndo, dp, &er))) break; if (ndo->ndo_vflag) ND_PRINT((ndo, " attr:")); if (!(dp = parse_post_op_attr(ndo, dp, ndo->ndo_vflag))) break; if (!er) ND_PRINT((ndo, " c %04x", EXTRACT_32BITS(&dp[0]))); return; case NFSPROC_READLINK: dp = parserep(ndo, rp, length); if (dp != NULL && parselinkres(ndo, dp, v3) != 0) return; break; case NFSPROC_READ: if (!(dp = parserep(ndo, rp, length))) break; if (v3) { if (!(dp = parsestatus(ndo, dp, &er))) break; if (!(dp = parse_post_op_attr(ndo, dp, ndo->ndo_vflag))) break; if (er) return; if (ndo->ndo_vflag) { ND_TCHECK(dp[1]); ND_PRINT((ndo, " %u bytes", EXTRACT_32BITS(&dp[0]))); if (EXTRACT_32BITS(&dp[1])) ND_PRINT((ndo, " EOF")); } return; } else { if (parseattrstat(ndo, dp, ndo->ndo_vflag, 0) != 0) return; } break; case NFSPROC_WRITE: if (!(dp = parserep(ndo, rp, length))) break; if (v3) { if (!(dp = parsestatus(ndo, dp, &er))) break; if (!(dp = parse_wcc_data(ndo, dp, ndo->ndo_vflag))) break; if (er) return; if (ndo->ndo_vflag) { ND_TCHECK(dp[0]); ND_PRINT((ndo, " %u bytes", EXTRACT_32BITS(&dp[0]))); if (ndo->ndo_vflag > 1) { ND_TCHECK(dp[1]); ND_PRINT((ndo, " <%s>", tok2str(nfsv3_writemodes, NULL, EXTRACT_32BITS(&dp[1])))); } return; } } else { if (parseattrstat(ndo, dp, ndo->ndo_vflag, v3) != 0) return; } break; case NFSPROC_CREATE: case NFSPROC_MKDIR: if (!(dp = parserep(ndo, rp, length))) break; if (v3) { if (parsecreateopres(ndo, dp, ndo->ndo_vflag) != NULL) return; } else { if (parsediropres(ndo, dp) != 0) return; } break; case NFSPROC_SYMLINK: if (!(dp = parserep(ndo, rp, length))) break; if (v3) { if (parsecreateopres(ndo, dp, ndo->ndo_vflag) != NULL) return; } else { if (parsestatus(ndo, dp, &er) != NULL) return; } break; case NFSPROC_MKNOD: if (!(dp = parserep(ndo, rp, length))) break; if (parsecreateopres(ndo, dp, ndo->ndo_vflag) != NULL) return; break; case NFSPROC_REMOVE: case NFSPROC_RMDIR: if (!(dp = parserep(ndo, rp, length))) break; if (v3) { if (parsewccres(ndo, dp, ndo->ndo_vflag)) return; } else { if (parsestatus(ndo, dp, &er) != NULL) return; } break; case NFSPROC_RENAME: if (!(dp = parserep(ndo, rp, length))) break; if (v3) { if (!(dp = parsestatus(ndo, dp, &er))) break; if (ndo->ndo_vflag) { 
ND_PRINT((ndo, " from:")); if (!(dp = parse_wcc_data(ndo, dp, ndo->ndo_vflag))) break; ND_PRINT((ndo, " to:")); if (!(dp = parse_wcc_data(ndo, dp, ndo->ndo_vflag))) break; } return; } else { if (parsestatus(ndo, dp, &er) != NULL) return; } break; case NFSPROC_LINK: if (!(dp = parserep(ndo, rp, length))) break; if (v3) { if (!(dp = parsestatus(ndo, dp, &er))) break; if (ndo->ndo_vflag) { ND_PRINT((ndo, " file POST:")); if (!(dp = parse_post_op_attr(ndo, dp, ndo->ndo_vflag))) break; ND_PRINT((ndo, " dir:")); if (!(dp = parse_wcc_data(ndo, dp, ndo->ndo_vflag))) break; return; } } else { if (parsestatus(ndo, dp, &er) != NULL) return; } break; case NFSPROC_READDIR: if (!(dp = parserep(ndo, rp, length))) break; if (v3) { if (parsev3rddirres(ndo, dp, ndo->ndo_vflag)) return; } else { if (parserddires(ndo, dp) != 0) return; } break; case NFSPROC_READDIRPLUS: if (!(dp = parserep(ndo, rp, length))) break; if (parsev3rddirres(ndo, dp, ndo->ndo_vflag)) return; break; case NFSPROC_FSSTAT: dp = parserep(ndo, rp, length); if (dp != NULL && parsestatfs(ndo, dp, v3) != 0) return; break; case NFSPROC_FSINFO: dp = parserep(ndo, rp, length); if (dp != NULL && parsefsinfo(ndo, dp) != 0) return; break; case NFSPROC_PATHCONF: dp = parserep(ndo, rp, length); if (dp != NULL && parsepathconf(ndo, dp) != 0) return; break; case NFSPROC_COMMIT: dp = parserep(ndo, rp, length); if (dp != NULL && parsewccres(ndo, dp, ndo->ndo_vflag) != 0) return; break; default: return; } trunc: if (!nfserr) ND_PRINT((ndo, "%s", tstr)); }
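Both the code_before field ending above and the code_after field that follows lean on the same small lookup idiom: a struct tok array mapping numeric protocol values to strings (nfsproc_str, status2str, nfsv3_writemodes, ...), terminated by a NULL entry, searched linearly, with a printf-style fallback such as "proc-%u" for unknown values. A freestanding version of that idiom; tok_to_str and its signature are illustrative stand-ins, not tcpdump's actual tok2str:

#include <stdio.h>

struct tok { unsigned v; const char *s; };

/* Value-to-string table, NULL-terminated like the ones in this file. */
static const struct tok writemodes[] = {
    { 0, "unstable" }, { 1, "datasync" }, { 2, "filesync" },
    { 0, NULL }
};

static const char *
tok_to_str(const struct tok *t, char *buf, size_t n, unsigned v)
{
    for (; t->s != NULL; t++)
        if (t->v == v)
            return t->s;
    snprintf(buf, n, "unk-%u", v);      /* fallback for unknown values */
    return buf;
}

int
main(void)
{
    char tmp[32];

    printf("%s\n", tok_to_str(writemodes, tmp, sizeof(tmp), 2)); /* filesync */
    printf("%s\n", tok_to_str(writemodes, tmp, sizeof(tmp), 9)); /* unk-9   */
    return 0;
}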
/* * Copyright (c) 1988, 1989, 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that: (1) source code distributions * retain the above copyright notice and this paragraph in its entirety, (2) * distributions including binary code include the above copyright notice and * this paragraph in its entirety in the documentation or other materials * provided with the distribution, and (3) all advertising materials mentioning * features or use of this software display the following acknowledgement: * ``This product includes software developed by the University of California, * Lawrence Berkeley Laboratory and its contributors.'' Neither the name of * the University nor the names of its contributors may be used to endorse * or promote products derived from this software without specific prior * written permission. * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. */ /* \summary: Network File System (NFS) printer */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include <netdissect-stdinc.h> #include <stdio.h> #include <string.h> #include "netdissect.h" #include "addrtoname.h" #include "extract.h" #include "nfs.h" #include "nfsfh.h" #include "ip.h" #include "ip6.h" #include "rpc_auth.h" #include "rpc_msg.h" static const char tstr[] = " [|nfs]"; static void nfs_printfh(netdissect_options *, const uint32_t *, const u_int); static int xid_map_enter(netdissect_options *, const struct sunrpc_msg *, const u_char *); static int xid_map_find(const struct sunrpc_msg *, const u_char *, uint32_t *, uint32_t *); static void interp_reply(netdissect_options *, const struct sunrpc_msg *, uint32_t, uint32_t, int); static const uint32_t *parse_post_op_attr(netdissect_options *, const uint32_t *, int); /* * Mapping of old NFS Version 2 RPC numbers to generic numbers. */ static uint32_t nfsv3_procid[NFS_NPROCS] = { NFSPROC_NULL, NFSPROC_GETATTR, NFSPROC_SETATTR, NFSPROC_NOOP, NFSPROC_LOOKUP, NFSPROC_READLINK, NFSPROC_READ, NFSPROC_NOOP, NFSPROC_WRITE, NFSPROC_CREATE, NFSPROC_REMOVE, NFSPROC_RENAME, NFSPROC_LINK, NFSPROC_SYMLINK, NFSPROC_MKDIR, NFSPROC_RMDIR, NFSPROC_READDIR, NFSPROC_FSSTAT, NFSPROC_NOOP, NFSPROC_NOOP, NFSPROC_NOOP, NFSPROC_NOOP, NFSPROC_NOOP, NFSPROC_NOOP, NFSPROC_NOOP, NFSPROC_NOOP }; static const struct tok nfsproc_str[] = { { NFSPROC_NOOP, "nop" }, { NFSPROC_NULL, "null" }, { NFSPROC_GETATTR, "getattr" }, { NFSPROC_SETATTR, "setattr" }, { NFSPROC_LOOKUP, "lookup" }, { NFSPROC_ACCESS, "access" }, { NFSPROC_READLINK, "readlink" }, { NFSPROC_READ, "read" }, { NFSPROC_WRITE, "write" }, { NFSPROC_CREATE, "create" }, { NFSPROC_MKDIR, "mkdir" }, { NFSPROC_SYMLINK, "symlink" }, { NFSPROC_MKNOD, "mknod" }, { NFSPROC_REMOVE, "remove" }, { NFSPROC_RMDIR, "rmdir" }, { NFSPROC_RENAME, "rename" }, { NFSPROC_LINK, "link" }, { NFSPROC_READDIR, "readdir" }, { NFSPROC_READDIRPLUS, "readdirplus" }, { NFSPROC_FSSTAT, "fsstat" }, { NFSPROC_FSINFO, "fsinfo" }, { NFSPROC_PATHCONF, "pathconf" }, { NFSPROC_COMMIT, "commit" }, { 0, NULL } }; /* * NFS V2 and V3 status values. * * Some of these come from the RFCs for NFS V2 and V3, with the message * strings taken from the FreeBSD C library "errlst.c". 
* * Others are errors that are not in the RFC but that I suspect some * NFS servers could return; the values are FreeBSD errno values, as * the first NFS server was the SunOS 2.0 one, and until 5.0 SunOS * was primarily BSD-derived. */ static const struct tok status2str[] = { { 1, "Operation not permitted" }, /* EPERM */ { 2, "No such file or directory" }, /* ENOENT */ { 5, "Input/output error" }, /* EIO */ { 6, "Device not configured" }, /* ENXIO */ { 11, "Resource deadlock avoided" }, /* EDEADLK */ { 12, "Cannot allocate memory" }, /* ENOMEM */ { 13, "Permission denied" }, /* EACCES */ { 17, "File exists" }, /* EEXIST */ { 18, "Cross-device link" }, /* EXDEV */ { 19, "Operation not supported by device" }, /* ENODEV */ { 20, "Not a directory" }, /* ENOTDIR */ { 21, "Is a directory" }, /* EISDIR */ { 22, "Invalid argument" }, /* EINVAL */ { 26, "Text file busy" }, /* ETXTBSY */ { 27, "File too large" }, /* EFBIG */ { 28, "No space left on device" }, /* ENOSPC */ { 30, "Read-only file system" }, /* EROFS */ { 31, "Too many links" }, /* EMLINK */ { 45, "Operation not supported" }, /* EOPNOTSUPP */ { 62, "Too many levels of symbolic links" }, /* ELOOP */ { 63, "File name too long" }, /* ENAMETOOLONG */ { 66, "Directory not empty" }, /* ENOTEMPTY */ { 69, "Disc quota exceeded" }, /* EDQUOT */ { 70, "Stale NFS file handle" }, /* ESTALE */ { 71, "Too many levels of remote in path" }, /* EREMOTE */ { 99, "Write cache flushed to disk" }, /* NFSERR_WFLUSH (not used) */ { 10001, "Illegal NFS file handle" }, /* NFS3ERR_BADHANDLE */ { 10002, "Update synchronization mismatch" }, /* NFS3ERR_NOT_SYNC */ { 10003, "READDIR/READDIRPLUS cookie is stale" }, /* NFS3ERR_BAD_COOKIE */ { 10004, "Operation not supported" }, /* NFS3ERR_NOTSUPP */ { 10005, "Buffer or request is too small" }, /* NFS3ERR_TOOSMALL */ { 10006, "Unspecified error on server" }, /* NFS3ERR_SERVERFAULT */ { 10007, "Object of that type not supported" }, /* NFS3ERR_BADTYPE */ { 10008, "Request couldn't be completed in time" }, /* NFS3ERR_JUKEBOX */ { 0, NULL } }; static const struct tok nfsv3_writemodes[] = { { 0, "unstable" }, { 1, "datasync" }, { 2, "filesync" }, { 0, NULL } }; static const struct tok type2str[] = { { NFNON, "NON" }, { NFREG, "REG" }, { NFDIR, "DIR" }, { NFBLK, "BLK" }, { NFCHR, "CHR" }, { NFLNK, "LNK" }, { NFFIFO, "FIFO" }, { 0, NULL } }; static const struct tok sunrpc_auth_str[] = { { SUNRPC_AUTH_OK, "OK" }, { SUNRPC_AUTH_BADCRED, "Bogus Credentials (seal broken)" }, { SUNRPC_AUTH_REJECTEDCRED, "Rejected Credentials (client should begin new session)" }, { SUNRPC_AUTH_BADVERF, "Bogus Verifier (seal broken)" }, { SUNRPC_AUTH_REJECTEDVERF, "Verifier expired or was replayed" }, { SUNRPC_AUTH_TOOWEAK, "Credentials are too weak" }, { SUNRPC_AUTH_INVALIDRESP, "Bogus response verifier" }, { SUNRPC_AUTH_FAILED, "Unknown failure" }, { 0, NULL } }; static const struct tok sunrpc_str[] = { { SUNRPC_PROG_UNAVAIL, "PROG_UNAVAIL" }, { SUNRPC_PROG_MISMATCH, "PROG_MISMATCH" }, { SUNRPC_PROC_UNAVAIL, "PROC_UNAVAIL" }, { SUNRPC_GARBAGE_ARGS, "GARBAGE_ARGS" }, { SUNRPC_SYSTEM_ERR, "SYSTEM_ERR" }, { 0, NULL } }; static void print_nfsaddr(netdissect_options *ndo, const u_char *bp, const char *s, const char *d) { const struct ip *ip; const struct ip6_hdr *ip6; char srcaddr[INET6_ADDRSTRLEN], dstaddr[INET6_ADDRSTRLEN]; srcaddr[0] = dstaddr[0] = '\0'; switch (IP_V((const struct ip *)bp)) { case 4: ip = (const struct ip *)bp; strlcpy(srcaddr, ipaddr_string(ndo, &ip->ip_src), sizeof(srcaddr)); strlcpy(dstaddr, ipaddr_string(ndo, &ip->ip_dst), 
sizeof(dstaddr)); break; case 6: ip6 = (const struct ip6_hdr *)bp; strlcpy(srcaddr, ip6addr_string(ndo, &ip6->ip6_src), sizeof(srcaddr)); strlcpy(dstaddr, ip6addr_string(ndo, &ip6->ip6_dst), sizeof(dstaddr)); break; default: strlcpy(srcaddr, "?", sizeof(srcaddr)); strlcpy(dstaddr, "?", sizeof(dstaddr)); break; } ND_PRINT((ndo, "%s.%s > %s.%s: ", srcaddr, s, dstaddr, d)); } static const uint32_t * parse_sattr3(netdissect_options *ndo, const uint32_t *dp, struct nfsv3_sattr *sa3) { ND_TCHECK(dp[0]); sa3->sa_modeset = EXTRACT_32BITS(dp); dp++; if (sa3->sa_modeset) { ND_TCHECK(dp[0]); sa3->sa_mode = EXTRACT_32BITS(dp); dp++; } ND_TCHECK(dp[0]); sa3->sa_uidset = EXTRACT_32BITS(dp); dp++; if (sa3->sa_uidset) { ND_TCHECK(dp[0]); sa3->sa_uid = EXTRACT_32BITS(dp); dp++; } ND_TCHECK(dp[0]); sa3->sa_gidset = EXTRACT_32BITS(dp); dp++; if (sa3->sa_gidset) { ND_TCHECK(dp[0]); sa3->sa_gid = EXTRACT_32BITS(dp); dp++; } ND_TCHECK(dp[0]); sa3->sa_sizeset = EXTRACT_32BITS(dp); dp++; if (sa3->sa_sizeset) { ND_TCHECK(dp[0]); sa3->sa_size = EXTRACT_32BITS(dp); dp++; } ND_TCHECK(dp[0]); sa3->sa_atimetype = EXTRACT_32BITS(dp); dp++; if (sa3->sa_atimetype == NFSV3SATTRTIME_TOCLIENT) { ND_TCHECK(dp[1]); sa3->sa_atime.nfsv3_sec = EXTRACT_32BITS(dp); dp++; sa3->sa_atime.nfsv3_nsec = EXTRACT_32BITS(dp); dp++; } ND_TCHECK(dp[0]); sa3->sa_mtimetype = EXTRACT_32BITS(dp); dp++; if (sa3->sa_mtimetype == NFSV3SATTRTIME_TOCLIENT) { ND_TCHECK(dp[1]); sa3->sa_mtime.nfsv3_sec = EXTRACT_32BITS(dp); dp++; sa3->sa_mtime.nfsv3_nsec = EXTRACT_32BITS(dp); dp++; } return dp; trunc: return NULL; } static int nfserr; /* true if we error rather than trunc */ static void print_sattr3(netdissect_options *ndo, const struct nfsv3_sattr *sa3, int verbose) { if (sa3->sa_modeset) ND_PRINT((ndo, " mode %o", sa3->sa_mode)); if (sa3->sa_uidset) ND_PRINT((ndo, " uid %u", sa3->sa_uid)); if (sa3->sa_gidset) ND_PRINT((ndo, " gid %u", sa3->sa_gid)); if (verbose > 1) { if (sa3->sa_atimetype == NFSV3SATTRTIME_TOCLIENT) ND_PRINT((ndo, " atime %u.%06u", sa3->sa_atime.nfsv3_sec, sa3->sa_atime.nfsv3_nsec)); if (sa3->sa_mtimetype == NFSV3SATTRTIME_TOCLIENT) ND_PRINT((ndo, " mtime %u.%06u", sa3->sa_mtime.nfsv3_sec, sa3->sa_mtime.nfsv3_nsec)); } } void nfsreply_print(netdissect_options *ndo, register const u_char *bp, u_int length, register const u_char *bp2) { register const struct sunrpc_msg *rp; char srcid[20], dstid[20]; /*fits 32bit*/ nfserr = 0; /* assume no error */ rp = (const struct sunrpc_msg *)bp; ND_TCHECK(rp->rm_xid); if (!ndo->ndo_nflag) { strlcpy(srcid, "nfs", sizeof(srcid)); snprintf(dstid, sizeof(dstid), "%u", EXTRACT_32BITS(&rp->rm_xid)); } else { snprintf(srcid, sizeof(srcid), "%u", NFS_PORT); snprintf(dstid, sizeof(dstid), "%u", EXTRACT_32BITS(&rp->rm_xid)); } print_nfsaddr(ndo, bp2, srcid, dstid); nfsreply_print_noaddr(ndo, bp, length, bp2); return; trunc: if (!nfserr) ND_PRINT((ndo, "%s", tstr)); } void nfsreply_print_noaddr(netdissect_options *ndo, register const u_char *bp, u_int length, register const u_char *bp2) { register const struct sunrpc_msg *rp; uint32_t proc, vers, reply_stat; enum sunrpc_reject_stat rstat; uint32_t rlow; uint32_t rhigh; enum sunrpc_auth_stat rwhy; nfserr = 0; /* assume no error */ rp = (const struct sunrpc_msg *)bp; ND_TCHECK(rp->rm_reply.rp_stat); reply_stat = EXTRACT_32BITS(&rp->rm_reply.rp_stat); switch (reply_stat) { case SUNRPC_MSG_ACCEPTED: ND_PRINT((ndo, "reply ok %u", length)); if (xid_map_find(rp, bp2, &proc, &vers) >= 0) interp_reply(ndo, rp, proc, vers, length); break; case SUNRPC_MSG_DENIED: 
ND_PRINT((ndo, "reply ERR %u: ", length)); ND_TCHECK(rp->rm_reply.rp_reject.rj_stat); rstat = EXTRACT_32BITS(&rp->rm_reply.rp_reject.rj_stat); switch (rstat) { case SUNRPC_RPC_MISMATCH: ND_TCHECK(rp->rm_reply.rp_reject.rj_vers.high); rlow = EXTRACT_32BITS(&rp->rm_reply.rp_reject.rj_vers.low); rhigh = EXTRACT_32BITS(&rp->rm_reply.rp_reject.rj_vers.high); ND_PRINT((ndo, "RPC Version mismatch (%u-%u)", rlow, rhigh)); break; case SUNRPC_AUTH_ERROR: ND_TCHECK(rp->rm_reply.rp_reject.rj_why); rwhy = EXTRACT_32BITS(&rp->rm_reply.rp_reject.rj_why); ND_PRINT((ndo, "Auth %s", tok2str(sunrpc_auth_str, "Invalid failure code %u", rwhy))); break; default: ND_PRINT((ndo, "Unknown reason for rejecting rpc message %u", (unsigned int)rstat)); break; } break; default: ND_PRINT((ndo, "reply Unknown rpc response code=%u %u", reply_stat, length)); break; } return; trunc: if (!nfserr) ND_PRINT((ndo, "%s", tstr)); } /* * Return a pointer to the first file handle in the packet. * If the packet was truncated, return 0. */ static const uint32_t * parsereq(netdissect_options *ndo, register const struct sunrpc_msg *rp, register u_int length) { register const uint32_t *dp; register u_int len; /* * find the start of the req data (if we captured it) */ dp = (const uint32_t *)&rp->rm_call.cb_cred; ND_TCHECK(dp[1]); len = EXTRACT_32BITS(&dp[1]); if (len < length) { dp += (len + (2 * sizeof(*dp) + 3)) / sizeof(*dp); ND_TCHECK(dp[1]); len = EXTRACT_32BITS(&dp[1]); if (len < length) { dp += (len + (2 * sizeof(*dp) + 3)) / sizeof(*dp); ND_TCHECK2(dp[0], 0); return (dp); } } trunc: return (NULL); } /* * Print out an NFS file handle and return a pointer to following word. * If packet was truncated, return 0. */ static const uint32_t * parsefh(netdissect_options *ndo, register const uint32_t *dp, int v3) { u_int len; if (v3) { ND_TCHECK(dp[0]); len = EXTRACT_32BITS(dp) / 4; dp++; } else len = NFSX_V2FH / 4; if (ND_TTEST2(*dp, len * sizeof(*dp))) { nfs_printfh(ndo, dp, len); return (dp + len); } trunc: return (NULL); } /* * Print out a file name and return pointer to 32-bit word past it. * If packet was truncated, return 0. */ static const uint32_t * parsefn(netdissect_options *ndo, register const uint32_t *dp) { register uint32_t len; register const u_char *cp; /* Bail if we don't have the string length */ ND_TCHECK(*dp); /* Fetch string length; convert to host order */ len = *dp++; NTOHL(len); ND_TCHECK2(*dp, ((len + 3) & ~3)); cp = (const u_char *)dp; /* Update 32-bit pointer (NFS filenames padded to 32-bit boundaries) */ dp += ((len + 3) & ~3) / sizeof(*dp); ND_PRINT((ndo, "\"")); if (fn_printn(ndo, cp, len, ndo->ndo_snapend)) { ND_PRINT((ndo, "\"")); goto trunc; } ND_PRINT((ndo, "\"")); return (dp); trunc: return NULL; } /* * Print out file handle and file name. * Return pointer to 32-bit word past file name. * If packet was truncated (or there was some other error), return 0. 
*/ static const uint32_t * parsefhn(netdissect_options *ndo, register const uint32_t *dp, int v3) { dp = parsefh(ndo, dp, v3); if (dp == NULL) return (NULL); ND_PRINT((ndo, " ")); return (parsefn(ndo, dp)); } void nfsreq_print_noaddr(netdissect_options *ndo, register const u_char *bp, u_int length, register const u_char *bp2) { register const struct sunrpc_msg *rp; register const uint32_t *dp; nfs_type type; int v3; uint32_t proc; uint32_t access_flags; struct nfsv3_sattr sa3; ND_PRINT((ndo, "%d", length)); nfserr = 0; /* assume no error */ rp = (const struct sunrpc_msg *)bp; if (!xid_map_enter(ndo, rp, bp2)) /* record proc number for later on */ goto trunc; v3 = (EXTRACT_32BITS(&rp->rm_call.cb_vers) == NFS_VER3); proc = EXTRACT_32BITS(&rp->rm_call.cb_proc); if (!v3 && proc < NFS_NPROCS) proc = nfsv3_procid[proc]; ND_PRINT((ndo, " %s", tok2str(nfsproc_str, "proc-%u", proc))); switch (proc) { case NFSPROC_GETATTR: case NFSPROC_SETATTR: case NFSPROC_READLINK: case NFSPROC_FSSTAT: case NFSPROC_FSINFO: case NFSPROC_PATHCONF: if ((dp = parsereq(ndo, rp, length)) != NULL && parsefh(ndo, dp, v3) != NULL) return; break; case NFSPROC_LOOKUP: case NFSPROC_CREATE: case NFSPROC_MKDIR: case NFSPROC_REMOVE: case NFSPROC_RMDIR: if ((dp = parsereq(ndo, rp, length)) != NULL && parsefhn(ndo, dp, v3) != NULL) return; break; case NFSPROC_ACCESS: if ((dp = parsereq(ndo, rp, length)) != NULL && (dp = parsefh(ndo, dp, v3)) != NULL) { ND_TCHECK(dp[0]); access_flags = EXTRACT_32BITS(&dp[0]); if (access_flags & ~NFSV3ACCESS_FULL) { /* NFSV3ACCESS definitions aren't up to date */ ND_PRINT((ndo, " %04x", access_flags)); } else if ((access_flags & NFSV3ACCESS_FULL) == NFSV3ACCESS_FULL) { ND_PRINT((ndo, " NFS_ACCESS_FULL")); } else { char separator = ' '; if (access_flags & NFSV3ACCESS_READ) { ND_PRINT((ndo, " NFS_ACCESS_READ")); separator = '|'; } if (access_flags & NFSV3ACCESS_LOOKUP) { ND_PRINT((ndo, "%cNFS_ACCESS_LOOKUP", separator)); separator = '|'; } if (access_flags & NFSV3ACCESS_MODIFY) { ND_PRINT((ndo, "%cNFS_ACCESS_MODIFY", separator)); separator = '|'; } if (access_flags & NFSV3ACCESS_EXTEND) { ND_PRINT((ndo, "%cNFS_ACCESS_EXTEND", separator)); separator = '|'; } if (access_flags & NFSV3ACCESS_DELETE) { ND_PRINT((ndo, "%cNFS_ACCESS_DELETE", separator)); separator = '|'; } if (access_flags & NFSV3ACCESS_EXECUTE) ND_PRINT((ndo, "%cNFS_ACCESS_EXECUTE", separator)); } return; } break; case NFSPROC_READ: if ((dp = parsereq(ndo, rp, length)) != NULL && (dp = parsefh(ndo, dp, v3)) != NULL) { if (v3) { ND_TCHECK(dp[2]); ND_PRINT((ndo, " %u bytes @ %" PRIu64, EXTRACT_32BITS(&dp[2]), EXTRACT_64BITS(&dp[0]))); } else { ND_TCHECK(dp[1]); ND_PRINT((ndo, " %u bytes @ %u", EXTRACT_32BITS(&dp[1]), EXTRACT_32BITS(&dp[0]))); } return; } break; case NFSPROC_WRITE: if ((dp = parsereq(ndo, rp, length)) != NULL && (dp = parsefh(ndo, dp, v3)) != NULL) { if (v3) { ND_TCHECK(dp[4]); ND_PRINT((ndo, " %u (%u) bytes @ %" PRIu64, EXTRACT_32BITS(&dp[4]), EXTRACT_32BITS(&dp[2]), EXTRACT_64BITS(&dp[0]))); if (ndo->ndo_vflag) { ND_PRINT((ndo, " <%s>", tok2str(nfsv3_writemodes, NULL, EXTRACT_32BITS(&dp[3])))); } } else { ND_TCHECK(dp[3]); ND_PRINT((ndo, " %u (%u) bytes @ %u (%u)", EXTRACT_32BITS(&dp[3]), EXTRACT_32BITS(&dp[2]), EXTRACT_32BITS(&dp[1]), EXTRACT_32BITS(&dp[0]))); } return; } break; case NFSPROC_SYMLINK: if ((dp = parsereq(ndo, rp, length)) != NULL && (dp = parsefhn(ndo, dp, v3)) != NULL) { ND_PRINT((ndo, " ->")); if (v3 && (dp = parse_sattr3(ndo, dp, &sa3)) == NULL) break; if (parsefn(ndo, dp) == NULL) break; if (v3 && 
ndo->ndo_vflag) print_sattr3(ndo, &sa3, ndo->ndo_vflag); return; } break; case NFSPROC_MKNOD: if ((dp = parsereq(ndo, rp, length)) != NULL && (dp = parsefhn(ndo, dp, v3)) != NULL) { ND_TCHECK(*dp); type = (nfs_type)EXTRACT_32BITS(dp); dp++; if ((dp = parse_sattr3(ndo, dp, &sa3)) == NULL) break; ND_PRINT((ndo, " %s", tok2str(type2str, "unk-ft %d", type))); if (ndo->ndo_vflag && (type == NFCHR || type == NFBLK)) { ND_TCHECK(dp[1]); ND_PRINT((ndo, " %u/%u", EXTRACT_32BITS(&dp[0]), EXTRACT_32BITS(&dp[1]))); dp += 2; } if (ndo->ndo_vflag) print_sattr3(ndo, &sa3, ndo->ndo_vflag); return; } break; case NFSPROC_RENAME: if ((dp = parsereq(ndo, rp, length)) != NULL && (dp = parsefhn(ndo, dp, v3)) != NULL) { ND_PRINT((ndo, " ->")); if (parsefhn(ndo, dp, v3) != NULL) return; } break; case NFSPROC_LINK: if ((dp = parsereq(ndo, rp, length)) != NULL && (dp = parsefh(ndo, dp, v3)) != NULL) { ND_PRINT((ndo, " ->")); if (parsefhn(ndo, dp, v3) != NULL) return; } break; case NFSPROC_READDIR: if ((dp = parsereq(ndo, rp, length)) != NULL && (dp = parsefh(ndo, dp, v3)) != NULL) { if (v3) { ND_TCHECK(dp[4]); /* * We shouldn't really try to interpret the * offset cookie here. */ ND_PRINT((ndo, " %u bytes @ %" PRId64, EXTRACT_32BITS(&dp[4]), EXTRACT_64BITS(&dp[0]))); if (ndo->ndo_vflag) ND_PRINT((ndo, " verf %08x%08x", dp[2], dp[3])); } else { ND_TCHECK(dp[1]); /* * Print the offset as signed, since -1 is * common, but offsets > 2^31 aren't. */ ND_PRINT((ndo, " %u bytes @ %d", EXTRACT_32BITS(&dp[1]), EXTRACT_32BITS(&dp[0]))); } return; } break; case NFSPROC_READDIRPLUS: if ((dp = parsereq(ndo, rp, length)) != NULL && (dp = parsefh(ndo, dp, v3)) != NULL) { ND_TCHECK(dp[4]); /* * We don't try to interpret the offset * cookie here. */ ND_PRINT((ndo, " %u bytes @ %" PRId64, EXTRACT_32BITS(&dp[4]), EXTRACT_64BITS(&dp[0]))); if (ndo->ndo_vflag) { ND_TCHECK(dp[5]); ND_PRINT((ndo, " max %u verf %08x%08x", EXTRACT_32BITS(&dp[5]), dp[2], dp[3])); } return; } break; case NFSPROC_COMMIT: if ((dp = parsereq(ndo, rp, length)) != NULL && (dp = parsefh(ndo, dp, v3)) != NULL) { ND_TCHECK(dp[2]); ND_PRINT((ndo, " %u bytes @ %" PRIu64, EXTRACT_32BITS(&dp[2]), EXTRACT_64BITS(&dp[0]))); return; } break; default: return; } trunc: if (!nfserr) ND_PRINT((ndo, "%s", tstr)); } /* * Print out an NFS file handle. * We assume packet was not truncated before the end of the * file handle pointed to by dp. * * Note: new version (using portable file-handle parser) doesn't produce * generation number. It probably could be made to do that, with some * additional hacking on the parser code. 
*/ static void nfs_printfh(netdissect_options *ndo, register const uint32_t *dp, const u_int len) { my_fsid fsid; uint32_t ino; const char *sfsname = NULL; char *spacep; if (ndo->ndo_uflag) { u_int i; char const *sep = ""; ND_PRINT((ndo, " fh[")); for (i=0; i<len; i++) { ND_PRINT((ndo, "%s%x", sep, dp[i])); sep = ":"; } ND_PRINT((ndo, "]")); return; } Parse_fh((const u_char *)dp, len, &fsid, &ino, NULL, &sfsname, 0); if (sfsname) { /* file system ID is ASCII, not numeric, for this server OS */ static char temp[NFSX_V3FHMAX+1]; /* Make sure string is null-terminated */ strncpy(temp, sfsname, NFSX_V3FHMAX); temp[sizeof(temp) - 1] = '\0'; /* Remove trailing spaces */ spacep = strchr(temp, ' '); if (spacep) *spacep = '\0'; ND_PRINT((ndo, " fh %s/", temp)); } else { ND_PRINT((ndo, " fh %d,%d/", fsid.Fsid_dev.Major, fsid.Fsid_dev.Minor)); } if(fsid.Fsid_dev.Minor == 257) /* Print the undecoded handle */ ND_PRINT((ndo, "%s", fsid.Opaque_Handle)); else ND_PRINT((ndo, "%ld", (long) ino)); } /* * Maintain a small cache of recent client.XID.server/proc pairs, to allow * us to match up replies with requests and thus to know how to parse * the reply. */ struct xid_map_entry { uint32_t xid; /* transaction ID (net order) */ int ipver; /* IP version (4 or 6) */ struct in6_addr client; /* client IP address (net order) */ struct in6_addr server; /* server IP address (net order) */ uint32_t proc; /* call proc number (host order) */ uint32_t vers; /* program version (host order) */ }; /* * Map entries are kept in an array that we manage as a ring; * new entries are always added at the tail of the ring. Initially, * all the entries are zero and hence don't match anything. */ #define XIDMAPSIZE 64 static struct xid_map_entry xid_map[XIDMAPSIZE]; static int xid_map_next = 0; static int xid_map_hint = 0; static int xid_map_enter(netdissect_options *ndo, const struct sunrpc_msg *rp, const u_char *bp) { const struct ip *ip = NULL; const struct ip6_hdr *ip6 = NULL; struct xid_map_entry *xmep; if (!ND_TTEST(rp->rm_call.cb_vers)) return (0); switch (IP_V((const struct ip *)bp)) { case 4: ip = (const struct ip *)bp; break; case 6: ip6 = (const struct ip6_hdr *)bp; break; default: return (1); } xmep = &xid_map[xid_map_next]; if (++xid_map_next >= XIDMAPSIZE) xid_map_next = 0; UNALIGNED_MEMCPY(&xmep->xid, &rp->rm_xid, sizeof(xmep->xid)); if (ip) { xmep->ipver = 4; UNALIGNED_MEMCPY(&xmep->client, &ip->ip_src, sizeof(ip->ip_src)); UNALIGNED_MEMCPY(&xmep->server, &ip->ip_dst, sizeof(ip->ip_dst)); } else if (ip6) { xmep->ipver = 6; UNALIGNED_MEMCPY(&xmep->client, &ip6->ip6_src, sizeof(ip6->ip6_src)); UNALIGNED_MEMCPY(&xmep->server, &ip6->ip6_dst, sizeof(ip6->ip6_dst)); } xmep->proc = EXTRACT_32BITS(&rp->rm_call.cb_proc); xmep->vers = EXTRACT_32BITS(&rp->rm_call.cb_vers); return (1); } /* * Returns 0 and puts NFSPROC_xxx in proc return and * version in vers return, or returns -1 on failure */ static int xid_map_find(const struct sunrpc_msg *rp, const u_char *bp, uint32_t *proc, uint32_t *vers) { int i; struct xid_map_entry *xmep; uint32_t xid; const struct ip *ip = (const struct ip *)bp; const struct ip6_hdr *ip6 = (const struct ip6_hdr *)bp; int cmp; UNALIGNED_MEMCPY(&xid, &rp->rm_xid, sizeof(xmep->xid)); /* Start searching from where we last left off */ i = xid_map_hint; do { xmep = &xid_map[i]; cmp = 1; if (xmep->ipver != IP_V(ip) || xmep->xid != xid) goto nextitem; switch (xmep->ipver) { case 4: if (UNALIGNED_MEMCMP(&ip->ip_src, &xmep->server, sizeof(ip->ip_src)) != 0 || UNALIGNED_MEMCMP(&ip->ip_dst, &xmep->client, 
sizeof(ip->ip_dst)) != 0) { cmp = 0; } break; case 6: if (UNALIGNED_MEMCMP(&ip6->ip6_src, &xmep->server, sizeof(ip6->ip6_src)) != 0 || UNALIGNED_MEMCMP(&ip6->ip6_dst, &xmep->client, sizeof(ip6->ip6_dst)) != 0) { cmp = 0; } break; default: cmp = 0; break; } if (cmp) { /* match */ xid_map_hint = i; *proc = xmep->proc; *vers = xmep->vers; return 0; } nextitem: if (++i >= XIDMAPSIZE) i = 0; } while (i != xid_map_hint); /* search failed */ return (-1); } /* * Routines for parsing reply packets */ /* * Return a pointer to the beginning of the actual results. * If the packet was truncated, return 0. */ static const uint32_t * parserep(netdissect_options *ndo, register const struct sunrpc_msg *rp, register u_int length) { register const uint32_t *dp; u_int len; enum sunrpc_accept_stat astat; /* * Portability note: * Here we find the address of the ar_verf credentials. * Originally, this calculation was * dp = (uint32_t *)&rp->rm_reply.rp_acpt.ar_verf * On the wire, the rp_acpt field starts immediately after * the (32 bit) rp_stat field. However, rp_acpt (which is a * "struct accepted_reply") contains a "struct opaque_auth", * whose internal representation contains a pointer, so on a * 64-bit machine the compiler inserts 32 bits of padding * before rp->rm_reply.rp_acpt.ar_verf. So, we cannot use * the internal representation to parse the on-the-wire * representation. Instead, we skip past the rp_stat field, * which is an "enum" and so occupies one 32-bit word. */ dp = ((const uint32_t *)&rp->rm_reply) + 1; ND_TCHECK(dp[1]); len = EXTRACT_32BITS(&dp[1]); if (len >= length) return (NULL); /* * skip past the ar_verf credentials. */ dp += (len + (2*sizeof(uint32_t) + 3)) / sizeof(uint32_t); /* * now we can check the ar_stat field */ ND_TCHECK(dp[0]); astat = (enum sunrpc_accept_stat) EXTRACT_32BITS(dp); if (astat != SUNRPC_SUCCESS) { ND_PRINT((ndo, " %s", tok2str(sunrpc_str, "ar_stat %d", astat))); nfserr = 1; /* suppress trunc string */ return (NULL); } /* successful return */ ND_TCHECK2(*dp, sizeof(astat)); return ((const uint32_t *) (sizeof(astat) + ((const char *)dp))); trunc: return (0); } static const uint32_t * parsestatus(netdissect_options *ndo, const uint32_t *dp, int *er) { int errnum; ND_TCHECK(dp[0]); errnum = EXTRACT_32BITS(&dp[0]); if (er) *er = errnum; if (errnum != 0) { if (!ndo->ndo_qflag) ND_PRINT((ndo, " ERROR: %s", tok2str(status2str, "unk %d", errnum))); nfserr = 1; } return (dp + 1); trunc: return NULL; } static const uint32_t * parsefattr(netdissect_options *ndo, const uint32_t *dp, int verbose, int v3) { const struct nfs_fattr *fap; fap = (const struct nfs_fattr *)dp; ND_TCHECK(fap->fa_gid); if (verbose) { ND_PRINT((ndo, " %s %o ids %d/%d", tok2str(type2str, "unk-ft %d ", EXTRACT_32BITS(&fap->fa_type)), EXTRACT_32BITS(&fap->fa_mode), EXTRACT_32BITS(&fap->fa_uid), EXTRACT_32BITS(&fap->fa_gid))); if (v3) { ND_TCHECK(fap->fa3_size); ND_PRINT((ndo, " sz %" PRIu64, EXTRACT_64BITS((const uint32_t *)&fap->fa3_size))); } else { ND_TCHECK(fap->fa2_size); ND_PRINT((ndo, " sz %d", EXTRACT_32BITS(&fap->fa2_size))); } } /* print lots more stuff */ if (verbose > 1) { if (v3) { ND_TCHECK(fap->fa3_ctime); ND_PRINT((ndo, " nlink %d rdev %d/%d", EXTRACT_32BITS(&fap->fa_nlink), EXTRACT_32BITS(&fap->fa3_rdev.specdata1), EXTRACT_32BITS(&fap->fa3_rdev.specdata2))); ND_PRINT((ndo, " fsid %" PRIx64, EXTRACT_64BITS((const uint32_t *)&fap->fa3_fsid))); ND_PRINT((ndo, " fileid %" PRIx64, EXTRACT_64BITS((const uint32_t *)&fap->fa3_fileid))); ND_PRINT((ndo, " a/m/ctime %u.%06u", 
EXTRACT_32BITS(&fap->fa3_atime.nfsv3_sec), EXTRACT_32BITS(&fap->fa3_atime.nfsv3_nsec))); ND_PRINT((ndo, " %u.%06u", EXTRACT_32BITS(&fap->fa3_mtime.nfsv3_sec), EXTRACT_32BITS(&fap->fa3_mtime.nfsv3_nsec))); ND_PRINT((ndo, " %u.%06u", EXTRACT_32BITS(&fap->fa3_ctime.nfsv3_sec), EXTRACT_32BITS(&fap->fa3_ctime.nfsv3_nsec))); } else { ND_TCHECK(fap->fa2_ctime); ND_PRINT((ndo, " nlink %d rdev 0x%x fsid 0x%x nodeid 0x%x a/m/ctime", EXTRACT_32BITS(&fap->fa_nlink), EXTRACT_32BITS(&fap->fa2_rdev), EXTRACT_32BITS(&fap->fa2_fsid), EXTRACT_32BITS(&fap->fa2_fileid))); ND_PRINT((ndo, " %u.%06u", EXTRACT_32BITS(&fap->fa2_atime.nfsv2_sec), EXTRACT_32BITS(&fap->fa2_atime.nfsv2_usec))); ND_PRINT((ndo, " %u.%06u", EXTRACT_32BITS(&fap->fa2_mtime.nfsv2_sec), EXTRACT_32BITS(&fap->fa2_mtime.nfsv2_usec))); ND_PRINT((ndo, " %u.%06u", EXTRACT_32BITS(&fap->fa2_ctime.nfsv2_sec), EXTRACT_32BITS(&fap->fa2_ctime.nfsv2_usec))); } } return ((const uint32_t *)((const unsigned char *)dp + (v3 ? NFSX_V3FATTR : NFSX_V2FATTR))); trunc: return (NULL); } static int parseattrstat(netdissect_options *ndo, const uint32_t *dp, int verbose, int v3) { int er; dp = parsestatus(ndo, dp, &er); if (dp == NULL) return (0); if (er) return (1); return (parsefattr(ndo, dp, verbose, v3) != NULL); } static int parsediropres(netdissect_options *ndo, const uint32_t *dp) { int er; if (!(dp = parsestatus(ndo, dp, &er))) return (0); if (er) return (1); dp = parsefh(ndo, dp, 0); if (dp == NULL) return (0); return (parsefattr(ndo, dp, ndo->ndo_vflag, 0) != NULL); } static int parselinkres(netdissect_options *ndo, const uint32_t *dp, int v3) { int er; dp = parsestatus(ndo, dp, &er); if (dp == NULL) return(0); if (er) return(1); if (v3 && !(dp = parse_post_op_attr(ndo, dp, ndo->ndo_vflag))) return (0); ND_PRINT((ndo, " ")); return (parsefn(ndo, dp) != NULL); } static int parsestatfs(netdissect_options *ndo, const uint32_t *dp, int v3) { const struct nfs_statfs *sfsp; int er; dp = parsestatus(ndo, dp, &er); if (dp == NULL) return (0); if (!v3 && er) return (1); if (ndo->ndo_qflag) return(1); if (v3) { if (ndo->ndo_vflag) ND_PRINT((ndo, " POST:")); if (!(dp = parse_post_op_attr(ndo, dp, ndo->ndo_vflag))) return (0); } ND_TCHECK2(*dp, (v3 ? 
NFSX_V3STATFS : NFSX_V2STATFS)); sfsp = (const struct nfs_statfs *)dp; if (v3) { ND_PRINT((ndo, " tbytes %" PRIu64 " fbytes %" PRIu64 " abytes %" PRIu64, EXTRACT_64BITS((const uint32_t *)&sfsp->sf_tbytes), EXTRACT_64BITS((const uint32_t *)&sfsp->sf_fbytes), EXTRACT_64BITS((const uint32_t *)&sfsp->sf_abytes))); if (ndo->ndo_vflag) { ND_PRINT((ndo, " tfiles %" PRIu64 " ffiles %" PRIu64 " afiles %" PRIu64 " invar %u", EXTRACT_64BITS((const uint32_t *)&sfsp->sf_tfiles), EXTRACT_64BITS((const uint32_t *)&sfsp->sf_ffiles), EXTRACT_64BITS((const uint32_t *)&sfsp->sf_afiles), EXTRACT_32BITS(&sfsp->sf_invarsec))); } } else { ND_PRINT((ndo, " tsize %d bsize %d blocks %d bfree %d bavail %d", EXTRACT_32BITS(&sfsp->sf_tsize), EXTRACT_32BITS(&sfsp->sf_bsize), EXTRACT_32BITS(&sfsp->sf_blocks), EXTRACT_32BITS(&sfsp->sf_bfree), EXTRACT_32BITS(&sfsp->sf_bavail))); } return (1); trunc: return (0); } static int parserddires(netdissect_options *ndo, const uint32_t *dp) { int er; dp = parsestatus(ndo, dp, &er); if (dp == NULL) return (0); if (er) return (1); if (ndo->ndo_qflag) return (1); ND_TCHECK(dp[2]); ND_PRINT((ndo, " offset 0x%x size %d ", EXTRACT_32BITS(&dp[0]), EXTRACT_32BITS(&dp[1]))); if (dp[2] != 0) ND_PRINT((ndo, " eof")); return (1); trunc: return (0); } static const uint32_t * parse_wcc_attr(netdissect_options *ndo, const uint32_t *dp) { /* Our caller has already checked this */ ND_PRINT((ndo, " sz %" PRIu64, EXTRACT_64BITS(&dp[0]))); ND_PRINT((ndo, " mtime %u.%06u ctime %u.%06u", EXTRACT_32BITS(&dp[2]), EXTRACT_32BITS(&dp[3]), EXTRACT_32BITS(&dp[4]), EXTRACT_32BITS(&dp[5]))); return (dp + 6); } /* * Pre operation attributes. Print only if vflag > 1. */ static const uint32_t * parse_pre_op_attr(netdissect_options *ndo, const uint32_t *dp, int verbose) { ND_TCHECK(dp[0]); if (!EXTRACT_32BITS(&dp[0])) return (dp + 1); dp++; ND_TCHECK2(*dp, 24); if (verbose > 1) { return parse_wcc_attr(ndo, dp); } else { /* If not verbose enough, just skip over wcc_attr */ return (dp + 6); } trunc: return (NULL); } /* * Post operation attributes are printed if vflag >= 1 */ static const uint32_t * parse_post_op_attr(netdissect_options *ndo, const uint32_t *dp, int verbose) { ND_TCHECK(dp[0]); if (!EXTRACT_32BITS(&dp[0])) return (dp + 1); dp++; if (verbose) { return parsefattr(ndo, dp, verbose, 1); } else return (dp + (NFSX_V3FATTR / sizeof (uint32_t))); trunc: return (NULL); } static const uint32_t * parse_wcc_data(netdissect_options *ndo, const uint32_t *dp, int verbose) { if (verbose > 1) ND_PRINT((ndo, " PRE:")); if (!(dp = parse_pre_op_attr(ndo, dp, verbose))) return (0); if (verbose) ND_PRINT((ndo, " POST:")); return parse_post_op_attr(ndo, dp, verbose); } static const uint32_t * parsecreateopres(netdissect_options *ndo, const uint32_t *dp, int verbose) { int er; if (!(dp = parsestatus(ndo, dp, &er))) return (0); if (er) dp = parse_wcc_data(ndo, dp, verbose); else { ND_TCHECK(dp[0]); if (!EXTRACT_32BITS(&dp[0])) return (dp + 1); dp++; if (!(dp = parsefh(ndo, dp, 1))) return (0); if (verbose) { if (!(dp = parse_post_op_attr(ndo, dp, verbose))) return (0); if (ndo->ndo_vflag > 1) { ND_PRINT((ndo, " dir attr:")); dp = parse_wcc_data(ndo, dp, verbose); } } } return (dp); trunc: return (NULL); } static int parsewccres(netdissect_options *ndo, const uint32_t *dp, int verbose) { int er; if (!(dp = parsestatus(ndo, dp, &er))) return (0); return parse_wcc_data(ndo, dp, verbose) != NULL; } static const uint32_t * parsev3rddirres(netdissect_options *ndo, const uint32_t *dp, int verbose) { int er; if (!(dp = 
parsestatus(ndo, dp, &er))) return (0); if (ndo->ndo_vflag) ND_PRINT((ndo, " POST:")); if (!(dp = parse_post_op_attr(ndo, dp, verbose))) return (0); if (er) return dp; if (ndo->ndo_vflag) { ND_TCHECK(dp[1]); ND_PRINT((ndo, " verf %08x%08x", dp[0], dp[1])); dp += 2; } return dp; trunc: return (NULL); } static int parsefsinfo(netdissect_options *ndo, const uint32_t *dp) { const struct nfsv3_fsinfo *sfp; int er; if (!(dp = parsestatus(ndo, dp, &er))) return (0); if (ndo->ndo_vflag) ND_PRINT((ndo, " POST:")); if (!(dp = parse_post_op_attr(ndo, dp, ndo->ndo_vflag))) return (0); if (er) return (1); sfp = (const struct nfsv3_fsinfo *)dp; ND_TCHECK(*sfp); ND_PRINT((ndo, " rtmax %u rtpref %u wtmax %u wtpref %u dtpref %u", EXTRACT_32BITS(&sfp->fs_rtmax), EXTRACT_32BITS(&sfp->fs_rtpref), EXTRACT_32BITS(&sfp->fs_wtmax), EXTRACT_32BITS(&sfp->fs_wtpref), EXTRACT_32BITS(&sfp->fs_dtpref))); if (ndo->ndo_vflag) { ND_PRINT((ndo, " rtmult %u wtmult %u maxfsz %" PRIu64, EXTRACT_32BITS(&sfp->fs_rtmult), EXTRACT_32BITS(&sfp->fs_wtmult), EXTRACT_64BITS((const uint32_t *)&sfp->fs_maxfilesize))); ND_PRINT((ndo, " delta %u.%06u ", EXTRACT_32BITS(&sfp->fs_timedelta.nfsv3_sec), EXTRACT_32BITS(&sfp->fs_timedelta.nfsv3_nsec))); } return (1); trunc: return (0); } static int parsepathconf(netdissect_options *ndo, const uint32_t *dp) { int er; const struct nfsv3_pathconf *spp; if (!(dp = parsestatus(ndo, dp, &er))) return (0); if (ndo->ndo_vflag) ND_PRINT((ndo, " POST:")); if (!(dp = parse_post_op_attr(ndo, dp, ndo->ndo_vflag))) return (0); if (er) return (1); spp = (const struct nfsv3_pathconf *)dp; ND_TCHECK(*spp); ND_PRINT((ndo, " linkmax %u namemax %u %s %s %s %s", EXTRACT_32BITS(&spp->pc_linkmax), EXTRACT_32BITS(&spp->pc_namemax), EXTRACT_32BITS(&spp->pc_notrunc) ? "notrunc" : "", EXTRACT_32BITS(&spp->pc_chownrestricted) ? "chownres" : "", EXTRACT_32BITS(&spp->pc_caseinsensitive) ? "igncase" : "", EXTRACT_32BITS(&spp->pc_casepreserving) ? 
"keepcase" : "")); return (1); trunc: return (0); } static void interp_reply(netdissect_options *ndo, const struct sunrpc_msg *rp, uint32_t proc, uint32_t vers, int length) { register const uint32_t *dp; register int v3; int er; v3 = (vers == NFS_VER3); if (!v3 && proc < NFS_NPROCS) proc = nfsv3_procid[proc]; ND_PRINT((ndo, " %s", tok2str(nfsproc_str, "proc-%u", proc))); switch (proc) { case NFSPROC_GETATTR: dp = parserep(ndo, rp, length); if (dp != NULL && parseattrstat(ndo, dp, !ndo->ndo_qflag, v3) != 0) return; break; case NFSPROC_SETATTR: if (!(dp = parserep(ndo, rp, length))) return; if (v3) { if (parsewccres(ndo, dp, ndo->ndo_vflag)) return; } else { if (parseattrstat(ndo, dp, !ndo->ndo_qflag, 0) != 0) return; } break; case NFSPROC_LOOKUP: if (!(dp = parserep(ndo, rp, length))) break; if (v3) { if (!(dp = parsestatus(ndo, dp, &er))) break; if (er) { if (ndo->ndo_vflag > 1) { ND_PRINT((ndo, " post dattr:")); dp = parse_post_op_attr(ndo, dp, ndo->ndo_vflag); } } else { if (!(dp = parsefh(ndo, dp, v3))) break; if ((dp = parse_post_op_attr(ndo, dp, ndo->ndo_vflag)) && ndo->ndo_vflag > 1) { ND_PRINT((ndo, " post dattr:")); dp = parse_post_op_attr(ndo, dp, ndo->ndo_vflag); } } if (dp) return; } else { if (parsediropres(ndo, dp) != 0) return; } break; case NFSPROC_ACCESS: if (!(dp = parserep(ndo, rp, length))) break; if (!(dp = parsestatus(ndo, dp, &er))) break; if (ndo->ndo_vflag) ND_PRINT((ndo, " attr:")); if (!(dp = parse_post_op_attr(ndo, dp, ndo->ndo_vflag))) break; if (!er) { ND_TCHECK(dp[0]); ND_PRINT((ndo, " c %04x", EXTRACT_32BITS(&dp[0]))); } return; case NFSPROC_READLINK: dp = parserep(ndo, rp, length); if (dp != NULL && parselinkres(ndo, dp, v3) != 0) return; break; case NFSPROC_READ: if (!(dp = parserep(ndo, rp, length))) break; if (v3) { if (!(dp = parsestatus(ndo, dp, &er))) break; if (!(dp = parse_post_op_attr(ndo, dp, ndo->ndo_vflag))) break; if (er) return; if (ndo->ndo_vflag) { ND_TCHECK(dp[1]); ND_PRINT((ndo, " %u bytes", EXTRACT_32BITS(&dp[0]))); if (EXTRACT_32BITS(&dp[1])) ND_PRINT((ndo, " EOF")); } return; } else { if (parseattrstat(ndo, dp, ndo->ndo_vflag, 0) != 0) return; } break; case NFSPROC_WRITE: if (!(dp = parserep(ndo, rp, length))) break; if (v3) { if (!(dp = parsestatus(ndo, dp, &er))) break; if (!(dp = parse_wcc_data(ndo, dp, ndo->ndo_vflag))) break; if (er) return; if (ndo->ndo_vflag) { ND_TCHECK(dp[0]); ND_PRINT((ndo, " %u bytes", EXTRACT_32BITS(&dp[0]))); if (ndo->ndo_vflag > 1) { ND_TCHECK(dp[1]); ND_PRINT((ndo, " <%s>", tok2str(nfsv3_writemodes, NULL, EXTRACT_32BITS(&dp[1])))); } return; } } else { if (parseattrstat(ndo, dp, ndo->ndo_vflag, v3) != 0) return; } break; case NFSPROC_CREATE: case NFSPROC_MKDIR: if (!(dp = parserep(ndo, rp, length))) break; if (v3) { if (parsecreateopres(ndo, dp, ndo->ndo_vflag) != NULL) return; } else { if (parsediropres(ndo, dp) != 0) return; } break; case NFSPROC_SYMLINK: if (!(dp = parserep(ndo, rp, length))) break; if (v3) { if (parsecreateopres(ndo, dp, ndo->ndo_vflag) != NULL) return; } else { if (parsestatus(ndo, dp, &er) != NULL) return; } break; case NFSPROC_MKNOD: if (!(dp = parserep(ndo, rp, length))) break; if (parsecreateopres(ndo, dp, ndo->ndo_vflag) != NULL) return; break; case NFSPROC_REMOVE: case NFSPROC_RMDIR: if (!(dp = parserep(ndo, rp, length))) break; if (v3) { if (parsewccres(ndo, dp, ndo->ndo_vflag)) return; } else { if (parsestatus(ndo, dp, &er) != NULL) return; } break; case NFSPROC_RENAME: if (!(dp = parserep(ndo, rp, length))) break; if (v3) { if (!(dp = parsestatus(ndo, dp, &er))) break; if 
(ndo->ndo_vflag) { ND_PRINT((ndo, " from:")); if (!(dp = parse_wcc_data(ndo, dp, ndo->ndo_vflag))) break; ND_PRINT((ndo, " to:")); if (!(dp = parse_wcc_data(ndo, dp, ndo->ndo_vflag))) break; } return; } else { if (parsestatus(ndo, dp, &er) != NULL) return; } break; case NFSPROC_LINK: if (!(dp = parserep(ndo, rp, length))) break; if (v3) { if (!(dp = parsestatus(ndo, dp, &er))) break; if (ndo->ndo_vflag) { ND_PRINT((ndo, " file POST:")); if (!(dp = parse_post_op_attr(ndo, dp, ndo->ndo_vflag))) break; ND_PRINT((ndo, " dir:")); if (!(dp = parse_wcc_data(ndo, dp, ndo->ndo_vflag))) break; return; } } else { if (parsestatus(ndo, dp, &er) != NULL) return; } break; case NFSPROC_READDIR: if (!(dp = parserep(ndo, rp, length))) break; if (v3) { if (parsev3rddirres(ndo, dp, ndo->ndo_vflag)) return; } else { if (parserddires(ndo, dp) != 0) return; } break; case NFSPROC_READDIRPLUS: if (!(dp = parserep(ndo, rp, length))) break; if (parsev3rddirres(ndo, dp, ndo->ndo_vflag)) return; break; case NFSPROC_FSSTAT: dp = parserep(ndo, rp, length); if (dp != NULL && parsestatfs(ndo, dp, v3) != 0) return; break; case NFSPROC_FSINFO: dp = parserep(ndo, rp, length); if (dp != NULL && parsefsinfo(ndo, dp) != 0) return; break; case NFSPROC_PATHCONF: dp = parserep(ndo, rp, length); if (dp != NULL && parsepathconf(ndo, dp) != 0) return; break; case NFSPROC_COMMIT: dp = parserep(ndo, rp, length); if (dp != NULL && parsewccres(ndo, dp, ndo->ndo_vflag) != 0) return; break; default: return; } trunc: if (!nfserr) ND_PRINT((ndo, "%s", tstr)); }
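The file above is the post-fix version for this record: it already contains every line listed under 'added' in the diff field below. Throughout it, reads from the packet are guarded by tcpdump's truncation macros: ND_TCHECK(var) verifies that sizeof(var) bytes of var were actually captured and jumps to the enclosing function's trunc: label when they were not, while ND_TCHECK2(var, l) does the same for an explicit length l, so the pre-fix ND_TCHECK2(dp[0], 0) validated a zero-byte span. A minimal standalone sketch of the idiom follows; the MY_* macros, print_first_word() and the main() harness are illustrative stand-ins, not the real netdissect.h definitions.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* One past the last captured byte, standing in for ndo->ndo_snapend. */
static const unsigned char *snapend;

/* Is the l-byte span starting at var inside the captured snapshot? */
#define MY_TTEST2(var, l)  ((const unsigned char *)&(var) + (l) <= snapend)
/* Bail out to the function's trunc: label when the span is not captured. */
#define MY_TCHECK2(var, l) do { if (!MY_TTEST2(var, l)) goto trunc; } while (0)
/* Common case: check all sizeof(var) bytes, as ND_TCHECK does. */
#define MY_TCHECK(var)     MY_TCHECK2(var, sizeof(var))

static int
print_first_word(const uint32_t *dp)
{
    uint32_t w;

    MY_TCHECK(dp[0]);           /* all 4 bytes must be captured */
    memcpy(&w, &dp[0], sizeof(w));
    printf("word: 0x%08x\n", (unsigned int)w);
    return 0;
trunc:
    printf("[|truncated]\n");
    return -1;
}

int
main(void)
{
    uint32_t buf[2] = { 0x11223344u, 0x55667788u };

    snapend = (const unsigned char *)buf + sizeof(buf);
    print_first_word(buf);      /* in bounds: prints the word */
    snapend = (const unsigned char *)buf + 2;
    print_first_word(buf);      /* short capture: hits trunc */
    return 0;
}

Shortening snapend, as the second call does, models a capture whose snapshot ends inside the word, which is exactly the condition the NFS printer's trunc: paths handle.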
nfsreq_print_noaddr(netdissect_options *ndo, register const u_char *bp, u_int length, register const u_char *bp2) { register const struct sunrpc_msg *rp; register const uint32_t *dp; nfs_type type; int v3; uint32_t proc; uint32_t access_flags; struct nfsv3_sattr sa3; ND_PRINT((ndo, "%d", length)); nfserr = 0; /* assume no error */ rp = (const struct sunrpc_msg *)bp; if (!xid_map_enter(ndo, rp, bp2)) /* record proc number for later on */ goto trunc; v3 = (EXTRACT_32BITS(&rp->rm_call.cb_vers) == NFS_VER3); proc = EXTRACT_32BITS(&rp->rm_call.cb_proc); if (!v3 && proc < NFS_NPROCS) proc = nfsv3_procid[proc]; ND_PRINT((ndo, " %s", tok2str(nfsproc_str, "proc-%u", proc))); switch (proc) { case NFSPROC_GETATTR: case NFSPROC_SETATTR: case NFSPROC_READLINK: case NFSPROC_FSSTAT: case NFSPROC_FSINFO: case NFSPROC_PATHCONF: if ((dp = parsereq(ndo, rp, length)) != NULL && parsefh(ndo, dp, v3) != NULL) return; break; case NFSPROC_LOOKUP: case NFSPROC_CREATE: case NFSPROC_MKDIR: case NFSPROC_REMOVE: case NFSPROC_RMDIR: if ((dp = parsereq(ndo, rp, length)) != NULL && parsefhn(ndo, dp, v3) != NULL) return; break; case NFSPROC_ACCESS: if ((dp = parsereq(ndo, rp, length)) != NULL && (dp = parsefh(ndo, dp, v3)) != NULL) { ND_TCHECK(dp[0]); access_flags = EXTRACT_32BITS(&dp[0]); if (access_flags & ~NFSV3ACCESS_FULL) { /* NFSV3ACCESS definitions aren't up to date */ ND_PRINT((ndo, " %04x", access_flags)); } else if ((access_flags & NFSV3ACCESS_FULL) == NFSV3ACCESS_FULL) { ND_PRINT((ndo, " NFS_ACCESS_FULL")); } else { char separator = ' '; if (access_flags & NFSV3ACCESS_READ) { ND_PRINT((ndo, " NFS_ACCESS_READ")); separator = '|'; } if (access_flags & NFSV3ACCESS_LOOKUP) { ND_PRINT((ndo, "%cNFS_ACCESS_LOOKUP", separator)); separator = '|'; } if (access_flags & NFSV3ACCESS_MODIFY) { ND_PRINT((ndo, "%cNFS_ACCESS_MODIFY", separator)); separator = '|'; } if (access_flags & NFSV3ACCESS_EXTEND) { ND_PRINT((ndo, "%cNFS_ACCESS_EXTEND", separator)); separator = '|'; } if (access_flags & NFSV3ACCESS_DELETE) { ND_PRINT((ndo, "%cNFS_ACCESS_DELETE", separator)); separator = '|'; } if (access_flags & NFSV3ACCESS_EXECUTE) ND_PRINT((ndo, "%cNFS_ACCESS_EXECUTE", separator)); } return; } break; case NFSPROC_READ: if ((dp = parsereq(ndo, rp, length)) != NULL && (dp = parsefh(ndo, dp, v3)) != NULL) { if (v3) { ND_TCHECK(dp[2]); ND_PRINT((ndo, " %u bytes @ %" PRIu64, EXTRACT_32BITS(&dp[2]), EXTRACT_64BITS(&dp[0]))); } else { ND_TCHECK(dp[1]); ND_PRINT((ndo, " %u bytes @ %u", EXTRACT_32BITS(&dp[1]), EXTRACT_32BITS(&dp[0]))); } return; } break; case NFSPROC_WRITE: if ((dp = parsereq(ndo, rp, length)) != NULL && (dp = parsefh(ndo, dp, v3)) != NULL) { if (v3) { ND_TCHECK(dp[2]); ND_PRINT((ndo, " %u (%u) bytes @ %" PRIu64, EXTRACT_32BITS(&dp[4]), EXTRACT_32BITS(&dp[2]), EXTRACT_64BITS(&dp[0]))); if (ndo->ndo_vflag) { dp += 3; ND_TCHECK(dp[0]); ND_PRINT((ndo, " <%s>", tok2str(nfsv3_writemodes, NULL, EXTRACT_32BITS(dp)))); } } else { ND_TCHECK(dp[3]); ND_PRINT((ndo, " %u (%u) bytes @ %u (%u)", EXTRACT_32BITS(&dp[3]), EXTRACT_32BITS(&dp[2]), EXTRACT_32BITS(&dp[1]), EXTRACT_32BITS(&dp[0]))); } return; } break; case NFSPROC_SYMLINK: if ((dp = parsereq(ndo, rp, length)) != NULL && (dp = parsefhn(ndo, dp, v3)) != NULL) { ND_PRINT((ndo, " ->")); if (v3 && (dp = parse_sattr3(ndo, dp, &sa3)) == NULL) break; if (parsefn(ndo, dp) == NULL) break; if (v3 && ndo->ndo_vflag) print_sattr3(ndo, &sa3, ndo->ndo_vflag); return; } break; case NFSPROC_MKNOD: if ((dp = parsereq(ndo, rp, length)) != NULL && (dp = parsefhn(ndo, dp, v3)) != NULL) { ND_TCHECK(*dp); 
type = (nfs_type)EXTRACT_32BITS(dp); dp++; if ((dp = parse_sattr3(ndo, dp, &sa3)) == NULL) break; ND_PRINT((ndo, " %s", tok2str(type2str, "unk-ft %d", type))); if (ndo->ndo_vflag && (type == NFCHR || type == NFBLK)) { ND_TCHECK(dp[1]); ND_PRINT((ndo, " %u/%u", EXTRACT_32BITS(&dp[0]), EXTRACT_32BITS(&dp[1]))); dp += 2; } if (ndo->ndo_vflag) print_sattr3(ndo, &sa3, ndo->ndo_vflag); return; } break; case NFSPROC_RENAME: if ((dp = parsereq(ndo, rp, length)) != NULL && (dp = parsefhn(ndo, dp, v3)) != NULL) { ND_PRINT((ndo, " ->")); if (parsefhn(ndo, dp, v3) != NULL) return; } break; case NFSPROC_LINK: if ((dp = parsereq(ndo, rp, length)) != NULL && (dp = parsefh(ndo, dp, v3)) != NULL) { ND_PRINT((ndo, " ->")); if (parsefhn(ndo, dp, v3) != NULL) return; } break; case NFSPROC_READDIR: if ((dp = parsereq(ndo, rp, length)) != NULL && (dp = parsefh(ndo, dp, v3)) != NULL) { if (v3) { ND_TCHECK(dp[4]); /* * We shouldn't really try to interpret the * offset cookie here. */ ND_PRINT((ndo, " %u bytes @ %" PRId64, EXTRACT_32BITS(&dp[4]), EXTRACT_64BITS(&dp[0]))); if (ndo->ndo_vflag) ND_PRINT((ndo, " verf %08x%08x", dp[2], dp[3])); } else { ND_TCHECK(dp[1]); /* * Print the offset as signed, since -1 is * common, but offsets > 2^31 aren't. */ ND_PRINT((ndo, " %u bytes @ %d", EXTRACT_32BITS(&dp[1]), EXTRACT_32BITS(&dp[0]))); } return; } break; case NFSPROC_READDIRPLUS: if ((dp = parsereq(ndo, rp, length)) != NULL && (dp = parsefh(ndo, dp, v3)) != NULL) { ND_TCHECK(dp[4]); /* * We don't try to interpret the offset * cookie here. */ ND_PRINT((ndo, " %u bytes @ %" PRId64, EXTRACT_32BITS(&dp[4]), EXTRACT_64BITS(&dp[0]))); if (ndo->ndo_vflag) { ND_TCHECK(dp[5]); ND_PRINT((ndo, " max %u verf %08x%08x", EXTRACT_32BITS(&dp[5]), dp[2], dp[3])); } return; } break; case NFSPROC_COMMIT: if ((dp = parsereq(ndo, rp, length)) != NULL && (dp = parsefh(ndo, dp, v3)) != NULL) { ND_TCHECK(dp[2]); ND_PRINT((ndo, " %u bytes @ %" PRIu64, EXTRACT_32BITS(&dp[2]), EXTRACT_64BITS(&dp[0]))); return; } break; default: return; } trunc: if (!nfserr) ND_PRINT((ndo, "%s", tstr)); }
nfsreq_print_noaddr(netdissect_options *ndo, register const u_char *bp, u_int length, register const u_char *bp2) { register const struct sunrpc_msg *rp; register const uint32_t *dp; nfs_type type; int v3; uint32_t proc; uint32_t access_flags; struct nfsv3_sattr sa3; ND_PRINT((ndo, "%d", length)); nfserr = 0; /* assume no error */ rp = (const struct sunrpc_msg *)bp; if (!xid_map_enter(ndo, rp, bp2)) /* record proc number for later on */ goto trunc; v3 = (EXTRACT_32BITS(&rp->rm_call.cb_vers) == NFS_VER3); proc = EXTRACT_32BITS(&rp->rm_call.cb_proc); if (!v3 && proc < NFS_NPROCS) proc = nfsv3_procid[proc]; ND_PRINT((ndo, " %s", tok2str(nfsproc_str, "proc-%u", proc))); switch (proc) { case NFSPROC_GETATTR: case NFSPROC_SETATTR: case NFSPROC_READLINK: case NFSPROC_FSSTAT: case NFSPROC_FSINFO: case NFSPROC_PATHCONF: if ((dp = parsereq(ndo, rp, length)) != NULL && parsefh(ndo, dp, v3) != NULL) return; break; case NFSPROC_LOOKUP: case NFSPROC_CREATE: case NFSPROC_MKDIR: case NFSPROC_REMOVE: case NFSPROC_RMDIR: if ((dp = parsereq(ndo, rp, length)) != NULL && parsefhn(ndo, dp, v3) != NULL) return; break; case NFSPROC_ACCESS: if ((dp = parsereq(ndo, rp, length)) != NULL && (dp = parsefh(ndo, dp, v3)) != NULL) { ND_TCHECK(dp[0]); access_flags = EXTRACT_32BITS(&dp[0]); if (access_flags & ~NFSV3ACCESS_FULL) { /* NFSV3ACCESS definitions aren't up to date */ ND_PRINT((ndo, " %04x", access_flags)); } else if ((access_flags & NFSV3ACCESS_FULL) == NFSV3ACCESS_FULL) { ND_PRINT((ndo, " NFS_ACCESS_FULL")); } else { char separator = ' '; if (access_flags & NFSV3ACCESS_READ) { ND_PRINT((ndo, " NFS_ACCESS_READ")); separator = '|'; } if (access_flags & NFSV3ACCESS_LOOKUP) { ND_PRINT((ndo, "%cNFS_ACCESS_LOOKUP", separator)); separator = '|'; } if (access_flags & NFSV3ACCESS_MODIFY) { ND_PRINT((ndo, "%cNFS_ACCESS_MODIFY", separator)); separator = '|'; } if (access_flags & NFSV3ACCESS_EXTEND) { ND_PRINT((ndo, "%cNFS_ACCESS_EXTEND", separator)); separator = '|'; } if (access_flags & NFSV3ACCESS_DELETE) { ND_PRINT((ndo, "%cNFS_ACCESS_DELETE", separator)); separator = '|'; } if (access_flags & NFSV3ACCESS_EXECUTE) ND_PRINT((ndo, "%cNFS_ACCESS_EXECUTE", separator)); } return; } break; case NFSPROC_READ: if ((dp = parsereq(ndo, rp, length)) != NULL && (dp = parsefh(ndo, dp, v3)) != NULL) { if (v3) { ND_TCHECK(dp[2]); ND_PRINT((ndo, " %u bytes @ %" PRIu64, EXTRACT_32BITS(&dp[2]), EXTRACT_64BITS(&dp[0]))); } else { ND_TCHECK(dp[1]); ND_PRINT((ndo, " %u bytes @ %u", EXTRACT_32BITS(&dp[1]), EXTRACT_32BITS(&dp[0]))); } return; } break; case NFSPROC_WRITE: if ((dp = parsereq(ndo, rp, length)) != NULL && (dp = parsefh(ndo, dp, v3)) != NULL) { if (v3) { ND_TCHECK(dp[4]); ND_PRINT((ndo, " %u (%u) bytes @ %" PRIu64, EXTRACT_32BITS(&dp[4]), EXTRACT_32BITS(&dp[2]), EXTRACT_64BITS(&dp[0]))); if (ndo->ndo_vflag) { ND_PRINT((ndo, " <%s>", tok2str(nfsv3_writemodes, NULL, EXTRACT_32BITS(&dp[3])))); } } else { ND_TCHECK(dp[3]); ND_PRINT((ndo, " %u (%u) bytes @ %u (%u)", EXTRACT_32BITS(&dp[3]), EXTRACT_32BITS(&dp[2]), EXTRACT_32BITS(&dp[1]), EXTRACT_32BITS(&dp[0]))); } return; } break; case NFSPROC_SYMLINK: if ((dp = parsereq(ndo, rp, length)) != NULL && (dp = parsefhn(ndo, dp, v3)) != NULL) { ND_PRINT((ndo, " ->")); if (v3 && (dp = parse_sattr3(ndo, dp, &sa3)) == NULL) break; if (parsefn(ndo, dp) == NULL) break; if (v3 && ndo->ndo_vflag) print_sattr3(ndo, &sa3, ndo->ndo_vflag); return; } break; case NFSPROC_MKNOD: if ((dp = parsereq(ndo, rp, length)) != NULL && (dp = parsefhn(ndo, dp, v3)) != NULL) { ND_TCHECK(*dp); type = 
(nfs_type)EXTRACT_32BITS(dp); dp++; if ((dp = parse_sattr3(ndo, dp, &sa3)) == NULL) break; ND_PRINT((ndo, " %s", tok2str(type2str, "unk-ft %d", type))); if (ndo->ndo_vflag && (type == NFCHR || type == NFBLK)) { ND_TCHECK(dp[1]); ND_PRINT((ndo, " %u/%u", EXTRACT_32BITS(&dp[0]), EXTRACT_32BITS(&dp[1]))); dp += 2; } if (ndo->ndo_vflag) print_sattr3(ndo, &sa3, ndo->ndo_vflag); return; } break; case NFSPROC_RENAME: if ((dp = parsereq(ndo, rp, length)) != NULL && (dp = parsefhn(ndo, dp, v3)) != NULL) { ND_PRINT((ndo, " ->")); if (parsefhn(ndo, dp, v3) != NULL) return; } break; case NFSPROC_LINK: if ((dp = parsereq(ndo, rp, length)) != NULL && (dp = parsefh(ndo, dp, v3)) != NULL) { ND_PRINT((ndo, " ->")); if (parsefhn(ndo, dp, v3) != NULL) return; } break; case NFSPROC_READDIR: if ((dp = parsereq(ndo, rp, length)) != NULL && (dp = parsefh(ndo, dp, v3)) != NULL) { if (v3) { ND_TCHECK(dp[4]); /* * We shouldn't really try to interpret the * offset cookie here. */ ND_PRINT((ndo, " %u bytes @ %" PRId64, EXTRACT_32BITS(&dp[4]), EXTRACT_64BITS(&dp[0]))); if (ndo->ndo_vflag) ND_PRINT((ndo, " verf %08x%08x", dp[2], dp[3])); } else { ND_TCHECK(dp[1]); /* * Print the offset as signed, since -1 is * common, but offsets > 2^31 aren't. */ ND_PRINT((ndo, " %u bytes @ %d", EXTRACT_32BITS(&dp[1]), EXTRACT_32BITS(&dp[0]))); } return; } break; case NFSPROC_READDIRPLUS: if ((dp = parsereq(ndo, rp, length)) != NULL && (dp = parsefh(ndo, dp, v3)) != NULL) { ND_TCHECK(dp[4]); /* * We don't try to interpret the offset * cookie here. */ ND_PRINT((ndo, " %u bytes @ %" PRId64, EXTRACT_32BITS(&dp[4]), EXTRACT_64BITS(&dp[0]))); if (ndo->ndo_vflag) { ND_TCHECK(dp[5]); ND_PRINT((ndo, " max %u verf %08x%08x", EXTRACT_32BITS(&dp[5]), dp[2], dp[3])); } return; } break; case NFSPROC_COMMIT: if ((dp = parsereq(ndo, rp, length)) != NULL && (dp = parsefh(ndo, dp, v3)) != NULL) { ND_TCHECK(dp[2]); ND_PRINT((ndo, " %u bytes @ %" PRIu64, EXTRACT_32BITS(&dp[2]), EXTRACT_64BITS(&dp[0]))); return; } break; default: return; } trunc: if (!nfserr) ND_PRINT((ndo, "%s", tstr)); }
{'added': [(631, '\t\t\t\tND_TCHECK(dp[4]);'),
           (639, '\t\t\t\t\t\t\tNULL, EXTRACT_32BITS(&dp[3]))));'),
           (1007, '\tND_TCHECK(dp[0]);'),
           (1244, '\t/* Our caller has already checked this */'),
           (1513, '\t\tif (!er) {'),
           (1514, '\t\t\tND_TCHECK(dp[0]);'),
           (1516, '\t\t}')],
 'deleted': [(631, '\t\t\t\tND_TCHECK(dp[2]);'),
             (637, '\t\t\t\t\tdp += 3;'),
             (638, '\t\t\t\t\tND_TCHECK(dp[0]);'),
             (641, '\t\t\t\t\t\t\tNULL, EXTRACT_32BITS(dp))));'),
             (1005, '\tND_TCHECK2(dp[0], 0);'),
             (1514, '\t\tif (!er)')]}
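Read against the two function fields above, the hunks in this diff pair up as follows. In nfsreq_print_noaddr's v3 WRITE case, func_before checks dp[2] but prints dp[4], and under -v it advances dp by three words before reading the write mode; on a truncated capture that is an out-of-bounds read (the record's CWE-125). func_after checks dp[4] up front, which also covers the mode word it now reads in place as dp[3]. In parserep, the zero-length ND_TCHECK2(dp[0], 0) before the ar_stat extraction becomes a real four-byte ND_TCHECK(dp[0]). In interp_reply's NFSPROC_ACCESS reply, the bare if (!er) grows a block so that ND_TCHECK(dp[0]) runs before the final EXTRACT_32BITS. The parse_wcc_attr hunk only adds the comment noting that its caller, parse_pre_op_attr, has already ND_TCHECK2-checked the 24-byte span. Condensed verbatim from func_before and func_after above, the WRITE hunk:

/* func_before (vulnerable): checks dp[2], then reads dp[4] */
ND_TCHECK(dp[2]);
ND_PRINT((ndo, " %u (%u) bytes @ %" PRIu64,
          EXTRACT_32BITS(&dp[4]), EXTRACT_32BITS(&dp[2]),
          EXTRACT_64BITS(&dp[0])));
if (ndo->ndo_vflag) {
        dp += 3;
        ND_TCHECK(dp[0]);
        ND_PRINT((ndo, " <%s>",
                  tok2str(nfsv3_writemodes, NULL, EXTRACT_32BITS(dp))));
}

/* func_after (fixed): checks dp[4] first, reads the mode in place */
ND_TCHECK(dp[4]);
ND_PRINT((ndo, " %u (%u) bytes @ %" PRIu64,
          EXTRACT_32BITS(&dp[4]), EXTRACT_32BITS(&dp[2]),
          EXTRACT_64BITS(&dp[0])));
if (ndo->ndo_vflag) {
        ND_PRINT((ndo, " <%s>",
                  tok2str(nfsv3_writemodes, NULL, EXTRACT_32BITS(&dp[3]))));
}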
7
6
1400
9286
https://github.com/the-tcpdump-group/tcpdump
CVE-2017-12898
['CWE-125']
print-nfs.c
parserep
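parserep, named above as this record's method, appears in the code_before below still carrying the pre-fix ND_TCHECK2(dp[0], 0). It steps over the RPC verifier with dp += (len + (2*sizeof(uint32_t) + 3)) / sizeof(uint32_t): two 32-bit words for the opaque_auth flavor and length, plus len opaque bytes rounded up to XDR's 4-byte boundary (parsereq applies the same formula twice, once for the credentials and once for the verifier). A small worked sketch of that arithmetic; the function name and harness are hypothetical:

#include <stdio.h>
#include <stdint.h>

/*
 * XDR opaque skip, as in parserep/parsereq: 2 header words
 * (flavor, length) plus 'len' payload bytes padded up to a
 * 4-byte multiple, expressed as a count of 32-bit words.
 */
static unsigned int
xdr_opaque_skip_words(unsigned int len)
{
    return (unsigned int)((len + (2 * sizeof(uint32_t) + 3)) / sizeof(uint32_t));
}

int
main(void)
{
    printf("len=0 -> %u words\n", xdr_opaque_skip_words(0)); /* 2: header only */
    printf("len=5 -> %u words\n", xdr_opaque_skip_words(5)); /* 4: 2 + 5 bytes padded to 8 */
    printf("len=8 -> %u words\n", xdr_opaque_skip_words(8)); /* 4: 2 + exactly 2 words */
    return 0;
}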
/* * Copyright (c) 1988, 1989, 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that: (1) source code distributions * retain the above copyright notice and this paragraph in its entirety, (2) * distributions including binary code include the above copyright notice and * this paragraph in its entirety in the documentation or other materials * provided with the distribution, and (3) all advertising materials mentioning * features or use of this software display the following acknowledgement: * ``This product includes software developed by the University of California, * Lawrence Berkeley Laboratory and its contributors.'' Neither the name of * the University nor the names of its contributors may be used to endorse * or promote products derived from this software without specific prior * written permission. * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. */ /* \summary: Network File System (NFS) printer */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include <netdissect-stdinc.h> #include <stdio.h> #include <string.h> #include "netdissect.h" #include "addrtoname.h" #include "extract.h" #include "nfs.h" #include "nfsfh.h" #include "ip.h" #include "ip6.h" #include "rpc_auth.h" #include "rpc_msg.h" static const char tstr[] = " [|nfs]"; static void nfs_printfh(netdissect_options *, const uint32_t *, const u_int); static int xid_map_enter(netdissect_options *, const struct sunrpc_msg *, const u_char *); static int xid_map_find(const struct sunrpc_msg *, const u_char *, uint32_t *, uint32_t *); static void interp_reply(netdissect_options *, const struct sunrpc_msg *, uint32_t, uint32_t, int); static const uint32_t *parse_post_op_attr(netdissect_options *, const uint32_t *, int); /* * Mapping of old NFS Version 2 RPC numbers to generic numbers. */ static uint32_t nfsv3_procid[NFS_NPROCS] = { NFSPROC_NULL, NFSPROC_GETATTR, NFSPROC_SETATTR, NFSPROC_NOOP, NFSPROC_LOOKUP, NFSPROC_READLINK, NFSPROC_READ, NFSPROC_NOOP, NFSPROC_WRITE, NFSPROC_CREATE, NFSPROC_REMOVE, NFSPROC_RENAME, NFSPROC_LINK, NFSPROC_SYMLINK, NFSPROC_MKDIR, NFSPROC_RMDIR, NFSPROC_READDIR, NFSPROC_FSSTAT, NFSPROC_NOOP, NFSPROC_NOOP, NFSPROC_NOOP, NFSPROC_NOOP, NFSPROC_NOOP, NFSPROC_NOOP, NFSPROC_NOOP, NFSPROC_NOOP }; static const struct tok nfsproc_str[] = { { NFSPROC_NOOP, "nop" }, { NFSPROC_NULL, "null" }, { NFSPROC_GETATTR, "getattr" }, { NFSPROC_SETATTR, "setattr" }, { NFSPROC_LOOKUP, "lookup" }, { NFSPROC_ACCESS, "access" }, { NFSPROC_READLINK, "readlink" }, { NFSPROC_READ, "read" }, { NFSPROC_WRITE, "write" }, { NFSPROC_CREATE, "create" }, { NFSPROC_MKDIR, "mkdir" }, { NFSPROC_SYMLINK, "symlink" }, { NFSPROC_MKNOD, "mknod" }, { NFSPROC_REMOVE, "remove" }, { NFSPROC_RMDIR, "rmdir" }, { NFSPROC_RENAME, "rename" }, { NFSPROC_LINK, "link" }, { NFSPROC_READDIR, "readdir" }, { NFSPROC_READDIRPLUS, "readdirplus" }, { NFSPROC_FSSTAT, "fsstat" }, { NFSPROC_FSINFO, "fsinfo" }, { NFSPROC_PATHCONF, "pathconf" }, { NFSPROC_COMMIT, "commit" }, { 0, NULL } }; /* * NFS V2 and V3 status values. * * Some of these come from the RFCs for NFS V2 and V3, with the message * strings taken from the FreeBSD C library "errlst.c". 
* * Others are errors that are not in the RFC but that I suspect some * NFS servers could return; the values are FreeBSD errno values, as * the first NFS server was the SunOS 2.0 one, and until 5.0 SunOS * was primarily BSD-derived. */ static const struct tok status2str[] = { { 1, "Operation not permitted" }, /* EPERM */ { 2, "No such file or directory" }, /* ENOENT */ { 5, "Input/output error" }, /* EIO */ { 6, "Device not configured" }, /* ENXIO */ { 11, "Resource deadlock avoided" }, /* EDEADLK */ { 12, "Cannot allocate memory" }, /* ENOMEM */ { 13, "Permission denied" }, /* EACCES */ { 17, "File exists" }, /* EEXIST */ { 18, "Cross-device link" }, /* EXDEV */ { 19, "Operation not supported by device" }, /* ENODEV */ { 20, "Not a directory" }, /* ENOTDIR */ { 21, "Is a directory" }, /* EISDIR */ { 22, "Invalid argument" }, /* EINVAL */ { 26, "Text file busy" }, /* ETXTBSY */ { 27, "File too large" }, /* EFBIG */ { 28, "No space left on device" }, /* ENOSPC */ { 30, "Read-only file system" }, /* EROFS */ { 31, "Too many links" }, /* EMLINK */ { 45, "Operation not supported" }, /* EOPNOTSUPP */ { 62, "Too many levels of symbolic links" }, /* ELOOP */ { 63, "File name too long" }, /* ENAMETOOLONG */ { 66, "Directory not empty" }, /* ENOTEMPTY */ { 69, "Disc quota exceeded" }, /* EDQUOT */ { 70, "Stale NFS file handle" }, /* ESTALE */ { 71, "Too many levels of remote in path" }, /* EREMOTE */ { 99, "Write cache flushed to disk" }, /* NFSERR_WFLUSH (not used) */ { 10001, "Illegal NFS file handle" }, /* NFS3ERR_BADHANDLE */ { 10002, "Update synchronization mismatch" }, /* NFS3ERR_NOT_SYNC */ { 10003, "READDIR/READDIRPLUS cookie is stale" }, /* NFS3ERR_BAD_COOKIE */ { 10004, "Operation not supported" }, /* NFS3ERR_NOTSUPP */ { 10005, "Buffer or request is too small" }, /* NFS3ERR_TOOSMALL */ { 10006, "Unspecified error on server" }, /* NFS3ERR_SERVERFAULT */ { 10007, "Object of that type not supported" }, /* NFS3ERR_BADTYPE */ { 10008, "Request couldn't be completed in time" }, /* NFS3ERR_JUKEBOX */ { 0, NULL } }; static const struct tok nfsv3_writemodes[] = { { 0, "unstable" }, { 1, "datasync" }, { 2, "filesync" }, { 0, NULL } }; static const struct tok type2str[] = { { NFNON, "NON" }, { NFREG, "REG" }, { NFDIR, "DIR" }, { NFBLK, "BLK" }, { NFCHR, "CHR" }, { NFLNK, "LNK" }, { NFFIFO, "FIFO" }, { 0, NULL } }; static const struct tok sunrpc_auth_str[] = { { SUNRPC_AUTH_OK, "OK" }, { SUNRPC_AUTH_BADCRED, "Bogus Credentials (seal broken)" }, { SUNRPC_AUTH_REJECTEDCRED, "Rejected Credentials (client should begin new session)" }, { SUNRPC_AUTH_BADVERF, "Bogus Verifier (seal broken)" }, { SUNRPC_AUTH_REJECTEDVERF, "Verifier expired or was replayed" }, { SUNRPC_AUTH_TOOWEAK, "Credentials are too weak" }, { SUNRPC_AUTH_INVALIDRESP, "Bogus response verifier" }, { SUNRPC_AUTH_FAILED, "Unknown failure" }, { 0, NULL } }; static const struct tok sunrpc_str[] = { { SUNRPC_PROG_UNAVAIL, "PROG_UNAVAIL" }, { SUNRPC_PROG_MISMATCH, "PROG_MISMATCH" }, { SUNRPC_PROC_UNAVAIL, "PROC_UNAVAIL" }, { SUNRPC_GARBAGE_ARGS, "GARBAGE_ARGS" }, { SUNRPC_SYSTEM_ERR, "SYSTEM_ERR" }, { 0, NULL } }; static void print_nfsaddr(netdissect_options *ndo, const u_char *bp, const char *s, const char *d) { const struct ip *ip; const struct ip6_hdr *ip6; char srcaddr[INET6_ADDRSTRLEN], dstaddr[INET6_ADDRSTRLEN]; srcaddr[0] = dstaddr[0] = '\0'; switch (IP_V((const struct ip *)bp)) { case 4: ip = (const struct ip *)bp; strlcpy(srcaddr, ipaddr_string(ndo, &ip->ip_src), sizeof(srcaddr)); strlcpy(dstaddr, ipaddr_string(ndo, &ip->ip_dst), 
sizeof(dstaddr)); break; case 6: ip6 = (const struct ip6_hdr *)bp; strlcpy(srcaddr, ip6addr_string(ndo, &ip6->ip6_src), sizeof(srcaddr)); strlcpy(dstaddr, ip6addr_string(ndo, &ip6->ip6_dst), sizeof(dstaddr)); break; default: strlcpy(srcaddr, "?", sizeof(srcaddr)); strlcpy(dstaddr, "?", sizeof(dstaddr)); break; } ND_PRINT((ndo, "%s.%s > %s.%s: ", srcaddr, s, dstaddr, d)); } static const uint32_t * parse_sattr3(netdissect_options *ndo, const uint32_t *dp, struct nfsv3_sattr *sa3) { ND_TCHECK(dp[0]); sa3->sa_modeset = EXTRACT_32BITS(dp); dp++; if (sa3->sa_modeset) { ND_TCHECK(dp[0]); sa3->sa_mode = EXTRACT_32BITS(dp); dp++; } ND_TCHECK(dp[0]); sa3->sa_uidset = EXTRACT_32BITS(dp); dp++; if (sa3->sa_uidset) { ND_TCHECK(dp[0]); sa3->sa_uid = EXTRACT_32BITS(dp); dp++; } ND_TCHECK(dp[0]); sa3->sa_gidset = EXTRACT_32BITS(dp); dp++; if (sa3->sa_gidset) { ND_TCHECK(dp[0]); sa3->sa_gid = EXTRACT_32BITS(dp); dp++; } ND_TCHECK(dp[0]); sa3->sa_sizeset = EXTRACT_32BITS(dp); dp++; if (sa3->sa_sizeset) { ND_TCHECK(dp[0]); sa3->sa_size = EXTRACT_32BITS(dp); dp++; } ND_TCHECK(dp[0]); sa3->sa_atimetype = EXTRACT_32BITS(dp); dp++; if (sa3->sa_atimetype == NFSV3SATTRTIME_TOCLIENT) { ND_TCHECK(dp[1]); sa3->sa_atime.nfsv3_sec = EXTRACT_32BITS(dp); dp++; sa3->sa_atime.nfsv3_nsec = EXTRACT_32BITS(dp); dp++; } ND_TCHECK(dp[0]); sa3->sa_mtimetype = EXTRACT_32BITS(dp); dp++; if (sa3->sa_mtimetype == NFSV3SATTRTIME_TOCLIENT) { ND_TCHECK(dp[1]); sa3->sa_mtime.nfsv3_sec = EXTRACT_32BITS(dp); dp++; sa3->sa_mtime.nfsv3_nsec = EXTRACT_32BITS(dp); dp++; } return dp; trunc: return NULL; } static int nfserr; /* true if we error rather than trunc */ static void print_sattr3(netdissect_options *ndo, const struct nfsv3_sattr *sa3, int verbose) { if (sa3->sa_modeset) ND_PRINT((ndo, " mode %o", sa3->sa_mode)); if (sa3->sa_uidset) ND_PRINT((ndo, " uid %u", sa3->sa_uid)); if (sa3->sa_gidset) ND_PRINT((ndo, " gid %u", sa3->sa_gid)); if (verbose > 1) { if (sa3->sa_atimetype == NFSV3SATTRTIME_TOCLIENT) ND_PRINT((ndo, " atime %u.%06u", sa3->sa_atime.nfsv3_sec, sa3->sa_atime.nfsv3_nsec)); if (sa3->sa_mtimetype == NFSV3SATTRTIME_TOCLIENT) ND_PRINT((ndo, " mtime %u.%06u", sa3->sa_mtime.nfsv3_sec, sa3->sa_mtime.nfsv3_nsec)); } } void nfsreply_print(netdissect_options *ndo, register const u_char *bp, u_int length, register const u_char *bp2) { register const struct sunrpc_msg *rp; char srcid[20], dstid[20]; /*fits 32bit*/ nfserr = 0; /* assume no error */ rp = (const struct sunrpc_msg *)bp; ND_TCHECK(rp->rm_xid); if (!ndo->ndo_nflag) { strlcpy(srcid, "nfs", sizeof(srcid)); snprintf(dstid, sizeof(dstid), "%u", EXTRACT_32BITS(&rp->rm_xid)); } else { snprintf(srcid, sizeof(srcid), "%u", NFS_PORT); snprintf(dstid, sizeof(dstid), "%u", EXTRACT_32BITS(&rp->rm_xid)); } print_nfsaddr(ndo, bp2, srcid, dstid); nfsreply_print_noaddr(ndo, bp, length, bp2); return; trunc: if (!nfserr) ND_PRINT((ndo, "%s", tstr)); } void nfsreply_print_noaddr(netdissect_options *ndo, register const u_char *bp, u_int length, register const u_char *bp2) { register const struct sunrpc_msg *rp; uint32_t proc, vers, reply_stat; enum sunrpc_reject_stat rstat; uint32_t rlow; uint32_t rhigh; enum sunrpc_auth_stat rwhy; nfserr = 0; /* assume no error */ rp = (const struct sunrpc_msg *)bp; ND_TCHECK(rp->rm_reply.rp_stat); reply_stat = EXTRACT_32BITS(&rp->rm_reply.rp_stat); switch (reply_stat) { case SUNRPC_MSG_ACCEPTED: ND_PRINT((ndo, "reply ok %u", length)); if (xid_map_find(rp, bp2, &proc, &vers) >= 0) interp_reply(ndo, rp, proc, vers, length); break; case SUNRPC_MSG_DENIED: 
ND_PRINT((ndo, "reply ERR %u: ", length)); ND_TCHECK(rp->rm_reply.rp_reject.rj_stat); rstat = EXTRACT_32BITS(&rp->rm_reply.rp_reject.rj_stat); switch (rstat) { case SUNRPC_RPC_MISMATCH: ND_TCHECK(rp->rm_reply.rp_reject.rj_vers.high); rlow = EXTRACT_32BITS(&rp->rm_reply.rp_reject.rj_vers.low); rhigh = EXTRACT_32BITS(&rp->rm_reply.rp_reject.rj_vers.high); ND_PRINT((ndo, "RPC Version mismatch (%u-%u)", rlow, rhigh)); break; case SUNRPC_AUTH_ERROR: ND_TCHECK(rp->rm_reply.rp_reject.rj_why); rwhy = EXTRACT_32BITS(&rp->rm_reply.rp_reject.rj_why); ND_PRINT((ndo, "Auth %s", tok2str(sunrpc_auth_str, "Invalid failure code %u", rwhy))); break; default: ND_PRINT((ndo, "Unknown reason for rejecting rpc message %u", (unsigned int)rstat)); break; } break; default: ND_PRINT((ndo, "reply Unknown rpc response code=%u %u", reply_stat, length)); break; } return; trunc: if (!nfserr) ND_PRINT((ndo, "%s", tstr)); } /* * Return a pointer to the first file handle in the packet. * If the packet was truncated, return 0. */ static const uint32_t * parsereq(netdissect_options *ndo, register const struct sunrpc_msg *rp, register u_int length) { register const uint32_t *dp; register u_int len; /* * find the start of the req data (if we captured it) */ dp = (const uint32_t *)&rp->rm_call.cb_cred; ND_TCHECK(dp[1]); len = EXTRACT_32BITS(&dp[1]); if (len < length) { dp += (len + (2 * sizeof(*dp) + 3)) / sizeof(*dp); ND_TCHECK(dp[1]); len = EXTRACT_32BITS(&dp[1]); if (len < length) { dp += (len + (2 * sizeof(*dp) + 3)) / sizeof(*dp); ND_TCHECK2(dp[0], 0); return (dp); } } trunc: return (NULL); } /* * Print out an NFS file handle and return a pointer to following word. * If packet was truncated, return 0. */ static const uint32_t * parsefh(netdissect_options *ndo, register const uint32_t *dp, int v3) { u_int len; if (v3) { ND_TCHECK(dp[0]); len = EXTRACT_32BITS(dp) / 4; dp++; } else len = NFSX_V2FH / 4; if (ND_TTEST2(*dp, len * sizeof(*dp))) { nfs_printfh(ndo, dp, len); return (dp + len); } trunc: return (NULL); } /* * Print out a file name and return pointer to 32-bit word past it. * If packet was truncated, return 0. */ static const uint32_t * parsefn(netdissect_options *ndo, register const uint32_t *dp) { register uint32_t len; register const u_char *cp; /* Bail if we don't have the string length */ ND_TCHECK(*dp); /* Fetch string length; convert to host order */ len = *dp++; NTOHL(len); ND_TCHECK2(*dp, ((len + 3) & ~3)); cp = (const u_char *)dp; /* Update 32-bit pointer (NFS filenames padded to 32-bit boundaries) */ dp += ((len + 3) & ~3) / sizeof(*dp); ND_PRINT((ndo, "\"")); if (fn_printn(ndo, cp, len, ndo->ndo_snapend)) { ND_PRINT((ndo, "\"")); goto trunc; } ND_PRINT((ndo, "\"")); return (dp); trunc: return NULL; } /* * Print out file handle and file name. * Return pointer to 32-bit word past file name. * If packet was truncated (or there was some other error), return 0. 
*/ static const uint32_t * parsefhn(netdissect_options *ndo, register const uint32_t *dp, int v3) { dp = parsefh(ndo, dp, v3); if (dp == NULL) return (NULL); ND_PRINT((ndo, " ")); return (parsefn(ndo, dp)); } void nfsreq_print_noaddr(netdissect_options *ndo, register const u_char *bp, u_int length, register const u_char *bp2) { register const struct sunrpc_msg *rp; register const uint32_t *dp; nfs_type type; int v3; uint32_t proc; uint32_t access_flags; struct nfsv3_sattr sa3; ND_PRINT((ndo, "%d", length)); nfserr = 0; /* assume no error */ rp = (const struct sunrpc_msg *)bp; if (!xid_map_enter(ndo, rp, bp2)) /* record proc number for later on */ goto trunc; v3 = (EXTRACT_32BITS(&rp->rm_call.cb_vers) == NFS_VER3); proc = EXTRACT_32BITS(&rp->rm_call.cb_proc); if (!v3 && proc < NFS_NPROCS) proc = nfsv3_procid[proc]; ND_PRINT((ndo, " %s", tok2str(nfsproc_str, "proc-%u", proc))); switch (proc) { case NFSPROC_GETATTR: case NFSPROC_SETATTR: case NFSPROC_READLINK: case NFSPROC_FSSTAT: case NFSPROC_FSINFO: case NFSPROC_PATHCONF: if ((dp = parsereq(ndo, rp, length)) != NULL && parsefh(ndo, dp, v3) != NULL) return; break; case NFSPROC_LOOKUP: case NFSPROC_CREATE: case NFSPROC_MKDIR: case NFSPROC_REMOVE: case NFSPROC_RMDIR: if ((dp = parsereq(ndo, rp, length)) != NULL && parsefhn(ndo, dp, v3) != NULL) return; break; case NFSPROC_ACCESS: if ((dp = parsereq(ndo, rp, length)) != NULL && (dp = parsefh(ndo, dp, v3)) != NULL) { ND_TCHECK(dp[0]); access_flags = EXTRACT_32BITS(&dp[0]); if (access_flags & ~NFSV3ACCESS_FULL) { /* NFSV3ACCESS definitions aren't up to date */ ND_PRINT((ndo, " %04x", access_flags)); } else if ((access_flags & NFSV3ACCESS_FULL) == NFSV3ACCESS_FULL) { ND_PRINT((ndo, " NFS_ACCESS_FULL")); } else { char separator = ' '; if (access_flags & NFSV3ACCESS_READ) { ND_PRINT((ndo, " NFS_ACCESS_READ")); separator = '|'; } if (access_flags & NFSV3ACCESS_LOOKUP) { ND_PRINT((ndo, "%cNFS_ACCESS_LOOKUP", separator)); separator = '|'; } if (access_flags & NFSV3ACCESS_MODIFY) { ND_PRINT((ndo, "%cNFS_ACCESS_MODIFY", separator)); separator = '|'; } if (access_flags & NFSV3ACCESS_EXTEND) { ND_PRINT((ndo, "%cNFS_ACCESS_EXTEND", separator)); separator = '|'; } if (access_flags & NFSV3ACCESS_DELETE) { ND_PRINT((ndo, "%cNFS_ACCESS_DELETE", separator)); separator = '|'; } if (access_flags & NFSV3ACCESS_EXECUTE) ND_PRINT((ndo, "%cNFS_ACCESS_EXECUTE", separator)); } return; } break; case NFSPROC_READ: if ((dp = parsereq(ndo, rp, length)) != NULL && (dp = parsefh(ndo, dp, v3)) != NULL) { if (v3) { ND_TCHECK(dp[2]); ND_PRINT((ndo, " %u bytes @ %" PRIu64, EXTRACT_32BITS(&dp[2]), EXTRACT_64BITS(&dp[0]))); } else { ND_TCHECK(dp[1]); ND_PRINT((ndo, " %u bytes @ %u", EXTRACT_32BITS(&dp[1]), EXTRACT_32BITS(&dp[0]))); } return; } break; case NFSPROC_WRITE: if ((dp = parsereq(ndo, rp, length)) != NULL && (dp = parsefh(ndo, dp, v3)) != NULL) { if (v3) { ND_TCHECK(dp[2]); ND_PRINT((ndo, " %u (%u) bytes @ %" PRIu64, EXTRACT_32BITS(&dp[4]), EXTRACT_32BITS(&dp[2]), EXTRACT_64BITS(&dp[0]))); if (ndo->ndo_vflag) { dp += 3; ND_TCHECK(dp[0]); ND_PRINT((ndo, " <%s>", tok2str(nfsv3_writemodes, NULL, EXTRACT_32BITS(dp)))); } } else { ND_TCHECK(dp[3]); ND_PRINT((ndo, " %u (%u) bytes @ %u (%u)", EXTRACT_32BITS(&dp[3]), EXTRACT_32BITS(&dp[2]), EXTRACT_32BITS(&dp[1]), EXTRACT_32BITS(&dp[0]))); } return; } break; case NFSPROC_SYMLINK: if ((dp = parsereq(ndo, rp, length)) != NULL && (dp = parsefhn(ndo, dp, v3)) != NULL) { ND_PRINT((ndo, " ->")); if (v3 && (dp = parse_sattr3(ndo, dp, &sa3)) == NULL) break; if (parsefn(ndo, dp) == NULL) 
break; if (v3 && ndo->ndo_vflag) print_sattr3(ndo, &sa3, ndo->ndo_vflag); return; } break; case NFSPROC_MKNOD: if ((dp = parsereq(ndo, rp, length)) != NULL && (dp = parsefhn(ndo, dp, v3)) != NULL) { ND_TCHECK(*dp); type = (nfs_type)EXTRACT_32BITS(dp); dp++; if ((dp = parse_sattr3(ndo, dp, &sa3)) == NULL) break; ND_PRINT((ndo, " %s", tok2str(type2str, "unk-ft %d", type))); if (ndo->ndo_vflag && (type == NFCHR || type == NFBLK)) { ND_TCHECK(dp[1]); ND_PRINT((ndo, " %u/%u", EXTRACT_32BITS(&dp[0]), EXTRACT_32BITS(&dp[1]))); dp += 2; } if (ndo->ndo_vflag) print_sattr3(ndo, &sa3, ndo->ndo_vflag); return; } break; case NFSPROC_RENAME: if ((dp = parsereq(ndo, rp, length)) != NULL && (dp = parsefhn(ndo, dp, v3)) != NULL) { ND_PRINT((ndo, " ->")); if (parsefhn(ndo, dp, v3) != NULL) return; } break; case NFSPROC_LINK: if ((dp = parsereq(ndo, rp, length)) != NULL && (dp = parsefh(ndo, dp, v3)) != NULL) { ND_PRINT((ndo, " ->")); if (parsefhn(ndo, dp, v3) != NULL) return; } break; case NFSPROC_READDIR: if ((dp = parsereq(ndo, rp, length)) != NULL && (dp = parsefh(ndo, dp, v3)) != NULL) { if (v3) { ND_TCHECK(dp[4]); /* * We shouldn't really try to interpret the * offset cookie here. */ ND_PRINT((ndo, " %u bytes @ %" PRId64, EXTRACT_32BITS(&dp[4]), EXTRACT_64BITS(&dp[0]))); if (ndo->ndo_vflag) ND_PRINT((ndo, " verf %08x%08x", dp[2], dp[3])); } else { ND_TCHECK(dp[1]); /* * Print the offset as signed, since -1 is * common, but offsets > 2^31 aren't. */ ND_PRINT((ndo, " %u bytes @ %d", EXTRACT_32BITS(&dp[1]), EXTRACT_32BITS(&dp[0]))); } return; } break; case NFSPROC_READDIRPLUS: if ((dp = parsereq(ndo, rp, length)) != NULL && (dp = parsefh(ndo, dp, v3)) != NULL) { ND_TCHECK(dp[4]); /* * We don't try to interpret the offset * cookie here. */ ND_PRINT((ndo, " %u bytes @ %" PRId64, EXTRACT_32BITS(&dp[4]), EXTRACT_64BITS(&dp[0]))); if (ndo->ndo_vflag) { ND_TCHECK(dp[5]); ND_PRINT((ndo, " max %u verf %08x%08x", EXTRACT_32BITS(&dp[5]), dp[2], dp[3])); } return; } break; case NFSPROC_COMMIT: if ((dp = parsereq(ndo, rp, length)) != NULL && (dp = parsefh(ndo, dp, v3)) != NULL) { ND_TCHECK(dp[2]); ND_PRINT((ndo, " %u bytes @ %" PRIu64, EXTRACT_32BITS(&dp[2]), EXTRACT_64BITS(&dp[0]))); return; } break; default: return; } trunc: if (!nfserr) ND_PRINT((ndo, "%s", tstr)); } /* * Print out an NFS file handle. * We assume packet was not truncated before the end of the * file handle pointed to by dp. * * Note: new version (using portable file-handle parser) doesn't produce * generation number. It probably could be made to do that, with some * additional hacking on the parser code. 
*/ static void nfs_printfh(netdissect_options *ndo, register const uint32_t *dp, const u_int len) { my_fsid fsid; uint32_t ino; const char *sfsname = NULL; char *spacep; if (ndo->ndo_uflag) { u_int i; char const *sep = ""; ND_PRINT((ndo, " fh[")); for (i=0; i<len; i++) { ND_PRINT((ndo, "%s%x", sep, dp[i])); sep = ":"; } ND_PRINT((ndo, "]")); return; } Parse_fh((const u_char *)dp, len, &fsid, &ino, NULL, &sfsname, 0); if (sfsname) { /* file system ID is ASCII, not numeric, for this server OS */ static char temp[NFSX_V3FHMAX+1]; /* Make sure string is null-terminated */ strncpy(temp, sfsname, NFSX_V3FHMAX); temp[sizeof(temp) - 1] = '\0'; /* Remove trailing spaces */ spacep = strchr(temp, ' '); if (spacep) *spacep = '\0'; ND_PRINT((ndo, " fh %s/", temp)); } else { ND_PRINT((ndo, " fh %d,%d/", fsid.Fsid_dev.Major, fsid.Fsid_dev.Minor)); } if(fsid.Fsid_dev.Minor == 257) /* Print the undecoded handle */ ND_PRINT((ndo, "%s", fsid.Opaque_Handle)); else ND_PRINT((ndo, "%ld", (long) ino)); } /* * Maintain a small cache of recent client.XID.server/proc pairs, to allow * us to match up replies with requests and thus to know how to parse * the reply. */ struct xid_map_entry { uint32_t xid; /* transaction ID (net order) */ int ipver; /* IP version (4 or 6) */ struct in6_addr client; /* client IP address (net order) */ struct in6_addr server; /* server IP address (net order) */ uint32_t proc; /* call proc number (host order) */ uint32_t vers; /* program version (host order) */ }; /* * Map entries are kept in an array that we manage as a ring; * new entries are always added at the tail of the ring. Initially, * all the entries are zero and hence don't match anything. */ #define XIDMAPSIZE 64 static struct xid_map_entry xid_map[XIDMAPSIZE]; static int xid_map_next = 0; static int xid_map_hint = 0; static int xid_map_enter(netdissect_options *ndo, const struct sunrpc_msg *rp, const u_char *bp) { const struct ip *ip = NULL; const struct ip6_hdr *ip6 = NULL; struct xid_map_entry *xmep; if (!ND_TTEST(rp->rm_call.cb_vers)) return (0); switch (IP_V((const struct ip *)bp)) { case 4: ip = (const struct ip *)bp; break; case 6: ip6 = (const struct ip6_hdr *)bp; break; default: return (1); } xmep = &xid_map[xid_map_next]; if (++xid_map_next >= XIDMAPSIZE) xid_map_next = 0; UNALIGNED_MEMCPY(&xmep->xid, &rp->rm_xid, sizeof(xmep->xid)); if (ip) { xmep->ipver = 4; UNALIGNED_MEMCPY(&xmep->client, &ip->ip_src, sizeof(ip->ip_src)); UNALIGNED_MEMCPY(&xmep->server, &ip->ip_dst, sizeof(ip->ip_dst)); } else if (ip6) { xmep->ipver = 6; UNALIGNED_MEMCPY(&xmep->client, &ip6->ip6_src, sizeof(ip6->ip6_src)); UNALIGNED_MEMCPY(&xmep->server, &ip6->ip6_dst, sizeof(ip6->ip6_dst)); } xmep->proc = EXTRACT_32BITS(&rp->rm_call.cb_proc); xmep->vers = EXTRACT_32BITS(&rp->rm_call.cb_vers); return (1); } /* * Returns 0 and puts NFSPROC_xxx in proc return and * version in vers return, or returns -1 on failure */ static int xid_map_find(const struct sunrpc_msg *rp, const u_char *bp, uint32_t *proc, uint32_t *vers) { int i; struct xid_map_entry *xmep; uint32_t xid; const struct ip *ip = (const struct ip *)bp; const struct ip6_hdr *ip6 = (const struct ip6_hdr *)bp; int cmp; UNALIGNED_MEMCPY(&xid, &rp->rm_xid, sizeof(xmep->xid)); /* Start searching from where we last left off */ i = xid_map_hint; do { xmep = &xid_map[i]; cmp = 1; if (xmep->ipver != IP_V(ip) || xmep->xid != xid) goto nextitem; switch (xmep->ipver) { case 4: if (UNALIGNED_MEMCMP(&ip->ip_src, &xmep->server, sizeof(ip->ip_src)) != 0 || UNALIGNED_MEMCMP(&ip->ip_dst, &xmep->client, 
sizeof(ip->ip_dst)) != 0) { cmp = 0; } break; case 6: if (UNALIGNED_MEMCMP(&ip6->ip6_src, &xmep->server, sizeof(ip6->ip6_src)) != 0 || UNALIGNED_MEMCMP(&ip6->ip6_dst, &xmep->client, sizeof(ip6->ip6_dst)) != 0) { cmp = 0; } break; default: cmp = 0; break; } if (cmp) { /* match */ xid_map_hint = i; *proc = xmep->proc; *vers = xmep->vers; return 0; } nextitem: if (++i >= XIDMAPSIZE) i = 0; } while (i != xid_map_hint); /* search failed */ return (-1); } /* * Routines for parsing reply packets */ /* * Return a pointer to the beginning of the actual results. * If the packet was truncated, return 0. */ static const uint32_t * parserep(netdissect_options *ndo, register const struct sunrpc_msg *rp, register u_int length) { register const uint32_t *dp; u_int len; enum sunrpc_accept_stat astat; /* * Portability note: * Here we find the address of the ar_verf credentials. * Originally, this calculation was * dp = (uint32_t *)&rp->rm_reply.rp_acpt.ar_verf * On the wire, the rp_acpt field starts immediately after * the (32 bit) rp_stat field. However, rp_acpt (which is a * "struct accepted_reply") contains a "struct opaque_auth", * whose internal representation contains a pointer, so on a * 64-bit machine the compiler inserts 32 bits of padding * before rp->rm_reply.rp_acpt.ar_verf. So, we cannot use * the internal representation to parse the on-the-wire * representation. Instead, we skip past the rp_stat field, * which is an "enum" and so occupies one 32-bit word. */ dp = ((const uint32_t *)&rp->rm_reply) + 1; ND_TCHECK(dp[1]); len = EXTRACT_32BITS(&dp[1]); if (len >= length) return (NULL); /* * skip past the ar_verf credentials. */ dp += (len + (2*sizeof(uint32_t) + 3)) / sizeof(uint32_t); ND_TCHECK2(dp[0], 0); /* * now we can check the ar_stat field */ astat = (enum sunrpc_accept_stat) EXTRACT_32BITS(dp); if (astat != SUNRPC_SUCCESS) { ND_PRINT((ndo, " %s", tok2str(sunrpc_str, "ar_stat %d", astat))); nfserr = 1; /* suppress trunc string */ return (NULL); } /* successful return */ ND_TCHECK2(*dp, sizeof(astat)); return ((const uint32_t *) (sizeof(astat) + ((const char *)dp))); trunc: return (0); } static const uint32_t * parsestatus(netdissect_options *ndo, const uint32_t *dp, int *er) { int errnum; ND_TCHECK(dp[0]); errnum = EXTRACT_32BITS(&dp[0]); if (er) *er = errnum; if (errnum != 0) { if (!ndo->ndo_qflag) ND_PRINT((ndo, " ERROR: %s", tok2str(status2str, "unk %d", errnum))); nfserr = 1; } return (dp + 1); trunc: return NULL; } static const uint32_t * parsefattr(netdissect_options *ndo, const uint32_t *dp, int verbose, int v3) { const struct nfs_fattr *fap; fap = (const struct nfs_fattr *)dp; ND_TCHECK(fap->fa_gid); if (verbose) { ND_PRINT((ndo, " %s %o ids %d/%d", tok2str(type2str, "unk-ft %d ", EXTRACT_32BITS(&fap->fa_type)), EXTRACT_32BITS(&fap->fa_mode), EXTRACT_32BITS(&fap->fa_uid), EXTRACT_32BITS(&fap->fa_gid))); if (v3) { ND_TCHECK(fap->fa3_size); ND_PRINT((ndo, " sz %" PRIu64, EXTRACT_64BITS((const uint32_t *)&fap->fa3_size))); } else { ND_TCHECK(fap->fa2_size); ND_PRINT((ndo, " sz %d", EXTRACT_32BITS(&fap->fa2_size))); } } /* print lots more stuff */ if (verbose > 1) { if (v3) { ND_TCHECK(fap->fa3_ctime); ND_PRINT((ndo, " nlink %d rdev %d/%d", EXTRACT_32BITS(&fap->fa_nlink), EXTRACT_32BITS(&fap->fa3_rdev.specdata1), EXTRACT_32BITS(&fap->fa3_rdev.specdata2))); ND_PRINT((ndo, " fsid %" PRIx64, EXTRACT_64BITS((const uint32_t *)&fap->fa3_fsid))); ND_PRINT((ndo, " fileid %" PRIx64, EXTRACT_64BITS((const uint32_t *)&fap->fa3_fileid))); ND_PRINT((ndo, " a/m/ctime %u.%06u", 
EXTRACT_32BITS(&fap->fa3_atime.nfsv3_sec), EXTRACT_32BITS(&fap->fa3_atime.nfsv3_nsec))); ND_PRINT((ndo, " %u.%06u", EXTRACT_32BITS(&fap->fa3_mtime.nfsv3_sec), EXTRACT_32BITS(&fap->fa3_mtime.nfsv3_nsec))); ND_PRINT((ndo, " %u.%06u", EXTRACT_32BITS(&fap->fa3_ctime.nfsv3_sec), EXTRACT_32BITS(&fap->fa3_ctime.nfsv3_nsec))); } else { ND_TCHECK(fap->fa2_ctime); ND_PRINT((ndo, " nlink %d rdev 0x%x fsid 0x%x nodeid 0x%x a/m/ctime", EXTRACT_32BITS(&fap->fa_nlink), EXTRACT_32BITS(&fap->fa2_rdev), EXTRACT_32BITS(&fap->fa2_fsid), EXTRACT_32BITS(&fap->fa2_fileid))); ND_PRINT((ndo, " %u.%06u", EXTRACT_32BITS(&fap->fa2_atime.nfsv2_sec), EXTRACT_32BITS(&fap->fa2_atime.nfsv2_usec))); ND_PRINT((ndo, " %u.%06u", EXTRACT_32BITS(&fap->fa2_mtime.nfsv2_sec), EXTRACT_32BITS(&fap->fa2_mtime.nfsv2_usec))); ND_PRINT((ndo, " %u.%06u", EXTRACT_32BITS(&fap->fa2_ctime.nfsv2_sec), EXTRACT_32BITS(&fap->fa2_ctime.nfsv2_usec))); } } return ((const uint32_t *)((const unsigned char *)dp + (v3 ? NFSX_V3FATTR : NFSX_V2FATTR))); trunc: return (NULL); } static int parseattrstat(netdissect_options *ndo, const uint32_t *dp, int verbose, int v3) { int er; dp = parsestatus(ndo, dp, &er); if (dp == NULL) return (0); if (er) return (1); return (parsefattr(ndo, dp, verbose, v3) != NULL); } static int parsediropres(netdissect_options *ndo, const uint32_t *dp) { int er; if (!(dp = parsestatus(ndo, dp, &er))) return (0); if (er) return (1); dp = parsefh(ndo, dp, 0); if (dp == NULL) return (0); return (parsefattr(ndo, dp, ndo->ndo_vflag, 0) != NULL); } static int parselinkres(netdissect_options *ndo, const uint32_t *dp, int v3) { int er; dp = parsestatus(ndo, dp, &er); if (dp == NULL) return(0); if (er) return(1); if (v3 && !(dp = parse_post_op_attr(ndo, dp, ndo->ndo_vflag))) return (0); ND_PRINT((ndo, " ")); return (parsefn(ndo, dp) != NULL); } static int parsestatfs(netdissect_options *ndo, const uint32_t *dp, int v3) { const struct nfs_statfs *sfsp; int er; dp = parsestatus(ndo, dp, &er); if (dp == NULL) return (0); if (!v3 && er) return (1); if (ndo->ndo_qflag) return(1); if (v3) { if (ndo->ndo_vflag) ND_PRINT((ndo, " POST:")); if (!(dp = parse_post_op_attr(ndo, dp, ndo->ndo_vflag))) return (0); } ND_TCHECK2(*dp, (v3 ? 
NFSX_V3STATFS : NFSX_V2STATFS)); sfsp = (const struct nfs_statfs *)dp; if (v3) { ND_PRINT((ndo, " tbytes %" PRIu64 " fbytes %" PRIu64 " abytes %" PRIu64, EXTRACT_64BITS((const uint32_t *)&sfsp->sf_tbytes), EXTRACT_64BITS((const uint32_t *)&sfsp->sf_fbytes), EXTRACT_64BITS((const uint32_t *)&sfsp->sf_abytes))); if (ndo->ndo_vflag) { ND_PRINT((ndo, " tfiles %" PRIu64 " ffiles %" PRIu64 " afiles %" PRIu64 " invar %u", EXTRACT_64BITS((const uint32_t *)&sfsp->sf_tfiles), EXTRACT_64BITS((const uint32_t *)&sfsp->sf_ffiles), EXTRACT_64BITS((const uint32_t *)&sfsp->sf_afiles), EXTRACT_32BITS(&sfsp->sf_invarsec))); } } else { ND_PRINT((ndo, " tsize %d bsize %d blocks %d bfree %d bavail %d", EXTRACT_32BITS(&sfsp->sf_tsize), EXTRACT_32BITS(&sfsp->sf_bsize), EXTRACT_32BITS(&sfsp->sf_blocks), EXTRACT_32BITS(&sfsp->sf_bfree), EXTRACT_32BITS(&sfsp->sf_bavail))); } return (1); trunc: return (0); } static int parserddires(netdissect_options *ndo, const uint32_t *dp) { int er; dp = parsestatus(ndo, dp, &er); if (dp == NULL) return (0); if (er) return (1); if (ndo->ndo_qflag) return (1); ND_TCHECK(dp[2]); ND_PRINT((ndo, " offset 0x%x size %d ", EXTRACT_32BITS(&dp[0]), EXTRACT_32BITS(&dp[1]))); if (dp[2] != 0) ND_PRINT((ndo, " eof")); return (1); trunc: return (0); } static const uint32_t * parse_wcc_attr(netdissect_options *ndo, const uint32_t *dp) { ND_PRINT((ndo, " sz %" PRIu64, EXTRACT_64BITS(&dp[0]))); ND_PRINT((ndo, " mtime %u.%06u ctime %u.%06u", EXTRACT_32BITS(&dp[2]), EXTRACT_32BITS(&dp[3]), EXTRACT_32BITS(&dp[4]), EXTRACT_32BITS(&dp[5]))); return (dp + 6); } /* * Pre operation attributes. Print only if vflag > 1. */ static const uint32_t * parse_pre_op_attr(netdissect_options *ndo, const uint32_t *dp, int verbose) { ND_TCHECK(dp[0]); if (!EXTRACT_32BITS(&dp[0])) return (dp + 1); dp++; ND_TCHECK2(*dp, 24); if (verbose > 1) { return parse_wcc_attr(ndo, dp); } else { /* If not verbose enough, just skip over wcc_attr */ return (dp + 6); } trunc: return (NULL); } /* * Post operation attributes are printed if vflag >= 1 */ static const uint32_t * parse_post_op_attr(netdissect_options *ndo, const uint32_t *dp, int verbose) { ND_TCHECK(dp[0]); if (!EXTRACT_32BITS(&dp[0])) return (dp + 1); dp++; if (verbose) { return parsefattr(ndo, dp, verbose, 1); } else return (dp + (NFSX_V3FATTR / sizeof (uint32_t))); trunc: return (NULL); } static const uint32_t * parse_wcc_data(netdissect_options *ndo, const uint32_t *dp, int verbose) { if (verbose > 1) ND_PRINT((ndo, " PRE:")); if (!(dp = parse_pre_op_attr(ndo, dp, verbose))) return (0); if (verbose) ND_PRINT((ndo, " POST:")); return parse_post_op_attr(ndo, dp, verbose); } static const uint32_t * parsecreateopres(netdissect_options *ndo, const uint32_t *dp, int verbose) { int er; if (!(dp = parsestatus(ndo, dp, &er))) return (0); if (er) dp = parse_wcc_data(ndo, dp, verbose); else { ND_TCHECK(dp[0]); if (!EXTRACT_32BITS(&dp[0])) return (dp + 1); dp++; if (!(dp = parsefh(ndo, dp, 1))) return (0); if (verbose) { if (!(dp = parse_post_op_attr(ndo, dp, verbose))) return (0); if (ndo->ndo_vflag > 1) { ND_PRINT((ndo, " dir attr:")); dp = parse_wcc_data(ndo, dp, verbose); } } } return (dp); trunc: return (NULL); } static int parsewccres(netdissect_options *ndo, const uint32_t *dp, int verbose) { int er; if (!(dp = parsestatus(ndo, dp, &er))) return (0); return parse_wcc_data(ndo, dp, verbose) != NULL; } static const uint32_t * parsev3rddirres(netdissect_options *ndo, const uint32_t *dp, int verbose) { int er; if (!(dp = parsestatus(ndo, dp, &er))) return (0); if 
(ndo->ndo_vflag) ND_PRINT((ndo, " POST:")); if (!(dp = parse_post_op_attr(ndo, dp, verbose))) return (0); if (er) return dp; if (ndo->ndo_vflag) { ND_TCHECK(dp[1]); ND_PRINT((ndo, " verf %08x%08x", dp[0], dp[1])); dp += 2; } return dp; trunc: return (NULL); } static int parsefsinfo(netdissect_options *ndo, const uint32_t *dp) { const struct nfsv3_fsinfo *sfp; int er; if (!(dp = parsestatus(ndo, dp, &er))) return (0); if (ndo->ndo_vflag) ND_PRINT((ndo, " POST:")); if (!(dp = parse_post_op_attr(ndo, dp, ndo->ndo_vflag))) return (0); if (er) return (1); sfp = (const struct nfsv3_fsinfo *)dp; ND_TCHECK(*sfp); ND_PRINT((ndo, " rtmax %u rtpref %u wtmax %u wtpref %u dtpref %u", EXTRACT_32BITS(&sfp->fs_rtmax), EXTRACT_32BITS(&sfp->fs_rtpref), EXTRACT_32BITS(&sfp->fs_wtmax), EXTRACT_32BITS(&sfp->fs_wtpref), EXTRACT_32BITS(&sfp->fs_dtpref))); if (ndo->ndo_vflag) { ND_PRINT((ndo, " rtmult %u wtmult %u maxfsz %" PRIu64, EXTRACT_32BITS(&sfp->fs_rtmult), EXTRACT_32BITS(&sfp->fs_wtmult), EXTRACT_64BITS((const uint32_t *)&sfp->fs_maxfilesize))); ND_PRINT((ndo, " delta %u.%06u ", EXTRACT_32BITS(&sfp->fs_timedelta.nfsv3_sec), EXTRACT_32BITS(&sfp->fs_timedelta.nfsv3_nsec))); } return (1); trunc: return (0); } static int parsepathconf(netdissect_options *ndo, const uint32_t *dp) { int er; const struct nfsv3_pathconf *spp; if (!(dp = parsestatus(ndo, dp, &er))) return (0); if (ndo->ndo_vflag) ND_PRINT((ndo, " POST:")); if (!(dp = parse_post_op_attr(ndo, dp, ndo->ndo_vflag))) return (0); if (er) return (1); spp = (const struct nfsv3_pathconf *)dp; ND_TCHECK(*spp); ND_PRINT((ndo, " linkmax %u namemax %u %s %s %s %s", EXTRACT_32BITS(&spp->pc_linkmax), EXTRACT_32BITS(&spp->pc_namemax), EXTRACT_32BITS(&spp->pc_notrunc) ? "notrunc" : "", EXTRACT_32BITS(&spp->pc_chownrestricted) ? "chownres" : "", EXTRACT_32BITS(&spp->pc_caseinsensitive) ? "igncase" : "", EXTRACT_32BITS(&spp->pc_casepreserving) ? 
"keepcase" : "")); return (1); trunc: return (0); } static void interp_reply(netdissect_options *ndo, const struct sunrpc_msg *rp, uint32_t proc, uint32_t vers, int length) { register const uint32_t *dp; register int v3; int er; v3 = (vers == NFS_VER3); if (!v3 && proc < NFS_NPROCS) proc = nfsv3_procid[proc]; ND_PRINT((ndo, " %s", tok2str(nfsproc_str, "proc-%u", proc))); switch (proc) { case NFSPROC_GETATTR: dp = parserep(ndo, rp, length); if (dp != NULL && parseattrstat(ndo, dp, !ndo->ndo_qflag, v3) != 0) return; break; case NFSPROC_SETATTR: if (!(dp = parserep(ndo, rp, length))) return; if (v3) { if (parsewccres(ndo, dp, ndo->ndo_vflag)) return; } else { if (parseattrstat(ndo, dp, !ndo->ndo_qflag, 0) != 0) return; } break; case NFSPROC_LOOKUP: if (!(dp = parserep(ndo, rp, length))) break; if (v3) { if (!(dp = parsestatus(ndo, dp, &er))) break; if (er) { if (ndo->ndo_vflag > 1) { ND_PRINT((ndo, " post dattr:")); dp = parse_post_op_attr(ndo, dp, ndo->ndo_vflag); } } else { if (!(dp = parsefh(ndo, dp, v3))) break; if ((dp = parse_post_op_attr(ndo, dp, ndo->ndo_vflag)) && ndo->ndo_vflag > 1) { ND_PRINT((ndo, " post dattr:")); dp = parse_post_op_attr(ndo, dp, ndo->ndo_vflag); } } if (dp) return; } else { if (parsediropres(ndo, dp) != 0) return; } break; case NFSPROC_ACCESS: if (!(dp = parserep(ndo, rp, length))) break; if (!(dp = parsestatus(ndo, dp, &er))) break; if (ndo->ndo_vflag) ND_PRINT((ndo, " attr:")); if (!(dp = parse_post_op_attr(ndo, dp, ndo->ndo_vflag))) break; if (!er) ND_PRINT((ndo, " c %04x", EXTRACT_32BITS(&dp[0]))); return; case NFSPROC_READLINK: dp = parserep(ndo, rp, length); if (dp != NULL && parselinkres(ndo, dp, v3) != 0) return; break; case NFSPROC_READ: if (!(dp = parserep(ndo, rp, length))) break; if (v3) { if (!(dp = parsestatus(ndo, dp, &er))) break; if (!(dp = parse_post_op_attr(ndo, dp, ndo->ndo_vflag))) break; if (er) return; if (ndo->ndo_vflag) { ND_TCHECK(dp[1]); ND_PRINT((ndo, " %u bytes", EXTRACT_32BITS(&dp[0]))); if (EXTRACT_32BITS(&dp[1])) ND_PRINT((ndo, " EOF")); } return; } else { if (parseattrstat(ndo, dp, ndo->ndo_vflag, 0) != 0) return; } break; case NFSPROC_WRITE: if (!(dp = parserep(ndo, rp, length))) break; if (v3) { if (!(dp = parsestatus(ndo, dp, &er))) break; if (!(dp = parse_wcc_data(ndo, dp, ndo->ndo_vflag))) break; if (er) return; if (ndo->ndo_vflag) { ND_TCHECK(dp[0]); ND_PRINT((ndo, " %u bytes", EXTRACT_32BITS(&dp[0]))); if (ndo->ndo_vflag > 1) { ND_TCHECK(dp[1]); ND_PRINT((ndo, " <%s>", tok2str(nfsv3_writemodes, NULL, EXTRACT_32BITS(&dp[1])))); } return; } } else { if (parseattrstat(ndo, dp, ndo->ndo_vflag, v3) != 0) return; } break; case NFSPROC_CREATE: case NFSPROC_MKDIR: if (!(dp = parserep(ndo, rp, length))) break; if (v3) { if (parsecreateopres(ndo, dp, ndo->ndo_vflag) != NULL) return; } else { if (parsediropres(ndo, dp) != 0) return; } break; case NFSPROC_SYMLINK: if (!(dp = parserep(ndo, rp, length))) break; if (v3) { if (parsecreateopres(ndo, dp, ndo->ndo_vflag) != NULL) return; } else { if (parsestatus(ndo, dp, &er) != NULL) return; } break; case NFSPROC_MKNOD: if (!(dp = parserep(ndo, rp, length))) break; if (parsecreateopres(ndo, dp, ndo->ndo_vflag) != NULL) return; break; case NFSPROC_REMOVE: case NFSPROC_RMDIR: if (!(dp = parserep(ndo, rp, length))) break; if (v3) { if (parsewccres(ndo, dp, ndo->ndo_vflag)) return; } else { if (parsestatus(ndo, dp, &er) != NULL) return; } break; case NFSPROC_RENAME: if (!(dp = parserep(ndo, rp, length))) break; if (v3) { if (!(dp = parsestatus(ndo, dp, &er))) break; if (ndo->ndo_vflag) { 
ND_PRINT((ndo, " from:")); if (!(dp = parse_wcc_data(ndo, dp, ndo->ndo_vflag))) break; ND_PRINT((ndo, " to:")); if (!(dp = parse_wcc_data(ndo, dp, ndo->ndo_vflag))) break; } return; } else { if (parsestatus(ndo, dp, &er) != NULL) return; } break; case NFSPROC_LINK: if (!(dp = parserep(ndo, rp, length))) break; if (v3) { if (!(dp = parsestatus(ndo, dp, &er))) break; if (ndo->ndo_vflag) { ND_PRINT((ndo, " file POST:")); if (!(dp = parse_post_op_attr(ndo, dp, ndo->ndo_vflag))) break; ND_PRINT((ndo, " dir:")); if (!(dp = parse_wcc_data(ndo, dp, ndo->ndo_vflag))) break; return; } } else { if (parsestatus(ndo, dp, &er) != NULL) return; } break; case NFSPROC_READDIR: if (!(dp = parserep(ndo, rp, length))) break; if (v3) { if (parsev3rddirres(ndo, dp, ndo->ndo_vflag)) return; } else { if (parserddires(ndo, dp) != 0) return; } break; case NFSPROC_READDIRPLUS: if (!(dp = parserep(ndo, rp, length))) break; if (parsev3rddirres(ndo, dp, ndo->ndo_vflag)) return; break; case NFSPROC_FSSTAT: dp = parserep(ndo, rp, length); if (dp != NULL && parsestatfs(ndo, dp, v3) != 0) return; break; case NFSPROC_FSINFO: dp = parserep(ndo, rp, length); if (dp != NULL && parsefsinfo(ndo, dp) != 0) return; break; case NFSPROC_PATHCONF: dp = parserep(ndo, rp, length); if (dp != NULL && parsepathconf(ndo, dp) != 0) return; break; case NFSPROC_COMMIT: dp = parserep(ndo, rp, length); if (dp != NULL && parsewccres(ndo, dp, ndo->ndo_vflag) != 0) return; break; default: return; } trunc: if (!nfserr) ND_PRINT((ndo, "%s", tstr)); }
/* * Copyright (c) 1988, 1989, 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that: (1) source code distributions * retain the above copyright notice and this paragraph in its entirety, (2) * distributions including binary code include the above copyright notice and * this paragraph in its entirety in the documentation or other materials * provided with the distribution, and (3) all advertising materials mentioning * features or use of this software display the following acknowledgement: * ``This product includes software developed by the University of California, * Lawrence Berkeley Laboratory and its contributors.'' Neither the name of * the University nor the names of its contributors may be used to endorse * or promote products derived from this software without specific prior * written permission. * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. */ /* \summary: Network File System (NFS) printer */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include <netdissect-stdinc.h> #include <stdio.h> #include <string.h> #include "netdissect.h" #include "addrtoname.h" #include "extract.h" #include "nfs.h" #include "nfsfh.h" #include "ip.h" #include "ip6.h" #include "rpc_auth.h" #include "rpc_msg.h" static const char tstr[] = " [|nfs]"; static void nfs_printfh(netdissect_options *, const uint32_t *, const u_int); static int xid_map_enter(netdissect_options *, const struct sunrpc_msg *, const u_char *); static int xid_map_find(const struct sunrpc_msg *, const u_char *, uint32_t *, uint32_t *); static void interp_reply(netdissect_options *, const struct sunrpc_msg *, uint32_t, uint32_t, int); static const uint32_t *parse_post_op_attr(netdissect_options *, const uint32_t *, int); /* * Mapping of old NFS Version 2 RPC numbers to generic numbers. */ static uint32_t nfsv3_procid[NFS_NPROCS] = { NFSPROC_NULL, NFSPROC_GETATTR, NFSPROC_SETATTR, NFSPROC_NOOP, NFSPROC_LOOKUP, NFSPROC_READLINK, NFSPROC_READ, NFSPROC_NOOP, NFSPROC_WRITE, NFSPROC_CREATE, NFSPROC_REMOVE, NFSPROC_RENAME, NFSPROC_LINK, NFSPROC_SYMLINK, NFSPROC_MKDIR, NFSPROC_RMDIR, NFSPROC_READDIR, NFSPROC_FSSTAT, NFSPROC_NOOP, NFSPROC_NOOP, NFSPROC_NOOP, NFSPROC_NOOP, NFSPROC_NOOP, NFSPROC_NOOP, NFSPROC_NOOP, NFSPROC_NOOP }; static const struct tok nfsproc_str[] = { { NFSPROC_NOOP, "nop" }, { NFSPROC_NULL, "null" }, { NFSPROC_GETATTR, "getattr" }, { NFSPROC_SETATTR, "setattr" }, { NFSPROC_LOOKUP, "lookup" }, { NFSPROC_ACCESS, "access" }, { NFSPROC_READLINK, "readlink" }, { NFSPROC_READ, "read" }, { NFSPROC_WRITE, "write" }, { NFSPROC_CREATE, "create" }, { NFSPROC_MKDIR, "mkdir" }, { NFSPROC_SYMLINK, "symlink" }, { NFSPROC_MKNOD, "mknod" }, { NFSPROC_REMOVE, "remove" }, { NFSPROC_RMDIR, "rmdir" }, { NFSPROC_RENAME, "rename" }, { NFSPROC_LINK, "link" }, { NFSPROC_READDIR, "readdir" }, { NFSPROC_READDIRPLUS, "readdirplus" }, { NFSPROC_FSSTAT, "fsstat" }, { NFSPROC_FSINFO, "fsinfo" }, { NFSPROC_PATHCONF, "pathconf" }, { NFSPROC_COMMIT, "commit" }, { 0, NULL } }; /* * NFS V2 and V3 status values. * * Some of these come from the RFCs for NFS V2 and V3, with the message * strings taken from the FreeBSD C library "errlst.c". 
* * Others are errors that are not in the RFC but that I suspect some * NFS servers could return; the values are FreeBSD errno values, as * the first NFS server was the SunOS 2.0 one, and until 5.0 SunOS * was primarily BSD-derived. */ static const struct tok status2str[] = { { 1, "Operation not permitted" }, /* EPERM */ { 2, "No such file or directory" }, /* ENOENT */ { 5, "Input/output error" }, /* EIO */ { 6, "Device not configured" }, /* ENXIO */ { 11, "Resource deadlock avoided" }, /* EDEADLK */ { 12, "Cannot allocate memory" }, /* ENOMEM */ { 13, "Permission denied" }, /* EACCES */ { 17, "File exists" }, /* EEXIST */ { 18, "Cross-device link" }, /* EXDEV */ { 19, "Operation not supported by device" }, /* ENODEV */ { 20, "Not a directory" }, /* ENOTDIR */ { 21, "Is a directory" }, /* EISDIR */ { 22, "Invalid argument" }, /* EINVAL */ { 26, "Text file busy" }, /* ETXTBSY */ { 27, "File too large" }, /* EFBIG */ { 28, "No space left on device" }, /* ENOSPC */ { 30, "Read-only file system" }, /* EROFS */ { 31, "Too many links" }, /* EMLINK */ { 45, "Operation not supported" }, /* EOPNOTSUPP */ { 62, "Too many levels of symbolic links" }, /* ELOOP */ { 63, "File name too long" }, /* ENAMETOOLONG */ { 66, "Directory not empty" }, /* ENOTEMPTY */ { 69, "Disc quota exceeded" }, /* EDQUOT */ { 70, "Stale NFS file handle" }, /* ESTALE */ { 71, "Too many levels of remote in path" }, /* EREMOTE */ { 99, "Write cache flushed to disk" }, /* NFSERR_WFLUSH (not used) */ { 10001, "Illegal NFS file handle" }, /* NFS3ERR_BADHANDLE */ { 10002, "Update synchronization mismatch" }, /* NFS3ERR_NOT_SYNC */ { 10003, "READDIR/READDIRPLUS cookie is stale" }, /* NFS3ERR_BAD_COOKIE */ { 10004, "Operation not supported" }, /* NFS3ERR_NOTSUPP */ { 10005, "Buffer or request is too small" }, /* NFS3ERR_TOOSMALL */ { 10006, "Unspecified error on server" }, /* NFS3ERR_SERVERFAULT */ { 10007, "Object of that type not supported" }, /* NFS3ERR_BADTYPE */ { 10008, "Request couldn't be completed in time" }, /* NFS3ERR_JUKEBOX */ { 0, NULL } }; static const struct tok nfsv3_writemodes[] = { { 0, "unstable" }, { 1, "datasync" }, { 2, "filesync" }, { 0, NULL } }; static const struct tok type2str[] = { { NFNON, "NON" }, { NFREG, "REG" }, { NFDIR, "DIR" }, { NFBLK, "BLK" }, { NFCHR, "CHR" }, { NFLNK, "LNK" }, { NFFIFO, "FIFO" }, { 0, NULL } }; static const struct tok sunrpc_auth_str[] = { { SUNRPC_AUTH_OK, "OK" }, { SUNRPC_AUTH_BADCRED, "Bogus Credentials (seal broken)" }, { SUNRPC_AUTH_REJECTEDCRED, "Rejected Credentials (client should begin new session)" }, { SUNRPC_AUTH_BADVERF, "Bogus Verifier (seal broken)" }, { SUNRPC_AUTH_REJECTEDVERF, "Verifier expired or was replayed" }, { SUNRPC_AUTH_TOOWEAK, "Credentials are too weak" }, { SUNRPC_AUTH_INVALIDRESP, "Bogus response verifier" }, { SUNRPC_AUTH_FAILED, "Unknown failure" }, { 0, NULL } }; static const struct tok sunrpc_str[] = { { SUNRPC_PROG_UNAVAIL, "PROG_UNAVAIL" }, { SUNRPC_PROG_MISMATCH, "PROG_MISMATCH" }, { SUNRPC_PROC_UNAVAIL, "PROC_UNAVAIL" }, { SUNRPC_GARBAGE_ARGS, "GARBAGE_ARGS" }, { SUNRPC_SYSTEM_ERR, "SYSTEM_ERR" }, { 0, NULL } }; static void print_nfsaddr(netdissect_options *ndo, const u_char *bp, const char *s, const char *d) { const struct ip *ip; const struct ip6_hdr *ip6; char srcaddr[INET6_ADDRSTRLEN], dstaddr[INET6_ADDRSTRLEN]; srcaddr[0] = dstaddr[0] = '\0'; switch (IP_V((const struct ip *)bp)) { case 4: ip = (const struct ip *)bp; strlcpy(srcaddr, ipaddr_string(ndo, &ip->ip_src), sizeof(srcaddr)); strlcpy(dstaddr, ipaddr_string(ndo, &ip->ip_dst), 
sizeof(dstaddr)); break; case 6: ip6 = (const struct ip6_hdr *)bp; strlcpy(srcaddr, ip6addr_string(ndo, &ip6->ip6_src), sizeof(srcaddr)); strlcpy(dstaddr, ip6addr_string(ndo, &ip6->ip6_dst), sizeof(dstaddr)); break; default: strlcpy(srcaddr, "?", sizeof(srcaddr)); strlcpy(dstaddr, "?", sizeof(dstaddr)); break; } ND_PRINT((ndo, "%s.%s > %s.%s: ", srcaddr, s, dstaddr, d)); } static const uint32_t * parse_sattr3(netdissect_options *ndo, const uint32_t *dp, struct nfsv3_sattr *sa3) { ND_TCHECK(dp[0]); sa3->sa_modeset = EXTRACT_32BITS(dp); dp++; if (sa3->sa_modeset) { ND_TCHECK(dp[0]); sa3->sa_mode = EXTRACT_32BITS(dp); dp++; } ND_TCHECK(dp[0]); sa3->sa_uidset = EXTRACT_32BITS(dp); dp++; if (sa3->sa_uidset) { ND_TCHECK(dp[0]); sa3->sa_uid = EXTRACT_32BITS(dp); dp++; } ND_TCHECK(dp[0]); sa3->sa_gidset = EXTRACT_32BITS(dp); dp++; if (sa3->sa_gidset) { ND_TCHECK(dp[0]); sa3->sa_gid = EXTRACT_32BITS(dp); dp++; } ND_TCHECK(dp[0]); sa3->sa_sizeset = EXTRACT_32BITS(dp); dp++; if (sa3->sa_sizeset) { ND_TCHECK(dp[0]); sa3->sa_size = EXTRACT_32BITS(dp); dp++; } ND_TCHECK(dp[0]); sa3->sa_atimetype = EXTRACT_32BITS(dp); dp++; if (sa3->sa_atimetype == NFSV3SATTRTIME_TOCLIENT) { ND_TCHECK(dp[1]); sa3->sa_atime.nfsv3_sec = EXTRACT_32BITS(dp); dp++; sa3->sa_atime.nfsv3_nsec = EXTRACT_32BITS(dp); dp++; } ND_TCHECK(dp[0]); sa3->sa_mtimetype = EXTRACT_32BITS(dp); dp++; if (sa3->sa_mtimetype == NFSV3SATTRTIME_TOCLIENT) { ND_TCHECK(dp[1]); sa3->sa_mtime.nfsv3_sec = EXTRACT_32BITS(dp); dp++; sa3->sa_mtime.nfsv3_nsec = EXTRACT_32BITS(dp); dp++; } return dp; trunc: return NULL; } static int nfserr; /* true if we error rather than trunc */ static void print_sattr3(netdissect_options *ndo, const struct nfsv3_sattr *sa3, int verbose) { if (sa3->sa_modeset) ND_PRINT((ndo, " mode %o", sa3->sa_mode)); if (sa3->sa_uidset) ND_PRINT((ndo, " uid %u", sa3->sa_uid)); if (sa3->sa_gidset) ND_PRINT((ndo, " gid %u", sa3->sa_gid)); if (verbose > 1) { if (sa3->sa_atimetype == NFSV3SATTRTIME_TOCLIENT) ND_PRINT((ndo, " atime %u.%06u", sa3->sa_atime.nfsv3_sec, sa3->sa_atime.nfsv3_nsec)); if (sa3->sa_mtimetype == NFSV3SATTRTIME_TOCLIENT) ND_PRINT((ndo, " mtime %u.%06u", sa3->sa_mtime.nfsv3_sec, sa3->sa_mtime.nfsv3_nsec)); } } void nfsreply_print(netdissect_options *ndo, register const u_char *bp, u_int length, register const u_char *bp2) { register const struct sunrpc_msg *rp; char srcid[20], dstid[20]; /*fits 32bit*/ nfserr = 0; /* assume no error */ rp = (const struct sunrpc_msg *)bp; ND_TCHECK(rp->rm_xid); if (!ndo->ndo_nflag) { strlcpy(srcid, "nfs", sizeof(srcid)); snprintf(dstid, sizeof(dstid), "%u", EXTRACT_32BITS(&rp->rm_xid)); } else { snprintf(srcid, sizeof(srcid), "%u", NFS_PORT); snprintf(dstid, sizeof(dstid), "%u", EXTRACT_32BITS(&rp->rm_xid)); } print_nfsaddr(ndo, bp2, srcid, dstid); nfsreply_print_noaddr(ndo, bp, length, bp2); return; trunc: if (!nfserr) ND_PRINT((ndo, "%s", tstr)); } void nfsreply_print_noaddr(netdissect_options *ndo, register const u_char *bp, u_int length, register const u_char *bp2) { register const struct sunrpc_msg *rp; uint32_t proc, vers, reply_stat; enum sunrpc_reject_stat rstat; uint32_t rlow; uint32_t rhigh; enum sunrpc_auth_stat rwhy; nfserr = 0; /* assume no error */ rp = (const struct sunrpc_msg *)bp; ND_TCHECK(rp->rm_reply.rp_stat); reply_stat = EXTRACT_32BITS(&rp->rm_reply.rp_stat); switch (reply_stat) { case SUNRPC_MSG_ACCEPTED: ND_PRINT((ndo, "reply ok %u", length)); if (xid_map_find(rp, bp2, &proc, &vers) >= 0) interp_reply(ndo, rp, proc, vers, length); break; case SUNRPC_MSG_DENIED: 
ND_PRINT((ndo, "reply ERR %u: ", length)); ND_TCHECK(rp->rm_reply.rp_reject.rj_stat); rstat = EXTRACT_32BITS(&rp->rm_reply.rp_reject.rj_stat); switch (rstat) { case SUNRPC_RPC_MISMATCH: ND_TCHECK(rp->rm_reply.rp_reject.rj_vers.high); rlow = EXTRACT_32BITS(&rp->rm_reply.rp_reject.rj_vers.low); rhigh = EXTRACT_32BITS(&rp->rm_reply.rp_reject.rj_vers.high); ND_PRINT((ndo, "RPC Version mismatch (%u-%u)", rlow, rhigh)); break; case SUNRPC_AUTH_ERROR: ND_TCHECK(rp->rm_reply.rp_reject.rj_why); rwhy = EXTRACT_32BITS(&rp->rm_reply.rp_reject.rj_why); ND_PRINT((ndo, "Auth %s", tok2str(sunrpc_auth_str, "Invalid failure code %u", rwhy))); break; default: ND_PRINT((ndo, "Unknown reason for rejecting rpc message %u", (unsigned int)rstat)); break; } break; default: ND_PRINT((ndo, "reply Unknown rpc response code=%u %u", reply_stat, length)); break; } return; trunc: if (!nfserr) ND_PRINT((ndo, "%s", tstr)); } /* * Return a pointer to the first file handle in the packet. * If the packet was truncated, return 0. */ static const uint32_t * parsereq(netdissect_options *ndo, register const struct sunrpc_msg *rp, register u_int length) { register const uint32_t *dp; register u_int len; /* * find the start of the req data (if we captured it) */ dp = (const uint32_t *)&rp->rm_call.cb_cred; ND_TCHECK(dp[1]); len = EXTRACT_32BITS(&dp[1]); if (len < length) { dp += (len + (2 * sizeof(*dp) + 3)) / sizeof(*dp); ND_TCHECK(dp[1]); len = EXTRACT_32BITS(&dp[1]); if (len < length) { dp += (len + (2 * sizeof(*dp) + 3)) / sizeof(*dp); ND_TCHECK2(dp[0], 0); return (dp); } } trunc: return (NULL); } /* * Print out an NFS file handle and return a pointer to following word. * If packet was truncated, return 0. */ static const uint32_t * parsefh(netdissect_options *ndo, register const uint32_t *dp, int v3) { u_int len; if (v3) { ND_TCHECK(dp[0]); len = EXTRACT_32BITS(dp) / 4; dp++; } else len = NFSX_V2FH / 4; if (ND_TTEST2(*dp, len * sizeof(*dp))) { nfs_printfh(ndo, dp, len); return (dp + len); } trunc: return (NULL); } /* * Print out a file name and return pointer to 32-bit word past it. * If packet was truncated, return 0. */ static const uint32_t * parsefn(netdissect_options *ndo, register const uint32_t *dp) { register uint32_t len; register const u_char *cp; /* Bail if we don't have the string length */ ND_TCHECK(*dp); /* Fetch string length; convert to host order */ len = *dp++; NTOHL(len); ND_TCHECK2(*dp, ((len + 3) & ~3)); cp = (const u_char *)dp; /* Update 32-bit pointer (NFS filenames padded to 32-bit boundaries) */ dp += ((len + 3) & ~3) / sizeof(*dp); ND_PRINT((ndo, "\"")); if (fn_printn(ndo, cp, len, ndo->ndo_snapend)) { ND_PRINT((ndo, "\"")); goto trunc; } ND_PRINT((ndo, "\"")); return (dp); trunc: return NULL; } /* * Print out file handle and file name. * Return pointer to 32-bit word past file name. * If packet was truncated (or there was some other error), return 0. 
*/ static const uint32_t * parsefhn(netdissect_options *ndo, register const uint32_t *dp, int v3) { dp = parsefh(ndo, dp, v3); if (dp == NULL) return (NULL); ND_PRINT((ndo, " ")); return (parsefn(ndo, dp)); } void nfsreq_print_noaddr(netdissect_options *ndo, register const u_char *bp, u_int length, register const u_char *bp2) { register const struct sunrpc_msg *rp; register const uint32_t *dp; nfs_type type; int v3; uint32_t proc; uint32_t access_flags; struct nfsv3_sattr sa3; ND_PRINT((ndo, "%d", length)); nfserr = 0; /* assume no error */ rp = (const struct sunrpc_msg *)bp; if (!xid_map_enter(ndo, rp, bp2)) /* record proc number for later on */ goto trunc; v3 = (EXTRACT_32BITS(&rp->rm_call.cb_vers) == NFS_VER3); proc = EXTRACT_32BITS(&rp->rm_call.cb_proc); if (!v3 && proc < NFS_NPROCS) proc = nfsv3_procid[proc]; ND_PRINT((ndo, " %s", tok2str(nfsproc_str, "proc-%u", proc))); switch (proc) { case NFSPROC_GETATTR: case NFSPROC_SETATTR: case NFSPROC_READLINK: case NFSPROC_FSSTAT: case NFSPROC_FSINFO: case NFSPROC_PATHCONF: if ((dp = parsereq(ndo, rp, length)) != NULL && parsefh(ndo, dp, v3) != NULL) return; break; case NFSPROC_LOOKUP: case NFSPROC_CREATE: case NFSPROC_MKDIR: case NFSPROC_REMOVE: case NFSPROC_RMDIR: if ((dp = parsereq(ndo, rp, length)) != NULL && parsefhn(ndo, dp, v3) != NULL) return; break; case NFSPROC_ACCESS: if ((dp = parsereq(ndo, rp, length)) != NULL && (dp = parsefh(ndo, dp, v3)) != NULL) { ND_TCHECK(dp[0]); access_flags = EXTRACT_32BITS(&dp[0]); if (access_flags & ~NFSV3ACCESS_FULL) { /* NFSV3ACCESS definitions aren't up to date */ ND_PRINT((ndo, " %04x", access_flags)); } else if ((access_flags & NFSV3ACCESS_FULL) == NFSV3ACCESS_FULL) { ND_PRINT((ndo, " NFS_ACCESS_FULL")); } else { char separator = ' '; if (access_flags & NFSV3ACCESS_READ) { ND_PRINT((ndo, " NFS_ACCESS_READ")); separator = '|'; } if (access_flags & NFSV3ACCESS_LOOKUP) { ND_PRINT((ndo, "%cNFS_ACCESS_LOOKUP", separator)); separator = '|'; } if (access_flags & NFSV3ACCESS_MODIFY) { ND_PRINT((ndo, "%cNFS_ACCESS_MODIFY", separator)); separator = '|'; } if (access_flags & NFSV3ACCESS_EXTEND) { ND_PRINT((ndo, "%cNFS_ACCESS_EXTEND", separator)); separator = '|'; } if (access_flags & NFSV3ACCESS_DELETE) { ND_PRINT((ndo, "%cNFS_ACCESS_DELETE", separator)); separator = '|'; } if (access_flags & NFSV3ACCESS_EXECUTE) ND_PRINT((ndo, "%cNFS_ACCESS_EXECUTE", separator)); } return; } break; case NFSPROC_READ: if ((dp = parsereq(ndo, rp, length)) != NULL && (dp = parsefh(ndo, dp, v3)) != NULL) { if (v3) { ND_TCHECK(dp[2]); ND_PRINT((ndo, " %u bytes @ %" PRIu64, EXTRACT_32BITS(&dp[2]), EXTRACT_64BITS(&dp[0]))); } else { ND_TCHECK(dp[1]); ND_PRINT((ndo, " %u bytes @ %u", EXTRACT_32BITS(&dp[1]), EXTRACT_32BITS(&dp[0]))); } return; } break; case NFSPROC_WRITE: if ((dp = parsereq(ndo, rp, length)) != NULL && (dp = parsefh(ndo, dp, v3)) != NULL) { if (v3) { ND_TCHECK(dp[4]); ND_PRINT((ndo, " %u (%u) bytes @ %" PRIu64, EXTRACT_32BITS(&dp[4]), EXTRACT_32BITS(&dp[2]), EXTRACT_64BITS(&dp[0]))); if (ndo->ndo_vflag) { ND_PRINT((ndo, " <%s>", tok2str(nfsv3_writemodes, NULL, EXTRACT_32BITS(&dp[3])))); } } else { ND_TCHECK(dp[3]); ND_PRINT((ndo, " %u (%u) bytes @ %u (%u)", EXTRACT_32BITS(&dp[3]), EXTRACT_32BITS(&dp[2]), EXTRACT_32BITS(&dp[1]), EXTRACT_32BITS(&dp[0]))); } return; } break; case NFSPROC_SYMLINK: if ((dp = parsereq(ndo, rp, length)) != NULL && (dp = parsefhn(ndo, dp, v3)) != NULL) { ND_PRINT((ndo, " ->")); if (v3 && (dp = parse_sattr3(ndo, dp, &sa3)) == NULL) break; if (parsefn(ndo, dp) == NULL) break; if (v3 && 
ndo->ndo_vflag) print_sattr3(ndo, &sa3, ndo->ndo_vflag); return; } break; case NFSPROC_MKNOD: if ((dp = parsereq(ndo, rp, length)) != NULL && (dp = parsefhn(ndo, dp, v3)) != NULL) { ND_TCHECK(*dp); type = (nfs_type)EXTRACT_32BITS(dp); dp++; if ((dp = parse_sattr3(ndo, dp, &sa3)) == NULL) break; ND_PRINT((ndo, " %s", tok2str(type2str, "unk-ft %d", type))); if (ndo->ndo_vflag && (type == NFCHR || type == NFBLK)) { ND_TCHECK(dp[1]); ND_PRINT((ndo, " %u/%u", EXTRACT_32BITS(&dp[0]), EXTRACT_32BITS(&dp[1]))); dp += 2; } if (ndo->ndo_vflag) print_sattr3(ndo, &sa3, ndo->ndo_vflag); return; } break; case NFSPROC_RENAME: if ((dp = parsereq(ndo, rp, length)) != NULL && (dp = parsefhn(ndo, dp, v3)) != NULL) { ND_PRINT((ndo, " ->")); if (parsefhn(ndo, dp, v3) != NULL) return; } break; case NFSPROC_LINK: if ((dp = parsereq(ndo, rp, length)) != NULL && (dp = parsefh(ndo, dp, v3)) != NULL) { ND_PRINT((ndo, " ->")); if (parsefhn(ndo, dp, v3) != NULL) return; } break; case NFSPROC_READDIR: if ((dp = parsereq(ndo, rp, length)) != NULL && (dp = parsefh(ndo, dp, v3)) != NULL) { if (v3) { ND_TCHECK(dp[4]); /* * We shouldn't really try to interpret the * offset cookie here. */ ND_PRINT((ndo, " %u bytes @ %" PRId64, EXTRACT_32BITS(&dp[4]), EXTRACT_64BITS(&dp[0]))); if (ndo->ndo_vflag) ND_PRINT((ndo, " verf %08x%08x", dp[2], dp[3])); } else { ND_TCHECK(dp[1]); /* * Print the offset as signed, since -1 is * common, but offsets > 2^31 aren't. */ ND_PRINT((ndo, " %u bytes @ %d", EXTRACT_32BITS(&dp[1]), EXTRACT_32BITS(&dp[0]))); } return; } break; case NFSPROC_READDIRPLUS: if ((dp = parsereq(ndo, rp, length)) != NULL && (dp = parsefh(ndo, dp, v3)) != NULL) { ND_TCHECK(dp[4]); /* * We don't try to interpret the offset * cookie here. */ ND_PRINT((ndo, " %u bytes @ %" PRId64, EXTRACT_32BITS(&dp[4]), EXTRACT_64BITS(&dp[0]))); if (ndo->ndo_vflag) { ND_TCHECK(dp[5]); ND_PRINT((ndo, " max %u verf %08x%08x", EXTRACT_32BITS(&dp[5]), dp[2], dp[3])); } return; } break; case NFSPROC_COMMIT: if ((dp = parsereq(ndo, rp, length)) != NULL && (dp = parsefh(ndo, dp, v3)) != NULL) { ND_TCHECK(dp[2]); ND_PRINT((ndo, " %u bytes @ %" PRIu64, EXTRACT_32BITS(&dp[2]), EXTRACT_64BITS(&dp[0]))); return; } break; default: return; } trunc: if (!nfserr) ND_PRINT((ndo, "%s", tstr)); } /* * Print out an NFS file handle. * We assume packet was not truncated before the end of the * file handle pointed to by dp. * * Note: new version (using portable file-handle parser) doesn't produce * generation number. It probably could be made to do that, with some * additional hacking on the parser code. 
*/ static void nfs_printfh(netdissect_options *ndo, register const uint32_t *dp, const u_int len) { my_fsid fsid; uint32_t ino; const char *sfsname = NULL; char *spacep; if (ndo->ndo_uflag) { u_int i; char const *sep = ""; ND_PRINT((ndo, " fh[")); for (i=0; i<len; i++) { ND_PRINT((ndo, "%s%x", sep, dp[i])); sep = ":"; } ND_PRINT((ndo, "]")); return; } Parse_fh((const u_char *)dp, len, &fsid, &ino, NULL, &sfsname, 0); if (sfsname) { /* file system ID is ASCII, not numeric, for this server OS */ static char temp[NFSX_V3FHMAX+1]; /* Make sure string is null-terminated */ strncpy(temp, sfsname, NFSX_V3FHMAX); temp[sizeof(temp) - 1] = '\0'; /* Remove trailing spaces */ spacep = strchr(temp, ' '); if (spacep) *spacep = '\0'; ND_PRINT((ndo, " fh %s/", temp)); } else { ND_PRINT((ndo, " fh %d,%d/", fsid.Fsid_dev.Major, fsid.Fsid_dev.Minor)); } if(fsid.Fsid_dev.Minor == 257) /* Print the undecoded handle */ ND_PRINT((ndo, "%s", fsid.Opaque_Handle)); else ND_PRINT((ndo, "%ld", (long) ino)); } /* * Maintain a small cache of recent client.XID.server/proc pairs, to allow * us to match up replies with requests and thus to know how to parse * the reply. */ struct xid_map_entry { uint32_t xid; /* transaction ID (net order) */ int ipver; /* IP version (4 or 6) */ struct in6_addr client; /* client IP address (net order) */ struct in6_addr server; /* server IP address (net order) */ uint32_t proc; /* call proc number (host order) */ uint32_t vers; /* program version (host order) */ }; /* * Map entries are kept in an array that we manage as a ring; * new entries are always added at the tail of the ring. Initially, * all the entries are zero and hence don't match anything. */ #define XIDMAPSIZE 64 static struct xid_map_entry xid_map[XIDMAPSIZE]; static int xid_map_next = 0; static int xid_map_hint = 0; static int xid_map_enter(netdissect_options *ndo, const struct sunrpc_msg *rp, const u_char *bp) { const struct ip *ip = NULL; const struct ip6_hdr *ip6 = NULL; struct xid_map_entry *xmep; if (!ND_TTEST(rp->rm_call.cb_vers)) return (0); switch (IP_V((const struct ip *)bp)) { case 4: ip = (const struct ip *)bp; break; case 6: ip6 = (const struct ip6_hdr *)bp; break; default: return (1); } xmep = &xid_map[xid_map_next]; if (++xid_map_next >= XIDMAPSIZE) xid_map_next = 0; UNALIGNED_MEMCPY(&xmep->xid, &rp->rm_xid, sizeof(xmep->xid)); if (ip) { xmep->ipver = 4; UNALIGNED_MEMCPY(&xmep->client, &ip->ip_src, sizeof(ip->ip_src)); UNALIGNED_MEMCPY(&xmep->server, &ip->ip_dst, sizeof(ip->ip_dst)); } else if (ip6) { xmep->ipver = 6; UNALIGNED_MEMCPY(&xmep->client, &ip6->ip6_src, sizeof(ip6->ip6_src)); UNALIGNED_MEMCPY(&xmep->server, &ip6->ip6_dst, sizeof(ip6->ip6_dst)); } xmep->proc = EXTRACT_32BITS(&rp->rm_call.cb_proc); xmep->vers = EXTRACT_32BITS(&rp->rm_call.cb_vers); return (1); } /* * Returns 0 and puts NFSPROC_xxx in proc return and * version in vers return, or returns -1 on failure */ static int xid_map_find(const struct sunrpc_msg *rp, const u_char *bp, uint32_t *proc, uint32_t *vers) { int i; struct xid_map_entry *xmep; uint32_t xid; const struct ip *ip = (const struct ip *)bp; const struct ip6_hdr *ip6 = (const struct ip6_hdr *)bp; int cmp; UNALIGNED_MEMCPY(&xid, &rp->rm_xid, sizeof(xmep->xid)); /* Start searching from where we last left off */ i = xid_map_hint; do { xmep = &xid_map[i]; cmp = 1; if (xmep->ipver != IP_V(ip) || xmep->xid != xid) goto nextitem; switch (xmep->ipver) { case 4: if (UNALIGNED_MEMCMP(&ip->ip_src, &xmep->server, sizeof(ip->ip_src)) != 0 || UNALIGNED_MEMCMP(&ip->ip_dst, &xmep->client, 
sizeof(ip->ip_dst)) != 0) { cmp = 0; } break; case 6: if (UNALIGNED_MEMCMP(&ip6->ip6_src, &xmep->server, sizeof(ip6->ip6_src)) != 0 || UNALIGNED_MEMCMP(&ip6->ip6_dst, &xmep->client, sizeof(ip6->ip6_dst)) != 0) { cmp = 0; } break; default: cmp = 0; break; } if (cmp) { /* match */ xid_map_hint = i; *proc = xmep->proc; *vers = xmep->vers; return 0; } nextitem: if (++i >= XIDMAPSIZE) i = 0; } while (i != xid_map_hint); /* search failed */ return (-1); } /* * Routines for parsing reply packets */ /* * Return a pointer to the beginning of the actual results. * If the packet was truncated, return 0. */ static const uint32_t * parserep(netdissect_options *ndo, register const struct sunrpc_msg *rp, register u_int length) { register const uint32_t *dp; u_int len; enum sunrpc_accept_stat astat; /* * Portability note: * Here we find the address of the ar_verf credentials. * Originally, this calculation was * dp = (uint32_t *)&rp->rm_reply.rp_acpt.ar_verf * On the wire, the rp_acpt field starts immediately after * the (32 bit) rp_stat field. However, rp_acpt (which is a * "struct accepted_reply") contains a "struct opaque_auth", * whose internal representation contains a pointer, so on a * 64-bit machine the compiler inserts 32 bits of padding * before rp->rm_reply.rp_acpt.ar_verf. So, we cannot use * the internal representation to parse the on-the-wire * representation. Instead, we skip past the rp_stat field, * which is an "enum" and so occupies one 32-bit word. */ dp = ((const uint32_t *)&rp->rm_reply) + 1; ND_TCHECK(dp[1]); len = EXTRACT_32BITS(&dp[1]); if (len >= length) return (NULL); /* * skip past the ar_verf credentials. */ dp += (len + (2*sizeof(uint32_t) + 3)) / sizeof(uint32_t); /* * now we can check the ar_stat field */ ND_TCHECK(dp[0]); astat = (enum sunrpc_accept_stat) EXTRACT_32BITS(dp); if (astat != SUNRPC_SUCCESS) { ND_PRINT((ndo, " %s", tok2str(sunrpc_str, "ar_stat %d", astat))); nfserr = 1; /* suppress trunc string */ return (NULL); } /* successful return */ ND_TCHECK2(*dp, sizeof(astat)); return ((const uint32_t *) (sizeof(astat) + ((const char *)dp))); trunc: return (0); } static const uint32_t * parsestatus(netdissect_options *ndo, const uint32_t *dp, int *er) { int errnum; ND_TCHECK(dp[0]); errnum = EXTRACT_32BITS(&dp[0]); if (er) *er = errnum; if (errnum != 0) { if (!ndo->ndo_qflag) ND_PRINT((ndo, " ERROR: %s", tok2str(status2str, "unk %d", errnum))); nfserr = 1; } return (dp + 1); trunc: return NULL; } static const uint32_t * parsefattr(netdissect_options *ndo, const uint32_t *dp, int verbose, int v3) { const struct nfs_fattr *fap; fap = (const struct nfs_fattr *)dp; ND_TCHECK(fap->fa_gid); if (verbose) { ND_PRINT((ndo, " %s %o ids %d/%d", tok2str(type2str, "unk-ft %d ", EXTRACT_32BITS(&fap->fa_type)), EXTRACT_32BITS(&fap->fa_mode), EXTRACT_32BITS(&fap->fa_uid), EXTRACT_32BITS(&fap->fa_gid))); if (v3) { ND_TCHECK(fap->fa3_size); ND_PRINT((ndo, " sz %" PRIu64, EXTRACT_64BITS((const uint32_t *)&fap->fa3_size))); } else { ND_TCHECK(fap->fa2_size); ND_PRINT((ndo, " sz %d", EXTRACT_32BITS(&fap->fa2_size))); } } /* print lots more stuff */ if (verbose > 1) { if (v3) { ND_TCHECK(fap->fa3_ctime); ND_PRINT((ndo, " nlink %d rdev %d/%d", EXTRACT_32BITS(&fap->fa_nlink), EXTRACT_32BITS(&fap->fa3_rdev.specdata1), EXTRACT_32BITS(&fap->fa3_rdev.specdata2))); ND_PRINT((ndo, " fsid %" PRIx64, EXTRACT_64BITS((const uint32_t *)&fap->fa3_fsid))); ND_PRINT((ndo, " fileid %" PRIx64, EXTRACT_64BITS((const uint32_t *)&fap->fa3_fileid))); ND_PRINT((ndo, " a/m/ctime %u.%06u", 
EXTRACT_32BITS(&fap->fa3_atime.nfsv3_sec), EXTRACT_32BITS(&fap->fa3_atime.nfsv3_nsec))); ND_PRINT((ndo, " %u.%06u", EXTRACT_32BITS(&fap->fa3_mtime.nfsv3_sec), EXTRACT_32BITS(&fap->fa3_mtime.nfsv3_nsec))); ND_PRINT((ndo, " %u.%06u", EXTRACT_32BITS(&fap->fa3_ctime.nfsv3_sec), EXTRACT_32BITS(&fap->fa3_ctime.nfsv3_nsec))); } else { ND_TCHECK(fap->fa2_ctime); ND_PRINT((ndo, " nlink %d rdev 0x%x fsid 0x%x nodeid 0x%x a/m/ctime", EXTRACT_32BITS(&fap->fa_nlink), EXTRACT_32BITS(&fap->fa2_rdev), EXTRACT_32BITS(&fap->fa2_fsid), EXTRACT_32BITS(&fap->fa2_fileid))); ND_PRINT((ndo, " %u.%06u", EXTRACT_32BITS(&fap->fa2_atime.nfsv2_sec), EXTRACT_32BITS(&fap->fa2_atime.nfsv2_usec))); ND_PRINT((ndo, " %u.%06u", EXTRACT_32BITS(&fap->fa2_mtime.nfsv2_sec), EXTRACT_32BITS(&fap->fa2_mtime.nfsv2_usec))); ND_PRINT((ndo, " %u.%06u", EXTRACT_32BITS(&fap->fa2_ctime.nfsv2_sec), EXTRACT_32BITS(&fap->fa2_ctime.nfsv2_usec))); } } return ((const uint32_t *)((const unsigned char *)dp + (v3 ? NFSX_V3FATTR : NFSX_V2FATTR))); trunc: return (NULL); } static int parseattrstat(netdissect_options *ndo, const uint32_t *dp, int verbose, int v3) { int er; dp = parsestatus(ndo, dp, &er); if (dp == NULL) return (0); if (er) return (1); return (parsefattr(ndo, dp, verbose, v3) != NULL); } static int parsediropres(netdissect_options *ndo, const uint32_t *dp) { int er; if (!(dp = parsestatus(ndo, dp, &er))) return (0); if (er) return (1); dp = parsefh(ndo, dp, 0); if (dp == NULL) return (0); return (parsefattr(ndo, dp, ndo->ndo_vflag, 0) != NULL); } static int parselinkres(netdissect_options *ndo, const uint32_t *dp, int v3) { int er; dp = parsestatus(ndo, dp, &er); if (dp == NULL) return(0); if (er) return(1); if (v3 && !(dp = parse_post_op_attr(ndo, dp, ndo->ndo_vflag))) return (0); ND_PRINT((ndo, " ")); return (parsefn(ndo, dp) != NULL); } static int parsestatfs(netdissect_options *ndo, const uint32_t *dp, int v3) { const struct nfs_statfs *sfsp; int er; dp = parsestatus(ndo, dp, &er); if (dp == NULL) return (0); if (!v3 && er) return (1); if (ndo->ndo_qflag) return(1); if (v3) { if (ndo->ndo_vflag) ND_PRINT((ndo, " POST:")); if (!(dp = parse_post_op_attr(ndo, dp, ndo->ndo_vflag))) return (0); } ND_TCHECK2(*dp, (v3 ? 
NFSX_V3STATFS : NFSX_V2STATFS)); sfsp = (const struct nfs_statfs *)dp; if (v3) { ND_PRINT((ndo, " tbytes %" PRIu64 " fbytes %" PRIu64 " abytes %" PRIu64, EXTRACT_64BITS((const uint32_t *)&sfsp->sf_tbytes), EXTRACT_64BITS((const uint32_t *)&sfsp->sf_fbytes), EXTRACT_64BITS((const uint32_t *)&sfsp->sf_abytes))); if (ndo->ndo_vflag) { ND_PRINT((ndo, " tfiles %" PRIu64 " ffiles %" PRIu64 " afiles %" PRIu64 " invar %u", EXTRACT_64BITS((const uint32_t *)&sfsp->sf_tfiles), EXTRACT_64BITS((const uint32_t *)&sfsp->sf_ffiles), EXTRACT_64BITS((const uint32_t *)&sfsp->sf_afiles), EXTRACT_32BITS(&sfsp->sf_invarsec))); } } else { ND_PRINT((ndo, " tsize %d bsize %d blocks %d bfree %d bavail %d", EXTRACT_32BITS(&sfsp->sf_tsize), EXTRACT_32BITS(&sfsp->sf_bsize), EXTRACT_32BITS(&sfsp->sf_blocks), EXTRACT_32BITS(&sfsp->sf_bfree), EXTRACT_32BITS(&sfsp->sf_bavail))); } return (1); trunc: return (0); } static int parserddires(netdissect_options *ndo, const uint32_t *dp) { int er; dp = parsestatus(ndo, dp, &er); if (dp == NULL) return (0); if (er) return (1); if (ndo->ndo_qflag) return (1); ND_TCHECK(dp[2]); ND_PRINT((ndo, " offset 0x%x size %d ", EXTRACT_32BITS(&dp[0]), EXTRACT_32BITS(&dp[1]))); if (dp[2] != 0) ND_PRINT((ndo, " eof")); return (1); trunc: return (0); } static const uint32_t * parse_wcc_attr(netdissect_options *ndo, const uint32_t *dp) { /* Our caller has already checked this */ ND_PRINT((ndo, " sz %" PRIu64, EXTRACT_64BITS(&dp[0]))); ND_PRINT((ndo, " mtime %u.%06u ctime %u.%06u", EXTRACT_32BITS(&dp[2]), EXTRACT_32BITS(&dp[3]), EXTRACT_32BITS(&dp[4]), EXTRACT_32BITS(&dp[5]))); return (dp + 6); } /* * Pre operation attributes. Print only if vflag > 1. */ static const uint32_t * parse_pre_op_attr(netdissect_options *ndo, const uint32_t *dp, int verbose) { ND_TCHECK(dp[0]); if (!EXTRACT_32BITS(&dp[0])) return (dp + 1); dp++; ND_TCHECK2(*dp, 24); if (verbose > 1) { return parse_wcc_attr(ndo, dp); } else { /* If not verbose enough, just skip over wcc_attr */ return (dp + 6); } trunc: return (NULL); } /* * Post operation attributes are printed if vflag >= 1 */ static const uint32_t * parse_post_op_attr(netdissect_options *ndo, const uint32_t *dp, int verbose) { ND_TCHECK(dp[0]); if (!EXTRACT_32BITS(&dp[0])) return (dp + 1); dp++; if (verbose) { return parsefattr(ndo, dp, verbose, 1); } else return (dp + (NFSX_V3FATTR / sizeof (uint32_t))); trunc: return (NULL); } static const uint32_t * parse_wcc_data(netdissect_options *ndo, const uint32_t *dp, int verbose) { if (verbose > 1) ND_PRINT((ndo, " PRE:")); if (!(dp = parse_pre_op_attr(ndo, dp, verbose))) return (0); if (verbose) ND_PRINT((ndo, " POST:")); return parse_post_op_attr(ndo, dp, verbose); } static const uint32_t * parsecreateopres(netdissect_options *ndo, const uint32_t *dp, int verbose) { int er; if (!(dp = parsestatus(ndo, dp, &er))) return (0); if (er) dp = parse_wcc_data(ndo, dp, verbose); else { ND_TCHECK(dp[0]); if (!EXTRACT_32BITS(&dp[0])) return (dp + 1); dp++; if (!(dp = parsefh(ndo, dp, 1))) return (0); if (verbose) { if (!(dp = parse_post_op_attr(ndo, dp, verbose))) return (0); if (ndo->ndo_vflag > 1) { ND_PRINT((ndo, " dir attr:")); dp = parse_wcc_data(ndo, dp, verbose); } } } return (dp); trunc: return (NULL); } static int parsewccres(netdissect_options *ndo, const uint32_t *dp, int verbose) { int er; if (!(dp = parsestatus(ndo, dp, &er))) return (0); return parse_wcc_data(ndo, dp, verbose) != NULL; } static const uint32_t * parsev3rddirres(netdissect_options *ndo, const uint32_t *dp, int verbose) { int er; if (!(dp = 
parsestatus(ndo, dp, &er))) return (0); if (ndo->ndo_vflag) ND_PRINT((ndo, " POST:")); if (!(dp = parse_post_op_attr(ndo, dp, verbose))) return (0); if (er) return dp; if (ndo->ndo_vflag) { ND_TCHECK(dp[1]); ND_PRINT((ndo, " verf %08x%08x", dp[0], dp[1])); dp += 2; } return dp; trunc: return (NULL); } static int parsefsinfo(netdissect_options *ndo, const uint32_t *dp) { const struct nfsv3_fsinfo *sfp; int er; if (!(dp = parsestatus(ndo, dp, &er))) return (0); if (ndo->ndo_vflag) ND_PRINT((ndo, " POST:")); if (!(dp = parse_post_op_attr(ndo, dp, ndo->ndo_vflag))) return (0); if (er) return (1); sfp = (const struct nfsv3_fsinfo *)dp; ND_TCHECK(*sfp); ND_PRINT((ndo, " rtmax %u rtpref %u wtmax %u wtpref %u dtpref %u", EXTRACT_32BITS(&sfp->fs_rtmax), EXTRACT_32BITS(&sfp->fs_rtpref), EXTRACT_32BITS(&sfp->fs_wtmax), EXTRACT_32BITS(&sfp->fs_wtpref), EXTRACT_32BITS(&sfp->fs_dtpref))); if (ndo->ndo_vflag) { ND_PRINT((ndo, " rtmult %u wtmult %u maxfsz %" PRIu64, EXTRACT_32BITS(&sfp->fs_rtmult), EXTRACT_32BITS(&sfp->fs_wtmult), EXTRACT_64BITS((const uint32_t *)&sfp->fs_maxfilesize))); ND_PRINT((ndo, " delta %u.%06u ", EXTRACT_32BITS(&sfp->fs_timedelta.nfsv3_sec), EXTRACT_32BITS(&sfp->fs_timedelta.nfsv3_nsec))); } return (1); trunc: return (0); } static int parsepathconf(netdissect_options *ndo, const uint32_t *dp) { int er; const struct nfsv3_pathconf *spp; if (!(dp = parsestatus(ndo, dp, &er))) return (0); if (ndo->ndo_vflag) ND_PRINT((ndo, " POST:")); if (!(dp = parse_post_op_attr(ndo, dp, ndo->ndo_vflag))) return (0); if (er) return (1); spp = (const struct nfsv3_pathconf *)dp; ND_TCHECK(*spp); ND_PRINT((ndo, " linkmax %u namemax %u %s %s %s %s", EXTRACT_32BITS(&spp->pc_linkmax), EXTRACT_32BITS(&spp->pc_namemax), EXTRACT_32BITS(&spp->pc_notrunc) ? "notrunc" : "", EXTRACT_32BITS(&spp->pc_chownrestricted) ? "chownres" : "", EXTRACT_32BITS(&spp->pc_caseinsensitive) ? "igncase" : "", EXTRACT_32BITS(&spp->pc_casepreserving) ? 
"keepcase" : "")); return (1); trunc: return (0); } static void interp_reply(netdissect_options *ndo, const struct sunrpc_msg *rp, uint32_t proc, uint32_t vers, int length) { register const uint32_t *dp; register int v3; int er; v3 = (vers == NFS_VER3); if (!v3 && proc < NFS_NPROCS) proc = nfsv3_procid[proc]; ND_PRINT((ndo, " %s", tok2str(nfsproc_str, "proc-%u", proc))); switch (proc) { case NFSPROC_GETATTR: dp = parserep(ndo, rp, length); if (dp != NULL && parseattrstat(ndo, dp, !ndo->ndo_qflag, v3) != 0) return; break; case NFSPROC_SETATTR: if (!(dp = parserep(ndo, rp, length))) return; if (v3) { if (parsewccres(ndo, dp, ndo->ndo_vflag)) return; } else { if (parseattrstat(ndo, dp, !ndo->ndo_qflag, 0) != 0) return; } break; case NFSPROC_LOOKUP: if (!(dp = parserep(ndo, rp, length))) break; if (v3) { if (!(dp = parsestatus(ndo, dp, &er))) break; if (er) { if (ndo->ndo_vflag > 1) { ND_PRINT((ndo, " post dattr:")); dp = parse_post_op_attr(ndo, dp, ndo->ndo_vflag); } } else { if (!(dp = parsefh(ndo, dp, v3))) break; if ((dp = parse_post_op_attr(ndo, dp, ndo->ndo_vflag)) && ndo->ndo_vflag > 1) { ND_PRINT((ndo, " post dattr:")); dp = parse_post_op_attr(ndo, dp, ndo->ndo_vflag); } } if (dp) return; } else { if (parsediropres(ndo, dp) != 0) return; } break; case NFSPROC_ACCESS: if (!(dp = parserep(ndo, rp, length))) break; if (!(dp = parsestatus(ndo, dp, &er))) break; if (ndo->ndo_vflag) ND_PRINT((ndo, " attr:")); if (!(dp = parse_post_op_attr(ndo, dp, ndo->ndo_vflag))) break; if (!er) { ND_TCHECK(dp[0]); ND_PRINT((ndo, " c %04x", EXTRACT_32BITS(&dp[0]))); } return; case NFSPROC_READLINK: dp = parserep(ndo, rp, length); if (dp != NULL && parselinkres(ndo, dp, v3) != 0) return; break; case NFSPROC_READ: if (!(dp = parserep(ndo, rp, length))) break; if (v3) { if (!(dp = parsestatus(ndo, dp, &er))) break; if (!(dp = parse_post_op_attr(ndo, dp, ndo->ndo_vflag))) break; if (er) return; if (ndo->ndo_vflag) { ND_TCHECK(dp[1]); ND_PRINT((ndo, " %u bytes", EXTRACT_32BITS(&dp[0]))); if (EXTRACT_32BITS(&dp[1])) ND_PRINT((ndo, " EOF")); } return; } else { if (parseattrstat(ndo, dp, ndo->ndo_vflag, 0) != 0) return; } break; case NFSPROC_WRITE: if (!(dp = parserep(ndo, rp, length))) break; if (v3) { if (!(dp = parsestatus(ndo, dp, &er))) break; if (!(dp = parse_wcc_data(ndo, dp, ndo->ndo_vflag))) break; if (er) return; if (ndo->ndo_vflag) { ND_TCHECK(dp[0]); ND_PRINT((ndo, " %u bytes", EXTRACT_32BITS(&dp[0]))); if (ndo->ndo_vflag > 1) { ND_TCHECK(dp[1]); ND_PRINT((ndo, " <%s>", tok2str(nfsv3_writemodes, NULL, EXTRACT_32BITS(&dp[1])))); } return; } } else { if (parseattrstat(ndo, dp, ndo->ndo_vflag, v3) != 0) return; } break; case NFSPROC_CREATE: case NFSPROC_MKDIR: if (!(dp = parserep(ndo, rp, length))) break; if (v3) { if (parsecreateopres(ndo, dp, ndo->ndo_vflag) != NULL) return; } else { if (parsediropres(ndo, dp) != 0) return; } break; case NFSPROC_SYMLINK: if (!(dp = parserep(ndo, rp, length))) break; if (v3) { if (parsecreateopres(ndo, dp, ndo->ndo_vflag) != NULL) return; } else { if (parsestatus(ndo, dp, &er) != NULL) return; } break; case NFSPROC_MKNOD: if (!(dp = parserep(ndo, rp, length))) break; if (parsecreateopres(ndo, dp, ndo->ndo_vflag) != NULL) return; break; case NFSPROC_REMOVE: case NFSPROC_RMDIR: if (!(dp = parserep(ndo, rp, length))) break; if (v3) { if (parsewccres(ndo, dp, ndo->ndo_vflag)) return; } else { if (parsestatus(ndo, dp, &er) != NULL) return; } break; case NFSPROC_RENAME: if (!(dp = parserep(ndo, rp, length))) break; if (v3) { if (!(dp = parsestatus(ndo, dp, &er))) break; if 
(ndo->ndo_vflag) { ND_PRINT((ndo, " from:")); if (!(dp = parse_wcc_data(ndo, dp, ndo->ndo_vflag))) break; ND_PRINT((ndo, " to:")); if (!(dp = parse_wcc_data(ndo, dp, ndo->ndo_vflag))) break; } return; } else { if (parsestatus(ndo, dp, &er) != NULL) return; } break; case NFSPROC_LINK: if (!(dp = parserep(ndo, rp, length))) break; if (v3) { if (!(dp = parsestatus(ndo, dp, &er))) break; if (ndo->ndo_vflag) { ND_PRINT((ndo, " file POST:")); if (!(dp = parse_post_op_attr(ndo, dp, ndo->ndo_vflag))) break; ND_PRINT((ndo, " dir:")); if (!(dp = parse_wcc_data(ndo, dp, ndo->ndo_vflag))) break; return; } } else { if (parsestatus(ndo, dp, &er) != NULL) return; } break; case NFSPROC_READDIR: if (!(dp = parserep(ndo, rp, length))) break; if (v3) { if (parsev3rddirres(ndo, dp, ndo->ndo_vflag)) return; } else { if (parserddires(ndo, dp) != 0) return; } break; case NFSPROC_READDIRPLUS: if (!(dp = parserep(ndo, rp, length))) break; if (parsev3rddirres(ndo, dp, ndo->ndo_vflag)) return; break; case NFSPROC_FSSTAT: dp = parserep(ndo, rp, length); if (dp != NULL && parsestatfs(ndo, dp, v3) != 0) return; break; case NFSPROC_FSINFO: dp = parserep(ndo, rp, length); if (dp != NULL && parsefsinfo(ndo, dp) != 0) return; break; case NFSPROC_PATHCONF: dp = parserep(ndo, rp, length); if (dp != NULL && parsepathconf(ndo, dp) != 0) return; break; case NFSPROC_COMMIT: dp = parserep(ndo, rp, length); if (dp != NULL && parsewccres(ndo, dp, ndo->ndo_vflag) != 0) return; break; default: return; } trunc: if (!nfserr) ND_PRINT((ndo, "%s", tstr)); }
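The xid_map comments embedded in the source above describe the request/reply cache as an array managed as a ring: new entries always go in at the tail, and lookups resume from the slot of the previous hit. The stand-alone C sketch below reduces that structure to its core; the names (RINGSIZE, ring_enter, ring_find) are illustrative rather than tcpdump's, and the real cache additionally matches the IP version and client/server addresses, which is why its all-zero initial entries can never match a live packet.

#include <stdint.h>

#define RINGSIZE 64	/* same capacity as XIDMAPSIZE above */

struct entry {
	uint32_t xid;	/* transaction ID of a recorded call */
	uint32_t proc;	/* call proc number */
	uint32_t vers;	/* program version */
};

static struct entry ring[RINGSIZE];	/* zero-initialized */
static int ring_next = 0;	/* tail: slot for the next entry */
static int ring_hint = 0;	/* slot of the last successful find */

static void
ring_enter(uint32_t xid, uint32_t proc, uint32_t vers)
{
	ring[ring_next].xid  = xid;
	ring[ring_next].proc = proc;
	ring[ring_next].vers = vers;
	if (++ring_next >= RINGSIZE)
		ring_next = 0;	/* wrap and overwrite the oldest entry */
}

/* Returns 0 on a hit, filling the proc and vers outputs; -1 on a miss. */
static int
ring_find(uint32_t xid, uint32_t *proc, uint32_t *vers)
{
	int i = ring_hint;	/* replies tend to arrive near the last hit */

	do {
		if (ring[i].xid == xid) {
			ring_hint = i;
			*proc = ring[i].proc;
			*vers = ring[i].vers;
			return 0;
		}
		if (++i >= RINGSIZE)
			i = 0;
	} while (i != ring_hint);
	return -1;	/* every slot visited once; no match */
}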
parserep(netdissect_options *ndo,
         register const struct sunrpc_msg *rp, register u_int length)
{
	register const uint32_t *dp;
	u_int len;
	enum sunrpc_accept_stat astat;

	/*
	 * Portability note:
	 * Here we find the address of the ar_verf credentials.
	 * Originally, this calculation was
	 *	dp = (uint32_t *)&rp->rm_reply.rp_acpt.ar_verf
	 * On the wire, the rp_acpt field starts immediately after
	 * the (32 bit) rp_stat field.  However, rp_acpt (which is a
	 * "struct accepted_reply") contains a "struct opaque_auth",
	 * whose internal representation contains a pointer, so on a
	 * 64-bit machine the compiler inserts 32 bits of padding
	 * before rp->rm_reply.rp_acpt.ar_verf.  So, we cannot use
	 * the internal representation to parse the on-the-wire
	 * representation.  Instead, we skip past the rp_stat field,
	 * which is an "enum" and so occupies one 32-bit word.
	 */
	dp = ((const uint32_t *)&rp->rm_reply) + 1;
	ND_TCHECK(dp[1]);
	len = EXTRACT_32BITS(&dp[1]);
	if (len >= length)
		return (NULL);
	/*
	 * skip past the ar_verf credentials.
	 */
	dp += (len + (2*sizeof(uint32_t) + 3)) / sizeof(uint32_t);
	ND_TCHECK2(dp[0], 0);

	/*
	 * now we can check the ar_stat field
	 */
	astat = (enum sunrpc_accept_stat) EXTRACT_32BITS(dp);
	if (astat != SUNRPC_SUCCESS) {
		ND_PRINT((ndo, " %s", tok2str(sunrpc_str, "ar_stat %d", astat)));
		nfserr = 1;		/* suppress trunc string */
		return (NULL);
	}
	/* successful return */
	ND_TCHECK2(*dp, sizeof(astat));
	return ((const uint32_t *) (sizeof(astat) + ((const char *)dp)));
trunc:
	return (0);
}
parserep(netdissect_options *ndo,
         register const struct sunrpc_msg *rp, register u_int length)
{
	register const uint32_t *dp;
	u_int len;
	enum sunrpc_accept_stat astat;

	/*
	 * Portability note:
	 * Here we find the address of the ar_verf credentials.
	 * Originally, this calculation was
	 *	dp = (uint32_t *)&rp->rm_reply.rp_acpt.ar_verf
	 * On the wire, the rp_acpt field starts immediately after
	 * the (32 bit) rp_stat field.  However, rp_acpt (which is a
	 * "struct accepted_reply") contains a "struct opaque_auth",
	 * whose internal representation contains a pointer, so on a
	 * 64-bit machine the compiler inserts 32 bits of padding
	 * before rp->rm_reply.rp_acpt.ar_verf.  So, we cannot use
	 * the internal representation to parse the on-the-wire
	 * representation.  Instead, we skip past the rp_stat field,
	 * which is an "enum" and so occupies one 32-bit word.
	 */
	dp = ((const uint32_t *)&rp->rm_reply) + 1;
	ND_TCHECK(dp[1]);
	len = EXTRACT_32BITS(&dp[1]);
	if (len >= length)
		return (NULL);
	/*
	 * skip past the ar_verf credentials.
	 */
	dp += (len + (2*sizeof(uint32_t) + 3)) / sizeof(uint32_t);

	/*
	 * now we can check the ar_stat field
	 */
	ND_TCHECK(dp[0]);
	astat = (enum sunrpc_accept_stat) EXTRACT_32BITS(dp);
	if (astat != SUNRPC_SUCCESS) {
		ND_PRINT((ndo, " %s", tok2str(sunrpc_str, "ar_stat %d", astat)));
		nfserr = 1;		/* suppress trunc string */
		return (NULL);
	}
	/* successful return */
	ND_TCHECK2(*dp, sizeof(astat));
	return ((const uint32_t *) (sizeof(astat) + ((const char *)dp)));
trunc:
	return (0);
}
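Between func_before and func_after the only functional change is this bounds check. ND_TCHECK2(dp[0], 0) asks whether zero bytes starting at dp lie within the captured data, which is vacuously true, so the EXTRACT_32BITS(dp) that follows could read four bytes past the end of the capture buffer; that is the out-of-bounds read (CWE-125) this row records. ND_TCHECK(dp[0]) in the fixed version verifies the whole 32-bit word before it is dereferenced. Below is a minimal stand-alone sketch of the two patterns, with in_capture as a hypothetical stand-in for those macros:

#include <stdint.h>
#include <stddef.h>

/* 1 if the len bytes at p are inside the capture; assumes p <= snapend. */
static int
in_capture(const uint8_t *p, size_t len, const uint8_t *snapend)
{
	return len <= (size_t)(snapend - p);
}

/* Returns 1 and stores the big-endian ar_stat word on success,
 * 0 if the packet was truncated before the word. */
static int
read_ar_stat(const uint8_t *dp, const uint8_t *snapend, uint32_t *out)
{
	/* Vulnerable shape: a zero-length check like ND_TCHECK2(dp[0], 0)
	 * amounts to in_capture(dp, 0, snapend), which always passes. */
	if (!in_capture(dp, sizeof(uint32_t), snapend))	/* fixed shape */
		return 0;
	*out = ((uint32_t)dp[0] << 24) | ((uint32_t)dp[1] << 16) |
	       ((uint32_t)dp[2] << 8)  |  (uint32_t)dp[3];
	return 1;
}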
{'added': [(631, '\t\t\t\tND_TCHECK(dp[4]);'),
           (639, '\t\t\t\t\t\t\tNULL, EXTRACT_32BITS(&dp[3]))));'),
           (1007, '\tND_TCHECK(dp[0]);'),
           (1244, '\t/* Our caller has already checked this */'),
           (1513, '\t\tif (!er) {'),
           (1514, '\t\t\tND_TCHECK(dp[0]);'),
           (1516, '\t\t}')],
 'deleted': [(631, '\t\t\t\tND_TCHECK(dp[2]);'),
             (637, '\t\t\t\t\tdp += 3;'),
             (638, '\t\t\t\t\tND_TCHECK(dp[0]);'),
             (641, '\t\t\t\t\t\t\tNULL, EXTRACT_32BITS(dp))));'),
             (1005, '\tND_TCHECK2(dp[0], 0);'),
             (1514, '\t\tif (!er)')]}
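The other hunks in this diff enforce the same rule at different call sites: when one print statement consumes several 32-bit words, the preceding check has to cover the highest-indexed word read, not merely the first. At source line 631 the WRITE-request printer read up to dp[4] after checking only dp[2], so the fix checks dp[4] up front and reads the write mode via EXTRACT_32BITS(&dp[3]) instead of advancing dp; the hunk at lines 1513-1516 wraps the ACCESS reply's EXTRACT_32BITS(&dp[0]) in an ND_TCHECK(dp[0]). A hedged sketch of that rule, where words_ok is a hypothetical helper rather than a tcpdump macro:

#include <stdint.h>
#include <stddef.h>

/* 1 if the 32-bit words dp[0] through dp[last] all lie before snapend. */
static int
words_ok(const uint32_t *dp, size_t last, const uint8_t *snapend)
{
	return (const uint8_t *)(dp + last + 1) <= snapend;
}

/*
 * Usage mirroring the WRITE-request hunk:
 *
 *	if (!words_ok(dp, 4, snapend))	// fixed: covers dp[4], the
 *		goto trunc;		// highest word printed below
 *	// the vulnerable form effectively checked words_ok(dp, 2, snapend)
 *	// and then went on to read dp[3] and dp[4]
 */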
7
6
1400
9286
https://github.com/the-tcpdump-group/tcpdump
CVE-2017-12898
['CWE-125']
dsdiff.c
ParseDsdiffHeaderConfig
//////////////////////////////////////////////////////////////////////////// // **** WAVPACK **** // // Hybrid Lossless Wavefile Compressor // // Copyright (c) 1998 - 2016 David Bryant. // // All Rights Reserved. // // Distributed under the BSD Software License (see license.txt) // //////////////////////////////////////////////////////////////////////////// // dsdiff.c // This module is a helper to the WavPack command-line programs to support DFF files. #include <string.h> #include <stdlib.h> #include <fcntl.h> #include <math.h> #include <stdio.h> #include <ctype.h> #include "wavpack.h" #include "utils.h" #include "md5.h" #ifdef _WIN32 #define strdup(x) _strdup(x) #endif #define WAVPACK_NO_ERROR 0 #define WAVPACK_SOFT_ERROR 1 #define WAVPACK_HARD_ERROR 2 extern int debug_logging_mode; #pragma pack(push,2) typedef struct { char ckID [4]; int64_t ckDataSize; } DFFChunkHeader; typedef struct { char ckID [4]; int64_t ckDataSize; char formType [4]; } DFFFileHeader; typedef struct { char ckID [4]; int64_t ckDataSize; uint32_t version; } DFFVersionChunk; typedef struct { char ckID [4]; int64_t ckDataSize; uint32_t sampleRate; } DFFSampleRateChunk; typedef struct { char ckID [4]; int64_t ckDataSize; uint16_t numChannels; } DFFChannelsHeader; typedef struct { char ckID [4]; int64_t ckDataSize; char compressionType [4]; } DFFCompressionHeader; #pragma pack(pop) #define DFFChunkHeaderFormat "4D" #define DFFFileHeaderFormat "4D4" #define DFFVersionChunkFormat "4DL" #define DFFSampleRateChunkFormat "4DL" #define DFFChannelsHeaderFormat "4DS" #define DFFCompressionHeaderFormat "4D4" int ParseDsdiffHeaderConfig (FILE *infile, char *infilename, char *fourcc, WavpackContext *wpc, WavpackConfig *config) { int64_t infilesize, total_samples; DFFFileHeader dff_file_header; DFFChunkHeader dff_chunk_header; uint32_t bcount; infilesize = DoGetFileSize (infile); memcpy (&dff_file_header, fourcc, 4); if ((!DoReadFile (infile, ((char *) &dff_file_header) + 4, sizeof (DFFFileHeader) - 4, &bcount) || bcount != sizeof (DFFFileHeader) - 4) || strncmp (dff_file_header.formType, "DSD ", 4)) { error_line ("%s is not a valid .DFF file!", infilename); return WAVPACK_SOFT_ERROR; } else if (!(config->qmode & QMODE_NO_STORE_WRAPPER) && !WavpackAddWrapper (wpc, &dff_file_header, sizeof (DFFFileHeader))) { error_line ("%s", WavpackGetErrorMessage (wpc)); return WAVPACK_SOFT_ERROR; } #if 1 // this might be a little too picky... 
WavpackBigEndianToNative (&dff_file_header, DFFFileHeaderFormat); if (infilesize && !(config->qmode & QMODE_IGNORE_LENGTH) && dff_file_header.ckDataSize && dff_file_header.ckDataSize + 1 && dff_file_header.ckDataSize + 12 != infilesize) { error_line ("%s is not a valid .DFF file (by total size)!", infilename); return WAVPACK_SOFT_ERROR; } if (debug_logging_mode) error_line ("file header indicated length = %lld", dff_file_header.ckDataSize); #endif // loop through all elements of the DSDIFF header // (until the data chuck) and copy them to the output file while (1) { if (!DoReadFile (infile, &dff_chunk_header, sizeof (DFFChunkHeader), &bcount) || bcount != sizeof (DFFChunkHeader)) { error_line ("%s is not a valid .DFF file!", infilename); return WAVPACK_SOFT_ERROR; } else if (!(config->qmode & QMODE_NO_STORE_WRAPPER) && !WavpackAddWrapper (wpc, &dff_chunk_header, sizeof (DFFChunkHeader))) { error_line ("%s", WavpackGetErrorMessage (wpc)); return WAVPACK_SOFT_ERROR; } WavpackBigEndianToNative (&dff_chunk_header, DFFChunkHeaderFormat); if (debug_logging_mode) error_line ("chunk header indicated length = %lld", dff_chunk_header.ckDataSize); if (!strncmp (dff_chunk_header.ckID, "FVER", 4)) { uint32_t version; if (dff_chunk_header.ckDataSize != sizeof (version) || !DoReadFile (infile, &version, sizeof (version), &bcount) || bcount != sizeof (version)) { error_line ("%s is not a valid .DFF file!", infilename); return WAVPACK_SOFT_ERROR; } else if (!(config->qmode & QMODE_NO_STORE_WRAPPER) && !WavpackAddWrapper (wpc, &version, sizeof (version))) { error_line ("%s", WavpackGetErrorMessage (wpc)); return WAVPACK_SOFT_ERROR; } WavpackBigEndianToNative (&version, "L"); if (debug_logging_mode) error_line ("dsdiff file version = 0x%08x", version); } else if (!strncmp (dff_chunk_header.ckID, "PROP", 4)) { char *prop_chunk = malloc ((size_t) dff_chunk_header.ckDataSize); if (!DoReadFile (infile, prop_chunk, (uint32_t) dff_chunk_header.ckDataSize, &bcount) || bcount != dff_chunk_header.ckDataSize) { error_line ("%s is not a valid .DFF file!", infilename); free (prop_chunk); return WAVPACK_SOFT_ERROR; } else if (!(config->qmode & QMODE_NO_STORE_WRAPPER) && !WavpackAddWrapper (wpc, prop_chunk, (uint32_t) dff_chunk_header.ckDataSize)) { error_line ("%s", WavpackGetErrorMessage (wpc)); free (prop_chunk); return WAVPACK_SOFT_ERROR; } if (!strncmp (prop_chunk, "SND ", 4)) { char *cptr = prop_chunk + 4, *eptr = prop_chunk + dff_chunk_header.ckDataSize; uint16_t numChannels, chansSpecified, chanMask = 0; uint32_t sampleRate; while (eptr - cptr >= sizeof (dff_chunk_header)) { memcpy (&dff_chunk_header, cptr, sizeof (dff_chunk_header)); cptr += sizeof (dff_chunk_header); WavpackBigEndianToNative (&dff_chunk_header, DFFChunkHeaderFormat); if (eptr - cptr >= dff_chunk_header.ckDataSize) { if (!strncmp (dff_chunk_header.ckID, "FS ", 4) && dff_chunk_header.ckDataSize == 4) { memcpy (&sampleRate, cptr, sizeof (sampleRate)); WavpackBigEndianToNative (&sampleRate, "L"); cptr += dff_chunk_header.ckDataSize; if (debug_logging_mode) error_line ("got sample rate of %u Hz", sampleRate); } else if (!strncmp (dff_chunk_header.ckID, "CHNL", 4) && dff_chunk_header.ckDataSize >= 2) { memcpy (&numChannels, cptr, sizeof (numChannels)); WavpackBigEndianToNative (&numChannels, "S"); cptr += sizeof (numChannels); chansSpecified = (int)(dff_chunk_header.ckDataSize - sizeof (numChannels)) / 4; while (chansSpecified--) { if (!strncmp (cptr, "SLFT", 4) || !strncmp (cptr, "MLFT", 4)) chanMask |= 0x1; else if (!strncmp (cptr, "SRGT", 4) || 
!strncmp (cptr, "MRGT", 4)) chanMask |= 0x2; else if (!strncmp (cptr, "LS ", 4)) chanMask |= 0x10; else if (!strncmp (cptr, "RS ", 4)) chanMask |= 0x20; else if (!strncmp (cptr, "C ", 4)) chanMask |= 0x4; else if (!strncmp (cptr, "LFE ", 4)) chanMask |= 0x8; else if (debug_logging_mode) error_line ("undefined channel ID %c%c%c%c", cptr [0], cptr [1], cptr [2], cptr [3]); cptr += 4; } if (debug_logging_mode) error_line ("%d channels, mask = 0x%08x", numChannels, chanMask); } else if (!strncmp (dff_chunk_header.ckID, "CMPR", 4) && dff_chunk_header.ckDataSize >= 4) { if (strncmp (cptr, "DSD ", 4)) { error_line ("DSDIFF files must be uncompressed, not \"%c%c%c%c\"!", cptr [0], cptr [1], cptr [2], cptr [3]); free (prop_chunk); return WAVPACK_SOFT_ERROR; } cptr += dff_chunk_header.ckDataSize; } else { if (debug_logging_mode) error_line ("got PROP/SND chunk type \"%c%c%c%c\" of %d bytes", dff_chunk_header.ckID [0], dff_chunk_header.ckID [1], dff_chunk_header.ckID [2], dff_chunk_header.ckID [3], dff_chunk_header.ckDataSize); cptr += dff_chunk_header.ckDataSize; } } else { error_line ("%s is not a valid .DFF file!", infilename); free (prop_chunk); return WAVPACK_SOFT_ERROR; } } if (chanMask && (config->channel_mask || (config->qmode & QMODE_CHANS_UNASSIGNED))) { error_line ("this DSDIFF file already has channel order information!"); free (prop_chunk); return WAVPACK_SOFT_ERROR; } else if (chanMask) config->channel_mask = chanMask; config->bits_per_sample = 8; config->bytes_per_sample = 1; config->num_channels = numChannels; config->sample_rate = sampleRate / 8; config->qmode |= QMODE_DSD_MSB_FIRST; } else if (debug_logging_mode) error_line ("got unknown PROP chunk type \"%c%c%c%c\" of %d bytes", prop_chunk [0], prop_chunk [1], prop_chunk [2], prop_chunk [3], dff_chunk_header.ckDataSize); free (prop_chunk); } else if (!strncmp (dff_chunk_header.ckID, "DSD ", 4)) { total_samples = dff_chunk_header.ckDataSize / config->num_channels; break; } else { // just copy unknown chunks to output file int bytes_to_copy = (int)(((dff_chunk_header.ckDataSize) + 1) & ~(int64_t)1); char *buff = malloc (bytes_to_copy); if (debug_logging_mode) error_line ("extra unknown chunk \"%c%c%c%c\" of %d bytes", dff_chunk_header.ckID [0], dff_chunk_header.ckID [1], dff_chunk_header.ckID [2], dff_chunk_header.ckID [3], dff_chunk_header.ckDataSize); if (!DoReadFile (infile, buff, bytes_to_copy, &bcount) || bcount != bytes_to_copy || (!(config->qmode & QMODE_NO_STORE_WRAPPER) && !WavpackAddWrapper (wpc, buff, bytes_to_copy))) { error_line ("%s", WavpackGetErrorMessage (wpc)); free (buff); return WAVPACK_SOFT_ERROR; } free (buff); } } if (debug_logging_mode) error_line ("setting configuration with %lld samples", total_samples); if (!WavpackSetConfiguration64 (wpc, config, total_samples, NULL)) { error_line ("%s: %s", infilename, WavpackGetErrorMessage (wpc)); return WAVPACK_SOFT_ERROR; } return WAVPACK_NO_ERROR; } int WriteDsdiffHeader (FILE *outfile, WavpackContext *wpc, int64_t total_samples, int qmode) { uint32_t chan_mask = WavpackGetChannelMask (wpc); int num_channels = WavpackGetNumChannels (wpc); DFFFileHeader file_header, prop_header; DFFChunkHeader data_header; DFFVersionChunk ver_chunk; DFFSampleRateChunk fs_chunk; DFFChannelsHeader chan_header; DFFCompressionHeader cmpr_header; char *cmpr_name = "\016not compressed", *chan_ids; int64_t file_size, prop_chunk_size, data_size; int cmpr_name_size, chan_ids_size; uint32_t bcount; if (debug_logging_mode) error_line ("WriteDsdiffHeader (), total samples = %lld, qmode = 
0x%02x\n", (long long) total_samples, qmode); cmpr_name_size = (strlen (cmpr_name) + 1) & ~1; chan_ids_size = num_channels * 4; chan_ids = malloc (chan_ids_size); if (chan_ids) { uint32_t scan_mask = 0x1; char *cptr = chan_ids; int ci, uci = 0; for (ci = 0; ci < num_channels; ++ci) { while (scan_mask && !(scan_mask & chan_mask)) scan_mask <<= 1; if (scan_mask & 0x1) memcpy (cptr, num_channels <= 2 ? "SLFT" : "MLFT", 4); else if (scan_mask & 0x2) memcpy (cptr, num_channels <= 2 ? "SRGT" : "MRGT", 4); else if (scan_mask & 0x4) memcpy (cptr, "C ", 4); else if (scan_mask & 0x8) memcpy (cptr, "LFE ", 4); else if (scan_mask & 0x10) memcpy (cptr, "LS ", 4); else if (scan_mask & 0x20) memcpy (cptr, "RS ", 4); else { cptr [0] = 'C'; cptr [1] = (uci / 100) + '0'; cptr [2] = ((uci % 100) / 10) + '0'; cptr [3] = (uci % 10) + '0'; uci++; } scan_mask <<= 1; cptr += 4; } } else { error_line ("can't allocate memory!"); return FALSE; } data_size = total_samples * num_channels; prop_chunk_size = sizeof (prop_header) + sizeof (fs_chunk) + sizeof (chan_header) + chan_ids_size + sizeof (cmpr_header) + cmpr_name_size; file_size = sizeof (file_header) + sizeof (ver_chunk) + prop_chunk_size + sizeof (data_header) + ((data_size + 1) & ~(int64_t)1); memcpy (file_header.ckID, "FRM8", 4); file_header.ckDataSize = file_size - 12; memcpy (file_header.formType, "DSD ", 4); memcpy (prop_header.ckID, "PROP", 4); prop_header.ckDataSize = prop_chunk_size - 12; memcpy (prop_header.formType, "SND ", 4); memcpy (ver_chunk.ckID, "FVER", 4); ver_chunk.ckDataSize = sizeof (ver_chunk) - 12; ver_chunk.version = 0x01050000; memcpy (fs_chunk.ckID, "FS ", 4); fs_chunk.ckDataSize = sizeof (fs_chunk) - 12; fs_chunk.sampleRate = WavpackGetSampleRate (wpc) * 8; memcpy (chan_header.ckID, "CHNL", 4); chan_header.ckDataSize = sizeof (chan_header) + chan_ids_size - 12; chan_header.numChannels = num_channels; memcpy (cmpr_header.ckID, "CMPR", 4); cmpr_header.ckDataSize = sizeof (cmpr_header) + cmpr_name_size - 12; memcpy (cmpr_header.compressionType, "DSD ", 4); memcpy (data_header.ckID, "DSD ", 4); data_header.ckDataSize = data_size; WavpackNativeToBigEndian (&file_header, DFFFileHeaderFormat); WavpackNativeToBigEndian (&ver_chunk, DFFVersionChunkFormat); WavpackNativeToBigEndian (&prop_header, DFFFileHeaderFormat); WavpackNativeToBigEndian (&fs_chunk, DFFSampleRateChunkFormat); WavpackNativeToBigEndian (&chan_header, DFFChannelsHeaderFormat); WavpackNativeToBigEndian (&cmpr_header, DFFCompressionHeaderFormat); WavpackNativeToBigEndian (&data_header, DFFChunkHeaderFormat); if (!DoWriteFile (outfile, &file_header, sizeof (file_header), &bcount) || bcount != sizeof (file_header) || !DoWriteFile (outfile, &ver_chunk, sizeof (ver_chunk), &bcount) || bcount != sizeof (ver_chunk) || !DoWriteFile (outfile, &prop_header, sizeof (prop_header), &bcount) || bcount != sizeof (prop_header) || !DoWriteFile (outfile, &fs_chunk, sizeof (fs_chunk), &bcount) || bcount != sizeof (fs_chunk) || !DoWriteFile (outfile, &chan_header, sizeof (chan_header), &bcount) || bcount != sizeof (chan_header) || !DoWriteFile (outfile, chan_ids, chan_ids_size, &bcount) || bcount != chan_ids_size || !DoWriteFile (outfile, &cmpr_header, sizeof (cmpr_header), &bcount) || bcount != sizeof (cmpr_header) || !DoWriteFile (outfile, cmpr_name, cmpr_name_size, &bcount) || bcount != cmpr_name_size || !DoWriteFile (outfile, &data_header, sizeof (data_header), &bcount) || bcount != sizeof (data_header)) { error_line ("can't write .DSF data, disk probably full!"); free (chan_ids); return 
FALSE; } free (chan_ids); return TRUE; }
//////////////////////////////////////////////////////////////////////////// // **** WAVPACK **** // // Hybrid Lossless Wavefile Compressor // // Copyright (c) 1998 - 2016 David Bryant. // // All Rights Reserved. // // Distributed under the BSD Software License (see license.txt) // //////////////////////////////////////////////////////////////////////////// // dsdiff.c // This module is a helper to the WavPack command-line programs to support DFF files. #include <string.h> #include <stdlib.h> #include <fcntl.h> #include <math.h> #include <stdio.h> #include <ctype.h> #include "wavpack.h" #include "utils.h" #include "md5.h" #ifdef _WIN32 #define strdup(x) _strdup(x) #endif #define WAVPACK_NO_ERROR 0 #define WAVPACK_SOFT_ERROR 1 #define WAVPACK_HARD_ERROR 2 extern int debug_logging_mode; #pragma pack(push,2) typedef struct { char ckID [4]; int64_t ckDataSize; } DFFChunkHeader; typedef struct { char ckID [4]; int64_t ckDataSize; char formType [4]; } DFFFileHeader; typedef struct { char ckID [4]; int64_t ckDataSize; uint32_t version; } DFFVersionChunk; typedef struct { char ckID [4]; int64_t ckDataSize; uint32_t sampleRate; } DFFSampleRateChunk; typedef struct { char ckID [4]; int64_t ckDataSize; uint16_t numChannels; } DFFChannelsHeader; typedef struct { char ckID [4]; int64_t ckDataSize; char compressionType [4]; } DFFCompressionHeader; #pragma pack(pop) #define DFFChunkHeaderFormat "4D" #define DFFFileHeaderFormat "4D4" #define DFFVersionChunkFormat "4DL" #define DFFSampleRateChunkFormat "4DL" #define DFFChannelsHeaderFormat "4DS" #define DFFCompressionHeaderFormat "4D4" int ParseDsdiffHeaderConfig (FILE *infile, char *infilename, char *fourcc, WavpackContext *wpc, WavpackConfig *config) { int64_t infilesize, total_samples; DFFFileHeader dff_file_header; DFFChunkHeader dff_chunk_header; uint32_t bcount; infilesize = DoGetFileSize (infile); memcpy (&dff_file_header, fourcc, 4); if ((!DoReadFile (infile, ((char *) &dff_file_header) + 4, sizeof (DFFFileHeader) - 4, &bcount) || bcount != sizeof (DFFFileHeader) - 4) || strncmp (dff_file_header.formType, "DSD ", 4)) { error_line ("%s is not a valid .DFF file!", infilename); return WAVPACK_SOFT_ERROR; } else if (!(config->qmode & QMODE_NO_STORE_WRAPPER) && !WavpackAddWrapper (wpc, &dff_file_header, sizeof (DFFFileHeader))) { error_line ("%s", WavpackGetErrorMessage (wpc)); return WAVPACK_SOFT_ERROR; } #if 1 // this might be a little too picky... 
WavpackBigEndianToNative (&dff_file_header, DFFFileHeaderFormat); if (infilesize && !(config->qmode & QMODE_IGNORE_LENGTH) && dff_file_header.ckDataSize && dff_file_header.ckDataSize + 1 && dff_file_header.ckDataSize + 12 != infilesize) { error_line ("%s is not a valid .DFF file (by total size)!", infilename); return WAVPACK_SOFT_ERROR; } if (debug_logging_mode) error_line ("file header indicated length = %lld", dff_file_header.ckDataSize); #endif // loop through all elements of the DSDIFF header // (until the data chuck) and copy them to the output file while (1) { if (!DoReadFile (infile, &dff_chunk_header, sizeof (DFFChunkHeader), &bcount) || bcount != sizeof (DFFChunkHeader)) { error_line ("%s is not a valid .DFF file!", infilename); return WAVPACK_SOFT_ERROR; } else if (!(config->qmode & QMODE_NO_STORE_WRAPPER) && !WavpackAddWrapper (wpc, &dff_chunk_header, sizeof (DFFChunkHeader))) { error_line ("%s", WavpackGetErrorMessage (wpc)); return WAVPACK_SOFT_ERROR; } WavpackBigEndianToNative (&dff_chunk_header, DFFChunkHeaderFormat); if (debug_logging_mode) error_line ("chunk header indicated length = %lld", dff_chunk_header.ckDataSize); if (!strncmp (dff_chunk_header.ckID, "FVER", 4)) { uint32_t version; if (dff_chunk_header.ckDataSize != sizeof (version) || !DoReadFile (infile, &version, sizeof (version), &bcount) || bcount != sizeof (version)) { error_line ("%s is not a valid .DFF file!", infilename); return WAVPACK_SOFT_ERROR; } else if (!(config->qmode & QMODE_NO_STORE_WRAPPER) && !WavpackAddWrapper (wpc, &version, sizeof (version))) { error_line ("%s", WavpackGetErrorMessage (wpc)); return WAVPACK_SOFT_ERROR; } WavpackBigEndianToNative (&version, "L"); if (debug_logging_mode) error_line ("dsdiff file version = 0x%08x", version); } else if (!strncmp (dff_chunk_header.ckID, "PROP", 4)) { char *prop_chunk; if (dff_chunk_header.ckDataSize < 4 || dff_chunk_header.ckDataSize > 1024) { error_line ("%s is not a valid .DFF file!", infilename); return WAVPACK_SOFT_ERROR; } if (debug_logging_mode) error_line ("got PROP chunk of %d bytes total", (int) dff_chunk_header.ckDataSize); prop_chunk = malloc ((size_t) dff_chunk_header.ckDataSize); if (!DoReadFile (infile, prop_chunk, (uint32_t) dff_chunk_header.ckDataSize, &bcount) || bcount != dff_chunk_header.ckDataSize) { error_line ("%s is not a valid .DFF file!", infilename); free (prop_chunk); return WAVPACK_SOFT_ERROR; } else if (!(config->qmode & QMODE_NO_STORE_WRAPPER) && !WavpackAddWrapper (wpc, prop_chunk, (uint32_t) dff_chunk_header.ckDataSize)) { error_line ("%s", WavpackGetErrorMessage (wpc)); free (prop_chunk); return WAVPACK_SOFT_ERROR; } if (!strncmp (prop_chunk, "SND ", 4)) { char *cptr = prop_chunk + 4, *eptr = prop_chunk + dff_chunk_header.ckDataSize; uint16_t numChannels, chansSpecified, chanMask = 0; uint32_t sampleRate; while (eptr - cptr >= sizeof (dff_chunk_header)) { memcpy (&dff_chunk_header, cptr, sizeof (dff_chunk_header)); cptr += sizeof (dff_chunk_header); WavpackBigEndianToNative (&dff_chunk_header, DFFChunkHeaderFormat); if (eptr - cptr >= dff_chunk_header.ckDataSize) { if (!strncmp (dff_chunk_header.ckID, "FS ", 4) && dff_chunk_header.ckDataSize == 4) { memcpy (&sampleRate, cptr, sizeof (sampleRate)); WavpackBigEndianToNative (&sampleRate, "L"); cptr += dff_chunk_header.ckDataSize; if (debug_logging_mode) error_line ("got sample rate of %u Hz", sampleRate); } else if (!strncmp (dff_chunk_header.ckID, "CHNL", 4) && dff_chunk_header.ckDataSize >= 2) { memcpy (&numChannels, cptr, sizeof (numChannels)); 
WavpackBigEndianToNative (&numChannels, "S"); cptr += sizeof (numChannels); chansSpecified = (int)(dff_chunk_header.ckDataSize - sizeof (numChannels)) / 4; while (chansSpecified--) { if (!strncmp (cptr, "SLFT", 4) || !strncmp (cptr, "MLFT", 4)) chanMask |= 0x1; else if (!strncmp (cptr, "SRGT", 4) || !strncmp (cptr, "MRGT", 4)) chanMask |= 0x2; else if (!strncmp (cptr, "LS ", 4)) chanMask |= 0x10; else if (!strncmp (cptr, "RS ", 4)) chanMask |= 0x20; else if (!strncmp (cptr, "C ", 4)) chanMask |= 0x4; else if (!strncmp (cptr, "LFE ", 4)) chanMask |= 0x8; else if (debug_logging_mode) error_line ("undefined channel ID %c%c%c%c", cptr [0], cptr [1], cptr [2], cptr [3]); cptr += 4; } if (debug_logging_mode) error_line ("%d channels, mask = 0x%08x", numChannels, chanMask); } else if (!strncmp (dff_chunk_header.ckID, "CMPR", 4) && dff_chunk_header.ckDataSize >= 4) { if (strncmp (cptr, "DSD ", 4)) { error_line ("DSDIFF files must be uncompressed, not \"%c%c%c%c\"!", cptr [0], cptr [1], cptr [2], cptr [3]); free (prop_chunk); return WAVPACK_SOFT_ERROR; } cptr += dff_chunk_header.ckDataSize; } else { if (debug_logging_mode) error_line ("got PROP/SND chunk type \"%c%c%c%c\" of %d bytes", dff_chunk_header.ckID [0], dff_chunk_header.ckID [1], dff_chunk_header.ckID [2], dff_chunk_header.ckID [3], dff_chunk_header.ckDataSize); cptr += dff_chunk_header.ckDataSize; } } else { error_line ("%s is not a valid .DFF file!", infilename); free (prop_chunk); return WAVPACK_SOFT_ERROR; } } if (chanMask && (config->channel_mask || (config->qmode & QMODE_CHANS_UNASSIGNED))) { error_line ("this DSDIFF file already has channel order information!"); free (prop_chunk); return WAVPACK_SOFT_ERROR; } else if (chanMask) config->channel_mask = chanMask; config->bits_per_sample = 8; config->bytes_per_sample = 1; config->num_channels = numChannels; config->sample_rate = sampleRate / 8; config->qmode |= QMODE_DSD_MSB_FIRST; } else if (debug_logging_mode) error_line ("got unknown PROP chunk type \"%c%c%c%c\" of %d bytes", prop_chunk [0], prop_chunk [1], prop_chunk [2], prop_chunk [3], dff_chunk_header.ckDataSize); free (prop_chunk); } else if (!strncmp (dff_chunk_header.ckID, "DSD ", 4)) { total_samples = dff_chunk_header.ckDataSize / config->num_channels; break; } else { // just copy unknown chunks to output file int bytes_to_copy = (int)(((dff_chunk_header.ckDataSize) + 1) & ~(int64_t)1); char *buff = malloc (bytes_to_copy); if (debug_logging_mode) error_line ("extra unknown chunk \"%c%c%c%c\" of %d bytes", dff_chunk_header.ckID [0], dff_chunk_header.ckID [1], dff_chunk_header.ckID [2], dff_chunk_header.ckID [3], dff_chunk_header.ckDataSize); if (!DoReadFile (infile, buff, bytes_to_copy, &bcount) || bcount != bytes_to_copy || (!(config->qmode & QMODE_NO_STORE_WRAPPER) && !WavpackAddWrapper (wpc, buff, bytes_to_copy))) { error_line ("%s", WavpackGetErrorMessage (wpc)); free (buff); return WAVPACK_SOFT_ERROR; } free (buff); } } if (debug_logging_mode) error_line ("setting configuration with %lld samples", total_samples); if (!WavpackSetConfiguration64 (wpc, config, total_samples, NULL)) { error_line ("%s: %s", infilename, WavpackGetErrorMessage (wpc)); return WAVPACK_SOFT_ERROR; } return WAVPACK_NO_ERROR; } int WriteDsdiffHeader (FILE *outfile, WavpackContext *wpc, int64_t total_samples, int qmode) { uint32_t chan_mask = WavpackGetChannelMask (wpc); int num_channels = WavpackGetNumChannels (wpc); DFFFileHeader file_header, prop_header; DFFChunkHeader data_header; DFFVersionChunk ver_chunk; DFFSampleRateChunk fs_chunk; 
DFFChannelsHeader chan_header; DFFCompressionHeader cmpr_header; char *cmpr_name = "\016not compressed", *chan_ids; int64_t file_size, prop_chunk_size, data_size; int cmpr_name_size, chan_ids_size; uint32_t bcount; if (debug_logging_mode) error_line ("WriteDsdiffHeader (), total samples = %lld, qmode = 0x%02x\n", (long long) total_samples, qmode); cmpr_name_size = (strlen (cmpr_name) + 1) & ~1; chan_ids_size = num_channels * 4; chan_ids = malloc (chan_ids_size); if (chan_ids) { uint32_t scan_mask = 0x1; char *cptr = chan_ids; int ci, uci = 0; for (ci = 0; ci < num_channels; ++ci) { while (scan_mask && !(scan_mask & chan_mask)) scan_mask <<= 1; if (scan_mask & 0x1) memcpy (cptr, num_channels <= 2 ? "SLFT" : "MLFT", 4); else if (scan_mask & 0x2) memcpy (cptr, num_channels <= 2 ? "SRGT" : "MRGT", 4); else if (scan_mask & 0x4) memcpy (cptr, "C ", 4); else if (scan_mask & 0x8) memcpy (cptr, "LFE ", 4); else if (scan_mask & 0x10) memcpy (cptr, "LS ", 4); else if (scan_mask & 0x20) memcpy (cptr, "RS ", 4); else { cptr [0] = 'C'; cptr [1] = (uci / 100) + '0'; cptr [2] = ((uci % 100) / 10) + '0'; cptr [3] = (uci % 10) + '0'; uci++; } scan_mask <<= 1; cptr += 4; } } else { error_line ("can't allocate memory!"); return FALSE; } data_size = total_samples * num_channels; prop_chunk_size = sizeof (prop_header) + sizeof (fs_chunk) + sizeof (chan_header) + chan_ids_size + sizeof (cmpr_header) + cmpr_name_size; file_size = sizeof (file_header) + sizeof (ver_chunk) + prop_chunk_size + sizeof (data_header) + ((data_size + 1) & ~(int64_t)1); memcpy (file_header.ckID, "FRM8", 4); file_header.ckDataSize = file_size - 12; memcpy (file_header.formType, "DSD ", 4); memcpy (prop_header.ckID, "PROP", 4); prop_header.ckDataSize = prop_chunk_size - 12; memcpy (prop_header.formType, "SND ", 4); memcpy (ver_chunk.ckID, "FVER", 4); ver_chunk.ckDataSize = sizeof (ver_chunk) - 12; ver_chunk.version = 0x01050000; memcpy (fs_chunk.ckID, "FS ", 4); fs_chunk.ckDataSize = sizeof (fs_chunk) - 12; fs_chunk.sampleRate = WavpackGetSampleRate (wpc) * 8; memcpy (chan_header.ckID, "CHNL", 4); chan_header.ckDataSize = sizeof (chan_header) + chan_ids_size - 12; chan_header.numChannels = num_channels; memcpy (cmpr_header.ckID, "CMPR", 4); cmpr_header.ckDataSize = sizeof (cmpr_header) + cmpr_name_size - 12; memcpy (cmpr_header.compressionType, "DSD ", 4); memcpy (data_header.ckID, "DSD ", 4); data_header.ckDataSize = data_size; WavpackNativeToBigEndian (&file_header, DFFFileHeaderFormat); WavpackNativeToBigEndian (&ver_chunk, DFFVersionChunkFormat); WavpackNativeToBigEndian (&prop_header, DFFFileHeaderFormat); WavpackNativeToBigEndian (&fs_chunk, DFFSampleRateChunkFormat); WavpackNativeToBigEndian (&chan_header, DFFChannelsHeaderFormat); WavpackNativeToBigEndian (&cmpr_header, DFFCompressionHeaderFormat); WavpackNativeToBigEndian (&data_header, DFFChunkHeaderFormat); if (!DoWriteFile (outfile, &file_header, sizeof (file_header), &bcount) || bcount != sizeof (file_header) || !DoWriteFile (outfile, &ver_chunk, sizeof (ver_chunk), &bcount) || bcount != sizeof (ver_chunk) || !DoWriteFile (outfile, &prop_header, sizeof (prop_header), &bcount) || bcount != sizeof (prop_header) || !DoWriteFile (outfile, &fs_chunk, sizeof (fs_chunk), &bcount) || bcount != sizeof (fs_chunk) || !DoWriteFile (outfile, &chan_header, sizeof (chan_header), &bcount) || bcount != sizeof (chan_header) || !DoWriteFile (outfile, chan_ids, chan_ids_size, &bcount) || bcount != chan_ids_size || !DoWriteFile (outfile, &cmpr_header, sizeof (cmpr_header), &bcount) || bcount != 
sizeof (cmpr_header) || !DoWriteFile (outfile, cmpr_name, cmpr_name_size, &bcount) || bcount != cmpr_name_size || !DoWriteFile (outfile, &data_header, sizeof (data_header), &bcount) || bcount != sizeof (data_header)) { error_line ("can't write .DSF data, disk probably full!"); free (chan_ids); return FALSE; } free (chan_ids); return TRUE; }
int ParseDsdiffHeaderConfig (FILE *infile, char *infilename, char *fourcc, WavpackContext *wpc, WavpackConfig *config) { int64_t infilesize, total_samples; DFFFileHeader dff_file_header; DFFChunkHeader dff_chunk_header; uint32_t bcount; infilesize = DoGetFileSize (infile); memcpy (&dff_file_header, fourcc, 4); if ((!DoReadFile (infile, ((char *) &dff_file_header) + 4, sizeof (DFFFileHeader) - 4, &bcount) || bcount != sizeof (DFFFileHeader) - 4) || strncmp (dff_file_header.formType, "DSD ", 4)) { error_line ("%s is not a valid .DFF file!", infilename); return WAVPACK_SOFT_ERROR; } else if (!(config->qmode & QMODE_NO_STORE_WRAPPER) && !WavpackAddWrapper (wpc, &dff_file_header, sizeof (DFFFileHeader))) { error_line ("%s", WavpackGetErrorMessage (wpc)); return WAVPACK_SOFT_ERROR; } #if 1 // this might be a little too picky... WavpackBigEndianToNative (&dff_file_header, DFFFileHeaderFormat); if (infilesize && !(config->qmode & QMODE_IGNORE_LENGTH) && dff_file_header.ckDataSize && dff_file_header.ckDataSize + 1 && dff_file_header.ckDataSize + 12 != infilesize) { error_line ("%s is not a valid .DFF file (by total size)!", infilename); return WAVPACK_SOFT_ERROR; } if (debug_logging_mode) error_line ("file header indicated length = %lld", dff_file_header.ckDataSize); #endif // loop through all elements of the DSDIFF header // (until the data chuck) and copy them to the output file while (1) { if (!DoReadFile (infile, &dff_chunk_header, sizeof (DFFChunkHeader), &bcount) || bcount != sizeof (DFFChunkHeader)) { error_line ("%s is not a valid .DFF file!", infilename); return WAVPACK_SOFT_ERROR; } else if (!(config->qmode & QMODE_NO_STORE_WRAPPER) && !WavpackAddWrapper (wpc, &dff_chunk_header, sizeof (DFFChunkHeader))) { error_line ("%s", WavpackGetErrorMessage (wpc)); return WAVPACK_SOFT_ERROR; } WavpackBigEndianToNative (&dff_chunk_header, DFFChunkHeaderFormat); if (debug_logging_mode) error_line ("chunk header indicated length = %lld", dff_chunk_header.ckDataSize); if (!strncmp (dff_chunk_header.ckID, "FVER", 4)) { uint32_t version; if (dff_chunk_header.ckDataSize != sizeof (version) || !DoReadFile (infile, &version, sizeof (version), &bcount) || bcount != sizeof (version)) { error_line ("%s is not a valid .DFF file!", infilename); return WAVPACK_SOFT_ERROR; } else if (!(config->qmode & QMODE_NO_STORE_WRAPPER) && !WavpackAddWrapper (wpc, &version, sizeof (version))) { error_line ("%s", WavpackGetErrorMessage (wpc)); return WAVPACK_SOFT_ERROR; } WavpackBigEndianToNative (&version, "L"); if (debug_logging_mode) error_line ("dsdiff file version = 0x%08x", version); } else if (!strncmp (dff_chunk_header.ckID, "PROP", 4)) { char *prop_chunk = malloc ((size_t) dff_chunk_header.ckDataSize); if (!DoReadFile (infile, prop_chunk, (uint32_t) dff_chunk_header.ckDataSize, &bcount) || bcount != dff_chunk_header.ckDataSize) { error_line ("%s is not a valid .DFF file!", infilename); free (prop_chunk); return WAVPACK_SOFT_ERROR; } else if (!(config->qmode & QMODE_NO_STORE_WRAPPER) && !WavpackAddWrapper (wpc, prop_chunk, (uint32_t) dff_chunk_header.ckDataSize)) { error_line ("%s", WavpackGetErrorMessage (wpc)); free (prop_chunk); return WAVPACK_SOFT_ERROR; } if (!strncmp (prop_chunk, "SND ", 4)) { char *cptr = prop_chunk + 4, *eptr = prop_chunk + dff_chunk_header.ckDataSize; uint16_t numChannels, chansSpecified, chanMask = 0; uint32_t sampleRate; while (eptr - cptr >= sizeof (dff_chunk_header)) { memcpy (&dff_chunk_header, cptr, sizeof (dff_chunk_header)); cptr += sizeof (dff_chunk_header); WavpackBigEndianToNative 
(&dff_chunk_header, DFFChunkHeaderFormat); if (eptr - cptr >= dff_chunk_header.ckDataSize) { if (!strncmp (dff_chunk_header.ckID, "FS ", 4) && dff_chunk_header.ckDataSize == 4) { memcpy (&sampleRate, cptr, sizeof (sampleRate)); WavpackBigEndianToNative (&sampleRate, "L"); cptr += dff_chunk_header.ckDataSize; if (debug_logging_mode) error_line ("got sample rate of %u Hz", sampleRate); } else if (!strncmp (dff_chunk_header.ckID, "CHNL", 4) && dff_chunk_header.ckDataSize >= 2) { memcpy (&numChannels, cptr, sizeof (numChannels)); WavpackBigEndianToNative (&numChannels, "S"); cptr += sizeof (numChannels); chansSpecified = (int)(dff_chunk_header.ckDataSize - sizeof (numChannels)) / 4; while (chansSpecified--) { if (!strncmp (cptr, "SLFT", 4) || !strncmp (cptr, "MLFT", 4)) chanMask |= 0x1; else if (!strncmp (cptr, "SRGT", 4) || !strncmp (cptr, "MRGT", 4)) chanMask |= 0x2; else if (!strncmp (cptr, "LS ", 4)) chanMask |= 0x10; else if (!strncmp (cptr, "RS ", 4)) chanMask |= 0x20; else if (!strncmp (cptr, "C ", 4)) chanMask |= 0x4; else if (!strncmp (cptr, "LFE ", 4)) chanMask |= 0x8; else if (debug_logging_mode) error_line ("undefined channel ID %c%c%c%c", cptr [0], cptr [1], cptr [2], cptr [3]); cptr += 4; } if (debug_logging_mode) error_line ("%d channels, mask = 0x%08x", numChannels, chanMask); } else if (!strncmp (dff_chunk_header.ckID, "CMPR", 4) && dff_chunk_header.ckDataSize >= 4) { if (strncmp (cptr, "DSD ", 4)) { error_line ("DSDIFF files must be uncompressed, not \"%c%c%c%c\"!", cptr [0], cptr [1], cptr [2], cptr [3]); free (prop_chunk); return WAVPACK_SOFT_ERROR; } cptr += dff_chunk_header.ckDataSize; } else { if (debug_logging_mode) error_line ("got PROP/SND chunk type \"%c%c%c%c\" of %d bytes", dff_chunk_header.ckID [0], dff_chunk_header.ckID [1], dff_chunk_header.ckID [2], dff_chunk_header.ckID [3], dff_chunk_header.ckDataSize); cptr += dff_chunk_header.ckDataSize; } } else { error_line ("%s is not a valid .DFF file!", infilename); free (prop_chunk); return WAVPACK_SOFT_ERROR; } } if (chanMask && (config->channel_mask || (config->qmode & QMODE_CHANS_UNASSIGNED))) { error_line ("this DSDIFF file already has channel order information!"); free (prop_chunk); return WAVPACK_SOFT_ERROR; } else if (chanMask) config->channel_mask = chanMask; config->bits_per_sample = 8; config->bytes_per_sample = 1; config->num_channels = numChannels; config->sample_rate = sampleRate / 8; config->qmode |= QMODE_DSD_MSB_FIRST; } else if (debug_logging_mode) error_line ("got unknown PROP chunk type \"%c%c%c%c\" of %d bytes", prop_chunk [0], prop_chunk [1], prop_chunk [2], prop_chunk [3], dff_chunk_header.ckDataSize); free (prop_chunk); } else if (!strncmp (dff_chunk_header.ckID, "DSD ", 4)) { total_samples = dff_chunk_header.ckDataSize / config->num_channels; break; } else { // just copy unknown chunks to output file int bytes_to_copy = (int)(((dff_chunk_header.ckDataSize) + 1) & ~(int64_t)1); char *buff = malloc (bytes_to_copy); if (debug_logging_mode) error_line ("extra unknown chunk \"%c%c%c%c\" of %d bytes", dff_chunk_header.ckID [0], dff_chunk_header.ckID [1], dff_chunk_header.ckID [2], dff_chunk_header.ckID [3], dff_chunk_header.ckDataSize); if (!DoReadFile (infile, buff, bytes_to_copy, &bcount) || bcount != bytes_to_copy || (!(config->qmode & QMODE_NO_STORE_WRAPPER) && !WavpackAddWrapper (wpc, buff, bytes_to_copy))) { error_line ("%s", WavpackGetErrorMessage (wpc)); free (buff); return WAVPACK_SOFT_ERROR; } free (buff); } } if (debug_logging_mode) error_line ("setting configuration with %lld samples", 
total_samples); if (!WavpackSetConfiguration64 (wpc, config, total_samples, NULL)) { error_line ("%s: %s", infilename, WavpackGetErrorMessage (wpc)); return WAVPACK_SOFT_ERROR; } return WAVPACK_NO_ERROR; }
int ParseDsdiffHeaderConfig (FILE *infile, char *infilename, char *fourcc, WavpackContext *wpc, WavpackConfig *config) { int64_t infilesize, total_samples; DFFFileHeader dff_file_header; DFFChunkHeader dff_chunk_header; uint32_t bcount; infilesize = DoGetFileSize (infile); memcpy (&dff_file_header, fourcc, 4); if ((!DoReadFile (infile, ((char *) &dff_file_header) + 4, sizeof (DFFFileHeader) - 4, &bcount) || bcount != sizeof (DFFFileHeader) - 4) || strncmp (dff_file_header.formType, "DSD ", 4)) { error_line ("%s is not a valid .DFF file!", infilename); return WAVPACK_SOFT_ERROR; } else if (!(config->qmode & QMODE_NO_STORE_WRAPPER) && !WavpackAddWrapper (wpc, &dff_file_header, sizeof (DFFFileHeader))) { error_line ("%s", WavpackGetErrorMessage (wpc)); return WAVPACK_SOFT_ERROR; } #if 1 // this might be a little too picky... WavpackBigEndianToNative (&dff_file_header, DFFFileHeaderFormat); if (infilesize && !(config->qmode & QMODE_IGNORE_LENGTH) && dff_file_header.ckDataSize && dff_file_header.ckDataSize + 1 && dff_file_header.ckDataSize + 12 != infilesize) { error_line ("%s is not a valid .DFF file (by total size)!", infilename); return WAVPACK_SOFT_ERROR; } if (debug_logging_mode) error_line ("file header indicated length = %lld", dff_file_header.ckDataSize); #endif // loop through all elements of the DSDIFF header // (until the data chuck) and copy them to the output file while (1) { if (!DoReadFile (infile, &dff_chunk_header, sizeof (DFFChunkHeader), &bcount) || bcount != sizeof (DFFChunkHeader)) { error_line ("%s is not a valid .DFF file!", infilename); return WAVPACK_SOFT_ERROR; } else if (!(config->qmode & QMODE_NO_STORE_WRAPPER) && !WavpackAddWrapper (wpc, &dff_chunk_header, sizeof (DFFChunkHeader))) { error_line ("%s", WavpackGetErrorMessage (wpc)); return WAVPACK_SOFT_ERROR; } WavpackBigEndianToNative (&dff_chunk_header, DFFChunkHeaderFormat); if (debug_logging_mode) error_line ("chunk header indicated length = %lld", dff_chunk_header.ckDataSize); if (!strncmp (dff_chunk_header.ckID, "FVER", 4)) { uint32_t version; if (dff_chunk_header.ckDataSize != sizeof (version) || !DoReadFile (infile, &version, sizeof (version), &bcount) || bcount != sizeof (version)) { error_line ("%s is not a valid .DFF file!", infilename); return WAVPACK_SOFT_ERROR; } else if (!(config->qmode & QMODE_NO_STORE_WRAPPER) && !WavpackAddWrapper (wpc, &version, sizeof (version))) { error_line ("%s", WavpackGetErrorMessage (wpc)); return WAVPACK_SOFT_ERROR; } WavpackBigEndianToNative (&version, "L"); if (debug_logging_mode) error_line ("dsdiff file version = 0x%08x", version); } else if (!strncmp (dff_chunk_header.ckID, "PROP", 4)) { char *prop_chunk; if (dff_chunk_header.ckDataSize < 4 || dff_chunk_header.ckDataSize > 1024) { error_line ("%s is not a valid .DFF file!", infilename); return WAVPACK_SOFT_ERROR; } if (debug_logging_mode) error_line ("got PROP chunk of %d bytes total", (int) dff_chunk_header.ckDataSize); prop_chunk = malloc ((size_t) dff_chunk_header.ckDataSize); if (!DoReadFile (infile, prop_chunk, (uint32_t) dff_chunk_header.ckDataSize, &bcount) || bcount != dff_chunk_header.ckDataSize) { error_line ("%s is not a valid .DFF file!", infilename); free (prop_chunk); return WAVPACK_SOFT_ERROR; } else if (!(config->qmode & QMODE_NO_STORE_WRAPPER) && !WavpackAddWrapper (wpc, prop_chunk, (uint32_t) dff_chunk_header.ckDataSize)) { error_line ("%s", WavpackGetErrorMessage (wpc)); free (prop_chunk); return WAVPACK_SOFT_ERROR; } if (!strncmp (prop_chunk, "SND ", 4)) { char *cptr = prop_chunk + 4, *eptr = 
prop_chunk + dff_chunk_header.ckDataSize; uint16_t numChannels, chansSpecified, chanMask = 0; uint32_t sampleRate; while (eptr - cptr >= sizeof (dff_chunk_header)) { memcpy (&dff_chunk_header, cptr, sizeof (dff_chunk_header)); cptr += sizeof (dff_chunk_header); WavpackBigEndianToNative (&dff_chunk_header, DFFChunkHeaderFormat); if (eptr - cptr >= dff_chunk_header.ckDataSize) { if (!strncmp (dff_chunk_header.ckID, "FS ", 4) && dff_chunk_header.ckDataSize == 4) { memcpy (&sampleRate, cptr, sizeof (sampleRate)); WavpackBigEndianToNative (&sampleRate, "L"); cptr += dff_chunk_header.ckDataSize; if (debug_logging_mode) error_line ("got sample rate of %u Hz", sampleRate); } else if (!strncmp (dff_chunk_header.ckID, "CHNL", 4) && dff_chunk_header.ckDataSize >= 2) { memcpy (&numChannels, cptr, sizeof (numChannels)); WavpackBigEndianToNative (&numChannels, "S"); cptr += sizeof (numChannels); chansSpecified = (int)(dff_chunk_header.ckDataSize - sizeof (numChannels)) / 4; while (chansSpecified--) { if (!strncmp (cptr, "SLFT", 4) || !strncmp (cptr, "MLFT", 4)) chanMask |= 0x1; else if (!strncmp (cptr, "SRGT", 4) || !strncmp (cptr, "MRGT", 4)) chanMask |= 0x2; else if (!strncmp (cptr, "LS ", 4)) chanMask |= 0x10; else if (!strncmp (cptr, "RS ", 4)) chanMask |= 0x20; else if (!strncmp (cptr, "C ", 4)) chanMask |= 0x4; else if (!strncmp (cptr, "LFE ", 4)) chanMask |= 0x8; else if (debug_logging_mode) error_line ("undefined channel ID %c%c%c%c", cptr [0], cptr [1], cptr [2], cptr [3]); cptr += 4; } if (debug_logging_mode) error_line ("%d channels, mask = 0x%08x", numChannels, chanMask); } else if (!strncmp (dff_chunk_header.ckID, "CMPR", 4) && dff_chunk_header.ckDataSize >= 4) { if (strncmp (cptr, "DSD ", 4)) { error_line ("DSDIFF files must be uncompressed, not \"%c%c%c%c\"!", cptr [0], cptr [1], cptr [2], cptr [3]); free (prop_chunk); return WAVPACK_SOFT_ERROR; } cptr += dff_chunk_header.ckDataSize; } else { if (debug_logging_mode) error_line ("got PROP/SND chunk type \"%c%c%c%c\" of %d bytes", dff_chunk_header.ckID [0], dff_chunk_header.ckID [1], dff_chunk_header.ckID [2], dff_chunk_header.ckID [3], dff_chunk_header.ckDataSize); cptr += dff_chunk_header.ckDataSize; } } else { error_line ("%s is not a valid .DFF file!", infilename); free (prop_chunk); return WAVPACK_SOFT_ERROR; } } if (chanMask && (config->channel_mask || (config->qmode & QMODE_CHANS_UNASSIGNED))) { error_line ("this DSDIFF file already has channel order information!"); free (prop_chunk); return WAVPACK_SOFT_ERROR; } else if (chanMask) config->channel_mask = chanMask; config->bits_per_sample = 8; config->bytes_per_sample = 1; config->num_channels = numChannels; config->sample_rate = sampleRate / 8; config->qmode |= QMODE_DSD_MSB_FIRST; } else if (debug_logging_mode) error_line ("got unknown PROP chunk type \"%c%c%c%c\" of %d bytes", prop_chunk [0], prop_chunk [1], prop_chunk [2], prop_chunk [3], dff_chunk_header.ckDataSize); free (prop_chunk); } else if (!strncmp (dff_chunk_header.ckID, "DSD ", 4)) { total_samples = dff_chunk_header.ckDataSize / config->num_channels; break; } else { // just copy unknown chunks to output file int bytes_to_copy = (int)(((dff_chunk_header.ckDataSize) + 1) & ~(int64_t)1); char *buff = malloc (bytes_to_copy); if (debug_logging_mode) error_line ("extra unknown chunk \"%c%c%c%c\" of %d bytes", dff_chunk_header.ckID [0], dff_chunk_header.ckID [1], dff_chunk_header.ckID [2], dff_chunk_header.ckID [3], dff_chunk_header.ckDataSize); if (!DoReadFile (infile, buff, bytes_to_copy, &bcount) || bcount != bytes_to_copy 
|| (!(config->qmode & QMODE_NO_STORE_WRAPPER) && !WavpackAddWrapper (wpc, buff, bytes_to_copy))) { error_line ("%s", WavpackGetErrorMessage (wpc)); free (buff); return WAVPACK_SOFT_ERROR; } free (buff); } } if (debug_logging_mode) error_line ("setting configuration with %lld samples", total_samples); if (!WavpackSetConfiguration64 (wpc, config, total_samples, NULL)) { error_line ("%s: %s", infilename, WavpackGetErrorMessage (wpc)); return WAVPACK_SOFT_ERROR; } return WAVPACK_NO_ERROR; }
{'added': [(156, ' char *prop_chunk;'), (157, ''), (158, ' if (dff_chunk_header.ckDataSize < 4 || dff_chunk_header.ckDataSize > 1024) {'), (159, ' error_line ("%s is not a valid .DFF file!", infilename);'), (160, ' return WAVPACK_SOFT_ERROR;'), (161, ' }'), (162, ''), (163, ' if (debug_logging_mode)'), (164, ' error_line ("got PROP chunk of %d bytes total", (int) dff_chunk_header.ckDataSize);'), (165, ''), (166, ' prop_chunk = malloc ((size_t) dff_chunk_header.ckDataSize);')], 'deleted': [(156, ' char *prop_chunk = malloc ((size_t) dff_chunk_header.ckDataSize);')]}
11
1
329
2,535
https://github.com/dbry/WavPack
CVE-2018-7253
['CWE-125']
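The WavPack fix in this record (CVE-2018-7253) guards the PROP chunk parser: dff_chunk_header.ckDataSize is now rejected unless it lies in [4, 1024] before it reaches malloc() and the loop that walks the buffer, where an attacker-controlled 64-bit size previously drove out-of-bounds reads. A sketch of the same validate-then-allocate pattern, with read_exact() as a hypothetical stand-in for DoReadFile():

#include <stdint.h>
#include <stdlib.h>

/* Hypothetical I/O helper standing in for DoReadFile(); returns nonzero
   only on a complete read. */
extern int read_exact (void *buf, size_t len);

#define PROP_MIN 4      /* must at least hold the "SND " form type        */
#define PROP_MAX 1024   /* upper bound the fix chose for a sane PROP size */

static void *
read_prop_chunk (int64_t ck_size)
{
    /* Reject attacker-controlled sizes before they reach malloc() or
       the parse loop that walks the resulting buffer. */
    if (ck_size < PROP_MIN || ck_size > PROP_MAX)
        return NULL;

    void *buf = malloc ((size_t) ck_size);
    if (buf != NULL && !read_exact (buf, (size_t) ck_size)) {
        free (buf);
        buf = NULL;
    }
    return buf;
}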
exec.c
crun_command_exec
/* * crun - OCI runtime written in C * * Copyright (C) 2017, 2018, 2019 Giuseppe Scrivano <giuseppe@scrivano.org> * crun is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * crun is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with crun. If not, see <http://www.gnu.org/licenses/>. */ #include <config.h> #include <stdio.h> #include <stdlib.h> #include <argp.h> #include <string.h> #include <unistd.h> #include <errno.h> #include "crun.h" #include "libcrun/container.h" #include "libcrun/utils.h" #include "libcrun/linux.h" static char doc[] = "OCI runtime"; struct exec_options_s { bool tty; bool detach; bool no_new_privs; int preserve_fds; const char *process; const char *console_socket; const char *pid_file; char *process_label; char *apparmor; char *cwd; char *user; char **env; char **cap; size_t cap_size; size_t env_size; char *cgroup; }; enum { OPTION_CONSOLE_SOCKET = 1000, OPTION_PID_FILE, OPTION_CWD, OPTION_PRESERVE_FDS, OPTION_NO_NEW_PRIVS, OPTION_PROCESS_LABEL, OPTION_APPARMOR, OPTION_CGROUP, }; static struct exec_options_s exec_options; static struct argp_option options[] = { { "console-socket", OPTION_CONSOLE_SOCKET, "SOCKET", 0, "path to a socket that will receive the ptmx end of the tty", 0 }, { "tty", 't', "TTY", OPTION_ARG_OPTIONAL, "allocate a pseudo-TTY", 0 }, { "process", 'p', "FILE", 0, "path to the process.json", 0 }, { "cwd", OPTION_CWD, "CWD", 0, "current working directory", 0 }, { "cgroup", OPTION_CGROUP, "PATH", 0, "sub-cgroup in the container", 0 }, { "detach", 'd', 0, 0, "detach the command in the background", 0 }, { "user", 'u', "USERSPEC", 0, "specify the user in the form UID[:GID]", 0 }, { "env", 'e', "ENV", 0, "add an environment variable", 0 }, { "cap", 'c', "CAP", 0, "add a capability", 0 }, { "pid-file", OPTION_PID_FILE, "FILE", 0, "where to write the PID of the container", 0 }, { "preserve-fds", OPTION_PRESERVE_FDS, "N", 0, "pass additional FDs to the container", 0 }, { "no-new-privs", OPTION_NO_NEW_PRIVS, 0, 0, "set the no new privileges value for the process", 0 }, { "process-label", OPTION_PROCESS_LABEL, "VALUE", 0, "set the asm process label for the process commonly used with selinux", 0 }, { "apparmor", OPTION_APPARMOR, "VALUE", 0, "set the apparmor profile for the process", 0 }, { 0, } }; static char args_doc[] = "exec CONTAINER cmd"; static void append_env (const char *arg) { exec_options.env = realloc (exec_options.env, (exec_options.env_size + 2) * sizeof (*exec_options.env)); if (exec_options.env == NULL) error (EXIT_FAILURE, errno, "cannot allocate memory"); exec_options.env[exec_options.env_size + 1] = NULL; exec_options.env[exec_options.env_size] = xstrdup (arg); exec_options.env_size++; } static void append_cap (const char *arg) { exec_options.cap = realloc (exec_options.cap, (exec_options.cap_size + 2) * sizeof (*exec_options.cap)); if (exec_options.cap == NULL) error (EXIT_FAILURE, errno, "cannot allocate memory"); exec_options.cap[exec_options.cap_size + 1] = NULL; exec_options.cap[exec_options.cap_size] = xstrdup (arg); exec_options.cap_size++; } static char ** dup_array (char **arr, size_t len) { size_t i; char 
**ret; ret = malloc (sizeof (char *) * (len + 1)); if (ret == NULL) error (EXIT_FAILURE, errno, "cannot allocate memory"); for (i = 0; i < len; i++) ret[i] = xstrdup (arr[i]); ret[i] = NULL; return ret; } static error_t parse_opt (int key, char *arg, struct argp_state *state) { switch (key) { case OPTION_CONSOLE_SOCKET: exec_options.console_socket = arg; break; case OPTION_PID_FILE: exec_options.pid_file = arg; break; case OPTION_NO_NEW_PRIVS: exec_options.no_new_privs = true; break; case OPTION_PROCESS_LABEL: exec_options.process_label = argp_mandatory_argument (arg, state); break; case OPTION_APPARMOR: exec_options.apparmor = argp_mandatory_argument (arg, state); break; case OPTION_PRESERVE_FDS: exec_options.preserve_fds = strtoul (argp_mandatory_argument (arg, state), NULL, 10); break; case OPTION_CGROUP: exec_options.cgroup = argp_mandatory_argument (arg, state); break; case 'd': exec_options.detach = true; break; case 'p': exec_options.process = arg; break; case 't': exec_options.tty = arg == NULL || (strcmp (arg, "false") != 0 && strcmp (arg, "no") != 0); break; case 'u': exec_options.user = arg; break; case 'e': append_env (arg); break; case 'c': append_cap (arg); break; case OPTION_CWD: exec_options.cwd = xstrdup (arg); break; case ARGP_KEY_NO_ARGS: libcrun_fail_with_error (0, "please specify a ID for the container"); default: return ARGP_ERR_UNKNOWN; } return 0; } static struct argp run_argp = { options, parse_opt, args_doc, doc, NULL, NULL, NULL }; static runtime_spec_schema_config_schema_process_user * make_oci_process_user (const char *userspec) { runtime_spec_schema_config_schema_process_user *u; char *endptr = NULL; if (userspec == NULL) return NULL; u = xmalloc0 (sizeof (runtime_spec_schema_config_schema_process_user)); errno = 0; u->uid = strtol (userspec, &endptr, 10); if (errno == ERANGE) libcrun_fail_with_error (0, "invalid UID specified"); if (*endptr == '\0') return u; if (*endptr != ':') libcrun_fail_with_error (0, "invalid USERSPEC specified"); errno = 0; u->gid = strtol (endptr + 1, &endptr, 10); if (errno == ERANGE) libcrun_fail_with_error (0, "invalid GID specified"); if (*endptr != '\0') libcrun_fail_with_error (0, "invalid USERSPEC specified"); return u; } #define cleanup_process_schema __attribute__ ((cleanup (cleanup_process_schemap))) static inline void cleanup_process_schemap (runtime_spec_schema_config_schema_process **p) { runtime_spec_schema_config_schema_process *process = *p; if (process) (void) free_runtime_spec_schema_config_schema_process (process); } int crun_command_exec (struct crun_global_arguments *global_args, int argc, char **argv, libcrun_error_t *err) { int first_arg = 0, ret = 0; libcrun_context_t crun_context = { 0, }; cleanup_process_schema runtime_spec_schema_config_schema_process *process = NULL; struct libcrun_container_exec_options_s exec_opts; memset (&exec_opts, 0, sizeof (exec_opts)); exec_opts.struct_size = sizeof (exec_opts); crun_context.preserve_fds = 0; crun_context.listen_fds = 0; argp_parse (&run_argp, argc, argv, ARGP_IN_ORDER, &first_arg, &exec_options); crun_assert_n_args (argc - first_arg, exec_options.process ? 
1 : 2, -1); ret = init_libcrun_context (&crun_context, argv[first_arg], global_args, err); if (UNLIKELY (ret < 0)) return ret; crun_context.detach = exec_options.detach; crun_context.console_socket = exec_options.console_socket; crun_context.pid_file = exec_options.pid_file; crun_context.preserve_fds = exec_options.preserve_fds; if (getenv ("LISTEN_FDS")) { crun_context.listen_fds = strtoll (getenv ("LISTEN_FDS"), NULL, 10); crun_context.preserve_fds += crun_context.listen_fds; } if (exec_options.process) exec_opts.path = exec_options.process; else { process = xmalloc0 (sizeof (*process)); int i; process->args_len = argc; process->args = xmalloc0 ((argc + 1) * sizeof (*process->args)); for (i = 0; i < argc - first_arg; i++) process->args[i] = xstrdup (argv[first_arg + i + 1]); process->args[i] = NULL; if (exec_options.cwd) process->cwd = exec_options.cwd; process->terminal = exec_options.tty; process->env = exec_options.env; process->env_len = exec_options.env_size; process->user = make_oci_process_user (exec_options.user); if (exec_options.process_label != NULL) process->selinux_label = exec_options.process_label; if (exec_options.apparmor != NULL) process->apparmor_profile = exec_options.apparmor; if (exec_options.cap_size > 0) { runtime_spec_schema_config_schema_process_capabilities *capabilities = xmalloc (sizeof (runtime_spec_schema_config_schema_process_capabilities)); capabilities->effective = exec_options.cap; capabilities->effective_len = exec_options.cap_size; capabilities->inheritable = dup_array (exec_options.cap, exec_options.cap_size); capabilities->inheritable_len = exec_options.cap_size; capabilities->bounding = dup_array (exec_options.cap, exec_options.cap_size); capabilities->bounding_len = exec_options.cap_size; capabilities->ambient = dup_array (exec_options.cap, exec_options.cap_size); capabilities->ambient_len = exec_options.cap_size; capabilities->permitted = dup_array (exec_options.cap, exec_options.cap_size); capabilities->permitted_len = exec_options.cap_size; process->capabilities = capabilities; } // noNewPriviledges will remain `false` if basespec has `false` unless specified // Default is always `true` in generated basespec config if (exec_options.no_new_privs) process->no_new_privileges = 1; exec_opts.process = process; } exec_opts.cgroup = exec_options.cgroup; return libcrun_container_exec_with_options (&crun_context, argv[first_arg], &exec_opts, err); }
/* * crun - OCI runtime written in C * * Copyright (C) 2017, 2018, 2019 Giuseppe Scrivano <giuseppe@scrivano.org> * crun is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * crun is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with crun. If not, see <http://www.gnu.org/licenses/>. */ #include <config.h> #include <stdio.h> #include <stdlib.h> #include <argp.h> #include <string.h> #include <unistd.h> #include <errno.h> #include "crun.h" #include "libcrun/container.h" #include "libcrun/utils.h" #include "libcrun/linux.h" static char doc[] = "OCI runtime"; struct exec_options_s { bool tty; bool detach; bool no_new_privs; int preserve_fds; const char *process; const char *console_socket; const char *pid_file; char *process_label; char *apparmor; char *cwd; char *user; char **env; char **cap; size_t cap_size; size_t env_size; char *cgroup; }; enum { OPTION_CONSOLE_SOCKET = 1000, OPTION_PID_FILE, OPTION_CWD, OPTION_PRESERVE_FDS, OPTION_NO_NEW_PRIVS, OPTION_PROCESS_LABEL, OPTION_APPARMOR, OPTION_CGROUP, }; static struct exec_options_s exec_options; static struct argp_option options[] = { { "console-socket", OPTION_CONSOLE_SOCKET, "SOCKET", 0, "path to a socket that will receive the ptmx end of the tty", 0 }, { "tty", 't', "TTY", OPTION_ARG_OPTIONAL, "allocate a pseudo-TTY", 0 }, { "process", 'p', "FILE", 0, "path to the process.json", 0 }, { "cwd", OPTION_CWD, "CWD", 0, "current working directory", 0 }, { "cgroup", OPTION_CGROUP, "PATH", 0, "sub-cgroup in the container", 0 }, { "detach", 'd', 0, 0, "detach the command in the background", 0 }, { "user", 'u', "USERSPEC", 0, "specify the user in the form UID[:GID]", 0 }, { "env", 'e', "ENV", 0, "add an environment variable", 0 }, { "cap", 'c', "CAP", 0, "add a capability", 0 }, { "pid-file", OPTION_PID_FILE, "FILE", 0, "where to write the PID of the container", 0 }, { "preserve-fds", OPTION_PRESERVE_FDS, "N", 0, "pass additional FDs to the container", 0 }, { "no-new-privs", OPTION_NO_NEW_PRIVS, 0, 0, "set the no new privileges value for the process", 0 }, { "process-label", OPTION_PROCESS_LABEL, "VALUE", 0, "set the asm process label for the process commonly used with selinux", 0 }, { "apparmor", OPTION_APPARMOR, "VALUE", 0, "set the apparmor profile for the process", 0 }, { 0, } }; static char args_doc[] = "exec CONTAINER cmd"; static void append_env (const char *arg) { exec_options.env = realloc (exec_options.env, (exec_options.env_size + 2) * sizeof (*exec_options.env)); if (exec_options.env == NULL) error (EXIT_FAILURE, errno, "cannot allocate memory"); exec_options.env[exec_options.env_size + 1] = NULL; exec_options.env[exec_options.env_size] = xstrdup (arg); exec_options.env_size++; } static void append_cap (const char *arg) { exec_options.cap = realloc (exec_options.cap, (exec_options.cap_size + 2) * sizeof (*exec_options.cap)); if (exec_options.cap == NULL) error (EXIT_FAILURE, errno, "cannot allocate memory"); exec_options.cap[exec_options.cap_size + 1] = NULL; exec_options.cap[exec_options.cap_size] = xstrdup (arg); exec_options.cap_size++; } static char ** dup_array (char **arr, size_t len) { size_t i; char 
**ret; ret = malloc (sizeof (char *) * (len + 1)); if (ret == NULL) error (EXIT_FAILURE, errno, "cannot allocate memory"); for (i = 0; i < len; i++) ret[i] = xstrdup (arr[i]); ret[i] = NULL; return ret; } static error_t parse_opt (int key, char *arg, struct argp_state *state) { switch (key) { case OPTION_CONSOLE_SOCKET: exec_options.console_socket = arg; break; case OPTION_PID_FILE: exec_options.pid_file = arg; break; case OPTION_NO_NEW_PRIVS: exec_options.no_new_privs = true; break; case OPTION_PROCESS_LABEL: exec_options.process_label = argp_mandatory_argument (arg, state); break; case OPTION_APPARMOR: exec_options.apparmor = argp_mandatory_argument (arg, state); break; case OPTION_PRESERVE_FDS: exec_options.preserve_fds = strtoul (argp_mandatory_argument (arg, state), NULL, 10); break; case OPTION_CGROUP: exec_options.cgroup = argp_mandatory_argument (arg, state); break; case 'd': exec_options.detach = true; break; case 'p': exec_options.process = arg; break; case 't': exec_options.tty = arg == NULL || (strcmp (arg, "false") != 0 && strcmp (arg, "no") != 0); break; case 'u': exec_options.user = arg; break; case 'e': append_env (arg); break; case 'c': append_cap (arg); break; case OPTION_CWD: exec_options.cwd = xstrdup (arg); break; case ARGP_KEY_NO_ARGS: libcrun_fail_with_error (0, "please specify a ID for the container"); default: return ARGP_ERR_UNKNOWN; } return 0; } static struct argp run_argp = { options, parse_opt, args_doc, doc, NULL, NULL, NULL }; static runtime_spec_schema_config_schema_process_user * make_oci_process_user (const char *userspec) { runtime_spec_schema_config_schema_process_user *u; char *endptr = NULL; if (userspec == NULL) return NULL; u = xmalloc0 (sizeof (runtime_spec_schema_config_schema_process_user)); errno = 0; u->uid = strtol (userspec, &endptr, 10); if (errno == ERANGE) libcrun_fail_with_error (0, "invalid UID specified"); if (*endptr == '\0') return u; if (*endptr != ':') libcrun_fail_with_error (0, "invalid USERSPEC specified"); errno = 0; u->gid = strtol (endptr + 1, &endptr, 10); if (errno == ERANGE) libcrun_fail_with_error (0, "invalid GID specified"); if (*endptr != '\0') libcrun_fail_with_error (0, "invalid USERSPEC specified"); return u; } #define cleanup_process_schema __attribute__ ((cleanup (cleanup_process_schemap))) static inline void cleanup_process_schemap (runtime_spec_schema_config_schema_process **p) { runtime_spec_schema_config_schema_process *process = *p; if (process) (void) free_runtime_spec_schema_config_schema_process (process); } int crun_command_exec (struct crun_global_arguments *global_args, int argc, char **argv, libcrun_error_t *err) { int first_arg = 0, ret = 0; libcrun_context_t crun_context = { 0, }; cleanup_process_schema runtime_spec_schema_config_schema_process *process = NULL; struct libcrun_container_exec_options_s exec_opts; memset (&exec_opts, 0, sizeof (exec_opts)); exec_opts.struct_size = sizeof (exec_opts); crun_context.preserve_fds = 0; crun_context.listen_fds = 0; argp_parse (&run_argp, argc, argv, ARGP_IN_ORDER, &first_arg, &exec_options); crun_assert_n_args (argc - first_arg, exec_options.process ? 
1 : 2, -1); ret = init_libcrun_context (&crun_context, argv[first_arg], global_args, err); if (UNLIKELY (ret < 0)) return ret; crun_context.detach = exec_options.detach; crun_context.console_socket = exec_options.console_socket; crun_context.pid_file = exec_options.pid_file; crun_context.preserve_fds = exec_options.preserve_fds; if (getenv ("LISTEN_FDS")) { crun_context.listen_fds = strtoll (getenv ("LISTEN_FDS"), NULL, 10); crun_context.preserve_fds += crun_context.listen_fds; } if (exec_options.process) exec_opts.path = exec_options.process; else { process = xmalloc0 (sizeof (*process)); int i; process->args_len = argc; process->args = xmalloc0 ((argc + 1) * sizeof (*process->args)); for (i = 0; i < argc - first_arg; i++) process->args[i] = xstrdup (argv[first_arg + i + 1]); process->args[i] = NULL; if (exec_options.cwd) process->cwd = exec_options.cwd; process->terminal = exec_options.tty; process->env = exec_options.env; process->env_len = exec_options.env_size; process->user = make_oci_process_user (exec_options.user); if (exec_options.process_label != NULL) process->selinux_label = exec_options.process_label; if (exec_options.apparmor != NULL) process->apparmor_profile = exec_options.apparmor; if (exec_options.cap_size > 0) { runtime_spec_schema_config_schema_process_capabilities *capabilities = xmalloc (sizeof (runtime_spec_schema_config_schema_process_capabilities)); capabilities->effective = exec_options.cap; capabilities->effective_len = exec_options.cap_size; capabilities->inheritable = NULL; capabilities->inheritable_len = 0; capabilities->bounding = dup_array (exec_options.cap, exec_options.cap_size); capabilities->bounding_len = exec_options.cap_size; capabilities->ambient = dup_array (exec_options.cap, exec_options.cap_size); capabilities->ambient_len = exec_options.cap_size; capabilities->permitted = dup_array (exec_options.cap, exec_options.cap_size); capabilities->permitted_len = exec_options.cap_size; process->capabilities = capabilities; } // noNewPriviledges will remain `false` if basespec has `false` unless specified // Default is always `true` in generated basespec config if (exec_options.no_new_privs) process->no_new_privileges = 1; exec_opts.process = process; } exec_opts.cgroup = exec_options.cgroup; return libcrun_container_exec_with_options (&crun_context, argv[first_arg], &exec_opts, err); }
crun_command_exec (struct crun_global_arguments *global_args, int argc, char **argv, libcrun_error_t *err) { int first_arg = 0, ret = 0; libcrun_context_t crun_context = { 0, }; cleanup_process_schema runtime_spec_schema_config_schema_process *process = NULL; struct libcrun_container_exec_options_s exec_opts; memset (&exec_opts, 0, sizeof (exec_opts)); exec_opts.struct_size = sizeof (exec_opts); crun_context.preserve_fds = 0; crun_context.listen_fds = 0; argp_parse (&run_argp, argc, argv, ARGP_IN_ORDER, &first_arg, &exec_options); crun_assert_n_args (argc - first_arg, exec_options.process ? 1 : 2, -1); ret = init_libcrun_context (&crun_context, argv[first_arg], global_args, err); if (UNLIKELY (ret < 0)) return ret; crun_context.detach = exec_options.detach; crun_context.console_socket = exec_options.console_socket; crun_context.pid_file = exec_options.pid_file; crun_context.preserve_fds = exec_options.preserve_fds; if (getenv ("LISTEN_FDS")) { crun_context.listen_fds = strtoll (getenv ("LISTEN_FDS"), NULL, 10); crun_context.preserve_fds += crun_context.listen_fds; } if (exec_options.process) exec_opts.path = exec_options.process; else { process = xmalloc0 (sizeof (*process)); int i; process->args_len = argc; process->args = xmalloc0 ((argc + 1) * sizeof (*process->args)); for (i = 0; i < argc - first_arg; i++) process->args[i] = xstrdup (argv[first_arg + i + 1]); process->args[i] = NULL; if (exec_options.cwd) process->cwd = exec_options.cwd; process->terminal = exec_options.tty; process->env = exec_options.env; process->env_len = exec_options.env_size; process->user = make_oci_process_user (exec_options.user); if (exec_options.process_label != NULL) process->selinux_label = exec_options.process_label; if (exec_options.apparmor != NULL) process->apparmor_profile = exec_options.apparmor; if (exec_options.cap_size > 0) { runtime_spec_schema_config_schema_process_capabilities *capabilities = xmalloc (sizeof (runtime_spec_schema_config_schema_process_capabilities)); capabilities->effective = exec_options.cap; capabilities->effective_len = exec_options.cap_size; capabilities->inheritable = dup_array (exec_options.cap, exec_options.cap_size); capabilities->inheritable_len = exec_options.cap_size; capabilities->bounding = dup_array (exec_options.cap, exec_options.cap_size); capabilities->bounding_len = exec_options.cap_size; capabilities->ambient = dup_array (exec_options.cap, exec_options.cap_size); capabilities->ambient_len = exec_options.cap_size; capabilities->permitted = dup_array (exec_options.cap, exec_options.cap_size); capabilities->permitted_len = exec_options.cap_size; process->capabilities = capabilities; } // noNewPriviledges will remain `false` if basespec has `false` unless specified // Default is always `true` in generated basespec config if (exec_options.no_new_privs) process->no_new_privileges = 1; exec_opts.process = process; } exec_opts.cgroup = exec_options.cgroup; return libcrun_container_exec_with_options (&crun_context, argv[first_arg], &exec_opts, err); }
crun_command_exec (struct crun_global_arguments *global_args, int argc, char **argv, libcrun_error_t *err) { int first_arg = 0, ret = 0; libcrun_context_t crun_context = { 0, }; cleanup_process_schema runtime_spec_schema_config_schema_process *process = NULL; struct libcrun_container_exec_options_s exec_opts; memset (&exec_opts, 0, sizeof (exec_opts)); exec_opts.struct_size = sizeof (exec_opts); crun_context.preserve_fds = 0; crun_context.listen_fds = 0; argp_parse (&run_argp, argc, argv, ARGP_IN_ORDER, &first_arg, &exec_options); crun_assert_n_args (argc - first_arg, exec_options.process ? 1 : 2, -1); ret = init_libcrun_context (&crun_context, argv[first_arg], global_args, err); if (UNLIKELY (ret < 0)) return ret; crun_context.detach = exec_options.detach; crun_context.console_socket = exec_options.console_socket; crun_context.pid_file = exec_options.pid_file; crun_context.preserve_fds = exec_options.preserve_fds; if (getenv ("LISTEN_FDS")) { crun_context.listen_fds = strtoll (getenv ("LISTEN_FDS"), NULL, 10); crun_context.preserve_fds += crun_context.listen_fds; } if (exec_options.process) exec_opts.path = exec_options.process; else { process = xmalloc0 (sizeof (*process)); int i; process->args_len = argc; process->args = xmalloc0 ((argc + 1) * sizeof (*process->args)); for (i = 0; i < argc - first_arg; i++) process->args[i] = xstrdup (argv[first_arg + i + 1]); process->args[i] = NULL; if (exec_options.cwd) process->cwd = exec_options.cwd; process->terminal = exec_options.tty; process->env = exec_options.env; process->env_len = exec_options.env_size; process->user = make_oci_process_user (exec_options.user); if (exec_options.process_label != NULL) process->selinux_label = exec_options.process_label; if (exec_options.apparmor != NULL) process->apparmor_profile = exec_options.apparmor; if (exec_options.cap_size > 0) { runtime_spec_schema_config_schema_process_capabilities *capabilities = xmalloc (sizeof (runtime_spec_schema_config_schema_process_capabilities)); capabilities->effective = exec_options.cap; capabilities->effective_len = exec_options.cap_size; capabilities->inheritable = NULL; capabilities->inheritable_len = 0; capabilities->bounding = dup_array (exec_options.cap, exec_options.cap_size); capabilities->bounding_len = exec_options.cap_size; capabilities->ambient = dup_array (exec_options.cap, exec_options.cap_size); capabilities->ambient_len = exec_options.cap_size; capabilities->permitted = dup_array (exec_options.cap, exec_options.cap_size); capabilities->permitted_len = exec_options.cap_size; process->capabilities = capabilities; } // noNewPriviledges will remain `false` if basespec has `false` unless specified // Default is always `true` in generated basespec config if (exec_options.no_new_privs) process->no_new_privileges = 1; exec_opts.process = process; } exec_opts.cgroup = exec_options.cgroup; return libcrun_container_exec_with_options (&crun_context, argv[first_arg], &exec_opts, err); }
diff: {'added': [(307, ' capabilities->inheritable = NULL;'), (308, ' capabilities->inheritable_len = 0;')], 'deleted': [(307, ' capabilities->inheritable = dup_array (exec_options.cap, exec_options.cap_size);'), (308, ' capabilities->inheritable_len = exec_options.cap_size;')]}
num_lines_added: 2
num_lines_deleted: 2
num_lines_in_file: 253
num_tokens_in_file: 1610
repo: https://github.com/containers/crun
cve_id: CVE-2022-27650
cwe_id: ['CWE-276']
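For orientation, the record above captures the fix for CVE-2022-27650 (CWE-276, incorrect default permissions): crun's exec path stopped seeding the process's inheritable capability set from the capabilities requested on the exec command line (the 'c' / append_cap option in the record). A non-empty inheritable set survives execve(2) and can combine with a binary's file inheritable capabilities to hand those capabilities to the new program, so the patch leaves the set empty (NULL/0) while the effective, bounding, ambient, and permitted sets are still populated, exactly as the diff at lines 307-308 shows. Below is a minimal sketch of the fixed pattern; the struct and helper names are hypothetical stand-ins, not crun's real API.

/* Sketch of the post-fix capability assembly (hypothetical names). */
#include <stdlib.h>
#include <string.h>

struct caps_sketch {
    char **effective;   size_t effective_len;
    char **inheritable; size_t inheritable_len;
    char **bounding;    size_t bounding_len;
    char **ambient;     size_t ambient_len;
    char **permitted;   size_t permitted_len;
};

/* Analogous to dup_array () in the record: deep-copy a string list. */
static char **
dup_strings (char *const *arr, size_t len)
{
    char **ret = calloc (len + 1, sizeof (*ret));
    if (ret == NULL)
        abort ();
    for (size_t i = 0; i < len; i++)
        ret[i] = strdup (arr[i]);
    return ret;
}

static struct caps_sketch *
make_caps (char *const *requested, size_t n)
{
    struct caps_sketch *c = calloc (1, sizeof (*c));
    if (c == NULL)
        abort ();
    c->effective = dup_strings (requested, n);  c->effective_len = n;
    c->bounding  = dup_strings (requested, n);  c->bounding_len  = n;
    c->ambient   = dup_strings (requested, n);  c->ambient_len   = n;
    c->permitted = dup_strings (requested, n);  c->permitted_len = n;
    /* The CVE-2022-27650 fix: leave the inheritable set empty instead of
     * copying the requested capabilities into it. */
    c->inheritable = NULL;
    c->inheritable_len = 0;
    return c;
}

The sketch elides error handling beyond abort () to stay close to the xmalloc-style helpers in the record; callers would free each set with a matching deallocator.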
file_name: main.c
method_name: sasl_handle_login
/* * SPDX-License-Identifier: ISC * SPDX-URL: https://spdx.org/licenses/ISC.html * * Copyright (C) 2006-2015 Atheme Project (http://atheme.org/) * Copyright (C) 2017-2019 Atheme Development Group (https://atheme.github.io/) * * This file contains the main() routine. */ #include <atheme.h> #ifndef MINIMUM # define MINIMUM(a, b) (((a) < (b)) ? (a) : (b)) #endif #define ASASL_OUTFLAGS_WIPE_FREE_BUF (ASASL_OUTFLAG_WIPE_BUF | ASASL_OUTFLAG_FREE_BUF) #define LOGIN_CANCELLED_STR "There was a problem logging you in; login cancelled" static mowgli_list_t sasl_sessions; static mowgli_list_t sasl_mechanisms; static char sasl_mechlist_string[SASL_S2S_MAXLEN_ATONCE_B64]; static bool sasl_hide_server_names; static mowgli_eventloop_timer_t *sasl_delete_stale_timer = NULL; static struct service *saslsvs = NULL; static const char * sasl_format_sourceinfo(struct sourceinfo *const restrict si, const bool full) { static char result[BUFSIZE]; const struct sasl_sourceinfo *const ssi = (const struct sasl_sourceinfo *) si; if (full) (void) snprintf(result, sizeof result, "SASL/%s:%s[%s]:%s", *ssi->sess->uid ? ssi->sess->uid : "?", ssi->sess->host ? ssi->sess->host : "?", ssi->sess->ip ? ssi->sess->ip : "?", ssi->sess->server ? ssi->sess->server->name : "?"); else (void) snprintf(result, sizeof result, "SASL(%s)", ssi->sess->host ? ssi->sess->host : "?"); return result; } static const char * sasl_get_source_name(struct sourceinfo *const restrict si) { static char result[HOSTLEN + 1 + NICKLEN + 11]; char description[BUFSIZE]; const struct sasl_sourceinfo *const ssi = (const struct sasl_sourceinfo *) si; if (ssi->sess->server && ! sasl_hide_server_names) (void) snprintf(description, sizeof description, "Unknown user on %s (via SASL)", ssi->sess->server->name); else (void) mowgli_strlcpy(description, "Unknown user (via SASL)", sizeof description); // we can reasonably assume that si->v is non-null as this is part of the SASL vtable if (si->sourcedesc) (void) snprintf(result, sizeof result, "<%s:%s>%s", description, si->sourcedesc, si->smu ? entity(si->smu)->name : ""); else (void) snprintf(result, sizeof result, "<%s>%s", description, si->smu ? entity(si->smu)->name : ""); return result; } static void sasl_sourceinfo_recreate(struct sasl_session *const restrict p) { static struct sourceinfo_vtable sasl_vtable = { .description = "SASL", .format = sasl_format_sourceinfo, .get_source_name = sasl_get_source_name, .get_source_mask = sasl_get_source_name, }; if (p->si) (void) atheme_object_unref(p->si); struct sasl_sourceinfo *const ssi = smalloc(sizeof *ssi); (void) atheme_object_init(atheme_object(ssi), "<sasl sourceinfo>", &sfree); ssi->parent.s = p->server; ssi->parent.connection = curr_uplink->conn; if (p->host) ssi->parent.sourcedesc = p->host; ssi->parent.service = saslsvs; ssi->parent.v = &sasl_vtable; ssi->parent.force_language = language_find("en"); ssi->sess = p; p->si = &ssi->parent; } static struct sasl_session * sasl_session_find(const char *const restrict uid) { if (! uid || ! *uid) return NULL; mowgli_node_t *n; MOWGLI_ITER_FOREACH(n, sasl_sessions.head) { struct sasl_session *const p = n->data; if (strcmp(p->uid, uid) == 0) return p; } return NULL; } static struct sasl_session * sasl_session_find_or_make(const struct sasl_message *const restrict smsg) { struct sasl_session *p; if (! 
(p = sasl_session_find(smsg->uid))) { p = smalloc(sizeof *p); p->server = smsg->server; (void) mowgli_strlcpy(p->uid, smsg->uid, sizeof p->uid); (void) mowgli_node_add(p, &p->node, &sasl_sessions); } return p; } static const struct sasl_mechanism * sasl_mechanism_find(const char *const restrict name) { mowgli_node_t *n; MOWGLI_ITER_FOREACH(n, sasl_mechanisms.head) { const struct sasl_mechanism *const mptr = n->data; if (strcmp(mptr->name, name) == 0) return mptr; } (void) slog(LG_DEBUG, "%s: cannot find mechanism '%s'!", MOWGLI_FUNC_NAME, name); return NULL; } static void sasl_server_eob(struct server ATHEME_VATTR_UNUSED *const restrict s) { (void) sasl_mechlist_sts(sasl_mechlist_string); } static void sasl_mechlist_string_build(const struct sasl_session *const restrict p, const struct myuser *const restrict mu, const char **const restrict avoid) { char buf[sizeof sasl_mechlist_string]; char *bufptr = buf; size_t written = 0; mowgli_node_t *n; (void) memset(buf, 0x00, sizeof buf); MOWGLI_ITER_FOREACH(n, sasl_mechanisms.head) { const struct sasl_mechanism *const mptr = n->data; bool in_avoid_list = false; continue_if_fail(mptr != NULL); for (size_t i = 0; avoid != NULL && avoid[i] != NULL; i++) { if (strcmp(mptr->name, avoid[i]) == 0) { in_avoid_list = true; break; } } if (in_avoid_list || (mptr->password_based && mu != NULL && (mu->flags & MU_NOPASSWORD))) continue; const size_t namelen = strlen(mptr->name); if (written + namelen >= sizeof buf) break; (void) memcpy(bufptr, mptr->name, namelen); bufptr += namelen; *bufptr++ = ','; written += namelen + 1; } if (written) *(--bufptr) = 0x00; if (p) (void) sasl_sts(p->uid, 'M', buf); else (void) memcpy(sasl_mechlist_string, buf, sizeof buf); } static void sasl_mechlist_do_rebuild(void) { (void) sasl_mechlist_string_build(NULL, NULL, NULL); if (me.connected) (void) sasl_mechlist_sts(sasl_mechlist_string); } static bool sasl_may_impersonate(struct myuser *const source_mu, struct myuser *const target_mu) { if (source_mu == target_mu) return true; char priv[BUFSIZE] = PRIV_IMPERSONATE_ANY; // Check for wildcard priv if (has_priv_myuser(source_mu, priv)) return true; // Check for target-operclass specific priv const char *const classname = (target_mu->soper && target_mu->soper->classname) ? target_mu->soper->classname : "user"; (void) snprintf(priv, sizeof priv, PRIV_IMPERSONATE_CLASS_FMT, classname); if (has_priv_myuser(source_mu, priv)) return true; // Check for target-entity specific priv (void) snprintf(priv, sizeof priv, PRIV_IMPERSONATE_ENTITY_FMT, entity(target_mu)->name); if (has_priv_myuser(source_mu, priv)) return true; // Allow modules to check too struct hook_sasl_may_impersonate req = { .source_mu = source_mu, .target_mu = target_mu, .allowed = false, }; (void) hook_call_sasl_may_impersonate(&req); return req.allowed; } static struct myuser * sasl_user_can_login(struct sasl_session *const restrict p) { // source_mu is the user whose credentials we verified ("authentication id" / authcid) // target_mu is the user who will be ultimately logged in ("authorization id" / authzid) struct myuser *source_mu; struct myuser *target_mu; if (! *p->authceid || ! (source_mu = myuser_find_uid(p->authceid))) return NULL; if (! *p->authzeid) { target_mu = source_mu; (void) mowgli_strlcpy(p->authzid, p->authcid, sizeof p->authzid); (void) mowgli_strlcpy(p->authzeid, p->authceid, sizeof p->authzeid); } else if (! (target_mu = myuser_find_uid(p->authzeid))) return NULL; if (! 
sasl_may_impersonate(source_mu, target_mu)) { (void) logcommand(p->si, CMDLOG_LOGIN, "denied IMPERSONATE by \2%s\2 to \2%s\2", entity(source_mu)->name, entity(target_mu)->name); return NULL; } if (! (target_mu->flags & MU_LOGINNOLIMIT) && !has_priv_myuser(target_mu, PRIV_LOGIN_NOLIMIT) && MOWGLI_LIST_LENGTH(&target_mu->logins) >= me.maxlogins) { (void) logcommand(p->si, CMDLOG_LOGIN, "failed LOGIN to \2%s\2 (too many logins)", entity(target_mu)->name); return NULL; } /* We just did SASL authentication for a user. With IRCds which do not * have unique UIDs for users, we will likely be expecting the login * data to be bursted. As a result, we should give the core a heads' * up that this is going to happen so that hooks will be properly * fired... */ if (ircd->flags & IRCD_SASL_USE_PUID) { target_mu->flags &= ~MU_NOBURSTLOGIN; target_mu->flags |= MU_PENDINGLOGIN; } if (target_mu != source_mu) (void) logcommand(p->si, CMDLOG_LOGIN, "allowed IMPERSONATE by \2%s\2 to \2%s\2", entity(source_mu)->name, entity(target_mu)->name); return target_mu; } static void sasl_session_destroy(struct sasl_session *const restrict p) { mowgli_node_t *n; MOWGLI_ITER_FOREACH(n, sasl_sessions.head) { if (n == &p->node && n->data == p) { (void) mowgli_node_delete(n, &sasl_sessions); break; } } if (p->mechptr && p->mechptr->mech_finish) (void) p->mechptr->mech_finish(p); if (p->si) (void) atheme_object_unref(p->si); struct user *const u = user_find(p->uid); if (u) // If the user is still on the network, allow them to use NickServ IDENTIFY/LOGIN again u->flags &= ~UF_DOING_SASL; (void) sfree(p->certfp); (void) sfree(p->host); (void) sfree(p->buf); (void) sfree(p->ip); (void) sfree(p); } static inline void sasl_session_abort(struct sasl_session *const restrict p) { (void) sasl_sts(p->uid, 'D', "F"); (void) sasl_session_destroy(p); } static bool sasl_session_success(struct sasl_session *const restrict p, struct myuser *const restrict mu, const bool destroy) { /* Only burst an account name and vhost if the user has verified their e-mail address; * this prevents spambots from creating accounts to join registered-user-only channels */ if (! (mu->flags & MU_WAITAUTH)) { const struct metadata *const md = metadata_find(mu, "private:usercloak"); const char *const cloak = ((md != NULL) ? md->value : "*"); (void) svslogin_sts(p->uid, "*", "*", cloak, mu); } (void) sasl_sts(p->uid, 'D', "S"); if (destroy) (void) sasl_session_destroy(p); return true; } static bool sasl_handle_login(struct sasl_session *const restrict p, struct user *const u, struct myuser *mu) { bool was_killed = false; // Find the account if necessary if (! mu) { if (! *p->authzeid) { (void) slog(LG_INFO, "%s: session for '%s' without an authzeid (BUG)", MOWGLI_FUNC_NAME, u->nick); (void) notice(saslsvs->nick, u->nick, LOGIN_CANCELLED_STR); return false; } if (! (mu = myuser_find_uid(p->authzeid))) { if (*p->authzid) (void) notice(saslsvs->nick, u->nick, "Account %s dropped; login cancelled", p->authzid); else (void) notice(saslsvs->nick, u->nick, "Account dropped; login cancelled"); return false; } } // If the user is already logged in, and not to the same account, log them out first if (u->myuser && u->myuser != mu) { if (is_soper(u->myuser)) (void) logcommand_user(saslsvs, u, CMDLOG_ADMIN, "DESOPER: \2%s\2 as \2%s\2", u->nick, entity(u->myuser)->name); (void) logcommand_user(saslsvs, u, CMDLOG_LOGIN, "LOGOUT"); if (! 
(was_killed = ircd_on_logout(u, entity(u->myuser)->name))) { mowgli_node_t *n; MOWGLI_ITER_FOREACH(n, u->myuser->logins.head) { if (n->data == u) { (void) mowgli_node_delete(n, &u->myuser->logins); (void) mowgli_node_free(n); break; } } u->myuser = NULL; } } // If they were not killed above, log them in now if (! was_killed) { if (u->myuser != mu) { // If they're not logged in, or logging in to a different account, do a full login (void) myuser_login(saslsvs, u, mu, false); (void) logcommand_user(saslsvs, u, CMDLOG_LOGIN, "LOGIN (%s)", p->mechptr->name); } else { // Otherwise, just update login time ... mu->lastlogin = CURRTIME; (void) logcommand_user(saslsvs, u, CMDLOG_LOGIN, "REAUTHENTICATE (%s)", p->mechptr->name); } } return true; } static enum sasl_mechanism_result ATHEME_FATTR_WUR sasl_process_input(struct sasl_session *const restrict p, char *const restrict buf, const size_t len, struct sasl_output_buf *const restrict outbuf) { // A single + character is not data at all -- invoke mech_step without an input buffer if (*buf == '+' && len == 1) return p->mechptr->mech_step(p, NULL, outbuf); unsigned char decbuf[SASL_S2S_MAXLEN_TOTAL_RAW + 1]; const size_t declen = base64_decode(buf, decbuf, SASL_S2S_MAXLEN_TOTAL_RAW); if (declen == BASE64_FAIL) { (void) slog(LG_DEBUG, "%s: base64_decode() failed", MOWGLI_FUNC_NAME); return ASASL_MRESULT_ERROR; } /* Account for the fact that the client may have sent whitespace; our * decoder is tolerant of whitespace and will skip over it -- amdj */ if (declen == 0) return p->mechptr->mech_step(p, NULL, outbuf); unsigned int inflags = ASASL_INFLAG_NONE; const struct sasl_input_buf inbuf = { .buf = decbuf, .len = declen, .flags = &inflags, }; // Ensure input is NULL-terminated for modules that want to process the data as a string decbuf[declen] = 0x00; // Pass the data to the mechanism const enum sasl_mechanism_result rc = p->mechptr->mech_step(p, &inbuf, outbuf); // The mechanism instructed us to wipe the input data now that it has been processed if (inflags & ASASL_INFLAG_WIPE_BUF) { /* If we got here, the bufferred base64-encoded input data is either in a * dedicated buffer (buf == p->buf && len == p->len) or directly from a * parv[] inside struct sasl_message. Either way buf is mutable. 
-- amdj */ (void) smemzero(buf, len); // Erase the base64-encoded input data (void) smemzero(decbuf, declen); // Erase the base64-decoded input data } return rc; } static bool ATHEME_FATTR_WUR sasl_process_output(struct sasl_session *const restrict p, struct sasl_output_buf *const restrict outbuf) { char encbuf[SASL_S2S_MAXLEN_TOTAL_B64 + 1]; const size_t enclen = base64_encode(outbuf->buf, outbuf->len, encbuf, sizeof encbuf); if ((outbuf->flags & ASASL_OUTFLAGS_WIPE_FREE_BUF) == ASASL_OUTFLAGS_WIPE_FREE_BUF) // The mechanism instructed us to wipe and free the output data now that it has been encoded (void) smemzerofree(outbuf->buf, outbuf->len); else if (outbuf->flags & ASASL_OUTFLAG_WIPE_BUF) // The mechanism instructed us to wipe the output data now that it has been encoded (void) smemzero(outbuf->buf, outbuf->len); else if (outbuf->flags & ASASL_OUTFLAG_FREE_BUF) // The mechanism instructed us to free the output data now that it has been encoded (void) sfree(outbuf->buf); outbuf->buf = NULL; outbuf->len = 0; if (enclen == BASE64_FAIL) { (void) slog(LG_ERROR, "%s: base64_encode() failed", MOWGLI_FUNC_NAME); return false; } char *encbufptr = encbuf; size_t encbuflast = SASL_S2S_MAXLEN_ATONCE_B64; for (size_t encbufrem = enclen; encbufrem != 0; /* No action */) { char encbufpart[SASL_S2S_MAXLEN_ATONCE_B64 + 1]; const size_t encbufptrlen = MINIMUM(SASL_S2S_MAXLEN_ATONCE_B64, encbufrem); (void) memset(encbufpart, 0x00, sizeof encbufpart); (void) memcpy(encbufpart, encbufptr, encbufptrlen); (void) sasl_sts(p->uid, 'C', encbufpart); // The mechanism instructed us to wipe the output data now that it has been transmitted if (outbuf->flags & ASASL_OUTFLAG_WIPE_BUF) { (void) smemzero(encbufpart, encbufptrlen); (void) smemzero(encbufptr, encbufptrlen); } encbufptr += encbufptrlen; encbufrem -= encbufptrlen; encbuflast = encbufptrlen; } /* The end of a packet is indicated by a string not of the maximum length. If the last string * was the maximum length, send another, empty string, to advance the session. -- amdj */ if (encbuflast == SASL_S2S_MAXLEN_ATONCE_B64) (void) sasl_sts(p->uid, 'C', "+"); return true; } /* given an entire sasl message, advance session by passing data to mechanism * and feeding returned data back to client. */ static bool ATHEME_FATTR_WUR sasl_process_packet(struct sasl_session *const restrict p, char *const restrict buf, const size_t len) { struct sasl_output_buf outbuf = { .buf = NULL, .len = 0, .flags = ASASL_OUTFLAG_NONE, }; enum sasl_mechanism_result rc; bool have_responded = false; if (! p->mechptr && ! len) { // First piece of data in a session is the name of the SASL mechanism that will be used if (! (p->mechptr = sasl_mechanism_find(buf))) { (void) sasl_sts(p->uid, 'M', sasl_mechlist_string); return false; } (void) sasl_sourceinfo_recreate(p); if (p->mechptr->mech_start) rc = p->mechptr->mech_start(p, &outbuf); else rc = ASASL_MRESULT_CONTINUE; } else if (! p->mechptr) { (void) slog(LG_DEBUG, "%s: session has no mechanism?", MOWGLI_FUNC_NAME); return false; } else { rc = sasl_process_input(p, buf, len, &outbuf); } if (outbuf.buf && outbuf.len) { if (! sasl_process_output(p, &outbuf)) return false; have_responded = true; } // Some progress has been made, reset timeout. p->flags &= ~ASASL_SFLAG_MARKED_FOR_DELETION; switch (rc) { case ASASL_MRESULT_CONTINUE: { if (! have_responded) /* We want more data from the client, but we haven't sent any of our own. * Send an empty string to advance the session. 
-- amdj */ (void) sasl_sts(p->uid, 'C', "+"); return true; } case ASASL_MRESULT_SUCCESS: { struct user *const u = user_find(p->uid); struct myuser *const mu = sasl_user_can_login(p); if (! mu) { if (u) (void) notice(saslsvs->nick, u->nick, LOGIN_CANCELLED_STR); return false; } /* If the user is already on the network, attempt to log them in immediately. * Otherwise, we will log them in on introduction of user to network */ if (u && ! sasl_handle_login(p, u, mu)) return false; return sasl_session_success(p, mu, (u != NULL)); } case ASASL_MRESULT_FAILURE: { if (*p->authceid) { /* If we reach this, they failed SASL auth, so if they were trying * to authenticate as a specific user, run bad_password() on them. */ struct myuser *const mu = myuser_find_uid(p->authceid); if (! mu) return false; /* We might have more information to construct a more accurate sourceinfo now? * TODO: Investigate whether this is necessary */ (void) sasl_sourceinfo_recreate(p); (void) logcommand(p->si, CMDLOG_LOGIN, "failed LOGIN (%s) to \2%s\2 (bad password)", p->mechptr->name, entity(mu)->name); (void) bad_password(p->si, mu); } return false; } case ASASL_MRESULT_ERROR: return false; } /* This is only here to keep GCC happy -- Clang can see that the switch() handles all legal * values of the enumeration, and so knows that this function will never get to this point; * GCC is dumb, and warns that control reaches the end of this non-void function. -- amdj */ return false; } static bool ATHEME_FATTR_WUR sasl_process_buffer(struct sasl_session *const restrict p) { // Ensure the buffer is NULL-terminated so that base64_decode() doesn't overrun it p->buf[p->len] = 0x00; if (! sasl_process_packet(p, p->buf, p->len)) return false; (void) sfree(p->buf); p->buf = NULL; p->len = 0; return true; } static void sasl_input_hostinfo(const struct sasl_message *const restrict smsg, struct sasl_session *const restrict p) { p->host = sstrdup(smsg->parv[0]); p->ip = sstrdup(smsg->parv[1]); if (smsg->parc >= 3 && strcmp(smsg->parv[2], "P") != 0) p->flags |= ASASL_SFLAG_CLIENT_SECURE; } static bool ATHEME_FATTR_WUR sasl_input_startauth(const struct sasl_message *const restrict smsg, struct sasl_session *const restrict p) { if (strcmp(smsg->parv[0], "EXTERNAL") == 0) { if (smsg->parc < 2) { (void) slog(LG_DEBUG, "%s: client %s starting EXTERNAL authentication without a " "fingerprint", MOWGLI_FUNC_NAME, p->uid); return false; } (void) sfree(p->certfp); p->certfp = sstrdup(smsg->parv[1]); p->flags |= ASASL_SFLAG_CLIENT_SECURE; } struct user *const u = user_find(p->uid); if (u && u->myuser) { /* If the user is already on the network, they're doing an IRCv3.2 SASL * reauthentication. This means that if the user is logged in, we need * to call the user_can_logout hooks and maybe abort the exchange now. */ (void) slog(LG_DEBUG, "%s: user %s ('%s') is logged in as '%s' -- executing user_can_logout hooks", MOWGLI_FUNC_NAME, p->uid, u->nick, entity(u->myuser)->name); struct hook_user_logout_check req = { .si = p->si, .u = u, .allowed = true, .relogin = true, }; (void) hook_call_user_can_logout(&req); if (! req.allowed) { (void) notice(saslsvs->nick, u->nick, "You cannot log out \2%s\2 because the server configuration disallows it.", entity(u->myuser)->name); return false; } } if (u) u->flags |= UF_DOING_SASL; return sasl_process_packet(p, smsg->parv[0], 0); } static bool ATHEME_FATTR_WUR sasl_input_clientdata(const struct sasl_message *const restrict smsg, struct sasl_session *const restrict p) { /* This is complicated. 
* * Clients are restricted to sending us 300 bytes (400 Base-64 characters), but the mechanism * that they have chosen could require them to send more than this amount, so they have to send * it 400 Base-64 characters at a time in stages. When we receive data less than 400 characters, * we know we don't need to buffer any more data, and can finally process it. * * However, if the client wants to send us a multiple of 400 characters and no more, we would be * waiting forever for them to send 'the rest', even though there isn't any. This is solved by * having them send a single '+' character to indicate that they have no more data to send. * * This is also what clients send us when they do not want to send us any data at all, and in * either event, this is *NOT* *DATA* we are receiving, and we should not buffer it. * * Also, if the data is a single '*' character, the client is aborting authentication. Servers * should send us a 'D' packet instead of a 'C *' packet in this case, but this is for if they * don't. Note that this will usually result in the client getting a 904 numeric instead of 906, * but the alternative is not treating '*' specially and then going on to fail to decode it in * sasl_process_input() above, which will result in ... an aborted session and a 904 numeric. * So this just saves time. */ const size_t len = strlen(smsg->parv[0]); // Abort? if (len == 1 && smsg->parv[0][0] == '*') return false; // End of data? if (len == 1 && smsg->parv[0][0] == '+') { if (p->buf) return sasl_process_buffer(p); // This function already deals with the special case of 1 '+' character return sasl_process_packet(p, smsg->parv[0], len); } /* Optimisation: If there is no buffer yet and this data is less than 400 characters, we don't * need to buffer it at all, and can process it immediately. */ if (! p->buf && len < SASL_S2S_MAXLEN_ATONCE_B64) return sasl_process_packet(p, smsg->parv[0], len); /* We need to buffer the data now, but first check if the client hasn't sent us an excessive * amount already. */ if ((p->len + len) > SASL_S2S_MAXLEN_TOTAL_B64) { (void) slog(LG_DEBUG, "%s: client %s has exceeded allowed data length", MOWGLI_FUNC_NAME, p->uid); return false; } // (Re)allocate a buffer, append the received data to it, and update its recorded length. p->buf = srealloc(p->buf, p->len + len + 1); (void) memcpy(p->buf + p->len, smsg->parv[0], len); p->len += len; // Messages not exactly 400 characters are the end of data. if (len < SASL_S2S_MAXLEN_ATONCE_B64) return sasl_process_buffer(p); return true; } static void sasl_input(struct sasl_message *const restrict smsg) { struct sasl_session *const p = sasl_session_find_or_make(smsg); bool ret = true; switch (smsg->mode) { case 'H': // (H)ost information (void) sasl_input_hostinfo(smsg, p); break; case 'S': // (S)tart authentication ret = sasl_input_startauth(smsg, p); break; case 'C': // (C)lient data ret = sasl_input_clientdata(smsg, p); break; case 'D': // (D)one -- when we receive it, means client abort (void) sasl_session_destroy(p); break; } if (! ret) (void) sasl_session_abort(p); } static void sasl_user_add(struct hook_user_nick *const restrict data) { // If the user has been killed, don't do anything. struct user *const u = data->u; if (! u) return; // Not concerned unless it's an SASL login. struct sasl_session *const p = sasl_session_find(u->uid); if (! 
p) return; (void) sasl_handle_login(p, u, NULL); (void) sasl_session_destroy(p); } static void sasl_delete_stale(void ATHEME_VATTR_UNUSED *const restrict vptr) { mowgli_node_t *n, *tn; MOWGLI_ITER_FOREACH_SAFE(n, tn, sasl_sessions.head) { struct sasl_session *const p = n->data; if (p->flags & ASASL_SFLAG_MARKED_FOR_DELETION) (void) sasl_session_destroy(p); else p->flags |= ASASL_SFLAG_MARKED_FOR_DELETION; } } static void sasl_mech_register(const struct sasl_mechanism *const restrict mech) { if (sasl_mechanism_find(mech->name)) { (void) slog(LG_DEBUG, "%s: ignoring attempt to register %s again", MOWGLI_FUNC_NAME, mech->name); return; } (void) slog(LG_DEBUG, "%s: registering %s", MOWGLI_FUNC_NAME, mech->name); mowgli_node_t *const node = mowgli_node_create(); if (! node) { (void) slog(LG_ERROR, "%s: mowgli_node_create() failed; out of memory?", MOWGLI_FUNC_NAME); return; } /* Here we cast it to (void *) because mowgli_node_add() expects that; it cannot be made const because then * it would have to return a (const void *) too which would cause multiple warnings any time it is actually * storing, and thus gets assigned to, a pointer to a mutable object. * * To avoid the cast generating a diagnostic due to dropping a const qualifier, we first cast to uintptr_t. * This is not unprecedented in this codebase; libathemecore/crypto.c & libathemecore/strshare.c do the * same thing. */ (void) mowgli_node_add((void *)((uintptr_t) mech), node, &sasl_mechanisms); (void) sasl_mechlist_do_rebuild(); } static void sasl_mech_unregister(const struct sasl_mechanism *const restrict mech) { mowgli_node_t *n, *tn; MOWGLI_ITER_FOREACH_SAFE(n, tn, sasl_sessions.head) { struct sasl_session *const session = n->data; if (session->mechptr == mech) { (void) slog(LG_DEBUG, "%s: destroying session %s", MOWGLI_FUNC_NAME, session->uid); (void) sasl_session_destroy(session); } } MOWGLI_ITER_FOREACH_SAFE(n, tn, sasl_mechanisms.head) { if (n->data == mech) { (void) slog(LG_DEBUG, "%s: unregistering %s", MOWGLI_FUNC_NAME, mech->name); (void) mowgli_node_delete(n, &sasl_mechanisms); (void) mowgli_node_free(n); (void) sasl_mechlist_do_rebuild(); break; } } } static inline bool ATHEME_FATTR_WUR sasl_authxid_can_login(struct sasl_session *const restrict p, const char *const restrict authxid, struct myuser **const restrict muo, char *const restrict val_name, char *const restrict val_eid, const char *const restrict other_val_eid) { return_val_if_fail(p != NULL, false); return_val_if_fail(p->si != NULL, false); return_val_if_fail(p->mechptr != NULL, false); struct myuser *const mu = myuser_find_by_nick(authxid); if (! mu) { (void) slog(LG_DEBUG, "%s: myuser_find_by_nick: does not exist", MOWGLI_FUNC_NAME); return false; } if (metadata_find(mu, "private:freeze:freezer")) { (void) logcommand(p->si, CMDLOG_LOGIN, "failed LOGIN to \2%s\2 (frozen)", entity(mu)->name); return false; } if (muo) *muo = mu; (void) mowgli_strlcpy(val_name, entity(mu)->name, NICKLEN + 1); (void) mowgli_strlcpy(val_eid, entity(mu)->id, IDLEN + 1); if (p->mechptr->password_based && (mu->flags & MU_NOPASSWORD)) { (void) logcommand(p->si, CMDLOG_LOGIN, "failed LOGIN %s to \2%s\2 (password authentication disabled)", p->mechptr->name, entity(mu)->name); return false; } if (strcmp(val_eid, other_val_eid) == 0) // We have already executed the user_can_login hook for this user return true; struct hook_user_login_check req = { .si = p->si, .mu = mu, .allowed = true, }; (void) hook_call_user_can_login(&req); if (! 
req.allowed) (void) logcommand(p->si, CMDLOG_LOGIN, "failed LOGIN to \2%s\2 (denied by hook)", entity(mu)->name); return req.allowed; } static bool ATHEME_FATTR_WUR sasl_authcid_can_login(struct sasl_session *const restrict p, const char *const restrict authcid, struct myuser **const restrict muo) { return sasl_authxid_can_login(p, authcid, muo, p->authcid, p->authceid, p->authzeid); } static bool ATHEME_FATTR_WUR sasl_authzid_can_login(struct sasl_session *const restrict p, const char *const restrict authzid, struct myuser **const restrict muo) { return sasl_authxid_can_login(p, authzid, muo, p->authzid, p->authzeid, p->authceid); } extern const struct sasl_core_functions sasl_core_functions; const struct sasl_core_functions sasl_core_functions = { .mech_register = &sasl_mech_register, .mech_unregister = &sasl_mech_unregister, .authcid_can_login = &sasl_authcid_can_login, .authzid_can_login = &sasl_authzid_can_login, .recalc_mechlist = &sasl_mechlist_string_build, }; static void saslserv_message_handler(struct sourceinfo *const restrict si, const int parc, char **const restrict parv) { // this should never happen if (parv[0][0] == '&') { (void) slog(LG_ERROR, "%s: got parv with local channel: %s", MOWGLI_FUNC_NAME, parv[0]); return; } // make a copy of the original for debugging char orig[BUFSIZE]; (void) mowgli_strlcpy(orig, parv[parc - 1], sizeof orig); // lets go through this to get the command char *const cmd = strtok(parv[parc - 1], " "); char *const text = strtok(NULL, ""); if (! cmd) return; if (*orig == '\001') { (void) handle_ctcp_common(si, cmd, text); return; } (void) command_fail(si, fault_noprivs, _("This service exists to identify connecting clients " "to the network. It has no public interface.")); } static void mod_init(struct module *const restrict m) { if (! (saslsvs = service_add("saslserv", &saslserv_message_handler))) { (void) slog(LG_ERROR, "%s: service_add() failed", m->name); m->mflags |= MODFLAG_FAIL; return; } (void) hook_add_sasl_input(&sasl_input); (void) hook_add_user_add(&sasl_user_add); (void) hook_add_server_eob(&sasl_server_eob); sasl_delete_stale_timer = mowgli_timer_add(base_eventloop, "sasl_delete_stale", &sasl_delete_stale, NULL, SECONDS_PER_MINUTE / 2); authservice_loaded++; (void) add_bool_conf_item("HIDE_SERVER_NAMES", &saslsvs->conf_table, 0, &sasl_hide_server_names, false); } static void mod_deinit(const enum module_unload_intent ATHEME_VATTR_UNUSED intent) { (void) hook_del_sasl_input(&sasl_input); (void) hook_del_user_add(&sasl_user_add); (void) hook_del_server_eob(&sasl_server_eob); (void) mowgli_timer_destroy(base_eventloop, sasl_delete_stale_timer); (void) del_conf_item("HIDE_SERVER_NAMES", &saslsvs->conf_table); (void) service_delete(saslsvs); authservice_loaded--; if (sasl_sessions.head) (void) slog(LG_ERROR, "saslserv/main: shutting down with a non-empty session list; " "a mechanism did not unregister itself! (BUG)"); } SIMPLE_DECLARE_MODULE_V1("saslserv/main", MODULE_UNLOAD_CAPABILITY_OK)
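Before the code_after blob that follows, it is worth flagging where the two versions diverge, since each blob is the whole of main.c: in the fixed version, sasl_handle_login no longer reads p->authzeid for the deferred login; instead, sasl_process_packet copies authzeid into a new pendingeid field at the moment the mechanism returns ASASL_MRESULT_SUCCESS, and the login path consults only that snapshot. The apparent intent is that further traffic on the same session can no longer retarget a login that has already been authorized. Below is a compilable sketch of this snapshot-at-success pattern, with simplified, hypothetical types in place of Atheme's.

/* Snapshot-at-success sketch (hypothetical struct; not Atheme's API). */
#include <stdio.h>

#define EID_LEN 9

struct sasl_session_sketch {
	char authzeid[EID_LEN + 1];   /* can be rewritten by later exchanges */
	char pendingeid[EID_LEN + 1]; /* frozen at SUCCESS; login reads this */
};

/* Run once, when the mechanism reports success. */
static void
on_mech_success(struct sasl_session_sketch *p)
{
	/* Freeze the authorized entity ID now; the deferred login must not
	 * re-read authzeid, which a subsequent (possibly attacker-driven)
	 * exchange on the same session could have replaced. */
	(void) snprintf(p->pendingeid, sizeof p->pendingeid, "%s", p->authzeid);
}

/* Run later, when the user is actually introduced to the network. */
static const char *
deferred_login_target(const struct sasl_session_sketch *p)
{
	return *p->pendingeid ? p->pendingeid : NULL; /* never authzeid here */
}

Keeping the snapshot in the session struct, rather than re-deriving it at login time, makes the deferred path independent of whatever state later mechanism steps mutate.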
/* * SPDX-License-Identifier: ISC * SPDX-URL: https://spdx.org/licenses/ISC.html * * Copyright (C) 2006-2015 Atheme Project (http://atheme.org/) * Copyright (C) 2017-2019 Atheme Development Group (https://atheme.github.io/) * * This file contains the main() routine. */ #include <atheme.h> #ifndef MINIMUM # define MINIMUM(a, b) (((a) < (b)) ? (a) : (b)) #endif #define ASASL_OUTFLAGS_WIPE_FREE_BUF (ASASL_OUTFLAG_WIPE_BUF | ASASL_OUTFLAG_FREE_BUF) #define LOGIN_CANCELLED_STR "There was a problem logging you in; login cancelled" static mowgli_list_t sasl_sessions; static mowgli_list_t sasl_mechanisms; static char sasl_mechlist_string[SASL_S2S_MAXLEN_ATONCE_B64]; static bool sasl_hide_server_names; static mowgli_eventloop_timer_t *sasl_delete_stale_timer = NULL; static struct service *saslsvs = NULL; static const char * sasl_format_sourceinfo(struct sourceinfo *const restrict si, const bool full) { static char result[BUFSIZE]; const struct sasl_sourceinfo *const ssi = (const struct sasl_sourceinfo *) si; if (full) (void) snprintf(result, sizeof result, "SASL/%s:%s[%s]:%s", *ssi->sess->uid ? ssi->sess->uid : "?", ssi->sess->host ? ssi->sess->host : "?", ssi->sess->ip ? ssi->sess->ip : "?", ssi->sess->server ? ssi->sess->server->name : "?"); else (void) snprintf(result, sizeof result, "SASL(%s)", ssi->sess->host ? ssi->sess->host : "?"); return result; } static const char * sasl_get_source_name(struct sourceinfo *const restrict si) { static char result[HOSTLEN + 1 + NICKLEN + 11]; char description[BUFSIZE]; const struct sasl_sourceinfo *const ssi = (const struct sasl_sourceinfo *) si; if (ssi->sess->server && ! sasl_hide_server_names) (void) snprintf(description, sizeof description, "Unknown user on %s (via SASL)", ssi->sess->server->name); else (void) mowgli_strlcpy(description, "Unknown user (via SASL)", sizeof description); // we can reasonably assume that si->v is non-null as this is part of the SASL vtable if (si->sourcedesc) (void) snprintf(result, sizeof result, "<%s:%s>%s", description, si->sourcedesc, si->smu ? entity(si->smu)->name : ""); else (void) snprintf(result, sizeof result, "<%s>%s", description, si->smu ? entity(si->smu)->name : ""); return result; } static void sasl_sourceinfo_recreate(struct sasl_session *const restrict p) { static struct sourceinfo_vtable sasl_vtable = { .description = "SASL", .format = sasl_format_sourceinfo, .get_source_name = sasl_get_source_name, .get_source_mask = sasl_get_source_name, }; if (p->si) (void) atheme_object_unref(p->si); struct sasl_sourceinfo *const ssi = smalloc(sizeof *ssi); (void) atheme_object_init(atheme_object(ssi), "<sasl sourceinfo>", &sfree); ssi->parent.s = p->server; ssi->parent.connection = curr_uplink->conn; if (p->host) ssi->parent.sourcedesc = p->host; ssi->parent.service = saslsvs; ssi->parent.v = &sasl_vtable; ssi->parent.force_language = language_find("en"); ssi->sess = p; p->si = &ssi->parent; } static struct sasl_session * sasl_session_find(const char *const restrict uid) { if (! uid || ! *uid) return NULL; mowgli_node_t *n; MOWGLI_ITER_FOREACH(n, sasl_sessions.head) { struct sasl_session *const p = n->data; if (strcmp(p->uid, uid) == 0) return p; } return NULL; } static struct sasl_session * sasl_session_find_or_make(const struct sasl_message *const restrict smsg) { struct sasl_session *p; if (! 
(p = sasl_session_find(smsg->uid))) { p = smalloc(sizeof *p); p->server = smsg->server; (void) mowgli_strlcpy(p->uid, smsg->uid, sizeof p->uid); (void) mowgli_node_add(p, &p->node, &sasl_sessions); } return p; } static const struct sasl_mechanism * sasl_mechanism_find(const char *const restrict name) { mowgli_node_t *n; MOWGLI_ITER_FOREACH(n, sasl_mechanisms.head) { const struct sasl_mechanism *const mptr = n->data; if (strcmp(mptr->name, name) == 0) return mptr; } (void) slog(LG_DEBUG, "%s: cannot find mechanism '%s'!", MOWGLI_FUNC_NAME, name); return NULL; } static void sasl_server_eob(struct server ATHEME_VATTR_UNUSED *const restrict s) { (void) sasl_mechlist_sts(sasl_mechlist_string); } static void sasl_mechlist_string_build(const struct sasl_session *const restrict p, const struct myuser *const restrict mu, const char **const restrict avoid) { char buf[sizeof sasl_mechlist_string]; char *bufptr = buf; size_t written = 0; mowgli_node_t *n; (void) memset(buf, 0x00, sizeof buf); MOWGLI_ITER_FOREACH(n, sasl_mechanisms.head) { const struct sasl_mechanism *const mptr = n->data; bool in_avoid_list = false; continue_if_fail(mptr != NULL); for (size_t i = 0; avoid != NULL && avoid[i] != NULL; i++) { if (strcmp(mptr->name, avoid[i]) == 0) { in_avoid_list = true; break; } } if (in_avoid_list || (mptr->password_based && mu != NULL && (mu->flags & MU_NOPASSWORD))) continue; const size_t namelen = strlen(mptr->name); if (written + namelen >= sizeof buf) break; (void) memcpy(bufptr, mptr->name, namelen); bufptr += namelen; *bufptr++ = ','; written += namelen + 1; } if (written) *(--bufptr) = 0x00; if (p) (void) sasl_sts(p->uid, 'M', buf); else (void) memcpy(sasl_mechlist_string, buf, sizeof buf); } static void sasl_mechlist_do_rebuild(void) { (void) sasl_mechlist_string_build(NULL, NULL, NULL); if (me.connected) (void) sasl_mechlist_sts(sasl_mechlist_string); } static bool sasl_may_impersonate(struct myuser *const source_mu, struct myuser *const target_mu) { if (source_mu == target_mu) return true; char priv[BUFSIZE] = PRIV_IMPERSONATE_ANY; // Check for wildcard priv if (has_priv_myuser(source_mu, priv)) return true; // Check for target-operclass specific priv const char *const classname = (target_mu->soper && target_mu->soper->classname) ? target_mu->soper->classname : "user"; (void) snprintf(priv, sizeof priv, PRIV_IMPERSONATE_CLASS_FMT, classname); if (has_priv_myuser(source_mu, priv)) return true; // Check for target-entity specific priv (void) snprintf(priv, sizeof priv, PRIV_IMPERSONATE_ENTITY_FMT, entity(target_mu)->name); if (has_priv_myuser(source_mu, priv)) return true; // Allow modules to check too struct hook_sasl_may_impersonate req = { .source_mu = source_mu, .target_mu = target_mu, .allowed = false, }; (void) hook_call_sasl_may_impersonate(&req); return req.allowed; } static struct myuser * sasl_user_can_login(struct sasl_session *const restrict p) { // source_mu is the user whose credentials we verified ("authentication id" / authcid) // target_mu is the user who will be ultimately logged in ("authorization id" / authzid) struct myuser *source_mu; struct myuser *target_mu; if (! *p->authceid || ! (source_mu = myuser_find_uid(p->authceid))) return NULL; if (! *p->authzeid) { target_mu = source_mu; (void) mowgli_strlcpy(p->authzid, p->authcid, sizeof p->authzid); (void) mowgli_strlcpy(p->authzeid, p->authceid, sizeof p->authzeid); } else if (! (target_mu = myuser_find_uid(p->authzeid))) return NULL; if (! 
sasl_may_impersonate(source_mu, target_mu)) { (void) logcommand(p->si, CMDLOG_LOGIN, "denied IMPERSONATE by \2%s\2 to \2%s\2", entity(source_mu)->name, entity(target_mu)->name); return NULL; } if (! (target_mu->flags & MU_LOGINNOLIMIT) && !has_priv_myuser(target_mu, PRIV_LOGIN_NOLIMIT) && MOWGLI_LIST_LENGTH(&target_mu->logins) >= me.maxlogins) { (void) logcommand(p->si, CMDLOG_LOGIN, "failed LOGIN to \2%s\2 (too many logins)", entity(target_mu)->name); return NULL; } /* We just did SASL authentication for a user. With IRCds which do not * have unique UIDs for users, we will likely be expecting the login * data to be bursted. As a result, we should give the core a heads' * up that this is going to happen so that hooks will be properly * fired... */ if (ircd->flags & IRCD_SASL_USE_PUID) { target_mu->flags &= ~MU_NOBURSTLOGIN; target_mu->flags |= MU_PENDINGLOGIN; } if (target_mu != source_mu) (void) logcommand(p->si, CMDLOG_LOGIN, "allowed IMPERSONATE by \2%s\2 to \2%s\2", entity(source_mu)->name, entity(target_mu)->name); return target_mu; } static void sasl_session_destroy(struct sasl_session *const restrict p) { mowgli_node_t *n; MOWGLI_ITER_FOREACH(n, sasl_sessions.head) { if (n == &p->node && n->data == p) { (void) mowgli_node_delete(n, &sasl_sessions); break; } } if (p->mechptr && p->mechptr->mech_finish) (void) p->mechptr->mech_finish(p); if (p->si) (void) atheme_object_unref(p->si); struct user *const u = user_find(p->uid); if (u) // If the user is still on the network, allow them to use NickServ IDENTIFY/LOGIN again u->flags &= ~UF_DOING_SASL; (void) sfree(p->certfp); (void) sfree(p->host); (void) sfree(p->buf); (void) sfree(p->ip); (void) sfree(p); } static inline void sasl_session_abort(struct sasl_session *const restrict p) { (void) sasl_sts(p->uid, 'D', "F"); (void) sasl_session_destroy(p); } static bool sasl_session_success(struct sasl_session *const restrict p, struct myuser *const restrict mu, const bool destroy) { /* Only burst an account name and vhost if the user has verified their e-mail address; * this prevents spambots from creating accounts to join registered-user-only channels */ if (! (mu->flags & MU_WAITAUTH)) { const struct metadata *const md = metadata_find(mu, "private:usercloak"); const char *const cloak = ((md != NULL) ? md->value : "*"); (void) svslogin_sts(p->uid, "*", "*", cloak, mu); } (void) sasl_sts(p->uid, 'D', "S"); if (destroy) (void) sasl_session_destroy(p); return true; } static bool sasl_handle_login(struct sasl_session *const restrict p, struct user *const u, struct myuser *mu) { bool was_killed = false; // Find the account if necessary if (! mu) { if (! *p->pendingeid) { (void) slog(LG_INFO, "%s: session for '%s' without an pendingeid (BUG)", MOWGLI_FUNC_NAME, u->nick); (void) notice(saslsvs->nick, u->nick, LOGIN_CANCELLED_STR); return false; } if (! (mu = myuser_find_uid(p->pendingeid))) { if (*p->authzid) (void) notice(saslsvs->nick, u->nick, "Account %s dropped; login cancelled", p->authzid); else (void) notice(saslsvs->nick, u->nick, "Account dropped; login cancelled"); return false; } } // If the user is already logged in, and not to the same account, log them out first if (u->myuser && u->myuser != mu) { if (is_soper(u->myuser)) (void) logcommand_user(saslsvs, u, CMDLOG_ADMIN, "DESOPER: \2%s\2 as \2%s\2", u->nick, entity(u->myuser)->name); (void) logcommand_user(saslsvs, u, CMDLOG_LOGIN, "LOGOUT"); if (! 
(was_killed = ircd_on_logout(u, entity(u->myuser)->name))) { mowgli_node_t *n; MOWGLI_ITER_FOREACH(n, u->myuser->logins.head) { if (n->data == u) { (void) mowgli_node_delete(n, &u->myuser->logins); (void) mowgli_node_free(n); break; } } u->myuser = NULL; } } // If they were not killed above, log them in now if (! was_killed) { if (u->myuser != mu) { // If they're not logged in, or logging in to a different account, do a full login (void) myuser_login(saslsvs, u, mu, false); (void) logcommand_user(saslsvs, u, CMDLOG_LOGIN, "LOGIN (%s)", p->mechptr->name); } else { // Otherwise, just update login time ... mu->lastlogin = CURRTIME; (void) logcommand_user(saslsvs, u, CMDLOG_LOGIN, "REAUTHENTICATE (%s)", p->mechptr->name); } } return true; } static enum sasl_mechanism_result ATHEME_FATTR_WUR sasl_process_input(struct sasl_session *const restrict p, char *const restrict buf, const size_t len, struct sasl_output_buf *const restrict outbuf) { // A single + character is not data at all -- invoke mech_step without an input buffer if (*buf == '+' && len == 1) return p->mechptr->mech_step(p, NULL, outbuf); unsigned char decbuf[SASL_S2S_MAXLEN_TOTAL_RAW + 1]; const size_t declen = base64_decode(buf, decbuf, SASL_S2S_MAXLEN_TOTAL_RAW); if (declen == BASE64_FAIL) { (void) slog(LG_DEBUG, "%s: base64_decode() failed", MOWGLI_FUNC_NAME); return ASASL_MRESULT_ERROR; } /* Account for the fact that the client may have sent whitespace; our * decoder is tolerant of whitespace and will skip over it -- amdj */ if (declen == 0) return p->mechptr->mech_step(p, NULL, outbuf); unsigned int inflags = ASASL_INFLAG_NONE; const struct sasl_input_buf inbuf = { .buf = decbuf, .len = declen, .flags = &inflags, }; // Ensure input is NULL-terminated for modules that want to process the data as a string decbuf[declen] = 0x00; // Pass the data to the mechanism const enum sasl_mechanism_result rc = p->mechptr->mech_step(p, &inbuf, outbuf); // The mechanism instructed us to wipe the input data now that it has been processed if (inflags & ASASL_INFLAG_WIPE_BUF) { /* If we got here, the bufferred base64-encoded input data is either in a * dedicated buffer (buf == p->buf && len == p->len) or directly from a * parv[] inside struct sasl_message. Either way buf is mutable. 
-- amdj */ (void) smemzero(buf, len); // Erase the base64-encoded input data (void) smemzero(decbuf, declen); // Erase the base64-decoded input data } return rc; } static bool ATHEME_FATTR_WUR sasl_process_output(struct sasl_session *const restrict p, struct sasl_output_buf *const restrict outbuf) { char encbuf[SASL_S2S_MAXLEN_TOTAL_B64 + 1]; const size_t enclen = base64_encode(outbuf->buf, outbuf->len, encbuf, sizeof encbuf); if ((outbuf->flags & ASASL_OUTFLAGS_WIPE_FREE_BUF) == ASASL_OUTFLAGS_WIPE_FREE_BUF) // The mechanism instructed us to wipe and free the output data now that it has been encoded (void) smemzerofree(outbuf->buf, outbuf->len); else if (outbuf->flags & ASASL_OUTFLAG_WIPE_BUF) // The mechanism instructed us to wipe the output data now that it has been encoded (void) smemzero(outbuf->buf, outbuf->len); else if (outbuf->flags & ASASL_OUTFLAG_FREE_BUF) // The mechanism instructed us to free the output data now that it has been encoded (void) sfree(outbuf->buf); outbuf->buf = NULL; outbuf->len = 0; if (enclen == BASE64_FAIL) { (void) slog(LG_ERROR, "%s: base64_encode() failed", MOWGLI_FUNC_NAME); return false; } char *encbufptr = encbuf; size_t encbuflast = SASL_S2S_MAXLEN_ATONCE_B64; for (size_t encbufrem = enclen; encbufrem != 0; /* No action */) { char encbufpart[SASL_S2S_MAXLEN_ATONCE_B64 + 1]; const size_t encbufptrlen = MINIMUM(SASL_S2S_MAXLEN_ATONCE_B64, encbufrem); (void) memset(encbufpart, 0x00, sizeof encbufpart); (void) memcpy(encbufpart, encbufptr, encbufptrlen); (void) sasl_sts(p->uid, 'C', encbufpart); // The mechanism instructed us to wipe the output data now that it has been transmitted if (outbuf->flags & ASASL_OUTFLAG_WIPE_BUF) { (void) smemzero(encbufpart, encbufptrlen); (void) smemzero(encbufptr, encbufptrlen); } encbufptr += encbufptrlen; encbufrem -= encbufptrlen; encbuflast = encbufptrlen; } /* The end of a packet is indicated by a string not of the maximum length. If the last string * was the maximum length, send another, empty string, to advance the session. -- amdj */ if (encbuflast == SASL_S2S_MAXLEN_ATONCE_B64) (void) sasl_sts(p->uid, 'C', "+"); return true; } /* given an entire sasl message, advance session by passing data to mechanism * and feeding returned data back to client. */ static bool ATHEME_FATTR_WUR sasl_process_packet(struct sasl_session *const restrict p, char *const restrict buf, const size_t len) { struct sasl_output_buf outbuf = { .buf = NULL, .len = 0, .flags = ASASL_OUTFLAG_NONE, }; enum sasl_mechanism_result rc; bool have_responded = false; if (! p->mechptr && ! len) { // First piece of data in a session is the name of the SASL mechanism that will be used if (! (p->mechptr = sasl_mechanism_find(buf))) { (void) sasl_sts(p->uid, 'M', sasl_mechlist_string); return false; } (void) sasl_sourceinfo_recreate(p); if (p->mechptr->mech_start) rc = p->mechptr->mech_start(p, &outbuf); else rc = ASASL_MRESULT_CONTINUE; } else if (! p->mechptr) { (void) slog(LG_DEBUG, "%s: session has no mechanism?", MOWGLI_FUNC_NAME); return false; } else { rc = sasl_process_input(p, buf, len, &outbuf); } if (outbuf.buf && outbuf.len) { if (! sasl_process_output(p, &outbuf)) return false; have_responded = true; } // Some progress has been made, reset timeout. p->flags &= ~ASASL_SFLAG_MARKED_FOR_DELETION; switch (rc) { case ASASL_MRESULT_CONTINUE: { if (! have_responded) /* We want more data from the client, but we haven't sent any of our own. * Send an empty string to advance the session. 
-- amdj */ (void) sasl_sts(p->uid, 'C', "+"); return true; } case ASASL_MRESULT_SUCCESS: { struct user *const u = user_find(p->uid); struct myuser *const mu = sasl_user_can_login(p); if (! mu) { if (u) (void) notice(saslsvs->nick, u->nick, LOGIN_CANCELLED_STR); return false; } (void) mowgli_strlcpy(p->pendingeid, p->authzeid, sizeof p->pendingeid); /* If the user is already on the network, attempt to log them in immediately. * Otherwise, we will log them in on introduction of user to network */ if (u && ! sasl_handle_login(p, u, mu)) return false; return sasl_session_success(p, mu, (u != NULL)); } case ASASL_MRESULT_FAILURE: { if (*p->authceid) { /* If we reach this, they failed SASL auth, so if they were trying * to authenticate as a specific user, run bad_password() on them. */ struct myuser *const mu = myuser_find_uid(p->authceid); if (! mu) return false; /* We might have more information to construct a more accurate sourceinfo now? * TODO: Investigate whether this is necessary */ (void) sasl_sourceinfo_recreate(p); (void) logcommand(p->si, CMDLOG_LOGIN, "failed LOGIN (%s) to \2%s\2 (bad password)", p->mechptr->name, entity(mu)->name); (void) bad_password(p->si, mu); } return false; } case ASASL_MRESULT_ERROR: return false; } /* This is only here to keep GCC happy -- Clang can see that the switch() handles all legal * values of the enumeration, and so knows that this function will never get to this point; * GCC is dumb, and warns that control reaches the end of this non-void function. -- amdj */ return false; } static bool ATHEME_FATTR_WUR sasl_process_buffer(struct sasl_session *const restrict p) { // Ensure the buffer is NULL-terminated so that base64_decode() doesn't overrun it p->buf[p->len] = 0x00; if (! sasl_process_packet(p, p->buf, p->len)) return false; (void) sfree(p->buf); p->buf = NULL; p->len = 0; return true; } static void sasl_input_hostinfo(const struct sasl_message *const restrict smsg, struct sasl_session *const restrict p) { p->host = sstrdup(smsg->parv[0]); p->ip = sstrdup(smsg->parv[1]); if (smsg->parc >= 3 && strcmp(smsg->parv[2], "P") != 0) p->flags |= ASASL_SFLAG_CLIENT_SECURE; } static bool ATHEME_FATTR_WUR sasl_input_startauth(const struct sasl_message *const restrict smsg, struct sasl_session *const restrict p) { if (strcmp(smsg->parv[0], "EXTERNAL") == 0) { if (smsg->parc < 2) { (void) slog(LG_DEBUG, "%s: client %s starting EXTERNAL authentication without a " "fingerprint", MOWGLI_FUNC_NAME, p->uid); return false; } (void) sfree(p->certfp); p->certfp = sstrdup(smsg->parv[1]); p->flags |= ASASL_SFLAG_CLIENT_SECURE; } struct user *const u = user_find(p->uid); if (u && u->myuser) { /* If the user is already on the network, they're doing an IRCv3.2 SASL * reauthentication. This means that if the user is logged in, we need * to call the user_can_logout hooks and maybe abort the exchange now. */ (void) slog(LG_DEBUG, "%s: user %s ('%s') is logged in as '%s' -- executing user_can_logout hooks", MOWGLI_FUNC_NAME, p->uid, u->nick, entity(u->myuser)->name); struct hook_user_logout_check req = { .si = p->si, .u = u, .allowed = true, .relogin = true, }; (void) hook_call_user_can_logout(&req); if (! 
req.allowed) { (void) notice(saslsvs->nick, u->nick, "You cannot log out \2%s\2 because the server configuration disallows it.", entity(u->myuser)->name); return false; } } if (u) u->flags |= UF_DOING_SASL; return sasl_process_packet(p, smsg->parv[0], 0); } static bool ATHEME_FATTR_WUR sasl_input_clientdata(const struct sasl_message *const restrict smsg, struct sasl_session *const restrict p) { /* This is complicated. * * Clients are restricted to sending us 300 bytes (400 Base-64 characters), but the mechanism * that they have chosen could require them to send more than this amount, so they have to send * it 400 Base-64 characters at a time in stages. When we receive data less than 400 characters, * we know we don't need to buffer any more data, and can finally process it. * * However, if the client wants to send us a multiple of 400 characters and no more, we would be * waiting forever for them to send 'the rest', even though there isn't any. This is solved by * having them send a single '+' character to indicate that they have no more data to send. * * This is also what clients send us when they do not want to send us any data at all, and in * either event, this is *NOT* *DATA* we are receiving, and we should not buffer it. * * Also, if the data is a single '*' character, the client is aborting authentication. Servers * should send us a 'D' packet instead of a 'C *' packet in this case, but this is for if they * don't. Note that this will usually result in the client getting a 904 numeric instead of 906, * but the alternative is not treating '*' specially and then going on to fail to decode it in * sasl_process_input() above, which will result in ... an aborted session and a 904 numeric. * So this just saves time. */ const size_t len = strlen(smsg->parv[0]); // Abort? if (len == 1 && smsg->parv[0][0] == '*') return false; // End of data? if (len == 1 && smsg->parv[0][0] == '+') { if (p->buf) return sasl_process_buffer(p); // This function already deals with the special case of 1 '+' character return sasl_process_packet(p, smsg->parv[0], len); } /* Optimisation: If there is no buffer yet and this data is less than 400 characters, we don't * need to buffer it at all, and can process it immediately. */ if (! p->buf && len < SASL_S2S_MAXLEN_ATONCE_B64) return sasl_process_packet(p, smsg->parv[0], len); /* We need to buffer the data now, but first check if the client hasn't sent us an excessive * amount already. */ if ((p->len + len) > SASL_S2S_MAXLEN_TOTAL_B64) { (void) slog(LG_DEBUG, "%s: client %s has exceeded allowed data length", MOWGLI_FUNC_NAME, p->uid); return false; } // (Re)allocate a buffer, append the received data to it, and update its recorded length. p->buf = srealloc(p->buf, p->len + len + 1); (void) memcpy(p->buf + p->len, smsg->parv[0], len); p->len += len; // Messages not exactly 400 characters are the end of data. if (len < SASL_S2S_MAXLEN_ATONCE_B64) return sasl_process_buffer(p); return true; } static void sasl_input(struct sasl_message *const restrict smsg) { struct sasl_session *const p = sasl_session_find_or_make(smsg); bool ret = true; switch (smsg->mode) { case 'H': // (H)ost information (void) sasl_input_hostinfo(smsg, p); break; case 'S': // (S)tart authentication ret = sasl_input_startauth(smsg, p); break; case 'C': // (C)lient data ret = sasl_input_clientdata(smsg, p); break; case 'D': // (D)one -- when we receive it, means client abort (void) sasl_session_destroy(p); break; } if (! 
ret) (void) sasl_session_abort(p); } static void sasl_user_add(struct hook_user_nick *const restrict data) { // If the user has been killed, don't do anything. struct user *const u = data->u; if (! u) return; // Not concerned unless it's an SASL login. struct sasl_session *const p = sasl_session_find(u->uid); if (! p) return; (void) sasl_handle_login(p, u, NULL); (void) sasl_session_destroy(p); } static void sasl_delete_stale(void ATHEME_VATTR_UNUSED *const restrict vptr) { mowgli_node_t *n, *tn; MOWGLI_ITER_FOREACH_SAFE(n, tn, sasl_sessions.head) { struct sasl_session *const p = n->data; if (p->flags & ASASL_SFLAG_MARKED_FOR_DELETION) (void) sasl_session_destroy(p); else p->flags |= ASASL_SFLAG_MARKED_FOR_DELETION; } } static void sasl_mech_register(const struct sasl_mechanism *const restrict mech) { if (sasl_mechanism_find(mech->name)) { (void) slog(LG_DEBUG, "%s: ignoring attempt to register %s again", MOWGLI_FUNC_NAME, mech->name); return; } (void) slog(LG_DEBUG, "%s: registering %s", MOWGLI_FUNC_NAME, mech->name); mowgli_node_t *const node = mowgli_node_create(); if (! node) { (void) slog(LG_ERROR, "%s: mowgli_node_create() failed; out of memory?", MOWGLI_FUNC_NAME); return; } /* Here we cast it to (void *) because mowgli_node_add() expects that; it cannot be made const because then * it would have to return a (const void *) too which would cause multiple warnings any time it is actually * storing, and thus gets assigned to, a pointer to a mutable object. * * To avoid the cast generating a diagnostic due to dropping a const qualifier, we first cast to uintptr_t. * This is not unprecedented in this codebase; libathemecore/crypto.c & libathemecore/strshare.c do the * same thing. */ (void) mowgli_node_add((void *)((uintptr_t) mech), node, &sasl_mechanisms); (void) sasl_mechlist_do_rebuild(); } static void sasl_mech_unregister(const struct sasl_mechanism *const restrict mech) { mowgli_node_t *n, *tn; MOWGLI_ITER_FOREACH_SAFE(n, tn, sasl_sessions.head) { struct sasl_session *const session = n->data; if (session->mechptr == mech) { (void) slog(LG_DEBUG, "%s: destroying session %s", MOWGLI_FUNC_NAME, session->uid); (void) sasl_session_destroy(session); } } MOWGLI_ITER_FOREACH_SAFE(n, tn, sasl_mechanisms.head) { if (n->data == mech) { (void) slog(LG_DEBUG, "%s: unregistering %s", MOWGLI_FUNC_NAME, mech->name); (void) mowgli_node_delete(n, &sasl_mechanisms); (void) mowgli_node_free(n); (void) sasl_mechlist_do_rebuild(); break; } } } static inline bool ATHEME_FATTR_WUR sasl_authxid_can_login(struct sasl_session *const restrict p, const char *const restrict authxid, struct myuser **const restrict muo, char *const restrict val_name, char *const restrict val_eid, const char *const restrict other_val_eid) { return_val_if_fail(p != NULL, false); return_val_if_fail(p->si != NULL, false); return_val_if_fail(p->mechptr != NULL, false); struct myuser *const mu = myuser_find_by_nick(authxid); if (! 
mu) { (void) slog(LG_DEBUG, "%s: myuser_find_by_nick: does not exist", MOWGLI_FUNC_NAME); return false; } if (metadata_find(mu, "private:freeze:freezer")) { (void) logcommand(p->si, CMDLOG_LOGIN, "failed LOGIN to \2%s\2 (frozen)", entity(mu)->name); return false; } if (muo) *muo = mu; (void) mowgli_strlcpy(val_name, entity(mu)->name, NICKLEN + 1); (void) mowgli_strlcpy(val_eid, entity(mu)->id, IDLEN + 1); if (p->mechptr->password_based && (mu->flags & MU_NOPASSWORD)) { (void) logcommand(p->si, CMDLOG_LOGIN, "failed LOGIN %s to \2%s\2 (password authentication disabled)", p->mechptr->name, entity(mu)->name); return false; } if (strcmp(val_eid, other_val_eid) == 0) // We have already executed the user_can_login hook for this user return true; struct hook_user_login_check req = { .si = p->si, .mu = mu, .allowed = true, }; (void) hook_call_user_can_login(&req); if (! req.allowed) (void) logcommand(p->si, CMDLOG_LOGIN, "failed LOGIN to \2%s\2 (denied by hook)", entity(mu)->name); return req.allowed; } static bool ATHEME_FATTR_WUR sasl_authcid_can_login(struct sasl_session *const restrict p, const char *const restrict authcid, struct myuser **const restrict muo) { return sasl_authxid_can_login(p, authcid, muo, p->authcid, p->authceid, p->authzeid); } static bool ATHEME_FATTR_WUR sasl_authzid_can_login(struct sasl_session *const restrict p, const char *const restrict authzid, struct myuser **const restrict muo) { return sasl_authxid_can_login(p, authzid, muo, p->authzid, p->authzeid, p->authceid); } extern const struct sasl_core_functions sasl_core_functions; const struct sasl_core_functions sasl_core_functions = { .mech_register = &sasl_mech_register, .mech_unregister = &sasl_mech_unregister, .authcid_can_login = &sasl_authcid_can_login, .authzid_can_login = &sasl_authzid_can_login, .recalc_mechlist = &sasl_mechlist_string_build, }; static void saslserv_message_handler(struct sourceinfo *const restrict si, const int parc, char **const restrict parv) { // this should never happen if (parv[0][0] == '&') { (void) slog(LG_ERROR, "%s: got parv with local channel: %s", MOWGLI_FUNC_NAME, parv[0]); return; } // make a copy of the original for debugging char orig[BUFSIZE]; (void) mowgli_strlcpy(orig, parv[parc - 1], sizeof orig); // lets go through this to get the command char *const cmd = strtok(parv[parc - 1], " "); char *const text = strtok(NULL, ""); if (! cmd) return; if (*orig == '\001') { (void) handle_ctcp_common(si, cmd, text); return; } (void) command_fail(si, fault_noprivs, _("This service exists to identify connecting clients " "to the network. It has no public interface.")); } static void mod_init(struct module *const restrict m) { if (! 
(saslsvs = service_add("saslserv", &saslserv_message_handler))) { (void) slog(LG_ERROR, "%s: service_add() failed", m->name); m->mflags |= MODFLAG_FAIL; return; } (void) hook_add_sasl_input(&sasl_input); (void) hook_add_user_add(&sasl_user_add); (void) hook_add_server_eob(&sasl_server_eob); sasl_delete_stale_timer = mowgli_timer_add(base_eventloop, "sasl_delete_stale", &sasl_delete_stale, NULL, SECONDS_PER_MINUTE / 2); authservice_loaded++; (void) add_bool_conf_item("HIDE_SERVER_NAMES", &saslsvs->conf_table, 0, &sasl_hide_server_names, false); } static void mod_deinit(const enum module_unload_intent ATHEME_VATTR_UNUSED intent) { (void) hook_del_sasl_input(&sasl_input); (void) hook_del_user_add(&sasl_user_add); (void) hook_del_server_eob(&sasl_server_eob); (void) mowgli_timer_destroy(base_eventloop, sasl_delete_stale_timer); (void) del_conf_item("HIDE_SERVER_NAMES", &saslsvs->conf_table); (void) service_delete(saslsvs); authservice_loaded--; if (sasl_sessions.head) (void) slog(LG_ERROR, "saslserv/main: shutting down with a non-empty session list; " "a mechanism did not unregister itself! (BUG)"); } SIMPLE_DECLARE_MODULE_V1("saslserv/main", MODULE_UNLOAD_CAPABILITY_OK)
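The client-data handling in sasl_input_clientdata() above encodes a small chunking protocol: clients may send at most 300 bytes (400 Base-64 characters) per message, a chunk of exactly 400 characters means more data follows, any shorter chunk (including a lone '+') terminates the message, and the accumulated total is capped by SASL_S2S_MAXLEN_TOTAL_B64. A minimal, self-contained sketch of that rule follows; feed_chunk, CHUNK_B64 and TOTAL_B64_MAX are illustrative names and an illustrative cap, not Atheme API, and the real code also special-cases the '+' and '*' sentinels before buffering anything.

#include <string.h>

#define CHUNK_B64 400          /* illustrative stand-in for SASL_S2S_MAXLEN_ATONCE_B64 */
#define TOTAL_B64_MAX 8192     /* illustrative cap, not Atheme's constant */

enum feed_result { FEED_MORE, FEED_DONE, FEED_ABORT };

struct b64buf { char data[TOTAL_B64_MAX + 1]; size_t len; };

static enum feed_result
feed_chunk(struct b64buf *b, const char *chunk)
{
    const size_t n = strlen(chunk);
    if (b->len + n > TOTAL_B64_MAX)
        return FEED_ABORT;              /* excessive data: abort the session */
    (void) memcpy(b->data + b->len, chunk, n);
    b->len += n;
    b->data[b->len] = '\0';             /* keep NUL-terminated for the decoder */
    return (n < CHUNK_B64) ? FEED_DONE : FEED_MORE;  /* short chunk ends the message */
}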
sasl_handle_login(struct sasl_session *const restrict p, struct user *const u, struct myuser *mu) { bool was_killed = false; // Find the account if necessary if (! mu) { if (! *p->authzeid) { (void) slog(LG_INFO, "%s: session for '%s' without an authzeid (BUG)", MOWGLI_FUNC_NAME, u->nick); (void) notice(saslsvs->nick, u->nick, LOGIN_CANCELLED_STR); return false; } if (! (mu = myuser_find_uid(p->authzeid))) { if (*p->authzid) (void) notice(saslsvs->nick, u->nick, "Account %s dropped; login cancelled", p->authzid); else (void) notice(saslsvs->nick, u->nick, "Account dropped; login cancelled"); return false; } } // If the user is already logged in, and not to the same account, log them out first if (u->myuser && u->myuser != mu) { if (is_soper(u->myuser)) (void) logcommand_user(saslsvs, u, CMDLOG_ADMIN, "DESOPER: \2%s\2 as \2%s\2", u->nick, entity(u->myuser)->name); (void) logcommand_user(saslsvs, u, CMDLOG_LOGIN, "LOGOUT"); if (! (was_killed = ircd_on_logout(u, entity(u->myuser)->name))) { mowgli_node_t *n; MOWGLI_ITER_FOREACH(n, u->myuser->logins.head) { if (n->data == u) { (void) mowgli_node_delete(n, &u->myuser->logins); (void) mowgli_node_free(n); break; } } u->myuser = NULL; } } // If they were not killed above, log them in now if (! was_killed) { if (u->myuser != mu) { // If they're not logged in, or logging in to a different account, do a full login (void) myuser_login(saslsvs, u, mu, false); (void) logcommand_user(saslsvs, u, CMDLOG_LOGIN, "LOGIN (%s)", p->mechptr->name); } else { // Otherwise, just update login time ... mu->lastlogin = CURRTIME; (void) logcommand_user(saslsvs, u, CMDLOG_LOGIN, "REAUTHENTICATE (%s)", p->mechptr->name); } } return true; }
sasl_handle_login(struct sasl_session *const restrict p, struct user *const u, struct myuser *mu) { bool was_killed = false; // Find the account if necessary if (! mu) { if (! *p->pendingeid) { (void) slog(LG_INFO, "%s: session for '%s' without an pendingeid (BUG)", MOWGLI_FUNC_NAME, u->nick); (void) notice(saslsvs->nick, u->nick, LOGIN_CANCELLED_STR); return false; } if (! (mu = myuser_find_uid(p->pendingeid))) { if (*p->authzid) (void) notice(saslsvs->nick, u->nick, "Account %s dropped; login cancelled", p->authzid); else (void) notice(saslsvs->nick, u->nick, "Account dropped; login cancelled"); return false; } } // If the user is already logged in, and not to the same account, log them out first if (u->myuser && u->myuser != mu) { if (is_soper(u->myuser)) (void) logcommand_user(saslsvs, u, CMDLOG_ADMIN, "DESOPER: \2%s\2 as \2%s\2", u->nick, entity(u->myuser)->name); (void) logcommand_user(saslsvs, u, CMDLOG_LOGIN, "LOGOUT"); if (! (was_killed = ircd_on_logout(u, entity(u->myuser)->name))) { mowgli_node_t *n; MOWGLI_ITER_FOREACH(n, u->myuser->logins.head) { if (n->data == u) { (void) mowgli_node_delete(n, &u->myuser->logins); (void) mowgli_node_free(n); break; } } u->myuser = NULL; } } // If they were not killed above, log them in now if (! was_killed) { if (u->myuser != mu) { // If they're not logged in, or logging in to a different account, do a full login (void) myuser_login(saslsvs, u, mu, false); (void) logcommand_user(saslsvs, u, CMDLOG_LOGIN, "LOGIN (%s)", p->mechptr->name); } else { // Otherwise, just update login time ... mu->lastlogin = CURRTIME; (void) logcommand_user(saslsvs, u, CMDLOG_LOGIN, "REAUTHENTICATE (%s)", p->mechptr->name); } } return true; }
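Read side by side, the two versions of sasl_handle_login() above isolate the fix recorded in this entry: the EID consulted at login time is no longer p->authzeid, which later protocol traffic can still rewrite, but p->pendingeid, a snapshot taken in the ASASL_MRESULT_SUCCESS branch (the mowgli_strlcpy() call near the top of this section). A minimal sketch of the snapshot-at-decision-time pattern; struct session, on_mechanism_success and do_login are hypothetical names for illustration, not Atheme API.

#include <stdbool.h>
#include <stdio.h>

#define IDLEN 9

struct session {
    char authzeid[IDLEN + 1];    /* live field: later steps may rewrite it */
    char pendingeid[IDLEN + 1];  /* frozen copy taken when auth succeeded */
};

/* Runs exactly once, at the moment the mechanism reports success. */
static void on_mechanism_success(struct session *p)
{
    (void) snprintf(p->pendingeid, sizeof p->pendingeid, "%s", p->authzeid);
}

/* Runs later, when the user actually appears on the network. */
static bool do_login(const struct session *p)
{
    /* Decide from the snapshot: mutations to p->authzeid made between
     * success and login cannot change which account gets logged in. */
    return p->pendingeid[0] != '\0';
}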
{'added': [(388, '\t\tif (! *p->pendingeid)'), (390, '\t\t\t(void) slog(LG_INFO, "%s: session for \'%s\' without an pendingeid (BUG)",'), (396, '\t\tif (! (mu = myuser_find_uid(p->pendingeid)))'), (641, '\t\t\t(void) mowgli_strlcpy(p->pendingeid, p->authzeid, sizeof p->pendingeid);'), (642, '')], 'deleted': [(388, '\t\tif (! *p->authzeid)'), (390, '\t\t\t(void) slog(LG_INFO, "%s: session for \'%s\' without an authzeid (BUG)",'), (396, '\t\tif (! (mu = myuser_find_uid(p->authzeid)))')]}
num_lines_added: 5
num_lines_deleted: 3
num_lines_in_file: 762
num_tokens_in_file: 5,135
repo: https://github.com/atheme/atheme
cve_id: CVE-2022-24976
cwe_id: ['CWE-287']
file_name: ecma-container-object.c
method_name: ecma_op_internal_buffer_append
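The record that follows pairs two versions of this file. In the original ecma_op_internal_buffer_append() (code_before, next), a Map/WeakMap entry is written with two separate ecma_collection_push_back() calls, one for the key and one for the value; in the fixed version (code_after, further below), both slots are written with a single ecma_collection_append() call. One plausible reading of why that matters, inferred from the before/after pair itself rather than from an upstream advisory: map-like buffers are walked two slots at a time, so they must always hold a whole number of (key, value) pairs, and a two-step append briefly exposes a key with no value slot. A self-contained sketch of that invariant, with illustrative names (pairbuf and friends are not JerryScript API):

#include <assert.h>
#include <stddef.h>

#define CAP 8

typedef struct { int slots[CAP]; size_t used; } pairbuf;

static void push(pairbuf *b, int v) { assert(b->used < CAP); b->slots[b->used++] = v; }

/* Invariant every pair-striding observer relies on: used is even. */
static void check_pairs(const pairbuf *b) { assert(b->used % 2 == 0); }

/* Two-step append: an observer running between the pushes sees a half pair. */
static void append_pair_split(pairbuf *b, int key, int val) { push(b, key); push(b, val); }

/* Single-shot append: both slots are written before the length grows. */
static void append_pair_atomic(pairbuf *b, int key, int val)
{
    assert(b->used + 2 <= CAP);
    b->slots[b->used] = key;
    b->slots[b->used + 1] = val;
    b->used += 2;
    check_pairs(b);
}

int main(void)
{
    pairbuf b = { .used = 0 };
    append_pair_atomic(&b, 1, 100);
    append_pair_split(&b, 2, 200);   /* same end state, but with an unsafe midpoint */
    check_pairs(&b);
    return 0;
}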
/* Copyright JS Foundation and other contributors, http://js.foundation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "jcontext.h" #include "ecma-alloc.h" #include "ecma-array-object.h" #include "ecma-builtins.h" #include "ecma-builtin-helpers.h" #include "ecma-exceptions.h" #include "ecma-function-object.h" #include "ecma-gc.h" #include "ecma-helpers.h" #include "ecma-iterator-object.h" #include "ecma-container-object.h" #include "ecma-property-hashmap.h" #include "ecma-objects.h" #if ENABLED (JERRY_ES2015_BUILTIN_CONTAINER) /** \addtogroup ecma ECMA * @{ * * \addtogroup \addtogroup ecmamaphelpers ECMA builtin Map/Set helper functions * @{ */ /** * Create a new internal buffer. * * Note: * The first element of the collection tracks the size of the buffer. * ECMA_VALUE_EMPTY values are not calculated into the size. * * @return pointer to the internal buffer */ static inline ecma_collection_t * ecma_op_create_internal_buffer (void) { ecma_collection_t *collection_p = ecma_new_collection (); ecma_collection_push_back (collection_p, (ecma_value_t) 0); return collection_p; } /* ecma_op_create_internal_buffer */ /** * Append values to the internal buffer. */ static void ecma_op_internal_buffer_append (ecma_collection_t *container_p, /**< internal container pointer */ ecma_value_t key_arg, /**< key argument */ ecma_value_t value_arg, /**< value argument */ lit_magic_string_id_t lit_id) /**< class id */ { JERRY_ASSERT (container_p != NULL); ecma_collection_push_back (container_p, ecma_copy_value_if_not_object (key_arg)); if (lit_id == LIT_MAGIC_STRING_WEAKMAP_UL || lit_id == LIT_MAGIC_STRING_MAP_UL) { ecma_collection_push_back (container_p, ecma_copy_value_if_not_object (value_arg)); } ECMA_CONTAINER_SET_SIZE (container_p, ECMA_CONTAINER_GET_SIZE (container_p) + 1); } /* ecma_op_internal_buffer_append */ /** * Update the value of a given entry. */ static inline void ecma_op_internal_buffer_update (ecma_value_t *entry_p, /**< entry pointer */ ecma_value_t value_arg, /**< value argument */ lit_magic_string_id_t lit_id) /**< class id */ { JERRY_ASSERT (entry_p != NULL); if (lit_id == LIT_MAGIC_STRING_WEAKMAP_UL || lit_id == LIT_MAGIC_STRING_MAP_UL) { ecma_free_value_if_not_object (((ecma_container_pair_t *) entry_p)->value); ((ecma_container_pair_t *) entry_p)->value = ecma_copy_value_if_not_object (value_arg); } } /* ecma_op_internal_buffer_update */ /** * Delete element from the internal buffer. 
*/ static void ecma_op_internal_buffer_delete (ecma_collection_t *container_p, /**< internal container pointer */ ecma_container_pair_t *entry_p, /**< entry pointer */ lit_magic_string_id_t lit_id) /**< class id */ { JERRY_ASSERT (container_p != NULL); JERRY_ASSERT (entry_p != NULL); ecma_free_value_if_not_object (entry_p->key); entry_p->key = ECMA_VALUE_EMPTY; if (lit_id == LIT_MAGIC_STRING_WEAKMAP_UL || lit_id == LIT_MAGIC_STRING_MAP_UL) { ecma_free_value_if_not_object (entry_p->value); entry_p->value = ECMA_VALUE_EMPTY; } ECMA_CONTAINER_SET_SIZE (container_p, ECMA_CONTAINER_GET_SIZE (container_p) - 1); } /* ecma_op_internal_buffer_delete */ /** * Find an entry in the collection. * * @return pointer to the appropriate entry. */ static ecma_value_t * ecma_op_internal_buffer_find (ecma_collection_t *container_p, /**< internal container pointer */ ecma_value_t key_arg, /**< key argument */ lit_magic_string_id_t lit_id) /**< class id */ { JERRY_ASSERT (container_p != NULL); uint8_t entry_size = ecma_op_container_entry_size (lit_id); uint32_t entry_count = ECMA_CONTAINER_ENTRY_COUNT (container_p); ecma_value_t *start_p = ECMA_CONTAINER_START (container_p); for (uint32_t i = 0; i < entry_count; i += entry_size) { ecma_value_t *entry_p = start_p + i; if (ecma_op_same_value_zero (*entry_p, key_arg)) { return entry_p; } } return NULL; } /* ecma_op_internal_buffer_find */ /** * Get the value that belongs to the key. * * Note: in case of Set containers, the values are the same as the keys. * * @return ecma value */ static ecma_value_t ecma_op_container_get_value (ecma_value_t *entry_p, /**< entry (key) pointer */ lit_magic_string_id_t lit_id) /**< class id */ { JERRY_ASSERT (entry_p != NULL); if (lit_id == LIT_MAGIC_STRING_WEAKMAP_UL || lit_id == LIT_MAGIC_STRING_MAP_UL) { return ((ecma_container_pair_t *) entry_p)->value; } return *entry_p; } /* ecma_op_container_get_value */ /** * Get the size (in ecma_value_t) of the stored entries. * * @return size of the entries. */ uint8_t ecma_op_container_entry_size (lit_magic_string_id_t lit_id) /**< class id */ { if (lit_id == LIT_MAGIC_STRING_WEAKMAP_UL || lit_id == LIT_MAGIC_STRING_MAP_UL) { return ECMA_CONTAINER_PAIR_SIZE; } return ECMA_CONTAINER_VALUE_SIZE; } /* ecma_op_container_entry_size */ #if ENABLED (JERRY_ES2015_BUILTIN_WEAKSET) /** * Release the entries in the WeakSet container. */ static void ecma_op_container_free_weakset_entries (ecma_object_t *object_p, /**< object pointer */ ecma_collection_t *container_p) /** internal buffer pointer */ { JERRY_ASSERT (object_p != NULL); JERRY_ASSERT (container_p != NULL); uint32_t entry_count = ECMA_CONTAINER_ENTRY_COUNT (container_p); ecma_value_t *start_p = ECMA_CONTAINER_START (container_p); for (uint32_t i = 0; i < entry_count; i += ECMA_CONTAINER_VALUE_SIZE) { ecma_value_t *entry_p = start_p + i; if (ecma_is_value_empty (*entry_p)) { continue; } ecma_op_container_unref_weak (ecma_get_object_from_value (*entry_p), ecma_make_object_value (object_p)); ecma_op_container_remove_weak_entry (object_p, *entry_p); *entry_p = ECMA_VALUE_EMPTY; } } /* ecma_op_container_free_weakset_entries */ #endif /* ENABLED (JERRY_ES2015_BUILTIN_WEAKSET) */ #if ENABLED (JERRY_ES2015_BUILTIN_WEAKMAP) /** * Release the entries in the WeakMap container. 
*/ static void ecma_op_container_free_weakmap_entries (ecma_object_t *object_p, /**< object pointer */ ecma_collection_t *container_p) /**< internal buffer pointer */ { JERRY_ASSERT (object_p != NULL); JERRY_ASSERT (container_p != NULL); uint32_t entry_count = ECMA_CONTAINER_ENTRY_COUNT (container_p); ecma_value_t *start_p = ECMA_CONTAINER_START (container_p); for (uint32_t i = 0; i < entry_count; i += ECMA_CONTAINER_PAIR_SIZE) { ecma_container_pair_t *entry_p = (ecma_container_pair_t *) (start_p + i); if (ecma_is_value_empty (entry_p->key)) { continue; } ecma_op_container_unref_weak (ecma_get_object_from_value (entry_p->key), ecma_make_object_value (object_p)); ecma_op_container_remove_weak_entry (object_p, entry_p->key); ecma_free_value_if_not_object (entry_p->value); entry_p->key = ECMA_VALUE_EMPTY; entry_p->value = ECMA_VALUE_EMPTY; } } /* ecma_op_container_free_weakmap_entries */ #endif /* ENABLED (JERRY_ES2015_BUILTIN_WEAKMAP) */ #if ENABLED (JERRY_ES2015_BUILTIN_SET) /** * Release the entries in the Set container. */ static void ecma_op_container_free_set_entries (ecma_collection_t *container_p) { JERRY_ASSERT (container_p != NULL); uint32_t entry_count = ECMA_CONTAINER_ENTRY_COUNT (container_p); ecma_value_t *start_p = ECMA_CONTAINER_START (container_p); for (uint32_t i = 0; i < entry_count; i += ECMA_CONTAINER_VALUE_SIZE) { ecma_value_t *entry_p = start_p + i; if (ecma_is_value_empty (*entry_p)) { continue; } ecma_free_value_if_not_object (*entry_p); *entry_p = ECMA_VALUE_EMPTY; } } /* ecma_op_container_free_set_entries */ #endif /* ENABLED (JERRY_ES2015_BUILTIN_SET) */ #if ENABLED (JERRY_ES2015_BUILTIN_MAP) /** * Release the entries in the Map container. */ static void ecma_op_container_free_map_entries (ecma_collection_t *container_p) { JERRY_ASSERT (container_p != NULL); uint32_t entry_count = ECMA_CONTAINER_ENTRY_COUNT (container_p); ecma_value_t *start_p = ECMA_CONTAINER_START (container_p); for (uint32_t i = 0; i < entry_count; i += ECMA_CONTAINER_PAIR_SIZE) { ecma_container_pair_t *entry_p = (ecma_container_pair_t *) (start_p + i); if (ecma_is_value_empty (entry_p->key)) { continue; } ecma_free_value_if_not_object (entry_p->key); ecma_free_value_if_not_object (entry_p->value); entry_p->key = ECMA_VALUE_EMPTY; entry_p->value = ECMA_VALUE_EMPTY; } } /* ecma_op_container_free_map_entries */ #endif /* ENABLED (JERRY_ES2015_BUILTIN_MAP) */ /** * Release the internal buffer and the stored entries. 
*/ void ecma_op_container_free_entries (ecma_object_t *object_p) /**< collection object pointer */ { JERRY_ASSERT (object_p != NULL); ecma_extended_object_t *map_object_p = (ecma_extended_object_t *) object_p; ecma_collection_t *container_p = ECMA_GET_INTERNAL_VALUE_POINTER (ecma_collection_t, map_object_p->u.class_prop.u.value); switch (map_object_p->u.class_prop.class_id) { #if ENABLED (JERRY_ES2015_BUILTIN_WEAKSET) case LIT_MAGIC_STRING_WEAKSET_UL: { ecma_op_container_free_weakset_entries (object_p, container_p); break; } #endif /* ENABLED (JERRY_ES2015_BUILTIN_WEAKSET) */ #if ENABLED (JERRY_ES2015_BUILTIN_WEAKMAP) case LIT_MAGIC_STRING_WEAKMAP_UL: { ecma_op_container_free_weakmap_entries (object_p, container_p); break; } #endif /* ENABLED (JERRY_ES2015_BUILTIN_WEAKMAP) */ #if ENABLED (JERRY_ES2015_BUILTIN_SET) case LIT_MAGIC_STRING_SET_UL: { ecma_op_container_free_set_entries (container_p); break; } #endif /* ENABLED (JERRY_ES2015_BUILTIN_SET) */ #if ENABLED (JERRY_ES2015_BUILTIN_MAP) case LIT_MAGIC_STRING_MAP_UL: { ecma_op_container_free_map_entries (container_p); break; } #endif /* ENABLED (JERRY_ES2015_BUILTIN_MAP) */ default: { break; } } ECMA_CONTAINER_SET_SIZE (container_p, 0); } /* ecma_op_container_free_entries */ /** * Handle calling [[Construct]] of built-in Map/Set like objects * * @return ecma value */ ecma_value_t ecma_op_container_create (const ecma_value_t *arguments_list_p, /**< arguments list */ ecma_length_t arguments_list_len, /**< number of arguments */ lit_magic_string_id_t lit_id, /**< internal class id */ ecma_builtin_id_t proto_id) /**< prototype builtin id */ { JERRY_ASSERT (arguments_list_len == 0 || arguments_list_p != NULL); JERRY_ASSERT (lit_id == LIT_MAGIC_STRING_MAP_UL || lit_id == LIT_MAGIC_STRING_SET_UL || lit_id == LIT_MAGIC_STRING_WEAKMAP_UL || lit_id == LIT_MAGIC_STRING_WEAKSET_UL); JERRY_ASSERT (JERRY_CONTEXT (current_new_target) != NULL); ecma_object_t *proto_p = ecma_op_get_prototype_from_constructor (JERRY_CONTEXT (current_new_target), proto_id); if (JERRY_UNLIKELY (proto_p == NULL)) { return ECMA_VALUE_ERROR; } ecma_collection_t *container_p = ecma_op_create_internal_buffer (); ecma_object_t *object_p = ecma_create_object (proto_p, sizeof (ecma_extended_object_t), ECMA_OBJECT_TYPE_CLASS); ecma_deref_object (proto_p); ecma_extended_object_t *map_obj_p = (ecma_extended_object_t *) object_p; map_obj_p->u.class_prop.extra_info = ECMA_CONTAINER_FLAGS_EMPTY; map_obj_p->u.class_prop.class_id = (uint16_t) lit_id; if (lit_id == LIT_MAGIC_STRING_WEAKMAP_UL || lit_id == LIT_MAGIC_STRING_WEAKSET_UL) { map_obj_p->u.class_prop.extra_info |= ECMA_CONTAINER_FLAGS_WEAK; } ECMA_SET_INTERNAL_VALUE_POINTER (map_obj_p->u.class_prop.u.value, container_p); ecma_value_t set_value = ecma_make_object_value (object_p); ecma_value_t result = set_value; #if ENABLED (JERRY_ES2015) if (arguments_list_len == 0) { return result; } ecma_value_t iterable = arguments_list_p[0]; if (ecma_is_value_undefined (iterable) || ecma_is_value_null (iterable)) { return result; } lit_magic_string_id_t adder_string_id; if (lit_id == LIT_MAGIC_STRING_MAP_UL || lit_id == LIT_MAGIC_STRING_WEAKMAP_UL) { adder_string_id = LIT_MAGIC_STRING_SET; } else { adder_string_id = LIT_MAGIC_STRING_ADD; } result = ecma_op_object_get_by_magic_id (object_p, adder_string_id); if (ECMA_IS_VALUE_ERROR (result)) { goto cleanup_object; } if (!ecma_op_is_callable (result)) { ecma_free_value (result); result = ecma_raise_type_error (ECMA_ERR_MSG ("add/set function is not callable.")); goto cleanup_object; } 
ecma_object_t *adder_func_p = ecma_get_object_from_value (result); result = ecma_op_get_iterator (iterable, ECMA_VALUE_EMPTY); if (ECMA_IS_VALUE_ERROR (result)) { goto cleanup_adder; } const ecma_value_t iter = result; while (true) { result = ecma_op_iterator_step (iter); if (ECMA_IS_VALUE_ERROR (result)) { goto cleanup_iter; } if (ecma_is_value_false (result)) { break; } const ecma_value_t next = result; result = ecma_op_iterator_value (next); ecma_free_value (next); if (ECMA_IS_VALUE_ERROR (result)) { goto cleanup_iter; } if (lit_id == LIT_MAGIC_STRING_SET_UL || lit_id == LIT_MAGIC_STRING_WEAKSET_UL) { const ecma_value_t value = result; ecma_value_t arguments[] = { value }; result = ecma_op_function_call (adder_func_p, set_value, arguments, 1); ecma_free_value (value); } else { if (!ecma_is_value_object (result)) { ecma_free_value (result); ecma_raise_type_error (ECMA_ERR_MSG ("Iterator value is not an object.")); result = ecma_op_iterator_close (iter); JERRY_ASSERT (ECMA_IS_VALUE_ERROR (result)); goto cleanup_iter; } ecma_object_t *next_object_p = ecma_get_object_from_value (result); result = ecma_op_object_get_by_uint32_index (next_object_p, 0); if (ECMA_IS_VALUE_ERROR (result)) { ecma_deref_object (next_object_p); ecma_op_iterator_close (iter); goto cleanup_iter; } const ecma_value_t key = result; result = ecma_op_object_get_by_uint32_index (next_object_p, 1); if (ECMA_IS_VALUE_ERROR (result)) { ecma_deref_object (next_object_p); ecma_free_value (key); ecma_op_iterator_close (iter); goto cleanup_iter; } const ecma_value_t value = result; ecma_value_t arguments[] = { key, value }; result = ecma_op_function_call (adder_func_p, set_value, arguments, 2); ecma_free_value (key); ecma_free_value (value); ecma_deref_object (next_object_p); } if (ECMA_IS_VALUE_ERROR (result)) { ecma_op_iterator_close (iter); goto cleanup_iter; } ecma_free_value (result); } ecma_free_value (iter); ecma_deref_object (adder_func_p); return ecma_make_object_value (object_p); cleanup_iter: ecma_free_value (iter); cleanup_adder: ecma_deref_object (adder_func_p); cleanup_object: ecma_deref_object (object_p); #endif /* ENABLED (JERRY_ES2015) */ return result; } /* ecma_op_container_create */ /** * Get Map/Set object pointer * * Note: * If the function returns with NULL, the error object has * already set, and the caller must return with ECMA_VALUE_ERROR * * @return pointer to the Map/Set if this_arg is a valid Map/Set object * NULL otherwise */ static ecma_extended_object_t * ecma_op_container_get_object (ecma_value_t this_arg, /**< this argument */ lit_magic_string_id_t lit_id) /**< internal class id */ { if (ecma_is_value_object (this_arg)) { ecma_extended_object_t *map_object_p = (ecma_extended_object_t *) ecma_get_object_from_value (this_arg); if (ecma_get_object_type ((ecma_object_t *) map_object_p) == ECMA_OBJECT_TYPE_CLASS && map_object_p->u.class_prop.class_id == lit_id) { return map_object_p; } } #if ENABLED (JERRY_ERROR_MESSAGES) ecma_raise_standard_error_with_format (ECMA_ERROR_TYPE, "Expected a % object.", ecma_make_string_value (ecma_get_magic_string (lit_id))); #else /* !ENABLED (JERRY_ERROR_MESSAGES) */ ecma_raise_type_error (NULL); #endif /* ENABLED (JERRY_ERROR_MESSAGES) */ return NULL; } /* ecma_op_container_get_object */ /** * Returns with the size of the Map/Set object. * * @return size of the Map/Set object as ecma-value. 
*/ ecma_value_t ecma_op_container_size (ecma_value_t this_arg, /**< this argument */ lit_magic_string_id_t lit_id) /**< internal class id */ { ecma_extended_object_t *map_object_p = ecma_op_container_get_object (this_arg, lit_id); if (map_object_p == NULL) { return ECMA_VALUE_ERROR; } ecma_collection_t *container_p = ECMA_GET_INTERNAL_VALUE_POINTER (ecma_collection_t, map_object_p->u.class_prop.u.value); return ecma_make_uint32_value (ECMA_CONTAINER_GET_SIZE (container_p)); } /* ecma_op_container_size */ /** * The generic Map/WeakMap prototype object's 'get' routine * * @return ecma value * Returned value must be freed with ecma_free_value. */ ecma_value_t ecma_op_container_get (ecma_value_t this_arg, /**< this argument */ ecma_value_t key_arg, /**< key argument */ lit_magic_string_id_t lit_id) /**< internal class id */ { ecma_extended_object_t *map_object_p = ecma_op_container_get_object (this_arg, lit_id); if (map_object_p == NULL) { return ECMA_VALUE_ERROR; } #if ENABLED (JERRY_ES2015_BUILTIN_WEAKMAP) if (lit_id == LIT_MAGIC_STRING_WEAKMAP_UL && !ecma_is_value_object (key_arg)) { return ECMA_VALUE_UNDEFINED; } #endif /* ENABLED (JERRY_ES2015_BUILTIN_WEAKMAP) */ ecma_collection_t *container_p = ECMA_GET_INTERNAL_VALUE_POINTER (ecma_collection_t, map_object_p->u.class_prop.u.value); if (ECMA_CONTAINER_GET_SIZE (container_p) == 0) { return ECMA_VALUE_UNDEFINED; } ecma_value_t *entry_p = ecma_op_internal_buffer_find (container_p, key_arg, lit_id); if (entry_p == NULL) { return ECMA_VALUE_UNDEFINED; } return ecma_copy_value (((ecma_container_pair_t *) entry_p)->value); } /* ecma_op_container_get */ /** * The generic Map/Set prototype object's 'has' routine * * @return ecma value * Returned value must be freed with ecma_free_value. */ ecma_value_t ecma_op_container_has (ecma_value_t this_arg, /**< this argument */ ecma_value_t key_arg, /**< key argument */ lit_magic_string_id_t lit_id) /**< internal class id */ { ecma_extended_object_t *map_object_p = ecma_op_container_get_object (this_arg, lit_id); if (map_object_p == NULL) { return ECMA_VALUE_ERROR; } ecma_collection_t *container_p = ECMA_GET_INTERNAL_VALUE_POINTER (ecma_collection_t, map_object_p->u.class_prop.u.value); #if ENABLED (JERRY_ES2015_BUILTIN_WEAKMAP) || ENABLED (JERRY_ES2015_BUILTIN_WEAKSET) if ((map_object_p->u.class_prop.extra_info & ECMA_CONTAINER_FLAGS_WEAK) != 0 && !ecma_is_value_object (key_arg)) { return ECMA_VALUE_FALSE; } #endif /* ENABLED (JERRY_ES2015_BUILTIN_WEAKMAP) || ENABLED (JERRY_ES2015_BUILTIN_WEAKSET) */ if (ECMA_CONTAINER_GET_SIZE (container_p) == 0) { return ECMA_VALUE_FALSE; } ecma_value_t *entry_p = ecma_op_internal_buffer_find (container_p, key_arg, lit_id); return ecma_make_boolean_value (entry_p != NULL); } /* ecma_op_container_has */ /** * Set a weak reference from a container to a key object */ static void ecma_op_container_set_weak (ecma_object_t *const key_p, /**< key object */ ecma_extended_object_t *const container_p) /**< container */ { if (JERRY_UNLIKELY (ecma_op_object_is_fast_array (key_p))) { ecma_fast_array_convert_to_normal (key_p); } ecma_string_t *weak_refs_string_p = ecma_get_magic_string (LIT_INTERNAL_MAGIC_STRING_WEAK_REFS); ecma_property_t *property_p = ecma_find_named_property (key_p, weak_refs_string_p); ecma_collection_t *refs_p; if (property_p == NULL) { ecma_property_value_t *value_p = ecma_create_named_data_property (key_p, weak_refs_string_p, ECMA_PROPERTY_CONFIGURABLE_WRITABLE, &property_p); ECMA_CONVERT_DATA_PROPERTY_TO_INTERNAL_PROPERTY (property_p); refs_p = 
ecma_new_collection (); ECMA_SET_INTERNAL_VALUE_POINTER (value_p->value, refs_p); } else { refs_p = ECMA_GET_INTERNAL_VALUE_POINTER (ecma_collection_t, (ECMA_PROPERTY_VALUE_PTR (property_p)->value)); } const ecma_value_t container_value = ecma_make_object_value ((ecma_object_t *) container_p); for (uint32_t i = 0; i < refs_p->item_count; i++) { if (ecma_is_value_empty (refs_p->buffer_p[i])) { refs_p->buffer_p[i] = container_value; return; } } ecma_collection_push_back (refs_p, container_value); } /* ecma_op_container_set_weak */ /** * Helper method for the Map.prototype.set and Set.prototype.add methods to swap the sign of the given value if needed * * See also: * ECMA-262 v6, 23.2.3.1 step 6 * ECMA-262 v6, 23.1.3.9 step 6 * * @return ecma value */ static ecma_value_t ecma_op_container_set_noramlize_zero (ecma_value_t this_arg) /*< this arg */ { if (ecma_is_value_number (this_arg)) { ecma_number_t number_value = ecma_get_number_from_value (this_arg); if (JERRY_UNLIKELY (ecma_number_is_zero (number_value) && ecma_number_is_negative (number_value))) { return ecma_make_integer_value (0); } } return this_arg; } /* ecma_op_container_set_noramlize_zero */ /** * The generic Map prototype object's 'set' and Set prototype object's 'add' routine * * @return ecma value * Returned value must be freed with ecma_free_value. */ ecma_value_t ecma_op_container_set (ecma_value_t this_arg, /**< this argument */ ecma_value_t key_arg, /**< key argument */ ecma_value_t value_arg, /**< value argument */ lit_magic_string_id_t lit_id) /**< internal class id */ { ecma_extended_object_t *map_object_p = ecma_op_container_get_object (this_arg, lit_id); if (map_object_p == NULL) { return ECMA_VALUE_ERROR; } ecma_collection_t *container_p = ECMA_GET_INTERNAL_VALUE_POINTER (ecma_collection_t, map_object_p->u.class_prop.u.value); #if ENABLED (JERRY_ES2015_BUILTIN_WEAKMAP) || ENABLED (JERRY_ES2015_BUILTIN_WEAKSET) if ((map_object_p->u.class_prop.extra_info & ECMA_CONTAINER_FLAGS_WEAK) != 0 && !ecma_is_value_object (key_arg)) { return ecma_raise_type_error (ECMA_ERR_MSG ("Key must be an object")); } #endif /* ENABLED (JERRY_ES2015_BUILTIN_WEAKMAP) || ENABLED (JERRY_ES2015_BUILTIN_WEAKSET) */ ecma_value_t *entry_p = ecma_op_internal_buffer_find (container_p, key_arg, lit_id); if (entry_p == NULL) { ecma_op_internal_buffer_append (container_p, ecma_op_container_set_noramlize_zero (key_arg), value_arg, lit_id); #if ENABLED (JERRY_ES2015_BUILTIN_WEAKMAP) || ENABLED (JERRY_ES2015_BUILTIN_WEAKSET) if ((map_object_p->u.class_prop.extra_info & ECMA_CONTAINER_FLAGS_WEAK) != 0) { ecma_object_t *key_p = ecma_get_object_from_value (key_arg); ecma_op_container_set_weak (key_p, map_object_p); } #endif /* ENABLED (JERRY_ES2015_BUILTIN_WEAKMAP) || ENABLED (JERRY_ES2015_BUILTIN_WEAKSET) */ } else { ecma_op_internal_buffer_update (entry_p, ecma_op_container_set_noramlize_zero (value_arg), lit_id); } ecma_ref_object ((ecma_object_t *) map_object_p); return this_arg; } /* ecma_op_container_set */ /** * The generic Map/Set prototype object's 'forEach' routine * * @return ecma value * Returned value must be freed with ecma_free_value. 
*/ ecma_value_t ecma_op_container_foreach (ecma_value_t this_arg, /**< this argument */ ecma_value_t predicate, /**< callback function */ ecma_value_t predicate_this_arg, /**< this argument for * invoke predicate */ lit_magic_string_id_t lit_id) /**< internal class id */ { ecma_extended_object_t *map_object_p = ecma_op_container_get_object (this_arg, lit_id); if (map_object_p == NULL) { return ECMA_VALUE_ERROR; } if (!ecma_op_is_callable (predicate)) { return ecma_raise_type_error (ECMA_ERR_MSG ("Callback function is not callable.")); } JERRY_ASSERT (ecma_is_value_object (predicate)); ecma_object_t *func_object_p = ecma_get_object_from_value (predicate); ecma_value_t ret_value = ECMA_VALUE_UNDEFINED; ecma_collection_t *container_p = ECMA_GET_INTERNAL_VALUE_POINTER (ecma_collection_t, map_object_p->u.class_prop.u.value); uint8_t entry_size = ecma_op_container_entry_size (lit_id); for (uint32_t i = 0; i < ECMA_CONTAINER_ENTRY_COUNT (container_p); i += entry_size) { ecma_value_t *entry_p = ECMA_CONTAINER_START (container_p) + i; if (ecma_is_value_empty (*entry_p)) { continue; } ecma_value_t key_arg = *entry_p; ecma_value_t value_arg = ecma_op_container_get_value (entry_p, lit_id); ecma_value_t call_args[] = { value_arg, key_arg, this_arg }; ecma_value_t call_value = ecma_op_function_call (func_object_p, predicate_this_arg, call_args, 3); if (ECMA_IS_VALUE_ERROR (call_value)) { ret_value = call_value; break; } ecma_free_value (call_value); } return ret_value; } /* ecma_op_container_foreach */ /** * The Map/Set prototype object's 'clear' routine * * @return ecma value * Returned value must be freed with ecma_free_value. */ ecma_value_t ecma_op_container_clear (ecma_value_t this_arg, /**< this argument */ lit_magic_string_id_t lit_id) /**< internal class id */ { ecma_extended_object_t *map_object_p = ecma_op_container_get_object (this_arg, lit_id); if (map_object_p == NULL) { return ECMA_VALUE_ERROR; } ecma_op_container_free_entries ((ecma_object_t *) map_object_p); return ECMA_VALUE_UNDEFINED; } /* ecma_op_container_clear */ /** * The generic Map/Set prototype object's 'delete' routine * * @return ecma value * Returned value must be freed with ecma_free_value. */ ecma_value_t ecma_op_container_delete (ecma_value_t this_arg, /**< this argument */ ecma_value_t key_arg, /**< key argument */ lit_magic_string_id_t lit_id) /**< internal class id */ { ecma_extended_object_t *map_object_p = ecma_op_container_get_object (this_arg, lit_id); if (map_object_p == NULL) { return ECMA_VALUE_ERROR; } ecma_collection_t *container_p = ECMA_GET_INTERNAL_VALUE_POINTER (ecma_collection_t, map_object_p->u.class_prop.u.value); ecma_value_t *entry_p = ecma_op_internal_buffer_find (container_p, key_arg, lit_id); if (entry_p == NULL) { return ECMA_VALUE_FALSE; } ecma_op_internal_buffer_delete (container_p, (ecma_container_pair_t *) entry_p, lit_id); return ECMA_VALUE_TRUE; } /* ecma_op_container_delete */ /** * The generic WeakMap/WeakSet prototype object's 'delete' routine * * @return ecma value * Returned value must be freed with ecma_free_value. 
*/ ecma_value_t ecma_op_container_delete_weak (ecma_value_t this_arg, /**< this argument */ ecma_value_t key_arg, /**< key argument */ lit_magic_string_id_t lit_id) /**< internal class id */ { ecma_extended_object_t *map_object_p = ecma_op_container_get_object (this_arg, lit_id); if (map_object_p == NULL) { return ECMA_VALUE_ERROR; } if (!ecma_is_value_object (key_arg)) { return ECMA_VALUE_FALSE; } ecma_collection_t *container_p = ECMA_GET_INTERNAL_VALUE_POINTER (ecma_collection_t, map_object_p->u.class_prop.u.value); ecma_value_t *entry_p = ecma_op_internal_buffer_find (container_p, key_arg, lit_id); if (entry_p == NULL) { return ECMA_VALUE_FALSE; } ecma_op_internal_buffer_delete (container_p, (ecma_container_pair_t *) entry_p, lit_id); ecma_object_t *key_object_p = ecma_get_object_from_value (key_arg); ecma_op_container_unref_weak (key_object_p, ecma_make_object_value ((ecma_object_t *) map_object_p)); return ECMA_VALUE_TRUE; } /* ecma_op_container_delete_weak */ /** * Helper function to remove a weak reference to an object. * * @return ecma value * Returned value must be freed with ecma_free_value. */ void ecma_op_container_unref_weak (ecma_object_t *object_p, /**< this argument */ ecma_value_t ref_holder) /**< key argument */ { ecma_string_t *weak_refs_string_p = ecma_get_magic_string (LIT_INTERNAL_MAGIC_STRING_WEAK_REFS); ecma_property_t *property_p = ecma_find_named_property (object_p, weak_refs_string_p); JERRY_ASSERT (property_p != NULL); ecma_collection_t *refs_p = ECMA_GET_INTERNAL_VALUE_POINTER (ecma_collection_t, ECMA_PROPERTY_VALUE_PTR (property_p)->value); for (uint32_t i = 0; i < refs_p->item_count; i++) { if (refs_p->buffer_p[i] == ref_holder) { refs_p->buffer_p[i] = ECMA_VALUE_EMPTY; break; } } } /* ecma_op_container_unref_weak */ /** * Helper function to remove a key/value pair from a weak container object */ void ecma_op_container_remove_weak_entry (ecma_object_t *object_p, /**< internal container object */ ecma_value_t key_arg) /**< key */ { ecma_extended_object_t *map_object_p = (ecma_extended_object_t *) object_p; ecma_collection_t *container_p = ECMA_GET_INTERNAL_VALUE_POINTER (ecma_collection_t, map_object_p->u.class_prop.u.value); ecma_value_t *entry_p = ecma_op_internal_buffer_find (container_p, key_arg, map_object_p->u.class_prop.class_id); JERRY_ASSERT (entry_p != NULL); ecma_op_internal_buffer_delete (container_p, (ecma_container_pair_t *) entry_p, map_object_p->u.class_prop.class_id); } /* ecma_op_container_remove_weak_entry */ #if ENABLED (JERRY_ES2015) /** * The Create{Set, Map}Iterator Abstract operation * * See also: * ECMA-262 v6, 23.1.5.1 * ECMA-262 v6, 23.2.5.1 * * Note: * Returned value must be freed with ecma_free_value. * * @return Map/Set iterator object, if success * error - otherwise */ ecma_value_t ecma_op_container_create_iterator (ecma_value_t this_arg, /**< this argument */ uint8_t type, /**< any combination of * ecma_iterator_type_t bits */ lit_magic_string_id_t lit_id, /**< internal class id */ ecma_builtin_id_t proto_id, /**< prototype builtin id */ ecma_pseudo_array_type_t iterator_type) /**< type of the iterator */ { ecma_extended_object_t *map_object_p = ecma_op_container_get_object (this_arg, lit_id); if (map_object_p == NULL) { return ECMA_VALUE_ERROR; } return ecma_op_create_iterator_object (this_arg, ecma_builtin_get (proto_id), (uint8_t) iterator_type, type); } /* ecma_op_container_create_iterator */ /** * Get the index of the iterator object. * * @return index of the iterator. 
*/ static uint32_t ecma_op_iterator_get_index (ecma_object_t *iter_obj_p) /**< iterator object pointer */ { uint32_t index = ((ecma_extended_object_t *) iter_obj_p)->u.pseudo_array.u1.iterator_index; if (JERRY_UNLIKELY (index == ECMA_ITERATOR_INDEX_LIMIT)) { ecma_string_t *prop_name_p = ecma_get_magic_string (LIT_INTERNAL_MAGIC_STRING_ITERATOR_NEXT_INDEX); ecma_property_t *property_p = ecma_find_named_property (iter_obj_p, prop_name_p); ecma_property_value_t *value_p = ECMA_PROPERTY_VALUE_PTR (property_p); return (uint32_t) (ecma_get_number_from_value (value_p->value)); } return index; } /* ecma_op_iterator_get_index */ /** * Set the index of the iterator object. */ static void ecma_op_iterator_set_index (ecma_object_t *iter_obj_p, /**< iterator object pointer */ uint32_t index) /* iterator index to set */ { if (JERRY_UNLIKELY (index >= ECMA_ITERATOR_INDEX_LIMIT)) { /* After the ECMA_ITERATOR_INDEX_LIMIT limit is reached the [[%Iterator%NextIndex]] property is stored as an internal property */ ecma_string_t *prop_name_p = ecma_get_magic_string (LIT_INTERNAL_MAGIC_STRING_ITERATOR_NEXT_INDEX); ecma_property_t *property_p = ecma_find_named_property (iter_obj_p, prop_name_p); ecma_property_value_t *value_p; if (property_p == NULL) { value_p = ecma_create_named_data_property (iter_obj_p, prop_name_p, ECMA_PROPERTY_FLAG_WRITABLE, &property_p); value_p->value = ecma_make_uint32_value (index); } else { value_p = ECMA_PROPERTY_VALUE_PTR (property_p); value_p->value = ecma_make_uint32_value (index); } } else { ((ecma_extended_object_t *) iter_obj_p)->u.pseudo_array.u1.iterator_index = (uint16_t) index; } } /* ecma_op_iterator_set_index */ /** * The %{Set, Map}IteratorPrototype% object's 'next' routine * * See also: * ECMA-262 v6, 23.1.5.2.1 * ECMA-262 v6, 23.2.5.2.1 * * Note: * Returned value must be freed with ecma_free_value. 
* * @return iterator result object, if success * error - otherwise */ ecma_value_t ecma_op_container_iterator_next (ecma_value_t this_val, /**< this argument */ ecma_pseudo_array_type_t iterator_type) /**< type of the iterator */ { if (!ecma_is_value_object (this_val)) { return ecma_raise_type_error (ECMA_ERR_MSG ("Argument 'this' is not an object.")); } ecma_object_t *obj_p = ecma_get_object_from_value (this_val); ecma_extended_object_t *ext_obj_p = (ecma_extended_object_t *) obj_p; if (ecma_get_object_type (obj_p) != ECMA_OBJECT_TYPE_PSEUDO_ARRAY || ext_obj_p->u.pseudo_array.type != iterator_type) { return ecma_raise_type_error (ECMA_ERR_MSG ("Argument 'this' is not an iterator.")); } ecma_value_t iterated_value = ext_obj_p->u.pseudo_array.u2.iterated_value; if (ecma_is_value_empty (iterated_value)) { return ecma_create_iter_result_object (ECMA_VALUE_UNDEFINED, ECMA_VALUE_TRUE); } ecma_extended_object_t *map_object_p = (ecma_extended_object_t *) (ecma_get_object_from_value (iterated_value)); lit_magic_string_id_t lit_id = map_object_p->u.class_prop.class_id; ecma_collection_t *container_p = ECMA_GET_INTERNAL_VALUE_POINTER (ecma_collection_t, map_object_p->u.class_prop.u.value); uint32_t entry_count = ECMA_CONTAINER_ENTRY_COUNT (container_p); uint32_t index = ecma_op_iterator_get_index (obj_p); if (index == entry_count) { ext_obj_p->u.pseudo_array.u2.iterated_value = ECMA_VALUE_EMPTY; return ecma_create_iter_result_object (ECMA_VALUE_UNDEFINED, ECMA_VALUE_TRUE); } uint8_t entry_size = ecma_op_container_entry_size (lit_id); uint8_t iterator_kind = ext_obj_p->u.pseudo_array.extra_info; ecma_value_t *start_p = ECMA_CONTAINER_START (container_p); ecma_value_t ret_value = ECMA_VALUE_UNDEFINED; for (uint32_t i = index; i < entry_count; i += entry_size) { ecma_value_t *entry_p = start_p + i; if (ecma_is_value_empty (*entry_p)) { if (i == (entry_count - entry_size)) { ret_value = ecma_create_iter_result_object (ECMA_VALUE_UNDEFINED, ECMA_VALUE_TRUE); break; } continue; } ecma_op_iterator_set_index (obj_p, i + entry_size); ecma_value_t key_arg = *entry_p; ecma_value_t value_arg = ecma_op_container_get_value (entry_p, lit_id); if (iterator_kind == ECMA_ITERATOR_KEYS) { ret_value = ecma_create_iter_result_object (key_arg, ECMA_VALUE_FALSE); } else if (iterator_kind == ECMA_ITERATOR_VALUES) { ret_value = ecma_create_iter_result_object (value_arg, ECMA_VALUE_FALSE); } else { JERRY_ASSERT (iterator_kind == ECMA_ITERATOR_KEYS_VALUES); ecma_value_t entry_array_value; entry_array_value = ecma_create_array_from_iter_element (value_arg, key_arg); ret_value = ecma_create_iter_result_object (entry_array_value, ECMA_VALUE_FALSE); ecma_free_value (entry_array_value); } break; } return ret_value; } /* ecma_op_container_iterator_next */ #endif /* ENABLED (JERRY_ES2015) */ /** * @} * @} */ #endif /* ENABLED (JERRY_ES2015_BUILTIN_CONTAINER) */
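Before moving on to the patched file, it is worth spelling out the buffer layout the file above relies on, as documented in the ecma_op_create_internal_buffer() comment and the ECMA_CONTAINER_* macros: slot 0 of the collection holds the logical size, deleted entries are left in place as ECMA_VALUE_EMPTY holes that iteration skips, and the size counter excludes those holes. A runnable toy model of that layout, with illustrative names (intbuf, EMPTY_SLOT and so on are not JerryScript API):

#include <stdint.h>
#include <stdio.h>

#define EMPTY_SLOT UINT32_MAX
#define CAP 16

typedef struct { uint32_t items[CAP]; uint32_t used; } intbuf;

static void push(intbuf *b, uint32_t v) { if (b->used < CAP) b->items[b->used++] = v; }

int main(void)
{
    intbuf b = { .used = 0 };
    push(&b, 0);                       /* slot 0: logical size of the container */
    push(&b, 11); b.items[0] += 1;     /* append an entry, bump the size */
    push(&b, 22); b.items[0] += 1;
    b.items[1] = EMPTY_SLOT;           /* delete: leave a hole, decrement the size */
    b.items[0] -= 1;
    for (uint32_t i = 1; i < b.used; i++)   /* iterate entries, skipping holes */
        if (b.items[i] != EMPTY_SLOT)
            printf("entry: %u\n", b.items[i]);
    printf("logical size: %u\n", b.items[0]);
    return 0;
}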
/* Copyright JS Foundation and other contributors, http://js.foundation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "jcontext.h" #include "ecma-alloc.h" #include "ecma-array-object.h" #include "ecma-builtins.h" #include "ecma-builtin-helpers.h" #include "ecma-exceptions.h" #include "ecma-function-object.h" #include "ecma-gc.h" #include "ecma-helpers.h" #include "ecma-iterator-object.h" #include "ecma-container-object.h" #include "ecma-property-hashmap.h" #include "ecma-objects.h" #if ENABLED (JERRY_ES2015_BUILTIN_CONTAINER) /** \addtogroup ecma ECMA * @{ * * \addtogroup \addtogroup ecmamaphelpers ECMA builtin Map/Set helper functions * @{ */ /** * Create a new internal buffer. * * Note: * The first element of the collection tracks the size of the buffer. * ECMA_VALUE_EMPTY values are not calculated into the size. * * @return pointer to the internal buffer */ static inline ecma_collection_t * ecma_op_create_internal_buffer (void) { ecma_collection_t *collection_p = ecma_new_collection (); ecma_collection_push_back (collection_p, (ecma_value_t) 0); return collection_p; } /* ecma_op_create_internal_buffer */ /** * Append values to the internal buffer. */ static void ecma_op_internal_buffer_append (ecma_collection_t *container_p, /**< internal container pointer */ ecma_value_t key_arg, /**< key argument */ ecma_value_t value_arg, /**< value argument */ lit_magic_string_id_t lit_id) /**< class id */ { JERRY_ASSERT (container_p != NULL); if (lit_id == LIT_MAGIC_STRING_WEAKMAP_UL || lit_id == LIT_MAGIC_STRING_MAP_UL) { ecma_value_t values[] = { ecma_copy_value_if_not_object (key_arg), ecma_copy_value_if_not_object (value_arg) }; ecma_collection_append (container_p, values, 2); } else { ecma_collection_push_back (container_p, ecma_copy_value_if_not_object (key_arg)); } ECMA_CONTAINER_SET_SIZE (container_p, ECMA_CONTAINER_GET_SIZE (container_p) + 1); } /* ecma_op_internal_buffer_append */ /** * Update the value of a given entry. */ static inline void ecma_op_internal_buffer_update (ecma_value_t *entry_p, /**< entry pointer */ ecma_value_t value_arg, /**< value argument */ lit_magic_string_id_t lit_id) /**< class id */ { JERRY_ASSERT (entry_p != NULL); if (lit_id == LIT_MAGIC_STRING_WEAKMAP_UL || lit_id == LIT_MAGIC_STRING_MAP_UL) { ecma_free_value_if_not_object (((ecma_container_pair_t *) entry_p)->value); ((ecma_container_pair_t *) entry_p)->value = ecma_copy_value_if_not_object (value_arg); } } /* ecma_op_internal_buffer_update */ /** * Delete element from the internal buffer. 
*/ static void ecma_op_internal_buffer_delete (ecma_collection_t *container_p, /**< internal container pointer */ ecma_container_pair_t *entry_p, /**< entry pointer */ lit_magic_string_id_t lit_id) /**< class id */ { JERRY_ASSERT (container_p != NULL); JERRY_ASSERT (entry_p != NULL); ecma_free_value_if_not_object (entry_p->key); entry_p->key = ECMA_VALUE_EMPTY; if (lit_id == LIT_MAGIC_STRING_WEAKMAP_UL || lit_id == LIT_MAGIC_STRING_MAP_UL) { ecma_free_value_if_not_object (entry_p->value); entry_p->value = ECMA_VALUE_EMPTY; } ECMA_CONTAINER_SET_SIZE (container_p, ECMA_CONTAINER_GET_SIZE (container_p) - 1); } /* ecma_op_internal_buffer_delete */ /** * Find an entry in the collection. * * @return pointer to the appropriate entry. */ static ecma_value_t * ecma_op_internal_buffer_find (ecma_collection_t *container_p, /**< internal container pointer */ ecma_value_t key_arg, /**< key argument */ lit_magic_string_id_t lit_id) /**< class id */ { JERRY_ASSERT (container_p != NULL); uint8_t entry_size = ecma_op_container_entry_size (lit_id); uint32_t entry_count = ECMA_CONTAINER_ENTRY_COUNT (container_p); ecma_value_t *start_p = ECMA_CONTAINER_START (container_p); for (uint32_t i = 0; i < entry_count; i += entry_size) { ecma_value_t *entry_p = start_p + i; if (ecma_op_same_value_zero (*entry_p, key_arg)) { return entry_p; } } return NULL; } /* ecma_op_internal_buffer_find */ /** * Get the value that belongs to the key. * * Note: in case of Set containers, the values are the same as the keys. * * @return ecma value */ static ecma_value_t ecma_op_container_get_value (ecma_value_t *entry_p, /**< entry (key) pointer */ lit_magic_string_id_t lit_id) /**< class id */ { JERRY_ASSERT (entry_p != NULL); if (lit_id == LIT_MAGIC_STRING_WEAKMAP_UL || lit_id == LIT_MAGIC_STRING_MAP_UL) { return ((ecma_container_pair_t *) entry_p)->value; } return *entry_p; } /* ecma_op_container_get_value */ /** * Get the size (in ecma_value_t) of the stored entries. * * @return size of the entries. */ uint8_t ecma_op_container_entry_size (lit_magic_string_id_t lit_id) /**< class id */ { if (lit_id == LIT_MAGIC_STRING_WEAKMAP_UL || lit_id == LIT_MAGIC_STRING_MAP_UL) { return ECMA_CONTAINER_PAIR_SIZE; } return ECMA_CONTAINER_VALUE_SIZE; } /* ecma_op_container_entry_size */ #if ENABLED (JERRY_ES2015_BUILTIN_WEAKSET) /** * Release the entries in the WeakSet container. */ static void ecma_op_container_free_weakset_entries (ecma_object_t *object_p, /**< object pointer */ ecma_collection_t *container_p) /** internal buffer pointer */ { JERRY_ASSERT (object_p != NULL); JERRY_ASSERT (container_p != NULL); uint32_t entry_count = ECMA_CONTAINER_ENTRY_COUNT (container_p); ecma_value_t *start_p = ECMA_CONTAINER_START (container_p); for (uint32_t i = 0; i < entry_count; i += ECMA_CONTAINER_VALUE_SIZE) { ecma_value_t *entry_p = start_p + i; if (ecma_is_value_empty (*entry_p)) { continue; } ecma_op_container_unref_weak (ecma_get_object_from_value (*entry_p), ecma_make_object_value (object_p)); ecma_op_container_remove_weak_entry (object_p, *entry_p); *entry_p = ECMA_VALUE_EMPTY; } } /* ecma_op_container_free_weakset_entries */ #endif /* ENABLED (JERRY_ES2015_BUILTIN_WEAKSET) */ #if ENABLED (JERRY_ES2015_BUILTIN_WEAKMAP) /** * Release the entries in the WeakMap container. 
*/ static void ecma_op_container_free_weakmap_entries (ecma_object_t *object_p, /**< object pointer */ ecma_collection_t *container_p) /**< internal buffer pointer */ { JERRY_ASSERT (object_p != NULL); JERRY_ASSERT (container_p != NULL); uint32_t entry_count = ECMA_CONTAINER_ENTRY_COUNT (container_p); ecma_value_t *start_p = ECMA_CONTAINER_START (container_p); for (uint32_t i = 0; i < entry_count; i += ECMA_CONTAINER_PAIR_SIZE) { ecma_container_pair_t *entry_p = (ecma_container_pair_t *) (start_p + i); if (ecma_is_value_empty (entry_p->key)) { continue; } ecma_op_container_unref_weak (ecma_get_object_from_value (entry_p->key), ecma_make_object_value (object_p)); ecma_op_container_remove_weak_entry (object_p, entry_p->key); ecma_free_value_if_not_object (entry_p->value); entry_p->key = ECMA_VALUE_EMPTY; entry_p->value = ECMA_VALUE_EMPTY; } } /* ecma_op_container_free_weakmap_entries */ #endif /* ENABLED (JERRY_ES2015_BUILTIN_WEAKMAP) */ #if ENABLED (JERRY_ES2015_BUILTIN_SET) /** * Release the entries in the Set container. */ static void ecma_op_container_free_set_entries (ecma_collection_t *container_p) { JERRY_ASSERT (container_p != NULL); uint32_t entry_count = ECMA_CONTAINER_ENTRY_COUNT (container_p); ecma_value_t *start_p = ECMA_CONTAINER_START (container_p); for (uint32_t i = 0; i < entry_count; i += ECMA_CONTAINER_VALUE_SIZE) { ecma_value_t *entry_p = start_p + i; if (ecma_is_value_empty (*entry_p)) { continue; } ecma_free_value_if_not_object (*entry_p); *entry_p = ECMA_VALUE_EMPTY; } } /* ecma_op_container_free_set_entries */ #endif /* ENABLED (JERRY_ES2015_BUILTIN_SET) */ #if ENABLED (JERRY_ES2015_BUILTIN_MAP) /** * Release the entries in the Map container. */ static void ecma_op_container_free_map_entries (ecma_collection_t *container_p) { JERRY_ASSERT (container_p != NULL); uint32_t entry_count = ECMA_CONTAINER_ENTRY_COUNT (container_p); ecma_value_t *start_p = ECMA_CONTAINER_START (container_p); for (uint32_t i = 0; i < entry_count; i += ECMA_CONTAINER_PAIR_SIZE) { ecma_container_pair_t *entry_p = (ecma_container_pair_t *) (start_p + i); if (ecma_is_value_empty (entry_p->key)) { continue; } ecma_free_value_if_not_object (entry_p->key); ecma_free_value_if_not_object (entry_p->value); entry_p->key = ECMA_VALUE_EMPTY; entry_p->value = ECMA_VALUE_EMPTY; } } /* ecma_op_container_free_map_entries */ #endif /* ENABLED (JERRY_ES2015_BUILTIN_MAP) */ /** * Release the internal buffer and the stored entries. 
*/ void ecma_op_container_free_entries (ecma_object_t *object_p) /**< collection object pointer */ { JERRY_ASSERT (object_p != NULL); ecma_extended_object_t *map_object_p = (ecma_extended_object_t *) object_p; ecma_collection_t *container_p = ECMA_GET_INTERNAL_VALUE_POINTER (ecma_collection_t, map_object_p->u.class_prop.u.value); switch (map_object_p->u.class_prop.class_id) { #if ENABLED (JERRY_ES2015_BUILTIN_WEAKSET) case LIT_MAGIC_STRING_WEAKSET_UL: { ecma_op_container_free_weakset_entries (object_p, container_p); break; } #endif /* ENABLED (JERRY_ES2015_BUILTIN_WEAKSET) */ #if ENABLED (JERRY_ES2015_BUILTIN_WEAKMAP) case LIT_MAGIC_STRING_WEAKMAP_UL: { ecma_op_container_free_weakmap_entries (object_p, container_p); break; } #endif /* ENABLED (JERRY_ES2015_BUILTIN_WEAKMAP) */ #if ENABLED (JERRY_ES2015_BUILTIN_SET) case LIT_MAGIC_STRING_SET_UL: { ecma_op_container_free_set_entries (container_p); break; } #endif /* ENABLED (JERRY_ES2015_BUILTIN_SET) */ #if ENABLED (JERRY_ES2015_BUILTIN_MAP) case LIT_MAGIC_STRING_MAP_UL: { ecma_op_container_free_map_entries (container_p); break; } #endif /* ENABLED (JERRY_ES2015_BUILTIN_MAP) */ default: { break; } } ECMA_CONTAINER_SET_SIZE (container_p, 0); } /* ecma_op_container_free_entries */ /** * Handle calling [[Construct]] of built-in Map/Set like objects * * @return ecma value */ ecma_value_t ecma_op_container_create (const ecma_value_t *arguments_list_p, /**< arguments list */ ecma_length_t arguments_list_len, /**< number of arguments */ lit_magic_string_id_t lit_id, /**< internal class id */ ecma_builtin_id_t proto_id) /**< prototype builtin id */ { JERRY_ASSERT (arguments_list_len == 0 || arguments_list_p != NULL); JERRY_ASSERT (lit_id == LIT_MAGIC_STRING_MAP_UL || lit_id == LIT_MAGIC_STRING_SET_UL || lit_id == LIT_MAGIC_STRING_WEAKMAP_UL || lit_id == LIT_MAGIC_STRING_WEAKSET_UL); JERRY_ASSERT (JERRY_CONTEXT (current_new_target) != NULL); ecma_object_t *proto_p = ecma_op_get_prototype_from_constructor (JERRY_CONTEXT (current_new_target), proto_id); if (JERRY_UNLIKELY (proto_p == NULL)) { return ECMA_VALUE_ERROR; } ecma_collection_t *container_p = ecma_op_create_internal_buffer (); ecma_object_t *object_p = ecma_create_object (proto_p, sizeof (ecma_extended_object_t), ECMA_OBJECT_TYPE_CLASS); ecma_deref_object (proto_p); ecma_extended_object_t *map_obj_p = (ecma_extended_object_t *) object_p; map_obj_p->u.class_prop.extra_info = ECMA_CONTAINER_FLAGS_EMPTY; map_obj_p->u.class_prop.class_id = (uint16_t) lit_id; if (lit_id == LIT_MAGIC_STRING_WEAKMAP_UL || lit_id == LIT_MAGIC_STRING_WEAKSET_UL) { map_obj_p->u.class_prop.extra_info |= ECMA_CONTAINER_FLAGS_WEAK; } ECMA_SET_INTERNAL_VALUE_POINTER (map_obj_p->u.class_prop.u.value, container_p); ecma_value_t set_value = ecma_make_object_value (object_p); ecma_value_t result = set_value; #if ENABLED (JERRY_ES2015) if (arguments_list_len == 0) { return result; } ecma_value_t iterable = arguments_list_p[0]; if (ecma_is_value_undefined (iterable) || ecma_is_value_null (iterable)) { return result; } lit_magic_string_id_t adder_string_id; if (lit_id == LIT_MAGIC_STRING_MAP_UL || lit_id == LIT_MAGIC_STRING_WEAKMAP_UL) { adder_string_id = LIT_MAGIC_STRING_SET; } else { adder_string_id = LIT_MAGIC_STRING_ADD; } result = ecma_op_object_get_by_magic_id (object_p, adder_string_id); if (ECMA_IS_VALUE_ERROR (result)) { goto cleanup_object; } if (!ecma_op_is_callable (result)) { ecma_free_value (result); result = ecma_raise_type_error (ECMA_ERR_MSG ("add/set function is not callable.")); goto cleanup_object; } 
ecma_object_t *adder_func_p = ecma_get_object_from_value (result); result = ecma_op_get_iterator (iterable, ECMA_VALUE_EMPTY); if (ECMA_IS_VALUE_ERROR (result)) { goto cleanup_adder; } const ecma_value_t iter = result; while (true) { result = ecma_op_iterator_step (iter); if (ECMA_IS_VALUE_ERROR (result)) { goto cleanup_iter; } if (ecma_is_value_false (result)) { break; } const ecma_value_t next = result; result = ecma_op_iterator_value (next); ecma_free_value (next); if (ECMA_IS_VALUE_ERROR (result)) { goto cleanup_iter; } if (lit_id == LIT_MAGIC_STRING_SET_UL || lit_id == LIT_MAGIC_STRING_WEAKSET_UL) { const ecma_value_t value = result; ecma_value_t arguments[] = { value }; result = ecma_op_function_call (adder_func_p, set_value, arguments, 1); ecma_free_value (value); } else { if (!ecma_is_value_object (result)) { ecma_free_value (result); ecma_raise_type_error (ECMA_ERR_MSG ("Iterator value is not an object.")); result = ecma_op_iterator_close (iter); JERRY_ASSERT (ECMA_IS_VALUE_ERROR (result)); goto cleanup_iter; } ecma_object_t *next_object_p = ecma_get_object_from_value (result); result = ecma_op_object_get_by_uint32_index (next_object_p, 0); if (ECMA_IS_VALUE_ERROR (result)) { ecma_deref_object (next_object_p); ecma_op_iterator_close (iter); goto cleanup_iter; } const ecma_value_t key = result; result = ecma_op_object_get_by_uint32_index (next_object_p, 1); if (ECMA_IS_VALUE_ERROR (result)) { ecma_deref_object (next_object_p); ecma_free_value (key); ecma_op_iterator_close (iter); goto cleanup_iter; } const ecma_value_t value = result; ecma_value_t arguments[] = { key, value }; result = ecma_op_function_call (adder_func_p, set_value, arguments, 2); ecma_free_value (key); ecma_free_value (value); ecma_deref_object (next_object_p); } if (ECMA_IS_VALUE_ERROR (result)) { ecma_op_iterator_close (iter); goto cleanup_iter; } ecma_free_value (result); } ecma_free_value (iter); ecma_deref_object (adder_func_p); return ecma_make_object_value (object_p); cleanup_iter: ecma_free_value (iter); cleanup_adder: ecma_deref_object (adder_func_p); cleanup_object: ecma_deref_object (object_p); #endif /* ENABLED (JERRY_ES2015) */ return result; } /* ecma_op_container_create */ /** * Get Map/Set object pointer * * Note: * If the function returns with NULL, the error object has * already set, and the caller must return with ECMA_VALUE_ERROR * * @return pointer to the Map/Set if this_arg is a valid Map/Set object * NULL otherwise */ static ecma_extended_object_t * ecma_op_container_get_object (ecma_value_t this_arg, /**< this argument */ lit_magic_string_id_t lit_id) /**< internal class id */ { if (ecma_is_value_object (this_arg)) { ecma_extended_object_t *map_object_p = (ecma_extended_object_t *) ecma_get_object_from_value (this_arg); if (ecma_get_object_type ((ecma_object_t *) map_object_p) == ECMA_OBJECT_TYPE_CLASS && map_object_p->u.class_prop.class_id == lit_id) { return map_object_p; } } #if ENABLED (JERRY_ERROR_MESSAGES) ecma_raise_standard_error_with_format (ECMA_ERROR_TYPE, "Expected a % object.", ecma_make_string_value (ecma_get_magic_string (lit_id))); #else /* !ENABLED (JERRY_ERROR_MESSAGES) */ ecma_raise_type_error (NULL); #endif /* ENABLED (JERRY_ERROR_MESSAGES) */ return NULL; } /* ecma_op_container_get_object */ /** * Returns with the size of the Map/Set object. * * @return size of the Map/Set object as ecma-value. 
*/ ecma_value_t ecma_op_container_size (ecma_value_t this_arg, /**< this argument */ lit_magic_string_id_t lit_id) /**< internal class id */ { ecma_extended_object_t *map_object_p = ecma_op_container_get_object (this_arg, lit_id); if (map_object_p == NULL) { return ECMA_VALUE_ERROR; } ecma_collection_t *container_p = ECMA_GET_INTERNAL_VALUE_POINTER (ecma_collection_t, map_object_p->u.class_prop.u.value); return ecma_make_uint32_value (ECMA_CONTAINER_GET_SIZE (container_p)); } /* ecma_op_container_size */ /** * The generic Map/WeakMap prototype object's 'get' routine * * @return ecma value * Returned value must be freed with ecma_free_value. */ ecma_value_t ecma_op_container_get (ecma_value_t this_arg, /**< this argument */ ecma_value_t key_arg, /**< key argument */ lit_magic_string_id_t lit_id) /**< internal class id */ { ecma_extended_object_t *map_object_p = ecma_op_container_get_object (this_arg, lit_id); if (map_object_p == NULL) { return ECMA_VALUE_ERROR; } #if ENABLED (JERRY_ES2015_BUILTIN_WEAKMAP) if (lit_id == LIT_MAGIC_STRING_WEAKMAP_UL && !ecma_is_value_object (key_arg)) { return ECMA_VALUE_UNDEFINED; } #endif /* ENABLED (JERRY_ES2015_BUILTIN_WEAKMAP) */ ecma_collection_t *container_p = ECMA_GET_INTERNAL_VALUE_POINTER (ecma_collection_t, map_object_p->u.class_prop.u.value); if (ECMA_CONTAINER_GET_SIZE (container_p) == 0) { return ECMA_VALUE_UNDEFINED; } ecma_value_t *entry_p = ecma_op_internal_buffer_find (container_p, key_arg, lit_id); if (entry_p == NULL) { return ECMA_VALUE_UNDEFINED; } return ecma_copy_value (((ecma_container_pair_t *) entry_p)->value); } /* ecma_op_container_get */ /** * The generic Map/Set prototype object's 'has' routine * * @return ecma value * Returned value must be freed with ecma_free_value. */ ecma_value_t ecma_op_container_has (ecma_value_t this_arg, /**< this argument */ ecma_value_t key_arg, /**< key argument */ lit_magic_string_id_t lit_id) /**< internal class id */ { ecma_extended_object_t *map_object_p = ecma_op_container_get_object (this_arg, lit_id); if (map_object_p == NULL) { return ECMA_VALUE_ERROR; } ecma_collection_t *container_p = ECMA_GET_INTERNAL_VALUE_POINTER (ecma_collection_t, map_object_p->u.class_prop.u.value); #if ENABLED (JERRY_ES2015_BUILTIN_WEAKMAP) || ENABLED (JERRY_ES2015_BUILTIN_WEAKSET) if ((map_object_p->u.class_prop.extra_info & ECMA_CONTAINER_FLAGS_WEAK) != 0 && !ecma_is_value_object (key_arg)) { return ECMA_VALUE_FALSE; } #endif /* ENABLED (JERRY_ES2015_BUILTIN_WEAKMAP) || ENABLED (JERRY_ES2015_BUILTIN_WEAKSET) */ if (ECMA_CONTAINER_GET_SIZE (container_p) == 0) { return ECMA_VALUE_FALSE; } ecma_value_t *entry_p = ecma_op_internal_buffer_find (container_p, key_arg, lit_id); return ecma_make_boolean_value (entry_p != NULL); } /* ecma_op_container_has */ /** * Set a weak reference from a container to a key object */ static void ecma_op_container_set_weak (ecma_object_t *const key_p, /**< key object */ ecma_extended_object_t *const container_p) /**< container */ { if (JERRY_UNLIKELY (ecma_op_object_is_fast_array (key_p))) { ecma_fast_array_convert_to_normal (key_p); } ecma_string_t *weak_refs_string_p = ecma_get_magic_string (LIT_INTERNAL_MAGIC_STRING_WEAK_REFS); ecma_property_t *property_p = ecma_find_named_property (key_p, weak_refs_string_p); ecma_collection_t *refs_p; if (property_p == NULL) { ecma_property_value_t *value_p = ecma_create_named_data_property (key_p, weak_refs_string_p, ECMA_PROPERTY_CONFIGURABLE_WRITABLE, &property_p); ECMA_CONVERT_DATA_PROPERTY_TO_INTERNAL_PROPERTY (property_p); refs_p = 
ecma_new_collection (); ECMA_SET_INTERNAL_VALUE_POINTER (value_p->value, refs_p); } else { refs_p = ECMA_GET_INTERNAL_VALUE_POINTER (ecma_collection_t, (ECMA_PROPERTY_VALUE_PTR (property_p)->value)); } const ecma_value_t container_value = ecma_make_object_value ((ecma_object_t *) container_p); for (uint32_t i = 0; i < refs_p->item_count; i++) { if (ecma_is_value_empty (refs_p->buffer_p[i])) { refs_p->buffer_p[i] = container_value; return; } } ecma_collection_push_back (refs_p, container_value); } /* ecma_op_container_set_weak */ /** * Helper method for the Map.prototype.set and Set.prototype.add methods to swap the sign of the given value if needed * * See also: * ECMA-262 v6, 23.2.3.1 step 6 * ECMA-262 v6, 23.1.3.9 step 6 * * @return ecma value */ static ecma_value_t ecma_op_container_set_noramlize_zero (ecma_value_t this_arg) /*< this arg */ { if (ecma_is_value_number (this_arg)) { ecma_number_t number_value = ecma_get_number_from_value (this_arg); if (JERRY_UNLIKELY (ecma_number_is_zero (number_value) && ecma_number_is_negative (number_value))) { return ecma_make_integer_value (0); } } return this_arg; } /* ecma_op_container_set_noramlize_zero */ /** * The generic Map prototype object's 'set' and Set prototype object's 'add' routine * * @return ecma value * Returned value must be freed with ecma_free_value. */ ecma_value_t ecma_op_container_set (ecma_value_t this_arg, /**< this argument */ ecma_value_t key_arg, /**< key argument */ ecma_value_t value_arg, /**< value argument */ lit_magic_string_id_t lit_id) /**< internal class id */ { ecma_extended_object_t *map_object_p = ecma_op_container_get_object (this_arg, lit_id); if (map_object_p == NULL) { return ECMA_VALUE_ERROR; } ecma_collection_t *container_p = ECMA_GET_INTERNAL_VALUE_POINTER (ecma_collection_t, map_object_p->u.class_prop.u.value); #if ENABLED (JERRY_ES2015_BUILTIN_WEAKMAP) || ENABLED (JERRY_ES2015_BUILTIN_WEAKSET) if ((map_object_p->u.class_prop.extra_info & ECMA_CONTAINER_FLAGS_WEAK) != 0 && !ecma_is_value_object (key_arg)) { return ecma_raise_type_error (ECMA_ERR_MSG ("Key must be an object")); } #endif /* ENABLED (JERRY_ES2015_BUILTIN_WEAKMAP) || ENABLED (JERRY_ES2015_BUILTIN_WEAKSET) */ ecma_value_t *entry_p = ecma_op_internal_buffer_find (container_p, key_arg, lit_id); if (entry_p == NULL) { ecma_op_internal_buffer_append (container_p, ecma_op_container_set_noramlize_zero (key_arg), value_arg, lit_id); #if ENABLED (JERRY_ES2015_BUILTIN_WEAKMAP) || ENABLED (JERRY_ES2015_BUILTIN_WEAKSET) if ((map_object_p->u.class_prop.extra_info & ECMA_CONTAINER_FLAGS_WEAK) != 0) { ecma_object_t *key_p = ecma_get_object_from_value (key_arg); ecma_op_container_set_weak (key_p, map_object_p); } #endif /* ENABLED (JERRY_ES2015_BUILTIN_WEAKMAP) || ENABLED (JERRY_ES2015_BUILTIN_WEAKSET) */ } else { ecma_op_internal_buffer_update (entry_p, ecma_op_container_set_noramlize_zero (value_arg), lit_id); } ecma_ref_object ((ecma_object_t *) map_object_p); return this_arg; } /* ecma_op_container_set */ /** * The generic Map/Set prototype object's 'forEach' routine * * @return ecma value * Returned value must be freed with ecma_free_value. 
*/ ecma_value_t ecma_op_container_foreach (ecma_value_t this_arg, /**< this argument */ ecma_value_t predicate, /**< callback function */ ecma_value_t predicate_this_arg, /**< this argument for * invoke predicate */ lit_magic_string_id_t lit_id) /**< internal class id */ { ecma_extended_object_t *map_object_p = ecma_op_container_get_object (this_arg, lit_id); if (map_object_p == NULL) { return ECMA_VALUE_ERROR; } if (!ecma_op_is_callable (predicate)) { return ecma_raise_type_error (ECMA_ERR_MSG ("Callback function is not callable.")); } JERRY_ASSERT (ecma_is_value_object (predicate)); ecma_object_t *func_object_p = ecma_get_object_from_value (predicate); ecma_value_t ret_value = ECMA_VALUE_UNDEFINED; ecma_collection_t *container_p = ECMA_GET_INTERNAL_VALUE_POINTER (ecma_collection_t, map_object_p->u.class_prop.u.value); uint8_t entry_size = ecma_op_container_entry_size (lit_id); for (uint32_t i = 0; i < ECMA_CONTAINER_ENTRY_COUNT (container_p); i += entry_size) { ecma_value_t *entry_p = ECMA_CONTAINER_START (container_p) + i; if (ecma_is_value_empty (*entry_p)) { continue; } ecma_value_t key_arg = *entry_p; ecma_value_t value_arg = ecma_op_container_get_value (entry_p, lit_id); ecma_value_t call_args[] = { value_arg, key_arg, this_arg }; ecma_value_t call_value = ecma_op_function_call (func_object_p, predicate_this_arg, call_args, 3); if (ECMA_IS_VALUE_ERROR (call_value)) { ret_value = call_value; break; } ecma_free_value (call_value); } return ret_value; } /* ecma_op_container_foreach */ /** * The Map/Set prototype object's 'clear' routine * * @return ecma value * Returned value must be freed with ecma_free_value. */ ecma_value_t ecma_op_container_clear (ecma_value_t this_arg, /**< this argument */ lit_magic_string_id_t lit_id) /**< internal class id */ { ecma_extended_object_t *map_object_p = ecma_op_container_get_object (this_arg, lit_id); if (map_object_p == NULL) { return ECMA_VALUE_ERROR; } ecma_op_container_free_entries ((ecma_object_t *) map_object_p); return ECMA_VALUE_UNDEFINED; } /* ecma_op_container_clear */ /** * The generic Map/Set prototype object's 'delete' routine * * @return ecma value * Returned value must be freed with ecma_free_value. */ ecma_value_t ecma_op_container_delete (ecma_value_t this_arg, /**< this argument */ ecma_value_t key_arg, /**< key argument */ lit_magic_string_id_t lit_id) /**< internal class id */ { ecma_extended_object_t *map_object_p = ecma_op_container_get_object (this_arg, lit_id); if (map_object_p == NULL) { return ECMA_VALUE_ERROR; } ecma_collection_t *container_p = ECMA_GET_INTERNAL_VALUE_POINTER (ecma_collection_t, map_object_p->u.class_prop.u.value); ecma_value_t *entry_p = ecma_op_internal_buffer_find (container_p, key_arg, lit_id); if (entry_p == NULL) { return ECMA_VALUE_FALSE; } ecma_op_internal_buffer_delete (container_p, (ecma_container_pair_t *) entry_p, lit_id); return ECMA_VALUE_TRUE; } /* ecma_op_container_delete */ /** * The generic WeakMap/WeakSet prototype object's 'delete' routine * * @return ecma value * Returned value must be freed with ecma_free_value. 
*/ ecma_value_t ecma_op_container_delete_weak (ecma_value_t this_arg, /**< this argument */ ecma_value_t key_arg, /**< key argument */ lit_magic_string_id_t lit_id) /**< internal class id */ { ecma_extended_object_t *map_object_p = ecma_op_container_get_object (this_arg, lit_id); if (map_object_p == NULL) { return ECMA_VALUE_ERROR; } if (!ecma_is_value_object (key_arg)) { return ECMA_VALUE_FALSE; } ecma_collection_t *container_p = ECMA_GET_INTERNAL_VALUE_POINTER (ecma_collection_t, map_object_p->u.class_prop.u.value); ecma_value_t *entry_p = ecma_op_internal_buffer_find (container_p, key_arg, lit_id); if (entry_p == NULL) { return ECMA_VALUE_FALSE; } ecma_op_internal_buffer_delete (container_p, (ecma_container_pair_t *) entry_p, lit_id); ecma_object_t *key_object_p = ecma_get_object_from_value (key_arg); ecma_op_container_unref_weak (key_object_p, ecma_make_object_value ((ecma_object_t *) map_object_p)); return ECMA_VALUE_TRUE; } /* ecma_op_container_delete_weak */ /** * Helper function to remove a weak reference to an object. * * @return ecma value * Returned value must be freed with ecma_free_value. */ void ecma_op_container_unref_weak (ecma_object_t *object_p, /**< this argument */ ecma_value_t ref_holder) /**< key argument */ { ecma_string_t *weak_refs_string_p = ecma_get_magic_string (LIT_INTERNAL_MAGIC_STRING_WEAK_REFS); ecma_property_t *property_p = ecma_find_named_property (object_p, weak_refs_string_p); JERRY_ASSERT (property_p != NULL); ecma_collection_t *refs_p = ECMA_GET_INTERNAL_VALUE_POINTER (ecma_collection_t, ECMA_PROPERTY_VALUE_PTR (property_p)->value); for (uint32_t i = 0; i < refs_p->item_count; i++) { if (refs_p->buffer_p[i] == ref_holder) { refs_p->buffer_p[i] = ECMA_VALUE_EMPTY; break; } } } /* ecma_op_container_unref_weak */ /** * Helper function to remove a key/value pair from a weak container object */ void ecma_op_container_remove_weak_entry (ecma_object_t *object_p, /**< internal container object */ ecma_value_t key_arg) /**< key */ { ecma_extended_object_t *map_object_p = (ecma_extended_object_t *) object_p; ecma_collection_t *container_p = ECMA_GET_INTERNAL_VALUE_POINTER (ecma_collection_t, map_object_p->u.class_prop.u.value); ecma_value_t *entry_p = ecma_op_internal_buffer_find (container_p, key_arg, map_object_p->u.class_prop.class_id); JERRY_ASSERT (entry_p != NULL); ecma_op_internal_buffer_delete (container_p, (ecma_container_pair_t *) entry_p, map_object_p->u.class_prop.class_id); } /* ecma_op_container_remove_weak_entry */ #if ENABLED (JERRY_ES2015) /** * The Create{Set, Map}Iterator Abstract operation * * See also: * ECMA-262 v6, 23.1.5.1 * ECMA-262 v6, 23.2.5.1 * * Note: * Returned value must be freed with ecma_free_value. * * @return Map/Set iterator object, if success * error - otherwise */ ecma_value_t ecma_op_container_create_iterator (ecma_value_t this_arg, /**< this argument */ uint8_t type, /**< any combination of * ecma_iterator_type_t bits */ lit_magic_string_id_t lit_id, /**< internal class id */ ecma_builtin_id_t proto_id, /**< prototype builtin id */ ecma_pseudo_array_type_t iterator_type) /**< type of the iterator */ { ecma_extended_object_t *map_object_p = ecma_op_container_get_object (this_arg, lit_id); if (map_object_p == NULL) { return ECMA_VALUE_ERROR; } return ecma_op_create_iterator_object (this_arg, ecma_builtin_get (proto_id), (uint8_t) iterator_type, type); } /* ecma_op_container_create_iterator */ /** * Get the index of the iterator object. * * @return index of the iterator. 
*/ static uint32_t ecma_op_iterator_get_index (ecma_object_t *iter_obj_p) /**< iterator object pointer */ { uint32_t index = ((ecma_extended_object_t *) iter_obj_p)->u.pseudo_array.u1.iterator_index; if (JERRY_UNLIKELY (index == ECMA_ITERATOR_INDEX_LIMIT)) { ecma_string_t *prop_name_p = ecma_get_magic_string (LIT_INTERNAL_MAGIC_STRING_ITERATOR_NEXT_INDEX); ecma_property_t *property_p = ecma_find_named_property (iter_obj_p, prop_name_p); ecma_property_value_t *value_p = ECMA_PROPERTY_VALUE_PTR (property_p); return (uint32_t) (ecma_get_number_from_value (value_p->value)); } return index; } /* ecma_op_iterator_get_index */ /** * Set the index of the iterator object. */ static void ecma_op_iterator_set_index (ecma_object_t *iter_obj_p, /**< iterator object pointer */ uint32_t index) /* iterator index to set */ { if (JERRY_UNLIKELY (index >= ECMA_ITERATOR_INDEX_LIMIT)) { /* After the ECMA_ITERATOR_INDEX_LIMIT limit is reached the [[%Iterator%NextIndex]] property is stored as an internal property */ ecma_string_t *prop_name_p = ecma_get_magic_string (LIT_INTERNAL_MAGIC_STRING_ITERATOR_NEXT_INDEX); ecma_property_t *property_p = ecma_find_named_property (iter_obj_p, prop_name_p); ecma_property_value_t *value_p; if (property_p == NULL) { value_p = ecma_create_named_data_property (iter_obj_p, prop_name_p, ECMA_PROPERTY_FLAG_WRITABLE, &property_p); value_p->value = ecma_make_uint32_value (index); } else { value_p = ECMA_PROPERTY_VALUE_PTR (property_p); value_p->value = ecma_make_uint32_value (index); } } else { ((ecma_extended_object_t *) iter_obj_p)->u.pseudo_array.u1.iterator_index = (uint16_t) index; } } /* ecma_op_iterator_set_index */ /** * The %{Set, Map}IteratorPrototype% object's 'next' routine * * See also: * ECMA-262 v6, 23.1.5.2.1 * ECMA-262 v6, 23.2.5.2.1 * * Note: * Returned value must be freed with ecma_free_value. 
* * @return iterator result object, if success * error - otherwise */ ecma_value_t ecma_op_container_iterator_next (ecma_value_t this_val, /**< this argument */ ecma_pseudo_array_type_t iterator_type) /**< type of the iterator */ { if (!ecma_is_value_object (this_val)) { return ecma_raise_type_error (ECMA_ERR_MSG ("Argument 'this' is not an object.")); } ecma_object_t *obj_p = ecma_get_object_from_value (this_val); ecma_extended_object_t *ext_obj_p = (ecma_extended_object_t *) obj_p; if (ecma_get_object_type (obj_p) != ECMA_OBJECT_TYPE_PSEUDO_ARRAY || ext_obj_p->u.pseudo_array.type != iterator_type) { return ecma_raise_type_error (ECMA_ERR_MSG ("Argument 'this' is not an iterator.")); } ecma_value_t iterated_value = ext_obj_p->u.pseudo_array.u2.iterated_value; if (ecma_is_value_empty (iterated_value)) { return ecma_create_iter_result_object (ECMA_VALUE_UNDEFINED, ECMA_VALUE_TRUE); } ecma_extended_object_t *map_object_p = (ecma_extended_object_t *) (ecma_get_object_from_value (iterated_value)); lit_magic_string_id_t lit_id = map_object_p->u.class_prop.class_id; ecma_collection_t *container_p = ECMA_GET_INTERNAL_VALUE_POINTER (ecma_collection_t, map_object_p->u.class_prop.u.value); uint32_t entry_count = ECMA_CONTAINER_ENTRY_COUNT (container_p); uint32_t index = ecma_op_iterator_get_index (obj_p); if (index == entry_count) { ext_obj_p->u.pseudo_array.u2.iterated_value = ECMA_VALUE_EMPTY; return ecma_create_iter_result_object (ECMA_VALUE_UNDEFINED, ECMA_VALUE_TRUE); } uint8_t entry_size = ecma_op_container_entry_size (lit_id); uint8_t iterator_kind = ext_obj_p->u.pseudo_array.extra_info; ecma_value_t *start_p = ECMA_CONTAINER_START (container_p); ecma_value_t ret_value = ECMA_VALUE_UNDEFINED; for (uint32_t i = index; i < entry_count; i += entry_size) { ecma_value_t *entry_p = start_p + i; if (ecma_is_value_empty (*entry_p)) { if (i == (entry_count - entry_size)) { ret_value = ecma_create_iter_result_object (ECMA_VALUE_UNDEFINED, ECMA_VALUE_TRUE); break; } continue; } ecma_op_iterator_set_index (obj_p, i + entry_size); ecma_value_t key_arg = *entry_p; ecma_value_t value_arg = ecma_op_container_get_value (entry_p, lit_id); if (iterator_kind == ECMA_ITERATOR_KEYS) { ret_value = ecma_create_iter_result_object (key_arg, ECMA_VALUE_FALSE); } else if (iterator_kind == ECMA_ITERATOR_VALUES) { ret_value = ecma_create_iter_result_object (value_arg, ECMA_VALUE_FALSE); } else { JERRY_ASSERT (iterator_kind == ECMA_ITERATOR_KEYS_VALUES); ecma_value_t entry_array_value; entry_array_value = ecma_create_array_from_iter_element (value_arg, key_arg); ret_value = ecma_create_iter_result_object (entry_array_value, ECMA_VALUE_FALSE); ecma_free_value (entry_array_value); } break; } return ret_value; } /* ecma_op_container_iterator_next */ #endif /* ENABLED (JERRY_ES2015) */ /** * @} * @} */ #endif /* ENABLED (JERRY_ES2015_BUILTIN_CONTAINER) */
ecma_op_internal_buffer_append (ecma_collection_t *container_p, /**< internal container pointer */ ecma_value_t key_arg, /**< key argument */ ecma_value_t value_arg, /**< value argument */ lit_magic_string_id_t lit_id) /**< class id */ { JERRY_ASSERT (container_p != NULL); ecma_collection_push_back (container_p, ecma_copy_value_if_not_object (key_arg)); if (lit_id == LIT_MAGIC_STRING_WEAKMAP_UL || lit_id == LIT_MAGIC_STRING_MAP_UL) { ecma_collection_push_back (container_p, ecma_copy_value_if_not_object (value_arg)); } ECMA_CONTAINER_SET_SIZE (container_p, ECMA_CONTAINER_GET_SIZE (container_p) + 1); } /* ecma_op_internal_buffer_append */
ecma_op_internal_buffer_append (ecma_collection_t *container_p, /**< internal container pointer */ ecma_value_t key_arg, /**< key argument */ ecma_value_t value_arg, /**< value argument */ lit_magic_string_id_t lit_id) /**< class id */ { JERRY_ASSERT (container_p != NULL); if (lit_id == LIT_MAGIC_STRING_WEAKMAP_UL || lit_id == LIT_MAGIC_STRING_MAP_UL) { ecma_value_t values[] = { ecma_copy_value_if_not_object (key_arg), ecma_copy_value_if_not_object (value_arg) }; ecma_collection_append (container_p, values, 2); } else { ecma_collection_push_back (container_p, ecma_copy_value_if_not_object (key_arg)); } ECMA_CONTAINER_SET_SIZE (container_p, ECMA_CONTAINER_GET_SIZE (container_p) + 1); } /* ecma_op_internal_buffer_append */
{'added': [(69, ' ecma_value_t values[] = { ecma_copy_value_if_not_object (key_arg), ecma_copy_value_if_not_object (value_arg) };'), (70, ' ecma_collection_append (container_p, values, 2);'), (71, ' }'), (72, ' else'), (73, ' {'), (74, ' ecma_collection_push_back (container_p, ecma_copy_value_if_not_object (key_arg));')], 'deleted': [(67, ' ecma_collection_push_back (container_p, ecma_copy_value_if_not_object (key_arg));'), (68, ''), (71, ' ecma_collection_push_back (container_p, ecma_copy_value_if_not_object (value_arg));')]}
6
3
787
3834
https://github.com/jerryscript-project/jerryscript
CVE-2020-14163
['CWE-125']
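Editor's note on the JerryScript record above (CVE-2020-14163, CWE-125): the diff replaces two separate ecma_collection_push_back calls, between which the container transiently held a key without its value, with a single two-element ecma_collection_append, so code that walks the internal buffer in key/value strides never observes a torn pair. Below is a minimal illustrative sketch of that pattern in plain C; pair_buffer_t and every function name are hypothetical stand-ins, not JerryScript APIs.

/* Sketch: append a key/value pair atomically to a growable flat buffer,
 * so readers iterating in strides of two never see a half-written entry.
 * All names here are hypothetical. */
#include <assert.h>
#include <stdlib.h>

typedef struct {
  int *items;      /* flat buffer: key, value, key, value, ... */
  size_t count;    /* ints currently stored; always a multiple of 2 */
  size_t capacity; /* ints allocated */
} pair_buffer_t;

static int pair_buffer_append (pair_buffer_t *buf, int key, int value)
{
  if (buf->count + 2 > buf->capacity) {
    size_t new_cap = buf->capacity ? buf->capacity * 2 : 8;
    int *p = realloc (buf->items, new_cap * sizeof (int));
    if (p == NULL)
      return -1;               /* original buffer stays valid on failure */
    buf->items = p;
    buf->capacity = new_cap;
  }
  /* both halves are written before the new count is published */
  buf->items[buf->count] = key;
  buf->items[buf->count + 1] = value;
  buf->count += 2;
  assert (buf->count % 2 == 0);
  return 0;
}

int main (void)
{
  pair_buffer_t buf = { NULL, 0, 0 };
  for (int i = 0; i < 100; i++)
    if (pair_buffer_append (&buf, i, i * i) != 0)
      return 1;
  free (buf.items);
  return 0;
}

The single-append design matters because anything that runs between two separate pushes (a reallocation, or in a garbage-collected VM a collection pass) can observe or move a buffer whose length is not a multiple of the entry size.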
nf_nat_redirect.c
nf_nat_redirect_ipv4
/* * (C) 1999-2001 Paul `Rusty' Russell * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org> * Copyright (c) 2011 Patrick McHardy <kaber@trash.net> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * Based on Rusty Russell's IPv4 REDIRECT target. Development of IPv6 * NAT funded by Astaro. */ #include <linux/if.h> #include <linux/inetdevice.h> #include <linux/ip.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/netdevice.h> #include <linux/netfilter.h> #include <linux/types.h> #include <linux/netfilter_ipv4.h> #include <linux/netfilter_ipv6.h> #include <linux/netfilter/x_tables.h> #include <net/addrconf.h> #include <net/checksum.h> #include <net/protocol.h> #include <net/netfilter/nf_nat.h> #include <net/netfilter/nf_nat_redirect.h> unsigned int nf_nat_redirect_ipv4(struct sk_buff *skb, const struct nf_nat_ipv4_multi_range_compat *mr, unsigned int hooknum) { struct nf_conn *ct; enum ip_conntrack_info ctinfo; __be32 newdst; struct nf_nat_range newrange; NF_CT_ASSERT(hooknum == NF_INET_PRE_ROUTING || hooknum == NF_INET_LOCAL_OUT); ct = nf_ct_get(skb, &ctinfo); NF_CT_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED)); /* Local packets: make them go to loopback */ if (hooknum == NF_INET_LOCAL_OUT) { newdst = htonl(0x7F000001); } else { struct in_device *indev; struct in_ifaddr *ifa; newdst = 0; rcu_read_lock(); indev = __in_dev_get_rcu(skb->dev); if (indev != NULL) { ifa = indev->ifa_list; newdst = ifa->ifa_local; } rcu_read_unlock(); if (!newdst) return NF_DROP; } /* Transfer from original range. */ memset(&newrange.min_addr, 0, sizeof(newrange.min_addr)); memset(&newrange.max_addr, 0, sizeof(newrange.max_addr)); newrange.flags = mr->range[0].flags | NF_NAT_RANGE_MAP_IPS; newrange.min_addr.ip = newdst; newrange.max_addr.ip = newdst; newrange.min_proto = mr->range[0].min; newrange.max_proto = mr->range[0].max; /* Hand modified range to generic setup. */ return nf_nat_setup_info(ct, &newrange, NF_NAT_MANIP_DST); } EXPORT_SYMBOL_GPL(nf_nat_redirect_ipv4); static const struct in6_addr loopback_addr = IN6ADDR_LOOPBACK_INIT; unsigned int nf_nat_redirect_ipv6(struct sk_buff *skb, const struct nf_nat_range *range, unsigned int hooknum) { struct nf_nat_range newrange; struct in6_addr newdst; enum ip_conntrack_info ctinfo; struct nf_conn *ct; ct = nf_ct_get(skb, &ctinfo); if (hooknum == NF_INET_LOCAL_OUT) { newdst = loopback_addr; } else { struct inet6_dev *idev; struct inet6_ifaddr *ifa; bool addr = false; rcu_read_lock(); idev = __in6_dev_get(skb->dev); if (idev != NULL) { list_for_each_entry(ifa, &idev->addr_list, if_list) { newdst = ifa->addr; addr = true; break; } } rcu_read_unlock(); if (!addr) return NF_DROP; } newrange.flags = range->flags | NF_NAT_RANGE_MAP_IPS; newrange.min_addr.in6 = newdst; newrange.max_addr.in6 = newdst; newrange.min_proto = range->min_proto; newrange.max_proto = range->max_proto; return nf_nat_setup_info(ct, &newrange, NF_NAT_MANIP_DST); } EXPORT_SYMBOL_GPL(nf_nat_redirect_ipv6); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
/* * (C) 1999-2001 Paul `Rusty' Russell * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org> * Copyright (c) 2011 Patrick McHardy <kaber@trash.net> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * Based on Rusty Russell's IPv4 REDIRECT target. Development of IPv6 * NAT funded by Astaro. */ #include <linux/if.h> #include <linux/inetdevice.h> #include <linux/ip.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/netdevice.h> #include <linux/netfilter.h> #include <linux/types.h> #include <linux/netfilter_ipv4.h> #include <linux/netfilter_ipv6.h> #include <linux/netfilter/x_tables.h> #include <net/addrconf.h> #include <net/checksum.h> #include <net/protocol.h> #include <net/netfilter/nf_nat.h> #include <net/netfilter/nf_nat_redirect.h> unsigned int nf_nat_redirect_ipv4(struct sk_buff *skb, const struct nf_nat_ipv4_multi_range_compat *mr, unsigned int hooknum) { struct nf_conn *ct; enum ip_conntrack_info ctinfo; __be32 newdst; struct nf_nat_range newrange; NF_CT_ASSERT(hooknum == NF_INET_PRE_ROUTING || hooknum == NF_INET_LOCAL_OUT); ct = nf_ct_get(skb, &ctinfo); NF_CT_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED)); /* Local packets: make them go to loopback */ if (hooknum == NF_INET_LOCAL_OUT) { newdst = htonl(0x7F000001); } else { struct in_device *indev; struct in_ifaddr *ifa; newdst = 0; rcu_read_lock(); indev = __in_dev_get_rcu(skb->dev); if (indev && indev->ifa_list) { ifa = indev->ifa_list; newdst = ifa->ifa_local; } rcu_read_unlock(); if (!newdst) return NF_DROP; } /* Transfer from original range. */ memset(&newrange.min_addr, 0, sizeof(newrange.min_addr)); memset(&newrange.max_addr, 0, sizeof(newrange.max_addr)); newrange.flags = mr->range[0].flags | NF_NAT_RANGE_MAP_IPS; newrange.min_addr.ip = newdst; newrange.max_addr.ip = newdst; newrange.min_proto = mr->range[0].min; newrange.max_proto = mr->range[0].max; /* Hand modified range to generic setup. */ return nf_nat_setup_info(ct, &newrange, NF_NAT_MANIP_DST); } EXPORT_SYMBOL_GPL(nf_nat_redirect_ipv4); static const struct in6_addr loopback_addr = IN6ADDR_LOOPBACK_INIT; unsigned int nf_nat_redirect_ipv6(struct sk_buff *skb, const struct nf_nat_range *range, unsigned int hooknum) { struct nf_nat_range newrange; struct in6_addr newdst; enum ip_conntrack_info ctinfo; struct nf_conn *ct; ct = nf_ct_get(skb, &ctinfo); if (hooknum == NF_INET_LOCAL_OUT) { newdst = loopback_addr; } else { struct inet6_dev *idev; struct inet6_ifaddr *ifa; bool addr = false; rcu_read_lock(); idev = __in6_dev_get(skb->dev); if (idev != NULL) { list_for_each_entry(ifa, &idev->addr_list, if_list) { newdst = ifa->addr; addr = true; break; } } rcu_read_unlock(); if (!addr) return NF_DROP; } newrange.flags = range->flags | NF_NAT_RANGE_MAP_IPS; newrange.min_addr.in6 = newdst; newrange.max_addr.in6 = newdst; newrange.min_proto = range->min_proto; newrange.max_proto = range->max_proto; return nf_nat_setup_info(ct, &newrange, NF_NAT_MANIP_DST); } EXPORT_SYMBOL_GPL(nf_nat_redirect_ipv6); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
nf_nat_redirect_ipv4(struct sk_buff *skb, const struct nf_nat_ipv4_multi_range_compat *mr, unsigned int hooknum) { struct nf_conn *ct; enum ip_conntrack_info ctinfo; __be32 newdst; struct nf_nat_range newrange; NF_CT_ASSERT(hooknum == NF_INET_PRE_ROUTING || hooknum == NF_INET_LOCAL_OUT); ct = nf_ct_get(skb, &ctinfo); NF_CT_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED)); /* Local packets: make them go to loopback */ if (hooknum == NF_INET_LOCAL_OUT) { newdst = htonl(0x7F000001); } else { struct in_device *indev; struct in_ifaddr *ifa; newdst = 0; rcu_read_lock(); indev = __in_dev_get_rcu(skb->dev); if (indev != NULL) { ifa = indev->ifa_list; newdst = ifa->ifa_local; } rcu_read_unlock(); if (!newdst) return NF_DROP; } /* Transfer from original range. */ memset(&newrange.min_addr, 0, sizeof(newrange.min_addr)); memset(&newrange.max_addr, 0, sizeof(newrange.max_addr)); newrange.flags = mr->range[0].flags | NF_NAT_RANGE_MAP_IPS; newrange.min_addr.ip = newdst; newrange.max_addr.ip = newdst; newrange.min_proto = mr->range[0].min; newrange.max_proto = mr->range[0].max; /* Hand modified range to generic setup. */ return nf_nat_setup_info(ct, &newrange, NF_NAT_MANIP_DST); }
nf_nat_redirect_ipv4(struct sk_buff *skb, const struct nf_nat_ipv4_multi_range_compat *mr, unsigned int hooknum) { struct nf_conn *ct; enum ip_conntrack_info ctinfo; __be32 newdst; struct nf_nat_range newrange; NF_CT_ASSERT(hooknum == NF_INET_PRE_ROUTING || hooknum == NF_INET_LOCAL_OUT); ct = nf_ct_get(skb, &ctinfo); NF_CT_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED)); /* Local packets: make them go to loopback */ if (hooknum == NF_INET_LOCAL_OUT) { newdst = htonl(0x7F000001); } else { struct in_device *indev; struct in_ifaddr *ifa; newdst = 0; rcu_read_lock(); indev = __in_dev_get_rcu(skb->dev); if (indev && indev->ifa_list) { ifa = indev->ifa_list; newdst = ifa->ifa_local; } rcu_read_unlock(); if (!newdst) return NF_DROP; } /* Transfer from original range. */ memset(&newrange.min_addr, 0, sizeof(newrange.min_addr)); memset(&newrange.max_addr, 0, sizeof(newrange.max_addr)); newrange.flags = mr->range[0].flags | NF_NAT_RANGE_MAP_IPS; newrange.min_addr.ip = newdst; newrange.max_addr.ip = newdst; newrange.min_proto = mr->range[0].min; newrange.max_proto = mr->range[0].max; /* Hand modified range to generic setup. */ return nf_nat_setup_info(ct, &newrange, NF_NAT_MANIP_DST); }
{'added': [(58, '\t\tif (indev && indev->ifa_list) {')], 'deleted': [(58, '\t\tif (indev != NULL) {')]}
1
1
94
503
https://github.com/torvalds/linux
CVE-2015-8787
['CWE-476']
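Editor's note on the kernel record above (CVE-2015-8787, CWE-476): a network device can exist while its IPv4 address list is empty, so indev->ifa_list may be NULL; the one-line fix checks both the device and the list head before ifa->ifa_local is read. A minimal stand-alone C sketch of the pattern, with hypothetical struct names standing in for the kernel's in_device/in_ifaddr:

/* Sketch: guard both the container and its list head before dereferencing
 * the first element. Types are hypothetical stand-ins for kernel structs. */
#include <stddef.h>
#include <stdint.h>

struct ifaddr { uint32_t local; struct ifaddr *next; };
struct indev  { struct ifaddr *ifa_list; };

static uint32_t first_local_addr (const struct indev *dev)
{
  if (dev && dev->ifa_list)    /* the fixed condition */
    return dev->ifa_list->local;
  return 0;                    /* 0 means "no address"; the caller in the
                                * record drops the packet in that case */
}

int main (void)
{
  struct indev no_addrs = { NULL };
  return first_local_addr (&no_addrs) == 0 ? 0 : 1; /* must not crash */
}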
fiber.c
fiber_switch
#include <mruby.h> #include <mruby/array.h> #include <mruby/class.h> #include <mruby/proc.h> #define fiber_ptr(o) ((struct RFiber*)mrb_ptr(o)) #define FIBER_STACK_INIT_SIZE 64 #define FIBER_CI_INIT_SIZE 8 #define CI_ACC_RESUMED -3 /* * call-seq: * Fiber.new{...} -> obj * * Creates a fiber, whose execution is suspend until it is explicitly * resumed using <code>Fiber#resume</code> method. * The code running inside the fiber can give up control by calling * <code>Fiber.yield</code> in which case it yields control back to caller * (the caller of the <code>Fiber#resume</code>). * * Upon yielding or termination the Fiber returns the value of the last * executed expression * * For instance: * * fiber = Fiber.new do * Fiber.yield 1 * 2 * end * * puts fiber.resume * puts fiber.resume * puts fiber.resume * * <em>produces</em> * * 1 * 2 * resuming dead fiber (FiberError) * * The <code>Fiber#resume</code> method accepts an arbitrary number of * parameters, if it is the first call to <code>resume</code> then they * will be passed as block arguments. Otherwise they will be the return * value of the call to <code>Fiber.yield</code> * * Example: * * fiber = Fiber.new do |first| * second = Fiber.yield first + 2 * end * * puts fiber.resume 10 * puts fiber.resume 14 * puts fiber.resume 18 * * <em>produces</em> * * 12 * 14 * resuming dead fiber (FiberError) * */ static mrb_value fiber_init(mrb_state *mrb, mrb_value self) { static const struct mrb_context mrb_context_zero = { 0 }; struct RFiber *f = fiber_ptr(self); struct mrb_context *c; struct RProc *p; mrb_callinfo *ci; mrb_value blk; size_t slen; mrb_get_args(mrb, "&", &blk); if (f->cxt) { mrb_raise(mrb, E_RUNTIME_ERROR, "cannot initialize twice"); } if (mrb_nil_p(blk)) { mrb_raise(mrb, E_ARGUMENT_ERROR, "tried to create Fiber object without a block"); } p = mrb_proc_ptr(blk); if (MRB_PROC_CFUNC_P(p)) { mrb_raise(mrb, E_FIBER_ERROR, "tried to create Fiber from C defined method"); } c = (struct mrb_context*)mrb_malloc(mrb, sizeof(struct mrb_context)); *c = mrb_context_zero; f->cxt = c; /* initialize VM stack */ slen = FIBER_STACK_INIT_SIZE; if (p->body.irep->nregs > slen) { slen += p->body.irep->nregs; } c->stbase = (mrb_value *)mrb_malloc(mrb, slen*sizeof(mrb_value)); c->stend = c->stbase + slen; c->stack = c->stbase; #ifdef MRB_NAN_BOXING { mrb_value *p = c->stbase; mrb_value *pend = c->stend; while (p < pend) { SET_NIL_VALUE(*p); p++; } } #else memset(c->stbase, 0, slen * sizeof(mrb_value)); #endif /* copy receiver from a block */ c->stack[0] = mrb->c->stack[0]; /* initialize callinfo stack */ c->cibase = (mrb_callinfo *)mrb_calloc(mrb, FIBER_CI_INIT_SIZE, sizeof(mrb_callinfo)); c->ciend = c->cibase + FIBER_CI_INIT_SIZE; c->ci = c->cibase; c->ci->stackent = c->stack; /* adjust return callinfo */ ci = c->ci; ci->target_class = MRB_PROC_TARGET_CLASS(p); ci->proc = p; mrb_field_write_barrier(mrb, (struct RBasic*)mrb_obj_ptr(self), (struct RBasic*)p); ci->pc = p->body.irep->iseq; ci->nregs = p->body.irep->nregs; ci[1] = ci[0]; c->ci++; /* push dummy callinfo */ c->fib = f; c->status = MRB_FIBER_CREATED; return self; } static struct mrb_context* fiber_check(mrb_state *mrb, mrb_value fib) { struct RFiber *f = fiber_ptr(fib); mrb_assert(f->tt == MRB_TT_FIBER); if (!f->cxt) { mrb_raise(mrb, E_FIBER_ERROR, "uninitialized Fiber"); } return f->cxt; } static mrb_value fiber_result(mrb_state *mrb, const mrb_value *a, mrb_int len) { if (len == 0) return mrb_nil_value(); if (len == 1) return a[0]; return mrb_ary_new_from_values(mrb, len, a); } /* mark return from 
context modifying method */ #define MARK_CONTEXT_MODIFY(c) (c)->ci->target_class = NULL static void fiber_check_cfunc(mrb_state *mrb, struct mrb_context *c) { mrb_callinfo *ci; for (ci = c->ci; ci >= c->cibase; ci--) { if (ci->acc < 0) { mrb_raise(mrb, E_FIBER_ERROR, "can't cross C function boundary"); } } } static void fiber_switch_context(mrb_state *mrb, struct mrb_context *c) { c->status = MRB_FIBER_RUNNING; mrb->c = c; } static mrb_value fiber_switch(mrb_state *mrb, mrb_value self, mrb_int len, const mrb_value *a, mrb_bool resume, mrb_bool vmexec) { struct mrb_context *c = fiber_check(mrb, self); struct mrb_context *old_c = mrb->c; mrb_value value; fiber_check_cfunc(mrb, c); if (resume && c->status == MRB_FIBER_TRANSFERRED) { mrb_raise(mrb, E_FIBER_ERROR, "resuming transferred fiber"); } if (c->status == MRB_FIBER_RUNNING || c->status == MRB_FIBER_RESUMED) { mrb_raise(mrb, E_FIBER_ERROR, "double resume (fib)"); } if (c->status == MRB_FIBER_TERMINATED) { mrb_raise(mrb, E_FIBER_ERROR, "resuming dead fiber"); } mrb->c->status = resume ? MRB_FIBER_RESUMED : MRB_FIBER_TRANSFERRED; c->prev = resume ? mrb->c : (c->prev ? c->prev : mrb->root_c); if (c->status == MRB_FIBER_CREATED) { mrb_value *b, *e; if (len >= c->stend - c->stack) { mrb_raise(mrb, E_FIBER_ERROR, "too many arguments to fiber"); } b = c->stack+1; e = b + len; while (b<e) { *b++ = *a++; } c->cibase->argc = (int)len; value = c->stack[0] = MRB_PROC_ENV(c->ci->proc)->stack[0]; } else { value = fiber_result(mrb, a, len); } fiber_switch_context(mrb, c); if (vmexec) { c->vmexec = TRUE; value = mrb_vm_exec(mrb, c->ci[-1].proc, c->ci->pc); mrb->c = old_c; } else { MARK_CONTEXT_MODIFY(c); } return value; } /* * call-seq: * fiber.resume(args, ...) -> obj * * Resumes the fiber from the point at which the last <code>Fiber.yield</code> * was called, or starts running it if it is the first call to * <code>resume</code>. Arguments passed to resume will be the value of * the <code>Fiber.yield</code> expression or will be passed as block * parameters to the fiber's block if this is the first <code>resume</code>. * * Alternatively, when resume is called it evaluates to the arguments passed * to the next <code>Fiber.yield</code> statement inside the fiber's block * or to the block value if it runs to completion without any * <code>Fiber.yield</code> */ static mrb_value fiber_resume(mrb_state *mrb, mrb_value self) { mrb_value *a; mrb_int len; mrb_bool vmexec = FALSE; mrb_get_args(mrb, "*!", &a, &len); if (mrb->c->ci->acc < 0) { vmexec = TRUE; } return fiber_switch(mrb, self, len, a, TRUE, vmexec); } /* resume thread with given arguments */ MRB_API mrb_value mrb_fiber_resume(mrb_state *mrb, mrb_value fib, mrb_int len, const mrb_value *a) { return fiber_switch(mrb, fib, len, a, TRUE, TRUE); } /* * call-seq: * fiber.alive? -> true or false * * Returns true if the fiber can still be resumed. After finishing * execution of the fiber block this method will always return false. */ MRB_API mrb_value mrb_fiber_alive_p(mrb_state *mrb, mrb_value self) { struct mrb_context *c = fiber_check(mrb, self); return mrb_bool_value(c->status != MRB_FIBER_TERMINATED); } #define fiber_alive_p mrb_fiber_alive_p static mrb_value fiber_eq(mrb_state *mrb, mrb_value self) { mrb_value other; mrb_get_args(mrb, "o", &other); if (mrb_type(other) != MRB_TT_FIBER) { return mrb_false_value(); } return mrb_bool_value(fiber_ptr(self) == fiber_ptr(other)); } /* * call-seq: * fiber.transfer(args, ...) -> obj * * Transfers control to receiver fiber of the method call. 
* Unlike <code>resume</code> the receiver wouldn't be pushed to call * stack of fibers. Instead it will switch to the call stack of * transferring fiber. * When resuming a fiber that was transferred to another fiber it would * cause double resume error. Though when the fiber is re-transferred * and <code>Fiber.yield</code> is called, the fiber would be resumable. */ static mrb_value fiber_transfer(mrb_state *mrb, mrb_value self) { struct mrb_context *c = fiber_check(mrb, self); mrb_value* a; mrb_int len; fiber_check_cfunc(mrb, mrb->c); mrb_get_args(mrb, "*!", &a, &len); if (c == mrb->root_c) { mrb->c->status = MRB_FIBER_TRANSFERRED; fiber_switch_context(mrb, c); MARK_CONTEXT_MODIFY(c); return fiber_result(mrb, a, len); } if (c == mrb->c) { return fiber_result(mrb, a, len); } return fiber_switch(mrb, self, len, a, FALSE, FALSE); } /* yield values to the caller fiber */ /* mrb_fiber_yield() must be called as `return mrb_fiber_yield(...)` */ MRB_API mrb_value mrb_fiber_yield(mrb_state *mrb, mrb_int len, const mrb_value *a) { struct mrb_context *c = mrb->c; if (!c->prev) { mrb_raise(mrb, E_FIBER_ERROR, "can't yield from root fiber"); } fiber_check_cfunc(mrb, c); c->prev->status = MRB_FIBER_RUNNING; c->status = MRB_FIBER_SUSPENDED; fiber_switch_context(mrb, c->prev); c->prev = NULL; if (c->vmexec) { c->vmexec = FALSE; mrb->c->ci->acc = CI_ACC_RESUMED; } MARK_CONTEXT_MODIFY(mrb->c); return fiber_result(mrb, a, len); } /* * call-seq: * Fiber.yield(args, ...) -> obj * * Yields control back to the context that resumed the fiber, passing * along any arguments that were passed to it. The fiber will resume * processing at this point when <code>resume</code> is called next. * Any arguments passed to the next <code>resume</code> will be the * * mruby limitation: Fiber resume/yield cannot cross C function boundary. * thus you cannot yield from #initialize which is called by mrb_funcall(). */ static mrb_value fiber_yield(mrb_state *mrb, mrb_value self) { mrb_value *a; mrb_int len; mrb_get_args(mrb, "*!", &a, &len); return mrb_fiber_yield(mrb, len, a); } /* * call-seq: * Fiber.current() -> fiber * * Returns the current fiber. If you are not running in the context of * a fiber this method will return the root fiber. */ static mrb_value fiber_current(mrb_state *mrb, mrb_value self) { if (!mrb->c->fib) { struct RFiber *f = (struct RFiber*)mrb_obj_alloc(mrb, MRB_TT_FIBER, mrb_class_ptr(self)); f->cxt = mrb->c; mrb->c->fib = f; } return mrb_obj_value(mrb->c->fib); } void mrb_mruby_fiber_gem_init(mrb_state* mrb) { struct RClass *c; c = mrb_define_class(mrb, "Fiber", mrb->object_class); MRB_SET_INSTANCE_TT(c, MRB_TT_FIBER); mrb_define_method(mrb, c, "initialize", fiber_init, MRB_ARGS_NONE()); mrb_define_method(mrb, c, "resume", fiber_resume, MRB_ARGS_ANY()); mrb_define_method(mrb, c, "transfer", fiber_transfer, MRB_ARGS_ANY()); mrb_define_method(mrb, c, "alive?", fiber_alive_p, MRB_ARGS_NONE()); mrb_define_method(mrb, c, "==", fiber_eq, MRB_ARGS_REQ(1)); mrb_define_class_method(mrb, c, "yield", fiber_yield, MRB_ARGS_ANY()); mrb_define_class_method(mrb, c, "current", fiber_current, MRB_ARGS_NONE()); mrb_define_class(mrb, "FiberError", mrb->eStandardError_class); } void mrb_mruby_fiber_gem_final(mrb_state* mrb) { }
#include <mruby.h> #include <mruby/array.h> #include <mruby/class.h> #include <mruby/proc.h> #define fiber_ptr(o) ((struct RFiber*)mrb_ptr(o)) #define FIBER_STACK_INIT_SIZE 64 #define FIBER_CI_INIT_SIZE 8 #define CI_ACC_RESUMED -3 /* * call-seq: * Fiber.new{...} -> obj * * Creates a fiber, whose execution is suspend until it is explicitly * resumed using <code>Fiber#resume</code> method. * The code running inside the fiber can give up control by calling * <code>Fiber.yield</code> in which case it yields control back to caller * (the caller of the <code>Fiber#resume</code>). * * Upon yielding or termination the Fiber returns the value of the last * executed expression * * For instance: * * fiber = Fiber.new do * Fiber.yield 1 * 2 * end * * puts fiber.resume * puts fiber.resume * puts fiber.resume * * <em>produces</em> * * 1 * 2 * resuming dead fiber (FiberError) * * The <code>Fiber#resume</code> method accepts an arbitrary number of * parameters, if it is the first call to <code>resume</code> then they * will be passed as block arguments. Otherwise they will be the return * value of the call to <code>Fiber.yield</code> * * Example: * * fiber = Fiber.new do |first| * second = Fiber.yield first + 2 * end * * puts fiber.resume 10 * puts fiber.resume 14 * puts fiber.resume 18 * * <em>produces</em> * * 12 * 14 * resuming dead fiber (FiberError) * */ static mrb_value fiber_init(mrb_state *mrb, mrb_value self) { static const struct mrb_context mrb_context_zero = { 0 }; struct RFiber *f = fiber_ptr(self); struct mrb_context *c; struct RProc *p; mrb_callinfo *ci; mrb_value blk; size_t slen; mrb_get_args(mrb, "&", &blk); if (f->cxt) { mrb_raise(mrb, E_RUNTIME_ERROR, "cannot initialize twice"); } if (mrb_nil_p(blk)) { mrb_raise(mrb, E_ARGUMENT_ERROR, "tried to create Fiber object without a block"); } p = mrb_proc_ptr(blk); if (MRB_PROC_CFUNC_P(p)) { mrb_raise(mrb, E_FIBER_ERROR, "tried to create Fiber from C defined method"); } c = (struct mrb_context*)mrb_malloc(mrb, sizeof(struct mrb_context)); *c = mrb_context_zero; f->cxt = c; /* initialize VM stack */ slen = FIBER_STACK_INIT_SIZE; if (p->body.irep->nregs > slen) { slen += p->body.irep->nregs; } c->stbase = (mrb_value *)mrb_malloc(mrb, slen*sizeof(mrb_value)); c->stend = c->stbase + slen; c->stack = c->stbase; #ifdef MRB_NAN_BOXING { mrb_value *p = c->stbase; mrb_value *pend = c->stend; while (p < pend) { SET_NIL_VALUE(*p); p++; } } #else memset(c->stbase, 0, slen * sizeof(mrb_value)); #endif /* copy receiver from a block */ c->stack[0] = mrb->c->stack[0]; /* initialize callinfo stack */ c->cibase = (mrb_callinfo *)mrb_calloc(mrb, FIBER_CI_INIT_SIZE, sizeof(mrb_callinfo)); c->ciend = c->cibase + FIBER_CI_INIT_SIZE; c->ci = c->cibase; c->ci->stackent = c->stack; /* adjust return callinfo */ ci = c->ci; ci->target_class = MRB_PROC_TARGET_CLASS(p); ci->proc = p; mrb_field_write_barrier(mrb, (struct RBasic*)mrb_obj_ptr(self), (struct RBasic*)p); ci->pc = p->body.irep->iseq; ci->nregs = p->body.irep->nregs; ci[1] = ci[0]; c->ci++; /* push dummy callinfo */ c->fib = f; c->status = MRB_FIBER_CREATED; return self; } static struct mrb_context* fiber_check(mrb_state *mrb, mrb_value fib) { struct RFiber *f = fiber_ptr(fib); mrb_assert(f->tt == MRB_TT_FIBER); if (!f->cxt) { mrb_raise(mrb, E_FIBER_ERROR, "uninitialized Fiber"); } return f->cxt; } static mrb_value fiber_result(mrb_state *mrb, const mrb_value *a, mrb_int len) { if (len == 0) return mrb_nil_value(); if (len == 1) return a[0]; return mrb_ary_new_from_values(mrb, len, a); } /* mark return from 
context modifying method */ #define MARK_CONTEXT_MODIFY(c) (c)->ci->target_class = NULL static void fiber_check_cfunc(mrb_state *mrb, struct mrb_context *c) { mrb_callinfo *ci; for (ci = c->ci; ci >= c->cibase; ci--) { if (ci->acc < 0) { mrb_raise(mrb, E_FIBER_ERROR, "can't cross C function boundary"); } } } static void fiber_switch_context(mrb_state *mrb, struct mrb_context *c) { c->status = MRB_FIBER_RUNNING; mrb->c = c; } static mrb_value fiber_switch(mrb_state *mrb, mrb_value self, mrb_int len, const mrb_value *a, mrb_bool resume, mrb_bool vmexec) { struct mrb_context *c = fiber_check(mrb, self); struct mrb_context *old_c = mrb->c; enum mrb_fiber_state status; mrb_value value; fiber_check_cfunc(mrb, c); status = c->status; if (resume && status == MRB_FIBER_TRANSFERRED) { mrb_raise(mrb, E_FIBER_ERROR, "resuming transferred fiber"); } if (status == MRB_FIBER_RUNNING || status == MRB_FIBER_RESUMED) { mrb_raise(mrb, E_FIBER_ERROR, "double resume (fib)"); } if (status == MRB_FIBER_TERMINATED) { mrb_raise(mrb, E_FIBER_ERROR, "resuming dead fiber"); } old_c->status = resume ? MRB_FIBER_RESUMED : MRB_FIBER_TRANSFERRED; c->prev = resume ? mrb->c : (c->prev ? c->prev : mrb->root_c); fiber_switch_context(mrb, c); if (status == MRB_FIBER_CREATED) { mrb_value *b, *e; mrb_stack_extend(mrb, len+2); /* for receiver and (optional) block */ b = c->stack+1; e = b + len; while (b<e) { *b++ = *a++; } c->cibase->argc = (int)len; value = c->stack[0] = MRB_PROC_ENV(c->ci->proc)->stack[0]; } else { value = fiber_result(mrb, a, len); } if (vmexec) { c->vmexec = TRUE; value = mrb_vm_exec(mrb, c->ci[-1].proc, c->ci->pc); mrb->c = old_c; } else { MARK_CONTEXT_MODIFY(c); } return value; } /* * call-seq: * fiber.resume(args, ...) -> obj * * Resumes the fiber from the point at which the last <code>Fiber.yield</code> * was called, or starts running it if it is the first call to * <code>resume</code>. Arguments passed to resume will be the value of * the <code>Fiber.yield</code> expression or will be passed as block * parameters to the fiber's block if this is the first <code>resume</code>. * * Alternatively, when resume is called it evaluates to the arguments passed * to the next <code>Fiber.yield</code> statement inside the fiber's block * or to the block value if it runs to completion without any * <code>Fiber.yield</code> */ static mrb_value fiber_resume(mrb_state *mrb, mrb_value self) { mrb_value *a; mrb_int len; mrb_bool vmexec = FALSE; mrb_get_args(mrb, "*!", &a, &len); if (mrb->c->ci->acc < 0) { vmexec = TRUE; } return fiber_switch(mrb, self, len, a, TRUE, vmexec); } /* resume thread with given arguments */ MRB_API mrb_value mrb_fiber_resume(mrb_state *mrb, mrb_value fib, mrb_int len, const mrb_value *a) { return fiber_switch(mrb, fib, len, a, TRUE, TRUE); } /* * call-seq: * fiber.alive? -> true or false * * Returns true if the fiber can still be resumed. After finishing * execution of the fiber block this method will always return false. */ MRB_API mrb_value mrb_fiber_alive_p(mrb_state *mrb, mrb_value self) { struct mrb_context *c = fiber_check(mrb, self); return mrb_bool_value(c->status != MRB_FIBER_TERMINATED); } #define fiber_alive_p mrb_fiber_alive_p static mrb_value fiber_eq(mrb_state *mrb, mrb_value self) { mrb_value other; mrb_get_args(mrb, "o", &other); if (mrb_type(other) != MRB_TT_FIBER) { return mrb_false_value(); } return mrb_bool_value(fiber_ptr(self) == fiber_ptr(other)); } /* * call-seq: * fiber.transfer(args, ...) -> obj * * Transfers control to receiver fiber of the method call. 
* Unlike <code>resume</code> the receiver wouldn't be pushed to call * stack of fibers. Instead it will switch to the call stack of * transferring fiber. * When resuming a fiber that was transferred to another fiber it would * cause double resume error. Though when the fiber is re-transferred * and <code>Fiber.yield</code> is called, the fiber would be resumable. */ static mrb_value fiber_transfer(mrb_state *mrb, mrb_value self) { struct mrb_context *c = fiber_check(mrb, self); mrb_value* a; mrb_int len; fiber_check_cfunc(mrb, mrb->c); mrb_get_args(mrb, "*!", &a, &len); if (c == mrb->root_c) { mrb->c->status = MRB_FIBER_TRANSFERRED; fiber_switch_context(mrb, c); MARK_CONTEXT_MODIFY(c); return fiber_result(mrb, a, len); } if (c == mrb->c) { return fiber_result(mrb, a, len); } return fiber_switch(mrb, self, len, a, FALSE, FALSE); } /* yield values to the caller fiber */ /* mrb_fiber_yield() must be called as `return mrb_fiber_yield(...)` */ MRB_API mrb_value mrb_fiber_yield(mrb_state *mrb, mrb_int len, const mrb_value *a) { struct mrb_context *c = mrb->c; if (!c->prev) { mrb_raise(mrb, E_FIBER_ERROR, "can't yield from root fiber"); } fiber_check_cfunc(mrb, c); c->prev->status = MRB_FIBER_RUNNING; c->status = MRB_FIBER_SUSPENDED; fiber_switch_context(mrb, c->prev); c->prev = NULL; if (c->vmexec) { c->vmexec = FALSE; mrb->c->ci->acc = CI_ACC_RESUMED; } MARK_CONTEXT_MODIFY(mrb->c); return fiber_result(mrb, a, len); } /* * call-seq: * Fiber.yield(args, ...) -> obj * * Yields control back to the context that resumed the fiber, passing * along any arguments that were passed to it. The fiber will resume * processing at this point when <code>resume</code> is called next. * Any arguments passed to the next <code>resume</code> will be the * * mruby limitation: Fiber resume/yield cannot cross C function boundary. * thus you cannot yield from #initialize which is called by mrb_funcall(). */ static mrb_value fiber_yield(mrb_state *mrb, mrb_value self) { mrb_value *a; mrb_int len; mrb_get_args(mrb, "*!", &a, &len); return mrb_fiber_yield(mrb, len, a); } /* * call-seq: * Fiber.current() -> fiber * * Returns the current fiber. If you are not running in the context of * a fiber this method will return the root fiber. */ static mrb_value fiber_current(mrb_state *mrb, mrb_value self) { if (!mrb->c->fib) { struct RFiber *f = (struct RFiber*)mrb_obj_alloc(mrb, MRB_TT_FIBER, mrb_class_ptr(self)); f->cxt = mrb->c; mrb->c->fib = f; } return mrb_obj_value(mrb->c->fib); } void mrb_mruby_fiber_gem_init(mrb_state* mrb) { struct RClass *c; c = mrb_define_class(mrb, "Fiber", mrb->object_class); MRB_SET_INSTANCE_TT(c, MRB_TT_FIBER); mrb_define_method(mrb, c, "initialize", fiber_init, MRB_ARGS_NONE()); mrb_define_method(mrb, c, "resume", fiber_resume, MRB_ARGS_ANY()); mrb_define_method(mrb, c, "transfer", fiber_transfer, MRB_ARGS_ANY()); mrb_define_method(mrb, c, "alive?", fiber_alive_p, MRB_ARGS_NONE()); mrb_define_method(mrb, c, "==", fiber_eq, MRB_ARGS_REQ(1)); mrb_define_class_method(mrb, c, "yield", fiber_yield, MRB_ARGS_ANY()); mrb_define_class_method(mrb, c, "current", fiber_current, MRB_ARGS_NONE()); mrb_define_class(mrb, "FiberError", mrb->eStandardError_class); } void mrb_mruby_fiber_gem_final(mrb_state* mrb) { }
fiber_switch(mrb_state *mrb, mrb_value self, mrb_int len, const mrb_value *a, mrb_bool resume, mrb_bool vmexec) { struct mrb_context *c = fiber_check(mrb, self); struct mrb_context *old_c = mrb->c; mrb_value value; fiber_check_cfunc(mrb, c); if (resume && c->status == MRB_FIBER_TRANSFERRED) { mrb_raise(mrb, E_FIBER_ERROR, "resuming transferred fiber"); } if (c->status == MRB_FIBER_RUNNING || c->status == MRB_FIBER_RESUMED) { mrb_raise(mrb, E_FIBER_ERROR, "double resume (fib)"); } if (c->status == MRB_FIBER_TERMINATED) { mrb_raise(mrb, E_FIBER_ERROR, "resuming dead fiber"); } mrb->c->status = resume ? MRB_FIBER_RESUMED : MRB_FIBER_TRANSFERRED; c->prev = resume ? mrb->c : (c->prev ? c->prev : mrb->root_c); if (c->status == MRB_FIBER_CREATED) { mrb_value *b, *e; if (len >= c->stend - c->stack) { mrb_raise(mrb, E_FIBER_ERROR, "too many arguments to fiber"); } b = c->stack+1; e = b + len; while (b<e) { *b++ = *a++; } c->cibase->argc = (int)len; value = c->stack[0] = MRB_PROC_ENV(c->ci->proc)->stack[0]; } else { value = fiber_result(mrb, a, len); } fiber_switch_context(mrb, c); if (vmexec) { c->vmexec = TRUE; value = mrb_vm_exec(mrb, c->ci[-1].proc, c->ci->pc); mrb->c = old_c; } else { MARK_CONTEXT_MODIFY(c); } return value; }
fiber_switch(mrb_state *mrb, mrb_value self, mrb_int len, const mrb_value *a, mrb_bool resume, mrb_bool vmexec) { struct mrb_context *c = fiber_check(mrb, self); struct mrb_context *old_c = mrb->c; enum mrb_fiber_state status; mrb_value value; fiber_check_cfunc(mrb, c); status = c->status; if (resume && status == MRB_FIBER_TRANSFERRED) { mrb_raise(mrb, E_FIBER_ERROR, "resuming transferred fiber"); } if (status == MRB_FIBER_RUNNING || status == MRB_FIBER_RESUMED) { mrb_raise(mrb, E_FIBER_ERROR, "double resume (fib)"); } if (status == MRB_FIBER_TERMINATED) { mrb_raise(mrb, E_FIBER_ERROR, "resuming dead fiber"); } old_c->status = resume ? MRB_FIBER_RESUMED : MRB_FIBER_TRANSFERRED; c->prev = resume ? mrb->c : (c->prev ? c->prev : mrb->root_c); fiber_switch_context(mrb, c); if (status == MRB_FIBER_CREATED) { mrb_value *b, *e; mrb_stack_extend(mrb, len+2); /* for receiver and (optional) block */ b = c->stack+1; e = b + len; while (b<e) { *b++ = *a++; } c->cibase->argc = (int)len; value = c->stack[0] = MRB_PROC_ENV(c->ci->proc)->stack[0]; } else { value = fiber_result(mrb, a, len); } if (vmexec) { c->vmexec = TRUE; value = mrb_vm_exec(mrb, c->ci[-1].proc, c->ci->pc); mrb->c = old_c; } else { MARK_CONTEXT_MODIFY(c); } return value; }
{'added': [(187, ' enum mrb_fiber_state status;'), (191, ' status = c->status;'), (192, ' if (resume && status == MRB_FIBER_TRANSFERRED) {'), (195, ' if (status == MRB_FIBER_RUNNING || status == MRB_FIBER_RESUMED) {'), (198, ' if (status == MRB_FIBER_TERMINATED) {'), (201, ' old_c->status = resume ? MRB_FIBER_RESUMED : MRB_FIBER_TRANSFERRED;'), (203, ' fiber_switch_context(mrb, c);'), (204, ' if (status == MRB_FIBER_CREATED) {'), (207, ' mrb_stack_extend(mrb, len+2); /* for receiver and (optional) block */')], 'deleted': [(190, ' if (resume && c->status == MRB_FIBER_TRANSFERRED) {'), (193, ' if (c->status == MRB_FIBER_RUNNING || c->status == MRB_FIBER_RESUMED) {'), (196, ' if (c->status == MRB_FIBER_TERMINATED) {'), (199, ' mrb->c->status = resume ? MRB_FIBER_RESUMED : MRB_FIBER_TRANSFERRED;'), (201, ' if (c->status == MRB_FIBER_CREATED) {'), (204, ' if (len >= c->stend - c->stack) {'), (205, ' mrb_raise(mrb, E_FIBER_ERROR, "too many arguments to fiber");'), (206, ' }'), (218, ' fiber_switch_context(mrb, c);')]}
9
9
247
1677
https://github.com/mruby/mruby
CVE-2018-12248
['CWE-125']
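Editor's note on the mruby record above (CVE-2018-12248, CWE-125): per the diff, fiber_switch now snapshots c->status into a local before the context switch mutates it, and instead of rejecting oversized argument lists it grows the target fiber's stack with mrb_stack_extend(mrb, len+2), reserving two extra slots for the receiver and an optional block before the arguments are copied in. The sketch below shows the size-for-everything-you-write idea with a hypothetical stack type; it is not the mruby VM.

/* Sketch: extend the stack for every slot about to be written (len
 * arguments plus receiver and block), rather than comparing len alone
 * against the remaining room. All names are hypothetical. */
#include <stdlib.h>
#include <string.h>

typedef struct { double *base, *end; } vm_stack_t;

static void stack_extend (vm_stack_t *s, size_t need)
{
  size_t size = s->base ? (size_t) (s->end - s->base) : 0;
  size_t new_size = size ? size : 8;
  if (need <= size)
    return;
  while (new_size < need)
    new_size *= 2;
  s->base = realloc (s->base, new_size * sizeof (double)); /* a real VM must
                                                            * handle failure */
  s->end = s->base + new_size;
}

static void push_call_frame (vm_stack_t *s, const double *args, size_t len)
{
  stack_extend (s, len + 2);   /* +2: receiver and (optional) block */
  s->base[0] = 0.0;            /* slot 0: receiver */
  s->base[1 + len] = 0.0;      /* last slot: block */
  memcpy (s->base + 1, args, len * sizeof (double));
}

int main (void)
{
  vm_stack_t s = { NULL, NULL };
  double args[40] = { 0 };
  push_call_frame (&s, args, 40); /* grows the stack to hold 42 slots */
  free (s.base);
  return 0;
}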
map_engine.c
layer_resize
/** * miniSphere JavaScript game engine * Copyright (c) 2015-2018, Fat Cerberus * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * * Neither the name of miniSphere nor the names of its contributors may be * used to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. **/ #include "minisphere.h" #include "map_engine.h" #include "api.h" #include "audio.h" #include "color.h" #include "dispatch.h" #include "geometry.h" #include "image.h" #include "input.h" #include "jsal.h" #include "obstruction.h" #include "script.h" #include "spriteset.h" #include "tileset.h" #include "vanilla.h" #include "vector.h" static const person_t* s_acting_person; static mixer_t* s_bgm_mixer = NULL; static person_t* s_camera_person = NULL; static int s_camera_x = 0; static int s_camera_y = 0; static color_t s_color_mask; static const person_t* s_current_person = NULL; static int s_current_trigger = -1; static int s_current_zone = -1; static script_t* s_def_map_scripts[MAP_SCRIPT_MAX]; static script_t* s_def_person_scripts[PERSON_SCRIPT_MAX]; static bool s_exiting = false; static color_t s_fade_color_from; static color_t s_fade_color_to; static int s_fade_frames; static int s_fade_progress; static int s_frame_rate = 0; static unsigned int s_frames = 0; static bool s_is_map_running = false; static lstring_t* s_last_bgm_file = NULL; static struct map* s_map = NULL; static sound_t* s_map_bgm_stream = NULL; static char* s_map_filename = NULL; static int s_max_deferreds = 0; static int s_max_persons = 0; static unsigned int s_next_person_id = 0; static int s_num_deferreds = 0; static int s_num_persons = 0; static struct map_trigger* s_on_trigger = NULL; static unsigned int s_queued_id = 0; static vector_t* s_person_list = NULL; static struct player* s_players; static script_t* s_render_script = NULL; static int s_talk_button = 0; static int s_talk_distance = 8; static script_t* s_update_script = NULL; static struct deferred *s_deferreds = NULL; static person_t* *s_persons = NULL; struct deferred { script_t* script; int frames_left; }; struct map { int width, height; bool is_repeating; point3_t origin; lstring_t* bgm_file; script_t* scripts[MAP_SCRIPT_MAX]; tileset_t* tileset; vector_t* triggers; vector_t* zones; int num_layers; int num_persons; 
struct map_layer *layers; struct map_person *persons; }; struct map_layer { lstring_t* name; bool is_parallax; bool is_reflective; bool is_visible; float autoscroll_x; float autoscroll_y; color_t color_mask; int height; obsmap_t* obsmap; float parallax_x; float parallax_y; script_t* render_script; struct map_tile* tilemap; int width; }; struct map_person { lstring_t* name; lstring_t* spriteset; int x, y, z; lstring_t* create_script; lstring_t* destroy_script; lstring_t* command_script; lstring_t* talk_script; lstring_t* touch_script; }; struct map_tile { int tile_index; int frames_left; }; struct map_trigger { script_t* script; int x, y, z; }; struct map_zone { bool is_active; rect_t bounds; int interval; int steps_left; int layer; script_t* script; }; struct person { unsigned int id; char* name; int anim_frames; char* direction; int follow_distance; int frame; bool ignore_all_persons; bool ignore_all_tiles; vector_t* ignore_list; bool is_persistent; bool is_visible; int layer; person_t* leader; color_t mask; int mv_x, mv_y; int revert_delay; int revert_frames; double scale_x; double scale_y; script_t* scripts[PERSON_SCRIPT_MAX]; double speed_x, speed_y; spriteset_t* sprite; double theta; double x, y; int x_offset, y_offset; int max_commands; int max_history; int num_commands; int num_ignores; struct command *commands; char* *ignores; struct step *steps; }; struct step { double x, y; }; struct command { int type; bool is_immediate; script_t* script; }; struct player { bool is_talk_allowed; person_t* person; int talk_key; }; #pragma pack(push, 1) struct rmp_header { char signature[4]; int16_t version; uint8_t type; int8_t num_layers; uint8_t reserved_1; int16_t num_entities; int16_t start_x; int16_t start_y; int8_t start_layer; int8_t start_direction; int16_t num_strings; int16_t num_zones; uint8_t repeat_map; uint8_t reserved[234]; }; struct rmp_entity_header { uint16_t x; uint16_t y; uint16_t z; uint16_t type; uint8_t reserved[8]; }; struct rmp_layer_header { int16_t width; int16_t height; uint16_t flags; float parallax_x; float parallax_y; float scrolling_x; float scrolling_y; int32_t num_segments; uint8_t is_reflective; uint8_t reserved[3]; }; struct rmp_zone_header { uint16_t x1; uint16_t y1; uint16_t x2; uint16_t y2; uint16_t layer; uint16_t interval; uint8_t reserved[4]; }; #pragma pack(pop) static bool change_map (const char* filename, bool preserve_persons); static void command_person (person_t* person, int command); static int compare_persons (const void* a, const void* b); static void detach_person (const person_t* person); static bool does_person_exist (const person_t* person); static void draw_persons (int layer, bool is_flipped, int cam_x, int cam_y); static bool enlarge_step_history (person_t* person, int new_size); static void free_map (struct map* map); static void free_person (person_t* person); static struct map_trigger* get_trigger_at (int x, int y, int layer, int* out_index); static struct map_zone* get_zone_at (int x, int y, int layer, int which, int* out_index); static struct map* load_map (const char* path); static void map_screen_to_layer (int layer, int camera_x, int camera_y, int* inout_x, int* inout_y); static void map_screen_to_map (int camera_x, int camera_y, int* inout_x, int* inout_y); static void process_map_input (void); static void record_step (person_t* person); static void reset_persons (bool keep_existing); static void set_person_name (person_t* person, const char* name); static void sort_persons (void); static void update_map_engine (bool 
is_main_loop);
static void update_person (person_t* person, bool* out_has_moved);

void map_engine_init(void)
{
    int i;

    console_log(1, "initializing map engine subsystem");

    audio_init();
    s_bgm_mixer = mixer_new(44100, 16, 2);

    memset(s_def_map_scripts, 0, MAP_SCRIPT_MAX * sizeof(script_t*));
    memset(s_def_person_scripts, 0, PERSON_SCRIPT_MAX * sizeof(script_t*));
    s_map = NULL;
    s_map_filename = NULL;
    s_camera_person = NULL;
    s_players = calloc(PLAYER_MAX, sizeof(struct player));
    for (i = 0; i < PLAYER_MAX; ++i)
        s_players[i].is_talk_allowed = true;
    s_current_trigger = -1;
    s_current_zone = -1;
    s_render_script = NULL;
    s_update_script = NULL;
    s_num_deferreds = s_max_deferreds = 0;
    s_deferreds = NULL;
    s_talk_button = 0;
    s_is_map_running = false;
    s_color_mask = mk_color(0, 0, 0, 0);
    s_on_trigger = NULL;
    s_num_persons = s_max_persons = 0;
    s_persons = NULL;
    s_talk_distance = 8;
    s_acting_person = NULL;
    s_current_person = NULL;
}

void map_engine_uninit(void)
{
    int i;

    console_log(1, "shutting down map engine subsystem");

    vector_free(s_person_list);
    for (i = 0; i < s_num_deferreds; ++i)
        script_unref(s_deferreds[i].script);
    free(s_deferreds);
    for (i = 0; i < MAP_SCRIPT_MAX; ++i)
        script_unref(s_def_map_scripts[i]);
    script_unref(s_update_script);
    script_unref(s_render_script);
    free_map(s_map);
    free(s_players);
    for (i = 0; i < s_num_persons; ++i)
        free_person(s_persons[i]);
    for (i = 0; i < PERSON_SCRIPT_MAX; ++i)
        script_unref(s_def_person_scripts[i]);
    free(s_persons);
    mixer_unref(s_bgm_mixer);
    audio_uninit();
}

void map_engine_on_map_event(map_op_t op, script_t* script)
{
    script_t* old_script;

    old_script = s_def_map_scripts[op];
    s_def_map_scripts[op] = script_ref(script);
    script_unref(old_script);
}

void map_engine_on_person_event(person_op_t op, script_t* script)
{
    script_t* old_script;

    old_script = s_def_person_scripts[op];
    s_def_person_scripts[op] = script_ref(script);
    script_unref(old_script);
}

void map_engine_on_render(script_t* script)
{
    script_unref(s_render_script);
    s_render_script = script_ref(script);
}

void map_engine_on_update(script_t* script)
{
    script_unref(s_update_script);
    s_update_script = script_ref(script);
}

const person_t* map_engine_acting_person(void)
{
    return s_acting_person;
}

const person_t* map_engine_active_person(void)
{
    return s_current_person;
}

int map_engine_active_trigger(void)
{
    return s_current_trigger;
}

int map_engine_active_zone(void)
{
    return s_current_zone;
}

vector_t* map_engine_persons(void)
{
    int i;

    if (s_person_list == NULL)
        s_person_list = vector_new(sizeof(person_t*));
    vector_clear(s_person_list);
    for (i = 0; i < s_num_persons; ++i)
        vector_push(s_person_list, &s_persons[i]);
    return s_person_list;
}

bool map_engine_running(void)
{
    return s_is_map_running;
}

int map_engine_get_framerate(void)
{
    return s_frame_rate;
}

person_t* map_engine_get_player(player_id_t player_id)
{
    return s_players[player_id].person;
}

person_t* map_engine_get_subject(void)
{
    return s_camera_person;
}

int map_engine_get_talk_button(void)
{
    return s_talk_button;
}

int map_engine_get_talk_distance(void)
{
    return s_talk_distance;
}

int map_engine_get_talk_key(player_id_t player_id)
{
    return s_players[player_id].talk_key;
}

void map_engine_set_framerate(int framerate)
{
    s_frame_rate = framerate;
}

void map_engine_set_player(player_id_t player_id, person_t* person)
{
    int i;

    // detach person from any other players
    for (i = 0; i < PLAYER_MAX; ++i) {
        if (s_players[i].person == person)
            s_players[i].person = NULL;
    }
    s_players[player_id].person = person;
}

void map_engine_set_subject(person_t* person)
{
s_camera_person = person; } void map_engine_set_talk_button(int button_id) { s_talk_button = button_id; } void map_engine_set_talk_distance(int distance) { s_talk_distance = distance; } void map_engine_set_talk_key(player_id_t player_id, int key) { s_players[player_id].talk_key = key; } bool map_engine_change_map(const char* filename) { return change_map(filename, false); } void map_engine_defer(script_t* script, int num_frames) { struct deferred* deferred; if (++s_num_deferreds > s_max_deferreds) { s_max_deferreds = s_num_deferreds * 2; s_deferreds = realloc(s_deferreds, s_max_deferreds * sizeof(struct deferred)); } deferred = &s_deferreds[s_num_deferreds - 1]; deferred->script = script; deferred->frames_left = num_frames; } void map_engine_draw_map(void) { bool is_repeating; int cell_x; int cell_y; int first_cell_x; int first_cell_y; struct map_layer* layer; int layer_height; int layer_width; size2_t resolution; int tile_height; int tile_index; int tile_width; int off_x; int off_y; int x, y, z; if (screen_skipping_frame(g_screen)) return; resolution = screen_size(g_screen); tileset_get_size(s_map->tileset, &tile_width, &tile_height); // render map layers from bottom to top (+Z = up) for (z = 0; z < s_map->num_layers; ++z) { layer = &s_map->layers[z]; is_repeating = s_map->is_repeating || layer->is_parallax; layer_width = layer->width * tile_width; layer_height = layer->height * tile_height; off_x = 0; off_y = 0; map_screen_to_layer(z, s_camera_x, s_camera_y, &off_x, &off_y); // render person reflections if layer is reflective al_hold_bitmap_drawing(true); if (layer->is_reflective) { if (is_repeating) { // for small repeating maps, persons need to be repeated as well for (y = 0; y < resolution.height / layer_height + 2; ++y) for (x = 0; x < resolution.width / layer_width + 2; ++x) draw_persons(z, true, off_x - x * layer_width, off_y - y * layer_height); } else { draw_persons(z, true, off_x, off_y); } } // render tiles, but only if the layer is visible if (layer->is_visible) { first_cell_x = off_x / tile_width; first_cell_y = off_y / tile_height; for (y = 0; y < resolution.height / tile_height + 2; ++y) for (x = 0; x < resolution.width / tile_width + 2; ++x) { cell_x = is_repeating ? (x + first_cell_x) % layer->width : x + first_cell_x; cell_y = is_repeating ? 
                    (y + first_cell_y) % layer->height : y + first_cell_y;
                if (cell_x < 0 || cell_x >= layer->width || cell_y < 0 || cell_y >= layer->height)
                    continue;
                tile_index = layer->tilemap[cell_x + cell_y * layer->width].tile_index;
                tileset_draw(s_map->tileset, layer->color_mask,
                    x * tile_width - off_x % tile_width,
                    y * tile_height - off_y % tile_height, tile_index);
            }
        }

        // render persons
        if (is_repeating) {
            // for small repeating maps, persons need to be repeated as well
            for (y = 0; y < resolution.height / layer_height + 2; ++y)
                for (x = 0; x < resolution.width / layer_width + 2; ++x)
                    draw_persons(z, false, off_x - x * layer_width, off_y - y * layer_height);
        }
        else {
            draw_persons(z, false, off_x, off_y);
        }
        al_hold_bitmap_drawing(false);

        script_run(layer->render_script, false);
    }

    al_draw_filled_rectangle(0, 0, resolution.width, resolution.height, nativecolor(s_color_mask));
    script_run(s_render_script, false);
}

void map_engine_exit(void)
{
    s_exiting = true;
}

void map_engine_fade_to(color_t color_mask, int num_frames)
{
    if (num_frames > 0) {
        s_fade_color_to = color_mask;
        s_fade_color_from = s_color_mask;
        s_fade_frames = num_frames;
        s_fade_progress = 0;
    }
    else {
        s_color_mask = color_mask;
        s_fade_color_to = s_fade_color_from = color_mask;
        s_fade_progress = s_fade_frames = 0;
    }
}

bool map_engine_start(const char* filename, int framerate)
{
    s_is_map_running = true;
    s_exiting = false;
    s_color_mask = mk_color(0, 0, 0, 0);
    s_fade_color_to = s_fade_color_from = s_color_mask;
    s_fade_progress = s_fade_frames = 0;
    al_clear_to_color(al_map_rgba(0, 0, 0, 255));
    s_frame_rate = framerate;
    if (!change_map(filename, true))
        goto on_error;
    while (!s_exiting && jsal_vm_enabled()) {
        sphere_heartbeat(true, 1);

        // order of operations matches Sphere 1.x. not sure why, but Sphere 1.x
        // checks for input AFTER an update for some reason...
        update_map_engine(true);
        process_map_input();
        map_engine_draw_map();

        // don't clear the backbuffer. the Sphere 1.x map engine has a bug where it doesn't
        // clear the backbuffer between frames; as it turns out, a good deal of v1 code relies
        // on that behavior.
        sphere_tick(1, false, s_frame_rate);
    }
    reset_persons(false);
    s_is_map_running = false;
    return true;

on_error:
    s_is_map_running = false;
    return false;
}

void map_engine_update(void)
{
    update_map_engine(false);
}

rect_t map_bounds(void)
{
    rect_t bounds;
    int    tile_w, tile_h;

    tileset_get_size(s_map->tileset, &tile_w, &tile_h);
    bounds.x1 = 0;
    bounds.y1 = 0;
    bounds.x2 = s_map->width * tile_w;
    bounds.y2 = s_map->height * tile_h;
    return bounds;
}

int map_layer_by_name(const char* name)
{
    int i;

    for (i = 0; i < s_map->num_layers; ++i) {
        if (strcmp(name, lstr_cstr(s_map->layers[i].name)) == 0)
            return i;
    }
    return -1;
}

int map_num_layers(void)
{
    return s_map->num_layers;
}

int map_num_persons(void)
{
    return s_num_persons;
}

int map_num_triggers(void)
{
    return vector_len(s_map->triggers);
}

int map_num_zones(void)
{
    return vector_len(s_map->zones);
}

point3_t map_origin(void)
{
    return s_map != NULL ? s_map->origin : mk_point3(0, 0, 0);
}

const char* map_pathname(void)
{
    return s_map ?
s_map_filename : NULL; } person_t* map_person_by_name(const char* name) { int i; for (i = 0; i < s_num_persons; ++i) { if (strcmp(name, s_persons[i]->name) == 0) return s_persons[i]; } return NULL; } int map_tile_at(int x, int y, int layer) { int layer_h; int layer_w; layer_w = s_map->layers[layer].width; layer_h = s_map->layers[layer].height; if (s_map->is_repeating || s_map->layers[layer].is_parallax) { x = (x % layer_w + layer_w) % layer_w; y = (y % layer_h + layer_h) % layer_h; } if (x < 0 || y < 0 || x >= layer_w || y >= layer_h) return -1; return layer_get_tile(layer, x, y); } tileset_t* map_tileset(void) { return s_map->tileset; } int map_trigger_at(int x, int y, int layer) { rect_t bounds; int tile_w, tile_h; struct map_trigger* trigger; iter_t iter; tileset_get_size(s_map->tileset, &tile_w, &tile_h); iter = vector_enum(s_map->triggers); while ((trigger = iter_next(&iter))) { if (trigger->z != layer && false) // layer ignored for compatibility continue; bounds.x1 = trigger->x - tile_w / 2; bounds.y1 = trigger->y - tile_h / 2; bounds.x2 = bounds.x1 + tile_w; bounds.y2 = bounds.y1 + tile_h; if (is_point_in_rect(x, y, bounds)) return iter.index; } return -1; } point2_t map_xy_from_screen(point2_t screen_xy) { int x; int y; x = screen_xy.x; y = screen_xy.y; map_screen_to_map(s_camera_x, s_camera_y, &x, &y); return mk_point2(x, y); } int map_zone_at(int x, int y, int layer, int which) { struct map_zone* zone; iter_t iter; iter = vector_enum(s_map->zones); while ((zone = iter_next(&iter))) { if (zone->layer != layer && false) // layer ignored for compatibility continue; if (is_point_in_rect(x, y, zone->bounds) && --which < 0) return iter.index; } return -1; } point2_t map_get_camera_xy(void) { return mk_point2(s_camera_x, s_camera_y); } void map_set_camera_xy(point2_t where) { s_camera_x = where.x; s_camera_y = where.y; } void map_activate(map_op_t op, bool use_default) { if (use_default) script_run(s_def_map_scripts[op], false); script_run(s_map->scripts[op], false); } bool map_add_trigger(int x, int y, int layer, script_t* script) { struct map_trigger trigger; console_log(2, "creating trigger #%d on map '%s'", vector_len(s_map->triggers), s_map_filename); console_log(3, " location: '%s' @ (%d,%d)", lstr_cstr(s_map->layers[layer].name), x, y); trigger.x = x; trigger.y = y; trigger.z = layer; trigger.script = script_ref(script); if (!vector_push(s_map->triggers, &trigger)) return false; return true; } bool map_add_zone(rect_t bounds, int layer, script_t* script, int steps) { struct map_zone zone; console_log(2, "creating %u-step zone #%d on map '%s'", steps, vector_len(s_map->zones), s_map_filename); console_log(3, " bounds: (%d,%d)-(%d,%d)", bounds.x1, bounds.y1, bounds.x2, bounds.y2); memset(&zone, 0, sizeof(struct map_zone)); zone.bounds = bounds; zone.layer = layer; zone.script = script_ref(script); zone.interval = steps; zone.steps_left = 0; if (!vector_push(s_map->zones, &zone)) return false; return true; } void map_call_default(map_op_t op) { script_run(s_def_map_scripts[op], false); } void map_normalize_xy(double* inout_x, double* inout_y, int layer) { int tile_w, tile_h; int layer_w, layer_h; if (s_map == NULL) return; // can't normalize if no map loaded if (!s_map->is_repeating && !s_map->layers[layer].is_parallax) return; tileset_get_size(s_map->tileset, &tile_w, &tile_h); layer_w = s_map->layers[layer].width * tile_w; layer_h = s_map->layers[layer].height * tile_h; if (inout_x) *inout_x = fmod(fmod(*inout_x, layer_w) + layer_w, layer_w); if (inout_y) *inout_y = 
            fmod(fmod(*inout_y, layer_h) + layer_h, layer_h);
}

void map_remove_trigger(int trigger_index)
{
    vector_remove(s_map->triggers, trigger_index);
}

void map_remove_zone(int zone_index)
{
    vector_remove(s_map->zones, zone_index);
}

void layer_on_render(int layer, script_t* script)
{
    script_unref(s_map->layers[layer].render_script);
    s_map->layers[layer].render_script = script_ref(script);
}

const char* layer_name(int layer)
{
    return lstr_cstr(s_map->layers[layer].name);
}

const obsmap_t* layer_obsmap(int layer)
{
    return s_map->layers[layer].obsmap;
}

size2_t layer_size(int layer)
{
    struct map_layer* layer_data;

    layer_data = &s_map->layers[layer];
    return mk_size2(layer_data->width, layer_data->height);
}

color_t layer_get_color_mask(int layer)
{
    return s_map->layers[layer].color_mask;
}

bool layer_get_reflective(int layer)
{
    return s_map->layers[layer].is_reflective;
}

int layer_get_tile(int layer, int x, int y)
{
    struct map_tile* tile;
    int              width;

    width = s_map->layers[layer].width;
    tile = &s_map->layers[layer].tilemap[x + y * width];
    return tile->tile_index;
}

bool layer_get_visible(int layer)
{
    return s_map->layers[layer].is_visible;
}

void layer_set_color_mask(int layer, color_t color)
{
    s_map->layers[layer].color_mask = color;
}

void layer_set_reflective(int layer, bool reflective)
{
    s_map->layers[layer].is_reflective = reflective;
}

void layer_set_tile(int layer, int x, int y, int tile_index)
{
    struct map_tile* tile;
    int              width;

    width = s_map->layers[layer].width;
    tile = &s_map->layers[layer].tilemap[x + y * width];
    tile->tile_index = tile_index;
    tile->frames_left = tileset_get_delay(s_map->tileset, tile_index);
}

void layer_set_visible(int layer, bool visible)
{
    s_map->layers[layer].is_visible = visible;
}

void layer_replace_tiles(int layer, int old_index, int new_index)
{
    int              layer_h;
    int              layer_w;
    struct map_tile* tile;

    int i_x, i_y;

    layer_w = s_map->layers[layer].width;
    layer_h = s_map->layers[layer].height;
    for (i_x = 0; i_x < layer_w; ++i_x) for (i_y = 0; i_y < layer_h; ++i_y) {
        tile = &s_map->layers[layer].tilemap[i_x + i_y * layer_w];
        if (tile->tile_index == old_index)
            tile->tile_index = new_index;
    }
}

bool layer_resize(int layer, int x_size, int y_size)
{
    int                 old_height;
    int                 old_width;
    struct map_tile*    tile;
    int                 tile_width;
    int                 tile_height;
    struct map_tile*    tilemap;
    struct map_trigger* trigger;
    struct map_zone*    zone;

    int x, y, i;

    old_width = s_map->layers[layer].width;
    old_height = s_map->layers[layer].height;

    // allocate a new tilemap and copy the old layer tiles into it. we can't simply realloc
    // because the tilemap is a 2D array.
    if (!(tilemap = malloc(x_size * y_size * sizeof(struct map_tile))))
        return false;
    for (x = 0; x < x_size; ++x) {
        for (y = 0; y < y_size; ++y) {
            if (x < old_width && y < old_height) {
                tilemap[x + y * x_size] = s_map->layers[layer].tilemap[x + y * old_width];
            }
            else {
                tile = &tilemap[x + y * x_size];
                tile->frames_left = tileset_get_delay(s_map->tileset, 0);
                tile->tile_index = 0;
            }
        }
    }

    // free the old tilemap and substitute the new one
    free(s_map->layers[layer].tilemap);
    s_map->layers[layer].tilemap = tilemap;
    s_map->layers[layer].width = x_size;
    s_map->layers[layer].height = y_size;

    // if we resize the largest layer, the overall map size will change.
    // recalculate it.
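    // (map size is the maximum over the non-parallax layers only; parallax
    // layers scroll independently and don't factor into map_bounds().)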
tileset_get_size(s_map->tileset, &tile_width, &tile_height); s_map->width = 0; s_map->height = 0; for (i = 0; i < s_map->num_layers; ++i) { if (!s_map->layers[i].is_parallax) { s_map->width = fmax(s_map->width, s_map->layers[i].width * tile_width); s_map->height = fmax(s_map->height, s_map->layers[i].height * tile_height); } } // ensure zones and triggers remain in-bounds. if any are completely // out-of-bounds, delete them. for (i = (int)vector_len(s_map->zones) - 1; i >= 0; --i) { zone = vector_get(s_map->zones, i); if (zone->bounds.x1 >= s_map->width || zone->bounds.y1 >= s_map->height) vector_remove(s_map->zones, i); else { if (zone->bounds.x2 > s_map->width) zone->bounds.x2 = s_map->width; if (zone->bounds.y2 > s_map->height) zone->bounds.y2 = s_map->height; } } for (i = (int)vector_len(s_map->triggers) - 1; i >= 0; --i) { trigger = vector_get(s_map->triggers, i); if (trigger->x >= s_map->width || trigger->y >= s_map->height) vector_remove(s_map->triggers, i); } return true; } person_t* person_new(const char* name, spriteset_t* spriteset, bool is_persistent, script_t* create_script) { point3_t origin = map_origin(); person_t* person; if (++s_num_persons > s_max_persons) { s_max_persons = s_num_persons * 2; s_persons = realloc(s_persons, s_max_persons * sizeof(person_t*)); } person = s_persons[s_num_persons - 1] = calloc(1, sizeof(person_t)); person->id = s_next_person_id++; person->sprite = spriteset_ref(spriteset); set_person_name(person, name); person_set_pose(person, spriteset_pose_name(spriteset, 0)); person->is_persistent = is_persistent; person->is_visible = true; person->x = origin.x; person->y = origin.y; person->layer = origin.z; person->speed_x = 1.0; person->speed_y = 1.0; person->anim_frames = spriteset_frame_delay(person->sprite, person->direction, 0); person->mask = mk_color(255, 255, 255, 255); person->scale_x = person->scale_y = 1.0; person->scripts[PERSON_SCRIPT_ON_CREATE] = create_script; person_activate(person, PERSON_SCRIPT_ON_CREATE, NULL, true); sort_persons(); return person; } void person_free(person_t* person) { int i, j; // call the person's destroy script *before* renouncing leadership. // the destroy script may want to reassign followers (they will be orphaned otherwise), so // we want to give it a chance to do so. 
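    // (the on-destroy script also runs while the person is still registered,
    // so script code can still look them up by name at this point.)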
person_activate(person, PERSON_SCRIPT_ON_DESTROY, NULL, true); for (i = 0; i < s_num_persons; ++i) { if (s_persons[i]->leader == person) s_persons[i]->leader = NULL; } // remove the person from the engine detach_person(person); for (i = 0; i < s_num_persons; ++i) { if (s_persons[i] == person) { for (j = i; j < s_num_persons - 1; ++j) s_persons[j] = s_persons[j + 1]; --s_num_persons; --i; } } vector_free(person->ignore_list); free_person(person); sort_persons(); } rect_t person_base(const person_t* person) { rect_t base_rect; int base_x; int base_y; double x; double y; base_rect = rect_zoom(spriteset_get_base(person->sprite), person->scale_x, person->scale_y); person_get_xy(person, &x, &y, true); base_x = x - (base_rect.x1 + (base_rect.x2 - base_rect.x1) / 2); base_y = y - (base_rect.y1 + (base_rect.y2 - base_rect.y1) / 2); base_rect.x1 += base_x; base_rect.x2 += base_x; base_rect.y1 += base_y; base_rect.y2 += base_y; return base_rect; } bool person_following(const person_t* person, const person_t* leader) { const person_t* node; node = person; while ((node = node->leader)) if (node == leader) return true; return false; } bool person_has_moved(const person_t* person) { return person->mv_x != 0 || person->mv_y != 0; } vector_t* person_ignore_list(person_t* person) { // note: the returned vector is an array of C strings. these should be treated // as const char*; in other words, don't free them! int i; if (person->ignore_list == NULL) person->ignore_list = vector_new(sizeof(const char*)); vector_clear(person->ignore_list); for (i = 0; i < person->num_ignores; ++i) vector_push(person->ignore_list, &person->ignores[i]); return person->ignore_list; } bool person_ignored_by(const person_t* person, const person_t* other) { // note: commutative; if either person ignores the other, the function will return true int i; if (other->ignore_all_persons || person->ignore_all_persons) return true; for (i = 0; i < other->num_ignores; ++i) if (strcmp(other->ignores[i], person->name) == 0) return true; for (i = 0; i < person->num_ignores; ++i) if (strcmp(person->ignores[i], other->name) == 0) return true; return false; } bool person_moving(const person_t* person) { return person->num_commands > 0; } const char* person_name(const person_t* person) { return person != NULL ? person->name : ""; } bool person_obstructed_at(const person_t* person, double x, double y, person_t** out_obstructing_person, int* out_tile_index) { rect_t area; rect_t base, my_base; double cur_x, cur_y; bool is_obstructed = false; int layer; const obsmap_t* obsmap; int tile_w, tile_h; const tileset_t* tileset; int i, i_x, i_y; map_normalize_xy(&x, &y, person->layer); person_get_xyz(person, &cur_x, &cur_y, &layer, true); my_base = rect_translate(person_base(person), x - cur_x, y - cur_y); if (out_obstructing_person != NULL) *out_obstructing_person = NULL; if (out_tile_index != NULL) *out_tile_index = -1; // check for obstructing persons if (!person->ignore_all_persons) { for (i = 0; i < s_num_persons; ++i) { if (s_persons[i] == person) // these persons aren't going to obstruct themselves! 
continue; if (s_persons[i]->layer != layer) continue; // ignore persons not on the same layer if (person_following(s_persons[i], person)) continue; // ignore own followers base = person_base(s_persons[i]); if (do_rects_overlap(my_base, base) && !person_ignored_by(person, s_persons[i])) { is_obstructed = true; if (out_obstructing_person) *out_obstructing_person = s_persons[i]; break; } } } // no obstructing person, check map-defined obstructions obsmap = layer_obsmap(layer); if (obsmap_test_rect(obsmap, my_base)) is_obstructed = true; // check for obstructing tiles // for performance reasons, the search is constrained to the immediate vicinity // of the person's sprite base. if (!person->ignore_all_tiles) { tileset = map_tileset(); tileset_get_size(tileset, &tile_w, &tile_h); area.x1 = my_base.x1 / tile_w; area.y1 = my_base.y1 / tile_h; area.x2 = area.x1 + (my_base.x2 - my_base.x1) / tile_w + 2; area.y2 = area.y1 + (my_base.y2 - my_base.y1) / tile_h + 2; for (i_x = area.x1; i_x < area.x2; ++i_x) for (i_y = area.y1; i_y < area.y2; ++i_y) { base = rect_translate(my_base, -(i_x * tile_w), -(i_y * tile_h)); obsmap = tileset_obsmap(tileset, map_tile_at(i_x, i_y, layer)); if (obsmap != NULL && obsmap_test_rect(obsmap, base)) { is_obstructed = true; if (out_tile_index) *out_tile_index = map_tile_at(i_x, i_y, layer); break; } } } return is_obstructed; } double person_get_angle(const person_t* person) { return person->theta; } color_t person_get_color(const person_t* person) { return person->mask; } int person_get_frame(const person_t* person) { int num_frames; num_frames = spriteset_num_frames(person->sprite, person->direction); return person->frame % num_frames; } int person_get_frame_delay(const person_t* person) { return person->anim_frames; } bool person_get_ignore_persons(const person_t* person) { return person->ignore_all_persons; } bool person_get_ignore_tiles(const person_t* person) { return person->ignore_all_tiles; } int person_get_layer(const person_t* person) { return person->layer; } person_t* person_get_leader(const person_t* person) { return person->leader; } point2_t person_get_offset(const person_t* person) { return mk_point2(person->x_offset, person->y_offset); } const char* person_get_pose(const person_t* person) { return person->direction; } int person_get_revert_delay(const person_t* person) { return person->revert_delay; } void person_get_scale(const person_t* person, double* out_scale_x, double* out_scale_y) { *out_scale_x = person->scale_x; *out_scale_y = person->scale_y; } void person_get_speed(const person_t* person, double* out_x_speed, double* out_y_speed) { if (out_x_speed) *out_x_speed = person->speed_x; if (out_y_speed) *out_y_speed = person->speed_y; } spriteset_t* person_get_spriteset(const person_t* person) { return person->sprite; } int person_get_trailing(const person_t* person) { return person->follow_distance; } bool person_get_visible(const person_t* person) { return person->is_visible; } void person_get_xy(const person_t* person, double* out_x, double* out_y, bool normalize) { *out_x = person->x; *out_y = person->y; if (normalize) map_normalize_xy(out_x, out_y, person->layer); } void person_get_xyz(const person_t* person, double* out_x, double* out_y, int* out_layer, bool normalize) { *out_x = person->x; *out_y = person->y; *out_layer = person->layer; if (normalize) map_normalize_xy(out_x, out_y, *out_layer); } void person_set_angle(person_t* person, double theta) { person->theta = theta; } void person_set_color(person_t* person, color_t mask) { person->mask 
        = mask;
}

void person_set_frame(person_t* person, int frame_index)
{
    int num_frames;

    num_frames = spriteset_num_frames(person->sprite, person->direction);
    person->frame = (frame_index % num_frames + num_frames) % num_frames;
    person->anim_frames = spriteset_frame_delay(person->sprite, person->direction, person->frame);
    person->revert_frames = person->revert_delay;
}

void person_set_frame_delay(person_t* person, int num_frames)
{
    person->anim_frames = num_frames;
    person->revert_frames = person->revert_delay;
}

void person_set_ignore_persons(person_t* person, bool ignoring)
{
    person->ignore_all_persons = ignoring;
}

void person_set_ignore_tiles (person_t* person, bool ignoring)
{
    person->ignore_all_tiles = ignoring;
}

void person_set_layer(person_t* person, int layer)
{
    person->layer = layer;
}

bool person_set_leader(person_t* person, person_t* leader, int distance)
{
    const person_t* node;

    // prevent circular follower chains from forming
    if (leader != NULL) {
        node = leader;
        do {
            if (node == person)
                return false;
        } while ((node = node->leader));
    }

    // add the person as a follower (or sever existing link if leader==NULL)
    if (leader != NULL) {
        if (!enlarge_step_history(leader, distance))
            return false;
        person->follow_distance = distance;
    }
    person->leader = leader;
    return true;
}

void person_set_offset(person_t* person, point2_t offset)
{
    person->x_offset = offset.x;
    person->y_offset = offset.y;
}

void person_set_pose(person_t* person, const char* pose_name)
{
    person->direction = realloc(person->direction, (strlen(pose_name) + 1) * sizeof(char));
    strcpy(person->direction, pose_name);
}

void person_set_revert_delay(person_t* person, int num_frames)
{
    person->revert_delay = num_frames;
    person->revert_frames = num_frames;
}

void person_set_scale(person_t* person, double scale_x, double scale_y)
{
    person->scale_x = scale_x;
    person->scale_y = scale_y;
}

void person_set_speed(person_t* person, double x_speed, double y_speed)
{
    person->speed_x = x_speed;
    person->speed_y = y_speed;
}

void person_set_spriteset(person_t* person, spriteset_t* spriteset)
{
    spriteset_t* old_spriteset;

    old_spriteset = person->sprite;
    person->sprite = spriteset_ref(spriteset);
    person->anim_frames = spriteset_frame_delay(person->sprite, person->direction, 0);
    person->frame = 0;
    spriteset_unref(old_spriteset);
}

void person_set_trailing(person_t* person, int distance)
{
    // a person who isn't following anyone has no step history to enlarge
    if (person->leader != NULL)
        enlarge_step_history(person->leader, distance);
    person->follow_distance = distance;
}

void person_set_visible(person_t* person, bool visible)
{
    person->is_visible = visible;
}

void person_set_xyz(person_t* person, double x, double y, int layer)
{
    person->x = x;
    person->y = y;
    person->layer = layer;
    sort_persons();
}

void person_on_event(person_t* person, int type, script_t* script)
{
    script_unref(person->scripts[type]);
    person->scripts[type] = script;
}

void person_activate(const person_t* person, person_op_t op, const person_t* acting_person, bool use_default)
{
    const person_t* last_acting;
    const person_t* last_current;

    last_acting = s_acting_person;
    last_current = s_current_person;
    s_acting_person = acting_person;
    s_current_person = person;
    if (use_default)
        script_run(s_def_person_scripts[op], false);
    if (does_person_exist(person))
        script_run(person->scripts[op], false);
    s_acting_person = last_acting;
    s_current_person = last_current;
}

void person_call_default(const person_t* person, person_op_t op, const person_t* acting_person)
{
    const person_t* last_acting;
    const person_t* last_current;

    last_acting = s_acting_person;
    last_current =
        s_current_person;
    s_acting_person = acting_person;
    s_current_person = person;
    script_run(s_def_person_scripts[op], false);
    s_acting_person = last_acting;
    s_current_person = last_current;
}

void person_clear_ignores(person_t* person)
{
    int i;

    for (i = 0; i < person->num_ignores; ++i)
        free(person->ignores[i]);
    person->num_ignores = 0;
}

void person_clear_queue(person_t* person)
{
    person->num_commands = 0;
}

bool person_compile_script(person_t* person, int type, const lstring_t* codestring)
{
    script_t*   script;
    const char* script_name;

    script_name = type == PERSON_SCRIPT_ON_CREATE ? "onCreate"
        : type == PERSON_SCRIPT_ON_DESTROY ? "onDestroy"
        : type == PERSON_SCRIPT_ON_TOUCH ? "onTouch"
        : type == PERSON_SCRIPT_ON_TALK ? "onTalk"
        : type == PERSON_SCRIPT_GENERATOR ? "genCommands"
        : NULL;
    if (script_name == NULL)
        return false;
    script = script_new(codestring, "%s/%s/%s.js", map_pathname(), person->name, script_name);
    person_on_event(person, type, script);
    return true;
}

void person_ignore_name(person_t* person, const char* name)
{
    int index;

    index = person->num_ignores++;
    person->ignores = realloc(person->ignores, person->num_ignores * sizeof(char*));
    person->ignores[index] = strdup(name);

    // ignore list changed, delete cache
    vector_free(person->ignore_list);
    person->ignore_list = NULL;
}

bool person_queue_command(person_t* person, int command, bool is_immediate)
{
    struct command* commands;
    bool            is_aok = true;

    switch (command) {
    case COMMAND_MOVE_NORTHEAST:
        is_aok &= person_queue_command(person, COMMAND_MOVE_NORTH, true);
        is_aok &= person_queue_command(person, COMMAND_MOVE_EAST, is_immediate);
        return is_aok;
    case COMMAND_MOVE_SOUTHEAST:
        is_aok &= person_queue_command(person, COMMAND_MOVE_SOUTH, true);
        is_aok &= person_queue_command(person, COMMAND_MOVE_EAST, is_immediate);
        return is_aok;
    case COMMAND_MOVE_SOUTHWEST:
        is_aok &= person_queue_command(person, COMMAND_MOVE_SOUTH, true);
        is_aok &= person_queue_command(person, COMMAND_MOVE_WEST, is_immediate);
        return is_aok;
    case COMMAND_MOVE_NORTHWEST:
        is_aok &= person_queue_command(person, COMMAND_MOVE_NORTH, true);
        is_aok &= person_queue_command(person, COMMAND_MOVE_WEST, is_immediate);
        return is_aok;
    default:
        ++person->num_commands;
        if (person->num_commands > person->max_commands) {
            if (!(commands = realloc(person->commands, person->num_commands * 2 * sizeof(struct command))))
                return false;
            person->max_commands = person->num_commands * 2;
            person->commands = commands;
        }
        person->commands[person->num_commands - 1].type = command;
        person->commands[person->num_commands - 1].is_immediate = is_immediate;
        person->commands[person->num_commands - 1].script = NULL;
        return true;
    }
}

bool person_queue_script(person_t* person, script_t* script, bool is_immediate)
{
    struct command* commands;

    ++person->num_commands;
    if (person->num_commands > person->max_commands) {
        // grow via a temporary so a failed realloc doesn't clobber the
        // existing command queue (same pattern as person_queue_command)
        if (!(commands = realloc(person->commands, person->num_commands * 2 * sizeof(struct command))))
            return false;
        person->max_commands = person->num_commands * 2;
        person->commands = commands;
    }
    person->commands[person->num_commands - 1].type = COMMAND_RUN_SCRIPT;
    person->commands[person->num_commands - 1].is_immediate = is_immediate;
    person->commands[person->num_commands - 1].script = script;
    return true;
}

void person_talk(const person_t* person)
{
    rect_t    map_rect;
    person_t* target_person;
    double    talk_x, talk_y;

    map_rect = map_bounds();

    // check if anyone else is within earshot
    person_get_xy(person, &talk_x, &talk_y, true);
    if (strstr(person->direction, "north"))
        talk_y -= s_talk_distance;
    if (strstr(person->direction, "east"))
        talk_x += s_talk_distance;
    if
(strstr(person->direction, "south")) talk_y += s_talk_distance; if (strstr(person->direction, "west")) talk_x -= s_talk_distance; person_obstructed_at(person, talk_x, talk_y, &target_person, NULL); // if so, call their talk script if (target_person != NULL) person_activate(target_person, PERSON_SCRIPT_ON_TALK, person, true); } void trigger_get_xyz(int trigger_index, int* out_x, int* out_y, int* out_layer) { struct map_trigger* trigger; trigger = vector_get(s_map->triggers, trigger_index); if (out_x != NULL) *out_x = trigger->x; if (out_y != NULL) *out_y = trigger->y; if (out_layer) *out_layer = trigger->z; } void trigger_set_layer(int trigger_index, int layer) { struct map_trigger* trigger; trigger = vector_get(s_map->triggers, trigger_index); trigger->z = layer; } void trigger_set_script(int trigger_index, script_t* script) { script_t* old_script; struct map_trigger* trigger; trigger = vector_get(s_map->triggers, trigger_index); old_script = trigger->script; trigger->script = script_ref(script); script_unref(old_script); } void trigger_set_xy(int trigger_index, int x, int y) { struct map_trigger* trigger; trigger = vector_get(s_map->triggers, trigger_index); trigger->x = x; trigger->y = y; } void trigger_activate(int trigger_index) { int last_trigger; struct map_trigger* trigger; trigger = vector_get(s_map->triggers, trigger_index); last_trigger = s_current_trigger; s_current_trigger = trigger_index; script_run(trigger->script, true); s_current_trigger = last_trigger; } rect_t zone_get_bounds(int zone_index) { struct map_zone* zone; zone = vector_get(s_map->zones, zone_index); return zone->bounds; } int zone_get_layer(int zone_index) { struct map_zone* zone; zone = vector_get(s_map->zones, zone_index); return zone->layer; } int zone_get_steps(int zone_index) { struct map_zone* zone; zone = vector_get(s_map->zones, zone_index); return zone->interval; } void zone_set_bounds(int zone_index, rect_t bounds) { struct map_zone* zone; zone = vector_get(s_map->zones, zone_index); rect_normalize(&bounds); zone->bounds = bounds; } void zone_set_layer(int zone_index, int layer) { struct map_zone* zone; zone = vector_get(s_map->zones, zone_index); zone->layer = layer; } void zone_set_script(int zone_index, script_t* script) { script_t* old_script; struct map_zone* zone; zone = vector_get(s_map->zones, zone_index); old_script = zone->script; zone->script = script_ref(script); script_unref(old_script); } void zone_set_steps(int zone_index, int interval) { struct map_zone* zone; zone = vector_get(s_map->zones, zone_index); zone->interval = interval; zone->steps_left = 0; } void zone_activate(int zone_index) { int last_zone; struct map_zone* zone; zone = vector_get(s_map->zones, zone_index); last_zone = s_current_zone; s_current_zone = zone_index; script_run(zone->script, true); s_current_zone = last_zone; } static bool change_map(const char* filename, bool preserve_persons) { // note: if an error is detected during a map change, change_map() will return false, but // the map engine may be left in an inconsistent state. it is therefore probably wise // to consider such a situation unrecoverable. 
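    // (a typical failure mode: a person's spriteset fails to load after the
    // old map has already been freed; see the on_error handling below.)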
struct map* map; person_t* person; struct map_person* person_info; path_t* path; spriteset_t* spriteset = NULL; int i; console_log(2, "changing current map to '%s'", filename); map = load_map(filename); if (map == NULL) return false; if (s_map != NULL) { // run map exit scripts first, before loading new map map_activate(MAP_SCRIPT_ON_LEAVE, true); } // close out old map and prep for new one free_map(s_map); free(s_map_filename); for (i = 0; i < s_num_deferreds; ++i) script_unref(s_deferreds[i].script); s_num_deferreds = 0; s_map = map; s_map_filename = strdup(filename); reset_persons(preserve_persons); // populate persons for (i = 0; i < s_map->num_persons; ++i) { person_info = &s_map->persons[i]; path = game_full_path(g_game, lstr_cstr(person_info->spriteset), "spritesets", true); spriteset = spriteset_load(path_cstr(path)); path_free(path); if (spriteset == NULL) goto on_error; if (!(person = person_new(lstr_cstr(person_info->name), spriteset, false, NULL))) goto on_error; spriteset_unref(spriteset); person_set_xyz(person, person_info->x, person_info->y, person_info->z); person_compile_script(person, PERSON_SCRIPT_ON_CREATE, person_info->create_script); person_compile_script(person, PERSON_SCRIPT_ON_DESTROY, person_info->destroy_script); person_compile_script(person, PERSON_SCRIPT_ON_TOUCH, person_info->touch_script); person_compile_script(person, PERSON_SCRIPT_ON_TALK, person_info->talk_script); person_compile_script(person, PERSON_SCRIPT_GENERATOR, person_info->command_script); // normally this is handled by person_new(), but since in this case the // person-specific create script isn't compiled until after the person is created, // the map engine gets the responsibility. person_activate(person, PERSON_SCRIPT_ON_CREATE, NULL, false); } // set camera over starting position s_camera_x = s_map->origin.x; s_camera_y = s_map->origin.y; // start up map BGM (if same as previous, leave alone) if (s_map->bgm_file == NULL && s_map_bgm_stream != NULL) { sound_unref(s_map_bgm_stream); lstr_free(s_last_bgm_file); s_map_bgm_stream = NULL; s_last_bgm_file = NULL; } else if (s_map->bgm_file != NULL && (s_last_bgm_file == NULL || lstr_cmp(s_map->bgm_file, s_last_bgm_file) != 0)) { sound_unref(s_map_bgm_stream); lstr_free(s_last_bgm_file); s_last_bgm_file = lstr_dup(s_map->bgm_file); path = game_full_path(g_game, lstr_cstr(s_map->bgm_file), "sounds", true); if ((s_map_bgm_stream = sound_new(path_cstr(path)))) { sound_set_repeat(s_map_bgm_stream, true); sound_play(s_map_bgm_stream, s_bgm_mixer); } path_free(path); } // run map entry scripts map_activate(MAP_SCRIPT_ON_ENTER, true); s_frames = 0; return true; on_error: spriteset_unref(spriteset); free_map(s_map); return false; } static void command_person(person_t* person, int command) { double new_x; double new_y; person_t* person_to_touch; new_x = person->x; new_y = person->y; switch (command) { case COMMAND_ANIMATE: person->revert_frames = person->revert_delay; if (person->anim_frames > 0 && --person->anim_frames == 0) { ++person->frame; person->anim_frames = spriteset_frame_delay(person->sprite, person->direction, person->frame); } break; case COMMAND_FACE_NORTH: person_set_pose(person, "north"); break; case COMMAND_FACE_NORTHEAST: person_set_pose(person, "northeast"); break; case COMMAND_FACE_EAST: person_set_pose(person, "east"); break; case COMMAND_FACE_SOUTHEAST: person_set_pose(person, "southeast"); break; case COMMAND_FACE_SOUTH: person_set_pose(person, "south"); break; case COMMAND_FACE_SOUTHWEST: person_set_pose(person, "southwest"); break; 
case COMMAND_FACE_WEST: person_set_pose(person, "west"); break; case COMMAND_FACE_NORTHWEST: person_set_pose(person, "northwest"); break; case COMMAND_MOVE_NORTH: new_y = person->y - person->speed_y; break; case COMMAND_MOVE_EAST: new_x = person->x + person->speed_x; break; case COMMAND_MOVE_SOUTH: new_y = person->y + person->speed_y; break; case COMMAND_MOVE_WEST: new_x = person->x - person->speed_x; break; } if (new_x != person->x || new_y != person->y) { // person is trying to move, make sure the path is clear of obstructions if (!person_obstructed_at(person, new_x, new_y, &person_to_touch, NULL)) { if (new_x != person->x) person->mv_x = new_x > person->x ? 1 : -1; if (new_y != person->y) person->mv_y = new_y > person->y ? 1 : -1; person->x = new_x; person->y = new_y; } else { // if not, and we collided with a person, call that person's touch script if (person_to_touch != NULL) person_activate(person_to_touch, PERSON_SCRIPT_ON_TOUCH, person, true); } } } static int compare_persons(const void* a, const void* b) { person_t* p1 = *(person_t**)a; person_t* p2 = *(person_t**)b; double x, y_p1, y_p2; int y_delta; person_get_xy(p1, &x, &y_p1, true); person_get_xy(p2, &x, &y_p2, true); y_delta = y_p1 - y_p2; if (y_delta != 0) return y_delta; else if (person_following(p1, p2)) return -1; else if (person_following(p2, p1)) return 1; else return p1->id - p2->id; } static void detach_person(const person_t* person) { int i; if (s_camera_person == person) s_camera_person = NULL; for (i = 0; i < PLAYER_MAX; ++i) { if (s_players[i].person == person) s_players[i].person = NULL; } } static bool does_person_exist(const person_t* person) { int i; for (i = 0; i < s_num_persons; ++i) if (person == s_persons[i]) return true; return false; } void draw_persons(int layer, bool is_flipped, int cam_x, int cam_y) { person_t* person; spriteset_t* sprite; int w, h; double x, y; int i; for (i = 0; i < s_num_persons; ++i) { person = s_persons[i]; if (!person->is_visible || person->layer != layer) continue; sprite = person->sprite; w = spriteset_width(sprite); h = spriteset_height(sprite); person_get_xy(person, &x, &y, true); x -= cam_x - person->x_offset; y -= cam_y - person->y_offset; spriteset_draw(sprite, person->mask, is_flipped, person->theta, person->scale_x, person->scale_y, person->direction, trunc(x), trunc(y), person->frame); } } static bool enlarge_step_history(person_t* person, int new_size) { struct step *new_steps; size_t pastmost; double last_x; double last_y; int i; if (new_size > person->max_history) { if (!(new_steps = realloc(person->steps, new_size * sizeof(struct step)))) return false; // when enlarging the history buffer, fill new slots with pastmost values // (kind of like sign extension) pastmost = person->max_history - 1; last_x = person->steps != NULL ? person->steps[pastmost].x : person->x; last_y = person->steps != NULL ? 
            person->steps[pastmost].y : person->y;
        for (i = person->max_history; i < new_size; ++i) {
            new_steps[i].x = last_x;
            new_steps[i].y = last_y;
        }
        person->steps = new_steps;
        person->max_history = new_size;
    }
    return true;
}

static void free_map(struct map* map)
{
    struct map_trigger* trigger;
    struct map_zone*    zone;

    iter_t iter;
    int    i;

    if (map == NULL)
        return;
    for (i = 0; i < MAP_SCRIPT_MAX; ++i)
        script_unref(map->scripts[i]);
    for (i = 0; i < map->num_layers; ++i) {
        script_unref(map->layers[i].render_script);
        lstr_free(map->layers[i].name);
        free(map->layers[i].tilemap);
        obsmap_free(map->layers[i].obsmap);
    }
    for (i = 0; i < map->num_persons; ++i) {
        lstr_free(map->persons[i].name);
        lstr_free(map->persons[i].spriteset);
        lstr_free(map->persons[i].create_script);
        lstr_free(map->persons[i].destroy_script);
        lstr_free(map->persons[i].command_script);
        lstr_free(map->persons[i].talk_script);
        lstr_free(map->persons[i].touch_script);
    }
    iter = vector_enum(map->triggers);
    while ((trigger = iter_next(&iter)))
        script_unref(trigger->script);
    iter = vector_enum(map->zones);
    while ((zone = iter_next(&iter)))
        script_unref(zone->script);
    lstr_free(map->bgm_file);
    tileset_free(map->tileset);
    free(map->layers);
    free(map->persons);
    vector_free(map->triggers);
    vector_free(map->zones);
    free(map);
}

static void free_person(person_t* person)
{
    int i;

    free(person->steps);
    for (i = 0; i < PERSON_SCRIPT_MAX; ++i)
        script_unref(person->scripts[i]);
    spriteset_unref(person->sprite);
    for (i = 0; i < person->num_ignores; ++i)
        free(person->ignores[i]);
    free(person->ignores);
    free(person->commands);
    free(person->name);
    free(person->direction);
    free(person);
}

static struct map_trigger* get_trigger_at(int x, int y, int layer, int* out_index)
{
    rect_t              bounds;
    struct map_trigger* found_item = NULL;
    int                 tile_w, tile_h;
    struct map_trigger* trigger;

    iter_t iter;

    tileset_get_size(s_map->tileset, &tile_w, &tile_h);
    iter = vector_enum(s_map->triggers);
    while ((trigger = iter_next(&iter))) {
        if (trigger->z != layer && false)  // layer ignored for compatibility reasons
            continue;
        bounds.x1 = trigger->x - tile_w / 2;
        bounds.y1 = trigger->y - tile_h / 2;
        bounds.x2 = bounds.x1 + tile_w;
        bounds.y2 = bounds.y1 + tile_h;
        if (is_point_in_rect(x, y, bounds)) {
            found_item = trigger;
            if (out_index != NULL)
                *out_index = (int)iter.index;
            break;
        }
    }
    return found_item;
}

static struct map_zone* get_zone_at(int x, int y, int layer, int which, int* out_index)
{
    struct map_zone* found_item = NULL;
    struct map_zone* zone;

    iter_t iter;

    iter = vector_enum(s_map->zones);
    while ((zone = iter_next(&iter))) {
        if (zone->layer != layer && false)  // layer ignored for compatibility
            continue;
        if (is_point_in_rect(x, y, zone->bounds) && which-- == 0) {
            found_item = zone;
            if (out_index)
                *out_index = (int)iter.index;
            break;
        }
    }
    return found_item;
}

static struct map* load_map(const char* filename)
{
    // strings: 0 - tileset filename
    //          1 - music filename
    //          2 - script filename (obsolete, not used)
    //          3 - entry script
    //          4 - exit script
    //          5 - exit north script
    //          6 - exit east script
    //          7 - exit south script
    //          8 - exit west script

    uint16_t                 count;
    struct rmp_entity_header entity_hdr;
    file_t*                  file = NULL;
    bool                     has_failed;
    struct map_layer*        layer;
    struct rmp_layer_header  layer_hdr;
    struct map*              map = NULL;
    int                      num_tiles;
    struct map_person*       person;
    struct rmp_header        rmp;
    lstring_t*               script;
    rect_t                   segment;
    int16_t*                 tile_data = NULL;
    path_t*                  tileset_path;
    tileset_t*               tileset;
    struct map_trigger       trigger;
    struct map_zone          zone;
    struct rmp_zone_header   zone_hdr;
    lstring_t* *             strings = NULL;

    int i, j, x, y, z;

    console_log(2, "constructing new map from '%s'",
        filename);
    memset(&rmp, 0, sizeof(struct rmp_header));
    if (!(file = file_open(g_game, filename, "rb")))
        goto on_error;
    map = calloc(1, sizeof(struct map));
    if (file_read(file, &rmp, 1, sizeof(struct rmp_header)) != 1)
        goto on_error;
    if (memcmp(rmp.signature, ".rmp", 4) != 0)
        goto on_error;
    if (rmp.num_strings != 3 && rmp.num_strings != 5 && rmp.num_strings < 9)
        goto on_error;
    if (rmp.start_layer < 0 || rmp.start_layer >= rmp.num_layers)
        rmp.start_layer = 0;  // being nice here, this really should fail outright
    switch (rmp.version) {
    case 1:
        // load strings (resource filenames, scripts, etc.)
        strings = calloc(rmp.num_strings, sizeof(lstring_t*));
        has_failed = false;
        for (i = 0; i < rmp.num_strings; ++i)
            has_failed = has_failed || ((strings[i] = read_lstring(file, true)) == NULL);
        if (has_failed)
            goto on_error;

        // pre-allocate map structures
        map->layers = calloc(rmp.num_layers, sizeof(struct map_layer));
        map->persons = calloc(rmp.num_entities, sizeof(struct map_person));
        map->triggers = vector_new(sizeof(struct map_trigger));
        map->zones = vector_new(sizeof(struct map_zone));

        // load layers
        for (i = 0; i < rmp.num_layers; ++i) {
            if (file_read(file, &layer_hdr, 1, sizeof(struct rmp_layer_header)) != 1)
                goto on_error;
            if (layer_hdr.width <= 0 || layer_hdr.height <= 0)
                goto on_error;  // reject corrupt (zero or negative) layer dimensions
            layer = &map->layers[i];
            layer->is_parallax = (layer_hdr.flags & 2) != 0x0;
            layer->is_reflective = layer_hdr.is_reflective;
            layer->is_visible = (layer_hdr.flags & 1) == 0x0;
            layer->color_mask = mk_color(255, 255, 255, 255);
            layer->width = layer_hdr.width;
            layer->height = layer_hdr.height;
            layer->autoscroll_x = layer->is_parallax ? layer_hdr.scrolling_x : 0.0;
            layer->autoscroll_y = layer->is_parallax ? layer_hdr.scrolling_y : 0.0;
            layer->parallax_x = layer->is_parallax ? layer_hdr.parallax_x : 1.0;
            layer->parallax_y = layer->is_parallax ? layer_hdr.parallax_y : 1.0;
            if (!layer->is_parallax) {
                map->width = fmax(map->width, layer->width);
                map->height = fmax(map->height, layer->height);
            }
            if (!(layer->tilemap = malloc(layer_hdr.width * layer_hdr.height * sizeof(struct map_tile))))
                goto on_error;
            layer->name = read_lstring(file, true);
            layer->obsmap = obsmap_new();
            num_tiles = layer_hdr.width * layer_hdr.height;
            if ((tile_data = malloc(num_tiles * 2)) == NULL)
                goto on_error;
            if (file_read(file, tile_data, num_tiles, 2) != num_tiles)
                goto on_error;
            for (j = 0; j < num_tiles; ++j) {
                // tile indices come from an untrusted file; clamp negative
                // values so they can't index out of bounds later on.
                layer->tilemap[j].tile_index = tile_data[j] >= 0 ? tile_data[j] : 0;
            }
            for (j = 0; j < layer_hdr.num_segments; ++j) {
                if (!fread_rect32(file, &segment))
                    goto on_error;
                obsmap_add_line(layer->obsmap, segment);
            }
            free(tile_data);
            tile_data = NULL;
        }

        // if either dimension is zero, the map has no non-parallax layers and is thus malformed
        if (map->width == 0 || map->height == 0)
            goto on_error;

        // load entities
        map->num_persons = 0;
        for (i = 0; i < rmp.num_entities; ++i) {
            if (file_read(file, &entity_hdr, 1, sizeof(struct rmp_entity_header)) != 1)
                goto on_error;
            if (entity_hdr.z < 0 || entity_hdr.z >= rmp.num_layers)
                entity_hdr.z = 0;
            switch (entity_hdr.type) {
            case 1:  // person
                ++map->num_persons;
                person = &map->persons[map->num_persons - 1];
                memset(person, 0, sizeof(struct map_person));
                if (!(person->name = read_lstring(file, true)))
                    goto on_error;
                if (!(person->spriteset = read_lstring(file, true)))
                    goto on_error;
                person->x = entity_hdr.x;
                person->y = entity_hdr.y;
                person->z = entity_hdr.z;
                if (file_read(file, &count, 1, 2) != 1 || count < 5)
                    goto on_error;
                person->create_script = read_lstring(file, false);
                person->destroy_script = read_lstring(file, false);
                person->touch_script = read_lstring(file, false);
                person->talk_script =
                    read_lstring(file, false);
                person->command_script = read_lstring(file, false);
                for (j = 5; j < count; ++j)
                    lstr_free(read_lstring(file, true));
                file_seek(file, 16, WHENCE_CUR);
                break;
            case 2:  // trigger
                if ((script = read_lstring(file, false)) == NULL)
                    goto on_error;
                memset(&trigger, 0, sizeof(struct map_trigger));
                trigger.x = entity_hdr.x;
                trigger.y = entity_hdr.y;
                trigger.z = entity_hdr.z;
                trigger.script = script_new(script, "%s/trig%d", filename, vector_len(map->triggers));
                lstr_free(script);
                if (!vector_push(map->triggers, &trigger))
                    goto on_error;
                break;
            default:
                goto on_error;
            }
        }

        // load zones
        for (i = 0; i < rmp.num_zones; ++i) {
            if (file_read(file, &zone_hdr, 1, sizeof(struct rmp_zone_header)) != 1)
                goto on_error;
            if ((script = read_lstring(file, false)) == NULL)
                goto on_error;
            if (zone_hdr.layer < 0 || zone_hdr.layer >= rmp.num_layers)
                zone_hdr.layer = 0;
            zone.layer = zone_hdr.layer;
            zone.bounds = mk_rect(zone_hdr.x1, zone_hdr.y1, zone_hdr.x2, zone_hdr.y2);
            zone.interval = zone_hdr.interval;
            zone.steps_left = 0;
            zone.script = script_new(script, "%s/zone%d", filename, vector_len(map->zones));
            lstr_free(script);
            rect_normalize(&zone.bounds);
            if (!vector_push(map->zones, &zone))
                goto on_error;
        }

        // load tileset
        if (strcmp(lstr_cstr(strings[0]), "") != 0) {
            tileset_path = path_strip(path_new(filename));
            path_append(tileset_path, lstr_cstr(strings[0]));
            tileset = tileset_new(path_cstr(tileset_path));
            path_free(tileset_path);
        }
        else {
            tileset = tileset_read(file);
        }
        if (tileset == NULL)
            goto on_error;

        // initialize tile animation
        for (z = 0; z < rmp.num_layers; ++z) {
            layer = &map->layers[z];
            for (x = 0; x < layer->width; ++x) for (y = 0; y < layer->height; ++y) {
                i = x + y * layer->width;
                map->layers[z].tilemap[i].frames_left =
                    tileset_get_delay(tileset, map->layers[z].tilemap[i].tile_index);
            }
        }

        // wrap things up
        map->bgm_file = strcmp(lstr_cstr(strings[1]), "") != 0 ?
            lstr_dup(strings[1]) : NULL;
        map->num_layers = rmp.num_layers;
        map->is_repeating = rmp.repeat_map;
        map->origin.x = rmp.start_x;
        map->origin.y = rmp.start_y;
        map->origin.z = rmp.start_layer;
        map->tileset = tileset;
        if (rmp.num_strings >= 5) {
            map->scripts[MAP_SCRIPT_ON_ENTER] = script_new(strings[3], "%s/onEnter", filename);
            map->scripts[MAP_SCRIPT_ON_LEAVE] = script_new(strings[4], "%s/onLeave", filename);
        }
        if (rmp.num_strings >= 9) {
            map->scripts[MAP_SCRIPT_ON_LEAVE_NORTH] = script_new(strings[5], "%s/onLeaveNorth", filename);
            map->scripts[MAP_SCRIPT_ON_LEAVE_EAST] = script_new(strings[6], "%s/onLeaveEast", filename);
            map->scripts[MAP_SCRIPT_ON_LEAVE_SOUTH] = script_new(strings[7], "%s/onLeaveSouth", filename);
            map->scripts[MAP_SCRIPT_ON_LEAVE_WEST] = script_new(strings[8], "%s/onLeaveWest", filename);
        }
        for (i = 0; i < rmp.num_strings; ++i)
            lstr_free(strings[i]);
        free(strings);
        break;
    default:
        goto on_error;
    }
    file_close(file);
    return map;

on_error:
    if (file != NULL)
        file_close(file);
    free(tile_data);
    if (strings != NULL) {
        for (i = 0; i < rmp.num_strings; ++i)
            lstr_free(strings[i]);
        free(strings);
    }
    if (map != NULL) {
        if (map->layers != NULL) {
            for (i = 0; i < rmp.num_layers; ++i) {
                lstr_free(map->layers[i].name);
                free(map->layers[i].tilemap);
                obsmap_free(map->layers[i].obsmap);
            }
            free(map->layers);
        }
        if (map->persons != NULL) {
            for (i = 0; i < map->num_persons; ++i) {
                lstr_free(map->persons[i].name);
                lstr_free(map->persons[i].spriteset);
                lstr_free(map->persons[i].create_script);
                lstr_free(map->persons[i].destroy_script);
                lstr_free(map->persons[i].command_script);
                lstr_free(map->persons[i].talk_script);
                lstr_free(map->persons[i].touch_script);
            }
            free(map->persons);
        }
        vector_free(map->triggers);
        vector_free(map->zones);
        free(map);
    }
    return NULL;
}

void map_screen_to_layer(int layer, int camera_x, int camera_y, int* inout_x, int* inout_y)
{
    rect_t  bounds;
    int     center_x;
    int     center_y;
    int     layer_h;
    int     layer_w;
    float   plx_offset_x = 0.0;
    float   plx_offset_y = 0.0;
    size2_t resolution;
    int     tile_w;
    int     tile_h;
    int     x_offset;
    int     y_offset;

    // get layer and screen metrics
    resolution = screen_size(g_screen);
    tileset_get_size(s_map->tileset, &tile_w, &tile_h);
    layer_w = s_map->layers[layer].width * tile_w;
    layer_h = s_map->layers[layer].height * tile_h;
    center_x = resolution.width / 2;
    center_y = resolution.height / 2;

    // initial camera correction
    if (!s_map->is_repeating) {
        bounds = map_bounds();
        camera_x = fmin(fmax(camera_x, bounds.x1 + center_x), bounds.x2 - center_x);
        camera_y = fmin(fmax(camera_y, bounds.y1 + center_y), bounds.y2 - center_y);
    }

    // remap screen coordinates to layer coordinates
    plx_offset_x = s_frames * s_map->layers[layer].autoscroll_x
        - camera_x * (s_map->layers[layer].parallax_x - 1.0);
    plx_offset_y = s_frames * s_map->layers[layer].autoscroll_y
        - camera_y * (s_map->layers[layer].parallax_y - 1.0);
    x_offset = camera_x - center_x - plx_offset_x;
    y_offset = camera_y - center_y - plx_offset_y;
    if (!s_map->is_repeating && !s_map->layers[layer].is_parallax) {
        // if the map is smaller than the screen, align to top left. centering
        // would be better aesthetically, but there are a couple Sphere 1.x games
        // that depend on top-left justification.
        if (layer_w < resolution.width)
            x_offset = 0;
        if (layer_h < resolution.height)
            y_offset = 0;
    }
    if (inout_x != NULL)
        *inout_x += x_offset;
    if (inout_y != NULL)
        *inout_y += y_offset;

    // normalize coordinates. this simplifies rendering calculations.
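    // e.g. with layer_w == 100, an x of -25 becomes ((-25 % 100) + 100) % 100 == 75;
    // the add-then-mod is needed because C's % can yield negative results for
    // negative operands.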
if (s_map->is_repeating || s_map->layers[layer].is_parallax) { if (inout_x) *inout_x = (*inout_x % layer_w + layer_w) % layer_w; if (inout_y) *inout_y = (*inout_y % layer_h + layer_h) % layer_h; } } static void map_screen_to_map(int camera_x, int camera_y, int* inout_x, int* inout_y) { rect_t bounds; int center_x; int center_y; int map_h; int map_w; size2_t resolution; int tile_h; int tile_w; int x_offset; int y_offset; // get layer and screen metrics resolution = screen_size(g_screen); tileset_get_size(s_map->tileset, &tile_w, &tile_h); map_w = s_map->width * tile_w; map_h = s_map->height * tile_h; center_x = resolution.width / 2; center_y = resolution.height / 2; // initial camera correction if (!s_map->is_repeating) { bounds = map_bounds(); camera_x = fmin(fmax(camera_x, bounds.x1 + center_x), bounds.x2 - center_x); camera_y = fmin(fmax(camera_y, bounds.y1 + center_y), bounds.y2 - center_y); } // remap screen coordinates to map coordinates x_offset = camera_x - center_x; y_offset = camera_y - center_y; if (!s_map->is_repeating) { // if the map is smaller than the screen, align to top left. centering // would be better aesthetically, but there are a couple Sphere 1.x games // that depend on top-left justification. if (map_w < resolution.width) x_offset = 0; if (map_h < resolution.height) y_offset = 0; } if (inout_x != NULL) *inout_x += x_offset; if (inout_y != NULL) *inout_y += y_offset; // normalize coordinates if (s_map->is_repeating) { if (inout_x) *inout_x = (*inout_x % map_w + map_w) % map_w; if (inout_y) *inout_y = (*inout_y % map_h + map_h) % map_h; } } static void process_map_input(void) { int mv_x, mv_y; person_t* person; int i; // clear out excess keys from key queue kb_clear_queue(); // check for player control of input persons, if there are any for (i = 0; i < PLAYER_MAX; ++i) { person = s_players[i].person; if (person != NULL) { if (kb_is_key_down(get_player_key(i, PLAYER_KEY_A)) || kb_is_key_down(s_players[i].talk_key) || joy_is_button_down(i, s_talk_button)) { if (s_players[i].is_talk_allowed) person_talk(person); s_players[i].is_talk_allowed = false; } else { // allow talking again only after key is released s_players[i].is_talk_allowed = true; } mv_x = 0; mv_y = 0; if (person->num_commands == 0 && person->leader == NULL) { // allow player control only if the input person is idle and not being led around // by someone else. 
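                // the switch below packs the movement vector into a single
                // value as mv_x + mv_y * 3, so e.g. north is 0 + (-1 * 3) == -3
                // and northeast is 1 + (-1 * 3) == -2.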
if (kb_is_key_down(get_player_key(i, PLAYER_KEY_UP)) || joy_position(i, 1) <= -0.5) mv_y = -1; if (kb_is_key_down(get_player_key(i, PLAYER_KEY_RIGHT)) || joy_position(i, 0) >= 0.5) mv_x = 1; if (kb_is_key_down(get_player_key(i, PLAYER_KEY_DOWN)) || joy_position(i, 1) >= 0.5) mv_y = 1; if (kb_is_key_down(get_player_key(i, PLAYER_KEY_LEFT)) || joy_position(i, 0) <= -0.5) mv_x = -1; } switch (mv_x + mv_y * 3) { case -3: // north person_queue_command(person, COMMAND_MOVE_NORTH, true); person_queue_command(person, COMMAND_FACE_NORTH, true); person_queue_command(person, COMMAND_ANIMATE, false); break; case -2: // northeast person_queue_command(person, COMMAND_MOVE_NORTHEAST, true); person_queue_command(person, COMMAND_FACE_NORTHEAST, true); person_queue_command(person, COMMAND_ANIMATE, false); break; case 1: // east person_queue_command(person, COMMAND_MOVE_EAST, true); person_queue_command(person, COMMAND_FACE_EAST, true); person_queue_command(person, COMMAND_ANIMATE, false); break; case 4: // southeast person_queue_command(person, COMMAND_MOVE_SOUTHEAST, true); person_queue_command(person, COMMAND_FACE_SOUTHEAST, true); person_queue_command(person, COMMAND_ANIMATE, false); break; case 3: // south person_queue_command(person, COMMAND_MOVE_SOUTH, true); person_queue_command(person, COMMAND_FACE_SOUTH, true); person_queue_command(person, COMMAND_ANIMATE, false); break; case 2: // southwest person_queue_command(person, COMMAND_MOVE_SOUTHWEST, true); person_queue_command(person, COMMAND_FACE_SOUTHWEST, true); person_queue_command(person, COMMAND_ANIMATE, false); break; case -1: // west person_queue_command(person, COMMAND_MOVE_WEST, true); person_queue_command(person, COMMAND_FACE_WEST, true); person_queue_command(person, COMMAND_ANIMATE, false); break; case -4: // northwest person_queue_command(person, COMMAND_MOVE_NORTHWEST, true); person_queue_command(person, COMMAND_FACE_NORTHWEST, true); person_queue_command(person, COMMAND_ANIMATE, false); break; } } } update_bound_keys(true); } static void record_step(person_t* person) { struct step* p_step; if (person->max_history <= 0) return; memmove(&person->steps[1], &person->steps[0], (person->max_history - 1) * sizeof(struct step)); p_step = &person->steps[0]; p_step->x = person->x; p_step->y = person->y; } void reset_persons(bool keep_existing) { unsigned int id; point3_t origin; person_t* person; int i, j; origin = map_origin(); for (i = 0; i < s_num_persons; ++i) { person = s_persons[i]; id = person->id; if (!keep_existing) person->num_commands = 0; if (person->is_persistent || keep_existing) { person->x = origin.x; person->y = origin.y; person->layer = origin.z; } else { person_activate(person, PERSON_SCRIPT_ON_DESTROY, NULL, true); free_person(person); --s_num_persons; for (j = i; j < s_num_persons; ++j) s_persons[j] = s_persons[j + 1]; --i; } } sort_persons(); } static void set_person_name(person_t* person, const char* name) { person->name = realloc(person->name, (strlen(name) + 1) * sizeof(char)); strcpy(person->name, name); } static void sort_persons(void) { qsort(s_persons, s_num_persons, sizeof(person_t*), compare_persons); } static void update_map_engine(bool in_main_loop) { bool has_moved; int index; bool is_sort_needed = false; int last_trigger; int last_zone; int layer; int map_w, map_h; int num_zone_steps; script_t* script_to_run; int script_type; double start_x[PLAYER_MAX]; double start_y[PLAYER_MAX]; int tile_w, tile_h; struct map_trigger* trigger; double x, y, px, py; struct map_zone* zone; int i, j, k; ++s_frames; 
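    // s_frames drives parallax auto-scrolling (see map_screen_to_layer), so the
    // counter ticks even on frames where nothing else changes. per-frame order
    // from here: tileset animation, person updates, fades, camera, map-edge
    // scripts, triggers, zones, deferred scripts, then the update script.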
tileset_get_size(s_map->tileset, &tile_w, &tile_h); map_w = s_map->width * tile_w; map_h = s_map->height * tile_h; tileset_update(s_map->tileset); for (i = 0; i < PLAYER_MAX; ++i) if (s_players[i].person != NULL) person_get_xy(s_players[i].person, &start_x[i], &start_y[i], false); for (i = 0; i < s_num_persons; ++i) { if (s_persons[i]->leader != NULL) continue; // skip followers for now update_person(s_persons[i], &has_moved); is_sort_needed |= has_moved; } if (is_sort_needed) sort_persons(); // update color mask fade level if (s_fade_progress < s_fade_frames) { ++s_fade_progress; s_color_mask = color_mix(s_fade_color_to, s_fade_color_from, s_fade_progress, s_fade_frames - s_fade_progress); } // update camera if (s_camera_person != NULL) { person_get_xy(s_camera_person, &x, &y, true); s_camera_x = x; s_camera_y = y; } // run edge script if the camera has moved past the edge of the map // note: only applies for non-repeating maps if (in_main_loop && !s_map->is_repeating) { script_type = s_camera_y < 0 ? MAP_SCRIPT_ON_LEAVE_NORTH : s_camera_x >= map_w ? MAP_SCRIPT_ON_LEAVE_EAST : s_camera_y >= map_h ? MAP_SCRIPT_ON_LEAVE_SOUTH : s_camera_x < 0 ? MAP_SCRIPT_ON_LEAVE_WEST : MAP_SCRIPT_MAX; if (script_type < MAP_SCRIPT_MAX) map_activate(script_type, true); } // if there are any input persons, check for trigger activation for (i = 0; i < PLAYER_MAX; ++i) if (s_players[i].person != NULL) { // did we step on a trigger or move to a new one? person_get_xyz(s_players[i].person, &x, &y, &layer, true); trigger = get_trigger_at(x, y, layer, &index); if (trigger != s_on_trigger) { last_trigger = s_current_trigger; s_current_trigger = index; s_on_trigger = trigger; if (trigger != NULL) script_run(trigger->script, false); s_current_trigger = last_trigger; } } // update any zones occupied by the input person // note: a zone's step count is in reality a pixel count, so a zone // may be updated multiple times in a single frame. for (k = 0; k < PLAYER_MAX; ++k) if (s_players[k].person != NULL) { person_get_xy(s_players[k].person, &x, &y, false); px = fabs(x - start_x[k]); py = fabs(y - start_y[k]); num_zone_steps = px > py ? px : py; for (i = 0; i < num_zone_steps; ++i) { j = 0; while ((zone = get_zone_at(x, y, layer, j++, &index))) { if (zone->steps_left-- <= 0) { last_zone = s_current_zone; s_current_zone = index; zone->steps_left = zone->interval; script_run(zone->script, true); s_current_zone = last_zone; } } } } // check if there are any deferred scripts due to run this frame // and run the ones that are for (i = 0; i < s_num_deferreds; ++i) { if (s_deferreds[i].frames_left-- <= 0) { script_to_run = s_deferreds[i].script; for (j = i; j < s_num_deferreds - 1; ++j) s_deferreds[j] = s_deferreds[j + 1]; --s_num_deferreds; script_run(script_to_run, false); script_unref(script_to_run); --i; } } // now that everything else is in order, we can run the // update script! 
script_run(s_update_script, false); } static void update_person(person_t* person, bool* out_has_moved) { struct command command; double delta_x, delta_y; int facing; bool has_moved; bool is_finished; const person_t* last_person; struct step step; int vector; int i; person->mv_x = 0; person->mv_y = 0; if (person->revert_frames > 0 && --person->revert_frames <= 0) person->frame = 0; if (person->leader == NULL) { // no leader; use command queue // call the command generator if the queue is empty if (person->num_commands == 0) person_activate(person, PERSON_SCRIPT_GENERATOR, NULL, true); // run through the queue, stopping after the first non-immediate command is_finished = !does_person_exist(person) || person->num_commands == 0; while (!is_finished) { command = person->commands[0]; --person->num_commands; for (i = 0; i < person->num_commands; ++i) person->commands[i] = person->commands[i + 1]; last_person = s_current_person; s_current_person = person; if (command.type != COMMAND_RUN_SCRIPT) command_person(person, command.type); else script_run(command.script, false); s_current_person = last_person; script_unref(command.script); is_finished = !does_person_exist(person) // stop if person was destroyed || !command.is_immediate || person->num_commands == 0; } } else { // leader set; follow the leader! step = person->leader->steps[person->follow_distance - 1]; delta_x = step.x - person->x; delta_y = step.y - person->y; if (fabs(delta_x) > person->speed_x) command_person(person, delta_x > 0 ? COMMAND_MOVE_EAST : COMMAND_MOVE_WEST); if (!does_person_exist(person)) return; if (fabs(delta_y) > person->speed_y) command_person(person, delta_y > 0 ? COMMAND_MOVE_SOUTH : COMMAND_MOVE_NORTH); if (!does_person_exist(person)) return; vector = person->mv_x + person->mv_y * 3; facing = vector == -3 ? COMMAND_FACE_NORTH : vector == -2 ? COMMAND_FACE_NORTHEAST : vector == 1 ? COMMAND_FACE_EAST : vector == 4 ? COMMAND_FACE_SOUTHEAST : vector == 3 ? COMMAND_FACE_SOUTH : vector == 2 ? COMMAND_FACE_SOUTHWEST : vector == -1 ? COMMAND_FACE_WEST : vector == -4 ? COMMAND_FACE_NORTHWEST : COMMAND_WAIT; if (facing != COMMAND_WAIT) command_person(person, COMMAND_ANIMATE); if (!does_person_exist(person)) return; command_person(person, facing); } // check that the person didn't mysteriously disappear... if (!does_person_exist(person)) return; // they probably got eaten by a pig. // if the person's position changed, record it in their step history *out_has_moved = person_has_moved(person); if (*out_has_moved) record_step(person); // recursively update the follower chain for (i = 0; i < s_num_persons; ++i) { if (s_persons[i]->leader != person) continue; update_person(s_persons[i], &has_moved); *out_has_moved |= has_moved; } }
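/*
 * editor's illustration -- a minimal, self-contained sketch (not engine code)
 * of the wrap-around normalization used by map_screen_to_map() and
 * map_tile_at() above.  C's `%` operator may yield a negative result for a
 * negative dividend, so the engine computes `(v % size + size) % size` to map
 * any coordinate into [0, size).  compiles standalone with any C99 compiler.
 */
#include <assert.h>
#include <stdio.h>

// wrap `v` into the half-open interval [0, size); `size` must be positive
static int
wrap_coord(int v, int size)
{
	return (v % size + size) % size;
}

int
main(void)
{
	// e.g. a hypothetical 10-tile-wide repeating layer
	assert(wrap_coord(0, 10) == 0);    // already in range: unchanged
	assert(wrap_coord(10, 10) == 0);   // one full layer to the right wraps to 0
	assert(wrap_coord(-1, 10) == 9);   // one tile left of the origin wraps to 9
	assert(wrap_coord(-25, 10) == 5);  // works for any magnitude
	printf("wrap_coord(-25, 10) = %d\n", wrap_coord(-25, 10));
	return 0;
}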
/** * miniSphere JavaScript game engine * Copyright (c) 2015-2018, Fat Cerberus * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * * Neither the name of miniSphere nor the names of its contributors may be * used to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. **/ #include "minisphere.h" #include "map_engine.h" #include "api.h" #include "audio.h" #include "color.h" #include "dispatch.h" #include "geometry.h" #include "image.h" #include "input.h" #include "jsal.h" #include "obstruction.h" #include "script.h" #include "spriteset.h" #include "tileset.h" #include "vanilla.h" #include "vector.h" static const person_t* s_acting_person; static mixer_t* s_bgm_mixer = NULL; static person_t* s_camera_person = NULL; static int s_camera_x = 0; static int s_camera_y = 0; static color_t s_color_mask; static const person_t* s_current_person = NULL; static int s_current_trigger = -1; static int s_current_zone = -1; static script_t* s_def_map_scripts[MAP_SCRIPT_MAX]; static script_t* s_def_person_scripts[PERSON_SCRIPT_MAX]; static bool s_exiting = false; static color_t s_fade_color_from; static color_t s_fade_color_to; static int s_fade_frames; static int s_fade_progress; static int s_frame_rate = 0; static unsigned int s_frames = 0; static bool s_is_map_running = false; static lstring_t* s_last_bgm_file = NULL; static struct map* s_map = NULL; static sound_t* s_map_bgm_stream = NULL; static char* s_map_filename = NULL; static int s_max_deferreds = 0; static int s_max_persons = 0; static unsigned int s_next_person_id = 0; static int s_num_deferreds = 0; static int s_num_persons = 0; static struct map_trigger* s_on_trigger = NULL; static unsigned int s_queued_id = 0; static vector_t* s_person_list = NULL; static struct player* s_players; static script_t* s_render_script = NULL; static int s_talk_button = 0; static int s_talk_distance = 8; static script_t* s_update_script = NULL; static struct deferred *s_deferreds = NULL; static person_t* *s_persons = NULL; struct deferred { script_t* script; int frames_left; }; struct map { int width, height; bool is_repeating; point3_t origin; lstring_t* bgm_file; script_t* scripts[MAP_SCRIPT_MAX]; tileset_t* tileset; vector_t* triggers; vector_t* zones; int num_layers; int num_persons; 
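	// layer data and person placements parsed from the RMP file; the counts
	// above give the lengths of these arrays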
struct map_layer *layers; struct map_person *persons; }; struct map_layer { lstring_t* name; bool is_parallax; bool is_reflective; bool is_visible; float autoscroll_x; float autoscroll_y; color_t color_mask; int height; obsmap_t* obsmap; float parallax_x; float parallax_y; script_t* render_script; struct map_tile* tilemap; int width; }; struct map_person { lstring_t* name; lstring_t* spriteset; int x, y, z; lstring_t* create_script; lstring_t* destroy_script; lstring_t* command_script; lstring_t* talk_script; lstring_t* touch_script; }; struct map_tile { int tile_index; int frames_left; }; struct map_trigger { script_t* script; int x, y, z; }; struct map_zone { bool is_active; rect_t bounds; int interval; int steps_left; int layer; script_t* script; }; struct person { unsigned int id; char* name; int anim_frames; char* direction; int follow_distance; int frame; bool ignore_all_persons; bool ignore_all_tiles; vector_t* ignore_list; bool is_persistent; bool is_visible; int layer; person_t* leader; color_t mask; int mv_x, mv_y; int revert_delay; int revert_frames; double scale_x; double scale_y; script_t* scripts[PERSON_SCRIPT_MAX]; double speed_x, speed_y; spriteset_t* sprite; double theta; double x, y; int x_offset, y_offset; int max_commands; int max_history; int num_commands; int num_ignores; struct command *commands; char* *ignores; struct step *steps; }; struct step { double x, y; }; struct command { int type; bool is_immediate; script_t* script; }; struct player { bool is_talk_allowed; person_t* person; int talk_key; }; #pragma pack(push, 1) struct rmp_header { char signature[4]; int16_t version; uint8_t type; int8_t num_layers; uint8_t reserved_1; int16_t num_entities; int16_t start_x; int16_t start_y; int8_t start_layer; int8_t start_direction; int16_t num_strings; int16_t num_zones; uint8_t repeat_map; uint8_t reserved[234]; }; struct rmp_entity_header { uint16_t x; uint16_t y; uint16_t z; uint16_t type; uint8_t reserved[8]; }; struct rmp_layer_header { int16_t width; int16_t height; uint16_t flags; float parallax_x; float parallax_y; float scrolling_x; float scrolling_y; int32_t num_segments; uint8_t is_reflective; uint8_t reserved[3]; }; struct rmp_zone_header { uint16_t x1; uint16_t y1; uint16_t x2; uint16_t y2; uint16_t layer; uint16_t interval; uint8_t reserved[4]; }; #pragma pack(pop) static bool change_map (const char* filename, bool preserve_persons); static void command_person (person_t* person, int command); static int compare_persons (const void* a, const void* b); static void detach_person (const person_t* person); static bool does_person_exist (const person_t* person); static void draw_persons (int layer, bool is_flipped, int cam_x, int cam_y); static bool enlarge_step_history (person_t* person, int new_size); static void free_map (struct map* map); static void free_person (person_t* person); static struct map_trigger* get_trigger_at (int x, int y, int layer, int* out_index); static struct map_zone* get_zone_at (int x, int y, int layer, int which, int* out_index); static struct map* load_map (const char* path); static void map_screen_to_layer (int layer, int camera_x, int camera_y, int* inout_x, int* inout_y); static void map_screen_to_map (int camera_x, int camera_y, int* inout_x, int* inout_y); static void process_map_input (void); static void record_step (person_t* person); static void reset_persons (bool keep_existing); static void set_person_name (person_t* person, const char* name); static void sort_persons (void); static void update_map_engine (bool 
in_main_loop);
static void         update_person        (person_t* person, bool* out_has_moved);

void
map_engine_init(void)
{
	int i;

	console_log(1, "initializing map engine subsystem");

	audio_init();
	s_bgm_mixer = mixer_new(44100, 16, 2);
	// clear the default script tables (arrays of script_t* pointers)
	memset(s_def_map_scripts, 0, MAP_SCRIPT_MAX * sizeof(script_t*));
	memset(s_def_person_scripts, 0, PERSON_SCRIPT_MAX * sizeof(script_t*));
	s_map = NULL;
	s_map_filename = NULL;
	s_camera_person = NULL;
	s_players = calloc(PLAYER_MAX, sizeof(struct player));
	for (i = 0; i < PLAYER_MAX; ++i)
		s_players[i].is_talk_allowed = true;
	s_current_trigger = -1;
	s_current_zone = -1;
	s_render_script = NULL;
	s_update_script = NULL;
	s_num_deferreds = s_max_deferreds = 0;
	s_deferreds = NULL;
	s_talk_button = 0;
	s_is_map_running = false;
	s_color_mask = mk_color(0, 0, 0, 0);
	s_on_trigger = NULL;
	s_num_persons = s_max_persons = 0;
	s_persons = NULL;
	s_talk_distance = 8;
	s_acting_person = NULL;
	s_current_person = NULL;
}

void
map_engine_uninit(void)
{
	int i;

	console_log(1, "shutting down map engine subsystem");

	vector_free(s_person_list);
	for (i = 0; i < s_num_deferreds; ++i)
		script_unref(s_deferreds[i].script);
	free(s_deferreds);
	for (i = 0; i < MAP_SCRIPT_MAX; ++i)
		script_unref(s_def_map_scripts[i]);
	script_unref(s_update_script);
	script_unref(s_render_script);
	free_map(s_map);
	free(s_players);
	for (i = 0; i < s_num_persons; ++i)
		free_person(s_persons[i]);
	for (i = 0; i < PERSON_SCRIPT_MAX; ++i)
		script_unref(s_def_person_scripts[i]);
	free(s_persons);
	mixer_unref(s_bgm_mixer);
	audio_uninit();
}

void
map_engine_on_map_event(map_op_t op, script_t* script)
{
	script_t* old_script;

	old_script = s_def_map_scripts[op];
	s_def_map_scripts[op] = script_ref(script);
	script_unref(old_script);
}

void
map_engine_on_person_event(person_op_t op, script_t* script)
{
	script_t* old_script;

	old_script = s_def_person_scripts[op];
	s_def_person_scripts[op] = script_ref(script);
	script_unref(old_script);
}

void
map_engine_on_render(script_t* script)
{
	script_unref(s_render_script);
	s_render_script = script_ref(script);
}

void
map_engine_on_update(script_t* script)
{
	script_unref(s_update_script);
	s_update_script = script_ref(script);
}

const person_t*
map_engine_acting_person(void)
{
	return s_acting_person;
}

const person_t*
map_engine_active_person(void)
{
	return s_current_person;
}

int
map_engine_active_trigger(void)
{
	return s_current_trigger;
}

int
map_engine_active_zone(void)
{
	return s_current_zone;
}

vector_t*
map_engine_persons(void)
{
	int i;

	if (s_person_list == NULL)
		s_person_list = vector_new(sizeof(person_t*));
	vector_clear(s_person_list);
	for (i = 0; i < s_num_persons; ++i)
		vector_push(s_person_list, &s_persons[i]);
	return s_person_list;
}

bool
map_engine_running(void)
{
	return s_is_map_running;
}

int
map_engine_get_framerate(void)
{
	return s_frame_rate;
}

person_t*
map_engine_get_player(player_id_t player_id)
{
	return s_players[player_id].person;
}

person_t*
map_engine_get_subject(void)
{
	return s_camera_person;
}

int
map_engine_get_talk_button(void)
{
	return s_talk_button;
}

int
map_engine_get_talk_distance(void)
{
	return s_talk_distance;
}

int
map_engine_get_talk_key(player_id_t player_id)
{
	return s_players[player_id].talk_key;
}

void
map_engine_set_framerate(int framerate)
{
	s_frame_rate = framerate;
}

void
map_engine_set_player(player_id_t player_id, person_t* person)
{
	int i;

	// detach person from any other players
	for (i = 0; i < PLAYER_MAX; ++i) {
		if (s_players[i].person == person)
			s_players[i].person = NULL;
	}
	s_players[player_id].person = person;
}

void
map_engine_set_subject(person_t* person)
{
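	// the camera will track this person; update_map_engine() copies their
	// normalized position into s_camera_x/s_camera_y each frame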
s_camera_person = person; } void map_engine_set_talk_button(int button_id) { s_talk_button = button_id; } void map_engine_set_talk_distance(int distance) { s_talk_distance = distance; } void map_engine_set_talk_key(player_id_t player_id, int key) { s_players[player_id].talk_key = key; } bool map_engine_change_map(const char* filename) { return change_map(filename, false); } void map_engine_defer(script_t* script, int num_frames) { struct deferred* deferred; if (++s_num_deferreds > s_max_deferreds) { s_max_deferreds = s_num_deferreds * 2; s_deferreds = realloc(s_deferreds, s_max_deferreds * sizeof(struct deferred)); } deferred = &s_deferreds[s_num_deferreds - 1]; deferred->script = script; deferred->frames_left = num_frames; } void map_engine_draw_map(void) { bool is_repeating; int cell_x; int cell_y; int first_cell_x; int first_cell_y; struct map_layer* layer; int layer_height; int layer_width; size2_t resolution; int tile_height; int tile_index; int tile_width; int off_x; int off_y; int x, y, z; if (screen_skipping_frame(g_screen)) return; resolution = screen_size(g_screen); tileset_get_size(s_map->tileset, &tile_width, &tile_height); // render map layers from bottom to top (+Z = up) for (z = 0; z < s_map->num_layers; ++z) { layer = &s_map->layers[z]; is_repeating = s_map->is_repeating || layer->is_parallax; layer_width = layer->width * tile_width; layer_height = layer->height * tile_height; off_x = 0; off_y = 0; map_screen_to_layer(z, s_camera_x, s_camera_y, &off_x, &off_y); // render person reflections if layer is reflective al_hold_bitmap_drawing(true); if (layer->is_reflective) { if (is_repeating) { // for small repeating maps, persons need to be repeated as well for (y = 0; y < resolution.height / layer_height + 2; ++y) for (x = 0; x < resolution.width / layer_width + 2; ++x) draw_persons(z, true, off_x - x * layer_width, off_y - y * layer_height); } else { draw_persons(z, true, off_x, off_y); } } // render tiles, but only if the layer is visible if (layer->is_visible) { first_cell_x = off_x / tile_width; first_cell_y = off_y / tile_height; for (y = 0; y < resolution.height / tile_height + 2; ++y) for (x = 0; x < resolution.width / tile_width + 2; ++x) { cell_x = is_repeating ? (x + first_cell_x) % layer->width : x + first_cell_x; cell_y = is_repeating ? 
(y + first_cell_y) % layer->height
					: y + first_cell_y;
				if (cell_x < 0 || cell_x >= layer->width || cell_y < 0 || cell_y >= layer->height)
					continue;
				tile_index = layer->tilemap[cell_x + cell_y * layer->width].tile_index;
				tileset_draw(s_map->tileset, layer->color_mask,
					x * tile_width - off_x % tile_width,
					y * tile_height - off_y % tile_height,
					tile_index);
			}
		}

		// render persons
		if (is_repeating) {
			// for small repeating maps, persons need to be repeated as well
			for (y = 0; y < resolution.height / layer_height + 2; ++y)
				for (x = 0; x < resolution.width / layer_width + 2; ++x)
					draw_persons(z, false, off_x - x * layer_width, off_y - y * layer_height);
		}
		else {
			draw_persons(z, false, off_x, off_y);
		}
		al_hold_bitmap_drawing(false);
		script_run(layer->render_script, false);
	}

	al_draw_filled_rectangle(0, 0, resolution.width, resolution.height, nativecolor(s_color_mask));
	script_run(s_render_script, false);
}

void
map_engine_exit(void)
{
	s_exiting = true;
}

void
map_engine_fade_to(color_t color_mask, int num_frames)
{
	if (num_frames > 0) {
		s_fade_color_to = color_mask;
		s_fade_color_from = s_color_mask;
		s_fade_frames = num_frames;
		s_fade_progress = 0;
	}
	else {
		s_color_mask = color_mask;
		s_fade_color_to = s_fade_color_from = color_mask;
		s_fade_progress = s_fade_frames = 0;
	}
}

bool
map_engine_start(const char* filename, int framerate)
{
	s_is_map_running = true;
	s_exiting = false;
	s_color_mask = mk_color(0, 0, 0, 0);
	s_fade_color_to = s_fade_color_from = s_color_mask;
	s_fade_progress = s_fade_frames = 0;
	al_clear_to_color(al_map_rgba(0, 0, 0, 255));
	s_frame_rate = framerate;
	if (!change_map(filename, true))
		goto on_error;
	while (!s_exiting && jsal_vm_enabled()) {
		sphere_heartbeat(true, 1);

		// order of operations matches Sphere 1.x.  not sure why, but Sphere 1.x
		// checks for input AFTER an update for some reason...
		update_map_engine(true);
		process_map_input();
		map_engine_draw_map();

		// don't clear the backbuffer.  the Sphere 1.x map engine has a bug where it doesn't
		// clear the backbuffer between frames; as it turns out, a good deal of v1 code relies
		// on that behavior.
		sphere_tick(1, false, s_frame_rate);
	}
	reset_persons(false);
	s_is_map_running = false;
	return true;

on_error:
	s_is_map_running = false;
	return false;
}

void
map_engine_update(void)
{
	update_map_engine(false);
}

rect_t
map_bounds(void)
{
	rect_t bounds;
	int    tile_w, tile_h;

	tileset_get_size(s_map->tileset, &tile_w, &tile_h);
	bounds.x1 = 0;
	bounds.y1 = 0;
	bounds.x2 = s_map->width * tile_w;
	bounds.y2 = s_map->height * tile_h;
	return bounds;
}

int
map_layer_by_name(const char* name)
{
	int i;

	for (i = 0; i < s_map->num_layers; ++i) {
		if (strcmp(name, lstr_cstr(s_map->layers[i].name)) == 0)
			return i;
	}
	return -1;
}

int
map_num_layers(void)
{
	return s_map->num_layers;
}

int
map_num_persons(void)
{
	return s_num_persons;
}

int
map_num_triggers(void)
{
	return vector_len(s_map->triggers);
}

int
map_num_zones(void)
{
	return vector_len(s_map->zones);
}

point3_t
map_origin(void)
{
	return s_map != NULL ? s_map->origin : mk_point3(0, 0, 0);
}

const char*
map_pathname(void)
{
	return s_map ?
s_map_filename : NULL; } person_t* map_person_by_name(const char* name) { int i; for (i = 0; i < s_num_persons; ++i) { if (strcmp(name, s_persons[i]->name) == 0) return s_persons[i]; } return NULL; } int map_tile_at(int x, int y, int layer) { int layer_h; int layer_w; layer_w = s_map->layers[layer].width; layer_h = s_map->layers[layer].height; if (s_map->is_repeating || s_map->layers[layer].is_parallax) { x = (x % layer_w + layer_w) % layer_w; y = (y % layer_h + layer_h) % layer_h; } if (x < 0 || y < 0 || x >= layer_w || y >= layer_h) return -1; return layer_get_tile(layer, x, y); } tileset_t* map_tileset(void) { return s_map->tileset; } int map_trigger_at(int x, int y, int layer) { rect_t bounds; int tile_w, tile_h; struct map_trigger* trigger; iter_t iter; tileset_get_size(s_map->tileset, &tile_w, &tile_h); iter = vector_enum(s_map->triggers); while ((trigger = iter_next(&iter))) { if (trigger->z != layer && false) // layer ignored for compatibility continue; bounds.x1 = trigger->x - tile_w / 2; bounds.y1 = trigger->y - tile_h / 2; bounds.x2 = bounds.x1 + tile_w; bounds.y2 = bounds.y1 + tile_h; if (is_point_in_rect(x, y, bounds)) return iter.index; } return -1; } point2_t map_xy_from_screen(point2_t screen_xy) { int x; int y; x = screen_xy.x; y = screen_xy.y; map_screen_to_map(s_camera_x, s_camera_y, &x, &y); return mk_point2(x, y); } int map_zone_at(int x, int y, int layer, int which) { struct map_zone* zone; iter_t iter; iter = vector_enum(s_map->zones); while ((zone = iter_next(&iter))) { if (zone->layer != layer && false) // layer ignored for compatibility continue; if (is_point_in_rect(x, y, zone->bounds) && --which < 0) return iter.index; } return -1; } point2_t map_get_camera_xy(void) { return mk_point2(s_camera_x, s_camera_y); } void map_set_camera_xy(point2_t where) { s_camera_x = where.x; s_camera_y = where.y; } void map_activate(map_op_t op, bool use_default) { if (use_default) script_run(s_def_map_scripts[op], false); script_run(s_map->scripts[op], false); } bool map_add_trigger(int x, int y, int layer, script_t* script) { struct map_trigger trigger; console_log(2, "creating trigger #%d on map '%s'", vector_len(s_map->triggers), s_map_filename); console_log(3, " location: '%s' @ (%d,%d)", lstr_cstr(s_map->layers[layer].name), x, y); trigger.x = x; trigger.y = y; trigger.z = layer; trigger.script = script_ref(script); if (!vector_push(s_map->triggers, &trigger)) return false; return true; } bool map_add_zone(rect_t bounds, int layer, script_t* script, int steps) { struct map_zone zone; console_log(2, "creating %u-step zone #%d on map '%s'", steps, vector_len(s_map->zones), s_map_filename); console_log(3, " bounds: (%d,%d)-(%d,%d)", bounds.x1, bounds.y1, bounds.x2, bounds.y2); memset(&zone, 0, sizeof(struct map_zone)); zone.bounds = bounds; zone.layer = layer; zone.script = script_ref(script); zone.interval = steps; zone.steps_left = 0; if (!vector_push(s_map->zones, &zone)) return false; return true; } void map_call_default(map_op_t op) { script_run(s_def_map_scripts[op], false); } void map_normalize_xy(double* inout_x, double* inout_y, int layer) { int tile_w, tile_h; int layer_w, layer_h; if (s_map == NULL) return; // can't normalize if no map loaded if (!s_map->is_repeating && !s_map->layers[layer].is_parallax) return; tileset_get_size(s_map->tileset, &tile_w, &tile_h); layer_w = s_map->layers[layer].width * tile_w; layer_h = s_map->layers[layer].height * tile_h; if (inout_x) *inout_x = fmod(fmod(*inout_x, layer_w) + layer_w, layer_w); if (inout_y) *inout_y = 
fmod(fmod(*inout_y, layer_h) + layer_h, layer_h);
}

void
map_remove_trigger(int trigger_index)
{
	vector_remove(s_map->triggers, trigger_index);
}

void
map_remove_zone(int zone_index)
{
	vector_remove(s_map->zones, zone_index);
}

void
layer_on_render(int layer, script_t* script)
{
	script_unref(s_map->layers[layer].render_script);
	s_map->layers[layer].render_script = script_ref(script);
}

const char*
layer_name(int layer)
{
	return lstr_cstr(s_map->layers[layer].name);
}

const obsmap_t*
layer_obsmap(int layer)
{
	return s_map->layers[layer].obsmap;
}

size2_t
layer_size(int layer)
{
	struct map_layer* layer_data;

	layer_data = &s_map->layers[layer];
	return mk_size2(layer_data->width, layer_data->height);
}

color_t
layer_get_color_mask(int layer)
{
	return s_map->layers[layer].color_mask;
}

bool
layer_get_reflective(int layer)
{
	return s_map->layers[layer].is_reflective;
}

int
layer_get_tile(int layer, int x, int y)
{
	struct map_tile* tile;
	int              width;

	width = s_map->layers[layer].width;
	tile = &s_map->layers[layer].tilemap[x + y * width];
	return tile->tile_index;
}

bool
layer_get_visible(int layer)
{
	return s_map->layers[layer].is_visible;
}

void
layer_set_color_mask(int layer, color_t color)
{
	s_map->layers[layer].color_mask = color;
}

void
layer_set_reflective(int layer, bool reflective)
{
	s_map->layers[layer].is_reflective = reflective;
}

void
layer_set_tile(int layer, int x, int y, int tile_index)
{
	struct map_tile* tile;
	int              width;

	width = s_map->layers[layer].width;
	tile = &s_map->layers[layer].tilemap[x + y * width];
	tile->tile_index = tile_index;
	tile->frames_left = tileset_get_delay(s_map->tileset, tile_index);
}

void
layer_set_visible(int layer, bool visible)
{
	s_map->layers[layer].is_visible = visible;
}

void
layer_replace_tiles(int layer, int old_index, int new_index)
{
	int              layer_h;
	int              layer_w;
	struct map_tile* tile;

	int i_x, i_y;

	layer_w = s_map->layers[layer].width;
	layer_h = s_map->layers[layer].height;
	for (i_x = 0; i_x < layer_w; ++i_x)
		for (i_y = 0; i_y < layer_h; ++i_y) {
			tile = &s_map->layers[layer].tilemap[i_x + i_y * layer_w];
			if (tile->tile_index == old_index)
				tile->tile_index = new_index;
		}
}

bool
layer_resize(int layer, int x_size, int y_size)
{
	int              old_height;
	int              old_width;
	struct map_tile* tile;
	int              tile_width;
	int              tile_height;
	struct map_tile* tilemap;
	struct map_trigger* trigger;
	struct map_zone* zone;
	size_t           tilemap_size;

	int x, y, i;

	old_width = s_map->layers[layer].width;
	old_height = s_map->layers[layer].height;

	// allocate a new tilemap and copy the old layer tiles into it.  we can't simply realloc
	// because the tilemap is a 2D array.  the division check below detects multiplication
	// overflow for maliciously large layer sizes.
	tilemap_size = x_size * y_size * sizeof(struct map_tile);
	if (x_size == 0 || tilemap_size / x_size / sizeof(struct map_tile) != y_size
	    || !(tilemap = malloc(tilemap_size)))
		return false;
	for (x = 0; x < x_size; ++x) {
		for (y = 0; y < y_size; ++y) {
			if (x < old_width && y < old_height) {
				tilemap[x + y * x_size] = s_map->layers[layer].tilemap[x + y * old_width];
			}
			else {
				tile = &tilemap[x + y * x_size];
				tile->frames_left = tileset_get_delay(s_map->tileset, 0);
				tile->tile_index = 0;
			}
		}
	}

	// free the old tilemap and substitute the new one
	free(s_map->layers[layer].tilemap);
	s_map->layers[layer].tilemap = tilemap;
	s_map->layers[layer].width = x_size;
	s_map->layers[layer].height = y_size;

	// if we resize the largest layer, the overall map size will change.
	// recalculate it.
	// the map's size is tracked in tiles (see load_map() and map_bounds()); zone
	// bounds and trigger positions are in pixels, so the tile size factors into
	// the in-bounds checks below.
	tileset_get_size(s_map->tileset, &tile_width, &tile_height);
	s_map->width = 0;
	s_map->height = 0;
	for (i = 0; i < s_map->num_layers; ++i) {
		if (!s_map->layers[i].is_parallax) {
			s_map->width = fmax(s_map->width, s_map->layers[i].width);
			s_map->height = fmax(s_map->height, s_map->layers[i].height);
		}
	}

	// ensure zones and triggers remain in-bounds.  if any are completely
	// out-of-bounds, delete them.
	for (i = (int)vector_len(s_map->zones) - 1; i >= 0; --i) {
		zone = vector_get(s_map->zones, i);
		if (zone->bounds.x1 >= s_map->width * tile_width || zone->bounds.y1 >= s_map->height * tile_height)
			vector_remove(s_map->zones, i);
		else {
			if (zone->bounds.x2 > s_map->width * tile_width)
				zone->bounds.x2 = s_map->width * tile_width;
			if (zone->bounds.y2 > s_map->height * tile_height)
				zone->bounds.y2 = s_map->height * tile_height;
		}
	}
	for (i = (int)vector_len(s_map->triggers) - 1; i >= 0; --i) {
		trigger = vector_get(s_map->triggers, i);
		if (trigger->x >= s_map->width * tile_width || trigger->y >= s_map->height * tile_height)
			vector_remove(s_map->triggers, i);
	}
	return true;
}

person_t*
person_new(const char* name, spriteset_t* spriteset, bool is_persistent, script_t* create_script)
{
	point3_t  origin = map_origin();
	person_t* person;

	if (++s_num_persons > s_max_persons) {
		s_max_persons = s_num_persons * 2;
		s_persons = realloc(s_persons, s_max_persons * sizeof(person_t*));
	}
	person = s_persons[s_num_persons - 1] = calloc(1, sizeof(person_t));
	person->id = s_next_person_id++;
	person->sprite = spriteset_ref(spriteset);
	set_person_name(person, name);
	person_set_pose(person, spriteset_pose_name(spriteset, 0));
	person->is_persistent = is_persistent;
	person->is_visible = true;
	person->x = origin.x;
	person->y = origin.y;
	person->layer = origin.z;
	person->speed_x = 1.0;
	person->speed_y = 1.0;
	person->anim_frames = spriteset_frame_delay(person->sprite, person->direction, 0);
	person->mask = mk_color(255, 255, 255, 255);
	person->scale_x = person->scale_y = 1.0;
	person->scripts[PERSON_SCRIPT_ON_CREATE] = create_script;
	person_activate(person, PERSON_SCRIPT_ON_CREATE, NULL, true);
	sort_persons();
	return person;
}

void
person_free(person_t* person)
{
	int i, j;

	// call the person's destroy script *before* renouncing leadership.
	// the destroy script may want to reassign followers (they will be orphaned otherwise), so
	// we want to give it a chance to do so.
person_activate(person, PERSON_SCRIPT_ON_DESTROY, NULL, true); for (i = 0; i < s_num_persons; ++i) { if (s_persons[i]->leader == person) s_persons[i]->leader = NULL; } // remove the person from the engine detach_person(person); for (i = 0; i < s_num_persons; ++i) { if (s_persons[i] == person) { for (j = i; j < s_num_persons - 1; ++j) s_persons[j] = s_persons[j + 1]; --s_num_persons; --i; } } vector_free(person->ignore_list); free_person(person); sort_persons(); } rect_t person_base(const person_t* person) { rect_t base_rect; int base_x; int base_y; double x; double y; base_rect = rect_zoom(spriteset_get_base(person->sprite), person->scale_x, person->scale_y); person_get_xy(person, &x, &y, true); base_x = x - (base_rect.x1 + (base_rect.x2 - base_rect.x1) / 2); base_y = y - (base_rect.y1 + (base_rect.y2 - base_rect.y1) / 2); base_rect.x1 += base_x; base_rect.x2 += base_x; base_rect.y1 += base_y; base_rect.y2 += base_y; return base_rect; } bool person_following(const person_t* person, const person_t* leader) { const person_t* node; node = person; while ((node = node->leader)) if (node == leader) return true; return false; } bool person_has_moved(const person_t* person) { return person->mv_x != 0 || person->mv_y != 0; } vector_t* person_ignore_list(person_t* person) { // note: the returned vector is an array of C strings. these should be treated // as const char*; in other words, don't free them! int i; if (person->ignore_list == NULL) person->ignore_list = vector_new(sizeof(const char*)); vector_clear(person->ignore_list); for (i = 0; i < person->num_ignores; ++i) vector_push(person->ignore_list, &person->ignores[i]); return person->ignore_list; } bool person_ignored_by(const person_t* person, const person_t* other) { // note: commutative; if either person ignores the other, the function will return true int i; if (other->ignore_all_persons || person->ignore_all_persons) return true; for (i = 0; i < other->num_ignores; ++i) if (strcmp(other->ignores[i], person->name) == 0) return true; for (i = 0; i < person->num_ignores; ++i) if (strcmp(person->ignores[i], other->name) == 0) return true; return false; } bool person_moving(const person_t* person) { return person->num_commands > 0; } const char* person_name(const person_t* person) { return person != NULL ? person->name : ""; } bool person_obstructed_at(const person_t* person, double x, double y, person_t** out_obstructing_person, int* out_tile_index) { rect_t area; rect_t base, my_base; double cur_x, cur_y; bool is_obstructed = false; int layer; const obsmap_t* obsmap; int tile_w, tile_h; const tileset_t* tileset; int i, i_x, i_y; map_normalize_xy(&x, &y, person->layer); person_get_xyz(person, &cur_x, &cur_y, &layer, true); my_base = rect_translate(person_base(person), x - cur_x, y - cur_y); if (out_obstructing_person != NULL) *out_obstructing_person = NULL; if (out_tile_index != NULL) *out_tile_index = -1; // check for obstructing persons if (!person->ignore_all_persons) { for (i = 0; i < s_num_persons; ++i) { if (s_persons[i] == person) // these persons aren't going to obstruct themselves! 
continue; if (s_persons[i]->layer != layer) continue; // ignore persons not on the same layer if (person_following(s_persons[i], person)) continue; // ignore own followers base = person_base(s_persons[i]); if (do_rects_overlap(my_base, base) && !person_ignored_by(person, s_persons[i])) { is_obstructed = true; if (out_obstructing_person) *out_obstructing_person = s_persons[i]; break; } } } // no obstructing person, check map-defined obstructions obsmap = layer_obsmap(layer); if (obsmap_test_rect(obsmap, my_base)) is_obstructed = true; // check for obstructing tiles // for performance reasons, the search is constrained to the immediate vicinity // of the person's sprite base. if (!person->ignore_all_tiles) { tileset = map_tileset(); tileset_get_size(tileset, &tile_w, &tile_h); area.x1 = my_base.x1 / tile_w; area.y1 = my_base.y1 / tile_h; area.x2 = area.x1 + (my_base.x2 - my_base.x1) / tile_w + 2; area.y2 = area.y1 + (my_base.y2 - my_base.y1) / tile_h + 2; for (i_x = area.x1; i_x < area.x2; ++i_x) for (i_y = area.y1; i_y < area.y2; ++i_y) { base = rect_translate(my_base, -(i_x * tile_w), -(i_y * tile_h)); obsmap = tileset_obsmap(tileset, map_tile_at(i_x, i_y, layer)); if (obsmap != NULL && obsmap_test_rect(obsmap, base)) { is_obstructed = true; if (out_tile_index) *out_tile_index = map_tile_at(i_x, i_y, layer); break; } } } return is_obstructed; } double person_get_angle(const person_t* person) { return person->theta; } color_t person_get_color(const person_t* person) { return person->mask; } int person_get_frame(const person_t* person) { int num_frames; num_frames = spriteset_num_frames(person->sprite, person->direction); return person->frame % num_frames; } int person_get_frame_delay(const person_t* person) { return person->anim_frames; } bool person_get_ignore_persons(const person_t* person) { return person->ignore_all_persons; } bool person_get_ignore_tiles(const person_t* person) { return person->ignore_all_tiles; } int person_get_layer(const person_t* person) { return person->layer; } person_t* person_get_leader(const person_t* person) { return person->leader; } point2_t person_get_offset(const person_t* person) { return mk_point2(person->x_offset, person->y_offset); } const char* person_get_pose(const person_t* person) { return person->direction; } int person_get_revert_delay(const person_t* person) { return person->revert_delay; } void person_get_scale(const person_t* person, double* out_scale_x, double* out_scale_y) { *out_scale_x = person->scale_x; *out_scale_y = person->scale_y; } void person_get_speed(const person_t* person, double* out_x_speed, double* out_y_speed) { if (out_x_speed) *out_x_speed = person->speed_x; if (out_y_speed) *out_y_speed = person->speed_y; } spriteset_t* person_get_spriteset(const person_t* person) { return person->sprite; } int person_get_trailing(const person_t* person) { return person->follow_distance; } bool person_get_visible(const person_t* person) { return person->is_visible; } void person_get_xy(const person_t* person, double* out_x, double* out_y, bool normalize) { *out_x = person->x; *out_y = person->y; if (normalize) map_normalize_xy(out_x, out_y, person->layer); } void person_get_xyz(const person_t* person, double* out_x, double* out_y, int* out_layer, bool normalize) { *out_x = person->x; *out_y = person->y; *out_layer = person->layer; if (normalize) map_normalize_xy(out_x, out_y, *out_layer); } void person_set_angle(person_t* person, double theta) { person->theta = theta; } void person_set_color(person_t* person, color_t mask) { person->mask 
= mask; } void person_set_frame(person_t* person, int frame_index) { int num_frames; num_frames = spriteset_num_frames(person->sprite, person->direction); person->frame = (frame_index % num_frames + num_frames) % num_frames; person->anim_frames = spriteset_frame_delay(person->sprite, person->direction, person->frame); person->revert_frames = person->revert_delay; } void person_set_frame_delay(person_t* person, int num_frames) { person->anim_frames = num_frames; person->revert_frames = person->revert_delay; } void person_set_ignore_persons(person_t* person, bool ignoring) { person->ignore_all_persons = ignoring; } void person_set_ignore_tiles (person_t* person, bool ignoring) { person->ignore_all_tiles = ignoring; } void person_set_layer(person_t* person, int layer) { person->layer = layer; } bool person_set_leader(person_t* person, person_t* leader, int distance) { const person_t* node; // prevent circular follower chains from forming if (leader != NULL) { node = leader; do { if (node == person) return false; } while ((node = node->leader)); } // add the person as a follower (or sever existing link if leader==NULL) if (leader != NULL) { if (!enlarge_step_history(leader, distance)) return false; person->leader = leader; person->follow_distance = distance; } person->leader = leader; return true; } void person_set_offset(person_t* person, point2_t offset) { person->x_offset = offset.x; person->y_offset = offset.y; } void person_set_pose(person_t* person, const char* pose_name) { person->direction = realloc(person->direction, (strlen(pose_name) + 1) * sizeof(char)); strcpy(person->direction, pose_name); } void person_set_revert_delay(person_t* person, int num_frames) { person->revert_delay = num_frames; person->revert_frames = num_frames; } void person_set_scale(person_t* person, double scale_x, double scale_y) { person->scale_x = scale_x; person->scale_y = scale_y; } void person_set_speed(person_t* person, double x_speed, double y_speed) { person->speed_x = x_speed; person->speed_y = y_speed; } void person_set_spriteset(person_t* person, spriteset_t* spriteset) { spriteset_t* old_spriteset; old_spriteset = person->sprite; person->sprite = spriteset_ref(spriteset); person->anim_frames = spriteset_frame_delay(person->sprite, person->direction, 0); person->frame = 0; spriteset_unref(old_spriteset); } void person_set_trailing(person_t* person, int distance) { enlarge_step_history(person->leader, distance); person->follow_distance = distance; } void person_set_visible(person_t* person, bool visible) { person->is_visible = visible; } void person_set_xyz(person_t* person, double x, double y, int layer) { person->x = x; person->y = y; person->layer = layer; sort_persons(); } void person_on_event(person_t* person, int type, script_t* script) { script_unref(person->scripts[type]); person->scripts[type] = script; } void person_activate(const person_t* person, person_op_t op, const person_t* acting_person, bool use_default) { const person_t* last_acting; const person_t* last_current; last_acting = s_acting_person; last_current = s_current_person; s_acting_person = acting_person; s_current_person = person; if (use_default) script_run(s_def_person_scripts[op], false); if (does_person_exist(person)) script_run(person->scripts[op], false); s_acting_person = last_acting; s_current_person = last_current; } void person_call_default(const person_t* person, person_op_t op, const person_t* acting_person) { const person_t* last_acting; const person_t* last_current; last_acting = s_acting_person; last_current = 
s_current_person; s_acting_person = acting_person; s_current_person = person; script_run(s_def_person_scripts[op], false); s_acting_person = last_acting; s_current_person = last_current; } void person_clear_ignores(person_t* person) { int i; for (i = 0; i < person->num_ignores; ++i) free(person->ignores[i]); person->num_ignores = 0; } void person_clear_queue(person_t* person) { person->num_commands = 0; } bool person_compile_script(person_t* person, int type, const lstring_t* codestring) { script_t* script; const char* script_name; script_name = type == PERSON_SCRIPT_ON_CREATE ? "onCreate" : type == PERSON_SCRIPT_ON_DESTROY ? "onDestroy" : type == PERSON_SCRIPT_ON_TOUCH ? "onTouch" : type == PERSON_SCRIPT_ON_TALK ? "onTalk" : type == PERSON_SCRIPT_GENERATOR ? "genCommands" : NULL; if (script_name == NULL) return false; script = script_new(codestring, "%s/%s/%s.js", map_pathname(), person->name, script_name); person_on_event(person, type, script); return true; } void person_ignore_name(person_t* person, const char* name) { int index; index = person->num_ignores++; person->ignores = realloc(person->ignores, person->num_ignores * sizeof(char*)); person->ignores[index] = strdup(name); // ignore list changed, delete cache vector_free(person->ignore_list); person->ignore_list = NULL; } bool person_queue_command(person_t* person, int command, bool is_immediate) { struct command* commands; bool is_aok = true; switch (command) { case COMMAND_MOVE_NORTHEAST: is_aok &= person_queue_command(person, COMMAND_MOVE_NORTH, true); is_aok &= person_queue_command(person, COMMAND_MOVE_EAST, is_immediate); return is_aok; case COMMAND_MOVE_SOUTHEAST: is_aok &= person_queue_command(person, COMMAND_MOVE_SOUTH, true); is_aok &= person_queue_command(person, COMMAND_MOVE_EAST, is_immediate); return is_aok; case COMMAND_MOVE_SOUTHWEST: is_aok &= person_queue_command(person, COMMAND_MOVE_SOUTH, true); is_aok &= person_queue_command(person, COMMAND_MOVE_WEST, is_immediate); return is_aok; case COMMAND_MOVE_NORTHWEST: is_aok &= person_queue_command(person, COMMAND_MOVE_NORTH, true); is_aok &= person_queue_command(person, COMMAND_MOVE_WEST, is_immediate); return is_aok; default: ++person->num_commands; if (person->num_commands > person->max_commands) { if (!(commands = realloc(person->commands, person->num_commands * 2 * sizeof(struct command)))) return false; person->max_commands = person->num_commands * 2; person->commands = commands; } person->commands[person->num_commands - 1].type = command; person->commands[person->num_commands - 1].is_immediate = is_immediate; person->commands[person->num_commands - 1].script = NULL; return true; } } bool person_queue_script(person_t* person, script_t* script, bool is_immediate) { ++person->num_commands; if (person->num_commands > person->max_commands) { person->max_commands = person->num_commands * 2; if (!(person->commands = realloc(person->commands, person->max_commands * sizeof(struct command)))) return false; } person->commands[person->num_commands - 1].type = COMMAND_RUN_SCRIPT; person->commands[person->num_commands - 1].is_immediate = is_immediate; person->commands[person->num_commands - 1].script = script; return true; } void person_talk(const person_t* person) { rect_t map_rect; person_t* target_person; double talk_x, talk_y; map_rect = map_bounds(); // check if anyone else is within earshot person_get_xy(person, &talk_x, &talk_y, true); if (strstr(person->direction, "north")) talk_y -= s_talk_distance; if (strstr(person->direction, "east")) talk_x += s_talk_distance; if 
(strstr(person->direction, "south")) talk_y += s_talk_distance; if (strstr(person->direction, "west")) talk_x -= s_talk_distance; person_obstructed_at(person, talk_x, talk_y, &target_person, NULL); // if so, call their talk script if (target_person != NULL) person_activate(target_person, PERSON_SCRIPT_ON_TALK, person, true); } void trigger_get_xyz(int trigger_index, int* out_x, int* out_y, int* out_layer) { struct map_trigger* trigger; trigger = vector_get(s_map->triggers, trigger_index); if (out_x != NULL) *out_x = trigger->x; if (out_y != NULL) *out_y = trigger->y; if (out_layer) *out_layer = trigger->z; } void trigger_set_layer(int trigger_index, int layer) { struct map_trigger* trigger; trigger = vector_get(s_map->triggers, trigger_index); trigger->z = layer; } void trigger_set_script(int trigger_index, script_t* script) { script_t* old_script; struct map_trigger* trigger; trigger = vector_get(s_map->triggers, trigger_index); old_script = trigger->script; trigger->script = script_ref(script); script_unref(old_script); } void trigger_set_xy(int trigger_index, int x, int y) { struct map_trigger* trigger; trigger = vector_get(s_map->triggers, trigger_index); trigger->x = x; trigger->y = y; } void trigger_activate(int trigger_index) { int last_trigger; struct map_trigger* trigger; trigger = vector_get(s_map->triggers, trigger_index); last_trigger = s_current_trigger; s_current_trigger = trigger_index; script_run(trigger->script, true); s_current_trigger = last_trigger; } rect_t zone_get_bounds(int zone_index) { struct map_zone* zone; zone = vector_get(s_map->zones, zone_index); return zone->bounds; } int zone_get_layer(int zone_index) { struct map_zone* zone; zone = vector_get(s_map->zones, zone_index); return zone->layer; } int zone_get_steps(int zone_index) { struct map_zone* zone; zone = vector_get(s_map->zones, zone_index); return zone->interval; } void zone_set_bounds(int zone_index, rect_t bounds) { struct map_zone* zone; zone = vector_get(s_map->zones, zone_index); rect_normalize(&bounds); zone->bounds = bounds; } void zone_set_layer(int zone_index, int layer) { struct map_zone* zone; zone = vector_get(s_map->zones, zone_index); zone->layer = layer; } void zone_set_script(int zone_index, script_t* script) { script_t* old_script; struct map_zone* zone; zone = vector_get(s_map->zones, zone_index); old_script = zone->script; zone->script = script_ref(script); script_unref(old_script); } void zone_set_steps(int zone_index, int interval) { struct map_zone* zone; zone = vector_get(s_map->zones, zone_index); zone->interval = interval; zone->steps_left = 0; } void zone_activate(int zone_index) { int last_zone; struct map_zone* zone; zone = vector_get(s_map->zones, zone_index); last_zone = s_current_zone; s_current_zone = zone_index; script_run(zone->script, true); s_current_zone = last_zone; } static bool change_map(const char* filename, bool preserve_persons) { // note: if an error is detected during a map change, change_map() will return false, but // the map engine may be left in an inconsistent state. it is therefore probably wise // to consider such a situation unrecoverable. 
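	// sequence: load the new map, run the old map's exit scripts, free the old
	// map and reset non-persistent persons, spawn the new map's persons, move
	// the camera to the start position, restart BGM if it changed, and finally
	// run the entry scripts.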
struct map* map; person_t* person; struct map_person* person_info; path_t* path; spriteset_t* spriteset = NULL; int i; console_log(2, "changing current map to '%s'", filename); map = load_map(filename); if (map == NULL) return false; if (s_map != NULL) { // run map exit scripts first, before loading new map map_activate(MAP_SCRIPT_ON_LEAVE, true); } // close out old map and prep for new one free_map(s_map); free(s_map_filename); for (i = 0; i < s_num_deferreds; ++i) script_unref(s_deferreds[i].script); s_num_deferreds = 0; s_map = map; s_map_filename = strdup(filename); reset_persons(preserve_persons); // populate persons for (i = 0; i < s_map->num_persons; ++i) { person_info = &s_map->persons[i]; path = game_full_path(g_game, lstr_cstr(person_info->spriteset), "spritesets", true); spriteset = spriteset_load(path_cstr(path)); path_free(path); if (spriteset == NULL) goto on_error; if (!(person = person_new(lstr_cstr(person_info->name), spriteset, false, NULL))) goto on_error; spriteset_unref(spriteset); person_set_xyz(person, person_info->x, person_info->y, person_info->z); person_compile_script(person, PERSON_SCRIPT_ON_CREATE, person_info->create_script); person_compile_script(person, PERSON_SCRIPT_ON_DESTROY, person_info->destroy_script); person_compile_script(person, PERSON_SCRIPT_ON_TOUCH, person_info->touch_script); person_compile_script(person, PERSON_SCRIPT_ON_TALK, person_info->talk_script); person_compile_script(person, PERSON_SCRIPT_GENERATOR, person_info->command_script); // normally this is handled by person_new(), but since in this case the // person-specific create script isn't compiled until after the person is created, // the map engine gets the responsibility. person_activate(person, PERSON_SCRIPT_ON_CREATE, NULL, false); } // set camera over starting position s_camera_x = s_map->origin.x; s_camera_y = s_map->origin.y; // start up map BGM (if same as previous, leave alone) if (s_map->bgm_file == NULL && s_map_bgm_stream != NULL) { sound_unref(s_map_bgm_stream); lstr_free(s_last_bgm_file); s_map_bgm_stream = NULL; s_last_bgm_file = NULL; } else if (s_map->bgm_file != NULL && (s_last_bgm_file == NULL || lstr_cmp(s_map->bgm_file, s_last_bgm_file) != 0)) { sound_unref(s_map_bgm_stream); lstr_free(s_last_bgm_file); s_last_bgm_file = lstr_dup(s_map->bgm_file); path = game_full_path(g_game, lstr_cstr(s_map->bgm_file), "sounds", true); if ((s_map_bgm_stream = sound_new(path_cstr(path)))) { sound_set_repeat(s_map_bgm_stream, true); sound_play(s_map_bgm_stream, s_bgm_mixer); } path_free(path); } // run map entry scripts map_activate(MAP_SCRIPT_ON_ENTER, true); s_frames = 0; return true; on_error: spriteset_unref(spriteset); free_map(s_map); return false; } static void command_person(person_t* person, int command) { double new_x; double new_y; person_t* person_to_touch; new_x = person->x; new_y = person->y; switch (command) { case COMMAND_ANIMATE: person->revert_frames = person->revert_delay; if (person->anim_frames > 0 && --person->anim_frames == 0) { ++person->frame; person->anim_frames = spriteset_frame_delay(person->sprite, person->direction, person->frame); } break; case COMMAND_FACE_NORTH: person_set_pose(person, "north"); break; case COMMAND_FACE_NORTHEAST: person_set_pose(person, "northeast"); break; case COMMAND_FACE_EAST: person_set_pose(person, "east"); break; case COMMAND_FACE_SOUTHEAST: person_set_pose(person, "southeast"); break; case COMMAND_FACE_SOUTH: person_set_pose(person, "south"); break; case COMMAND_FACE_SOUTHWEST: person_set_pose(person, "southwest"); break; 
case COMMAND_FACE_WEST: person_set_pose(person, "west"); break; case COMMAND_FACE_NORTHWEST: person_set_pose(person, "northwest"); break; case COMMAND_MOVE_NORTH: new_y = person->y - person->speed_y; break; case COMMAND_MOVE_EAST: new_x = person->x + person->speed_x; break; case COMMAND_MOVE_SOUTH: new_y = person->y + person->speed_y; break; case COMMAND_MOVE_WEST: new_x = person->x - person->speed_x; break; } if (new_x != person->x || new_y != person->y) { // person is trying to move, make sure the path is clear of obstructions if (!person_obstructed_at(person, new_x, new_y, &person_to_touch, NULL)) { if (new_x != person->x) person->mv_x = new_x > person->x ? 1 : -1; if (new_y != person->y) person->mv_y = new_y > person->y ? 1 : -1; person->x = new_x; person->y = new_y; } else { // if not, and we collided with a person, call that person's touch script if (person_to_touch != NULL) person_activate(person_to_touch, PERSON_SCRIPT_ON_TOUCH, person, true); } } } static int compare_persons(const void* a, const void* b) { person_t* p1 = *(person_t**)a; person_t* p2 = *(person_t**)b; double x, y_p1, y_p2; int y_delta; person_get_xy(p1, &x, &y_p1, true); person_get_xy(p2, &x, &y_p2, true); y_delta = y_p1 - y_p2; if (y_delta != 0) return y_delta; else if (person_following(p1, p2)) return -1; else if (person_following(p2, p1)) return 1; else return p1->id - p2->id; } static void detach_person(const person_t* person) { int i; if (s_camera_person == person) s_camera_person = NULL; for (i = 0; i < PLAYER_MAX; ++i) { if (s_players[i].person == person) s_players[i].person = NULL; } } static bool does_person_exist(const person_t* person) { int i; for (i = 0; i < s_num_persons; ++i) if (person == s_persons[i]) return true; return false; } void draw_persons(int layer, bool is_flipped, int cam_x, int cam_y) { person_t* person; spriteset_t* sprite; int w, h; double x, y; int i; for (i = 0; i < s_num_persons; ++i) { person = s_persons[i]; if (!person->is_visible || person->layer != layer) continue; sprite = person->sprite; w = spriteset_width(sprite); h = spriteset_height(sprite); person_get_xy(person, &x, &y, true); x -= cam_x - person->x_offset; y -= cam_y - person->y_offset; spriteset_draw(sprite, person->mask, is_flipped, person->theta, person->scale_x, person->scale_y, person->direction, trunc(x), trunc(y), person->frame); } } static bool enlarge_step_history(person_t* person, int new_size) { struct step *new_steps; size_t pastmost; double last_x; double last_y; int i; if (new_size > person->max_history) { if (!(new_steps = realloc(person->steps, new_size * sizeof(struct step)))) return false; // when enlarging the history buffer, fill new slots with pastmost values // (kind of like sign extension) pastmost = person->max_history - 1; last_x = person->steps != NULL ? person->steps[pastmost].x : person->x; last_y = person->steps != NULL ? 
person->steps[pastmost].y : person->y;
		for (i = person->max_history; i < new_size; ++i) {
			new_steps[i].x = last_x;
			new_steps[i].y = last_y;
		}
		person->steps = new_steps;
		person->max_history = new_size;
	}
	return true;
}

static void
free_map(struct map* map)
{
	struct map_trigger* trigger;
	struct map_zone*    zone;

	iter_t iter;
	int    i;

	if (map == NULL)
		return;
	for (i = 0; i < MAP_SCRIPT_MAX; ++i)
		script_unref(map->scripts[i]);
	for (i = 0; i < map->num_layers; ++i) {
		script_unref(map->layers[i].render_script);
		lstr_free(map->layers[i].name);
		free(map->layers[i].tilemap);
		obsmap_free(map->layers[i].obsmap);
	}
	for (i = 0; i < map->num_persons; ++i) {
		lstr_free(map->persons[i].name);
		lstr_free(map->persons[i].spriteset);
		lstr_free(map->persons[i].create_script);
		lstr_free(map->persons[i].destroy_script);
		lstr_free(map->persons[i].command_script);
		lstr_free(map->persons[i].talk_script);
		lstr_free(map->persons[i].touch_script);
	}
	// release the trigger and zone scripts owned by this map
	iter = vector_enum(map->triggers);
	while ((trigger = iter_next(&iter)))
		script_unref(trigger->script);
	iter = vector_enum(map->zones);
	while ((zone = iter_next(&iter)))
		script_unref(zone->script);
	lstr_free(map->bgm_file);
	tileset_free(map->tileset);
	free(map->layers);
	free(map->persons);
	vector_free(map->triggers);
	vector_free(map->zones);
	free(map);
}

static void
free_person(person_t* person)
{
	int i;

	free(person->steps);
	for (i = 0; i < PERSON_SCRIPT_MAX; ++i)
		script_unref(person->scripts[i]);
	spriteset_unref(person->sprite);
	free(person->commands);
	free(person->name);
	free(person->direction);
	free(person);
}

static struct map_trigger*
get_trigger_at(int x, int y, int layer, int* out_index)
{
	rect_t              bounds;
	struct map_trigger* found_item = NULL;
	int                 tile_w, tile_h;
	struct map_trigger* trigger;

	iter_t iter;

	tileset_get_size(s_map->tileset, &tile_w, &tile_h);
	iter = vector_enum(s_map->triggers);
	while ((trigger = iter_next(&iter))) {
		if (trigger->z != layer && false)  // layer ignored for compatibility reasons
			continue;
		bounds.x1 = trigger->x - tile_w / 2;
		bounds.y1 = trigger->y - tile_h / 2;
		bounds.x2 = bounds.x1 + tile_w;
		bounds.y2 = bounds.y1 + tile_h;
		if (is_point_in_rect(x, y, bounds)) {
			found_item = trigger;
			if (out_index != NULL)
				*out_index = (int)iter.index;
			break;
		}
	}
	return found_item;
}

static struct map_zone*
get_zone_at(int x, int y, int layer, int which, int* out_index)
{
	struct map_zone* found_item = NULL;
	struct map_zone* zone;

	iter_t iter;

	iter = vector_enum(s_map->zones);
	while ((zone = iter_next(&iter))) {
		if (zone->layer != layer && false)  // layer ignored for compatibility
			continue;
		if (is_point_in_rect(x, y, zone->bounds) && which-- == 0) {
			found_item = zone;
			if (out_index)
				*out_index = (int)iter.index;
			break;
		}
	}
	return found_item;
}

static struct map*
load_map(const char* filename)
{
	// strings: 0 - tileset filename
	//          1 - music filename
	//          2 - script filename (obsolete, not used)
	//          3 - entry script
	//          4 - exit script
	//          5 - exit north script
	//          6 - exit east script
	//          7 - exit south script
	//          8 - exit west script

	uint16_t                 count;
	struct rmp_entity_header entity_hdr;
	file_t*                  file = NULL;
	bool                     has_failed;
	struct map_layer*        layer;
	struct rmp_layer_header  layer_hdr;
	struct map*              map = NULL;
	int                      num_tiles;
	struct map_person*       person;
	struct rmp_header        rmp;
	lstring_t*               script;
	rect_t                   segment;
	int16_t*                 tile_data = NULL;
	path_t*                  tileset_path;
	tileset_t*               tileset;
	struct map_trigger       trigger;
	struct map_zone          zone;
	struct rmp_zone_header   zone_hdr;
	lstring_t* *             strings = NULL;

	int i, j, x, y, z;

	console_log(2, "constructing new map from '%s'",
filename); memset(&rmp, 0, sizeof(struct rmp_header)); if (!(file = file_open(g_game, filename, "rb"))) goto on_error; map = calloc(1, sizeof(struct map)); if (file_read(file, &rmp, 1, sizeof(struct rmp_header)) != 1) goto on_error; if (memcmp(rmp.signature, ".rmp", 4) != 0) goto on_error; if (rmp.num_strings != 3 && rmp.num_strings != 5 && rmp.num_strings < 9) goto on_error; if (rmp.start_layer < 0 || rmp.start_layer >= rmp.num_layers) rmp.start_layer = 0; // being nice here, this really should fail outright switch (rmp.version) { case 1: // load strings (resource filenames, scripts, etc.) strings = calloc(rmp.num_strings, sizeof(lstring_t*)); has_failed = false; for (i = 0; i < rmp.num_strings; ++i) has_failed = has_failed || ((strings[i] = read_lstring(file, true)) == NULL); if (has_failed) goto on_error; // pre-allocate map structures map->layers = calloc(rmp.num_layers, sizeof(struct map_layer)); map->persons = calloc(rmp.num_entities, sizeof(struct map_person)); map->triggers = vector_new(sizeof(struct map_trigger)); map->zones = vector_new(sizeof(struct map_zone)); // load layers for (i = 0; i < rmp.num_layers; ++i) { if (file_read(file, &layer_hdr, 1, sizeof(struct rmp_layer_header)) != 1) goto on_error; layer = &map->layers[i]; layer->is_parallax = (layer_hdr.flags & 2) != 0x0; layer->is_reflective = layer_hdr.is_reflective; layer->is_visible = (layer_hdr.flags & 1) == 0x0; layer->color_mask = mk_color(255, 255, 255, 255); layer->width = layer_hdr.width; layer->height = layer_hdr.height; layer->autoscroll_x = layer->is_parallax ? layer_hdr.scrolling_x : 0.0; layer->autoscroll_y = layer->is_parallax ? layer_hdr.scrolling_y : 0.0; layer->parallax_x = layer->is_parallax ? layer_hdr.parallax_x : 1.0; layer->parallax_y = layer->is_parallax ? layer_hdr.parallax_y : 1.0; if (!layer->is_parallax) { map->width = fmax(map->width, layer->width); map->height = fmax(map->height, layer->height); } if (!(layer->tilemap = malloc(layer_hdr.width * layer_hdr.height * sizeof(struct map_tile)))) goto on_error; layer->name = read_lstring(file, true); layer->obsmap = obsmap_new(); num_tiles = layer_hdr.width * layer_hdr.height; if ((tile_data = malloc(num_tiles * 2)) == NULL) goto on_error; if (file_read(file, tile_data, num_tiles, 2) != num_tiles) goto on_error; for (j = 0; j < num_tiles; ++j) layer->tilemap[j].tile_index = tile_data[j]; for (j = 0; j < layer_hdr.num_segments; ++j) { if (!fread_rect32(file, &segment)) goto on_error; obsmap_add_line(layer->obsmap, segment); } free(tile_data); tile_data = NULL; } // if either dimension is zero, the map has no non-parallax layers and is thus malformed if (map->width == 0 || map->height == 0) goto on_error; // load entities map->num_persons = 0; for (i = 0; i < rmp.num_entities; ++i) { if (file_read(file, &entity_hdr, 1, sizeof(struct rmp_entity_header)) != 1) goto on_error; if (entity_hdr.z < 0 || entity_hdr.z >= rmp.num_layers) entity_hdr.z = 0; switch (entity_hdr.type) { case 1: // person ++map->num_persons; person = &map->persons[map->num_persons - 1]; memset(person, 0, sizeof(struct map_person)); if (!(person->name = read_lstring(file, true))) goto on_error; if (!(person->spriteset = read_lstring(file, true))) goto on_error; person->x = entity_hdr.x; person->y = entity_hdr.y; person->z = entity_hdr.z; if (file_read(file, &count, 1, 2) != 1 || count < 5) goto on_error; person->create_script = read_lstring(file, false); person->destroy_script = read_lstring(file, false); person->touch_script = read_lstring(file, false); person->talk_script = 
read_lstring(file, false); person->command_script = read_lstring(file, false); for (j = 5; j < count; ++j) lstr_free(read_lstring(file, true)); file_seek(file, 16, WHENCE_CUR); break; case 2: // trigger if ((script = read_lstring(file, false)) == NULL) goto on_error; memset(&trigger, 0, sizeof(struct map_trigger)); trigger.x = entity_hdr.x; trigger.y = entity_hdr.y; trigger.z = entity_hdr.z; trigger.script = script_new(script, "%s/trig%d", filename, vector_len(map->triggers)); if (!vector_push(map->triggers, &trigger)) return false; lstr_free(script); break; default: goto on_error; } } // load zones for (i = 0; i < rmp.num_zones; ++i) { if (file_read(file, &zone_hdr, 1, sizeof(struct rmp_zone_header)) != 1) goto on_error; if ((script = read_lstring(file, false)) == NULL) goto on_error; if (zone_hdr.layer < 0 || zone_hdr.layer >= rmp.num_layers) zone_hdr.layer = 0; zone.layer = zone_hdr.layer; zone.bounds = mk_rect(zone_hdr.x1, zone_hdr.y1, zone_hdr.x2, zone_hdr.y2); zone.interval = zone_hdr.interval; zone.steps_left = 0; zone.script = script_new(script, "%s/zone%d", filename, vector_len(map->zones)); rect_normalize(&zone.bounds); if (!vector_push(map->zones, &zone)) return false; lstr_free(script); } // load tileset if (strcmp(lstr_cstr(strings[0]), "") != 0) { tileset_path = path_strip(path_new(filename)); path_append(tileset_path, lstr_cstr(strings[0])); tileset = tileset_new(path_cstr(tileset_path)); path_free(tileset_path); } else { tileset = tileset_read(file); } if (tileset == NULL) goto on_error; // initialize tile animation for (z = 0; z < rmp.num_layers; ++z) { layer = &map->layers[z]; for (x = 0; x < layer->width; ++x) for (y = 0; y < layer->height; ++y) { i = x + y * layer->width; map->layers[z].tilemap[i].frames_left = tileset_get_delay(tileset, map->layers[z].tilemap[i].tile_index); } } // wrap things up map->bgm_file = strcmp(lstr_cstr(strings[1]), "") != 0 ? 
lstr_dup(strings[1]) : NULL; map->num_layers = rmp.num_layers; map->is_repeating = rmp.repeat_map; map->origin.x = rmp.start_x; map->origin.y = rmp.start_y; map->origin.z = rmp.start_layer; map->tileset = tileset; if (rmp.num_strings >= 5) { map->scripts[MAP_SCRIPT_ON_ENTER] = script_new(strings[3], "%s/onEnter", filename); map->scripts[MAP_SCRIPT_ON_LEAVE] = script_new(strings[4], "%s/onLeave", filename); } if (rmp.num_strings >= 9) { map->scripts[MAP_SCRIPT_ON_LEAVE_NORTH] = script_new(strings[5], "%s/onLeave", filename); map->scripts[MAP_SCRIPT_ON_LEAVE_EAST] = script_new(strings[6], "%s/onLeaveEast", filename); map->scripts[MAP_SCRIPT_ON_LEAVE_SOUTH] = script_new(strings[7], "%s/onLeaveSouth", filename); map->scripts[MAP_SCRIPT_ON_LEAVE_WEST] = script_new(strings[8], "%s/onLeaveWest", filename); } for (i = 0; i < rmp.num_strings; ++i) lstr_free(strings[i]); free(strings); break; default: goto on_error; } file_close(file); return map; on_error: if (file != NULL) file_close(file); free(tile_data); if (strings != NULL) { for (i = 0; i < rmp.num_strings; ++i) lstr_free(strings[i]); free(strings); } if (map != NULL) { if (map->layers != NULL) { for (i = 0; i < rmp.num_layers; ++i) { lstr_free(map->layers[i].name); free(map->layers[i].tilemap); obsmap_free(map->layers[i].obsmap); } free(map->layers); } if (map->persons != NULL) { for (i = 0; i < map->num_persons; ++i) { lstr_free(map->persons[i].name); lstr_free(map->persons[i].spriteset); lstr_free(map->persons[i].create_script); lstr_free(map->persons[i].destroy_script); lstr_free(map->persons[i].command_script); lstr_free(map->persons[i].talk_script); lstr_free(map->persons[i].touch_script); } free(map->persons); } vector_free(map->triggers); vector_free(map->zones); free(map); } return NULL; } void map_screen_to_layer(int layer, int camera_x, int camera_y, int* inout_x, int* inout_y) { rect_t bounds; int center_x; int center_y; int layer_h; int layer_w; float plx_offset_x = 0.0; int plx_offset_y = 0.0; size2_t resolution; int tile_w; int tile_h; int x_offset; int y_offset; // get layer and screen metrics resolution = screen_size(g_screen); tileset_get_size(s_map->tileset, &tile_w, &tile_h); layer_w = s_map->layers[layer].width * tile_w; layer_h = s_map->layers[layer].height * tile_h; center_x = resolution.width / 2; center_y = resolution.height / 2; // initial camera correction if (!s_map->is_repeating) { bounds = map_bounds(); camera_x = fmin(fmax(camera_x, bounds.x1 + center_x), bounds.x2 - center_x); camera_y = fmin(fmax(camera_y, bounds.y1 + center_y), bounds.y2 - center_y); } // remap screen coordinates to layer coordinates plx_offset_x = s_frames * s_map->layers[layer].autoscroll_x - camera_x * (s_map->layers[layer].parallax_x - 1.0); plx_offset_y = s_frames * s_map->layers[layer].autoscroll_y - camera_y * (s_map->layers[layer].parallax_y - 1.0); x_offset = camera_x - center_x - plx_offset_x; y_offset = camera_y - center_y - plx_offset_y; if (!s_map->is_repeating && !s_map->layers[layer].is_parallax) { // if the map is smaller than the screen, align to top left. centering // would be better aesthetically, but there are a couple Sphere 1.x games // that depend on top-left justification. if (layer_w < resolution.width) x_offset = 0; if (layer_h < resolution.height) y_offset = 0; } if (inout_x != NULL) *inout_x += x_offset; if (inout_y != NULL) *inout_y += y_offset; // normalize coordinates. this simplifies rendering calculations. 
if (s_map->is_repeating || s_map->layers[layer].is_parallax) { if (inout_x) *inout_x = (*inout_x % layer_w + layer_w) % layer_w; if (inout_y) *inout_y = (*inout_y % layer_h + layer_h) % layer_h; } } static void map_screen_to_map(int camera_x, int camera_y, int* inout_x, int* inout_y) { rect_t bounds; int center_x; int center_y; int map_h; int map_w; size2_t resolution; int tile_h; int tile_w; int x_offset; int y_offset; // get layer and screen metrics resolution = screen_size(g_screen); tileset_get_size(s_map->tileset, &tile_w, &tile_h); map_w = s_map->width * tile_w; map_h = s_map->height * tile_h; center_x = resolution.width / 2; center_y = resolution.height / 2; // initial camera correction if (!s_map->is_repeating) { bounds = map_bounds(); camera_x = fmin(fmax(camera_x, bounds.x1 + center_x), bounds.x2 - center_x); camera_y = fmin(fmax(camera_y, bounds.y1 + center_y), bounds.y2 - center_y); } // remap screen coordinates to map coordinates x_offset = camera_x - center_x; y_offset = camera_y - center_y; if (!s_map->is_repeating) { // if the map is smaller than the screen, align to top left. centering // would be better aesthetically, but there are a couple Sphere 1.x games // that depend on top-left justification. if (map_w < resolution.width) x_offset = 0; if (map_h < resolution.height) y_offset = 0; } if (inout_x != NULL) *inout_x += x_offset; if (inout_y != NULL) *inout_y += y_offset; // normalize coordinates if (s_map->is_repeating) { if (inout_x) *inout_x = (*inout_x % map_w + map_w) % map_w; if (inout_y) *inout_y = (*inout_y % map_h + map_h) % map_h; } } static void process_map_input(void) { int mv_x, mv_y; person_t* person; int i; // clear out excess keys from key queue kb_clear_queue(); // check for player control of input persons, if there are any for (i = 0; i < PLAYER_MAX; ++i) { person = s_players[i].person; if (person != NULL) { if (kb_is_key_down(get_player_key(i, PLAYER_KEY_A)) || kb_is_key_down(s_players[i].talk_key) || joy_is_button_down(i, s_talk_button)) { if (s_players[i].is_talk_allowed) person_talk(person); s_players[i].is_talk_allowed = false; } else { // allow talking again only after key is released s_players[i].is_talk_allowed = true; } mv_x = 0; mv_y = 0; if (person->num_commands == 0 && person->leader == NULL) { // allow player control only if the input person is idle and not being led around // by someone else. 
if (kb_is_key_down(get_player_key(i, PLAYER_KEY_UP)) || joy_position(i, 1) <= -0.5) mv_y = -1; if (kb_is_key_down(get_player_key(i, PLAYER_KEY_RIGHT)) || joy_position(i, 0) >= 0.5) mv_x = 1; if (kb_is_key_down(get_player_key(i, PLAYER_KEY_DOWN)) || joy_position(i, 1) >= 0.5) mv_y = 1; if (kb_is_key_down(get_player_key(i, PLAYER_KEY_LEFT)) || joy_position(i, 0) <= -0.5) mv_x = -1; } switch (mv_x + mv_y * 3) { case -3: // north person_queue_command(person, COMMAND_MOVE_NORTH, true); person_queue_command(person, COMMAND_FACE_NORTH, true); person_queue_command(person, COMMAND_ANIMATE, false); break; case -2: // northeast person_queue_command(person, COMMAND_MOVE_NORTHEAST, true); person_queue_command(person, COMMAND_FACE_NORTHEAST, true); person_queue_command(person, COMMAND_ANIMATE, false); break; case 1: // east person_queue_command(person, COMMAND_MOVE_EAST, true); person_queue_command(person, COMMAND_FACE_EAST, true); person_queue_command(person, COMMAND_ANIMATE, false); break; case 4: // southeast person_queue_command(person, COMMAND_MOVE_SOUTHEAST, true); person_queue_command(person, COMMAND_FACE_SOUTHEAST, true); person_queue_command(person, COMMAND_ANIMATE, false); break; case 3: // south person_queue_command(person, COMMAND_MOVE_SOUTH, true); person_queue_command(person, COMMAND_FACE_SOUTH, true); person_queue_command(person, COMMAND_ANIMATE, false); break; case 2: // southwest person_queue_command(person, COMMAND_MOVE_SOUTHWEST, true); person_queue_command(person, COMMAND_FACE_SOUTHWEST, true); person_queue_command(person, COMMAND_ANIMATE, false); break; case -1: // west person_queue_command(person, COMMAND_MOVE_WEST, true); person_queue_command(person, COMMAND_FACE_WEST, true); person_queue_command(person, COMMAND_ANIMATE, false); break; case -4: // northwest person_queue_command(person, COMMAND_MOVE_NORTHWEST, true); person_queue_command(person, COMMAND_FACE_NORTHWEST, true); person_queue_command(person, COMMAND_ANIMATE, false); break; } } } update_bound_keys(true); } static void record_step(person_t* person) { struct step* p_step; if (person->max_history <= 0) return; memmove(&person->steps[1], &person->steps[0], (person->max_history - 1) * sizeof(struct step)); p_step = &person->steps[0]; p_step->x = person->x; p_step->y = person->y; } void reset_persons(bool keep_existing) { unsigned int id; point3_t origin; person_t* person; int i, j; origin = map_origin(); for (i = 0; i < s_num_persons; ++i) { person = s_persons[i]; id = person->id; if (!keep_existing) person->num_commands = 0; if (person->is_persistent || keep_existing) { person->x = origin.x; person->y = origin.y; person->layer = origin.z; } else { person_activate(person, PERSON_SCRIPT_ON_DESTROY, NULL, true); free_person(person); --s_num_persons; for (j = i; j < s_num_persons; ++j) s_persons[j] = s_persons[j + 1]; --i; } } sort_persons(); } static void set_person_name(person_t* person, const char* name) { person->name = realloc(person->name, (strlen(name) + 1) * sizeof(char)); strcpy(person->name, name); } static void sort_persons(void) { qsort(s_persons, s_num_persons, sizeof(person_t*), compare_persons); } static void update_map_engine(bool in_main_loop) { bool has_moved; int index; bool is_sort_needed = false; int last_trigger; int last_zone; int layer; int map_w, map_h; int num_zone_steps; script_t* script_to_run; int script_type; double start_x[PLAYER_MAX]; double start_y[PLAYER_MAX]; int tile_w, tile_h; struct map_trigger* trigger; double x, y, px, py; struct map_zone* zone; int i, j, k; ++s_frames; 
tileset_get_size(s_map->tileset, &tile_w, &tile_h); map_w = s_map->width * tile_w; map_h = s_map->height * tile_h; tileset_update(s_map->tileset); for (i = 0; i < PLAYER_MAX; ++i) if (s_players[i].person != NULL) person_get_xy(s_players[i].person, &start_x[i], &start_y[i], false); for (i = 0; i < s_num_persons; ++i) { if (s_persons[i]->leader != NULL) continue; // skip followers for now update_person(s_persons[i], &has_moved); is_sort_needed |= has_moved; } if (is_sort_needed) sort_persons(); // update color mask fade level if (s_fade_progress < s_fade_frames) { ++s_fade_progress; s_color_mask = color_mix(s_fade_color_to, s_fade_color_from, s_fade_progress, s_fade_frames - s_fade_progress); } // update camera if (s_camera_person != NULL) { person_get_xy(s_camera_person, &x, &y, true); s_camera_x = x; s_camera_y = y; } // run edge script if the camera has moved past the edge of the map // note: only applies for non-repeating maps if (in_main_loop && !s_map->is_repeating) { script_type = s_camera_y < 0 ? MAP_SCRIPT_ON_LEAVE_NORTH : s_camera_x >= map_w ? MAP_SCRIPT_ON_LEAVE_EAST : s_camera_y >= map_h ? MAP_SCRIPT_ON_LEAVE_SOUTH : s_camera_x < 0 ? MAP_SCRIPT_ON_LEAVE_WEST : MAP_SCRIPT_MAX; if (script_type < MAP_SCRIPT_MAX) map_activate(script_type, true); } // if there are any input persons, check for trigger activation for (i = 0; i < PLAYER_MAX; ++i) if (s_players[i].person != NULL) { // did we step on a trigger or move to a new one? person_get_xyz(s_players[i].person, &x, &y, &layer, true); trigger = get_trigger_at(x, y, layer, &index); if (trigger != s_on_trigger) { last_trigger = s_current_trigger; s_current_trigger = index; s_on_trigger = trigger; if (trigger != NULL) script_run(trigger->script, false); s_current_trigger = last_trigger; } } // update any zones occupied by the input person // note: a zone's step count is in reality a pixel count, so a zone // may be updated multiple times in a single frame. for (k = 0; k < PLAYER_MAX; ++k) if (s_players[k].person != NULL) { person_get_xy(s_players[k].person, &x, &y, false); px = fabs(x - start_x[k]); py = fabs(y - start_y[k]); num_zone_steps = px > py ? px : py; for (i = 0; i < num_zone_steps; ++i) { j = 0; while ((zone = get_zone_at(x, y, layer, j++, &index))) { if (zone->steps_left-- <= 0) { last_zone = s_current_zone; s_current_zone = index; zone->steps_left = zone->interval; script_run(zone->script, true); s_current_zone = last_zone; } } } } // check if there are any deferred scripts due to run this frame // and run the ones that are for (i = 0; i < s_num_deferreds; ++i) { if (s_deferreds[i].frames_left-- <= 0) { script_to_run = s_deferreds[i].script; for (j = i; j < s_num_deferreds - 1; ++j) s_deferreds[j] = s_deferreds[j + 1]; --s_num_deferreds; script_run(script_to_run, false); script_unref(script_to_run); --i; } } // now that everything else is in order, we can run the // update script! 
script_run(s_update_script, false); } static void update_person(person_t* person, bool* out_has_moved) { struct command command; double delta_x, delta_y; int facing; bool has_moved; bool is_finished; const person_t* last_person; struct step step; int vector; int i; person->mv_x = 0; person->mv_y = 0; if (person->revert_frames > 0 && --person->revert_frames <= 0) person->frame = 0; if (person->leader == NULL) { // no leader; use command queue // call the command generator if the queue is empty if (person->num_commands == 0) person_activate(person, PERSON_SCRIPT_GENERATOR, NULL, true); // run through the queue, stopping after the first non-immediate command is_finished = !does_person_exist(person) || person->num_commands == 0; while (!is_finished) { command = person->commands[0]; --person->num_commands; for (i = 0; i < person->num_commands; ++i) person->commands[i] = person->commands[i + 1]; last_person = s_current_person; s_current_person = person; if (command.type != COMMAND_RUN_SCRIPT) command_person(person, command.type); else script_run(command.script, false); s_current_person = last_person; script_unref(command.script); is_finished = !does_person_exist(person) // stop if person was destroyed || !command.is_immediate || person->num_commands == 0; } } else { // leader set; follow the leader! step = person->leader->steps[person->follow_distance - 1]; delta_x = step.x - person->x; delta_y = step.y - person->y; if (fabs(delta_x) > person->speed_x) command_person(person, delta_x > 0 ? COMMAND_MOVE_EAST : COMMAND_MOVE_WEST); if (!does_person_exist(person)) return; if (fabs(delta_y) > person->speed_y) command_person(person, delta_y > 0 ? COMMAND_MOVE_SOUTH : COMMAND_MOVE_NORTH); if (!does_person_exist(person)) return; vector = person->mv_x + person->mv_y * 3; facing = vector == -3 ? COMMAND_FACE_NORTH : vector == -2 ? COMMAND_FACE_NORTHEAST : vector == 1 ? COMMAND_FACE_EAST : vector == 4 ? COMMAND_FACE_SOUTHEAST : vector == 3 ? COMMAND_FACE_SOUTH : vector == 2 ? COMMAND_FACE_SOUTHWEST : vector == -1 ? COMMAND_FACE_WEST : vector == -4 ? COMMAND_FACE_NORTHWEST : COMMAND_WAIT; if (facing != COMMAND_WAIT) command_person(person, COMMAND_ANIMATE); if (!does_person_exist(person)) return; command_person(person, facing); } // check that the person didn't mysteriously disappear... if (!does_person_exist(person)) return; // they probably got eaten by a pig. // if the person's position changed, record it in their step history *out_has_moved = person_has_moved(person); if (*out_has_moved) record_step(person); // recursively update the follower chain for (i = 0; i < s_num_persons; ++i) { if (s_persons[i]->leader != person) continue; update_person(s_persons[i], &has_moved); *out_has_moved |= has_moved; } }
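One implementation note on the map-engine code above: `record_step` shifts the person's entire step history with `memmove` every time the person moves, which costs O(max_history) per frame. A ring buffer provides the same "n steps ago" lookup in O(1); a minimal illustrative sketch (not minisphere code; it reuses the `struct step` x/y layout from above):

```c
/* Ring-buffer step history: `head` is the newest entry, and the
 * position from `n` frames ago lives at (head + n) % capacity. */
struct step_ring {
    struct step* steps;
    int          capacity;
    int          head;
};

static void ring_record(struct step_ring* ring, double x, double y)
{
    ring->head = (ring->head + ring->capacity - 1) % ring->capacity;
    ring->steps[ring->head].x = x;
    ring->steps[ring->head].y = y;
}

static struct step ring_lookback(const struct step_ring* ring, int n)
{
    return ring->steps[(ring->head + n) % ring->capacity];
}
```

With this, the follower lookup `leader->steps[person->follow_distance - 1]` becomes `ring_lookback(ring, person->follow_distance - 1)`; nothing else in `update_person` needs to change.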
layer_resize(int layer, int x_size, int y_size) { int old_height; int old_width; struct map_tile* tile; int tile_width; int tile_height; struct map_tile* tilemap; struct map_trigger* trigger; struct map_zone* zone; int x, y, i; old_width = s_map->layers[layer].width; old_height = s_map->layers[layer].height; // allocate a new tilemap and copy the old layer tiles into it. we can't simply realloc // because the tilemap is a 2D array. if (!(tilemap = malloc(x_size * y_size * sizeof(struct map_tile)))) return false; for (x = 0; x < x_size; ++x) { for (y = 0; y < y_size; ++y) { if (x < old_width && y < old_height) { tilemap[x + y * x_size] = s_map->layers[layer].tilemap[x + y * old_width]; } else { tile = &tilemap[x + y * x_size]; tile->frames_left = tileset_get_delay(s_map->tileset, 0); tile->tile_index = 0; } } } // free the old tilemap and substitute the new one free(s_map->layers[layer].tilemap); s_map->layers[layer].tilemap = tilemap; s_map->layers[layer].width = x_size; s_map->layers[layer].height = y_size; // if we resize the largest layer, the overall map size will change. // recalcuate it. tileset_get_size(s_map->tileset, &tile_width, &tile_height); s_map->width = 0; s_map->height = 0; for (i = 0; i < s_map->num_layers; ++i) { if (!s_map->layers[i].is_parallax) { s_map->width = fmax(s_map->width, s_map->layers[i].width * tile_width); s_map->height = fmax(s_map->height, s_map->layers[i].height * tile_height); } } // ensure zones and triggers remain in-bounds. if any are completely // out-of-bounds, delete them. for (i = (int)vector_len(s_map->zones) - 1; i >= 0; --i) { zone = vector_get(s_map->zones, i); if (zone->bounds.x1 >= s_map->width || zone->bounds.y1 >= s_map->height) vector_remove(s_map->zones, i); else { if (zone->bounds.x2 > s_map->width) zone->bounds.x2 = s_map->width; if (zone->bounds.y2 > s_map->height) zone->bounds.y2 = s_map->height; } } for (i = (int)vector_len(s_map->triggers) - 1; i >= 0; --i) { trigger = vector_get(s_map->triggers, i); if (trigger->x >= s_map->width || trigger->y >= s_map->height) vector_remove(s_map->triggers, i); } return true; }
layer_resize(int layer, int x_size, int y_size) { int old_height; int old_width; struct map_tile* tile; int tile_width; int tile_height; struct map_tile* tilemap; struct map_trigger* trigger; struct map_zone* zone; size_t tilemap_size; int x, y, i; old_width = s_map->layers[layer].width; old_height = s_map->layers[layer].height; // allocate a new tilemap and copy the old layer tiles into it. we can't simply realloc // because the tilemap is a 2D array. tilemap_size = x_size * y_size * sizeof(struct map_tile); if (x_size == 0 || tilemap_size / x_size / sizeof(struct map_tile) != y_size || !(tilemap = malloc(tilemap_size))) return false; for (x = 0; x < x_size; ++x) { for (y = 0; y < y_size; ++y) { if (x < old_width && y < old_height) { tilemap[x + y * x_size] = s_map->layers[layer].tilemap[x + y * old_width]; } else { tile = &tilemap[x + y * x_size]; tile->frames_left = tileset_get_delay(s_map->tileset, 0); tile->tile_index = 0; } } } // free the old tilemap and substitute the new one free(s_map->layers[layer].tilemap); s_map->layers[layer].tilemap = tilemap; s_map->layers[layer].width = x_size; s_map->layers[layer].height = y_size; // if we resize the largest layer, the overall map size will change. // recalcuate it. tileset_get_size(s_map->tileset, &tile_width, &tile_height); s_map->width = 0; s_map->height = 0; for (i = 0; i < s_map->num_layers; ++i) { if (!s_map->layers[i].is_parallax) { s_map->width = fmax(s_map->width, s_map->layers[i].width * tile_width); s_map->height = fmax(s_map->height, s_map->layers[i].height * tile_height); } } // ensure zones and triggers remain in-bounds. if any are completely // out-of-bounds, delete them. for (i = (int)vector_len(s_map->zones) - 1; i >= 0; --i) { zone = vector_get(s_map->zones, i); if (zone->bounds.x1 >= s_map->width || zone->bounds.y1 >= s_map->height) vector_remove(s_map->zones, i); else { if (zone->bounds.x2 > s_map->width) zone->bounds.x2 = s_map->width; if (zone->bounds.y2 > s_map->height) zone->bounds.y2 = s_map->height; } } for (i = (int)vector_len(s_map->triggers) - 1; i >= 0; --i) { trigger = vector_get(s_map->triggers, i); if (trigger->x >= s_map->width || trigger->y >= s_map->height) vector_remove(s_map->triggers, i); } return true; }
{'added': [(1039, '\tsize_t tilemap_size;'), (1048, '\ttilemap_size = x_size * y_size * sizeof(struct map_tile);'), (1049, '\tif (x_size == 0 || tilemap_size / x_size / sizeof(struct map_tile) != y_size'), (1050, '\t\t|| !(tilemap = malloc(tilemap_size)))')], 'deleted': [(1047, '\tif (!(tilemap = malloc(x_size * y_size * sizeof(struct map_tile))))')]}
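The diff above is the whole of the fix recorded in this row: `x_size * y_size * sizeof(struct map_tile)` is computed once into `tilemap_size`, then divided back through both factors to prove that the multiplication did not wrap before the result reaches `malloc`. A standalone sketch of the same divide-back pattern (the function name and signature are illustrative, not minisphere API):

```c
#include <stddef.h>
#include <stdlib.h>

/* Overflow-checked 2-D allocation: returns NULL instead of an
 * undersized buffer when width * height * elem_size wraps around. */
static void* alloc_grid(size_t width, size_t height, size_t elem_size)
{
    size_t total = width * height * elem_size; /* may wrap */

    /* dividing back recovers `height` only if no wrap occurred;
     * the width == 0 test also guards the division itself */
    if (width == 0 || elem_size == 0 || total / width / elem_size != height)
        return NULL;

    return malloc(total);
}
```

The check is sound because a wrapped `total` is strictly smaller than the true product, so dividing it back can never reproduce `height`; when nothing wraps, the divisions are exact and the comparison passes.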
4
1
2,485
15,482
https://github.com/fatcerberus/minisphere
CVE-2018-1000524
['CWE-190']
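On toolchains where compiler builtins are acceptable, the same guarantee is available without the divide-back idiom. A sketch assuming GCC or Clang (`__builtin_mul_overflow` is not portable C):

```c
#include <stdbool.h>
#include <stddef.h>

/* Computes a * b * c into *out, reporting any wrap-around directly.
 * GCC/Clang only; portable code should keep the divide-back check. */
static bool mul3_size(size_t a, size_t b, size_t c, size_t* out)
{
    size_t ab;

    if (__builtin_mul_overflow(a, b, &ab))
        return false;

    return !__builtin_mul_overflow(ab, c, out);
}
```

The divide-back form used in the actual patch has the advantage of compiling on any conforming C compiler, which is the safer default for a cross-platform codebase.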
nsc_encode.c
nsc_encode
/** * FreeRDP: A Remote Desktop Protocol Implementation * NSCodec Encoder * * Copyright 2012 Vic Lee * Copyright 2016 Armin Novak <armin.novak@thincast.com> * Copyright 2016 Thincast Technologies GmbH * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include <stdio.h> #include <stdlib.h> #include <string.h> #include <winpr/crt.h> #include <freerdp/codec/nsc.h> #include <freerdp/codec/color.h> #include "nsc_types.h" #include "nsc_encode.h" static BOOL nsc_context_initialize_encode(NSC_CONTEXT* context) { int i; UINT32 length; UINT32 tempWidth; UINT32 tempHeight; tempWidth = ROUND_UP_TO(context->width, 8); tempHeight = ROUND_UP_TO(context->height, 2); /* The maximum length a decoded plane can reach in all cases */ length = tempWidth * tempHeight + 16; if (length > context->priv->PlaneBuffersLength) { for (i = 0; i < 5; i++) { BYTE* tmp = (BYTE*) realloc(context->priv->PlaneBuffers[i], length); if (!tmp) goto fail; context->priv->PlaneBuffers[i] = tmp; } context->priv->PlaneBuffersLength = length; } if (context->ChromaSubsamplingLevel) { context->OrgByteCount[0] = tempWidth * context->height; context->OrgByteCount[1] = tempWidth * tempHeight / 4; context->OrgByteCount[2] = tempWidth * tempHeight / 4; context->OrgByteCount[3] = context->width * context->height; } else { context->OrgByteCount[0] = context->width * context->height; context->OrgByteCount[1] = context->width * context->height; context->OrgByteCount[2] = context->width * context->height; context->OrgByteCount[3] = context->width * context->height; } return TRUE; fail: if (length > context->priv->PlaneBuffersLength) { for (i = 0; i < 5; i++) free(context->priv->PlaneBuffers[i]); } return FALSE; } static void nsc_encode_argb_to_aycocg(NSC_CONTEXT* context, const BYTE* data, UINT32 scanline) { UINT16 x; UINT16 y; UINT16 rw; BYTE ccl; const BYTE* src; BYTE* yplane = NULL; BYTE* coplane = NULL; BYTE* cgplane = NULL; BYTE* aplane = NULL; INT16 r_val; INT16 g_val; INT16 b_val; BYTE a_val; UINT32 tempWidth; tempWidth = ROUND_UP_TO(context->width, 8); rw = (context->ChromaSubsamplingLevel ? 
tempWidth : context->width); ccl = context->ColorLossLevel; for (y = 0; y < context->height; y++) { src = data + (context->height - 1 - y) * scanline; yplane = context->priv->PlaneBuffers[0] + y * rw; coplane = context->priv->PlaneBuffers[1] + y * rw; cgplane = context->priv->PlaneBuffers[2] + y * rw; aplane = context->priv->PlaneBuffers[3] + y * context->width; for (x = 0; x < context->width; x++) { switch (context->format) { case PIXEL_FORMAT_BGRX32: b_val = *src++; g_val = *src++; r_val = *src++; src++; a_val = 0xFF; break; case PIXEL_FORMAT_BGRA32: b_val = *src++; g_val = *src++; r_val = *src++; a_val = *src++; break; case PIXEL_FORMAT_RGBX32: r_val = *src++; g_val = *src++; b_val = *src++; src++; a_val = 0xFF; break; case PIXEL_FORMAT_RGBA32: r_val = *src++; g_val = *src++; b_val = *src++; a_val = *src++; break; case PIXEL_FORMAT_BGR24: b_val = *src++; g_val = *src++; r_val = *src++; a_val = 0xFF; break; case PIXEL_FORMAT_RGB24: r_val = *src++; g_val = *src++; b_val = *src++; a_val = 0xFF; break; case PIXEL_FORMAT_BGR16: b_val = (INT16)(((*(src + 1)) & 0xF8) | ((*(src + 1)) >> 5)); g_val = (INT16)((((*(src + 1)) & 0x07) << 5) | (((*src) & 0xE0) >> 3)); r_val = (INT16)((((*src) & 0x1F) << 3) | (((*src) >> 2) & 0x07)); a_val = 0xFF; src += 2; break; case PIXEL_FORMAT_RGB16: r_val = (INT16)(((*(src + 1)) & 0xF8) | ((*(src + 1)) >> 5)); g_val = (INT16)((((*(src + 1)) & 0x07) << 5) | (((*src) & 0xE0) >> 3)); b_val = (INT16)((((*src) & 0x1F) << 3) | (((*src) >> 2) & 0x07)); a_val = 0xFF; src += 2; break; case PIXEL_FORMAT_A4: { int shift; BYTE idx; shift = (7 - (x % 8)); idx = ((*src) >> shift) & 1; idx |= (((*(src + 1)) >> shift) & 1) << 1; idx |= (((*(src + 2)) >> shift) & 1) << 2; idx |= (((*(src + 3)) >> shift) & 1) << 3; idx *= 3; r_val = (INT16) context->palette[idx]; g_val = (INT16) context->palette[idx + 1]; b_val = (INT16) context->palette[idx + 2]; if (shift == 0) src += 4; } a_val = 0xFF; break; case PIXEL_FORMAT_RGB8: { int idx = (*src) * 3; r_val = (INT16) context->palette[idx]; g_val = (INT16) context->palette[idx + 1]; b_val = (INT16) context->palette[idx + 2]; src++; } a_val = 0xFF; break; default: r_val = g_val = b_val = a_val = 0; break; } *yplane++ = (BYTE)((r_val >> 2) + (g_val >> 1) + (b_val >> 2)); /* Perform color loss reduction here */ *coplane++ = (BYTE)((r_val - b_val) >> ccl); *cgplane++ = (BYTE)((-(r_val >> 1) + g_val - (b_val >> 1)) >> ccl); *aplane++ = a_val; } if (context->ChromaSubsamplingLevel && (x % 2) == 1) { *yplane = *(yplane - 1); *coplane = *(coplane - 1); *cgplane = *(cgplane - 1); } } if (context->ChromaSubsamplingLevel && (y % 2) == 1) { yplane = context->priv->PlaneBuffers[0] + y * rw; coplane = context->priv->PlaneBuffers[1] + y * rw; cgplane = context->priv->PlaneBuffers[2] + y * rw; CopyMemory(yplane, yplane - rw, rw); CopyMemory(coplane, coplane - rw, rw); CopyMemory(cgplane, cgplane - rw, rw); } } static void nsc_encode_subsampling(NSC_CONTEXT* context) { UINT16 x; UINT16 y; BYTE* co_dst; BYTE* cg_dst; INT8* co_src0; INT8* co_src1; INT8* cg_src0; INT8* cg_src1; UINT32 tempWidth; UINT32 tempHeight; tempWidth = ROUND_UP_TO(context->width, 8); tempHeight = ROUND_UP_TO(context->height, 2); for (y = 0; y < tempHeight >> 1; y++) { co_dst = context->priv->PlaneBuffers[1] + y * (tempWidth >> 1); cg_dst = context->priv->PlaneBuffers[2] + y * (tempWidth >> 1); co_src0 = (INT8*) context->priv->PlaneBuffers[1] + (y << 1) * tempWidth; co_src1 = co_src0 + tempWidth; cg_src0 = (INT8*) context->priv->PlaneBuffers[2] + (y << 1) * tempWidth; cg_src1 = cg_src0 
+ tempWidth; for (x = 0; x < tempWidth >> 1; x++) { *co_dst++ = (BYTE)(((INT16) * co_src0 + (INT16) * (co_src0 + 1) + (INT16) * co_src1 + (INT16) * (co_src1 + 1)) >> 2); *cg_dst++ = (BYTE)(((INT16) * cg_src0 + (INT16) * (cg_src0 + 1) + (INT16) * cg_src1 + (INT16) * (cg_src1 + 1)) >> 2); co_src0 += 2; co_src1 += 2; cg_src0 += 2; cg_src1 += 2; } } } void nsc_encode(NSC_CONTEXT* context, const BYTE* bmpdata, UINT32 rowstride) { nsc_encode_argb_to_aycocg(context, bmpdata, rowstride); if (context->ChromaSubsamplingLevel) { nsc_encode_subsampling(context); } } static UINT32 nsc_rle_encode(BYTE* in, BYTE* out, UINT32 originalSize) { UINT32 left; UINT32 runlength = 1; UINT32 planeSize = 0; left = originalSize; /** * We quit the loop if the running compressed size is larger than the original. * In such cases data will be sent uncompressed. */ while (left > 4 && planeSize < originalSize - 4) { if (left > 5 && *in == *(in + 1)) { runlength++; } else if (runlength == 1) { *out++ = *in; planeSize++; } else if (runlength < 256) { *out++ = *in; *out++ = *in; *out++ = runlength - 2; runlength = 1; planeSize += 3; } else { *out++ = *in; *out++ = *in; *out++ = 0xFF; *out++ = (runlength & 0x000000FF); *out++ = (runlength & 0x0000FF00) >> 8; *out++ = (runlength & 0x00FF0000) >> 16; *out++ = (runlength & 0xFF000000) >> 24; runlength = 1; planeSize += 7; } in++; left--; } if (planeSize < originalSize - 4) CopyMemory(out, in, 4); planeSize += 4; return planeSize; } static void nsc_rle_compress_data(NSC_CONTEXT* context) { UINT16 i; UINT32 planeSize; UINT32 originalSize; for (i = 0; i < 4; i++) { originalSize = context->OrgByteCount[i]; if (originalSize == 0) { planeSize = 0; } else { planeSize = nsc_rle_encode(context->priv->PlaneBuffers[i], context->priv->PlaneBuffers[4], originalSize); if (planeSize < originalSize) CopyMemory(context->priv->PlaneBuffers[i], context->priv->PlaneBuffers[4], planeSize); else planeSize = originalSize; } context->PlaneByteCount[i] = planeSize; } } UINT32 nsc_compute_byte_count(NSC_CONTEXT* context, UINT32* ByteCount, UINT32 width, UINT32 height) { UINT32 tempWidth; UINT32 tempHeight; UINT32 maxPlaneSize; tempWidth = ROUND_UP_TO(width, 8); tempHeight = ROUND_UP_TO(height, 2); maxPlaneSize = tempWidth * tempHeight + 16; if (context->ChromaSubsamplingLevel) { ByteCount[0] = tempWidth * height; ByteCount[1] = tempWidth * tempHeight / 4; ByteCount[2] = tempWidth * tempHeight / 4; ByteCount[3] = width * height; } else { ByteCount[0] = width * height; ByteCount[1] = width * height; ByteCount[2] = width * height; ByteCount[3] = width * height; } return maxPlaneSize; } NSC_MESSAGE* nsc_encode_messages(NSC_CONTEXT* context, const BYTE* data, UINT32 x, UINT32 y, UINT32 width, UINT32 height, UINT32 scanline, UINT32* numMessages, UINT32 maxDataSize) { UINT32 i, j, k; UINT32 dataOffset; UINT32 rows, cols; UINT32 BytesPerPixel; UINT32 MaxRegionWidth; UINT32 MaxRegionHeight; UINT32 ByteCount[4]; UINT32 MaxPlaneSize; UINT32 MaxMessageSize; NSC_MESSAGE* messages; UINT32 PaddedMaxPlaneSize; k = 0; MaxRegionWidth = 64 * 4; MaxRegionHeight = 64 * 2; BytesPerPixel = GetBytesPerPixel(context->format); rows = (width + (MaxRegionWidth - (width % MaxRegionWidth))) / MaxRegionWidth; cols = (height + (MaxRegionHeight - (height % MaxRegionHeight))) / MaxRegionHeight; *numMessages = rows * cols; MaxPlaneSize = nsc_compute_byte_count(context, (UINT32*) ByteCount, width, height); MaxMessageSize = ByteCount[0] + ByteCount[1] + ByteCount[2] + ByteCount[3] + 20; maxDataSize -= 1024; /* reserve enough space for 
headers */ messages = (NSC_MESSAGE*) calloc(*numMessages, sizeof(NSC_MESSAGE)); if (!messages) return NULL; for (i = 0; i < rows; i++) { for (j = 0; j < cols; j++) { messages[k].x = x + (i * MaxRegionWidth); messages[k].y = y + (j * MaxRegionHeight); messages[k].width = (i < (rows - 1)) ? MaxRegionWidth : width - (i * MaxRegionWidth); messages[k].height = (j < (cols - 1)) ? MaxRegionHeight : height - (j * MaxRegionHeight); messages[k].data = data; messages[k].scanline = scanline; messages[k].MaxPlaneSize = nsc_compute_byte_count(context, (UINT32*) messages[k].OrgByteCount, messages[k].width, messages[k].height); k++; } } *numMessages = k; for (i = 0; i < *numMessages; i++) { PaddedMaxPlaneSize = messages[i].MaxPlaneSize + 32; messages[i].PlaneBuffer = (BYTE*) BufferPool_Take(context->priv->PlanePool, PaddedMaxPlaneSize * 5); if (!messages[i].PlaneBuffer) goto fail; messages[i].PlaneBuffers[0] = (BYTE*) & (messages[i].PlaneBuffer[(PaddedMaxPlaneSize * 0) + 16]); messages[i].PlaneBuffers[1] = (BYTE*) & (messages[i].PlaneBuffer[(PaddedMaxPlaneSize * 1) + 16]); messages[i].PlaneBuffers[2] = (BYTE*) & (messages[i].PlaneBuffer[(PaddedMaxPlaneSize * 2) + 16]); messages[i].PlaneBuffers[3] = (BYTE*) & (messages[i].PlaneBuffer[(PaddedMaxPlaneSize * 3) + 16]); messages[i].PlaneBuffers[4] = (BYTE*) & (messages[i].PlaneBuffer[(PaddedMaxPlaneSize * 4) + 16]); } for (i = 0; i < *numMessages; i++) { context->width = messages[i].width; context->height = messages[i].height; context->OrgByteCount[0] = messages[i].OrgByteCount[0]; context->OrgByteCount[1] = messages[i].OrgByteCount[1]; context->OrgByteCount[2] = messages[i].OrgByteCount[2]; context->OrgByteCount[3] = messages[i].OrgByteCount[3]; context->priv->PlaneBuffersLength = messages[i].MaxPlaneSize; context->priv->PlaneBuffers[0] = messages[i].PlaneBuffers[0]; context->priv->PlaneBuffers[1] = messages[i].PlaneBuffers[1]; context->priv->PlaneBuffers[2] = messages[i].PlaneBuffers[2]; context->priv->PlaneBuffers[3] = messages[i].PlaneBuffers[3]; context->priv->PlaneBuffers[4] = messages[i].PlaneBuffers[4]; dataOffset = (messages[i].y * messages[i].scanline) + (messages[i].x * BytesPerPixel); PROFILER_ENTER(context->priv->prof_nsc_encode) context->encode(context, &data[dataOffset], scanline); PROFILER_EXIT(context->priv->prof_nsc_encode) PROFILER_ENTER(context->priv->prof_nsc_rle_compress_data) nsc_rle_compress_data(context); PROFILER_EXIT(context->priv->prof_nsc_rle_compress_data) messages[i].LumaPlaneByteCount = context->PlaneByteCount[0]; messages[i].OrangeChromaPlaneByteCount = context->PlaneByteCount[1]; messages[i].GreenChromaPlaneByteCount = context->PlaneByteCount[2]; messages[i].AlphaPlaneByteCount = context->PlaneByteCount[3]; messages[i].ColorLossLevel = context->ColorLossLevel; messages[i].ChromaSubsamplingLevel = context->ChromaSubsamplingLevel; } context->priv->PlaneBuffers[0] = NULL; context->priv->PlaneBuffers[1] = NULL; context->priv->PlaneBuffers[2] = NULL; context->priv->PlaneBuffers[3] = NULL; context->priv->PlaneBuffers[4] = NULL; return messages; fail: for (i = 0; i < *numMessages; i++) BufferPool_Return(context->priv->PlanePool, messages[i].PlaneBuffer); free(messages); return NULL; } BOOL nsc_write_message(NSC_CONTEXT* context, wStream* s, NSC_MESSAGE* message) { UINT32 totalPlaneByteCount; totalPlaneByteCount = message->LumaPlaneByteCount + message->OrangeChromaPlaneByteCount + message->GreenChromaPlaneByteCount + message->AlphaPlaneByteCount; if (!Stream_EnsureRemainingCapacity(s, 20 + totalPlaneByteCount)) return -1; 
Stream_Write_UINT32(s, message->LumaPlaneByteCount); /* LumaPlaneByteCount (4 bytes) */ Stream_Write_UINT32(s, message->OrangeChromaPlaneByteCount); /* OrangeChromaPlaneByteCount (4 bytes) */ Stream_Write_UINT32(s, message->GreenChromaPlaneByteCount); /* GreenChromaPlaneByteCount (4 bytes) */ Stream_Write_UINT32(s, message->AlphaPlaneByteCount); /* AlphaPlaneByteCount (4 bytes) */ Stream_Write_UINT8(s, message->ColorLossLevel); /* ColorLossLevel (1 byte) */ Stream_Write_UINT8(s, message->ChromaSubsamplingLevel); /* ChromaSubsamplingLevel (1 byte) */ Stream_Write_UINT16(s, 0); /* Reserved (2 bytes) */ if (message->LumaPlaneByteCount) Stream_Write(s, message->PlaneBuffers[0], message->LumaPlaneByteCount); /* LumaPlane */ if (message->OrangeChromaPlaneByteCount) Stream_Write(s, message->PlaneBuffers[1], message->OrangeChromaPlaneByteCount); /* OrangeChromaPlane */ if (message->GreenChromaPlaneByteCount) Stream_Write(s, message->PlaneBuffers[2], message->GreenChromaPlaneByteCount); /* GreenChromaPlane */ if (message->AlphaPlaneByteCount) Stream_Write(s, message->PlaneBuffers[3], message->AlphaPlaneByteCount); /* AlphaPlane */ return TRUE; } void nsc_message_free(NSC_CONTEXT* context, NSC_MESSAGE* message) { BufferPool_Return(context->priv->PlanePool, message->PlaneBuffer); } BOOL nsc_compose_message(NSC_CONTEXT* context, wStream* s, const BYTE* data, UINT32 width, UINT32 height, UINT32 scanline) { NSC_MESSAGE s_message = { 0 }; NSC_MESSAGE* message = &s_message; context->width = width; context->height = height; if (!nsc_context_initialize_encode(context)) return FALSE; /* ARGB to AYCoCg conversion, chroma subsampling and colorloss reduction */ PROFILER_ENTER(context->priv->prof_nsc_encode) context->encode(context, data, scanline); PROFILER_EXIT(context->priv->prof_nsc_encode) /* RLE encode */ PROFILER_ENTER(context->priv->prof_nsc_rle_compress_data) nsc_rle_compress_data(context); PROFILER_EXIT(context->priv->prof_nsc_rle_compress_data) message->PlaneBuffers[0] = context->priv->PlaneBuffers[0]; message->PlaneBuffers[1] = context->priv->PlaneBuffers[1]; message->PlaneBuffers[2] = context->priv->PlaneBuffers[2]; message->PlaneBuffers[3] = context->priv->PlaneBuffers[3]; message->LumaPlaneByteCount = context->PlaneByteCount[0]; message->OrangeChromaPlaneByteCount = context->PlaneByteCount[1]; message->GreenChromaPlaneByteCount = context->PlaneByteCount[2]; message->AlphaPlaneByteCount = context->PlaneByteCount[3]; message->ColorLossLevel = context->ColorLossLevel; message->ChromaSubsamplingLevel = context->ChromaSubsamplingLevel; return nsc_write_message(context, s, message); }
/** * FreeRDP: A Remote Desktop Protocol Implementation * NSCodec Encoder * * Copyright 2012 Vic Lee * Copyright 2016 Armin Novak <armin.novak@thincast.com> * Copyright 2016 Thincast Technologies GmbH * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include <stdio.h> #include <stdlib.h> #include <string.h> #include <winpr/crt.h> #include <freerdp/codec/nsc.h> #include <freerdp/codec/color.h> #include "nsc_types.h" #include "nsc_encode.h" static BOOL nsc_context_initialize_encode(NSC_CONTEXT* context) { int i; UINT32 length; UINT32 tempWidth; UINT32 tempHeight; tempWidth = ROUND_UP_TO(context->width, 8); tempHeight = ROUND_UP_TO(context->height, 2); /* The maximum length a decoded plane can reach in all cases */ length = tempWidth * tempHeight + 16; if (length > context->priv->PlaneBuffersLength) { for (i = 0; i < 5; i++) { BYTE* tmp = (BYTE*) realloc(context->priv->PlaneBuffers[i], length); if (!tmp) goto fail; context->priv->PlaneBuffers[i] = tmp; } context->priv->PlaneBuffersLength = length; } if (context->ChromaSubsamplingLevel) { context->OrgByteCount[0] = tempWidth * context->height; context->OrgByteCount[1] = tempWidth * tempHeight / 4; context->OrgByteCount[2] = tempWidth * tempHeight / 4; context->OrgByteCount[3] = context->width * context->height; } else { context->OrgByteCount[0] = context->width * context->height; context->OrgByteCount[1] = context->width * context->height; context->OrgByteCount[2] = context->width * context->height; context->OrgByteCount[3] = context->width * context->height; } return TRUE; fail: if (length > context->priv->PlaneBuffersLength) { for (i = 0; i < 5; i++) free(context->priv->PlaneBuffers[i]); } return FALSE; } static BOOL nsc_encode_argb_to_aycocg(NSC_CONTEXT* context, const BYTE* data, UINT32 scanline) { UINT16 x; UINT16 y; UINT16 rw; BYTE ccl; const BYTE* src; BYTE* yplane = NULL; BYTE* coplane = NULL; BYTE* cgplane = NULL; BYTE* aplane = NULL; INT16 r_val; INT16 g_val; INT16 b_val; BYTE a_val; UINT32 tempWidth; if (!context || data || (scanline == 0)) return FALSE; tempWidth = ROUND_UP_TO(context->width, 8); rw = (context->ChromaSubsamplingLevel ? 
tempWidth : context->width); ccl = context->ColorLossLevel; if (context->priv->PlaneBuffersLength < rw * scanline) return FALSE; if (rw < scanline * 2) return FALSE; for (y = 0; y < context->height; y++) { src = data + (context->height - 1 - y) * scanline; yplane = context->priv->PlaneBuffers[0] + y * rw; coplane = context->priv->PlaneBuffers[1] + y * rw; cgplane = context->priv->PlaneBuffers[2] + y * rw; aplane = context->priv->PlaneBuffers[3] + y * context->width; for (x = 0; x < context->width; x++) { switch (context->format) { case PIXEL_FORMAT_BGRX32: b_val = *src++; g_val = *src++; r_val = *src++; src++; a_val = 0xFF; break; case PIXEL_FORMAT_BGRA32: b_val = *src++; g_val = *src++; r_val = *src++; a_val = *src++; break; case PIXEL_FORMAT_RGBX32: r_val = *src++; g_val = *src++; b_val = *src++; src++; a_val = 0xFF; break; case PIXEL_FORMAT_RGBA32: r_val = *src++; g_val = *src++; b_val = *src++; a_val = *src++; break; case PIXEL_FORMAT_BGR24: b_val = *src++; g_val = *src++; r_val = *src++; a_val = 0xFF; break; case PIXEL_FORMAT_RGB24: r_val = *src++; g_val = *src++; b_val = *src++; a_val = 0xFF; break; case PIXEL_FORMAT_BGR16: b_val = (INT16)(((*(src + 1)) & 0xF8) | ((*(src + 1)) >> 5)); g_val = (INT16)((((*(src + 1)) & 0x07) << 5) | (((*src) & 0xE0) >> 3)); r_val = (INT16)((((*src) & 0x1F) << 3) | (((*src) >> 2) & 0x07)); a_val = 0xFF; src += 2; break; case PIXEL_FORMAT_RGB16: r_val = (INT16)(((*(src + 1)) & 0xF8) | ((*(src + 1)) >> 5)); g_val = (INT16)((((*(src + 1)) & 0x07) << 5) | (((*src) & 0xE0) >> 3)); b_val = (INT16)((((*src) & 0x1F) << 3) | (((*src) >> 2) & 0x07)); a_val = 0xFF; src += 2; break; case PIXEL_FORMAT_A4: { int shift; BYTE idx; shift = (7 - (x % 8)); idx = ((*src) >> shift) & 1; idx |= (((*(src + 1)) >> shift) & 1) << 1; idx |= (((*(src + 2)) >> shift) & 1) << 2; idx |= (((*(src + 3)) >> shift) & 1) << 3; idx *= 3; r_val = (INT16) context->palette[idx]; g_val = (INT16) context->palette[idx + 1]; b_val = (INT16) context->palette[idx + 2]; if (shift == 0) src += 4; } a_val = 0xFF; break; case PIXEL_FORMAT_RGB8: { int idx = (*src) * 3; r_val = (INT16) context->palette[idx]; g_val = (INT16) context->palette[idx + 1]; b_val = (INT16) context->palette[idx + 2]; src++; } a_val = 0xFF; break; default: r_val = g_val = b_val = a_val = 0; break; } *yplane++ = (BYTE)((r_val >> 2) + (g_val >> 1) + (b_val >> 2)); /* Perform color loss reduction here */ *coplane++ = (BYTE)((r_val - b_val) >> ccl); *cgplane++ = (BYTE)((-(r_val >> 1) + g_val - (b_val >> 1)) >> ccl); *aplane++ = a_val; } if (context->ChromaSubsamplingLevel && (x % 2) == 1) { *yplane = *(yplane - 1); *coplane = *(coplane - 1); *cgplane = *(cgplane - 1); } } if (context->ChromaSubsamplingLevel && (y % 2) == 1) { yplane = context->priv->PlaneBuffers[0] + y * rw; coplane = context->priv->PlaneBuffers[1] + y * rw; cgplane = context->priv->PlaneBuffers[2] + y * rw; CopyMemory(yplane, yplane - rw, rw); CopyMemory(coplane, coplane - rw, rw); CopyMemory(cgplane, cgplane - rw, rw); } return TRUE; } static BOOL nsc_encode_subsampling(NSC_CONTEXT* context) { UINT16 x; UINT16 y; UINT32 tempWidth; UINT32 tempHeight; if (!context) return FALSE; tempWidth = ROUND_UP_TO(context->width, 8); tempHeight = ROUND_UP_TO(context->height, 2); if (tempHeight == 0) return FALSE; if (tempWidth > context->priv->PlaneBuffersLength / tempHeight) return FALSE; for (y = 0; y < tempHeight >> 1; y++) { BYTE* co_dst = context->priv->PlaneBuffers[1] + y * (tempWidth >> 1); BYTE* cg_dst = context->priv->PlaneBuffers[2] + y * (tempWidth >> 1); const INT8* 
co_src0 = (INT8*) context->priv->PlaneBuffers[1] + (y << 1) * tempWidth; const INT8* co_src1 = co_src0 + tempWidth; const INT8* cg_src0 = (INT8*) context->priv->PlaneBuffers[2] + (y << 1) * tempWidth; const INT8* cg_src1 = cg_src0 + tempWidth; for (x = 0; x < tempWidth >> 1; x++) { *co_dst++ = (BYTE)(((INT16) * co_src0 + (INT16) * (co_src0 + 1) + (INT16) * co_src1 + (INT16) * (co_src1 + 1)) >> 2); *cg_dst++ = (BYTE)(((INT16) * cg_src0 + (INT16) * (cg_src0 + 1) + (INT16) * cg_src1 + (INT16) * (cg_src1 + 1)) >> 2); co_src0 += 2; co_src1 += 2; cg_src0 += 2; cg_src1 += 2; } } return TRUE; } BOOL nsc_encode(NSC_CONTEXT* context, const BYTE* bmpdata, UINT32 rowstride) { if (!context || !bmpdata || (rowstride == 0)) return FALSE; if (!nsc_encode_argb_to_aycocg(context, bmpdata, rowstride)) return FALSE; if (context->ChromaSubsamplingLevel) { if (!nsc_encode_subsampling(context)) return FALSE; } return TRUE; } static UINT32 nsc_rle_encode(const BYTE* in, BYTE* out, UINT32 originalSize) { UINT32 left; UINT32 runlength = 1; UINT32 planeSize = 0; left = originalSize; /** * We quit the loop if the running compressed size is larger than the original. * In such cases data will be sent uncompressed. */ while (left > 4 && planeSize < originalSize - 4) { if (left > 5 && *in == *(in + 1)) { runlength++; } else if (runlength == 1) { *out++ = *in; planeSize++; } else if (runlength < 256) { *out++ = *in; *out++ = *in; *out++ = runlength - 2; runlength = 1; planeSize += 3; } else { *out++ = *in; *out++ = *in; *out++ = 0xFF; *out++ = (runlength & 0x000000FF); *out++ = (runlength & 0x0000FF00) >> 8; *out++ = (runlength & 0x00FF0000) >> 16; *out++ = (runlength & 0xFF000000) >> 24; runlength = 1; planeSize += 7; } in++; left--; } if (planeSize < originalSize - 4) CopyMemory(out, in, 4); planeSize += 4; return planeSize; } static void nsc_rle_compress_data(NSC_CONTEXT* context) { UINT16 i; UINT32 planeSize; UINT32 originalSize; for (i = 0; i < 4; i++) { originalSize = context->OrgByteCount[i]; if (originalSize == 0) { planeSize = 0; } else { planeSize = nsc_rle_encode(context->priv->PlaneBuffers[i], context->priv->PlaneBuffers[4], originalSize); if (planeSize < originalSize) CopyMemory(context->priv->PlaneBuffers[i], context->priv->PlaneBuffers[4], planeSize); else planeSize = originalSize; } context->PlaneByteCount[i] = planeSize; } } UINT32 nsc_compute_byte_count(NSC_CONTEXT* context, UINT32* ByteCount, UINT32 width, UINT32 height) { UINT32 tempWidth; UINT32 tempHeight; UINT32 maxPlaneSize; tempWidth = ROUND_UP_TO(width, 8); tempHeight = ROUND_UP_TO(height, 2); maxPlaneSize = tempWidth * tempHeight + 16; if (context->ChromaSubsamplingLevel) { ByteCount[0] = tempWidth * height; ByteCount[1] = tempWidth * tempHeight / 4; ByteCount[2] = tempWidth * tempHeight / 4; ByteCount[3] = width * height; } else { ByteCount[0] = width * height; ByteCount[1] = width * height; ByteCount[2] = width * height; ByteCount[3] = width * height; } return maxPlaneSize; } NSC_MESSAGE* nsc_encode_messages(NSC_CONTEXT* context, const BYTE* data, UINT32 x, UINT32 y, UINT32 width, UINT32 height, UINT32 scanline, UINT32* numMessages, UINT32 maxDataSize) { UINT32 i, j, k; UINT32 dataOffset; UINT32 rows, cols; UINT32 BytesPerPixel; UINT32 MaxRegionWidth; UINT32 MaxRegionHeight; UINT32 ByteCount[4]; UINT32 MaxPlaneSize; UINT32 MaxMessageSize; NSC_MESSAGE* messages; UINT32 PaddedMaxPlaneSize; k = 0; MaxRegionWidth = 64 * 4; MaxRegionHeight = 64 * 2; BytesPerPixel = GetBytesPerPixel(context->format); rows = (width + (MaxRegionWidth - (width % 
MaxRegionWidth))) / MaxRegionWidth; cols = (height + (MaxRegionHeight - (height % MaxRegionHeight))) / MaxRegionHeight; *numMessages = rows * cols; MaxPlaneSize = nsc_compute_byte_count(context, (UINT32*) ByteCount, width, height); MaxMessageSize = ByteCount[0] + ByteCount[1] + ByteCount[2] + ByteCount[3] + 20; maxDataSize -= 1024; /* reserve enough space for headers */ messages = (NSC_MESSAGE*) calloc(*numMessages, sizeof(NSC_MESSAGE)); if (!messages) return NULL; for (i = 0; i < rows; i++) { for (j = 0; j < cols; j++) { messages[k].x = x + (i * MaxRegionWidth); messages[k].y = y + (j * MaxRegionHeight); messages[k].width = (i < (rows - 1)) ? MaxRegionWidth : width - (i * MaxRegionWidth); messages[k].height = (j < (cols - 1)) ? MaxRegionHeight : height - (j * MaxRegionHeight); messages[k].data = data; messages[k].scanline = scanline; messages[k].MaxPlaneSize = nsc_compute_byte_count(context, (UINT32*) messages[k].OrgByteCount, messages[k].width, messages[k].height); k++; } } *numMessages = k; for (i = 0; i < *numMessages; i++) { PaddedMaxPlaneSize = messages[i].MaxPlaneSize + 32; messages[i].PlaneBuffer = (BYTE*) BufferPool_Take(context->priv->PlanePool, PaddedMaxPlaneSize * 5); if (!messages[i].PlaneBuffer) goto fail; messages[i].PlaneBuffers[0] = (BYTE*) & (messages[i].PlaneBuffer[(PaddedMaxPlaneSize * 0) + 16]); messages[i].PlaneBuffers[1] = (BYTE*) & (messages[i].PlaneBuffer[(PaddedMaxPlaneSize * 1) + 16]); messages[i].PlaneBuffers[2] = (BYTE*) & (messages[i].PlaneBuffer[(PaddedMaxPlaneSize * 2) + 16]); messages[i].PlaneBuffers[3] = (BYTE*) & (messages[i].PlaneBuffer[(PaddedMaxPlaneSize * 3) + 16]); messages[i].PlaneBuffers[4] = (BYTE*) & (messages[i].PlaneBuffer[(PaddedMaxPlaneSize * 4) + 16]); } for (i = 0; i < *numMessages; i++) { context->width = messages[i].width; context->height = messages[i].height; context->OrgByteCount[0] = messages[i].OrgByteCount[0]; context->OrgByteCount[1] = messages[i].OrgByteCount[1]; context->OrgByteCount[2] = messages[i].OrgByteCount[2]; context->OrgByteCount[3] = messages[i].OrgByteCount[3]; context->priv->PlaneBuffersLength = messages[i].MaxPlaneSize; context->priv->PlaneBuffers[0] = messages[i].PlaneBuffers[0]; context->priv->PlaneBuffers[1] = messages[i].PlaneBuffers[1]; context->priv->PlaneBuffers[2] = messages[i].PlaneBuffers[2]; context->priv->PlaneBuffers[3] = messages[i].PlaneBuffers[3]; context->priv->PlaneBuffers[4] = messages[i].PlaneBuffers[4]; dataOffset = (messages[i].y * messages[i].scanline) + (messages[i].x * BytesPerPixel); PROFILER_ENTER(context->priv->prof_nsc_encode) context->encode(context, &data[dataOffset], scanline); PROFILER_EXIT(context->priv->prof_nsc_encode) PROFILER_ENTER(context->priv->prof_nsc_rle_compress_data) nsc_rle_compress_data(context); PROFILER_EXIT(context->priv->prof_nsc_rle_compress_data) messages[i].LumaPlaneByteCount = context->PlaneByteCount[0]; messages[i].OrangeChromaPlaneByteCount = context->PlaneByteCount[1]; messages[i].GreenChromaPlaneByteCount = context->PlaneByteCount[2]; messages[i].AlphaPlaneByteCount = context->PlaneByteCount[3]; messages[i].ColorLossLevel = context->ColorLossLevel; messages[i].ChromaSubsamplingLevel = context->ChromaSubsamplingLevel; } context->priv->PlaneBuffers[0] = NULL; context->priv->PlaneBuffers[1] = NULL; context->priv->PlaneBuffers[2] = NULL; context->priv->PlaneBuffers[3] = NULL; context->priv->PlaneBuffers[4] = NULL; return messages; fail: for (i = 0; i < *numMessages; i++) BufferPool_Return(context->priv->PlanePool, messages[i].PlaneBuffer); free(messages); return 
NULL; } BOOL nsc_write_message(NSC_CONTEXT* context, wStream* s, NSC_MESSAGE* message) { UINT32 totalPlaneByteCount; totalPlaneByteCount = message->LumaPlaneByteCount + message->OrangeChromaPlaneByteCount + message->GreenChromaPlaneByteCount + message->AlphaPlaneByteCount; if (!Stream_EnsureRemainingCapacity(s, 20 + totalPlaneByteCount)) return -1; Stream_Write_UINT32(s, message->LumaPlaneByteCount); /* LumaPlaneByteCount (4 bytes) */ Stream_Write_UINT32(s, message->OrangeChromaPlaneByteCount); /* OrangeChromaPlaneByteCount (4 bytes) */ Stream_Write_UINT32(s, message->GreenChromaPlaneByteCount); /* GreenChromaPlaneByteCount (4 bytes) */ Stream_Write_UINT32(s, message->AlphaPlaneByteCount); /* AlphaPlaneByteCount (4 bytes) */ Stream_Write_UINT8(s, message->ColorLossLevel); /* ColorLossLevel (1 byte) */ Stream_Write_UINT8(s, message->ChromaSubsamplingLevel); /* ChromaSubsamplingLevel (1 byte) */ Stream_Write_UINT16(s, 0); /* Reserved (2 bytes) */ if (message->LumaPlaneByteCount) Stream_Write(s, message->PlaneBuffers[0], message->LumaPlaneByteCount); /* LumaPlane */ if (message->OrangeChromaPlaneByteCount) Stream_Write(s, message->PlaneBuffers[1], message->OrangeChromaPlaneByteCount); /* OrangeChromaPlane */ if (message->GreenChromaPlaneByteCount) Stream_Write(s, message->PlaneBuffers[2], message->GreenChromaPlaneByteCount); /* GreenChromaPlane */ if (message->AlphaPlaneByteCount) Stream_Write(s, message->PlaneBuffers[3], message->AlphaPlaneByteCount); /* AlphaPlane */ return TRUE; } void nsc_message_free(NSC_CONTEXT* context, NSC_MESSAGE* message) { BufferPool_Return(context->priv->PlanePool, message->PlaneBuffer); } BOOL nsc_compose_message(NSC_CONTEXT* context, wStream* s, const BYTE* data, UINT32 width, UINT32 height, UINT32 scanline) { NSC_MESSAGE s_message = { 0 }; NSC_MESSAGE* message = &s_message; context->width = width; context->height = height; if (!nsc_context_initialize_encode(context)) return FALSE; /* ARGB to AYCoCg conversion, chroma subsampling and colorloss reduction */ PROFILER_ENTER(context->priv->prof_nsc_encode) context->encode(context, data, scanline); PROFILER_EXIT(context->priv->prof_nsc_encode) /* RLE encode */ PROFILER_ENTER(context->priv->prof_nsc_rle_compress_data) nsc_rle_compress_data(context); PROFILER_EXIT(context->priv->prof_nsc_rle_compress_data) message->PlaneBuffers[0] = context->priv->PlaneBuffers[0]; message->PlaneBuffers[1] = context->priv->PlaneBuffers[1]; message->PlaneBuffers[2] = context->priv->PlaneBuffers[2]; message->PlaneBuffers[3] = context->priv->PlaneBuffers[3]; message->LumaPlaneByteCount = context->PlaneByteCount[0]; message->OrangeChromaPlaneByteCount = context->PlaneByteCount[1]; message->GreenChromaPlaneByteCount = context->PlaneByteCount[2]; message->AlphaPlaneByteCount = context->PlaneByteCount[3]; message->ColorLossLevel = context->ColorLossLevel; message->ChromaSubsamplingLevel = context->ChromaSubsamplingLevel; return nsc_write_message(context, s, message); }
void nsc_encode(NSC_CONTEXT* context, const BYTE* bmpdata, UINT32 rowstride) { nsc_encode_argb_to_aycocg(context, bmpdata, rowstride); if (context->ChromaSubsamplingLevel) { nsc_encode_subsampling(context); } }
BOOL nsc_encode(NSC_CONTEXT* context, const BYTE* bmpdata, UINT32 rowstride) { if (!context || !bmpdata || (rowstride == 0)) return FALSE; if (!nsc_encode_argb_to_aycocg(context, bmpdata, rowstride)) return FALSE; if (context->ChromaSubsamplingLevel) { if (!nsc_encode_subsampling(context)) return FALSE; } return TRUE; }
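Note on the fixed entry point above: nsc_encode now rejects a NULL context, a NULL bitmap pointer, and a zero stride before any work is done. The companion guard added inside nsc_encode_argb_to_aycocg (visible in the diff below) reads if (!context || data || (scanline == 0)), which taken literally fails for every non-NULL data pointer, so !data was presumably intended. A minimal sketch of the intended guard-clause pattern; BYTE and NSC_CONTEXT here are hypothetical stand-ins, not the real FreeRDP definitions:

#include <stdbool.h>
#include <stdint.h>

typedef uint8_t BYTE;                    /* stand-in for the WinPR BYTE */
typedef struct NSC_CONTEXT NSC_CONTEXT; /* opaque here; fields unused */

/* Guard-clause pattern from the fixed entry point: reject NULL pointers
 * and a zero stride before any plane buffer is touched. Note the !data
 * form; the recorded helper check reads plain `data`, which would fail
 * for every valid (non-NULL) input buffer. */
static bool nsc_encode_args_ok(const NSC_CONTEXT* context,
                               const BYTE* data, uint32_t scanline)
{
    return context != NULL && data != NULL && scanline != 0;
}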
{'added': [(54, ''), (91, 'static BOOL nsc_encode_argb_to_aycocg(NSC_CONTEXT* context, const BYTE* data,'), (108, ''), (109, '\tif (!context || data || (scanline == 0))'), (110, '\t\treturn FALSE;'), (111, ''), (116, '\tif (context->priv->PlaneBuffersLength < rw * scanline)'), (117, '\t\treturn FALSE;'), (118, ''), (119, '\tif (rw < scanline * 2)'), (120, '\t\treturn FALSE;'), (121, ''), (256, ''), (257, '\treturn TRUE;'), (260, 'static BOOL nsc_encode_subsampling(NSC_CONTEXT* context)'), (266, ''), (267, '\tif (!context)'), (268, '\t\treturn FALSE;'), (269, ''), (273, '\tif (tempHeight == 0)'), (274, '\t\treturn FALSE;'), (275, ''), (276, '\tif (tempWidth > context->priv->PlaneBuffersLength / tempHeight)'), (277, '\t\treturn FALSE;'), (278, ''), (281, '\t\tBYTE* co_dst = context->priv->PlaneBuffers[1] + y * (tempWidth >> 1);'), (282, '\t\tBYTE* cg_dst = context->priv->PlaneBuffers[2] + y * (tempWidth >> 1);'), (283, '\t\tconst INT8* co_src0 = (INT8*) context->priv->PlaneBuffers[1] + (y << 1) * tempWidth;'), (284, '\t\tconst INT8* co_src1 = co_src0 + tempWidth;'), (285, '\t\tconst INT8* cg_src0 = (INT8*) context->priv->PlaneBuffers[2] + (y << 1) * tempWidth;'), (286, '\t\tconst INT8* cg_src1 = cg_src0 + tempWidth;'), (300, ''), (301, '\treturn TRUE;'), (304, 'BOOL nsc_encode(NSC_CONTEXT* context, const BYTE* bmpdata, UINT32 rowstride)'), (306, '\tif (!context || !bmpdata || (rowstride == 0))'), (307, '\t\treturn FALSE;'), (308, ''), (309, '\tif (!nsc_encode_argb_to_aycocg(context, bmpdata, rowstride))'), (310, '\t\treturn FALSE;'), (314, '\t\tif (!nsc_encode_subsampling(context))'), (315, '\t\t\treturn FALSE;'), (317, ''), (318, '\treturn TRUE;'), (321, 'static UINT32 nsc_rle_encode(const BYTE* in, BYTE* out, UINT32 originalSize)')], 'deleted': [(90, 'static void nsc_encode_argb_to_aycocg(NSC_CONTEXT* context, const BYTE* data,'), (247, 'static void nsc_encode_subsampling(NSC_CONTEXT* context)'), (251, '\tBYTE* co_dst;'), (252, '\tBYTE* cg_dst;'), (253, '\tINT8* co_src0;'), (254, '\tINT8* co_src1;'), (255, '\tINT8* cg_src0;'), (256, '\tINT8* cg_src1;'), (264, '\t\tco_dst = context->priv->PlaneBuffers[1] + y * (tempWidth >> 1);'), (265, '\t\tcg_dst = context->priv->PlaneBuffers[2] + y * (tempWidth >> 1);'), (266, '\t\tco_src0 = (INT8*) context->priv->PlaneBuffers[1] + (y << 1) * tempWidth;'), (267, '\t\tco_src1 = co_src0 + tempWidth;'), (268, '\t\tcg_src0 = (INT8*) context->priv->PlaneBuffers[2] + (y << 1) * tempWidth;'), (269, '\t\tcg_src1 = cg_src0 + tempWidth;'), (285, 'void nsc_encode(NSC_CONTEXT* context, const BYTE* bmpdata, UINT32 rowstride)'), (287, '\tnsc_encode_argb_to_aycocg(context, bmpdata, rowstride);'), (291, '\t\tnsc_encode_subsampling(context);'), (295, 'static UINT32 nsc_rle_encode(BYTE* in, BYTE* out, UINT32 originalSize)')]}
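The subsampling guard added in this diff avoids a multiplication that could wrap: instead of computing tempWidth * tempHeight and comparing it against PlaneBuffersLength, it rejects tempHeight == 0 and then compares tempWidth against PlaneBuffersLength / tempHeight. A self-contained sketch of that overflow-free idiom:

#include <stdbool.h>
#include <stdint.h>

/* Division-based bounds check, as in the added subsampling guard:
 * verifies w * h <= limit without computing w * h, which could wrap
 * for large 32-bit operands. */
static bool fits_in_buffer(uint32_t w, uint32_t h, uint32_t limit)
{
    if (h == 0)
        return false;       /* mirrors the added tempHeight == 0 check */

    return w <= limit / h;  /* equivalent to w * h <= limit, overflow-free */
}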
44
18
513
3961
https://github.com/FreeRDP/FreeRDP
CVE-2018-8788
['CWE-787']
nsc_encode.c
nsc_encode_argb_to_aycocg
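For orientation before the code of this record: nsc_context_initialize_encode sizes each of the five plane buffers from the width rounded up to a multiple of 8 and the height rounded up to a multiple of 2, plus 16 bytes of slack. A sketch of that computation, assuming ROUND_UP_TO rounds up to the next multiple (its actual definition lives in nsc_types.h and is not shown in this record):

#include <stdint.h>

/* Assumed semantics of ROUND_UP_TO: round x up to the next multiple of n,
 * where n is a power of two (8 for width, 2 for height in this codec). */
#define ROUND_UP_TO(x, n) (((x) + (n) - 1) & ~((uint32_t)(n) - 1))

/* Maximum length a decoded plane can reach, as computed by
 * nsc_context_initialize_encode: padded width * padded height + 16. */
static uint32_t nsc_plane_length(uint32_t width, uint32_t height)
{
    uint32_t tempWidth  = ROUND_UP_TO(width, 8);
    uint32_t tempHeight = ROUND_UP_TO(height, 2);
    return tempWidth * tempHeight + 16;
}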
/** * FreeRDP: A Remote Desktop Protocol Implementation * NSCodec Encoder * * Copyright 2012 Vic Lee * Copyright 2016 Armin Novak <armin.novak@thincast.com> * Copyright 2016 Thincast Technologies GmbH * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include <stdio.h> #include <stdlib.h> #include <string.h> #include <winpr/crt.h> #include <freerdp/codec/nsc.h> #include <freerdp/codec/color.h> #include "nsc_types.h" #include "nsc_encode.h" static BOOL nsc_context_initialize_encode(NSC_CONTEXT* context) { int i; UINT32 length; UINT32 tempWidth; UINT32 tempHeight; tempWidth = ROUND_UP_TO(context->width, 8); tempHeight = ROUND_UP_TO(context->height, 2); /* The maximum length a decoded plane can reach in all cases */ length = tempWidth * tempHeight + 16; if (length > context->priv->PlaneBuffersLength) { for (i = 0; i < 5; i++) { BYTE* tmp = (BYTE*) realloc(context->priv->PlaneBuffers[i], length); if (!tmp) goto fail; context->priv->PlaneBuffers[i] = tmp; } context->priv->PlaneBuffersLength = length; } if (context->ChromaSubsamplingLevel) { context->OrgByteCount[0] = tempWidth * context->height; context->OrgByteCount[1] = tempWidth * tempHeight / 4; context->OrgByteCount[2] = tempWidth * tempHeight / 4; context->OrgByteCount[3] = context->width * context->height; } else { context->OrgByteCount[0] = context->width * context->height; context->OrgByteCount[1] = context->width * context->height; context->OrgByteCount[2] = context->width * context->height; context->OrgByteCount[3] = context->width * context->height; } return TRUE; fail: if (length > context->priv->PlaneBuffersLength) { for (i = 0; i < 5; i++) free(context->priv->PlaneBuffers[i]); } return FALSE; } static void nsc_encode_argb_to_aycocg(NSC_CONTEXT* context, const BYTE* data, UINT32 scanline) { UINT16 x; UINT16 y; UINT16 rw; BYTE ccl; const BYTE* src; BYTE* yplane = NULL; BYTE* coplane = NULL; BYTE* cgplane = NULL; BYTE* aplane = NULL; INT16 r_val; INT16 g_val; INT16 b_val; BYTE a_val; UINT32 tempWidth; tempWidth = ROUND_UP_TO(context->width, 8); rw = (context->ChromaSubsamplingLevel ? 
tempWidth : context->width); ccl = context->ColorLossLevel; for (y = 0; y < context->height; y++) { src = data + (context->height - 1 - y) * scanline; yplane = context->priv->PlaneBuffers[0] + y * rw; coplane = context->priv->PlaneBuffers[1] + y * rw; cgplane = context->priv->PlaneBuffers[2] + y * rw; aplane = context->priv->PlaneBuffers[3] + y * context->width; for (x = 0; x < context->width; x++) { switch (context->format) { case PIXEL_FORMAT_BGRX32: b_val = *src++; g_val = *src++; r_val = *src++; src++; a_val = 0xFF; break; case PIXEL_FORMAT_BGRA32: b_val = *src++; g_val = *src++; r_val = *src++; a_val = *src++; break; case PIXEL_FORMAT_RGBX32: r_val = *src++; g_val = *src++; b_val = *src++; src++; a_val = 0xFF; break; case PIXEL_FORMAT_RGBA32: r_val = *src++; g_val = *src++; b_val = *src++; a_val = *src++; break; case PIXEL_FORMAT_BGR24: b_val = *src++; g_val = *src++; r_val = *src++; a_val = 0xFF; break; case PIXEL_FORMAT_RGB24: r_val = *src++; g_val = *src++; b_val = *src++; a_val = 0xFF; break; case PIXEL_FORMAT_BGR16: b_val = (INT16)(((*(src + 1)) & 0xF8) | ((*(src + 1)) >> 5)); g_val = (INT16)((((*(src + 1)) & 0x07) << 5) | (((*src) & 0xE0) >> 3)); r_val = (INT16)((((*src) & 0x1F) << 3) | (((*src) >> 2) & 0x07)); a_val = 0xFF; src += 2; break; case PIXEL_FORMAT_RGB16: r_val = (INT16)(((*(src + 1)) & 0xF8) | ((*(src + 1)) >> 5)); g_val = (INT16)((((*(src + 1)) & 0x07) << 5) | (((*src) & 0xE0) >> 3)); b_val = (INT16)((((*src) & 0x1F) << 3) | (((*src) >> 2) & 0x07)); a_val = 0xFF; src += 2; break; case PIXEL_FORMAT_A4: { int shift; BYTE idx; shift = (7 - (x % 8)); idx = ((*src) >> shift) & 1; idx |= (((*(src + 1)) >> shift) & 1) << 1; idx |= (((*(src + 2)) >> shift) & 1) << 2; idx |= (((*(src + 3)) >> shift) & 1) << 3; idx *= 3; r_val = (INT16) context->palette[idx]; g_val = (INT16) context->palette[idx + 1]; b_val = (INT16) context->palette[idx + 2]; if (shift == 0) src += 4; } a_val = 0xFF; break; case PIXEL_FORMAT_RGB8: { int idx = (*src) * 3; r_val = (INT16) context->palette[idx]; g_val = (INT16) context->palette[idx + 1]; b_val = (INT16) context->palette[idx + 2]; src++; } a_val = 0xFF; break; default: r_val = g_val = b_val = a_val = 0; break; } *yplane++ = (BYTE)((r_val >> 2) + (g_val >> 1) + (b_val >> 2)); /* Perform color loss reduction here */ *coplane++ = (BYTE)((r_val - b_val) >> ccl); *cgplane++ = (BYTE)((-(r_val >> 1) + g_val - (b_val >> 1)) >> ccl); *aplane++ = a_val; } if (context->ChromaSubsamplingLevel && (x % 2) == 1) { *yplane = *(yplane - 1); *coplane = *(coplane - 1); *cgplane = *(cgplane - 1); } } if (context->ChromaSubsamplingLevel && (y % 2) == 1) { yplane = context->priv->PlaneBuffers[0] + y * rw; coplane = context->priv->PlaneBuffers[1] + y * rw; cgplane = context->priv->PlaneBuffers[2] + y * rw; CopyMemory(yplane, yplane - rw, rw); CopyMemory(coplane, coplane - rw, rw); CopyMemory(cgplane, cgplane - rw, rw); } } static void nsc_encode_subsampling(NSC_CONTEXT* context) { UINT16 x; UINT16 y; BYTE* co_dst; BYTE* cg_dst; INT8* co_src0; INT8* co_src1; INT8* cg_src0; INT8* cg_src1; UINT32 tempWidth; UINT32 tempHeight; tempWidth = ROUND_UP_TO(context->width, 8); tempHeight = ROUND_UP_TO(context->height, 2); for (y = 0; y < tempHeight >> 1; y++) { co_dst = context->priv->PlaneBuffers[1] + y * (tempWidth >> 1); cg_dst = context->priv->PlaneBuffers[2] + y * (tempWidth >> 1); co_src0 = (INT8*) context->priv->PlaneBuffers[1] + (y << 1) * tempWidth; co_src1 = co_src0 + tempWidth; cg_src0 = (INT8*) context->priv->PlaneBuffers[2] + (y << 1) * tempWidth; cg_src1 = cg_src0 
+ tempWidth; for (x = 0; x < tempWidth >> 1; x++) { *co_dst++ = (BYTE)(((INT16) * co_src0 + (INT16) * (co_src0 + 1) + (INT16) * co_src1 + (INT16) * (co_src1 + 1)) >> 2); *cg_dst++ = (BYTE)(((INT16) * cg_src0 + (INT16) * (cg_src0 + 1) + (INT16) * cg_src1 + (INT16) * (cg_src1 + 1)) >> 2); co_src0 += 2; co_src1 += 2; cg_src0 += 2; cg_src1 += 2; } } } void nsc_encode(NSC_CONTEXT* context, const BYTE* bmpdata, UINT32 rowstride) { nsc_encode_argb_to_aycocg(context, bmpdata, rowstride); if (context->ChromaSubsamplingLevel) { nsc_encode_subsampling(context); } } static UINT32 nsc_rle_encode(BYTE* in, BYTE* out, UINT32 originalSize) { UINT32 left; UINT32 runlength = 1; UINT32 planeSize = 0; left = originalSize; /** * We quit the loop if the running compressed size is larger than the original. * In such cases data will be sent uncompressed. */ while (left > 4 && planeSize < originalSize - 4) { if (left > 5 && *in == *(in + 1)) { runlength++; } else if (runlength == 1) { *out++ = *in; planeSize++; } else if (runlength < 256) { *out++ = *in; *out++ = *in; *out++ = runlength - 2; runlength = 1; planeSize += 3; } else { *out++ = *in; *out++ = *in; *out++ = 0xFF; *out++ = (runlength & 0x000000FF); *out++ = (runlength & 0x0000FF00) >> 8; *out++ = (runlength & 0x00FF0000) >> 16; *out++ = (runlength & 0xFF000000) >> 24; runlength = 1; planeSize += 7; } in++; left--; } if (planeSize < originalSize - 4) CopyMemory(out, in, 4); planeSize += 4; return planeSize; } static void nsc_rle_compress_data(NSC_CONTEXT* context) { UINT16 i; UINT32 planeSize; UINT32 originalSize; for (i = 0; i < 4; i++) { originalSize = context->OrgByteCount[i]; if (originalSize == 0) { planeSize = 0; } else { planeSize = nsc_rle_encode(context->priv->PlaneBuffers[i], context->priv->PlaneBuffers[4], originalSize); if (planeSize < originalSize) CopyMemory(context->priv->PlaneBuffers[i], context->priv->PlaneBuffers[4], planeSize); else planeSize = originalSize; } context->PlaneByteCount[i] = planeSize; } } UINT32 nsc_compute_byte_count(NSC_CONTEXT* context, UINT32* ByteCount, UINT32 width, UINT32 height) { UINT32 tempWidth; UINT32 tempHeight; UINT32 maxPlaneSize; tempWidth = ROUND_UP_TO(width, 8); tempHeight = ROUND_UP_TO(height, 2); maxPlaneSize = tempWidth * tempHeight + 16; if (context->ChromaSubsamplingLevel) { ByteCount[0] = tempWidth * height; ByteCount[1] = tempWidth * tempHeight / 4; ByteCount[2] = tempWidth * tempHeight / 4; ByteCount[3] = width * height; } else { ByteCount[0] = width * height; ByteCount[1] = width * height; ByteCount[2] = width * height; ByteCount[3] = width * height; } return maxPlaneSize; } NSC_MESSAGE* nsc_encode_messages(NSC_CONTEXT* context, const BYTE* data, UINT32 x, UINT32 y, UINT32 width, UINT32 height, UINT32 scanline, UINT32* numMessages, UINT32 maxDataSize) { UINT32 i, j, k; UINT32 dataOffset; UINT32 rows, cols; UINT32 BytesPerPixel; UINT32 MaxRegionWidth; UINT32 MaxRegionHeight; UINT32 ByteCount[4]; UINT32 MaxPlaneSize; UINT32 MaxMessageSize; NSC_MESSAGE* messages; UINT32 PaddedMaxPlaneSize; k = 0; MaxRegionWidth = 64 * 4; MaxRegionHeight = 64 * 2; BytesPerPixel = GetBytesPerPixel(context->format); rows = (width + (MaxRegionWidth - (width % MaxRegionWidth))) / MaxRegionWidth; cols = (height + (MaxRegionHeight - (height % MaxRegionHeight))) / MaxRegionHeight; *numMessages = rows * cols; MaxPlaneSize = nsc_compute_byte_count(context, (UINT32*) ByteCount, width, height); MaxMessageSize = ByteCount[0] + ByteCount[1] + ByteCount[2] + ByteCount[3] + 20; maxDataSize -= 1024; /* reserve enough space for 
headers */ messages = (NSC_MESSAGE*) calloc(*numMessages, sizeof(NSC_MESSAGE)); if (!messages) return NULL; for (i = 0; i < rows; i++) { for (j = 0; j < cols; j++) { messages[k].x = x + (i * MaxRegionWidth); messages[k].y = y + (j * MaxRegionHeight); messages[k].width = (i < (rows - 1)) ? MaxRegionWidth : width - (i * MaxRegionWidth); messages[k].height = (j < (cols - 1)) ? MaxRegionHeight : height - (j * MaxRegionHeight); messages[k].data = data; messages[k].scanline = scanline; messages[k].MaxPlaneSize = nsc_compute_byte_count(context, (UINT32*) messages[k].OrgByteCount, messages[k].width, messages[k].height); k++; } } *numMessages = k; for (i = 0; i < *numMessages; i++) { PaddedMaxPlaneSize = messages[i].MaxPlaneSize + 32; messages[i].PlaneBuffer = (BYTE*) BufferPool_Take(context->priv->PlanePool, PaddedMaxPlaneSize * 5); if (!messages[i].PlaneBuffer) goto fail; messages[i].PlaneBuffers[0] = (BYTE*) & (messages[i].PlaneBuffer[(PaddedMaxPlaneSize * 0) + 16]); messages[i].PlaneBuffers[1] = (BYTE*) & (messages[i].PlaneBuffer[(PaddedMaxPlaneSize * 1) + 16]); messages[i].PlaneBuffers[2] = (BYTE*) & (messages[i].PlaneBuffer[(PaddedMaxPlaneSize * 2) + 16]); messages[i].PlaneBuffers[3] = (BYTE*) & (messages[i].PlaneBuffer[(PaddedMaxPlaneSize * 3) + 16]); messages[i].PlaneBuffers[4] = (BYTE*) & (messages[i].PlaneBuffer[(PaddedMaxPlaneSize * 4) + 16]); } for (i = 0; i < *numMessages; i++) { context->width = messages[i].width; context->height = messages[i].height; context->OrgByteCount[0] = messages[i].OrgByteCount[0]; context->OrgByteCount[1] = messages[i].OrgByteCount[1]; context->OrgByteCount[2] = messages[i].OrgByteCount[2]; context->OrgByteCount[3] = messages[i].OrgByteCount[3]; context->priv->PlaneBuffersLength = messages[i].MaxPlaneSize; context->priv->PlaneBuffers[0] = messages[i].PlaneBuffers[0]; context->priv->PlaneBuffers[1] = messages[i].PlaneBuffers[1]; context->priv->PlaneBuffers[2] = messages[i].PlaneBuffers[2]; context->priv->PlaneBuffers[3] = messages[i].PlaneBuffers[3]; context->priv->PlaneBuffers[4] = messages[i].PlaneBuffers[4]; dataOffset = (messages[i].y * messages[i].scanline) + (messages[i].x * BytesPerPixel); PROFILER_ENTER(context->priv->prof_nsc_encode) context->encode(context, &data[dataOffset], scanline); PROFILER_EXIT(context->priv->prof_nsc_encode) PROFILER_ENTER(context->priv->prof_nsc_rle_compress_data) nsc_rle_compress_data(context); PROFILER_EXIT(context->priv->prof_nsc_rle_compress_data) messages[i].LumaPlaneByteCount = context->PlaneByteCount[0]; messages[i].OrangeChromaPlaneByteCount = context->PlaneByteCount[1]; messages[i].GreenChromaPlaneByteCount = context->PlaneByteCount[2]; messages[i].AlphaPlaneByteCount = context->PlaneByteCount[3]; messages[i].ColorLossLevel = context->ColorLossLevel; messages[i].ChromaSubsamplingLevel = context->ChromaSubsamplingLevel; } context->priv->PlaneBuffers[0] = NULL; context->priv->PlaneBuffers[1] = NULL; context->priv->PlaneBuffers[2] = NULL; context->priv->PlaneBuffers[3] = NULL; context->priv->PlaneBuffers[4] = NULL; return messages; fail: for (i = 0; i < *numMessages; i++) BufferPool_Return(context->priv->PlanePool, messages[i].PlaneBuffer); free(messages); return NULL; } BOOL nsc_write_message(NSC_CONTEXT* context, wStream* s, NSC_MESSAGE* message) { UINT32 totalPlaneByteCount; totalPlaneByteCount = message->LumaPlaneByteCount + message->OrangeChromaPlaneByteCount + message->GreenChromaPlaneByteCount + message->AlphaPlaneByteCount; if (!Stream_EnsureRemainingCapacity(s, 20 + totalPlaneByteCount)) return -1; 
Stream_Write_UINT32(s, message->LumaPlaneByteCount); /* LumaPlaneByteCount (4 bytes) */ Stream_Write_UINT32(s, message->OrangeChromaPlaneByteCount); /* OrangeChromaPlaneByteCount (4 bytes) */ Stream_Write_UINT32(s, message->GreenChromaPlaneByteCount); /* GreenChromaPlaneByteCount (4 bytes) */ Stream_Write_UINT32(s, message->AlphaPlaneByteCount); /* AlphaPlaneByteCount (4 bytes) */ Stream_Write_UINT8(s, message->ColorLossLevel); /* ColorLossLevel (1 byte) */ Stream_Write_UINT8(s, message->ChromaSubsamplingLevel); /* ChromaSubsamplingLevel (1 byte) */ Stream_Write_UINT16(s, 0); /* Reserved (2 bytes) */ if (message->LumaPlaneByteCount) Stream_Write(s, message->PlaneBuffers[0], message->LumaPlaneByteCount); /* LumaPlane */ if (message->OrangeChromaPlaneByteCount) Stream_Write(s, message->PlaneBuffers[1], message->OrangeChromaPlaneByteCount); /* OrangeChromaPlane */ if (message->GreenChromaPlaneByteCount) Stream_Write(s, message->PlaneBuffers[2], message->GreenChromaPlaneByteCount); /* GreenChromaPlane */ if (message->AlphaPlaneByteCount) Stream_Write(s, message->PlaneBuffers[3], message->AlphaPlaneByteCount); /* AlphaPlane */ return TRUE; } void nsc_message_free(NSC_CONTEXT* context, NSC_MESSAGE* message) { BufferPool_Return(context->priv->PlanePool, message->PlaneBuffer); } BOOL nsc_compose_message(NSC_CONTEXT* context, wStream* s, const BYTE* data, UINT32 width, UINT32 height, UINT32 scanline) { NSC_MESSAGE s_message = { 0 }; NSC_MESSAGE* message = &s_message; context->width = width; context->height = height; if (!nsc_context_initialize_encode(context)) return FALSE; /* ARGB to AYCoCg conversion, chroma subsampling and colorloss reduction */ PROFILER_ENTER(context->priv->prof_nsc_encode) context->encode(context, data, scanline); PROFILER_EXIT(context->priv->prof_nsc_encode) /* RLE encode */ PROFILER_ENTER(context->priv->prof_nsc_rle_compress_data) nsc_rle_compress_data(context); PROFILER_EXIT(context->priv->prof_nsc_rle_compress_data) message->PlaneBuffers[0] = context->priv->PlaneBuffers[0]; message->PlaneBuffers[1] = context->priv->PlaneBuffers[1]; message->PlaneBuffers[2] = context->priv->PlaneBuffers[2]; message->PlaneBuffers[3] = context->priv->PlaneBuffers[3]; message->LumaPlaneByteCount = context->PlaneByteCount[0]; message->OrangeChromaPlaneByteCount = context->PlaneByteCount[1]; message->GreenChromaPlaneByteCount = context->PlaneByteCount[2]; message->AlphaPlaneByteCount = context->PlaneByteCount[3]; message->ColorLossLevel = context->ColorLossLevel; message->ChromaSubsamplingLevel = context->ChromaSubsamplingLevel; return nsc_write_message(context, s, message); }
/** * FreeRDP: A Remote Desktop Protocol Implementation * NSCodec Encoder * * Copyright 2012 Vic Lee * Copyright 2016 Armin Novak <armin.novak@thincast.com> * Copyright 2016 Thincast Technologies GmbH * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include <stdio.h> #include <stdlib.h> #include <string.h> #include <winpr/crt.h> #include <freerdp/codec/nsc.h> #include <freerdp/codec/color.h> #include "nsc_types.h" #include "nsc_encode.h" static BOOL nsc_context_initialize_encode(NSC_CONTEXT* context) { int i; UINT32 length; UINT32 tempWidth; UINT32 tempHeight; tempWidth = ROUND_UP_TO(context->width, 8); tempHeight = ROUND_UP_TO(context->height, 2); /* The maximum length a decoded plane can reach in all cases */ length = tempWidth * tempHeight + 16; if (length > context->priv->PlaneBuffersLength) { for (i = 0; i < 5; i++) { BYTE* tmp = (BYTE*) realloc(context->priv->PlaneBuffers[i], length); if (!tmp) goto fail; context->priv->PlaneBuffers[i] = tmp; } context->priv->PlaneBuffersLength = length; } if (context->ChromaSubsamplingLevel) { context->OrgByteCount[0] = tempWidth * context->height; context->OrgByteCount[1] = tempWidth * tempHeight / 4; context->OrgByteCount[2] = tempWidth * tempHeight / 4; context->OrgByteCount[3] = context->width * context->height; } else { context->OrgByteCount[0] = context->width * context->height; context->OrgByteCount[1] = context->width * context->height; context->OrgByteCount[2] = context->width * context->height; context->OrgByteCount[3] = context->width * context->height; } return TRUE; fail: if (length > context->priv->PlaneBuffersLength) { for (i = 0; i < 5; i++) free(context->priv->PlaneBuffers[i]); } return FALSE; } static BOOL nsc_encode_argb_to_aycocg(NSC_CONTEXT* context, const BYTE* data, UINT32 scanline) { UINT16 x; UINT16 y; UINT16 rw; BYTE ccl; const BYTE* src; BYTE* yplane = NULL; BYTE* coplane = NULL; BYTE* cgplane = NULL; BYTE* aplane = NULL; INT16 r_val; INT16 g_val; INT16 b_val; BYTE a_val; UINT32 tempWidth; if (!context || data || (scanline == 0)) return FALSE; tempWidth = ROUND_UP_TO(context->width, 8); rw = (context->ChromaSubsamplingLevel ? 
tempWidth : context->width); ccl = context->ColorLossLevel; if (context->priv->PlaneBuffersLength < rw * scanline) return FALSE; if (rw < scanline * 2) return FALSE; for (y = 0; y < context->height; y++) { src = data + (context->height - 1 - y) * scanline; yplane = context->priv->PlaneBuffers[0] + y * rw; coplane = context->priv->PlaneBuffers[1] + y * rw; cgplane = context->priv->PlaneBuffers[2] + y * rw; aplane = context->priv->PlaneBuffers[3] + y * context->width; for (x = 0; x < context->width; x++) { switch (context->format) { case PIXEL_FORMAT_BGRX32: b_val = *src++; g_val = *src++; r_val = *src++; src++; a_val = 0xFF; break; case PIXEL_FORMAT_BGRA32: b_val = *src++; g_val = *src++; r_val = *src++; a_val = *src++; break; case PIXEL_FORMAT_RGBX32: r_val = *src++; g_val = *src++; b_val = *src++; src++; a_val = 0xFF; break; case PIXEL_FORMAT_RGBA32: r_val = *src++; g_val = *src++; b_val = *src++; a_val = *src++; break; case PIXEL_FORMAT_BGR24: b_val = *src++; g_val = *src++; r_val = *src++; a_val = 0xFF; break; case PIXEL_FORMAT_RGB24: r_val = *src++; g_val = *src++; b_val = *src++; a_val = 0xFF; break; case PIXEL_FORMAT_BGR16: b_val = (INT16)(((*(src + 1)) & 0xF8) | ((*(src + 1)) >> 5)); g_val = (INT16)((((*(src + 1)) & 0x07) << 5) | (((*src) & 0xE0) >> 3)); r_val = (INT16)((((*src) & 0x1F) << 3) | (((*src) >> 2) & 0x07)); a_val = 0xFF; src += 2; break; case PIXEL_FORMAT_RGB16: r_val = (INT16)(((*(src + 1)) & 0xF8) | ((*(src + 1)) >> 5)); g_val = (INT16)((((*(src + 1)) & 0x07) << 5) | (((*src) & 0xE0) >> 3)); b_val = (INT16)((((*src) & 0x1F) << 3) | (((*src) >> 2) & 0x07)); a_val = 0xFF; src += 2; break; case PIXEL_FORMAT_A4: { int shift; BYTE idx; shift = (7 - (x % 8)); idx = ((*src) >> shift) & 1; idx |= (((*(src + 1)) >> shift) & 1) << 1; idx |= (((*(src + 2)) >> shift) & 1) << 2; idx |= (((*(src + 3)) >> shift) & 1) << 3; idx *= 3; r_val = (INT16) context->palette[idx]; g_val = (INT16) context->palette[idx + 1]; b_val = (INT16) context->palette[idx + 2]; if (shift == 0) src += 4; } a_val = 0xFF; break; case PIXEL_FORMAT_RGB8: { int idx = (*src) * 3; r_val = (INT16) context->palette[idx]; g_val = (INT16) context->palette[idx + 1]; b_val = (INT16) context->palette[idx + 2]; src++; } a_val = 0xFF; break; default: r_val = g_val = b_val = a_val = 0; break; } *yplane++ = (BYTE)((r_val >> 2) + (g_val >> 1) + (b_val >> 2)); /* Perform color loss reduction here */ *coplane++ = (BYTE)((r_val - b_val) >> ccl); *cgplane++ = (BYTE)((-(r_val >> 1) + g_val - (b_val >> 1)) >> ccl); *aplane++ = a_val; } if (context->ChromaSubsamplingLevel && (x % 2) == 1) { *yplane = *(yplane - 1); *coplane = *(coplane - 1); *cgplane = *(cgplane - 1); } } if (context->ChromaSubsamplingLevel && (y % 2) == 1) { yplane = context->priv->PlaneBuffers[0] + y * rw; coplane = context->priv->PlaneBuffers[1] + y * rw; cgplane = context->priv->PlaneBuffers[2] + y * rw; CopyMemory(yplane, yplane - rw, rw); CopyMemory(coplane, coplane - rw, rw); CopyMemory(cgplane, cgplane - rw, rw); } return TRUE; } static BOOL nsc_encode_subsampling(NSC_CONTEXT* context) { UINT16 x; UINT16 y; UINT32 tempWidth; UINT32 tempHeight; if (!context) return FALSE; tempWidth = ROUND_UP_TO(context->width, 8); tempHeight = ROUND_UP_TO(context->height, 2); if (tempHeight == 0) return FALSE; if (tempWidth > context->priv->PlaneBuffersLength / tempHeight) return FALSE; for (y = 0; y < tempHeight >> 1; y++) { BYTE* co_dst = context->priv->PlaneBuffers[1] + y * (tempWidth >> 1); BYTE* cg_dst = context->priv->PlaneBuffers[2] + y * (tempWidth >> 1); const INT8* 
co_src0 = (INT8*) context->priv->PlaneBuffers[1] + (y << 1) * tempWidth; const INT8* co_src1 = co_src0 + tempWidth; const INT8* cg_src0 = (INT8*) context->priv->PlaneBuffers[2] + (y << 1) * tempWidth; const INT8* cg_src1 = cg_src0 + tempWidth; for (x = 0; x < tempWidth >> 1; x++) { *co_dst++ = (BYTE)(((INT16) * co_src0 + (INT16) * (co_src0 + 1) + (INT16) * co_src1 + (INT16) * (co_src1 + 1)) >> 2); *cg_dst++ = (BYTE)(((INT16) * cg_src0 + (INT16) * (cg_src0 + 1) + (INT16) * cg_src1 + (INT16) * (cg_src1 + 1)) >> 2); co_src0 += 2; co_src1 += 2; cg_src0 += 2; cg_src1 += 2; } } return TRUE; } BOOL nsc_encode(NSC_CONTEXT* context, const BYTE* bmpdata, UINT32 rowstride) { if (!context || !bmpdata || (rowstride == 0)) return FALSE; if (!nsc_encode_argb_to_aycocg(context, bmpdata, rowstride)) return FALSE; if (context->ChromaSubsamplingLevel) { if (!nsc_encode_subsampling(context)) return FALSE; } return TRUE; } static UINT32 nsc_rle_encode(const BYTE* in, BYTE* out, UINT32 originalSize) { UINT32 left; UINT32 runlength = 1; UINT32 planeSize = 0; left = originalSize; /** * We quit the loop if the running compressed size is larger than the original. * In such cases data will be sent uncompressed. */ while (left > 4 && planeSize < originalSize - 4) { if (left > 5 && *in == *(in + 1)) { runlength++; } else if (runlength == 1) { *out++ = *in; planeSize++; } else if (runlength < 256) { *out++ = *in; *out++ = *in; *out++ = runlength - 2; runlength = 1; planeSize += 3; } else { *out++ = *in; *out++ = *in; *out++ = 0xFF; *out++ = (runlength & 0x000000FF); *out++ = (runlength & 0x0000FF00) >> 8; *out++ = (runlength & 0x00FF0000) >> 16; *out++ = (runlength & 0xFF000000) >> 24; runlength = 1; planeSize += 7; } in++; left--; } if (planeSize < originalSize - 4) CopyMemory(out, in, 4); planeSize += 4; return planeSize; } static void nsc_rle_compress_data(NSC_CONTEXT* context) { UINT16 i; UINT32 planeSize; UINT32 originalSize; for (i = 0; i < 4; i++) { originalSize = context->OrgByteCount[i]; if (originalSize == 0) { planeSize = 0; } else { planeSize = nsc_rle_encode(context->priv->PlaneBuffers[i], context->priv->PlaneBuffers[4], originalSize); if (planeSize < originalSize) CopyMemory(context->priv->PlaneBuffers[i], context->priv->PlaneBuffers[4], planeSize); else planeSize = originalSize; } context->PlaneByteCount[i] = planeSize; } } UINT32 nsc_compute_byte_count(NSC_CONTEXT* context, UINT32* ByteCount, UINT32 width, UINT32 height) { UINT32 tempWidth; UINT32 tempHeight; UINT32 maxPlaneSize; tempWidth = ROUND_UP_TO(width, 8); tempHeight = ROUND_UP_TO(height, 2); maxPlaneSize = tempWidth * tempHeight + 16; if (context->ChromaSubsamplingLevel) { ByteCount[0] = tempWidth * height; ByteCount[1] = tempWidth * tempHeight / 4; ByteCount[2] = tempWidth * tempHeight / 4; ByteCount[3] = width * height; } else { ByteCount[0] = width * height; ByteCount[1] = width * height; ByteCount[2] = width * height; ByteCount[3] = width * height; } return maxPlaneSize; } NSC_MESSAGE* nsc_encode_messages(NSC_CONTEXT* context, const BYTE* data, UINT32 x, UINT32 y, UINT32 width, UINT32 height, UINT32 scanline, UINT32* numMessages, UINT32 maxDataSize) { UINT32 i, j, k; UINT32 dataOffset; UINT32 rows, cols; UINT32 BytesPerPixel; UINT32 MaxRegionWidth; UINT32 MaxRegionHeight; UINT32 ByteCount[4]; UINT32 MaxPlaneSize; UINT32 MaxMessageSize; NSC_MESSAGE* messages; UINT32 PaddedMaxPlaneSize; k = 0; MaxRegionWidth = 64 * 4; MaxRegionHeight = 64 * 2; BytesPerPixel = GetBytesPerPixel(context->format); rows = (width + (MaxRegionWidth - (width % 
MaxRegionWidth))) / MaxRegionWidth; cols = (height + (MaxRegionHeight - (height % MaxRegionHeight))) / MaxRegionHeight; *numMessages = rows * cols; MaxPlaneSize = nsc_compute_byte_count(context, (UINT32*) ByteCount, width, height); MaxMessageSize = ByteCount[0] + ByteCount[1] + ByteCount[2] + ByteCount[3] + 20; maxDataSize -= 1024; /* reserve enough space for headers */ messages = (NSC_MESSAGE*) calloc(*numMessages, sizeof(NSC_MESSAGE)); if (!messages) return NULL; for (i = 0; i < rows; i++) { for (j = 0; j < cols; j++) { messages[k].x = x + (i * MaxRegionWidth); messages[k].y = y + (j * MaxRegionHeight); messages[k].width = (i < (rows - 1)) ? MaxRegionWidth : width - (i * MaxRegionWidth); messages[k].height = (j < (cols - 1)) ? MaxRegionHeight : height - (j * MaxRegionHeight); messages[k].data = data; messages[k].scanline = scanline; messages[k].MaxPlaneSize = nsc_compute_byte_count(context, (UINT32*) messages[k].OrgByteCount, messages[k].width, messages[k].height); k++; } } *numMessages = k; for (i = 0; i < *numMessages; i++) { PaddedMaxPlaneSize = messages[i].MaxPlaneSize + 32; messages[i].PlaneBuffer = (BYTE*) BufferPool_Take(context->priv->PlanePool, PaddedMaxPlaneSize * 5); if (!messages[i].PlaneBuffer) goto fail; messages[i].PlaneBuffers[0] = (BYTE*) & (messages[i].PlaneBuffer[(PaddedMaxPlaneSize * 0) + 16]); messages[i].PlaneBuffers[1] = (BYTE*) & (messages[i].PlaneBuffer[(PaddedMaxPlaneSize * 1) + 16]); messages[i].PlaneBuffers[2] = (BYTE*) & (messages[i].PlaneBuffer[(PaddedMaxPlaneSize * 2) + 16]); messages[i].PlaneBuffers[3] = (BYTE*) & (messages[i].PlaneBuffer[(PaddedMaxPlaneSize * 3) + 16]); messages[i].PlaneBuffers[4] = (BYTE*) & (messages[i].PlaneBuffer[(PaddedMaxPlaneSize * 4) + 16]); } for (i = 0; i < *numMessages; i++) { context->width = messages[i].width; context->height = messages[i].height; context->OrgByteCount[0] = messages[i].OrgByteCount[0]; context->OrgByteCount[1] = messages[i].OrgByteCount[1]; context->OrgByteCount[2] = messages[i].OrgByteCount[2]; context->OrgByteCount[3] = messages[i].OrgByteCount[3]; context->priv->PlaneBuffersLength = messages[i].MaxPlaneSize; context->priv->PlaneBuffers[0] = messages[i].PlaneBuffers[0]; context->priv->PlaneBuffers[1] = messages[i].PlaneBuffers[1]; context->priv->PlaneBuffers[2] = messages[i].PlaneBuffers[2]; context->priv->PlaneBuffers[3] = messages[i].PlaneBuffers[3]; context->priv->PlaneBuffers[4] = messages[i].PlaneBuffers[4]; dataOffset = (messages[i].y * messages[i].scanline) + (messages[i].x * BytesPerPixel); PROFILER_ENTER(context->priv->prof_nsc_encode) context->encode(context, &data[dataOffset], scanline); PROFILER_EXIT(context->priv->prof_nsc_encode) PROFILER_ENTER(context->priv->prof_nsc_rle_compress_data) nsc_rle_compress_data(context); PROFILER_EXIT(context->priv->prof_nsc_rle_compress_data) messages[i].LumaPlaneByteCount = context->PlaneByteCount[0]; messages[i].OrangeChromaPlaneByteCount = context->PlaneByteCount[1]; messages[i].GreenChromaPlaneByteCount = context->PlaneByteCount[2]; messages[i].AlphaPlaneByteCount = context->PlaneByteCount[3]; messages[i].ColorLossLevel = context->ColorLossLevel; messages[i].ChromaSubsamplingLevel = context->ChromaSubsamplingLevel; } context->priv->PlaneBuffers[0] = NULL; context->priv->PlaneBuffers[1] = NULL; context->priv->PlaneBuffers[2] = NULL; context->priv->PlaneBuffers[3] = NULL; context->priv->PlaneBuffers[4] = NULL; return messages; fail: for (i = 0; i < *numMessages; i++) BufferPool_Return(context->priv->PlanePool, messages[i].PlaneBuffer); free(messages); return 
NULL; } BOOL nsc_write_message(NSC_CONTEXT* context, wStream* s, NSC_MESSAGE* message) { UINT32 totalPlaneByteCount; totalPlaneByteCount = message->LumaPlaneByteCount + message->OrangeChromaPlaneByteCount + message->GreenChromaPlaneByteCount + message->AlphaPlaneByteCount; if (!Stream_EnsureRemainingCapacity(s, 20 + totalPlaneByteCount)) return -1; Stream_Write_UINT32(s, message->LumaPlaneByteCount); /* LumaPlaneByteCount (4 bytes) */ Stream_Write_UINT32(s, message->OrangeChromaPlaneByteCount); /* OrangeChromaPlaneByteCount (4 bytes) */ Stream_Write_UINT32(s, message->GreenChromaPlaneByteCount); /* GreenChromaPlaneByteCount (4 bytes) */ Stream_Write_UINT32(s, message->AlphaPlaneByteCount); /* AlphaPlaneByteCount (4 bytes) */ Stream_Write_UINT8(s, message->ColorLossLevel); /* ColorLossLevel (1 byte) */ Stream_Write_UINT8(s, message->ChromaSubsamplingLevel); /* ChromaSubsamplingLevel (1 byte) */ Stream_Write_UINT16(s, 0); /* Reserved (2 bytes) */ if (message->LumaPlaneByteCount) Stream_Write(s, message->PlaneBuffers[0], message->LumaPlaneByteCount); /* LumaPlane */ if (message->OrangeChromaPlaneByteCount) Stream_Write(s, message->PlaneBuffers[1], message->OrangeChromaPlaneByteCount); /* OrangeChromaPlane */ if (message->GreenChromaPlaneByteCount) Stream_Write(s, message->PlaneBuffers[2], message->GreenChromaPlaneByteCount); /* GreenChromaPlane */ if (message->AlphaPlaneByteCount) Stream_Write(s, message->PlaneBuffers[3], message->AlphaPlaneByteCount); /* AlphaPlane */ return TRUE; } void nsc_message_free(NSC_CONTEXT* context, NSC_MESSAGE* message) { BufferPool_Return(context->priv->PlanePool, message->PlaneBuffer); } BOOL nsc_compose_message(NSC_CONTEXT* context, wStream* s, const BYTE* data, UINT32 width, UINT32 height, UINT32 scanline) { NSC_MESSAGE s_message = { 0 }; NSC_MESSAGE* message = &s_message; context->width = width; context->height = height; if (!nsc_context_initialize_encode(context)) return FALSE; /* ARGB to AYCoCg conversion, chroma subsampling and colorloss reduction */ PROFILER_ENTER(context->priv->prof_nsc_encode) context->encode(context, data, scanline); PROFILER_EXIT(context->priv->prof_nsc_encode) /* RLE encode */ PROFILER_ENTER(context->priv->prof_nsc_rle_compress_data) nsc_rle_compress_data(context); PROFILER_EXIT(context->priv->prof_nsc_rle_compress_data) message->PlaneBuffers[0] = context->priv->PlaneBuffers[0]; message->PlaneBuffers[1] = context->priv->PlaneBuffers[1]; message->PlaneBuffers[2] = context->priv->PlaneBuffers[2]; message->PlaneBuffers[3] = context->priv->PlaneBuffers[3]; message->LumaPlaneByteCount = context->PlaneByteCount[0]; message->OrangeChromaPlaneByteCount = context->PlaneByteCount[1]; message->GreenChromaPlaneByteCount = context->PlaneByteCount[2]; message->AlphaPlaneByteCount = context->PlaneByteCount[3]; message->ColorLossLevel = context->ColorLossLevel; message->ChromaSubsamplingLevel = context->ChromaSubsamplingLevel; return nsc_write_message(context, s, message); }
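For reference, nsc_write_message above emits a fixed 20-byte header (four 32-bit plane byte counts, ColorLossLevel, ChromaSubsamplingLevel, and two reserved bytes) followed by the raw planes. A standalone sketch of that layout, assuming the little-endian byte order of the WinPR stream writers:

#include <stdint.h>

static void put_u32le(uint8_t* p, uint32_t v)
{
    p[0] = (uint8_t)(v);
    p[1] = (uint8_t)(v >> 8);
    p[2] = (uint8_t)(v >> 16);
    p[3] = (uint8_t)(v >> 24);
}

/* 20-byte NSC message header as serialized by nsc_write_message. */
static void nsc_pack_header(uint8_t out[20],
                            uint32_t luma, uint32_t orangeChroma,
                            uint32_t greenChroma, uint32_t alpha,
                            uint8_t colorLoss, uint8_t chromaSubsampling)
{
    put_u32le(out +  0, luma);         /* LumaPlaneByteCount */
    put_u32le(out +  4, orangeChroma); /* OrangeChromaPlaneByteCount */
    put_u32le(out +  8, greenChroma);  /* GreenChromaPlaneByteCount */
    put_u32le(out + 12, alpha);        /* AlphaPlaneByteCount */
    out[16] = colorLoss;               /* ColorLossLevel (1 byte) */
    out[17] = chromaSubsampling;       /* ChromaSubsamplingLevel (1 byte) */
    out[18] = 0;                       /* Reserved (2 bytes) */
    out[19] = 0;
}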
static void nsc_encode_argb_to_aycocg(NSC_CONTEXT* context, const BYTE* data, UINT32 scanline) { UINT16 x; UINT16 y; UINT16 rw; BYTE ccl; const BYTE* src; BYTE* yplane = NULL; BYTE* coplane = NULL; BYTE* cgplane = NULL; BYTE* aplane = NULL; INT16 r_val; INT16 g_val; INT16 b_val; BYTE a_val; UINT32 tempWidth; tempWidth = ROUND_UP_TO(context->width, 8); rw = (context->ChromaSubsamplingLevel ? tempWidth : context->width); ccl = context->ColorLossLevel; for (y = 0; y < context->height; y++) { src = data + (context->height - 1 - y) * scanline; yplane = context->priv->PlaneBuffers[0] + y * rw; coplane = context->priv->PlaneBuffers[1] + y * rw; cgplane = context->priv->PlaneBuffers[2] + y * rw; aplane = context->priv->PlaneBuffers[3] + y * context->width; for (x = 0; x < context->width; x++) { switch (context->format) { case PIXEL_FORMAT_BGRX32: b_val = *src++; g_val = *src++; r_val = *src++; src++; a_val = 0xFF; break; case PIXEL_FORMAT_BGRA32: b_val = *src++; g_val = *src++; r_val = *src++; a_val = *src++; break; case PIXEL_FORMAT_RGBX32: r_val = *src++; g_val = *src++; b_val = *src++; src++; a_val = 0xFF; break; case PIXEL_FORMAT_RGBA32: r_val = *src++; g_val = *src++; b_val = *src++; a_val = *src++; break; case PIXEL_FORMAT_BGR24: b_val = *src++; g_val = *src++; r_val = *src++; a_val = 0xFF; break; case PIXEL_FORMAT_RGB24: r_val = *src++; g_val = *src++; b_val = *src++; a_val = 0xFF; break; case PIXEL_FORMAT_BGR16: b_val = (INT16)(((*(src + 1)) & 0xF8) | ((*(src + 1)) >> 5)); g_val = (INT16)((((*(src + 1)) & 0x07) << 5) | (((*src) & 0xE0) >> 3)); r_val = (INT16)((((*src) & 0x1F) << 3) | (((*src) >> 2) & 0x07)); a_val = 0xFF; src += 2; break; case PIXEL_FORMAT_RGB16: r_val = (INT16)(((*(src + 1)) & 0xF8) | ((*(src + 1)) >> 5)); g_val = (INT16)((((*(src + 1)) & 0x07) << 5) | (((*src) & 0xE0) >> 3)); b_val = (INT16)((((*src) & 0x1F) << 3) | (((*src) >> 2) & 0x07)); a_val = 0xFF; src += 2; break; case PIXEL_FORMAT_A4: { int shift; BYTE idx; shift = (7 - (x % 8)); idx = ((*src) >> shift) & 1; idx |= (((*(src + 1)) >> shift) & 1) << 1; idx |= (((*(src + 2)) >> shift) & 1) << 2; idx |= (((*(src + 3)) >> shift) & 1) << 3; idx *= 3; r_val = (INT16) context->palette[idx]; g_val = (INT16) context->palette[idx + 1]; b_val = (INT16) context->palette[idx + 2]; if (shift == 0) src += 4; } a_val = 0xFF; break; case PIXEL_FORMAT_RGB8: { int idx = (*src) * 3; r_val = (INT16) context->palette[idx]; g_val = (INT16) context->palette[idx + 1]; b_val = (INT16) context->palette[idx + 2]; src++; } a_val = 0xFF; break; default: r_val = g_val = b_val = a_val = 0; break; } *yplane++ = (BYTE)((r_val >> 2) + (g_val >> 1) + (b_val >> 2)); /* Perform color loss reduction here */ *coplane++ = (BYTE)((r_val - b_val) >> ccl); *cgplane++ = (BYTE)((-(r_val >> 1) + g_val - (b_val >> 1)) >> ccl); *aplane++ = a_val; } if (context->ChromaSubsamplingLevel && (x % 2) == 1) { *yplane = *(yplane - 1); *coplane = *(coplane - 1); *cgplane = *(cgplane - 1); } } if (context->ChromaSubsamplingLevel && (y % 2) == 1) { yplane = context->priv->PlaneBuffers[0] + y * rw; coplane = context->priv->PlaneBuffers[1] + y * rw; cgplane = context->priv->PlaneBuffers[2] + y * rw; CopyMemory(yplane, yplane - rw, rw); CopyMemory(coplane, coplane - rw, rw); CopyMemory(cgplane, cgplane - rw, rw); } }
static BOOL nsc_encode_argb_to_aycocg(NSC_CONTEXT* context, const BYTE* data, UINT32 scanline) { UINT16 x; UINT16 y; UINT16 rw; BYTE ccl; const BYTE* src; BYTE* yplane = NULL; BYTE* coplane = NULL; BYTE* cgplane = NULL; BYTE* aplane = NULL; INT16 r_val; INT16 g_val; INT16 b_val; BYTE a_val; UINT32 tempWidth; if (!context || data || (scanline == 0)) return FALSE; tempWidth = ROUND_UP_TO(context->width, 8); rw = (context->ChromaSubsamplingLevel ? tempWidth : context->width); ccl = context->ColorLossLevel; if (context->priv->PlaneBuffersLength < rw * scanline) return FALSE; if (rw < scanline * 2) return FALSE; for (y = 0; y < context->height; y++) { src = data + (context->height - 1 - y) * scanline; yplane = context->priv->PlaneBuffers[0] + y * rw; coplane = context->priv->PlaneBuffers[1] + y * rw; cgplane = context->priv->PlaneBuffers[2] + y * rw; aplane = context->priv->PlaneBuffers[3] + y * context->width; for (x = 0; x < context->width; x++) { switch (context->format) { case PIXEL_FORMAT_BGRX32: b_val = *src++; g_val = *src++; r_val = *src++; src++; a_val = 0xFF; break; case PIXEL_FORMAT_BGRA32: b_val = *src++; g_val = *src++; r_val = *src++; a_val = *src++; break; case PIXEL_FORMAT_RGBX32: r_val = *src++; g_val = *src++; b_val = *src++; src++; a_val = 0xFF; break; case PIXEL_FORMAT_RGBA32: r_val = *src++; g_val = *src++; b_val = *src++; a_val = *src++; break; case PIXEL_FORMAT_BGR24: b_val = *src++; g_val = *src++; r_val = *src++; a_val = 0xFF; break; case PIXEL_FORMAT_RGB24: r_val = *src++; g_val = *src++; b_val = *src++; a_val = 0xFF; break; case PIXEL_FORMAT_BGR16: b_val = (INT16)(((*(src + 1)) & 0xF8) | ((*(src + 1)) >> 5)); g_val = (INT16)((((*(src + 1)) & 0x07) << 5) | (((*src) & 0xE0) >> 3)); r_val = (INT16)((((*src) & 0x1F) << 3) | (((*src) >> 2) & 0x07)); a_val = 0xFF; src += 2; break; case PIXEL_FORMAT_RGB16: r_val = (INT16)(((*(src + 1)) & 0xF8) | ((*(src + 1)) >> 5)); g_val = (INT16)((((*(src + 1)) & 0x07) << 5) | (((*src) & 0xE0) >> 3)); b_val = (INT16)((((*src) & 0x1F) << 3) | (((*src) >> 2) & 0x07)); a_val = 0xFF; src += 2; break; case PIXEL_FORMAT_A4: { int shift; BYTE idx; shift = (7 - (x % 8)); idx = ((*src) >> shift) & 1; idx |= (((*(src + 1)) >> shift) & 1) << 1; idx |= (((*(src + 2)) >> shift) & 1) << 2; idx |= (((*(src + 3)) >> shift) & 1) << 3; idx *= 3; r_val = (INT16) context->palette[idx]; g_val = (INT16) context->palette[idx + 1]; b_val = (INT16) context->palette[idx + 2]; if (shift == 0) src += 4; } a_val = 0xFF; break; case PIXEL_FORMAT_RGB8: { int idx = (*src) * 3; r_val = (INT16) context->palette[idx]; g_val = (INT16) context->palette[idx + 1]; b_val = (INT16) context->palette[idx + 2]; src++; } a_val = 0xFF; break; default: r_val = g_val = b_val = a_val = 0; break; } *yplane++ = (BYTE)((r_val >> 2) + (g_val >> 1) + (b_val >> 2)); /* Perform color loss reduction here */ *coplane++ = (BYTE)((r_val - b_val) >> ccl); *cgplane++ = (BYTE)((-(r_val >> 1) + g_val - (b_val >> 1)) >> ccl); *aplane++ = a_val; } if (context->ChromaSubsamplingLevel && (x % 2) == 1) { *yplane = *(yplane - 1); *coplane = *(coplane - 1); *cgplane = *(cgplane - 1); } } if (context->ChromaSubsamplingLevel && (y % 2) == 1) { yplane = context->priv->PlaneBuffers[0] + y * rw; coplane = context->priv->PlaneBuffers[1] + y * rw; cgplane = context->priv->PlaneBuffers[2] + y * rw; CopyMemory(yplane, yplane - rw, rw); CopyMemory(coplane, coplane - rw, rw); CopyMemory(cgplane, cgplane - rw, rw); } return TRUE; }
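The per-pixel color transform is identical in both versions of the function above: luma blends R, G, B with weights 1/4, 1/2, 1/4, and the two chroma difference terms are shifted right by the ColorLossLevel. The same arithmetic extracted as a standalone helper:

#include <stdint.h>

/* Per-pixel RGB -> YCoCg step from nsc_encode_argb_to_aycocg.
 * r, g, b are 0..255; ccl is the ColorLossLevel shift that discards
 * low chroma bits (the "color loss reduction" in the source). */
static void rgb_to_aycocg(int16_t r, int16_t g, int16_t b, uint8_t ccl,
                          uint8_t* y, uint8_t* co, uint8_t* cg)
{
    *y  = (uint8_t)((r >> 2) + (g >> 1) + (b >> 2));    /* Y  ~ R/4 + G/2 + B/4 */
    *co = (uint8_t)((r - b) >> ccl);                    /* Co = R - B, loss shift */
    *cg = (uint8_t)((-(r >> 1) + g - (b >> 1)) >> ccl); /* Cg = -R/2 + G - B/2 */
}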
{'added': [(54, ''), (91, 'static BOOL nsc_encode_argb_to_aycocg(NSC_CONTEXT* context, const BYTE* data,'), (108, ''), (109, '\tif (!context || data || (scanline == 0))'), (110, '\t\treturn FALSE;'), (111, ''), (116, '\tif (context->priv->PlaneBuffersLength < rw * scanline)'), (117, '\t\treturn FALSE;'), (118, ''), (119, '\tif (rw < scanline * 2)'), (120, '\t\treturn FALSE;'), (121, ''), (256, ''), (257, '\treturn TRUE;'), (260, 'static BOOL nsc_encode_subsampling(NSC_CONTEXT* context)'), (266, ''), (267, '\tif (!context)'), (268, '\t\treturn FALSE;'), (269, ''), (273, '\tif (tempHeight == 0)'), (274, '\t\treturn FALSE;'), (275, ''), (276, '\tif (tempWidth > context->priv->PlaneBuffersLength / tempHeight)'), (277, '\t\treturn FALSE;'), (278, ''), (281, '\t\tBYTE* co_dst = context->priv->PlaneBuffers[1] + y * (tempWidth >> 1);'), (282, '\t\tBYTE* cg_dst = context->priv->PlaneBuffers[2] + y * (tempWidth >> 1);'), (283, '\t\tconst INT8* co_src0 = (INT8*) context->priv->PlaneBuffers[1] + (y << 1) * tempWidth;'), (284, '\t\tconst INT8* co_src1 = co_src0 + tempWidth;'), (285, '\t\tconst INT8* cg_src0 = (INT8*) context->priv->PlaneBuffers[2] + (y << 1) * tempWidth;'), (286, '\t\tconst INT8* cg_src1 = cg_src0 + tempWidth;'), (300, ''), (301, '\treturn TRUE;'), (304, 'BOOL nsc_encode(NSC_CONTEXT* context, const BYTE* bmpdata, UINT32 rowstride)'), (306, '\tif (!context || !bmpdata || (rowstride == 0))'), (307, '\t\treturn FALSE;'), (308, ''), (309, '\tif (!nsc_encode_argb_to_aycocg(context, bmpdata, rowstride))'), (310, '\t\treturn FALSE;'), (314, '\t\tif (!nsc_encode_subsampling(context))'), (315, '\t\t\treturn FALSE;'), (317, ''), (318, '\treturn TRUE;'), (321, 'static UINT32 nsc_rle_encode(const BYTE* in, BYTE* out, UINT32 originalSize)')], 'deleted': [(90, 'static void nsc_encode_argb_to_aycocg(NSC_CONTEXT* context, const BYTE* data,'), (247, 'static void nsc_encode_subsampling(NSC_CONTEXT* context)'), (251, '\tBYTE* co_dst;'), (252, '\tBYTE* cg_dst;'), (253, '\tINT8* co_src0;'), (254, '\tINT8* co_src1;'), (255, '\tINT8* cg_src0;'), (256, '\tINT8* cg_src1;'), (264, '\t\tco_dst = context->priv->PlaneBuffers[1] + y * (tempWidth >> 1);'), (265, '\t\tcg_dst = context->priv->PlaneBuffers[2] + y * (tempWidth >> 1);'), (266, '\t\tco_src0 = (INT8*) context->priv->PlaneBuffers[1] + (y << 1) * tempWidth;'), (267, '\t\tco_src1 = co_src0 + tempWidth;'), (268, '\t\tcg_src0 = (INT8*) context->priv->PlaneBuffers[2] + (y << 1) * tempWidth;'), (269, '\t\tcg_src1 = cg_src0 + tempWidth;'), (285, 'void nsc_encode(NSC_CONTEXT* context, const BYTE* bmpdata, UINT32 rowstride)'), (287, '\tnsc_encode_argb_to_aycocg(context, bmpdata, rowstride);'), (291, '\t\tnsc_encode_subsampling(context);'), (295, 'static UINT32 nsc_rle_encode(BYTE* in, BYTE* out, UINT32 originalSize)')]}
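This diff also const-qualifies the input of nsc_rle_encode without touching its output format: a lone byte is stored literally, a run shorter than 256 is stored as the byte twice plus a count byte holding runlength - 2, and longer runs use a 0xFF escape followed by the 32-bit little-endian run length (the final four bytes of each plane are always copied verbatim). A sketch of a single run emission, assuming the caller has already reserved enough output space:

#include <stdint.h>

/* Emit one run in the NSC RLE format used by nsc_rle_encode.
 * Returns the number of bytes written. */
static uint32_t emit_run(uint8_t* out, uint8_t value, uint32_t runlength)
{
    if (runlength == 1)
    {
        out[0] = value;                    /* literal single byte */
        return 1;
    }

    out[0] = value;                        /* doubled byte marks a run */
    out[1] = value;

    if (runlength < 256)
    {
        out[2] = (uint8_t)(runlength - 2); /* short-run count byte */
        return 3;
    }

    out[2] = 0xFF;                         /* escape for long runs */
    out[3] = (uint8_t)(runlength);
    out[4] = (uint8_t)(runlength >> 8);
    out[5] = (uint8_t)(runlength >> 16);
    out[6] = (uint8_t)(runlength >> 24);
    return 7;
}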
44
18
513
3961
https://github.com/FreeRDP/FreeRDP
CVE-2018-8788
['CWE-787']
nsc_encode.c
nsc_encode_subsampling
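The method this record keys on, nsc_encode_subsampling, halves each chroma plane with a 2x2 box filter: every output sample is the mean of four signed 8-bit inputs, accumulated in 16 bits and shifted right by two. The kernel in isolation:

#include <stdint.h>

/* 2x2 box filter from nsc_encode_subsampling: each chroma output sample
 * is the arithmetic mean of a 2x2 block of signed 8-bit inputs. */
static uint8_t average_2x2(const int8_t* row0, const int8_t* row1)
{
    int16_t sum = (int16_t)row0[0] + (int16_t)row0[1]
                + (int16_t)row1[0] + (int16_t)row1[1];
    return (uint8_t)(sum >> 2);
}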
/** * FreeRDP: A Remote Desktop Protocol Implementation * NSCodec Encoder * * Copyright 2012 Vic Lee * Copyright 2016 Armin Novak <armin.novak@thincast.com> * Copyright 2016 Thincast Technologies GmbH * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include <stdio.h> #include <stdlib.h> #include <string.h> #include <winpr/crt.h> #include <freerdp/codec/nsc.h> #include <freerdp/codec/color.h> #include "nsc_types.h" #include "nsc_encode.h" static BOOL nsc_context_initialize_encode(NSC_CONTEXT* context) { int i; UINT32 length; UINT32 tempWidth; UINT32 tempHeight; tempWidth = ROUND_UP_TO(context->width, 8); tempHeight = ROUND_UP_TO(context->height, 2); /* The maximum length a decoded plane can reach in all cases */ length = tempWidth * tempHeight + 16; if (length > context->priv->PlaneBuffersLength) { for (i = 0; i < 5; i++) { BYTE* tmp = (BYTE*) realloc(context->priv->PlaneBuffers[i], length); if (!tmp) goto fail; context->priv->PlaneBuffers[i] = tmp; } context->priv->PlaneBuffersLength = length; } if (context->ChromaSubsamplingLevel) { context->OrgByteCount[0] = tempWidth * context->height; context->OrgByteCount[1] = tempWidth * tempHeight / 4; context->OrgByteCount[2] = tempWidth * tempHeight / 4; context->OrgByteCount[3] = context->width * context->height; } else { context->OrgByteCount[0] = context->width * context->height; context->OrgByteCount[1] = context->width * context->height; context->OrgByteCount[2] = context->width * context->height; context->OrgByteCount[3] = context->width * context->height; } return TRUE; fail: if (length > context->priv->PlaneBuffersLength) { for (i = 0; i < 5; i++) free(context->priv->PlaneBuffers[i]); } return FALSE; } static void nsc_encode_argb_to_aycocg(NSC_CONTEXT* context, const BYTE* data, UINT32 scanline) { UINT16 x; UINT16 y; UINT16 rw; BYTE ccl; const BYTE* src; BYTE* yplane = NULL; BYTE* coplane = NULL; BYTE* cgplane = NULL; BYTE* aplane = NULL; INT16 r_val; INT16 g_val; INT16 b_val; BYTE a_val; UINT32 tempWidth; tempWidth = ROUND_UP_TO(context->width, 8); rw = (context->ChromaSubsamplingLevel ? 
tempWidth : context->width); ccl = context->ColorLossLevel; for (y = 0; y < context->height; y++) { src = data + (context->height - 1 - y) * scanline; yplane = context->priv->PlaneBuffers[0] + y * rw; coplane = context->priv->PlaneBuffers[1] + y * rw; cgplane = context->priv->PlaneBuffers[2] + y * rw; aplane = context->priv->PlaneBuffers[3] + y * context->width; for (x = 0; x < context->width; x++) { switch (context->format) { case PIXEL_FORMAT_BGRX32: b_val = *src++; g_val = *src++; r_val = *src++; src++; a_val = 0xFF; break; case PIXEL_FORMAT_BGRA32: b_val = *src++; g_val = *src++; r_val = *src++; a_val = *src++; break; case PIXEL_FORMAT_RGBX32: r_val = *src++; g_val = *src++; b_val = *src++; src++; a_val = 0xFF; break; case PIXEL_FORMAT_RGBA32: r_val = *src++; g_val = *src++; b_val = *src++; a_val = *src++; break; case PIXEL_FORMAT_BGR24: b_val = *src++; g_val = *src++; r_val = *src++; a_val = 0xFF; break; case PIXEL_FORMAT_RGB24: r_val = *src++; g_val = *src++; b_val = *src++; a_val = 0xFF; break; case PIXEL_FORMAT_BGR16: b_val = (INT16)(((*(src + 1)) & 0xF8) | ((*(src + 1)) >> 5)); g_val = (INT16)((((*(src + 1)) & 0x07) << 5) | (((*src) & 0xE0) >> 3)); r_val = (INT16)((((*src) & 0x1F) << 3) | (((*src) >> 2) & 0x07)); a_val = 0xFF; src += 2; break; case PIXEL_FORMAT_RGB16: r_val = (INT16)(((*(src + 1)) & 0xF8) | ((*(src + 1)) >> 5)); g_val = (INT16)((((*(src + 1)) & 0x07) << 5) | (((*src) & 0xE0) >> 3)); b_val = (INT16)((((*src) & 0x1F) << 3) | (((*src) >> 2) & 0x07)); a_val = 0xFF; src += 2; break; case PIXEL_FORMAT_A4: { int shift; BYTE idx; shift = (7 - (x % 8)); idx = ((*src) >> shift) & 1; idx |= (((*(src + 1)) >> shift) & 1) << 1; idx |= (((*(src + 2)) >> shift) & 1) << 2; idx |= (((*(src + 3)) >> shift) & 1) << 3; idx *= 3; r_val = (INT16) context->palette[idx]; g_val = (INT16) context->palette[idx + 1]; b_val = (INT16) context->palette[idx + 2]; if (shift == 0) src += 4; } a_val = 0xFF; break; case PIXEL_FORMAT_RGB8: { int idx = (*src) * 3; r_val = (INT16) context->palette[idx]; g_val = (INT16) context->palette[idx + 1]; b_val = (INT16) context->palette[idx + 2]; src++; } a_val = 0xFF; break; default: r_val = g_val = b_val = a_val = 0; break; } *yplane++ = (BYTE)((r_val >> 2) + (g_val >> 1) + (b_val >> 2)); /* Perform color loss reduction here */ *coplane++ = (BYTE)((r_val - b_val) >> ccl); *cgplane++ = (BYTE)((-(r_val >> 1) + g_val - (b_val >> 1)) >> ccl); *aplane++ = a_val; } if (context->ChromaSubsamplingLevel && (x % 2) == 1) { *yplane = *(yplane - 1); *coplane = *(coplane - 1); *cgplane = *(cgplane - 1); } } if (context->ChromaSubsamplingLevel && (y % 2) == 1) { yplane = context->priv->PlaneBuffers[0] + y * rw; coplane = context->priv->PlaneBuffers[1] + y * rw; cgplane = context->priv->PlaneBuffers[2] + y * rw; CopyMemory(yplane, yplane - rw, rw); CopyMemory(coplane, coplane - rw, rw); CopyMemory(cgplane, cgplane - rw, rw); } } static void nsc_encode_subsampling(NSC_CONTEXT* context) { UINT16 x; UINT16 y; BYTE* co_dst; BYTE* cg_dst; INT8* co_src0; INT8* co_src1; INT8* cg_src0; INT8* cg_src1; UINT32 tempWidth; UINT32 tempHeight; tempWidth = ROUND_UP_TO(context->width, 8); tempHeight = ROUND_UP_TO(context->height, 2); for (y = 0; y < tempHeight >> 1; y++) { co_dst = context->priv->PlaneBuffers[1] + y * (tempWidth >> 1); cg_dst = context->priv->PlaneBuffers[2] + y * (tempWidth >> 1); co_src0 = (INT8*) context->priv->PlaneBuffers[1] + (y << 1) * tempWidth; co_src1 = co_src0 + tempWidth; cg_src0 = (INT8*) context->priv->PlaneBuffers[2] + (y << 1) * tempWidth; cg_src1 = cg_src0 
+ tempWidth; for (x = 0; x < tempWidth >> 1; x++) { *co_dst++ = (BYTE)(((INT16) * co_src0 + (INT16) * (co_src0 + 1) + (INT16) * co_src1 + (INT16) * (co_src1 + 1)) >> 2); *cg_dst++ = (BYTE)(((INT16) * cg_src0 + (INT16) * (cg_src0 + 1) + (INT16) * cg_src1 + (INT16) * (cg_src1 + 1)) >> 2); co_src0 += 2; co_src1 += 2; cg_src0 += 2; cg_src1 += 2; } } } void nsc_encode(NSC_CONTEXT* context, const BYTE* bmpdata, UINT32 rowstride) { nsc_encode_argb_to_aycocg(context, bmpdata, rowstride); if (context->ChromaSubsamplingLevel) { nsc_encode_subsampling(context); } } static UINT32 nsc_rle_encode(BYTE* in, BYTE* out, UINT32 originalSize) { UINT32 left; UINT32 runlength = 1; UINT32 planeSize = 0; left = originalSize; /** * We quit the loop if the running compressed size is larger than the original. * In such cases data will be sent uncompressed. */ while (left > 4 && planeSize < originalSize - 4) { if (left > 5 && *in == *(in + 1)) { runlength++; } else if (runlength == 1) { *out++ = *in; planeSize++; } else if (runlength < 256) { *out++ = *in; *out++ = *in; *out++ = runlength - 2; runlength = 1; planeSize += 3; } else { *out++ = *in; *out++ = *in; *out++ = 0xFF; *out++ = (runlength & 0x000000FF); *out++ = (runlength & 0x0000FF00) >> 8; *out++ = (runlength & 0x00FF0000) >> 16; *out++ = (runlength & 0xFF000000) >> 24; runlength = 1; planeSize += 7; } in++; left--; } if (planeSize < originalSize - 4) CopyMemory(out, in, 4); planeSize += 4; return planeSize; } static void nsc_rle_compress_data(NSC_CONTEXT* context) { UINT16 i; UINT32 planeSize; UINT32 originalSize; for (i = 0; i < 4; i++) { originalSize = context->OrgByteCount[i]; if (originalSize == 0) { planeSize = 0; } else { planeSize = nsc_rle_encode(context->priv->PlaneBuffers[i], context->priv->PlaneBuffers[4], originalSize); if (planeSize < originalSize) CopyMemory(context->priv->PlaneBuffers[i], context->priv->PlaneBuffers[4], planeSize); else planeSize = originalSize; } context->PlaneByteCount[i] = planeSize; } } UINT32 nsc_compute_byte_count(NSC_CONTEXT* context, UINT32* ByteCount, UINT32 width, UINT32 height) { UINT32 tempWidth; UINT32 tempHeight; UINT32 maxPlaneSize; tempWidth = ROUND_UP_TO(width, 8); tempHeight = ROUND_UP_TO(height, 2); maxPlaneSize = tempWidth * tempHeight + 16; if (context->ChromaSubsamplingLevel) { ByteCount[0] = tempWidth * height; ByteCount[1] = tempWidth * tempHeight / 4; ByteCount[2] = tempWidth * tempHeight / 4; ByteCount[3] = width * height; } else { ByteCount[0] = width * height; ByteCount[1] = width * height; ByteCount[2] = width * height; ByteCount[3] = width * height; } return maxPlaneSize; } NSC_MESSAGE* nsc_encode_messages(NSC_CONTEXT* context, const BYTE* data, UINT32 x, UINT32 y, UINT32 width, UINT32 height, UINT32 scanline, UINT32* numMessages, UINT32 maxDataSize) { UINT32 i, j, k; UINT32 dataOffset; UINT32 rows, cols; UINT32 BytesPerPixel; UINT32 MaxRegionWidth; UINT32 MaxRegionHeight; UINT32 ByteCount[4]; UINT32 MaxPlaneSize; UINT32 MaxMessageSize; NSC_MESSAGE* messages; UINT32 PaddedMaxPlaneSize; k = 0; MaxRegionWidth = 64 * 4; MaxRegionHeight = 64 * 2; BytesPerPixel = GetBytesPerPixel(context->format); rows = (width + (MaxRegionWidth - (width % MaxRegionWidth))) / MaxRegionWidth; cols = (height + (MaxRegionHeight - (height % MaxRegionHeight))) / MaxRegionHeight; *numMessages = rows * cols; MaxPlaneSize = nsc_compute_byte_count(context, (UINT32*) ByteCount, width, height); MaxMessageSize = ByteCount[0] + ByteCount[1] + ByteCount[2] + ByteCount[3] + 20; maxDataSize -= 1024; /* reserve enough space for 
headers */ messages = (NSC_MESSAGE*) calloc(*numMessages, sizeof(NSC_MESSAGE)); if (!messages) return NULL; for (i = 0; i < rows; i++) { for (j = 0; j < cols; j++) { messages[k].x = x + (i * MaxRegionWidth); messages[k].y = y + (j * MaxRegionHeight); messages[k].width = (i < (rows - 1)) ? MaxRegionWidth : width - (i * MaxRegionWidth); messages[k].height = (j < (cols - 1)) ? MaxRegionHeight : height - (j * MaxRegionHeight); messages[k].data = data; messages[k].scanline = scanline; messages[k].MaxPlaneSize = nsc_compute_byte_count(context, (UINT32*) messages[k].OrgByteCount, messages[k].width, messages[k].height); k++; } } *numMessages = k; for (i = 0; i < *numMessages; i++) { PaddedMaxPlaneSize = messages[i].MaxPlaneSize + 32; messages[i].PlaneBuffer = (BYTE*) BufferPool_Take(context->priv->PlanePool, PaddedMaxPlaneSize * 5); if (!messages[i].PlaneBuffer) goto fail; messages[i].PlaneBuffers[0] = (BYTE*) & (messages[i].PlaneBuffer[(PaddedMaxPlaneSize * 0) + 16]); messages[i].PlaneBuffers[1] = (BYTE*) & (messages[i].PlaneBuffer[(PaddedMaxPlaneSize * 1) + 16]); messages[i].PlaneBuffers[2] = (BYTE*) & (messages[i].PlaneBuffer[(PaddedMaxPlaneSize * 2) + 16]); messages[i].PlaneBuffers[3] = (BYTE*) & (messages[i].PlaneBuffer[(PaddedMaxPlaneSize * 3) + 16]); messages[i].PlaneBuffers[4] = (BYTE*) & (messages[i].PlaneBuffer[(PaddedMaxPlaneSize * 4) + 16]); } for (i = 0; i < *numMessages; i++) { context->width = messages[i].width; context->height = messages[i].height; context->OrgByteCount[0] = messages[i].OrgByteCount[0]; context->OrgByteCount[1] = messages[i].OrgByteCount[1]; context->OrgByteCount[2] = messages[i].OrgByteCount[2]; context->OrgByteCount[3] = messages[i].OrgByteCount[3]; context->priv->PlaneBuffersLength = messages[i].MaxPlaneSize; context->priv->PlaneBuffers[0] = messages[i].PlaneBuffers[0]; context->priv->PlaneBuffers[1] = messages[i].PlaneBuffers[1]; context->priv->PlaneBuffers[2] = messages[i].PlaneBuffers[2]; context->priv->PlaneBuffers[3] = messages[i].PlaneBuffers[3]; context->priv->PlaneBuffers[4] = messages[i].PlaneBuffers[4]; dataOffset = (messages[i].y * messages[i].scanline) + (messages[i].x * BytesPerPixel); PROFILER_ENTER(context->priv->prof_nsc_encode) context->encode(context, &data[dataOffset], scanline); PROFILER_EXIT(context->priv->prof_nsc_encode) PROFILER_ENTER(context->priv->prof_nsc_rle_compress_data) nsc_rle_compress_data(context); PROFILER_EXIT(context->priv->prof_nsc_rle_compress_data) messages[i].LumaPlaneByteCount = context->PlaneByteCount[0]; messages[i].OrangeChromaPlaneByteCount = context->PlaneByteCount[1]; messages[i].GreenChromaPlaneByteCount = context->PlaneByteCount[2]; messages[i].AlphaPlaneByteCount = context->PlaneByteCount[3]; messages[i].ColorLossLevel = context->ColorLossLevel; messages[i].ChromaSubsamplingLevel = context->ChromaSubsamplingLevel; } context->priv->PlaneBuffers[0] = NULL; context->priv->PlaneBuffers[1] = NULL; context->priv->PlaneBuffers[2] = NULL; context->priv->PlaneBuffers[3] = NULL; context->priv->PlaneBuffers[4] = NULL; return messages; fail: for (i = 0; i < *numMessages; i++) BufferPool_Return(context->priv->PlanePool, messages[i].PlaneBuffer); free(messages); return NULL; } BOOL nsc_write_message(NSC_CONTEXT* context, wStream* s, NSC_MESSAGE* message) { UINT32 totalPlaneByteCount; totalPlaneByteCount = message->LumaPlaneByteCount + message->OrangeChromaPlaneByteCount + message->GreenChromaPlaneByteCount + message->AlphaPlaneByteCount; if (!Stream_EnsureRemainingCapacity(s, 20 + totalPlaneByteCount)) return -1; 
Stream_Write_UINT32(s, message->LumaPlaneByteCount); /* LumaPlaneByteCount (4 bytes) */ Stream_Write_UINT32(s, message->OrangeChromaPlaneByteCount); /* OrangeChromaPlaneByteCount (4 bytes) */ Stream_Write_UINT32(s, message->GreenChromaPlaneByteCount); /* GreenChromaPlaneByteCount (4 bytes) */ Stream_Write_UINT32(s, message->AlphaPlaneByteCount); /* AlphaPlaneByteCount (4 bytes) */ Stream_Write_UINT8(s, message->ColorLossLevel); /* ColorLossLevel (1 byte) */ Stream_Write_UINT8(s, message->ChromaSubsamplingLevel); /* ChromaSubsamplingLevel (1 byte) */ Stream_Write_UINT16(s, 0); /* Reserved (2 bytes) */ if (message->LumaPlaneByteCount) Stream_Write(s, message->PlaneBuffers[0], message->LumaPlaneByteCount); /* LumaPlane */ if (message->OrangeChromaPlaneByteCount) Stream_Write(s, message->PlaneBuffers[1], message->OrangeChromaPlaneByteCount); /* OrangeChromaPlane */ if (message->GreenChromaPlaneByteCount) Stream_Write(s, message->PlaneBuffers[2], message->GreenChromaPlaneByteCount); /* GreenChromaPlane */ if (message->AlphaPlaneByteCount) Stream_Write(s, message->PlaneBuffers[3], message->AlphaPlaneByteCount); /* AlphaPlane */ return TRUE; } void nsc_message_free(NSC_CONTEXT* context, NSC_MESSAGE* message) { BufferPool_Return(context->priv->PlanePool, message->PlaneBuffer); } BOOL nsc_compose_message(NSC_CONTEXT* context, wStream* s, const BYTE* data, UINT32 width, UINT32 height, UINT32 scanline) { NSC_MESSAGE s_message = { 0 }; NSC_MESSAGE* message = &s_message; context->width = width; context->height = height; if (!nsc_context_initialize_encode(context)) return FALSE; /* ARGB to AYCoCg conversion, chroma subsampling and colorloss reduction */ PROFILER_ENTER(context->priv->prof_nsc_encode) context->encode(context, data, scanline); PROFILER_EXIT(context->priv->prof_nsc_encode) /* RLE encode */ PROFILER_ENTER(context->priv->prof_nsc_rle_compress_data) nsc_rle_compress_data(context); PROFILER_EXIT(context->priv->prof_nsc_rle_compress_data) message->PlaneBuffers[0] = context->priv->PlaneBuffers[0]; message->PlaneBuffers[1] = context->priv->PlaneBuffers[1]; message->PlaneBuffers[2] = context->priv->PlaneBuffers[2]; message->PlaneBuffers[3] = context->priv->PlaneBuffers[3]; message->LumaPlaneByteCount = context->PlaneByteCount[0]; message->OrangeChromaPlaneByteCount = context->PlaneByteCount[1]; message->GreenChromaPlaneByteCount = context->PlaneByteCount[2]; message->AlphaPlaneByteCount = context->PlaneByteCount[3]; message->ColorLossLevel = context->ColorLossLevel; message->ChromaSubsamplingLevel = context->ChromaSubsamplingLevel; return nsc_write_message(context, s, message); }
/** * FreeRDP: A Remote Desktop Protocol Implementation * NSCodec Encoder * * Copyright 2012 Vic Lee * Copyright 2016 Armin Novak <armin.novak@thincast.com> * Copyright 2016 Thincast Technologies GmbH * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include <stdio.h> #include <stdlib.h> #include <string.h> #include <winpr/crt.h> #include <freerdp/codec/nsc.h> #include <freerdp/codec/color.h> #include "nsc_types.h" #include "nsc_encode.h" static BOOL nsc_context_initialize_encode(NSC_CONTEXT* context) { int i; UINT32 length; UINT32 tempWidth; UINT32 tempHeight; tempWidth = ROUND_UP_TO(context->width, 8); tempHeight = ROUND_UP_TO(context->height, 2); /* The maximum length a decoded plane can reach in all cases */ length = tempWidth * tempHeight + 16; if (length > context->priv->PlaneBuffersLength) { for (i = 0; i < 5; i++) { BYTE* tmp = (BYTE*) realloc(context->priv->PlaneBuffers[i], length); if (!tmp) goto fail; context->priv->PlaneBuffers[i] = tmp; } context->priv->PlaneBuffersLength = length; } if (context->ChromaSubsamplingLevel) { context->OrgByteCount[0] = tempWidth * context->height; context->OrgByteCount[1] = tempWidth * tempHeight / 4; context->OrgByteCount[2] = tempWidth * tempHeight / 4; context->OrgByteCount[3] = context->width * context->height; } else { context->OrgByteCount[0] = context->width * context->height; context->OrgByteCount[1] = context->width * context->height; context->OrgByteCount[2] = context->width * context->height; context->OrgByteCount[3] = context->width * context->height; } return TRUE; fail: if (length > context->priv->PlaneBuffersLength) { for (i = 0; i < 5; i++) free(context->priv->PlaneBuffers[i]); } return FALSE; } static BOOL nsc_encode_argb_to_aycocg(NSC_CONTEXT* context, const BYTE* data, UINT32 scanline) { UINT16 x; UINT16 y; UINT16 rw; BYTE ccl; const BYTE* src; BYTE* yplane = NULL; BYTE* coplane = NULL; BYTE* cgplane = NULL; BYTE* aplane = NULL; INT16 r_val; INT16 g_val; INT16 b_val; BYTE a_val; UINT32 tempWidth; if (!context || data || (scanline == 0)) return FALSE; tempWidth = ROUND_UP_TO(context->width, 8); rw = (context->ChromaSubsamplingLevel ? 
tempWidth : context->width); ccl = context->ColorLossLevel; if (context->priv->PlaneBuffersLength < rw * scanline) return FALSE; if (rw < scanline * 2) return FALSE; for (y = 0; y < context->height; y++) { src = data + (context->height - 1 - y) * scanline; yplane = context->priv->PlaneBuffers[0] + y * rw; coplane = context->priv->PlaneBuffers[1] + y * rw; cgplane = context->priv->PlaneBuffers[2] + y * rw; aplane = context->priv->PlaneBuffers[3] + y * context->width; for (x = 0; x < context->width; x++) { switch (context->format) { case PIXEL_FORMAT_BGRX32: b_val = *src++; g_val = *src++; r_val = *src++; src++; a_val = 0xFF; break; case PIXEL_FORMAT_BGRA32: b_val = *src++; g_val = *src++; r_val = *src++; a_val = *src++; break; case PIXEL_FORMAT_RGBX32: r_val = *src++; g_val = *src++; b_val = *src++; src++; a_val = 0xFF; break; case PIXEL_FORMAT_RGBA32: r_val = *src++; g_val = *src++; b_val = *src++; a_val = *src++; break; case PIXEL_FORMAT_BGR24: b_val = *src++; g_val = *src++; r_val = *src++; a_val = 0xFF; break; case PIXEL_FORMAT_RGB24: r_val = *src++; g_val = *src++; b_val = *src++; a_val = 0xFF; break; case PIXEL_FORMAT_BGR16: b_val = (INT16)(((*(src + 1)) & 0xF8) | ((*(src + 1)) >> 5)); g_val = (INT16)((((*(src + 1)) & 0x07) << 5) | (((*src) & 0xE0) >> 3)); r_val = (INT16)((((*src) & 0x1F) << 3) | (((*src) >> 2) & 0x07)); a_val = 0xFF; src += 2; break; case PIXEL_FORMAT_RGB16: r_val = (INT16)(((*(src + 1)) & 0xF8) | ((*(src + 1)) >> 5)); g_val = (INT16)((((*(src + 1)) & 0x07) << 5) | (((*src) & 0xE0) >> 3)); b_val = (INT16)((((*src) & 0x1F) << 3) | (((*src) >> 2) & 0x07)); a_val = 0xFF; src += 2; break; case PIXEL_FORMAT_A4: { int shift; BYTE idx; shift = (7 - (x % 8)); idx = ((*src) >> shift) & 1; idx |= (((*(src + 1)) >> shift) & 1) << 1; idx |= (((*(src + 2)) >> shift) & 1) << 2; idx |= (((*(src + 3)) >> shift) & 1) << 3; idx *= 3; r_val = (INT16) context->palette[idx]; g_val = (INT16) context->palette[idx + 1]; b_val = (INT16) context->palette[idx + 2]; if (shift == 0) src += 4; } a_val = 0xFF; break; case PIXEL_FORMAT_RGB8: { int idx = (*src) * 3; r_val = (INT16) context->palette[idx]; g_val = (INT16) context->palette[idx + 1]; b_val = (INT16) context->palette[idx + 2]; src++; } a_val = 0xFF; break; default: r_val = g_val = b_val = a_val = 0; break; } *yplane++ = (BYTE)((r_val >> 2) + (g_val >> 1) + (b_val >> 2)); /* Perform color loss reduction here */ *coplane++ = (BYTE)((r_val - b_val) >> ccl); *cgplane++ = (BYTE)((-(r_val >> 1) + g_val - (b_val >> 1)) >> ccl); *aplane++ = a_val; } if (context->ChromaSubsamplingLevel && (x % 2) == 1) { *yplane = *(yplane - 1); *coplane = *(coplane - 1); *cgplane = *(cgplane - 1); } } if (context->ChromaSubsamplingLevel && (y % 2) == 1) { yplane = context->priv->PlaneBuffers[0] + y * rw; coplane = context->priv->PlaneBuffers[1] + y * rw; cgplane = context->priv->PlaneBuffers[2] + y * rw; CopyMemory(yplane, yplane - rw, rw); CopyMemory(coplane, coplane - rw, rw); CopyMemory(cgplane, cgplane - rw, rw); } return TRUE; } static BOOL nsc_encode_subsampling(NSC_CONTEXT* context) { UINT16 x; UINT16 y; UINT32 tempWidth; UINT32 tempHeight; if (!context) return FALSE; tempWidth = ROUND_UP_TO(context->width, 8); tempHeight = ROUND_UP_TO(context->height, 2); if (tempHeight == 0) return FALSE; if (tempWidth > context->priv->PlaneBuffersLength / tempHeight) return FALSE; for (y = 0; y < tempHeight >> 1; y++) { BYTE* co_dst = context->priv->PlaneBuffers[1] + y * (tempWidth >> 1); BYTE* cg_dst = context->priv->PlaneBuffers[2] + y * (tempWidth >> 1); const INT8* 
co_src0 = (INT8*) context->priv->PlaneBuffers[1] + (y << 1) * tempWidth; const INT8* co_src1 = co_src0 + tempWidth; const INT8* cg_src0 = (INT8*) context->priv->PlaneBuffers[2] + (y << 1) * tempWidth; const INT8* cg_src1 = cg_src0 + tempWidth; for (x = 0; x < tempWidth >> 1; x++) { *co_dst++ = (BYTE)(((INT16) * co_src0 + (INT16) * (co_src0 + 1) + (INT16) * co_src1 + (INT16) * (co_src1 + 1)) >> 2); *cg_dst++ = (BYTE)(((INT16) * cg_src0 + (INT16) * (cg_src0 + 1) + (INT16) * cg_src1 + (INT16) * (cg_src1 + 1)) >> 2); co_src0 += 2; co_src1 += 2; cg_src0 += 2; cg_src1 += 2; } } return TRUE; } BOOL nsc_encode(NSC_CONTEXT* context, const BYTE* bmpdata, UINT32 rowstride) { if (!context || !bmpdata || (rowstride == 0)) return FALSE; if (!nsc_encode_argb_to_aycocg(context, bmpdata, rowstride)) return FALSE; if (context->ChromaSubsamplingLevel) { if (!nsc_encode_subsampling(context)) return FALSE; } return TRUE; } static UINT32 nsc_rle_encode(const BYTE* in, BYTE* out, UINT32 originalSize) { UINT32 left; UINT32 runlength = 1; UINT32 planeSize = 0; left = originalSize; /** * We quit the loop if the running compressed size is larger than the original. * In such cases data will be sent uncompressed. */ while (left > 4 && planeSize < originalSize - 4) { if (left > 5 && *in == *(in + 1)) { runlength++; } else if (runlength == 1) { *out++ = *in; planeSize++; } else if (runlength < 256) { *out++ = *in; *out++ = *in; *out++ = runlength - 2; runlength = 1; planeSize += 3; } else { *out++ = *in; *out++ = *in; *out++ = 0xFF; *out++ = (runlength & 0x000000FF); *out++ = (runlength & 0x0000FF00) >> 8; *out++ = (runlength & 0x00FF0000) >> 16; *out++ = (runlength & 0xFF000000) >> 24; runlength = 1; planeSize += 7; } in++; left--; } if (planeSize < originalSize - 4) CopyMemory(out, in, 4); planeSize += 4; return planeSize; } static void nsc_rle_compress_data(NSC_CONTEXT* context) { UINT16 i; UINT32 planeSize; UINT32 originalSize; for (i = 0; i < 4; i++) { originalSize = context->OrgByteCount[i]; if (originalSize == 0) { planeSize = 0; } else { planeSize = nsc_rle_encode(context->priv->PlaneBuffers[i], context->priv->PlaneBuffers[4], originalSize); if (planeSize < originalSize) CopyMemory(context->priv->PlaneBuffers[i], context->priv->PlaneBuffers[4], planeSize); else planeSize = originalSize; } context->PlaneByteCount[i] = planeSize; } } UINT32 nsc_compute_byte_count(NSC_CONTEXT* context, UINT32* ByteCount, UINT32 width, UINT32 height) { UINT32 tempWidth; UINT32 tempHeight; UINT32 maxPlaneSize; tempWidth = ROUND_UP_TO(width, 8); tempHeight = ROUND_UP_TO(height, 2); maxPlaneSize = tempWidth * tempHeight + 16; if (context->ChromaSubsamplingLevel) { ByteCount[0] = tempWidth * height; ByteCount[1] = tempWidth * tempHeight / 4; ByteCount[2] = tempWidth * tempHeight / 4; ByteCount[3] = width * height; } else { ByteCount[0] = width * height; ByteCount[1] = width * height; ByteCount[2] = width * height; ByteCount[3] = width * height; } return maxPlaneSize; } NSC_MESSAGE* nsc_encode_messages(NSC_CONTEXT* context, const BYTE* data, UINT32 x, UINT32 y, UINT32 width, UINT32 height, UINT32 scanline, UINT32* numMessages, UINT32 maxDataSize) { UINT32 i, j, k; UINT32 dataOffset; UINT32 rows, cols; UINT32 BytesPerPixel; UINT32 MaxRegionWidth; UINT32 MaxRegionHeight; UINT32 ByteCount[4]; UINT32 MaxPlaneSize; UINT32 MaxMessageSize; NSC_MESSAGE* messages; UINT32 PaddedMaxPlaneSize; k = 0; MaxRegionWidth = 64 * 4; MaxRegionHeight = 64 * 2; BytesPerPixel = GetBytesPerPixel(context->format); rows = (width + (MaxRegionWidth - (width % 
MaxRegionWidth))) / MaxRegionWidth; cols = (height + (MaxRegionHeight - (height % MaxRegionHeight))) / MaxRegionHeight; *numMessages = rows * cols; MaxPlaneSize = nsc_compute_byte_count(context, (UINT32*) ByteCount, width, height); MaxMessageSize = ByteCount[0] + ByteCount[1] + ByteCount[2] + ByteCount[3] + 20; maxDataSize -= 1024; /* reserve enough space for headers */ messages = (NSC_MESSAGE*) calloc(*numMessages, sizeof(NSC_MESSAGE)); if (!messages) return NULL; for (i = 0; i < rows; i++) { for (j = 0; j < cols; j++) { messages[k].x = x + (i * MaxRegionWidth); messages[k].y = y + (j * MaxRegionHeight); messages[k].width = (i < (rows - 1)) ? MaxRegionWidth : width - (i * MaxRegionWidth); messages[k].height = (j < (cols - 1)) ? MaxRegionHeight : height - (j * MaxRegionHeight); messages[k].data = data; messages[k].scanline = scanline; messages[k].MaxPlaneSize = nsc_compute_byte_count(context, (UINT32*) messages[k].OrgByteCount, messages[k].width, messages[k].height); k++; } } *numMessages = k; for (i = 0; i < *numMessages; i++) { PaddedMaxPlaneSize = messages[i].MaxPlaneSize + 32; messages[i].PlaneBuffer = (BYTE*) BufferPool_Take(context->priv->PlanePool, PaddedMaxPlaneSize * 5); if (!messages[i].PlaneBuffer) goto fail; messages[i].PlaneBuffers[0] = (BYTE*) & (messages[i].PlaneBuffer[(PaddedMaxPlaneSize * 0) + 16]); messages[i].PlaneBuffers[1] = (BYTE*) & (messages[i].PlaneBuffer[(PaddedMaxPlaneSize * 1) + 16]); messages[i].PlaneBuffers[2] = (BYTE*) & (messages[i].PlaneBuffer[(PaddedMaxPlaneSize * 2) + 16]); messages[i].PlaneBuffers[3] = (BYTE*) & (messages[i].PlaneBuffer[(PaddedMaxPlaneSize * 3) + 16]); messages[i].PlaneBuffers[4] = (BYTE*) & (messages[i].PlaneBuffer[(PaddedMaxPlaneSize * 4) + 16]); } for (i = 0; i < *numMessages; i++) { context->width = messages[i].width; context->height = messages[i].height; context->OrgByteCount[0] = messages[i].OrgByteCount[0]; context->OrgByteCount[1] = messages[i].OrgByteCount[1]; context->OrgByteCount[2] = messages[i].OrgByteCount[2]; context->OrgByteCount[3] = messages[i].OrgByteCount[3]; context->priv->PlaneBuffersLength = messages[i].MaxPlaneSize; context->priv->PlaneBuffers[0] = messages[i].PlaneBuffers[0]; context->priv->PlaneBuffers[1] = messages[i].PlaneBuffers[1]; context->priv->PlaneBuffers[2] = messages[i].PlaneBuffers[2]; context->priv->PlaneBuffers[3] = messages[i].PlaneBuffers[3]; context->priv->PlaneBuffers[4] = messages[i].PlaneBuffers[4]; dataOffset = (messages[i].y * messages[i].scanline) + (messages[i].x * BytesPerPixel); PROFILER_ENTER(context->priv->prof_nsc_encode) context->encode(context, &data[dataOffset], scanline); PROFILER_EXIT(context->priv->prof_nsc_encode) PROFILER_ENTER(context->priv->prof_nsc_rle_compress_data) nsc_rle_compress_data(context); PROFILER_EXIT(context->priv->prof_nsc_rle_compress_data) messages[i].LumaPlaneByteCount = context->PlaneByteCount[0]; messages[i].OrangeChromaPlaneByteCount = context->PlaneByteCount[1]; messages[i].GreenChromaPlaneByteCount = context->PlaneByteCount[2]; messages[i].AlphaPlaneByteCount = context->PlaneByteCount[3]; messages[i].ColorLossLevel = context->ColorLossLevel; messages[i].ChromaSubsamplingLevel = context->ChromaSubsamplingLevel; } context->priv->PlaneBuffers[0] = NULL; context->priv->PlaneBuffers[1] = NULL; context->priv->PlaneBuffers[2] = NULL; context->priv->PlaneBuffers[3] = NULL; context->priv->PlaneBuffers[4] = NULL; return messages; fail: for (i = 0; i < *numMessages; i++) BufferPool_Return(context->priv->PlanePool, messages[i].PlaneBuffer); free(messages); return 
NULL; } BOOL nsc_write_message(NSC_CONTEXT* context, wStream* s, NSC_MESSAGE* message) { UINT32 totalPlaneByteCount; totalPlaneByteCount = message->LumaPlaneByteCount + message->OrangeChromaPlaneByteCount + message->GreenChromaPlaneByteCount + message->AlphaPlaneByteCount; if (!Stream_EnsureRemainingCapacity(s, 20 + totalPlaneByteCount)) return -1; Stream_Write_UINT32(s, message->LumaPlaneByteCount); /* LumaPlaneByteCount (4 bytes) */ Stream_Write_UINT32(s, message->OrangeChromaPlaneByteCount); /* OrangeChromaPlaneByteCount (4 bytes) */ Stream_Write_UINT32(s, message->GreenChromaPlaneByteCount); /* GreenChromaPlaneByteCount (4 bytes) */ Stream_Write_UINT32(s, message->AlphaPlaneByteCount); /* AlphaPlaneByteCount (4 bytes) */ Stream_Write_UINT8(s, message->ColorLossLevel); /* ColorLossLevel (1 byte) */ Stream_Write_UINT8(s, message->ChromaSubsamplingLevel); /* ChromaSubsamplingLevel (1 byte) */ Stream_Write_UINT16(s, 0); /* Reserved (2 bytes) */ if (message->LumaPlaneByteCount) Stream_Write(s, message->PlaneBuffers[0], message->LumaPlaneByteCount); /* LumaPlane */ if (message->OrangeChromaPlaneByteCount) Stream_Write(s, message->PlaneBuffers[1], message->OrangeChromaPlaneByteCount); /* OrangeChromaPlane */ if (message->GreenChromaPlaneByteCount) Stream_Write(s, message->PlaneBuffers[2], message->GreenChromaPlaneByteCount); /* GreenChromaPlane */ if (message->AlphaPlaneByteCount) Stream_Write(s, message->PlaneBuffers[3], message->AlphaPlaneByteCount); /* AlphaPlane */ return TRUE; } void nsc_message_free(NSC_CONTEXT* context, NSC_MESSAGE* message) { BufferPool_Return(context->priv->PlanePool, message->PlaneBuffer); } BOOL nsc_compose_message(NSC_CONTEXT* context, wStream* s, const BYTE* data, UINT32 width, UINT32 height, UINT32 scanline) { NSC_MESSAGE s_message = { 0 }; NSC_MESSAGE* message = &s_message; context->width = width; context->height = height; if (!nsc_context_initialize_encode(context)) return FALSE; /* ARGB to AYCoCg conversion, chroma subsampling and colorloss reduction */ PROFILER_ENTER(context->priv->prof_nsc_encode) context->encode(context, data, scanline); PROFILER_EXIT(context->priv->prof_nsc_encode) /* RLE encode */ PROFILER_ENTER(context->priv->prof_nsc_rle_compress_data) nsc_rle_compress_data(context); PROFILER_EXIT(context->priv->prof_nsc_rle_compress_data) message->PlaneBuffers[0] = context->priv->PlaneBuffers[0]; message->PlaneBuffers[1] = context->priv->PlaneBuffers[1]; message->PlaneBuffers[2] = context->priv->PlaneBuffers[2]; message->PlaneBuffers[3] = context->priv->PlaneBuffers[3]; message->LumaPlaneByteCount = context->PlaneByteCount[0]; message->OrangeChromaPlaneByteCount = context->PlaneByteCount[1]; message->GreenChromaPlaneByteCount = context->PlaneByteCount[2]; message->AlphaPlaneByteCount = context->PlaneByteCount[3]; message->ColorLossLevel = context->ColorLossLevel; message->ChromaSubsamplingLevel = context->ChromaSubsamplingLevel; return nsc_write_message(context, s, message); }
static void nsc_encode_subsampling(NSC_CONTEXT* context)
{
	UINT16 x;
	UINT16 y;
	BYTE* co_dst;
	BYTE* cg_dst;
	INT8* co_src0;
	INT8* co_src1;
	INT8* cg_src0;
	INT8* cg_src1;
	UINT32 tempWidth;
	UINT32 tempHeight;
	tempWidth = ROUND_UP_TO(context->width, 8);
	tempHeight = ROUND_UP_TO(context->height, 2);

	for (y = 0; y < tempHeight >> 1; y++)
	{
		co_dst = context->priv->PlaneBuffers[1] + y * (tempWidth >> 1);
		cg_dst = context->priv->PlaneBuffers[2] + y * (tempWidth >> 1);
		co_src0 = (INT8*) context->priv->PlaneBuffers[1] + (y << 1) * tempWidth;
		co_src1 = co_src0 + tempWidth;
		cg_src0 = (INT8*) context->priv->PlaneBuffers[2] + (y << 1) * tempWidth;
		cg_src1 = cg_src0 + tempWidth;

		for (x = 0; x < tempWidth >> 1; x++)
		{
			*co_dst++ = (BYTE)(((INT16)*co_src0 + (INT16)*(co_src0 + 1) +
			                    (INT16)*co_src1 + (INT16)*(co_src1 + 1)) >> 2);
			*cg_dst++ = (BYTE)(((INT16)*cg_src0 + (INT16)*(cg_src0 + 1) +
			                    (INT16)*cg_src1 + (INT16)*(cg_src1 + 1)) >> 2);
			co_src0 += 2;
			co_src1 += 2;
			cg_src0 += 2;
			cg_src1 += 2;
		}
	}
}
static BOOL nsc_encode_subsampling(NSC_CONTEXT* context)
{
	UINT16 x;
	UINT16 y;
	UINT32 tempWidth;
	UINT32 tempHeight;

	if (!context)
		return FALSE;

	tempWidth = ROUND_UP_TO(context->width, 8);
	tempHeight = ROUND_UP_TO(context->height, 2);

	if (tempHeight == 0)
		return FALSE;

	if (tempWidth > context->priv->PlaneBuffersLength / tempHeight)
		return FALSE;

	for (y = 0; y < tempHeight >> 1; y++)
	{
		BYTE* co_dst = context->priv->PlaneBuffers[1] + y * (tempWidth >> 1);
		BYTE* cg_dst = context->priv->PlaneBuffers[2] + y * (tempWidth >> 1);
		const INT8* co_src0 = (INT8*) context->priv->PlaneBuffers[1] + (y << 1) * tempWidth;
		const INT8* co_src1 = co_src0 + tempWidth;
		const INT8* cg_src0 = (INT8*) context->priv->PlaneBuffers[2] + (y << 1) * tempWidth;
		const INT8* cg_src1 = cg_src0 + tempWidth;

		for (x = 0; x < tempWidth >> 1; x++)
		{
			*co_dst++ = (BYTE)(((INT16)*co_src0 + (INT16)*(co_src0 + 1) +
			                    (INT16)*co_src1 + (INT16)*(co_src1 + 1)) >> 2);
			*cg_dst++ = (BYTE)(((INT16)*cg_src0 + (INT16)*(cg_src0 + 1) +
			                    (INT16)*cg_src1 + (INT16)*(cg_src1 + 1)) >> 2);
			co_src0 += 2;
			co_src1 += 2;
			cg_src0 += 2;
			cg_src1 += 2;
		}
	}

	return TRUE;
}
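Aside on the hardened nsc_encode_subsampling above: the pre-fix loop writes averaged chroma samples into PlaneBuffers[1]/[2] without ever checking that the rounded-up plane area fits in PlaneBuffersLength, which is the out-of-bounds write behind CVE-2018-8788 (CWE-787). The patched guard divides the capacity by one dimension instead of multiplying the two dimensions, which also avoids a 32-bit wraparound when both rounded dimensions are large. A minimal sketch of that check pattern, using standard C types rather than the winpr typedefs:

	#include <stdbool.h>
	#include <stdint.h>

	/* Overflow-safe capacity test mirroring the patched guard: testing
	 * `w * h > capacity` directly can wrap in 32-bit arithmetic, so
	 * divide the capacity by one factor instead. For integer w, h,
	 * capacity this rejects exactly the cases where w * h > capacity. */
	static bool plane_fits(uint32_t w, uint32_t h, uint32_t capacity)
	{
		if (h == 0)               /* guard the division, as the patch does */
			return false;

		return w <= capacity / h;
	}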
{'added': [(54, ''), (91, 'static BOOL nsc_encode_argb_to_aycocg(NSC_CONTEXT* context, const BYTE* data,'), (108, ''), (109, '\tif (!context || data || (scanline == 0))'), (110, '\t\treturn FALSE;'), (111, ''), (116, '\tif (context->priv->PlaneBuffersLength < rw * scanline)'), (117, '\t\treturn FALSE;'), (118, ''), (119, '\tif (rw < scanline * 2)'), (120, '\t\treturn FALSE;'), (121, ''), (256, ''), (257, '\treturn TRUE;'), (260, 'static BOOL nsc_encode_subsampling(NSC_CONTEXT* context)'), (266, ''), (267, '\tif (!context)'), (268, '\t\treturn FALSE;'), (269, ''), (273, '\tif (tempHeight == 0)'), (274, '\t\treturn FALSE;'), (275, ''), (276, '\tif (tempWidth > context->priv->PlaneBuffersLength / tempHeight)'), (277, '\t\treturn FALSE;'), (278, ''), (281, '\t\tBYTE* co_dst = context->priv->PlaneBuffers[1] + y * (tempWidth >> 1);'), (282, '\t\tBYTE* cg_dst = context->priv->PlaneBuffers[2] + y * (tempWidth >> 1);'), (283, '\t\tconst INT8* co_src0 = (INT8*) context->priv->PlaneBuffers[1] + (y << 1) * tempWidth;'), (284, '\t\tconst INT8* co_src1 = co_src0 + tempWidth;'), (285, '\t\tconst INT8* cg_src0 = (INT8*) context->priv->PlaneBuffers[2] + (y << 1) * tempWidth;'), (286, '\t\tconst INT8* cg_src1 = cg_src0 + tempWidth;'), (300, ''), (301, '\treturn TRUE;'), (304, 'BOOL nsc_encode(NSC_CONTEXT* context, const BYTE* bmpdata, UINT32 rowstride)'), (306, '\tif (!context || !bmpdata || (rowstride == 0))'), (307, '\t\treturn FALSE;'), (308, ''), (309, '\tif (!nsc_encode_argb_to_aycocg(context, bmpdata, rowstride))'), (310, '\t\treturn FALSE;'), (314, '\t\tif (!nsc_encode_subsampling(context))'), (315, '\t\t\treturn FALSE;'), (317, ''), (318, '\treturn TRUE;'), (321, 'static UINT32 nsc_rle_encode(const BYTE* in, BYTE* out, UINT32 originalSize)')], 'deleted': [(90, 'static void nsc_encode_argb_to_aycocg(NSC_CONTEXT* context, const BYTE* data,'), (247, 'static void nsc_encode_subsampling(NSC_CONTEXT* context)'), (251, '\tBYTE* co_dst;'), (252, '\tBYTE* cg_dst;'), (253, '\tINT8* co_src0;'), (254, '\tINT8* co_src1;'), (255, '\tINT8* cg_src0;'), (256, '\tINT8* cg_src1;'), (264, '\t\tco_dst = context->priv->PlaneBuffers[1] + y * (tempWidth >> 1);'), (265, '\t\tcg_dst = context->priv->PlaneBuffers[2] + y * (tempWidth >> 1);'), (266, '\t\tco_src0 = (INT8*) context->priv->PlaneBuffers[1] + (y << 1) * tempWidth;'), (267, '\t\tco_src1 = co_src0 + tempWidth;'), (268, '\t\tcg_src0 = (INT8*) context->priv->PlaneBuffers[2] + (y << 1) * tempWidth;'), (269, '\t\tcg_src1 = cg_src0 + tempWidth;'), (285, 'void nsc_encode(NSC_CONTEXT* context, const BYTE* bmpdata, UINT32 rowstride)'), (287, '\tnsc_encode_argb_to_aycocg(context, bmpdata, rowstride);'), (291, '\t\tnsc_encode_subsampling(context);'), (295, 'static UINT32 nsc_rle_encode(BYTE* in, BYTE* out, UINT32 originalSize)')]}
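One observation on the diff as recorded: added line 109 reads `if (!context || data || (scanline == 0))`, i.e. it tests `data` rather than `!data`, so any non-NULL input buffer would make nsc_encode_argb_to_aycocg return FALSE. That looks like an inversion slip in the committed patch; a null-pointer guard would presumably be written as in the second form below. (The neighbouring guard `rw < scanline * 2` also appears to compare a pixel count against a byte stride, but it is left here as recorded.)

	/* Guard as recorded at added line 109: */
	if (!context || data || (scanline == 0))
		return FALSE;

	/* Presumably intended null-pointer check: */
	if (!context || !data || (scanline == 0))
		return FALSE;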
44
18
513
3961
https://github.com/FreeRDP/FreeRDP
CVE-2018-8788
['CWE-787']
nsc_encode.c
nsc_rle_encode
(code_before: identical to the pre-fix nsc_encode.c listing earlier in this record; omitted as a verbatim duplicate.)
(code_after: identical to the patched nsc_encode.c listing above; omitted as a verbatim duplicate.)
static UINT32 nsc_rle_encode(BYTE* in, BYTE* out, UINT32 originalSize)
{
	UINT32 left;
	UINT32 runlength = 1;
	UINT32 planeSize = 0;
	left = originalSize;

	/**
	 * We quit the loop if the running compressed size is larger than the original.
	 * In such cases data will be sent uncompressed.
	 */
	while (left > 4 && planeSize < originalSize - 4)
	{
		if (left > 5 && *in == *(in + 1))
		{
			runlength++;
		}
		else if (runlength == 1)
		{
			*out++ = *in;
			planeSize++;
		}
		else if (runlength < 256)
		{
			*out++ = *in;
			*out++ = *in;
			*out++ = runlength - 2;
			runlength = 1;
			planeSize += 3;
		}
		else
		{
			*out++ = *in;
			*out++ = *in;
			*out++ = 0xFF;
			*out++ = (runlength & 0x000000FF);
			*out++ = (runlength & 0x0000FF00) >> 8;
			*out++ = (runlength & 0x00FF0000) >> 16;
			*out++ = (runlength & 0xFF000000) >> 24;
			runlength = 1;
			planeSize += 7;
		}

		in++;
		left--;
	}

	if (planeSize < originalSize - 4)
		CopyMemory(out, in, 4);

	planeSize += 4;
	return planeSize;
}
static UINT32 nsc_rle_encode(const BYTE* in, BYTE* out, UINT32 originalSize)
{
	UINT32 left;
	UINT32 runlength = 1;
	UINT32 planeSize = 0;
	left = originalSize;

	/**
	 * We quit the loop if the running compressed size is larger than the original.
	 * In such cases data will be sent uncompressed.
	 */
	while (left > 4 && planeSize < originalSize - 4)
	{
		if (left > 5 && *in == *(in + 1))
		{
			runlength++;
		}
		else if (runlength == 1)
		{
			*out++ = *in;
			planeSize++;
		}
		else if (runlength < 256)
		{
			*out++ = *in;
			*out++ = *in;
			*out++ = runlength - 2;
			runlength = 1;
			planeSize += 3;
		}
		else
		{
			*out++ = *in;
			*out++ = *in;
			*out++ = 0xFF;
			*out++ = (runlength & 0x000000FF);
			*out++ = (runlength & 0x0000FF00) >> 8;
			*out++ = (runlength & 0x00FF0000) >> 16;
			*out++ = (runlength & 0xFF000000) >> 24;
			runlength = 1;
			planeSize += 7;
		}

		in++;
		left--;
	}

	if (planeSize < originalSize - 4)
		CopyMemory(out, in, 4);

	planeSize += 4;
	return planeSize;
}
{'added': [(54, ''), (91, 'static BOOL nsc_encode_argb_to_aycocg(NSC_CONTEXT* context, const BYTE* data,'), (108, ''), (109, '\tif (!context || data || (scanline == 0))'), (110, '\t\treturn FALSE;'), (111, ''), (116, '\tif (context->priv->PlaneBuffersLength < rw * scanline)'), (117, '\t\treturn FALSE;'), (118, ''), (119, '\tif (rw < scanline * 2)'), (120, '\t\treturn FALSE;'), (121, ''), (256, ''), (257, '\treturn TRUE;'), (260, 'static BOOL nsc_encode_subsampling(NSC_CONTEXT* context)'), (266, ''), (267, '\tif (!context)'), (268, '\t\treturn FALSE;'), (269, ''), (273, '\tif (tempHeight == 0)'), (274, '\t\treturn FALSE;'), (275, ''), (276, '\tif (tempWidth > context->priv->PlaneBuffersLength / tempHeight)'), (277, '\t\treturn FALSE;'), (278, ''), (281, '\t\tBYTE* co_dst = context->priv->PlaneBuffers[1] + y * (tempWidth >> 1);'), (282, '\t\tBYTE* cg_dst = context->priv->PlaneBuffers[2] + y * (tempWidth >> 1);'), (283, '\t\tconst INT8* co_src0 = (INT8*) context->priv->PlaneBuffers[1] + (y << 1) * tempWidth;'), (284, '\t\tconst INT8* co_src1 = co_src0 + tempWidth;'), (285, '\t\tconst INT8* cg_src0 = (INT8*) context->priv->PlaneBuffers[2] + (y << 1) * tempWidth;'), (286, '\t\tconst INT8* cg_src1 = cg_src0 + tempWidth;'), (300, ''), (301, '\treturn TRUE;'), (304, 'BOOL nsc_encode(NSC_CONTEXT* context, const BYTE* bmpdata, UINT32 rowstride)'), (306, '\tif (!context || !bmpdata || (rowstride == 0))'), (307, '\t\treturn FALSE;'), (308, ''), (309, '\tif (!nsc_encode_argb_to_aycocg(context, bmpdata, rowstride))'), (310, '\t\treturn FALSE;'), (314, '\t\tif (!nsc_encode_subsampling(context))'), (315, '\t\t\treturn FALSE;'), (317, ''), (318, '\treturn TRUE;'), (321, 'static UINT32 nsc_rle_encode(const BYTE* in, BYTE* out, UINT32 originalSize)')], 'deleted': [(90, 'static void nsc_encode_argb_to_aycocg(NSC_CONTEXT* context, const BYTE* data,'), (247, 'static void nsc_encode_subsampling(NSC_CONTEXT* context)'), (251, '\tBYTE* co_dst;'), (252, '\tBYTE* cg_dst;'), (253, '\tINT8* co_src0;'), (254, '\tINT8* co_src1;'), (255, '\tINT8* cg_src0;'), (256, '\tINT8* cg_src1;'), (264, '\t\tco_dst = context->priv->PlaneBuffers[1] + y * (tempWidth >> 1);'), (265, '\t\tcg_dst = context->priv->PlaneBuffers[2] + y * (tempWidth >> 1);'), (266, '\t\tco_src0 = (INT8*) context->priv->PlaneBuffers[1] + (y << 1) * tempWidth;'), (267, '\t\tco_src1 = co_src0 + tempWidth;'), (268, '\t\tcg_src0 = (INT8*) context->priv->PlaneBuffers[2] + (y << 1) * tempWidth;'), (269, '\t\tcg_src1 = cg_src0 + tempWidth;'), (285, 'void nsc_encode(NSC_CONTEXT* context, const BYTE* bmpdata, UINT32 rowstride)'), (287, '\tnsc_encode_argb_to_aycocg(context, bmpdata, rowstride);'), (291, '\t\tnsc_encode_subsampling(context);'), (295, 'static UINT32 nsc_rle_encode(BYTE* in, BYTE* out, UINT32 originalSize)')]}
44
18
513
3,961
https://github.com/FreeRDP/FreeRDP
CVE-2018-8788
['CWE-787']
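The two nsc_rle_encode listings above (func_before and func_after) differ only in const-qualifying the input pointer; the substantive CVE-2018-8788 hardening is in the diff: the void encoders nsc_encode_argb_to_aycocg, nsc_encode_subsampling and nsc_encode become BOOL and validate their arguments and the PlaneBuffersLength capacity before any write, which is what closes the out-of-bounds write (CWE-787). Below is a minimal standalone sketch of that validate-before-encode pattern; PlaneBuffer, encode_rows and the other names are illustrative stand-ins, not FreeRDP API.

#include <cstddef>
#include <cstdint>

struct PlaneBuffer {            // hypothetical stand-in for the NSC plane state
	uint8_t* data;
	size_t   length;            // capacity of data in bytes
};

// Copies rowWidth * scanline bytes from src into the plane, or returns false
// without touching the buffer when the request cannot fit.
static bool encode_rows(PlaneBuffer* plane, const uint8_t* src,
                        size_t rowWidth, size_t scanline)
{
	if (!plane || !src || scanline == 0)         // reject null/zero arguments
		return false;

	if (rowWidth > plane->length / scanline)     // capacity check; the division
		return false;                            // form cannot itself overflow

	for (size_t y = 0; y < scanline; ++y)        // src is assumed to hold at
		for (size_t x = 0; x < rowWidth; ++x)    // least rowWidth*scanline bytes
			plane->data[y * rowWidth + x] = src[y * rowWidth + x];

	return true;
}

int main()
{
	uint8_t storage[64] = { 0 };
	uint8_t src[64] = { 0 };
	PlaneBuffer plane{ storage, sizeof storage };
	bool ok = encode_rows(&plane, src, 8, 8);         // 64 bytes, fits exactly
	bool rejected = !encode_rows(&plane, src, 9, 8);  // would overrun: refused
	return (ok && rejected) ? 0 : 1;
}

The division-form bound (rowWidth > length / scanline) mirrors the tempWidth > context->priv->PlaneBuffersLength / tempHeight check added in the diff: it enforces rowWidth * scanline <= length without computing a product that could wrap.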
quantized_mul_op.cc
tensorflow::QuantizedMulOp::Compute
/* Copyright 2015 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ // Implements a quantized eight-bit version of the matmul operation. #define EIGEN_USE_THREADS #if defined(__ARM_NEON__) || defined(__ARM_NEON) #define USE_NEON #include <arm_neon.h> #endif #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/kernels/meta_support.h" #include "tensorflow/core/kernels/quantization_utils.h" #include "tensorflow/core/lib/core/errors.h" #include "tensorflow/core/util/bcast.h" namespace tensorflow { namespace { template <class T, class Toutput> void ScalarMultiply(OpKernelContext* context, const T* full_input, int32 full_input_offset, int64 num_elements, T scalar_input, int32 scalar_input_offset, Toutput* output) { const int32 scalar_minus_offset = static_cast<int32>(scalar_input) - scalar_input_offset; for (int i = 0; i < num_elements; ++i) { output[i] = (static_cast<int32>(full_input[i]) - full_input_offset) * scalar_minus_offset; } } #ifdef USE_NEON template <> void ScalarMultiply<quint8, qint32>(OpKernelContext* context, const quint8* full_input, int32 full_input_offset, int64 num_elements, quint8 scalar_input, int32 scalar_input_offset, qint32* output) { const int16 scalar_minus_offset = static_cast<int16>(scalar_input) - scalar_input_offset; const int16x4_t scalar_minus_offset_16x4 = vmov_n_s16(scalar_minus_offset); const uint8x8_t full_input_offset_8x8 = vmov_n_u8(full_input_offset); // Go through the results in 16-element chunks for NEON acceleration. int i; for (i = 0; i < (num_elements - 15); i += 16) { // Load the tensor inputs. const uint8* full_input_ptr = &(full_input->value) + i; const uint8x16_t full_input_8x16 = vld1q_u8(full_input_ptr); // Break into two sets of vectors so we can do further calculations // easily. const uint8x8_t full_input_high_8x8 = vget_high_u8(full_input_8x16); const uint8x8_t full_input_low_8x8 = vget_low_u8(full_input_8x16); // Subtract off the offset value to get 16-bit results. const int16x8_t full_input_minus_offset_high_16x8 = vreinterpretq_s16_u16( vsubl_u8(full_input_high_8x8, full_input_offset_8x8)); const int16x8_t full_input_minus_offset_low_16x8 = vreinterpretq_s16_u16( vsubl_u8(full_input_low_8x8, full_input_offset_8x8)); // We have to work with 4-wide vectors, so extract them. const int16x4_t x_high_high_16x4 = vget_high_s16(full_input_minus_offset_high_16x8); const int16x4_t x_high_low_16x4 = vget_low_s16(full_input_minus_offset_high_16x8); const int16x4_t x_low_high_16x4 = vget_high_s16(full_input_minus_offset_low_16x8); const int16x4_t x_low_low_16x4 = vget_low_s16(full_input_minus_offset_low_16x8); // Perform the multiplication. 
const int32x4_t z_high_high_32x4 = vmull_s16(x_high_high_16x4, scalar_minus_offset_16x4); const int32x4_t z_high_low_32x4 = vmull_s16(x_high_low_16x4, scalar_minus_offset_16x4); const int32x4_t z_low_high_32x4 = vmull_s16(x_low_high_16x4, scalar_minus_offset_16x4); const int32x4_t z_low_low_32x4 = vmull_s16(x_low_low_16x4, scalar_minus_offset_16x4); // Write out the results. int32* output_ptr = &(output->value) + i; vst1q_s32(output_ptr + 0, z_low_low_32x4); vst1q_s32(output_ptr + 4, z_low_high_32x4); vst1q_s32(output_ptr + 8, z_high_low_32x4); vst1q_s32(output_ptr + 12, z_high_high_32x4); } // Finish up any remaining elements that weren't a multiple of 16. for (; i < num_elements; ++i) { output[i] = (static_cast<int32>(full_input[i]) - full_input_offset) * scalar_minus_offset; } } #endif // USE_NEON template <class T, class Toutput> void VectorMultiply(OpKernelContext* context, const T* x_data, int32 offset_x, const T* y_data, int32 offset_y, int64 num_elements, Toutput* output) { for (int i = 0; i < num_elements; ++i) { output[i] = (static_cast<int32>(x_data[i]) - offset_x) * (static_cast<int32>(y_data[i]) - offset_y); } } #ifdef USE_NEON template <> void VectorMultiply<quint8, qint32>(OpKernelContext* context, const quint8* x_data, int32 offset_x, const quint8* y_data, int32 offset_y, int64 num_elements, qint32* output) { const uint8x8_t offset_x_8x8 = vmov_n_u8(offset_x); const uint8x8_t offset_y_8x8 = vmov_n_u8(offset_y); int i; // Go through the results in 16-element chunks for NEON acceleration. for (i = 0; i < (num_elements - 15); i += 16) { // Load the vector inputs. const uint8* x_data_ptr = &(x_data->value) + i; const uint8x16_t x_8x16 = vld1q_u8(x_data_ptr); const uint8* y_data_ptr = &(y_data->value) + i; const uint8x16_t y_8x16 = vld1q_u8(y_data_ptr); // Break into two sets of vectors so we can do further calculations easily. const uint8x8_t x_high_8x8 = vget_high_u8(x_8x16); const uint8x8_t x_low_8x8 = vget_low_u8(x_8x16); const uint8x8_t y_high_8x8 = vget_high_u8(y_8x16); const uint8x8_t y_low_8x8 = vget_low_u8(y_8x16); // Subtract off the offset values to get 16-bit results. const int16x8_t x_minus_offset_high_16x8 = vreinterpretq_s16_u16(vsubl_u8(x_high_8x8, offset_x_8x8)); const int16x8_t x_minus_offset_low_16x8 = vreinterpretq_s16_u16(vsubl_u8(x_low_8x8, offset_x_8x8)); const int16x8_t y_minus_offset_high_16x8 = vreinterpretq_s16_u16(vsubl_u8(y_high_8x8, offset_y_8x8)); const int16x8_t y_minus_offset_low_16x8 = vreinterpretq_s16_u16(vsubl_u8(y_low_8x8, offset_y_8x8)); // We have to work with 4-wide vectors, so extract them. const int16x4_t x_high_high_16x4 = vget_high_s16(x_minus_offset_high_16x8); const int16x4_t x_high_low_16x4 = vget_low_s16(x_minus_offset_high_16x8); const int16x4_t x_low_high_16x4 = vget_high_s16(x_minus_offset_low_16x8); const int16x4_t x_low_low_16x4 = vget_low_s16(x_minus_offset_low_16x8); const int16x4_t y_high_high_16x4 = vget_high_s16(y_minus_offset_high_16x8); const int16x4_t y_high_low_16x4 = vget_low_s16(y_minus_offset_high_16x8); const int16x4_t y_low_high_16x4 = vget_high_s16(y_minus_offset_low_16x8); const int16x4_t y_low_low_16x4 = vget_low_s16(y_minus_offset_low_16x8); // Perform the multiplication. const int32x4_t z_high_high_32x4 = vmull_s16(x_high_high_16x4, y_high_high_16x4); const int32x4_t z_high_low_32x4 = vmull_s16(x_high_low_16x4, y_high_low_16x4); const int32x4_t z_low_high_32x4 = vmull_s16(x_low_high_16x4, y_low_high_16x4); const int32x4_t z_low_low_32x4 = vmull_s16(x_low_low_16x4, y_low_low_16x4); // Write out the results. 
int32* output_ptr = &(output->value) + i; vst1q_s32(output_ptr + 0, z_low_low_32x4); vst1q_s32(output_ptr + 4, z_low_high_32x4); vst1q_s32(output_ptr + 8, z_high_low_32x4); vst1q_s32(output_ptr + 12, z_high_high_32x4); } for (; i < num_elements; ++i) { output[i] = (static_cast<int32>(x_data[i]) - offset_x) * (static_cast<int32>(y_data[i]) - offset_y); } } #endif // USE_NEON template <class T, class Toutput> void VectorTensorMultiply(const T* vector_data, int32 vector_offset, int64 vector_num_elements, const T* tensor_data, int32 tensor_offset, int64 tensor_num_elements, Toutput* output) { for (int i = 0; i < tensor_num_elements; ++i) { const int64 vector_i = i % vector_num_elements; output[i] = (static_cast<int32>(vector_data[vector_i]) - vector_offset) * (static_cast<int32>(tensor_data[i]) - tensor_offset); } } #ifdef USE_NEON template <> void VectorTensorMultiply<quint8, qint32>( const quint8* vector_data, int32 vector_offset, int64 vector_num_elements, const quint8* tensor_data, int32 tensor_offset, int64 tensor_num_elements, qint32* output) { const uint8x8_t offset_x_8x8 = vmov_n_u8(vector_offset); const uint8x8_t offset_y_8x8 = vmov_n_u8(tensor_offset); CHECK_EQ(0, tensor_num_elements % vector_num_elements); for (int base_i = 0; base_i < tensor_num_elements; base_i += vector_num_elements) { int i = base_i; const int end_i = base_i + vector_num_elements; // Go through the results in 16-element chunks for NEON acceleration. int vector_i; for (vector_i = 0; vector_i < (vector_num_elements - 15); vector_i += 16, i += 16) { // Load the vector inputs. const uint8* x_data_ptr = &(vector_data->value) + vector_i; const uint8x16_t x_8x16 = vld1q_u8(x_data_ptr); const uint8* y_data_ptr = &(tensor_data->value) + i; const uint8x16_t y_8x16 = vld1q_u8(y_data_ptr); // Break into two sets of vectors so we can do further calculations // easily. const uint8x8_t x_high_8x8 = vget_high_u8(x_8x16); const uint8x8_t x_low_8x8 = vget_low_u8(x_8x16); const uint8x8_t y_high_8x8 = vget_high_u8(y_8x16); const uint8x8_t y_low_8x8 = vget_low_u8(y_8x16); // Subtract off the offset values to get 16-bit results. const int16x8_t x_minus_offset_high_16x8 = vreinterpretq_s16_u16(vsubl_u8(x_high_8x8, offset_x_8x8)); const int16x8_t x_minus_offset_low_16x8 = vreinterpretq_s16_u16(vsubl_u8(x_low_8x8, offset_x_8x8)); const int16x8_t y_minus_offset_high_16x8 = vreinterpretq_s16_u16(vsubl_u8(y_high_8x8, offset_y_8x8)); const int16x8_t y_minus_offset_low_16x8 = vreinterpretq_s16_u16(vsubl_u8(y_low_8x8, offset_y_8x8)); // We have to work with 4-wide vectors, so extract them. const int16x4_t x_high_high_16x4 = vget_high_s16(x_minus_offset_high_16x8); const int16x4_t x_high_low_16x4 = vget_low_s16(x_minus_offset_high_16x8); const int16x4_t x_low_high_16x4 = vget_high_s16(x_minus_offset_low_16x8); const int16x4_t x_low_low_16x4 = vget_low_s16(x_minus_offset_low_16x8); const int16x4_t y_high_high_16x4 = vget_high_s16(y_minus_offset_high_16x8); const int16x4_t y_high_low_16x4 = vget_low_s16(y_minus_offset_high_16x8); const int16x4_t y_low_high_16x4 = vget_high_s16(y_minus_offset_low_16x8); const int16x4_t y_low_low_16x4 = vget_low_s16(y_minus_offset_low_16x8); // Perform the multiplication. const int32x4_t z_high_high_32x4 = vmull_s16(x_high_high_16x4, y_high_high_16x4); const int32x4_t z_high_low_32x4 = vmull_s16(x_high_low_16x4, y_high_low_16x4); const int32x4_t z_low_high_32x4 = vmull_s16(x_low_high_16x4, y_low_high_16x4); const int32x4_t z_low_low_32x4 = vmull_s16(x_low_low_16x4, y_low_low_16x4); // Write out the results. 
int32* output_ptr = &(output->value) + i; vst1q_s32(output_ptr + 0, z_low_low_32x4); vst1q_s32(output_ptr + 4, z_low_high_32x4); vst1q_s32(output_ptr + 8, z_high_low_32x4); vst1q_s32(output_ptr + 12, z_high_high_32x4); } for (; i < end_i; ++i, ++vector_i) { output[i] = (static_cast<int32>(vector_data[vector_i]) - vector_offset) * (static_cast<int32>(tensor_data[i]) - tensor_offset); } } } #endif // USE_NEON } // namespace template <class T, class Toutput> class QuantizedMulOp : public OpKernel { public: explicit QuantizedMulOp(OpKernelConstruction* context) : OpKernel(context) {} void Compute(OpKernelContext* context) override { const Tensor& x = context->input(0); const Tensor& y = context->input(1); const float min_x = context->input(2).flat<float>()(0); const float max_x = context->input(3).flat<float>()(0); const float min_y = context->input(4).flat<float>()(0); const float max_y = context->input(5).flat<float>()(0); BCast bcast(BCast::FromShape(x.shape()), BCast::FromShape(y.shape())); if (!bcast.IsValid()) { context->SetStatus(errors::InvalidArgument( "Incompatible shapes: ", x.shape().DebugString(), " vs. ", y.shape().DebugString())); return; } Tensor* z; OP_REQUIRES_OK(context, context->allocate_output( 0, BCast::ToShape(bcast.output_shape()), &z)); // Make sure that we have valid quantization ranges for the input buffers. // If the difference between the min and max is negative or zero, it makes // it hard to do meaningful intermediate operations on the values. OP_REQUIRES(context, (max_x > min_x), errors::InvalidArgument("max_x must be larger than min_a.")); OP_REQUIRES(context, (max_y > min_y), errors::InvalidArgument("max_x must be larger than min_b.")); const int32 offset_x = FloatToQuantizedUnclamped<T>(0.0f, min_x, max_x); const int32 offset_y = FloatToQuantizedUnclamped<T>(0.0f, min_y, max_y); const T* x_data = x.flat<T>().data(); const T* y_data = y.flat<T>().data(); Toutput* z_data = z->flat<Toutput>().data(); const int ndims = bcast.x_reshape().size(); if (ndims <= 1) { if (x.NumElements() == 1) { ScalarMultiply<T, Toutput>(context, y_data, offset_y, y.NumElements(), x_data[0], offset_x, z_data); } else if (y.NumElements() == 1) { ScalarMultiply<T, Toutput>(context, x_data, offset_x, x.NumElements(), y_data[0], offset_y, z_data); } else { VectorMultiply<T, Toutput>(context, x_data, offset_x, y_data, offset_y, x.NumElements(), z_data); } } else if (ndims == 2) { const T* vector_data; int64 vector_num_elements; int32 vector_offset; const T* tensor_data; int64 tensor_num_elements; int32 tensor_offset; if (x.NumElements() < y.NumElements()) { vector_data = x_data; vector_num_elements = x.NumElements(); vector_offset = offset_x; tensor_data = y_data; tensor_num_elements = y.NumElements(); tensor_offset = offset_y; } else { vector_data = y_data; vector_num_elements = y.NumElements(); vector_offset = offset_y; tensor_data = x_data; tensor_num_elements = x.NumElements(); tensor_offset = offset_x; } if (vector_num_elements == 0) { context->SetStatus( errors::InvalidArgument("vector must have at least 1 element")); return; } VectorTensorMultiply<T, Toutput>( vector_data, vector_offset, vector_num_elements, tensor_data, tensor_offset, tensor_num_elements, z_data); } else { LOG(INFO) << "ndims=" << ndims; LOG(INFO) << "bcast.x_reshape()=" << TensorShape(bcast.x_reshape()).DebugString(); LOG(INFO) << "bcast.y_reshape()=" << TensorShape(bcast.y_reshape()).DebugString(); LOG(INFO) << "bcast.x_bcast()=" << TensorShape(bcast.x_bcast()).DebugString(); LOG(INFO) << "bcast.y_bcast()=" << 
TensorShape(bcast.y_bcast()).DebugString(); context->SetStatus(errors::Unimplemented( "Broadcast between ", context->input(0).shape().DebugString(), " and ", context->input(1).shape().DebugString(), " is not supported yet.")); return; } float min_z_value; float max_z_value; QuantizationRangeForMultiplication<T, T, Toutput>( min_x, max_x, min_y, max_y, &min_z_value, &max_z_value); Tensor* z_min = nullptr; OP_REQUIRES_OK(context, context->allocate_output(1, {}, &z_min)); z_min->flat<float>()(0) = min_z_value; Tensor* z_max = nullptr; OP_REQUIRES_OK(context, context->allocate_output(2, {}, &z_max)); z_max->flat<float>()(0) = max_z_value; } }; REGISTER_KERNEL_BUILDER(Name("QuantizedMul") .Device(DEVICE_CPU) .TypeConstraint<quint8>("T1") .TypeConstraint<quint8>("T2") .TypeConstraint<qint32>("Toutput"), QuantizedMulOp<quint8, qint32>); } // namespace tensorflow
/* Copyright 2015 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ // Implements a quantized eight-bit version of the matmul operation. #define EIGEN_USE_THREADS #if defined(__ARM_NEON__) || defined(__ARM_NEON) #define USE_NEON #include <arm_neon.h> #endif #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/kernels/meta_support.h" #include "tensorflow/core/kernels/quantization_utils.h" #include "tensorflow/core/lib/core/errors.h" #include "tensorflow/core/util/bcast.h" namespace tensorflow { namespace { template <class T, class Toutput> void ScalarMultiply(OpKernelContext* context, const T* full_input, int32 full_input_offset, int64 num_elements, T scalar_input, int32 scalar_input_offset, Toutput* output) { const int32 scalar_minus_offset = static_cast<int32>(scalar_input) - scalar_input_offset; for (int i = 0; i < num_elements; ++i) { output[i] = (static_cast<int32>(full_input[i]) - full_input_offset) * scalar_minus_offset; } } #ifdef USE_NEON template <> void ScalarMultiply<quint8, qint32>(OpKernelContext* context, const quint8* full_input, int32 full_input_offset, int64 num_elements, quint8 scalar_input, int32 scalar_input_offset, qint32* output) { const int16 scalar_minus_offset = static_cast<int16>(scalar_input) - scalar_input_offset; const int16x4_t scalar_minus_offset_16x4 = vmov_n_s16(scalar_minus_offset); const uint8x8_t full_input_offset_8x8 = vmov_n_u8(full_input_offset); // Go through the results in 16-element chunks for NEON acceleration. int i; for (i = 0; i < (num_elements - 15); i += 16) { // Load the tensor inputs. const uint8* full_input_ptr = &(full_input->value) + i; const uint8x16_t full_input_8x16 = vld1q_u8(full_input_ptr); // Break into two sets of vectors so we can do further calculations // easily. const uint8x8_t full_input_high_8x8 = vget_high_u8(full_input_8x16); const uint8x8_t full_input_low_8x8 = vget_low_u8(full_input_8x16); // Subtract off the offset value to get 16-bit results. const int16x8_t full_input_minus_offset_high_16x8 = vreinterpretq_s16_u16( vsubl_u8(full_input_high_8x8, full_input_offset_8x8)); const int16x8_t full_input_minus_offset_low_16x8 = vreinterpretq_s16_u16( vsubl_u8(full_input_low_8x8, full_input_offset_8x8)); // We have to work with 4-wide vectors, so extract them. const int16x4_t x_high_high_16x4 = vget_high_s16(full_input_minus_offset_high_16x8); const int16x4_t x_high_low_16x4 = vget_low_s16(full_input_minus_offset_high_16x8); const int16x4_t x_low_high_16x4 = vget_high_s16(full_input_minus_offset_low_16x8); const int16x4_t x_low_low_16x4 = vget_low_s16(full_input_minus_offset_low_16x8); // Perform the multiplication. 
const int32x4_t z_high_high_32x4 = vmull_s16(x_high_high_16x4, scalar_minus_offset_16x4); const int32x4_t z_high_low_32x4 = vmull_s16(x_high_low_16x4, scalar_minus_offset_16x4); const int32x4_t z_low_high_32x4 = vmull_s16(x_low_high_16x4, scalar_minus_offset_16x4); const int32x4_t z_low_low_32x4 = vmull_s16(x_low_low_16x4, scalar_minus_offset_16x4); // Write out the results. int32* output_ptr = &(output->value) + i; vst1q_s32(output_ptr + 0, z_low_low_32x4); vst1q_s32(output_ptr + 4, z_low_high_32x4); vst1q_s32(output_ptr + 8, z_high_low_32x4); vst1q_s32(output_ptr + 12, z_high_high_32x4); } // Finish up any remaining elements that weren't a multiple of 16. for (; i < num_elements; ++i) { output[i] = (static_cast<int32>(full_input[i]) - full_input_offset) * scalar_minus_offset; } } #endif // USE_NEON template <class T, class Toutput> void VectorMultiply(OpKernelContext* context, const T* x_data, int32 offset_x, const T* y_data, int32 offset_y, int64 num_elements, Toutput* output) { for (int i = 0; i < num_elements; ++i) { output[i] = (static_cast<int32>(x_data[i]) - offset_x) * (static_cast<int32>(y_data[i]) - offset_y); } } #ifdef USE_NEON template <> void VectorMultiply<quint8, qint32>(OpKernelContext* context, const quint8* x_data, int32 offset_x, const quint8* y_data, int32 offset_y, int64 num_elements, qint32* output) { const uint8x8_t offset_x_8x8 = vmov_n_u8(offset_x); const uint8x8_t offset_y_8x8 = vmov_n_u8(offset_y); int i; // Go through the results in 16-element chunks for NEON acceleration. for (i = 0; i < (num_elements - 15); i += 16) { // Load the vector inputs. const uint8* x_data_ptr = &(x_data->value) + i; const uint8x16_t x_8x16 = vld1q_u8(x_data_ptr); const uint8* y_data_ptr = &(y_data->value) + i; const uint8x16_t y_8x16 = vld1q_u8(y_data_ptr); // Break into two sets of vectors so we can do further calculations easily. const uint8x8_t x_high_8x8 = vget_high_u8(x_8x16); const uint8x8_t x_low_8x8 = vget_low_u8(x_8x16); const uint8x8_t y_high_8x8 = vget_high_u8(y_8x16); const uint8x8_t y_low_8x8 = vget_low_u8(y_8x16); // Subtract off the offset values to get 16-bit results. const int16x8_t x_minus_offset_high_16x8 = vreinterpretq_s16_u16(vsubl_u8(x_high_8x8, offset_x_8x8)); const int16x8_t x_minus_offset_low_16x8 = vreinterpretq_s16_u16(vsubl_u8(x_low_8x8, offset_x_8x8)); const int16x8_t y_minus_offset_high_16x8 = vreinterpretq_s16_u16(vsubl_u8(y_high_8x8, offset_y_8x8)); const int16x8_t y_minus_offset_low_16x8 = vreinterpretq_s16_u16(vsubl_u8(y_low_8x8, offset_y_8x8)); // We have to work with 4-wide vectors, so extract them. const int16x4_t x_high_high_16x4 = vget_high_s16(x_minus_offset_high_16x8); const int16x4_t x_high_low_16x4 = vget_low_s16(x_minus_offset_high_16x8); const int16x4_t x_low_high_16x4 = vget_high_s16(x_minus_offset_low_16x8); const int16x4_t x_low_low_16x4 = vget_low_s16(x_minus_offset_low_16x8); const int16x4_t y_high_high_16x4 = vget_high_s16(y_minus_offset_high_16x8); const int16x4_t y_high_low_16x4 = vget_low_s16(y_minus_offset_high_16x8); const int16x4_t y_low_high_16x4 = vget_high_s16(y_minus_offset_low_16x8); const int16x4_t y_low_low_16x4 = vget_low_s16(y_minus_offset_low_16x8); // Perform the multiplication. const int32x4_t z_high_high_32x4 = vmull_s16(x_high_high_16x4, y_high_high_16x4); const int32x4_t z_high_low_32x4 = vmull_s16(x_high_low_16x4, y_high_low_16x4); const int32x4_t z_low_high_32x4 = vmull_s16(x_low_high_16x4, y_low_high_16x4); const int32x4_t z_low_low_32x4 = vmull_s16(x_low_low_16x4, y_low_low_16x4); // Write out the results. 
int32* output_ptr = &(output->value) + i; vst1q_s32(output_ptr + 0, z_low_low_32x4); vst1q_s32(output_ptr + 4, z_low_high_32x4); vst1q_s32(output_ptr + 8, z_high_low_32x4); vst1q_s32(output_ptr + 12, z_high_high_32x4); } for (; i < num_elements; ++i) { output[i] = (static_cast<int32>(x_data[i]) - offset_x) * (static_cast<int32>(y_data[i]) - offset_y); } } #endif // USE_NEON template <class T, class Toutput> void VectorTensorMultiply(const T* vector_data, int32 vector_offset, int64 vector_num_elements, const T* tensor_data, int32 tensor_offset, int64 tensor_num_elements, Toutput* output) { for (int i = 0; i < tensor_num_elements; ++i) { const int64 vector_i = i % vector_num_elements; output[i] = (static_cast<int32>(vector_data[vector_i]) - vector_offset) * (static_cast<int32>(tensor_data[i]) - tensor_offset); } } #ifdef USE_NEON template <> void VectorTensorMultiply<quint8, qint32>( const quint8* vector_data, int32 vector_offset, int64 vector_num_elements, const quint8* tensor_data, int32 tensor_offset, int64 tensor_num_elements, qint32* output) { const uint8x8_t offset_x_8x8 = vmov_n_u8(vector_offset); const uint8x8_t offset_y_8x8 = vmov_n_u8(tensor_offset); CHECK_EQ(0, tensor_num_elements % vector_num_elements); for (int base_i = 0; base_i < tensor_num_elements; base_i += vector_num_elements) { int i = base_i; const int end_i = base_i + vector_num_elements; // Go through the results in 16-element chunks for NEON acceleration. int vector_i; for (vector_i = 0; vector_i < (vector_num_elements - 15); vector_i += 16, i += 16) { // Load the vector inputs. const uint8* x_data_ptr = &(vector_data->value) + vector_i; const uint8x16_t x_8x16 = vld1q_u8(x_data_ptr); const uint8* y_data_ptr = &(tensor_data->value) + i; const uint8x16_t y_8x16 = vld1q_u8(y_data_ptr); // Break into two sets of vectors so we can do further calculations // easily. const uint8x8_t x_high_8x8 = vget_high_u8(x_8x16); const uint8x8_t x_low_8x8 = vget_low_u8(x_8x16); const uint8x8_t y_high_8x8 = vget_high_u8(y_8x16); const uint8x8_t y_low_8x8 = vget_low_u8(y_8x16); // Subtract off the offset values to get 16-bit results. const int16x8_t x_minus_offset_high_16x8 = vreinterpretq_s16_u16(vsubl_u8(x_high_8x8, offset_x_8x8)); const int16x8_t x_minus_offset_low_16x8 = vreinterpretq_s16_u16(vsubl_u8(x_low_8x8, offset_x_8x8)); const int16x8_t y_minus_offset_high_16x8 = vreinterpretq_s16_u16(vsubl_u8(y_high_8x8, offset_y_8x8)); const int16x8_t y_minus_offset_low_16x8 = vreinterpretq_s16_u16(vsubl_u8(y_low_8x8, offset_y_8x8)); // We have to work with 4-wide vectors, so extract them. const int16x4_t x_high_high_16x4 = vget_high_s16(x_minus_offset_high_16x8); const int16x4_t x_high_low_16x4 = vget_low_s16(x_minus_offset_high_16x8); const int16x4_t x_low_high_16x4 = vget_high_s16(x_minus_offset_low_16x8); const int16x4_t x_low_low_16x4 = vget_low_s16(x_minus_offset_low_16x8); const int16x4_t y_high_high_16x4 = vget_high_s16(y_minus_offset_high_16x8); const int16x4_t y_high_low_16x4 = vget_low_s16(y_minus_offset_high_16x8); const int16x4_t y_low_high_16x4 = vget_high_s16(y_minus_offset_low_16x8); const int16x4_t y_low_low_16x4 = vget_low_s16(y_minus_offset_low_16x8); // Perform the multiplication. const int32x4_t z_high_high_32x4 = vmull_s16(x_high_high_16x4, y_high_high_16x4); const int32x4_t z_high_low_32x4 = vmull_s16(x_high_low_16x4, y_high_low_16x4); const int32x4_t z_low_high_32x4 = vmull_s16(x_low_high_16x4, y_low_high_16x4); const int32x4_t z_low_low_32x4 = vmull_s16(x_low_low_16x4, y_low_low_16x4); // Write out the results. 
int32* output_ptr = &(output->value) + i; vst1q_s32(output_ptr + 0, z_low_low_32x4); vst1q_s32(output_ptr + 4, z_low_high_32x4); vst1q_s32(output_ptr + 8, z_high_low_32x4); vst1q_s32(output_ptr + 12, z_high_high_32x4); } for (; i < end_i; ++i, ++vector_i) { output[i] = (static_cast<int32>(vector_data[vector_i]) - vector_offset) * (static_cast<int32>(tensor_data[i]) - tensor_offset); } } } #endif // USE_NEON } // namespace template <class T, class Toutput> class QuantizedMulOp : public OpKernel { public: explicit QuantizedMulOp(OpKernelConstruction* context) : OpKernel(context) {} void Compute(OpKernelContext* context) override { const Tensor& x = context->input(0); const Tensor& y = context->input(1); auto& min_x_tensor = context->input(2); OP_REQUIRES(context, TensorShapeUtils::IsScalar(min_x_tensor.shape()), errors::InvalidArgument("min_x must be a scalar")); const float min_x = min_x_tensor.flat<float>()(0); auto& max_x_tensor = context->input(3); OP_REQUIRES(context, TensorShapeUtils::IsScalar(max_x_tensor.shape()), errors::InvalidArgument("max_x must be a scalar")); const float max_x = max_x_tensor.flat<float>()(0); auto& min_y_tensor = context->input(4); OP_REQUIRES(context, TensorShapeUtils::IsScalar(min_y_tensor.shape()), errors::InvalidArgument("min_y must be a scalar")); const float min_y = min_y_tensor.flat<float>()(0); auto& max_y_tensor = context->input(5); OP_REQUIRES(context, TensorShapeUtils::IsScalar(max_y_tensor.shape()), errors::InvalidArgument("max_y must be a scalar")); const float max_y = max_y_tensor.flat<float>()(0); BCast bcast(BCast::FromShape(x.shape()), BCast::FromShape(y.shape())); if (!bcast.IsValid()) { context->SetStatus(errors::InvalidArgument( "Incompatible shapes: ", x.shape().DebugString(), " vs. ", y.shape().DebugString())); return; } Tensor* z; OP_REQUIRES_OK(context, context->allocate_output( 0, BCast::ToShape(bcast.output_shape()), &z)); // Make sure that we have valid quantization ranges for the input buffers. // If the difference between the min and max is negative or zero, it makes // it hard to do meaningful intermediate operations on the values. 
OP_REQUIRES(context, (max_x > min_x), errors::InvalidArgument("max_x must be larger than min_a.")); OP_REQUIRES(context, (max_y > min_y), errors::InvalidArgument("max_x must be larger than min_b.")); const int32 offset_x = FloatToQuantizedUnclamped<T>(0.0f, min_x, max_x); const int32 offset_y = FloatToQuantizedUnclamped<T>(0.0f, min_y, max_y); const T* x_data = x.flat<T>().data(); const T* y_data = y.flat<T>().data(); Toutput* z_data = z->flat<Toutput>().data(); const int ndims = bcast.x_reshape().size(); if (ndims <= 1) { if (x.NumElements() == 1) { ScalarMultiply<T, Toutput>(context, y_data, offset_y, y.NumElements(), x_data[0], offset_x, z_data); } else if (y.NumElements() == 1) { ScalarMultiply<T, Toutput>(context, x_data, offset_x, x.NumElements(), y_data[0], offset_y, z_data); } else { VectorMultiply<T, Toutput>(context, x_data, offset_x, y_data, offset_y, x.NumElements(), z_data); } } else if (ndims == 2) { const T* vector_data; int64 vector_num_elements; int32 vector_offset; const T* tensor_data; int64 tensor_num_elements; int32 tensor_offset; if (x.NumElements() < y.NumElements()) { vector_data = x_data; vector_num_elements = x.NumElements(); vector_offset = offset_x; tensor_data = y_data; tensor_num_elements = y.NumElements(); tensor_offset = offset_y; } else { vector_data = y_data; vector_num_elements = y.NumElements(); vector_offset = offset_y; tensor_data = x_data; tensor_num_elements = x.NumElements(); tensor_offset = offset_x; } if (vector_num_elements == 0) { context->SetStatus( errors::InvalidArgument("vector must have at least 1 element")); return; } VectorTensorMultiply<T, Toutput>( vector_data, vector_offset, vector_num_elements, tensor_data, tensor_offset, tensor_num_elements, z_data); } else { LOG(INFO) << "ndims=" << ndims; LOG(INFO) << "bcast.x_reshape()=" << TensorShape(bcast.x_reshape()).DebugString(); LOG(INFO) << "bcast.y_reshape()=" << TensorShape(bcast.y_reshape()).DebugString(); LOG(INFO) << "bcast.x_bcast()=" << TensorShape(bcast.x_bcast()).DebugString(); LOG(INFO) << "bcast.y_bcast()=" << TensorShape(bcast.y_bcast()).DebugString(); context->SetStatus(errors::Unimplemented( "Broadcast between ", context->input(0).shape().DebugString(), " and ", context->input(1).shape().DebugString(), " is not supported yet.")); return; } float min_z_value; float max_z_value; QuantizationRangeForMultiplication<T, T, Toutput>( min_x, max_x, min_y, max_y, &min_z_value, &max_z_value); Tensor* z_min = nullptr; OP_REQUIRES_OK(context, context->allocate_output(1, {}, &z_min)); z_min->flat<float>()(0) = min_z_value; Tensor* z_max = nullptr; OP_REQUIRES_OK(context, context->allocate_output(2, {}, &z_max)); z_max->flat<float>()(0) = max_z_value; } }; REGISTER_KERNEL_BUILDER(Name("QuantizedMul") .Device(DEVICE_CPU) .TypeConstraint<quint8>("T1") .TypeConstraint<quint8>("T2") .TypeConstraint<qint32>("Toutput"), QuantizedMulOp<quint8, qint32>); } // namespace tensorflow
void Compute(OpKernelContext* context) override { const Tensor& x = context->input(0); const Tensor& y = context->input(1); const float min_x = context->input(2).flat<float>()(0); const float max_x = context->input(3).flat<float>()(0); const float min_y = context->input(4).flat<float>()(0); const float max_y = context->input(5).flat<float>()(0); BCast bcast(BCast::FromShape(x.shape()), BCast::FromShape(y.shape())); if (!bcast.IsValid()) { context->SetStatus(errors::InvalidArgument( "Incompatible shapes: ", x.shape().DebugString(), " vs. ", y.shape().DebugString())); return; } Tensor* z; OP_REQUIRES_OK(context, context->allocate_output( 0, BCast::ToShape(bcast.output_shape()), &z)); // Make sure that we have valid quantization ranges for the input buffers. // If the difference between the min and max is negative or zero, it makes // it hard to do meaningful intermediate operations on the values. OP_REQUIRES(context, (max_x > min_x), errors::InvalidArgument("max_x must be larger than min_a.")); OP_REQUIRES(context, (max_y > min_y), errors::InvalidArgument("max_x must be larger than min_b.")); const int32 offset_x = FloatToQuantizedUnclamped<T>(0.0f, min_x, max_x); const int32 offset_y = FloatToQuantizedUnclamped<T>(0.0f, min_y, max_y); const T* x_data = x.flat<T>().data(); const T* y_data = y.flat<T>().data(); Toutput* z_data = z->flat<Toutput>().data(); const int ndims = bcast.x_reshape().size(); if (ndims <= 1) { if (x.NumElements() == 1) { ScalarMultiply<T, Toutput>(context, y_data, offset_y, y.NumElements(), x_data[0], offset_x, z_data); } else if (y.NumElements() == 1) { ScalarMultiply<T, Toutput>(context, x_data, offset_x, x.NumElements(), y_data[0], offset_y, z_data); } else { VectorMultiply<T, Toutput>(context, x_data, offset_x, y_data, offset_y, x.NumElements(), z_data); } } else if (ndims == 2) { const T* vector_data; int64 vector_num_elements; int32 vector_offset; const T* tensor_data; int64 tensor_num_elements; int32 tensor_offset; if (x.NumElements() < y.NumElements()) { vector_data = x_data; vector_num_elements = x.NumElements(); vector_offset = offset_x; tensor_data = y_data; tensor_num_elements = y.NumElements(); tensor_offset = offset_y; } else { vector_data = y_data; vector_num_elements = y.NumElements(); vector_offset = offset_y; tensor_data = x_data; tensor_num_elements = x.NumElements(); tensor_offset = offset_x; } if (vector_num_elements == 0) { context->SetStatus( errors::InvalidArgument("vector must have at least 1 element")); return; } VectorTensorMultiply<T, Toutput>( vector_data, vector_offset, vector_num_elements, tensor_data, tensor_offset, tensor_num_elements, z_data); } else { LOG(INFO) << "ndims=" << ndims; LOG(INFO) << "bcast.x_reshape()=" << TensorShape(bcast.x_reshape()).DebugString(); LOG(INFO) << "bcast.y_reshape()=" << TensorShape(bcast.y_reshape()).DebugString(); LOG(INFO) << "bcast.x_bcast()=" << TensorShape(bcast.x_bcast()).DebugString(); LOG(INFO) << "bcast.y_bcast()=" << TensorShape(bcast.y_bcast()).DebugString(); context->SetStatus(errors::Unimplemented( "Broadcast between ", context->input(0).shape().DebugString(), " and ", context->input(1).shape().DebugString(), " is not supported yet.")); return; } float min_z_value; float max_z_value; QuantizationRangeForMultiplication<T, T, Toutput>( min_x, max_x, min_y, max_y, &min_z_value, &max_z_value); Tensor* z_min = nullptr; OP_REQUIRES_OK(context, context->allocate_output(1, {}, &z_min)); z_min->flat<float>()(0) = min_z_value; Tensor* z_max = nullptr; OP_REQUIRES_OK(context, context->allocate_output(2, 
{}, &z_max)); z_max->flat<float>()(0) = max_z_value; }
void Compute(OpKernelContext* context) override { const Tensor& x = context->input(0); const Tensor& y = context->input(1); auto& min_x_tensor = context->input(2); OP_REQUIRES(context, TensorShapeUtils::IsScalar(min_x_tensor.shape()), errors::InvalidArgument("min_x must be a scalar")); const float min_x = min_x_tensor.flat<float>()(0); auto& max_x_tensor = context->input(3); OP_REQUIRES(context, TensorShapeUtils::IsScalar(max_x_tensor.shape()), errors::InvalidArgument("max_x must be a scalar")); const float max_x = max_x_tensor.flat<float>()(0); auto& min_y_tensor = context->input(4); OP_REQUIRES(context, TensorShapeUtils::IsScalar(min_y_tensor.shape()), errors::InvalidArgument("min_y must be a scalar")); const float min_y = min_y_tensor.flat<float>()(0); auto& max_y_tensor = context->input(5); OP_REQUIRES(context, TensorShapeUtils::IsScalar(max_y_tensor.shape()), errors::InvalidArgument("max_y must be a scalar")); const float max_y = max_y_tensor.flat<float>()(0); BCast bcast(BCast::FromShape(x.shape()), BCast::FromShape(y.shape())); if (!bcast.IsValid()) { context->SetStatus(errors::InvalidArgument( "Incompatible shapes: ", x.shape().DebugString(), " vs. ", y.shape().DebugString())); return; } Tensor* z; OP_REQUIRES_OK(context, context->allocate_output( 0, BCast::ToShape(bcast.output_shape()), &z)); // Make sure that we have valid quantization ranges for the input buffers. // If the difference between the min and max is negative or zero, it makes // it hard to do meaningful intermediate operations on the values. OP_REQUIRES(context, (max_x > min_x), errors::InvalidArgument("max_x must be larger than min_a.")); OP_REQUIRES(context, (max_y > min_y), errors::InvalidArgument("max_x must be larger than min_b.")); const int32 offset_x = FloatToQuantizedUnclamped<T>(0.0f, min_x, max_x); const int32 offset_y = FloatToQuantizedUnclamped<T>(0.0f, min_y, max_y); const T* x_data = x.flat<T>().data(); const T* y_data = y.flat<T>().data(); Toutput* z_data = z->flat<Toutput>().data(); const int ndims = bcast.x_reshape().size(); if (ndims <= 1) { if (x.NumElements() == 1) { ScalarMultiply<T, Toutput>(context, y_data, offset_y, y.NumElements(), x_data[0], offset_x, z_data); } else if (y.NumElements() == 1) { ScalarMultiply<T, Toutput>(context, x_data, offset_x, x.NumElements(), y_data[0], offset_y, z_data); } else { VectorMultiply<T, Toutput>(context, x_data, offset_x, y_data, offset_y, x.NumElements(), z_data); } } else if (ndims == 2) { const T* vector_data; int64 vector_num_elements; int32 vector_offset; const T* tensor_data; int64 tensor_num_elements; int32 tensor_offset; if (x.NumElements() < y.NumElements()) { vector_data = x_data; vector_num_elements = x.NumElements(); vector_offset = offset_x; tensor_data = y_data; tensor_num_elements = y.NumElements(); tensor_offset = offset_y; } else { vector_data = y_data; vector_num_elements = y.NumElements(); vector_offset = offset_y; tensor_data = x_data; tensor_num_elements = x.NumElements(); tensor_offset = offset_x; } if (vector_num_elements == 0) { context->SetStatus( errors::InvalidArgument("vector must have at least 1 element")); return; } VectorTensorMultiply<T, Toutput>( vector_data, vector_offset, vector_num_elements, tensor_data, tensor_offset, tensor_num_elements, z_data); } else { LOG(INFO) << "ndims=" << ndims; LOG(INFO) << "bcast.x_reshape()=" << TensorShape(bcast.x_reshape()).DebugString(); LOG(INFO) << "bcast.y_reshape()=" << TensorShape(bcast.y_reshape()).DebugString(); LOG(INFO) << "bcast.x_bcast()=" << 
TensorShape(bcast.x_bcast()).DebugString(); LOG(INFO) << "bcast.y_bcast()=" << TensorShape(bcast.y_bcast()).DebugString(); context->SetStatus(errors::Unimplemented( "Broadcast between ", context->input(0).shape().DebugString(), " and ", context->input(1).shape().DebugString(), " is not supported yet.")); return; } float min_z_value; float max_z_value; QuantizationRangeForMultiplication<T, T, Toutput>( min_x, max_x, min_y, max_y, &min_z_value, &max_z_value); Tensor* z_min = nullptr; OP_REQUIRES_OK(context, context->allocate_output(1, {}, &z_min)); z_min->flat<float>()(0) = min_z_value; Tensor* z_max = nullptr; OP_REQUIRES_OK(context, context->allocate_output(2, {}, &z_max)); z_max->flat<float>()(0) = max_z_value; }
{'added': [(287, ' auto& min_x_tensor = context->input(2);'), (288, ' OP_REQUIRES(context, TensorShapeUtils::IsScalar(min_x_tensor.shape()),'), (289, ' errors::InvalidArgument("min_x must be a scalar"));'), (290, ' const float min_x = min_x_tensor.flat<float>()(0);'), (291, ' auto& max_x_tensor = context->input(3);'), (292, ' OP_REQUIRES(context, TensorShapeUtils::IsScalar(max_x_tensor.shape()),'), (293, ' errors::InvalidArgument("max_x must be a scalar"));'), (294, ' const float max_x = max_x_tensor.flat<float>()(0);'), (295, ' auto& min_y_tensor = context->input(4);'), (296, ' OP_REQUIRES(context, TensorShapeUtils::IsScalar(min_y_tensor.shape()),'), (297, ' errors::InvalidArgument("min_y must be a scalar"));'), (298, ' const float min_y = min_y_tensor.flat<float>()(0);'), (299, ' auto& max_y_tensor = context->input(5);'), (300, ' OP_REQUIRES(context, TensorShapeUtils::IsScalar(max_y_tensor.shape()),'), (301, ' errors::InvalidArgument("max_y must be a scalar"));'), (302, ' const float max_y = max_y_tensor.flat<float>()(0);')], 'deleted': [(287, ' const float min_x = context->input(2).flat<float>()(0);'), (288, ' const float max_x = context->input(3).flat<float>()(0);'), (289, ' const float min_y = context->input(4).flat<float>()(0);'), (290, ' const float max_y = context->input(5).flat<float>()(0);')]}
16
4
319
2,618
https://github.com/tensorflow/tensorflow
CVE-2021-29535
['CWE-787']
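The CVE-2021-29535 change above is narrower: Compute previously read each quantization-range input with context->input(n).flat<float>()(0) unconditionally, so a malformed (non-scalar, possibly empty) min/max tensor led to an out-of-bounds access; the fix inserts an OP_REQUIRES(..., TensorShapeUtils::IsScalar(...)) shape check before every such read. A standalone sketch of the same validate-then-read pattern follows; MockTensor and read_scalar are illustrative stand-ins, not the TensorFlow API.

#include <cstdint>
#include <optional>
#include <vector>

struct MockTensor {
	std::vector<int64_t> dims;    // shape; empty dims == rank-0 scalar
	std::vector<float>   values;  // flat element storage
};

// Returns the scalar value only after the shape check passes -- the moral
// equivalent of the OP_REQUIRES + IsScalar guard in the record above.
static std::optional<float> read_scalar(const MockTensor& t)
{
	if (!t.dims.empty() || t.values.size() != 1)
		return std::nullopt;      // reject non-scalar min/max inputs
	return t.values[0];
}

int main()
{
	MockTensor scalar;            // rank-0, exactly one element: accepted
	scalar.values = { 1.5f };
	MockTensor empty;             // rank-1 with zero elements: rejected, where
	empty.dims = { 0 };           // an unconditional element-0 read would
	                              // run past the end of the storage
	return (read_scalar(scalar) && !read_scalar(empty)) ? 0 : 1;
}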
ast-build.c
ExprAppendMultiKeysymList
/************************************************************ * Copyright (c) 1994 by Silicon Graphics Computer Systems, Inc. * * Permission to use, copy, modify, and distribute this * software and its documentation for any purpose and without * fee is hereby granted, provided that the above copyright * notice appear in all copies and that both that copyright * notice and this permission notice appear in supporting * documentation, and that the name of Silicon Graphics not be * used in advertising or publicity pertaining to distribution * of the software without specific prior written permission. * Silicon Graphics makes no representation about the suitability * of this software for any purpose. It is provided "as is" * without any express or implied warranty. * * SILICON GRAPHICS DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS * SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY * AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT SHALL SILICON * GRAPHICS BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH * THE USE OR PERFORMANCE OF THIS SOFTWARE. * ********************************************************/ /* * Copyright © 2012 Intel Corporation * Copyright © 2012 Ran Benita <ran234@gmail.com> * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
* * Author: Daniel Stone <daniel@fooishbar.org> * Ran Benita <ran234@gmail.com> */ #include "xkbcomp-priv.h" #include "ast-build.h" #include "include.h" ParseCommon * AppendStmt(ParseCommon *to, ParseCommon *append) { ParseCommon *iter; if (!to) return append; for (iter = to; iter->next; iter = iter->next); iter->next = append; return to; } static ExprDef * ExprCreate(enum expr_op_type op, enum expr_value_type type, size_t size) { ExprDef *expr = malloc(size); if (!expr) return NULL; expr->common.type = STMT_EXPR; expr->common.next = NULL; expr->expr.op = op; expr->expr.value_type = type; return expr; } #define EXPR_CREATE(type_, name_, op_, value_type_) \ ExprDef *name_ = ExprCreate(op_, value_type_, sizeof(type_)); \ if (!name_) \ return NULL; ExprDef * ExprCreateString(xkb_atom_t str) { EXPR_CREATE(ExprString, expr, EXPR_VALUE, EXPR_TYPE_STRING); expr->string.str = str; return expr; } ExprDef * ExprCreateInteger(int ival) { EXPR_CREATE(ExprInteger, expr, EXPR_VALUE, EXPR_TYPE_INT); expr->integer.ival = ival; return expr; } ExprDef * ExprCreateFloat(void) { EXPR_CREATE(ExprFloat, expr, EXPR_VALUE, EXPR_TYPE_FLOAT); return expr; } ExprDef * ExprCreateBoolean(bool set) { EXPR_CREATE(ExprBoolean, expr, EXPR_VALUE, EXPR_TYPE_BOOLEAN); expr->boolean.set = set; return expr; } ExprDef * ExprCreateKeyName(xkb_atom_t key_name) { EXPR_CREATE(ExprKeyName, expr, EXPR_VALUE, EXPR_TYPE_KEYNAME); expr->key_name.key_name = key_name; return expr; } ExprDef * ExprCreateIdent(xkb_atom_t ident) { EXPR_CREATE(ExprIdent, expr, EXPR_IDENT, EXPR_TYPE_UNKNOWN); expr->ident.ident = ident; return expr; } ExprDef * ExprCreateUnary(enum expr_op_type op, enum expr_value_type type, ExprDef *child) { EXPR_CREATE(ExprUnary, expr, op, type); expr->unary.child = child; return expr; } ExprDef * ExprCreateBinary(enum expr_op_type op, ExprDef *left, ExprDef *right) { EXPR_CREATE(ExprBinary, expr, op, EXPR_TYPE_UNKNOWN); if (op == EXPR_ASSIGN || left->expr.value_type == EXPR_TYPE_UNKNOWN) expr->expr.value_type = right->expr.value_type; else if (left->expr.value_type == right->expr.value_type || right->expr.value_type == EXPR_TYPE_UNKNOWN) expr->expr.value_type = left->expr.value_type; expr->binary.left = left; expr->binary.right = right; return expr; } ExprDef * ExprCreateFieldRef(xkb_atom_t element, xkb_atom_t field) { EXPR_CREATE(ExprFieldRef, expr, EXPR_FIELD_REF, EXPR_TYPE_UNKNOWN); expr->field_ref.element = element; expr->field_ref.field = field; return expr; } ExprDef * ExprCreateArrayRef(xkb_atom_t element, xkb_atom_t field, ExprDef *entry) { EXPR_CREATE(ExprArrayRef, expr, EXPR_ARRAY_REF, EXPR_TYPE_UNKNOWN); expr->array_ref.element = element; expr->array_ref.field = field; expr->array_ref.entry = entry; return expr; } ExprDef * ExprCreateAction(xkb_atom_t name, ExprDef *args) { EXPR_CREATE(ExprAction, expr, EXPR_ACTION_DECL, EXPR_TYPE_UNKNOWN); expr->action.name = name; expr->action.args = args; return expr; } ExprDef * ExprCreateKeysymList(xkb_keysym_t sym) { EXPR_CREATE(ExprKeysymList, expr, EXPR_KEYSYM_LIST, EXPR_TYPE_SYMBOLS); darray_init(expr->keysym_list.syms); darray_init(expr->keysym_list.symsMapIndex); darray_init(expr->keysym_list.symsNumEntries); darray_append(expr->keysym_list.syms, sym); darray_append(expr->keysym_list.symsMapIndex, 0); darray_append(expr->keysym_list.symsNumEntries, 1); return expr; } ExprDef * ExprCreateMultiKeysymList(ExprDef *expr) { unsigned nLevels = darray_size(expr->keysym_list.symsMapIndex); darray_resize(expr->keysym_list.symsMapIndex, 1); 
darray_resize(expr->keysym_list.symsNumEntries, 1); darray_item(expr->keysym_list.symsMapIndex, 0) = 0; darray_item(expr->keysym_list.symsNumEntries, 0) = nLevels; return expr; } ExprDef * ExprAppendKeysymList(ExprDef *expr, xkb_keysym_t sym) { unsigned nSyms = darray_size(expr->keysym_list.syms); darray_append(expr->keysym_list.symsMapIndex, nSyms); darray_append(expr->keysym_list.symsNumEntries, 1); darray_append(expr->keysym_list.syms, sym); return expr; } ExprDef * ExprAppendMultiKeysymList(ExprDef *expr, ExprDef *append) { unsigned nSyms = darray_size(expr->keysym_list.syms); unsigned numEntries = darray_size(append->keysym_list.syms); darray_append(expr->keysym_list.symsMapIndex, nSyms); darray_append(expr->keysym_list.symsNumEntries, numEntries); darray_concat(expr->keysym_list.syms, append->keysym_list.syms); FreeStmt((ParseCommon *) &append); return expr; } KeycodeDef * KeycodeCreate(xkb_atom_t name, int64_t value) { KeycodeDef *def = malloc(sizeof(*def)); if (!def) return NULL; def->common.type = STMT_KEYCODE; def->common.next = NULL; def->name = name; def->value = value; return def; } KeyAliasDef * KeyAliasCreate(xkb_atom_t alias, xkb_atom_t real) { KeyAliasDef *def = malloc(sizeof(*def)); if (!def) return NULL; def->common.type = STMT_ALIAS; def->common.next = NULL; def->alias = alias; def->real = real; return def; } VModDef * VModCreate(xkb_atom_t name, ExprDef *value) { VModDef *def = malloc(sizeof(*def)); if (!def) return NULL; def->common.type = STMT_VMOD; def->common.next = NULL; def->name = name; def->value = value; return def; } VarDef * VarCreate(ExprDef *name, ExprDef *value) { VarDef *def = malloc(sizeof(*def)); if (!def) return NULL; def->common.type = STMT_VAR; def->common.next = NULL; def->name = name; def->value = value; return def; } VarDef * BoolVarCreate(xkb_atom_t ident, bool set) { ExprDef *name, *value; VarDef *def; if (!(name = ExprCreateIdent(ident))) { return NULL; } if (!(value = ExprCreateBoolean(set))) { FreeStmt((ParseCommon *) name); return NULL; } if (!(def = VarCreate(name, value))) { FreeStmt((ParseCommon *) name); FreeStmt((ParseCommon *) value); return NULL; } return def; } InterpDef * InterpCreate(xkb_keysym_t sym, ExprDef *match) { InterpDef *def = malloc(sizeof(*def)); if (!def) return NULL; def->common.type = STMT_INTERP; def->common.next = NULL; def->sym = sym; def->match = match; def->def = NULL; return def; } KeyTypeDef * KeyTypeCreate(xkb_atom_t name, VarDef *body) { KeyTypeDef *def = malloc(sizeof(*def)); if (!def) return NULL; def->common.type = STMT_TYPE; def->common.next = NULL; def->merge = MERGE_DEFAULT; def->name = name; def->body = body; return def; } SymbolsDef * SymbolsCreate(xkb_atom_t keyName, VarDef *symbols) { SymbolsDef *def = malloc(sizeof(*def)); if (!def) return NULL; def->common.type = STMT_SYMBOLS; def->common.next = NULL; def->merge = MERGE_DEFAULT; def->keyName = keyName; def->symbols = symbols; return def; } GroupCompatDef * GroupCompatCreate(unsigned group, ExprDef *val) { GroupCompatDef *def = malloc(sizeof(*def)); if (!def) return NULL; def->common.type = STMT_GROUP_COMPAT; def->common.next = NULL; def->merge = MERGE_DEFAULT; def->group = group; def->def = val; return def; } ModMapDef * ModMapCreate(xkb_atom_t modifier, ExprDef *keys) { ModMapDef *def = malloc(sizeof(*def)); if (!def) return NULL; def->common.type = STMT_MODMAP; def->common.next = NULL; def->merge = MERGE_DEFAULT; def->modifier = modifier; def->keys = keys; return def; } LedMapDef * LedMapCreate(xkb_atom_t name, VarDef *body) { LedMapDef *def = 
malloc(sizeof(*def)); if (!def) return NULL; def->common.type = STMT_LED_MAP; def->common.next = NULL; def->merge = MERGE_DEFAULT; def->name = name; def->body = body; return def; } LedNameDef * LedNameCreate(unsigned ndx, ExprDef *name, bool virtual) { LedNameDef *def = malloc(sizeof(*def)); if (!def) return NULL; def->common.type = STMT_LED_NAME; def->common.next = NULL; def->merge = MERGE_DEFAULT; def->ndx = ndx; def->name = name; def->virtual = virtual; return def; } static void FreeInclude(IncludeStmt *incl); IncludeStmt * IncludeCreate(struct xkb_context *ctx, char *str, enum merge_mode merge) { IncludeStmt *incl, *first; char *file, *map, *stmt, *tmp, *extra_data; char nextop; incl = first = NULL; file = map = NULL; tmp = str; stmt = strdup_safe(str); while (tmp && *tmp) { if (!ParseIncludeMap(&tmp, &file, &map, &nextop, &extra_data)) goto err; /* * Given an RMLVO (here layout) like 'us,,fr', the rules parser * will give out something like 'pc+us+:2+fr:3+inet(evdev)'. * We should just skip the ':2' in this case and leave it to the * appropriate section to deal with the empty group. */ if (isempty(file)) { free(file); free(map); free(extra_data); continue; } if (first == NULL) { first = incl = malloc(sizeof(*first)); } else { incl->next_incl = malloc(sizeof(*first)); incl = incl->next_incl; } if (!incl) break; incl->common.type = STMT_INCLUDE; incl->common.next = NULL; incl->merge = merge; incl->stmt = NULL; incl->file = file; incl->map = map; incl->modifier = extra_data; incl->next_incl = NULL; if (nextop == '|') merge = MERGE_AUGMENT; else merge = MERGE_OVERRIDE; } if (first) first->stmt = stmt; else free(stmt); return first; err: log_err(ctx, "Illegal include statement \"%s\"; Ignored\n", stmt); FreeInclude(first); free(stmt); return NULL; } XkbFile * XkbFileCreate(enum xkb_file_type type, char *name, ParseCommon *defs, enum xkb_map_flags flags) { XkbFile *file; file = calloc(1, sizeof(*file)); if (!file) return NULL; XkbEscapeMapName(name); file->file_type = type; file->name = name ? 
name : strdup("(unnamed)"); file->defs = defs; file->flags = flags; return file; } XkbFile * XkbFileFromComponents(struct xkb_context *ctx, const struct xkb_component_names *kkctgs) { char *const components[] = { kkctgs->keycodes, kkctgs->types, kkctgs->compat, kkctgs->symbols, }; enum xkb_file_type type; IncludeStmt *include = NULL; XkbFile *file = NULL; ParseCommon *defs = NULL; for (type = FIRST_KEYMAP_FILE_TYPE; type <= LAST_KEYMAP_FILE_TYPE; type++) { include = IncludeCreate(ctx, components[type], MERGE_DEFAULT); if (!include) goto err; file = XkbFileCreate(type, NULL, (ParseCommon *) include, 0); if (!file) { FreeInclude(include); goto err; } defs = AppendStmt(defs, &file->common); } file = XkbFileCreate(FILE_TYPE_KEYMAP, NULL, defs, 0); if (!file) goto err; return file; err: FreeXkbFile((XkbFile *) defs); return NULL; } static void FreeExpr(ExprDef *expr) { if (!expr) return; switch (expr->expr.op) { case EXPR_ACTION_LIST: case EXPR_NEGATE: case EXPR_UNARY_PLUS: case EXPR_NOT: case EXPR_INVERT: FreeStmt((ParseCommon *) expr->unary.child); break; case EXPR_DIVIDE: case EXPR_ADD: case EXPR_SUBTRACT: case EXPR_MULTIPLY: case EXPR_ASSIGN: FreeStmt((ParseCommon *) expr->binary.left); FreeStmt((ParseCommon *) expr->binary.right); break; case EXPR_ACTION_DECL: FreeStmt((ParseCommon *) expr->action.args); break; case EXPR_ARRAY_REF: FreeStmt((ParseCommon *) expr->array_ref.entry); break; case EXPR_KEYSYM_LIST: darray_free(expr->keysym_list.syms); darray_free(expr->keysym_list.symsMapIndex); darray_free(expr->keysym_list.symsNumEntries); break; default: break; } } static void FreeInclude(IncludeStmt *incl) { IncludeStmt *next; while (incl) { next = incl->next_incl; free(incl->file); free(incl->map); free(incl->modifier); free(incl->stmt); free(incl); incl = next; } } void FreeStmt(ParseCommon *stmt) { ParseCommon *next; while (stmt) { next = stmt->next; switch (stmt->type) { case STMT_INCLUDE: FreeInclude((IncludeStmt *) stmt); /* stmt is already free'd here. 
*/ stmt = NULL; break; case STMT_EXPR: FreeExpr((ExprDef *) stmt); break; case STMT_VAR: FreeStmt((ParseCommon *) ((VarDef *) stmt)->name); FreeStmt((ParseCommon *) ((VarDef *) stmt)->value); break; case STMT_TYPE: FreeStmt((ParseCommon *) ((KeyTypeDef *) stmt)->body); break; case STMT_INTERP: FreeStmt((ParseCommon *) ((InterpDef *) stmt)->match); FreeStmt((ParseCommon *) ((InterpDef *) stmt)->def); break; case STMT_VMOD: FreeStmt((ParseCommon *) ((VModDef *) stmt)->value); break; case STMT_SYMBOLS: FreeStmt((ParseCommon *) ((SymbolsDef *) stmt)->symbols); break; case STMT_MODMAP: FreeStmt((ParseCommon *) ((ModMapDef *) stmt)->keys); break; case STMT_GROUP_COMPAT: FreeStmt((ParseCommon *) ((GroupCompatDef *) stmt)->def); break; case STMT_LED_MAP: FreeStmt((ParseCommon *) ((LedMapDef *) stmt)->body); break; case STMT_LED_NAME: FreeStmt((ParseCommon *) ((LedNameDef *) stmt)->name); break; default: break; } free(stmt); stmt = next; } } void FreeXkbFile(XkbFile *file) { XkbFile *next; while (file) { next = (XkbFile *) file->common.next; switch (file->file_type) { case FILE_TYPE_KEYMAP: FreeXkbFile((XkbFile *) file->defs); break; case FILE_TYPE_TYPES: case FILE_TYPE_COMPAT: case FILE_TYPE_SYMBOLS: case FILE_TYPE_KEYCODES: case FILE_TYPE_GEOMETRY: FreeStmt(file->defs); break; default: break; } free(file->name); free(file); file = next; } } static const char *xkb_file_type_strings[_FILE_TYPE_NUM_ENTRIES] = { [FILE_TYPE_KEYCODES] = "xkb_keycodes", [FILE_TYPE_TYPES] = "xkb_types", [FILE_TYPE_COMPAT] = "xkb_compatibility", [FILE_TYPE_SYMBOLS] = "xkb_symbols", [FILE_TYPE_GEOMETRY] = "xkb_geometry", [FILE_TYPE_KEYMAP] = "xkb_keymap", [FILE_TYPE_RULES] = "rules", }; const char * xkb_file_type_to_string(enum xkb_file_type type) { if (type > _FILE_TYPE_NUM_ENTRIES) return "unknown"; return xkb_file_type_strings[type]; } static const char *stmt_type_strings[_STMT_NUM_VALUES] = { [STMT_UNKNOWN] = "unknown statement", [STMT_INCLUDE] = "include statement", [STMT_KEYCODE] = "key name definition", [STMT_ALIAS] = "key alias definition", [STMT_EXPR] = "expression", [STMT_VAR] = "variable definition", [STMT_TYPE] = "key type definition", [STMT_INTERP] = "symbol interpretation definition", [STMT_VMOD] = "virtual modifiers definition", [STMT_SYMBOLS] = "key symbols definition", [STMT_MODMAP] = "modifier map declaration", [STMT_GROUP_COMPAT] = "group declaration", [STMT_LED_MAP] = "indicator map declaration", [STMT_LED_NAME] = "indicator name declaration", }; const char * stmt_type_to_string(enum stmt_type type) { if (type >= _STMT_NUM_VALUES) return NULL; return stmt_type_strings[type]; } static const char *expr_op_type_strings[_EXPR_NUM_VALUES] = { [EXPR_VALUE] = "literal", [EXPR_IDENT] = "identifier", [EXPR_ACTION_DECL] = "action declaration", [EXPR_FIELD_REF] = "field reference", [EXPR_ARRAY_REF] = "array reference", [EXPR_KEYSYM_LIST] = "list of keysyms", [EXPR_ACTION_LIST] = "list of actions", [EXPR_ADD] = "addition", [EXPR_SUBTRACT] = "subtraction", [EXPR_MULTIPLY] = "multiplication", [EXPR_DIVIDE] = "division", [EXPR_ASSIGN] = "assignment", [EXPR_NOT] = "logical negation", [EXPR_NEGATE] = "arithmetic negation", [EXPR_INVERT] = "bitwise inversion", [EXPR_UNARY_PLUS] = "unary plus", }; const char * expr_op_type_to_string(enum expr_op_type type) { if (type >= _EXPR_NUM_VALUES) return NULL; return expr_op_type_strings[type]; } static const char *expr_value_type_strings[_EXPR_TYPE_NUM_VALUES] = { [EXPR_TYPE_UNKNOWN] = "unknown", [EXPR_TYPE_BOOLEAN] = "boolean", [EXPR_TYPE_INT] = "int", [EXPR_TYPE_FLOAT] = 
"float", [EXPR_TYPE_STRING] = "string", [EXPR_TYPE_ACTION] = "action", [EXPR_TYPE_KEYNAME] = "keyname", [EXPR_TYPE_SYMBOLS] = "symbols", }; const char * expr_value_type_to_string(enum expr_value_type type) { if (type >= _EXPR_TYPE_NUM_VALUES) return NULL; return expr_value_type_strings[type]; }
/************************************************************ * Copyright (c) 1994 by Silicon Graphics Computer Systems, Inc. * * Permission to use, copy, modify, and distribute this * software and its documentation for any purpose and without * fee is hereby granted, provided that the above copyright * notice appear in all copies and that both that copyright * notice and this permission notice appear in supporting * documentation, and that the name of Silicon Graphics not be * used in advertising or publicity pertaining to distribution * of the software without specific prior written permission. * Silicon Graphics makes no representation about the suitability * of this software for any purpose. It is provided "as is" * without any express or implied warranty. * * SILICON GRAPHICS DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS * SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY * AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT SHALL SILICON * GRAPHICS BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH * THE USE OR PERFORMANCE OF THIS SOFTWARE. * ********************************************************/ /* * Copyright © 2012 Intel Corporation * Copyright © 2012 Ran Benita <ran234@gmail.com> * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
* * Author: Daniel Stone <daniel@fooishbar.org> * Ran Benita <ran234@gmail.com> */ #include "xkbcomp-priv.h" #include "ast-build.h" #include "include.h" ParseCommon * AppendStmt(ParseCommon *to, ParseCommon *append) { ParseCommon *iter; if (!to) return append; for (iter = to; iter->next; iter = iter->next); iter->next = append; return to; } static ExprDef * ExprCreate(enum expr_op_type op, enum expr_value_type type, size_t size) { ExprDef *expr = malloc(size); if (!expr) return NULL; expr->common.type = STMT_EXPR; expr->common.next = NULL; expr->expr.op = op; expr->expr.value_type = type; return expr; } #define EXPR_CREATE(type_, name_, op_, value_type_) \ ExprDef *name_ = ExprCreate(op_, value_type_, sizeof(type_)); \ if (!name_) \ return NULL; ExprDef * ExprCreateString(xkb_atom_t str) { EXPR_CREATE(ExprString, expr, EXPR_VALUE, EXPR_TYPE_STRING); expr->string.str = str; return expr; } ExprDef * ExprCreateInteger(int ival) { EXPR_CREATE(ExprInteger, expr, EXPR_VALUE, EXPR_TYPE_INT); expr->integer.ival = ival; return expr; } ExprDef * ExprCreateFloat(void) { EXPR_CREATE(ExprFloat, expr, EXPR_VALUE, EXPR_TYPE_FLOAT); return expr; } ExprDef * ExprCreateBoolean(bool set) { EXPR_CREATE(ExprBoolean, expr, EXPR_VALUE, EXPR_TYPE_BOOLEAN); expr->boolean.set = set; return expr; } ExprDef * ExprCreateKeyName(xkb_atom_t key_name) { EXPR_CREATE(ExprKeyName, expr, EXPR_VALUE, EXPR_TYPE_KEYNAME); expr->key_name.key_name = key_name; return expr; } ExprDef * ExprCreateIdent(xkb_atom_t ident) { EXPR_CREATE(ExprIdent, expr, EXPR_IDENT, EXPR_TYPE_UNKNOWN); expr->ident.ident = ident; return expr; } ExprDef * ExprCreateUnary(enum expr_op_type op, enum expr_value_type type, ExprDef *child) { EXPR_CREATE(ExprUnary, expr, op, type); expr->unary.child = child; return expr; } ExprDef * ExprCreateBinary(enum expr_op_type op, ExprDef *left, ExprDef *right) { EXPR_CREATE(ExprBinary, expr, op, EXPR_TYPE_UNKNOWN); if (op == EXPR_ASSIGN || left->expr.value_type == EXPR_TYPE_UNKNOWN) expr->expr.value_type = right->expr.value_type; else if (left->expr.value_type == right->expr.value_type || right->expr.value_type == EXPR_TYPE_UNKNOWN) expr->expr.value_type = left->expr.value_type; expr->binary.left = left; expr->binary.right = right; return expr; } ExprDef * ExprCreateFieldRef(xkb_atom_t element, xkb_atom_t field) { EXPR_CREATE(ExprFieldRef, expr, EXPR_FIELD_REF, EXPR_TYPE_UNKNOWN); expr->field_ref.element = element; expr->field_ref.field = field; return expr; } ExprDef * ExprCreateArrayRef(xkb_atom_t element, xkb_atom_t field, ExprDef *entry) { EXPR_CREATE(ExprArrayRef, expr, EXPR_ARRAY_REF, EXPR_TYPE_UNKNOWN); expr->array_ref.element = element; expr->array_ref.field = field; expr->array_ref.entry = entry; return expr; } ExprDef * ExprCreateAction(xkb_atom_t name, ExprDef *args) { EXPR_CREATE(ExprAction, expr, EXPR_ACTION_DECL, EXPR_TYPE_UNKNOWN); expr->action.name = name; expr->action.args = args; return expr; } ExprDef * ExprCreateKeysymList(xkb_keysym_t sym) { EXPR_CREATE(ExprKeysymList, expr, EXPR_KEYSYM_LIST, EXPR_TYPE_SYMBOLS); darray_init(expr->keysym_list.syms); darray_init(expr->keysym_list.symsMapIndex); darray_init(expr->keysym_list.symsNumEntries); darray_append(expr->keysym_list.syms, sym); darray_append(expr->keysym_list.symsMapIndex, 0); darray_append(expr->keysym_list.symsNumEntries, 1); return expr; } ExprDef * ExprCreateMultiKeysymList(ExprDef *expr) { unsigned nLevels = darray_size(expr->keysym_list.symsMapIndex); darray_resize(expr->keysym_list.symsMapIndex, 1); 
darray_resize(expr->keysym_list.symsNumEntries, 1); darray_item(expr->keysym_list.symsMapIndex, 0) = 0; darray_item(expr->keysym_list.symsNumEntries, 0) = nLevels; return expr; } ExprDef * ExprAppendKeysymList(ExprDef *expr, xkb_keysym_t sym) { unsigned nSyms = darray_size(expr->keysym_list.syms); darray_append(expr->keysym_list.symsMapIndex, nSyms); darray_append(expr->keysym_list.symsNumEntries, 1); darray_append(expr->keysym_list.syms, sym); return expr; } ExprDef * ExprAppendMultiKeysymList(ExprDef *expr, ExprDef *append) { unsigned nSyms = darray_size(expr->keysym_list.syms); unsigned numEntries = darray_size(append->keysym_list.syms); darray_append(expr->keysym_list.symsMapIndex, nSyms); darray_append(expr->keysym_list.symsNumEntries, numEntries); darray_concat(expr->keysym_list.syms, append->keysym_list.syms); FreeStmt((ParseCommon *) append); return expr; } KeycodeDef * KeycodeCreate(xkb_atom_t name, int64_t value) { KeycodeDef *def = malloc(sizeof(*def)); if (!def) return NULL; def->common.type = STMT_KEYCODE; def->common.next = NULL; def->name = name; def->value = value; return def; } KeyAliasDef * KeyAliasCreate(xkb_atom_t alias, xkb_atom_t real) { KeyAliasDef *def = malloc(sizeof(*def)); if (!def) return NULL; def->common.type = STMT_ALIAS; def->common.next = NULL; def->alias = alias; def->real = real; return def; } VModDef * VModCreate(xkb_atom_t name, ExprDef *value) { VModDef *def = malloc(sizeof(*def)); if (!def) return NULL; def->common.type = STMT_VMOD; def->common.next = NULL; def->name = name; def->value = value; return def; } VarDef * VarCreate(ExprDef *name, ExprDef *value) { VarDef *def = malloc(sizeof(*def)); if (!def) return NULL; def->common.type = STMT_VAR; def->common.next = NULL; def->name = name; def->value = value; return def; } VarDef * BoolVarCreate(xkb_atom_t ident, bool set) { ExprDef *name, *value; VarDef *def; if (!(name = ExprCreateIdent(ident))) { return NULL; } if (!(value = ExprCreateBoolean(set))) { FreeStmt((ParseCommon *) name); return NULL; } if (!(def = VarCreate(name, value))) { FreeStmt((ParseCommon *) name); FreeStmt((ParseCommon *) value); return NULL; } return def; } InterpDef * InterpCreate(xkb_keysym_t sym, ExprDef *match) { InterpDef *def = malloc(sizeof(*def)); if (!def) return NULL; def->common.type = STMT_INTERP; def->common.next = NULL; def->sym = sym; def->match = match; def->def = NULL; return def; } KeyTypeDef * KeyTypeCreate(xkb_atom_t name, VarDef *body) { KeyTypeDef *def = malloc(sizeof(*def)); if (!def) return NULL; def->common.type = STMT_TYPE; def->common.next = NULL; def->merge = MERGE_DEFAULT; def->name = name; def->body = body; return def; } SymbolsDef * SymbolsCreate(xkb_atom_t keyName, VarDef *symbols) { SymbolsDef *def = malloc(sizeof(*def)); if (!def) return NULL; def->common.type = STMT_SYMBOLS; def->common.next = NULL; def->merge = MERGE_DEFAULT; def->keyName = keyName; def->symbols = symbols; return def; } GroupCompatDef * GroupCompatCreate(unsigned group, ExprDef *val) { GroupCompatDef *def = malloc(sizeof(*def)); if (!def) return NULL; def->common.type = STMT_GROUP_COMPAT; def->common.next = NULL; def->merge = MERGE_DEFAULT; def->group = group; def->def = val; return def; } ModMapDef * ModMapCreate(xkb_atom_t modifier, ExprDef *keys) { ModMapDef *def = malloc(sizeof(*def)); if (!def) return NULL; def->common.type = STMT_MODMAP; def->common.next = NULL; def->merge = MERGE_DEFAULT; def->modifier = modifier; def->keys = keys; return def; } LedMapDef * LedMapCreate(xkb_atom_t name, VarDef *body) { LedMapDef *def = 
malloc(sizeof(*def)); if (!def) return NULL; def->common.type = STMT_LED_MAP; def->common.next = NULL; def->merge = MERGE_DEFAULT; def->name = name; def->body = body; return def; } LedNameDef * LedNameCreate(unsigned ndx, ExprDef *name, bool virtual) { LedNameDef *def = malloc(sizeof(*def)); if (!def) return NULL; def->common.type = STMT_LED_NAME; def->common.next = NULL; def->merge = MERGE_DEFAULT; def->ndx = ndx; def->name = name; def->virtual = virtual; return def; } static void FreeInclude(IncludeStmt *incl); IncludeStmt * IncludeCreate(struct xkb_context *ctx, char *str, enum merge_mode merge) { IncludeStmt *incl, *first; char *file, *map, *stmt, *tmp, *extra_data; char nextop; incl = first = NULL; file = map = NULL; tmp = str; stmt = strdup_safe(str); while (tmp && *tmp) { if (!ParseIncludeMap(&tmp, &file, &map, &nextop, &extra_data)) goto err; /* * Given an RMLVO (here layout) like 'us,,fr', the rules parser * will give out something like 'pc+us+:2+fr:3+inet(evdev)'. * We should just skip the ':2' in this case and leave it to the * appropriate section to deal with the empty group. */ if (isempty(file)) { free(file); free(map); free(extra_data); continue; } if (first == NULL) { first = incl = malloc(sizeof(*first)); } else { incl->next_incl = malloc(sizeof(*first)); incl = incl->next_incl; } if (!incl) break; incl->common.type = STMT_INCLUDE; incl->common.next = NULL; incl->merge = merge; incl->stmt = NULL; incl->file = file; incl->map = map; incl->modifier = extra_data; incl->next_incl = NULL; if (nextop == '|') merge = MERGE_AUGMENT; else merge = MERGE_OVERRIDE; } if (first) first->stmt = stmt; else free(stmt); return first; err: log_err(ctx, "Illegal include statement \"%s\"; Ignored\n", stmt); FreeInclude(first); free(stmt); return NULL; } XkbFile * XkbFileCreate(enum xkb_file_type type, char *name, ParseCommon *defs, enum xkb_map_flags flags) { XkbFile *file; file = calloc(1, sizeof(*file)); if (!file) return NULL; XkbEscapeMapName(name); file->file_type = type; file->name = name ? 
name : strdup("(unnamed)"); file->defs = defs; file->flags = flags; return file; } XkbFile * XkbFileFromComponents(struct xkb_context *ctx, const struct xkb_component_names *kkctgs) { char *const components[] = { kkctgs->keycodes, kkctgs->types, kkctgs->compat, kkctgs->symbols, }; enum xkb_file_type type; IncludeStmt *include = NULL; XkbFile *file = NULL; ParseCommon *defs = NULL; for (type = FIRST_KEYMAP_FILE_TYPE; type <= LAST_KEYMAP_FILE_TYPE; type++) { include = IncludeCreate(ctx, components[type], MERGE_DEFAULT); if (!include) goto err; file = XkbFileCreate(type, NULL, (ParseCommon *) include, 0); if (!file) { FreeInclude(include); goto err; } defs = AppendStmt(defs, &file->common); } file = XkbFileCreate(FILE_TYPE_KEYMAP, NULL, defs, 0); if (!file) goto err; return file; err: FreeXkbFile((XkbFile *) defs); return NULL; } static void FreeExpr(ExprDef *expr) { if (!expr) return; switch (expr->expr.op) { case EXPR_ACTION_LIST: case EXPR_NEGATE: case EXPR_UNARY_PLUS: case EXPR_NOT: case EXPR_INVERT: FreeStmt((ParseCommon *) expr->unary.child); break; case EXPR_DIVIDE: case EXPR_ADD: case EXPR_SUBTRACT: case EXPR_MULTIPLY: case EXPR_ASSIGN: FreeStmt((ParseCommon *) expr->binary.left); FreeStmt((ParseCommon *) expr->binary.right); break; case EXPR_ACTION_DECL: FreeStmt((ParseCommon *) expr->action.args); break; case EXPR_ARRAY_REF: FreeStmt((ParseCommon *) expr->array_ref.entry); break; case EXPR_KEYSYM_LIST: darray_free(expr->keysym_list.syms); darray_free(expr->keysym_list.symsMapIndex); darray_free(expr->keysym_list.symsNumEntries); break; default: break; } } static void FreeInclude(IncludeStmt *incl) { IncludeStmt *next; while (incl) { next = incl->next_incl; free(incl->file); free(incl->map); free(incl->modifier); free(incl->stmt); free(incl); incl = next; } } void FreeStmt(ParseCommon *stmt) { ParseCommon *next; while (stmt) { next = stmt->next; switch (stmt->type) { case STMT_INCLUDE: FreeInclude((IncludeStmt *) stmt); /* stmt is already free'd here. 
*/ stmt = NULL; break; case STMT_EXPR: FreeExpr((ExprDef *) stmt); break; case STMT_VAR: FreeStmt((ParseCommon *) ((VarDef *) stmt)->name); FreeStmt((ParseCommon *) ((VarDef *) stmt)->value); break; case STMT_TYPE: FreeStmt((ParseCommon *) ((KeyTypeDef *) stmt)->body); break; case STMT_INTERP: FreeStmt((ParseCommon *) ((InterpDef *) stmt)->match); FreeStmt((ParseCommon *) ((InterpDef *) stmt)->def); break; case STMT_VMOD: FreeStmt((ParseCommon *) ((VModDef *) stmt)->value); break; case STMT_SYMBOLS: FreeStmt((ParseCommon *) ((SymbolsDef *) stmt)->symbols); break; case STMT_MODMAP: FreeStmt((ParseCommon *) ((ModMapDef *) stmt)->keys); break; case STMT_GROUP_COMPAT: FreeStmt((ParseCommon *) ((GroupCompatDef *) stmt)->def); break; case STMT_LED_MAP: FreeStmt((ParseCommon *) ((LedMapDef *) stmt)->body); break; case STMT_LED_NAME: FreeStmt((ParseCommon *) ((LedNameDef *) stmt)->name); break; default: break; } free(stmt); stmt = next; } } void FreeXkbFile(XkbFile *file) { XkbFile *next; while (file) { next = (XkbFile *) file->common.next; switch (file->file_type) { case FILE_TYPE_KEYMAP: FreeXkbFile((XkbFile *) file->defs); break; case FILE_TYPE_TYPES: case FILE_TYPE_COMPAT: case FILE_TYPE_SYMBOLS: case FILE_TYPE_KEYCODES: case FILE_TYPE_GEOMETRY: FreeStmt(file->defs); break; default: break; } free(file->name); free(file); file = next; } } static const char *xkb_file_type_strings[_FILE_TYPE_NUM_ENTRIES] = { [FILE_TYPE_KEYCODES] = "xkb_keycodes", [FILE_TYPE_TYPES] = "xkb_types", [FILE_TYPE_COMPAT] = "xkb_compatibility", [FILE_TYPE_SYMBOLS] = "xkb_symbols", [FILE_TYPE_GEOMETRY] = "xkb_geometry", [FILE_TYPE_KEYMAP] = "xkb_keymap", [FILE_TYPE_RULES] = "rules", }; const char * xkb_file_type_to_string(enum xkb_file_type type) { if (type > _FILE_TYPE_NUM_ENTRIES) return "unknown"; return xkb_file_type_strings[type]; } static const char *stmt_type_strings[_STMT_NUM_VALUES] = { [STMT_UNKNOWN] = "unknown statement", [STMT_INCLUDE] = "include statement", [STMT_KEYCODE] = "key name definition", [STMT_ALIAS] = "key alias definition", [STMT_EXPR] = "expression", [STMT_VAR] = "variable definition", [STMT_TYPE] = "key type definition", [STMT_INTERP] = "symbol interpretation definition", [STMT_VMOD] = "virtual modifiers definition", [STMT_SYMBOLS] = "key symbols definition", [STMT_MODMAP] = "modifier map declaration", [STMT_GROUP_COMPAT] = "group declaration", [STMT_LED_MAP] = "indicator map declaration", [STMT_LED_NAME] = "indicator name declaration", }; const char * stmt_type_to_string(enum stmt_type type) { if (type >= _STMT_NUM_VALUES) return NULL; return stmt_type_strings[type]; } static const char *expr_op_type_strings[_EXPR_NUM_VALUES] = { [EXPR_VALUE] = "literal", [EXPR_IDENT] = "identifier", [EXPR_ACTION_DECL] = "action declaration", [EXPR_FIELD_REF] = "field reference", [EXPR_ARRAY_REF] = "array reference", [EXPR_KEYSYM_LIST] = "list of keysyms", [EXPR_ACTION_LIST] = "list of actions", [EXPR_ADD] = "addition", [EXPR_SUBTRACT] = "subtraction", [EXPR_MULTIPLY] = "multiplication", [EXPR_DIVIDE] = "division", [EXPR_ASSIGN] = "assignment", [EXPR_NOT] = "logical negation", [EXPR_NEGATE] = "arithmetic negation", [EXPR_INVERT] = "bitwise inversion", [EXPR_UNARY_PLUS] = "unary plus", }; const char * expr_op_type_to_string(enum expr_op_type type) { if (type >= _EXPR_NUM_VALUES) return NULL; return expr_op_type_strings[type]; } static const char *expr_value_type_strings[_EXPR_TYPE_NUM_VALUES] = { [EXPR_TYPE_UNKNOWN] = "unknown", [EXPR_TYPE_BOOLEAN] = "boolean", [EXPR_TYPE_INT] = "int", [EXPR_TYPE_FLOAT] = 
"float", [EXPR_TYPE_STRING] = "string", [EXPR_TYPE_ACTION] = "action", [EXPR_TYPE_KEYNAME] = "keyname", [EXPR_TYPE_SYMBOLS] = "symbols", }; const char * expr_value_type_to_string(enum expr_value_type type) { if (type >= _EXPR_TYPE_NUM_VALUES) return NULL; return expr_value_type_strings[type]; }
func_before:

ExprAppendMultiKeysymList(ExprDef *expr, ExprDef *append)
{
    unsigned nSyms = darray_size(expr->keysym_list.syms);
    unsigned numEntries = darray_size(append->keysym_list.syms);

    darray_append(expr->keysym_list.symsMapIndex, nSyms);
    darray_append(expr->keysym_list.symsNumEntries, numEntries);
    darray_concat(expr->keysym_list.syms, append->keysym_list.syms);

    FreeStmt((ParseCommon *) &append);

    return expr;
}
func_after:

ExprAppendMultiKeysymList(ExprDef *expr, ExprDef *append)
{
    unsigned nSyms = darray_size(expr->keysym_list.syms);
    unsigned numEntries = darray_size(append->keysym_list.syms);

    darray_append(expr->keysym_list.symsMapIndex, nSyms);
    darray_append(expr->keysym_list.symsNumEntries, numEntries);
    darray_concat(expr->keysym_list.syms, append->keysym_list.syms);

    FreeStmt((ParseCommon *) append);

    return expr;
}
diff: {'added': [(243, ' FreeStmt((ParseCommon *) append);')], 'deleted': [(243, ' FreeStmt((ParseCommon *) &append);')]}
num_lines_added: 1
num_lines_deleted: 1
num_lines_in_file: 622
num_tokens_in_file: 3400
repo: https://github.com/xkbcommon/libxkbcommon
cve_id: CVE-2018-15857
cwe_id: ['CWE-416']
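This record is a one-character fix inside ExprAppendMultiKeysymList: func_before frees (ParseCommon *) &append, the address of the local pointer variable, while func_after frees (ParseCommon *) append, the heap node itself. Since FreeStmt() walks the next chain and calls free() on each entry (see the listings above), handing it a reinterpreted stack slot is undefined behaviour, which matches the record's CWE-416 tag. A minimal, self-contained sketch of the same mistake; 'node' and 'node_free' are hypothetical stand-ins, not libxkbcommon code:

/* Minimal sketch of the CVE-2018-15857 bug class. 'node' and 'node_free'
 * are hypothetical stand-ins for libxkbcommon's ParseCommon/FreeStmt. */
#include <stdio.h>
#include <stdlib.h>

struct node {
    struct node *next;
};

static void node_free(struct node *n)
{
    while (n) {                      /* walk the chain, as FreeStmt() does */
        struct node *next = n->next;
        free(n);
        n = next;
    }
}

static void consume(struct node *append)
{
    /* Buggy form (func_before): &append is the address of the local
     * pointer variable on this stack frame, not the heap node. Freeing
     * a reinterpreted stack slot is undefined behaviour, and the real
     * node leaks:
     *
     *     node_free((struct node *) &append);
     */
    node_free(append);               /* fixed form (func_after) */
}

int main(void)
{
    struct node *n = calloc(1, sizeof(*n));

    if (!n)
        return 1;
    consume(n);
    puts("node freed correctly");
    return 0;
}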
file_name: smb2pdu.c
method_name: SMB2_sess_establish_session
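The code_before cell below opens with smb2_hdr_assemble(), whose comment documents the credit policy: request up to two credits, but never push the total past the server limit. A standalone sketch of that clamp follows; the parameter names are simplified stand-ins for fields of the kernel's TCP_Server_Info, and this is not kernel code:

/* Sketch of the CreditRequest clamp in smb2_hdr_assemble() below.
 * Simplified stand-in, not kernel code. */
#include <stdio.h>

static unsigned short credit_request(int credits, int max_credits)
{
    int want;

    if (credits >= max_credits)      /* already at (or over) the limit */
        return 0;
    want = max_credits - credits;    /* headroom left */
    return (unsigned short)(want < 2 ? want : 2);
}

int main(void)
{
    printf("%u\n", credit_request(10, 512));   /* 2: plenty of headroom */
    printf("%u\n", credit_request(511, 512));  /* 1: one credit left */
    printf("%u\n", credit_request(512, 512));  /* 0: at the limit */
    return 0;
}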
/* * fs/cifs/smb2pdu.c * * Copyright (C) International Business Machines Corp., 2009, 2013 * Etersoft, 2012 * Author(s): Steve French (sfrench@us.ibm.com) * Pavel Shilovsky (pshilovsky@samba.org) 2012 * * Contains the routines for constructing the SMB2 PDUs themselves * * This library is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published * by the Free Software Foundation; either version 2.1 of the License, or * (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See * the GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* SMB2 PDU handling routines here - except for leftovers (eg session setup) */ /* Note that there are handle based routines which must be */ /* treated slightly differently for reconnection purposes since we never */ /* want to reuse a stale file handle and only the caller knows the file info */ #include <linux/fs.h> #include <linux/kernel.h> #include <linux/vfs.h> #include <linux/task_io_accounting_ops.h> #include <linux/uaccess.h> #include <linux/pagemap.h> #include <linux/xattr.h> #include "smb2pdu.h" #include "cifsglob.h" #include "cifsacl.h" #include "cifsproto.h" #include "smb2proto.h" #include "cifs_unicode.h" #include "cifs_debug.h" #include "ntlmssp.h" #include "smb2status.h" #include "smb2glob.h" #include "cifspdu.h" #include "cifs_spnego.h" /* * The following table defines the expected "StructureSize" of SMB2 requests * in order by SMB2 command. This is similar to "wct" in SMB/CIFS requests. * * Note that commands are defined in smb2pdu.h in le16 but the array below is * indexed by command in host byte order. */ static const int smb2_req_struct_sizes[NUMBER_OF_SMB2_COMMANDS] = { /* SMB2_NEGOTIATE */ 36, /* SMB2_SESSION_SETUP */ 25, /* SMB2_LOGOFF */ 4, /* SMB2_TREE_CONNECT */ 9, /* SMB2_TREE_DISCONNECT */ 4, /* SMB2_CREATE */ 57, /* SMB2_CLOSE */ 24, /* SMB2_FLUSH */ 24, /* SMB2_READ */ 49, /* SMB2_WRITE */ 49, /* SMB2_LOCK */ 48, /* SMB2_IOCTL */ 57, /* SMB2_CANCEL */ 4, /* SMB2_ECHO */ 4, /* SMB2_QUERY_DIRECTORY */ 33, /* SMB2_CHANGE_NOTIFY */ 32, /* SMB2_QUERY_INFO */ 41, /* SMB2_SET_INFO */ 33, /* SMB2_OPLOCK_BREAK */ 24 /* BB this is 36 for LEASE_BREAK variant */ }; static int encryption_required(const struct cifs_tcon *tcon) { if ((tcon->ses->session_flags & SMB2_SESSION_FLAG_ENCRYPT_DATA) || (tcon->share_flags & SHI1005_FLAGS_ENCRYPT_DATA)) return 1; return 0; } static void smb2_hdr_assemble(struct smb2_sync_hdr *shdr, __le16 smb2_cmd, const struct cifs_tcon *tcon) { shdr->ProtocolId = SMB2_PROTO_NUMBER; shdr->StructureSize = cpu_to_le16(64); shdr->Command = smb2_cmd; if (tcon && tcon->ses && tcon->ses->server) { struct TCP_Server_Info *server = tcon->ses->server; spin_lock(&server->req_lock); /* Request up to 2 credits but don't go over the limit. 
*/ if (server->credits >= server->max_credits) shdr->CreditRequest = cpu_to_le16(0); else shdr->CreditRequest = cpu_to_le16( min_t(int, server->max_credits - server->credits, 2)); spin_unlock(&server->req_lock); } else { shdr->CreditRequest = cpu_to_le16(2); } shdr->ProcessId = cpu_to_le32((__u16)current->tgid); if (!tcon) goto out; /* GLOBAL_CAP_LARGE_MTU will only be set if dialect > SMB2.02 */ /* See sections 2.2.4 and 3.2.4.1.5 of MS-SMB2 */ if ((tcon->ses) && (tcon->ses->server) && (tcon->ses->server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU)) shdr->CreditCharge = cpu_to_le16(1); /* else CreditCharge MBZ */ shdr->TreeId = tcon->tid; /* Uid is not converted */ if (tcon->ses) shdr->SessionId = tcon->ses->Suid; /* * If we would set SMB2_FLAGS_DFS_OPERATIONS on open we also would have * to pass the path on the Open SMB prefixed by \\server\share. * Not sure when we would need to do the augmented path (if ever) and * setting this flag breaks the SMB2 open operation since it is * illegal to send an empty path name (without \\server\share prefix) * when the DFS flag is set in the SMB open header. We could * consider setting the flag on all operations other than open * but it is safer to net set it for now. */ /* if (tcon->share_flags & SHI1005_FLAGS_DFS) shdr->Flags |= SMB2_FLAGS_DFS_OPERATIONS; */ if (tcon->ses && tcon->ses->server && tcon->ses->server->sign && !encryption_required(tcon)) shdr->Flags |= SMB2_FLAGS_SIGNED; out: return; } static int smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon) { int rc = 0; struct nls_table *nls_codepage; struct cifs_ses *ses; struct TCP_Server_Info *server; /* * SMB2s NegProt, SessSetup, Logoff do not have tcon yet so * check for tcp and smb session status done differently * for those three - in the calling routine. */ if (tcon == NULL) return rc; if (smb2_command == SMB2_TREE_CONNECT) return rc; if (tcon->tidStatus == CifsExiting) { /* * only tree disconnect, open, and write, * (and ulogoff which does not have tcon) * are allowed as we start force umount. */ if ((smb2_command != SMB2_WRITE) && (smb2_command != SMB2_CREATE) && (smb2_command != SMB2_TREE_DISCONNECT)) { cifs_dbg(FYI, "can not send cmd %d while umounting\n", smb2_command); return -ENODEV; } } if ((!tcon->ses) || (tcon->ses->status == CifsExiting) || (!tcon->ses->server)) return -EIO; ses = tcon->ses; server = ses->server; /* * Give demultiplex thread up to 10 seconds to reconnect, should be * greater than cifs socket timeout which is 7 seconds */ while (server->tcpStatus == CifsNeedReconnect) { /* * Return to caller for TREE_DISCONNECT and LOGOFF and CLOSE * here since they are implicitly done when session drops. */ switch (smb2_command) { /* * BB Should we keep oplock break and add flush to exceptions? */ case SMB2_TREE_DISCONNECT: case SMB2_CANCEL: case SMB2_CLOSE: case SMB2_OPLOCK_BREAK: return -EAGAIN; } wait_event_interruptible_timeout(server->response_q, (server->tcpStatus != CifsNeedReconnect), 10 * HZ); /* are we still trying to reconnect? */ if (server->tcpStatus != CifsNeedReconnect) break; /* * on "soft" mounts we wait once. 
Hard mounts keep * retrying until process is killed or server comes * back on-line */ if (!tcon->retry) { cifs_dbg(FYI, "gave up waiting on reconnect in smb_init\n"); return -EHOSTDOWN; } } if (!tcon->ses->need_reconnect && !tcon->need_reconnect) return rc; nls_codepage = load_nls_default(); /* * need to prevent multiple threads trying to simultaneously reconnect * the same SMB session */ mutex_lock(&tcon->ses->session_mutex); rc = cifs_negotiate_protocol(0, tcon->ses); if (!rc && tcon->ses->need_reconnect) rc = cifs_setup_session(0, tcon->ses, nls_codepage); if (rc || !tcon->need_reconnect) { mutex_unlock(&tcon->ses->session_mutex); goto out; } cifs_mark_open_files_invalid(tcon); if (tcon->use_persistent) tcon->need_reopen_files = true; rc = SMB2_tcon(0, tcon->ses, tcon->treeName, tcon, nls_codepage); mutex_unlock(&tcon->ses->session_mutex); cifs_dbg(FYI, "reconnect tcon rc = %d\n", rc); if (rc) goto out; if (smb2_command != SMB2_INTERNAL_CMD) queue_delayed_work(cifsiod_wq, &server->reconnect, 0); atomic_inc(&tconInfoReconnectCount); out: /* * Check if handle based operation so we know whether we can continue * or not without returning to caller to reset file handle. */ /* * BB Is flush done by server on drop of tcp session? Should we special * case it and skip above? */ switch (smb2_command) { case SMB2_FLUSH: case SMB2_READ: case SMB2_WRITE: case SMB2_LOCK: case SMB2_IOCTL: case SMB2_QUERY_DIRECTORY: case SMB2_CHANGE_NOTIFY: case SMB2_QUERY_INFO: case SMB2_SET_INFO: rc = -EAGAIN; } unload_nls(nls_codepage); return rc; } static void fill_small_buf(__le16 smb2_command, struct cifs_tcon *tcon, void *buf, unsigned int *total_len) { struct smb2_sync_pdu *spdu = (struct smb2_sync_pdu *)buf; /* lookup word count ie StructureSize from table */ __u16 parmsize = smb2_req_struct_sizes[le16_to_cpu(smb2_command)]; /* * smaller than SMALL_BUFFER_SIZE but bigger than fixed area of * largest operations (Create) */ memset(buf, 0, 256); smb2_hdr_assemble(&spdu->sync_hdr, smb2_command, tcon); spdu->StructureSize2 = cpu_to_le16(parmsize); *total_len = parmsize + sizeof(struct smb2_sync_hdr); } /* init request without RFC1001 length at the beginning */ static int smb2_plain_req_init(__le16 smb2_command, struct cifs_tcon *tcon, void **request_buf, unsigned int *total_len) { int rc; struct smb2_sync_hdr *shdr; rc = smb2_reconnect(smb2_command, tcon); if (rc) return rc; /* BB eventually switch this to SMB2 specific small buf size */ *request_buf = cifs_small_buf_get(); if (*request_buf == NULL) { /* BB should we add a retry in here if not a writepage? */ return -ENOMEM; } shdr = (struct smb2_sync_hdr *)(*request_buf); fill_small_buf(smb2_command, tcon, shdr, total_len); if (tcon != NULL) { #ifdef CONFIG_CIFS_STATS2 uint16_t com_code = le16_to_cpu(smb2_command); cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_sent[com_code]); #endif cifs_stats_inc(&tcon->num_smbs_sent); } return rc; } /* * Allocate and return pointer to an SMB request hdr, and set basic * SMB information in the SMB header. If the return code is zero, this * function must have filled in request_buf pointer. The returned buffer * has RFC1001 length at the beginning. 
*/ static int small_smb2_init(__le16 smb2_command, struct cifs_tcon *tcon, void **request_buf) { int rc; unsigned int total_len; struct smb2_pdu *pdu; rc = smb2_reconnect(smb2_command, tcon); if (rc) return rc; /* BB eventually switch this to SMB2 specific small buf size */ *request_buf = cifs_small_buf_get(); if (*request_buf == NULL) { /* BB should we add a retry in here if not a writepage? */ return -ENOMEM; } pdu = (struct smb2_pdu *)(*request_buf); fill_small_buf(smb2_command, tcon, get_sync_hdr(pdu), &total_len); /* Note this is only network field converted to big endian */ pdu->hdr.smb2_buf_length = cpu_to_be32(total_len); if (tcon != NULL) { #ifdef CONFIG_CIFS_STATS2 uint16_t com_code = le16_to_cpu(smb2_command); cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_sent[com_code]); #endif cifs_stats_inc(&tcon->num_smbs_sent); } return rc; } #ifdef CONFIG_CIFS_SMB311 /* offset is sizeof smb2_negotiate_req - 4 but rounded up to 8 bytes */ #define OFFSET_OF_NEG_CONTEXT 0x68 /* sizeof(struct smb2_negotiate_req) - 4 */ #define SMB2_PREAUTH_INTEGRITY_CAPABILITIES cpu_to_le16(1) #define SMB2_ENCRYPTION_CAPABILITIES cpu_to_le16(2) static void build_preauth_ctxt(struct smb2_preauth_neg_context *pneg_ctxt) { pneg_ctxt->ContextType = SMB2_PREAUTH_INTEGRITY_CAPABILITIES; pneg_ctxt->DataLength = cpu_to_le16(38); pneg_ctxt->HashAlgorithmCount = cpu_to_le16(1); pneg_ctxt->SaltLength = cpu_to_le16(SMB311_SALT_SIZE); get_random_bytes(pneg_ctxt->Salt, SMB311_SALT_SIZE); pneg_ctxt->HashAlgorithms = SMB2_PREAUTH_INTEGRITY_SHA512; } static void build_encrypt_ctxt(struct smb2_encryption_neg_context *pneg_ctxt) { pneg_ctxt->ContextType = SMB2_ENCRYPTION_CAPABILITIES; pneg_ctxt->DataLength = cpu_to_le16(6); pneg_ctxt->CipherCount = cpu_to_le16(2); pneg_ctxt->Ciphers[0] = SMB2_ENCRYPTION_AES128_GCM; pneg_ctxt->Ciphers[1] = SMB2_ENCRYPTION_AES128_CCM; } static void assemble_neg_contexts(struct smb2_negotiate_req *req) { /* +4 is to account for the RFC1001 len field */ char *pneg_ctxt = (char *)req + OFFSET_OF_NEG_CONTEXT + 4; build_preauth_ctxt((struct smb2_preauth_neg_context *)pneg_ctxt); /* Add 2 to size to round to 8 byte boundary */ pneg_ctxt += 2 + sizeof(struct smb2_preauth_neg_context); build_encrypt_ctxt((struct smb2_encryption_neg_context *)pneg_ctxt); req->NegotiateContextOffset = cpu_to_le32(OFFSET_OF_NEG_CONTEXT); req->NegotiateContextCount = cpu_to_le16(2); inc_rfc1001_len(req, 4 + sizeof(struct smb2_preauth_neg_context) + 2 + sizeof(struct smb2_encryption_neg_context)); /* calculate hash */ } #else static void assemble_neg_contexts(struct smb2_negotiate_req *req) { return; } #endif /* SMB311 */ /* * * SMB2 Worker functions follow: * * The general structure of the worker functions is: * 1) Call smb2_init (assembles SMB2 header) * 2) Initialize SMB2 command specific fields in fixed length area of SMB * 3) Call smb_sendrcv2 (sends request on socket and waits for response) * 4) Decode SMB2 command specific fields in the fixed length area * 5) Decode variable length data area (if any for this SMB2 command type) * 6) Call free smb buffer * 7) return * */ int SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses) { struct smb2_negotiate_req *req; struct smb2_negotiate_rsp *rsp; struct kvec iov[1]; struct kvec rsp_iov; int rc = 0; int resp_buftype; struct TCP_Server_Info *server = ses->server; int blob_offset, blob_length; char *security_blob; int flags = CIFS_NEG_OP; cifs_dbg(FYI, "Negotiate protocol\n"); if (!server) { WARN(1, "%s: server is NULL!\n", __func__); return -EIO; } rc = 
small_smb2_init(SMB2_NEGOTIATE, NULL, (void **) &req); if (rc) return rc; req->hdr.sync_hdr.SessionId = 0; req->Dialects[0] = cpu_to_le16(ses->server->vals->protocol_id); req->DialectCount = cpu_to_le16(1); /* One vers= at a time for now */ inc_rfc1001_len(req, 2); /* only one of SMB2 signing flags may be set in SMB2 request */ if (ses->sign) req->SecurityMode = cpu_to_le16(SMB2_NEGOTIATE_SIGNING_REQUIRED); else if (global_secflags & CIFSSEC_MAY_SIGN) req->SecurityMode = cpu_to_le16(SMB2_NEGOTIATE_SIGNING_ENABLED); else req->SecurityMode = 0; req->Capabilities = cpu_to_le32(ses->server->vals->req_capabilities); /* ClientGUID must be zero for SMB2.02 dialect */ if (ses->server->vals->protocol_id == SMB20_PROT_ID) memset(req->ClientGUID, 0, SMB2_CLIENT_GUID_SIZE); else { memcpy(req->ClientGUID, server->client_guid, SMB2_CLIENT_GUID_SIZE); if (ses->server->vals->protocol_id == SMB311_PROT_ID) assemble_neg_contexts(req); } iov[0].iov_base = (char *)req; /* 4 for rfc1002 length field */ iov[0].iov_len = get_rfc1002_length(req) + 4; rc = SendReceive2(xid, ses, iov, 1, &resp_buftype, flags, &rsp_iov); cifs_small_buf_release(req); rsp = (struct smb2_negotiate_rsp *)rsp_iov.iov_base; /* * No tcon so can't do * cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_fail[SMB2...]); */ if (rc != 0) goto neg_exit; cifs_dbg(FYI, "mode 0x%x\n", rsp->SecurityMode); /* BB we may eventually want to match the negotiated vs. requested dialect, even though we are only requesting one at a time */ if (rsp->DialectRevision == cpu_to_le16(SMB20_PROT_ID)) cifs_dbg(FYI, "negotiated smb2.0 dialect\n"); else if (rsp->DialectRevision == cpu_to_le16(SMB21_PROT_ID)) cifs_dbg(FYI, "negotiated smb2.1 dialect\n"); else if (rsp->DialectRevision == cpu_to_le16(SMB30_PROT_ID)) cifs_dbg(FYI, "negotiated smb3.0 dialect\n"); else if (rsp->DialectRevision == cpu_to_le16(SMB302_PROT_ID)) cifs_dbg(FYI, "negotiated smb3.02 dialect\n"); #ifdef CONFIG_CIFS_SMB311 else if (rsp->DialectRevision == cpu_to_le16(SMB311_PROT_ID)) cifs_dbg(FYI, "negotiated smb3.1.1 dialect\n"); #endif /* SMB311 */ else { cifs_dbg(VFS, "Illegal dialect returned by server 0x%x\n", le16_to_cpu(rsp->DialectRevision)); rc = -EIO; goto neg_exit; } server->dialect = le16_to_cpu(rsp->DialectRevision); /* SMB2 only has an extended negflavor */ server->negflavor = CIFS_NEGFLAVOR_EXTENDED; /* set it to the maximum buffer size value we can send with 1 credit */ server->maxBuf = min_t(unsigned int, le32_to_cpu(rsp->MaxTransactSize), SMB2_MAX_BUFFER_SIZE); server->max_read = le32_to_cpu(rsp->MaxReadSize); server->max_write = le32_to_cpu(rsp->MaxWriteSize); /* BB Do we need to validate the SecurityMode? */ server->sec_mode = le16_to_cpu(rsp->SecurityMode); server->capabilities = le32_to_cpu(rsp->Capabilities); /* Internal types */ server->capabilities |= SMB2_NT_FIND | SMB2_LARGE_FILES; security_blob = smb2_get_data_area_len(&blob_offset, &blob_length, &rsp->hdr); /* * See MS-SMB2 section 2.2.4: if no blob, client picks default which * for us will be * ses->sectype = RawNTLMSSP; * but for time being this is our only auth choice so doesn't matter. * We just found a server which sets blob length to zero expecting raw. 
*/ if (blob_length == 0) cifs_dbg(FYI, "missing security blob on negprot\n"); rc = cifs_enable_signing(server, ses->sign); if (rc) goto neg_exit; if (blob_length) { rc = decode_negTokenInit(security_blob, blob_length, server); if (rc == 1) rc = 0; else if (rc == 0) rc = -EIO; } neg_exit: free_rsp_buf(resp_buftype, rsp); return rc; } int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon) { int rc = 0; struct validate_negotiate_info_req vneg_inbuf; struct validate_negotiate_info_rsp *pneg_rsp; u32 rsplen; cifs_dbg(FYI, "validate negotiate\n"); /* * validation ioctl must be signed, so no point sending this if we * can not sign it. We could eventually change this to selectively * sign just this, the first and only signed request on a connection. * This is good enough for now since a user who wants better security * would also enable signing on the mount. Having validation of * negotiate info for signed connections helps reduce attack vectors */ if (tcon->ses->server->sign == false) return 0; /* validation requires signing */ vneg_inbuf.Capabilities = cpu_to_le32(tcon->ses->server->vals->req_capabilities); memcpy(vneg_inbuf.Guid, tcon->ses->server->client_guid, SMB2_CLIENT_GUID_SIZE); if (tcon->ses->sign) vneg_inbuf.SecurityMode = cpu_to_le16(SMB2_NEGOTIATE_SIGNING_REQUIRED); else if (global_secflags & CIFSSEC_MAY_SIGN) vneg_inbuf.SecurityMode = cpu_to_le16(SMB2_NEGOTIATE_SIGNING_ENABLED); else vneg_inbuf.SecurityMode = 0; vneg_inbuf.DialectCount = cpu_to_le16(1); vneg_inbuf.Dialects[0] = cpu_to_le16(tcon->ses->server->vals->protocol_id); rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID, FSCTL_VALIDATE_NEGOTIATE_INFO, true /* is_fsctl */, (char *)&vneg_inbuf, sizeof(struct validate_negotiate_info_req), (char **)&pneg_rsp, &rsplen); if (rc != 0) { cifs_dbg(VFS, "validate protocol negotiate failed: %d\n", rc); return -EIO; } if (rsplen != sizeof(struct validate_negotiate_info_rsp)) { cifs_dbg(VFS, "invalid size of protocol negotiate response\n"); return -EIO; } /* check validate negotiate info response matches what we got earlier */ if (pneg_rsp->Dialect != cpu_to_le16(tcon->ses->server->vals->protocol_id)) goto vneg_out; if (pneg_rsp->SecurityMode != cpu_to_le16(tcon->ses->server->sec_mode)) goto vneg_out; /* do not validate server guid because not saved at negprot time yet */ if ((le32_to_cpu(pneg_rsp->Capabilities) | SMB2_NT_FIND | SMB2_LARGE_FILES) != tcon->ses->server->capabilities) goto vneg_out; /* validate negotiate successful */ cifs_dbg(FYI, "validate negotiate info successful\n"); return 0; vneg_out: cifs_dbg(VFS, "protocol revalidation - security settings mismatch\n"); return -EIO; } struct SMB2_sess_data { unsigned int xid; struct cifs_ses *ses; struct nls_table *nls_cp; void (*func)(struct SMB2_sess_data *); int result; u64 previous_session; /* we will send the SMB in three pieces: * a fixed length beginning part, an optional * SPNEGO blob (which can be zero length), and a * last part which will include the strings * and rest of bcc area. 
This allows us to avoid * a large buffer 17K allocation */ int buf0_type; struct kvec iov[2]; }; static int SMB2_sess_alloc_buffer(struct SMB2_sess_data *sess_data) { int rc; struct cifs_ses *ses = sess_data->ses; struct smb2_sess_setup_req *req; struct TCP_Server_Info *server = ses->server; rc = small_smb2_init(SMB2_SESSION_SETUP, NULL, (void **) &req); if (rc) return rc; /* First session, not a reauthenticate */ req->hdr.sync_hdr.SessionId = 0; /* if reconnect, we need to send previous sess id, otherwise it is 0 */ req->PreviousSessionId = sess_data->previous_session; req->Flags = 0; /* MBZ */ /* to enable echos and oplocks */ req->hdr.sync_hdr.CreditRequest = cpu_to_le16(3); /* only one of SMB2 signing flags may be set in SMB2 request */ if (server->sign) req->SecurityMode = SMB2_NEGOTIATE_SIGNING_REQUIRED; else if (global_secflags & CIFSSEC_MAY_SIGN) /* one flag unlike MUST_ */ req->SecurityMode = SMB2_NEGOTIATE_SIGNING_ENABLED; else req->SecurityMode = 0; req->Capabilities = 0; req->Channel = 0; /* MBZ */ sess_data->iov[0].iov_base = (char *)req; /* 4 for rfc1002 length field and 1 for pad */ sess_data->iov[0].iov_len = get_rfc1002_length(req) + 4 - 1; /* * This variable will be used to clear the buffer * allocated above in case of any error in the calling function. */ sess_data->buf0_type = CIFS_SMALL_BUFFER; return 0; } static void SMB2_sess_free_buffer(struct SMB2_sess_data *sess_data) { free_rsp_buf(sess_data->buf0_type, sess_data->iov[0].iov_base); sess_data->buf0_type = CIFS_NO_BUFFER; } static int SMB2_sess_sendreceive(struct SMB2_sess_data *sess_data) { int rc; struct smb2_sess_setup_req *req = sess_data->iov[0].iov_base; struct kvec rsp_iov = { NULL, 0 }; /* Testing shows that buffer offset must be at location of Buffer[0] */ req->SecurityBufferOffset = cpu_to_le16(sizeof(struct smb2_sess_setup_req) - 1 /* pad */ - 4 /* rfc1001 len */); req->SecurityBufferLength = cpu_to_le16(sess_data->iov[1].iov_len); inc_rfc1001_len(req, sess_data->iov[1].iov_len - 1 /* pad */); /* BB add code to build os and lm fields */ rc = SendReceive2(sess_data->xid, sess_data->ses, sess_data->iov, 2, &sess_data->buf0_type, CIFS_LOG_ERROR | CIFS_NEG_OP, &rsp_iov); cifs_small_buf_release(sess_data->iov[0].iov_base); memcpy(&sess_data->iov[0], &rsp_iov, sizeof(struct kvec)); return rc; } static int SMB2_sess_establish_session(struct SMB2_sess_data *sess_data) { int rc = 0; struct cifs_ses *ses = sess_data->ses; mutex_lock(&ses->server->srv_mutex); if (ses->server->sign && ses->server->ops->generate_signingkey) { rc = ses->server->ops->generate_signingkey(ses); kfree(ses->auth_key.response); ses->auth_key.response = NULL; if (rc) { cifs_dbg(FYI, "SMB3 session key generation failed\n"); mutex_unlock(&ses->server->srv_mutex); goto keygen_exit; } } if (!ses->server->session_estab) { ses->server->sequence_number = 0x2; ses->server->session_estab = true; } mutex_unlock(&ses->server->srv_mutex); cifs_dbg(FYI, "SMB2/3 session established successfully\n"); spin_lock(&GlobalMid_Lock); ses->status = CifsGood; ses->need_reconnect = false; spin_unlock(&GlobalMid_Lock); keygen_exit: if (!ses->server->sign) { kfree(ses->auth_key.response); ses->auth_key.response = NULL; } return rc; } #ifdef CONFIG_CIFS_UPCALL static void SMB2_auth_kerberos(struct SMB2_sess_data *sess_data) { int rc; struct cifs_ses *ses = sess_data->ses; struct cifs_spnego_msg *msg; struct key *spnego_key = NULL; struct smb2_sess_setup_rsp *rsp = NULL; rc = SMB2_sess_alloc_buffer(sess_data); if (rc) goto out; spnego_key = cifs_get_spnego_key(ses); if 
(IS_ERR(spnego_key)) { rc = PTR_ERR(spnego_key); spnego_key = NULL; goto out; } msg = spnego_key->payload.data[0]; /* * check version field to make sure that cifs.upcall is * sending us a response in an expected form */ if (msg->version != CIFS_SPNEGO_UPCALL_VERSION) { cifs_dbg(VFS, "bad cifs.upcall version. Expected %d got %d", CIFS_SPNEGO_UPCALL_VERSION, msg->version); rc = -EKEYREJECTED; goto out_put_spnego_key; } ses->auth_key.response = kmemdup(msg->data, msg->sesskey_len, GFP_KERNEL); if (!ses->auth_key.response) { cifs_dbg(VFS, "Kerberos can't allocate (%u bytes) memory", msg->sesskey_len); rc = -ENOMEM; goto out_put_spnego_key; } ses->auth_key.len = msg->sesskey_len; sess_data->iov[1].iov_base = msg->data + msg->sesskey_len; sess_data->iov[1].iov_len = msg->secblob_len; rc = SMB2_sess_sendreceive(sess_data); if (rc) goto out_put_spnego_key; rsp = (struct smb2_sess_setup_rsp *)sess_data->iov[0].iov_base; ses->Suid = rsp->hdr.sync_hdr.SessionId; ses->session_flags = le16_to_cpu(rsp->SessionFlags); if (ses->session_flags & SMB2_SESSION_FLAG_ENCRYPT_DATA) cifs_dbg(VFS, "SMB3 encryption not supported yet\n"); rc = SMB2_sess_establish_session(sess_data); out_put_spnego_key: key_invalidate(spnego_key); key_put(spnego_key); out: sess_data->result = rc; sess_data->func = NULL; SMB2_sess_free_buffer(sess_data); } #else static void SMB2_auth_kerberos(struct SMB2_sess_data *sess_data) { cifs_dbg(VFS, "Kerberos negotiated but upcall support disabled!\n"); sess_data->result = -EOPNOTSUPP; sess_data->func = NULL; } #endif static void SMB2_sess_auth_rawntlmssp_authenticate(struct SMB2_sess_data *sess_data); static void SMB2_sess_auth_rawntlmssp_negotiate(struct SMB2_sess_data *sess_data) { int rc; struct cifs_ses *ses = sess_data->ses; struct smb2_sess_setup_rsp *rsp = NULL; char *ntlmssp_blob = NULL; bool use_spnego = false; /* else use raw ntlmssp */ u16 blob_length = 0; /* * If memory allocation is successful, caller of this function * frees it. 
*/ ses->ntlmssp = kmalloc(sizeof(struct ntlmssp_auth), GFP_KERNEL); if (!ses->ntlmssp) { rc = -ENOMEM; goto out_err; } ses->ntlmssp->sesskey_per_smbsess = true; rc = SMB2_sess_alloc_buffer(sess_data); if (rc) goto out_err; ntlmssp_blob = kmalloc(sizeof(struct _NEGOTIATE_MESSAGE), GFP_KERNEL); if (ntlmssp_blob == NULL) { rc = -ENOMEM; goto out; } build_ntlmssp_negotiate_blob(ntlmssp_blob, ses); if (use_spnego) { /* BB eventually need to add this */ cifs_dbg(VFS, "spnego not supported for SMB2 yet\n"); rc = -EOPNOTSUPP; goto out; } else { blob_length = sizeof(struct _NEGOTIATE_MESSAGE); /* with raw NTLMSSP we don't encapsulate in SPNEGO */ } sess_data->iov[1].iov_base = ntlmssp_blob; sess_data->iov[1].iov_len = blob_length; rc = SMB2_sess_sendreceive(sess_data); rsp = (struct smb2_sess_setup_rsp *)sess_data->iov[0].iov_base; /* If true, rc here is expected and not an error */ if (sess_data->buf0_type != CIFS_NO_BUFFER && rsp->hdr.sync_hdr.Status == STATUS_MORE_PROCESSING_REQUIRED) rc = 0; if (rc) goto out; if (offsetof(struct smb2_sess_setup_rsp, Buffer) - 4 != le16_to_cpu(rsp->SecurityBufferOffset)) { cifs_dbg(VFS, "Invalid security buffer offset %d\n", le16_to_cpu(rsp->SecurityBufferOffset)); rc = -EIO; goto out; } rc = decode_ntlmssp_challenge(rsp->Buffer, le16_to_cpu(rsp->SecurityBufferLength), ses); if (rc) goto out; cifs_dbg(FYI, "rawntlmssp session setup challenge phase\n"); ses->Suid = rsp->hdr.sync_hdr.SessionId; ses->session_flags = le16_to_cpu(rsp->SessionFlags); if (ses->session_flags & SMB2_SESSION_FLAG_ENCRYPT_DATA) cifs_dbg(VFS, "SMB3 encryption not supported yet\n"); out: kfree(ntlmssp_blob); SMB2_sess_free_buffer(sess_data); if (!rc) { sess_data->result = 0; sess_data->func = SMB2_sess_auth_rawntlmssp_authenticate; return; } out_err: kfree(ses->ntlmssp); ses->ntlmssp = NULL; sess_data->result = rc; sess_data->func = NULL; } static void SMB2_sess_auth_rawntlmssp_authenticate(struct SMB2_sess_data *sess_data) { int rc; struct cifs_ses *ses = sess_data->ses; struct smb2_sess_setup_req *req; struct smb2_sess_setup_rsp *rsp = NULL; unsigned char *ntlmssp_blob = NULL; bool use_spnego = false; /* else use raw ntlmssp */ u16 blob_length = 0; rc = SMB2_sess_alloc_buffer(sess_data); if (rc) goto out; req = (struct smb2_sess_setup_req *) sess_data->iov[0].iov_base; req->hdr.sync_hdr.SessionId = ses->Suid; rc = build_ntlmssp_auth_blob(&ntlmssp_blob, &blob_length, ses, sess_data->nls_cp); if (rc) { cifs_dbg(FYI, "build_ntlmssp_auth_blob failed %d\n", rc); goto out; } if (use_spnego) { /* BB eventually need to add this */ cifs_dbg(VFS, "spnego not supported for SMB2 yet\n"); rc = -EOPNOTSUPP; goto out; } sess_data->iov[1].iov_base = ntlmssp_blob; sess_data->iov[1].iov_len = blob_length; rc = SMB2_sess_sendreceive(sess_data); if (rc) goto out; rsp = (struct smb2_sess_setup_rsp *)sess_data->iov[0].iov_base; ses->Suid = rsp->hdr.sync_hdr.SessionId; ses->session_flags = le16_to_cpu(rsp->SessionFlags); if (ses->session_flags & SMB2_SESSION_FLAG_ENCRYPT_DATA) cifs_dbg(VFS, "SMB3 encryption not supported yet\n"); rc = SMB2_sess_establish_session(sess_data); out: kfree(ntlmssp_blob); SMB2_sess_free_buffer(sess_data); kfree(ses->ntlmssp); ses->ntlmssp = NULL; sess_data->result = rc; sess_data->func = NULL; } static int SMB2_select_sec(struct cifs_ses *ses, struct SMB2_sess_data *sess_data) { if (ses->sectype != Kerberos && ses->sectype != RawNTLMSSP) ses->sectype = RawNTLMSSP; switch (ses->sectype) { case Kerberos: sess_data->func = SMB2_auth_kerberos; break; case RawNTLMSSP: sess_data->func = 
SMB2_sess_auth_rawntlmssp_negotiate; break; default: cifs_dbg(VFS, "secType %d not supported!\n", ses->sectype); return -EOPNOTSUPP; } return 0; } int SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses, const struct nls_table *nls_cp) { int rc = 0; struct TCP_Server_Info *server = ses->server; struct SMB2_sess_data *sess_data; cifs_dbg(FYI, "Session Setup\n"); if (!server) { WARN(1, "%s: server is NULL!\n", __func__); return -EIO; } sess_data = kzalloc(sizeof(struct SMB2_sess_data), GFP_KERNEL); if (!sess_data) return -ENOMEM; rc = SMB2_select_sec(ses, sess_data); if (rc) goto out; sess_data->xid = xid; sess_data->ses = ses; sess_data->buf0_type = CIFS_NO_BUFFER; sess_data->nls_cp = (struct nls_table *) nls_cp; while (sess_data->func) sess_data->func(sess_data); rc = sess_data->result; out: kfree(sess_data); return rc; } int SMB2_logoff(const unsigned int xid, struct cifs_ses *ses) { struct smb2_logoff_req *req; /* response is also trivial struct */ int rc = 0; struct TCP_Server_Info *server; int flags = 0; cifs_dbg(FYI, "disconnect session %p\n", ses); if (ses && (ses->server)) server = ses->server; else return -EIO; /* no need to send SMB logoff if uid already closed due to reconnect */ if (ses->need_reconnect) goto smb2_session_already_dead; rc = small_smb2_init(SMB2_LOGOFF, NULL, (void **) &req); if (rc) return rc; /* since no tcon, smb2_init can not do this, so do here */ req->hdr.sync_hdr.SessionId = ses->Suid; if (ses->session_flags & SMB2_SESSION_FLAG_ENCRYPT_DATA) flags |= CIFS_TRANSFORM_REQ; else if (server->sign) req->hdr.sync_hdr.Flags |= SMB2_FLAGS_SIGNED; rc = SendReceiveNoRsp(xid, ses, (char *) req, flags); cifs_small_buf_release(req); /* * No tcon so can't do * cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_fail[SMB2...]); */ smb2_session_already_dead: return rc; } static inline void cifs_stats_fail_inc(struct cifs_tcon *tcon, uint16_t code) { cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_failed[code]); } #define MAX_SHARENAME_LENGTH (255 /* server */ + 80 /* share */ + 1 /* NULL */) /* These are similar values to what Windows uses */ static inline void init_copy_chunk_defaults(struct cifs_tcon *tcon) { tcon->max_chunks = 256; tcon->max_bytes_chunk = 1048576; tcon->max_bytes_copy = 16777216; } int SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree, struct cifs_tcon *tcon, const struct nls_table *cp) { struct smb2_tree_connect_req *req; struct smb2_tree_connect_rsp *rsp = NULL; struct kvec iov[2]; struct kvec rsp_iov; int rc = 0; int resp_buftype; int unc_path_len; struct TCP_Server_Info *server; __le16 *unc_path = NULL; int flags = 0; cifs_dbg(FYI, "TCON\n"); if ((ses->server) && tree) server = ses->server; else return -EIO; if (tcon && tcon->bad_network_name) return -ENOENT; if ((tcon && tcon->seal) && ((ses->server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION) == 0)) { cifs_dbg(VFS, "encryption requested but no server support"); return -EOPNOTSUPP; } unc_path = kmalloc(MAX_SHARENAME_LENGTH * 2, GFP_KERNEL); if (unc_path == NULL) return -ENOMEM; unc_path_len = cifs_strtoUTF16(unc_path, tree, strlen(tree), cp) + 1; unc_path_len *= 2; if (unc_path_len < 2) { kfree(unc_path); return -EINVAL; } rc = small_smb2_init(SMB2_TREE_CONNECT, tcon, (void **) &req); if (rc) { kfree(unc_path); return rc; } if (ses->session_flags & SMB2_SESSION_FLAG_ENCRYPT_DATA) flags |= CIFS_TRANSFORM_REQ; if (tcon == NULL) { /* since no tcon, smb2_init can not do this, so do here */ req->hdr.sync_hdr.SessionId = ses->Suid; /* if (ses->server->sec_mode & 
SECMODE_SIGN_REQUIRED) req->hdr.Flags |= SMB2_FLAGS_SIGNED; */ } iov[0].iov_base = (char *)req; /* 4 for rfc1002 length field and 1 for pad */ iov[0].iov_len = get_rfc1002_length(req) + 4 - 1; /* Testing shows that buffer offset must be at location of Buffer[0] */ req->PathOffset = cpu_to_le16(sizeof(struct smb2_tree_connect_req) - 1 /* pad */ - 4 /* do not count rfc1001 len field */); req->PathLength = cpu_to_le16(unc_path_len - 2); iov[1].iov_base = unc_path; iov[1].iov_len = unc_path_len; inc_rfc1001_len(req, unc_path_len - 1 /* pad */); rc = SendReceive2(xid, ses, iov, 2, &resp_buftype, flags, &rsp_iov); cifs_small_buf_release(req); rsp = (struct smb2_tree_connect_rsp *)rsp_iov.iov_base; if (rc != 0) { if (tcon) { cifs_stats_fail_inc(tcon, SMB2_TREE_CONNECT_HE); tcon->need_reconnect = true; } goto tcon_error_exit; } if (tcon == NULL) { ses->ipc_tid = rsp->hdr.sync_hdr.TreeId; goto tcon_exit; } if (rsp->ShareType & SMB2_SHARE_TYPE_DISK) cifs_dbg(FYI, "connection to disk share\n"); else if (rsp->ShareType & SMB2_SHARE_TYPE_PIPE) { tcon->ipc = true; cifs_dbg(FYI, "connection to pipe share\n"); } else if (rsp->ShareType & SMB2_SHARE_TYPE_PRINT) { tcon->print = true; cifs_dbg(FYI, "connection to printer\n"); } else { cifs_dbg(VFS, "unknown share type %d\n", rsp->ShareType); rc = -EOPNOTSUPP; goto tcon_error_exit; } tcon->share_flags = le32_to_cpu(rsp->ShareFlags); tcon->capabilities = rsp->Capabilities; /* we keep caps little endian */ tcon->maximal_access = le32_to_cpu(rsp->MaximalAccess); tcon->tidStatus = CifsGood; tcon->need_reconnect = false; tcon->tid = rsp->hdr.sync_hdr.TreeId; strlcpy(tcon->treeName, tree, sizeof(tcon->treeName)); if ((rsp->Capabilities & SMB2_SHARE_CAP_DFS) && ((tcon->share_flags & SHI1005_FLAGS_DFS) == 0)) cifs_dbg(VFS, "DFS capability contradicts DFS flag\n"); init_copy_chunk_defaults(tcon); if (tcon->share_flags & SHI1005_FLAGS_ENCRYPT_DATA) cifs_dbg(VFS, "Encrypted shares not supported"); if (tcon->ses->server->ops->validate_negotiate) rc = tcon->ses->server->ops->validate_negotiate(xid, tcon); tcon_exit: free_rsp_buf(resp_buftype, rsp); kfree(unc_path); return rc; tcon_error_exit: if (rsp->hdr.sync_hdr.Status == STATUS_BAD_NETWORK_NAME) { cifs_dbg(VFS, "BAD_NETWORK_NAME: %s\n", tree); if (tcon) tcon->bad_network_name = true; } goto tcon_exit; } int SMB2_tdis(const unsigned int xid, struct cifs_tcon *tcon) { struct smb2_tree_disconnect_req *req; /* response is trivial */ int rc = 0; struct TCP_Server_Info *server; struct cifs_ses *ses = tcon->ses; int flags = 0; cifs_dbg(FYI, "Tree Disconnect\n"); if (ses && (ses->server)) server = ses->server; else return -EIO; if ((tcon->need_reconnect) || (tcon->ses->need_reconnect)) return 0; rc = small_smb2_init(SMB2_TREE_DISCONNECT, tcon, (void **) &req); if (rc) return rc; if (encryption_required(tcon)) flags |= CIFS_TRANSFORM_REQ; rc = SendReceiveNoRsp(xid, ses, (char *)req, flags); cifs_small_buf_release(req); if (rc) cifs_stats_fail_inc(tcon, SMB2_TREE_DISCONNECT_HE); return rc; } static struct create_durable * create_durable_buf(void) { struct create_durable *buf; buf = kzalloc(sizeof(struct create_durable), GFP_KERNEL); if (!buf) return NULL; buf->ccontext.DataOffset = cpu_to_le16(offsetof (struct create_durable, Data)); buf->ccontext.DataLength = cpu_to_le32(16); buf->ccontext.NameOffset = cpu_to_le16(offsetof (struct create_durable, Name)); buf->ccontext.NameLength = cpu_to_le16(4); /* SMB2_CREATE_DURABLE_HANDLE_REQUEST is "DHnQ" */ buf->Name[0] = 'D'; buf->Name[1] = 'H'; buf->Name[2] = 'n'; buf->Name[3] = 'Q'; 
return buf; } static struct create_durable * create_reconnect_durable_buf(struct cifs_fid *fid) { struct create_durable *buf; buf = kzalloc(sizeof(struct create_durable), GFP_KERNEL); if (!buf) return NULL; buf->ccontext.DataOffset = cpu_to_le16(offsetof (struct create_durable, Data)); buf->ccontext.DataLength = cpu_to_le32(16); buf->ccontext.NameOffset = cpu_to_le16(offsetof (struct create_durable, Name)); buf->ccontext.NameLength = cpu_to_le16(4); buf->Data.Fid.PersistentFileId = fid->persistent_fid; buf->Data.Fid.VolatileFileId = fid->volatile_fid; /* SMB2_CREATE_DURABLE_HANDLE_RECONNECT is "DHnC" */ buf->Name[0] = 'D'; buf->Name[1] = 'H'; buf->Name[2] = 'n'; buf->Name[3] = 'C'; return buf; } static __u8 parse_lease_state(struct TCP_Server_Info *server, struct smb2_create_rsp *rsp, unsigned int *epoch) { char *data_offset; struct create_context *cc; unsigned int next; unsigned int remaining; char *name; data_offset = (char *)rsp + 4 + le32_to_cpu(rsp->CreateContextsOffset); remaining = le32_to_cpu(rsp->CreateContextsLength); cc = (struct create_context *)data_offset; while (remaining >= sizeof(struct create_context)) { name = le16_to_cpu(cc->NameOffset) + (char *)cc; if (le16_to_cpu(cc->NameLength) == 4 && strncmp(name, "RqLs", 4) == 0) return server->ops->parse_lease_buf(cc, epoch); next = le32_to_cpu(cc->Next); if (!next) break; remaining -= next; cc = (struct create_context *)((char *)cc + next); } return 0; } static int add_lease_context(struct TCP_Server_Info *server, struct kvec *iov, unsigned int *num_iovec, __u8 *oplock) { struct smb2_create_req *req = iov[0].iov_base; unsigned int num = *num_iovec; iov[num].iov_base = server->ops->create_lease_buf(oplock+1, *oplock); if (iov[num].iov_base == NULL) return -ENOMEM; iov[num].iov_len = server->vals->create_lease_size; req->RequestedOplockLevel = SMB2_OPLOCK_LEVEL_LEASE; if (!req->CreateContextsOffset) req->CreateContextsOffset = cpu_to_le32( sizeof(struct smb2_create_req) - 4 + iov[num - 1].iov_len); le32_add_cpu(&req->CreateContextsLength, server->vals->create_lease_size); inc_rfc1001_len(&req->hdr, server->vals->create_lease_size); *num_iovec = num + 1; return 0; } static struct create_durable_v2 * create_durable_v2_buf(struct cifs_fid *pfid) { struct create_durable_v2 *buf; buf = kzalloc(sizeof(struct create_durable_v2), GFP_KERNEL); if (!buf) return NULL; buf->ccontext.DataOffset = cpu_to_le16(offsetof (struct create_durable_v2, dcontext)); buf->ccontext.DataLength = cpu_to_le32(sizeof(struct durable_context_v2)); buf->ccontext.NameOffset = cpu_to_le16(offsetof (struct create_durable_v2, Name)); buf->ccontext.NameLength = cpu_to_le16(4); buf->dcontext.Timeout = 0; /* Should this be configurable by workload */ buf->dcontext.Flags = cpu_to_le32(SMB2_DHANDLE_FLAG_PERSISTENT); generate_random_uuid(buf->dcontext.CreateGuid); memcpy(pfid->create_guid, buf->dcontext.CreateGuid, 16); /* SMB2_CREATE_DURABLE_HANDLE_REQUEST is "DH2Q" */ buf->Name[0] = 'D'; buf->Name[1] = 'H'; buf->Name[2] = '2'; buf->Name[3] = 'Q'; return buf; } static struct create_durable_handle_reconnect_v2 * create_reconnect_durable_v2_buf(struct cifs_fid *fid) { struct create_durable_handle_reconnect_v2 *buf; buf = kzalloc(sizeof(struct create_durable_handle_reconnect_v2), GFP_KERNEL); if (!buf) return NULL; buf->ccontext.DataOffset = cpu_to_le16(offsetof(struct create_durable_handle_reconnect_v2, dcontext)); buf->ccontext.DataLength = cpu_to_le32(sizeof(struct durable_reconnect_context_v2)); buf->ccontext.NameOffset = cpu_to_le16(offsetof(struct 
create_durable_handle_reconnect_v2, Name)); buf->ccontext.NameLength = cpu_to_le16(4); buf->dcontext.Fid.PersistentFileId = fid->persistent_fid; buf->dcontext.Fid.VolatileFileId = fid->volatile_fid; buf->dcontext.Flags = cpu_to_le32(SMB2_DHANDLE_FLAG_PERSISTENT); memcpy(buf->dcontext.CreateGuid, fid->create_guid, 16); /* SMB2_CREATE_DURABLE_HANDLE_RECONNECT_V2 is "DH2C" */ buf->Name[0] = 'D'; buf->Name[1] = 'H'; buf->Name[2] = '2'; buf->Name[3] = 'C'; return buf; } static int add_durable_v2_context(struct kvec *iov, unsigned int *num_iovec, struct cifs_open_parms *oparms) { struct smb2_create_req *req = iov[0].iov_base; unsigned int num = *num_iovec; iov[num].iov_base = create_durable_v2_buf(oparms->fid); if (iov[num].iov_base == NULL) return -ENOMEM; iov[num].iov_len = sizeof(struct create_durable_v2); if (!req->CreateContextsOffset) req->CreateContextsOffset = cpu_to_le32(sizeof(struct smb2_create_req) - 4 + iov[1].iov_len); le32_add_cpu(&req->CreateContextsLength, sizeof(struct create_durable_v2)); inc_rfc1001_len(&req->hdr, sizeof(struct create_durable_v2)); *num_iovec = num + 1; return 0; } static int add_durable_reconnect_v2_context(struct kvec *iov, unsigned int *num_iovec, struct cifs_open_parms *oparms) { struct smb2_create_req *req = iov[0].iov_base; unsigned int num = *num_iovec; /* indicate that we don't need to relock the file */ oparms->reconnect = false; iov[num].iov_base = create_reconnect_durable_v2_buf(oparms->fid); if (iov[num].iov_base == NULL) return -ENOMEM; iov[num].iov_len = sizeof(struct create_durable_handle_reconnect_v2); if (!req->CreateContextsOffset) req->CreateContextsOffset = cpu_to_le32(sizeof(struct smb2_create_req) - 4 + iov[1].iov_len); le32_add_cpu(&req->CreateContextsLength, sizeof(struct create_durable_handle_reconnect_v2)); inc_rfc1001_len(&req->hdr, sizeof(struct create_durable_handle_reconnect_v2)); *num_iovec = num + 1; return 0; } static int add_durable_context(struct kvec *iov, unsigned int *num_iovec, struct cifs_open_parms *oparms, bool use_persistent) { struct smb2_create_req *req = iov[0].iov_base; unsigned int num = *num_iovec; if (use_persistent) { if (oparms->reconnect) return add_durable_reconnect_v2_context(iov, num_iovec, oparms); else return add_durable_v2_context(iov, num_iovec, oparms); } if (oparms->reconnect) { iov[num].iov_base = create_reconnect_durable_buf(oparms->fid); /* indicate that we don't need to relock the file */ oparms->reconnect = false; } else iov[num].iov_base = create_durable_buf(); if (iov[num].iov_base == NULL) return -ENOMEM; iov[num].iov_len = sizeof(struct create_durable); if (!req->CreateContextsOffset) req->CreateContextsOffset = cpu_to_le32(sizeof(struct smb2_create_req) - 4 + iov[1].iov_len); le32_add_cpu(&req->CreateContextsLength, sizeof(struct create_durable)); inc_rfc1001_len(&req->hdr, sizeof(struct create_durable)); *num_iovec = num + 1; return 0; } int SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path, __u8 *oplock, struct smb2_file_all_info *buf, struct smb2_err_rsp **err_buf) { struct smb2_create_req *req; struct smb2_create_rsp *rsp; struct TCP_Server_Info *server; struct cifs_tcon *tcon = oparms->tcon; struct cifs_ses *ses = tcon->ses; struct kvec iov[4]; struct kvec rsp_iov; int resp_buftype; int uni_path_len; __le16 *copy_path = NULL; int copy_size; int rc = 0; unsigned int n_iov = 2; __u32 file_attributes = 0; char *dhc_buf = NULL, *lc_buf = NULL; int flags = 0; cifs_dbg(FYI, "create/open\n"); if (ses && (ses->server)) server = ses->server; else return -EIO; rc = 
small_smb2_init(SMB2_CREATE, tcon, (void **) &req); if (rc) return rc; if (encryption_required(tcon)) flags |= CIFS_TRANSFORM_REQ; if (oparms->create_options & CREATE_OPTION_READONLY) file_attributes |= ATTR_READONLY; if (oparms->create_options & CREATE_OPTION_SPECIAL) file_attributes |= ATTR_SYSTEM; req->ImpersonationLevel = IL_IMPERSONATION; req->DesiredAccess = cpu_to_le32(oparms->desired_access); /* File attributes ignored on open (used in create though) */ req->FileAttributes = cpu_to_le32(file_attributes); req->ShareAccess = FILE_SHARE_ALL_LE; req->CreateDisposition = cpu_to_le32(oparms->disposition); req->CreateOptions = cpu_to_le32(oparms->create_options & CREATE_OPTIONS_MASK); uni_path_len = (2 * UniStrnlen((wchar_t *)path, PATH_MAX)) + 2; /* do not count rfc1001 len field */ req->NameOffset = cpu_to_le16(sizeof(struct smb2_create_req) - 4); iov[0].iov_base = (char *)req; /* 4 for rfc1002 length field */ iov[0].iov_len = get_rfc1002_length(req) + 4; /* MUST set path len (NameLength) to 0 opening root of share */ req->NameLength = cpu_to_le16(uni_path_len - 2); /* -1 since last byte is buf[0] which is sent below (path) */ iov[0].iov_len--; if (uni_path_len % 8 != 0) { copy_size = uni_path_len / 8 * 8; if (copy_size < uni_path_len) copy_size += 8; copy_path = kzalloc(copy_size, GFP_KERNEL); if (!copy_path) return -ENOMEM; memcpy((char *)copy_path, (const char *)path, uni_path_len); uni_path_len = copy_size; path = copy_path; } iov[1].iov_len = uni_path_len; iov[1].iov_base = path; /* -1 since last byte is buf[0] which was counted in smb2_buf_len */ inc_rfc1001_len(req, uni_path_len - 1); if (!server->oplocks) *oplock = SMB2_OPLOCK_LEVEL_NONE; if (!(server->capabilities & SMB2_GLOBAL_CAP_LEASING) || *oplock == SMB2_OPLOCK_LEVEL_NONE) req->RequestedOplockLevel = *oplock; else { rc = add_lease_context(server, iov, &n_iov, oplock); if (rc) { cifs_small_buf_release(req); kfree(copy_path); return rc; } lc_buf = iov[n_iov-1].iov_base; } if (*oplock == SMB2_OPLOCK_LEVEL_BATCH) { /* need to set Next field of lease context if we request it */ if (server->capabilities & SMB2_GLOBAL_CAP_LEASING) { struct create_context *ccontext = (struct create_context *)iov[n_iov-1].iov_base; ccontext->Next = cpu_to_le32(server->vals->create_lease_size); } rc = add_durable_context(iov, &n_iov, oparms, tcon->use_persistent); if (rc) { cifs_small_buf_release(req); kfree(copy_path); kfree(lc_buf); return rc; } dhc_buf = iov[n_iov-1].iov_base; } rc = SendReceive2(xid, ses, iov, n_iov, &resp_buftype, flags, &rsp_iov); cifs_small_buf_release(req); rsp = (struct smb2_create_rsp *)rsp_iov.iov_base; if (rc != 0) { cifs_stats_fail_inc(tcon, SMB2_CREATE_HE); if (err_buf) *err_buf = kmemdup(rsp, get_rfc1002_length(rsp) + 4, GFP_KERNEL); goto creat_exit; } oparms->fid->persistent_fid = rsp->PersistentFileId; oparms->fid->volatile_fid = rsp->VolatileFileId; if (buf) { memcpy(buf, &rsp->CreationTime, 32); buf->AllocationSize = rsp->AllocationSize; buf->EndOfFile = rsp->EndofFile; buf->Attributes = rsp->FileAttributes; buf->NumberOfLinks = cpu_to_le32(1); buf->DeletePending = 0; } if (rsp->OplockLevel == SMB2_OPLOCK_LEVEL_LEASE) *oplock = parse_lease_state(server, rsp, &oparms->fid->epoch); else *oplock = rsp->OplockLevel; creat_exit: kfree(copy_path); kfree(lc_buf); kfree(dhc_buf); free_rsp_buf(resp_buftype, rsp); return rc; } /* * SMB2 IOCTL is used for both IOCTLs and FSCTLs */ int SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, u64 volatile_fid, u32 opcode, bool is_fsctl, char *in_data, u32 
indatalen, char **out_data, u32 *plen /* returned data len */) { struct smb2_ioctl_req *req; struct smb2_ioctl_rsp *rsp; struct smb2_sync_hdr *shdr; struct TCP_Server_Info *server; struct cifs_ses *ses; struct kvec iov[2]; struct kvec rsp_iov; int resp_buftype; int n_iov; int rc = 0; int flags = 0; cifs_dbg(FYI, "SMB2 IOCTL\n"); if (out_data != NULL) *out_data = NULL; /* zero out returned data len, in case of error */ if (plen) *plen = 0; if (tcon) ses = tcon->ses; else return -EIO; if (ses && (ses->server)) server = ses->server; else return -EIO; rc = small_smb2_init(SMB2_IOCTL, tcon, (void **) &req); if (rc) return rc; if (encryption_required(tcon)) flags |= CIFS_TRANSFORM_REQ; req->CtlCode = cpu_to_le32(opcode); req->PersistentFileId = persistent_fid; req->VolatileFileId = volatile_fid; if (indatalen) { req->InputCount = cpu_to_le32(indatalen); /* do not set InputOffset if no input data */ req->InputOffset = cpu_to_le32(offsetof(struct smb2_ioctl_req, Buffer) - 4); iov[1].iov_base = in_data; iov[1].iov_len = indatalen; n_iov = 2; } else n_iov = 1; req->OutputOffset = 0; req->OutputCount = 0; /* MBZ */ /* * Could increase MaxOutputResponse, but that would require more * than one credit. Windows typically sets this smaller, but for some * ioctls it may be useful to allow server to send more. No point * limiting what the server can send as long as fits in one credit */ req->MaxOutputResponse = cpu_to_le32(0xFF00); /* < 64K uses 1 credit */ if (is_fsctl) req->Flags = cpu_to_le32(SMB2_0_IOCTL_IS_FSCTL); else req->Flags = 0; iov[0].iov_base = (char *)req; /* * If no input data, the size of ioctl struct in * protocol spec still includes a 1 byte data buffer, * but if input data passed to ioctl, we do not * want to double count this, so we do not send * the dummy one byte of data in iovec[0] if sending * input data (in iovec[1]). We also must add 4 bytes * in first iovec to allow for rfc1002 length field. 
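 * Concretely: iov[0].iov_len below becomes rfc1002_len + 4 - 1 (dropping
 * the dummy byte from the fixed area) and inc_rfc1001_len(req,
 * indatalen - 1) re-adds the real payload while cancelling the one byte
 * already counted.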
*/ if (indatalen) { iov[0].iov_len = get_rfc1002_length(req) + 4 - 1; inc_rfc1001_len(req, indatalen - 1); } else iov[0].iov_len = get_rfc1002_length(req) + 4; rc = SendReceive2(xid, ses, iov, n_iov, &resp_buftype, flags, &rsp_iov); cifs_small_buf_release(req); rsp = (struct smb2_ioctl_rsp *)rsp_iov.iov_base; if ((rc != 0) && (rc != -EINVAL)) { cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE); goto ioctl_exit; } else if (rc == -EINVAL) { if ((opcode != FSCTL_SRV_COPYCHUNK_WRITE) && (opcode != FSCTL_SRV_COPYCHUNK)) { cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE); goto ioctl_exit; } } /* check if caller wants to look at return data or just return rc */ if ((plen == NULL) || (out_data == NULL)) goto ioctl_exit; *plen = le32_to_cpu(rsp->OutputCount); /* We check for obvious errors in the output buffer length and offset */ if (*plen == 0) goto ioctl_exit; /* server returned no data */ else if (*plen > 0xFF00) { cifs_dbg(VFS, "srv returned invalid ioctl length: %d\n", *plen); *plen = 0; rc = -EIO; goto ioctl_exit; } if (get_rfc1002_length(rsp) < le32_to_cpu(rsp->OutputOffset) + *plen) { cifs_dbg(VFS, "Malformed ioctl resp: len %d offset %d\n", *plen, le32_to_cpu(rsp->OutputOffset)); *plen = 0; rc = -EIO; goto ioctl_exit; } *out_data = kmalloc(*plen, GFP_KERNEL); if (*out_data == NULL) { rc = -ENOMEM; goto ioctl_exit; } shdr = get_sync_hdr(rsp); memcpy(*out_data, (char *)shdr + le32_to_cpu(rsp->OutputOffset), *plen); ioctl_exit: free_rsp_buf(resp_buftype, rsp); return rc; } /* * Individual callers to ioctl worker function follow */ int SMB2_set_compression(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, u64 volatile_fid) { int rc; struct compress_ioctl fsctl_input; char *ret_data = NULL; fsctl_input.CompressionState = cpu_to_le16(COMPRESSION_FORMAT_DEFAULT); rc = SMB2_ioctl(xid, tcon, persistent_fid, volatile_fid, FSCTL_SET_COMPRESSION, true /* is_fsctl */, (char *)&fsctl_input /* data input */, 2 /* in data len */, &ret_data /* out data */, NULL); cifs_dbg(FYI, "set compression rc %d\n", rc); return rc; } int SMB2_close(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, u64 volatile_fid) { struct smb2_close_req *req; struct smb2_close_rsp *rsp; struct TCP_Server_Info *server; struct cifs_ses *ses = tcon->ses; struct kvec iov[1]; struct kvec rsp_iov; int resp_buftype; int rc = 0; int flags = 0; cifs_dbg(FYI, "Close\n"); if (ses && (ses->server)) server = ses->server; else return -EIO; rc = small_smb2_init(SMB2_CLOSE, tcon, (void **) &req); if (rc) return rc; if (encryption_required(tcon)) flags |= CIFS_TRANSFORM_REQ; req->PersistentFileId = persistent_fid; req->VolatileFileId = volatile_fid; iov[0].iov_base = (char *)req; /* 4 for rfc1002 length field */ iov[0].iov_len = get_rfc1002_length(req) + 4; rc = SendReceive2(xid, ses, iov, 1, &resp_buftype, flags, &rsp_iov); cifs_small_buf_release(req); rsp = (struct smb2_close_rsp *)rsp_iov.iov_base; if (rc != 0) { cifs_stats_fail_inc(tcon, SMB2_CLOSE_HE); goto close_exit; } /* BB FIXME - decode close response, update inode for caching */ close_exit: free_rsp_buf(resp_buftype, rsp); return rc; } static int validate_buf(unsigned int offset, unsigned int buffer_length, struct smb2_hdr *hdr, unsigned int min_buf_size) { unsigned int smb_len = be32_to_cpu(hdr->smb2_buf_length); char *end_of_smb = smb_len + 4 /* RFC1001 length field */ + (char *)hdr; char *begin_of_buf = 4 /* RFC1001 len field */ + offset + (char *)hdr; char *end_of_buf = begin_of_buf + buffer_length; if (buffer_length < min_buf_size) { cifs_dbg(VFS, "buffer length %d 
smaller than minimum size %d\n", buffer_length, min_buf_size); return -EINVAL; } /* check if beyond RFC1001 maximum length */ if ((smb_len > 0x7FFFFF) || (buffer_length > 0x7FFFFF)) { cifs_dbg(VFS, "buffer length %d or smb length %d too large\n", buffer_length, smb_len); return -EINVAL; } if ((begin_of_buf > end_of_smb) || (end_of_buf > end_of_smb)) { cifs_dbg(VFS, "illegal server response, bad offset to data\n"); return -EINVAL; } return 0; } /* * If SMB buffer fields are valid, copy into temporary buffer to hold result. * Caller must free buffer. */ static int validate_and_copy_buf(unsigned int offset, unsigned int buffer_length, struct smb2_hdr *hdr, unsigned int minbufsize, char *data) { char *begin_of_buf = 4 /* RFC1001 len field */ + offset + (char *)hdr; int rc; if (!data) return -EINVAL; rc = validate_buf(offset, buffer_length, hdr, minbufsize); if (rc) return rc; memcpy(data, begin_of_buf, buffer_length); return 0; } static int query_info(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, u64 volatile_fid, u8 info_class, size_t output_len, size_t min_len, void *data) { struct smb2_query_info_req *req; struct smb2_query_info_rsp *rsp = NULL; struct kvec iov[2]; struct kvec rsp_iov; int rc = 0; int resp_buftype; struct TCP_Server_Info *server; struct cifs_ses *ses = tcon->ses; int flags = 0; cifs_dbg(FYI, "Query Info\n"); if (ses && (ses->server)) server = ses->server; else return -EIO; rc = small_smb2_init(SMB2_QUERY_INFO, tcon, (void **) &req); if (rc) return rc; if (encryption_required(tcon)) flags |= CIFS_TRANSFORM_REQ; req->InfoType = SMB2_O_INFO_FILE; req->FileInfoClass = info_class; req->PersistentFileId = persistent_fid; req->VolatileFileId = volatile_fid; /* 4 for rfc1002 length field and 1 for Buffer */ req->InputBufferOffset = cpu_to_le16(sizeof(struct smb2_query_info_req) - 1 - 4); req->OutputBufferLength = cpu_to_le32(output_len); iov[0].iov_base = (char *)req; /* 4 for rfc1002 length field */ iov[0].iov_len = get_rfc1002_length(req) + 4; rc = SendReceive2(xid, ses, iov, 1, &resp_buftype, flags, &rsp_iov); cifs_small_buf_release(req); rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base; if (rc) { cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE); goto qinf_exit; } rc = validate_and_copy_buf(le16_to_cpu(rsp->OutputBufferOffset), le32_to_cpu(rsp->OutputBufferLength), &rsp->hdr, min_len, data); qinf_exit: free_rsp_buf(resp_buftype, rsp); return rc; } int SMB2_query_info(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, u64 volatile_fid, struct smb2_file_all_info *data) { return query_info(xid, tcon, persistent_fid, volatile_fid, FILE_ALL_INFORMATION, sizeof(struct smb2_file_all_info) + PATH_MAX * 2, sizeof(struct smb2_file_all_info), data); } int SMB2_get_srv_num(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, u64 volatile_fid, __le64 *uniqueid) { return query_info(xid, tcon, persistent_fid, volatile_fid, FILE_INTERNAL_INFORMATION, sizeof(struct smb2_file_internal_info), sizeof(struct smb2_file_internal_info), uniqueid); } /* * This is a no-op for now. We're not really interested in the reply, but * rather in the fact that the server sent one and that server->lstrp * gets updated. * * FIXME: maybe we should consider checking that the reply matches request? 
*/ static void smb2_echo_callback(struct mid_q_entry *mid) { struct TCP_Server_Info *server = mid->callback_data; struct smb2_echo_rsp *rsp = (struct smb2_echo_rsp *)mid->resp_buf; unsigned int credits_received = 1; if (mid->mid_state == MID_RESPONSE_RECEIVED) credits_received = le16_to_cpu(rsp->hdr.sync_hdr.CreditRequest); mutex_lock(&server->srv_mutex); DeleteMidQEntry(mid); mutex_unlock(&server->srv_mutex); add_credits(server, credits_received, CIFS_ECHO_OP); } void smb2_reconnect_server(struct work_struct *work) { struct TCP_Server_Info *server = container_of(work, struct TCP_Server_Info, reconnect.work); struct cifs_ses *ses; struct cifs_tcon *tcon, *tcon2; struct list_head tmp_list; int tcon_exist = false; /* Prevent simultaneous reconnects that can corrupt tcon->rlist list */ mutex_lock(&server->reconnect_mutex); INIT_LIST_HEAD(&tmp_list); cifs_dbg(FYI, "Need negotiate, reconnecting tcons\n"); spin_lock(&cifs_tcp_ses_lock); list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) { list_for_each_entry(tcon, &ses->tcon_list, tcon_list) { if (tcon->need_reconnect || tcon->need_reopen_files) { tcon->tc_count++; list_add_tail(&tcon->rlist, &tmp_list); tcon_exist = true; } } } /* * Get the reference to server struct to be sure that the last call of * cifs_put_tcon() in the loop below won't release the server pointer. */ if (tcon_exist) server->srv_count++; spin_unlock(&cifs_tcp_ses_lock); list_for_each_entry_safe(tcon, tcon2, &tmp_list, rlist) { if (!smb2_reconnect(SMB2_INTERNAL_CMD, tcon)) cifs_reopen_persistent_handles(tcon); list_del_init(&tcon->rlist); cifs_put_tcon(tcon); } cifs_dbg(FYI, "Reconnecting tcons finished\n"); mutex_unlock(&server->reconnect_mutex); /* now we can safely release srv struct */ if (tcon_exist) cifs_put_tcp_session(server, 1); } int SMB2_echo(struct TCP_Server_Info *server) { struct smb2_echo_req *req; int rc = 0; struct kvec iov[2]; struct smb_rqst rqst = { .rq_iov = iov, .rq_nvec = 2 }; cifs_dbg(FYI, "In echo request\n"); if (server->tcpStatus == CifsNeedNegotiate) { /* No need to send echo on newly established connections */ queue_delayed_work(cifsiod_wq, &server->reconnect, 0); return rc; } rc = small_smb2_init(SMB2_ECHO, NULL, (void **)&req); if (rc) return rc; req->hdr.sync_hdr.CreditRequest = cpu_to_le16(1); /* 4 for rfc1002 length field */ iov[0].iov_len = 4; iov[0].iov_base = (char *)req; iov[1].iov_len = get_rfc1002_length(req); iov[1].iov_base = (char *)req + 4; rc = cifs_call_async(server, &rqst, NULL, smb2_echo_callback, server, CIFS_ECHO_OP); if (rc) cifs_dbg(FYI, "Echo request failed: %d\n", rc); cifs_small_buf_release(req); return rc; } int SMB2_flush(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, u64 volatile_fid) { struct smb2_flush_req *req; struct TCP_Server_Info *server; struct cifs_ses *ses = tcon->ses; struct kvec iov[1]; struct kvec rsp_iov; int resp_buftype; int rc = 0; int flags = 0; cifs_dbg(FYI, "Flush\n"); if (ses && (ses->server)) server = ses->server; else return -EIO; rc = small_smb2_init(SMB2_FLUSH, tcon, (void **) &req); if (rc) return rc; if (encryption_required(tcon)) flags |= CIFS_TRANSFORM_REQ; req->PersistentFileId = persistent_fid; req->VolatileFileId = volatile_fid; iov[0].iov_base = (char *)req; /* 4 for rfc1002 length field */ iov[0].iov_len = get_rfc1002_length(req) + 4; rc = SendReceive2(xid, ses, iov, 1, &resp_buftype, flags, &rsp_iov); cifs_small_buf_release(req); if (rc != 0) cifs_stats_fail_inc(tcon, SMB2_FLUSH_HE); free_rsp_buf(resp_buftype, rsp_iov.iov_base); return rc; } /* * To 
form a chain of read requests, any read requests after the first should * have the end_of_chain boolean set to true. */ static int smb2_new_read_req(void **buf, unsigned int *total_len, struct cifs_io_parms *io_parms, unsigned int remaining_bytes, int request_type) { int rc = -EACCES; struct smb2_read_plain_req *req = NULL; struct smb2_sync_hdr *shdr; rc = smb2_plain_req_init(SMB2_READ, io_parms->tcon, (void **) &req, total_len); if (rc) return rc; if (io_parms->tcon->ses->server == NULL) return -ECONNABORTED; shdr = &req->sync_hdr; shdr->ProcessId = cpu_to_le32(io_parms->pid); req->PersistentFileId = io_parms->persistent_fid; req->VolatileFileId = io_parms->volatile_fid; req->ReadChannelInfoOffset = 0; /* reserved */ req->ReadChannelInfoLength = 0; /* reserved */ req->Channel = 0; /* reserved */ req->MinimumCount = 0; req->Length = cpu_to_le32(io_parms->length); req->Offset = cpu_to_le64(io_parms->offset); if (request_type & CHAINED_REQUEST) { if (!(request_type & END_OF_CHAIN)) { /* next 8-byte aligned request */ *total_len = DIV_ROUND_UP(*total_len, 8) * 8; shdr->NextCommand = cpu_to_le32(*total_len); } else /* END_OF_CHAIN */ shdr->NextCommand = 0; if (request_type & RELATED_REQUEST) { shdr->Flags |= SMB2_FLAGS_RELATED_OPERATIONS; /* * Related requests use info from previous read request * in chain. */ shdr->SessionId = 0xFFFFFFFF; shdr->TreeId = 0xFFFFFFFF; req->PersistentFileId = 0xFFFFFFFF; req->VolatileFileId = 0xFFFFFFFF; } } if (remaining_bytes > io_parms->length) req->RemainingBytes = cpu_to_le32(remaining_bytes); else req->RemainingBytes = 0; *buf = req; return rc; } static void smb2_readv_callback(struct mid_q_entry *mid) { struct cifs_readdata *rdata = mid->callback_data; struct cifs_tcon *tcon = tlink_tcon(rdata->cfile->tlink); struct TCP_Server_Info *server = tcon->ses->server; struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)rdata->iov[1].iov_base; unsigned int credits_received = 1; struct smb_rqst rqst = { .rq_iov = rdata->iov, .rq_nvec = 2, .rq_pages = rdata->pages, .rq_npages = rdata->nr_pages, .rq_pagesz = rdata->pagesz, .rq_tailsz = rdata->tailsz }; cifs_dbg(FYI, "%s: mid=%llu state=%d result=%d bytes=%u\n", __func__, mid->mid, mid->mid_state, rdata->result, rdata->bytes); switch (mid->mid_state) { case MID_RESPONSE_RECEIVED: credits_received = le16_to_cpu(shdr->CreditRequest); /* result already set, check signature */ if (server->sign) { int rc; rc = smb2_verify_signature(&rqst, server); if (rc) cifs_dbg(VFS, "SMB signature verification returned error = %d\n", rc); } /* FIXME: should this be counted toward the initiating task? */ task_io_account_read(rdata->got_bytes); cifs_stats_bytes_read(tcon, rdata->got_bytes); break; case MID_REQUEST_SUBMITTED: case MID_RETRY_NEEDED: rdata->result = -EAGAIN; if (server->sign && rdata->got_bytes) /* reset bytes number since we can not check a sign */ rdata->got_bytes = 0; /* FIXME: should this be counted toward the initiating task? 
*/ task_io_account_read(rdata->got_bytes); cifs_stats_bytes_read(tcon, rdata->got_bytes); break; default: if (rdata->result != -ENODATA) rdata->result = -EIO; } if (rdata->result) cifs_stats_fail_inc(tcon, SMB2_READ_HE); queue_work(cifsiod_wq, &rdata->work); mutex_lock(&server->srv_mutex); DeleteMidQEntry(mid); mutex_unlock(&server->srv_mutex); add_credits(server, credits_received, 0); } /* smb2_async_readv - send an async read, and set up mid to handle result */ int smb2_async_readv(struct cifs_readdata *rdata) { int rc, flags = 0; char *buf; struct smb2_sync_hdr *shdr; struct cifs_io_parms io_parms; struct smb_rqst rqst = { .rq_iov = rdata->iov, .rq_nvec = 2 }; struct TCP_Server_Info *server; unsigned int total_len; __be32 req_len; cifs_dbg(FYI, "%s: offset=%llu bytes=%u\n", __func__, rdata->offset, rdata->bytes); io_parms.tcon = tlink_tcon(rdata->cfile->tlink); io_parms.offset = rdata->offset; io_parms.length = rdata->bytes; io_parms.persistent_fid = rdata->cfile->fid.persistent_fid; io_parms.volatile_fid = rdata->cfile->fid.volatile_fid; io_parms.pid = rdata->pid; server = io_parms.tcon->ses->server; rc = smb2_new_read_req((void **) &buf, &total_len, &io_parms, 0, 0); if (rc) { if (rc == -EAGAIN && rdata->credits) { /* credits was reset by reconnect */ rdata->credits = 0; /* reduce in_flight value since we won't send the req */ spin_lock(&server->req_lock); server->in_flight--; spin_unlock(&server->req_lock); } return rc; } if (encryption_required(io_parms.tcon)) flags |= CIFS_TRANSFORM_REQ; req_len = cpu_to_be32(total_len); rdata->iov[0].iov_base = &req_len; rdata->iov[0].iov_len = sizeof(__be32); rdata->iov[1].iov_base = buf; rdata->iov[1].iov_len = total_len; shdr = (struct smb2_sync_hdr *)buf; if (rdata->credits) { shdr->CreditCharge = cpu_to_le16(DIV_ROUND_UP(rdata->bytes, SMB2_MAX_BUFFER_SIZE)); shdr->CreditRequest = shdr->CreditCharge; spin_lock(&server->req_lock); server->credits += rdata->credits - le16_to_cpu(shdr->CreditCharge); spin_unlock(&server->req_lock); wake_up(&server->request_q); flags |= CIFS_HAS_CREDITS; } kref_get(&rdata->refcount); rc = cifs_call_async(io_parms.tcon->ses->server, &rqst, cifs_readv_receive, smb2_readv_callback, rdata, flags); if (rc) { kref_put(&rdata->refcount, cifs_readdata_release); cifs_stats_fail_inc(io_parms.tcon, SMB2_READ_HE); } cifs_small_buf_release(buf); return rc; } int SMB2_read(const unsigned int xid, struct cifs_io_parms *io_parms, unsigned int *nbytes, char **buf, int *buf_type) { int resp_buftype, rc = -EACCES; struct smb2_read_plain_req *req = NULL; struct smb2_read_rsp *rsp = NULL; struct smb2_sync_hdr *shdr; struct kvec iov[2]; struct kvec rsp_iov; unsigned int total_len; __be32 req_len; struct smb_rqst rqst = { .rq_iov = iov, .rq_nvec = 2 }; int flags = CIFS_LOG_ERROR; struct cifs_ses *ses = io_parms->tcon->ses; *nbytes = 0; rc = smb2_new_read_req((void **)&req, &total_len, io_parms, 0, 0); if (rc) return rc; if (encryption_required(io_parms->tcon)) flags |= CIFS_TRANSFORM_REQ; req_len = cpu_to_be32(total_len); iov[0].iov_base = &req_len; iov[0].iov_len = sizeof(__be32); iov[1].iov_base = req; iov[1].iov_len = total_len; rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov); cifs_small_buf_release(req); rsp = (struct smb2_read_rsp *)rsp_iov.iov_base; shdr = get_sync_hdr(rsp); if (shdr->Status == STATUS_END_OF_FILE) { free_rsp_buf(resp_buftype, rsp_iov.iov_base); return 0; } if (rc) { cifs_stats_fail_inc(io_parms->tcon, SMB2_READ_HE); cifs_dbg(VFS, "Send error in read = %d\n", rc); } else { *nbytes = 
le32_to_cpu(rsp->DataLength); if ((*nbytes > CIFS_MAX_MSGSIZE) || (*nbytes > io_parms->length)) { cifs_dbg(FYI, "bad length %d for count %d\n", *nbytes, io_parms->length); rc = -EIO; *nbytes = 0; } } if (*buf) { memcpy(*buf, (char *)shdr + rsp->DataOffset, *nbytes); free_rsp_buf(resp_buftype, rsp_iov.iov_base); } else if (resp_buftype != CIFS_NO_BUFFER) { *buf = rsp_iov.iov_base; if (resp_buftype == CIFS_SMALL_BUFFER) *buf_type = CIFS_SMALL_BUFFER; else if (resp_buftype == CIFS_LARGE_BUFFER) *buf_type = CIFS_LARGE_BUFFER; } return rc; } /* * Check the mid_state and signature on received buffer (if any), and queue the * workqueue completion task. */ static void smb2_writev_callback(struct mid_q_entry *mid) { struct cifs_writedata *wdata = mid->callback_data; struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink); struct TCP_Server_Info *server = tcon->ses->server; unsigned int written; struct smb2_write_rsp *rsp = (struct smb2_write_rsp *)mid->resp_buf; unsigned int credits_received = 1; switch (mid->mid_state) { case MID_RESPONSE_RECEIVED: credits_received = le16_to_cpu(rsp->hdr.sync_hdr.CreditRequest); wdata->result = smb2_check_receive(mid, tcon->ses->server, 0); if (wdata->result != 0) break; written = le32_to_cpu(rsp->DataLength); /* * Mask off high 16 bits when bytes written as returned * by the server is greater than bytes requested by the * client. OS/2 servers are known to set incorrect * CountHigh values. */ if (written > wdata->bytes) written &= 0xFFFF; if (written < wdata->bytes) wdata->result = -ENOSPC; else wdata->bytes = written; break; case MID_REQUEST_SUBMITTED: case MID_RETRY_NEEDED: wdata->result = -EAGAIN; break; default: wdata->result = -EIO; break; } if (wdata->result) cifs_stats_fail_inc(tcon, SMB2_WRITE_HE); queue_work(cifsiod_wq, &wdata->work); mutex_lock(&server->srv_mutex); DeleteMidQEntry(mid); mutex_unlock(&server->srv_mutex); add_credits(tcon->ses->server, credits_received, 0); } /* smb2_async_writev - send an async write, and set up mid to handle result */ int smb2_async_writev(struct cifs_writedata *wdata, void (*release)(struct kref *kref)) { int rc = -EACCES, flags = 0; struct smb2_write_req *req = NULL; struct smb2_sync_hdr *shdr; struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink); struct TCP_Server_Info *server = tcon->ses->server; struct kvec iov[2]; struct smb_rqst rqst = { }; rc = small_smb2_init(SMB2_WRITE, tcon, (void **) &req); if (rc) { if (rc == -EAGAIN && wdata->credits) { /* credits was reset by reconnect */ wdata->credits = 0; /* reduce in_flight value since we won't send the req */ spin_lock(&server->req_lock); server->in_flight--; spin_unlock(&server->req_lock); } goto async_writev_out; } if (encryption_required(tcon)) flags |= CIFS_TRANSFORM_REQ; shdr = get_sync_hdr(req); shdr->ProcessId = cpu_to_le32(wdata->cfile->pid); req->PersistentFileId = wdata->cfile->fid.persistent_fid; req->VolatileFileId = wdata->cfile->fid.volatile_fid; req->WriteChannelInfoOffset = 0; req->WriteChannelInfoLength = 0; req->Channel = 0; req->Offset = cpu_to_le64(wdata->offset); /* 4 for rfc1002 length field */ req->DataOffset = cpu_to_le16( offsetof(struct smb2_write_req, Buffer) - 4); req->RemainingBytes = 0; /* 4 for rfc1002 length field and 1 for Buffer */ iov[0].iov_len = 4; iov[0].iov_base = req; iov[1].iov_len = get_rfc1002_length(req) - 1; iov[1].iov_base = (char *)req + 4; rqst.rq_iov = iov; rqst.rq_nvec = 2; rqst.rq_pages = wdata->pages; rqst.rq_npages = wdata->nr_pages; rqst.rq_pagesz = wdata->pagesz; rqst.rq_tailsz = wdata->tailsz; cifs_dbg(FYI, 
"async write at %llu %u bytes\n", wdata->offset, wdata->bytes); req->Length = cpu_to_le32(wdata->bytes); inc_rfc1001_len(&req->hdr, wdata->bytes - 1 /* Buffer */); if (wdata->credits) { shdr->CreditCharge = cpu_to_le16(DIV_ROUND_UP(wdata->bytes, SMB2_MAX_BUFFER_SIZE)); shdr->CreditRequest = shdr->CreditCharge; spin_lock(&server->req_lock); server->credits += wdata->credits - le16_to_cpu(shdr->CreditCharge); spin_unlock(&server->req_lock); wake_up(&server->request_q); flags |= CIFS_HAS_CREDITS; } kref_get(&wdata->refcount); rc = cifs_call_async(server, &rqst, NULL, smb2_writev_callback, wdata, flags); if (rc) { kref_put(&wdata->refcount, release); cifs_stats_fail_inc(tcon, SMB2_WRITE_HE); } async_writev_out: cifs_small_buf_release(req); return rc; } /* * SMB2_write function gets iov pointer to kvec array with n_vec as a length. * The length field from io_parms must be at least 1 and indicates a number of * elements with data to write that begins with position 1 in iov array. All * data length is specified by count. */ int SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms, unsigned int *nbytes, struct kvec *iov, int n_vec) { int rc = 0; struct smb2_write_req *req = NULL; struct smb2_write_rsp *rsp = NULL; int resp_buftype; struct kvec rsp_iov; int flags = 0; *nbytes = 0; if (n_vec < 1) return rc; rc = small_smb2_init(SMB2_WRITE, io_parms->tcon, (void **) &req); if (rc) return rc; if (io_parms->tcon->ses->server == NULL) return -ECONNABORTED; if (encryption_required(io_parms->tcon)) flags |= CIFS_TRANSFORM_REQ; req->hdr.sync_hdr.ProcessId = cpu_to_le32(io_parms->pid); req->PersistentFileId = io_parms->persistent_fid; req->VolatileFileId = io_parms->volatile_fid; req->WriteChannelInfoOffset = 0; req->WriteChannelInfoLength = 0; req->Channel = 0; req->Length = cpu_to_le32(io_parms->length); req->Offset = cpu_to_le64(io_parms->offset); /* 4 for rfc1002 length field */ req->DataOffset = cpu_to_le16( offsetof(struct smb2_write_req, Buffer) - 4); req->RemainingBytes = 0; iov[0].iov_base = (char *)req; /* 4 for rfc1002 length field and 1 for Buffer */ iov[0].iov_len = get_rfc1002_length(req) + 4 - 1; /* length of entire message including data to be written */ inc_rfc1001_len(req, io_parms->length - 1 /* Buffer */); rc = SendReceive2(xid, io_parms->tcon->ses, iov, n_vec + 1, &resp_buftype, flags, &rsp_iov); cifs_small_buf_release(req); rsp = (struct smb2_write_rsp *)rsp_iov.iov_base; if (rc) { cifs_stats_fail_inc(io_parms->tcon, SMB2_WRITE_HE); cifs_dbg(VFS, "Send error in write = %d\n", rc); } else *nbytes = le32_to_cpu(rsp->DataLength); free_rsp_buf(resp_buftype, rsp); return rc; } static unsigned int num_entries(char *bufstart, char *end_of_buf, char **lastentry, size_t size) { int len; unsigned int entrycount = 0; unsigned int next_offset = 0; FILE_DIRECTORY_INFO *entryptr; if (bufstart == NULL) return 0; entryptr = (FILE_DIRECTORY_INFO *)bufstart; while (1) { entryptr = (FILE_DIRECTORY_INFO *) ((char *)entryptr + next_offset); if ((char *)entryptr + size > end_of_buf) { cifs_dbg(VFS, "malformed search entry would overflow\n"); break; } len = le32_to_cpu(entryptr->FileNameLength); if ((char *)entryptr + len + size > end_of_buf) { cifs_dbg(VFS, "directory entry name would overflow frame end of buf %p\n", end_of_buf); break; } *lastentry = (char *)entryptr; entrycount++; next_offset = le32_to_cpu(entryptr->NextEntryOffset); if (!next_offset) break; } return entrycount; } /* * Readdir/FindFirst */ int SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon, u64 
persistent_fid, u64 volatile_fid, int index, struct cifs_search_info *srch_inf) { struct smb2_query_directory_req *req; struct smb2_query_directory_rsp *rsp = NULL; struct kvec iov[2]; struct kvec rsp_iov; int rc = 0; int len; int resp_buftype = CIFS_NO_BUFFER; unsigned char *bufptr; struct TCP_Server_Info *server; struct cifs_ses *ses = tcon->ses; __le16 asteriks = cpu_to_le16('*'); char *end_of_smb; unsigned int output_size = CIFSMaxBufSize; size_t info_buf_size; int flags = 0; if (ses && (ses->server)) server = ses->server; else return -EIO; rc = small_smb2_init(SMB2_QUERY_DIRECTORY, tcon, (void **) &req); if (rc) return rc; if (encryption_required(tcon)) flags |= CIFS_TRANSFORM_REQ; switch (srch_inf->info_level) { case SMB_FIND_FILE_DIRECTORY_INFO: req->FileInformationClass = FILE_DIRECTORY_INFORMATION; info_buf_size = sizeof(FILE_DIRECTORY_INFO) - 1; break; case SMB_FIND_FILE_ID_FULL_DIR_INFO: req->FileInformationClass = FILEID_FULL_DIRECTORY_INFORMATION; info_buf_size = sizeof(SEARCH_ID_FULL_DIR_INFO) - 1; break; default: cifs_dbg(VFS, "info level %u isn't supported\n", srch_inf->info_level); rc = -EINVAL; goto qdir_exit; } req->FileIndex = cpu_to_le32(index); req->PersistentFileId = persistent_fid; req->VolatileFileId = volatile_fid; len = 0x2; bufptr = req->Buffer; memcpy(bufptr, &asteriks, len); req->FileNameOffset = cpu_to_le16(sizeof(struct smb2_query_directory_req) - 1 - 4); req->FileNameLength = cpu_to_le16(len); /* * BB could be 30 bytes or so longer if we used SMB2 specific * buffer lengths, but this is safe and close enough. */ output_size = min_t(unsigned int, output_size, server->maxBuf); output_size = min_t(unsigned int, output_size, 2 << 15); req->OutputBufferLength = cpu_to_le32(output_size); iov[0].iov_base = (char *)req; /* 4 for RFC1001 length and 1 for Buffer */ iov[0].iov_len = get_rfc1002_length(req) + 4 - 1; iov[1].iov_base = (char *)(req->Buffer); iov[1].iov_len = len; inc_rfc1001_len(req, len - 1 /* Buffer */); rc = SendReceive2(xid, ses, iov, 2, &resp_buftype, flags, &rsp_iov); cifs_small_buf_release(req); rsp = (struct smb2_query_directory_rsp *)rsp_iov.iov_base; if (rc) { if (rc == -ENODATA && rsp->hdr.sync_hdr.Status == STATUS_NO_MORE_FILES) { srch_inf->endOfSearch = true; rc = 0; } cifs_stats_fail_inc(tcon, SMB2_QUERY_DIRECTORY_HE); goto qdir_exit; } rc = validate_buf(le16_to_cpu(rsp->OutputBufferOffset), le32_to_cpu(rsp->OutputBufferLength), &rsp->hdr, info_buf_size); if (rc) goto qdir_exit; srch_inf->unicode = true; if (srch_inf->ntwrk_buf_start) { if (srch_inf->smallBuf) cifs_small_buf_release(srch_inf->ntwrk_buf_start); else cifs_buf_release(srch_inf->ntwrk_buf_start); } srch_inf->ntwrk_buf_start = (char *)rsp; srch_inf->srch_entries_start = srch_inf->last_entry = 4 /* rfclen */ + (char *)&rsp->hdr + le16_to_cpu(rsp->OutputBufferOffset); /* 4 for rfc1002 length field */ end_of_smb = get_rfc1002_length(rsp) + 4 + (char *)&rsp->hdr; srch_inf->entries_in_buffer = num_entries(srch_inf->srch_entries_start, end_of_smb, &srch_inf->last_entry, info_buf_size); srch_inf->index_of_last_entry += srch_inf->entries_in_buffer; cifs_dbg(FYI, "num entries %d last_index %lld srch start %p srch end %p\n", srch_inf->entries_in_buffer, srch_inf->index_of_last_entry, srch_inf->srch_entries_start, srch_inf->last_entry); if (resp_buftype == CIFS_LARGE_BUFFER) srch_inf->smallBuf = false; else if (resp_buftype == CIFS_SMALL_BUFFER) srch_inf->smallBuf = true; else cifs_dbg(VFS, "illegal search buffer type\n"); return rc; qdir_exit: free_rsp_buf(resp_buftype, rsp); return rc; } 
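/*
 * Illustration (not part of the original file): the iovec bookkeeping used
 * throughout -- get_rfc1002_length(req) + 4 - 1, then
 * inc_rfc1001_len(req, len - 1) -- revolves around the 4-byte big-endian
 * RFC1001 length prefix preceding every SMB2 PDU on the wire, plus the
 * 1-byte Buffer[] placeholder at the end of each fixed-size request. A
 * minimal user-space sketch of that arithmetic follows; the struct, helper
 * names, and the sizes used in main() are hypothetical stand-ins, not the
 * kernel's own definitions.
 */
#if 0
#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>	/* htonl/ntohl for the big-endian prefix */

/* Stand-in for an SMB2 PDU buffer: 4-byte big-endian RFC1001 length
 * followed by the SMB2 header and fixed request area. */
struct pdu {
	uint32_t rfc1001_len;		/* big endian on the wire */
	unsigned char body[256];
};

/* Mirror of get_rfc1002_length(): bytes that follow the prefix. */
static uint32_t get_len(const struct pdu *p)
{
	return ntohl(p->rfc1001_len);
}

/* Mirror of inc_rfc1001_len(): grow the advertised length. */
static void inc_len(struct pdu *p, int delta)
{
	p->rfc1001_len = htonl(ntohl(p->rfc1001_len) + delta);
}

int main(void)
{
	struct pdu p = { .rfc1001_len = htonl(100) }; /* fixed area: 100 */
	unsigned int payload_len = 32;

	/* iov[0] spans the prefix plus the fixed area, minus the 1-byte
	 * Buffer[] placeholder the payload will overlay: 100 + 4 - 1. */
	size_t iov0_len = get_len(&p) + 4 - 1;

	/* The payload travels in iov[1]; the advertised length grows by
	 * payload_len - 1 so the placeholder byte is not counted twice. */
	inc_len(&p, payload_len - 1);

	printf("iov0=%zu total=%u\n", iov0_len, 4 + get_len(&p));
	return 0;
}
#endif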
static int send_set_info(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, u64 volatile_fid, u32 pid, int info_class, unsigned int num, void **data, unsigned int *size) { struct smb2_set_info_req *req; struct smb2_set_info_rsp *rsp = NULL; struct kvec *iov; struct kvec rsp_iov; int rc = 0; int resp_buftype; unsigned int i; struct TCP_Server_Info *server; struct cifs_ses *ses = tcon->ses; int flags = 0; if (ses && (ses->server)) server = ses->server; else return -EIO; if (!num) return -EINVAL; iov = kmalloc(sizeof(struct kvec) * num, GFP_KERNEL); if (!iov) return -ENOMEM; rc = small_smb2_init(SMB2_SET_INFO, tcon, (void **) &req); if (rc) { kfree(iov); return rc; } if (encryption_required(tcon)) flags |= CIFS_TRANSFORM_REQ; req->hdr.sync_hdr.ProcessId = cpu_to_le32(pid); req->InfoType = SMB2_O_INFO_FILE; req->FileInfoClass = info_class; req->PersistentFileId = persistent_fid; req->VolatileFileId = volatile_fid; /* 4 for RFC1001 length and 1 for Buffer */ req->BufferOffset = cpu_to_le16(sizeof(struct smb2_set_info_req) - 1 - 4); req->BufferLength = cpu_to_le32(*size); inc_rfc1001_len(req, *size - 1 /* Buffer */); memcpy(req->Buffer, *data, *size); iov[0].iov_base = (char *)req; /* 4 for RFC1001 length */ iov[0].iov_len = get_rfc1002_length(req) + 4; for (i = 1; i < num; i++) { inc_rfc1001_len(req, size[i]); le32_add_cpu(&req->BufferLength, size[i]); iov[i].iov_base = (char *)data[i]; iov[i].iov_len = size[i]; } rc = SendReceive2(xid, ses, iov, num, &resp_buftype, flags, &rsp_iov); cifs_small_buf_release(req); rsp = (struct smb2_set_info_rsp *)rsp_iov.iov_base; if (rc != 0) cifs_stats_fail_inc(tcon, SMB2_SET_INFO_HE); free_rsp_buf(resp_buftype, rsp); kfree(iov); return rc; } int SMB2_rename(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, u64 volatile_fid, __le16 *target_file) { struct smb2_file_rename_info info; void **data; unsigned int size[2]; int rc; int len = (2 * UniStrnlen((wchar_t *)target_file, PATH_MAX)); data = kmalloc(sizeof(void *) * 2, GFP_KERNEL); if (!data) return -ENOMEM; info.ReplaceIfExists = 1; /* 1 = replace existing target with new */ /* 0 = fail if target already exists */ info.RootDirectory = 0; /* MBZ for network ops (why does spec say?) */ info.FileNameLength = cpu_to_le32(len); data[0] = &info; size[0] = sizeof(struct smb2_file_rename_info); data[1] = target_file; size[1] = len + 2 /* null */; rc = send_set_info(xid, tcon, persistent_fid, volatile_fid, current->tgid, FILE_RENAME_INFORMATION, 2, data, size); kfree(data); return rc; } int SMB2_rmdir(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, u64 volatile_fid) { __u8 delete_pending = 1; void *data; unsigned int size; data = &delete_pending; size = 1; /* sizeof __u8 */ return send_set_info(xid, tcon, persistent_fid, volatile_fid, current->tgid, FILE_DISPOSITION_INFORMATION, 1, &data, &size); } int SMB2_set_hardlink(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, u64 volatile_fid, __le16 *target_file) { struct smb2_file_link_info info; void **data; unsigned int size[2]; int rc; int len = (2 * UniStrnlen((wchar_t *)target_file, PATH_MAX)); data = kmalloc(sizeof(void *) * 2, GFP_KERNEL); if (!data) return -ENOMEM; info.ReplaceIfExists = 0; /* 1 = replace existing link with new */ /* 0 = fail if link already exists */ info.RootDirectory = 0; /* MBZ for network ops (why does spec say?) 
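 * (added note: on the SMB2 set-info path RootDirectory is reserved and
 * expected to be zero on the wire; only a local, handle-based call would
 * carry a real directory handle here)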
*/ info.FileNameLength = cpu_to_le32(len); data[0] = &info; size[0] = sizeof(struct smb2_file_link_info); data[1] = target_file; size[1] = len + 2 /* null */; rc = send_set_info(xid, tcon, persistent_fid, volatile_fid, current->tgid, FILE_LINK_INFORMATION, 2, data, size); kfree(data); return rc; } int SMB2_set_eof(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, u64 volatile_fid, u32 pid, __le64 *eof, bool is_falloc) { struct smb2_file_eof_info info; void *data; unsigned int size; info.EndOfFile = *eof; data = &info; size = sizeof(struct smb2_file_eof_info); if (is_falloc) return send_set_info(xid, tcon, persistent_fid, volatile_fid, pid, FILE_ALLOCATION_INFORMATION, 1, &data, &size); else return send_set_info(xid, tcon, persistent_fid, volatile_fid, pid, FILE_END_OF_FILE_INFORMATION, 1, &data, &size); } int SMB2_set_info(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, u64 volatile_fid, FILE_BASIC_INFO *buf) { unsigned int size; size = sizeof(FILE_BASIC_INFO); return send_set_info(xid, tcon, persistent_fid, volatile_fid, current->tgid, FILE_BASIC_INFORMATION, 1, (void **)&buf, &size); } int SMB2_oplock_break(const unsigned int xid, struct cifs_tcon *tcon, const u64 persistent_fid, const u64 volatile_fid, __u8 oplock_level) { int rc; struct smb2_oplock_break *req = NULL; int flags = CIFS_OBREAK_OP; cifs_dbg(FYI, "SMB2_oplock_break\n"); rc = small_smb2_init(SMB2_OPLOCK_BREAK, tcon, (void **) &req); if (rc) return rc; if (encryption_required(tcon)) flags |= CIFS_TRANSFORM_REQ; req->VolatileFid = volatile_fid; req->PersistentFid = persistent_fid; req->OplockLevel = oplock_level; req->hdr.sync_hdr.CreditRequest = cpu_to_le16(1); rc = SendReceiveNoRsp(xid, tcon->ses, (char *) req, flags); cifs_small_buf_release(req); if (rc) { cifs_stats_fail_inc(tcon, SMB2_OPLOCK_BREAK_HE); cifs_dbg(FYI, "Send error in Oplock Break = %d\n", rc); } return rc; } static void copy_fs_info_to_kstatfs(struct smb2_fs_full_size_info *pfs_inf, struct kstatfs *kst) { kst->f_bsize = le32_to_cpu(pfs_inf->BytesPerSector) * le32_to_cpu(pfs_inf->SectorsPerAllocationUnit); kst->f_blocks = le64_to_cpu(pfs_inf->TotalAllocationUnits); kst->f_bfree = le64_to_cpu(pfs_inf->ActualAvailableAllocationUnits); kst->f_bavail = le64_to_cpu(pfs_inf->CallerAvailableAllocationUnits); return; } static int build_qfs_info_req(struct kvec *iov, struct cifs_tcon *tcon, int level, int outbuf_len, u64 persistent_fid, u64 volatile_fid) { int rc; struct smb2_query_info_req *req; cifs_dbg(FYI, "Query FSInfo level %d\n", level); if ((tcon->ses == NULL) || (tcon->ses->server == NULL)) return -EIO; rc = small_smb2_init(SMB2_QUERY_INFO, tcon, (void **) &req); if (rc) return rc; req->InfoType = SMB2_O_INFO_FILESYSTEM; req->FileInfoClass = level; req->PersistentFileId = persistent_fid; req->VolatileFileId = volatile_fid; /* 4 for rfc1002 length field and 1 for pad */ req->InputBufferOffset = cpu_to_le16(sizeof(struct smb2_query_info_req) - 1 - 4); req->OutputBufferLength = cpu_to_le32( outbuf_len + sizeof(struct smb2_query_info_rsp) - 1 - 4); iov->iov_base = (char *)req; /* 4 for rfc1002 length field */ iov->iov_len = get_rfc1002_length(req) + 4; return 0; } int SMB2_QFS_info(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, u64 volatile_fid, struct kstatfs *fsdata) { struct smb2_query_info_rsp *rsp = NULL; struct kvec iov; struct kvec rsp_iov; int rc = 0; int resp_buftype; struct cifs_ses *ses = tcon->ses; struct smb2_fs_full_size_info *info = NULL; int flags = 0; rc = build_qfs_info_req(&iov, tcon, 
FS_FULL_SIZE_INFORMATION, sizeof(struct smb2_fs_full_size_info), persistent_fid, volatile_fid); if (rc) return rc; if (encryption_required(tcon)) flags |= CIFS_TRANSFORM_REQ; rc = SendReceive2(xid, ses, &iov, 1, &resp_buftype, flags, &rsp_iov); cifs_small_buf_release(iov.iov_base); if (rc) { cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE); goto qfsinf_exit; } rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base; info = (struct smb2_fs_full_size_info *)(4 /* RFC1001 len */ + le16_to_cpu(rsp->OutputBufferOffset) + (char *)&rsp->hdr); rc = validate_buf(le16_to_cpu(rsp->OutputBufferOffset), le32_to_cpu(rsp->OutputBufferLength), &rsp->hdr, sizeof(struct smb2_fs_full_size_info)); if (!rc) copy_fs_info_to_kstatfs(info, fsdata); qfsinf_exit: free_rsp_buf(resp_buftype, rsp_iov.iov_base); return rc; } int SMB2_QFS_attr(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, u64 volatile_fid, int level) { struct smb2_query_info_rsp *rsp = NULL; struct kvec iov; struct kvec rsp_iov; int rc = 0; int resp_buftype, max_len, min_len; struct cifs_ses *ses = tcon->ses; unsigned int rsp_len, offset; int flags = 0; if (level == FS_DEVICE_INFORMATION) { max_len = sizeof(FILE_SYSTEM_DEVICE_INFO); min_len = sizeof(FILE_SYSTEM_DEVICE_INFO); } else if (level == FS_ATTRIBUTE_INFORMATION) { max_len = sizeof(FILE_SYSTEM_ATTRIBUTE_INFO); min_len = MIN_FS_ATTR_INFO_SIZE; } else if (level == FS_SECTOR_SIZE_INFORMATION) { max_len = sizeof(struct smb3_fs_ss_info); min_len = sizeof(struct smb3_fs_ss_info); } else { cifs_dbg(FYI, "Invalid qfsinfo level %d\n", level); return -EINVAL; } rc = build_qfs_info_req(&iov, tcon, level, max_len, persistent_fid, volatile_fid); if (rc) return rc; if (encryption_required(tcon)) flags |= CIFS_TRANSFORM_REQ; rc = SendReceive2(xid, ses, &iov, 1, &resp_buftype, flags, &rsp_iov); cifs_small_buf_release(iov.iov_base); if (rc) { cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE); goto qfsattr_exit; } rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base; rsp_len = le32_to_cpu(rsp->OutputBufferLength); offset = le16_to_cpu(rsp->OutputBufferOffset); rc = validate_buf(offset, rsp_len, &rsp->hdr, min_len); if (rc) goto qfsattr_exit; if (level == FS_ATTRIBUTE_INFORMATION) memcpy(&tcon->fsAttrInfo, 4 /* RFC1001 len */ + offset + (char *)&rsp->hdr, min_t(unsigned int, rsp_len, max_len)); else if (level == FS_DEVICE_INFORMATION) memcpy(&tcon->fsDevInfo, 4 /* RFC1001 len */ + offset + (char *)&rsp->hdr, sizeof(FILE_SYSTEM_DEVICE_INFO)); else if (level == FS_SECTOR_SIZE_INFORMATION) { struct smb3_fs_ss_info *ss_info = (struct smb3_fs_ss_info *) (4 /* RFC1001 len */ + offset + (char *)&rsp->hdr); tcon->ss_flags = le32_to_cpu(ss_info->Flags); tcon->perf_sector_size = le32_to_cpu(ss_info->PhysicalBytesPerSectorForPerf); } qfsattr_exit: free_rsp_buf(resp_buftype, rsp_iov.iov_base); return rc; } int smb2_lockv(const unsigned int xid, struct cifs_tcon *tcon, const __u64 persist_fid, const __u64 volatile_fid, const __u32 pid, const __u32 num_lock, struct smb2_lock_element *buf) { int rc = 0; struct smb2_lock_req *req = NULL; struct kvec iov[2]; struct kvec rsp_iov; int resp_buf_type; unsigned int count; int flags = CIFS_NO_RESP; cifs_dbg(FYI, "smb2_lockv num lock %d\n", num_lock); rc = small_smb2_init(SMB2_LOCK, tcon, (void **) &req); if (rc) return rc; if (encryption_required(tcon)) flags |= CIFS_TRANSFORM_REQ; req->hdr.sync_hdr.ProcessId = cpu_to_le32(pid); req->LockCount = cpu_to_le16(num_lock); req->PersistentFileId = persist_fid; req->VolatileFileId = volatile_fid; count = num_lock * sizeof(struct 
smb2_lock_element);
	inc_rfc1001_len(req, count - sizeof(struct smb2_lock_element));

	iov[0].iov_base = (char *)req;
	/* 4 for rfc1002 length field and count for all locks */
	iov[0].iov_len = get_rfc1002_length(req) + 4 - count;
	iov[1].iov_base = (char *)buf;
	iov[1].iov_len = count;

	cifs_stats_inc(&tcon->stats.cifs_stats.num_locks);
	rc = SendReceive2(xid, tcon->ses, iov, 2, &resp_buf_type, flags,
			  &rsp_iov);
	cifs_small_buf_release(req);
	if (rc) {
		cifs_dbg(FYI, "Send error in smb2_lockv = %d\n", rc);
		cifs_stats_fail_inc(tcon, SMB2_LOCK_HE);
	}

	return rc;
}

int SMB2_lock(const unsigned int xid, struct cifs_tcon *tcon,
	      const __u64 persist_fid, const __u64 volatile_fid,
	      const __u32 pid, const __u64 length, const __u64 offset,
	      const __u32 lock_flags, const bool wait)
{
	struct smb2_lock_element lock;

	lock.Offset = cpu_to_le64(offset);
	lock.Length = cpu_to_le64(length);
	lock.Flags = cpu_to_le32(lock_flags);
	if (!wait && lock_flags != SMB2_LOCKFLAG_UNLOCK)
		lock.Flags |= cpu_to_le32(SMB2_LOCKFLAG_FAIL_IMMEDIATELY);

	return smb2_lockv(xid, tcon, persist_fid, volatile_fid, pid, 1, &lock);
}

int SMB2_lease_break(const unsigned int xid, struct cifs_tcon *tcon,
		     __u8 *lease_key, const __le32 lease_state)
{
	int rc;
	struct smb2_lease_ack *req = NULL;
	int flags = CIFS_OBREAK_OP;

	cifs_dbg(FYI, "SMB2_lease_break\n");
	rc = small_smb2_init(SMB2_OPLOCK_BREAK, tcon, (void **) &req);
	if (rc)
		return rc;

	if (encryption_required(tcon))
		flags |= CIFS_TRANSFORM_REQ;

	req->hdr.sync_hdr.CreditRequest = cpu_to_le16(1);
	req->StructureSize = cpu_to_le16(36);
	inc_rfc1001_len(req, 12);

	memcpy(req->LeaseKey, lease_key, 16);
	req->LeaseState = lease_state;

	rc = SendReceiveNoRsp(xid, tcon->ses, (char *) req, flags);
	cifs_small_buf_release(req);

	if (rc) {
		cifs_stats_fail_inc(tcon, SMB2_OPLOCK_BREAK_HE);
		cifs_dbg(FYI, "Send error in Lease Break = %d\n", rc);
	}

	return rc;
}
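/*
 * Illustration (not part of the original file): smb2_lockv() above relies
 * on the request template already embedding one lock element in its fixed
 * area, so the advertised length grows by
 * count - sizeof(struct smb2_lock_element) and iov[0] hands the array
 * region over to iov[1]. A worked version of that arithmetic with a
 * hypothetical 24-byte element (the wire size of an SMB2 lock element)
 * and a made-up fixed-area size:
 */
#if 0
#include <stdint.h>
#include <stdio.h>

struct lock_element {		/* hypothetical 24-byte wire layout */
	uint64_t offset;
	uint64_t length;
	uint32_t flags;
	uint32_t reserved;
};

int main(void)
{
	unsigned int fixed = 96;	/* made-up: fixed area incl. one element */
	unsigned int num_lock = 3;
	unsigned int count = num_lock * sizeof(struct lock_element);

	/* Advertised RFC1001 length grows by the extra elements only. */
	unsigned int rfc1001 = fixed + count - sizeof(struct lock_element);

	/* iov[0]: prefix + everything up to the element array;
	 * iov[1]: the caller's full array of num_lock elements. */
	unsigned int iov0 = rfc1001 + 4 - count;
	unsigned int iov1 = count;

	/* iov0 + iov1 == rfc1001 + 4, i.e. the whole framed PDU. */
	printf("count=%u rfc1001=%u iov0=%u iov1=%u\n",
	       count, rfc1001, iov0, iov1);
	return 0;
}
#endif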
/* * fs/cifs/smb2pdu.c * * Copyright (C) International Business Machines Corp., 2009, 2013 * Etersoft, 2012 * Author(s): Steve French (sfrench@us.ibm.com) * Pavel Shilovsky (pshilovsky@samba.org) 2012 * * Contains the routines for constructing the SMB2 PDUs themselves * * This library is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published * by the Free Software Foundation; either version 2.1 of the License, or * (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See * the GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* SMB2 PDU handling routines here - except for leftovers (eg session setup) */ /* Note that there are handle based routines which must be */ /* treated slightly differently for reconnection purposes since we never */ /* want to reuse a stale file handle and only the caller knows the file info */ #include <linux/fs.h> #include <linux/kernel.h> #include <linux/vfs.h> #include <linux/task_io_accounting_ops.h> #include <linux/uaccess.h> #include <linux/pagemap.h> #include <linux/xattr.h> #include "smb2pdu.h" #include "cifsglob.h" #include "cifsacl.h" #include "cifsproto.h" #include "smb2proto.h" #include "cifs_unicode.h" #include "cifs_debug.h" #include "ntlmssp.h" #include "smb2status.h" #include "smb2glob.h" #include "cifspdu.h" #include "cifs_spnego.h" /* * The following table defines the expected "StructureSize" of SMB2 requests * in order by SMB2 command. This is similar to "wct" in SMB/CIFS requests. * * Note that commands are defined in smb2pdu.h in le16 but the array below is * indexed by command in host byte order. */ static const int smb2_req_struct_sizes[NUMBER_OF_SMB2_COMMANDS] = { /* SMB2_NEGOTIATE */ 36, /* SMB2_SESSION_SETUP */ 25, /* SMB2_LOGOFF */ 4, /* SMB2_TREE_CONNECT */ 9, /* SMB2_TREE_DISCONNECT */ 4, /* SMB2_CREATE */ 57, /* SMB2_CLOSE */ 24, /* SMB2_FLUSH */ 24, /* SMB2_READ */ 49, /* SMB2_WRITE */ 49, /* SMB2_LOCK */ 48, /* SMB2_IOCTL */ 57, /* SMB2_CANCEL */ 4, /* SMB2_ECHO */ 4, /* SMB2_QUERY_DIRECTORY */ 33, /* SMB2_CHANGE_NOTIFY */ 32, /* SMB2_QUERY_INFO */ 41, /* SMB2_SET_INFO */ 33, /* SMB2_OPLOCK_BREAK */ 24 /* BB this is 36 for LEASE_BREAK variant */ }; static int encryption_required(const struct cifs_tcon *tcon) { if ((tcon->ses->session_flags & SMB2_SESSION_FLAG_ENCRYPT_DATA) || (tcon->share_flags & SHI1005_FLAGS_ENCRYPT_DATA)) return 1; return 0; } static void smb2_hdr_assemble(struct smb2_sync_hdr *shdr, __le16 smb2_cmd, const struct cifs_tcon *tcon) { shdr->ProtocolId = SMB2_PROTO_NUMBER; shdr->StructureSize = cpu_to_le16(64); shdr->Command = smb2_cmd; if (tcon && tcon->ses && tcon->ses->server) { struct TCP_Server_Info *server = tcon->ses->server; spin_lock(&server->req_lock); /* Request up to 2 credits but don't go over the limit. 
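 * SMB2 flow control is credit based: each request consumes credits and
 * each response can grant more via the CreditRequest/CreditResponse
 * fields, so we ask for at most two here and cap the request so the
 * outstanding total cannot exceed the configured max_credits ceiling.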
*/ if (server->credits >= server->max_credits) shdr->CreditRequest = cpu_to_le16(0); else shdr->CreditRequest = cpu_to_le16( min_t(int, server->max_credits - server->credits, 2)); spin_unlock(&server->req_lock); } else { shdr->CreditRequest = cpu_to_le16(2); } shdr->ProcessId = cpu_to_le32((__u16)current->tgid); if (!tcon) goto out; /* GLOBAL_CAP_LARGE_MTU will only be set if dialect > SMB2.02 */ /* See sections 2.2.4 and 3.2.4.1.5 of MS-SMB2 */ if ((tcon->ses) && (tcon->ses->server) && (tcon->ses->server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU)) shdr->CreditCharge = cpu_to_le16(1); /* else CreditCharge MBZ */ shdr->TreeId = tcon->tid; /* Uid is not converted */ if (tcon->ses) shdr->SessionId = tcon->ses->Suid; /* * If we would set SMB2_FLAGS_DFS_OPERATIONS on open we also would have * to pass the path on the Open SMB prefixed by \\server\share. * Not sure when we would need to do the augmented path (if ever) and * setting this flag breaks the SMB2 open operation since it is * illegal to send an empty path name (without \\server\share prefix) * when the DFS flag is set in the SMB open header. We could * consider setting the flag on all operations other than open * but it is safer to net set it for now. */ /* if (tcon->share_flags & SHI1005_FLAGS_DFS) shdr->Flags |= SMB2_FLAGS_DFS_OPERATIONS; */ if (tcon->ses && tcon->ses->server && tcon->ses->server->sign && !encryption_required(tcon)) shdr->Flags |= SMB2_FLAGS_SIGNED; out: return; } static int smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon) { int rc = 0; struct nls_table *nls_codepage; struct cifs_ses *ses; struct TCP_Server_Info *server; /* * SMB2s NegProt, SessSetup, Logoff do not have tcon yet so * check for tcp and smb session status done differently * for those three - in the calling routine. */ if (tcon == NULL) return rc; if (smb2_command == SMB2_TREE_CONNECT) return rc; if (tcon->tidStatus == CifsExiting) { /* * only tree disconnect, open, and write, * (and ulogoff which does not have tcon) * are allowed as we start force umount. */ if ((smb2_command != SMB2_WRITE) && (smb2_command != SMB2_CREATE) && (smb2_command != SMB2_TREE_DISCONNECT)) { cifs_dbg(FYI, "can not send cmd %d while umounting\n", smb2_command); return -ENODEV; } } if ((!tcon->ses) || (tcon->ses->status == CifsExiting) || (!tcon->ses->server)) return -EIO; ses = tcon->ses; server = ses->server; /* * Give demultiplex thread up to 10 seconds to reconnect, should be * greater than cifs socket timeout which is 7 seconds */ while (server->tcpStatus == CifsNeedReconnect) { /* * Return to caller for TREE_DISCONNECT and LOGOFF and CLOSE * here since they are implicitly done when session drops. */ switch (smb2_command) { /* * BB Should we keep oplock break and add flush to exceptions? */ case SMB2_TREE_DISCONNECT: case SMB2_CANCEL: case SMB2_CLOSE: case SMB2_OPLOCK_BREAK: return -EAGAIN; } wait_event_interruptible_timeout(server->response_q, (server->tcpStatus != CifsNeedReconnect), 10 * HZ); /* are we still trying to reconnect? */ if (server->tcpStatus != CifsNeedReconnect) break; /* * on "soft" mounts we wait once. 
Hard mounts keep * retrying until process is killed or server comes * back on-line */ if (!tcon->retry) { cifs_dbg(FYI, "gave up waiting on reconnect in smb_init\n"); return -EHOSTDOWN; } } if (!tcon->ses->need_reconnect && !tcon->need_reconnect) return rc; nls_codepage = load_nls_default(); /* * need to prevent multiple threads trying to simultaneously reconnect * the same SMB session */ mutex_lock(&tcon->ses->session_mutex); rc = cifs_negotiate_protocol(0, tcon->ses); if (!rc && tcon->ses->need_reconnect) rc = cifs_setup_session(0, tcon->ses, nls_codepage); if (rc || !tcon->need_reconnect) { mutex_unlock(&tcon->ses->session_mutex); goto out; } cifs_mark_open_files_invalid(tcon); if (tcon->use_persistent) tcon->need_reopen_files = true; rc = SMB2_tcon(0, tcon->ses, tcon->treeName, tcon, nls_codepage); mutex_unlock(&tcon->ses->session_mutex); cifs_dbg(FYI, "reconnect tcon rc = %d\n", rc); if (rc) goto out; if (smb2_command != SMB2_INTERNAL_CMD) queue_delayed_work(cifsiod_wq, &server->reconnect, 0); atomic_inc(&tconInfoReconnectCount); out: /* * Check if handle based operation so we know whether we can continue * or not without returning to caller to reset file handle. */ /* * BB Is flush done by server on drop of tcp session? Should we special * case it and skip above? */ switch (smb2_command) { case SMB2_FLUSH: case SMB2_READ: case SMB2_WRITE: case SMB2_LOCK: case SMB2_IOCTL: case SMB2_QUERY_DIRECTORY: case SMB2_CHANGE_NOTIFY: case SMB2_QUERY_INFO: case SMB2_SET_INFO: rc = -EAGAIN; } unload_nls(nls_codepage); return rc; } static void fill_small_buf(__le16 smb2_command, struct cifs_tcon *tcon, void *buf, unsigned int *total_len) { struct smb2_sync_pdu *spdu = (struct smb2_sync_pdu *)buf; /* lookup word count ie StructureSize from table */ __u16 parmsize = smb2_req_struct_sizes[le16_to_cpu(smb2_command)]; /* * smaller than SMALL_BUFFER_SIZE but bigger than fixed area of * largest operations (Create) */ memset(buf, 0, 256); smb2_hdr_assemble(&spdu->sync_hdr, smb2_command, tcon); spdu->StructureSize2 = cpu_to_le16(parmsize); *total_len = parmsize + sizeof(struct smb2_sync_hdr); } /* init request without RFC1001 length at the beginning */ static int smb2_plain_req_init(__le16 smb2_command, struct cifs_tcon *tcon, void **request_buf, unsigned int *total_len) { int rc; struct smb2_sync_hdr *shdr; rc = smb2_reconnect(smb2_command, tcon); if (rc) return rc; /* BB eventually switch this to SMB2 specific small buf size */ *request_buf = cifs_small_buf_get(); if (*request_buf == NULL) { /* BB should we add a retry in here if not a writepage? */ return -ENOMEM; } shdr = (struct smb2_sync_hdr *)(*request_buf); fill_small_buf(smb2_command, tcon, shdr, total_len); if (tcon != NULL) { #ifdef CONFIG_CIFS_STATS2 uint16_t com_code = le16_to_cpu(smb2_command); cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_sent[com_code]); #endif cifs_stats_inc(&tcon->num_smbs_sent); } return rc; } /* * Allocate and return pointer to an SMB request hdr, and set basic * SMB information in the SMB header. If the return code is zero, this * function must have filled in request_buf pointer. The returned buffer * has RFC1001 length at the beginning. 
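 * Wire layout sketch (illustrative):
 *
 *   [4 byte rfc1002 length, big endian][64 byte smb2_sync_hdr][fixed area]
 *
 * smb2_plain_req_init() above builds the same request but without the
 * 4 byte length prefix; the total_len it reports covers only the sync
 * header plus the command's fixed StructureSize area.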
*/ static int small_smb2_init(__le16 smb2_command, struct cifs_tcon *tcon, void **request_buf) { int rc; unsigned int total_len; struct smb2_pdu *pdu; rc = smb2_reconnect(smb2_command, tcon); if (rc) return rc; /* BB eventually switch this to SMB2 specific small buf size */ *request_buf = cifs_small_buf_get(); if (*request_buf == NULL) { /* BB should we add a retry in here if not a writepage? */ return -ENOMEM; } pdu = (struct smb2_pdu *)(*request_buf); fill_small_buf(smb2_command, tcon, get_sync_hdr(pdu), &total_len); /* Note this is only network field converted to big endian */ pdu->hdr.smb2_buf_length = cpu_to_be32(total_len); if (tcon != NULL) { #ifdef CONFIG_CIFS_STATS2 uint16_t com_code = le16_to_cpu(smb2_command); cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_sent[com_code]); #endif cifs_stats_inc(&tcon->num_smbs_sent); } return rc; } #ifdef CONFIG_CIFS_SMB311 /* offset is sizeof smb2_negotiate_req - 4 but rounded up to 8 bytes */ #define OFFSET_OF_NEG_CONTEXT 0x68 /* sizeof(struct smb2_negotiate_req) - 4 */ #define SMB2_PREAUTH_INTEGRITY_CAPABILITIES cpu_to_le16(1) #define SMB2_ENCRYPTION_CAPABILITIES cpu_to_le16(2) static void build_preauth_ctxt(struct smb2_preauth_neg_context *pneg_ctxt) { pneg_ctxt->ContextType = SMB2_PREAUTH_INTEGRITY_CAPABILITIES; pneg_ctxt->DataLength = cpu_to_le16(38); pneg_ctxt->HashAlgorithmCount = cpu_to_le16(1); pneg_ctxt->SaltLength = cpu_to_le16(SMB311_SALT_SIZE); get_random_bytes(pneg_ctxt->Salt, SMB311_SALT_SIZE); pneg_ctxt->HashAlgorithms = SMB2_PREAUTH_INTEGRITY_SHA512; } static void build_encrypt_ctxt(struct smb2_encryption_neg_context *pneg_ctxt) { pneg_ctxt->ContextType = SMB2_ENCRYPTION_CAPABILITIES; pneg_ctxt->DataLength = cpu_to_le16(6); pneg_ctxt->CipherCount = cpu_to_le16(2); pneg_ctxt->Ciphers[0] = SMB2_ENCRYPTION_AES128_GCM; pneg_ctxt->Ciphers[1] = SMB2_ENCRYPTION_AES128_CCM; } static void assemble_neg_contexts(struct smb2_negotiate_req *req) { /* +4 is to account for the RFC1001 len field */ char *pneg_ctxt = (char *)req + OFFSET_OF_NEG_CONTEXT + 4; build_preauth_ctxt((struct smb2_preauth_neg_context *)pneg_ctxt); /* Add 2 to size to round to 8 byte boundary */ pneg_ctxt += 2 + sizeof(struct smb2_preauth_neg_context); build_encrypt_ctxt((struct smb2_encryption_neg_context *)pneg_ctxt); req->NegotiateContextOffset = cpu_to_le32(OFFSET_OF_NEG_CONTEXT); req->NegotiateContextCount = cpu_to_le16(2); inc_rfc1001_len(req, 4 + sizeof(struct smb2_preauth_neg_context) + 2 + sizeof(struct smb2_encryption_neg_context)); /* calculate hash */ } #else static void assemble_neg_contexts(struct smb2_negotiate_req *req) { return; } #endif /* SMB311 */ /* * * SMB2 Worker functions follow: * * The general structure of the worker functions is: * 1) Call smb2_init (assembles SMB2 header) * 2) Initialize SMB2 command specific fields in fixed length area of SMB * 3) Call smb_sendrcv2 (sends request on socket and waits for response) * 4) Decode SMB2 command specific fields in the fixed length area * 5) Decode variable length data area (if any for this SMB2 command type) * 6) Call free smb buffer * 7) return * */ int SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses) { struct smb2_negotiate_req *req; struct smb2_negotiate_rsp *rsp; struct kvec iov[1]; struct kvec rsp_iov; int rc = 0; int resp_buftype; struct TCP_Server_Info *server = ses->server; int blob_offset, blob_length; char *security_blob; int flags = CIFS_NEG_OP; cifs_dbg(FYI, "Negotiate protocol\n"); if (!server) { WARN(1, "%s: server is NULL!\n", __func__); return -EIO; } rc = 
small_smb2_init(SMB2_NEGOTIATE, NULL, (void **) &req); if (rc) return rc; req->hdr.sync_hdr.SessionId = 0; req->Dialects[0] = cpu_to_le16(ses->server->vals->protocol_id); req->DialectCount = cpu_to_le16(1); /* One vers= at a time for now */ inc_rfc1001_len(req, 2); /* only one of SMB2 signing flags may be set in SMB2 request */ if (ses->sign) req->SecurityMode = cpu_to_le16(SMB2_NEGOTIATE_SIGNING_REQUIRED); else if (global_secflags & CIFSSEC_MAY_SIGN) req->SecurityMode = cpu_to_le16(SMB2_NEGOTIATE_SIGNING_ENABLED); else req->SecurityMode = 0; req->Capabilities = cpu_to_le32(ses->server->vals->req_capabilities); /* ClientGUID must be zero for SMB2.02 dialect */ if (ses->server->vals->protocol_id == SMB20_PROT_ID) memset(req->ClientGUID, 0, SMB2_CLIENT_GUID_SIZE); else { memcpy(req->ClientGUID, server->client_guid, SMB2_CLIENT_GUID_SIZE); if (ses->server->vals->protocol_id == SMB311_PROT_ID) assemble_neg_contexts(req); } iov[0].iov_base = (char *)req; /* 4 for rfc1002 length field */ iov[0].iov_len = get_rfc1002_length(req) + 4; rc = SendReceive2(xid, ses, iov, 1, &resp_buftype, flags, &rsp_iov); cifs_small_buf_release(req); rsp = (struct smb2_negotiate_rsp *)rsp_iov.iov_base; /* * No tcon so can't do * cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_fail[SMB2...]); */ if (rc != 0) goto neg_exit; cifs_dbg(FYI, "mode 0x%x\n", rsp->SecurityMode); /* BB we may eventually want to match the negotiated vs. requested dialect, even though we are only requesting one at a time */ if (rsp->DialectRevision == cpu_to_le16(SMB20_PROT_ID)) cifs_dbg(FYI, "negotiated smb2.0 dialect\n"); else if (rsp->DialectRevision == cpu_to_le16(SMB21_PROT_ID)) cifs_dbg(FYI, "negotiated smb2.1 dialect\n"); else if (rsp->DialectRevision == cpu_to_le16(SMB30_PROT_ID)) cifs_dbg(FYI, "negotiated smb3.0 dialect\n"); else if (rsp->DialectRevision == cpu_to_le16(SMB302_PROT_ID)) cifs_dbg(FYI, "negotiated smb3.02 dialect\n"); #ifdef CONFIG_CIFS_SMB311 else if (rsp->DialectRevision == cpu_to_le16(SMB311_PROT_ID)) cifs_dbg(FYI, "negotiated smb3.1.1 dialect\n"); #endif /* SMB311 */ else { cifs_dbg(VFS, "Illegal dialect returned by server 0x%x\n", le16_to_cpu(rsp->DialectRevision)); rc = -EIO; goto neg_exit; } server->dialect = le16_to_cpu(rsp->DialectRevision); /* SMB2 only has an extended negflavor */ server->negflavor = CIFS_NEGFLAVOR_EXTENDED; /* set it to the maximum buffer size value we can send with 1 credit */ server->maxBuf = min_t(unsigned int, le32_to_cpu(rsp->MaxTransactSize), SMB2_MAX_BUFFER_SIZE); server->max_read = le32_to_cpu(rsp->MaxReadSize); server->max_write = le32_to_cpu(rsp->MaxWriteSize); /* BB Do we need to validate the SecurityMode? */ server->sec_mode = le16_to_cpu(rsp->SecurityMode); server->capabilities = le32_to_cpu(rsp->Capabilities); /* Internal types */ server->capabilities |= SMB2_NT_FIND | SMB2_LARGE_FILES; security_blob = smb2_get_data_area_len(&blob_offset, &blob_length, &rsp->hdr); /* * See MS-SMB2 section 2.2.4: if no blob, client picks default which * for us will be * ses->sectype = RawNTLMSSP; * but for time being this is our only auth choice so doesn't matter. * We just found a server which sets blob length to zero expecting raw. 
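 * In other words, a zero SecurityBufferLength is legal per MS-SMB2 and
 * simply means the server offered no GSS token; the client then falls
 * back to its default mechanism (raw NTLMSSP here), so the FYI message
 * below is informational rather than an error.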
*/ if (blob_length == 0) cifs_dbg(FYI, "missing security blob on negprot\n"); rc = cifs_enable_signing(server, ses->sign); if (rc) goto neg_exit; if (blob_length) { rc = decode_negTokenInit(security_blob, blob_length, server); if (rc == 1) rc = 0; else if (rc == 0) rc = -EIO; } neg_exit: free_rsp_buf(resp_buftype, rsp); return rc; } int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon) { int rc = 0; struct validate_negotiate_info_req vneg_inbuf; struct validate_negotiate_info_rsp *pneg_rsp; u32 rsplen; cifs_dbg(FYI, "validate negotiate\n"); /* * validation ioctl must be signed, so no point sending this if we * can not sign it. We could eventually change this to selectively * sign just this, the first and only signed request on a connection. * This is good enough for now since a user who wants better security * would also enable signing on the mount. Having validation of * negotiate info for signed connections helps reduce attack vectors */ if (tcon->ses->server->sign == false) return 0; /* validation requires signing */ vneg_inbuf.Capabilities = cpu_to_le32(tcon->ses->server->vals->req_capabilities); memcpy(vneg_inbuf.Guid, tcon->ses->server->client_guid, SMB2_CLIENT_GUID_SIZE); if (tcon->ses->sign) vneg_inbuf.SecurityMode = cpu_to_le16(SMB2_NEGOTIATE_SIGNING_REQUIRED); else if (global_secflags & CIFSSEC_MAY_SIGN) vneg_inbuf.SecurityMode = cpu_to_le16(SMB2_NEGOTIATE_SIGNING_ENABLED); else vneg_inbuf.SecurityMode = 0; vneg_inbuf.DialectCount = cpu_to_le16(1); vneg_inbuf.Dialects[0] = cpu_to_le16(tcon->ses->server->vals->protocol_id); rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID, FSCTL_VALIDATE_NEGOTIATE_INFO, true /* is_fsctl */, (char *)&vneg_inbuf, sizeof(struct validate_negotiate_info_req), (char **)&pneg_rsp, &rsplen); if (rc != 0) { cifs_dbg(VFS, "validate protocol negotiate failed: %d\n", rc); return -EIO; } if (rsplen != sizeof(struct validate_negotiate_info_rsp)) { cifs_dbg(VFS, "invalid size of protocol negotiate response\n"); return -EIO; } /* check validate negotiate info response matches what we got earlier */ if (pneg_rsp->Dialect != cpu_to_le16(tcon->ses->server->vals->protocol_id)) goto vneg_out; if (pneg_rsp->SecurityMode != cpu_to_le16(tcon->ses->server->sec_mode)) goto vneg_out; /* do not validate server guid because not saved at negprot time yet */ if ((le32_to_cpu(pneg_rsp->Capabilities) | SMB2_NT_FIND | SMB2_LARGE_FILES) != tcon->ses->server->capabilities) goto vneg_out; /* validate negotiate successful */ cifs_dbg(FYI, "validate negotiate info successful\n"); return 0; vneg_out: cifs_dbg(VFS, "protocol revalidation - security settings mismatch\n"); return -EIO; } struct SMB2_sess_data { unsigned int xid; struct cifs_ses *ses; struct nls_table *nls_cp; void (*func)(struct SMB2_sess_data *); int result; u64 previous_session; /* we will send the SMB in three pieces: * a fixed length beginning part, an optional * SPNEGO blob (which can be zero length), and a * last part which will include the strings * and rest of bcc area. 
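 * (in the current code that is two iovecs: iov[0] carries the fixed
 * length request and iov[1] the variable length security blob).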
This allows us to avoid * a large buffer 17K allocation */ int buf0_type; struct kvec iov[2]; }; static int SMB2_sess_alloc_buffer(struct SMB2_sess_data *sess_data) { int rc; struct cifs_ses *ses = sess_data->ses; struct smb2_sess_setup_req *req; struct TCP_Server_Info *server = ses->server; rc = small_smb2_init(SMB2_SESSION_SETUP, NULL, (void **) &req); if (rc) return rc; /* First session, not a reauthenticate */ req->hdr.sync_hdr.SessionId = 0; /* if reconnect, we need to send previous sess id, otherwise it is 0 */ req->PreviousSessionId = sess_data->previous_session; req->Flags = 0; /* MBZ */ /* to enable echos and oplocks */ req->hdr.sync_hdr.CreditRequest = cpu_to_le16(3); /* only one of SMB2 signing flags may be set in SMB2 request */ if (server->sign) req->SecurityMode = SMB2_NEGOTIATE_SIGNING_REQUIRED; else if (global_secflags & CIFSSEC_MAY_SIGN) /* one flag unlike MUST_ */ req->SecurityMode = SMB2_NEGOTIATE_SIGNING_ENABLED; else req->SecurityMode = 0; req->Capabilities = 0; req->Channel = 0; /* MBZ */ sess_data->iov[0].iov_base = (char *)req; /* 4 for rfc1002 length field and 1 for pad */ sess_data->iov[0].iov_len = get_rfc1002_length(req) + 4 - 1; /* * This variable will be used to clear the buffer * allocated above in case of any error in the calling function. */ sess_data->buf0_type = CIFS_SMALL_BUFFER; return 0; } static void SMB2_sess_free_buffer(struct SMB2_sess_data *sess_data) { free_rsp_buf(sess_data->buf0_type, sess_data->iov[0].iov_base); sess_data->buf0_type = CIFS_NO_BUFFER; } static int SMB2_sess_sendreceive(struct SMB2_sess_data *sess_data) { int rc; struct smb2_sess_setup_req *req = sess_data->iov[0].iov_base; struct kvec rsp_iov = { NULL, 0 }; /* Testing shows that buffer offset must be at location of Buffer[0] */ req->SecurityBufferOffset = cpu_to_le16(sizeof(struct smb2_sess_setup_req) - 1 /* pad */ - 4 /* rfc1001 len */); req->SecurityBufferLength = cpu_to_le16(sess_data->iov[1].iov_len); inc_rfc1001_len(req, sess_data->iov[1].iov_len - 1 /* pad */); /* BB add code to build os and lm fields */ rc = SendReceive2(sess_data->xid, sess_data->ses, sess_data->iov, 2, &sess_data->buf0_type, CIFS_LOG_ERROR | CIFS_NEG_OP, &rsp_iov); cifs_small_buf_release(sess_data->iov[0].iov_base); memcpy(&sess_data->iov[0], &rsp_iov, sizeof(struct kvec)); return rc; } static int SMB2_sess_establish_session(struct SMB2_sess_data *sess_data) { int rc = 0; struct cifs_ses *ses = sess_data->ses; mutex_lock(&ses->server->srv_mutex); if (ses->server->ops->generate_signingkey) { rc = ses->server->ops->generate_signingkey(ses); if (rc) { cifs_dbg(FYI, "SMB3 session key generation failed\n"); mutex_unlock(&ses->server->srv_mutex); return rc; } } if (!ses->server->session_estab) { ses->server->sequence_number = 0x2; ses->server->session_estab = true; } mutex_unlock(&ses->server->srv_mutex); cifs_dbg(FYI, "SMB2/3 session established successfully\n"); spin_lock(&GlobalMid_Lock); ses->status = CifsGood; ses->need_reconnect = false; spin_unlock(&GlobalMid_Lock); return rc; } #ifdef CONFIG_CIFS_UPCALL static void SMB2_auth_kerberos(struct SMB2_sess_data *sess_data) { int rc; struct cifs_ses *ses = sess_data->ses; struct cifs_spnego_msg *msg; struct key *spnego_key = NULL; struct smb2_sess_setup_rsp *rsp = NULL; rc = SMB2_sess_alloc_buffer(sess_data); if (rc) goto out; spnego_key = cifs_get_spnego_key(ses); if (IS_ERR(spnego_key)) { rc = PTR_ERR(spnego_key); spnego_key = NULL; goto out; } msg = spnego_key->payload.data[0]; /* * check version field to make sure that cifs.upcall is * sending us a 
response in an expected form */ if (msg->version != CIFS_SPNEGO_UPCALL_VERSION) { cifs_dbg(VFS, "bad cifs.upcall version. Expected %d got %d", CIFS_SPNEGO_UPCALL_VERSION, msg->version); rc = -EKEYREJECTED; goto out_put_spnego_key; } ses->auth_key.response = kmemdup(msg->data, msg->sesskey_len, GFP_KERNEL); if (!ses->auth_key.response) { cifs_dbg(VFS, "Kerberos can't allocate (%u bytes) memory", msg->sesskey_len); rc = -ENOMEM; goto out_put_spnego_key; } ses->auth_key.len = msg->sesskey_len; sess_data->iov[1].iov_base = msg->data + msg->sesskey_len; sess_data->iov[1].iov_len = msg->secblob_len; rc = SMB2_sess_sendreceive(sess_data); if (rc) goto out_put_spnego_key; rsp = (struct smb2_sess_setup_rsp *)sess_data->iov[0].iov_base; ses->Suid = rsp->hdr.sync_hdr.SessionId; ses->session_flags = le16_to_cpu(rsp->SessionFlags); if (ses->session_flags & SMB2_SESSION_FLAG_ENCRYPT_DATA) cifs_dbg(VFS, "SMB3 encryption not supported yet\n"); rc = SMB2_sess_establish_session(sess_data); out_put_spnego_key: key_invalidate(spnego_key); key_put(spnego_key); out: sess_data->result = rc; sess_data->func = NULL; SMB2_sess_free_buffer(sess_data); } #else static void SMB2_auth_kerberos(struct SMB2_sess_data *sess_data) { cifs_dbg(VFS, "Kerberos negotiated but upcall support disabled!\n"); sess_data->result = -EOPNOTSUPP; sess_data->func = NULL; } #endif static void SMB2_sess_auth_rawntlmssp_authenticate(struct SMB2_sess_data *sess_data); static void SMB2_sess_auth_rawntlmssp_negotiate(struct SMB2_sess_data *sess_data) { int rc; struct cifs_ses *ses = sess_data->ses; struct smb2_sess_setup_rsp *rsp = NULL; char *ntlmssp_blob = NULL; bool use_spnego = false; /* else use raw ntlmssp */ u16 blob_length = 0; /* * If memory allocation is successful, caller of this function * frees it. 
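 * (On the error paths below we free it ourselves and reset the pointer;
 * on success it survives into the authenticate phase, which frees it.)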
*/ ses->ntlmssp = kmalloc(sizeof(struct ntlmssp_auth), GFP_KERNEL); if (!ses->ntlmssp) { rc = -ENOMEM; goto out_err; } ses->ntlmssp->sesskey_per_smbsess = true; rc = SMB2_sess_alloc_buffer(sess_data); if (rc) goto out_err; ntlmssp_blob = kmalloc(sizeof(struct _NEGOTIATE_MESSAGE), GFP_KERNEL); if (ntlmssp_blob == NULL) { rc = -ENOMEM; goto out; } build_ntlmssp_negotiate_blob(ntlmssp_blob, ses); if (use_spnego) { /* BB eventually need to add this */ cifs_dbg(VFS, "spnego not supported for SMB2 yet\n"); rc = -EOPNOTSUPP; goto out; } else { blob_length = sizeof(struct _NEGOTIATE_MESSAGE); /* with raw NTLMSSP we don't encapsulate in SPNEGO */ } sess_data->iov[1].iov_base = ntlmssp_blob; sess_data->iov[1].iov_len = blob_length; rc = SMB2_sess_sendreceive(sess_data); rsp = (struct smb2_sess_setup_rsp *)sess_data->iov[0].iov_base; /* If true, rc here is expected and not an error */ if (sess_data->buf0_type != CIFS_NO_BUFFER && rsp->hdr.sync_hdr.Status == STATUS_MORE_PROCESSING_REQUIRED) rc = 0; if (rc) goto out; if (offsetof(struct smb2_sess_setup_rsp, Buffer) - 4 != le16_to_cpu(rsp->SecurityBufferOffset)) { cifs_dbg(VFS, "Invalid security buffer offset %d\n", le16_to_cpu(rsp->SecurityBufferOffset)); rc = -EIO; goto out; } rc = decode_ntlmssp_challenge(rsp->Buffer, le16_to_cpu(rsp->SecurityBufferLength), ses); if (rc) goto out; cifs_dbg(FYI, "rawntlmssp session setup challenge phase\n"); ses->Suid = rsp->hdr.sync_hdr.SessionId; ses->session_flags = le16_to_cpu(rsp->SessionFlags); if (ses->session_flags & SMB2_SESSION_FLAG_ENCRYPT_DATA) cifs_dbg(VFS, "SMB3 encryption not supported yet\n"); out: kfree(ntlmssp_blob); SMB2_sess_free_buffer(sess_data); if (!rc) { sess_data->result = 0; sess_data->func = SMB2_sess_auth_rawntlmssp_authenticate; return; } out_err: kfree(ses->ntlmssp); ses->ntlmssp = NULL; sess_data->result = rc; sess_data->func = NULL; } static void SMB2_sess_auth_rawntlmssp_authenticate(struct SMB2_sess_data *sess_data) { int rc; struct cifs_ses *ses = sess_data->ses; struct smb2_sess_setup_req *req; struct smb2_sess_setup_rsp *rsp = NULL; unsigned char *ntlmssp_blob = NULL; bool use_spnego = false; /* else use raw ntlmssp */ u16 blob_length = 0; rc = SMB2_sess_alloc_buffer(sess_data); if (rc) goto out; req = (struct smb2_sess_setup_req *) sess_data->iov[0].iov_base; req->hdr.sync_hdr.SessionId = ses->Suid; rc = build_ntlmssp_auth_blob(&ntlmssp_blob, &blob_length, ses, sess_data->nls_cp); if (rc) { cifs_dbg(FYI, "build_ntlmssp_auth_blob failed %d\n", rc); goto out; } if (use_spnego) { /* BB eventually need to add this */ cifs_dbg(VFS, "spnego not supported for SMB2 yet\n"); rc = -EOPNOTSUPP; goto out; } sess_data->iov[1].iov_base = ntlmssp_blob; sess_data->iov[1].iov_len = blob_length; rc = SMB2_sess_sendreceive(sess_data); if (rc) goto out; rsp = (struct smb2_sess_setup_rsp *)sess_data->iov[0].iov_base; ses->Suid = rsp->hdr.sync_hdr.SessionId; ses->session_flags = le16_to_cpu(rsp->SessionFlags); if (ses->session_flags & SMB2_SESSION_FLAG_ENCRYPT_DATA) cifs_dbg(VFS, "SMB3 encryption not supported yet\n"); rc = SMB2_sess_establish_session(sess_data); out: kfree(ntlmssp_blob); SMB2_sess_free_buffer(sess_data); kfree(ses->ntlmssp); ses->ntlmssp = NULL; sess_data->result = rc; sess_data->func = NULL; } static int SMB2_select_sec(struct cifs_ses *ses, struct SMB2_sess_data *sess_data) { if (ses->sectype != Kerberos && ses->sectype != RawNTLMSSP) ses->sectype = RawNTLMSSP; switch (ses->sectype) { case Kerberos: sess_data->func = SMB2_auth_kerberos; break; case RawNTLMSSP: sess_data->func = 
SMB2_sess_auth_rawntlmssp_negotiate; break; default: cifs_dbg(VFS, "secType %d not supported!\n", ses->sectype); return -EOPNOTSUPP; } return 0; } int SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses, const struct nls_table *nls_cp) { int rc = 0; struct TCP_Server_Info *server = ses->server; struct SMB2_sess_data *sess_data; cifs_dbg(FYI, "Session Setup\n"); if (!server) { WARN(1, "%s: server is NULL!\n", __func__); return -EIO; } sess_data = kzalloc(sizeof(struct SMB2_sess_data), GFP_KERNEL); if (!sess_data) return -ENOMEM; rc = SMB2_select_sec(ses, sess_data); if (rc) goto out; sess_data->xid = xid; sess_data->ses = ses; sess_data->buf0_type = CIFS_NO_BUFFER; sess_data->nls_cp = (struct nls_table *) nls_cp; while (sess_data->func) sess_data->func(sess_data); rc = sess_data->result; out: kfree(sess_data); return rc; } int SMB2_logoff(const unsigned int xid, struct cifs_ses *ses) { struct smb2_logoff_req *req; /* response is also trivial struct */ int rc = 0; struct TCP_Server_Info *server; int flags = 0; cifs_dbg(FYI, "disconnect session %p\n", ses); if (ses && (ses->server)) server = ses->server; else return -EIO; /* no need to send SMB logoff if uid already closed due to reconnect */ if (ses->need_reconnect) goto smb2_session_already_dead; rc = small_smb2_init(SMB2_LOGOFF, NULL, (void **) &req); if (rc) return rc; /* since no tcon, smb2_init can not do this, so do here */ req->hdr.sync_hdr.SessionId = ses->Suid; if (ses->session_flags & SMB2_SESSION_FLAG_ENCRYPT_DATA) flags |= CIFS_TRANSFORM_REQ; else if (server->sign) req->hdr.sync_hdr.Flags |= SMB2_FLAGS_SIGNED; rc = SendReceiveNoRsp(xid, ses, (char *) req, flags); cifs_small_buf_release(req); /* * No tcon so can't do * cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_fail[SMB2...]); */ smb2_session_already_dead: return rc; } static inline void cifs_stats_fail_inc(struct cifs_tcon *tcon, uint16_t code) { cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_failed[code]); } #define MAX_SHARENAME_LENGTH (255 /* server */ + 80 /* share */ + 1 /* NULL */) /* These are similar values to what Windows uses */ static inline void init_copy_chunk_defaults(struct cifs_tcon *tcon) { tcon->max_chunks = 256; tcon->max_bytes_chunk = 1048576; tcon->max_bytes_copy = 16777216; } int SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree, struct cifs_tcon *tcon, const struct nls_table *cp) { struct smb2_tree_connect_req *req; struct smb2_tree_connect_rsp *rsp = NULL; struct kvec iov[2]; struct kvec rsp_iov; int rc = 0; int resp_buftype; int unc_path_len; struct TCP_Server_Info *server; __le16 *unc_path = NULL; int flags = 0; cifs_dbg(FYI, "TCON\n"); if ((ses->server) && tree) server = ses->server; else return -EIO; if (tcon && tcon->bad_network_name) return -ENOENT; if ((tcon && tcon->seal) && ((ses->server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION) == 0)) { cifs_dbg(VFS, "encryption requested but no server support"); return -EOPNOTSUPP; } unc_path = kmalloc(MAX_SHARENAME_LENGTH * 2, GFP_KERNEL); if (unc_path == NULL) return -ENOMEM; unc_path_len = cifs_strtoUTF16(unc_path, tree, strlen(tree), cp) + 1; unc_path_len *= 2; if (unc_path_len < 2) { kfree(unc_path); return -EINVAL; } rc = small_smb2_init(SMB2_TREE_CONNECT, tcon, (void **) &req); if (rc) { kfree(unc_path); return rc; } if (ses->session_flags & SMB2_SESSION_FLAG_ENCRYPT_DATA) flags |= CIFS_TRANSFORM_REQ; if (tcon == NULL) { /* since no tcon, smb2_init can not do this, so do here */ req->hdr.sync_hdr.SessionId = ses->Suid; /* if (ses->server->sec_mode & 
SECMODE_SIGN_REQUIRED) req->hdr.Flags |= SMB2_FLAGS_SIGNED; */ } iov[0].iov_base = (char *)req; /* 4 for rfc1002 length field and 1 for pad */ iov[0].iov_len = get_rfc1002_length(req) + 4 - 1; /* Testing shows that buffer offset must be at location of Buffer[0] */ req->PathOffset = cpu_to_le16(sizeof(struct smb2_tree_connect_req) - 1 /* pad */ - 4 /* do not count rfc1001 len field */); req->PathLength = cpu_to_le16(unc_path_len - 2); iov[1].iov_base = unc_path; iov[1].iov_len = unc_path_len; inc_rfc1001_len(req, unc_path_len - 1 /* pad */); rc = SendReceive2(xid, ses, iov, 2, &resp_buftype, flags, &rsp_iov); cifs_small_buf_release(req); rsp = (struct smb2_tree_connect_rsp *)rsp_iov.iov_base; if (rc != 0) { if (tcon) { cifs_stats_fail_inc(tcon, SMB2_TREE_CONNECT_HE); tcon->need_reconnect = true; } goto tcon_error_exit; } if (tcon == NULL) { ses->ipc_tid = rsp->hdr.sync_hdr.TreeId; goto tcon_exit; } if (rsp->ShareType & SMB2_SHARE_TYPE_DISK) cifs_dbg(FYI, "connection to disk share\n"); else if (rsp->ShareType & SMB2_SHARE_TYPE_PIPE) { tcon->ipc = true; cifs_dbg(FYI, "connection to pipe share\n"); } else if (rsp->ShareType & SMB2_SHARE_TYPE_PRINT) { tcon->print = true; cifs_dbg(FYI, "connection to printer\n"); } else { cifs_dbg(VFS, "unknown share type %d\n", rsp->ShareType); rc = -EOPNOTSUPP; goto tcon_error_exit; } tcon->share_flags = le32_to_cpu(rsp->ShareFlags); tcon->capabilities = rsp->Capabilities; /* we keep caps little endian */ tcon->maximal_access = le32_to_cpu(rsp->MaximalAccess); tcon->tidStatus = CifsGood; tcon->need_reconnect = false; tcon->tid = rsp->hdr.sync_hdr.TreeId; strlcpy(tcon->treeName, tree, sizeof(tcon->treeName)); if ((rsp->Capabilities & SMB2_SHARE_CAP_DFS) && ((tcon->share_flags & SHI1005_FLAGS_DFS) == 0)) cifs_dbg(VFS, "DFS capability contradicts DFS flag\n"); init_copy_chunk_defaults(tcon); if (tcon->share_flags & SHI1005_FLAGS_ENCRYPT_DATA) cifs_dbg(VFS, "Encrypted shares not supported"); if (tcon->ses->server->ops->validate_negotiate) rc = tcon->ses->server->ops->validate_negotiate(xid, tcon); tcon_exit: free_rsp_buf(resp_buftype, rsp); kfree(unc_path); return rc; tcon_error_exit: if (rsp->hdr.sync_hdr.Status == STATUS_BAD_NETWORK_NAME) { cifs_dbg(VFS, "BAD_NETWORK_NAME: %s\n", tree); if (tcon) tcon->bad_network_name = true; } goto tcon_exit; } int SMB2_tdis(const unsigned int xid, struct cifs_tcon *tcon) { struct smb2_tree_disconnect_req *req; /* response is trivial */ int rc = 0; struct TCP_Server_Info *server; struct cifs_ses *ses = tcon->ses; int flags = 0; cifs_dbg(FYI, "Tree Disconnect\n"); if (ses && (ses->server)) server = ses->server; else return -EIO; if ((tcon->need_reconnect) || (tcon->ses->need_reconnect)) return 0; rc = small_smb2_init(SMB2_TREE_DISCONNECT, tcon, (void **) &req); if (rc) return rc; if (encryption_required(tcon)) flags |= CIFS_TRANSFORM_REQ; rc = SendReceiveNoRsp(xid, ses, (char *)req, flags); cifs_small_buf_release(req); if (rc) cifs_stats_fail_inc(tcon, SMB2_TREE_DISCONNECT_HE); return rc; } static struct create_durable * create_durable_buf(void) { struct create_durable *buf; buf = kzalloc(sizeof(struct create_durable), GFP_KERNEL); if (!buf) return NULL; buf->ccontext.DataOffset = cpu_to_le16(offsetof (struct create_durable, Data)); buf->ccontext.DataLength = cpu_to_le32(16); buf->ccontext.NameOffset = cpu_to_le16(offsetof (struct create_durable, Name)); buf->ccontext.NameLength = cpu_to_le16(4); /* SMB2_CREATE_DURABLE_HANDLE_REQUEST is "DHnQ" */ buf->Name[0] = 'D'; buf->Name[1] = 'H'; buf->Name[2] = 'n'; buf->Name[3] = 'Q'; 
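	/*
	 * Resulting context as sent on the wire (sketch): the ccontext
	 * header set above, the 4 byte name "DHnQ", then 16 bytes of
	 * zeroed Data.  create_reconnect_durable_buf() below emits the
	 * "DHnC" variant with the persistent/volatile fid pair filled
	 * into Data instead.
	 */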
return buf; } static struct create_durable * create_reconnect_durable_buf(struct cifs_fid *fid) { struct create_durable *buf; buf = kzalloc(sizeof(struct create_durable), GFP_KERNEL); if (!buf) return NULL; buf->ccontext.DataOffset = cpu_to_le16(offsetof (struct create_durable, Data)); buf->ccontext.DataLength = cpu_to_le32(16); buf->ccontext.NameOffset = cpu_to_le16(offsetof (struct create_durable, Name)); buf->ccontext.NameLength = cpu_to_le16(4); buf->Data.Fid.PersistentFileId = fid->persistent_fid; buf->Data.Fid.VolatileFileId = fid->volatile_fid; /* SMB2_CREATE_DURABLE_HANDLE_RECONNECT is "DHnC" */ buf->Name[0] = 'D'; buf->Name[1] = 'H'; buf->Name[2] = 'n'; buf->Name[3] = 'C'; return buf; } static __u8 parse_lease_state(struct TCP_Server_Info *server, struct smb2_create_rsp *rsp, unsigned int *epoch) { char *data_offset; struct create_context *cc; unsigned int next; unsigned int remaining; char *name; data_offset = (char *)rsp + 4 + le32_to_cpu(rsp->CreateContextsOffset); remaining = le32_to_cpu(rsp->CreateContextsLength); cc = (struct create_context *)data_offset; while (remaining >= sizeof(struct create_context)) { name = le16_to_cpu(cc->NameOffset) + (char *)cc; if (le16_to_cpu(cc->NameLength) == 4 && strncmp(name, "RqLs", 4) == 0) return server->ops->parse_lease_buf(cc, epoch); next = le32_to_cpu(cc->Next); if (!next) break; remaining -= next; cc = (struct create_context *)((char *)cc + next); } return 0; } static int add_lease_context(struct TCP_Server_Info *server, struct kvec *iov, unsigned int *num_iovec, __u8 *oplock) { struct smb2_create_req *req = iov[0].iov_base; unsigned int num = *num_iovec; iov[num].iov_base = server->ops->create_lease_buf(oplock+1, *oplock); if (iov[num].iov_base == NULL) return -ENOMEM; iov[num].iov_len = server->vals->create_lease_size; req->RequestedOplockLevel = SMB2_OPLOCK_LEVEL_LEASE; if (!req->CreateContextsOffset) req->CreateContextsOffset = cpu_to_le32( sizeof(struct smb2_create_req) - 4 + iov[num - 1].iov_len); le32_add_cpu(&req->CreateContextsLength, server->vals->create_lease_size); inc_rfc1001_len(&req->hdr, server->vals->create_lease_size); *num_iovec = num + 1; return 0; } static struct create_durable_v2 * create_durable_v2_buf(struct cifs_fid *pfid) { struct create_durable_v2 *buf; buf = kzalloc(sizeof(struct create_durable_v2), GFP_KERNEL); if (!buf) return NULL; buf->ccontext.DataOffset = cpu_to_le16(offsetof (struct create_durable_v2, dcontext)); buf->ccontext.DataLength = cpu_to_le32(sizeof(struct durable_context_v2)); buf->ccontext.NameOffset = cpu_to_le16(offsetof (struct create_durable_v2, Name)); buf->ccontext.NameLength = cpu_to_le16(4); buf->dcontext.Timeout = 0; /* Should this be configurable by workload */ buf->dcontext.Flags = cpu_to_le32(SMB2_DHANDLE_FLAG_PERSISTENT); generate_random_uuid(buf->dcontext.CreateGuid); memcpy(pfid->create_guid, buf->dcontext.CreateGuid, 16); /* SMB2_CREATE_DURABLE_HANDLE_REQUEST is "DH2Q" */ buf->Name[0] = 'D'; buf->Name[1] = 'H'; buf->Name[2] = '2'; buf->Name[3] = 'Q'; return buf; } static struct create_durable_handle_reconnect_v2 * create_reconnect_durable_v2_buf(struct cifs_fid *fid) { struct create_durable_handle_reconnect_v2 *buf; buf = kzalloc(sizeof(struct create_durable_handle_reconnect_v2), GFP_KERNEL); if (!buf) return NULL; buf->ccontext.DataOffset = cpu_to_le16(offsetof(struct create_durable_handle_reconnect_v2, dcontext)); buf->ccontext.DataLength = cpu_to_le32(sizeof(struct durable_reconnect_context_v2)); buf->ccontext.NameOffset = cpu_to_le16(offsetof(struct 
create_durable_handle_reconnect_v2, Name)); buf->ccontext.NameLength = cpu_to_le16(4); buf->dcontext.Fid.PersistentFileId = fid->persistent_fid; buf->dcontext.Fid.VolatileFileId = fid->volatile_fid; buf->dcontext.Flags = cpu_to_le32(SMB2_DHANDLE_FLAG_PERSISTENT); memcpy(buf->dcontext.CreateGuid, fid->create_guid, 16); /* SMB2_CREATE_DURABLE_HANDLE_RECONNECT_V2 is "DH2C" */ buf->Name[0] = 'D'; buf->Name[1] = 'H'; buf->Name[2] = '2'; buf->Name[3] = 'C'; return buf; } static int add_durable_v2_context(struct kvec *iov, unsigned int *num_iovec, struct cifs_open_parms *oparms) { struct smb2_create_req *req = iov[0].iov_base; unsigned int num = *num_iovec; iov[num].iov_base = create_durable_v2_buf(oparms->fid); if (iov[num].iov_base == NULL) return -ENOMEM; iov[num].iov_len = sizeof(struct create_durable_v2); if (!req->CreateContextsOffset) req->CreateContextsOffset = cpu_to_le32(sizeof(struct smb2_create_req) - 4 + iov[1].iov_len); le32_add_cpu(&req->CreateContextsLength, sizeof(struct create_durable_v2)); inc_rfc1001_len(&req->hdr, sizeof(struct create_durable_v2)); *num_iovec = num + 1; return 0; } static int add_durable_reconnect_v2_context(struct kvec *iov, unsigned int *num_iovec, struct cifs_open_parms *oparms) { struct smb2_create_req *req = iov[0].iov_base; unsigned int num = *num_iovec; /* indicate that we don't need to relock the file */ oparms->reconnect = false; iov[num].iov_base = create_reconnect_durable_v2_buf(oparms->fid); if (iov[num].iov_base == NULL) return -ENOMEM; iov[num].iov_len = sizeof(struct create_durable_handle_reconnect_v2); if (!req->CreateContextsOffset) req->CreateContextsOffset = cpu_to_le32(sizeof(struct smb2_create_req) - 4 + iov[1].iov_len); le32_add_cpu(&req->CreateContextsLength, sizeof(struct create_durable_handle_reconnect_v2)); inc_rfc1001_len(&req->hdr, sizeof(struct create_durable_handle_reconnect_v2)); *num_iovec = num + 1; return 0; } static int add_durable_context(struct kvec *iov, unsigned int *num_iovec, struct cifs_open_parms *oparms, bool use_persistent) { struct smb2_create_req *req = iov[0].iov_base; unsigned int num = *num_iovec; if (use_persistent) { if (oparms->reconnect) return add_durable_reconnect_v2_context(iov, num_iovec, oparms); else return add_durable_v2_context(iov, num_iovec, oparms); } if (oparms->reconnect) { iov[num].iov_base = create_reconnect_durable_buf(oparms->fid); /* indicate that we don't need to relock the file */ oparms->reconnect = false; } else iov[num].iov_base = create_durable_buf(); if (iov[num].iov_base == NULL) return -ENOMEM; iov[num].iov_len = sizeof(struct create_durable); if (!req->CreateContextsOffset) req->CreateContextsOffset = cpu_to_le32(sizeof(struct smb2_create_req) - 4 + iov[1].iov_len); le32_add_cpu(&req->CreateContextsLength, sizeof(struct create_durable)); inc_rfc1001_len(&req->hdr, sizeof(struct create_durable)); *num_iovec = num + 1; return 0; } int SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path, __u8 *oplock, struct smb2_file_all_info *buf, struct smb2_err_rsp **err_buf) { struct smb2_create_req *req; struct smb2_create_rsp *rsp; struct TCP_Server_Info *server; struct cifs_tcon *tcon = oparms->tcon; struct cifs_ses *ses = tcon->ses; struct kvec iov[4]; struct kvec rsp_iov; int resp_buftype; int uni_path_len; __le16 *copy_path = NULL; int copy_size; int rc = 0; unsigned int n_iov = 2; __u32 file_attributes = 0; char *dhc_buf = NULL, *lc_buf = NULL; int flags = 0; cifs_dbg(FYI, "create/open\n"); if (ses && (ses->server)) server = ses->server; else return -EIO; rc = 
small_smb2_init(SMB2_CREATE, tcon, (void **) &req); if (rc) return rc; if (encryption_required(tcon)) flags |= CIFS_TRANSFORM_REQ; if (oparms->create_options & CREATE_OPTION_READONLY) file_attributes |= ATTR_READONLY; if (oparms->create_options & CREATE_OPTION_SPECIAL) file_attributes |= ATTR_SYSTEM; req->ImpersonationLevel = IL_IMPERSONATION; req->DesiredAccess = cpu_to_le32(oparms->desired_access); /* File attributes ignored on open (used in create though) */ req->FileAttributes = cpu_to_le32(file_attributes); req->ShareAccess = FILE_SHARE_ALL_LE; req->CreateDisposition = cpu_to_le32(oparms->disposition); req->CreateOptions = cpu_to_le32(oparms->create_options & CREATE_OPTIONS_MASK); uni_path_len = (2 * UniStrnlen((wchar_t *)path, PATH_MAX)) + 2; /* do not count rfc1001 len field */ req->NameOffset = cpu_to_le16(sizeof(struct smb2_create_req) - 4); iov[0].iov_base = (char *)req; /* 4 for rfc1002 length field */ iov[0].iov_len = get_rfc1002_length(req) + 4; /* MUST set path len (NameLength) to 0 opening root of share */ req->NameLength = cpu_to_le16(uni_path_len - 2); /* -1 since last byte is buf[0] which is sent below (path) */ iov[0].iov_len--; if (uni_path_len % 8 != 0) { copy_size = uni_path_len / 8 * 8; if (copy_size < uni_path_len) copy_size += 8; copy_path = kzalloc(copy_size, GFP_KERNEL); if (!copy_path) return -ENOMEM; memcpy((char *)copy_path, (const char *)path, uni_path_len); uni_path_len = copy_size; path = copy_path; } iov[1].iov_len = uni_path_len; iov[1].iov_base = path; /* -1 since last byte is buf[0] which was counted in smb2_buf_len */ inc_rfc1001_len(req, uni_path_len - 1); if (!server->oplocks) *oplock = SMB2_OPLOCK_LEVEL_NONE; if (!(server->capabilities & SMB2_GLOBAL_CAP_LEASING) || *oplock == SMB2_OPLOCK_LEVEL_NONE) req->RequestedOplockLevel = *oplock; else { rc = add_lease_context(server, iov, &n_iov, oplock); if (rc) { cifs_small_buf_release(req); kfree(copy_path); return rc; } lc_buf = iov[n_iov-1].iov_base; } if (*oplock == SMB2_OPLOCK_LEVEL_BATCH) { /* need to set Next field of lease context if we request it */ if (server->capabilities & SMB2_GLOBAL_CAP_LEASING) { struct create_context *ccontext = (struct create_context *)iov[n_iov-1].iov_base; ccontext->Next = cpu_to_le32(server->vals->create_lease_size); } rc = add_durable_context(iov, &n_iov, oparms, tcon->use_persistent); if (rc) { cifs_small_buf_release(req); kfree(copy_path); kfree(lc_buf); return rc; } dhc_buf = iov[n_iov-1].iov_base; } rc = SendReceive2(xid, ses, iov, n_iov, &resp_buftype, flags, &rsp_iov); cifs_small_buf_release(req); rsp = (struct smb2_create_rsp *)rsp_iov.iov_base; if (rc != 0) { cifs_stats_fail_inc(tcon, SMB2_CREATE_HE); if (err_buf) *err_buf = kmemdup(rsp, get_rfc1002_length(rsp) + 4, GFP_KERNEL); goto creat_exit; } oparms->fid->persistent_fid = rsp->PersistentFileId; oparms->fid->volatile_fid = rsp->VolatileFileId; if (buf) { memcpy(buf, &rsp->CreationTime, 32); buf->AllocationSize = rsp->AllocationSize; buf->EndOfFile = rsp->EndofFile; buf->Attributes = rsp->FileAttributes; buf->NumberOfLinks = cpu_to_le32(1); buf->DeletePending = 0; } if (rsp->OplockLevel == SMB2_OPLOCK_LEVEL_LEASE) *oplock = parse_lease_state(server, rsp, &oparms->fid->epoch); else *oplock = rsp->OplockLevel; creat_exit: kfree(copy_path); kfree(lc_buf); kfree(dhc_buf); free_rsp_buf(resp_buftype, rsp); return rc; } /* * SMB2 IOCTL is used for both IOCTLs and FSCTLs */ int SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, u64 volatile_fid, u32 opcode, bool is_fsctl, char *in_data, u32 
indatalen, char **out_data, u32 *plen /* returned data len */) { struct smb2_ioctl_req *req; struct smb2_ioctl_rsp *rsp; struct smb2_sync_hdr *shdr; struct TCP_Server_Info *server; struct cifs_ses *ses; struct kvec iov[2]; struct kvec rsp_iov; int resp_buftype; int n_iov; int rc = 0; int flags = 0; cifs_dbg(FYI, "SMB2 IOCTL\n"); if (out_data != NULL) *out_data = NULL; /* zero out returned data len, in case of error */ if (plen) *plen = 0; if (tcon) ses = tcon->ses; else return -EIO; if (ses && (ses->server)) server = ses->server; else return -EIO; rc = small_smb2_init(SMB2_IOCTL, tcon, (void **) &req); if (rc) return rc; if (encryption_required(tcon)) flags |= CIFS_TRANSFORM_REQ; req->CtlCode = cpu_to_le32(opcode); req->PersistentFileId = persistent_fid; req->VolatileFileId = volatile_fid; if (indatalen) { req->InputCount = cpu_to_le32(indatalen); /* do not set InputOffset if no input data */ req->InputOffset = cpu_to_le32(offsetof(struct smb2_ioctl_req, Buffer) - 4); iov[1].iov_base = in_data; iov[1].iov_len = indatalen; n_iov = 2; } else n_iov = 1; req->OutputOffset = 0; req->OutputCount = 0; /* MBZ */ /* * Could increase MaxOutputResponse, but that would require more * than one credit. Windows typically sets this smaller, but for some * ioctls it may be useful to allow server to send more. No point * limiting what the server can send as long as fits in one credit */ req->MaxOutputResponse = cpu_to_le32(0xFF00); /* < 64K uses 1 credit */ if (is_fsctl) req->Flags = cpu_to_le32(SMB2_0_IOCTL_IS_FSCTL); else req->Flags = 0; iov[0].iov_base = (char *)req; /* * If no input data, the size of ioctl struct in * protocol spec still includes a 1 byte data buffer, * but if input data passed to ioctl, we do not * want to double count this, so we do not send * the dummy one byte of data in iovec[0] if sending * input data (in iovec[1]). We also must add 4 bytes * in first iovec to allow for rfc1002 length field. 
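 * Illustrative byte math (values assumed for the example): with
 * indatalen == 8, iov[0] covers the 4 byte rfc1002 length plus the fixed
 * struct minus the 1 dummy Buffer byte, the rfc1002 length grows by
 * indatalen - 1 = 7, and the 8 real input bytes travel in iov[1].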
*/ if (indatalen) { iov[0].iov_len = get_rfc1002_length(req) + 4 - 1; inc_rfc1001_len(req, indatalen - 1); } else iov[0].iov_len = get_rfc1002_length(req) + 4; rc = SendReceive2(xid, ses, iov, n_iov, &resp_buftype, flags, &rsp_iov); cifs_small_buf_release(req); rsp = (struct smb2_ioctl_rsp *)rsp_iov.iov_base; if ((rc != 0) && (rc != -EINVAL)) { cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE); goto ioctl_exit; } else if (rc == -EINVAL) { if ((opcode != FSCTL_SRV_COPYCHUNK_WRITE) && (opcode != FSCTL_SRV_COPYCHUNK)) { cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE); goto ioctl_exit; } } /* check if caller wants to look at return data or just return rc */ if ((plen == NULL) || (out_data == NULL)) goto ioctl_exit; *plen = le32_to_cpu(rsp->OutputCount); /* We check for obvious errors in the output buffer length and offset */ if (*plen == 0) goto ioctl_exit; /* server returned no data */ else if (*plen > 0xFF00) { cifs_dbg(VFS, "srv returned invalid ioctl length: %d\n", *plen); *plen = 0; rc = -EIO; goto ioctl_exit; } if (get_rfc1002_length(rsp) < le32_to_cpu(rsp->OutputOffset) + *plen) { cifs_dbg(VFS, "Malformed ioctl resp: len %d offset %d\n", *plen, le32_to_cpu(rsp->OutputOffset)); *plen = 0; rc = -EIO; goto ioctl_exit; } *out_data = kmalloc(*plen, GFP_KERNEL); if (*out_data == NULL) { rc = -ENOMEM; goto ioctl_exit; } shdr = get_sync_hdr(rsp); memcpy(*out_data, (char *)shdr + le32_to_cpu(rsp->OutputOffset), *plen); ioctl_exit: free_rsp_buf(resp_buftype, rsp); return rc; } /* * Individual callers to ioctl worker function follow */ int SMB2_set_compression(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, u64 volatile_fid) { int rc; struct compress_ioctl fsctl_input; char *ret_data = NULL; fsctl_input.CompressionState = cpu_to_le16(COMPRESSION_FORMAT_DEFAULT); rc = SMB2_ioctl(xid, tcon, persistent_fid, volatile_fid, FSCTL_SET_COMPRESSION, true /* is_fsctl */, (char *)&fsctl_input /* data input */, 2 /* in data len */, &ret_data /* out data */, NULL); cifs_dbg(FYI, "set compression rc %d\n", rc); return rc; } int SMB2_close(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, u64 volatile_fid) { struct smb2_close_req *req; struct smb2_close_rsp *rsp; struct TCP_Server_Info *server; struct cifs_ses *ses = tcon->ses; struct kvec iov[1]; struct kvec rsp_iov; int resp_buftype; int rc = 0; int flags = 0; cifs_dbg(FYI, "Close\n"); if (ses && (ses->server)) server = ses->server; else return -EIO; rc = small_smb2_init(SMB2_CLOSE, tcon, (void **) &req); if (rc) return rc; if (encryption_required(tcon)) flags |= CIFS_TRANSFORM_REQ; req->PersistentFileId = persistent_fid; req->VolatileFileId = volatile_fid; iov[0].iov_base = (char *)req; /* 4 for rfc1002 length field */ iov[0].iov_len = get_rfc1002_length(req) + 4; rc = SendReceive2(xid, ses, iov, 1, &resp_buftype, flags, &rsp_iov); cifs_small_buf_release(req); rsp = (struct smb2_close_rsp *)rsp_iov.iov_base; if (rc != 0) { cifs_stats_fail_inc(tcon, SMB2_CLOSE_HE); goto close_exit; } /* BB FIXME - decode close response, update inode for caching */ close_exit: free_rsp_buf(resp_buftype, rsp); return rc; } static int validate_buf(unsigned int offset, unsigned int buffer_length, struct smb2_hdr *hdr, unsigned int min_buf_size) { unsigned int smb_len = be32_to_cpu(hdr->smb2_buf_length); char *end_of_smb = smb_len + 4 /* RFC1001 length field */ + (char *)hdr; char *begin_of_buf = 4 /* RFC1001 len field */ + offset + (char *)hdr; char *end_of_buf = begin_of_buf + buffer_length; if (buffer_length < min_buf_size) { cifs_dbg(VFS, "buffer length %d 
smaller than minimum size %d\n", buffer_length, min_buf_size); return -EINVAL; } /* check if beyond RFC1001 maximum length */ if ((smb_len > 0x7FFFFF) || (buffer_length > 0x7FFFFF)) { cifs_dbg(VFS, "buffer length %d or smb length %d too large\n", buffer_length, smb_len); return -EINVAL; } if ((begin_of_buf > end_of_smb) || (end_of_buf > end_of_smb)) { cifs_dbg(VFS, "illegal server response, bad offset to data\n"); return -EINVAL; } return 0; } /* * If SMB buffer fields are valid, copy into temporary buffer to hold result. * Caller must free buffer. */ static int validate_and_copy_buf(unsigned int offset, unsigned int buffer_length, struct smb2_hdr *hdr, unsigned int minbufsize, char *data) { char *begin_of_buf = 4 /* RFC1001 len field */ + offset + (char *)hdr; int rc; if (!data) return -EINVAL; rc = validate_buf(offset, buffer_length, hdr, minbufsize); if (rc) return rc; memcpy(data, begin_of_buf, buffer_length); return 0; } static int query_info(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, u64 volatile_fid, u8 info_class, size_t output_len, size_t min_len, void *data) { struct smb2_query_info_req *req; struct smb2_query_info_rsp *rsp = NULL; struct kvec iov[2]; struct kvec rsp_iov; int rc = 0; int resp_buftype; struct TCP_Server_Info *server; struct cifs_ses *ses = tcon->ses; int flags = 0; cifs_dbg(FYI, "Query Info\n"); if (ses && (ses->server)) server = ses->server; else return -EIO; rc = small_smb2_init(SMB2_QUERY_INFO, tcon, (void **) &req); if (rc) return rc; if (encryption_required(tcon)) flags |= CIFS_TRANSFORM_REQ; req->InfoType = SMB2_O_INFO_FILE; req->FileInfoClass = info_class; req->PersistentFileId = persistent_fid; req->VolatileFileId = volatile_fid; /* 4 for rfc1002 length field and 1 for Buffer */ req->InputBufferOffset = cpu_to_le16(sizeof(struct smb2_query_info_req) - 1 - 4); req->OutputBufferLength = cpu_to_le32(output_len); iov[0].iov_base = (char *)req; /* 4 for rfc1002 length field */ iov[0].iov_len = get_rfc1002_length(req) + 4; rc = SendReceive2(xid, ses, iov, 1, &resp_buftype, flags, &rsp_iov); cifs_small_buf_release(req); rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base; if (rc) { cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE); goto qinf_exit; } rc = validate_and_copy_buf(le16_to_cpu(rsp->OutputBufferOffset), le32_to_cpu(rsp->OutputBufferLength), &rsp->hdr, min_len, data); qinf_exit: free_rsp_buf(resp_buftype, rsp); return rc; } int SMB2_query_info(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, u64 volatile_fid, struct smb2_file_all_info *data) { return query_info(xid, tcon, persistent_fid, volatile_fid, FILE_ALL_INFORMATION, sizeof(struct smb2_file_all_info) + PATH_MAX * 2, sizeof(struct smb2_file_all_info), data); } int SMB2_get_srv_num(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, u64 volatile_fid, __le64 *uniqueid) { return query_info(xid, tcon, persistent_fid, volatile_fid, FILE_INTERNAL_INFORMATION, sizeof(struct smb2_file_internal_info), sizeof(struct smb2_file_internal_info), uniqueid); } /* * This is a no-op for now. We're not really interested in the reply, but * rather in the fact that the server sent one and that server->lstrp * gets updated. * * FIXME: maybe we should consider checking that the reply matches request? 
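 * For now the callback below only harvests the credits granted in the
 * response header and frees the mid; verifying that the reply payload
 * matches what we sent would be a reasonable hardening step.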
*/ static void smb2_echo_callback(struct mid_q_entry *mid) { struct TCP_Server_Info *server = mid->callback_data; struct smb2_echo_rsp *rsp = (struct smb2_echo_rsp *)mid->resp_buf; unsigned int credits_received = 1; if (mid->mid_state == MID_RESPONSE_RECEIVED) credits_received = le16_to_cpu(rsp->hdr.sync_hdr.CreditRequest); mutex_lock(&server->srv_mutex); DeleteMidQEntry(mid); mutex_unlock(&server->srv_mutex); add_credits(server, credits_received, CIFS_ECHO_OP); } void smb2_reconnect_server(struct work_struct *work) { struct TCP_Server_Info *server = container_of(work, struct TCP_Server_Info, reconnect.work); struct cifs_ses *ses; struct cifs_tcon *tcon, *tcon2; struct list_head tmp_list; int tcon_exist = false; /* Prevent simultaneous reconnects that can corrupt tcon->rlist list */ mutex_lock(&server->reconnect_mutex); INIT_LIST_HEAD(&tmp_list); cifs_dbg(FYI, "Need negotiate, reconnecting tcons\n"); spin_lock(&cifs_tcp_ses_lock); list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) { list_for_each_entry(tcon, &ses->tcon_list, tcon_list) { if (tcon->need_reconnect || tcon->need_reopen_files) { tcon->tc_count++; list_add_tail(&tcon->rlist, &tmp_list); tcon_exist = true; } } } /* * Get the reference to server struct to be sure that the last call of * cifs_put_tcon() in the loop below won't release the server pointer. */ if (tcon_exist) server->srv_count++; spin_unlock(&cifs_tcp_ses_lock); list_for_each_entry_safe(tcon, tcon2, &tmp_list, rlist) { if (!smb2_reconnect(SMB2_INTERNAL_CMD, tcon)) cifs_reopen_persistent_handles(tcon); list_del_init(&tcon->rlist); cifs_put_tcon(tcon); } cifs_dbg(FYI, "Reconnecting tcons finished\n"); mutex_unlock(&server->reconnect_mutex); /* now we can safely release srv struct */ if (tcon_exist) cifs_put_tcp_session(server, 1); } int SMB2_echo(struct TCP_Server_Info *server) { struct smb2_echo_req *req; int rc = 0; struct kvec iov[2]; struct smb_rqst rqst = { .rq_iov = iov, .rq_nvec = 2 }; cifs_dbg(FYI, "In echo request\n"); if (server->tcpStatus == CifsNeedNegotiate) { /* No need to send echo on newly established connections */ queue_delayed_work(cifsiod_wq, &server->reconnect, 0); return rc; } rc = small_smb2_init(SMB2_ECHO, NULL, (void **)&req); if (rc) return rc; req->hdr.sync_hdr.CreditRequest = cpu_to_le16(1); /* 4 for rfc1002 length field */ iov[0].iov_len = 4; iov[0].iov_base = (char *)req; iov[1].iov_len = get_rfc1002_length(req); iov[1].iov_base = (char *)req + 4; rc = cifs_call_async(server, &rqst, NULL, smb2_echo_callback, server, CIFS_ECHO_OP); if (rc) cifs_dbg(FYI, "Echo request failed: %d\n", rc); cifs_small_buf_release(req); return rc; } int SMB2_flush(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, u64 volatile_fid) { struct smb2_flush_req *req; struct TCP_Server_Info *server; struct cifs_ses *ses = tcon->ses; struct kvec iov[1]; struct kvec rsp_iov; int resp_buftype; int rc = 0; int flags = 0; cifs_dbg(FYI, "Flush\n"); if (ses && (ses->server)) server = ses->server; else return -EIO; rc = small_smb2_init(SMB2_FLUSH, tcon, (void **) &req); if (rc) return rc; if (encryption_required(tcon)) flags |= CIFS_TRANSFORM_REQ; req->PersistentFileId = persistent_fid; req->VolatileFileId = volatile_fid; iov[0].iov_base = (char *)req; /* 4 for rfc1002 length field */ iov[0].iov_len = get_rfc1002_length(req) + 4; rc = SendReceive2(xid, ses, iov, 1, &resp_buftype, flags, &rsp_iov); cifs_small_buf_release(req); if (rc != 0) cifs_stats_fail_inc(tcon, SMB2_FLUSH_HE); free_rsp_buf(resp_buftype, rsp_iov.iov_base); return rc; } /* * To 
form a chain of read requests, any read requests after the first should * have the end_of_chain boolean set to true. */ static int smb2_new_read_req(void **buf, unsigned int *total_len, struct cifs_io_parms *io_parms, unsigned int remaining_bytes, int request_type) { int rc = -EACCES; struct smb2_read_plain_req *req = NULL; struct smb2_sync_hdr *shdr; rc = smb2_plain_req_init(SMB2_READ, io_parms->tcon, (void **) &req, total_len); if (rc) return rc; if (io_parms->tcon->ses->server == NULL) return -ECONNABORTED; shdr = &req->sync_hdr; shdr->ProcessId = cpu_to_le32(io_parms->pid); req->PersistentFileId = io_parms->persistent_fid; req->VolatileFileId = io_parms->volatile_fid; req->ReadChannelInfoOffset = 0; /* reserved */ req->ReadChannelInfoLength = 0; /* reserved */ req->Channel = 0; /* reserved */ req->MinimumCount = 0; req->Length = cpu_to_le32(io_parms->length); req->Offset = cpu_to_le64(io_parms->offset); if (request_type & CHAINED_REQUEST) { if (!(request_type & END_OF_CHAIN)) { /* next 8-byte aligned request */ *total_len = DIV_ROUND_UP(*total_len, 8) * 8; shdr->NextCommand = cpu_to_le32(*total_len); } else /* END_OF_CHAIN */ shdr->NextCommand = 0; if (request_type & RELATED_REQUEST) { shdr->Flags |= SMB2_FLAGS_RELATED_OPERATIONS; /* * Related requests use info from previous read request * in chain. */ shdr->SessionId = 0xFFFFFFFF; shdr->TreeId = 0xFFFFFFFF; req->PersistentFileId = 0xFFFFFFFF; req->VolatileFileId = 0xFFFFFFFF; } } if (remaining_bytes > io_parms->length) req->RemainingBytes = cpu_to_le32(remaining_bytes); else req->RemainingBytes = 0; *buf = req; return rc; } static void smb2_readv_callback(struct mid_q_entry *mid) { struct cifs_readdata *rdata = mid->callback_data; struct cifs_tcon *tcon = tlink_tcon(rdata->cfile->tlink); struct TCP_Server_Info *server = tcon->ses->server; struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)rdata->iov[1].iov_base; unsigned int credits_received = 1; struct smb_rqst rqst = { .rq_iov = rdata->iov, .rq_nvec = 2, .rq_pages = rdata->pages, .rq_npages = rdata->nr_pages, .rq_pagesz = rdata->pagesz, .rq_tailsz = rdata->tailsz }; cifs_dbg(FYI, "%s: mid=%llu state=%d result=%d bytes=%u\n", __func__, mid->mid, mid->mid_state, rdata->result, rdata->bytes); switch (mid->mid_state) { case MID_RESPONSE_RECEIVED: credits_received = le16_to_cpu(shdr->CreditRequest); /* result already set, check signature */ if (server->sign) { int rc; rc = smb2_verify_signature(&rqst, server); if (rc) cifs_dbg(VFS, "SMB signature verification returned error = %d\n", rc); } /* FIXME: should this be counted toward the initiating task? */ task_io_account_read(rdata->got_bytes); cifs_stats_bytes_read(tcon, rdata->got_bytes); break; case MID_REQUEST_SUBMITTED: case MID_RETRY_NEEDED: rdata->result = -EAGAIN; if (server->sign && rdata->got_bytes) /* reset bytes number since we can not check a sign */ rdata->got_bytes = 0; /* FIXME: should this be counted toward the initiating task? 
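 * (Same question as in the success path above.  Note that when signing
 * is enabled got_bytes was just zeroed, so this accounting becomes a
 * no-op for retried, signed reads.)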
*/ task_io_account_read(rdata->got_bytes); cifs_stats_bytes_read(tcon, rdata->got_bytes); break; default: if (rdata->result != -ENODATA) rdata->result = -EIO; } if (rdata->result) cifs_stats_fail_inc(tcon, SMB2_READ_HE); queue_work(cifsiod_wq, &rdata->work); mutex_lock(&server->srv_mutex); DeleteMidQEntry(mid); mutex_unlock(&server->srv_mutex); add_credits(server, credits_received, 0); } /* smb2_async_readv - send an async read, and set up mid to handle result */ int smb2_async_readv(struct cifs_readdata *rdata) { int rc, flags = 0; char *buf; struct smb2_sync_hdr *shdr; struct cifs_io_parms io_parms; struct smb_rqst rqst = { .rq_iov = rdata->iov, .rq_nvec = 2 }; struct TCP_Server_Info *server; unsigned int total_len; __be32 req_len; cifs_dbg(FYI, "%s: offset=%llu bytes=%u\n", __func__, rdata->offset, rdata->bytes); io_parms.tcon = tlink_tcon(rdata->cfile->tlink); io_parms.offset = rdata->offset; io_parms.length = rdata->bytes; io_parms.persistent_fid = rdata->cfile->fid.persistent_fid; io_parms.volatile_fid = rdata->cfile->fid.volatile_fid; io_parms.pid = rdata->pid; server = io_parms.tcon->ses->server; rc = smb2_new_read_req((void **) &buf, &total_len, &io_parms, 0, 0); if (rc) { if (rc == -EAGAIN && rdata->credits) { /* credits was reset by reconnect */ rdata->credits = 0; /* reduce in_flight value since we won't send the req */ spin_lock(&server->req_lock); server->in_flight--; spin_unlock(&server->req_lock); } return rc; } if (encryption_required(io_parms.tcon)) flags |= CIFS_TRANSFORM_REQ; req_len = cpu_to_be32(total_len); rdata->iov[0].iov_base = &req_len; rdata->iov[0].iov_len = sizeof(__be32); rdata->iov[1].iov_base = buf; rdata->iov[1].iov_len = total_len; shdr = (struct smb2_sync_hdr *)buf; if (rdata->credits) { shdr->CreditCharge = cpu_to_le16(DIV_ROUND_UP(rdata->bytes, SMB2_MAX_BUFFER_SIZE)); shdr->CreditRequest = shdr->CreditCharge; spin_lock(&server->req_lock); server->credits += rdata->credits - le16_to_cpu(shdr->CreditCharge); spin_unlock(&server->req_lock); wake_up(&server->request_q); flags |= CIFS_HAS_CREDITS; } kref_get(&rdata->refcount); rc = cifs_call_async(io_parms.tcon->ses->server, &rqst, cifs_readv_receive, smb2_readv_callback, rdata, flags); if (rc) { kref_put(&rdata->refcount, cifs_readdata_release); cifs_stats_fail_inc(io_parms.tcon, SMB2_READ_HE); } cifs_small_buf_release(buf); return rc; } int SMB2_read(const unsigned int xid, struct cifs_io_parms *io_parms, unsigned int *nbytes, char **buf, int *buf_type) { int resp_buftype, rc = -EACCES; struct smb2_read_plain_req *req = NULL; struct smb2_read_rsp *rsp = NULL; struct smb2_sync_hdr *shdr; struct kvec iov[2]; struct kvec rsp_iov; unsigned int total_len; __be32 req_len; struct smb_rqst rqst = { .rq_iov = iov, .rq_nvec = 2 }; int flags = CIFS_LOG_ERROR; struct cifs_ses *ses = io_parms->tcon->ses; *nbytes = 0; rc = smb2_new_read_req((void **)&req, &total_len, io_parms, 0, 0); if (rc) return rc; if (encryption_required(io_parms->tcon)) flags |= CIFS_TRANSFORM_REQ; req_len = cpu_to_be32(total_len); iov[0].iov_base = &req_len; iov[0].iov_len = sizeof(__be32); iov[1].iov_base = req; iov[1].iov_len = total_len; rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov); cifs_small_buf_release(req); rsp = (struct smb2_read_rsp *)rsp_iov.iov_base; shdr = get_sync_hdr(rsp); if (shdr->Status == STATUS_END_OF_FILE) { free_rsp_buf(resp_buftype, rsp_iov.iov_base); return 0; } if (rc) { cifs_stats_fail_inc(io_parms->tcon, SMB2_READ_HE); cifs_dbg(VFS, "Send error in read = %d\n", rc); } else { *nbytes = 
le32_to_cpu(rsp->DataLength); if ((*nbytes > CIFS_MAX_MSGSIZE) || (*nbytes > io_parms->length)) { cifs_dbg(FYI, "bad length %d for count %d\n", *nbytes, io_parms->length); rc = -EIO; *nbytes = 0; } } if (*buf) { memcpy(*buf, (char *)shdr + rsp->DataOffset, *nbytes); free_rsp_buf(resp_buftype, rsp_iov.iov_base); } else if (resp_buftype != CIFS_NO_BUFFER) { *buf = rsp_iov.iov_base; if (resp_buftype == CIFS_SMALL_BUFFER) *buf_type = CIFS_SMALL_BUFFER; else if (resp_buftype == CIFS_LARGE_BUFFER) *buf_type = CIFS_LARGE_BUFFER; } return rc; } /* * Check the mid_state and signature on received buffer (if any), and queue the * workqueue completion task. */ static void smb2_writev_callback(struct mid_q_entry *mid) { struct cifs_writedata *wdata = mid->callback_data; struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink); struct TCP_Server_Info *server = tcon->ses->server; unsigned int written; struct smb2_write_rsp *rsp = (struct smb2_write_rsp *)mid->resp_buf; unsigned int credits_received = 1; switch (mid->mid_state) { case MID_RESPONSE_RECEIVED: credits_received = le16_to_cpu(rsp->hdr.sync_hdr.CreditRequest); wdata->result = smb2_check_receive(mid, tcon->ses->server, 0); if (wdata->result != 0) break; written = le32_to_cpu(rsp->DataLength); /* * Mask off high 16 bits when bytes written as returned * by the server is greater than bytes requested by the * client. OS/2 servers are known to set incorrect * CountHigh values. */ if (written > wdata->bytes) written &= 0xFFFF; if (written < wdata->bytes) wdata->result = -ENOSPC; else wdata->bytes = written; break; case MID_REQUEST_SUBMITTED: case MID_RETRY_NEEDED: wdata->result = -EAGAIN; break; default: wdata->result = -EIO; break; } if (wdata->result) cifs_stats_fail_inc(tcon, SMB2_WRITE_HE); queue_work(cifsiod_wq, &wdata->work); mutex_lock(&server->srv_mutex); DeleteMidQEntry(mid); mutex_unlock(&server->srv_mutex); add_credits(tcon->ses->server, credits_received, 0); } /* smb2_async_writev - send an async write, and set up mid to handle result */ int smb2_async_writev(struct cifs_writedata *wdata, void (*release)(struct kref *kref)) { int rc = -EACCES, flags = 0; struct smb2_write_req *req = NULL; struct smb2_sync_hdr *shdr; struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink); struct TCP_Server_Info *server = tcon->ses->server; struct kvec iov[2]; struct smb_rqst rqst = { }; rc = small_smb2_init(SMB2_WRITE, tcon, (void **) &req); if (rc) { if (rc == -EAGAIN && wdata->credits) { /* credits was reset by reconnect */ wdata->credits = 0; /* reduce in_flight value since we won't send the req */ spin_lock(&server->req_lock); server->in_flight--; spin_unlock(&server->req_lock); } goto async_writev_out; } if (encryption_required(tcon)) flags |= CIFS_TRANSFORM_REQ; shdr = get_sync_hdr(req); shdr->ProcessId = cpu_to_le32(wdata->cfile->pid); req->PersistentFileId = wdata->cfile->fid.persistent_fid; req->VolatileFileId = wdata->cfile->fid.volatile_fid; req->WriteChannelInfoOffset = 0; req->WriteChannelInfoLength = 0; req->Channel = 0; req->Offset = cpu_to_le64(wdata->offset); /* 4 for rfc1002 length field */ req->DataOffset = cpu_to_le16( offsetof(struct smb2_write_req, Buffer) - 4); req->RemainingBytes = 0; /* 4 for rfc1002 length field and 1 for Buffer */ iov[0].iov_len = 4; iov[0].iov_base = req; iov[1].iov_len = get_rfc1002_length(req) - 1; iov[1].iov_base = (char *)req + 4; rqst.rq_iov = iov; rqst.rq_nvec = 2; rqst.rq_pages = wdata->pages; rqst.rq_npages = wdata->nr_pages; rqst.rq_pagesz = wdata->pagesz; rqst.rq_tailsz = wdata->tailsz; cifs_dbg(FYI, 
"async write at %llu %u bytes\n", wdata->offset, wdata->bytes); req->Length = cpu_to_le32(wdata->bytes); inc_rfc1001_len(&req->hdr, wdata->bytes - 1 /* Buffer */); if (wdata->credits) { shdr->CreditCharge = cpu_to_le16(DIV_ROUND_UP(wdata->bytes, SMB2_MAX_BUFFER_SIZE)); shdr->CreditRequest = shdr->CreditCharge; spin_lock(&server->req_lock); server->credits += wdata->credits - le16_to_cpu(shdr->CreditCharge); spin_unlock(&server->req_lock); wake_up(&server->request_q); flags |= CIFS_HAS_CREDITS; } kref_get(&wdata->refcount); rc = cifs_call_async(server, &rqst, NULL, smb2_writev_callback, wdata, flags); if (rc) { kref_put(&wdata->refcount, release); cifs_stats_fail_inc(tcon, SMB2_WRITE_HE); } async_writev_out: cifs_small_buf_release(req); return rc; } /* * SMB2_write function gets iov pointer to kvec array with n_vec as a length. * The length field from io_parms must be at least 1 and indicates a number of * elements with data to write that begins with position 1 in iov array. All * data length is specified by count. */ int SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms, unsigned int *nbytes, struct kvec *iov, int n_vec) { int rc = 0; struct smb2_write_req *req = NULL; struct smb2_write_rsp *rsp = NULL; int resp_buftype; struct kvec rsp_iov; int flags = 0; *nbytes = 0; if (n_vec < 1) return rc; rc = small_smb2_init(SMB2_WRITE, io_parms->tcon, (void **) &req); if (rc) return rc; if (io_parms->tcon->ses->server == NULL) return -ECONNABORTED; if (encryption_required(io_parms->tcon)) flags |= CIFS_TRANSFORM_REQ; req->hdr.sync_hdr.ProcessId = cpu_to_le32(io_parms->pid); req->PersistentFileId = io_parms->persistent_fid; req->VolatileFileId = io_parms->volatile_fid; req->WriteChannelInfoOffset = 0; req->WriteChannelInfoLength = 0; req->Channel = 0; req->Length = cpu_to_le32(io_parms->length); req->Offset = cpu_to_le64(io_parms->offset); /* 4 for rfc1002 length field */ req->DataOffset = cpu_to_le16( offsetof(struct smb2_write_req, Buffer) - 4); req->RemainingBytes = 0; iov[0].iov_base = (char *)req; /* 4 for rfc1002 length field and 1 for Buffer */ iov[0].iov_len = get_rfc1002_length(req) + 4 - 1; /* length of entire message including data to be written */ inc_rfc1001_len(req, io_parms->length - 1 /* Buffer */); rc = SendReceive2(xid, io_parms->tcon->ses, iov, n_vec + 1, &resp_buftype, flags, &rsp_iov); cifs_small_buf_release(req); rsp = (struct smb2_write_rsp *)rsp_iov.iov_base; if (rc) { cifs_stats_fail_inc(io_parms->tcon, SMB2_WRITE_HE); cifs_dbg(VFS, "Send error in write = %d\n", rc); } else *nbytes = le32_to_cpu(rsp->DataLength); free_rsp_buf(resp_buftype, rsp); return rc; } static unsigned int num_entries(char *bufstart, char *end_of_buf, char **lastentry, size_t size) { int len; unsigned int entrycount = 0; unsigned int next_offset = 0; FILE_DIRECTORY_INFO *entryptr; if (bufstart == NULL) return 0; entryptr = (FILE_DIRECTORY_INFO *)bufstart; while (1) { entryptr = (FILE_DIRECTORY_INFO *) ((char *)entryptr + next_offset); if ((char *)entryptr + size > end_of_buf) { cifs_dbg(VFS, "malformed search entry would overflow\n"); break; } len = le32_to_cpu(entryptr->FileNameLength); if ((char *)entryptr + len + size > end_of_buf) { cifs_dbg(VFS, "directory entry name would overflow frame end of buf %p\n", end_of_buf); break; } *lastentry = (char *)entryptr; entrycount++; next_offset = le32_to_cpu(entryptr->NextEntryOffset); if (!next_offset) break; } return entrycount; } /* * Readdir/FindFirst */ int SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon, u64 
persistent_fid, u64 volatile_fid, int index, struct cifs_search_info *srch_inf) { struct smb2_query_directory_req *req; struct smb2_query_directory_rsp *rsp = NULL; struct kvec iov[2]; struct kvec rsp_iov; int rc = 0; int len; int resp_buftype = CIFS_NO_BUFFER; unsigned char *bufptr; struct TCP_Server_Info *server; struct cifs_ses *ses = tcon->ses; __le16 asteriks = cpu_to_le16('*'); char *end_of_smb; unsigned int output_size = CIFSMaxBufSize; size_t info_buf_size; int flags = 0; if (ses && (ses->server)) server = ses->server; else return -EIO; rc = small_smb2_init(SMB2_QUERY_DIRECTORY, tcon, (void **) &req); if (rc) return rc; if (encryption_required(tcon)) flags |= CIFS_TRANSFORM_REQ; switch (srch_inf->info_level) { case SMB_FIND_FILE_DIRECTORY_INFO: req->FileInformationClass = FILE_DIRECTORY_INFORMATION; info_buf_size = sizeof(FILE_DIRECTORY_INFO) - 1; break; case SMB_FIND_FILE_ID_FULL_DIR_INFO: req->FileInformationClass = FILEID_FULL_DIRECTORY_INFORMATION; info_buf_size = sizeof(SEARCH_ID_FULL_DIR_INFO) - 1; break; default: cifs_dbg(VFS, "info level %u isn't supported\n", srch_inf->info_level); rc = -EINVAL; goto qdir_exit; } req->FileIndex = cpu_to_le32(index); req->PersistentFileId = persistent_fid; req->VolatileFileId = volatile_fid; len = 0x2; bufptr = req->Buffer; memcpy(bufptr, &asteriks, len); req->FileNameOffset = cpu_to_le16(sizeof(struct smb2_query_directory_req) - 1 - 4); req->FileNameLength = cpu_to_le16(len); /* * BB could be 30 bytes or so longer if we used SMB2 specific * buffer lengths, but this is safe and close enough. */ output_size = min_t(unsigned int, output_size, server->maxBuf); output_size = min_t(unsigned int, output_size, 2 << 15); req->OutputBufferLength = cpu_to_le32(output_size); iov[0].iov_base = (char *)req; /* 4 for RFC1001 length and 1 for Buffer */ iov[0].iov_len = get_rfc1002_length(req) + 4 - 1; iov[1].iov_base = (char *)(req->Buffer); iov[1].iov_len = len; inc_rfc1001_len(req, len - 1 /* Buffer */); rc = SendReceive2(xid, ses, iov, 2, &resp_buftype, flags, &rsp_iov); cifs_small_buf_release(req); rsp = (struct smb2_query_directory_rsp *)rsp_iov.iov_base; if (rc) { if (rc == -ENODATA && rsp->hdr.sync_hdr.Status == STATUS_NO_MORE_FILES) { srch_inf->endOfSearch = true; rc = 0; } cifs_stats_fail_inc(tcon, SMB2_QUERY_DIRECTORY_HE); goto qdir_exit; } rc = validate_buf(le16_to_cpu(rsp->OutputBufferOffset), le32_to_cpu(rsp->OutputBufferLength), &rsp->hdr, info_buf_size); if (rc) goto qdir_exit; srch_inf->unicode = true; if (srch_inf->ntwrk_buf_start) { if (srch_inf->smallBuf) cifs_small_buf_release(srch_inf->ntwrk_buf_start); else cifs_buf_release(srch_inf->ntwrk_buf_start); } srch_inf->ntwrk_buf_start = (char *)rsp; srch_inf->srch_entries_start = srch_inf->last_entry = 4 /* rfclen */ + (char *)&rsp->hdr + le16_to_cpu(rsp->OutputBufferOffset); /* 4 for rfc1002 length field */ end_of_smb = get_rfc1002_length(rsp) + 4 + (char *)&rsp->hdr; srch_inf->entries_in_buffer = num_entries(srch_inf->srch_entries_start, end_of_smb, &srch_inf->last_entry, info_buf_size); srch_inf->index_of_last_entry += srch_inf->entries_in_buffer; cifs_dbg(FYI, "num entries %d last_index %lld srch start %p srch end %p\n", srch_inf->entries_in_buffer, srch_inf->index_of_last_entry, srch_inf->srch_entries_start, srch_inf->last_entry); if (resp_buftype == CIFS_LARGE_BUFFER) srch_inf->smallBuf = false; else if (resp_buftype == CIFS_SMALL_BUFFER) srch_inf->smallBuf = true; else cifs_dbg(VFS, "illegal search buffer type\n"); return rc; qdir_exit: free_rsp_buf(resp_buftype, rsp); return rc; } 
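/*
 * [Editor's illustrative sketch -- not part of the original smb2pdu.c.]
 * num_entries() above walks the server-supplied FILE_DIRECTORY_INFO array
 * by chaining NextEntryOffset values, and bounds-checks every step against
 * end_of_buf so that a malformed offset or name length cannot read past the
 * response frame. The same pattern, reduced to its essentials (the types and
 * le32_to_cpu() come from the cifs headers this file already includes):
 */
static unsigned int count_dir_entries_sketch(char *bufstart, char *end_of_buf,
					     size_t min_entry_size)
{
	FILE_DIRECTORY_INFO *entry = (FILE_DIRECTORY_INFO *)bufstart;
	unsigned int count = 0;
	unsigned int next = 0;

	if (bufstart == NULL)
		return 0;
	for (;;) {
		entry = (FILE_DIRECTORY_INFO *)((char *)entry + next);
		/* the fixed part of the entry must fit inside the frame */
		if ((char *)entry + min_entry_size > end_of_buf)
			break;
		/* the variable-length name must fit inside the frame too */
		if ((char *)entry + min_entry_size +
		    le32_to_cpu(entry->FileNameLength) > end_of_buf)
			break;
		count++;
		next = le32_to_cpu(entry->NextEntryOffset);
		if (next == 0)	/* a zero offset terminates the chain */
			break;
	}
	return count;
}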
static int send_set_info(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, u64 volatile_fid, u32 pid, int info_class, unsigned int num, void **data, unsigned int *size) { struct smb2_set_info_req *req; struct smb2_set_info_rsp *rsp = NULL; struct kvec *iov; struct kvec rsp_iov; int rc = 0; int resp_buftype; unsigned int i; struct TCP_Server_Info *server; struct cifs_ses *ses = tcon->ses; int flags = 0; if (ses && (ses->server)) server = ses->server; else return -EIO; if (!num) return -EINVAL; iov = kmalloc(sizeof(struct kvec) * num, GFP_KERNEL); if (!iov) return -ENOMEM; rc = small_smb2_init(SMB2_SET_INFO, tcon, (void **) &req); if (rc) { kfree(iov); return rc; } if (encryption_required(tcon)) flags |= CIFS_TRANSFORM_REQ; req->hdr.sync_hdr.ProcessId = cpu_to_le32(pid); req->InfoType = SMB2_O_INFO_FILE; req->FileInfoClass = info_class; req->PersistentFileId = persistent_fid; req->VolatileFileId = volatile_fid; /* 4 for RFC1001 length and 1 for Buffer */ req->BufferOffset = cpu_to_le16(sizeof(struct smb2_set_info_req) - 1 - 4); req->BufferLength = cpu_to_le32(*size); inc_rfc1001_len(req, *size - 1 /* Buffer */); memcpy(req->Buffer, *data, *size); iov[0].iov_base = (char *)req; /* 4 for RFC1001 length */ iov[0].iov_len = get_rfc1002_length(req) + 4; for (i = 1; i < num; i++) { inc_rfc1001_len(req, size[i]); le32_add_cpu(&req->BufferLength, size[i]); iov[i].iov_base = (char *)data[i]; iov[i].iov_len = size[i]; } rc = SendReceive2(xid, ses, iov, num, &resp_buftype, flags, &rsp_iov); cifs_small_buf_release(req); rsp = (struct smb2_set_info_rsp *)rsp_iov.iov_base; if (rc != 0) cifs_stats_fail_inc(tcon, SMB2_SET_INFO_HE); free_rsp_buf(resp_buftype, rsp); kfree(iov); return rc; } int SMB2_rename(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, u64 volatile_fid, __le16 *target_file) { struct smb2_file_rename_info info; void **data; unsigned int size[2]; int rc; int len = (2 * UniStrnlen((wchar_t *)target_file, PATH_MAX)); data = kmalloc(sizeof(void *) * 2, GFP_KERNEL); if (!data) return -ENOMEM; info.ReplaceIfExists = 1; /* 1 = replace existing target with new */ /* 0 = fail if target already exists */ info.RootDirectory = 0; /* MBZ for network ops (why does spec say?) */ info.FileNameLength = cpu_to_le32(len); data[0] = &info; size[0] = sizeof(struct smb2_file_rename_info); data[1] = target_file; size[1] = len + 2 /* null */; rc = send_set_info(xid, tcon, persistent_fid, volatile_fid, current->tgid, FILE_RENAME_INFORMATION, 2, data, size); kfree(data); return rc; } int SMB2_rmdir(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, u64 volatile_fid) { __u8 delete_pending = 1; void *data; unsigned int size; data = &delete_pending; size = 1; /* sizeof __u8 */ return send_set_info(xid, tcon, persistent_fid, volatile_fid, current->tgid, FILE_DISPOSITION_INFORMATION, 1, &data, &size); } int SMB2_set_hardlink(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, u64 volatile_fid, __le16 *target_file) { struct smb2_file_link_info info; void **data; unsigned int size[2]; int rc; int len = (2 * UniStrnlen((wchar_t *)target_file, PATH_MAX)); data = kmalloc(sizeof(void *) * 2, GFP_KERNEL); if (!data) return -ENOMEM; info.ReplaceIfExists = 0; /* 1 = replace existing link with new */ /* 0 = fail if link already exists */ info.RootDirectory = 0; /* MBZ for network ops (why does spec say?) 
*/ info.FileNameLength = cpu_to_le32(len); data[0] = &info; size[0] = sizeof(struct smb2_file_link_info); data[1] = target_file; size[1] = len + 2 /* null */; rc = send_set_info(xid, tcon, persistent_fid, volatile_fid, current->tgid, FILE_LINK_INFORMATION, 2, data, size); kfree(data); return rc; } int SMB2_set_eof(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, u64 volatile_fid, u32 pid, __le64 *eof, bool is_falloc) { struct smb2_file_eof_info info; void *data; unsigned int size; info.EndOfFile = *eof; data = &info; size = sizeof(struct smb2_file_eof_info); if (is_falloc) return send_set_info(xid, tcon, persistent_fid, volatile_fid, pid, FILE_ALLOCATION_INFORMATION, 1, &data, &size); else return send_set_info(xid, tcon, persistent_fid, volatile_fid, pid, FILE_END_OF_FILE_INFORMATION, 1, &data, &size); } int SMB2_set_info(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, u64 volatile_fid, FILE_BASIC_INFO *buf) { unsigned int size; size = sizeof(FILE_BASIC_INFO); return send_set_info(xid, tcon, persistent_fid, volatile_fid, current->tgid, FILE_BASIC_INFORMATION, 1, (void **)&buf, &size); } int SMB2_oplock_break(const unsigned int xid, struct cifs_tcon *tcon, const u64 persistent_fid, const u64 volatile_fid, __u8 oplock_level) { int rc; struct smb2_oplock_break *req = NULL; int flags = CIFS_OBREAK_OP; cifs_dbg(FYI, "SMB2_oplock_break\n"); rc = small_smb2_init(SMB2_OPLOCK_BREAK, tcon, (void **) &req); if (rc) return rc; if (encryption_required(tcon)) flags |= CIFS_TRANSFORM_REQ; req->VolatileFid = volatile_fid; req->PersistentFid = persistent_fid; req->OplockLevel = oplock_level; req->hdr.sync_hdr.CreditRequest = cpu_to_le16(1); rc = SendReceiveNoRsp(xid, tcon->ses, (char *) req, flags); cifs_small_buf_release(req); if (rc) { cifs_stats_fail_inc(tcon, SMB2_OPLOCK_BREAK_HE); cifs_dbg(FYI, "Send error in Oplock Break = %d\n", rc); } return rc; } static void copy_fs_info_to_kstatfs(struct smb2_fs_full_size_info *pfs_inf, struct kstatfs *kst) { kst->f_bsize = le32_to_cpu(pfs_inf->BytesPerSector) * le32_to_cpu(pfs_inf->SectorsPerAllocationUnit); kst->f_blocks = le64_to_cpu(pfs_inf->TotalAllocationUnits); kst->f_bfree = le64_to_cpu(pfs_inf->ActualAvailableAllocationUnits); kst->f_bavail = le64_to_cpu(pfs_inf->CallerAvailableAllocationUnits); return; } static int build_qfs_info_req(struct kvec *iov, struct cifs_tcon *tcon, int level, int outbuf_len, u64 persistent_fid, u64 volatile_fid) { int rc; struct smb2_query_info_req *req; cifs_dbg(FYI, "Query FSInfo level %d\n", level); if ((tcon->ses == NULL) || (tcon->ses->server == NULL)) return -EIO; rc = small_smb2_init(SMB2_QUERY_INFO, tcon, (void **) &req); if (rc) return rc; req->InfoType = SMB2_O_INFO_FILESYSTEM; req->FileInfoClass = level; req->PersistentFileId = persistent_fid; req->VolatileFileId = volatile_fid; /* 4 for rfc1002 length field and 1 for pad */ req->InputBufferOffset = cpu_to_le16(sizeof(struct smb2_query_info_req) - 1 - 4); req->OutputBufferLength = cpu_to_le32( outbuf_len + sizeof(struct smb2_query_info_rsp) - 1 - 4); iov->iov_base = (char *)req; /* 4 for rfc1002 length field */ iov->iov_len = get_rfc1002_length(req) + 4; return 0; } int SMB2_QFS_info(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, u64 volatile_fid, struct kstatfs *fsdata) { struct smb2_query_info_rsp *rsp = NULL; struct kvec iov; struct kvec rsp_iov; int rc = 0; int resp_buftype; struct cifs_ses *ses = tcon->ses; struct smb2_fs_full_size_info *info = NULL; int flags = 0; rc = build_qfs_info_req(&iov, tcon, 
FS_FULL_SIZE_INFORMATION, sizeof(struct smb2_fs_full_size_info), persistent_fid, volatile_fid); if (rc) return rc; if (encryption_required(tcon)) flags |= CIFS_TRANSFORM_REQ; rc = SendReceive2(xid, ses, &iov, 1, &resp_buftype, flags, &rsp_iov); cifs_small_buf_release(iov.iov_base); if (rc) { cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE); goto qfsinf_exit; } rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base; info = (struct smb2_fs_full_size_info *)(4 /* RFC1001 len */ + le16_to_cpu(rsp->OutputBufferOffset) + (char *)&rsp->hdr); rc = validate_buf(le16_to_cpu(rsp->OutputBufferOffset), le32_to_cpu(rsp->OutputBufferLength), &rsp->hdr, sizeof(struct smb2_fs_full_size_info)); if (!rc) copy_fs_info_to_kstatfs(info, fsdata); qfsinf_exit: free_rsp_buf(resp_buftype, rsp_iov.iov_base); return rc; } int SMB2_QFS_attr(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, u64 volatile_fid, int level) { struct smb2_query_info_rsp *rsp = NULL; struct kvec iov; struct kvec rsp_iov; int rc = 0; int resp_buftype, max_len, min_len; struct cifs_ses *ses = tcon->ses; unsigned int rsp_len, offset; int flags = 0; if (level == FS_DEVICE_INFORMATION) { max_len = sizeof(FILE_SYSTEM_DEVICE_INFO); min_len = sizeof(FILE_SYSTEM_DEVICE_INFO); } else if (level == FS_ATTRIBUTE_INFORMATION) { max_len = sizeof(FILE_SYSTEM_ATTRIBUTE_INFO); min_len = MIN_FS_ATTR_INFO_SIZE; } else if (level == FS_SECTOR_SIZE_INFORMATION) { max_len = sizeof(struct smb3_fs_ss_info); min_len = sizeof(struct smb3_fs_ss_info); } else { cifs_dbg(FYI, "Invalid qfsinfo level %d\n", level); return -EINVAL; } rc = build_qfs_info_req(&iov, tcon, level, max_len, persistent_fid, volatile_fid); if (rc) return rc; if (encryption_required(tcon)) flags |= CIFS_TRANSFORM_REQ; rc = SendReceive2(xid, ses, &iov, 1, &resp_buftype, flags, &rsp_iov); cifs_small_buf_release(iov.iov_base); if (rc) { cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE); goto qfsattr_exit; } rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base; rsp_len = le32_to_cpu(rsp->OutputBufferLength); offset = le16_to_cpu(rsp->OutputBufferOffset); rc = validate_buf(offset, rsp_len, &rsp->hdr, min_len); if (rc) goto qfsattr_exit; if (level == FS_ATTRIBUTE_INFORMATION) memcpy(&tcon->fsAttrInfo, 4 /* RFC1001 len */ + offset + (char *)&rsp->hdr, min_t(unsigned int, rsp_len, max_len)); else if (level == FS_DEVICE_INFORMATION) memcpy(&tcon->fsDevInfo, 4 /* RFC1001 len */ + offset + (char *)&rsp->hdr, sizeof(FILE_SYSTEM_DEVICE_INFO)); else if (level == FS_SECTOR_SIZE_INFORMATION) { struct smb3_fs_ss_info *ss_info = (struct smb3_fs_ss_info *) (4 /* RFC1001 len */ + offset + (char *)&rsp->hdr); tcon->ss_flags = le32_to_cpu(ss_info->Flags); tcon->perf_sector_size = le32_to_cpu(ss_info->PhysicalBytesPerSectorForPerf); } qfsattr_exit: free_rsp_buf(resp_buftype, rsp_iov.iov_base); return rc; } int smb2_lockv(const unsigned int xid, struct cifs_tcon *tcon, const __u64 persist_fid, const __u64 volatile_fid, const __u32 pid, const __u32 num_lock, struct smb2_lock_element *buf) { int rc = 0; struct smb2_lock_req *req = NULL; struct kvec iov[2]; struct kvec rsp_iov; int resp_buf_type; unsigned int count; int flags = CIFS_NO_RESP; cifs_dbg(FYI, "smb2_lockv num lock %d\n", num_lock); rc = small_smb2_init(SMB2_LOCK, tcon, (void **) &req); if (rc) return rc; if (encryption_required(tcon)) flags |= CIFS_TRANSFORM_REQ; req->hdr.sync_hdr.ProcessId = cpu_to_le32(pid); req->LockCount = cpu_to_le16(num_lock); req->PersistentFileId = persist_fid; req->VolatileFileId = volatile_fid; count = num_lock * sizeof(struct 
smb2_lock_element); inc_rfc1001_len(req, count - sizeof(struct smb2_lock_element)); iov[0].iov_base = (char *)req; /* 4 for rfc1002 length field and count for all locks */ iov[0].iov_len = get_rfc1002_length(req) + 4 - count; iov[1].iov_base = (char *)buf; iov[1].iov_len = count; cifs_stats_inc(&tcon->stats.cifs_stats.num_locks); rc = SendReceive2(xid, tcon->ses, iov, 2, &resp_buf_type, flags, &rsp_iov); cifs_small_buf_release(req); if (rc) { cifs_dbg(FYI, "Send error in smb2_lockv = %d\n", rc); cifs_stats_fail_inc(tcon, SMB2_LOCK_HE); } return rc; } int SMB2_lock(const unsigned int xid, struct cifs_tcon *tcon, const __u64 persist_fid, const __u64 volatile_fid, const __u32 pid, const __u64 length, const __u64 offset, const __u32 lock_flags, const bool wait) { struct smb2_lock_element lock; lock.Offset = cpu_to_le64(offset); lock.Length = cpu_to_le64(length); lock.Flags = cpu_to_le32(lock_flags); if (!wait && lock_flags != SMB2_LOCKFLAG_UNLOCK) lock.Flags |= cpu_to_le32(SMB2_LOCKFLAG_FAIL_IMMEDIATELY); return smb2_lockv(xid, tcon, persist_fid, volatile_fid, pid, 1, &lock); } int SMB2_lease_break(const unsigned int xid, struct cifs_tcon *tcon, __u8 *lease_key, const __le32 lease_state) { int rc; struct smb2_lease_ack *req = NULL; int flags = CIFS_OBREAK_OP; cifs_dbg(FYI, "SMB2_lease_break\n"); rc = small_smb2_init(SMB2_OPLOCK_BREAK, tcon, (void **) &req); if (rc) return rc; if (encryption_required(tcon)) flags |= CIFS_TRANSFORM_REQ; req->hdr.sync_hdr.CreditRequest = cpu_to_le16(1); req->StructureSize = cpu_to_le16(36); inc_rfc1001_len(req, 12); memcpy(req->LeaseKey, lease_key, 16); req->LeaseState = lease_state; rc = SendReceiveNoRsp(xid, tcon->ses, (char *) req, flags); cifs_small_buf_release(req); if (rc) { cifs_stats_fail_inc(tcon, SMB2_OPLOCK_BREAK_HE); cifs_dbg(FYI, "Send error in Lease Break = %d\n", rc); } return rc; }
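Both read paths above (smb2_async_readv() and SMB2_read()) frame the request as two iovecs: a 4-byte big-endian length prefix followed by the SMB2 body of total_len bytes, which is why req_len must stay live until the send completes. A minimal sketch of that framing, assuming only struct kvec and cpu_to_be32() from the kernel headers:

	/* illustrative only -- mirrors the iov setup in smb2_async_readv() */
	static void frame_smb2_request(struct kvec iov[2], void *body,
				       unsigned int total_len, __be32 *req_len)
	{
		*req_len = cpu_to_be32(total_len); /* transport length prefix */
		iov[0].iov_base = req_len;
		iov[0].iov_len  = sizeof(__be32);
		iov[1].iov_base = body;            /* SMB2 header + payload */
		iov[1].iov_len  = total_len;
	}

In the callers above the prefix lives in a stack variable (req_len), so the kvec array is only valid for the duration of cifs_send_recv() or cifs_call_async().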
SMB2_sess_establish_session(struct SMB2_sess_data *sess_data) { int rc = 0; struct cifs_ses *ses = sess_data->ses; mutex_lock(&ses->server->srv_mutex); if (ses->server->sign && ses->server->ops->generate_signingkey) { rc = ses->server->ops->generate_signingkey(ses); kfree(ses->auth_key.response); ses->auth_key.response = NULL; if (rc) { cifs_dbg(FYI, "SMB3 session key generation failed\n"); mutex_unlock(&ses->server->srv_mutex); goto keygen_exit; } } if (!ses->server->session_estab) { ses->server->sequence_number = 0x2; ses->server->session_estab = true; } mutex_unlock(&ses->server->srv_mutex); cifs_dbg(FYI, "SMB2/3 session established successfully\n"); spin_lock(&GlobalMid_Lock); ses->status = CifsGood; ses->need_reconnect = false; spin_unlock(&GlobalMid_Lock); keygen_exit: if (!ses->server->sign) { kfree(ses->auth_key.response); ses->auth_key.response = NULL; } return rc; }
SMB2_sess_establish_session(struct SMB2_sess_data *sess_data) { int rc = 0; struct cifs_ses *ses = sess_data->ses; mutex_lock(&ses->server->srv_mutex); if (ses->server->ops->generate_signingkey) { rc = ses->server->ops->generate_signingkey(ses); if (rc) { cifs_dbg(FYI, "SMB3 session key generation failed\n"); mutex_unlock(&ses->server->srv_mutex); return rc; } } if (!ses->server->session_estab) { ses->server->sequence_number = 0x2; ses->server->session_estab = true; } mutex_unlock(&ses->server->srv_mutex); cifs_dbg(FYI, "SMB2/3 session established successfully\n"); spin_lock(&GlobalMid_Lock); ses->status = CifsGood; ses->need_reconnect = false; spin_unlock(&GlobalMid_Lock); return rc; }
{'added': [(759, '\tif (ses->server->ops->generate_signingkey) {'), (765, '\t\t\treturn rc;')], 'deleted': [(759, '\tif (ses->server->sign && ses->server->ops->generate_signingkey) {'), (761, '\t\tkfree(ses->auth_key.response);'), (762, '\t\tses->auth_key.response = NULL;'), (767, '\t\t\tgoto keygen_exit;'), (781, ''), (782, 'keygen_exit:'), (783, '\tif (!ses->server->sign) {'), (784, '\t\tkfree(ses->auth_key.response);'), (785, '\t\tses->auth_key.response = NULL;'), (786, '\t}')]}
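Read together with the func_before/func_after pair above, the diff reduces to one behavioral change in SMB2_sess_establish_session(): the signing key is now generated whenever the server ops provide generate_signingkey(), not only when ses->server->sign is set, and ses->auth_key.response is no longer freed here, so the keygen_exit cleanup path disappears. Condensed (locking elided), using only names from the functions above:

	/* before: keyed on the sign flag; auth_key freed unconditionally,
	 * and freed again at keygen_exit when signing was off */
	if (ses->server->sign && ses->server->ops->generate_signingkey) {
		rc = ses->server->ops->generate_signingkey(ses);
		kfree(ses->auth_key.response);
		ses->auth_key.response = NULL;
		if (rc)
			goto keygen_exit;
	}

	/* after: keyed only on the op; auth_key retained for later use */
	if (ses->server->ops->generate_signingkey) {
		rc = ses->server->ops->generate_signingkey(ses);
		if (rc)
			return rc;
	}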
2
10
2474
16304
https://github.com/torvalds/linux
CVE-2018-1066
['CWE-476']
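The fix is classified as CWE-476 (NULL pointer dereference). For context, the explicit guard this file already uses before touching ses->server, verbatim from SMB2_query_directory() and send_set_info() above:

	if (ses && (ses->server))
		server = ses->server;
	else
		return -EIO;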
wpg.c
ReadWPGImage
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % W W PPPP GGGG % % W W P P G % % W W W PPPP G GGG % % WW WW P G G % % W W P GGG % % % % % % Read WordPerfect Image Format % % % % Software Design % % Jaroslav Fojtik % % June 2000 % % % % % % Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/blob.h" #include "MagickCore/blob-private.h" #include "MagickCore/color-private.h" #include "MagickCore/colormap.h" #include "MagickCore/colormap-private.h" #include "MagickCore/constitute.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/cache.h" #include "MagickCore/distort.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/magic.h" #include "MagickCore/magick.h" #include "MagickCore/memory_.h" #include "MagickCore/resource_.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/quantum-private.h" #include "MagickCore/static.h" #include "MagickCore/string_.h" #include "MagickCore/module.h" #include "MagickCore/transform.h" #include "MagickCore/utility.h" #include "MagickCore/utility-private.h" typedef struct { unsigned char Red; unsigned char Blue; unsigned char Green; } RGB_Record; /* Default palette for WPG level 1 */ static const RGB_Record WPG1_Palette[256]={ { 0, 0, 0}, { 0, 0,168}, { 0,168, 0}, { 0,168,168}, {168, 0, 0}, {168, 0,168}, {168, 84, 0}, {168,168,168}, { 84, 84, 84}, { 84, 84,252}, { 84,252, 84}, { 84,252,252}, {252, 84, 84}, {252, 84,252}, {252,252, 84}, {252,252,252}, /*16*/ { 0, 0, 0}, { 20, 20, 20}, { 32, 32, 32}, { 44, 44, 44}, { 56, 56, 56}, { 68, 68, 68}, { 80, 80, 80}, { 96, 96, 96}, {112,112,112}, {128,128,128}, {144,144,144}, {160,160,160}, {180,180,180}, {200,200,200}, {224,224,224}, {252,252,252}, /*32*/ { 0, 0,252}, { 64, 0,252}, {124, 0,252}, {188, 0,252}, {252, 0,252}, {252, 0,188}, {252, 0,124}, {252, 0, 64}, {252, 0, 0}, {252, 64, 0}, {252,124, 0}, {252,188, 0}, {252,252, 0}, {188,252, 0}, {124,252, 0}, { 64,252, 0}, /*48*/ { 0,252, 0}, { 0,252, 64}, { 0,252,124}, { 0,252,188}, { 0,252,252}, { 0,188,252}, { 0,124,252}, { 0, 64,252}, {124,124,252}, {156,124,252}, {188,124,252}, {220,124,252}, {252,124,252}, {252,124,220}, {252,124,188}, {252,124,156}, /*64*/ {252,124,124}, {252,156,124}, {252,188,124}, {252,220,124}, {252,252,124}, {220,252,124}, {188,252,124}, {156,252,124}, {124,252,124}, {124,252,156}, {124,252,188}, {124,252,220}, {124,252,252}, {124,220,252}, {124,188,252}, {124,156,252}, /*80*/ {180,180,252}, {196,180,252}, {216,180,252}, {232,180,252}, {252,180,252}, {252,180,232}, {252,180,216}, {252,180,196}, {252,180,180}, {252,196,180}, {252,216,180}, {252,232,180}, {252,252,180}, {232,252,180}, {216,252,180}, {196,252,180}, /*96*/ 
{180,220,180}, {180,252,196}, {180,252,216}, {180,252,232}, {180,252,252}, {180,232,252}, {180,216,252}, {180,196,252}, {0,0,112}, {28,0,112}, {56,0,112}, {84,0,112}, {112,0,112}, {112,0,84}, {112,0,56}, {112,0,28}, /*112*/ {112,0,0}, {112,28,0}, {112,56,0}, {112,84,0}, {112,112,0}, {84,112,0}, {56,112,0}, {28,112,0}, {0,112,0}, {0,112,28}, {0,112,56}, {0,112,84}, {0,112,112}, {0,84,112}, {0,56,112}, {0,28,112}, /*128*/ {56,56,112}, {68,56,112}, {84,56,112}, {96,56,112}, {112,56,112}, {112,56,96}, {112,56,84}, {112,56,68}, {112,56,56}, {112,68,56}, {112,84,56}, {112,96,56}, {112,112,56}, {96,112,56}, {84,112,56}, {68,112,56}, /*144*/ {56,112,56}, {56,112,69}, {56,112,84}, {56,112,96}, {56,112,112}, {56,96,112}, {56,84,112}, {56,68,112}, {80,80,112}, {88,80,112}, {96,80,112}, {104,80,112}, {112,80,112}, {112,80,104}, {112,80,96}, {112,80,88}, /*160*/ {112,80,80}, {112,88,80}, {112,96,80}, {112,104,80}, {112,112,80}, {104,112,80}, {96,112,80}, {88,112,80}, {80,112,80}, {80,112,88}, {80,112,96}, {80,112,104}, {80,112,112}, {80,114,112}, {80,96,112}, {80,88,112}, /*176*/ {0,0,64}, {16,0,64}, {32,0,64}, {48,0,64}, {64,0,64}, {64,0,48}, {64,0,32}, {64,0,16}, {64,0,0}, {64,16,0}, {64,32,0}, {64,48,0}, {64,64,0}, {48,64,0}, {32,64,0}, {16,64,0}, /*192*/ {0,64,0}, {0,64,16}, {0,64,32}, {0,64,48}, {0,64,64}, {0,48,64}, {0,32,64}, {0,16,64}, {32,32,64}, {40,32,64}, {48,32,64}, {56,32,64}, {64,32,64}, {64,32,56}, {64,32,48}, {64,32,40}, /*208*/ {64,32,32}, {64,40,32}, {64,48,32}, {64,56,32}, {64,64,32}, {56,64,32}, {48,64,32}, {40,64,32}, {32,64,32}, {32,64,40}, {32,64,48}, {32,64,56}, {32,64,64}, {32,56,64}, {32,48,64}, {32,40,64}, /*224*/ {44,44,64}, {48,44,64}, {52,44,64}, {60,44,64}, {64,44,64}, {64,44,60}, {64,44,52}, {64,44,48}, {64,44,44}, {64,48,44}, {64,52,44}, {64,60,44}, {64,64,44}, {60,64,44}, {52,64,44}, {48,64,44}, /*240*/ {44,64,44}, {44,64,48}, {44,64,52}, {44,64,60}, {44,64,64}, {44,60,64}, {44,55,64}, {44,48,64}, {0,0,0}, {0,0,0}, {0,0,0}, {0,0,0}, {0,0,0}, {0,0,0}, {0,0,0}, {0,0,0} /*256*/ }; /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s W P G % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsWPG() returns True if the image format type, identified by the magick % string, is WPG. % % The format of the IsWPG method is: % % unsigned int IsWPG(const unsigned char *magick,const size_t length) % % A description of each parameter follows: % % o status: Method IsWPG returns True if the image format type is WPG. % % o magick: compare image format pattern against these bytes. % % o length: Specifies the length of the magick string. % */ static unsigned int IsWPG(const unsigned char *magick,const size_t length) { if (length < 4) return(MagickFalse); if (memcmp(magick,"\377WPC",4) == 0) return(MagickTrue); return(MagickFalse); } static void Rd_WP_DWORD(Image *image,size_t *d) { unsigned char b; b=ReadBlobByte(image); *d=b; if (b < 0xFFU) return; b=ReadBlobByte(image); *d=(size_t) b; b=ReadBlobByte(image); *d+=(size_t) b*256l; if (*d < 0x8000) return; *d=(*d & 0x7FFF) << 16; b=ReadBlobByte(image); *d+=(size_t) b; b=ReadBlobByte(image); *d+=(size_t) b*256l; return; } static MagickBooleanType InsertRow(Image *image,unsigned char *p,ssize_t y, int bpp,ExceptionInfo *exception) { int bit; Quantum index; register Quantum *q; ssize_t x; q=QueueAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) return(MagickFalse); switch (bpp) { case 1: /* Convert bitmap scanline. 
*/ { for (x=0; x < ((ssize_t) image->columns-7); x+=8) { for (bit=0; bit < 8; bit++) { index=((*p) & (0x80 >> bit) ? 0x01 : 0x00); SetPixelIndex(image,index,q); SetPixelViaPixelInfo(image,image->colormap+(ssize_t) index,q); q+=GetPixelChannels(image); } p++; } if ((image->columns % 8) != 0) { for (bit=0; bit < (ssize_t) (image->columns % 8); bit++) { index=((*p) & (0x80 >> bit) ? 0x01 : 0x00); SetPixelIndex(image,index,q); SetPixelViaPixelInfo(image,image->colormap+(ssize_t) index,q); q+=GetPixelChannels(image); } p++; } break; } case 2: /* Convert PseudoColor scanline. */ { for (x=0; x < ((ssize_t) image->columns-3); x+=4) { index=ConstrainColormapIndex(image,(*p >> 6) & 0x3,exception); SetPixelIndex(image,index,q); SetPixelViaPixelInfo(image,image->colormap+(ssize_t) index,q); q+=GetPixelChannels(image); index=ConstrainColormapIndex(image,(*p >> 4) & 0x3,exception); SetPixelIndex(image,index,q); SetPixelViaPixelInfo(image,image->colormap+(ssize_t) index,q); q+=GetPixelChannels(image); index=ConstrainColormapIndex(image,(*p >> 2) & 0x3,exception); SetPixelIndex(image,index,q); SetPixelViaPixelInfo(image,image->colormap+(ssize_t) index,q); q+=GetPixelChannels(image); index=ConstrainColormapIndex(image,(*p) & 0x3,exception); SetPixelIndex(image,index,q); SetPixelViaPixelInfo(image,image->colormap+(ssize_t) index,q); q+=GetPixelChannels(image); p++; } if ((image->columns % 4) != 0) { index=ConstrainColormapIndex(image,(*p >> 6) & 0x3,exception); SetPixelIndex(image,index,q); SetPixelViaPixelInfo(image,image->colormap+(ssize_t) index,q); q+=GetPixelChannels(image); if ((image->columns % 4) > 1) { index=ConstrainColormapIndex(image,(*p >> 4) & 0x3,exception); SetPixelIndex(image,index,q); SetPixelViaPixelInfo(image,image->colormap+(ssize_t) index,q); q+=GetPixelChannels(image); if ((image->columns % 4) > 2) { index=ConstrainColormapIndex(image,(*p >> 2) & 0x3, exception); SetPixelIndex(image,index,q); SetPixelViaPixelInfo(image,image->colormap+(ssize_t) index,q); q+=GetPixelChannels(image); } } p++; } break; } case 4: /* Convert PseudoColor scanline. */ { for (x=0; x < ((ssize_t) image->columns-1); x+=2) { index=ConstrainColormapIndex(image,(*p >> 4) & 0x0f,exception); SetPixelIndex(image,index,q); SetPixelViaPixelInfo(image,image->colormap+(ssize_t) index,q); q+=GetPixelChannels(image); index=ConstrainColormapIndex(image,(*p) & 0x0f,exception); SetPixelIndex(image,index,q); SetPixelViaPixelInfo(image,image->colormap+(ssize_t) index,q); p++; q+=GetPixelChannels(image); } if ((image->columns % 2) != 0) { index=ConstrainColormapIndex(image,(*p >> 4) & 0x0f,exception); SetPixelIndex(image,index,q); SetPixelViaPixelInfo(image,image->colormap+(ssize_t) index,q); p++; q+=GetPixelChannels(image); } break; } case 8: /* Convert PseudoColor scanline. */ { for (x=0; x < (ssize_t) image->columns; x++) { index=ConstrainColormapIndex(image,*p,exception); SetPixelIndex(image,index,q); SetPixelViaPixelInfo(image,image->colormap+(ssize_t) index,q); p++; q+=GetPixelChannels(image); } } break; case 24: /* Convert DirectColor scanline. */ for (x=0; x < (ssize_t) image->columns; x++) { SetPixelRed(image,ScaleCharToQuantum(*p++),q); SetPixelGreen(image,ScaleCharToQuantum(*p++),q); SetPixelBlue(image,ScaleCharToQuantum(*p++),q); q+=GetPixelChannels(image); } break; } if (!SyncAuthenticPixels(image,exception)) return(MagickFalse); return(MagickTrue); } /* Helper for WPG1 raster reader. 
*/ #define InsertByte(b) \ { \ BImgBuff[x]=b; \ x++; \ if((ssize_t) x>=ldblk) \ { \ if (InsertRow(image,BImgBuff,(ssize_t) y,bpp,exception) != MagickFalse) \ y++; \ x=0; \ } \ } /* WPG1 raster reader. */ static int UnpackWPGRaster(Image *image,int bpp,ExceptionInfo *exception) { int x, y, i; unsigned char bbuf, *BImgBuff, RunCount; ssize_t ldblk; x=0; y=0; ldblk=(ssize_t) ((bpp*image->columns+7)/8); BImgBuff=(unsigned char *) AcquireQuantumMemory((size_t) ldblk, 8*sizeof(*BImgBuff)); if(BImgBuff==NULL) return(-2); while(y<(ssize_t) image->rows) { int c; c=ReadBlobByte(image); if (c == EOF) break; bbuf=(unsigned char) c; RunCount=bbuf & 0x7F; if(bbuf & 0x80) { if(RunCount) /* repeat next byte runcount * */ { bbuf=ReadBlobByte(image); for(i=0;i<(int) RunCount;i++) InsertByte(bbuf); } else { /* read next byte as RunCount; repeat 0xFF runcount* */ c=ReadBlobByte(image); if (c < 0) break; RunCount=(unsigned char) c; for(i=0;i<(int) RunCount;i++) InsertByte(0xFF); } } else { if(RunCount) /* next runcount byte are readed directly */ { for(i=0;i < (int) RunCount;i++) { bbuf=ReadBlobByte(image); InsertByte(bbuf); } } else { /* repeat previous line runcount* */ c=ReadBlobByte(image); if (c < 0) break; RunCount=(unsigned char) c; if(x) { /* attempt to duplicate row from x position: */ /* I do not know what to do here */ BImgBuff=(unsigned char *) RelinquishMagickMemory(BImgBuff); return(-3); } for(i=0;i < (int) RunCount;i++) { x=0; y++; /* Here I need to duplicate previous row RUNCOUNT* */ if(y<2) continue; if(y>(ssize_t) image->rows) { BImgBuff=(unsigned char *) RelinquishMagickMemory(BImgBuff); return(-4); } InsertRow(image,BImgBuff,y-1,bpp,exception); } } } if (EOFBlob(image) != MagickFalse) break; } BImgBuff=(unsigned char *) RelinquishMagickMemory(BImgBuff); return(y <(ssize_t) image->rows ? -5 : 0); } /* Helper for WPG2 reader. */ #define InsertByte6(b) \ { \ DisableMSCWarning(4310) \ if(XorMe)\ BImgBuff[x] = (unsigned char)~b;\ else\ BImgBuff[x] = b;\ RestoreMSCWarning \ x++; \ if((ssize_t) x >= ldblk) \ { \ if (InsertRow(image,BImgBuff,(ssize_t) y,bpp,exception) != MagickFalse) \ y++; \ x=0; \ } \ } /* WPG2 raster reader. 
*/ static int UnpackWPG2Raster(Image *image,int bpp,ExceptionInfo *exception) { int RunCount, XorMe = 0; size_t x, y; ssize_t i, ldblk; unsigned int SampleSize=1; unsigned char bbuf, *BImgBuff, SampleBuffer[8] = { 0, 0, 0, 0, 0, 0, 0, 0 }; x=0; y=0; ldblk=(ssize_t) ((bpp*image->columns+7)/8); BImgBuff=(unsigned char *) AcquireQuantumMemory((size_t) ldblk, sizeof(*BImgBuff)); if(BImgBuff==NULL) return(-2); while( y< image->rows) { bbuf=ReadBlobByte(image); switch(bbuf) { case 0x7D: SampleSize=ReadBlobByte(image); /* DSZ */ if(SampleSize>8) { BImgBuff=(unsigned char *) RelinquishMagickMemory(BImgBuff); return(-2); } if(SampleSize<1) { BImgBuff=(unsigned char *) RelinquishMagickMemory(BImgBuff); return(-2); } break; case 0x7E: (void) FormatLocaleFile(stderr, "\nUnsupported WPG token XOR, please report!"); XorMe=!XorMe; break; case 0x7F: RunCount=ReadBlobByte(image); /* BLK */ if (RunCount < 0) break; for(i=0; i < SampleSize*(RunCount+1); i++) { InsertByte6(0); } break; case 0xFD: RunCount=ReadBlobByte(image); /* EXT */ if (RunCount < 0) break; for(i=0; i<= RunCount;i++) for(bbuf=0; bbuf < SampleSize; bbuf++) InsertByte6(SampleBuffer[bbuf]); break; case 0xFE: RunCount=ReadBlobByte(image); /* RST */ if (RunCount < 0) break; if(x!=0) { (void) FormatLocaleFile(stderr, "\nUnsupported WPG2 unaligned token RST x=%.20g, please report!\n" ,(double) x); BImgBuff=(unsigned char *) RelinquishMagickMemory(BImgBuff); return(-3); } { /* duplicate the previous row RunCount x */ for(i=0;i<=RunCount;i++) { if (InsertRow(image,BImgBuff,(ssize_t) (image->rows >= y ? y : image->rows-1), bpp,exception) != MagickFalse) y++; } } break; case 0xFF: RunCount=ReadBlobByte(image); /* WHT */ if (RunCount < 0) break; for(i=0; i < SampleSize*(RunCount+1); i++) { InsertByte6(0xFF); } break; default: RunCount=bbuf & 0x7F; if(bbuf & 0x80) /* REP */ { for(i=0; i < SampleSize; i++) SampleBuffer[i]=ReadBlobByte(image); for(i=0;i<=RunCount;i++) for(bbuf=0;bbuf<SampleSize;bbuf++) InsertByte6(SampleBuffer[bbuf]); } else { /* NRP */ for(i=0; i< SampleSize*(RunCount+1);i++) { bbuf=ReadBlobByte(image); InsertByte6(bbuf); } } } if (EOFBlob(image) != MagickFalse) break; } BImgBuff=(unsigned char *) RelinquishMagickMemory(BImgBuff); return(0); } typedef float tCTM[3][3]; static unsigned LoadWPG2Flags(Image *image,char Precision,float *Angle,tCTM *CTM) { const unsigned char TPR=1,TRN=2,SKW=4,SCL=8,ROT=0x10,OID=0x20,LCK=0x80; ssize_t x; unsigned DenX; unsigned Flags; (void) memset(*CTM,0,sizeof(*CTM)); /*CTM.erase();CTM.resize(3,3);*/ (*CTM)[0][0]=1; (*CTM)[1][1]=1; (*CTM)[2][2]=1; Flags=ReadBlobLSBShort(image); if(Flags & LCK) (void) ReadBlobLSBLong(image); /*Edit lock*/ if(Flags & OID) { if(Precision==0) {(void) ReadBlobLSBShort(image);} /*ObjectID*/ else {(void) ReadBlobLSBLong(image);} /*ObjectID (Double precision)*/ } if(Flags & ROT) { x=ReadBlobLSBLong(image); /*Rot Angle*/ if(Angle) *Angle=x/65536.0; } if(Flags & (ROT|SCL)) { x=ReadBlobLSBLong(image); /*Sx*cos()*/ (*CTM)[0][0] = (float)x/0x10000; x=ReadBlobLSBLong(image); /*Sy*cos()*/ (*CTM)[1][1] = (float)x/0x10000; } if(Flags & (ROT|SKW)) { x=ReadBlobLSBLong(image); /*Kx*sin()*/ (*CTM)[1][0] = (float)x/0x10000; x=ReadBlobLSBLong(image); /*Ky*sin()*/ (*CTM)[0][1] = (float)x/0x10000; } if(Flags & TRN) { x=ReadBlobLSBLong(image); DenX=ReadBlobLSBShort(image); /*Tx*/ if(x>=0) (*CTM)[0][2] = (float)x+(float)DenX/0x10000; else (*CTM)[0][2] = (float)x-(float)DenX/0x10000; x=ReadBlobLSBLong(image); DenX=ReadBlobLSBShort(image); /*Ty*/ (*CTM)[1][2]=(float)x + 
((x>=0)?1:-1)*(float)DenX/0x10000; if(x>=0) (*CTM)[1][2] = (float)x+(float)DenX/0x10000; else (*CTM)[1][2] = (float)x-(float)DenX/0x10000; } if(Flags & TPR) { x=ReadBlobLSBShort(image); DenX=ReadBlobLSBShort(image); /*Px*/ (*CTM)[2][0] = x + (float)DenX/0x10000;; x=ReadBlobLSBShort(image); DenX=ReadBlobLSBShort(image); /*Py*/ (*CTM)[2][1] = x + (float)DenX/0x10000; } return(Flags); } static Image *ExtractPostscript(Image *image,const ImageInfo *image_info, MagickOffsetType PS_Offset,ssize_t PS_Size,ExceptionInfo *exception) { char postscript_file[MagickPathExtent]; const MagicInfo *magic_info; FILE *ps_file; ImageInfo *clone_info; Image *image2; unsigned char magick[2*MagickPathExtent]; if ((clone_info=CloneImageInfo(image_info)) == NULL) return(image); clone_info->blob=(void *) NULL; clone_info->length=0; /* Obtain temporary file */ (void) AcquireUniqueFilename(postscript_file); ps_file=fopen_utf8(postscript_file,"wb"); if (ps_file == (FILE *) NULL) goto FINISH; /* Copy postscript to temporary file */ (void) SeekBlob(image,PS_Offset,SEEK_SET); (void) ReadBlob(image, 2*MagickPathExtent, magick); (void) SeekBlob(image,PS_Offset,SEEK_SET); while(PS_Size-- > 0) { (void) fputc(ReadBlobByte(image),ps_file); } (void) fclose(ps_file); /* Detect file format - Check magic.mgk configuration file. */ magic_info=GetMagicInfo(magick,2*MagickPathExtent,exception); if(magic_info == (const MagicInfo *) NULL) goto FINISH_UNL; /* printf("Detected:%s \n",magic_info->name); */ if(exception->severity != UndefinedException) goto FINISH_UNL; if(magic_info->name == (char *) NULL) goto FINISH_UNL; (void) strncpy(clone_info->magick,magic_info->name,MagickPathExtent-1); /* Read nested image */ /*FormatString(clone_info->filename,"%s:%s",magic_info->name,postscript_file);*/ FormatLocaleString(clone_info->filename,MagickPathExtent,"%s",postscript_file); image2=ReadImage(clone_info,exception); if (!image2) goto FINISH_UNL; /* Replace current image with new image while copying base image attributes. */ (void) CopyMagickString(image2->filename,image->filename,MagickPathExtent); (void) CopyMagickString(image2->magick_filename,image->magick_filename,MagickPathExtent); (void) CopyMagickString(image2->magick,image->magick,MagickPathExtent); image2->depth=image->depth; DestroyBlob(image2); image2->blob=ReferenceBlob(image->blob); if ((image->rows == 0) || (image->columns == 0)) DeleteImageFromList(&image); AppendImageToList(&image,image2); FINISH_UNL: (void) RelinquishUniqueFileResource(postscript_file); FINISH: DestroyImageInfo(clone_info); return(image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e a d W P G I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Method ReadWPGImage reads an WPG X image file and returns it. It % allocates the memory necessary for the new Image structure and returns a % pointer to the new image. % % The format of the ReadWPGImage method is: % % Image *ReadWPGImage(const ImageInfo *image_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: Method ReadWPGImage returns a pointer to the image after % reading. A null image is returned if there is a memory shortage or if % the image cannot be read. % % o image_info: Specifies a pointer to a ImageInfo structure. % % o exception: return any errors or warnings in this structure. 
% */ static Image *ReadWPGImage(const ImageInfo *image_info, ExceptionInfo *exception) { typedef struct { size_t FileId; MagickOffsetType DataOffset; unsigned int ProductType; unsigned int FileType; unsigned char MajorVersion; unsigned char MinorVersion; unsigned int EncryptKey; unsigned int Reserved; } WPGHeader; typedef struct { unsigned char RecType; size_t RecordLength; } WPGRecord; typedef struct { unsigned char Class; unsigned char RecType; size_t Extension; size_t RecordLength; } WPG2Record; typedef struct { unsigned HorizontalUnits; unsigned VerticalUnits; unsigned char PosSizePrecision; } WPG2Start; typedef struct { unsigned int Width; unsigned int Height; unsigned int Depth; unsigned int HorzRes; unsigned int VertRes; } WPGBitmapType1; typedef struct { unsigned int Width; unsigned int Height; unsigned char Depth; unsigned char Compression; } WPG2BitmapType1; typedef struct { unsigned int RotAngle; unsigned int LowLeftX; unsigned int LowLeftY; unsigned int UpRightX; unsigned int UpRightY; unsigned int Width; unsigned int Height; unsigned int Depth; unsigned int HorzRes; unsigned int VertRes; } WPGBitmapType2; typedef struct { unsigned int StartIndex; unsigned int NumOfEntries; } WPGColorMapRec; /* typedef struct { size_t PS_unknown1; unsigned int PS_unknown2; unsigned int PS_unknown3; } WPGPSl1Record; */ Image *image; unsigned int status; WPGHeader Header; WPGRecord Rec; WPG2Record Rec2; WPG2Start StartWPG; WPGBitmapType1 BitmapHeader1; WPG2BitmapType1 Bitmap2Header1; WPGBitmapType2 BitmapHeader2; WPGColorMapRec WPG_Palette; int i, bpp, WPG2Flags; ssize_t ldblk; size_t one; unsigned char *BImgBuff; tCTM CTM; /*current transform matrix*/ /* Open image file. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); one=1; image=AcquireImage(image_info,exception); image->depth=8; status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { image=DestroyImageList(image); return((Image *) NULL); } /* Read WPG image. 
*/ Header.FileId=ReadBlobLSBLong(image); Header.DataOffset=(MagickOffsetType) ReadBlobLSBLong(image); Header.ProductType=ReadBlobLSBShort(image); Header.FileType=ReadBlobLSBShort(image); Header.MajorVersion=ReadBlobByte(image); Header.MinorVersion=ReadBlobByte(image); Header.EncryptKey=ReadBlobLSBShort(image); Header.Reserved=ReadBlobLSBShort(image); if (Header.FileId!=0x435057FF || (Header.ProductType>>8)!=0x16) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); if (Header.EncryptKey!=0) ThrowReaderException(CoderError,"EncryptedWPGImageFileNotSupported"); image->columns = 1; image->rows = 1; image->colors = 0; bpp=0; BitmapHeader2.RotAngle=0; Rec2.RecordLength=0; switch(Header.FileType) { case 1: /* WPG level 1 */ while(!EOFBlob(image)) /* object parser loop */ { (void) SeekBlob(image,Header.DataOffset,SEEK_SET); if(EOFBlob(image)) break; Rec.RecType=(i=ReadBlobByte(image)); if(i==EOF) break; Rd_WP_DWORD(image,&Rec.RecordLength); if (Rec.RecordLength > GetBlobSize(image)) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); if(EOFBlob(image)) break; Header.DataOffset=TellBlob(image)+Rec.RecordLength; switch(Rec.RecType) { case 0x0B: /* bitmap type 1 */ BitmapHeader1.Width=ReadBlobLSBShort(image); BitmapHeader1.Height=ReadBlobLSBShort(image); if ((BitmapHeader1.Width == 0) || (BitmapHeader1.Height == 0)) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); BitmapHeader1.Depth=ReadBlobLSBShort(image); BitmapHeader1.HorzRes=ReadBlobLSBShort(image); BitmapHeader1.VertRes=ReadBlobLSBShort(image); if(BitmapHeader1.HorzRes && BitmapHeader1.VertRes) { image->units=PixelsPerCentimeterResolution; image->resolution.x=BitmapHeader1.HorzRes/470.0; image->resolution.y=BitmapHeader1.VertRes/470.0; } image->columns=BitmapHeader1.Width; image->rows=BitmapHeader1.Height; bpp=BitmapHeader1.Depth; goto UnpackRaster; case 0x0E: /*Color palette */ WPG_Palette.StartIndex=ReadBlobLSBShort(image); WPG_Palette.NumOfEntries=ReadBlobLSBShort(image); if ((WPG_Palette.NumOfEntries-WPG_Palette.StartIndex) > (Rec2.RecordLength-2-2) / 3) ThrowReaderException(CorruptImageError,"InvalidColormapIndex"); image->colors=WPG_Palette.NumOfEntries; if (!AcquireImageColormap(image,image->colors,exception)) goto NoMemory; for (i=WPG_Palette.StartIndex; i < (int)WPG_Palette.NumOfEntries; i++) { image->colormap[i].red=ScaleCharToQuantum((unsigned char) ReadBlobByte(image)); image->colormap[i].green=ScaleCharToQuantum((unsigned char) ReadBlobByte(image)); image->colormap[i].blue=ScaleCharToQuantum((unsigned char) ReadBlobByte(image)); } break; case 0x11: /* Start PS l1 */ if(Rec.RecordLength > 8) image=ExtractPostscript(image,image_info, TellBlob(image)+8, /* skip PS header in the wpg */ (ssize_t) Rec.RecordLength-8,exception); break; case 0x14: /* bitmap type 2 */ BitmapHeader2.RotAngle=ReadBlobLSBShort(image); BitmapHeader2.LowLeftX=ReadBlobLSBShort(image); BitmapHeader2.LowLeftY=ReadBlobLSBShort(image); BitmapHeader2.UpRightX=ReadBlobLSBShort(image); BitmapHeader2.UpRightY=ReadBlobLSBShort(image); BitmapHeader2.Width=ReadBlobLSBShort(image); BitmapHeader2.Height=ReadBlobLSBShort(image); if ((BitmapHeader2.Width == 0) || (BitmapHeader2.Height == 0)) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); BitmapHeader2.Depth=ReadBlobLSBShort(image); BitmapHeader2.HorzRes=ReadBlobLSBShort(image); BitmapHeader2.VertRes=ReadBlobLSBShort(image); image->units=PixelsPerCentimeterResolution; image->page.width=(unsigned int) ((BitmapHeader2.LowLeftX-BitmapHeader2.UpRightX)/470.0); 
image->page.height=(unsigned int) ((BitmapHeader2.LowLeftX-BitmapHeader2.UpRightY)/470.0); image->page.x=(int) (BitmapHeader2.LowLeftX/470.0); image->page.y=(int) (BitmapHeader2.LowLeftX/470.0); if(BitmapHeader2.HorzRes && BitmapHeader2.VertRes) { image->resolution.x=BitmapHeader2.HorzRes/470.0; image->resolution.y=BitmapHeader2.VertRes/470.0; } image->columns=BitmapHeader2.Width; image->rows=BitmapHeader2.Height; bpp=BitmapHeader2.Depth; UnpackRaster: status=SetImageExtent(image,image->columns,image->rows,exception); if (status == MagickFalse) break; if ((image->colors == 0) && (bpp <= 16)) { image->colors=one << bpp; if (!AcquireImageColormap(image,image->colors,exception)) { NoMemory: ThrowReaderException(ResourceLimitError, "MemoryAllocationFailed"); } /* printf("Load default colormap \n"); */ for (i=0; (i < (int) image->colors) && (i < 256); i++) { image->colormap[i].red=ScaleCharToQuantum(WPG1_Palette[i].Red); image->colormap[i].green=ScaleCharToQuantum(WPG1_Palette[i].Green); image->colormap[i].blue=ScaleCharToQuantum(WPG1_Palette[i].Blue); } } else { if (bpp < 24) if ( (image->colors < (one << bpp)) && (bpp != 24) ) image->colormap=(PixelInfo *) ResizeQuantumMemory( image->colormap,(size_t) (one << bpp), sizeof(*image->colormap)); } if (bpp == 1) { if(image->colormap[0].red==0 && image->colormap[0].green==0 && image->colormap[0].blue==0 && image->colormap[1].red==0 && image->colormap[1].green==0 && image->colormap[1].blue==0) { /* fix crippled monochrome palette */ image->colormap[1].red = image->colormap[1].green = image->colormap[1].blue = QuantumRange; } } if(UnpackWPGRaster(image,bpp,exception) < 0) /* The raster cannot be unpacked */ { DecompressionFailed: ThrowReaderException(CoderError,"UnableToDecompressImage"); } if(Rec.RecType==0x14 && BitmapHeader2.RotAngle!=0 && !image_info->ping) { /* flop command */ if(BitmapHeader2.RotAngle & 0x8000) { Image *flop_image; flop_image = FlopImage(image, exception); if (flop_image != (Image *) NULL) { DuplicateBlob(flop_image,image); ReplaceImageInList(&image,flop_image); } } /* flip command */ if(BitmapHeader2.RotAngle & 0x2000) { Image *flip_image; flip_image = FlipImage(image, exception); if (flip_image != (Image *) NULL) { DuplicateBlob(flip_image,image); ReplaceImageInList(&image,flip_image); } } /* rotate command */ if(BitmapHeader2.RotAngle & 0x0FFF) { Image *rotate_image; rotate_image=RotateImage(image,(BitmapHeader2.RotAngle & 0x0FFF), exception); if (rotate_image != (Image *) NULL) { DuplicateBlob(rotate_image,image); ReplaceImageInList(&image,rotate_image); } } } /* Allocate next image structure. 
*/ AcquireNextImage(image_info,image,exception); image->depth=8; if (image->next == (Image *) NULL) goto Finish; image=SyncNextImageInList(image); image->columns=image->rows=1; image->colors=0; break; case 0x1B: /* Postscript l2 */ if(Rec.RecordLength>0x3C) image=ExtractPostscript(image,image_info, TellBlob(image)+0x3C, /* skip PS l2 header in the wpg */ (ssize_t) Rec.RecordLength-0x3C,exception); break; } } break; case 2: /* WPG level 2 */ (void) memset(CTM,0,sizeof(CTM)); StartWPG.PosSizePrecision = 0; while(!EOFBlob(image)) /* object parser loop */ { (void) SeekBlob(image,Header.DataOffset,SEEK_SET); if(EOFBlob(image)) break; Rec2.Class=(i=ReadBlobByte(image)); if(i==EOF) break; Rec2.RecType=(i=ReadBlobByte(image)); if(i==EOF) break; Rd_WP_DWORD(image,&Rec2.Extension); Rd_WP_DWORD(image,&Rec2.RecordLength); if(EOFBlob(image)) break; Header.DataOffset=TellBlob(image)+Rec2.RecordLength; switch(Rec2.RecType) { case 1: StartWPG.HorizontalUnits=ReadBlobLSBShort(image); StartWPG.VerticalUnits=ReadBlobLSBShort(image); StartWPG.PosSizePrecision=ReadBlobByte(image); break; case 0x0C: /* Color palette */ WPG_Palette.StartIndex=ReadBlobLSBShort(image); WPG_Palette.NumOfEntries=ReadBlobLSBShort(image); if ((WPG_Palette.NumOfEntries-WPG_Palette.StartIndex) > (Rec2.RecordLength-2-2) / 3) ThrowReaderException(CorruptImageError,"InvalidColormapIndex"); image->colors=WPG_Palette.NumOfEntries; if (AcquireImageColormap(image,image->colors,exception) == MagickFalse) ThrowReaderException(ResourceLimitError, "MemoryAllocationFailed"); for (i=WPG_Palette.StartIndex; i < (int)WPG_Palette.NumOfEntries; i++) { image->colormap[i].red=ScaleCharToQuantum((char) ReadBlobByte(image)); image->colormap[i].green=ScaleCharToQuantum((char) ReadBlobByte(image)); image->colormap[i].blue=ScaleCharToQuantum((char) ReadBlobByte(image)); (void) ReadBlobByte(image); /*Opacity??*/ } break; case 0x0E: Bitmap2Header1.Width=ReadBlobLSBShort(image); Bitmap2Header1.Height=ReadBlobLSBShort(image); if ((Bitmap2Header1.Width == 0) || (Bitmap2Header1.Height == 0)) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); Bitmap2Header1.Depth=ReadBlobByte(image); Bitmap2Header1.Compression=ReadBlobByte(image); if(Bitmap2Header1.Compression > 1) continue; /*Unknown compression method */ switch(Bitmap2Header1.Depth) { case 1: bpp=1; break; case 2: bpp=2; break; case 3: bpp=4; break; case 4: bpp=8; break; case 8: bpp=24; break; default: continue; /*Ignore raster with unknown depth*/ } image->columns=Bitmap2Header1.Width; image->rows=Bitmap2Header1.Height; status=SetImageExtent(image,image->columns,image->rows,exception); if (status == MagickFalse) break; if ((image->colors == 0) && (bpp != 24)) { image->colors=one << bpp; if (!AcquireImageColormap(image,image->colors,exception)) goto NoMemory; } else { if(bpp < 24) if( image->colors<(one << bpp) && bpp!=24 ) image->colormap=(PixelInfo *) ResizeQuantumMemory( image->colormap,(size_t) (one << bpp), sizeof(*image->colormap)); } switch(Bitmap2Header1.Compression) { case 0: /*Uncompressed raster*/ { ldblk=(ssize_t) ((bpp*image->columns+7)/8); BImgBuff=(unsigned char *) AcquireQuantumMemory((size_t) ldblk+1,sizeof(*BImgBuff)); if (BImgBuff == (unsigned char *) NULL) goto NoMemory; for(i=0; i< (ssize_t) image->rows; i++) { (void) ReadBlob(image,ldblk,BImgBuff); InsertRow(image,BImgBuff,i,bpp,exception); } if(BImgBuff) BImgBuff=(unsigned char *) RelinquishMagickMemory(BImgBuff); break; } case 1: /*RLE for WPG2 */ { if( UnpackWPG2Raster(image,bpp,exception) < 0) goto DecompressionFailed; break; } 
} if(CTM[0][0]<0 && !image_info->ping) { /*?? RotAngle=360-RotAngle;*/ Image *flop_image; flop_image = FlopImage(image, exception); if (flop_image != (Image *) NULL) { DuplicateBlob(flop_image,image); ReplaceImageInList(&image,flop_image); } /* Try to change CTM according to Flip - I am not sure, must be checked. Tx(0,0)=-1; Tx(1,0)=0; Tx(2,0)=0; Tx(0,1)= 0; Tx(1,1)=1; Tx(2,1)=0; Tx(0,2)=(WPG._2Rect.X_ur+WPG._2Rect.X_ll); Tx(1,2)=0; Tx(2,2)=1; */ } if(CTM[1][1]<0 && !image_info->ping) { /*?? RotAngle=360-RotAngle;*/ Image *flip_image; flip_image = FlipImage(image, exception); if (flip_image != (Image *) NULL) { DuplicateBlob(flip_image,image); ReplaceImageInList(&image,flip_image); } /* Try to change CTM according to Flip - I am not sure, must be checked. float_matrix Tx(3,3); Tx(0,0)= 1; Tx(1,0)= 0; Tx(2,0)=0; Tx(0,1)= 0; Tx(1,1)=-1; Tx(2,1)=0; Tx(0,2)= 0; Tx(1,2)=(WPG._2Rect.Y_ur+WPG._2Rect.Y_ll); Tx(2,2)=1; */ } /* Allocate next image structure. */ AcquireNextImage(image_info,image,exception); image->depth=8; if (image->next == (Image *) NULL) goto Finish; image=SyncNextImageInList(image); image->columns=image->rows=1; image->colors=0; break; case 0x12: /* Postscript WPG2*/ i=ReadBlobLSBShort(image); if(Rec2.RecordLength > (unsigned int) i) image=ExtractPostscript(image,image_info, TellBlob(image)+i, /*skip PS header in the wpg2*/ (ssize_t) (Rec2.RecordLength-i-2),exception); break; case 0x1B: /*bitmap rectangle*/ WPG2Flags = LoadWPG2Flags(image,StartWPG.PosSizePrecision,NULL,&CTM); (void) WPG2Flags; break; } } break; default: { ThrowReaderException(CoderError,"DataEncodingSchemeIsNotSupported"); } } Finish: (void) CloseBlob(image); { Image *p; ssize_t scene=0; /* Rewind list, removing any empty images while rewinding. */ p=image; image=NULL; while (p != (Image *) NULL) { Image *tmp=p; if ((p->rows == 0) || (p->columns == 0)) { p=p->previous; DeleteImageFromList(&tmp); } else { image=p; p=p->previous; } } /* Fix scene numbers. */ for (p=image; p != (Image *) NULL; p=p->next) p->scene=(size_t) scene++; } if (image == (Image *) NULL) ThrowReaderException(CorruptImageError, "ImageFileDoesNotContainAnyImageData"); return(image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e g i s t e r W P G I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Method RegisterWPGImage adds attributes for the WPG image format to % the list of supported formats. The attributes include the image format % tag, a method to read and/or write the format, whether the format % supports the saving of more than one frame to the same file or blob, % whether the format supports native in-memory I/O, and a brief % description of the format. % % The format of the RegisterWPGImage method is: % % size_t RegisterWPGImage(void) % */ ModuleExport size_t RegisterWPGImage(void) { MagickInfo *entry; entry=AcquireMagickInfo("WPG","WPG","Word Perfect Graphics"); entry->decoder=(DecodeImageHandler *) ReadWPGImage; entry->magick=(IsImageFormatHandler *) IsWPG; entry->flags|=CoderDecoderSeekableStreamFlag; (void) RegisterMagickInfo(entry); return(MagickImageCoderSignature); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % U n r e g i s t e r W P G I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Method UnregisterWPGImage removes format registrations made by the % WPG module from the list of supported formats. 
% % The format of the UnregisterWPGImage method is: % % UnregisterWPGImage(void) % */ ModuleExport void UnregisterWPGImage(void) { (void) UnregisterMagickInfo("WPG"); }
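/*
  A minimal standalone sketch (not part of the original coder) of the
  variable-length "WP DWORD" encoding that the reader below parses with
  Rd_WP_DWORD(); the buffer-based decoder and its name are hypothetical,
  only the wire format is taken from the code that follows.
*/
#include <stddef.h>

static size_t DecodeWPDword(const unsigned char *p)
{
  size_t d = p[0];

  if (d < 0xFF)                            /* 1-byte form: 0x00..0xFE */
    return d;
  d = (size_t) p[1] + (size_t) p[2]*256;   /* 0xFF prefix: 16-bit LE */
  if (d < 0x8000)                          /* 3-byte form: up to 0x7FFF */
    return d;
  /* 5-byte form: the first word (bit 15 masked off) is the high half,
     the following word is the low half */
  return ((d & 0x7FFF) << 16) + (size_t) p[3] + (size_t) p[4]*256;
}

/* Worked examples: {0x42} -> 0x42; {0xFF,0x34,0x12} -> 0x1234;
   {0xFF,0x34,0x92,0x78,0x56} -> 0x12345678. */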
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % W W PPPP GGGG % % W W P P G % % W W W PPPP G GGG % % WW WW P G G % % W W P GGG % % % % % % Read WordPerfect Image Format % % % % Software Design % % Jaroslav Fojtik % % June 2000 % % % % % % Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/blob.h" #include "MagickCore/blob-private.h" #include "MagickCore/color-private.h" #include "MagickCore/colormap.h" #include "MagickCore/colormap-private.h" #include "MagickCore/constitute.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/cache.h" #include "MagickCore/distort.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/magic.h" #include "MagickCore/magick.h" #include "MagickCore/memory_.h" #include "MagickCore/resource_.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/quantum-private.h" #include "MagickCore/static.h" #include "MagickCore/string_.h" #include "MagickCore/module.h" #include "MagickCore/transform.h" #include "MagickCore/utility.h" #include "MagickCore/utility-private.h" typedef struct { unsigned char Red; unsigned char Blue; unsigned char Green; } RGB_Record; /* Default palette for WPG level 1 */ static const RGB_Record WPG1_Palette[256]={ { 0, 0, 0}, { 0, 0,168}, { 0,168, 0}, { 0,168,168}, {168, 0, 0}, {168, 0,168}, {168, 84, 0}, {168,168,168}, { 84, 84, 84}, { 84, 84,252}, { 84,252, 84}, { 84,252,252}, {252, 84, 84}, {252, 84,252}, {252,252, 84}, {252,252,252}, /*16*/ { 0, 0, 0}, { 20, 20, 20}, { 32, 32, 32}, { 44, 44, 44}, { 56, 56, 56}, { 68, 68, 68}, { 80, 80, 80}, { 96, 96, 96}, {112,112,112}, {128,128,128}, {144,144,144}, {160,160,160}, {180,180,180}, {200,200,200}, {224,224,224}, {252,252,252}, /*32*/ { 0, 0,252}, { 64, 0,252}, {124, 0,252}, {188, 0,252}, {252, 0,252}, {252, 0,188}, {252, 0,124}, {252, 0, 64}, {252, 0, 0}, {252, 64, 0}, {252,124, 0}, {252,188, 0}, {252,252, 0}, {188,252, 0}, {124,252, 0}, { 64,252, 0}, /*48*/ { 0,252, 0}, { 0,252, 64}, { 0,252,124}, { 0,252,188}, { 0,252,252}, { 0,188,252}, { 0,124,252}, { 0, 64,252}, {124,124,252}, {156,124,252}, {188,124,252}, {220,124,252}, {252,124,252}, {252,124,220}, {252,124,188}, {252,124,156}, /*64*/ {252,124,124}, {252,156,124}, {252,188,124}, {252,220,124}, {252,252,124}, {220,252,124}, {188,252,124}, {156,252,124}, {124,252,124}, {124,252,156}, {124,252,188}, {124,252,220}, {124,252,252}, {124,220,252}, {124,188,252}, {124,156,252}, /*80*/ {180,180,252}, {196,180,252}, {216,180,252}, {232,180,252}, {252,180,252}, {252,180,232}, {252,180,216}, {252,180,196}, {252,180,180}, {252,196,180}, {252,216,180}, {252,232,180}, {252,252,180}, {232,252,180}, {216,252,180}, {196,252,180}, /*96*/ 
{180,220,180}, {180,252,196}, {180,252,216}, {180,252,232}, {180,252,252}, {180,232,252}, {180,216,252}, {180,196,252}, {0,0,112}, {28,0,112}, {56,0,112}, {84,0,112}, {112,0,112}, {112,0,84}, {112,0,56}, {112,0,28}, /*112*/ {112,0,0}, {112,28,0}, {112,56,0}, {112,84,0}, {112,112,0}, {84,112,0}, {56,112,0}, {28,112,0}, {0,112,0}, {0,112,28}, {0,112,56}, {0,112,84}, {0,112,112}, {0,84,112}, {0,56,112}, {0,28,112}, /*128*/ {56,56,112}, {68,56,112}, {84,56,112}, {96,56,112}, {112,56,112}, {112,56,96}, {112,56,84}, {112,56,68}, {112,56,56}, {112,68,56}, {112,84,56}, {112,96,56}, {112,112,56}, {96,112,56}, {84,112,56}, {68,112,56}, /*144*/ {56,112,56}, {56,112,69}, {56,112,84}, {56,112,96}, {56,112,112}, {56,96,112}, {56,84,112}, {56,68,112}, {80,80,112}, {88,80,112}, {96,80,112}, {104,80,112}, {112,80,112}, {112,80,104}, {112,80,96}, {112,80,88}, /*160*/ {112,80,80}, {112,88,80}, {112,96,80}, {112,104,80}, {112,112,80}, {104,112,80}, {96,112,80}, {88,112,80}, {80,112,80}, {80,112,88}, {80,112,96}, {80,112,104}, {80,112,112}, {80,114,112}, {80,96,112}, {80,88,112}, /*176*/ {0,0,64}, {16,0,64}, {32,0,64}, {48,0,64}, {64,0,64}, {64,0,48}, {64,0,32}, {64,0,16}, {64,0,0}, {64,16,0}, {64,32,0}, {64,48,0}, {64,64,0}, {48,64,0}, {32,64,0}, {16,64,0}, /*192*/ {0,64,0}, {0,64,16}, {0,64,32}, {0,64,48}, {0,64,64}, {0,48,64}, {0,32,64}, {0,16,64}, {32,32,64}, {40,32,64}, {48,32,64}, {56,32,64}, {64,32,64}, {64,32,56}, {64,32,48}, {64,32,40}, /*208*/ {64,32,32}, {64,40,32}, {64,48,32}, {64,56,32}, {64,64,32}, {56,64,32}, {48,64,32}, {40,64,32}, {32,64,32}, {32,64,40}, {32,64,48}, {32,64,56}, {32,64,64}, {32,56,64}, {32,48,64}, {32,40,64}, /*224*/ {44,44,64}, {48,44,64}, {52,44,64}, {60,44,64}, {64,44,64}, {64,44,60}, {64,44,52}, {64,44,48}, {64,44,44}, {64,48,44}, {64,52,44}, {64,60,44}, {64,64,44}, {60,64,44}, {52,64,44}, {48,64,44}, /*240*/ {44,64,44}, {44,64,48}, {44,64,52}, {44,64,60}, {44,64,64}, {44,60,64}, {44,55,64}, {44,48,64}, {0,0,0}, {0,0,0}, {0,0,0}, {0,0,0}, {0,0,0}, {0,0,0}, {0,0,0}, {0,0,0} /*256*/ }; /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s W P G % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsWPG() returns True if the image format type, identified by the magick % string, is WPG. % % The format of the IsWPG method is: % % unsigned int IsWPG(const unsigned char *magick,const size_t length) % % A description of each parameter follows: % % o status: Method IsWPG returns True if the image format type is WPG. % % o magick: compare image format pattern against these bytes. % % o length: Specifies the length of the magick string. % */ static unsigned int IsWPG(const unsigned char *magick,const size_t length) { if (length < 4) return(MagickFalse); if (memcmp(magick,"\377WPC",4) == 0) return(MagickTrue); return(MagickFalse); } static void Rd_WP_DWORD(Image *image,size_t *d) { unsigned char b; b=ReadBlobByte(image); *d=b; if (b < 0xFFU) return; b=ReadBlobByte(image); *d=(size_t) b; b=ReadBlobByte(image); *d+=(size_t) b*256l; if (*d < 0x8000) return; *d=(*d & 0x7FFF) << 16; b=ReadBlobByte(image); *d+=(size_t) b; b=ReadBlobByte(image); *d+=(size_t) b*256l; return; } static MagickBooleanType InsertRow(Image *image,unsigned char *p,ssize_t y, int bpp,ExceptionInfo *exception) { int bit; Quantum index; register Quantum *q; ssize_t x; q=QueueAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) return(MagickFalse); switch (bpp) { case 1: /* Convert bitmap scanline. 
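       Each source byte packs eight 1-bit pixels, most significant bit
       first: the mask (0x80 >> bit) selects the pixel written to
       column x+bit.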
*/ { for (x=0; x < ((ssize_t) image->columns-7); x+=8) { for (bit=0; bit < 8; bit++) { index=((*p) & (0x80 >> bit) ? 0x01 : 0x00); SetPixelIndex(image,index,q); SetPixelViaPixelInfo(image,image->colormap+(ssize_t) index,q); q+=GetPixelChannels(image); } p++; } if ((image->columns % 8) != 0) { for (bit=0; bit < (ssize_t) (image->columns % 8); bit++) { index=((*p) & (0x80 >> bit) ? 0x01 : 0x00); SetPixelIndex(image,index,q); SetPixelViaPixelInfo(image,image->colormap+(ssize_t) index,q); q+=GetPixelChannels(image); } p++; } break; } case 2: /* Convert PseudoColor scanline. */ { for (x=0; x < ((ssize_t) image->columns-3); x+=4) { index=ConstrainColormapIndex(image,(*p >> 6) & 0x3,exception); SetPixelIndex(image,index,q); SetPixelViaPixelInfo(image,image->colormap+(ssize_t) index,q); q+=GetPixelChannels(image); index=ConstrainColormapIndex(image,(*p >> 4) & 0x3,exception); SetPixelIndex(image,index,q); SetPixelViaPixelInfo(image,image->colormap+(ssize_t) index,q); q+=GetPixelChannels(image); index=ConstrainColormapIndex(image,(*p >> 2) & 0x3,exception); SetPixelIndex(image,index,q); SetPixelViaPixelInfo(image,image->colormap+(ssize_t) index,q); q+=GetPixelChannels(image); index=ConstrainColormapIndex(image,(*p) & 0x3,exception); SetPixelIndex(image,index,q); SetPixelViaPixelInfo(image,image->colormap+(ssize_t) index,q); q+=GetPixelChannels(image); p++; } if ((image->columns % 4) != 0) { index=ConstrainColormapIndex(image,(*p >> 6) & 0x3,exception); SetPixelIndex(image,index,q); SetPixelViaPixelInfo(image,image->colormap+(ssize_t) index,q); q+=GetPixelChannels(image); if ((image->columns % 4) > 1) { index=ConstrainColormapIndex(image,(*p >> 4) & 0x3,exception); SetPixelIndex(image,index,q); SetPixelViaPixelInfo(image,image->colormap+(ssize_t) index,q); q+=GetPixelChannels(image); if ((image->columns % 4) > 2) { index=ConstrainColormapIndex(image,(*p >> 2) & 0x3, exception); SetPixelIndex(image,index,q); SetPixelViaPixelInfo(image,image->colormap+(ssize_t) index,q); q+=GetPixelChannels(image); } } p++; } break; } case 4: /* Convert PseudoColor scanline. */ { for (x=0; x < ((ssize_t) image->columns-1); x+=2) { index=ConstrainColormapIndex(image,(*p >> 4) & 0x0f,exception); SetPixelIndex(image,index,q); SetPixelViaPixelInfo(image,image->colormap+(ssize_t) index,q); q+=GetPixelChannels(image); index=ConstrainColormapIndex(image,(*p) & 0x0f,exception); SetPixelIndex(image,index,q); SetPixelViaPixelInfo(image,image->colormap+(ssize_t) index,q); p++; q+=GetPixelChannels(image); } if ((image->columns % 2) != 0) { index=ConstrainColormapIndex(image,(*p >> 4) & 0x0f,exception); SetPixelIndex(image,index,q); SetPixelViaPixelInfo(image,image->colormap+(ssize_t) index,q); p++; q+=GetPixelChannels(image); } break; } case 8: /* Convert PseudoColor scanline. */ { for (x=0; x < (ssize_t) image->columns; x++) { index=ConstrainColormapIndex(image,*p,exception); SetPixelIndex(image,index,q); SetPixelViaPixelInfo(image,image->colormap+(ssize_t) index,q); p++; q+=GetPixelChannels(image); } } break; case 24: /* Convert DirectColor scanline. */ for (x=0; x < (ssize_t) image->columns; x++) { SetPixelRed(image,ScaleCharToQuantum(*p++),q); SetPixelGreen(image,ScaleCharToQuantum(*p++),q); SetPixelBlue(image,ScaleCharToQuantum(*p++),q); q+=GetPixelChannels(image); } break; } if (!SyncAuthenticPixels(image,exception)) return(MagickFalse); return(MagickTrue); } /* Helper for WPG1 raster reader. 
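   Decoding works on control bytes: for a byte c, RunCount = c & 0x7F.
   With the high bit set, a nonzero RunCount repeats the following byte
   RunCount times, while RunCount == 0 reads a fresh count and emits that
   many 0xFF bytes.  With the high bit clear, a nonzero RunCount copies
   the next RunCount bytes literally, and RunCount == 0 reads a count and
   duplicates the previous row that many times.  InsertByte() below
   accumulates output and flushes one scanline via InsertRow() every
   ldblk bytes.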
*/ #define InsertByte(b) \ { \ BImgBuff[x]=b; \ x++; \ if((ssize_t) x>=ldblk) \ { \ if (InsertRow(image,BImgBuff,(ssize_t) y,bpp,exception) != MagickFalse) \ y++; \ x=0; \ } \ } /* WPG1 raster reader. */ static int UnpackWPGRaster(Image *image,int bpp,ExceptionInfo *exception) { int x, y, i; unsigned char bbuf, *BImgBuff, RunCount; ssize_t ldblk; x=0; y=0; ldblk=(ssize_t) ((bpp*image->columns+7)/8); BImgBuff=(unsigned char *) AcquireQuantumMemory((size_t) ldblk, 8*sizeof(*BImgBuff)); if(BImgBuff==NULL) return(-2); while(y<(ssize_t) image->rows) { int c; c=ReadBlobByte(image); if (c == EOF) break; bbuf=(unsigned char) c; RunCount=bbuf & 0x7F; if(bbuf & 0x80) { if(RunCount) /* repeat the next byte RunCount times */ { bbuf=ReadBlobByte(image); for(i=0;i<(int) RunCount;i++) InsertByte(bbuf); } else { /* read the next byte as RunCount; repeat 0xFF RunCount times */ c=ReadBlobByte(image); if (c < 0) break; RunCount=(unsigned char) c; for(i=0;i<(int) RunCount;i++) InsertByte(0xFF); } } else { if(RunCount) /* the next RunCount bytes are read literally */ { for(i=0;i < (int) RunCount;i++) { bbuf=ReadBlobByte(image); InsertByte(bbuf); } } else { /* repeat the previous row RunCount times */ c=ReadBlobByte(image); if (c < 0) break; RunCount=(unsigned char) c; if(x) { /* attempt to duplicate row from x position: */ /* I do not know what to do here */ BImgBuff=(unsigned char *) RelinquishMagickMemory(BImgBuff); return(-3); } for(i=0;i < (int) RunCount;i++) { x=0; y++; /* duplicate the previous row RunCount times */ if(y<2) continue; if(y>(ssize_t) image->rows) { BImgBuff=(unsigned char *) RelinquishMagickMemory(BImgBuff); return(-4); } InsertRow(image,BImgBuff,y-1,bpp,exception); } } } if (EOFBlob(image) != MagickFalse) break; } BImgBuff=(unsigned char *) RelinquishMagickMemory(BImgBuff); return(y <(ssize_t) image->rows ? -5 : 0); } /* Helper for WPG2 reader. */ #define InsertByte6(b) \ { \ DisableMSCWarning(4310) \ if(XorMe)\ BImgBuff[x] = (unsigned char)~b;\ else\ BImgBuff[x] = b;\ RestoreMSCWarning \ x++; \ if((ssize_t) x >= ldblk) \ { \ if (InsertRow(image,BImgBuff,(ssize_t) y,bpp,exception) != MagickFalse) \ y++; \ x=0; \ } \ } /* WPG2 raster reader. 
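   Token set handled below: 0x7D (DSZ) sets the sample size in bytes
   (1..8); 0x7E (XOR) toggles bitwise inversion of emitted bytes; 0x7F
   (BLK) emits SampleSize*(RunCount+1) zero bytes; 0xFD (EXT) re-emits
   the last sample buffer RunCount+1 times; 0xFE (RST) duplicates the
   previous row RunCount+1 times (only accepted at a row boundary);
   0xFF (WHT) emits SampleSize*(RunCount+1) 0xFF bytes.  Any other byte
   carries RunCount in its low 7 bits: with the high bit set (REP) one
   sample is read and repeated RunCount+1 times, otherwise (NRP)
   RunCount+1 samples are copied literally.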
*/ static int UnpackWPG2Raster(Image *image,int bpp,ExceptionInfo *exception) { int RunCount, XorMe = 0; size_t x, y; ssize_t i, ldblk; unsigned int SampleSize=1; unsigned char bbuf, *BImgBuff, SampleBuffer[8] = { 0, 0, 0, 0, 0, 0, 0, 0 }; x=0; y=0; ldblk=(ssize_t) ((bpp*image->columns+7)/8); BImgBuff=(unsigned char *) AcquireQuantumMemory((size_t) ldblk, sizeof(*BImgBuff)); if(BImgBuff==NULL) return(-2); while( y< image->rows) { bbuf=ReadBlobByte(image); switch(bbuf) { case 0x7D: SampleSize=ReadBlobByte(image); /* DSZ */ if(SampleSize>8) { BImgBuff=(unsigned char *) RelinquishMagickMemory(BImgBuff); return(-2); } if(SampleSize<1) { BImgBuff=(unsigned char *) RelinquishMagickMemory(BImgBuff); return(-2); } break; case 0x7E: (void) FormatLocaleFile(stderr, "\nUnsupported WPG token XOR, please report!"); XorMe=!XorMe; break; case 0x7F: RunCount=ReadBlobByte(image); /* BLK */ if (RunCount < 0) break; for(i=0; i < SampleSize*(RunCount+1); i++) { InsertByte6(0); } break; case 0xFD: RunCount=ReadBlobByte(image); /* EXT */ if (RunCount < 0) break; for(i=0; i<= RunCount;i++) for(bbuf=0; bbuf < SampleSize; bbuf++) InsertByte6(SampleBuffer[bbuf]); break; case 0xFE: RunCount=ReadBlobByte(image); /* RST */ if (RunCount < 0) break; if(x!=0) { (void) FormatLocaleFile(stderr, "\nUnsupported WPG2 unaligned token RST x=%.20g, please report!\n" ,(double) x); BImgBuff=(unsigned char *) RelinquishMagickMemory(BImgBuff); return(-3); } { /* duplicate the previous row RunCount x */ for(i=0;i<=RunCount;i++) { if (InsertRow(image,BImgBuff,(ssize_t) (image->rows >= y ? y : image->rows-1), bpp,exception) != MagickFalse) y++; } } break; case 0xFF: RunCount=ReadBlobByte(image); /* WHT */ if (RunCount < 0) break; for(i=0; i < SampleSize*(RunCount+1); i++) { InsertByte6(0xFF); } break; default: RunCount=bbuf & 0x7F; if(bbuf & 0x80) /* REP */ { for(i=0; i < SampleSize; i++) SampleBuffer[i]=ReadBlobByte(image); for(i=0;i<=RunCount;i++) for(bbuf=0;bbuf<SampleSize;bbuf++) InsertByte6(SampleBuffer[bbuf]); } else { /* NRP */ for(i=0; i< SampleSize*(RunCount+1);i++) { bbuf=ReadBlobByte(image); InsertByte6(bbuf); } } } if (EOFBlob(image) != MagickFalse) break; } BImgBuff=(unsigned char *) RelinquishMagickMemory(BImgBuff); return(0); } typedef float tCTM[3][3]; static unsigned LoadWPG2Flags(Image *image,char Precision,float *Angle,tCTM *CTM) { const unsigned char TPR=1,TRN=2,SKW=4,SCL=8,ROT=0x10,OID=0x20,LCK=0x80; ssize_t x; unsigned DenX; unsigned Flags; (void) memset(*CTM,0,sizeof(*CTM)); /*CTM.erase();CTM.resize(3,3);*/ (*CTM)[0][0]=1; (*CTM)[1][1]=1; (*CTM)[2][2]=1; Flags=ReadBlobLSBShort(image); if(Flags & LCK) (void) ReadBlobLSBLong(image); /*Edit lock*/ if(Flags & OID) { if(Precision==0) {(void) ReadBlobLSBShort(image);} /*ObjectID*/ else {(void) ReadBlobLSBLong(image);} /*ObjectID (Double precision)*/ } if(Flags & ROT) { x=ReadBlobLSBLong(image); /*Rot Angle*/ if(Angle) *Angle=x/65536.0; } if(Flags & (ROT|SCL)) { x=ReadBlobLSBLong(image); /*Sx*cos()*/ (*CTM)[0][0] = (float)x/0x10000; x=ReadBlobLSBLong(image); /*Sy*cos()*/ (*CTM)[1][1] = (float)x/0x10000; } if(Flags & (ROT|SKW)) { x=ReadBlobLSBLong(image); /*Kx*sin()*/ (*CTM)[1][0] = (float)x/0x10000; x=ReadBlobLSBLong(image); /*Ky*sin()*/ (*CTM)[0][1] = (float)x/0x10000; } if(Flags & TRN) { x=ReadBlobLSBLong(image); DenX=ReadBlobLSBShort(image); /*Tx*/ if(x>=0) (*CTM)[0][2] = (float)x+(float)DenX/0x10000; else (*CTM)[0][2] = (float)x-(float)DenX/0x10000; x=ReadBlobLSBLong(image); DenX=ReadBlobLSBShort(image); /*Ty*/ (*CTM)[1][2]=(float)x + 
((x>=0)?1:-1)*(float)DenX/0x10000; } if(Flags & TPR) { x=ReadBlobLSBShort(image); DenX=ReadBlobLSBShort(image); /*Px*/ (*CTM)[2][0] = x + (float)DenX/0x10000; x=ReadBlobLSBShort(image); DenX=ReadBlobLSBShort(image); /*Py*/ (*CTM)[2][1] = x + (float)DenX/0x10000; } return(Flags); } static Image *ExtractPostscript(Image *image,const ImageInfo *image_info, MagickOffsetType PS_Offset,ssize_t PS_Size,ExceptionInfo *exception) { char postscript_file[MagickPathExtent]; const MagicInfo *magic_info; FILE *ps_file; ImageInfo *clone_info; Image *image2; unsigned char magick[2*MagickPathExtent]; if ((clone_info=CloneImageInfo(image_info)) == NULL) return(image); clone_info->blob=(void *) NULL; clone_info->length=0; /* Obtain temporary file */ (void) AcquireUniqueFilename(postscript_file); ps_file=fopen_utf8(postscript_file,"wb"); if (ps_file == (FILE *) NULL) goto FINISH; /* Copy postscript to temporary file */ (void) SeekBlob(image,PS_Offset,SEEK_SET); (void) ReadBlob(image, 2*MagickPathExtent, magick); (void) SeekBlob(image,PS_Offset,SEEK_SET); while(PS_Size-- > 0) { (void) fputc(ReadBlobByte(image),ps_file); } (void) fclose(ps_file); /* Detect file format - Check magic.mgk configuration file. */ magic_info=GetMagicInfo(magick,2*MagickPathExtent,exception); if(magic_info == (const MagicInfo *) NULL) goto FINISH_UNL; /* printf("Detected:%s \n",magic_info->name); */ if(exception->severity != UndefinedException) goto FINISH_UNL; if(magic_info->name == (char *) NULL) goto FINISH_UNL; (void) strncpy(clone_info->magick,magic_info->name,MagickPathExtent-1); /* Read nested image */ /*FormatString(clone_info->filename,"%s:%s",magic_info->name,postscript_file);*/ FormatLocaleString(clone_info->filename,MagickPathExtent,"%s",postscript_file); image2=ReadImage(clone_info,exception); if (!image2) goto FINISH_UNL; /* Replace current image with new image while copying base image attributes. */ (void) CopyMagickString(image2->filename,image->filename,MagickPathExtent); (void) CopyMagickString(image2->magick_filename,image->magick_filename,MagickPathExtent); (void) CopyMagickString(image2->magick,image->magick,MagickPathExtent); image2->depth=image->depth; DestroyBlob(image2); image2->blob=ReferenceBlob(image->blob); if ((image->rows == 0) || (image->columns == 0)) DeleteImageFromList(&image); AppendImageToList(&image,image2); FINISH_UNL: (void) RelinquishUniqueFileResource(postscript_file); FINISH: DestroyImageInfo(clone_info); return(image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e a d W P G I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Method ReadWPGImage reads a WPG image file and returns it. It % allocates the memory necessary for the new Image structure and returns a % pointer to the new image. % % The format of the ReadWPGImage method is: % % Image *ReadWPGImage(const ImageInfo *image_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: Method ReadWPGImage returns a pointer to the image after % reading. A null image is returned if there is a memory shortage or if % the image cannot be read. % % o image_info: Specifies a pointer to an ImageInfo structure. % % o exception: return any errors or warnings in this structure. 
% */ static Image *ReadWPGImage(const ImageInfo *image_info, ExceptionInfo *exception) { typedef struct { size_t FileId; MagickOffsetType DataOffset; unsigned int ProductType; unsigned int FileType; unsigned char MajorVersion; unsigned char MinorVersion; unsigned int EncryptKey; unsigned int Reserved; } WPGHeader; typedef struct { unsigned char RecType; size_t RecordLength; } WPGRecord; typedef struct { unsigned char Class; unsigned char RecType; size_t Extension; size_t RecordLength; } WPG2Record; typedef struct { unsigned HorizontalUnits; unsigned VerticalUnits; unsigned char PosSizePrecision; } WPG2Start; typedef struct { unsigned int Width; unsigned int Height; unsigned int Depth; unsigned int HorzRes; unsigned int VertRes; } WPGBitmapType1; typedef struct { unsigned int Width; unsigned int Height; unsigned char Depth; unsigned char Compression; } WPG2BitmapType1; typedef struct { unsigned int RotAngle; unsigned int LowLeftX; unsigned int LowLeftY; unsigned int UpRightX; unsigned int UpRightY; unsigned int Width; unsigned int Height; unsigned int Depth; unsigned int HorzRes; unsigned int VertRes; } WPGBitmapType2; typedef struct { unsigned int StartIndex; unsigned int NumOfEntries; } WPGColorMapRec; /* typedef struct { size_t PS_unknown1; unsigned int PS_unknown2; unsigned int PS_unknown3; } WPGPSl1Record; */ Image *image; unsigned int status; WPGHeader Header; WPGRecord Rec; WPG2Record Rec2; WPG2Start StartWPG; WPGBitmapType1 BitmapHeader1; WPG2BitmapType1 Bitmap2Header1; WPGBitmapType2 BitmapHeader2; WPGColorMapRec WPG_Palette; int i, bpp, WPG2Flags; ssize_t ldblk; size_t one; unsigned char *BImgBuff; tCTM CTM; /*current transform matrix*/ /* Open image file. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); one=1; image=AcquireImage(image_info,exception); image->depth=8; status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { image=DestroyImageList(image); return((Image *) NULL); } /* Read WPG image. 
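     The 16-byte file header is read little-endian: FileId (must be
     0x435057FF, i.e. the magic "\377WPC"), DataOffset of the first
     record, ProductType (high byte must be 0x16), FileType (1 = WPG
     level 1, 2 = WPG level 2), major/minor version bytes, EncryptKey
     (only 0, unencrypted, is supported) and a reserved word.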
*/ Header.FileId=ReadBlobLSBLong(image); Header.DataOffset=(MagickOffsetType) ReadBlobLSBLong(image); Header.ProductType=ReadBlobLSBShort(image); Header.FileType=ReadBlobLSBShort(image); Header.MajorVersion=ReadBlobByte(image); Header.MinorVersion=ReadBlobByte(image); Header.EncryptKey=ReadBlobLSBShort(image); Header.Reserved=ReadBlobLSBShort(image); if (Header.FileId!=0x435057FF || (Header.ProductType>>8)!=0x16) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); if (Header.EncryptKey!=0) ThrowReaderException(CoderError,"EncryptedWPGImageFileNotSupported"); image->columns = 1; image->rows = 1; image->colors = 0; bpp=0; BitmapHeader2.RotAngle=0; Rec2.RecordLength=0; switch(Header.FileType) { case 1: /* WPG level 1 */ while(!EOFBlob(image)) /* object parser loop */ { (void) SeekBlob(image,Header.DataOffset,SEEK_SET); if(EOFBlob(image)) break; Rec.RecType=(i=ReadBlobByte(image)); if(i==EOF) break; Rd_WP_DWORD(image,&Rec.RecordLength); if (Rec.RecordLength > GetBlobSize(image)) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); if(EOFBlob(image)) break; Header.DataOffset=TellBlob(image)+Rec.RecordLength; switch(Rec.RecType) { case 0x0B: /* bitmap type 1 */ BitmapHeader1.Width=ReadBlobLSBShort(image); BitmapHeader1.Height=ReadBlobLSBShort(image); if ((BitmapHeader1.Width == 0) || (BitmapHeader1.Height == 0)) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); BitmapHeader1.Depth=ReadBlobLSBShort(image); BitmapHeader1.HorzRes=ReadBlobLSBShort(image); BitmapHeader1.VertRes=ReadBlobLSBShort(image); if(BitmapHeader1.HorzRes && BitmapHeader1.VertRes) { image->units=PixelsPerCentimeterResolution; image->resolution.x=BitmapHeader1.HorzRes/470.0; image->resolution.y=BitmapHeader1.VertRes/470.0; } image->columns=BitmapHeader1.Width; image->rows=BitmapHeader1.Height; bpp=BitmapHeader1.Depth; goto UnpackRaster; case 0x0E: /*Color palette */ WPG_Palette.StartIndex=ReadBlobLSBShort(image); WPG_Palette.NumOfEntries=ReadBlobLSBShort(image); if ((WPG_Palette.NumOfEntries-WPG_Palette.StartIndex) > (Rec2.RecordLength-2-2)/3) ThrowReaderException(CorruptImageError,"InvalidColormapIndex"); if (WPG_Palette.StartIndex > WPG_Palette.NumOfEntries) ThrowReaderException(CorruptImageError,"InvalidColormapIndex"); image->colors=WPG_Palette.NumOfEntries; if (!AcquireImageColormap(image,image->colors,exception)) goto NoMemory; for (i=WPG_Palette.StartIndex; i < (int)WPG_Palette.NumOfEntries; i++) { image->colormap[i].red=ScaleCharToQuantum((unsigned char) ReadBlobByte(image)); image->colormap[i].green=ScaleCharToQuantum((unsigned char) ReadBlobByte(image)); image->colormap[i].blue=ScaleCharToQuantum((unsigned char) ReadBlobByte(image)); } break; case 0x11: /* Start PS l1 */ if(Rec.RecordLength > 8) image=ExtractPostscript(image,image_info, TellBlob(image)+8, /* skip PS header in the wpg */ (ssize_t) Rec.RecordLength-8,exception); break; case 0x14: /* bitmap type 2 */ BitmapHeader2.RotAngle=ReadBlobLSBShort(image); BitmapHeader2.LowLeftX=ReadBlobLSBShort(image); BitmapHeader2.LowLeftY=ReadBlobLSBShort(image); BitmapHeader2.UpRightX=ReadBlobLSBShort(image); BitmapHeader2.UpRightY=ReadBlobLSBShort(image); BitmapHeader2.Width=ReadBlobLSBShort(image); BitmapHeader2.Height=ReadBlobLSBShort(image); if ((BitmapHeader2.Width == 0) || (BitmapHeader2.Height == 0)) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); BitmapHeader2.Depth=ReadBlobLSBShort(image); BitmapHeader2.HorzRes=ReadBlobLSBShort(image); BitmapHeader2.VertRes=ReadBlobLSBShort(image); 
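          /* The /470.0 scaling below apparently treats WPG device units
             as roughly 470 per centimeter while filling in the
             centimeter-based page geometry and resolution (matching the
             PixelsPerCentimeterResolution units set here). */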
image->units=PixelsPerCentimeterResolution; image->page.width=(unsigned int) ((BitmapHeader2.LowLeftX-BitmapHeader2.UpRightX)/470.0); image->page.height=(unsigned int) ((BitmapHeader2.LowLeftY-BitmapHeader2.UpRightY)/470.0); image->page.x=(int) (BitmapHeader2.LowLeftX/470.0); image->page.y=(int) (BitmapHeader2.LowLeftY/470.0); if(BitmapHeader2.HorzRes && BitmapHeader2.VertRes) { image->resolution.x=BitmapHeader2.HorzRes/470.0; image->resolution.y=BitmapHeader2.VertRes/470.0; } image->columns=BitmapHeader2.Width; image->rows=BitmapHeader2.Height; bpp=BitmapHeader2.Depth; UnpackRaster: status=SetImageExtent(image,image->columns,image->rows,exception); if (status == MagickFalse) break; if ((image->colors == 0) && (bpp <= 16)) { image->colors=one << bpp; if (!AcquireImageColormap(image,image->colors,exception)) { NoMemory: ThrowReaderException(ResourceLimitError, "MemoryAllocationFailed"); } /* printf("Load default colormap \n"); */ for (i=0; (i < (int) image->colors) && (i < 256); i++) { image->colormap[i].red=ScaleCharToQuantum(WPG1_Palette[i].Red); image->colormap[i].green=ScaleCharToQuantum(WPG1_Palette[i].Green); image->colormap[i].blue=ScaleCharToQuantum(WPG1_Palette[i].Blue); } } else { if (bpp < 24) if ( (image->colors < (one << bpp)) && (bpp != 24) ) image->colormap=(PixelInfo *) ResizeQuantumMemory( image->colormap,(size_t) (one << bpp), sizeof(*image->colormap)); } if (bpp == 1) { if(image->colormap[0].red==0 && image->colormap[0].green==0 && image->colormap[0].blue==0 && image->colormap[1].red==0 && image->colormap[1].green==0 && image->colormap[1].blue==0) { /* fix crippled monochrome palette */ image->colormap[1].red = image->colormap[1].green = image->colormap[1].blue = QuantumRange; } } if(UnpackWPGRaster(image,bpp,exception) < 0) /* The raster cannot be unpacked */ { DecompressionFailed: ThrowReaderException(CoderError,"UnableToDecompressImage"); } if(Rec.RecType==0x14 && BitmapHeader2.RotAngle!=0 && !image_info->ping) { /* flop command */ if(BitmapHeader2.RotAngle & 0x8000) { Image *flop_image; flop_image = FlopImage(image, exception); if (flop_image != (Image *) NULL) { DuplicateBlob(flop_image,image); ReplaceImageInList(&image,flop_image); } } /* flip command */ if(BitmapHeader2.RotAngle & 0x2000) { Image *flip_image; flip_image = FlipImage(image, exception); if (flip_image != (Image *) NULL) { DuplicateBlob(flip_image,image); ReplaceImageInList(&image,flip_image); } } /* rotate command */ if(BitmapHeader2.RotAngle & 0x0FFF) { Image *rotate_image; rotate_image=RotateImage(image,(BitmapHeader2.RotAngle & 0x0FFF), exception); if (rotate_image != (Image *) NULL) { DuplicateBlob(rotate_image,image); ReplaceImageInList(&image,rotate_image); } } } /* Allocate next image structure. 
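               A fresh list entry is appended for a possible further
               record; SyncNextImageInList() then advances to it, and any
               entry still left at zero rows/columns is pruned in the
               Finish block.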
*/ AcquireNextImage(image_info,image,exception); image->depth=8; if (image->next == (Image *) NULL) goto Finish; image=SyncNextImageInList(image); image->columns=image->rows=1; image->colors=0; break; case 0x1B: /* Postscript l2 */ if(Rec.RecordLength>0x3C) image=ExtractPostscript(image,image_info, TellBlob(image)+0x3C, /* skip PS l2 header in the wpg */ (ssize_t) Rec.RecordLength-0x3C,exception); break; } } break; case 2: /* WPG level 2 */ (void) memset(CTM,0,sizeof(CTM)); StartWPG.PosSizePrecision = 0; while(!EOFBlob(image)) /* object parser loop */ { (void) SeekBlob(image,Header.DataOffset,SEEK_SET); if(EOFBlob(image)) break; Rec2.Class=(i=ReadBlobByte(image)); if(i==EOF) break; Rec2.RecType=(i=ReadBlobByte(image)); if(i==EOF) break; Rd_WP_DWORD(image,&Rec2.Extension); Rd_WP_DWORD(image,&Rec2.RecordLength); if(EOFBlob(image)) break; Header.DataOffset=TellBlob(image)+Rec2.RecordLength; switch(Rec2.RecType) { case 1: StartWPG.HorizontalUnits=ReadBlobLSBShort(image); StartWPG.VerticalUnits=ReadBlobLSBShort(image); StartWPG.PosSizePrecision=ReadBlobByte(image); break; case 0x0C: /* Color palette */ WPG_Palette.StartIndex=ReadBlobLSBShort(image); WPG_Palette.NumOfEntries=ReadBlobLSBShort(image); if ((WPG_Palette.NumOfEntries-WPG_Palette.StartIndex) > (Rec2.RecordLength-2-2) / 3) ThrowReaderException(CorruptImageError,"InvalidColormapIndex"); image->colors=WPG_Palette.NumOfEntries; if (AcquireImageColormap(image,image->colors,exception) == MagickFalse) ThrowReaderException(ResourceLimitError, "MemoryAllocationFailed"); for (i=WPG_Palette.StartIndex; i < (int)WPG_Palette.NumOfEntries; i++) { image->colormap[i].red=ScaleCharToQuantum((char) ReadBlobByte(image)); image->colormap[i].green=ScaleCharToQuantum((char) ReadBlobByte(image)); image->colormap[i].blue=ScaleCharToQuantum((char) ReadBlobByte(image)); (void) ReadBlobByte(image); /*Opacity??*/ } break; case 0x0E: Bitmap2Header1.Width=ReadBlobLSBShort(image); Bitmap2Header1.Height=ReadBlobLSBShort(image); if ((Bitmap2Header1.Width == 0) || (Bitmap2Header1.Height == 0)) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); Bitmap2Header1.Depth=ReadBlobByte(image); Bitmap2Header1.Compression=ReadBlobByte(image); if(Bitmap2Header1.Compression > 1) continue; /*Unknown compression method */ switch(Bitmap2Header1.Depth) { case 1: bpp=1; break; case 2: bpp=2; break; case 3: bpp=4; break; case 4: bpp=8; break; case 8: bpp=24; break; default: continue; /*Ignore raster with unknown depth*/ } image->columns=Bitmap2Header1.Width; image->rows=Bitmap2Header1.Height; status=SetImageExtent(image,image->columns,image->rows,exception); if (status == MagickFalse) break; if ((image->colors == 0) && (bpp != 24)) { image->colors=one << bpp; if (!AcquireImageColormap(image,image->colors,exception)) goto NoMemory; } else { if(bpp < 24) if( image->colors<(one << bpp) && bpp!=24 ) image->colormap=(PixelInfo *) ResizeQuantumMemory( image->colormap,(size_t) (one << bpp), sizeof(*image->colormap)); } switch(Bitmap2Header1.Compression) { case 0: /*Uncompressed raster*/ { ldblk=(ssize_t) ((bpp*image->columns+7)/8); BImgBuff=(unsigned char *) AcquireQuantumMemory((size_t) ldblk+1,sizeof(*BImgBuff)); if (BImgBuff == (unsigned char *) NULL) goto NoMemory; for(i=0; i< (ssize_t) image->rows; i++) { (void) ReadBlob(image,ldblk,BImgBuff); InsertRow(image,BImgBuff,i,bpp,exception); } if(BImgBuff) BImgBuff=(unsigned char *) RelinquishMagickMemory(BImgBuff); break; } case 1: /*RLE for WPG2 */ { if( UnpackWPG2Raster(image,bpp,exception) < 0) goto DecompressionFailed; break; } 
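                /* After decompression: a negative X or Y scale in the
                   current transform matrix (filled in by a 0x1B record
                   via LoadWPG2Flags) mirrors the raster below, with
                   FlopImage for the X axis and FlipImage for the Y
                   axis. */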
} if(CTM[0][0]<0 && !image_info->ping) { /*?? RotAngle=360-RotAngle;*/ Image *flop_image; flop_image = FlopImage(image, exception); if (flop_image != (Image *) NULL) { DuplicateBlob(flop_image,image); ReplaceImageInList(&image,flop_image); } /* Try to change CTM according to Flip - I am not sure, must be checked. Tx(0,0)=-1; Tx(1,0)=0; Tx(2,0)=0; Tx(0,1)= 0; Tx(1,1)=1; Tx(2,1)=0; Tx(0,2)=(WPG._2Rect.X_ur+WPG._2Rect.X_ll); Tx(1,2)=0; Tx(2,2)=1; */ } if(CTM[1][1]<0 && !image_info->ping) { /*?? RotAngle=360-RotAngle;*/ Image *flip_image; flip_image = FlipImage(image, exception); if (flip_image != (Image *) NULL) { DuplicateBlob(flip_image,image); ReplaceImageInList(&image,flip_image); } /* Try to change CTM according to Flip - I am not sure, must be checked. float_matrix Tx(3,3); Tx(0,0)= 1; Tx(1,0)= 0; Tx(2,0)=0; Tx(0,1)= 0; Tx(1,1)=-1; Tx(2,1)=0; Tx(0,2)= 0; Tx(1,2)=(WPG._2Rect.Y_ur+WPG._2Rect.Y_ll); Tx(2,2)=1; */ } /* Allocate next image structure. */ AcquireNextImage(image_info,image,exception); image->depth=8; if (image->next == (Image *) NULL) goto Finish; image=SyncNextImageInList(image); image->columns=image->rows=1; image->colors=0; break; case 0x12: /* Postscript WPG2*/ i=ReadBlobLSBShort(image); if(Rec2.RecordLength > (unsigned int) i) image=ExtractPostscript(image,image_info, TellBlob(image)+i, /*skip PS header in the wpg2*/ (ssize_t) (Rec2.RecordLength-i-2),exception); break; case 0x1B: /*bitmap rectangle*/ WPG2Flags = LoadWPG2Flags(image,StartWPG.PosSizePrecision,NULL,&CTM); (void) WPG2Flags; break; } } break; default: { ThrowReaderException(CoderError,"DataEncodingSchemeIsNotSupported"); } } Finish: (void) CloseBlob(image); { Image *p; ssize_t scene=0; /* Rewind list, removing any empty images while rewinding. */ p=image; image=NULL; while (p != (Image *) NULL) { Image *tmp=p; if ((p->rows == 0) || (p->columns == 0)) { p=p->previous; DeleteImageFromList(&tmp); } else { image=p; p=p->previous; } } /* Fix scene numbers. */ for (p=image; p != (Image *) NULL; p=p->next) p->scene=(size_t) scene++; } if (image == (Image *) NULL) ThrowReaderException(CorruptImageError, "ImageFileDoesNotContainAnyImageData"); return(image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e g i s t e r W P G I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Method RegisterWPGImage adds attributes for the WPG image format to % the list of supported formats. The attributes include the image format % tag, a method to read and/or write the format, whether the format % supports the saving of more than one frame to the same file or blob, % whether the format supports native in-memory I/O, and a brief % description of the format. % % The format of the RegisterWPGImage method is: % % size_t RegisterWPGImage(void) % */ ModuleExport size_t RegisterWPGImage(void) { MagickInfo *entry; entry=AcquireMagickInfo("WPG","WPG","Word Perfect Graphics"); entry->decoder=(DecodeImageHandler *) ReadWPGImage; entry->magick=(IsImageFormatHandler *) IsWPG; entry->flags|=CoderDecoderSeekableStreamFlag; (void) RegisterMagickInfo(entry); return(MagickImageCoderSignature); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % U n r e g i s t e r W P G I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Method UnregisterWPGImage removes format registrations made by the % WPG module from the list of supported formats. 
% % The format of the UnregisterWPGImage method is: % % UnregisterWPGImage(void) % */ ModuleExport void UnregisterWPGImage(void) { (void) UnregisterMagickInfo("WPG"); }
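/*
  A minimal caller sketch (hypothetical, not part of the coder): once the
  module is registered, the WPG decoder is reached through the generic
  MagickCore entry points.  Assumes MagickCore is linked and that a local
  file "sample.wpg" exists.
*/
#include <stdio.h>
#include "MagickCore/MagickCore.h"

int main(int argc,char **argv)
{
  ExceptionInfo *exception;
  ImageInfo *info;
  Image *image;

  (void) argc;
  MagickCoreGenesis(argv[0],MagickFalse);  /* registers coders, incl. WPG */
  exception=AcquireExceptionInfo();
  info=CloneImageInfo((ImageInfo *) NULL);
  (void) CopyMagickString(info->filename,"sample.wpg",MagickPathExtent);
  image=ReadImage(info,exception);         /* dispatches to ReadWPGImage */
  if (image == (Image *) NULL)
    CatchException(exception);
  else
    {
      (void) fprintf(stdout,"%.20g x %.20g\n",(double) image->columns,
        (double) image->rows);
      image=DestroyImageList(image);
    }
  info=DestroyImageInfo(info);
  exception=DestroyExceptionInfo(exception);
  MagickCoreTerminus();
  return(0);
}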
static Image *ReadWPGImage(const ImageInfo *image_info, ExceptionInfo *exception) { typedef struct { size_t FileId; MagickOffsetType DataOffset; unsigned int ProductType; unsigned int FileType; unsigned char MajorVersion; unsigned char MinorVersion; unsigned int EncryptKey; unsigned int Reserved; } WPGHeader; typedef struct { unsigned char RecType; size_t RecordLength; } WPGRecord; typedef struct { unsigned char Class; unsigned char RecType; size_t Extension; size_t RecordLength; } WPG2Record; typedef struct { unsigned HorizontalUnits; unsigned VerticalUnits; unsigned char PosSizePrecision; } WPG2Start; typedef struct { unsigned int Width; unsigned int Height; unsigned int Depth; unsigned int HorzRes; unsigned int VertRes; } WPGBitmapType1; typedef struct { unsigned int Width; unsigned int Height; unsigned char Depth; unsigned char Compression; } WPG2BitmapType1; typedef struct { unsigned int RotAngle; unsigned int LowLeftX; unsigned int LowLeftY; unsigned int UpRightX; unsigned int UpRightY; unsigned int Width; unsigned int Height; unsigned int Depth; unsigned int HorzRes; unsigned int VertRes; } WPGBitmapType2; typedef struct { unsigned int StartIndex; unsigned int NumOfEntries; } WPGColorMapRec; /* typedef struct { size_t PS_unknown1; unsigned int PS_unknown2; unsigned int PS_unknown3; } WPGPSl1Record; */ Image *image; unsigned int status; WPGHeader Header; WPGRecord Rec; WPG2Record Rec2; WPG2Start StartWPG; WPGBitmapType1 BitmapHeader1; WPG2BitmapType1 Bitmap2Header1; WPGBitmapType2 BitmapHeader2; WPGColorMapRec WPG_Palette; int i, bpp, WPG2Flags; ssize_t ldblk; size_t one; unsigned char *BImgBuff; tCTM CTM; /*current transform matrix*/ /* Open image file. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); one=1; image=AcquireImage(image_info,exception); image->depth=8; status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { image=DestroyImageList(image); return((Image *) NULL); } /* Read WPG image. 
*/ Header.FileId=ReadBlobLSBLong(image); Header.DataOffset=(MagickOffsetType) ReadBlobLSBLong(image); Header.ProductType=ReadBlobLSBShort(image); Header.FileType=ReadBlobLSBShort(image); Header.MajorVersion=ReadBlobByte(image); Header.MinorVersion=ReadBlobByte(image); Header.EncryptKey=ReadBlobLSBShort(image); Header.Reserved=ReadBlobLSBShort(image); if (Header.FileId!=0x435057FF || (Header.ProductType>>8)!=0x16) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); if (Header.EncryptKey!=0) ThrowReaderException(CoderError,"EncryptedWPGImageFileNotSupported"); image->columns = 1; image->rows = 1; image->colors = 0; bpp=0; BitmapHeader2.RotAngle=0; Rec2.RecordLength=0; switch(Header.FileType) { case 1: /* WPG level 1 */ while(!EOFBlob(image)) /* object parser loop */ { (void) SeekBlob(image,Header.DataOffset,SEEK_SET); if(EOFBlob(image)) break; Rec.RecType=(i=ReadBlobByte(image)); if(i==EOF) break; Rd_WP_DWORD(image,&Rec.RecordLength); if (Rec.RecordLength > GetBlobSize(image)) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); if(EOFBlob(image)) break; Header.DataOffset=TellBlob(image)+Rec.RecordLength; switch(Rec.RecType) { case 0x0B: /* bitmap type 1 */ BitmapHeader1.Width=ReadBlobLSBShort(image); BitmapHeader1.Height=ReadBlobLSBShort(image); if ((BitmapHeader1.Width == 0) || (BitmapHeader1.Height == 0)) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); BitmapHeader1.Depth=ReadBlobLSBShort(image); BitmapHeader1.HorzRes=ReadBlobLSBShort(image); BitmapHeader1.VertRes=ReadBlobLSBShort(image); if(BitmapHeader1.HorzRes && BitmapHeader1.VertRes) { image->units=PixelsPerCentimeterResolution; image->resolution.x=BitmapHeader1.HorzRes/470.0; image->resolution.y=BitmapHeader1.VertRes/470.0; } image->columns=BitmapHeader1.Width; image->rows=BitmapHeader1.Height; bpp=BitmapHeader1.Depth; goto UnpackRaster; case 0x0E: /*Color palette */ WPG_Palette.StartIndex=ReadBlobLSBShort(image); WPG_Palette.NumOfEntries=ReadBlobLSBShort(image); if ((WPG_Palette.NumOfEntries-WPG_Palette.StartIndex) > (Rec2.RecordLength-2-2) / 3) ThrowReaderException(CorruptImageError,"InvalidColormapIndex"); image->colors=WPG_Palette.NumOfEntries; if (!AcquireImageColormap(image,image->colors,exception)) goto NoMemory; for (i=WPG_Palette.StartIndex; i < (int)WPG_Palette.NumOfEntries; i++) { image->colormap[i].red=ScaleCharToQuantum((unsigned char) ReadBlobByte(image)); image->colormap[i].green=ScaleCharToQuantum((unsigned char) ReadBlobByte(image)); image->colormap[i].blue=ScaleCharToQuantum((unsigned char) ReadBlobByte(image)); } break; case 0x11: /* Start PS l1 */ if(Rec.RecordLength > 8) image=ExtractPostscript(image,image_info, TellBlob(image)+8, /* skip PS header in the wpg */ (ssize_t) Rec.RecordLength-8,exception); break; case 0x14: /* bitmap type 2 */ BitmapHeader2.RotAngle=ReadBlobLSBShort(image); BitmapHeader2.LowLeftX=ReadBlobLSBShort(image); BitmapHeader2.LowLeftY=ReadBlobLSBShort(image); BitmapHeader2.UpRightX=ReadBlobLSBShort(image); BitmapHeader2.UpRightY=ReadBlobLSBShort(image); BitmapHeader2.Width=ReadBlobLSBShort(image); BitmapHeader2.Height=ReadBlobLSBShort(image); if ((BitmapHeader2.Width == 0) || (BitmapHeader2.Height == 0)) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); BitmapHeader2.Depth=ReadBlobLSBShort(image); BitmapHeader2.HorzRes=ReadBlobLSBShort(image); BitmapHeader2.VertRes=ReadBlobLSBShort(image); image->units=PixelsPerCentimeterResolution; image->page.width=(unsigned int) ((BitmapHeader2.LowLeftX-BitmapHeader2.UpRightX)/470.0); 
image->page.height=(unsigned int) ((BitmapHeader2.LowLeftX-BitmapHeader2.UpRightY)/470.0); image->page.x=(int) (BitmapHeader2.LowLeftX/470.0); image->page.y=(int) (BitmapHeader2.LowLeftX/470.0); if(BitmapHeader2.HorzRes && BitmapHeader2.VertRes) { image->resolution.x=BitmapHeader2.HorzRes/470.0; image->resolution.y=BitmapHeader2.VertRes/470.0; } image->columns=BitmapHeader2.Width; image->rows=BitmapHeader2.Height; bpp=BitmapHeader2.Depth; UnpackRaster: status=SetImageExtent(image,image->columns,image->rows,exception); if (status == MagickFalse) break; if ((image->colors == 0) && (bpp <= 16)) { image->colors=one << bpp; if (!AcquireImageColormap(image,image->colors,exception)) { NoMemory: ThrowReaderException(ResourceLimitError, "MemoryAllocationFailed"); } /* printf("Load default colormap \n"); */ for (i=0; (i < (int) image->colors) && (i < 256); i++) { image->colormap[i].red=ScaleCharToQuantum(WPG1_Palette[i].Red); image->colormap[i].green=ScaleCharToQuantum(WPG1_Palette[i].Green); image->colormap[i].blue=ScaleCharToQuantum(WPG1_Palette[i].Blue); } } else { if (bpp < 24) if ( (image->colors < (one << bpp)) && (bpp != 24) ) image->colormap=(PixelInfo *) ResizeQuantumMemory( image->colormap,(size_t) (one << bpp), sizeof(*image->colormap)); } if (bpp == 1) { if(image->colormap[0].red==0 && image->colormap[0].green==0 && image->colormap[0].blue==0 && image->colormap[1].red==0 && image->colormap[1].green==0 && image->colormap[1].blue==0) { /* fix crippled monochrome palette */ image->colormap[1].red = image->colormap[1].green = image->colormap[1].blue = QuantumRange; } } if(UnpackWPGRaster(image,bpp,exception) < 0) /* The raster cannot be unpacked */ { DecompressionFailed: ThrowReaderException(CoderError,"UnableToDecompressImage"); } if(Rec.RecType==0x14 && BitmapHeader2.RotAngle!=0 && !image_info->ping) { /* flop command */ if(BitmapHeader2.RotAngle & 0x8000) { Image *flop_image; flop_image = FlopImage(image, exception); if (flop_image != (Image *) NULL) { DuplicateBlob(flop_image,image); ReplaceImageInList(&image,flop_image); } } /* flip command */ if(BitmapHeader2.RotAngle & 0x2000) { Image *flip_image; flip_image = FlipImage(image, exception); if (flip_image != (Image *) NULL) { DuplicateBlob(flip_image,image); ReplaceImageInList(&image,flip_image); } } /* rotate command */ if(BitmapHeader2.RotAngle & 0x0FFF) { Image *rotate_image; rotate_image=RotateImage(image,(BitmapHeader2.RotAngle & 0x0FFF), exception); if (rotate_image != (Image *) NULL) { DuplicateBlob(rotate_image,image); ReplaceImageInList(&image,rotate_image); } } } /* Allocate next image structure. 
*/ AcquireNextImage(image_info,image,exception); image->depth=8; if (image->next == (Image *) NULL) goto Finish; image=SyncNextImageInList(image); image->columns=image->rows=1; image->colors=0; break; case 0x1B: /* Postscript l2 */ if(Rec.RecordLength>0x3C) image=ExtractPostscript(image,image_info, TellBlob(image)+0x3C, /* skip PS l2 header in the wpg */ (ssize_t) Rec.RecordLength-0x3C,exception); break; } } break; case 2: /* WPG level 2 */ (void) memset(CTM,0,sizeof(CTM)); StartWPG.PosSizePrecision = 0; while(!EOFBlob(image)) /* object parser loop */ { (void) SeekBlob(image,Header.DataOffset,SEEK_SET); if(EOFBlob(image)) break; Rec2.Class=(i=ReadBlobByte(image)); if(i==EOF) break; Rec2.RecType=(i=ReadBlobByte(image)); if(i==EOF) break; Rd_WP_DWORD(image,&Rec2.Extension); Rd_WP_DWORD(image,&Rec2.RecordLength); if(EOFBlob(image)) break; Header.DataOffset=TellBlob(image)+Rec2.RecordLength; switch(Rec2.RecType) { case 1: StartWPG.HorizontalUnits=ReadBlobLSBShort(image); StartWPG.VerticalUnits=ReadBlobLSBShort(image); StartWPG.PosSizePrecision=ReadBlobByte(image); break; case 0x0C: /* Color palette */ WPG_Palette.StartIndex=ReadBlobLSBShort(image); WPG_Palette.NumOfEntries=ReadBlobLSBShort(image); if ((WPG_Palette.NumOfEntries-WPG_Palette.StartIndex) > (Rec2.RecordLength-2-2) / 3) ThrowReaderException(CorruptImageError,"InvalidColormapIndex"); image->colors=WPG_Palette.NumOfEntries; if (AcquireImageColormap(image,image->colors,exception) == MagickFalse) ThrowReaderException(ResourceLimitError, "MemoryAllocationFailed"); for (i=WPG_Palette.StartIndex; i < (int)WPG_Palette.NumOfEntries; i++) { image->colormap[i].red=ScaleCharToQuantum((char) ReadBlobByte(image)); image->colormap[i].green=ScaleCharToQuantum((char) ReadBlobByte(image)); image->colormap[i].blue=ScaleCharToQuantum((char) ReadBlobByte(image)); (void) ReadBlobByte(image); /*Opacity??*/ } break; case 0x0E: Bitmap2Header1.Width=ReadBlobLSBShort(image); Bitmap2Header1.Height=ReadBlobLSBShort(image); if ((Bitmap2Header1.Width == 0) || (Bitmap2Header1.Height == 0)) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); Bitmap2Header1.Depth=ReadBlobByte(image); Bitmap2Header1.Compression=ReadBlobByte(image); if(Bitmap2Header1.Compression > 1) continue; /*Unknown compression method */ switch(Bitmap2Header1.Depth) { case 1: bpp=1; break; case 2: bpp=2; break; case 3: bpp=4; break; case 4: bpp=8; break; case 8: bpp=24; break; default: continue; /*Ignore raster with unknown depth*/ } image->columns=Bitmap2Header1.Width; image->rows=Bitmap2Header1.Height; status=SetImageExtent(image,image->columns,image->rows,exception); if (status == MagickFalse) break; if ((image->colors == 0) && (bpp != 24)) { image->colors=one << bpp; if (!AcquireImageColormap(image,image->colors,exception)) goto NoMemory; } else { if(bpp < 24) if( image->colors<(one << bpp) && bpp!=24 ) image->colormap=(PixelInfo *) ResizeQuantumMemory( image->colormap,(size_t) (one << bpp), sizeof(*image->colormap)); } switch(Bitmap2Header1.Compression) { case 0: /*Uncompressed raster*/ { ldblk=(ssize_t) ((bpp*image->columns+7)/8); BImgBuff=(unsigned char *) AcquireQuantumMemory((size_t) ldblk+1,sizeof(*BImgBuff)); if (BImgBuff == (unsigned char *) NULL) goto NoMemory; for(i=0; i< (ssize_t) image->rows; i++) { (void) ReadBlob(image,ldblk,BImgBuff); InsertRow(image,BImgBuff,i,bpp,exception); } if(BImgBuff) BImgBuff=(unsigned char *) RelinquishMagickMemory(BImgBuff); break; } case 1: /*RLE for WPG2 */ { if( UnpackWPG2Raster(image,bpp,exception) < 0) goto DecompressionFailed; break; } 
} if(CTM[0][0]<0 && !image_info->ping) { /*?? RotAngle=360-RotAngle;*/ Image *flop_image; flop_image = FlopImage(image, exception); if (flop_image != (Image *) NULL) { DuplicateBlob(flop_image,image); ReplaceImageInList(&image,flop_image); } /* Try to change CTM according to Flip - I am not sure, must be checked. Tx(0,0)=-1; Tx(1,0)=0; Tx(2,0)=0; Tx(0,1)= 0; Tx(1,1)=1; Tx(2,1)=0; Tx(0,2)=(WPG._2Rect.X_ur+WPG._2Rect.X_ll); Tx(1,2)=0; Tx(2,2)=1; */ } if(CTM[1][1]<0 && !image_info->ping) { /*?? RotAngle=360-RotAngle;*/ Image *flip_image; flip_image = FlipImage(image, exception); if (flip_image != (Image *) NULL) { DuplicateBlob(flip_image,image); ReplaceImageInList(&image,flip_image); } /* Try to change CTM according to Flip - I am not sure, must be checked. float_matrix Tx(3,3); Tx(0,0)= 1; Tx(1,0)= 0; Tx(2,0)=0; Tx(0,1)= 0; Tx(1,1)=-1; Tx(2,1)=0; Tx(0,2)= 0; Tx(1,2)=(WPG._2Rect.Y_ur+WPG._2Rect.Y_ll); Tx(2,2)=1; */ } /* Allocate next image structure. */ AcquireNextImage(image_info,image,exception); image->depth=8; if (image->next == (Image *) NULL) goto Finish; image=SyncNextImageInList(image); image->columns=image->rows=1; image->colors=0; break; case 0x12: /* Postscript WPG2*/ i=ReadBlobLSBShort(image); if(Rec2.RecordLength > (unsigned int) i) image=ExtractPostscript(image,image_info, TellBlob(image)+i, /*skip PS header in the wpg2*/ (ssize_t) (Rec2.RecordLength-i-2),exception); break; case 0x1B: /*bitmap rectangle*/ WPG2Flags = LoadWPG2Flags(image,StartWPG.PosSizePrecision,NULL,&CTM); (void) WPG2Flags; break; } } break; default: { ThrowReaderException(CoderError,"DataEncodingSchemeIsNotSupported"); } } Finish: (void) CloseBlob(image); { Image *p; ssize_t scene=0; /* Rewind list, removing any empty images while rewinding. */ p=image; image=NULL; while (p != (Image *) NULL) { Image *tmp=p; if ((p->rows == 0) || (p->columns == 0)) { p=p->previous; DeleteImageFromList(&tmp); } else { image=p; p=p->previous; } } /* Fix scene numbers. */ for (p=image; p != (Image *) NULL; p=p->next) p->scene=(size_t) scene++; } if (image == (Image *) NULL) ThrowReaderException(CorruptImageError, "ImageFileDoesNotContainAnyImageData"); return(image); }
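/*
  The variant of ReadWPGImage that follows adds a palette-record sanity
  check; a standalone distillation of the two guards (hypothetical
  helper, simplified types): the requested entry range must fit in the
  record payload (4 header bytes, then 3 bytes per RGB entry), and
  StartIndex may not exceed NumOfEntries.
*/
static int WPGPaletteIsSane(unsigned start,unsigned entries,
  size_t record_length)
{
  if (record_length < 4)
    return 0;       /* no room for the two index words */
  if (start > entries)
    return 0;       /* the guard added in the code below */
  if ((size_t) (entries-start) > (record_length-4)/3)
    return 0;       /* range larger than the RGB payload */
  return 1;
}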
static Image *ReadWPGImage(const ImageInfo *image_info, ExceptionInfo *exception) { typedef struct { size_t FileId; MagickOffsetType DataOffset; unsigned int ProductType; unsigned int FileType; unsigned char MajorVersion; unsigned char MinorVersion; unsigned int EncryptKey; unsigned int Reserved; } WPGHeader; typedef struct { unsigned char RecType; size_t RecordLength; } WPGRecord; typedef struct { unsigned char Class; unsigned char RecType; size_t Extension; size_t RecordLength; } WPG2Record; typedef struct { unsigned HorizontalUnits; unsigned VerticalUnits; unsigned char PosSizePrecision; } WPG2Start; typedef struct { unsigned int Width; unsigned int Height; unsigned int Depth; unsigned int HorzRes; unsigned int VertRes; } WPGBitmapType1; typedef struct { unsigned int Width; unsigned int Height; unsigned char Depth; unsigned char Compression; } WPG2BitmapType1; typedef struct { unsigned int RotAngle; unsigned int LowLeftX; unsigned int LowLeftY; unsigned int UpRightX; unsigned int UpRightY; unsigned int Width; unsigned int Height; unsigned int Depth; unsigned int HorzRes; unsigned int VertRes; } WPGBitmapType2; typedef struct { unsigned int StartIndex; unsigned int NumOfEntries; } WPGColorMapRec; /* typedef struct { size_t PS_unknown1; unsigned int PS_unknown2; unsigned int PS_unknown3; } WPGPSl1Record; */ Image *image; unsigned int status; WPGHeader Header; WPGRecord Rec; WPG2Record Rec2; WPG2Start StartWPG; WPGBitmapType1 BitmapHeader1; WPG2BitmapType1 Bitmap2Header1; WPGBitmapType2 BitmapHeader2; WPGColorMapRec WPG_Palette; int i, bpp, WPG2Flags; ssize_t ldblk; size_t one; unsigned char *BImgBuff; tCTM CTM; /*current transform matrix*/ /* Open image file. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); one=1; image=AcquireImage(image_info,exception); image->depth=8; status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { image=DestroyImageList(image); return((Image *) NULL); } /* Read WPG image. 
*/ Header.FileId=ReadBlobLSBLong(image); Header.DataOffset=(MagickOffsetType) ReadBlobLSBLong(image); Header.ProductType=ReadBlobLSBShort(image); Header.FileType=ReadBlobLSBShort(image); Header.MajorVersion=ReadBlobByte(image); Header.MinorVersion=ReadBlobByte(image); Header.EncryptKey=ReadBlobLSBShort(image); Header.Reserved=ReadBlobLSBShort(image); if (Header.FileId!=0x435057FF || (Header.ProductType>>8)!=0x16) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); if (Header.EncryptKey!=0) ThrowReaderException(CoderError,"EncryptedWPGImageFileNotSupported"); image->columns = 1; image->rows = 1; image->colors = 0; bpp=0; BitmapHeader2.RotAngle=0; Rec2.RecordLength=0; switch(Header.FileType) { case 1: /* WPG level 1 */ while(!EOFBlob(image)) /* object parser loop */ { (void) SeekBlob(image,Header.DataOffset,SEEK_SET); if(EOFBlob(image)) break; Rec.RecType=(i=ReadBlobByte(image)); if(i==EOF) break; Rd_WP_DWORD(image,&Rec.RecordLength); if (Rec.RecordLength > GetBlobSize(image)) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); if(EOFBlob(image)) break; Header.DataOffset=TellBlob(image)+Rec.RecordLength; switch(Rec.RecType) { case 0x0B: /* bitmap type 1 */ BitmapHeader1.Width=ReadBlobLSBShort(image); BitmapHeader1.Height=ReadBlobLSBShort(image); if ((BitmapHeader1.Width == 0) || (BitmapHeader1.Height == 0)) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); BitmapHeader1.Depth=ReadBlobLSBShort(image); BitmapHeader1.HorzRes=ReadBlobLSBShort(image); BitmapHeader1.VertRes=ReadBlobLSBShort(image); if(BitmapHeader1.HorzRes && BitmapHeader1.VertRes) { image->units=PixelsPerCentimeterResolution; image->resolution.x=BitmapHeader1.HorzRes/470.0; image->resolution.y=BitmapHeader1.VertRes/470.0; } image->columns=BitmapHeader1.Width; image->rows=BitmapHeader1.Height; bpp=BitmapHeader1.Depth; goto UnpackRaster; case 0x0E: /*Color palette */ WPG_Palette.StartIndex=ReadBlobLSBShort(image); WPG_Palette.NumOfEntries=ReadBlobLSBShort(image); if ((WPG_Palette.NumOfEntries-WPG_Palette.StartIndex) > (Rec2.RecordLength-2-2)/3) ThrowReaderException(CorruptImageError,"InvalidColormapIndex"); if (WPG_Palette.StartIndex > WPG_Palette.NumOfEntries) ThrowReaderException(CorruptImageError,"InvalidColormapIndex"); image->colors=WPG_Palette.NumOfEntries; if (!AcquireImageColormap(image,image->colors,exception)) goto NoMemory; for (i=WPG_Palette.StartIndex; i < (int)WPG_Palette.NumOfEntries; i++) { image->colormap[i].red=ScaleCharToQuantum((unsigned char) ReadBlobByte(image)); image->colormap[i].green=ScaleCharToQuantum((unsigned char) ReadBlobByte(image)); image->colormap[i].blue=ScaleCharToQuantum((unsigned char) ReadBlobByte(image)); } break; case 0x11: /* Start PS l1 */ if(Rec.RecordLength > 8) image=ExtractPostscript(image,image_info, TellBlob(image)+8, /* skip PS header in the wpg */ (ssize_t) Rec.RecordLength-8,exception); break; case 0x14: /* bitmap type 2 */ BitmapHeader2.RotAngle=ReadBlobLSBShort(image); BitmapHeader2.LowLeftX=ReadBlobLSBShort(image); BitmapHeader2.LowLeftY=ReadBlobLSBShort(image); BitmapHeader2.UpRightX=ReadBlobLSBShort(image); BitmapHeader2.UpRightY=ReadBlobLSBShort(image); BitmapHeader2.Width=ReadBlobLSBShort(image); BitmapHeader2.Height=ReadBlobLSBShort(image); if ((BitmapHeader2.Width == 0) || (BitmapHeader2.Height == 0)) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); BitmapHeader2.Depth=ReadBlobLSBShort(image); BitmapHeader2.HorzRes=ReadBlobLSBShort(image); BitmapHeader2.VertRes=ReadBlobLSBShort(image); 
image->units=PixelsPerCentimeterResolution; image->page.width=(unsigned int) ((BitmapHeader2.LowLeftX-BitmapHeader2.UpRightX)/470.0); image->page.height=(unsigned int) ((BitmapHeader2.LowLeftY-BitmapHeader2.UpRightY)/470.0); image->page.x=(int) (BitmapHeader2.LowLeftX/470.0); image->page.y=(int) (BitmapHeader2.LowLeftY/470.0); if(BitmapHeader2.HorzRes && BitmapHeader2.VertRes) { image->resolution.x=BitmapHeader2.HorzRes/470.0; image->resolution.y=BitmapHeader2.VertRes/470.0; } image->columns=BitmapHeader2.Width; image->rows=BitmapHeader2.Height; bpp=BitmapHeader2.Depth; UnpackRaster: status=SetImageExtent(image,image->columns,image->rows,exception); if (status == MagickFalse) break; if ((image->colors == 0) && (bpp <= 16)) { image->colors=one << bpp; if (!AcquireImageColormap(image,image->colors,exception)) { NoMemory: ThrowReaderException(ResourceLimitError, "MemoryAllocationFailed"); } /* printf("Load default colormap \n"); */ for (i=0; (i < (int) image->colors) && (i < 256); i++) { image->colormap[i].red=ScaleCharToQuantum(WPG1_Palette[i].Red); image->colormap[i].green=ScaleCharToQuantum(WPG1_Palette[i].Green); image->colormap[i].blue=ScaleCharToQuantum(WPG1_Palette[i].Blue); } } else { if (bpp < 24) if ( (image->colors < (one << bpp)) && (bpp != 24) ) image->colormap=(PixelInfo *) ResizeQuantumMemory( image->colormap,(size_t) (one << bpp), sizeof(*image->colormap)); } if (bpp == 1) { if(image->colormap[0].red==0 && image->colormap[0].green==0 && image->colormap[0].blue==0 && image->colormap[1].red==0 && image->colormap[1].green==0 && image->colormap[1].blue==0) { /* fix crippled monochrome palette */ image->colormap[1].red = image->colormap[1].green = image->colormap[1].blue = QuantumRange; } } if(UnpackWPGRaster(image,bpp,exception) < 0) /* The raster cannot be unpacked */ { DecompressionFailed: ThrowReaderException(CoderError,"UnableToDecompressImage"); } if(Rec.RecType==0x14 && BitmapHeader2.RotAngle!=0 && !image_info->ping) { /* flop command */ if(BitmapHeader2.RotAngle & 0x8000) { Image *flop_image; flop_image = FlopImage(image, exception); if (flop_image != (Image *) NULL) { DuplicateBlob(flop_image,image); ReplaceImageInList(&image,flop_image); } } /* flip command */ if(BitmapHeader2.RotAngle & 0x2000) { Image *flip_image; flip_image = FlipImage(image, exception); if (flip_image != (Image *) NULL) { DuplicateBlob(flip_image,image); ReplaceImageInList(&image,flip_image); } } /* rotate command */ if(BitmapHeader2.RotAngle & 0x0FFF) { Image *rotate_image; rotate_image=RotateImage(image,(BitmapHeader2.RotAngle & 0x0FFF), exception); if (rotate_image != (Image *) NULL) { DuplicateBlob(rotate_image,image); ReplaceImageInList(&image,rotate_image); } } } /* Allocate next image structure. 
*/ AcquireNextImage(image_info,image,exception); image->depth=8; if (image->next == (Image *) NULL) goto Finish; image=SyncNextImageInList(image); image->columns=image->rows=1; image->colors=0; break; case 0x1B: /* Postscript l2 */ if(Rec.RecordLength>0x3C) image=ExtractPostscript(image,image_info, TellBlob(image)+0x3C, /* skip PS l2 header in the wpg */ (ssize_t) Rec.RecordLength-0x3C,exception); break; } } break; case 2: /* WPG level 2 */ (void) memset(CTM,0,sizeof(CTM)); StartWPG.PosSizePrecision = 0; while(!EOFBlob(image)) /* object parser loop */ { (void) SeekBlob(image,Header.DataOffset,SEEK_SET); if(EOFBlob(image)) break; Rec2.Class=(i=ReadBlobByte(image)); if(i==EOF) break; Rec2.RecType=(i=ReadBlobByte(image)); if(i==EOF) break; Rd_WP_DWORD(image,&Rec2.Extension); Rd_WP_DWORD(image,&Rec2.RecordLength); if(EOFBlob(image)) break; Header.DataOffset=TellBlob(image)+Rec2.RecordLength; switch(Rec2.RecType) { case 1: StartWPG.HorizontalUnits=ReadBlobLSBShort(image); StartWPG.VerticalUnits=ReadBlobLSBShort(image); StartWPG.PosSizePrecision=ReadBlobByte(image); break; case 0x0C: /* Color palette */ WPG_Palette.StartIndex=ReadBlobLSBShort(image); WPG_Palette.NumOfEntries=ReadBlobLSBShort(image); if ((WPG_Palette.NumOfEntries-WPG_Palette.StartIndex) > (Rec2.RecordLength-2-2) / 3) ThrowReaderException(CorruptImageError,"InvalidColormapIndex"); image->colors=WPG_Palette.NumOfEntries; if (AcquireImageColormap(image,image->colors,exception) == MagickFalse) ThrowReaderException(ResourceLimitError, "MemoryAllocationFailed"); for (i=WPG_Palette.StartIndex; i < (int)WPG_Palette.NumOfEntries; i++) { image->colormap[i].red=ScaleCharToQuantum((char) ReadBlobByte(image)); image->colormap[i].green=ScaleCharToQuantum((char) ReadBlobByte(image)); image->colormap[i].blue=ScaleCharToQuantum((char) ReadBlobByte(image)); (void) ReadBlobByte(image); /*Opacity??*/ } break; case 0x0E: Bitmap2Header1.Width=ReadBlobLSBShort(image); Bitmap2Header1.Height=ReadBlobLSBShort(image); if ((Bitmap2Header1.Width == 0) || (Bitmap2Header1.Height == 0)) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); Bitmap2Header1.Depth=ReadBlobByte(image); Bitmap2Header1.Compression=ReadBlobByte(image); if(Bitmap2Header1.Compression > 1) continue; /*Unknown compression method */ switch(Bitmap2Header1.Depth) { case 1: bpp=1; break; case 2: bpp=2; break; case 3: bpp=4; break; case 4: bpp=8; break; case 8: bpp=24; break; default: continue; /*Ignore raster with unknown depth*/ } image->columns=Bitmap2Header1.Width; image->rows=Bitmap2Header1.Height; status=SetImageExtent(image,image->columns,image->rows,exception); if (status == MagickFalse) break; if ((image->colors == 0) && (bpp != 24)) { image->colors=one << bpp; if (!AcquireImageColormap(image,image->colors,exception)) goto NoMemory; } else { if(bpp < 24) if( image->colors<(one << bpp) && bpp!=24 ) image->colormap=(PixelInfo *) ResizeQuantumMemory( image->colormap,(size_t) (one << bpp), sizeof(*image->colormap)); } switch(Bitmap2Header1.Compression) { case 0: /*Uncompressed raster*/ { ldblk=(ssize_t) ((bpp*image->columns+7)/8); BImgBuff=(unsigned char *) AcquireQuantumMemory((size_t) ldblk+1,sizeof(*BImgBuff)); if (BImgBuff == (unsigned char *) NULL) goto NoMemory; for(i=0; i< (ssize_t) image->rows; i++) { (void) ReadBlob(image,ldblk,BImgBuff); InsertRow(image,BImgBuff,i,bpp,exception); } if(BImgBuff) BImgBuff=(unsigned char *) RelinquishMagickMemory(BImgBuff); break; } case 1: /*RLE for WPG2 */ { if( UnpackWPG2Raster(image,bpp,exception) < 0) goto DecompressionFailed; break; } 
} if(CTM[0][0]<0 && !image_info->ping) { /*?? RotAngle=360-RotAngle;*/ Image *flop_image; flop_image = FlopImage(image, exception); if (flop_image != (Image *) NULL) { DuplicateBlob(flop_image,image); ReplaceImageInList(&image,flop_image); } /* Try to change CTM according to Flip - I am not sure, must be checked. Tx(0,0)=-1; Tx(1,0)=0; Tx(2,0)=0; Tx(0,1)= 0; Tx(1,1)=1; Tx(2,1)=0; Tx(0,2)=(WPG._2Rect.X_ur+WPG._2Rect.X_ll); Tx(1,2)=0; Tx(2,2)=1; */ } if(CTM[1][1]<0 && !image_info->ping) { /*?? RotAngle=360-RotAngle;*/ Image *flip_image; flip_image = FlipImage(image, exception); if (flip_image != (Image *) NULL) { DuplicateBlob(flip_image,image); ReplaceImageInList(&image,flip_image); } /* Try to change CTM according to Flip - I am not sure, must be checked. float_matrix Tx(3,3); Tx(0,0)= 1; Tx(1,0)= 0; Tx(2,0)=0; Tx(0,1)= 0; Tx(1,1)=-1; Tx(2,1)=0; Tx(0,2)= 0; Tx(1,2)=(WPG._2Rect.Y_ur+WPG._2Rect.Y_ll); Tx(2,2)=1; */ } /* Allocate next image structure. */ AcquireNextImage(image_info,image,exception); image->depth=8; if (image->next == (Image *) NULL) goto Finish; image=SyncNextImageInList(image); image->columns=image->rows=1; image->colors=0; break; case 0x12: /* Postscript WPG2*/ i=ReadBlobLSBShort(image); if(Rec2.RecordLength > (unsigned int) i) image=ExtractPostscript(image,image_info, TellBlob(image)+i, /*skip PS header in the wpg2*/ (ssize_t) (Rec2.RecordLength-i-2),exception); break; case 0x1B: /*bitmap rectangle*/ WPG2Flags = LoadWPG2Flags(image,StartWPG.PosSizePrecision,NULL,&CTM); (void) WPG2Flags; break; } } break; default: { ThrowReaderException(CoderError,"DataEncodingSchemeIsNotSupported"); } } Finish: (void) CloseBlob(image); { Image *p; ssize_t scene=0; /* Rewind list, removing any empty images while rewinding. */ p=image; image=NULL; while (p != (Image *) NULL) { Image *tmp=p; if ((p->rows == 0) || (p->columns == 0)) { p=p->previous; DeleteImageFromList(&tmp); } else { image=p; p=p->previous; } } /* Fix scene numbers. */ for (p=image; p != (Image *) NULL; p=p->next) p->scene=(size_t) scene++; } if (image == (Image *) NULL) ThrowReaderException(CorruptImageError, "ImageFileDoesNotContainAnyImageData"); return(image); }
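Before the record's remaining fields, a note on the flaw this entry documents: the 0x0E/0x0C palette handlers above size and fill the colormap from attacker-controlled StartIndex, NumOfEntries, and RecordLength fields. Below is a minimal, self-contained sketch of the full validation they need; the helper name and types are illustrative only and not part of the ImageMagick API.

#include <stddef.h>

/* Hypothetical guard, not ImageMagick API: returns nonzero when a WPG
   palette record is sane. start/count mirror StartIndex/NumOfEntries;
   record_length is the record payload including the two 16-bit header
   fields (hence the -2-2), with 3 bytes per RGB entry. */
int wpg_palette_is_valid(size_t start, size_t count, size_t record_length)
{
  if (start > count)                           /* inverted range */
    return 0;
  if (record_length < 2 + 2)                   /* no room for the header */
    return 0;
  if ((count - start) > (record_length - 2 - 2) / 3)
    return 0;                                  /* entries exceed payload */
  return 1;
}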
{'added': [(1053, ' (Rec2.RecordLength-2-2)/3)'), (1054, ' ThrowReaderException(CorruptImageError,"InvalidColormapIndex");'), (1055, ' if (WPG_Palette.StartIndex > WPG_Palette.NumOfEntries)')], 'deleted': [(1053, ' (Rec2.RecordLength-2-2) / 3)')]}
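Read against the hunk above: line 1053 keeps the record-length capacity check (only reformatted), while the two added lines introduce the explicit StartIndex > NumOfEntries rejection, i.e. exactly the inverted-range case in the sketch before the diff.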
3
1
1169
8651
https://github.com/ImageMagick/ImageMagick
CVE-2017-16546
['CWE-119']
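CWE-119 here is the out-of-bounds buffer access class: the colormap loop writes entries from StartIndex up to NumOfEntries, so any mismatch between those fields and the allocation derived from image->colors can index past the end of the array. A compilable illustration of the safe shape, with hypothetical names:

#include <stdlib.h>

typedef struct { unsigned char r, g, b; } RGB;

/* Hypothetical illustration: allocate exactly `colors` entries and refuse
   any start/count range that could index outside them (the CWE-119 class
   of fault this record captures). */
RGB *load_palette(size_t start, size_t count, size_t colors)
{
  RGB *map;
  size_t i;

  if (start > count || count > colors)
    return NULL;                       /* reject inverted or oversized ranges */
  map = calloc(colors, sizeof(*map));
  if (map == NULL)
    return NULL;
  for (i = start; i < count; i++)
    map[i].r = map[i].g = map[i].b = 0;  /* entry bytes would be read here */
  return map;
}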
tiff.c
ReadTIFFImage
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % TTTTT IIIII FFFFF FFFFF % % T I F F % % T I FFF FFF % % T I F F % % T IIIII F F % % % % % % Read/Write TIFF Image Format % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. */ #ifdef __VMS #define JPEG_SUPPORT 1 #endif #include "MagickCore/studio.h" #include "MagickCore/artifact.h" #include "MagickCore/attribute.h" #include "MagickCore/blob.h" #include "MagickCore/blob-private.h" #include "MagickCore/cache.h" #include "MagickCore/channel.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colormap.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/constitute.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/geometry.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/magick.h" #include "MagickCore/memory_.h" #include "MagickCore/memory-private.h" #include "MagickCore/module.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/pixel-private.h" #include "MagickCore/property.h" #include "MagickCore/quantum.h" #include "MagickCore/quantum-private.h" #include "MagickCore/profile.h" #include "MagickCore/resize.h" #include "MagickCore/resource_.h" #include "MagickCore/semaphore.h" #include "MagickCore/splay-tree.h" #include "MagickCore/static.h" #include "MagickCore/statistic.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread_.h" #include "MagickCore/token.h" #include "MagickCore/utility.h" #include "psd-private.h" #if defined(MAGICKCORE_TIFF_DELEGATE) # if defined(MAGICKCORE_HAVE_TIFFCONF_H) # include <tiffconf.h> # endif # include <tiff.h> # include <tiffio.h> # if !defined(COMPRESSION_ADOBE_DEFLATE) # define COMPRESSION_ADOBE_DEFLATE 8 # endif # if !defined(PREDICTOR_HORIZONTAL) # define PREDICTOR_HORIZONTAL 2 # endif # if !defined(TIFFTAG_COPYRIGHT) # define TIFFTAG_COPYRIGHT 33432 # endif # if !defined(TIFFTAG_OPIIMAGEID) # define TIFFTAG_OPIIMAGEID 32781 # endif # if defined(COMPRESSION_ZSTD) && defined(MAGICKCORE_ZSTD_DELEGATE) # include <zstd.h> # endif #if defined(MAGICKCORE_HAVE_STDINT_H) && (TIFFLIB_VERSION >= 20201219) # undef uint16 # define uint16 uint16_t # undef uint32 # define uint32 uint32_t #endif /* Typedef declarations. 
*/ typedef enum { ReadYCCKMethod, ReadStripMethod, ReadTileMethod, ReadGenericMethod } TIFFMethodType; typedef struct _PhotoshopProfile { StringInfo *data; MagickOffsetType offset; size_t length, extent, quantum; } PhotoshopProfile; /* Global declarations. */ static MagickThreadKey tiff_exception; static SemaphoreInfo *tiff_semaphore = (SemaphoreInfo *) NULL; static TIFFErrorHandler error_handler, warning_handler; static volatile MagickBooleanType instantiate_key = MagickFalse; /* Forward declarations. */ static Image * ReadTIFFImage(const ImageInfo *,ExceptionInfo *); static MagickBooleanType WriteGROUP4Image(const ImageInfo *,Image *,ExceptionInfo *), WritePTIFImage(const ImageInfo *,Image *,ExceptionInfo *), WriteTIFFImage(const ImageInfo *,Image *,ExceptionInfo *); static MagickOffsetType TIFFSeekCustomStream(const MagickOffsetType offset, const int whence,void *user_data) { PhotoshopProfile *profile; profile=(PhotoshopProfile *) user_data; switch (whence) { case SEEK_SET: default: { if (offset < 0) return(-1); profile->offset=offset; break; } case SEEK_CUR: { if (((offset > 0) && (profile->offset > (MAGICK_SSIZE_MAX-offset))) || ((offset < 0) && (profile->offset < (MAGICK_SSIZE_MIN-offset)))) { errno=EOVERFLOW; return(-1); } if ((profile->offset+offset) < 0) return(-1); profile->offset+=offset; break; } case SEEK_END: { if (((MagickOffsetType) profile->length+offset) < 0) return(-1); profile->offset=profile->length+offset; break; } } return(profile->offset); } static MagickOffsetType TIFFTellCustomStream(void *user_data) { PhotoshopProfile *profile; profile=(PhotoshopProfile *) user_data; return(profile->offset); } static void InitPSDInfo(const Image *image,PSDInfo *info) { (void) memset(info,0,sizeof(*info)); info->version=1; info->columns=image->columns; info->rows=image->rows; info->mode=10; /* Set the mode to a value that won't change the colorspace */ info->channels=1U; info->min_channels=1U; info->has_merged_image=MagickFalse; if (image->storage_class == PseudoClass) info->mode=2; /* indexed mode */ else { info->channels=(unsigned short) image->number_channels; info->min_channels=info->channels; if (image->alpha_trait == BlendPixelTrait) info->min_channels--; } } #endif /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s T I F F % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsTIFF() returns MagickTrue if the image format type, identified by the % magick string, is TIFF. % % The format of the IsTIFF method is: % % MagickBooleanType IsTIFF(const unsigned char *magick,const size_t length) % % A description of each parameter follows: % % o magick: compare image format pattern against these bytes. % % o length: Specifies the length of the magick string. 
% */ static MagickBooleanType IsTIFF(const unsigned char *magick,const size_t length) { if (length < 4) return(MagickFalse); if (memcmp(magick,"\115\115\000\052",4) == 0) return(MagickTrue); if (memcmp(magick,"\111\111\052\000",4) == 0) return(MagickTrue); #if defined(TIFF_VERSION_BIG) if (length < 8) return(MagickFalse); if (memcmp(magick,"\115\115\000\053\000\010\000\000",8) == 0) return(MagickTrue); if (memcmp(magick,"\111\111\053\000\010\000\000\000",8) == 0) return(MagickTrue); #endif return(MagickFalse); } #if defined(MAGICKCORE_TIFF_DELEGATE) /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e a d G R O U P 4 I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReadGROUP4Image() reads a raw CCITT Group 4 image file and returns it. It % allocates the memory necessary for the new Image structure and returns a % pointer to the new image. % % The format of the ReadGROUP4Image method is: % % Image *ReadGROUP4Image(const ImageInfo *image_info, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: the image info. % % o exception: return any errors or warnings in this structure. % */ static inline size_t WriteLSBLong(FILE *file,const unsigned int value) { unsigned char buffer[4]; buffer[0]=(unsigned char) value; buffer[1]=(unsigned char) (value >> 8); buffer[2]=(unsigned char) (value >> 16); buffer[3]=(unsigned char) (value >> 24); return(fwrite(buffer,1,4,file)); } static Image *ReadGROUP4Image(const ImageInfo *image_info, ExceptionInfo *exception) { char filename[MagickPathExtent]; FILE *file; Image *image; ImageInfo *read_info; int c, unique_file; MagickBooleanType status; size_t length; ssize_t offset, strip_offset; /* Open image file. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); image=AcquireImage(image_info,exception); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { image=DestroyImageList(image); return((Image *) NULL); } /* Write raw CCITT Group 4 wrapped as a TIFF image file. 
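  The wrapper below hand-writes a minimal little-endian TIFF into a
  temporary file: the 10-byte header, a fixed IFD (width, length,
  compression 4 = CCITT Group 4, strip offset, resolution), then the raw
  G4 stream copied byte-for-byte, with the strip byte count patched in
  afterwards at the recorded `offset`. The temporary file is then re-read
  through ReadTIFFImage.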
*/ file=(FILE *) NULL; unique_file=AcquireUniqueFileResource(filename); if (unique_file != -1) file=fdopen(unique_file,"wb"); if ((unique_file == -1) || (file == (FILE *) NULL)) ThrowImageException(FileOpenError,"UnableToCreateTemporaryFile"); length=fwrite("\111\111\052\000\010\000\000\000\016\000",1,10,file); if (length != 10) ThrowReaderException(CorruptImageError,"UnexpectedEndOfFile"); length=fwrite("\376\000\003\000\001\000\000\000\000\000\000\000",1,12,file); length=fwrite("\000\001\004\000\001\000\000\000",1,8,file); length=WriteLSBLong(file,(unsigned int) image->columns); length=fwrite("\001\001\004\000\001\000\000\000",1,8,file); length=WriteLSBLong(file,(unsigned int) image->rows); length=fwrite("\002\001\003\000\001\000\000\000\001\000\000\000",1,12,file); length=fwrite("\003\001\003\000\001\000\000\000\004\000\000\000",1,12,file); length=fwrite("\006\001\003\000\001\000\000\000\000\000\000\000",1,12,file); length=fwrite("\021\001\003\000\001\000\000\000",1,8,file); strip_offset=10+(12*14)+4+8; length=WriteLSBLong(file,(unsigned int) strip_offset); length=fwrite("\022\001\003\000\001\000\000\000",1,8,file); length=WriteLSBLong(file,(unsigned int) image_info->orientation); length=fwrite("\025\001\003\000\001\000\000\000\001\000\000\000",1,12,file); length=fwrite("\026\001\004\000\001\000\000\000",1,8,file); length=WriteLSBLong(file,(unsigned int) image->rows); length=fwrite("\027\001\004\000\001\000\000\000\000\000\000\000",1,12,file); offset=(ssize_t) ftell(file)-4; length=fwrite("\032\001\005\000\001\000\000\000",1,8,file); length=WriteLSBLong(file,(unsigned int) (strip_offset-8)); length=fwrite("\033\001\005\000\001\000\000\000",1,8,file); length=WriteLSBLong(file,(unsigned int) (strip_offset-8)); length=fwrite("\050\001\003\000\001\000\000\000\002\000\000\000",1,12,file); length=fwrite("\000\000\000\000",1,4,file); length=WriteLSBLong(file,(unsigned int) image->resolution.x); length=WriteLSBLong(file,1); status=MagickTrue; for (length=0; (c=ReadBlobByte(image)) != EOF; length++) if (fputc(c,file) != c) status=MagickFalse; offset=(ssize_t) fseek(file,(ssize_t) offset,SEEK_SET); length=WriteLSBLong(file,(unsigned int) length); if (ferror(file) != 0) { (void) fclose(file); ThrowImageException(FileOpenError,"UnableToCreateTemporaryFile"); } (void) fclose(file); (void) CloseBlob(image); image=DestroyImage(image); /* Read TIFF image. */ read_info=CloneImageInfo((ImageInfo *) NULL); (void) FormatLocaleString(read_info->filename,MagickPathExtent,"%s",filename); image=ReadTIFFImage(read_info,exception); read_info=DestroyImageInfo(read_info); if (image != (Image *) NULL) { (void) CopyMagickString(image->filename,image_info->filename, MagickPathExtent); (void) CopyMagickString(image->magick_filename,image_info->filename, MagickPathExtent); (void) CopyMagickString(image->magick,"GROUP4",MagickPathExtent); } (void) RelinquishUniqueFileResource(filename); if (status == MagickFalse) image=DestroyImage(image); return(image); } #endif #if defined(MAGICKCORE_TIFF_DELEGATE) /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e a d T I F F I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReadTIFFImage() reads a Tagged image file and returns it. It allocates the % memory necessary for the new Image structure and returns a pointer to the % new image. 
% % The format of the ReadTIFFImage method is: % % Image *ReadTIFFImage(const ImageInfo *image_info, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: the image info. % % o exception: return any errors or warnings in this structure. % */ static inline unsigned char ClampYCC(double value) { value=255.0-value; if (value < 0.0) return((unsigned char)0); if (value > 255.0) return((unsigned char)255); return((unsigned char)(value)); } static MagickBooleanType DecodeLabImage(Image *image,ExceptionInfo *exception) { CacheView *image_view; MagickBooleanType status; ssize_t y; status=MagickTrue; image_view=AcquireAuthenticCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { Quantum *magick_restrict q; ssize_t x; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; break; } for (x=0; x < (ssize_t) image->columns; x++) { double a, b; a=QuantumScale*GetPixela(image,q)+0.5; if (a > 1.0) a-=1.0; b=QuantumScale*GetPixelb(image,q)+0.5; if (b > 1.0) b-=1.0; SetPixela(image,QuantumRange*a,q); SetPixelb(image,QuantumRange*b,q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) { status=MagickFalse; break; } } image_view=DestroyCacheView(image_view); return(status); } static MagickBooleanType ReadProfile(Image *image,const char *name, const unsigned char *datum,ssize_t length,ExceptionInfo *exception) { MagickBooleanType status; StringInfo *profile; if (length < 4) return(MagickFalse); profile=BlobToStringInfo(datum,(size_t) length); if (profile == (StringInfo *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); status=SetImageProfile(image,name,profile,exception); profile=DestroyStringInfo(profile); if (status == MagickFalse) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); return(MagickTrue); } #if defined(__cplusplus) || defined(c_plusplus) extern "C" { #endif static int TIFFCloseBlob(thandle_t image) { (void) CloseBlob((Image *) image); return(0); } static void TIFFErrors(const char *,const char *,va_list) magick_attribute((__format__ (__printf__,2,0))); static void TIFFErrors(const char *module,const char *format,va_list error) { char message[MagickPathExtent]; ExceptionInfo *exception; #if defined(MAGICKCORE_HAVE_VSNPRINTF) (void) vsnprintf(message,MagickPathExtent-2,format,error); #else (void) vsprintf(message,format,error); #endif message[MagickPathExtent-2]='\0'; (void) ConcatenateMagickString(message,".",MagickPathExtent); exception=(ExceptionInfo *) GetMagickThreadValue(tiff_exception); if (exception != (ExceptionInfo *) NULL) (void) ThrowMagickException(exception,GetMagickModule(),CoderError,message, "`%s'",module); } static toff_t TIFFGetBlobSize(thandle_t image) { return((toff_t) GetBlobSize((Image *) image)); } static MagickBooleanType TIFFGetProfiles(TIFF *tiff,Image *image, ExceptionInfo *exception) { MagickBooleanType status; uint32 length = 0; unsigned char *profile = (unsigned char *) NULL; status=MagickTrue; #if defined(TIFFTAG_ICCPROFILE) if ((TIFFGetField(tiff,TIFFTAG_ICCPROFILE,&length,&profile) == 1) && (profile != (unsigned char *) NULL)) status=ReadProfile(image,"icc",profile,(ssize_t) length,exception); #endif #if defined(TIFFTAG_PHOTOSHOP) if ((TIFFGetField(tiff,TIFFTAG_PHOTOSHOP,&length,&profile) == 1) && (profile != (unsigned char *) NULL)) status=ReadProfile(image,"8bim",profile,(ssize_t) length,exception); #endif #if 
defined(TIFFTAG_RICHTIFFIPTC) && (TIFFLIB_VERSION >= 20191103) if ((TIFFGetField(tiff,TIFFTAG_RICHTIFFIPTC,&length,&profile) == 1) && (profile != (unsigned char *) NULL)) { const TIFFField *field; field=TIFFFieldWithTag(tiff,TIFFTAG_RICHTIFFIPTC); if (TIFFFieldDataType(field) == TIFF_LONG) { if (TIFFIsByteSwapped(tiff) != 0) TIFFSwabArrayOfLong((uint32 *) profile,(size_t) length); status=ReadProfile(image,"iptc",profile,4L*length,exception); } else status=ReadProfile(image,"iptc",profile,length,exception); } #endif #if defined(TIFFTAG_XMLPACKET) if ((TIFFGetField(tiff,TIFFTAG_XMLPACKET,&length,&profile) == 1) && (profile != (unsigned char *) NULL)) { StringInfo *dng; status=ReadProfile(image,"xmp",profile,(ssize_t) length,exception); dng=BlobToStringInfo(profile,length); if (dng != (StringInfo *) NULL) { const char *target = "dc:format=\"image/dng\""; if (strstr((char *) GetStringInfoDatum(dng),target) != (char *) NULL) (void) CopyMagickString(image->magick,"DNG",MagickPathExtent); dng=DestroyStringInfo(dng); } } #endif if ((TIFFGetField(tiff,34118,&length,&profile) == 1) && (profile != (unsigned char *) NULL)) status=ReadProfile(image,"tiff:34118",profile,(ssize_t) length, exception); if ((TIFFGetField(tiff,37724,&length,&profile) == 1) && (profile != (unsigned char *) NULL)) status=ReadProfile(image,"tiff:37724",profile,(ssize_t) length,exception); return(status); } static MagickBooleanType TIFFGetProperties(TIFF *tiff,Image *image, ExceptionInfo *exception) { char message[MagickPathExtent], *text; MagickBooleanType status; uint32 count, type; text=(char *) NULL; status=MagickTrue; if ((TIFFGetField(tiff,TIFFTAG_ARTIST,&text) == 1) && (text != (char *) NULL)) status=SetImageProperty(image,"tiff:artist",text,exception); if ((TIFFGetField(tiff,TIFFTAG_COPYRIGHT,&text) == 1) && (text != (char *) NULL)) status=SetImageProperty(image,"tiff:copyright",text,exception); if ((TIFFGetField(tiff,TIFFTAG_DATETIME,&text) == 1) && (text != (char *) NULL)) status=SetImageProperty(image,"tiff:timestamp",text,exception); if ((TIFFGetField(tiff,TIFFTAG_DOCUMENTNAME,&text) == 1) && (text != (char *) NULL)) status=SetImageProperty(image,"tiff:document",text,exception); if ((TIFFGetField(tiff,TIFFTAG_HOSTCOMPUTER,&text) == 1) && (text != (char *) NULL)) status=SetImageProperty(image,"tiff:hostcomputer",text,exception); if ((TIFFGetField(tiff,TIFFTAG_IMAGEDESCRIPTION,&text) == 1) && (text != (char *) NULL)) status=SetImageProperty(image,"comment",text,exception); if ((TIFFGetField(tiff,TIFFTAG_MAKE,&text) == 1) && (text != (char *) NULL)) status=SetImageProperty(image,"tiff:make",text,exception); if ((TIFFGetField(tiff,TIFFTAG_MODEL,&text) == 1) && (text != (char *) NULL)) status=SetImageProperty(image,"tiff:model",text,exception); if ((TIFFGetField(tiff,TIFFTAG_OPIIMAGEID,&count,&text) == 1) && (text != (char *) NULL)) { if (count >= MagickPathExtent) count=MagickPathExtent-1; (void) CopyMagickString(message,text,count+1); status=SetImageProperty(image,"tiff:image-id",message,exception); } if ((TIFFGetField(tiff,TIFFTAG_PAGENAME,&text) == 1) && (text != (char *) NULL)) status=SetImageProperty(image,"label",text,exception); if ((TIFFGetField(tiff,TIFFTAG_SOFTWARE,&text) == 1) && (text != (char *) NULL)) status=SetImageProperty(image,"tiff:software",text,exception); if ((TIFFGetField(tiff,33423,&count,&text) == 1) && (text != (char *) NULL)) { if (count >= MagickPathExtent) count=MagickPathExtent-1; (void) CopyMagickString(message,text,count+1); 
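      /* Counted TIFF ASCII tags need not be NUL-terminated: the count is
         clamped to the message buffer above and the copy bounded before
         the value is published as an image property. */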
status=SetImageProperty(image,"tiff:kodak-33423",message,exception); } if ((TIFFGetField(tiff,36867,&count,&text) == 1) && (text != (char *) NULL)) { if (count >= MagickPathExtent) count=MagickPathExtent-1; (void) CopyMagickString(message,text,count+1); status=SetImageProperty(image,"tiff:kodak-36867",message,exception); } if (TIFFGetField(tiff,TIFFTAG_SUBFILETYPE,&type) == 1) switch (type) { case 0x01: { status=SetImageProperty(image,"tiff:subfiletype","REDUCEDIMAGE", exception); break; } case 0x02: { status=SetImageProperty(image,"tiff:subfiletype","PAGE",exception); break; } case 0x04: { status=SetImageProperty(image,"tiff:subfiletype","MASK",exception); break; } default: break; } return(status); } static MagickBooleanType TIFFSetImageProperties(TIFF *tiff,Image *image, const char *tag,ExceptionInfo *exception) { char buffer[MagickPathExtent], filename[MagickPathExtent]; FILE *file; int unique_file; /* Set EXIF or GPS image properties. */ unique_file=AcquireUniqueFileResource(filename); file=(FILE *) NULL; if (unique_file != -1) file=fdopen(unique_file,"rb+"); if ((unique_file == -1) || (file == (FILE *) NULL)) { (void) RelinquishUniqueFileResource(filename); (void) ThrowMagickException(exception,GetMagickModule(),WandError, "UnableToCreateTemporaryFile","`%s'",filename); return(MagickFalse); } TIFFPrintDirectory(tiff,file,0); (void) fseek(file,0,SEEK_SET); while (fgets(buffer,(int) sizeof(buffer),file) != NULL) { char *p, property[MagickPathExtent], value[MagickPathExtent]; StripString(buffer); p=strchr(buffer,':'); if (p == (char *) NULL) continue; *p='\0'; (void) sprintf(property,"%s%.1024s",tag,buffer); (void) sprintf(value,"%s",p+1); StripString(value); (void) SetImageProperty(image,property,value,exception); } (void) fclose(file); (void) RelinquishUniqueFileResource(filename); return(MagickTrue); } static MagickBooleanType TIFFGetEXIFProperties(TIFF *tiff,Image *image, ExceptionInfo *exception) { #if defined(MAGICKCORE_HAVE_TIFFREADEXIFDIRECTORY) MagickBooleanType status; tdir_t directory; #if defined(TIFF_VERSION_BIG) uint64 #else uint32 #endif offset; /* Read EXIF properties. */ offset=0; if (TIFFGetField(tiff,TIFFTAG_EXIFIFD,&offset) != 1) return(MagickFalse); directory=TIFFCurrentDirectory(tiff); if (TIFFReadEXIFDirectory(tiff,offset) != 1) { TIFFSetDirectory(tiff,directory); return(MagickFalse); } status=TIFFSetImageProperties(tiff,image,"exif:",exception); TIFFSetDirectory(tiff,directory); return(status); #else (void) tiff; (void) image; return(MagickTrue); #endif } static MagickBooleanType TIFFGetGPSProperties(TIFF *tiff,Image *image, ExceptionInfo *exception) { #if defined(MAGICKCORE_HAVE_TIFFREADGPSDIRECTORY) MagickBooleanType status; tdir_t directory; #if defined(TIFF_VERSION_BIG) uint64 #else uint32 #endif offset; /* Read GPS properties. 
*/ offset=0; if (TIFFGetField(tiff,TIFFTAG_GPSIFD,&offset) != 1) return(MagickFalse); directory=TIFFCurrentDirectory(tiff); if (TIFFReadGPSDirectory(tiff,offset) != 1) { TIFFSetDirectory(tiff,directory); return(MagickFalse); } status=TIFFSetImageProperties(tiff,image,"exif:GPS",exception); TIFFSetDirectory(tiff,directory); return(status); #else magick_unreferenced(tiff); magick_unreferenced(image); magick_unreferenced(exception); return(MagickTrue); #endif } static int TIFFMapBlob(thandle_t image,tdata_t *base,toff_t *size) { *base=(tdata_t *) GetBlobStreamData((Image *) image); if (*base != (tdata_t *) NULL) *size=(toff_t) GetBlobSize((Image *) image); if (*base != (tdata_t *) NULL) return(1); return(0); } static tsize_t TIFFReadBlob(thandle_t image,tdata_t data,tsize_t size) { tsize_t count; count=(tsize_t) ReadBlob((Image *) image,(size_t) size, (unsigned char *) data); return(count); } static int32 TIFFReadPixels(TIFF *tiff,const tsample_t sample,const ssize_t row, tdata_t scanline) { int32 status; status=TIFFReadScanline(tiff,scanline,(uint32) row,sample); return(status); } static toff_t TIFFSeekBlob(thandle_t image,toff_t offset,int whence) { return((toff_t) SeekBlob((Image *) image,(MagickOffsetType) offset,whence)); } static void TIFFUnmapBlob(thandle_t image,tdata_t base,toff_t size) { (void) image; (void) base; (void) size; } static void TIFFWarnings(const char *,const char *,va_list) magick_attribute((__format__ (__printf__,2,0))); static void TIFFWarnings(const char *module,const char *format,va_list warning) { char message[MagickPathExtent]; ExceptionInfo *exception; #if defined(MAGICKCORE_HAVE_VSNPRINTF) (void) vsnprintf(message,MagickPathExtent-2,format,warning); #else (void) vsprintf(message,format,warning); #endif message[MagickPathExtent-2]='\0'; (void) ConcatenateMagickString(message,".",MagickPathExtent); exception=(ExceptionInfo *) GetMagickThreadValue(tiff_exception); if (exception != (ExceptionInfo *) NULL) (void) ThrowMagickException(exception,GetMagickModule(),CoderWarning, message,"`%s'",module); } static tsize_t TIFFWriteBlob(thandle_t image,tdata_t data,tsize_t size) { tsize_t count; count=(tsize_t) WriteBlob((Image *) image,(size_t) size, (unsigned char *) data); return(count); } static TIFFMethodType GetJPEGMethod(Image* image,TIFF *tiff,uint16 photometric, uint16 bits_per_sample,uint16 samples_per_pixel) { #define BUFFER_SIZE 2048 MagickOffsetType position, offset; size_t i; TIFFMethodType method; #if defined(TIFF_VERSION_BIG) uint64 #else uint32 #endif *value; unsigned char buffer[BUFFER_SIZE+32]; unsigned short length; /* Only support 8 bit for now. */ if ((photometric != PHOTOMETRIC_SEPARATED) || (bits_per_sample != 8) || (samples_per_pixel != 4)) return(ReadGenericMethod); /* Search for Adobe APP14 JPEG marker. 
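  The first strip is scanned for JPEG markers: stand-alone SOI bytes are
  skipped, and if an APP14 (0xEE) segment with the expected 14-byte length
  is found, its transform byte selects ReadYCCKMethod (2 = YCCK) instead
  of plain strip reading.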
*/ value=NULL; if (!TIFFGetField(tiff,TIFFTAG_STRIPOFFSETS,&value) || (value == NULL)) return(ReadStripMethod); position=TellBlob(image); offset=(MagickOffsetType) (value[0]); if (SeekBlob(image,offset,SEEK_SET) != offset) return(ReadStripMethod); method=ReadStripMethod; if (ReadBlob(image,BUFFER_SIZE,buffer) == BUFFER_SIZE) { for (i=0; i < BUFFER_SIZE; i++) { while (i < BUFFER_SIZE) { if (buffer[i++] == 255) break; } while (i < BUFFER_SIZE) { if (buffer[++i] != 255) break; } if (buffer[i++] == 216) /* JPEG_MARKER_SOI */ continue; length=(unsigned short) (((unsigned int) (buffer[i] << 8) | (unsigned int) buffer[i+1]) & 0xffff); if (i+(size_t) length >= BUFFER_SIZE) break; if (buffer[i-1] == 238) /* JPEG_MARKER_APP0+14 */ { if (length != 14) break; /* 0 == CMYK, 1 == YCbCr, 2 = YCCK */ if (buffer[i+13] == 2) method=ReadYCCKMethod; break; } i+=(size_t) length; } } (void) SeekBlob(image,position,SEEK_SET); return(method); } static ssize_t TIFFReadCustomStream(unsigned char *data,const size_t count, void *user_data) { PhotoshopProfile *profile; size_t total; MagickOffsetType remaining; if (count == 0) return(0); profile=(PhotoshopProfile *) user_data; remaining=(MagickOffsetType) profile->length-profile->offset; if (remaining <= 0) return(-1); total=MagickMin(count, (size_t) remaining); (void) memcpy(data,profile->data->datum+profile->offset,total); profile->offset+=total; return(total); } static CustomStreamInfo *TIFFAcquireCustomStreamForReading( PhotoshopProfile *profile,ExceptionInfo *exception) { CustomStreamInfo *custom_stream; custom_stream=AcquireCustomStreamInfo(exception); if (custom_stream == (CustomStreamInfo *) NULL) return(custom_stream); SetCustomStreamData(custom_stream,(void *) profile); SetCustomStreamReader(custom_stream,TIFFReadCustomStream); SetCustomStreamSeeker(custom_stream,TIFFSeekCustomStream); SetCustomStreamTeller(custom_stream,TIFFTellCustomStream); return(custom_stream); } static void TIFFReadPhotoshopLayers(const ImageInfo *image_info,Image *image, ExceptionInfo *exception) { const char *option; const StringInfo *profile; CustomStreamInfo *custom_stream; Image *layers; ImageInfo *clone_info; PhotoshopProfile photoshop_profile; PSDInfo info; ssize_t i; if (GetImageListLength(image) != 1) return; if ((image_info->number_scenes == 1) && (image_info->scene == 0)) return; option=GetImageOption(image_info,"tiff:ignore-layers"); if (option != (const char * ) NULL) return; profile=GetImageProfile(image,"tiff:37724"); if (profile == (const StringInfo *) NULL) return; for (i=0; i < (ssize_t) profile->length-8; i++) { if (LocaleNCompare((const char *) (profile->datum+i), image->endian == MSBEndian ? "8BIM" : "MIB8",4) != 0) continue; i+=4; if ((LocaleNCompare((const char *) (profile->datum+i), image->endian == MSBEndian ? "Layr" : "ryaL",4) == 0) || (LocaleNCompare((const char *) (profile->datum+i), image->endian == MSBEndian ? "LMsk" : "ksML",4) == 0) || (LocaleNCompare((const char *) (profile->datum+i), image->endian == MSBEndian ? "Lr16" : "61rL",4) == 0) || (LocaleNCompare((const char *) (profile->datum+i), image->endian == MSBEndian ? 
"Lr32" : "23rL",4) == 0)) break; } i+=4; if (i >= (ssize_t) (profile->length-8)) return; photoshop_profile.data=(StringInfo *) profile; photoshop_profile.length=profile->length; custom_stream=TIFFAcquireCustomStreamForReading(&photoshop_profile,exception); if (custom_stream == (CustomStreamInfo *) NULL) return; layers=CloneImage(image,0,0,MagickTrue,exception); if (layers == (Image *) NULL) { custom_stream=DestroyCustomStreamInfo(custom_stream); return; } (void) DeleteImageProfile(layers,"tiff:37724"); AttachCustomStream(layers->blob,custom_stream); SeekBlob(layers,(MagickOffsetType) i,SEEK_SET); InitPSDInfo(layers,&info); clone_info=CloneImageInfo(image_info); clone_info->number_scenes=0; (void) ReadPSDLayers(layers,clone_info,&info,exception); clone_info=DestroyImageInfo(clone_info); DeleteImageFromList(&layers); if (layers != (Image *) NULL) { SetImageArtifact(image,"tiff:has-layers","true"); AppendImageToList(&image,layers); while (layers != (Image *) NULL) { SetImageArtifact(layers,"tiff:has-layers","true"); DetachBlob(layers->blob); layers=GetNextImageInList(layers); } } custom_stream=DestroyCustomStreamInfo(custom_stream); } #if defined(__cplusplus) || defined(c_plusplus) } #endif static Image *ReadTIFFImage(const ImageInfo *image_info, ExceptionInfo *exception) { #define ThrowTIFFException(severity,message) \ { \ if (pixel_info != (MemoryInfo *) NULL) \ pixel_info=RelinquishVirtualMemory(pixel_info); \ if (quantum_info != (QuantumInfo *) NULL) \ quantum_info=DestroyQuantumInfo(quantum_info); \ TIFFClose(tiff); \ ThrowReaderException(severity,message); \ } const char *option; float *chromaticity, x_position, y_position, x_resolution, y_resolution; Image *image; int tiff_status; MagickBooleanType more_frames; MagickSizeType number_pixels; MagickStatusType status; MemoryInfo *pixel_info = (MemoryInfo *) NULL; QuantumInfo *quantum_info; QuantumType quantum_type; ssize_t i, scanline_size, y; TIFF *tiff; TIFFMethodType method; uint16 compress_tag, bits_per_sample, endian, extra_samples, interlace, max_sample_value, min_sample_value, orientation, pages, photometric, *sample_info, sample_format, samples_per_pixel, units, value; uint32 height, rows_per_strip, width; unsigned char *pixels; void *sans[4] = { NULL, NULL, NULL, NULL }; /* Open image. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); image=AcquireImage(image_info,exception); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { image=DestroyImageList(image); return((Image *) NULL); } (void) SetMagickThreadValue(tiff_exception,exception); tiff=TIFFClientOpen(image->filename,"rb",(thandle_t) image,TIFFReadBlob, TIFFWriteBlob,TIFFSeekBlob,TIFFCloseBlob,TIFFGetBlobSize,TIFFMapBlob, TIFFUnmapBlob); if (tiff == (TIFF *) NULL) { image=DestroyImageList(image); return((Image *) NULL); } if (exception->severity > ErrorException) { TIFFClose(tiff); image=DestroyImageList(image); return((Image *) NULL); } if (image_info->number_scenes != 0) { /* Generate blank images for subimage specification (e.g. image.tif[4]. We need to check the number of directores because it is possible that the subimage(s) are stored in the photoshop profile. 
*/ if (image_info->scene < (size_t) TIFFNumberOfDirectories(tiff)) { for (i=0; i < (ssize_t) image_info->scene; i++) { status=TIFFReadDirectory(tiff) != 0 ? MagickTrue : MagickFalse; if (status == MagickFalse) { TIFFClose(tiff); image=DestroyImageList(image); return((Image *) NULL); } AcquireNextImage(image_info,image,exception); if (GetNextImageInList(image) == (Image *) NULL) { TIFFClose(tiff); image=DestroyImageList(image); return((Image *) NULL); } image=SyncNextImageInList(image); } } } more_frames=MagickTrue; do { /* TIFFPrintDirectory(tiff,stdout,MagickFalse); */ photometric=PHOTOMETRIC_RGB; if ((TIFFGetField(tiff,TIFFTAG_IMAGEWIDTH,&width) != 1) || (TIFFGetField(tiff,TIFFTAG_IMAGELENGTH,&height) != 1) || (TIFFGetFieldDefaulted(tiff,TIFFTAG_PHOTOMETRIC,&photometric,sans) != 1) || (TIFFGetFieldDefaulted(tiff,TIFFTAG_COMPRESSION,&compress_tag,sans) != 1) || (TIFFGetFieldDefaulted(tiff,TIFFTAG_FILLORDER,&endian,sans) != 1) || (TIFFGetFieldDefaulted(tiff,TIFFTAG_PLANARCONFIG,&interlace,sans) != 1) || (TIFFGetFieldDefaulted(tiff,TIFFTAG_SAMPLESPERPIXEL,&samples_per_pixel,sans) != 1) || (TIFFGetFieldDefaulted(tiff,TIFFTAG_BITSPERSAMPLE,&bits_per_sample,sans) != 1) || (TIFFGetFieldDefaulted(tiff,TIFFTAG_SAMPLEFORMAT,&sample_format,sans) != 1) || (TIFFGetFieldDefaulted(tiff,TIFFTAG_MINSAMPLEVALUE,&min_sample_value,sans) != 1) || (TIFFGetFieldDefaulted(tiff,TIFFTAG_MAXSAMPLEVALUE,&max_sample_value,sans) != 1)) { TIFFClose(tiff); ThrowReaderException(CorruptImageError,"ImproperImageHeader"); } if (((sample_format != SAMPLEFORMAT_IEEEFP) || (bits_per_sample != 64)) && ((bits_per_sample <= 0) || (bits_per_sample > 32))) { TIFFClose(tiff); ThrowReaderException(CorruptImageError,"UnsupportedBitsPerPixel"); } if (samples_per_pixel > MaxPixelChannels) { TIFFClose(tiff); ThrowReaderException(CorruptImageError,"MaximumChannelsExceeded"); } if (sample_format == SAMPLEFORMAT_IEEEFP) (void) SetImageProperty(image,"quantum:format","floating-point", exception); switch (photometric) { case PHOTOMETRIC_MINISBLACK: { (void) SetImageProperty(image,"tiff:photometric","min-is-black", exception); break; } case PHOTOMETRIC_MINISWHITE: { (void) SetImageProperty(image,"tiff:photometric","min-is-white", exception); break; } case PHOTOMETRIC_PALETTE: { (void) SetImageProperty(image,"tiff:photometric","palette",exception); break; } case PHOTOMETRIC_RGB: { (void) SetImageProperty(image,"tiff:photometric","RGB",exception); break; } case PHOTOMETRIC_CIELAB: { (void) SetImageProperty(image,"tiff:photometric","CIELAB",exception); break; } case PHOTOMETRIC_LOGL: { (void) SetImageProperty(image,"tiff:photometric","CIE Log2(L)", exception); break; } case PHOTOMETRIC_LOGLUV: { (void) SetImageProperty(image,"tiff:photometric","LOGLUV",exception); break; } #if defined(PHOTOMETRIC_MASK) case PHOTOMETRIC_MASK: { (void) SetImageProperty(image,"tiff:photometric","MASK",exception); break; } #endif case PHOTOMETRIC_SEPARATED: { (void) SetImageProperty(image,"tiff:photometric","separated",exception); break; } case PHOTOMETRIC_YCBCR: { (void) SetImageProperty(image,"tiff:photometric","YCBCR",exception); break; } default: { (void) SetImageProperty(image,"tiff:photometric","unknown",exception); break; } } if (image->debug != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(),"Geometry: %ux%u", (unsigned int) width,(unsigned int) height); (void) LogMagickEvent(CoderEvent,GetMagickModule(),"Interlace: %u", interlace); (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Bits per sample: %u",bits_per_sample); (void) 
LogMagickEvent(CoderEvent,GetMagickModule(), "Min sample value: %u",min_sample_value); (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Max sample value: %u",max_sample_value); (void) LogMagickEvent(CoderEvent,GetMagickModule(),"Photometric " "interpretation: %s",GetImageProperty(image,"tiff:photometric", exception)); } image->columns=(size_t) width; image->rows=(size_t) height; image->depth=(size_t) bits_per_sample; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(),"Image depth: %.20g", (double) image->depth); image->endian=MSBEndian; if (endian == FILLORDER_LSB2MSB) image->endian=LSBEndian; #if defined(MAGICKCORE_HAVE_TIFFISBIGENDIAN) if (TIFFIsBigEndian(tiff) == 0) { (void) SetImageProperty(image,"tiff:endian","lsb",exception); image->endian=LSBEndian; } else { (void) SetImageProperty(image,"tiff:endian","msb",exception); image->endian=MSBEndian; } #endif if ((photometric == PHOTOMETRIC_MINISBLACK) || (photometric == PHOTOMETRIC_MINISWHITE)) image->colorspace=GRAYColorspace; if (photometric == PHOTOMETRIC_SEPARATED) image->colorspace=CMYKColorspace; if (photometric == PHOTOMETRIC_CIELAB) image->colorspace=LabColorspace; if ((photometric == PHOTOMETRIC_YCBCR) && (compress_tag != COMPRESSION_JPEG)) image->colorspace=YCbCrColorspace; status=TIFFGetProfiles(tiff,image,exception); if (status == MagickFalse) { TIFFClose(tiff); return(DestroyImageList(image)); } status=TIFFGetProperties(tiff,image,exception); if (status == MagickFalse) { TIFFClose(tiff); return(DestroyImageList(image)); } option=GetImageOption(image_info,"tiff:exif-properties"); if (IsStringFalse(option) == MagickFalse) /* enabled by default */ (void) TIFFGetEXIFProperties(tiff,image,exception); option=GetImageOption(image_info,"tiff:gps-properties"); if (IsStringFalse(option) == MagickFalse) /* enabled by default */ (void) TIFFGetGPSProperties(tiff,image,exception); if ((TIFFGetFieldDefaulted(tiff,TIFFTAG_XRESOLUTION,&x_resolution,sans) == 1) && (TIFFGetFieldDefaulted(tiff,TIFFTAG_YRESOLUTION,&y_resolution,sans) == 1)) { image->resolution.x=x_resolution; image->resolution.y=y_resolution; } if (TIFFGetFieldDefaulted(tiff,TIFFTAG_RESOLUTIONUNIT,&units,sans,sans) == 1) { if (units == RESUNIT_INCH) image->units=PixelsPerInchResolution; if (units == RESUNIT_CENTIMETER) image->units=PixelsPerCentimeterResolution; } if ((TIFFGetFieldDefaulted(tiff,TIFFTAG_XPOSITION,&x_position,sans) == 1) && (TIFFGetFieldDefaulted(tiff,TIFFTAG_YPOSITION,&y_position,sans) == 1)) { image->page.x=CastDoubleToLong(ceil(x_position* image->resolution.x-0.5)); image->page.y=CastDoubleToLong(ceil(y_position* image->resolution.y-0.5)); } if (TIFFGetFieldDefaulted(tiff,TIFFTAG_ORIENTATION,&orientation,sans) == 1) image->orientation=(OrientationType) orientation; if (TIFFGetField(tiff,TIFFTAG_WHITEPOINT,&chromaticity) == 1) { if ((chromaticity != (float *) NULL) && (*chromaticity != 0.0)) { image->chromaticity.white_point.x=chromaticity[0]; image->chromaticity.white_point.y=chromaticity[1]; } } if (TIFFGetField(tiff,TIFFTAG_PRIMARYCHROMATICITIES,&chromaticity) == 1) { if ((chromaticity != (float *) NULL) && (*chromaticity != 0.0)) { image->chromaticity.red_primary.x=chromaticity[0]; image->chromaticity.red_primary.y=chromaticity[1]; image->chromaticity.green_primary.x=chromaticity[2]; image->chromaticity.green_primary.y=chromaticity[3]; image->chromaticity.blue_primary.x=chromaticity[4]; image->chromaticity.blue_primary.y=chromaticity[5]; } } #if defined(MAGICKCORE_HAVE_TIFFISCODECCONFIGURED) || (TIFFLIB_VERSION > 
20040919) if ((compress_tag != COMPRESSION_NONE) && (TIFFIsCODECConfigured(compress_tag) == 0)) { TIFFClose(tiff); ThrowReaderException(CoderError,"CompressNotSupported"); } #endif switch (compress_tag) { case COMPRESSION_NONE: image->compression=NoCompression; break; case COMPRESSION_CCITTFAX3: image->compression=FaxCompression; break; case COMPRESSION_CCITTFAX4: image->compression=Group4Compression; break; case COMPRESSION_JPEG: { image->compression=JPEGCompression; #if defined(JPEG_SUPPORT) { char sampling_factor[MagickPathExtent]; uint16 horizontal, vertical; tiff_status=TIFFGetField(tiff,TIFFTAG_YCBCRSUBSAMPLING,&horizontal, &vertical); if (tiff_status == 1) { (void) FormatLocaleString(sampling_factor,MagickPathExtent, "%dx%d",horizontal,vertical); (void) SetImageProperty(image,"jpeg:sampling-factor", sampling_factor,exception); (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Sampling Factors: %s",sampling_factor); } } #endif break; } case COMPRESSION_OJPEG: image->compression=JPEGCompression; break; #if defined(COMPRESSION_LZMA) case COMPRESSION_LZMA: image->compression=LZMACompression; break; #endif case COMPRESSION_LZW: image->compression=LZWCompression; break; case COMPRESSION_DEFLATE: image->compression=ZipCompression; break; case COMPRESSION_ADOBE_DEFLATE: image->compression=ZipCompression; break; #if defined(COMPRESSION_WEBP) case COMPRESSION_WEBP: image->compression=WebPCompression; break; #endif #if defined(COMPRESSION_ZSTD) case COMPRESSION_ZSTD: image->compression=ZstdCompression; break; #endif default: image->compression=RLECompression; break; } quantum_info=(QuantumInfo *) NULL; if ((photometric == PHOTOMETRIC_PALETTE) && (pow(2.0,1.0*bits_per_sample) <= MaxColormapSize)) { size_t colors; colors=(size_t) GetQuantumRange(bits_per_sample)+1; if (AcquireImageColormap(image,colors,exception) == MagickFalse) { TIFFClose(tiff); ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); } } value=(unsigned short) image->scene; if (TIFFGetFieldDefaulted(tiff,TIFFTAG_PAGENUMBER,&value,&pages,sans) == 1) image->scene=value; if (image->storage_class == PseudoClass) { size_t range; uint16 *blue_colormap, *green_colormap, *red_colormap; /* Initialize colormap. */ tiff_status=TIFFGetField(tiff,TIFFTAG_COLORMAP,&red_colormap, &green_colormap,&blue_colormap); if (tiff_status == 1) { if ((red_colormap != (uint16 *) NULL) && (green_colormap != (uint16 *) NULL) && (blue_colormap != (uint16 *) NULL)) { range=255; /* might be old style 8-bit colormap */ for (i=0; i < (ssize_t) image->colors; i++) if ((red_colormap[i] >= 256) || (green_colormap[i] >= 256) || (blue_colormap[i] >= 256)) { range=65535; break; } for (i=0; i < (ssize_t) image->colors; i++) { image->colormap[i].red=ClampToQuantum(((double) QuantumRange*red_colormap[i])/range); image->colormap[i].green=ClampToQuantum(((double) QuantumRange*green_colormap[i])/range); image->colormap[i].blue=ClampToQuantum(((double) QuantumRange*blue_colormap[i])/range); } } } } if (image_info->ping != MagickFalse) { if (image_info->number_scenes != 0) if (image->scene >= (image_info->scene+image_info->number_scenes-1)) break; goto next_tiff_frame; } status=SetImageExtent(image,image->columns,image->rows,exception); if (status == MagickFalse) { TIFFClose(tiff); return(DestroyImageList(image)); } status=SetImageColorspace(image,image->colorspace,exception); status&=ResetImagePixels(image,exception); if (status == MagickFalse) { TIFFClose(tiff); return(DestroyImageList(image)); } /* Allocate memory for the image and pixel buffer. 
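  The staging buffer is sized conservatively: number_pixels takes the
  larger of a padded scanline (bits per sample rounded up to a power of
  two) and a full strip of pixels, and TIFFScanlineSize() is cross-checked
  against it so a corrupt file cannot demand a scanline wildly larger than
  the geometry implies.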
*/ quantum_info=AcquireQuantumInfo(image_info,image); if (quantum_info == (QuantumInfo *) NULL) ThrowTIFFException(ResourceLimitError,"MemoryAllocationFailed"); if (sample_format == SAMPLEFORMAT_UINT) status=SetQuantumFormat(image,quantum_info,UnsignedQuantumFormat); if (sample_format == SAMPLEFORMAT_INT) status=SetQuantumFormat(image,quantum_info,SignedQuantumFormat); if (sample_format == SAMPLEFORMAT_IEEEFP) status=SetQuantumFormat(image,quantum_info,FloatingPointQuantumFormat); if (status == MagickFalse) ThrowTIFFException(ResourceLimitError,"MemoryAllocationFailed"); status=MagickTrue; switch (photometric) { case PHOTOMETRIC_MINISBLACK: { quantum_info->min_is_white=MagickFalse; break; } case PHOTOMETRIC_MINISWHITE: { quantum_info->min_is_white=MagickTrue; break; } default: break; } extra_samples=0; tiff_status=TIFFGetFieldDefaulted(tiff,TIFFTAG_EXTRASAMPLES,&extra_samples, &sample_info,sans); if (tiff_status == 1) { (void) SetImageProperty(image,"tiff:alpha","unspecified",exception); if (extra_samples == 0) { if ((samples_per_pixel == 4) && (photometric == PHOTOMETRIC_RGB)) image->alpha_trait=BlendPixelTrait; } else for (i=0; i < extra_samples; i++) { image->alpha_trait=BlendPixelTrait; if (sample_info[i] == EXTRASAMPLE_ASSOCALPHA) { SetQuantumAlphaType(quantum_info,AssociatedQuantumAlpha); (void) SetImageProperty(image,"tiff:alpha","associated", exception); } else if (sample_info[i] == EXTRASAMPLE_UNASSALPHA) { SetQuantumAlphaType(quantum_info,DisassociatedQuantumAlpha); (void) SetImageProperty(image,"tiff:alpha","unassociated", exception); } } } if (image->alpha_trait != UndefinedPixelTrait) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception); method=ReadGenericMethod; rows_per_strip=(uint32) image->rows; if (TIFFGetField(tiff,TIFFTAG_ROWSPERSTRIP,&rows_per_strip) == 1) { char buffer[MagickPathExtent]; (void) FormatLocaleString(buffer,MagickPathExtent,"%u", (unsigned int) rows_per_strip); (void) SetImageProperty(image,"tiff:rows-per-strip",buffer,exception); method=ReadStripMethod; if (rows_per_strip > (uint32) image->rows) rows_per_strip=(uint32) image->rows; } if (TIFFIsTiled(tiff) != MagickFalse) { uint32 columns, rows; if ((TIFFGetField(tiff,TIFFTAG_TILEWIDTH,&columns) != 1) || (TIFFGetField(tiff,TIFFTAG_TILELENGTH,&rows) != 1)) ThrowTIFFException(CoderError,"ImageIsNotTiled"); if ((AcquireMagickResource(WidthResource,columns) == MagickFalse) || (AcquireMagickResource(HeightResource,rows) == MagickFalse)) ThrowTIFFException(ImageError,"WidthOrHeightExceedsLimit"); method=ReadTileMethod; } if ((photometric == PHOTOMETRIC_LOGLUV) || (compress_tag == COMPRESSION_CCITTFAX3)) method=ReadGenericMethod; if (image->compression == JPEGCompression) method=GetJPEGMethod(image,tiff,photometric,bits_per_sample, samples_per_pixel); quantum_info->endian=LSBEndian; scanline_size=TIFFScanlineSize(tiff); if (scanline_size <= 0) ThrowTIFFException(ResourceLimitError,"MemoryAllocationFailed"); number_pixels=MagickMax((MagickSizeType) image->columns*samples_per_pixel* pow(2.0,ceil(log(bits_per_sample)/log(2.0))),image->columns* rows_per_strip); if ((double) scanline_size > 1.5*number_pixels) ThrowTIFFException(CorruptImageError,"CorruptImage"); number_pixels=MagickMax((MagickSizeType) scanline_size,number_pixels); pixel_info=AcquireVirtualMemory(number_pixels,sizeof(uint32)); if (pixel_info == (MemoryInfo *) NULL) ThrowTIFFException(ResourceLimitError,"MemoryAllocationFailed"); pixels=(unsigned char *) GetVirtualMemoryBlob(pixel_info); (void) memset(pixels,0,number_pixels*sizeof(uint32)); 
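    /* Choose the quantum type matching the sample layout: gray/indexed by
       default, RGB(A)/CMYK(A) for chunky data, with the pad covering any
       trailing samples the chosen quantum type does not consume. Planar
       (PLANARCONFIG_SEPARATE) files are instead imported one channel per
       pass in the strip/tile loops below. */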
quantum_type=GrayQuantum; if (image->storage_class == PseudoClass) quantum_type=IndexQuantum; if (interlace != PLANARCONFIG_SEPARATE) { size_t pad; pad=(size_t) MagickMax((ssize_t) samples_per_pixel-1,0); if (image->alpha_trait != UndefinedPixelTrait) { if (image->storage_class == PseudoClass) quantum_type=IndexAlphaQuantum; else quantum_type=samples_per_pixel == 1 ? AlphaQuantum : GrayAlphaQuantum; } if ((samples_per_pixel > 2) && (interlace != PLANARCONFIG_SEPARATE)) { quantum_type=RGBQuantum; pad=(size_t) MagickMax((size_t) samples_per_pixel-3,0); if (image->alpha_trait != UndefinedPixelTrait) { quantum_type=RGBAQuantum; pad=(size_t) MagickMax((size_t) samples_per_pixel-4,0); } if (image->colorspace == CMYKColorspace) { quantum_type=CMYKQuantum; pad=(size_t) MagickMax((size_t) samples_per_pixel-4,0); if (image->alpha_trait != UndefinedPixelTrait) { quantum_type=CMYKAQuantum; pad=(size_t) MagickMax((size_t) samples_per_pixel-5,0); } } status=SetQuantumPad(image,quantum_info,pad*((bits_per_sample+7) >> 3)); if (status == MagickFalse) ThrowTIFFException(ResourceLimitError,"MemoryAllocationFailed"); } } switch (method) { case ReadYCCKMethod: { /* Convert YCC TIFF image. */ for (y=0; y < (ssize_t) image->rows; y++) { Quantum *magick_restrict q; ssize_t x; unsigned char *p; tiff_status=TIFFReadPixels(tiff,0,y,(char *) pixels); if (tiff_status == -1) break; q=QueueAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) break; p=pixels; for (x=0; x < (ssize_t) image->columns; x++) { SetPixelCyan(image,ScaleCharToQuantum(ClampYCC((double) *p+ (1.402*(double) *(p+2))-179.456)),q); SetPixelMagenta(image,ScaleCharToQuantum(ClampYCC((double) *p- (0.34414*(double) *(p+1))-(0.71414*(double ) *(p+2))+ 135.45984)),q); SetPixelYellow(image,ScaleCharToQuantum(ClampYCC((double) *p+ (1.772*(double) *(p+1))-226.816)),q); SetPixelBlack(image,ScaleCharToQuantum((unsigned char) *(p+3)),q); q+=GetPixelChannels(image); p+=4; } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; if (image->previous == (Image *) NULL) { status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) break; } } break; } case ReadStripMethod: { unsigned char *p; size_t extent; ssize_t stride, strip_id; tsize_t strip_size; unsigned char *strip_pixels; /* Convert stripped TIFF image. 
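  Strips are read with TIFFReadEncodedStrip() into a zeroed staging buffer
  (twice TIFFStripSize() plus a row of slack) and imported one row at a
  time at TIFFVStripSize(tiff,1) stride; for planar files the outer loop
  repeats per sample plane, switching the quantum type per channel.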
*/ extent=2*TIFFStripSize(tiff); #if defined(TIFF_VERSION_BIG) extent+=image->columns*sizeof(uint64); #else extent+=image->columns*sizeof(uint32); #endif strip_pixels=(unsigned char *) AcquireQuantumMemory(extent, sizeof(*strip_pixels)); if (strip_pixels == (unsigned char *) NULL) ThrowTIFFException(ResourceLimitError,"MemoryAllocationFailed"); (void) memset(strip_pixels,0,extent*sizeof(*strip_pixels)); stride=TIFFVStripSize(tiff,1); strip_id=0; p=strip_pixels; for (i=0; i < (ssize_t) samples_per_pixel; i++) { size_t rows_remaining; switch (i) { case 0: break; case 1: quantum_type=GreenQuantum; break; case 2: quantum_type=BlueQuantum; break; case 3: { quantum_type=AlphaQuantum; if (image->colorspace == CMYKColorspace) quantum_type=BlackQuantum; break; } case 4: quantum_type=AlphaQuantum; break; default: break; } rows_remaining=0; for (y=0; y < (ssize_t) image->rows; y++) { Quantum *magick_restrict q; q=GetAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) break; if (rows_remaining == 0) { strip_size=TIFFReadEncodedStrip(tiff,strip_id,strip_pixels, TIFFStripSize(tiff)); if (strip_size == -1) break; rows_remaining=rows_per_strip; if ((y+rows_per_strip) > (ssize_t) image->rows) rows_remaining=(rows_per_strip-(y+rows_per_strip- image->rows)); p=strip_pixels; strip_id++; } (void) ImportQuantumPixels(image,(CacheView *) NULL, quantum_info,quantum_type,p,exception); p+=stride; rows_remaining--; if (SyncAuthenticPixels(image,exception) == MagickFalse) break; if (image->previous == (Image *) NULL) { status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) break; } } if ((samples_per_pixel > 1) && (interlace != PLANARCONFIG_SEPARATE)) break; } strip_pixels=(unsigned char *) RelinquishMagickMemory(strip_pixels); break; } case ReadTileMethod: { unsigned char *p; size_t extent; uint32 columns, rows; unsigned char *tile_pixels; /* Convert tiled TIFF image. 
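  Tiles of columns x rows pixels are fetched with TIFFReadTile() and
  imported row-by-row, clamping the ragged tiles at the right and bottom
  edges; as with strips, planar files repeat the pass once per sample
  plane.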
*/ if ((TIFFGetField(tiff,TIFFTAG_TILEWIDTH,&columns) != 1) || (TIFFGetField(tiff,TIFFTAG_TILELENGTH,&rows) != 1)) ThrowTIFFException(CoderError,"ImageIsNotTiled"); number_pixels=(MagickSizeType) columns*rows; if (HeapOverflowSanityCheck(rows,sizeof(*tile_pixels)) != MagickFalse) ThrowTIFFException(ResourceLimitError,"MemoryAllocationFailed"); extent=TIFFTileSize(tiff); #if defined(TIFF_VERSION_BIG) extent+=columns*sizeof(uint64); #else extent+=columns*sizeof(uint32); #endif tile_pixels=(unsigned char *) AcquireQuantumMemory(extent, sizeof(*tile_pixels)); if (tile_pixels == (unsigned char *) NULL) ThrowTIFFException(ResourceLimitError,"MemoryAllocationFailed"); (void) memset(tile_pixels,0,extent*sizeof(*tile_pixels)); for (i=0; i < (ssize_t) samples_per_pixel; i++) { switch (i) { case 0: break; case 1: quantum_type=GreenQuantum; break; case 2: quantum_type=BlueQuantum; break; case 3: { quantum_type=AlphaQuantum; if (image->colorspace == CMYKColorspace) quantum_type=BlackQuantum; break; } case 4: quantum_type=AlphaQuantum; break; default: break; } for (y=0; y < (ssize_t) image->rows; y+=rows) { ssize_t x; size_t rows_remaining; rows_remaining=image->rows-y; if ((ssize_t) (y+rows) < (ssize_t) image->rows) rows_remaining=rows; for (x=0; x < (ssize_t) image->columns; x+=columns) { size_t columns_remaining, row; columns_remaining=image->columns-x; if ((ssize_t) (x+columns) < (ssize_t) image->columns) columns_remaining=columns; if (TIFFReadTile(tiff,tile_pixels,(uint32) x,(uint32) y,0,i) == 0) break; p=tile_pixels; for (row=0; row < rows_remaining; row++) { Quantum *magick_restrict q; q=GetAuthenticPixels(image,x,y+row,columns_remaining,1, exception); if (q == (Quantum *) NULL) break; (void) ImportQuantumPixels(image,(CacheView *) NULL, quantum_info,quantum_type,p,exception); p+=TIFFTileRowSize(tiff); if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } } } if ((samples_per_pixel > 1) && (interlace != PLANARCONFIG_SEPARATE)) break; if (image->previous == (Image *) NULL) { status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) i, samples_per_pixel); if (status == MagickFalse) break; } } tile_pixels=(unsigned char *) RelinquishMagickMemory(tile_pixels); break; } case ReadGenericMethod: default: { MemoryInfo *generic_info = (MemoryInfo * ) NULL; uint32 *p; uint32 *pixels; /* Convert generic TIFF image. 
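  This fallback lets libtiff decode everything via TIFFReadRGBAImage(),
  which returns 8-bit RGBA in bottom-up order; hence the walk below starts
  at the last element and fills each row right to left.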
*/ if (HeapOverflowSanityCheck(image->rows,sizeof(*pixels)) != MagickFalse) ThrowTIFFException(ResourceLimitError,"MemoryAllocationFailed"); number_pixels=(MagickSizeType) image->columns*image->rows; #if defined(TIFF_VERSION_BIG) number_pixels+=image->columns*sizeof(uint64); #else number_pixels+=image->columns*sizeof(uint32); #endif generic_info=AcquireVirtualMemory(number_pixels,sizeof(uint32)); if (generic_info == (MemoryInfo *) NULL) ThrowTIFFException(ResourceLimitError,"MemoryAllocationFailed"); pixels=(uint32 *) GetVirtualMemoryBlob(generic_info); (void) TIFFReadRGBAImage(tiff,(uint32) image->columns,(uint32) image->rows,(uint32 *) pixels,0); p=pixels+(image->columns*image->rows)-1; for (y=0; y < (ssize_t) image->rows; y++) { ssize_t x; Quantum *magick_restrict q; q=QueueAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) break; q+=GetPixelChannels(image)*(image->columns-1); for (x=0; x < (ssize_t) image->columns; x++) { SetPixelRed(image,ScaleCharToQuantum((unsigned char) TIFFGetR(*p)),q); SetPixelGreen(image,ScaleCharToQuantum((unsigned char) TIFFGetG(*p)),q); SetPixelBlue(image,ScaleCharToQuantum((unsigned char) TIFFGetB(*p)),q); if (image->alpha_trait != UndefinedPixelTrait) SetPixelAlpha(image,ScaleCharToQuantum((unsigned char) TIFFGetA(*p)),q); p--; q-=GetPixelChannels(image); } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; if (image->previous == (Image *) NULL) { status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) break; } } generic_info=RelinquishVirtualMemory(generic_info); break; } } pixel_info=RelinquishVirtualMemory(pixel_info); SetQuantumImageType(image,quantum_type); next_tiff_frame: if (quantum_info != (QuantumInfo *) NULL) quantum_info=DestroyQuantumInfo(quantum_info); if (photometric == PHOTOMETRIC_CIELAB) DecodeLabImage(image,exception); if ((photometric == PHOTOMETRIC_LOGL) || (photometric == PHOTOMETRIC_MINISBLACK) || (photometric == PHOTOMETRIC_MINISWHITE)) { image->type=GrayscaleType; if (bits_per_sample == 1) image->type=BilevelType; } /* Proceed to next image. */ if (image_info->number_scenes != 0) if (image->scene >= (image_info->scene+image_info->number_scenes-1)) break; more_frames=TIFFReadDirectory(tiff) != 0 ? MagickTrue : MagickFalse; if (more_frames != MagickFalse) { /* Allocate next image structure. */ AcquireNextImage(image_info,image,exception); if (GetNextImageInList(image) == (Image *) NULL) { status=MagickFalse; break; } image=SyncNextImageInList(image); status=SetImageProgress(image,LoadImagesTag,image->scene-1, image->scene); if (status == MagickFalse) break; } } while ((status != MagickFalse) && (more_frames != MagickFalse)); TIFFClose(tiff); if (status != MagickFalse) TIFFReadPhotoshopLayers(image_info,image,exception); if ((image_info->number_scenes != 0) && (image_info->scene >= GetImageListLength(image))) status=MagickFalse; if (status == MagickFalse) return(DestroyImageList(image)); return(GetFirstImageInList(image)); } #endif /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e g i s t e r T I F F I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RegisterTIFFImage() adds properties for the TIFF image format to % the list of supported formats. 
The properties include the image format % tag, a method to read and/or write the format, whether the format % supports the saving of more than one frame to the same file or blob, % whether the format supports native in-memory I/O, and a brief % description of the format. % % The format of the RegisterTIFFImage method is: % % size_t RegisterTIFFImage(void) % */ #if defined(MAGICKCORE_TIFF_DELEGATE) #if defined(MAGICKCORE_HAVE_TIFFMERGEFIELDINFO) && defined(MAGICKCORE_HAVE_TIFFSETTAGEXTENDER) static TIFFExtendProc tag_extender = (TIFFExtendProc) NULL; static void TIFFIgnoreTags(TIFF *tiff) { char *q; const char *p, *tags; Image *image; ssize_t i; size_t count; TIFFFieldInfo *ignore; if (TIFFGetReadProc(tiff) != TIFFReadBlob) return; image=(Image *)TIFFClientdata(tiff); tags=GetImageArtifact(image,"tiff:ignore-tags"); if (tags == (const char *) NULL) return; count=0; p=tags; while (*p != '\0') { while ((isspace((int) ((unsigned char) *p)) != 0)) p++; (void) strtol(p,&q,10); if (p == q) return; p=q; count++; while ((isspace((int) ((unsigned char) *p)) != 0) || (*p == ',')) p++; } if (count == 0) return; i=0; p=tags; ignore=(TIFFFieldInfo *) AcquireQuantumMemory(count,sizeof(*ignore)); if (ignore == (TIFFFieldInfo *) NULL) return; /* This also sets field_bit to 0 (FIELD_IGNORE). */ (void) memset(ignore,0,count*sizeof(*ignore)); while (*p != '\0') { while ((isspace((int) ((unsigned char) *p)) != 0)) p++; ignore[i].field_tag=(ttag_t) strtol(p,&q,10); p=q; i++; while ((isspace((int) ((unsigned char) *p)) != 0) || (*p == ',')) p++; } (void) TIFFMergeFieldInfo(tiff,ignore,(uint32) count); ignore=(TIFFFieldInfo *) RelinquishMagickMemory(ignore); } static void TIFFTagExtender(TIFF *tiff) { static const TIFFFieldInfo TIFFExtensions[] = { { 37724, -3, -3, TIFF_UNDEFINED, FIELD_CUSTOM, 1, 1, (char *) "PhotoshopLayerData" }, { 34118, -3, -3, TIFF_UNDEFINED, FIELD_CUSTOM, 1, 1, (char *) "Microscope" } }; TIFFMergeFieldInfo(tiff,TIFFExtensions,sizeof(TIFFExtensions)/ sizeof(*TIFFExtensions)); if (tag_extender != (TIFFExtendProc) NULL) (*tag_extender)(tiff); TIFFIgnoreTags(tiff); } #endif #endif ModuleExport size_t RegisterTIFFImage(void) { #define TIFFDescription "Tagged Image File Format" char version[MagickPathExtent]; MagickInfo *entry; #if defined(MAGICKCORE_TIFF_DELEGATE) if (tiff_semaphore == (SemaphoreInfo *) NULL) ActivateSemaphoreInfo(&tiff_semaphore); LockSemaphoreInfo(tiff_semaphore); if (instantiate_key == MagickFalse) { if (CreateMagickThreadKey(&tiff_exception,NULL) == MagickFalse) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); error_handler=TIFFSetErrorHandler(TIFFErrors); warning_handler=TIFFSetWarningHandler(TIFFWarnings); #if defined(MAGICKCORE_HAVE_TIFFMERGEFIELDINFO) && defined(MAGICKCORE_HAVE_TIFFSETTAGEXTENDER) if (tag_extender == (TIFFExtendProc) NULL) tag_extender=TIFFSetTagExtender(TIFFTagExtender); #endif instantiate_key=MagickTrue; } UnlockSemaphoreInfo(tiff_semaphore); #endif *version='\0'; #if defined(TIFF_VERSION) (void) FormatLocaleString(version,MagickPathExtent,"%d",TIFF_VERSION); #endif #if defined(MAGICKCORE_TIFF_DELEGATE) { const char *p; ssize_t i; p=TIFFGetVersion(); for (i=0; (i < (MagickPathExtent-1)) && (*p != 0) && (*p != '\n'); i++) version[i]=(*p++); version[i]='\0'; } #endif entry=AcquireMagickInfo("TIFF","GROUP4","Raw CCITT Group4"); #if defined(MAGICKCORE_TIFF_DELEGATE) entry->decoder=(DecodeImageHandler *) ReadGROUP4Image; entry->encoder=(EncodeImageHandler *) WriteGROUP4Image; #endif entry->flags|=CoderRawSupportFlag; 
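  /*
    Flag idiom used throughout these registrations: |= turns a capability bit
    on, while ^= below flips off bits that a freshly acquired MagickInfo has
    set by default (notably CoderAdjoinFlag and CoderUseExtensionFlag); a raw
    GROUP4 blob holds a single frame, hence adjoin is disabled for it.
  */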
entry->flags|=CoderEndianSupportFlag; entry->flags|=CoderDecoderSeekableStreamFlag; entry->flags|=CoderEncoderSeekableStreamFlag; entry->flags^=CoderAdjoinFlag; entry->flags^=CoderUseExtensionFlag; entry->format_type=ImplicitFormatType; entry->mime_type=ConstantString("image/tiff"); (void) RegisterMagickInfo(entry); entry=AcquireMagickInfo("TIFF","PTIF","Pyramid encoded TIFF"); #if defined(MAGICKCORE_TIFF_DELEGATE) entry->decoder=(DecodeImageHandler *) ReadTIFFImage; entry->encoder=(EncodeImageHandler *) WritePTIFImage; #endif entry->flags|=CoderEndianSupportFlag; entry->flags|=CoderDecoderSeekableStreamFlag; entry->flags|=CoderEncoderSeekableStreamFlag; entry->flags^=CoderUseExtensionFlag; entry->mime_type=ConstantString("image/tiff"); (void) RegisterMagickInfo(entry); entry=AcquireMagickInfo("TIFF","TIF",TIFFDescription); #if defined(MAGICKCORE_TIFF_DELEGATE) entry->decoder=(DecodeImageHandler *) ReadTIFFImage; entry->encoder=(EncodeImageHandler *) WriteTIFFImage; #endif entry->flags|=CoderEndianSupportFlag; entry->flags|=CoderDecoderSeekableStreamFlag; entry->flags|=CoderEncoderSeekableStreamFlag; entry->flags|=CoderStealthFlag; entry->flags^=CoderUseExtensionFlag; if (*version != '\0') entry->version=ConstantString(version); entry->mime_type=ConstantString("image/tiff"); (void) RegisterMagickInfo(entry); entry=AcquireMagickInfo("TIFF","TIFF",TIFFDescription); #if defined(MAGICKCORE_TIFF_DELEGATE) entry->decoder=(DecodeImageHandler *) ReadTIFFImage; entry->encoder=(EncodeImageHandler *) WriteTIFFImage; #endif entry->magick=(IsImageFormatHandler *) IsTIFF; entry->flags|=CoderEndianSupportFlag; entry->flags|=CoderDecoderSeekableStreamFlag; entry->flags|=CoderEncoderSeekableStreamFlag; entry->flags^=CoderUseExtensionFlag; if (*version != '\0') entry->version=ConstantString(version); entry->mime_type=ConstantString("image/tiff"); (void) RegisterMagickInfo(entry); entry=AcquireMagickInfo("TIFF","TIFF64","Tagged Image File Format (64-bit)"); #if defined(TIFF_VERSION_BIG) entry->decoder=(DecodeImageHandler *) ReadTIFFImage; entry->encoder=(EncodeImageHandler *) WriteTIFFImage; #endif entry->flags|=CoderEndianSupportFlag; entry->flags|=CoderDecoderSeekableStreamFlag; entry->flags|=CoderEncoderSeekableStreamFlag; entry->flags^=CoderUseExtensionFlag; if (*version != '\0') entry->version=ConstantString(version); entry->mime_type=ConstantString("image/tiff"); (void) RegisterMagickInfo(entry); return(MagickImageCoderSignature); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % U n r e g i s t e r T I F F I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % UnregisterTIFFImage() removes format registrations made by the TIFF module % from the list of supported formats. 
% % The format of the UnregisterTIFFImage method is: % % UnregisterTIFFImage(void) % */ ModuleExport void UnregisterTIFFImage(void) { (void) UnregisterMagickInfo("TIFF64"); (void) UnregisterMagickInfo("TIFF"); (void) UnregisterMagickInfo("TIF"); (void) UnregisterMagickInfo("PTIF"); #if defined(MAGICKCORE_TIFF_DELEGATE) if (tiff_semaphore == (SemaphoreInfo *) NULL) ActivateSemaphoreInfo(&tiff_semaphore); LockSemaphoreInfo(tiff_semaphore); if (instantiate_key != MagickFalse) { #if defined(MAGICKCORE_HAVE_TIFFMERGEFIELDINFO) && defined(MAGICKCORE_HAVE_TIFFSETTAGEXTENDER) if (tag_extender == (TIFFExtendProc) NULL) (void) TIFFSetTagExtender(tag_extender); #endif if (DeleteMagickThreadKey(tiff_exception) == MagickFalse) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); (void) TIFFSetWarningHandler(warning_handler); (void) TIFFSetErrorHandler(error_handler); instantiate_key=MagickFalse; } UnlockSemaphoreInfo(tiff_semaphore); RelinquishSemaphoreInfo(&tiff_semaphore); #endif } #if defined(MAGICKCORE_TIFF_DELEGATE) /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % W r i t e G R O U P 4 I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % WriteGROUP4Image() writes an image in the raw CCITT Group 4 image format. % % The format of the WriteGROUP4Image method is: % % MagickBooleanType WriteGROUP4Image(const ImageInfo *image_info, % Image *image,ExceptionInfo *) % % A description of each parameter follows: % % o image_info: the image info. % % o image: The image. % % o exception: return any errors or warnings in this structure. % */ static MagickBooleanType WriteGROUP4Image(const ImageInfo *image_info, Image *image,ExceptionInfo *exception) { char filename[MagickPathExtent]; FILE *file; Image *huffman_image; ImageInfo *write_info; int unique_file; MagickBooleanType status; ssize_t i; ssize_t count; TIFF *tiff; toff_t *byte_count, strip_size; unsigned char *buffer; /* Write image as CCITT Group4 TIFF image to a temporary file. 
  */
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception);
  if (status == MagickFalse)
    return(status);
  huffman_image=CloneImage(image,0,0,MagickTrue,exception);
  if (huffman_image == (Image *) NULL)
    {
      (void) CloseBlob(image);
      return(MagickFalse);
    }
  huffman_image->endian=MSBEndian;
  file=(FILE *) NULL;
  unique_file=AcquireUniqueFileResource(filename);
  if (unique_file != -1)
    file=fdopen(unique_file,"wb");
  if ((unique_file == -1) || (file == (FILE *) NULL))
    {
      huffman_image=DestroyImage(huffman_image);
      ThrowFileException(exception,FileOpenError,"UnableToCreateTemporaryFile",
        filename);
      return(MagickFalse);
    }
  (void) FormatLocaleString(huffman_image->filename,MagickPathExtent,"tiff:%s",
    filename);
  if (IsImageMonochrome(image) == MagickFalse)
    (void) SetImageType(huffman_image,BilevelType,exception);
  write_info=CloneImageInfo((ImageInfo *) NULL);
  SetImageInfoFile(write_info,file);
  if (IsImageMonochrome(image) == MagickFalse)
    (void) SetImageType(image,BilevelType,exception);
  (void) SetImageDepth(image,1,exception);
  write_info->compression=Group4Compression;
  write_info->type=BilevelType;
  status=WriteTIFFImage(write_info,huffman_image,exception);
  (void) fflush(file);
  write_info=DestroyImageInfo(write_info);
  if (status == MagickFalse)
    {
      huffman_image=DestroyImage(huffman_image);
      (void) fclose(file);
      (void) RelinquishUniqueFileResource(filename);
      return(MagickFalse);
    }
  tiff=TIFFOpen(filename,"rb");
  if (tiff == (TIFF *) NULL)
    {
      huffman_image=DestroyImage(huffman_image);
      (void) fclose(file);
      (void) RelinquishUniqueFileResource(filename);
      ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
        image_info->filename);
      return(MagickFalse);
    }
  /*
    Allocate raw strip buffer.
  */
  if (TIFFGetField(tiff,TIFFTAG_STRIPBYTECOUNTS,&byte_count) != 1)
    {
      TIFFClose(tiff);
      huffman_image=DestroyImage(huffman_image);
      (void) fclose(file);
      (void) RelinquishUniqueFileResource(filename);
      return(MagickFalse);
    }
  strip_size=byte_count[0];
  for (i=1; i < (ssize_t) TIFFNumberOfStrips(tiff); i++)
    if (byte_count[i] > strip_size)
      strip_size=byte_count[i];
  buffer=(unsigned char *) AcquireQuantumMemory((size_t) strip_size,
    sizeof(*buffer));
  if (buffer == (unsigned char *) NULL)
    {
      TIFFClose(tiff);
      huffman_image=DestroyImage(huffman_image);
      (void) fclose(file);
      (void) RelinquishUniqueFileResource(filename);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image_info->filename);
    }
  /*
    Compress runlength encoded to 2D Huffman pixels.
  */
  for (i=0; i < (ssize_t) TIFFNumberOfStrips(tiff); i++)
  {
    count=(ssize_t) TIFFReadRawStrip(tiff,(uint32) i,buffer,strip_size);
    if (WriteBlob(image,(size_t) count,buffer) != count)
      status=MagickFalse;
  }
  buffer=(unsigned char *) RelinquishMagickMemory(buffer);
  TIFFClose(tiff);
  huffman_image=DestroyImage(huffman_image);
  (void) fclose(file);
  (void) RelinquishUniqueFileResource(filename);
  (void) CloseBlob(image);
  return(status);
}
#endif

#if defined(MAGICKCORE_TIFF_DELEGATE)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   W r i t e P T I F I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  WritePTIFImage() writes an image in the pyramid-encoded Tagged image file
%  format.
%
%  The format of the WritePTIFImage method is:
%
%      MagickBooleanType WritePTIFImage(const ImageInfo *image_info,
%        Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
%    o image: The image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType WritePTIFImage(const ImageInfo *image_info,
  Image *image,ExceptionInfo *exception)
{
  Image
    *images,
    *next,
    *pyramid_image;

  ImageInfo
    *write_info;

  MagickBooleanType
    status;

  PointInfo
    resolution;

  size_t
    columns,
    rows;

  /*
    Create pyramid-encoded TIFF image.
  */
  images=NewImageList();
  for (next=image; next != (Image *) NULL; next=GetNextImageInList(next))
  {
    Image
      *clone_image;

    clone_image=CloneImage(next,0,0,MagickFalse,exception);
    if (clone_image == (Image *) NULL)
      break;
    clone_image->previous=NewImageList();
    clone_image->next=NewImageList();
    (void) SetImageProperty(clone_image,"tiff:subfiletype","none",exception);
    AppendImageToList(&images,clone_image);
    columns=next->columns;
    rows=next->rows;
    resolution=next->resolution;
    while ((columns > 64) && (rows > 64))
    {
      columns/=2;
      rows/=2;
      resolution.x/=2;
      resolution.y/=2;
      pyramid_image=ResizeImage(next,columns,rows,image->filter,exception);
      if (pyramid_image == (Image *) NULL)
        break;
      DestroyBlob(pyramid_image);
      pyramid_image->blob=ReferenceBlob(next->blob);
      pyramid_image->resolution=resolution;
      (void) SetImageProperty(pyramid_image,"tiff:subfiletype","REDUCEDIMAGE",
        exception);
      AppendImageToList(&images,pyramid_image);
    }
  }
  status=MagickFalse;
  if (images != (Image *) NULL)
    {
      /*
        Write pyramid-encoded TIFF image.
      */
      images=GetFirstImageInList(images);
      write_info=CloneImageInfo(image_info);
      write_info->adjoin=MagickTrue;
      (void) CopyMagickString(write_info->magick,"TIFF",MagickPathExtent);
      (void) CopyMagickString(images->magick,"TIFF",MagickPathExtent);
      status=WriteTIFFImage(write_info,images,exception);
      images=DestroyImageList(images);
      write_info=DestroyImageInfo(write_info);
    }
  return(status);
}
#endif

#if defined(MAGICKCORE_TIFF_DELEGATE)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   W r i t e T I F F I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  WriteTIFFImage() writes an image in the Tagged image file format.
%
%  The format of the WriteTIFFImage method is:
%
%      MagickBooleanType WriteTIFFImage(const ImageInfo *image_info,
%        Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
%    o image: The image.
%
%    o exception: return any errors or warnings in this structure.
% */ typedef struct _TIFFInfo { RectangleInfo tile_geometry; unsigned char *scanline, *scanlines, *pixels; } TIFFInfo; static void DestroyTIFFInfo(TIFFInfo *tiff_info) { assert(tiff_info != (TIFFInfo *) NULL); if (tiff_info->scanlines != (unsigned char *) NULL) tiff_info->scanlines=(unsigned char *) RelinquishMagickMemory( tiff_info->scanlines); if (tiff_info->pixels != (unsigned char *) NULL) tiff_info->pixels=(unsigned char *) RelinquishMagickMemory( tiff_info->pixels); } static MagickBooleanType EncodeLabImage(Image *image,ExceptionInfo *exception) { CacheView *image_view; MagickBooleanType status; ssize_t y; status=MagickTrue; image_view=AcquireAuthenticCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { Quantum *magick_restrict q; ssize_t x; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; break; } for (x=0; x < (ssize_t) image->columns; x++) { double a, b; a=QuantumScale*GetPixela(image,q)-0.5; if (a < 0.0) a+=1.0; b=QuantumScale*GetPixelb(image,q)-0.5; if (b < 0.0) b+=1.0; SetPixela(image,QuantumRange*a,q); SetPixelb(image,QuantumRange*b,q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) { status=MagickFalse; break; } } image_view=DestroyCacheView(image_view); return(status); } static MagickBooleanType GetTIFFInfo(const ImageInfo *image_info, TIFF *tiff,TIFFInfo *tiff_info) { #define TIFFStripSizeDefault 1048576 const char *option; MagickStatusType flags; uint32 tile_columns, tile_rows; assert(tiff_info != (TIFFInfo *) NULL); (void) memset(tiff_info,0,sizeof(*tiff_info)); option=GetImageOption(image_info,"tiff:tile-geometry"); if (option == (const char *) NULL) { size_t extent; uint32 rows, rows_per_strip; extent=TIFFScanlineSize(tiff); rows_per_strip=TIFFStripSizeDefault/(extent == 0 ? 1 : (uint32) extent); rows_per_strip=16*(((rows_per_strip < 16 ? 16 : rows_per_strip)+1)/16); TIFFGetField(tiff,TIFFTAG_IMAGELENGTH,&rows); if (rows_per_strip > rows) rows_per_strip=rows; option=GetImageOption(image_info,"tiff:rows-per-strip"); if (option != (const char *) NULL) rows_per_strip=(uint32) strtoul(option,(char **) NULL,10); rows_per_strip=TIFFDefaultStripSize(tiff,rows_per_strip); (void) TIFFSetField(tiff,TIFFTAG_ROWSPERSTRIP,rows_per_strip); return(MagickTrue); } /* Create tiled TIFF, ignore "tiff:rows-per-strip". 
*/ flags=ParseAbsoluteGeometry(option,&tiff_info->tile_geometry); if ((flags & HeightValue) == 0) tiff_info->tile_geometry.height=tiff_info->tile_geometry.width; tile_columns=(uint32) tiff_info->tile_geometry.width; tile_rows=(uint32) tiff_info->tile_geometry.height; TIFFDefaultTileSize(tiff,&tile_columns,&tile_rows); (void) TIFFSetField(tiff,TIFFTAG_TILEWIDTH,tile_columns); (void) TIFFSetField(tiff,TIFFTAG_TILELENGTH,tile_rows); tiff_info->tile_geometry.width=tile_columns; tiff_info->tile_geometry.height=tile_rows; if ((TIFFScanlineSize(tiff) <= 0) || (TIFFTileSize(tiff) <= 0)) { DestroyTIFFInfo(tiff_info); return(MagickFalse); } tiff_info->scanlines=(unsigned char *) AcquireQuantumMemory((size_t) tile_rows*TIFFScanlineSize(tiff),sizeof(*tiff_info->scanlines)); tiff_info->pixels=(unsigned char *) AcquireQuantumMemory((size_t) tile_rows*TIFFTileSize(tiff),sizeof(*tiff_info->scanlines)); if ((tiff_info->scanlines == (unsigned char *) NULL) || (tiff_info->pixels == (unsigned char *) NULL)) { DestroyTIFFInfo(tiff_info); return(MagickFalse); } return(MagickTrue); } static int32 TIFFWritePixels(TIFF *tiff,TIFFInfo *tiff_info,ssize_t row, tsample_t sample,Image *image) { int32 status; ssize_t i; unsigned char *p, *q; size_t number_tiles, tile_width; ssize_t bytes_per_pixel, j, k, l; if (TIFFIsTiled(tiff) == 0) return(TIFFWriteScanline(tiff,tiff_info->scanline,(uint32) row,sample)); /* Fill scanlines to tile height. */ i=(ssize_t) (row % tiff_info->tile_geometry.height)*TIFFScanlineSize(tiff); (void) memcpy(tiff_info->scanlines+i,(char *) tiff_info->scanline, (size_t) TIFFScanlineSize(tiff)); if (((size_t) (row % tiff_info->tile_geometry.height) != (tiff_info->tile_geometry.height-1)) && (row != (ssize_t) (image->rows-1))) return(0); /* Write tile to TIFF image. */ status=0; bytes_per_pixel=TIFFTileSize(tiff)/(ssize_t) ( tiff_info->tile_geometry.height*tiff_info->tile_geometry.width); number_tiles=(image->columns+tiff_info->tile_geometry.width)/ tiff_info->tile_geometry.width; for (i=0; i < (ssize_t) number_tiles; i++) { tile_width=(i == (ssize_t) (number_tiles-1)) ? 
image->columns-(i* tiff_info->tile_geometry.width) : tiff_info->tile_geometry.width; for (j=0; j < (ssize_t) ((row % tiff_info->tile_geometry.height)+1); j++) for (k=0; k < (ssize_t) tile_width; k++) { if (bytes_per_pixel == 0) { p=tiff_info->scanlines+(j*TIFFScanlineSize(tiff)+(i* tiff_info->tile_geometry.width+k)/8); q=tiff_info->pixels+(j*TIFFTileRowSize(tiff)+k/8); *q++=(*p++); continue; } p=tiff_info->scanlines+(j*TIFFScanlineSize(tiff)+(i* tiff_info->tile_geometry.width+k)*bytes_per_pixel); q=tiff_info->pixels+(j*TIFFTileRowSize(tiff)+k*bytes_per_pixel); for (l=0; l < bytes_per_pixel; l++) *q++=(*p++); } if ((i*tiff_info->tile_geometry.width) != image->columns) status=TIFFWriteTile(tiff,tiff_info->pixels,(uint32) (i* tiff_info->tile_geometry.width),(uint32) ((row/ tiff_info->tile_geometry.height)*tiff_info->tile_geometry.height),0, sample); if (status < 0) break; } return(status); } static ssize_t TIFFWriteCustomStream(unsigned char *data,const size_t count, void *user_data) { PhotoshopProfile *profile; if (count == 0) return(0); profile=(PhotoshopProfile *) user_data; if ((profile->offset+(MagickOffsetType) count) >= (MagickOffsetType) profile->extent) { profile->extent+=count+profile->quantum; profile->quantum<<=1; SetStringInfoLength(profile->data,profile->extent); } (void) memcpy(profile->data->datum+profile->offset,data,count); profile->offset+=count; return(count); } static CustomStreamInfo *TIFFAcquireCustomStreamForWriting( PhotoshopProfile *profile,ExceptionInfo *exception) { CustomStreamInfo *custom_stream; custom_stream=AcquireCustomStreamInfo(exception); if (custom_stream == (CustomStreamInfo *) NULL) return(custom_stream); SetCustomStreamData(custom_stream,(void *) profile); SetCustomStreamWriter(custom_stream,TIFFWriteCustomStream); SetCustomStreamSeeker(custom_stream,TIFFSeekCustomStream); SetCustomStreamTeller(custom_stream,TIFFTellCustomStream); return(custom_stream); } static MagickBooleanType TIFFWritePhotoshopLayers(Image* image, const ImageInfo *image_info,EndianType endian,ExceptionInfo *exception) { BlobInfo *blob; CustomStreamInfo *custom_stream; Image *base_image, *next; ImageInfo *clone_info; MagickBooleanType status; PhotoshopProfile profile; PSDInfo info; StringInfo *layers; base_image=CloneImage(image,0,0,MagickFalse,exception); if (base_image == (Image *) NULL) return(MagickTrue); clone_info=CloneImageInfo(image_info); if (clone_info == (ImageInfo *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); profile.offset=0; profile.quantum=MagickMinBlobExtent; layers=AcquireStringInfo(profile.quantum); if (layers == (StringInfo *) NULL) { base_image=DestroyImage(base_image); clone_info=DestroyImageInfo(clone_info); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } profile.data=layers; profile.extent=layers->length; custom_stream=TIFFAcquireCustomStreamForWriting(&profile,exception); if (custom_stream == (CustomStreamInfo *) NULL) { base_image=DestroyImage(base_image); clone_info=DestroyImageInfo(clone_info); layers=DestroyStringInfo(layers); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } blob=CloneBlobInfo((BlobInfo *) NULL); if (blob == (BlobInfo *) NULL) { base_image=DestroyImage(base_image); clone_info=DestroyImageInfo(clone_info); layers=DestroyStringInfo(layers); custom_stream=DestroyCustomStreamInfo(custom_stream); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } DestroyBlob(base_image); 
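  /*
    Swap the clone's blob for the in-memory custom stream: the PSD layer
    records emitted by WritePSDLayers() below accumulate in `layers` instead
    of the output file, and the result is attached to the image as the
    "tiff:37724" (Photoshop layer data) profile.
  */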
base_image->blob=blob; next=base_image; while (next != (Image *) NULL) next=SyncNextImageInList(next); AttachCustomStream(base_image->blob,custom_stream); InitPSDInfo(image,&info); base_image->endian=endian; WriteBlobString(base_image,"Adobe Photoshop Document Data Block"); WriteBlobByte(base_image,0); WriteBlobString(base_image,base_image->endian == LSBEndian ? "MIB8ryaL" : "8BIMLayr"); status=WritePSDLayers(base_image,clone_info,&info,exception); if (status != MagickFalse) { SetStringInfoLength(layers,(size_t) profile.offset); status=SetImageProfile(image,"tiff:37724",layers,exception); } next=base_image; while (next != (Image *) NULL) { CloseBlob(next); next=next->next; } layers=DestroyStringInfo(layers); clone_info=DestroyImageInfo(clone_info); custom_stream=DestroyCustomStreamInfo(custom_stream); return(status); } static void TIFFSetProfiles(TIFF *tiff,Image *image) { const char *name; const StringInfo *profile; if (image->profiles == (void *) NULL) return; ResetImageProfileIterator(image); for (name=GetNextImageProfile(image); name != (const char *) NULL; ) { profile=GetImageProfile(image,name); if (GetStringInfoLength(profile) == 0) { name=GetNextImageProfile(image); continue; } #if defined(TIFFTAG_XMLPACKET) if (LocaleCompare(name,"xmp") == 0) (void) TIFFSetField(tiff,TIFFTAG_XMLPACKET,(uint32) GetStringInfoLength( profile),GetStringInfoDatum(profile)); #endif #if defined(TIFFTAG_ICCPROFILE) if (LocaleCompare(name,"icc") == 0) (void) TIFFSetField(tiff,TIFFTAG_ICCPROFILE,(uint32) GetStringInfoLength( profile),GetStringInfoDatum(profile)); #endif if (LocaleCompare(name,"iptc") == 0) { size_t length; StringInfo *iptc_profile; iptc_profile=CloneStringInfo(profile); length=GetStringInfoLength(profile)+4-(GetStringInfoLength(profile) & 0x03); SetStringInfoLength(iptc_profile,length); if (TIFFIsByteSwapped(tiff)) TIFFSwabArrayOfLong((uint32 *) GetStringInfoDatum(iptc_profile), (unsigned long) (length/4)); (void) TIFFSetField(tiff,TIFFTAG_RICHTIFFIPTC,(uint32) GetStringInfoLength(iptc_profile)/4,GetStringInfoDatum(iptc_profile)); iptc_profile=DestroyStringInfo(iptc_profile); } #if defined(TIFFTAG_PHOTOSHOP) if (LocaleCompare(name,"8bim") == 0) (void) TIFFSetField(tiff,TIFFTAG_PHOTOSHOP,(uint32) GetStringInfoLength(profile),GetStringInfoDatum(profile)); #endif if (LocaleCompare(name,"tiff:37724") == 0) (void) TIFFSetField(tiff,37724,(uint32) GetStringInfoLength(profile), GetStringInfoDatum(profile)); if (LocaleCompare(name,"tiff:34118") == 0) (void) TIFFSetField(tiff,34118,(uint32) GetStringInfoLength(profile), GetStringInfoDatum(profile)); name=GetNextImageProfile(image); } } static void TIFFSetProperties(TIFF *tiff,const MagickBooleanType adjoin, Image *image,ExceptionInfo *exception) { const char *value; value=GetImageArtifact(image,"tiff:document"); if (value != (const char *) NULL) (void) TIFFSetField(tiff,TIFFTAG_DOCUMENTNAME,value); value=GetImageArtifact(image,"tiff:hostcomputer"); if (value != (const char *) NULL) (void) TIFFSetField(tiff,TIFFTAG_HOSTCOMPUTER,value); value=GetImageArtifact(image,"tiff:artist"); if (value != (const char *) NULL) (void) TIFFSetField(tiff,TIFFTAG_ARTIST,value); value=GetImageArtifact(image,"tiff:timestamp"); if (value != (const char *) NULL) (void) TIFFSetField(tiff,TIFFTAG_DATETIME,value); value=GetImageArtifact(image,"tiff:make"); if (value != (const char *) NULL) (void) TIFFSetField(tiff,TIFFTAG_MAKE,value); value=GetImageArtifact(image,"tiff:model"); if (value != (const char *) NULL) (void) TIFFSetField(tiff,TIFFTAG_MODEL,value); 
value=GetImageArtifact(image,"tiff:software"); if (value != (const char *) NULL) (void) TIFFSetField(tiff,TIFFTAG_SOFTWARE,value); value=GetImageArtifact(image,"tiff:copyright"); if (value != (const char *) NULL) (void) TIFFSetField(tiff,TIFFTAG_COPYRIGHT,value); value=GetImageArtifact(image,"kodak-33423"); if (value != (const char *) NULL) (void) TIFFSetField(tiff,33423,value); value=GetImageArtifact(image,"kodak-36867"); if (value != (const char *) NULL) (void) TIFFSetField(tiff,36867,value); value=GetImageProperty(image,"label",exception); if (value != (const char *) NULL) (void) TIFFSetField(tiff,TIFFTAG_PAGENAME,value); value=GetImageProperty(image,"comment",exception); if (value != (const char *) NULL) (void) TIFFSetField(tiff,TIFFTAG_IMAGEDESCRIPTION,value); value=GetImageArtifact(image,"tiff:subfiletype"); if (value != (const char *) NULL) { if (LocaleCompare(value,"REDUCEDIMAGE") == 0) (void) TIFFSetField(tiff,TIFFTAG_SUBFILETYPE,FILETYPE_REDUCEDIMAGE); else if (LocaleCompare(value,"PAGE") == 0) (void) TIFFSetField(tiff,TIFFTAG_SUBFILETYPE,FILETYPE_PAGE); else if (LocaleCompare(value,"MASK") == 0) (void) TIFFSetField(tiff,TIFFTAG_SUBFILETYPE,FILETYPE_MASK); } else { uint16 page, pages; page=(uint16) image->scene; pages=(uint16) GetImageListLength(image); if ((adjoin != MagickFalse) && (pages > 1)) (void) TIFFSetField(tiff,TIFFTAG_SUBFILETYPE,FILETYPE_PAGE); (void) TIFFSetField(tiff,TIFFTAG_PAGENUMBER,page,pages); } } static MagickBooleanType WriteTIFFImage(const ImageInfo *image_info, Image *image,ExceptionInfo *exception) { const char *mode, *option; CompressionType compression; EndianType endian_type; MagickBooleanType adjoin, preserve_compression, status; MagickOffsetType scene; QuantumInfo *quantum_info; QuantumType quantum_type; ssize_t i; size_t imageListLength, length; ssize_t y; TIFF *tiff; TIFFInfo tiff_info; uint16 bits_per_sample, compress_tag, endian, photometric, predictor; unsigned char *pixels; void *sans[2] = { NULL, NULL }; /* Open TIFF file. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception); if (status == MagickFalse) return(status); (void) SetMagickThreadValue(tiff_exception,exception); endian_type=(HOST_FILLORDER == FILLORDER_LSB2MSB) ? LSBEndian : MSBEndian; option=GetImageOption(image_info,"tiff:endian"); if (option != (const char *) NULL) { if (LocaleNCompare(option,"msb",3) == 0) endian_type=MSBEndian; if (LocaleNCompare(option,"lsb",3) == 0) endian_type=LSBEndian; } mode=endian_type == LSBEndian ? "wl" : "wb"; #if defined(TIFF_VERSION_BIG) if (LocaleCompare(image_info->magick,"TIFF64") == 0) mode=endian_type == LSBEndian ? 
"wl8" : "wb8"; #endif tiff=TIFFClientOpen(image->filename,mode,(thandle_t) image,TIFFReadBlob, TIFFWriteBlob,TIFFSeekBlob,TIFFCloseBlob,TIFFGetBlobSize,TIFFMapBlob, TIFFUnmapBlob); if (tiff == (TIFF *) NULL) return(MagickFalse); if (exception->severity > ErrorException) { TIFFClose(tiff); return(MagickFalse); } (void) DeleteImageProfile(image,"tiff:37724"); scene=0; adjoin=image_info->adjoin; imageListLength=GetImageListLength(image); option=GetImageOption(image_info,"tiff:preserve-compression"); preserve_compression=IsStringTrue(option); do { /* Initialize TIFF fields. */ if ((image_info->type != UndefinedType) && (image_info->type != OptimizeType) && (image_info->type != image->type)) (void) SetImageType(image,image_info->type,exception); compression=image_info->compression; if (preserve_compression != MagickFalse) compression=image->compression; switch (compression) { case FaxCompression: case Group4Compression: { if (IsImageMonochrome(image) == MagickFalse) { if (IsImageGray(image) == MagickFalse) (void) SetImageType(image,BilevelType,exception); else (void) SetImageDepth(image,1,exception); } image->depth=1; break; } case JPEGCompression: { (void) SetImageStorageClass(image,DirectClass,exception); (void) SetImageDepth(image,8,exception); break; } default: break; } quantum_info=AcquireQuantumInfo(image_info,image); if (quantum_info == (QuantumInfo *) NULL) ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); if ((image->storage_class != PseudoClass) && (image->depth >= 32) && (quantum_info->format == UndefinedQuantumFormat) && (IsHighDynamicRangeImage(image,exception) != MagickFalse)) { status=SetQuantumFormat(image,quantum_info,FloatingPointQuantumFormat); if (status == MagickFalse) { quantum_info=DestroyQuantumInfo(quantum_info); ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); } } if ((LocaleCompare(image_info->magick,"PTIF") == 0) && (GetPreviousImageInList(image) != (Image *) NULL)) (void) TIFFSetField(tiff,TIFFTAG_SUBFILETYPE,FILETYPE_REDUCEDIMAGE); if ((image->columns != (uint32) image->columns) || (image->rows != (uint32) image->rows)) ThrowWriterException(ImageError,"WidthOrHeightExceedsLimit"); (void) TIFFSetField(tiff,TIFFTAG_IMAGELENGTH,(uint32) image->rows); (void) TIFFSetField(tiff,TIFFTAG_IMAGEWIDTH,(uint32) image->columns); switch (compression) { case FaxCompression: { compress_tag=COMPRESSION_CCITTFAX3; option=GetImageOption(image_info,"quantum:polarity"); if (option == (const char *) NULL) SetQuantumMinIsWhite(quantum_info,MagickTrue); break; } case Group4Compression: { compress_tag=COMPRESSION_CCITTFAX4; option=GetImageOption(image_info,"quantum:polarity"); if (option == (const char *) NULL) SetQuantumMinIsWhite(quantum_info,MagickTrue); break; } #if defined(COMPRESSION_JBIG) case JBIG1Compression: { compress_tag=COMPRESSION_JBIG; break; } #endif case JPEGCompression: { compress_tag=COMPRESSION_JPEG; break; } #if defined(COMPRESSION_LZMA) case LZMACompression: { compress_tag=COMPRESSION_LZMA; break; } #endif case LZWCompression: { compress_tag=COMPRESSION_LZW; break; } case RLECompression: { compress_tag=COMPRESSION_PACKBITS; break; } case ZipCompression: { compress_tag=COMPRESSION_ADOBE_DEFLATE; break; } #if defined(COMPRESSION_ZSTD) case ZstdCompression: { compress_tag=COMPRESSION_ZSTD; break; } #endif case NoCompression: default: { compress_tag=COMPRESSION_NONE; break; } } #if defined(MAGICKCORE_HAVE_TIFFISCODECCONFIGURED) || (TIFFLIB_VERSION > 20040919) if ((compress_tag != COMPRESSION_NONE) && (TIFFIsCODECConfigured(compress_tag) 
== 0)) { (void) ThrowMagickException(exception,GetMagickModule(),CoderError, "CompressionNotSupported","`%s'",CommandOptionToMnemonic( MagickCompressOptions,(ssize_t) compression)); compress_tag=COMPRESSION_NONE; compression=NoCompression; } #else switch (compress_tag) { #if defined(CCITT_SUPPORT) case COMPRESSION_CCITTFAX3: case COMPRESSION_CCITTFAX4: #endif #if defined(YCBCR_SUPPORT) && defined(JPEG_SUPPORT) case COMPRESSION_JPEG: #endif #if defined(LZMA_SUPPORT) && defined(COMPRESSION_LZMA) case COMPRESSION_LZMA: #endif #if defined(LZW_SUPPORT) case COMPRESSION_LZW: #endif #if defined(PACKBITS_SUPPORT) case COMPRESSION_PACKBITS: #endif #if defined(ZIP_SUPPORT) case COMPRESSION_ADOBE_DEFLATE: #endif case COMPRESSION_NONE: break; default: { (void) ThrowMagickException(exception,GetMagickModule(),CoderError, "CompressionNotSupported","`%s'",CommandOptionToMnemonic( MagickCompressOptions,(ssize_t) compression)); compress_tag=COMPRESSION_NONE; compression=NoCompression; break; } } #endif if (image->colorspace == CMYKColorspace) { photometric=PHOTOMETRIC_SEPARATED; (void) TIFFSetField(tiff,TIFFTAG_SAMPLESPERPIXEL,4); (void) TIFFSetField(tiff,TIFFTAG_INKSET,INKSET_CMYK); } else { /* Full color TIFF raster. */ if (image->colorspace == LabColorspace) { photometric=PHOTOMETRIC_CIELAB; EncodeLabImage(image,exception); } else if (IsYCbCrCompatibleColorspace(image->colorspace) != MagickFalse) { photometric=PHOTOMETRIC_YCBCR; (void) TIFFSetField(tiff,TIFFTAG_YCBCRSUBSAMPLING,1,1); (void) SetImageStorageClass(image,DirectClass,exception); status=SetQuantumDepth(image,quantum_info,8); if (status == MagickFalse) ThrowWriterException(ResourceLimitError, "MemoryAllocationFailed"); } else photometric=PHOTOMETRIC_RGB; (void) TIFFSetField(tiff,TIFFTAG_SAMPLESPERPIXEL,3); if ((image_info->type != TrueColorType) && (image_info->type != TrueColorAlphaType)) { if ((image_info->type != PaletteType) && (SetImageGray(image,exception) != MagickFalse)) { photometric=(uint16) (quantum_info->min_is_white != MagickFalse ? PHOTOMETRIC_MINISWHITE : PHOTOMETRIC_MINISBLACK); (void) TIFFSetField(tiff,TIFFTAG_SAMPLESPERPIXEL,1); if ((image->depth == 1) && (image->alpha_trait == UndefinedPixelTrait)) SetImageMonochrome(image,exception); } else if ((image->storage_class == PseudoClass) && (image->alpha_trait == UndefinedPixelTrait)) { size_t depth; /* Colormapped TIFF raster. */ (void) TIFFSetField(tiff,TIFFTAG_SAMPLESPERPIXEL,1); photometric=PHOTOMETRIC_PALETTE; depth=1; while ((GetQuantumRange(depth)+1) < image->colors) depth<<=1; status=SetQuantumDepth(image,quantum_info,depth); if (status == MagickFalse) ThrowWriterException(ResourceLimitError, "MemoryAllocationFailed"); } } } (void) TIFFGetFieldDefaulted(tiff,TIFFTAG_FILLORDER,&endian,sans); if ((compress_tag == COMPRESSION_CCITTFAX3) || (compress_tag == COMPRESSION_CCITTFAX4)) { if ((photometric != PHOTOMETRIC_MINISWHITE) && (photometric != PHOTOMETRIC_MINISBLACK)) { compress_tag=COMPRESSION_NONE; endian=FILLORDER_MSB2LSB; } } option=GetImageOption(image_info,"tiff:fill-order"); if (option != (const char *) NULL) { if (LocaleNCompare(option,"msb",3) == 0) endian=FILLORDER_MSB2LSB; if (LocaleNCompare(option,"lsb",3) == 0) endian=FILLORDER_LSB2MSB; } (void) TIFFSetField(tiff,TIFFTAG_COMPRESSION,compress_tag); (void) TIFFSetField(tiff,TIFFTAG_FILLORDER,endian); (void) TIFFSetField(tiff,TIFFTAG_BITSPERSAMPLE,quantum_info->depth); if (image->alpha_trait != UndefinedPixelTrait) { uint16 extra_samples, sample_info[1], samples_per_pixel; /* TIFF has a matte channel. 
*/ extra_samples=1; sample_info[0]=EXTRASAMPLE_UNASSALPHA; option=GetImageOption(image_info,"tiff:alpha"); if (option != (const char *) NULL) { if (LocaleCompare(option,"associated") == 0) sample_info[0]=EXTRASAMPLE_ASSOCALPHA; else if (LocaleCompare(option,"unspecified") == 0) sample_info[0]=EXTRASAMPLE_UNSPECIFIED; } (void) TIFFGetFieldDefaulted(tiff,TIFFTAG_SAMPLESPERPIXEL, &samples_per_pixel,sans); (void) TIFFSetField(tiff,TIFFTAG_SAMPLESPERPIXEL,samples_per_pixel+1); (void) TIFFSetField(tiff,TIFFTAG_EXTRASAMPLES,extra_samples, &sample_info); if (sample_info[0] == EXTRASAMPLE_ASSOCALPHA) SetQuantumAlphaType(quantum_info,AssociatedQuantumAlpha); } (void) TIFFSetField(tiff,TIFFTAG_PHOTOMETRIC,photometric); switch (quantum_info->format) { case FloatingPointQuantumFormat: { (void) TIFFSetField(tiff,TIFFTAG_SAMPLEFORMAT,SAMPLEFORMAT_IEEEFP); (void) TIFFSetField(tiff,TIFFTAG_SMINSAMPLEVALUE,quantum_info->minimum); (void) TIFFSetField(tiff,TIFFTAG_SMAXSAMPLEVALUE,quantum_info->maximum); break; } case SignedQuantumFormat: { (void) TIFFSetField(tiff,TIFFTAG_SAMPLEFORMAT,SAMPLEFORMAT_INT); break; } case UnsignedQuantumFormat: { (void) TIFFSetField(tiff,TIFFTAG_SAMPLEFORMAT,SAMPLEFORMAT_UINT); break; } default: break; } (void) TIFFSetField(tiff,TIFFTAG_PLANARCONFIG,PLANARCONFIG_CONTIG); if (photometric == PHOTOMETRIC_RGB) if ((image_info->interlace == PlaneInterlace) || (image_info->interlace == PartitionInterlace)) (void) TIFFSetField(tiff,TIFFTAG_PLANARCONFIG,PLANARCONFIG_SEPARATE); predictor=0; switch (compress_tag) { case COMPRESSION_JPEG: { #if defined(JPEG_SUPPORT) if (image_info->quality != UndefinedCompressionQuality) (void) TIFFSetField(tiff,TIFFTAG_JPEGQUALITY,image_info->quality); (void) TIFFSetField(tiff,TIFFTAG_JPEGCOLORMODE,JPEGCOLORMODE_RAW); if (IssRGBCompatibleColorspace(image->colorspace) != MagickFalse) { const char *value; (void) TIFFSetField(tiff,TIFFTAG_JPEGCOLORMODE,JPEGCOLORMODE_RGB); if (IsYCbCrCompatibleColorspace(image->colorspace) != MagickFalse) { const char *sampling_factor; GeometryInfo geometry_info; MagickStatusType flags; sampling_factor=(const char *) NULL; value=GetImageProperty(image,"jpeg:sampling-factor",exception); if (value != (char *) NULL) { sampling_factor=value; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Input sampling-factors=%s",sampling_factor); } if (image_info->sampling_factor != (char *) NULL) sampling_factor=image_info->sampling_factor; if (sampling_factor != (const char *) NULL) { flags=ParseGeometry(sampling_factor,&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=geometry_info.rho; (void) TIFFSetField(tiff,TIFFTAG_YCBCRSUBSAMPLING,(uint16) geometry_info.rho,(uint16) geometry_info.sigma); } } } (void) TIFFGetFieldDefaulted(tiff,TIFFTAG_BITSPERSAMPLE, &bits_per_sample,sans); if (bits_per_sample == 12) (void) TIFFSetField(tiff,TIFFTAG_JPEGTABLESMODE,JPEGTABLESMODE_QUANT); #endif break; } case COMPRESSION_ADOBE_DEFLATE: { (void) TIFFGetFieldDefaulted(tiff,TIFFTAG_BITSPERSAMPLE, &bits_per_sample,sans); if (((photometric == PHOTOMETRIC_RGB) || (photometric == PHOTOMETRIC_SEPARATED) || (photometric == PHOTOMETRIC_MINISBLACK)) && ((bits_per_sample == 8) || (bits_per_sample == 16))) predictor=PREDICTOR_HORIZONTAL; (void) TIFFSetField(tiff,TIFFTAG_ZIPQUALITY,(long) ( image_info->quality == UndefinedCompressionQuality ? 7 : MagickMin((ssize_t) image_info->quality/10,9))); break; } case COMPRESSION_CCITTFAX3: { /* Byte-aligned EOL. 
*/ (void) TIFFSetField(tiff,TIFFTAG_GROUP3OPTIONS,4); break; } case COMPRESSION_CCITTFAX4: break; #if defined(LZMA_SUPPORT) && defined(COMPRESSION_LZMA) case COMPRESSION_LZMA: { if (((photometric == PHOTOMETRIC_RGB) || (photometric == PHOTOMETRIC_SEPARATED) || (photometric == PHOTOMETRIC_MINISBLACK)) && ((bits_per_sample == 8) || (bits_per_sample == 16))) predictor=PREDICTOR_HORIZONTAL; (void) TIFFSetField(tiff,TIFFTAG_LZMAPRESET,(long) ( image_info->quality == UndefinedCompressionQuality ? 7 : MagickMin((ssize_t) image_info->quality/10,9))); break; } #endif case COMPRESSION_LZW: { (void) TIFFGetFieldDefaulted(tiff,TIFFTAG_BITSPERSAMPLE, &bits_per_sample,sans); if (((photometric == PHOTOMETRIC_RGB) || (photometric == PHOTOMETRIC_SEPARATED) || (photometric == PHOTOMETRIC_MINISBLACK)) && ((bits_per_sample == 8) || (bits_per_sample == 16))) predictor=PREDICTOR_HORIZONTAL; break; } #if defined(WEBP_SUPPORT) && defined(COMPRESSION_WEBP) case COMPRESSION_WEBP: { (void) TIFFGetFieldDefaulted(tiff,TIFFTAG_BITSPERSAMPLE, &bits_per_sample,sans); if (((photometric == PHOTOMETRIC_RGB) || (photometric == PHOTOMETRIC_SEPARATED) || (photometric == PHOTOMETRIC_MINISBLACK)) && ((bits_per_sample == 8) || (bits_per_sample == 16))) predictor=PREDICTOR_HORIZONTAL; (void) TIFFSetField(tiff,TIFFTAG_WEBP_LEVEL,image_info->quality); if (image_info->quality >= 100) (void) TIFFSetField(tiff,TIFFTAG_WEBP_LOSSLESS,1); break; } #endif #if defined(ZSTD_SUPPORT) && defined(COMPRESSION_ZSTD) case COMPRESSION_ZSTD: { (void) TIFFGetFieldDefaulted(tiff,TIFFTAG_BITSPERSAMPLE, &bits_per_sample,sans); if (((photometric == PHOTOMETRIC_RGB) || (photometric == PHOTOMETRIC_SEPARATED) || (photometric == PHOTOMETRIC_MINISBLACK)) && ((bits_per_sample == 8) || (bits_per_sample == 16))) predictor=PREDICTOR_HORIZONTAL; (void) TIFFSetField(tiff,TIFFTAG_ZSTD_LEVEL,22*image_info->quality/ 100.0); break; } #endif default: break; } if (quantum_info->format == FloatingPointQuantumFormat) predictor=PREDICTOR_FLOATINGPOINT; option=GetImageOption(image_info,"tiff:predictor"); if (option != (const char * ) NULL) predictor=(uint16) strtol(option,(char **) NULL,10); if (predictor != 0) (void) TIFFSetField(tiff,TIFFTAG_PREDICTOR,predictor); if ((image->resolution.x != 0.0) && (image->resolution.y != 0.0)) { unsigned short units; /* Set image resolution. */ units=RESUNIT_NONE; if (image->units == PixelsPerInchResolution) units=RESUNIT_INCH; if (image->units == PixelsPerCentimeterResolution) units=RESUNIT_CENTIMETER; (void) TIFFSetField(tiff,TIFFTAG_RESOLUTIONUNIT,(uint16) units); (void) TIFFSetField(tiff,TIFFTAG_XRESOLUTION,image->resolution.x); (void) TIFFSetField(tiff,TIFFTAG_YRESOLUTION,image->resolution.y); if ((image->page.x < 0) || (image->page.y < 0)) (void) ThrowMagickException(exception,GetMagickModule(),CoderError, "TIFF: negative image positions unsupported","%s",image->filename); if ((image->page.x > 0) && (image->resolution.x > 0.0)) { /* Set horizontal image position. */ (void) TIFFSetField(tiff,TIFFTAG_XPOSITION,(float) image->page.x/ image->resolution.x); } if ((image->page.y > 0) && (image->resolution.y > 0.0)) { /* Set vertical image position. */ (void) TIFFSetField(tiff,TIFFTAG_YPOSITION,(float) image->page.y/ image->resolution.y); } } if (image->chromaticity.white_point.x != 0.0) { float chromaticity[6]; /* Set image chromaticity. 
*/ chromaticity[0]=(float) image->chromaticity.red_primary.x; chromaticity[1]=(float) image->chromaticity.red_primary.y; chromaticity[2]=(float) image->chromaticity.green_primary.x; chromaticity[3]=(float) image->chromaticity.green_primary.y; chromaticity[4]=(float) image->chromaticity.blue_primary.x; chromaticity[5]=(float) image->chromaticity.blue_primary.y; (void) TIFFSetField(tiff,TIFFTAG_PRIMARYCHROMATICITIES,chromaticity); chromaticity[0]=(float) image->chromaticity.white_point.x; chromaticity[1]=(float) image->chromaticity.white_point.y; (void) TIFFSetField(tiff,TIFFTAG_WHITEPOINT,chromaticity); } option=GetImageOption(image_info,"tiff:write-layers"); if (IsStringTrue(option) != MagickFalse) { (void) TIFFWritePhotoshopLayers(image,image_info,endian_type,exception); adjoin=MagickFalse; } if ((LocaleCompare(image_info->magick,"PTIF") != 0) && (adjoin != MagickFalse) && (imageListLength > 1)) { (void) TIFFSetField(tiff,TIFFTAG_SUBFILETYPE,FILETYPE_PAGE); if (image->scene != 0) (void) TIFFSetField(tiff,TIFFTAG_PAGENUMBER,(uint16) image->scene, imageListLength); } if (image->orientation != UndefinedOrientation) (void) TIFFSetField(tiff,TIFFTAG_ORIENTATION,(uint16) image->orientation); else (void) TIFFSetField(tiff,TIFFTAG_ORIENTATION,ORIENTATION_TOPLEFT); TIFFSetProfiles(tiff,image); { uint16 page, pages; page=(uint16) scene; pages=(uint16) imageListLength; if ((LocaleCompare(image_info->magick,"PTIF") != 0) && (adjoin != MagickFalse) && (pages > 1)) (void) TIFFSetField(tiff,TIFFTAG_SUBFILETYPE,FILETYPE_PAGE); (void) TIFFSetField(tiff,TIFFTAG_PAGENUMBER,page,pages); } (void) TIFFSetProperties(tiff,adjoin,image,exception); /* Write image scanlines. */ if (GetTIFFInfo(image_info,tiff,&tiff_info) == MagickFalse) ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); if (compress_tag == COMPRESSION_CCITTFAX4) (void) TIFFSetField(tiff,TIFFTAG_ROWSPERSTRIP,(uint32) image->rows); quantum_info->endian=LSBEndian; pixels=(unsigned char *) GetQuantumPixels(quantum_info); tiff_info.scanline=(unsigned char *) GetQuantumPixels(quantum_info); switch (photometric) { case PHOTOMETRIC_CIELAB: case PHOTOMETRIC_YCBCR: case PHOTOMETRIC_RGB: { /* RGB TIFF image. */ switch (image_info->interlace) { case NoInterlace: default: { quantum_type=RGBQuantum; if (image->alpha_trait != UndefinedPixelTrait) quantum_type=RGBAQuantum; for (y=0; y < (ssize_t) image->rows; y++) { const Quantum *magick_restrict p; p=GetVirtualPixels(image,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; length=ExportQuantumPixels(image,(CacheView *) NULL,quantum_info, quantum_type,pixels,exception); (void) length; if (TIFFWritePixels(tiff,&tiff_info,y,0,image) == -1) break; if (image->previous == (Image *) NULL) { status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y,image->rows); if (status == MagickFalse) break; } } break; } case PlaneInterlace: case PartitionInterlace: { /* Plane interlacing: RRRRRR...GGGGGG...BBBBBB... 
*/ for (y=0; y < (ssize_t) image->rows; y++) { const Quantum *magick_restrict p; p=GetVirtualPixels(image,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; length=ExportQuantumPixels(image,(CacheView *) NULL,quantum_info, RedQuantum,pixels,exception); if (TIFFWritePixels(tiff,&tiff_info,y,0,image) == -1) break; } if (image->previous == (Image *) NULL) { status=SetImageProgress(image,SaveImageTag,100,400); if (status == MagickFalse) break; } for (y=0; y < (ssize_t) image->rows; y++) { const Quantum *magick_restrict p; p=GetVirtualPixels(image,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; length=ExportQuantumPixels(image,(CacheView *) NULL,quantum_info, GreenQuantum,pixels,exception); if (TIFFWritePixels(tiff,&tiff_info,y,1,image) == -1) break; } if (image->previous == (Image *) NULL) { status=SetImageProgress(image,SaveImageTag,200,400); if (status == MagickFalse) break; } for (y=0; y < (ssize_t) image->rows; y++) { const Quantum *magick_restrict p; p=GetVirtualPixels(image,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; length=ExportQuantumPixels(image,(CacheView *) NULL,quantum_info, BlueQuantum,pixels,exception); if (TIFFWritePixels(tiff,&tiff_info,y,2,image) == -1) break; } if (image->previous == (Image *) NULL) { status=SetImageProgress(image,SaveImageTag,300,400); if (status == MagickFalse) break; } if (image->alpha_trait != UndefinedPixelTrait) for (y=0; y < (ssize_t) image->rows; y++) { const Quantum *magick_restrict p; p=GetVirtualPixels(image,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; length=ExportQuantumPixels(image,(CacheView *) NULL, quantum_info,AlphaQuantum,pixels,exception); if (TIFFWritePixels(tiff,&tiff_info,y,3,image) == -1) break; } if (image->previous == (Image *) NULL) { status=SetImageProgress(image,SaveImageTag,400,400); if (status == MagickFalse) break; } break; } } break; } case PHOTOMETRIC_SEPARATED: { /* CMYK TIFF image. */ quantum_type=CMYKQuantum; if (image->alpha_trait != UndefinedPixelTrait) quantum_type=CMYKAQuantum; if (image->colorspace != CMYKColorspace) (void) TransformImageColorspace(image,CMYKColorspace,exception); for (y=0; y < (ssize_t) image->rows; y++) { const Quantum *magick_restrict p; p=GetVirtualPixels(image,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; length=ExportQuantumPixels(image,(CacheView *) NULL,quantum_info, quantum_type,pixels,exception); if (TIFFWritePixels(tiff,&tiff_info,y,0,image) == -1) break; if (image->previous == (Image *) NULL) { status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) break; } } break; } case PHOTOMETRIC_PALETTE: { uint16 *blue, *green, *red; /* Colormapped TIFF image. */ red=(uint16 *) AcquireQuantumMemory(65536,sizeof(*red)); green=(uint16 *) AcquireQuantumMemory(65536,sizeof(*green)); blue=(uint16 *) AcquireQuantumMemory(65536,sizeof(*blue)); if ((red == (uint16 *) NULL) || (green == (uint16 *) NULL) || (blue == (uint16 *) NULL)) { if (red != (uint16 *) NULL) red=(uint16 *) RelinquishMagickMemory(red); if (green != (uint16 *) NULL) green=(uint16 *) RelinquishMagickMemory(green); if (blue != (uint16 *) NULL) blue=(uint16 *) RelinquishMagickMemory(blue); ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); } /* Initialize TIFF colormap. 
*/ (void) memset(red,0,65536*sizeof(*red)); (void) memset(green,0,65536*sizeof(*green)); (void) memset(blue,0,65536*sizeof(*blue)); for (i=0; i < (ssize_t) image->colors; i++) { red[i]=ScaleQuantumToShort(image->colormap[i].red); green[i]=ScaleQuantumToShort(image->colormap[i].green); blue[i]=ScaleQuantumToShort(image->colormap[i].blue); } (void) TIFFSetField(tiff,TIFFTAG_COLORMAP,red,green,blue); red=(uint16 *) RelinquishMagickMemory(red); green=(uint16 *) RelinquishMagickMemory(green); blue=(uint16 *) RelinquishMagickMemory(blue); } default: { /* Convert PseudoClass packets to contiguous grayscale scanlines. */ quantum_type=IndexQuantum; if (image->alpha_trait != UndefinedPixelTrait) { if (photometric != PHOTOMETRIC_PALETTE) quantum_type=GrayAlphaQuantum; else quantum_type=IndexAlphaQuantum; } else if (photometric != PHOTOMETRIC_PALETTE) quantum_type=GrayQuantum; for (y=0; y < (ssize_t) image->rows; y++) { const Quantum *magick_restrict p; p=GetVirtualPixels(image,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; length=ExportQuantumPixels(image,(CacheView *) NULL,quantum_info, quantum_type,pixels,exception); if (TIFFWritePixels(tiff,&tiff_info,y,0,image) == -1) break; if (image->previous == (Image *) NULL) { status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) break; } } break; } } quantum_info=DestroyQuantumInfo(quantum_info); if (image->colorspace == LabColorspace) DecodeLabImage(image,exception); DestroyTIFFInfo(&tiff_info); /* TIFFPrintDirectory(tiff,stdout,MagickFalse); */ if (TIFFWriteDirectory(tiff) == 0) { status=MagickFalse; break; } image=SyncNextImageInList(image); if (image == (Image *) NULL) break; status=SetImageProgress(image,SaveImagesTag,scene++,imageListLength); if (status == MagickFalse) break; } while (adjoin != MagickFalse); TIFFClose(tiff); return(status); } #endif
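/*
  Illustrative sketch, not part of the coder: one way a MagickCore client
  could drive WriteTIFFImage() above through the public API, steering
  GetTIFFInfo() into its tiled branch via the "tiff:tile-geometry" option.
  The file names and the 256x256 geometry are placeholders and error
  handling is trimmed; the block is fenced with #if 0 so it is never
  compiled into the coder itself.
*/
#if 0
#include "MagickCore/MagickCore.h"

int main(int argc,char **argv)
{
  ExceptionInfo
    *exception;

  Image
    *images;

  ImageInfo
    *image_info;

  (void) argc;
  MagickCoreGenesis(argv[0],MagickFalse);
  exception=AcquireExceptionInfo();
  image_info=AcquireImageInfo();
  (void) CopyMagickString(image_info->filename,"input.png",MagickPathExtent);
  images=ReadImage(image_info,exception);
  if (images != (Image *) NULL)
    {
      /*
        "tiff:tile-geometry" selects the tiled path in GetTIFFInfo() and
        "tiff:endian" picks the "wl"/"wb" TIFFClientOpen() mode; the
        compression member is honored by WriteTIFFImage().
      */
      (void) SetImageOption(image_info,"tiff:tile-geometry","256x256");
      (void) SetImageOption(image_info,"tiff:endian","lsb");
      image_info->compression=LZWCompression;
      (void) WriteImages(image_info,images,"output.tif",exception);
      images=DestroyImageList(images);
    }
  image_info=DestroyImageInfo(image_info);
  exception=DestroyExceptionInfo(exception);
  MagickCoreTerminus();
  return(0);
}
#endif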
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % TTTTT IIIII FFFFF FFFFF % % T I F F % % T I FFF FFF % % T I F F % % T IIIII F F % % % % % % Read/Write TIFF Image Format % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. */ #ifdef __VMS #define JPEG_SUPPORT 1 #endif #include "MagickCore/studio.h" #include "MagickCore/artifact.h" #include "MagickCore/attribute.h" #include "MagickCore/blob.h" #include "MagickCore/blob-private.h" #include "MagickCore/cache.h" #include "MagickCore/channel.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colormap.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/constitute.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/geometry.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/magick.h" #include "MagickCore/memory_.h" #include "MagickCore/memory-private.h" #include "MagickCore/module.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/pixel-private.h" #include "MagickCore/property.h" #include "MagickCore/quantum.h" #include "MagickCore/quantum-private.h" #include "MagickCore/profile.h" #include "MagickCore/resize.h" #include "MagickCore/resource_.h" #include "MagickCore/semaphore.h" #include "MagickCore/splay-tree.h" #include "MagickCore/static.h" #include "MagickCore/statistic.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread_.h" #include "MagickCore/token.h" #include "MagickCore/utility.h" #include "psd-private.h" #if defined(MAGICKCORE_TIFF_DELEGATE) # if defined(MAGICKCORE_HAVE_TIFFCONF_H) # include <tiffconf.h> # endif # include <tiff.h> # include <tiffio.h> # if !defined(COMPRESSION_ADOBE_DEFLATE) # define COMPRESSION_ADOBE_DEFLATE 8 # endif # if !defined(PREDICTOR_HORIZONTAL) # define PREDICTOR_HORIZONTAL 2 # endif # if !defined(TIFFTAG_COPYRIGHT) # define TIFFTAG_COPYRIGHT 33432 # endif # if !defined(TIFFTAG_OPIIMAGEID) # define TIFFTAG_OPIIMAGEID 32781 # endif # if defined(COMPRESSION_ZSTD) && defined(MAGICKCORE_ZSTD_DELEGATE) # include <zstd.h> # endif #if defined(MAGICKCORE_HAVE_STDINT_H) && (TIFFLIB_VERSION >= 20201219) # undef uint16 # define uint16 uint16_t # undef uint32 # define uint32 uint32_t #endif /* Typedef declarations. 
*/ typedef enum { ReadYCCKMethod, ReadStripMethod, ReadTileMethod, ReadGenericMethod } TIFFMethodType; typedef struct _PhotoshopProfile { StringInfo *data; MagickOffsetType offset; size_t length, extent, quantum; } PhotoshopProfile; /* Global declarations. */ static MagickThreadKey tiff_exception; static SemaphoreInfo *tiff_semaphore = (SemaphoreInfo *) NULL; static TIFFErrorHandler error_handler, warning_handler; static volatile MagickBooleanType instantiate_key = MagickFalse; /* Forward declarations. */ static Image * ReadTIFFImage(const ImageInfo *,ExceptionInfo *); static MagickBooleanType WriteGROUP4Image(const ImageInfo *,Image *,ExceptionInfo *), WritePTIFImage(const ImageInfo *,Image *,ExceptionInfo *), WriteTIFFImage(const ImageInfo *,Image *,ExceptionInfo *); static MagickOffsetType TIFFSeekCustomStream(const MagickOffsetType offset, const int whence,void *user_data) { PhotoshopProfile *profile; profile=(PhotoshopProfile *) user_data; switch (whence) { case SEEK_SET: default: { if (offset < 0) return(-1); profile->offset=offset; break; } case SEEK_CUR: { if (((offset > 0) && (profile->offset > (MAGICK_SSIZE_MAX-offset))) || ((offset < 0) && (profile->offset < (MAGICK_SSIZE_MIN-offset)))) { errno=EOVERFLOW; return(-1); } if ((profile->offset+offset) < 0) return(-1); profile->offset+=offset; break; } case SEEK_END: { if (((MagickOffsetType) profile->length+offset) < 0) return(-1); profile->offset=profile->length+offset; break; } } return(profile->offset); } static MagickOffsetType TIFFTellCustomStream(void *user_data) { PhotoshopProfile *profile; profile=(PhotoshopProfile *) user_data; return(profile->offset); } static void InitPSDInfo(const Image *image,PSDInfo *info) { (void) memset(info,0,sizeof(*info)); info->version=1; info->columns=image->columns; info->rows=image->rows; info->mode=10; /* Set the mode to a value that won't change the colorspace */ info->channels=1U; info->min_channels=1U; info->has_merged_image=MagickFalse; if (image->storage_class == PseudoClass) info->mode=2; /* indexed mode */ else { info->channels=(unsigned short) image->number_channels; info->min_channels=info->channels; if (image->alpha_trait == BlendPixelTrait) info->min_channels--; } } #endif /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s T I F F % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsTIFF() returns MagickTrue if the image format type, identified by the % magick string, is TIFF. % % The format of the IsTIFF method is: % % MagickBooleanType IsTIFF(const unsigned char *magick,const size_t length) % % A description of each parameter follows: % % o magick: compare image format pattern against these bytes. % % o length: Specifies the length of the magick string. 
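%
%  A minimal usage sketch (the file handle and the byte count here are
%  hypothetical, not part of this module):
%
%      unsigned char magick[8];
%
%      if ((fread(magick,1,sizeof(magick),file) == sizeof(magick)) &&
%          (IsTIFF(magick,sizeof(magick)) != MagickFalse))
%        (void) FormatLocaleFile(stdout,"TIFF signature detected\n");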
% */ static MagickBooleanType IsTIFF(const unsigned char *magick,const size_t length) { if (length < 4) return(MagickFalse); if (memcmp(magick,"\115\115\000\052",4) == 0) return(MagickTrue); if (memcmp(magick,"\111\111\052\000",4) == 0) return(MagickTrue); #if defined(TIFF_VERSION_BIG) if (length < 8) return(MagickFalse); if (memcmp(magick,"\115\115\000\053\000\010\000\000",8) == 0) return(MagickTrue); if (memcmp(magick,"\111\111\053\000\010\000\000\000",8) == 0) return(MagickTrue); #endif return(MagickFalse); } #if defined(MAGICKCORE_TIFF_DELEGATE) /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e a d G R O U P 4 I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReadGROUP4Image() reads a raw CCITT Group 4 image file and returns it. It % allocates the memory necessary for the new Image structure and returns a % pointer to the new image. % % The format of the ReadGROUP4Image method is: % % Image *ReadGROUP4Image(const ImageInfo *image_info, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: the image info. % % o exception: return any errors or warnings in this structure. % */ static inline size_t WriteLSBLong(FILE *file,const unsigned int value) { unsigned char buffer[4]; buffer[0]=(unsigned char) value; buffer[1]=(unsigned char) (value >> 8); buffer[2]=(unsigned char) (value >> 16); buffer[3]=(unsigned char) (value >> 24); return(fwrite(buffer,1,4,file)); } static Image *ReadGROUP4Image(const ImageInfo *image_info, ExceptionInfo *exception) { char filename[MagickPathExtent]; FILE *file; Image *image; ImageInfo *read_info; int c, unique_file; MagickBooleanType status; size_t length; ssize_t offset, strip_offset; /* Open image file. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); image=AcquireImage(image_info,exception); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { image=DestroyImageList(image); return((Image *) NULL); } /* Write raw CCITT Group 4 wrapped as a TIFF image file. 
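  The wrapper is a classic little-endian TIFF: an 8-byte header ("II", 42,
  first-IFD offset 8), a 2-byte entry count of 14, fourteen 12-byte IFD
  entries, a 4-byte next-IFD offset of zero, and a single 8-byte RATIONAL
  resolution value, which is why strip_offset below is computed as
  10+(12*14)+4+8.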
*/ file=(FILE *) NULL; unique_file=AcquireUniqueFileResource(filename); if (unique_file != -1) file=fdopen(unique_file,"wb"); if ((unique_file == -1) || (file == (FILE *) NULL)) ThrowImageException(FileOpenError,"UnableToCreateTemporaryFile"); length=fwrite("\111\111\052\000\010\000\000\000\016\000",1,10,file); if (length != 10) ThrowReaderException(CorruptImageError,"UnexpectedEndOfFile"); length=fwrite("\376\000\003\000\001\000\000\000\000\000\000\000",1,12,file); length=fwrite("\000\001\004\000\001\000\000\000",1,8,file); length=WriteLSBLong(file,(unsigned int) image->columns); length=fwrite("\001\001\004\000\001\000\000\000",1,8,file); length=WriteLSBLong(file,(unsigned int) image->rows); length=fwrite("\002\001\003\000\001\000\000\000\001\000\000\000",1,12,file); length=fwrite("\003\001\003\000\001\000\000\000\004\000\000\000",1,12,file); length=fwrite("\006\001\003\000\001\000\000\000\000\000\000\000",1,12,file); length=fwrite("\021\001\003\000\001\000\000\000",1,8,file); strip_offset=10+(12*14)+4+8; length=WriteLSBLong(file,(unsigned int) strip_offset); length=fwrite("\022\001\003\000\001\000\000\000",1,8,file); length=WriteLSBLong(file,(unsigned int) image_info->orientation); length=fwrite("\025\001\003\000\001\000\000\000\001\000\000\000",1,12,file); length=fwrite("\026\001\004\000\001\000\000\000",1,8,file); length=WriteLSBLong(file,(unsigned int) image->rows); length=fwrite("\027\001\004\000\001\000\000\000\000\000\000\000",1,12,file); offset=(ssize_t) ftell(file)-4; length=fwrite("\032\001\005\000\001\000\000\000",1,8,file); length=WriteLSBLong(file,(unsigned int) (strip_offset-8)); length=fwrite("\033\001\005\000\001\000\000\000",1,8,file); length=WriteLSBLong(file,(unsigned int) (strip_offset-8)); length=fwrite("\050\001\003\000\001\000\000\000\002\000\000\000",1,12,file); length=fwrite("\000\000\000\000",1,4,file); length=WriteLSBLong(file,(unsigned int) image->resolution.x); length=WriteLSBLong(file,1); status=MagickTrue; for (length=0; (c=ReadBlobByte(image)) != EOF; length++) if (fputc(c,file) != c) status=MagickFalse; offset=(ssize_t) fseek(file,(ssize_t) offset,SEEK_SET); length=WriteLSBLong(file,(unsigned int) length); if (ferror(file) != 0) { (void) fclose(file); ThrowImageException(FileOpenError,"UnableToCreateTemporaryFile"); } (void) fclose(file); (void) CloseBlob(image); image=DestroyImage(image); /* Read TIFF image. */ read_info=CloneImageInfo((ImageInfo *) NULL); (void) FormatLocaleString(read_info->filename,MagickPathExtent,"%s",filename); image=ReadTIFFImage(read_info,exception); read_info=DestroyImageInfo(read_info); if (image != (Image *) NULL) { (void) CopyMagickString(image->filename,image_info->filename, MagickPathExtent); (void) CopyMagickString(image->magick_filename,image_info->filename, MagickPathExtent); (void) CopyMagickString(image->magick,"GROUP4",MagickPathExtent); } (void) RelinquishUniqueFileResource(filename); if (status == MagickFalse) image=DestroyImage(image); return(image); } #endif #if defined(MAGICKCORE_TIFF_DELEGATE) /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e a d T I F F I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReadTIFFImage() reads a Tagged image file and returns it. It allocates the % memory necessary for the new Image structure and returns a pointer to the % new image. 
% % The format of the ReadTIFFImage method is: % % Image *ReadTIFFImage(const ImageInfo *image_info, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: the image info. % % o exception: return any errors or warnings in this structure. % */ static inline unsigned char ClampYCC(double value) { value=255.0-value; if (value < 0.0) return((unsigned char)0); if (value > 255.0) return((unsigned char)255); return((unsigned char)(value)); } static MagickBooleanType DecodeLabImage(Image *image,ExceptionInfo *exception) { CacheView *image_view; MagickBooleanType status; ssize_t y; status=MagickTrue; image_view=AcquireAuthenticCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { Quantum *magick_restrict q; ssize_t x; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; break; } for (x=0; x < (ssize_t) image->columns; x++) { double a, b; a=QuantumScale*GetPixela(image,q)+0.5; if (a > 1.0) a-=1.0; b=QuantumScale*GetPixelb(image,q)+0.5; if (b > 1.0) b-=1.0; SetPixela(image,QuantumRange*a,q); SetPixelb(image,QuantumRange*b,q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) { status=MagickFalse; break; } } image_view=DestroyCacheView(image_view); return(status); } static MagickBooleanType ReadProfile(Image *image,const char *name, const unsigned char *datum,ssize_t length,ExceptionInfo *exception) { MagickBooleanType status; StringInfo *profile; if (length < 4) return(MagickFalse); profile=BlobToStringInfo(datum,(size_t) length); if (profile == (StringInfo *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); status=SetImageProfile(image,name,profile,exception); profile=DestroyStringInfo(profile); if (status == MagickFalse) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); return(MagickTrue); } #if defined(__cplusplus) || defined(c_plusplus) extern "C" { #endif static int TIFFCloseBlob(thandle_t image) { (void) CloseBlob((Image *) image); return(0); } static void TIFFErrors(const char *,const char *,va_list) magick_attribute((__format__ (__printf__,2,0))); static void TIFFErrors(const char *module,const char *format,va_list error) { char message[MagickPathExtent]; ExceptionInfo *exception; #if defined(MAGICKCORE_HAVE_VSNPRINTF) (void) vsnprintf(message,MagickPathExtent-2,format,error); #else (void) vsprintf(message,format,error); #endif message[MagickPathExtent-2]='\0'; (void) ConcatenateMagickString(message,".",MagickPathExtent); exception=(ExceptionInfo *) GetMagickThreadValue(tiff_exception); if (exception != (ExceptionInfo *) NULL) (void) ThrowMagickException(exception,GetMagickModule(),CoderError,message, "`%s'",module); } static toff_t TIFFGetBlobSize(thandle_t image) { return((toff_t) GetBlobSize((Image *) image)); } static MagickBooleanType TIFFGetProfiles(TIFF *tiff,Image *image, ExceptionInfo *exception) { MagickBooleanType status; uint32 length = 0; unsigned char *profile = (unsigned char *) NULL; status=MagickTrue; #if defined(TIFFTAG_ICCPROFILE) if ((TIFFGetField(tiff,TIFFTAG_ICCPROFILE,&length,&profile) == 1) && (profile != (unsigned char *) NULL)) status=ReadProfile(image,"icc",profile,(ssize_t) length,exception); #endif #if defined(TIFFTAG_PHOTOSHOP) if ((TIFFGetField(tiff,TIFFTAG_PHOTOSHOP,&length,&profile) == 1) && (profile != (unsigned char *) NULL)) status=ReadProfile(image,"8bim",profile,(ssize_t) length,exception); #endif #if 
defined(TIFFTAG_RICHTIFFIPTC) && (TIFFLIB_VERSION >= 20191103) if ((TIFFGetField(tiff,TIFFTAG_RICHTIFFIPTC,&length,&profile) == 1) && (profile != (unsigned char *) NULL)) { const TIFFField *field; field=TIFFFieldWithTag(tiff,TIFFTAG_RICHTIFFIPTC); if (TIFFFieldDataType(field) == TIFF_LONG) { if (TIFFIsByteSwapped(tiff) != 0) TIFFSwabArrayOfLong((uint32 *) profile,(size_t) length); status=ReadProfile(image,"iptc",profile,4L*length,exception); } else status=ReadProfile(image,"iptc",profile,length,exception); } #endif #if defined(TIFFTAG_XMLPACKET) if ((TIFFGetField(tiff,TIFFTAG_XMLPACKET,&length,&profile) == 1) && (profile != (unsigned char *) NULL)) { StringInfo *dng; status=ReadProfile(image,"xmp",profile,(ssize_t) length,exception); dng=BlobToStringInfo(profile,length); if (dng != (StringInfo *) NULL) { const char *target = "dc:format=\"image/dng\""; if (strstr((char *) GetStringInfoDatum(dng),target) != (char *) NULL) (void) CopyMagickString(image->magick,"DNG",MagickPathExtent); dng=DestroyStringInfo(dng); } } #endif if ((TIFFGetField(tiff,34118,&length,&profile) == 1) && (profile != (unsigned char *) NULL)) status=ReadProfile(image,"tiff:34118",profile,(ssize_t) length, exception); if ((TIFFGetField(tiff,37724,&length,&profile) == 1) && (profile != (unsigned char *) NULL)) status=ReadProfile(image,"tiff:37724",profile,(ssize_t) length,exception); return(status); } static MagickBooleanType TIFFGetProperties(TIFF *tiff,Image *image, ExceptionInfo *exception) { char message[MagickPathExtent], *text; MagickBooleanType status; uint32 count, type; text=(char *) NULL; status=MagickTrue; if ((TIFFGetField(tiff,TIFFTAG_ARTIST,&text) == 1) && (text != (char *) NULL)) status=SetImageProperty(image,"tiff:artist",text,exception); if ((TIFFGetField(tiff,TIFFTAG_COPYRIGHT,&text) == 1) && (text != (char *) NULL)) status=SetImageProperty(image,"tiff:copyright",text,exception); if ((TIFFGetField(tiff,TIFFTAG_DATETIME,&text) == 1) && (text != (char *) NULL)) status=SetImageProperty(image,"tiff:timestamp",text,exception); if ((TIFFGetField(tiff,TIFFTAG_DOCUMENTNAME,&text) == 1) && (text != (char *) NULL)) status=SetImageProperty(image,"tiff:document",text,exception); if ((TIFFGetField(tiff,TIFFTAG_HOSTCOMPUTER,&text) == 1) && (text != (char *) NULL)) status=SetImageProperty(image,"tiff:hostcomputer",text,exception); if ((TIFFGetField(tiff,TIFFTAG_IMAGEDESCRIPTION,&text) == 1) && (text != (char *) NULL)) status=SetImageProperty(image,"comment",text,exception); if ((TIFFGetField(tiff,TIFFTAG_MAKE,&text) == 1) && (text != (char *) NULL)) status=SetImageProperty(image,"tiff:make",text,exception); if ((TIFFGetField(tiff,TIFFTAG_MODEL,&text) == 1) && (text != (char *) NULL)) status=SetImageProperty(image,"tiff:model",text,exception); if ((TIFFGetField(tiff,TIFFTAG_OPIIMAGEID,&count,&text) == 1) && (text != (char *) NULL)) { if (count >= MagickPathExtent) count=MagickPathExtent-1; (void) CopyMagickString(message,text,count+1); status=SetImageProperty(image,"tiff:image-id",message,exception); } if ((TIFFGetField(tiff,TIFFTAG_PAGENAME,&text) == 1) && (text != (char *) NULL)) status=SetImageProperty(image,"label",text,exception); if ((TIFFGetField(tiff,TIFFTAG_SOFTWARE,&text) == 1) && (text != (char *) NULL)) status=SetImageProperty(image,"tiff:software",text,exception); if ((TIFFGetField(tiff,33423,&count,&text) == 1) && (text != (char *) NULL)) { if (count >= MagickPathExtent) count=MagickPathExtent-1; (void) CopyMagickString(message,text,count+1); 
status=SetImageProperty(image,"tiff:kodak-33423",message,exception); } if ((TIFFGetField(tiff,36867,&count,&text) == 1) && (text != (char *) NULL)) { if (count >= MagickPathExtent) count=MagickPathExtent-1; (void) CopyMagickString(message,text,count+1); status=SetImageProperty(image,"tiff:kodak-36867",message,exception); } if (TIFFGetField(tiff,TIFFTAG_SUBFILETYPE,&type) == 1) switch (type) { case 0x01: { status=SetImageProperty(image,"tiff:subfiletype","REDUCEDIMAGE", exception); break; } case 0x02: { status=SetImageProperty(image,"tiff:subfiletype","PAGE",exception); break; } case 0x04: { status=SetImageProperty(image,"tiff:subfiletype","MASK",exception); break; } default: break; } return(status); } static MagickBooleanType TIFFSetImageProperties(TIFF *tiff,Image *image, const char *tag,ExceptionInfo *exception) { char buffer[MagickPathExtent], filename[MagickPathExtent]; FILE *file; int unique_file; /* Set EXIF or GPS image properties. */ unique_file=AcquireUniqueFileResource(filename); file=(FILE *) NULL; if (unique_file != -1) file=fdopen(unique_file,"rb+"); if ((unique_file == -1) || (file == (FILE *) NULL)) { (void) RelinquishUniqueFileResource(filename); (void) ThrowMagickException(exception,GetMagickModule(),WandError, "UnableToCreateTemporaryFile","`%s'",filename); return(MagickFalse); } TIFFPrintDirectory(tiff,file,0); (void) fseek(file,0,SEEK_SET); while (fgets(buffer,(int) sizeof(buffer),file) != NULL) { char *p, property[MagickPathExtent], value[MagickPathExtent]; StripString(buffer); p=strchr(buffer,':'); if (p == (char *) NULL) continue; *p='\0'; (void) sprintf(property,"%s%.1024s",tag,buffer); (void) sprintf(value,"%s",p+1); StripString(value); (void) SetImageProperty(image,property,value,exception); } (void) fclose(file); (void) RelinquishUniqueFileResource(filename); return(MagickTrue); } static MagickBooleanType TIFFGetEXIFProperties(TIFF *tiff,Image *image, ExceptionInfo *exception) { #if defined(MAGICKCORE_HAVE_TIFFREADEXIFDIRECTORY) MagickBooleanType status; tdir_t directory; #if defined(TIFF_VERSION_BIG) uint64 #else uint32 #endif offset; /* Read EXIF properties. */ offset=0; if (TIFFGetField(tiff,TIFFTAG_EXIFIFD,&offset) != 1) return(MagickFalse); directory=TIFFCurrentDirectory(tiff); if (TIFFReadEXIFDirectory(tiff,offset) != 1) { TIFFSetDirectory(tiff,directory); return(MagickFalse); } status=TIFFSetImageProperties(tiff,image,"exif:",exception); TIFFSetDirectory(tiff,directory); return(status); #else (void) tiff; (void) image; return(MagickTrue); #endif } static MagickBooleanType TIFFGetGPSProperties(TIFF *tiff,Image *image, ExceptionInfo *exception) { #if defined(MAGICKCORE_HAVE_TIFFREADGPSDIRECTORY) MagickBooleanType status; tdir_t directory; #if defined(TIFF_VERSION_BIG) uint64 #else uint32 #endif offset; /* Read GPS properties. 
*/ offset=0; if (TIFFGetField(tiff,TIFFTAG_GPSIFD,&offset) != 1) return(MagickFalse); directory=TIFFCurrentDirectory(tiff); if (TIFFReadGPSDirectory(tiff,offset) != 1) { TIFFSetDirectory(tiff,directory); return(MagickFalse); } status=TIFFSetImageProperties(tiff,image,"exif:GPS",exception); TIFFSetDirectory(tiff,directory); return(status); #else magick_unreferenced(tiff); magick_unreferenced(image); magick_unreferenced(exception); return(MagickTrue); #endif } static int TIFFMapBlob(thandle_t image,tdata_t *base,toff_t *size) { *base=(tdata_t *) GetBlobStreamData((Image *) image); if (*base != (tdata_t *) NULL) *size=(toff_t) GetBlobSize((Image *) image); if (*base != (tdata_t *) NULL) return(1); return(0); } static tsize_t TIFFReadBlob(thandle_t image,tdata_t data,tsize_t size) { tsize_t count; count=(tsize_t) ReadBlob((Image *) image,(size_t) size, (unsigned char *) data); return(count); } static int32 TIFFReadPixels(TIFF *tiff,const tsample_t sample,const ssize_t row, tdata_t scanline) { int32 status; status=TIFFReadScanline(tiff,scanline,(uint32) row,sample); return(status); } static toff_t TIFFSeekBlob(thandle_t image,toff_t offset,int whence) { return((toff_t) SeekBlob((Image *) image,(MagickOffsetType) offset,whence)); } static void TIFFUnmapBlob(thandle_t image,tdata_t base,toff_t size) { (void) image; (void) base; (void) size; } static void TIFFWarnings(const char *,const char *,va_list) magick_attribute((__format__ (__printf__,2,0))); static void TIFFWarnings(const char *module,const char *format,va_list warning) { char message[MagickPathExtent]; ExceptionInfo *exception; #if defined(MAGICKCORE_HAVE_VSNPRINTF) (void) vsnprintf(message,MagickPathExtent-2,format,warning); #else (void) vsprintf(message,format,warning); #endif message[MagickPathExtent-2]='\0'; (void) ConcatenateMagickString(message,".",MagickPathExtent); exception=(ExceptionInfo *) GetMagickThreadValue(tiff_exception); if (exception != (ExceptionInfo *) NULL) (void) ThrowMagickException(exception,GetMagickModule(),CoderWarning, message,"`%s'",module); } static tsize_t TIFFWriteBlob(thandle_t image,tdata_t data,tsize_t size) { tsize_t count; count=(tsize_t) WriteBlob((Image *) image,(size_t) size, (unsigned char *) data); return(count); } static TIFFMethodType GetJPEGMethod(Image* image,TIFF *tiff,uint16 photometric, uint16 bits_per_sample,uint16 samples_per_pixel) { #define BUFFER_SIZE 2048 MagickOffsetType position, offset; size_t i; TIFFMethodType method; #if defined(TIFF_VERSION_BIG) uint64 #else uint32 #endif *value; unsigned char buffer[BUFFER_SIZE+32]; unsigned short length; /* Only support 8 bit for now. */ if ((photometric != PHOTOMETRIC_SEPARATED) || (bits_per_sample != 8) || (samples_per_pixel != 4)) return(ReadGenericMethod); /* Search for Adobe APP14 JPEG marker. 
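  The scan walks JPEG markers at the start of the first strip; an Adobe
  APP14 marker (0xEE) is 14 bytes long and its final transform byte selects
  the color transform: 0 is CMYK, 1 is YCbCr, and 2 is YCCK, the only case
  that requires the ReadYCCKMethod conversion below.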
*/ value=NULL; if (!TIFFGetField(tiff,TIFFTAG_STRIPOFFSETS,&value) || (value == NULL)) return(ReadStripMethod); position=TellBlob(image); offset=(MagickOffsetType) (value[0]); if (SeekBlob(image,offset,SEEK_SET) != offset) return(ReadStripMethod); method=ReadStripMethod; if (ReadBlob(image,BUFFER_SIZE,buffer) == BUFFER_SIZE) { for (i=0; i < BUFFER_SIZE; i++) { while (i < BUFFER_SIZE) { if (buffer[i++] == 255) break; } while (i < BUFFER_SIZE) { if (buffer[++i] != 255) break; } if (buffer[i++] == 216) /* JPEG_MARKER_SOI */ continue; length=(unsigned short) (((unsigned int) (buffer[i] << 8) | (unsigned int) buffer[i+1]) & 0xffff); if (i+(size_t) length >= BUFFER_SIZE) break; if (buffer[i-1] == 238) /* JPEG_MARKER_APP0+14 */ { if (length != 14) break; /* 0 == CMYK, 1 == YCbCr, 2 = YCCK */ if (buffer[i+13] == 2) method=ReadYCCKMethod; break; } i+=(size_t) length; } } (void) SeekBlob(image,position,SEEK_SET); return(method); } static ssize_t TIFFReadCustomStream(unsigned char *data,const size_t count, void *user_data) { PhotoshopProfile *profile; size_t total; MagickOffsetType remaining; if (count == 0) return(0); profile=(PhotoshopProfile *) user_data; remaining=(MagickOffsetType) profile->length-profile->offset; if (remaining <= 0) return(-1); total=MagickMin(count, (size_t) remaining); (void) memcpy(data,profile->data->datum+profile->offset,total); profile->offset+=total; return(total); } static CustomStreamInfo *TIFFAcquireCustomStreamForReading( PhotoshopProfile *profile,ExceptionInfo *exception) { CustomStreamInfo *custom_stream; custom_stream=AcquireCustomStreamInfo(exception); if (custom_stream == (CustomStreamInfo *) NULL) return(custom_stream); SetCustomStreamData(custom_stream,(void *) profile); SetCustomStreamReader(custom_stream,TIFFReadCustomStream); SetCustomStreamSeeker(custom_stream,TIFFSeekCustomStream); SetCustomStreamTeller(custom_stream,TIFFTellCustomStream); return(custom_stream); } static void TIFFReadPhotoshopLayers(const ImageInfo *image_info,Image *image, ExceptionInfo *exception) { const char *option; const StringInfo *profile; CustomStreamInfo *custom_stream; Image *layers; ImageInfo *clone_info; PhotoshopProfile photoshop_profile; PSDInfo info; ssize_t i; if (GetImageListLength(image) != 1) return; if ((image_info->number_scenes == 1) && (image_info->scene == 0)) return; option=GetImageOption(image_info,"tiff:ignore-layers"); if (option != (const char * ) NULL) return; profile=GetImageProfile(image,"tiff:37724"); if (profile == (const StringInfo *) NULL) return; for (i=0; i < (ssize_t) profile->length-8; i++) { if (LocaleNCompare((const char *) (profile->datum+i), image->endian == MSBEndian ? "8BIM" : "MIB8",4) != 0) continue; i+=4; if ((LocaleNCompare((const char *) (profile->datum+i), image->endian == MSBEndian ? "Layr" : "ryaL",4) == 0) || (LocaleNCompare((const char *) (profile->datum+i), image->endian == MSBEndian ? "LMsk" : "ksML",4) == 0) || (LocaleNCompare((const char *) (profile->datum+i), image->endian == MSBEndian ? "Lr16" : "61rL",4) == 0) || (LocaleNCompare((const char *) (profile->datum+i), image->endian == MSBEndian ? 
"Lr32" : "23rL",4) == 0)) break; } i+=4; if (i >= (ssize_t) (profile->length-8)) return; photoshop_profile.data=(StringInfo *) profile; photoshop_profile.length=profile->length; custom_stream=TIFFAcquireCustomStreamForReading(&photoshop_profile,exception); if (custom_stream == (CustomStreamInfo *) NULL) return; layers=CloneImage(image,0,0,MagickTrue,exception); if (layers == (Image *) NULL) { custom_stream=DestroyCustomStreamInfo(custom_stream); return; } (void) DeleteImageProfile(layers,"tiff:37724"); AttachCustomStream(layers->blob,custom_stream); SeekBlob(layers,(MagickOffsetType) i,SEEK_SET); InitPSDInfo(layers,&info); clone_info=CloneImageInfo(image_info); clone_info->number_scenes=0; (void) ReadPSDLayers(layers,clone_info,&info,exception); clone_info=DestroyImageInfo(clone_info); DeleteImageFromList(&layers); if (layers != (Image *) NULL) { SetImageArtifact(image,"tiff:has-layers","true"); AppendImageToList(&image,layers); while (layers != (Image *) NULL) { SetImageArtifact(layers,"tiff:has-layers","true"); DetachBlob(layers->blob); layers=GetNextImageInList(layers); } } custom_stream=DestroyCustomStreamInfo(custom_stream); } #if defined(__cplusplus) || defined(c_plusplus) } #endif static Image *ReadTIFFImage(const ImageInfo *image_info, ExceptionInfo *exception) { #define ThrowTIFFException(severity,message) \ { \ if (pixel_info != (MemoryInfo *) NULL) \ pixel_info=RelinquishVirtualMemory(pixel_info); \ if (quantum_info != (QuantumInfo *) NULL) \ quantum_info=DestroyQuantumInfo(quantum_info); \ TIFFClose(tiff); \ ThrowReaderException(severity,message); \ } const char *option; float *chromaticity, x_position, y_position, x_resolution, y_resolution; Image *image; int tiff_status; MagickBooleanType more_frames; MagickSizeType number_pixels; MagickStatusType status; MemoryInfo *pixel_info = (MemoryInfo *) NULL; QuantumInfo *quantum_info; QuantumType quantum_type; ssize_t i, scanline_size, y; TIFF *tiff; TIFFMethodType method; uint16 compress_tag, bits_per_sample, endian, extra_samples, interlace, max_sample_value, min_sample_value, orientation, pages, photometric, *sample_info, sample_format, samples_per_pixel, units, value; uint32 height, rows_per_strip, width; unsigned char *pixels; void *sans[4] = { NULL, NULL, NULL, NULL }; /* Open image. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); image=AcquireImage(image_info,exception); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { image=DestroyImageList(image); return((Image *) NULL); } (void) SetMagickThreadValue(tiff_exception,exception); tiff=TIFFClientOpen(image->filename,"rb",(thandle_t) image,TIFFReadBlob, TIFFWriteBlob,TIFFSeekBlob,TIFFCloseBlob,TIFFGetBlobSize,TIFFMapBlob, TIFFUnmapBlob); if (tiff == (TIFF *) NULL) { image=DestroyImageList(image); return((Image *) NULL); } if (exception->severity > ErrorException) { TIFFClose(tiff); image=DestroyImageList(image); return((Image *) NULL); } if (image_info->number_scenes != 0) { /* Generate blank images for subimage specification (e.g. image.tif[4]. We need to check the number of directores because it is possible that the subimage(s) are stored in the photoshop profile. 
*/ if (image_info->scene < (size_t) TIFFNumberOfDirectories(tiff)) { for (i=0; i < (ssize_t) image_info->scene; i++) { status=TIFFReadDirectory(tiff) != 0 ? MagickTrue : MagickFalse; if (status == MagickFalse) { TIFFClose(tiff); image=DestroyImageList(image); return((Image *) NULL); } AcquireNextImage(image_info,image,exception); if (GetNextImageInList(image) == (Image *) NULL) { TIFFClose(tiff); image=DestroyImageList(image); return((Image *) NULL); } image=SyncNextImageInList(image); } } } more_frames=MagickTrue; do { /* TIFFPrintDirectory(tiff,stdout,MagickFalse); */ photometric=PHOTOMETRIC_RGB; if ((TIFFGetField(tiff,TIFFTAG_IMAGEWIDTH,&width) != 1) || (TIFFGetField(tiff,TIFFTAG_IMAGELENGTH,&height) != 1) || (TIFFGetFieldDefaulted(tiff,TIFFTAG_PHOTOMETRIC,&photometric,sans) != 1) || (TIFFGetFieldDefaulted(tiff,TIFFTAG_COMPRESSION,&compress_tag,sans) != 1) || (TIFFGetFieldDefaulted(tiff,TIFFTAG_FILLORDER,&endian,sans) != 1) || (TIFFGetFieldDefaulted(tiff,TIFFTAG_PLANARCONFIG,&interlace,sans) != 1) || (TIFFGetFieldDefaulted(tiff,TIFFTAG_SAMPLESPERPIXEL,&samples_per_pixel,sans) != 1) || (TIFFGetFieldDefaulted(tiff,TIFFTAG_BITSPERSAMPLE,&bits_per_sample,sans) != 1) || (TIFFGetFieldDefaulted(tiff,TIFFTAG_SAMPLEFORMAT,&sample_format,sans) != 1) || (TIFFGetFieldDefaulted(tiff,TIFFTAG_MINSAMPLEVALUE,&min_sample_value,sans) != 1) || (TIFFGetFieldDefaulted(tiff,TIFFTAG_MAXSAMPLEVALUE,&max_sample_value,sans) != 1)) { TIFFClose(tiff); ThrowReaderException(CorruptImageError,"ImproperImageHeader"); } if (((sample_format != SAMPLEFORMAT_IEEEFP) || (bits_per_sample != 64)) && ((bits_per_sample <= 0) || (bits_per_sample > 32))) { TIFFClose(tiff); ThrowReaderException(CorruptImageError,"UnsupportedBitsPerPixel"); } if (samples_per_pixel > MaxPixelChannels) { TIFFClose(tiff); ThrowReaderException(CorruptImageError,"MaximumChannelsExceeded"); } if (sample_format == SAMPLEFORMAT_IEEEFP) (void) SetImageProperty(image,"quantum:format","floating-point", exception); switch (photometric) { case PHOTOMETRIC_MINISBLACK: { (void) SetImageProperty(image,"tiff:photometric","min-is-black", exception); break; } case PHOTOMETRIC_MINISWHITE: { (void) SetImageProperty(image,"tiff:photometric","min-is-white", exception); break; } case PHOTOMETRIC_PALETTE: { (void) SetImageProperty(image,"tiff:photometric","palette",exception); break; } case PHOTOMETRIC_RGB: { (void) SetImageProperty(image,"tiff:photometric","RGB",exception); break; } case PHOTOMETRIC_CIELAB: { (void) SetImageProperty(image,"tiff:photometric","CIELAB",exception); break; } case PHOTOMETRIC_LOGL: { (void) SetImageProperty(image,"tiff:photometric","CIE Log2(L)", exception); break; } case PHOTOMETRIC_LOGLUV: { (void) SetImageProperty(image,"tiff:photometric","LOGLUV",exception); break; } #if defined(PHOTOMETRIC_MASK) case PHOTOMETRIC_MASK: { (void) SetImageProperty(image,"tiff:photometric","MASK",exception); break; } #endif case PHOTOMETRIC_SEPARATED: { (void) SetImageProperty(image,"tiff:photometric","separated",exception); break; } case PHOTOMETRIC_YCBCR: { (void) SetImageProperty(image,"tiff:photometric","YCBCR",exception); break; } default: { (void) SetImageProperty(image,"tiff:photometric","unknown",exception); break; } } if (image->debug != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(),"Geometry: %ux%u", (unsigned int) width,(unsigned int) height); (void) LogMagickEvent(CoderEvent,GetMagickModule(),"Interlace: %u", interlace); (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Bits per sample: %u",bits_per_sample); (void) 
LogMagickEvent(CoderEvent,GetMagickModule(), "Min sample value: %u",min_sample_value); (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Max sample value: %u",max_sample_value); (void) LogMagickEvent(CoderEvent,GetMagickModule(),"Photometric " "interpretation: %s",GetImageProperty(image,"tiff:photometric", exception)); } image->columns=(size_t) width; image->rows=(size_t) height; image->depth=(size_t) bits_per_sample; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(),"Image depth: %.20g", (double) image->depth); image->endian=MSBEndian; if (endian == FILLORDER_LSB2MSB) image->endian=LSBEndian; #if defined(MAGICKCORE_HAVE_TIFFISBIGENDIAN) if (TIFFIsBigEndian(tiff) == 0) { (void) SetImageProperty(image,"tiff:endian","lsb",exception); image->endian=LSBEndian; } else { (void) SetImageProperty(image,"tiff:endian","msb",exception); image->endian=MSBEndian; } #endif if ((photometric == PHOTOMETRIC_MINISBLACK) || (photometric == PHOTOMETRIC_MINISWHITE)) image->colorspace=GRAYColorspace; if (photometric == PHOTOMETRIC_SEPARATED) image->colorspace=CMYKColorspace; if (photometric == PHOTOMETRIC_CIELAB) image->colorspace=LabColorspace; if ((photometric == PHOTOMETRIC_YCBCR) && (compress_tag != COMPRESSION_JPEG)) image->colorspace=YCbCrColorspace; status=TIFFGetProfiles(tiff,image,exception); if (status == MagickFalse) { TIFFClose(tiff); return(DestroyImageList(image)); } status=TIFFGetProperties(tiff,image,exception); if (status == MagickFalse) { TIFFClose(tiff); return(DestroyImageList(image)); } option=GetImageOption(image_info,"tiff:exif-properties"); if (IsStringFalse(option) == MagickFalse) /* enabled by default */ (void) TIFFGetEXIFProperties(tiff,image,exception); option=GetImageOption(image_info,"tiff:gps-properties"); if (IsStringFalse(option) == MagickFalse) /* enabled by default */ (void) TIFFGetGPSProperties(tiff,image,exception); if ((TIFFGetFieldDefaulted(tiff,TIFFTAG_XRESOLUTION,&x_resolution,sans) == 1) && (TIFFGetFieldDefaulted(tiff,TIFFTAG_YRESOLUTION,&y_resolution,sans) == 1)) { image->resolution.x=x_resolution; image->resolution.y=y_resolution; } if (TIFFGetFieldDefaulted(tiff,TIFFTAG_RESOLUTIONUNIT,&units,sans,sans) == 1) { if (units == RESUNIT_INCH) image->units=PixelsPerInchResolution; if (units == RESUNIT_CENTIMETER) image->units=PixelsPerCentimeterResolution; } if ((TIFFGetFieldDefaulted(tiff,TIFFTAG_XPOSITION,&x_position,sans) == 1) && (TIFFGetFieldDefaulted(tiff,TIFFTAG_YPOSITION,&y_position,sans) == 1)) { image->page.x=CastDoubleToLong(ceil(x_position* image->resolution.x-0.5)); image->page.y=CastDoubleToLong(ceil(y_position* image->resolution.y-0.5)); } if (TIFFGetFieldDefaulted(tiff,TIFFTAG_ORIENTATION,&orientation,sans) == 1) image->orientation=(OrientationType) orientation; if (TIFFGetField(tiff,TIFFTAG_WHITEPOINT,&chromaticity) == 1) { if ((chromaticity != (float *) NULL) && (*chromaticity != 0.0)) { image->chromaticity.white_point.x=chromaticity[0]; image->chromaticity.white_point.y=chromaticity[1]; } } if (TIFFGetField(tiff,TIFFTAG_PRIMARYCHROMATICITIES,&chromaticity) == 1) { if ((chromaticity != (float *) NULL) && (*chromaticity != 0.0)) { image->chromaticity.red_primary.x=chromaticity[0]; image->chromaticity.red_primary.y=chromaticity[1]; image->chromaticity.green_primary.x=chromaticity[2]; image->chromaticity.green_primary.y=chromaticity[3]; image->chromaticity.blue_primary.x=chromaticity[4]; image->chromaticity.blue_primary.y=chromaticity[5]; } } #if defined(MAGICKCORE_HAVE_TIFFISCODECCONFIGURED) || (TIFFLIB_VERSION > 
20040919) if ((compress_tag != COMPRESSION_NONE) && (TIFFIsCODECConfigured(compress_tag) == 0)) { TIFFClose(tiff); ThrowReaderException(CoderError,"CompressNotSupported"); } #endif switch (compress_tag) { case COMPRESSION_NONE: image->compression=NoCompression; break; case COMPRESSION_CCITTFAX3: image->compression=FaxCompression; break; case COMPRESSION_CCITTFAX4: image->compression=Group4Compression; break; case COMPRESSION_JPEG: { image->compression=JPEGCompression; #if defined(JPEG_SUPPORT) { char sampling_factor[MagickPathExtent]; uint16 horizontal, vertical; tiff_status=TIFFGetField(tiff,TIFFTAG_YCBCRSUBSAMPLING,&horizontal, &vertical); if (tiff_status == 1) { (void) FormatLocaleString(sampling_factor,MagickPathExtent, "%dx%d",horizontal,vertical); (void) SetImageProperty(image,"jpeg:sampling-factor", sampling_factor,exception); (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Sampling Factors: %s",sampling_factor); } } #endif break; } case COMPRESSION_OJPEG: image->compression=JPEGCompression; break; #if defined(COMPRESSION_LZMA) case COMPRESSION_LZMA: image->compression=LZMACompression; break; #endif case COMPRESSION_LZW: image->compression=LZWCompression; break; case COMPRESSION_DEFLATE: image->compression=ZipCompression; break; case COMPRESSION_ADOBE_DEFLATE: image->compression=ZipCompression; break; #if defined(COMPRESSION_WEBP) case COMPRESSION_WEBP: image->compression=WebPCompression; break; #endif #if defined(COMPRESSION_ZSTD) case COMPRESSION_ZSTD: image->compression=ZstdCompression; break; #endif default: image->compression=RLECompression; break; } quantum_info=(QuantumInfo *) NULL; if ((photometric == PHOTOMETRIC_PALETTE) && (pow(2.0,1.0*bits_per_sample) <= MaxColormapSize)) { size_t colors; colors=(size_t) GetQuantumRange(bits_per_sample)+1; if (AcquireImageColormap(image,colors,exception) == MagickFalse) { TIFFClose(tiff); ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); } } value=(unsigned short) image->scene; if (TIFFGetFieldDefaulted(tiff,TIFFTAG_PAGENUMBER,&value,&pages,sans) == 1) image->scene=value; if (image->storage_class == PseudoClass) { size_t range; uint16 *blue_colormap, *green_colormap, *red_colormap; /* Initialize colormap. */ tiff_status=TIFFGetField(tiff,TIFFTAG_COLORMAP,&red_colormap, &green_colormap,&blue_colormap); if (tiff_status == 1) { if ((red_colormap != (uint16 *) NULL) && (green_colormap != (uint16 *) NULL) && (blue_colormap != (uint16 *) NULL)) { range=255; /* might be old style 8-bit colormap */ for (i=0; i < (ssize_t) image->colors; i++) if ((red_colormap[i] >= 256) || (green_colormap[i] >= 256) || (blue_colormap[i] >= 256)) { range=65535; break; } for (i=0; i < (ssize_t) image->colors; i++) { image->colormap[i].red=ClampToQuantum(((double) QuantumRange*red_colormap[i])/range); image->colormap[i].green=ClampToQuantum(((double) QuantumRange*green_colormap[i])/range); image->colormap[i].blue=ClampToQuantum(((double) QuantumRange*blue_colormap[i])/range); } } } } if (image_info->ping != MagickFalse) { if (image_info->number_scenes != 0) if (image->scene >= (image_info->scene+image_info->number_scenes-1)) break; goto next_tiff_frame; } status=SetImageExtent(image,image->columns,image->rows,exception); if (status == MagickFalse) { TIFFClose(tiff); return(DestroyImageList(image)); } status=SetImageColorspace(image,image->colorspace,exception); status&=ResetImagePixels(image,exception); if (status == MagickFalse) { TIFFClose(tiff); return(DestroyImageList(image)); } /* Allocate memory for the image and pixel buffer. 
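  The buffer is sized in 32-bit units to cover at least one decoded
  scanline and rows_per_strip full rows of samples; a scanline size far in
  excess of that is treated as corruption.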
*/ quantum_info=AcquireQuantumInfo(image_info,image); if (quantum_info == (QuantumInfo *) NULL) ThrowTIFFException(ResourceLimitError,"MemoryAllocationFailed"); if (sample_format == SAMPLEFORMAT_UINT) status=SetQuantumFormat(image,quantum_info,UnsignedQuantumFormat); if (sample_format == SAMPLEFORMAT_INT) status=SetQuantumFormat(image,quantum_info,SignedQuantumFormat); if (sample_format == SAMPLEFORMAT_IEEEFP) status=SetQuantumFormat(image,quantum_info,FloatingPointQuantumFormat); if (status == MagickFalse) ThrowTIFFException(ResourceLimitError,"MemoryAllocationFailed"); status=MagickTrue; switch (photometric) { case PHOTOMETRIC_MINISBLACK: { quantum_info->min_is_white=MagickFalse; break; } case PHOTOMETRIC_MINISWHITE: { quantum_info->min_is_white=MagickTrue; break; } default: break; } extra_samples=0; tiff_status=TIFFGetFieldDefaulted(tiff,TIFFTAG_EXTRASAMPLES,&extra_samples, &sample_info,sans); if (tiff_status == 1) { (void) SetImageProperty(image,"tiff:alpha","unspecified",exception); if (extra_samples == 0) { if ((samples_per_pixel == 4) && (photometric == PHOTOMETRIC_RGB)) image->alpha_trait=BlendPixelTrait; } else for (i=0; i < extra_samples; i++) { image->alpha_trait=BlendPixelTrait; if (sample_info[i] == EXTRASAMPLE_ASSOCALPHA) { SetQuantumAlphaType(quantum_info,AssociatedQuantumAlpha); (void) SetImageProperty(image,"tiff:alpha","associated", exception); } else if (sample_info[i] == EXTRASAMPLE_UNASSALPHA) { SetQuantumAlphaType(quantum_info,DisassociatedQuantumAlpha); (void) SetImageProperty(image,"tiff:alpha","unassociated", exception); } } } if (image->alpha_trait != UndefinedPixelTrait) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception); method=ReadGenericMethod; rows_per_strip=(uint32) image->rows; if (TIFFGetField(tiff,TIFFTAG_ROWSPERSTRIP,&rows_per_strip) == 1) { char buffer[MagickPathExtent]; (void) FormatLocaleString(buffer,MagickPathExtent,"%u", (unsigned int) rows_per_strip); (void) SetImageProperty(image,"tiff:rows-per-strip",buffer,exception); method=ReadStripMethod; if (rows_per_strip > (uint32) image->rows) rows_per_strip=(uint32) image->rows; } if (TIFFIsTiled(tiff) != MagickFalse) { uint32 columns, rows; if ((TIFFGetField(tiff,TIFFTAG_TILEWIDTH,&columns) != 1) || (TIFFGetField(tiff,TIFFTAG_TILELENGTH,&rows) != 1)) ThrowTIFFException(CoderError,"ImageIsNotTiled"); if ((AcquireMagickResource(WidthResource,columns) == MagickFalse) || (AcquireMagickResource(HeightResource,rows) == MagickFalse)) ThrowTIFFException(ImageError,"WidthOrHeightExceedsLimit"); method=ReadTileMethod; } if ((photometric == PHOTOMETRIC_LOGLUV) || (compress_tag == COMPRESSION_CCITTFAX3)) method=ReadGenericMethod; if (image->compression == JPEGCompression) method=GetJPEGMethod(image,tiff,photometric,bits_per_sample, samples_per_pixel); quantum_info->endian=LSBEndian; scanline_size=TIFFScanlineSize(tiff); if (scanline_size <= 0) ThrowTIFFException(ResourceLimitError,"MemoryAllocationFailed"); number_pixels=MagickMax((MagickSizeType) image->columns*samples_per_pixel* pow(2.0,ceil(log(bits_per_sample)/log(2.0))),image->columns* rows_per_strip); if ((double) scanline_size > 1.5*number_pixels) ThrowTIFFException(CorruptImageError,"CorruptImage"); number_pixels=MagickMax((MagickSizeType) scanline_size,number_pixels); pixel_info=AcquireVirtualMemory(number_pixels,sizeof(uint32)); if (pixel_info == (MemoryInfo *) NULL) ThrowTIFFException(ResourceLimitError,"MemoryAllocationFailed"); pixels=(unsigned char *) GetVirtualMemoryBlob(pixel_info); (void) memset(pixels,0,number_pixels*sizeof(uint32)); 
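/* Select the import quantum type and row padding from the sample layout. */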
quantum_type=GrayQuantum; if (image->storage_class == PseudoClass) quantum_type=IndexQuantum; if (interlace != PLANARCONFIG_SEPARATE) { size_t pad; pad=(size_t) MagickMax((ssize_t) samples_per_pixel-1,0); if (image->alpha_trait != UndefinedPixelTrait) { if (image->storage_class == PseudoClass) quantum_type=IndexAlphaQuantum; else quantum_type=samples_per_pixel == 1 ? AlphaQuantum : GrayAlphaQuantum; } if ((samples_per_pixel > 2) && (interlace != PLANARCONFIG_SEPARATE)) { quantum_type=RGBQuantum; pad=(size_t) MagickMax((size_t) samples_per_pixel-3,0); if (image->alpha_trait != UndefinedPixelTrait) { quantum_type=RGBAQuantum; pad=(size_t) MagickMax((size_t) samples_per_pixel-4,0); } if (image->colorspace == CMYKColorspace) { quantum_type=CMYKQuantum; pad=(size_t) MagickMax((size_t) samples_per_pixel-4,0); if (image->alpha_trait != UndefinedPixelTrait) { quantum_type=CMYKAQuantum; pad=(size_t) MagickMax((size_t) samples_per_pixel-5,0); } } status=SetQuantumPad(image,quantum_info,pad*((bits_per_sample+7) >> 3)); if (status == MagickFalse) ThrowTIFFException(ResourceLimitError,"MemoryAllocationFailed"); } } switch (method) { case ReadYCCKMethod: { /* Convert YCC TIFF image. */ for (y=0; y < (ssize_t) image->rows; y++) { Quantum *magick_restrict q; ssize_t x; unsigned char *p; tiff_status=TIFFReadPixels(tiff,0,y,(char *) pixels); if (tiff_status == -1) break; q=QueueAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) break; p=pixels; for (x=0; x < (ssize_t) image->columns; x++) { SetPixelCyan(image,ScaleCharToQuantum(ClampYCC((double) *p+ (1.402*(double) *(p+2))-179.456)),q); SetPixelMagenta(image,ScaleCharToQuantum(ClampYCC((double) *p- (0.34414*(double) *(p+1))-(0.71414*(double ) *(p+2))+ 135.45984)),q); SetPixelYellow(image,ScaleCharToQuantum(ClampYCC((double) *p+ (1.772*(double) *(p+1))-226.816)),q); SetPixelBlack(image,ScaleCharToQuantum((unsigned char) *(p+3)),q); q+=GetPixelChannels(image); p+=4; } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; if (image->previous == (Image *) NULL) { status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) break; } } break; } case ReadStripMethod: { unsigned char *p; size_t extent; ssize_t stride, strip_id; tsize_t strip_size; unsigned char *strip_pixels; /* Convert stripped TIFF image. 
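  For PLANARCONFIG_SEPARATE data the outer loop below runs once per sample
  plane (red, green, blue, then alpha or black); for contiguous data it
  exits after the first pass.  Each strip is decoded with
  TIFFReadEncodedStrip() and imported one row at a time.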
*/ extent=4*TIFFStripSize(tiff); #if defined(TIFF_VERSION_BIG) extent+=image->columns*sizeof(uint64); #else extent+=image->columns*sizeof(uint32); #endif strip_pixels=(unsigned char *) AcquireQuantumMemory(extent, sizeof(*strip_pixels)); if (strip_pixels == (unsigned char *) NULL) ThrowTIFFException(ResourceLimitError,"MemoryAllocationFailed"); (void) memset(strip_pixels,0,extent*sizeof(*strip_pixels)); stride=TIFFVStripSize(tiff,1); strip_id=0; p=strip_pixels; for (i=0; i < (ssize_t) samples_per_pixel; i++) { size_t rows_remaining; switch (i) { case 0: break; case 1: quantum_type=GreenQuantum; break; case 2: quantum_type=BlueQuantum; break; case 3: { quantum_type=AlphaQuantum; if (image->colorspace == CMYKColorspace) quantum_type=BlackQuantum; break; } case 4: quantum_type=AlphaQuantum; break; default: break; } rows_remaining=0; for (y=0; y < (ssize_t) image->rows; y++) { Quantum *magick_restrict q; q=GetAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) break; if (rows_remaining == 0) { strip_size=TIFFReadEncodedStrip(tiff,strip_id,strip_pixels, TIFFStripSize(tiff)); if (strip_size == -1) break; rows_remaining=rows_per_strip; if ((y+rows_per_strip) > (ssize_t) image->rows) rows_remaining=(rows_per_strip-(y+rows_per_strip- image->rows)); p=strip_pixels; strip_id++; } (void) ImportQuantumPixels(image,(CacheView *) NULL, quantum_info,quantum_type,p,exception); p+=stride; rows_remaining--; if (SyncAuthenticPixels(image,exception) == MagickFalse) break; if (image->previous == (Image *) NULL) { status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) break; } } if ((samples_per_pixel > 1) && (interlace != PLANARCONFIG_SEPARATE)) break; } strip_pixels=(unsigned char *) RelinquishMagickMemory(strip_pixels); break; } case ReadTileMethod: { unsigned char *p; size_t extent; uint32 columns, rows; unsigned char *tile_pixels; /* Convert tiled TIFF image. 
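  The image is traversed in tile-sized blocks; tiles on the right and
  bottom edges are clipped to columns_remaining and rows_remaining so that
  only pixels inside the canvas are imported.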
*/ if ((TIFFGetField(tiff,TIFFTAG_TILEWIDTH,&columns) != 1) || (TIFFGetField(tiff,TIFFTAG_TILELENGTH,&rows) != 1)) ThrowTIFFException(CoderError,"ImageIsNotTiled"); number_pixels=(MagickSizeType) columns*rows; if (HeapOverflowSanityCheck(rows,sizeof(*tile_pixels)) != MagickFalse) ThrowTIFFException(ResourceLimitError,"MemoryAllocationFailed"); extent=TIFFTileSize(tiff); #if defined(TIFF_VERSION_BIG) extent+=columns*sizeof(uint64); #else extent+=columns*sizeof(uint32); #endif tile_pixels=(unsigned char *) AcquireQuantumMemory(extent, sizeof(*tile_pixels)); if (tile_pixels == (unsigned char *) NULL) ThrowTIFFException(ResourceLimitError,"MemoryAllocationFailed"); (void) memset(tile_pixels,0,extent*sizeof(*tile_pixels)); for (i=0; i < (ssize_t) samples_per_pixel; i++) { switch (i) { case 0: break; case 1: quantum_type=GreenQuantum; break; case 2: quantum_type=BlueQuantum; break; case 3: { quantum_type=AlphaQuantum; if (image->colorspace == CMYKColorspace) quantum_type=BlackQuantum; break; } case 4: quantum_type=AlphaQuantum; break; default: break; } for (y=0; y < (ssize_t) image->rows; y+=rows) { ssize_t x; size_t rows_remaining; rows_remaining=image->rows-y; if ((ssize_t) (y+rows) < (ssize_t) image->rows) rows_remaining=rows; for (x=0; x < (ssize_t) image->columns; x+=columns) { size_t columns_remaining, row; columns_remaining=image->columns-x; if ((ssize_t) (x+columns) < (ssize_t) image->columns) columns_remaining=columns; if (TIFFReadTile(tiff,tile_pixels,(uint32) x,(uint32) y,0,i) == 0) break; p=tile_pixels; for (row=0; row < rows_remaining; row++) { Quantum *magick_restrict q; q=GetAuthenticPixels(image,x,y+row,columns_remaining,1, exception); if (q == (Quantum *) NULL) break; (void) ImportQuantumPixels(image,(CacheView *) NULL, quantum_info,quantum_type,p,exception); p+=TIFFTileRowSize(tiff); if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } } } if ((samples_per_pixel > 1) && (interlace != PLANARCONFIG_SEPARATE)) break; if (image->previous == (Image *) NULL) { status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) i, samples_per_pixel); if (status == MagickFalse) break; } } tile_pixels=(unsigned char *) RelinquishMagickMemory(tile_pixels); break; } case ReadGenericMethod: default: { MemoryInfo *generic_info = (MemoryInfo * ) NULL; uint32 *p; uint32 *pixels; /* Convert generic TIFF image. 
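  TIFFReadRGBAImage() decodes the entire frame to 8-bit RGBA with a
  bottom-left origin, so the raster is walked backwards to restore
  top-down row order.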
*/ if (HeapOverflowSanityCheck(image->rows,sizeof(*pixels)) != MagickFalse) ThrowTIFFException(ResourceLimitError,"MemoryAllocationFailed"); number_pixels=(MagickSizeType) image->columns*image->rows; #if defined(TIFF_VERSION_BIG) number_pixels+=image->columns*sizeof(uint64); #else number_pixels+=image->columns*sizeof(uint32); #endif generic_info=AcquireVirtualMemory(number_pixels,sizeof(uint32)); if (generic_info == (MemoryInfo *) NULL) ThrowTIFFException(ResourceLimitError,"MemoryAllocationFailed"); pixels=(uint32 *) GetVirtualMemoryBlob(generic_info); (void) TIFFReadRGBAImage(tiff,(uint32) image->columns,(uint32) image->rows,(uint32 *) pixels,0); p=pixels+(image->columns*image->rows)-1; for (y=0; y < (ssize_t) image->rows; y++) { ssize_t x; Quantum *magick_restrict q; q=QueueAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) break; q+=GetPixelChannels(image)*(image->columns-1); for (x=0; x < (ssize_t) image->columns; x++) { SetPixelRed(image,ScaleCharToQuantum((unsigned char) TIFFGetR(*p)),q); SetPixelGreen(image,ScaleCharToQuantum((unsigned char) TIFFGetG(*p)),q); SetPixelBlue(image,ScaleCharToQuantum((unsigned char) TIFFGetB(*p)),q); if (image->alpha_trait != UndefinedPixelTrait) SetPixelAlpha(image,ScaleCharToQuantum((unsigned char) TIFFGetA(*p)),q); p--; q-=GetPixelChannels(image); } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; if (image->previous == (Image *) NULL) { status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) break; } } generic_info=RelinquishVirtualMemory(generic_info); break; } } pixel_info=RelinquishVirtualMemory(pixel_info); SetQuantumImageType(image,quantum_type); next_tiff_frame: if (quantum_info != (QuantumInfo *) NULL) quantum_info=DestroyQuantumInfo(quantum_info); if (photometric == PHOTOMETRIC_CIELAB) DecodeLabImage(image,exception); if ((photometric == PHOTOMETRIC_LOGL) || (photometric == PHOTOMETRIC_MINISBLACK) || (photometric == PHOTOMETRIC_MINISWHITE)) { image->type=GrayscaleType; if (bits_per_sample == 1) image->type=BilevelType; } /* Proceed to next image. */ if (image_info->number_scenes != 0) if (image->scene >= (image_info->scene+image_info->number_scenes-1)) break; more_frames=TIFFReadDirectory(tiff) != 0 ? MagickTrue : MagickFalse; if (more_frames != MagickFalse) { /* Allocate next image structure. */ AcquireNextImage(image_info,image,exception); if (GetNextImageInList(image) == (Image *) NULL) { status=MagickFalse; break; } image=SyncNextImageInList(image); status=SetImageProgress(image,LoadImagesTag,image->scene-1, image->scene); if (status == MagickFalse) break; } } while ((status != MagickFalse) && (more_frames != MagickFalse)); TIFFClose(tiff); if (status != MagickFalse) TIFFReadPhotoshopLayers(image_info,image,exception); if ((image_info->number_scenes != 0) && (image_info->scene >= GetImageListLength(image))) status=MagickFalse; if (status == MagickFalse) return(DestroyImageList(image)); return(GetFirstImageInList(image)); } #endif /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e g i s t e r T I F F I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RegisterTIFFImage() adds properties for the TIFF image format to % the list of supported formats. 
The properties include the image format % tag, a method to read and/or write the format, whether the format % supports the saving of more than one frame to the same file or blob, % whether the format supports native in-memory I/O, and a brief % description of the format. % % The format of the RegisterTIFFImage method is: % % size_t RegisterTIFFImage(void) % */ #if defined(MAGICKCORE_TIFF_DELEGATE) #if defined(MAGICKCORE_HAVE_TIFFMERGEFIELDINFO) && defined(MAGICKCORE_HAVE_TIFFSETTAGEXTENDER) static TIFFExtendProc tag_extender = (TIFFExtendProc) NULL; static void TIFFIgnoreTags(TIFF *tiff) { char *q; const char *p, *tags; Image *image; ssize_t i; size_t count; TIFFFieldInfo *ignore; if (TIFFGetReadProc(tiff) != TIFFReadBlob) return; image=(Image *)TIFFClientdata(tiff); tags=GetImageArtifact(image,"tiff:ignore-tags"); if (tags == (const char *) NULL) return; count=0; p=tags; while (*p != '\0') { while ((isspace((int) ((unsigned char) *p)) != 0)) p++; (void) strtol(p,&q,10); if (p == q) return; p=q; count++; while ((isspace((int) ((unsigned char) *p)) != 0) || (*p == ',')) p++; } if (count == 0) return; i=0; p=tags; ignore=(TIFFFieldInfo *) AcquireQuantumMemory(count,sizeof(*ignore)); if (ignore == (TIFFFieldInfo *) NULL) return; /* This also sets field_bit to 0 (FIELD_IGNORE). */ (void) memset(ignore,0,count*sizeof(*ignore)); while (*p != '\0') { while ((isspace((int) ((unsigned char) *p)) != 0)) p++; ignore[i].field_tag=(ttag_t) strtol(p,&q,10); p=q; i++; while ((isspace((int) ((unsigned char) *p)) != 0) || (*p == ',')) p++; } (void) TIFFMergeFieldInfo(tiff,ignore,(uint32) count); ignore=(TIFFFieldInfo *) RelinquishMagickMemory(ignore); } static void TIFFTagExtender(TIFF *tiff) { static const TIFFFieldInfo TIFFExtensions[] = { { 37724, -3, -3, TIFF_UNDEFINED, FIELD_CUSTOM, 1, 1, (char *) "PhotoshopLayerData" }, { 34118, -3, -3, TIFF_UNDEFINED, FIELD_CUSTOM, 1, 1, (char *) "Microscope" } }; TIFFMergeFieldInfo(tiff,TIFFExtensions,sizeof(TIFFExtensions)/ sizeof(*TIFFExtensions)); if (tag_extender != (TIFFExtendProc) NULL) (*tag_extender)(tiff); TIFFIgnoreTags(tiff); } #endif #endif ModuleExport size_t RegisterTIFFImage(void) { #define TIFFDescription "Tagged Image File Format" char version[MagickPathExtent]; MagickInfo *entry; #if defined(MAGICKCORE_TIFF_DELEGATE) if (tiff_semaphore == (SemaphoreInfo *) NULL) ActivateSemaphoreInfo(&tiff_semaphore); LockSemaphoreInfo(tiff_semaphore); if (instantiate_key == MagickFalse) { if (CreateMagickThreadKey(&tiff_exception,NULL) == MagickFalse) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); error_handler=TIFFSetErrorHandler(TIFFErrors); warning_handler=TIFFSetWarningHandler(TIFFWarnings); #if defined(MAGICKCORE_HAVE_TIFFMERGEFIELDINFO) && defined(MAGICKCORE_HAVE_TIFFSETTAGEXTENDER) if (tag_extender == (TIFFExtendProc) NULL) tag_extender=TIFFSetTagExtender(TIFFTagExtender); #endif instantiate_key=MagickTrue; } UnlockSemaphoreInfo(tiff_semaphore); #endif *version='\0'; #if defined(TIFF_VERSION) (void) FormatLocaleString(version,MagickPathExtent,"%d",TIFF_VERSION); #endif #if defined(MAGICKCORE_TIFF_DELEGATE) { const char *p; ssize_t i; p=TIFFGetVersion(); for (i=0; (i < (MagickPathExtent-1)) && (*p != 0) && (*p != '\n'); i++) version[i]=(*p++); version[i]='\0'; } #endif entry=AcquireMagickInfo("TIFF","GROUP4","Raw CCITT Group4"); #if defined(MAGICKCORE_TIFF_DELEGATE) entry->decoder=(DecodeImageHandler *) ReadGROUP4Image; entry->encoder=(EncodeImageHandler *) WriteGROUP4Image; #endif entry->flags|=CoderRawSupportFlag; 
entry->flags|=CoderEndianSupportFlag; entry->flags|=CoderDecoderSeekableStreamFlag; entry->flags|=CoderEncoderSeekableStreamFlag; entry->flags^=CoderAdjoinFlag; entry->flags^=CoderUseExtensionFlag; entry->format_type=ImplicitFormatType; entry->mime_type=ConstantString("image/tiff"); (void) RegisterMagickInfo(entry); entry=AcquireMagickInfo("TIFF","PTIF","Pyramid encoded TIFF"); #if defined(MAGICKCORE_TIFF_DELEGATE) entry->decoder=(DecodeImageHandler *) ReadTIFFImage; entry->encoder=(EncodeImageHandler *) WritePTIFImage; #endif entry->flags|=CoderEndianSupportFlag; entry->flags|=CoderDecoderSeekableStreamFlag; entry->flags|=CoderEncoderSeekableStreamFlag; entry->flags^=CoderUseExtensionFlag; entry->mime_type=ConstantString("image/tiff"); (void) RegisterMagickInfo(entry); entry=AcquireMagickInfo("TIFF","TIF",TIFFDescription); #if defined(MAGICKCORE_TIFF_DELEGATE) entry->decoder=(DecodeImageHandler *) ReadTIFFImage; entry->encoder=(EncodeImageHandler *) WriteTIFFImage; #endif entry->flags|=CoderEndianSupportFlag; entry->flags|=CoderDecoderSeekableStreamFlag; entry->flags|=CoderEncoderSeekableStreamFlag; entry->flags|=CoderStealthFlag; entry->flags^=CoderUseExtensionFlag; if (*version != '\0') entry->version=ConstantString(version); entry->mime_type=ConstantString("image/tiff"); (void) RegisterMagickInfo(entry); entry=AcquireMagickInfo("TIFF","TIFF",TIFFDescription); #if defined(MAGICKCORE_TIFF_DELEGATE) entry->decoder=(DecodeImageHandler *) ReadTIFFImage; entry->encoder=(EncodeImageHandler *) WriteTIFFImage; #endif entry->magick=(IsImageFormatHandler *) IsTIFF; entry->flags|=CoderEndianSupportFlag; entry->flags|=CoderDecoderSeekableStreamFlag; entry->flags|=CoderEncoderSeekableStreamFlag; entry->flags^=CoderUseExtensionFlag; if (*version != '\0') entry->version=ConstantString(version); entry->mime_type=ConstantString("image/tiff"); (void) RegisterMagickInfo(entry); entry=AcquireMagickInfo("TIFF","TIFF64","Tagged Image File Format (64-bit)"); #if defined(TIFF_VERSION_BIG) entry->decoder=(DecodeImageHandler *) ReadTIFFImage; entry->encoder=(EncodeImageHandler *) WriteTIFFImage; #endif entry->flags|=CoderEndianSupportFlag; entry->flags|=CoderDecoderSeekableStreamFlag; entry->flags|=CoderEncoderSeekableStreamFlag; entry->flags^=CoderUseExtensionFlag; if (*version != '\0') entry->version=ConstantString(version); entry->mime_type=ConstantString("image/tiff"); (void) RegisterMagickInfo(entry); return(MagickImageCoderSignature); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % U n r e g i s t e r T I F F I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % UnregisterTIFFImage() removes format registrations made by the TIFF module % from the list of supported formats. 
%
%  The format of the UnregisterTIFFImage method is:
%
%      UnregisterTIFFImage(void)
%
*/
ModuleExport void UnregisterTIFFImage(void)
{
  (void) UnregisterMagickInfo("TIFF64");
  (void) UnregisterMagickInfo("TIFF");
  (void) UnregisterMagickInfo("TIF");
  (void) UnregisterMagickInfo("PTIF");
#if defined(MAGICKCORE_TIFF_DELEGATE)
  if (tiff_semaphore == (SemaphoreInfo *) NULL)
    ActivateSemaphoreInfo(&tiff_semaphore);
  LockSemaphoreInfo(tiff_semaphore);
  if (instantiate_key != MagickFalse)
    {
#if defined(MAGICKCORE_HAVE_TIFFMERGEFIELDINFO) && defined(MAGICKCORE_HAVE_TIFFSETTAGEXTENDER)
      /*
        Restore the tag extender that was active before RegisterTIFFImage()
        installed TIFFTagExtender().
      */
      (void) TIFFSetTagExtender(tag_extender);
      tag_extender=(TIFFExtendProc) NULL;
#endif
      if (DeleteMagickThreadKey(tiff_exception) == MagickFalse)
        ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
      (void) TIFFSetWarningHandler(warning_handler);
      (void) TIFFSetErrorHandler(error_handler);
      instantiate_key=MagickFalse;
    }
  UnlockSemaphoreInfo(tiff_semaphore);
  RelinquishSemaphoreInfo(&tiff_semaphore);
#endif
}

#if defined(MAGICKCORE_TIFF_DELEGATE)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   W r i t e G R O U P 4 I m a g e                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  WriteGROUP4Image() writes an image in the raw CCITT Group 4 image format.
%
%  The format of the WriteGROUP4Image method is:
%
%      MagickBooleanType WriteGROUP4Image(const ImageInfo *image_info,
%        Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
%    o image: The image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType WriteGROUP4Image(const ImageInfo *image_info,
  Image *image,ExceptionInfo *exception)
{
  char
    filename[MagickPathExtent];

  FILE
    *file;

  Image
    *huffman_image;

  ImageInfo
    *write_info;

  int
    unique_file;

  MagickBooleanType
    status;

  ssize_t
    i;

  ssize_t
    count;

  TIFF
    *tiff;

  toff_t
    *byte_count,
    strip_size;

  unsigned char
    *buffer;

  /*
    Write image as CCITT Group4 TIFF image to a temporary file.
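    The strategy: the image is first written as a Group4-compressed TIFF
    through WriteTIFFImage(), and the raw CCITT strips of that temporary
    file are then copied verbatim into the output blob, shedding the TIFF
    container so only the bare Group4 bitstream remains.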
  */
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception);
  if (status == MagickFalse)
    return(status);
  huffman_image=CloneImage(image,0,0,MagickTrue,exception);
  if (huffman_image == (Image *) NULL)
    {
      (void) CloseBlob(image);
      return(MagickFalse);
    }
  huffman_image->endian=MSBEndian;
  file=(FILE *) NULL;
  unique_file=AcquireUniqueFileResource(filename);
  if (unique_file != -1)
    file=fdopen(unique_file,"wb");
  if ((unique_file == -1) || (file == (FILE *) NULL))
    {
      huffman_image=DestroyImage(huffman_image);
      (void) CloseBlob(image);
      ThrowFileException(exception,FileOpenError,"UnableToCreateTemporaryFile",
        filename);
      return(MagickFalse);
    }
  (void) FormatLocaleString(huffman_image->filename,MagickPathExtent,"tiff:%s",
    filename);
  if (IsImageMonochrome(image) == MagickFalse)
    (void) SetImageType(huffman_image,BilevelType,exception);
  write_info=CloneImageInfo((ImageInfo *) NULL);
  SetImageInfoFile(write_info,file);
  if (IsImageMonochrome(image) == MagickFalse)
    (void) SetImageType(image,BilevelType,exception);
  (void) SetImageDepth(image,1,exception);
  write_info->compression=Group4Compression;
  write_info->type=BilevelType;
  status=WriteTIFFImage(write_info,huffman_image,exception);
  (void) fflush(file);
  write_info=DestroyImageInfo(write_info);
  if (status == MagickFalse)
    {
      huffman_image=DestroyImage(huffman_image);
      (void) fclose(file);
      (void) RelinquishUniqueFileResource(filename);
      return(MagickFalse);
    }
  tiff=TIFFOpen(filename,"rb");
  if (tiff == (TIFF *) NULL)
    {
      huffman_image=DestroyImage(huffman_image);
      (void) fclose(file);
      (void) RelinquishUniqueFileResource(filename);
      ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
        image_info->filename);
      return(MagickFalse);
    }
  /*
    Allocate raw strip buffer.
  */
  if (TIFFGetField(tiff,TIFFTAG_STRIPBYTECOUNTS,&byte_count) != 1)
    {
      TIFFClose(tiff);
      huffman_image=DestroyImage(huffman_image);
      (void) fclose(file);
      (void) RelinquishUniqueFileResource(filename);
      return(MagickFalse);
    }
  strip_size=byte_count[0];
  for (i=1; i < (ssize_t) TIFFNumberOfStrips(tiff); i++)
    if (byte_count[i] > strip_size)
      strip_size=byte_count[i];
  buffer=(unsigned char *) AcquireQuantumMemory((size_t) strip_size,
    sizeof(*buffer));
  if (buffer == (unsigned char *) NULL)
    {
      TIFFClose(tiff);
      huffman_image=DestroyImage(huffman_image);
      (void) fclose(file);
      (void) RelinquishUniqueFileResource(filename);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image_info->filename);
    }
  /*
    Compress runlength encoded to 2D Huffman pixels.
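    Each raw strip is copied with TIFFReadRawStrip() and written with
    WriteBlob() without decoding; the buffer above was sized to the largest
    strip so a single allocation serves every strip.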
  */
  for (i=0; i < (ssize_t) TIFFNumberOfStrips(tiff); i++)
  {
    count=(ssize_t) TIFFReadRawStrip(tiff,(uint32) i,buffer,strip_size);
    if (count < 0)
      {
        /*
          TIFFReadRawStrip() returns -1 on error; stop rather than pass a
          negative count to WriteBlob() as a huge unsigned length.
        */
        status=MagickFalse;
        break;
      }
    if (WriteBlob(image,(size_t) count,buffer) != count)
      status=MagickFalse;
  }
  buffer=(unsigned char *) RelinquishMagickMemory(buffer);
  TIFFClose(tiff);
  huffman_image=DestroyImage(huffman_image);
  (void) fclose(file);
  (void) RelinquishUniqueFileResource(filename);
  (void) CloseBlob(image);
  return(status);
}
#endif

#if defined(MAGICKCORE_TIFF_DELEGATE)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   W r i t e P T I F I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  WritePTIFImage() writes an image in the pyramid-encoded Tagged image file
%  format.
%
%  The format of the WritePTIFImage method is:
%
%      MagickBooleanType WritePTIFImage(const ImageInfo *image_info,
%        Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
%    o image: The image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType WritePTIFImage(const ImageInfo *image_info,
  Image *image,ExceptionInfo *exception)
{
  Image
    *images,
    *next,
    *pyramid_image;

  ImageInfo
    *write_info;

  MagickBooleanType
    status;

  PointInfo
    resolution;

  size_t
    columns,
    rows;

  /*
    Create pyramid-encoded TIFF image.
  */
  images=NewImageList();
  for (next=image; next != (Image *) NULL; next=GetNextImageInList(next))
  {
    Image
      *clone_image;

    clone_image=CloneImage(next,0,0,MagickFalse,exception);
    if (clone_image == (Image *) NULL)
      break;
    clone_image->previous=NewImageList();
    clone_image->next=NewImageList();
    (void) SetImageProperty(clone_image,"tiff:subfiletype","none",exception);
    AppendImageToList(&images,clone_image);
    columns=next->columns;
    rows=next->rows;
    resolution=next->resolution;
    while ((columns > 64) && (rows > 64))
    {
      columns/=2;
      rows/=2;
      resolution.x/=2;
      resolution.y/=2;
      pyramid_image=ResizeImage(next,columns,rows,image->filter,exception);
      if (pyramid_image == (Image *) NULL)
        break;
      DestroyBlob(pyramid_image);
      pyramid_image->blob=ReferenceBlob(next->blob);
      pyramid_image->resolution=resolution;
      (void) SetImageProperty(pyramid_image,"tiff:subfiletype","REDUCEDIMAGE",
        exception);
      AppendImageToList(&images,pyramid_image);
    }
  }
  status=MagickFalse;
  if (images != (Image *) NULL)
    {
      /*
        Write pyramid-encoded TIFF image.
      */
      images=GetFirstImageInList(images);
      write_info=CloneImageInfo(image_info);
      write_info->adjoin=MagickTrue;
      (void) CopyMagickString(write_info->magick,"TIFF",MagickPathExtent);
      (void) CopyMagickString(images->magick,"TIFF",MagickPathExtent);
      status=WriteTIFFImage(write_info,images,exception);
      images=DestroyImageList(images);
      write_info=DestroyImageInfo(write_info);
    }
  return(status);
}
#endif

#if defined(MAGICKCORE_TIFF_DELEGATE)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   W r i t e T I F F I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  WriteTIFFImage() writes an image in the Tagged image file format.
%
%  The format of the WriteTIFFImage method is:
%
%      MagickBooleanType WriteTIFFImage(const ImageInfo *image_info,
%        Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
%    o image: The image.
%
%    o exception: return any errors or warnings in this structure.
% */ typedef struct _TIFFInfo { RectangleInfo tile_geometry; unsigned char *scanline, *scanlines, *pixels; } TIFFInfo; static void DestroyTIFFInfo(TIFFInfo *tiff_info) { assert(tiff_info != (TIFFInfo *) NULL); if (tiff_info->scanlines != (unsigned char *) NULL) tiff_info->scanlines=(unsigned char *) RelinquishMagickMemory( tiff_info->scanlines); if (tiff_info->pixels != (unsigned char *) NULL) tiff_info->pixels=(unsigned char *) RelinquishMagickMemory( tiff_info->pixels); } static MagickBooleanType EncodeLabImage(Image *image,ExceptionInfo *exception) { CacheView *image_view; MagickBooleanType status; ssize_t y; status=MagickTrue; image_view=AcquireAuthenticCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { Quantum *magick_restrict q; ssize_t x; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; break; } for (x=0; x < (ssize_t) image->columns; x++) { double a, b; a=QuantumScale*GetPixela(image,q)-0.5; if (a < 0.0) a+=1.0; b=QuantumScale*GetPixelb(image,q)-0.5; if (b < 0.0) b+=1.0; SetPixela(image,QuantumRange*a,q); SetPixelb(image,QuantumRange*b,q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) { status=MagickFalse; break; } } image_view=DestroyCacheView(image_view); return(status); } static MagickBooleanType GetTIFFInfo(const ImageInfo *image_info, TIFF *tiff,TIFFInfo *tiff_info) { #define TIFFStripSizeDefault 1048576 const char *option; MagickStatusType flags; uint32 tile_columns, tile_rows; assert(tiff_info != (TIFFInfo *) NULL); (void) memset(tiff_info,0,sizeof(*tiff_info)); option=GetImageOption(image_info,"tiff:tile-geometry"); if (option == (const char *) NULL) { size_t extent; uint32 rows, rows_per_strip; extent=TIFFScanlineSize(tiff); rows_per_strip=TIFFStripSizeDefault/(extent == 0 ? 1 : (uint32) extent); rows_per_strip=16*(((rows_per_strip < 16 ? 16 : rows_per_strip)+1)/16); TIFFGetField(tiff,TIFFTAG_IMAGELENGTH,&rows); if (rows_per_strip > rows) rows_per_strip=rows; option=GetImageOption(image_info,"tiff:rows-per-strip"); if (option != (const char *) NULL) rows_per_strip=(uint32) strtoul(option,(char **) NULL,10); rows_per_strip=TIFFDefaultStripSize(tiff,rows_per_strip); (void) TIFFSetField(tiff,TIFFTAG_ROWSPERSTRIP,rows_per_strip); return(MagickTrue); } /* Create tiled TIFF, ignore "tiff:rows-per-strip". 
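    TIFFDefaultTileSize() clamps the requested geometry to dimensions
    libtiff accepts (tile width and height must be multiples of 16), so the
    geometry recorded in tiff_info may differ from the option value.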
  */
  flags=ParseAbsoluteGeometry(option,&tiff_info->tile_geometry);
  if ((flags & HeightValue) == 0)
    tiff_info->tile_geometry.height=tiff_info->tile_geometry.width;
  tile_columns=(uint32) tiff_info->tile_geometry.width;
  tile_rows=(uint32) tiff_info->tile_geometry.height;
  TIFFDefaultTileSize(tiff,&tile_columns,&tile_rows);
  (void) TIFFSetField(tiff,TIFFTAG_TILEWIDTH,tile_columns);
  (void) TIFFSetField(tiff,TIFFTAG_TILELENGTH,tile_rows);
  tiff_info->tile_geometry.width=tile_columns;
  tiff_info->tile_geometry.height=tile_rows;
  if ((TIFFScanlineSize(tiff) <= 0) || (TIFFTileSize(tiff) <= 0))
    {
      DestroyTIFFInfo(tiff_info);
      return(MagickFalse);
    }
  tiff_info->scanlines=(unsigned char *) AcquireQuantumMemory((size_t)
    tile_rows*TIFFScanlineSize(tiff),sizeof(*tiff_info->scanlines));
  tiff_info->pixels=(unsigned char *) AcquireQuantumMemory((size_t)
    tile_rows*TIFFTileSize(tiff),sizeof(*tiff_info->pixels));
  if ((tiff_info->scanlines == (unsigned char *) NULL) ||
      (tiff_info->pixels == (unsigned char *) NULL))
    {
      DestroyTIFFInfo(tiff_info);
      return(MagickFalse);
    }
  return(MagickTrue);
}

static int32 TIFFWritePixels(TIFF *tiff,TIFFInfo *tiff_info,ssize_t row,
  tsample_t sample,Image *image)
{
  int32
    status;

  ssize_t
    i;

  unsigned char
    *p,
    *q;

  size_t
    number_tiles,
    tile_width;

  ssize_t
    bytes_per_pixel,
    j,
    k,
    l;

  if (TIFFIsTiled(tiff) == 0)
    return(TIFFWriteScanline(tiff,tiff_info->scanline,(uint32) row,sample));
  /*
    Fill scanlines to tile height.
  */
  i=(ssize_t) (row % tiff_info->tile_geometry.height)*TIFFScanlineSize(tiff);
  (void) memcpy(tiff_info->scanlines+i,(char *) tiff_info->scanline,
    (size_t) TIFFScanlineSize(tiff));
  if (((size_t) (row % tiff_info->tile_geometry.height) !=
      (tiff_info->tile_geometry.height-1)) &&
      (row != (ssize_t) (image->rows-1)))
    return(0);
  /*
    Write tile to TIFF image.
  */
  status=0;
  bytes_per_pixel=TIFFTileSize(tiff)/(ssize_t) (
    tiff_info->tile_geometry.height*tiff_info->tile_geometry.width);
  number_tiles=(image->columns+tiff_info->tile_geometry.width)/
    tiff_info->tile_geometry.width;
  for (i=0; i < (ssize_t) number_tiles; i++)
  {
    tile_width=(i == (ssize_t) (number_tiles-1)) ?
image->columns-(i* tiff_info->tile_geometry.width) : tiff_info->tile_geometry.width; for (j=0; j < (ssize_t) ((row % tiff_info->tile_geometry.height)+1); j++) for (k=0; k < (ssize_t) tile_width; k++) { if (bytes_per_pixel == 0) { p=tiff_info->scanlines+(j*TIFFScanlineSize(tiff)+(i* tiff_info->tile_geometry.width+k)/8); q=tiff_info->pixels+(j*TIFFTileRowSize(tiff)+k/8); *q++=(*p++); continue; } p=tiff_info->scanlines+(j*TIFFScanlineSize(tiff)+(i* tiff_info->tile_geometry.width+k)*bytes_per_pixel); q=tiff_info->pixels+(j*TIFFTileRowSize(tiff)+k*bytes_per_pixel); for (l=0; l < bytes_per_pixel; l++) *q++=(*p++); } if ((i*tiff_info->tile_geometry.width) != image->columns) status=TIFFWriteTile(tiff,tiff_info->pixels,(uint32) (i* tiff_info->tile_geometry.width),(uint32) ((row/ tiff_info->tile_geometry.height)*tiff_info->tile_geometry.height),0, sample); if (status < 0) break; } return(status); } static ssize_t TIFFWriteCustomStream(unsigned char *data,const size_t count, void *user_data) { PhotoshopProfile *profile; if (count == 0) return(0); profile=(PhotoshopProfile *) user_data; if ((profile->offset+(MagickOffsetType) count) >= (MagickOffsetType) profile->extent) { profile->extent+=count+profile->quantum; profile->quantum<<=1; SetStringInfoLength(profile->data,profile->extent); } (void) memcpy(profile->data->datum+profile->offset,data,count); profile->offset+=count; return(count); } static CustomStreamInfo *TIFFAcquireCustomStreamForWriting( PhotoshopProfile *profile,ExceptionInfo *exception) { CustomStreamInfo *custom_stream; custom_stream=AcquireCustomStreamInfo(exception); if (custom_stream == (CustomStreamInfo *) NULL) return(custom_stream); SetCustomStreamData(custom_stream,(void *) profile); SetCustomStreamWriter(custom_stream,TIFFWriteCustomStream); SetCustomStreamSeeker(custom_stream,TIFFSeekCustomStream); SetCustomStreamTeller(custom_stream,TIFFTellCustomStream); return(custom_stream); } static MagickBooleanType TIFFWritePhotoshopLayers(Image* image, const ImageInfo *image_info,EndianType endian,ExceptionInfo *exception) { BlobInfo *blob; CustomStreamInfo *custom_stream; Image *base_image, *next; ImageInfo *clone_info; MagickBooleanType status; PhotoshopProfile profile; PSDInfo info; StringInfo *layers; base_image=CloneImage(image,0,0,MagickFalse,exception); if (base_image == (Image *) NULL) return(MagickTrue); clone_info=CloneImageInfo(image_info); if (clone_info == (ImageInfo *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); profile.offset=0; profile.quantum=MagickMinBlobExtent; layers=AcquireStringInfo(profile.quantum); if (layers == (StringInfo *) NULL) { base_image=DestroyImage(base_image); clone_info=DestroyImageInfo(clone_info); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } profile.data=layers; profile.extent=layers->length; custom_stream=TIFFAcquireCustomStreamForWriting(&profile,exception); if (custom_stream == (CustomStreamInfo *) NULL) { base_image=DestroyImage(base_image); clone_info=DestroyImageInfo(clone_info); layers=DestroyStringInfo(layers); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } blob=CloneBlobInfo((BlobInfo *) NULL); if (blob == (BlobInfo *) NULL) { base_image=DestroyImage(base_image); clone_info=DestroyImageInfo(clone_info); layers=DestroyStringInfo(layers); custom_stream=DestroyCustomStreamInfo(custom_stream); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } DestroyBlob(base_image); 
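  /*
    Swap the blob the clone inherited from the source image for a fresh one
    attached to the custom stream below, so the PSD layer data is written
    into the in-memory profile buffer rather than to a file.
  */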
base_image->blob=blob; next=base_image; while (next != (Image *) NULL) next=SyncNextImageInList(next); AttachCustomStream(base_image->blob,custom_stream); InitPSDInfo(image,&info); base_image->endian=endian; WriteBlobString(base_image,"Adobe Photoshop Document Data Block"); WriteBlobByte(base_image,0); WriteBlobString(base_image,base_image->endian == LSBEndian ? "MIB8ryaL" : "8BIMLayr"); status=WritePSDLayers(base_image,clone_info,&info,exception); if (status != MagickFalse) { SetStringInfoLength(layers,(size_t) profile.offset); status=SetImageProfile(image,"tiff:37724",layers,exception); } next=base_image; while (next != (Image *) NULL) { CloseBlob(next); next=next->next; } layers=DestroyStringInfo(layers); clone_info=DestroyImageInfo(clone_info); custom_stream=DestroyCustomStreamInfo(custom_stream); return(status); } static void TIFFSetProfiles(TIFF *tiff,Image *image) { const char *name; const StringInfo *profile; if (image->profiles == (void *) NULL) return; ResetImageProfileIterator(image); for (name=GetNextImageProfile(image); name != (const char *) NULL; ) { profile=GetImageProfile(image,name); if (GetStringInfoLength(profile) == 0) { name=GetNextImageProfile(image); continue; } #if defined(TIFFTAG_XMLPACKET) if (LocaleCompare(name,"xmp") == 0) (void) TIFFSetField(tiff,TIFFTAG_XMLPACKET,(uint32) GetStringInfoLength( profile),GetStringInfoDatum(profile)); #endif #if defined(TIFFTAG_ICCPROFILE) if (LocaleCompare(name,"icc") == 0) (void) TIFFSetField(tiff,TIFFTAG_ICCPROFILE,(uint32) GetStringInfoLength( profile),GetStringInfoDatum(profile)); #endif if (LocaleCompare(name,"iptc") == 0) { size_t length; StringInfo *iptc_profile; iptc_profile=CloneStringInfo(profile); length=GetStringInfoLength(profile)+4-(GetStringInfoLength(profile) & 0x03); SetStringInfoLength(iptc_profile,length); if (TIFFIsByteSwapped(tiff)) TIFFSwabArrayOfLong((uint32 *) GetStringInfoDatum(iptc_profile), (unsigned long) (length/4)); (void) TIFFSetField(tiff,TIFFTAG_RICHTIFFIPTC,(uint32) GetStringInfoLength(iptc_profile)/4,GetStringInfoDatum(iptc_profile)); iptc_profile=DestroyStringInfo(iptc_profile); } #if defined(TIFFTAG_PHOTOSHOP) if (LocaleCompare(name,"8bim") == 0) (void) TIFFSetField(tiff,TIFFTAG_PHOTOSHOP,(uint32) GetStringInfoLength(profile),GetStringInfoDatum(profile)); #endif if (LocaleCompare(name,"tiff:37724") == 0) (void) TIFFSetField(tiff,37724,(uint32) GetStringInfoLength(profile), GetStringInfoDatum(profile)); if (LocaleCompare(name,"tiff:34118") == 0) (void) TIFFSetField(tiff,34118,(uint32) GetStringInfoLength(profile), GetStringInfoDatum(profile)); name=GetNextImageProfile(image); } } static void TIFFSetProperties(TIFF *tiff,const MagickBooleanType adjoin, Image *image,ExceptionInfo *exception) { const char *value; value=GetImageArtifact(image,"tiff:document"); if (value != (const char *) NULL) (void) TIFFSetField(tiff,TIFFTAG_DOCUMENTNAME,value); value=GetImageArtifact(image,"tiff:hostcomputer"); if (value != (const char *) NULL) (void) TIFFSetField(tiff,TIFFTAG_HOSTCOMPUTER,value); value=GetImageArtifact(image,"tiff:artist"); if (value != (const char *) NULL) (void) TIFFSetField(tiff,TIFFTAG_ARTIST,value); value=GetImageArtifact(image,"tiff:timestamp"); if (value != (const char *) NULL) (void) TIFFSetField(tiff,TIFFTAG_DATETIME,value); value=GetImageArtifact(image,"tiff:make"); if (value != (const char *) NULL) (void) TIFFSetField(tiff,TIFFTAG_MAKE,value); value=GetImageArtifact(image,"tiff:model"); if (value != (const char *) NULL) (void) TIFFSetField(tiff,TIFFTAG_MODEL,value); 
value=GetImageArtifact(image,"tiff:software"); if (value != (const char *) NULL) (void) TIFFSetField(tiff,TIFFTAG_SOFTWARE,value); value=GetImageArtifact(image,"tiff:copyright"); if (value != (const char *) NULL) (void) TIFFSetField(tiff,TIFFTAG_COPYRIGHT,value); value=GetImageArtifact(image,"kodak-33423"); if (value != (const char *) NULL) (void) TIFFSetField(tiff,33423,value); value=GetImageArtifact(image,"kodak-36867"); if (value != (const char *) NULL) (void) TIFFSetField(tiff,36867,value); value=GetImageProperty(image,"label",exception); if (value != (const char *) NULL) (void) TIFFSetField(tiff,TIFFTAG_PAGENAME,value); value=GetImageProperty(image,"comment",exception); if (value != (const char *) NULL) (void) TIFFSetField(tiff,TIFFTAG_IMAGEDESCRIPTION,value); value=GetImageArtifact(image,"tiff:subfiletype"); if (value != (const char *) NULL) { if (LocaleCompare(value,"REDUCEDIMAGE") == 0) (void) TIFFSetField(tiff,TIFFTAG_SUBFILETYPE,FILETYPE_REDUCEDIMAGE); else if (LocaleCompare(value,"PAGE") == 0) (void) TIFFSetField(tiff,TIFFTAG_SUBFILETYPE,FILETYPE_PAGE); else if (LocaleCompare(value,"MASK") == 0) (void) TIFFSetField(tiff,TIFFTAG_SUBFILETYPE,FILETYPE_MASK); } else { uint16 page, pages; page=(uint16) image->scene; pages=(uint16) GetImageListLength(image); if ((adjoin != MagickFalse) && (pages > 1)) (void) TIFFSetField(tiff,TIFFTAG_SUBFILETYPE,FILETYPE_PAGE); (void) TIFFSetField(tiff,TIFFTAG_PAGENUMBER,page,pages); } } static MagickBooleanType WriteTIFFImage(const ImageInfo *image_info, Image *image,ExceptionInfo *exception) { const char *mode, *option; CompressionType compression; EndianType endian_type; MagickBooleanType adjoin, preserve_compression, status; MagickOffsetType scene; QuantumInfo *quantum_info; QuantumType quantum_type; ssize_t i; size_t imageListLength, length; ssize_t y; TIFF *tiff; TIFFInfo tiff_info; uint16 bits_per_sample, compress_tag, endian, photometric, predictor; unsigned char *pixels; void *sans[2] = { NULL, NULL }; /* Open TIFF file. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception); if (status == MagickFalse) return(status); (void) SetMagickThreadValue(tiff_exception,exception); endian_type=(HOST_FILLORDER == FILLORDER_LSB2MSB) ? LSBEndian : MSBEndian; option=GetImageOption(image_info,"tiff:endian"); if (option != (const char *) NULL) { if (LocaleNCompare(option,"msb",3) == 0) endian_type=MSBEndian; if (LocaleNCompare(option,"lsb",3) == 0) endian_type=LSBEndian; } mode=endian_type == LSBEndian ? "wl" : "wb"; #if defined(TIFF_VERSION_BIG) if (LocaleCompare(image_info->magick,"TIFF64") == 0) mode=endian_type == LSBEndian ? 
"wl8" : "wb8"; #endif tiff=TIFFClientOpen(image->filename,mode,(thandle_t) image,TIFFReadBlob, TIFFWriteBlob,TIFFSeekBlob,TIFFCloseBlob,TIFFGetBlobSize,TIFFMapBlob, TIFFUnmapBlob); if (tiff == (TIFF *) NULL) return(MagickFalse); if (exception->severity > ErrorException) { TIFFClose(tiff); return(MagickFalse); } (void) DeleteImageProfile(image,"tiff:37724"); scene=0; adjoin=image_info->adjoin; imageListLength=GetImageListLength(image); option=GetImageOption(image_info,"tiff:preserve-compression"); preserve_compression=IsStringTrue(option); do { /* Initialize TIFF fields. */ if ((image_info->type != UndefinedType) && (image_info->type != OptimizeType) && (image_info->type != image->type)) (void) SetImageType(image,image_info->type,exception); compression=image_info->compression; if (preserve_compression != MagickFalse) compression=image->compression; switch (compression) { case FaxCompression: case Group4Compression: { if (IsImageMonochrome(image) == MagickFalse) { if (IsImageGray(image) == MagickFalse) (void) SetImageType(image,BilevelType,exception); else (void) SetImageDepth(image,1,exception); } image->depth=1; break; } case JPEGCompression: { (void) SetImageStorageClass(image,DirectClass,exception); (void) SetImageDepth(image,8,exception); break; } default: break; } quantum_info=AcquireQuantumInfo(image_info,image); if (quantum_info == (QuantumInfo *) NULL) ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); if ((image->storage_class != PseudoClass) && (image->depth >= 32) && (quantum_info->format == UndefinedQuantumFormat) && (IsHighDynamicRangeImage(image,exception) != MagickFalse)) { status=SetQuantumFormat(image,quantum_info,FloatingPointQuantumFormat); if (status == MagickFalse) { quantum_info=DestroyQuantumInfo(quantum_info); ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); } } if ((LocaleCompare(image_info->magick,"PTIF") == 0) && (GetPreviousImageInList(image) != (Image *) NULL)) (void) TIFFSetField(tiff,TIFFTAG_SUBFILETYPE,FILETYPE_REDUCEDIMAGE); if ((image->columns != (uint32) image->columns) || (image->rows != (uint32) image->rows)) ThrowWriterException(ImageError,"WidthOrHeightExceedsLimit"); (void) TIFFSetField(tiff,TIFFTAG_IMAGELENGTH,(uint32) image->rows); (void) TIFFSetField(tiff,TIFFTAG_IMAGEWIDTH,(uint32) image->columns); switch (compression) { case FaxCompression: { compress_tag=COMPRESSION_CCITTFAX3; option=GetImageOption(image_info,"quantum:polarity"); if (option == (const char *) NULL) SetQuantumMinIsWhite(quantum_info,MagickTrue); break; } case Group4Compression: { compress_tag=COMPRESSION_CCITTFAX4; option=GetImageOption(image_info,"quantum:polarity"); if (option == (const char *) NULL) SetQuantumMinIsWhite(quantum_info,MagickTrue); break; } #if defined(COMPRESSION_JBIG) case JBIG1Compression: { compress_tag=COMPRESSION_JBIG; break; } #endif case JPEGCompression: { compress_tag=COMPRESSION_JPEG; break; } #if defined(COMPRESSION_LZMA) case LZMACompression: { compress_tag=COMPRESSION_LZMA; break; } #endif case LZWCompression: { compress_tag=COMPRESSION_LZW; break; } case RLECompression: { compress_tag=COMPRESSION_PACKBITS; break; } case ZipCompression: { compress_tag=COMPRESSION_ADOBE_DEFLATE; break; } #if defined(COMPRESSION_ZSTD) case ZstdCompression: { compress_tag=COMPRESSION_ZSTD; break; } #endif case NoCompression: default: { compress_tag=COMPRESSION_NONE; break; } } #if defined(MAGICKCORE_HAVE_TIFFISCODECCONFIGURED) || (TIFFLIB_VERSION > 20040919) if ((compress_tag != COMPRESSION_NONE) && (TIFFIsCODECConfigured(compress_tag) 
== 0)) { (void) ThrowMagickException(exception,GetMagickModule(),CoderError, "CompressionNotSupported","`%s'",CommandOptionToMnemonic( MagickCompressOptions,(ssize_t) compression)); compress_tag=COMPRESSION_NONE; compression=NoCompression; } #else switch (compress_tag) { #if defined(CCITT_SUPPORT) case COMPRESSION_CCITTFAX3: case COMPRESSION_CCITTFAX4: #endif #if defined(YCBCR_SUPPORT) && defined(JPEG_SUPPORT) case COMPRESSION_JPEG: #endif #if defined(LZMA_SUPPORT) && defined(COMPRESSION_LZMA) case COMPRESSION_LZMA: #endif #if defined(LZW_SUPPORT) case COMPRESSION_LZW: #endif #if defined(PACKBITS_SUPPORT) case COMPRESSION_PACKBITS: #endif #if defined(ZIP_SUPPORT) case COMPRESSION_ADOBE_DEFLATE: #endif case COMPRESSION_NONE: break; default: { (void) ThrowMagickException(exception,GetMagickModule(),CoderError, "CompressionNotSupported","`%s'",CommandOptionToMnemonic( MagickCompressOptions,(ssize_t) compression)); compress_tag=COMPRESSION_NONE; compression=NoCompression; break; } } #endif if (image->colorspace == CMYKColorspace) { photometric=PHOTOMETRIC_SEPARATED; (void) TIFFSetField(tiff,TIFFTAG_SAMPLESPERPIXEL,4); (void) TIFFSetField(tiff,TIFFTAG_INKSET,INKSET_CMYK); } else { /* Full color TIFF raster. */ if (image->colorspace == LabColorspace) { photometric=PHOTOMETRIC_CIELAB; EncodeLabImage(image,exception); } else if (IsYCbCrCompatibleColorspace(image->colorspace) != MagickFalse) { photometric=PHOTOMETRIC_YCBCR; (void) TIFFSetField(tiff,TIFFTAG_YCBCRSUBSAMPLING,1,1); (void) SetImageStorageClass(image,DirectClass,exception); status=SetQuantumDepth(image,quantum_info,8); if (status == MagickFalse) ThrowWriterException(ResourceLimitError, "MemoryAllocationFailed"); } else photometric=PHOTOMETRIC_RGB; (void) TIFFSetField(tiff,TIFFTAG_SAMPLESPERPIXEL,3); if ((image_info->type != TrueColorType) && (image_info->type != TrueColorAlphaType)) { if ((image_info->type != PaletteType) && (SetImageGray(image,exception) != MagickFalse)) { photometric=(uint16) (quantum_info->min_is_white != MagickFalse ? PHOTOMETRIC_MINISWHITE : PHOTOMETRIC_MINISBLACK); (void) TIFFSetField(tiff,TIFFTAG_SAMPLESPERPIXEL,1); if ((image->depth == 1) && (image->alpha_trait == UndefinedPixelTrait)) SetImageMonochrome(image,exception); } else if ((image->storage_class == PseudoClass) && (image->alpha_trait == UndefinedPixelTrait)) { size_t depth; /* Colormapped TIFF raster. */ (void) TIFFSetField(tiff,TIFFTAG_SAMPLESPERPIXEL,1); photometric=PHOTOMETRIC_PALETTE; depth=1; while ((GetQuantumRange(depth)+1) < image->colors) depth<<=1; status=SetQuantumDepth(image,quantum_info,depth); if (status == MagickFalse) ThrowWriterException(ResourceLimitError, "MemoryAllocationFailed"); } } } (void) TIFFGetFieldDefaulted(tiff,TIFFTAG_FILLORDER,&endian,sans); if ((compress_tag == COMPRESSION_CCITTFAX3) || (compress_tag == COMPRESSION_CCITTFAX4)) { if ((photometric != PHOTOMETRIC_MINISWHITE) && (photometric != PHOTOMETRIC_MINISBLACK)) { compress_tag=COMPRESSION_NONE; endian=FILLORDER_MSB2LSB; } } option=GetImageOption(image_info,"tiff:fill-order"); if (option != (const char *) NULL) { if (LocaleNCompare(option,"msb",3) == 0) endian=FILLORDER_MSB2LSB; if (LocaleNCompare(option,"lsb",3) == 0) endian=FILLORDER_LSB2MSB; } (void) TIFFSetField(tiff,TIFFTAG_COMPRESSION,compress_tag); (void) TIFFSetField(tiff,TIFFTAG_FILLORDER,endian); (void) TIFFSetField(tiff,TIFFTAG_BITSPERSAMPLE,quantum_info->depth); if (image->alpha_trait != UndefinedPixelTrait) { uint16 extra_samples, sample_info[1], samples_per_pixel; /* TIFF has a matte channel. 
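          The alpha channel is declared as an extra sample so readers know
          what the extra channel carries: "unassociated" (the default here)
          is straight alpha, while "associated" means the color samples are
          premultiplied, per the TIFF 6.0 specification.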
*/ extra_samples=1; sample_info[0]=EXTRASAMPLE_UNASSALPHA; option=GetImageOption(image_info,"tiff:alpha"); if (option != (const char *) NULL) { if (LocaleCompare(option,"associated") == 0) sample_info[0]=EXTRASAMPLE_ASSOCALPHA; else if (LocaleCompare(option,"unspecified") == 0) sample_info[0]=EXTRASAMPLE_UNSPECIFIED; } (void) TIFFGetFieldDefaulted(tiff,TIFFTAG_SAMPLESPERPIXEL, &samples_per_pixel,sans); (void) TIFFSetField(tiff,TIFFTAG_SAMPLESPERPIXEL,samples_per_pixel+1); (void) TIFFSetField(tiff,TIFFTAG_EXTRASAMPLES,extra_samples, &sample_info); if (sample_info[0] == EXTRASAMPLE_ASSOCALPHA) SetQuantumAlphaType(quantum_info,AssociatedQuantumAlpha); } (void) TIFFSetField(tiff,TIFFTAG_PHOTOMETRIC,photometric); switch (quantum_info->format) { case FloatingPointQuantumFormat: { (void) TIFFSetField(tiff,TIFFTAG_SAMPLEFORMAT,SAMPLEFORMAT_IEEEFP); (void) TIFFSetField(tiff,TIFFTAG_SMINSAMPLEVALUE,quantum_info->minimum); (void) TIFFSetField(tiff,TIFFTAG_SMAXSAMPLEVALUE,quantum_info->maximum); break; } case SignedQuantumFormat: { (void) TIFFSetField(tiff,TIFFTAG_SAMPLEFORMAT,SAMPLEFORMAT_INT); break; } case UnsignedQuantumFormat: { (void) TIFFSetField(tiff,TIFFTAG_SAMPLEFORMAT,SAMPLEFORMAT_UINT); break; } default: break; } (void) TIFFSetField(tiff,TIFFTAG_PLANARCONFIG,PLANARCONFIG_CONTIG); if (photometric == PHOTOMETRIC_RGB) if ((image_info->interlace == PlaneInterlace) || (image_info->interlace == PartitionInterlace)) (void) TIFFSetField(tiff,TIFFTAG_PLANARCONFIG,PLANARCONFIG_SEPARATE); predictor=0; switch (compress_tag) { case COMPRESSION_JPEG: { #if defined(JPEG_SUPPORT) if (image_info->quality != UndefinedCompressionQuality) (void) TIFFSetField(tiff,TIFFTAG_JPEGQUALITY,image_info->quality); (void) TIFFSetField(tiff,TIFFTAG_JPEGCOLORMODE,JPEGCOLORMODE_RAW); if (IssRGBCompatibleColorspace(image->colorspace) != MagickFalse) { const char *value; (void) TIFFSetField(tiff,TIFFTAG_JPEGCOLORMODE,JPEGCOLORMODE_RGB); if (IsYCbCrCompatibleColorspace(image->colorspace) != MagickFalse) { const char *sampling_factor; GeometryInfo geometry_info; MagickStatusType flags; sampling_factor=(const char *) NULL; value=GetImageProperty(image,"jpeg:sampling-factor",exception); if (value != (char *) NULL) { sampling_factor=value; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Input sampling-factors=%s",sampling_factor); } if (image_info->sampling_factor != (char *) NULL) sampling_factor=image_info->sampling_factor; if (sampling_factor != (const char *) NULL) { flags=ParseGeometry(sampling_factor,&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=geometry_info.rho; (void) TIFFSetField(tiff,TIFFTAG_YCBCRSUBSAMPLING,(uint16) geometry_info.rho,(uint16) geometry_info.sigma); } } } (void) TIFFGetFieldDefaulted(tiff,TIFFTAG_BITSPERSAMPLE, &bits_per_sample,sans); if (bits_per_sample == 12) (void) TIFFSetField(tiff,TIFFTAG_JPEGTABLESMODE,JPEGTABLESMODE_QUANT); #endif break; } case COMPRESSION_ADOBE_DEFLATE: { (void) TIFFGetFieldDefaulted(tiff,TIFFTAG_BITSPERSAMPLE, &bits_per_sample,sans); if (((photometric == PHOTOMETRIC_RGB) || (photometric == PHOTOMETRIC_SEPARATED) || (photometric == PHOTOMETRIC_MINISBLACK)) && ((bits_per_sample == 8) || (bits_per_sample == 16))) predictor=PREDICTOR_HORIZONTAL; (void) TIFFSetField(tiff,TIFFTAG_ZIPQUALITY,(long) ( image_info->quality == UndefinedCompressionQuality ? 7 : MagickMin((ssize_t) image_info->quality/10,9))); break; } case COMPRESSION_CCITTFAX3: { /* Byte-aligned EOL. 
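          A GROUP3OPTIONS value of 4 corresponds to GROUP3OPT_FILLBITS:
          fill bits are inserted before each EOL code so that every coded
          row starts on a byte boundary.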
        */
        (void) TIFFSetField(tiff,TIFFTAG_GROUP3OPTIONS,4);
        break;
      }
      case COMPRESSION_CCITTFAX4:
        break;
#if defined(LZMA_SUPPORT) && defined(COMPRESSION_LZMA)
      case COMPRESSION_LZMA:
      {
        /*
          Default the bits-per-sample here as the LZW, WEBP, and ZSTD
          cases do; otherwise it is read uninitialized on this path.
        */
        (void) TIFFGetFieldDefaulted(tiff,TIFFTAG_BITSPERSAMPLE,
          &bits_per_sample,sans);
        if (((photometric == PHOTOMETRIC_RGB) ||
             (photometric == PHOTOMETRIC_SEPARATED) ||
             (photometric == PHOTOMETRIC_MINISBLACK)) &&
            ((bits_per_sample == 8) || (bits_per_sample == 16)))
          predictor=PREDICTOR_HORIZONTAL;
        (void) TIFFSetField(tiff,TIFFTAG_LZMAPRESET,(long) (
          image_info->quality == UndefinedCompressionQuality ? 7 :
          MagickMin((ssize_t) image_info->quality/10,9)));
        break;
      }
#endif
      case COMPRESSION_LZW:
      {
        (void) TIFFGetFieldDefaulted(tiff,TIFFTAG_BITSPERSAMPLE,
          &bits_per_sample,sans);
        if (((photometric == PHOTOMETRIC_RGB) ||
             (photometric == PHOTOMETRIC_SEPARATED) ||
             (photometric == PHOTOMETRIC_MINISBLACK)) &&
            ((bits_per_sample == 8) || (bits_per_sample == 16)))
          predictor=PREDICTOR_HORIZONTAL;
        break;
      }
#if defined(WEBP_SUPPORT) && defined(COMPRESSION_WEBP)
      case COMPRESSION_WEBP:
      {
        (void) TIFFGetFieldDefaulted(tiff,TIFFTAG_BITSPERSAMPLE,
          &bits_per_sample,sans);
        if (((photometric == PHOTOMETRIC_RGB) ||
             (photometric == PHOTOMETRIC_SEPARATED) ||
             (photometric == PHOTOMETRIC_MINISBLACK)) &&
            ((bits_per_sample == 8) || (bits_per_sample == 16)))
          predictor=PREDICTOR_HORIZONTAL;
        (void) TIFFSetField(tiff,TIFFTAG_WEBP_LEVEL,image_info->quality);
        if (image_info->quality >= 100)
          (void) TIFFSetField(tiff,TIFFTAG_WEBP_LOSSLESS,1);
        break;
      }
#endif
#if defined(ZSTD_SUPPORT) && defined(COMPRESSION_ZSTD)
      case COMPRESSION_ZSTD:
      {
        (void) TIFFGetFieldDefaulted(tiff,TIFFTAG_BITSPERSAMPLE,
          &bits_per_sample,sans);
        if (((photometric == PHOTOMETRIC_RGB) ||
             (photometric == PHOTOMETRIC_SEPARATED) ||
             (photometric == PHOTOMETRIC_MINISBLACK)) &&
            ((bits_per_sample == 8) || (bits_per_sample == 16)))
          predictor=PREDICTOR_HORIZONTAL;
        (void) TIFFSetField(tiff,TIFFTAG_ZSTD_LEVEL,22*image_info->quality/
          100.0);
        break;
      }
#endif
      default:
        break;
    }
    if (quantum_info->format == FloatingPointQuantumFormat)
      predictor=PREDICTOR_FLOATINGPOINT;
    option=GetImageOption(image_info,"tiff:predictor");
    if (option != (const char *) NULL)
      predictor=(uint16) strtol(option,(char **) NULL,10);
    if (predictor != 0)
      (void) TIFFSetField(tiff,TIFFTAG_PREDICTOR,predictor);
    if ((image->resolution.x != 0.0) && (image->resolution.y != 0.0))
      {
        unsigned short
          units;

        /*
          Set image resolution.
        */
        units=RESUNIT_NONE;
        if (image->units == PixelsPerInchResolution)
          units=RESUNIT_INCH;
        if (image->units == PixelsPerCentimeterResolution)
          units=RESUNIT_CENTIMETER;
        (void) TIFFSetField(tiff,TIFFTAG_RESOLUTIONUNIT,(uint16) units);
        (void) TIFFSetField(tiff,TIFFTAG_XRESOLUTION,image->resolution.x);
        (void) TIFFSetField(tiff,TIFFTAG_YRESOLUTION,image->resolution.y);
        if ((image->page.x < 0) || (image->page.y < 0))
          (void) ThrowMagickException(exception,GetMagickModule(),CoderError,
            "TIFF: negative image positions unsupported","%s",image->filename);
        if ((image->page.x > 0) && (image->resolution.x > 0.0))
          {
            /*
              Set horizontal image position.
            */
            (void) TIFFSetField(tiff,TIFFTAG_XPOSITION,(float) image->page.x/
              image->resolution.x);
          }
        if ((image->page.y > 0) && (image->resolution.y > 0.0))
          {
            /*
              Set vertical image position.
            */
            (void) TIFFSetField(tiff,TIFFTAG_YPOSITION,(float) image->page.y/
              image->resolution.y);
          }
      }
    if (image->chromaticity.white_point.x != 0.0)
      {
        float
          chromaticity[6];

        /*
          Set image chromaticity.
*/ chromaticity[0]=(float) image->chromaticity.red_primary.x; chromaticity[1]=(float) image->chromaticity.red_primary.y; chromaticity[2]=(float) image->chromaticity.green_primary.x; chromaticity[3]=(float) image->chromaticity.green_primary.y; chromaticity[4]=(float) image->chromaticity.blue_primary.x; chromaticity[5]=(float) image->chromaticity.blue_primary.y; (void) TIFFSetField(tiff,TIFFTAG_PRIMARYCHROMATICITIES,chromaticity); chromaticity[0]=(float) image->chromaticity.white_point.x; chromaticity[1]=(float) image->chromaticity.white_point.y; (void) TIFFSetField(tiff,TIFFTAG_WHITEPOINT,chromaticity); } option=GetImageOption(image_info,"tiff:write-layers"); if (IsStringTrue(option) != MagickFalse) { (void) TIFFWritePhotoshopLayers(image,image_info,endian_type,exception); adjoin=MagickFalse; } if ((LocaleCompare(image_info->magick,"PTIF") != 0) && (adjoin != MagickFalse) && (imageListLength > 1)) { (void) TIFFSetField(tiff,TIFFTAG_SUBFILETYPE,FILETYPE_PAGE); if (image->scene != 0) (void) TIFFSetField(tiff,TIFFTAG_PAGENUMBER,(uint16) image->scene, imageListLength); } if (image->orientation != UndefinedOrientation) (void) TIFFSetField(tiff,TIFFTAG_ORIENTATION,(uint16) image->orientation); else (void) TIFFSetField(tiff,TIFFTAG_ORIENTATION,ORIENTATION_TOPLEFT); TIFFSetProfiles(tiff,image); { uint16 page, pages; page=(uint16) scene; pages=(uint16) imageListLength; if ((LocaleCompare(image_info->magick,"PTIF") != 0) && (adjoin != MagickFalse) && (pages > 1)) (void) TIFFSetField(tiff,TIFFTAG_SUBFILETYPE,FILETYPE_PAGE); (void) TIFFSetField(tiff,TIFFTAG_PAGENUMBER,page,pages); } (void) TIFFSetProperties(tiff,adjoin,image,exception); /* Write image scanlines. */ if (GetTIFFInfo(image_info,tiff,&tiff_info) == MagickFalse) ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); if (compress_tag == COMPRESSION_CCITTFAX4) (void) TIFFSetField(tiff,TIFFTAG_ROWSPERSTRIP,(uint32) image->rows); quantum_info->endian=LSBEndian; pixels=(unsigned char *) GetQuantumPixels(quantum_info); tiff_info.scanline=(unsigned char *) GetQuantumPixels(quantum_info); switch (photometric) { case PHOTOMETRIC_CIELAB: case PHOTOMETRIC_YCBCR: case PHOTOMETRIC_RGB: { /* RGB TIFF image. */ switch (image_info->interlace) { case NoInterlace: default: { quantum_type=RGBQuantum; if (image->alpha_trait != UndefinedPixelTrait) quantum_type=RGBAQuantum; for (y=0; y < (ssize_t) image->rows; y++) { const Quantum *magick_restrict p; p=GetVirtualPixels(image,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; length=ExportQuantumPixels(image,(CacheView *) NULL,quantum_info, quantum_type,pixels,exception); (void) length; if (TIFFWritePixels(tiff,&tiff_info,y,0,image) == -1) break; if (image->previous == (Image *) NULL) { status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y,image->rows); if (status == MagickFalse) break; } } break; } case PlaneInterlace: case PartitionInterlace: { /* Plane interlacing: RRRRRR...GGGGGG...BBBBBB... 
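            With PLANARCONFIG_SEPARATE each channel is exported as its own
            plane; the sample argument to TIFFWritePixels() (0=red, 1=green,
            2=blue, 3=alpha) selects the plane being written.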
*/ for (y=0; y < (ssize_t) image->rows; y++) { const Quantum *magick_restrict p; p=GetVirtualPixels(image,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; length=ExportQuantumPixels(image,(CacheView *) NULL,quantum_info, RedQuantum,pixels,exception); if (TIFFWritePixels(tiff,&tiff_info,y,0,image) == -1) break; } if (image->previous == (Image *) NULL) { status=SetImageProgress(image,SaveImageTag,100,400); if (status == MagickFalse) break; } for (y=0; y < (ssize_t) image->rows; y++) { const Quantum *magick_restrict p; p=GetVirtualPixels(image,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; length=ExportQuantumPixels(image,(CacheView *) NULL,quantum_info, GreenQuantum,pixels,exception); if (TIFFWritePixels(tiff,&tiff_info,y,1,image) == -1) break; } if (image->previous == (Image *) NULL) { status=SetImageProgress(image,SaveImageTag,200,400); if (status == MagickFalse) break; } for (y=0; y < (ssize_t) image->rows; y++) { const Quantum *magick_restrict p; p=GetVirtualPixels(image,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; length=ExportQuantumPixels(image,(CacheView *) NULL,quantum_info, BlueQuantum,pixels,exception); if (TIFFWritePixels(tiff,&tiff_info,y,2,image) == -1) break; } if (image->previous == (Image *) NULL) { status=SetImageProgress(image,SaveImageTag,300,400); if (status == MagickFalse) break; } if (image->alpha_trait != UndefinedPixelTrait) for (y=0; y < (ssize_t) image->rows; y++) { const Quantum *magick_restrict p; p=GetVirtualPixels(image,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; length=ExportQuantumPixels(image,(CacheView *) NULL, quantum_info,AlphaQuantum,pixels,exception); if (TIFFWritePixels(tiff,&tiff_info,y,3,image) == -1) break; } if (image->previous == (Image *) NULL) { status=SetImageProgress(image,SaveImageTag,400,400); if (status == MagickFalse) break; } break; } } break; } case PHOTOMETRIC_SEPARATED: { /* CMYK TIFF image. */ quantum_type=CMYKQuantum; if (image->alpha_trait != UndefinedPixelTrait) quantum_type=CMYKAQuantum; if (image->colorspace != CMYKColorspace) (void) TransformImageColorspace(image,CMYKColorspace,exception); for (y=0; y < (ssize_t) image->rows; y++) { const Quantum *magick_restrict p; p=GetVirtualPixels(image,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; length=ExportQuantumPixels(image,(CacheView *) NULL,quantum_info, quantum_type,pixels,exception); if (TIFFWritePixels(tiff,&tiff_info,y,0,image) == -1) break; if (image->previous == (Image *) NULL) { status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) break; } } break; } case PHOTOMETRIC_PALETTE: { uint16 *blue, *green, *red; /* Colormapped TIFF image. */ red=(uint16 *) AcquireQuantumMemory(65536,sizeof(*red)); green=(uint16 *) AcquireQuantumMemory(65536,sizeof(*green)); blue=(uint16 *) AcquireQuantumMemory(65536,sizeof(*blue)); if ((red == (uint16 *) NULL) || (green == (uint16 *) NULL) || (blue == (uint16 *) NULL)) { if (red != (uint16 *) NULL) red=(uint16 *) RelinquishMagickMemory(red); if (green != (uint16 *) NULL) green=(uint16 *) RelinquishMagickMemory(green); if (blue != (uint16 *) NULL) blue=(uint16 *) RelinquishMagickMemory(blue); ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); } /* Initialize TIFF colormap. 
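          The colormap is scaled to 16 bits per component and attached
          below; control then falls through (intentionally, there is no
          break) to the default case, which writes the palette indexes as
          scanlines.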
*/ (void) memset(red,0,65536*sizeof(*red)); (void) memset(green,0,65536*sizeof(*green)); (void) memset(blue,0,65536*sizeof(*blue)); for (i=0; i < (ssize_t) image->colors; i++) { red[i]=ScaleQuantumToShort(image->colormap[i].red); green[i]=ScaleQuantumToShort(image->colormap[i].green); blue[i]=ScaleQuantumToShort(image->colormap[i].blue); } (void) TIFFSetField(tiff,TIFFTAG_COLORMAP,red,green,blue); red=(uint16 *) RelinquishMagickMemory(red); green=(uint16 *) RelinquishMagickMemory(green); blue=(uint16 *) RelinquishMagickMemory(blue); } default: { /* Convert PseudoClass packets to contiguous grayscale scanlines. */ quantum_type=IndexQuantum; if (image->alpha_trait != UndefinedPixelTrait) { if (photometric != PHOTOMETRIC_PALETTE) quantum_type=GrayAlphaQuantum; else quantum_type=IndexAlphaQuantum; } else if (photometric != PHOTOMETRIC_PALETTE) quantum_type=GrayQuantum; for (y=0; y < (ssize_t) image->rows; y++) { const Quantum *magick_restrict p; p=GetVirtualPixels(image,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; length=ExportQuantumPixels(image,(CacheView *) NULL,quantum_info, quantum_type,pixels,exception); if (TIFFWritePixels(tiff,&tiff_info,y,0,image) == -1) break; if (image->previous == (Image *) NULL) { status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) break; } } break; } } quantum_info=DestroyQuantumInfo(quantum_info); if (image->colorspace == LabColorspace) DecodeLabImage(image,exception); DestroyTIFFInfo(&tiff_info); /* TIFFPrintDirectory(tiff,stdout,MagickFalse); */ if (TIFFWriteDirectory(tiff) == 0) { status=MagickFalse; break; } image=SyncNextImageInList(image); if (image == (Image *) NULL) break; status=SetImageProgress(image,SaveImagesTag,scene++,imageListLength); if (status == MagickFalse) break; } while (adjoin != MagickFalse); TIFFClose(tiff); return(status); } #endif
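
/*
  A minimal usage sketch (illustrative only, not part of the coder): the
  writer above is normally reached through the public WriteImage() API.
  This assumes MagickCoreGenesis() has already been called and that the
  hypothetical "input.png" exists:

    ExceptionInfo *exception = AcquireExceptionInfo();
    ImageInfo *image_info = CloneImageInfo((ImageInfo *) NULL);
    Image *image;

    (void) CopyMagickString(image_info->filename,"input.png",
      MagickPathExtent);
    image=ReadImage(image_info,exception);
    if (image != (Image *) NULL)
      {
        image_info->compression=LZWCompression;
        (void) CopyMagickString(image->filename,"output.tiff",
          MagickPathExtent);
        (void) WriteImage(image_info,image,exception);
        image=DestroyImageList(image);
      }
    image_info=DestroyImageInfo(image_info);
    exception=DestroyExceptionInfo(exception);
*/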
static Image *ReadTIFFImage(const ImageInfo *image_info,
  ExceptionInfo *exception)
{
#define ThrowTIFFException(severity,message) \
{ \
  if (pixel_info != (MemoryInfo *) NULL) \
    pixel_info=RelinquishVirtualMemory(pixel_info); \
  if (quantum_info != (QuantumInfo *) NULL) \
    quantum_info=DestroyQuantumInfo(quantum_info); \
  TIFFClose(tiff); \
  ThrowReaderException(severity,message); \
}

  const char
    *option;

  float
    *chromaticity,
    x_position,
    y_position,
    x_resolution,
    y_resolution;

  Image
    *image;

  int
    tiff_status;

  MagickBooleanType
    more_frames;

  MagickSizeType
    number_pixels;

  MagickStatusType
    status;

  MemoryInfo
    *pixel_info = (MemoryInfo *) NULL;

  QuantumInfo
    *quantum_info;

  QuantumType
    quantum_type;

  ssize_t
    i,
    scanline_size,
    y;

  TIFF
    *tiff;

  TIFFMethodType
    method;

  uint16
    compress_tag,
    bits_per_sample,
    endian,
    extra_samples,
    interlace,
    max_sample_value,
    min_sample_value,
    orientation,
    pages,
    photometric,
    *sample_info,
    sample_format,
    samples_per_pixel,
    units,
    value;

  uint32
    height,
    rows_per_strip,
    width;

  unsigned char
    *pixels;

  void
    *sans[4] = { NULL, NULL, NULL, NULL };

  /*
    Open image.
  */
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  image=AcquireImage(image_info,exception);
  status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
  if (status == MagickFalse)
    {
      image=DestroyImageList(image);
      return((Image *) NULL);
    }
  (void) SetMagickThreadValue(tiff_exception,exception);
  tiff=TIFFClientOpen(image->filename,"rb",(thandle_t) image,TIFFReadBlob,
    TIFFWriteBlob,TIFFSeekBlob,TIFFCloseBlob,TIFFGetBlobSize,TIFFMapBlob,
    TIFFUnmapBlob);
  if (tiff == (TIFF *) NULL)
    {
      image=DestroyImageList(image);
      return((Image *) NULL);
    }
  if (exception->severity > ErrorException)
    {
      TIFFClose(tiff);
      image=DestroyImageList(image);
      return((Image *) NULL);
    }
  if (image_info->number_scenes != 0)
    {
      /*
        Generate blank images for subimage specification (e.g. image.tif[4]).
        We need to check the number of directories because it is possible
        that the subimage(s) are stored in the photoshop profile.
      */
      if (image_info->scene < (size_t) TIFFNumberOfDirectories(tiff))
        {
          for (i=0; i < (ssize_t) image_info->scene; i++)
          {
            status=TIFFReadDirectory(tiff) != 0 ?
MagickTrue : MagickFalse; if (status == MagickFalse) { TIFFClose(tiff); image=DestroyImageList(image); return((Image *) NULL); } AcquireNextImage(image_info,image,exception); if (GetNextImageInList(image) == (Image *) NULL) { TIFFClose(tiff); image=DestroyImageList(image); return((Image *) NULL); } image=SyncNextImageInList(image); } } } more_frames=MagickTrue; do { /* TIFFPrintDirectory(tiff,stdout,MagickFalse); */ photometric=PHOTOMETRIC_RGB; if ((TIFFGetField(tiff,TIFFTAG_IMAGEWIDTH,&width) != 1) || (TIFFGetField(tiff,TIFFTAG_IMAGELENGTH,&height) != 1) || (TIFFGetFieldDefaulted(tiff,TIFFTAG_PHOTOMETRIC,&photometric,sans) != 1) || (TIFFGetFieldDefaulted(tiff,TIFFTAG_COMPRESSION,&compress_tag,sans) != 1) || (TIFFGetFieldDefaulted(tiff,TIFFTAG_FILLORDER,&endian,sans) != 1) || (TIFFGetFieldDefaulted(tiff,TIFFTAG_PLANARCONFIG,&interlace,sans) != 1) || (TIFFGetFieldDefaulted(tiff,TIFFTAG_SAMPLESPERPIXEL,&samples_per_pixel,sans) != 1) || (TIFFGetFieldDefaulted(tiff,TIFFTAG_BITSPERSAMPLE,&bits_per_sample,sans) != 1) || (TIFFGetFieldDefaulted(tiff,TIFFTAG_SAMPLEFORMAT,&sample_format,sans) != 1) || (TIFFGetFieldDefaulted(tiff,TIFFTAG_MINSAMPLEVALUE,&min_sample_value,sans) != 1) || (TIFFGetFieldDefaulted(tiff,TIFFTAG_MAXSAMPLEVALUE,&max_sample_value,sans) != 1)) { TIFFClose(tiff); ThrowReaderException(CorruptImageError,"ImproperImageHeader"); } if (((sample_format != SAMPLEFORMAT_IEEEFP) || (bits_per_sample != 64)) && ((bits_per_sample <= 0) || (bits_per_sample > 32))) { TIFFClose(tiff); ThrowReaderException(CorruptImageError,"UnsupportedBitsPerPixel"); } if (samples_per_pixel > MaxPixelChannels) { TIFFClose(tiff); ThrowReaderException(CorruptImageError,"MaximumChannelsExceeded"); } if (sample_format == SAMPLEFORMAT_IEEEFP) (void) SetImageProperty(image,"quantum:format","floating-point", exception); switch (photometric) { case PHOTOMETRIC_MINISBLACK: { (void) SetImageProperty(image,"tiff:photometric","min-is-black", exception); break; } case PHOTOMETRIC_MINISWHITE: { (void) SetImageProperty(image,"tiff:photometric","min-is-white", exception); break; } case PHOTOMETRIC_PALETTE: { (void) SetImageProperty(image,"tiff:photometric","palette",exception); break; } case PHOTOMETRIC_RGB: { (void) SetImageProperty(image,"tiff:photometric","RGB",exception); break; } case PHOTOMETRIC_CIELAB: { (void) SetImageProperty(image,"tiff:photometric","CIELAB",exception); break; } case PHOTOMETRIC_LOGL: { (void) SetImageProperty(image,"tiff:photometric","CIE Log2(L)", exception); break; } case PHOTOMETRIC_LOGLUV: { (void) SetImageProperty(image,"tiff:photometric","LOGLUV",exception); break; } #if defined(PHOTOMETRIC_MASK) case PHOTOMETRIC_MASK: { (void) SetImageProperty(image,"tiff:photometric","MASK",exception); break; } #endif case PHOTOMETRIC_SEPARATED: { (void) SetImageProperty(image,"tiff:photometric","separated",exception); break; } case PHOTOMETRIC_YCBCR: { (void) SetImageProperty(image,"tiff:photometric","YCBCR",exception); break; } default: { (void) SetImageProperty(image,"tiff:photometric","unknown",exception); break; } } if (image->debug != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(),"Geometry: %ux%u", (unsigned int) width,(unsigned int) height); (void) LogMagickEvent(CoderEvent,GetMagickModule(),"Interlace: %u", interlace); (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Bits per sample: %u",bits_per_sample); (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Min sample value: %u",min_sample_value); (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Max sample value: 
%u",max_sample_value); (void) LogMagickEvent(CoderEvent,GetMagickModule(),"Photometric " "interpretation: %s",GetImageProperty(image,"tiff:photometric", exception)); } image->columns=(size_t) width; image->rows=(size_t) height; image->depth=(size_t) bits_per_sample; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(),"Image depth: %.20g", (double) image->depth); image->endian=MSBEndian; if (endian == FILLORDER_LSB2MSB) image->endian=LSBEndian; #if defined(MAGICKCORE_HAVE_TIFFISBIGENDIAN) if (TIFFIsBigEndian(tiff) == 0) { (void) SetImageProperty(image,"tiff:endian","lsb",exception); image->endian=LSBEndian; } else { (void) SetImageProperty(image,"tiff:endian","msb",exception); image->endian=MSBEndian; } #endif if ((photometric == PHOTOMETRIC_MINISBLACK) || (photometric == PHOTOMETRIC_MINISWHITE)) image->colorspace=GRAYColorspace; if (photometric == PHOTOMETRIC_SEPARATED) image->colorspace=CMYKColorspace; if (photometric == PHOTOMETRIC_CIELAB) image->colorspace=LabColorspace; if ((photometric == PHOTOMETRIC_YCBCR) && (compress_tag != COMPRESSION_JPEG)) image->colorspace=YCbCrColorspace; status=TIFFGetProfiles(tiff,image,exception); if (status == MagickFalse) { TIFFClose(tiff); return(DestroyImageList(image)); } status=TIFFGetProperties(tiff,image,exception); if (status == MagickFalse) { TIFFClose(tiff); return(DestroyImageList(image)); } option=GetImageOption(image_info,"tiff:exif-properties"); if (IsStringFalse(option) == MagickFalse) /* enabled by default */ (void) TIFFGetEXIFProperties(tiff,image,exception); option=GetImageOption(image_info,"tiff:gps-properties"); if (IsStringFalse(option) == MagickFalse) /* enabled by default */ (void) TIFFGetGPSProperties(tiff,image,exception); if ((TIFFGetFieldDefaulted(tiff,TIFFTAG_XRESOLUTION,&x_resolution,sans) == 1) && (TIFFGetFieldDefaulted(tiff,TIFFTAG_YRESOLUTION,&y_resolution,sans) == 1)) { image->resolution.x=x_resolution; image->resolution.y=y_resolution; } if (TIFFGetFieldDefaulted(tiff,TIFFTAG_RESOLUTIONUNIT,&units,sans,sans) == 1) { if (units == RESUNIT_INCH) image->units=PixelsPerInchResolution; if (units == RESUNIT_CENTIMETER) image->units=PixelsPerCentimeterResolution; } if ((TIFFGetFieldDefaulted(tiff,TIFFTAG_XPOSITION,&x_position,sans) == 1) && (TIFFGetFieldDefaulted(tiff,TIFFTAG_YPOSITION,&y_position,sans) == 1)) { image->page.x=CastDoubleToLong(ceil(x_position* image->resolution.x-0.5)); image->page.y=CastDoubleToLong(ceil(y_position* image->resolution.y-0.5)); } if (TIFFGetFieldDefaulted(tiff,TIFFTAG_ORIENTATION,&orientation,sans) == 1) image->orientation=(OrientationType) orientation; if (TIFFGetField(tiff,TIFFTAG_WHITEPOINT,&chromaticity) == 1) { if ((chromaticity != (float *) NULL) && (*chromaticity != 0.0)) { image->chromaticity.white_point.x=chromaticity[0]; image->chromaticity.white_point.y=chromaticity[1]; } } if (TIFFGetField(tiff,TIFFTAG_PRIMARYCHROMATICITIES,&chromaticity) == 1) { if ((chromaticity != (float *) NULL) && (*chromaticity != 0.0)) { image->chromaticity.red_primary.x=chromaticity[0]; image->chromaticity.red_primary.y=chromaticity[1]; image->chromaticity.green_primary.x=chromaticity[2]; image->chromaticity.green_primary.y=chromaticity[3]; image->chromaticity.blue_primary.x=chromaticity[4]; image->chromaticity.blue_primary.y=chromaticity[5]; } } #if defined(MAGICKCORE_HAVE_TIFFISCODECCONFIGURED) || (TIFFLIB_VERSION > 20040919) if ((compress_tag != COMPRESSION_NONE) && (TIFFIsCODECConfigured(compress_tag) == 0)) { TIFFClose(tiff); 
ThrowReaderException(CoderError,"CompressNotSupported"); } #endif switch (compress_tag) { case COMPRESSION_NONE: image->compression=NoCompression; break; case COMPRESSION_CCITTFAX3: image->compression=FaxCompression; break; case COMPRESSION_CCITTFAX4: image->compression=Group4Compression; break; case COMPRESSION_JPEG: { image->compression=JPEGCompression; #if defined(JPEG_SUPPORT) { char sampling_factor[MagickPathExtent]; uint16 horizontal, vertical; tiff_status=TIFFGetField(tiff,TIFFTAG_YCBCRSUBSAMPLING,&horizontal, &vertical); if (tiff_status == 1) { (void) FormatLocaleString(sampling_factor,MagickPathExtent, "%dx%d",horizontal,vertical); (void) SetImageProperty(image,"jpeg:sampling-factor", sampling_factor,exception); (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Sampling Factors: %s",sampling_factor); } } #endif break; } case COMPRESSION_OJPEG: image->compression=JPEGCompression; break; #if defined(COMPRESSION_LZMA) case COMPRESSION_LZMA: image->compression=LZMACompression; break; #endif case COMPRESSION_LZW: image->compression=LZWCompression; break; case COMPRESSION_DEFLATE: image->compression=ZipCompression; break; case COMPRESSION_ADOBE_DEFLATE: image->compression=ZipCompression; break; #if defined(COMPRESSION_WEBP) case COMPRESSION_WEBP: image->compression=WebPCompression; break; #endif #if defined(COMPRESSION_ZSTD) case COMPRESSION_ZSTD: image->compression=ZstdCompression; break; #endif default: image->compression=RLECompression; break; } quantum_info=(QuantumInfo *) NULL; if ((photometric == PHOTOMETRIC_PALETTE) && (pow(2.0,1.0*bits_per_sample) <= MaxColormapSize)) { size_t colors; colors=(size_t) GetQuantumRange(bits_per_sample)+1; if (AcquireImageColormap(image,colors,exception) == MagickFalse) { TIFFClose(tiff); ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); } } value=(unsigned short) image->scene; if (TIFFGetFieldDefaulted(tiff,TIFFTAG_PAGENUMBER,&value,&pages,sans) == 1) image->scene=value; if (image->storage_class == PseudoClass) { size_t range; uint16 *blue_colormap, *green_colormap, *red_colormap; /* Initialize colormap. */ tiff_status=TIFFGetField(tiff,TIFFTAG_COLORMAP,&red_colormap, &green_colormap,&blue_colormap); if (tiff_status == 1) { if ((red_colormap != (uint16 *) NULL) && (green_colormap != (uint16 *) NULL) && (blue_colormap != (uint16 *) NULL)) { range=255; /* might be old style 8-bit colormap */ for (i=0; i < (ssize_t) image->colors; i++) if ((red_colormap[i] >= 256) || (green_colormap[i] >= 256) || (blue_colormap[i] >= 256)) { range=65535; break; } for (i=0; i < (ssize_t) image->colors; i++) { image->colormap[i].red=ClampToQuantum(((double) QuantumRange*red_colormap[i])/range); image->colormap[i].green=ClampToQuantum(((double) QuantumRange*green_colormap[i])/range); image->colormap[i].blue=ClampToQuantum(((double) QuantumRange*blue_colormap[i])/range); } } } } if (image_info->ping != MagickFalse) { if (image_info->number_scenes != 0) if (image->scene >= (image_info->scene+image_info->number_scenes-1)) break; goto next_tiff_frame; } status=SetImageExtent(image,image->columns,image->rows,exception); if (status == MagickFalse) { TIFFClose(tiff); return(DestroyImageList(image)); } status=SetImageColorspace(image,image->colorspace,exception); status&=ResetImagePixels(image,exception); if (status == MagickFalse) { TIFFClose(tiff); return(DestroyImageList(image)); } /* Allocate memory for the image and pixel buffer. 
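      The single buffer is sized, in 32-bit units, to cover at least one
      full scanline and one full strip of samples, so the strip, tile, and
      generic read paths selected above can share the same allocation.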
*/ quantum_info=AcquireQuantumInfo(image_info,image); if (quantum_info == (QuantumInfo *) NULL) ThrowTIFFException(ResourceLimitError,"MemoryAllocationFailed"); if (sample_format == SAMPLEFORMAT_UINT) status=SetQuantumFormat(image,quantum_info,UnsignedQuantumFormat); if (sample_format == SAMPLEFORMAT_INT) status=SetQuantumFormat(image,quantum_info,SignedQuantumFormat); if (sample_format == SAMPLEFORMAT_IEEEFP) status=SetQuantumFormat(image,quantum_info,FloatingPointQuantumFormat); if (status == MagickFalse) ThrowTIFFException(ResourceLimitError,"MemoryAllocationFailed"); status=MagickTrue; switch (photometric) { case PHOTOMETRIC_MINISBLACK: { quantum_info->min_is_white=MagickFalse; break; } case PHOTOMETRIC_MINISWHITE: { quantum_info->min_is_white=MagickTrue; break; } default: break; } extra_samples=0; tiff_status=TIFFGetFieldDefaulted(tiff,TIFFTAG_EXTRASAMPLES,&extra_samples, &sample_info,sans); if (tiff_status == 1) { (void) SetImageProperty(image,"tiff:alpha","unspecified",exception); if (extra_samples == 0) { if ((samples_per_pixel == 4) && (photometric == PHOTOMETRIC_RGB)) image->alpha_trait=BlendPixelTrait; } else for (i=0; i < extra_samples; i++) { image->alpha_trait=BlendPixelTrait; if (sample_info[i] == EXTRASAMPLE_ASSOCALPHA) { SetQuantumAlphaType(quantum_info,AssociatedQuantumAlpha); (void) SetImageProperty(image,"tiff:alpha","associated", exception); } else if (sample_info[i] == EXTRASAMPLE_UNASSALPHA) { SetQuantumAlphaType(quantum_info,DisassociatedQuantumAlpha); (void) SetImageProperty(image,"tiff:alpha","unassociated", exception); } } } if (image->alpha_trait != UndefinedPixelTrait) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception); method=ReadGenericMethod; rows_per_strip=(uint32) image->rows; if (TIFFGetField(tiff,TIFFTAG_ROWSPERSTRIP,&rows_per_strip) == 1) { char buffer[MagickPathExtent]; (void) FormatLocaleString(buffer,MagickPathExtent,"%u", (unsigned int) rows_per_strip); (void) SetImageProperty(image,"tiff:rows-per-strip",buffer,exception); method=ReadStripMethod; if (rows_per_strip > (uint32) image->rows) rows_per_strip=(uint32) image->rows; } if (TIFFIsTiled(tiff) != MagickFalse) { uint32 columns, rows; if ((TIFFGetField(tiff,TIFFTAG_TILEWIDTH,&columns) != 1) || (TIFFGetField(tiff,TIFFTAG_TILELENGTH,&rows) != 1)) ThrowTIFFException(CoderError,"ImageIsNotTiled"); if ((AcquireMagickResource(WidthResource,columns) == MagickFalse) || (AcquireMagickResource(HeightResource,rows) == MagickFalse)) ThrowTIFFException(ImageError,"WidthOrHeightExceedsLimit"); method=ReadTileMethod; } if ((photometric == PHOTOMETRIC_LOGLUV) || (compress_tag == COMPRESSION_CCITTFAX3)) method=ReadGenericMethod; if (image->compression == JPEGCompression) method=GetJPEGMethod(image,tiff,photometric,bits_per_sample, samples_per_pixel); quantum_info->endian=LSBEndian; scanline_size=TIFFScanlineSize(tiff); if (scanline_size <= 0) ThrowTIFFException(ResourceLimitError,"MemoryAllocationFailed"); number_pixels=MagickMax((MagickSizeType) image->columns*samples_per_pixel* pow(2.0,ceil(log(bits_per_sample)/log(2.0))),image->columns* rows_per_strip); if ((double) scanline_size > 1.5*number_pixels) ThrowTIFFException(CorruptImageError,"CorruptImage"); number_pixels=MagickMax((MagickSizeType) scanline_size,number_pixels); pixel_info=AcquireVirtualMemory(number_pixels,sizeof(uint32)); if (pixel_info == (MemoryInfo *) NULL) ThrowTIFFException(ResourceLimitError,"MemoryAllocationFailed"); pixels=(unsigned char *) GetVirtualMemoryBlob(pixel_info); (void) memset(pixels,0,number_pixels*sizeof(uint32)); 
quantum_type=GrayQuantum; if (image->storage_class == PseudoClass) quantum_type=IndexQuantum; if (interlace != PLANARCONFIG_SEPARATE) { size_t pad; pad=(size_t) MagickMax((ssize_t) samples_per_pixel-1,0); if (image->alpha_trait != UndefinedPixelTrait) { if (image->storage_class == PseudoClass) quantum_type=IndexAlphaQuantum; else quantum_type=samples_per_pixel == 1 ? AlphaQuantum : GrayAlphaQuantum; } if ((samples_per_pixel > 2) && (interlace != PLANARCONFIG_SEPARATE)) { quantum_type=RGBQuantum; pad=(size_t) MagickMax((size_t) samples_per_pixel-3,0); if (image->alpha_trait != UndefinedPixelTrait) { quantum_type=RGBAQuantum; pad=(size_t) MagickMax((size_t) samples_per_pixel-4,0); } if (image->colorspace == CMYKColorspace) { quantum_type=CMYKQuantum; pad=(size_t) MagickMax((size_t) samples_per_pixel-4,0); if (image->alpha_trait != UndefinedPixelTrait) { quantum_type=CMYKAQuantum; pad=(size_t) MagickMax((size_t) samples_per_pixel-5,0); } } status=SetQuantumPad(image,quantum_info,pad*((bits_per_sample+7) >> 3)); if (status == MagickFalse) ThrowTIFFException(ResourceLimitError,"MemoryAllocationFailed"); } } switch (method) { case ReadYCCKMethod: { /* Convert YCC TIFF image. */ for (y=0; y < (ssize_t) image->rows; y++) { Quantum *magick_restrict q; ssize_t x; unsigned char *p; tiff_status=TIFFReadPixels(tiff,0,y,(char *) pixels); if (tiff_status == -1) break; q=QueueAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) break; p=pixels; for (x=0; x < (ssize_t) image->columns; x++) { SetPixelCyan(image,ScaleCharToQuantum(ClampYCC((double) *p+ (1.402*(double) *(p+2))-179.456)),q); SetPixelMagenta(image,ScaleCharToQuantum(ClampYCC((double) *p- (0.34414*(double) *(p+1))-(0.71414*(double ) *(p+2))+ 135.45984)),q); SetPixelYellow(image,ScaleCharToQuantum(ClampYCC((double) *p+ (1.772*(double) *(p+1))-226.816)),q); SetPixelBlack(image,ScaleCharToQuantum((unsigned char) *(p+3)),q); q+=GetPixelChannels(image); p+=4; } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; if (image->previous == (Image *) NULL) { status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) break; } } break; } case ReadStripMethod: { unsigned char *p; size_t extent; ssize_t stride, strip_id; tsize_t strip_size; unsigned char *strip_pixels; /* Convert stripped TIFF image. 
*/ extent=2*TIFFStripSize(tiff); #if defined(TIFF_VERSION_BIG) extent+=image->columns*sizeof(uint64); #else extent+=image->columns*sizeof(uint32); #endif strip_pixels=(unsigned char *) AcquireQuantumMemory(extent, sizeof(*strip_pixels)); if (strip_pixels == (unsigned char *) NULL) ThrowTIFFException(ResourceLimitError,"MemoryAllocationFailed"); (void) memset(strip_pixels,0,extent*sizeof(*strip_pixels)); stride=TIFFVStripSize(tiff,1); strip_id=0; p=strip_pixels; for (i=0; i < (ssize_t) samples_per_pixel; i++) { size_t rows_remaining; switch (i) { case 0: break; case 1: quantum_type=GreenQuantum; break; case 2: quantum_type=BlueQuantum; break; case 3: { quantum_type=AlphaQuantum; if (image->colorspace == CMYKColorspace) quantum_type=BlackQuantum; break; } case 4: quantum_type=AlphaQuantum; break; default: break; } rows_remaining=0; for (y=0; y < (ssize_t) image->rows; y++) { Quantum *magick_restrict q; q=GetAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) break; if (rows_remaining == 0) { strip_size=TIFFReadEncodedStrip(tiff,strip_id,strip_pixels, TIFFStripSize(tiff)); if (strip_size == -1) break; rows_remaining=rows_per_strip; if ((y+rows_per_strip) > (ssize_t) image->rows) rows_remaining=(rows_per_strip-(y+rows_per_strip- image->rows)); p=strip_pixels; strip_id++; } (void) ImportQuantumPixels(image,(CacheView *) NULL, quantum_info,quantum_type,p,exception); p+=stride; rows_remaining--; if (SyncAuthenticPixels(image,exception) == MagickFalse) break; if (image->previous == (Image *) NULL) { status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) break; } } if ((samples_per_pixel > 1) && (interlace != PLANARCONFIG_SEPARATE)) break; } strip_pixels=(unsigned char *) RelinquishMagickMemory(strip_pixels); break; } case ReadTileMethod: { unsigned char *p; size_t extent; uint32 columns, rows; unsigned char *tile_pixels; /* Convert tiled TIFF image. 
*/ if ((TIFFGetField(tiff,TIFFTAG_TILEWIDTH,&columns) != 1) || (TIFFGetField(tiff,TIFFTAG_TILELENGTH,&rows) != 1)) ThrowTIFFException(CoderError,"ImageIsNotTiled"); number_pixels=(MagickSizeType) columns*rows; if (HeapOverflowSanityCheck(rows,sizeof(*tile_pixels)) != MagickFalse) ThrowTIFFException(ResourceLimitError,"MemoryAllocationFailed"); extent=TIFFTileSize(tiff); #if defined(TIFF_VERSION_BIG) extent+=columns*sizeof(uint64); #else extent+=columns*sizeof(uint32); #endif tile_pixels=(unsigned char *) AcquireQuantumMemory(extent, sizeof(*tile_pixels)); if (tile_pixels == (unsigned char *) NULL) ThrowTIFFException(ResourceLimitError,"MemoryAllocationFailed"); (void) memset(tile_pixels,0,extent*sizeof(*tile_pixels)); for (i=0; i < (ssize_t) samples_per_pixel; i++) { switch (i) { case 0: break; case 1: quantum_type=GreenQuantum; break; case 2: quantum_type=BlueQuantum; break; case 3: { quantum_type=AlphaQuantum; if (image->colorspace == CMYKColorspace) quantum_type=BlackQuantum; break; } case 4: quantum_type=AlphaQuantum; break; default: break; } for (y=0; y < (ssize_t) image->rows; y+=rows) { ssize_t x; size_t rows_remaining; rows_remaining=image->rows-y; if ((ssize_t) (y+rows) < (ssize_t) image->rows) rows_remaining=rows; for (x=0; x < (ssize_t) image->columns; x+=columns) { size_t columns_remaining, row; columns_remaining=image->columns-x; if ((ssize_t) (x+columns) < (ssize_t) image->columns) columns_remaining=columns; if (TIFFReadTile(tiff,tile_pixels,(uint32) x,(uint32) y,0,i) == 0) break; p=tile_pixels; for (row=0; row < rows_remaining; row++) { Quantum *magick_restrict q; q=GetAuthenticPixels(image,x,y+row,columns_remaining,1, exception); if (q == (Quantum *) NULL) break; (void) ImportQuantumPixels(image,(CacheView *) NULL, quantum_info,quantum_type,p,exception); p+=TIFFTileRowSize(tiff); if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } } } if ((samples_per_pixel > 1) && (interlace != PLANARCONFIG_SEPARATE)) break; if (image->previous == (Image *) NULL) { status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) i, samples_per_pixel); if (status == MagickFalse) break; } } tile_pixels=(unsigned char *) RelinquishMagickMemory(tile_pixels); break; } case ReadGenericMethod: default: { MemoryInfo *generic_info = (MemoryInfo * ) NULL; uint32 *p; uint32 *pixels; /* Convert generic TIFF image. 
*/ if (HeapOverflowSanityCheck(image->rows,sizeof(*pixels)) != MagickFalse) ThrowTIFFException(ResourceLimitError,"MemoryAllocationFailed"); number_pixels=(MagickSizeType) image->columns*image->rows; #if defined(TIFF_VERSION_BIG) number_pixels+=image->columns*sizeof(uint64); #else number_pixels+=image->columns*sizeof(uint32); #endif generic_info=AcquireVirtualMemory(number_pixels,sizeof(uint32)); if (generic_info == (MemoryInfo *) NULL) ThrowTIFFException(ResourceLimitError,"MemoryAllocationFailed"); pixels=(uint32 *) GetVirtualMemoryBlob(generic_info); (void) TIFFReadRGBAImage(tiff,(uint32) image->columns,(uint32) image->rows,(uint32 *) pixels,0); p=pixels+(image->columns*image->rows)-1; for (y=0; y < (ssize_t) image->rows; y++) { ssize_t x; Quantum *magick_restrict q; q=QueueAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) break; q+=GetPixelChannels(image)*(image->columns-1); for (x=0; x < (ssize_t) image->columns; x++) { SetPixelRed(image,ScaleCharToQuantum((unsigned char) TIFFGetR(*p)),q); SetPixelGreen(image,ScaleCharToQuantum((unsigned char) TIFFGetG(*p)),q); SetPixelBlue(image,ScaleCharToQuantum((unsigned char) TIFFGetB(*p)),q); if (image->alpha_trait != UndefinedPixelTrait) SetPixelAlpha(image,ScaleCharToQuantum((unsigned char) TIFFGetA(*p)),q); p--; q-=GetPixelChannels(image); } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; if (image->previous == (Image *) NULL) { status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) break; } } generic_info=RelinquishVirtualMemory(generic_info); break; } } pixel_info=RelinquishVirtualMemory(pixel_info); SetQuantumImageType(image,quantum_type); next_tiff_frame: if (quantum_info != (QuantumInfo *) NULL) quantum_info=DestroyQuantumInfo(quantum_info); if (photometric == PHOTOMETRIC_CIELAB) DecodeLabImage(image,exception); if ((photometric == PHOTOMETRIC_LOGL) || (photometric == PHOTOMETRIC_MINISBLACK) || (photometric == PHOTOMETRIC_MINISWHITE)) { image->type=GrayscaleType; if (bits_per_sample == 1) image->type=BilevelType; } /* Proceed to next image. */ if (image_info->number_scenes != 0) if (image->scene >= (image_info->scene+image_info->number_scenes-1)) break; more_frames=TIFFReadDirectory(tiff) != 0 ? MagickTrue : MagickFalse; if (more_frames != MagickFalse) { /* Allocate next image structure. */ AcquireNextImage(image_info,image,exception); if (GetNextImageInList(image) == (Image *) NULL) { status=MagickFalse; break; } image=SyncNextImageInList(image); status=SetImageProgress(image,LoadImagesTag,image->scene-1, image->scene); if (status == MagickFalse) break; } } while ((status != MagickFalse) && (more_frames != MagickFalse)); TIFFClose(tiff); if (status != MagickFalse) TIFFReadPhotoshopLayers(image_info,image,exception); if ((image_info->number_scenes != 0) && (image_info->scene >= GetImageListLength(image))) status=MagickFalse; if (status == MagickFalse) return(DestroyImageList(image)); return(GetFirstImageInList(image)); }
static Image *ReadTIFFImage(const ImageInfo *image_info, ExceptionInfo *exception) { #define ThrowTIFFException(severity,message) \ { \ if (pixel_info != (MemoryInfo *) NULL) \ pixel_info=RelinquishVirtualMemory(pixel_info); \ if (quantum_info != (QuantumInfo *) NULL) \ quantum_info=DestroyQuantumInfo(quantum_info); \ TIFFClose(tiff); \ ThrowReaderException(severity,message); \ } const char *option; float *chromaticity, x_position, y_position, x_resolution, y_resolution; Image *image; int tiff_status; MagickBooleanType more_frames; MagickSizeType number_pixels; MagickStatusType status; MemoryInfo *pixel_info = (MemoryInfo *) NULL; QuantumInfo *quantum_info; QuantumType quantum_type; ssize_t i, scanline_size, y; TIFF *tiff; TIFFMethodType method; uint16 compress_tag, bits_per_sample, endian, extra_samples, interlace, max_sample_value, min_sample_value, orientation, pages, photometric, *sample_info, sample_format, samples_per_pixel, units, value; uint32 height, rows_per_strip, width; unsigned char *pixels; void *sans[4] = { NULL, NULL, NULL, NULL }; /* Open image. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); image=AcquireImage(image_info,exception); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { image=DestroyImageList(image); return((Image *) NULL); } (void) SetMagickThreadValue(tiff_exception,exception); tiff=TIFFClientOpen(image->filename,"rb",(thandle_t) image,TIFFReadBlob, TIFFWriteBlob,TIFFSeekBlob,TIFFCloseBlob,TIFFGetBlobSize,TIFFMapBlob, TIFFUnmapBlob); if (tiff == (TIFF *) NULL) { image=DestroyImageList(image); return((Image *) NULL); } if (exception->severity > ErrorException) { TIFFClose(tiff); image=DestroyImageList(image); return((Image *) NULL); } if (image_info->number_scenes != 0) { /* Generate blank images for subimage specification (e.g. image.tif[4]. We need to check the number of directores because it is possible that the subimage(s) are stored in the photoshop profile. */ if (image_info->scene < (size_t) TIFFNumberOfDirectories(tiff)) { for (i=0; i < (ssize_t) image_info->scene; i++) { status=TIFFReadDirectory(tiff) != 0 ? 
MagickTrue : MagickFalse; if (status == MagickFalse) { TIFFClose(tiff); image=DestroyImageList(image); return((Image *) NULL); } AcquireNextImage(image_info,image,exception); if (GetNextImageInList(image) == (Image *) NULL) { TIFFClose(tiff); image=DestroyImageList(image); return((Image *) NULL); } image=SyncNextImageInList(image); } } } more_frames=MagickTrue; do { /* TIFFPrintDirectory(tiff,stdout,MagickFalse); */ photometric=PHOTOMETRIC_RGB; if ((TIFFGetField(tiff,TIFFTAG_IMAGEWIDTH,&width) != 1) || (TIFFGetField(tiff,TIFFTAG_IMAGELENGTH,&height) != 1) || (TIFFGetFieldDefaulted(tiff,TIFFTAG_PHOTOMETRIC,&photometric,sans) != 1) || (TIFFGetFieldDefaulted(tiff,TIFFTAG_COMPRESSION,&compress_tag,sans) != 1) || (TIFFGetFieldDefaulted(tiff,TIFFTAG_FILLORDER,&endian,sans) != 1) || (TIFFGetFieldDefaulted(tiff,TIFFTAG_PLANARCONFIG,&interlace,sans) != 1) || (TIFFGetFieldDefaulted(tiff,TIFFTAG_SAMPLESPERPIXEL,&samples_per_pixel,sans) != 1) || (TIFFGetFieldDefaulted(tiff,TIFFTAG_BITSPERSAMPLE,&bits_per_sample,sans) != 1) || (TIFFGetFieldDefaulted(tiff,TIFFTAG_SAMPLEFORMAT,&sample_format,sans) != 1) || (TIFFGetFieldDefaulted(tiff,TIFFTAG_MINSAMPLEVALUE,&min_sample_value,sans) != 1) || (TIFFGetFieldDefaulted(tiff,TIFFTAG_MAXSAMPLEVALUE,&max_sample_value,sans) != 1)) { TIFFClose(tiff); ThrowReaderException(CorruptImageError,"ImproperImageHeader"); } if (((sample_format != SAMPLEFORMAT_IEEEFP) || (bits_per_sample != 64)) && ((bits_per_sample <= 0) || (bits_per_sample > 32))) { TIFFClose(tiff); ThrowReaderException(CorruptImageError,"UnsupportedBitsPerPixel"); } if (samples_per_pixel > MaxPixelChannels) { TIFFClose(tiff); ThrowReaderException(CorruptImageError,"MaximumChannelsExceeded"); } if (sample_format == SAMPLEFORMAT_IEEEFP) (void) SetImageProperty(image,"quantum:format","floating-point", exception); switch (photometric) { case PHOTOMETRIC_MINISBLACK: { (void) SetImageProperty(image,"tiff:photometric","min-is-black", exception); break; } case PHOTOMETRIC_MINISWHITE: { (void) SetImageProperty(image,"tiff:photometric","min-is-white", exception); break; } case PHOTOMETRIC_PALETTE: { (void) SetImageProperty(image,"tiff:photometric","palette",exception); break; } case PHOTOMETRIC_RGB: { (void) SetImageProperty(image,"tiff:photometric","RGB",exception); break; } case PHOTOMETRIC_CIELAB: { (void) SetImageProperty(image,"tiff:photometric","CIELAB",exception); break; } case PHOTOMETRIC_LOGL: { (void) SetImageProperty(image,"tiff:photometric","CIE Log2(L)", exception); break; } case PHOTOMETRIC_LOGLUV: { (void) SetImageProperty(image,"tiff:photometric","LOGLUV",exception); break; } #if defined(PHOTOMETRIC_MASK) case PHOTOMETRIC_MASK: { (void) SetImageProperty(image,"tiff:photometric","MASK",exception); break; } #endif case PHOTOMETRIC_SEPARATED: { (void) SetImageProperty(image,"tiff:photometric","separated",exception); break; } case PHOTOMETRIC_YCBCR: { (void) SetImageProperty(image,"tiff:photometric","YCBCR",exception); break; } default: { (void) SetImageProperty(image,"tiff:photometric","unknown",exception); break; } } if (image->debug != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(),"Geometry: %ux%u", (unsigned int) width,(unsigned int) height); (void) LogMagickEvent(CoderEvent,GetMagickModule(),"Interlace: %u", interlace); (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Bits per sample: %u",bits_per_sample); (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Min sample value: %u",min_sample_value); (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Max sample value: 
%u",max_sample_value); (void) LogMagickEvent(CoderEvent,GetMagickModule(),"Photometric " "interpretation: %s",GetImageProperty(image,"tiff:photometric", exception)); } image->columns=(size_t) width; image->rows=(size_t) height; image->depth=(size_t) bits_per_sample; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(),"Image depth: %.20g", (double) image->depth); image->endian=MSBEndian; if (endian == FILLORDER_LSB2MSB) image->endian=LSBEndian; #if defined(MAGICKCORE_HAVE_TIFFISBIGENDIAN) if (TIFFIsBigEndian(tiff) == 0) { (void) SetImageProperty(image,"tiff:endian","lsb",exception); image->endian=LSBEndian; } else { (void) SetImageProperty(image,"tiff:endian","msb",exception); image->endian=MSBEndian; } #endif if ((photometric == PHOTOMETRIC_MINISBLACK) || (photometric == PHOTOMETRIC_MINISWHITE)) image->colorspace=GRAYColorspace; if (photometric == PHOTOMETRIC_SEPARATED) image->colorspace=CMYKColorspace; if (photometric == PHOTOMETRIC_CIELAB) image->colorspace=LabColorspace; if ((photometric == PHOTOMETRIC_YCBCR) && (compress_tag != COMPRESSION_JPEG)) image->colorspace=YCbCrColorspace; status=TIFFGetProfiles(tiff,image,exception); if (status == MagickFalse) { TIFFClose(tiff); return(DestroyImageList(image)); } status=TIFFGetProperties(tiff,image,exception); if (status == MagickFalse) { TIFFClose(tiff); return(DestroyImageList(image)); } option=GetImageOption(image_info,"tiff:exif-properties"); if (IsStringFalse(option) == MagickFalse) /* enabled by default */ (void) TIFFGetEXIFProperties(tiff,image,exception); option=GetImageOption(image_info,"tiff:gps-properties"); if (IsStringFalse(option) == MagickFalse) /* enabled by default */ (void) TIFFGetGPSProperties(tiff,image,exception); if ((TIFFGetFieldDefaulted(tiff,TIFFTAG_XRESOLUTION,&x_resolution,sans) == 1) && (TIFFGetFieldDefaulted(tiff,TIFFTAG_YRESOLUTION,&y_resolution,sans) == 1)) { image->resolution.x=x_resolution; image->resolution.y=y_resolution; } if (TIFFGetFieldDefaulted(tiff,TIFFTAG_RESOLUTIONUNIT,&units,sans,sans) == 1) { if (units == RESUNIT_INCH) image->units=PixelsPerInchResolution; if (units == RESUNIT_CENTIMETER) image->units=PixelsPerCentimeterResolution; } if ((TIFFGetFieldDefaulted(tiff,TIFFTAG_XPOSITION,&x_position,sans) == 1) && (TIFFGetFieldDefaulted(tiff,TIFFTAG_YPOSITION,&y_position,sans) == 1)) { image->page.x=CastDoubleToLong(ceil(x_position* image->resolution.x-0.5)); image->page.y=CastDoubleToLong(ceil(y_position* image->resolution.y-0.5)); } if (TIFFGetFieldDefaulted(tiff,TIFFTAG_ORIENTATION,&orientation,sans) == 1) image->orientation=(OrientationType) orientation; if (TIFFGetField(tiff,TIFFTAG_WHITEPOINT,&chromaticity) == 1) { if ((chromaticity != (float *) NULL) && (*chromaticity != 0.0)) { image->chromaticity.white_point.x=chromaticity[0]; image->chromaticity.white_point.y=chromaticity[1]; } } if (TIFFGetField(tiff,TIFFTAG_PRIMARYCHROMATICITIES,&chromaticity) == 1) { if ((chromaticity != (float *) NULL) && (*chromaticity != 0.0)) { image->chromaticity.red_primary.x=chromaticity[0]; image->chromaticity.red_primary.y=chromaticity[1]; image->chromaticity.green_primary.x=chromaticity[2]; image->chromaticity.green_primary.y=chromaticity[3]; image->chromaticity.blue_primary.x=chromaticity[4]; image->chromaticity.blue_primary.y=chromaticity[5]; } } #if defined(MAGICKCORE_HAVE_TIFFISCODECCONFIGURED) || (TIFFLIB_VERSION > 20040919) if ((compress_tag != COMPRESSION_NONE) && (TIFFIsCODECConfigured(compress_tag) == 0)) { TIFFClose(tiff); 
ThrowReaderException(CoderError,"CompressNotSupported"); } #endif switch (compress_tag) { case COMPRESSION_NONE: image->compression=NoCompression; break; case COMPRESSION_CCITTFAX3: image->compression=FaxCompression; break; case COMPRESSION_CCITTFAX4: image->compression=Group4Compression; break; case COMPRESSION_JPEG: { image->compression=JPEGCompression; #if defined(JPEG_SUPPORT) { char sampling_factor[MagickPathExtent]; uint16 horizontal, vertical; tiff_status=TIFFGetField(tiff,TIFFTAG_YCBCRSUBSAMPLING,&horizontal, &vertical); if (tiff_status == 1) { (void) FormatLocaleString(sampling_factor,MagickPathExtent, "%dx%d",horizontal,vertical); (void) SetImageProperty(image,"jpeg:sampling-factor", sampling_factor,exception); (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Sampling Factors: %s",sampling_factor); } } #endif break; } case COMPRESSION_OJPEG: image->compression=JPEGCompression; break; #if defined(COMPRESSION_LZMA) case COMPRESSION_LZMA: image->compression=LZMACompression; break; #endif case COMPRESSION_LZW: image->compression=LZWCompression; break; case COMPRESSION_DEFLATE: image->compression=ZipCompression; break; case COMPRESSION_ADOBE_DEFLATE: image->compression=ZipCompression; break; #if defined(COMPRESSION_WEBP) case COMPRESSION_WEBP: image->compression=WebPCompression; break; #endif #if defined(COMPRESSION_ZSTD) case COMPRESSION_ZSTD: image->compression=ZstdCompression; break; #endif default: image->compression=RLECompression; break; } quantum_info=(QuantumInfo *) NULL; if ((photometric == PHOTOMETRIC_PALETTE) && (pow(2.0,1.0*bits_per_sample) <= MaxColormapSize)) { size_t colors; colors=(size_t) GetQuantumRange(bits_per_sample)+1; if (AcquireImageColormap(image,colors,exception) == MagickFalse) { TIFFClose(tiff); ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); } } value=(unsigned short) image->scene; if (TIFFGetFieldDefaulted(tiff,TIFFTAG_PAGENUMBER,&value,&pages,sans) == 1) image->scene=value; if (image->storage_class == PseudoClass) { size_t range; uint16 *blue_colormap, *green_colormap, *red_colormap; /* Initialize colormap. */ tiff_status=TIFFGetField(tiff,TIFFTAG_COLORMAP,&red_colormap, &green_colormap,&blue_colormap); if (tiff_status == 1) { if ((red_colormap != (uint16 *) NULL) && (green_colormap != (uint16 *) NULL) && (blue_colormap != (uint16 *) NULL)) { range=255; /* might be old style 8-bit colormap */ for (i=0; i < (ssize_t) image->colors; i++) if ((red_colormap[i] >= 256) || (green_colormap[i] >= 256) || (blue_colormap[i] >= 256)) { range=65535; break; } for (i=0; i < (ssize_t) image->colors; i++) { image->colormap[i].red=ClampToQuantum(((double) QuantumRange*red_colormap[i])/range); image->colormap[i].green=ClampToQuantum(((double) QuantumRange*green_colormap[i])/range); image->colormap[i].blue=ClampToQuantum(((double) QuantumRange*blue_colormap[i])/range); } } } } if (image_info->ping != MagickFalse) { if (image_info->number_scenes != 0) if (image->scene >= (image_info->scene+image_info->number_scenes-1)) break; goto next_tiff_frame; } status=SetImageExtent(image,image->columns,image->rows,exception); if (status == MagickFalse) { TIFFClose(tiff); return(DestroyImageList(image)); } status=SetImageColorspace(image,image->colorspace,exception); status&=ResetImagePixels(image,exception); if (status == MagickFalse) { TIFFClose(tiff); return(DestroyImageList(image)); } /* Allocate memory for the image and pixel buffer. 
*/ quantum_info=AcquireQuantumInfo(image_info,image); if (quantum_info == (QuantumInfo *) NULL) ThrowTIFFException(ResourceLimitError,"MemoryAllocationFailed"); if (sample_format == SAMPLEFORMAT_UINT) status=SetQuantumFormat(image,quantum_info,UnsignedQuantumFormat); if (sample_format == SAMPLEFORMAT_INT) status=SetQuantumFormat(image,quantum_info,SignedQuantumFormat); if (sample_format == SAMPLEFORMAT_IEEEFP) status=SetQuantumFormat(image,quantum_info,FloatingPointQuantumFormat); if (status == MagickFalse) ThrowTIFFException(ResourceLimitError,"MemoryAllocationFailed"); status=MagickTrue; switch (photometric) { case PHOTOMETRIC_MINISBLACK: { quantum_info->min_is_white=MagickFalse; break; } case PHOTOMETRIC_MINISWHITE: { quantum_info->min_is_white=MagickTrue; break; } default: break; } extra_samples=0; tiff_status=TIFFGetFieldDefaulted(tiff,TIFFTAG_EXTRASAMPLES,&extra_samples, &sample_info,sans); if (tiff_status == 1) { (void) SetImageProperty(image,"tiff:alpha","unspecified",exception); if (extra_samples == 0) { if ((samples_per_pixel == 4) && (photometric == PHOTOMETRIC_RGB)) image->alpha_trait=BlendPixelTrait; } else for (i=0; i < extra_samples; i++) { image->alpha_trait=BlendPixelTrait; if (sample_info[i] == EXTRASAMPLE_ASSOCALPHA) { SetQuantumAlphaType(quantum_info,AssociatedQuantumAlpha); (void) SetImageProperty(image,"tiff:alpha","associated", exception); } else if (sample_info[i] == EXTRASAMPLE_UNASSALPHA) { SetQuantumAlphaType(quantum_info,DisassociatedQuantumAlpha); (void) SetImageProperty(image,"tiff:alpha","unassociated", exception); } } } if (image->alpha_trait != UndefinedPixelTrait) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception); method=ReadGenericMethod; rows_per_strip=(uint32) image->rows; if (TIFFGetField(tiff,TIFFTAG_ROWSPERSTRIP,&rows_per_strip) == 1) { char buffer[MagickPathExtent]; (void) FormatLocaleString(buffer,MagickPathExtent,"%u", (unsigned int) rows_per_strip); (void) SetImageProperty(image,"tiff:rows-per-strip",buffer,exception); method=ReadStripMethod; if (rows_per_strip > (uint32) image->rows) rows_per_strip=(uint32) image->rows; } if (TIFFIsTiled(tiff) != MagickFalse) { uint32 columns, rows; if ((TIFFGetField(tiff,TIFFTAG_TILEWIDTH,&columns) != 1) || (TIFFGetField(tiff,TIFFTAG_TILELENGTH,&rows) != 1)) ThrowTIFFException(CoderError,"ImageIsNotTiled"); if ((AcquireMagickResource(WidthResource,columns) == MagickFalse) || (AcquireMagickResource(HeightResource,rows) == MagickFalse)) ThrowTIFFException(ImageError,"WidthOrHeightExceedsLimit"); method=ReadTileMethod; } if ((photometric == PHOTOMETRIC_LOGLUV) || (compress_tag == COMPRESSION_CCITTFAX3)) method=ReadGenericMethod; if (image->compression == JPEGCompression) method=GetJPEGMethod(image,tiff,photometric,bits_per_sample, samples_per_pixel); quantum_info->endian=LSBEndian; scanline_size=TIFFScanlineSize(tiff); if (scanline_size <= 0) ThrowTIFFException(ResourceLimitError,"MemoryAllocationFailed"); number_pixels=MagickMax((MagickSizeType) image->columns*samples_per_pixel* pow(2.0,ceil(log(bits_per_sample)/log(2.0))),image->columns* rows_per_strip); if ((double) scanline_size > 1.5*number_pixels) ThrowTIFFException(CorruptImageError,"CorruptImage"); number_pixels=MagickMax((MagickSizeType) scanline_size,number_pixels); pixel_info=AcquireVirtualMemory(number_pixels,sizeof(uint32)); if (pixel_info == (MemoryInfo *) NULL) ThrowTIFFException(ResourceLimitError,"MemoryAllocationFailed"); pixels=(unsigned char *) GetVirtualMemoryBlob(pixel_info); (void) memset(pixels,0,number_pixels*sizeof(uint32)); 
quantum_type=GrayQuantum; if (image->storage_class == PseudoClass) quantum_type=IndexQuantum; if (interlace != PLANARCONFIG_SEPARATE) { size_t pad; pad=(size_t) MagickMax((ssize_t) samples_per_pixel-1,0); if (image->alpha_trait != UndefinedPixelTrait) { if (image->storage_class == PseudoClass) quantum_type=IndexAlphaQuantum; else quantum_type=samples_per_pixel == 1 ? AlphaQuantum : GrayAlphaQuantum; } if ((samples_per_pixel > 2) && (interlace != PLANARCONFIG_SEPARATE)) { quantum_type=RGBQuantum; pad=(size_t) MagickMax((size_t) samples_per_pixel-3,0); if (image->alpha_trait != UndefinedPixelTrait) { quantum_type=RGBAQuantum; pad=(size_t) MagickMax((size_t) samples_per_pixel-4,0); } if (image->colorspace == CMYKColorspace) { quantum_type=CMYKQuantum; pad=(size_t) MagickMax((size_t) samples_per_pixel-4,0); if (image->alpha_trait != UndefinedPixelTrait) { quantum_type=CMYKAQuantum; pad=(size_t) MagickMax((size_t) samples_per_pixel-5,0); } } status=SetQuantumPad(image,quantum_info,pad*((bits_per_sample+7) >> 3)); if (status == MagickFalse) ThrowTIFFException(ResourceLimitError,"MemoryAllocationFailed"); } } switch (method) { case ReadYCCKMethod: { /* Convert YCC TIFF image. */ for (y=0; y < (ssize_t) image->rows; y++) { Quantum *magick_restrict q; ssize_t x; unsigned char *p; tiff_status=TIFFReadPixels(tiff,0,y,(char *) pixels); if (tiff_status == -1) break; q=QueueAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) break; p=pixels; for (x=0; x < (ssize_t) image->columns; x++) { SetPixelCyan(image,ScaleCharToQuantum(ClampYCC((double) *p+ (1.402*(double) *(p+2))-179.456)),q); SetPixelMagenta(image,ScaleCharToQuantum(ClampYCC((double) *p- (0.34414*(double) *(p+1))-(0.71414*(double ) *(p+2))+ 135.45984)),q); SetPixelYellow(image,ScaleCharToQuantum(ClampYCC((double) *p+ (1.772*(double) *(p+1))-226.816)),q); SetPixelBlack(image,ScaleCharToQuantum((unsigned char) *(p+3)),q); q+=GetPixelChannels(image); p+=4; } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; if (image->previous == (Image *) NULL) { status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) break; } } break; } case ReadStripMethod: { unsigned char *p; size_t extent; ssize_t stride, strip_id; tsize_t strip_size; unsigned char *strip_pixels; /* Convert stripped TIFF image. 
*/ extent=4*TIFFStripSize(tiff); #if defined(TIFF_VERSION_BIG) extent+=image->columns*sizeof(uint64); #else extent+=image->columns*sizeof(uint32); #endif strip_pixels=(unsigned char *) AcquireQuantumMemory(extent, sizeof(*strip_pixels)); if (strip_pixels == (unsigned char *) NULL) ThrowTIFFException(ResourceLimitError,"MemoryAllocationFailed"); (void) memset(strip_pixels,0,extent*sizeof(*strip_pixels)); stride=TIFFVStripSize(tiff,1); strip_id=0; p=strip_pixels; for (i=0; i < (ssize_t) samples_per_pixel; i++) { size_t rows_remaining; switch (i) { case 0: break; case 1: quantum_type=GreenQuantum; break; case 2: quantum_type=BlueQuantum; break; case 3: { quantum_type=AlphaQuantum; if (image->colorspace == CMYKColorspace) quantum_type=BlackQuantum; break; } case 4: quantum_type=AlphaQuantum; break; default: break; } rows_remaining=0; for (y=0; y < (ssize_t) image->rows; y++) { Quantum *magick_restrict q; q=GetAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) break; if (rows_remaining == 0) { strip_size=TIFFReadEncodedStrip(tiff,strip_id,strip_pixels, TIFFStripSize(tiff)); if (strip_size == -1) break; rows_remaining=rows_per_strip; if ((y+rows_per_strip) > (ssize_t) image->rows) rows_remaining=(rows_per_strip-(y+rows_per_strip- image->rows)); p=strip_pixels; strip_id++; } (void) ImportQuantumPixels(image,(CacheView *) NULL, quantum_info,quantum_type,p,exception); p+=stride; rows_remaining--; if (SyncAuthenticPixels(image,exception) == MagickFalse) break; if (image->previous == (Image *) NULL) { status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) break; } } if ((samples_per_pixel > 1) && (interlace != PLANARCONFIG_SEPARATE)) break; } strip_pixels=(unsigned char *) RelinquishMagickMemory(strip_pixels); break; } case ReadTileMethod: { unsigned char *p; size_t extent; uint32 columns, rows; unsigned char *tile_pixels; /* Convert tiled TIFF image. 
*/ if ((TIFFGetField(tiff,TIFFTAG_TILEWIDTH,&columns) != 1) || (TIFFGetField(tiff,TIFFTAG_TILELENGTH,&rows) != 1)) ThrowTIFFException(CoderError,"ImageIsNotTiled"); number_pixels=(MagickSizeType) columns*rows; if (HeapOverflowSanityCheck(rows,sizeof(*tile_pixels)) != MagickFalse) ThrowTIFFException(ResourceLimitError,"MemoryAllocationFailed"); extent=TIFFTileSize(tiff); #if defined(TIFF_VERSION_BIG) extent+=columns*sizeof(uint64); #else extent+=columns*sizeof(uint32); #endif tile_pixels=(unsigned char *) AcquireQuantumMemory(extent, sizeof(*tile_pixels)); if (tile_pixels == (unsigned char *) NULL) ThrowTIFFException(ResourceLimitError,"MemoryAllocationFailed"); (void) memset(tile_pixels,0,extent*sizeof(*tile_pixels)); for (i=0; i < (ssize_t) samples_per_pixel; i++) { switch (i) { case 0: break; case 1: quantum_type=GreenQuantum; break; case 2: quantum_type=BlueQuantum; break; case 3: { quantum_type=AlphaQuantum; if (image->colorspace == CMYKColorspace) quantum_type=BlackQuantum; break; } case 4: quantum_type=AlphaQuantum; break; default: break; } for (y=0; y < (ssize_t) image->rows; y+=rows) { ssize_t x; size_t rows_remaining; rows_remaining=image->rows-y; if ((ssize_t) (y+rows) < (ssize_t) image->rows) rows_remaining=rows; for (x=0; x < (ssize_t) image->columns; x+=columns) { size_t columns_remaining, row; columns_remaining=image->columns-x; if ((ssize_t) (x+columns) < (ssize_t) image->columns) columns_remaining=columns; if (TIFFReadTile(tiff,tile_pixels,(uint32) x,(uint32) y,0,i) == 0) break; p=tile_pixels; for (row=0; row < rows_remaining; row++) { Quantum *magick_restrict q; q=GetAuthenticPixels(image,x,y+row,columns_remaining,1, exception); if (q == (Quantum *) NULL) break; (void) ImportQuantumPixels(image,(CacheView *) NULL, quantum_info,quantum_type,p,exception); p+=TIFFTileRowSize(tiff); if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } } } if ((samples_per_pixel > 1) && (interlace != PLANARCONFIG_SEPARATE)) break; if (image->previous == (Image *) NULL) { status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) i, samples_per_pixel); if (status == MagickFalse) break; } } tile_pixels=(unsigned char *) RelinquishMagickMemory(tile_pixels); break; } case ReadGenericMethod: default: { MemoryInfo *generic_info = (MemoryInfo * ) NULL; uint32 *p; uint32 *pixels; /* Convert generic TIFF image. 
*/ if (HeapOverflowSanityCheck(image->rows,sizeof(*pixels)) != MagickFalse) ThrowTIFFException(ResourceLimitError,"MemoryAllocationFailed"); number_pixels=(MagickSizeType) image->columns*image->rows; #if defined(TIFF_VERSION_BIG) number_pixels+=image->columns*sizeof(uint64); #else number_pixels+=image->columns*sizeof(uint32); #endif generic_info=AcquireVirtualMemory(number_pixels,sizeof(uint32)); if (generic_info == (MemoryInfo *) NULL) ThrowTIFFException(ResourceLimitError,"MemoryAllocationFailed"); pixels=(uint32 *) GetVirtualMemoryBlob(generic_info); (void) TIFFReadRGBAImage(tiff,(uint32) image->columns,(uint32) image->rows,(uint32 *) pixels,0); p=pixels+(image->columns*image->rows)-1; for (y=0; y < (ssize_t) image->rows; y++) { ssize_t x; Quantum *magick_restrict q; q=QueueAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) break; q+=GetPixelChannels(image)*(image->columns-1); for (x=0; x < (ssize_t) image->columns; x++) { SetPixelRed(image,ScaleCharToQuantum((unsigned char) TIFFGetR(*p)),q); SetPixelGreen(image,ScaleCharToQuantum((unsigned char) TIFFGetG(*p)),q); SetPixelBlue(image,ScaleCharToQuantum((unsigned char) TIFFGetB(*p)),q); if (image->alpha_trait != UndefinedPixelTrait) SetPixelAlpha(image,ScaleCharToQuantum((unsigned char) TIFFGetA(*p)),q); p--; q-=GetPixelChannels(image); } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; if (image->previous == (Image *) NULL) { status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) break; } } generic_info=RelinquishVirtualMemory(generic_info); break; } } pixel_info=RelinquishVirtualMemory(pixel_info); SetQuantumImageType(image,quantum_type); next_tiff_frame: if (quantum_info != (QuantumInfo *) NULL) quantum_info=DestroyQuantumInfo(quantum_info); if (photometric == PHOTOMETRIC_CIELAB) DecodeLabImage(image,exception); if ((photometric == PHOTOMETRIC_LOGL) || (photometric == PHOTOMETRIC_MINISBLACK) || (photometric == PHOTOMETRIC_MINISWHITE)) { image->type=GrayscaleType; if (bits_per_sample == 1) image->type=BilevelType; } /* Proceed to next image. */ if (image_info->number_scenes != 0) if (image->scene >= (image_info->scene+image_info->number_scenes-1)) break; more_frames=TIFFReadDirectory(tiff) != 0 ? MagickTrue : MagickFalse; if (more_frames != MagickFalse) { /* Allocate next image structure. */ AcquireNextImage(image_info,image,exception); if (GetNextImageInList(image) == (Image *) NULL) { status=MagickFalse; break; } image=SyncNextImageInList(image); status=SetImageProgress(image,LoadImagesTag,image->scene-1, image->scene); if (status == MagickFalse) break; } } while ((status != MagickFalse) && (more_frames != MagickFalse)); TIFFClose(tiff); if (status != MagickFalse) TIFFReadPhotoshopLayers(image_info,image,exception); if ((image_info->number_scenes != 0) && (image_info->scene >= GetImageListLength(image))) status=MagickFalse; if (status == MagickFalse) return(DestroyImageList(image)); return(GetFirstImageInList(image)); }
{'added': [(1897, ' extent=4*TIFFStripSize(tiff);')], 'deleted': [(1897, ' extent=2*TIFFStripSize(tiff);')]}
1
1
3348
21488
https://github.com/ImageMagick/ImageMagick
CVE-2021-3610
['CWE-125']
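The record above captures the fix for CVE-2021-3610 (CWE-125, out-of-bounds read): in ReadStripMethod the planar strip buffer was allocated as 2*TIFFStripSize(tiff), but the row-by-row ImportQuantumPixels() walk advances the read pointer by TIFFVStripSize(tiff,1) per row and could run past the end of the buffer; the one-line patch at source line 1897 grows the allocation to 4*TIFFStripSize(tiff). A minimal sketch of the sizing pattern, with hypothetical stand-ins (strip_size, row_stride, the 640-column width) in place of a live libtiff handle:

#include <stdlib.h>

/* Hypothetical stand-ins for TIFFStripSize()/TIFFVStripSize(tiff,1);
 * in libtiff both depend on the open TIFF handle. */
static size_t strip_size(void) { return 1024; } /* bytes per encoded strip */
static size_t row_stride(void) { return 96; }   /* bytes consumed per row */

int main(void)
{
    size_t columns = 640;               /* hypothetical image width */
    size_t extent = 4*strip_size();     /* was 2*strip_size() before the fix */
    extent += columns*sizeof(unsigned int); /* per-row slack; the coder uses
                                               sizeof(uint64)/sizeof(uint32)
                                               depending on TIFF_VERSION_BIG */
    unsigned char *strip_pixels = calloc(extent, 1);
    if (strip_pixels == NULL)
        return 1;
    /* The coder advances one row_stride per image row between strip reads;
     * the enlarged extent keeps that walk inside the allocation. */
    unsigned char *p = strip_pixels;
    while ((size_t)(p - strip_pixels) + row_stride() <= extent)
        p += row_stride();
    free(strip_pixels);
    return 0;
}

The 4* factor appears to be conservative headroom rather than a computed bound; the added columns*sizeof(...) slack mirrors the coder's own per-row padding.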
ib.c
rds_ib_laddr_check
/* * Copyright (c) 2006 Oracle. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * */ #include <linux/kernel.h> #include <linux/in.h> #include <linux/if.h> #include <linux/netdevice.h> #include <linux/inetdevice.h> #include <linux/if_arp.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/module.h> #include "rds.h" #include "ib.h" static unsigned int fmr_pool_size = RDS_FMR_POOL_SIZE; unsigned int fmr_message_size = RDS_FMR_SIZE + 1; /* +1 allows for unaligned MRs */ unsigned int rds_ib_retry_count = RDS_IB_DEFAULT_RETRY_COUNT; module_param(fmr_pool_size, int, 0444); MODULE_PARM_DESC(fmr_pool_size, " Max number of fmr per HCA"); module_param(fmr_message_size, int, 0444); MODULE_PARM_DESC(fmr_message_size, " Max size of a RDMA transfer"); module_param(rds_ib_retry_count, int, 0444); MODULE_PARM_DESC(rds_ib_retry_count, " Number of hw retries before reporting an error"); /* * we have a clumsy combination of RCU and a rwsem protecting this list * because it is used both in the get_mr fast path and while blocking in * the FMR flushing path. */ DECLARE_RWSEM(rds_ib_devices_lock); struct list_head rds_ib_devices; /* NOTE: if also grabbing ibdev lock, grab this first */ DEFINE_SPINLOCK(ib_nodev_conns_lock); LIST_HEAD(ib_nodev_conns); static void rds_ib_nodev_connect(void) { struct rds_ib_connection *ic; spin_lock(&ib_nodev_conns_lock); list_for_each_entry(ic, &ib_nodev_conns, ib_node) rds_conn_connect_if_down(ic->conn); spin_unlock(&ib_nodev_conns_lock); } static void rds_ib_dev_shutdown(struct rds_ib_device *rds_ibdev) { struct rds_ib_connection *ic; unsigned long flags; spin_lock_irqsave(&rds_ibdev->spinlock, flags); list_for_each_entry(ic, &rds_ibdev->conn_list, ib_node) rds_conn_drop(ic->conn); spin_unlock_irqrestore(&rds_ibdev->spinlock, flags); } /* * rds_ib_destroy_mr_pool() blocks on a few things and mrs drop references * from interrupt context so we push freing off into a work struct in krdsd. 
*/ static void rds_ib_dev_free(struct work_struct *work) { struct rds_ib_ipaddr *i_ipaddr, *i_next; struct rds_ib_device *rds_ibdev = container_of(work, struct rds_ib_device, free_work); if (rds_ibdev->mr_pool) rds_ib_destroy_mr_pool(rds_ibdev->mr_pool); if (rds_ibdev->mr) ib_dereg_mr(rds_ibdev->mr); if (rds_ibdev->pd) ib_dealloc_pd(rds_ibdev->pd); list_for_each_entry_safe(i_ipaddr, i_next, &rds_ibdev->ipaddr_list, list) { list_del(&i_ipaddr->list); kfree(i_ipaddr); } kfree(rds_ibdev); } void rds_ib_dev_put(struct rds_ib_device *rds_ibdev) { BUG_ON(atomic_read(&rds_ibdev->refcount) <= 0); if (atomic_dec_and_test(&rds_ibdev->refcount)) queue_work(rds_wq, &rds_ibdev->free_work); } static void rds_ib_add_one(struct ib_device *device) { struct rds_ib_device *rds_ibdev; struct ib_device_attr *dev_attr; /* Only handle IB (no iWARP) devices */ if (device->node_type != RDMA_NODE_IB_CA) return; dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL); if (!dev_attr) return; if (ib_query_device(device, dev_attr)) { rdsdebug("Query device failed for %s\n", device->name); goto free_attr; } rds_ibdev = kzalloc_node(sizeof(struct rds_ib_device), GFP_KERNEL, ibdev_to_node(device)); if (!rds_ibdev) goto free_attr; spin_lock_init(&rds_ibdev->spinlock); atomic_set(&rds_ibdev->refcount, 1); INIT_WORK(&rds_ibdev->free_work, rds_ib_dev_free); rds_ibdev->max_wrs = dev_attr->max_qp_wr; rds_ibdev->max_sge = min(dev_attr->max_sge, RDS_IB_MAX_SGE); rds_ibdev->fmr_max_remaps = dev_attr->max_map_per_fmr?: 32; rds_ibdev->max_fmrs = dev_attr->max_fmr ? min_t(unsigned int, dev_attr->max_fmr, fmr_pool_size) : fmr_pool_size; rds_ibdev->max_initiator_depth = dev_attr->max_qp_init_rd_atom; rds_ibdev->max_responder_resources = dev_attr->max_qp_rd_atom; rds_ibdev->dev = device; rds_ibdev->pd = ib_alloc_pd(device); if (IS_ERR(rds_ibdev->pd)) { rds_ibdev->pd = NULL; goto put_dev; } rds_ibdev->mr = ib_get_dma_mr(rds_ibdev->pd, IB_ACCESS_LOCAL_WRITE); if (IS_ERR(rds_ibdev->mr)) { rds_ibdev->mr = NULL; goto put_dev; } rds_ibdev->mr_pool = rds_ib_create_mr_pool(rds_ibdev); if (IS_ERR(rds_ibdev->mr_pool)) { rds_ibdev->mr_pool = NULL; goto put_dev; } INIT_LIST_HEAD(&rds_ibdev->ipaddr_list); INIT_LIST_HEAD(&rds_ibdev->conn_list); down_write(&rds_ib_devices_lock); list_add_tail_rcu(&rds_ibdev->list, &rds_ib_devices); up_write(&rds_ib_devices_lock); atomic_inc(&rds_ibdev->refcount); ib_set_client_data(device, &rds_ib_client, rds_ibdev); atomic_inc(&rds_ibdev->refcount); rds_ib_nodev_connect(); put_dev: rds_ib_dev_put(rds_ibdev); free_attr: kfree(dev_attr); } /* * New connections use this to find the device to associate with the * connection. It's not in the fast path so we're not concerned about the * performance of the IB call. (As of this writing, it uses an interrupt * blocking spinlock to serialize walking a per-device list of all registered * clients.) * * RCU is used to handle incoming connections racing with device teardown. * Rather than use a lock to serialize removal from the client_data and * getting a new reference, we use an RCU grace period. The destruction * path removes the device from client_data and then waits for all RCU * readers to finish. * * A new connection can get NULL from this if its arriving on a * device that is in the process of being removed. 
*/ struct rds_ib_device *rds_ib_get_client_data(struct ib_device *device) { struct rds_ib_device *rds_ibdev; rcu_read_lock(); rds_ibdev = ib_get_client_data(device, &rds_ib_client); if (rds_ibdev) atomic_inc(&rds_ibdev->refcount); rcu_read_unlock(); return rds_ibdev; } /* * The IB stack is letting us know that a device is going away. This can * happen if the underlying HCA driver is removed or if PCI hotplug is removing * the pci function, for example. * * This can be called at any time and can be racing with any other RDS path. */ static void rds_ib_remove_one(struct ib_device *device) { struct rds_ib_device *rds_ibdev; rds_ibdev = ib_get_client_data(device, &rds_ib_client); if (!rds_ibdev) return; rds_ib_dev_shutdown(rds_ibdev); /* stop connection attempts from getting a reference to this device. */ ib_set_client_data(device, &rds_ib_client, NULL); down_write(&rds_ib_devices_lock); list_del_rcu(&rds_ibdev->list); up_write(&rds_ib_devices_lock); /* * This synchronize rcu is waiting for readers of both the ib * client data and the devices list to finish before we drop * both of those references. */ synchronize_rcu(); rds_ib_dev_put(rds_ibdev); rds_ib_dev_put(rds_ibdev); } struct ib_client rds_ib_client = { .name = "rds_ib", .add = rds_ib_add_one, .remove = rds_ib_remove_one }; static int rds_ib_conn_info_visitor(struct rds_connection *conn, void *buffer) { struct rds_info_rdma_connection *iinfo = buffer; struct rds_ib_connection *ic; /* We will only ever look at IB transports */ if (conn->c_trans != &rds_ib_transport) return 0; iinfo->src_addr = conn->c_laddr; iinfo->dst_addr = conn->c_faddr; memset(&iinfo->src_gid, 0, sizeof(iinfo->src_gid)); memset(&iinfo->dst_gid, 0, sizeof(iinfo->dst_gid)); if (rds_conn_state(conn) == RDS_CONN_UP) { struct rds_ib_device *rds_ibdev; struct rdma_dev_addr *dev_addr; ic = conn->c_transport_data; dev_addr = &ic->i_cm_id->route.addr.dev_addr; rdma_addr_get_sgid(dev_addr, (union ib_gid *) &iinfo->src_gid); rdma_addr_get_dgid(dev_addr, (union ib_gid *) &iinfo->dst_gid); rds_ibdev = ic->rds_ibdev; iinfo->max_send_wr = ic->i_send_ring.w_nr; iinfo->max_recv_wr = ic->i_recv_ring.w_nr; iinfo->max_send_sge = rds_ibdev->max_sge; rds_ib_get_mr_info(rds_ibdev, iinfo); } return 1; } static void rds_ib_ic_info(struct socket *sock, unsigned int len, struct rds_info_iterator *iter, struct rds_info_lengths *lens) { rds_for_each_conn_info(sock, len, iter, lens, rds_ib_conn_info_visitor, sizeof(struct rds_info_rdma_connection)); } /* * Early RDS/IB was built to only bind to an address if there is an IPoIB * device with that address set. * * If it were me, I'd advocate for something more flexible. Sending and * receiving should be device-agnostic. Transports would try and maintain * connections between peers who have messages queued. Userspace would be * allowed to influence which paths have priority. We could call userspace * asserting this policy "routing". */ static int rds_ib_laddr_check(__be32 addr) { int ret; struct rdma_cm_id *cm_id; struct sockaddr_in sin; /* Create a CMA ID and try to bind it. This catches both * IB and iWARP capable NICs. */ cm_id = rdma_create_id(NULL, NULL, RDMA_PS_TCP, IB_QPT_RC); if (IS_ERR(cm_id)) return PTR_ERR(cm_id); memset(&sin, 0, sizeof(sin)); sin.sin_family = AF_INET; sin.sin_addr.s_addr = addr; /* rdma_bind_addr will only succeed for IB & iWARP devices */ ret = rdma_bind_addr(cm_id, (struct sockaddr *)&sin); /* due to this, we will claim to support iWARP devices unless we check node_type. 
*/ if (ret || cm_id->device->node_type != RDMA_NODE_IB_CA) ret = -EADDRNOTAVAIL; rdsdebug("addr %pI4 ret %d node type %d\n", &addr, ret, cm_id->device ? cm_id->device->node_type : -1); rdma_destroy_id(cm_id); return ret; } static void rds_ib_unregister_client(void) { ib_unregister_client(&rds_ib_client); /* wait for rds_ib_dev_free() to complete */ flush_workqueue(rds_wq); } void rds_ib_exit(void) { rds_info_deregister_func(RDS_INFO_IB_CONNECTIONS, rds_ib_ic_info); rds_ib_unregister_client(); rds_ib_destroy_nodev_conns(); rds_ib_sysctl_exit(); rds_ib_recv_exit(); rds_trans_unregister(&rds_ib_transport); } struct rds_transport rds_ib_transport = { .laddr_check = rds_ib_laddr_check, .xmit_complete = rds_ib_xmit_complete, .xmit = rds_ib_xmit, .xmit_rdma = rds_ib_xmit_rdma, .xmit_atomic = rds_ib_xmit_atomic, .recv = rds_ib_recv, .conn_alloc = rds_ib_conn_alloc, .conn_free = rds_ib_conn_free, .conn_connect = rds_ib_conn_connect, .conn_shutdown = rds_ib_conn_shutdown, .inc_copy_to_user = rds_ib_inc_copy_to_user, .inc_free = rds_ib_inc_free, .cm_initiate_connect = rds_ib_cm_initiate_connect, .cm_handle_connect = rds_ib_cm_handle_connect, .cm_connect_complete = rds_ib_cm_connect_complete, .stats_info_copy = rds_ib_stats_info_copy, .exit = rds_ib_exit, .get_mr = rds_ib_get_mr, .sync_mr = rds_ib_sync_mr, .free_mr = rds_ib_free_mr, .flush_mrs = rds_ib_flush_mrs, .t_owner = THIS_MODULE, .t_name = "infiniband", .t_type = RDS_TRANS_IB }; int rds_ib_init(void) { int ret; INIT_LIST_HEAD(&rds_ib_devices); ret = ib_register_client(&rds_ib_client); if (ret) goto out; ret = rds_ib_sysctl_init(); if (ret) goto out_ibreg; ret = rds_ib_recv_init(); if (ret) goto out_sysctl; ret = rds_trans_register(&rds_ib_transport); if (ret) goto out_recv; rds_info_register_func(RDS_INFO_IB_CONNECTIONS, rds_ib_ic_info); goto out; out_recv: rds_ib_recv_exit(); out_sysctl: rds_ib_sysctl_exit(); out_ibreg: rds_ib_unregister_client(); out: return ret; } MODULE_LICENSE("GPL");
/* * Copyright (c) 2006 Oracle. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * */ #include <linux/kernel.h> #include <linux/in.h> #include <linux/if.h> #include <linux/netdevice.h> #include <linux/inetdevice.h> #include <linux/if_arp.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/module.h> #include "rds.h" #include "ib.h" static unsigned int fmr_pool_size = RDS_FMR_POOL_SIZE; unsigned int fmr_message_size = RDS_FMR_SIZE + 1; /* +1 allows for unaligned MRs */ unsigned int rds_ib_retry_count = RDS_IB_DEFAULT_RETRY_COUNT; module_param(fmr_pool_size, int, 0444); MODULE_PARM_DESC(fmr_pool_size, " Max number of fmr per HCA"); module_param(fmr_message_size, int, 0444); MODULE_PARM_DESC(fmr_message_size, " Max size of a RDMA transfer"); module_param(rds_ib_retry_count, int, 0444); MODULE_PARM_DESC(rds_ib_retry_count, " Number of hw retries before reporting an error"); /* * we have a clumsy combination of RCU and a rwsem protecting this list * because it is used both in the get_mr fast path and while blocking in * the FMR flushing path. */ DECLARE_RWSEM(rds_ib_devices_lock); struct list_head rds_ib_devices; /* NOTE: if also grabbing ibdev lock, grab this first */ DEFINE_SPINLOCK(ib_nodev_conns_lock); LIST_HEAD(ib_nodev_conns); static void rds_ib_nodev_connect(void) { struct rds_ib_connection *ic; spin_lock(&ib_nodev_conns_lock); list_for_each_entry(ic, &ib_nodev_conns, ib_node) rds_conn_connect_if_down(ic->conn); spin_unlock(&ib_nodev_conns_lock); } static void rds_ib_dev_shutdown(struct rds_ib_device *rds_ibdev) { struct rds_ib_connection *ic; unsigned long flags; spin_lock_irqsave(&rds_ibdev->spinlock, flags); list_for_each_entry(ic, &rds_ibdev->conn_list, ib_node) rds_conn_drop(ic->conn); spin_unlock_irqrestore(&rds_ibdev->spinlock, flags); } /* * rds_ib_destroy_mr_pool() blocks on a few things and mrs drop references * from interrupt context so we push freing off into a work struct in krdsd. 
*/ static void rds_ib_dev_free(struct work_struct *work) { struct rds_ib_ipaddr *i_ipaddr, *i_next; struct rds_ib_device *rds_ibdev = container_of(work, struct rds_ib_device, free_work); if (rds_ibdev->mr_pool) rds_ib_destroy_mr_pool(rds_ibdev->mr_pool); if (rds_ibdev->mr) ib_dereg_mr(rds_ibdev->mr); if (rds_ibdev->pd) ib_dealloc_pd(rds_ibdev->pd); list_for_each_entry_safe(i_ipaddr, i_next, &rds_ibdev->ipaddr_list, list) { list_del(&i_ipaddr->list); kfree(i_ipaddr); } kfree(rds_ibdev); } void rds_ib_dev_put(struct rds_ib_device *rds_ibdev) { BUG_ON(atomic_read(&rds_ibdev->refcount) <= 0); if (atomic_dec_and_test(&rds_ibdev->refcount)) queue_work(rds_wq, &rds_ibdev->free_work); } static void rds_ib_add_one(struct ib_device *device) { struct rds_ib_device *rds_ibdev; struct ib_device_attr *dev_attr; /* Only handle IB (no iWARP) devices */ if (device->node_type != RDMA_NODE_IB_CA) return; dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL); if (!dev_attr) return; if (ib_query_device(device, dev_attr)) { rdsdebug("Query device failed for %s\n", device->name); goto free_attr; } rds_ibdev = kzalloc_node(sizeof(struct rds_ib_device), GFP_KERNEL, ibdev_to_node(device)); if (!rds_ibdev) goto free_attr; spin_lock_init(&rds_ibdev->spinlock); atomic_set(&rds_ibdev->refcount, 1); INIT_WORK(&rds_ibdev->free_work, rds_ib_dev_free); rds_ibdev->max_wrs = dev_attr->max_qp_wr; rds_ibdev->max_sge = min(dev_attr->max_sge, RDS_IB_MAX_SGE); rds_ibdev->fmr_max_remaps = dev_attr->max_map_per_fmr?: 32; rds_ibdev->max_fmrs = dev_attr->max_fmr ? min_t(unsigned int, dev_attr->max_fmr, fmr_pool_size) : fmr_pool_size; rds_ibdev->max_initiator_depth = dev_attr->max_qp_init_rd_atom; rds_ibdev->max_responder_resources = dev_attr->max_qp_rd_atom; rds_ibdev->dev = device; rds_ibdev->pd = ib_alloc_pd(device); if (IS_ERR(rds_ibdev->pd)) { rds_ibdev->pd = NULL; goto put_dev; } rds_ibdev->mr = ib_get_dma_mr(rds_ibdev->pd, IB_ACCESS_LOCAL_WRITE); if (IS_ERR(rds_ibdev->mr)) { rds_ibdev->mr = NULL; goto put_dev; } rds_ibdev->mr_pool = rds_ib_create_mr_pool(rds_ibdev); if (IS_ERR(rds_ibdev->mr_pool)) { rds_ibdev->mr_pool = NULL; goto put_dev; } INIT_LIST_HEAD(&rds_ibdev->ipaddr_list); INIT_LIST_HEAD(&rds_ibdev->conn_list); down_write(&rds_ib_devices_lock); list_add_tail_rcu(&rds_ibdev->list, &rds_ib_devices); up_write(&rds_ib_devices_lock); atomic_inc(&rds_ibdev->refcount); ib_set_client_data(device, &rds_ib_client, rds_ibdev); atomic_inc(&rds_ibdev->refcount); rds_ib_nodev_connect(); put_dev: rds_ib_dev_put(rds_ibdev); free_attr: kfree(dev_attr); } /* * New connections use this to find the device to associate with the * connection. It's not in the fast path so we're not concerned about the * performance of the IB call. (As of this writing, it uses an interrupt * blocking spinlock to serialize walking a per-device list of all registered * clients.) * * RCU is used to handle incoming connections racing with device teardown. * Rather than use a lock to serialize removal from the client_data and * getting a new reference, we use an RCU grace period. The destruction * path removes the device from client_data and then waits for all RCU * readers to finish. * * A new connection can get NULL from this if its arriving on a * device that is in the process of being removed. 
*/ struct rds_ib_device *rds_ib_get_client_data(struct ib_device *device) { struct rds_ib_device *rds_ibdev; rcu_read_lock(); rds_ibdev = ib_get_client_data(device, &rds_ib_client); if (rds_ibdev) atomic_inc(&rds_ibdev->refcount); rcu_read_unlock(); return rds_ibdev; } /* * The IB stack is letting us know that a device is going away. This can * happen if the underlying HCA driver is removed or if PCI hotplug is removing * the pci function, for example. * * This can be called at any time and can be racing with any other RDS path. */ static void rds_ib_remove_one(struct ib_device *device) { struct rds_ib_device *rds_ibdev; rds_ibdev = ib_get_client_data(device, &rds_ib_client); if (!rds_ibdev) return; rds_ib_dev_shutdown(rds_ibdev); /* stop connection attempts from getting a reference to this device. */ ib_set_client_data(device, &rds_ib_client, NULL); down_write(&rds_ib_devices_lock); list_del_rcu(&rds_ibdev->list); up_write(&rds_ib_devices_lock); /* * This synchronize rcu is waiting for readers of both the ib * client data and the devices list to finish before we drop * both of those references. */ synchronize_rcu(); rds_ib_dev_put(rds_ibdev); rds_ib_dev_put(rds_ibdev); } struct ib_client rds_ib_client = { .name = "rds_ib", .add = rds_ib_add_one, .remove = rds_ib_remove_one }; static int rds_ib_conn_info_visitor(struct rds_connection *conn, void *buffer) { struct rds_info_rdma_connection *iinfo = buffer; struct rds_ib_connection *ic; /* We will only ever look at IB transports */ if (conn->c_trans != &rds_ib_transport) return 0; iinfo->src_addr = conn->c_laddr; iinfo->dst_addr = conn->c_faddr; memset(&iinfo->src_gid, 0, sizeof(iinfo->src_gid)); memset(&iinfo->dst_gid, 0, sizeof(iinfo->dst_gid)); if (rds_conn_state(conn) == RDS_CONN_UP) { struct rds_ib_device *rds_ibdev; struct rdma_dev_addr *dev_addr; ic = conn->c_transport_data; dev_addr = &ic->i_cm_id->route.addr.dev_addr; rdma_addr_get_sgid(dev_addr, (union ib_gid *) &iinfo->src_gid); rdma_addr_get_dgid(dev_addr, (union ib_gid *) &iinfo->dst_gid); rds_ibdev = ic->rds_ibdev; iinfo->max_send_wr = ic->i_send_ring.w_nr; iinfo->max_recv_wr = ic->i_recv_ring.w_nr; iinfo->max_send_sge = rds_ibdev->max_sge; rds_ib_get_mr_info(rds_ibdev, iinfo); } return 1; } static void rds_ib_ic_info(struct socket *sock, unsigned int len, struct rds_info_iterator *iter, struct rds_info_lengths *lens) { rds_for_each_conn_info(sock, len, iter, lens, rds_ib_conn_info_visitor, sizeof(struct rds_info_rdma_connection)); } /* * Early RDS/IB was built to only bind to an address if there is an IPoIB * device with that address set. * * If it were me, I'd advocate for something more flexible. Sending and * receiving should be device-agnostic. Transports would try and maintain * connections between peers who have messages queued. Userspace would be * allowed to influence which paths have priority. We could call userspace * asserting this policy "routing". */ static int rds_ib_laddr_check(__be32 addr) { int ret; struct rdma_cm_id *cm_id; struct sockaddr_in sin; /* Create a CMA ID and try to bind it. This catches both * IB and iWARP capable NICs. */ cm_id = rdma_create_id(NULL, NULL, RDMA_PS_TCP, IB_QPT_RC); if (IS_ERR(cm_id)) return PTR_ERR(cm_id); memset(&sin, 0, sizeof(sin)); sin.sin_family = AF_INET; sin.sin_addr.s_addr = addr; /* rdma_bind_addr will only succeed for IB & iWARP devices */ ret = rdma_bind_addr(cm_id, (struct sockaddr *)&sin); /* due to this, we will claim to support iWARP devices unless we check node_type. 
*/ if (ret || !cm_id->device || cm_id->device->node_type != RDMA_NODE_IB_CA) ret = -EADDRNOTAVAIL; rdsdebug("addr %pI4 ret %d node type %d\n", &addr, ret, cm_id->device ? cm_id->device->node_type : -1); rdma_destroy_id(cm_id); return ret; } static void rds_ib_unregister_client(void) { ib_unregister_client(&rds_ib_client); /* wait for rds_ib_dev_free() to complete */ flush_workqueue(rds_wq); } void rds_ib_exit(void) { rds_info_deregister_func(RDS_INFO_IB_CONNECTIONS, rds_ib_ic_info); rds_ib_unregister_client(); rds_ib_destroy_nodev_conns(); rds_ib_sysctl_exit(); rds_ib_recv_exit(); rds_trans_unregister(&rds_ib_transport); } struct rds_transport rds_ib_transport = { .laddr_check = rds_ib_laddr_check, .xmit_complete = rds_ib_xmit_complete, .xmit = rds_ib_xmit, .xmit_rdma = rds_ib_xmit_rdma, .xmit_atomic = rds_ib_xmit_atomic, .recv = rds_ib_recv, .conn_alloc = rds_ib_conn_alloc, .conn_free = rds_ib_conn_free, .conn_connect = rds_ib_conn_connect, .conn_shutdown = rds_ib_conn_shutdown, .inc_copy_to_user = rds_ib_inc_copy_to_user, .inc_free = rds_ib_inc_free, .cm_initiate_connect = rds_ib_cm_initiate_connect, .cm_handle_connect = rds_ib_cm_handle_connect, .cm_connect_complete = rds_ib_cm_connect_complete, .stats_info_copy = rds_ib_stats_info_copy, .exit = rds_ib_exit, .get_mr = rds_ib_get_mr, .sync_mr = rds_ib_sync_mr, .free_mr = rds_ib_free_mr, .flush_mrs = rds_ib_flush_mrs, .t_owner = THIS_MODULE, .t_name = "infiniband", .t_type = RDS_TRANS_IB }; int rds_ib_init(void) { int ret; INIT_LIST_HEAD(&rds_ib_devices); ret = ib_register_client(&rds_ib_client); if (ret) goto out; ret = rds_ib_sysctl_init(); if (ret) goto out_ibreg; ret = rds_ib_recv_init(); if (ret) goto out_sysctl; ret = rds_trans_register(&rds_ib_transport); if (ret) goto out_recv; rds_info_register_func(RDS_INFO_IB_CONNECTIONS, rds_ib_ic_info); goto out; out_recv: rds_ib_recv_exit(); out_sysctl: rds_ib_sysctl_exit(); out_ibreg: rds_ib_unregister_client(); out: return ret; } MODULE_LICENSE("GPL");
static int rds_ib_laddr_check(__be32 addr) { int ret; struct rdma_cm_id *cm_id; struct sockaddr_in sin; /* Create a CMA ID and try to bind it. This catches both * IB and iWARP capable NICs. */ cm_id = rdma_create_id(NULL, NULL, RDMA_PS_TCP, IB_QPT_RC); if (IS_ERR(cm_id)) return PTR_ERR(cm_id); memset(&sin, 0, sizeof(sin)); sin.sin_family = AF_INET; sin.sin_addr.s_addr = addr; /* rdma_bind_addr will only succeed for IB & iWARP devices */ ret = rdma_bind_addr(cm_id, (struct sockaddr *)&sin); /* due to this, we will claim to support iWARP devices unless we check node_type. */ if (ret || cm_id->device->node_type != RDMA_NODE_IB_CA) ret = -EADDRNOTAVAIL; rdsdebug("addr %pI4 ret %d node type %d\n", &addr, ret, cm_id->device ? cm_id->device->node_type : -1); rdma_destroy_id(cm_id); return ret; }
static int rds_ib_laddr_check(__be32 addr) { int ret; struct rdma_cm_id *cm_id; struct sockaddr_in sin; /* Create a CMA ID and try to bind it. This catches both * IB and iWARP capable NICs. */ cm_id = rdma_create_id(NULL, NULL, RDMA_PS_TCP, IB_QPT_RC); if (IS_ERR(cm_id)) return PTR_ERR(cm_id); memset(&sin, 0, sizeof(sin)); sin.sin_family = AF_INET; sin.sin_addr.s_addr = addr; /* rdma_bind_addr will only succeed for IB & iWARP devices */ ret = rdma_bind_addr(cm_id, (struct sockaddr *)&sin); /* due to this, we will claim to support iWARP devices unless we check node_type. */ if (ret || !cm_id->device || cm_id->device->node_type != RDMA_NODE_IB_CA) ret = -EADDRNOTAVAIL; rdsdebug("addr %pI4 ret %d node type %d\n", &addr, ret, cm_id->device ? cm_id->device->node_type : -1); rdma_destroy_id(cm_id); return ret; }
{'added': [(341, '\tif (ret || !cm_id->device ||'), (342, '\t cm_id->device->node_type != RDMA_NODE_IB_CA)')], 'deleted': [(341, '\tif (ret || cm_id->device->node_type != RDMA_NODE_IB_CA)')]}
2
1
275
1567
https://github.com/torvalds/linux
CVE-2013-7339
['CWE-476']
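The diff in this record is a one-condition fix for CVE-2013-7339 (CWE-476, NULL pointer dereference): rdma_bind_addr() can return 0 without associating a device, so the unguarded read of cm_id->device->node_type in rds_ib_laddr_check() faults. The rdsdebug() call just below the check already handles the NULL case with cm_id->device ? ... : -1; the fix extends the same caution to the check itself by adding !cm_id->device to the || chain, so the condition short-circuits before the field access. A minimal standalone sketch of the pattern, using hypothetical stand-in types rather than the kernel's RDMA structs:

#include <stdio.h>
#include <stddef.h>

/* Hypothetical miniatures of the RDMA CM types in the record above. */
struct device { int node_type; };
struct cm_id  { struct device *device; };

enum { NODE_IB_CA = 1 };
#define EADDRNOTAVAIL 99

static int laddr_check_buggy(const struct cm_id *id, int ret)
{
	/* code_before shape: || short-circuits when ret != 0, but when
	 * ret == 0 and id->device is NULL this dereferences NULL. */
	if (ret || id->device->node_type != NODE_IB_CA)
		return -EADDRNOTAVAIL;
	return 0;
}

static int laddr_check_fixed(const struct cm_id *id, int ret)
{
	/* code_after shape: the pointer is tested before the field read. */
	if (ret || !id->device || id->device->node_type != NODE_IB_CA)
		return -EADDRNOTAVAIL;
	return 0;
}

int main(void)
{
	struct cm_id id = { .device = NULL };

	/* laddr_check_buggy(&id, 0) would fault here; the fixed variant
	 * degrades to an -EADDRNOTAVAIL error instead. */
	printf("fixed: %d\n", laddr_check_fixed(&id, 0));
	return 0;
}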
ping.c
ping_recvmsg
/* * INET An implementation of the TCP/IP protocol suite for the LINUX * operating system. INET is implemented using the BSD Socket * interface as the means of communication with the user level. * * "Ping" sockets * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * Based on ipv4/udp.c code. * * Authors: Vasiliy Kulikov / Openwall (for Linux 2.6), * Pavel Kankovsky (for Linux 2.4.32) * * Pavel gave all rights to bugs to Vasiliy, * none of the bugs are Pavel's now. * */ #include <linux/uaccess.h> #include <linux/types.h> #include <linux/fcntl.h> #include <linux/socket.h> #include <linux/sockios.h> #include <linux/in.h> #include <linux/errno.h> #include <linux/timer.h> #include <linux/mm.h> #include <linux/inet.h> #include <linux/netdevice.h> #include <net/snmp.h> #include <net/ip.h> #include <net/icmp.h> #include <net/protocol.h> #include <linux/skbuff.h> #include <linux/proc_fs.h> #include <linux/export.h> #include <net/sock.h> #include <net/ping.h> #include <net/udp.h> #include <net/route.h> #include <net/inet_common.h> #include <net/checksum.h> #if IS_ENABLED(CONFIG_IPV6) #include <linux/in6.h> #include <linux/icmpv6.h> #include <net/addrconf.h> #include <net/ipv6.h> #include <net/transp_v6.h> #endif struct ping_table ping_table; struct pingv6_ops pingv6_ops; EXPORT_SYMBOL_GPL(pingv6_ops); static u16 ping_port_rover; static inline int ping_hashfn(struct net *net, unsigned int num, unsigned int mask) { int res = (num + net_hash_mix(net)) & mask; pr_debug("hash(%d) = %d\n", num, res); return res; } EXPORT_SYMBOL_GPL(ping_hash); static inline struct hlist_nulls_head *ping_hashslot(struct ping_table *table, struct net *net, unsigned int num) { return &table->hash[ping_hashfn(net, num, PING_HTABLE_MASK)]; } int ping_get_port(struct sock *sk, unsigned short ident) { struct hlist_nulls_node *node; struct hlist_nulls_head *hlist; struct inet_sock *isk, *isk2; struct sock *sk2 = NULL; isk = inet_sk(sk); write_lock_bh(&ping_table.lock); if (ident == 0) { u32 i; u16 result = ping_port_rover + 1; for (i = 0; i < (1L << 16); i++, result++) { if (!result) result++; /* avoid zero */ hlist = ping_hashslot(&ping_table, sock_net(sk), result); ping_portaddr_for_each_entry(sk2, node, hlist) { isk2 = inet_sk(sk2); if (isk2->inet_num == result) goto next_port; } /* found */ ping_port_rover = ident = result; break; next_port: ; } if (i >= (1L << 16)) goto fail; } else { hlist = ping_hashslot(&ping_table, sock_net(sk), ident); ping_portaddr_for_each_entry(sk2, node, hlist) { isk2 = inet_sk(sk2); /* BUG? Why is this reuse and not reuseaddr? ping.c * doesn't turn off SO_REUSEADDR, and it doesn't expect * that other ping processes can steal its packets. */ if ((isk2->inet_num == ident) && (sk2 != sk) && (!sk2->sk_reuse || !sk->sk_reuse)) goto fail; } } pr_debug("found port/ident = %d\n", ident); isk->inet_num = ident; if (sk_unhashed(sk)) { pr_debug("was not hashed\n"); sock_hold(sk); hlist_nulls_add_head(&sk->sk_nulls_node, hlist); sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); } write_unlock_bh(&ping_table.lock); return 0; fail: write_unlock_bh(&ping_table.lock); return 1; } EXPORT_SYMBOL_GPL(ping_get_port); void ping_hash(struct sock *sk) { pr_debug("ping_hash(sk->port=%u)\n", inet_sk(sk)->inet_num); BUG(); /* "Please do not press this button again." 
*/ } void ping_unhash(struct sock *sk) { struct inet_sock *isk = inet_sk(sk); pr_debug("ping_unhash(isk=%p,isk->num=%u)\n", isk, isk->inet_num); if (sk_hashed(sk)) { write_lock_bh(&ping_table.lock); hlist_nulls_del(&sk->sk_nulls_node); sock_put(sk); isk->inet_num = 0; isk->inet_sport = 0; sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); write_unlock_bh(&ping_table.lock); } } EXPORT_SYMBOL_GPL(ping_unhash); static struct sock *ping_lookup(struct net *net, struct sk_buff *skb, u16 ident) { struct hlist_nulls_head *hslot = ping_hashslot(&ping_table, net, ident); struct sock *sk = NULL; struct inet_sock *isk; struct hlist_nulls_node *hnode; int dif = skb->dev->ifindex; if (skb->protocol == htons(ETH_P_IP)) { pr_debug("try to find: num = %d, daddr = %pI4, dif = %d\n", (int)ident, &ip_hdr(skb)->daddr, dif); #if IS_ENABLED(CONFIG_IPV6) } else if (skb->protocol == htons(ETH_P_IPV6)) { pr_debug("try to find: num = %d, daddr = %pI6c, dif = %d\n", (int)ident, &ipv6_hdr(skb)->daddr, dif); #endif } read_lock_bh(&ping_table.lock); ping_portaddr_for_each_entry(sk, hnode, hslot) { isk = inet_sk(sk); pr_debug("iterate\n"); if (isk->inet_num != ident) continue; if (skb->protocol == htons(ETH_P_IP) && sk->sk_family == AF_INET) { pr_debug("found: %p: num=%d, daddr=%pI4, dif=%d\n", sk, (int) isk->inet_num, &isk->inet_rcv_saddr, sk->sk_bound_dev_if); if (isk->inet_rcv_saddr && isk->inet_rcv_saddr != ip_hdr(skb)->daddr) continue; #if IS_ENABLED(CONFIG_IPV6) } else if (skb->protocol == htons(ETH_P_IPV6) && sk->sk_family == AF_INET6) { pr_debug("found: %p: num=%d, daddr=%pI6c, dif=%d\n", sk, (int) isk->inet_num, &sk->sk_v6_rcv_saddr, sk->sk_bound_dev_if); if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr) && !ipv6_addr_equal(&sk->sk_v6_rcv_saddr, &ipv6_hdr(skb)->daddr)) continue; #endif } if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif) continue; sock_hold(sk); goto exit; } sk = NULL; exit: read_unlock_bh(&ping_table.lock); return sk; } static void inet_get_ping_group_range_net(struct net *net, kgid_t *low, kgid_t *high) { kgid_t *data = net->ipv4.sysctl_ping_group_range; unsigned int seq; do { seq = read_seqbegin(&net->ipv4.sysctl_local_ports.lock); *low = data[0]; *high = data[1]; } while (read_seqretry(&net->ipv4.sysctl_local_ports.lock, seq)); } int ping_init_sock(struct sock *sk) { struct net *net = sock_net(sk); kgid_t group = current_egid(); struct group_info *group_info = get_current_groups(); int i, j, count = group_info->ngroups; kgid_t low, high; inet_get_ping_group_range_net(net, &low, &high); if (gid_lte(low, group) && gid_lte(group, high)) return 0; for (i = 0; i < group_info->nblocks; i++) { int cp_count = min_t(int, NGROUPS_PER_BLOCK, count); for (j = 0; j < cp_count; j++) { kgid_t gid = group_info->blocks[i][j]; if (gid_lte(low, gid) && gid_lte(gid, high)) return 0; } count -= cp_count; } return -EACCES; } EXPORT_SYMBOL_GPL(ping_init_sock); void ping_close(struct sock *sk, long timeout) { pr_debug("ping_close(sk=%p,sk->num=%u)\n", inet_sk(sk), inet_sk(sk)->inet_num); pr_debug("isk->refcnt = %d\n", sk->sk_refcnt.counter); sk_common_release(sk); } EXPORT_SYMBOL_GPL(ping_close); /* Checks the bind address and possibly modifies sk->sk_bound_dev_if. 
*/ static int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk, struct sockaddr *uaddr, int addr_len) { struct net *net = sock_net(sk); if (sk->sk_family == AF_INET) { struct sockaddr_in *addr = (struct sockaddr_in *) uaddr; int chk_addr_ret; if (addr_len < sizeof(*addr)) return -EINVAL; pr_debug("ping_check_bind_addr(sk=%p,addr=%pI4,port=%d)\n", sk, &addr->sin_addr.s_addr, ntohs(addr->sin_port)); chk_addr_ret = inet_addr_type(net, addr->sin_addr.s_addr); if (addr->sin_addr.s_addr == htonl(INADDR_ANY)) chk_addr_ret = RTN_LOCAL; if ((sysctl_ip_nonlocal_bind == 0 && isk->freebind == 0 && isk->transparent == 0 && chk_addr_ret != RTN_LOCAL) || chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST) return -EADDRNOTAVAIL; #if IS_ENABLED(CONFIG_IPV6) } else if (sk->sk_family == AF_INET6) { struct sockaddr_in6 *addr = (struct sockaddr_in6 *) uaddr; int addr_type, scoped, has_addr; struct net_device *dev = NULL; if (addr_len < sizeof(*addr)) return -EINVAL; pr_debug("ping_check_bind_addr(sk=%p,addr=%pI6c,port=%d)\n", sk, addr->sin6_addr.s6_addr, ntohs(addr->sin6_port)); addr_type = ipv6_addr_type(&addr->sin6_addr); scoped = __ipv6_addr_needs_scope_id(addr_type); if ((addr_type != IPV6_ADDR_ANY && !(addr_type & IPV6_ADDR_UNICAST)) || (scoped && !addr->sin6_scope_id)) return -EINVAL; rcu_read_lock(); if (addr->sin6_scope_id) { dev = dev_get_by_index_rcu(net, addr->sin6_scope_id); if (!dev) { rcu_read_unlock(); return -ENODEV; } } has_addr = pingv6_ops.ipv6_chk_addr(net, &addr->sin6_addr, dev, scoped); rcu_read_unlock(); if (!(isk->freebind || isk->transparent || has_addr || addr_type == IPV6_ADDR_ANY)) return -EADDRNOTAVAIL; if (scoped) sk->sk_bound_dev_if = addr->sin6_scope_id; #endif } else { return -EAFNOSUPPORT; } return 0; } static void ping_set_saddr(struct sock *sk, struct sockaddr *saddr) { if (saddr->sa_family == AF_INET) { struct inet_sock *isk = inet_sk(sk); struct sockaddr_in *addr = (struct sockaddr_in *) saddr; isk->inet_rcv_saddr = isk->inet_saddr = addr->sin_addr.s_addr; #if IS_ENABLED(CONFIG_IPV6) } else if (saddr->sa_family == AF_INET6) { struct sockaddr_in6 *addr = (struct sockaddr_in6 *) saddr; struct ipv6_pinfo *np = inet6_sk(sk); sk->sk_v6_rcv_saddr = np->saddr = addr->sin6_addr; #endif } } static void ping_clear_saddr(struct sock *sk, int dif) { sk->sk_bound_dev_if = dif; if (sk->sk_family == AF_INET) { struct inet_sock *isk = inet_sk(sk); isk->inet_rcv_saddr = isk->inet_saddr = 0; #if IS_ENABLED(CONFIG_IPV6) } else if (sk->sk_family == AF_INET6) { struct ipv6_pinfo *np = inet6_sk(sk); memset(&sk->sk_v6_rcv_saddr, 0, sizeof(sk->sk_v6_rcv_saddr)); memset(&np->saddr, 0, sizeof(np->saddr)); #endif } } /* * We need our own bind because there are no privileged id's == local ports. * Moreover, we don't allow binding to multi- and broadcast addresses. 
*/ int ping_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len) { struct inet_sock *isk = inet_sk(sk); unsigned short snum; int err; int dif = sk->sk_bound_dev_if; err = ping_check_bind_addr(sk, isk, uaddr, addr_len); if (err) return err; lock_sock(sk); err = -EINVAL; if (isk->inet_num != 0) goto out; err = -EADDRINUSE; ping_set_saddr(sk, uaddr); snum = ntohs(((struct sockaddr_in *)uaddr)->sin_port); if (ping_get_port(sk, snum) != 0) { ping_clear_saddr(sk, dif); goto out; } pr_debug("after bind(): num = %d, dif = %d\n", (int)isk->inet_num, (int)sk->sk_bound_dev_if); err = 0; if (sk->sk_family == AF_INET && isk->inet_rcv_saddr) sk->sk_userlocks |= SOCK_BINDADDR_LOCK; #if IS_ENABLED(CONFIG_IPV6) if (sk->sk_family == AF_INET6 && !ipv6_addr_any(&sk->sk_v6_rcv_saddr)) sk->sk_userlocks |= SOCK_BINDADDR_LOCK; #endif if (snum) sk->sk_userlocks |= SOCK_BINDPORT_LOCK; isk->inet_sport = htons(isk->inet_num); isk->inet_daddr = 0; isk->inet_dport = 0; #if IS_ENABLED(CONFIG_IPV6) if (sk->sk_family == AF_INET6) memset(&sk->sk_v6_daddr, 0, sizeof(sk->sk_v6_daddr)); #endif sk_dst_reset(sk); out: release_sock(sk); pr_debug("ping_v4_bind -> %d\n", err); return err; } EXPORT_SYMBOL_GPL(ping_bind); /* * Is this a supported type of ICMP message? */ static inline int ping_supported(int family, int type, int code) { return (family == AF_INET && type == ICMP_ECHO && code == 0) || (family == AF_INET6 && type == ICMPV6_ECHO_REQUEST && code == 0); } /* * This routine is called by the ICMP module when it gets some * sort of error condition. */ void ping_err(struct sk_buff *skb, int offset, u32 info) { int family; struct icmphdr *icmph; struct inet_sock *inet_sock; int type; int code; struct net *net = dev_net(skb->dev); struct sock *sk; int harderr; int err; if (skb->protocol == htons(ETH_P_IP)) { family = AF_INET; type = icmp_hdr(skb)->type; code = icmp_hdr(skb)->code; icmph = (struct icmphdr *)(skb->data + offset); } else if (skb->protocol == htons(ETH_P_IPV6)) { family = AF_INET6; type = icmp6_hdr(skb)->icmp6_type; code = icmp6_hdr(skb)->icmp6_code; icmph = (struct icmphdr *) (skb->data + offset); } else { BUG(); } /* We assume the packet has already been checked by icmp_unreach */ if (!ping_supported(family, icmph->type, icmph->code)) return; pr_debug("ping_err(proto=0x%x,type=%d,code=%d,id=%04x,seq=%04x)\n", skb->protocol, type, code, ntohs(icmph->un.echo.id), ntohs(icmph->un.echo.sequence)); sk = ping_lookup(net, skb, ntohs(icmph->un.echo.id)); if (sk == NULL) { pr_debug("no socket, dropping\n"); return; /* No socket for error */ } pr_debug("err on socket %p\n", sk); err = 0; harderr = 0; inet_sock = inet_sk(sk); if (skb->protocol == htons(ETH_P_IP)) { switch (type) { default: case ICMP_TIME_EXCEEDED: err = EHOSTUNREACH; break; case ICMP_SOURCE_QUENCH: /* This is not a real error but ping wants to see it. * Report it with some fake errno. 
*/ err = EREMOTEIO; break; case ICMP_PARAMETERPROB: err = EPROTO; harderr = 1; break; case ICMP_DEST_UNREACH: if (code == ICMP_FRAG_NEEDED) { /* Path MTU discovery */ ipv4_sk_update_pmtu(skb, sk, info); if (inet_sock->pmtudisc != IP_PMTUDISC_DONT) { err = EMSGSIZE; harderr = 1; break; } goto out; } err = EHOSTUNREACH; if (code <= NR_ICMP_UNREACH) { harderr = icmp_err_convert[code].fatal; err = icmp_err_convert[code].errno; } break; case ICMP_REDIRECT: /* See ICMP_SOURCE_QUENCH */ ipv4_sk_redirect(skb, sk); err = EREMOTEIO; break; } #if IS_ENABLED(CONFIG_IPV6) } else if (skb->protocol == htons(ETH_P_IPV6)) { harderr = pingv6_ops.icmpv6_err_convert(type, code, &err); #endif } /* * RFC1122: OK. Passes ICMP errors back to application, as per * 4.1.3.3. */ if ((family == AF_INET && !inet_sock->recverr) || (family == AF_INET6 && !inet6_sk(sk)->recverr)) { if (!harderr || sk->sk_state != TCP_ESTABLISHED) goto out; } else { if (family == AF_INET) { ip_icmp_error(sk, skb, err, 0 /* no remote port */, info, (u8 *)icmph); #if IS_ENABLED(CONFIG_IPV6) } else if (family == AF_INET6) { pingv6_ops.ipv6_icmp_error(sk, skb, err, 0, info, (u8 *)icmph); #endif } } sk->sk_err = err; sk->sk_error_report(sk); out: sock_put(sk); } EXPORT_SYMBOL_GPL(ping_err); /* * Copy and checksum an ICMP Echo packet from user space into a buffer * starting from the payload. */ int ping_getfrag(void *from, char *to, int offset, int fraglen, int odd, struct sk_buff *skb) { struct pingfakehdr *pfh = (struct pingfakehdr *)from; if (offset == 0) { if (fraglen < sizeof(struct icmphdr)) BUG(); if (csum_partial_copy_fromiovecend(to + sizeof(struct icmphdr), pfh->iov, 0, fraglen - sizeof(struct icmphdr), &pfh->wcheck)) return -EFAULT; } else if (offset < sizeof(struct icmphdr)) { BUG(); } else { if (csum_partial_copy_fromiovecend (to, pfh->iov, offset - sizeof(struct icmphdr), fraglen, &pfh->wcheck)) return -EFAULT; } #if IS_ENABLED(CONFIG_IPV6) /* For IPv6, checksum each skb as we go along, as expected by * icmpv6_push_pending_frames. For IPv4, accumulate the checksum in * wcheck, it will be finalized in ping_v4_push_pending_frames. */ if (pfh->family == AF_INET6) { skb->csum = pfh->wcheck; skb->ip_summed = CHECKSUM_NONE; pfh->wcheck = 0; } #endif return 0; } EXPORT_SYMBOL_GPL(ping_getfrag); static int ping_v4_push_pending_frames(struct sock *sk, struct pingfakehdr *pfh, struct flowi4 *fl4) { struct sk_buff *skb = skb_peek(&sk->sk_write_queue); pfh->wcheck = csum_partial((char *)&pfh->icmph, sizeof(struct icmphdr), pfh->wcheck); pfh->icmph.checksum = csum_fold(pfh->wcheck); memcpy(icmp_hdr(skb), &pfh->icmph, sizeof(struct icmphdr)); skb->ip_summed = CHECKSUM_NONE; return ip_push_pending_frames(sk, fl4); } int ping_common_sendmsg(int family, struct msghdr *msg, size_t len, void *user_icmph, size_t icmph_len) { u8 type, code; if (len > 0xFFFF) return -EMSGSIZE; /* * Check the flags. */ /* Mirror BSD error message compatibility */ if (msg->msg_flags & MSG_OOB) return -EOPNOTSUPP; /* * Fetch the ICMP header provided by the userland. * iovec is modified! The ICMP header is consumed. 
*/ if (memcpy_fromiovec(user_icmph, msg->msg_iov, icmph_len)) return -EFAULT; if (family == AF_INET) { type = ((struct icmphdr *) user_icmph)->type; code = ((struct icmphdr *) user_icmph)->code; #if IS_ENABLED(CONFIG_IPV6) } else if (family == AF_INET6) { type = ((struct icmp6hdr *) user_icmph)->icmp6_type; code = ((struct icmp6hdr *) user_icmph)->icmp6_code; #endif } else { BUG(); } if (!ping_supported(family, type, code)) return -EINVAL; return 0; } EXPORT_SYMBOL_GPL(ping_common_sendmsg); int ping_v4_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, size_t len) { struct net *net = sock_net(sk); struct flowi4 fl4; struct inet_sock *inet = inet_sk(sk); struct ipcm_cookie ipc; struct icmphdr user_icmph; struct pingfakehdr pfh; struct rtable *rt = NULL; struct ip_options_data opt_copy; int free = 0; __be32 saddr, daddr, faddr; u8 tos; int err; pr_debug("ping_v4_sendmsg(sk=%p,sk->num=%u)\n", inet, inet->inet_num); err = ping_common_sendmsg(AF_INET, msg, len, &user_icmph, sizeof(user_icmph)); if (err) return err; /* * Get and verify the address. */ if (msg->msg_name) { struct sockaddr_in *usin = (struct sockaddr_in *)msg->msg_name; if (msg->msg_namelen < sizeof(*usin)) return -EINVAL; if (usin->sin_family != AF_INET) return -EINVAL; daddr = usin->sin_addr.s_addr; /* no remote port */ } else { if (sk->sk_state != TCP_ESTABLISHED) return -EDESTADDRREQ; daddr = inet->inet_daddr; /* no remote port */ } ipc.addr = inet->inet_saddr; ipc.opt = NULL; ipc.oif = sk->sk_bound_dev_if; ipc.tx_flags = 0; ipc.ttl = 0; ipc.tos = -1; sock_tx_timestamp(sk, &ipc.tx_flags); if (msg->msg_controllen) { err = ip_cmsg_send(sock_net(sk), msg, &ipc); if (err) return err; if (ipc.opt) free = 1; } if (!ipc.opt) { struct ip_options_rcu *inet_opt; rcu_read_lock(); inet_opt = rcu_dereference(inet->inet_opt); if (inet_opt) { memcpy(&opt_copy, inet_opt, sizeof(*inet_opt) + inet_opt->opt.optlen); ipc.opt = &opt_copy.opt; } rcu_read_unlock(); } saddr = ipc.addr; ipc.addr = faddr = daddr; if (ipc.opt && ipc.opt->opt.srr) { if (!daddr) return -EINVAL; faddr = ipc.opt->opt.faddr; } tos = get_rttos(&ipc, inet); if (sock_flag(sk, SOCK_LOCALROUTE) || (msg->msg_flags & MSG_DONTROUTE) || (ipc.opt && ipc.opt->opt.is_strictroute)) { tos |= RTO_ONLINK; } if (ipv4_is_multicast(daddr)) { if (!ipc.oif) ipc.oif = inet->mc_index; if (!saddr) saddr = inet->mc_addr; } else if (!ipc.oif) ipc.oif = inet->uc_index; flowi4_init_output(&fl4, ipc.oif, sk->sk_mark, tos, RT_SCOPE_UNIVERSE, sk->sk_protocol, inet_sk_flowi_flags(sk), faddr, saddr, 0, 0); security_sk_classify_flow(sk, flowi4_to_flowi(&fl4)); rt = ip_route_output_flow(net, &fl4, sk); if (IS_ERR(rt)) { err = PTR_ERR(rt); rt = NULL; if (err == -ENETUNREACH) IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES); goto out; } err = -EACCES; if ((rt->rt_flags & RTCF_BROADCAST) && !sock_flag(sk, SOCK_BROADCAST)) goto out; if (msg->msg_flags & MSG_CONFIRM) goto do_confirm; back_from_confirm: if (!ipc.addr) ipc.addr = fl4.daddr; lock_sock(sk); pfh.icmph.type = user_icmph.type; /* already checked */ pfh.icmph.code = user_icmph.code; /* ditto */ pfh.icmph.checksum = 0; pfh.icmph.un.echo.id = inet->inet_sport; pfh.icmph.un.echo.sequence = user_icmph.un.echo.sequence; pfh.iov = msg->msg_iov; pfh.wcheck = 0; pfh.family = AF_INET; err = ip_append_data(sk, &fl4, ping_getfrag, &pfh, len, 0, &ipc, &rt, msg->msg_flags); if (err) ip_flush_pending_frames(sk); else err = ping_v4_push_pending_frames(sk, &pfh, &fl4); release_sock(sk); out: ip_rt_put(rt); if (free) kfree(ipc.opt); if (!err) { 
icmp_out_count(sock_net(sk), user_icmph.type); return len; } return err; do_confirm: dst_confirm(&rt->dst); if (!(msg->msg_flags & MSG_PROBE) || len) goto back_from_confirm; err = 0; goto out; } int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, size_t len, int noblock, int flags, int *addr_len) { struct inet_sock *isk = inet_sk(sk); int family = sk->sk_family; struct sockaddr_in *sin; struct sockaddr_in6 *sin6; struct sk_buff *skb; int copied, err; pr_debug("ping_recvmsg(sk=%p,sk->num=%u)\n", isk, isk->inet_num); err = -EOPNOTSUPP; if (flags & MSG_OOB) goto out; if (addr_len) { if (family == AF_INET) *addr_len = sizeof(*sin); else if (family == AF_INET6 && addr_len) *addr_len = sizeof(*sin6); } if (flags & MSG_ERRQUEUE) { if (family == AF_INET) { return ip_recv_error(sk, msg, len); #if IS_ENABLED(CONFIG_IPV6) } else if (family == AF_INET6) { return pingv6_ops.ipv6_recv_error(sk, msg, len); #endif } } skb = skb_recv_datagram(sk, flags, noblock, &err); if (!skb) goto out; copied = skb->len; if (copied > len) { msg->msg_flags |= MSG_TRUNC; copied = len; } /* Don't bother checking the checksum */ err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); if (err) goto done; sock_recv_timestamp(msg, sk, skb); /* Copy the address and add cmsg data. */ if (family == AF_INET) { sin = (struct sockaddr_in *) msg->msg_name; sin->sin_family = AF_INET; sin->sin_port = 0 /* skb->h.uh->source */; sin->sin_addr.s_addr = ip_hdr(skb)->saddr; memset(sin->sin_zero, 0, sizeof(sin->sin_zero)); if (isk->cmsg_flags) ip_cmsg_recv(msg, skb); #if IS_ENABLED(CONFIG_IPV6) } else if (family == AF_INET6) { struct ipv6_pinfo *np = inet6_sk(sk); struct ipv6hdr *ip6 = ipv6_hdr(skb); sin6 = (struct sockaddr_in6 *) msg->msg_name; sin6->sin6_family = AF_INET6; sin6->sin6_port = 0; sin6->sin6_addr = ip6->saddr; sin6->sin6_flowinfo = 0; if (np->sndflow) sin6->sin6_flowinfo = ip6_flowinfo(ip6); sin6->sin6_scope_id = ipv6_iface_scope_id(&sin6->sin6_addr, IP6CB(skb)->iif); if (inet6_sk(sk)->rxopt.all) pingv6_ops.ip6_datagram_recv_ctl(sk, msg, skb); #endif } else { BUG(); } err = copied; done: skb_free_datagram(sk, skb); out: pr_debug("ping_recvmsg -> %d\n", err); return err; } EXPORT_SYMBOL_GPL(ping_recvmsg); int ping_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) { pr_debug("ping_queue_rcv_skb(sk=%p,sk->num=%d,skb=%p)\n", inet_sk(sk), inet_sk(sk)->inet_num, skb); if (sock_queue_rcv_skb(sk, skb) < 0) { kfree_skb(skb); pr_debug("ping_queue_rcv_skb -> failed\n"); return -1; } return 0; } EXPORT_SYMBOL_GPL(ping_queue_rcv_skb); /* * All we need to do is get the socket. */ void ping_rcv(struct sk_buff *skb) { struct sock *sk; struct net *net = dev_net(skb->dev); struct icmphdr *icmph = icmp_hdr(skb); /* We assume the packet has already been checked by icmp_rcv */ pr_debug("ping_rcv(skb=%p,id=%04x,seq=%04x)\n", skb, ntohs(icmph->un.echo.id), ntohs(icmph->un.echo.sequence)); /* Push ICMP header back */ skb_push(skb, skb->data - (u8 *)icmph); sk = ping_lookup(net, skb, ntohs(icmph->un.echo.id)); if (sk != NULL) { pr_debug("rcv on socket %p\n", sk); ping_queue_rcv_skb(sk, skb_get(skb)); sock_put(sk); return; } pr_debug("no socket, dropping\n"); /* We're called from icmp_rcv(). kfree_skb() is done there. 
*/ } EXPORT_SYMBOL_GPL(ping_rcv); struct proto ping_prot = { .name = "PING", .owner = THIS_MODULE, .init = ping_init_sock, .close = ping_close, .connect = ip4_datagram_connect, .disconnect = udp_disconnect, .setsockopt = ip_setsockopt, .getsockopt = ip_getsockopt, .sendmsg = ping_v4_sendmsg, .recvmsg = ping_recvmsg, .bind = ping_bind, .backlog_rcv = ping_queue_rcv_skb, .release_cb = ip4_datagram_release_cb, .hash = ping_hash, .unhash = ping_unhash, .get_port = ping_get_port, .obj_size = sizeof(struct inet_sock), }; EXPORT_SYMBOL(ping_prot); #ifdef CONFIG_PROC_FS static struct sock *ping_get_first(struct seq_file *seq, int start) { struct sock *sk; struct ping_iter_state *state = seq->private; struct net *net = seq_file_net(seq); for (state->bucket = start; state->bucket < PING_HTABLE_SIZE; ++state->bucket) { struct hlist_nulls_node *node; struct hlist_nulls_head *hslot; hslot = &ping_table.hash[state->bucket]; if (hlist_nulls_empty(hslot)) continue; sk_nulls_for_each(sk, node, hslot) { if (net_eq(sock_net(sk), net) && sk->sk_family == state->family) goto found; } } sk = NULL; found: return sk; } static struct sock *ping_get_next(struct seq_file *seq, struct sock *sk) { struct ping_iter_state *state = seq->private; struct net *net = seq_file_net(seq); do { sk = sk_nulls_next(sk); } while (sk && (!net_eq(sock_net(sk), net))); if (!sk) return ping_get_first(seq, state->bucket + 1); return sk; } static struct sock *ping_get_idx(struct seq_file *seq, loff_t pos) { struct sock *sk = ping_get_first(seq, 0); if (sk) while (pos && (sk = ping_get_next(seq, sk)) != NULL) --pos; return pos ? NULL : sk; } void *ping_seq_start(struct seq_file *seq, loff_t *pos, sa_family_t family) { struct ping_iter_state *state = seq->private; state->bucket = 0; state->family = family; read_lock_bh(&ping_table.lock); return *pos ? 
ping_get_idx(seq, *pos-1) : SEQ_START_TOKEN; } EXPORT_SYMBOL_GPL(ping_seq_start); static void *ping_v4_seq_start(struct seq_file *seq, loff_t *pos) { return ping_seq_start(seq, pos, AF_INET); } void *ping_seq_next(struct seq_file *seq, void *v, loff_t *pos) { struct sock *sk; if (v == SEQ_START_TOKEN) sk = ping_get_idx(seq, 0); else sk = ping_get_next(seq, v); ++*pos; return sk; } EXPORT_SYMBOL_GPL(ping_seq_next); void ping_seq_stop(struct seq_file *seq, void *v) { read_unlock_bh(&ping_table.lock); } EXPORT_SYMBOL_GPL(ping_seq_stop); static void ping_v4_format_sock(struct sock *sp, struct seq_file *f, int bucket, int *len) { struct inet_sock *inet = inet_sk(sp); __be32 dest = inet->inet_daddr; __be32 src = inet->inet_rcv_saddr; __u16 destp = ntohs(inet->inet_dport); __u16 srcp = ntohs(inet->inet_sport); seq_printf(f, "%5d: %08X:%04X %08X:%04X" " %02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %d%n", bucket, src, srcp, dest, destp, sp->sk_state, sk_wmem_alloc_get(sp), sk_rmem_alloc_get(sp), 0, 0L, 0, from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)), 0, sock_i_ino(sp), atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops), len); } static int ping_v4_seq_show(struct seq_file *seq, void *v) { if (v == SEQ_START_TOKEN) seq_printf(seq, "%-127s\n", " sl local_address rem_address st tx_queue " "rx_queue tr tm->when retrnsmt uid timeout " "inode ref pointer drops"); else { struct ping_iter_state *state = seq->private; int len; ping_v4_format_sock(v, seq, state->bucket, &len); seq_printf(seq, "%*s\n", 127 - len, ""); } return 0; } static const struct seq_operations ping_v4_seq_ops = { .show = ping_v4_seq_show, .start = ping_v4_seq_start, .next = ping_seq_next, .stop = ping_seq_stop, }; static int ping_seq_open(struct inode *inode, struct file *file) { struct ping_seq_afinfo *afinfo = PDE_DATA(inode); return seq_open_net(inode, file, &afinfo->seq_ops, sizeof(struct ping_iter_state)); } const struct file_operations ping_seq_fops = { .open = ping_seq_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release_net, }; EXPORT_SYMBOL_GPL(ping_seq_fops); static struct ping_seq_afinfo ping_v4_seq_afinfo = { .name = "icmp", .family = AF_INET, .seq_fops = &ping_seq_fops, .seq_ops = { .start = ping_v4_seq_start, .show = ping_v4_seq_show, .next = ping_seq_next, .stop = ping_seq_stop, }, }; int ping_proc_register(struct net *net, struct ping_seq_afinfo *afinfo) { struct proc_dir_entry *p; p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net, afinfo->seq_fops, afinfo); if (!p) return -ENOMEM; return 0; } EXPORT_SYMBOL_GPL(ping_proc_register); void ping_proc_unregister(struct net *net, struct ping_seq_afinfo *afinfo) { remove_proc_entry(afinfo->name, net->proc_net); } EXPORT_SYMBOL_GPL(ping_proc_unregister); static int __net_init ping_v4_proc_init_net(struct net *net) { return ping_proc_register(net, &ping_v4_seq_afinfo); } static void __net_exit ping_v4_proc_exit_net(struct net *net) { ping_proc_unregister(net, &ping_v4_seq_afinfo); } static struct pernet_operations ping_v4_net_ops = { .init = ping_v4_proc_init_net, .exit = ping_v4_proc_exit_net, }; int __init ping_proc_init(void) { return register_pernet_subsys(&ping_v4_net_ops); } void ping_proc_exit(void) { unregister_pernet_subsys(&ping_v4_net_ops); } #endif void __init ping_init(void) { int i; for (i = 0; i < PING_HTABLE_SIZE; i++) INIT_HLIST_NULLS_HEAD(&ping_table.hash[i], i); rwlock_init(&ping_table.lock); }
/* * INET An implementation of the TCP/IP protocol suite for the LINUX * operating system. INET is implemented using the BSD Socket * interface as the means of communication with the user level. * * "Ping" sockets * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * Based on ipv4/udp.c code. * * Authors: Vasiliy Kulikov / Openwall (for Linux 2.6), * Pavel Kankovsky (for Linux 2.4.32) * * Pavel gave all rights to bugs to Vasiliy, * none of the bugs are Pavel's now. * */ #include <linux/uaccess.h> #include <linux/types.h> #include <linux/fcntl.h> #include <linux/socket.h> #include <linux/sockios.h> #include <linux/in.h> #include <linux/errno.h> #include <linux/timer.h> #include <linux/mm.h> #include <linux/inet.h> #include <linux/netdevice.h> #include <net/snmp.h> #include <net/ip.h> #include <net/icmp.h> #include <net/protocol.h> #include <linux/skbuff.h> #include <linux/proc_fs.h> #include <linux/export.h> #include <net/sock.h> #include <net/ping.h> #include <net/udp.h> #include <net/route.h> #include <net/inet_common.h> #include <net/checksum.h> #if IS_ENABLED(CONFIG_IPV6) #include <linux/in6.h> #include <linux/icmpv6.h> #include <net/addrconf.h> #include <net/ipv6.h> #include <net/transp_v6.h> #endif struct ping_table ping_table; struct pingv6_ops pingv6_ops; EXPORT_SYMBOL_GPL(pingv6_ops); static u16 ping_port_rover; static inline int ping_hashfn(struct net *net, unsigned int num, unsigned int mask) { int res = (num + net_hash_mix(net)) & mask; pr_debug("hash(%d) = %d\n", num, res); return res; } EXPORT_SYMBOL_GPL(ping_hash); static inline struct hlist_nulls_head *ping_hashslot(struct ping_table *table, struct net *net, unsigned int num) { return &table->hash[ping_hashfn(net, num, PING_HTABLE_MASK)]; } int ping_get_port(struct sock *sk, unsigned short ident) { struct hlist_nulls_node *node; struct hlist_nulls_head *hlist; struct inet_sock *isk, *isk2; struct sock *sk2 = NULL; isk = inet_sk(sk); write_lock_bh(&ping_table.lock); if (ident == 0) { u32 i; u16 result = ping_port_rover + 1; for (i = 0; i < (1L << 16); i++, result++) { if (!result) result++; /* avoid zero */ hlist = ping_hashslot(&ping_table, sock_net(sk), result); ping_portaddr_for_each_entry(sk2, node, hlist) { isk2 = inet_sk(sk2); if (isk2->inet_num == result) goto next_port; } /* found */ ping_port_rover = ident = result; break; next_port: ; } if (i >= (1L << 16)) goto fail; } else { hlist = ping_hashslot(&ping_table, sock_net(sk), ident); ping_portaddr_for_each_entry(sk2, node, hlist) { isk2 = inet_sk(sk2); /* BUG? Why is this reuse and not reuseaddr? ping.c * doesn't turn off SO_REUSEADDR, and it doesn't expect * that other ping processes can steal its packets. */ if ((isk2->inet_num == ident) && (sk2 != sk) && (!sk2->sk_reuse || !sk->sk_reuse)) goto fail; } } pr_debug("found port/ident = %d\n", ident); isk->inet_num = ident; if (sk_unhashed(sk)) { pr_debug("was not hashed\n"); sock_hold(sk); hlist_nulls_add_head(&sk->sk_nulls_node, hlist); sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); } write_unlock_bh(&ping_table.lock); return 0; fail: write_unlock_bh(&ping_table.lock); return 1; } EXPORT_SYMBOL_GPL(ping_get_port); void ping_hash(struct sock *sk) { pr_debug("ping_hash(sk->port=%u)\n", inet_sk(sk)->inet_num); BUG(); /* "Please do not press this button again." 
*/ } void ping_unhash(struct sock *sk) { struct inet_sock *isk = inet_sk(sk); pr_debug("ping_unhash(isk=%p,isk->num=%u)\n", isk, isk->inet_num); if (sk_hashed(sk)) { write_lock_bh(&ping_table.lock); hlist_nulls_del(&sk->sk_nulls_node); sock_put(sk); isk->inet_num = 0; isk->inet_sport = 0; sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); write_unlock_bh(&ping_table.lock); } } EXPORT_SYMBOL_GPL(ping_unhash); static struct sock *ping_lookup(struct net *net, struct sk_buff *skb, u16 ident) { struct hlist_nulls_head *hslot = ping_hashslot(&ping_table, net, ident); struct sock *sk = NULL; struct inet_sock *isk; struct hlist_nulls_node *hnode; int dif = skb->dev->ifindex; if (skb->protocol == htons(ETH_P_IP)) { pr_debug("try to find: num = %d, daddr = %pI4, dif = %d\n", (int)ident, &ip_hdr(skb)->daddr, dif); #if IS_ENABLED(CONFIG_IPV6) } else if (skb->protocol == htons(ETH_P_IPV6)) { pr_debug("try to find: num = %d, daddr = %pI6c, dif = %d\n", (int)ident, &ipv6_hdr(skb)->daddr, dif); #endif } read_lock_bh(&ping_table.lock); ping_portaddr_for_each_entry(sk, hnode, hslot) { isk = inet_sk(sk); pr_debug("iterate\n"); if (isk->inet_num != ident) continue; if (skb->protocol == htons(ETH_P_IP) && sk->sk_family == AF_INET) { pr_debug("found: %p: num=%d, daddr=%pI4, dif=%d\n", sk, (int) isk->inet_num, &isk->inet_rcv_saddr, sk->sk_bound_dev_if); if (isk->inet_rcv_saddr && isk->inet_rcv_saddr != ip_hdr(skb)->daddr) continue; #if IS_ENABLED(CONFIG_IPV6) } else if (skb->protocol == htons(ETH_P_IPV6) && sk->sk_family == AF_INET6) { pr_debug("found: %p: num=%d, daddr=%pI6c, dif=%d\n", sk, (int) isk->inet_num, &sk->sk_v6_rcv_saddr, sk->sk_bound_dev_if); if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr) && !ipv6_addr_equal(&sk->sk_v6_rcv_saddr, &ipv6_hdr(skb)->daddr)) continue; #endif } if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif) continue; sock_hold(sk); goto exit; } sk = NULL; exit: read_unlock_bh(&ping_table.lock); return sk; } static void inet_get_ping_group_range_net(struct net *net, kgid_t *low, kgid_t *high) { kgid_t *data = net->ipv4.sysctl_ping_group_range; unsigned int seq; do { seq = read_seqbegin(&net->ipv4.sysctl_local_ports.lock); *low = data[0]; *high = data[1]; } while (read_seqretry(&net->ipv4.sysctl_local_ports.lock, seq)); } int ping_init_sock(struct sock *sk) { struct net *net = sock_net(sk); kgid_t group = current_egid(); struct group_info *group_info = get_current_groups(); int i, j, count = group_info->ngroups; kgid_t low, high; inet_get_ping_group_range_net(net, &low, &high); if (gid_lte(low, group) && gid_lte(group, high)) return 0; for (i = 0; i < group_info->nblocks; i++) { int cp_count = min_t(int, NGROUPS_PER_BLOCK, count); for (j = 0; j < cp_count; j++) { kgid_t gid = group_info->blocks[i][j]; if (gid_lte(low, gid) && gid_lte(gid, high)) return 0; } count -= cp_count; } return -EACCES; } EXPORT_SYMBOL_GPL(ping_init_sock); void ping_close(struct sock *sk, long timeout) { pr_debug("ping_close(sk=%p,sk->num=%u)\n", inet_sk(sk), inet_sk(sk)->inet_num); pr_debug("isk->refcnt = %d\n", sk->sk_refcnt.counter); sk_common_release(sk); } EXPORT_SYMBOL_GPL(ping_close); /* Checks the bind address and possibly modifies sk->sk_bound_dev_if. 
*/ static int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk, struct sockaddr *uaddr, int addr_len) { struct net *net = sock_net(sk); if (sk->sk_family == AF_INET) { struct sockaddr_in *addr = (struct sockaddr_in *) uaddr; int chk_addr_ret; if (addr_len < sizeof(*addr)) return -EINVAL; pr_debug("ping_check_bind_addr(sk=%p,addr=%pI4,port=%d)\n", sk, &addr->sin_addr.s_addr, ntohs(addr->sin_port)); chk_addr_ret = inet_addr_type(net, addr->sin_addr.s_addr); if (addr->sin_addr.s_addr == htonl(INADDR_ANY)) chk_addr_ret = RTN_LOCAL; if ((sysctl_ip_nonlocal_bind == 0 && isk->freebind == 0 && isk->transparent == 0 && chk_addr_ret != RTN_LOCAL) || chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST) return -EADDRNOTAVAIL; #if IS_ENABLED(CONFIG_IPV6) } else if (sk->sk_family == AF_INET6) { struct sockaddr_in6 *addr = (struct sockaddr_in6 *) uaddr; int addr_type, scoped, has_addr; struct net_device *dev = NULL; if (addr_len < sizeof(*addr)) return -EINVAL; pr_debug("ping_check_bind_addr(sk=%p,addr=%pI6c,port=%d)\n", sk, addr->sin6_addr.s6_addr, ntohs(addr->sin6_port)); addr_type = ipv6_addr_type(&addr->sin6_addr); scoped = __ipv6_addr_needs_scope_id(addr_type); if ((addr_type != IPV6_ADDR_ANY && !(addr_type & IPV6_ADDR_UNICAST)) || (scoped && !addr->sin6_scope_id)) return -EINVAL; rcu_read_lock(); if (addr->sin6_scope_id) { dev = dev_get_by_index_rcu(net, addr->sin6_scope_id); if (!dev) { rcu_read_unlock(); return -ENODEV; } } has_addr = pingv6_ops.ipv6_chk_addr(net, &addr->sin6_addr, dev, scoped); rcu_read_unlock(); if (!(isk->freebind || isk->transparent || has_addr || addr_type == IPV6_ADDR_ANY)) return -EADDRNOTAVAIL; if (scoped) sk->sk_bound_dev_if = addr->sin6_scope_id; #endif } else { return -EAFNOSUPPORT; } return 0; } static void ping_set_saddr(struct sock *sk, struct sockaddr *saddr) { if (saddr->sa_family == AF_INET) { struct inet_sock *isk = inet_sk(sk); struct sockaddr_in *addr = (struct sockaddr_in *) saddr; isk->inet_rcv_saddr = isk->inet_saddr = addr->sin_addr.s_addr; #if IS_ENABLED(CONFIG_IPV6) } else if (saddr->sa_family == AF_INET6) { struct sockaddr_in6 *addr = (struct sockaddr_in6 *) saddr; struct ipv6_pinfo *np = inet6_sk(sk); sk->sk_v6_rcv_saddr = np->saddr = addr->sin6_addr; #endif } } static void ping_clear_saddr(struct sock *sk, int dif) { sk->sk_bound_dev_if = dif; if (sk->sk_family == AF_INET) { struct inet_sock *isk = inet_sk(sk); isk->inet_rcv_saddr = isk->inet_saddr = 0; #if IS_ENABLED(CONFIG_IPV6) } else if (sk->sk_family == AF_INET6) { struct ipv6_pinfo *np = inet6_sk(sk); memset(&sk->sk_v6_rcv_saddr, 0, sizeof(sk->sk_v6_rcv_saddr)); memset(&np->saddr, 0, sizeof(np->saddr)); #endif } } /* * We need our own bind because there are no privileged id's == local ports. * Moreover, we don't allow binding to multi- and broadcast addresses. 
*/ int ping_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len) { struct inet_sock *isk = inet_sk(sk); unsigned short snum; int err; int dif = sk->sk_bound_dev_if; err = ping_check_bind_addr(sk, isk, uaddr, addr_len); if (err) return err; lock_sock(sk); err = -EINVAL; if (isk->inet_num != 0) goto out; err = -EADDRINUSE; ping_set_saddr(sk, uaddr); snum = ntohs(((struct sockaddr_in *)uaddr)->sin_port); if (ping_get_port(sk, snum) != 0) { ping_clear_saddr(sk, dif); goto out; } pr_debug("after bind(): num = %d, dif = %d\n", (int)isk->inet_num, (int)sk->sk_bound_dev_if); err = 0; if (sk->sk_family == AF_INET && isk->inet_rcv_saddr) sk->sk_userlocks |= SOCK_BINDADDR_LOCK; #if IS_ENABLED(CONFIG_IPV6) if (sk->sk_family == AF_INET6 && !ipv6_addr_any(&sk->sk_v6_rcv_saddr)) sk->sk_userlocks |= SOCK_BINDADDR_LOCK; #endif if (snum) sk->sk_userlocks |= SOCK_BINDPORT_LOCK; isk->inet_sport = htons(isk->inet_num); isk->inet_daddr = 0; isk->inet_dport = 0; #if IS_ENABLED(CONFIG_IPV6) if (sk->sk_family == AF_INET6) memset(&sk->sk_v6_daddr, 0, sizeof(sk->sk_v6_daddr)); #endif sk_dst_reset(sk); out: release_sock(sk); pr_debug("ping_v4_bind -> %d\n", err); return err; } EXPORT_SYMBOL_GPL(ping_bind); /* * Is this a supported type of ICMP message? */ static inline int ping_supported(int family, int type, int code) { return (family == AF_INET && type == ICMP_ECHO && code == 0) || (family == AF_INET6 && type == ICMPV6_ECHO_REQUEST && code == 0); } /* * This routine is called by the ICMP module when it gets some * sort of error condition. */ void ping_err(struct sk_buff *skb, int offset, u32 info) { int family; struct icmphdr *icmph; struct inet_sock *inet_sock; int type; int code; struct net *net = dev_net(skb->dev); struct sock *sk; int harderr; int err; if (skb->protocol == htons(ETH_P_IP)) { family = AF_INET; type = icmp_hdr(skb)->type; code = icmp_hdr(skb)->code; icmph = (struct icmphdr *)(skb->data + offset); } else if (skb->protocol == htons(ETH_P_IPV6)) { family = AF_INET6; type = icmp6_hdr(skb)->icmp6_type; code = icmp6_hdr(skb)->icmp6_code; icmph = (struct icmphdr *) (skb->data + offset); } else { BUG(); } /* We assume the packet has already been checked by icmp_unreach */ if (!ping_supported(family, icmph->type, icmph->code)) return; pr_debug("ping_err(proto=0x%x,type=%d,code=%d,id=%04x,seq=%04x)\n", skb->protocol, type, code, ntohs(icmph->un.echo.id), ntohs(icmph->un.echo.sequence)); sk = ping_lookup(net, skb, ntohs(icmph->un.echo.id)); if (sk == NULL) { pr_debug("no socket, dropping\n"); return; /* No socket for error */ } pr_debug("err on socket %p\n", sk); err = 0; harderr = 0; inet_sock = inet_sk(sk); if (skb->protocol == htons(ETH_P_IP)) { switch (type) { default: case ICMP_TIME_EXCEEDED: err = EHOSTUNREACH; break; case ICMP_SOURCE_QUENCH: /* This is not a real error but ping wants to see it. * Report it with some fake errno. 
*/ err = EREMOTEIO; break; case ICMP_PARAMETERPROB: err = EPROTO; harderr = 1; break; case ICMP_DEST_UNREACH: if (code == ICMP_FRAG_NEEDED) { /* Path MTU discovery */ ipv4_sk_update_pmtu(skb, sk, info); if (inet_sock->pmtudisc != IP_PMTUDISC_DONT) { err = EMSGSIZE; harderr = 1; break; } goto out; } err = EHOSTUNREACH; if (code <= NR_ICMP_UNREACH) { harderr = icmp_err_convert[code].fatal; err = icmp_err_convert[code].errno; } break; case ICMP_REDIRECT: /* See ICMP_SOURCE_QUENCH */ ipv4_sk_redirect(skb, sk); err = EREMOTEIO; break; } #if IS_ENABLED(CONFIG_IPV6) } else if (skb->protocol == htons(ETH_P_IPV6)) { harderr = pingv6_ops.icmpv6_err_convert(type, code, &err); #endif } /* * RFC1122: OK. Passes ICMP errors back to application, as per * 4.1.3.3. */ if ((family == AF_INET && !inet_sock->recverr) || (family == AF_INET6 && !inet6_sk(sk)->recverr)) { if (!harderr || sk->sk_state != TCP_ESTABLISHED) goto out; } else { if (family == AF_INET) { ip_icmp_error(sk, skb, err, 0 /* no remote port */, info, (u8 *)icmph); #if IS_ENABLED(CONFIG_IPV6) } else if (family == AF_INET6) { pingv6_ops.ipv6_icmp_error(sk, skb, err, 0, info, (u8 *)icmph); #endif } } sk->sk_err = err; sk->sk_error_report(sk); out: sock_put(sk); } EXPORT_SYMBOL_GPL(ping_err); /* * Copy and checksum an ICMP Echo packet from user space into a buffer * starting from the payload. */ int ping_getfrag(void *from, char *to, int offset, int fraglen, int odd, struct sk_buff *skb) { struct pingfakehdr *pfh = (struct pingfakehdr *)from; if (offset == 0) { if (fraglen < sizeof(struct icmphdr)) BUG(); if (csum_partial_copy_fromiovecend(to + sizeof(struct icmphdr), pfh->iov, 0, fraglen - sizeof(struct icmphdr), &pfh->wcheck)) return -EFAULT; } else if (offset < sizeof(struct icmphdr)) { BUG(); } else { if (csum_partial_copy_fromiovecend (to, pfh->iov, offset - sizeof(struct icmphdr), fraglen, &pfh->wcheck)) return -EFAULT; } #if IS_ENABLED(CONFIG_IPV6) /* For IPv6, checksum each skb as we go along, as expected by * icmpv6_push_pending_frames. For IPv4, accumulate the checksum in * wcheck, it will be finalized in ping_v4_push_pending_frames. */ if (pfh->family == AF_INET6) { skb->csum = pfh->wcheck; skb->ip_summed = CHECKSUM_NONE; pfh->wcheck = 0; } #endif return 0; } EXPORT_SYMBOL_GPL(ping_getfrag); static int ping_v4_push_pending_frames(struct sock *sk, struct pingfakehdr *pfh, struct flowi4 *fl4) { struct sk_buff *skb = skb_peek(&sk->sk_write_queue); pfh->wcheck = csum_partial((char *)&pfh->icmph, sizeof(struct icmphdr), pfh->wcheck); pfh->icmph.checksum = csum_fold(pfh->wcheck); memcpy(icmp_hdr(skb), &pfh->icmph, sizeof(struct icmphdr)); skb->ip_summed = CHECKSUM_NONE; return ip_push_pending_frames(sk, fl4); } int ping_common_sendmsg(int family, struct msghdr *msg, size_t len, void *user_icmph, size_t icmph_len) { u8 type, code; if (len > 0xFFFF) return -EMSGSIZE; /* * Check the flags. */ /* Mirror BSD error message compatibility */ if (msg->msg_flags & MSG_OOB) return -EOPNOTSUPP; /* * Fetch the ICMP header provided by the userland. * iovec is modified! The ICMP header is consumed. 
*/ if (memcpy_fromiovec(user_icmph, msg->msg_iov, icmph_len)) return -EFAULT; if (family == AF_INET) { type = ((struct icmphdr *) user_icmph)->type; code = ((struct icmphdr *) user_icmph)->code; #if IS_ENABLED(CONFIG_IPV6) } else if (family == AF_INET6) { type = ((struct icmp6hdr *) user_icmph)->icmp6_type; code = ((struct icmp6hdr *) user_icmph)->icmp6_code; #endif } else { BUG(); } if (!ping_supported(family, type, code)) return -EINVAL; return 0; } EXPORT_SYMBOL_GPL(ping_common_sendmsg); int ping_v4_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, size_t len) { struct net *net = sock_net(sk); struct flowi4 fl4; struct inet_sock *inet = inet_sk(sk); struct ipcm_cookie ipc; struct icmphdr user_icmph; struct pingfakehdr pfh; struct rtable *rt = NULL; struct ip_options_data opt_copy; int free = 0; __be32 saddr, daddr, faddr; u8 tos; int err; pr_debug("ping_v4_sendmsg(sk=%p,sk->num=%u)\n", inet, inet->inet_num); err = ping_common_sendmsg(AF_INET, msg, len, &user_icmph, sizeof(user_icmph)); if (err) return err; /* * Get and verify the address. */ if (msg->msg_name) { struct sockaddr_in *usin = (struct sockaddr_in *)msg->msg_name; if (msg->msg_namelen < sizeof(*usin)) return -EINVAL; if (usin->sin_family != AF_INET) return -EINVAL; daddr = usin->sin_addr.s_addr; /* no remote port */ } else { if (sk->sk_state != TCP_ESTABLISHED) return -EDESTADDRREQ; daddr = inet->inet_daddr; /* no remote port */ } ipc.addr = inet->inet_saddr; ipc.opt = NULL; ipc.oif = sk->sk_bound_dev_if; ipc.tx_flags = 0; ipc.ttl = 0; ipc.tos = -1; sock_tx_timestamp(sk, &ipc.tx_flags); if (msg->msg_controllen) { err = ip_cmsg_send(sock_net(sk), msg, &ipc); if (err) return err; if (ipc.opt) free = 1; } if (!ipc.opt) { struct ip_options_rcu *inet_opt; rcu_read_lock(); inet_opt = rcu_dereference(inet->inet_opt); if (inet_opt) { memcpy(&opt_copy, inet_opt, sizeof(*inet_opt) + inet_opt->opt.optlen); ipc.opt = &opt_copy.opt; } rcu_read_unlock(); } saddr = ipc.addr; ipc.addr = faddr = daddr; if (ipc.opt && ipc.opt->opt.srr) { if (!daddr) return -EINVAL; faddr = ipc.opt->opt.faddr; } tos = get_rttos(&ipc, inet); if (sock_flag(sk, SOCK_LOCALROUTE) || (msg->msg_flags & MSG_DONTROUTE) || (ipc.opt && ipc.opt->opt.is_strictroute)) { tos |= RTO_ONLINK; } if (ipv4_is_multicast(daddr)) { if (!ipc.oif) ipc.oif = inet->mc_index; if (!saddr) saddr = inet->mc_addr; } else if (!ipc.oif) ipc.oif = inet->uc_index; flowi4_init_output(&fl4, ipc.oif, sk->sk_mark, tos, RT_SCOPE_UNIVERSE, sk->sk_protocol, inet_sk_flowi_flags(sk), faddr, saddr, 0, 0); security_sk_classify_flow(sk, flowi4_to_flowi(&fl4)); rt = ip_route_output_flow(net, &fl4, sk); if (IS_ERR(rt)) { err = PTR_ERR(rt); rt = NULL; if (err == -ENETUNREACH) IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES); goto out; } err = -EACCES; if ((rt->rt_flags & RTCF_BROADCAST) && !sock_flag(sk, SOCK_BROADCAST)) goto out; if (msg->msg_flags & MSG_CONFIRM) goto do_confirm; back_from_confirm: if (!ipc.addr) ipc.addr = fl4.daddr; lock_sock(sk); pfh.icmph.type = user_icmph.type; /* already checked */ pfh.icmph.code = user_icmph.code; /* ditto */ pfh.icmph.checksum = 0; pfh.icmph.un.echo.id = inet->inet_sport; pfh.icmph.un.echo.sequence = user_icmph.un.echo.sequence; pfh.iov = msg->msg_iov; pfh.wcheck = 0; pfh.family = AF_INET; err = ip_append_data(sk, &fl4, ping_getfrag, &pfh, len, 0, &ipc, &rt, msg->msg_flags); if (err) ip_flush_pending_frames(sk); else err = ping_v4_push_pending_frames(sk, &pfh, &fl4); release_sock(sk); out: ip_rt_put(rt); if (free) kfree(ipc.opt); if (!err) { 
icmp_out_count(sock_net(sk), user_icmph.type); return len; } return err; do_confirm: dst_confirm(&rt->dst); if (!(msg->msg_flags & MSG_PROBE) || len) goto back_from_confirm; err = 0; goto out; } int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, size_t len, int noblock, int flags, int *addr_len) { struct inet_sock *isk = inet_sk(sk); int family = sk->sk_family; struct sk_buff *skb; int copied, err; pr_debug("ping_recvmsg(sk=%p,sk->num=%u)\n", isk, isk->inet_num); err = -EOPNOTSUPP; if (flags & MSG_OOB) goto out; if (flags & MSG_ERRQUEUE) { if (family == AF_INET) { return ip_recv_error(sk, msg, len); #if IS_ENABLED(CONFIG_IPV6) } else if (family == AF_INET6) { return pingv6_ops.ipv6_recv_error(sk, msg, len); #endif } } skb = skb_recv_datagram(sk, flags, noblock, &err); if (!skb) goto out; copied = skb->len; if (copied > len) { msg->msg_flags |= MSG_TRUNC; copied = len; } /* Don't bother checking the checksum */ err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); if (err) goto done; sock_recv_timestamp(msg, sk, skb); /* Copy the address and add cmsg data. */ if (family == AF_INET) { struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name; sin->sin_family = AF_INET; sin->sin_port = 0 /* skb->h.uh->source */; sin->sin_addr.s_addr = ip_hdr(skb)->saddr; memset(sin->sin_zero, 0, sizeof(sin->sin_zero)); *addr_len = sizeof(*sin); if (isk->cmsg_flags) ip_cmsg_recv(msg, skb); #if IS_ENABLED(CONFIG_IPV6) } else if (family == AF_INET6) { struct ipv6_pinfo *np = inet6_sk(sk); struct ipv6hdr *ip6 = ipv6_hdr(skb); struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)msg->msg_name; sin6->sin6_family = AF_INET6; sin6->sin6_port = 0; sin6->sin6_addr = ip6->saddr; sin6->sin6_flowinfo = 0; if (np->sndflow) sin6->sin6_flowinfo = ip6_flowinfo(ip6); sin6->sin6_scope_id = ipv6_iface_scope_id(&sin6->sin6_addr, IP6CB(skb)->iif); *addr_len = sizeof(*sin6); if (inet6_sk(sk)->rxopt.all) pingv6_ops.ip6_datagram_recv_ctl(sk, msg, skb); #endif } else { BUG(); } err = copied; done: skb_free_datagram(sk, skb); out: pr_debug("ping_recvmsg -> %d\n", err); return err; } EXPORT_SYMBOL_GPL(ping_recvmsg); int ping_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) { pr_debug("ping_queue_rcv_skb(sk=%p,sk->num=%d,skb=%p)\n", inet_sk(sk), inet_sk(sk)->inet_num, skb); if (sock_queue_rcv_skb(sk, skb) < 0) { kfree_skb(skb); pr_debug("ping_queue_rcv_skb -> failed\n"); return -1; } return 0; } EXPORT_SYMBOL_GPL(ping_queue_rcv_skb); /* * All we need to do is get the socket. */ void ping_rcv(struct sk_buff *skb) { struct sock *sk; struct net *net = dev_net(skb->dev); struct icmphdr *icmph = icmp_hdr(skb); /* We assume the packet has already been checked by icmp_rcv */ pr_debug("ping_rcv(skb=%p,id=%04x,seq=%04x)\n", skb, ntohs(icmph->un.echo.id), ntohs(icmph->un.echo.sequence)); /* Push ICMP header back */ skb_push(skb, skb->data - (u8 *)icmph); sk = ping_lookup(net, skb, ntohs(icmph->un.echo.id)); if (sk != NULL) { pr_debug("rcv on socket %p\n", sk); ping_queue_rcv_skb(sk, skb_get(skb)); sock_put(sk); return; } pr_debug("no socket, dropping\n"); /* We're called from icmp_rcv(). kfree_skb() is done there. 
*/ } EXPORT_SYMBOL_GPL(ping_rcv); struct proto ping_prot = { .name = "PING", .owner = THIS_MODULE, .init = ping_init_sock, .close = ping_close, .connect = ip4_datagram_connect, .disconnect = udp_disconnect, .setsockopt = ip_setsockopt, .getsockopt = ip_getsockopt, .sendmsg = ping_v4_sendmsg, .recvmsg = ping_recvmsg, .bind = ping_bind, .backlog_rcv = ping_queue_rcv_skb, .release_cb = ip4_datagram_release_cb, .hash = ping_hash, .unhash = ping_unhash, .get_port = ping_get_port, .obj_size = sizeof(struct inet_sock), }; EXPORT_SYMBOL(ping_prot); #ifdef CONFIG_PROC_FS static struct sock *ping_get_first(struct seq_file *seq, int start) { struct sock *sk; struct ping_iter_state *state = seq->private; struct net *net = seq_file_net(seq); for (state->bucket = start; state->bucket < PING_HTABLE_SIZE; ++state->bucket) { struct hlist_nulls_node *node; struct hlist_nulls_head *hslot; hslot = &ping_table.hash[state->bucket]; if (hlist_nulls_empty(hslot)) continue; sk_nulls_for_each(sk, node, hslot) { if (net_eq(sock_net(sk), net) && sk->sk_family == state->family) goto found; } } sk = NULL; found: return sk; } static struct sock *ping_get_next(struct seq_file *seq, struct sock *sk) { struct ping_iter_state *state = seq->private; struct net *net = seq_file_net(seq); do { sk = sk_nulls_next(sk); } while (sk && (!net_eq(sock_net(sk), net))); if (!sk) return ping_get_first(seq, state->bucket + 1); return sk; } static struct sock *ping_get_idx(struct seq_file *seq, loff_t pos) { struct sock *sk = ping_get_first(seq, 0); if (sk) while (pos && (sk = ping_get_next(seq, sk)) != NULL) --pos; return pos ? NULL : sk; } void *ping_seq_start(struct seq_file *seq, loff_t *pos, sa_family_t family) { struct ping_iter_state *state = seq->private; state->bucket = 0; state->family = family; read_lock_bh(&ping_table.lock); return *pos ? 
ping_get_idx(seq, *pos-1) : SEQ_START_TOKEN; } EXPORT_SYMBOL_GPL(ping_seq_start); static void *ping_v4_seq_start(struct seq_file *seq, loff_t *pos) { return ping_seq_start(seq, pos, AF_INET); } void *ping_seq_next(struct seq_file *seq, void *v, loff_t *pos) { struct sock *sk; if (v == SEQ_START_TOKEN) sk = ping_get_idx(seq, 0); else sk = ping_get_next(seq, v); ++*pos; return sk; } EXPORT_SYMBOL_GPL(ping_seq_next); void ping_seq_stop(struct seq_file *seq, void *v) { read_unlock_bh(&ping_table.lock); } EXPORT_SYMBOL_GPL(ping_seq_stop); static void ping_v4_format_sock(struct sock *sp, struct seq_file *f, int bucket, int *len) { struct inet_sock *inet = inet_sk(sp); __be32 dest = inet->inet_daddr; __be32 src = inet->inet_rcv_saddr; __u16 destp = ntohs(inet->inet_dport); __u16 srcp = ntohs(inet->inet_sport); seq_printf(f, "%5d: %08X:%04X %08X:%04X" " %02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %d%n", bucket, src, srcp, dest, destp, sp->sk_state, sk_wmem_alloc_get(sp), sk_rmem_alloc_get(sp), 0, 0L, 0, from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)), 0, sock_i_ino(sp), atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops), len); } static int ping_v4_seq_show(struct seq_file *seq, void *v) { if (v == SEQ_START_TOKEN) seq_printf(seq, "%-127s\n", " sl local_address rem_address st tx_queue " "rx_queue tr tm->when retrnsmt uid timeout " "inode ref pointer drops"); else { struct ping_iter_state *state = seq->private; int len; ping_v4_format_sock(v, seq, state->bucket, &len); seq_printf(seq, "%*s\n", 127 - len, ""); } return 0; } static const struct seq_operations ping_v4_seq_ops = { .show = ping_v4_seq_show, .start = ping_v4_seq_start, .next = ping_seq_next, .stop = ping_seq_stop, }; static int ping_seq_open(struct inode *inode, struct file *file) { struct ping_seq_afinfo *afinfo = PDE_DATA(inode); return seq_open_net(inode, file, &afinfo->seq_ops, sizeof(struct ping_iter_state)); } const struct file_operations ping_seq_fops = { .open = ping_seq_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release_net, }; EXPORT_SYMBOL_GPL(ping_seq_fops); static struct ping_seq_afinfo ping_v4_seq_afinfo = { .name = "icmp", .family = AF_INET, .seq_fops = &ping_seq_fops, .seq_ops = { .start = ping_v4_seq_start, .show = ping_v4_seq_show, .next = ping_seq_next, .stop = ping_seq_stop, }, }; int ping_proc_register(struct net *net, struct ping_seq_afinfo *afinfo) { struct proc_dir_entry *p; p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net, afinfo->seq_fops, afinfo); if (!p) return -ENOMEM; return 0; } EXPORT_SYMBOL_GPL(ping_proc_register); void ping_proc_unregister(struct net *net, struct ping_seq_afinfo *afinfo) { remove_proc_entry(afinfo->name, net->proc_net); } EXPORT_SYMBOL_GPL(ping_proc_unregister); static int __net_init ping_v4_proc_init_net(struct net *net) { return ping_proc_register(net, &ping_v4_seq_afinfo); } static void __net_exit ping_v4_proc_exit_net(struct net *net) { ping_proc_unregister(net, &ping_v4_seq_afinfo); } static struct pernet_operations ping_v4_net_ops = { .init = ping_v4_proc_init_net, .exit = ping_v4_proc_exit_net, }; int __init ping_proc_init(void) { return register_pernet_subsys(&ping_v4_net_ops); } void ping_proc_exit(void) { unregister_pernet_subsys(&ping_v4_net_ops); } #endif void __init ping_init(void) { int i; for (i = 0; i < PING_HTABLE_SIZE; i++) INIT_HLIST_NULLS_HEAD(&ping_table.hash[i], i); rwlock_init(&ping_table.lock); }
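The receive path above restores the ICMP header with skb_push() and then dispatches on the echo identifier via ping_lookup(); the lookup and hash themselves are not part of this excerpt. Below is a minimal userspace model of that id-keyed bucket table. The bucket count and the mask-based hash are assumptions standing in for PING_HTABLE_SIZE and the real hash function, and all names are invented for the sketch.

/* Illustrative userspace model only -- not kernel code. */
#include <stdint.h>
#include <stdio.h>

#define MODEL_HTABLE_SIZE 64                 /* assumed bucket count */

struct model_sock {
	uint16_t echo_id;                    /* key: ICMP echo identifier */
	struct model_sock *next;
};

static struct model_sock *table[MODEL_HTABLE_SIZE];

static unsigned int model_hash(uint16_t id)
{
	return id & (MODEL_HTABLE_SIZE - 1); /* power-of-two mask (assumed) */
}

static void model_insert(struct model_sock *sk)
{
	unsigned int b = model_hash(sk->echo_id);

	sk->next = table[b];
	table[b] = sk;
}

static struct model_sock *model_lookup(uint16_t id)
{
	struct model_sock *sk;

	for (sk = table[model_hash(id)]; sk != NULL; sk = sk->next)
		if (sk->echo_id == id)
			return sk;
	return NULL;                         /* no owner: ping_rcv() just drops */
}

int main(void)
{
	struct model_sock a = { .echo_id = 0x1234, .next = NULL };

	model_insert(&a);
	printf("0x1234 -> %s\n", model_lookup(0x1234) ? "hit" : "miss");
	printf("0x4321 -> %s\n", model_lookup(0x4321) ? "hit" : "miss");
	return 0;
}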
int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, size_t len, int noblock, int flags, int *addr_len) { struct inet_sock *isk = inet_sk(sk); int family = sk->sk_family; struct sockaddr_in *sin; struct sockaddr_in6 *sin6; struct sk_buff *skb; int copied, err; pr_debug("ping_recvmsg(sk=%p,sk->num=%u)\n", isk, isk->inet_num); err = -EOPNOTSUPP; if (flags & MSG_OOB) goto out; if (addr_len) { if (family == AF_INET) *addr_len = sizeof(*sin); else if (family == AF_INET6 && addr_len) *addr_len = sizeof(*sin6); } if (flags & MSG_ERRQUEUE) { if (family == AF_INET) { return ip_recv_error(sk, msg, len); #if IS_ENABLED(CONFIG_IPV6) } else if (family == AF_INET6) { return pingv6_ops.ipv6_recv_error(sk, msg, len); #endif } } skb = skb_recv_datagram(sk, flags, noblock, &err); if (!skb) goto out; copied = skb->len; if (copied > len) { msg->msg_flags |= MSG_TRUNC; copied = len; } /* Don't bother checking the checksum */ err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); if (err) goto done; sock_recv_timestamp(msg, sk, skb); /* Copy the address and add cmsg data. */ if (family == AF_INET) { sin = (struct sockaddr_in *) msg->msg_name; sin->sin_family = AF_INET; sin->sin_port = 0 /* skb->h.uh->source */; sin->sin_addr.s_addr = ip_hdr(skb)->saddr; memset(sin->sin_zero, 0, sizeof(sin->sin_zero)); if (isk->cmsg_flags) ip_cmsg_recv(msg, skb); #if IS_ENABLED(CONFIG_IPV6) } else if (family == AF_INET6) { struct ipv6_pinfo *np = inet6_sk(sk); struct ipv6hdr *ip6 = ipv6_hdr(skb); sin6 = (struct sockaddr_in6 *) msg->msg_name; sin6->sin6_family = AF_INET6; sin6->sin6_port = 0; sin6->sin6_addr = ip6->saddr; sin6->sin6_flowinfo = 0; if (np->sndflow) sin6->sin6_flowinfo = ip6_flowinfo(ip6); sin6->sin6_scope_id = ipv6_iface_scope_id(&sin6->sin6_addr, IP6CB(skb)->iif); if (inet6_sk(sk)->rxopt.all) pingv6_ops.ip6_datagram_recv_ctl(sk, msg, skb); #endif } else { BUG(); } err = copied; done: skb_free_datagram(sk, skb); out: pr_debug("ping_recvmsg -> %d\n", err); return err; }
int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, size_t len, int noblock, int flags, int *addr_len) { struct inet_sock *isk = inet_sk(sk); int family = sk->sk_family; struct sk_buff *skb; int copied, err; pr_debug("ping_recvmsg(sk=%p,sk->num=%u)\n", isk, isk->inet_num); err = -EOPNOTSUPP; if (flags & MSG_OOB) goto out; if (flags & MSG_ERRQUEUE) { if (family == AF_INET) { return ip_recv_error(sk, msg, len); #if IS_ENABLED(CONFIG_IPV6) } else if (family == AF_INET6) { return pingv6_ops.ipv6_recv_error(sk, msg, len); #endif } } skb = skb_recv_datagram(sk, flags, noblock, &err); if (!skb) goto out; copied = skb->len; if (copied > len) { msg->msg_flags |= MSG_TRUNC; copied = len; } /* Don't bother checking the checksum */ err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); if (err) goto done; sock_recv_timestamp(msg, sk, skb); /* Copy the address and add cmsg data. */ if (family == AF_INET) { struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name; sin->sin_family = AF_INET; sin->sin_port = 0 /* skb->h.uh->source */; sin->sin_addr.s_addr = ip_hdr(skb)->saddr; memset(sin->sin_zero, 0, sizeof(sin->sin_zero)); *addr_len = sizeof(*sin); if (isk->cmsg_flags) ip_cmsg_recv(msg, skb); #if IS_ENABLED(CONFIG_IPV6) } else if (family == AF_INET6) { struct ipv6_pinfo *np = inet6_sk(sk); struct ipv6hdr *ip6 = ipv6_hdr(skb); struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)msg->msg_name; sin6->sin6_family = AF_INET6; sin6->sin6_port = 0; sin6->sin6_addr = ip6->saddr; sin6->sin6_flowinfo = 0; if (np->sndflow) sin6->sin6_flowinfo = ip6_flowinfo(ip6); sin6->sin6_scope_id = ipv6_iface_scope_id(&sin6->sin6_addr, IP6CB(skb)->iif); *addr_len = sizeof(*sin6); if (inet6_sk(sk)->rxopt.all) pingv6_ops.ip6_datagram_recv_ctl(sk, msg, skb); #endif } else { BUG(); } err = copied; done: skb_free_datagram(sk, skb); out: pr_debug("ping_recvmsg -> %d\n", err); return err; }
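The two versions of ping_recvmsg() above differ only in where *addr_len is written, which is the whole of the CVE-2013-7263 fix recorded in the diff below: the vulnerable version commits an address length up front, based on the socket family alone, while the fixed version writes *addr_len only immediately after the corresponding sockaddr in msg->msg_name has been populated, so the caller is never told to trust bytes that were never written. A minimal userspace sketch of that ordering follows; the function and parameter names are invented for illustration.

/* Illustrative sketch of the fixed ordering -- names are invented. */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void model_report_addr(struct sockaddr_in *name, int *addr_len,
			      uint32_t saddr_be)
{
	if (name == NULL)
		return;                 /* no address requested:
					 * leave *addr_len untouched */
	memset(name, 0, sizeof(*name));
	name->sin_family = AF_INET;
	name->sin_addr.s_addr = saddr_be;
	*addr_len = sizeof(*name);      /* only after 'name' is filled */
}

int main(void)
{
	struct sockaddr_in sin;
	int alen = -1;

	model_report_addr(&sin, &alen, htonl(0x7f000001));
	printf("addr_len=%d family=%d\n", alen, sin.sin_family);
	return 0;
}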
{'added': [(871, '\t\tstruct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name;'), (872, ''), (877, '\t\t*addr_len = sizeof(*sin);'), (886, '\t\tstruct sockaddr_in6 *sin6 ='), (887, '\t\t\t(struct sockaddr_in6 *)msg->msg_name;'), (888, ''), (898, '\t\t*addr_len = sizeof(*sin6);')], 'deleted': [(833, '\tstruct sockaddr_in *sin;'), (834, '\tstruct sockaddr_in6 *sin6;'), (844, '\tif (addr_len) {'), (845, '\t\tif (family == AF_INET)'), (846, '\t\t\t*addr_len = sizeof(*sin);'), (847, '\t\telse if (family == AF_INET6 && addr_len)'), (848, '\t\t\t*addr_len = sizeof(*sin6);'), (849, '\t}'), (850, ''), (880, '\t\tsin = (struct sockaddr_in *) msg->msg_name;'), (893, '\t\tsin6 = (struct sockaddr_in6 *) msg->msg_name;'), (897, '')]}
7
12
910
5928
https://github.com/torvalds/linux
CVE-2013-7263
['CWE-20']
cipso_ipv4.c
cipso_v4_delopt
/* * CIPSO - Commercial IP Security Option * * This is an implementation of the CIPSO 2.2 protocol as specified in * draft-ietf-cipso-ipsecurity-01.txt with additional tag types as found in * FIPS-188. While CIPSO never became a full IETF RFC standard many vendors * have chosen to adopt the protocol and over the years it has become a * de-facto standard for labeled networking. * * The CIPSO draft specification can be found in the kernel's Documentation * directory as well as the following URL: * http://tools.ietf.org/id/draft-ietf-cipso-ipsecurity-01.txt * The FIPS-188 specification can be found at the following URL: * http://www.itl.nist.gov/fipspubs/fip188.htm * * Author: Paul Moore <paul.moore@hp.com> * */ /* * (c) Copyright Hewlett-Packard Development Company, L.P., 2006, 2008 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See * the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/init.h> #include <linux/types.h> #include <linux/rcupdate.h> #include <linux/list.h> #include <linux/spinlock.h> #include <linux/string.h> #include <linux/jhash.h> #include <linux/audit.h> #include <linux/slab.h> #include <net/ip.h> #include <net/icmp.h> #include <net/tcp.h> #include <net/netlabel.h> #include <net/cipso_ipv4.h> #include <asm/atomic.h> #include <asm/bug.h> #include <asm/unaligned.h> /* List of available DOI definitions */ /* XXX - This currently assumes a minimal number of different DOIs in use, * if in practice there are a lot of different DOIs this list should * probably be turned into a hash table or something similar so we * can do quick lookups. */ static DEFINE_SPINLOCK(cipso_v4_doi_list_lock); static LIST_HEAD(cipso_v4_doi_list); /* Label mapping cache */ int cipso_v4_cache_enabled = 1; int cipso_v4_cache_bucketsize = 10; #define CIPSO_V4_CACHE_BUCKETBITS 7 #define CIPSO_V4_CACHE_BUCKETS (1 << CIPSO_V4_CACHE_BUCKETBITS) #define CIPSO_V4_CACHE_REORDERLIMIT 10 struct cipso_v4_map_cache_bkt { spinlock_t lock; u32 size; struct list_head list; }; struct cipso_v4_map_cache_entry { u32 hash; unsigned char *key; size_t key_len; struct netlbl_lsm_cache *lsm_data; u32 activity; struct list_head list; }; static struct cipso_v4_map_cache_bkt *cipso_v4_cache = NULL; /* Restricted bitmap (tag #1) flags */ int cipso_v4_rbm_optfmt = 0; int cipso_v4_rbm_strictvalid = 1; /* * Protocol Constants */ /* Maximum size of the CIPSO IP option, derived from the fact that the maximum * IPv4 header size is 60 bytes and the base IPv4 header is 20 bytes long. */ #define CIPSO_V4_OPT_LEN_MAX 40 /* Length of the base CIPSO option, this includes the option type (1 byte), the * option length (1 byte), and the DOI (4 bytes). */ #define CIPSO_V4_HDR_LEN 6 /* Base length of the restrictive category bitmap tag (tag #1). */ #define CIPSO_V4_TAG_RBM_BLEN 4 /* Base length of the enumerated category tag (tag #2). */ #define CIPSO_V4_TAG_ENUM_BLEN 4 /* Base length of the ranged categories bitmap tag (tag #5). 
*/ #define CIPSO_V4_TAG_RNG_BLEN 4 /* The maximum number of category ranges permitted in the ranged category tag * (tag #5). You may note that the IETF draft states that the maximum number * of category ranges is 7, but if the low end of the last category range is * zero then it is possible to fit 8 category ranges because the zero should * be omitted. */ #define CIPSO_V4_TAG_RNG_CAT_MAX 8 /* Base length of the local tag (non-standard tag). * Tag definition (may change between kernel versions) * * 0 8 16 24 32 * +----------+----------+----------+----------+ * | 10000000 | 00000110 | 32-bit secid value | * +----------+----------+----------+----------+ * | in (host byte order)| * +----------+----------+ * */ #define CIPSO_V4_TAG_LOC_BLEN 6 /* * Helper Functions */ /** * cipso_v4_bitmap_walk - Walk a bitmap looking for a bit * @bitmap: the bitmap * @bitmap_len: length in bits * @offset: starting offset * @state: if non-zero, look for a set (1) bit else look for a cleared (0) bit * * Description: * Starting at @offset, walk the bitmap from left to right until either the * desired bit is found or we reach the end. Return the bit offset, -1 if * not found, or -2 if error. */ static int cipso_v4_bitmap_walk(const unsigned char *bitmap, u32 bitmap_len, u32 offset, u8 state) { u32 bit_spot; u32 byte_offset; unsigned char bitmask; unsigned char byte; /* gcc always rounds to zero when doing integer division */ byte_offset = offset / 8; byte = bitmap[byte_offset]; bit_spot = offset; bitmask = 0x80 >> (offset % 8); while (bit_spot < bitmap_len) { if ((state && (byte & bitmask) == bitmask) || (state == 0 && (byte & bitmask) == 0)) return bit_spot; bit_spot++; bitmask >>= 1; if (bitmask == 0) { byte = bitmap[++byte_offset]; bitmask = 0x80; } } return -1; } /** * cipso_v4_bitmap_setbit - Sets a single bit in a bitmap * @bitmap: the bitmap * @bit: the bit * @state: if non-zero, set the bit (1) else clear the bit (0) * * Description: * Set a single bit in the bitmask. Returns zero on success, negative values * on error. */ static void cipso_v4_bitmap_setbit(unsigned char *bitmap, u32 bit, u8 state) { u32 byte_spot; u8 bitmask; /* gcc always rounds to zero when doing integer division */ byte_spot = bit / 8; bitmask = 0x80 >> (bit % 8); if (state) bitmap[byte_spot] |= bitmask; else bitmap[byte_spot] &= ~bitmask; } /** * cipso_v4_cache_entry_free - Frees a cache entry * @entry: the entry to free * * Description: * This function frees the memory associated with a cache entry including the * LSM cache data if there are no longer any users, i.e. reference count == 0. * */ static void cipso_v4_cache_entry_free(struct cipso_v4_map_cache_entry *entry) { if (entry->lsm_data) netlbl_secattr_cache_free(entry->lsm_data); kfree(entry->key); kfree(entry); } /** * cipso_v4_map_cache_hash - Hashing function for the CIPSO cache * @key: the hash key * @key_len: the length of the key in bytes * * Description: * The CIPSO tag hashing function. Returns a 32-bit hash value. * */ static u32 cipso_v4_map_cache_hash(const unsigned char *key, u32 key_len) { return jhash(key, key_len, 0); } /* * Label Mapping Cache Functions */ /** * cipso_v4_cache_init - Initialize the CIPSO cache * * Description: * Initializes the CIPSO label mapping cache, this function should be called * before any of the other functions defined in this file. Returns zero on * success, negative values on error. 
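* (Worked example, not part of the original comment: with
* CIPSO_V4_CACHE_BUCKETBITS defined as 7 above, the kcalloc() below
* allocates 1 << 7 = 128 buckets, each with its own spinlock and list.)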
* */ static int cipso_v4_cache_init(void) { u32 iter; cipso_v4_cache = kcalloc(CIPSO_V4_CACHE_BUCKETS, sizeof(struct cipso_v4_map_cache_bkt), GFP_KERNEL); if (cipso_v4_cache == NULL) return -ENOMEM; for (iter = 0; iter < CIPSO_V4_CACHE_BUCKETS; iter++) { spin_lock_init(&cipso_v4_cache[iter].lock); cipso_v4_cache[iter].size = 0; INIT_LIST_HEAD(&cipso_v4_cache[iter].list); } return 0; } /** * cipso_v4_cache_invalidate - Invalidates the current CIPSO cache * * Description: * Invalidates and frees any entries in the CIPSO cache. Returns zero on * success and negative values on failure. * */ void cipso_v4_cache_invalidate(void) { struct cipso_v4_map_cache_entry *entry, *tmp_entry; u32 iter; for (iter = 0; iter < CIPSO_V4_CACHE_BUCKETS; iter++) { spin_lock_bh(&cipso_v4_cache[iter].lock); list_for_each_entry_safe(entry, tmp_entry, &cipso_v4_cache[iter].list, list) { list_del(&entry->list); cipso_v4_cache_entry_free(entry); } cipso_v4_cache[iter].size = 0; spin_unlock_bh(&cipso_v4_cache[iter].lock); } } /** * cipso_v4_cache_check - Check the CIPSO cache for a label mapping * @key: the buffer to check * @key_len: buffer length in bytes * @secattr: the security attribute struct to use * * Description: * This function checks the cache to see if a label mapping already exists for * the given key. If there is a match then the cache is adjusted and the * @secattr struct is populated with the correct LSM security attributes. The * cache is adjusted in the following manner if the entry is not already the * first in the cache bucket: * * 1. The cache entry's activity counter is incremented * 2. The previous (higher ranking) entry's activity counter is decremented * 3. If the difference between the two activity counters is geater than * CIPSO_V4_CACHE_REORDERLIMIT the two entries are swapped * * Returns zero on success, -ENOENT for a cache miss, and other negative values * on error. * */ static int cipso_v4_cache_check(const unsigned char *key, u32 key_len, struct netlbl_lsm_secattr *secattr) { u32 bkt; struct cipso_v4_map_cache_entry *entry; struct cipso_v4_map_cache_entry *prev_entry = NULL; u32 hash; if (!cipso_v4_cache_enabled) return -ENOENT; hash = cipso_v4_map_cache_hash(key, key_len); bkt = hash & (CIPSO_V4_CACHE_BUCKETS - 1); spin_lock_bh(&cipso_v4_cache[bkt].lock); list_for_each_entry(entry, &cipso_v4_cache[bkt].list, list) { if (entry->hash == hash && entry->key_len == key_len && memcmp(entry->key, key, key_len) == 0) { entry->activity += 1; atomic_inc(&entry->lsm_data->refcount); secattr->cache = entry->lsm_data; secattr->flags |= NETLBL_SECATTR_CACHE; secattr->type = NETLBL_NLTYPE_CIPSOV4; if (prev_entry == NULL) { spin_unlock_bh(&cipso_v4_cache[bkt].lock); return 0; } if (prev_entry->activity > 0) prev_entry->activity -= 1; if (entry->activity > prev_entry->activity && entry->activity - prev_entry->activity > CIPSO_V4_CACHE_REORDERLIMIT) { __list_del(entry->list.prev, entry->list.next); __list_add(&entry->list, prev_entry->list.prev, &prev_entry->list); } spin_unlock_bh(&cipso_v4_cache[bkt].lock); return 0; } prev_entry = entry; } spin_unlock_bh(&cipso_v4_cache[bkt].lock); return -ENOENT; } /** * cipso_v4_cache_add - Add an entry to the CIPSO cache * @skb: the packet * @secattr: the packet's security attributes * * Description: * Add a new entry into the CIPSO label mapping cache. Add the new entry to * head of the cache bucket's list, if the cache bucket is out of room remove * the last entry in the list first. It is important to note that there is * currently no checking for duplicate keys. 
Returns zero on success, * negative values on failure. * */ int cipso_v4_cache_add(const struct sk_buff *skb, const struct netlbl_lsm_secattr *secattr) { int ret_val = -EPERM; u32 bkt; struct cipso_v4_map_cache_entry *entry = NULL; struct cipso_v4_map_cache_entry *old_entry = NULL; unsigned char *cipso_ptr; u32 cipso_ptr_len; if (!cipso_v4_cache_enabled || cipso_v4_cache_bucketsize <= 0) return 0; cipso_ptr = CIPSO_V4_OPTPTR(skb); cipso_ptr_len = cipso_ptr[1]; entry = kzalloc(sizeof(*entry), GFP_ATOMIC); if (entry == NULL) return -ENOMEM; entry->key = kmemdup(cipso_ptr, cipso_ptr_len, GFP_ATOMIC); if (entry->key == NULL) { ret_val = -ENOMEM; goto cache_add_failure; } entry->key_len = cipso_ptr_len; entry->hash = cipso_v4_map_cache_hash(cipso_ptr, cipso_ptr_len); atomic_inc(&secattr->cache->refcount); entry->lsm_data = secattr->cache; bkt = entry->hash & (CIPSO_V4_CACHE_BUCKETS - 1); spin_lock_bh(&cipso_v4_cache[bkt].lock); if (cipso_v4_cache[bkt].size < cipso_v4_cache_bucketsize) { list_add(&entry->list, &cipso_v4_cache[bkt].list); cipso_v4_cache[bkt].size += 1; } else { old_entry = list_entry(cipso_v4_cache[bkt].list.prev, struct cipso_v4_map_cache_entry, list); list_del(&old_entry->list); list_add(&entry->list, &cipso_v4_cache[bkt].list); cipso_v4_cache_entry_free(old_entry); } spin_unlock_bh(&cipso_v4_cache[bkt].lock); return 0; cache_add_failure: if (entry) cipso_v4_cache_entry_free(entry); return ret_val; } /* * DOI List Functions */ /** * cipso_v4_doi_search - Searches for a DOI definition * @doi: the DOI to search for * * Description: * Search the DOI definition list for a DOI definition with a DOI value that * matches @doi. The caller is responsible for calling rcu_read_[un]lock(). * Returns a pointer to the DOI definition on success and NULL on failure. */ static struct cipso_v4_doi *cipso_v4_doi_search(u32 doi) { struct cipso_v4_doi *iter; list_for_each_entry_rcu(iter, &cipso_v4_doi_list, list) if (iter->doi == doi && atomic_read(&iter->refcount)) return iter; return NULL; } /** * cipso_v4_doi_add - Add a new DOI to the CIPSO protocol engine * @doi_def: the DOI structure * @audit_info: NetLabel audit information * * Description: * The caller defines a new DOI for use by the CIPSO engine and calls this * function to add it to the list of acceptable domains. The caller must * ensure that the mapping table specified in @doi_def->map meets all of the * requirements of the mapping type (see cipso_ipv4.h for details). Returns * zero on success and non-zero on failure. 
* */ int cipso_v4_doi_add(struct cipso_v4_doi *doi_def, struct netlbl_audit *audit_info) { int ret_val = -EINVAL; u32 iter; u32 doi; u32 doi_type; struct audit_buffer *audit_buf; doi = doi_def->doi; doi_type = doi_def->type; if (doi_def == NULL || doi_def->doi == CIPSO_V4_DOI_UNKNOWN) goto doi_add_return; for (iter = 0; iter < CIPSO_V4_TAG_MAXCNT; iter++) { switch (doi_def->tags[iter]) { case CIPSO_V4_TAG_RBITMAP: break; case CIPSO_V4_TAG_RANGE: case CIPSO_V4_TAG_ENUM: if (doi_def->type != CIPSO_V4_MAP_PASS) goto doi_add_return; break; case CIPSO_V4_TAG_LOCAL: if (doi_def->type != CIPSO_V4_MAP_LOCAL) goto doi_add_return; break; case CIPSO_V4_TAG_INVALID: if (iter == 0) goto doi_add_return; break; default: goto doi_add_return; } } atomic_set(&doi_def->refcount, 1); spin_lock(&cipso_v4_doi_list_lock); if (cipso_v4_doi_search(doi_def->doi) != NULL) { spin_unlock(&cipso_v4_doi_list_lock); ret_val = -EEXIST; goto doi_add_return; } list_add_tail_rcu(&doi_def->list, &cipso_v4_doi_list); spin_unlock(&cipso_v4_doi_list_lock); ret_val = 0; doi_add_return: audit_buf = netlbl_audit_start(AUDIT_MAC_CIPSOV4_ADD, audit_info); if (audit_buf != NULL) { const char *type_str; switch (doi_type) { case CIPSO_V4_MAP_TRANS: type_str = "trans"; break; case CIPSO_V4_MAP_PASS: type_str = "pass"; break; case CIPSO_V4_MAP_LOCAL: type_str = "local"; break; default: type_str = "(unknown)"; } audit_log_format(audit_buf, " cipso_doi=%u cipso_type=%s res=%u", doi, type_str, ret_val == 0 ? 1 : 0); audit_log_end(audit_buf); } return ret_val; } /** * cipso_v4_doi_free - Frees a DOI definition * @entry: the entry's RCU field * * Description: * This function frees all of the memory associated with a DOI definition. * */ void cipso_v4_doi_free(struct cipso_v4_doi *doi_def) { if (doi_def == NULL) return; switch (doi_def->type) { case CIPSO_V4_MAP_TRANS: kfree(doi_def->map.std->lvl.cipso); kfree(doi_def->map.std->lvl.local); kfree(doi_def->map.std->cat.cipso); kfree(doi_def->map.std->cat.local); break; } kfree(doi_def); } /** * cipso_v4_doi_free_rcu - Frees a DOI definition via the RCU pointer * @entry: the entry's RCU field * * Description: * This function is designed to be used as a callback to the call_rcu() * function so that the memory allocated to the DOI definition can be released * safely. * */ static void cipso_v4_doi_free_rcu(struct rcu_head *entry) { struct cipso_v4_doi *doi_def; doi_def = container_of(entry, struct cipso_v4_doi, rcu); cipso_v4_doi_free(doi_def); } /** * cipso_v4_doi_remove - Remove an existing DOI from the CIPSO protocol engine * @doi: the DOI value * @audit_secid: the LSM secid to use in the audit message * * Description: * Removes a DOI definition from the CIPSO engine. The NetLabel routines will * be called to release their own LSM domain mappings as well as our own * domain list. Returns zero on success and negative values on failure. 
* */ int cipso_v4_doi_remove(u32 doi, struct netlbl_audit *audit_info) { int ret_val; struct cipso_v4_doi *doi_def; struct audit_buffer *audit_buf; spin_lock(&cipso_v4_doi_list_lock); doi_def = cipso_v4_doi_search(doi); if (doi_def == NULL) { spin_unlock(&cipso_v4_doi_list_lock); ret_val = -ENOENT; goto doi_remove_return; } if (!atomic_dec_and_test(&doi_def->refcount)) { spin_unlock(&cipso_v4_doi_list_lock); ret_val = -EBUSY; goto doi_remove_return; } list_del_rcu(&doi_def->list); spin_unlock(&cipso_v4_doi_list_lock); cipso_v4_cache_invalidate(); call_rcu(&doi_def->rcu, cipso_v4_doi_free_rcu); ret_val = 0; doi_remove_return: audit_buf = netlbl_audit_start(AUDIT_MAC_CIPSOV4_DEL, audit_info); if (audit_buf != NULL) { audit_log_format(audit_buf, " cipso_doi=%u res=%u", doi, ret_val == 0 ? 1 : 0); audit_log_end(audit_buf); } return ret_val; } /** * cipso_v4_doi_getdef - Returns a reference to a valid DOI definition * @doi: the DOI value * * Description: * Searches for a valid DOI definition and if one is found it is returned to * the caller. Otherwise NULL is returned. The caller must ensure that * rcu_read_lock() is held while accessing the returned definition and the DOI * definition reference count is decremented when the caller is done. * */ struct cipso_v4_doi *cipso_v4_doi_getdef(u32 doi) { struct cipso_v4_doi *doi_def; rcu_read_lock(); doi_def = cipso_v4_doi_search(doi); if (doi_def == NULL) goto doi_getdef_return; if (!atomic_inc_not_zero(&doi_def->refcount)) doi_def = NULL; doi_getdef_return: rcu_read_unlock(); return doi_def; } /** * cipso_v4_doi_putdef - Releases a reference for the given DOI definition * @doi_def: the DOI definition * * Description: * Releases a DOI definition reference obtained from cipso_v4_doi_getdef(). * */ void cipso_v4_doi_putdef(struct cipso_v4_doi *doi_def) { if (doi_def == NULL) return; if (!atomic_dec_and_test(&doi_def->refcount)) return; spin_lock(&cipso_v4_doi_list_lock); list_del_rcu(&doi_def->list); spin_unlock(&cipso_v4_doi_list_lock); cipso_v4_cache_invalidate(); call_rcu(&doi_def->rcu, cipso_v4_doi_free_rcu); } /** * cipso_v4_doi_walk - Iterate through the DOI definitions * @skip_cnt: skip past this number of DOI definitions, updated * @callback: callback for each DOI definition * @cb_arg: argument for the callback function * * Description: * Iterate over the DOI definition list, skipping the first @skip_cnt entries. * For each entry call @callback, if @callback returns a negative value stop * 'walking' through the list and return. Updates the value in @skip_cnt upon * return. Returns zero on success, negative values on failure. * */ int cipso_v4_doi_walk(u32 *skip_cnt, int (*callback) (struct cipso_v4_doi *doi_def, void *arg), void *cb_arg) { int ret_val = -ENOENT; u32 doi_cnt = 0; struct cipso_v4_doi *iter_doi; rcu_read_lock(); list_for_each_entry_rcu(iter_doi, &cipso_v4_doi_list, list) if (atomic_read(&iter_doi->refcount) > 0) { if (doi_cnt++ < *skip_cnt) continue; ret_val = callback(iter_doi, cb_arg); if (ret_val < 0) { doi_cnt--; goto doi_walk_return; } } doi_walk_return: rcu_read_unlock(); *skip_cnt = doi_cnt; return ret_val; } /* * Label Mapping Functions */ /** * cipso_v4_map_lvl_valid - Checks to see if the given level is understood * @doi_def: the DOI definition * @level: the level to check * * Description: * Checks the given level against the given DOI definition and returns a * negative value if the level does not have a valid mapping and a zero value * if the level is defined by the DOI. 
* */ static int cipso_v4_map_lvl_valid(const struct cipso_v4_doi *doi_def, u8 level) { switch (doi_def->type) { case CIPSO_V4_MAP_PASS: return 0; case CIPSO_V4_MAP_TRANS: if (doi_def->map.std->lvl.cipso[level] < CIPSO_V4_INV_LVL) return 0; break; } return -EFAULT; } /** * cipso_v4_map_lvl_hton - Perform a level mapping from the host to the network * @doi_def: the DOI definition * @host_lvl: the host MLS level * @net_lvl: the network/CIPSO MLS level * * Description: * Perform a label mapping to translate a local MLS level to the correct * CIPSO level using the given DOI definition. Returns zero on success, * negative values otherwise. * */ static int cipso_v4_map_lvl_hton(const struct cipso_v4_doi *doi_def, u32 host_lvl, u32 *net_lvl) { switch (doi_def->type) { case CIPSO_V4_MAP_PASS: *net_lvl = host_lvl; return 0; case CIPSO_V4_MAP_TRANS: if (host_lvl < doi_def->map.std->lvl.local_size && doi_def->map.std->lvl.local[host_lvl] < CIPSO_V4_INV_LVL) { *net_lvl = doi_def->map.std->lvl.local[host_lvl]; return 0; } return -EPERM; } return -EINVAL; } /** * cipso_v4_map_lvl_ntoh - Perform a level mapping from the network to the host * @doi_def: the DOI definition * @net_lvl: the network/CIPSO MLS level * @host_lvl: the host MLS level * * Description: * Perform a label mapping to translate a CIPSO level to the correct local MLS * level using the given DOI definition. Returns zero on success, negative * values otherwise. * */ static int cipso_v4_map_lvl_ntoh(const struct cipso_v4_doi *doi_def, u32 net_lvl, u32 *host_lvl) { struct cipso_v4_std_map_tbl *map_tbl; switch (doi_def->type) { case CIPSO_V4_MAP_PASS: *host_lvl = net_lvl; return 0; case CIPSO_V4_MAP_TRANS: map_tbl = doi_def->map.std; if (net_lvl < map_tbl->lvl.cipso_size && map_tbl->lvl.cipso[net_lvl] < CIPSO_V4_INV_LVL) { *host_lvl = doi_def->map.std->lvl.cipso[net_lvl]; return 0; } return -EPERM; } return -EINVAL; } /** * cipso_v4_map_cat_rbm_valid - Checks to see if the category bitmap is valid * @doi_def: the DOI definition * @bitmap: category bitmap * @bitmap_len: bitmap length in bytes * * Description: * Checks the given category bitmap against the given DOI definition and * returns a negative value if any of the categories in the bitmap do not have * a valid mapping and a zero value if all of the categories are valid. * */ static int cipso_v4_map_cat_rbm_valid(const struct cipso_v4_doi *doi_def, const unsigned char *bitmap, u32 bitmap_len) { int cat = -1; u32 bitmap_len_bits = bitmap_len * 8; u32 cipso_cat_size; u32 *cipso_array; switch (doi_def->type) { case CIPSO_V4_MAP_PASS: return 0; case CIPSO_V4_MAP_TRANS: cipso_cat_size = doi_def->map.std->cat.cipso_size; cipso_array = doi_def->map.std->cat.cipso; for (;;) { cat = cipso_v4_bitmap_walk(bitmap, bitmap_len_bits, cat + 1, 1); if (cat < 0) break; if (cat >= cipso_cat_size || cipso_array[cat] >= CIPSO_V4_INV_CAT) return -EFAULT; } if (cat == -1) return 0; break; } return -EFAULT; } /** * cipso_v4_map_cat_rbm_hton - Perform a category mapping from host to network * @doi_def: the DOI definition * @secattr: the security attributes * @net_cat: the zero'd out category bitmap in network/CIPSO format * @net_cat_len: the length of the CIPSO bitmap in bytes * * Description: * Perform a label mapping to translate a local MLS category bitmap to the * correct CIPSO bitmap using the given DOI definition. Returns the minimum * size in bytes of the network bitmap on success, negative values otherwise. 
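* (Illustrative example, not part of the original comment: for the
* local category set {0, 5} under a pass-through mapping, the setbit
* calls below yield net_cat[0] = 0x84 -- bit 0 is the 0x80 mask and
* bit 5 is 0x04 -- and net_spot_max ends at 5, which rounds up to a
* returned length of one byte.)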
* */ static int cipso_v4_map_cat_rbm_hton(const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr, unsigned char *net_cat, u32 net_cat_len) { int host_spot = -1; u32 net_spot = CIPSO_V4_INV_CAT; u32 net_spot_max = 0; u32 net_clen_bits = net_cat_len * 8; u32 host_cat_size = 0; u32 *host_cat_array = NULL; if (doi_def->type == CIPSO_V4_MAP_TRANS) { host_cat_size = doi_def->map.std->cat.local_size; host_cat_array = doi_def->map.std->cat.local; } for (;;) { host_spot = netlbl_secattr_catmap_walk(secattr->attr.mls.cat, host_spot + 1); if (host_spot < 0) break; switch (doi_def->type) { case CIPSO_V4_MAP_PASS: net_spot = host_spot; break; case CIPSO_V4_MAP_TRANS: if (host_spot >= host_cat_size) return -EPERM; net_spot = host_cat_array[host_spot]; if (net_spot >= CIPSO_V4_INV_CAT) return -EPERM; break; } if (net_spot >= net_clen_bits) return -ENOSPC; cipso_v4_bitmap_setbit(net_cat, net_spot, 1); if (net_spot > net_spot_max) net_spot_max = net_spot; } if (++net_spot_max % 8) return net_spot_max / 8 + 1; return net_spot_max / 8; } /** * cipso_v4_map_cat_rbm_ntoh - Perform a category mapping from network to host * @doi_def: the DOI definition * @net_cat: the category bitmap in network/CIPSO format * @net_cat_len: the length of the CIPSO bitmap in bytes * @secattr: the security attributes * * Description: * Perform a label mapping to translate a CIPSO bitmap to the correct local * MLS category bitmap using the given DOI definition. Returns zero on * success, negative values on failure. * */ static int cipso_v4_map_cat_rbm_ntoh(const struct cipso_v4_doi *doi_def, const unsigned char *net_cat, u32 net_cat_len, struct netlbl_lsm_secattr *secattr) { int ret_val; int net_spot = -1; u32 host_spot = CIPSO_V4_INV_CAT; u32 net_clen_bits = net_cat_len * 8; u32 net_cat_size = 0; u32 *net_cat_array = NULL; if (doi_def->type == CIPSO_V4_MAP_TRANS) { net_cat_size = doi_def->map.std->cat.cipso_size; net_cat_array = doi_def->map.std->cat.cipso; } for (;;) { net_spot = cipso_v4_bitmap_walk(net_cat, net_clen_bits, net_spot + 1, 1); if (net_spot < 0) { if (net_spot == -2) return -EFAULT; return 0; } switch (doi_def->type) { case CIPSO_V4_MAP_PASS: host_spot = net_spot; break; case CIPSO_V4_MAP_TRANS: if (net_spot >= net_cat_size) return -EPERM; host_spot = net_cat_array[net_spot]; if (host_spot >= CIPSO_V4_INV_CAT) return -EPERM; break; } ret_val = netlbl_secattr_catmap_setbit(secattr->attr.mls.cat, host_spot, GFP_ATOMIC); if (ret_val != 0) return ret_val; } return -EINVAL; } /** * cipso_v4_map_cat_enum_valid - Checks to see if the categories are valid * @doi_def: the DOI definition * @enumcat: category list * @enumcat_len: length of the category list in bytes * * Description: * Checks the given categories against the given DOI definition and returns a * negative value if any of the categories do not have a valid mapping and a * zero value if all of the categories are valid. 
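* (Illustrative example, not part of the original comment: the
* categories 1, 4, 7 encode as three big-endian 16-bit words and pass
* this check; a repeated or descending value such as 1, 4, 4 fails
* with -EFAULT, since each category must be strictly greater than the
* previous one.)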
* */ static int cipso_v4_map_cat_enum_valid(const struct cipso_v4_doi *doi_def, const unsigned char *enumcat, u32 enumcat_len) { u16 cat; int cat_prev = -1; u32 iter; if (doi_def->type != CIPSO_V4_MAP_PASS || enumcat_len & 0x01) return -EFAULT; for (iter = 0; iter < enumcat_len; iter += 2) { cat = get_unaligned_be16(&enumcat[iter]); if (cat <= cat_prev) return -EFAULT; cat_prev = cat; } return 0; } /** * cipso_v4_map_cat_enum_hton - Perform a category mapping from host to network * @doi_def: the DOI definition * @secattr: the security attributes * @net_cat: the zero'd out category list in network/CIPSO format * @net_cat_len: the length of the CIPSO category list in bytes * * Description: * Perform a label mapping to translate a local MLS category bitmap to the * correct CIPSO category list using the given DOI definition. Returns the * size in bytes of the network category bitmap on success, negative values * otherwise. * */ static int cipso_v4_map_cat_enum_hton(const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr, unsigned char *net_cat, u32 net_cat_len) { int cat = -1; u32 cat_iter = 0; for (;;) { cat = netlbl_secattr_catmap_walk(secattr->attr.mls.cat, cat + 1); if (cat < 0) break; if ((cat_iter + 2) > net_cat_len) return -ENOSPC; *((__be16 *)&net_cat[cat_iter]) = htons(cat); cat_iter += 2; } return cat_iter; } /** * cipso_v4_map_cat_enum_ntoh - Perform a category mapping from network to host * @doi_def: the DOI definition * @net_cat: the category list in network/CIPSO format * @net_cat_len: the length of the CIPSO bitmap in bytes * @secattr: the security attributes * * Description: * Perform a label mapping to translate a CIPSO category list to the correct * local MLS category bitmap using the given DOI definition. Returns zero on * success, negative values on failure. * */ static int cipso_v4_map_cat_enum_ntoh(const struct cipso_v4_doi *doi_def, const unsigned char *net_cat, u32 net_cat_len, struct netlbl_lsm_secattr *secattr) { int ret_val; u32 iter; for (iter = 0; iter < net_cat_len; iter += 2) { ret_val = netlbl_secattr_catmap_setbit(secattr->attr.mls.cat, get_unaligned_be16(&net_cat[iter]), GFP_ATOMIC); if (ret_val != 0) return ret_val; } return 0; } /** * cipso_v4_map_cat_rng_valid - Checks to see if the categories are valid * @doi_def: the DOI definition * @rngcat: category list * @rngcat_len: length of the category list in bytes * * Description: * Checks the given categories against the given DOI definition and returns a * negative value if any of the categories do not have a valid mapping and a * zero value if all of the categories are valid. 
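* (Illustrative example, not part of the original comment: the
* category set {0..3, 10..12} travels highest range first as
* descending (high, low) pairs -- the 16-bit words 12, 10, 3 -- with
* the low end of the last range omitted when it is zero, which is how
* eight ranges can fit although the draft names seven.)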
* */ static int cipso_v4_map_cat_rng_valid(const struct cipso_v4_doi *doi_def, const unsigned char *rngcat, u32 rngcat_len) { u16 cat_high; u16 cat_low; u32 cat_prev = CIPSO_V4_MAX_REM_CATS + 1; u32 iter; if (doi_def->type != CIPSO_V4_MAP_PASS || rngcat_len & 0x01) return -EFAULT; for (iter = 0; iter < rngcat_len; iter += 4) { cat_high = get_unaligned_be16(&rngcat[iter]); if ((iter + 4) <= rngcat_len) cat_low = get_unaligned_be16(&rngcat[iter + 2]); else cat_low = 0; if (cat_high > cat_prev) return -EFAULT; cat_prev = cat_low; } return 0; } /** * cipso_v4_map_cat_rng_hton - Perform a category mapping from host to network * @doi_def: the DOI definition * @secattr: the security attributes * @net_cat: the zero'd out category list in network/CIPSO format * @net_cat_len: the length of the CIPSO category list in bytes * * Description: * Perform a label mapping to translate a local MLS category bitmap to the * correct CIPSO category list using the given DOI definition. Returns the * size in bytes of the network category bitmap on success, negative values * otherwise. * */ static int cipso_v4_map_cat_rng_hton(const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr, unsigned char *net_cat, u32 net_cat_len) { int iter = -1; u16 array[CIPSO_V4_TAG_RNG_CAT_MAX * 2]; u32 array_cnt = 0; u32 cat_size = 0; /* make sure we don't overflow the 'array[]' variable */ if (net_cat_len > (CIPSO_V4_OPT_LEN_MAX - CIPSO_V4_HDR_LEN - CIPSO_V4_TAG_RNG_BLEN)) return -ENOSPC; for (;;) { iter = netlbl_secattr_catmap_walk(secattr->attr.mls.cat, iter + 1); if (iter < 0) break; cat_size += (iter == 0 ? 0 : sizeof(u16)); if (cat_size > net_cat_len) return -ENOSPC; array[array_cnt++] = iter; iter = netlbl_secattr_catmap_walk_rng(secattr->attr.mls.cat, iter); if (iter < 0) return -EFAULT; cat_size += sizeof(u16); if (cat_size > net_cat_len) return -ENOSPC; array[array_cnt++] = iter; } for (iter = 0; array_cnt > 0;) { *((__be16 *)&net_cat[iter]) = htons(array[--array_cnt]); iter += 2; array_cnt--; if (array[array_cnt] != 0) { *((__be16 *)&net_cat[iter]) = htons(array[array_cnt]); iter += 2; } } return cat_size; } /** * cipso_v4_map_cat_rng_ntoh - Perform a category mapping from network to host * @doi_def: the DOI definition * @net_cat: the category list in network/CIPSO format * @net_cat_len: the length of the CIPSO bitmap in bytes * @secattr: the security attributes * * Description: * Perform a label mapping to translate a CIPSO category list to the correct * local MLS category bitmap using the given DOI definition. Returns zero on * success, negative values on failure. * */ static int cipso_v4_map_cat_rng_ntoh(const struct cipso_v4_doi *doi_def, const unsigned char *net_cat, u32 net_cat_len, struct netlbl_lsm_secattr *secattr) { int ret_val; u32 net_iter; u16 cat_low; u16 cat_high; for (net_iter = 0; net_iter < net_cat_len; net_iter += 4) { cat_high = get_unaligned_be16(&net_cat[net_iter]); if ((net_iter + 4) <= net_cat_len) cat_low = get_unaligned_be16(&net_cat[net_iter + 2]); else cat_low = 0; ret_val = netlbl_secattr_catmap_setrng(secattr->attr.mls.cat, cat_low, cat_high, GFP_ATOMIC); if (ret_val != 0) return ret_val; } return 0; } /* * Protocol Handling Functions */ /** * cipso_v4_gentag_hdr - Generate a CIPSO option header * @doi_def: the DOI definition * @len: the total tag length in bytes, not including this header * @buf: the CIPSO option buffer * * Description: * Write a CIPSO header into the beginning of @buffer. 
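* (Illustrative layout, derived from the code below and not part of
* the original comment: for DOI 3 and a 10-byte tag, buf[0] =
* IPOPT_CIPSO, buf[1] = CIPSO_V4_HDR_LEN + 10 = 16, and buf[2..5]
* carry the DOI in network byte order, i.e. 00 00 00 03.)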
* */ static void cipso_v4_gentag_hdr(const struct cipso_v4_doi *doi_def, unsigned char *buf, u32 len) { buf[0] = IPOPT_CIPSO; buf[1] = CIPSO_V4_HDR_LEN + len; *(__be32 *)&buf[2] = htonl(doi_def->doi); } /** * cipso_v4_gentag_rbm - Generate a CIPSO restricted bitmap tag (type #1) * @doi_def: the DOI definition * @secattr: the security attributes * @buffer: the option buffer * @buffer_len: length of buffer in bytes * * Description: * Generate a CIPSO option using the restricted bitmap tag, tag type #1. The * actual buffer length may be larger than the indicated size due to * translation between host and network category bitmaps. Returns the size of * the tag on success, negative values on failure. * */ static int cipso_v4_gentag_rbm(const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr, unsigned char *buffer, u32 buffer_len) { int ret_val; u32 tag_len; u32 level; if ((secattr->flags & NETLBL_SECATTR_MLS_LVL) == 0) return -EPERM; ret_val = cipso_v4_map_lvl_hton(doi_def, secattr->attr.mls.lvl, &level); if (ret_val != 0) return ret_val; if (secattr->flags & NETLBL_SECATTR_MLS_CAT) { ret_val = cipso_v4_map_cat_rbm_hton(doi_def, secattr, &buffer[4], buffer_len - 4); if (ret_val < 0) return ret_val; /* This will send packets using the "optimized" format when * possible as specified in section 3.4.2.6 of the * CIPSO draft. */ if (cipso_v4_rbm_optfmt && ret_val > 0 && ret_val <= 10) tag_len = 14; else tag_len = 4 + ret_val; } else tag_len = 4; buffer[0] = CIPSO_V4_TAG_RBITMAP; buffer[1] = tag_len; buffer[3] = level; return tag_len; } /** * cipso_v4_parsetag_rbm - Parse a CIPSO restricted bitmap tag * @doi_def: the DOI definition * @tag: the CIPSO tag * @secattr: the security attributes * * Description: * Parse a CIPSO restricted bitmap tag (tag type #1) and return the security * attributes in @secattr. Return zero on success, negatives values on * failure. * */ static int cipso_v4_parsetag_rbm(const struct cipso_v4_doi *doi_def, const unsigned char *tag, struct netlbl_lsm_secattr *secattr) { int ret_val; u8 tag_len = tag[1]; u32 level; ret_val = cipso_v4_map_lvl_ntoh(doi_def, tag[3], &level); if (ret_val != 0) return ret_val; secattr->attr.mls.lvl = level; secattr->flags |= NETLBL_SECATTR_MLS_LVL; if (tag_len > 4) { secattr->attr.mls.cat = netlbl_secattr_catmap_alloc(GFP_ATOMIC); if (secattr->attr.mls.cat == NULL) return -ENOMEM; ret_val = cipso_v4_map_cat_rbm_ntoh(doi_def, &tag[4], tag_len - 4, secattr); if (ret_val != 0) { netlbl_secattr_catmap_free(secattr->attr.mls.cat); return ret_val; } secattr->flags |= NETLBL_SECATTR_MLS_CAT; } return 0; } /** * cipso_v4_gentag_enum - Generate a CIPSO enumerated tag (type #2) * @doi_def: the DOI definition * @secattr: the security attributes * @buffer: the option buffer * @buffer_len: length of buffer in bytes * * Description: * Generate a CIPSO option using the enumerated tag, tag type #2. Returns the * size of the tag on success, negative values on failure. 
* */ static int cipso_v4_gentag_enum(const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr, unsigned char *buffer, u32 buffer_len) { int ret_val; u32 tag_len; u32 level; if (!(secattr->flags & NETLBL_SECATTR_MLS_LVL)) return -EPERM; ret_val = cipso_v4_map_lvl_hton(doi_def, secattr->attr.mls.lvl, &level); if (ret_val != 0) return ret_val; if (secattr->flags & NETLBL_SECATTR_MLS_CAT) { ret_val = cipso_v4_map_cat_enum_hton(doi_def, secattr, &buffer[4], buffer_len - 4); if (ret_val < 0) return ret_val; tag_len = 4 + ret_val; } else tag_len = 4; buffer[0] = CIPSO_V4_TAG_ENUM; buffer[1] = tag_len; buffer[3] = level; return tag_len; } /** * cipso_v4_parsetag_enum - Parse a CIPSO enumerated tag * @doi_def: the DOI definition * @tag: the CIPSO tag * @secattr: the security attributes * * Description: * Parse a CIPSO enumerated tag (tag type #2) and return the security * attributes in @secattr. Return zero on success, negatives values on * failure. * */ static int cipso_v4_parsetag_enum(const struct cipso_v4_doi *doi_def, const unsigned char *tag, struct netlbl_lsm_secattr *secattr) { int ret_val; u8 tag_len = tag[1]; u32 level; ret_val = cipso_v4_map_lvl_ntoh(doi_def, tag[3], &level); if (ret_val != 0) return ret_val; secattr->attr.mls.lvl = level; secattr->flags |= NETLBL_SECATTR_MLS_LVL; if (tag_len > 4) { secattr->attr.mls.cat = netlbl_secattr_catmap_alloc(GFP_ATOMIC); if (secattr->attr.mls.cat == NULL) return -ENOMEM; ret_val = cipso_v4_map_cat_enum_ntoh(doi_def, &tag[4], tag_len - 4, secattr); if (ret_val != 0) { netlbl_secattr_catmap_free(secattr->attr.mls.cat); return ret_val; } secattr->flags |= NETLBL_SECATTR_MLS_CAT; } return 0; } /** * cipso_v4_gentag_rng - Generate a CIPSO ranged tag (type #5) * @doi_def: the DOI definition * @secattr: the security attributes * @buffer: the option buffer * @buffer_len: length of buffer in bytes * * Description: * Generate a CIPSO option using the ranged tag, tag type #5. Returns the * size of the tag on success, negative values on failure. * */ static int cipso_v4_gentag_rng(const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr, unsigned char *buffer, u32 buffer_len) { int ret_val; u32 tag_len; u32 level; if (!(secattr->flags & NETLBL_SECATTR_MLS_LVL)) return -EPERM; ret_val = cipso_v4_map_lvl_hton(doi_def, secattr->attr.mls.lvl, &level); if (ret_val != 0) return ret_val; if (secattr->flags & NETLBL_SECATTR_MLS_CAT) { ret_val = cipso_v4_map_cat_rng_hton(doi_def, secattr, &buffer[4], buffer_len - 4); if (ret_val < 0) return ret_val; tag_len = 4 + ret_val; } else tag_len = 4; buffer[0] = CIPSO_V4_TAG_RANGE; buffer[1] = tag_len; buffer[3] = level; return tag_len; } /** * cipso_v4_parsetag_rng - Parse a CIPSO ranged tag * @doi_def: the DOI definition * @tag: the CIPSO tag * @secattr: the security attributes * * Description: * Parse a CIPSO ranged tag (tag type #5) and return the security attributes * in @secattr. Return zero on success, negatives values on failure. 
* */ static int cipso_v4_parsetag_rng(const struct cipso_v4_doi *doi_def, const unsigned char *tag, struct netlbl_lsm_secattr *secattr) { int ret_val; u8 tag_len = tag[1]; u32 level; ret_val = cipso_v4_map_lvl_ntoh(doi_def, tag[3], &level); if (ret_val != 0) return ret_val; secattr->attr.mls.lvl = level; secattr->flags |= NETLBL_SECATTR_MLS_LVL; if (tag_len > 4) { secattr->attr.mls.cat = netlbl_secattr_catmap_alloc(GFP_ATOMIC); if (secattr->attr.mls.cat == NULL) return -ENOMEM; ret_val = cipso_v4_map_cat_rng_ntoh(doi_def, &tag[4], tag_len - 4, secattr); if (ret_val != 0) { netlbl_secattr_catmap_free(secattr->attr.mls.cat); return ret_val; } secattr->flags |= NETLBL_SECATTR_MLS_CAT; } return 0; } /** * cipso_v4_gentag_loc - Generate a CIPSO local tag (non-standard) * @doi_def: the DOI definition * @secattr: the security attributes * @buffer: the option buffer * @buffer_len: length of buffer in bytes * * Description: * Generate a CIPSO option using the local tag. Returns the size of the tag * on success, negative values on failure. * */ static int cipso_v4_gentag_loc(const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr, unsigned char *buffer, u32 buffer_len) { if (!(secattr->flags & NETLBL_SECATTR_SECID)) return -EPERM; buffer[0] = CIPSO_V4_TAG_LOCAL; buffer[1] = CIPSO_V4_TAG_LOC_BLEN; *(u32 *)&buffer[2] = secattr->attr.secid; return CIPSO_V4_TAG_LOC_BLEN; } /** * cipso_v4_parsetag_loc - Parse a CIPSO local tag * @doi_def: the DOI definition * @tag: the CIPSO tag * @secattr: the security attributes * * Description: * Parse a CIPSO local tag and return the security attributes in @secattr. * Return zero on success, negatives values on failure. * */ static int cipso_v4_parsetag_loc(const struct cipso_v4_doi *doi_def, const unsigned char *tag, struct netlbl_lsm_secattr *secattr) { secattr->attr.secid = *(u32 *)&tag[2]; secattr->flags |= NETLBL_SECATTR_SECID; return 0; } /** * cipso_v4_validate - Validate a CIPSO option * @option: the start of the option, on error it is set to point to the error * * Description: * This routine is called to validate a CIPSO option, it checks all of the * fields to ensure that they are at least valid, see the draft snippet below * for details. If the option is valid then a zero value is returned and * the value of @option is unchanged. If the option is invalid then a * non-zero value is returned and @option is adjusted to point to the * offending portion of the option. From the IETF draft ... * * "If any field within the CIPSO options, such as the DOI identifier, is not * recognized the IP datagram is discarded and an ICMP 'parameter problem' * (type 12) is generated and returned. The ICMP code field is set to 'bad * parameter' (code 0) and the pointer is set to the start of the CIPSO field * that is unrecognized." 
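* (Illustrative example, not part of the original comment: in the
* code below, a tag whose length field exceeds the remaining option
* bytes sets err_offset = opt_iter + 1, so @option is returned
* pointing at the offending tag-length octet.)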
* */ int cipso_v4_validate(const struct sk_buff *skb, unsigned char **option) { unsigned char *opt = *option; unsigned char *tag; unsigned char opt_iter; unsigned char err_offset = 0; u8 opt_len; u8 tag_len; struct cipso_v4_doi *doi_def = NULL; u32 tag_iter; /* caller already checks for length values that are too large */ opt_len = opt[1]; if (opt_len < 8) { err_offset = 1; goto validate_return; } rcu_read_lock(); doi_def = cipso_v4_doi_search(get_unaligned_be32(&opt[2])); if (doi_def == NULL) { err_offset = 2; goto validate_return_locked; } opt_iter = CIPSO_V4_HDR_LEN; tag = opt + opt_iter; while (opt_iter < opt_len) { for (tag_iter = 0; doi_def->tags[tag_iter] != tag[0];) if (doi_def->tags[tag_iter] == CIPSO_V4_TAG_INVALID || ++tag_iter == CIPSO_V4_TAG_MAXCNT) { err_offset = opt_iter; goto validate_return_locked; } tag_len = tag[1]; if (tag_len > (opt_len - opt_iter)) { err_offset = opt_iter + 1; goto validate_return_locked; } switch (tag[0]) { case CIPSO_V4_TAG_RBITMAP: if (tag_len < CIPSO_V4_TAG_RBM_BLEN) { err_offset = opt_iter + 1; goto validate_return_locked; } /* We are already going to do all the verification * necessary at the socket layer so from our point of * view it is safe to turn these checks off (and less * work), however, the CIPSO draft says we should do * all the CIPSO validations here but it doesn't * really specify _exactly_ what we need to validate * ... so, just make it a sysctl tunable. */ if (cipso_v4_rbm_strictvalid) { if (cipso_v4_map_lvl_valid(doi_def, tag[3]) < 0) { err_offset = opt_iter + 3; goto validate_return_locked; } if (tag_len > CIPSO_V4_TAG_RBM_BLEN && cipso_v4_map_cat_rbm_valid(doi_def, &tag[4], tag_len - 4) < 0) { err_offset = opt_iter + 4; goto validate_return_locked; } } break; case CIPSO_V4_TAG_ENUM: if (tag_len < CIPSO_V4_TAG_ENUM_BLEN) { err_offset = opt_iter + 1; goto validate_return_locked; } if (cipso_v4_map_lvl_valid(doi_def, tag[3]) < 0) { err_offset = opt_iter + 3; goto validate_return_locked; } if (tag_len > CIPSO_V4_TAG_ENUM_BLEN && cipso_v4_map_cat_enum_valid(doi_def, &tag[4], tag_len - 4) < 0) { err_offset = opt_iter + 4; goto validate_return_locked; } break; case CIPSO_V4_TAG_RANGE: if (tag_len < CIPSO_V4_TAG_RNG_BLEN) { err_offset = opt_iter + 1; goto validate_return_locked; } if (cipso_v4_map_lvl_valid(doi_def, tag[3]) < 0) { err_offset = opt_iter + 3; goto validate_return_locked; } if (tag_len > CIPSO_V4_TAG_RNG_BLEN && cipso_v4_map_cat_rng_valid(doi_def, &tag[4], tag_len - 4) < 0) { err_offset = opt_iter + 4; goto validate_return_locked; } break; case CIPSO_V4_TAG_LOCAL: /* This is a non-standard tag that we only allow for * local connections, so if the incoming interface is * not the loopback device drop the packet. */ if (!(skb->dev->flags & IFF_LOOPBACK)) { err_offset = opt_iter; goto validate_return_locked; } if (tag_len != CIPSO_V4_TAG_LOC_BLEN) { err_offset = opt_iter + 1; goto validate_return_locked; } break; default: err_offset = opt_iter; goto validate_return_locked; } tag += tag_len; opt_iter += tag_len; } validate_return_locked: rcu_read_unlock(); validate_return: *option = opt + err_offset; return err_offset; } /** * cipso_v4_error - Send the correct response for a bad packet * @skb: the packet * @error: the error code * @gateway: CIPSO gateway flag * * Description: * Based on the error code given in @error, send an ICMP error message back to * the originating host. From the IETF draft ... 
* * "If the contents of the CIPSO [option] are valid but the security label is * outside of the configured host or port label range, the datagram is * discarded and an ICMP 'destination unreachable' (type 3) is generated and * returned. The code field of the ICMP is set to 'communication with * destination network administratively prohibited' (code 9) or to * 'communication with destination host administratively prohibited' * (code 10). The value of the code is dependent on whether the originator * of the ICMP message is acting as a CIPSO host or a CIPSO gateway. The * recipient of the ICMP message MUST be able to handle either value. The * same procedure is performed if a CIPSO [option] can not be added to an * IP packet because it is too large to fit in the IP options area." * * "If the error is triggered by receipt of an ICMP message, the message is * discarded and no response is permitted (consistent with general ICMP * processing rules)." * */ void cipso_v4_error(struct sk_buff *skb, int error, u32 gateway) { if (ip_hdr(skb)->protocol == IPPROTO_ICMP || error != -EACCES) return; if (gateway) icmp_send(skb, ICMP_DEST_UNREACH, ICMP_NET_ANO, 0); else icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_ANO, 0); } /** * cipso_v4_genopt - Generate a CIPSO option * @buf: the option buffer * @buf_len: the size of opt_buf * @doi_def: the CIPSO DOI to use * @secattr: the security attributes * * Description: * Generate a CIPSO option using the DOI definition and security attributes * passed to the function. Returns the length of the option on success and * negative values on failure. * */ static int cipso_v4_genopt(unsigned char *buf, u32 buf_len, const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr) { int ret_val; u32 iter; if (buf_len <= CIPSO_V4_HDR_LEN) return -ENOSPC; /* XXX - This code assumes only one tag per CIPSO option which isn't * really a good assumption to make but since we only support the MAC * tags right now it is a safe assumption. */ iter = 0; do { memset(buf, 0, buf_len); switch (doi_def->tags[iter]) { case CIPSO_V4_TAG_RBITMAP: ret_val = cipso_v4_gentag_rbm(doi_def, secattr, &buf[CIPSO_V4_HDR_LEN], buf_len - CIPSO_V4_HDR_LEN); break; case CIPSO_V4_TAG_ENUM: ret_val = cipso_v4_gentag_enum(doi_def, secattr, &buf[CIPSO_V4_HDR_LEN], buf_len - CIPSO_V4_HDR_LEN); break; case CIPSO_V4_TAG_RANGE: ret_val = cipso_v4_gentag_rng(doi_def, secattr, &buf[CIPSO_V4_HDR_LEN], buf_len - CIPSO_V4_HDR_LEN); break; case CIPSO_V4_TAG_LOCAL: ret_val = cipso_v4_gentag_loc(doi_def, secattr, &buf[CIPSO_V4_HDR_LEN], buf_len - CIPSO_V4_HDR_LEN); break; default: return -EPERM; } iter++; } while (ret_val < 0 && iter < CIPSO_V4_TAG_MAXCNT && doi_def->tags[iter] != CIPSO_V4_TAG_INVALID); if (ret_val < 0) return ret_val; cipso_v4_gentag_hdr(doi_def, buf, ret_val); return CIPSO_V4_HDR_LEN + ret_val; } /** * cipso_v4_sock_setattr - Add a CIPSO option to a socket * @sk: the socket * @doi_def: the CIPSO DOI to use * @secattr: the specific security attributes of the socket * * Description: * Set the CIPSO option on the given socket using the DOI definition and * security attributes passed to the function. This function requires * exclusive access to @sk, which means it either needs to be in the * process of being created or locked. Returns zero on success and negative * values on failure. 
* */ int cipso_v4_sock_setattr(struct sock *sk, const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr) { int ret_val = -EPERM; unsigned char *buf = NULL; u32 buf_len; u32 opt_len; struct ip_options *opt = NULL; struct inet_sock *sk_inet; struct inet_connection_sock *sk_conn; /* In the case of sock_create_lite(), the sock->sk field is not * defined yet but it is not a problem as the only users of these * "lite" PF_INET sockets are functions which do an accept() call * afterwards so we will label the socket as part of the accept(). */ if (sk == NULL) return 0; /* We allocate the maximum CIPSO option size here so we are probably * being a little wasteful, but it makes our life _much_ easier later * on and after all we are only talking about 40 bytes. */ buf_len = CIPSO_V4_OPT_LEN_MAX; buf = kmalloc(buf_len, GFP_ATOMIC); if (buf == NULL) { ret_val = -ENOMEM; goto socket_setattr_failure; } ret_val = cipso_v4_genopt(buf, buf_len, doi_def, secattr); if (ret_val < 0) goto socket_setattr_failure; buf_len = ret_val; /* We can't use ip_options_get() directly because it makes a call to * ip_options_get_alloc() which allocates memory with GFP_KERNEL and * we won't always have CAP_NET_RAW even though we _always_ want to * set the IPOPT_CIPSO option. */ opt_len = (buf_len + 3) & ~3; opt = kzalloc(sizeof(*opt) + opt_len, GFP_ATOMIC); if (opt == NULL) { ret_val = -ENOMEM; goto socket_setattr_failure; } memcpy(opt->__data, buf, buf_len); opt->optlen = opt_len; opt->cipso = sizeof(struct iphdr); kfree(buf); buf = NULL; sk_inet = inet_sk(sk); if (sk_inet->is_icsk) { sk_conn = inet_csk(sk); if (sk_inet->opt) sk_conn->icsk_ext_hdr_len -= sk_inet->opt->optlen; sk_conn->icsk_ext_hdr_len += opt->optlen; sk_conn->icsk_sync_mss(sk, sk_conn->icsk_pmtu_cookie); } opt = xchg(&sk_inet->opt, opt); kfree(opt); return 0; socket_setattr_failure: kfree(buf); kfree(opt); return ret_val; } /** * cipso_v4_req_setattr - Add a CIPSO option to a connection request socket * @req: the connection request socket * @doi_def: the CIPSO DOI to use * @secattr: the specific security attributes of the socket * * Description: * Set the CIPSO option on the given socket using the DOI definition and * security attributes passed to the function. Returns zero on success and * negative values on failure. * */ int cipso_v4_req_setattr(struct request_sock *req, const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr) { int ret_val = -EPERM; unsigned char *buf = NULL; u32 buf_len; u32 opt_len; struct ip_options *opt = NULL; struct inet_request_sock *req_inet; /* We allocate the maximum CIPSO option size here so we are probably * being a little wasteful, but it makes our life _much_ easier later * on and after all we are only talking about 40 bytes. */ buf_len = CIPSO_V4_OPT_LEN_MAX; buf = kmalloc(buf_len, GFP_ATOMIC); if (buf == NULL) { ret_val = -ENOMEM; goto req_setattr_failure; } ret_val = cipso_v4_genopt(buf, buf_len, doi_def, secattr); if (ret_val < 0) goto req_setattr_failure; buf_len = ret_val; /* We can't use ip_options_get() directly because it makes a call to * ip_options_get_alloc() which allocates memory with GFP_KERNEL and * we won't always have CAP_NET_RAW even though we _always_ want to * set the IPOPT_CIPSO option. 
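	 * As a worked example of the rounding that follows: a generated CIPSO
	 * option of buf_len == 10 bytes is padded out to opt_len == 12
	 * ((10 + 3) & ~3) since the IP options area must occupy a multiple of
	 * 4 bytes in the IP header.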
*/ opt_len = (buf_len + 3) & ~3; opt = kzalloc(sizeof(*opt) + opt_len, GFP_ATOMIC); if (opt == NULL) { ret_val = -ENOMEM; goto req_setattr_failure; } memcpy(opt->__data, buf, buf_len); opt->optlen = opt_len; opt->cipso = sizeof(struct iphdr); kfree(buf); buf = NULL; req_inet = inet_rsk(req); opt = xchg(&req_inet->opt, opt); kfree(opt); return 0; req_setattr_failure: kfree(buf); kfree(opt); return ret_val; } /** * cipso_v4_delopt - Delete the CIPSO option from a set of IP options * @opt_ptr: IP option pointer * * Description: * Deletes the CIPSO IP option from a set of IP options and makes the necessary * adjustments to the IP option structure. Returns zero on success, negative * values on failure. * */ static int cipso_v4_delopt(struct ip_options **opt_ptr) { int hdr_delta = 0; struct ip_options *opt = *opt_ptr; if (opt->srr || opt->rr || opt->ts || opt->router_alert) { u8 cipso_len; u8 cipso_off; unsigned char *cipso_ptr; int iter; int optlen_new; cipso_off = opt->cipso - sizeof(struct iphdr); cipso_ptr = &opt->__data[cipso_off]; cipso_len = cipso_ptr[1]; if (opt->srr > opt->cipso) opt->srr -= cipso_len; if (opt->rr > opt->cipso) opt->rr -= cipso_len; if (opt->ts > opt->cipso) opt->ts -= cipso_len; if (opt->router_alert > opt->cipso) opt->router_alert -= cipso_len; opt->cipso = 0; memmove(cipso_ptr, cipso_ptr + cipso_len, opt->optlen - cipso_off - cipso_len); /* determining the new total option length is tricky because of * the padding necessary, the only thing i can think to do at * this point is walk the options one-by-one, skipping the * padding at the end to determine the actual option size and * from there we can determine the new total option length */ iter = 0; optlen_new = 0; while (iter < opt->optlen) if (opt->__data[iter] != IPOPT_NOP) { iter += opt->__data[iter + 1]; optlen_new = iter; } else iter++; hdr_delta = opt->optlen; opt->optlen = (optlen_new + 3) & ~3; hdr_delta -= opt->optlen; } else { /* only the cipso option was present on the socket so we can * remove the entire option struct */ *opt_ptr = NULL; hdr_delta = opt->optlen; kfree(opt); } return hdr_delta; } /** * cipso_v4_sock_delattr - Delete the CIPSO option from a socket * @sk: the socket * * Description: * Removes the CIPSO option from a socket, if present. * */ void cipso_v4_sock_delattr(struct sock *sk) { int hdr_delta; struct ip_options *opt; struct inet_sock *sk_inet; sk_inet = inet_sk(sk); opt = sk_inet->opt; if (opt == NULL || opt->cipso == 0) return; hdr_delta = cipso_v4_delopt(&sk_inet->opt); if (sk_inet->is_icsk && hdr_delta > 0) { struct inet_connection_sock *sk_conn = inet_csk(sk); sk_conn->icsk_ext_hdr_len -= hdr_delta; sk_conn->icsk_sync_mss(sk, sk_conn->icsk_pmtu_cookie); } } /** * cipso_v4_req_delattr - Delete the CIPSO option from a request socket * @reg: the request socket * * Description: * Removes the CIPSO option from a request socket, if present. * */ void cipso_v4_req_delattr(struct request_sock *req) { struct ip_options *opt; struct inet_request_sock *req_inet; req_inet = inet_rsk(req); opt = req_inet->opt; if (opt == NULL || opt->cipso == 0) return; cipso_v4_delopt(&req_inet->opt); } /** * cipso_v4_getattr - Helper function for the cipso_v4_*_getattr functions * @cipso: the CIPSO v4 option * @secattr: the security attributes * * Description: * Inspect @cipso and return the security attributes in @secattr. Returns zero * on success and negative values on failure. 
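 *
 * For reference, the layout assumed by the parsing below is the standard
 * CIPSO option format: byte 0 is the option type, byte 1 the option length,
 * bytes 2-5 the DOI, and the first tag starts at byte 6 (CIPSO_V4_HDR_LEN)
 * with its tag type octet.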
 *
 */
static int cipso_v4_getattr(const unsigned char *cipso,
			    struct netlbl_lsm_secattr *secattr)
{
	int ret_val = -ENOMSG;
	u32 doi;
	struct cipso_v4_doi *doi_def;

	if (cipso_v4_cache_check(cipso, cipso[1], secattr) == 0)
		return 0;

	doi = get_unaligned_be32(&cipso[2]);
	rcu_read_lock();
	doi_def = cipso_v4_doi_search(doi);
	if (doi_def == NULL)
		goto getattr_return;
	/* XXX - This code assumes only one tag per CIPSO option which isn't
	 * really a good assumption to make but since we only support the MAC
	 * tags right now it is a safe assumption. */
	switch (cipso[6]) {
	case CIPSO_V4_TAG_RBITMAP:
		ret_val = cipso_v4_parsetag_rbm(doi_def, &cipso[6], secattr);
		break;
	case CIPSO_V4_TAG_ENUM:
		ret_val = cipso_v4_parsetag_enum(doi_def, &cipso[6], secattr);
		break;
	case CIPSO_V4_TAG_RANGE:
		ret_val = cipso_v4_parsetag_rng(doi_def, &cipso[6], secattr);
		break;
	case CIPSO_V4_TAG_LOCAL:
		ret_val = cipso_v4_parsetag_loc(doi_def, &cipso[6], secattr);
		break;
	}
	if (ret_val == 0)
		secattr->type = NETLBL_NLTYPE_CIPSOV4;

getattr_return:
	rcu_read_unlock();
	return ret_val;
}

/**
 * cipso_v4_sock_getattr - Get the security attributes from a sock
 * @sk: the sock
 * @secattr: the security attributes
 *
 * Description:
 * Query @sk to see if there is a CIPSO option attached to the sock and if
 * there is return the CIPSO security attributes in @secattr.  This function
 * requires that @sk be locked, or privately held, but it does not do any
 * locking itself.  Returns zero on success and negative values on failure.
 *
 */
int cipso_v4_sock_getattr(struct sock *sk, struct netlbl_lsm_secattr *secattr)
{
	struct ip_options *opt;

	opt = inet_sk(sk)->opt;
	if (opt == NULL || opt->cipso == 0)
		return -ENOMSG;

	return cipso_v4_getattr(opt->__data + opt->cipso - sizeof(struct iphdr),
				secattr);
}

/**
 * cipso_v4_skbuff_setattr - Set the CIPSO option on a packet
 * @skb: the packet
 * @doi_def: the CIPSO DOI to use
 * @secattr: the security attributes
 *
 * Description:
 * Set the CIPSO option on the given packet based on the security attributes.
 * Returns zero on success and negative values on failure.
* */ int cipso_v4_skbuff_setattr(struct sk_buff *skb, const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr) { int ret_val; struct iphdr *iph; struct ip_options *opt = &IPCB(skb)->opt; unsigned char buf[CIPSO_V4_OPT_LEN_MAX]; u32 buf_len = CIPSO_V4_OPT_LEN_MAX; u32 opt_len; int len_delta; ret_val = cipso_v4_genopt(buf, buf_len, doi_def, secattr); if (ret_val < 0) return ret_val; buf_len = ret_val; opt_len = (buf_len + 3) & ~3; /* we overwrite any existing options to ensure that we have enough * room for the CIPSO option, the reason is that we _need_ to guarantee * that the security label is applied to the packet - we do the same * thing when using the socket options and it hasn't caused a problem, * if we need to we can always revisit this choice later */ len_delta = opt_len - opt->optlen; /* if we don't ensure enough headroom we could panic on the skb_push() * call below so make sure we have enough, we are also "mangling" the * packet so we should probably do a copy-on-write call anyway */ ret_val = skb_cow(skb, skb_headroom(skb) + len_delta); if (ret_val < 0) return ret_val; if (len_delta > 0) { /* we assume that the header + opt->optlen have already been * "pushed" in ip_options_build() or similar */ iph = ip_hdr(skb); skb_push(skb, len_delta); memmove((char *)iph - len_delta, iph, iph->ihl << 2); skb_reset_network_header(skb); iph = ip_hdr(skb); } else if (len_delta < 0) { iph = ip_hdr(skb); memset(iph + 1, IPOPT_NOP, opt->optlen); } else iph = ip_hdr(skb); if (opt->optlen > 0) memset(opt, 0, sizeof(*opt)); opt->optlen = opt_len; opt->cipso = sizeof(struct iphdr); opt->is_changed = 1; /* we have to do the following because we are being called from a * netfilter hook which means the packet already has had the header * fields populated and the checksum calculated - yes this means we * are doing more work than needed but we do it to keep the core * stack clean and tidy */ memcpy(iph + 1, buf, buf_len); if (opt_len > buf_len) memset((char *)(iph + 1) + buf_len, 0, opt_len - buf_len); if (len_delta != 0) { iph->ihl = 5 + (opt_len >> 2); iph->tot_len = htons(skb->len); } ip_send_check(iph); return 0; } /** * cipso_v4_skbuff_delattr - Delete any CIPSO options from a packet * @skb: the packet * * Description: * Removes any and all CIPSO options from the given packet. Returns zero on * success, negative values on failure. * */ int cipso_v4_skbuff_delattr(struct sk_buff *skb) { int ret_val; struct iphdr *iph; struct ip_options *opt = &IPCB(skb)->opt; unsigned char *cipso_ptr; if (opt->cipso == 0) return 0; /* since we are changing the packet we should make a copy */ ret_val = skb_cow(skb, skb_headroom(skb)); if (ret_val < 0) return ret_val; /* the easiest thing to do is just replace the cipso option with noop * options since we don't change the size of the packet, although we * still need to recalculate the checksum */ iph = ip_hdr(skb); cipso_ptr = (unsigned char *)iph + opt->cipso; memset(cipso_ptr, IPOPT_NOOP, cipso_ptr[1]); opt->cipso = 0; opt->is_changed = 1; ip_send_check(iph); return 0; } /** * cipso_v4_skbuff_getattr - Get the security attributes from the CIPSO option * @skb: the packet * @secattr: the security attributes * * Description: * Parse the given packet's CIPSO option and return the security attributes. * Returns zero on success and negative values on failure. 
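 *
 * A minimal sketch of a typical call site (hypothetical, for illustration
 * only):
 *
 *	struct netlbl_lsm_secattr secattr;
 *	int ret;
 *
 *	netlbl_secattr_init(&secattr);
 *	ret = cipso_v4_skbuff_getattr(skb, &secattr);
 *	if (ret == 0)
 *		... use secattr.attr.mls.lvl and secattr.attr.mls.cat ...
 *	netlbl_secattr_destroy(&secattr);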
 *
 */
int cipso_v4_skbuff_getattr(const struct sk_buff *skb,
			    struct netlbl_lsm_secattr *secattr)
{
	return cipso_v4_getattr(CIPSO_V4_OPTPTR(skb), secattr);
}

/*
 * Setup Functions
 */

/**
 * cipso_v4_init - Initialize the CIPSO module
 *
 * Description:
 * Initialize the CIPSO module and prepare it for use.  Returns zero on
 * success; a failure to initialize the label mapping cache is treated as
 * fatal and triggers a panic().
 *
 */
static int __init cipso_v4_init(void)
{
	int ret_val;

	ret_val = cipso_v4_cache_init();
	if (ret_val != 0)
		panic("Failed to initialize the CIPSO/IPv4 cache (%d)\n",
		      ret_val);

	return 0;
}

subsys_initcall(cipso_v4_init);
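/*
 * The tunables above (cipso_v4_cache_enabled, cipso_v4_cache_bucketsize,
 * cipso_v4_rbm_optfmt and cipso_v4_rbm_strictvalid) are deliberately
 * non-static; in mainline kernels of this era they are exposed through
 * sysctl as net.ipv4.cipso_cache_enable, net.ipv4.cipso_cache_bucket_size,
 * net.ipv4.cipso_rbm_optfmt and net.ipv4.cipso_rbm_strictvalid (see
 * sysctl_net_ipv4.c).
 */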
/* * CIPSO - Commercial IP Security Option * * This is an implementation of the CIPSO 2.2 protocol as specified in * draft-ietf-cipso-ipsecurity-01.txt with additional tag types as found in * FIPS-188. While CIPSO never became a full IETF RFC standard many vendors * have chosen to adopt the protocol and over the years it has become a * de-facto standard for labeled networking. * * The CIPSO draft specification can be found in the kernel's Documentation * directory as well as the following URL: * http://tools.ietf.org/id/draft-ietf-cipso-ipsecurity-01.txt * The FIPS-188 specification can be found at the following URL: * http://www.itl.nist.gov/fipspubs/fip188.htm * * Author: Paul Moore <paul.moore@hp.com> * */ /* * (c) Copyright Hewlett-Packard Development Company, L.P., 2006, 2008 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See * the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/init.h> #include <linux/types.h> #include <linux/rcupdate.h> #include <linux/list.h> #include <linux/spinlock.h> #include <linux/string.h> #include <linux/jhash.h> #include <linux/audit.h> #include <linux/slab.h> #include <net/ip.h> #include <net/icmp.h> #include <net/tcp.h> #include <net/netlabel.h> #include <net/cipso_ipv4.h> #include <asm/atomic.h> #include <asm/bug.h> #include <asm/unaligned.h> /* List of available DOI definitions */ /* XXX - This currently assumes a minimal number of different DOIs in use, * if in practice there are a lot of different DOIs this list should * probably be turned into a hash table or something similar so we * can do quick lookups. */ static DEFINE_SPINLOCK(cipso_v4_doi_list_lock); static LIST_HEAD(cipso_v4_doi_list); /* Label mapping cache */ int cipso_v4_cache_enabled = 1; int cipso_v4_cache_bucketsize = 10; #define CIPSO_V4_CACHE_BUCKETBITS 7 #define CIPSO_V4_CACHE_BUCKETS (1 << CIPSO_V4_CACHE_BUCKETBITS) #define CIPSO_V4_CACHE_REORDERLIMIT 10 struct cipso_v4_map_cache_bkt { spinlock_t lock; u32 size; struct list_head list; }; struct cipso_v4_map_cache_entry { u32 hash; unsigned char *key; size_t key_len; struct netlbl_lsm_cache *lsm_data; u32 activity; struct list_head list; }; static struct cipso_v4_map_cache_bkt *cipso_v4_cache = NULL; /* Restricted bitmap (tag #1) flags */ int cipso_v4_rbm_optfmt = 0; int cipso_v4_rbm_strictvalid = 1; /* * Protocol Constants */ /* Maximum size of the CIPSO IP option, derived from the fact that the maximum * IPv4 header size is 60 bytes and the base IPv4 header is 20 bytes long. */ #define CIPSO_V4_OPT_LEN_MAX 40 /* Length of the base CIPSO option, this includes the option type (1 byte), the * option length (1 byte), and the DOI (4 bytes). */ #define CIPSO_V4_HDR_LEN 6 /* Base length of the restrictive category bitmap tag (tag #1). */ #define CIPSO_V4_TAG_RBM_BLEN 4 /* Base length of the enumerated category tag (tag #2). */ #define CIPSO_V4_TAG_ENUM_BLEN 4 /* Base length of the ranged categories bitmap tag (tag #5). 
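 * As with the other MLS tags this covers the tag type (1 byte), the tag
 * length (1 byte), an alignment octet (1 byte) and the sensitivity level
 * (1 byte); the 16-bit category range pairs follow.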
*/ #define CIPSO_V4_TAG_RNG_BLEN 4 /* The maximum number of category ranges permitted in the ranged category tag * (tag #5). You may note that the IETF draft states that the maximum number * of category ranges is 7, but if the low end of the last category range is * zero then it is possible to fit 8 category ranges because the zero should * be omitted. */ #define CIPSO_V4_TAG_RNG_CAT_MAX 8 /* Base length of the local tag (non-standard tag). * Tag definition (may change between kernel versions) * * 0 8 16 24 32 * +----------+----------+----------+----------+ * | 10000000 | 00000110 | 32-bit secid value | * +----------+----------+----------+----------+ * | in (host byte order)| * +----------+----------+ * */ #define CIPSO_V4_TAG_LOC_BLEN 6 /* * Helper Functions */ /** * cipso_v4_bitmap_walk - Walk a bitmap looking for a bit * @bitmap: the bitmap * @bitmap_len: length in bits * @offset: starting offset * @state: if non-zero, look for a set (1) bit else look for a cleared (0) bit * * Description: * Starting at @offset, walk the bitmap from left to right until either the * desired bit is found or we reach the end. Return the bit offset, -1 if * not found, or -2 if error. */ static int cipso_v4_bitmap_walk(const unsigned char *bitmap, u32 bitmap_len, u32 offset, u8 state) { u32 bit_spot; u32 byte_offset; unsigned char bitmask; unsigned char byte; /* gcc always rounds to zero when doing integer division */ byte_offset = offset / 8; byte = bitmap[byte_offset]; bit_spot = offset; bitmask = 0x80 >> (offset % 8); while (bit_spot < bitmap_len) { if ((state && (byte & bitmask) == bitmask) || (state == 0 && (byte & bitmask) == 0)) return bit_spot; bit_spot++; bitmask >>= 1; if (bitmask == 0) { byte = bitmap[++byte_offset]; bitmask = 0x80; } } return -1; } /** * cipso_v4_bitmap_setbit - Sets a single bit in a bitmap * @bitmap: the bitmap * @bit: the bit * @state: if non-zero, set the bit (1) else clear the bit (0) * * Description: * Set a single bit in the bitmask. Returns zero on success, negative values * on error. */ static void cipso_v4_bitmap_setbit(unsigned char *bitmap, u32 bit, u8 state) { u32 byte_spot; u8 bitmask; /* gcc always rounds to zero when doing integer division */ byte_spot = bit / 8; bitmask = 0x80 >> (bit % 8); if (state) bitmap[byte_spot] |= bitmask; else bitmap[byte_spot] &= ~bitmask; } /** * cipso_v4_cache_entry_free - Frees a cache entry * @entry: the entry to free * * Description: * This function frees the memory associated with a cache entry including the * LSM cache data if there are no longer any users, i.e. reference count == 0. * */ static void cipso_v4_cache_entry_free(struct cipso_v4_map_cache_entry *entry) { if (entry->lsm_data) netlbl_secattr_cache_free(entry->lsm_data); kfree(entry->key); kfree(entry); } /** * cipso_v4_map_cache_hash - Hashing function for the CIPSO cache * @key: the hash key * @key_len: the length of the key in bytes * * Description: * The CIPSO tag hashing function. Returns a 32-bit hash value. * */ static u32 cipso_v4_map_cache_hash(const unsigned char *key, u32 key_len) { return jhash(key, key_len, 0); } /* * Label Mapping Cache Functions */ /** * cipso_v4_cache_init - Initialize the CIPSO cache * * Description: * Initializes the CIPSO label mapping cache, this function should be called * before any of the other functions defined in this file. Returns zero on * success, negative values on error. 
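 *
 * The cache itself is a hash table of CIPSO_V4_CACHE_BUCKETS (1 << 7 == 128)
 * buckets; lookups select a bucket by masking the jhash of the raw option
 * bytes with CIPSO_V4_CACHE_BUCKETS - 1.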
* */ static int cipso_v4_cache_init(void) { u32 iter; cipso_v4_cache = kcalloc(CIPSO_V4_CACHE_BUCKETS, sizeof(struct cipso_v4_map_cache_bkt), GFP_KERNEL); if (cipso_v4_cache == NULL) return -ENOMEM; for (iter = 0; iter < CIPSO_V4_CACHE_BUCKETS; iter++) { spin_lock_init(&cipso_v4_cache[iter].lock); cipso_v4_cache[iter].size = 0; INIT_LIST_HEAD(&cipso_v4_cache[iter].list); } return 0; } /** * cipso_v4_cache_invalidate - Invalidates the current CIPSO cache * * Description: * Invalidates and frees any entries in the CIPSO cache. Returns zero on * success and negative values on failure. * */ void cipso_v4_cache_invalidate(void) { struct cipso_v4_map_cache_entry *entry, *tmp_entry; u32 iter; for (iter = 0; iter < CIPSO_V4_CACHE_BUCKETS; iter++) { spin_lock_bh(&cipso_v4_cache[iter].lock); list_for_each_entry_safe(entry, tmp_entry, &cipso_v4_cache[iter].list, list) { list_del(&entry->list); cipso_v4_cache_entry_free(entry); } cipso_v4_cache[iter].size = 0; spin_unlock_bh(&cipso_v4_cache[iter].lock); } } /** * cipso_v4_cache_check - Check the CIPSO cache for a label mapping * @key: the buffer to check * @key_len: buffer length in bytes * @secattr: the security attribute struct to use * * Description: * This function checks the cache to see if a label mapping already exists for * the given key. If there is a match then the cache is adjusted and the * @secattr struct is populated with the correct LSM security attributes. The * cache is adjusted in the following manner if the entry is not already the * first in the cache bucket: * * 1. The cache entry's activity counter is incremented * 2. The previous (higher ranking) entry's activity counter is decremented * 3. If the difference between the two activity counters is geater than * CIPSO_V4_CACHE_REORDERLIMIT the two entries are swapped * * Returns zero on success, -ENOENT for a cache miss, and other negative values * on error. * */ static int cipso_v4_cache_check(const unsigned char *key, u32 key_len, struct netlbl_lsm_secattr *secattr) { u32 bkt; struct cipso_v4_map_cache_entry *entry; struct cipso_v4_map_cache_entry *prev_entry = NULL; u32 hash; if (!cipso_v4_cache_enabled) return -ENOENT; hash = cipso_v4_map_cache_hash(key, key_len); bkt = hash & (CIPSO_V4_CACHE_BUCKETS - 1); spin_lock_bh(&cipso_v4_cache[bkt].lock); list_for_each_entry(entry, &cipso_v4_cache[bkt].list, list) { if (entry->hash == hash && entry->key_len == key_len && memcmp(entry->key, key, key_len) == 0) { entry->activity += 1; atomic_inc(&entry->lsm_data->refcount); secattr->cache = entry->lsm_data; secattr->flags |= NETLBL_SECATTR_CACHE; secattr->type = NETLBL_NLTYPE_CIPSOV4; if (prev_entry == NULL) { spin_unlock_bh(&cipso_v4_cache[bkt].lock); return 0; } if (prev_entry->activity > 0) prev_entry->activity -= 1; if (entry->activity > prev_entry->activity && entry->activity - prev_entry->activity > CIPSO_V4_CACHE_REORDERLIMIT) { __list_del(entry->list.prev, entry->list.next); __list_add(&entry->list, prev_entry->list.prev, &prev_entry->list); } spin_unlock_bh(&cipso_v4_cache[bkt].lock); return 0; } prev_entry = entry; } spin_unlock_bh(&cipso_v4_cache[bkt].lock); return -ENOENT; } /** * cipso_v4_cache_add - Add an entry to the CIPSO cache * @skb: the packet * @secattr: the packet's security attributes * * Description: * Add a new entry into the CIPSO label mapping cache. Add the new entry to * head of the cache bucket's list, if the cache bucket is out of room remove * the last entry in the list first. It is important to note that there is * currently no checking for duplicate keys. 
Returns zero on success, * negative values on failure. * */ int cipso_v4_cache_add(const struct sk_buff *skb, const struct netlbl_lsm_secattr *secattr) { int ret_val = -EPERM; u32 bkt; struct cipso_v4_map_cache_entry *entry = NULL; struct cipso_v4_map_cache_entry *old_entry = NULL; unsigned char *cipso_ptr; u32 cipso_ptr_len; if (!cipso_v4_cache_enabled || cipso_v4_cache_bucketsize <= 0) return 0; cipso_ptr = CIPSO_V4_OPTPTR(skb); cipso_ptr_len = cipso_ptr[1]; entry = kzalloc(sizeof(*entry), GFP_ATOMIC); if (entry == NULL) return -ENOMEM; entry->key = kmemdup(cipso_ptr, cipso_ptr_len, GFP_ATOMIC); if (entry->key == NULL) { ret_val = -ENOMEM; goto cache_add_failure; } entry->key_len = cipso_ptr_len; entry->hash = cipso_v4_map_cache_hash(cipso_ptr, cipso_ptr_len); atomic_inc(&secattr->cache->refcount); entry->lsm_data = secattr->cache; bkt = entry->hash & (CIPSO_V4_CACHE_BUCKETS - 1); spin_lock_bh(&cipso_v4_cache[bkt].lock); if (cipso_v4_cache[bkt].size < cipso_v4_cache_bucketsize) { list_add(&entry->list, &cipso_v4_cache[bkt].list); cipso_v4_cache[bkt].size += 1; } else { old_entry = list_entry(cipso_v4_cache[bkt].list.prev, struct cipso_v4_map_cache_entry, list); list_del(&old_entry->list); list_add(&entry->list, &cipso_v4_cache[bkt].list); cipso_v4_cache_entry_free(old_entry); } spin_unlock_bh(&cipso_v4_cache[bkt].lock); return 0; cache_add_failure: if (entry) cipso_v4_cache_entry_free(entry); return ret_val; } /* * DOI List Functions */ /** * cipso_v4_doi_search - Searches for a DOI definition * @doi: the DOI to search for * * Description: * Search the DOI definition list for a DOI definition with a DOI value that * matches @doi. The caller is responsible for calling rcu_read_[un]lock(). * Returns a pointer to the DOI definition on success and NULL on failure. */ static struct cipso_v4_doi *cipso_v4_doi_search(u32 doi) { struct cipso_v4_doi *iter; list_for_each_entry_rcu(iter, &cipso_v4_doi_list, list) if (iter->doi == doi && atomic_read(&iter->refcount)) return iter; return NULL; } /** * cipso_v4_doi_add - Add a new DOI to the CIPSO protocol engine * @doi_def: the DOI structure * @audit_info: NetLabel audit information * * Description: * The caller defines a new DOI for use by the CIPSO engine and calls this * function to add it to the list of acceptable domains. The caller must * ensure that the mapping table specified in @doi_def->map meets all of the * requirements of the mapping type (see cipso_ipv4.h for details). Returns * zero on success and non-zero on failure. 
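 *
 * A minimal sketch of registering a pass-through DOI (illustrative only;
 * the doi value and audit_info are placeholders, and in practice DOI
 * definitions normally arrive via the NetLabel netlink interface):
 *
 *	struct cipso_v4_doi *doi_def;
 *	int ret;
 *
 *	doi_def = kzalloc(sizeof(*doi_def), GFP_KERNEL);
 *	if (doi_def == NULL)
 *		return -ENOMEM;
 *	doi_def->doi = 16;
 *	doi_def->type = CIPSO_V4_MAP_PASS;
 *	doi_def->tags[0] = CIPSO_V4_TAG_RBITMAP;
 *	doi_def->tags[1] = CIPSO_V4_TAG_INVALID;
 *	ret = cipso_v4_doi_add(doi_def, &audit_info);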
* */ int cipso_v4_doi_add(struct cipso_v4_doi *doi_def, struct netlbl_audit *audit_info) { int ret_val = -EINVAL; u32 iter; u32 doi; u32 doi_type; struct audit_buffer *audit_buf; doi = doi_def->doi; doi_type = doi_def->type; if (doi_def == NULL || doi_def->doi == CIPSO_V4_DOI_UNKNOWN) goto doi_add_return; for (iter = 0; iter < CIPSO_V4_TAG_MAXCNT; iter++) { switch (doi_def->tags[iter]) { case CIPSO_V4_TAG_RBITMAP: break; case CIPSO_V4_TAG_RANGE: case CIPSO_V4_TAG_ENUM: if (doi_def->type != CIPSO_V4_MAP_PASS) goto doi_add_return; break; case CIPSO_V4_TAG_LOCAL: if (doi_def->type != CIPSO_V4_MAP_LOCAL) goto doi_add_return; break; case CIPSO_V4_TAG_INVALID: if (iter == 0) goto doi_add_return; break; default: goto doi_add_return; } } atomic_set(&doi_def->refcount, 1); spin_lock(&cipso_v4_doi_list_lock); if (cipso_v4_doi_search(doi_def->doi) != NULL) { spin_unlock(&cipso_v4_doi_list_lock); ret_val = -EEXIST; goto doi_add_return; } list_add_tail_rcu(&doi_def->list, &cipso_v4_doi_list); spin_unlock(&cipso_v4_doi_list_lock); ret_val = 0; doi_add_return: audit_buf = netlbl_audit_start(AUDIT_MAC_CIPSOV4_ADD, audit_info); if (audit_buf != NULL) { const char *type_str; switch (doi_type) { case CIPSO_V4_MAP_TRANS: type_str = "trans"; break; case CIPSO_V4_MAP_PASS: type_str = "pass"; break; case CIPSO_V4_MAP_LOCAL: type_str = "local"; break; default: type_str = "(unknown)"; } audit_log_format(audit_buf, " cipso_doi=%u cipso_type=%s res=%u", doi, type_str, ret_val == 0 ? 1 : 0); audit_log_end(audit_buf); } return ret_val; } /** * cipso_v4_doi_free - Frees a DOI definition * @entry: the entry's RCU field * * Description: * This function frees all of the memory associated with a DOI definition. * */ void cipso_v4_doi_free(struct cipso_v4_doi *doi_def) { if (doi_def == NULL) return; switch (doi_def->type) { case CIPSO_V4_MAP_TRANS: kfree(doi_def->map.std->lvl.cipso); kfree(doi_def->map.std->lvl.local); kfree(doi_def->map.std->cat.cipso); kfree(doi_def->map.std->cat.local); break; } kfree(doi_def); } /** * cipso_v4_doi_free_rcu - Frees a DOI definition via the RCU pointer * @entry: the entry's RCU field * * Description: * This function is designed to be used as a callback to the call_rcu() * function so that the memory allocated to the DOI definition can be released * safely. * */ static void cipso_v4_doi_free_rcu(struct rcu_head *entry) { struct cipso_v4_doi *doi_def; doi_def = container_of(entry, struct cipso_v4_doi, rcu); cipso_v4_doi_free(doi_def); } /** * cipso_v4_doi_remove - Remove an existing DOI from the CIPSO protocol engine * @doi: the DOI value * @audit_secid: the LSM secid to use in the audit message * * Description: * Removes a DOI definition from the CIPSO engine. The NetLabel routines will * be called to release their own LSM domain mappings as well as our own * domain list. Returns zero on success and negative values on failure. 
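 *
 * Note that removal is reference counted: the atomic_dec_and_test() below
 * only succeeds when this is the last reference, so a DOI definition that is
 * still in use (e.g. held via cipso_v4_doi_getdef()) makes the removal fail
 * with -EBUSY.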
* */ int cipso_v4_doi_remove(u32 doi, struct netlbl_audit *audit_info) { int ret_val; struct cipso_v4_doi *doi_def; struct audit_buffer *audit_buf; spin_lock(&cipso_v4_doi_list_lock); doi_def = cipso_v4_doi_search(doi); if (doi_def == NULL) { spin_unlock(&cipso_v4_doi_list_lock); ret_val = -ENOENT; goto doi_remove_return; } if (!atomic_dec_and_test(&doi_def->refcount)) { spin_unlock(&cipso_v4_doi_list_lock); ret_val = -EBUSY; goto doi_remove_return; } list_del_rcu(&doi_def->list); spin_unlock(&cipso_v4_doi_list_lock); cipso_v4_cache_invalidate(); call_rcu(&doi_def->rcu, cipso_v4_doi_free_rcu); ret_val = 0; doi_remove_return: audit_buf = netlbl_audit_start(AUDIT_MAC_CIPSOV4_DEL, audit_info); if (audit_buf != NULL) { audit_log_format(audit_buf, " cipso_doi=%u res=%u", doi, ret_val == 0 ? 1 : 0); audit_log_end(audit_buf); } return ret_val; } /** * cipso_v4_doi_getdef - Returns a reference to a valid DOI definition * @doi: the DOI value * * Description: * Searches for a valid DOI definition and if one is found it is returned to * the caller. Otherwise NULL is returned. The caller must ensure that * rcu_read_lock() is held while accessing the returned definition and the DOI * definition reference count is decremented when the caller is done. * */ struct cipso_v4_doi *cipso_v4_doi_getdef(u32 doi) { struct cipso_v4_doi *doi_def; rcu_read_lock(); doi_def = cipso_v4_doi_search(doi); if (doi_def == NULL) goto doi_getdef_return; if (!atomic_inc_not_zero(&doi_def->refcount)) doi_def = NULL; doi_getdef_return: rcu_read_unlock(); return doi_def; } /** * cipso_v4_doi_putdef - Releases a reference for the given DOI definition * @doi_def: the DOI definition * * Description: * Releases a DOI definition reference obtained from cipso_v4_doi_getdef(). * */ void cipso_v4_doi_putdef(struct cipso_v4_doi *doi_def) { if (doi_def == NULL) return; if (!atomic_dec_and_test(&doi_def->refcount)) return; spin_lock(&cipso_v4_doi_list_lock); list_del_rcu(&doi_def->list); spin_unlock(&cipso_v4_doi_list_lock); cipso_v4_cache_invalidate(); call_rcu(&doi_def->rcu, cipso_v4_doi_free_rcu); } /** * cipso_v4_doi_walk - Iterate through the DOI definitions * @skip_cnt: skip past this number of DOI definitions, updated * @callback: callback for each DOI definition * @cb_arg: argument for the callback function * * Description: * Iterate over the DOI definition list, skipping the first @skip_cnt entries. * For each entry call @callback, if @callback returns a negative value stop * 'walking' through the list and return. Updates the value in @skip_cnt upon * return. Returns zero on success, negative values on failure. * */ int cipso_v4_doi_walk(u32 *skip_cnt, int (*callback) (struct cipso_v4_doi *doi_def, void *arg), void *cb_arg) { int ret_val = -ENOENT; u32 doi_cnt = 0; struct cipso_v4_doi *iter_doi; rcu_read_lock(); list_for_each_entry_rcu(iter_doi, &cipso_v4_doi_list, list) if (atomic_read(&iter_doi->refcount) > 0) { if (doi_cnt++ < *skip_cnt) continue; ret_val = callback(iter_doi, cb_arg); if (ret_val < 0) { doi_cnt--; goto doi_walk_return; } } doi_walk_return: rcu_read_unlock(); *skip_cnt = doi_cnt; return ret_val; } /* * Label Mapping Functions */ /** * cipso_v4_map_lvl_valid - Checks to see if the given level is understood * @doi_def: the DOI definition * @level: the level to check * * Description: * Checks the given level against the given DOI definition and returns a * negative value if the level does not have a valid mapping and a zero value * if the level is defined by the DOI. 
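 *
 * For example, with a CIPSO_V4_MAP_TRANS mapping where lvl.cipso[5] == 2,
 * network level 5 is considered valid (and would translate to local level 2);
 * slots set to CIPSO_V4_INV_LVL mark levels that have no mapping.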
* */ static int cipso_v4_map_lvl_valid(const struct cipso_v4_doi *doi_def, u8 level) { switch (doi_def->type) { case CIPSO_V4_MAP_PASS: return 0; case CIPSO_V4_MAP_TRANS: if (doi_def->map.std->lvl.cipso[level] < CIPSO_V4_INV_LVL) return 0; break; } return -EFAULT; } /** * cipso_v4_map_lvl_hton - Perform a level mapping from the host to the network * @doi_def: the DOI definition * @host_lvl: the host MLS level * @net_lvl: the network/CIPSO MLS level * * Description: * Perform a label mapping to translate a local MLS level to the correct * CIPSO level using the given DOI definition. Returns zero on success, * negative values otherwise. * */ static int cipso_v4_map_lvl_hton(const struct cipso_v4_doi *doi_def, u32 host_lvl, u32 *net_lvl) { switch (doi_def->type) { case CIPSO_V4_MAP_PASS: *net_lvl = host_lvl; return 0; case CIPSO_V4_MAP_TRANS: if (host_lvl < doi_def->map.std->lvl.local_size && doi_def->map.std->lvl.local[host_lvl] < CIPSO_V4_INV_LVL) { *net_lvl = doi_def->map.std->lvl.local[host_lvl]; return 0; } return -EPERM; } return -EINVAL; } /** * cipso_v4_map_lvl_ntoh - Perform a level mapping from the network to the host * @doi_def: the DOI definition * @net_lvl: the network/CIPSO MLS level * @host_lvl: the host MLS level * * Description: * Perform a label mapping to translate a CIPSO level to the correct local MLS * level using the given DOI definition. Returns zero on success, negative * values otherwise. * */ static int cipso_v4_map_lvl_ntoh(const struct cipso_v4_doi *doi_def, u32 net_lvl, u32 *host_lvl) { struct cipso_v4_std_map_tbl *map_tbl; switch (doi_def->type) { case CIPSO_V4_MAP_PASS: *host_lvl = net_lvl; return 0; case CIPSO_V4_MAP_TRANS: map_tbl = doi_def->map.std; if (net_lvl < map_tbl->lvl.cipso_size && map_tbl->lvl.cipso[net_lvl] < CIPSO_V4_INV_LVL) { *host_lvl = doi_def->map.std->lvl.cipso[net_lvl]; return 0; } return -EPERM; } return -EINVAL; } /** * cipso_v4_map_cat_rbm_valid - Checks to see if the category bitmap is valid * @doi_def: the DOI definition * @bitmap: category bitmap * @bitmap_len: bitmap length in bytes * * Description: * Checks the given category bitmap against the given DOI definition and * returns a negative value if any of the categories in the bitmap do not have * a valid mapping and a zero value if all of the categories are valid. * */ static int cipso_v4_map_cat_rbm_valid(const struct cipso_v4_doi *doi_def, const unsigned char *bitmap, u32 bitmap_len) { int cat = -1; u32 bitmap_len_bits = bitmap_len * 8; u32 cipso_cat_size; u32 *cipso_array; switch (doi_def->type) { case CIPSO_V4_MAP_PASS: return 0; case CIPSO_V4_MAP_TRANS: cipso_cat_size = doi_def->map.std->cat.cipso_size; cipso_array = doi_def->map.std->cat.cipso; for (;;) { cat = cipso_v4_bitmap_walk(bitmap, bitmap_len_bits, cat + 1, 1); if (cat < 0) break; if (cat >= cipso_cat_size || cipso_array[cat] >= CIPSO_V4_INV_CAT) return -EFAULT; } if (cat == -1) return 0; break; } return -EFAULT; } /** * cipso_v4_map_cat_rbm_hton - Perform a category mapping from host to network * @doi_def: the DOI definition * @secattr: the security attributes * @net_cat: the zero'd out category bitmap in network/CIPSO format * @net_cat_len: the length of the CIPSO bitmap in bytes * * Description: * Perform a label mapping to translate a local MLS category bitmap to the * correct CIPSO bitmap using the given DOI definition. Returns the minimum * size in bytes of the network bitmap on success, negative values otherwise. 
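 *
 * The returned size is just enough bytes to cover the highest set bit in the
 * network bitmap, e.g. if the highest mapped category bit is 9 the function
 * returns 2 since bits 0-9 require two octets.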
* */ static int cipso_v4_map_cat_rbm_hton(const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr, unsigned char *net_cat, u32 net_cat_len) { int host_spot = -1; u32 net_spot = CIPSO_V4_INV_CAT; u32 net_spot_max = 0; u32 net_clen_bits = net_cat_len * 8; u32 host_cat_size = 0; u32 *host_cat_array = NULL; if (doi_def->type == CIPSO_V4_MAP_TRANS) { host_cat_size = doi_def->map.std->cat.local_size; host_cat_array = doi_def->map.std->cat.local; } for (;;) { host_spot = netlbl_secattr_catmap_walk(secattr->attr.mls.cat, host_spot + 1); if (host_spot < 0) break; switch (doi_def->type) { case CIPSO_V4_MAP_PASS: net_spot = host_spot; break; case CIPSO_V4_MAP_TRANS: if (host_spot >= host_cat_size) return -EPERM; net_spot = host_cat_array[host_spot]; if (net_spot >= CIPSO_V4_INV_CAT) return -EPERM; break; } if (net_spot >= net_clen_bits) return -ENOSPC; cipso_v4_bitmap_setbit(net_cat, net_spot, 1); if (net_spot > net_spot_max) net_spot_max = net_spot; } if (++net_spot_max % 8) return net_spot_max / 8 + 1; return net_spot_max / 8; } /** * cipso_v4_map_cat_rbm_ntoh - Perform a category mapping from network to host * @doi_def: the DOI definition * @net_cat: the category bitmap in network/CIPSO format * @net_cat_len: the length of the CIPSO bitmap in bytes * @secattr: the security attributes * * Description: * Perform a label mapping to translate a CIPSO bitmap to the correct local * MLS category bitmap using the given DOI definition. Returns zero on * success, negative values on failure. * */ static int cipso_v4_map_cat_rbm_ntoh(const struct cipso_v4_doi *doi_def, const unsigned char *net_cat, u32 net_cat_len, struct netlbl_lsm_secattr *secattr) { int ret_val; int net_spot = -1; u32 host_spot = CIPSO_V4_INV_CAT; u32 net_clen_bits = net_cat_len * 8; u32 net_cat_size = 0; u32 *net_cat_array = NULL; if (doi_def->type == CIPSO_V4_MAP_TRANS) { net_cat_size = doi_def->map.std->cat.cipso_size; net_cat_array = doi_def->map.std->cat.cipso; } for (;;) { net_spot = cipso_v4_bitmap_walk(net_cat, net_clen_bits, net_spot + 1, 1); if (net_spot < 0) { if (net_spot == -2) return -EFAULT; return 0; } switch (doi_def->type) { case CIPSO_V4_MAP_PASS: host_spot = net_spot; break; case CIPSO_V4_MAP_TRANS: if (net_spot >= net_cat_size) return -EPERM; host_spot = net_cat_array[net_spot]; if (host_spot >= CIPSO_V4_INV_CAT) return -EPERM; break; } ret_val = netlbl_secattr_catmap_setbit(secattr->attr.mls.cat, host_spot, GFP_ATOMIC); if (ret_val != 0) return ret_val; } return -EINVAL; } /** * cipso_v4_map_cat_enum_valid - Checks to see if the categories are valid * @doi_def: the DOI definition * @enumcat: category list * @enumcat_len: length of the category list in bytes * * Description: * Checks the given categories against the given DOI definition and returns a * negative value if any of the categories do not have a valid mapping and a * zero value if all of the categories are valid. 
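 *
 * The enumerated tag carries a flat list of 16-bit category values which
 * must be sorted in strictly increasing order, e.g. the list {2, 7, 400} is
 * valid while {2, 2, 7} is rejected.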
* */ static int cipso_v4_map_cat_enum_valid(const struct cipso_v4_doi *doi_def, const unsigned char *enumcat, u32 enumcat_len) { u16 cat; int cat_prev = -1; u32 iter; if (doi_def->type != CIPSO_V4_MAP_PASS || enumcat_len & 0x01) return -EFAULT; for (iter = 0; iter < enumcat_len; iter += 2) { cat = get_unaligned_be16(&enumcat[iter]); if (cat <= cat_prev) return -EFAULT; cat_prev = cat; } return 0; } /** * cipso_v4_map_cat_enum_hton - Perform a category mapping from host to network * @doi_def: the DOI definition * @secattr: the security attributes * @net_cat: the zero'd out category list in network/CIPSO format * @net_cat_len: the length of the CIPSO category list in bytes * * Description: * Perform a label mapping to translate a local MLS category bitmap to the * correct CIPSO category list using the given DOI definition. Returns the * size in bytes of the network category bitmap on success, negative values * otherwise. * */ static int cipso_v4_map_cat_enum_hton(const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr, unsigned char *net_cat, u32 net_cat_len) { int cat = -1; u32 cat_iter = 0; for (;;) { cat = netlbl_secattr_catmap_walk(secattr->attr.mls.cat, cat + 1); if (cat < 0) break; if ((cat_iter + 2) > net_cat_len) return -ENOSPC; *((__be16 *)&net_cat[cat_iter]) = htons(cat); cat_iter += 2; } return cat_iter; } /** * cipso_v4_map_cat_enum_ntoh - Perform a category mapping from network to host * @doi_def: the DOI definition * @net_cat: the category list in network/CIPSO format * @net_cat_len: the length of the CIPSO bitmap in bytes * @secattr: the security attributes * * Description: * Perform a label mapping to translate a CIPSO category list to the correct * local MLS category bitmap using the given DOI definition. Returns zero on * success, negative values on failure. * */ static int cipso_v4_map_cat_enum_ntoh(const struct cipso_v4_doi *doi_def, const unsigned char *net_cat, u32 net_cat_len, struct netlbl_lsm_secattr *secattr) { int ret_val; u32 iter; for (iter = 0; iter < net_cat_len; iter += 2) { ret_val = netlbl_secattr_catmap_setbit(secattr->attr.mls.cat, get_unaligned_be16(&net_cat[iter]), GFP_ATOMIC); if (ret_val != 0) return ret_val; } return 0; } /** * cipso_v4_map_cat_rng_valid - Checks to see if the categories are valid * @doi_def: the DOI definition * @rngcat: category list * @rngcat_len: length of the category list in bytes * * Description: * Checks the given categories against the given DOI definition and returns a * negative value if any of the categories do not have a valid mapping and a * zero value if all of the categories are valid. 
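 *
 * The ranged tag carries up to CIPSO_V4_TAG_RNG_CAT_MAX pairs of 16-bit
 * (high, low) category bounds ordered from the highest range down; a trailing
 * low bound of zero may be omitted, which is why the code below substitutes
 * zero when the final pair is truncated.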
* */ static int cipso_v4_map_cat_rng_valid(const struct cipso_v4_doi *doi_def, const unsigned char *rngcat, u32 rngcat_len) { u16 cat_high; u16 cat_low; u32 cat_prev = CIPSO_V4_MAX_REM_CATS + 1; u32 iter; if (doi_def->type != CIPSO_V4_MAP_PASS || rngcat_len & 0x01) return -EFAULT; for (iter = 0; iter < rngcat_len; iter += 4) { cat_high = get_unaligned_be16(&rngcat[iter]); if ((iter + 4) <= rngcat_len) cat_low = get_unaligned_be16(&rngcat[iter + 2]); else cat_low = 0; if (cat_high > cat_prev) return -EFAULT; cat_prev = cat_low; } return 0; } /** * cipso_v4_map_cat_rng_hton - Perform a category mapping from host to network * @doi_def: the DOI definition * @secattr: the security attributes * @net_cat: the zero'd out category list in network/CIPSO format * @net_cat_len: the length of the CIPSO category list in bytes * * Description: * Perform a label mapping to translate a local MLS category bitmap to the * correct CIPSO category list using the given DOI definition. Returns the * size in bytes of the network category bitmap on success, negative values * otherwise. * */ static int cipso_v4_map_cat_rng_hton(const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr, unsigned char *net_cat, u32 net_cat_len) { int iter = -1; u16 array[CIPSO_V4_TAG_RNG_CAT_MAX * 2]; u32 array_cnt = 0; u32 cat_size = 0; /* make sure we don't overflow the 'array[]' variable */ if (net_cat_len > (CIPSO_V4_OPT_LEN_MAX - CIPSO_V4_HDR_LEN - CIPSO_V4_TAG_RNG_BLEN)) return -ENOSPC; for (;;) { iter = netlbl_secattr_catmap_walk(secattr->attr.mls.cat, iter + 1); if (iter < 0) break; cat_size += (iter == 0 ? 0 : sizeof(u16)); if (cat_size > net_cat_len) return -ENOSPC; array[array_cnt++] = iter; iter = netlbl_secattr_catmap_walk_rng(secattr->attr.mls.cat, iter); if (iter < 0) return -EFAULT; cat_size += sizeof(u16); if (cat_size > net_cat_len) return -ENOSPC; array[array_cnt++] = iter; } for (iter = 0; array_cnt > 0;) { *((__be16 *)&net_cat[iter]) = htons(array[--array_cnt]); iter += 2; array_cnt--; if (array[array_cnt] != 0) { *((__be16 *)&net_cat[iter]) = htons(array[array_cnt]); iter += 2; } } return cat_size; } /** * cipso_v4_map_cat_rng_ntoh - Perform a category mapping from network to host * @doi_def: the DOI definition * @net_cat: the category list in network/CIPSO format * @net_cat_len: the length of the CIPSO bitmap in bytes * @secattr: the security attributes * * Description: * Perform a label mapping to translate a CIPSO category list to the correct * local MLS category bitmap using the given DOI definition. Returns zero on * success, negative values on failure. * */ static int cipso_v4_map_cat_rng_ntoh(const struct cipso_v4_doi *doi_def, const unsigned char *net_cat, u32 net_cat_len, struct netlbl_lsm_secattr *secattr) { int ret_val; u32 net_iter; u16 cat_low; u16 cat_high; for (net_iter = 0; net_iter < net_cat_len; net_iter += 4) { cat_high = get_unaligned_be16(&net_cat[net_iter]); if ((net_iter + 4) <= net_cat_len) cat_low = get_unaligned_be16(&net_cat[net_iter + 2]); else cat_low = 0; ret_val = netlbl_secattr_catmap_setrng(secattr->attr.mls.cat, cat_low, cat_high, GFP_ATOMIC); if (ret_val != 0) return ret_val; } return 0; } /* * Protocol Handling Functions */ /** * cipso_v4_gentag_hdr - Generate a CIPSO option header * @doi_def: the DOI definition * @len: the total tag length in bytes, not including this header * @buf: the CIPSO option buffer * * Description: * Write a CIPSO header into the beginning of @buffer. 
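 *
 * For example, a header for DOI 3 followed by a 10 byte tag would be written
 * as the six octets { IPOPT_CIPSO, 16, 0x00, 0x00, 0x00, 0x03 }.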
* */ static void cipso_v4_gentag_hdr(const struct cipso_v4_doi *doi_def, unsigned char *buf, u32 len) { buf[0] = IPOPT_CIPSO; buf[1] = CIPSO_V4_HDR_LEN + len; *(__be32 *)&buf[2] = htonl(doi_def->doi); } /** * cipso_v4_gentag_rbm - Generate a CIPSO restricted bitmap tag (type #1) * @doi_def: the DOI definition * @secattr: the security attributes * @buffer: the option buffer * @buffer_len: length of buffer in bytes * * Description: * Generate a CIPSO option using the restricted bitmap tag, tag type #1. The * actual buffer length may be larger than the indicated size due to * translation between host and network category bitmaps. Returns the size of * the tag on success, negative values on failure. * */ static int cipso_v4_gentag_rbm(const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr, unsigned char *buffer, u32 buffer_len) { int ret_val; u32 tag_len; u32 level; if ((secattr->flags & NETLBL_SECATTR_MLS_LVL) == 0) return -EPERM; ret_val = cipso_v4_map_lvl_hton(doi_def, secattr->attr.mls.lvl, &level); if (ret_val != 0) return ret_val; if (secattr->flags & NETLBL_SECATTR_MLS_CAT) { ret_val = cipso_v4_map_cat_rbm_hton(doi_def, secattr, &buffer[4], buffer_len - 4); if (ret_val < 0) return ret_val; /* This will send packets using the "optimized" format when * possible as specified in section 3.4.2.6 of the * CIPSO draft. */ if (cipso_v4_rbm_optfmt && ret_val > 0 && ret_val <= 10) tag_len = 14; else tag_len = 4 + ret_val; } else tag_len = 4; buffer[0] = CIPSO_V4_TAG_RBITMAP; buffer[1] = tag_len; buffer[3] = level; return tag_len; } /** * cipso_v4_parsetag_rbm - Parse a CIPSO restricted bitmap tag * @doi_def: the DOI definition * @tag: the CIPSO tag * @secattr: the security attributes * * Description: * Parse a CIPSO restricted bitmap tag (tag type #1) and return the security * attributes in @secattr. Return zero on success, negatives values on * failure. * */ static int cipso_v4_parsetag_rbm(const struct cipso_v4_doi *doi_def, const unsigned char *tag, struct netlbl_lsm_secattr *secattr) { int ret_val; u8 tag_len = tag[1]; u32 level; ret_val = cipso_v4_map_lvl_ntoh(doi_def, tag[3], &level); if (ret_val != 0) return ret_val; secattr->attr.mls.lvl = level; secattr->flags |= NETLBL_SECATTR_MLS_LVL; if (tag_len > 4) { secattr->attr.mls.cat = netlbl_secattr_catmap_alloc(GFP_ATOMIC); if (secattr->attr.mls.cat == NULL) return -ENOMEM; ret_val = cipso_v4_map_cat_rbm_ntoh(doi_def, &tag[4], tag_len - 4, secattr); if (ret_val != 0) { netlbl_secattr_catmap_free(secattr->attr.mls.cat); return ret_val; } secattr->flags |= NETLBL_SECATTR_MLS_CAT; } return 0; } /** * cipso_v4_gentag_enum - Generate a CIPSO enumerated tag (type #2) * @doi_def: the DOI definition * @secattr: the security attributes * @buffer: the option buffer * @buffer_len: length of buffer in bytes * * Description: * Generate a CIPSO option using the enumerated tag, tag type #2. Returns the * size of the tag on success, negative values on failure. 
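 *
 * The generated tag layout matches the other MLS tags: buffer[0] is the tag
 * type, buffer[1] the tag length, buffer[2] is left zero (the caller cleared
 * the buffer) and buffer[3] carries the mapped level, with any 16-bit
 * category values following from buffer[4] onwards.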
* */ static int cipso_v4_gentag_enum(const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr, unsigned char *buffer, u32 buffer_len) { int ret_val; u32 tag_len; u32 level; if (!(secattr->flags & NETLBL_SECATTR_MLS_LVL)) return -EPERM; ret_val = cipso_v4_map_lvl_hton(doi_def, secattr->attr.mls.lvl, &level); if (ret_val != 0) return ret_val; if (secattr->flags & NETLBL_SECATTR_MLS_CAT) { ret_val = cipso_v4_map_cat_enum_hton(doi_def, secattr, &buffer[4], buffer_len - 4); if (ret_val < 0) return ret_val; tag_len = 4 + ret_val; } else tag_len = 4; buffer[0] = CIPSO_V4_TAG_ENUM; buffer[1] = tag_len; buffer[3] = level; return tag_len; } /** * cipso_v4_parsetag_enum - Parse a CIPSO enumerated tag * @doi_def: the DOI definition * @tag: the CIPSO tag * @secattr: the security attributes * * Description: * Parse a CIPSO enumerated tag (tag type #2) and return the security * attributes in @secattr. Return zero on success, negatives values on * failure. * */ static int cipso_v4_parsetag_enum(const struct cipso_v4_doi *doi_def, const unsigned char *tag, struct netlbl_lsm_secattr *secattr) { int ret_val; u8 tag_len = tag[1]; u32 level; ret_val = cipso_v4_map_lvl_ntoh(doi_def, tag[3], &level); if (ret_val != 0) return ret_val; secattr->attr.mls.lvl = level; secattr->flags |= NETLBL_SECATTR_MLS_LVL; if (tag_len > 4) { secattr->attr.mls.cat = netlbl_secattr_catmap_alloc(GFP_ATOMIC); if (secattr->attr.mls.cat == NULL) return -ENOMEM; ret_val = cipso_v4_map_cat_enum_ntoh(doi_def, &tag[4], tag_len - 4, secattr); if (ret_val != 0) { netlbl_secattr_catmap_free(secattr->attr.mls.cat); return ret_val; } secattr->flags |= NETLBL_SECATTR_MLS_CAT; } return 0; } /** * cipso_v4_gentag_rng - Generate a CIPSO ranged tag (type #5) * @doi_def: the DOI definition * @secattr: the security attributes * @buffer: the option buffer * @buffer_len: length of buffer in bytes * * Description: * Generate a CIPSO option using the ranged tag, tag type #5. Returns the * size of the tag on success, negative values on failure. * */ static int cipso_v4_gentag_rng(const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr, unsigned char *buffer, u32 buffer_len) { int ret_val; u32 tag_len; u32 level; if (!(secattr->flags & NETLBL_SECATTR_MLS_LVL)) return -EPERM; ret_val = cipso_v4_map_lvl_hton(doi_def, secattr->attr.mls.lvl, &level); if (ret_val != 0) return ret_val; if (secattr->flags & NETLBL_SECATTR_MLS_CAT) { ret_val = cipso_v4_map_cat_rng_hton(doi_def, secattr, &buffer[4], buffer_len - 4); if (ret_val < 0) return ret_val; tag_len = 4 + ret_val; } else tag_len = 4; buffer[0] = CIPSO_V4_TAG_RANGE; buffer[1] = tag_len; buffer[3] = level; return tag_len; } /** * cipso_v4_parsetag_rng - Parse a CIPSO ranged tag * @doi_def: the DOI definition * @tag: the CIPSO tag * @secattr: the security attributes * * Description: * Parse a CIPSO ranged tag (tag type #5) and return the security attributes * in @secattr. Return zero on success, negatives values on failure. 
* */ static int cipso_v4_parsetag_rng(const struct cipso_v4_doi *doi_def, const unsigned char *tag, struct netlbl_lsm_secattr *secattr) { int ret_val; u8 tag_len = tag[1]; u32 level; ret_val = cipso_v4_map_lvl_ntoh(doi_def, tag[3], &level); if (ret_val != 0) return ret_val; secattr->attr.mls.lvl = level; secattr->flags |= NETLBL_SECATTR_MLS_LVL; if (tag_len > 4) { secattr->attr.mls.cat = netlbl_secattr_catmap_alloc(GFP_ATOMIC); if (secattr->attr.mls.cat == NULL) return -ENOMEM; ret_val = cipso_v4_map_cat_rng_ntoh(doi_def, &tag[4], tag_len - 4, secattr); if (ret_val != 0) { netlbl_secattr_catmap_free(secattr->attr.mls.cat); return ret_val; } secattr->flags |= NETLBL_SECATTR_MLS_CAT; } return 0; } /** * cipso_v4_gentag_loc - Generate a CIPSO local tag (non-standard) * @doi_def: the DOI definition * @secattr: the security attributes * @buffer: the option buffer * @buffer_len: length of buffer in bytes * * Description: * Generate a CIPSO option using the local tag. Returns the size of the tag * on success, negative values on failure. * */ static int cipso_v4_gentag_loc(const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr, unsigned char *buffer, u32 buffer_len) { if (!(secattr->flags & NETLBL_SECATTR_SECID)) return -EPERM; buffer[0] = CIPSO_V4_TAG_LOCAL; buffer[1] = CIPSO_V4_TAG_LOC_BLEN; *(u32 *)&buffer[2] = secattr->attr.secid; return CIPSO_V4_TAG_LOC_BLEN; } /** * cipso_v4_parsetag_loc - Parse a CIPSO local tag * @doi_def: the DOI definition * @tag: the CIPSO tag * @secattr: the security attributes * * Description: * Parse a CIPSO local tag and return the security attributes in @secattr. * Return zero on success, negatives values on failure. * */ static int cipso_v4_parsetag_loc(const struct cipso_v4_doi *doi_def, const unsigned char *tag, struct netlbl_lsm_secattr *secattr) { secattr->attr.secid = *(u32 *)&tag[2]; secattr->flags |= NETLBL_SECATTR_SECID; return 0; } /** * cipso_v4_validate - Validate a CIPSO option * @option: the start of the option, on error it is set to point to the error * * Description: * This routine is called to validate a CIPSO option, it checks all of the * fields to ensure that they are at least valid, see the draft snippet below * for details. If the option is valid then a zero value is returned and * the value of @option is unchanged. If the option is invalid then a * non-zero value is returned and @option is adjusted to point to the * offending portion of the option. From the IETF draft ... * * "If any field within the CIPSO options, such as the DOI identifier, is not * recognized the IP datagram is discarded and an ICMP 'parameter problem' * (type 12) is generated and returned. The ICMP code field is set to 'bad * parameter' (code 0) and the pointer is set to the start of the CIPSO field * that is unrecognized." 
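 *
 * The return value doubles as the error offset: on failure it is the offset
 * of the offending byte from the start of the option and @option is advanced
 * to point at that byte, which lets the caller build the ICMP 'parameter
 * problem' pointer directly.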
* */ int cipso_v4_validate(const struct sk_buff *skb, unsigned char **option) { unsigned char *opt = *option; unsigned char *tag; unsigned char opt_iter; unsigned char err_offset = 0; u8 opt_len; u8 tag_len; struct cipso_v4_doi *doi_def = NULL; u32 tag_iter; /* caller already checks for length values that are too large */ opt_len = opt[1]; if (opt_len < 8) { err_offset = 1; goto validate_return; } rcu_read_lock(); doi_def = cipso_v4_doi_search(get_unaligned_be32(&opt[2])); if (doi_def == NULL) { err_offset = 2; goto validate_return_locked; } opt_iter = CIPSO_V4_HDR_LEN; tag = opt + opt_iter; while (opt_iter < opt_len) { for (tag_iter = 0; doi_def->tags[tag_iter] != tag[0];) if (doi_def->tags[tag_iter] == CIPSO_V4_TAG_INVALID || ++tag_iter == CIPSO_V4_TAG_MAXCNT) { err_offset = opt_iter; goto validate_return_locked; } tag_len = tag[1]; if (tag_len > (opt_len - opt_iter)) { err_offset = opt_iter + 1; goto validate_return_locked; } switch (tag[0]) { case CIPSO_V4_TAG_RBITMAP: if (tag_len < CIPSO_V4_TAG_RBM_BLEN) { err_offset = opt_iter + 1; goto validate_return_locked; } /* We are already going to do all the verification * necessary at the socket layer so from our point of * view it is safe to turn these checks off (and less * work), however, the CIPSO draft says we should do * all the CIPSO validations here but it doesn't * really specify _exactly_ what we need to validate * ... so, just make it a sysctl tunable. */ if (cipso_v4_rbm_strictvalid) { if (cipso_v4_map_lvl_valid(doi_def, tag[3]) < 0) { err_offset = opt_iter + 3; goto validate_return_locked; } if (tag_len > CIPSO_V4_TAG_RBM_BLEN && cipso_v4_map_cat_rbm_valid(doi_def, &tag[4], tag_len - 4) < 0) { err_offset = opt_iter + 4; goto validate_return_locked; } } break; case CIPSO_V4_TAG_ENUM: if (tag_len < CIPSO_V4_TAG_ENUM_BLEN) { err_offset = opt_iter + 1; goto validate_return_locked; } if (cipso_v4_map_lvl_valid(doi_def, tag[3]) < 0) { err_offset = opt_iter + 3; goto validate_return_locked; } if (tag_len > CIPSO_V4_TAG_ENUM_BLEN && cipso_v4_map_cat_enum_valid(doi_def, &tag[4], tag_len - 4) < 0) { err_offset = opt_iter + 4; goto validate_return_locked; } break; case CIPSO_V4_TAG_RANGE: if (tag_len < CIPSO_V4_TAG_RNG_BLEN) { err_offset = opt_iter + 1; goto validate_return_locked; } if (cipso_v4_map_lvl_valid(doi_def, tag[3]) < 0) { err_offset = opt_iter + 3; goto validate_return_locked; } if (tag_len > CIPSO_V4_TAG_RNG_BLEN && cipso_v4_map_cat_rng_valid(doi_def, &tag[4], tag_len - 4) < 0) { err_offset = opt_iter + 4; goto validate_return_locked; } break; case CIPSO_V4_TAG_LOCAL: /* This is a non-standard tag that we only allow for * local connections, so if the incoming interface is * not the loopback device drop the packet. */ if (!(skb->dev->flags & IFF_LOOPBACK)) { err_offset = opt_iter; goto validate_return_locked; } if (tag_len != CIPSO_V4_TAG_LOC_BLEN) { err_offset = opt_iter + 1; goto validate_return_locked; } break; default: err_offset = opt_iter; goto validate_return_locked; } tag += tag_len; opt_iter += tag_len; } validate_return_locked: rcu_read_unlock(); validate_return: *option = opt + err_offset; return err_offset; } /** * cipso_v4_error - Send the correct response for a bad packet * @skb: the packet * @error: the error code * @gateway: CIPSO gateway flag * * Description: * Based on the error code given in @error, send an ICMP error message back to * the originating host. From the IETF draft ... 
* * "If the contents of the CIPSO [option] are valid but the security label is * outside of the configured host or port label range, the datagram is * discarded and an ICMP 'destination unreachable' (type 3) is generated and * returned. The code field of the ICMP is set to 'communication with * destination network administratively prohibited' (code 9) or to * 'communication with destination host administratively prohibited' * (code 10). The value of the code is dependent on whether the originator * of the ICMP message is acting as a CIPSO host or a CIPSO gateway. The * recipient of the ICMP message MUST be able to handle either value. The * same procedure is performed if a CIPSO [option] can not be added to an * IP packet because it is too large to fit in the IP options area." * * "If the error is triggered by receipt of an ICMP message, the message is * discarded and no response is permitted (consistent with general ICMP * processing rules)." * */ void cipso_v4_error(struct sk_buff *skb, int error, u32 gateway) { if (ip_hdr(skb)->protocol == IPPROTO_ICMP || error != -EACCES) return; if (gateway) icmp_send(skb, ICMP_DEST_UNREACH, ICMP_NET_ANO, 0); else icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_ANO, 0); } /** * cipso_v4_genopt - Generate a CIPSO option * @buf: the option buffer * @buf_len: the size of opt_buf * @doi_def: the CIPSO DOI to use * @secattr: the security attributes * * Description: * Generate a CIPSO option using the DOI definition and security attributes * passed to the function. Returns the length of the option on success and * negative values on failure. * */ static int cipso_v4_genopt(unsigned char *buf, u32 buf_len, const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr) { int ret_val; u32 iter; if (buf_len <= CIPSO_V4_HDR_LEN) return -ENOSPC; /* XXX - This code assumes only one tag per CIPSO option which isn't * really a good assumption to make but since we only support the MAC * tags right now it is a safe assumption. */ iter = 0; do { memset(buf, 0, buf_len); switch (doi_def->tags[iter]) { case CIPSO_V4_TAG_RBITMAP: ret_val = cipso_v4_gentag_rbm(doi_def, secattr, &buf[CIPSO_V4_HDR_LEN], buf_len - CIPSO_V4_HDR_LEN); break; case CIPSO_V4_TAG_ENUM: ret_val = cipso_v4_gentag_enum(doi_def, secattr, &buf[CIPSO_V4_HDR_LEN], buf_len - CIPSO_V4_HDR_LEN); break; case CIPSO_V4_TAG_RANGE: ret_val = cipso_v4_gentag_rng(doi_def, secattr, &buf[CIPSO_V4_HDR_LEN], buf_len - CIPSO_V4_HDR_LEN); break; case CIPSO_V4_TAG_LOCAL: ret_val = cipso_v4_gentag_loc(doi_def, secattr, &buf[CIPSO_V4_HDR_LEN], buf_len - CIPSO_V4_HDR_LEN); break; default: return -EPERM; } iter++; } while (ret_val < 0 && iter < CIPSO_V4_TAG_MAXCNT && doi_def->tags[iter] != CIPSO_V4_TAG_INVALID); if (ret_val < 0) return ret_val; cipso_v4_gentag_hdr(doi_def, buf, ret_val); return CIPSO_V4_HDR_LEN + ret_val; } static void opt_kfree_rcu(struct rcu_head *head) { kfree(container_of(head, struct ip_options_rcu, rcu)); } /** * cipso_v4_sock_setattr - Add a CIPSO option to a socket * @sk: the socket * @doi_def: the CIPSO DOI to use * @secattr: the specific security attributes of the socket * * Description: * Set the CIPSO option on the given socket using the DOI definition and * security attributes passed to the function. This function requires * exclusive access to @sk, which means it either needs to be in the * process of being created or locked. Returns zero on success and negative * values on failure. 
* */ int cipso_v4_sock_setattr(struct sock *sk, const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr) { int ret_val = -EPERM; unsigned char *buf = NULL; u32 buf_len; u32 opt_len; struct ip_options_rcu *old, *opt = NULL; struct inet_sock *sk_inet; struct inet_connection_sock *sk_conn; /* In the case of sock_create_lite(), the sock->sk field is not * defined yet but it is not a problem as the only users of these * "lite" PF_INET sockets are functions which do an accept() call * afterwards so we will label the socket as part of the accept(). */ if (sk == NULL) return 0; /* We allocate the maximum CIPSO option size here so we are probably * being a little wasteful, but it makes our life _much_ easier later * on and after all we are only talking about 40 bytes. */ buf_len = CIPSO_V4_OPT_LEN_MAX; buf = kmalloc(buf_len, GFP_ATOMIC); if (buf == NULL) { ret_val = -ENOMEM; goto socket_setattr_failure; } ret_val = cipso_v4_genopt(buf, buf_len, doi_def, secattr); if (ret_val < 0) goto socket_setattr_failure; buf_len = ret_val; /* We can't use ip_options_get() directly because it makes a call to * ip_options_get_alloc() which allocates memory with GFP_KERNEL and * we won't always have CAP_NET_RAW even though we _always_ want to * set the IPOPT_CIPSO option. */ opt_len = (buf_len + 3) & ~3; opt = kzalloc(sizeof(*opt) + opt_len, GFP_ATOMIC); if (opt == NULL) { ret_val = -ENOMEM; goto socket_setattr_failure; } memcpy(opt->opt.__data, buf, buf_len); opt->opt.optlen = opt_len; opt->opt.cipso = sizeof(struct iphdr); kfree(buf); buf = NULL; sk_inet = inet_sk(sk); old = rcu_dereference_protected(sk_inet->inet_opt, sock_owned_by_user(sk)); if (sk_inet->is_icsk) { sk_conn = inet_csk(sk); if (old) sk_conn->icsk_ext_hdr_len -= old->opt.optlen; sk_conn->icsk_ext_hdr_len += opt->opt.optlen; sk_conn->icsk_sync_mss(sk, sk_conn->icsk_pmtu_cookie); } rcu_assign_pointer(sk_inet->inet_opt, opt); if (old) call_rcu(&old->rcu, opt_kfree_rcu); return 0; socket_setattr_failure: kfree(buf); kfree(opt); return ret_val; } /** * cipso_v4_req_setattr - Add a CIPSO option to a connection request socket * @req: the connection request socket * @doi_def: the CIPSO DOI to use * @secattr: the specific security attributes of the socket * * Description: * Set the CIPSO option on the given socket using the DOI definition and * security attributes passed to the function. Returns zero on success and * negative values on failure. * */ int cipso_v4_req_setattr(struct request_sock *req, const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr) { int ret_val = -EPERM; unsigned char *buf = NULL; u32 buf_len; u32 opt_len; struct ip_options_rcu *opt = NULL; struct inet_request_sock *req_inet; /* We allocate the maximum CIPSO option size here so we are probably * being a little wasteful, but it makes our life _much_ easier later * on and after all we are only talking about 40 bytes. */ buf_len = CIPSO_V4_OPT_LEN_MAX; buf = kmalloc(buf_len, GFP_ATOMIC); if (buf == NULL) { ret_val = -ENOMEM; goto req_setattr_failure; } ret_val = cipso_v4_genopt(buf, buf_len, doi_def, secattr); if (ret_val < 0) goto req_setattr_failure; buf_len = ret_val; /* We can't use ip_options_get() directly because it makes a call to * ip_options_get_alloc() which allocates memory with GFP_KERNEL and * we won't always have CAP_NET_RAW even though we _always_ want to * set the IPOPT_CIPSO option. 
*/ opt_len = (buf_len + 3) & ~3; opt = kzalloc(sizeof(*opt) + opt_len, GFP_ATOMIC); if (opt == NULL) { ret_val = -ENOMEM; goto req_setattr_failure; } memcpy(opt->opt.__data, buf, buf_len); opt->opt.optlen = opt_len; opt->opt.cipso = sizeof(struct iphdr); kfree(buf); buf = NULL; req_inet = inet_rsk(req); opt = xchg(&req_inet->opt, opt); if (opt) call_rcu(&opt->rcu, opt_kfree_rcu); return 0; req_setattr_failure: kfree(buf); kfree(opt); return ret_val; } /** * cipso_v4_delopt - Delete the CIPSO option from a set of IP options * @opt_ptr: IP option pointer * * Description: * Deletes the CIPSO IP option from a set of IP options and makes the necessary * adjustments to the IP option structure. Returns zero on success, negative * values on failure. * */ static int cipso_v4_delopt(struct ip_options_rcu **opt_ptr) { int hdr_delta = 0; struct ip_options_rcu *opt = *opt_ptr; if (opt->opt.srr || opt->opt.rr || opt->opt.ts || opt->opt.router_alert) { u8 cipso_len; u8 cipso_off; unsigned char *cipso_ptr; int iter; int optlen_new; cipso_off = opt->opt.cipso - sizeof(struct iphdr); cipso_ptr = &opt->opt.__data[cipso_off]; cipso_len = cipso_ptr[1]; if (opt->opt.srr > opt->opt.cipso) opt->opt.srr -= cipso_len; if (opt->opt.rr > opt->opt.cipso) opt->opt.rr -= cipso_len; if (opt->opt.ts > opt->opt.cipso) opt->opt.ts -= cipso_len; if (opt->opt.router_alert > opt->opt.cipso) opt->opt.router_alert -= cipso_len; opt->opt.cipso = 0; memmove(cipso_ptr, cipso_ptr + cipso_len, opt->opt.optlen - cipso_off - cipso_len); /* determining the new total option length is tricky because of * the padding necessary, the only thing i can think to do at * this point is walk the options one-by-one, skipping the * padding at the end to determine the actual option size and * from there we can determine the new total option length */ iter = 0; optlen_new = 0; while (iter < opt->opt.optlen) if (opt->opt.__data[iter] != IPOPT_NOP) { iter += opt->opt.__data[iter + 1]; optlen_new = iter; } else iter++; hdr_delta = opt->opt.optlen; opt->opt.optlen = (optlen_new + 3) & ~3; hdr_delta -= opt->opt.optlen; } else { /* only the cipso option was present on the socket so we can * remove the entire option struct */ *opt_ptr = NULL; hdr_delta = opt->opt.optlen; call_rcu(&opt->rcu, opt_kfree_rcu); } return hdr_delta; } /** * cipso_v4_sock_delattr - Delete the CIPSO option from a socket * @sk: the socket * * Description: * Removes the CIPSO option from a socket, if present. * */ void cipso_v4_sock_delattr(struct sock *sk) { int hdr_delta; struct ip_options_rcu *opt; struct inet_sock *sk_inet; sk_inet = inet_sk(sk); opt = rcu_dereference_protected(sk_inet->inet_opt, 1); if (opt == NULL || opt->opt.cipso == 0) return; hdr_delta = cipso_v4_delopt(&sk_inet->inet_opt); if (sk_inet->is_icsk && hdr_delta > 0) { struct inet_connection_sock *sk_conn = inet_csk(sk); sk_conn->icsk_ext_hdr_len -= hdr_delta; sk_conn->icsk_sync_mss(sk, sk_conn->icsk_pmtu_cookie); } } /** * cipso_v4_req_delattr - Delete the CIPSO option from a request socket * @req: the request socket * * Description: * Removes the CIPSO option from a request socket, if present.
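 *
 * Added note (illustrative, not original kernel-doc): cipso_v4_delopt()
 * above returns hdr_delta, the number of option bytes removed.  For a
 * connection-oriented socket, cipso_v4_sock_delattr() feeds this back
 * into the TCP layer:
 *
 *	sk_conn->icsk_ext_hdr_len -= hdr_delta;
 *	sk_conn->icsk_sync_mss(sk, sk_conn->icsk_pmtu_cookie);
 *
 * e.g. dropping a lone 12-byte CIPSO option shrinks the extension-header
 * accounting by 12 bytes and lets icsk_sync_mss() recompute a
 * correspondingly larger MSS.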
* */ void cipso_v4_req_delattr(struct request_sock *req) { struct ip_options_rcu *opt; struct inet_request_sock *req_inet; req_inet = inet_rsk(req); opt = req_inet->opt; if (opt == NULL || opt->opt.cipso == 0) return; cipso_v4_delopt(&req_inet->opt); } /** * cipso_v4_getattr - Helper function for the cipso_v4_*_getattr functions * @cipso: the CIPSO v4 option * @secattr: the security attributes * * Description: * Inspect @cipso and return the security attributes in @secattr. Returns zero * on success and negative values on failure. * */ static int cipso_v4_getattr(const unsigned char *cipso, struct netlbl_lsm_secattr *secattr) { int ret_val = -ENOMSG; u32 doi; struct cipso_v4_doi *doi_def; if (cipso_v4_cache_check(cipso, cipso[1], secattr) == 0) return 0; doi = get_unaligned_be32(&cipso[2]); rcu_read_lock(); doi_def = cipso_v4_doi_search(doi); if (doi_def == NULL) goto getattr_return; /* XXX - This code assumes only one tag per CIPSO option which isn't * really a good assumption to make but since we only support the MAC * tags right now it is a safe assumption. */ switch (cipso[6]) { case CIPSO_V4_TAG_RBITMAP: ret_val = cipso_v4_parsetag_rbm(doi_def, &cipso[6], secattr); break; case CIPSO_V4_TAG_ENUM: ret_val = cipso_v4_parsetag_enum(doi_def, &cipso[6], secattr); break; case CIPSO_V4_TAG_RANGE: ret_val = cipso_v4_parsetag_rng(doi_def, &cipso[6], secattr); break; case CIPSO_V4_TAG_LOCAL: ret_val = cipso_v4_parsetag_loc(doi_def, &cipso[6], secattr); break; } if (ret_val == 0) secattr->type = NETLBL_NLTYPE_CIPSOV4; getattr_return: rcu_read_unlock(); return ret_val; } /** * cipso_v4_sock_getattr - Get the security attributes from a sock * @sk: the sock * @secattr: the security attributes * * Description: * Query @sk to see if there is a CIPSO option attached to the sock and if * there is return the CIPSO security attributes in @secattr. This function * requires that @sk be locked, or privately held, but it does not do any * locking itself. Returns zero on success and negative values on failure. * */ int cipso_v4_sock_getattr(struct sock *sk, struct netlbl_lsm_secattr *secattr) { struct ip_options_rcu *opt; int res = -ENOMSG; rcu_read_lock(); opt = rcu_dereference(inet_sk(sk)->inet_opt); if (opt && opt->opt.cipso) res = cipso_v4_getattr(opt->opt.__data + opt->opt.cipso - sizeof(struct iphdr), secattr); rcu_read_unlock(); return res; } /** * cipso_v4_skbuff_setattr - Set the CIPSO option on a packet * @skb: the packet * @doi_def: the CIPSO DOI to use * @secattr: the security attributes * * Description: * Set the CIPSO option on the given packet based on the security attributes. * Returns zero on success and negative values on failure.
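 *
 * Added layout note (illustrative): opt->opt.cipso stores the option's
 * offset from the start of the IP header, while __data[] begins right
 * after the 20-byte base header, hence the pointer arithmetic in
 * cipso_v4_sock_getattr() above:
 *
 *	opt->opt.__data + opt->opt.cipso - sizeof(struct iphdr)
 *
 * e.g. cipso == 20 selects __data[0], the first option byte following
 * the base header.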
* */ int cipso_v4_skbuff_setattr(struct sk_buff *skb, const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr) { int ret_val; struct iphdr *iph; struct ip_options *opt = &IPCB(skb)->opt; unsigned char buf[CIPSO_V4_OPT_LEN_MAX]; u32 buf_len = CIPSO_V4_OPT_LEN_MAX; u32 opt_len; int len_delta; ret_val = cipso_v4_genopt(buf, buf_len, doi_def, secattr); if (ret_val < 0) return ret_val; buf_len = ret_val; opt_len = (buf_len + 3) & ~3; /* we overwrite any existing options to ensure that we have enough * room for the CIPSO option, the reason is that we _need_ to guarantee * that the security label is applied to the packet - we do the same * thing when using the socket options and it hasn't caused a problem, * if we need to we can always revisit this choice later */ len_delta = opt_len - opt->optlen; /* if we don't ensure enough headroom we could panic on the skb_push() * call below so make sure we have enough, we are also "mangling" the * packet so we should probably do a copy-on-write call anyway */ ret_val = skb_cow(skb, skb_headroom(skb) + len_delta); if (ret_val < 0) return ret_val; if (len_delta > 0) { /* we assume that the header + opt->optlen have already been * "pushed" in ip_options_build() or similar */ iph = ip_hdr(skb); skb_push(skb, len_delta); memmove((char *)iph - len_delta, iph, iph->ihl << 2); skb_reset_network_header(skb); iph = ip_hdr(skb); } else if (len_delta < 0) { iph = ip_hdr(skb); memset(iph + 1, IPOPT_NOP, opt->optlen); } else iph = ip_hdr(skb); if (opt->optlen > 0) memset(opt, 0, sizeof(*opt)); opt->optlen = opt_len; opt->cipso = sizeof(struct iphdr); opt->is_changed = 1; /* we have to do the following because we are being called from a * netfilter hook which means the packet already has had the header * fields populated and the checksum calculated - yes this means we * are doing more work than needed but we do it to keep the core * stack clean and tidy */ memcpy(iph + 1, buf, buf_len); if (opt_len > buf_len) memset((char *)(iph + 1) + buf_len, 0, opt_len - buf_len); if (len_delta != 0) { iph->ihl = 5 + (opt_len >> 2); iph->tot_len = htons(skb->len); } ip_send_check(iph); return 0; } /** * cipso_v4_skbuff_delattr - Delete any CIPSO options from a packet * @skb: the packet * * Description: * Removes any and all CIPSO options from the given packet. Returns zero on * success, negative values on failure. * */ int cipso_v4_skbuff_delattr(struct sk_buff *skb) { int ret_val; struct iphdr *iph; struct ip_options *opt = &IPCB(skb)->opt; unsigned char *cipso_ptr; if (opt->cipso == 0) return 0; /* since we are changing the packet we should make a copy */ ret_val = skb_cow(skb, skb_headroom(skb)); if (ret_val < 0) return ret_val; /* the easiest thing to do is just replace the cipso option with noop * options since we don't change the size of the packet, although we * still need to recalculate the checksum */ iph = ip_hdr(skb); cipso_ptr = (unsigned char *)iph + opt->cipso; memset(cipso_ptr, IPOPT_NOOP, cipso_ptr[1]); opt->cipso = 0; opt->is_changed = 1; ip_send_check(iph); return 0; } /** * cipso_v4_skbuff_getattr - Get the security attributes from the CIPSO option * @skb: the packet * @secattr: the security attributes * * Description: * Parse the given packet's CIPSO option and return the security attributes. * Returns zero on success and negative values on failure. 
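 *
 * Added worked example (illustrative): cipso_v4_skbuff_setattr() above
 * computes len_delta = opt_len - opt->optlen.  Starting from a packet
 * with no options (optlen == 0) and a 10-byte generated option padded to
 * opt_len == 12, len_delta is 12, so the IP header is pushed forward
 * 12 bytes with skb_push()/memmove(), ihl becomes 5 + (12 >> 2) = 8
 * 32-bit words, and tot_len plus the header checksum are rewritten.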
* */ int cipso_v4_skbuff_getattr(const struct sk_buff *skb, struct netlbl_lsm_secattr *secattr) { return cipso_v4_getattr(CIPSO_V4_OPTPTR(skb), secattr); } /* * Setup Functions */ /** * cipso_v4_init - Initialize the CIPSO module * * Description: * Initialize the CIPSO module and prepare it for use. Returns zero on success * and negative values on failure. * */ static int __init cipso_v4_init(void) { int ret_val; ret_val = cipso_v4_cache_init(); if (ret_val != 0) panic("Failed to initialize the CIPSO/IPv4 cache (%d)\n", ret_val); return 0; } subsys_initcall(cipso_v4_init);
static int cipso_v4_delopt(struct ip_options **opt_ptr) { int hdr_delta = 0; struct ip_options *opt = *opt_ptr; if (opt->srr || opt->rr || opt->ts || opt->router_alert) { u8 cipso_len; u8 cipso_off; unsigned char *cipso_ptr; int iter; int optlen_new; cipso_off = opt->cipso - sizeof(struct iphdr); cipso_ptr = &opt->__data[cipso_off]; cipso_len = cipso_ptr[1]; if (opt->srr > opt->cipso) opt->srr -= cipso_len; if (opt->rr > opt->cipso) opt->rr -= cipso_len; if (opt->ts > opt->cipso) opt->ts -= cipso_len; if (opt->router_alert > opt->cipso) opt->router_alert -= cipso_len; opt->cipso = 0; memmove(cipso_ptr, cipso_ptr + cipso_len, opt->optlen - cipso_off - cipso_len); /* determining the new total option length is tricky because of * the padding necessary, the only thing i can think to do at * this point is walk the options one-by-one, skipping the * padding at the end to determine the actual option size and * from there we can determine the new total option length */ iter = 0; optlen_new = 0; while (iter < opt->optlen) if (opt->__data[iter] != IPOPT_NOP) { iter += opt->__data[iter + 1]; optlen_new = iter; } else iter++; hdr_delta = opt->optlen; opt->optlen = (optlen_new + 3) & ~3; hdr_delta -= opt->optlen; } else { /* only the cipso option was present on the socket so we can * remove the entire option struct */ *opt_ptr = NULL; hdr_delta = opt->optlen; kfree(opt); } return hdr_delta; }
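The function above is the pre-fix cipso_v4_delopt(): when CIPSO is the only option attached to the socket, it kfree()s the ip_options block immediately, even though lockless readers that fetched the options pointer without the socket lock may still be dereferencing it. That window is the race (CWE-362) behind CVE-2012-3552. A userspace-flavoured sketch of the hazard class follows; every name below is invented for illustration and none of it is kernel code:

#include <stdlib.h>
#include <string.h>

struct opts { int optlen; unsigned char data[40]; };
static struct opts *shared;              /* read by lockless readers */

static void reader(void)
{
	struct opts *o = shared;         /* stale pointer may be observed */
	if (o) {
		unsigned char copy[40];
		memcpy(copy, o->data, o->optlen);  /* can race with free() */
	}
}

static void writer(void)
{
	struct opts *o = shared;
	shared = NULL;
	free(o);                         /* immediate free: use-after-free
	                                  * window for a concurrent reader */
}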
static int cipso_v4_delopt(struct ip_options_rcu **opt_ptr) { int hdr_delta = 0; struct ip_options_rcu *opt = *opt_ptr; if (opt->opt.srr || opt->opt.rr || opt->opt.ts || opt->opt.router_alert) { u8 cipso_len; u8 cipso_off; unsigned char *cipso_ptr; int iter; int optlen_new; cipso_off = opt->opt.cipso - sizeof(struct iphdr); cipso_ptr = &opt->opt.__data[cipso_off]; cipso_len = cipso_ptr[1]; if (opt->opt.srr > opt->opt.cipso) opt->opt.srr -= cipso_len; if (opt->opt.rr > opt->opt.cipso) opt->opt.rr -= cipso_len; if (opt->opt.ts > opt->opt.cipso) opt->opt.ts -= cipso_len; if (opt->opt.router_alert > opt->opt.cipso) opt->opt.router_alert -= cipso_len; opt->opt.cipso = 0; memmove(cipso_ptr, cipso_ptr + cipso_len, opt->opt.optlen - cipso_off - cipso_len); /* determining the new total option length is tricky because of * the padding necessary, the only thing i can think to do at * this point is walk the options one-by-one, skipping the * padding at the end to determine the actual option size and * from there we can determine the new total option length */ iter = 0; optlen_new = 0; while (iter < opt->opt.optlen) if (opt->opt.__data[iter] != IPOPT_NOP) { iter += opt->opt.__data[iter + 1]; optlen_new = iter; } else iter++; hdr_delta = opt->opt.optlen; opt->opt.optlen = (optlen_new + 3) & ~3; hdr_delta -= opt->opt.optlen; } else { /* only the cipso option was present on the socket so we can * remove the entire option struct */ *opt_ptr = NULL; hdr_delta = opt->opt.optlen; call_rcu(&opt->rcu, opt_kfree_rcu); } return hdr_delta; }
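The patched version above closes that window by unpublishing the pointer first and deferring the free through RCU, so the memory is reclaimed only after every reader that could still hold the old pointer has left its rcu_read_lock() section. The callback pattern, restated from the surrounding file:

static void opt_kfree_rcu(struct rcu_head *head)
{
	/* recover the enclosing ip_options_rcu from its embedded rcu_head */
	kfree(container_of(head, struct ip_options_rcu, rcu));
}

On the writer side, *opt_ptr = NULL followed by call_rcu(&opt->rcu, opt_kfree_rcu) replaces the old kfree(opt).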
{'added': [(1860, 'static void opt_kfree_rcu(struct rcu_head *head)'), (1861, '{'), (1862, '\tkfree(container_of(head, struct ip_options_rcu, rcu));'), (1863, '}'), (1864, ''), (1887, '\tstruct ip_options_rcu *old, *opt = NULL;'), (1923, '\tmemcpy(opt->opt.__data, buf, buf_len);'), (1924, '\topt->opt.optlen = opt_len;'), (1925, '\topt->opt.cipso = sizeof(struct iphdr);'), (1930, ''), (1931, '\told = rcu_dereference_protected(sk_inet->inet_opt, sock_owned_by_user(sk));'), (1934, '\t\tif (old)'), (1935, '\t\t\tsk_conn->icsk_ext_hdr_len -= old->opt.optlen;'), (1936, '\t\tsk_conn->icsk_ext_hdr_len += opt->opt.optlen;'), (1939, '\trcu_assign_pointer(sk_inet->inet_opt, opt);'), (1940, '\tif (old)'), (1941, '\t\tcall_rcu(&old->rcu, opt_kfree_rcu);'), (1971, '\tstruct ip_options_rcu *opt = NULL;'), (1999, '\tmemcpy(opt->opt.__data, buf, buf_len);'), (2000, '\topt->opt.optlen = opt_len;'), (2001, '\topt->opt.cipso = sizeof(struct iphdr);'), (2007, '\tif (opt)'), (2008, '\t\tcall_rcu(&opt->rcu, opt_kfree_rcu);'), (2028, 'static int cipso_v4_delopt(struct ip_options_rcu **opt_ptr)'), (2031, '\tstruct ip_options_rcu *opt = *opt_ptr;'), (2033, '\tif (opt->opt.srr || opt->opt.rr || opt->opt.ts || opt->opt.router_alert) {'), (2040, '\t\tcipso_off = opt->opt.cipso - sizeof(struct iphdr);'), (2041, '\t\tcipso_ptr = &opt->opt.__data[cipso_off];'), (2044, '\t\tif (opt->opt.srr > opt->opt.cipso)'), (2045, '\t\t\topt->opt.srr -= cipso_len;'), (2046, '\t\tif (opt->opt.rr > opt->opt.cipso)'), (2047, '\t\t\topt->opt.rr -= cipso_len;'), (2048, '\t\tif (opt->opt.ts > opt->opt.cipso)'), (2049, '\t\t\topt->opt.ts -= cipso_len;'), (2050, '\t\tif (opt->opt.router_alert > opt->opt.cipso)'), (2051, '\t\t\topt->opt.router_alert -= cipso_len;'), (2052, '\t\topt->opt.cipso = 0;'), (2055, '\t\t\topt->opt.optlen - cipso_off - cipso_len);'), (2064, '\t\twhile (iter < opt->opt.optlen)'), (2065, '\t\t\tif (opt->opt.__data[iter] != IPOPT_NOP) {'), (2066, '\t\t\t\titer += opt->opt.__data[iter + 1];'), (2070, '\t\thdr_delta = opt->opt.optlen;'), (2071, '\t\topt->opt.optlen = (optlen_new + 3) & ~3;'), (2072, '\t\thdr_delta -= opt->opt.optlen;'), (2077, '\t\thdr_delta = opt->opt.optlen;'), (2078, '\t\tcall_rcu(&opt->rcu, opt_kfree_rcu);'), (2095, '\tstruct ip_options_rcu *opt;'), (2099, '\topt = rcu_dereference_protected(sk_inet->inet_opt, 1);'), (2100, '\tif (opt == NULL || opt->opt.cipso == 0)'), (2103, '\thdr_delta = cipso_v4_delopt(&sk_inet->inet_opt);'), (2121, '\tstruct ip_options_rcu *opt;'), (2126, '\tif (opt == NULL || opt->opt.cipso == 0)'), (2196, '\tstruct ip_options_rcu *opt;'), (2197, '\tint res = -ENOMSG;'), (2199, '\trcu_read_lock();'), (2200, '\topt = rcu_dereference(inet_sk(sk)->inet_opt);'), (2201, '\tif (opt && opt->opt.cipso)'), (2202, '\t\tres = cipso_v4_getattr(opt->opt.__data +'), (2203, '\t\t\t\t\t\topt->opt.cipso -'), (2204, '\t\t\t\t\t\tsizeof(struct iphdr),'), (2205, '\t\t\t\t secattr);'), (2206, '\trcu_read_unlock();'), (2207, '\treturn res;')], 'deleted': [(1882, '\tstruct ip_options *opt = NULL;'), (1918, '\tmemcpy(opt->__data, buf, buf_len);'), (1919, '\topt->optlen = opt_len;'), (1920, '\topt->cipso = sizeof(struct iphdr);'), (1927, '\t\tif (sk_inet->opt)'), (1928, '\t\t\tsk_conn->icsk_ext_hdr_len -= sk_inet->opt->optlen;'), (1929, '\t\tsk_conn->icsk_ext_hdr_len += opt->optlen;'), (1932, '\topt = xchg(&sk_inet->opt, opt);'), (1933, '\tkfree(opt);'), (1963, '\tstruct ip_options *opt = NULL;'), (1991, '\tmemcpy(opt->__data, buf, buf_len);'), (1992, '\topt->optlen = opt_len;'), (1993, '\topt->cipso = 
sizeof(struct iphdr);'), (1999, '\tkfree(opt);'), (2019, 'static int cipso_v4_delopt(struct ip_options **opt_ptr)'), (2022, '\tstruct ip_options *opt = *opt_ptr;'), (2024, '\tif (opt->srr || opt->rr || opt->ts || opt->router_alert) {'), (2031, '\t\tcipso_off = opt->cipso - sizeof(struct iphdr);'), (2032, '\t\tcipso_ptr = &opt->__data[cipso_off];'), (2035, '\t\tif (opt->srr > opt->cipso)'), (2036, '\t\t\topt->srr -= cipso_len;'), (2037, '\t\tif (opt->rr > opt->cipso)'), (2038, '\t\t\topt->rr -= cipso_len;'), (2039, '\t\tif (opt->ts > opt->cipso)'), (2040, '\t\t\topt->ts -= cipso_len;'), (2041, '\t\tif (opt->router_alert > opt->cipso)'), (2042, '\t\t\topt->router_alert -= cipso_len;'), (2043, '\t\topt->cipso = 0;'), (2046, '\t\t\topt->optlen - cipso_off - cipso_len);'), (2055, '\t\twhile (iter < opt->optlen)'), (2056, '\t\t\tif (opt->__data[iter] != IPOPT_NOP) {'), (2057, '\t\t\t\titer += opt->__data[iter + 1];'), (2061, '\t\thdr_delta = opt->optlen;'), (2062, '\t\topt->optlen = (optlen_new + 3) & ~3;'), (2063, '\t\thdr_delta -= opt->optlen;'), (2068, '\t\thdr_delta = opt->optlen;'), (2069, '\t\tkfree(opt);'), (2086, '\tstruct ip_options *opt;'), (2090, '\topt = sk_inet->opt;'), (2091, '\tif (opt == NULL || opt->cipso == 0)'), (2094, '\thdr_delta = cipso_v4_delopt(&sk_inet->opt);'), (2112, '\tstruct ip_options *opt;'), (2117, '\tif (opt == NULL || opt->cipso == 0)'), (2187, '\tstruct ip_options *opt;'), (2189, '\topt = inet_sk(sk)->opt;'), (2190, '\tif (opt == NULL || opt->cipso == 0)'), (2191, '\t\treturn -ENOMSG;'), (2192, ''), (2193, '\treturn cipso_v4_getattr(opt->__data + opt->cipso - sizeof(struct iphdr),'), (2194, '\t\t\t\tsecattr);')]}
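Read as a whole, the diff above is a type migration: every struct ip_options published on a socket becomes a struct ip_options_rcu, reads move under rcu_read_lock()/rcu_dereference(), and every immediate kfree() of a published block becomes call_rcu(). The wrapper type (as defined in the kernel headers of this era; reproduced here for reference) simply prepends an rcu_head to the unchanged options struct:

struct ip_options_rcu {
	struct rcu_head rcu;      /* grace-period bookkeeping for call_rcu() */
	struct ip_options opt;    /* the original options block, unchanged */
};

which is why the patched code reaches fields as opt->opt.optlen, opt->opt.cipso and so on.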
63
50
1360
7485
https://github.com/torvalds/linux
CVE-2012-3552
['CWE-362']
cipso_ipv4.c
cipso_v4_req_delattr
/* * CIPSO - Commercial IP Security Option * * This is an implementation of the CIPSO 2.2 protocol as specified in * draft-ietf-cipso-ipsecurity-01.txt with additional tag types as found in * FIPS-188. While CIPSO never became a full IETF RFC standard many vendors * have chosen to adopt the protocol and over the years it has become a * de-facto standard for labeled networking. * * The CIPSO draft specification can be found in the kernel's Documentation * directory as well as the following URL: * http://tools.ietf.org/id/draft-ietf-cipso-ipsecurity-01.txt * The FIPS-188 specification can be found at the following URL: * http://www.itl.nist.gov/fipspubs/fip188.htm * * Author: Paul Moore <paul.moore@hp.com> * */ /* * (c) Copyright Hewlett-Packard Development Company, L.P., 2006, 2008 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See * the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/init.h> #include <linux/types.h> #include <linux/rcupdate.h> #include <linux/list.h> #include <linux/spinlock.h> #include <linux/string.h> #include <linux/jhash.h> #include <linux/audit.h> #include <linux/slab.h> #include <net/ip.h> #include <net/icmp.h> #include <net/tcp.h> #include <net/netlabel.h> #include <net/cipso_ipv4.h> #include <asm/atomic.h> #include <asm/bug.h> #include <asm/unaligned.h> /* List of available DOI definitions */ /* XXX - This currently assumes a minimal number of different DOIs in use, * if in practice there are a lot of different DOIs this list should * probably be turned into a hash table or something similar so we * can do quick lookups. */ static DEFINE_SPINLOCK(cipso_v4_doi_list_lock); static LIST_HEAD(cipso_v4_doi_list); /* Label mapping cache */ int cipso_v4_cache_enabled = 1; int cipso_v4_cache_bucketsize = 10; #define CIPSO_V4_CACHE_BUCKETBITS 7 #define CIPSO_V4_CACHE_BUCKETS (1 << CIPSO_V4_CACHE_BUCKETBITS) #define CIPSO_V4_CACHE_REORDERLIMIT 10 struct cipso_v4_map_cache_bkt { spinlock_t lock; u32 size; struct list_head list; }; struct cipso_v4_map_cache_entry { u32 hash; unsigned char *key; size_t key_len; struct netlbl_lsm_cache *lsm_data; u32 activity; struct list_head list; }; static struct cipso_v4_map_cache_bkt *cipso_v4_cache = NULL; /* Restricted bitmap (tag #1) flags */ int cipso_v4_rbm_optfmt = 0; int cipso_v4_rbm_strictvalid = 1; /* * Protocol Constants */ /* Maximum size of the CIPSO IP option, derived from the fact that the maximum * IPv4 header size is 60 bytes and the base IPv4 header is 20 bytes long. */ #define CIPSO_V4_OPT_LEN_MAX 40 /* Length of the base CIPSO option, this includes the option type (1 byte), the * option length (1 byte), and the DOI (4 bytes). */ #define CIPSO_V4_HDR_LEN 6 /* Base length of the restrictive category bitmap tag (tag #1). */ #define CIPSO_V4_TAG_RBM_BLEN 4 /* Base length of the enumerated category tag (tag #2). */ #define CIPSO_V4_TAG_ENUM_BLEN 4 /* Base length of the ranged categories bitmap tag (tag #5). 
*/ #define CIPSO_V4_TAG_RNG_BLEN 4 /* The maximum number of category ranges permitted in the ranged category tag * (tag #5). You may note that the IETF draft states that the maximum number * of category ranges is 7, but if the low end of the last category range is * zero then it is possible to fit 8 category ranges because the zero should * be omitted. */ #define CIPSO_V4_TAG_RNG_CAT_MAX 8 /* Base length of the local tag (non-standard tag). * Tag definition (may change between kernel versions) * * 0 8 16 24 32 * +----------+----------+----------+----------+ * | 10000000 | 00000110 | 32-bit secid value | * +----------+----------+----------+----------+ * | in (host byte order)| * +----------+----------+ * */ #define CIPSO_V4_TAG_LOC_BLEN 6 /* * Helper Functions */ /** * cipso_v4_bitmap_walk - Walk a bitmap looking for a bit * @bitmap: the bitmap * @bitmap_len: length in bits * @offset: starting offset * @state: if non-zero, look for a set (1) bit else look for a cleared (0) bit * * Description: * Starting at @offset, walk the bitmap from left to right until either the * desired bit is found or we reach the end. Return the bit offset, -1 if * not found, or -2 if error. */ static int cipso_v4_bitmap_walk(const unsigned char *bitmap, u32 bitmap_len, u32 offset, u8 state) { u32 bit_spot; u32 byte_offset; unsigned char bitmask; unsigned char byte; /* gcc always rounds to zero when doing integer division */ byte_offset = offset / 8; byte = bitmap[byte_offset]; bit_spot = offset; bitmask = 0x80 >> (offset % 8); while (bit_spot < bitmap_len) { if ((state && (byte & bitmask) == bitmask) || (state == 0 && (byte & bitmask) == 0)) return bit_spot; bit_spot++; bitmask >>= 1; if (bitmask == 0) { byte = bitmap[++byte_offset]; bitmask = 0x80; } } return -1; } /** * cipso_v4_bitmap_setbit - Sets a single bit in a bitmap * @bitmap: the bitmap * @bit: the bit * @state: if non-zero, set the bit (1) else clear the bit (0) * * Description: * Set a single bit in the bitmask. Returns zero on success, negative values * on error. */ static void cipso_v4_bitmap_setbit(unsigned char *bitmap, u32 bit, u8 state) { u32 byte_spot; u8 bitmask; /* gcc always rounds to zero when doing integer division */ byte_spot = bit / 8; bitmask = 0x80 >> (bit % 8); if (state) bitmap[byte_spot] |= bitmask; else bitmap[byte_spot] &= ~bitmask; } /** * cipso_v4_cache_entry_free - Frees a cache entry * @entry: the entry to free * * Description: * This function frees the memory associated with a cache entry including the * LSM cache data if there are no longer any users, i.e. reference count == 0. * */ static void cipso_v4_cache_entry_free(struct cipso_v4_map_cache_entry *entry) { if (entry->lsm_data) netlbl_secattr_cache_free(entry->lsm_data); kfree(entry->key); kfree(entry); } /** * cipso_v4_map_cache_hash - Hashing function for the CIPSO cache * @key: the hash key * @key_len: the length of the key in bytes * * Description: * The CIPSO tag hashing function. Returns a 32-bit hash value. * */ static u32 cipso_v4_map_cache_hash(const unsigned char *key, u32 key_len) { return jhash(key, key_len, 0); } /* * Label Mapping Cache Functions */ /** * cipso_v4_cache_init - Initialize the CIPSO cache * * Description: * Initializes the CIPSO label mapping cache, this function should be called * before any of the other functions defined in this file. Returns zero on * success, negative values on error. 
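 *
 * Added worked example (illustrative): the category bitmaps above are
 * MSB-first, so bit offset N lives in byte N / 8 under mask
 * 0x80 >> (N % 8).  For offset 10:
 *
 *	byte_offset = 10 / 8 = 1
 *	bitmask     = 0x80 >> (10 % 8) = 0x20
 *
 * cipso_v4_bitmap_walk() shifts this mask right one bit per step and
 * reloads the next byte with mask 0x80 whenever the mask underflows to
 * zero.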
* */ static int cipso_v4_cache_init(void) { u32 iter; cipso_v4_cache = kcalloc(CIPSO_V4_CACHE_BUCKETS, sizeof(struct cipso_v4_map_cache_bkt), GFP_KERNEL); if (cipso_v4_cache == NULL) return -ENOMEM; for (iter = 0; iter < CIPSO_V4_CACHE_BUCKETS; iter++) { spin_lock_init(&cipso_v4_cache[iter].lock); cipso_v4_cache[iter].size = 0; INIT_LIST_HEAD(&cipso_v4_cache[iter].list); } return 0; } /** * cipso_v4_cache_invalidate - Invalidates the current CIPSO cache * * Description: * Invalidates and frees any entries in the CIPSO cache. * */ void cipso_v4_cache_invalidate(void) { struct cipso_v4_map_cache_entry *entry, *tmp_entry; u32 iter; for (iter = 0; iter < CIPSO_V4_CACHE_BUCKETS; iter++) { spin_lock_bh(&cipso_v4_cache[iter].lock); list_for_each_entry_safe(entry, tmp_entry, &cipso_v4_cache[iter].list, list) { list_del(&entry->list); cipso_v4_cache_entry_free(entry); } cipso_v4_cache[iter].size = 0; spin_unlock_bh(&cipso_v4_cache[iter].lock); } } /** * cipso_v4_cache_check - Check the CIPSO cache for a label mapping * @key: the buffer to check * @key_len: buffer length in bytes * @secattr: the security attribute struct to use * * Description: * This function checks the cache to see if a label mapping already exists for * the given key. If there is a match then the cache is adjusted and the * @secattr struct is populated with the correct LSM security attributes. The * cache is adjusted in the following manner if the entry is not already the * first in the cache bucket: * * 1. The cache entry's activity counter is incremented * 2. The previous (higher ranking) entry's activity counter is decremented * 3. If the difference between the two activity counters is greater than * CIPSO_V4_CACHE_REORDERLIMIT the two entries are swapped * * Returns zero on success, -ENOENT for a cache miss, and other negative values * on error. * */ static int cipso_v4_cache_check(const unsigned char *key, u32 key_len, struct netlbl_lsm_secattr *secattr) { u32 bkt; struct cipso_v4_map_cache_entry *entry; struct cipso_v4_map_cache_entry *prev_entry = NULL; u32 hash; if (!cipso_v4_cache_enabled) return -ENOENT; hash = cipso_v4_map_cache_hash(key, key_len); bkt = hash & (CIPSO_V4_CACHE_BUCKETS - 1); spin_lock_bh(&cipso_v4_cache[bkt].lock); list_for_each_entry(entry, &cipso_v4_cache[bkt].list, list) { if (entry->hash == hash && entry->key_len == key_len && memcmp(entry->key, key, key_len) == 0) { entry->activity += 1; atomic_inc(&entry->lsm_data->refcount); secattr->cache = entry->lsm_data; secattr->flags |= NETLBL_SECATTR_CACHE; secattr->type = NETLBL_NLTYPE_CIPSOV4; if (prev_entry == NULL) { spin_unlock_bh(&cipso_v4_cache[bkt].lock); return 0; } if (prev_entry->activity > 0) prev_entry->activity -= 1; if (entry->activity > prev_entry->activity && entry->activity - prev_entry->activity > CIPSO_V4_CACHE_REORDERLIMIT) { __list_del(entry->list.prev, entry->list.next); __list_add(&entry->list, prev_entry->list.prev, &prev_entry->list); } spin_unlock_bh(&cipso_v4_cache[bkt].lock); return 0; } prev_entry = entry; } spin_unlock_bh(&cipso_v4_cache[bkt].lock); return -ENOENT; } /** * cipso_v4_cache_add - Add an entry to the CIPSO cache * @skb: the packet * @secattr: the packet's security attributes * * Description: * Add a new entry into the CIPSO label mapping cache. Add the new entry to * head of the cache bucket's list, if the cache bucket is out of room remove * the last entry in the list first. It is important to note that there is * currently no checking for duplicate keys.
Returns zero on success, * negative values on failure. * */ int cipso_v4_cache_add(const struct sk_buff *skb, const struct netlbl_lsm_secattr *secattr) { int ret_val = -EPERM; u32 bkt; struct cipso_v4_map_cache_entry *entry = NULL; struct cipso_v4_map_cache_entry *old_entry = NULL; unsigned char *cipso_ptr; u32 cipso_ptr_len; if (!cipso_v4_cache_enabled || cipso_v4_cache_bucketsize <= 0) return 0; cipso_ptr = CIPSO_V4_OPTPTR(skb); cipso_ptr_len = cipso_ptr[1]; entry = kzalloc(sizeof(*entry), GFP_ATOMIC); if (entry == NULL) return -ENOMEM; entry->key = kmemdup(cipso_ptr, cipso_ptr_len, GFP_ATOMIC); if (entry->key == NULL) { ret_val = -ENOMEM; goto cache_add_failure; } entry->key_len = cipso_ptr_len; entry->hash = cipso_v4_map_cache_hash(cipso_ptr, cipso_ptr_len); atomic_inc(&secattr->cache->refcount); entry->lsm_data = secattr->cache; bkt = entry->hash & (CIPSO_V4_CACHE_BUCKETS - 1); spin_lock_bh(&cipso_v4_cache[bkt].lock); if (cipso_v4_cache[bkt].size < cipso_v4_cache_bucketsize) { list_add(&entry->list, &cipso_v4_cache[bkt].list); cipso_v4_cache[bkt].size += 1; } else { old_entry = list_entry(cipso_v4_cache[bkt].list.prev, struct cipso_v4_map_cache_entry, list); list_del(&old_entry->list); list_add(&entry->list, &cipso_v4_cache[bkt].list); cipso_v4_cache_entry_free(old_entry); } spin_unlock_bh(&cipso_v4_cache[bkt].lock); return 0; cache_add_failure: if (entry) cipso_v4_cache_entry_free(entry); return ret_val; } /* * DOI List Functions */ /** * cipso_v4_doi_search - Searches for a DOI definition * @doi: the DOI to search for * * Description: * Search the DOI definition list for a DOI definition with a DOI value that * matches @doi. The caller is responsible for calling rcu_read_[un]lock(). * Returns a pointer to the DOI definition on success and NULL on failure. */ static struct cipso_v4_doi *cipso_v4_doi_search(u32 doi) { struct cipso_v4_doi *iter; list_for_each_entry_rcu(iter, &cipso_v4_doi_list, list) if (iter->doi == doi && atomic_read(&iter->refcount)) return iter; return NULL; } /** * cipso_v4_doi_add - Add a new DOI to the CIPSO protocol engine * @doi_def: the DOI structure * @audit_info: NetLabel audit information * * Description: * The caller defines a new DOI for use by the CIPSO engine and calls this * function to add it to the list of acceptable domains. The caller must * ensure that the mapping table specified in @doi_def->map meets all of the * requirements of the mapping type (see cipso_ipv4.h for details). Returns * zero on success and non-zero on failure. 
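 *
 * Added arithmetic note (illustrative): with CIPSO_V4_CACHE_BUCKETBITS
 * set to 7 there are 1 << 7 = 128 buckets, and both cache paths above
 * pick one with
 *
 *	bkt = hash & (CIPSO_V4_CACHE_BUCKETS - 1);
 *
 * the usual power-of-two modulo (hash & 0x7f).  Each bucket holds at
 * most cipso_v4_cache_bucketsize entries (default 10); when a bucket is
 * full, cipso_v4_cache_add() evicts the entry at the tail of that
 * bucket's list.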
* */ int cipso_v4_doi_add(struct cipso_v4_doi *doi_def, struct netlbl_audit *audit_info) { int ret_val = -EINVAL; u32 iter; u32 doi; u32 doi_type; struct audit_buffer *audit_buf; doi = doi_def->doi; doi_type = doi_def->type; if (doi_def == NULL || doi_def->doi == CIPSO_V4_DOI_UNKNOWN) goto doi_add_return; for (iter = 0; iter < CIPSO_V4_TAG_MAXCNT; iter++) { switch (doi_def->tags[iter]) { case CIPSO_V4_TAG_RBITMAP: break; case CIPSO_V4_TAG_RANGE: case CIPSO_V4_TAG_ENUM: if (doi_def->type != CIPSO_V4_MAP_PASS) goto doi_add_return; break; case CIPSO_V4_TAG_LOCAL: if (doi_def->type != CIPSO_V4_MAP_LOCAL) goto doi_add_return; break; case CIPSO_V4_TAG_INVALID: if (iter == 0) goto doi_add_return; break; default: goto doi_add_return; } } atomic_set(&doi_def->refcount, 1); spin_lock(&cipso_v4_doi_list_lock); if (cipso_v4_doi_search(doi_def->doi) != NULL) { spin_unlock(&cipso_v4_doi_list_lock); ret_val = -EEXIST; goto doi_add_return; } list_add_tail_rcu(&doi_def->list, &cipso_v4_doi_list); spin_unlock(&cipso_v4_doi_list_lock); ret_val = 0; doi_add_return: audit_buf = netlbl_audit_start(AUDIT_MAC_CIPSOV4_ADD, audit_info); if (audit_buf != NULL) { const char *type_str; switch (doi_type) { case CIPSO_V4_MAP_TRANS: type_str = "trans"; break; case CIPSO_V4_MAP_PASS: type_str = "pass"; break; case CIPSO_V4_MAP_LOCAL: type_str = "local"; break; default: type_str = "(unknown)"; } audit_log_format(audit_buf, " cipso_doi=%u cipso_type=%s res=%u", doi, type_str, ret_val == 0 ? 1 : 0); audit_log_end(audit_buf); } return ret_val; } /** * cipso_v4_doi_free - Frees a DOI definition * @doi_def: the DOI definition * * Description: * This function frees all of the memory associated with a DOI definition. * */ void cipso_v4_doi_free(struct cipso_v4_doi *doi_def) { if (doi_def == NULL) return; switch (doi_def->type) { case CIPSO_V4_MAP_TRANS: kfree(doi_def->map.std->lvl.cipso); kfree(doi_def->map.std->lvl.local); kfree(doi_def->map.std->cat.cipso); kfree(doi_def->map.std->cat.local); break; } kfree(doi_def); } /** * cipso_v4_doi_free_rcu - Frees a DOI definition via the RCU pointer * @entry: the entry's RCU field * * Description: * This function is designed to be used as a callback to the call_rcu() * function so that the memory allocated to the DOI definition can be released * safely. * */ static void cipso_v4_doi_free_rcu(struct rcu_head *entry) { struct cipso_v4_doi *doi_def; doi_def = container_of(entry, struct cipso_v4_doi, rcu); cipso_v4_doi_free(doi_def); } /** * cipso_v4_doi_remove - Remove an existing DOI from the CIPSO protocol engine * @doi: the DOI value * @audit_info: NetLabel audit information * * Description: * Removes a DOI definition from the CIPSO engine. The NetLabel routines will * be called to release their own LSM domain mappings as well as our own * domain list. Returns zero on success and negative values on failure.
* */ int cipso_v4_doi_remove(u32 doi, struct netlbl_audit *audit_info) { int ret_val; struct cipso_v4_doi *doi_def; struct audit_buffer *audit_buf; spin_lock(&cipso_v4_doi_list_lock); doi_def = cipso_v4_doi_search(doi); if (doi_def == NULL) { spin_unlock(&cipso_v4_doi_list_lock); ret_val = -ENOENT; goto doi_remove_return; } if (!atomic_dec_and_test(&doi_def->refcount)) { spin_unlock(&cipso_v4_doi_list_lock); ret_val = -EBUSY; goto doi_remove_return; } list_del_rcu(&doi_def->list); spin_unlock(&cipso_v4_doi_list_lock); cipso_v4_cache_invalidate(); call_rcu(&doi_def->rcu, cipso_v4_doi_free_rcu); ret_val = 0; doi_remove_return: audit_buf = netlbl_audit_start(AUDIT_MAC_CIPSOV4_DEL, audit_info); if (audit_buf != NULL) { audit_log_format(audit_buf, " cipso_doi=%u res=%u", doi, ret_val == 0 ? 1 : 0); audit_log_end(audit_buf); } return ret_val; } /** * cipso_v4_doi_getdef - Returns a reference to a valid DOI definition * @doi: the DOI value * * Description: * Searches for a valid DOI definition and if one is found it is returned to * the caller. Otherwise NULL is returned. The caller must ensure that * rcu_read_lock() is held while accessing the returned definition and the DOI * definition reference count is decremented when the caller is done. * */ struct cipso_v4_doi *cipso_v4_doi_getdef(u32 doi) { struct cipso_v4_doi *doi_def; rcu_read_lock(); doi_def = cipso_v4_doi_search(doi); if (doi_def == NULL) goto doi_getdef_return; if (!atomic_inc_not_zero(&doi_def->refcount)) doi_def = NULL; doi_getdef_return: rcu_read_unlock(); return doi_def; } /** * cipso_v4_doi_putdef - Releases a reference for the given DOI definition * @doi_def: the DOI definition * * Description: * Releases a DOI definition reference obtained from cipso_v4_doi_getdef(). * */ void cipso_v4_doi_putdef(struct cipso_v4_doi *doi_def) { if (doi_def == NULL) return; if (!atomic_dec_and_test(&doi_def->refcount)) return; spin_lock(&cipso_v4_doi_list_lock); list_del_rcu(&doi_def->list); spin_unlock(&cipso_v4_doi_list_lock); cipso_v4_cache_invalidate(); call_rcu(&doi_def->rcu, cipso_v4_doi_free_rcu); } /** * cipso_v4_doi_walk - Iterate through the DOI definitions * @skip_cnt: skip past this number of DOI definitions, updated * @callback: callback for each DOI definition * @cb_arg: argument for the callback function * * Description: * Iterate over the DOI definition list, skipping the first @skip_cnt entries. * For each entry call @callback, if @callback returns a negative value stop * 'walking' through the list and return. Updates the value in @skip_cnt upon * return. Returns zero on success, negative values on failure. * */ int cipso_v4_doi_walk(u32 *skip_cnt, int (*callback) (struct cipso_v4_doi *doi_def, void *arg), void *cb_arg) { int ret_val = -ENOENT; u32 doi_cnt = 0; struct cipso_v4_doi *iter_doi; rcu_read_lock(); list_for_each_entry_rcu(iter_doi, &cipso_v4_doi_list, list) if (atomic_read(&iter_doi->refcount) > 0) { if (doi_cnt++ < *skip_cnt) continue; ret_val = callback(iter_doi, cb_arg); if (ret_val < 0) { doi_cnt--; goto doi_walk_return; } } doi_walk_return: rcu_read_unlock(); *skip_cnt = doi_cnt; return ret_val; } /* * Label Mapping Functions */ /** * cipso_v4_map_lvl_valid - Checks to see if the given level is understood * @doi_def: the DOI definition * @level: the level to check * * Description: * Checks the given level against the given DOI definition and returns a * negative value if the level does not have a valid mapping and a zero value * if the level is defined by the DOI. 
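 *
 * Added note (illustrative): cipso_v4_doi_getdef() above is the classic
 * RCU-plus-refcount lookup -- locate the object under rcu_read_lock(),
 * then take a reference only if the count is still non-zero:
 *
 *	rcu_read_lock();
 *	doi_def = cipso_v4_doi_search(doi);
 *	if (doi_def && !atomic_inc_not_zero(&doi_def->refcount))
 *		doi_def = NULL;
 *	rcu_read_unlock();
 *
 * atomic_inc_not_zero() is what keeps a lookup from resurrecting an
 * entry whose last reference was concurrently dropped by
 * cipso_v4_doi_remove() or cipso_v4_doi_putdef().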
* */ static int cipso_v4_map_lvl_valid(const struct cipso_v4_doi *doi_def, u8 level) { switch (doi_def->type) { case CIPSO_V4_MAP_PASS: return 0; case CIPSO_V4_MAP_TRANS: if (doi_def->map.std->lvl.cipso[level] < CIPSO_V4_INV_LVL) return 0; break; } return -EFAULT; } /** * cipso_v4_map_lvl_hton - Perform a level mapping from the host to the network * @doi_def: the DOI definition * @host_lvl: the host MLS level * @net_lvl: the network/CIPSO MLS level * * Description: * Perform a label mapping to translate a local MLS level to the correct * CIPSO level using the given DOI definition. Returns zero on success, * negative values otherwise. * */ static int cipso_v4_map_lvl_hton(const struct cipso_v4_doi *doi_def, u32 host_lvl, u32 *net_lvl) { switch (doi_def->type) { case CIPSO_V4_MAP_PASS: *net_lvl = host_lvl; return 0; case CIPSO_V4_MAP_TRANS: if (host_lvl < doi_def->map.std->lvl.local_size && doi_def->map.std->lvl.local[host_lvl] < CIPSO_V4_INV_LVL) { *net_lvl = doi_def->map.std->lvl.local[host_lvl]; return 0; } return -EPERM; } return -EINVAL; } /** * cipso_v4_map_lvl_ntoh - Perform a level mapping from the network to the host * @doi_def: the DOI definition * @net_lvl: the network/CIPSO MLS level * @host_lvl: the host MLS level * * Description: * Perform a label mapping to translate a CIPSO level to the correct local MLS * level using the given DOI definition. Returns zero on success, negative * values otherwise. * */ static int cipso_v4_map_lvl_ntoh(const struct cipso_v4_doi *doi_def, u32 net_lvl, u32 *host_lvl) { struct cipso_v4_std_map_tbl *map_tbl; switch (doi_def->type) { case CIPSO_V4_MAP_PASS: *host_lvl = net_lvl; return 0; case CIPSO_V4_MAP_TRANS: map_tbl = doi_def->map.std; if (net_lvl < map_tbl->lvl.cipso_size && map_tbl->lvl.cipso[net_lvl] < CIPSO_V4_INV_LVL) { *host_lvl = doi_def->map.std->lvl.cipso[net_lvl]; return 0; } return -EPERM; } return -EINVAL; } /** * cipso_v4_map_cat_rbm_valid - Checks to see if the category bitmap is valid * @doi_def: the DOI definition * @bitmap: category bitmap * @bitmap_len: bitmap length in bytes * * Description: * Checks the given category bitmap against the given DOI definition and * returns a negative value if any of the categories in the bitmap do not have * a valid mapping and a zero value if all of the categories are valid. * */ static int cipso_v4_map_cat_rbm_valid(const struct cipso_v4_doi *doi_def, const unsigned char *bitmap, u32 bitmap_len) { int cat = -1; u32 bitmap_len_bits = bitmap_len * 8; u32 cipso_cat_size; u32 *cipso_array; switch (doi_def->type) { case CIPSO_V4_MAP_PASS: return 0; case CIPSO_V4_MAP_TRANS: cipso_cat_size = doi_def->map.std->cat.cipso_size; cipso_array = doi_def->map.std->cat.cipso; for (;;) { cat = cipso_v4_bitmap_walk(bitmap, bitmap_len_bits, cat + 1, 1); if (cat < 0) break; if (cat >= cipso_cat_size || cipso_array[cat] >= CIPSO_V4_INV_CAT) return -EFAULT; } if (cat == -1) return 0; break; } return -EFAULT; } /** * cipso_v4_map_cat_rbm_hton - Perform a category mapping from host to network * @doi_def: the DOI definition * @secattr: the security attributes * @net_cat: the zero'd out category bitmap in network/CIPSO format * @net_cat_len: the length of the CIPSO bitmap in bytes * * Description: * Perform a label mapping to translate a local MLS category bitmap to the * correct CIPSO bitmap using the given DOI definition. Returns the minimum * size in bytes of the network bitmap on success, negative values otherwise. 
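 *
 * Added worked example (illustrative, hypothetical tables): for a TRANS
 * DOI whose level tables are
 *
 *	lvl.local = { [0] = 7, [1] = CIPSO_V4_INV_LVL }
 *	lvl.cipso = { [7] = 0 }
 *
 * cipso_v4_map_lvl_hton() above maps host level 0 to network level 7 and
 * rejects host level 1 with -EPERM, while cipso_v4_map_lvl_ntoh() maps
 * network level 7 back to host level 0.  CIPSO_V4_INV_LVL is the
 * "no mapping" sentinel in both directions.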
* */ static int cipso_v4_map_cat_rbm_hton(const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr, unsigned char *net_cat, u32 net_cat_len) { int host_spot = -1; u32 net_spot = CIPSO_V4_INV_CAT; u32 net_spot_max = 0; u32 net_clen_bits = net_cat_len * 8; u32 host_cat_size = 0; u32 *host_cat_array = NULL; if (doi_def->type == CIPSO_V4_MAP_TRANS) { host_cat_size = doi_def->map.std->cat.local_size; host_cat_array = doi_def->map.std->cat.local; } for (;;) { host_spot = netlbl_secattr_catmap_walk(secattr->attr.mls.cat, host_spot + 1); if (host_spot < 0) break; switch (doi_def->type) { case CIPSO_V4_MAP_PASS: net_spot = host_spot; break; case CIPSO_V4_MAP_TRANS: if (host_spot >= host_cat_size) return -EPERM; net_spot = host_cat_array[host_spot]; if (net_spot >= CIPSO_V4_INV_CAT) return -EPERM; break; } if (net_spot >= net_clen_bits) return -ENOSPC; cipso_v4_bitmap_setbit(net_cat, net_spot, 1); if (net_spot > net_spot_max) net_spot_max = net_spot; } if (++net_spot_max % 8) return net_spot_max / 8 + 1; return net_spot_max / 8; } /** * cipso_v4_map_cat_rbm_ntoh - Perform a category mapping from network to host * @doi_def: the DOI definition * @net_cat: the category bitmap in network/CIPSO format * @net_cat_len: the length of the CIPSO bitmap in bytes * @secattr: the security attributes * * Description: * Perform a label mapping to translate a CIPSO bitmap to the correct local * MLS category bitmap using the given DOI definition. Returns zero on * success, negative values on failure. * */ static int cipso_v4_map_cat_rbm_ntoh(const struct cipso_v4_doi *doi_def, const unsigned char *net_cat, u32 net_cat_len, struct netlbl_lsm_secattr *secattr) { int ret_val; int net_spot = -1; u32 host_spot = CIPSO_V4_INV_CAT; u32 net_clen_bits = net_cat_len * 8; u32 net_cat_size = 0; u32 *net_cat_array = NULL; if (doi_def->type == CIPSO_V4_MAP_TRANS) { net_cat_size = doi_def->map.std->cat.cipso_size; net_cat_array = doi_def->map.std->cat.cipso; } for (;;) { net_spot = cipso_v4_bitmap_walk(net_cat, net_clen_bits, net_spot + 1, 1); if (net_spot < 0) { if (net_spot == -2) return -EFAULT; return 0; } switch (doi_def->type) { case CIPSO_V4_MAP_PASS: host_spot = net_spot; break; case CIPSO_V4_MAP_TRANS: if (net_spot >= net_cat_size) return -EPERM; host_spot = net_cat_array[net_spot]; if (host_spot >= CIPSO_V4_INV_CAT) return -EPERM; break; } ret_val = netlbl_secattr_catmap_setbit(secattr->attr.mls.cat, host_spot, GFP_ATOMIC); if (ret_val != 0) return ret_val; } return -EINVAL; } /** * cipso_v4_map_cat_enum_valid - Checks to see if the categories are valid * @doi_def: the DOI definition * @enumcat: category list * @enumcat_len: length of the category list in bytes * * Description: * Checks the given categories against the given DOI definition and returns a * negative value if any of the categories do not have a valid mapping and a * zero value if all of the categories are valid. 
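 *
 * Added arithmetic note (illustrative): cipso_v4_map_cat_rbm_hton()
 * above returns the minimum byte count covering the highest set network
 * bit.  A highest category of 10 needs 11 bits, and 11 % 8 != 0, so the
 * bitmap is 11 / 8 + 1 = 2 bytes; a highest category of 15 needs 16 bits
 * and exactly 16 / 8 = 2 bytes.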
* */ static int cipso_v4_map_cat_enum_valid(const struct cipso_v4_doi *doi_def, const unsigned char *enumcat, u32 enumcat_len) { u16 cat; int cat_prev = -1; u32 iter; if (doi_def->type != CIPSO_V4_MAP_PASS || enumcat_len & 0x01) return -EFAULT; for (iter = 0; iter < enumcat_len; iter += 2) { cat = get_unaligned_be16(&enumcat[iter]); if (cat <= cat_prev) return -EFAULT; cat_prev = cat; } return 0; } /** * cipso_v4_map_cat_enum_hton - Perform a category mapping from host to network * @doi_def: the DOI definition * @secattr: the security attributes * @net_cat: the zero'd out category list in network/CIPSO format * @net_cat_len: the length of the CIPSO category list in bytes * * Description: * Perform a label mapping to translate a local MLS category bitmap to the * correct CIPSO category list using the given DOI definition. Returns the * size in bytes of the network category bitmap on success, negative values * otherwise. * */ static int cipso_v4_map_cat_enum_hton(const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr, unsigned char *net_cat, u32 net_cat_len) { int cat = -1; u32 cat_iter = 0; for (;;) { cat = netlbl_secattr_catmap_walk(secattr->attr.mls.cat, cat + 1); if (cat < 0) break; if ((cat_iter + 2) > net_cat_len) return -ENOSPC; *((__be16 *)&net_cat[cat_iter]) = htons(cat); cat_iter += 2; } return cat_iter; } /** * cipso_v4_map_cat_enum_ntoh - Perform a category mapping from network to host * @doi_def: the DOI definition * @net_cat: the category list in network/CIPSO format * @net_cat_len: the length of the CIPSO bitmap in bytes * @secattr: the security attributes * * Description: * Perform a label mapping to translate a CIPSO category list to the correct * local MLS category bitmap using the given DOI definition. Returns zero on * success, negative values on failure. * */ static int cipso_v4_map_cat_enum_ntoh(const struct cipso_v4_doi *doi_def, const unsigned char *net_cat, u32 net_cat_len, struct netlbl_lsm_secattr *secattr) { int ret_val; u32 iter; for (iter = 0; iter < net_cat_len; iter += 2) { ret_val = netlbl_secattr_catmap_setbit(secattr->attr.mls.cat, get_unaligned_be16(&net_cat[iter]), GFP_ATOMIC); if (ret_val != 0) return ret_val; } return 0; } /** * cipso_v4_map_cat_rng_valid - Checks to see if the categories are valid * @doi_def: the DOI definition * @rngcat: category list * @rngcat_len: length of the category list in bytes * * Description: * Checks the given categories against the given DOI definition and returns a * negative value if any of the categories do not have a valid mapping and a * zero value if all of the categories are valid. 
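 *
 * Added layout note (illustrative): the enumerated tag body is a plain
 * list of big-endian 16-bit categories, two bytes each, in strictly
 * increasing order (anything else is rejected by
 * cipso_v4_map_cat_enum_valid() above).  For the category set
 * {4, 71, 82} the payload is
 *
 *	00 04  00 47  00 52
 *
 * giving a tag length of 4 + 3 * 2 = 10 once the 4-byte tag header is
 * included.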
* */ static int cipso_v4_map_cat_rng_valid(const struct cipso_v4_doi *doi_def, const unsigned char *rngcat, u32 rngcat_len) { u16 cat_high; u16 cat_low; u32 cat_prev = CIPSO_V4_MAX_REM_CATS + 1; u32 iter; if (doi_def->type != CIPSO_V4_MAP_PASS || rngcat_len & 0x01) return -EFAULT; for (iter = 0; iter < rngcat_len; iter += 4) { cat_high = get_unaligned_be16(&rngcat[iter]); if ((iter + 4) <= rngcat_len) cat_low = get_unaligned_be16(&rngcat[iter + 2]); else cat_low = 0; if (cat_high > cat_prev) return -EFAULT; cat_prev = cat_low; } return 0; } /** * cipso_v4_map_cat_rng_hton - Perform a category mapping from host to network * @doi_def: the DOI definition * @secattr: the security attributes * @net_cat: the zero'd out category list in network/CIPSO format * @net_cat_len: the length of the CIPSO category list in bytes * * Description: * Perform a label mapping to translate a local MLS category bitmap to the * correct CIPSO category list using the given DOI definition. Returns the * size in bytes of the network category bitmap on success, negative values * otherwise. * */ static int cipso_v4_map_cat_rng_hton(const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr, unsigned char *net_cat, u32 net_cat_len) { int iter = -1; u16 array[CIPSO_V4_TAG_RNG_CAT_MAX * 2]; u32 array_cnt = 0; u32 cat_size = 0; /* make sure we don't overflow the 'array[]' variable */ if (net_cat_len > (CIPSO_V4_OPT_LEN_MAX - CIPSO_V4_HDR_LEN - CIPSO_V4_TAG_RNG_BLEN)) return -ENOSPC; for (;;) { iter = netlbl_secattr_catmap_walk(secattr->attr.mls.cat, iter + 1); if (iter < 0) break; cat_size += (iter == 0 ? 0 : sizeof(u16)); if (cat_size > net_cat_len) return -ENOSPC; array[array_cnt++] = iter; iter = netlbl_secattr_catmap_walk_rng(secattr->attr.mls.cat, iter); if (iter < 0) return -EFAULT; cat_size += sizeof(u16); if (cat_size > net_cat_len) return -ENOSPC; array[array_cnt++] = iter; } for (iter = 0; array_cnt > 0;) { *((__be16 *)&net_cat[iter]) = htons(array[--array_cnt]); iter += 2; array_cnt--; if (array[array_cnt] != 0) { *((__be16 *)&net_cat[iter]) = htons(array[array_cnt]); iter += 2; } } return cat_size; } /** * cipso_v4_map_cat_rng_ntoh - Perform a category mapping from network to host * @doi_def: the DOI definition * @net_cat: the category list in network/CIPSO format * @net_cat_len: the length of the CIPSO bitmap in bytes * @secattr: the security attributes * * Description: * Perform a label mapping to translate a CIPSO category list to the correct * local MLS category bitmap using the given DOI definition. Returns zero on * success, negative values on failure. * */ static int cipso_v4_map_cat_rng_ntoh(const struct cipso_v4_doi *doi_def, const unsigned char *net_cat, u32 net_cat_len, struct netlbl_lsm_secattr *secattr) { int ret_val; u32 net_iter; u16 cat_low; u16 cat_high; for (net_iter = 0; net_iter < net_cat_len; net_iter += 4) { cat_high = get_unaligned_be16(&net_cat[net_iter]); if ((net_iter + 4) <= net_cat_len) cat_low = get_unaligned_be16(&net_cat[net_iter + 2]); else cat_low = 0; ret_val = netlbl_secattr_catmap_setrng(secattr->attr.mls.cat, cat_low, cat_high, GFP_ATOMIC); if (ret_val != 0) return ret_val; } return 0; } /* * Protocol Handling Functions */ /** * cipso_v4_gentag_hdr - Generate a CIPSO option header * @doi_def: the DOI definition * @len: the total tag length in bytes, not including this header * @buf: the CIPSO option buffer * * Description: * Write a CIPSO header into the beginning of @buffer. 
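 *
 * Added layout note (illustrative): the ranged tag body is up to
 * CIPSO_V4_TAG_RNG_CAT_MAX pairs of big-endian 16-bit values, each pair
 * a category range written as top then bottom, ranges ordered highest
 * first, with a bottom value of zero omitted from the final pair.  For
 * the category set {1-3, 500} the payload produced by
 * cipso_v4_map_cat_rng_hton() above is
 *
 *	01 f4  01 f4  00 03  00 01
 *
 * i.e. the range 500-500 followed by the range 3-1.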
* */ static void cipso_v4_gentag_hdr(const struct cipso_v4_doi *doi_def, unsigned char *buf, u32 len) { buf[0] = IPOPT_CIPSO; buf[1] = CIPSO_V4_HDR_LEN + len; *(__be32 *)&buf[2] = htonl(doi_def->doi); } /** * cipso_v4_gentag_rbm - Generate a CIPSO restricted bitmap tag (type #1) * @doi_def: the DOI definition * @secattr: the security attributes * @buffer: the option buffer * @buffer_len: length of buffer in bytes * * Description: * Generate a CIPSO option using the restricted bitmap tag, tag type #1. The * actual buffer length may be larger than the indicated size due to * translation between host and network category bitmaps. Returns the size of * the tag on success, negative values on failure. * */ static int cipso_v4_gentag_rbm(const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr, unsigned char *buffer, u32 buffer_len) { int ret_val; u32 tag_len; u32 level; if ((secattr->flags & NETLBL_SECATTR_MLS_LVL) == 0) return -EPERM; ret_val = cipso_v4_map_lvl_hton(doi_def, secattr->attr.mls.lvl, &level); if (ret_val != 0) return ret_val; if (secattr->flags & NETLBL_SECATTR_MLS_CAT) { ret_val = cipso_v4_map_cat_rbm_hton(doi_def, secattr, &buffer[4], buffer_len - 4); if (ret_val < 0) return ret_val; /* This will send packets using the "optimized" format when * possible as specified in section 3.4.2.6 of the * CIPSO draft. */ if (cipso_v4_rbm_optfmt && ret_val > 0 && ret_val <= 10) tag_len = 14; else tag_len = 4 + ret_val; } else tag_len = 4; buffer[0] = CIPSO_V4_TAG_RBITMAP; buffer[1] = tag_len; buffer[3] = level; return tag_len; } /** * cipso_v4_parsetag_rbm - Parse a CIPSO restricted bitmap tag * @doi_def: the DOI definition * @tag: the CIPSO tag * @secattr: the security attributes * * Description: * Parse a CIPSO restricted bitmap tag (tag type #1) and return the security * attributes in @secattr. Return zero on success, negatives values on * failure. * */ static int cipso_v4_parsetag_rbm(const struct cipso_v4_doi *doi_def, const unsigned char *tag, struct netlbl_lsm_secattr *secattr) { int ret_val; u8 tag_len = tag[1]; u32 level; ret_val = cipso_v4_map_lvl_ntoh(doi_def, tag[3], &level); if (ret_val != 0) return ret_val; secattr->attr.mls.lvl = level; secattr->flags |= NETLBL_SECATTR_MLS_LVL; if (tag_len > 4) { secattr->attr.mls.cat = netlbl_secattr_catmap_alloc(GFP_ATOMIC); if (secattr->attr.mls.cat == NULL) return -ENOMEM; ret_val = cipso_v4_map_cat_rbm_ntoh(doi_def, &tag[4], tag_len - 4, secattr); if (ret_val != 0) { netlbl_secattr_catmap_free(secattr->attr.mls.cat); return ret_val; } secattr->flags |= NETLBL_SECATTR_MLS_CAT; } return 0; } /** * cipso_v4_gentag_enum - Generate a CIPSO enumerated tag (type #2) * @doi_def: the DOI definition * @secattr: the security attributes * @buffer: the option buffer * @buffer_len: length of buffer in bytes * * Description: * Generate a CIPSO option using the enumerated tag, tag type #2. Returns the * size of the tag on success, negative values on failure. 
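 *
 * Added worked example (illustrative, hypothetical DOI): for DOI 3,
 * network level 5 and no categories, cipso_v4_gentag_rbm() plus
 * cipso_v4_gentag_hdr() above emit the 10-byte option
 *
 *	86 0a 00 00 00 03 01 04 00 05
 *
 * i.e. IPOPT_CIPSO (0x86), total length 10, DOI 0x00000003, then tag
 * type 1, tag length 4, the zeroed alignment octet and the level.  The
 * setattr callers later pad this block to a 12-byte, 32-bit aligned
 * size.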
* */ static int cipso_v4_gentag_enum(const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr, unsigned char *buffer, u32 buffer_len) { int ret_val; u32 tag_len; u32 level; if (!(secattr->flags & NETLBL_SECATTR_MLS_LVL)) return -EPERM; ret_val = cipso_v4_map_lvl_hton(doi_def, secattr->attr.mls.lvl, &level); if (ret_val != 0) return ret_val; if (secattr->flags & NETLBL_SECATTR_MLS_CAT) { ret_val = cipso_v4_map_cat_enum_hton(doi_def, secattr, &buffer[4], buffer_len - 4); if (ret_val < 0) return ret_val; tag_len = 4 + ret_val; } else tag_len = 4; buffer[0] = CIPSO_V4_TAG_ENUM; buffer[1] = tag_len; buffer[3] = level; return tag_len; } /** * cipso_v4_parsetag_enum - Parse a CIPSO enumerated tag * @doi_def: the DOI definition * @tag: the CIPSO tag * @secattr: the security attributes * * Description: * Parse a CIPSO enumerated tag (tag type #2) and return the security * attributes in @secattr. Return zero on success, negatives values on * failure. * */ static int cipso_v4_parsetag_enum(const struct cipso_v4_doi *doi_def, const unsigned char *tag, struct netlbl_lsm_secattr *secattr) { int ret_val; u8 tag_len = tag[1]; u32 level; ret_val = cipso_v4_map_lvl_ntoh(doi_def, tag[3], &level); if (ret_val != 0) return ret_val; secattr->attr.mls.lvl = level; secattr->flags |= NETLBL_SECATTR_MLS_LVL; if (tag_len > 4) { secattr->attr.mls.cat = netlbl_secattr_catmap_alloc(GFP_ATOMIC); if (secattr->attr.mls.cat == NULL) return -ENOMEM; ret_val = cipso_v4_map_cat_enum_ntoh(doi_def, &tag[4], tag_len - 4, secattr); if (ret_val != 0) { netlbl_secattr_catmap_free(secattr->attr.mls.cat); return ret_val; } secattr->flags |= NETLBL_SECATTR_MLS_CAT; } return 0; } /** * cipso_v4_gentag_rng - Generate a CIPSO ranged tag (type #5) * @doi_def: the DOI definition * @secattr: the security attributes * @buffer: the option buffer * @buffer_len: length of buffer in bytes * * Description: * Generate a CIPSO option using the ranged tag, tag type #5. Returns the * size of the tag on success, negative values on failure. * */ static int cipso_v4_gentag_rng(const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr, unsigned char *buffer, u32 buffer_len) { int ret_val; u32 tag_len; u32 level; if (!(secattr->flags & NETLBL_SECATTR_MLS_LVL)) return -EPERM; ret_val = cipso_v4_map_lvl_hton(doi_def, secattr->attr.mls.lvl, &level); if (ret_val != 0) return ret_val; if (secattr->flags & NETLBL_SECATTR_MLS_CAT) { ret_val = cipso_v4_map_cat_rng_hton(doi_def, secattr, &buffer[4], buffer_len - 4); if (ret_val < 0) return ret_val; tag_len = 4 + ret_val; } else tag_len = 4; buffer[0] = CIPSO_V4_TAG_RANGE; buffer[1] = tag_len; buffer[3] = level; return tag_len; } /** * cipso_v4_parsetag_rng - Parse a CIPSO ranged tag * @doi_def: the DOI definition * @tag: the CIPSO tag * @secattr: the security attributes * * Description: * Parse a CIPSO ranged tag (tag type #5) and return the security attributes * in @secattr. Return zero on success, negatives values on failure. 
* */ static int cipso_v4_parsetag_rng(const struct cipso_v4_doi *doi_def, const unsigned char *tag, struct netlbl_lsm_secattr *secattr) { int ret_val; u8 tag_len = tag[1]; u32 level; ret_val = cipso_v4_map_lvl_ntoh(doi_def, tag[3], &level); if (ret_val != 0) return ret_val; secattr->attr.mls.lvl = level; secattr->flags |= NETLBL_SECATTR_MLS_LVL; if (tag_len > 4) { secattr->attr.mls.cat = netlbl_secattr_catmap_alloc(GFP_ATOMIC); if (secattr->attr.mls.cat == NULL) return -ENOMEM; ret_val = cipso_v4_map_cat_rng_ntoh(doi_def, &tag[4], tag_len - 4, secattr); if (ret_val != 0) { netlbl_secattr_catmap_free(secattr->attr.mls.cat); return ret_val; } secattr->flags |= NETLBL_SECATTR_MLS_CAT; } return 0; } /** * cipso_v4_gentag_loc - Generate a CIPSO local tag (non-standard) * @doi_def: the DOI definition * @secattr: the security attributes * @buffer: the option buffer * @buffer_len: length of buffer in bytes * * Description: * Generate a CIPSO option using the local tag. Returns the size of the tag * on success, negative values on failure. * */ static int cipso_v4_gentag_loc(const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr, unsigned char *buffer, u32 buffer_len) { if (!(secattr->flags & NETLBL_SECATTR_SECID)) return -EPERM; buffer[0] = CIPSO_V4_TAG_LOCAL; buffer[1] = CIPSO_V4_TAG_LOC_BLEN; *(u32 *)&buffer[2] = secattr->attr.secid; return CIPSO_V4_TAG_LOC_BLEN; } /** * cipso_v4_parsetag_loc - Parse a CIPSO local tag * @doi_def: the DOI definition * @tag: the CIPSO tag * @secattr: the security attributes * * Description: * Parse a CIPSO local tag and return the security attributes in @secattr. * Return zero on success, negatives values on failure. * */ static int cipso_v4_parsetag_loc(const struct cipso_v4_doi *doi_def, const unsigned char *tag, struct netlbl_lsm_secattr *secattr) { secattr->attr.secid = *(u32 *)&tag[2]; secattr->flags |= NETLBL_SECATTR_SECID; return 0; } /** * cipso_v4_validate - Validate a CIPSO option * @option: the start of the option, on error it is set to point to the error * * Description: * This routine is called to validate a CIPSO option, it checks all of the * fields to ensure that they are at least valid, see the draft snippet below * for details. If the option is valid then a zero value is returned and * the value of @option is unchanged. If the option is invalid then a * non-zero value is returned and @option is adjusted to point to the * offending portion of the option. From the IETF draft ... * * "If any field within the CIPSO options, such as the DOI identifier, is not * recognized the IP datagram is discarded and an ICMP 'parameter problem' * (type 12) is generated and returned. The ICMP code field is set to 'bad * parameter' (code 0) and the pointer is set to the start of the CIPSO field * that is unrecognized." 
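 *
 * Note that @skb may be NULL when the option is being validated for a
 * local sender, e.g. via setsockopt(), rather than for a received
 * packet; the CIPSO_V4_TAG_LOCAL case must check for this before
 * dereferencing @skb.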
 *
 */
int cipso_v4_validate(const struct sk_buff *skb, unsigned char **option)
{
	unsigned char *opt = *option;
	unsigned char *tag;
	unsigned char opt_iter;
	unsigned char err_offset = 0;
	u8 opt_len;
	u8 tag_len;
	struct cipso_v4_doi *doi_def = NULL;
	u32 tag_iter;

	/* caller already checks for length values that are too large */
	opt_len = opt[1];
	if (opt_len < 8) {
		err_offset = 1;
		goto validate_return;
	}

	rcu_read_lock();
	doi_def = cipso_v4_doi_search(get_unaligned_be32(&opt[2]));
	if (doi_def == NULL) {
		err_offset = 2;
		goto validate_return_locked;
	}

	opt_iter = CIPSO_V4_HDR_LEN;
	tag = opt + opt_iter;
	while (opt_iter < opt_len) {
		for (tag_iter = 0; doi_def->tags[tag_iter] != tag[0];)
			if (doi_def->tags[tag_iter] == CIPSO_V4_TAG_INVALID ||
			    ++tag_iter == CIPSO_V4_TAG_MAXCNT) {
				err_offset = opt_iter;
				goto validate_return_locked;
			}

		tag_len = tag[1];
		if (tag_len > (opt_len - opt_iter)) {
			err_offset = opt_iter + 1;
			goto validate_return_locked;
		}

		switch (tag[0]) {
		case CIPSO_V4_TAG_RBITMAP:
			if (tag_len < CIPSO_V4_TAG_RBM_BLEN) {
				err_offset = opt_iter + 1;
				goto validate_return_locked;
			}

			/* We are already going to do all the verification
			 * necessary at the socket layer so from our point of
			 * view it is safe to turn these checks off (and less
			 * work), however, the CIPSO draft says we should do
			 * all the CIPSO validations here but it doesn't
			 * really specify _exactly_ what we need to validate
			 * ... so, just make it a sysctl tunable. */
			if (cipso_v4_rbm_strictvalid) {
				if (cipso_v4_map_lvl_valid(doi_def,
							   tag[3]) < 0) {
					err_offset = opt_iter + 3;
					goto validate_return_locked;
				}
				if (tag_len > CIPSO_V4_TAG_RBM_BLEN &&
				    cipso_v4_map_cat_rbm_valid(doi_def,
							    &tag[4],
							    tag_len - 4) < 0) {
					err_offset = opt_iter + 4;
					goto validate_return_locked;
				}
			}
			break;
		case CIPSO_V4_TAG_ENUM:
			if (tag_len < CIPSO_V4_TAG_ENUM_BLEN) {
				err_offset = opt_iter + 1;
				goto validate_return_locked;
			}

			if (cipso_v4_map_lvl_valid(doi_def,
						   tag[3]) < 0) {
				err_offset = opt_iter + 3;
				goto validate_return_locked;
			}
			if (tag_len > CIPSO_V4_TAG_ENUM_BLEN &&
			    cipso_v4_map_cat_enum_valid(doi_def,
							&tag[4],
							tag_len - 4) < 0) {
				err_offset = opt_iter + 4;
				goto validate_return_locked;
			}
			break;
		case CIPSO_V4_TAG_RANGE:
			if (tag_len < CIPSO_V4_TAG_RNG_BLEN) {
				err_offset = opt_iter + 1;
				goto validate_return_locked;
			}

			if (cipso_v4_map_lvl_valid(doi_def,
						   tag[3]) < 0) {
				err_offset = opt_iter + 3;
				goto validate_return_locked;
			}
			if (tag_len > CIPSO_V4_TAG_RNG_BLEN &&
			    cipso_v4_map_cat_rng_valid(doi_def,
						       &tag[4],
						       tag_len - 4) < 0) {
				err_offset = opt_iter + 4;
				goto validate_return_locked;
			}
			break;
		case CIPSO_V4_TAG_LOCAL:
			/* This is a non-standard tag that we only allow for
			 * local connections, so if the incoming interface is
			 * not the loopback device drop the packet. Further,
			 * there is no skb when the option is validated for a
			 * local sender, e.g. via setsockopt(), so check for
			 * a NULL skb before looking at the device flags. */
			if (!skb || !(skb->dev->flags & IFF_LOOPBACK)) {
				err_offset = opt_iter;
				goto validate_return_locked;
			}
			if (tag_len != CIPSO_V4_TAG_LOC_BLEN) {
				err_offset = opt_iter + 1;
				goto validate_return_locked;
			}
			break;
		default:
			err_offset = opt_iter;
			goto validate_return_locked;
		}

		tag += tag_len;
		opt_iter += tag_len;
	}

validate_return_locked:
	rcu_read_unlock();
validate_return:
	*option = opt + err_offset;
	return err_offset;
}

/**
 * cipso_v4_error - Send the correct response for a bad packet
 * @skb: the packet
 * @error: the error code
 * @gateway: CIPSO gateway flag
 *
 * Description:
 * Based on the error code given in @error, send an ICMP error message back to
 * the originating host. From the IETF draft ...
* * "If the contents of the CIPSO [option] are valid but the security label is * outside of the configured host or port label range, the datagram is * discarded and an ICMP 'destination unreachable' (type 3) is generated and * returned. The code field of the ICMP is set to 'communication with * destination network administratively prohibited' (code 9) or to * 'communication with destination host administratively prohibited' * (code 10). The value of the code is dependent on whether the originator * of the ICMP message is acting as a CIPSO host or a CIPSO gateway. The * recipient of the ICMP message MUST be able to handle either value. The * same procedure is performed if a CIPSO [option] can not be added to an * IP packet because it is too large to fit in the IP options area." * * "If the error is triggered by receipt of an ICMP message, the message is * discarded and no response is permitted (consistent with general ICMP * processing rules)." * */ void cipso_v4_error(struct sk_buff *skb, int error, u32 gateway) { if (ip_hdr(skb)->protocol == IPPROTO_ICMP || error != -EACCES) return; if (gateway) icmp_send(skb, ICMP_DEST_UNREACH, ICMP_NET_ANO, 0); else icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_ANO, 0); } /** * cipso_v4_genopt - Generate a CIPSO option * @buf: the option buffer * @buf_len: the size of opt_buf * @doi_def: the CIPSO DOI to use * @secattr: the security attributes * * Description: * Generate a CIPSO option using the DOI definition and security attributes * passed to the function. Returns the length of the option on success and * negative values on failure. * */ static int cipso_v4_genopt(unsigned char *buf, u32 buf_len, const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr) { int ret_val; u32 iter; if (buf_len <= CIPSO_V4_HDR_LEN) return -ENOSPC; /* XXX - This code assumes only one tag per CIPSO option which isn't * really a good assumption to make but since we only support the MAC * tags right now it is a safe assumption. */ iter = 0; do { memset(buf, 0, buf_len); switch (doi_def->tags[iter]) { case CIPSO_V4_TAG_RBITMAP: ret_val = cipso_v4_gentag_rbm(doi_def, secattr, &buf[CIPSO_V4_HDR_LEN], buf_len - CIPSO_V4_HDR_LEN); break; case CIPSO_V4_TAG_ENUM: ret_val = cipso_v4_gentag_enum(doi_def, secattr, &buf[CIPSO_V4_HDR_LEN], buf_len - CIPSO_V4_HDR_LEN); break; case CIPSO_V4_TAG_RANGE: ret_val = cipso_v4_gentag_rng(doi_def, secattr, &buf[CIPSO_V4_HDR_LEN], buf_len - CIPSO_V4_HDR_LEN); break; case CIPSO_V4_TAG_LOCAL: ret_val = cipso_v4_gentag_loc(doi_def, secattr, &buf[CIPSO_V4_HDR_LEN], buf_len - CIPSO_V4_HDR_LEN); break; default: return -EPERM; } iter++; } while (ret_val < 0 && iter < CIPSO_V4_TAG_MAXCNT && doi_def->tags[iter] != CIPSO_V4_TAG_INVALID); if (ret_val < 0) return ret_val; cipso_v4_gentag_hdr(doi_def, buf, ret_val); return CIPSO_V4_HDR_LEN + ret_val; } /** * cipso_v4_sock_setattr - Add a CIPSO option to a socket * @sk: the socket * @doi_def: the CIPSO DOI to use * @secattr: the specific security attributes of the socket * * Description: * Set the CIPSO option on the given socket using the DOI definition and * security attributes passed to the function. This function requires * exclusive access to @sk, which means it either needs to be in the * process of being created or locked. Returns zero on success and negative * values on failure. 
* */ int cipso_v4_sock_setattr(struct sock *sk, const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr) { int ret_val = -EPERM; unsigned char *buf = NULL; u32 buf_len; u32 opt_len; struct ip_options *opt = NULL; struct inet_sock *sk_inet; struct inet_connection_sock *sk_conn; /* In the case of sock_create_lite(), the sock->sk field is not * defined yet but it is not a problem as the only users of these * "lite" PF_INET sockets are functions which do an accept() call * afterwards so we will label the socket as part of the accept(). */ if (sk == NULL) return 0; /* We allocate the maximum CIPSO option size here so we are probably * being a little wasteful, but it makes our life _much_ easier later * on and after all we are only talking about 40 bytes. */ buf_len = CIPSO_V4_OPT_LEN_MAX; buf = kmalloc(buf_len, GFP_ATOMIC); if (buf == NULL) { ret_val = -ENOMEM; goto socket_setattr_failure; } ret_val = cipso_v4_genopt(buf, buf_len, doi_def, secattr); if (ret_val < 0) goto socket_setattr_failure; buf_len = ret_val; /* We can't use ip_options_get() directly because it makes a call to * ip_options_get_alloc() which allocates memory with GFP_KERNEL and * we won't always have CAP_NET_RAW even though we _always_ want to * set the IPOPT_CIPSO option. */ opt_len = (buf_len + 3) & ~3; opt = kzalloc(sizeof(*opt) + opt_len, GFP_ATOMIC); if (opt == NULL) { ret_val = -ENOMEM; goto socket_setattr_failure; } memcpy(opt->__data, buf, buf_len); opt->optlen = opt_len; opt->cipso = sizeof(struct iphdr); kfree(buf); buf = NULL; sk_inet = inet_sk(sk); if (sk_inet->is_icsk) { sk_conn = inet_csk(sk); if (sk_inet->opt) sk_conn->icsk_ext_hdr_len -= sk_inet->opt->optlen; sk_conn->icsk_ext_hdr_len += opt->optlen; sk_conn->icsk_sync_mss(sk, sk_conn->icsk_pmtu_cookie); } opt = xchg(&sk_inet->opt, opt); kfree(opt); return 0; socket_setattr_failure: kfree(buf); kfree(opt); return ret_val; } /** * cipso_v4_req_setattr - Add a CIPSO option to a connection request socket * @req: the connection request socket * @doi_def: the CIPSO DOI to use * @secattr: the specific security attributes of the socket * * Description: * Set the CIPSO option on the given socket using the DOI definition and * security attributes passed to the function. Returns zero on success and * negative values on failure. * */ int cipso_v4_req_setattr(struct request_sock *req, const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr) { int ret_val = -EPERM; unsigned char *buf = NULL; u32 buf_len; u32 opt_len; struct ip_options *opt = NULL; struct inet_request_sock *req_inet; /* We allocate the maximum CIPSO option size here so we are probably * being a little wasteful, but it makes our life _much_ easier later * on and after all we are only talking about 40 bytes. */ buf_len = CIPSO_V4_OPT_LEN_MAX; buf = kmalloc(buf_len, GFP_ATOMIC); if (buf == NULL) { ret_val = -ENOMEM; goto req_setattr_failure; } ret_val = cipso_v4_genopt(buf, buf_len, doi_def, secattr); if (ret_val < 0) goto req_setattr_failure; buf_len = ret_val; /* We can't use ip_options_get() directly because it makes a call to * ip_options_get_alloc() which allocates memory with GFP_KERNEL and * we won't always have CAP_NET_RAW even though we _always_ want to * set the IPOPT_CIPSO option. 
*/ opt_len = (buf_len + 3) & ~3; opt = kzalloc(sizeof(*opt) + opt_len, GFP_ATOMIC); if (opt == NULL) { ret_val = -ENOMEM; goto req_setattr_failure; } memcpy(opt->__data, buf, buf_len); opt->optlen = opt_len; opt->cipso = sizeof(struct iphdr); kfree(buf); buf = NULL; req_inet = inet_rsk(req); opt = xchg(&req_inet->opt, opt); kfree(opt); return 0; req_setattr_failure: kfree(buf); kfree(opt); return ret_val; } /** * cipso_v4_delopt - Delete the CIPSO option from a set of IP options * @opt_ptr: IP option pointer * * Description: * Deletes the CIPSO IP option from a set of IP options and makes the necessary * adjustments to the IP option structure. Returns zero on success, negative * values on failure. * */ static int cipso_v4_delopt(struct ip_options **opt_ptr) { int hdr_delta = 0; struct ip_options *opt = *opt_ptr; if (opt->srr || opt->rr || opt->ts || opt->router_alert) { u8 cipso_len; u8 cipso_off; unsigned char *cipso_ptr; int iter; int optlen_new; cipso_off = opt->cipso - sizeof(struct iphdr); cipso_ptr = &opt->__data[cipso_off]; cipso_len = cipso_ptr[1]; if (opt->srr > opt->cipso) opt->srr -= cipso_len; if (opt->rr > opt->cipso) opt->rr -= cipso_len; if (opt->ts > opt->cipso) opt->ts -= cipso_len; if (opt->router_alert > opt->cipso) opt->router_alert -= cipso_len; opt->cipso = 0; memmove(cipso_ptr, cipso_ptr + cipso_len, opt->optlen - cipso_off - cipso_len); /* determining the new total option length is tricky because of * the padding necessary, the only thing i can think to do at * this point is walk the options one-by-one, skipping the * padding at the end to determine the actual option size and * from there we can determine the new total option length */ iter = 0; optlen_new = 0; while (iter < opt->optlen) if (opt->__data[iter] != IPOPT_NOP) { iter += opt->__data[iter + 1]; optlen_new = iter; } else iter++; hdr_delta = opt->optlen; opt->optlen = (optlen_new + 3) & ~3; hdr_delta -= opt->optlen; } else { /* only the cipso option was present on the socket so we can * remove the entire option struct */ *opt_ptr = NULL; hdr_delta = opt->optlen; kfree(opt); } return hdr_delta; } /** * cipso_v4_sock_delattr - Delete the CIPSO option from a socket * @sk: the socket * * Description: * Removes the CIPSO option from a socket, if present. * */ void cipso_v4_sock_delattr(struct sock *sk) { int hdr_delta; struct ip_options *opt; struct inet_sock *sk_inet; sk_inet = inet_sk(sk); opt = sk_inet->opt; if (opt == NULL || opt->cipso == 0) return; hdr_delta = cipso_v4_delopt(&sk_inet->opt); if (sk_inet->is_icsk && hdr_delta > 0) { struct inet_connection_sock *sk_conn = inet_csk(sk); sk_conn->icsk_ext_hdr_len -= hdr_delta; sk_conn->icsk_sync_mss(sk, sk_conn->icsk_pmtu_cookie); } } /** * cipso_v4_req_delattr - Delete the CIPSO option from a request socket * @reg: the request socket * * Description: * Removes the CIPSO option from a request socket, if present. * */ void cipso_v4_req_delattr(struct request_sock *req) { struct ip_options *opt; struct inet_request_sock *req_inet; req_inet = inet_rsk(req); opt = req_inet->opt; if (opt == NULL || opt->cipso == 0) return; cipso_v4_delopt(&req_inet->opt); } /** * cipso_v4_getattr - Helper function for the cipso_v4_*_getattr functions * @cipso: the CIPSO v4 option * @secattr: the security attributes * * Description: * Inspect @cipso and return the security attributes in @secattr. Returns zero * on success and negative values on failure. 
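 *
 * The first tag always begins at byte offset 6 (CIPSO_V4_HDR_LEN) within
 * the option, which is why the tag type dispatch below switches on
 * cipso[6].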
 *
 */
static int cipso_v4_getattr(const unsigned char *cipso,
			    struct netlbl_lsm_secattr *secattr)
{
	int ret_val = -ENOMSG;
	u32 doi;
	struct cipso_v4_doi *doi_def;

	if (cipso_v4_cache_check(cipso, cipso[1], secattr) == 0)
		return 0;

	doi = get_unaligned_be32(&cipso[2]);
	rcu_read_lock();
	doi_def = cipso_v4_doi_search(doi);
	if (doi_def == NULL)
		goto getattr_return;
	/* XXX - This code assumes only one tag per CIPSO option which isn't
	 * really a good assumption to make but since we only support the MAC
	 * tags right now it is a safe assumption. */
	switch (cipso[6]) {
	case CIPSO_V4_TAG_RBITMAP:
		ret_val = cipso_v4_parsetag_rbm(doi_def, &cipso[6], secattr);
		break;
	case CIPSO_V4_TAG_ENUM:
		ret_val = cipso_v4_parsetag_enum(doi_def, &cipso[6], secattr);
		break;
	case CIPSO_V4_TAG_RANGE:
		ret_val = cipso_v4_parsetag_rng(doi_def, &cipso[6], secattr);
		break;
	case CIPSO_V4_TAG_LOCAL:
		ret_val = cipso_v4_parsetag_loc(doi_def, &cipso[6], secattr);
		break;
	}
	if (ret_val == 0)
		secattr->type = NETLBL_NLTYPE_CIPSOV4;

getattr_return:
	rcu_read_unlock();
	return ret_val;
}

/**
 * cipso_v4_sock_getattr - Get the security attributes from a sock
 * @sk: the sock
 * @secattr: the security attributes
 *
 * Description:
 * Query @sk to see if there is a CIPSO option attached to the sock and if
 * there is return the CIPSO security attributes in @secattr. This function
 * requires that @sk be locked, or privately held, but it does not do any
 * locking itself. Returns zero on success and negative values on failure.
 *
 */
int cipso_v4_sock_getattr(struct sock *sk, struct netlbl_lsm_secattr *secattr)
{
	struct ip_options *opt;

	opt = inet_sk(sk)->opt;
	if (opt == NULL || opt->cipso == 0)
		return -ENOMSG;

	return cipso_v4_getattr(opt->__data + opt->cipso - sizeof(struct iphdr),
				secattr);
}

/**
 * cipso_v4_skbuff_setattr - Set the CIPSO option on a packet
 * @skb: the packet
 * @doi_def: the CIPSO DOI to use
 * @secattr: the security attributes
 *
 * Description:
 * Set the CIPSO option on the given packet based on the security attributes.
 * Returns zero on success and negative values on failure.
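 *
 * Any existing IP options are overwritten to guarantee room for the
 * security label; when the option area grows, the IP header is moved
 * back into newly reserved headroom and the header length, total length
 * and checksum fields are rewritten to match.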
* */ int cipso_v4_skbuff_setattr(struct sk_buff *skb, const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr) { int ret_val; struct iphdr *iph; struct ip_options *opt = &IPCB(skb)->opt; unsigned char buf[CIPSO_V4_OPT_LEN_MAX]; u32 buf_len = CIPSO_V4_OPT_LEN_MAX; u32 opt_len; int len_delta; ret_val = cipso_v4_genopt(buf, buf_len, doi_def, secattr); if (ret_val < 0) return ret_val; buf_len = ret_val; opt_len = (buf_len + 3) & ~3; /* we overwrite any existing options to ensure that we have enough * room for the CIPSO option, the reason is that we _need_ to guarantee * that the security label is applied to the packet - we do the same * thing when using the socket options and it hasn't caused a problem, * if we need to we can always revisit this choice later */ len_delta = opt_len - opt->optlen; /* if we don't ensure enough headroom we could panic on the skb_push() * call below so make sure we have enough, we are also "mangling" the * packet so we should probably do a copy-on-write call anyway */ ret_val = skb_cow(skb, skb_headroom(skb) + len_delta); if (ret_val < 0) return ret_val; if (len_delta > 0) { /* we assume that the header + opt->optlen have already been * "pushed" in ip_options_build() or similar */ iph = ip_hdr(skb); skb_push(skb, len_delta); memmove((char *)iph - len_delta, iph, iph->ihl << 2); skb_reset_network_header(skb); iph = ip_hdr(skb); } else if (len_delta < 0) { iph = ip_hdr(skb); memset(iph + 1, IPOPT_NOP, opt->optlen); } else iph = ip_hdr(skb); if (opt->optlen > 0) memset(opt, 0, sizeof(*opt)); opt->optlen = opt_len; opt->cipso = sizeof(struct iphdr); opt->is_changed = 1; /* we have to do the following because we are being called from a * netfilter hook which means the packet already has had the header * fields populated and the checksum calculated - yes this means we * are doing more work than needed but we do it to keep the core * stack clean and tidy */ memcpy(iph + 1, buf, buf_len); if (opt_len > buf_len) memset((char *)(iph + 1) + buf_len, 0, opt_len - buf_len); if (len_delta != 0) { iph->ihl = 5 + (opt_len >> 2); iph->tot_len = htons(skb->len); } ip_send_check(iph); return 0; } /** * cipso_v4_skbuff_delattr - Delete any CIPSO options from a packet * @skb: the packet * * Description: * Removes any and all CIPSO options from the given packet. Returns zero on * success, negative values on failure. * */ int cipso_v4_skbuff_delattr(struct sk_buff *skb) { int ret_val; struct iphdr *iph; struct ip_options *opt = &IPCB(skb)->opt; unsigned char *cipso_ptr; if (opt->cipso == 0) return 0; /* since we are changing the packet we should make a copy */ ret_val = skb_cow(skb, skb_headroom(skb)); if (ret_val < 0) return ret_val; /* the easiest thing to do is just replace the cipso option with noop * options since we don't change the size of the packet, although we * still need to recalculate the checksum */ iph = ip_hdr(skb); cipso_ptr = (unsigned char *)iph + opt->cipso; memset(cipso_ptr, IPOPT_NOOP, cipso_ptr[1]); opt->cipso = 0; opt->is_changed = 1; ip_send_check(iph); return 0; } /** * cipso_v4_skbuff_getattr - Get the security attributes from the CIPSO option * @skb: the packet * @secattr: the security attributes * * Description: * Parse the given packet's CIPSO option and return the security attributes. * Returns zero on success and negative values on failure. 
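 *
 * The CIPSO_V4_OPTPTR() macro resolves to the start of the CIPSO option
 * within the packet, so the raw option bytes can be handed directly to
 * cipso_v4_getattr().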
 *
 */
int cipso_v4_skbuff_getattr(const struct sk_buff *skb,
			    struct netlbl_lsm_secattr *secattr)
{
	return cipso_v4_getattr(CIPSO_V4_OPTPTR(skb), secattr);
}

/*
 * Setup Functions
 */

/**
 * cipso_v4_init - Initialize the CIPSO module
 *
 * Description:
 * Initialize the CIPSO module and prepare it for use. Returns zero on success
 * and panics on failure, since the label mapping cache is required for the
 * module to operate.
 *
 */
static int __init cipso_v4_init(void)
{
	int ret_val;

	ret_val = cipso_v4_cache_init();
	if (ret_val != 0)
		panic("Failed to initialize the CIPSO/IPv4 cache (%d)\n",
		      ret_val);

	return 0;
}

subsys_initcall(cipso_v4_init);
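/*
 * Usage sketch (illustrative only; the DOI value, mapping type and audit
 * bookkeeping shown here are assumptions, not part of this file):
 * registering a minimal pass-through DOI with the engine looks roughly
 * like
 *
 *	struct cipso_v4_doi *doi_def;
 *	struct netlbl_audit audit_info;
 *
 *	doi_def = kzalloc(sizeof(*doi_def), GFP_KERNEL);
 *	if (doi_def == NULL)
 *		return -ENOMEM;
 *	doi_def->doi = 16;
 *	doi_def->type = CIPSO_V4_MAP_PASS;
 *	doi_def->tags[0] = CIPSO_V4_TAG_RBITMAP;
 *	ret_val = cipso_v4_doi_add(doi_def, &audit_info);
 *
 * kzalloc() leaves the remaining tag slots set to CIPSO_V4_TAG_INVALID
 * (zero), which terminates the tag list.
 */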
/* * CIPSO - Commercial IP Security Option * * This is an implementation of the CIPSO 2.2 protocol as specified in * draft-ietf-cipso-ipsecurity-01.txt with additional tag types as found in * FIPS-188. While CIPSO never became a full IETF RFC standard many vendors * have chosen to adopt the protocol and over the years it has become a * de-facto standard for labeled networking. * * The CIPSO draft specification can be found in the kernel's Documentation * directory as well as the following URL: * http://tools.ietf.org/id/draft-ietf-cipso-ipsecurity-01.txt * The FIPS-188 specification can be found at the following URL: * http://www.itl.nist.gov/fipspubs/fip188.htm * * Author: Paul Moore <paul.moore@hp.com> * */ /* * (c) Copyright Hewlett-Packard Development Company, L.P., 2006, 2008 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See * the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/init.h> #include <linux/types.h> #include <linux/rcupdate.h> #include <linux/list.h> #include <linux/spinlock.h> #include <linux/string.h> #include <linux/jhash.h> #include <linux/audit.h> #include <linux/slab.h> #include <net/ip.h> #include <net/icmp.h> #include <net/tcp.h> #include <net/netlabel.h> #include <net/cipso_ipv4.h> #include <asm/atomic.h> #include <asm/bug.h> #include <asm/unaligned.h> /* List of available DOI definitions */ /* XXX - This currently assumes a minimal number of different DOIs in use, * if in practice there are a lot of different DOIs this list should * probably be turned into a hash table or something similar so we * can do quick lookups. */ static DEFINE_SPINLOCK(cipso_v4_doi_list_lock); static LIST_HEAD(cipso_v4_doi_list); /* Label mapping cache */ int cipso_v4_cache_enabled = 1; int cipso_v4_cache_bucketsize = 10; #define CIPSO_V4_CACHE_BUCKETBITS 7 #define CIPSO_V4_CACHE_BUCKETS (1 << CIPSO_V4_CACHE_BUCKETBITS) #define CIPSO_V4_CACHE_REORDERLIMIT 10 struct cipso_v4_map_cache_bkt { spinlock_t lock; u32 size; struct list_head list; }; struct cipso_v4_map_cache_entry { u32 hash; unsigned char *key; size_t key_len; struct netlbl_lsm_cache *lsm_data; u32 activity; struct list_head list; }; static struct cipso_v4_map_cache_bkt *cipso_v4_cache = NULL; /* Restricted bitmap (tag #1) flags */ int cipso_v4_rbm_optfmt = 0; int cipso_v4_rbm_strictvalid = 1; /* * Protocol Constants */ /* Maximum size of the CIPSO IP option, derived from the fact that the maximum * IPv4 header size is 60 bytes and the base IPv4 header is 20 bytes long. */ #define CIPSO_V4_OPT_LEN_MAX 40 /* Length of the base CIPSO option, this includes the option type (1 byte), the * option length (1 byte), and the DOI (4 bytes). */ #define CIPSO_V4_HDR_LEN 6 /* Base length of the restrictive category bitmap tag (tag #1). */ #define CIPSO_V4_TAG_RBM_BLEN 4 /* Base length of the enumerated category tag (tag #2). */ #define CIPSO_V4_TAG_ENUM_BLEN 4 /* Base length of the ranged categories bitmap tag (tag #5). 
*/ #define CIPSO_V4_TAG_RNG_BLEN 4 /* The maximum number of category ranges permitted in the ranged category tag * (tag #5). You may note that the IETF draft states that the maximum number * of category ranges is 7, but if the low end of the last category range is * zero then it is possible to fit 8 category ranges because the zero should * be omitted. */ #define CIPSO_V4_TAG_RNG_CAT_MAX 8 /* Base length of the local tag (non-standard tag). * Tag definition (may change between kernel versions) * * 0 8 16 24 32 * +----------+----------+----------+----------+ * | 10000000 | 00000110 | 32-bit secid value | * +----------+----------+----------+----------+ * | in (host byte order)| * +----------+----------+ * */ #define CIPSO_V4_TAG_LOC_BLEN 6 /* * Helper Functions */ /** * cipso_v4_bitmap_walk - Walk a bitmap looking for a bit * @bitmap: the bitmap * @bitmap_len: length in bits * @offset: starting offset * @state: if non-zero, look for a set (1) bit else look for a cleared (0) bit * * Description: * Starting at @offset, walk the bitmap from left to right until either the * desired bit is found or we reach the end. Return the bit offset, -1 if * not found, or -2 if error. */ static int cipso_v4_bitmap_walk(const unsigned char *bitmap, u32 bitmap_len, u32 offset, u8 state) { u32 bit_spot; u32 byte_offset; unsigned char bitmask; unsigned char byte; /* gcc always rounds to zero when doing integer division */ byte_offset = offset / 8; byte = bitmap[byte_offset]; bit_spot = offset; bitmask = 0x80 >> (offset % 8); while (bit_spot < bitmap_len) { if ((state && (byte & bitmask) == bitmask) || (state == 0 && (byte & bitmask) == 0)) return bit_spot; bit_spot++; bitmask >>= 1; if (bitmask == 0) { byte = bitmap[++byte_offset]; bitmask = 0x80; } } return -1; } /** * cipso_v4_bitmap_setbit - Sets a single bit in a bitmap * @bitmap: the bitmap * @bit: the bit * @state: if non-zero, set the bit (1) else clear the bit (0) * * Description: * Set a single bit in the bitmask. Returns zero on success, negative values * on error. */ static void cipso_v4_bitmap_setbit(unsigned char *bitmap, u32 bit, u8 state) { u32 byte_spot; u8 bitmask; /* gcc always rounds to zero when doing integer division */ byte_spot = bit / 8; bitmask = 0x80 >> (bit % 8); if (state) bitmap[byte_spot] |= bitmask; else bitmap[byte_spot] &= ~bitmask; } /** * cipso_v4_cache_entry_free - Frees a cache entry * @entry: the entry to free * * Description: * This function frees the memory associated with a cache entry including the * LSM cache data if there are no longer any users, i.e. reference count == 0. * */ static void cipso_v4_cache_entry_free(struct cipso_v4_map_cache_entry *entry) { if (entry->lsm_data) netlbl_secattr_cache_free(entry->lsm_data); kfree(entry->key); kfree(entry); } /** * cipso_v4_map_cache_hash - Hashing function for the CIPSO cache * @key: the hash key * @key_len: the length of the key in bytes * * Description: * The CIPSO tag hashing function. Returns a 32-bit hash value. * */ static u32 cipso_v4_map_cache_hash(const unsigned char *key, u32 key_len) { return jhash(key, key_len, 0); } /* * Label Mapping Cache Functions */ /** * cipso_v4_cache_init - Initialize the CIPSO cache * * Description: * Initializes the CIPSO label mapping cache, this function should be called * before any of the other functions defined in this file. Returns zero on * success, negative values on error. 
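 *
 * The cache is a fixed table of 128 (1 << CIPSO_V4_CACHE_BUCKETBITS)
 * buckets, each protected by its own spinlock and holding at most
 * cipso_v4_cache_bucketsize entries.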
* */ static int cipso_v4_cache_init(void) { u32 iter; cipso_v4_cache = kcalloc(CIPSO_V4_CACHE_BUCKETS, sizeof(struct cipso_v4_map_cache_bkt), GFP_KERNEL); if (cipso_v4_cache == NULL) return -ENOMEM; for (iter = 0; iter < CIPSO_V4_CACHE_BUCKETS; iter++) { spin_lock_init(&cipso_v4_cache[iter].lock); cipso_v4_cache[iter].size = 0; INIT_LIST_HEAD(&cipso_v4_cache[iter].list); } return 0; } /** * cipso_v4_cache_invalidate - Invalidates the current CIPSO cache * * Description: * Invalidates and frees any entries in the CIPSO cache. Returns zero on * success and negative values on failure. * */ void cipso_v4_cache_invalidate(void) { struct cipso_v4_map_cache_entry *entry, *tmp_entry; u32 iter; for (iter = 0; iter < CIPSO_V4_CACHE_BUCKETS; iter++) { spin_lock_bh(&cipso_v4_cache[iter].lock); list_for_each_entry_safe(entry, tmp_entry, &cipso_v4_cache[iter].list, list) { list_del(&entry->list); cipso_v4_cache_entry_free(entry); } cipso_v4_cache[iter].size = 0; spin_unlock_bh(&cipso_v4_cache[iter].lock); } } /** * cipso_v4_cache_check - Check the CIPSO cache for a label mapping * @key: the buffer to check * @key_len: buffer length in bytes * @secattr: the security attribute struct to use * * Description: * This function checks the cache to see if a label mapping already exists for * the given key. If there is a match then the cache is adjusted and the * @secattr struct is populated with the correct LSM security attributes. The * cache is adjusted in the following manner if the entry is not already the * first in the cache bucket: * * 1. The cache entry's activity counter is incremented * 2. The previous (higher ranking) entry's activity counter is decremented * 3. If the difference between the two activity counters is geater than * CIPSO_V4_CACHE_REORDERLIMIT the two entries are swapped * * Returns zero on success, -ENOENT for a cache miss, and other negative values * on error. * */ static int cipso_v4_cache_check(const unsigned char *key, u32 key_len, struct netlbl_lsm_secattr *secattr) { u32 bkt; struct cipso_v4_map_cache_entry *entry; struct cipso_v4_map_cache_entry *prev_entry = NULL; u32 hash; if (!cipso_v4_cache_enabled) return -ENOENT; hash = cipso_v4_map_cache_hash(key, key_len); bkt = hash & (CIPSO_V4_CACHE_BUCKETS - 1); spin_lock_bh(&cipso_v4_cache[bkt].lock); list_for_each_entry(entry, &cipso_v4_cache[bkt].list, list) { if (entry->hash == hash && entry->key_len == key_len && memcmp(entry->key, key, key_len) == 0) { entry->activity += 1; atomic_inc(&entry->lsm_data->refcount); secattr->cache = entry->lsm_data; secattr->flags |= NETLBL_SECATTR_CACHE; secattr->type = NETLBL_NLTYPE_CIPSOV4; if (prev_entry == NULL) { spin_unlock_bh(&cipso_v4_cache[bkt].lock); return 0; } if (prev_entry->activity > 0) prev_entry->activity -= 1; if (entry->activity > prev_entry->activity && entry->activity - prev_entry->activity > CIPSO_V4_CACHE_REORDERLIMIT) { __list_del(entry->list.prev, entry->list.next); __list_add(&entry->list, prev_entry->list.prev, &prev_entry->list); } spin_unlock_bh(&cipso_v4_cache[bkt].lock); return 0; } prev_entry = entry; } spin_unlock_bh(&cipso_v4_cache[bkt].lock); return -ENOENT; } /** * cipso_v4_cache_add - Add an entry to the CIPSO cache * @skb: the packet * @secattr: the packet's security attributes * * Description: * Add a new entry into the CIPSO label mapping cache. Add the new entry to * head of the cache bucket's list, if the cache bucket is out of room remove * the last entry in the list first. It is important to note that there is * currently no checking for duplicate keys. 
Returns zero on success, * negative values on failure. * */ int cipso_v4_cache_add(const struct sk_buff *skb, const struct netlbl_lsm_secattr *secattr) { int ret_val = -EPERM; u32 bkt; struct cipso_v4_map_cache_entry *entry = NULL; struct cipso_v4_map_cache_entry *old_entry = NULL; unsigned char *cipso_ptr; u32 cipso_ptr_len; if (!cipso_v4_cache_enabled || cipso_v4_cache_bucketsize <= 0) return 0; cipso_ptr = CIPSO_V4_OPTPTR(skb); cipso_ptr_len = cipso_ptr[1]; entry = kzalloc(sizeof(*entry), GFP_ATOMIC); if (entry == NULL) return -ENOMEM; entry->key = kmemdup(cipso_ptr, cipso_ptr_len, GFP_ATOMIC); if (entry->key == NULL) { ret_val = -ENOMEM; goto cache_add_failure; } entry->key_len = cipso_ptr_len; entry->hash = cipso_v4_map_cache_hash(cipso_ptr, cipso_ptr_len); atomic_inc(&secattr->cache->refcount); entry->lsm_data = secattr->cache; bkt = entry->hash & (CIPSO_V4_CACHE_BUCKETS - 1); spin_lock_bh(&cipso_v4_cache[bkt].lock); if (cipso_v4_cache[bkt].size < cipso_v4_cache_bucketsize) { list_add(&entry->list, &cipso_v4_cache[bkt].list); cipso_v4_cache[bkt].size += 1; } else { old_entry = list_entry(cipso_v4_cache[bkt].list.prev, struct cipso_v4_map_cache_entry, list); list_del(&old_entry->list); list_add(&entry->list, &cipso_v4_cache[bkt].list); cipso_v4_cache_entry_free(old_entry); } spin_unlock_bh(&cipso_v4_cache[bkt].lock); return 0; cache_add_failure: if (entry) cipso_v4_cache_entry_free(entry); return ret_val; } /* * DOI List Functions */ /** * cipso_v4_doi_search - Searches for a DOI definition * @doi: the DOI to search for * * Description: * Search the DOI definition list for a DOI definition with a DOI value that * matches @doi. The caller is responsible for calling rcu_read_[un]lock(). * Returns a pointer to the DOI definition on success and NULL on failure. */ static struct cipso_v4_doi *cipso_v4_doi_search(u32 doi) { struct cipso_v4_doi *iter; list_for_each_entry_rcu(iter, &cipso_v4_doi_list, list) if (iter->doi == doi && atomic_read(&iter->refcount)) return iter; return NULL; } /** * cipso_v4_doi_add - Add a new DOI to the CIPSO protocol engine * @doi_def: the DOI structure * @audit_info: NetLabel audit information * * Description: * The caller defines a new DOI for use by the CIPSO engine and calls this * function to add it to the list of acceptable domains. The caller must * ensure that the mapping table specified in @doi_def->map meets all of the * requirements of the mapping type (see cipso_ipv4.h for details). Returns * zero on success and non-zero on failure. 
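 *
 * Tag and mapping types must agree: CIPSO_V4_TAG_ENUM and
 * CIPSO_V4_TAG_RANGE are only accepted for CIPSO_V4_MAP_PASS DOIs,
 * CIPSO_V4_TAG_LOCAL only for CIPSO_V4_MAP_LOCAL, while
 * CIPSO_V4_TAG_RBITMAP is valid for any mapping type.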
 *
 */
int cipso_v4_doi_add(struct cipso_v4_doi *doi_def,
		     struct netlbl_audit *audit_info)
{
	int ret_val = -EINVAL;
	u32 iter;
	u32 doi = 0;
	u32 doi_type = CIPSO_V4_MAP_UNKNOWN;
	struct audit_buffer *audit_buf;

	if (doi_def == NULL)
		goto doi_add_return;
	doi = doi_def->doi;
	doi_type = doi_def->type;
	if (doi == CIPSO_V4_DOI_UNKNOWN)
		goto doi_add_return;
	for (iter = 0; iter < CIPSO_V4_TAG_MAXCNT; iter++) {
		switch (doi_def->tags[iter]) {
		case CIPSO_V4_TAG_RBITMAP:
			break;
		case CIPSO_V4_TAG_RANGE:
		case CIPSO_V4_TAG_ENUM:
			if (doi_def->type != CIPSO_V4_MAP_PASS)
				goto doi_add_return;
			break;
		case CIPSO_V4_TAG_LOCAL:
			if (doi_def->type != CIPSO_V4_MAP_LOCAL)
				goto doi_add_return;
			break;
		case CIPSO_V4_TAG_INVALID:
			if (iter == 0)
				goto doi_add_return;
			break;
		default:
			goto doi_add_return;
		}
	}

	atomic_set(&doi_def->refcount, 1);

	spin_lock(&cipso_v4_doi_list_lock);
	if (cipso_v4_doi_search(doi_def->doi) != NULL) {
		spin_unlock(&cipso_v4_doi_list_lock);
		ret_val = -EEXIST;
		goto doi_add_return;
	}
	list_add_tail_rcu(&doi_def->list, &cipso_v4_doi_list);
	spin_unlock(&cipso_v4_doi_list_lock);
	ret_val = 0;

doi_add_return:
	audit_buf = netlbl_audit_start(AUDIT_MAC_CIPSOV4_ADD, audit_info);
	if (audit_buf != NULL) {
		const char *type_str;
		switch (doi_type) {
		case CIPSO_V4_MAP_TRANS:
			type_str = "trans";
			break;
		case CIPSO_V4_MAP_PASS:
			type_str = "pass";
			break;
		case CIPSO_V4_MAP_LOCAL:
			type_str = "local";
			break;
		default:
			type_str = "(unknown)";
		}
		audit_log_format(audit_buf,
				 " cipso_doi=%u cipso_type=%s res=%u",
				 doi, type_str,
				 ret_val == 0 ? 1 : 0);
		audit_log_end(audit_buf);
	}

	return ret_val;
}

/**
 * cipso_v4_doi_free - Frees a DOI definition
 * @doi_def: the DOI definition
 *
 * Description:
 * This function frees all of the memory associated with a DOI definition.
 *
 */
void cipso_v4_doi_free(struct cipso_v4_doi *doi_def)
{
	if (doi_def == NULL)
		return;

	switch (doi_def->type) {
	case CIPSO_V4_MAP_TRANS:
		kfree(doi_def->map.std->lvl.cipso);
		kfree(doi_def->map.std->lvl.local);
		kfree(doi_def->map.std->cat.cipso);
		kfree(doi_def->map.std->cat.local);
		break;
	}
	kfree(doi_def);
}

/**
 * cipso_v4_doi_free_rcu - Frees a DOI definition via the RCU pointer
 * @entry: the entry's RCU field
 *
 * Description:
 * This function is designed to be used as a callback to the call_rcu()
 * function so that the memory allocated to the DOI definition can be released
 * safely.
 *
 */
static void cipso_v4_doi_free_rcu(struct rcu_head *entry)
{
	struct cipso_v4_doi *doi_def;

	doi_def = container_of(entry, struct cipso_v4_doi, rcu);
	cipso_v4_doi_free(doi_def);
}

/**
 * cipso_v4_doi_remove - Remove an existing DOI from the CIPSO protocol engine
 * @doi: the DOI value
 * @audit_info: NetLabel audit information
 *
 * Description:
 * Removes a DOI definition from the CIPSO engine. The NetLabel routines will
 * be called to release their own LSM domain mappings as well as our own
 * domain list. Returns zero on success and negative values on failure.
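 *
 * Removal only succeeds when the caller holds the last reference, i.e.
 * the reference count drops from one to zero here; otherwise -EBUSY is
 * returned and the definition stays on the list.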
* */ int cipso_v4_doi_remove(u32 doi, struct netlbl_audit *audit_info) { int ret_val; struct cipso_v4_doi *doi_def; struct audit_buffer *audit_buf; spin_lock(&cipso_v4_doi_list_lock); doi_def = cipso_v4_doi_search(doi); if (doi_def == NULL) { spin_unlock(&cipso_v4_doi_list_lock); ret_val = -ENOENT; goto doi_remove_return; } if (!atomic_dec_and_test(&doi_def->refcount)) { spin_unlock(&cipso_v4_doi_list_lock); ret_val = -EBUSY; goto doi_remove_return; } list_del_rcu(&doi_def->list); spin_unlock(&cipso_v4_doi_list_lock); cipso_v4_cache_invalidate(); call_rcu(&doi_def->rcu, cipso_v4_doi_free_rcu); ret_val = 0; doi_remove_return: audit_buf = netlbl_audit_start(AUDIT_MAC_CIPSOV4_DEL, audit_info); if (audit_buf != NULL) { audit_log_format(audit_buf, " cipso_doi=%u res=%u", doi, ret_val == 0 ? 1 : 0); audit_log_end(audit_buf); } return ret_val; } /** * cipso_v4_doi_getdef - Returns a reference to a valid DOI definition * @doi: the DOI value * * Description: * Searches for a valid DOI definition and if one is found it is returned to * the caller. Otherwise NULL is returned. The caller must ensure that * rcu_read_lock() is held while accessing the returned definition and the DOI * definition reference count is decremented when the caller is done. * */ struct cipso_v4_doi *cipso_v4_doi_getdef(u32 doi) { struct cipso_v4_doi *doi_def; rcu_read_lock(); doi_def = cipso_v4_doi_search(doi); if (doi_def == NULL) goto doi_getdef_return; if (!atomic_inc_not_zero(&doi_def->refcount)) doi_def = NULL; doi_getdef_return: rcu_read_unlock(); return doi_def; } /** * cipso_v4_doi_putdef - Releases a reference for the given DOI definition * @doi_def: the DOI definition * * Description: * Releases a DOI definition reference obtained from cipso_v4_doi_getdef(). * */ void cipso_v4_doi_putdef(struct cipso_v4_doi *doi_def) { if (doi_def == NULL) return; if (!atomic_dec_and_test(&doi_def->refcount)) return; spin_lock(&cipso_v4_doi_list_lock); list_del_rcu(&doi_def->list); spin_unlock(&cipso_v4_doi_list_lock); cipso_v4_cache_invalidate(); call_rcu(&doi_def->rcu, cipso_v4_doi_free_rcu); } /** * cipso_v4_doi_walk - Iterate through the DOI definitions * @skip_cnt: skip past this number of DOI definitions, updated * @callback: callback for each DOI definition * @cb_arg: argument for the callback function * * Description: * Iterate over the DOI definition list, skipping the first @skip_cnt entries. * For each entry call @callback, if @callback returns a negative value stop * 'walking' through the list and return. Updates the value in @skip_cnt upon * return. Returns zero on success, negative values on failure. * */ int cipso_v4_doi_walk(u32 *skip_cnt, int (*callback) (struct cipso_v4_doi *doi_def, void *arg), void *cb_arg) { int ret_val = -ENOENT; u32 doi_cnt = 0; struct cipso_v4_doi *iter_doi; rcu_read_lock(); list_for_each_entry_rcu(iter_doi, &cipso_v4_doi_list, list) if (atomic_read(&iter_doi->refcount) > 0) { if (doi_cnt++ < *skip_cnt) continue; ret_val = callback(iter_doi, cb_arg); if (ret_val < 0) { doi_cnt--; goto doi_walk_return; } } doi_walk_return: rcu_read_unlock(); *skip_cnt = doi_cnt; return ret_val; } /* * Label Mapping Functions */ /** * cipso_v4_map_lvl_valid - Checks to see if the given level is understood * @doi_def: the DOI definition * @level: the level to check * * Description: * Checks the given level against the given DOI definition and returns a * negative value if the level does not have a valid mapping and a zero value * if the level is defined by the DOI. 
* */ static int cipso_v4_map_lvl_valid(const struct cipso_v4_doi *doi_def, u8 level) { switch (doi_def->type) { case CIPSO_V4_MAP_PASS: return 0; case CIPSO_V4_MAP_TRANS: if (doi_def->map.std->lvl.cipso[level] < CIPSO_V4_INV_LVL) return 0; break; } return -EFAULT; } /** * cipso_v4_map_lvl_hton - Perform a level mapping from the host to the network * @doi_def: the DOI definition * @host_lvl: the host MLS level * @net_lvl: the network/CIPSO MLS level * * Description: * Perform a label mapping to translate a local MLS level to the correct * CIPSO level using the given DOI definition. Returns zero on success, * negative values otherwise. * */ static int cipso_v4_map_lvl_hton(const struct cipso_v4_doi *doi_def, u32 host_lvl, u32 *net_lvl) { switch (doi_def->type) { case CIPSO_V4_MAP_PASS: *net_lvl = host_lvl; return 0; case CIPSO_V4_MAP_TRANS: if (host_lvl < doi_def->map.std->lvl.local_size && doi_def->map.std->lvl.local[host_lvl] < CIPSO_V4_INV_LVL) { *net_lvl = doi_def->map.std->lvl.local[host_lvl]; return 0; } return -EPERM; } return -EINVAL; } /** * cipso_v4_map_lvl_ntoh - Perform a level mapping from the network to the host * @doi_def: the DOI definition * @net_lvl: the network/CIPSO MLS level * @host_lvl: the host MLS level * * Description: * Perform a label mapping to translate a CIPSO level to the correct local MLS * level using the given DOI definition. Returns zero on success, negative * values otherwise. * */ static int cipso_v4_map_lvl_ntoh(const struct cipso_v4_doi *doi_def, u32 net_lvl, u32 *host_lvl) { struct cipso_v4_std_map_tbl *map_tbl; switch (doi_def->type) { case CIPSO_V4_MAP_PASS: *host_lvl = net_lvl; return 0; case CIPSO_V4_MAP_TRANS: map_tbl = doi_def->map.std; if (net_lvl < map_tbl->lvl.cipso_size && map_tbl->lvl.cipso[net_lvl] < CIPSO_V4_INV_LVL) { *host_lvl = doi_def->map.std->lvl.cipso[net_lvl]; return 0; } return -EPERM; } return -EINVAL; } /** * cipso_v4_map_cat_rbm_valid - Checks to see if the category bitmap is valid * @doi_def: the DOI definition * @bitmap: category bitmap * @bitmap_len: bitmap length in bytes * * Description: * Checks the given category bitmap against the given DOI definition and * returns a negative value if any of the categories in the bitmap do not have * a valid mapping and a zero value if all of the categories are valid. * */ static int cipso_v4_map_cat_rbm_valid(const struct cipso_v4_doi *doi_def, const unsigned char *bitmap, u32 bitmap_len) { int cat = -1; u32 bitmap_len_bits = bitmap_len * 8; u32 cipso_cat_size; u32 *cipso_array; switch (doi_def->type) { case CIPSO_V4_MAP_PASS: return 0; case CIPSO_V4_MAP_TRANS: cipso_cat_size = doi_def->map.std->cat.cipso_size; cipso_array = doi_def->map.std->cat.cipso; for (;;) { cat = cipso_v4_bitmap_walk(bitmap, bitmap_len_bits, cat + 1, 1); if (cat < 0) break; if (cat >= cipso_cat_size || cipso_array[cat] >= CIPSO_V4_INV_CAT) return -EFAULT; } if (cat == -1) return 0; break; } return -EFAULT; } /** * cipso_v4_map_cat_rbm_hton - Perform a category mapping from host to network * @doi_def: the DOI definition * @secattr: the security attributes * @net_cat: the zero'd out category bitmap in network/CIPSO format * @net_cat_len: the length of the CIPSO bitmap in bytes * * Description: * Perform a label mapping to translate a local MLS category bitmap to the * correct CIPSO bitmap using the given DOI definition. Returns the minimum * size in bytes of the network bitmap on success, negative values otherwise. 
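 *
 * The returned length covers only whole bytes up to the highest set bit;
 * if the highest mapped category lands on bit 9, for example, two bytes
 * are reported even if @net_cat_len is larger.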
* */ static int cipso_v4_map_cat_rbm_hton(const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr, unsigned char *net_cat, u32 net_cat_len) { int host_spot = -1; u32 net_spot = CIPSO_V4_INV_CAT; u32 net_spot_max = 0; u32 net_clen_bits = net_cat_len * 8; u32 host_cat_size = 0; u32 *host_cat_array = NULL; if (doi_def->type == CIPSO_V4_MAP_TRANS) { host_cat_size = doi_def->map.std->cat.local_size; host_cat_array = doi_def->map.std->cat.local; } for (;;) { host_spot = netlbl_secattr_catmap_walk(secattr->attr.mls.cat, host_spot + 1); if (host_spot < 0) break; switch (doi_def->type) { case CIPSO_V4_MAP_PASS: net_spot = host_spot; break; case CIPSO_V4_MAP_TRANS: if (host_spot >= host_cat_size) return -EPERM; net_spot = host_cat_array[host_spot]; if (net_spot >= CIPSO_V4_INV_CAT) return -EPERM; break; } if (net_spot >= net_clen_bits) return -ENOSPC; cipso_v4_bitmap_setbit(net_cat, net_spot, 1); if (net_spot > net_spot_max) net_spot_max = net_spot; } if (++net_spot_max % 8) return net_spot_max / 8 + 1; return net_spot_max / 8; } /** * cipso_v4_map_cat_rbm_ntoh - Perform a category mapping from network to host * @doi_def: the DOI definition * @net_cat: the category bitmap in network/CIPSO format * @net_cat_len: the length of the CIPSO bitmap in bytes * @secattr: the security attributes * * Description: * Perform a label mapping to translate a CIPSO bitmap to the correct local * MLS category bitmap using the given DOI definition. Returns zero on * success, negative values on failure. * */ static int cipso_v4_map_cat_rbm_ntoh(const struct cipso_v4_doi *doi_def, const unsigned char *net_cat, u32 net_cat_len, struct netlbl_lsm_secattr *secattr) { int ret_val; int net_spot = -1; u32 host_spot = CIPSO_V4_INV_CAT; u32 net_clen_bits = net_cat_len * 8; u32 net_cat_size = 0; u32 *net_cat_array = NULL; if (doi_def->type == CIPSO_V4_MAP_TRANS) { net_cat_size = doi_def->map.std->cat.cipso_size; net_cat_array = doi_def->map.std->cat.cipso; } for (;;) { net_spot = cipso_v4_bitmap_walk(net_cat, net_clen_bits, net_spot + 1, 1); if (net_spot < 0) { if (net_spot == -2) return -EFAULT; return 0; } switch (doi_def->type) { case CIPSO_V4_MAP_PASS: host_spot = net_spot; break; case CIPSO_V4_MAP_TRANS: if (net_spot >= net_cat_size) return -EPERM; host_spot = net_cat_array[net_spot]; if (host_spot >= CIPSO_V4_INV_CAT) return -EPERM; break; } ret_val = netlbl_secattr_catmap_setbit(secattr->attr.mls.cat, host_spot, GFP_ATOMIC); if (ret_val != 0) return ret_val; } return -EINVAL; } /** * cipso_v4_map_cat_enum_valid - Checks to see if the categories are valid * @doi_def: the DOI definition * @enumcat: category list * @enumcat_len: length of the category list in bytes * * Description: * Checks the given categories against the given DOI definition and returns a * negative value if any of the categories do not have a valid mapping and a * zero value if all of the categories are valid. 
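 *
 * The list must contain an even number of bytes and the 16-bit category
 * values must be strictly increasing, so 00 01 00 05 is accepted while
 * 00 05 00 01 (or any repeated category) is rejected.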
* */ static int cipso_v4_map_cat_enum_valid(const struct cipso_v4_doi *doi_def, const unsigned char *enumcat, u32 enumcat_len) { u16 cat; int cat_prev = -1; u32 iter; if (doi_def->type != CIPSO_V4_MAP_PASS || enumcat_len & 0x01) return -EFAULT; for (iter = 0; iter < enumcat_len; iter += 2) { cat = get_unaligned_be16(&enumcat[iter]); if (cat <= cat_prev) return -EFAULT; cat_prev = cat; } return 0; } /** * cipso_v4_map_cat_enum_hton - Perform a category mapping from host to network * @doi_def: the DOI definition * @secattr: the security attributes * @net_cat: the zero'd out category list in network/CIPSO format * @net_cat_len: the length of the CIPSO category list in bytes * * Description: * Perform a label mapping to translate a local MLS category bitmap to the * correct CIPSO category list using the given DOI definition. Returns the * size in bytes of the network category bitmap on success, negative values * otherwise. * */ static int cipso_v4_map_cat_enum_hton(const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr, unsigned char *net_cat, u32 net_cat_len) { int cat = -1; u32 cat_iter = 0; for (;;) { cat = netlbl_secattr_catmap_walk(secattr->attr.mls.cat, cat + 1); if (cat < 0) break; if ((cat_iter + 2) > net_cat_len) return -ENOSPC; *((__be16 *)&net_cat[cat_iter]) = htons(cat); cat_iter += 2; } return cat_iter; } /** * cipso_v4_map_cat_enum_ntoh - Perform a category mapping from network to host * @doi_def: the DOI definition * @net_cat: the category list in network/CIPSO format * @net_cat_len: the length of the CIPSO bitmap in bytes * @secattr: the security attributes * * Description: * Perform a label mapping to translate a CIPSO category list to the correct * local MLS category bitmap using the given DOI definition. Returns zero on * success, negative values on failure. * */ static int cipso_v4_map_cat_enum_ntoh(const struct cipso_v4_doi *doi_def, const unsigned char *net_cat, u32 net_cat_len, struct netlbl_lsm_secattr *secattr) { int ret_val; u32 iter; for (iter = 0; iter < net_cat_len; iter += 2) { ret_val = netlbl_secattr_catmap_setbit(secattr->attr.mls.cat, get_unaligned_be16(&net_cat[iter]), GFP_ATOMIC); if (ret_val != 0) return ret_val; } return 0; } /** * cipso_v4_map_cat_rng_valid - Checks to see if the categories are valid * @doi_def: the DOI definition * @rngcat: category list * @rngcat_len: length of the category list in bytes * * Description: * Checks the given categories against the given DOI definition and returns a * negative value if any of the categories do not have a valid mapping and a * zero value if all of the categories are valid. 
* */ static int cipso_v4_map_cat_rng_valid(const struct cipso_v4_doi *doi_def, const unsigned char *rngcat, u32 rngcat_len) { u16 cat_high; u16 cat_low; u32 cat_prev = CIPSO_V4_MAX_REM_CATS + 1; u32 iter; if (doi_def->type != CIPSO_V4_MAP_PASS || rngcat_len & 0x01) return -EFAULT; for (iter = 0; iter < rngcat_len; iter += 4) { cat_high = get_unaligned_be16(&rngcat[iter]); if ((iter + 4) <= rngcat_len) cat_low = get_unaligned_be16(&rngcat[iter + 2]); else cat_low = 0; if (cat_high > cat_prev) return -EFAULT; cat_prev = cat_low; } return 0; } /** * cipso_v4_map_cat_rng_hton - Perform a category mapping from host to network * @doi_def: the DOI definition * @secattr: the security attributes * @net_cat: the zero'd out category list in network/CIPSO format * @net_cat_len: the length of the CIPSO category list in bytes * * Description: * Perform a label mapping to translate a local MLS category bitmap to the * correct CIPSO category list using the given DOI definition. Returns the * size in bytes of the network category bitmap on success, negative values * otherwise. * */ static int cipso_v4_map_cat_rng_hton(const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr, unsigned char *net_cat, u32 net_cat_len) { int iter = -1; u16 array[CIPSO_V4_TAG_RNG_CAT_MAX * 2]; u32 array_cnt = 0; u32 cat_size = 0; /* make sure we don't overflow the 'array[]' variable */ if (net_cat_len > (CIPSO_V4_OPT_LEN_MAX - CIPSO_V4_HDR_LEN - CIPSO_V4_TAG_RNG_BLEN)) return -ENOSPC; for (;;) { iter = netlbl_secattr_catmap_walk(secattr->attr.mls.cat, iter + 1); if (iter < 0) break; cat_size += (iter == 0 ? 0 : sizeof(u16)); if (cat_size > net_cat_len) return -ENOSPC; array[array_cnt++] = iter; iter = netlbl_secattr_catmap_walk_rng(secattr->attr.mls.cat, iter); if (iter < 0) return -EFAULT; cat_size += sizeof(u16); if (cat_size > net_cat_len) return -ENOSPC; array[array_cnt++] = iter; } for (iter = 0; array_cnt > 0;) { *((__be16 *)&net_cat[iter]) = htons(array[--array_cnt]); iter += 2; array_cnt--; if (array[array_cnt] != 0) { *((__be16 *)&net_cat[iter]) = htons(array[array_cnt]); iter += 2; } } return cat_size; } /** * cipso_v4_map_cat_rng_ntoh - Perform a category mapping from network to host * @doi_def: the DOI definition * @net_cat: the category list in network/CIPSO format * @net_cat_len: the length of the CIPSO bitmap in bytes * @secattr: the security attributes * * Description: * Perform a label mapping to translate a CIPSO category list to the correct * local MLS category bitmap using the given DOI definition. Returns zero on * success, negative values on failure. * */ static int cipso_v4_map_cat_rng_ntoh(const struct cipso_v4_doi *doi_def, const unsigned char *net_cat, u32 net_cat_len, struct netlbl_lsm_secattr *secattr) { int ret_val; u32 net_iter; u16 cat_low; u16 cat_high; for (net_iter = 0; net_iter < net_cat_len; net_iter += 4) { cat_high = get_unaligned_be16(&net_cat[net_iter]); if ((net_iter + 4) <= net_cat_len) cat_low = get_unaligned_be16(&net_cat[net_iter + 2]); else cat_low = 0; ret_val = netlbl_secattr_catmap_setrng(secattr->attr.mls.cat, cat_low, cat_high, GFP_ATOMIC); if (ret_val != 0) return ret_val; } return 0; } /* * Protocol Handling Functions */ /** * cipso_v4_gentag_hdr - Generate a CIPSO option header * @doi_def: the DOI definition * @len: the total tag length in bytes, not including this header * @buf: the CIPSO option buffer * * Description: * Write a CIPSO header into the beginning of @buffer. 
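 *
 * Example (illustrative DOI value): for a DOI of 3 and a 4 byte tag the
 * header is written as 86 0a 00 00 00 03, i.e. the CIPSO option type
 * (134), the total option length (6 + 4), and the DOI in network byte
 * order.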
* */ static void cipso_v4_gentag_hdr(const struct cipso_v4_doi *doi_def, unsigned char *buf, u32 len) { buf[0] = IPOPT_CIPSO; buf[1] = CIPSO_V4_HDR_LEN + len; *(__be32 *)&buf[2] = htonl(doi_def->doi); } /** * cipso_v4_gentag_rbm - Generate a CIPSO restricted bitmap tag (type #1) * @doi_def: the DOI definition * @secattr: the security attributes * @buffer: the option buffer * @buffer_len: length of buffer in bytes * * Description: * Generate a CIPSO option using the restricted bitmap tag, tag type #1. The * actual buffer length may be larger than the indicated size due to * translation between host and network category bitmaps. Returns the size of * the tag on success, negative values on failure. * */ static int cipso_v4_gentag_rbm(const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr, unsigned char *buffer, u32 buffer_len) { int ret_val; u32 tag_len; u32 level; if ((secattr->flags & NETLBL_SECATTR_MLS_LVL) == 0) return -EPERM; ret_val = cipso_v4_map_lvl_hton(doi_def, secattr->attr.mls.lvl, &level); if (ret_val != 0) return ret_val; if (secattr->flags & NETLBL_SECATTR_MLS_CAT) { ret_val = cipso_v4_map_cat_rbm_hton(doi_def, secattr, &buffer[4], buffer_len - 4); if (ret_val < 0) return ret_val; /* This will send packets using the "optimized" format when * possible as specified in section 3.4.2.6 of the * CIPSO draft. */ if (cipso_v4_rbm_optfmt && ret_val > 0 && ret_val <= 10) tag_len = 14; else tag_len = 4 + ret_val; } else tag_len = 4; buffer[0] = CIPSO_V4_TAG_RBITMAP; buffer[1] = tag_len; buffer[3] = level; return tag_len; } /** * cipso_v4_parsetag_rbm - Parse a CIPSO restricted bitmap tag * @doi_def: the DOI definition * @tag: the CIPSO tag * @secattr: the security attributes * * Description: * Parse a CIPSO restricted bitmap tag (tag type #1) and return the security * attributes in @secattr. Return zero on success, negatives values on * failure. * */ static int cipso_v4_parsetag_rbm(const struct cipso_v4_doi *doi_def, const unsigned char *tag, struct netlbl_lsm_secattr *secattr) { int ret_val; u8 tag_len = tag[1]; u32 level; ret_val = cipso_v4_map_lvl_ntoh(doi_def, tag[3], &level); if (ret_val != 0) return ret_val; secattr->attr.mls.lvl = level; secattr->flags |= NETLBL_SECATTR_MLS_LVL; if (tag_len > 4) { secattr->attr.mls.cat = netlbl_secattr_catmap_alloc(GFP_ATOMIC); if (secattr->attr.mls.cat == NULL) return -ENOMEM; ret_val = cipso_v4_map_cat_rbm_ntoh(doi_def, &tag[4], tag_len - 4, secattr); if (ret_val != 0) { netlbl_secattr_catmap_free(secattr->attr.mls.cat); return ret_val; } secattr->flags |= NETLBL_SECATTR_MLS_CAT; } return 0; } /** * cipso_v4_gentag_enum - Generate a CIPSO enumerated tag (type #2) * @doi_def: the DOI definition * @secattr: the security attributes * @buffer: the option buffer * @buffer_len: length of buffer in bytes * * Description: * Generate a CIPSO option using the enumerated tag, tag type #2. Returns the * size of the tag on success, negative values on failure. 
* */ static int cipso_v4_gentag_enum(const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr, unsigned char *buffer, u32 buffer_len) { int ret_val; u32 tag_len; u32 level; if (!(secattr->flags & NETLBL_SECATTR_MLS_LVL)) return -EPERM; ret_val = cipso_v4_map_lvl_hton(doi_def, secattr->attr.mls.lvl, &level); if (ret_val != 0) return ret_val; if (secattr->flags & NETLBL_SECATTR_MLS_CAT) { ret_val = cipso_v4_map_cat_enum_hton(doi_def, secattr, &buffer[4], buffer_len - 4); if (ret_val < 0) return ret_val; tag_len = 4 + ret_val; } else tag_len = 4; buffer[0] = CIPSO_V4_TAG_ENUM; buffer[1] = tag_len; buffer[3] = level; return tag_len; } /** * cipso_v4_parsetag_enum - Parse a CIPSO enumerated tag * @doi_def: the DOI definition * @tag: the CIPSO tag * @secattr: the security attributes * * Description: * Parse a CIPSO enumerated tag (tag type #2) and return the security * attributes in @secattr. Return zero on success, negative values on * failure. * */ static int cipso_v4_parsetag_enum(const struct cipso_v4_doi *doi_def, const unsigned char *tag, struct netlbl_lsm_secattr *secattr) { int ret_val; u8 tag_len = tag[1]; u32 level; ret_val = cipso_v4_map_lvl_ntoh(doi_def, tag[3], &level); if (ret_val != 0) return ret_val; secattr->attr.mls.lvl = level; secattr->flags |= NETLBL_SECATTR_MLS_LVL; if (tag_len > 4) { secattr->attr.mls.cat = netlbl_secattr_catmap_alloc(GFP_ATOMIC); if (secattr->attr.mls.cat == NULL) return -ENOMEM; ret_val = cipso_v4_map_cat_enum_ntoh(doi_def, &tag[4], tag_len - 4, secattr); if (ret_val != 0) { netlbl_secattr_catmap_free(secattr->attr.mls.cat); return ret_val; } secattr->flags |= NETLBL_SECATTR_MLS_CAT; } return 0; } /** * cipso_v4_gentag_rng - Generate a CIPSO ranged tag (type #5) * @doi_def: the DOI definition * @secattr: the security attributes * @buffer: the option buffer * @buffer_len: length of buffer in bytes * * Description: * Generate a CIPSO option using the ranged tag, tag type #5. Returns the * size of the tag on success, negative values on failure. * */ static int cipso_v4_gentag_rng(const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr, unsigned char *buffer, u32 buffer_len) { int ret_val; u32 tag_len; u32 level; if (!(secattr->flags & NETLBL_SECATTR_MLS_LVL)) return -EPERM; ret_val = cipso_v4_map_lvl_hton(doi_def, secattr->attr.mls.lvl, &level); if (ret_val != 0) return ret_val; if (secattr->flags & NETLBL_SECATTR_MLS_CAT) { ret_val = cipso_v4_map_cat_rng_hton(doi_def, secattr, &buffer[4], buffer_len - 4); if (ret_val < 0) return ret_val; tag_len = 4 + ret_val; } else tag_len = 4; buffer[0] = CIPSO_V4_TAG_RANGE; buffer[1] = tag_len; buffer[3] = level; return tag_len; } /** * cipso_v4_parsetag_rng - Parse a CIPSO ranged tag * @doi_def: the DOI definition * @tag: the CIPSO tag * @secattr: the security attributes * * Description: * Parse a CIPSO ranged tag (tag type #5) and return the security attributes * in @secattr. Return zero on success, negative values on failure. 
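* For illustration (an assumed label): categories 0-7 and 20-25 travel as the big endian u16 pairs (25, 20) then (7, 0), highest range first; because the low end of the final pair is zero it is omitted on the wire by cipso_v4_gentag_rng(), which is why cipso_v4_map_cat_rng_ntoh() treats a missing low value as zero.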
* */ static int cipso_v4_parsetag_rng(const struct cipso_v4_doi *doi_def, const unsigned char *tag, struct netlbl_lsm_secattr *secattr) { int ret_val; u8 tag_len = tag[1]; u32 level; ret_val = cipso_v4_map_lvl_ntoh(doi_def, tag[3], &level); if (ret_val != 0) return ret_val; secattr->attr.mls.lvl = level; secattr->flags |= NETLBL_SECATTR_MLS_LVL; if (tag_len > 4) { secattr->attr.mls.cat = netlbl_secattr_catmap_alloc(GFP_ATOMIC); if (secattr->attr.mls.cat == NULL) return -ENOMEM; ret_val = cipso_v4_map_cat_rng_ntoh(doi_def, &tag[4], tag_len - 4, secattr); if (ret_val != 0) { netlbl_secattr_catmap_free(secattr->attr.mls.cat); return ret_val; } secattr->flags |= NETLBL_SECATTR_MLS_CAT; } return 0; } /** * cipso_v4_gentag_loc - Generate a CIPSO local tag (non-standard) * @doi_def: the DOI definition * @secattr: the security attributes * @buffer: the option buffer * @buffer_len: length of buffer in bytes * * Description: * Generate a CIPSO option using the local tag. Returns the size of the tag * on success, negative values on failure. * */ static int cipso_v4_gentag_loc(const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr, unsigned char *buffer, u32 buffer_len) { if (!(secattr->flags & NETLBL_SECATTR_SECID)) return -EPERM; buffer[0] = CIPSO_V4_TAG_LOCAL; buffer[1] = CIPSO_V4_TAG_LOC_BLEN; *(u32 *)&buffer[2] = secattr->attr.secid; return CIPSO_V4_TAG_LOC_BLEN; } /** * cipso_v4_parsetag_loc - Parse a CIPSO local tag * @doi_def: the DOI definition * @tag: the CIPSO tag * @secattr: the security attributes * * Description: * Parse a CIPSO local tag and return the security attributes in @secattr. * Return zero on success, negative values on failure. * */ static int cipso_v4_parsetag_loc(const struct cipso_v4_doi *doi_def, const unsigned char *tag, struct netlbl_lsm_secattr *secattr) { secattr->attr.secid = *(u32 *)&tag[2]; secattr->flags |= NETLBL_SECATTR_SECID; return 0; } /** * cipso_v4_validate - Validate a CIPSO option * @skb: the packet * @option: the start of the option, on error it is set to point to the error * * Description: * This routine is called to validate a CIPSO option; it checks all of the * fields to ensure that they are at least valid, see the draft snippet below * for details. If the option is valid then a zero value is returned and * the value of @option is unchanged. If the option is invalid then a * non-zero value is returned and @option is adjusted to point to the * offending portion of the option. From the IETF draft ... * * "If any field within the CIPSO options, such as the DOI identifier, is not * recognized the IP datagram is discarded and an ICMP 'parameter problem' * (type 12) is generated and returned. The ICMP code field is set to 'bad * parameter' (code 0) and the pointer is set to the start of the CIPSO field * that is unrecognized." 
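* In this implementation the non-zero return value doubles as the offset of the bad octet within the option, e.g. a malformed length octet yields 1 and an unrecognized DOI yields 2, matching the adjusted @option pointer.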
* */ int cipso_v4_validate(const struct sk_buff *skb, unsigned char **option) { unsigned char *opt = *option; unsigned char *tag; unsigned char opt_iter; unsigned char err_offset = 0; u8 opt_len; u8 tag_len; struct cipso_v4_doi *doi_def = NULL; u32 tag_iter; /* caller already checks for length values that are too large */ opt_len = opt[1]; if (opt_len < 8) { err_offset = 1; goto validate_return; } rcu_read_lock(); doi_def = cipso_v4_doi_search(get_unaligned_be32(&opt[2])); if (doi_def == NULL) { err_offset = 2; goto validate_return_locked; } opt_iter = CIPSO_V4_HDR_LEN; tag = opt + opt_iter; while (opt_iter < opt_len) { for (tag_iter = 0; doi_def->tags[tag_iter] != tag[0];) if (doi_def->tags[tag_iter] == CIPSO_V4_TAG_INVALID || ++tag_iter == CIPSO_V4_TAG_MAXCNT) { err_offset = opt_iter; goto validate_return_locked; } tag_len = tag[1]; if (tag_len > (opt_len - opt_iter)) { err_offset = opt_iter + 1; goto validate_return_locked; } switch (tag[0]) { case CIPSO_V4_TAG_RBITMAP: if (tag_len < CIPSO_V4_TAG_RBM_BLEN) { err_offset = opt_iter + 1; goto validate_return_locked; } /* We are already going to do all the verification * necessary at the socket layer so from our point of * view it is safe to turn these checks off (and less * work), however, the CIPSO draft says we should do * all the CIPSO validations here but it doesn't * really specify _exactly_ what we need to validate * ... so, just make it a sysctl tunable. */ if (cipso_v4_rbm_strictvalid) { if (cipso_v4_map_lvl_valid(doi_def, tag[3]) < 0) { err_offset = opt_iter + 3; goto validate_return_locked; } if (tag_len > CIPSO_V4_TAG_RBM_BLEN && cipso_v4_map_cat_rbm_valid(doi_def, &tag[4], tag_len - 4) < 0) { err_offset = opt_iter + 4; goto validate_return_locked; } } break; case CIPSO_V4_TAG_ENUM: if (tag_len < CIPSO_V4_TAG_ENUM_BLEN) { err_offset = opt_iter + 1; goto validate_return_locked; } if (cipso_v4_map_lvl_valid(doi_def, tag[3]) < 0) { err_offset = opt_iter + 3; goto validate_return_locked; } if (tag_len > CIPSO_V4_TAG_ENUM_BLEN && cipso_v4_map_cat_enum_valid(doi_def, &tag[4], tag_len - 4) < 0) { err_offset = opt_iter + 4; goto validate_return_locked; } break; case CIPSO_V4_TAG_RANGE: if (tag_len < CIPSO_V4_TAG_RNG_BLEN) { err_offset = opt_iter + 1; goto validate_return_locked; } if (cipso_v4_map_lvl_valid(doi_def, tag[3]) < 0) { err_offset = opt_iter + 3; goto validate_return_locked; } if (tag_len > CIPSO_V4_TAG_RNG_BLEN && cipso_v4_map_cat_rng_valid(doi_def, &tag[4], tag_len - 4) < 0) { err_offset = opt_iter + 4; goto validate_return_locked; } break; case CIPSO_V4_TAG_LOCAL: /* This is a non-standard tag that we only allow for * local connections, so if the incoming interface is * not the loopback device drop the packet. */ if (!(skb->dev->flags & IFF_LOOPBACK)) { err_offset = opt_iter; goto validate_return_locked; } if (tag_len != CIPSO_V4_TAG_LOC_BLEN) { err_offset = opt_iter + 1; goto validate_return_locked; } break; default: err_offset = opt_iter; goto validate_return_locked; } tag += tag_len; opt_iter += tag_len; } validate_return_locked: rcu_read_unlock(); validate_return: *option = opt + err_offset; return err_offset; } /** * cipso_v4_error - Send the correct response for a bad packet * @skb: the packet * @error: the error code * @gateway: CIPSO gateway flag * * Description: * Based on the error code given in @error, send an ICMP error message back to * the originating host. From the IETF draft ... 
* * "If the contents of the CIPSO [option] are valid but the security label is * outside of the configured host or port label range, the datagram is * discarded and an ICMP 'destination unreachable' (type 3) is generated and * returned. The code field of the ICMP is set to 'communication with * destination network administratively prohibited' (code 9) or to * 'communication with destination host administratively prohibited' * (code 10). The value of the code is dependent on whether the originator * of the ICMP message is acting as a CIPSO host or a CIPSO gateway. The * recipient of the ICMP message MUST be able to handle either value. The * same procedure is performed if a CIPSO [option] can not be added to an * IP packet because it is too large to fit in the IP options area." * * "If the error is triggered by receipt of an ICMP message, the message is * discarded and no response is permitted (consistent with general ICMP * processing rules)." * */ void cipso_v4_error(struct sk_buff *skb, int error, u32 gateway) { if (ip_hdr(skb)->protocol == IPPROTO_ICMP || error != -EACCES) return; if (gateway) icmp_send(skb, ICMP_DEST_UNREACH, ICMP_NET_ANO, 0); else icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_ANO, 0); } /** * cipso_v4_genopt - Generate a CIPSO option * @buf: the option buffer * @buf_len: the size of opt_buf * @doi_def: the CIPSO DOI to use * @secattr: the security attributes * * Description: * Generate a CIPSO option using the DOI definition and security attributes * passed to the function. Returns the length of the option on success and * negative values on failure. * */ static int cipso_v4_genopt(unsigned char *buf, u32 buf_len, const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr) { int ret_val; u32 iter; if (buf_len <= CIPSO_V4_HDR_LEN) return -ENOSPC; /* XXX - This code assumes only one tag per CIPSO option which isn't * really a good assumption to make but since we only support the MAC * tags right now it is a safe assumption. */ iter = 0; do { memset(buf, 0, buf_len); switch (doi_def->tags[iter]) { case CIPSO_V4_TAG_RBITMAP: ret_val = cipso_v4_gentag_rbm(doi_def, secattr, &buf[CIPSO_V4_HDR_LEN], buf_len - CIPSO_V4_HDR_LEN); break; case CIPSO_V4_TAG_ENUM: ret_val = cipso_v4_gentag_enum(doi_def, secattr, &buf[CIPSO_V4_HDR_LEN], buf_len - CIPSO_V4_HDR_LEN); break; case CIPSO_V4_TAG_RANGE: ret_val = cipso_v4_gentag_rng(doi_def, secattr, &buf[CIPSO_V4_HDR_LEN], buf_len - CIPSO_V4_HDR_LEN); break; case CIPSO_V4_TAG_LOCAL: ret_val = cipso_v4_gentag_loc(doi_def, secattr, &buf[CIPSO_V4_HDR_LEN], buf_len - CIPSO_V4_HDR_LEN); break; default: return -EPERM; } iter++; } while (ret_val < 0 && iter < CIPSO_V4_TAG_MAXCNT && doi_def->tags[iter] != CIPSO_V4_TAG_INVALID); if (ret_val < 0) return ret_val; cipso_v4_gentag_hdr(doi_def, buf, ret_val); return CIPSO_V4_HDR_LEN + ret_val; } static void opt_kfree_rcu(struct rcu_head *head) { kfree(container_of(head, struct ip_options_rcu, rcu)); } /** * cipso_v4_sock_setattr - Add a CIPSO option to a socket * @sk: the socket * @doi_def: the CIPSO DOI to use * @secattr: the specific security attributes of the socket * * Description: * Set the CIPSO option on the given socket using the DOI definition and * security attributes passed to the function. This function requires * exclusive access to @sk, which means it either needs to be in the * process of being created or locked. Returns zero on success and negative * values on failure. 
* */ int cipso_v4_sock_setattr(struct sock *sk, const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr) { int ret_val = -EPERM; unsigned char *buf = NULL; u32 buf_len; u32 opt_len; struct ip_options_rcu *old, *opt = NULL; struct inet_sock *sk_inet; struct inet_connection_sock *sk_conn; /* In the case of sock_create_lite(), the sock->sk field is not * defined yet but it is not a problem as the only users of these * "lite" PF_INET sockets are functions which do an accept() call * afterwards so we will label the socket as part of the accept(). */ if (sk == NULL) return 0; /* We allocate the maximum CIPSO option size here so we are probably * being a little wasteful, but it makes our life _much_ easier later * on and after all we are only talking about 40 bytes. */ buf_len = CIPSO_V4_OPT_LEN_MAX; buf = kmalloc(buf_len, GFP_ATOMIC); if (buf == NULL) { ret_val = -ENOMEM; goto socket_setattr_failure; } ret_val = cipso_v4_genopt(buf, buf_len, doi_def, secattr); if (ret_val < 0) goto socket_setattr_failure; buf_len = ret_val; /* We can't use ip_options_get() directly because it makes a call to * ip_options_get_alloc() which allocates memory with GFP_KERNEL and * we won't always have CAP_NET_RAW even though we _always_ want to * set the IPOPT_CIPSO option. */ opt_len = (buf_len + 3) & ~3; opt = kzalloc(sizeof(*opt) + opt_len, GFP_ATOMIC); if (opt == NULL) { ret_val = -ENOMEM; goto socket_setattr_failure; } memcpy(opt->opt.__data, buf, buf_len); opt->opt.optlen = opt_len; opt->opt.cipso = sizeof(struct iphdr); kfree(buf); buf = NULL; sk_inet = inet_sk(sk); old = rcu_dereference_protected(sk_inet->inet_opt, sock_owned_by_user(sk)); if (sk_inet->is_icsk) { sk_conn = inet_csk(sk); if (old) sk_conn->icsk_ext_hdr_len -= old->opt.optlen; sk_conn->icsk_ext_hdr_len += opt->opt.optlen; sk_conn->icsk_sync_mss(sk, sk_conn->icsk_pmtu_cookie); } rcu_assign_pointer(sk_inet->inet_opt, opt); if (old) call_rcu(&old->rcu, opt_kfree_rcu); return 0; socket_setattr_failure: kfree(buf); kfree(opt); return ret_val; } /** * cipso_v4_req_setattr - Add a CIPSO option to a connection request socket * @req: the connection request socket * @doi_def: the CIPSO DOI to use * @secattr: the specific security attributes of the socket * * Description: * Set the CIPSO option on the given socket using the DOI definition and * security attributes passed to the function. Returns zero on success and * negative values on failure. * */ int cipso_v4_req_setattr(struct request_sock *req, const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr) { int ret_val = -EPERM; unsigned char *buf = NULL; u32 buf_len; u32 opt_len; struct ip_options_rcu *opt = NULL; struct inet_request_sock *req_inet; /* We allocate the maximum CIPSO option size here so we are probably * being a little wasteful, but it makes our life _much_ easier later * on and after all we are only talking about 40 bytes. */ buf_len = CIPSO_V4_OPT_LEN_MAX; buf = kmalloc(buf_len, GFP_ATOMIC); if (buf == NULL) { ret_val = -ENOMEM; goto req_setattr_failure; } ret_val = cipso_v4_genopt(buf, buf_len, doi_def, secattr); if (ret_val < 0) goto req_setattr_failure; buf_len = ret_val; /* We can't use ip_options_get() directly because it makes a call to * ip_options_get_alloc() which allocates memory with GFP_KERNEL and * we won't always have CAP_NET_RAW even though we _always_ want to * set the IPOPT_CIPSO option. 
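* Note that the copy below is padded out to a 32-bit boundary, opt_len = (buf_len + 3) & ~3, so e.g. a 14 byte option lands in a 16 byte zero filled block as required by the IPv4 header length units.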
*/ opt_len = (buf_len + 3) & ~3; opt = kzalloc(sizeof(*opt) + opt_len, GFP_ATOMIC); if (opt == NULL) { ret_val = -ENOMEM; goto req_setattr_failure; } memcpy(opt->opt.__data, buf, buf_len); opt->opt.optlen = opt_len; opt->opt.cipso = sizeof(struct iphdr); kfree(buf); buf = NULL; req_inet = inet_rsk(req); opt = xchg(&req_inet->opt, opt); if (opt) call_rcu(&opt->rcu, opt_kfree_rcu); return 0; req_setattr_failure: kfree(buf); kfree(opt); return ret_val; } /** * cipso_v4_delopt - Delete the CIPSO option from a set of IP options * @opt_ptr: IP option pointer * * Description: * Deletes the CIPSO IP option from a set of IP options and makes the necessary * adjustments to the IP option structure. Returns zero on success, negative * values on failure. * */ static int cipso_v4_delopt(struct ip_options_rcu **opt_ptr) { int hdr_delta = 0; struct ip_options_rcu *opt = *opt_ptr; if (opt->opt.srr || opt->opt.rr || opt->opt.ts || opt->opt.router_alert) { u8 cipso_len; u8 cipso_off; unsigned char *cipso_ptr; int iter; int optlen_new; cipso_off = opt->opt.cipso - sizeof(struct iphdr); cipso_ptr = &opt->opt.__data[cipso_off]; cipso_len = cipso_ptr[1]; if (opt->opt.srr > opt->opt.cipso) opt->opt.srr -= cipso_len; if (opt->opt.rr > opt->opt.cipso) opt->opt.rr -= cipso_len; if (opt->opt.ts > opt->opt.cipso) opt->opt.ts -= cipso_len; if (opt->opt.router_alert > opt->opt.cipso) opt->opt.router_alert -= cipso_len; opt->opt.cipso = 0; memmove(cipso_ptr, cipso_ptr + cipso_len, opt->opt.optlen - cipso_off - cipso_len); /* determining the new total option length is tricky because of * the padding necessary; the only thing I can think to do at * this point is walk the options one-by-one, skipping the * padding at the end to determine the actual option size and * from there we can determine the new total option length */ iter = 0; optlen_new = 0; while (iter < opt->opt.optlen) if (opt->opt.__data[iter] != IPOPT_NOP) { iter += opt->opt.__data[iter + 1]; optlen_new = iter; } else iter++; hdr_delta = opt->opt.optlen; opt->opt.optlen = (optlen_new + 3) & ~3; hdr_delta -= opt->opt.optlen; } else { /* only the cipso option was present on the socket so we can * remove the entire option struct */ *opt_ptr = NULL; hdr_delta = opt->opt.optlen; call_rcu(&opt->rcu, opt_kfree_rcu); } return hdr_delta; } /** * cipso_v4_sock_delattr - Delete the CIPSO option from a socket * @sk: the socket * * Description: * Removes the CIPSO option from a socket, if present. * */ void cipso_v4_sock_delattr(struct sock *sk) { int hdr_delta; struct ip_options_rcu *opt; struct inet_sock *sk_inet; sk_inet = inet_sk(sk); opt = rcu_dereference_protected(sk_inet->inet_opt, 1); if (opt == NULL || opt->opt.cipso == 0) return; hdr_delta = cipso_v4_delopt(&sk_inet->inet_opt); if (sk_inet->is_icsk && hdr_delta > 0) { struct inet_connection_sock *sk_conn = inet_csk(sk); sk_conn->icsk_ext_hdr_len -= hdr_delta; sk_conn->icsk_sync_mss(sk, sk_conn->icsk_pmtu_cookie); } } /** * cipso_v4_req_delattr - Delete the CIPSO option from a request socket * @req: the request socket * * Description: * Removes the CIPSO option from a request socket, if present. 
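* Like cipso_v4_sock_delattr() this defers to cipso_v4_delopt(); the request socket variant simply skips the icsk_ext_hdr_len/MSS resync because a request socket carries no cached connection header length.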
* */ void cipso_v4_req_delattr(struct request_sock *req) { struct ip_options_rcu *opt; struct inet_request_sock *req_inet; req_inet = inet_rsk(req); opt = req_inet->opt; if (opt == NULL || opt->opt.cipso == 0) return; cipso_v4_delopt(&req_inet->opt); } /** * cipso_v4_getattr - Helper function for the cipso_v4_*_getattr functions * @cipso: the CIPSO v4 option * @secattr: the security attributes * * Description: * Inspect @cipso and return the security attributes in @secattr. Returns zero * on success and negative values on failure. * */ static int cipso_v4_getattr(const unsigned char *cipso, struct netlbl_lsm_secattr *secattr) { int ret_val = -ENOMSG; u32 doi; struct cipso_v4_doi *doi_def; if (cipso_v4_cache_check(cipso, cipso[1], secattr) == 0) return 0; doi = get_unaligned_be32(&cipso[2]); rcu_read_lock(); doi_def = cipso_v4_doi_search(doi); if (doi_def == NULL) goto getattr_return; /* XXX - This code assumes only one tag per CIPSO option which isn't * really a good assumption to make but since we only support the MAC * tags right now it is a safe assumption. */ switch (cipso[6]) { case CIPSO_V4_TAG_RBITMAP: ret_val = cipso_v4_parsetag_rbm(doi_def, &cipso[6], secattr); break; case CIPSO_V4_TAG_ENUM: ret_val = cipso_v4_parsetag_enum(doi_def, &cipso[6], secattr); break; case CIPSO_V4_TAG_RANGE: ret_val = cipso_v4_parsetag_rng(doi_def, &cipso[6], secattr); break; case CIPSO_V4_TAG_LOCAL: ret_val = cipso_v4_parsetag_loc(doi_def, &cipso[6], secattr); break; } if (ret_val == 0) secattr->type = NETLBL_NLTYPE_CIPSOV4; getattr_return: rcu_read_unlock(); return ret_val; } /** * cipso_v4_sock_getattr - Get the security attributes from a sock * @sk: the sock * @secattr: the security attributes * * Description: * Query @sk to see if there is a CIPSO option attached to the sock and if * there is return the CIPSO security attributes in @secattr. This function * requires that @sk be locked, or privately held, but it does not do any * locking itself. Returns zero on success and negative values on failure. * */ int cipso_v4_sock_getattr(struct sock *sk, struct netlbl_lsm_secattr *secattr) { struct ip_options_rcu *opt; int res = -ENOMSG; rcu_read_lock(); opt = rcu_dereference(inet_sk(sk)->inet_opt); if (opt && opt->opt.cipso) res = cipso_v4_getattr(opt->opt.__data + opt->opt.cipso - sizeof(struct iphdr), secattr); rcu_read_unlock(); return res; } /** * cipso_v4_skbuff_setattr - Set the CIPSO option on a packet * @skb: the packet * @doi_def: the CIPSO DOI to use * @secattr: the security attributes * * Description: * Set the CIPSO option on the given packet based on the security attributes. * Returns zero on success and negative values on failure. 
* */ int cipso_v4_skbuff_setattr(struct sk_buff *skb, const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr) { int ret_val; struct iphdr *iph; struct ip_options *opt = &IPCB(skb)->opt; unsigned char buf[CIPSO_V4_OPT_LEN_MAX]; u32 buf_len = CIPSO_V4_OPT_LEN_MAX; u32 opt_len; int len_delta; ret_val = cipso_v4_genopt(buf, buf_len, doi_def, secattr); if (ret_val < 0) return ret_val; buf_len = ret_val; opt_len = (buf_len + 3) & ~3; /* we overwrite any existing options to ensure that we have enough * room for the CIPSO option, the reason is that we _need_ to guarantee * that the security label is applied to the packet - we do the same * thing when using the socket options and it hasn't caused a problem, * if we need to we can always revisit this choice later */ len_delta = opt_len - opt->optlen; /* if we don't ensure enough headroom we could panic on the skb_push() * call below so make sure we have enough, we are also "mangling" the * packet so we should probably do a copy-on-write call anyway */ ret_val = skb_cow(skb, skb_headroom(skb) + len_delta); if (ret_val < 0) return ret_val; if (len_delta > 0) { /* we assume that the header + opt->optlen have already been * "pushed" in ip_options_build() or similar */ iph = ip_hdr(skb); skb_push(skb, len_delta); memmove((char *)iph - len_delta, iph, iph->ihl << 2); skb_reset_network_header(skb); iph = ip_hdr(skb); } else if (len_delta < 0) { iph = ip_hdr(skb); memset(iph + 1, IPOPT_NOP, opt->optlen); } else iph = ip_hdr(skb); if (opt->optlen > 0) memset(opt, 0, sizeof(*opt)); opt->optlen = opt_len; opt->cipso = sizeof(struct iphdr); opt->is_changed = 1; /* we have to do the following because we are being called from a * netfilter hook which means the packet already has had the header * fields populated and the checksum calculated - yes this means we * are doing more work than needed but we do it to keep the core * stack clean and tidy */ memcpy(iph + 1, buf, buf_len); if (opt_len > buf_len) memset((char *)(iph + 1) + buf_len, 0, opt_len - buf_len); if (len_delta != 0) { iph->ihl = 5 + (opt_len >> 2); iph->tot_len = htons(skb->len); } ip_send_check(iph); return 0; } /** * cipso_v4_skbuff_delattr - Delete any CIPSO options from a packet * @skb: the packet * * Description: * Removes any and all CIPSO options from the given packet. Returns zero on * success, negative values on failure. * */ int cipso_v4_skbuff_delattr(struct sk_buff *skb) { int ret_val; struct iphdr *iph; struct ip_options *opt = &IPCB(skb)->opt; unsigned char *cipso_ptr; if (opt->cipso == 0) return 0; /* since we are changing the packet we should make a copy */ ret_val = skb_cow(skb, skb_headroom(skb)); if (ret_val < 0) return ret_val; /* the easiest thing to do is just replace the cipso option with noop * options since we don't change the size of the packet, although we * still need to recalculate the checksum */ iph = ip_hdr(skb); cipso_ptr = (unsigned char *)iph + opt->cipso; memset(cipso_ptr, IPOPT_NOOP, cipso_ptr[1]); opt->cipso = 0; opt->is_changed = 1; ip_send_check(iph); return 0; } /** * cipso_v4_skbuff_getattr - Get the security attributes from the CIPSO option * @skb: the packet * @secattr: the security attributes * * Description: * Parse the given packet's CIPSO option and return the security attributes. * Returns zero on success and negative values on failure. 
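* The helper relies on CIPSO_V4_OPTPTR() pointing at the start of the CIPSO option within the packet, so the whole parse can be delegated to cipso_v4_getattr() above.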
* */ int cipso_v4_skbuff_getattr(const struct sk_buff *skb, struct netlbl_lsm_secattr *secattr) { return cipso_v4_getattr(CIPSO_V4_OPTPTR(skb), secattr); } /* * Setup Functions */ /** * cipso_v4_init - Initialize the CIPSO module * * Description: * Initialize the CIPSO module and prepare it for use. Returns zero on success * and negative values on failure. * */ static int __init cipso_v4_init(void) { int ret_val; ret_val = cipso_v4_cache_init(); if (ret_val != 0) panic("Failed to initialize the CIPSO/IPv4 cache (%d)\n", ret_val); return 0; } subsys_initcall(cipso_v4_init);
void cipso_v4_req_delattr(struct request_sock *req) { struct ip_options *opt; struct inet_request_sock *req_inet; req_inet = inet_rsk(req); opt = req_inet->opt; if (opt == NULL || opt->cipso == 0) return; cipso_v4_delopt(&req_inet->opt); }
void cipso_v4_req_delattr(struct request_sock *req) { struct ip_options_rcu *opt; struct inet_request_sock *req_inet; req_inet = inet_rsk(req); opt = req_inet->opt; if (opt == NULL || opt->opt.cipso == 0) return; cipso_v4_delopt(&req_inet->opt); }
{'added': [(1860, 'static void opt_kfree_rcu(struct rcu_head *head)'), (1861, '{'), (1862, '\tkfree(container_of(head, struct ip_options_rcu, rcu));'), (1863, '}'), (1864, ''), (1887, '\tstruct ip_options_rcu *old, *opt = NULL;'), (1923, '\tmemcpy(opt->opt.__data, buf, buf_len);'), (1924, '\topt->opt.optlen = opt_len;'), (1925, '\topt->opt.cipso = sizeof(struct iphdr);'), (1930, ''), (1931, '\told = rcu_dereference_protected(sk_inet->inet_opt, sock_owned_by_user(sk));'), (1934, '\t\tif (old)'), (1935, '\t\t\tsk_conn->icsk_ext_hdr_len -= old->opt.optlen;'), (1936, '\t\tsk_conn->icsk_ext_hdr_len += opt->opt.optlen;'), (1939, '\trcu_assign_pointer(sk_inet->inet_opt, opt);'), (1940, '\tif (old)'), (1941, '\t\tcall_rcu(&old->rcu, opt_kfree_rcu);'), (1971, '\tstruct ip_options_rcu *opt = NULL;'), (1999, '\tmemcpy(opt->opt.__data, buf, buf_len);'), (2000, '\topt->opt.optlen = opt_len;'), (2001, '\topt->opt.cipso = sizeof(struct iphdr);'), (2007, '\tif (opt)'), (2008, '\t\tcall_rcu(&opt->rcu, opt_kfree_rcu);'), (2028, 'static int cipso_v4_delopt(struct ip_options_rcu **opt_ptr)'), (2031, '\tstruct ip_options_rcu *opt = *opt_ptr;'), (2033, '\tif (opt->opt.srr || opt->opt.rr || opt->opt.ts || opt->opt.router_alert) {'), (2040, '\t\tcipso_off = opt->opt.cipso - sizeof(struct iphdr);'), (2041, '\t\tcipso_ptr = &opt->opt.__data[cipso_off];'), (2044, '\t\tif (opt->opt.srr > opt->opt.cipso)'), (2045, '\t\t\topt->opt.srr -= cipso_len;'), (2046, '\t\tif (opt->opt.rr > opt->opt.cipso)'), (2047, '\t\t\topt->opt.rr -= cipso_len;'), (2048, '\t\tif (opt->opt.ts > opt->opt.cipso)'), (2049, '\t\t\topt->opt.ts -= cipso_len;'), (2050, '\t\tif (opt->opt.router_alert > opt->opt.cipso)'), (2051, '\t\t\topt->opt.router_alert -= cipso_len;'), (2052, '\t\topt->opt.cipso = 0;'), (2055, '\t\t\topt->opt.optlen - cipso_off - cipso_len);'), (2064, '\t\twhile (iter < opt->opt.optlen)'), (2065, '\t\t\tif (opt->opt.__data[iter] != IPOPT_NOP) {'), (2066, '\t\t\t\titer += opt->opt.__data[iter + 1];'), (2070, '\t\thdr_delta = opt->opt.optlen;'), (2071, '\t\topt->opt.optlen = (optlen_new + 3) & ~3;'), (2072, '\t\thdr_delta -= opt->opt.optlen;'), (2077, '\t\thdr_delta = opt->opt.optlen;'), (2078, '\t\tcall_rcu(&opt->rcu, opt_kfree_rcu);'), (2095, '\tstruct ip_options_rcu *opt;'), (2099, '\topt = rcu_dereference_protected(sk_inet->inet_opt, 1);'), (2100, '\tif (opt == NULL || opt->opt.cipso == 0)'), (2103, '\thdr_delta = cipso_v4_delopt(&sk_inet->inet_opt);'), (2121, '\tstruct ip_options_rcu *opt;'), (2126, '\tif (opt == NULL || opt->opt.cipso == 0)'), (2196, '\tstruct ip_options_rcu *opt;'), (2197, '\tint res = -ENOMSG;'), (2199, '\trcu_read_lock();'), (2200, '\topt = rcu_dereference(inet_sk(sk)->inet_opt);'), (2201, '\tif (opt && opt->opt.cipso)'), (2202, '\t\tres = cipso_v4_getattr(opt->opt.__data +'), (2203, '\t\t\t\t\t\topt->opt.cipso -'), (2204, '\t\t\t\t\t\tsizeof(struct iphdr),'), (2205, '\t\t\t\t secattr);'), (2206, '\trcu_read_unlock();'), (2207, '\treturn res;')], 'deleted': [(1882, '\tstruct ip_options *opt = NULL;'), (1918, '\tmemcpy(opt->__data, buf, buf_len);'), (1919, '\topt->optlen = opt_len;'), (1920, '\topt->cipso = sizeof(struct iphdr);'), (1927, '\t\tif (sk_inet->opt)'), (1928, '\t\t\tsk_conn->icsk_ext_hdr_len -= sk_inet->opt->optlen;'), (1929, '\t\tsk_conn->icsk_ext_hdr_len += opt->optlen;'), (1932, '\topt = xchg(&sk_inet->opt, opt);'), (1933, '\tkfree(opt);'), (1963, '\tstruct ip_options *opt = NULL;'), (1991, '\tmemcpy(opt->__data, buf, buf_len);'), (1992, '\topt->optlen = opt_len;'), (1993, '\topt->cipso = 
sizeof(struct iphdr);'), (1999, '\tkfree(opt);'), (2019, 'static int cipso_v4_delopt(struct ip_options **opt_ptr)'), (2022, '\tstruct ip_options *opt = *opt_ptr;'), (2024, '\tif (opt->srr || opt->rr || opt->ts || opt->router_alert) {'), (2031, '\t\tcipso_off = opt->cipso - sizeof(struct iphdr);'), (2032, '\t\tcipso_ptr = &opt->__data[cipso_off];'), (2035, '\t\tif (opt->srr > opt->cipso)'), (2036, '\t\t\topt->srr -= cipso_len;'), (2037, '\t\tif (opt->rr > opt->cipso)'), (2038, '\t\t\topt->rr -= cipso_len;'), (2039, '\t\tif (opt->ts > opt->cipso)'), (2040, '\t\t\topt->ts -= cipso_len;'), (2041, '\t\tif (opt->router_alert > opt->cipso)'), (2042, '\t\t\topt->router_alert -= cipso_len;'), (2043, '\t\topt->cipso = 0;'), (2046, '\t\t\topt->optlen - cipso_off - cipso_len);'), (2055, '\t\twhile (iter < opt->optlen)'), (2056, '\t\t\tif (opt->__data[iter] != IPOPT_NOP) {'), (2057, '\t\t\t\titer += opt->__data[iter + 1];'), (2061, '\t\thdr_delta = opt->optlen;'), (2062, '\t\topt->optlen = (optlen_new + 3) & ~3;'), (2063, '\t\thdr_delta -= opt->optlen;'), (2068, '\t\thdr_delta = opt->optlen;'), (2069, '\t\tkfree(opt);'), (2086, '\tstruct ip_options *opt;'), (2090, '\topt = sk_inet->opt;'), (2091, '\tif (opt == NULL || opt->cipso == 0)'), (2094, '\thdr_delta = cipso_v4_delopt(&sk_inet->opt);'), (2112, '\tstruct ip_options *opt;'), (2117, '\tif (opt == NULL || opt->cipso == 0)'), (2187, '\tstruct ip_options *opt;'), (2189, '\topt = inet_sk(sk)->opt;'), (2190, '\tif (opt == NULL || opt->cipso == 0)'), (2191, '\t\treturn -ENOMSG;'), (2192, ''), (2193, '\treturn cipso_v4_getattr(opt->__data + opt->cipso - sizeof(struct iphdr),'), (2194, '\t\t\t\tsecattr);')]}
63
50
1,360
7,485
https://github.com/torvalds/linux
CVE-2012-3552
['CWE-362']
cipso_ipv4.c
cipso_v4_req_setattr
/* * CIPSO - Commercial IP Security Option * * This is an implementation of the CIPSO 2.2 protocol as specified in * draft-ietf-cipso-ipsecurity-01.txt with additional tag types as found in * FIPS-188. While CIPSO never became a full IETF RFC standard many vendors * have chosen to adopt the protocol and over the years it has become a * de-facto standard for labeled networking. * * The CIPSO draft specification can be found in the kernel's Documentation * directory as well as the following URL: * http://tools.ietf.org/id/draft-ietf-cipso-ipsecurity-01.txt * The FIPS-188 specification can be found at the following URL: * http://www.itl.nist.gov/fipspubs/fip188.htm * * Author: Paul Moore <paul.moore@hp.com> * */ /* * (c) Copyright Hewlett-Packard Development Company, L.P., 2006, 2008 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See * the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/init.h> #include <linux/types.h> #include <linux/rcupdate.h> #include <linux/list.h> #include <linux/spinlock.h> #include <linux/string.h> #include <linux/jhash.h> #include <linux/audit.h> #include <linux/slab.h> #include <net/ip.h> #include <net/icmp.h> #include <net/tcp.h> #include <net/netlabel.h> #include <net/cipso_ipv4.h> #include <asm/atomic.h> #include <asm/bug.h> #include <asm/unaligned.h> /* List of available DOI definitions */ /* XXX - This currently assumes a minimal number of different DOIs in use, * if in practice there are a lot of different DOIs this list should * probably be turned into a hash table or something similar so we * can do quick lookups. */ static DEFINE_SPINLOCK(cipso_v4_doi_list_lock); static LIST_HEAD(cipso_v4_doi_list); /* Label mapping cache */ int cipso_v4_cache_enabled = 1; int cipso_v4_cache_bucketsize = 10; #define CIPSO_V4_CACHE_BUCKETBITS 7 #define CIPSO_V4_CACHE_BUCKETS (1 << CIPSO_V4_CACHE_BUCKETBITS) #define CIPSO_V4_CACHE_REORDERLIMIT 10 struct cipso_v4_map_cache_bkt { spinlock_t lock; u32 size; struct list_head list; }; struct cipso_v4_map_cache_entry { u32 hash; unsigned char *key; size_t key_len; struct netlbl_lsm_cache *lsm_data; u32 activity; struct list_head list; }; static struct cipso_v4_map_cache_bkt *cipso_v4_cache = NULL; /* Restricted bitmap (tag #1) flags */ int cipso_v4_rbm_optfmt = 0; int cipso_v4_rbm_strictvalid = 1; /* * Protocol Constants */ /* Maximum size of the CIPSO IP option, derived from the fact that the maximum * IPv4 header size is 60 bytes and the base IPv4 header is 20 bytes long. */ #define CIPSO_V4_OPT_LEN_MAX 40 /* Length of the base CIPSO option, this includes the option type (1 byte), the * option length (1 byte), and the DOI (4 bytes). */ #define CIPSO_V4_HDR_LEN 6 /* Base length of the restrictive category bitmap tag (tag #1). */ #define CIPSO_V4_TAG_RBM_BLEN 4 /* Base length of the enumerated category tag (tag #2). */ #define CIPSO_V4_TAG_ENUM_BLEN 4 /* Base length of the ranged categories bitmap tag (tag #5). 
*/ #define CIPSO_V4_TAG_RNG_BLEN 4 /* The maximum number of category ranges permitted in the ranged category tag * (tag #5). You may note that the IETF draft states that the maximum number * of category ranges is 7, but if the low end of the last category range is * zero then it is possible to fit 8 category ranges because the zero should * be omitted. */ #define CIPSO_V4_TAG_RNG_CAT_MAX 8 /* Base length of the local tag (non-standard tag). * Tag definition (may change between kernel versions) * * 0 8 16 24 32 * +----------+----------+----------+----------+ * | 10000000 | 00000110 | 32-bit secid value | * +----------+----------+----------+----------+ * | in (host byte order)| * +----------+----------+ * */ #define CIPSO_V4_TAG_LOC_BLEN 6 /* * Helper Functions */ /** * cipso_v4_bitmap_walk - Walk a bitmap looking for a bit * @bitmap: the bitmap * @bitmap_len: length in bits * @offset: starting offset * @state: if non-zero, look for a set (1) bit else look for a cleared (0) bit * * Description: * Starting at @offset, walk the bitmap from left to right until either the * desired bit is found or we reach the end. Return the bit offset, -1 if * not found, or -2 if error. */ static int cipso_v4_bitmap_walk(const unsigned char *bitmap, u32 bitmap_len, u32 offset, u8 state) { u32 bit_spot; u32 byte_offset; unsigned char bitmask; unsigned char byte; /* gcc always rounds to zero when doing integer division */ byte_offset = offset / 8; byte = bitmap[byte_offset]; bit_spot = offset; bitmask = 0x80 >> (offset % 8); while (bit_spot < bitmap_len) { if ((state && (byte & bitmask) == bitmask) || (state == 0 && (byte & bitmask) == 0)) return bit_spot; bit_spot++; bitmask >>= 1; if (bitmask == 0) { byte = bitmap[++byte_offset]; bitmask = 0x80; } } return -1; } /** * cipso_v4_bitmap_setbit - Sets a single bit in a bitmap * @bitmap: the bitmap * @bit: the bit * @state: if non-zero, set the bit (1) else clear the bit (0) * * Description: * Set or clear a single bit in the bitmap; the operation cannot fail, hence * the void return. */ static void cipso_v4_bitmap_setbit(unsigned char *bitmap, u32 bit, u8 state) { u32 byte_spot; u8 bitmask; /* gcc always rounds to zero when doing integer division */ byte_spot = bit / 8; bitmask = 0x80 >> (bit % 8); if (state) bitmap[byte_spot] |= bitmask; else bitmap[byte_spot] &= ~bitmask; } /** * cipso_v4_cache_entry_free - Frees a cache entry * @entry: the entry to free * * Description: * This function frees the memory associated with a cache entry including the * LSM cache data if there are no longer any users, i.e. reference count == 0. * */ static void cipso_v4_cache_entry_free(struct cipso_v4_map_cache_entry *entry) { if (entry->lsm_data) netlbl_secattr_cache_free(entry->lsm_data); kfree(entry->key); kfree(entry); } /** * cipso_v4_map_cache_hash - Hashing function for the CIPSO cache * @key: the hash key * @key_len: the length of the key in bytes * * Description: * The CIPSO tag hashing function. Returns a 32-bit hash value. * */ static u32 cipso_v4_map_cache_hash(const unsigned char *key, u32 key_len) { return jhash(key, key_len, 0); } /* * Label Mapping Cache Functions */ /** * cipso_v4_cache_init - Initialize the CIPSO cache * * Description: * Initializes the CIPSO label mapping cache; this function should be called * before any of the other functions defined in this file. Returns zero on * success, negative values on error. 
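* (With CIPSO_V4_CACHE_BUCKETBITS == 7 this allocates 128 buckets, each guarded by its own spinlock; lookups later select a bucket with hash & (CIPSO_V4_CACHE_BUCKETS - 1).)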
* */ static int cipso_v4_cache_init(void) { u32 iter; cipso_v4_cache = kcalloc(CIPSO_V4_CACHE_BUCKETS, sizeof(struct cipso_v4_map_cache_bkt), GFP_KERNEL); if (cipso_v4_cache == NULL) return -ENOMEM; for (iter = 0; iter < CIPSO_V4_CACHE_BUCKETS; iter++) { spin_lock_init(&cipso_v4_cache[iter].lock); cipso_v4_cache[iter].size = 0; INIT_LIST_HEAD(&cipso_v4_cache[iter].list); } return 0; } /** * cipso_v4_cache_invalidate - Invalidates the current CIPSO cache * * Description: * Invalidates and frees any entries in the CIPSO cache. * */ void cipso_v4_cache_invalidate(void) { struct cipso_v4_map_cache_entry *entry, *tmp_entry; u32 iter; for (iter = 0; iter < CIPSO_V4_CACHE_BUCKETS; iter++) { spin_lock_bh(&cipso_v4_cache[iter].lock); list_for_each_entry_safe(entry, tmp_entry, &cipso_v4_cache[iter].list, list) { list_del(&entry->list); cipso_v4_cache_entry_free(entry); } cipso_v4_cache[iter].size = 0; spin_unlock_bh(&cipso_v4_cache[iter].lock); } } /** * cipso_v4_cache_check - Check the CIPSO cache for a label mapping * @key: the buffer to check * @key_len: buffer length in bytes * @secattr: the security attribute struct to use * * Description: * This function checks the cache to see if a label mapping already exists for * the given key. If there is a match then the cache is adjusted and the * @secattr struct is populated with the correct LSM security attributes. The * cache is adjusted in the following manner if the entry is not already the * first in the cache bucket: * * 1. The cache entry's activity counter is incremented * 2. The previous (higher ranking) entry's activity counter is decremented * 3. If the difference between the two activity counters is greater than * CIPSO_V4_CACHE_REORDERLIMIT the two entries are swapped * * Returns zero on success, -ENOENT for a cache miss, and other negative values * on error. * */ static int cipso_v4_cache_check(const unsigned char *key, u32 key_len, struct netlbl_lsm_secattr *secattr) { u32 bkt; struct cipso_v4_map_cache_entry *entry; struct cipso_v4_map_cache_entry *prev_entry = NULL; u32 hash; if (!cipso_v4_cache_enabled) return -ENOENT; hash = cipso_v4_map_cache_hash(key, key_len); bkt = hash & (CIPSO_V4_CACHE_BUCKETS - 1); spin_lock_bh(&cipso_v4_cache[bkt].lock); list_for_each_entry(entry, &cipso_v4_cache[bkt].list, list) { if (entry->hash == hash && entry->key_len == key_len && memcmp(entry->key, key, key_len) == 0) { entry->activity += 1; atomic_inc(&entry->lsm_data->refcount); secattr->cache = entry->lsm_data; secattr->flags |= NETLBL_SECATTR_CACHE; secattr->type = NETLBL_NLTYPE_CIPSOV4; if (prev_entry == NULL) { spin_unlock_bh(&cipso_v4_cache[bkt].lock); return 0; } if (prev_entry->activity > 0) prev_entry->activity -= 1; if (entry->activity > prev_entry->activity && entry->activity - prev_entry->activity > CIPSO_V4_CACHE_REORDERLIMIT) { __list_del(entry->list.prev, entry->list.next); __list_add(&entry->list, prev_entry->list.prev, &prev_entry->list); } spin_unlock_bh(&cipso_v4_cache[bkt].lock); return 0; } prev_entry = entry; } spin_unlock_bh(&cipso_v4_cache[bkt].lock); return -ENOENT; } /** * cipso_v4_cache_add - Add an entry to the CIPSO cache * @skb: the packet * @secattr: the packet's security attributes * * Description: * Add a new entry into the CIPSO label mapping cache. Add the new entry to * the head of the cache bucket's list; if the cache bucket is out of room, * remove the last entry in the list first. It is important to note that * there is currently no checking for duplicate keys. 
Returns zero on success, * negative values on failure. * */ int cipso_v4_cache_add(const struct sk_buff *skb, const struct netlbl_lsm_secattr *secattr) { int ret_val = -EPERM; u32 bkt; struct cipso_v4_map_cache_entry *entry = NULL; struct cipso_v4_map_cache_entry *old_entry = NULL; unsigned char *cipso_ptr; u32 cipso_ptr_len; if (!cipso_v4_cache_enabled || cipso_v4_cache_bucketsize <= 0) return 0; cipso_ptr = CIPSO_V4_OPTPTR(skb); cipso_ptr_len = cipso_ptr[1]; entry = kzalloc(sizeof(*entry), GFP_ATOMIC); if (entry == NULL) return -ENOMEM; entry->key = kmemdup(cipso_ptr, cipso_ptr_len, GFP_ATOMIC); if (entry->key == NULL) { ret_val = -ENOMEM; goto cache_add_failure; } entry->key_len = cipso_ptr_len; entry->hash = cipso_v4_map_cache_hash(cipso_ptr, cipso_ptr_len); atomic_inc(&secattr->cache->refcount); entry->lsm_data = secattr->cache; bkt = entry->hash & (CIPSO_V4_CACHE_BUCKETS - 1); spin_lock_bh(&cipso_v4_cache[bkt].lock); if (cipso_v4_cache[bkt].size < cipso_v4_cache_bucketsize) { list_add(&entry->list, &cipso_v4_cache[bkt].list); cipso_v4_cache[bkt].size += 1; } else { old_entry = list_entry(cipso_v4_cache[bkt].list.prev, struct cipso_v4_map_cache_entry, list); list_del(&old_entry->list); list_add(&entry->list, &cipso_v4_cache[bkt].list); cipso_v4_cache_entry_free(old_entry); } spin_unlock_bh(&cipso_v4_cache[bkt].lock); return 0; cache_add_failure: if (entry) cipso_v4_cache_entry_free(entry); return ret_val; } /* * DOI List Functions */ /** * cipso_v4_doi_search - Searches for a DOI definition * @doi: the DOI to search for * * Description: * Search the DOI definition list for a DOI definition with a DOI value that * matches @doi. The caller is responsible for calling rcu_read_[un]lock(). * Returns a pointer to the DOI definition on success and NULL on failure. */ static struct cipso_v4_doi *cipso_v4_doi_search(u32 doi) { struct cipso_v4_doi *iter; list_for_each_entry_rcu(iter, &cipso_v4_doi_list, list) if (iter->doi == doi && atomic_read(&iter->refcount)) return iter; return NULL; } /** * cipso_v4_doi_add - Add a new DOI to the CIPSO protocol engine * @doi_def: the DOI structure * @audit_info: NetLabel audit information * * Description: * The caller defines a new DOI for use by the CIPSO engine and calls this * function to add it to the list of acceptable domains. The caller must * ensure that the mapping table specified in @doi_def->map meets all of the * requirements of the mapping type (see cipso_ipv4.h for details). Returns * zero on success and non-zero on failure. 
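* Note that doi_def->tags[0] must name a real tag type; CIPSO_V4_TAG_INVALID is only legal in later slots, where it terminates the tag list.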
* */ int cipso_v4_doi_add(struct cipso_v4_doi *doi_def, struct netlbl_audit *audit_info) { int ret_val = -EINVAL; u32 iter; u32 doi = 0; u32 doi_type = 0; struct audit_buffer *audit_buf; /* check for a NULL definition before reading the fields used below by * the audit path */ if (doi_def == NULL) goto doi_add_return; doi = doi_def->doi; doi_type = doi_def->type; if (doi == CIPSO_V4_DOI_UNKNOWN) goto doi_add_return; for (iter = 0; iter < CIPSO_V4_TAG_MAXCNT; iter++) { switch (doi_def->tags[iter]) { case CIPSO_V4_TAG_RBITMAP: break; case CIPSO_V4_TAG_RANGE: case CIPSO_V4_TAG_ENUM: if (doi_def->type != CIPSO_V4_MAP_PASS) goto doi_add_return; break; case CIPSO_V4_TAG_LOCAL: if (doi_def->type != CIPSO_V4_MAP_LOCAL) goto doi_add_return; break; case CIPSO_V4_TAG_INVALID: if (iter == 0) goto doi_add_return; break; default: goto doi_add_return; } } atomic_set(&doi_def->refcount, 1); spin_lock(&cipso_v4_doi_list_lock); if (cipso_v4_doi_search(doi_def->doi) != NULL) { spin_unlock(&cipso_v4_doi_list_lock); ret_val = -EEXIST; goto doi_add_return; } list_add_tail_rcu(&doi_def->list, &cipso_v4_doi_list); spin_unlock(&cipso_v4_doi_list_lock); ret_val = 0; doi_add_return: audit_buf = netlbl_audit_start(AUDIT_MAC_CIPSOV4_ADD, audit_info); if (audit_buf != NULL) { const char *type_str; switch (doi_type) { case CIPSO_V4_MAP_TRANS: type_str = "trans"; break; case CIPSO_V4_MAP_PASS: type_str = "pass"; break; case CIPSO_V4_MAP_LOCAL: type_str = "local"; break; default: type_str = "(unknown)"; } audit_log_format(audit_buf, " cipso_doi=%u cipso_type=%s res=%u", doi, type_str, ret_val == 0 ? 1 : 0); audit_log_end(audit_buf); } return ret_val; } /** * cipso_v4_doi_free - Frees a DOI definition * @doi_def: the DOI definition * * Description: * This function frees all of the memory associated with a DOI definition. * */ void cipso_v4_doi_free(struct cipso_v4_doi *doi_def) { if (doi_def == NULL) return; switch (doi_def->type) { case CIPSO_V4_MAP_TRANS: kfree(doi_def->map.std->lvl.cipso); kfree(doi_def->map.std->lvl.local); kfree(doi_def->map.std->cat.cipso); kfree(doi_def->map.std->cat.local); break; } kfree(doi_def); } /** * cipso_v4_doi_free_rcu - Frees a DOI definition via the RCU pointer * @entry: the entry's RCU field * * Description: * This function is designed to be used as a callback to the call_rcu() * function so that the memory allocated to the DOI definition can be released * safely. * */ static void cipso_v4_doi_free_rcu(struct rcu_head *entry) { struct cipso_v4_doi *doi_def; doi_def = container_of(entry, struct cipso_v4_doi, rcu); cipso_v4_doi_free(doi_def); } /** * cipso_v4_doi_remove - Remove an existing DOI from the CIPSO protocol engine * @doi: the DOI value * @audit_info: NetLabel audit information * * Description: * Removes a DOI definition from the CIPSO engine. The NetLabel routines will * be called to release their own LSM domain mappings as well as our own * domain list. Returns zero on success and negative values on failure. 
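* Removal only proceeds when the reference count drops to zero in this call, so a DOI definition still held elsewhere is left in place and -EBUSY is returned instead.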
* */ int cipso_v4_doi_remove(u32 doi, struct netlbl_audit *audit_info) { int ret_val; struct cipso_v4_doi *doi_def; struct audit_buffer *audit_buf; spin_lock(&cipso_v4_doi_list_lock); doi_def = cipso_v4_doi_search(doi); if (doi_def == NULL) { spin_unlock(&cipso_v4_doi_list_lock); ret_val = -ENOENT; goto doi_remove_return; } if (!atomic_dec_and_test(&doi_def->refcount)) { spin_unlock(&cipso_v4_doi_list_lock); ret_val = -EBUSY; goto doi_remove_return; } list_del_rcu(&doi_def->list); spin_unlock(&cipso_v4_doi_list_lock); cipso_v4_cache_invalidate(); call_rcu(&doi_def->rcu, cipso_v4_doi_free_rcu); ret_val = 0; doi_remove_return: audit_buf = netlbl_audit_start(AUDIT_MAC_CIPSOV4_DEL, audit_info); if (audit_buf != NULL) { audit_log_format(audit_buf, " cipso_doi=%u res=%u", doi, ret_val == 0 ? 1 : 0); audit_log_end(audit_buf); } return ret_val; } /** * cipso_v4_doi_getdef - Returns a reference to a valid DOI definition * @doi: the DOI value * * Description: * Searches for a valid DOI definition and if one is found it is returned to * the caller. Otherwise NULL is returned. The caller must ensure that * rcu_read_lock() is held while accessing the returned definition and the DOI * definition reference count is decremented when the caller is done. * */ struct cipso_v4_doi *cipso_v4_doi_getdef(u32 doi) { struct cipso_v4_doi *doi_def; rcu_read_lock(); doi_def = cipso_v4_doi_search(doi); if (doi_def == NULL) goto doi_getdef_return; if (!atomic_inc_not_zero(&doi_def->refcount)) doi_def = NULL; doi_getdef_return: rcu_read_unlock(); return doi_def; } /** * cipso_v4_doi_putdef - Releases a reference for the given DOI definition * @doi_def: the DOI definition * * Description: * Releases a DOI definition reference obtained from cipso_v4_doi_getdef(). * */ void cipso_v4_doi_putdef(struct cipso_v4_doi *doi_def) { if (doi_def == NULL) return; if (!atomic_dec_and_test(&doi_def->refcount)) return; spin_lock(&cipso_v4_doi_list_lock); list_del_rcu(&doi_def->list); spin_unlock(&cipso_v4_doi_list_lock); cipso_v4_cache_invalidate(); call_rcu(&doi_def->rcu, cipso_v4_doi_free_rcu); } /** * cipso_v4_doi_walk - Iterate through the DOI definitions * @skip_cnt: skip past this number of DOI definitions, updated * @callback: callback for each DOI definition * @cb_arg: argument for the callback function * * Description: * Iterate over the DOI definition list, skipping the first @skip_cnt entries. * For each entry call @callback, if @callback returns a negative value stop * 'walking' through the list and return. Updates the value in @skip_cnt upon * return. Returns zero on success, negative values on failure. * */ int cipso_v4_doi_walk(u32 *skip_cnt, int (*callback) (struct cipso_v4_doi *doi_def, void *arg), void *cb_arg) { int ret_val = -ENOENT; u32 doi_cnt = 0; struct cipso_v4_doi *iter_doi; rcu_read_lock(); list_for_each_entry_rcu(iter_doi, &cipso_v4_doi_list, list) if (atomic_read(&iter_doi->refcount) > 0) { if (doi_cnt++ < *skip_cnt) continue; ret_val = callback(iter_doi, cb_arg); if (ret_val < 0) { doi_cnt--; goto doi_walk_return; } } doi_walk_return: rcu_read_unlock(); *skip_cnt = doi_cnt; return ret_val; } /* * Label Mapping Functions */ /** * cipso_v4_map_lvl_valid - Checks to see if the given level is understood * @doi_def: the DOI definition * @level: the level to check * * Description: * Checks the given level against the given DOI definition and returns a * negative value if the level does not have a valid mapping and a zero value * if the level is defined by the DOI. 
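* For example, with a CIPSO_V4_MAP_TRANS DOI whose lvl.cipso[] table maps remote level 5 to some local level, cipso_v4_map_lvl_valid(doi_def, 5) returns 0, while an unmapped slot holds CIPSO_V4_INV_LVL and yields -EFAULT.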
* */ static int cipso_v4_map_lvl_valid(const struct cipso_v4_doi *doi_def, u8 level) { switch (doi_def->type) { case CIPSO_V4_MAP_PASS: return 0; case CIPSO_V4_MAP_TRANS: if (doi_def->map.std->lvl.cipso[level] < CIPSO_V4_INV_LVL) return 0; break; } return -EFAULT; } /** * cipso_v4_map_lvl_hton - Perform a level mapping from the host to the network * @doi_def: the DOI definition * @host_lvl: the host MLS level * @net_lvl: the network/CIPSO MLS level * * Description: * Perform a label mapping to translate a local MLS level to the correct * CIPSO level using the given DOI definition. Returns zero on success, * negative values otherwise. * */ static int cipso_v4_map_lvl_hton(const struct cipso_v4_doi *doi_def, u32 host_lvl, u32 *net_lvl) { switch (doi_def->type) { case CIPSO_V4_MAP_PASS: *net_lvl = host_lvl; return 0; case CIPSO_V4_MAP_TRANS: if (host_lvl < doi_def->map.std->lvl.local_size && doi_def->map.std->lvl.local[host_lvl] < CIPSO_V4_INV_LVL) { *net_lvl = doi_def->map.std->lvl.local[host_lvl]; return 0; } return -EPERM; } return -EINVAL; } /** * cipso_v4_map_lvl_ntoh - Perform a level mapping from the network to the host * @doi_def: the DOI definition * @net_lvl: the network/CIPSO MLS level * @host_lvl: the host MLS level * * Description: * Perform a label mapping to translate a CIPSO level to the correct local MLS * level using the given DOI definition. Returns zero on success, negative * values otherwise. * */ static int cipso_v4_map_lvl_ntoh(const struct cipso_v4_doi *doi_def, u32 net_lvl, u32 *host_lvl) { struct cipso_v4_std_map_tbl *map_tbl; switch (doi_def->type) { case CIPSO_V4_MAP_PASS: *host_lvl = net_lvl; return 0; case CIPSO_V4_MAP_TRANS: map_tbl = doi_def->map.std; if (net_lvl < map_tbl->lvl.cipso_size && map_tbl->lvl.cipso[net_lvl] < CIPSO_V4_INV_LVL) { *host_lvl = doi_def->map.std->lvl.cipso[net_lvl]; return 0; } return -EPERM; } return -EINVAL; } /** * cipso_v4_map_cat_rbm_valid - Checks to see if the category bitmap is valid * @doi_def: the DOI definition * @bitmap: category bitmap * @bitmap_len: bitmap length in bytes * * Description: * Checks the given category bitmap against the given DOI definition and * returns a negative value if any of the categories in the bitmap do not have * a valid mapping and a zero value if all of the categories are valid. * */ static int cipso_v4_map_cat_rbm_valid(const struct cipso_v4_doi *doi_def, const unsigned char *bitmap, u32 bitmap_len) { int cat = -1; u32 bitmap_len_bits = bitmap_len * 8; u32 cipso_cat_size; u32 *cipso_array; switch (doi_def->type) { case CIPSO_V4_MAP_PASS: return 0; case CIPSO_V4_MAP_TRANS: cipso_cat_size = doi_def->map.std->cat.cipso_size; cipso_array = doi_def->map.std->cat.cipso; for (;;) { cat = cipso_v4_bitmap_walk(bitmap, bitmap_len_bits, cat + 1, 1); if (cat < 0) break; if (cat >= cipso_cat_size || cipso_array[cat] >= CIPSO_V4_INV_CAT) return -EFAULT; } if (cat == -1) return 0; break; } return -EFAULT; } /** * cipso_v4_map_cat_rbm_hton - Perform a category mapping from host to network * @doi_def: the DOI definition * @secattr: the security attributes * @net_cat: the zero'd out category bitmap in network/CIPSO format * @net_cat_len: the length of the CIPSO bitmap in bytes * * Description: * Perform a label mapping to translate a local MLS category bitmap to the * correct CIPSO bitmap using the given DOI definition. Returns the minimum * size in bytes of the network bitmap on success, negative values otherwise. 
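* The returned length covers just the octets up to the highest set bit: e.g. if the largest mapped category lands on bit 17 the function reports 3, since bits 0-23 span three octets.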
* */ static int cipso_v4_map_cat_rbm_hton(const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr, unsigned char *net_cat, u32 net_cat_len) { int host_spot = -1; u32 net_spot = CIPSO_V4_INV_CAT; u32 net_spot_max = 0; u32 net_clen_bits = net_cat_len * 8; u32 host_cat_size = 0; u32 *host_cat_array = NULL; if (doi_def->type == CIPSO_V4_MAP_TRANS) { host_cat_size = doi_def->map.std->cat.local_size; host_cat_array = doi_def->map.std->cat.local; } for (;;) { host_spot = netlbl_secattr_catmap_walk(secattr->attr.mls.cat, host_spot + 1); if (host_spot < 0) break; switch (doi_def->type) { case CIPSO_V4_MAP_PASS: net_spot = host_spot; break; case CIPSO_V4_MAP_TRANS: if (host_spot >= host_cat_size) return -EPERM; net_spot = host_cat_array[host_spot]; if (net_spot >= CIPSO_V4_INV_CAT) return -EPERM; break; } if (net_spot >= net_clen_bits) return -ENOSPC; cipso_v4_bitmap_setbit(net_cat, net_spot, 1); if (net_spot > net_spot_max) net_spot_max = net_spot; } if (++net_spot_max % 8) return net_spot_max / 8 + 1; return net_spot_max / 8; } /** * cipso_v4_map_cat_rbm_ntoh - Perform a category mapping from network to host * @doi_def: the DOI definition * @net_cat: the category bitmap in network/CIPSO format * @net_cat_len: the length of the CIPSO bitmap in bytes * @secattr: the security attributes * * Description: * Perform a label mapping to translate a CIPSO bitmap to the correct local * MLS category bitmap using the given DOI definition. Returns zero on * success, negative values on failure. * */ static int cipso_v4_map_cat_rbm_ntoh(const struct cipso_v4_doi *doi_def, const unsigned char *net_cat, u32 net_cat_len, struct netlbl_lsm_secattr *secattr) { int ret_val; int net_spot = -1; u32 host_spot = CIPSO_V4_INV_CAT; u32 net_clen_bits = net_cat_len * 8; u32 net_cat_size = 0; u32 *net_cat_array = NULL; if (doi_def->type == CIPSO_V4_MAP_TRANS) { net_cat_size = doi_def->map.std->cat.cipso_size; net_cat_array = doi_def->map.std->cat.cipso; } for (;;) { net_spot = cipso_v4_bitmap_walk(net_cat, net_clen_bits, net_spot + 1, 1); if (net_spot < 0) { if (net_spot == -2) return -EFAULT; return 0; } switch (doi_def->type) { case CIPSO_V4_MAP_PASS: host_spot = net_spot; break; case CIPSO_V4_MAP_TRANS: if (net_spot >= net_cat_size) return -EPERM; host_spot = net_cat_array[net_spot]; if (host_spot >= CIPSO_V4_INV_CAT) return -EPERM; break; } ret_val = netlbl_secattr_catmap_setbit(secattr->attr.mls.cat, host_spot, GFP_ATOMIC); if (ret_val != 0) return ret_val; } return -EINVAL; } /** * cipso_v4_map_cat_enum_valid - Checks to see if the categories are valid * @doi_def: the DOI definition * @enumcat: category list * @enumcat_len: length of the category list in bytes * * Description: * Checks the given categories against the given DOI definition and returns a * negative value if any of the categories do not have a valid mapping and a * zero value if all of the categories are valid. 
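* Enumerated tags must carry a strictly increasing list of 16-bit categories, so a duplicate entry such as 00 05 00 05, or an odd byte count, is rejected with -EFAULT.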
* */ static int cipso_v4_map_cat_enum_valid(const struct cipso_v4_doi *doi_def, const unsigned char *enumcat, u32 enumcat_len) { u16 cat; int cat_prev = -1; u32 iter; if (doi_def->type != CIPSO_V4_MAP_PASS || enumcat_len & 0x01) return -EFAULT; for (iter = 0; iter < enumcat_len; iter += 2) { cat = get_unaligned_be16(&enumcat[iter]); if (cat <= cat_prev) return -EFAULT; cat_prev = cat; } return 0; } /** * cipso_v4_map_cat_enum_hton - Perform a category mapping from host to network * @doi_def: the DOI definition * @secattr: the security attributes * @net_cat: the zero'd out category list in network/CIPSO format * @net_cat_len: the length of the CIPSO category list in bytes * * Description: * Perform a label mapping to translate a local MLS category bitmap to the * correct CIPSO category list using the given DOI definition. Returns the * size in bytes of the network category bitmap on success, negative values * otherwise. * */ static int cipso_v4_map_cat_enum_hton(const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr, unsigned char *net_cat, u32 net_cat_len) { int cat = -1; u32 cat_iter = 0; for (;;) { cat = netlbl_secattr_catmap_walk(secattr->attr.mls.cat, cat + 1); if (cat < 0) break; if ((cat_iter + 2) > net_cat_len) return -ENOSPC; *((__be16 *)&net_cat[cat_iter]) = htons(cat); cat_iter += 2; } return cat_iter; } /** * cipso_v4_map_cat_enum_ntoh - Perform a category mapping from network to host * @doi_def: the DOI definition * @net_cat: the category list in network/CIPSO format * @net_cat_len: the length of the CIPSO bitmap in bytes * @secattr: the security attributes * * Description: * Perform a label mapping to translate a CIPSO category list to the correct * local MLS category bitmap using the given DOI definition. Returns zero on * success, negative values on failure. * */ static int cipso_v4_map_cat_enum_ntoh(const struct cipso_v4_doi *doi_def, const unsigned char *net_cat, u32 net_cat_len, struct netlbl_lsm_secattr *secattr) { int ret_val; u32 iter; for (iter = 0; iter < net_cat_len; iter += 2) { ret_val = netlbl_secattr_catmap_setbit(secattr->attr.mls.cat, get_unaligned_be16(&net_cat[iter]), GFP_ATOMIC); if (ret_val != 0) return ret_val; } return 0; } /** * cipso_v4_map_cat_rng_valid - Checks to see if the categories are valid * @doi_def: the DOI definition * @rngcat: category list * @rngcat_len: length of the category list in bytes * * Description: * Checks the given categories against the given DOI definition and returns a * negative value if any of the categories do not have a valid mapping and a * zero value if all of the categories are valid. 
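/*
 * Userspace sketch of the enumerated-category wire format handled
 * above: a flat list of 16-bit category values in network byte order
 * that must be strictly increasing.  The demo_* names are illustrative,
 * not kernel interfaces.
 */
#include <stddef.h>
#include <stdint.h>

static int demo_enum_encode(const uint16_t *cats, size_t n,
			    unsigned char *buf, size_t buf_len)
{
	size_t i;

	if (n * 2 > buf_len)
		return -1;			/* -ENOSPC in the kernel */
	for (i = 0; i < n; i++) {
		buf[2 * i]     = cats[i] >> 8;	/* big endian, high byte first */
		buf[2 * i + 1] = cats[i] & 0xff;
	}
	return (int)(n * 2);
}

static int demo_enum_valid(const unsigned char *buf, size_t len)
{
	int prev = -1;
	size_t i;

	for (i = 0; i + 1 < len; i += 2) {
		int cat = (buf[i] << 8) | buf[i + 1];

		if (cat <= prev)
			return -1;	/* not strictly increasing: reject */
		prev = cat;
	}
	return 0;
}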
* */ static int cipso_v4_map_cat_rng_valid(const struct cipso_v4_doi *doi_def, const unsigned char *rngcat, u32 rngcat_len) { u16 cat_high; u16 cat_low; u32 cat_prev = CIPSO_V4_MAX_REM_CATS + 1; u32 iter; if (doi_def->type != CIPSO_V4_MAP_PASS || rngcat_len & 0x01) return -EFAULT; for (iter = 0; iter < rngcat_len; iter += 4) { cat_high = get_unaligned_be16(&rngcat[iter]); if ((iter + 4) <= rngcat_len) cat_low = get_unaligned_be16(&rngcat[iter + 2]); else cat_low = 0; if (cat_high > cat_prev) return -EFAULT; cat_prev = cat_low; } return 0; } /** * cipso_v4_map_cat_rng_hton - Perform a category mapping from host to network * @doi_def: the DOI definition * @secattr: the security attributes * @net_cat: the zero'd out category list in network/CIPSO format * @net_cat_len: the length of the CIPSO category list in bytes * * Description: * Perform a label mapping to translate a local MLS category bitmap to the * correct CIPSO category list using the given DOI definition. Returns the * size in bytes of the network category bitmap on success, negative values * otherwise. * */ static int cipso_v4_map_cat_rng_hton(const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr, unsigned char *net_cat, u32 net_cat_len) { int iter = -1; u16 array[CIPSO_V4_TAG_RNG_CAT_MAX * 2]; u32 array_cnt = 0; u32 cat_size = 0; /* make sure we don't overflow the 'array[]' variable */ if (net_cat_len > (CIPSO_V4_OPT_LEN_MAX - CIPSO_V4_HDR_LEN - CIPSO_V4_TAG_RNG_BLEN)) return -ENOSPC; for (;;) { iter = netlbl_secattr_catmap_walk(secattr->attr.mls.cat, iter + 1); if (iter < 0) break; cat_size += (iter == 0 ? 0 : sizeof(u16)); if (cat_size > net_cat_len) return -ENOSPC; array[array_cnt++] = iter; iter = netlbl_secattr_catmap_walk_rng(secattr->attr.mls.cat, iter); if (iter < 0) return -EFAULT; cat_size += sizeof(u16); if (cat_size > net_cat_len) return -ENOSPC; array[array_cnt++] = iter; } for (iter = 0; array_cnt > 0;) { *((__be16 *)&net_cat[iter]) = htons(array[--array_cnt]); iter += 2; array_cnt--; if (array[array_cnt] != 0) { *((__be16 *)&net_cat[iter]) = htons(array[array_cnt]); iter += 2; } } return cat_size; } /** * cipso_v4_map_cat_rng_ntoh - Perform a category mapping from network to host * @doi_def: the DOI definition * @net_cat: the category list in network/CIPSO format * @net_cat_len: the length of the CIPSO bitmap in bytes * @secattr: the security attributes * * Description: * Perform a label mapping to translate a CIPSO category list to the correct * local MLS category bitmap using the given DOI definition. Returns zero on * success, negative values on failure. * */ static int cipso_v4_map_cat_rng_ntoh(const struct cipso_v4_doi *doi_def, const unsigned char *net_cat, u32 net_cat_len, struct netlbl_lsm_secattr *secattr) { int ret_val; u32 net_iter; u16 cat_low; u16 cat_high; for (net_iter = 0; net_iter < net_cat_len; net_iter += 4) { cat_high = get_unaligned_be16(&net_cat[net_iter]); if ((net_iter + 4) <= net_cat_len) cat_low = get_unaligned_be16(&net_cat[net_iter + 2]); else cat_low = 0; ret_val = netlbl_secattr_catmap_setrng(secattr->attr.mls.cat, cat_low, cat_high, GFP_ATOMIC); if (ret_val != 0) return ret_val; } return 0; } /* * Protocol Handling Functions */ /** * cipso_v4_gentag_hdr - Generate a CIPSO option header * @doi_def: the DOI definition * @len: the total tag length in bytes, not including this header * @buf: the CIPSO option buffer * * Description: * Write a CIPSO header into the beginning of @buffer. 
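/*
 * Sketch of the ranged-tag (#5) encoding performed above: each range is
 * a big-endian (high, low) pair, ranges are emitted highest range
 * first, and a final low bound of zero is omitted; that is why eight
 * ranges can fit even though only seven complete pairs do.  For
 * example, the ranges {0..5} and {10..12} encode as the three values
 * 12, 10, 5.
 */
#include <stddef.h>
#include <stdint.h>

struct demo_rng { uint16_t low, high; };

static void demo_put_be16(unsigned char *p, uint16_t v)
{
	p[0] = v >> 8;
	p[1] = v & 0xff;
}

/* @r is sorted by ascending category; returns the bytes written. */
static size_t demo_rng_encode(const struct demo_rng *r, size_t n,
			      unsigned char *buf)
{
	size_t used = 0;

	while (n-- > 0) {
		demo_put_be16(buf + used, r[n].high);
		used += 2;
		if (r[n].low != 0) {	/* a low bound of 0 is implied */
			demo_put_be16(buf + used, r[n].low);
			used += 2;
		}
	}
	return used;
}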
* */ static void cipso_v4_gentag_hdr(const struct cipso_v4_doi *doi_def, unsigned char *buf, u32 len) { buf[0] = IPOPT_CIPSO; buf[1] = CIPSO_V4_HDR_LEN + len; *(__be32 *)&buf[2] = htonl(doi_def->doi); } /** * cipso_v4_gentag_rbm - Generate a CIPSO restricted bitmap tag (type #1) * @doi_def: the DOI definition * @secattr: the security attributes * @buffer: the option buffer * @buffer_len: length of buffer in bytes * * Description: * Generate a CIPSO option using the restricted bitmap tag, tag type #1. The * actual buffer length may be larger than the indicated size due to * translation between host and network category bitmaps. Returns the size of * the tag on success, negative values on failure. * */ static int cipso_v4_gentag_rbm(const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr, unsigned char *buffer, u32 buffer_len) { int ret_val; u32 tag_len; u32 level; if ((secattr->flags & NETLBL_SECATTR_MLS_LVL) == 0) return -EPERM; ret_val = cipso_v4_map_lvl_hton(doi_def, secattr->attr.mls.lvl, &level); if (ret_val != 0) return ret_val; if (secattr->flags & NETLBL_SECATTR_MLS_CAT) { ret_val = cipso_v4_map_cat_rbm_hton(doi_def, secattr, &buffer[4], buffer_len - 4); if (ret_val < 0) return ret_val; /* This will send packets using the "optimized" format when * possible as specified in section 3.4.2.6 of the * CIPSO draft. */ if (cipso_v4_rbm_optfmt && ret_val > 0 && ret_val <= 10) tag_len = 14; else tag_len = 4 + ret_val; } else tag_len = 4; buffer[0] = CIPSO_V4_TAG_RBITMAP; buffer[1] = tag_len; buffer[3] = level; return tag_len; } /** * cipso_v4_parsetag_rbm - Parse a CIPSO restricted bitmap tag * @doi_def: the DOI definition * @tag: the CIPSO tag * @secattr: the security attributes * * Description: * Parse a CIPSO restricted bitmap tag (tag type #1) and return the security * attributes in @secattr. Return zero on success, negatives values on * failure. * */ static int cipso_v4_parsetag_rbm(const struct cipso_v4_doi *doi_def, const unsigned char *tag, struct netlbl_lsm_secattr *secattr) { int ret_val; u8 tag_len = tag[1]; u32 level; ret_val = cipso_v4_map_lvl_ntoh(doi_def, tag[3], &level); if (ret_val != 0) return ret_val; secattr->attr.mls.lvl = level; secattr->flags |= NETLBL_SECATTR_MLS_LVL; if (tag_len > 4) { secattr->attr.mls.cat = netlbl_secattr_catmap_alloc(GFP_ATOMIC); if (secattr->attr.mls.cat == NULL) return -ENOMEM; ret_val = cipso_v4_map_cat_rbm_ntoh(doi_def, &tag[4], tag_len - 4, secattr); if (ret_val != 0) { netlbl_secattr_catmap_free(secattr->attr.mls.cat); return ret_val; } secattr->flags |= NETLBL_SECATTR_MLS_CAT; } return 0; } /** * cipso_v4_gentag_enum - Generate a CIPSO enumerated tag (type #2) * @doi_def: the DOI definition * @secattr: the security attributes * @buffer: the option buffer * @buffer_len: length of buffer in bytes * * Description: * Generate a CIPSO option using the enumerated tag, tag type #2. Returns the * size of the tag on success, negative values on failure. 
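/*
 * Byte-level sketch of the 6-byte option header that
 * cipso_v4_gentag_hdr() above writes: option type 134 (0x86,
 * IPOPT_CIPSO), the total option length, then the DOI as a 32-bit
 * big-endian value.  demo_* names are made up for this example.
 */
#include <stdint.h>

#define DEMO_IPOPT_CIPSO 134
#define DEMO_HDR_LEN	 6

static void demo_cipso_hdr(unsigned char *buf, uint32_t doi, uint8_t tags_len)
{
	buf[0] = DEMO_IPOPT_CIPSO;
	buf[1] = DEMO_HDR_LEN + tags_len;	/* header plus all tag bytes */
	buf[2] = doi >> 24;			/* DOI, network byte order */
	buf[3] = (doi >> 16) & 0xff;
	buf[4] = (doi >> 8) & 0xff;
	buf[5] = doi & 0xff;
}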
* */ static int cipso_v4_gentag_enum(const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr, unsigned char *buffer, u32 buffer_len) { int ret_val; u32 tag_len; u32 level; if (!(secattr->flags & NETLBL_SECATTR_MLS_LVL)) return -EPERM; ret_val = cipso_v4_map_lvl_hton(doi_def, secattr->attr.mls.lvl, &level); if (ret_val != 0) return ret_val; if (secattr->flags & NETLBL_SECATTR_MLS_CAT) { ret_val = cipso_v4_map_cat_enum_hton(doi_def, secattr, &buffer[4], buffer_len - 4); if (ret_val < 0) return ret_val; tag_len = 4 + ret_val; } else tag_len = 4; buffer[0] = CIPSO_V4_TAG_ENUM; buffer[1] = tag_len; buffer[3] = level; return tag_len; } /** * cipso_v4_parsetag_enum - Parse a CIPSO enumerated tag * @doi_def: the DOI definition * @tag: the CIPSO tag * @secattr: the security attributes * * Description: * Parse a CIPSO enumerated tag (tag type #2) and return the security * attributes in @secattr. Return zero on success, negatives values on * failure. * */ static int cipso_v4_parsetag_enum(const struct cipso_v4_doi *doi_def, const unsigned char *tag, struct netlbl_lsm_secattr *secattr) { int ret_val; u8 tag_len = tag[1]; u32 level; ret_val = cipso_v4_map_lvl_ntoh(doi_def, tag[3], &level); if (ret_val != 0) return ret_val; secattr->attr.mls.lvl = level; secattr->flags |= NETLBL_SECATTR_MLS_LVL; if (tag_len > 4) { secattr->attr.mls.cat = netlbl_secattr_catmap_alloc(GFP_ATOMIC); if (secattr->attr.mls.cat == NULL) return -ENOMEM; ret_val = cipso_v4_map_cat_enum_ntoh(doi_def, &tag[4], tag_len - 4, secattr); if (ret_val != 0) { netlbl_secattr_catmap_free(secattr->attr.mls.cat); return ret_val; } secattr->flags |= NETLBL_SECATTR_MLS_CAT; } return 0; } /** * cipso_v4_gentag_rng - Generate a CIPSO ranged tag (type #5) * @doi_def: the DOI definition * @secattr: the security attributes * @buffer: the option buffer * @buffer_len: length of buffer in bytes * * Description: * Generate a CIPSO option using the ranged tag, tag type #5. Returns the * size of the tag on success, negative values on failure. * */ static int cipso_v4_gentag_rng(const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr, unsigned char *buffer, u32 buffer_len) { int ret_val; u32 tag_len; u32 level; if (!(secattr->flags & NETLBL_SECATTR_MLS_LVL)) return -EPERM; ret_val = cipso_v4_map_lvl_hton(doi_def, secattr->attr.mls.lvl, &level); if (ret_val != 0) return ret_val; if (secattr->flags & NETLBL_SECATTR_MLS_CAT) { ret_val = cipso_v4_map_cat_rng_hton(doi_def, secattr, &buffer[4], buffer_len - 4); if (ret_val < 0) return ret_val; tag_len = 4 + ret_val; } else tag_len = 4; buffer[0] = CIPSO_V4_TAG_RANGE; buffer[1] = tag_len; buffer[3] = level; return tag_len; } /** * cipso_v4_parsetag_rng - Parse a CIPSO ranged tag * @doi_def: the DOI definition * @tag: the CIPSO tag * @secattr: the security attributes * * Description: * Parse a CIPSO ranged tag (tag type #5) and return the security attributes * in @secattr. Return zero on success, negatives values on failure. 
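/*
 * The three MAC tag generators above share one 4-byte prologue; a
 * sketch of that layout (byte 2 is the alignment octet from the CIPSO
 * draft, already zeroed by the memset() in cipso_v4_genopt(), which is
 * why none of the generators touch it):
 *
 *   byte 0: tag type (1 = bitmap, 2 = enumerated, 5 = ranged)
 *   byte 1: total tag length, these 4 bytes included
 *   byte 2: alignment octet, always zero
 *   byte 3: sensitivity level
 */
#include <stdint.h>

static void demo_tag_prologue(unsigned char *tag, uint8_t type,
			      uint8_t tag_len, uint8_t level)
{
	tag[0] = type;
	tag[1] = tag_len;
	tag[2] = 0;
	tag[3] = level;
}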
* */ static int cipso_v4_parsetag_rng(const struct cipso_v4_doi *doi_def, const unsigned char *tag, struct netlbl_lsm_secattr *secattr) { int ret_val; u8 tag_len = tag[1]; u32 level; ret_val = cipso_v4_map_lvl_ntoh(doi_def, tag[3], &level); if (ret_val != 0) return ret_val; secattr->attr.mls.lvl = level; secattr->flags |= NETLBL_SECATTR_MLS_LVL; if (tag_len > 4) { secattr->attr.mls.cat = netlbl_secattr_catmap_alloc(GFP_ATOMIC); if (secattr->attr.mls.cat == NULL) return -ENOMEM; ret_val = cipso_v4_map_cat_rng_ntoh(doi_def, &tag[4], tag_len - 4, secattr); if (ret_val != 0) { netlbl_secattr_catmap_free(secattr->attr.mls.cat); return ret_val; } secattr->flags |= NETLBL_SECATTR_MLS_CAT; } return 0; } /** * cipso_v4_gentag_loc - Generate a CIPSO local tag (non-standard) * @doi_def: the DOI definition * @secattr: the security attributes * @buffer: the option buffer * @buffer_len: length of buffer in bytes * * Description: * Generate a CIPSO option using the local tag. Returns the size of the tag * on success, negative values on failure. * */ static int cipso_v4_gentag_loc(const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr, unsigned char *buffer, u32 buffer_len) { if (!(secattr->flags & NETLBL_SECATTR_SECID)) return -EPERM; buffer[0] = CIPSO_V4_TAG_LOCAL; buffer[1] = CIPSO_V4_TAG_LOC_BLEN; *(u32 *)&buffer[2] = secattr->attr.secid; return CIPSO_V4_TAG_LOC_BLEN; } /** * cipso_v4_parsetag_loc - Parse a CIPSO local tag * @doi_def: the DOI definition * @tag: the CIPSO tag * @secattr: the security attributes * * Description: * Parse a CIPSO local tag and return the security attributes in @secattr. * Return zero on success, negatives values on failure. * */ static int cipso_v4_parsetag_loc(const struct cipso_v4_doi *doi_def, const unsigned char *tag, struct netlbl_lsm_secattr *secattr) { secattr->attr.secid = *(u32 *)&tag[2]; secattr->flags |= NETLBL_SECATTR_SECID; return 0; } /** * cipso_v4_validate - Validate a CIPSO option * @option: the start of the option, on error it is set to point to the error * * Description: * This routine is called to validate a CIPSO option, it checks all of the * fields to ensure that they are at least valid, see the draft snippet below * for details. If the option is valid then a zero value is returned and * the value of @option is unchanged. If the option is invalid then a * non-zero value is returned and @option is adjusted to point to the * offending portion of the option. From the IETF draft ... * * "If any field within the CIPSO options, such as the DOI identifier, is not * recognized the IP datagram is discarded and an ICMP 'parameter problem' * (type 12) is generated and returned. The ICMP code field is set to 'bad * parameter' (code 0) and the pointer is set to the start of the CIPSO field * that is unrecognized." 
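/*
 * Sketch of the non-standard local tag handled above: two header bytes
 * (type 128, length 6, per the tag layout comment elsewhere in this
 * file) followed by a 32-bit secid in host byte order, meaningful only
 * across the loopback interface of a single machine.  memcpy() is used
 * here instead of the kernel's direct u32 cast so the userspace sketch
 * stays alignment-safe.
 */
#include <stdint.h>
#include <string.h>

#define DEMO_TAG_LOCAL	  128
#define DEMO_TAG_LOC_BLEN 6

static void demo_loc_write(unsigned char *tag, uint32_t secid)
{
	tag[0] = DEMO_TAG_LOCAL;
	tag[1] = DEMO_TAG_LOC_BLEN;
	memcpy(&tag[2], &secid, sizeof(secid));	/* host byte order */
}

static uint32_t demo_loc_read(const unsigned char *tag)
{
	uint32_t secid;

	memcpy(&secid, &tag[2], sizeof(secid));
	return secid;
}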
* */ int cipso_v4_validate(const struct sk_buff *skb, unsigned char **option) { unsigned char *opt = *option; unsigned char *tag; unsigned char opt_iter; unsigned char err_offset = 0; u8 opt_len; u8 tag_len; struct cipso_v4_doi *doi_def = NULL; u32 tag_iter; /* caller already checks for length values that are too large */ opt_len = opt[1]; if (opt_len < 8) { err_offset = 1; goto validate_return; } rcu_read_lock(); doi_def = cipso_v4_doi_search(get_unaligned_be32(&opt[2])); if (doi_def == NULL) { err_offset = 2; goto validate_return_locked; } opt_iter = CIPSO_V4_HDR_LEN; tag = opt + opt_iter; while (opt_iter < opt_len) { for (tag_iter = 0; doi_def->tags[tag_iter] != tag[0];) if (doi_def->tags[tag_iter] == CIPSO_V4_TAG_INVALID || ++tag_iter == CIPSO_V4_TAG_MAXCNT) { err_offset = opt_iter; goto validate_return_locked; } tag_len = tag[1]; if (tag_len > (opt_len - opt_iter)) { err_offset = opt_iter + 1; goto validate_return_locked; } switch (tag[0]) { case CIPSO_V4_TAG_RBITMAP: if (tag_len < CIPSO_V4_TAG_RBM_BLEN) { err_offset = opt_iter + 1; goto validate_return_locked; } /* We are already going to do all the verification * necessary at the socket layer so from our point of * view it is safe to turn these checks off (and less * work), however, the CIPSO draft says we should do * all the CIPSO validations here but it doesn't * really specify _exactly_ what we need to validate * ... so, just make it a sysctl tunable. */ if (cipso_v4_rbm_strictvalid) { if (cipso_v4_map_lvl_valid(doi_def, tag[3]) < 0) { err_offset = opt_iter + 3; goto validate_return_locked; } if (tag_len > CIPSO_V4_TAG_RBM_BLEN && cipso_v4_map_cat_rbm_valid(doi_def, &tag[4], tag_len - 4) < 0) { err_offset = opt_iter + 4; goto validate_return_locked; } } break; case CIPSO_V4_TAG_ENUM: if (tag_len < CIPSO_V4_TAG_ENUM_BLEN) { err_offset = opt_iter + 1; goto validate_return_locked; } if (cipso_v4_map_lvl_valid(doi_def, tag[3]) < 0) { err_offset = opt_iter + 3; goto validate_return_locked; } if (tag_len > CIPSO_V4_TAG_ENUM_BLEN && cipso_v4_map_cat_enum_valid(doi_def, &tag[4], tag_len - 4) < 0) { err_offset = opt_iter + 4; goto validate_return_locked; } break; case CIPSO_V4_TAG_RANGE: if (tag_len < CIPSO_V4_TAG_RNG_BLEN) { err_offset = opt_iter + 1; goto validate_return_locked; } if (cipso_v4_map_lvl_valid(doi_def, tag[3]) < 0) { err_offset = opt_iter + 3; goto validate_return_locked; } if (tag_len > CIPSO_V4_TAG_RNG_BLEN && cipso_v4_map_cat_rng_valid(doi_def, &tag[4], tag_len - 4) < 0) { err_offset = opt_iter + 4; goto validate_return_locked; } break; case CIPSO_V4_TAG_LOCAL: /* This is a non-standard tag that we only allow for * local connections, so if the incoming interface is * not the loopback device drop the packet. */ if (!(skb->dev->flags & IFF_LOOPBACK)) { err_offset = opt_iter; goto validate_return_locked; } if (tag_len != CIPSO_V4_TAG_LOC_BLEN) { err_offset = opt_iter + 1; goto validate_return_locked; } break; default: err_offset = opt_iter; goto validate_return_locked; } tag += tag_len; opt_iter += tag_len; } validate_return_locked: rcu_read_unlock(); validate_return: *option = opt + err_offset; return err_offset; } /** * cipso_v4_error - Send the correct response for a bad packet * @skb: the packet * @error: the error code * @gateway: CIPSO gateway flag * * Description: * Based on the error code given in @error, send an ICMP error message back to * the originating host. From the IETF draft ... 
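/*
 * Minimal sketch of the bounds discipline cipso_v4_validate() above
 * applies while walking tags: a tag's announced length must fit inside
 * the remaining option bytes before any of its body is examined.  This
 * standalone version also guards the length byte itself and rejects a
 * zero tag length; both guards are assumptions of the sketch rather
 * than a copy of the kernel loop.
 */
#include <stdint.h>

#define DEMO_HDR_LEN 6

static int demo_tag_walk(const unsigned char *opt, uint8_t opt_len)
{
	uint8_t iter = DEMO_HDR_LEN;	/* skip the CIPSO option header */

	while (iter < opt_len) {
		uint8_t tag_len;

		if (iter + 1 >= opt_len)
			return -1;	/* no room for a length byte */
		tag_len = opt[iter + 1];
		if (tag_len == 0 || tag_len > opt_len - iter)
			return -1;	/* tag would run past the option */
		/* per-tag-type validation would happen here */
		iter += tag_len;
	}
	return 0;
}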
* * "If the contents of the CIPSO [option] are valid but the security label is * outside of the configured host or port label range, the datagram is * discarded and an ICMP 'destination unreachable' (type 3) is generated and * returned. The code field of the ICMP is set to 'communication with * destination network administratively prohibited' (code 9) or to * 'communication with destination host administratively prohibited' * (code 10). The value of the code is dependent on whether the originator * of the ICMP message is acting as a CIPSO host or a CIPSO gateway. The * recipient of the ICMP message MUST be able to handle either value. The * same procedure is performed if a CIPSO [option] can not be added to an * IP packet because it is too large to fit in the IP options area." * * "If the error is triggered by receipt of an ICMP message, the message is * discarded and no response is permitted (consistent with general ICMP * processing rules)." * */ void cipso_v4_error(struct sk_buff *skb, int error, u32 gateway) { if (ip_hdr(skb)->protocol == IPPROTO_ICMP || error != -EACCES) return; if (gateway) icmp_send(skb, ICMP_DEST_UNREACH, ICMP_NET_ANO, 0); else icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_ANO, 0); } /** * cipso_v4_genopt - Generate a CIPSO option * @buf: the option buffer * @buf_len: the size of opt_buf * @doi_def: the CIPSO DOI to use * @secattr: the security attributes * * Description: * Generate a CIPSO option using the DOI definition and security attributes * passed to the function. Returns the length of the option on success and * negative values on failure. * */ static int cipso_v4_genopt(unsigned char *buf, u32 buf_len, const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr) { int ret_val; u32 iter; if (buf_len <= CIPSO_V4_HDR_LEN) return -ENOSPC; /* XXX - This code assumes only one tag per CIPSO option which isn't * really a good assumption to make but since we only support the MAC * tags right now it is a safe assumption. */ iter = 0; do { memset(buf, 0, buf_len); switch (doi_def->tags[iter]) { case CIPSO_V4_TAG_RBITMAP: ret_val = cipso_v4_gentag_rbm(doi_def, secattr, &buf[CIPSO_V4_HDR_LEN], buf_len - CIPSO_V4_HDR_LEN); break; case CIPSO_V4_TAG_ENUM: ret_val = cipso_v4_gentag_enum(doi_def, secattr, &buf[CIPSO_V4_HDR_LEN], buf_len - CIPSO_V4_HDR_LEN); break; case CIPSO_V4_TAG_RANGE: ret_val = cipso_v4_gentag_rng(doi_def, secattr, &buf[CIPSO_V4_HDR_LEN], buf_len - CIPSO_V4_HDR_LEN); break; case CIPSO_V4_TAG_LOCAL: ret_val = cipso_v4_gentag_loc(doi_def, secattr, &buf[CIPSO_V4_HDR_LEN], buf_len - CIPSO_V4_HDR_LEN); break; default: return -EPERM; } iter++; } while (ret_val < 0 && iter < CIPSO_V4_TAG_MAXCNT && doi_def->tags[iter] != CIPSO_V4_TAG_INVALID); if (ret_val < 0) return ret_val; cipso_v4_gentag_hdr(doi_def, buf, ret_val); return CIPSO_V4_HDR_LEN + ret_val; } /** * cipso_v4_sock_setattr - Add a CIPSO option to a socket * @sk: the socket * @doi_def: the CIPSO DOI to use * @secattr: the specific security attributes of the socket * * Description: * Set the CIPSO option on the given socket using the DOI definition and * security attributes passed to the function. This function requires * exclusive access to @sk, which means it either needs to be in the * process of being created or locked. Returns zero on success and negative * values on failure. 
* */ int cipso_v4_sock_setattr(struct sock *sk, const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr) { int ret_val = -EPERM; unsigned char *buf = NULL; u32 buf_len; u32 opt_len; struct ip_options *opt = NULL; struct inet_sock *sk_inet; struct inet_connection_sock *sk_conn; /* In the case of sock_create_lite(), the sock->sk field is not * defined yet but it is not a problem as the only users of these * "lite" PF_INET sockets are functions which do an accept() call * afterwards so we will label the socket as part of the accept(). */ if (sk == NULL) return 0; /* We allocate the maximum CIPSO option size here so we are probably * being a little wasteful, but it makes our life _much_ easier later * on and after all we are only talking about 40 bytes. */ buf_len = CIPSO_V4_OPT_LEN_MAX; buf = kmalloc(buf_len, GFP_ATOMIC); if (buf == NULL) { ret_val = -ENOMEM; goto socket_setattr_failure; } ret_val = cipso_v4_genopt(buf, buf_len, doi_def, secattr); if (ret_val < 0) goto socket_setattr_failure; buf_len = ret_val; /* We can't use ip_options_get() directly because it makes a call to * ip_options_get_alloc() which allocates memory with GFP_KERNEL and * we won't always have CAP_NET_RAW even though we _always_ want to * set the IPOPT_CIPSO option. */ opt_len = (buf_len + 3) & ~3; opt = kzalloc(sizeof(*opt) + opt_len, GFP_ATOMIC); if (opt == NULL) { ret_val = -ENOMEM; goto socket_setattr_failure; } memcpy(opt->__data, buf, buf_len); opt->optlen = opt_len; opt->cipso = sizeof(struct iphdr); kfree(buf); buf = NULL; sk_inet = inet_sk(sk); if (sk_inet->is_icsk) { sk_conn = inet_csk(sk); if (sk_inet->opt) sk_conn->icsk_ext_hdr_len -= sk_inet->opt->optlen; sk_conn->icsk_ext_hdr_len += opt->optlen; sk_conn->icsk_sync_mss(sk, sk_conn->icsk_pmtu_cookie); } opt = xchg(&sk_inet->opt, opt); kfree(opt); return 0; socket_setattr_failure: kfree(buf); kfree(opt); return ret_val; } /** * cipso_v4_req_setattr - Add a CIPSO option to a connection request socket * @req: the connection request socket * @doi_def: the CIPSO DOI to use * @secattr: the specific security attributes of the socket * * Description: * Set the CIPSO option on the given socket using the DOI definition and * security attributes passed to the function. Returns zero on success and * negative values on failure. * */ int cipso_v4_req_setattr(struct request_sock *req, const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr) { int ret_val = -EPERM; unsigned char *buf = NULL; u32 buf_len; u32 opt_len; struct ip_options *opt = NULL; struct inet_request_sock *req_inet; /* We allocate the maximum CIPSO option size here so we are probably * being a little wasteful, but it makes our life _much_ easier later * on and after all we are only talking about 40 bytes. */ buf_len = CIPSO_V4_OPT_LEN_MAX; buf = kmalloc(buf_len, GFP_ATOMIC); if (buf == NULL) { ret_val = -ENOMEM; goto req_setattr_failure; } ret_val = cipso_v4_genopt(buf, buf_len, doi_def, secattr); if (ret_val < 0) goto req_setattr_failure; buf_len = ret_val; /* We can't use ip_options_get() directly because it makes a call to * ip_options_get_alloc() which allocates memory with GFP_KERNEL and * we won't always have CAP_NET_RAW even though we _always_ want to * set the IPOPT_CIPSO option. 
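/*
 * Standalone check of the "(buf_len + 3) & ~3" rounding used above:
 * the generated option is padded up to a 4-byte multiple because the
 * IP header length field counts 32-bit words.
 */
#include <assert.h>
#include <stdint.h>

static uint32_t demo_opt_align(uint32_t len)
{
	return (len + 3) & ~(uint32_t)3;
}

int main(void)
{
	assert(demo_opt_align(1)  == 4);
	assert(demo_opt_align(14) == 16);
	assert(demo_opt_align(16) == 16);	/* already aligned */
	return 0;
}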
*/ opt_len = (buf_len + 3) & ~3; opt = kzalloc(sizeof(*opt) + opt_len, GFP_ATOMIC); if (opt == NULL) { ret_val = -ENOMEM; goto req_setattr_failure; } memcpy(opt->__data, buf, buf_len); opt->optlen = opt_len; opt->cipso = sizeof(struct iphdr); kfree(buf); buf = NULL; req_inet = inet_rsk(req); opt = xchg(&req_inet->opt, opt); kfree(opt); return 0; req_setattr_failure: kfree(buf); kfree(opt); return ret_val; } /** * cipso_v4_delopt - Delete the CIPSO option from a set of IP options * @opt_ptr: IP option pointer * * Description: * Deletes the CIPSO IP option from a set of IP options and makes the necessary * adjustments to the IP option structure. Returns zero on success, negative * values on failure. * */ static int cipso_v4_delopt(struct ip_options **opt_ptr) { int hdr_delta = 0; struct ip_options *opt = *opt_ptr; if (opt->srr || opt->rr || opt->ts || opt->router_alert) { u8 cipso_len; u8 cipso_off; unsigned char *cipso_ptr; int iter; int optlen_new; cipso_off = opt->cipso - sizeof(struct iphdr); cipso_ptr = &opt->__data[cipso_off]; cipso_len = cipso_ptr[1]; if (opt->srr > opt->cipso) opt->srr -= cipso_len; if (opt->rr > opt->cipso) opt->rr -= cipso_len; if (opt->ts > opt->cipso) opt->ts -= cipso_len; if (opt->router_alert > opt->cipso) opt->router_alert -= cipso_len; opt->cipso = 0; memmove(cipso_ptr, cipso_ptr + cipso_len, opt->optlen - cipso_off - cipso_len); /* determining the new total option length is tricky because of * the padding necessary, the only thing i can think to do at * this point is walk the options one-by-one, skipping the * padding at the end to determine the actual option size and * from there we can determine the new total option length */ iter = 0; optlen_new = 0; while (iter < opt->optlen) if (opt->__data[iter] != IPOPT_NOP) { iter += opt->__data[iter + 1]; optlen_new = iter; } else iter++; hdr_delta = opt->optlen; opt->optlen = (optlen_new + 3) & ~3; hdr_delta -= opt->optlen; } else { /* only the cipso option was present on the socket so we can * remove the entire option struct */ *opt_ptr = NULL; hdr_delta = opt->optlen; kfree(opt); } return hdr_delta; } /** * cipso_v4_sock_delattr - Delete the CIPSO option from a socket * @sk: the socket * * Description: * Removes the CIPSO option from a socket, if present. * */ void cipso_v4_sock_delattr(struct sock *sk) { int hdr_delta; struct ip_options *opt; struct inet_sock *sk_inet; sk_inet = inet_sk(sk); opt = sk_inet->opt; if (opt == NULL || opt->cipso == 0) return; hdr_delta = cipso_v4_delopt(&sk_inet->opt); if (sk_inet->is_icsk && hdr_delta > 0) { struct inet_connection_sock *sk_conn = inet_csk(sk); sk_conn->icsk_ext_hdr_len -= hdr_delta; sk_conn->icsk_sync_mss(sk, sk_conn->icsk_pmtu_cookie); } } /** * cipso_v4_req_delattr - Delete the CIPSO option from a request socket * @reg: the request socket * * Description: * Removes the CIPSO option from a request socket, if present. * */ void cipso_v4_req_delattr(struct request_sock *req) { struct ip_options *opt; struct inet_request_sock *req_inet; req_inet = inet_rsk(req); opt = req_inet->opt; if (opt == NULL || opt->cipso == 0) return; cipso_v4_delopt(&req_inet->opt); } /** * cipso_v4_getattr - Helper function for the cipso_v4_*_getattr functions * @cipso: the CIPSO v4 option * @secattr: the security attributes * * Description: * Inspect @cipso and return the security attributes in @secattr. Returns zero * on success and negative values on failure. 
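/*
 * Userspace rendering of the option-length recalculation in
 * cipso_v4_delopt() above: walk the option bytes, stepping over
 * one-byte IPOPT_NOP fillers, and remember where the last real option
 * ends so trailing NOP padding is not counted.
 */
#define DEMO_IPOPT_NOP 1

static int demo_recalc_optlen(const unsigned char *data, int optlen)
{
	int iter = 0;
	int optlen_new = 0;

	while (iter < optlen) {
		if (data[iter] != DEMO_IPOPT_NOP) {
			iter += data[iter + 1];	/* length byte follows type */
			optlen_new = iter;
		} else {
			iter++;
		}
	}
	return (optlen_new + 3) & ~3;	/* pad back to a 4-byte multiple */
}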
* */ static int cipso_v4_getattr(const unsigned char *cipso, struct netlbl_lsm_secattr *secattr) { int ret_val = -ENOMSG; u32 doi; struct cipso_v4_doi *doi_def; if (cipso_v4_cache_check(cipso, cipso[1], secattr) == 0) return 0; doi = get_unaligned_be32(&cipso[2]); rcu_read_lock(); doi_def = cipso_v4_doi_search(doi); if (doi_def == NULL) goto getattr_return; /* XXX - This code assumes only one tag per CIPSO option which isn't * really a good assumption to make but since we only support the MAC * tags right now it is a safe assumption. */ switch (cipso[6]) { case CIPSO_V4_TAG_RBITMAP: ret_val = cipso_v4_parsetag_rbm(doi_def, &cipso[6], secattr); break; case CIPSO_V4_TAG_ENUM: ret_val = cipso_v4_parsetag_enum(doi_def, &cipso[6], secattr); break; case CIPSO_V4_TAG_RANGE: ret_val = cipso_v4_parsetag_rng(doi_def, &cipso[6], secattr); break; case CIPSO_V4_TAG_LOCAL: ret_val = cipso_v4_parsetag_loc(doi_def, &cipso[6], secattr); break; } if (ret_val == 0) secattr->type = NETLBL_NLTYPE_CIPSOV4; getattr_return: rcu_read_unlock(); return ret_val; } /** * cipso_v4_sock_getattr - Get the security attributes from a sock * @sk: the sock * @secattr: the security attributes * * Description: * Query @sk to see if there is a CIPSO option attached to the sock and if * there is return the CIPSO security attributes in @secattr. This function * requires that @sk be locked, or privately held, but it does not do any * locking itself. Returns zero on success and negative values on failure. * */ int cipso_v4_sock_getattr(struct sock *sk, struct netlbl_lsm_secattr *secattr) { struct ip_options *opt; opt = inet_sk(sk)->opt; if (opt == NULL || opt->cipso == 0) return -ENOMSG; return cipso_v4_getattr(opt->__data + opt->cipso - sizeof(struct iphdr), secattr); } /** * cipso_v4_skbuff_setattr - Set the CIPSO option on a packet * @skb: the packet * @secattr: the security attributes * * Description: * Set the CIPSO option on the given packet based on the security attributes. * Returns a pointer to the IP header on success and NULL on failure. 
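/*
 * cipso_v4_getattr() above works on raw option bytes; a sketch of the
 * unaligned big-endian access it depends on, with the fixed offsets it
 * assumes: bytes 2-5 hold the DOI and byte 6 starts the first (and, as
 * noted above, only supported) tag.
 */
#include <stdint.h>

static uint32_t demo_get_be32(const unsigned char *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
}

static uint32_t demo_cipso_doi(const unsigned char *cipso)
{
	return demo_get_be32(&cipso[2]);
}

static unsigned char demo_cipso_first_tag(const unsigned char *cipso)
{
	return cipso[6];
}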
* */ int cipso_v4_skbuff_setattr(struct sk_buff *skb, const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr) { int ret_val; struct iphdr *iph; struct ip_options *opt = &IPCB(skb)->opt; unsigned char buf[CIPSO_V4_OPT_LEN_MAX]; u32 buf_len = CIPSO_V4_OPT_LEN_MAX; u32 opt_len; int len_delta; ret_val = cipso_v4_genopt(buf, buf_len, doi_def, secattr); if (ret_val < 0) return ret_val; buf_len = ret_val; opt_len = (buf_len + 3) & ~3; /* we overwrite any existing options to ensure that we have enough * room for the CIPSO option, the reason is that we _need_ to guarantee * that the security label is applied to the packet - we do the same * thing when using the socket options and it hasn't caused a problem, * if we need to we can always revisit this choice later */ len_delta = opt_len - opt->optlen; /* if we don't ensure enough headroom we could panic on the skb_push() * call below so make sure we have enough, we are also "mangling" the * packet so we should probably do a copy-on-write call anyway */ ret_val = skb_cow(skb, skb_headroom(skb) + len_delta); if (ret_val < 0) return ret_val; if (len_delta > 0) { /* we assume that the header + opt->optlen have already been * "pushed" in ip_options_build() or similar */ iph = ip_hdr(skb); skb_push(skb, len_delta); memmove((char *)iph - len_delta, iph, iph->ihl << 2); skb_reset_network_header(skb); iph = ip_hdr(skb); } else if (len_delta < 0) { iph = ip_hdr(skb); memset(iph + 1, IPOPT_NOP, opt->optlen); } else iph = ip_hdr(skb); if (opt->optlen > 0) memset(opt, 0, sizeof(*opt)); opt->optlen = opt_len; opt->cipso = sizeof(struct iphdr); opt->is_changed = 1; /* we have to do the following because we are being called from a * netfilter hook which means the packet already has had the header * fields populated and the checksum calculated - yes this means we * are doing more work than needed but we do it to keep the core * stack clean and tidy */ memcpy(iph + 1, buf, buf_len); if (opt_len > buf_len) memset((char *)(iph + 1) + buf_len, 0, opt_len - buf_len); if (len_delta != 0) { iph->ihl = 5 + (opt_len >> 2); iph->tot_len = htons(skb->len); } ip_send_check(iph); return 0; } /** * cipso_v4_skbuff_delattr - Delete any CIPSO options from a packet * @skb: the packet * * Description: * Removes any and all CIPSO options from the given packet. Returns zero on * success, negative values on failure. * */ int cipso_v4_skbuff_delattr(struct sk_buff *skb) { int ret_val; struct iphdr *iph; struct ip_options *opt = &IPCB(skb)->opt; unsigned char *cipso_ptr; if (opt->cipso == 0) return 0; /* since we are changing the packet we should make a copy */ ret_val = skb_cow(skb, skb_headroom(skb)); if (ret_val < 0) return ret_val; /* the easiest thing to do is just replace the cipso option with noop * options since we don't change the size of the packet, although we * still need to recalculate the checksum */ iph = ip_hdr(skb); cipso_ptr = (unsigned char *)iph + opt->cipso; memset(cipso_ptr, IPOPT_NOOP, cipso_ptr[1]); opt->cipso = 0; opt->is_changed = 1; ip_send_check(iph); return 0; } /** * cipso_v4_skbuff_getattr - Get the security attributes from the CIPSO option * @skb: the packet * @secattr: the security attributes * * Description: * Parse the given packet's CIPSO option and return the security attributes. * Returns zero on success and negative values on failure. 
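/*
 * Check of the header-length arithmetic used above after the option
 * area is resized: ihl counts 32-bit words, five of which form the
 * fixed IPv4 header, so an aligned option block contributes
 * opt_len / 4 additional words.
 */
#include <assert.h>
#include <stdint.h>

static uint8_t demo_ihl(uint32_t opt_len)	/* opt_len already 4-aligned */
{
	return (uint8_t)(5 + (opt_len >> 2));
}

int main(void)
{
	assert(demo_ihl(0)  == 5);	/* bare 20-byte header */
	assert(demo_ihl(40) == 15);	/* 60-byte maximum header */
	return 0;
}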
* */ int cipso_v4_skbuff_getattr(const struct sk_buff *skb, struct netlbl_lsm_secattr *secattr) { return cipso_v4_getattr(CIPSO_V4_OPTPTR(skb), secattr); } /* * Setup Functions */ /** * cipso_v4_init - Initialize the CIPSO module * * Description: * Initialize the CIPSO module and prepare it for use. Returns zero on success * and negative values on failure. * */ static int __init cipso_v4_init(void) { int ret_val; ret_val = cipso_v4_cache_init(); if (ret_val != 0) panic("Failed to initialize the CIPSO/IPv4 cache (%d)\n", ret_val); return 0; } subsys_initcall(cipso_v4_init);
/* * CIPSO - Commercial IP Security Option * * This is an implementation of the CIPSO 2.2 protocol as specified in * draft-ietf-cipso-ipsecurity-01.txt with additional tag types as found in * FIPS-188. While CIPSO never became a full IETF RFC standard many vendors * have chosen to adopt the protocol and over the years it has become a * de-facto standard for labeled networking. * * The CIPSO draft specification can be found in the kernel's Documentation * directory as well as the following URL: * http://tools.ietf.org/id/draft-ietf-cipso-ipsecurity-01.txt * The FIPS-188 specification can be found at the following URL: * http://www.itl.nist.gov/fipspubs/fip188.htm * * Author: Paul Moore <paul.moore@hp.com> * */ /* * (c) Copyright Hewlett-Packard Development Company, L.P., 2006, 2008 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See * the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/init.h> #include <linux/types.h> #include <linux/rcupdate.h> #include <linux/list.h> #include <linux/spinlock.h> #include <linux/string.h> #include <linux/jhash.h> #include <linux/audit.h> #include <linux/slab.h> #include <net/ip.h> #include <net/icmp.h> #include <net/tcp.h> #include <net/netlabel.h> #include <net/cipso_ipv4.h> #include <asm/atomic.h> #include <asm/bug.h> #include <asm/unaligned.h> /* List of available DOI definitions */ /* XXX - This currently assumes a minimal number of different DOIs in use, * if in practice there are a lot of different DOIs this list should * probably be turned into a hash table or something similar so we * can do quick lookups. */ static DEFINE_SPINLOCK(cipso_v4_doi_list_lock); static LIST_HEAD(cipso_v4_doi_list); /* Label mapping cache */ int cipso_v4_cache_enabled = 1; int cipso_v4_cache_bucketsize = 10; #define CIPSO_V4_CACHE_BUCKETBITS 7 #define CIPSO_V4_CACHE_BUCKETS (1 << CIPSO_V4_CACHE_BUCKETBITS) #define CIPSO_V4_CACHE_REORDERLIMIT 10 struct cipso_v4_map_cache_bkt { spinlock_t lock; u32 size; struct list_head list; }; struct cipso_v4_map_cache_entry { u32 hash; unsigned char *key; size_t key_len; struct netlbl_lsm_cache *lsm_data; u32 activity; struct list_head list; }; static struct cipso_v4_map_cache_bkt *cipso_v4_cache = NULL; /* Restricted bitmap (tag #1) flags */ int cipso_v4_rbm_optfmt = 0; int cipso_v4_rbm_strictvalid = 1; /* * Protocol Constants */ /* Maximum size of the CIPSO IP option, derived from the fact that the maximum * IPv4 header size is 60 bytes and the base IPv4 header is 20 bytes long. */ #define CIPSO_V4_OPT_LEN_MAX 40 /* Length of the base CIPSO option, this includes the option type (1 byte), the * option length (1 byte), and the DOI (4 bytes). */ #define CIPSO_V4_HDR_LEN 6 /* Base length of the restrictive category bitmap tag (tag #1). */ #define CIPSO_V4_TAG_RBM_BLEN 4 /* Base length of the enumerated category tag (tag #2). */ #define CIPSO_V4_TAG_ENUM_BLEN 4 /* Base length of the ranged categories bitmap tag (tag #5). 
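/*
 * The cache declared above sizes its bucket array to a power of two
 * (1 << CIPSO_V4_CACHE_BUCKETBITS) so that a bucket can be chosen by
 * masking the hash instead of dividing; a standalone sketch of that
 * indexing:
 */
#include <stdint.h>

#define DEMO_BUCKETBITS 7
#define DEMO_BUCKETS	(1 << DEMO_BUCKETBITS)

static uint32_t demo_bucket(uint32_t hash)
{
	/* equivalent to hash % DEMO_BUCKETS only for powers of two */
	return hash & (DEMO_BUCKETS - 1);
}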
*/ #define CIPSO_V4_TAG_RNG_BLEN 4 /* The maximum number of category ranges permitted in the ranged category tag * (tag #5). You may note that the IETF draft states that the maximum number * of category ranges is 7, but if the low end of the last category range is * zero then it is possible to fit 8 category ranges because the zero should * be omitted. */ #define CIPSO_V4_TAG_RNG_CAT_MAX 8 /* Base length of the local tag (non-standard tag). * Tag definition (may change between kernel versions) * * 0 8 16 24 32 * +----------+----------+----------+----------+ * | 10000000 | 00000110 | 32-bit secid value | * +----------+----------+----------+----------+ * | in (host byte order)| * +----------+----------+ * */ #define CIPSO_V4_TAG_LOC_BLEN 6 /* * Helper Functions */ /** * cipso_v4_bitmap_walk - Walk a bitmap looking for a bit * @bitmap: the bitmap * @bitmap_len: length in bits * @offset: starting offset * @state: if non-zero, look for a set (1) bit else look for a cleared (0) bit * * Description: * Starting at @offset, walk the bitmap from left to right until either the * desired bit is found or we reach the end. Return the bit offset, -1 if * not found, or -2 if error. */ static int cipso_v4_bitmap_walk(const unsigned char *bitmap, u32 bitmap_len, u32 offset, u8 state) { u32 bit_spot; u32 byte_offset; unsigned char bitmask; unsigned char byte; /* gcc always rounds to zero when doing integer division */ byte_offset = offset / 8; byte = bitmap[byte_offset]; bit_spot = offset; bitmask = 0x80 >> (offset % 8); while (bit_spot < bitmap_len) { if ((state && (byte & bitmask) == bitmask) || (state == 0 && (byte & bitmask) == 0)) return bit_spot; bit_spot++; bitmask >>= 1; if (bitmask == 0) { byte = bitmap[++byte_offset]; bitmask = 0x80; } } return -1; } /** * cipso_v4_bitmap_setbit - Sets a single bit in a bitmap * @bitmap: the bitmap * @bit: the bit * @state: if non-zero, set the bit (1) else clear the bit (0) * * Description: * Set a single bit in the bitmask. Returns zero on success, negative values * on error. */ static void cipso_v4_bitmap_setbit(unsigned char *bitmap, u32 bit, u8 state) { u32 byte_spot; u8 bitmask; /* gcc always rounds to zero when doing integer division */ byte_spot = bit / 8; bitmask = 0x80 >> (bit % 8); if (state) bitmap[byte_spot] |= bitmask; else bitmap[byte_spot] &= ~bitmask; } /** * cipso_v4_cache_entry_free - Frees a cache entry * @entry: the entry to free * * Description: * This function frees the memory associated with a cache entry including the * LSM cache data if there are no longer any users, i.e. reference count == 0. * */ static void cipso_v4_cache_entry_free(struct cipso_v4_map_cache_entry *entry) { if (entry->lsm_data) netlbl_secattr_cache_free(entry->lsm_data); kfree(entry->key); kfree(entry); } /** * cipso_v4_map_cache_hash - Hashing function for the CIPSO cache * @key: the hash key * @key_len: the length of the key in bytes * * Description: * The CIPSO tag hashing function. Returns a 32-bit hash value. * */ static u32 cipso_v4_map_cache_hash(const unsigned char *key, u32 key_len) { return jhash(key, key_len, 0); } /* * Label Mapping Cache Functions */ /** * cipso_v4_cache_init - Initialize the CIPSO cache * * Description: * Initializes the CIPSO label mapping cache, this function should be called * before any of the other functions defined in this file. Returns zero on * success, negative values on error. 
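/*
 * Usage sketch for the MSB-first bit numbering that
 * cipso_v4_bitmap_walk() and cipso_v4_bitmap_setbit() above share:
 * bit 0 is the high-order bit of byte 0, i.e. mask 0x80 >> (bit % 8),
 * matching how the bitmap travels on the wire.
 */
#include <assert.h>
#include <stdint.h>

static int demo_test_bit(const unsigned char *bitmap, uint32_t bit)
{
	return (bitmap[bit / 8] & (0x80 >> (bit % 8))) != 0;
}

int main(void)
{
	unsigned char bm[2] = { 0x80, 0x01 };	/* bits 0 and 15 set */

	assert(demo_test_bit(bm, 0));
	assert(!demo_test_bit(bm, 1));
	assert(demo_test_bit(bm, 15));
	return 0;
}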
* */ static int cipso_v4_cache_init(void) { u32 iter; cipso_v4_cache = kcalloc(CIPSO_V4_CACHE_BUCKETS, sizeof(struct cipso_v4_map_cache_bkt), GFP_KERNEL); if (cipso_v4_cache == NULL) return -ENOMEM; for (iter = 0; iter < CIPSO_V4_CACHE_BUCKETS; iter++) { spin_lock_init(&cipso_v4_cache[iter].lock); cipso_v4_cache[iter].size = 0; INIT_LIST_HEAD(&cipso_v4_cache[iter].list); } return 0; } /** * cipso_v4_cache_invalidate - Invalidates the current CIPSO cache * * Description: * Invalidates and frees any entries in the CIPSO cache. Returns zero on * success and negative values on failure. * */ void cipso_v4_cache_invalidate(void) { struct cipso_v4_map_cache_entry *entry, *tmp_entry; u32 iter; for (iter = 0; iter < CIPSO_V4_CACHE_BUCKETS; iter++) { spin_lock_bh(&cipso_v4_cache[iter].lock); list_for_each_entry_safe(entry, tmp_entry, &cipso_v4_cache[iter].list, list) { list_del(&entry->list); cipso_v4_cache_entry_free(entry); } cipso_v4_cache[iter].size = 0; spin_unlock_bh(&cipso_v4_cache[iter].lock); } } /** * cipso_v4_cache_check - Check the CIPSO cache for a label mapping * @key: the buffer to check * @key_len: buffer length in bytes * @secattr: the security attribute struct to use * * Description: * This function checks the cache to see if a label mapping already exists for * the given key. If there is a match then the cache is adjusted and the * @secattr struct is populated with the correct LSM security attributes. The * cache is adjusted in the following manner if the entry is not already the * first in the cache bucket: * * 1. The cache entry's activity counter is incremented * 2. The previous (higher ranking) entry's activity counter is decremented * 3. If the difference between the two activity counters is geater than * CIPSO_V4_CACHE_REORDERLIMIT the two entries are swapped * * Returns zero on success, -ENOENT for a cache miss, and other negative values * on error. * */ static int cipso_v4_cache_check(const unsigned char *key, u32 key_len, struct netlbl_lsm_secattr *secattr) { u32 bkt; struct cipso_v4_map_cache_entry *entry; struct cipso_v4_map_cache_entry *prev_entry = NULL; u32 hash; if (!cipso_v4_cache_enabled) return -ENOENT; hash = cipso_v4_map_cache_hash(key, key_len); bkt = hash & (CIPSO_V4_CACHE_BUCKETS - 1); spin_lock_bh(&cipso_v4_cache[bkt].lock); list_for_each_entry(entry, &cipso_v4_cache[bkt].list, list) { if (entry->hash == hash && entry->key_len == key_len && memcmp(entry->key, key, key_len) == 0) { entry->activity += 1; atomic_inc(&entry->lsm_data->refcount); secattr->cache = entry->lsm_data; secattr->flags |= NETLBL_SECATTR_CACHE; secattr->type = NETLBL_NLTYPE_CIPSOV4; if (prev_entry == NULL) { spin_unlock_bh(&cipso_v4_cache[bkt].lock); return 0; } if (prev_entry->activity > 0) prev_entry->activity -= 1; if (entry->activity > prev_entry->activity && entry->activity - prev_entry->activity > CIPSO_V4_CACHE_REORDERLIMIT) { __list_del(entry->list.prev, entry->list.next); __list_add(&entry->list, prev_entry->list.prev, &prev_entry->list); } spin_unlock_bh(&cipso_v4_cache[bkt].lock); return 0; } prev_entry = entry; } spin_unlock_bh(&cipso_v4_cache[bkt].lock); return -ENOENT; } /** * cipso_v4_cache_add - Add an entry to the CIPSO cache * @skb: the packet * @secattr: the packet's security attributes * * Description: * Add a new entry into the CIPSO label mapping cache. Add the new entry to * head of the cache bucket's list, if the cache bucket is out of room remove * the last entry in the list first. It is important to note that there is * currently no checking for duplicate keys. 
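/*
 * Sketch of the self-reordering rule documented above for cache hits:
 * the hit entry's activity counter is bumped, the entry ahead of it is
 * aged, and the two swap places once the gap exceeds the reorder
 * limit, so hot entries drift toward the bucket head.
 */
#define DEMO_REORDERLIMIT 10

struct demo_entry { int activity; };

/* Returns nonzero when @hit should move in front of @prev. */
static int demo_hit_should_swap(struct demo_entry *hit,
				struct demo_entry *prev)
{
	hit->activity += 1;
	if (prev->activity > 0)
		prev->activity -= 1;
	return hit->activity > prev->activity &&
	       hit->activity - prev->activity > DEMO_REORDERLIMIT;
}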
Returns zero on success, * negative values on failure. * */ int cipso_v4_cache_add(const struct sk_buff *skb, const struct netlbl_lsm_secattr *secattr) { int ret_val = -EPERM; u32 bkt; struct cipso_v4_map_cache_entry *entry = NULL; struct cipso_v4_map_cache_entry *old_entry = NULL; unsigned char *cipso_ptr; u32 cipso_ptr_len; if (!cipso_v4_cache_enabled || cipso_v4_cache_bucketsize <= 0) return 0; cipso_ptr = CIPSO_V4_OPTPTR(skb); cipso_ptr_len = cipso_ptr[1]; entry = kzalloc(sizeof(*entry), GFP_ATOMIC); if (entry == NULL) return -ENOMEM; entry->key = kmemdup(cipso_ptr, cipso_ptr_len, GFP_ATOMIC); if (entry->key == NULL) { ret_val = -ENOMEM; goto cache_add_failure; } entry->key_len = cipso_ptr_len; entry->hash = cipso_v4_map_cache_hash(cipso_ptr, cipso_ptr_len); atomic_inc(&secattr->cache->refcount); entry->lsm_data = secattr->cache; bkt = entry->hash & (CIPSO_V4_CACHE_BUCKETS - 1); spin_lock_bh(&cipso_v4_cache[bkt].lock); if (cipso_v4_cache[bkt].size < cipso_v4_cache_bucketsize) { list_add(&entry->list, &cipso_v4_cache[bkt].list); cipso_v4_cache[bkt].size += 1; } else { old_entry = list_entry(cipso_v4_cache[bkt].list.prev, struct cipso_v4_map_cache_entry, list); list_del(&old_entry->list); list_add(&entry->list, &cipso_v4_cache[bkt].list); cipso_v4_cache_entry_free(old_entry); } spin_unlock_bh(&cipso_v4_cache[bkt].lock); return 0; cache_add_failure: if (entry) cipso_v4_cache_entry_free(entry); return ret_val; } /* * DOI List Functions */ /** * cipso_v4_doi_search - Searches for a DOI definition * @doi: the DOI to search for * * Description: * Search the DOI definition list for a DOI definition with a DOI value that * matches @doi. The caller is responsible for calling rcu_read_[un]lock(). * Returns a pointer to the DOI definition on success and NULL on failure. */ static struct cipso_v4_doi *cipso_v4_doi_search(u32 doi) { struct cipso_v4_doi *iter; list_for_each_entry_rcu(iter, &cipso_v4_doi_list, list) if (iter->doi == doi && atomic_read(&iter->refcount)) return iter; return NULL; } /** * cipso_v4_doi_add - Add a new DOI to the CIPSO protocol engine * @doi_def: the DOI structure * @audit_info: NetLabel audit information * * Description: * The caller defines a new DOI for use by the CIPSO engine and calls this * function to add it to the list of acceptable domains. The caller must * ensure that the mapping table specified in @doi_def->map meets all of the * requirements of the mapping type (see cipso_ipv4.h for details). Returns * zero on success and non-zero on failure. 
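/*
 * Array-based analogue of the insertion policy in cipso_v4_cache_add()
 * above: new entries always enter at the head, and a full bucket
 * evicts its tail entry to make room.  A real implementation stores
 * list-linked pointers, not ints; this only illustrates the ordering.
 */
#include <string.h>

#define DEMO_BUCKETSIZE 10

struct demo_bucket {
	int size;
	int entries[DEMO_BUCKETSIZE];	/* entries[0] is the list head */
};

static void demo_bucket_add(struct demo_bucket *b, int entry)
{
	int keep = (b->size < DEMO_BUCKETSIZE) ? b->size
					       : DEMO_BUCKETSIZE - 1;

	/* shift toward the tail, dropping the last entry when full */
	memmove(&b->entries[1], &b->entries[0], keep * sizeof(int));
	b->entries[0] = entry;
	if (b->size < DEMO_BUCKETSIZE)
		b->size++;
}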
* */ int cipso_v4_doi_add(struct cipso_v4_doi *doi_def, struct netlbl_audit *audit_info) { int ret_val = -EINVAL; u32 iter; u32 doi; u32 doi_type; struct audit_buffer *audit_buf; doi = doi_def->doi; doi_type = doi_def->type; if (doi_def == NULL || doi_def->doi == CIPSO_V4_DOI_UNKNOWN) goto doi_add_return; for (iter = 0; iter < CIPSO_V4_TAG_MAXCNT; iter++) { switch (doi_def->tags[iter]) { case CIPSO_V4_TAG_RBITMAP: break; case CIPSO_V4_TAG_RANGE: case CIPSO_V4_TAG_ENUM: if (doi_def->type != CIPSO_V4_MAP_PASS) goto doi_add_return; break; case CIPSO_V4_TAG_LOCAL: if (doi_def->type != CIPSO_V4_MAP_LOCAL) goto doi_add_return; break; case CIPSO_V4_TAG_INVALID: if (iter == 0) goto doi_add_return; break; default: goto doi_add_return; } } atomic_set(&doi_def->refcount, 1); spin_lock(&cipso_v4_doi_list_lock); if (cipso_v4_doi_search(doi_def->doi) != NULL) { spin_unlock(&cipso_v4_doi_list_lock); ret_val = -EEXIST; goto doi_add_return; } list_add_tail_rcu(&doi_def->list, &cipso_v4_doi_list); spin_unlock(&cipso_v4_doi_list_lock); ret_val = 0; doi_add_return: audit_buf = netlbl_audit_start(AUDIT_MAC_CIPSOV4_ADD, audit_info); if (audit_buf != NULL) { const char *type_str; switch (doi_type) { case CIPSO_V4_MAP_TRANS: type_str = "trans"; break; case CIPSO_V4_MAP_PASS: type_str = "pass"; break; case CIPSO_V4_MAP_LOCAL: type_str = "local"; break; default: type_str = "(unknown)"; } audit_log_format(audit_buf, " cipso_doi=%u cipso_type=%s res=%u", doi, type_str, ret_val == 0 ? 1 : 0); audit_log_end(audit_buf); } return ret_val; } /** * cipso_v4_doi_free - Frees a DOI definition * @entry: the entry's RCU field * * Description: * This function frees all of the memory associated with a DOI definition. * */ void cipso_v4_doi_free(struct cipso_v4_doi *doi_def) { if (doi_def == NULL) return; switch (doi_def->type) { case CIPSO_V4_MAP_TRANS: kfree(doi_def->map.std->lvl.cipso); kfree(doi_def->map.std->lvl.local); kfree(doi_def->map.std->cat.cipso); kfree(doi_def->map.std->cat.local); break; } kfree(doi_def); } /** * cipso_v4_doi_free_rcu - Frees a DOI definition via the RCU pointer * @entry: the entry's RCU field * * Description: * This function is designed to be used as a callback to the call_rcu() * function so that the memory allocated to the DOI definition can be released * safely. * */ static void cipso_v4_doi_free_rcu(struct rcu_head *entry) { struct cipso_v4_doi *doi_def; doi_def = container_of(entry, struct cipso_v4_doi, rcu); cipso_v4_doi_free(doi_def); } /** * cipso_v4_doi_remove - Remove an existing DOI from the CIPSO protocol engine * @doi: the DOI value * @audit_secid: the LSM secid to use in the audit message * * Description: * Removes a DOI definition from the CIPSO engine. The NetLabel routines will * be called to release their own LSM domain mappings as well as our own * domain list. Returns zero on success and negative values on failure. 
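/*
 * Reduced sketch of the tag-list rule enforced by the validation loop
 * in cipso_v4_doi_add() above: CIPSO_V4_TAG_INVALID entries are
 * tolerated in later slots, but not in slot 0, since a definition with
 * no usable first tag could never generate an option.
 */
#define DEMO_TAG_MAXCNT	 5
#define DEMO_TAG_INVALID 0

static int demo_tags_ok(const unsigned char *tags)
{
	/* the kernel's loop also type-checks every slot against the
	 * mapping type; this keeps only the one structural rule */
	return tags[0] != DEMO_TAG_INVALID;
}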
* */ int cipso_v4_doi_remove(u32 doi, struct netlbl_audit *audit_info) { int ret_val; struct cipso_v4_doi *doi_def; struct audit_buffer *audit_buf; spin_lock(&cipso_v4_doi_list_lock); doi_def = cipso_v4_doi_search(doi); if (doi_def == NULL) { spin_unlock(&cipso_v4_doi_list_lock); ret_val = -ENOENT; goto doi_remove_return; } if (!atomic_dec_and_test(&doi_def->refcount)) { spin_unlock(&cipso_v4_doi_list_lock); ret_val = -EBUSY; goto doi_remove_return; } list_del_rcu(&doi_def->list); spin_unlock(&cipso_v4_doi_list_lock); cipso_v4_cache_invalidate(); call_rcu(&doi_def->rcu, cipso_v4_doi_free_rcu); ret_val = 0; doi_remove_return: audit_buf = netlbl_audit_start(AUDIT_MAC_CIPSOV4_DEL, audit_info); if (audit_buf != NULL) { audit_log_format(audit_buf, " cipso_doi=%u res=%u", doi, ret_val == 0 ? 1 : 0); audit_log_end(audit_buf); } return ret_val; } /** * cipso_v4_doi_getdef - Returns a reference to a valid DOI definition * @doi: the DOI value * * Description: * Searches for a valid DOI definition and if one is found it is returned to * the caller. Otherwise NULL is returned. The caller must ensure that * rcu_read_lock() is held while accessing the returned definition and the DOI * definition reference count is decremented when the caller is done. * */ struct cipso_v4_doi *cipso_v4_doi_getdef(u32 doi) { struct cipso_v4_doi *doi_def; rcu_read_lock(); doi_def = cipso_v4_doi_search(doi); if (doi_def == NULL) goto doi_getdef_return; if (!atomic_inc_not_zero(&doi_def->refcount)) doi_def = NULL; doi_getdef_return: rcu_read_unlock(); return doi_def; } /** * cipso_v4_doi_putdef - Releases a reference for the given DOI definition * @doi_def: the DOI definition * * Description: * Releases a DOI definition reference obtained from cipso_v4_doi_getdef(). * */ void cipso_v4_doi_putdef(struct cipso_v4_doi *doi_def) { if (doi_def == NULL) return; if (!atomic_dec_and_test(&doi_def->refcount)) return; spin_lock(&cipso_v4_doi_list_lock); list_del_rcu(&doi_def->list); spin_unlock(&cipso_v4_doi_list_lock); cipso_v4_cache_invalidate(); call_rcu(&doi_def->rcu, cipso_v4_doi_free_rcu); } /** * cipso_v4_doi_walk - Iterate through the DOI definitions * @skip_cnt: skip past this number of DOI definitions, updated * @callback: callback for each DOI definition * @cb_arg: argument for the callback function * * Description: * Iterate over the DOI definition list, skipping the first @skip_cnt entries. * For each entry call @callback, if @callback returns a negative value stop * 'walking' through the list and return. Updates the value in @skip_cnt upon * return. Returns zero on success, negative values on failure. * */ int cipso_v4_doi_walk(u32 *skip_cnt, int (*callback) (struct cipso_v4_doi *doi_def, void *arg), void *cb_arg) { int ret_val = -ENOENT; u32 doi_cnt = 0; struct cipso_v4_doi *iter_doi; rcu_read_lock(); list_for_each_entry_rcu(iter_doi, &cipso_v4_doi_list, list) if (atomic_read(&iter_doi->refcount) > 0) { if (doi_cnt++ < *skip_cnt) continue; ret_val = callback(iter_doi, cb_arg); if (ret_val < 0) { doi_cnt--; goto doi_walk_return; } } doi_walk_return: rcu_read_unlock(); *skip_cnt = doi_cnt; return ret_val; } /* * Label Mapping Functions */ /** * cipso_v4_map_lvl_valid - Checks to see if the given level is understood * @doi_def: the DOI definition * @level: the level to check * * Description: * Checks the given level against the given DOI definition and returns a * negative value if the level does not have a valid mapping and a zero value * if the level is defined by the DOI. 
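/*
 * C11 userspace sketch of the reference pattern behind
 * cipso_v4_doi_getdef() above (atomic_inc_not_zero): a reader may only
 * take a reference while the count is still nonzero, otherwise the
 * object is already being torn down and the lookup must fail.
 */
#include <stdatomic.h>

struct demo_doi { atomic_int refcount; };

/* Returns nonzero if a reference was taken. */
static int demo_get_ref(struct demo_doi *d)
{
	int old = atomic_load(&d->refcount);

	while (old != 0) {
		/* on failure, old is reloaded with the current value */
		if (atomic_compare_exchange_weak(&d->refcount, &old, old + 1))
			return 1;
	}
	return 0;
}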
* */ static int cipso_v4_map_cat_rng_valid(const struct cipso_v4_doi *doi_def, const unsigned char *rngcat, u32 rngcat_len) { u16 cat_high; u16 cat_low; u32 cat_prev = CIPSO_V4_MAX_REM_CATS + 1; u32 iter; if (doi_def->type != CIPSO_V4_MAP_PASS || rngcat_len & 0x01) return -EFAULT; for (iter = 0; iter < rngcat_len; iter += 4) { cat_high = get_unaligned_be16(&rngcat[iter]); if ((iter + 4) <= rngcat_len) cat_low = get_unaligned_be16(&rngcat[iter + 2]); else cat_low = 0; if (cat_high > cat_prev) return -EFAULT; cat_prev = cat_low; } return 0; } /** * cipso_v4_map_cat_rng_hton - Perform a category mapping from host to network * @doi_def: the DOI definition * @secattr: the security attributes * @net_cat: the zero'd out category list in network/CIPSO format * @net_cat_len: the length of the CIPSO category list in bytes * * Description: * Perform a label mapping to translate a local MLS category bitmap to the * correct CIPSO category list using the given DOI definition. Returns the * size in bytes of the network category bitmap on success, negative values * otherwise. * */ static int cipso_v4_map_cat_rng_hton(const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr, unsigned char *net_cat, u32 net_cat_len) { int iter = -1; u16 array[CIPSO_V4_TAG_RNG_CAT_MAX * 2]; u32 array_cnt = 0; u32 cat_size = 0; /* make sure we don't overflow the 'array[]' variable */ if (net_cat_len > (CIPSO_V4_OPT_LEN_MAX - CIPSO_V4_HDR_LEN - CIPSO_V4_TAG_RNG_BLEN)) return -ENOSPC; for (;;) { iter = netlbl_secattr_catmap_walk(secattr->attr.mls.cat, iter + 1); if (iter < 0) break; cat_size += (iter == 0 ? 0 : sizeof(u16)); if (cat_size > net_cat_len) return -ENOSPC; array[array_cnt++] = iter; iter = netlbl_secattr_catmap_walk_rng(secattr->attr.mls.cat, iter); if (iter < 0) return -EFAULT; cat_size += sizeof(u16); if (cat_size > net_cat_len) return -ENOSPC; array[array_cnt++] = iter; } for (iter = 0; array_cnt > 0;) { *((__be16 *)&net_cat[iter]) = htons(array[--array_cnt]); iter += 2; array_cnt--; if (array[array_cnt] != 0) { *((__be16 *)&net_cat[iter]) = htons(array[array_cnt]); iter += 2; } } return cat_size; } /** * cipso_v4_map_cat_rng_ntoh - Perform a category mapping from network to host * @doi_def: the DOI definition * @net_cat: the category list in network/CIPSO format * @net_cat_len: the length of the CIPSO bitmap in bytes * @secattr: the security attributes * * Description: * Perform a label mapping to translate a CIPSO category list to the correct * local MLS category bitmap using the given DOI definition. Returns zero on * success, negative values on failure. * */ static int cipso_v4_map_cat_rng_ntoh(const struct cipso_v4_doi *doi_def, const unsigned char *net_cat, u32 net_cat_len, struct netlbl_lsm_secattr *secattr) { int ret_val; u32 net_iter; u16 cat_low; u16 cat_high; for (net_iter = 0; net_iter < net_cat_len; net_iter += 4) { cat_high = get_unaligned_be16(&net_cat[net_iter]); if ((net_iter + 4) <= net_cat_len) cat_low = get_unaligned_be16(&net_cat[net_iter + 2]); else cat_low = 0; ret_val = netlbl_secattr_catmap_setrng(secattr->attr.mls.cat, cat_low, cat_high, GFP_ATOMIC); if (ret_val != 0) return ret_val; } return 0; } /* * Protocol Handling Functions */ /** * cipso_v4_gentag_hdr - Generate a CIPSO option header * @doi_def: the DOI definition * @len: the total tag length in bytes, not including this header * @buf: the CIPSO option buffer * * Description: * Write a CIPSO header into the beginning of @buffer. 
* */ static void cipso_v4_gentag_hdr(const struct cipso_v4_doi *doi_def, unsigned char *buf, u32 len) { buf[0] = IPOPT_CIPSO; buf[1] = CIPSO_V4_HDR_LEN + len; *(__be32 *)&buf[2] = htonl(doi_def->doi); } /** * cipso_v4_gentag_rbm - Generate a CIPSO restricted bitmap tag (type #1) * @doi_def: the DOI definition * @secattr: the security attributes * @buffer: the option buffer * @buffer_len: length of buffer in bytes * * Description: * Generate a CIPSO option using the restricted bitmap tag, tag type #1. The * actual buffer length may be larger than the indicated size due to * translation between host and network category bitmaps. Returns the size of * the tag on success, negative values on failure. * */ static int cipso_v4_gentag_rbm(const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr, unsigned char *buffer, u32 buffer_len) { int ret_val; u32 tag_len; u32 level; if ((secattr->flags & NETLBL_SECATTR_MLS_LVL) == 0) return -EPERM; ret_val = cipso_v4_map_lvl_hton(doi_def, secattr->attr.mls.lvl, &level); if (ret_val != 0) return ret_val; if (secattr->flags & NETLBL_SECATTR_MLS_CAT) { ret_val = cipso_v4_map_cat_rbm_hton(doi_def, secattr, &buffer[4], buffer_len - 4); if (ret_val < 0) return ret_val; /* This will send packets using the "optimized" format when * possible as specified in section 3.4.2.6 of the * CIPSO draft. */ if (cipso_v4_rbm_optfmt && ret_val > 0 && ret_val <= 10) tag_len = 14; else tag_len = 4 + ret_val; } else tag_len = 4; buffer[0] = CIPSO_V4_TAG_RBITMAP; buffer[1] = tag_len; buffer[3] = level; return tag_len; } /** * cipso_v4_parsetag_rbm - Parse a CIPSO restricted bitmap tag * @doi_def: the DOI definition * @tag: the CIPSO tag * @secattr: the security attributes * * Description: * Parse a CIPSO restricted bitmap tag (tag type #1) and return the security * attributes in @secattr. Return zero on success, negatives values on * failure. * */ static int cipso_v4_parsetag_rbm(const struct cipso_v4_doi *doi_def, const unsigned char *tag, struct netlbl_lsm_secattr *secattr) { int ret_val; u8 tag_len = tag[1]; u32 level; ret_val = cipso_v4_map_lvl_ntoh(doi_def, tag[3], &level); if (ret_val != 0) return ret_val; secattr->attr.mls.lvl = level; secattr->flags |= NETLBL_SECATTR_MLS_LVL; if (tag_len > 4) { secattr->attr.mls.cat = netlbl_secattr_catmap_alloc(GFP_ATOMIC); if (secattr->attr.mls.cat == NULL) return -ENOMEM; ret_val = cipso_v4_map_cat_rbm_ntoh(doi_def, &tag[4], tag_len - 4, secattr); if (ret_val != 0) { netlbl_secattr_catmap_free(secattr->attr.mls.cat); return ret_val; } secattr->flags |= NETLBL_SECATTR_MLS_CAT; } return 0; } /** * cipso_v4_gentag_enum - Generate a CIPSO enumerated tag (type #2) * @doi_def: the DOI definition * @secattr: the security attributes * @buffer: the option buffer * @buffer_len: length of buffer in bytes * * Description: * Generate a CIPSO option using the enumerated tag, tag type #2. Returns the * size of the tag on success, negative values on failure. 
* */ static int cipso_v4_gentag_enum(const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr, unsigned char *buffer, u32 buffer_len) { int ret_val; u32 tag_len; u32 level; if (!(secattr->flags & NETLBL_SECATTR_MLS_LVL)) return -EPERM; ret_val = cipso_v4_map_lvl_hton(doi_def, secattr->attr.mls.lvl, &level); if (ret_val != 0) return ret_val; if (secattr->flags & NETLBL_SECATTR_MLS_CAT) { ret_val = cipso_v4_map_cat_enum_hton(doi_def, secattr, &buffer[4], buffer_len - 4); if (ret_val < 0) return ret_val; tag_len = 4 + ret_val; } else tag_len = 4; buffer[0] = CIPSO_V4_TAG_ENUM; buffer[1] = tag_len; buffer[3] = level; return tag_len; } /** * cipso_v4_parsetag_enum - Parse a CIPSO enumerated tag * @doi_def: the DOI definition * @tag: the CIPSO tag * @secattr: the security attributes * * Description: * Parse a CIPSO enumerated tag (tag type #2) and return the security * attributes in @secattr. Return zero on success, negatives values on * failure. * */ static int cipso_v4_parsetag_enum(const struct cipso_v4_doi *doi_def, const unsigned char *tag, struct netlbl_lsm_secattr *secattr) { int ret_val; u8 tag_len = tag[1]; u32 level; ret_val = cipso_v4_map_lvl_ntoh(doi_def, tag[3], &level); if (ret_val != 0) return ret_val; secattr->attr.mls.lvl = level; secattr->flags |= NETLBL_SECATTR_MLS_LVL; if (tag_len > 4) { secattr->attr.mls.cat = netlbl_secattr_catmap_alloc(GFP_ATOMIC); if (secattr->attr.mls.cat == NULL) return -ENOMEM; ret_val = cipso_v4_map_cat_enum_ntoh(doi_def, &tag[4], tag_len - 4, secattr); if (ret_val != 0) { netlbl_secattr_catmap_free(secattr->attr.mls.cat); return ret_val; } secattr->flags |= NETLBL_SECATTR_MLS_CAT; } return 0; } /** * cipso_v4_gentag_rng - Generate a CIPSO ranged tag (type #5) * @doi_def: the DOI definition * @secattr: the security attributes * @buffer: the option buffer * @buffer_len: length of buffer in bytes * * Description: * Generate a CIPSO option using the ranged tag, tag type #5. Returns the * size of the tag on success, negative values on failure. * */ static int cipso_v4_gentag_rng(const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr, unsigned char *buffer, u32 buffer_len) { int ret_val; u32 tag_len; u32 level; if (!(secattr->flags & NETLBL_SECATTR_MLS_LVL)) return -EPERM; ret_val = cipso_v4_map_lvl_hton(doi_def, secattr->attr.mls.lvl, &level); if (ret_val != 0) return ret_val; if (secattr->flags & NETLBL_SECATTR_MLS_CAT) { ret_val = cipso_v4_map_cat_rng_hton(doi_def, secattr, &buffer[4], buffer_len - 4); if (ret_val < 0) return ret_val; tag_len = 4 + ret_val; } else tag_len = 4; buffer[0] = CIPSO_V4_TAG_RANGE; buffer[1] = tag_len; buffer[3] = level; return tag_len; } /** * cipso_v4_parsetag_rng - Parse a CIPSO ranged tag * @doi_def: the DOI definition * @tag: the CIPSO tag * @secattr: the security attributes * * Description: * Parse a CIPSO ranged tag (tag type #5) and return the security attributes * in @secattr. Return zero on success, negatives values on failure. 
* */ static int cipso_v4_parsetag_rng(const struct cipso_v4_doi *doi_def, const unsigned char *tag, struct netlbl_lsm_secattr *secattr) { int ret_val; u8 tag_len = tag[1]; u32 level; ret_val = cipso_v4_map_lvl_ntoh(doi_def, tag[3], &level); if (ret_val != 0) return ret_val; secattr->attr.mls.lvl = level; secattr->flags |= NETLBL_SECATTR_MLS_LVL; if (tag_len > 4) { secattr->attr.mls.cat = netlbl_secattr_catmap_alloc(GFP_ATOMIC); if (secattr->attr.mls.cat == NULL) return -ENOMEM; ret_val = cipso_v4_map_cat_rng_ntoh(doi_def, &tag[4], tag_len - 4, secattr); if (ret_val != 0) { netlbl_secattr_catmap_free(secattr->attr.mls.cat); return ret_val; } secattr->flags |= NETLBL_SECATTR_MLS_CAT; } return 0; } /** * cipso_v4_gentag_loc - Generate a CIPSO local tag (non-standard) * @doi_def: the DOI definition * @secattr: the security attributes * @buffer: the option buffer * @buffer_len: length of buffer in bytes * * Description: * Generate a CIPSO option using the local tag. Returns the size of the tag * on success, negative values on failure. * */ static int cipso_v4_gentag_loc(const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr, unsigned char *buffer, u32 buffer_len) { if (!(secattr->flags & NETLBL_SECATTR_SECID)) return -EPERM; buffer[0] = CIPSO_V4_TAG_LOCAL; buffer[1] = CIPSO_V4_TAG_LOC_BLEN; *(u32 *)&buffer[2] = secattr->attr.secid; return CIPSO_V4_TAG_LOC_BLEN; } /** * cipso_v4_parsetag_loc - Parse a CIPSO local tag * @doi_def: the DOI definition * @tag: the CIPSO tag * @secattr: the security attributes * * Description: * Parse a CIPSO local tag and return the security attributes in @secattr. * Return zero on success, negatives values on failure. * */ static int cipso_v4_parsetag_loc(const struct cipso_v4_doi *doi_def, const unsigned char *tag, struct netlbl_lsm_secattr *secattr) { secattr->attr.secid = *(u32 *)&tag[2]; secattr->flags |= NETLBL_SECATTR_SECID; return 0; } /** * cipso_v4_validate - Validate a CIPSO option * @option: the start of the option, on error it is set to point to the error * * Description: * This routine is called to validate a CIPSO option, it checks all of the * fields to ensure that they are at least valid, see the draft snippet below * for details. If the option is valid then a zero value is returned and * the value of @option is unchanged. If the option is invalid then a * non-zero value is returned and @option is adjusted to point to the * offending portion of the option. From the IETF draft ... * * "If any field within the CIPSO options, such as the DOI identifier, is not * recognized the IP datagram is discarded and an ICMP 'parameter problem' * (type 12) is generated and returned. The ICMP code field is set to 'bad * parameter' (code 0) and the pointer is set to the start of the CIPSO field * that is unrecognized." 
* */ int cipso_v4_validate(const struct sk_buff *skb, unsigned char **option) { unsigned char *opt = *option; unsigned char *tag; unsigned char opt_iter; unsigned char err_offset = 0; u8 opt_len; u8 tag_len; struct cipso_v4_doi *doi_def = NULL; u32 tag_iter; /* caller already checks for length values that are too large */ opt_len = opt[1]; if (opt_len < 8) { err_offset = 1; goto validate_return; } rcu_read_lock(); doi_def = cipso_v4_doi_search(get_unaligned_be32(&opt[2])); if (doi_def == NULL) { err_offset = 2; goto validate_return_locked; } opt_iter = CIPSO_V4_HDR_LEN; tag = opt + opt_iter; while (opt_iter < opt_len) { for (tag_iter = 0; doi_def->tags[tag_iter] != tag[0];) if (doi_def->tags[tag_iter] == CIPSO_V4_TAG_INVALID || ++tag_iter == CIPSO_V4_TAG_MAXCNT) { err_offset = opt_iter; goto validate_return_locked; } tag_len = tag[1]; if (tag_len > (opt_len - opt_iter)) { err_offset = opt_iter + 1; goto validate_return_locked; } switch (tag[0]) { case CIPSO_V4_TAG_RBITMAP: if (tag_len < CIPSO_V4_TAG_RBM_BLEN) { err_offset = opt_iter + 1; goto validate_return_locked; } /* We are already going to do all the verification * necessary at the socket layer so from our point of * view it is safe to turn these checks off (and less * work), however, the CIPSO draft says we should do * all the CIPSO validations here but it doesn't * really specify _exactly_ what we need to validate * ... so, just make it a sysctl tunable. */ if (cipso_v4_rbm_strictvalid) { if (cipso_v4_map_lvl_valid(doi_def, tag[3]) < 0) { err_offset = opt_iter + 3; goto validate_return_locked; } if (tag_len > CIPSO_V4_TAG_RBM_BLEN && cipso_v4_map_cat_rbm_valid(doi_def, &tag[4], tag_len - 4) < 0) { err_offset = opt_iter + 4; goto validate_return_locked; } } break; case CIPSO_V4_TAG_ENUM: if (tag_len < CIPSO_V4_TAG_ENUM_BLEN) { err_offset = opt_iter + 1; goto validate_return_locked; } if (cipso_v4_map_lvl_valid(doi_def, tag[3]) < 0) { err_offset = opt_iter + 3; goto validate_return_locked; } if (tag_len > CIPSO_V4_TAG_ENUM_BLEN && cipso_v4_map_cat_enum_valid(doi_def, &tag[4], tag_len - 4) < 0) { err_offset = opt_iter + 4; goto validate_return_locked; } break; case CIPSO_V4_TAG_RANGE: if (tag_len < CIPSO_V4_TAG_RNG_BLEN) { err_offset = opt_iter + 1; goto validate_return_locked; } if (cipso_v4_map_lvl_valid(doi_def, tag[3]) < 0) { err_offset = opt_iter + 3; goto validate_return_locked; } if (tag_len > CIPSO_V4_TAG_RNG_BLEN && cipso_v4_map_cat_rng_valid(doi_def, &tag[4], tag_len - 4) < 0) { err_offset = opt_iter + 4; goto validate_return_locked; } break; case CIPSO_V4_TAG_LOCAL: /* This is a non-standard tag that we only allow for * local connections, so if the incoming interface is * not the loopback device drop the packet. */ if (!(skb->dev->flags & IFF_LOOPBACK)) { err_offset = opt_iter; goto validate_return_locked; } if (tag_len != CIPSO_V4_TAG_LOC_BLEN) { err_offset = opt_iter + 1; goto validate_return_locked; } break; default: err_offset = opt_iter; goto validate_return_locked; } tag += tag_len; opt_iter += tag_len; } validate_return_locked: rcu_read_unlock(); validate_return: *option = opt + err_offset; return err_offset; } /** * cipso_v4_error - Send the correct response for a bad packet * @skb: the packet * @error: the error code * @gateway: CIPSO gateway flag * * Description: * Based on the error code given in @error, send an ICMP error message back to * the originating host. From the IETF draft ... 
* * "If the contents of the CIPSO [option] are valid but the security label is * outside of the configured host or port label range, the datagram is * discarded and an ICMP 'destination unreachable' (type 3) is generated and * returned. The code field of the ICMP is set to 'communication with * destination network administratively prohibited' (code 9) or to * 'communication with destination host administratively prohibited' * (code 10). The value of the code is dependent on whether the originator * of the ICMP message is acting as a CIPSO host or a CIPSO gateway. The * recipient of the ICMP message MUST be able to handle either value. The * same procedure is performed if a CIPSO [option] can not be added to an * IP packet because it is too large to fit in the IP options area." * * "If the error is triggered by receipt of an ICMP message, the message is * discarded and no response is permitted (consistent with general ICMP * processing rules)." * */ void cipso_v4_error(struct sk_buff *skb, int error, u32 gateway) { if (ip_hdr(skb)->protocol == IPPROTO_ICMP || error != -EACCES) return; if (gateway) icmp_send(skb, ICMP_DEST_UNREACH, ICMP_NET_ANO, 0); else icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_ANO, 0); } /** * cipso_v4_genopt - Generate a CIPSO option * @buf: the option buffer * @buf_len: the size of opt_buf * @doi_def: the CIPSO DOI to use * @secattr: the security attributes * * Description: * Generate a CIPSO option using the DOI definition and security attributes * passed to the function. Returns the length of the option on success and * negative values on failure. * */ static int cipso_v4_genopt(unsigned char *buf, u32 buf_len, const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr) { int ret_val; u32 iter; if (buf_len <= CIPSO_V4_HDR_LEN) return -ENOSPC; /* XXX - This code assumes only one tag per CIPSO option which isn't * really a good assumption to make but since we only support the MAC * tags right now it is a safe assumption. */ iter = 0; do { memset(buf, 0, buf_len); switch (doi_def->tags[iter]) { case CIPSO_V4_TAG_RBITMAP: ret_val = cipso_v4_gentag_rbm(doi_def, secattr, &buf[CIPSO_V4_HDR_LEN], buf_len - CIPSO_V4_HDR_LEN); break; case CIPSO_V4_TAG_ENUM: ret_val = cipso_v4_gentag_enum(doi_def, secattr, &buf[CIPSO_V4_HDR_LEN], buf_len - CIPSO_V4_HDR_LEN); break; case CIPSO_V4_TAG_RANGE: ret_val = cipso_v4_gentag_rng(doi_def, secattr, &buf[CIPSO_V4_HDR_LEN], buf_len - CIPSO_V4_HDR_LEN); break; case CIPSO_V4_TAG_LOCAL: ret_val = cipso_v4_gentag_loc(doi_def, secattr, &buf[CIPSO_V4_HDR_LEN], buf_len - CIPSO_V4_HDR_LEN); break; default: return -EPERM; } iter++; } while (ret_val < 0 && iter < CIPSO_V4_TAG_MAXCNT && doi_def->tags[iter] != CIPSO_V4_TAG_INVALID); if (ret_val < 0) return ret_val; cipso_v4_gentag_hdr(doi_def, buf, ret_val); return CIPSO_V4_HDR_LEN + ret_val; } static void opt_kfree_rcu(struct rcu_head *head) { kfree(container_of(head, struct ip_options_rcu, rcu)); } /** * cipso_v4_sock_setattr - Add a CIPSO option to a socket * @sk: the socket * @doi_def: the CIPSO DOI to use * @secattr: the specific security attributes of the socket * * Description: * Set the CIPSO option on the given socket using the DOI definition and * security attributes passed to the function. This function requires * exclusive access to @sk, which means it either needs to be in the * process of being created or locked. Returns zero on success and negative * values on failure. 
* */ int cipso_v4_sock_setattr(struct sock *sk, const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr) { int ret_val = -EPERM; unsigned char *buf = NULL; u32 buf_len; u32 opt_len; struct ip_options_rcu *old, *opt = NULL; struct inet_sock *sk_inet; struct inet_connection_sock *sk_conn; /* In the case of sock_create_lite(), the sock->sk field is not * defined yet but it is not a problem as the only users of these * "lite" PF_INET sockets are functions which do an accept() call * afterwards so we will label the socket as part of the accept(). */ if (sk == NULL) return 0; /* We allocate the maximum CIPSO option size here so we are probably * being a little wasteful, but it makes our life _much_ easier later * on and after all we are only talking about 40 bytes. */ buf_len = CIPSO_V4_OPT_LEN_MAX; buf = kmalloc(buf_len, GFP_ATOMIC); if (buf == NULL) { ret_val = -ENOMEM; goto socket_setattr_failure; } ret_val = cipso_v4_genopt(buf, buf_len, doi_def, secattr); if (ret_val < 0) goto socket_setattr_failure; buf_len = ret_val; /* We can't use ip_options_get() directly because it makes a call to * ip_options_get_alloc() which allocates memory with GFP_KERNEL and * we won't always have CAP_NET_RAW even though we _always_ want to * set the IPOPT_CIPSO option. */ opt_len = (buf_len + 3) & ~3; opt = kzalloc(sizeof(*opt) + opt_len, GFP_ATOMIC); if (opt == NULL) { ret_val = -ENOMEM; goto socket_setattr_failure; } memcpy(opt->opt.__data, buf, buf_len); opt->opt.optlen = opt_len; opt->opt.cipso = sizeof(struct iphdr); kfree(buf); buf = NULL; sk_inet = inet_sk(sk); old = rcu_dereference_protected(sk_inet->inet_opt, sock_owned_by_user(sk)); if (sk_inet->is_icsk) { sk_conn = inet_csk(sk); if (old) sk_conn->icsk_ext_hdr_len -= old->opt.optlen; sk_conn->icsk_ext_hdr_len += opt->opt.optlen; sk_conn->icsk_sync_mss(sk, sk_conn->icsk_pmtu_cookie); } rcu_assign_pointer(sk_inet->inet_opt, opt); if (old) call_rcu(&old->rcu, opt_kfree_rcu); return 0; socket_setattr_failure: kfree(buf); kfree(opt); return ret_val; } /** * cipso_v4_req_setattr - Add a CIPSO option to a connection request socket * @req: the connection request socket * @doi_def: the CIPSO DOI to use * @secattr: the specific security attributes of the socket * * Description: * Set the CIPSO option on the given socket using the DOI definition and * security attributes passed to the function. Returns zero on success and * negative values on failure. * */ int cipso_v4_req_setattr(struct request_sock *req, const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr) { int ret_val = -EPERM; unsigned char *buf = NULL; u32 buf_len; u32 opt_len; struct ip_options_rcu *opt = NULL; struct inet_request_sock *req_inet; /* We allocate the maximum CIPSO option size here so we are probably * being a little wasteful, but it makes our life _much_ easier later * on and after all we are only talking about 40 bytes. */ buf_len = CIPSO_V4_OPT_LEN_MAX; buf = kmalloc(buf_len, GFP_ATOMIC); if (buf == NULL) { ret_val = -ENOMEM; goto req_setattr_failure; } ret_val = cipso_v4_genopt(buf, buf_len, doi_def, secattr); if (ret_val < 0) goto req_setattr_failure; buf_len = ret_val; /* We can't use ip_options_get() directly because it makes a call to * ip_options_get_alloc() which allocates memory with GFP_KERNEL and * we won't always have CAP_NET_RAW even though we _always_ want to * set the IPOPT_CIPSO option. 
*/ opt_len = (buf_len + 3) & ~3; opt = kzalloc(sizeof(*opt) + opt_len, GFP_ATOMIC); if (opt == NULL) { ret_val = -ENOMEM; goto req_setattr_failure; } memcpy(opt->opt.__data, buf, buf_len); opt->opt.optlen = opt_len; opt->opt.cipso = sizeof(struct iphdr); kfree(buf); buf = NULL; req_inet = inet_rsk(req); opt = xchg(&req_inet->opt, opt); if (opt) call_rcu(&opt->rcu, opt_kfree_rcu); return 0; req_setattr_failure: kfree(buf); kfree(opt); return ret_val; } /** * cipso_v4_delopt - Delete the CIPSO option from a set of IP options * @opt_ptr: IP option pointer * * Description: * Deletes the CIPSO IP option from a set of IP options and makes the necessary * adjustments to the IP option structure. Returns zero on success, negative * values on failure. * */ static int cipso_v4_delopt(struct ip_options_rcu **opt_ptr) { int hdr_delta = 0; struct ip_options_rcu *opt = *opt_ptr; if (opt->opt.srr || opt->opt.rr || opt->opt.ts || opt->opt.router_alert) { u8 cipso_len; u8 cipso_off; unsigned char *cipso_ptr; int iter; int optlen_new; cipso_off = opt->opt.cipso - sizeof(struct iphdr); cipso_ptr = &opt->opt.__data[cipso_off]; cipso_len = cipso_ptr[1]; if (opt->opt.srr > opt->opt.cipso) opt->opt.srr -= cipso_len; if (opt->opt.rr > opt->opt.cipso) opt->opt.rr -= cipso_len; if (opt->opt.ts > opt->opt.cipso) opt->opt.ts -= cipso_len; if (opt->opt.router_alert > opt->opt.cipso) opt->opt.router_alert -= cipso_len; opt->opt.cipso = 0; memmove(cipso_ptr, cipso_ptr + cipso_len, opt->opt.optlen - cipso_off - cipso_len); /* determining the new total option length is tricky because of * the padding necessary, the only thing i can think to do at * this point is walk the options one-by-one, skipping the * padding at the end to determine the actual option size and * from there we can determine the new total option length */ iter = 0; optlen_new = 0; while (iter < opt->opt.optlen) if (opt->opt.__data[iter] != IPOPT_NOP) { iter += opt->opt.__data[iter + 1]; optlen_new = iter; } else iter++; hdr_delta = opt->opt.optlen; opt->opt.optlen = (optlen_new + 3) & ~3; hdr_delta -= opt->opt.optlen; } else { /* only the cipso option was present on the socket so we can * remove the entire option struct */ *opt_ptr = NULL; hdr_delta = opt->opt.optlen; call_rcu(&opt->rcu, opt_kfree_rcu); } return hdr_delta; } /** * cipso_v4_sock_delattr - Delete the CIPSO option from a socket * @sk: the socket * * Description: * Removes the CIPSO option from a socket, if present. * */ void cipso_v4_sock_delattr(struct sock *sk) { int hdr_delta; struct ip_options_rcu *opt; struct inet_sock *sk_inet; sk_inet = inet_sk(sk); opt = rcu_dereference_protected(sk_inet->inet_opt, 1); if (opt == NULL || opt->opt.cipso == 0) return; hdr_delta = cipso_v4_delopt(&sk_inet->inet_opt); if (sk_inet->is_icsk && hdr_delta > 0) { struct inet_connection_sock *sk_conn = inet_csk(sk); sk_conn->icsk_ext_hdr_len -= hdr_delta; sk_conn->icsk_sync_mss(sk, sk_conn->icsk_pmtu_cookie); } } /** * cipso_v4_req_delattr - Delete the CIPSO option from a request socket * @reg: the request socket * * Description: * Removes the CIPSO option from a request socket, if present. 
* */ void cipso_v4_req_delattr(struct request_sock *req) { struct ip_options_rcu *opt; struct inet_request_sock *req_inet; req_inet = inet_rsk(req); opt = req_inet->opt; if (opt == NULL || opt->opt.cipso == 0) return; cipso_v4_delopt(&req_inet->opt); } /** * cipso_v4_getattr - Helper function for the cipso_v4_*_getattr functions * @cipso: the CIPSO v4 option * @secattr: the security attributes * * Description: * Inspect @cipso and return the security attributes in @secattr. Returns zero * on success and negative values on failure. * */ static int cipso_v4_getattr(const unsigned char *cipso, struct netlbl_lsm_secattr *secattr) { int ret_val = -ENOMSG; u32 doi; struct cipso_v4_doi *doi_def; if (cipso_v4_cache_check(cipso, cipso[1], secattr) == 0) return 0; doi = get_unaligned_be32(&cipso[2]); rcu_read_lock(); doi_def = cipso_v4_doi_search(doi); if (doi_def == NULL) goto getattr_return; /* XXX - This code assumes only one tag per CIPSO option which isn't * really a good assumption to make but since we only support the MAC * tags right now it is a safe assumption. */ switch (cipso[6]) { case CIPSO_V4_TAG_RBITMAP: ret_val = cipso_v4_parsetag_rbm(doi_def, &cipso[6], secattr); break; case CIPSO_V4_TAG_ENUM: ret_val = cipso_v4_parsetag_enum(doi_def, &cipso[6], secattr); break; case CIPSO_V4_TAG_RANGE: ret_val = cipso_v4_parsetag_rng(doi_def, &cipso[6], secattr); break; case CIPSO_V4_TAG_LOCAL: ret_val = cipso_v4_parsetag_loc(doi_def, &cipso[6], secattr); break; } if (ret_val == 0) secattr->type = NETLBL_NLTYPE_CIPSOV4; getattr_return: rcu_read_unlock(); return ret_val; } /** * cipso_v4_sock_getattr - Get the security attributes from a sock * @sk: the sock * @secattr: the security attributes * * Description: * Query @sk to see if there is a CIPSO option attached to the sock and if * there is return the CIPSO security attributes in @secattr. This function * requires that @sk be locked, or privately held, but it does not do any * locking itself. Returns zero on success and negative values on failure. * */ int cipso_v4_sock_getattr(struct sock *sk, struct netlbl_lsm_secattr *secattr) { struct ip_options_rcu *opt; int res = -ENOMSG; rcu_read_lock(); opt = rcu_dereference(inet_sk(sk)->inet_opt); if (opt && opt->opt.cipso) res = cipso_v4_getattr(opt->opt.__data + opt->opt.cipso - sizeof(struct iphdr), secattr); rcu_read_unlock(); return res; } /** * cipso_v4_skbuff_setattr - Set the CIPSO option on a packet * @skb: the packet * @secattr: the security attributes * * Description: * Set the CIPSO option on the given packet based on the security attributes. * Returns a pointer to the IP header on success and NULL on failure. 
* */ int cipso_v4_skbuff_setattr(struct sk_buff *skb, const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr) { int ret_val; struct iphdr *iph; struct ip_options *opt = &IPCB(skb)->opt; unsigned char buf[CIPSO_V4_OPT_LEN_MAX]; u32 buf_len = CIPSO_V4_OPT_LEN_MAX; u32 opt_len; int len_delta; ret_val = cipso_v4_genopt(buf, buf_len, doi_def, secattr); if (ret_val < 0) return ret_val; buf_len = ret_val; opt_len = (buf_len + 3) & ~3; /* we overwrite any existing options to ensure that we have enough * room for the CIPSO option, the reason is that we _need_ to guarantee * that the security label is applied to the packet - we do the same * thing when using the socket options and it hasn't caused a problem, * if we need to we can always revisit this choice later */ len_delta = opt_len - opt->optlen; /* if we don't ensure enough headroom we could panic on the skb_push() * call below so make sure we have enough, we are also "mangling" the * packet so we should probably do a copy-on-write call anyway */ ret_val = skb_cow(skb, skb_headroom(skb) + len_delta); if (ret_val < 0) return ret_val; if (len_delta > 0) { /* we assume that the header + opt->optlen have already been * "pushed" in ip_options_build() or similar */ iph = ip_hdr(skb); skb_push(skb, len_delta); memmove((char *)iph - len_delta, iph, iph->ihl << 2); skb_reset_network_header(skb); iph = ip_hdr(skb); } else if (len_delta < 0) { iph = ip_hdr(skb); memset(iph + 1, IPOPT_NOP, opt->optlen); } else iph = ip_hdr(skb); if (opt->optlen > 0) memset(opt, 0, sizeof(*opt)); opt->optlen = opt_len; opt->cipso = sizeof(struct iphdr); opt->is_changed = 1; /* we have to do the following because we are being called from a * netfilter hook which means the packet already has had the header * fields populated and the checksum calculated - yes this means we * are doing more work than needed but we do it to keep the core * stack clean and tidy */ memcpy(iph + 1, buf, buf_len); if (opt_len > buf_len) memset((char *)(iph + 1) + buf_len, 0, opt_len - buf_len); if (len_delta != 0) { iph->ihl = 5 + (opt_len >> 2); iph->tot_len = htons(skb->len); } ip_send_check(iph); return 0; } /** * cipso_v4_skbuff_delattr - Delete any CIPSO options from a packet * @skb: the packet * * Description: * Removes any and all CIPSO options from the given packet. Returns zero on * success, negative values on failure. * */ int cipso_v4_skbuff_delattr(struct sk_buff *skb) { int ret_val; struct iphdr *iph; struct ip_options *opt = &IPCB(skb)->opt; unsigned char *cipso_ptr; if (opt->cipso == 0) return 0; /* since we are changing the packet we should make a copy */ ret_val = skb_cow(skb, skb_headroom(skb)); if (ret_val < 0) return ret_val; /* the easiest thing to do is just replace the cipso option with noop * options since we don't change the size of the packet, although we * still need to recalculate the checksum */ iph = ip_hdr(skb); cipso_ptr = (unsigned char *)iph + opt->cipso; memset(cipso_ptr, IPOPT_NOOP, cipso_ptr[1]); opt->cipso = 0; opt->is_changed = 1; ip_send_check(iph); return 0; } /** * cipso_v4_skbuff_getattr - Get the security attributes from the CIPSO option * @skb: the packet * @secattr: the security attributes * * Description: * Parse the given packet's CIPSO option and return the security attributes. * Returns zero on success and negative values on failure. 
* */ int cipso_v4_skbuff_getattr(const struct sk_buff *skb, struct netlbl_lsm_secattr *secattr) { return cipso_v4_getattr(CIPSO_V4_OPTPTR(skb), secattr); } /* * Setup Functions */ /** * cipso_v4_init - Initialize the CIPSO module * * Description: * Initialize the CIPSO module and prepare it for use. Returns zero on success * and negative values on failure. * */ static int __init cipso_v4_init(void) { int ret_val; ret_val = cipso_v4_cache_init(); if (ret_val != 0) panic("Failed to initialize the CIPSO/IPv4 cache (%d)\n", ret_val); return 0; } subsys_initcall(cipso_v4_init);
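A minimal user-space sketch of the on-the-wire layout produced by cipso_v4_gentag_hdr() and cipso_v4_gentag_rbm() above, for a pass-through (CIPSO_V4_MAP_PASS) DOI. The DOI value, level, and category choices are invented for the example; IPOPT_CIPSO (134), CIPSO_V4_HDR_LEN, and the byte offsets are taken from the code above. This is illustrative only, not kernel API.

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

#define IPOPT_CIPSO          134 /* option type, 10000110b */
#define CIPSO_V4_HDR_LEN     6   /* type + len + 4-byte DOI */
#define CIPSO_V4_TAG_RBITMAP 1

/* Build a type-1 (restricted bitmap) CIPSO option into buf and
 * return the total option length (buf must hold at least 40 bytes). */
static size_t build_cipso_rbm(uint8_t *buf, uint32_t doi, uint8_t level,
			      const uint8_t *cat_bitmap, size_t cat_len)
{
	uint8_t tag_len = (uint8_t)(4 + cat_len); /* type, len, align, level */
	uint32_t doi_be = htonl(doi);

	buf[0] = IPOPT_CIPSO;
	buf[1] = CIPSO_V4_HDR_LEN + tag_len;   /* total option length */
	memcpy(&buf[2], &doi_be, 4);           /* DOI in network byte order */

	buf[6] = CIPSO_V4_TAG_RBITMAP;         /* tag type #1 */
	buf[7] = tag_len;
	buf[8] = 0;                            /* alignment octet, always zero */
	buf[9] = level;                        /* sensitivity level */
	memcpy(&buf[10], cat_bitmap, cat_len); /* category bitmap, MSB first */
	return buf[1];
}

int main(void)
{
	uint8_t opt[40];
	uint8_t cats[2] = { 0x80, 0x40 };      /* categories 0 and 9 set */
	size_t len = build_cipso_rbm(opt, 3, 5, cats, sizeof(cats));
	size_t i;

	for (i = 0; i < len; i++)
		printf("%02x ", opt[i]);
	printf("\n");
	return 0;
}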
int cipso_v4_req_setattr(struct request_sock *req, const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr) { int ret_val = -EPERM; unsigned char *buf = NULL; u32 buf_len; u32 opt_len; struct ip_options *opt = NULL; struct inet_request_sock *req_inet; /* We allocate the maximum CIPSO option size here so we are probably * being a little wasteful, but it makes our life _much_ easier later * on and after all we are only talking about 40 bytes. */ buf_len = CIPSO_V4_OPT_LEN_MAX; buf = kmalloc(buf_len, GFP_ATOMIC); if (buf == NULL) { ret_val = -ENOMEM; goto req_setattr_failure; } ret_val = cipso_v4_genopt(buf, buf_len, doi_def, secattr); if (ret_val < 0) goto req_setattr_failure; buf_len = ret_val; /* We can't use ip_options_get() directly because it makes a call to * ip_options_get_alloc() which allocates memory with GFP_KERNEL and * we won't always have CAP_NET_RAW even though we _always_ want to * set the IPOPT_CIPSO option. */ opt_len = (buf_len + 3) & ~3; opt = kzalloc(sizeof(*opt) + opt_len, GFP_ATOMIC); if (opt == NULL) { ret_val = -ENOMEM; goto req_setattr_failure; } memcpy(opt->__data, buf, buf_len); opt->optlen = opt_len; opt->cipso = sizeof(struct iphdr); kfree(buf); buf = NULL; req_inet = inet_rsk(req); opt = xchg(&req_inet->opt, opt); kfree(opt); return 0; req_setattr_failure: kfree(buf); kfree(opt); return ret_val; }
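The body above is the pre-patch version: it unpublishes the old options block with xchg() and frees it immediately, even though lock-free readers elsewhere in the stack may still be dereferencing it. That window is the CWE-362 race behind CVE-2012-3552. A condensed, kernel-context sketch of the writer-side change (the swapped-out variable is renamed to old here for clarity; opt_kfree_rcu() appears verbatim in the file above):

/* before (racy): a concurrent reader may still hold 'old' when it
 * is freed */
old = xchg(&req_inet->opt, opt);
kfree(old);

/* after (safe): defer the free until all pre-existing RCU readers
 * have left their read-side critical sections */
old = xchg(&req_inet->opt, opt);
if (old)
	call_rcu(&old->rcu, opt_kfree_rcu);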
int cipso_v4_req_setattr(struct request_sock *req, const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr) { int ret_val = -EPERM; unsigned char *buf = NULL; u32 buf_len; u32 opt_len; struct ip_options_rcu *opt = NULL; struct inet_request_sock *req_inet; /* We allocate the maximum CIPSO option size here so we are probably * being a little wasteful, but it makes our life _much_ easier later * on and after all we are only talking about 40 bytes. */ buf_len = CIPSO_V4_OPT_LEN_MAX; buf = kmalloc(buf_len, GFP_ATOMIC); if (buf == NULL) { ret_val = -ENOMEM; goto req_setattr_failure; } ret_val = cipso_v4_genopt(buf, buf_len, doi_def, secattr); if (ret_val < 0) goto req_setattr_failure; buf_len = ret_val; /* We can't use ip_options_get() directly because it makes a call to * ip_options_get_alloc() which allocates memory with GFP_KERNEL and * we won't always have CAP_NET_RAW even though we _always_ want to * set the IPOPT_CIPSO option. */ opt_len = (buf_len + 3) & ~3; opt = kzalloc(sizeof(*opt) + opt_len, GFP_ATOMIC); if (opt == NULL) { ret_val = -ENOMEM; goto req_setattr_failure; } memcpy(opt->opt.__data, buf, buf_len); opt->opt.optlen = opt_len; opt->opt.cipso = sizeof(struct iphdr); kfree(buf); buf = NULL; req_inet = inet_rsk(req); opt = xchg(&req_inet->opt, opt); if (opt) call_rcu(&opt->rcu, opt_kfree_rcu); return 0; req_setattr_failure: kfree(buf); kfree(opt); return ret_val; }
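For the deferred free above to be safe, readers must hold the options pointer inside an RCU read-side critical section. A condensed kernel-context sketch of the reader-side pairing, mirroring cipso_v4_sock_getattr() in the file above (the function name here is illustrative, not part of the kernel):

static int example_read_cipso_off(struct sock *sk)
{
	struct ip_options_rcu *opt;
	int cipso_off = 0;

	rcu_read_lock();                    /* pin the grace period */
	opt = rcu_dereference(inet_sk(sk)->inet_opt);
	if (opt && opt->opt.cipso)
		cipso_off = opt->opt.cipso; /* safe while the lock is held */
	rcu_read_unlock();                  /* writer may now free */
	return cipso_off;
}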
{'added': [(1860, 'static void opt_kfree_rcu(struct rcu_head *head)'), (1861, '{'), (1862, '\tkfree(container_of(head, struct ip_options_rcu, rcu));'), (1863, '}'), (1864, ''), (1887, '\tstruct ip_options_rcu *old, *opt = NULL;'), (1923, '\tmemcpy(opt->opt.__data, buf, buf_len);'), (1924, '\topt->opt.optlen = opt_len;'), (1925, '\topt->opt.cipso = sizeof(struct iphdr);'), (1930, ''), (1931, '\told = rcu_dereference_protected(sk_inet->inet_opt, sock_owned_by_user(sk));'), (1934, '\t\tif (old)'), (1935, '\t\t\tsk_conn->icsk_ext_hdr_len -= old->opt.optlen;'), (1936, '\t\tsk_conn->icsk_ext_hdr_len += opt->opt.optlen;'), (1939, '\trcu_assign_pointer(sk_inet->inet_opt, opt);'), (1940, '\tif (old)'), (1941, '\t\tcall_rcu(&old->rcu, opt_kfree_rcu);'), (1971, '\tstruct ip_options_rcu *opt = NULL;'), (1999, '\tmemcpy(opt->opt.__data, buf, buf_len);'), (2000, '\topt->opt.optlen = opt_len;'), (2001, '\topt->opt.cipso = sizeof(struct iphdr);'), (2007, '\tif (opt)'), (2008, '\t\tcall_rcu(&opt->rcu, opt_kfree_rcu);'), (2028, 'static int cipso_v4_delopt(struct ip_options_rcu **opt_ptr)'), (2031, '\tstruct ip_options_rcu *opt = *opt_ptr;'), (2033, '\tif (opt->opt.srr || opt->opt.rr || opt->opt.ts || opt->opt.router_alert) {'), (2040, '\t\tcipso_off = opt->opt.cipso - sizeof(struct iphdr);'), (2041, '\t\tcipso_ptr = &opt->opt.__data[cipso_off];'), (2044, '\t\tif (opt->opt.srr > opt->opt.cipso)'), (2045, '\t\t\topt->opt.srr -= cipso_len;'), (2046, '\t\tif (opt->opt.rr > opt->opt.cipso)'), (2047, '\t\t\topt->opt.rr -= cipso_len;'), (2048, '\t\tif (opt->opt.ts > opt->opt.cipso)'), (2049, '\t\t\topt->opt.ts -= cipso_len;'), (2050, '\t\tif (opt->opt.router_alert > opt->opt.cipso)'), (2051, '\t\t\topt->opt.router_alert -= cipso_len;'), (2052, '\t\topt->opt.cipso = 0;'), (2055, '\t\t\topt->opt.optlen - cipso_off - cipso_len);'), (2064, '\t\twhile (iter < opt->opt.optlen)'), (2065, '\t\t\tif (opt->opt.__data[iter] != IPOPT_NOP) {'), (2066, '\t\t\t\titer += opt->opt.__data[iter + 1];'), (2070, '\t\thdr_delta = opt->opt.optlen;'), (2071, '\t\topt->opt.optlen = (optlen_new + 3) & ~3;'), (2072, '\t\thdr_delta -= opt->opt.optlen;'), (2077, '\t\thdr_delta = opt->opt.optlen;'), (2078, '\t\tcall_rcu(&opt->rcu, opt_kfree_rcu);'), (2095, '\tstruct ip_options_rcu *opt;'), (2099, '\topt = rcu_dereference_protected(sk_inet->inet_opt, 1);'), (2100, '\tif (opt == NULL || opt->opt.cipso == 0)'), (2103, '\thdr_delta = cipso_v4_delopt(&sk_inet->inet_opt);'), (2121, '\tstruct ip_options_rcu *opt;'), (2126, '\tif (opt == NULL || opt->opt.cipso == 0)'), (2196, '\tstruct ip_options_rcu *opt;'), (2197, '\tint res = -ENOMSG;'), (2199, '\trcu_read_lock();'), (2200, '\topt = rcu_dereference(inet_sk(sk)->inet_opt);'), (2201, '\tif (opt && opt->opt.cipso)'), (2202, '\t\tres = cipso_v4_getattr(opt->opt.__data +'), (2203, '\t\t\t\t\t\topt->opt.cipso -'), (2204, '\t\t\t\t\t\tsizeof(struct iphdr),'), (2205, '\t\t\t\t secattr);'), (2206, '\trcu_read_unlock();'), (2207, '\treturn res;')], 'deleted': [(1882, '\tstruct ip_options *opt = NULL;'), (1918, '\tmemcpy(opt->__data, buf, buf_len);'), (1919, '\topt->optlen = opt_len;'), (1920, '\topt->cipso = sizeof(struct iphdr);'), (1927, '\t\tif (sk_inet->opt)'), (1928, '\t\t\tsk_conn->icsk_ext_hdr_len -= sk_inet->opt->optlen;'), (1929, '\t\tsk_conn->icsk_ext_hdr_len += opt->optlen;'), (1932, '\topt = xchg(&sk_inet->opt, opt);'), (1933, '\tkfree(opt);'), (1963, '\tstruct ip_options *opt = NULL;'), (1991, '\tmemcpy(opt->__data, buf, buf_len);'), (1992, '\topt->optlen = opt_len;'), (1993, '\topt->cipso = 
sizeof(struct iphdr);'), (1999, '\tkfree(opt);'), (2019, 'static int cipso_v4_delopt(struct ip_options **opt_ptr)'), (2022, '\tstruct ip_options *opt = *opt_ptr;'), (2024, '\tif (opt->srr || opt->rr || opt->ts || opt->router_alert) {'), (2031, '\t\tcipso_off = opt->cipso - sizeof(struct iphdr);'), (2032, '\t\tcipso_ptr = &opt->__data[cipso_off];'), (2035, '\t\tif (opt->srr > opt->cipso)'), (2036, '\t\t\topt->srr -= cipso_len;'), (2037, '\t\tif (opt->rr > opt->cipso)'), (2038, '\t\t\topt->rr -= cipso_len;'), (2039, '\t\tif (opt->ts > opt->cipso)'), (2040, '\t\t\topt->ts -= cipso_len;'), (2041, '\t\tif (opt->router_alert > opt->cipso)'), (2042, '\t\t\topt->router_alert -= cipso_len;'), (2043, '\t\topt->cipso = 0;'), (2046, '\t\t\topt->optlen - cipso_off - cipso_len);'), (2055, '\t\twhile (iter < opt->optlen)'), (2056, '\t\t\tif (opt->__data[iter] != IPOPT_NOP) {'), (2057, '\t\t\t\titer += opt->__data[iter + 1];'), (2061, '\t\thdr_delta = opt->optlen;'), (2062, '\t\topt->optlen = (optlen_new + 3) & ~3;'), (2063, '\t\thdr_delta -= opt->optlen;'), (2068, '\t\thdr_delta = opt->optlen;'), (2069, '\t\tkfree(opt);'), (2086, '\tstruct ip_options *opt;'), (2090, '\topt = sk_inet->opt;'), (2091, '\tif (opt == NULL || opt->cipso == 0)'), (2094, '\thdr_delta = cipso_v4_delopt(&sk_inet->opt);'), (2112, '\tstruct ip_options *opt;'), (2117, '\tif (opt == NULL || opt->cipso == 0)'), (2187, '\tstruct ip_options *opt;'), (2189, '\topt = inet_sk(sk)->opt;'), (2190, '\tif (opt == NULL || opt->cipso == 0)'), (2191, '\t\treturn -ENOMSG;'), (2192, ''), (2193, '\treturn cipso_v4_getattr(opt->__data + opt->cipso - sizeof(struct iphdr),'), (2194, '\t\t\t\tsecattr);')]}
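The bulk of the diff above is mechanical: every opt->field access becomes opt->opt.field because the options block is now embedded in an RCU-capable wrapper. The layout the added lines imply (matching the upstream fix):

struct ip_options_rcu {
	struct rcu_head   rcu; /* used by opt_kfree_rcu() / call_rcu() */
	struct ip_options opt; /* the original block, hence ->opt.optlen etc. */
};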
63
50
1360
7485
https://github.com/torvalds/linux
CVE-2012-3552
['CWE-362']
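CWE-362 is "concurrent execution using shared resource with improper synchronization". A stand-alone, deliberately unsafe user-space illustration of the same publish-then-free pattern (names and values are contrived; compile with -pthread under C11, and a tool such as AddressSanitizer can surface the window when the race fires):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct opts { int len; };

static _Atomic(struct opts *) shared;

static void *reader(void *arg)
{
	struct opts *o = atomic_load(&shared);
	(void)arg;
	if (o)
		printf("len=%d\n", o->len); /* may touch freed memory */
	return NULL;
}

static void *writer(void *arg)
{
	struct opts *n = calloc(1, sizeof(*n));
	struct opts *old;
	(void)arg;
	n->len = 40;
	old = atomic_exchange(&shared, n); /* unpublish the old block ... */
	free(old);                         /* ... and free it too early */
	return NULL;
}

int main(void)
{
	struct opts *first = calloc(1, sizeof(*first));
	pthread_t r, w;

	first->len = 20;
	atomic_store(&shared, first);
	pthread_create(&r, NULL, reader, NULL);
	pthread_create(&w, NULL, writer, NULL);
	pthread_join(r, NULL);
	pthread_join(w, NULL);
	free(atomic_load(&shared));
	return 0;
}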
cipso_ipv4.c
cipso_v4_sock_delattr
/* * CIPSO - Commercial IP Security Option * * This is an implementation of the CIPSO 2.2 protocol as specified in * draft-ietf-cipso-ipsecurity-01.txt with additional tag types as found in * FIPS-188. While CIPSO never became a full IETF RFC standard many vendors * have chosen to adopt the protocol and over the years it has become a * de-facto standard for labeled networking. * * The CIPSO draft specification can be found in the kernel's Documentation * directory as well as the following URL: * http://tools.ietf.org/id/draft-ietf-cipso-ipsecurity-01.txt * The FIPS-188 specification can be found at the following URL: * http://www.itl.nist.gov/fipspubs/fip188.htm * * Author: Paul Moore <paul.moore@hp.com> * */ /* * (c) Copyright Hewlett-Packard Development Company, L.P., 2006, 2008 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See * the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/init.h> #include <linux/types.h> #include <linux/rcupdate.h> #include <linux/list.h> #include <linux/spinlock.h> #include <linux/string.h> #include <linux/jhash.h> #include <linux/audit.h> #include <linux/slab.h> #include <net/ip.h> #include <net/icmp.h> #include <net/tcp.h> #include <net/netlabel.h> #include <net/cipso_ipv4.h> #include <asm/atomic.h> #include <asm/bug.h> #include <asm/unaligned.h> /* List of available DOI definitions */ /* XXX - This currently assumes a minimal number of different DOIs in use, * if in practice there are a lot of different DOIs this list should * probably be turned into a hash table or something similar so we * can do quick lookups. */ static DEFINE_SPINLOCK(cipso_v4_doi_list_lock); static LIST_HEAD(cipso_v4_doi_list); /* Label mapping cache */ int cipso_v4_cache_enabled = 1; int cipso_v4_cache_bucketsize = 10; #define CIPSO_V4_CACHE_BUCKETBITS 7 #define CIPSO_V4_CACHE_BUCKETS (1 << CIPSO_V4_CACHE_BUCKETBITS) #define CIPSO_V4_CACHE_REORDERLIMIT 10 struct cipso_v4_map_cache_bkt { spinlock_t lock; u32 size; struct list_head list; }; struct cipso_v4_map_cache_entry { u32 hash; unsigned char *key; size_t key_len; struct netlbl_lsm_cache *lsm_data; u32 activity; struct list_head list; }; static struct cipso_v4_map_cache_bkt *cipso_v4_cache = NULL; /* Restricted bitmap (tag #1) flags */ int cipso_v4_rbm_optfmt = 0; int cipso_v4_rbm_strictvalid = 1; /* * Protocol Constants */ /* Maximum size of the CIPSO IP option, derived from the fact that the maximum * IPv4 header size is 60 bytes and the base IPv4 header is 20 bytes long. */ #define CIPSO_V4_OPT_LEN_MAX 40 /* Length of the base CIPSO option, this includes the option type (1 byte), the * option length (1 byte), and the DOI (4 bytes). */ #define CIPSO_V4_HDR_LEN 6 /* Base length of the restrictive category bitmap tag (tag #1). */ #define CIPSO_V4_TAG_RBM_BLEN 4 /* Base length of the enumerated category tag (tag #2). */ #define CIPSO_V4_TAG_ENUM_BLEN 4 /* Base length of the ranged categories bitmap tag (tag #5). 
*/ #define CIPSO_V4_TAG_RNG_BLEN 4 /* The maximum number of category ranges permitted in the ranged category tag * (tag #5). You may note that the IETF draft states that the maximum number * of category ranges is 7, but if the low end of the last category range is * zero then it is possible to fit 8 category ranges because the zero should * be omitted. */ #define CIPSO_V4_TAG_RNG_CAT_MAX 8 /* Base length of the local tag (non-standard tag). * Tag definition (may change between kernel versions) * * 0 8 16 24 32 * +----------+----------+----------+----------+ * | 10000000 | 00000110 | 32-bit secid value | * +----------+----------+----------+----------+ * | in (host byte order)| * +----------+----------+ * */ #define CIPSO_V4_TAG_LOC_BLEN 6 /* * Helper Functions */ /** * cipso_v4_bitmap_walk - Walk a bitmap looking for a bit * @bitmap: the bitmap * @bitmap_len: length in bits * @offset: starting offset * @state: if non-zero, look for a set (1) bit else look for a cleared (0) bit * * Description: * Starting at @offset, walk the bitmap from left to right until either the * desired bit is found or we reach the end. Return the bit offset, -1 if * not found, or -2 if error. */ static int cipso_v4_bitmap_walk(const unsigned char *bitmap, u32 bitmap_len, u32 offset, u8 state) { u32 bit_spot; u32 byte_offset; unsigned char bitmask; unsigned char byte; /* gcc always rounds to zero when doing integer division */ byte_offset = offset / 8; byte = bitmap[byte_offset]; bit_spot = offset; bitmask = 0x80 >> (offset % 8); while (bit_spot < bitmap_len) { if ((state && (byte & bitmask) == bitmask) || (state == 0 && (byte & bitmask) == 0)) return bit_spot; bit_spot++; bitmask >>= 1; if (bitmask == 0) { byte = bitmap[++byte_offset]; bitmask = 0x80; } } return -1; } /** * cipso_v4_bitmap_setbit - Sets a single bit in a bitmap * @bitmap: the bitmap * @bit: the bit * @state: if non-zero, set the bit (1) else clear the bit (0) * * Description: * Set a single bit in the bitmask. Returns zero on success, negative values * on error. */ static void cipso_v4_bitmap_setbit(unsigned char *bitmap, u32 bit, u8 state) { u32 byte_spot; u8 bitmask; /* gcc always rounds to zero when doing integer division */ byte_spot = bit / 8; bitmask = 0x80 >> (bit % 8); if (state) bitmap[byte_spot] |= bitmask; else bitmap[byte_spot] &= ~bitmask; } /** * cipso_v4_cache_entry_free - Frees a cache entry * @entry: the entry to free * * Description: * This function frees the memory associated with a cache entry including the * LSM cache data if there are no longer any users, i.e. reference count == 0. * */ static void cipso_v4_cache_entry_free(struct cipso_v4_map_cache_entry *entry) { if (entry->lsm_data) netlbl_secattr_cache_free(entry->lsm_data); kfree(entry->key); kfree(entry); } /** * cipso_v4_map_cache_hash - Hashing function for the CIPSO cache * @key: the hash key * @key_len: the length of the key in bytes * * Description: * The CIPSO tag hashing function. Returns a 32-bit hash value. * */ static u32 cipso_v4_map_cache_hash(const unsigned char *key, u32 key_len) { return jhash(key, key_len, 0); } /* * Label Mapping Cache Functions */ /** * cipso_v4_cache_init - Initialize the CIPSO cache * * Description: * Initializes the CIPSO label mapping cache, this function should be called * before any of the other functions defined in this file. Returns zero on * success, negative values on error. 
* */ static int cipso_v4_cache_init(void) { u32 iter; cipso_v4_cache = kcalloc(CIPSO_V4_CACHE_BUCKETS, sizeof(struct cipso_v4_map_cache_bkt), GFP_KERNEL); if (cipso_v4_cache == NULL) return -ENOMEM; for (iter = 0; iter < CIPSO_V4_CACHE_BUCKETS; iter++) { spin_lock_init(&cipso_v4_cache[iter].lock); cipso_v4_cache[iter].size = 0; INIT_LIST_HEAD(&cipso_v4_cache[iter].list); } return 0; } /** * cipso_v4_cache_invalidate - Invalidates the current CIPSO cache * * Description: * Invalidates and frees any entries in the CIPSO cache. Returns zero on * success and negative values on failure. * */ void cipso_v4_cache_invalidate(void) { struct cipso_v4_map_cache_entry *entry, *tmp_entry; u32 iter; for (iter = 0; iter < CIPSO_V4_CACHE_BUCKETS; iter++) { spin_lock_bh(&cipso_v4_cache[iter].lock); list_for_each_entry_safe(entry, tmp_entry, &cipso_v4_cache[iter].list, list) { list_del(&entry->list); cipso_v4_cache_entry_free(entry); } cipso_v4_cache[iter].size = 0; spin_unlock_bh(&cipso_v4_cache[iter].lock); } } /** * cipso_v4_cache_check - Check the CIPSO cache for a label mapping * @key: the buffer to check * @key_len: buffer length in bytes * @secattr: the security attribute struct to use * * Description: * This function checks the cache to see if a label mapping already exists for * the given key. If there is a match then the cache is adjusted and the * @secattr struct is populated with the correct LSM security attributes. The * cache is adjusted in the following manner if the entry is not already the * first in the cache bucket: * * 1. The cache entry's activity counter is incremented * 2. The previous (higher ranking) entry's activity counter is decremented * 3. If the difference between the two activity counters is geater than * CIPSO_V4_CACHE_REORDERLIMIT the two entries are swapped * * Returns zero on success, -ENOENT for a cache miss, and other negative values * on error. * */ static int cipso_v4_cache_check(const unsigned char *key, u32 key_len, struct netlbl_lsm_secattr *secattr) { u32 bkt; struct cipso_v4_map_cache_entry *entry; struct cipso_v4_map_cache_entry *prev_entry = NULL; u32 hash; if (!cipso_v4_cache_enabled) return -ENOENT; hash = cipso_v4_map_cache_hash(key, key_len); bkt = hash & (CIPSO_V4_CACHE_BUCKETS - 1); spin_lock_bh(&cipso_v4_cache[bkt].lock); list_for_each_entry(entry, &cipso_v4_cache[bkt].list, list) { if (entry->hash == hash && entry->key_len == key_len && memcmp(entry->key, key, key_len) == 0) { entry->activity += 1; atomic_inc(&entry->lsm_data->refcount); secattr->cache = entry->lsm_data; secattr->flags |= NETLBL_SECATTR_CACHE; secattr->type = NETLBL_NLTYPE_CIPSOV4; if (prev_entry == NULL) { spin_unlock_bh(&cipso_v4_cache[bkt].lock); return 0; } if (prev_entry->activity > 0) prev_entry->activity -= 1; if (entry->activity > prev_entry->activity && entry->activity - prev_entry->activity > CIPSO_V4_CACHE_REORDERLIMIT) { __list_del(entry->list.prev, entry->list.next); __list_add(&entry->list, prev_entry->list.prev, &prev_entry->list); } spin_unlock_bh(&cipso_v4_cache[bkt].lock); return 0; } prev_entry = entry; } spin_unlock_bh(&cipso_v4_cache[bkt].lock); return -ENOENT; } /** * cipso_v4_cache_add - Add an entry to the CIPSO cache * @skb: the packet * @secattr: the packet's security attributes * * Description: * Add a new entry into the CIPSO label mapping cache. Add the new entry to * head of the cache bucket's list, if the cache bucket is out of room remove * the last entry in the list first. It is important to note that there is * currently no checking for duplicate keys. 
Returns zero on success, * negative values on failure. * */ int cipso_v4_cache_add(const struct sk_buff *skb, const struct netlbl_lsm_secattr *secattr) { int ret_val = -EPERM; u32 bkt; struct cipso_v4_map_cache_entry *entry = NULL; struct cipso_v4_map_cache_entry *old_entry = NULL; unsigned char *cipso_ptr; u32 cipso_ptr_len; if (!cipso_v4_cache_enabled || cipso_v4_cache_bucketsize <= 0) return 0; cipso_ptr = CIPSO_V4_OPTPTR(skb); cipso_ptr_len = cipso_ptr[1]; entry = kzalloc(sizeof(*entry), GFP_ATOMIC); if (entry == NULL) return -ENOMEM; entry->key = kmemdup(cipso_ptr, cipso_ptr_len, GFP_ATOMIC); if (entry->key == NULL) { ret_val = -ENOMEM; goto cache_add_failure; } entry->key_len = cipso_ptr_len; entry->hash = cipso_v4_map_cache_hash(cipso_ptr, cipso_ptr_len); atomic_inc(&secattr->cache->refcount); entry->lsm_data = secattr->cache; bkt = entry->hash & (CIPSO_V4_CACHE_BUCKETS - 1); spin_lock_bh(&cipso_v4_cache[bkt].lock); if (cipso_v4_cache[bkt].size < cipso_v4_cache_bucketsize) { list_add(&entry->list, &cipso_v4_cache[bkt].list); cipso_v4_cache[bkt].size += 1; } else { old_entry = list_entry(cipso_v4_cache[bkt].list.prev, struct cipso_v4_map_cache_entry, list); list_del(&old_entry->list); list_add(&entry->list, &cipso_v4_cache[bkt].list); cipso_v4_cache_entry_free(old_entry); } spin_unlock_bh(&cipso_v4_cache[bkt].lock); return 0; cache_add_failure: if (entry) cipso_v4_cache_entry_free(entry); return ret_val; } /* * DOI List Functions */ /** * cipso_v4_doi_search - Searches for a DOI definition * @doi: the DOI to search for * * Description: * Search the DOI definition list for a DOI definition with a DOI value that * matches @doi. The caller is responsible for calling rcu_read_[un]lock(). * Returns a pointer to the DOI definition on success and NULL on failure. */ static struct cipso_v4_doi *cipso_v4_doi_search(u32 doi) { struct cipso_v4_doi *iter; list_for_each_entry_rcu(iter, &cipso_v4_doi_list, list) if (iter->doi == doi && atomic_read(&iter->refcount)) return iter; return NULL; } /** * cipso_v4_doi_add - Add a new DOI to the CIPSO protocol engine * @doi_def: the DOI structure * @audit_info: NetLabel audit information * * Description: * The caller defines a new DOI for use by the CIPSO engine and calls this * function to add it to the list of acceptable domains. The caller must * ensure that the mapping table specified in @doi_def->map meets all of the * requirements of the mapping type (see cipso_ipv4.h for details). Returns * zero on success and non-zero on failure. 
 *
 */
int cipso_v4_doi_add(struct cipso_v4_doi *doi_def,
		     struct netlbl_audit *audit_info)
{
	int ret_val = -EINVAL;
	u32 iter;
	u32 doi;
	u32 doi_type;
	struct audit_buffer *audit_buf;

	/* don't dereference doi_def until it has been checked against NULL */
	if (doi_def == NULL)
		return -EINVAL;
	doi = doi_def->doi;
	doi_type = doi_def->type;

	if (doi_def->doi == CIPSO_V4_DOI_UNKNOWN)
		goto doi_add_return;
	for (iter = 0; iter < CIPSO_V4_TAG_MAXCNT; iter++) {
		switch (doi_def->tags[iter]) {
		case CIPSO_V4_TAG_RBITMAP:
			break;
		case CIPSO_V4_TAG_RANGE:
		case CIPSO_V4_TAG_ENUM:
			if (doi_def->type != CIPSO_V4_MAP_PASS)
				goto doi_add_return;
			break;
		case CIPSO_V4_TAG_LOCAL:
			if (doi_def->type != CIPSO_V4_MAP_LOCAL)
				goto doi_add_return;
			break;
		case CIPSO_V4_TAG_INVALID:
			if (iter == 0)
				goto doi_add_return;
			break;
		default:
			goto doi_add_return;
		}
	}

	atomic_set(&doi_def->refcount, 1);

	spin_lock(&cipso_v4_doi_list_lock);
	if (cipso_v4_doi_search(doi_def->doi) != NULL) {
		spin_unlock(&cipso_v4_doi_list_lock);
		ret_val = -EEXIST;
		goto doi_add_return;
	}
	list_add_tail_rcu(&doi_def->list, &cipso_v4_doi_list);
	spin_unlock(&cipso_v4_doi_list_lock);
	ret_val = 0;

doi_add_return:
	audit_buf = netlbl_audit_start(AUDIT_MAC_CIPSOV4_ADD, audit_info);
	if (audit_buf != NULL) {
		const char *type_str;
		switch (doi_type) {
		case CIPSO_V4_MAP_TRANS:
			type_str = "trans";
			break;
		case CIPSO_V4_MAP_PASS:
			type_str = "pass";
			break;
		case CIPSO_V4_MAP_LOCAL:
			type_str = "local";
			break;
		default:
			type_str = "(unknown)";
		}
		audit_log_format(audit_buf,
				 " cipso_doi=%u cipso_type=%s res=%u",
				 doi, type_str,
				 ret_val == 0 ? 1 : 0);
		audit_log_end(audit_buf);
	}

	return ret_val;
}

/**
 * cipso_v4_doi_free - Frees a DOI definition
 * @doi_def: the DOI definition
 *
 * Description:
 * This function frees all of the memory associated with a DOI definition.
 *
 */
void cipso_v4_doi_free(struct cipso_v4_doi *doi_def)
{
	if (doi_def == NULL)
		return;

	switch (doi_def->type) {
	case CIPSO_V4_MAP_TRANS:
		kfree(doi_def->map.std->lvl.cipso);
		kfree(doi_def->map.std->lvl.local);
		kfree(doi_def->map.std->cat.cipso);
		kfree(doi_def->map.std->cat.local);
		break;
	}
	kfree(doi_def);
}

/**
 * cipso_v4_doi_free_rcu - Frees a DOI definition via the RCU pointer
 * @entry: the entry's RCU field
 *
 * Description:
 * This function is designed to be used as a callback to the call_rcu()
 * function so that the memory allocated to the DOI definition can be released
 * safely.
 *
 */
static void cipso_v4_doi_free_rcu(struct rcu_head *entry)
{
	struct cipso_v4_doi *doi_def;

	doi_def = container_of(entry, struct cipso_v4_doi, rcu);
	cipso_v4_doi_free(doi_def);
}

/**
 * cipso_v4_doi_remove - Remove an existing DOI from the CIPSO protocol engine
 * @doi: the DOI value
 * @audit_info: NetLabel audit information
 *
 * Description:
 * Removes a DOI definition from the CIPSO engine. The NetLabel routines will
 * be called to release their own LSM domain mappings as well as our own
 * domain list. Returns zero on success and negative values on failure.
* */ int cipso_v4_doi_remove(u32 doi, struct netlbl_audit *audit_info) { int ret_val; struct cipso_v4_doi *doi_def; struct audit_buffer *audit_buf; spin_lock(&cipso_v4_doi_list_lock); doi_def = cipso_v4_doi_search(doi); if (doi_def == NULL) { spin_unlock(&cipso_v4_doi_list_lock); ret_val = -ENOENT; goto doi_remove_return; } if (!atomic_dec_and_test(&doi_def->refcount)) { spin_unlock(&cipso_v4_doi_list_lock); ret_val = -EBUSY; goto doi_remove_return; } list_del_rcu(&doi_def->list); spin_unlock(&cipso_v4_doi_list_lock); cipso_v4_cache_invalidate(); call_rcu(&doi_def->rcu, cipso_v4_doi_free_rcu); ret_val = 0; doi_remove_return: audit_buf = netlbl_audit_start(AUDIT_MAC_CIPSOV4_DEL, audit_info); if (audit_buf != NULL) { audit_log_format(audit_buf, " cipso_doi=%u res=%u", doi, ret_val == 0 ? 1 : 0); audit_log_end(audit_buf); } return ret_val; } /** * cipso_v4_doi_getdef - Returns a reference to a valid DOI definition * @doi: the DOI value * * Description: * Searches for a valid DOI definition and if one is found it is returned to * the caller. Otherwise NULL is returned. The caller must ensure that * rcu_read_lock() is held while accessing the returned definition and the DOI * definition reference count is decremented when the caller is done. * */ struct cipso_v4_doi *cipso_v4_doi_getdef(u32 doi) { struct cipso_v4_doi *doi_def; rcu_read_lock(); doi_def = cipso_v4_doi_search(doi); if (doi_def == NULL) goto doi_getdef_return; if (!atomic_inc_not_zero(&doi_def->refcount)) doi_def = NULL; doi_getdef_return: rcu_read_unlock(); return doi_def; } /** * cipso_v4_doi_putdef - Releases a reference for the given DOI definition * @doi_def: the DOI definition * * Description: * Releases a DOI definition reference obtained from cipso_v4_doi_getdef(). * */ void cipso_v4_doi_putdef(struct cipso_v4_doi *doi_def) { if (doi_def == NULL) return; if (!atomic_dec_and_test(&doi_def->refcount)) return; spin_lock(&cipso_v4_doi_list_lock); list_del_rcu(&doi_def->list); spin_unlock(&cipso_v4_doi_list_lock); cipso_v4_cache_invalidate(); call_rcu(&doi_def->rcu, cipso_v4_doi_free_rcu); } /** * cipso_v4_doi_walk - Iterate through the DOI definitions * @skip_cnt: skip past this number of DOI definitions, updated * @callback: callback for each DOI definition * @cb_arg: argument for the callback function * * Description: * Iterate over the DOI definition list, skipping the first @skip_cnt entries. * For each entry call @callback, if @callback returns a negative value stop * 'walking' through the list and return. Updates the value in @skip_cnt upon * return. Returns zero on success, negative values on failure. * */ int cipso_v4_doi_walk(u32 *skip_cnt, int (*callback) (struct cipso_v4_doi *doi_def, void *arg), void *cb_arg) { int ret_val = -ENOENT; u32 doi_cnt = 0; struct cipso_v4_doi *iter_doi; rcu_read_lock(); list_for_each_entry_rcu(iter_doi, &cipso_v4_doi_list, list) if (atomic_read(&iter_doi->refcount) > 0) { if (doi_cnt++ < *skip_cnt) continue; ret_val = callback(iter_doi, cb_arg); if (ret_val < 0) { doi_cnt--; goto doi_walk_return; } } doi_walk_return: rcu_read_unlock(); *skip_cnt = doi_cnt; return ret_val; } /* * Label Mapping Functions */ /** * cipso_v4_map_lvl_valid - Checks to see if the given level is understood * @doi_def: the DOI definition * @level: the level to check * * Description: * Checks the given level against the given DOI definition and returns a * negative value if the level does not have a valid mapping and a zero value * if the level is defined by the DOI. 
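 *
 * (Editor's illustrative aside, not part of the original kernel-doc: for a
 *  CIPSO_V4_MAP_TRANS DOI the translation is a pair of directly indexed
 *  arrays. Assuming a hypothetical mapping where host level 3 corresponds
 *  to network level 7:
 *
 *      doi_def->map.std->lvl.local[3] == 7;   (host -> net)
 *      doi_def->map.std->lvl.cipso[7] == 3;   (net  -> host)
 *
 *  unmapped slots hold CIPSO_V4_INV_LVL, which is exactly what the validity
 *  check below looks for.)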
* */ static int cipso_v4_map_lvl_valid(const struct cipso_v4_doi *doi_def, u8 level) { switch (doi_def->type) { case CIPSO_V4_MAP_PASS: return 0; case CIPSO_V4_MAP_TRANS: if (doi_def->map.std->lvl.cipso[level] < CIPSO_V4_INV_LVL) return 0; break; } return -EFAULT; } /** * cipso_v4_map_lvl_hton - Perform a level mapping from the host to the network * @doi_def: the DOI definition * @host_lvl: the host MLS level * @net_lvl: the network/CIPSO MLS level * * Description: * Perform a label mapping to translate a local MLS level to the correct * CIPSO level using the given DOI definition. Returns zero on success, * negative values otherwise. * */ static int cipso_v4_map_lvl_hton(const struct cipso_v4_doi *doi_def, u32 host_lvl, u32 *net_lvl) { switch (doi_def->type) { case CIPSO_V4_MAP_PASS: *net_lvl = host_lvl; return 0; case CIPSO_V4_MAP_TRANS: if (host_lvl < doi_def->map.std->lvl.local_size && doi_def->map.std->lvl.local[host_lvl] < CIPSO_V4_INV_LVL) { *net_lvl = doi_def->map.std->lvl.local[host_lvl]; return 0; } return -EPERM; } return -EINVAL; } /** * cipso_v4_map_lvl_ntoh - Perform a level mapping from the network to the host * @doi_def: the DOI definition * @net_lvl: the network/CIPSO MLS level * @host_lvl: the host MLS level * * Description: * Perform a label mapping to translate a CIPSO level to the correct local MLS * level using the given DOI definition. Returns zero on success, negative * values otherwise. * */ static int cipso_v4_map_lvl_ntoh(const struct cipso_v4_doi *doi_def, u32 net_lvl, u32 *host_lvl) { struct cipso_v4_std_map_tbl *map_tbl; switch (doi_def->type) { case CIPSO_V4_MAP_PASS: *host_lvl = net_lvl; return 0; case CIPSO_V4_MAP_TRANS: map_tbl = doi_def->map.std; if (net_lvl < map_tbl->lvl.cipso_size && map_tbl->lvl.cipso[net_lvl] < CIPSO_V4_INV_LVL) { *host_lvl = doi_def->map.std->lvl.cipso[net_lvl]; return 0; } return -EPERM; } return -EINVAL; } /** * cipso_v4_map_cat_rbm_valid - Checks to see if the category bitmap is valid * @doi_def: the DOI definition * @bitmap: category bitmap * @bitmap_len: bitmap length in bytes * * Description: * Checks the given category bitmap against the given DOI definition and * returns a negative value if any of the categories in the bitmap do not have * a valid mapping and a zero value if all of the categories are valid. * */ static int cipso_v4_map_cat_rbm_valid(const struct cipso_v4_doi *doi_def, const unsigned char *bitmap, u32 bitmap_len) { int cat = -1; u32 bitmap_len_bits = bitmap_len * 8; u32 cipso_cat_size; u32 *cipso_array; switch (doi_def->type) { case CIPSO_V4_MAP_PASS: return 0; case CIPSO_V4_MAP_TRANS: cipso_cat_size = doi_def->map.std->cat.cipso_size; cipso_array = doi_def->map.std->cat.cipso; for (;;) { cat = cipso_v4_bitmap_walk(bitmap, bitmap_len_bits, cat + 1, 1); if (cat < 0) break; if (cat >= cipso_cat_size || cipso_array[cat] >= CIPSO_V4_INV_CAT) return -EFAULT; } if (cat == -1) return 0; break; } return -EFAULT; } /** * cipso_v4_map_cat_rbm_hton - Perform a category mapping from host to network * @doi_def: the DOI definition * @secattr: the security attributes * @net_cat: the zero'd out category bitmap in network/CIPSO format * @net_cat_len: the length of the CIPSO bitmap in bytes * * Description: * Perform a label mapping to translate a local MLS category bitmap to the * correct CIPSO bitmap using the given DOI definition. Returns the minimum * size in bytes of the network bitmap on success, negative values otherwise. 
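 *
 * (Editor's illustrative aside, not part of the original kernel-doc: the
 *  return value is the smallest whole number of bytes covering the highest
 *  set network bit. For example, if the highest network category bit set
 *  is 10, net_spot_max becomes 11 after the increment below, 11 % 8 is
 *  non-zero, and the function returns 11 / 8 + 1 == 2 bytes.)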
* */ static int cipso_v4_map_cat_rbm_hton(const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr, unsigned char *net_cat, u32 net_cat_len) { int host_spot = -1; u32 net_spot = CIPSO_V4_INV_CAT; u32 net_spot_max = 0; u32 net_clen_bits = net_cat_len * 8; u32 host_cat_size = 0; u32 *host_cat_array = NULL; if (doi_def->type == CIPSO_V4_MAP_TRANS) { host_cat_size = doi_def->map.std->cat.local_size; host_cat_array = doi_def->map.std->cat.local; } for (;;) { host_spot = netlbl_secattr_catmap_walk(secattr->attr.mls.cat, host_spot + 1); if (host_spot < 0) break; switch (doi_def->type) { case CIPSO_V4_MAP_PASS: net_spot = host_spot; break; case CIPSO_V4_MAP_TRANS: if (host_spot >= host_cat_size) return -EPERM; net_spot = host_cat_array[host_spot]; if (net_spot >= CIPSO_V4_INV_CAT) return -EPERM; break; } if (net_spot >= net_clen_bits) return -ENOSPC; cipso_v4_bitmap_setbit(net_cat, net_spot, 1); if (net_spot > net_spot_max) net_spot_max = net_spot; } if (++net_spot_max % 8) return net_spot_max / 8 + 1; return net_spot_max / 8; } /** * cipso_v4_map_cat_rbm_ntoh - Perform a category mapping from network to host * @doi_def: the DOI definition * @net_cat: the category bitmap in network/CIPSO format * @net_cat_len: the length of the CIPSO bitmap in bytes * @secattr: the security attributes * * Description: * Perform a label mapping to translate a CIPSO bitmap to the correct local * MLS category bitmap using the given DOI definition. Returns zero on * success, negative values on failure. * */ static int cipso_v4_map_cat_rbm_ntoh(const struct cipso_v4_doi *doi_def, const unsigned char *net_cat, u32 net_cat_len, struct netlbl_lsm_secattr *secattr) { int ret_val; int net_spot = -1; u32 host_spot = CIPSO_V4_INV_CAT; u32 net_clen_bits = net_cat_len * 8; u32 net_cat_size = 0; u32 *net_cat_array = NULL; if (doi_def->type == CIPSO_V4_MAP_TRANS) { net_cat_size = doi_def->map.std->cat.cipso_size; net_cat_array = doi_def->map.std->cat.cipso; } for (;;) { net_spot = cipso_v4_bitmap_walk(net_cat, net_clen_bits, net_spot + 1, 1); if (net_spot < 0) { if (net_spot == -2) return -EFAULT; return 0; } switch (doi_def->type) { case CIPSO_V4_MAP_PASS: host_spot = net_spot; break; case CIPSO_V4_MAP_TRANS: if (net_spot >= net_cat_size) return -EPERM; host_spot = net_cat_array[net_spot]; if (host_spot >= CIPSO_V4_INV_CAT) return -EPERM; break; } ret_val = netlbl_secattr_catmap_setbit(secattr->attr.mls.cat, host_spot, GFP_ATOMIC); if (ret_val != 0) return ret_val; } return -EINVAL; } /** * cipso_v4_map_cat_enum_valid - Checks to see if the categories are valid * @doi_def: the DOI definition * @enumcat: category list * @enumcat_len: length of the category list in bytes * * Description: * Checks the given categories against the given DOI definition and returns a * negative value if any of the categories do not have a valid mapping and a * zero value if all of the categories are valid. 
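 *
 * (Editor's illustrative aside, not part of the original kernel-doc: the
 *  enumerated tag carries a flat array of 16-bit big-endian category
 *  values that must be strictly increasing. The categories {2, 5, 80},
 *  for instance, would be encoded as the six bytes 00 02 00 05 00 50;
 *  the "cat <= cat_prev" test below is what enforces the ordering.)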
* */ static int cipso_v4_map_cat_enum_valid(const struct cipso_v4_doi *doi_def, const unsigned char *enumcat, u32 enumcat_len) { u16 cat; int cat_prev = -1; u32 iter; if (doi_def->type != CIPSO_V4_MAP_PASS || enumcat_len & 0x01) return -EFAULT; for (iter = 0; iter < enumcat_len; iter += 2) { cat = get_unaligned_be16(&enumcat[iter]); if (cat <= cat_prev) return -EFAULT; cat_prev = cat; } return 0; } /** * cipso_v4_map_cat_enum_hton - Perform a category mapping from host to network * @doi_def: the DOI definition * @secattr: the security attributes * @net_cat: the zero'd out category list in network/CIPSO format * @net_cat_len: the length of the CIPSO category list in bytes * * Description: * Perform a label mapping to translate a local MLS category bitmap to the * correct CIPSO category list using the given DOI definition. Returns the * size in bytes of the network category bitmap on success, negative values * otherwise. * */ static int cipso_v4_map_cat_enum_hton(const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr, unsigned char *net_cat, u32 net_cat_len) { int cat = -1; u32 cat_iter = 0; for (;;) { cat = netlbl_secattr_catmap_walk(secattr->attr.mls.cat, cat + 1); if (cat < 0) break; if ((cat_iter + 2) > net_cat_len) return -ENOSPC; *((__be16 *)&net_cat[cat_iter]) = htons(cat); cat_iter += 2; } return cat_iter; } /** * cipso_v4_map_cat_enum_ntoh - Perform a category mapping from network to host * @doi_def: the DOI definition * @net_cat: the category list in network/CIPSO format * @net_cat_len: the length of the CIPSO bitmap in bytes * @secattr: the security attributes * * Description: * Perform a label mapping to translate a CIPSO category list to the correct * local MLS category bitmap using the given DOI definition. Returns zero on * success, negative values on failure. * */ static int cipso_v4_map_cat_enum_ntoh(const struct cipso_v4_doi *doi_def, const unsigned char *net_cat, u32 net_cat_len, struct netlbl_lsm_secattr *secattr) { int ret_val; u32 iter; for (iter = 0; iter < net_cat_len; iter += 2) { ret_val = netlbl_secattr_catmap_setbit(secattr->attr.mls.cat, get_unaligned_be16(&net_cat[iter]), GFP_ATOMIC); if (ret_val != 0) return ret_val; } return 0; } /** * cipso_v4_map_cat_rng_valid - Checks to see if the categories are valid * @doi_def: the DOI definition * @rngcat: category list * @rngcat_len: length of the category list in bytes * * Description: * Checks the given categories against the given DOI definition and returns a * negative value if any of the categories do not have a valid mapping and a * zero value if all of the categories are valid. 
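 *
 * (Editor's illustrative aside, not part of the original kernel-doc: the
 *  ranged tag carries (high, low) 16-bit big-endian pairs in descending
 *  order, and a trailing low value of zero may be omitted. The ranges
 *  [7,5] and [2,0], for instance, could be encoded as the six bytes
 *  00 07 00 05 00 02 with the final zero left out, which is why the loop
 *  below substitutes cat_low = 0 when fewer than four bytes remain.)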
* */ static int cipso_v4_map_cat_rng_valid(const struct cipso_v4_doi *doi_def, const unsigned char *rngcat, u32 rngcat_len) { u16 cat_high; u16 cat_low; u32 cat_prev = CIPSO_V4_MAX_REM_CATS + 1; u32 iter; if (doi_def->type != CIPSO_V4_MAP_PASS || rngcat_len & 0x01) return -EFAULT; for (iter = 0; iter < rngcat_len; iter += 4) { cat_high = get_unaligned_be16(&rngcat[iter]); if ((iter + 4) <= rngcat_len) cat_low = get_unaligned_be16(&rngcat[iter + 2]); else cat_low = 0; if (cat_high > cat_prev) return -EFAULT; cat_prev = cat_low; } return 0; } /** * cipso_v4_map_cat_rng_hton - Perform a category mapping from host to network * @doi_def: the DOI definition * @secattr: the security attributes * @net_cat: the zero'd out category list in network/CIPSO format * @net_cat_len: the length of the CIPSO category list in bytes * * Description: * Perform a label mapping to translate a local MLS category bitmap to the * correct CIPSO category list using the given DOI definition. Returns the * size in bytes of the network category bitmap on success, negative values * otherwise. * */ static int cipso_v4_map_cat_rng_hton(const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr, unsigned char *net_cat, u32 net_cat_len) { int iter = -1; u16 array[CIPSO_V4_TAG_RNG_CAT_MAX * 2]; u32 array_cnt = 0; u32 cat_size = 0; /* make sure we don't overflow the 'array[]' variable */ if (net_cat_len > (CIPSO_V4_OPT_LEN_MAX - CIPSO_V4_HDR_LEN - CIPSO_V4_TAG_RNG_BLEN)) return -ENOSPC; for (;;) { iter = netlbl_secattr_catmap_walk(secattr->attr.mls.cat, iter + 1); if (iter < 0) break; cat_size += (iter == 0 ? 0 : sizeof(u16)); if (cat_size > net_cat_len) return -ENOSPC; array[array_cnt++] = iter; iter = netlbl_secattr_catmap_walk_rng(secattr->attr.mls.cat, iter); if (iter < 0) return -EFAULT; cat_size += sizeof(u16); if (cat_size > net_cat_len) return -ENOSPC; array[array_cnt++] = iter; } for (iter = 0; array_cnt > 0;) { *((__be16 *)&net_cat[iter]) = htons(array[--array_cnt]); iter += 2; array_cnt--; if (array[array_cnt] != 0) { *((__be16 *)&net_cat[iter]) = htons(array[array_cnt]); iter += 2; } } return cat_size; } /** * cipso_v4_map_cat_rng_ntoh - Perform a category mapping from network to host * @doi_def: the DOI definition * @net_cat: the category list in network/CIPSO format * @net_cat_len: the length of the CIPSO bitmap in bytes * @secattr: the security attributes * * Description: * Perform a label mapping to translate a CIPSO category list to the correct * local MLS category bitmap using the given DOI definition. Returns zero on * success, negative values on failure. * */ static int cipso_v4_map_cat_rng_ntoh(const struct cipso_v4_doi *doi_def, const unsigned char *net_cat, u32 net_cat_len, struct netlbl_lsm_secattr *secattr) { int ret_val; u32 net_iter; u16 cat_low; u16 cat_high; for (net_iter = 0; net_iter < net_cat_len; net_iter += 4) { cat_high = get_unaligned_be16(&net_cat[net_iter]); if ((net_iter + 4) <= net_cat_len) cat_low = get_unaligned_be16(&net_cat[net_iter + 2]); else cat_low = 0; ret_val = netlbl_secattr_catmap_setrng(secattr->attr.mls.cat, cat_low, cat_high, GFP_ATOMIC); if (ret_val != 0) return ret_val; } return 0; } /* * Protocol Handling Functions */ /** * cipso_v4_gentag_hdr - Generate a CIPSO option header * @doi_def: the DOI definition * @len: the total tag length in bytes, not including this header * @buf: the CIPSO option buffer * * Description: * Write a CIPSO header into the beginning of @buffer. 
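 *
 * (Editor's illustrative aside, not part of the original kernel-doc: for a
 *  hypothetical DOI value of 3 and a 10-byte tag, the header written below
 *  is the six bytes
 *
 *      86 10 00 00 00 03
 *
 *  i.e. IPOPT_CIPSO (134, 0x86), a total option length of 16 (0x10), and
 *  the DOI in network byte order.)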
 *
 */
static void cipso_v4_gentag_hdr(const struct cipso_v4_doi *doi_def,
				unsigned char *buf,
				u32 len)
{
	buf[0] = IPOPT_CIPSO;
	buf[1] = CIPSO_V4_HDR_LEN + len;
	*(__be32 *)&buf[2] = htonl(doi_def->doi);
}

/**
 * cipso_v4_gentag_rbm - Generate a CIPSO restricted bitmap tag (type #1)
 * @doi_def: the DOI definition
 * @secattr: the security attributes
 * @buffer: the option buffer
 * @buffer_len: length of buffer in bytes
 *
 * Description:
 * Generate a CIPSO option using the restricted bitmap tag, tag type #1. The
 * actual buffer length may be larger than the indicated size due to
 * translation between host and network category bitmaps. Returns the size of
 * the tag on success, negative values on failure.
 *
 */
static int cipso_v4_gentag_rbm(const struct cipso_v4_doi *doi_def,
			       const struct netlbl_lsm_secattr *secattr,
			       unsigned char *buffer,
			       u32 buffer_len)
{
	int ret_val;
	u32 tag_len;
	u32 level;

	if ((secattr->flags & NETLBL_SECATTR_MLS_LVL) == 0)
		return -EPERM;

	ret_val = cipso_v4_map_lvl_hton(doi_def,
					secattr->attr.mls.lvl,
					&level);
	if (ret_val != 0)
		return ret_val;

	if (secattr->flags & NETLBL_SECATTR_MLS_CAT) {
		ret_val = cipso_v4_map_cat_rbm_hton(doi_def,
						    secattr,
						    &buffer[4],
						    buffer_len - 4);
		if (ret_val < 0)
			return ret_val;

		/* This will send packets using the "optimized" format when
		 * possible as specified in section 3.4.2.6 of the
		 * CIPSO draft. */
		if (cipso_v4_rbm_optfmt && ret_val > 0 && ret_val <= 10)
			tag_len = 14;
		else
			tag_len = 4 + ret_val;
	} else
		tag_len = 4;

	buffer[0] = CIPSO_V4_TAG_RBITMAP;
	buffer[1] = tag_len;
	buffer[3] = level;

	return tag_len;
}

/**
 * cipso_v4_parsetag_rbm - Parse a CIPSO restricted bitmap tag
 * @doi_def: the DOI definition
 * @tag: the CIPSO tag
 * @secattr: the security attributes
 *
 * Description:
 * Parse a CIPSO restricted bitmap tag (tag type #1) and return the security
 * attributes in @secattr. Return zero on success, negative values on
 * failure.
 *
 */
static int cipso_v4_parsetag_rbm(const struct cipso_v4_doi *doi_def,
				 const unsigned char *tag,
				 struct netlbl_lsm_secattr *secattr)
{
	int ret_val;
	u8 tag_len = tag[1];
	u32 level;

	ret_val = cipso_v4_map_lvl_ntoh(doi_def, tag[3], &level);
	if (ret_val != 0)
		return ret_val;
	secattr->attr.mls.lvl = level;
	secattr->flags |= NETLBL_SECATTR_MLS_LVL;

	if (tag_len > 4) {
		secattr->attr.mls.cat = netlbl_secattr_catmap_alloc(GFP_ATOMIC);
		if (secattr->attr.mls.cat == NULL)
			return -ENOMEM;

		ret_val = cipso_v4_map_cat_rbm_ntoh(doi_def,
						    &tag[4],
						    tag_len - 4,
						    secattr);
		if (ret_val != 0) {
			netlbl_secattr_catmap_free(secattr->attr.mls.cat);
			return ret_val;
		}

		secattr->flags |= NETLBL_SECATTR_MLS_CAT;
	}

	return 0;
}

/**
 * cipso_v4_gentag_enum - Generate a CIPSO enumerated tag (type #2)
 * @doi_def: the DOI definition
 * @secattr: the security attributes
 * @buffer: the option buffer
 * @buffer_len: length of buffer in bytes
 *
 * Description:
 * Generate a CIPSO option using the enumerated tag, tag type #2. Returns the
 * size of the tag on success, negative values on failure.
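 *
 * (Editor's illustrative aside, not part of the original kernel-doc: with
 *  a hypothetical level of 5 and categories {2, 80}, the tag produced
 *  below would be the eight bytes 02 08 00 05 00 02 00 50, i.e. tag type 2,
 *  tag length 8, a zeroed alignment octet, the level, then each category
 *  as a 16-bit big-endian value.)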
 *
 */
static int cipso_v4_gentag_enum(const struct cipso_v4_doi *doi_def,
				const struct netlbl_lsm_secattr *secattr,
				unsigned char *buffer,
				u32 buffer_len)
{
	int ret_val;
	u32 tag_len;
	u32 level;

	if (!(secattr->flags & NETLBL_SECATTR_MLS_LVL))
		return -EPERM;

	ret_val = cipso_v4_map_lvl_hton(doi_def,
					secattr->attr.mls.lvl,
					&level);
	if (ret_val != 0)
		return ret_val;

	if (secattr->flags & NETLBL_SECATTR_MLS_CAT) {
		ret_val = cipso_v4_map_cat_enum_hton(doi_def,
						     secattr,
						     &buffer[4],
						     buffer_len - 4);
		if (ret_val < 0)
			return ret_val;

		tag_len = 4 + ret_val;
	} else
		tag_len = 4;

	buffer[0] = CIPSO_V4_TAG_ENUM;
	buffer[1] = tag_len;
	buffer[3] = level;

	return tag_len;
}

/**
 * cipso_v4_parsetag_enum - Parse a CIPSO enumerated tag
 * @doi_def: the DOI definition
 * @tag: the CIPSO tag
 * @secattr: the security attributes
 *
 * Description:
 * Parse a CIPSO enumerated tag (tag type #2) and return the security
 * attributes in @secattr. Return zero on success, negative values on
 * failure.
 *
 */
static int cipso_v4_parsetag_enum(const struct cipso_v4_doi *doi_def,
				  const unsigned char *tag,
				  struct netlbl_lsm_secattr *secattr)
{
	int ret_val;
	u8 tag_len = tag[1];
	u32 level;

	ret_val = cipso_v4_map_lvl_ntoh(doi_def, tag[3], &level);
	if (ret_val != 0)
		return ret_val;
	secattr->attr.mls.lvl = level;
	secattr->flags |= NETLBL_SECATTR_MLS_LVL;

	if (tag_len > 4) {
		secattr->attr.mls.cat = netlbl_secattr_catmap_alloc(GFP_ATOMIC);
		if (secattr->attr.mls.cat == NULL)
			return -ENOMEM;

		ret_val = cipso_v4_map_cat_enum_ntoh(doi_def,
						     &tag[4],
						     tag_len - 4,
						     secattr);
		if (ret_val != 0) {
			netlbl_secattr_catmap_free(secattr->attr.mls.cat);
			return ret_val;
		}

		secattr->flags |= NETLBL_SECATTR_MLS_CAT;
	}

	return 0;
}

/**
 * cipso_v4_gentag_rng - Generate a CIPSO ranged tag (type #5)
 * @doi_def: the DOI definition
 * @secattr: the security attributes
 * @buffer: the option buffer
 * @buffer_len: length of buffer in bytes
 *
 * Description:
 * Generate a CIPSO option using the ranged tag, tag type #5. Returns the
 * size of the tag on success, negative values on failure.
 *
 */
static int cipso_v4_gentag_rng(const struct cipso_v4_doi *doi_def,
			       const struct netlbl_lsm_secattr *secattr,
			       unsigned char *buffer,
			       u32 buffer_len)
{
	int ret_val;
	u32 tag_len;
	u32 level;

	if (!(secattr->flags & NETLBL_SECATTR_MLS_LVL))
		return -EPERM;

	ret_val = cipso_v4_map_lvl_hton(doi_def,
					secattr->attr.mls.lvl,
					&level);
	if (ret_val != 0)
		return ret_val;

	if (secattr->flags & NETLBL_SECATTR_MLS_CAT) {
		ret_val = cipso_v4_map_cat_rng_hton(doi_def,
						    secattr,
						    &buffer[4],
						    buffer_len - 4);
		if (ret_val < 0)
			return ret_val;

		tag_len = 4 + ret_val;
	} else
		tag_len = 4;

	buffer[0] = CIPSO_V4_TAG_RANGE;
	buffer[1] = tag_len;
	buffer[3] = level;

	return tag_len;
}

/**
 * cipso_v4_parsetag_rng - Parse a CIPSO ranged tag
 * @doi_def: the DOI definition
 * @tag: the CIPSO tag
 * @secattr: the security attributes
 *
 * Description:
 * Parse a CIPSO ranged tag (tag type #5) and return the security attributes
 * in @secattr. Return zero on success, negative values on failure.
 *
 */
static int cipso_v4_parsetag_rng(const struct cipso_v4_doi *doi_def,
				 const unsigned char *tag,
				 struct netlbl_lsm_secattr *secattr)
{
	int ret_val;
	u8 tag_len = tag[1];
	u32 level;

	ret_val = cipso_v4_map_lvl_ntoh(doi_def, tag[3], &level);
	if (ret_val != 0)
		return ret_val;
	secattr->attr.mls.lvl = level;
	secattr->flags |= NETLBL_SECATTR_MLS_LVL;

	if (tag_len > 4) {
		secattr->attr.mls.cat = netlbl_secattr_catmap_alloc(GFP_ATOMIC);
		if (secattr->attr.mls.cat == NULL)
			return -ENOMEM;

		ret_val = cipso_v4_map_cat_rng_ntoh(doi_def,
						    &tag[4],
						    tag_len - 4,
						    secattr);
		if (ret_val != 0) {
			netlbl_secattr_catmap_free(secattr->attr.mls.cat);
			return ret_val;
		}

		secattr->flags |= NETLBL_SECATTR_MLS_CAT;
	}

	return 0;
}

/**
 * cipso_v4_gentag_loc - Generate a CIPSO local tag (non-standard)
 * @doi_def: the DOI definition
 * @secattr: the security attributes
 * @buffer: the option buffer
 * @buffer_len: length of buffer in bytes
 *
 * Description:
 * Generate a CIPSO option using the local tag. Returns the size of the tag
 * on success, negative values on failure.
 *
 */
static int cipso_v4_gentag_loc(const struct cipso_v4_doi *doi_def,
			       const struct netlbl_lsm_secattr *secattr,
			       unsigned char *buffer,
			       u32 buffer_len)
{
	if (!(secattr->flags & NETLBL_SECATTR_SECID))
		return -EPERM;

	buffer[0] = CIPSO_V4_TAG_LOCAL;
	buffer[1] = CIPSO_V4_TAG_LOC_BLEN;
	*(u32 *)&buffer[2] = secattr->attr.secid;

	return CIPSO_V4_TAG_LOC_BLEN;
}

/**
 * cipso_v4_parsetag_loc - Parse a CIPSO local tag
 * @doi_def: the DOI definition
 * @tag: the CIPSO tag
 * @secattr: the security attributes
 *
 * Description:
 * Parse a CIPSO local tag and return the security attributes in @secattr.
 * Return zero on success, negative values on failure.
 *
 */
static int cipso_v4_parsetag_loc(const struct cipso_v4_doi *doi_def,
				 const unsigned char *tag,
				 struct netlbl_lsm_secattr *secattr)
{
	secattr->attr.secid = *(u32 *)&tag[2];
	secattr->flags |= NETLBL_SECATTR_SECID;

	return 0;
}

/**
 * cipso_v4_validate - Validate a CIPSO option
 * @skb: the packet
 * @option: the start of the option, on error it is set to point to the error
 *
 * Description:
 * This routine is called to validate a CIPSO option, it checks all of the
 * fields to ensure that they are at least valid, see the draft snippet below
 * for details. If the option is valid then a zero value is returned and
 * the value of @option is unchanged. If the option is invalid then a
 * non-zero value is returned and @option is adjusted to point to the
 * offending portion of the option. From the IETF draft ...
 *
 * "If any field within the CIPSO options, such as the DOI identifier, is not
 * recognized the IP datagram is discarded and an ICMP 'parameter problem'
 * (type 12) is generated and returned. The ICMP code field is set to 'bad
 * parameter' (code 0) and the pointer is set to the start of the CIPSO field
 * that is unrecognized."
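 *
 * (Editor's illustrative aside, not part of the original kernel-doc: on
 *  failure @option is advanced by the returned err_offset so that it points
 *  at the offending byte. An undersized option length, for example, fails
 *  with err_offset == 1, leaving @option on the length octet itself, which
 *  is the pointer value the draft's ICMP 'parameter problem' response
 *  calls for.)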
* */ int cipso_v4_validate(const struct sk_buff *skb, unsigned char **option) { unsigned char *opt = *option; unsigned char *tag; unsigned char opt_iter; unsigned char err_offset = 0; u8 opt_len; u8 tag_len; struct cipso_v4_doi *doi_def = NULL; u32 tag_iter; /* caller already checks for length values that are too large */ opt_len = opt[1]; if (opt_len < 8) { err_offset = 1; goto validate_return; } rcu_read_lock(); doi_def = cipso_v4_doi_search(get_unaligned_be32(&opt[2])); if (doi_def == NULL) { err_offset = 2; goto validate_return_locked; } opt_iter = CIPSO_V4_HDR_LEN; tag = opt + opt_iter; while (opt_iter < opt_len) { for (tag_iter = 0; doi_def->tags[tag_iter] != tag[0];) if (doi_def->tags[tag_iter] == CIPSO_V4_TAG_INVALID || ++tag_iter == CIPSO_V4_TAG_MAXCNT) { err_offset = opt_iter; goto validate_return_locked; } tag_len = tag[1]; if (tag_len > (opt_len - opt_iter)) { err_offset = opt_iter + 1; goto validate_return_locked; } switch (tag[0]) { case CIPSO_V4_TAG_RBITMAP: if (tag_len < CIPSO_V4_TAG_RBM_BLEN) { err_offset = opt_iter + 1; goto validate_return_locked; } /* We are already going to do all the verification * necessary at the socket layer so from our point of * view it is safe to turn these checks off (and less * work), however, the CIPSO draft says we should do * all the CIPSO validations here but it doesn't * really specify _exactly_ what we need to validate * ... so, just make it a sysctl tunable. */ if (cipso_v4_rbm_strictvalid) { if (cipso_v4_map_lvl_valid(doi_def, tag[3]) < 0) { err_offset = opt_iter + 3; goto validate_return_locked; } if (tag_len > CIPSO_V4_TAG_RBM_BLEN && cipso_v4_map_cat_rbm_valid(doi_def, &tag[4], tag_len - 4) < 0) { err_offset = opt_iter + 4; goto validate_return_locked; } } break; case CIPSO_V4_TAG_ENUM: if (tag_len < CIPSO_V4_TAG_ENUM_BLEN) { err_offset = opt_iter + 1; goto validate_return_locked; } if (cipso_v4_map_lvl_valid(doi_def, tag[3]) < 0) { err_offset = opt_iter + 3; goto validate_return_locked; } if (tag_len > CIPSO_V4_TAG_ENUM_BLEN && cipso_v4_map_cat_enum_valid(doi_def, &tag[4], tag_len - 4) < 0) { err_offset = opt_iter + 4; goto validate_return_locked; } break; case CIPSO_V4_TAG_RANGE: if (tag_len < CIPSO_V4_TAG_RNG_BLEN) { err_offset = opt_iter + 1; goto validate_return_locked; } if (cipso_v4_map_lvl_valid(doi_def, tag[3]) < 0) { err_offset = opt_iter + 3; goto validate_return_locked; } if (tag_len > CIPSO_V4_TAG_RNG_BLEN && cipso_v4_map_cat_rng_valid(doi_def, &tag[4], tag_len - 4) < 0) { err_offset = opt_iter + 4; goto validate_return_locked; } break; case CIPSO_V4_TAG_LOCAL: /* This is a non-standard tag that we only allow for * local connections, so if the incoming interface is * not the loopback device drop the packet. */ if (!(skb->dev->flags & IFF_LOOPBACK)) { err_offset = opt_iter; goto validate_return_locked; } if (tag_len != CIPSO_V4_TAG_LOC_BLEN) { err_offset = opt_iter + 1; goto validate_return_locked; } break; default: err_offset = opt_iter; goto validate_return_locked; } tag += tag_len; opt_iter += tag_len; } validate_return_locked: rcu_read_unlock(); validate_return: *option = opt + err_offset; return err_offset; } /** * cipso_v4_error - Send the correct response for a bad packet * @skb: the packet * @error: the error code * @gateway: CIPSO gateway flag * * Description: * Based on the error code given in @error, send an ICMP error message back to * the originating host. From the IETF draft ... 
* * "If the contents of the CIPSO [option] are valid but the security label is * outside of the configured host or port label range, the datagram is * discarded and an ICMP 'destination unreachable' (type 3) is generated and * returned. The code field of the ICMP is set to 'communication with * destination network administratively prohibited' (code 9) or to * 'communication with destination host administratively prohibited' * (code 10). The value of the code is dependent on whether the originator * of the ICMP message is acting as a CIPSO host or a CIPSO gateway. The * recipient of the ICMP message MUST be able to handle either value. The * same procedure is performed if a CIPSO [option] can not be added to an * IP packet because it is too large to fit in the IP options area." * * "If the error is triggered by receipt of an ICMP message, the message is * discarded and no response is permitted (consistent with general ICMP * processing rules)." * */ void cipso_v4_error(struct sk_buff *skb, int error, u32 gateway) { if (ip_hdr(skb)->protocol == IPPROTO_ICMP || error != -EACCES) return; if (gateway) icmp_send(skb, ICMP_DEST_UNREACH, ICMP_NET_ANO, 0); else icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_ANO, 0); } /** * cipso_v4_genopt - Generate a CIPSO option * @buf: the option buffer * @buf_len: the size of opt_buf * @doi_def: the CIPSO DOI to use * @secattr: the security attributes * * Description: * Generate a CIPSO option using the DOI definition and security attributes * passed to the function. Returns the length of the option on success and * negative values on failure. * */ static int cipso_v4_genopt(unsigned char *buf, u32 buf_len, const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr) { int ret_val; u32 iter; if (buf_len <= CIPSO_V4_HDR_LEN) return -ENOSPC; /* XXX - This code assumes only one tag per CIPSO option which isn't * really a good assumption to make but since we only support the MAC * tags right now it is a safe assumption. */ iter = 0; do { memset(buf, 0, buf_len); switch (doi_def->tags[iter]) { case CIPSO_V4_TAG_RBITMAP: ret_val = cipso_v4_gentag_rbm(doi_def, secattr, &buf[CIPSO_V4_HDR_LEN], buf_len - CIPSO_V4_HDR_LEN); break; case CIPSO_V4_TAG_ENUM: ret_val = cipso_v4_gentag_enum(doi_def, secattr, &buf[CIPSO_V4_HDR_LEN], buf_len - CIPSO_V4_HDR_LEN); break; case CIPSO_V4_TAG_RANGE: ret_val = cipso_v4_gentag_rng(doi_def, secattr, &buf[CIPSO_V4_HDR_LEN], buf_len - CIPSO_V4_HDR_LEN); break; case CIPSO_V4_TAG_LOCAL: ret_val = cipso_v4_gentag_loc(doi_def, secattr, &buf[CIPSO_V4_HDR_LEN], buf_len - CIPSO_V4_HDR_LEN); break; default: return -EPERM; } iter++; } while (ret_val < 0 && iter < CIPSO_V4_TAG_MAXCNT && doi_def->tags[iter] != CIPSO_V4_TAG_INVALID); if (ret_val < 0) return ret_val; cipso_v4_gentag_hdr(doi_def, buf, ret_val); return CIPSO_V4_HDR_LEN + ret_val; } /** * cipso_v4_sock_setattr - Add a CIPSO option to a socket * @sk: the socket * @doi_def: the CIPSO DOI to use * @secattr: the specific security attributes of the socket * * Description: * Set the CIPSO option on the given socket using the DOI definition and * security attributes passed to the function. This function requires * exclusive access to @sk, which means it either needs to be in the * process of being created or locked. Returns zero on success and negative * values on failure. 
* */ int cipso_v4_sock_setattr(struct sock *sk, const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr) { int ret_val = -EPERM; unsigned char *buf = NULL; u32 buf_len; u32 opt_len; struct ip_options *opt = NULL; struct inet_sock *sk_inet; struct inet_connection_sock *sk_conn; /* In the case of sock_create_lite(), the sock->sk field is not * defined yet but it is not a problem as the only users of these * "lite" PF_INET sockets are functions which do an accept() call * afterwards so we will label the socket as part of the accept(). */ if (sk == NULL) return 0; /* We allocate the maximum CIPSO option size here so we are probably * being a little wasteful, but it makes our life _much_ easier later * on and after all we are only talking about 40 bytes. */ buf_len = CIPSO_V4_OPT_LEN_MAX; buf = kmalloc(buf_len, GFP_ATOMIC); if (buf == NULL) { ret_val = -ENOMEM; goto socket_setattr_failure; } ret_val = cipso_v4_genopt(buf, buf_len, doi_def, secattr); if (ret_val < 0) goto socket_setattr_failure; buf_len = ret_val; /* We can't use ip_options_get() directly because it makes a call to * ip_options_get_alloc() which allocates memory with GFP_KERNEL and * we won't always have CAP_NET_RAW even though we _always_ want to * set the IPOPT_CIPSO option. */ opt_len = (buf_len + 3) & ~3; opt = kzalloc(sizeof(*opt) + opt_len, GFP_ATOMIC); if (opt == NULL) { ret_val = -ENOMEM; goto socket_setattr_failure; } memcpy(opt->__data, buf, buf_len); opt->optlen = opt_len; opt->cipso = sizeof(struct iphdr); kfree(buf); buf = NULL; sk_inet = inet_sk(sk); if (sk_inet->is_icsk) { sk_conn = inet_csk(sk); if (sk_inet->opt) sk_conn->icsk_ext_hdr_len -= sk_inet->opt->optlen; sk_conn->icsk_ext_hdr_len += opt->optlen; sk_conn->icsk_sync_mss(sk, sk_conn->icsk_pmtu_cookie); } opt = xchg(&sk_inet->opt, opt); kfree(opt); return 0; socket_setattr_failure: kfree(buf); kfree(opt); return ret_val; } /** * cipso_v4_req_setattr - Add a CIPSO option to a connection request socket * @req: the connection request socket * @doi_def: the CIPSO DOI to use * @secattr: the specific security attributes of the socket * * Description: * Set the CIPSO option on the given socket using the DOI definition and * security attributes passed to the function. Returns zero on success and * negative values on failure. * */ int cipso_v4_req_setattr(struct request_sock *req, const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr) { int ret_val = -EPERM; unsigned char *buf = NULL; u32 buf_len; u32 opt_len; struct ip_options *opt = NULL; struct inet_request_sock *req_inet; /* We allocate the maximum CIPSO option size here so we are probably * being a little wasteful, but it makes our life _much_ easier later * on and after all we are only talking about 40 bytes. */ buf_len = CIPSO_V4_OPT_LEN_MAX; buf = kmalloc(buf_len, GFP_ATOMIC); if (buf == NULL) { ret_val = -ENOMEM; goto req_setattr_failure; } ret_val = cipso_v4_genopt(buf, buf_len, doi_def, secattr); if (ret_val < 0) goto req_setattr_failure; buf_len = ret_val; /* We can't use ip_options_get() directly because it makes a call to * ip_options_get_alloc() which allocates memory with GFP_KERNEL and * we won't always have CAP_NET_RAW even though we _always_ want to * set the IPOPT_CIPSO option. 
	 */
	opt_len = (buf_len + 3) & ~3;
	opt = kzalloc(sizeof(*opt) + opt_len, GFP_ATOMIC);
	if (opt == NULL) {
		ret_val = -ENOMEM;
		goto req_setattr_failure;
	}
	memcpy(opt->__data, buf, buf_len);
	opt->optlen = opt_len;
	opt->cipso = sizeof(struct iphdr);
	kfree(buf);
	buf = NULL;

	req_inet = inet_rsk(req);
	opt = xchg(&req_inet->opt, opt);
	kfree(opt);

	return 0;

req_setattr_failure:
	kfree(buf);
	kfree(opt);
	return ret_val;
}

/**
 * cipso_v4_delopt - Delete the CIPSO option from a set of IP options
 * @opt_ptr: IP option pointer
 *
 * Description:
 * Deletes the CIPSO IP option from a set of IP options and makes the necessary
 * adjustments to the IP option structure. Returns zero on success, negative
 * values on failure.
 *
 */
static int cipso_v4_delopt(struct ip_options **opt_ptr)
{
	int hdr_delta = 0;
	struct ip_options *opt = *opt_ptr;

	if (opt->srr || opt->rr || opt->ts || opt->router_alert) {
		u8 cipso_len;
		u8 cipso_off;
		unsigned char *cipso_ptr;
		int iter;
		int optlen_new;

		cipso_off = opt->cipso - sizeof(struct iphdr);
		cipso_ptr = &opt->__data[cipso_off];
		cipso_len = cipso_ptr[1];

		if (opt->srr > opt->cipso)
			opt->srr -= cipso_len;
		if (opt->rr > opt->cipso)
			opt->rr -= cipso_len;
		if (opt->ts > opt->cipso)
			opt->ts -= cipso_len;
		if (opt->router_alert > opt->cipso)
			opt->router_alert -= cipso_len;
		opt->cipso = 0;

		memmove(cipso_ptr, cipso_ptr + cipso_len,
			opt->optlen - cipso_off - cipso_len);

		/* determining the new total option length is tricky because of
		 * the padding necessary, the only thing I can think to do at
		 * this point is walk the options one-by-one, skipping the
		 * padding at the end to determine the actual option size and
		 * from there we can determine the new total option length */
		iter = 0;
		optlen_new = 0;
		while (iter < opt->optlen)
			if (opt->__data[iter] != IPOPT_NOP) {
				iter += opt->__data[iter + 1];
				optlen_new = iter;
			} else
				iter++;
		hdr_delta = opt->optlen;
		opt->optlen = (optlen_new + 3) & ~3;
		hdr_delta -= opt->optlen;
	} else {
		/* only the cipso option was present on the socket so we can
		 * remove the entire option struct */
		*opt_ptr = NULL;
		hdr_delta = opt->optlen;
		kfree(opt);
	}

	return hdr_delta;
}

/**
 * cipso_v4_sock_delattr - Delete the CIPSO option from a socket
 * @sk: the socket
 *
 * Description:
 * Removes the CIPSO option from a socket, if present.
 *
 */
void cipso_v4_sock_delattr(struct sock *sk)
{
	int hdr_delta;
	struct ip_options *opt;
	struct inet_sock *sk_inet;

	sk_inet = inet_sk(sk);
	opt = sk_inet->opt;
	if (opt == NULL || opt->cipso == 0)
		return;

	hdr_delta = cipso_v4_delopt(&sk_inet->opt);
	if (sk_inet->is_icsk && hdr_delta > 0) {
		struct inet_connection_sock *sk_conn = inet_csk(sk);
		sk_conn->icsk_ext_hdr_len -= hdr_delta;
		sk_conn->icsk_sync_mss(sk, sk_conn->icsk_pmtu_cookie);
	}
}

/**
 * cipso_v4_req_delattr - Delete the CIPSO option from a request socket
 * @req: the request socket
 *
 * Description:
 * Removes the CIPSO option from a request socket, if present.
 *
 */
void cipso_v4_req_delattr(struct request_sock *req)
{
	struct ip_options *opt;
	struct inet_request_sock *req_inet;

	req_inet = inet_rsk(req);
	opt = req_inet->opt;
	if (opt == NULL || opt->cipso == 0)
		return;

	cipso_v4_delopt(&req_inet->opt);
}

/**
 * cipso_v4_getattr - Helper function for the cipso_v4_*_getattr functions
 * @cipso: the CIPSO v4 option
 * @secattr: the security attributes
 *
 * Description:
 * Inspect @cipso and return the security attributes in @secattr. Returns zero
 * on success and negative values on failure.
 *
 */
static int cipso_v4_getattr(const unsigned char *cipso,
			    struct netlbl_lsm_secattr *secattr)
{
	int ret_val = -ENOMSG;
	u32 doi;
	struct cipso_v4_doi *doi_def;

	if (cipso_v4_cache_check(cipso, cipso[1], secattr) == 0)
		return 0;

	doi = get_unaligned_be32(&cipso[2]);
	rcu_read_lock();
	doi_def = cipso_v4_doi_search(doi);
	if (doi_def == NULL)
		goto getattr_return;
	/* XXX - This code assumes only one tag per CIPSO option which isn't
	 * really a good assumption to make but since we only support the MAC
	 * tags right now it is a safe assumption. */
	switch (cipso[6]) {
	case CIPSO_V4_TAG_RBITMAP:
		ret_val = cipso_v4_parsetag_rbm(doi_def, &cipso[6], secattr);
		break;
	case CIPSO_V4_TAG_ENUM:
		ret_val = cipso_v4_parsetag_enum(doi_def, &cipso[6], secattr);
		break;
	case CIPSO_V4_TAG_RANGE:
		ret_val = cipso_v4_parsetag_rng(doi_def, &cipso[6], secattr);
		break;
	case CIPSO_V4_TAG_LOCAL:
		ret_val = cipso_v4_parsetag_loc(doi_def, &cipso[6], secattr);
		break;
	}
	if (ret_val == 0)
		secattr->type = NETLBL_NLTYPE_CIPSOV4;

getattr_return:
	rcu_read_unlock();
	return ret_val;
}

/**
 * cipso_v4_sock_getattr - Get the security attributes from a sock
 * @sk: the sock
 * @secattr: the security attributes
 *
 * Description:
 * Query @sk to see if there is a CIPSO option attached to the sock and if
 * there is return the CIPSO security attributes in @secattr. This function
 * requires that @sk be locked, or privately held, but it does not do any
 * locking itself. Returns zero on success and negative values on failure.
 *
 */
int cipso_v4_sock_getattr(struct sock *sk, struct netlbl_lsm_secattr *secattr)
{
	struct ip_options *opt;

	opt = inet_sk(sk)->opt;
	if (opt == NULL || opt->cipso == 0)
		return -ENOMSG;

	return cipso_v4_getattr(opt->__data + opt->cipso - sizeof(struct iphdr),
				secattr);
}

/**
 * cipso_v4_skbuff_setattr - Set the CIPSO option on a packet
 * @skb: the packet
 * @doi_def: the CIPSO DOI to use
 * @secattr: the security attributes
 *
 * Description:
 * Set the CIPSO option on the given packet based on the security attributes.
 * Returns zero on success and negative values on failure.
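 *
 * (Editor's illustrative aside, not part of the original kernel-doc: the
 *  rewrite below works from len_delta, the difference between the aligned
 *  length of the new CIPSO option and the packet's current options length;
 *  extra headroom is needed, and skb_push() performed, only when len_delta
 *  is positive.)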
* */ int cipso_v4_skbuff_setattr(struct sk_buff *skb, const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr) { int ret_val; struct iphdr *iph; struct ip_options *opt = &IPCB(skb)->opt; unsigned char buf[CIPSO_V4_OPT_LEN_MAX]; u32 buf_len = CIPSO_V4_OPT_LEN_MAX; u32 opt_len; int len_delta; ret_val = cipso_v4_genopt(buf, buf_len, doi_def, secattr); if (ret_val < 0) return ret_val; buf_len = ret_val; opt_len = (buf_len + 3) & ~3; /* we overwrite any existing options to ensure that we have enough * room for the CIPSO option, the reason is that we _need_ to guarantee * that the security label is applied to the packet - we do the same * thing when using the socket options and it hasn't caused a problem, * if we need to we can always revisit this choice later */ len_delta = opt_len - opt->optlen; /* if we don't ensure enough headroom we could panic on the skb_push() * call below so make sure we have enough, we are also "mangling" the * packet so we should probably do a copy-on-write call anyway */ ret_val = skb_cow(skb, skb_headroom(skb) + len_delta); if (ret_val < 0) return ret_val; if (len_delta > 0) { /* we assume that the header + opt->optlen have already been * "pushed" in ip_options_build() or similar */ iph = ip_hdr(skb); skb_push(skb, len_delta); memmove((char *)iph - len_delta, iph, iph->ihl << 2); skb_reset_network_header(skb); iph = ip_hdr(skb); } else if (len_delta < 0) { iph = ip_hdr(skb); memset(iph + 1, IPOPT_NOP, opt->optlen); } else iph = ip_hdr(skb); if (opt->optlen > 0) memset(opt, 0, sizeof(*opt)); opt->optlen = opt_len; opt->cipso = sizeof(struct iphdr); opt->is_changed = 1; /* we have to do the following because we are being called from a * netfilter hook which means the packet already has had the header * fields populated and the checksum calculated - yes this means we * are doing more work than needed but we do it to keep the core * stack clean and tidy */ memcpy(iph + 1, buf, buf_len); if (opt_len > buf_len) memset((char *)(iph + 1) + buf_len, 0, opt_len - buf_len); if (len_delta != 0) { iph->ihl = 5 + (opt_len >> 2); iph->tot_len = htons(skb->len); } ip_send_check(iph); return 0; } /** * cipso_v4_skbuff_delattr - Delete any CIPSO options from a packet * @skb: the packet * * Description: * Removes any and all CIPSO options from the given packet. Returns zero on * success, negative values on failure. * */ int cipso_v4_skbuff_delattr(struct sk_buff *skb) { int ret_val; struct iphdr *iph; struct ip_options *opt = &IPCB(skb)->opt; unsigned char *cipso_ptr; if (opt->cipso == 0) return 0; /* since we are changing the packet we should make a copy */ ret_val = skb_cow(skb, skb_headroom(skb)); if (ret_val < 0) return ret_val; /* the easiest thing to do is just replace the cipso option with noop * options since we don't change the size of the packet, although we * still need to recalculate the checksum */ iph = ip_hdr(skb); cipso_ptr = (unsigned char *)iph + opt->cipso; memset(cipso_ptr, IPOPT_NOOP, cipso_ptr[1]); opt->cipso = 0; opt->is_changed = 1; ip_send_check(iph); return 0; } /** * cipso_v4_skbuff_getattr - Get the security attributes from the CIPSO option * @skb: the packet * @secattr: the security attributes * * Description: * Parse the given packet's CIPSO option and return the security attributes. * Returns zero on success and negative values on failure. 
* */ int cipso_v4_skbuff_getattr(const struct sk_buff *skb, struct netlbl_lsm_secattr *secattr) { return cipso_v4_getattr(CIPSO_V4_OPTPTR(skb), secattr); } /* * Setup Functions */ /** * cipso_v4_init - Initialize the CIPSO module * * Description: * Initialize the CIPSO module and prepare it for use. Returns zero on success * and negative values on failure. * */ static int __init cipso_v4_init(void) { int ret_val; ret_val = cipso_v4_cache_init(); if (ret_val != 0) panic("Failed to initialize the CIPSO/IPv4 cache (%d)\n", ret_val); return 0; } subsys_initcall(cipso_v4_init);
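/*
 * Editor's note: the block below is an illustrative, self-contained
 * userspace sketch and not part of the kernel file above or below. It
 * shows the on-the-wire layout that cipso_v4_gentag_hdr() and
 * cipso_v4_gentag_rbm() produce for a category-free label; the DEMO_*
 * constants simply mirror the values defined in this file, and the DOI
 * and level values are arbitrary.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

#define DEMO_IPOPT_CIPSO 134	/* CIPSO IP option type (0x86) */
#define DEMO_HDR_LEN 6		/* type + length + 4-byte DOI */

/* Build a CIPSO option holding a single tag-type-1 tag with no categories. */
static size_t demo_cipso_build(uint8_t *buf, uint32_t doi, uint8_t level)
{
	uint32_t doi_be = htonl(doi);
	size_t tag_len = 4;	/* type, length, alignment octet, level */

	buf[0] = DEMO_IPOPT_CIPSO;
	buf[1] = (uint8_t)(DEMO_HDR_LEN + tag_len);
	memcpy(&buf[2], &doi_be, sizeof(doi_be));
	buf[6] = 1;		/* tag type #1, restricted bitmap */
	buf[7] = (uint8_t)tag_len;
	buf[8] = 0;		/* alignment octet, always zero */
	buf[9] = level;
	return DEMO_HDR_LEN + tag_len;
}

int main(void)
{
	uint8_t opt[40];	/* CIPSO_V4_OPT_LEN_MAX */
	size_t len = demo_cipso_build(opt, 3, 5);
	size_t i;

	for (i = 0; i < len; i++)
		printf("%02x ", opt[i]);
	printf("\n");		/* expected: 86 0a 00 00 00 03 01 04 00 05 */
	return 0;
}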
/* * CIPSO - Commercial IP Security Option * * This is an implementation of the CIPSO 2.2 protocol as specified in * draft-ietf-cipso-ipsecurity-01.txt with additional tag types as found in * FIPS-188. While CIPSO never became a full IETF RFC standard many vendors * have chosen to adopt the protocol and over the years it has become a * de-facto standard for labeled networking. * * The CIPSO draft specification can be found in the kernel's Documentation * directory as well as the following URL: * http://tools.ietf.org/id/draft-ietf-cipso-ipsecurity-01.txt * The FIPS-188 specification can be found at the following URL: * http://www.itl.nist.gov/fipspubs/fip188.htm * * Author: Paul Moore <paul.moore@hp.com> * */ /* * (c) Copyright Hewlett-Packard Development Company, L.P., 2006, 2008 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See * the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/init.h> #include <linux/types.h> #include <linux/rcupdate.h> #include <linux/list.h> #include <linux/spinlock.h> #include <linux/string.h> #include <linux/jhash.h> #include <linux/audit.h> #include <linux/slab.h> #include <net/ip.h> #include <net/icmp.h> #include <net/tcp.h> #include <net/netlabel.h> #include <net/cipso_ipv4.h> #include <asm/atomic.h> #include <asm/bug.h> #include <asm/unaligned.h> /* List of available DOI definitions */ /* XXX - This currently assumes a minimal number of different DOIs in use, * if in practice there are a lot of different DOIs this list should * probably be turned into a hash table or something similar so we * can do quick lookups. */ static DEFINE_SPINLOCK(cipso_v4_doi_list_lock); static LIST_HEAD(cipso_v4_doi_list); /* Label mapping cache */ int cipso_v4_cache_enabled = 1; int cipso_v4_cache_bucketsize = 10; #define CIPSO_V4_CACHE_BUCKETBITS 7 #define CIPSO_V4_CACHE_BUCKETS (1 << CIPSO_V4_CACHE_BUCKETBITS) #define CIPSO_V4_CACHE_REORDERLIMIT 10 struct cipso_v4_map_cache_bkt { spinlock_t lock; u32 size; struct list_head list; }; struct cipso_v4_map_cache_entry { u32 hash; unsigned char *key; size_t key_len; struct netlbl_lsm_cache *lsm_data; u32 activity; struct list_head list; }; static struct cipso_v4_map_cache_bkt *cipso_v4_cache = NULL; /* Restricted bitmap (tag #1) flags */ int cipso_v4_rbm_optfmt = 0; int cipso_v4_rbm_strictvalid = 1; /* * Protocol Constants */ /* Maximum size of the CIPSO IP option, derived from the fact that the maximum * IPv4 header size is 60 bytes and the base IPv4 header is 20 bytes long. */ #define CIPSO_V4_OPT_LEN_MAX 40 /* Length of the base CIPSO option, this includes the option type (1 byte), the * option length (1 byte), and the DOI (4 bytes). */ #define CIPSO_V4_HDR_LEN 6 /* Base length of the restrictive category bitmap tag (tag #1). */ #define CIPSO_V4_TAG_RBM_BLEN 4 /* Base length of the enumerated category tag (tag #2). */ #define CIPSO_V4_TAG_ENUM_BLEN 4 /* Base length of the ranged categories bitmap tag (tag #5). 
 */
#define CIPSO_V4_TAG_RNG_BLEN         4
/* The maximum number of category ranges permitted in the ranged category tag
 * (tag #5). You may note that the IETF draft states that the maximum number
 * of category ranges is 7, but if the low end of the last category range is
 * zero then it is possible to fit 8 category ranges because the zero should
 * be omitted. */
#define CIPSO_V4_TAG_RNG_CAT_MAX      8

/* Base length of the local tag (non-standard tag).
 * Tag definition (may change between kernel versions)
 *
 * 0          8          16         24         32
 * +----------+----------+----------+----------+
 * | 10000000 | 00000110 | 32-bit secid value  |
 * +----------+----------+----------+----------+
 * | in (host byte order)|
 * +----------+----------+
 *
 */
#define CIPSO_V4_TAG_LOC_BLEN         6

/*
 * Helper Functions
 */

/**
 * cipso_v4_bitmap_walk - Walk a bitmap looking for a bit
 * @bitmap: the bitmap
 * @bitmap_len: length in bits
 * @offset: starting offset
 * @state: if non-zero, look for a set (1) bit else look for a cleared (0) bit
 *
 * Description:
 * Starting at @offset, walk the bitmap from left to right until either the
 * desired bit is found or we reach the end. Return the bit offset, -1 if
 * not found, or -2 if error.
 */
static int cipso_v4_bitmap_walk(const unsigned char *bitmap,
				u32 bitmap_len,
				u32 offset,
				u8 state)
{
	u32 bit_spot;
	u32 byte_offset;
	unsigned char bitmask;
	unsigned char byte;

	/* gcc always rounds to zero when doing integer division */
	byte_offset = offset / 8;
	byte = bitmap[byte_offset];
	bit_spot = offset;
	bitmask = 0x80 >> (offset % 8);

	while (bit_spot < bitmap_len) {
		if ((state && (byte & bitmask) == bitmask) ||
		    (state == 0 && (byte & bitmask) == 0))
			return bit_spot;

		bit_spot++;
		bitmask >>= 1;
		if (bitmask == 0) {
			byte = bitmap[++byte_offset];
			bitmask = 0x80;
		}
	}

	return -1;
}

/**
 * cipso_v4_bitmap_setbit - Sets a single bit in a bitmap
 * @bitmap: the bitmap
 * @bit: the bit
 * @state: if non-zero, set the bit (1) else clear the bit (0)
 *
 * Description:
 * Set a single bit in the bitmask.
 */
static void cipso_v4_bitmap_setbit(unsigned char *bitmap,
				   u32 bit,
				   u8 state)
{
	u32 byte_spot;
	u8 bitmask;

	/* gcc always rounds to zero when doing integer division */
	byte_spot = bit / 8;
	bitmask = 0x80 >> (bit % 8);
	if (state)
		bitmap[byte_spot] |= bitmask;
	else
		bitmap[byte_spot] &= ~bitmask;
}

/**
 * cipso_v4_cache_entry_free - Frees a cache entry
 * @entry: the entry to free
 *
 * Description:
 * This function frees the memory associated with a cache entry including the
 * LSM cache data if there are no longer any users, i.e. reference count == 0.
 *
 */
static void cipso_v4_cache_entry_free(struct cipso_v4_map_cache_entry *entry)
{
	if (entry->lsm_data)
		netlbl_secattr_cache_free(entry->lsm_data);
	kfree(entry->key);
	kfree(entry);
}

/**
 * cipso_v4_map_cache_hash - Hashing function for the CIPSO cache
 * @key: the hash key
 * @key_len: the length of the key in bytes
 *
 * Description:
 * The CIPSO tag hashing function. Returns a 32-bit hash value.
 *
 */
static u32 cipso_v4_map_cache_hash(const unsigned char *key, u32 key_len)
{
	return jhash(key, key_len, 0);
}

/*
 * Label Mapping Cache Functions
 */

/**
 * cipso_v4_cache_init - Initialize the CIPSO cache
 *
 * Description:
 * Initializes the CIPSO label mapping cache, this function should be called
 * before any of the other functions defined in this file. Returns zero on
 * success, negative values on error.
 *
 */
static int cipso_v4_cache_init(void)
{
	u32 iter;

	cipso_v4_cache = kcalloc(CIPSO_V4_CACHE_BUCKETS,
				 sizeof(struct cipso_v4_map_cache_bkt),
				 GFP_KERNEL);
	if (cipso_v4_cache == NULL)
		return -ENOMEM;

	for (iter = 0; iter < CIPSO_V4_CACHE_BUCKETS; iter++) {
		spin_lock_init(&cipso_v4_cache[iter].lock);
		cipso_v4_cache[iter].size = 0;
		INIT_LIST_HEAD(&cipso_v4_cache[iter].list);
	}

	return 0;
}

/**
 * cipso_v4_cache_invalidate - Invalidates the current CIPSO cache
 *
 * Description:
 * Invalidates and frees any entries in the CIPSO cache.
 *
 */
void cipso_v4_cache_invalidate(void)
{
	struct cipso_v4_map_cache_entry *entry, *tmp_entry;
	u32 iter;

	for (iter = 0; iter < CIPSO_V4_CACHE_BUCKETS; iter++) {
		spin_lock_bh(&cipso_v4_cache[iter].lock);
		list_for_each_entry_safe(entry, tmp_entry,
					 &cipso_v4_cache[iter].list, list) {
			list_del(&entry->list);
			cipso_v4_cache_entry_free(entry);
		}
		cipso_v4_cache[iter].size = 0;
		spin_unlock_bh(&cipso_v4_cache[iter].lock);
	}
}

/**
 * cipso_v4_cache_check - Check the CIPSO cache for a label mapping
 * @key: the buffer to check
 * @key_len: buffer length in bytes
 * @secattr: the security attribute struct to use
 *
 * Description:
 * This function checks the cache to see if a label mapping already exists for
 * the given key. If there is a match then the cache is adjusted and the
 * @secattr struct is populated with the correct LSM security attributes. The
 * cache is adjusted in the following manner if the entry is not already the
 * first in the cache bucket:
 *
 * 1. The cache entry's activity counter is incremented
 * 2. The previous (higher ranking) entry's activity counter is decremented
 * 3. If the difference between the two activity counters is greater than
 *    CIPSO_V4_CACHE_REORDERLIMIT the two entries are swapped
 *
 * Returns zero on success, -ENOENT for a cache miss, and other negative values
 * on error.
 *
 */
static int cipso_v4_cache_check(const unsigned char *key,
				u32 key_len,
				struct netlbl_lsm_secattr *secattr)
{
	u32 bkt;
	struct cipso_v4_map_cache_entry *entry;
	struct cipso_v4_map_cache_entry *prev_entry = NULL;
	u32 hash;

	if (!cipso_v4_cache_enabled)
		return -ENOENT;

	hash = cipso_v4_map_cache_hash(key, key_len);
	bkt = hash & (CIPSO_V4_CACHE_BUCKETS - 1);
	spin_lock_bh(&cipso_v4_cache[bkt].lock);
	list_for_each_entry(entry, &cipso_v4_cache[bkt].list, list) {
		if (entry->hash == hash &&
		    entry->key_len == key_len &&
		    memcmp(entry->key, key, key_len) == 0) {
			entry->activity += 1;
			atomic_inc(&entry->lsm_data->refcount);
			secattr->cache = entry->lsm_data;
			secattr->flags |= NETLBL_SECATTR_CACHE;
			secattr->type = NETLBL_NLTYPE_CIPSOV4;
			if (prev_entry == NULL) {
				spin_unlock_bh(&cipso_v4_cache[bkt].lock);
				return 0;
			}

			if (prev_entry->activity > 0)
				prev_entry->activity -= 1;
			if (entry->activity > prev_entry->activity &&
			    entry->activity - prev_entry->activity >
			    CIPSO_V4_CACHE_REORDERLIMIT) {
				__list_del(entry->list.prev,
					   entry->list.next);
				__list_add(&entry->list,
					   prev_entry->list.prev,
					   &prev_entry->list);
			}

			spin_unlock_bh(&cipso_v4_cache[bkt].lock);
			return 0;
		}
		prev_entry = entry;
	}
	spin_unlock_bh(&cipso_v4_cache[bkt].lock);

	return -ENOENT;
}

/**
 * cipso_v4_cache_add - Add an entry to the CIPSO cache
 * @skb: the packet
 * @secattr: the packet's security attributes
 *
 * Description:
 * Add a new entry into the CIPSO label mapping cache. Add the new entry to
 * the head of the cache bucket's list; if the cache bucket is out of room,
 * remove the last entry in the list first. It is important to note that
 * there is currently no checking for duplicate keys.
Returns zero on success, * negative values on failure. * */ int cipso_v4_cache_add(const struct sk_buff *skb, const struct netlbl_lsm_secattr *secattr) { int ret_val = -EPERM; u32 bkt; struct cipso_v4_map_cache_entry *entry = NULL; struct cipso_v4_map_cache_entry *old_entry = NULL; unsigned char *cipso_ptr; u32 cipso_ptr_len; if (!cipso_v4_cache_enabled || cipso_v4_cache_bucketsize <= 0) return 0; cipso_ptr = CIPSO_V4_OPTPTR(skb); cipso_ptr_len = cipso_ptr[1]; entry = kzalloc(sizeof(*entry), GFP_ATOMIC); if (entry == NULL) return -ENOMEM; entry->key = kmemdup(cipso_ptr, cipso_ptr_len, GFP_ATOMIC); if (entry->key == NULL) { ret_val = -ENOMEM; goto cache_add_failure; } entry->key_len = cipso_ptr_len; entry->hash = cipso_v4_map_cache_hash(cipso_ptr, cipso_ptr_len); atomic_inc(&secattr->cache->refcount); entry->lsm_data = secattr->cache; bkt = entry->hash & (CIPSO_V4_CACHE_BUCKETS - 1); spin_lock_bh(&cipso_v4_cache[bkt].lock); if (cipso_v4_cache[bkt].size < cipso_v4_cache_bucketsize) { list_add(&entry->list, &cipso_v4_cache[bkt].list); cipso_v4_cache[bkt].size += 1; } else { old_entry = list_entry(cipso_v4_cache[bkt].list.prev, struct cipso_v4_map_cache_entry, list); list_del(&old_entry->list); list_add(&entry->list, &cipso_v4_cache[bkt].list); cipso_v4_cache_entry_free(old_entry); } spin_unlock_bh(&cipso_v4_cache[bkt].lock); return 0; cache_add_failure: if (entry) cipso_v4_cache_entry_free(entry); return ret_val; } /* * DOI List Functions */ /** * cipso_v4_doi_search - Searches for a DOI definition * @doi: the DOI to search for * * Description: * Search the DOI definition list for a DOI definition with a DOI value that * matches @doi. The caller is responsible for calling rcu_read_[un]lock(). * Returns a pointer to the DOI definition on success and NULL on failure. */ static struct cipso_v4_doi *cipso_v4_doi_search(u32 doi) { struct cipso_v4_doi *iter; list_for_each_entry_rcu(iter, &cipso_v4_doi_list, list) if (iter->doi == doi && atomic_read(&iter->refcount)) return iter; return NULL; } /** * cipso_v4_doi_add - Add a new DOI to the CIPSO protocol engine * @doi_def: the DOI structure * @audit_info: NetLabel audit information * * Description: * The caller defines a new DOI for use by the CIPSO engine and calls this * function to add it to the list of acceptable domains. The caller must * ensure that the mapping table specified in @doi_def->map meets all of the * requirements of the mapping type (see cipso_ipv4.h for details). Returns * zero on success and non-zero on failure. 
* */ int cipso_v4_doi_add(struct cipso_v4_doi *doi_def, struct netlbl_audit *audit_info) { int ret_val = -EINVAL; u32 iter; u32 doi; u32 doi_type; struct audit_buffer *audit_buf; doi = doi_def->doi; doi_type = doi_def->type; if (doi_def == NULL || doi_def->doi == CIPSO_V4_DOI_UNKNOWN) goto doi_add_return; for (iter = 0; iter < CIPSO_V4_TAG_MAXCNT; iter++) { switch (doi_def->tags[iter]) { case CIPSO_V4_TAG_RBITMAP: break; case CIPSO_V4_TAG_RANGE: case CIPSO_V4_TAG_ENUM: if (doi_def->type != CIPSO_V4_MAP_PASS) goto doi_add_return; break; case CIPSO_V4_TAG_LOCAL: if (doi_def->type != CIPSO_V4_MAP_LOCAL) goto doi_add_return; break; case CIPSO_V4_TAG_INVALID: if (iter == 0) goto doi_add_return; break; default: goto doi_add_return; } } atomic_set(&doi_def->refcount, 1); spin_lock(&cipso_v4_doi_list_lock); if (cipso_v4_doi_search(doi_def->doi) != NULL) { spin_unlock(&cipso_v4_doi_list_lock); ret_val = -EEXIST; goto doi_add_return; } list_add_tail_rcu(&doi_def->list, &cipso_v4_doi_list); spin_unlock(&cipso_v4_doi_list_lock); ret_val = 0; doi_add_return: audit_buf = netlbl_audit_start(AUDIT_MAC_CIPSOV4_ADD, audit_info); if (audit_buf != NULL) { const char *type_str; switch (doi_type) { case CIPSO_V4_MAP_TRANS: type_str = "trans"; break; case CIPSO_V4_MAP_PASS: type_str = "pass"; break; case CIPSO_V4_MAP_LOCAL: type_str = "local"; break; default: type_str = "(unknown)"; } audit_log_format(audit_buf, " cipso_doi=%u cipso_type=%s res=%u", doi, type_str, ret_val == 0 ? 1 : 0); audit_log_end(audit_buf); } return ret_val; } /** * cipso_v4_doi_free - Frees a DOI definition * @entry: the entry's RCU field * * Description: * This function frees all of the memory associated with a DOI definition. * */ void cipso_v4_doi_free(struct cipso_v4_doi *doi_def) { if (doi_def == NULL) return; switch (doi_def->type) { case CIPSO_V4_MAP_TRANS: kfree(doi_def->map.std->lvl.cipso); kfree(doi_def->map.std->lvl.local); kfree(doi_def->map.std->cat.cipso); kfree(doi_def->map.std->cat.local); break; } kfree(doi_def); } /** * cipso_v4_doi_free_rcu - Frees a DOI definition via the RCU pointer * @entry: the entry's RCU field * * Description: * This function is designed to be used as a callback to the call_rcu() * function so that the memory allocated to the DOI definition can be released * safely. * */ static void cipso_v4_doi_free_rcu(struct rcu_head *entry) { struct cipso_v4_doi *doi_def; doi_def = container_of(entry, struct cipso_v4_doi, rcu); cipso_v4_doi_free(doi_def); } /** * cipso_v4_doi_remove - Remove an existing DOI from the CIPSO protocol engine * @doi: the DOI value * @audit_secid: the LSM secid to use in the audit message * * Description: * Removes a DOI definition from the CIPSO engine. The NetLabel routines will * be called to release their own LSM domain mappings as well as our own * domain list. Returns zero on success and negative values on failure. 
* */ int cipso_v4_doi_remove(u32 doi, struct netlbl_audit *audit_info) { int ret_val; struct cipso_v4_doi *doi_def; struct audit_buffer *audit_buf; spin_lock(&cipso_v4_doi_list_lock); doi_def = cipso_v4_doi_search(doi); if (doi_def == NULL) { spin_unlock(&cipso_v4_doi_list_lock); ret_val = -ENOENT; goto doi_remove_return; } if (!atomic_dec_and_test(&doi_def->refcount)) { spin_unlock(&cipso_v4_doi_list_lock); ret_val = -EBUSY; goto doi_remove_return; } list_del_rcu(&doi_def->list); spin_unlock(&cipso_v4_doi_list_lock); cipso_v4_cache_invalidate(); call_rcu(&doi_def->rcu, cipso_v4_doi_free_rcu); ret_val = 0; doi_remove_return: audit_buf = netlbl_audit_start(AUDIT_MAC_CIPSOV4_DEL, audit_info); if (audit_buf != NULL) { audit_log_format(audit_buf, " cipso_doi=%u res=%u", doi, ret_val == 0 ? 1 : 0); audit_log_end(audit_buf); } return ret_val; } /** * cipso_v4_doi_getdef - Returns a reference to a valid DOI definition * @doi: the DOI value * * Description: * Searches for a valid DOI definition and if one is found it is returned to * the caller. Otherwise NULL is returned. The caller must ensure that * rcu_read_lock() is held while accessing the returned definition and the DOI * definition reference count is decremented when the caller is done. * */ struct cipso_v4_doi *cipso_v4_doi_getdef(u32 doi) { struct cipso_v4_doi *doi_def; rcu_read_lock(); doi_def = cipso_v4_doi_search(doi); if (doi_def == NULL) goto doi_getdef_return; if (!atomic_inc_not_zero(&doi_def->refcount)) doi_def = NULL; doi_getdef_return: rcu_read_unlock(); return doi_def; } /** * cipso_v4_doi_putdef - Releases a reference for the given DOI definition * @doi_def: the DOI definition * * Description: * Releases a DOI definition reference obtained from cipso_v4_doi_getdef(). * */ void cipso_v4_doi_putdef(struct cipso_v4_doi *doi_def) { if (doi_def == NULL) return; if (!atomic_dec_and_test(&doi_def->refcount)) return; spin_lock(&cipso_v4_doi_list_lock); list_del_rcu(&doi_def->list); spin_unlock(&cipso_v4_doi_list_lock); cipso_v4_cache_invalidate(); call_rcu(&doi_def->rcu, cipso_v4_doi_free_rcu); } /** * cipso_v4_doi_walk - Iterate through the DOI definitions * @skip_cnt: skip past this number of DOI definitions, updated * @callback: callback for each DOI definition * @cb_arg: argument for the callback function * * Description: * Iterate over the DOI definition list, skipping the first @skip_cnt entries. * For each entry call @callback, if @callback returns a negative value stop * 'walking' through the list and return. Updates the value in @skip_cnt upon * return. Returns zero on success, negative values on failure. * */ int cipso_v4_doi_walk(u32 *skip_cnt, int (*callback) (struct cipso_v4_doi *doi_def, void *arg), void *cb_arg) { int ret_val = -ENOENT; u32 doi_cnt = 0; struct cipso_v4_doi *iter_doi; rcu_read_lock(); list_for_each_entry_rcu(iter_doi, &cipso_v4_doi_list, list) if (atomic_read(&iter_doi->refcount) > 0) { if (doi_cnt++ < *skip_cnt) continue; ret_val = callback(iter_doi, cb_arg); if (ret_val < 0) { doi_cnt--; goto doi_walk_return; } } doi_walk_return: rcu_read_unlock(); *skip_cnt = doi_cnt; return ret_val; } /* * Label Mapping Functions */ /** * cipso_v4_map_lvl_valid - Checks to see if the given level is understood * @doi_def: the DOI definition * @level: the level to check * * Description: * Checks the given level against the given DOI definition and returns a * negative value if the level does not have a valid mapping and a zero value * if the level is defined by the DOI. 
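 * For example, a CIPSO_V4_MAP_PASS DOI accepts every level unchanged, while
 * a CIPSO_V4_MAP_TRANS DOI only accepts levels whose lvl.cipso[] entry holds
 * a translation below CIPSO_V4_INV_LVL.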
* */ static int cipso_v4_map_lvl_valid(const struct cipso_v4_doi *doi_def, u8 level) { switch (doi_def->type) { case CIPSO_V4_MAP_PASS: return 0; case CIPSO_V4_MAP_TRANS: if (doi_def->map.std->lvl.cipso[level] < CIPSO_V4_INV_LVL) return 0; break; } return -EFAULT; } /** * cipso_v4_map_lvl_hton - Perform a level mapping from the host to the network * @doi_def: the DOI definition * @host_lvl: the host MLS level * @net_lvl: the network/CIPSO MLS level * * Description: * Perform a label mapping to translate a local MLS level to the correct * CIPSO level using the given DOI definition. Returns zero on success, * negative values otherwise. * */ static int cipso_v4_map_lvl_hton(const struct cipso_v4_doi *doi_def, u32 host_lvl, u32 *net_lvl) { switch (doi_def->type) { case CIPSO_V4_MAP_PASS: *net_lvl = host_lvl; return 0; case CIPSO_V4_MAP_TRANS: if (host_lvl < doi_def->map.std->lvl.local_size && doi_def->map.std->lvl.local[host_lvl] < CIPSO_V4_INV_LVL) { *net_lvl = doi_def->map.std->lvl.local[host_lvl]; return 0; } return -EPERM; } return -EINVAL; } /** * cipso_v4_map_lvl_ntoh - Perform a level mapping from the network to the host * @doi_def: the DOI definition * @net_lvl: the network/CIPSO MLS level * @host_lvl: the host MLS level * * Description: * Perform a label mapping to translate a CIPSO level to the correct local MLS * level using the given DOI definition. Returns zero on success, negative * values otherwise. * */ static int cipso_v4_map_lvl_ntoh(const struct cipso_v4_doi *doi_def, u32 net_lvl, u32 *host_lvl) { struct cipso_v4_std_map_tbl *map_tbl; switch (doi_def->type) { case CIPSO_V4_MAP_PASS: *host_lvl = net_lvl; return 0; case CIPSO_V4_MAP_TRANS: map_tbl = doi_def->map.std; if (net_lvl < map_tbl->lvl.cipso_size && map_tbl->lvl.cipso[net_lvl] < CIPSO_V4_INV_LVL) { *host_lvl = doi_def->map.std->lvl.cipso[net_lvl]; return 0; } return -EPERM; } return -EINVAL; } /** * cipso_v4_map_cat_rbm_valid - Checks to see if the category bitmap is valid * @doi_def: the DOI definition * @bitmap: category bitmap * @bitmap_len: bitmap length in bytes * * Description: * Checks the given category bitmap against the given DOI definition and * returns a negative value if any of the categories in the bitmap do not have * a valid mapping and a zero value if all of the categories are valid. * */ static int cipso_v4_map_cat_rbm_valid(const struct cipso_v4_doi *doi_def, const unsigned char *bitmap, u32 bitmap_len) { int cat = -1; u32 bitmap_len_bits = bitmap_len * 8; u32 cipso_cat_size; u32 *cipso_array; switch (doi_def->type) { case CIPSO_V4_MAP_PASS: return 0; case CIPSO_V4_MAP_TRANS: cipso_cat_size = doi_def->map.std->cat.cipso_size; cipso_array = doi_def->map.std->cat.cipso; for (;;) { cat = cipso_v4_bitmap_walk(bitmap, bitmap_len_bits, cat + 1, 1); if (cat < 0) break; if (cat >= cipso_cat_size || cipso_array[cat] >= CIPSO_V4_INV_CAT) return -EFAULT; } if (cat == -1) return 0; break; } return -EFAULT; } /** * cipso_v4_map_cat_rbm_hton - Perform a category mapping from host to network * @doi_def: the DOI definition * @secattr: the security attributes * @net_cat: the zero'd out category bitmap in network/CIPSO format * @net_cat_len: the length of the CIPSO bitmap in bytes * * Description: * Perform a label mapping to translate a local MLS category bitmap to the * correct CIPSO bitmap using the given DOI definition. Returns the minimum * size in bytes of the network bitmap on success, negative values otherwise. 
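 * For example, if the highest category bit set in the network bitmap is
 * bit 20, the returned minimum size is 20 / 8 + 1 = 3 bytes, enough to
 * carry bits 0 through 23.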
* */ static int cipso_v4_map_cat_rbm_hton(const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr, unsigned char *net_cat, u32 net_cat_len) { int host_spot = -1; u32 net_spot = CIPSO_V4_INV_CAT; u32 net_spot_max = 0; u32 net_clen_bits = net_cat_len * 8; u32 host_cat_size = 0; u32 *host_cat_array = NULL; if (doi_def->type == CIPSO_V4_MAP_TRANS) { host_cat_size = doi_def->map.std->cat.local_size; host_cat_array = doi_def->map.std->cat.local; } for (;;) { host_spot = netlbl_secattr_catmap_walk(secattr->attr.mls.cat, host_spot + 1); if (host_spot < 0) break; switch (doi_def->type) { case CIPSO_V4_MAP_PASS: net_spot = host_spot; break; case CIPSO_V4_MAP_TRANS: if (host_spot >= host_cat_size) return -EPERM; net_spot = host_cat_array[host_spot]; if (net_spot >= CIPSO_V4_INV_CAT) return -EPERM; break; } if (net_spot >= net_clen_bits) return -ENOSPC; cipso_v4_bitmap_setbit(net_cat, net_spot, 1); if (net_spot > net_spot_max) net_spot_max = net_spot; } if (++net_spot_max % 8) return net_spot_max / 8 + 1; return net_spot_max / 8; } /** * cipso_v4_map_cat_rbm_ntoh - Perform a category mapping from network to host * @doi_def: the DOI definition * @net_cat: the category bitmap in network/CIPSO format * @net_cat_len: the length of the CIPSO bitmap in bytes * @secattr: the security attributes * * Description: * Perform a label mapping to translate a CIPSO bitmap to the correct local * MLS category bitmap using the given DOI definition. Returns zero on * success, negative values on failure. * */ static int cipso_v4_map_cat_rbm_ntoh(const struct cipso_v4_doi *doi_def, const unsigned char *net_cat, u32 net_cat_len, struct netlbl_lsm_secattr *secattr) { int ret_val; int net_spot = -1; u32 host_spot = CIPSO_V4_INV_CAT; u32 net_clen_bits = net_cat_len * 8; u32 net_cat_size = 0; u32 *net_cat_array = NULL; if (doi_def->type == CIPSO_V4_MAP_TRANS) { net_cat_size = doi_def->map.std->cat.cipso_size; net_cat_array = doi_def->map.std->cat.cipso; } for (;;) { net_spot = cipso_v4_bitmap_walk(net_cat, net_clen_bits, net_spot + 1, 1); if (net_spot < 0) { if (net_spot == -2) return -EFAULT; return 0; } switch (doi_def->type) { case CIPSO_V4_MAP_PASS: host_spot = net_spot; break; case CIPSO_V4_MAP_TRANS: if (net_spot >= net_cat_size) return -EPERM; host_spot = net_cat_array[net_spot]; if (host_spot >= CIPSO_V4_INV_CAT) return -EPERM; break; } ret_val = netlbl_secattr_catmap_setbit(secattr->attr.mls.cat, host_spot, GFP_ATOMIC); if (ret_val != 0) return ret_val; } return -EINVAL; } /** * cipso_v4_map_cat_enum_valid - Checks to see if the categories are valid * @doi_def: the DOI definition * @enumcat: category list * @enumcat_len: length of the category list in bytes * * Description: * Checks the given categories against the given DOI definition and returns a * negative value if any of the categories do not have a valid mapping and a * zero value if all of the categories are valid. 
* */ static int cipso_v4_map_cat_enum_valid(const struct cipso_v4_doi *doi_def, const unsigned char *enumcat, u32 enumcat_len) { u16 cat; int cat_prev = -1; u32 iter; if (doi_def->type != CIPSO_V4_MAP_PASS || enumcat_len & 0x01) return -EFAULT; for (iter = 0; iter < enumcat_len; iter += 2) { cat = get_unaligned_be16(&enumcat[iter]); if (cat <= cat_prev) return -EFAULT; cat_prev = cat; } return 0; } /** * cipso_v4_map_cat_enum_hton - Perform a category mapping from host to network * @doi_def: the DOI definition * @secattr: the security attributes * @net_cat: the zero'd out category list in network/CIPSO format * @net_cat_len: the length of the CIPSO category list in bytes * * Description: * Perform a label mapping to translate a local MLS category bitmap to the * correct CIPSO category list using the given DOI definition. Returns the * size in bytes of the network category bitmap on success, negative values * otherwise. * */ static int cipso_v4_map_cat_enum_hton(const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr, unsigned char *net_cat, u32 net_cat_len) { int cat = -1; u32 cat_iter = 0; for (;;) { cat = netlbl_secattr_catmap_walk(secattr->attr.mls.cat, cat + 1); if (cat < 0) break; if ((cat_iter + 2) > net_cat_len) return -ENOSPC; *((__be16 *)&net_cat[cat_iter]) = htons(cat); cat_iter += 2; } return cat_iter; } /** * cipso_v4_map_cat_enum_ntoh - Perform a category mapping from network to host * @doi_def: the DOI definition * @net_cat: the category list in network/CIPSO format * @net_cat_len: the length of the CIPSO bitmap in bytes * @secattr: the security attributes * * Description: * Perform a label mapping to translate a CIPSO category list to the correct * local MLS category bitmap using the given DOI definition. Returns zero on * success, negative values on failure. * */ static int cipso_v4_map_cat_enum_ntoh(const struct cipso_v4_doi *doi_def, const unsigned char *net_cat, u32 net_cat_len, struct netlbl_lsm_secattr *secattr) { int ret_val; u32 iter; for (iter = 0; iter < net_cat_len; iter += 2) { ret_val = netlbl_secattr_catmap_setbit(secattr->attr.mls.cat, get_unaligned_be16(&net_cat[iter]), GFP_ATOMIC); if (ret_val != 0) return ret_val; } return 0; } /** * cipso_v4_map_cat_rng_valid - Checks to see if the categories are valid * @doi_def: the DOI definition * @rngcat: category list * @rngcat_len: length of the category list in bytes * * Description: * Checks the given categories against the given DOI definition and returns a * negative value if any of the categories do not have a valid mapping and a * zero value if all of the categories are valid. 
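 * Note that the ranges in @rngcat must be ordered from highest to lowest,
 * e.g. the pair (12,10) must come before the pair (5,0), and a trailing
 * low end of zero may be omitted entirely.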
* */ static int cipso_v4_map_cat_rng_valid(const struct cipso_v4_doi *doi_def, const unsigned char *rngcat, u32 rngcat_len) { u16 cat_high; u16 cat_low; u32 cat_prev = CIPSO_V4_MAX_REM_CATS + 1; u32 iter; if (doi_def->type != CIPSO_V4_MAP_PASS || rngcat_len & 0x01) return -EFAULT; for (iter = 0; iter < rngcat_len; iter += 4) { cat_high = get_unaligned_be16(&rngcat[iter]); if ((iter + 4) <= rngcat_len) cat_low = get_unaligned_be16(&rngcat[iter + 2]); else cat_low = 0; if (cat_high > cat_prev) return -EFAULT; cat_prev = cat_low; } return 0; } /** * cipso_v4_map_cat_rng_hton - Perform a category mapping from host to network * @doi_def: the DOI definition * @secattr: the security attributes * @net_cat: the zero'd out category list in network/CIPSO format * @net_cat_len: the length of the CIPSO category list in bytes * * Description: * Perform a label mapping to translate a local MLS category bitmap to the * correct CIPSO category list using the given DOI definition. Returns the * size in bytes of the network category bitmap on success, negative values * otherwise. * */ static int cipso_v4_map_cat_rng_hton(const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr, unsigned char *net_cat, u32 net_cat_len) { int iter = -1; u16 array[CIPSO_V4_TAG_RNG_CAT_MAX * 2]; u32 array_cnt = 0; u32 cat_size = 0; /* make sure we don't overflow the 'array[]' variable */ if (net_cat_len > (CIPSO_V4_OPT_LEN_MAX - CIPSO_V4_HDR_LEN - CIPSO_V4_TAG_RNG_BLEN)) return -ENOSPC; for (;;) { iter = netlbl_secattr_catmap_walk(secattr->attr.mls.cat, iter + 1); if (iter < 0) break; cat_size += (iter == 0 ? 0 : sizeof(u16)); if (cat_size > net_cat_len) return -ENOSPC; array[array_cnt++] = iter; iter = netlbl_secattr_catmap_walk_rng(secattr->attr.mls.cat, iter); if (iter < 0) return -EFAULT; cat_size += sizeof(u16); if (cat_size > net_cat_len) return -ENOSPC; array[array_cnt++] = iter; } for (iter = 0; array_cnt > 0;) { *((__be16 *)&net_cat[iter]) = htons(array[--array_cnt]); iter += 2; array_cnt--; if (array[array_cnt] != 0) { *((__be16 *)&net_cat[iter]) = htons(array[array_cnt]); iter += 2; } } return cat_size; } /** * cipso_v4_map_cat_rng_ntoh - Perform a category mapping from network to host * @doi_def: the DOI definition * @net_cat: the category list in network/CIPSO format * @net_cat_len: the length of the CIPSO bitmap in bytes * @secattr: the security attributes * * Description: * Perform a label mapping to translate a CIPSO category list to the correct * local MLS category bitmap using the given DOI definition. Returns zero on * success, negative values on failure. * */ static int cipso_v4_map_cat_rng_ntoh(const struct cipso_v4_doi *doi_def, const unsigned char *net_cat, u32 net_cat_len, struct netlbl_lsm_secattr *secattr) { int ret_val; u32 net_iter; u16 cat_low; u16 cat_high; for (net_iter = 0; net_iter < net_cat_len; net_iter += 4) { cat_high = get_unaligned_be16(&net_cat[net_iter]); if ((net_iter + 4) <= net_cat_len) cat_low = get_unaligned_be16(&net_cat[net_iter + 2]); else cat_low = 0; ret_val = netlbl_secattr_catmap_setrng(secattr->attr.mls.cat, cat_low, cat_high, GFP_ATOMIC); if (ret_val != 0) return ret_val; } return 0; } /* * Protocol Handling Functions */ /** * cipso_v4_gentag_hdr - Generate a CIPSO option header * @doi_def: the DOI definition * @len: the total tag length in bytes, not including this header * @buf: the CIPSO option buffer * * Description: * Write a CIPSO header into the beginning of @buffer. 
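 * The header is always CIPSO_V4_HDR_LEN (6) bytes long: the option type
 * (IPOPT_CIPSO, 134), the total option length, and the 32-bit DOI value
 * in network byte order.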
 *
 */
static void cipso_v4_gentag_hdr(const struct cipso_v4_doi *doi_def,
                                unsigned char *buf,
                                u32 len)
{
        buf[0] = IPOPT_CIPSO;
        buf[1] = CIPSO_V4_HDR_LEN + len;
        *(__be32 *)&buf[2] = htonl(doi_def->doi);
}

/**
 * cipso_v4_gentag_rbm - Generate a CIPSO restricted bitmap tag (type #1)
 * @doi_def: the DOI definition
 * @secattr: the security attributes
 * @buffer: the option buffer
 * @buffer_len: length of buffer in bytes
 *
 * Description:
 * Generate a CIPSO option using the restricted bitmap tag, tag type #1. The
 * actual buffer length may be larger than the indicated size due to
 * translation between host and network category bitmaps. Returns the size of
 * the tag on success, negative values on failure.
 *
 */
static int cipso_v4_gentag_rbm(const struct cipso_v4_doi *doi_def,
                               const struct netlbl_lsm_secattr *secattr,
                               unsigned char *buffer,
                               u32 buffer_len)
{
        int ret_val;
        u32 tag_len;
        u32 level;

        if ((secattr->flags & NETLBL_SECATTR_MLS_LVL) == 0)
                return -EPERM;

        ret_val = cipso_v4_map_lvl_hton(doi_def,
                                        secattr->attr.mls.lvl,
                                        &level);
        if (ret_val != 0)
                return ret_val;

        if (secattr->flags & NETLBL_SECATTR_MLS_CAT) {
                ret_val = cipso_v4_map_cat_rbm_hton(doi_def,
                                                    secattr,
                                                    &buffer[4],
                                                    buffer_len - 4);
                if (ret_val < 0)
                        return ret_val;

                /* This will send packets using the "optimized" format when
                 * possible as specified in section 3.4.2.6 of the
                 * CIPSO draft. */
                if (cipso_v4_rbm_optfmt && ret_val > 0 && ret_val <= 10)
                        tag_len = 14;
                else
                        tag_len = 4 + ret_val;
        } else
                tag_len = 4;

        buffer[0] = CIPSO_V4_TAG_RBITMAP;
        buffer[1] = tag_len;
        buffer[3] = level;

        return tag_len;
}

/**
 * cipso_v4_parsetag_rbm - Parse a CIPSO restricted bitmap tag
 * @doi_def: the DOI definition
 * @tag: the CIPSO tag
 * @secattr: the security attributes
 *
 * Description:
 * Parse a CIPSO restricted bitmap tag (tag type #1) and return the security
 * attributes in @secattr. Return zero on success, negative values on
 * failure.
 *
 */
static int cipso_v4_parsetag_rbm(const struct cipso_v4_doi *doi_def,
                                 const unsigned char *tag,
                                 struct netlbl_lsm_secattr *secattr)
{
        int ret_val;
        u8 tag_len = tag[1];
        u32 level;

        ret_val = cipso_v4_map_lvl_ntoh(doi_def, tag[3], &level);
        if (ret_val != 0)
                return ret_val;
        secattr->attr.mls.lvl = level;
        secattr->flags |= NETLBL_SECATTR_MLS_LVL;

        if (tag_len > 4) {
                secattr->attr.mls.cat =
                        netlbl_secattr_catmap_alloc(GFP_ATOMIC);
                if (secattr->attr.mls.cat == NULL)
                        return -ENOMEM;

                ret_val = cipso_v4_map_cat_rbm_ntoh(doi_def,
                                                    &tag[4],
                                                    tag_len - 4,
                                                    secattr);
                if (ret_val != 0) {
                        netlbl_secattr_catmap_free(secattr->attr.mls.cat);
                        return ret_val;
                }

                secattr->flags |= NETLBL_SECATTR_MLS_CAT;
        }

        return 0;
}

/**
 * cipso_v4_gentag_enum - Generate a CIPSO enumerated tag (type #2)
 * @doi_def: the DOI definition
 * @secattr: the security attributes
 * @buffer: the option buffer
 * @buffer_len: length of buffer in bytes
 *
 * Description:
 * Generate a CIPSO option using the enumerated tag, tag type #2. Returns the
 * size of the tag on success, negative values on failure.
 *
 */
static int cipso_v4_gentag_enum(const struct cipso_v4_doi *doi_def,
                                const struct netlbl_lsm_secattr *secattr,
                                unsigned char *buffer,
                                u32 buffer_len)
{
        int ret_val;
        u32 tag_len;
        u32 level;

        if (!(secattr->flags & NETLBL_SECATTR_MLS_LVL))
                return -EPERM;

        ret_val = cipso_v4_map_lvl_hton(doi_def,
                                        secattr->attr.mls.lvl,
                                        &level);
        if (ret_val != 0)
                return ret_val;

        if (secattr->flags & NETLBL_SECATTR_MLS_CAT) {
                ret_val = cipso_v4_map_cat_enum_hton(doi_def,
                                                     secattr,
                                                     &buffer[4],
                                                     buffer_len - 4);
                if (ret_val < 0)
                        return ret_val;

                tag_len = 4 + ret_val;
        } else
                tag_len = 4;

        buffer[0] = CIPSO_V4_TAG_ENUM;
        buffer[1] = tag_len;
        buffer[3] = level;

        return tag_len;
}

/**
 * cipso_v4_parsetag_enum - Parse a CIPSO enumerated tag
 * @doi_def: the DOI definition
 * @tag: the CIPSO tag
 * @secattr: the security attributes
 *
 * Description:
 * Parse a CIPSO enumerated tag (tag type #2) and return the security
 * attributes in @secattr. Return zero on success, negative values on
 * failure.
 *
 */
static int cipso_v4_parsetag_enum(const struct cipso_v4_doi *doi_def,
                                  const unsigned char *tag,
                                  struct netlbl_lsm_secattr *secattr)
{
        int ret_val;
        u8 tag_len = tag[1];
        u32 level;

        ret_val = cipso_v4_map_lvl_ntoh(doi_def, tag[3], &level);
        if (ret_val != 0)
                return ret_val;
        secattr->attr.mls.lvl = level;
        secattr->flags |= NETLBL_SECATTR_MLS_LVL;

        if (tag_len > 4) {
                secattr->attr.mls.cat =
                        netlbl_secattr_catmap_alloc(GFP_ATOMIC);
                if (secattr->attr.mls.cat == NULL)
                        return -ENOMEM;

                ret_val = cipso_v4_map_cat_enum_ntoh(doi_def,
                                                     &tag[4],
                                                     tag_len - 4,
                                                     secattr);
                if (ret_val != 0) {
                        netlbl_secattr_catmap_free(secattr->attr.mls.cat);
                        return ret_val;
                }

                secattr->flags |= NETLBL_SECATTR_MLS_CAT;
        }

        return 0;
}

/**
 * cipso_v4_gentag_rng - Generate a CIPSO ranged tag (type #5)
 * @doi_def: the DOI definition
 * @secattr: the security attributes
 * @buffer: the option buffer
 * @buffer_len: length of buffer in bytes
 *
 * Description:
 * Generate a CIPSO option using the ranged tag, tag type #5. Returns the
 * size of the tag on success, negative values on failure.
 *
 */
static int cipso_v4_gentag_rng(const struct cipso_v4_doi *doi_def,
                               const struct netlbl_lsm_secattr *secattr,
                               unsigned char *buffer,
                               u32 buffer_len)
{
        int ret_val;
        u32 tag_len;
        u32 level;

        if (!(secattr->flags & NETLBL_SECATTR_MLS_LVL))
                return -EPERM;

        ret_val = cipso_v4_map_lvl_hton(doi_def,
                                        secattr->attr.mls.lvl,
                                        &level);
        if (ret_val != 0)
                return ret_val;

        if (secattr->flags & NETLBL_SECATTR_MLS_CAT) {
                ret_val = cipso_v4_map_cat_rng_hton(doi_def,
                                                    secattr,
                                                    &buffer[4],
                                                    buffer_len - 4);
                if (ret_val < 0)
                        return ret_val;

                tag_len = 4 + ret_val;
        } else
                tag_len = 4;

        buffer[0] = CIPSO_V4_TAG_RANGE;
        buffer[1] = tag_len;
        buffer[3] = level;

        return tag_len;
}

/**
 * cipso_v4_parsetag_rng - Parse a CIPSO ranged tag
 * @doi_def: the DOI definition
 * @tag: the CIPSO tag
 * @secattr: the security attributes
 *
 * Description:
 * Parse a CIPSO ranged tag (tag type #5) and return the security attributes
 * in @secattr. Return zero on success, negative values on failure.
 *
 */
static int cipso_v4_parsetag_rng(const struct cipso_v4_doi *doi_def,
                                 const unsigned char *tag,
                                 struct netlbl_lsm_secattr *secattr)
{
        int ret_val;
        u8 tag_len = tag[1];
        u32 level;

        ret_val = cipso_v4_map_lvl_ntoh(doi_def, tag[3], &level);
        if (ret_val != 0)
                return ret_val;
        secattr->attr.mls.lvl = level;
        secattr->flags |= NETLBL_SECATTR_MLS_LVL;

        if (tag_len > 4) {
                secattr->attr.mls.cat =
                        netlbl_secattr_catmap_alloc(GFP_ATOMIC);
                if (secattr->attr.mls.cat == NULL)
                        return -ENOMEM;

                ret_val = cipso_v4_map_cat_rng_ntoh(doi_def,
                                                    &tag[4],
                                                    tag_len - 4,
                                                    secattr);
                if (ret_val != 0) {
                        netlbl_secattr_catmap_free(secattr->attr.mls.cat);
                        return ret_val;
                }

                secattr->flags |= NETLBL_SECATTR_MLS_CAT;
        }

        return 0;
}

/**
 * cipso_v4_gentag_loc - Generate a CIPSO local tag (non-standard)
 * @doi_def: the DOI definition
 * @secattr: the security attributes
 * @buffer: the option buffer
 * @buffer_len: length of buffer in bytes
 *
 * Description:
 * Generate a CIPSO option using the local tag. Returns the size of the tag
 * on success, negative values on failure.
 *
 */
static int cipso_v4_gentag_loc(const struct cipso_v4_doi *doi_def,
                               const struct netlbl_lsm_secattr *secattr,
                               unsigned char *buffer,
                               u32 buffer_len)
{
        if (!(secattr->flags & NETLBL_SECATTR_SECID))
                return -EPERM;

        buffer[0] = CIPSO_V4_TAG_LOCAL;
        buffer[1] = CIPSO_V4_TAG_LOC_BLEN;
        *(u32 *)&buffer[2] = secattr->attr.secid;

        return CIPSO_V4_TAG_LOC_BLEN;
}

/**
 * cipso_v4_parsetag_loc - Parse a CIPSO local tag
 * @doi_def: the DOI definition
 * @tag: the CIPSO tag
 * @secattr: the security attributes
 *
 * Description:
 * Parse a CIPSO local tag and return the security attributes in @secattr.
 * Return zero on success, negative values on failure.
 *
 */
static int cipso_v4_parsetag_loc(const struct cipso_v4_doi *doi_def,
                                 const unsigned char *tag,
                                 struct netlbl_lsm_secattr *secattr)
{
        secattr->attr.secid = *(u32 *)&tag[2];
        secattr->flags |= NETLBL_SECATTR_SECID;

        return 0;
}

/**
 * cipso_v4_validate - Validate a CIPSO option
 * @skb: the packet
 * @option: the start of the option, on error it is set to point to the error
 *
 * Description:
 * This routine is called to validate a CIPSO option; it checks all of the
 * fields to ensure that they are at least valid, see the draft snippet below
 * for details. If the option is valid then a zero value is returned and
 * the value of @option is unchanged. If the option is invalid then a
 * non-zero value is returned and @option is adjusted to point to the
 * offending portion of the option. From the IETF draft ...
 *
 * "If any field within the CIPSO options, such as the DOI identifier, is not
 * recognized the IP datagram is discarded and an ICMP 'parameter problem'
 * (type 12) is generated and returned. The ICMP code field is set to 'bad
 * parameter' (code 0) and the pointer is set to the start of the CIPSO field
 * that is unrecognized."
* */ int cipso_v4_validate(const struct sk_buff *skb, unsigned char **option) { unsigned char *opt = *option; unsigned char *tag; unsigned char opt_iter; unsigned char err_offset = 0; u8 opt_len; u8 tag_len; struct cipso_v4_doi *doi_def = NULL; u32 tag_iter; /* caller already checks for length values that are too large */ opt_len = opt[1]; if (opt_len < 8) { err_offset = 1; goto validate_return; } rcu_read_lock(); doi_def = cipso_v4_doi_search(get_unaligned_be32(&opt[2])); if (doi_def == NULL) { err_offset = 2; goto validate_return_locked; } opt_iter = CIPSO_V4_HDR_LEN; tag = opt + opt_iter; while (opt_iter < opt_len) { for (tag_iter = 0; doi_def->tags[tag_iter] != tag[0];) if (doi_def->tags[tag_iter] == CIPSO_V4_TAG_INVALID || ++tag_iter == CIPSO_V4_TAG_MAXCNT) { err_offset = opt_iter; goto validate_return_locked; } tag_len = tag[1]; if (tag_len > (opt_len - opt_iter)) { err_offset = opt_iter + 1; goto validate_return_locked; } switch (tag[0]) { case CIPSO_V4_TAG_RBITMAP: if (tag_len < CIPSO_V4_TAG_RBM_BLEN) { err_offset = opt_iter + 1; goto validate_return_locked; } /* We are already going to do all the verification * necessary at the socket layer so from our point of * view it is safe to turn these checks off (and less * work), however, the CIPSO draft says we should do * all the CIPSO validations here but it doesn't * really specify _exactly_ what we need to validate * ... so, just make it a sysctl tunable. */ if (cipso_v4_rbm_strictvalid) { if (cipso_v4_map_lvl_valid(doi_def, tag[3]) < 0) { err_offset = opt_iter + 3; goto validate_return_locked; } if (tag_len > CIPSO_V4_TAG_RBM_BLEN && cipso_v4_map_cat_rbm_valid(doi_def, &tag[4], tag_len - 4) < 0) { err_offset = opt_iter + 4; goto validate_return_locked; } } break; case CIPSO_V4_TAG_ENUM: if (tag_len < CIPSO_V4_TAG_ENUM_BLEN) { err_offset = opt_iter + 1; goto validate_return_locked; } if (cipso_v4_map_lvl_valid(doi_def, tag[3]) < 0) { err_offset = opt_iter + 3; goto validate_return_locked; } if (tag_len > CIPSO_V4_TAG_ENUM_BLEN && cipso_v4_map_cat_enum_valid(doi_def, &tag[4], tag_len - 4) < 0) { err_offset = opt_iter + 4; goto validate_return_locked; } break; case CIPSO_V4_TAG_RANGE: if (tag_len < CIPSO_V4_TAG_RNG_BLEN) { err_offset = opt_iter + 1; goto validate_return_locked; } if (cipso_v4_map_lvl_valid(doi_def, tag[3]) < 0) { err_offset = opt_iter + 3; goto validate_return_locked; } if (tag_len > CIPSO_V4_TAG_RNG_BLEN && cipso_v4_map_cat_rng_valid(doi_def, &tag[4], tag_len - 4) < 0) { err_offset = opt_iter + 4; goto validate_return_locked; } break; case CIPSO_V4_TAG_LOCAL: /* This is a non-standard tag that we only allow for * local connections, so if the incoming interface is * not the loopback device drop the packet. */ if (!(skb->dev->flags & IFF_LOOPBACK)) { err_offset = opt_iter; goto validate_return_locked; } if (tag_len != CIPSO_V4_TAG_LOC_BLEN) { err_offset = opt_iter + 1; goto validate_return_locked; } break; default: err_offset = opt_iter; goto validate_return_locked; } tag += tag_len; opt_iter += tag_len; } validate_return_locked: rcu_read_unlock(); validate_return: *option = opt + err_offset; return err_offset; } /** * cipso_v4_error - Send the correct response for a bad packet * @skb: the packet * @error: the error code * @gateway: CIPSO gateway flag * * Description: * Based on the error code given in @error, send an ICMP error message back to * the originating host. From the IETF draft ... 
* * "If the contents of the CIPSO [option] are valid but the security label is * outside of the configured host or port label range, the datagram is * discarded and an ICMP 'destination unreachable' (type 3) is generated and * returned. The code field of the ICMP is set to 'communication with * destination network administratively prohibited' (code 9) or to * 'communication with destination host administratively prohibited' * (code 10). The value of the code is dependent on whether the originator * of the ICMP message is acting as a CIPSO host or a CIPSO gateway. The * recipient of the ICMP message MUST be able to handle either value. The * same procedure is performed if a CIPSO [option] can not be added to an * IP packet because it is too large to fit in the IP options area." * * "If the error is triggered by receipt of an ICMP message, the message is * discarded and no response is permitted (consistent with general ICMP * processing rules)." * */ void cipso_v4_error(struct sk_buff *skb, int error, u32 gateway) { if (ip_hdr(skb)->protocol == IPPROTO_ICMP || error != -EACCES) return; if (gateway) icmp_send(skb, ICMP_DEST_UNREACH, ICMP_NET_ANO, 0); else icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_ANO, 0); } /** * cipso_v4_genopt - Generate a CIPSO option * @buf: the option buffer * @buf_len: the size of opt_buf * @doi_def: the CIPSO DOI to use * @secattr: the security attributes * * Description: * Generate a CIPSO option using the DOI definition and security attributes * passed to the function. Returns the length of the option on success and * negative values on failure. * */ static int cipso_v4_genopt(unsigned char *buf, u32 buf_len, const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr) { int ret_val; u32 iter; if (buf_len <= CIPSO_V4_HDR_LEN) return -ENOSPC; /* XXX - This code assumes only one tag per CIPSO option which isn't * really a good assumption to make but since we only support the MAC * tags right now it is a safe assumption. */ iter = 0; do { memset(buf, 0, buf_len); switch (doi_def->tags[iter]) { case CIPSO_V4_TAG_RBITMAP: ret_val = cipso_v4_gentag_rbm(doi_def, secattr, &buf[CIPSO_V4_HDR_LEN], buf_len - CIPSO_V4_HDR_LEN); break; case CIPSO_V4_TAG_ENUM: ret_val = cipso_v4_gentag_enum(doi_def, secattr, &buf[CIPSO_V4_HDR_LEN], buf_len - CIPSO_V4_HDR_LEN); break; case CIPSO_V4_TAG_RANGE: ret_val = cipso_v4_gentag_rng(doi_def, secattr, &buf[CIPSO_V4_HDR_LEN], buf_len - CIPSO_V4_HDR_LEN); break; case CIPSO_V4_TAG_LOCAL: ret_val = cipso_v4_gentag_loc(doi_def, secattr, &buf[CIPSO_V4_HDR_LEN], buf_len - CIPSO_V4_HDR_LEN); break; default: return -EPERM; } iter++; } while (ret_val < 0 && iter < CIPSO_V4_TAG_MAXCNT && doi_def->tags[iter] != CIPSO_V4_TAG_INVALID); if (ret_val < 0) return ret_val; cipso_v4_gentag_hdr(doi_def, buf, ret_val); return CIPSO_V4_HDR_LEN + ret_val; } static void opt_kfree_rcu(struct rcu_head *head) { kfree(container_of(head, struct ip_options_rcu, rcu)); } /** * cipso_v4_sock_setattr - Add a CIPSO option to a socket * @sk: the socket * @doi_def: the CIPSO DOI to use * @secattr: the specific security attributes of the socket * * Description: * Set the CIPSO option on the given socket using the DOI definition and * security attributes passed to the function. This function requires * exclusive access to @sk, which means it either needs to be in the * process of being created or locked. Returns zero on success and negative * values on failure. 
* */ int cipso_v4_sock_setattr(struct sock *sk, const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr) { int ret_val = -EPERM; unsigned char *buf = NULL; u32 buf_len; u32 opt_len; struct ip_options_rcu *old, *opt = NULL; struct inet_sock *sk_inet; struct inet_connection_sock *sk_conn; /* In the case of sock_create_lite(), the sock->sk field is not * defined yet but it is not a problem as the only users of these * "lite" PF_INET sockets are functions which do an accept() call * afterwards so we will label the socket as part of the accept(). */ if (sk == NULL) return 0; /* We allocate the maximum CIPSO option size here so we are probably * being a little wasteful, but it makes our life _much_ easier later * on and after all we are only talking about 40 bytes. */ buf_len = CIPSO_V4_OPT_LEN_MAX; buf = kmalloc(buf_len, GFP_ATOMIC); if (buf == NULL) { ret_val = -ENOMEM; goto socket_setattr_failure; } ret_val = cipso_v4_genopt(buf, buf_len, doi_def, secattr); if (ret_val < 0) goto socket_setattr_failure; buf_len = ret_val; /* We can't use ip_options_get() directly because it makes a call to * ip_options_get_alloc() which allocates memory with GFP_KERNEL and * we won't always have CAP_NET_RAW even though we _always_ want to * set the IPOPT_CIPSO option. */ opt_len = (buf_len + 3) & ~3; opt = kzalloc(sizeof(*opt) + opt_len, GFP_ATOMIC); if (opt == NULL) { ret_val = -ENOMEM; goto socket_setattr_failure; } memcpy(opt->opt.__data, buf, buf_len); opt->opt.optlen = opt_len; opt->opt.cipso = sizeof(struct iphdr); kfree(buf); buf = NULL; sk_inet = inet_sk(sk); old = rcu_dereference_protected(sk_inet->inet_opt, sock_owned_by_user(sk)); if (sk_inet->is_icsk) { sk_conn = inet_csk(sk); if (old) sk_conn->icsk_ext_hdr_len -= old->opt.optlen; sk_conn->icsk_ext_hdr_len += opt->opt.optlen; sk_conn->icsk_sync_mss(sk, sk_conn->icsk_pmtu_cookie); } rcu_assign_pointer(sk_inet->inet_opt, opt); if (old) call_rcu(&old->rcu, opt_kfree_rcu); return 0; socket_setattr_failure: kfree(buf); kfree(opt); return ret_val; } /** * cipso_v4_req_setattr - Add a CIPSO option to a connection request socket * @req: the connection request socket * @doi_def: the CIPSO DOI to use * @secattr: the specific security attributes of the socket * * Description: * Set the CIPSO option on the given socket using the DOI definition and * security attributes passed to the function. Returns zero on success and * negative values on failure. * */ int cipso_v4_req_setattr(struct request_sock *req, const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr) { int ret_val = -EPERM; unsigned char *buf = NULL; u32 buf_len; u32 opt_len; struct ip_options_rcu *opt = NULL; struct inet_request_sock *req_inet; /* We allocate the maximum CIPSO option size here so we are probably * being a little wasteful, but it makes our life _much_ easier later * on and after all we are only talking about 40 bytes. */ buf_len = CIPSO_V4_OPT_LEN_MAX; buf = kmalloc(buf_len, GFP_ATOMIC); if (buf == NULL) { ret_val = -ENOMEM; goto req_setattr_failure; } ret_val = cipso_v4_genopt(buf, buf_len, doi_def, secattr); if (ret_val < 0) goto req_setattr_failure; buf_len = ret_val; /* We can't use ip_options_get() directly because it makes a call to * ip_options_get_alloc() which allocates memory with GFP_KERNEL and * we won't always have CAP_NET_RAW even though we _always_ want to * set the IPOPT_CIPSO option. 
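	 * Note that the option length below is rounded up to the next
	 * multiple of four bytes ((buf_len + 3) & ~3) because the IPv4
	 * header length field counts 32-bit words.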
	 */
        opt_len = (buf_len + 3) & ~3;
        opt = kzalloc(sizeof(*opt) + opt_len, GFP_ATOMIC);
        if (opt == NULL) {
                ret_val = -ENOMEM;
                goto req_setattr_failure;
        }
        memcpy(opt->opt.__data, buf, buf_len);
        opt->opt.optlen = opt_len;
        opt->opt.cipso = sizeof(struct iphdr);
        kfree(buf);
        buf = NULL;

        req_inet = inet_rsk(req);
        opt = xchg(&req_inet->opt, opt);
        if (opt)
                call_rcu(&opt->rcu, opt_kfree_rcu);

        return 0;

req_setattr_failure:
        kfree(buf);
        kfree(opt);
        return ret_val;
}

/**
 * cipso_v4_delopt - Delete the CIPSO option from a set of IP options
 * @opt_ptr: IP option pointer
 *
 * Description:
 * Deletes the CIPSO IP option from a set of IP options and makes the necessary
 * adjustments to the IP option structure. Returns zero on success, negative
 * values on failure.
 *
 */
static int cipso_v4_delopt(struct ip_options_rcu **opt_ptr)
{
        int hdr_delta = 0;
        struct ip_options_rcu *opt = *opt_ptr;

        if (opt->opt.srr || opt->opt.rr || opt->opt.ts ||
            opt->opt.router_alert) {
                u8 cipso_len;
                u8 cipso_off;
                unsigned char *cipso_ptr;
                int iter;
                int optlen_new;

                cipso_off = opt->opt.cipso - sizeof(struct iphdr);
                cipso_ptr = &opt->opt.__data[cipso_off];
                cipso_len = cipso_ptr[1];

                if (opt->opt.srr > opt->opt.cipso)
                        opt->opt.srr -= cipso_len;
                if (opt->opt.rr > opt->opt.cipso)
                        opt->opt.rr -= cipso_len;
                if (opt->opt.ts > opt->opt.cipso)
                        opt->opt.ts -= cipso_len;
                if (opt->opt.router_alert > opt->opt.cipso)
                        opt->opt.router_alert -= cipso_len;
                opt->opt.cipso = 0;

                memmove(cipso_ptr, cipso_ptr + cipso_len,
                        opt->opt.optlen - cipso_off - cipso_len);

                /* determining the new total option length is tricky because
                 * of the padding necessary; the only thing I can think to do
                 * at this point is walk the options one-by-one, skipping the
                 * padding at the end to determine the actual option size and
                 * from there we can determine the new total option length */
                iter = 0;
                optlen_new = 0;
                while (iter < opt->opt.optlen)
                        if (opt->opt.__data[iter] != IPOPT_NOP) {
                                iter += opt->opt.__data[iter + 1];
                                optlen_new = iter;
                        } else
                                iter++;
                hdr_delta = opt->opt.optlen;
                opt->opt.optlen = (optlen_new + 3) & ~3;
                hdr_delta -= opt->opt.optlen;
        } else {
                /* only the cipso option was present on the socket so we can
                 * remove the entire option struct */
                *opt_ptr = NULL;
                hdr_delta = opt->opt.optlen;
                call_rcu(&opt->rcu, opt_kfree_rcu);
        }

        return hdr_delta;
}

/**
 * cipso_v4_sock_delattr - Delete the CIPSO option from a socket
 * @sk: the socket
 *
 * Description:
 * Removes the CIPSO option from a socket, if present.
 *
 */
void cipso_v4_sock_delattr(struct sock *sk)
{
        int hdr_delta;
        struct ip_options_rcu *opt;
        struct inet_sock *sk_inet;

        sk_inet = inet_sk(sk);
        opt = rcu_dereference_protected(sk_inet->inet_opt, 1);
        if (opt == NULL || opt->opt.cipso == 0)
                return;

        hdr_delta = cipso_v4_delopt(&sk_inet->inet_opt);
        if (sk_inet->is_icsk && hdr_delta > 0) {
                struct inet_connection_sock *sk_conn = inet_csk(sk);
                sk_conn->icsk_ext_hdr_len -= hdr_delta;
                sk_conn->icsk_sync_mss(sk, sk_conn->icsk_pmtu_cookie);
        }
}

/**
 * cipso_v4_req_delattr - Delete the CIPSO option from a request socket
 * @req: the request socket
 *
 * Description:
 * Removes the CIPSO option from a request socket, if present.
 *
 */
void cipso_v4_req_delattr(struct request_sock *req)
{
        struct ip_options_rcu *opt;
        struct inet_request_sock *req_inet;

        req_inet = inet_rsk(req);
        opt = req_inet->opt;
        if (opt == NULL || opt->opt.cipso == 0)
                return;

        cipso_v4_delopt(&req_inet->opt);
}

/**
 * cipso_v4_getattr - Helper function for the cipso_v4_*_getattr functions
 * @cipso: the CIPSO v4 option
 * @secattr: the security attributes
 *
 * Description:
 * Inspect @cipso and return the security attributes in @secattr. Returns zero
 * on success and negative values on failure.
 *
 */
static int cipso_v4_getattr(const unsigned char *cipso,
                            struct netlbl_lsm_secattr *secattr)
{
        int ret_val = -ENOMSG;
        u32 doi;
        struct cipso_v4_doi *doi_def;

        if (cipso_v4_cache_check(cipso, cipso[1], secattr) == 0)
                return 0;

        doi = get_unaligned_be32(&cipso[2]);
        rcu_read_lock();
        doi_def = cipso_v4_doi_search(doi);
        if (doi_def == NULL)
                goto getattr_return;
        /* XXX - This code assumes only one tag per CIPSO option which isn't
         * really a good assumption to make but since we only support the MAC
         * tags right now it is a safe assumption. */
        switch (cipso[6]) {
        case CIPSO_V4_TAG_RBITMAP:
                ret_val = cipso_v4_parsetag_rbm(doi_def, &cipso[6], secattr);
                break;
        case CIPSO_V4_TAG_ENUM:
                ret_val = cipso_v4_parsetag_enum(doi_def, &cipso[6], secattr);
                break;
        case CIPSO_V4_TAG_RANGE:
                ret_val = cipso_v4_parsetag_rng(doi_def, &cipso[6], secattr);
                break;
        case CIPSO_V4_TAG_LOCAL:
                ret_val = cipso_v4_parsetag_loc(doi_def, &cipso[6], secattr);
                break;
        }
        if (ret_val == 0)
                secattr->type = NETLBL_NLTYPE_CIPSOV4;

getattr_return:
        rcu_read_unlock();
        return ret_val;
}

/**
 * cipso_v4_sock_getattr - Get the security attributes from a sock
 * @sk: the sock
 * @secattr: the security attributes
 *
 * Description:
 * Query @sk to see if there is a CIPSO option attached to the sock and if
 * there is return the CIPSO security attributes in @secattr. This function
 * requires that @sk be locked, or privately held, but it does not do any
 * locking itself. Returns zero on success and negative values on failure.
 *
 */
int cipso_v4_sock_getattr(struct sock *sk, struct netlbl_lsm_secattr *secattr)
{
        struct ip_options_rcu *opt;
        int res = -ENOMSG;

        rcu_read_lock();
        opt = rcu_dereference(inet_sk(sk)->inet_opt);
        if (opt && opt->opt.cipso)
                res = cipso_v4_getattr(opt->opt.__data +
                                       opt->opt.cipso -
                                       sizeof(struct iphdr),
                                       secattr);
        rcu_read_unlock();
        return res;
}

/**
 * cipso_v4_skbuff_setattr - Set the CIPSO option on a packet
 * @skb: the packet
 * @doi_def: the CIPSO DOI to use
 * @secattr: the security attributes
 *
 * Description:
 * Set the CIPSO option on the given packet based on the security attributes.
 * Returns zero on success and negative values on failure.
* */ int cipso_v4_skbuff_setattr(struct sk_buff *skb, const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr) { int ret_val; struct iphdr *iph; struct ip_options *opt = &IPCB(skb)->opt; unsigned char buf[CIPSO_V4_OPT_LEN_MAX]; u32 buf_len = CIPSO_V4_OPT_LEN_MAX; u32 opt_len; int len_delta; ret_val = cipso_v4_genopt(buf, buf_len, doi_def, secattr); if (ret_val < 0) return ret_val; buf_len = ret_val; opt_len = (buf_len + 3) & ~3; /* we overwrite any existing options to ensure that we have enough * room for the CIPSO option, the reason is that we _need_ to guarantee * that the security label is applied to the packet - we do the same * thing when using the socket options and it hasn't caused a problem, * if we need to we can always revisit this choice later */ len_delta = opt_len - opt->optlen; /* if we don't ensure enough headroom we could panic on the skb_push() * call below so make sure we have enough, we are also "mangling" the * packet so we should probably do a copy-on-write call anyway */ ret_val = skb_cow(skb, skb_headroom(skb) + len_delta); if (ret_val < 0) return ret_val; if (len_delta > 0) { /* we assume that the header + opt->optlen have already been * "pushed" in ip_options_build() or similar */ iph = ip_hdr(skb); skb_push(skb, len_delta); memmove((char *)iph - len_delta, iph, iph->ihl << 2); skb_reset_network_header(skb); iph = ip_hdr(skb); } else if (len_delta < 0) { iph = ip_hdr(skb); memset(iph + 1, IPOPT_NOP, opt->optlen); } else iph = ip_hdr(skb); if (opt->optlen > 0) memset(opt, 0, sizeof(*opt)); opt->optlen = opt_len; opt->cipso = sizeof(struct iphdr); opt->is_changed = 1; /* we have to do the following because we are being called from a * netfilter hook which means the packet already has had the header * fields populated and the checksum calculated - yes this means we * are doing more work than needed but we do it to keep the core * stack clean and tidy */ memcpy(iph + 1, buf, buf_len); if (opt_len > buf_len) memset((char *)(iph + 1) + buf_len, 0, opt_len - buf_len); if (len_delta != 0) { iph->ihl = 5 + (opt_len >> 2); iph->tot_len = htons(skb->len); } ip_send_check(iph); return 0; } /** * cipso_v4_skbuff_delattr - Delete any CIPSO options from a packet * @skb: the packet * * Description: * Removes any and all CIPSO options from the given packet. Returns zero on * success, negative values on failure. * */ int cipso_v4_skbuff_delattr(struct sk_buff *skb) { int ret_val; struct iphdr *iph; struct ip_options *opt = &IPCB(skb)->opt; unsigned char *cipso_ptr; if (opt->cipso == 0) return 0; /* since we are changing the packet we should make a copy */ ret_val = skb_cow(skb, skb_headroom(skb)); if (ret_val < 0) return ret_val; /* the easiest thing to do is just replace the cipso option with noop * options since we don't change the size of the packet, although we * still need to recalculate the checksum */ iph = ip_hdr(skb); cipso_ptr = (unsigned char *)iph + opt->cipso; memset(cipso_ptr, IPOPT_NOOP, cipso_ptr[1]); opt->cipso = 0; opt->is_changed = 1; ip_send_check(iph); return 0; } /** * cipso_v4_skbuff_getattr - Get the security attributes from the CIPSO option * @skb: the packet * @secattr: the security attributes * * Description: * Parse the given packet's CIPSO option and return the security attributes. * Returns zero on success and negative values on failure. 
* */ int cipso_v4_skbuff_getattr(const struct sk_buff *skb, struct netlbl_lsm_secattr *secattr) { return cipso_v4_getattr(CIPSO_V4_OPTPTR(skb), secattr); } /* * Setup Functions */ /** * cipso_v4_init - Initialize the CIPSO module * * Description: * Initialize the CIPSO module and prepare it for use. Returns zero on success * and negative values on failure. * */ static int __init cipso_v4_init(void) { int ret_val; ret_val = cipso_v4_cache_init(); if (ret_val != 0) panic("Failed to initialize the CIPSO/IPv4 cache (%d)\n", ret_val); return 0; } subsys_initcall(cipso_v4_init);
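A quick aside on the bitmap helpers that the tag routines above rely on: the bit order is MSB-first, i.e. bit 0 is the high bit of byte 0. The following is a minimal userspace sketch, not kernel code; the helper names mirror the kernel's cipso_v4_bitmap_walk()/cipso_v4_bitmap_setbit() for readability only, and an explicit bounds check is added since the sketch has no caller guaranteeing in-range offsets. It can be compiled standalone to check the category-encoding arithmetic by hand.

#include <stdio.h>

/* Walk @bitmap (@bitmap_len bits, MSB-first) from @offset looking for a bit
 * equal to @state; return the bit position or -1 if no such bit remains. */
static int bitmap_walk(const unsigned char *bitmap, unsigned int bitmap_len,
                       unsigned int offset, int state)
{
        unsigned int bit_spot = offset;
        unsigned int byte_offset = offset / 8;
        unsigned char bitmask = 0x80 >> (offset % 8);
        unsigned char byte;

        if (offset >= bitmap_len)       /* guard added for the sketch */
                return -1;
        byte = bitmap[byte_offset];

        while (bit_spot < bitmap_len) {
                if ((state && (byte & bitmask)) ||
                    (!state && !(byte & bitmask)))
                        return (int)bit_spot;
                bit_spot++;
                bitmask >>= 1;
                if (bitmask == 0) {
                        byte = bitmap[++byte_offset];
                        bitmask = 0x80;
                }
        }
        return -1;
}

/* Set or clear a single MSB-first bit. */
static void bitmap_setbit(unsigned char *bitmap, unsigned int bit, int state)
{
        unsigned char bitmask = 0x80 >> (bit % 8);

        if (state)
                bitmap[bit / 8] |= bitmask;
        else
                bitmap[bit / 8] &= ~bitmask;
}

int main(void)
{
        unsigned char bitmap[3] = { 0 };        /* bits 0..23 */
        int bit;

        bitmap_setbit(bitmap, 0, 1);
        bitmap_setbit(bitmap, 5, 1);
        bitmap_setbit(bitmap, 20, 1);

        for (bit = bitmap_walk(bitmap, 24, 0, 1);
             bit >= 0;
             bit = bitmap_walk(bitmap, 24, (unsigned int)bit + 1, 1))
                printf("category bit %d is set\n", bit);
        return 0;
}

Its output lists bits 0, 5 and 20, and plugging bit 20 into the rbm_hton sizing rule noted earlier gives the expected 3-byte bitmap.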
void cipso_v4_sock_delattr(struct sock *sk) { int hdr_delta; struct ip_options *opt; struct inet_sock *sk_inet; sk_inet = inet_sk(sk); opt = sk_inet->opt; if (opt == NULL || opt->cipso == 0) return; hdr_delta = cipso_v4_delopt(&sk_inet->opt); if (sk_inet->is_icsk && hdr_delta > 0) { struct inet_connection_sock *sk_conn = inet_csk(sk); sk_conn->icsk_ext_hdr_len -= hdr_delta; sk_conn->icsk_sync_mss(sk, sk_conn->icsk_pmtu_cookie); } }
void cipso_v4_sock_delattr(struct sock *sk) { int hdr_delta; struct ip_options_rcu *opt; struct inet_sock *sk_inet; sk_inet = inet_sk(sk); opt = rcu_dereference_protected(sk_inet->inet_opt, 1); if (opt == NULL || opt->opt.cipso == 0) return; hdr_delta = cipso_v4_delopt(&sk_inet->inet_opt); if (sk_inet->is_icsk && hdr_delta > 0) { struct inet_connection_sock *sk_conn = inet_csk(sk); sk_conn->icsk_ext_hdr_len -= hdr_delta; sk_conn->icsk_sync_mss(sk, sk_conn->icsk_pmtu_cookie); } }
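The two versions above are the heart of the fix for CVE-2012-3552 (CWE-362): the pre-patch code dereferences sk_inet->opt directly, so a concurrent setsockopt() path could free the options out from under a reader, while the patched code reaches the options only through the RCU-managed inet_opt pointer (rcu_dereference_protected() here, rcu_read_lock()/rcu_dereference() on the lockless read paths) and retires old option blocks with call_rcu(). The sketch below is a simplified userspace model of that publish/read shape, assuming C11 atomics in place of RCU; struct opts and the function names are hypothetical stand-ins, and the grace period that makes the free safe is only noted in comments.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for struct ip_options_rcu. */
struct opts {
        int optlen;
        int cipso;      /* offset of the CIPSO option, 0 if absent */
};

/* Plays the role of inet_sk(sk)->inet_opt. */
static _Atomic(struct opts *) inet_opt = NULL;

/* Reader: take one acquire-load snapshot and use only that snapshot, the
 * way the patched code reads the pointer once via rcu_dereference(). */
static int sock_getattr(void)
{
        struct opts *opt = atomic_load_explicit(&inet_opt,
                                                memory_order_acquire);

        if (opt == NULL || opt->cipso == 0)
                return -1;
        return opt->cipso;
}

/* Writer: fully build the new options, then publish them with a single
 * release-ordered swap, as rcu_assign_pointer()/xchg() do in the patch.
 * Real RCU defers the free until all readers are done; freeing
 * immediately, as below, is exactly what a grace period prevents. */
static void sock_setattr(int optlen)
{
        struct opts *nopt = malloc(sizeof(*nopt));
        struct opts *old;

        if (nopt == NULL)
                return;
        nopt->optlen = optlen;
        nopt->cipso = 20;       /* sizeof(struct iphdr) */
        old = atomic_exchange_explicit(&inet_opt, nopt,
                                       memory_order_acq_rel);
        free(old);              /* kernel: call_rcu() instead */
}

int main(void)
{
        sock_setattr(40);
        printf("cipso offset: %d\n", sock_getattr());
        free(atomic_load_explicit(&inet_opt, memory_order_acquire));
        return 0;
}

The point of the model is the discipline rather than the mechanism: readers take a single snapshot of the shared pointer, and writers never modify an options block that readers might already hold.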
{'added': [(1860, 'static void opt_kfree_rcu(struct rcu_head *head)'), (1861, '{'), (1862, '\tkfree(container_of(head, struct ip_options_rcu, rcu));'), (1863, '}'), (1864, ''), (1887, '\tstruct ip_options_rcu *old, *opt = NULL;'), (1923, '\tmemcpy(opt->opt.__data, buf, buf_len);'), (1924, '\topt->opt.optlen = opt_len;'), (1925, '\topt->opt.cipso = sizeof(struct iphdr);'), (1930, ''), (1931, '\told = rcu_dereference_protected(sk_inet->inet_opt, sock_owned_by_user(sk));'), (1934, '\t\tif (old)'), (1935, '\t\t\tsk_conn->icsk_ext_hdr_len -= old->opt.optlen;'), (1936, '\t\tsk_conn->icsk_ext_hdr_len += opt->opt.optlen;'), (1939, '\trcu_assign_pointer(sk_inet->inet_opt, opt);'), (1940, '\tif (old)'), (1941, '\t\tcall_rcu(&old->rcu, opt_kfree_rcu);'), (1971, '\tstruct ip_options_rcu *opt = NULL;'), (1999, '\tmemcpy(opt->opt.__data, buf, buf_len);'), (2000, '\topt->opt.optlen = opt_len;'), (2001, '\topt->opt.cipso = sizeof(struct iphdr);'), (2007, '\tif (opt)'), (2008, '\t\tcall_rcu(&opt->rcu, opt_kfree_rcu);'), (2028, 'static int cipso_v4_delopt(struct ip_options_rcu **opt_ptr)'), (2031, '\tstruct ip_options_rcu *opt = *opt_ptr;'), (2033, '\tif (opt->opt.srr || opt->opt.rr || opt->opt.ts || opt->opt.router_alert) {'), (2040, '\t\tcipso_off = opt->opt.cipso - sizeof(struct iphdr);'), (2041, '\t\tcipso_ptr = &opt->opt.__data[cipso_off];'), (2044, '\t\tif (opt->opt.srr > opt->opt.cipso)'), (2045, '\t\t\topt->opt.srr -= cipso_len;'), (2046, '\t\tif (opt->opt.rr > opt->opt.cipso)'), (2047, '\t\t\topt->opt.rr -= cipso_len;'), (2048, '\t\tif (opt->opt.ts > opt->opt.cipso)'), (2049, '\t\t\topt->opt.ts -= cipso_len;'), (2050, '\t\tif (opt->opt.router_alert > opt->opt.cipso)'), (2051, '\t\t\topt->opt.router_alert -= cipso_len;'), (2052, '\t\topt->opt.cipso = 0;'), (2055, '\t\t\topt->opt.optlen - cipso_off - cipso_len);'), (2064, '\t\twhile (iter < opt->opt.optlen)'), (2065, '\t\t\tif (opt->opt.__data[iter] != IPOPT_NOP) {'), (2066, '\t\t\t\titer += opt->opt.__data[iter + 1];'), (2070, '\t\thdr_delta = opt->opt.optlen;'), (2071, '\t\topt->opt.optlen = (optlen_new + 3) & ~3;'), (2072, '\t\thdr_delta -= opt->opt.optlen;'), (2077, '\t\thdr_delta = opt->opt.optlen;'), (2078, '\t\tcall_rcu(&opt->rcu, opt_kfree_rcu);'), (2095, '\tstruct ip_options_rcu *opt;'), (2099, '\topt = rcu_dereference_protected(sk_inet->inet_opt, 1);'), (2100, '\tif (opt == NULL || opt->opt.cipso == 0)'), (2103, '\thdr_delta = cipso_v4_delopt(&sk_inet->inet_opt);'), (2121, '\tstruct ip_options_rcu *opt;'), (2126, '\tif (opt == NULL || opt->opt.cipso == 0)'), (2196, '\tstruct ip_options_rcu *opt;'), (2197, '\tint res = -ENOMSG;'), (2199, '\trcu_read_lock();'), (2200, '\topt = rcu_dereference(inet_sk(sk)->inet_opt);'), (2201, '\tif (opt && opt->opt.cipso)'), (2202, '\t\tres = cipso_v4_getattr(opt->opt.__data +'), (2203, '\t\t\t\t\t\topt->opt.cipso -'), (2204, '\t\t\t\t\t\tsizeof(struct iphdr),'), (2205, '\t\t\t\t secattr);'), (2206, '\trcu_read_unlock();'), (2207, '\treturn res;')], 'deleted': [(1882, '\tstruct ip_options *opt = NULL;'), (1918, '\tmemcpy(opt->__data, buf, buf_len);'), (1919, '\topt->optlen = opt_len;'), (1920, '\topt->cipso = sizeof(struct iphdr);'), (1927, '\t\tif (sk_inet->opt)'), (1928, '\t\t\tsk_conn->icsk_ext_hdr_len -= sk_inet->opt->optlen;'), (1929, '\t\tsk_conn->icsk_ext_hdr_len += opt->optlen;'), (1932, '\topt = xchg(&sk_inet->opt, opt);'), (1933, '\tkfree(opt);'), (1963, '\tstruct ip_options *opt = NULL;'), (1991, '\tmemcpy(opt->__data, buf, buf_len);'), (1992, '\topt->optlen = opt_len;'), (1993, '\topt->cipso = 
sizeof(struct iphdr);'), (1999, '\tkfree(opt);'), (2019, 'static int cipso_v4_delopt(struct ip_options **opt_ptr)'), (2022, '\tstruct ip_options *opt = *opt_ptr;'), (2024, '\tif (opt->srr || opt->rr || opt->ts || opt->router_alert) {'), (2031, '\t\tcipso_off = opt->cipso - sizeof(struct iphdr);'), (2032, '\t\tcipso_ptr = &opt->__data[cipso_off];'), (2035, '\t\tif (opt->srr > opt->cipso)'), (2036, '\t\t\topt->srr -= cipso_len;'), (2037, '\t\tif (opt->rr > opt->cipso)'), (2038, '\t\t\topt->rr -= cipso_len;'), (2039, '\t\tif (opt->ts > opt->cipso)'), (2040, '\t\t\topt->ts -= cipso_len;'), (2041, '\t\tif (opt->router_alert > opt->cipso)'), (2042, '\t\t\topt->router_alert -= cipso_len;'), (2043, '\t\topt->cipso = 0;'), (2046, '\t\t\topt->optlen - cipso_off - cipso_len);'), (2055, '\t\twhile (iter < opt->optlen)'), (2056, '\t\t\tif (opt->__data[iter] != IPOPT_NOP) {'), (2057, '\t\t\t\titer += opt->__data[iter + 1];'), (2061, '\t\thdr_delta = opt->optlen;'), (2062, '\t\topt->optlen = (optlen_new + 3) & ~3;'), (2063, '\t\thdr_delta -= opt->optlen;'), (2068, '\t\thdr_delta = opt->optlen;'), (2069, '\t\tkfree(opt);'), (2086, '\tstruct ip_options *opt;'), (2090, '\topt = sk_inet->opt;'), (2091, '\tif (opt == NULL || opt->cipso == 0)'), (2094, '\thdr_delta = cipso_v4_delopt(&sk_inet->opt);'), (2112, '\tstruct ip_options *opt;'), (2117, '\tif (opt == NULL || opt->cipso == 0)'), (2187, '\tstruct ip_options *opt;'), (2189, '\topt = inet_sk(sk)->opt;'), (2190, '\tif (opt == NULL || opt->cipso == 0)'), (2191, '\t\treturn -ENOMSG;'), (2192, ''), (2193, '\treturn cipso_v4_getattr(opt->__data + opt->cipso - sizeof(struct iphdr),'), (2194, '\t\t\t\tsecattr);')]}
63
50
1,360
7,485
https://github.com/torvalds/linux
CVE-2012-3552
['CWE-362']
cipso_ipv4.c
cipso_v4_sock_getattr
/* * CIPSO - Commercial IP Security Option * * This is an implementation of the CIPSO 2.2 protocol as specified in * draft-ietf-cipso-ipsecurity-01.txt with additional tag types as found in * FIPS-188. While CIPSO never became a full IETF RFC standard many vendors * have chosen to adopt the protocol and over the years it has become a * de-facto standard for labeled networking. * * The CIPSO draft specification can be found in the kernel's Documentation * directory as well as the following URL: * http://tools.ietf.org/id/draft-ietf-cipso-ipsecurity-01.txt * The FIPS-188 specification can be found at the following URL: * http://www.itl.nist.gov/fipspubs/fip188.htm * * Author: Paul Moore <paul.moore@hp.com> * */ /* * (c) Copyright Hewlett-Packard Development Company, L.P., 2006, 2008 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See * the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/init.h> #include <linux/types.h> #include <linux/rcupdate.h> #include <linux/list.h> #include <linux/spinlock.h> #include <linux/string.h> #include <linux/jhash.h> #include <linux/audit.h> #include <linux/slab.h> #include <net/ip.h> #include <net/icmp.h> #include <net/tcp.h> #include <net/netlabel.h> #include <net/cipso_ipv4.h> #include <asm/atomic.h> #include <asm/bug.h> #include <asm/unaligned.h> /* List of available DOI definitions */ /* XXX - This currently assumes a minimal number of different DOIs in use, * if in practice there are a lot of different DOIs this list should * probably be turned into a hash table or something similar so we * can do quick lookups. */ static DEFINE_SPINLOCK(cipso_v4_doi_list_lock); static LIST_HEAD(cipso_v4_doi_list); /* Label mapping cache */ int cipso_v4_cache_enabled = 1; int cipso_v4_cache_bucketsize = 10; #define CIPSO_V4_CACHE_BUCKETBITS 7 #define CIPSO_V4_CACHE_BUCKETS (1 << CIPSO_V4_CACHE_BUCKETBITS) #define CIPSO_V4_CACHE_REORDERLIMIT 10 struct cipso_v4_map_cache_bkt { spinlock_t lock; u32 size; struct list_head list; }; struct cipso_v4_map_cache_entry { u32 hash; unsigned char *key; size_t key_len; struct netlbl_lsm_cache *lsm_data; u32 activity; struct list_head list; }; static struct cipso_v4_map_cache_bkt *cipso_v4_cache = NULL; /* Restricted bitmap (tag #1) flags */ int cipso_v4_rbm_optfmt = 0; int cipso_v4_rbm_strictvalid = 1; /* * Protocol Constants */ /* Maximum size of the CIPSO IP option, derived from the fact that the maximum * IPv4 header size is 60 bytes and the base IPv4 header is 20 bytes long. */ #define CIPSO_V4_OPT_LEN_MAX 40 /* Length of the base CIPSO option, this includes the option type (1 byte), the * option length (1 byte), and the DOI (4 bytes). */ #define CIPSO_V4_HDR_LEN 6 /* Base length of the restrictive category bitmap tag (tag #1). */ #define CIPSO_V4_TAG_RBM_BLEN 4 /* Base length of the enumerated category tag (tag #2). */ #define CIPSO_V4_TAG_ENUM_BLEN 4 /* Base length of the ranged categories bitmap tag (tag #5). 
*/ #define CIPSO_V4_TAG_RNG_BLEN 4 /* The maximum number of category ranges permitted in the ranged category tag * (tag #5). You may note that the IETF draft states that the maximum number * of category ranges is 7, but if the low end of the last category range is * zero then it is possible to fit 8 category ranges because the zero should * be omitted. */ #define CIPSO_V4_TAG_RNG_CAT_MAX 8 /* Base length of the local tag (non-standard tag). * Tag definition (may change between kernel versions) * * 0 8 16 24 32 * +----------+----------+----------+----------+ * | 10000000 | 00000110 | 32-bit secid value | * +----------+----------+----------+----------+ * | in (host byte order)| * +----------+----------+ * */ #define CIPSO_V4_TAG_LOC_BLEN 6 /* * Helper Functions */ /** * cipso_v4_bitmap_walk - Walk a bitmap looking for a bit * @bitmap: the bitmap * @bitmap_len: length in bits * @offset: starting offset * @state: if non-zero, look for a set (1) bit else look for a cleared (0) bit * * Description: * Starting at @offset, walk the bitmap from left to right until either the * desired bit is found or we reach the end. Return the bit offset, -1 if * not found, or -2 if error. */ static int cipso_v4_bitmap_walk(const unsigned char *bitmap, u32 bitmap_len, u32 offset, u8 state) { u32 bit_spot; u32 byte_offset; unsigned char bitmask; unsigned char byte; /* gcc always rounds to zero when doing integer division */ byte_offset = offset / 8; byte = bitmap[byte_offset]; bit_spot = offset; bitmask = 0x80 >> (offset % 8); while (bit_spot < bitmap_len) { if ((state && (byte & bitmask) == bitmask) || (state == 0 && (byte & bitmask) == 0)) return bit_spot; bit_spot++; bitmask >>= 1; if (bitmask == 0) { byte = bitmap[++byte_offset]; bitmask = 0x80; } } return -1; } /** * cipso_v4_bitmap_setbit - Sets a single bit in a bitmap * @bitmap: the bitmap * @bit: the bit * @state: if non-zero, set the bit (1) else clear the bit (0) * * Description: * Set a single bit in the bitmask. Returns zero on success, negative values * on error. */ static void cipso_v4_bitmap_setbit(unsigned char *bitmap, u32 bit, u8 state) { u32 byte_spot; u8 bitmask; /* gcc always rounds to zero when doing integer division */ byte_spot = bit / 8; bitmask = 0x80 >> (bit % 8); if (state) bitmap[byte_spot] |= bitmask; else bitmap[byte_spot] &= ~bitmask; } /** * cipso_v4_cache_entry_free - Frees a cache entry * @entry: the entry to free * * Description: * This function frees the memory associated with a cache entry including the * LSM cache data if there are no longer any users, i.e. reference count == 0. * */ static void cipso_v4_cache_entry_free(struct cipso_v4_map_cache_entry *entry) { if (entry->lsm_data) netlbl_secattr_cache_free(entry->lsm_data); kfree(entry->key); kfree(entry); } /** * cipso_v4_map_cache_hash - Hashing function for the CIPSO cache * @key: the hash key * @key_len: the length of the key in bytes * * Description: * The CIPSO tag hashing function. Returns a 32-bit hash value. * */ static u32 cipso_v4_map_cache_hash(const unsigned char *key, u32 key_len) { return jhash(key, key_len, 0); } /* * Label Mapping Cache Functions */ /** * cipso_v4_cache_init - Initialize the CIPSO cache * * Description: * Initializes the CIPSO label mapping cache, this function should be called * before any of the other functions defined in this file. Returns zero on * success, negative values on error. 
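 *
 * A worked example of the bitmap helpers above (illustrative values
 * only): for the one-byte bitmap 0x40, binary 01000000, bit 0 is the
 * most significant bit of the first byte, so
 *
 *     cipso_v4_bitmap_walk(bm, 8, 0, 1) returns 1, and
 *     cipso_v4_bitmap_walk(bm, 8, 2, 1) returns -1
 *
 * because the only set bit sits at offset 1 and nothing is set after it.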
* */ static int cipso_v4_cache_init(void) { u32 iter; cipso_v4_cache = kcalloc(CIPSO_V4_CACHE_BUCKETS, sizeof(struct cipso_v4_map_cache_bkt), GFP_KERNEL); if (cipso_v4_cache == NULL) return -ENOMEM; for (iter = 0; iter < CIPSO_V4_CACHE_BUCKETS; iter++) { spin_lock_init(&cipso_v4_cache[iter].lock); cipso_v4_cache[iter].size = 0; INIT_LIST_HEAD(&cipso_v4_cache[iter].list); } return 0; } /** * cipso_v4_cache_invalidate - Invalidates the current CIPSO cache * * Description: * Invalidates and frees any entries in the CIPSO cache. Returns zero on * success and negative values on failure. * */ void cipso_v4_cache_invalidate(void) { struct cipso_v4_map_cache_entry *entry, *tmp_entry; u32 iter; for (iter = 0; iter < CIPSO_V4_CACHE_BUCKETS; iter++) { spin_lock_bh(&cipso_v4_cache[iter].lock); list_for_each_entry_safe(entry, tmp_entry, &cipso_v4_cache[iter].list, list) { list_del(&entry->list); cipso_v4_cache_entry_free(entry); } cipso_v4_cache[iter].size = 0; spin_unlock_bh(&cipso_v4_cache[iter].lock); } } /** * cipso_v4_cache_check - Check the CIPSO cache for a label mapping * @key: the buffer to check * @key_len: buffer length in bytes * @secattr: the security attribute struct to use * * Description: * This function checks the cache to see if a label mapping already exists for * the given key. If there is a match then the cache is adjusted and the * @secattr struct is populated with the correct LSM security attributes. The * cache is adjusted in the following manner if the entry is not already the * first in the cache bucket: * * 1. The cache entry's activity counter is incremented * 2. The previous (higher ranking) entry's activity counter is decremented * 3. If the difference between the two activity counters is geater than * CIPSO_V4_CACHE_REORDERLIMIT the two entries are swapped * * Returns zero on success, -ENOENT for a cache miss, and other negative values * on error. * */ static int cipso_v4_cache_check(const unsigned char *key, u32 key_len, struct netlbl_lsm_secattr *secattr) { u32 bkt; struct cipso_v4_map_cache_entry *entry; struct cipso_v4_map_cache_entry *prev_entry = NULL; u32 hash; if (!cipso_v4_cache_enabled) return -ENOENT; hash = cipso_v4_map_cache_hash(key, key_len); bkt = hash & (CIPSO_V4_CACHE_BUCKETS - 1); spin_lock_bh(&cipso_v4_cache[bkt].lock); list_for_each_entry(entry, &cipso_v4_cache[bkt].list, list) { if (entry->hash == hash && entry->key_len == key_len && memcmp(entry->key, key, key_len) == 0) { entry->activity += 1; atomic_inc(&entry->lsm_data->refcount); secattr->cache = entry->lsm_data; secattr->flags |= NETLBL_SECATTR_CACHE; secattr->type = NETLBL_NLTYPE_CIPSOV4; if (prev_entry == NULL) { spin_unlock_bh(&cipso_v4_cache[bkt].lock); return 0; } if (prev_entry->activity > 0) prev_entry->activity -= 1; if (entry->activity > prev_entry->activity && entry->activity - prev_entry->activity > CIPSO_V4_CACHE_REORDERLIMIT) { __list_del(entry->list.prev, entry->list.next); __list_add(&entry->list, prev_entry->list.prev, &prev_entry->list); } spin_unlock_bh(&cipso_v4_cache[bkt].lock); return 0; } prev_entry = entry; } spin_unlock_bh(&cipso_v4_cache[bkt].lock); return -ENOENT; } /** * cipso_v4_cache_add - Add an entry to the CIPSO cache * @skb: the packet * @secattr: the packet's security attributes * * Description: * Add a new entry into the CIPSO label mapping cache. Add the new entry to * head of the cache bucket's list, if the cache bucket is out of room remove * the last entry in the list first. It is important to note that there is * currently no checking for duplicate keys. 
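 *
 * To put concrete numbers on the reordering rule in
 * cipso_v4_cache_check() above (an illustration, not extra behaviour):
 * if, after the two counters are adjusted, the matching entry has an
 * activity of 15 while the entry ahead of it has 4, the difference of 11
 * exceeds CIPSO_V4_CACHE_REORDERLIMIT (10) and the entries swap places;
 * a difference of exactly 10 leaves the ordering alone.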
Returns zero on success, * negative values on failure. * */ int cipso_v4_cache_add(const struct sk_buff *skb, const struct netlbl_lsm_secattr *secattr) { int ret_val = -EPERM; u32 bkt; struct cipso_v4_map_cache_entry *entry = NULL; struct cipso_v4_map_cache_entry *old_entry = NULL; unsigned char *cipso_ptr; u32 cipso_ptr_len; if (!cipso_v4_cache_enabled || cipso_v4_cache_bucketsize <= 0) return 0; cipso_ptr = CIPSO_V4_OPTPTR(skb); cipso_ptr_len = cipso_ptr[1]; entry = kzalloc(sizeof(*entry), GFP_ATOMIC); if (entry == NULL) return -ENOMEM; entry->key = kmemdup(cipso_ptr, cipso_ptr_len, GFP_ATOMIC); if (entry->key == NULL) { ret_val = -ENOMEM; goto cache_add_failure; } entry->key_len = cipso_ptr_len; entry->hash = cipso_v4_map_cache_hash(cipso_ptr, cipso_ptr_len); atomic_inc(&secattr->cache->refcount); entry->lsm_data = secattr->cache; bkt = entry->hash & (CIPSO_V4_CACHE_BUCKETS - 1); spin_lock_bh(&cipso_v4_cache[bkt].lock); if (cipso_v4_cache[bkt].size < cipso_v4_cache_bucketsize) { list_add(&entry->list, &cipso_v4_cache[bkt].list); cipso_v4_cache[bkt].size += 1; } else { old_entry = list_entry(cipso_v4_cache[bkt].list.prev, struct cipso_v4_map_cache_entry, list); list_del(&old_entry->list); list_add(&entry->list, &cipso_v4_cache[bkt].list); cipso_v4_cache_entry_free(old_entry); } spin_unlock_bh(&cipso_v4_cache[bkt].lock); return 0; cache_add_failure: if (entry) cipso_v4_cache_entry_free(entry); return ret_val; } /* * DOI List Functions */ /** * cipso_v4_doi_search - Searches for a DOI definition * @doi: the DOI to search for * * Description: * Search the DOI definition list for a DOI definition with a DOI value that * matches @doi. The caller is responsible for calling rcu_read_[un]lock(). * Returns a pointer to the DOI definition on success and NULL on failure. */ static struct cipso_v4_doi *cipso_v4_doi_search(u32 doi) { struct cipso_v4_doi *iter; list_for_each_entry_rcu(iter, &cipso_v4_doi_list, list) if (iter->doi == doi && atomic_read(&iter->refcount)) return iter; return NULL; } /** * cipso_v4_doi_add - Add a new DOI to the CIPSO protocol engine * @doi_def: the DOI structure * @audit_info: NetLabel audit information * * Description: * The caller defines a new DOI for use by the CIPSO engine and calls this * function to add it to the list of acceptable domains. The caller must * ensure that the mapping table specified in @doi_def->map meets all of the * requirements of the mapping type (see cipso_ipv4.h for details). Returns * zero on success and non-zero on failure. 
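 *
 * A minimal caller-side sketch (variable names and the DOI value are
 * invented for illustration; the in-tree callers live in the NetLabel
 * management code):
 *
 *     struct cipso_v4_doi *doi_def;
 *
 *     doi_def = kzalloc(sizeof(*doi_def), GFP_KERNEL);
 *     if (doi_def == NULL)
 *             return -ENOMEM;
 *     doi_def->doi = 3;
 *     doi_def->type = CIPSO_V4_MAP_PASS;
 *     doi_def->tags[0] = CIPSO_V4_TAG_ENUM;
 *     ret_val = cipso_v4_doi_add(doi_def, &audit_info);
 *
 * kzalloc() leaves the remaining tag slots at zero, i.e.
 * CIPSO_V4_TAG_INVALID, so only the leading tag types need to be set.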
* */ int cipso_v4_doi_add(struct cipso_v4_doi *doi_def, struct netlbl_audit *audit_info) { int ret_val = -EINVAL; u32 iter; u32 doi; u32 doi_type; struct audit_buffer *audit_buf; doi = doi_def->doi; doi_type = doi_def->type; if (doi_def == NULL || doi_def->doi == CIPSO_V4_DOI_UNKNOWN) goto doi_add_return; for (iter = 0; iter < CIPSO_V4_TAG_MAXCNT; iter++) { switch (doi_def->tags[iter]) { case CIPSO_V4_TAG_RBITMAP: break; case CIPSO_V4_TAG_RANGE: case CIPSO_V4_TAG_ENUM: if (doi_def->type != CIPSO_V4_MAP_PASS) goto doi_add_return; break; case CIPSO_V4_TAG_LOCAL: if (doi_def->type != CIPSO_V4_MAP_LOCAL) goto doi_add_return; break; case CIPSO_V4_TAG_INVALID: if (iter == 0) goto doi_add_return; break; default: goto doi_add_return; } } atomic_set(&doi_def->refcount, 1); spin_lock(&cipso_v4_doi_list_lock); if (cipso_v4_doi_search(doi_def->doi) != NULL) { spin_unlock(&cipso_v4_doi_list_lock); ret_val = -EEXIST; goto doi_add_return; } list_add_tail_rcu(&doi_def->list, &cipso_v4_doi_list); spin_unlock(&cipso_v4_doi_list_lock); ret_val = 0; doi_add_return: audit_buf = netlbl_audit_start(AUDIT_MAC_CIPSOV4_ADD, audit_info); if (audit_buf != NULL) { const char *type_str; switch (doi_type) { case CIPSO_V4_MAP_TRANS: type_str = "trans"; break; case CIPSO_V4_MAP_PASS: type_str = "pass"; break; case CIPSO_V4_MAP_LOCAL: type_str = "local"; break; default: type_str = "(unknown)"; } audit_log_format(audit_buf, " cipso_doi=%u cipso_type=%s res=%u", doi, type_str, ret_val == 0 ? 1 : 0); audit_log_end(audit_buf); } return ret_val; } /** * cipso_v4_doi_free - Frees a DOI definition * @entry: the entry's RCU field * * Description: * This function frees all of the memory associated with a DOI definition. * */ void cipso_v4_doi_free(struct cipso_v4_doi *doi_def) { if (doi_def == NULL) return; switch (doi_def->type) { case CIPSO_V4_MAP_TRANS: kfree(doi_def->map.std->lvl.cipso); kfree(doi_def->map.std->lvl.local); kfree(doi_def->map.std->cat.cipso); kfree(doi_def->map.std->cat.local); break; } kfree(doi_def); } /** * cipso_v4_doi_free_rcu - Frees a DOI definition via the RCU pointer * @entry: the entry's RCU field * * Description: * This function is designed to be used as a callback to the call_rcu() * function so that the memory allocated to the DOI definition can be released * safely. * */ static void cipso_v4_doi_free_rcu(struct rcu_head *entry) { struct cipso_v4_doi *doi_def; doi_def = container_of(entry, struct cipso_v4_doi, rcu); cipso_v4_doi_free(doi_def); } /** * cipso_v4_doi_remove - Remove an existing DOI from the CIPSO protocol engine * @doi: the DOI value * @audit_secid: the LSM secid to use in the audit message * * Description: * Removes a DOI definition from the CIPSO engine. The NetLabel routines will * be called to release their own LSM domain mappings as well as our own * domain list. Returns zero on success and negative values on failure. 
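 *
 * For reference, a successful cipso_v4_doi_add() call above emits an
 * audit record whose CIPSO-specific portion looks like (values
 * illustrative):
 *
 *     cipso_doi=3 cipso_type=pass res=1
 *
 * with res=0 reported instead when the add fails.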
* */ int cipso_v4_doi_remove(u32 doi, struct netlbl_audit *audit_info) { int ret_val; struct cipso_v4_doi *doi_def; struct audit_buffer *audit_buf; spin_lock(&cipso_v4_doi_list_lock); doi_def = cipso_v4_doi_search(doi); if (doi_def == NULL) { spin_unlock(&cipso_v4_doi_list_lock); ret_val = -ENOENT; goto doi_remove_return; } if (!atomic_dec_and_test(&doi_def->refcount)) { spin_unlock(&cipso_v4_doi_list_lock); ret_val = -EBUSY; goto doi_remove_return; } list_del_rcu(&doi_def->list); spin_unlock(&cipso_v4_doi_list_lock); cipso_v4_cache_invalidate(); call_rcu(&doi_def->rcu, cipso_v4_doi_free_rcu); ret_val = 0; doi_remove_return: audit_buf = netlbl_audit_start(AUDIT_MAC_CIPSOV4_DEL, audit_info); if (audit_buf != NULL) { audit_log_format(audit_buf, " cipso_doi=%u res=%u", doi, ret_val == 0 ? 1 : 0); audit_log_end(audit_buf); } return ret_val; } /** * cipso_v4_doi_getdef - Returns a reference to a valid DOI definition * @doi: the DOI value * * Description: * Searches for a valid DOI definition and if one is found it is returned to * the caller. Otherwise NULL is returned. The caller must ensure that * rcu_read_lock() is held while accessing the returned definition and the DOI * definition reference count is decremented when the caller is done. * */ struct cipso_v4_doi *cipso_v4_doi_getdef(u32 doi) { struct cipso_v4_doi *doi_def; rcu_read_lock(); doi_def = cipso_v4_doi_search(doi); if (doi_def == NULL) goto doi_getdef_return; if (!atomic_inc_not_zero(&doi_def->refcount)) doi_def = NULL; doi_getdef_return: rcu_read_unlock(); return doi_def; } /** * cipso_v4_doi_putdef - Releases a reference for the given DOI definition * @doi_def: the DOI definition * * Description: * Releases a DOI definition reference obtained from cipso_v4_doi_getdef(). * */ void cipso_v4_doi_putdef(struct cipso_v4_doi *doi_def) { if (doi_def == NULL) return; if (!atomic_dec_and_test(&doi_def->refcount)) return; spin_lock(&cipso_v4_doi_list_lock); list_del_rcu(&doi_def->list); spin_unlock(&cipso_v4_doi_list_lock); cipso_v4_cache_invalidate(); call_rcu(&doi_def->rcu, cipso_v4_doi_free_rcu); } /** * cipso_v4_doi_walk - Iterate through the DOI definitions * @skip_cnt: skip past this number of DOI definitions, updated * @callback: callback for each DOI definition * @cb_arg: argument for the callback function * * Description: * Iterate over the DOI definition list, skipping the first @skip_cnt entries. * For each entry call @callback, if @callback returns a negative value stop * 'walking' through the list and return. Updates the value in @skip_cnt upon * return. Returns zero on success, negative values on failure. * */ int cipso_v4_doi_walk(u32 *skip_cnt, int (*callback) (struct cipso_v4_doi *doi_def, void *arg), void *cb_arg) { int ret_val = -ENOENT; u32 doi_cnt = 0; struct cipso_v4_doi *iter_doi; rcu_read_lock(); list_for_each_entry_rcu(iter_doi, &cipso_v4_doi_list, list) if (atomic_read(&iter_doi->refcount) > 0) { if (doi_cnt++ < *skip_cnt) continue; ret_val = callback(iter_doi, cb_arg); if (ret_val < 0) { doi_cnt--; goto doi_walk_return; } } doi_walk_return: rcu_read_unlock(); *skip_cnt = doi_cnt; return ret_val; } /* * Label Mapping Functions */ /** * cipso_v4_map_lvl_valid - Checks to see if the given level is understood * @doi_def: the DOI definition * @level: the level to check * * Description: * Checks the given level against the given DOI definition and returns a * negative value if the level does not have a valid mapping and a zero value * if the level is defined by the DOI. 
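 *
 * The two reference helpers above are normally paired like this (a
 * sketch with error handling trimmed; see the cipso_v4_doi_getdef()
 * description for the locking expectations):
 *
 *     struct cipso_v4_doi *doi_def;
 *
 *     doi_def = cipso_v4_doi_getdef(doi);
 *     if (doi_def == NULL)
 *             return -ENOENT;
 *     ... use the definition ...
 *     cipso_v4_doi_putdef(doi_def);
 *
 * which guarantees the definition is not freed while it is being used.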
* */ static int cipso_v4_map_lvl_valid(const struct cipso_v4_doi *doi_def, u8 level) { switch (doi_def->type) { case CIPSO_V4_MAP_PASS: return 0; case CIPSO_V4_MAP_TRANS: if (doi_def->map.std->lvl.cipso[level] < CIPSO_V4_INV_LVL) return 0; break; } return -EFAULT; } /** * cipso_v4_map_lvl_hton - Perform a level mapping from the host to the network * @doi_def: the DOI definition * @host_lvl: the host MLS level * @net_lvl: the network/CIPSO MLS level * * Description: * Perform a label mapping to translate a local MLS level to the correct * CIPSO level using the given DOI definition. Returns zero on success, * negative values otherwise. * */ static int cipso_v4_map_lvl_hton(const struct cipso_v4_doi *doi_def, u32 host_lvl, u32 *net_lvl) { switch (doi_def->type) { case CIPSO_V4_MAP_PASS: *net_lvl = host_lvl; return 0; case CIPSO_V4_MAP_TRANS: if (host_lvl < doi_def->map.std->lvl.local_size && doi_def->map.std->lvl.local[host_lvl] < CIPSO_V4_INV_LVL) { *net_lvl = doi_def->map.std->lvl.local[host_lvl]; return 0; } return -EPERM; } return -EINVAL; } /** * cipso_v4_map_lvl_ntoh - Perform a level mapping from the network to the host * @doi_def: the DOI definition * @net_lvl: the network/CIPSO MLS level * @host_lvl: the host MLS level * * Description: * Perform a label mapping to translate a CIPSO level to the correct local MLS * level using the given DOI definition. Returns zero on success, negative * values otherwise. * */ static int cipso_v4_map_lvl_ntoh(const struct cipso_v4_doi *doi_def, u32 net_lvl, u32 *host_lvl) { struct cipso_v4_std_map_tbl *map_tbl; switch (doi_def->type) { case CIPSO_V4_MAP_PASS: *host_lvl = net_lvl; return 0; case CIPSO_V4_MAP_TRANS: map_tbl = doi_def->map.std; if (net_lvl < map_tbl->lvl.cipso_size && map_tbl->lvl.cipso[net_lvl] < CIPSO_V4_INV_LVL) { *host_lvl = doi_def->map.std->lvl.cipso[net_lvl]; return 0; } return -EPERM; } return -EINVAL; } /** * cipso_v4_map_cat_rbm_valid - Checks to see if the category bitmap is valid * @doi_def: the DOI definition * @bitmap: category bitmap * @bitmap_len: bitmap length in bytes * * Description: * Checks the given category bitmap against the given DOI definition and * returns a negative value if any of the categories in the bitmap do not have * a valid mapping and a zero value if all of the categories are valid. * */ static int cipso_v4_map_cat_rbm_valid(const struct cipso_v4_doi *doi_def, const unsigned char *bitmap, u32 bitmap_len) { int cat = -1; u32 bitmap_len_bits = bitmap_len * 8; u32 cipso_cat_size; u32 *cipso_array; switch (doi_def->type) { case CIPSO_V4_MAP_PASS: return 0; case CIPSO_V4_MAP_TRANS: cipso_cat_size = doi_def->map.std->cat.cipso_size; cipso_array = doi_def->map.std->cat.cipso; for (;;) { cat = cipso_v4_bitmap_walk(bitmap, bitmap_len_bits, cat + 1, 1); if (cat < 0) break; if (cat >= cipso_cat_size || cipso_array[cat] >= CIPSO_V4_INV_CAT) return -EFAULT; } if (cat == -1) return 0; break; } return -EFAULT; } /** * cipso_v4_map_cat_rbm_hton - Perform a category mapping from host to network * @doi_def: the DOI definition * @secattr: the security attributes * @net_cat: the zero'd out category bitmap in network/CIPSO format * @net_cat_len: the length of the CIPSO bitmap in bytes * * Description: * Perform a label mapping to translate a local MLS category bitmap to the * correct CIPSO bitmap using the given DOI definition. Returns the minimum * size in bytes of the network bitmap on success, negative values otherwise. 
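 *
 * As a concrete example of the level translation above (table contents
 * invented): with a CIPSO_V4_MAP_TRANS mapping where lvl.local[2] == 7,
 * cipso_v4_map_lvl_hton() turns host level 2 into network level 7, and
 * with lvl.cipso[7] == 2 the matching cipso_v4_map_lvl_ntoh() call maps
 * it back again; a CIPSO_V4_MAP_PASS mapping simply copies the value in
 * both directions.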
* */ static int cipso_v4_map_cat_rbm_hton(const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr, unsigned char *net_cat, u32 net_cat_len) { int host_spot = -1; u32 net_spot = CIPSO_V4_INV_CAT; u32 net_spot_max = 0; u32 net_clen_bits = net_cat_len * 8; u32 host_cat_size = 0; u32 *host_cat_array = NULL; if (doi_def->type == CIPSO_V4_MAP_TRANS) { host_cat_size = doi_def->map.std->cat.local_size; host_cat_array = doi_def->map.std->cat.local; } for (;;) { host_spot = netlbl_secattr_catmap_walk(secattr->attr.mls.cat, host_spot + 1); if (host_spot < 0) break; switch (doi_def->type) { case CIPSO_V4_MAP_PASS: net_spot = host_spot; break; case CIPSO_V4_MAP_TRANS: if (host_spot >= host_cat_size) return -EPERM; net_spot = host_cat_array[host_spot]; if (net_spot >= CIPSO_V4_INV_CAT) return -EPERM; break; } if (net_spot >= net_clen_bits) return -ENOSPC; cipso_v4_bitmap_setbit(net_cat, net_spot, 1); if (net_spot > net_spot_max) net_spot_max = net_spot; } if (++net_spot_max % 8) return net_spot_max / 8 + 1; return net_spot_max / 8; } /** * cipso_v4_map_cat_rbm_ntoh - Perform a category mapping from network to host * @doi_def: the DOI definition * @net_cat: the category bitmap in network/CIPSO format * @net_cat_len: the length of the CIPSO bitmap in bytes * @secattr: the security attributes * * Description: * Perform a label mapping to translate a CIPSO bitmap to the correct local * MLS category bitmap using the given DOI definition. Returns zero on * success, negative values on failure. * */ static int cipso_v4_map_cat_rbm_ntoh(const struct cipso_v4_doi *doi_def, const unsigned char *net_cat, u32 net_cat_len, struct netlbl_lsm_secattr *secattr) { int ret_val; int net_spot = -1; u32 host_spot = CIPSO_V4_INV_CAT; u32 net_clen_bits = net_cat_len * 8; u32 net_cat_size = 0; u32 *net_cat_array = NULL; if (doi_def->type == CIPSO_V4_MAP_TRANS) { net_cat_size = doi_def->map.std->cat.cipso_size; net_cat_array = doi_def->map.std->cat.cipso; } for (;;) { net_spot = cipso_v4_bitmap_walk(net_cat, net_clen_bits, net_spot + 1, 1); if (net_spot < 0) { if (net_spot == -2) return -EFAULT; return 0; } switch (doi_def->type) { case CIPSO_V4_MAP_PASS: host_spot = net_spot; break; case CIPSO_V4_MAP_TRANS: if (net_spot >= net_cat_size) return -EPERM; host_spot = net_cat_array[net_spot]; if (host_spot >= CIPSO_V4_INV_CAT) return -EPERM; break; } ret_val = netlbl_secattr_catmap_setbit(secattr->attr.mls.cat, host_spot, GFP_ATOMIC); if (ret_val != 0) return ret_val; } return -EINVAL; } /** * cipso_v4_map_cat_enum_valid - Checks to see if the categories are valid * @doi_def: the DOI definition * @enumcat: category list * @enumcat_len: length of the category list in bytes * * Description: * Checks the given categories against the given DOI definition and returns a * negative value if any of the categories do not have a valid mapping and a * zero value if all of the categories are valid. 
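 *
 * The bitmap length returned by cipso_v4_map_cat_rbm_hton() above is
 * rounded up to whole bytes; for example (categories invented), if the
 * highest category bit set is 9 then net_spot_max becomes 10 after the
 * increment, 10 % 8 is non-zero, and the function returns
 * 10 / 8 + 1 == 2 bytes of bitmap.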
* */ static int cipso_v4_map_cat_enum_valid(const struct cipso_v4_doi *doi_def, const unsigned char *enumcat, u32 enumcat_len) { u16 cat; int cat_prev = -1; u32 iter; if (doi_def->type != CIPSO_V4_MAP_PASS || enumcat_len & 0x01) return -EFAULT; for (iter = 0; iter < enumcat_len; iter += 2) { cat = get_unaligned_be16(&enumcat[iter]); if (cat <= cat_prev) return -EFAULT; cat_prev = cat; } return 0; } /** * cipso_v4_map_cat_enum_hton - Perform a category mapping from host to network * @doi_def: the DOI definition * @secattr: the security attributes * @net_cat: the zero'd out category list in network/CIPSO format * @net_cat_len: the length of the CIPSO category list in bytes * * Description: * Perform a label mapping to translate a local MLS category bitmap to the * correct CIPSO category list using the given DOI definition. Returns the * size in bytes of the network category bitmap on success, negative values * otherwise. * */ static int cipso_v4_map_cat_enum_hton(const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr, unsigned char *net_cat, u32 net_cat_len) { int cat = -1; u32 cat_iter = 0; for (;;) { cat = netlbl_secattr_catmap_walk(secattr->attr.mls.cat, cat + 1); if (cat < 0) break; if ((cat_iter + 2) > net_cat_len) return -ENOSPC; *((__be16 *)&net_cat[cat_iter]) = htons(cat); cat_iter += 2; } return cat_iter; } /** * cipso_v4_map_cat_enum_ntoh - Perform a category mapping from network to host * @doi_def: the DOI definition * @net_cat: the category list in network/CIPSO format * @net_cat_len: the length of the CIPSO bitmap in bytes * @secattr: the security attributes * * Description: * Perform a label mapping to translate a CIPSO category list to the correct * local MLS category bitmap using the given DOI definition. Returns zero on * success, negative values on failure. * */ static int cipso_v4_map_cat_enum_ntoh(const struct cipso_v4_doi *doi_def, const unsigned char *net_cat, u32 net_cat_len, struct netlbl_lsm_secattr *secattr) { int ret_val; u32 iter; for (iter = 0; iter < net_cat_len; iter += 2) { ret_val = netlbl_secattr_catmap_setbit(secattr->attr.mls.cat, get_unaligned_be16(&net_cat[iter]), GFP_ATOMIC); if (ret_val != 0) return ret_val; } return 0; } /** * cipso_v4_map_cat_rng_valid - Checks to see if the categories are valid * @doi_def: the DOI definition * @rngcat: category list * @rngcat_len: length of the category list in bytes * * Description: * Checks the given categories against the given DOI definition and returns a * negative value if any of the categories do not have a valid mapping and a * zero value if all of the categories are valid. 
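 *
 * On the wire the enumerated categories handled above are just a
 * strictly ascending sequence of 16-bit big-endian values; e.g. the
 * categories 4 and 23 (picked arbitrarily) are encoded as the four
 * bytes 00 04 00 17, which is what cipso_v4_map_cat_enum_hton() emits
 * and what the _valid() and _ntoh() helpers expect to parse.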
* */ static int cipso_v4_map_cat_rng_valid(const struct cipso_v4_doi *doi_def, const unsigned char *rngcat, u32 rngcat_len) { u16 cat_high; u16 cat_low; u32 cat_prev = CIPSO_V4_MAX_REM_CATS + 1; u32 iter; if (doi_def->type != CIPSO_V4_MAP_PASS || rngcat_len & 0x01) return -EFAULT; for (iter = 0; iter < rngcat_len; iter += 4) { cat_high = get_unaligned_be16(&rngcat[iter]); if ((iter + 4) <= rngcat_len) cat_low = get_unaligned_be16(&rngcat[iter + 2]); else cat_low = 0; if (cat_high > cat_prev) return -EFAULT; cat_prev = cat_low; } return 0; } /** * cipso_v4_map_cat_rng_hton - Perform a category mapping from host to network * @doi_def: the DOI definition * @secattr: the security attributes * @net_cat: the zero'd out category list in network/CIPSO format * @net_cat_len: the length of the CIPSO category list in bytes * * Description: * Perform a label mapping to translate a local MLS category bitmap to the * correct CIPSO category list using the given DOI definition. Returns the * size in bytes of the network category bitmap on success, negative values * otherwise. * */ static int cipso_v4_map_cat_rng_hton(const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr, unsigned char *net_cat, u32 net_cat_len) { int iter = -1; u16 array[CIPSO_V4_TAG_RNG_CAT_MAX * 2]; u32 array_cnt = 0; u32 cat_size = 0; /* make sure we don't overflow the 'array[]' variable */ if (net_cat_len > (CIPSO_V4_OPT_LEN_MAX - CIPSO_V4_HDR_LEN - CIPSO_V4_TAG_RNG_BLEN)) return -ENOSPC; for (;;) { iter = netlbl_secattr_catmap_walk(secattr->attr.mls.cat, iter + 1); if (iter < 0) break; cat_size += (iter == 0 ? 0 : sizeof(u16)); if (cat_size > net_cat_len) return -ENOSPC; array[array_cnt++] = iter; iter = netlbl_secattr_catmap_walk_rng(secattr->attr.mls.cat, iter); if (iter < 0) return -EFAULT; cat_size += sizeof(u16); if (cat_size > net_cat_len) return -ENOSPC; array[array_cnt++] = iter; } for (iter = 0; array_cnt > 0;) { *((__be16 *)&net_cat[iter]) = htons(array[--array_cnt]); iter += 2; array_cnt--; if (array[array_cnt] != 0) { *((__be16 *)&net_cat[iter]) = htons(array[array_cnt]); iter += 2; } } return cat_size; } /** * cipso_v4_map_cat_rng_ntoh - Perform a category mapping from network to host * @doi_def: the DOI definition * @net_cat: the category list in network/CIPSO format * @net_cat_len: the length of the CIPSO bitmap in bytes * @secattr: the security attributes * * Description: * Perform a label mapping to translate a CIPSO category list to the correct * local MLS category bitmap using the given DOI definition. Returns zero on * success, negative values on failure. * */ static int cipso_v4_map_cat_rng_ntoh(const struct cipso_v4_doi *doi_def, const unsigned char *net_cat, u32 net_cat_len, struct netlbl_lsm_secattr *secattr) { int ret_val; u32 net_iter; u16 cat_low; u16 cat_high; for (net_iter = 0; net_iter < net_cat_len; net_iter += 4) { cat_high = get_unaligned_be16(&net_cat[net_iter]); if ((net_iter + 4) <= net_cat_len) cat_low = get_unaligned_be16(&net_cat[net_iter + 2]); else cat_low = 0; ret_val = netlbl_secattr_catmap_setrng(secattr->attr.mls.cat, cat_low, cat_high, GFP_ATOMIC); if (ret_val != 0) return ret_val; } return 0; } /* * Protocol Handling Functions */ /** * cipso_v4_gentag_hdr - Generate a CIPSO option header * @doi_def: the DOI definition * @len: the total tag length in bytes, not including this header * @buf: the CIPSO option buffer * * Description: * Write a CIPSO header into the beginning of @buffer. 
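 *
 * The ranged encoding produced above stores each category range as a
 * 16-bit top value followed by a 16-bit bottom value, highest range
 * first, and a bottom value of zero is omitted entirely; e.g. the
 * single range 0..5 (an invented example) encodes as just the two
 * bytes 00 05.  This omission is what lets eight ranges fit where only
 * seven fully-specified pairs would.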
* */ static void cipso_v4_gentag_hdr(const struct cipso_v4_doi *doi_def, unsigned char *buf, u32 len) { buf[0] = IPOPT_CIPSO; buf[1] = CIPSO_V4_HDR_LEN + len; *(__be32 *)&buf[2] = htonl(doi_def->doi); } /** * cipso_v4_gentag_rbm - Generate a CIPSO restricted bitmap tag (type #1) * @doi_def: the DOI definition * @secattr: the security attributes * @buffer: the option buffer * @buffer_len: length of buffer in bytes * * Description: * Generate a CIPSO option using the restricted bitmap tag, tag type #1. The * actual buffer length may be larger than the indicated size due to * translation between host and network category bitmaps. Returns the size of * the tag on success, negative values on failure. * */ static int cipso_v4_gentag_rbm(const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr, unsigned char *buffer, u32 buffer_len) { int ret_val; u32 tag_len; u32 level; if ((secattr->flags & NETLBL_SECATTR_MLS_LVL) == 0) return -EPERM; ret_val = cipso_v4_map_lvl_hton(doi_def, secattr->attr.mls.lvl, &level); if (ret_val != 0) return ret_val; if (secattr->flags & NETLBL_SECATTR_MLS_CAT) { ret_val = cipso_v4_map_cat_rbm_hton(doi_def, secattr, &buffer[4], buffer_len - 4); if (ret_val < 0) return ret_val; /* This will send packets using the "optimized" format when * possible as specified in section 3.4.2.6 of the * CIPSO draft. */ if (cipso_v4_rbm_optfmt && ret_val > 0 && ret_val <= 10) tag_len = 14; else tag_len = 4 + ret_val; } else tag_len = 4; buffer[0] = CIPSO_V4_TAG_RBITMAP; buffer[1] = tag_len; buffer[3] = level; return tag_len; } /** * cipso_v4_parsetag_rbm - Parse a CIPSO restricted bitmap tag * @doi_def: the DOI definition * @tag: the CIPSO tag * @secattr: the security attributes * * Description: * Parse a CIPSO restricted bitmap tag (tag type #1) and return the security * attributes in @secattr. Return zero on success, negatives values on * failure. * */ static int cipso_v4_parsetag_rbm(const struct cipso_v4_doi *doi_def, const unsigned char *tag, struct netlbl_lsm_secattr *secattr) { int ret_val; u8 tag_len = tag[1]; u32 level; ret_val = cipso_v4_map_lvl_ntoh(doi_def, tag[3], &level); if (ret_val != 0) return ret_val; secattr->attr.mls.lvl = level; secattr->flags |= NETLBL_SECATTR_MLS_LVL; if (tag_len > 4) { secattr->attr.mls.cat = netlbl_secattr_catmap_alloc(GFP_ATOMIC); if (secattr->attr.mls.cat == NULL) return -ENOMEM; ret_val = cipso_v4_map_cat_rbm_ntoh(doi_def, &tag[4], tag_len - 4, secattr); if (ret_val != 0) { netlbl_secattr_catmap_free(secattr->attr.mls.cat); return ret_val; } secattr->flags |= NETLBL_SECATTR_MLS_CAT; } return 0; } /** * cipso_v4_gentag_enum - Generate a CIPSO enumerated tag (type #2) * @doi_def: the DOI definition * @secattr: the security attributes * @buffer: the option buffer * @buffer_len: length of buffer in bytes * * Description: * Generate a CIPSO option using the enumerated tag, tag type #2. Returns the * size of the tag on success, negative values on failure. 
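 *
 * Putting the header and tag generators together, a restricted bitmap
 * option for a pass-through DOI of 3 at level 7 with no categories
 * (all values invented) comes out as the ten bytes
 *
 *     86 0a 00 00 00 03 01 04 00 07
 *
 * i.e. option type IPOPT_CIPSO (0x86), total length 10, the 32-bit DOI,
 * then tag type 1, tag length 4, the unused alignment octet and the
 * level.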
* */ static int cipso_v4_gentag_enum(const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr, unsigned char *buffer, u32 buffer_len) { int ret_val; u32 tag_len; u32 level; if (!(secattr->flags & NETLBL_SECATTR_MLS_LVL)) return -EPERM; ret_val = cipso_v4_map_lvl_hton(doi_def, secattr->attr.mls.lvl, &level); if (ret_val != 0) return ret_val; if (secattr->flags & NETLBL_SECATTR_MLS_CAT) { ret_val = cipso_v4_map_cat_enum_hton(doi_def, secattr, &buffer[4], buffer_len - 4); if (ret_val < 0) return ret_val; tag_len = 4 + ret_val; } else tag_len = 4; buffer[0] = CIPSO_V4_TAG_ENUM; buffer[1] = tag_len; buffer[3] = level; return tag_len; } /** * cipso_v4_parsetag_enum - Parse a CIPSO enumerated tag * @doi_def: the DOI definition * @tag: the CIPSO tag * @secattr: the security attributes * * Description: * Parse a CIPSO enumerated tag (tag type #2) and return the security * attributes in @secattr. Return zero on success, negatives values on * failure. * */ static int cipso_v4_parsetag_enum(const struct cipso_v4_doi *doi_def, const unsigned char *tag, struct netlbl_lsm_secattr *secattr) { int ret_val; u8 tag_len = tag[1]; u32 level; ret_val = cipso_v4_map_lvl_ntoh(doi_def, tag[3], &level); if (ret_val != 0) return ret_val; secattr->attr.mls.lvl = level; secattr->flags |= NETLBL_SECATTR_MLS_LVL; if (tag_len > 4) { secattr->attr.mls.cat = netlbl_secattr_catmap_alloc(GFP_ATOMIC); if (secattr->attr.mls.cat == NULL) return -ENOMEM; ret_val = cipso_v4_map_cat_enum_ntoh(doi_def, &tag[4], tag_len - 4, secattr); if (ret_val != 0) { netlbl_secattr_catmap_free(secattr->attr.mls.cat); return ret_val; } secattr->flags |= NETLBL_SECATTR_MLS_CAT; } return 0; } /** * cipso_v4_gentag_rng - Generate a CIPSO ranged tag (type #5) * @doi_def: the DOI definition * @secattr: the security attributes * @buffer: the option buffer * @buffer_len: length of buffer in bytes * * Description: * Generate a CIPSO option using the ranged tag, tag type #5. Returns the * size of the tag on success, negative values on failure. * */ static int cipso_v4_gentag_rng(const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr, unsigned char *buffer, u32 buffer_len) { int ret_val; u32 tag_len; u32 level; if (!(secattr->flags & NETLBL_SECATTR_MLS_LVL)) return -EPERM; ret_val = cipso_v4_map_lvl_hton(doi_def, secattr->attr.mls.lvl, &level); if (ret_val != 0) return ret_val; if (secattr->flags & NETLBL_SECATTR_MLS_CAT) { ret_val = cipso_v4_map_cat_rng_hton(doi_def, secattr, &buffer[4], buffer_len - 4); if (ret_val < 0) return ret_val; tag_len = 4 + ret_val; } else tag_len = 4; buffer[0] = CIPSO_V4_TAG_RANGE; buffer[1] = tag_len; buffer[3] = level; return tag_len; } /** * cipso_v4_parsetag_rng - Parse a CIPSO ranged tag * @doi_def: the DOI definition * @tag: the CIPSO tag * @secattr: the security attributes * * Description: * Parse a CIPSO ranged tag (tag type #5) and return the security attributes * in @secattr. Return zero on success, negatives values on failure. 
* */ static int cipso_v4_parsetag_rng(const struct cipso_v4_doi *doi_def, const unsigned char *tag, struct netlbl_lsm_secattr *secattr) { int ret_val; u8 tag_len = tag[1]; u32 level; ret_val = cipso_v4_map_lvl_ntoh(doi_def, tag[3], &level); if (ret_val != 0) return ret_val; secattr->attr.mls.lvl = level; secattr->flags |= NETLBL_SECATTR_MLS_LVL; if (tag_len > 4) { secattr->attr.mls.cat = netlbl_secattr_catmap_alloc(GFP_ATOMIC); if (secattr->attr.mls.cat == NULL) return -ENOMEM; ret_val = cipso_v4_map_cat_rng_ntoh(doi_def, &tag[4], tag_len - 4, secattr); if (ret_val != 0) { netlbl_secattr_catmap_free(secattr->attr.mls.cat); return ret_val; } secattr->flags |= NETLBL_SECATTR_MLS_CAT; } return 0; } /** * cipso_v4_gentag_loc - Generate a CIPSO local tag (non-standard) * @doi_def: the DOI definition * @secattr: the security attributes * @buffer: the option buffer * @buffer_len: length of buffer in bytes * * Description: * Generate a CIPSO option using the local tag. Returns the size of the tag * on success, negative values on failure. * */ static int cipso_v4_gentag_loc(const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr, unsigned char *buffer, u32 buffer_len) { if (!(secattr->flags & NETLBL_SECATTR_SECID)) return -EPERM; buffer[0] = CIPSO_V4_TAG_LOCAL; buffer[1] = CIPSO_V4_TAG_LOC_BLEN; *(u32 *)&buffer[2] = secattr->attr.secid; return CIPSO_V4_TAG_LOC_BLEN; } /** * cipso_v4_parsetag_loc - Parse a CIPSO local tag * @doi_def: the DOI definition * @tag: the CIPSO tag * @secattr: the security attributes * * Description: * Parse a CIPSO local tag and return the security attributes in @secattr. * Return zero on success, negatives values on failure. * */ static int cipso_v4_parsetag_loc(const struct cipso_v4_doi *doi_def, const unsigned char *tag, struct netlbl_lsm_secattr *secattr) { secattr->attr.secid = *(u32 *)&tag[2]; secattr->flags |= NETLBL_SECATTR_SECID; return 0; } /** * cipso_v4_validate - Validate a CIPSO option * @option: the start of the option, on error it is set to point to the error * * Description: * This routine is called to validate a CIPSO option, it checks all of the * fields to ensure that they are at least valid, see the draft snippet below * for details. If the option is valid then a zero value is returned and * the value of @option is unchanged. If the option is invalid then a * non-zero value is returned and @option is adjusted to point to the * offending portion of the option. From the IETF draft ... * * "If any field within the CIPSO options, such as the DOI identifier, is not * recognized the IP datagram is discarded and an ICMP 'parameter problem' * (type 12) is generated and returned. The ICMP code field is set to 'bad * parameter' (code 0) and the pointer is set to the start of the CIPSO field * that is unrecognized." 
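 *
 * As an example of the error reporting described above: an option
 * carrying an unknown DOI makes the function below return 2 with
 * @option left pointing at the DOI field, two bytes past the option
 * type octet, so the resulting ICMP parameter problem points the sender
 * at exactly the field that was rejected.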
* */ int cipso_v4_validate(const struct sk_buff *skb, unsigned char **option) { unsigned char *opt = *option; unsigned char *tag; unsigned char opt_iter; unsigned char err_offset = 0; u8 opt_len; u8 tag_len; struct cipso_v4_doi *doi_def = NULL; u32 tag_iter; /* caller already checks for length values that are too large */ opt_len = opt[1]; if (opt_len < 8) { err_offset = 1; goto validate_return; } rcu_read_lock(); doi_def = cipso_v4_doi_search(get_unaligned_be32(&opt[2])); if (doi_def == NULL) { err_offset = 2; goto validate_return_locked; } opt_iter = CIPSO_V4_HDR_LEN; tag = opt + opt_iter; while (opt_iter < opt_len) { for (tag_iter = 0; doi_def->tags[tag_iter] != tag[0];) if (doi_def->tags[tag_iter] == CIPSO_V4_TAG_INVALID || ++tag_iter == CIPSO_V4_TAG_MAXCNT) { err_offset = opt_iter; goto validate_return_locked; } tag_len = tag[1]; if (tag_len > (opt_len - opt_iter)) { err_offset = opt_iter + 1; goto validate_return_locked; } switch (tag[0]) { case CIPSO_V4_TAG_RBITMAP: if (tag_len < CIPSO_V4_TAG_RBM_BLEN) { err_offset = opt_iter + 1; goto validate_return_locked; } /* We are already going to do all the verification * necessary at the socket layer so from our point of * view it is safe to turn these checks off (and less * work), however, the CIPSO draft says we should do * all the CIPSO validations here but it doesn't * really specify _exactly_ what we need to validate * ... so, just make it a sysctl tunable. */ if (cipso_v4_rbm_strictvalid) { if (cipso_v4_map_lvl_valid(doi_def, tag[3]) < 0) { err_offset = opt_iter + 3; goto validate_return_locked; } if (tag_len > CIPSO_V4_TAG_RBM_BLEN && cipso_v4_map_cat_rbm_valid(doi_def, &tag[4], tag_len - 4) < 0) { err_offset = opt_iter + 4; goto validate_return_locked; } } break; case CIPSO_V4_TAG_ENUM: if (tag_len < CIPSO_V4_TAG_ENUM_BLEN) { err_offset = opt_iter + 1; goto validate_return_locked; } if (cipso_v4_map_lvl_valid(doi_def, tag[3]) < 0) { err_offset = opt_iter + 3; goto validate_return_locked; } if (tag_len > CIPSO_V4_TAG_ENUM_BLEN && cipso_v4_map_cat_enum_valid(doi_def, &tag[4], tag_len - 4) < 0) { err_offset = opt_iter + 4; goto validate_return_locked; } break; case CIPSO_V4_TAG_RANGE: if (tag_len < CIPSO_V4_TAG_RNG_BLEN) { err_offset = opt_iter + 1; goto validate_return_locked; } if (cipso_v4_map_lvl_valid(doi_def, tag[3]) < 0) { err_offset = opt_iter + 3; goto validate_return_locked; } if (tag_len > CIPSO_V4_TAG_RNG_BLEN && cipso_v4_map_cat_rng_valid(doi_def, &tag[4], tag_len - 4) < 0) { err_offset = opt_iter + 4; goto validate_return_locked; } break; case CIPSO_V4_TAG_LOCAL: /* This is a non-standard tag that we only allow for * local connections, so if the incoming interface is * not the loopback device drop the packet. */ if (!(skb->dev->flags & IFF_LOOPBACK)) { err_offset = opt_iter; goto validate_return_locked; } if (tag_len != CIPSO_V4_TAG_LOC_BLEN) { err_offset = opt_iter + 1; goto validate_return_locked; } break; default: err_offset = opt_iter; goto validate_return_locked; } tag += tag_len; opt_iter += tag_len; } validate_return_locked: rcu_read_unlock(); validate_return: *option = opt + err_offset; return err_offset; } /** * cipso_v4_error - Send the correct response for a bad packet * @skb: the packet * @error: the error code * @gateway: CIPSO gateway flag * * Description: * Based on the error code given in @error, send an ICMP error message back to * the originating host. From the IETF draft ... 
* * "If the contents of the CIPSO [option] are valid but the security label is * outside of the configured host or port label range, the datagram is * discarded and an ICMP 'destination unreachable' (type 3) is generated and * returned. The code field of the ICMP is set to 'communication with * destination network administratively prohibited' (code 9) or to * 'communication with destination host administratively prohibited' * (code 10). The value of the code is dependent on whether the originator * of the ICMP message is acting as a CIPSO host or a CIPSO gateway. The * recipient of the ICMP message MUST be able to handle either value. The * same procedure is performed if a CIPSO [option] can not be added to an * IP packet because it is too large to fit in the IP options area." * * "If the error is triggered by receipt of an ICMP message, the message is * discarded and no response is permitted (consistent with general ICMP * processing rules)." * */ void cipso_v4_error(struct sk_buff *skb, int error, u32 gateway) { if (ip_hdr(skb)->protocol == IPPROTO_ICMP || error != -EACCES) return; if (gateway) icmp_send(skb, ICMP_DEST_UNREACH, ICMP_NET_ANO, 0); else icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_ANO, 0); } /** * cipso_v4_genopt - Generate a CIPSO option * @buf: the option buffer * @buf_len: the size of opt_buf * @doi_def: the CIPSO DOI to use * @secattr: the security attributes * * Description: * Generate a CIPSO option using the DOI definition and security attributes * passed to the function. Returns the length of the option on success and * negative values on failure. * */ static int cipso_v4_genopt(unsigned char *buf, u32 buf_len, const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr) { int ret_val; u32 iter; if (buf_len <= CIPSO_V4_HDR_LEN) return -ENOSPC; /* XXX - This code assumes only one tag per CIPSO option which isn't * really a good assumption to make but since we only support the MAC * tags right now it is a safe assumption. */ iter = 0; do { memset(buf, 0, buf_len); switch (doi_def->tags[iter]) { case CIPSO_V4_TAG_RBITMAP: ret_val = cipso_v4_gentag_rbm(doi_def, secattr, &buf[CIPSO_V4_HDR_LEN], buf_len - CIPSO_V4_HDR_LEN); break; case CIPSO_V4_TAG_ENUM: ret_val = cipso_v4_gentag_enum(doi_def, secattr, &buf[CIPSO_V4_HDR_LEN], buf_len - CIPSO_V4_HDR_LEN); break; case CIPSO_V4_TAG_RANGE: ret_val = cipso_v4_gentag_rng(doi_def, secattr, &buf[CIPSO_V4_HDR_LEN], buf_len - CIPSO_V4_HDR_LEN); break; case CIPSO_V4_TAG_LOCAL: ret_val = cipso_v4_gentag_loc(doi_def, secattr, &buf[CIPSO_V4_HDR_LEN], buf_len - CIPSO_V4_HDR_LEN); break; default: return -EPERM; } iter++; } while (ret_val < 0 && iter < CIPSO_V4_TAG_MAXCNT && doi_def->tags[iter] != CIPSO_V4_TAG_INVALID); if (ret_val < 0) return ret_val; cipso_v4_gentag_hdr(doi_def, buf, ret_val); return CIPSO_V4_HDR_LEN + ret_val; } /** * cipso_v4_sock_setattr - Add a CIPSO option to a socket * @sk: the socket * @doi_def: the CIPSO DOI to use * @secattr: the specific security attributes of the socket * * Description: * Set the CIPSO option on the given socket using the DOI definition and * security attributes passed to the function. This function requires * exclusive access to @sk, which means it either needs to be in the * process of being created or locked. Returns zero on success and negative * values on failure. 
* */ int cipso_v4_sock_setattr(struct sock *sk, const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr) { int ret_val = -EPERM; unsigned char *buf = NULL; u32 buf_len; u32 opt_len; struct ip_options *opt = NULL; struct inet_sock *sk_inet; struct inet_connection_sock *sk_conn; /* In the case of sock_create_lite(), the sock->sk field is not * defined yet but it is not a problem as the only users of these * "lite" PF_INET sockets are functions which do an accept() call * afterwards so we will label the socket as part of the accept(). */ if (sk == NULL) return 0; /* We allocate the maximum CIPSO option size here so we are probably * being a little wasteful, but it makes our life _much_ easier later * on and after all we are only talking about 40 bytes. */ buf_len = CIPSO_V4_OPT_LEN_MAX; buf = kmalloc(buf_len, GFP_ATOMIC); if (buf == NULL) { ret_val = -ENOMEM; goto socket_setattr_failure; } ret_val = cipso_v4_genopt(buf, buf_len, doi_def, secattr); if (ret_val < 0) goto socket_setattr_failure; buf_len = ret_val; /* We can't use ip_options_get() directly because it makes a call to * ip_options_get_alloc() which allocates memory with GFP_KERNEL and * we won't always have CAP_NET_RAW even though we _always_ want to * set the IPOPT_CIPSO option. */ opt_len = (buf_len + 3) & ~3; opt = kzalloc(sizeof(*opt) + opt_len, GFP_ATOMIC); if (opt == NULL) { ret_val = -ENOMEM; goto socket_setattr_failure; } memcpy(opt->__data, buf, buf_len); opt->optlen = opt_len; opt->cipso = sizeof(struct iphdr); kfree(buf); buf = NULL; sk_inet = inet_sk(sk); if (sk_inet->is_icsk) { sk_conn = inet_csk(sk); if (sk_inet->opt) sk_conn->icsk_ext_hdr_len -= sk_inet->opt->optlen; sk_conn->icsk_ext_hdr_len += opt->optlen; sk_conn->icsk_sync_mss(sk, sk_conn->icsk_pmtu_cookie); } opt = xchg(&sk_inet->opt, opt); kfree(opt); return 0; socket_setattr_failure: kfree(buf); kfree(opt); return ret_val; } /** * cipso_v4_req_setattr - Add a CIPSO option to a connection request socket * @req: the connection request socket * @doi_def: the CIPSO DOI to use * @secattr: the specific security attributes of the socket * * Description: * Set the CIPSO option on the given socket using the DOI definition and * security attributes passed to the function. Returns zero on success and * negative values on failure. * */ int cipso_v4_req_setattr(struct request_sock *req, const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr) { int ret_val = -EPERM; unsigned char *buf = NULL; u32 buf_len; u32 opt_len; struct ip_options *opt = NULL; struct inet_request_sock *req_inet; /* We allocate the maximum CIPSO option size here so we are probably * being a little wasteful, but it makes our life _much_ easier later * on and after all we are only talking about 40 bytes. */ buf_len = CIPSO_V4_OPT_LEN_MAX; buf = kmalloc(buf_len, GFP_ATOMIC); if (buf == NULL) { ret_val = -ENOMEM; goto req_setattr_failure; } ret_val = cipso_v4_genopt(buf, buf_len, doi_def, secattr); if (ret_val < 0) goto req_setattr_failure; buf_len = ret_val; /* We can't use ip_options_get() directly because it makes a call to * ip_options_get_alloc() which allocates memory with GFP_KERNEL and * we won't always have CAP_NET_RAW even though we _always_ want to * set the IPOPT_CIPSO option. 
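 * As a worked example of the rounding below, a generated option of
 * 10 bytes gives opt_len = (10 + 3) & ~3 = 12, keeping the option
 * block a multiple of four bytes; the trailing octets stay zero from
 * kzalloc(), which reads as end-of-options padding.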
*/ opt_len = (buf_len + 3) & ~3; opt = kzalloc(sizeof(*opt) + opt_len, GFP_ATOMIC); if (opt == NULL) { ret_val = -ENOMEM; goto req_setattr_failure; } memcpy(opt->__data, buf, buf_len); opt->optlen = opt_len; opt->cipso = sizeof(struct iphdr); kfree(buf); buf = NULL; req_inet = inet_rsk(req); opt = xchg(&req_inet->opt, opt); kfree(opt); return 0; req_setattr_failure: kfree(buf); kfree(opt); return ret_val; } /** * cipso_v4_delopt - Delete the CIPSO option from a set of IP options * @opt_ptr: IP option pointer * * Description: * Deletes the CIPSO IP option from a set of IP options and makes the necessary * adjustments to the IP option structure. Returns zero on success, negative * values on failure. * */ static int cipso_v4_delopt(struct ip_options **opt_ptr) { int hdr_delta = 0; struct ip_options *opt = *opt_ptr; if (opt->srr || opt->rr || opt->ts || opt->router_alert) { u8 cipso_len; u8 cipso_off; unsigned char *cipso_ptr; int iter; int optlen_new; cipso_off = opt->cipso - sizeof(struct iphdr); cipso_ptr = &opt->__data[cipso_off]; cipso_len = cipso_ptr[1]; if (opt->srr > opt->cipso) opt->srr -= cipso_len; if (opt->rr > opt->cipso) opt->rr -= cipso_len; if (opt->ts > opt->cipso) opt->ts -= cipso_len; if (opt->router_alert > opt->cipso) opt->router_alert -= cipso_len; opt->cipso = 0; memmove(cipso_ptr, cipso_ptr + cipso_len, opt->optlen - cipso_off - cipso_len); /* determining the new total option length is tricky because of * the padding necessary, the only thing i can think to do at * this point is walk the options one-by-one, skipping the * padding at the end to determine the actual option size and * from there we can determine the new total option length */ iter = 0; optlen_new = 0; while (iter < opt->optlen) if (opt->__data[iter] != IPOPT_NOP) { iter += opt->__data[iter + 1]; optlen_new = iter; } else iter++; hdr_delta = opt->optlen; opt->optlen = (optlen_new + 3) & ~3; hdr_delta -= opt->optlen; } else { /* only the cipso option was present on the socket so we can * remove the entire option struct */ *opt_ptr = NULL; hdr_delta = opt->optlen; kfree(opt); } return hdr_delta; } /** * cipso_v4_sock_delattr - Delete the CIPSO option from a socket * @sk: the socket * * Description: * Removes the CIPSO option from a socket, if present. * */ void cipso_v4_sock_delattr(struct sock *sk) { int hdr_delta; struct ip_options *opt; struct inet_sock *sk_inet; sk_inet = inet_sk(sk); opt = sk_inet->opt; if (opt == NULL || opt->cipso == 0) return; hdr_delta = cipso_v4_delopt(&sk_inet->opt); if (sk_inet->is_icsk && hdr_delta > 0) { struct inet_connection_sock *sk_conn = inet_csk(sk); sk_conn->icsk_ext_hdr_len -= hdr_delta; sk_conn->icsk_sync_mss(sk, sk_conn->icsk_pmtu_cookie); } } /** * cipso_v4_req_delattr - Delete the CIPSO option from a request socket * @reg: the request socket * * Description: * Removes the CIPSO option from a request socket, if present. * */ void cipso_v4_req_delattr(struct request_sock *req) { struct ip_options *opt; struct inet_request_sock *req_inet; req_inet = inet_rsk(req); opt = req_inet->opt; if (opt == NULL || opt->cipso == 0) return; cipso_v4_delopt(&req_inet->opt); } /** * cipso_v4_getattr - Helper function for the cipso_v4_*_getattr functions * @cipso: the CIPSO v4 option * @secattr: the security attributes * * Description: * Inspect @cipso and return the security attributes in @secattr. Returns zero * on success and negative values on failure. 
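 *
 * The option walk in cipso_v4_delopt() above re-pads the surviving
 * options back to a four byte boundary, e.g. options ending at byte 7
 * are padded out to (7 + 3) & ~3 = 8 bytes, and the returned value is
 * the number of bytes the header shrank so callers such as
 * cipso_v4_sock_delattr() can adjust icsk_ext_hdr_len.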
* */ static int cipso_v4_getattr(const unsigned char *cipso, struct netlbl_lsm_secattr *secattr) { int ret_val = -ENOMSG; u32 doi; struct cipso_v4_doi *doi_def; if (cipso_v4_cache_check(cipso, cipso[1], secattr) == 0) return 0; doi = get_unaligned_be32(&cipso[2]); rcu_read_lock(); doi_def = cipso_v4_doi_search(doi); if (doi_def == NULL) goto getattr_return; /* XXX - This code assumes only one tag per CIPSO option which isn't * really a good assumption to make but since we only support the MAC * tags right now it is a safe assumption. */ switch (cipso[6]) { case CIPSO_V4_TAG_RBITMAP: ret_val = cipso_v4_parsetag_rbm(doi_def, &cipso[6], secattr); break; case CIPSO_V4_TAG_ENUM: ret_val = cipso_v4_parsetag_enum(doi_def, &cipso[6], secattr); break; case CIPSO_V4_TAG_RANGE: ret_val = cipso_v4_parsetag_rng(doi_def, &cipso[6], secattr); break; case CIPSO_V4_TAG_LOCAL: ret_val = cipso_v4_parsetag_loc(doi_def, &cipso[6], secattr); break; } if (ret_val == 0) secattr->type = NETLBL_NLTYPE_CIPSOV4; getattr_return: rcu_read_unlock(); return ret_val; } /** * cipso_v4_sock_getattr - Get the security attributes from a sock * @sk: the sock * @secattr: the security attributes * * Description: * Query @sk to see if there is a CIPSO option attached to the sock and if * there is return the CIPSO security attributes in @secattr. This function * requires that @sk be locked, or privately held, but it does not do any * locking itself. Returns zero on success and negative values on failure. * */ int cipso_v4_sock_getattr(struct sock *sk, struct netlbl_lsm_secattr *secattr) { struct ip_options *opt; opt = inet_sk(sk)->opt; if (opt == NULL || opt->cipso == 0) return -ENOMSG; return cipso_v4_getattr(opt->__data + opt->cipso - sizeof(struct iphdr), secattr); } /** * cipso_v4_skbuff_setattr - Set the CIPSO option on a packet * @skb: the packet * @secattr: the security attributes * * Description: * Set the CIPSO option on the given packet based on the security attributes. * Returns a pointer to the IP header on success and NULL on failure. 
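 *
 * A rough caller-side sketch of the getattr path above (error handling
 * trimmed; this assumes the usual NetLabel secattr helpers and that @sk
 * is already locked as required):
 *
 *     struct netlbl_lsm_secattr secattr;
 *
 *     netlbl_secattr_init(&secattr);
 *     if (cipso_v4_sock_getattr(sk, &secattr) == 0)
 *             ... use secattr.attr.mls.lvl and friends ...
 *     netlbl_secattr_destroy(&secattr);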
* */ int cipso_v4_skbuff_setattr(struct sk_buff *skb, const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr) { int ret_val; struct iphdr *iph; struct ip_options *opt = &IPCB(skb)->opt; unsigned char buf[CIPSO_V4_OPT_LEN_MAX]; u32 buf_len = CIPSO_V4_OPT_LEN_MAX; u32 opt_len; int len_delta; ret_val = cipso_v4_genopt(buf, buf_len, doi_def, secattr); if (ret_val < 0) return ret_val; buf_len = ret_val; opt_len = (buf_len + 3) & ~3; /* we overwrite any existing options to ensure that we have enough * room for the CIPSO option, the reason is that we _need_ to guarantee * that the security label is applied to the packet - we do the same * thing when using the socket options and it hasn't caused a problem, * if we need to we can always revisit this choice later */ len_delta = opt_len - opt->optlen; /* if we don't ensure enough headroom we could panic on the skb_push() * call below so make sure we have enough, we are also "mangling" the * packet so we should probably do a copy-on-write call anyway */ ret_val = skb_cow(skb, skb_headroom(skb) + len_delta); if (ret_val < 0) return ret_val; if (len_delta > 0) { /* we assume that the header + opt->optlen have already been * "pushed" in ip_options_build() or similar */ iph = ip_hdr(skb); skb_push(skb, len_delta); memmove((char *)iph - len_delta, iph, iph->ihl << 2); skb_reset_network_header(skb); iph = ip_hdr(skb); } else if (len_delta < 0) { iph = ip_hdr(skb); memset(iph + 1, IPOPT_NOP, opt->optlen); } else iph = ip_hdr(skb); if (opt->optlen > 0) memset(opt, 0, sizeof(*opt)); opt->optlen = opt_len; opt->cipso = sizeof(struct iphdr); opt->is_changed = 1; /* we have to do the following because we are being called from a * netfilter hook which means the packet already has had the header * fields populated and the checksum calculated - yes this means we * are doing more work than needed but we do it to keep the core * stack clean and tidy */ memcpy(iph + 1, buf, buf_len); if (opt_len > buf_len) memset((char *)(iph + 1) + buf_len, 0, opt_len - buf_len); if (len_delta != 0) { iph->ihl = 5 + (opt_len >> 2); iph->tot_len = htons(skb->len); } ip_send_check(iph); return 0; } /** * cipso_v4_skbuff_delattr - Delete any CIPSO options from a packet * @skb: the packet * * Description: * Removes any and all CIPSO options from the given packet. Returns zero on * success, negative values on failure. * */ int cipso_v4_skbuff_delattr(struct sk_buff *skb) { int ret_val; struct iphdr *iph; struct ip_options *opt = &IPCB(skb)->opt; unsigned char *cipso_ptr; if (opt->cipso == 0) return 0; /* since we are changing the packet we should make a copy */ ret_val = skb_cow(skb, skb_headroom(skb)); if (ret_val < 0) return ret_val; /* the easiest thing to do is just replace the cipso option with noop * options since we don't change the size of the packet, although we * still need to recalculate the checksum */ iph = ip_hdr(skb); cipso_ptr = (unsigned char *)iph + opt->cipso; memset(cipso_ptr, IPOPT_NOOP, cipso_ptr[1]); opt->cipso = 0; opt->is_changed = 1; ip_send_check(iph); return 0; } /** * cipso_v4_skbuff_getattr - Get the security attributes from the CIPSO option * @skb: the packet * @secattr: the security attributes * * Description: * Parse the given packet's CIPSO option and return the security attributes. * Returns zero on success and negative values on failure. 
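 *
 * Unlike the socket-level delete, cipso_v4_skbuff_delattr() above never
 * shrinks the header: a 10-byte CIPSO option simply becomes ten
 * IPOPT_NOOP bytes (0x01 each) and the checksum is recomputed, which
 * keeps every other option at its original offset.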
* */ int cipso_v4_skbuff_getattr(const struct sk_buff *skb, struct netlbl_lsm_secattr *secattr) { return cipso_v4_getattr(CIPSO_V4_OPTPTR(skb), secattr); } /* * Setup Functions */ /** * cipso_v4_init - Initialize the CIPSO module * * Description: * Initialize the CIPSO module and prepare it for use. Returns zero on success * and negative values on failure. * */ static int __init cipso_v4_init(void) { int ret_val; ret_val = cipso_v4_cache_init(); if (ret_val != 0) panic("Failed to initialize the CIPSO/IPv4 cache (%d)\n", ret_val); return 0; } subsys_initcall(cipso_v4_init);
/* * CIPSO - Commercial IP Security Option * * This is an implementation of the CIPSO 2.2 protocol as specified in * draft-ietf-cipso-ipsecurity-01.txt with additional tag types as found in * FIPS-188. While CIPSO never became a full IETF RFC standard many vendors * have chosen to adopt the protocol and over the years it has become a * de-facto standard for labeled networking. * * The CIPSO draft specification can be found in the kernel's Documentation * directory as well as the following URL: * http://tools.ietf.org/id/draft-ietf-cipso-ipsecurity-01.txt * The FIPS-188 specification can be found at the following URL: * http://www.itl.nist.gov/fipspubs/fip188.htm * * Author: Paul Moore <paul.moore@hp.com> * */ /* * (c) Copyright Hewlett-Packard Development Company, L.P., 2006, 2008 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See * the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/init.h> #include <linux/types.h> #include <linux/rcupdate.h> #include <linux/list.h> #include <linux/spinlock.h> #include <linux/string.h> #include <linux/jhash.h> #include <linux/audit.h> #include <linux/slab.h> #include <net/ip.h> #include <net/icmp.h> #include <net/tcp.h> #include <net/netlabel.h> #include <net/cipso_ipv4.h> #include <asm/atomic.h> #include <asm/bug.h> #include <asm/unaligned.h> /* List of available DOI definitions */ /* XXX - This currently assumes a minimal number of different DOIs in use, * if in practice there are a lot of different DOIs this list should * probably be turned into a hash table or something similar so we * can do quick lookups. */ static DEFINE_SPINLOCK(cipso_v4_doi_list_lock); static LIST_HEAD(cipso_v4_doi_list); /* Label mapping cache */ int cipso_v4_cache_enabled = 1; int cipso_v4_cache_bucketsize = 10; #define CIPSO_V4_CACHE_BUCKETBITS 7 #define CIPSO_V4_CACHE_BUCKETS (1 << CIPSO_V4_CACHE_BUCKETBITS) #define CIPSO_V4_CACHE_REORDERLIMIT 10 struct cipso_v4_map_cache_bkt { spinlock_t lock; u32 size; struct list_head list; }; struct cipso_v4_map_cache_entry { u32 hash; unsigned char *key; size_t key_len; struct netlbl_lsm_cache *lsm_data; u32 activity; struct list_head list; }; static struct cipso_v4_map_cache_bkt *cipso_v4_cache = NULL; /* Restricted bitmap (tag #1) flags */ int cipso_v4_rbm_optfmt = 0; int cipso_v4_rbm_strictvalid = 1; /* * Protocol Constants */ /* Maximum size of the CIPSO IP option, derived from the fact that the maximum * IPv4 header size is 60 bytes and the base IPv4 header is 20 bytes long. */ #define CIPSO_V4_OPT_LEN_MAX 40 /* Length of the base CIPSO option, this includes the option type (1 byte), the * option length (1 byte), and the DOI (4 bytes). */ #define CIPSO_V4_HDR_LEN 6 /* Base length of the restrictive category bitmap tag (tag #1). */ #define CIPSO_V4_TAG_RBM_BLEN 4 /* Base length of the enumerated category tag (tag #2). */ #define CIPSO_V4_TAG_ENUM_BLEN 4 /* Base length of the ranged categories bitmap tag (tag #5). 
*/ #define CIPSO_V4_TAG_RNG_BLEN 4 /* The maximum number of category ranges permitted in the ranged category tag * (tag #5). You may note that the IETF draft states that the maximum number * of category ranges is 7, but if the low end of the last category range is * zero then it is possible to fit 8 category ranges because the zero should * be omitted. */ #define CIPSO_V4_TAG_RNG_CAT_MAX 8 /* Base length of the local tag (non-standard tag). * Tag definition (may change between kernel versions) * * 0 8 16 24 32 * +----------+----------+----------+----------+ * | 10000000 | 00000110 | 32-bit secid value | * +----------+----------+----------+----------+ * | in (host byte order)| * +----------+----------+ * */ #define CIPSO_V4_TAG_LOC_BLEN 6 /* * Helper Functions */ /** * cipso_v4_bitmap_walk - Walk a bitmap looking for a bit * @bitmap: the bitmap * @bitmap_len: length in bits * @offset: starting offset * @state: if non-zero, look for a set (1) bit else look for a cleared (0) bit * * Description: * Starting at @offset, walk the bitmap from left to right until either the * desired bit is found or we reach the end. Return the bit offset, -1 if * not found, or -2 if error. */ static int cipso_v4_bitmap_walk(const unsigned char *bitmap, u32 bitmap_len, u32 offset, u8 state) { u32 bit_spot; u32 byte_offset; unsigned char bitmask; unsigned char byte; /* gcc always rounds to zero when doing integer division */ byte_offset = offset / 8; byte = bitmap[byte_offset]; bit_spot = offset; bitmask = 0x80 >> (offset % 8); while (bit_spot < bitmap_len) { if ((state && (byte & bitmask) == bitmask) || (state == 0 && (byte & bitmask) == 0)) return bit_spot; bit_spot++; bitmask >>= 1; if (bitmask == 0) { byte = bitmap[++byte_offset]; bitmask = 0x80; } } return -1; } /** * cipso_v4_bitmap_setbit - Sets a single bit in a bitmap * @bitmap: the bitmap * @bit: the bit * @state: if non-zero, set the bit (1) else clear the bit (0) * * Description: * Set a single bit in the bitmask. Returns zero on success, negative values * on error. */ static void cipso_v4_bitmap_setbit(unsigned char *bitmap, u32 bit, u8 state) { u32 byte_spot; u8 bitmask; /* gcc always rounds to zero when doing integer division */ byte_spot = bit / 8; bitmask = 0x80 >> (bit % 8); if (state) bitmap[byte_spot] |= bitmask; else bitmap[byte_spot] &= ~bitmask; } /** * cipso_v4_cache_entry_free - Frees a cache entry * @entry: the entry to free * * Description: * This function frees the memory associated with a cache entry including the * LSM cache data if there are no longer any users, i.e. reference count == 0. * */ static void cipso_v4_cache_entry_free(struct cipso_v4_map_cache_entry *entry) { if (entry->lsm_data) netlbl_secattr_cache_free(entry->lsm_data); kfree(entry->key); kfree(entry); } /** * cipso_v4_map_cache_hash - Hashing function for the CIPSO cache * @key: the hash key * @key_len: the length of the key in bytes * * Description: * The CIPSO tag hashing function. Returns a 32-bit hash value. * */ static u32 cipso_v4_map_cache_hash(const unsigned char *key, u32 key_len) { return jhash(key, key_len, 0); } /* * Label Mapping Cache Functions */ /** * cipso_v4_cache_init - Initialize the CIPSO cache * * Description: * Initializes the CIPSO label mapping cache, this function should be called * before any of the other functions defined in this file. Returns zero on * success, negative values on error. 
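A standalone userspace check of the two bitmap helpers above (a hypothetical test harness, not part of this file). It mirrors the same MSB-first convention, where bit 0 is the 0x80 bit of byte 0:

#include <stdio.h>

/* MSB-first bitmap walk, same convention as cipso_v4_bitmap_walk():
 * bit 0 is the 0x80 bit of byte 0.  Returns the offset of the first
 * bit in the desired state at or after 'offset', or -1 if none. */
static int bitmap_walk(const unsigned char *bitmap, unsigned bitmap_len_bits,
		       unsigned offset, int state)
{
	unsigned byte_off = offset / 8;
	unsigned char mask = 0x80 >> (offset % 8);
	unsigned bit;

	for (bit = offset; bit < bitmap_len_bits; bit++) {
		int set = (bitmap[byte_off] & mask) != 0;
		if (set == state)
			return (int)bit;
		mask >>= 1;
		if (mask == 0) {
			mask = 0x80;
			byte_off++;
		}
	}
	return -1;
}

static void bitmap_setbit(unsigned char *bitmap, unsigned bit, int state)
{
	unsigned char mask = 0x80 >> (bit % 8);

	if (state)
		bitmap[bit / 8] |= mask;
	else
		bitmap[bit / 8] &= ~mask;
}

int main(void)
{
	unsigned char map[4] = { 0 };	/* 32-bit bitmap, all clear */

	bitmap_setbit(map, 0, 1);
	bitmap_setbit(map, 9, 1);
	/* prints 0, then 9, then -1 */
	printf("%d\n", bitmap_walk(map, 32, 0, 1));
	printf("%d\n", bitmap_walk(map, 32, 1, 1));
	printf("%d\n", bitmap_walk(map, 32, 10, 1));
	return 0;
}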
* */ static int cipso_v4_cache_init(void) { u32 iter; cipso_v4_cache = kcalloc(CIPSO_V4_CACHE_BUCKETS, sizeof(struct cipso_v4_map_cache_bkt), GFP_KERNEL); if (cipso_v4_cache == NULL) return -ENOMEM; for (iter = 0; iter < CIPSO_V4_CACHE_BUCKETS; iter++) { spin_lock_init(&cipso_v4_cache[iter].lock); cipso_v4_cache[iter].size = 0; INIT_LIST_HEAD(&cipso_v4_cache[iter].list); } return 0; } /** * cipso_v4_cache_invalidate - Invalidates the current CIPSO cache * * Description: * Invalidates and frees any entries in the CIPSO cache. Returns zero on * success and negative values on failure. * */ void cipso_v4_cache_invalidate(void) { struct cipso_v4_map_cache_entry *entry, *tmp_entry; u32 iter; for (iter = 0; iter < CIPSO_V4_CACHE_BUCKETS; iter++) { spin_lock_bh(&cipso_v4_cache[iter].lock); list_for_each_entry_safe(entry, tmp_entry, &cipso_v4_cache[iter].list, list) { list_del(&entry->list); cipso_v4_cache_entry_free(entry); } cipso_v4_cache[iter].size = 0; spin_unlock_bh(&cipso_v4_cache[iter].lock); } } /** * cipso_v4_cache_check - Check the CIPSO cache for a label mapping * @key: the buffer to check * @key_len: buffer length in bytes * @secattr: the security attribute struct to use * * Description: * This function checks the cache to see if a label mapping already exists for * the given key. If there is a match then the cache is adjusted and the * @secattr struct is populated with the correct LSM security attributes. The * cache is adjusted in the following manner if the entry is not already the * first in the cache bucket: * * 1. The cache entry's activity counter is incremented * 2. The previous (higher ranking) entry's activity counter is decremented * 3. If the difference between the two activity counters is geater than * CIPSO_V4_CACHE_REORDERLIMIT the two entries are swapped * * Returns zero on success, -ENOENT for a cache miss, and other negative values * on error. * */ static int cipso_v4_cache_check(const unsigned char *key, u32 key_len, struct netlbl_lsm_secattr *secattr) { u32 bkt; struct cipso_v4_map_cache_entry *entry; struct cipso_v4_map_cache_entry *prev_entry = NULL; u32 hash; if (!cipso_v4_cache_enabled) return -ENOENT; hash = cipso_v4_map_cache_hash(key, key_len); bkt = hash & (CIPSO_V4_CACHE_BUCKETS - 1); spin_lock_bh(&cipso_v4_cache[bkt].lock); list_for_each_entry(entry, &cipso_v4_cache[bkt].list, list) { if (entry->hash == hash && entry->key_len == key_len && memcmp(entry->key, key, key_len) == 0) { entry->activity += 1; atomic_inc(&entry->lsm_data->refcount); secattr->cache = entry->lsm_data; secattr->flags |= NETLBL_SECATTR_CACHE; secattr->type = NETLBL_NLTYPE_CIPSOV4; if (prev_entry == NULL) { spin_unlock_bh(&cipso_v4_cache[bkt].lock); return 0; } if (prev_entry->activity > 0) prev_entry->activity -= 1; if (entry->activity > prev_entry->activity && entry->activity - prev_entry->activity > CIPSO_V4_CACHE_REORDERLIMIT) { __list_del(entry->list.prev, entry->list.next); __list_add(&entry->list, prev_entry->list.prev, &prev_entry->list); } spin_unlock_bh(&cipso_v4_cache[bkt].lock); return 0; } prev_entry = entry; } spin_unlock_bh(&cipso_v4_cache[bkt].lock); return -ENOENT; } /** * cipso_v4_cache_add - Add an entry to the CIPSO cache * @skb: the packet * @secattr: the packet's security attributes * * Description: * Add a new entry into the CIPSO label mapping cache. Add the new entry to * head of the cache bucket's list, if the cache bucket is out of room remove * the last entry in the list first. It is important to note that there is * currently no checking for duplicate keys. 
Returns zero on success, * negative values on failure. * */ int cipso_v4_cache_add(const struct sk_buff *skb, const struct netlbl_lsm_secattr *secattr) { int ret_val = -EPERM; u32 bkt; struct cipso_v4_map_cache_entry *entry = NULL; struct cipso_v4_map_cache_entry *old_entry = NULL; unsigned char *cipso_ptr; u32 cipso_ptr_len; if (!cipso_v4_cache_enabled || cipso_v4_cache_bucketsize <= 0) return 0; cipso_ptr = CIPSO_V4_OPTPTR(skb); cipso_ptr_len = cipso_ptr[1]; entry = kzalloc(sizeof(*entry), GFP_ATOMIC); if (entry == NULL) return -ENOMEM; entry->key = kmemdup(cipso_ptr, cipso_ptr_len, GFP_ATOMIC); if (entry->key == NULL) { ret_val = -ENOMEM; goto cache_add_failure; } entry->key_len = cipso_ptr_len; entry->hash = cipso_v4_map_cache_hash(cipso_ptr, cipso_ptr_len); atomic_inc(&secattr->cache->refcount); entry->lsm_data = secattr->cache; bkt = entry->hash & (CIPSO_V4_CACHE_BUCKETS - 1); spin_lock_bh(&cipso_v4_cache[bkt].lock); if (cipso_v4_cache[bkt].size < cipso_v4_cache_bucketsize) { list_add(&entry->list, &cipso_v4_cache[bkt].list); cipso_v4_cache[bkt].size += 1; } else { old_entry = list_entry(cipso_v4_cache[bkt].list.prev, struct cipso_v4_map_cache_entry, list); list_del(&old_entry->list); list_add(&entry->list, &cipso_v4_cache[bkt].list); cipso_v4_cache_entry_free(old_entry); } spin_unlock_bh(&cipso_v4_cache[bkt].lock); return 0; cache_add_failure: if (entry) cipso_v4_cache_entry_free(entry); return ret_val; } /* * DOI List Functions */ /** * cipso_v4_doi_search - Searches for a DOI definition * @doi: the DOI to search for * * Description: * Search the DOI definition list for a DOI definition with a DOI value that * matches @doi. The caller is responsible for calling rcu_read_[un]lock(). * Returns a pointer to the DOI definition on success and NULL on failure. */ static struct cipso_v4_doi *cipso_v4_doi_search(u32 doi) { struct cipso_v4_doi *iter; list_for_each_entry_rcu(iter, &cipso_v4_doi_list, list) if (iter->doi == doi && atomic_read(&iter->refcount)) return iter; return NULL; } /** * cipso_v4_doi_add - Add a new DOI to the CIPSO protocol engine * @doi_def: the DOI structure * @audit_info: NetLabel audit information * * Description: * The caller defines a new DOI for use by the CIPSO engine and calls this * function to add it to the list of acceptable domains. The caller must * ensure that the mapping table specified in @doi_def->map meets all of the * requirements of the mapping type (see cipso_ipv4.h for details). Returns * zero on success and non-zero on failure. 
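The label-mapping cache above keys each entry by a hash of the raw option bytes; the bucket index is just that hash masked by the bucket count (a power of two), and hot entries bubble toward the bucket head once their activity counter outruns the predecessor's by the reorder limit. A small userspace sketch of that bucket/promotion arithmetic (the hash function and all names here are stand-ins, not the kernel's jhash):

#include <stdio.h>

#define CACHE_BUCKETBITS  7
#define CACHE_BUCKETS     (1u << CACHE_BUCKETBITS)
#define REORDERLIMIT      10

/* Stand-in for the kernel's jhash(); any decent byte hash works for
 * the illustration (FNV-1a here). */
static unsigned hash_key(const unsigned char *key, unsigned len)
{
	unsigned h = 2166136261u;

	while (len--)
		h = (h ^ *key++) * 16777619u;
	return h;
}

int main(void)
{
	unsigned char opt[] = { 134, 14, 0, 0, 0, 5 };	/* fake CIPSO bytes */
	unsigned h = hash_key(opt, sizeof(opt));
	unsigned bkt = h & (CACHE_BUCKETS - 1);		/* same masking trick */
	unsigned prev_activity = 3, entry_activity = 15;

	printf("hash=%08x bucket=%u\n", h, bkt);

	/* Promotion test used by the cache lookup: swap the two entries
	 * once this entry is clearly hotter than its predecessor. */
	if (entry_activity > prev_activity &&
	    entry_activity - prev_activity > REORDERLIMIT)
		printf("entry would be promoted past its predecessor\n");
	return 0;
}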
* */ int cipso_v4_doi_add(struct cipso_v4_doi *doi_def, struct netlbl_audit *audit_info) { int ret_val = -EINVAL; u32 iter; u32 doi; u32 doi_type; struct audit_buffer *audit_buf; doi = doi_def->doi; doi_type = doi_def->type; if (doi_def == NULL || doi_def->doi == CIPSO_V4_DOI_UNKNOWN) goto doi_add_return; for (iter = 0; iter < CIPSO_V4_TAG_MAXCNT; iter++) { switch (doi_def->tags[iter]) { case CIPSO_V4_TAG_RBITMAP: break; case CIPSO_V4_TAG_RANGE: case CIPSO_V4_TAG_ENUM: if (doi_def->type != CIPSO_V4_MAP_PASS) goto doi_add_return; break; case CIPSO_V4_TAG_LOCAL: if (doi_def->type != CIPSO_V4_MAP_LOCAL) goto doi_add_return; break; case CIPSO_V4_TAG_INVALID: if (iter == 0) goto doi_add_return; break; default: goto doi_add_return; } } atomic_set(&doi_def->refcount, 1); spin_lock(&cipso_v4_doi_list_lock); if (cipso_v4_doi_search(doi_def->doi) != NULL) { spin_unlock(&cipso_v4_doi_list_lock); ret_val = -EEXIST; goto doi_add_return; } list_add_tail_rcu(&doi_def->list, &cipso_v4_doi_list); spin_unlock(&cipso_v4_doi_list_lock); ret_val = 0; doi_add_return: audit_buf = netlbl_audit_start(AUDIT_MAC_CIPSOV4_ADD, audit_info); if (audit_buf != NULL) { const char *type_str; switch (doi_type) { case CIPSO_V4_MAP_TRANS: type_str = "trans"; break; case CIPSO_V4_MAP_PASS: type_str = "pass"; break; case CIPSO_V4_MAP_LOCAL: type_str = "local"; break; default: type_str = "(unknown)"; } audit_log_format(audit_buf, " cipso_doi=%u cipso_type=%s res=%u", doi, type_str, ret_val == 0 ? 1 : 0); audit_log_end(audit_buf); } return ret_val; } /** * cipso_v4_doi_free - Frees a DOI definition * @entry: the entry's RCU field * * Description: * This function frees all of the memory associated with a DOI definition. * */ void cipso_v4_doi_free(struct cipso_v4_doi *doi_def) { if (doi_def == NULL) return; switch (doi_def->type) { case CIPSO_V4_MAP_TRANS: kfree(doi_def->map.std->lvl.cipso); kfree(doi_def->map.std->lvl.local); kfree(doi_def->map.std->cat.cipso); kfree(doi_def->map.std->cat.local); break; } kfree(doi_def); } /** * cipso_v4_doi_free_rcu - Frees a DOI definition via the RCU pointer * @entry: the entry's RCU field * * Description: * This function is designed to be used as a callback to the call_rcu() * function so that the memory allocated to the DOI definition can be released * safely. * */ static void cipso_v4_doi_free_rcu(struct rcu_head *entry) { struct cipso_v4_doi *doi_def; doi_def = container_of(entry, struct cipso_v4_doi, rcu); cipso_v4_doi_free(doi_def); } /** * cipso_v4_doi_remove - Remove an existing DOI from the CIPSO protocol engine * @doi: the DOI value * @audit_secid: the LSM secid to use in the audit message * * Description: * Removes a DOI definition from the CIPSO engine. The NetLabel routines will * be called to release their own LSM domain mappings as well as our own * domain list. Returns zero on success and negative values on failure. 
* */ int cipso_v4_doi_remove(u32 doi, struct netlbl_audit *audit_info) { int ret_val; struct cipso_v4_doi *doi_def; struct audit_buffer *audit_buf; spin_lock(&cipso_v4_doi_list_lock); doi_def = cipso_v4_doi_search(doi); if (doi_def == NULL) { spin_unlock(&cipso_v4_doi_list_lock); ret_val = -ENOENT; goto doi_remove_return; } if (!atomic_dec_and_test(&doi_def->refcount)) { spin_unlock(&cipso_v4_doi_list_lock); ret_val = -EBUSY; goto doi_remove_return; } list_del_rcu(&doi_def->list); spin_unlock(&cipso_v4_doi_list_lock); cipso_v4_cache_invalidate(); call_rcu(&doi_def->rcu, cipso_v4_doi_free_rcu); ret_val = 0; doi_remove_return: audit_buf = netlbl_audit_start(AUDIT_MAC_CIPSOV4_DEL, audit_info); if (audit_buf != NULL) { audit_log_format(audit_buf, " cipso_doi=%u res=%u", doi, ret_val == 0 ? 1 : 0); audit_log_end(audit_buf); } return ret_val; } /** * cipso_v4_doi_getdef - Returns a reference to a valid DOI definition * @doi: the DOI value * * Description: * Searches for a valid DOI definition and if one is found it is returned to * the caller. Otherwise NULL is returned. The caller must ensure that * rcu_read_lock() is held while accessing the returned definition and the DOI * definition reference count is decremented when the caller is done. * */ struct cipso_v4_doi *cipso_v4_doi_getdef(u32 doi) { struct cipso_v4_doi *doi_def; rcu_read_lock(); doi_def = cipso_v4_doi_search(doi); if (doi_def == NULL) goto doi_getdef_return; if (!atomic_inc_not_zero(&doi_def->refcount)) doi_def = NULL; doi_getdef_return: rcu_read_unlock(); return doi_def; } /** * cipso_v4_doi_putdef - Releases a reference for the given DOI definition * @doi_def: the DOI definition * * Description: * Releases a DOI definition reference obtained from cipso_v4_doi_getdef(). * */ void cipso_v4_doi_putdef(struct cipso_v4_doi *doi_def) { if (doi_def == NULL) return; if (!atomic_dec_and_test(&doi_def->refcount)) return; spin_lock(&cipso_v4_doi_list_lock); list_del_rcu(&doi_def->list); spin_unlock(&cipso_v4_doi_list_lock); cipso_v4_cache_invalidate(); call_rcu(&doi_def->rcu, cipso_v4_doi_free_rcu); } /** * cipso_v4_doi_walk - Iterate through the DOI definitions * @skip_cnt: skip past this number of DOI definitions, updated * @callback: callback for each DOI definition * @cb_arg: argument for the callback function * * Description: * Iterate over the DOI definition list, skipping the first @skip_cnt entries. * For each entry call @callback, if @callback returns a negative value stop * 'walking' through the list and return. Updates the value in @skip_cnt upon * return. Returns zero on success, negative values on failure. * */ int cipso_v4_doi_walk(u32 *skip_cnt, int (*callback) (struct cipso_v4_doi *doi_def, void *arg), void *cb_arg) { int ret_val = -ENOENT; u32 doi_cnt = 0; struct cipso_v4_doi *iter_doi; rcu_read_lock(); list_for_each_entry_rcu(iter_doi, &cipso_v4_doi_list, list) if (atomic_read(&iter_doi->refcount) > 0) { if (doi_cnt++ < *skip_cnt) continue; ret_val = callback(iter_doi, cb_arg); if (ret_val < 0) { doi_cnt--; goto doi_walk_return; } } doi_walk_return: rcu_read_unlock(); *skip_cnt = doi_cnt; return ret_val; } /* * Label Mapping Functions */ /** * cipso_v4_map_lvl_valid - Checks to see if the given level is understood * @doi_def: the DOI definition * @level: the level to check * * Description: * Checks the given level against the given DOI definition and returns a * negative value if the level does not have a valid mapping and a zero value * if the level is defined by the DOI. 
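cipso_v4_doi_getdef() and cipso_v4_doi_putdef() above implement the classic "take a reference only if the object is still live" idiom: the getter uses an increment-if-nonzero so it can never resurrect a definition whose count has already hit zero, and the putter tears the object down on the final decrement. A compact C11 userspace analogue (all names hypothetical, and the RCU-deferred free is out of scope here):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Analogue of atomic_inc_not_zero(): take a reference only while the
 * object is still live (count > 0). */
static bool ref_get_not_zero(atomic_int *refcount)
{
	int old = atomic_load(refcount);

	while (old != 0)
		if (atomic_compare_exchange_weak(refcount, &old, old + 1))
			return true;
	return false;
}

/* Analogue of atomic_dec_and_test(): returns true on the final put,
 * at which point the caller is responsible for freeing the object. */
static bool ref_put_and_test(atomic_int *refcount)
{
	return atomic_fetch_sub(refcount, 1) == 1;
}

int main(void)
{
	atomic_int refcount = 1;	/* initial reference, as in doi_add */

	if (ref_get_not_zero(&refcount))
		printf("got a reference, count is now 2\n");
	ref_put_and_test(&refcount);	/* drop the extra reference */
	if (ref_put_and_test(&refcount))
		printf("last reference dropped, object may be freed\n");
	return 0;
}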
* */ static int cipso_v4_map_lvl_valid(const struct cipso_v4_doi *doi_def, u8 level) { switch (doi_def->type) { case CIPSO_V4_MAP_PASS: return 0; case CIPSO_V4_MAP_TRANS: if (doi_def->map.std->lvl.cipso[level] < CIPSO_V4_INV_LVL) return 0; break; } return -EFAULT; } /** * cipso_v4_map_lvl_hton - Perform a level mapping from the host to the network * @doi_def: the DOI definition * @host_lvl: the host MLS level * @net_lvl: the network/CIPSO MLS level * * Description: * Perform a label mapping to translate a local MLS level to the correct * CIPSO level using the given DOI definition. Returns zero on success, * negative values otherwise. * */ static int cipso_v4_map_lvl_hton(const struct cipso_v4_doi *doi_def, u32 host_lvl, u32 *net_lvl) { switch (doi_def->type) { case CIPSO_V4_MAP_PASS: *net_lvl = host_lvl; return 0; case CIPSO_V4_MAP_TRANS: if (host_lvl < doi_def->map.std->lvl.local_size && doi_def->map.std->lvl.local[host_lvl] < CIPSO_V4_INV_LVL) { *net_lvl = doi_def->map.std->lvl.local[host_lvl]; return 0; } return -EPERM; } return -EINVAL; } /** * cipso_v4_map_lvl_ntoh - Perform a level mapping from the network to the host * @doi_def: the DOI definition * @net_lvl: the network/CIPSO MLS level * @host_lvl: the host MLS level * * Description: * Perform a label mapping to translate a CIPSO level to the correct local MLS * level using the given DOI definition. Returns zero on success, negative * values otherwise. * */ static int cipso_v4_map_lvl_ntoh(const struct cipso_v4_doi *doi_def, u32 net_lvl, u32 *host_lvl) { struct cipso_v4_std_map_tbl *map_tbl; switch (doi_def->type) { case CIPSO_V4_MAP_PASS: *host_lvl = net_lvl; return 0; case CIPSO_V4_MAP_TRANS: map_tbl = doi_def->map.std; if (net_lvl < map_tbl->lvl.cipso_size && map_tbl->lvl.cipso[net_lvl] < CIPSO_V4_INV_LVL) { *host_lvl = doi_def->map.std->lvl.cipso[net_lvl]; return 0; } return -EPERM; } return -EINVAL; } /** * cipso_v4_map_cat_rbm_valid - Checks to see if the category bitmap is valid * @doi_def: the DOI definition * @bitmap: category bitmap * @bitmap_len: bitmap length in bytes * * Description: * Checks the given category bitmap against the given DOI definition and * returns a negative value if any of the categories in the bitmap do not have * a valid mapping and a zero value if all of the categories are valid. * */ static int cipso_v4_map_cat_rbm_valid(const struct cipso_v4_doi *doi_def, const unsigned char *bitmap, u32 bitmap_len) { int cat = -1; u32 bitmap_len_bits = bitmap_len * 8; u32 cipso_cat_size; u32 *cipso_array; switch (doi_def->type) { case CIPSO_V4_MAP_PASS: return 0; case CIPSO_V4_MAP_TRANS: cipso_cat_size = doi_def->map.std->cat.cipso_size; cipso_array = doi_def->map.std->cat.cipso; for (;;) { cat = cipso_v4_bitmap_walk(bitmap, bitmap_len_bits, cat + 1, 1); if (cat < 0) break; if (cat >= cipso_cat_size || cipso_array[cat] >= CIPSO_V4_INV_CAT) return -EFAULT; } if (cat == -1) return 0; break; } return -EFAULT; } /** * cipso_v4_map_cat_rbm_hton - Perform a category mapping from host to network * @doi_def: the DOI definition * @secattr: the security attributes * @net_cat: the zero'd out category bitmap in network/CIPSO format * @net_cat_len: the length of the CIPSO bitmap in bytes * * Description: * Perform a label mapping to translate a local MLS category bitmap to the * correct CIPSO bitmap using the given DOI definition. Returns the minimum * size in bytes of the network bitmap on success, negative values otherwise. 
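For CIPSO_V4_MAP_TRANS, the level translation above is nothing more than two lookup arrays, one per direction, with an "invalid" sentinel guarding unmapped slots. A minimal userspace sketch of the same table shape (the sentinel value and table contents are invented for illustration):

#include <stdio.h>

#define INV_LVL 0x80000000u	/* stand-in for CIPSO_V4_INV_LVL */

/* host level -> network level, and the reverse table */
static const unsigned lvl_local[4] = { 0, 2, INV_LVL, 7 };
static const unsigned lvl_cipso[8] = {
	0, INV_LVL, 1, INV_LVL, INV_LVL, INV_LVL, INV_LVL, 3
};

/* Mirror of cipso_v4_map_lvl_hton() for the TRANS case: bounds check,
 * then reject the sentinel. */
static int lvl_hton(unsigned host, unsigned *net)
{
	if (host < 4 && lvl_local[host] < INV_LVL) {
		*net = lvl_local[host];
		return 0;
	}
	return -1;
}

int main(void)
{
	unsigned net;

	if (lvl_hton(1, &net) == 0)
		printf("host level 1 -> network level %u\n", net);	/* 2 */
	if (lvl_hton(2, &net) != 0)
		printf("host level 2 has no mapping\n");
	(void)lvl_cipso;	/* the ntoh direction uses this table */
	return 0;
}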
* */ static int cipso_v4_map_cat_rbm_hton(const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr, unsigned char *net_cat, u32 net_cat_len) { int host_spot = -1; u32 net_spot = CIPSO_V4_INV_CAT; u32 net_spot_max = 0; u32 net_clen_bits = net_cat_len * 8; u32 host_cat_size = 0; u32 *host_cat_array = NULL; if (doi_def->type == CIPSO_V4_MAP_TRANS) { host_cat_size = doi_def->map.std->cat.local_size; host_cat_array = doi_def->map.std->cat.local; } for (;;) { host_spot = netlbl_secattr_catmap_walk(secattr->attr.mls.cat, host_spot + 1); if (host_spot < 0) break; switch (doi_def->type) { case CIPSO_V4_MAP_PASS: net_spot = host_spot; break; case CIPSO_V4_MAP_TRANS: if (host_spot >= host_cat_size) return -EPERM; net_spot = host_cat_array[host_spot]; if (net_spot >= CIPSO_V4_INV_CAT) return -EPERM; break; } if (net_spot >= net_clen_bits) return -ENOSPC; cipso_v4_bitmap_setbit(net_cat, net_spot, 1); if (net_spot > net_spot_max) net_spot_max = net_spot; } if (++net_spot_max % 8) return net_spot_max / 8 + 1; return net_spot_max / 8; } /** * cipso_v4_map_cat_rbm_ntoh - Perform a category mapping from network to host * @doi_def: the DOI definition * @net_cat: the category bitmap in network/CIPSO format * @net_cat_len: the length of the CIPSO bitmap in bytes * @secattr: the security attributes * * Description: * Perform a label mapping to translate a CIPSO bitmap to the correct local * MLS category bitmap using the given DOI definition. Returns zero on * success, negative values on failure. * */ static int cipso_v4_map_cat_rbm_ntoh(const struct cipso_v4_doi *doi_def, const unsigned char *net_cat, u32 net_cat_len, struct netlbl_lsm_secattr *secattr) { int ret_val; int net_spot = -1; u32 host_spot = CIPSO_V4_INV_CAT; u32 net_clen_bits = net_cat_len * 8; u32 net_cat_size = 0; u32 *net_cat_array = NULL; if (doi_def->type == CIPSO_V4_MAP_TRANS) { net_cat_size = doi_def->map.std->cat.cipso_size; net_cat_array = doi_def->map.std->cat.cipso; } for (;;) { net_spot = cipso_v4_bitmap_walk(net_cat, net_clen_bits, net_spot + 1, 1); if (net_spot < 0) { if (net_spot == -2) return -EFAULT; return 0; } switch (doi_def->type) { case CIPSO_V4_MAP_PASS: host_spot = net_spot; break; case CIPSO_V4_MAP_TRANS: if (net_spot >= net_cat_size) return -EPERM; host_spot = net_cat_array[net_spot]; if (host_spot >= CIPSO_V4_INV_CAT) return -EPERM; break; } ret_val = netlbl_secattr_catmap_setbit(secattr->attr.mls.cat, host_spot, GFP_ATOMIC); if (ret_val != 0) return ret_val; } return -EINVAL; } /** * cipso_v4_map_cat_enum_valid - Checks to see if the categories are valid * @doi_def: the DOI definition * @enumcat: category list * @enumcat_len: length of the category list in bytes * * Description: * Checks the given categories against the given DOI definition and returns a * negative value if any of the categories do not have a valid mapping and a * zero value if all of the categories are valid. 
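The return value of cipso_v4_map_cat_rbm_hton() above is the smallest whole number of bytes covering the highest network category bit that was set; the closing "if (++net_spot_max % 8)" test is just a round-up division. A tiny userspace check of that arithmetic:

#include <stdio.h>

/* Bytes needed to hold bits 0..highest_bit of an MSB-first bitmap,
 * written the same way the kernel function does it. */
static unsigned rbm_len(unsigned highest_bit)
{
	unsigned n = highest_bit + 1;		/* the ++net_spot_max step */

	return (n % 8) ? n / 8 + 1 : n / 8;	/* round up to whole bytes */
}

int main(void)
{
	/* highest set bit 0 -> 1 byte, bit 7 -> 1 byte, bit 8 -> 2 bytes */
	printf("%u %u %u\n", rbm_len(0), rbm_len(7), rbm_len(8));
	return 0;
}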
* */ static int cipso_v4_map_cat_enum_valid(const struct cipso_v4_doi *doi_def, const unsigned char *enumcat, u32 enumcat_len) { u16 cat; int cat_prev = -1; u32 iter; if (doi_def->type != CIPSO_V4_MAP_PASS || enumcat_len & 0x01) return -EFAULT; for (iter = 0; iter < enumcat_len; iter += 2) { cat = get_unaligned_be16(&enumcat[iter]); if (cat <= cat_prev) return -EFAULT; cat_prev = cat; } return 0; } /** * cipso_v4_map_cat_enum_hton - Perform a category mapping from host to network * @doi_def: the DOI definition * @secattr: the security attributes * @net_cat: the zero'd out category list in network/CIPSO format * @net_cat_len: the length of the CIPSO category list in bytes * * Description: * Perform a label mapping to translate a local MLS category bitmap to the * correct CIPSO category list using the given DOI definition. Returns the * size in bytes of the network category bitmap on success, negative values * otherwise. * */ static int cipso_v4_map_cat_enum_hton(const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr, unsigned char *net_cat, u32 net_cat_len) { int cat = -1; u32 cat_iter = 0; for (;;) { cat = netlbl_secattr_catmap_walk(secattr->attr.mls.cat, cat + 1); if (cat < 0) break; if ((cat_iter + 2) > net_cat_len) return -ENOSPC; *((__be16 *)&net_cat[cat_iter]) = htons(cat); cat_iter += 2; } return cat_iter; } /** * cipso_v4_map_cat_enum_ntoh - Perform a category mapping from network to host * @doi_def: the DOI definition * @net_cat: the category list in network/CIPSO format * @net_cat_len: the length of the CIPSO bitmap in bytes * @secattr: the security attributes * * Description: * Perform a label mapping to translate a CIPSO category list to the correct * local MLS category bitmap using the given DOI definition. Returns zero on * success, negative values on failure. * */ static int cipso_v4_map_cat_enum_ntoh(const struct cipso_v4_doi *doi_def, const unsigned char *net_cat, u32 net_cat_len, struct netlbl_lsm_secattr *secattr) { int ret_val; u32 iter; for (iter = 0; iter < net_cat_len; iter += 2) { ret_val = netlbl_secattr_catmap_setbit(secattr->attr.mls.cat, get_unaligned_be16(&net_cat[iter]), GFP_ATOMIC); if (ret_val != 0) return ret_val; } return 0; } /** * cipso_v4_map_cat_rng_valid - Checks to see if the categories are valid * @doi_def: the DOI definition * @rngcat: category list * @rngcat_len: length of the category list in bytes * * Description: * Checks the given categories against the given DOI definition and returns a * negative value if any of the categories do not have a valid mapping and a * zero value if all of the categories are valid. 
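Tag type #2, handled above, is simply a sorted list of 16-bit big-endian category values, which is why the validator only has to confirm an even length and strict ascending order. A userspace sketch of encoding and validating such a list (helper names invented):

#include <stdio.h>

/* Store/load a 16-bit value big-endian, like htons() plus an
 * unaligned access. */
static void put_be16(unsigned char *p, unsigned short v)
{
	p[0] = v >> 8;
	p[1] = v & 0xff;
}

static unsigned short get_be16(const unsigned char *p)
{
	return (unsigned short)((p[0] << 8) | p[1]);
}

/* Same checks as cipso_v4_map_cat_enum_valid(): even length and a
 * strictly increasing sequence of categories. */
static int enum_valid(const unsigned char *buf, unsigned len)
{
	int prev = -1;
	unsigned i;

	if (len & 0x01)
		return -1;
	for (i = 0; i < len; i += 2) {
		unsigned short cat = get_be16(&buf[i]);

		if ((int)cat <= prev)
			return -1;
		prev = cat;
	}
	return 0;
}

int main(void)
{
	unsigned char buf[6];

	put_be16(&buf[0], 3);
	put_be16(&buf[2], 17);
	put_be16(&buf[4], 40);
	printf("valid: %s\n", enum_valid(buf, sizeof(buf)) == 0 ? "yes" : "no");

	put_be16(&buf[4], 17);		/* duplicate -> must be rejected */
	printf("valid: %s\n", enum_valid(buf, sizeof(buf)) == 0 ? "yes" : "no");
	return 0;
}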
* */ static int cipso_v4_map_cat_rng_valid(const struct cipso_v4_doi *doi_def, const unsigned char *rngcat, u32 rngcat_len) { u16 cat_high; u16 cat_low; u32 cat_prev = CIPSO_V4_MAX_REM_CATS + 1; u32 iter; if (doi_def->type != CIPSO_V4_MAP_PASS || rngcat_len & 0x01) return -EFAULT; for (iter = 0; iter < rngcat_len; iter += 4) { cat_high = get_unaligned_be16(&rngcat[iter]); if ((iter + 4) <= rngcat_len) cat_low = get_unaligned_be16(&rngcat[iter + 2]); else cat_low = 0; if (cat_high > cat_prev) return -EFAULT; cat_prev = cat_low; } return 0; } /** * cipso_v4_map_cat_rng_hton - Perform a category mapping from host to network * @doi_def: the DOI definition * @secattr: the security attributes * @net_cat: the zero'd out category list in network/CIPSO format * @net_cat_len: the length of the CIPSO category list in bytes * * Description: * Perform a label mapping to translate a local MLS category bitmap to the * correct CIPSO category list using the given DOI definition. Returns the * size in bytes of the network category bitmap on success, negative values * otherwise. * */ static int cipso_v4_map_cat_rng_hton(const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr, unsigned char *net_cat, u32 net_cat_len) { int iter = -1; u16 array[CIPSO_V4_TAG_RNG_CAT_MAX * 2]; u32 array_cnt = 0; u32 cat_size = 0; /* make sure we don't overflow the 'array[]' variable */ if (net_cat_len > (CIPSO_V4_OPT_LEN_MAX - CIPSO_V4_HDR_LEN - CIPSO_V4_TAG_RNG_BLEN)) return -ENOSPC; for (;;) { iter = netlbl_secattr_catmap_walk(secattr->attr.mls.cat, iter + 1); if (iter < 0) break; cat_size += (iter == 0 ? 0 : sizeof(u16)); if (cat_size > net_cat_len) return -ENOSPC; array[array_cnt++] = iter; iter = netlbl_secattr_catmap_walk_rng(secattr->attr.mls.cat, iter); if (iter < 0) return -EFAULT; cat_size += sizeof(u16); if (cat_size > net_cat_len) return -ENOSPC; array[array_cnt++] = iter; } for (iter = 0; array_cnt > 0;) { *((__be16 *)&net_cat[iter]) = htons(array[--array_cnt]); iter += 2; array_cnt--; if (array[array_cnt] != 0) { *((__be16 *)&net_cat[iter]) = htons(array[array_cnt]); iter += 2; } } return cat_size; } /** * cipso_v4_map_cat_rng_ntoh - Perform a category mapping from network to host * @doi_def: the DOI definition * @net_cat: the category list in network/CIPSO format * @net_cat_len: the length of the CIPSO bitmap in bytes * @secattr: the security attributes * * Description: * Perform a label mapping to translate a CIPSO category list to the correct * local MLS category bitmap using the given DOI definition. Returns zero on * success, negative values on failure. * */ static int cipso_v4_map_cat_rng_ntoh(const struct cipso_v4_doi *doi_def, const unsigned char *net_cat, u32 net_cat_len, struct netlbl_lsm_secattr *secattr) { int ret_val; u32 net_iter; u16 cat_low; u16 cat_high; for (net_iter = 0; net_iter < net_cat_len; net_iter += 4) { cat_high = get_unaligned_be16(&net_cat[net_iter]); if ((net_iter + 4) <= net_cat_len) cat_low = get_unaligned_be16(&net_cat[net_iter + 2]); else cat_low = 0; ret_val = netlbl_secattr_catmap_setrng(secattr->attr.mls.cat, cat_low, cat_high, GFP_ATOMIC); if (ret_val != 0) return ret_val; } return 0; } /* * Protocol Handling Functions */ /** * cipso_v4_gentag_hdr - Generate a CIPSO option header * @doi_def: the DOI definition * @len: the total tag length in bytes, not including this header * @buf: the CIPSO option buffer * * Description: * Write a CIPSO header into the beginning of @buffer. 
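Tag type #5, built above, packs category ranges as big-endian (high, low) pairs with the highest range first, and a trailing low end of zero is omitted from the wire format; that omission is how eight ranges can fit even though a naive byte count says seven. A simplified sketch of that packing rule (userspace, names invented, and the kernel's array-reversal bookkeeping is restated more directly here):

#include <stdio.h>

static void put_be16(unsigned char *p, unsigned short v)
{
	p[0] = v >> 8;
	p[1] = v & 0xff;
}

/* Pack (high, low) pairs, highest range first; a final low of zero is
 * implicit and therefore omitted, matching the wire format that the
 * kernel's encoder and parser agree on. */
static unsigned rng_pack(unsigned char *buf, const unsigned short *pairs,
			 unsigned npairs)
{
	unsigned i, off = 0;

	for (i = 0; i < npairs; i++) {
		put_be16(&buf[off], pairs[2 * i]);	/* high end */
		off += 2;
		if (i == npairs - 1 && pairs[2 * i + 1] == 0)
			break;				/* omit trailing 0 */
		put_be16(&buf[off], pairs[2 * i + 1]);	/* low end */
		off += 2;
	}
	return off;
}

int main(void)
{
	/* two ranges: [20,30] and [0,5]; the final low of 0 is dropped */
	static const unsigned short pairs[] = { 30, 20, 5, 0 };
	unsigned char buf[16];
	unsigned len = rng_pack(buf, pairs, 2);

	printf("packed length: %u bytes\n", len);	/* 6, not 8 */
	return 0;
}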
* */ static void cipso_v4_gentag_hdr(const struct cipso_v4_doi *doi_def, unsigned char *buf, u32 len) { buf[0] = IPOPT_CIPSO; buf[1] = CIPSO_V4_HDR_LEN + len; *(__be32 *)&buf[2] = htonl(doi_def->doi); } /** * cipso_v4_gentag_rbm - Generate a CIPSO restricted bitmap tag (type #1) * @doi_def: the DOI definition * @secattr: the security attributes * @buffer: the option buffer * @buffer_len: length of buffer in bytes * * Description: * Generate a CIPSO option using the restricted bitmap tag, tag type #1. The * actual buffer length may be larger than the indicated size due to * translation between host and network category bitmaps. Returns the size of * the tag on success, negative values on failure. * */ static int cipso_v4_gentag_rbm(const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr, unsigned char *buffer, u32 buffer_len) { int ret_val; u32 tag_len; u32 level; if ((secattr->flags & NETLBL_SECATTR_MLS_LVL) == 0) return -EPERM; ret_val = cipso_v4_map_lvl_hton(doi_def, secattr->attr.mls.lvl, &level); if (ret_val != 0) return ret_val; if (secattr->flags & NETLBL_SECATTR_MLS_CAT) { ret_val = cipso_v4_map_cat_rbm_hton(doi_def, secattr, &buffer[4], buffer_len - 4); if (ret_val < 0) return ret_val; /* This will send packets using the "optimized" format when * possible as specified in section 3.4.2.6 of the * CIPSO draft. */ if (cipso_v4_rbm_optfmt && ret_val > 0 && ret_val <= 10) tag_len = 14; else tag_len = 4 + ret_val; } else tag_len = 4; buffer[0] = CIPSO_V4_TAG_RBITMAP; buffer[1] = tag_len; buffer[3] = level; return tag_len; } /** * cipso_v4_parsetag_rbm - Parse a CIPSO restricted bitmap tag * @doi_def: the DOI definition * @tag: the CIPSO tag * @secattr: the security attributes * * Description: * Parse a CIPSO restricted bitmap tag (tag type #1) and return the security * attributes in @secattr. Return zero on success, negatives values on * failure. * */ static int cipso_v4_parsetag_rbm(const struct cipso_v4_doi *doi_def, const unsigned char *tag, struct netlbl_lsm_secattr *secattr) { int ret_val; u8 tag_len = tag[1]; u32 level; ret_val = cipso_v4_map_lvl_ntoh(doi_def, tag[3], &level); if (ret_val != 0) return ret_val; secattr->attr.mls.lvl = level; secattr->flags |= NETLBL_SECATTR_MLS_LVL; if (tag_len > 4) { secattr->attr.mls.cat = netlbl_secattr_catmap_alloc(GFP_ATOMIC); if (secattr->attr.mls.cat == NULL) return -ENOMEM; ret_val = cipso_v4_map_cat_rbm_ntoh(doi_def, &tag[4], tag_len - 4, secattr); if (ret_val != 0) { netlbl_secattr_catmap_free(secattr->attr.mls.cat); return ret_val; } secattr->flags |= NETLBL_SECATTR_MLS_CAT; } return 0; } /** * cipso_v4_gentag_enum - Generate a CIPSO enumerated tag (type #2) * @doi_def: the DOI definition * @secattr: the security attributes * @buffer: the option buffer * @buffer_len: length of buffer in bytes * * Description: * Generate a CIPSO option using the enumerated tag, tag type #2. Returns the * size of the tag on success, negative values on failure. 
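The option header written by cipso_v4_gentag_hdr() above is six bytes: the option type (IPOPT_CIPSO is 134), the total option length, then the DOI as a 32-bit big-endian value. A standalone sketch of that layout (userspace, without the kernel's htonl helper):

#include <stdio.h>

#define OPT_CIPSO	134	/* IPOPT_CIPSO: option type byte */
#define HDR_LEN		6	/* type + length + 4-byte DOI */

static void gen_hdr(unsigned char *buf, unsigned doi, unsigned tag_len)
{
	buf[0] = OPT_CIPSO;
	buf[1] = (unsigned char)(HDR_LEN + tag_len);	/* whole option */
	buf[2] = doi >> 24;				/* DOI, big-endian */
	buf[3] = doi >> 16;
	buf[4] = doi >> 8;
	buf[5] = doi;
}

int main(void)
{
	unsigned char buf[HDR_LEN];
	unsigned i;

	gen_hdr(buf, 5, 10);	/* DOI 5, 10 bytes of tag payload */
	for (i = 0; i < HDR_LEN; i++)
		printf("%02x ", buf[i]);
	printf("\n");		/* 86 10 00 00 00 05 */
	return 0;
}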
* */ static int cipso_v4_gentag_enum(const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr, unsigned char *buffer, u32 buffer_len) { int ret_val; u32 tag_len; u32 level; if (!(secattr->flags & NETLBL_SECATTR_MLS_LVL)) return -EPERM; ret_val = cipso_v4_map_lvl_hton(doi_def, secattr->attr.mls.lvl, &level); if (ret_val != 0) return ret_val; if (secattr->flags & NETLBL_SECATTR_MLS_CAT) { ret_val = cipso_v4_map_cat_enum_hton(doi_def, secattr, &buffer[4], buffer_len - 4); if (ret_val < 0) return ret_val; tag_len = 4 + ret_val; } else tag_len = 4; buffer[0] = CIPSO_V4_TAG_ENUM; buffer[1] = tag_len; buffer[3] = level; return tag_len; } /** * cipso_v4_parsetag_enum - Parse a CIPSO enumerated tag * @doi_def: the DOI definition * @tag: the CIPSO tag * @secattr: the security attributes * * Description: * Parse a CIPSO enumerated tag (tag type #2) and return the security * attributes in @secattr. Return zero on success, negatives values on * failure. * */ static int cipso_v4_parsetag_enum(const struct cipso_v4_doi *doi_def, const unsigned char *tag, struct netlbl_lsm_secattr *secattr) { int ret_val; u8 tag_len = tag[1]; u32 level; ret_val = cipso_v4_map_lvl_ntoh(doi_def, tag[3], &level); if (ret_val != 0) return ret_val; secattr->attr.mls.lvl = level; secattr->flags |= NETLBL_SECATTR_MLS_LVL; if (tag_len > 4) { secattr->attr.mls.cat = netlbl_secattr_catmap_alloc(GFP_ATOMIC); if (secattr->attr.mls.cat == NULL) return -ENOMEM; ret_val = cipso_v4_map_cat_enum_ntoh(doi_def, &tag[4], tag_len - 4, secattr); if (ret_val != 0) { netlbl_secattr_catmap_free(secattr->attr.mls.cat); return ret_val; } secattr->flags |= NETLBL_SECATTR_MLS_CAT; } return 0; } /** * cipso_v4_gentag_rng - Generate a CIPSO ranged tag (type #5) * @doi_def: the DOI definition * @secattr: the security attributes * @buffer: the option buffer * @buffer_len: length of buffer in bytes * * Description: * Generate a CIPSO option using the ranged tag, tag type #5. Returns the * size of the tag on success, negative values on failure. * */ static int cipso_v4_gentag_rng(const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr, unsigned char *buffer, u32 buffer_len) { int ret_val; u32 tag_len; u32 level; if (!(secattr->flags & NETLBL_SECATTR_MLS_LVL)) return -EPERM; ret_val = cipso_v4_map_lvl_hton(doi_def, secattr->attr.mls.lvl, &level); if (ret_val != 0) return ret_val; if (secattr->flags & NETLBL_SECATTR_MLS_CAT) { ret_val = cipso_v4_map_cat_rng_hton(doi_def, secattr, &buffer[4], buffer_len - 4); if (ret_val < 0) return ret_val; tag_len = 4 + ret_val; } else tag_len = 4; buffer[0] = CIPSO_V4_TAG_RANGE; buffer[1] = tag_len; buffer[3] = level; return tag_len; } /** * cipso_v4_parsetag_rng - Parse a CIPSO ranged tag * @doi_def: the DOI definition * @tag: the CIPSO tag * @secattr: the security attributes * * Description: * Parse a CIPSO ranged tag (tag type #5) and return the security attributes * in @secattr. Return zero on success, negatives values on failure. 
* */ static int cipso_v4_parsetag_rng(const struct cipso_v4_doi *doi_def, const unsigned char *tag, struct netlbl_lsm_secattr *secattr) { int ret_val; u8 tag_len = tag[1]; u32 level; ret_val = cipso_v4_map_lvl_ntoh(doi_def, tag[3], &level); if (ret_val != 0) return ret_val; secattr->attr.mls.lvl = level; secattr->flags |= NETLBL_SECATTR_MLS_LVL; if (tag_len > 4) { secattr->attr.mls.cat = netlbl_secattr_catmap_alloc(GFP_ATOMIC); if (secattr->attr.mls.cat == NULL) return -ENOMEM; ret_val = cipso_v4_map_cat_rng_ntoh(doi_def, &tag[4], tag_len - 4, secattr); if (ret_val != 0) { netlbl_secattr_catmap_free(secattr->attr.mls.cat); return ret_val; } secattr->flags |= NETLBL_SECATTR_MLS_CAT; } return 0; } /** * cipso_v4_gentag_loc - Generate a CIPSO local tag (non-standard) * @doi_def: the DOI definition * @secattr: the security attributes * @buffer: the option buffer * @buffer_len: length of buffer in bytes * * Description: * Generate a CIPSO option using the local tag. Returns the size of the tag * on success, negative values on failure. * */ static int cipso_v4_gentag_loc(const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr, unsigned char *buffer, u32 buffer_len) { if (!(secattr->flags & NETLBL_SECATTR_SECID)) return -EPERM; buffer[0] = CIPSO_V4_TAG_LOCAL; buffer[1] = CIPSO_V4_TAG_LOC_BLEN; *(u32 *)&buffer[2] = secattr->attr.secid; return CIPSO_V4_TAG_LOC_BLEN; } /** * cipso_v4_parsetag_loc - Parse a CIPSO local tag * @doi_def: the DOI definition * @tag: the CIPSO tag * @secattr: the security attributes * * Description: * Parse a CIPSO local tag and return the security attributes in @secattr. * Return zero on success, negatives values on failure. * */ static int cipso_v4_parsetag_loc(const struct cipso_v4_doi *doi_def, const unsigned char *tag, struct netlbl_lsm_secattr *secattr) { secattr->attr.secid = *(u32 *)&tag[2]; secattr->flags |= NETLBL_SECATTR_SECID; return 0; } /** * cipso_v4_validate - Validate a CIPSO option * @option: the start of the option, on error it is set to point to the error * * Description: * This routine is called to validate a CIPSO option, it checks all of the * fields to ensure that they are at least valid, see the draft snippet below * for details. If the option is valid then a zero value is returned and * the value of @option is unchanged. If the option is invalid then a * non-zero value is returned and @option is adjusted to point to the * offending portion of the option. From the IETF draft ... * * "If any field within the CIPSO options, such as the DOI identifier, is not * recognized the IP datagram is discarded and an ICMP 'parameter problem' * (type 12) is generated and returned. The ICMP code field is set to 'bad * parameter' (code 0) and the pointer is set to the start of the CIPSO field * that is unrecognized." 
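cipso_v4_validate() below walks the tags once, and the value it returns doubles as the offset of the offending byte so the caller can aim an ICMP "parameter problem" pointer at it. A much-reduced userspace sketch of that walk, checking only structural lengths (no DOI lookup or mapping validation; thresholds simplified to the 4-byte base tag length):

#include <stdio.h>

#define HDR_LEN 6

/* Returns 0 if the tag lengths are structurally consistent, else the
 * offset (within the option) of the first bad byte, mirroring how the
 * kernel's validator reports err_offset. */
static unsigned validate_lengths(const unsigned char *opt)
{
	unsigned opt_len = opt[1];
	unsigned iter = HDR_LEN;

	if (opt_len < 8)
		return 1;	/* the length byte itself is bad */
	while (iter < opt_len) {
		unsigned tag_len = opt[iter + 1];

		if (tag_len < 4 || tag_len > opt_len - iter)
			return iter + 1;	/* bad tag length byte */
		iter += tag_len;
	}
	return 0;
}

int main(void)
{
	/* type, len=10, DOI, then one 4-byte tag (type 1, len 4, lvl 3) */
	unsigned char good[] = { 134, 10, 0, 0, 0, 5, 1, 4, 0, 3 };
	unsigned char bad[]  = { 134, 10, 0, 0, 0, 5, 1, 9, 0, 3 };

	printf("good -> %u\n", validate_lengths(good));	/* 0 */
	printf("bad  -> %u\n", validate_lengths(bad));	/* 7 */
	return 0;
}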
* */ int cipso_v4_validate(const struct sk_buff *skb, unsigned char **option) { unsigned char *opt = *option; unsigned char *tag; unsigned char opt_iter; unsigned char err_offset = 0; u8 opt_len; u8 tag_len; struct cipso_v4_doi *doi_def = NULL; u32 tag_iter; /* caller already checks for length values that are too large */ opt_len = opt[1]; if (opt_len < 8) { err_offset = 1; goto validate_return; } rcu_read_lock(); doi_def = cipso_v4_doi_search(get_unaligned_be32(&opt[2])); if (doi_def == NULL) { err_offset = 2; goto validate_return_locked; } opt_iter = CIPSO_V4_HDR_LEN; tag = opt + opt_iter; while (opt_iter < opt_len) { for (tag_iter = 0; doi_def->tags[tag_iter] != tag[0];) if (doi_def->tags[tag_iter] == CIPSO_V4_TAG_INVALID || ++tag_iter == CIPSO_V4_TAG_MAXCNT) { err_offset = opt_iter; goto validate_return_locked; } tag_len = tag[1]; if (tag_len > (opt_len - opt_iter)) { err_offset = opt_iter + 1; goto validate_return_locked; } switch (tag[0]) { case CIPSO_V4_TAG_RBITMAP: if (tag_len < CIPSO_V4_TAG_RBM_BLEN) { err_offset = opt_iter + 1; goto validate_return_locked; } /* We are already going to do all the verification * necessary at the socket layer so from our point of * view it is safe to turn these checks off (and less * work), however, the CIPSO draft says we should do * all the CIPSO validations here but it doesn't * really specify _exactly_ what we need to validate * ... so, just make it a sysctl tunable. */ if (cipso_v4_rbm_strictvalid) { if (cipso_v4_map_lvl_valid(doi_def, tag[3]) < 0) { err_offset = opt_iter + 3; goto validate_return_locked; } if (tag_len > CIPSO_V4_TAG_RBM_BLEN && cipso_v4_map_cat_rbm_valid(doi_def, &tag[4], tag_len - 4) < 0) { err_offset = opt_iter + 4; goto validate_return_locked; } } break; case CIPSO_V4_TAG_ENUM: if (tag_len < CIPSO_V4_TAG_ENUM_BLEN) { err_offset = opt_iter + 1; goto validate_return_locked; } if (cipso_v4_map_lvl_valid(doi_def, tag[3]) < 0) { err_offset = opt_iter + 3; goto validate_return_locked; } if (tag_len > CIPSO_V4_TAG_ENUM_BLEN && cipso_v4_map_cat_enum_valid(doi_def, &tag[4], tag_len - 4) < 0) { err_offset = opt_iter + 4; goto validate_return_locked; } break; case CIPSO_V4_TAG_RANGE: if (tag_len < CIPSO_V4_TAG_RNG_BLEN) { err_offset = opt_iter + 1; goto validate_return_locked; } if (cipso_v4_map_lvl_valid(doi_def, tag[3]) < 0) { err_offset = opt_iter + 3; goto validate_return_locked; } if (tag_len > CIPSO_V4_TAG_RNG_BLEN && cipso_v4_map_cat_rng_valid(doi_def, &tag[4], tag_len - 4) < 0) { err_offset = opt_iter + 4; goto validate_return_locked; } break; case CIPSO_V4_TAG_LOCAL: /* This is a non-standard tag that we only allow for * local connections, so if the incoming interface is * not the loopback device drop the packet. */ if (!(skb->dev->flags & IFF_LOOPBACK)) { err_offset = opt_iter; goto validate_return_locked; } if (tag_len != CIPSO_V4_TAG_LOC_BLEN) { err_offset = opt_iter + 1; goto validate_return_locked; } break; default: err_offset = opt_iter; goto validate_return_locked; } tag += tag_len; opt_iter += tag_len; } validate_return_locked: rcu_read_unlock(); validate_return: *option = opt + err_offset; return err_offset; } /** * cipso_v4_error - Send the correct response for a bad packet * @skb: the packet * @error: the error code * @gateway: CIPSO gateway flag * * Description: * Based on the error code given in @error, send an ICMP error message back to * the originating host. From the IETF draft ... 
* * "If the contents of the CIPSO [option] are valid but the security label is * outside of the configured host or port label range, the datagram is * discarded and an ICMP 'destination unreachable' (type 3) is generated and * returned. The code field of the ICMP is set to 'communication with * destination network administratively prohibited' (code 9) or to * 'communication with destination host administratively prohibited' * (code 10). The value of the code is dependent on whether the originator * of the ICMP message is acting as a CIPSO host or a CIPSO gateway. The * recipient of the ICMP message MUST be able to handle either value. The * same procedure is performed if a CIPSO [option] can not be added to an * IP packet because it is too large to fit in the IP options area." * * "If the error is triggered by receipt of an ICMP message, the message is * discarded and no response is permitted (consistent with general ICMP * processing rules)." * */ void cipso_v4_error(struct sk_buff *skb, int error, u32 gateway) { if (ip_hdr(skb)->protocol == IPPROTO_ICMP || error != -EACCES) return; if (gateway) icmp_send(skb, ICMP_DEST_UNREACH, ICMP_NET_ANO, 0); else icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_ANO, 0); } /** * cipso_v4_genopt - Generate a CIPSO option * @buf: the option buffer * @buf_len: the size of opt_buf * @doi_def: the CIPSO DOI to use * @secattr: the security attributes * * Description: * Generate a CIPSO option using the DOI definition and security attributes * passed to the function. Returns the length of the option on success and * negative values on failure. * */ static int cipso_v4_genopt(unsigned char *buf, u32 buf_len, const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr) { int ret_val; u32 iter; if (buf_len <= CIPSO_V4_HDR_LEN) return -ENOSPC; /* XXX - This code assumes only one tag per CIPSO option which isn't * really a good assumption to make but since we only support the MAC * tags right now it is a safe assumption. */ iter = 0; do { memset(buf, 0, buf_len); switch (doi_def->tags[iter]) { case CIPSO_V4_TAG_RBITMAP: ret_val = cipso_v4_gentag_rbm(doi_def, secattr, &buf[CIPSO_V4_HDR_LEN], buf_len - CIPSO_V4_HDR_LEN); break; case CIPSO_V4_TAG_ENUM: ret_val = cipso_v4_gentag_enum(doi_def, secattr, &buf[CIPSO_V4_HDR_LEN], buf_len - CIPSO_V4_HDR_LEN); break; case CIPSO_V4_TAG_RANGE: ret_val = cipso_v4_gentag_rng(doi_def, secattr, &buf[CIPSO_V4_HDR_LEN], buf_len - CIPSO_V4_HDR_LEN); break; case CIPSO_V4_TAG_LOCAL: ret_val = cipso_v4_gentag_loc(doi_def, secattr, &buf[CIPSO_V4_HDR_LEN], buf_len - CIPSO_V4_HDR_LEN); break; default: return -EPERM; } iter++; } while (ret_val < 0 && iter < CIPSO_V4_TAG_MAXCNT && doi_def->tags[iter] != CIPSO_V4_TAG_INVALID); if (ret_val < 0) return ret_val; cipso_v4_gentag_hdr(doi_def, buf, ret_val); return CIPSO_V4_HDR_LEN + ret_val; } static void opt_kfree_rcu(struct rcu_head *head) { kfree(container_of(head, struct ip_options_rcu, rcu)); } /** * cipso_v4_sock_setattr - Add a CIPSO option to a socket * @sk: the socket * @doi_def: the CIPSO DOI to use * @secattr: the specific security attributes of the socket * * Description: * Set the CIPSO option on the given socket using the DOI definition and * security attributes passed to the function. This function requires * exclusive access to @sk, which means it either needs to be in the * process of being created or locked. Returns zero on success and negative * values on failure. 
* */ int cipso_v4_sock_setattr(struct sock *sk, const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr) { int ret_val = -EPERM; unsigned char *buf = NULL; u32 buf_len; u32 opt_len; struct ip_options_rcu *old, *opt = NULL; struct inet_sock *sk_inet; struct inet_connection_sock *sk_conn; /* In the case of sock_create_lite(), the sock->sk field is not * defined yet but it is not a problem as the only users of these * "lite" PF_INET sockets are functions which do an accept() call * afterwards so we will label the socket as part of the accept(). */ if (sk == NULL) return 0; /* We allocate the maximum CIPSO option size here so we are probably * being a little wasteful, but it makes our life _much_ easier later * on and after all we are only talking about 40 bytes. */ buf_len = CIPSO_V4_OPT_LEN_MAX; buf = kmalloc(buf_len, GFP_ATOMIC); if (buf == NULL) { ret_val = -ENOMEM; goto socket_setattr_failure; } ret_val = cipso_v4_genopt(buf, buf_len, doi_def, secattr); if (ret_val < 0) goto socket_setattr_failure; buf_len = ret_val; /* We can't use ip_options_get() directly because it makes a call to * ip_options_get_alloc() which allocates memory with GFP_KERNEL and * we won't always have CAP_NET_RAW even though we _always_ want to * set the IPOPT_CIPSO option. */ opt_len = (buf_len + 3) & ~3; opt = kzalloc(sizeof(*opt) + opt_len, GFP_ATOMIC); if (opt == NULL) { ret_val = -ENOMEM; goto socket_setattr_failure; } memcpy(opt->opt.__data, buf, buf_len); opt->opt.optlen = opt_len; opt->opt.cipso = sizeof(struct iphdr); kfree(buf); buf = NULL; sk_inet = inet_sk(sk); old = rcu_dereference_protected(sk_inet->inet_opt, sock_owned_by_user(sk)); if (sk_inet->is_icsk) { sk_conn = inet_csk(sk); if (old) sk_conn->icsk_ext_hdr_len -= old->opt.optlen; sk_conn->icsk_ext_hdr_len += opt->opt.optlen; sk_conn->icsk_sync_mss(sk, sk_conn->icsk_pmtu_cookie); } rcu_assign_pointer(sk_inet->inet_opt, opt); if (old) call_rcu(&old->rcu, opt_kfree_rcu); return 0; socket_setattr_failure: kfree(buf); kfree(opt); return ret_val; } /** * cipso_v4_req_setattr - Add a CIPSO option to a connection request socket * @req: the connection request socket * @doi_def: the CIPSO DOI to use * @secattr: the specific security attributes of the socket * * Description: * Set the CIPSO option on the given socket using the DOI definition and * security attributes passed to the function. Returns zero on success and * negative values on failure. * */ int cipso_v4_req_setattr(struct request_sock *req, const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr) { int ret_val = -EPERM; unsigned char *buf = NULL; u32 buf_len; u32 opt_len; struct ip_options_rcu *opt = NULL; struct inet_request_sock *req_inet; /* We allocate the maximum CIPSO option size here so we are probably * being a little wasteful, but it makes our life _much_ easier later * on and after all we are only talking about 40 bytes. */ buf_len = CIPSO_V4_OPT_LEN_MAX; buf = kmalloc(buf_len, GFP_ATOMIC); if (buf == NULL) { ret_val = -ENOMEM; goto req_setattr_failure; } ret_val = cipso_v4_genopt(buf, buf_len, doi_def, secattr); if (ret_val < 0) goto req_setattr_failure; buf_len = ret_val; /* We can't use ip_options_get() directly because it makes a call to * ip_options_get_alloc() which allocates memory with GFP_KERNEL and * we won't always have CAP_NET_RAW even though we _always_ want to * set the IPOPT_CIPSO option. 
*/ opt_len = (buf_len + 3) & ~3; opt = kzalloc(sizeof(*opt) + opt_len, GFP_ATOMIC); if (opt == NULL) { ret_val = -ENOMEM; goto req_setattr_failure; } memcpy(opt->opt.__data, buf, buf_len); opt->opt.optlen = opt_len; opt->opt.cipso = sizeof(struct iphdr); kfree(buf); buf = NULL; req_inet = inet_rsk(req); opt = xchg(&req_inet->opt, opt); if (opt) call_rcu(&opt->rcu, opt_kfree_rcu); return 0; req_setattr_failure: kfree(buf); kfree(opt); return ret_val; } /** * cipso_v4_delopt - Delete the CIPSO option from a set of IP options * @opt_ptr: IP option pointer * * Description: * Deletes the CIPSO IP option from a set of IP options and makes the necessary * adjustments to the IP option structure. Returns zero on success, negative * values on failure. * */ static int cipso_v4_delopt(struct ip_options_rcu **opt_ptr) { int hdr_delta = 0; struct ip_options_rcu *opt = *opt_ptr; if (opt->opt.srr || opt->opt.rr || opt->opt.ts || opt->opt.router_alert) { u8 cipso_len; u8 cipso_off; unsigned char *cipso_ptr; int iter; int optlen_new; cipso_off = opt->opt.cipso - sizeof(struct iphdr); cipso_ptr = &opt->opt.__data[cipso_off]; cipso_len = cipso_ptr[1]; if (opt->opt.srr > opt->opt.cipso) opt->opt.srr -= cipso_len; if (opt->opt.rr > opt->opt.cipso) opt->opt.rr -= cipso_len; if (opt->opt.ts > opt->opt.cipso) opt->opt.ts -= cipso_len; if (opt->opt.router_alert > opt->opt.cipso) opt->opt.router_alert -= cipso_len; opt->opt.cipso = 0; memmove(cipso_ptr, cipso_ptr + cipso_len, opt->opt.optlen - cipso_off - cipso_len); /* determining the new total option length is tricky because of * the padding necessary, the only thing i can think to do at * this point is walk the options one-by-one, skipping the * padding at the end to determine the actual option size and * from there we can determine the new total option length */ iter = 0; optlen_new = 0; while (iter < opt->opt.optlen) if (opt->opt.__data[iter] != IPOPT_NOP) { iter += opt->opt.__data[iter + 1]; optlen_new = iter; } else iter++; hdr_delta = opt->opt.optlen; opt->opt.optlen = (optlen_new + 3) & ~3; hdr_delta -= opt->opt.optlen; } else { /* only the cipso option was present on the socket so we can * remove the entire option struct */ *opt_ptr = NULL; hdr_delta = opt->opt.optlen; call_rcu(&opt->rcu, opt_kfree_rcu); } return hdr_delta; } /** * cipso_v4_sock_delattr - Delete the CIPSO option from a socket * @sk: the socket * * Description: * Removes the CIPSO option from a socket, if present. * */ void cipso_v4_sock_delattr(struct sock *sk) { int hdr_delta; struct ip_options_rcu *opt; struct inet_sock *sk_inet; sk_inet = inet_sk(sk); opt = rcu_dereference_protected(sk_inet->inet_opt, 1); if (opt == NULL || opt->opt.cipso == 0) return; hdr_delta = cipso_v4_delopt(&sk_inet->inet_opt); if (sk_inet->is_icsk && hdr_delta > 0) { struct inet_connection_sock *sk_conn = inet_csk(sk); sk_conn->icsk_ext_hdr_len -= hdr_delta; sk_conn->icsk_sync_mss(sk, sk_conn->icsk_pmtu_cookie); } } /** * cipso_v4_req_delattr - Delete the CIPSO option from a request socket * @reg: the request socket * * Description: * Removes the CIPSO option from a request socket, if present. 
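When other options remain after the CIPSO bytes are spliced out, cipso_v4_delopt() above recomputes the true option length by walking option-by-option, ignoring NOP padding, and then rounding back up to a word boundary. A userspace sketch of that recount (IPOPT_NOP is 1; the 68 below is just some non-NOP option type for illustration):

#include <stdio.h>

#define OPT_NOP 1	/* IPOPT_NOP */

/* Walk the options, skipping NOPs; 'optlen_new' ends up just past the
 * last real option, then gets padded to a 4-byte boundary. */
static unsigned recount_optlen(const unsigned char *data, unsigned optlen)
{
	unsigned iter = 0, optlen_new = 0;

	while (iter < optlen)
		if (data[iter] != OPT_NOP) {
			iter += data[iter + 1];	/* option length byte */
			optlen_new = iter;
		} else
			iter++;
	return (optlen_new + 3) & ~3u;
}

int main(void)
{
	/* a 7-byte option followed by NOP padding out to 12 bytes */
	unsigned char opts[12] = { 68, 7, 0, 0, 0, 0, 0,
				   OPT_NOP, OPT_NOP, OPT_NOP, OPT_NOP,
				   OPT_NOP };

	printf("new optlen: %u\n", recount_optlen(opts, sizeof(opts))); /* 8 */
	return 0;
}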
* */ void cipso_v4_req_delattr(struct request_sock *req) { struct ip_options_rcu *opt; struct inet_request_sock *req_inet; req_inet = inet_rsk(req); opt = req_inet->opt; if (opt == NULL || opt->opt.cipso == 0) return; cipso_v4_delopt(&req_inet->opt); } /** * cipso_v4_getattr - Helper function for the cipso_v4_*_getattr functions * @cipso: the CIPSO v4 option * @secattr: the security attributes * * Description: * Inspect @cipso and return the security attributes in @secattr. Returns zero * on success and negative values on failure. * */ static int cipso_v4_getattr(const unsigned char *cipso, struct netlbl_lsm_secattr *secattr) { int ret_val = -ENOMSG; u32 doi; struct cipso_v4_doi *doi_def; if (cipso_v4_cache_check(cipso, cipso[1], secattr) == 0) return 0; doi = get_unaligned_be32(&cipso[2]); rcu_read_lock(); doi_def = cipso_v4_doi_search(doi); if (doi_def == NULL) goto getattr_return; /* XXX - This code assumes only one tag per CIPSO option which isn't * really a good assumption to make but since we only support the MAC * tags right now it is a safe assumption. */ switch (cipso[6]) { case CIPSO_V4_TAG_RBITMAP: ret_val = cipso_v4_parsetag_rbm(doi_def, &cipso[6], secattr); break; case CIPSO_V4_TAG_ENUM: ret_val = cipso_v4_parsetag_enum(doi_def, &cipso[6], secattr); break; case CIPSO_V4_TAG_RANGE: ret_val = cipso_v4_parsetag_rng(doi_def, &cipso[6], secattr); break; case CIPSO_V4_TAG_LOCAL: ret_val = cipso_v4_parsetag_loc(doi_def, &cipso[6], secattr); break; } if (ret_val == 0) secattr->type = NETLBL_NLTYPE_CIPSOV4; getattr_return: rcu_read_unlock(); return ret_val; } /** * cipso_v4_sock_getattr - Get the security attributes from a sock * @sk: the sock * @secattr: the security attributes * * Description: * Query @sk to see if there is a CIPSO option attached to the sock and if * there is return the CIPSO security attributes in @secattr. This function * requires that @sk be locked, or privately held, but it does not do any * locking itself. Returns zero on success and negative values on failure. * */ int cipso_v4_sock_getattr(struct sock *sk, struct netlbl_lsm_secattr *secattr) { struct ip_options_rcu *opt; int res = -ENOMSG; rcu_read_lock(); opt = rcu_dereference(inet_sk(sk)->inet_opt); if (opt && opt->opt.cipso) res = cipso_v4_getattr(opt->opt.__data + opt->opt.cipso - sizeof(struct iphdr), secattr); rcu_read_unlock(); return res; } /** * cipso_v4_skbuff_setattr - Set the CIPSO option on a packet * @skb: the packet * @secattr: the security attributes * * Description: * Set the CIPSO option on the given packet based on the security attributes. * Returns a pointer to the IP header on success and NULL on failure. 
* */ int cipso_v4_skbuff_setattr(struct sk_buff *skb, const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr) { int ret_val; struct iphdr *iph; struct ip_options *opt = &IPCB(skb)->opt; unsigned char buf[CIPSO_V4_OPT_LEN_MAX]; u32 buf_len = CIPSO_V4_OPT_LEN_MAX; u32 opt_len; int len_delta; ret_val = cipso_v4_genopt(buf, buf_len, doi_def, secattr); if (ret_val < 0) return ret_val; buf_len = ret_val; opt_len = (buf_len + 3) & ~3; /* we overwrite any existing options to ensure that we have enough * room for the CIPSO option, the reason is that we _need_ to guarantee * that the security label is applied to the packet - we do the same * thing when using the socket options and it hasn't caused a problem, * if we need to we can always revisit this choice later */ len_delta = opt_len - opt->optlen; /* if we don't ensure enough headroom we could panic on the skb_push() * call below so make sure we have enough, we are also "mangling" the * packet so we should probably do a copy-on-write call anyway */ ret_val = skb_cow(skb, skb_headroom(skb) + len_delta); if (ret_val < 0) return ret_val; if (len_delta > 0) { /* we assume that the header + opt->optlen have already been * "pushed" in ip_options_build() or similar */ iph = ip_hdr(skb); skb_push(skb, len_delta); memmove((char *)iph - len_delta, iph, iph->ihl << 2); skb_reset_network_header(skb); iph = ip_hdr(skb); } else if (len_delta < 0) { iph = ip_hdr(skb); memset(iph + 1, IPOPT_NOP, opt->optlen); } else iph = ip_hdr(skb); if (opt->optlen > 0) memset(opt, 0, sizeof(*opt)); opt->optlen = opt_len; opt->cipso = sizeof(struct iphdr); opt->is_changed = 1; /* we have to do the following because we are being called from a * netfilter hook which means the packet already has had the header * fields populated and the checksum calculated - yes this means we * are doing more work than needed but we do it to keep the core * stack clean and tidy */ memcpy(iph + 1, buf, buf_len); if (opt_len > buf_len) memset((char *)(iph + 1) + buf_len, 0, opt_len - buf_len); if (len_delta != 0) { iph->ihl = 5 + (opt_len >> 2); iph->tot_len = htons(skb->len); } ip_send_check(iph); return 0; } /** * cipso_v4_skbuff_delattr - Delete any CIPSO options from a packet * @skb: the packet * * Description: * Removes any and all CIPSO options from the given packet. Returns zero on * success, negative values on failure. * */ int cipso_v4_skbuff_delattr(struct sk_buff *skb) { int ret_val; struct iphdr *iph; struct ip_options *opt = &IPCB(skb)->opt; unsigned char *cipso_ptr; if (opt->cipso == 0) return 0; /* since we are changing the packet we should make a copy */ ret_val = skb_cow(skb, skb_headroom(skb)); if (ret_val < 0) return ret_val; /* the easiest thing to do is just replace the cipso option with noop * options since we don't change the size of the packet, although we * still need to recalculate the checksum */ iph = ip_hdr(skb); cipso_ptr = (unsigned char *)iph + opt->cipso; memset(cipso_ptr, IPOPT_NOOP, cipso_ptr[1]); opt->cipso = 0; opt->is_changed = 1; ip_send_check(iph); return 0; } /** * cipso_v4_skbuff_getattr - Get the security attributes from the CIPSO option * @skb: the packet * @secattr: the security attributes * * Description: * Parse the given packet's CIPSO option and return the security attributes. * Returns zero on success and negative values on failure. 
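After resizing the option area, cipso_v4_skbuff_setattr() above rewrites the header length as "5 + (opt_len >> 2)": five 32-bit words of fixed IPv4 header plus one word per four option bytes. A quick arithmetic check:

#include <stdio.h>

int main(void)
{
	unsigned opt_len;

	/* ihl is in 32-bit words: the 20-byte base header (5 words) plus
	 * the padded option length divided by 4; 40 option bytes give the
	 * 60-byte IPv4 maximum. */
	for (opt_len = 0; opt_len <= 40; opt_len += 20)
		printf("opt_len=%2u -> ihl=%u (%u header bytes)\n",
		       opt_len, 5 + (opt_len >> 2),
		       (5 + (opt_len >> 2)) * 4);
	return 0;
}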
* */ int cipso_v4_skbuff_getattr(const struct sk_buff *skb, struct netlbl_lsm_secattr *secattr) { return cipso_v4_getattr(CIPSO_V4_OPTPTR(skb), secattr); } /* * Setup Functions */ /** * cipso_v4_init - Initialize the CIPSO module * * Description: * Initialize the CIPSO module and prepare it for use. Returns zero on success * and negative values on failure. * */ static int __init cipso_v4_init(void) { int ret_val; ret_val = cipso_v4_cache_init(); if (ret_val != 0) panic("Failed to initialize the CIPSO/IPv4 cache (%d)\n", ret_val); return 0; } subsys_initcall(cipso_v4_init);
int cipso_v4_sock_getattr(struct sock *sk, struct netlbl_lsm_secattr *secattr) { struct ip_options *opt; opt = inet_sk(sk)->opt; if (opt == NULL || opt->cipso == 0) return -ENOMSG; return cipso_v4_getattr(opt->__data + opt->cipso - sizeof(struct iphdr), secattr); }
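Editorial note: the func_before version above fetches inet_sk(sk)->opt with no lock or RCU protection, so it can race with a writer that swaps and frees the options - the CWE-362 condition this row records. A hypothetical interleaving (the writer side paraphrases the pre-fix xchg()/kfree() pattern visible in this row's code_before):

/*
 * CPU 0: cipso_v4_sock_getattr()        CPU 1: option update on the same sock
 * --------------------------------      -------------------------------------
 * opt = inet_sk(sk)->opt;
 *                                       old = xchg(&inet_sk(sk)->opt, new);
 *                                       kfree(old);
 * cipso_v4_getattr(opt->__data + ...)   <-- dereferences freed memory
 */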
int cipso_v4_sock_getattr(struct sock *sk, struct netlbl_lsm_secattr *secattr) { struct ip_options_rcu *opt; int res = -ENOMSG; rcu_read_lock(); opt = rcu_dereference(inet_sk(sk)->inet_opt); if (opt && opt->opt.cipso) res = cipso_v4_getattr(opt->opt.__data + opt->opt.cipso - sizeof(struct iphdr), secattr); rcu_read_unlock(); return res; }
{'added': [(1860, 'static void opt_kfree_rcu(struct rcu_head *head)'), (1861, '{'), (1862, '\tkfree(container_of(head, struct ip_options_rcu, rcu));'), (1863, '}'), (1864, ''), (1887, '\tstruct ip_options_rcu *old, *opt = NULL;'), (1923, '\tmemcpy(opt->opt.__data, buf, buf_len);'), (1924, '\topt->opt.optlen = opt_len;'), (1925, '\topt->opt.cipso = sizeof(struct iphdr);'), (1930, ''), (1931, '\told = rcu_dereference_protected(sk_inet->inet_opt, sock_owned_by_user(sk));'), (1934, '\t\tif (old)'), (1935, '\t\t\tsk_conn->icsk_ext_hdr_len -= old->opt.optlen;'), (1936, '\t\tsk_conn->icsk_ext_hdr_len += opt->opt.optlen;'), (1939, '\trcu_assign_pointer(sk_inet->inet_opt, opt);'), (1940, '\tif (old)'), (1941, '\t\tcall_rcu(&old->rcu, opt_kfree_rcu);'), (1971, '\tstruct ip_options_rcu *opt = NULL;'), (1999, '\tmemcpy(opt->opt.__data, buf, buf_len);'), (2000, '\topt->opt.optlen = opt_len;'), (2001, '\topt->opt.cipso = sizeof(struct iphdr);'), (2007, '\tif (opt)'), (2008, '\t\tcall_rcu(&opt->rcu, opt_kfree_rcu);'), (2028, 'static int cipso_v4_delopt(struct ip_options_rcu **opt_ptr)'), (2031, '\tstruct ip_options_rcu *opt = *opt_ptr;'), (2033, '\tif (opt->opt.srr || opt->opt.rr || opt->opt.ts || opt->opt.router_alert) {'), (2040, '\t\tcipso_off = opt->opt.cipso - sizeof(struct iphdr);'), (2041, '\t\tcipso_ptr = &opt->opt.__data[cipso_off];'), (2044, '\t\tif (opt->opt.srr > opt->opt.cipso)'), (2045, '\t\t\topt->opt.srr -= cipso_len;'), (2046, '\t\tif (opt->opt.rr > opt->opt.cipso)'), (2047, '\t\t\topt->opt.rr -= cipso_len;'), (2048, '\t\tif (opt->opt.ts > opt->opt.cipso)'), (2049, '\t\t\topt->opt.ts -= cipso_len;'), (2050, '\t\tif (opt->opt.router_alert > opt->opt.cipso)'), (2051, '\t\t\topt->opt.router_alert -= cipso_len;'), (2052, '\t\topt->opt.cipso = 0;'), (2055, '\t\t\topt->opt.optlen - cipso_off - cipso_len);'), (2064, '\t\twhile (iter < opt->opt.optlen)'), (2065, '\t\t\tif (opt->opt.__data[iter] != IPOPT_NOP) {'), (2066, '\t\t\t\titer += opt->opt.__data[iter + 1];'), (2070, '\t\thdr_delta = opt->opt.optlen;'), (2071, '\t\topt->opt.optlen = (optlen_new + 3) & ~3;'), (2072, '\t\thdr_delta -= opt->opt.optlen;'), (2077, '\t\thdr_delta = opt->opt.optlen;'), (2078, '\t\tcall_rcu(&opt->rcu, opt_kfree_rcu);'), (2095, '\tstruct ip_options_rcu *opt;'), (2099, '\topt = rcu_dereference_protected(sk_inet->inet_opt, 1);'), (2100, '\tif (opt == NULL || opt->opt.cipso == 0)'), (2103, '\thdr_delta = cipso_v4_delopt(&sk_inet->inet_opt);'), (2121, '\tstruct ip_options_rcu *opt;'), (2126, '\tif (opt == NULL || opt->opt.cipso == 0)'), (2196, '\tstruct ip_options_rcu *opt;'), (2197, '\tint res = -ENOMSG;'), (2199, '\trcu_read_lock();'), (2200, '\topt = rcu_dereference(inet_sk(sk)->inet_opt);'), (2201, '\tif (opt && opt->opt.cipso)'), (2202, '\t\tres = cipso_v4_getattr(opt->opt.__data +'), (2203, '\t\t\t\t\t\topt->opt.cipso -'), (2204, '\t\t\t\t\t\tsizeof(struct iphdr),'), (2205, '\t\t\t\t secattr);'), (2206, '\trcu_read_unlock();'), (2207, '\treturn res;')], 'deleted': [(1882, '\tstruct ip_options *opt = NULL;'), (1918, '\tmemcpy(opt->__data, buf, buf_len);'), (1919, '\topt->optlen = opt_len;'), (1920, '\topt->cipso = sizeof(struct iphdr);'), (1927, '\t\tif (sk_inet->opt)'), (1928, '\t\t\tsk_conn->icsk_ext_hdr_len -= sk_inet->opt->optlen;'), (1929, '\t\tsk_conn->icsk_ext_hdr_len += opt->optlen;'), (1932, '\topt = xchg(&sk_inet->opt, opt);'), (1933, '\tkfree(opt);'), (1963, '\tstruct ip_options *opt = NULL;'), (1991, '\tmemcpy(opt->__data, buf, buf_len);'), (1992, '\topt->optlen = opt_len;'), (1993, '\topt->cipso = 
sizeof(struct iphdr);'), (1999, '\tkfree(opt);'), (2019, 'static int cipso_v4_delopt(struct ip_options **opt_ptr)'), (2022, '\tstruct ip_options *opt = *opt_ptr;'), (2024, '\tif (opt->srr || opt->rr || opt->ts || opt->router_alert) {'), (2031, '\t\tcipso_off = opt->cipso - sizeof(struct iphdr);'), (2032, '\t\tcipso_ptr = &opt->__data[cipso_off];'), (2035, '\t\tif (opt->srr > opt->cipso)'), (2036, '\t\t\topt->srr -= cipso_len;'), (2037, '\t\tif (opt->rr > opt->cipso)'), (2038, '\t\t\topt->rr -= cipso_len;'), (2039, '\t\tif (opt->ts > opt->cipso)'), (2040, '\t\t\topt->ts -= cipso_len;'), (2041, '\t\tif (opt->router_alert > opt->cipso)'), (2042, '\t\t\topt->router_alert -= cipso_len;'), (2043, '\t\topt->cipso = 0;'), (2046, '\t\t\topt->optlen - cipso_off - cipso_len);'), (2055, '\t\twhile (iter < opt->optlen)'), (2056, '\t\t\tif (opt->__data[iter] != IPOPT_NOP) {'), (2057, '\t\t\t\titer += opt->__data[iter + 1];'), (2061, '\t\thdr_delta = opt->optlen;'), (2062, '\t\topt->optlen = (optlen_new + 3) & ~3;'), (2063, '\t\thdr_delta -= opt->optlen;'), (2068, '\t\thdr_delta = opt->optlen;'), (2069, '\t\tkfree(opt);'), (2086, '\tstruct ip_options *opt;'), (2090, '\topt = sk_inet->opt;'), (2091, '\tif (opt == NULL || opt->cipso == 0)'), (2094, '\thdr_delta = cipso_v4_delopt(&sk_inet->opt);'), (2112, '\tstruct ip_options *opt;'), (2117, '\tif (opt == NULL || opt->cipso == 0)'), (2187, '\tstruct ip_options *opt;'), (2189, '\topt = inet_sk(sk)->opt;'), (2190, '\tif (opt == NULL || opt->cipso == 0)'), (2191, '\t\treturn -ENOMSG;'), (2192, ''), (2193, '\treturn cipso_v4_getattr(opt->__data + opt->cipso - sizeof(struct iphdr),'), (2194, '\t\t\t\tsecattr);')]}
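Editorial note: the diff above is the classic RCU publish/retire conversion. The bare struct ip_options pointer becomes a struct ip_options_rcu wrapper (layout implied by the container_of() call in opt_kfree_rcu()); readers dereference it inside an RCU read-side critical section (func_after), while writers publish a new copy and defer freeing the old one past a grace period. A minimal sketch of the writer side, assembled from the added lines above and not independently buildable outside the kernel tree:

struct ip_options_rcu {
	struct rcu_head rcu;	/* bookkeeping for call_rcu() */
	struct ip_options opt;	/* payload readers actually use */
};

static void opt_kfree_rcu(struct rcu_head *head)
{
	/* runs only after every pre-existing reader has finished */
	kfree(container_of(head, struct ip_options_rcu, rcu));
}

	/* writer side, serialized by socket ownership */
	old = rcu_dereference_protected(sk_inet->inet_opt,
					sock_owned_by_user(sk));
	rcu_assign_pointer(sk_inet->inet_opt, opt);
	if (old)
		call_rcu(&old->rcu, opt_kfree_rcu);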
63
50
1,360
7,485
https://github.com/torvalds/linux
CVE-2012-3552
['CWE-362']
cipso_ipv4.c
cipso_v4_sock_setattr
/* * CIPSO - Commercial IP Security Option * * This is an implementation of the CIPSO 2.2 protocol as specified in * draft-ietf-cipso-ipsecurity-01.txt with additional tag types as found in * FIPS-188. While CIPSO never became a full IETF RFC standard many vendors * have chosen to adopt the protocol and over the years it has become a * de-facto standard for labeled networking. * * The CIPSO draft specification can be found in the kernel's Documentation * directory as well as the following URL: * http://tools.ietf.org/id/draft-ietf-cipso-ipsecurity-01.txt * The FIPS-188 specification can be found at the following URL: * http://www.itl.nist.gov/fipspubs/fip188.htm * * Author: Paul Moore <paul.moore@hp.com> * */ /* * (c) Copyright Hewlett-Packard Development Company, L.P., 2006, 2008 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See * the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/init.h> #include <linux/types.h> #include <linux/rcupdate.h> #include <linux/list.h> #include <linux/spinlock.h> #include <linux/string.h> #include <linux/jhash.h> #include <linux/audit.h> #include <linux/slab.h> #include <net/ip.h> #include <net/icmp.h> #include <net/tcp.h> #include <net/netlabel.h> #include <net/cipso_ipv4.h> #include <asm/atomic.h> #include <asm/bug.h> #include <asm/unaligned.h> /* List of available DOI definitions */ /* XXX - This currently assumes a minimal number of different DOIs in use, * if in practice there are a lot of different DOIs this list should * probably be turned into a hash table or something similar so we * can do quick lookups. */ static DEFINE_SPINLOCK(cipso_v4_doi_list_lock); static LIST_HEAD(cipso_v4_doi_list); /* Label mapping cache */ int cipso_v4_cache_enabled = 1; int cipso_v4_cache_bucketsize = 10; #define CIPSO_V4_CACHE_BUCKETBITS 7 #define CIPSO_V4_CACHE_BUCKETS (1 << CIPSO_V4_CACHE_BUCKETBITS) #define CIPSO_V4_CACHE_REORDERLIMIT 10 struct cipso_v4_map_cache_bkt { spinlock_t lock; u32 size; struct list_head list; }; struct cipso_v4_map_cache_entry { u32 hash; unsigned char *key; size_t key_len; struct netlbl_lsm_cache *lsm_data; u32 activity; struct list_head list; }; static struct cipso_v4_map_cache_bkt *cipso_v4_cache = NULL; /* Restricted bitmap (tag #1) flags */ int cipso_v4_rbm_optfmt = 0; int cipso_v4_rbm_strictvalid = 1; /* * Protocol Constants */ /* Maximum size of the CIPSO IP option, derived from the fact that the maximum * IPv4 header size is 60 bytes and the base IPv4 header is 20 bytes long. */ #define CIPSO_V4_OPT_LEN_MAX 40 /* Length of the base CIPSO option, this includes the option type (1 byte), the * option length (1 byte), and the DOI (4 bytes). */ #define CIPSO_V4_HDR_LEN 6 /* Base length of the restrictive category bitmap tag (tag #1). */ #define CIPSO_V4_TAG_RBM_BLEN 4 /* Base length of the enumerated category tag (tag #2). */ #define CIPSO_V4_TAG_ENUM_BLEN 4 /* Base length of the ranged categories bitmap tag (tag #5). 
*/ #define CIPSO_V4_TAG_RNG_BLEN 4 /* The maximum number of category ranges permitted in the ranged category tag * (tag #5). You may note that the IETF draft states that the maximum number * of category ranges is 7, but if the low end of the last category range is * zero then it is possible to fit 8 category ranges because the zero should * be omitted. */ #define CIPSO_V4_TAG_RNG_CAT_MAX 8 /* Base length of the local tag (non-standard tag). * Tag definition (may change between kernel versions) * * 0 8 16 24 32 * +----------+----------+----------+----------+ * | 10000000 | 00000110 | 32-bit secid value | * +----------+----------+----------+----------+ * | in (host byte order)| * +----------+----------+ * */ #define CIPSO_V4_TAG_LOC_BLEN 6 /* * Helper Functions */ /** * cipso_v4_bitmap_walk - Walk a bitmap looking for a bit * @bitmap: the bitmap * @bitmap_len: length in bits * @offset: starting offset * @state: if non-zero, look for a set (1) bit else look for a cleared (0) bit * * Description: * Starting at @offset, walk the bitmap from left to right until either the * desired bit is found or we reach the end. Return the bit offset, -1 if * not found, or -2 if error. */ static int cipso_v4_bitmap_walk(const unsigned char *bitmap, u32 bitmap_len, u32 offset, u8 state) { u32 bit_spot; u32 byte_offset; unsigned char bitmask; unsigned char byte; /* gcc always rounds to zero when doing integer division */ byte_offset = offset / 8; byte = bitmap[byte_offset]; bit_spot = offset; bitmask = 0x80 >> (offset % 8); while (bit_spot < bitmap_len) { if ((state && (byte & bitmask) == bitmask) || (state == 0 && (byte & bitmask) == 0)) return bit_spot; bit_spot++; bitmask >>= 1; if (bitmask == 0) { byte = bitmap[++byte_offset]; bitmask = 0x80; } } return -1; } /** * cipso_v4_bitmap_setbit - Sets a single bit in a bitmap * @bitmap: the bitmap * @bit: the bit * @state: if non-zero, set the bit (1) else clear the bit (0) * * Description: * Set a single bit in the bitmap. */ static void cipso_v4_bitmap_setbit(unsigned char *bitmap, u32 bit, u8 state) { u32 byte_spot; u8 bitmask; /* gcc always rounds to zero when doing integer division */ byte_spot = bit / 8; bitmask = 0x80 >> (bit % 8); if (state) bitmap[byte_spot] |= bitmask; else bitmap[byte_spot] &= ~bitmask; } /** * cipso_v4_cache_entry_free - Frees a cache entry * @entry: the entry to free * * Description: * This function frees the memory associated with a cache entry including the * LSM cache data if there are no longer any users, i.e. reference count == 0. * */ static void cipso_v4_cache_entry_free(struct cipso_v4_map_cache_entry *entry) { if (entry->lsm_data) netlbl_secattr_cache_free(entry->lsm_data); kfree(entry->key); kfree(entry); } /** * cipso_v4_map_cache_hash - Hashing function for the CIPSO cache * @key: the hash key * @key_len: the length of the key in bytes * * Description: * The CIPSO tag hashing function. Returns a 32-bit hash value. * */ static u32 cipso_v4_map_cache_hash(const unsigned char *key, u32 key_len) { return jhash(key, key_len, 0); } /* * Label Mapping Cache Functions */ /** * cipso_v4_cache_init - Initialize the CIPSO cache * * Description: * Initializes the CIPSO label mapping cache, this function should be called * before any of the other functions defined in this file. Returns zero on * success, negative values on error.
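 * (Illustrative aside, not part of the original source: with CIPSO_V4_CACHE_BUCKETBITS = 7 this allocates 1 << 7 = 128 buckets, and a key hashing to h is filed under bucket h & 127, matching the hash & (CIPSO_V4_CACHE_BUCKETS - 1) computation used by the lookup and insert paths.)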
* */ static int cipso_v4_cache_init(void) { u32 iter; cipso_v4_cache = kcalloc(CIPSO_V4_CACHE_BUCKETS, sizeof(struct cipso_v4_map_cache_bkt), GFP_KERNEL); if (cipso_v4_cache == NULL) return -ENOMEM; for (iter = 0; iter < CIPSO_V4_CACHE_BUCKETS; iter++) { spin_lock_init(&cipso_v4_cache[iter].lock); cipso_v4_cache[iter].size = 0; INIT_LIST_HEAD(&cipso_v4_cache[iter].list); } return 0; } /** * cipso_v4_cache_invalidate - Invalidates the current CIPSO cache * * Description: * Invalidates and frees any entries in the CIPSO cache. * */ void cipso_v4_cache_invalidate(void) { struct cipso_v4_map_cache_entry *entry, *tmp_entry; u32 iter; for (iter = 0; iter < CIPSO_V4_CACHE_BUCKETS; iter++) { spin_lock_bh(&cipso_v4_cache[iter].lock); list_for_each_entry_safe(entry, tmp_entry, &cipso_v4_cache[iter].list, list) { list_del(&entry->list); cipso_v4_cache_entry_free(entry); } cipso_v4_cache[iter].size = 0; spin_unlock_bh(&cipso_v4_cache[iter].lock); } } /** * cipso_v4_cache_check - Check the CIPSO cache for a label mapping * @key: the buffer to check * @key_len: buffer length in bytes * @secattr: the security attribute struct to use * * Description: * This function checks the cache to see if a label mapping already exists for * the given key. If there is a match then the cache is adjusted and the * @secattr struct is populated with the correct LSM security attributes. The * cache is adjusted in the following manner if the entry is not already the * first in the cache bucket: * * 1. The cache entry's activity counter is incremented * 2. The previous (higher ranking) entry's activity counter is decremented * 3. If the difference between the two activity counters is greater than * CIPSO_V4_CACHE_REORDERLIMIT the two entries are swapped * * Returns zero on success, -ENOENT for a cache miss, and other negative values * on error. * */ static int cipso_v4_cache_check(const unsigned char *key, u32 key_len, struct netlbl_lsm_secattr *secattr) { u32 bkt; struct cipso_v4_map_cache_entry *entry; struct cipso_v4_map_cache_entry *prev_entry = NULL; u32 hash; if (!cipso_v4_cache_enabled) return -ENOENT; hash = cipso_v4_map_cache_hash(key, key_len); bkt = hash & (CIPSO_V4_CACHE_BUCKETS - 1); spin_lock_bh(&cipso_v4_cache[bkt].lock); list_for_each_entry(entry, &cipso_v4_cache[bkt].list, list) { if (entry->hash == hash && entry->key_len == key_len && memcmp(entry->key, key, key_len) == 0) { entry->activity += 1; atomic_inc(&entry->lsm_data->refcount); secattr->cache = entry->lsm_data; secattr->flags |= NETLBL_SECATTR_CACHE; secattr->type = NETLBL_NLTYPE_CIPSOV4; if (prev_entry == NULL) { spin_unlock_bh(&cipso_v4_cache[bkt].lock); return 0; } if (prev_entry->activity > 0) prev_entry->activity -= 1; if (entry->activity > prev_entry->activity && entry->activity - prev_entry->activity > CIPSO_V4_CACHE_REORDERLIMIT) { __list_del(entry->list.prev, entry->list.next); __list_add(&entry->list, prev_entry->list.prev, &prev_entry->list); } spin_unlock_bh(&cipso_v4_cache[bkt].lock); return 0; } prev_entry = entry; } spin_unlock_bh(&cipso_v4_cache[bkt].lock); return -ENOENT; } /** * cipso_v4_cache_add - Add an entry to the CIPSO cache * @skb: the packet * @secattr: the packet's security attributes * * Description: * Add a new entry into the CIPSO label mapping cache. Add the new entry to * the head of the cache bucket's list; if the cache bucket is out of room, remove * the last entry in the list first. It is important to note that there is * currently no checking for duplicate keys.
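 * (Illustrative aside, not part of the original source: with the default cipso_v4_cache_bucketsize of 10, inserting an eleventh entry into a bucket evicts the entry at the tail of that bucket's list before the new entry is added at the head.)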
Returns zero on success, * negative values on failure. * */ int cipso_v4_cache_add(const struct sk_buff *skb, const struct netlbl_lsm_secattr *secattr) { int ret_val = -EPERM; u32 bkt; struct cipso_v4_map_cache_entry *entry = NULL; struct cipso_v4_map_cache_entry *old_entry = NULL; unsigned char *cipso_ptr; u32 cipso_ptr_len; if (!cipso_v4_cache_enabled || cipso_v4_cache_bucketsize <= 0) return 0; cipso_ptr = CIPSO_V4_OPTPTR(skb); cipso_ptr_len = cipso_ptr[1]; entry = kzalloc(sizeof(*entry), GFP_ATOMIC); if (entry == NULL) return -ENOMEM; entry->key = kmemdup(cipso_ptr, cipso_ptr_len, GFP_ATOMIC); if (entry->key == NULL) { ret_val = -ENOMEM; goto cache_add_failure; } entry->key_len = cipso_ptr_len; entry->hash = cipso_v4_map_cache_hash(cipso_ptr, cipso_ptr_len); atomic_inc(&secattr->cache->refcount); entry->lsm_data = secattr->cache; bkt = entry->hash & (CIPSO_V4_CACHE_BUCKETS - 1); spin_lock_bh(&cipso_v4_cache[bkt].lock); if (cipso_v4_cache[bkt].size < cipso_v4_cache_bucketsize) { list_add(&entry->list, &cipso_v4_cache[bkt].list); cipso_v4_cache[bkt].size += 1; } else { old_entry = list_entry(cipso_v4_cache[bkt].list.prev, struct cipso_v4_map_cache_entry, list); list_del(&old_entry->list); list_add(&entry->list, &cipso_v4_cache[bkt].list); cipso_v4_cache_entry_free(old_entry); } spin_unlock_bh(&cipso_v4_cache[bkt].lock); return 0; cache_add_failure: if (entry) cipso_v4_cache_entry_free(entry); return ret_val; } /* * DOI List Functions */ /** * cipso_v4_doi_search - Searches for a DOI definition * @doi: the DOI to search for * * Description: * Search the DOI definition list for a DOI definition with a DOI value that * matches @doi. The caller is responsible for calling rcu_read_[un]lock(). * Returns a pointer to the DOI definition on success and NULL on failure. */ static struct cipso_v4_doi *cipso_v4_doi_search(u32 doi) { struct cipso_v4_doi *iter; list_for_each_entry_rcu(iter, &cipso_v4_doi_list, list) if (iter->doi == doi && atomic_read(&iter->refcount)) return iter; return NULL; } /** * cipso_v4_doi_add - Add a new DOI to the CIPSO protocol engine * @doi_def: the DOI structure * @audit_info: NetLabel audit information * * Description: * The caller defines a new DOI for use by the CIPSO engine and calls this * function to add it to the list of acceptable domains. The caller must * ensure that the mapping table specified in @doi_def->map meets all of the * requirements of the mapping type (see cipso_ipv4.h for details). Returns * zero on success and non-zero on failure. 
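 * (Illustrative aside, not part of the original source: the tag/type pairing enforced below means a CIPSO_V4_MAP_TRANS definition may only carry CIPSO_V4_TAG_RBITMAP tags, CIPSO_V4_TAG_ENUM and CIPSO_V4_TAG_RANGE require CIPSO_V4_MAP_PASS, and CIPSO_V4_TAG_LOCAL requires CIPSO_V4_MAP_LOCAL.)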
* */ int cipso_v4_doi_add(struct cipso_v4_doi *doi_def, struct netlbl_audit *audit_info) { int ret_val = -EINVAL; u32 iter; u32 doi; u32 doi_type; struct audit_buffer *audit_buf; doi = doi_def->doi; doi_type = doi_def->type; if (doi_def == NULL || doi_def->doi == CIPSO_V4_DOI_UNKNOWN) goto doi_add_return; for (iter = 0; iter < CIPSO_V4_TAG_MAXCNT; iter++) { switch (doi_def->tags[iter]) { case CIPSO_V4_TAG_RBITMAP: break; case CIPSO_V4_TAG_RANGE: case CIPSO_V4_TAG_ENUM: if (doi_def->type != CIPSO_V4_MAP_PASS) goto doi_add_return; break; case CIPSO_V4_TAG_LOCAL: if (doi_def->type != CIPSO_V4_MAP_LOCAL) goto doi_add_return; break; case CIPSO_V4_TAG_INVALID: if (iter == 0) goto doi_add_return; break; default: goto doi_add_return; } } atomic_set(&doi_def->refcount, 1); spin_lock(&cipso_v4_doi_list_lock); if (cipso_v4_doi_search(doi_def->doi) != NULL) { spin_unlock(&cipso_v4_doi_list_lock); ret_val = -EEXIST; goto doi_add_return; } list_add_tail_rcu(&doi_def->list, &cipso_v4_doi_list); spin_unlock(&cipso_v4_doi_list_lock); ret_val = 0; doi_add_return: audit_buf = netlbl_audit_start(AUDIT_MAC_CIPSOV4_ADD, audit_info); if (audit_buf != NULL) { const char *type_str; switch (doi_type) { case CIPSO_V4_MAP_TRANS: type_str = "trans"; break; case CIPSO_V4_MAP_PASS: type_str = "pass"; break; case CIPSO_V4_MAP_LOCAL: type_str = "local"; break; default: type_str = "(unknown)"; } audit_log_format(audit_buf, " cipso_doi=%u cipso_type=%s res=%u", doi, type_str, ret_val == 0 ? 1 : 0); audit_log_end(audit_buf); } return ret_val; } /** * cipso_v4_doi_free - Frees a DOI definition * @doi_def: the DOI definition * * Description: * This function frees all of the memory associated with a DOI definition. * */ void cipso_v4_doi_free(struct cipso_v4_doi *doi_def) { if (doi_def == NULL) return; switch (doi_def->type) { case CIPSO_V4_MAP_TRANS: kfree(doi_def->map.std->lvl.cipso); kfree(doi_def->map.std->lvl.local); kfree(doi_def->map.std->cat.cipso); kfree(doi_def->map.std->cat.local); break; } kfree(doi_def); } /** * cipso_v4_doi_free_rcu - Frees a DOI definition via the RCU pointer * @entry: the entry's RCU field * * Description: * This function is designed to be used as a callback to the call_rcu() * function so that the memory allocated to the DOI definition can be released * safely. * */ static void cipso_v4_doi_free_rcu(struct rcu_head *entry) { struct cipso_v4_doi *doi_def; doi_def = container_of(entry, struct cipso_v4_doi, rcu); cipso_v4_doi_free(doi_def); } /** * cipso_v4_doi_remove - Remove an existing DOI from the CIPSO protocol engine * @doi: the DOI value * @audit_info: NetLabel audit information * * Description: * Removes a DOI definition from the CIPSO engine. The NetLabel routines will * be called to release their own LSM domain mappings as well as our own * domain list. Returns zero on success and negative values on failure.
* */ int cipso_v4_doi_remove(u32 doi, struct netlbl_audit *audit_info) { int ret_val; struct cipso_v4_doi *doi_def; struct audit_buffer *audit_buf; spin_lock(&cipso_v4_doi_list_lock); doi_def = cipso_v4_doi_search(doi); if (doi_def == NULL) { spin_unlock(&cipso_v4_doi_list_lock); ret_val = -ENOENT; goto doi_remove_return; } if (!atomic_dec_and_test(&doi_def->refcount)) { spin_unlock(&cipso_v4_doi_list_lock); ret_val = -EBUSY; goto doi_remove_return; } list_del_rcu(&doi_def->list); spin_unlock(&cipso_v4_doi_list_lock); cipso_v4_cache_invalidate(); call_rcu(&doi_def->rcu, cipso_v4_doi_free_rcu); ret_val = 0; doi_remove_return: audit_buf = netlbl_audit_start(AUDIT_MAC_CIPSOV4_DEL, audit_info); if (audit_buf != NULL) { audit_log_format(audit_buf, " cipso_doi=%u res=%u", doi, ret_val == 0 ? 1 : 0); audit_log_end(audit_buf); } return ret_val; } /** * cipso_v4_doi_getdef - Returns a reference to a valid DOI definition * @doi: the DOI value * * Description: * Searches for a valid DOI definition and if one is found it is returned to * the caller. Otherwise NULL is returned. The caller must ensure that * rcu_read_lock() is held while accessing the returned definition and the DOI * definition reference count is decremented when the caller is done. * */ struct cipso_v4_doi *cipso_v4_doi_getdef(u32 doi) { struct cipso_v4_doi *doi_def; rcu_read_lock(); doi_def = cipso_v4_doi_search(doi); if (doi_def == NULL) goto doi_getdef_return; if (!atomic_inc_not_zero(&doi_def->refcount)) doi_def = NULL; doi_getdef_return: rcu_read_unlock(); return doi_def; } /** * cipso_v4_doi_putdef - Releases a reference for the given DOI definition * @doi_def: the DOI definition * * Description: * Releases a DOI definition reference obtained from cipso_v4_doi_getdef(). * */ void cipso_v4_doi_putdef(struct cipso_v4_doi *doi_def) { if (doi_def == NULL) return; if (!atomic_dec_and_test(&doi_def->refcount)) return; spin_lock(&cipso_v4_doi_list_lock); list_del_rcu(&doi_def->list); spin_unlock(&cipso_v4_doi_list_lock); cipso_v4_cache_invalidate(); call_rcu(&doi_def->rcu, cipso_v4_doi_free_rcu); } /** * cipso_v4_doi_walk - Iterate through the DOI definitions * @skip_cnt: skip past this number of DOI definitions, updated * @callback: callback for each DOI definition * @cb_arg: argument for the callback function * * Description: * Iterate over the DOI definition list, skipping the first @skip_cnt entries. * For each entry call @callback, if @callback returns a negative value stop * 'walking' through the list and return. Updates the value in @skip_cnt upon * return. Returns zero on success, negative values on failure. * */ int cipso_v4_doi_walk(u32 *skip_cnt, int (*callback) (struct cipso_v4_doi *doi_def, void *arg), void *cb_arg) { int ret_val = -ENOENT; u32 doi_cnt = 0; struct cipso_v4_doi *iter_doi; rcu_read_lock(); list_for_each_entry_rcu(iter_doi, &cipso_v4_doi_list, list) if (atomic_read(&iter_doi->refcount) > 0) { if (doi_cnt++ < *skip_cnt) continue; ret_val = callback(iter_doi, cb_arg); if (ret_val < 0) { doi_cnt--; goto doi_walk_return; } } doi_walk_return: rcu_read_unlock(); *skip_cnt = doi_cnt; return ret_val; } /* * Label Mapping Functions */ /** * cipso_v4_map_lvl_valid - Checks to see if the given level is understood * @doi_def: the DOI definition * @level: the level to check * * Description: * Checks the given level against the given DOI definition and returns a * negative value if the level does not have a valid mapping and a zero value * if the level is defined by the DOI. 
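 * (Illustrative aside, not part of the original source: under CIPSO_V4_MAP_PASS every level passes through unchanged, while under CIPSO_V4_MAP_TRANS a hypothetical DOI translating network level 3 to local level 5 would hold lvl.cipso[3] == 5, and any slot still set to CIPSO_V4_INV_LVL is treated as unmapped.)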
* */ static int cipso_v4_map_lvl_valid(const struct cipso_v4_doi *doi_def, u8 level) { switch (doi_def->type) { case CIPSO_V4_MAP_PASS: return 0; case CIPSO_V4_MAP_TRANS: if (doi_def->map.std->lvl.cipso[level] < CIPSO_V4_INV_LVL) return 0; break; } return -EFAULT; } /** * cipso_v4_map_lvl_hton - Perform a level mapping from the host to the network * @doi_def: the DOI definition * @host_lvl: the host MLS level * @net_lvl: the network/CIPSO MLS level * * Description: * Perform a label mapping to translate a local MLS level to the correct * CIPSO level using the given DOI definition. Returns zero on success, * negative values otherwise. * */ static int cipso_v4_map_lvl_hton(const struct cipso_v4_doi *doi_def, u32 host_lvl, u32 *net_lvl) { switch (doi_def->type) { case CIPSO_V4_MAP_PASS: *net_lvl = host_lvl; return 0; case CIPSO_V4_MAP_TRANS: if (host_lvl < doi_def->map.std->lvl.local_size && doi_def->map.std->lvl.local[host_lvl] < CIPSO_V4_INV_LVL) { *net_lvl = doi_def->map.std->lvl.local[host_lvl]; return 0; } return -EPERM; } return -EINVAL; } /** * cipso_v4_map_lvl_ntoh - Perform a level mapping from the network to the host * @doi_def: the DOI definition * @net_lvl: the network/CIPSO MLS level * @host_lvl: the host MLS level * * Description: * Perform a label mapping to translate a CIPSO level to the correct local MLS * level using the given DOI definition. Returns zero on success, negative * values otherwise. * */ static int cipso_v4_map_lvl_ntoh(const struct cipso_v4_doi *doi_def, u32 net_lvl, u32 *host_lvl) { struct cipso_v4_std_map_tbl *map_tbl; switch (doi_def->type) { case CIPSO_V4_MAP_PASS: *host_lvl = net_lvl; return 0; case CIPSO_V4_MAP_TRANS: map_tbl = doi_def->map.std; if (net_lvl < map_tbl->lvl.cipso_size && map_tbl->lvl.cipso[net_lvl] < CIPSO_V4_INV_LVL) { *host_lvl = doi_def->map.std->lvl.cipso[net_lvl]; return 0; } return -EPERM; } return -EINVAL; } /** * cipso_v4_map_cat_rbm_valid - Checks to see if the category bitmap is valid * @doi_def: the DOI definition * @bitmap: category bitmap * @bitmap_len: bitmap length in bytes * * Description: * Checks the given category bitmap against the given DOI definition and * returns a negative value if any of the categories in the bitmap do not have * a valid mapping and a zero value if all of the categories are valid. * */ static int cipso_v4_map_cat_rbm_valid(const struct cipso_v4_doi *doi_def, const unsigned char *bitmap, u32 bitmap_len) { int cat = -1; u32 bitmap_len_bits = bitmap_len * 8; u32 cipso_cat_size; u32 *cipso_array; switch (doi_def->type) { case CIPSO_V4_MAP_PASS: return 0; case CIPSO_V4_MAP_TRANS: cipso_cat_size = doi_def->map.std->cat.cipso_size; cipso_array = doi_def->map.std->cat.cipso; for (;;) { cat = cipso_v4_bitmap_walk(bitmap, bitmap_len_bits, cat + 1, 1); if (cat < 0) break; if (cat >= cipso_cat_size || cipso_array[cat] >= CIPSO_V4_INV_CAT) return -EFAULT; } if (cat == -1) return 0; break; } return -EFAULT; } /** * cipso_v4_map_cat_rbm_hton - Perform a category mapping from host to network * @doi_def: the DOI definition * @secattr: the security attributes * @net_cat: the zero'd out category bitmap in network/CIPSO format * @net_cat_len: the length of the CIPSO bitmap in bytes * * Description: * Perform a label mapping to translate a local MLS category bitmap to the * correct CIPSO bitmap using the given DOI definition. Returns the minimum * size in bytes of the network bitmap on success, negative values otherwise. 
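 * (Illustrative aside, not part of the original source: if the highest category bit set in the network bitmap is bit 12, thirteen bits are in use, so the function reports 13 / 8 + 1 = 2 bytes as the minimum bitmap size.)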
* */ static int cipso_v4_map_cat_rbm_hton(const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr, unsigned char *net_cat, u32 net_cat_len) { int host_spot = -1; u32 net_spot = CIPSO_V4_INV_CAT; u32 net_spot_max = 0; u32 net_clen_bits = net_cat_len * 8; u32 host_cat_size = 0; u32 *host_cat_array = NULL; if (doi_def->type == CIPSO_V4_MAP_TRANS) { host_cat_size = doi_def->map.std->cat.local_size; host_cat_array = doi_def->map.std->cat.local; } for (;;) { host_spot = netlbl_secattr_catmap_walk(secattr->attr.mls.cat, host_spot + 1); if (host_spot < 0) break; switch (doi_def->type) { case CIPSO_V4_MAP_PASS: net_spot = host_spot; break; case CIPSO_V4_MAP_TRANS: if (host_spot >= host_cat_size) return -EPERM; net_spot = host_cat_array[host_spot]; if (net_spot >= CIPSO_V4_INV_CAT) return -EPERM; break; } if (net_spot >= net_clen_bits) return -ENOSPC; cipso_v4_bitmap_setbit(net_cat, net_spot, 1); if (net_spot > net_spot_max) net_spot_max = net_spot; } if (++net_spot_max % 8) return net_spot_max / 8 + 1; return net_spot_max / 8; } /** * cipso_v4_map_cat_rbm_ntoh - Perform a category mapping from network to host * @doi_def: the DOI definition * @net_cat: the category bitmap in network/CIPSO format * @net_cat_len: the length of the CIPSO bitmap in bytes * @secattr: the security attributes * * Description: * Perform a label mapping to translate a CIPSO bitmap to the correct local * MLS category bitmap using the given DOI definition. Returns zero on * success, negative values on failure. * */ static int cipso_v4_map_cat_rbm_ntoh(const struct cipso_v4_doi *doi_def, const unsigned char *net_cat, u32 net_cat_len, struct netlbl_lsm_secattr *secattr) { int ret_val; int net_spot = -1; u32 host_spot = CIPSO_V4_INV_CAT; u32 net_clen_bits = net_cat_len * 8; u32 net_cat_size = 0; u32 *net_cat_array = NULL; if (doi_def->type == CIPSO_V4_MAP_TRANS) { net_cat_size = doi_def->map.std->cat.cipso_size; net_cat_array = doi_def->map.std->cat.cipso; } for (;;) { net_spot = cipso_v4_bitmap_walk(net_cat, net_clen_bits, net_spot + 1, 1); if (net_spot < 0) { if (net_spot == -2) return -EFAULT; return 0; } switch (doi_def->type) { case CIPSO_V4_MAP_PASS: host_spot = net_spot; break; case CIPSO_V4_MAP_TRANS: if (net_spot >= net_cat_size) return -EPERM; host_spot = net_cat_array[net_spot]; if (host_spot >= CIPSO_V4_INV_CAT) return -EPERM; break; } ret_val = netlbl_secattr_catmap_setbit(secattr->attr.mls.cat, host_spot, GFP_ATOMIC); if (ret_val != 0) return ret_val; } return -EINVAL; } /** * cipso_v4_map_cat_enum_valid - Checks to see if the categories are valid * @doi_def: the DOI definition * @enumcat: category list * @enumcat_len: length of the category list in bytes * * Description: * Checks the given categories against the given DOI definition and returns a * negative value if any of the categories do not have a valid mapping and a * zero value if all of the categories are valid. 
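 * (Illustrative aside, not part of the original source: the 16-bit category values must be strictly increasing, so the list 2, 5, 9 validates while 5, 5 or 9, 5 is rejected with -EFAULT.)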
* */ static int cipso_v4_map_cat_enum_valid(const struct cipso_v4_doi *doi_def, const unsigned char *enumcat, u32 enumcat_len) { u16 cat; int cat_prev = -1; u32 iter; if (doi_def->type != CIPSO_V4_MAP_PASS || enumcat_len & 0x01) return -EFAULT; for (iter = 0; iter < enumcat_len; iter += 2) { cat = get_unaligned_be16(&enumcat[iter]); if (cat <= cat_prev) return -EFAULT; cat_prev = cat; } return 0; } /** * cipso_v4_map_cat_enum_hton - Perform a category mapping from host to network * @doi_def: the DOI definition * @secattr: the security attributes * @net_cat: the zero'd out category list in network/CIPSO format * @net_cat_len: the length of the CIPSO category list in bytes * * Description: * Perform a label mapping to translate a local MLS category bitmap to the * correct CIPSO category list using the given DOI definition. Returns the * size in bytes of the network category bitmap on success, negative values * otherwise. * */ static int cipso_v4_map_cat_enum_hton(const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr, unsigned char *net_cat, u32 net_cat_len) { int cat = -1; u32 cat_iter = 0; for (;;) { cat = netlbl_secattr_catmap_walk(secattr->attr.mls.cat, cat + 1); if (cat < 0) break; if ((cat_iter + 2) > net_cat_len) return -ENOSPC; *((__be16 *)&net_cat[cat_iter]) = htons(cat); cat_iter += 2; } return cat_iter; } /** * cipso_v4_map_cat_enum_ntoh - Perform a category mapping from network to host * @doi_def: the DOI definition * @net_cat: the category list in network/CIPSO format * @net_cat_len: the length of the CIPSO bitmap in bytes * @secattr: the security attributes * * Description: * Perform a label mapping to translate a CIPSO category list to the correct * local MLS category bitmap using the given DOI definition. Returns zero on * success, negative values on failure. * */ static int cipso_v4_map_cat_enum_ntoh(const struct cipso_v4_doi *doi_def, const unsigned char *net_cat, u32 net_cat_len, struct netlbl_lsm_secattr *secattr) { int ret_val; u32 iter; for (iter = 0; iter < net_cat_len; iter += 2) { ret_val = netlbl_secattr_catmap_setbit(secattr->attr.mls.cat, get_unaligned_be16(&net_cat[iter]), GFP_ATOMIC); if (ret_val != 0) return ret_val; } return 0; } /** * cipso_v4_map_cat_rng_valid - Checks to see if the categories are valid * @doi_def: the DOI definition * @rngcat: category list * @rngcat_len: length of the category list in bytes * * Description: * Checks the given categories against the given DOI definition and returns a * negative value if any of the categories do not have a valid mapping and a * zero value if all of the categories are valid. 
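 * (Illustrative aside, not part of the original source: ranges are stored highest first, so the pairs (20,15) followed by (10,5) validate, while (10,5) followed by (20,15) fail because the second range's top category exceeds the bottom of the range before it.)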
* */ static int cipso_v4_map_cat_rng_valid(const struct cipso_v4_doi *doi_def, const unsigned char *rngcat, u32 rngcat_len) { u16 cat_high; u16 cat_low; u32 cat_prev = CIPSO_V4_MAX_REM_CATS + 1; u32 iter; if (doi_def->type != CIPSO_V4_MAP_PASS || rngcat_len & 0x01) return -EFAULT; for (iter = 0; iter < rngcat_len; iter += 4) { cat_high = get_unaligned_be16(&rngcat[iter]); if ((iter + 4) <= rngcat_len) cat_low = get_unaligned_be16(&rngcat[iter + 2]); else cat_low = 0; if (cat_high > cat_prev) return -EFAULT; cat_prev = cat_low; } return 0; } /** * cipso_v4_map_cat_rng_hton - Perform a category mapping from host to network * @doi_def: the DOI definition * @secattr: the security attributes * @net_cat: the zero'd out category list in network/CIPSO format * @net_cat_len: the length of the CIPSO category list in bytes * * Description: * Perform a label mapping to translate a local MLS category bitmap to the * correct CIPSO category list using the given DOI definition. Returns the * size in bytes of the network category bitmap on success, negative values * otherwise. * */ static int cipso_v4_map_cat_rng_hton(const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr, unsigned char *net_cat, u32 net_cat_len) { int iter = -1; u16 array[CIPSO_V4_TAG_RNG_CAT_MAX * 2]; u32 array_cnt = 0; u32 cat_size = 0; /* make sure we don't overflow the 'array[]' variable */ if (net_cat_len > (CIPSO_V4_OPT_LEN_MAX - CIPSO_V4_HDR_LEN - CIPSO_V4_TAG_RNG_BLEN)) return -ENOSPC; for (;;) { iter = netlbl_secattr_catmap_walk(secattr->attr.mls.cat, iter + 1); if (iter < 0) break; cat_size += (iter == 0 ? 0 : sizeof(u16)); if (cat_size > net_cat_len) return -ENOSPC; array[array_cnt++] = iter; iter = netlbl_secattr_catmap_walk_rng(secattr->attr.mls.cat, iter); if (iter < 0) return -EFAULT; cat_size += sizeof(u16); if (cat_size > net_cat_len) return -ENOSPC; array[array_cnt++] = iter; } for (iter = 0; array_cnt > 0;) { *((__be16 *)&net_cat[iter]) = htons(array[--array_cnt]); iter += 2; array_cnt--; if (array[array_cnt] != 0) { *((__be16 *)&net_cat[iter]) = htons(array[array_cnt]); iter += 2; } } return cat_size; } /** * cipso_v4_map_cat_rng_ntoh - Perform a category mapping from network to host * @doi_def: the DOI definition * @net_cat: the category list in network/CIPSO format * @net_cat_len: the length of the CIPSO bitmap in bytes * @secattr: the security attributes * * Description: * Perform a label mapping to translate a CIPSO category list to the correct * local MLS category bitmap using the given DOI definition. Returns zero on * success, negative values on failure. * */ static int cipso_v4_map_cat_rng_ntoh(const struct cipso_v4_doi *doi_def, const unsigned char *net_cat, u32 net_cat_len, struct netlbl_lsm_secattr *secattr) { int ret_val; u32 net_iter; u16 cat_low; u16 cat_high; for (net_iter = 0; net_iter < net_cat_len; net_iter += 4) { cat_high = get_unaligned_be16(&net_cat[net_iter]); if ((net_iter + 4) <= net_cat_len) cat_low = get_unaligned_be16(&net_cat[net_iter + 2]); else cat_low = 0; ret_val = netlbl_secattr_catmap_setrng(secattr->attr.mls.cat, cat_low, cat_high, GFP_ATOMIC); if (ret_val != 0) return ret_val; } return 0; } /* * Protocol Handling Functions */ /** * cipso_v4_gentag_hdr - Generate a CIPSO option header * @doi_def: the DOI definition * @len: the total tag length in bytes, not including this header * @buf: the CIPSO option buffer * * Description: * Write a CIPSO header into the beginning of @buffer. 
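 * (Illustrative aside, not part of the original source: for a DOI of 3 and a 10 byte tag the six header octets are 0x86 0x10 0x00 0x00 0x00 0x03 - IPOPT_CIPSO (134), total length 6 + 10 = 16, then the DOI in network byte order.)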
* */ static void cipso_v4_gentag_hdr(const struct cipso_v4_doi *doi_def, unsigned char *buf, u32 len) { buf[0] = IPOPT_CIPSO; buf[1] = CIPSO_V4_HDR_LEN + len; *(__be32 *)&buf[2] = htonl(doi_def->doi); } /** * cipso_v4_gentag_rbm - Generate a CIPSO restricted bitmap tag (type #1) * @doi_def: the DOI definition * @secattr: the security attributes * @buffer: the option buffer * @buffer_len: length of buffer in bytes * * Description: * Generate a CIPSO option using the restricted bitmap tag, tag type #1. The * actual buffer length may be larger than the indicated size due to * translation between host and network category bitmaps. Returns the size of * the tag on success, negative values on failure. * */ static int cipso_v4_gentag_rbm(const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr, unsigned char *buffer, u32 buffer_len) { int ret_val; u32 tag_len; u32 level; if ((secattr->flags & NETLBL_SECATTR_MLS_LVL) == 0) return -EPERM; ret_val = cipso_v4_map_lvl_hton(doi_def, secattr->attr.mls.lvl, &level); if (ret_val != 0) return ret_val; if (secattr->flags & NETLBL_SECATTR_MLS_CAT) { ret_val = cipso_v4_map_cat_rbm_hton(doi_def, secattr, &buffer[4], buffer_len - 4); if (ret_val < 0) return ret_val; /* This will send packets using the "optimized" format when * possible as specified in section 3.4.2.6 of the * CIPSO draft. */ if (cipso_v4_rbm_optfmt && ret_val > 0 && ret_val <= 10) tag_len = 14; else tag_len = 4 + ret_val; } else tag_len = 4; buffer[0] = CIPSO_V4_TAG_RBITMAP; buffer[1] = tag_len; buffer[3] = level; return tag_len; } /** * cipso_v4_parsetag_rbm - Parse a CIPSO restricted bitmap tag * @doi_def: the DOI definition * @tag: the CIPSO tag * @secattr: the security attributes * * Description: * Parse a CIPSO restricted bitmap tag (tag type #1) and return the security * attributes in @secattr. Return zero on success, negative values on * failure. * */ static int cipso_v4_parsetag_rbm(const struct cipso_v4_doi *doi_def, const unsigned char *tag, struct netlbl_lsm_secattr *secattr) { int ret_val; u8 tag_len = tag[1]; u32 level; ret_val = cipso_v4_map_lvl_ntoh(doi_def, tag[3], &level); if (ret_val != 0) return ret_val; secattr->attr.mls.lvl = level; secattr->flags |= NETLBL_SECATTR_MLS_LVL; if (tag_len > 4) { secattr->attr.mls.cat = netlbl_secattr_catmap_alloc(GFP_ATOMIC); if (secattr->attr.mls.cat == NULL) return -ENOMEM; ret_val = cipso_v4_map_cat_rbm_ntoh(doi_def, &tag[4], tag_len - 4, secattr); if (ret_val != 0) { netlbl_secattr_catmap_free(secattr->attr.mls.cat); return ret_val; } secattr->flags |= NETLBL_SECATTR_MLS_CAT; } return 0; } /** * cipso_v4_gentag_enum - Generate a CIPSO enumerated tag (type #2) * @doi_def: the DOI definition * @secattr: the security attributes * @buffer: the option buffer * @buffer_len: length of buffer in bytes * * Description: * Generate a CIPSO option using the enumerated tag, tag type #2. Returns the * size of the tag on success, negative values on failure.
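 * (Illustrative aside, not part of the original source: a level-only enumerated tag occupies four octets - tag type, tag length (4), a zeroed alignment octet, and the mapped level - the third octet being zero because cipso_v4_genopt() clears the buffer before the tag is built.)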
* */ static int cipso_v4_gentag_enum(const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr, unsigned char *buffer, u32 buffer_len) { int ret_val; u32 tag_len; u32 level; if (!(secattr->flags & NETLBL_SECATTR_MLS_LVL)) return -EPERM; ret_val = cipso_v4_map_lvl_hton(doi_def, secattr->attr.mls.lvl, &level); if (ret_val != 0) return ret_val; if (secattr->flags & NETLBL_SECATTR_MLS_CAT) { ret_val = cipso_v4_map_cat_enum_hton(doi_def, secattr, &buffer[4], buffer_len - 4); if (ret_val < 0) return ret_val; tag_len = 4 + ret_val; } else tag_len = 4; buffer[0] = CIPSO_V4_TAG_ENUM; buffer[1] = tag_len; buffer[3] = level; return tag_len; } /** * cipso_v4_parsetag_enum - Parse a CIPSO enumerated tag * @doi_def: the DOI definition * @tag: the CIPSO tag * @secattr: the security attributes * * Description: * Parse a CIPSO enumerated tag (tag type #2) and return the security * attributes in @secattr. Return zero on success, negative values on * failure. * */ static int cipso_v4_parsetag_enum(const struct cipso_v4_doi *doi_def, const unsigned char *tag, struct netlbl_lsm_secattr *secattr) { int ret_val; u8 tag_len = tag[1]; u32 level; ret_val = cipso_v4_map_lvl_ntoh(doi_def, tag[3], &level); if (ret_val != 0) return ret_val; secattr->attr.mls.lvl = level; secattr->flags |= NETLBL_SECATTR_MLS_LVL; if (tag_len > 4) { secattr->attr.mls.cat = netlbl_secattr_catmap_alloc(GFP_ATOMIC); if (secattr->attr.mls.cat == NULL) return -ENOMEM; ret_val = cipso_v4_map_cat_enum_ntoh(doi_def, &tag[4], tag_len - 4, secattr); if (ret_val != 0) { netlbl_secattr_catmap_free(secattr->attr.mls.cat); return ret_val; } secattr->flags |= NETLBL_SECATTR_MLS_CAT; } return 0; } /** * cipso_v4_gentag_rng - Generate a CIPSO ranged tag (type #5) * @doi_def: the DOI definition * @secattr: the security attributes * @buffer: the option buffer * @buffer_len: length of buffer in bytes * * Description: * Generate a CIPSO option using the ranged tag, tag type #5. Returns the * size of the tag on success, negative values on failure. * */ static int cipso_v4_gentag_rng(const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr, unsigned char *buffer, u32 buffer_len) { int ret_val; u32 tag_len; u32 level; if (!(secattr->flags & NETLBL_SECATTR_MLS_LVL)) return -EPERM; ret_val = cipso_v4_map_lvl_hton(doi_def, secattr->attr.mls.lvl, &level); if (ret_val != 0) return ret_val; if (secattr->flags & NETLBL_SECATTR_MLS_CAT) { ret_val = cipso_v4_map_cat_rng_hton(doi_def, secattr, &buffer[4], buffer_len - 4); if (ret_val < 0) return ret_val; tag_len = 4 + ret_val; } else tag_len = 4; buffer[0] = CIPSO_V4_TAG_RANGE; buffer[1] = tag_len; buffer[3] = level; return tag_len; } /** * cipso_v4_parsetag_rng - Parse a CIPSO ranged tag * @doi_def: the DOI definition * @tag: the CIPSO tag * @secattr: the security attributes * * Description: * Parse a CIPSO ranged tag (tag type #5) and return the security attributes * in @secattr. Return zero on success, negative values on failure.
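 * (Illustrative aside, not part of the original source: a ranged tag carrying the pair high = 7, low = 2 sets local categories 2 through 7 inclusive via netlbl_secattr_catmap_setrng(), and a trailing entry with no low half implies a low end of zero.)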
* */ static int cipso_v4_parsetag_rng(const struct cipso_v4_doi *doi_def, const unsigned char *tag, struct netlbl_lsm_secattr *secattr) { int ret_val; u8 tag_len = tag[1]; u32 level; ret_val = cipso_v4_map_lvl_ntoh(doi_def, tag[3], &level); if (ret_val != 0) return ret_val; secattr->attr.mls.lvl = level; secattr->flags |= NETLBL_SECATTR_MLS_LVL; if (tag_len > 4) { secattr->attr.mls.cat = netlbl_secattr_catmap_alloc(GFP_ATOMIC); if (secattr->attr.mls.cat == NULL) return -ENOMEM; ret_val = cipso_v4_map_cat_rng_ntoh(doi_def, &tag[4], tag_len - 4, secattr); if (ret_val != 0) { netlbl_secattr_catmap_free(secattr->attr.mls.cat); return ret_val; } secattr->flags |= NETLBL_SECATTR_MLS_CAT; } return 0; } /** * cipso_v4_gentag_loc - Generate a CIPSO local tag (non-standard) * @doi_def: the DOI definition * @secattr: the security attributes * @buffer: the option buffer * @buffer_len: length of buffer in bytes * * Description: * Generate a CIPSO option using the local tag. Returns the size of the tag * on success, negative values on failure. * */ static int cipso_v4_gentag_loc(const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr, unsigned char *buffer, u32 buffer_len) { if (!(secattr->flags & NETLBL_SECATTR_SECID)) return -EPERM; buffer[0] = CIPSO_V4_TAG_LOCAL; buffer[1] = CIPSO_V4_TAG_LOC_BLEN; *(u32 *)&buffer[2] = secattr->attr.secid; return CIPSO_V4_TAG_LOC_BLEN; } /** * cipso_v4_parsetag_loc - Parse a CIPSO local tag * @doi_def: the DOI definition * @tag: the CIPSO tag * @secattr: the security attributes * * Description: * Parse a CIPSO local tag and return the security attributes in @secattr. * Return zero on success, negative values on failure. * */ static int cipso_v4_parsetag_loc(const struct cipso_v4_doi *doi_def, const unsigned char *tag, struct netlbl_lsm_secattr *secattr) { secattr->attr.secid = *(u32 *)&tag[2]; secattr->flags |= NETLBL_SECATTR_SECID; return 0; } /** * cipso_v4_validate - Validate a CIPSO option * @skb: the packet * @option: the start of the option, on error it is set to point to the error * * Description: * This routine is called to validate a CIPSO option, it checks all of the * fields to ensure that they are at least valid, see the draft snippet below * for details. If the option is valid then a zero value is returned and * the value of @option is unchanged. If the option is invalid then a * non-zero value is returned and @option is adjusted to point to the * offending portion of the option. From the IETF draft ... * * "If any field within the CIPSO options, such as the DOI identifier, is not * recognized the IP datagram is discarded and an ICMP 'parameter problem' * (type 12) is generated and returned. The ICMP code field is set to 'bad * parameter' (code 0) and the pointer is set to the start of the CIPSO field * that is unrecognized."
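 * (Illustrative aside, not part of the original source: an unknown DOI, for example, sets err_offset to 2, leaving *option pointing at the first DOI octet - exactly the field the ICMP 'parameter problem' pointer should reference.)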
* */ int cipso_v4_validate(const struct sk_buff *skb, unsigned char **option) { unsigned char *opt = *option; unsigned char *tag; unsigned char opt_iter; unsigned char err_offset = 0; u8 opt_len; u8 tag_len; struct cipso_v4_doi *doi_def = NULL; u32 tag_iter; /* caller already checks for length values that are too large */ opt_len = opt[1]; if (opt_len < 8) { err_offset = 1; goto validate_return; } rcu_read_lock(); doi_def = cipso_v4_doi_search(get_unaligned_be32(&opt[2])); if (doi_def == NULL) { err_offset = 2; goto validate_return_locked; } opt_iter = CIPSO_V4_HDR_LEN; tag = opt + opt_iter; while (opt_iter < opt_len) { for (tag_iter = 0; doi_def->tags[tag_iter] != tag[0];) if (doi_def->tags[tag_iter] == CIPSO_V4_TAG_INVALID || ++tag_iter == CIPSO_V4_TAG_MAXCNT) { err_offset = opt_iter; goto validate_return_locked; } tag_len = tag[1]; if (tag_len > (opt_len - opt_iter)) { err_offset = opt_iter + 1; goto validate_return_locked; } switch (tag[0]) { case CIPSO_V4_TAG_RBITMAP: if (tag_len < CIPSO_V4_TAG_RBM_BLEN) { err_offset = opt_iter + 1; goto validate_return_locked; } /* We are already going to do all the verification * necessary at the socket layer so from our point of * view it is safe to turn these checks off (and less * work), however, the CIPSO draft says we should do * all the CIPSO validations here but it doesn't * really specify _exactly_ what we need to validate * ... so, just make it a sysctl tunable. */ if (cipso_v4_rbm_strictvalid) { if (cipso_v4_map_lvl_valid(doi_def, tag[3]) < 0) { err_offset = opt_iter + 3; goto validate_return_locked; } if (tag_len > CIPSO_V4_TAG_RBM_BLEN && cipso_v4_map_cat_rbm_valid(doi_def, &tag[4], tag_len - 4) < 0) { err_offset = opt_iter + 4; goto validate_return_locked; } } break; case CIPSO_V4_TAG_ENUM: if (tag_len < CIPSO_V4_TAG_ENUM_BLEN) { err_offset = opt_iter + 1; goto validate_return_locked; } if (cipso_v4_map_lvl_valid(doi_def, tag[3]) < 0) { err_offset = opt_iter + 3; goto validate_return_locked; } if (tag_len > CIPSO_V4_TAG_ENUM_BLEN && cipso_v4_map_cat_enum_valid(doi_def, &tag[4], tag_len - 4) < 0) { err_offset = opt_iter + 4; goto validate_return_locked; } break; case CIPSO_V4_TAG_RANGE: if (tag_len < CIPSO_V4_TAG_RNG_BLEN) { err_offset = opt_iter + 1; goto validate_return_locked; } if (cipso_v4_map_lvl_valid(doi_def, tag[3]) < 0) { err_offset = opt_iter + 3; goto validate_return_locked; } if (tag_len > CIPSO_V4_TAG_RNG_BLEN && cipso_v4_map_cat_rng_valid(doi_def, &tag[4], tag_len - 4) < 0) { err_offset = opt_iter + 4; goto validate_return_locked; } break; case CIPSO_V4_TAG_LOCAL: /* This is a non-standard tag that we only allow for * local connections, so if the incoming interface is * not the loopback device drop the packet. */ if (!(skb->dev->flags & IFF_LOOPBACK)) { err_offset = opt_iter; goto validate_return_locked; } if (tag_len != CIPSO_V4_TAG_LOC_BLEN) { err_offset = opt_iter + 1; goto validate_return_locked; } break; default: err_offset = opt_iter; goto validate_return_locked; } tag += tag_len; opt_iter += tag_len; } validate_return_locked: rcu_read_unlock(); validate_return: *option = opt + err_offset; return err_offset; } /** * cipso_v4_error - Send the correct response for a bad packet * @skb: the packet * @error: the error code * @gateway: CIPSO gateway flag * * Description: * Based on the error code given in @error, send an ICMP error message back to * the originating host. From the IETF draft ... 
* * "If the contents of the CIPSO [option] are valid but the security label is * outside of the configured host or port label range, the datagram is * discarded and an ICMP 'destination unreachable' (type 3) is generated and * returned. The code field of the ICMP is set to 'communication with * destination network administratively prohibited' (code 9) or to * 'communication with destination host administratively prohibited' * (code 10). The value of the code is dependent on whether the originator * of the ICMP message is acting as a CIPSO host or a CIPSO gateway. The * recipient of the ICMP message MUST be able to handle either value. The * same procedure is performed if a CIPSO [option] can not be added to an * IP packet because it is too large to fit in the IP options area." * * "If the error is triggered by receipt of an ICMP message, the message is * discarded and no response is permitted (consistent with general ICMP * processing rules)." * */ void cipso_v4_error(struct sk_buff *skb, int error, u32 gateway) { if (ip_hdr(skb)->protocol == IPPROTO_ICMP || error != -EACCES) return; if (gateway) icmp_send(skb, ICMP_DEST_UNREACH, ICMP_NET_ANO, 0); else icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_ANO, 0); } /** * cipso_v4_genopt - Generate a CIPSO option * @buf: the option buffer * @buf_len: the size of opt_buf * @doi_def: the CIPSO DOI to use * @secattr: the security attributes * * Description: * Generate a CIPSO option using the DOI definition and security attributes * passed to the function. Returns the length of the option on success and * negative values on failure. * */ static int cipso_v4_genopt(unsigned char *buf, u32 buf_len, const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr) { int ret_val; u32 iter; if (buf_len <= CIPSO_V4_HDR_LEN) return -ENOSPC; /* XXX - This code assumes only one tag per CIPSO option which isn't * really a good assumption to make but since we only support the MAC * tags right now it is a safe assumption. */ iter = 0; do { memset(buf, 0, buf_len); switch (doi_def->tags[iter]) { case CIPSO_V4_TAG_RBITMAP: ret_val = cipso_v4_gentag_rbm(doi_def, secattr, &buf[CIPSO_V4_HDR_LEN], buf_len - CIPSO_V4_HDR_LEN); break; case CIPSO_V4_TAG_ENUM: ret_val = cipso_v4_gentag_enum(doi_def, secattr, &buf[CIPSO_V4_HDR_LEN], buf_len - CIPSO_V4_HDR_LEN); break; case CIPSO_V4_TAG_RANGE: ret_val = cipso_v4_gentag_rng(doi_def, secattr, &buf[CIPSO_V4_HDR_LEN], buf_len - CIPSO_V4_HDR_LEN); break; case CIPSO_V4_TAG_LOCAL: ret_val = cipso_v4_gentag_loc(doi_def, secattr, &buf[CIPSO_V4_HDR_LEN], buf_len - CIPSO_V4_HDR_LEN); break; default: return -EPERM; } iter++; } while (ret_val < 0 && iter < CIPSO_V4_TAG_MAXCNT && doi_def->tags[iter] != CIPSO_V4_TAG_INVALID); if (ret_val < 0) return ret_val; cipso_v4_gentag_hdr(doi_def, buf, ret_val); return CIPSO_V4_HDR_LEN + ret_val; } /** * cipso_v4_sock_setattr - Add a CIPSO option to a socket * @sk: the socket * @doi_def: the CIPSO DOI to use * @secattr: the specific security attributes of the socket * * Description: * Set the CIPSO option on the given socket using the DOI definition and * security attributes passed to the function. This function requires * exclusive access to @sk, which means it either needs to be in the * process of being created or locked. Returns zero on success and negative * values on failure. 
* */ int cipso_v4_sock_setattr(struct sock *sk, const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr) { int ret_val = -EPERM; unsigned char *buf = NULL; u32 buf_len; u32 opt_len; struct ip_options *opt = NULL; struct inet_sock *sk_inet; struct inet_connection_sock *sk_conn; /* In the case of sock_create_lite(), the sock->sk field is not * defined yet but it is not a problem as the only users of these * "lite" PF_INET sockets are functions which do an accept() call * afterwards so we will label the socket as part of the accept(). */ if (sk == NULL) return 0; /* We allocate the maximum CIPSO option size here so we are probably * being a little wasteful, but it makes our life _much_ easier later * on and after all we are only talking about 40 bytes. */ buf_len = CIPSO_V4_OPT_LEN_MAX; buf = kmalloc(buf_len, GFP_ATOMIC); if (buf == NULL) { ret_val = -ENOMEM; goto socket_setattr_failure; } ret_val = cipso_v4_genopt(buf, buf_len, doi_def, secattr); if (ret_val < 0) goto socket_setattr_failure; buf_len = ret_val; /* We can't use ip_options_get() directly because it makes a call to * ip_options_get_alloc() which allocates memory with GFP_KERNEL and * we won't always have CAP_NET_RAW even though we _always_ want to * set the IPOPT_CIPSO option. */ opt_len = (buf_len + 3) & ~3; opt = kzalloc(sizeof(*opt) + opt_len, GFP_ATOMIC); if (opt == NULL) { ret_val = -ENOMEM; goto socket_setattr_failure; } memcpy(opt->__data, buf, buf_len); opt->optlen = opt_len; opt->cipso = sizeof(struct iphdr); kfree(buf); buf = NULL; sk_inet = inet_sk(sk); if (sk_inet->is_icsk) { sk_conn = inet_csk(sk); if (sk_inet->opt) sk_conn->icsk_ext_hdr_len -= sk_inet->opt->optlen; sk_conn->icsk_ext_hdr_len += opt->optlen; sk_conn->icsk_sync_mss(sk, sk_conn->icsk_pmtu_cookie); } opt = xchg(&sk_inet->opt, opt); kfree(opt); return 0; socket_setattr_failure: kfree(buf); kfree(opt); return ret_val; } /** * cipso_v4_req_setattr - Add a CIPSO option to a connection request socket * @req: the connection request socket * @doi_def: the CIPSO DOI to use * @secattr: the specific security attributes of the socket * * Description: * Set the CIPSO option on the given socket using the DOI definition and * security attributes passed to the function. Returns zero on success and * negative values on failure. * */ int cipso_v4_req_setattr(struct request_sock *req, const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr) { int ret_val = -EPERM; unsigned char *buf = NULL; u32 buf_len; u32 opt_len; struct ip_options *opt = NULL; struct inet_request_sock *req_inet; /* We allocate the maximum CIPSO option size here so we are probably * being a little wasteful, but it makes our life _much_ easier later * on and after all we are only talking about 40 bytes. */ buf_len = CIPSO_V4_OPT_LEN_MAX; buf = kmalloc(buf_len, GFP_ATOMIC); if (buf == NULL) { ret_val = -ENOMEM; goto req_setattr_failure; } ret_val = cipso_v4_genopt(buf, buf_len, doi_def, secattr); if (ret_val < 0) goto req_setattr_failure; buf_len = ret_val; /* We can't use ip_options_get() directly because it makes a call to * ip_options_get_alloc() which allocates memory with GFP_KERNEL and * we won't always have CAP_NET_RAW even though we _always_ want to * set the IPOPT_CIPSO option. 
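 * (Illustrative aside, not part of the original source: the rounding below pads the option to the next multiple of four, e.g. a 14 byte CIPSO option becomes (14 + 3) & ~3 = 16 bytes, while a 16 byte option stays 16.)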
*/ opt_len = (buf_len + 3) & ~3; opt = kzalloc(sizeof(*opt) + opt_len, GFP_ATOMIC); if (opt == NULL) { ret_val = -ENOMEM; goto req_setattr_failure; } memcpy(opt->__data, buf, buf_len); opt->optlen = opt_len; opt->cipso = sizeof(struct iphdr); kfree(buf); buf = NULL; req_inet = inet_rsk(req); opt = xchg(&req_inet->opt, opt); kfree(opt); return 0; req_setattr_failure: kfree(buf); kfree(opt); return ret_val; } /** * cipso_v4_delopt - Delete the CIPSO option from a set of IP options * @opt_ptr: IP option pointer * * Description: * Deletes the CIPSO IP option from a set of IP options and makes the necessary * adjustments to the IP option structure. Returns zero on success, negative * values on failure. * */ static int cipso_v4_delopt(struct ip_options **opt_ptr) { int hdr_delta = 0; struct ip_options *opt = *opt_ptr; if (opt->srr || opt->rr || opt->ts || opt->router_alert) { u8 cipso_len; u8 cipso_off; unsigned char *cipso_ptr; int iter; int optlen_new; cipso_off = opt->cipso - sizeof(struct iphdr); cipso_ptr = &opt->__data[cipso_off]; cipso_len = cipso_ptr[1]; if (opt->srr > opt->cipso) opt->srr -= cipso_len; if (opt->rr > opt->cipso) opt->rr -= cipso_len; if (opt->ts > opt->cipso) opt->ts -= cipso_len; if (opt->router_alert > opt->cipso) opt->router_alert -= cipso_len; opt->cipso = 0; memmove(cipso_ptr, cipso_ptr + cipso_len, opt->optlen - cipso_off - cipso_len); /* determining the new total option length is tricky because of * the padding necessary, the only thing i can think to do at * this point is walk the options one-by-one, skipping the * padding at the end to determine the actual option size and * from there we can determine the new total option length */ iter = 0; optlen_new = 0; while (iter < opt->optlen) if (opt->__data[iter] != IPOPT_NOP) { iter += opt->__data[iter + 1]; optlen_new = iter; } else iter++; hdr_delta = opt->optlen; opt->optlen = (optlen_new + 3) & ~3; hdr_delta -= opt->optlen; } else { /* only the cipso option was present on the socket so we can * remove the entire option struct */ *opt_ptr = NULL; hdr_delta = opt->optlen; kfree(opt); } return hdr_delta; } /** * cipso_v4_sock_delattr - Delete the CIPSO option from a socket * @sk: the socket * * Description: * Removes the CIPSO option from a socket, if present. * */ void cipso_v4_sock_delattr(struct sock *sk) { int hdr_delta; struct ip_options *opt; struct inet_sock *sk_inet; sk_inet = inet_sk(sk); opt = sk_inet->opt; if (opt == NULL || opt->cipso == 0) return; hdr_delta = cipso_v4_delopt(&sk_inet->opt); if (sk_inet->is_icsk && hdr_delta > 0) { struct inet_connection_sock *sk_conn = inet_csk(sk); sk_conn->icsk_ext_hdr_len -= hdr_delta; sk_conn->icsk_sync_mss(sk, sk_conn->icsk_pmtu_cookie); } } /** * cipso_v4_req_delattr - Delete the CIPSO option from a request socket * @req: the request socket * * Description: * Removes the CIPSO option from a request socket, if present. * */ void cipso_v4_req_delattr(struct request_sock *req) { struct ip_options *opt; struct inet_request_sock *req_inet; req_inet = inet_rsk(req); opt = req_inet->opt; if (opt == NULL || opt->cipso == 0) return; cipso_v4_delopt(&req_inet->opt); } /** * cipso_v4_getattr - Helper function for the cipso_v4_*_getattr functions * @cipso: the CIPSO v4 option * @secattr: the security attributes * * Description: * Inspect @cipso and return the security attributes in @secattr. Returns zero * on success and negative values on failure.
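 * (Illustrative aside, not part of the original source: @cipso points at the option itself - cipso[0] is the option type, cipso[1] the total length, cipso[2] through cipso[5] the DOI in network byte order, and cipso[6] the first tag type, which is why the parser switches on cipso[6].)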
* */ static int cipso_v4_getattr(const unsigned char *cipso, struct netlbl_lsm_secattr *secattr) { int ret_val = -ENOMSG; u32 doi; struct cipso_v4_doi *doi_def; if (cipso_v4_cache_check(cipso, cipso[1], secattr) == 0) return 0; doi = get_unaligned_be32(&cipso[2]); rcu_read_lock(); doi_def = cipso_v4_doi_search(doi); if (doi_def == NULL) goto getattr_return; /* XXX - This code assumes only one tag per CIPSO option which isn't * really a good assumption to make but since we only support the MAC * tags right now it is a safe assumption. */ switch (cipso[6]) { case CIPSO_V4_TAG_RBITMAP: ret_val = cipso_v4_parsetag_rbm(doi_def, &cipso[6], secattr); break; case CIPSO_V4_TAG_ENUM: ret_val = cipso_v4_parsetag_enum(doi_def, &cipso[6], secattr); break; case CIPSO_V4_TAG_RANGE: ret_val = cipso_v4_parsetag_rng(doi_def, &cipso[6], secattr); break; case CIPSO_V4_TAG_LOCAL: ret_val = cipso_v4_parsetag_loc(doi_def, &cipso[6], secattr); break; } if (ret_val == 0) secattr->type = NETLBL_NLTYPE_CIPSOV4; getattr_return: rcu_read_unlock(); return ret_val; } /** * cipso_v4_sock_getattr - Get the security attributes from a sock * @sk: the sock * @secattr: the security attributes * * Description: * Query @sk to see if there is a CIPSO option attached to the sock and if * there is return the CIPSO security attributes in @secattr. This function * requires that @sk be locked, or privately held, but it does not do any * locking itself. Returns zero on success and negative values on failure. * */ int cipso_v4_sock_getattr(struct sock *sk, struct netlbl_lsm_secattr *secattr) { struct ip_options *opt; opt = inet_sk(sk)->opt; if (opt == NULL || opt->cipso == 0) return -ENOMSG; return cipso_v4_getattr(opt->__data + opt->cipso - sizeof(struct iphdr), secattr); } /** * cipso_v4_skbuff_setattr - Set the CIPSO option on a packet * @skb: the packet * @secattr: the security attributes * * Description: * Set the CIPSO option on the given packet based on the security attributes. * Returns a pointer to the IP header on success and NULL on failure. 
* */ int cipso_v4_skbuff_setattr(struct sk_buff *skb, const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr) { int ret_val; struct iphdr *iph; struct ip_options *opt = &IPCB(skb)->opt; unsigned char buf[CIPSO_V4_OPT_LEN_MAX]; u32 buf_len = CIPSO_V4_OPT_LEN_MAX; u32 opt_len; int len_delta; ret_val = cipso_v4_genopt(buf, buf_len, doi_def, secattr); if (ret_val < 0) return ret_val; buf_len = ret_val; opt_len = (buf_len + 3) & ~3; /* we overwrite any existing options to ensure that we have enough * room for the CIPSO option, the reason is that we _need_ to guarantee * that the security label is applied to the packet - we do the same * thing when using the socket options and it hasn't caused a problem, * if we need to we can always revisit this choice later */ len_delta = opt_len - opt->optlen; /* if we don't ensure enough headroom we could panic on the skb_push() * call below so make sure we have enough, we are also "mangling" the * packet so we should probably do a copy-on-write call anyway */ ret_val = skb_cow(skb, skb_headroom(skb) + len_delta); if (ret_val < 0) return ret_val; if (len_delta > 0) { /* we assume that the header + opt->optlen have already been * "pushed" in ip_options_build() or similar */ iph = ip_hdr(skb); skb_push(skb, len_delta); memmove((char *)iph - len_delta, iph, iph->ihl << 2); skb_reset_network_header(skb); iph = ip_hdr(skb); } else if (len_delta < 0) { iph = ip_hdr(skb); memset(iph + 1, IPOPT_NOP, opt->optlen); } else iph = ip_hdr(skb); if (opt->optlen > 0) memset(opt, 0, sizeof(*opt)); opt->optlen = opt_len; opt->cipso = sizeof(struct iphdr); opt->is_changed = 1; /* we have to do the following because we are being called from a * netfilter hook which means the packet already has had the header * fields populated and the checksum calculated - yes this means we * are doing more work than needed but we do it to keep the core * stack clean and tidy */ memcpy(iph + 1, buf, buf_len); if (opt_len > buf_len) memset((char *)(iph + 1) + buf_len, 0, opt_len - buf_len); if (len_delta != 0) { iph->ihl = 5 + (opt_len >> 2); iph->tot_len = htons(skb->len); } ip_send_check(iph); return 0; } /** * cipso_v4_skbuff_delattr - Delete any CIPSO options from a packet * @skb: the packet * * Description: * Removes any and all CIPSO options from the given packet. Returns zero on * success, negative values on failure. * */ int cipso_v4_skbuff_delattr(struct sk_buff *skb) { int ret_val; struct iphdr *iph; struct ip_options *opt = &IPCB(skb)->opt; unsigned char *cipso_ptr; if (opt->cipso == 0) return 0; /* since we are changing the packet we should make a copy */ ret_val = skb_cow(skb, skb_headroom(skb)); if (ret_val < 0) return ret_val; /* the easiest thing to do is just replace the cipso option with noop * options since we don't change the size of the packet, although we * still need to recalculate the checksum */ iph = ip_hdr(skb); cipso_ptr = (unsigned char *)iph + opt->cipso; memset(cipso_ptr, IPOPT_NOOP, cipso_ptr[1]); opt->cipso = 0; opt->is_changed = 1; ip_send_check(iph); return 0; } /** * cipso_v4_skbuff_getattr - Get the security attributes from the CIPSO option * @skb: the packet * @secattr: the security attributes * * Description: * Parse the given packet's CIPSO option and return the security attributes. * Returns zero on success and negative values on failure. 
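 *
 * A minimal sketch of a hypothetical caller (the import hook name is an
 * assumption, not part of this file):
 *
 *	struct netlbl_lsm_secattr secattr;
 *
 *	netlbl_secattr_init(&secattr);
 *	if (cipso_v4_skbuff_getattr(skb, &secattr) == 0)
 *		my_lsm_import_secattr(&secattr);
 *	netlbl_secattr_destroy(&secattr);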
* */ int cipso_v4_skbuff_getattr(const struct sk_buff *skb, struct netlbl_lsm_secattr *secattr) { return cipso_v4_getattr(CIPSO_V4_OPTPTR(skb), secattr); } /* * Setup Functions */ /** * cipso_v4_init - Initialize the CIPSO module * * Description: * Initialize the CIPSO module and prepare it for use. Returns zero on success * and negative values on failure. * */ static int __init cipso_v4_init(void) { int ret_val; ret_val = cipso_v4_cache_init(); if (ret_val != 0) panic("Failed to initialize the CIPSO/IPv4 cache (%d)\n", ret_val); return 0; } subsys_initcall(cipso_v4_init);
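/*
 * Illustrative sketch only (not part of the original sources): one way a
 * hypothetical in-kernel user could register a simple pass-through DOI
 * with the engine above.  The DOI value (16), the helper name and the
 * zeroed audit info are assumptions made for the example.
 */
static int __maybe_unused example_cipso_register_doi(void)
{
	struct cipso_v4_doi *doi_def;
	struct netlbl_audit audit_info = { .secid = 0 };
	int ret;

	doi_def = kzalloc(sizeof(*doi_def), GFP_KERNEL);
	if (doi_def == NULL)
		return -ENOMEM;
	doi_def->doi = 16;			/* example DOI value */
	doi_def->type = CIPSO_V4_MAP_PASS;	/* no level/category mapping */
	doi_def->tags[0] = CIPSO_V4_TAG_RBITMAP;
	doi_def->tags[1] = CIPSO_V4_TAG_INVALID;	/* terminate the tag list */

	ret = cipso_v4_doi_add(doi_def, &audit_info);
	if (ret != 0)
		cipso_v4_doi_free(doi_def);	/* doi_add() does not free on error */
	return ret;
}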
/* * CIPSO - Commercial IP Security Option * * This is an implementation of the CIPSO 2.2 protocol as specified in * draft-ietf-cipso-ipsecurity-01.txt with additional tag types as found in * FIPS-188. While CIPSO never became a full IETF RFC standard many vendors * have chosen to adopt the protocol and over the years it has become a * de-facto standard for labeled networking. * * The CIPSO draft specification can be found in the kernel's Documentation * directory as well as the following URL: * http://tools.ietf.org/id/draft-ietf-cipso-ipsecurity-01.txt * The FIPS-188 specification can be found at the following URL: * http://www.itl.nist.gov/fipspubs/fip188.htm * * Author: Paul Moore <paul.moore@hp.com> * */ /* * (c) Copyright Hewlett-Packard Development Company, L.P., 2006, 2008 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See * the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/init.h> #include <linux/types.h> #include <linux/rcupdate.h> #include <linux/list.h> #include <linux/spinlock.h> #include <linux/string.h> #include <linux/jhash.h> #include <linux/audit.h> #include <linux/slab.h> #include <net/ip.h> #include <net/icmp.h> #include <net/tcp.h> #include <net/netlabel.h> #include <net/cipso_ipv4.h> #include <asm/atomic.h> #include <asm/bug.h> #include <asm/unaligned.h> /* List of available DOI definitions */ /* XXX - This currently assumes a minimal number of different DOIs in use, * if in practice there are a lot of different DOIs this list should * probably be turned into a hash table or something similar so we * can do quick lookups. */ static DEFINE_SPINLOCK(cipso_v4_doi_list_lock); static LIST_HEAD(cipso_v4_doi_list); /* Label mapping cache */ int cipso_v4_cache_enabled = 1; int cipso_v4_cache_bucketsize = 10; #define CIPSO_V4_CACHE_BUCKETBITS 7 #define CIPSO_V4_CACHE_BUCKETS (1 << CIPSO_V4_CACHE_BUCKETBITS) #define CIPSO_V4_CACHE_REORDERLIMIT 10 struct cipso_v4_map_cache_bkt { spinlock_t lock; u32 size; struct list_head list; }; struct cipso_v4_map_cache_entry { u32 hash; unsigned char *key; size_t key_len; struct netlbl_lsm_cache *lsm_data; u32 activity; struct list_head list; }; static struct cipso_v4_map_cache_bkt *cipso_v4_cache = NULL; /* Restricted bitmap (tag #1) flags */ int cipso_v4_rbm_optfmt = 0; int cipso_v4_rbm_strictvalid = 1; /* * Protocol Constants */ /* Maximum size of the CIPSO IP option, derived from the fact that the maximum * IPv4 header size is 60 bytes and the base IPv4 header is 20 bytes long. */ #define CIPSO_V4_OPT_LEN_MAX 40 /* Length of the base CIPSO option, this includes the option type (1 byte), the * option length (1 byte), and the DOI (4 bytes). */ #define CIPSO_V4_HDR_LEN 6 /* Base length of the restrictive category bitmap tag (tag #1). */ #define CIPSO_V4_TAG_RBM_BLEN 4 /* Base length of the enumerated category tag (tag #2). */ #define CIPSO_V4_TAG_ENUM_BLEN 4 /* Base length of the ranged categories bitmap tag (tag #5). 
*/ #define CIPSO_V4_TAG_RNG_BLEN 4 /* The maximum number of category ranges permitted in the ranged category tag * (tag #5). You may note that the IETF draft states that the maximum number * of category ranges is 7, but if the low end of the last category range is * zero then it is possible to fit 8 category ranges because the zero should * be omitted. */ #define CIPSO_V4_TAG_RNG_CAT_MAX 8 /* Base length of the local tag (non-standard tag). * Tag definition (may change between kernel versions) * * 0 8 16 24 32 * +----------+----------+----------+----------+ * | 10000000 | 00000110 | 32-bit secid value | * +----------+----------+----------+----------+ * | in (host byte order)| * +----------+----------+ * */ #define CIPSO_V4_TAG_LOC_BLEN 6 /* * Helper Functions */ /** * cipso_v4_bitmap_walk - Walk a bitmap looking for a bit * @bitmap: the bitmap * @bitmap_len: length in bits * @offset: starting offset * @state: if non-zero, look for a set (1) bit else look for a cleared (0) bit * * Description: * Starting at @offset, walk the bitmap from left to right until either the * desired bit is found or we reach the end. Return the bit offset, -1 if * not found, or -2 if error. */ static int cipso_v4_bitmap_walk(const unsigned char *bitmap, u32 bitmap_len, u32 offset, u8 state) { u32 bit_spot; u32 byte_offset; unsigned char bitmask; unsigned char byte; /* gcc always rounds to zero when doing integer division */ byte_offset = offset / 8; byte = bitmap[byte_offset]; bit_spot = offset; bitmask = 0x80 >> (offset % 8); while (bit_spot < bitmap_len) { if ((state && (byte & bitmask) == bitmask) || (state == 0 && (byte & bitmask) == 0)) return bit_spot; bit_spot++; bitmask >>= 1; if (bitmask == 0) { byte = bitmap[++byte_offset]; bitmask = 0x80; } } return -1; } /** * cipso_v4_bitmap_setbit - Sets a single bit in a bitmap * @bitmap: the bitmap * @bit: the bit * @state: if non-zero, set the bit (1) else clear the bit (0) * * Description: * Set a single bit in the bitmask. Returns zero on success, negative values * on error. */ static void cipso_v4_bitmap_setbit(unsigned char *bitmap, u32 bit, u8 state) { u32 byte_spot; u8 bitmask; /* gcc always rounds to zero when doing integer division */ byte_spot = bit / 8; bitmask = 0x80 >> (bit % 8); if (state) bitmap[byte_spot] |= bitmask; else bitmap[byte_spot] &= ~bitmask; } /** * cipso_v4_cache_entry_free - Frees a cache entry * @entry: the entry to free * * Description: * This function frees the memory associated with a cache entry including the * LSM cache data if there are no longer any users, i.e. reference count == 0. * */ static void cipso_v4_cache_entry_free(struct cipso_v4_map_cache_entry *entry) { if (entry->lsm_data) netlbl_secattr_cache_free(entry->lsm_data); kfree(entry->key); kfree(entry); } /** * cipso_v4_map_cache_hash - Hashing function for the CIPSO cache * @key: the hash key * @key_len: the length of the key in bytes * * Description: * The CIPSO tag hashing function. Returns a 32-bit hash value. * */ static u32 cipso_v4_map_cache_hash(const unsigned char *key, u32 key_len) { return jhash(key, key_len, 0); } /* * Label Mapping Cache Functions */ /** * cipso_v4_cache_init - Initialize the CIPSO cache * * Description: * Initializes the CIPSO label mapping cache, this function should be called * before any of the other functions defined in this file. Returns zero on * success, negative values on error. 
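 *
 * With the default tunables above this allocates 128 (1 << 7) buckets,
 * each holding at most cipso_v4_cache_bucketsize (10) entries.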
* */ static int cipso_v4_cache_init(void) { u32 iter; cipso_v4_cache = kcalloc(CIPSO_V4_CACHE_BUCKETS, sizeof(struct cipso_v4_map_cache_bkt), GFP_KERNEL); if (cipso_v4_cache == NULL) return -ENOMEM; for (iter = 0; iter < CIPSO_V4_CACHE_BUCKETS; iter++) { spin_lock_init(&cipso_v4_cache[iter].lock); cipso_v4_cache[iter].size = 0; INIT_LIST_HEAD(&cipso_v4_cache[iter].list); } return 0; } /** * cipso_v4_cache_invalidate - Invalidates the current CIPSO cache * * Description: * Invalidates and frees any entries in the CIPSO cache. Returns zero on * success and negative values on failure. * */ void cipso_v4_cache_invalidate(void) { struct cipso_v4_map_cache_entry *entry, *tmp_entry; u32 iter; for (iter = 0; iter < CIPSO_V4_CACHE_BUCKETS; iter++) { spin_lock_bh(&cipso_v4_cache[iter].lock); list_for_each_entry_safe(entry, tmp_entry, &cipso_v4_cache[iter].list, list) { list_del(&entry->list); cipso_v4_cache_entry_free(entry); } cipso_v4_cache[iter].size = 0; spin_unlock_bh(&cipso_v4_cache[iter].lock); } } /** * cipso_v4_cache_check - Check the CIPSO cache for a label mapping * @key: the buffer to check * @key_len: buffer length in bytes * @secattr: the security attribute struct to use * * Description: * This function checks the cache to see if a label mapping already exists for * the given key. If there is a match then the cache is adjusted and the * @secattr struct is populated with the correct LSM security attributes. The * cache is adjusted in the following manner if the entry is not already the * first in the cache bucket: * * 1. The cache entry's activity counter is incremented * 2. The previous (higher ranking) entry's activity counter is decremented * 3. If the difference between the two activity counters is geater than * CIPSO_V4_CACHE_REORDERLIMIT the two entries are swapped * * Returns zero on success, -ENOENT for a cache miss, and other negative values * on error. * */ static int cipso_v4_cache_check(const unsigned char *key, u32 key_len, struct netlbl_lsm_secattr *secattr) { u32 bkt; struct cipso_v4_map_cache_entry *entry; struct cipso_v4_map_cache_entry *prev_entry = NULL; u32 hash; if (!cipso_v4_cache_enabled) return -ENOENT; hash = cipso_v4_map_cache_hash(key, key_len); bkt = hash & (CIPSO_V4_CACHE_BUCKETS - 1); spin_lock_bh(&cipso_v4_cache[bkt].lock); list_for_each_entry(entry, &cipso_v4_cache[bkt].list, list) { if (entry->hash == hash && entry->key_len == key_len && memcmp(entry->key, key, key_len) == 0) { entry->activity += 1; atomic_inc(&entry->lsm_data->refcount); secattr->cache = entry->lsm_data; secattr->flags |= NETLBL_SECATTR_CACHE; secattr->type = NETLBL_NLTYPE_CIPSOV4; if (prev_entry == NULL) { spin_unlock_bh(&cipso_v4_cache[bkt].lock); return 0; } if (prev_entry->activity > 0) prev_entry->activity -= 1; if (entry->activity > prev_entry->activity && entry->activity - prev_entry->activity > CIPSO_V4_CACHE_REORDERLIMIT) { __list_del(entry->list.prev, entry->list.next); __list_add(&entry->list, prev_entry->list.prev, &prev_entry->list); } spin_unlock_bh(&cipso_v4_cache[bkt].lock); return 0; } prev_entry = entry; } spin_unlock_bh(&cipso_v4_cache[bkt].lock); return -ENOENT; } /** * cipso_v4_cache_add - Add an entry to the CIPSO cache * @skb: the packet * @secattr: the packet's security attributes * * Description: * Add a new entry into the CIPSO label mapping cache. Add the new entry to * head of the cache bucket's list, if the cache bucket is out of room remove * the last entry in the list first. It is important to note that there is * currently no checking for duplicate keys. 
Returns zero on success, * negative values on failure. * */ int cipso_v4_cache_add(const struct sk_buff *skb, const struct netlbl_lsm_secattr *secattr) { int ret_val = -EPERM; u32 bkt; struct cipso_v4_map_cache_entry *entry = NULL; struct cipso_v4_map_cache_entry *old_entry = NULL; unsigned char *cipso_ptr; u32 cipso_ptr_len; if (!cipso_v4_cache_enabled || cipso_v4_cache_bucketsize <= 0) return 0; cipso_ptr = CIPSO_V4_OPTPTR(skb); cipso_ptr_len = cipso_ptr[1]; entry = kzalloc(sizeof(*entry), GFP_ATOMIC); if (entry == NULL) return -ENOMEM; entry->key = kmemdup(cipso_ptr, cipso_ptr_len, GFP_ATOMIC); if (entry->key == NULL) { ret_val = -ENOMEM; goto cache_add_failure; } entry->key_len = cipso_ptr_len; entry->hash = cipso_v4_map_cache_hash(cipso_ptr, cipso_ptr_len); atomic_inc(&secattr->cache->refcount); entry->lsm_data = secattr->cache; bkt = entry->hash & (CIPSO_V4_CACHE_BUCKETS - 1); spin_lock_bh(&cipso_v4_cache[bkt].lock); if (cipso_v4_cache[bkt].size < cipso_v4_cache_bucketsize) { list_add(&entry->list, &cipso_v4_cache[bkt].list); cipso_v4_cache[bkt].size += 1; } else { old_entry = list_entry(cipso_v4_cache[bkt].list.prev, struct cipso_v4_map_cache_entry, list); list_del(&old_entry->list); list_add(&entry->list, &cipso_v4_cache[bkt].list); cipso_v4_cache_entry_free(old_entry); } spin_unlock_bh(&cipso_v4_cache[bkt].lock); return 0; cache_add_failure: if (entry) cipso_v4_cache_entry_free(entry); return ret_val; } /* * DOI List Functions */ /** * cipso_v4_doi_search - Searches for a DOI definition * @doi: the DOI to search for * * Description: * Search the DOI definition list for a DOI definition with a DOI value that * matches @doi. The caller is responsible for calling rcu_read_[un]lock(). * Returns a pointer to the DOI definition on success and NULL on failure. */ static struct cipso_v4_doi *cipso_v4_doi_search(u32 doi) { struct cipso_v4_doi *iter; list_for_each_entry_rcu(iter, &cipso_v4_doi_list, list) if (iter->doi == doi && atomic_read(&iter->refcount)) return iter; return NULL; } /** * cipso_v4_doi_add - Add a new DOI to the CIPSO protocol engine * @doi_def: the DOI structure * @audit_info: NetLabel audit information * * Description: * The caller defines a new DOI for use by the CIPSO engine and calls this * function to add it to the list of acceptable domains. The caller must * ensure that the mapping table specified in @doi_def->map meets all of the * requirements of the mapping type (see cipso_ipv4.h for details). Returns * zero on success and non-zero on failure. 
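 *
 * The definition is inserted with a reference count of one; that initial
 * reference is the one dropped later by cipso_v4_doi_remove().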
* */ int cipso_v4_doi_add(struct cipso_v4_doi *doi_def, struct netlbl_audit *audit_info) { int ret_val = -EINVAL; u32 iter; u32 doi; u32 doi_type; struct audit_buffer *audit_buf; doi = doi_def->doi; doi_type = doi_def->type; if (doi_def == NULL || doi_def->doi == CIPSO_V4_DOI_UNKNOWN) goto doi_add_return; for (iter = 0; iter < CIPSO_V4_TAG_MAXCNT; iter++) { switch (doi_def->tags[iter]) { case CIPSO_V4_TAG_RBITMAP: break; case CIPSO_V4_TAG_RANGE: case CIPSO_V4_TAG_ENUM: if (doi_def->type != CIPSO_V4_MAP_PASS) goto doi_add_return; break; case CIPSO_V4_TAG_LOCAL: if (doi_def->type != CIPSO_V4_MAP_LOCAL) goto doi_add_return; break; case CIPSO_V4_TAG_INVALID: if (iter == 0) goto doi_add_return; break; default: goto doi_add_return; } } atomic_set(&doi_def->refcount, 1); spin_lock(&cipso_v4_doi_list_lock); if (cipso_v4_doi_search(doi_def->doi) != NULL) { spin_unlock(&cipso_v4_doi_list_lock); ret_val = -EEXIST; goto doi_add_return; } list_add_tail_rcu(&doi_def->list, &cipso_v4_doi_list); spin_unlock(&cipso_v4_doi_list_lock); ret_val = 0; doi_add_return: audit_buf = netlbl_audit_start(AUDIT_MAC_CIPSOV4_ADD, audit_info); if (audit_buf != NULL) { const char *type_str; switch (doi_type) { case CIPSO_V4_MAP_TRANS: type_str = "trans"; break; case CIPSO_V4_MAP_PASS: type_str = "pass"; break; case CIPSO_V4_MAP_LOCAL: type_str = "local"; break; default: type_str = "(unknown)"; } audit_log_format(audit_buf, " cipso_doi=%u cipso_type=%s res=%u", doi, type_str, ret_val == 0 ? 1 : 0); audit_log_end(audit_buf); } return ret_val; } /** * cipso_v4_doi_free - Frees a DOI definition * @entry: the entry's RCU field * * Description: * This function frees all of the memory associated with a DOI definition. * */ void cipso_v4_doi_free(struct cipso_v4_doi *doi_def) { if (doi_def == NULL) return; switch (doi_def->type) { case CIPSO_V4_MAP_TRANS: kfree(doi_def->map.std->lvl.cipso); kfree(doi_def->map.std->lvl.local); kfree(doi_def->map.std->cat.cipso); kfree(doi_def->map.std->cat.local); break; } kfree(doi_def); } /** * cipso_v4_doi_free_rcu - Frees a DOI definition via the RCU pointer * @entry: the entry's RCU field * * Description: * This function is designed to be used as a callback to the call_rcu() * function so that the memory allocated to the DOI definition can be released * safely. * */ static void cipso_v4_doi_free_rcu(struct rcu_head *entry) { struct cipso_v4_doi *doi_def; doi_def = container_of(entry, struct cipso_v4_doi, rcu); cipso_v4_doi_free(doi_def); } /** * cipso_v4_doi_remove - Remove an existing DOI from the CIPSO protocol engine * @doi: the DOI value * @audit_secid: the LSM secid to use in the audit message * * Description: * Removes a DOI definition from the CIPSO engine. The NetLabel routines will * be called to release their own LSM domain mappings as well as our own * domain list. Returns zero on success and negative values on failure. 
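 *
 * Removal only succeeds when the caller holds the last remaining
 * reference to the definition; otherwise -EBUSY is returned.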
* */ int cipso_v4_doi_remove(u32 doi, struct netlbl_audit *audit_info) { int ret_val; struct cipso_v4_doi *doi_def; struct audit_buffer *audit_buf; spin_lock(&cipso_v4_doi_list_lock); doi_def = cipso_v4_doi_search(doi); if (doi_def == NULL) { spin_unlock(&cipso_v4_doi_list_lock); ret_val = -ENOENT; goto doi_remove_return; } if (!atomic_dec_and_test(&doi_def->refcount)) { spin_unlock(&cipso_v4_doi_list_lock); ret_val = -EBUSY; goto doi_remove_return; } list_del_rcu(&doi_def->list); spin_unlock(&cipso_v4_doi_list_lock); cipso_v4_cache_invalidate(); call_rcu(&doi_def->rcu, cipso_v4_doi_free_rcu); ret_val = 0; doi_remove_return: audit_buf = netlbl_audit_start(AUDIT_MAC_CIPSOV4_DEL, audit_info); if (audit_buf != NULL) { audit_log_format(audit_buf, " cipso_doi=%u res=%u", doi, ret_val == 0 ? 1 : 0); audit_log_end(audit_buf); } return ret_val; } /** * cipso_v4_doi_getdef - Returns a reference to a valid DOI definition * @doi: the DOI value * * Description: * Searches for a valid DOI definition and if one is found it is returned to * the caller. Otherwise NULL is returned. The caller must ensure that * rcu_read_lock() is held while accessing the returned definition and the DOI * definition reference count is decremented when the caller is done. * */ struct cipso_v4_doi *cipso_v4_doi_getdef(u32 doi) { struct cipso_v4_doi *doi_def; rcu_read_lock(); doi_def = cipso_v4_doi_search(doi); if (doi_def == NULL) goto doi_getdef_return; if (!atomic_inc_not_zero(&doi_def->refcount)) doi_def = NULL; doi_getdef_return: rcu_read_unlock(); return doi_def; } /** * cipso_v4_doi_putdef - Releases a reference for the given DOI definition * @doi_def: the DOI definition * * Description: * Releases a DOI definition reference obtained from cipso_v4_doi_getdef(). * */ void cipso_v4_doi_putdef(struct cipso_v4_doi *doi_def) { if (doi_def == NULL) return; if (!atomic_dec_and_test(&doi_def->refcount)) return; spin_lock(&cipso_v4_doi_list_lock); list_del_rcu(&doi_def->list); spin_unlock(&cipso_v4_doi_list_lock); cipso_v4_cache_invalidate(); call_rcu(&doi_def->rcu, cipso_v4_doi_free_rcu); } /** * cipso_v4_doi_walk - Iterate through the DOI definitions * @skip_cnt: skip past this number of DOI definitions, updated * @callback: callback for each DOI definition * @cb_arg: argument for the callback function * * Description: * Iterate over the DOI definition list, skipping the first @skip_cnt entries. * For each entry call @callback, if @callback returns a negative value stop * 'walking' through the list and return. Updates the value in @skip_cnt upon * return. Returns zero on success, negative values on failure. * */ int cipso_v4_doi_walk(u32 *skip_cnt, int (*callback) (struct cipso_v4_doi *doi_def, void *arg), void *cb_arg) { int ret_val = -ENOENT; u32 doi_cnt = 0; struct cipso_v4_doi *iter_doi; rcu_read_lock(); list_for_each_entry_rcu(iter_doi, &cipso_v4_doi_list, list) if (atomic_read(&iter_doi->refcount) > 0) { if (doi_cnt++ < *skip_cnt) continue; ret_val = callback(iter_doi, cb_arg); if (ret_val < 0) { doi_cnt--; goto doi_walk_return; } } doi_walk_return: rcu_read_unlock(); *skip_cnt = doi_cnt; return ret_val; } /* * Label Mapping Functions */ /** * cipso_v4_map_lvl_valid - Checks to see if the given level is understood * @doi_def: the DOI definition * @level: the level to check * * Description: * Checks the given level against the given DOI definition and returns a * negative value if the level does not have a valid mapping and a zero value * if the level is defined by the DOI. 
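 *
 * For CIPSO_V4_MAP_TRANS this is a direct lookup in the DOI's
 * lvl.cipso[] translation table; CIPSO_V4_MAP_PASS accepts any level.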
* */ static int cipso_v4_map_lvl_valid(const struct cipso_v4_doi *doi_def, u8 level) { switch (doi_def->type) { case CIPSO_V4_MAP_PASS: return 0; case CIPSO_V4_MAP_TRANS: if (doi_def->map.std->lvl.cipso[level] < CIPSO_V4_INV_LVL) return 0; break; } return -EFAULT; } /** * cipso_v4_map_lvl_hton - Perform a level mapping from the host to the network * @doi_def: the DOI definition * @host_lvl: the host MLS level * @net_lvl: the network/CIPSO MLS level * * Description: * Perform a label mapping to translate a local MLS level to the correct * CIPSO level using the given DOI definition. Returns zero on success, * negative values otherwise. * */ static int cipso_v4_map_lvl_hton(const struct cipso_v4_doi *doi_def, u32 host_lvl, u32 *net_lvl) { switch (doi_def->type) { case CIPSO_V4_MAP_PASS: *net_lvl = host_lvl; return 0; case CIPSO_V4_MAP_TRANS: if (host_lvl < doi_def->map.std->lvl.local_size && doi_def->map.std->lvl.local[host_lvl] < CIPSO_V4_INV_LVL) { *net_lvl = doi_def->map.std->lvl.local[host_lvl]; return 0; } return -EPERM; } return -EINVAL; } /** * cipso_v4_map_lvl_ntoh - Perform a level mapping from the network to the host * @doi_def: the DOI definition * @net_lvl: the network/CIPSO MLS level * @host_lvl: the host MLS level * * Description: * Perform a label mapping to translate a CIPSO level to the correct local MLS * level using the given DOI definition. Returns zero on success, negative * values otherwise. * */ static int cipso_v4_map_lvl_ntoh(const struct cipso_v4_doi *doi_def, u32 net_lvl, u32 *host_lvl) { struct cipso_v4_std_map_tbl *map_tbl; switch (doi_def->type) { case CIPSO_V4_MAP_PASS: *host_lvl = net_lvl; return 0; case CIPSO_V4_MAP_TRANS: map_tbl = doi_def->map.std; if (net_lvl < map_tbl->lvl.cipso_size && map_tbl->lvl.cipso[net_lvl] < CIPSO_V4_INV_LVL) { *host_lvl = doi_def->map.std->lvl.cipso[net_lvl]; return 0; } return -EPERM; } return -EINVAL; } /** * cipso_v4_map_cat_rbm_valid - Checks to see if the category bitmap is valid * @doi_def: the DOI definition * @bitmap: category bitmap * @bitmap_len: bitmap length in bytes * * Description: * Checks the given category bitmap against the given DOI definition and * returns a negative value if any of the categories in the bitmap do not have * a valid mapping and a zero value if all of the categories are valid. * */ static int cipso_v4_map_cat_rbm_valid(const struct cipso_v4_doi *doi_def, const unsigned char *bitmap, u32 bitmap_len) { int cat = -1; u32 bitmap_len_bits = bitmap_len * 8; u32 cipso_cat_size; u32 *cipso_array; switch (doi_def->type) { case CIPSO_V4_MAP_PASS: return 0; case CIPSO_V4_MAP_TRANS: cipso_cat_size = doi_def->map.std->cat.cipso_size; cipso_array = doi_def->map.std->cat.cipso; for (;;) { cat = cipso_v4_bitmap_walk(bitmap, bitmap_len_bits, cat + 1, 1); if (cat < 0) break; if (cat >= cipso_cat_size || cipso_array[cat] >= CIPSO_V4_INV_CAT) return -EFAULT; } if (cat == -1) return 0; break; } return -EFAULT; } /** * cipso_v4_map_cat_rbm_hton - Perform a category mapping from host to network * @doi_def: the DOI definition * @secattr: the security attributes * @net_cat: the zero'd out category bitmap in network/CIPSO format * @net_cat_len: the length of the CIPSO bitmap in bytes * * Description: * Perform a label mapping to translate a local MLS category bitmap to the * correct CIPSO bitmap using the given DOI definition. Returns the minimum * size in bytes of the network bitmap on success, negative values otherwise. 
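 *
 * For example, if the highest category bit set in the network bitmap is
 * bit 17, the reported minimum size is 3 bytes (bits 0 through 23).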
* */ static int cipso_v4_map_cat_rbm_hton(const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr, unsigned char *net_cat, u32 net_cat_len) { int host_spot = -1; u32 net_spot = CIPSO_V4_INV_CAT; u32 net_spot_max = 0; u32 net_clen_bits = net_cat_len * 8; u32 host_cat_size = 0; u32 *host_cat_array = NULL; if (doi_def->type == CIPSO_V4_MAP_TRANS) { host_cat_size = doi_def->map.std->cat.local_size; host_cat_array = doi_def->map.std->cat.local; } for (;;) { host_spot = netlbl_secattr_catmap_walk(secattr->attr.mls.cat, host_spot + 1); if (host_spot < 0) break; switch (doi_def->type) { case CIPSO_V4_MAP_PASS: net_spot = host_spot; break; case CIPSO_V4_MAP_TRANS: if (host_spot >= host_cat_size) return -EPERM; net_spot = host_cat_array[host_spot]; if (net_spot >= CIPSO_V4_INV_CAT) return -EPERM; break; } if (net_spot >= net_clen_bits) return -ENOSPC; cipso_v4_bitmap_setbit(net_cat, net_spot, 1); if (net_spot > net_spot_max) net_spot_max = net_spot; } if (++net_spot_max % 8) return net_spot_max / 8 + 1; return net_spot_max / 8; } /** * cipso_v4_map_cat_rbm_ntoh - Perform a category mapping from network to host * @doi_def: the DOI definition * @net_cat: the category bitmap in network/CIPSO format * @net_cat_len: the length of the CIPSO bitmap in bytes * @secattr: the security attributes * * Description: * Perform a label mapping to translate a CIPSO bitmap to the correct local * MLS category bitmap using the given DOI definition. Returns zero on * success, negative values on failure. * */ static int cipso_v4_map_cat_rbm_ntoh(const struct cipso_v4_doi *doi_def, const unsigned char *net_cat, u32 net_cat_len, struct netlbl_lsm_secattr *secattr) { int ret_val; int net_spot = -1; u32 host_spot = CIPSO_V4_INV_CAT; u32 net_clen_bits = net_cat_len * 8; u32 net_cat_size = 0; u32 *net_cat_array = NULL; if (doi_def->type == CIPSO_V4_MAP_TRANS) { net_cat_size = doi_def->map.std->cat.cipso_size; net_cat_array = doi_def->map.std->cat.cipso; } for (;;) { net_spot = cipso_v4_bitmap_walk(net_cat, net_clen_bits, net_spot + 1, 1); if (net_spot < 0) { if (net_spot == -2) return -EFAULT; return 0; } switch (doi_def->type) { case CIPSO_V4_MAP_PASS: host_spot = net_spot; break; case CIPSO_V4_MAP_TRANS: if (net_spot >= net_cat_size) return -EPERM; host_spot = net_cat_array[net_spot]; if (host_spot >= CIPSO_V4_INV_CAT) return -EPERM; break; } ret_val = netlbl_secattr_catmap_setbit(secattr->attr.mls.cat, host_spot, GFP_ATOMIC); if (ret_val != 0) return ret_val; } return -EINVAL; } /** * cipso_v4_map_cat_enum_valid - Checks to see if the categories are valid * @doi_def: the DOI definition * @enumcat: category list * @enumcat_len: length of the category list in bytes * * Description: * Checks the given categories against the given DOI definition and returns a * negative value if any of the categories do not have a valid mapping and a * zero value if all of the categories are valid. 
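 *
 * Category values are 16-bit big-endian integers and must appear in
 * strictly ascending order for the list to be considered valid.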
* */ static int cipso_v4_map_cat_enum_valid(const struct cipso_v4_doi *doi_def, const unsigned char *enumcat, u32 enumcat_len) { u16 cat; int cat_prev = -1; u32 iter; if (doi_def->type != CIPSO_V4_MAP_PASS || enumcat_len & 0x01) return -EFAULT; for (iter = 0; iter < enumcat_len; iter += 2) { cat = get_unaligned_be16(&enumcat[iter]); if (cat <= cat_prev) return -EFAULT; cat_prev = cat; } return 0; } /** * cipso_v4_map_cat_enum_hton - Perform a category mapping from host to network * @doi_def: the DOI definition * @secattr: the security attributes * @net_cat: the zero'd out category list in network/CIPSO format * @net_cat_len: the length of the CIPSO category list in bytes * * Description: * Perform a label mapping to translate a local MLS category bitmap to the * correct CIPSO category list using the given DOI definition. Returns the * size in bytes of the network category bitmap on success, negative values * otherwise. * */ static int cipso_v4_map_cat_enum_hton(const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr, unsigned char *net_cat, u32 net_cat_len) { int cat = -1; u32 cat_iter = 0; for (;;) { cat = netlbl_secattr_catmap_walk(secattr->attr.mls.cat, cat + 1); if (cat < 0) break; if ((cat_iter + 2) > net_cat_len) return -ENOSPC; *((__be16 *)&net_cat[cat_iter]) = htons(cat); cat_iter += 2; } return cat_iter; } /** * cipso_v4_map_cat_enum_ntoh - Perform a category mapping from network to host * @doi_def: the DOI definition * @net_cat: the category list in network/CIPSO format * @net_cat_len: the length of the CIPSO bitmap in bytes * @secattr: the security attributes * * Description: * Perform a label mapping to translate a CIPSO category list to the correct * local MLS category bitmap using the given DOI definition. Returns zero on * success, negative values on failure. * */ static int cipso_v4_map_cat_enum_ntoh(const struct cipso_v4_doi *doi_def, const unsigned char *net_cat, u32 net_cat_len, struct netlbl_lsm_secattr *secattr) { int ret_val; u32 iter; for (iter = 0; iter < net_cat_len; iter += 2) { ret_val = netlbl_secattr_catmap_setbit(secattr->attr.mls.cat, get_unaligned_be16(&net_cat[iter]), GFP_ATOMIC); if (ret_val != 0) return ret_val; } return 0; } /** * cipso_v4_map_cat_rng_valid - Checks to see if the categories are valid * @doi_def: the DOI definition * @rngcat: category list * @rngcat_len: length of the category list in bytes * * Description: * Checks the given categories against the given DOI definition and returns a * negative value if any of the categories do not have a valid mapping and a * zero value if all of the categories are valid. 
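 *
 * Each range is carried as a (high, low) pair of 16-bit big-endian
 * values, the ranges are ordered from highest to lowest, and a trailing
 * lone "high" value implies a low end of zero.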
* */ static int cipso_v4_map_cat_rng_valid(const struct cipso_v4_doi *doi_def, const unsigned char *rngcat, u32 rngcat_len) { u16 cat_high; u16 cat_low; u32 cat_prev = CIPSO_V4_MAX_REM_CATS + 1; u32 iter; if (doi_def->type != CIPSO_V4_MAP_PASS || rngcat_len & 0x01) return -EFAULT; for (iter = 0; iter < rngcat_len; iter += 4) { cat_high = get_unaligned_be16(&rngcat[iter]); if ((iter + 4) <= rngcat_len) cat_low = get_unaligned_be16(&rngcat[iter + 2]); else cat_low = 0; if (cat_high > cat_prev) return -EFAULT; cat_prev = cat_low; } return 0; } /** * cipso_v4_map_cat_rng_hton - Perform a category mapping from host to network * @doi_def: the DOI definition * @secattr: the security attributes * @net_cat: the zero'd out category list in network/CIPSO format * @net_cat_len: the length of the CIPSO category list in bytes * * Description: * Perform a label mapping to translate a local MLS category bitmap to the * correct CIPSO category list using the given DOI definition. Returns the * size in bytes of the network category bitmap on success, negative values * otherwise. * */ static int cipso_v4_map_cat_rng_hton(const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr, unsigned char *net_cat, u32 net_cat_len) { int iter = -1; u16 array[CIPSO_V4_TAG_RNG_CAT_MAX * 2]; u32 array_cnt = 0; u32 cat_size = 0; /* make sure we don't overflow the 'array[]' variable */ if (net_cat_len > (CIPSO_V4_OPT_LEN_MAX - CIPSO_V4_HDR_LEN - CIPSO_V4_TAG_RNG_BLEN)) return -ENOSPC; for (;;) { iter = netlbl_secattr_catmap_walk(secattr->attr.mls.cat, iter + 1); if (iter < 0) break; cat_size += (iter == 0 ? 0 : sizeof(u16)); if (cat_size > net_cat_len) return -ENOSPC; array[array_cnt++] = iter; iter = netlbl_secattr_catmap_walk_rng(secattr->attr.mls.cat, iter); if (iter < 0) return -EFAULT; cat_size += sizeof(u16); if (cat_size > net_cat_len) return -ENOSPC; array[array_cnt++] = iter; } for (iter = 0; array_cnt > 0;) { *((__be16 *)&net_cat[iter]) = htons(array[--array_cnt]); iter += 2; array_cnt--; if (array[array_cnt] != 0) { *((__be16 *)&net_cat[iter]) = htons(array[array_cnt]); iter += 2; } } return cat_size; } /** * cipso_v4_map_cat_rng_ntoh - Perform a category mapping from network to host * @doi_def: the DOI definition * @net_cat: the category list in network/CIPSO format * @net_cat_len: the length of the CIPSO bitmap in bytes * @secattr: the security attributes * * Description: * Perform a label mapping to translate a CIPSO category list to the correct * local MLS category bitmap using the given DOI definition. Returns zero on * success, negative values on failure. * */ static int cipso_v4_map_cat_rng_ntoh(const struct cipso_v4_doi *doi_def, const unsigned char *net_cat, u32 net_cat_len, struct netlbl_lsm_secattr *secattr) { int ret_val; u32 net_iter; u16 cat_low; u16 cat_high; for (net_iter = 0; net_iter < net_cat_len; net_iter += 4) { cat_high = get_unaligned_be16(&net_cat[net_iter]); if ((net_iter + 4) <= net_cat_len) cat_low = get_unaligned_be16(&net_cat[net_iter + 2]); else cat_low = 0; ret_val = netlbl_secattr_catmap_setrng(secattr->attr.mls.cat, cat_low, cat_high, GFP_ATOMIC); if (ret_val != 0) return ret_val; } return 0; } /* * Protocol Handling Functions */ /** * cipso_v4_gentag_hdr - Generate a CIPSO option header * @doi_def: the DOI definition * @len: the total tag length in bytes, not including this header * @buf: the CIPSO option buffer * * Description: * Write a CIPSO header into the beginning of @buffer. 
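 *
 * The resulting header occupies CIPSO_V4_HDR_LEN (6) bytes:
 *
 *	buf[0]    IPOPT_CIPSO
 *	buf[1]    total option length (CIPSO_V4_HDR_LEN + len)
 *	buf[2-5]  DOI value (32-bit, big endian)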
* */ static void cipso_v4_gentag_hdr(const struct cipso_v4_doi *doi_def, unsigned char *buf, u32 len) { buf[0] = IPOPT_CIPSO; buf[1] = CIPSO_V4_HDR_LEN + len; *(__be32 *)&buf[2] = htonl(doi_def->doi); } /** * cipso_v4_gentag_rbm - Generate a CIPSO restricted bitmap tag (type #1) * @doi_def: the DOI definition * @secattr: the security attributes * @buffer: the option buffer * @buffer_len: length of buffer in bytes * * Description: * Generate a CIPSO option using the restricted bitmap tag, tag type #1. The * actual buffer length may be larger than the indicated size due to * translation between host and network category bitmaps. Returns the size of * the tag on success, negative values on failure. * */ static int cipso_v4_gentag_rbm(const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr, unsigned char *buffer, u32 buffer_len) { int ret_val; u32 tag_len; u32 level; if ((secattr->flags & NETLBL_SECATTR_MLS_LVL) == 0) return -EPERM; ret_val = cipso_v4_map_lvl_hton(doi_def, secattr->attr.mls.lvl, &level); if (ret_val != 0) return ret_val; if (secattr->flags & NETLBL_SECATTR_MLS_CAT) { ret_val = cipso_v4_map_cat_rbm_hton(doi_def, secattr, &buffer[4], buffer_len - 4); if (ret_val < 0) return ret_val; /* This will send packets using the "optimized" format when * possible as specified in section 3.4.2.6 of the * CIPSO draft. */ if (cipso_v4_rbm_optfmt && ret_val > 0 && ret_val <= 10) tag_len = 14; else tag_len = 4 + ret_val; } else tag_len = 4; buffer[0] = CIPSO_V4_TAG_RBITMAP; buffer[1] = tag_len; buffer[3] = level; return tag_len; } /** * cipso_v4_parsetag_rbm - Parse a CIPSO restricted bitmap tag * @doi_def: the DOI definition * @tag: the CIPSO tag * @secattr: the security attributes * * Description: * Parse a CIPSO restricted bitmap tag (tag type #1) and return the security * attributes in @secattr. Return zero on success, negatives values on * failure. * */ static int cipso_v4_parsetag_rbm(const struct cipso_v4_doi *doi_def, const unsigned char *tag, struct netlbl_lsm_secattr *secattr) { int ret_val; u8 tag_len = tag[1]; u32 level; ret_val = cipso_v4_map_lvl_ntoh(doi_def, tag[3], &level); if (ret_val != 0) return ret_val; secattr->attr.mls.lvl = level; secattr->flags |= NETLBL_SECATTR_MLS_LVL; if (tag_len > 4) { secattr->attr.mls.cat = netlbl_secattr_catmap_alloc(GFP_ATOMIC); if (secattr->attr.mls.cat == NULL) return -ENOMEM; ret_val = cipso_v4_map_cat_rbm_ntoh(doi_def, &tag[4], tag_len - 4, secattr); if (ret_val != 0) { netlbl_secattr_catmap_free(secattr->attr.mls.cat); return ret_val; } secattr->flags |= NETLBL_SECATTR_MLS_CAT; } return 0; } /** * cipso_v4_gentag_enum - Generate a CIPSO enumerated tag (type #2) * @doi_def: the DOI definition * @secattr: the security attributes * @buffer: the option buffer * @buffer_len: length of buffer in bytes * * Description: * Generate a CIPSO option using the enumerated tag, tag type #2. Returns the * size of the tag on success, negative values on failure. 
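 *
 * The generated tag has the layout below (buffer[2] is left zero):
 *
 *	buffer[0]   CIPSO_V4_TAG_ENUM
 *	buffer[1]   tag length
 *	buffer[3]   mapped level
 *	buffer[4+]  16-bit big-endian category values, if any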
* */ static int cipso_v4_gentag_enum(const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr, unsigned char *buffer, u32 buffer_len) { int ret_val; u32 tag_len; u32 level; if (!(secattr->flags & NETLBL_SECATTR_MLS_LVL)) return -EPERM; ret_val = cipso_v4_map_lvl_hton(doi_def, secattr->attr.mls.lvl, &level); if (ret_val != 0) return ret_val; if (secattr->flags & NETLBL_SECATTR_MLS_CAT) { ret_val = cipso_v4_map_cat_enum_hton(doi_def, secattr, &buffer[4], buffer_len - 4); if (ret_val < 0) return ret_val; tag_len = 4 + ret_val; } else tag_len = 4; buffer[0] = CIPSO_V4_TAG_ENUM; buffer[1] = tag_len; buffer[3] = level; return tag_len; } /** * cipso_v4_parsetag_enum - Parse a CIPSO enumerated tag * @doi_def: the DOI definition * @tag: the CIPSO tag * @secattr: the security attributes * * Description: * Parse a CIPSO enumerated tag (tag type #2) and return the security * attributes in @secattr. Return zero on success, negatives values on * failure. * */ static int cipso_v4_parsetag_enum(const struct cipso_v4_doi *doi_def, const unsigned char *tag, struct netlbl_lsm_secattr *secattr) { int ret_val; u8 tag_len = tag[1]; u32 level; ret_val = cipso_v4_map_lvl_ntoh(doi_def, tag[3], &level); if (ret_val != 0) return ret_val; secattr->attr.mls.lvl = level; secattr->flags |= NETLBL_SECATTR_MLS_LVL; if (tag_len > 4) { secattr->attr.mls.cat = netlbl_secattr_catmap_alloc(GFP_ATOMIC); if (secattr->attr.mls.cat == NULL) return -ENOMEM; ret_val = cipso_v4_map_cat_enum_ntoh(doi_def, &tag[4], tag_len - 4, secattr); if (ret_val != 0) { netlbl_secattr_catmap_free(secattr->attr.mls.cat); return ret_val; } secattr->flags |= NETLBL_SECATTR_MLS_CAT; } return 0; } /** * cipso_v4_gentag_rng - Generate a CIPSO ranged tag (type #5) * @doi_def: the DOI definition * @secattr: the security attributes * @buffer: the option buffer * @buffer_len: length of buffer in bytes * * Description: * Generate a CIPSO option using the ranged tag, tag type #5. Returns the * size of the tag on success, negative values on failure. * */ static int cipso_v4_gentag_rng(const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr, unsigned char *buffer, u32 buffer_len) { int ret_val; u32 tag_len; u32 level; if (!(secattr->flags & NETLBL_SECATTR_MLS_LVL)) return -EPERM; ret_val = cipso_v4_map_lvl_hton(doi_def, secattr->attr.mls.lvl, &level); if (ret_val != 0) return ret_val; if (secattr->flags & NETLBL_SECATTR_MLS_CAT) { ret_val = cipso_v4_map_cat_rng_hton(doi_def, secattr, &buffer[4], buffer_len - 4); if (ret_val < 0) return ret_val; tag_len = 4 + ret_val; } else tag_len = 4; buffer[0] = CIPSO_V4_TAG_RANGE; buffer[1] = tag_len; buffer[3] = level; return tag_len; } /** * cipso_v4_parsetag_rng - Parse a CIPSO ranged tag * @doi_def: the DOI definition * @tag: the CIPSO tag * @secattr: the security attributes * * Description: * Parse a CIPSO ranged tag (tag type #5) and return the security attributes * in @secattr. Return zero on success, negatives values on failure. 
* */ static int cipso_v4_parsetag_rng(const struct cipso_v4_doi *doi_def, const unsigned char *tag, struct netlbl_lsm_secattr *secattr) { int ret_val; u8 tag_len = tag[1]; u32 level; ret_val = cipso_v4_map_lvl_ntoh(doi_def, tag[3], &level); if (ret_val != 0) return ret_val; secattr->attr.mls.lvl = level; secattr->flags |= NETLBL_SECATTR_MLS_LVL; if (tag_len > 4) { secattr->attr.mls.cat = netlbl_secattr_catmap_alloc(GFP_ATOMIC); if (secattr->attr.mls.cat == NULL) return -ENOMEM; ret_val = cipso_v4_map_cat_rng_ntoh(doi_def, &tag[4], tag_len - 4, secattr); if (ret_val != 0) { netlbl_secattr_catmap_free(secattr->attr.mls.cat); return ret_val; } secattr->flags |= NETLBL_SECATTR_MLS_CAT; } return 0; } /** * cipso_v4_gentag_loc - Generate a CIPSO local tag (non-standard) * @doi_def: the DOI definition * @secattr: the security attributes * @buffer: the option buffer * @buffer_len: length of buffer in bytes * * Description: * Generate a CIPSO option using the local tag. Returns the size of the tag * on success, negative values on failure. * */ static int cipso_v4_gentag_loc(const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr, unsigned char *buffer, u32 buffer_len) { if (!(secattr->flags & NETLBL_SECATTR_SECID)) return -EPERM; buffer[0] = CIPSO_V4_TAG_LOCAL; buffer[1] = CIPSO_V4_TAG_LOC_BLEN; *(u32 *)&buffer[2] = secattr->attr.secid; return CIPSO_V4_TAG_LOC_BLEN; } /** * cipso_v4_parsetag_loc - Parse a CIPSO local tag * @doi_def: the DOI definition * @tag: the CIPSO tag * @secattr: the security attributes * * Description: * Parse a CIPSO local tag and return the security attributes in @secattr. * Return zero on success, negatives values on failure. * */ static int cipso_v4_parsetag_loc(const struct cipso_v4_doi *doi_def, const unsigned char *tag, struct netlbl_lsm_secattr *secattr) { secattr->attr.secid = *(u32 *)&tag[2]; secattr->flags |= NETLBL_SECATTR_SECID; return 0; } /** * cipso_v4_validate - Validate a CIPSO option * @option: the start of the option, on error it is set to point to the error * * Description: * This routine is called to validate a CIPSO option, it checks all of the * fields to ensure that they are at least valid, see the draft snippet below * for details. If the option is valid then a zero value is returned and * the value of @option is unchanged. If the option is invalid then a * non-zero value is returned and @option is adjusted to point to the * offending portion of the option. From the IETF draft ... * * "If any field within the CIPSO options, such as the DOI identifier, is not * recognized the IP datagram is discarded and an ICMP 'parameter problem' * (type 12) is generated and returned. The ICMP code field is set to 'bad * parameter' (code 0) and the pointer is set to the start of the CIPSO field * that is unrecognized." 
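 *
 * Note that the non-zero return value doubles as the offset used to
 * adjust @option, so a return value of zero always means the option
 * validated cleanly.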
* */ int cipso_v4_validate(const struct sk_buff *skb, unsigned char **option) { unsigned char *opt = *option; unsigned char *tag; unsigned char opt_iter; unsigned char err_offset = 0; u8 opt_len; u8 tag_len; struct cipso_v4_doi *doi_def = NULL; u32 tag_iter; /* caller already checks for length values that are too large */ opt_len = opt[1]; if (opt_len < 8) { err_offset = 1; goto validate_return; } rcu_read_lock(); doi_def = cipso_v4_doi_search(get_unaligned_be32(&opt[2])); if (doi_def == NULL) { err_offset = 2; goto validate_return_locked; } opt_iter = CIPSO_V4_HDR_LEN; tag = opt + opt_iter; while (opt_iter < opt_len) { for (tag_iter = 0; doi_def->tags[tag_iter] != tag[0];) if (doi_def->tags[tag_iter] == CIPSO_V4_TAG_INVALID || ++tag_iter == CIPSO_V4_TAG_MAXCNT) { err_offset = opt_iter; goto validate_return_locked; } tag_len = tag[1]; if (tag_len > (opt_len - opt_iter)) { err_offset = opt_iter + 1; goto validate_return_locked; } switch (tag[0]) { case CIPSO_V4_TAG_RBITMAP: if (tag_len < CIPSO_V4_TAG_RBM_BLEN) { err_offset = opt_iter + 1; goto validate_return_locked; } /* We are already going to do all the verification * necessary at the socket layer so from our point of * view it is safe to turn these checks off (and less * work), however, the CIPSO draft says we should do * all the CIPSO validations here but it doesn't * really specify _exactly_ what we need to validate * ... so, just make it a sysctl tunable. */ if (cipso_v4_rbm_strictvalid) { if (cipso_v4_map_lvl_valid(doi_def, tag[3]) < 0) { err_offset = opt_iter + 3; goto validate_return_locked; } if (tag_len > CIPSO_V4_TAG_RBM_BLEN && cipso_v4_map_cat_rbm_valid(doi_def, &tag[4], tag_len - 4) < 0) { err_offset = opt_iter + 4; goto validate_return_locked; } } break; case CIPSO_V4_TAG_ENUM: if (tag_len < CIPSO_V4_TAG_ENUM_BLEN) { err_offset = opt_iter + 1; goto validate_return_locked; } if (cipso_v4_map_lvl_valid(doi_def, tag[3]) < 0) { err_offset = opt_iter + 3; goto validate_return_locked; } if (tag_len > CIPSO_V4_TAG_ENUM_BLEN && cipso_v4_map_cat_enum_valid(doi_def, &tag[4], tag_len - 4) < 0) { err_offset = opt_iter + 4; goto validate_return_locked; } break; case CIPSO_V4_TAG_RANGE: if (tag_len < CIPSO_V4_TAG_RNG_BLEN) { err_offset = opt_iter + 1; goto validate_return_locked; } if (cipso_v4_map_lvl_valid(doi_def, tag[3]) < 0) { err_offset = opt_iter + 3; goto validate_return_locked; } if (tag_len > CIPSO_V4_TAG_RNG_BLEN && cipso_v4_map_cat_rng_valid(doi_def, &tag[4], tag_len - 4) < 0) { err_offset = opt_iter + 4; goto validate_return_locked; } break; case CIPSO_V4_TAG_LOCAL: /* This is a non-standard tag that we only allow for * local connections, so if the incoming interface is * not the loopback device drop the packet. */ if (!(skb->dev->flags & IFF_LOOPBACK)) { err_offset = opt_iter; goto validate_return_locked; } if (tag_len != CIPSO_V4_TAG_LOC_BLEN) { err_offset = opt_iter + 1; goto validate_return_locked; } break; default: err_offset = opt_iter; goto validate_return_locked; } tag += tag_len; opt_iter += tag_len; } validate_return_locked: rcu_read_unlock(); validate_return: *option = opt + err_offset; return err_offset; } /** * cipso_v4_error - Send the correct response for a bad packet * @skb: the packet * @error: the error code * @gateway: CIPSO gateway flag * * Description: * Based on the error code given in @error, send an ICMP error message back to * the originating host. From the IETF draft ... 
* * "If the contents of the CIPSO [option] are valid but the security label is * outside of the configured host or port label range, the datagram is * discarded and an ICMP 'destination unreachable' (type 3) is generated and * returned. The code field of the ICMP is set to 'communication with * destination network administratively prohibited' (code 9) or to * 'communication with destination host administratively prohibited' * (code 10). The value of the code is dependent on whether the originator * of the ICMP message is acting as a CIPSO host or a CIPSO gateway. The * recipient of the ICMP message MUST be able to handle either value. The * same procedure is performed if a CIPSO [option] can not be added to an * IP packet because it is too large to fit in the IP options area." * * "If the error is triggered by receipt of an ICMP message, the message is * discarded and no response is permitted (consistent with general ICMP * processing rules)." * */ void cipso_v4_error(struct sk_buff *skb, int error, u32 gateway) { if (ip_hdr(skb)->protocol == IPPROTO_ICMP || error != -EACCES) return; if (gateway) icmp_send(skb, ICMP_DEST_UNREACH, ICMP_NET_ANO, 0); else icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_ANO, 0); } /** * cipso_v4_genopt - Generate a CIPSO option * @buf: the option buffer * @buf_len: the size of opt_buf * @doi_def: the CIPSO DOI to use * @secattr: the security attributes * * Description: * Generate a CIPSO option using the DOI definition and security attributes * passed to the function. Returns the length of the option on success and * negative values on failure. * */ static int cipso_v4_genopt(unsigned char *buf, u32 buf_len, const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr) { int ret_val; u32 iter; if (buf_len <= CIPSO_V4_HDR_LEN) return -ENOSPC; /* XXX - This code assumes only one tag per CIPSO option which isn't * really a good assumption to make but since we only support the MAC * tags right now it is a safe assumption. */ iter = 0; do { memset(buf, 0, buf_len); switch (doi_def->tags[iter]) { case CIPSO_V4_TAG_RBITMAP: ret_val = cipso_v4_gentag_rbm(doi_def, secattr, &buf[CIPSO_V4_HDR_LEN], buf_len - CIPSO_V4_HDR_LEN); break; case CIPSO_V4_TAG_ENUM: ret_val = cipso_v4_gentag_enum(doi_def, secattr, &buf[CIPSO_V4_HDR_LEN], buf_len - CIPSO_V4_HDR_LEN); break; case CIPSO_V4_TAG_RANGE: ret_val = cipso_v4_gentag_rng(doi_def, secattr, &buf[CIPSO_V4_HDR_LEN], buf_len - CIPSO_V4_HDR_LEN); break; case CIPSO_V4_TAG_LOCAL: ret_val = cipso_v4_gentag_loc(doi_def, secattr, &buf[CIPSO_V4_HDR_LEN], buf_len - CIPSO_V4_HDR_LEN); break; default: return -EPERM; } iter++; } while (ret_val < 0 && iter < CIPSO_V4_TAG_MAXCNT && doi_def->tags[iter] != CIPSO_V4_TAG_INVALID); if (ret_val < 0) return ret_val; cipso_v4_gentag_hdr(doi_def, buf, ret_val); return CIPSO_V4_HDR_LEN + ret_val; } static void opt_kfree_rcu(struct rcu_head *head) { kfree(container_of(head, struct ip_options_rcu, rcu)); } /** * cipso_v4_sock_setattr - Add a CIPSO option to a socket * @sk: the socket * @doi_def: the CIPSO DOI to use * @secattr: the specific security attributes of the socket * * Description: * Set the CIPSO option on the given socket using the DOI definition and * security attributes passed to the function. This function requires * exclusive access to @sk, which means it either needs to be in the * process of being created or locked. Returns zero on success and negative * values on failure. 
* */ int cipso_v4_sock_setattr(struct sock *sk, const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr) { int ret_val = -EPERM; unsigned char *buf = NULL; u32 buf_len; u32 opt_len; struct ip_options_rcu *old, *opt = NULL; struct inet_sock *sk_inet; struct inet_connection_sock *sk_conn; /* In the case of sock_create_lite(), the sock->sk field is not * defined yet but it is not a problem as the only users of these * "lite" PF_INET sockets are functions which do an accept() call * afterwards so we will label the socket as part of the accept(). */ if (sk == NULL) return 0; /* We allocate the maximum CIPSO option size here so we are probably * being a little wasteful, but it makes our life _much_ easier later * on and after all we are only talking about 40 bytes. */ buf_len = CIPSO_V4_OPT_LEN_MAX; buf = kmalloc(buf_len, GFP_ATOMIC); if (buf == NULL) { ret_val = -ENOMEM; goto socket_setattr_failure; } ret_val = cipso_v4_genopt(buf, buf_len, doi_def, secattr); if (ret_val < 0) goto socket_setattr_failure; buf_len = ret_val; /* We can't use ip_options_get() directly because it makes a call to * ip_options_get_alloc() which allocates memory with GFP_KERNEL and * we won't always have CAP_NET_RAW even though we _always_ want to * set the IPOPT_CIPSO option. */ opt_len = (buf_len + 3) & ~3; opt = kzalloc(sizeof(*opt) + opt_len, GFP_ATOMIC); if (opt == NULL) { ret_val = -ENOMEM; goto socket_setattr_failure; } memcpy(opt->opt.__data, buf, buf_len); opt->opt.optlen = opt_len; opt->opt.cipso = sizeof(struct iphdr); kfree(buf); buf = NULL; sk_inet = inet_sk(sk); old = rcu_dereference_protected(sk_inet->inet_opt, sock_owned_by_user(sk)); if (sk_inet->is_icsk) { sk_conn = inet_csk(sk); if (old) sk_conn->icsk_ext_hdr_len -= old->opt.optlen; sk_conn->icsk_ext_hdr_len += opt->opt.optlen; sk_conn->icsk_sync_mss(sk, sk_conn->icsk_pmtu_cookie); } rcu_assign_pointer(sk_inet->inet_opt, opt); if (old) call_rcu(&old->rcu, opt_kfree_rcu); return 0; socket_setattr_failure: kfree(buf); kfree(opt); return ret_val; } /** * cipso_v4_req_setattr - Add a CIPSO option to a connection request socket * @req: the connection request socket * @doi_def: the CIPSO DOI to use * @secattr: the specific security attributes of the socket * * Description: * Set the CIPSO option on the given socket using the DOI definition and * security attributes passed to the function. Returns zero on success and * negative values on failure. * */ int cipso_v4_req_setattr(struct request_sock *req, const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr) { int ret_val = -EPERM; unsigned char *buf = NULL; u32 buf_len; u32 opt_len; struct ip_options_rcu *opt = NULL; struct inet_request_sock *req_inet; /* We allocate the maximum CIPSO option size here so we are probably * being a little wasteful, but it makes our life _much_ easier later * on and after all we are only talking about 40 bytes. */ buf_len = CIPSO_V4_OPT_LEN_MAX; buf = kmalloc(buf_len, GFP_ATOMIC); if (buf == NULL) { ret_val = -ENOMEM; goto req_setattr_failure; } ret_val = cipso_v4_genopt(buf, buf_len, doi_def, secattr); if (ret_val < 0) goto req_setattr_failure; buf_len = ret_val; /* We can't use ip_options_get() directly because it makes a call to * ip_options_get_alloc() which allocates memory with GFP_KERNEL and * we won't always have CAP_NET_RAW even though we _always_ want to * set the IPOPT_CIPSO option. 
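 *
 * (The previous option block, if any, is detached below with xchg() and
 *  released through call_rcu(), so concurrent readers holding
 *  rcu_read_lock() never see it freed under them.)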
*/ opt_len = (buf_len + 3) & ~3; opt = kzalloc(sizeof(*opt) + opt_len, GFP_ATOMIC); if (opt == NULL) { ret_val = -ENOMEM; goto req_setattr_failure; } memcpy(opt->opt.__data, buf, buf_len); opt->opt.optlen = opt_len; opt->opt.cipso = sizeof(struct iphdr); kfree(buf); buf = NULL; req_inet = inet_rsk(req); opt = xchg(&req_inet->opt, opt); if (opt) call_rcu(&opt->rcu, opt_kfree_rcu); return 0; req_setattr_failure: kfree(buf); kfree(opt); return ret_val; } /** * cipso_v4_delopt - Delete the CIPSO option from a set of IP options * @opt_ptr: IP option pointer * * Description: * Deletes the CIPSO IP option from a set of IP options and makes the necessary * adjustments to the IP option structure. Returns zero on success, negative * values on failure. * */ static int cipso_v4_delopt(struct ip_options_rcu **opt_ptr) { int hdr_delta = 0; struct ip_options_rcu *opt = *opt_ptr; if (opt->opt.srr || opt->opt.rr || opt->opt.ts || opt->opt.router_alert) { u8 cipso_len; u8 cipso_off; unsigned char *cipso_ptr; int iter; int optlen_new; cipso_off = opt->opt.cipso - sizeof(struct iphdr); cipso_ptr = &opt->opt.__data[cipso_off]; cipso_len = cipso_ptr[1]; if (opt->opt.srr > opt->opt.cipso) opt->opt.srr -= cipso_len; if (opt->opt.rr > opt->opt.cipso) opt->opt.rr -= cipso_len; if (opt->opt.ts > opt->opt.cipso) opt->opt.ts -= cipso_len; if (opt->opt.router_alert > opt->opt.cipso) opt->opt.router_alert -= cipso_len; opt->opt.cipso = 0; memmove(cipso_ptr, cipso_ptr + cipso_len, opt->opt.optlen - cipso_off - cipso_len); /* determining the new total option length is tricky because of * the padding necessary, the only thing i can think to do at * this point is walk the options one-by-one, skipping the * padding at the end to determine the actual option size and * from there we can determine the new total option length */ iter = 0; optlen_new = 0; while (iter < opt->opt.optlen) if (opt->opt.__data[iter] != IPOPT_NOP) { iter += opt->opt.__data[iter + 1]; optlen_new = iter; } else iter++; hdr_delta = opt->opt.optlen; opt->opt.optlen = (optlen_new + 3) & ~3; hdr_delta -= opt->opt.optlen; } else { /* only the cipso option was present on the socket so we can * remove the entire option struct */ *opt_ptr = NULL; hdr_delta = opt->opt.optlen; call_rcu(&opt->rcu, opt_kfree_rcu); } return hdr_delta; } /** * cipso_v4_sock_delattr - Delete the CIPSO option from a socket * @sk: the socket * * Description: * Removes the CIPSO option from a socket, if present. * */ void cipso_v4_sock_delattr(struct sock *sk) { int hdr_delta; struct ip_options_rcu *opt; struct inet_sock *sk_inet; sk_inet = inet_sk(sk); opt = rcu_dereference_protected(sk_inet->inet_opt, 1); if (opt == NULL || opt->opt.cipso == 0) return; hdr_delta = cipso_v4_delopt(&sk_inet->inet_opt); if (sk_inet->is_icsk && hdr_delta > 0) { struct inet_connection_sock *sk_conn = inet_csk(sk); sk_conn->icsk_ext_hdr_len -= hdr_delta; sk_conn->icsk_sync_mss(sk, sk_conn->icsk_pmtu_cookie); } } /** * cipso_v4_req_delattr - Delete the CIPSO option from a request socket * @reg: the request socket * * Description: * Removes the CIPSO option from a request socket, if present. 
* */ void cipso_v4_req_delattr(struct request_sock *req) { struct ip_options_rcu *opt; struct inet_request_sock *req_inet; req_inet = inet_rsk(req); opt = req_inet->opt; if (opt == NULL || opt->opt.cipso == 0) return; cipso_v4_delopt(&req_inet->opt); } /** * cipso_v4_getattr - Helper function for the cipso_v4_*_getattr functions * @cipso: the CIPSO v4 option * @secattr: the security attributes * * Description: * Inspect @cipso and return the security attributes in @secattr. Returns zero * on success and negative values on failure. * */ static int cipso_v4_getattr(const unsigned char *cipso, struct netlbl_lsm_secattr *secattr) { int ret_val = -ENOMSG; u32 doi; struct cipso_v4_doi *doi_def; if (cipso_v4_cache_check(cipso, cipso[1], secattr) == 0) return 0; doi = get_unaligned_be32(&cipso[2]); rcu_read_lock(); doi_def = cipso_v4_doi_search(doi); if (doi_def == NULL) goto getattr_return; /* XXX - This code assumes only one tag per CIPSO option which isn't * really a good assumption to make but since we only support the MAC * tags right now it is a safe assumption. */ switch (cipso[6]) { case CIPSO_V4_TAG_RBITMAP: ret_val = cipso_v4_parsetag_rbm(doi_def, &cipso[6], secattr); break; case CIPSO_V4_TAG_ENUM: ret_val = cipso_v4_parsetag_enum(doi_def, &cipso[6], secattr); break; case CIPSO_V4_TAG_RANGE: ret_val = cipso_v4_parsetag_rng(doi_def, &cipso[6], secattr); break; case CIPSO_V4_TAG_LOCAL: ret_val = cipso_v4_parsetag_loc(doi_def, &cipso[6], secattr); break; } if (ret_val == 0) secattr->type = NETLBL_NLTYPE_CIPSOV4; getattr_return: rcu_read_unlock(); return ret_val; } /** * cipso_v4_sock_getattr - Get the security attributes from a sock * @sk: the sock * @secattr: the security attributes * * Description: * Query @sk to see if there is a CIPSO option attached to the sock and if * there is return the CIPSO security attributes in @secattr. This function * requires that @sk be locked, or privately held, but it does not do any * locking itself. Returns zero on success and negative values on failure. * */ int cipso_v4_sock_getattr(struct sock *sk, struct netlbl_lsm_secattr *secattr) { struct ip_options_rcu *opt; int res = -ENOMSG; rcu_read_lock(); opt = rcu_dereference(inet_sk(sk)->inet_opt); if (opt && opt->opt.cipso) res = cipso_v4_getattr(opt->opt.__data + opt->opt.cipso - sizeof(struct iphdr), secattr); rcu_read_unlock(); return res; } /** * cipso_v4_skbuff_setattr - Set the CIPSO option on a packet * @skb: the packet * @secattr: the security attributes * * Description: * Set the CIPSO option on the given packet based on the security attributes. * Returns a pointer to the IP header on success and NULL on failure. 
* */ int cipso_v4_skbuff_setattr(struct sk_buff *skb, const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr) { int ret_val; struct iphdr *iph; struct ip_options *opt = &IPCB(skb)->opt; unsigned char buf[CIPSO_V4_OPT_LEN_MAX]; u32 buf_len = CIPSO_V4_OPT_LEN_MAX; u32 opt_len; int len_delta; ret_val = cipso_v4_genopt(buf, buf_len, doi_def, secattr); if (ret_val < 0) return ret_val; buf_len = ret_val; opt_len = (buf_len + 3) & ~3; /* we overwrite any existing options to ensure that we have enough * room for the CIPSO option, the reason is that we _need_ to guarantee * that the security label is applied to the packet - we do the same * thing when using the socket options and it hasn't caused a problem, * if we need to we can always revisit this choice later */ len_delta = opt_len - opt->optlen; /* if we don't ensure enough headroom we could panic on the skb_push() * call below so make sure we have enough, we are also "mangling" the * packet so we should probably do a copy-on-write call anyway */ ret_val = skb_cow(skb, skb_headroom(skb) + len_delta); if (ret_val < 0) return ret_val; if (len_delta > 0) { /* we assume that the header + opt->optlen have already been * "pushed" in ip_options_build() or similar */ iph = ip_hdr(skb); skb_push(skb, len_delta); memmove((char *)iph - len_delta, iph, iph->ihl << 2); skb_reset_network_header(skb); iph = ip_hdr(skb); } else if (len_delta < 0) { iph = ip_hdr(skb); memset(iph + 1, IPOPT_NOP, opt->optlen); } else iph = ip_hdr(skb); if (opt->optlen > 0) memset(opt, 0, sizeof(*opt)); opt->optlen = opt_len; opt->cipso = sizeof(struct iphdr); opt->is_changed = 1; /* we have to do the following because we are being called from a * netfilter hook which means the packet already has had the header * fields populated and the checksum calculated - yes this means we * are doing more work than needed but we do it to keep the core * stack clean and tidy */ memcpy(iph + 1, buf, buf_len); if (opt_len > buf_len) memset((char *)(iph + 1) + buf_len, 0, opt_len - buf_len); if (len_delta != 0) { iph->ihl = 5 + (opt_len >> 2); iph->tot_len = htons(skb->len); } ip_send_check(iph); return 0; } /** * cipso_v4_skbuff_delattr - Delete any CIPSO options from a packet * @skb: the packet * * Description: * Removes any and all CIPSO options from the given packet. Returns zero on * success, negative values on failure. * */ int cipso_v4_skbuff_delattr(struct sk_buff *skb) { int ret_val; struct iphdr *iph; struct ip_options *opt = &IPCB(skb)->opt; unsigned char *cipso_ptr; if (opt->cipso == 0) return 0; /* since we are changing the packet we should make a copy */ ret_val = skb_cow(skb, skb_headroom(skb)); if (ret_val < 0) return ret_val; /* the easiest thing to do is just replace the cipso option with noop * options since we don't change the size of the packet, although we * still need to recalculate the checksum */ iph = ip_hdr(skb); cipso_ptr = (unsigned char *)iph + opt->cipso; memset(cipso_ptr, IPOPT_NOOP, cipso_ptr[1]); opt->cipso = 0; opt->is_changed = 1; ip_send_check(iph); return 0; } /** * cipso_v4_skbuff_getattr - Get the security attributes from the CIPSO option * @skb: the packet * @secattr: the security attributes * * Description: * Parse the given packet's CIPSO option and return the security attributes. * Returns zero on success and negative values on failure. 
* */ int cipso_v4_skbuff_getattr(const struct sk_buff *skb, struct netlbl_lsm_secattr *secattr) { return cipso_v4_getattr(CIPSO_V4_OPTPTR(skb), secattr); } /* * Setup Functions */ /** * cipso_v4_init - Initialize the CIPSO module * * Description: * Initialize the CIPSO module and prepare it for use. Returns zero on success * and negative values on failure. * */ static int __init cipso_v4_init(void) { int ret_val; ret_val = cipso_v4_cache_init(); if (ret_val != 0) panic("Failed to initialize the CIPSO/IPv4 cache (%d)\n", ret_val); return 0; } subsys_initcall(cipso_v4_init);
int cipso_v4_sock_setattr(struct sock *sk, const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr) { int ret_val = -EPERM; unsigned char *buf = NULL; u32 buf_len; u32 opt_len; struct ip_options *opt = NULL; struct inet_sock *sk_inet; struct inet_connection_sock *sk_conn; /* In the case of sock_create_lite(), the sock->sk field is not * defined yet but it is not a problem as the only users of these * "lite" PF_INET sockets are functions which do an accept() call * afterwards so we will label the socket as part of the accept(). */ if (sk == NULL) return 0; /* We allocate the maximum CIPSO option size here so we are probably * being a little wasteful, but it makes our life _much_ easier later * on and after all we are only talking about 40 bytes. */ buf_len = CIPSO_V4_OPT_LEN_MAX; buf = kmalloc(buf_len, GFP_ATOMIC); if (buf == NULL) { ret_val = -ENOMEM; goto socket_setattr_failure; } ret_val = cipso_v4_genopt(buf, buf_len, doi_def, secattr); if (ret_val < 0) goto socket_setattr_failure; buf_len = ret_val; /* We can't use ip_options_get() directly because it makes a call to * ip_options_get_alloc() which allocates memory with GFP_KERNEL and * we won't always have CAP_NET_RAW even though we _always_ want to * set the IPOPT_CIPSO option. */ opt_len = (buf_len + 3) & ~3; opt = kzalloc(sizeof(*opt) + opt_len, GFP_ATOMIC); if (opt == NULL) { ret_val = -ENOMEM; goto socket_setattr_failure; } memcpy(opt->__data, buf, buf_len); opt->optlen = opt_len; opt->cipso = sizeof(struct iphdr); kfree(buf); buf = NULL; sk_inet = inet_sk(sk); if (sk_inet->is_icsk) { sk_conn = inet_csk(sk); if (sk_inet->opt) sk_conn->icsk_ext_hdr_len -= sk_inet->opt->optlen; sk_conn->icsk_ext_hdr_len += opt->optlen; sk_conn->icsk_sync_mss(sk, sk_conn->icsk_pmtu_cookie); } opt = xchg(&sk_inet->opt, opt); kfree(opt); return 0; socket_setattr_failure: kfree(buf); kfree(opt); return ret_val; }
int cipso_v4_sock_setattr(struct sock *sk, const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr) { int ret_val = -EPERM; unsigned char *buf = NULL; u32 buf_len; u32 opt_len; struct ip_options_rcu *old, *opt = NULL; struct inet_sock *sk_inet; struct inet_connection_sock *sk_conn; /* In the case of sock_create_lite(), the sock->sk field is not * defined yet but it is not a problem as the only users of these * "lite" PF_INET sockets are functions which do an accept() call * afterwards so we will label the socket as part of the accept(). */ if (sk == NULL) return 0; /* We allocate the maximum CIPSO option size here so we are probably * being a little wasteful, but it makes our life _much_ easier later * on and after all we are only talking about 40 bytes. */ buf_len = CIPSO_V4_OPT_LEN_MAX; buf = kmalloc(buf_len, GFP_ATOMIC); if (buf == NULL) { ret_val = -ENOMEM; goto socket_setattr_failure; } ret_val = cipso_v4_genopt(buf, buf_len, doi_def, secattr); if (ret_val < 0) goto socket_setattr_failure; buf_len = ret_val; /* We can't use ip_options_get() directly because it makes a call to * ip_options_get_alloc() which allocates memory with GFP_KERNEL and * we won't always have CAP_NET_RAW even though we _always_ want to * set the IPOPT_CIPSO option. */ opt_len = (buf_len + 3) & ~3; opt = kzalloc(sizeof(*opt) + opt_len, GFP_ATOMIC); if (opt == NULL) { ret_val = -ENOMEM; goto socket_setattr_failure; } memcpy(opt->opt.__data, buf, buf_len); opt->opt.optlen = opt_len; opt->opt.cipso = sizeof(struct iphdr); kfree(buf); buf = NULL; sk_inet = inet_sk(sk); old = rcu_dereference_protected(sk_inet->inet_opt, sock_owned_by_user(sk)); if (sk_inet->is_icsk) { sk_conn = inet_csk(sk); if (old) sk_conn->icsk_ext_hdr_len -= old->opt.optlen; sk_conn->icsk_ext_hdr_len += opt->opt.optlen; sk_conn->icsk_sync_mss(sk, sk_conn->icsk_pmtu_cookie); } rcu_assign_pointer(sk_inet->inet_opt, opt); if (old) call_rcu(&old->rcu, opt_kfree_rcu); return 0; socket_setattr_failure: kfree(buf); kfree(opt); return ret_val; }
{'added': [(1860, 'static void opt_kfree_rcu(struct rcu_head *head)'), (1861, '{'), (1862, '\tkfree(container_of(head, struct ip_options_rcu, rcu));'), (1863, '}'), (1864, ''), (1887, '\tstruct ip_options_rcu *old, *opt = NULL;'), (1923, '\tmemcpy(opt->opt.__data, buf, buf_len);'), (1924, '\topt->opt.optlen = opt_len;'), (1925, '\topt->opt.cipso = sizeof(struct iphdr);'), (1930, ''), (1931, '\told = rcu_dereference_protected(sk_inet->inet_opt, sock_owned_by_user(sk));'), (1934, '\t\tif (old)'), (1935, '\t\t\tsk_conn->icsk_ext_hdr_len -= old->opt.optlen;'), (1936, '\t\tsk_conn->icsk_ext_hdr_len += opt->opt.optlen;'), (1939, '\trcu_assign_pointer(sk_inet->inet_opt, opt);'), (1940, '\tif (old)'), (1941, '\t\tcall_rcu(&old->rcu, opt_kfree_rcu);'), (1971, '\tstruct ip_options_rcu *opt = NULL;'), (1999, '\tmemcpy(opt->opt.__data, buf, buf_len);'), (2000, '\topt->opt.optlen = opt_len;'), (2001, '\topt->opt.cipso = sizeof(struct iphdr);'), (2007, '\tif (opt)'), (2008, '\t\tcall_rcu(&opt->rcu, opt_kfree_rcu);'), (2028, 'static int cipso_v4_delopt(struct ip_options_rcu **opt_ptr)'), (2031, '\tstruct ip_options_rcu *opt = *opt_ptr;'), (2033, '\tif (opt->opt.srr || opt->opt.rr || opt->opt.ts || opt->opt.router_alert) {'), (2040, '\t\tcipso_off = opt->opt.cipso - sizeof(struct iphdr);'), (2041, '\t\tcipso_ptr = &opt->opt.__data[cipso_off];'), (2044, '\t\tif (opt->opt.srr > opt->opt.cipso)'), (2045, '\t\t\topt->opt.srr -= cipso_len;'), (2046, '\t\tif (opt->opt.rr > opt->opt.cipso)'), (2047, '\t\t\topt->opt.rr -= cipso_len;'), (2048, '\t\tif (opt->opt.ts > opt->opt.cipso)'), (2049, '\t\t\topt->opt.ts -= cipso_len;'), (2050, '\t\tif (opt->opt.router_alert > opt->opt.cipso)'), (2051, '\t\t\topt->opt.router_alert -= cipso_len;'), (2052, '\t\topt->opt.cipso = 0;'), (2055, '\t\t\topt->opt.optlen - cipso_off - cipso_len);'), (2064, '\t\twhile (iter < opt->opt.optlen)'), (2065, '\t\t\tif (opt->opt.__data[iter] != IPOPT_NOP) {'), (2066, '\t\t\t\titer += opt->opt.__data[iter + 1];'), (2070, '\t\thdr_delta = opt->opt.optlen;'), (2071, '\t\topt->opt.optlen = (optlen_new + 3) & ~3;'), (2072, '\t\thdr_delta -= opt->opt.optlen;'), (2077, '\t\thdr_delta = opt->opt.optlen;'), (2078, '\t\tcall_rcu(&opt->rcu, opt_kfree_rcu);'), (2095, '\tstruct ip_options_rcu *opt;'), (2099, '\topt = rcu_dereference_protected(sk_inet->inet_opt, 1);'), (2100, '\tif (opt == NULL || opt->opt.cipso == 0)'), (2103, '\thdr_delta = cipso_v4_delopt(&sk_inet->inet_opt);'), (2121, '\tstruct ip_options_rcu *opt;'), (2126, '\tif (opt == NULL || opt->opt.cipso == 0)'), (2196, '\tstruct ip_options_rcu *opt;'), (2197, '\tint res = -ENOMSG;'), (2199, '\trcu_read_lock();'), (2200, '\topt = rcu_dereference(inet_sk(sk)->inet_opt);'), (2201, '\tif (opt && opt->opt.cipso)'), (2202, '\t\tres = cipso_v4_getattr(opt->opt.__data +'), (2203, '\t\t\t\t\t\topt->opt.cipso -'), (2204, '\t\t\t\t\t\tsizeof(struct iphdr),'), (2205, '\t\t\t\t secattr);'), (2206, '\trcu_read_unlock();'), (2207, '\treturn res;')], 'deleted': [(1882, '\tstruct ip_options *opt = NULL;'), (1918, '\tmemcpy(opt->__data, buf, buf_len);'), (1919, '\topt->optlen = opt_len;'), (1920, '\topt->cipso = sizeof(struct iphdr);'), (1927, '\t\tif (sk_inet->opt)'), (1928, '\t\t\tsk_conn->icsk_ext_hdr_len -= sk_inet->opt->optlen;'), (1929, '\t\tsk_conn->icsk_ext_hdr_len += opt->optlen;'), (1932, '\topt = xchg(&sk_inet->opt, opt);'), (1933, '\tkfree(opt);'), (1963, '\tstruct ip_options *opt = NULL;'), (1991, '\tmemcpy(opt->__data, buf, buf_len);'), (1992, '\topt->optlen = opt_len;'), (1993, '\topt->cipso = 
sizeof(struct iphdr);'), (1999, '\tkfree(opt);'), (2019, 'static int cipso_v4_delopt(struct ip_options **opt_ptr)'), (2022, '\tstruct ip_options *opt = *opt_ptr;'), (2024, '\tif (opt->srr || opt->rr || opt->ts || opt->router_alert) {'), (2031, '\t\tcipso_off = opt->cipso - sizeof(struct iphdr);'), (2032, '\t\tcipso_ptr = &opt->__data[cipso_off];'), (2035, '\t\tif (opt->srr > opt->cipso)'), (2036, '\t\t\topt->srr -= cipso_len;'), (2037, '\t\tif (opt->rr > opt->cipso)'), (2038, '\t\t\topt->rr -= cipso_len;'), (2039, '\t\tif (opt->ts > opt->cipso)'), (2040, '\t\t\topt->ts -= cipso_len;'), (2041, '\t\tif (opt->router_alert > opt->cipso)'), (2042, '\t\t\topt->router_alert -= cipso_len;'), (2043, '\t\topt->cipso = 0;'), (2046, '\t\t\topt->optlen - cipso_off - cipso_len);'), (2055, '\t\twhile (iter < opt->optlen)'), (2056, '\t\t\tif (opt->__data[iter] != IPOPT_NOP) {'), (2057, '\t\t\t\titer += opt->__data[iter + 1];'), (2061, '\t\thdr_delta = opt->optlen;'), (2062, '\t\topt->optlen = (optlen_new + 3) & ~3;'), (2063, '\t\thdr_delta -= opt->optlen;'), (2068, '\t\thdr_delta = opt->optlen;'), (2069, '\t\tkfree(opt);'), (2086, '\tstruct ip_options *opt;'), (2090, '\topt = sk_inet->opt;'), (2091, '\tif (opt == NULL || opt->cipso == 0)'), (2094, '\thdr_delta = cipso_v4_delopt(&sk_inet->opt);'), (2112, '\tstruct ip_options *opt;'), (2117, '\tif (opt == NULL || opt->cipso == 0)'), (2187, '\tstruct ip_options *opt;'), (2189, '\topt = inet_sk(sk)->opt;'), (2190, '\tif (opt == NULL || opt->cipso == 0)'), (2191, '\t\treturn -ENOMSG;'), (2192, ''), (2193, '\treturn cipso_v4_getattr(opt->__data + opt->cipso - sizeof(struct iphdr),'), (2194, '\t\t\t\tsecattr);')]}
63
50
1,360
7,485
https://github.com/torvalds/linux
CVE-2012-3552
['CWE-362']
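Note on the record above: the CVE-2012-3552 (CWE-362) fix converts the socket's IP options from struct ip_options to the RCU-managed struct ip_options_rcu. In func_before, cipso_v4_sock_setattr() installs the new option block with opt = xchg(&sk_inet->opt, opt) and immediately kfree()s the old one, so a lockless reader that fetched the old pointer just before the swap can dereference freed memory. In func_after, the new block is published with rcu_assign_pointer(sk_inet->inet_opt, opt) and the old one is released through call_rcu(&old->rcu, opt_kfree_rcu), deferring the free until every RCU read-side critical section has drained (see the rcu_read_lock()/rcu_dereference() pair in the reworked cipso_v4_sock_getattr()). Below is a minimal userspace sketch of that publish/deferred-free idea using C11 atomics; it is not the kernel RCU API, and the struct, names, and values are hypothetical, chosen only to illustrate why an immediate free after the swap is unsafe.

/* Minimal sketch (illustrative only; not the kernel's RCU machinery). */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct opts {
	int optlen;
	unsigned char data[40];	/* CIPSO fits in the 40-byte IP options area */
};

/* Analogous to inet_sk(sk)->inet_opt. */
static _Atomic(struct opts *) cur_opts;

/* Publish a fully built option block and return the old one.  The
 * release ordering makes the initialized contents visible no later
 * than the pointer itself -- the job rcu_assign_pointer() does in
 * the patched kernel code. */
static struct opts *publish(struct opts *new_opts)
{
	return atomic_exchange_explicit(&cur_opts, new_opts,
	    memory_order_acq_rel);
}

int main(void)
{
	struct opts *a = calloc(1, sizeof(*a));
	struct opts *b = calloc(1, sizeof(*b));

	if (!a || !b)
		return 1;
	a->optlen = 12;
	b->optlen = 16;

	publish(a);
	struct opts *old = publish(b);
	printf("optlen %d replaced by %d\n", old->optlen, b->optlen);

	/* The vulnerable code effectively did kfree(old) right here.  A
	 * concurrent reader that loaded the old pointer a moment earlier
	 * would now be reading freed memory -- the CWE-362 race.  The fix
	 * hands the block to call_rcu() instead, so the free happens only
	 * after all pre-existing readers have left their critical
	 * sections. */
	free(old);
	free(atomic_exchange(&cur_opts, NULL));
	return 0;
}

The sketch's atomic exchange plays the role of both rcu_assign_pointer() (release-ordered publish) and xchg() (retrieving the old pointer); the safety difference in the patch lies entirely in when the old block may be freed.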
mif_cod.c
mif_hdr_get
/* * Copyright (c) 2001-2002 Michael David Adams. * All rights reserved. */ /* __START_OF_JASPER_LICENSE__ * * JasPer License Version 2.0 * * Copyright (c) 2001-2006 Michael David Adams * Copyright (c) 1999-2000 Image Power, Inc. * Copyright (c) 1999-2000 The University of British Columbia * * All rights reserved. * * Permission is hereby granted, free of charge, to any person (the * "User") obtaining a copy of this software and associated documentation * files (the "Software"), to deal in the Software without restriction, * including without limitation the rights to use, copy, modify, merge, * publish, distribute, and/or sell copies of the Software, and to permit * persons to whom the Software is furnished to do so, subject to the * following conditions: * * 1. The above copyright notices and this permission notice (which * includes the disclaimer below) shall be included in all copies or * substantial portions of the Software. * * 2. The name of a copyright holder shall not be used to endorse or * promote products derived from the Software without specific prior * written permission. * * THIS DISCLAIMER OF WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS * LICENSE. NO USE OF THE SOFTWARE IS AUTHORIZED HEREUNDER EXCEPT UNDER * THIS DISCLAIMER. THE SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS * "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A * PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL * INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING * FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. NO ASSURANCES ARE * PROVIDED BY THE COPYRIGHT HOLDERS THAT THE SOFTWARE DOES NOT INFRINGE * THE PATENT OR OTHER INTELLECTUAL PROPERTY RIGHTS OF ANY OTHER ENTITY. * EACH COPYRIGHT HOLDER DISCLAIMS ANY LIABILITY TO THE USER FOR CLAIMS * BROUGHT BY ANY OTHER ENTITY BASED ON INFRINGEMENT OF INTELLECTUAL * PROPERTY RIGHTS OR OTHERWISE. AS A CONDITION TO EXERCISING THE RIGHTS * GRANTED HEREUNDER, EACH USER HEREBY ASSUMES SOLE RESPONSIBILITY TO SECURE * ANY OTHER INTELLECTUAL PROPERTY RIGHTS NEEDED, IF ANY. THE SOFTWARE * IS NOT FAULT-TOLERANT AND IS NOT INTENDED FOR USE IN MISSION-CRITICAL * SYSTEMS, SUCH AS THOSE USED IN THE OPERATION OF NUCLEAR FACILITIES, * AIRCRAFT NAVIGATION OR COMMUNICATION SYSTEMS, AIR TRAFFIC CONTROL * SYSTEMS, DIRECT LIFE SUPPORT MACHINES, OR WEAPONS SYSTEMS, IN WHICH * THE FAILURE OF THE SOFTWARE OR SYSTEM COULD LEAD DIRECTLY TO DEATH, * PERSONAL INJURY, OR SEVERE PHYSICAL OR ENVIRONMENTAL DAMAGE ("HIGH * RISK ACTIVITIES"). THE COPYRIGHT HOLDERS SPECIFICALLY DISCLAIM ANY * EXPRESS OR IMPLIED WARRANTY OF FITNESS FOR HIGH RISK ACTIVITIES. * * __END_OF_JASPER_LICENSE__ */ /******************************************************************************\ * Includes. \******************************************************************************/ #include <assert.h> #include "jasper/jas_tvp.h" #include "jasper/jas_stream.h" #include "jasper/jas_image.h" #include "jasper/jas_string.h" #include "jasper/jas_malloc.h" #include "jasper/jas_debug.h" #include "mif_cod.h" /******************************************************************************\ * Local types. 
\******************************************************************************/ typedef enum { MIF_END = 0, MIF_CMPT } mif_tagid2_t; typedef enum { MIF_TLX = 0, MIF_TLY, MIF_WIDTH, MIF_HEIGHT, MIF_HSAMP, MIF_VSAMP, MIF_PREC, MIF_SGND, MIF_DATA } mif_tagid_t; /******************************************************************************\ * Local functions. \******************************************************************************/ static mif_hdr_t *mif_hdr_create(int maxcmpts); static void mif_hdr_destroy(mif_hdr_t *hdr); static int mif_hdr_growcmpts(mif_hdr_t *hdr, int maxcmpts); static mif_hdr_t *mif_hdr_get(jas_stream_t *in); static int mif_process_cmpt(mif_hdr_t *hdr, char *buf); static int mif_hdr_put(mif_hdr_t *hdr, jas_stream_t *out); static int mif_hdr_addcmpt(mif_hdr_t *hdr, int cmptno, mif_cmpt_t *cmpt); static mif_cmpt_t *mif_cmpt_create(void); static void mif_cmpt_destroy(mif_cmpt_t *cmpt); static char *mif_getline(jas_stream_t *jas_stream, char *buf, int bufsize); static int mif_getc(jas_stream_t *in); static mif_hdr_t *mif_makehdrfromimage(jas_image_t *image); /******************************************************************************\ * Local data. \******************************************************************************/ jas_taginfo_t mif_tags2[] = { {MIF_CMPT, "component"}, {MIF_END, "end"}, {-1, 0} }; jas_taginfo_t mif_tags[] = { {MIF_TLX, "tlx"}, {MIF_TLY, "tly"}, {MIF_WIDTH, "width"}, {MIF_HEIGHT, "height"}, {MIF_HSAMP, "sampperx"}, {MIF_VSAMP, "samppery"}, {MIF_PREC, "prec"}, {MIF_SGND, "sgnd"}, {MIF_DATA, "data"}, {-1, 0} }; /******************************************************************************\ * Code for load operation. \******************************************************************************/ /* Load an image from a stream in the MIF format. */ jas_image_t *mif_decode(jas_stream_t *in, char *optstr) { mif_hdr_t *hdr; jas_image_t *image; jas_image_t *tmpimage; jas_stream_t *tmpstream; int cmptno; mif_cmpt_t *cmpt; jas_image_cmptparm_t cmptparm; jas_seq2d_t *data; int_fast32_t x; int_fast32_t y; int bias; /* Avoid warnings about unused parameters. */ optstr = 0; hdr = 0; image = 0; tmpimage = 0; tmpstream = 0; data = 0; if (!(hdr = mif_hdr_get(in))) { goto error; } if (!(image = jas_image_create0())) { goto error; } for (cmptno = 0; cmptno < hdr->numcmpts; ++cmptno) { cmpt = hdr->cmpts[cmptno]; tmpstream = cmpt->data ? 
jas_stream_fopen(cmpt->data, "rb") : in; if (!tmpstream) { jas_eprintf("cannot open component file %s\n", cmpt->data); goto error; } if (!(tmpimage = jas_image_decode(tmpstream, -1, 0))) { goto error; } if (tmpstream != in) { jas_stream_close(tmpstream); tmpstream = 0; } if (!cmpt->width) { cmpt->width = jas_image_cmptwidth(tmpimage, 0); } if (!cmpt->height) { cmpt->height = jas_image_cmptwidth(tmpimage, 0); } if (!cmpt->prec) { cmpt->prec = jas_image_cmptprec(tmpimage, 0); } if (cmpt->sgnd < 0) { cmpt->sgnd = jas_image_cmptsgnd(tmpimage, 0); } cmptparm.tlx = cmpt->tlx; cmptparm.tly = cmpt->tly; cmptparm.hstep = cmpt->sampperx; cmptparm.vstep = cmpt->samppery; cmptparm.width = cmpt->width; cmptparm.height = cmpt->height; cmptparm.prec = cmpt->prec; cmptparm.sgnd = cmpt->sgnd; if (jas_image_addcmpt(image, jas_image_numcmpts(image), &cmptparm)) { goto error; } if (!(data = jas_seq2d_create(0, 0, cmpt->width, cmpt->height))) { goto error; } if (jas_image_readcmpt(tmpimage, 0, 0, 0, cmpt->width, cmpt->height, data)) { goto error; } if (cmpt->sgnd) { bias = 1 << (cmpt->prec - 1); for (y = 0; y < cmpt->height; ++y) { for (x = 0; x < cmpt->width; ++x) { *jas_seq2d_getref(data, x, y) -= bias; } } } if (jas_image_writecmpt(image, jas_image_numcmpts(image) - 1, 0, 0, cmpt->width, cmpt->height, data)) { goto error; } jas_seq2d_destroy(data); data = 0; jas_image_destroy(tmpimage); tmpimage = 0; } mif_hdr_destroy(hdr); hdr = 0; return image; error: if (image) { jas_image_destroy(image); } if (hdr) { mif_hdr_destroy(hdr); } if (tmpstream && tmpstream != in) { jas_stream_close(tmpstream); } if (tmpimage) { jas_image_destroy(tmpimage); } if (data) { jas_seq2d_destroy(data); } return 0; } /******************************************************************************\ * Code for save operation. \******************************************************************************/ /* Save an image to a stream in the the MIF format. */ int mif_encode(jas_image_t *image, jas_stream_t *out, char *optstr) { mif_hdr_t *hdr; jas_image_t *tmpimage; int fmt; int cmptno; mif_cmpt_t *cmpt; jas_image_cmptparm_t cmptparm; jas_seq2d_t *data; int_fast32_t x; int_fast32_t y; int bias; hdr = 0; tmpimage = 0; data = 0; if (optstr && *optstr != '\0') { jas_eprintf("warning: ignoring unsupported options\n"); } if ((fmt = jas_image_strtofmt("pnm")) < 0) { jas_eprintf("error: PNM support required\n"); goto error; } if (!(hdr = mif_makehdrfromimage(image))) { goto error; } if (mif_hdr_put(hdr, out)) { goto error; } /* Output component data. 
*/ for (cmptno = 0; cmptno < hdr->numcmpts; ++cmptno) { cmpt = hdr->cmpts[cmptno]; if (!cmpt->data) { if (!(tmpimage = jas_image_create0())) { goto error; } cmptparm.tlx = 0; cmptparm.tly = 0; cmptparm.hstep = cmpt->sampperx; cmptparm.vstep = cmpt->samppery; cmptparm.width = cmpt->width; cmptparm.height = cmpt->height; cmptparm.prec = cmpt->prec; cmptparm.sgnd = false; if (jas_image_addcmpt(tmpimage, jas_image_numcmpts(tmpimage), &cmptparm)) { goto error; } jas_image_setclrspc(tmpimage, JAS_CLRSPC_SGRAY); jas_image_setcmpttype(tmpimage, 0, JAS_IMAGE_CT_COLOR(JAS_CLRSPC_CHANIND_GRAY_Y)); if (!(data = jas_seq2d_create(0, 0, cmpt->width, cmpt->height))) { goto error; } if (jas_image_readcmpt(image, cmptno, 0, 0, cmpt->width, cmpt->height, data)) { goto error; } if (cmpt->sgnd) { bias = 1 << (cmpt->prec - 1); for (y = 0; y < cmpt->height; ++y) { for (x = 0; x < cmpt->width; ++x) { *jas_seq2d_getref(data, x, y) += bias; } } } if (jas_image_writecmpt(tmpimage, 0, 0, 0, cmpt->width, cmpt->height, data)) { goto error; } jas_seq2d_destroy(data); data = 0; if (jas_image_encode(tmpimage, out, fmt, 0)) { goto error; } jas_image_destroy(tmpimage); tmpimage = 0; } } mif_hdr_destroy(hdr); return 0; error: if (hdr) { mif_hdr_destroy(hdr); } if (tmpimage) { jas_image_destroy(tmpimage); } if (data) { jas_seq2d_destroy(data); } return -1; } /******************************************************************************\ * Code for validate operation. \******************************************************************************/ int mif_validate(jas_stream_t *in) { uchar buf[MIF_MAGICLEN]; uint_fast32_t magic; int i; int n; assert(JAS_STREAM_MAXPUTBACK >= MIF_MAGICLEN); /* Read the validation data (i.e., the data used for detecting the format). */ if ((n = jas_stream_read(in, buf, MIF_MAGICLEN)) < 0) { return -1; } /* Put the validation data back onto the stream, so that the stream position will not be changed. */ for (i = n - 1; i >= 0; --i) { if (jas_stream_ungetc(in, buf[i]) == EOF) { return -1; } } /* Was enough data read? */ if (n < MIF_MAGICLEN) { return -1; } /* Compute the signature value. */ magic = (JAS_CAST(uint_fast32_t, buf[0]) << 24) | (JAS_CAST(uint_fast32_t, buf[1]) << 16) | (JAS_CAST(uint_fast32_t, buf[2]) << 8) | buf[3]; /* Ensure that the signature is correct for this format. */ if (magic != MIF_MAGIC) { return -1; } return 0; } /******************************************************************************\ * Code for MIF header class. \******************************************************************************/ static mif_hdr_t *mif_hdr_create(int maxcmpts) { mif_hdr_t *hdr; if (!(hdr = jas_malloc(sizeof(mif_hdr_t)))) { return 0; } hdr->numcmpts = 0; hdr->maxcmpts = 0; hdr->cmpts = 0; if (mif_hdr_growcmpts(hdr, maxcmpts)) { mif_hdr_destroy(hdr); return 0; } return hdr; } static void mif_hdr_destroy(mif_hdr_t *hdr) { int cmptno; if (hdr->cmpts) { for (cmptno = 0; cmptno < hdr->numcmpts; ++cmptno) { mif_cmpt_destroy(hdr->cmpts[cmptno]); } jas_free(hdr->cmpts); } jas_free(hdr); } static int mif_hdr_growcmpts(mif_hdr_t *hdr, int maxcmpts) { int cmptno; mif_cmpt_t **newcmpts; assert(maxcmpts >= hdr->numcmpts); newcmpts = (!hdr->cmpts) ? 
jas_alloc2(maxcmpts, sizeof(mif_cmpt_t *)) : jas_realloc2(hdr->cmpts, maxcmpts, sizeof(mif_cmpt_t *)); if (!newcmpts) { return -1; } hdr->maxcmpts = maxcmpts; hdr->cmpts = newcmpts; for (cmptno = hdr->numcmpts; cmptno < hdr->maxcmpts; ++cmptno) { hdr->cmpts[cmptno] = 0; } return 0; } static mif_hdr_t *mif_hdr_get(jas_stream_t *in) { uchar magicbuf[MIF_MAGICLEN]; char buf[4096]; mif_hdr_t *hdr; bool done; jas_tvparser_t *tvp; int id; hdr = 0; tvp = 0; if (jas_stream_read(in, magicbuf, MIF_MAGICLEN) != MIF_MAGICLEN) { goto error; } if (magicbuf[0] != (MIF_MAGIC >> 24) || magicbuf[1] != ((MIF_MAGIC >> 16) & 0xff) || magicbuf[2] != ((MIF_MAGIC >> 8) & 0xff) || magicbuf[3] != (MIF_MAGIC & 0xff)) { jas_eprintf("error: bad signature\n"); goto error; } if (!(hdr = mif_hdr_create(0))) { goto error; } done = false; do { if (!mif_getline(in, buf, sizeof(buf))) { jas_eprintf("mif_getline failed\n"); goto error; } if (buf[0] == '\0') { continue; } JAS_DBGLOG(10, ("header line: len=%d; %s\n", strlen(buf), buf)); if (!(tvp = jas_tvparser_create(buf))) { jas_eprintf("jas_tvparser_create failed\n"); goto error; } if (jas_tvparser_next(tvp)) { jas_eprintf("cannot get record type\n"); goto error; } id = jas_taginfo_nonull(jas_taginfos_lookup(mif_tags2, jas_tvparser_gettag(tvp)))->id; jas_tvparser_destroy(tvp); tvp = 0; switch (id) { case MIF_CMPT: if (mif_process_cmpt(hdr, buf)) { jas_eprintf("cannot get component information\n"); goto error; } break; case MIF_END: done = 1; break; default: jas_eprintf("invalid header information: %s\n", buf); goto error; break; } } while (!done); return hdr; error: if (hdr) { mif_hdr_destroy(hdr); } if (tvp) { jas_tvparser_destroy(tvp); } return 0; } static int mif_process_cmpt(mif_hdr_t *hdr, char *buf) { jas_tvparser_t *tvp; mif_cmpt_t *cmpt; int id; cmpt = 0; tvp = 0; if (!(cmpt = mif_cmpt_create())) { jas_eprintf("cannot create component\n"); goto error; } cmpt->tlx = 0; cmpt->tly = 0; cmpt->sampperx = 0; cmpt->samppery = 0; cmpt->width = 0; cmpt->height = 0; cmpt->prec = 0; cmpt->sgnd = -1; cmpt->data = 0; if (!(tvp = jas_tvparser_create(buf))) { jas_eprintf("cannot create parser\n"); goto error; } // Skip the component keyword if ((id = jas_tvparser_next(tvp))) { // This should never happen. abort(); } // Process the tag-value pairs. 
while (!(id = jas_tvparser_next(tvp))) { switch (jas_taginfo_nonull(jas_taginfos_lookup(mif_tags, jas_tvparser_gettag(tvp)))->id) { case MIF_TLX: cmpt->tlx = atoi(jas_tvparser_getval(tvp)); break; case MIF_TLY: cmpt->tly = atoi(jas_tvparser_getval(tvp)); break; case MIF_WIDTH: cmpt->width = atoi(jas_tvparser_getval(tvp)); break; case MIF_HEIGHT: cmpt->height = atoi(jas_tvparser_getval(tvp)); break; case MIF_HSAMP: cmpt->sampperx = atoi(jas_tvparser_getval(tvp)); break; case MIF_VSAMP: cmpt->samppery = atoi(jas_tvparser_getval(tvp)); break; case MIF_PREC: cmpt->prec = atoi(jas_tvparser_getval(tvp)); break; case MIF_SGND: cmpt->sgnd = atoi(jas_tvparser_getval(tvp)); break; case MIF_DATA: if (!(cmpt->data = jas_strdup(jas_tvparser_getval(tvp)))) { goto error; } break; default: jas_eprintf("invalid component information: %s\n", buf); goto error; break; } } if (!cmpt->sampperx || !cmpt->samppery) { goto error; } if (!cmpt->width || !cmpt->height || !cmpt->prec || cmpt->sgnd < 0) { goto error; } if (mif_hdr_addcmpt(hdr, hdr->numcmpts, cmpt)) { jas_eprintf("cannot add component\n"); goto error; } jas_tvparser_destroy(tvp); return 0; error: if (cmpt) { mif_cmpt_destroy(cmpt); } if (tvp) { jas_tvparser_destroy(tvp); } return -1; } static int mif_hdr_put(mif_hdr_t *hdr, jas_stream_t *out) { int cmptno; mif_cmpt_t *cmpt; /* Output signature. */ jas_stream_putc(out, (MIF_MAGIC >> 24) & 0xff); jas_stream_putc(out, (MIF_MAGIC >> 16) & 0xff); jas_stream_putc(out, (MIF_MAGIC >> 8) & 0xff); jas_stream_putc(out, MIF_MAGIC & 0xff); /* Output component information. */ for (cmptno = 0; cmptno < hdr->numcmpts; ++cmptno) { cmpt = hdr->cmpts[cmptno]; jas_stream_printf(out, "component tlx=%ld tly=%ld " "sampperx=%ld samppery=%ld width=%ld height=%ld prec=%d sgnd=%d", cmpt->tlx, cmpt->tly, cmpt->sampperx, cmpt->samppery, cmpt->width, cmpt->height, cmpt->prec, cmpt->sgnd); if (cmpt->data) { jas_stream_printf(out, " data=%s", cmpt->data); } jas_stream_printf(out, "\n"); } /* Output end of header indicator. */ jas_stream_printf(out, "end\n"); return 0; } static int mif_hdr_addcmpt(mif_hdr_t *hdr, int cmptno, mif_cmpt_t *cmpt) { assert(cmptno >= hdr->numcmpts); if (hdr->numcmpts >= hdr->maxcmpts) { if (mif_hdr_growcmpts(hdr, hdr->numcmpts + 128)) { return -1; } } hdr->cmpts[hdr->numcmpts] = cmpt; ++hdr->numcmpts; return 0; } /******************************************************************************\ * Code for MIF component class. \******************************************************************************/ static mif_cmpt_t *mif_cmpt_create() { mif_cmpt_t *cmpt; if (!(cmpt = jas_malloc(sizeof(mif_cmpt_t)))) { return 0; } memset(cmpt, 0, sizeof(mif_cmpt_t)); return cmpt; } static void mif_cmpt_destroy(mif_cmpt_t *cmpt) { if (cmpt->data) { jas_free(cmpt->data); } jas_free(cmpt); } /******************************************************************************\ * MIF parsing code. 
\******************************************************************************/ static char *mif_getline(jas_stream_t *stream, char *buf, int bufsize) { int c; char *bufptr; assert(bufsize > 0); bufptr = buf; while (bufsize > 1) { if ((c = mif_getc(stream)) == EOF) { break; } *bufptr++ = c; --bufsize; if (c == '\n') { break; } } *bufptr = '\0'; if (!(bufptr = strchr(buf, '\n'))) { return 0; } *bufptr = '\0'; return buf; } static int mif_getc(jas_stream_t *in) { int c; bool done; done = false; do { switch (c = jas_stream_getc(in)) { case EOF: done = true; break; case '#': for (;;) { if ((c = jas_stream_getc(in)) == EOF) { done = true; break; } if (c == '\n') { done = true; break; } } break; case '\\': if (jas_stream_peekc(in) == '\n') { jas_stream_getc(in); } break; default: done = true; break; } } while (!done); return c; } /******************************************************************************\ * Miscellaneous functions. \******************************************************************************/ static mif_hdr_t *mif_makehdrfromimage(jas_image_t *image) { mif_hdr_t *hdr; int cmptno; mif_cmpt_t *cmpt; if (!(hdr = mif_hdr_create(jas_image_numcmpts(image)))) { return 0; } hdr->magic = MIF_MAGIC; hdr->numcmpts = jas_image_numcmpts(image); for (cmptno = 0; cmptno < hdr->numcmpts; ++cmptno) { if (!(hdr->cmpts[cmptno] = jas_malloc(sizeof(mif_cmpt_t)))) { goto error; } cmpt = hdr->cmpts[cmptno]; cmpt->tlx = jas_image_cmpttlx(image, cmptno); cmpt->tly = jas_image_cmpttly(image, cmptno); cmpt->width = jas_image_cmptwidth(image, cmptno); cmpt->height = jas_image_cmptheight(image, cmptno); cmpt->sampperx = jas_image_cmpthstep(image, cmptno); cmpt->samppery = jas_image_cmptvstep(image, cmptno); cmpt->prec = jas_image_cmptprec(image, cmptno); cmpt->sgnd = jas_image_cmptsgnd(image, cmptno); cmpt->data = 0; } return hdr; error: for (cmptno = 0; cmptno < hdr->numcmpts; ++cmptno) { if (hdr->cmpts[cmptno]) { jas_free(hdr->cmpts[cmptno]); } } if (hdr) { jas_free(hdr); } return 0; }
/* * Copyright (c) 2001-2002 Michael David Adams. * All rights reserved. */ /* __START_OF_JASPER_LICENSE__ * * JasPer License Version 2.0 * * Copyright (c) 2001-2006 Michael David Adams * Copyright (c) 1999-2000 Image Power, Inc. * Copyright (c) 1999-2000 The University of British Columbia * * All rights reserved. * * Permission is hereby granted, free of charge, to any person (the * "User") obtaining a copy of this software and associated documentation * files (the "Software"), to deal in the Software without restriction, * including without limitation the rights to use, copy, modify, merge, * publish, distribute, and/or sell copies of the Software, and to permit * persons to whom the Software is furnished to do so, subject to the * following conditions: * * 1. The above copyright notices and this permission notice (which * includes the disclaimer below) shall be included in all copies or * substantial portions of the Software. * * 2. The name of a copyright holder shall not be used to endorse or * promote products derived from the Software without specific prior * written permission. * * THIS DISCLAIMER OF WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS * LICENSE. NO USE OF THE SOFTWARE IS AUTHORIZED HEREUNDER EXCEPT UNDER * THIS DISCLAIMER. THE SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS * "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A * PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL * INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING * FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. NO ASSURANCES ARE * PROVIDED BY THE COPYRIGHT HOLDERS THAT THE SOFTWARE DOES NOT INFRINGE * THE PATENT OR OTHER INTELLECTUAL PROPERTY RIGHTS OF ANY OTHER ENTITY. * EACH COPYRIGHT HOLDER DISCLAIMS ANY LIABILITY TO THE USER FOR CLAIMS * BROUGHT BY ANY OTHER ENTITY BASED ON INFRINGEMENT OF INTELLECTUAL * PROPERTY RIGHTS OR OTHERWISE. AS A CONDITION TO EXERCISING THE RIGHTS * GRANTED HEREUNDER, EACH USER HEREBY ASSUMES SOLE RESPONSIBILITY TO SECURE * ANY OTHER INTELLECTUAL PROPERTY RIGHTS NEEDED, IF ANY. THE SOFTWARE * IS NOT FAULT-TOLERANT AND IS NOT INTENDED FOR USE IN MISSION-CRITICAL * SYSTEMS, SUCH AS THOSE USED IN THE OPERATION OF NUCLEAR FACILITIES, * AIRCRAFT NAVIGATION OR COMMUNICATION SYSTEMS, AIR TRAFFIC CONTROL * SYSTEMS, DIRECT LIFE SUPPORT MACHINES, OR WEAPONS SYSTEMS, IN WHICH * THE FAILURE OF THE SOFTWARE OR SYSTEM COULD LEAD DIRECTLY TO DEATH, * PERSONAL INJURY, OR SEVERE PHYSICAL OR ENVIRONMENTAL DAMAGE ("HIGH * RISK ACTIVITIES"). THE COPYRIGHT HOLDERS SPECIFICALLY DISCLAIM ANY * EXPRESS OR IMPLIED WARRANTY OF FITNESS FOR HIGH RISK ACTIVITIES. * * __END_OF_JASPER_LICENSE__ */ /******************************************************************************\ * Includes. \******************************************************************************/ #include <assert.h> #include "jasper/jas_tvp.h" #include "jasper/jas_stream.h" #include "jasper/jas_image.h" #include "jasper/jas_string.h" #include "jasper/jas_malloc.h" #include "jasper/jas_debug.h" #include "mif_cod.h" /******************************************************************************\ * Local types. 
\******************************************************************************/ typedef enum { MIF_END = 0, MIF_CMPT } mif_tagid2_t; typedef enum { MIF_TLX = 0, MIF_TLY, MIF_WIDTH, MIF_HEIGHT, MIF_HSAMP, MIF_VSAMP, MIF_PREC, MIF_SGND, MIF_DATA } mif_tagid_t; /******************************************************************************\ * Local functions. \******************************************************************************/ static mif_hdr_t *mif_hdr_create(int maxcmpts); static void mif_hdr_destroy(mif_hdr_t *hdr); static int mif_hdr_growcmpts(mif_hdr_t *hdr, int maxcmpts); static mif_hdr_t *mif_hdr_get(jas_stream_t *in); static int mif_process_cmpt(mif_hdr_t *hdr, char *buf); static int mif_hdr_put(mif_hdr_t *hdr, jas_stream_t *out); static int mif_hdr_addcmpt(mif_hdr_t *hdr, int cmptno, mif_cmpt_t *cmpt); static mif_cmpt_t *mif_cmpt_create(void); static void mif_cmpt_destroy(mif_cmpt_t *cmpt); static char *mif_getline(jas_stream_t *jas_stream, char *buf, int bufsize); static int mif_getc(jas_stream_t *in); static mif_hdr_t *mif_makehdrfromimage(jas_image_t *image); /******************************************************************************\ * Local data. \******************************************************************************/ jas_taginfo_t mif_tags2[] = { {MIF_CMPT, "component"}, {MIF_END, "end"}, {-1, 0} }; jas_taginfo_t mif_tags[] = { {MIF_TLX, "tlx"}, {MIF_TLY, "tly"}, {MIF_WIDTH, "width"}, {MIF_HEIGHT, "height"}, {MIF_HSAMP, "sampperx"}, {MIF_VSAMP, "samppery"}, {MIF_PREC, "prec"}, {MIF_SGND, "sgnd"}, {MIF_DATA, "data"}, {-1, 0} }; /******************************************************************************\ * Code for load operation. \******************************************************************************/ /* Load an image from a stream in the MIF format. */ jas_image_t *mif_decode(jas_stream_t *in, char *optstr) { mif_hdr_t *hdr; jas_image_t *image; jas_image_t *tmpimage; jas_stream_t *tmpstream; int cmptno; mif_cmpt_t *cmpt; jas_image_cmptparm_t cmptparm; jas_seq2d_t *data; int_fast32_t x; int_fast32_t y; int bias; /* Avoid warnings about unused parameters. */ optstr = 0; hdr = 0; image = 0; tmpimage = 0; tmpstream = 0; data = 0; if (!(hdr = mif_hdr_get(in))) { goto error; } if (!(image = jas_image_create0())) { goto error; } for (cmptno = 0; cmptno < hdr->numcmpts; ++cmptno) { cmpt = hdr->cmpts[cmptno]; tmpstream = cmpt->data ? 
jas_stream_fopen(cmpt->data, "rb") : in; if (!tmpstream) { jas_eprintf("cannot open component file %s\n", cmpt->data); goto error; } if (!(tmpimage = jas_image_decode(tmpstream, -1, 0))) { goto error; } if (tmpstream != in) { jas_stream_close(tmpstream); tmpstream = 0; } if (!cmpt->width) { cmpt->width = jas_image_cmptwidth(tmpimage, 0); } if (!cmpt->height) { cmpt->height = jas_image_cmptwidth(tmpimage, 0); } if (!cmpt->prec) { cmpt->prec = jas_image_cmptprec(tmpimage, 0); } if (cmpt->sgnd < 0) { cmpt->sgnd = jas_image_cmptsgnd(tmpimage, 0); } cmptparm.tlx = cmpt->tlx; cmptparm.tly = cmpt->tly; cmptparm.hstep = cmpt->sampperx; cmptparm.vstep = cmpt->samppery; cmptparm.width = cmpt->width; cmptparm.height = cmpt->height; cmptparm.prec = cmpt->prec; cmptparm.sgnd = cmpt->sgnd; if (jas_image_addcmpt(image, jas_image_numcmpts(image), &cmptparm)) { goto error; } if (!(data = jas_seq2d_create(0, 0, cmpt->width, cmpt->height))) { goto error; } if (jas_image_readcmpt(tmpimage, 0, 0, 0, cmpt->width, cmpt->height, data)) { goto error; } if (cmpt->sgnd) { bias = 1 << (cmpt->prec - 1); for (y = 0; y < cmpt->height; ++y) { for (x = 0; x < cmpt->width; ++x) { *jas_seq2d_getref(data, x, y) -= bias; } } } if (jas_image_writecmpt(image, jas_image_numcmpts(image) - 1, 0, 0, cmpt->width, cmpt->height, data)) { goto error; } jas_seq2d_destroy(data); data = 0; jas_image_destroy(tmpimage); tmpimage = 0; } mif_hdr_destroy(hdr); hdr = 0; return image; error: if (image) { jas_image_destroy(image); } if (hdr) { mif_hdr_destroy(hdr); } if (tmpstream && tmpstream != in) { jas_stream_close(tmpstream); } if (tmpimage) { jas_image_destroy(tmpimage); } if (data) { jas_seq2d_destroy(data); } return 0; } /******************************************************************************\ * Code for save operation. \******************************************************************************/ /* Save an image to a stream in the the MIF format. */ int mif_encode(jas_image_t *image, jas_stream_t *out, char *optstr) { mif_hdr_t *hdr; jas_image_t *tmpimage; int fmt; int cmptno; mif_cmpt_t *cmpt; jas_image_cmptparm_t cmptparm; jas_seq2d_t *data; int_fast32_t x; int_fast32_t y; int bias; hdr = 0; tmpimage = 0; data = 0; if (optstr && *optstr != '\0') { jas_eprintf("warning: ignoring unsupported options\n"); } if ((fmt = jas_image_strtofmt("pnm")) < 0) { jas_eprintf("error: PNM support required\n"); goto error; } if (!(hdr = mif_makehdrfromimage(image))) { goto error; } if (mif_hdr_put(hdr, out)) { goto error; } /* Output component data. 
*/ for (cmptno = 0; cmptno < hdr->numcmpts; ++cmptno) { cmpt = hdr->cmpts[cmptno]; if (!cmpt->data) { if (!(tmpimage = jas_image_create0())) { goto error; } cmptparm.tlx = 0; cmptparm.tly = 0; cmptparm.hstep = cmpt->sampperx; cmptparm.vstep = cmpt->samppery; cmptparm.width = cmpt->width; cmptparm.height = cmpt->height; cmptparm.prec = cmpt->prec; cmptparm.sgnd = false; if (jas_image_addcmpt(tmpimage, jas_image_numcmpts(tmpimage), &cmptparm)) { goto error; } jas_image_setclrspc(tmpimage, JAS_CLRSPC_SGRAY); jas_image_setcmpttype(tmpimage, 0, JAS_IMAGE_CT_COLOR(JAS_CLRSPC_CHANIND_GRAY_Y)); if (!(data = jas_seq2d_create(0, 0, cmpt->width, cmpt->height))) { goto error; } if (jas_image_readcmpt(image, cmptno, 0, 0, cmpt->width, cmpt->height, data)) { goto error; } if (cmpt->sgnd) { bias = 1 << (cmpt->prec - 1); for (y = 0; y < cmpt->height; ++y) { for (x = 0; x < cmpt->width; ++x) { *jas_seq2d_getref(data, x, y) += bias; } } } if (jas_image_writecmpt(tmpimage, 0, 0, 0, cmpt->width, cmpt->height, data)) { goto error; } jas_seq2d_destroy(data); data = 0; if (jas_image_encode(tmpimage, out, fmt, 0)) { goto error; } jas_image_destroy(tmpimage); tmpimage = 0; } } mif_hdr_destroy(hdr); return 0; error: if (hdr) { mif_hdr_destroy(hdr); } if (tmpimage) { jas_image_destroy(tmpimage); } if (data) { jas_seq2d_destroy(data); } return -1; } /******************************************************************************\ * Code for validate operation. \******************************************************************************/ int mif_validate(jas_stream_t *in) { jas_uchar buf[MIF_MAGICLEN]; uint_fast32_t magic; int i; int n; assert(JAS_STREAM_MAXPUTBACK >= MIF_MAGICLEN); /* Read the validation data (i.e., the data used for detecting the format). */ if ((n = jas_stream_read(in, buf, MIF_MAGICLEN)) < 0) { return -1; } /* Put the validation data back onto the stream, so that the stream position will not be changed. */ for (i = n - 1; i >= 0; --i) { if (jas_stream_ungetc(in, buf[i]) == EOF) { return -1; } } /* Was enough data read? */ if (n < MIF_MAGICLEN) { return -1; } /* Compute the signature value. */ magic = (JAS_CAST(uint_fast32_t, buf[0]) << 24) | (JAS_CAST(uint_fast32_t, buf[1]) << 16) | (JAS_CAST(uint_fast32_t, buf[2]) << 8) | buf[3]; /* Ensure that the signature is correct for this format. */ if (magic != MIF_MAGIC) { return -1; } return 0; } /******************************************************************************\ * Code for MIF header class. \******************************************************************************/ static mif_hdr_t *mif_hdr_create(int maxcmpts) { mif_hdr_t *hdr; if (!(hdr = jas_malloc(sizeof(mif_hdr_t)))) { return 0; } hdr->numcmpts = 0; hdr->maxcmpts = 0; hdr->cmpts = 0; if (mif_hdr_growcmpts(hdr, maxcmpts)) { mif_hdr_destroy(hdr); return 0; } return hdr; } static void mif_hdr_destroy(mif_hdr_t *hdr) { int cmptno; if (hdr->cmpts) { for (cmptno = 0; cmptno < hdr->numcmpts; ++cmptno) { mif_cmpt_destroy(hdr->cmpts[cmptno]); } jas_free(hdr->cmpts); } jas_free(hdr); } static int mif_hdr_growcmpts(mif_hdr_t *hdr, int maxcmpts) { int cmptno; mif_cmpt_t **newcmpts; assert(maxcmpts >= hdr->numcmpts); newcmpts = (!hdr->cmpts) ? 
jas_alloc2(maxcmpts, sizeof(mif_cmpt_t *)) : jas_realloc2(hdr->cmpts, maxcmpts, sizeof(mif_cmpt_t *)); if (!newcmpts) { return -1; } hdr->maxcmpts = maxcmpts; hdr->cmpts = newcmpts; for (cmptno = hdr->numcmpts; cmptno < hdr->maxcmpts; ++cmptno) { hdr->cmpts[cmptno] = 0; } return 0; } static mif_hdr_t *mif_hdr_get(jas_stream_t *in) { jas_uchar magicbuf[MIF_MAGICLEN]; char buf[4096]; mif_hdr_t *hdr; bool done; jas_tvparser_t *tvp; int id; hdr = 0; tvp = 0; if (jas_stream_read(in, magicbuf, MIF_MAGICLEN) != MIF_MAGICLEN) { goto error; } if (magicbuf[0] != (MIF_MAGIC >> 24) || magicbuf[1] != ((MIF_MAGIC >> 16) & 0xff) || magicbuf[2] != ((MIF_MAGIC >> 8) & 0xff) || magicbuf[3] != (MIF_MAGIC & 0xff)) { jas_eprintf("error: bad signature\n"); goto error; } if (!(hdr = mif_hdr_create(0))) { goto error; } done = false; do { if (!mif_getline(in, buf, sizeof(buf))) { jas_eprintf("mif_getline failed\n"); goto error; } if (buf[0] == '\0') { continue; } JAS_DBGLOG(10, ("header line: len=%d; %s\n", strlen(buf), buf)); if (!(tvp = jas_tvparser_create(buf))) { jas_eprintf("jas_tvparser_create failed\n"); goto error; } if (jas_tvparser_next(tvp)) { jas_eprintf("cannot get record type\n"); goto error; } id = jas_taginfo_nonull(jas_taginfos_lookup(mif_tags2, jas_tvparser_gettag(tvp)))->id; jas_tvparser_destroy(tvp); tvp = 0; switch (id) { case MIF_CMPT: if (mif_process_cmpt(hdr, buf)) { jas_eprintf("cannot get component information\n"); goto error; } break; case MIF_END: done = 1; break; default: jas_eprintf("invalid header information: %s\n", buf); goto error; break; } } while (!done); return hdr; error: if (hdr) { mif_hdr_destroy(hdr); } if (tvp) { jas_tvparser_destroy(tvp); } return 0; } static int mif_process_cmpt(mif_hdr_t *hdr, char *buf) { jas_tvparser_t *tvp; mif_cmpt_t *cmpt; int id; cmpt = 0; tvp = 0; if (!(cmpt = mif_cmpt_create())) { jas_eprintf("cannot create component\n"); goto error; } cmpt->tlx = 0; cmpt->tly = 0; cmpt->sampperx = 0; cmpt->samppery = 0; cmpt->width = 0; cmpt->height = 0; cmpt->prec = 0; cmpt->sgnd = -1; cmpt->data = 0; if (!(tvp = jas_tvparser_create(buf))) { jas_eprintf("cannot create parser\n"); goto error; } // Skip the component keyword if ((id = jas_tvparser_next(tvp))) { // This should never happen. abort(); } // Process the tag-value pairs. 
while (!(id = jas_tvparser_next(tvp))) { switch (jas_taginfo_nonull(jas_taginfos_lookup(mif_tags, jas_tvparser_gettag(tvp)))->id) { case MIF_TLX: cmpt->tlx = atoi(jas_tvparser_getval(tvp)); break; case MIF_TLY: cmpt->tly = atoi(jas_tvparser_getval(tvp)); break; case MIF_WIDTH: cmpt->width = atoi(jas_tvparser_getval(tvp)); break; case MIF_HEIGHT: cmpt->height = atoi(jas_tvparser_getval(tvp)); break; case MIF_HSAMP: cmpt->sampperx = atoi(jas_tvparser_getval(tvp)); break; case MIF_VSAMP: cmpt->samppery = atoi(jas_tvparser_getval(tvp)); break; case MIF_PREC: cmpt->prec = atoi(jas_tvparser_getval(tvp)); break; case MIF_SGND: cmpt->sgnd = atoi(jas_tvparser_getval(tvp)); break; case MIF_DATA: if (!(cmpt->data = jas_strdup(jas_tvparser_getval(tvp)))) { goto error; } break; default: jas_eprintf("invalid component information: %s\n", buf); goto error; break; } } if (!cmpt->sampperx || !cmpt->samppery) { goto error; } if (!cmpt->width || !cmpt->height || !cmpt->prec || cmpt->sgnd < 0) { goto error; } if (mif_hdr_addcmpt(hdr, hdr->numcmpts, cmpt)) { jas_eprintf("cannot add component\n"); goto error; } jas_tvparser_destroy(tvp); return 0; error: if (cmpt) { mif_cmpt_destroy(cmpt); } if (tvp) { jas_tvparser_destroy(tvp); } return -1; } static int mif_hdr_put(mif_hdr_t *hdr, jas_stream_t *out) { int cmptno; mif_cmpt_t *cmpt; /* Output signature. */ jas_stream_putc(out, (MIF_MAGIC >> 24) & 0xff); jas_stream_putc(out, (MIF_MAGIC >> 16) & 0xff); jas_stream_putc(out, (MIF_MAGIC >> 8) & 0xff); jas_stream_putc(out, MIF_MAGIC & 0xff); /* Output component information. */ for (cmptno = 0; cmptno < hdr->numcmpts; ++cmptno) { cmpt = hdr->cmpts[cmptno]; jas_stream_printf(out, "component tlx=%ld tly=%ld " "sampperx=%ld samppery=%ld width=%ld height=%ld prec=%d sgnd=%d", cmpt->tlx, cmpt->tly, cmpt->sampperx, cmpt->samppery, cmpt->width, cmpt->height, cmpt->prec, cmpt->sgnd); if (cmpt->data) { jas_stream_printf(out, " data=%s", cmpt->data); } jas_stream_printf(out, "\n"); } /* Output end of header indicator. */ jas_stream_printf(out, "end\n"); return 0; } static int mif_hdr_addcmpt(mif_hdr_t *hdr, int cmptno, mif_cmpt_t *cmpt) { assert(cmptno >= hdr->numcmpts); if (hdr->numcmpts >= hdr->maxcmpts) { if (mif_hdr_growcmpts(hdr, hdr->numcmpts + 128)) { return -1; } } hdr->cmpts[hdr->numcmpts] = cmpt; ++hdr->numcmpts; return 0; } /******************************************************************************\ * Code for MIF component class. \******************************************************************************/ static mif_cmpt_t *mif_cmpt_create() { mif_cmpt_t *cmpt; if (!(cmpt = jas_malloc(sizeof(mif_cmpt_t)))) { return 0; } memset(cmpt, 0, sizeof(mif_cmpt_t)); return cmpt; } static void mif_cmpt_destroy(mif_cmpt_t *cmpt) { if (cmpt->data) { jas_free(cmpt->data); } jas_free(cmpt); } /******************************************************************************\ * MIF parsing code. 
\******************************************************************************/ static char *mif_getline(jas_stream_t *stream, char *buf, int bufsize) { int c; char *bufptr; assert(bufsize > 0); bufptr = buf; while (bufsize > 1) { if ((c = mif_getc(stream)) == EOF) { break; } *bufptr++ = c; --bufsize; if (c == '\n') { break; } } *bufptr = '\0'; if (!(bufptr = strchr(buf, '\n'))) { return 0; } *bufptr = '\0'; return buf; } static int mif_getc(jas_stream_t *in) { int c; bool done; done = false; do { switch (c = jas_stream_getc(in)) { case EOF: done = true; break; case '#': for (;;) { if ((c = jas_stream_getc(in)) == EOF) { done = true; break; } if (c == '\n') { done = true; break; } } break; case '\\': if (jas_stream_peekc(in) == '\n') { jas_stream_getc(in); } break; default: done = true; break; } } while (!done); return c; } /******************************************************************************\ * Miscellaneous functions. \******************************************************************************/ static mif_hdr_t *mif_makehdrfromimage(jas_image_t *image) { mif_hdr_t *hdr; int cmptno; mif_cmpt_t *cmpt; if (!(hdr = mif_hdr_create(jas_image_numcmpts(image)))) { return 0; } hdr->magic = MIF_MAGIC; hdr->numcmpts = jas_image_numcmpts(image); for (cmptno = 0; cmptno < hdr->numcmpts; ++cmptno) { if (!(hdr->cmpts[cmptno] = jas_malloc(sizeof(mif_cmpt_t)))) { goto error; } cmpt = hdr->cmpts[cmptno]; cmpt->tlx = jas_image_cmpttlx(image, cmptno); cmpt->tly = jas_image_cmpttly(image, cmptno); cmpt->width = jas_image_cmptwidth(image, cmptno); cmpt->height = jas_image_cmptheight(image, cmptno); cmpt->sampperx = jas_image_cmpthstep(image, cmptno); cmpt->samppery = jas_image_cmptvstep(image, cmptno); cmpt->prec = jas_image_cmptprec(image, cmptno); cmpt->sgnd = jas_image_cmptsgnd(image, cmptno); cmpt->data = 0; } return hdr; error: for (cmptno = 0; cmptno < hdr->numcmpts; ++cmptno) { if (hdr->cmpts[cmptno]) { jas_free(hdr->cmpts[cmptno]); } } if (hdr) { jas_free(hdr); } return 0; }
static mif_hdr_t *mif_hdr_get(jas_stream_t *in) { uchar magicbuf[MIF_MAGICLEN]; char buf[4096]; mif_hdr_t *hdr; bool done; jas_tvparser_t *tvp; int id; hdr = 0; tvp = 0; if (jas_stream_read(in, magicbuf, MIF_MAGICLEN) != MIF_MAGICLEN) { goto error; } if (magicbuf[0] != (MIF_MAGIC >> 24) || magicbuf[1] != ((MIF_MAGIC >> 16) & 0xff) || magicbuf[2] != ((MIF_MAGIC >> 8) & 0xff) || magicbuf[3] != (MIF_MAGIC & 0xff)) { jas_eprintf("error: bad signature\n"); goto error; } if (!(hdr = mif_hdr_create(0))) { goto error; } done = false; do { if (!mif_getline(in, buf, sizeof(buf))) { jas_eprintf("mif_getline failed\n"); goto error; } if (buf[0] == '\0') { continue; } JAS_DBGLOG(10, ("header line: len=%d; %s\n", strlen(buf), buf)); if (!(tvp = jas_tvparser_create(buf))) { jas_eprintf("jas_tvparser_create failed\n"); goto error; } if (jas_tvparser_next(tvp)) { jas_eprintf("cannot get record type\n"); goto error; } id = jas_taginfo_nonull(jas_taginfos_lookup(mif_tags2, jas_tvparser_gettag(tvp)))->id; jas_tvparser_destroy(tvp); tvp = 0; switch (id) { case MIF_CMPT: if (mif_process_cmpt(hdr, buf)) { jas_eprintf("cannot get component information\n"); goto error; } break; case MIF_END: done = 1; break; default: jas_eprintf("invalid header information: %s\n", buf); goto error; break; } } while (!done); return hdr; error: if (hdr) { mif_hdr_destroy(hdr); } if (tvp) { jas_tvparser_destroy(tvp); } return 0; }
static mif_hdr_t *mif_hdr_get(jas_stream_t *in) { jas_uchar magicbuf[MIF_MAGICLEN]; char buf[4096]; mif_hdr_t *hdr; bool done; jas_tvparser_t *tvp; int id; hdr = 0; tvp = 0; if (jas_stream_read(in, magicbuf, MIF_MAGICLEN) != MIF_MAGICLEN) { goto error; } if (magicbuf[0] != (MIF_MAGIC >> 24) || magicbuf[1] != ((MIF_MAGIC >> 16) & 0xff) || magicbuf[2] != ((MIF_MAGIC >> 8) & 0xff) || magicbuf[3] != (MIF_MAGIC & 0xff)) { jas_eprintf("error: bad signature\n"); goto error; } if (!(hdr = mif_hdr_create(0))) { goto error; } done = false; do { if (!mif_getline(in, buf, sizeof(buf))) { jas_eprintf("mif_getline failed\n"); goto error; } if (buf[0] == '\0') { continue; } JAS_DBGLOG(10, ("header line: len=%d; %s\n", strlen(buf), buf)); if (!(tvp = jas_tvparser_create(buf))) { jas_eprintf("jas_tvparser_create failed\n"); goto error; } if (jas_tvparser_next(tvp)) { jas_eprintf("cannot get record type\n"); goto error; } id = jas_taginfo_nonull(jas_taginfos_lookup(mif_tags2, jas_tvparser_gettag(tvp)))->id; jas_tvparser_destroy(tvp); tvp = 0; switch (id) { case MIF_CMPT: if (mif_process_cmpt(hdr, buf)) { jas_eprintf("cannot get component information\n"); goto error; } break; case MIF_END: done = 1; break; default: jas_eprintf("invalid header information: %s\n", buf); goto error; break; } } while (!done); return hdr; error: if (hdr) { mif_hdr_destroy(hdr); } if (tvp) { jas_tvparser_destroy(tvp); } return 0; }
{'added': [(373, '\tjas_uchar buf[MIF_MAGICLEN];'), (465, '\tjas_uchar magicbuf[MIF_MAGICLEN];')], 'deleted': [(373, '\tuchar buf[MIF_MAGICLEN];'), (465, '\tuchar magicbuf[MIF_MAGICLEN];')]}
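The diff recorded just above is a two-line type rename: the unprefixed uchar becomes the library-namespaced jas_uchar in the buffer declarations at file lines 373 (mif_validate's buf) and 465 (mif_hdr_get's magicbuf). An unprefixed uchar is fragile because some platforms' system headers already provide a typedef of that name, so the identifier can silently bind to a foreign definition. A minimal illustration of the prefixed typedef follows; the header name and guard are assumptions, not JasPer's actual layout.

#ifndef SKETCH_TYPES_H
#define SKETCH_TYPES_H

/* Library-prefixed byte type: cannot collide with a system `uchar`. */
typedef unsigned char jas_uchar;

#endif

With such a typedef in scope, the signature buffer is declared exactly as in func_after above: jas_uchar magicbuf[MIF_MAGICLEN];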
2
2
620
3492
https://github.com/mdadams/jasper
CVE-2016-9395
['CWE-20']
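This record is tagged CWE-20 (improper input validation); the corresponding hardening visible in mif_process_cmpt above is the validate-after-parse block that rejects components whose sampling steps are zero, whose width/height/prec are zero, or whose signedness was never set. The sketch below restates that check over an illustrative standalone struct (field names mirror mif_cmpt_t); it is slightly stricter than the original in that it also rejects negative values coming back from atoi.

#include <stdbool.h>

struct cmpt_sketch {
    long width, height;       /* image-space extent */
    long sampperx, samppery;  /* sampling steps; 0 would divide by zero later */
    int prec;                 /* sample precision in bits */
    int sgnd;                 /* -1 until the "sgnd" tag is seen */
};

static bool cmpt_sketch_ok(const struct cmpt_sketch *c)
{
    if (c->sampperx <= 0 || c->samppery <= 0)
        return false;
    if (c->width <= 0 || c->height <= 0 || c->prec <= 0)
        return false;
    if (c->sgnd < 0)
        return false;  /* signedness tag missing */
    return true;
}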
mif_cod.c
mif_validate
/* * Copyright (c) 2001-2002 Michael David Adams. * All rights reserved. */ /* __START_OF_JASPER_LICENSE__ * * JasPer License Version 2.0 * * Copyright (c) 2001-2006 Michael David Adams * Copyright (c) 1999-2000 Image Power, Inc. * Copyright (c) 1999-2000 The University of British Columbia * * All rights reserved. * * Permission is hereby granted, free of charge, to any person (the * "User") obtaining a copy of this software and associated documentation * files (the "Software"), to deal in the Software without restriction, * including without limitation the rights to use, copy, modify, merge, * publish, distribute, and/or sell copies of the Software, and to permit * persons to whom the Software is furnished to do so, subject to the * following conditions: * * 1. The above copyright notices and this permission notice (which * includes the disclaimer below) shall be included in all copies or * substantial portions of the Software. * * 2. The name of a copyright holder shall not be used to endorse or * promote products derived from the Software without specific prior * written permission. * * THIS DISCLAIMER OF WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS * LICENSE. NO USE OF THE SOFTWARE IS AUTHORIZED HEREUNDER EXCEPT UNDER * THIS DISCLAIMER. THE SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS * "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A * PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL * INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING * FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. NO ASSURANCES ARE * PROVIDED BY THE COPYRIGHT HOLDERS THAT THE SOFTWARE DOES NOT INFRINGE * THE PATENT OR OTHER INTELLECTUAL PROPERTY RIGHTS OF ANY OTHER ENTITY. * EACH COPYRIGHT HOLDER DISCLAIMS ANY LIABILITY TO THE USER FOR CLAIMS * BROUGHT BY ANY OTHER ENTITY BASED ON INFRINGEMENT OF INTELLECTUAL * PROPERTY RIGHTS OR OTHERWISE. AS A CONDITION TO EXERCISING THE RIGHTS * GRANTED HEREUNDER, EACH USER HEREBY ASSUMES SOLE RESPONSIBILITY TO SECURE * ANY OTHER INTELLECTUAL PROPERTY RIGHTS NEEDED, IF ANY. THE SOFTWARE * IS NOT FAULT-TOLERANT AND IS NOT INTENDED FOR USE IN MISSION-CRITICAL * SYSTEMS, SUCH AS THOSE USED IN THE OPERATION OF NUCLEAR FACILITIES, * AIRCRAFT NAVIGATION OR COMMUNICATION SYSTEMS, AIR TRAFFIC CONTROL * SYSTEMS, DIRECT LIFE SUPPORT MACHINES, OR WEAPONS SYSTEMS, IN WHICH * THE FAILURE OF THE SOFTWARE OR SYSTEM COULD LEAD DIRECTLY TO DEATH, * PERSONAL INJURY, OR SEVERE PHYSICAL OR ENVIRONMENTAL DAMAGE ("HIGH * RISK ACTIVITIES"). THE COPYRIGHT HOLDERS SPECIFICALLY DISCLAIM ANY * EXPRESS OR IMPLIED WARRANTY OF FITNESS FOR HIGH RISK ACTIVITIES. * * __END_OF_JASPER_LICENSE__ */ /******************************************************************************\ * Includes. \******************************************************************************/ #include <assert.h> #include "jasper/jas_tvp.h" #include "jasper/jas_stream.h" #include "jasper/jas_image.h" #include "jasper/jas_string.h" #include "jasper/jas_malloc.h" #include "jasper/jas_debug.h" #include "mif_cod.h" /******************************************************************************\ * Local types. 
\******************************************************************************/ typedef enum { MIF_END = 0, MIF_CMPT } mif_tagid2_t; typedef enum { MIF_TLX = 0, MIF_TLY, MIF_WIDTH, MIF_HEIGHT, MIF_HSAMP, MIF_VSAMP, MIF_PREC, MIF_SGND, MIF_DATA } mif_tagid_t; /******************************************************************************\ * Local functions. \******************************************************************************/ static mif_hdr_t *mif_hdr_create(int maxcmpts); static void mif_hdr_destroy(mif_hdr_t *hdr); static int mif_hdr_growcmpts(mif_hdr_t *hdr, int maxcmpts); static mif_hdr_t *mif_hdr_get(jas_stream_t *in); static int mif_process_cmpt(mif_hdr_t *hdr, char *buf); static int mif_hdr_put(mif_hdr_t *hdr, jas_stream_t *out); static int mif_hdr_addcmpt(mif_hdr_t *hdr, int cmptno, mif_cmpt_t *cmpt); static mif_cmpt_t *mif_cmpt_create(void); static void mif_cmpt_destroy(mif_cmpt_t *cmpt); static char *mif_getline(jas_stream_t *jas_stream, char *buf, int bufsize); static int mif_getc(jas_stream_t *in); static mif_hdr_t *mif_makehdrfromimage(jas_image_t *image); /******************************************************************************\ * Local data. \******************************************************************************/ jas_taginfo_t mif_tags2[] = { {MIF_CMPT, "component"}, {MIF_END, "end"}, {-1, 0} }; jas_taginfo_t mif_tags[] = { {MIF_TLX, "tlx"}, {MIF_TLY, "tly"}, {MIF_WIDTH, "width"}, {MIF_HEIGHT, "height"}, {MIF_HSAMP, "sampperx"}, {MIF_VSAMP, "samppery"}, {MIF_PREC, "prec"}, {MIF_SGND, "sgnd"}, {MIF_DATA, "data"}, {-1, 0} }; /******************************************************************************\ * Code for load operation. \******************************************************************************/ /* Load an image from a stream in the MIF format. */ jas_image_t *mif_decode(jas_stream_t *in, char *optstr) { mif_hdr_t *hdr; jas_image_t *image; jas_image_t *tmpimage; jas_stream_t *tmpstream; int cmptno; mif_cmpt_t *cmpt; jas_image_cmptparm_t cmptparm; jas_seq2d_t *data; int_fast32_t x; int_fast32_t y; int bias; /* Avoid warnings about unused parameters. */ optstr = 0; hdr = 0; image = 0; tmpimage = 0; tmpstream = 0; data = 0; if (!(hdr = mif_hdr_get(in))) { goto error; } if (!(image = jas_image_create0())) { goto error; } for (cmptno = 0; cmptno < hdr->numcmpts; ++cmptno) { cmpt = hdr->cmpts[cmptno]; tmpstream = cmpt->data ? 
jas_stream_fopen(cmpt->data, "rb") : in; if (!tmpstream) { jas_eprintf("cannot open component file %s\n", cmpt->data); goto error; } if (!(tmpimage = jas_image_decode(tmpstream, -1, 0))) { goto error; } if (tmpstream != in) { jas_stream_close(tmpstream); tmpstream = 0; } if (!cmpt->width) { cmpt->width = jas_image_cmptwidth(tmpimage, 0); } if (!cmpt->height) { cmpt->height = jas_image_cmptwidth(tmpimage, 0); } if (!cmpt->prec) { cmpt->prec = jas_image_cmptprec(tmpimage, 0); } if (cmpt->sgnd < 0) { cmpt->sgnd = jas_image_cmptsgnd(tmpimage, 0); } cmptparm.tlx = cmpt->tlx; cmptparm.tly = cmpt->tly; cmptparm.hstep = cmpt->sampperx; cmptparm.vstep = cmpt->samppery; cmptparm.width = cmpt->width; cmptparm.height = cmpt->height; cmptparm.prec = cmpt->prec; cmptparm.sgnd = cmpt->sgnd; if (jas_image_addcmpt(image, jas_image_numcmpts(image), &cmptparm)) { goto error; } if (!(data = jas_seq2d_create(0, 0, cmpt->width, cmpt->height))) { goto error; } if (jas_image_readcmpt(tmpimage, 0, 0, 0, cmpt->width, cmpt->height, data)) { goto error; } if (cmpt->sgnd) { bias = 1 << (cmpt->prec - 1); for (y = 0; y < cmpt->height; ++y) { for (x = 0; x < cmpt->width; ++x) { *jas_seq2d_getref(data, x, y) -= bias; } } } if (jas_image_writecmpt(image, jas_image_numcmpts(image) - 1, 0, 0, cmpt->width, cmpt->height, data)) { goto error; } jas_seq2d_destroy(data); data = 0; jas_image_destroy(tmpimage); tmpimage = 0; } mif_hdr_destroy(hdr); hdr = 0; return image; error: if (image) { jas_image_destroy(image); } if (hdr) { mif_hdr_destroy(hdr); } if (tmpstream && tmpstream != in) { jas_stream_close(tmpstream); } if (tmpimage) { jas_image_destroy(tmpimage); } if (data) { jas_seq2d_destroy(data); } return 0; } /******************************************************************************\ * Code for save operation. \******************************************************************************/ /* Save an image to a stream in the the MIF format. */ int mif_encode(jas_image_t *image, jas_stream_t *out, char *optstr) { mif_hdr_t *hdr; jas_image_t *tmpimage; int fmt; int cmptno; mif_cmpt_t *cmpt; jas_image_cmptparm_t cmptparm; jas_seq2d_t *data; int_fast32_t x; int_fast32_t y; int bias; hdr = 0; tmpimage = 0; data = 0; if (optstr && *optstr != '\0') { jas_eprintf("warning: ignoring unsupported options\n"); } if ((fmt = jas_image_strtofmt("pnm")) < 0) { jas_eprintf("error: PNM support required\n"); goto error; } if (!(hdr = mif_makehdrfromimage(image))) { goto error; } if (mif_hdr_put(hdr, out)) { goto error; } /* Output component data. 
*/ for (cmptno = 0; cmptno < hdr->numcmpts; ++cmptno) { cmpt = hdr->cmpts[cmptno]; if (!cmpt->data) { if (!(tmpimage = jas_image_create0())) { goto error; } cmptparm.tlx = 0; cmptparm.tly = 0; cmptparm.hstep = cmpt->sampperx; cmptparm.vstep = cmpt->samppery; cmptparm.width = cmpt->width; cmptparm.height = cmpt->height; cmptparm.prec = cmpt->prec; cmptparm.sgnd = false; if (jas_image_addcmpt(tmpimage, jas_image_numcmpts(tmpimage), &cmptparm)) { goto error; } jas_image_setclrspc(tmpimage, JAS_CLRSPC_SGRAY); jas_image_setcmpttype(tmpimage, 0, JAS_IMAGE_CT_COLOR(JAS_CLRSPC_CHANIND_GRAY_Y)); if (!(data = jas_seq2d_create(0, 0, cmpt->width, cmpt->height))) { goto error; } if (jas_image_readcmpt(image, cmptno, 0, 0, cmpt->width, cmpt->height, data)) { goto error; } if (cmpt->sgnd) { bias = 1 << (cmpt->prec - 1); for (y = 0; y < cmpt->height; ++y) { for (x = 0; x < cmpt->width; ++x) { *jas_seq2d_getref(data, x, y) += bias; } } } if (jas_image_writecmpt(tmpimage, 0, 0, 0, cmpt->width, cmpt->height, data)) { goto error; } jas_seq2d_destroy(data); data = 0; if (jas_image_encode(tmpimage, out, fmt, 0)) { goto error; } jas_image_destroy(tmpimage); tmpimage = 0; } } mif_hdr_destroy(hdr); return 0; error: if (hdr) { mif_hdr_destroy(hdr); } if (tmpimage) { jas_image_destroy(tmpimage); } if (data) { jas_seq2d_destroy(data); } return -1; } /******************************************************************************\ * Code for validate operation. \******************************************************************************/ int mif_validate(jas_stream_t *in) { uchar buf[MIF_MAGICLEN]; uint_fast32_t magic; int i; int n; assert(JAS_STREAM_MAXPUTBACK >= MIF_MAGICLEN); /* Read the validation data (i.e., the data used for detecting the format). */ if ((n = jas_stream_read(in, buf, MIF_MAGICLEN)) < 0) { return -1; } /* Put the validation data back onto the stream, so that the stream position will not be changed. */ for (i = n - 1; i >= 0; --i) { if (jas_stream_ungetc(in, buf[i]) == EOF) { return -1; } } /* Was enough data read? */ if (n < MIF_MAGICLEN) { return -1; } /* Compute the signature value. */ magic = (JAS_CAST(uint_fast32_t, buf[0]) << 24) | (JAS_CAST(uint_fast32_t, buf[1]) << 16) | (JAS_CAST(uint_fast32_t, buf[2]) << 8) | buf[3]; /* Ensure that the signature is correct for this format. */ if (magic != MIF_MAGIC) { return -1; } return 0; } /******************************************************************************\ * Code for MIF header class. \******************************************************************************/ static mif_hdr_t *mif_hdr_create(int maxcmpts) { mif_hdr_t *hdr; if (!(hdr = jas_malloc(sizeof(mif_hdr_t)))) { return 0; } hdr->numcmpts = 0; hdr->maxcmpts = 0; hdr->cmpts = 0; if (mif_hdr_growcmpts(hdr, maxcmpts)) { mif_hdr_destroy(hdr); return 0; } return hdr; } static void mif_hdr_destroy(mif_hdr_t *hdr) { int cmptno; if (hdr->cmpts) { for (cmptno = 0; cmptno < hdr->numcmpts; ++cmptno) { mif_cmpt_destroy(hdr->cmpts[cmptno]); } jas_free(hdr->cmpts); } jas_free(hdr); } static int mif_hdr_growcmpts(mif_hdr_t *hdr, int maxcmpts) { int cmptno; mif_cmpt_t **newcmpts; assert(maxcmpts >= hdr->numcmpts); newcmpts = (!hdr->cmpts) ? 
jas_alloc2(maxcmpts, sizeof(mif_cmpt_t *)) : jas_realloc2(hdr->cmpts, maxcmpts, sizeof(mif_cmpt_t *)); if (!newcmpts) { return -1; } hdr->maxcmpts = maxcmpts; hdr->cmpts = newcmpts; for (cmptno = hdr->numcmpts; cmptno < hdr->maxcmpts; ++cmptno) { hdr->cmpts[cmptno] = 0; } return 0; } static mif_hdr_t *mif_hdr_get(jas_stream_t *in) { uchar magicbuf[MIF_MAGICLEN]; char buf[4096]; mif_hdr_t *hdr; bool done; jas_tvparser_t *tvp; int id; hdr = 0; tvp = 0; if (jas_stream_read(in, magicbuf, MIF_MAGICLEN) != MIF_MAGICLEN) { goto error; } if (magicbuf[0] != (MIF_MAGIC >> 24) || magicbuf[1] != ((MIF_MAGIC >> 16) & 0xff) || magicbuf[2] != ((MIF_MAGIC >> 8) & 0xff) || magicbuf[3] != (MIF_MAGIC & 0xff)) { jas_eprintf("error: bad signature\n"); goto error; } if (!(hdr = mif_hdr_create(0))) { goto error; } done = false; do { if (!mif_getline(in, buf, sizeof(buf))) { jas_eprintf("mif_getline failed\n"); goto error; } if (buf[0] == '\0') { continue; } JAS_DBGLOG(10, ("header line: len=%d; %s\n", strlen(buf), buf)); if (!(tvp = jas_tvparser_create(buf))) { jas_eprintf("jas_tvparser_create failed\n"); goto error; } if (jas_tvparser_next(tvp)) { jas_eprintf("cannot get record type\n"); goto error; } id = jas_taginfo_nonull(jas_taginfos_lookup(mif_tags2, jas_tvparser_gettag(tvp)))->id; jas_tvparser_destroy(tvp); tvp = 0; switch (id) { case MIF_CMPT: if (mif_process_cmpt(hdr, buf)) { jas_eprintf("cannot get component information\n"); goto error; } break; case MIF_END: done = 1; break; default: jas_eprintf("invalid header information: %s\n", buf); goto error; break; } } while (!done); return hdr; error: if (hdr) { mif_hdr_destroy(hdr); } if (tvp) { jas_tvparser_destroy(tvp); } return 0; } static int mif_process_cmpt(mif_hdr_t *hdr, char *buf) { jas_tvparser_t *tvp; mif_cmpt_t *cmpt; int id; cmpt = 0; tvp = 0; if (!(cmpt = mif_cmpt_create())) { jas_eprintf("cannot create component\n"); goto error; } cmpt->tlx = 0; cmpt->tly = 0; cmpt->sampperx = 0; cmpt->samppery = 0; cmpt->width = 0; cmpt->height = 0; cmpt->prec = 0; cmpt->sgnd = -1; cmpt->data = 0; if (!(tvp = jas_tvparser_create(buf))) { jas_eprintf("cannot create parser\n"); goto error; } // Skip the component keyword if ((id = jas_tvparser_next(tvp))) { // This should never happen. abort(); } // Process the tag-value pairs. 
while (!(id = jas_tvparser_next(tvp))) { switch (jas_taginfo_nonull(jas_taginfos_lookup(mif_tags, jas_tvparser_gettag(tvp)))->id) { case MIF_TLX: cmpt->tlx = atoi(jas_tvparser_getval(tvp)); break; case MIF_TLY: cmpt->tly = atoi(jas_tvparser_getval(tvp)); break; case MIF_WIDTH: cmpt->width = atoi(jas_tvparser_getval(tvp)); break; case MIF_HEIGHT: cmpt->height = atoi(jas_tvparser_getval(tvp)); break; case MIF_HSAMP: cmpt->sampperx = atoi(jas_tvparser_getval(tvp)); break; case MIF_VSAMP: cmpt->samppery = atoi(jas_tvparser_getval(tvp)); break; case MIF_PREC: cmpt->prec = atoi(jas_tvparser_getval(tvp)); break; case MIF_SGND: cmpt->sgnd = atoi(jas_tvparser_getval(tvp)); break; case MIF_DATA: if (!(cmpt->data = jas_strdup(jas_tvparser_getval(tvp)))) { goto error; } break; default: jas_eprintf("invalid component information: %s\n", buf); goto error; break; } } if (!cmpt->sampperx || !cmpt->samppery) { goto error; } if (!cmpt->width || !cmpt->height || !cmpt->prec || cmpt->sgnd < 0) { goto error; } if (mif_hdr_addcmpt(hdr, hdr->numcmpts, cmpt)) { jas_eprintf("cannot add component\n"); goto error; } jas_tvparser_destroy(tvp); return 0; error: if (cmpt) { mif_cmpt_destroy(cmpt); } if (tvp) { jas_tvparser_destroy(tvp); } return -1; } static int mif_hdr_put(mif_hdr_t *hdr, jas_stream_t *out) { int cmptno; mif_cmpt_t *cmpt; /* Output signature. */ jas_stream_putc(out, (MIF_MAGIC >> 24) & 0xff); jas_stream_putc(out, (MIF_MAGIC >> 16) & 0xff); jas_stream_putc(out, (MIF_MAGIC >> 8) & 0xff); jas_stream_putc(out, MIF_MAGIC & 0xff); /* Output component information. */ for (cmptno = 0; cmptno < hdr->numcmpts; ++cmptno) { cmpt = hdr->cmpts[cmptno]; jas_stream_printf(out, "component tlx=%ld tly=%ld " "sampperx=%ld samppery=%ld width=%ld height=%ld prec=%d sgnd=%d", cmpt->tlx, cmpt->tly, cmpt->sampperx, cmpt->samppery, cmpt->width, cmpt->height, cmpt->prec, cmpt->sgnd); if (cmpt->data) { jas_stream_printf(out, " data=%s", cmpt->data); } jas_stream_printf(out, "\n"); } /* Output end of header indicator. */ jas_stream_printf(out, "end\n"); return 0; } static int mif_hdr_addcmpt(mif_hdr_t *hdr, int cmptno, mif_cmpt_t *cmpt) { assert(cmptno >= hdr->numcmpts); if (hdr->numcmpts >= hdr->maxcmpts) { if (mif_hdr_growcmpts(hdr, hdr->numcmpts + 128)) { return -1; } } hdr->cmpts[hdr->numcmpts] = cmpt; ++hdr->numcmpts; return 0; } /******************************************************************************\ * Code for MIF component class. \******************************************************************************/ static mif_cmpt_t *mif_cmpt_create() { mif_cmpt_t *cmpt; if (!(cmpt = jas_malloc(sizeof(mif_cmpt_t)))) { return 0; } memset(cmpt, 0, sizeof(mif_cmpt_t)); return cmpt; } static void mif_cmpt_destroy(mif_cmpt_t *cmpt) { if (cmpt->data) { jas_free(cmpt->data); } jas_free(cmpt); } /******************************************************************************\ * MIF parsing code. 
\******************************************************************************/ static char *mif_getline(jas_stream_t *stream, char *buf, int bufsize) { int c; char *bufptr; assert(bufsize > 0); bufptr = buf; while (bufsize > 1) { if ((c = mif_getc(stream)) == EOF) { break; } *bufptr++ = c; --bufsize; if (c == '\n') { break; } } *bufptr = '\0'; if (!(bufptr = strchr(buf, '\n'))) { return 0; } *bufptr = '\0'; return buf; } static int mif_getc(jas_stream_t *in) { int c; bool done; done = false; do { switch (c = jas_stream_getc(in)) { case EOF: done = true; break; case '#': for (;;) { if ((c = jas_stream_getc(in)) == EOF) { done = true; break; } if (c == '\n') { done = true; break; } } break; case '\\': if (jas_stream_peekc(in) == '\n') { jas_stream_getc(in); } break; default: done = true; break; } } while (!done); return c; } /******************************************************************************\ * Miscellaneous functions. \******************************************************************************/ static mif_hdr_t *mif_makehdrfromimage(jas_image_t *image) { mif_hdr_t *hdr; int cmptno; mif_cmpt_t *cmpt; if (!(hdr = mif_hdr_create(jas_image_numcmpts(image)))) { return 0; } hdr->magic = MIF_MAGIC; hdr->numcmpts = jas_image_numcmpts(image); for (cmptno = 0; cmptno < hdr->numcmpts; ++cmptno) { if (!(hdr->cmpts[cmptno] = jas_malloc(sizeof(mif_cmpt_t)))) { goto error; } cmpt = hdr->cmpts[cmptno]; cmpt->tlx = jas_image_cmpttlx(image, cmptno); cmpt->tly = jas_image_cmpttly(image, cmptno); cmpt->width = jas_image_cmptwidth(image, cmptno); cmpt->height = jas_image_cmptheight(image, cmptno); cmpt->sampperx = jas_image_cmpthstep(image, cmptno); cmpt->samppery = jas_image_cmptvstep(image, cmptno); cmpt->prec = jas_image_cmptprec(image, cmptno); cmpt->sgnd = jas_image_cmptsgnd(image, cmptno); cmpt->data = 0; } return hdr; error: for (cmptno = 0; cmptno < hdr->numcmpts; ++cmptno) { if (hdr->cmpts[cmptno]) { jas_free(hdr->cmpts[cmptno]); } } if (hdr) { jas_free(hdr); } return 0; }
/* * Copyright (c) 2001-2002 Michael David Adams. * All rights reserved. */ /* __START_OF_JASPER_LICENSE__ * * JasPer License Version 2.0 * * Copyright (c) 2001-2006 Michael David Adams * Copyright (c) 1999-2000 Image Power, Inc. * Copyright (c) 1999-2000 The University of British Columbia * * All rights reserved. * * Permission is hereby granted, free of charge, to any person (the * "User") obtaining a copy of this software and associated documentation * files (the "Software"), to deal in the Software without restriction, * including without limitation the rights to use, copy, modify, merge, * publish, distribute, and/or sell copies of the Software, and to permit * persons to whom the Software is furnished to do so, subject to the * following conditions: * * 1. The above copyright notices and this permission notice (which * includes the disclaimer below) shall be included in all copies or * substantial portions of the Software. * * 2. The name of a copyright holder shall not be used to endorse or * promote products derived from the Software without specific prior * written permission. * * THIS DISCLAIMER OF WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS * LICENSE. NO USE OF THE SOFTWARE IS AUTHORIZED HEREUNDER EXCEPT UNDER * THIS DISCLAIMER. THE SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS * "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A * PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL * INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING * FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. NO ASSURANCES ARE * PROVIDED BY THE COPYRIGHT HOLDERS THAT THE SOFTWARE DOES NOT INFRINGE * THE PATENT OR OTHER INTELLECTUAL PROPERTY RIGHTS OF ANY OTHER ENTITY. * EACH COPYRIGHT HOLDER DISCLAIMS ANY LIABILITY TO THE USER FOR CLAIMS * BROUGHT BY ANY OTHER ENTITY BASED ON INFRINGEMENT OF INTELLECTUAL * PROPERTY RIGHTS OR OTHERWISE. AS A CONDITION TO EXERCISING THE RIGHTS * GRANTED HEREUNDER, EACH USER HEREBY ASSUMES SOLE RESPONSIBILITY TO SECURE * ANY OTHER INTELLECTUAL PROPERTY RIGHTS NEEDED, IF ANY. THE SOFTWARE * IS NOT FAULT-TOLERANT AND IS NOT INTENDED FOR USE IN MISSION-CRITICAL * SYSTEMS, SUCH AS THOSE USED IN THE OPERATION OF NUCLEAR FACILITIES, * AIRCRAFT NAVIGATION OR COMMUNICATION SYSTEMS, AIR TRAFFIC CONTROL * SYSTEMS, DIRECT LIFE SUPPORT MACHINES, OR WEAPONS SYSTEMS, IN WHICH * THE FAILURE OF THE SOFTWARE OR SYSTEM COULD LEAD DIRECTLY TO DEATH, * PERSONAL INJURY, OR SEVERE PHYSICAL OR ENVIRONMENTAL DAMAGE ("HIGH * RISK ACTIVITIES"). THE COPYRIGHT HOLDERS SPECIFICALLY DISCLAIM ANY * EXPRESS OR IMPLIED WARRANTY OF FITNESS FOR HIGH RISK ACTIVITIES. * * __END_OF_JASPER_LICENSE__ */ /******************************************************************************\ * Includes. \******************************************************************************/ #include <assert.h> #include "jasper/jas_tvp.h" #include "jasper/jas_stream.h" #include "jasper/jas_image.h" #include "jasper/jas_string.h" #include "jasper/jas_malloc.h" #include "jasper/jas_debug.h" #include "mif_cod.h" /******************************************************************************\ * Local types. 
\******************************************************************************/ typedef enum { MIF_END = 0, MIF_CMPT } mif_tagid2_t; typedef enum { MIF_TLX = 0, MIF_TLY, MIF_WIDTH, MIF_HEIGHT, MIF_HSAMP, MIF_VSAMP, MIF_PREC, MIF_SGND, MIF_DATA } mif_tagid_t; /******************************************************************************\ * Local functions. \******************************************************************************/ static mif_hdr_t *mif_hdr_create(int maxcmpts); static void mif_hdr_destroy(mif_hdr_t *hdr); static int mif_hdr_growcmpts(mif_hdr_t *hdr, int maxcmpts); static mif_hdr_t *mif_hdr_get(jas_stream_t *in); static int mif_process_cmpt(mif_hdr_t *hdr, char *buf); static int mif_hdr_put(mif_hdr_t *hdr, jas_stream_t *out); static int mif_hdr_addcmpt(mif_hdr_t *hdr, int cmptno, mif_cmpt_t *cmpt); static mif_cmpt_t *mif_cmpt_create(void); static void mif_cmpt_destroy(mif_cmpt_t *cmpt); static char *mif_getline(jas_stream_t *jas_stream, char *buf, int bufsize); static int mif_getc(jas_stream_t *in); static mif_hdr_t *mif_makehdrfromimage(jas_image_t *image); /******************************************************************************\ * Local data. \******************************************************************************/ jas_taginfo_t mif_tags2[] = { {MIF_CMPT, "component"}, {MIF_END, "end"}, {-1, 0} }; jas_taginfo_t mif_tags[] = { {MIF_TLX, "tlx"}, {MIF_TLY, "tly"}, {MIF_WIDTH, "width"}, {MIF_HEIGHT, "height"}, {MIF_HSAMP, "sampperx"}, {MIF_VSAMP, "samppery"}, {MIF_PREC, "prec"}, {MIF_SGND, "sgnd"}, {MIF_DATA, "data"}, {-1, 0} }; /******************************************************************************\ * Code for load operation. \******************************************************************************/ /* Load an image from a stream in the MIF format. */ jas_image_t *mif_decode(jas_stream_t *in, char *optstr) { mif_hdr_t *hdr; jas_image_t *image; jas_image_t *tmpimage; jas_stream_t *tmpstream; int cmptno; mif_cmpt_t *cmpt; jas_image_cmptparm_t cmptparm; jas_seq2d_t *data; int_fast32_t x; int_fast32_t y; int bias; /* Avoid warnings about unused parameters. */ optstr = 0; hdr = 0; image = 0; tmpimage = 0; tmpstream = 0; data = 0; if (!(hdr = mif_hdr_get(in))) { goto error; } if (!(image = jas_image_create0())) { goto error; } for (cmptno = 0; cmptno < hdr->numcmpts; ++cmptno) { cmpt = hdr->cmpts[cmptno]; tmpstream = cmpt->data ? 
jas_stream_fopen(cmpt->data, "rb") : in; if (!tmpstream) { jas_eprintf("cannot open component file %s\n", cmpt->data); goto error; } if (!(tmpimage = jas_image_decode(tmpstream, -1, 0))) { goto error; } if (tmpstream != in) { jas_stream_close(tmpstream); tmpstream = 0; } if (!cmpt->width) { cmpt->width = jas_image_cmptwidth(tmpimage, 0); } if (!cmpt->height) { cmpt->height = jas_image_cmptwidth(tmpimage, 0); } if (!cmpt->prec) { cmpt->prec = jas_image_cmptprec(tmpimage, 0); } if (cmpt->sgnd < 0) { cmpt->sgnd = jas_image_cmptsgnd(tmpimage, 0); } cmptparm.tlx = cmpt->tlx; cmptparm.tly = cmpt->tly; cmptparm.hstep = cmpt->sampperx; cmptparm.vstep = cmpt->samppery; cmptparm.width = cmpt->width; cmptparm.height = cmpt->height; cmptparm.prec = cmpt->prec; cmptparm.sgnd = cmpt->sgnd; if (jas_image_addcmpt(image, jas_image_numcmpts(image), &cmptparm)) { goto error; } if (!(data = jas_seq2d_create(0, 0, cmpt->width, cmpt->height))) { goto error; } if (jas_image_readcmpt(tmpimage, 0, 0, 0, cmpt->width, cmpt->height, data)) { goto error; } if (cmpt->sgnd) { bias = 1 << (cmpt->prec - 1); for (y = 0; y < cmpt->height; ++y) { for (x = 0; x < cmpt->width; ++x) { *jas_seq2d_getref(data, x, y) -= bias; } } } if (jas_image_writecmpt(image, jas_image_numcmpts(image) - 1, 0, 0, cmpt->width, cmpt->height, data)) { goto error; } jas_seq2d_destroy(data); data = 0; jas_image_destroy(tmpimage); tmpimage = 0; } mif_hdr_destroy(hdr); hdr = 0; return image; error: if (image) { jas_image_destroy(image); } if (hdr) { mif_hdr_destroy(hdr); } if (tmpstream && tmpstream != in) { jas_stream_close(tmpstream); } if (tmpimage) { jas_image_destroy(tmpimage); } if (data) { jas_seq2d_destroy(data); } return 0; } /******************************************************************************\ * Code for save operation. \******************************************************************************/ /* Save an image to a stream in the the MIF format. */ int mif_encode(jas_image_t *image, jas_stream_t *out, char *optstr) { mif_hdr_t *hdr; jas_image_t *tmpimage; int fmt; int cmptno; mif_cmpt_t *cmpt; jas_image_cmptparm_t cmptparm; jas_seq2d_t *data; int_fast32_t x; int_fast32_t y; int bias; hdr = 0; tmpimage = 0; data = 0; if (optstr && *optstr != '\0') { jas_eprintf("warning: ignoring unsupported options\n"); } if ((fmt = jas_image_strtofmt("pnm")) < 0) { jas_eprintf("error: PNM support required\n"); goto error; } if (!(hdr = mif_makehdrfromimage(image))) { goto error; } if (mif_hdr_put(hdr, out)) { goto error; } /* Output component data. 
*/ for (cmptno = 0; cmptno < hdr->numcmpts; ++cmptno) { cmpt = hdr->cmpts[cmptno]; if (!cmpt->data) { if (!(tmpimage = jas_image_create0())) { goto error; } cmptparm.tlx = 0; cmptparm.tly = 0; cmptparm.hstep = cmpt->sampperx; cmptparm.vstep = cmpt->samppery; cmptparm.width = cmpt->width; cmptparm.height = cmpt->height; cmptparm.prec = cmpt->prec; cmptparm.sgnd = false; if (jas_image_addcmpt(tmpimage, jas_image_numcmpts(tmpimage), &cmptparm)) { goto error; } jas_image_setclrspc(tmpimage, JAS_CLRSPC_SGRAY); jas_image_setcmpttype(tmpimage, 0, JAS_IMAGE_CT_COLOR(JAS_CLRSPC_CHANIND_GRAY_Y)); if (!(data = jas_seq2d_create(0, 0, cmpt->width, cmpt->height))) { goto error; } if (jas_image_readcmpt(image, cmptno, 0, 0, cmpt->width, cmpt->height, data)) { goto error; } if (cmpt->sgnd) { bias = 1 << (cmpt->prec - 1); for (y = 0; y < cmpt->height; ++y) { for (x = 0; x < cmpt->width; ++x) { *jas_seq2d_getref(data, x, y) += bias; } } } if (jas_image_writecmpt(tmpimage, 0, 0, 0, cmpt->width, cmpt->height, data)) { goto error; } jas_seq2d_destroy(data); data = 0; if (jas_image_encode(tmpimage, out, fmt, 0)) { goto error; } jas_image_destroy(tmpimage); tmpimage = 0; } } mif_hdr_destroy(hdr); return 0; error: if (hdr) { mif_hdr_destroy(hdr); } if (tmpimage) { jas_image_destroy(tmpimage); } if (data) { jas_seq2d_destroy(data); } return -1; } /******************************************************************************\ * Code for validate operation. \******************************************************************************/ int mif_validate(jas_stream_t *in) { jas_uchar buf[MIF_MAGICLEN]; uint_fast32_t magic; int i; int n; assert(JAS_STREAM_MAXPUTBACK >= MIF_MAGICLEN); /* Read the validation data (i.e., the data used for detecting the format). */ if ((n = jas_stream_read(in, buf, MIF_MAGICLEN)) < 0) { return -1; } /* Put the validation data back onto the stream, so that the stream position will not be changed. */ for (i = n - 1; i >= 0; --i) { if (jas_stream_ungetc(in, buf[i]) == EOF) { return -1; } } /* Was enough data read? */ if (n < MIF_MAGICLEN) { return -1; } /* Compute the signature value. */ magic = (JAS_CAST(uint_fast32_t, buf[0]) << 24) | (JAS_CAST(uint_fast32_t, buf[1]) << 16) | (JAS_CAST(uint_fast32_t, buf[2]) << 8) | buf[3]; /* Ensure that the signature is correct for this format. */ if (magic != MIF_MAGIC) { return -1; } return 0; } /******************************************************************************\ * Code for MIF header class. \******************************************************************************/ static mif_hdr_t *mif_hdr_create(int maxcmpts) { mif_hdr_t *hdr; if (!(hdr = jas_malloc(sizeof(mif_hdr_t)))) { return 0; } hdr->numcmpts = 0; hdr->maxcmpts = 0; hdr->cmpts = 0; if (mif_hdr_growcmpts(hdr, maxcmpts)) { mif_hdr_destroy(hdr); return 0; } return hdr; } static void mif_hdr_destroy(mif_hdr_t *hdr) { int cmptno; if (hdr->cmpts) { for (cmptno = 0; cmptno < hdr->numcmpts; ++cmptno) { mif_cmpt_destroy(hdr->cmpts[cmptno]); } jas_free(hdr->cmpts); } jas_free(hdr); } static int mif_hdr_growcmpts(mif_hdr_t *hdr, int maxcmpts) { int cmptno; mif_cmpt_t **newcmpts; assert(maxcmpts >= hdr->numcmpts); newcmpts = (!hdr->cmpts) ? 
jas_alloc2(maxcmpts, sizeof(mif_cmpt_t *)) : jas_realloc2(hdr->cmpts, maxcmpts, sizeof(mif_cmpt_t *)); if (!newcmpts) { return -1; } hdr->maxcmpts = maxcmpts; hdr->cmpts = newcmpts; for (cmptno = hdr->numcmpts; cmptno < hdr->maxcmpts; ++cmptno) { hdr->cmpts[cmptno] = 0; } return 0; } static mif_hdr_t *mif_hdr_get(jas_stream_t *in) { jas_uchar magicbuf[MIF_MAGICLEN]; char buf[4096]; mif_hdr_t *hdr; bool done; jas_tvparser_t *tvp; int id; hdr = 0; tvp = 0; if (jas_stream_read(in, magicbuf, MIF_MAGICLEN) != MIF_MAGICLEN) { goto error; } if (magicbuf[0] != (MIF_MAGIC >> 24) || magicbuf[1] != ((MIF_MAGIC >> 16) & 0xff) || magicbuf[2] != ((MIF_MAGIC >> 8) & 0xff) || magicbuf[3] != (MIF_MAGIC & 0xff)) { jas_eprintf("error: bad signature\n"); goto error; } if (!(hdr = mif_hdr_create(0))) { goto error; } done = false; do { if (!mif_getline(in, buf, sizeof(buf))) { jas_eprintf("mif_getline failed\n"); goto error; } if (buf[0] == '\0') { continue; } JAS_DBGLOG(10, ("header line: len=%d; %s\n", strlen(buf), buf)); if (!(tvp = jas_tvparser_create(buf))) { jas_eprintf("jas_tvparser_create failed\n"); goto error; } if (jas_tvparser_next(tvp)) { jas_eprintf("cannot get record type\n"); goto error; } id = jas_taginfo_nonull(jas_taginfos_lookup(mif_tags2, jas_tvparser_gettag(tvp)))->id; jas_tvparser_destroy(tvp); tvp = 0; switch (id) { case MIF_CMPT: if (mif_process_cmpt(hdr, buf)) { jas_eprintf("cannot get component information\n"); goto error; } break; case MIF_END: done = 1; break; default: jas_eprintf("invalid header information: %s\n", buf); goto error; break; } } while (!done); return hdr; error: if (hdr) { mif_hdr_destroy(hdr); } if (tvp) { jas_tvparser_destroy(tvp); } return 0; } static int mif_process_cmpt(mif_hdr_t *hdr, char *buf) { jas_tvparser_t *tvp; mif_cmpt_t *cmpt; int id; cmpt = 0; tvp = 0; if (!(cmpt = mif_cmpt_create())) { jas_eprintf("cannot create component\n"); goto error; } cmpt->tlx = 0; cmpt->tly = 0; cmpt->sampperx = 0; cmpt->samppery = 0; cmpt->width = 0; cmpt->height = 0; cmpt->prec = 0; cmpt->sgnd = -1; cmpt->data = 0; if (!(tvp = jas_tvparser_create(buf))) { jas_eprintf("cannot create parser\n"); goto error; } // Skip the component keyword if ((id = jas_tvparser_next(tvp))) { // This should never happen. abort(); } // Process the tag-value pairs. 
while (!(id = jas_tvparser_next(tvp))) { switch (jas_taginfo_nonull(jas_taginfos_lookup(mif_tags, jas_tvparser_gettag(tvp)))->id) { case MIF_TLX: cmpt->tlx = atoi(jas_tvparser_getval(tvp)); break; case MIF_TLY: cmpt->tly = atoi(jas_tvparser_getval(tvp)); break; case MIF_WIDTH: cmpt->width = atoi(jas_tvparser_getval(tvp)); break; case MIF_HEIGHT: cmpt->height = atoi(jas_tvparser_getval(tvp)); break; case MIF_HSAMP: cmpt->sampperx = atoi(jas_tvparser_getval(tvp)); break; case MIF_VSAMP: cmpt->samppery = atoi(jas_tvparser_getval(tvp)); break; case MIF_PREC: cmpt->prec = atoi(jas_tvparser_getval(tvp)); break; case MIF_SGND: cmpt->sgnd = atoi(jas_tvparser_getval(tvp)); break; case MIF_DATA: if (!(cmpt->data = jas_strdup(jas_tvparser_getval(tvp)))) { goto error; } break; default: jas_eprintf("invalid component information: %s\n", buf); goto error; break; } } if (!cmpt->sampperx || !cmpt->samppery) { goto error; } if (!cmpt->width || !cmpt->height || !cmpt->prec || cmpt->sgnd < 0) { goto error; } if (mif_hdr_addcmpt(hdr, hdr->numcmpts, cmpt)) { jas_eprintf("cannot add component\n"); goto error; } jas_tvparser_destroy(tvp); return 0; error: if (cmpt) { mif_cmpt_destroy(cmpt); } if (tvp) { jas_tvparser_destroy(tvp); } return -1; } static int mif_hdr_put(mif_hdr_t *hdr, jas_stream_t *out) { int cmptno; mif_cmpt_t *cmpt; /* Output signature. */ jas_stream_putc(out, (MIF_MAGIC >> 24) & 0xff); jas_stream_putc(out, (MIF_MAGIC >> 16) & 0xff); jas_stream_putc(out, (MIF_MAGIC >> 8) & 0xff); jas_stream_putc(out, MIF_MAGIC & 0xff); /* Output component information. */ for (cmptno = 0; cmptno < hdr->numcmpts; ++cmptno) { cmpt = hdr->cmpts[cmptno]; jas_stream_printf(out, "component tlx=%ld tly=%ld " "sampperx=%ld samppery=%ld width=%ld height=%ld prec=%d sgnd=%d", cmpt->tlx, cmpt->tly, cmpt->sampperx, cmpt->samppery, cmpt->width, cmpt->height, cmpt->prec, cmpt->sgnd); if (cmpt->data) { jas_stream_printf(out, " data=%s", cmpt->data); } jas_stream_printf(out, "\n"); } /* Output end of header indicator. */ jas_stream_printf(out, "end\n"); return 0; } static int mif_hdr_addcmpt(mif_hdr_t *hdr, int cmptno, mif_cmpt_t *cmpt) { assert(cmptno >= hdr->numcmpts); if (hdr->numcmpts >= hdr->maxcmpts) { if (mif_hdr_growcmpts(hdr, hdr->numcmpts + 128)) { return -1; } } hdr->cmpts[hdr->numcmpts] = cmpt; ++hdr->numcmpts; return 0; } /******************************************************************************\ * Code for MIF component class. \******************************************************************************/ static mif_cmpt_t *mif_cmpt_create() { mif_cmpt_t *cmpt; if (!(cmpt = jas_malloc(sizeof(mif_cmpt_t)))) { return 0; } memset(cmpt, 0, sizeof(mif_cmpt_t)); return cmpt; } static void mif_cmpt_destroy(mif_cmpt_t *cmpt) { if (cmpt->data) { jas_free(cmpt->data); } jas_free(cmpt); } /******************************************************************************\ * MIF parsing code. 
\******************************************************************************/ static char *mif_getline(jas_stream_t *stream, char *buf, int bufsize) { int c; char *bufptr; assert(bufsize > 0); bufptr = buf; while (bufsize > 1) { if ((c = mif_getc(stream)) == EOF) { break; } *bufptr++ = c; --bufsize; if (c == '\n') { break; } } *bufptr = '\0'; if (!(bufptr = strchr(buf, '\n'))) { return 0; } *bufptr = '\0'; return buf; } static int mif_getc(jas_stream_t *in) { int c; bool done; done = false; do { switch (c = jas_stream_getc(in)) { case EOF: done = true; break; case '#': for (;;) { if ((c = jas_stream_getc(in)) == EOF) { done = true; break; } if (c == '\n') { done = true; break; } } break; case '\\': if (jas_stream_peekc(in) == '\n') { jas_stream_getc(in); } break; default: done = true; break; } } while (!done); return c; } /******************************************************************************\ * Miscellaneous functions. \******************************************************************************/ static mif_hdr_t *mif_makehdrfromimage(jas_image_t *image) { mif_hdr_t *hdr; int cmptno; mif_cmpt_t *cmpt; if (!(hdr = mif_hdr_create(jas_image_numcmpts(image)))) { return 0; } hdr->magic = MIF_MAGIC; hdr->numcmpts = jas_image_numcmpts(image); for (cmptno = 0; cmptno < hdr->numcmpts; ++cmptno) { if (!(hdr->cmpts[cmptno] = jas_malloc(sizeof(mif_cmpt_t)))) { goto error; } cmpt = hdr->cmpts[cmptno]; cmpt->tlx = jas_image_cmpttlx(image, cmptno); cmpt->tly = jas_image_cmpttly(image, cmptno); cmpt->width = jas_image_cmptwidth(image, cmptno); cmpt->height = jas_image_cmptheight(image, cmptno); cmpt->sampperx = jas_image_cmpthstep(image, cmptno); cmpt->samppery = jas_image_cmptvstep(image, cmptno); cmpt->prec = jas_image_cmptprec(image, cmptno); cmpt->sgnd = jas_image_cmptsgnd(image, cmptno); cmpt->data = 0; } return hdr; error: for (cmptno = 0; cmptno < hdr->numcmpts; ++cmptno) { if (hdr->cmpts[cmptno]) { jas_free(hdr->cmpts[cmptno]); } } if (hdr) { jas_free(hdr); } return 0; }
int mif_validate(jas_stream_t *in) { uchar buf[MIF_MAGICLEN]; uint_fast32_t magic; int i; int n; assert(JAS_STREAM_MAXPUTBACK >= MIF_MAGICLEN); /* Read the validation data (i.e., the data used for detecting the format). */ if ((n = jas_stream_read(in, buf, MIF_MAGICLEN)) < 0) { return -1; } /* Put the validation data back onto the stream, so that the stream position will not be changed. */ for (i = n - 1; i >= 0; --i) { if (jas_stream_ungetc(in, buf[i]) == EOF) { return -1; } } /* Was enough data read? */ if (n < MIF_MAGICLEN) { return -1; } /* Compute the signature value. */ magic = (JAS_CAST(uint_fast32_t, buf[0]) << 24) | (JAS_CAST(uint_fast32_t, buf[1]) << 16) | (JAS_CAST(uint_fast32_t, buf[2]) << 8) | buf[3]; /* Ensure that the signature is correct for this format. */ if (magic != MIF_MAGIC) { return -1; } return 0; }
int mif_validate(jas_stream_t *in) { jas_uchar buf[MIF_MAGICLEN]; uint_fast32_t magic; int i; int n; assert(JAS_STREAM_MAXPUTBACK >= MIF_MAGICLEN); /* Read the validation data (i.e., the data used for detecting the format). */ if ((n = jas_stream_read(in, buf, MIF_MAGICLEN)) < 0) { return -1; } /* Put the validation data back onto the stream, so that the stream position will not be changed. */ for (i = n - 1; i >= 0; --i) { if (jas_stream_ungetc(in, buf[i]) == EOF) { return -1; } } /* Was enough data read? */ if (n < MIF_MAGICLEN) { return -1; } /* Compute the signature value. */ magic = (JAS_CAST(uint_fast32_t, buf[0]) << 24) | (JAS_CAST(uint_fast32_t, buf[1]) << 16) | (JAS_CAST(uint_fast32_t, buf[2]) << 8) | buf[3]; /* Ensure that the signature is correct for this format. */ if (magic != MIF_MAGIC) { return -1; } return 0; }
{'added': [(373, '\tjas_uchar buf[MIF_MAGICLEN];'), (465, '\tjas_uchar magicbuf[MIF_MAGICLEN];')], 'deleted': [(373, '\tuchar buf[MIF_MAGICLEN];'), (465, '\tuchar magicbuf[MIF_MAGICLEN];')]}
2
2
620
3492
https://github.com/mdadams/jasper
CVE-2016-9395
['CWE-20']
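The func pair for this record shows mif_validate, whose job is non-destructive format sniffing: read the four signature bytes, push all of them back so the caller sees an untouched stream, then compare against the big-endian magic. The sketch below reproduces the pattern over plain stdio with a placeholder magic value; note that ISO C guarantees only a single character of ungetc pushback, which is why JasPer asserts JAS_STREAM_MAXPUTBACK >= MIF_MAGICLEN against its own stream layer instead.

#include <stdio.h>
#include <stdint.h>

#define SKETCH_MAGICLEN 4
#define SKETCH_MAGIC    0x12345678u  /* placeholder signature */

static int sketch_validate(FILE *in)
{
    unsigned char buf[SKETCH_MAGICLEN];
    size_t n = fread(buf, 1, SKETCH_MAGICLEN, in);

    /* Push the bytes back in reverse order so the stream position is
       unchanged for whichever decoder runs next. */
    for (size_t i = n; i > 0; --i)
        if (ungetc(buf[i - 1], in) == EOF)
            return -1;

    if (n < SKETCH_MAGICLEN)
        return -1;  /* short read: cannot be this format */

    uint32_t magic = ((uint32_t)buf[0] << 24) | ((uint32_t)buf[1] << 16)
                   | ((uint32_t)buf[2] << 8) | (uint32_t)buf[3];
    return magic == SKETCH_MAGIC ? 0 : -1;
}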
t1lib.c
process_pfa
/* t1lib * * This file contains functions for reading PFA and PFB files. * * Copyright (c) 1998-2013 Eddie Kohler * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, subject to the * conditions listed in the Click LICENSE file, which is available in full at * http://github.com/kohler/click/blob/master/LICENSE. The conditions * include: you must preserve this copyright notice, and you cannot mention * the copyright holders in advertising related to the Software without * their permission. The Software is provided WITHOUT ANY WARRANTY, EXPRESS * OR IMPLIED. This notice is a summary of the Click LICENSE file; the * license in that file is binding. */ #ifdef HAVE_CONFIG_H # include <config.h> #endif #include <stdio.h> #include <stdlib.h> #include <ctype.h> #include <string.h> #include "t1lib.h" #ifdef __cplusplus extern "C" { #endif #define PFA_ASCII 1 #define PFA_EEXEC_TEST 2 #define PFA_HEX 3 #define PFA_BINARY 4 /* This function returns the value (0-15) of a single hex digit. It returns 0 for an invalid hex digit. */ static int hexval(char c) { if (c >= 'A' && c <= 'F') return c - 'A' + 10; else if (c >= 'a' && c <= 'f') return c - 'a' + 10; else if (c >= '0' && c <= '9') return c - '0'; else return 0; } /* This function translates a string of hexadecimal digits into binary data. We allow an odd number of digits. Returns length of binary data. */ static int translate_hex_string(char *s, char *saved_orphan) { int c1 = *saved_orphan; char *start = s; char *t = s; for (; *s; s++) { if (isspace(*s)) continue; if (c1) { *t++ = (hexval(c1) << 4) + hexval(*s); c1 = 0; } else c1 = *s; } *saved_orphan = c1; return t - start; } /* This function returns 1 if the string contains all '0's. */ static int all_zeroes(char *s) { if (*s == '\0' || *s == '\n') return 0; while (*s == '0') s++; return *s == '\0' || *s == '\n'; } /* This function handles the entire file. */ #define LINESIZE 1024 void process_pfa(FILE *ifp, const char *ifp_filename, struct font_reader *fr) { /* Loop until no more input. We need to look for `currentfile eexec' to start eexec section (hex to binary conversion) and line of all zeros to switch back to ASCII. */ /* Don't use fgets() in case line-endings are indicated by bare \r's, as occurs in Macintosh fonts. */ /* 2.Aug.1999 - At the behest of Tom Kacvinsky <tjk@ams.org>, support binary PFA fonts. */ char buffer[LINESIZE]; int c = 0; int blocktyp = PFA_ASCII; char saved_orphan = 0; (void)ifp_filename; while (c != EOF) { char *line = buffer, *last = buffer; int crlf = 0; c = getc(ifp); while (c != EOF && c != '\r' && c != '\n' && last < buffer + LINESIZE - 1) { *last++ = c; c = getc(ifp); } /* handle the end of the line */ if (last == buffer + LINESIZE - 1) /* buffer overrun: don't append newline even if we have it */ ungetc(c, ifp); else if (c == '\r' && blocktyp != PFA_BINARY) { /* change CR or CR/LF into LF, unless reading binary data! 
(This condition was wrong before, caused Thanh problems - 6.Mar.2001) */ c = getc(ifp); if (c != '\n') ungetc(c, ifp), crlf = 1; else crlf = 2; *last++ = '\n'; } else if (c != EOF) *last++ = c; *last = 0; /* now that we have the line, handle it */ if (blocktyp == PFA_ASCII) { if (strncmp(line, "currentfile eexec", 17) == 0 && isspace(line[17])) { char saved_p; /* assert(line == buffer); */ for (line += 18; isspace(*line); line++) /* nada */; saved_p = *line; *line = 0; fr->output_ascii(buffer, line - buffer); *line = saved_p; blocktyp = PFA_EEXEC_TEST; if (!*line) continue; } else { fr->output_ascii(line, last - line); continue; } } /* check immediately after "currentfile eexec" for ASCII or binary */ if (blocktyp == PFA_EEXEC_TEST) { /* 8.Feb.2004: fix bug if first character in a binary eexec block is 0, reported by Werner Lemberg */ for (; line < last && isspace(*line); line++) /* nada */; if (line == last) continue; else if (last >= line + 4 && isxdigit(line[0]) && isxdigit(line[1]) && isxdigit(line[2]) && isxdigit(line[3])) blocktyp = PFA_HEX; else blocktyp = PFA_BINARY; memmove(buffer, line, last - line + 1); last = buffer + (last - line); line = buffer; /* patch up crlf fix */ if (blocktyp == PFA_BINARY && crlf) { last[-1] = '\r'; if (crlf == 2) *last++ = '\n'; } } /* blocktyp == PFA_HEX || blocktyp == PFA_BINARY */ if (all_zeroes(line)) { /* XXX not safe */ fr->output_ascii(line, last - line); blocktyp = PFA_ASCII; } else if (blocktyp == PFA_HEX) { int len = translate_hex_string(line, &saved_orphan); if (len) fr->output_binary((unsigned char *)line, len); } else fr->output_binary((unsigned char *)line, last - line); } fr->output_end(); } /* Process a PFB file. */ /* XXX Doesn't handle "currentfile eexec" as intelligently as process_pfa does. 
*/ static int handle_pfb_ascii(struct font_reader *fr, char *line, int len) { /* Divide PFB_ASCII blocks into lines */ int start = 0; while (1) { int pos = start; while (pos < len && line[pos] != '\n' && line[pos] != '\r') pos++; if (pos >= len) { if (pos == start) return 0; else if (start == 0 && pos == LINESIZE - 1) { line[pos] = 0; fr->output_ascii(line, pos); return 0; } else { memmove(line, line + start, pos - start); return pos - start; } } else if (pos < len - 1 && line[pos] == '\r' && line[pos+1] == '\n') { line[pos] = '\n'; line[pos+1] = 0; fr->output_ascii(line + start, pos + 1 - start); start = pos + 2; } else { char save = line[pos+1]; line[pos] = '\n'; line[pos+1] = 0; fr->output_ascii(line + start, pos + 1 - start); line[pos+1] = save; start = pos + 1; } } } void process_pfb(FILE *ifp, const char *ifp_filename, struct font_reader *fr) { int blocktyp = 0; unsigned block_len = 0; int c = 0; unsigned filepos = 0; int linepos = 0; char line[LINESIZE]; while (1) { while (block_len == 0) { c = getc(ifp); blocktyp = getc(ifp); if (c != PFB_MARKER || (blocktyp != PFB_ASCII && blocktyp != PFB_BINARY && blocktyp != PFB_DONE)) { if (c == EOF || blocktyp == EOF) error("%s corrupted: no end-of-file marker", ifp_filename); else error("%s corrupted: bad block marker at position %u", ifp_filename, filepos); blocktyp = PFB_DONE; } if (blocktyp == PFB_DONE) goto done; block_len = getc(ifp) & 0xFF; block_len |= (getc(ifp) & 0xFF) << 8; block_len |= (getc(ifp) & 0xFF) << 16; block_len |= (unsigned) (getc(ifp) & 0xFF) << 24; if (feof(ifp)) { error("%s corrupted: bad block length at position %u", ifp_filename, filepos); blocktyp = PFB_DONE; goto done; } filepos += 6; } /* read the block in its entirety, in LINESIZE chunks */ while (block_len > 0) { unsigned rest = LINESIZE - 1 - linepos; /* leave space for '\0' */ unsigned n = (block_len > rest ? rest : block_len); int actual = fread(line + linepos, 1, n, ifp); if (actual != (int) n) { error("%s corrupted: block short by %u bytes at position %u", ifp_filename, block_len - actual, filepos); block_len = actual; } if (blocktyp == PFB_BINARY) fr->output_binary((unsigned char *)line, actual); else linepos = handle_pfb_ascii(fr, line, linepos + actual); block_len -= actual; filepos += actual; } /* handle any leftover line */ if (linepos > 0) { line[linepos] = 0; fr->output_ascii(line, linepos); linepos = 0; } } done: c = getc(ifp); if (c != EOF) error("%s corrupted: data after PFB end marker at position %u", ifp_filename, filepos - 2); fr->output_end(); } #define DEFAULT_BLOCKLEN (1L<<12) void init_pfb_writer(struct pfb_writer *w, int blocklen, FILE *f) { w->len = DEFAULT_BLOCKLEN; w->buf = (unsigned char *)malloc(w->len); if (!w->buf) fatal_error("out of memory"); w->max_len = (blocklen <= 0 ? 
0xFFFFFFFFU : (unsigned)blocklen); w->pos = 0; w->blocktyp = PFB_ASCII; w->binary_blocks_written = 0; w->f = f; } void pfb_writer_output_block(struct pfb_writer *w) { /* do nothing if nothing in block */ if (w->pos == 0) return; /* output four-byte block length */ putc(PFB_MARKER, w->f); putc(w->blocktyp, w->f); putc((int)(w->pos & 0xff), w->f); putc((int)((w->pos >> 8) & 0xff), w->f); putc((int)((w->pos >> 16) & 0xff), w->f); putc((int)((w->pos >> 24) & 0xff), w->f); /* output block data */ fwrite(w->buf, 1, w->pos, w->f); /* mark block buffer empty and uninitialized */ w->pos = 0; if (w->blocktyp == PFB_BINARY) w->binary_blocks_written++; } void pfb_writer_grow_buf(struct pfb_writer *w) { if (w->len < w->max_len) { /* grow w->buf */ unsigned new_len = w->len * 2; unsigned char *new_buf; if (new_len > w->max_len) new_len = w->max_len; new_buf = (unsigned char *)malloc(new_len); if (!new_buf) { error("out of memory; continuing with a smaller block size"); w->max_len = w->len; pfb_writer_output_block(w); } else { memcpy(new_buf, w->buf, w->len); free(w->buf); w->buf = new_buf; w->len = new_len; } } else /* buf already the right size, just output the block */ pfb_writer_output_block(w); } void pfb_writer_end(struct pfb_writer *w) { if (w->pos) pfb_writer_output_block(w); putc(PFB_MARKER, w->f); putc(PFB_DONE, w->f); } /* This CRC table and routine were borrowed from macutils-2.0b3 */ static unsigned short crctab[256] = { 0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50A5, 0x60C6, 0x70E7, 0x8108, 0x9129, 0xA14A, 0xB16B, 0xC18C, 0xD1AD, 0xE1CE, 0xF1EF, 0x1231, 0x0210, 0x3273, 0x2252, 0x52B5, 0x4294, 0x72F7, 0x62D6, 0x9339, 0x8318, 0xB37B, 0xA35A, 0xD3BD, 0xC39C, 0xF3FF, 0xE3DE, 0x2462, 0x3443, 0x0420, 0x1401, 0x64E6, 0x74C7, 0x44A4, 0x5485, 0xA56A, 0xB54B, 0x8528, 0x9509, 0xE5EE, 0xF5CF, 0xC5AC, 0xD58D, 0x3653, 0x2672, 0x1611, 0x0630, 0x76D7, 0x66F6, 0x5695, 0x46B4, 0xB75B, 0xA77A, 0x9719, 0x8738, 0xF7DF, 0xE7FE, 0xD79D, 0xC7BC, 0x48C4, 0x58E5, 0x6886, 0x78A7, 0x0840, 0x1861, 0x2802, 0x3823, 0xC9CC, 0xD9ED, 0xE98E, 0xF9AF, 0x8948, 0x9969, 0xA90A, 0xB92B, 0x5AF5, 0x4AD4, 0x7AB7, 0x6A96, 0x1A71, 0x0A50, 0x3A33, 0x2A12, 0xDBFD, 0xCBDC, 0xFBBF, 0xEB9E, 0x9B79, 0x8B58, 0xBB3B, 0xAB1A, 0x6CA6, 0x7C87, 0x4CE4, 0x5CC5, 0x2C22, 0x3C03, 0x0C60, 0x1C41, 0xEDAE, 0xFD8F, 0xCDEC, 0xDDCD, 0xAD2A, 0xBD0B, 0x8D68, 0x9D49, 0x7E97, 0x6EB6, 0x5ED5, 0x4EF4, 0x3E13, 0x2E32, 0x1E51, 0x0E70, 0xFF9F, 0xEFBE, 0xDFDD, 0xCFFC, 0xBF1B, 0xAF3A, 0x9F59, 0x8F78, 0x9188, 0x81A9, 0xB1CA, 0xA1EB, 0xD10C, 0xC12D, 0xF14E, 0xE16F, 0x1080, 0x00A1, 0x30C2, 0x20E3, 0x5004, 0x4025, 0x7046, 0x6067, 0x83B9, 0x9398, 0xA3FB, 0xB3DA, 0xC33D, 0xD31C, 0xE37F, 0xF35E, 0x02B1, 0x1290, 0x22F3, 0x32D2, 0x4235, 0x5214, 0x6277, 0x7256, 0xB5EA, 0xA5CB, 0x95A8, 0x8589, 0xF56E, 0xE54F, 0xD52C, 0xC50D, 0x34E2, 0x24C3, 0x14A0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405, 0xA7DB, 0xB7FA, 0x8799, 0x97B8, 0xE75F, 0xF77E, 0xC71D, 0xD73C, 0x26D3, 0x36F2, 0x0691, 0x16B0, 0x6657, 0x7676, 0x4615, 0x5634, 0xD94C, 0xC96D, 0xF90E, 0xE92F, 0x99C8, 0x89E9, 0xB98A, 0xA9AB, 0x5844, 0x4865, 0x7806, 0x6827, 0x18C0, 0x08E1, 0x3882, 0x28A3, 0xCB7D, 0xDB5C, 0xEB3F, 0xFB1E, 0x8BF9, 0x9BD8, 0xABBB, 0xBB9A, 0x4A75, 0x5A54, 0x6A37, 0x7A16, 0x0AF1, 0x1AD0, 0x2AB3, 0x3A92, 0xFD2E, 0xED0F, 0xDD6C, 0xCD4D, 0xBDAA, 0xAD8B, 0x9DE8, 0x8DC9, 0x7C26, 0x6C07, 0x5C64, 0x4C45, 0x3CA2, 0x2C83, 0x1CE0, 0x0CC1, 0xEF1F, 0xFF3E, 0xCF5D, 0xDF7C, 0xAF9B, 0xBFBA, 0x8FD9, 0x9FF8, 0x6E17, 0x7E36, 0x4E55, 0x5E74, 0x2E93, 0x3EB2, 0x0ED1, 0x1EF0, }; /* * Update a CRC check on the given buffer. 
*/ int crcbuf(int crc, unsigned int len, const char *buf) { const unsigned char *ubuf = (const unsigned char *)buf; while (len--) crc = ((crc << 8) & 0xFF00) ^ crctab[((crc >> 8) & 0xFF) ^ *ubuf++]; return crc; } #ifdef __cplusplus } #endif
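The t1lib code_after that follows appears to differ from the code_before above only in its <ctype.h> calls: isspace(*s) becomes isspace((unsigned char) *s), and the four isxdigit probes in process_pfa gain the same cast. The cast matters because the ctype classifiers require an argument representable as unsigned char (or equal to EOF); on platforms where plain char is signed, input bytes >= 0x80 arrive as negative ints and the call is undefined behavior. A minimal wrapper expressing the idiom:

#include <ctype.h>

/* Safe byte classifier: map the char into 0..UCHAR_MAX first. */
static int is_space_byte(char c)
{
    return isspace((unsigned char) c);
}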
/* t1lib * * This file contains functions for reading PFA and PFB files. * * Copyright (c) 1998-2013 Eddie Kohler * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, subject to the * conditions listed in the Click LICENSE file, which is available in full at * http://github.com/kohler/click/blob/master/LICENSE. The conditions * include: you must preserve this copyright notice, and you cannot mention * the copyright holders in advertising related to the Software without * their permission. The Software is provided WITHOUT ANY WARRANTY, EXPRESS * OR IMPLIED. This notice is a summary of the Click LICENSE file; the * license in that file is binding. */ #ifdef HAVE_CONFIG_H # include <config.h> #endif #include <stdio.h> #include <stdlib.h> #include <ctype.h> #include <string.h> #include "t1lib.h" #ifdef __cplusplus extern "C" { #endif #define PFA_ASCII 1 #define PFA_EEXEC_TEST 2 #define PFA_HEX 3 #define PFA_BINARY 4 /* This function returns the value (0-15) of a single hex digit. It returns 0 for an invalid hex digit. */ static int hexval(char c) { if (c >= 'A' && c <= 'F') return c - 'A' + 10; else if (c >= 'a' && c <= 'f') return c - 'a' + 10; else if (c >= '0' && c <= '9') return c - '0'; else return 0; } /* This function translates a string of hexadecimal digits into binary data. We allow an odd number of digits. Returns length of binary data. */ static int translate_hex_string(char *s, char *saved_orphan) { int c1 = *saved_orphan; char *start = s; char *t = s; for (; *s; s++) { if (isspace((unsigned char) *s)) continue; if (c1) { *t++ = (hexval(c1) << 4) + hexval(*s); c1 = 0; } else c1 = *s; } *saved_orphan = c1; return t - start; } /* This function returns 1 if the string contains all '0's. */ static int all_zeroes(char *s) { if (*s == '\0' || *s == '\n') return 0; while (*s == '0') s++; return *s == '\0' || *s == '\n'; } /* This function handles the entire file. */ #define LINESIZE 1024 void process_pfa(FILE *ifp, const char *ifp_filename, struct font_reader *fr) { /* Loop until no more input. We need to look for `currentfile eexec' to start eexec section (hex to binary conversion) and line of all zeros to switch back to ASCII. */ /* Don't use fgets() in case line-endings are indicated by bare \r's, as occurs in Macintosh fonts. */ /* 2.Aug.1999 - At the behest of Tom Kacvinsky <tjk@ams.org>, support binary PFA fonts. */ char buffer[LINESIZE]; int c = 0; int blocktyp = PFA_ASCII; char saved_orphan = 0; (void)ifp_filename; while (c != EOF) { char *line = buffer, *last = buffer; int crlf = 0; c = getc(ifp); while (c != EOF && c != '\r' && c != '\n' && last < buffer + LINESIZE - 1) { *last++ = c; c = getc(ifp); } /* handle the end of the line */ if (last == buffer + LINESIZE - 1) /* buffer overrun: don't append newline even if we have it */ ungetc(c, ifp); else if (c == '\r' && blocktyp != PFA_BINARY) { /* change CR or CR/LF into LF, unless reading binary data! 
(This condition was wrong before, caused Thanh problems - 6.Mar.2001) */ c = getc(ifp); if (c != '\n') ungetc(c, ifp), crlf = 1; else crlf = 2; *last++ = '\n'; } else if (c != EOF) *last++ = c; *last = 0; /* now that we have the line, handle it */ if (blocktyp == PFA_ASCII) { if (strncmp(line, "currentfile eexec", 17) == 0 && isspace((unsigned char) line[17])) { char saved_p; /* assert(line == buffer); */ for (line += 18; isspace((unsigned char) *line); line++) /* nada */; saved_p = *line; *line = 0; fr->output_ascii(buffer, line - buffer); *line = saved_p; blocktyp = PFA_EEXEC_TEST; if (!*line) continue; } else { fr->output_ascii(line, last - line); continue; } } /* check immediately after "currentfile eexec" for ASCII or binary */ if (blocktyp == PFA_EEXEC_TEST) { /* 8.Feb.2004: fix bug if first character in a binary eexec block is 0, reported by Werner Lemberg */ for (; line < last && isspace((unsigned char) *line); line++) /* nada */; if (line == last) continue; else if (last >= line + 4 && isxdigit((unsigned char) line[0]) && isxdigit((unsigned char) line[1]) && isxdigit((unsigned char) line[2]) && isxdigit((unsigned char) line[3])) blocktyp = PFA_HEX; else blocktyp = PFA_BINARY; memmove(buffer, line, last - line + 1); last = buffer + (last - line); line = buffer; /* patch up crlf fix */ if (blocktyp == PFA_BINARY && crlf) { last[-1] = '\r'; if (crlf == 2) *last++ = '\n'; } } /* blocktyp == PFA_HEX || blocktyp == PFA_BINARY */ if (all_zeroes(line)) { /* XXX not safe */ fr->output_ascii(line, last - line); blocktyp = PFA_ASCII; } else if (blocktyp == PFA_HEX) { int len = translate_hex_string(line, &saved_orphan); if (len) fr->output_binary((unsigned char *)line, len); } else fr->output_binary((unsigned char *)line, last - line); } fr->output_end(); } /* Process a PFB file. */ /* XXX Doesn't handle "currentfile eexec" as intelligently as process_pfa does. 
*/ static int handle_pfb_ascii(struct font_reader *fr, char *line, int len) { /* Divide PFB_ASCII blocks into lines */ int start = 0; while (1) { int pos = start; while (pos < len && line[pos] != '\n' && line[pos] != '\r') pos++; if (pos >= len) { if (pos == start) return 0; else if (start == 0 && pos == LINESIZE - 1) { line[pos] = 0; fr->output_ascii(line, pos); return 0; } else { memmove(line, line + start, pos - start); return pos - start; } } else if (pos < len - 1 && line[pos] == '\r' && line[pos+1] == '\n') { line[pos] = '\n'; line[pos+1] = 0; fr->output_ascii(line + start, pos + 1 - start); start = pos + 2; } else { char save = line[pos+1]; line[pos] = '\n'; line[pos+1] = 0; fr->output_ascii(line + start, pos + 1 - start); line[pos+1] = save; start = pos + 1; } } } void process_pfb(FILE *ifp, const char *ifp_filename, struct font_reader *fr) { int blocktyp = 0; unsigned block_len = 0; int c = 0; unsigned filepos = 0; int linepos = 0; char line[LINESIZE]; while (1) { while (block_len == 0) { c = getc(ifp); blocktyp = getc(ifp); if (c != PFB_MARKER || (blocktyp != PFB_ASCII && blocktyp != PFB_BINARY && blocktyp != PFB_DONE)) { if (c == EOF || blocktyp == EOF) error("%s corrupted: no end-of-file marker", ifp_filename); else error("%s corrupted: bad block marker at position %u", ifp_filename, filepos); blocktyp = PFB_DONE; } if (blocktyp == PFB_DONE) goto done; block_len = getc(ifp) & 0xFF; block_len |= (getc(ifp) & 0xFF) << 8; block_len |= (getc(ifp) & 0xFF) << 16; block_len |= (unsigned) (getc(ifp) & 0xFF) << 24; if (feof(ifp)) { error("%s corrupted: bad block length at position %u", ifp_filename, filepos); blocktyp = PFB_DONE; goto done; } filepos += 6; } /* read the block in its entirety, in LINESIZE chunks */ while (block_len > 0) { unsigned rest = LINESIZE - 1 - linepos; /* leave space for '\0' */ unsigned n = (block_len > rest ? rest : block_len); int actual = fread(line + linepos, 1, n, ifp); if (actual != (int) n) { error("%s corrupted: block short by %u bytes at position %u", ifp_filename, block_len - actual, filepos); block_len = actual; } if (blocktyp == PFB_BINARY) fr->output_binary((unsigned char *)line, actual); else linepos = handle_pfb_ascii(fr, line, linepos + actual); block_len -= actual; filepos += actual; } /* handle any leftover line */ if (linepos > 0) { line[linepos] = 0; fr->output_ascii(line, linepos); linepos = 0; } } done: c = getc(ifp); if (c != EOF) error("%s corrupted: data after PFB end marker at position %u", ifp_filename, filepos - 2); fr->output_end(); } #define DEFAULT_BLOCKLEN (1L<<12) void init_pfb_writer(struct pfb_writer *w, int blocklen, FILE *f) { w->len = DEFAULT_BLOCKLEN; w->buf = (unsigned char *)malloc(w->len); if (!w->buf) fatal_error("out of memory"); w->max_len = (blocklen <= 0 ? 
0xFFFFFFFFU : (unsigned)blocklen); w->pos = 0; w->blocktyp = PFB_ASCII; w->binary_blocks_written = 0; w->f = f; } void pfb_writer_output_block(struct pfb_writer *w) { /* do nothing if nothing in block */ if (w->pos == 0) return; /* output four-byte block length */ putc(PFB_MARKER, w->f); putc(w->blocktyp, w->f); putc((int)(w->pos & 0xff), w->f); putc((int)((w->pos >> 8) & 0xff), w->f); putc((int)((w->pos >> 16) & 0xff), w->f); putc((int)((w->pos >> 24) & 0xff), w->f); /* output block data */ fwrite(w->buf, 1, w->pos, w->f); /* mark block buffer empty and uninitialized */ w->pos = 0; if (w->blocktyp == PFB_BINARY) w->binary_blocks_written++; } void pfb_writer_grow_buf(struct pfb_writer *w) { if (w->len < w->max_len) { /* grow w->buf */ unsigned new_len = w->len * 2; unsigned char *new_buf; if (new_len > w->max_len) new_len = w->max_len; new_buf = (unsigned char *)malloc(new_len); if (!new_buf) { error("out of memory; continuing with a smaller block size"); w->max_len = w->len; pfb_writer_output_block(w); } else { memcpy(new_buf, w->buf, w->len); free(w->buf); w->buf = new_buf; w->len = new_len; } } else /* buf already the right size, just output the block */ pfb_writer_output_block(w); } void pfb_writer_end(struct pfb_writer *w) { if (w->pos) pfb_writer_output_block(w); putc(PFB_MARKER, w->f); putc(PFB_DONE, w->f); } /* This CRC table and routine were borrowed from macutils-2.0b3 */ static unsigned short crctab[256] = { 0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50A5, 0x60C6, 0x70E7, 0x8108, 0x9129, 0xA14A, 0xB16B, 0xC18C, 0xD1AD, 0xE1CE, 0xF1EF, 0x1231, 0x0210, 0x3273, 0x2252, 0x52B5, 0x4294, 0x72F7, 0x62D6, 0x9339, 0x8318, 0xB37B, 0xA35A, 0xD3BD, 0xC39C, 0xF3FF, 0xE3DE, 0x2462, 0x3443, 0x0420, 0x1401, 0x64E6, 0x74C7, 0x44A4, 0x5485, 0xA56A, 0xB54B, 0x8528, 0x9509, 0xE5EE, 0xF5CF, 0xC5AC, 0xD58D, 0x3653, 0x2672, 0x1611, 0x0630, 0x76D7, 0x66F6, 0x5695, 0x46B4, 0xB75B, 0xA77A, 0x9719, 0x8738, 0xF7DF, 0xE7FE, 0xD79D, 0xC7BC, 0x48C4, 0x58E5, 0x6886, 0x78A7, 0x0840, 0x1861, 0x2802, 0x3823, 0xC9CC, 0xD9ED, 0xE98E, 0xF9AF, 0x8948, 0x9969, 0xA90A, 0xB92B, 0x5AF5, 0x4AD4, 0x7AB7, 0x6A96, 0x1A71, 0x0A50, 0x3A33, 0x2A12, 0xDBFD, 0xCBDC, 0xFBBF, 0xEB9E, 0x9B79, 0x8B58, 0xBB3B, 0xAB1A, 0x6CA6, 0x7C87, 0x4CE4, 0x5CC5, 0x2C22, 0x3C03, 0x0C60, 0x1C41, 0xEDAE, 0xFD8F, 0xCDEC, 0xDDCD, 0xAD2A, 0xBD0B, 0x8D68, 0x9D49, 0x7E97, 0x6EB6, 0x5ED5, 0x4EF4, 0x3E13, 0x2E32, 0x1E51, 0x0E70, 0xFF9F, 0xEFBE, 0xDFDD, 0xCFFC, 0xBF1B, 0xAF3A, 0x9F59, 0x8F78, 0x9188, 0x81A9, 0xB1CA, 0xA1EB, 0xD10C, 0xC12D, 0xF14E, 0xE16F, 0x1080, 0x00A1, 0x30C2, 0x20E3, 0x5004, 0x4025, 0x7046, 0x6067, 0x83B9, 0x9398, 0xA3FB, 0xB3DA, 0xC33D, 0xD31C, 0xE37F, 0xF35E, 0x02B1, 0x1290, 0x22F3, 0x32D2, 0x4235, 0x5214, 0x6277, 0x7256, 0xB5EA, 0xA5CB, 0x95A8, 0x8589, 0xF56E, 0xE54F, 0xD52C, 0xC50D, 0x34E2, 0x24C3, 0x14A0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405, 0xA7DB, 0xB7FA, 0x8799, 0x97B8, 0xE75F, 0xF77E, 0xC71D, 0xD73C, 0x26D3, 0x36F2, 0x0691, 0x16B0, 0x6657, 0x7676, 0x4615, 0x5634, 0xD94C, 0xC96D, 0xF90E, 0xE92F, 0x99C8, 0x89E9, 0xB98A, 0xA9AB, 0x5844, 0x4865, 0x7806, 0x6827, 0x18C0, 0x08E1, 0x3882, 0x28A3, 0xCB7D, 0xDB5C, 0xEB3F, 0xFB1E, 0x8BF9, 0x9BD8, 0xABBB, 0xBB9A, 0x4A75, 0x5A54, 0x6A37, 0x7A16, 0x0AF1, 0x1AD0, 0x2AB3, 0x3A92, 0xFD2E, 0xED0F, 0xDD6C, 0xCD4D, 0xBDAA, 0xAD8B, 0x9DE8, 0x8DC9, 0x7C26, 0x6C07, 0x5C64, 0x4C45, 0x3CA2, 0x2C83, 0x1CE0, 0x0CC1, 0xEF1F, 0xFF3E, 0xCF5D, 0xDF7C, 0xAF9B, 0xBFBA, 0x8FD9, 0x9FF8, 0x6E17, 0x7E36, 0x4E55, 0x5E74, 0x2E93, 0x3EB2, 0x0ED1, 0x1EF0, }; /* * Update a CRC check on the given buffer. 
*/ int crcbuf(int crc, unsigned int len, const char *buf) { const unsigned char *ubuf = (const unsigned char *)buf; while (len--) crc = ((crc << 8) & 0xFF00) ^ crctab[((crc >> 8) & 0xFF) ^ *ubuf++]; return crc; } #ifdef __cplusplus } #endif
process_pfa(FILE *ifp, const char *ifp_filename, struct font_reader *fr) { /* Loop until no more input. We need to look for `currentfile eexec' to start eexec section (hex to binary conversion) and line of all zeros to switch back to ASCII. */ /* Don't use fgets() in case line-endings are indicated by bare \r's, as occurs in Macintosh fonts. */ /* 2.Aug.1999 - At the behest of Tom Kacvinsky <tjk@ams.org>, support binary PFA fonts. */ char buffer[LINESIZE]; int c = 0; int blocktyp = PFA_ASCII; char saved_orphan = 0; (void)ifp_filename; while (c != EOF) { char *line = buffer, *last = buffer; int crlf = 0; c = getc(ifp); while (c != EOF && c != '\r' && c != '\n' && last < buffer + LINESIZE - 1) { *last++ = c; c = getc(ifp); } /* handle the end of the line */ if (last == buffer + LINESIZE - 1) /* buffer overrun: don't append newline even if we have it */ ungetc(c, ifp); else if (c == '\r' && blocktyp != PFA_BINARY) { /* change CR or CR/LF into LF, unless reading binary data! (This condition was wrong before, caused Thanh problems - 6.Mar.2001) */ c = getc(ifp); if (c != '\n') ungetc(c, ifp), crlf = 1; else crlf = 2; *last++ = '\n'; } else if (c != EOF) *last++ = c; *last = 0; /* now that we have the line, handle it */ if (blocktyp == PFA_ASCII) { if (strncmp(line, "currentfile eexec", 17) == 0 && isspace(line[17])) { char saved_p; /* assert(line == buffer); */ for (line += 18; isspace(*line); line++) /* nada */; saved_p = *line; *line = 0; fr->output_ascii(buffer, line - buffer); *line = saved_p; blocktyp = PFA_EEXEC_TEST; if (!*line) continue; } else { fr->output_ascii(line, last - line); continue; } } /* check immediately after "currentfile eexec" for ASCII or binary */ if (blocktyp == PFA_EEXEC_TEST) { /* 8.Feb.2004: fix bug if first character in a binary eexec block is 0, reported by Werner Lemberg */ for (; line < last && isspace(*line); line++) /* nada */; if (line == last) continue; else if (last >= line + 4 && isxdigit(line[0]) && isxdigit(line[1]) && isxdigit(line[2]) && isxdigit(line[3])) blocktyp = PFA_HEX; else blocktyp = PFA_BINARY; memmove(buffer, line, last - line + 1); last = buffer + (last - line); line = buffer; /* patch up crlf fix */ if (blocktyp == PFA_BINARY && crlf) { last[-1] = '\r'; if (crlf == 2) *last++ = '\n'; } } /* blocktyp == PFA_HEX || blocktyp == PFA_BINARY */ if (all_zeroes(line)) { /* XXX not safe */ fr->output_ascii(line, last - line); blocktyp = PFA_ASCII; } else if (blocktyp == PFA_HEX) { int len = translate_hex_string(line, &saved_orphan); if (len) fr->output_binary((unsigned char *)line, len); } else fr->output_binary((unsigned char *)line, last - line); } fr->output_end(); }
process_pfa(FILE *ifp, const char *ifp_filename, struct font_reader *fr) { /* Loop until no more input. We need to look for `currentfile eexec' to start eexec section (hex to binary conversion) and line of all zeros to switch back to ASCII. */ /* Don't use fgets() in case line-endings are indicated by bare \r's, as occurs in Macintosh fonts. */ /* 2.Aug.1999 - At the behest of Tom Kacvinsky <tjk@ams.org>, support binary PFA fonts. */ char buffer[LINESIZE]; int c = 0; int blocktyp = PFA_ASCII; char saved_orphan = 0; (void)ifp_filename; while (c != EOF) { char *line = buffer, *last = buffer; int crlf = 0; c = getc(ifp); while (c != EOF && c != '\r' && c != '\n' && last < buffer + LINESIZE - 1) { *last++ = c; c = getc(ifp); } /* handle the end of the line */ if (last == buffer + LINESIZE - 1) /* buffer overrun: don't append newline even if we have it */ ungetc(c, ifp); else if (c == '\r' && blocktyp != PFA_BINARY) { /* change CR or CR/LF into LF, unless reading binary data! (This condition was wrong before, caused Thanh problems - 6.Mar.2001) */ c = getc(ifp); if (c != '\n') ungetc(c, ifp), crlf = 1; else crlf = 2; *last++ = '\n'; } else if (c != EOF) *last++ = c; *last = 0; /* now that we have the line, handle it */ if (blocktyp == PFA_ASCII) { if (strncmp(line, "currentfile eexec", 17) == 0 && isspace((unsigned char) line[17])) { char saved_p; /* assert(line == buffer); */ for (line += 18; isspace((unsigned char) *line); line++) /* nada */; saved_p = *line; *line = 0; fr->output_ascii(buffer, line - buffer); *line = saved_p; blocktyp = PFA_EEXEC_TEST; if (!*line) continue; } else { fr->output_ascii(line, last - line); continue; } } /* check immediately after "currentfile eexec" for ASCII or binary */ if (blocktyp == PFA_EEXEC_TEST) { /* 8.Feb.2004: fix bug if first character in a binary eexec block is 0, reported by Werner Lemberg */ for (; line < last && isspace((unsigned char) *line); line++) /* nada */; if (line == last) continue; else if (last >= line + 4 && isxdigit((unsigned char) line[0]) && isxdigit((unsigned char) line[1]) && isxdigit((unsigned char) line[2]) && isxdigit((unsigned char) line[3])) blocktyp = PFA_HEX; else blocktyp = PFA_BINARY; memmove(buffer, line, last - line + 1); last = buffer + (last - line); line = buffer; /* patch up crlf fix */ if (blocktyp == PFA_BINARY && crlf) { last[-1] = '\r'; if (crlf == 2) *last++ = '\n'; } } /* blocktyp == PFA_HEX || blocktyp == PFA_BINARY */ if (all_zeroes(line)) { /* XXX not safe */ fr->output_ascii(line, last - line); blocktyp = PFA_ASCII; } else if (blocktyp == PFA_HEX) { int len = translate_hex_string(line, &saved_orphan); if (len) fr->output_binary((unsigned char *)line, len); } else fr->output_binary((unsigned char *)line, last - line); } fr->output_end(); }
diff: {'added': [(62, ' if (isspace((unsigned char) *s))'), (139, '\t if (strncmp(line, "currentfile eexec", 17) == 0 && isspace((unsigned char) line[17])) {'), (142, '\t\tfor (line += 18; isspace((unsigned char) *line); line++)'), (161, '\t for (; line < last && isspace((unsigned char) *line); line++)'), (165, '\t else if (last >= line + 4 && isxdigit((unsigned char) line[0])'), (166, ' && isxdigit((unsigned char) line[1])'), (167, '\t\t && isxdigit((unsigned char) line[2])'), (168, ' && isxdigit((unsigned char) line[3]))')], 'deleted': [(62, ' if (isspace(*s))'), (139, '\t if (strncmp(line, "currentfile eexec", 17) == 0 && isspace(line[17])) {'), (142, '\t\tfor (line += 18; isspace(*line); line++)'), (161, '\t for (; line < last && isspace(*line); line++)'), (165, '\t else if (last >= line + 4 && isxdigit(line[0]) && isxdigit(line[1])'), (166, '\t\t && isxdigit(line[2]) && isxdigit(line[3]))')]}
num_lines_added: 8
num_lines_deleted: 6
num_lines_in_file: 326
num_tokens_in_file: 2,448
repo: https://github.com/kohler/t1utils
cve_id: CVE-2015-3905
cwe_id: ['CWE-119']
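The change captured in this record (and in the next record, which isolates the same commit's effect on translate_hex_string) is the classic <ctype.h> hardening: every byte passed to isspace() or isxdigit() is cast to unsigned char first. The ctype functions are undefined for negative arguments other than EOF, so on ABIs where plain char is signed, a font byte >= 0x80 can turn into a negative index into the implementation's classification table, an out-of-bounds read consistent with the CWE-119 tag. Below is a minimal sketch of the pattern under those assumptions; the helper name is_space_safe is illustrative and not part of t1utils.

#include <ctype.h>
#include <stdio.h>

/* Cast before classifying, exactly as the diff above does at each call site. */
static int is_space_safe(char c)
{
    return isspace((unsigned char) c);
}

int main(void)
{
    char byte = (char) 0xA0;  /* e.g. a Latin-1 NBSP byte from a crafted font */
    /* isspace(byte) would pass -96 on signed-char platforms: undefined
       behavior. The cast hands ctype the value 160, which it must accept. */
    printf("0x%02X whitespace? %d\n", (unsigned char) byte, is_space_safe(byte));
    return 0;
}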
file_name: t1lib.c
method_name: translate_hex_string
/* t1lib * * This file contains functions for reading PFA and PFB files. * * Copyright (c) 1998-2013 Eddie Kohler * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, subject to the * conditions listed in the Click LICENSE file, which is available in full at * http://github.com/kohler/click/blob/master/LICENSE. The conditions * include: you must preserve this copyright notice, and you cannot mention * the copyright holders in advertising related to the Software without * their permission. The Software is provided WITHOUT ANY WARRANTY, EXPRESS * OR IMPLIED. This notice is a summary of the Click LICENSE file; the * license in that file is binding. */ #ifdef HAVE_CONFIG_H # include <config.h> #endif #include <stdio.h> #include <stdlib.h> #include <ctype.h> #include <string.h> #include "t1lib.h" #ifdef __cplusplus extern "C" { #endif #define PFA_ASCII 1 #define PFA_EEXEC_TEST 2 #define PFA_HEX 3 #define PFA_BINARY 4 /* This function returns the value (0-15) of a single hex digit. It returns 0 for an invalid hex digit. */ static int hexval(char c) { if (c >= 'A' && c <= 'F') return c - 'A' + 10; else if (c >= 'a' && c <= 'f') return c - 'a' + 10; else if (c >= '0' && c <= '9') return c - '0'; else return 0; } /* This function translates a string of hexadecimal digits into binary data. We allow an odd number of digits. Returns length of binary data. */ static int translate_hex_string(char *s, char *saved_orphan) { int c1 = *saved_orphan; char *start = s; char *t = s; for (; *s; s++) { if (isspace(*s)) continue; if (c1) { *t++ = (hexval(c1) << 4) + hexval(*s); c1 = 0; } else c1 = *s; } *saved_orphan = c1; return t - start; } /* This function returns 1 if the string contains all '0's. */ static int all_zeroes(char *s) { if (*s == '\0' || *s == '\n') return 0; while (*s == '0') s++; return *s == '\0' || *s == '\n'; } /* This function handles the entire file. */ #define LINESIZE 1024 void process_pfa(FILE *ifp, const char *ifp_filename, struct font_reader *fr) { /* Loop until no more input. We need to look for `currentfile eexec' to start eexec section (hex to binary conversion) and line of all zeros to switch back to ASCII. */ /* Don't use fgets() in case line-endings are indicated by bare \r's, as occurs in Macintosh fonts. */ /* 2.Aug.1999 - At the behest of Tom Kacvinsky <tjk@ams.org>, support binary PFA fonts. */ char buffer[LINESIZE]; int c = 0; int blocktyp = PFA_ASCII; char saved_orphan = 0; (void)ifp_filename; while (c != EOF) { char *line = buffer, *last = buffer; int crlf = 0; c = getc(ifp); while (c != EOF && c != '\r' && c != '\n' && last < buffer + LINESIZE - 1) { *last++ = c; c = getc(ifp); } /* handle the end of the line */ if (last == buffer + LINESIZE - 1) /* buffer overrun: don't append newline even if we have it */ ungetc(c, ifp); else if (c == '\r' && blocktyp != PFA_BINARY) { /* change CR or CR/LF into LF, unless reading binary data! 
(This condition was wrong before, caused Thanh problems - 6.Mar.2001) */ c = getc(ifp); if (c != '\n') ungetc(c, ifp), crlf = 1; else crlf = 2; *last++ = '\n'; } else if (c != EOF) *last++ = c; *last = 0; /* now that we have the line, handle it */ if (blocktyp == PFA_ASCII) { if (strncmp(line, "currentfile eexec", 17) == 0 && isspace(line[17])) { char saved_p; /* assert(line == buffer); */ for (line += 18; isspace(*line); line++) /* nada */; saved_p = *line; *line = 0; fr->output_ascii(buffer, line - buffer); *line = saved_p; blocktyp = PFA_EEXEC_TEST; if (!*line) continue; } else { fr->output_ascii(line, last - line); continue; } } /* check immediately after "currentfile eexec" for ASCII or binary */ if (blocktyp == PFA_EEXEC_TEST) { /* 8.Feb.2004: fix bug if first character in a binary eexec block is 0, reported by Werner Lemberg */ for (; line < last && isspace(*line); line++) /* nada */; if (line == last) continue; else if (last >= line + 4 && isxdigit(line[0]) && isxdigit(line[1]) && isxdigit(line[2]) && isxdigit(line[3])) blocktyp = PFA_HEX; else blocktyp = PFA_BINARY; memmove(buffer, line, last - line + 1); last = buffer + (last - line); line = buffer; /* patch up crlf fix */ if (blocktyp == PFA_BINARY && crlf) { last[-1] = '\r'; if (crlf == 2) *last++ = '\n'; } } /* blocktyp == PFA_HEX || blocktyp == PFA_BINARY */ if (all_zeroes(line)) { /* XXX not safe */ fr->output_ascii(line, last - line); blocktyp = PFA_ASCII; } else if (blocktyp == PFA_HEX) { int len = translate_hex_string(line, &saved_orphan); if (len) fr->output_binary((unsigned char *)line, len); } else fr->output_binary((unsigned char *)line, last - line); } fr->output_end(); } /* Process a PFB file. */ /* XXX Doesn't handle "currentfile eexec" as intelligently as process_pfa does. 
*/ static int handle_pfb_ascii(struct font_reader *fr, char *line, int len) { /* Divide PFB_ASCII blocks into lines */ int start = 0; while (1) { int pos = start; while (pos < len && line[pos] != '\n' && line[pos] != '\r') pos++; if (pos >= len) { if (pos == start) return 0; else if (start == 0 && pos == LINESIZE - 1) { line[pos] = 0; fr->output_ascii(line, pos); return 0; } else { memmove(line, line + start, pos - start); return pos - start; } } else if (pos < len - 1 && line[pos] == '\r' && line[pos+1] == '\n') { line[pos] = '\n'; line[pos+1] = 0; fr->output_ascii(line + start, pos + 1 - start); start = pos + 2; } else { char save = line[pos+1]; line[pos] = '\n'; line[pos+1] = 0; fr->output_ascii(line + start, pos + 1 - start); line[pos+1] = save; start = pos + 1; } } } void process_pfb(FILE *ifp, const char *ifp_filename, struct font_reader *fr) { int blocktyp = 0; unsigned block_len = 0; int c = 0; unsigned filepos = 0; int linepos = 0; char line[LINESIZE]; while (1) { while (block_len == 0) { c = getc(ifp); blocktyp = getc(ifp); if (c != PFB_MARKER || (blocktyp != PFB_ASCII && blocktyp != PFB_BINARY && blocktyp != PFB_DONE)) { if (c == EOF || blocktyp == EOF) error("%s corrupted: no end-of-file marker", ifp_filename); else error("%s corrupted: bad block marker at position %u", ifp_filename, filepos); blocktyp = PFB_DONE; } if (blocktyp == PFB_DONE) goto done; block_len = getc(ifp) & 0xFF; block_len |= (getc(ifp) & 0xFF) << 8; block_len |= (getc(ifp) & 0xFF) << 16; block_len |= (unsigned) (getc(ifp) & 0xFF) << 24; if (feof(ifp)) { error("%s corrupted: bad block length at position %u", ifp_filename, filepos); blocktyp = PFB_DONE; goto done; } filepos += 6; } /* read the block in its entirety, in LINESIZE chunks */ while (block_len > 0) { unsigned rest = LINESIZE - 1 - linepos; /* leave space for '\0' */ unsigned n = (block_len > rest ? rest : block_len); int actual = fread(line + linepos, 1, n, ifp); if (actual != (int) n) { error("%s corrupted: block short by %u bytes at position %u", ifp_filename, block_len - actual, filepos); block_len = actual; } if (blocktyp == PFB_BINARY) fr->output_binary((unsigned char *)line, actual); else linepos = handle_pfb_ascii(fr, line, linepos + actual); block_len -= actual; filepos += actual; } /* handle any leftover line */ if (linepos > 0) { line[linepos] = 0; fr->output_ascii(line, linepos); linepos = 0; } } done: c = getc(ifp); if (c != EOF) error("%s corrupted: data after PFB end marker at position %u", ifp_filename, filepos - 2); fr->output_end(); } #define DEFAULT_BLOCKLEN (1L<<12) void init_pfb_writer(struct pfb_writer *w, int blocklen, FILE *f) { w->len = DEFAULT_BLOCKLEN; w->buf = (unsigned char *)malloc(w->len); if (!w->buf) fatal_error("out of memory"); w->max_len = (blocklen <= 0 ? 
0xFFFFFFFFU : (unsigned)blocklen); w->pos = 0; w->blocktyp = PFB_ASCII; w->binary_blocks_written = 0; w->f = f; } void pfb_writer_output_block(struct pfb_writer *w) { /* do nothing if nothing in block */ if (w->pos == 0) return; /* output four-byte block length */ putc(PFB_MARKER, w->f); putc(w->blocktyp, w->f); putc((int)(w->pos & 0xff), w->f); putc((int)((w->pos >> 8) & 0xff), w->f); putc((int)((w->pos >> 16) & 0xff), w->f); putc((int)((w->pos >> 24) & 0xff), w->f); /* output block data */ fwrite(w->buf, 1, w->pos, w->f); /* mark block buffer empty and uninitialized */ w->pos = 0; if (w->blocktyp == PFB_BINARY) w->binary_blocks_written++; } void pfb_writer_grow_buf(struct pfb_writer *w) { if (w->len < w->max_len) { /* grow w->buf */ unsigned new_len = w->len * 2; unsigned char *new_buf; if (new_len > w->max_len) new_len = w->max_len; new_buf = (unsigned char *)malloc(new_len); if (!new_buf) { error("out of memory; continuing with a smaller block size"); w->max_len = w->len; pfb_writer_output_block(w); } else { memcpy(new_buf, w->buf, w->len); free(w->buf); w->buf = new_buf; w->len = new_len; } } else /* buf already the right size, just output the block */ pfb_writer_output_block(w); } void pfb_writer_end(struct pfb_writer *w) { if (w->pos) pfb_writer_output_block(w); putc(PFB_MARKER, w->f); putc(PFB_DONE, w->f); } /* This CRC table and routine were borrowed from macutils-2.0b3 */ static unsigned short crctab[256] = { 0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50A5, 0x60C6, 0x70E7, 0x8108, 0x9129, 0xA14A, 0xB16B, 0xC18C, 0xD1AD, 0xE1CE, 0xF1EF, 0x1231, 0x0210, 0x3273, 0x2252, 0x52B5, 0x4294, 0x72F7, 0x62D6, 0x9339, 0x8318, 0xB37B, 0xA35A, 0xD3BD, 0xC39C, 0xF3FF, 0xE3DE, 0x2462, 0x3443, 0x0420, 0x1401, 0x64E6, 0x74C7, 0x44A4, 0x5485, 0xA56A, 0xB54B, 0x8528, 0x9509, 0xE5EE, 0xF5CF, 0xC5AC, 0xD58D, 0x3653, 0x2672, 0x1611, 0x0630, 0x76D7, 0x66F6, 0x5695, 0x46B4, 0xB75B, 0xA77A, 0x9719, 0x8738, 0xF7DF, 0xE7FE, 0xD79D, 0xC7BC, 0x48C4, 0x58E5, 0x6886, 0x78A7, 0x0840, 0x1861, 0x2802, 0x3823, 0xC9CC, 0xD9ED, 0xE98E, 0xF9AF, 0x8948, 0x9969, 0xA90A, 0xB92B, 0x5AF5, 0x4AD4, 0x7AB7, 0x6A96, 0x1A71, 0x0A50, 0x3A33, 0x2A12, 0xDBFD, 0xCBDC, 0xFBBF, 0xEB9E, 0x9B79, 0x8B58, 0xBB3B, 0xAB1A, 0x6CA6, 0x7C87, 0x4CE4, 0x5CC5, 0x2C22, 0x3C03, 0x0C60, 0x1C41, 0xEDAE, 0xFD8F, 0xCDEC, 0xDDCD, 0xAD2A, 0xBD0B, 0x8D68, 0x9D49, 0x7E97, 0x6EB6, 0x5ED5, 0x4EF4, 0x3E13, 0x2E32, 0x1E51, 0x0E70, 0xFF9F, 0xEFBE, 0xDFDD, 0xCFFC, 0xBF1B, 0xAF3A, 0x9F59, 0x8F78, 0x9188, 0x81A9, 0xB1CA, 0xA1EB, 0xD10C, 0xC12D, 0xF14E, 0xE16F, 0x1080, 0x00A1, 0x30C2, 0x20E3, 0x5004, 0x4025, 0x7046, 0x6067, 0x83B9, 0x9398, 0xA3FB, 0xB3DA, 0xC33D, 0xD31C, 0xE37F, 0xF35E, 0x02B1, 0x1290, 0x22F3, 0x32D2, 0x4235, 0x5214, 0x6277, 0x7256, 0xB5EA, 0xA5CB, 0x95A8, 0x8589, 0xF56E, 0xE54F, 0xD52C, 0xC50D, 0x34E2, 0x24C3, 0x14A0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405, 0xA7DB, 0xB7FA, 0x8799, 0x97B8, 0xE75F, 0xF77E, 0xC71D, 0xD73C, 0x26D3, 0x36F2, 0x0691, 0x16B0, 0x6657, 0x7676, 0x4615, 0x5634, 0xD94C, 0xC96D, 0xF90E, 0xE92F, 0x99C8, 0x89E9, 0xB98A, 0xA9AB, 0x5844, 0x4865, 0x7806, 0x6827, 0x18C0, 0x08E1, 0x3882, 0x28A3, 0xCB7D, 0xDB5C, 0xEB3F, 0xFB1E, 0x8BF9, 0x9BD8, 0xABBB, 0xBB9A, 0x4A75, 0x5A54, 0x6A37, 0x7A16, 0x0AF1, 0x1AD0, 0x2AB3, 0x3A92, 0xFD2E, 0xED0F, 0xDD6C, 0xCD4D, 0xBDAA, 0xAD8B, 0x9DE8, 0x8DC9, 0x7C26, 0x6C07, 0x5C64, 0x4C45, 0x3CA2, 0x2C83, 0x1CE0, 0x0CC1, 0xEF1F, 0xFF3E, 0xCF5D, 0xDF7C, 0xAF9B, 0xBFBA, 0x8FD9, 0x9FF8, 0x6E17, 0x7E36, 0x4E55, 0x5E74, 0x2E93, 0x3EB2, 0x0ED1, 0x1EF0, }; /* * Update a CRC check on the given buffer. 
*/ int crcbuf(int crc, unsigned int len, const char *buf) { const unsigned char *ubuf = (const unsigned char *)buf; while (len--) crc = ((crc << 8) & 0xFF00) ^ crctab[((crc >> 8) & 0xFF) ^ *ubuf++]; return crc; } #ifdef __cplusplus } #endif
/* t1lib * * This file contains functions for reading PFA and PFB files. * * Copyright (c) 1998-2013 Eddie Kohler * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, subject to the * conditions listed in the Click LICENSE file, which is available in full at * http://github.com/kohler/click/blob/master/LICENSE. The conditions * include: you must preserve this copyright notice, and you cannot mention * the copyright holders in advertising related to the Software without * their permission. The Software is provided WITHOUT ANY WARRANTY, EXPRESS * OR IMPLIED. This notice is a summary of the Click LICENSE file; the * license in that file is binding. */ #ifdef HAVE_CONFIG_H # include <config.h> #endif #include <stdio.h> #include <stdlib.h> #include <ctype.h> #include <string.h> #include "t1lib.h" #ifdef __cplusplus extern "C" { #endif #define PFA_ASCII 1 #define PFA_EEXEC_TEST 2 #define PFA_HEX 3 #define PFA_BINARY 4 /* This function returns the value (0-15) of a single hex digit. It returns 0 for an invalid hex digit. */ static int hexval(char c) { if (c >= 'A' && c <= 'F') return c - 'A' + 10; else if (c >= 'a' && c <= 'f') return c - 'a' + 10; else if (c >= '0' && c <= '9') return c - '0'; else return 0; } /* This function translates a string of hexadecimal digits into binary data. We allow an odd number of digits. Returns length of binary data. */ static int translate_hex_string(char *s, char *saved_orphan) { int c1 = *saved_orphan; char *start = s; char *t = s; for (; *s; s++) { if (isspace((unsigned char) *s)) continue; if (c1) { *t++ = (hexval(c1) << 4) + hexval(*s); c1 = 0; } else c1 = *s; } *saved_orphan = c1; return t - start; } /* This function returns 1 if the string contains all '0's. */ static int all_zeroes(char *s) { if (*s == '\0' || *s == '\n') return 0; while (*s == '0') s++; return *s == '\0' || *s == '\n'; } /* This function handles the entire file. */ #define LINESIZE 1024 void process_pfa(FILE *ifp, const char *ifp_filename, struct font_reader *fr) { /* Loop until no more input. We need to look for `currentfile eexec' to start eexec section (hex to binary conversion) and line of all zeros to switch back to ASCII. */ /* Don't use fgets() in case line-endings are indicated by bare \r's, as occurs in Macintosh fonts. */ /* 2.Aug.1999 - At the behest of Tom Kacvinsky <tjk@ams.org>, support binary PFA fonts. */ char buffer[LINESIZE]; int c = 0; int blocktyp = PFA_ASCII; char saved_orphan = 0; (void)ifp_filename; while (c != EOF) { char *line = buffer, *last = buffer; int crlf = 0; c = getc(ifp); while (c != EOF && c != '\r' && c != '\n' && last < buffer + LINESIZE - 1) { *last++ = c; c = getc(ifp); } /* handle the end of the line */ if (last == buffer + LINESIZE - 1) /* buffer overrun: don't append newline even if we have it */ ungetc(c, ifp); else if (c == '\r' && blocktyp != PFA_BINARY) { /* change CR or CR/LF into LF, unless reading binary data! 
(This condition was wrong before, caused Thanh problems - 6.Mar.2001) */ c = getc(ifp); if (c != '\n') ungetc(c, ifp), crlf = 1; else crlf = 2; *last++ = '\n'; } else if (c != EOF) *last++ = c; *last = 0; /* now that we have the line, handle it */ if (blocktyp == PFA_ASCII) { if (strncmp(line, "currentfile eexec", 17) == 0 && isspace((unsigned char) line[17])) { char saved_p; /* assert(line == buffer); */ for (line += 18; isspace((unsigned char) *line); line++) /* nada */; saved_p = *line; *line = 0; fr->output_ascii(buffer, line - buffer); *line = saved_p; blocktyp = PFA_EEXEC_TEST; if (!*line) continue; } else { fr->output_ascii(line, last - line); continue; } } /* check immediately after "currentfile eexec" for ASCII or binary */ if (blocktyp == PFA_EEXEC_TEST) { /* 8.Feb.2004: fix bug if first character in a binary eexec block is 0, reported by Werner Lemberg */ for (; line < last && isspace((unsigned char) *line); line++) /* nada */; if (line == last) continue; else if (last >= line + 4 && isxdigit((unsigned char) line[0]) && isxdigit((unsigned char) line[1]) && isxdigit((unsigned char) line[2]) && isxdigit((unsigned char) line[3])) blocktyp = PFA_HEX; else blocktyp = PFA_BINARY; memmove(buffer, line, last - line + 1); last = buffer + (last - line); line = buffer; /* patch up crlf fix */ if (blocktyp == PFA_BINARY && crlf) { last[-1] = '\r'; if (crlf == 2) *last++ = '\n'; } } /* blocktyp == PFA_HEX || blocktyp == PFA_BINARY */ if (all_zeroes(line)) { /* XXX not safe */ fr->output_ascii(line, last - line); blocktyp = PFA_ASCII; } else if (blocktyp == PFA_HEX) { int len = translate_hex_string(line, &saved_orphan); if (len) fr->output_binary((unsigned char *)line, len); } else fr->output_binary((unsigned char *)line, last - line); } fr->output_end(); } /* Process a PFB file. */ /* XXX Doesn't handle "currentfile eexec" as intelligently as process_pfa does. 
*/ static int handle_pfb_ascii(struct font_reader *fr, char *line, int len) { /* Divide PFB_ASCII blocks into lines */ int start = 0; while (1) { int pos = start; while (pos < len && line[pos] != '\n' && line[pos] != '\r') pos++; if (pos >= len) { if (pos == start) return 0; else if (start == 0 && pos == LINESIZE - 1) { line[pos] = 0; fr->output_ascii(line, pos); return 0; } else { memmove(line, line + start, pos - start); return pos - start; } } else if (pos < len - 1 && line[pos] == '\r' && line[pos+1] == '\n') { line[pos] = '\n'; line[pos+1] = 0; fr->output_ascii(line + start, pos + 1 - start); start = pos + 2; } else { char save = line[pos+1]; line[pos] = '\n'; line[pos+1] = 0; fr->output_ascii(line + start, pos + 1 - start); line[pos+1] = save; start = pos + 1; } } } void process_pfb(FILE *ifp, const char *ifp_filename, struct font_reader *fr) { int blocktyp = 0; unsigned block_len = 0; int c = 0; unsigned filepos = 0; int linepos = 0; char line[LINESIZE]; while (1) { while (block_len == 0) { c = getc(ifp); blocktyp = getc(ifp); if (c != PFB_MARKER || (blocktyp != PFB_ASCII && blocktyp != PFB_BINARY && blocktyp != PFB_DONE)) { if (c == EOF || blocktyp == EOF) error("%s corrupted: no end-of-file marker", ifp_filename); else error("%s corrupted: bad block marker at position %u", ifp_filename, filepos); blocktyp = PFB_DONE; } if (blocktyp == PFB_DONE) goto done; block_len = getc(ifp) & 0xFF; block_len |= (getc(ifp) & 0xFF) << 8; block_len |= (getc(ifp) & 0xFF) << 16; block_len |= (unsigned) (getc(ifp) & 0xFF) << 24; if (feof(ifp)) { error("%s corrupted: bad block length at position %u", ifp_filename, filepos); blocktyp = PFB_DONE; goto done; } filepos += 6; } /* read the block in its entirety, in LINESIZE chunks */ while (block_len > 0) { unsigned rest = LINESIZE - 1 - linepos; /* leave space for '\0' */ unsigned n = (block_len > rest ? rest : block_len); int actual = fread(line + linepos, 1, n, ifp); if (actual != (int) n) { error("%s corrupted: block short by %u bytes at position %u", ifp_filename, block_len - actual, filepos); block_len = actual; } if (blocktyp == PFB_BINARY) fr->output_binary((unsigned char *)line, actual); else linepos = handle_pfb_ascii(fr, line, linepos + actual); block_len -= actual; filepos += actual; } /* handle any leftover line */ if (linepos > 0) { line[linepos] = 0; fr->output_ascii(line, linepos); linepos = 0; } } done: c = getc(ifp); if (c != EOF) error("%s corrupted: data after PFB end marker at position %u", ifp_filename, filepos - 2); fr->output_end(); } #define DEFAULT_BLOCKLEN (1L<<12) void init_pfb_writer(struct pfb_writer *w, int blocklen, FILE *f) { w->len = DEFAULT_BLOCKLEN; w->buf = (unsigned char *)malloc(w->len); if (!w->buf) fatal_error("out of memory"); w->max_len = (blocklen <= 0 ? 
0xFFFFFFFFU : (unsigned)blocklen); w->pos = 0; w->blocktyp = PFB_ASCII; w->binary_blocks_written = 0; w->f = f; } void pfb_writer_output_block(struct pfb_writer *w) { /* do nothing if nothing in block */ if (w->pos == 0) return; /* output four-byte block length */ putc(PFB_MARKER, w->f); putc(w->blocktyp, w->f); putc((int)(w->pos & 0xff), w->f); putc((int)((w->pos >> 8) & 0xff), w->f); putc((int)((w->pos >> 16) & 0xff), w->f); putc((int)((w->pos >> 24) & 0xff), w->f); /* output block data */ fwrite(w->buf, 1, w->pos, w->f); /* mark block buffer empty and uninitialized */ w->pos = 0; if (w->blocktyp == PFB_BINARY) w->binary_blocks_written++; } void pfb_writer_grow_buf(struct pfb_writer *w) { if (w->len < w->max_len) { /* grow w->buf */ unsigned new_len = w->len * 2; unsigned char *new_buf; if (new_len > w->max_len) new_len = w->max_len; new_buf = (unsigned char *)malloc(new_len); if (!new_buf) { error("out of memory; continuing with a smaller block size"); w->max_len = w->len; pfb_writer_output_block(w); } else { memcpy(new_buf, w->buf, w->len); free(w->buf); w->buf = new_buf; w->len = new_len; } } else /* buf already the right size, just output the block */ pfb_writer_output_block(w); } void pfb_writer_end(struct pfb_writer *w) { if (w->pos) pfb_writer_output_block(w); putc(PFB_MARKER, w->f); putc(PFB_DONE, w->f); } /* This CRC table and routine were borrowed from macutils-2.0b3 */ static unsigned short crctab[256] = { 0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50A5, 0x60C6, 0x70E7, 0x8108, 0x9129, 0xA14A, 0xB16B, 0xC18C, 0xD1AD, 0xE1CE, 0xF1EF, 0x1231, 0x0210, 0x3273, 0x2252, 0x52B5, 0x4294, 0x72F7, 0x62D6, 0x9339, 0x8318, 0xB37B, 0xA35A, 0xD3BD, 0xC39C, 0xF3FF, 0xE3DE, 0x2462, 0x3443, 0x0420, 0x1401, 0x64E6, 0x74C7, 0x44A4, 0x5485, 0xA56A, 0xB54B, 0x8528, 0x9509, 0xE5EE, 0xF5CF, 0xC5AC, 0xD58D, 0x3653, 0x2672, 0x1611, 0x0630, 0x76D7, 0x66F6, 0x5695, 0x46B4, 0xB75B, 0xA77A, 0x9719, 0x8738, 0xF7DF, 0xE7FE, 0xD79D, 0xC7BC, 0x48C4, 0x58E5, 0x6886, 0x78A7, 0x0840, 0x1861, 0x2802, 0x3823, 0xC9CC, 0xD9ED, 0xE98E, 0xF9AF, 0x8948, 0x9969, 0xA90A, 0xB92B, 0x5AF5, 0x4AD4, 0x7AB7, 0x6A96, 0x1A71, 0x0A50, 0x3A33, 0x2A12, 0xDBFD, 0xCBDC, 0xFBBF, 0xEB9E, 0x9B79, 0x8B58, 0xBB3B, 0xAB1A, 0x6CA6, 0x7C87, 0x4CE4, 0x5CC5, 0x2C22, 0x3C03, 0x0C60, 0x1C41, 0xEDAE, 0xFD8F, 0xCDEC, 0xDDCD, 0xAD2A, 0xBD0B, 0x8D68, 0x9D49, 0x7E97, 0x6EB6, 0x5ED5, 0x4EF4, 0x3E13, 0x2E32, 0x1E51, 0x0E70, 0xFF9F, 0xEFBE, 0xDFDD, 0xCFFC, 0xBF1B, 0xAF3A, 0x9F59, 0x8F78, 0x9188, 0x81A9, 0xB1CA, 0xA1EB, 0xD10C, 0xC12D, 0xF14E, 0xE16F, 0x1080, 0x00A1, 0x30C2, 0x20E3, 0x5004, 0x4025, 0x7046, 0x6067, 0x83B9, 0x9398, 0xA3FB, 0xB3DA, 0xC33D, 0xD31C, 0xE37F, 0xF35E, 0x02B1, 0x1290, 0x22F3, 0x32D2, 0x4235, 0x5214, 0x6277, 0x7256, 0xB5EA, 0xA5CB, 0x95A8, 0x8589, 0xF56E, 0xE54F, 0xD52C, 0xC50D, 0x34E2, 0x24C3, 0x14A0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405, 0xA7DB, 0xB7FA, 0x8799, 0x97B8, 0xE75F, 0xF77E, 0xC71D, 0xD73C, 0x26D3, 0x36F2, 0x0691, 0x16B0, 0x6657, 0x7676, 0x4615, 0x5634, 0xD94C, 0xC96D, 0xF90E, 0xE92F, 0x99C8, 0x89E9, 0xB98A, 0xA9AB, 0x5844, 0x4865, 0x7806, 0x6827, 0x18C0, 0x08E1, 0x3882, 0x28A3, 0xCB7D, 0xDB5C, 0xEB3F, 0xFB1E, 0x8BF9, 0x9BD8, 0xABBB, 0xBB9A, 0x4A75, 0x5A54, 0x6A37, 0x7A16, 0x0AF1, 0x1AD0, 0x2AB3, 0x3A92, 0xFD2E, 0xED0F, 0xDD6C, 0xCD4D, 0xBDAA, 0xAD8B, 0x9DE8, 0x8DC9, 0x7C26, 0x6C07, 0x5C64, 0x4C45, 0x3CA2, 0x2C83, 0x1CE0, 0x0CC1, 0xEF1F, 0xFF3E, 0xCF5D, 0xDF7C, 0xAF9B, 0xBFBA, 0x8FD9, 0x9FF8, 0x6E17, 0x7E36, 0x4E55, 0x5E74, 0x2E93, 0x3EB2, 0x0ED1, 0x1EF0, }; /* * Update a CRC check on the given buffer. 
*/ int crcbuf(int crc, unsigned int len, const char *buf) { const unsigned char *ubuf = (const unsigned char *)buf; while (len--) crc = ((crc << 8) & 0xFF00) ^ crctab[((crc >> 8) & 0xFF) ^ *ubuf++]; return crc; } #ifdef __cplusplus } #endif
func_before: translate_hex_string(char *s, char *saved_orphan) { int c1 = *saved_orphan; char *start = s; char *t = s; for (; *s; s++) { if (isspace(*s)) continue; if (c1) { *t++ = (hexval(c1) << 4) + hexval(*s); c1 = 0; } else c1 = *s; } *saved_orphan = c1; return t - start; }
func_after: translate_hex_string(char *s, char *saved_orphan) { int c1 = *saved_orphan; char *start = s; char *t = s; for (; *s; s++) { if (isspace((unsigned char) *s)) continue; if (c1) { *t++ = (hexval(c1) << 4) + hexval(*s); c1 = 0; } else c1 = *s; } *saved_orphan = c1; return t - start; }
diff: {'added': [(62, ' if (isspace((unsigned char) *s))'), (139, '\t if (strncmp(line, "currentfile eexec", 17) == 0 && isspace((unsigned char) line[17])) {'), (142, '\t\tfor (line += 18; isspace((unsigned char) *line); line++)'), (161, '\t for (; line < last && isspace((unsigned char) *line); line++)'), (165, '\t else if (last >= line + 4 && isxdigit((unsigned char) line[0])'), (166, ' && isxdigit((unsigned char) line[1])'), (167, '\t\t && isxdigit((unsigned char) line[2])'), (168, ' && isxdigit((unsigned char) line[3]))')], 'deleted': [(62, ' if (isspace(*s))'), (139, '\t if (strncmp(line, "currentfile eexec", 17) == 0 && isspace(line[17])) {'), (142, '\t\tfor (line += 18; isspace(*line); line++)'), (161, '\t for (; line < last && isspace(*line); line++)'), (165, '\t else if (last >= line + 4 && isxdigit(line[0]) && isxdigit(line[1])'), (166, '\t\t && isxdigit(line[2]) && isxdigit(line[3]))')]}
num_lines_added: 8
num_lines_deleted: 6
num_lines_in_file: 326
num_tokens_in_file: 2,448
repo: https://github.com/kohler/t1utils
cve_id: CVE-2015-3905
cwe_id: ['CWE-119']
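The func_before/func_after pair above shows the entire change inside translate_hex_string: a single (unsigned char) cast on the byte tested by isspace(). The function decodes hex digits in place and, because an odd number of digits is allowed, carries a leftover digit to the next call through *saved_orphan. The following self-contained sketch copies hexval() and the fixed translate_hex_string from this record (with one added comment); the two-buffer driver in main() is illustrative only.

#include <ctype.h>
#include <stdio.h>

static int hexval(char c)
{
    if (c >= 'A' && c <= 'F')
        return c - 'A' + 10;
    else if (c >= 'a' && c <= 'f')
        return c - 'a' + 10;
    else if (c >= '0' && c <= '9')
        return c - '0';
    else
        return 0;
}

static int translate_hex_string(char *s, char *saved_orphan)
{
    int c1 = *saved_orphan;
    char *start = s;
    char *t = s;
    for (; *s; s++) {
        if (isspace((unsigned char) *s))  /* the CVE-2015-3905 cast */
            continue;
        if (c1) {
            *t++ = (hexval(c1) << 4) + hexval(*s);
            c1 = 0;
        } else
            c1 = *s;
    }
    *saved_orphan = c1;
    return t - start;
}

int main(void)
{
    char orphan = 0;
    char a[] = "48 65 6C 6C 6F 2";  /* odd digit count: '2' becomes the orphan */
    char b[] = "1";                 /* completes 0x21, i.e. '!' */
    int n = translate_hex_string(a, &orphan);
    fwrite(a, 1, (size_t) n, stdout);  /* prints "Hello" */
    n = translate_hex_string(b, &orphan);
    fwrite(b, 1, (size_t) n, stdout);  /* prints "!" */
    putchar('\n');
    return 0;
}

On all-ASCII input like this the pre-fix version behaves identically; the cast only matters once a byte outside 0..127 reaches isspace().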
file_name: filedump.c
method_name: print_udta
/* * GPAC - Multimedia Framework C SDK * * Authors: Jean Le Feuvre * Copyright (c) Telecom ParisTech 2000-2021 * All rights reserved * * This file is part of GPAC / mp4box application * * GPAC is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * GPAC is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; see the file COPYING. If not, write to * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. * */ #include "mp4box.h" #if defined(GPAC_DISABLE_ISOM) || defined(GPAC_DISABLE_ISOM_WRITE) #error "Cannot compile MP4Box if GPAC is not built with ISO File Format support" #else #ifndef GPAC_DISABLE_X3D #include <gpac/nodes_x3d.h> #endif #ifndef GPAC_DISABLE_BIFS #include <gpac/internal/bifs_dev.h> #endif #ifndef GPAC_DISABLE_VRML #include <gpac/nodes_mpeg4.h> #endif #include <gpac/constants.h> #include <gpac/avparse.h> #include <gpac/internal/media_dev.h> /*ISO 639 languages*/ #include <gpac/iso639.h> #include <gpac/mpegts.h> #ifndef GPAC_DISABLE_SMGR #include <gpac/scene_manager.h> #endif #include <gpac/internal/media_dev.h> #include <gpac/media_tools.h> /*for built-in box printing*/ #include <gpac/internal/isomedia_dev.h> extern u32 swf_flags; extern Float swf_flatten_angle; extern GF_FileType get_file_type_by_ext(char *inName); extern u32 fs_dump_flags; void scene_coding_log(void *cbk, GF_LOG_Level log_level, GF_LOG_Tool log_tool, const char *fmt, va_list vlist); #ifdef GPAC_DISABLE_LOG void mp4box_log(const char *fmt, ...) { va_list vl; va_start(vl, fmt); vfprintf(stderr, fmt, vlist); fflush(stderr); va_end(vl); } #endif u32 PrintLanguages(char *val, u32 opt) { u32 i=0, count = gf_lang_get_count(); fprintf(stderr, "Supported ISO 639 languages and codes:\n\n"); for (i=0; i<count; i++) { if (gf_lang_get_2cc(i)) { fprintf(stderr, "%s (%s - %s)\n", gf_lang_get_name(i), gf_lang_get_3cc(i), gf_lang_get_2cc(i)); } } return 1; } static const char *GetLanguage(char *lcode) { s32 idx = gf_lang_find(lcode); if (idx>=0) return gf_lang_get_name(idx); return lcode; } GF_Err dump_isom_cover_art(GF_ISOFile *file, char *inName, Bool is_final_name) { const u8 *tag; FILE *t; u32 tag_len; GF_Err e = gf_isom_apple_get_tag(file, GF_ISOM_ITUNE_COVER_ART, &tag, &tag_len); if (e!=GF_OK) { if (e==GF_URL_ERROR) { M4_LOG(GF_LOG_WARNING, ("No cover art found\n")); return GF_OK; } return e; } if (inName) { char szName[1024]; if (is_final_name) { strcpy(szName, inName); } else { sprintf(szName, "%s.%s", inName, (tag_len>>31) ? 
"png" : "jpg"); } t = gf_fopen(szName, "wb"); if (!t) { M4_LOG(GF_LOG_ERROR, ("Failed to open %s for dumping\n", szName)); return GF_IO_ERR; } } else { t = stdout; } gf_fwrite(tag, tag_len & 0x7FFFFFFF, t); if (inName) gf_fclose(t); return GF_OK; } #ifndef GPAC_DISABLE_SCENE_DUMP GF_Err dump_isom_scene(char *file, char *inName, Bool is_final_name, GF_SceneDumpFormat dump_mode, Bool do_log, Bool no_odf_conv) { GF_Err e; GF_SceneManager *ctx; GF_SceneGraph *sg; GF_SceneLoader load; GF_FileType ftype; gf_log_cbk prev_logs = NULL; FILE *logs = NULL; sg = gf_sg_new(); ctx = gf_sm_new(sg); memset(&load, 0, sizeof(GF_SceneLoader)); load.fileName = file; load.ctx = ctx; load.swf_import_flags = swf_flags; if (dump_mode == GF_SM_DUMP_SVG) { load.swf_import_flags |= GF_SM_SWF_USE_SVG; load.svgOutFile = inName; } load.swf_flatten_limit = swf_flatten_angle; ftype = get_file_type_by_ext(file); if (ftype == GF_FILE_TYPE_ISO_MEDIA) { load.isom = gf_isom_open(file, GF_ISOM_OPEN_READ, NULL); if (!load.isom) { e = gf_isom_last_error(NULL); M4_LOG(GF_LOG_ERROR, ("Error opening file: %s\n", gf_error_to_string(e))); gf_sm_del(ctx); gf_sg_del(sg); return e; } if (no_odf_conv) gf_isom_disable_odf_conversion(load.isom, GF_TRUE); } else if (ftype==GF_FILE_TYPE_LSR_SAF) { load.isom = gf_isom_open("saf_conv", GF_ISOM_WRITE_EDIT, NULL); #ifndef GPAC_DISABLE_MEDIA_IMPORT if (load.isom) { GF_Fraction _frac = {0,0}; e = import_file(load.isom, file, 0, _frac, 0, NULL, NULL, 0); } else #else M4_LOG(GF_LOG_WARNING, ("Warning: GPAC was compiled without Media Import support\n")); #endif e = gf_isom_last_error(NULL); if (e) { M4_LOG(GF_LOG_ERROR, ("Error importing file: %s\n", gf_error_to_string(e))); gf_sm_del(ctx); gf_sg_del(sg); if (load.isom) gf_isom_delete(load.isom); return e; } } if (do_log) { char szLog[GF_MAX_PATH]; sprintf(szLog, "%s_dec.logs", inName); logs = gf_fopen(szLog, "wt"); gf_log_set_tool_level(GF_LOG_CODING, GF_LOG_DEBUG); prev_logs = gf_log_set_callback(logs, scene_coding_log); } e = gf_sm_load_init(&load); if (!e) e = gf_sm_load_run(&load); gf_sm_load_done(&load); if (logs) { gf_log_set_tool_level(GF_LOG_CODING, GF_LOG_ERROR); gf_log_set_callback(NULL, prev_logs); gf_fclose(logs); } if (!e && dump_mode != GF_SM_DUMP_SVG) { u32 count = gf_list_count(ctx->streams); if (count) fprintf(stderr, "Scene loaded - dumping %d systems streams\n", count); else fprintf(stderr, "Scene loaded - dumping root scene\n"); e = gf_sm_dump(ctx, inName, is_final_name, dump_mode); } gf_sm_del(ctx); gf_sg_del(sg); if (e) M4_LOG(GF_LOG_ERROR, ("Error loading scene: %s\n", gf_error_to_string(e))); if (load.isom) gf_isom_delete(load.isom); return e; } #endif #ifndef GPAC_DISABLE_SCENE_STATS static void dump_stats(FILE *dump, const GF_SceneStatistics *stats) { u32 i; s32 created, count, draw_created, draw_count, deleted, draw_deleted; created = count = draw_created = draw_count = deleted = draw_deleted = 0; fprintf(dump, "<NodeStatistics>\n"); fprintf(dump, "<General NumberOfNodeTypes=\"%d\"/>\n", gf_list_count(stats->node_stats)); for (i=0; i<gf_list_count(stats->node_stats); i++) { GF_NodeStats *ptr = gf_list_get(stats->node_stats, i); fprintf(dump, "<NodeStat NodeName=\"%s\">\n", ptr->name); switch (ptr->tag) { #ifndef GPAC_DISABLE_VRML case TAG_MPEG4_Bitmap: case TAG_MPEG4_Background2D: case TAG_MPEG4_Background: case TAG_MPEG4_Box: case TAG_MPEG4_Circle: case TAG_MPEG4_CompositeTexture2D: case TAG_MPEG4_CompositeTexture3D: case TAG_MPEG4_Cylinder: case TAG_MPEG4_Cone: case TAG_MPEG4_Curve2D: case TAG_MPEG4_Extrusion: case 
TAG_MPEG4_ElevationGrid: case TAG_MPEG4_IndexedFaceSet2D: case TAG_MPEG4_IndexedFaceSet: case TAG_MPEG4_IndexedLineSet2D: case TAG_MPEG4_IndexedLineSet: case TAG_MPEG4_PointSet2D: case TAG_MPEG4_PointSet: case TAG_MPEG4_Rectangle: case TAG_MPEG4_Sphere: case TAG_MPEG4_Text: case TAG_MPEG4_Ellipse: case TAG_MPEG4_XCurve2D: draw_count += ptr->nb_created + ptr->nb_used - ptr->nb_del; draw_deleted += ptr->nb_del; draw_created += ptr->nb_created; break; #endif /*GPAC_DISABLE_VRML*/ } fprintf(dump, "<Instanciation NbObjects=\"%d\" NbUse=\"%d\" NbDestroy=\"%d\"/>\n", ptr->nb_created, ptr->nb_used, ptr->nb_del); count += ptr->nb_created + ptr->nb_used; deleted += ptr->nb_del; created += ptr->nb_created; fprintf(dump, "</NodeStat>\n"); } if (i) { fprintf(dump, "<CumulatedStat TotalNumberOfNodes=\"%d\" ReallyAllocatedNodes=\"%d\" DeletedNodes=\"%d\" NumberOfAttributes=\"%d\"/>\n", count, created, deleted, stats->nb_svg_attributes); fprintf(dump, "<DrawableNodesCumulatedStat TotalNumberOfNodes=\"%d\" ReallyAllocatedNodes=\"%d\" DeletedNodes=\"%d\"/>\n", draw_count, draw_created, draw_deleted); } fprintf(dump, "</NodeStatistics>\n"); created = count = deleted = 0; if (gf_list_count(stats->proto_stats)) { fprintf(dump, "<ProtoStatistics NumberOfProtoUsed=\"%d\">\n", gf_list_count(stats->proto_stats)); for (i=0; i<gf_list_count(stats->proto_stats); i++) { GF_NodeStats *ptr = gf_list_get(stats->proto_stats, i); fprintf(dump, "<ProtoStat ProtoName=\"%s\">\n", ptr->name); fprintf(dump, "<Instanciation NbObjects=\"%d\" NbUse=\"%d\" NbDestroy=\"%d\"/>\n", ptr->nb_created, ptr->nb_used, ptr->nb_del); count += ptr->nb_created + ptr->nb_used; deleted += ptr->nb_del; created += ptr->nb_created; fprintf(dump, "</ProtoStat>\n"); } if (i) fprintf(dump, "<CumulatedStat TotalNumberOfProtos=\"%d\" ReallyAllocatedProtos=\"%d\" DeletedProtos=\"%d\"/>\n", count, created, deleted); fprintf(dump, "</ProtoStatistics>\n"); } fprintf(dump, "<FixedValues min=\"%f\" max=\"%f\">\n", FIX2FLT( stats->min_fixed) , FIX2FLT( stats->max_fixed )); fprintf(dump, "<Resolutions scaleIntegerPart=\"%d\" scaleFracPart=\"%d\" coordIntegerPart=\"%d\" coordFracPart=\"%d\"/>\n", stats->scale_int_res_2d, stats->scale_frac_res_2d, stats->int_res_2d, stats->frac_res_2d); fprintf(dump, "</FixedValues>\n"); fprintf(dump, "<FieldStatistic FieldType=\"MFVec2f\">\n"); fprintf(dump, "<ParsingInfo NumParsed=\"%d\" NumRemoved=\"%d\"/>\n", stats->count_2d, stats->rem_2d); if (stats->count_2d) { fprintf(dump, "<ExtendInfo MinVec2f=\"%f %f\" MaxVec2f=\"%f %f\"/>\n", FIX2FLT( stats->min_2d.x) , FIX2FLT( stats->min_2d.y ), FIX2FLT( stats->max_2d.x ), FIX2FLT( stats->max_2d.y ) ); } fprintf(dump, "</FieldStatistic>\n"); fprintf(dump, "<FieldStatistic FieldType=\"MFVec3f\">\n"); fprintf(dump, "<ParsingInfo NumParsed=\"%d\" NumRemoved=\"%d\"/>", stats->count_3d, stats->rem_3d); if (stats->count_3d) { fprintf(dump, "<ExtendInfo MinVec3f=\"%f %f %f\" MaxVec3f=\"%f %f %f\"/>\n", FIX2FLT( stats->min_3d.x ), FIX2FLT( stats->min_3d.y ), FIX2FLT( stats->min_3d.z ), FIX2FLT( stats->max_3d.x ), FIX2FLT( stats->max_3d.y ), FIX2FLT( stats->max_3d.z ) ); } fprintf(dump, "</FieldStatistic>\n"); fprintf(dump, "<FieldStatistic FieldType=\"MF/SFColor\">\n"); fprintf(dump, "<ParsingInfo NumParsed=\"%d\" NumRemoved=\"%d\"/>", stats->count_color, stats->rem_color); fprintf(dump, "</FieldStatistic>\n"); fprintf(dump, "<FieldStatistic FieldType=\"MF/SFFloat\">\n"); fprintf(dump, "<ParsingInfo NumParsed=\"%d\" NumRemoved=\"%d\"/>", stats->count_float, stats->rem_float); fprintf(dump, 
"</FieldStatistic>\n"); fprintf(dump, "<FieldStatistic FieldType=\"SFVec2f\">\n"); fprintf(dump, "<ParsingInfo NumParsed=\"%d\"/>", stats->count_2f); fprintf(dump, "</FieldStatistic>\n"); fprintf(dump, "<FieldStatistic FieldType=\"SFVec3f\">\n"); fprintf(dump, "<ParsingInfo NumParsed=\"%d\"/>", stats->count_3f); fprintf(dump, "</FieldStatistic>\n"); } static void ReorderAU(GF_List *sample_list, GF_AUContext *au) { u32 i; for (i=0; i<gf_list_count(sample_list); i++) { GF_AUContext *ptr = gf_list_get(sample_list, i); if ( /*time ordered*/ (ptr->timing_sec > au->timing_sec) /*set bifs first*/ || ((ptr->timing_sec == au->timing_sec) && (ptr->owner->streamType < au->owner->streamType)) ) { gf_list_insert(sample_list, au, i); return; } } gf_list_add(sample_list, au); } void dump_isom_scene_stats(char *file, char *inName, Bool is_final_name, u32 stat_level) { GF_Err e; FILE *dump; Bool close; u32 i, j, count; char szBuf[1024]; GF_SceneManager *ctx; GF_SceneLoader load; GF_StatManager *sm; GF_List *sample_list; GF_SceneGraph *scene_graph; dump = NULL; sm = NULL; sample_list = NULL; close = 0; scene_graph = gf_sg_new(); ctx = gf_sm_new(scene_graph); memset(&load, 0, sizeof(GF_SceneLoader)); load.fileName = file; load.ctx = ctx; if (get_file_type_by_ext(file) == 1) { load.isom = gf_isom_open(file, GF_ISOM_OPEN_READ, NULL); if (!load.isom) { M4_LOG(GF_LOG_ERROR, ("Cannot open file: %s\n", gf_error_to_string(gf_isom_last_error(NULL)))); gf_sm_del(ctx); gf_sg_del(scene_graph); return; } } e = gf_sm_load_init(&load); if (!e) e = gf_sm_load_run(&load); gf_sm_load_done(&load); if (e<0) goto exit; if (inName) { strcpy(szBuf, inName); if (!is_final_name) strcat(szBuf, "_stat.xml"); dump = gf_fopen(szBuf, "wt"); if (!dump) { M4_LOG(GF_LOG_ERROR, ("Failed to open %s for dumping\n", szBuf)); return; } close = 1; } else { dump = stdout; close = 0; } fprintf(stderr, "Analysing Scene\n"); fprintf(dump, "<?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n"); fprintf(dump, "<!-- Scene Graph Statistics Generated by MP4Box - GPAC "); if (! gf_sys_is_test_mode()) fprintf(dump, "%s ", gf_gpac_version()); fprintf(dump, "-->\n"); fprintf(dump, "<SceneStatistics file=\"%s\" DumpType=\"%s\">\n", gf_file_basename(file), (stat_level==1) ? "full scene" : ((stat_level==2) ? 
"AccessUnit based" : "SceneGraph after each AU")); sm = gf_sm_stats_new(); /*stat level 1: complete scene stat*/ if (stat_level == 1) { e = gf_sm_stats_for_scene(sm, ctx); if (!e) dump_stats(dump, gf_sm_stats_get(sm) ); goto exit; } /*re_order all BIFS-AUs*/ sample_list = gf_list_new(); /*configure all systems streams we're dumping*/ for (i=0; i<gf_list_count(ctx->streams); i++) { GF_StreamContext *sc = gf_list_get(ctx->streams, i); if (sc->streamType != GF_STREAM_SCENE) continue; for (j=0; j<gf_list_count(sc->AUs); j++) { GF_AUContext *au = gf_list_get(sc->AUs, j); ReorderAU(sample_list, au); } } count = gf_list_count(sample_list); for (i=0; i<count; i++) { GF_AUContext *au = gf_list_get(sample_list, i); for (j=0; j<gf_list_count(au->commands); j++) { GF_Command *com = gf_list_get(au->commands, j); /*stat level 2 - get command stats*/ if (stat_level==2) { e = gf_sm_stats_for_command(sm, com); if (e) goto exit; } /*stat level 3 - apply command*/ if (stat_level==3) gf_sg_command_apply(scene_graph, com, 0); } /*stat level 3: get graph stat*/ if (stat_level==3) { e = gf_sm_stats_for_graph(sm, scene_graph); if (e) goto exit; } if (stat_level==2) { fprintf(dump, "<AUStatistics StreamID=\"%d\" AUTime=\""LLD"\">\n", au->owner->ESID, au->timing); } else { fprintf(dump, "<GraphStatistics StreamID=\"%d\" AUTime=\""LLD"\">\n", au->owner->ESID, au->timing); } /*dump stats*/ dump_stats(dump, gf_sm_stats_get(sm) ); /*reset stats*/ gf_sm_stats_reset(sm); if (stat_level==2) { fprintf(dump, "</AUStatistics>\n"); } else { fprintf(dump, "</GraphStatistics>\n"); } gf_set_progress("Analysing AU", i+1, count); } exit: if (sample_list) gf_list_del(sample_list); if (sm) gf_sm_stats_del(sm); gf_sm_del(ctx); gf_sg_del(scene_graph); if (load.isom) gf_isom_delete(load.isom); if (e) { M4_LOG(GF_LOG_ERROR, ("Stats error: %s\n", gf_error_to_string(e))); } else { fprintf(dump, "</SceneStatistics>\n"); } if (dump && close) gf_fclose(dump); fprintf(stderr, "done\n"); } #endif /*GPAC_DISABLE_SCENE_STATS*/ #ifndef GPAC_DISABLE_VRML static void PrintFixed(Fixed val, Bool add_space) { if (add_space) fprintf(stderr, " "); if (val==FIX_MIN) fprintf(stderr, "-I"); else if (val==FIX_MAX) fprintf(stderr, "+I"); else fprintf(stderr, "%g", FIX2FLT(val)); } static void PrintNodeSFField(u32 type, void *far_ptr) { if (!far_ptr) return; switch (type) { case GF_SG_VRML_SFBOOL: fprintf(stderr, "%s", (*(SFBool *)far_ptr) ? 
"TRUE" : "FALSE"); break; case GF_SG_VRML_SFINT32: fprintf(stderr, "%d", (*(SFInt32 *)far_ptr)); break; case GF_SG_VRML_SFFLOAT: PrintFixed((*(SFFloat *)far_ptr), 0); break; case GF_SG_VRML_SFTIME: fprintf(stderr, "%g", (*(SFTime *)far_ptr)); break; case GF_SG_VRML_SFVEC2F: PrintFixed(((SFVec2f *)far_ptr)->x, 0); PrintFixed(((SFVec2f *)far_ptr)->y, 1); break; case GF_SG_VRML_SFVEC3F: PrintFixed(((SFVec3f *)far_ptr)->x, 0); PrintFixed(((SFVec3f *)far_ptr)->y, 1); PrintFixed(((SFVec3f *)far_ptr)->z, 1); break; case GF_SG_VRML_SFROTATION: PrintFixed(((SFRotation *)far_ptr)->x, 0); PrintFixed(((SFRotation *)far_ptr)->y, 1); PrintFixed(((SFRotation *)far_ptr)->z, 1); PrintFixed(((SFRotation *)far_ptr)->q, 1); break; case GF_SG_VRML_SFCOLOR: PrintFixed(((SFColor *)far_ptr)->red, 0); PrintFixed(((SFColor *)far_ptr)->green, 1); PrintFixed(((SFColor *)far_ptr)->blue, 1); break; case GF_SG_VRML_SFSTRING: if (((SFString*)far_ptr)->buffer) fprintf(stderr, "\"%s\"", ((SFString*)far_ptr)->buffer); else fprintf(stderr, "NULL"); break; } } #endif #ifndef GPAC_DISABLE_VRML static void do_print_node(GF_Node *node, GF_SceneGraph *sg, const char *name, u32 graph_type, Bool is_nodefield, Bool do_cov) { u32 nbF, i; GF_FieldInfo f; #ifndef GPAC_DISABLE_BIFS u8 qt, at; Fixed bmin, bmax; u32 nbBits; #endif /*GPAC_DISABLE_BIFS*/ nbF = gf_node_get_field_count(node); if (is_nodefield) { char szField[1024]; u32 tfirst, tlast; if (gf_node_get_field_by_name(node, szField, &f) != GF_OK) { M4_LOG(GF_LOG_ERROR, ("Field %s is not a member of node %s\n", szField, name)); return; } fprintf(stderr, "Allowed nodes in %s.%s:\n", name, szField); if (graph_type==1) { tfirst = GF_NODE_RANGE_FIRST_X3D; tlast = GF_NODE_RANGE_LAST_X3D; } else { tfirst = GF_NODE_RANGE_FIRST_MPEG4; tlast = GF_NODE_RANGE_LAST_MPEG4; } for (i=tfirst; i<tlast; i++) { GF_Node *tmp = gf_node_new(sg, i); gf_node_register(tmp, NULL); if (gf_node_in_table_by_tag(i, f.NDTtype)) { const char *nname = gf_node_get_class_name(tmp); if (nname && strcmp(nname, "Unknown Node")) { fprintf(stderr, "\t%s\n", nname); } } gf_node_unregister(tmp, NULL); } return; } if (do_cov) { u32 ndt; if (graph_type==0) { u32 all; gf_node_mpeg4_type_by_class_name(name); gf_bifs_get_child_table(node); all = gf_node_get_num_fields_in_mode(node, GF_SG_FIELD_CODING_ALL); for (i=0; i<all; i++) { u32 res; gf_sg_script_get_field_index(node, i, GF_SG_FIELD_CODING_ALL, &res); } gf_node_get_num_fields_in_mode(node, GF_SG_FIELD_CODING_DEF); gf_node_get_num_fields_in_mode(node, GF_SG_FIELD_CODING_IN); gf_node_get_num_fields_in_mode(node, GF_SG_FIELD_CODING_OUT); gf_node_get_num_fields_in_mode(node, GF_SG_FIELD_CODING_DYN); } else if (graph_type==1) gf_node_x3d_type_by_class_name(name); for (ndt=NDT_SFWorldNode; ndt<NDT_LAST; ndt++) { gf_node_in_table_by_tag(gf_node_get_tag(node), ndt); } } fprintf(stderr, "%s {\n", name); for (i=0; i<nbF; i++) { gf_node_get_field(node, i, &f); if (graph_type==2) { fprintf(stderr, "\t%s=\"...\"\n", f.name); continue; } fprintf(stderr, "\t%s %s %s", gf_sg_vrml_get_event_type_name(f.eventType, 0), gf_sg_vrml_get_field_type_name(f.fieldType), f.name); if (f.fieldType==GF_SG_VRML_SFNODE) fprintf(stderr, " NULL"); else if (f.fieldType==GF_SG_VRML_MFNODE) fprintf(stderr, " []"); else if (gf_sg_vrml_is_sf_field(f.fieldType)) { fprintf(stderr, " "); PrintNodeSFField(f.fieldType, f.far_ptr); } else { void *ptr; u32 j, sftype; GenMFField *mffield = (GenMFField *) f.far_ptr; fprintf(stderr, " ["); sftype = gf_sg_vrml_get_sf_type(f.fieldType); for (j=0; j<mffield->count; j++) { 
if (j) fprintf(stderr, " "); gf_sg_vrml_mf_get_item(f.far_ptr, f.fieldType, &ptr, j); PrintNodeSFField(sftype, ptr); } fprintf(stderr, "]"); } #ifndef GPAC_DISABLE_BIFS if (gf_bifs_get_aq_info(node, i, &qt, &at, &bmin, &bmax, &nbBits)) { if (qt) { fprintf(stderr, " #QP=%d", qt); if (qt==13) fprintf(stderr, " NbBits=%d", nbBits); if (bmin && bmax) { fprintf(stderr, " Bounds=["); PrintFixed(bmin, 0); fprintf(stderr, ","); PrintFixed(bmax, 0); fprintf(stderr, "]"); } } } #endif /*GPAC_DISABLE_BIFS*/ fprintf(stderr, "\n"); if (do_cov) { gf_node_get_field_by_name(node, (char *) f.name, &f); } } fprintf(stderr, "}\n\n"); } #endif u32 PrintNode(const char *name, u32 graph_type) { #ifdef GPAC_DISABLE_VRML M4_LOG(GF_LOG_ERROR, ("VRML/MPEG-4/X3D scene graph is disabled in this build of GPAC\n")); return 2; #else const char *std_name; GF_Node *node; GF_SceneGraph *sg; u32 tag; #ifndef GPAC_DISABLE_BIFS #endif /*GPAC_DISABLE_BIFS*/ Bool is_nodefield = 0; char *sep = strchr(name, '.'); if (sep) { sep[0] = 0; is_nodefield = 1; } if (graph_type==1) { #ifndef GPAC_DISABLE_X3D tag = gf_node_x3d_type_by_class_name(name); std_name = "X3D"; #else M4_LOG(GF_LOG_ERROR, ("X3D node printing is not supported (X3D support disabled)\n")); return 2; #endif } else { tag = gf_node_mpeg4_type_by_class_name(name); std_name = "MPEG4"; } if (!tag) { M4_LOG(GF_LOG_ERROR, ("Unknown %s node %s\n", std_name, name)); return 2; } sg = gf_sg_new(); node = gf_node_new(sg, tag); gf_node_register(node, NULL); name = gf_node_get_class_name(node); if (!node) { M4_LOG(GF_LOG_ERROR, ("Node %s not supported in current built\n", name)); return 2; } do_print_node(node, sg, name, graph_type, is_nodefield, GF_FALSE); gf_node_unregister(node, NULL); gf_sg_del(sg); #endif /*GPAC_DISABLE_VRML*/ return 1; } u32 PrintBuiltInNodes(char *arg_val, u32 dump_type) { #if !defined(GPAC_DISABLE_VRML) && !defined(GPAC_DISABLE_X3D) && !defined(GPAC_DISABLE_SVG) GF_SceneGraph *sg; u32 i, nb_in, nb_not_in, start_tag, end_tag; u32 graph_type; Bool dump_nodes = ((dump_type==1) || (dump_type==3)) ? 
1 : 0; if (dump_type==4) graph_type = 2; else if ((dump_type==2) || (dump_type==3)) graph_type = 1; else graph_type = 0; if (graph_type==1) { #if !defined(GPAC_DISABLE_VRML) && !defined(GPAC_DISABLE_X3D) start_tag = GF_NODE_RANGE_FIRST_X3D; end_tag = TAG_LastImplementedX3D; #else M4_LOG(GF_LOG_ERROR, ("X3D scene graph disabled in this build of GPAC\n")); return 2; #endif } else if (graph_type==2) { #ifdef GPAC_DISABLE_SVG M4_LOG(GF_LOG_ERROR, ("SVG scene graph disabled in this build of GPAC\n")); return 2; #else start_tag = GF_NODE_RANGE_FIRST_SVG; end_tag = GF_NODE_RANGE_LAST_SVG; #endif } else { #ifdef GPAC_DISABLE_VRML M4_LOG(GF_LOG_ERROR, ("VRML/MPEG-4 scene graph disabled in this build of GPAC\n")); return 2; #else start_tag = GF_NODE_RANGE_FIRST_MPEG4; end_tag = TAG_LastImplementedMPEG4; #endif } nb_in = nb_not_in = 0; sg = gf_sg_new(); if (graph_type==1) { fprintf(stderr, "Available X3D nodes in this build (dumping):\n"); } else if (graph_type==2) { fprintf(stderr, "Available SVG nodes in this build (dumping and LASeR coding):\n"); } else { fprintf(stderr, "Available MPEG-4 nodes in this build (encoding/decoding/dumping):\n"); } for (i=start_tag; i<end_tag; i++) { GF_Node *node = gf_node_new(sg, i); if (node) { gf_node_register(node, NULL); if (dump_nodes) { do_print_node(node, sg, gf_node_get_class_name(node), graph_type, GF_FALSE, GF_TRUE); } else { fprintf(stderr, " %s\n", gf_node_get_class_name(node)); } gf_node_unregister(node, NULL); nb_in++; } else { if (graph_type==2) break; nb_not_in++; } } gf_sg_del(sg); if (graph_type==2) { fprintf(stderr, "\n%d nodes supported\n", nb_in); } else { fprintf(stderr, "\n%d nodes supported - %d nodes not supported\n", nb_in, nb_not_in); } //coverage if (dump_nodes) { for (i=GF_SG_VRML_SFBOOL; i<GF_SG_VRML_SCRIPT_FUNCTION; i++) { void *fp = gf_sg_vrml_field_pointer_new(i); if (fp) { if (i==GF_SG_VRML_SFSCRIPT) gf_free(fp); else gf_sg_vrml_field_pointer_del(fp, i); } } } #else M4_LOG(GF_LOG_ERROR, ("No scene graph enabled in this MP4Box build\n")); #endif return 1; } u32 PrintBuiltInBoxes(char *argval, u32 do_cov) { u32 i, count=gf_isom_get_num_supported_boxes(); fprintf(stdout, "<Boxes>\n"); //index 0 is our internal unknown box handler for (i=1; i<count; i++) { gf_isom_dump_supported_box(i, stdout); if (do_cov) { u32 btype = gf_isom_get_supported_box_type(i); GF_Box *b=gf_isom_box_new(btype); if (b) { GF_Box *c=NULL; gf_isom_clone_box(b, &c); if (c) gf_isom_box_del(c); gf_isom_box_del(b); } } } fprintf(stdout, "</Boxes>\n"); return 1; } #if !defined(GPAC_DISABLE_ISOM_HINTING) && !defined(GPAC_DISABLE_ISOM_DUMP) void dump_isom_rtp(GF_ISOFile *file, char *inName, Bool is_final_name) { u32 i, j, size; FILE *dump; const char *sdp; if (inName) { char szBuf[1024]; strcpy(szBuf, inName); if (!is_final_name) strcat(szBuf, "_rtp.xml"); dump = gf_fopen(szBuf, "wt"); if (!dump) { M4_LOG(GF_LOG_ERROR, ("Failed to open %s\n", szBuf)); return; } } else { dump = stdout; } fprintf(dump, "<?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n"); fprintf(dump, "<!-- MP4Box RTP trace -->\n"); fprintf(dump, "<RTPFile>\n"); for (i=0; i<gf_isom_get_track_count(file); i++) { if (gf_isom_get_media_type(file, i+1) != GF_ISOM_MEDIA_HINT) continue; fprintf(dump, "<RTPHintTrack trackID=\"%d\">\n", gf_isom_get_track_id(file, i+1)); gf_isom_sdp_track_get(file, i+1, &sdp, &size); fprintf(dump, "<SDPInfo>%s</SDPInfo>", sdp); #ifndef GPAC_DISABLE_ISOM_HINTING for (j=0; j<gf_isom_get_sample_count(file, i+1); j++) { gf_isom_dump_hint_sample(file, i+1, j+1, dump); } #endif fprintf(dump, 
"</RTPHintTrack>\n"); } fprintf(dump, "</RTPFile>\n"); if (inName) gf_fclose(dump); } #endif void dump_isom_timestamps(GF_ISOFile *file, char *inName, Bool is_final_name, u32 dump_mode) { u32 i, j, k, count; Bool has_ctts_error, is_fragmented=GF_FALSE; FILE *dump; Bool skip_offset = ((dump_mode==2) || (dump_mode==4)) ? GF_TRUE : GF_FALSE; Bool check_ts = ((dump_mode==3) || (dump_mode==4)) ? GF_TRUE : GF_FALSE; struct _ts_info { u64 dts; s64 cts; }; struct _ts_info *timings = NULL; u32 nb_timings=0, nb_timings_alloc = 0; if (inName) { char szBuf[1024]; strcpy(szBuf, inName); if (!is_final_name) strcat(szBuf, "_ts.txt"); dump = gf_fopen(szBuf, "wt"); if (!dump) { M4_LOG(GF_LOG_ERROR, ("Failed to open %s\n", szBuf)); return; } } else { dump = stdout; } if (gf_isom_is_fragmented(file)) is_fragmented = GF_TRUE; has_ctts_error = GF_FALSE; for (i=0; i<gf_isom_get_track_count(file); i++) { s64 cts_dts_shift = gf_isom_get_cts_to_dts_shift(file, i+1); u32 has_cts_offset = gf_isom_has_time_offset(file, i+1); fprintf(dump, "#dumping track ID %d timing:\n", gf_isom_get_track_id(file, i + 1)); fprintf(dump, "Num\tDTS\tCTS\tSize\tRAP%s\tisLeading\tDependsOn\tDependedOn\tRedundant\tRAP-SampleGroup\tRoll-SampleGroup\tRoll-Distance", skip_offset ? "" : "\tOffset"); if (is_fragmented) { fprintf(dump, "\tfrag_start"); } fprintf(dump, "\n"); count = gf_isom_get_sample_count(file, i+1); if (has_cts_offset && check_ts) { if (nb_timings_alloc<count) { nb_timings_alloc = count; timings = gf_realloc(timings, sizeof (struct _ts_info) * count); } nb_timings = 0; } for (j=0; j<count; j++) { s64 cts; u64 dts, offset; u32 isLeading, dependsOn, dependedOn, redundant; Bool is_rap; GF_ISOSampleRollType roll_type; s32 roll_distance; u32 index; GF_ISOSample *samp = gf_isom_get_sample_info(file, i+1, j+1, &index, &offset); if (!samp) { fprintf(dump, " SAMPLE #%d IN TRACK #%d NOT THERE !!!\n", j+1, i+1); continue; } gf_isom_get_sample_flags(file, i+1, j+1, &isLeading, &dependsOn, &dependedOn, &redundant); gf_isom_get_sample_rap_roll_info(file, i+1, j+1, &is_rap, &roll_type, &roll_distance); dts = samp->DTS; cts = dts + (s32) samp->CTS_Offset; fprintf(dump, "Sample %d\tDTS "LLU"\tCTS "LLD"\t%d\t%d", j+1, dts, cts, samp->dataLength, samp->IsRAP); if (!skip_offset) fprintf(dump, "\t"LLU, offset); fprintf(dump, "\t%d\t%d\t%d\t%d\t%d\t%d\t%d", isLeading, dependsOn, dependedOn, redundant, is_rap, roll_type, roll_distance); if (cts< (s64) dts) { if (has_cts_offset==2) { if (cts_dts_shift && (cts+cts_dts_shift < (s64) dts)) { fprintf(dump, " #NEGATIVE CTS OFFSET!!!"); has_ctts_error = 1; } else if (!cts_dts_shift) { fprintf(dump, " #possible negative CTS offset (no cslg in file)"); } } else { fprintf(dump, " #NEGATIVE CTS OFFSET!!!"); has_ctts_error = 1; } } if (has_cts_offset && check_ts) { for (k=0; k<nb_timings; k++) { if (timings[k].dts==dts) { fprintf(dump, " #SAME DTS USED!!!"); has_ctts_error = 1; } if (timings[k].cts==cts) { fprintf(dump, " #SAME CTS USED!!! 
"); has_ctts_error = 1; } } timings[nb_timings].dts = dts; timings[nb_timings].cts = cts; nb_timings++; } gf_isom_sample_del(&samp); if (is_fragmented) { fprintf(dump, "\t%d", gf_isom_sample_is_fragment_start(file, i+1, j+1, NULL) ); } fprintf(dump, "\n"); gf_set_progress("Dumping track timing", j+1, count); } fprintf(dump, "\n\n"); gf_set_progress("Dumping track timing", count, count); } if (timings) gf_free(timings); if (inName) gf_fclose(dump); if (has_ctts_error) { M4_LOG(GF_LOG_ERROR, ("\tFile has CTTS table errors\n")); } } static u32 read_nal_size_hdr(u8 *ptr, u32 nalh_size) { u32 nal_size=0; u32 v = nalh_size; while (v) { nal_size |= (u8) *ptr; ptr++; v-=1; if (v) nal_size <<= 8; } return nal_size; } #ifndef GPAC_DISABLE_AV_PARSERS void gf_inspect_dump_nalu(FILE *dump, u8 *ptr, u32 ptr_size, Bool is_svc, HEVCState *hevc, AVCState *avc, VVCState *vvc, u32 nalh_size, Bool dump_crc, Bool is_encrypted); #endif static void dump_isom_nal_ex(GF_ISOFile *file, GF_ISOTrackID trackID, FILE *dump, u32 dump_flags) { u32 i, j, count, nb_descs, track, nalh_size, timescale, cur_extract_mode; s32 countRef; Bool is_adobe_protected = GF_FALSE; Bool is_cenc_protected = GF_FALSE; Bool is_hevc = GF_FALSE; Bool is_vvc = GF_FALSE; #ifndef GPAC_DISABLE_AV_PARSERS AVCState *avc_state = NULL; HEVCState *hevc_state = NULL; VVCState *vvc_state = NULL; #endif GF_AVCConfig *avccfg, *svccfg; GF_HEVCConfig *hevccfg, *lhvccfg; GF_VVCConfig *vvccfg; GF_NALUFFParam *slc; Bool has_svcc = GF_FALSE; track = gf_isom_get_track_by_id(file, trackID); count = gf_isom_get_sample_count(file, track); timescale = gf_isom_get_media_timescale(file, track); cur_extract_mode = gf_isom_get_nalu_extract_mode(file, track); nb_descs = gf_isom_get_sample_description_count(file, track); if (!nb_descs) { M4_LOG(GF_LOG_ERROR, ("Error: Track #%d has no sample description so is likely not NALU-based!\n", trackID)); return; } fprintf(dump, "<NALUTrack trackID=\"%d\" SampleCount=\"%d\" TimeScale=\"%d\">\n", trackID, count, timescale); #ifndef GPAC_DISABLE_AV_PARSERS #define DUMP_ARRAY(arr, name, loc, _is_svc)\ if (arr) {\ fprintf(dump, " <%sArray location=\"%s\">\n", name, loc);\ for (i=0; i<gf_list_count(arr); i++) {\ slc = gf_list_get(arr, i);\ fprintf(dump, " <NALU size=\"%d\" ", slc->size);\ gf_inspect_dump_nalu(dump, (u8 *) slc->data, slc->size, _is_svc, is_hevc ? hevc_state : NULL, avc_state, is_vvc ? vvc_state : NULL, nalh_size, (dump_flags&1) ? GF_TRUE : GF_FALSE, GF_FALSE);\ }\ fprintf(dump, " </%sArray>\n", name);\ }\ #else #define DUMP_ARRAY(arr, name, loc, _is_svc)\ if (arr) {\ fprintf(dump, " <%sArray location=\"%s\">\n", name, loc);\ for (i=0; i<gf_list_count(arr); i++) {\ slc = gf_list_get(arr, i);\ fprintf(dump, " <NALU size=\"%d\" ", slc->size);\ fprintf(dump, "/>\n");\ }\ fprintf(dump, " </%sArray>\n", name);\ }\ #endif nalh_size = 0; for (j=0; j<nb_descs; j++) { GF_AVCConfig *mvccfg; Bool is_svc; avccfg = gf_isom_avc_config_get(file, track, j+1); svccfg = gf_isom_svc_config_get(file, track, j+1); mvccfg = gf_isom_mvc_config_get(file, track, j+1); hevccfg = gf_isom_hevc_config_get(file, track, j+1); lhvccfg = gf_isom_lhvc_config_get(file, track, j+1); vvccfg = gf_isom_vvc_config_get(file, track, j+1); is_svc = (svccfg!=NULL) ? 
1:0; if (hevccfg || lhvccfg) { is_hevc = 1; #ifndef GPAC_DISABLE_AV_PARSERS GF_SAFEALLOC(hevc_state, HEVCState) #endif } else if (vvccfg) { is_vvc = 1; #ifndef GPAC_DISABLE_AV_PARSERS GF_SAFEALLOC(vvc_state, VVCState) #endif } else if (avccfg || svccfg || mvccfg) { #ifndef GPAC_DISABLE_AV_PARSERS GF_SAFEALLOC(avc_state, AVCState) #endif } //for tile tracks the hvcC is stored in the 'tbas' track if (!hevccfg && gf_isom_get_reference_count(file, track, GF_ISOM_REF_TBAS)) { u32 tk = 0; gf_isom_get_reference(file, track, GF_ISOM_REF_TBAS, 1, &tk); hevccfg = gf_isom_hevc_config_get(file, tk, 1); } fprintf(dump, " <NALUConfig>\n"); if (!avccfg && !svccfg && !hevccfg && !lhvccfg && !vvccfg) { M4_LOG(GF_LOG_ERROR, ("Error: Track #%d is not NALU or OBU based!\n", trackID)); return; } if (avccfg) { nalh_size = avccfg->nal_unit_size; DUMP_ARRAY(avccfg->sequenceParameterSets, "AVCSPS", "avcC", is_svc); DUMP_ARRAY(avccfg->pictureParameterSets, "AVCPPS", "avcC", is_svc) DUMP_ARRAY(avccfg->sequenceParameterSetExtensions, "AVCSPSEx", "avcC", is_svc) } if (is_svc) { if (!nalh_size) nalh_size = svccfg->nal_unit_size; DUMP_ARRAY(svccfg->sequenceParameterSets, "SVCSPS", "svcC", is_svc) DUMP_ARRAY(svccfg->pictureParameterSets, "SVCPPS", "svcC", is_svc) } if (mvccfg) { if (!nalh_size) nalh_size = mvccfg->nal_unit_size; DUMP_ARRAY(mvccfg->sequenceParameterSets, "SVCSPS", "mvcC", is_svc) DUMP_ARRAY(mvccfg->pictureParameterSets, "SVCPPS", "mvcC", is_svc) } if (hevccfg) { u32 idx; nalh_size = hevccfg->nal_unit_size; for (idx=0; idx<gf_list_count(hevccfg->param_array); idx++) { GF_NALUFFParamArray *ar = gf_list_get(hevccfg->param_array, idx); if (ar->type==GF_HEVC_NALU_SEQ_PARAM) { DUMP_ARRAY(ar->nalus, "HEVCSPS", "hvcC", 0) } else if (ar->type==GF_HEVC_NALU_PIC_PARAM) { DUMP_ARRAY(ar->nalus, "HEVCPPS", "hvcC", 0) } else if (ar->type==GF_HEVC_NALU_VID_PARAM) { DUMP_ARRAY(ar->nalus, "HEVCVPS", "hvcC", 0) } else { DUMP_ARRAY(ar->nalus, "HEVCUnknownPS", "hvcC", 0) } } } if (vvccfg) { u32 idx; nalh_size = vvccfg->nal_unit_size; for (idx=0; idx<gf_list_count(vvccfg->param_array); idx++) { GF_NALUFFParamArray *ar = gf_list_get(vvccfg->param_array, idx); if (ar->type==GF_VVC_NALU_SEQ_PARAM) { DUMP_ARRAY(ar->nalus, "VVCSPS", "vvcC", 0) } else if (ar->type==GF_VVC_NALU_PIC_PARAM) { DUMP_ARRAY(ar->nalus, "VVCPPS", "vvcC", 0) } else if (ar->type==GF_VVC_NALU_VID_PARAM) { DUMP_ARRAY(ar->nalus, "VVCVPS", "vvcC", 0) } else { DUMP_ARRAY(ar->nalus, "VVCUnknownPS", "vvcC", 0) } } } if (lhvccfg) { u32 idx; nalh_size = lhvccfg->nal_unit_size; for (idx=0; idx<gf_list_count(lhvccfg->param_array); idx++) { GF_NALUFFParamArray *ar = gf_list_get(lhvccfg->param_array, idx); if (ar->type==GF_HEVC_NALU_SEQ_PARAM) { DUMP_ARRAY(ar->nalus, "HEVCSPS", "lhcC", 0) } else if (ar->type==GF_HEVC_NALU_PIC_PARAM) { DUMP_ARRAY(ar->nalus, "HEVCPPS", "lhcC", 0) } else if (ar->type==GF_HEVC_NALU_VID_PARAM) { DUMP_ARRAY(ar->nalus, "HEVCVPS", "lhcC", 0) } else { DUMP_ARRAY(ar->nalus, "HEVCUnknownPS", "lhcC", 0) } } } fprintf(dump, " </NALUConfig>\n"); if (avccfg) gf_odf_avc_cfg_del(avccfg); if (svccfg) { gf_odf_avc_cfg_del(svccfg); has_svcc = GF_TRUE; } if (hevccfg) gf_odf_hevc_cfg_del(hevccfg); if (vvccfg) gf_odf_vvc_cfg_del(vvccfg); if (lhvccfg) gf_odf_hevc_cfg_del(lhvccfg); } /*fixme: for dumping encrypted track: we don't have neither avccfg nor svccfg*/ if (!nalh_size) nalh_size = 4; /*for testing dependency*/ countRef = gf_isom_get_reference_count(file, track, GF_ISOM_REF_SCAL); if (countRef > 0) { GF_ISOTrackID refTrackID; fprintf(dump, " 
<SCALReferences>\n"); for (i = 1; i <= (u32) countRef; i++) { gf_isom_get_reference_ID(file, track, GF_ISOM_REF_SCAL, i, &refTrackID); fprintf(dump, " <SCALReference number=\"%d\" refTrackID=\"%d\"/>\n", i, refTrackID); } fprintf(dump, " </SCALReferences>\n"); } fprintf(dump, " <NALUSamples>\n"); gf_isom_set_nalu_extract_mode(file, track, GF_ISOM_NALU_EXTRACT_INSPECT); is_adobe_protected = gf_isom_is_adobe_protection_media(file, track, 1); is_cenc_protected = gf_isom_is_cenc_media(file, track, 1); for (i=0; i<count; i++) { u64 dts, cts; Bool is_rap; u32 size, nal_size, idx, di; u8 *ptr; GF_ISOSample *samp = gf_isom_get_sample(file, track, i+1, &di); if (!samp) { fprintf(dump, "<!-- Unable to fetch sample %d -->\n", i+1); continue; } dts = samp->DTS; cts = dts + (s32) samp->CTS_Offset; is_rap = samp->IsRAP; if (!is_rap) gf_isom_get_sample_rap_roll_info(file, track, i+1, &is_rap, NULL, NULL); if (dump_flags&2) { fprintf(dump, " <Sample size=\"%d\" RAP=\"%d\"", samp->dataLength, is_rap); } else { fprintf(dump, " <Sample DTS=\""LLD"\" CTS=\""LLD"\" size=\"%d\" RAP=\"%d\"", dts, cts, samp->dataLength, is_rap); } if (nb_descs>1) fprintf(dump, " sample_description=\"%d\"", di); fprintf(dump, " >\n"); if (cts<dts) fprintf(dump, "<!-- NEGATIVE CTS OFFSET! -->\n"); idx = 1; ptr = samp->data; size = samp->dataLength; if (is_adobe_protected) { u8 encrypted_au = ptr[0]; if (encrypted_au) { fprintf(dump, " <!-- Sample number %d is an Adobe's protected sample: can not be dumped -->\n", i+1); fprintf(dump, " </Sample>\n\n"); continue; } else { ptr++; size--; } } while (size) { nal_size = read_nal_size_hdr(ptr, nalh_size); ptr += nalh_size; if (nal_size >= UINT_MAX-nalh_size || nalh_size + nal_size > size) { fprintf(dump, " <!-- NALU number %d is corrupted: size is %d but only %d remains -->\n", idx, nal_size, size); break; } else { fprintf(dump, " <NALU size=\"%d\" ", nal_size); #ifndef GPAC_DISABLE_AV_PARSERS Bool is_encrypted = 0; if (is_cenc_protected) { GF_Err e = gf_isom_get_sample_cenc_info(file, track, i + 1, &is_encrypted, NULL, NULL, NULL, NULL); if (e != GF_OK) { fprintf(dump, "dump_msg=\"Error %s while fetching encryption info for sample, assuming sample is encrypted\" ", gf_error_to_string(e) ); is_encrypted = GF_TRUE; } } gf_inspect_dump_nalu(dump, ptr, nal_size, has_svcc ? 
1 : 0, hevc_state, avc_state, vvc_state, nalh_size, dump_flags, is_encrypted); #else fprintf(dump, "/>\n"); #endif } idx++; ptr+=nal_size; size -= nal_size + nalh_size; } fprintf(dump, " </Sample>\n"); gf_isom_sample_del(&samp); fprintf(dump, "\n"); gf_set_progress("Analysing Track NALUs", i+1, count); } fprintf(dump, " </NALUSamples>\n"); fprintf(dump, "</NALUTrack>\n"); gf_isom_set_nalu_extract_mode(file, track, cur_extract_mode); #ifndef GPAC_DISABLE_AV_PARSERS if (hevc_state) gf_free(hevc_state); if (vvc_state) gf_free(vvc_state); if (avc_state) gf_free(avc_state); #endif } static void dump_isom_obu(GF_ISOFile *file, GF_ISOTrackID trackID, FILE *dump, Bool dump_crc); static void dump_qt_prores(GF_ISOFile *file, GF_ISOTrackID trackID, FILE *dump, Bool dump_crc); void dump_isom_nal(GF_ISOFile *file, GF_ISOTrackID trackID, char *inName, Bool is_final_name, u32 dump_flags) { Bool is_av1 = GF_FALSE; Bool is_prores = GF_FALSE; FILE *dump; if (inName) { GF_ESD* esd; char szBuf[GF_MAX_PATH]; strcpy(szBuf, inName); u32 track = gf_isom_get_track_by_id(file, trackID); esd = gf_isom_get_esd(file, track, 1); if (!esd || !esd->decoderConfig) { switch (gf_isom_get_media_subtype(file, track, 1)) { case GF_ISOM_SUBTYPE_AV01: is_av1 = GF_TRUE; break; case GF_QT_SUBTYPE_APCH: case GF_QT_SUBTYPE_APCO: case GF_QT_SUBTYPE_APCN: case GF_QT_SUBTYPE_APCS: case GF_QT_SUBTYPE_AP4X: case GF_QT_SUBTYPE_AP4H: is_prores = GF_TRUE; break; } } else if (esd->decoderConfig->objectTypeIndication == GF_CODECID_AV1) { is_av1 = GF_TRUE; } if (esd) gf_odf_desc_del((GF_Descriptor*)esd); if (!is_final_name) sprintf(szBuf, "%s_%d_%s.xml", inName, trackID, is_av1 ? "obu" : "nalu"); dump = gf_fopen(szBuf, "wt"); if (!dump) { M4_LOG(GF_LOG_ERROR, ("Failed to open %s for dumping\n", szBuf)); return; } } else { dump = stdout; } if (is_av1) dump_isom_obu(file, trackID, dump, dump_flags); else if (is_prores) dump_qt_prores(file, trackID, dump, dump_flags); else dump_isom_nal_ex(file, trackID, dump, dump_flags); if (inName) gf_fclose(dump); } #ifndef GPAC_DISABLE_AV_PARSERS void gf_inspect_dump_obu(FILE *dump, AV1State *av1, u8 *obu, u64 obu_length, ObuType obu_type, u64 obu_size, u32 hdr_size, Bool dump_crc); #endif static void dump_isom_obu(GF_ISOFile *file, GF_ISOTrackID trackID, FILE *dump, Bool dump_crc) { #ifndef GPAC_DISABLE_AV_PARSERS u32 i, count, track, timescale; AV1State av1; ObuType obu_type; u64 obu_size; u32 hdr_size; GF_BitStream *bs; u32 idx; track = gf_isom_get_track_by_id(file, trackID); gf_av1_init_state(&av1); av1.config = gf_isom_av1_config_get(file, track, 1); if (!av1.config) { M4_LOG(GF_LOG_ERROR, ("Error: Track #%d is not AV1!\n", trackID)); return; } count = gf_isom_get_sample_count(file, track); timescale = gf_isom_get_media_timescale(file, track); fprintf(dump, "<OBUTrack trackID=\"%d\" SampleCount=\"%d\" TimeScale=\"%d\">\n", trackID, count, timescale); fprintf(dump, " <OBUConfig>\n"); for (i=0; i<gf_list_count(av1.config->obu_array); i++) { GF_AV1_OBUArrayEntry *obu = gf_list_get(av1.config->obu_array, i); bs = gf_bs_new(obu->obu, (u32) obu->obu_length, GF_BITSTREAM_READ); gf_av1_parse_obu(bs, &obu_type, &obu_size, &hdr_size, &av1); gf_inspect_dump_obu(dump, &av1, obu->obu, obu->obu_length, obu_type, obu_size, hdr_size, dump_crc); gf_bs_del(bs); } fprintf(dump, " </OBUConfig>\n"); fprintf(dump, " <OBUSamples>\n"); for (i=0; i<count; i++) { u64 dts, cts; u32 size; u8 *ptr; GF_ISOSample *samp = gf_isom_get_sample(file, track, i+1, NULL); if (!samp) { fprintf(dump, "<!-- Unable to fetch sample %d -->\n", 
i+1);
			continue;
		}
		dts = samp->DTS;
		cts = dts + (s32) samp->CTS_Offset;
		fprintf(dump, " <Sample number=\"%d\" DTS=\""LLD"\" CTS=\""LLD"\" size=\"%d\" RAP=\"%d\" >\n", i+1, dts, cts, samp->dataLength, samp->IsRAP);
		if (cts<dts) fprintf(dump, "<!-- NEGATIVE CTS OFFSET! -->\n");

		idx = 1;
		ptr = samp->data;
		size = samp->dataLength;
		bs = gf_bs_new(ptr, size, GF_BITSTREAM_READ);
		while (size) {
			gf_av1_parse_obu(bs, &obu_type, &obu_size, &hdr_size, &av1);
			/* defensive: a zero-sized OBU would never consume input and spin
			forever on corrupted data, so treat it like an oversized one */
			if (!obu_size || (obu_size > size)) {
				fprintf(dump, " <!-- OBU number %d is corrupted: size is %d but only %d remains -->\n", idx, (u32) obu_size, size);
				break;
			}
			gf_inspect_dump_obu(dump, &av1, ptr, obu_size, obu_type, obu_size, hdr_size, dump_crc);
			ptr += obu_size;
			size -= (u32)obu_size;
			idx++;
		}
		gf_bs_del(bs);
		fprintf(dump, " </Sample>\n");
		gf_isom_sample_del(&samp);
		fprintf(dump, "\n");
		gf_set_progress("Analysing Track OBUs", i+1, count);
	}
	fprintf(dump, " </OBUSamples>\n");
	fprintf(dump, "</OBUTrack>\n");
	if (av1.config) gf_odf_av1_cfg_del(av1.config);
	gf_av1_reset_state(&av1, GF_TRUE);
#endif
}

/* GF_ISOTrackID matches the forward declaration above (it is a u32) */
static void dump_qt_prores(GF_ISOFile *file, GF_ISOTrackID trackID, FILE *dump, Bool dump_crc)
{
#ifndef GPAC_DISABLE_AV_PARSERS
	u32 i, count, track, timescale;
	track = gf_isom_get_track_by_id(file, trackID);
	count = gf_isom_get_sample_count(file, track);
	timescale = gf_isom_get_media_timescale(file, track);
	fprintf(dump, "<ProResTrack trackID=\"%d\" SampleCount=\"%d\" TimeScale=\"%d\">\n", trackID, count, timescale);

	for (i=0; i<count; i++) {
		void gf_inspect_dump_prores(FILE *dump, u8 *ptr, u64 frame_size, Bool dump_crc);
		u64 dts, cts;
		GF_ISOSample *samp = gf_isom_get_sample(file, track, i+1, NULL);
		if (!samp) {
			fprintf(dump, "<!-- Unable to fetch sample %d -->\n", i+1);
			continue;
		}
		dts = samp->DTS;
		cts = dts + (s32) samp->CTS_Offset;
		if (cts!=dts) fprintf(dump, "<!-- Wrong timing info (CTS "LLD" vs DTS "LLD") ! -->\n", cts, dts);
		if (!samp->IsRAP) fprintf(dump, "<!-- Wrong sync sample info, sample is not SAP1 ! 
-->\n"); fprintf(dump, " <Sample number=\"%d\" CTS=\""LLD"\" size=\"%d\">\n", i+1, cts, samp->dataLength); gf_inspect_dump_prores(dump, samp->data, samp->dataLength, dump_crc); fprintf(dump, " </Sample>\n"); gf_isom_sample_del(&samp); fprintf(dump, "\n"); gf_set_progress("Analysing ProRes Track", i+1, count); } fprintf(dump, "</ProResTrack>\n"); #endif } void dump_isom_saps(GF_ISOFile *file, GF_ISOTrackID trackID, u32 dump_saps_mode, char *inName, Bool is_final_name) { FILE *dump; u32 i, count; s64 media_offset=0; u32 track = gf_isom_get_track_by_id(file, trackID); if (inName) { char szBuf[GF_MAX_PATH]; strcpy(szBuf, inName); if (!is_final_name) sprintf(szBuf, "%s_%d_cues.xml", inName, trackID); dump = gf_fopen(szBuf, "wt"); if (!dump) { M4_LOG(GF_LOG_ERROR, ("Failed to open %s for dumping\n", szBuf)); return; } } else { dump = stdout; } fprintf(dump, "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n"); fprintf(dump, "<DASHCues xmlns=\"urn:gpac:dash:schema:cues:2018\">\n"); fprintf(dump, "<Stream id=\"%d\" timescale=\"%d\"", trackID, gf_isom_get_media_timescale(file, track) ); if (dump_saps_mode==4) { fprintf(dump, " mode=\"edit\""); gf_isom_get_edit_list_type(file, track, &media_offset); } fprintf(dump, ">\n"); count = gf_isom_get_sample_count(file, track); for (i=0; i<count; i++) { s64 cts, dts; u32 di; Bool traf_start = 0; u32 sap_type = 0; u64 doffset; GF_ISOSample *samp = gf_isom_get_sample_info(file, track, i+1, &di, &doffset); traf_start = gf_isom_sample_is_fragment_start(file, track, i+1, NULL); sap_type = samp->IsRAP; if (!sap_type) { Bool is_rap; GF_ISOSampleRollType roll_type; s32 roll_dist; gf_isom_get_sample_rap_roll_info(file, track, i+1, &is_rap, &roll_type, &roll_dist); if (roll_type) sap_type = SAP_TYPE_4; else if (is_rap) sap_type = SAP_TYPE_3; } if (!sap_type) { gf_isom_sample_del(&samp); continue; } dts = cts = samp->DTS; cts += samp->CTS_Offset; fprintf(dump, "<Cue sap=\"%d\"", sap_type); if (dump_saps_mode==4) { cts += media_offset; fprintf(dump, " cts=\""LLD"\"", cts); } else { if (!dump_saps_mode || (dump_saps_mode==1)) fprintf(dump, " sample=\"%d\"", i+1); if (!dump_saps_mode || (dump_saps_mode==2)) fprintf(dump, " cts=\""LLD"\"", cts); if (!dump_saps_mode || (dump_saps_mode==3)) fprintf(dump, " dts=\""LLD"\"", dts); } if (traf_start) fprintf(dump, " wasFragStart=\"yes\""); fprintf(dump, "/>\n"); gf_isom_sample_del(&samp); } fprintf(dump, "</Stream>\n"); fprintf(dump, "</DASHCues>\n"); if (inName) gf_fclose(dump); } #ifndef GPAC_DISABLE_ISOM_DUMP void dump_isom_ismacryp(GF_ISOFile *file, char *inName, Bool is_final_name) { u32 i, j; FILE *dump; if (inName) { char szBuf[1024]; strcpy(szBuf, inName); if (!is_final_name) strcat(szBuf, "_ismacryp.xml"); dump = gf_fopen(szBuf, "wt"); if (!dump) { M4_LOG(GF_LOG_ERROR, ("Failed to open %s for dumping\n", szBuf)); return; } } else { dump = stdout; } fprintf(dump, "<?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n"); fprintf(dump, "<!-- MP4Box ISMACryp trace -->\n"); fprintf(dump, "<ISMACrypFile>\n"); for (i=0; i<gf_isom_get_track_count(file); i++) { if (gf_isom_get_media_subtype(file, i+1, 1) != GF_ISOM_SUBTYPE_MPEG4_CRYP) continue; gf_isom_dump_ismacryp_protection(file, i+1, dump); fprintf(dump, "<ISMACrypTrack trackID=\"%d\">\n", gf_isom_get_track_id(file, i+1)); for (j=0; j<gf_isom_get_sample_count(file, i+1); j++) { gf_isom_dump_ismacryp_sample(file, i+1, j+1, dump); } fprintf(dump, "</ISMACrypTrack >\n"); } fprintf(dump, "</ISMACrypFile>\n"); if (inName) gf_fclose(dump); } void dump_isom_timed_text(GF_ISOFile *file, 
GF_ISOTrackID trackID, char *inName, Bool is_final_name, Bool is_convert, GF_TextDumpType dump_type) { FILE *dump; GF_Err e; u32 track; track = gf_isom_get_track_by_id(file, trackID); if (!track) { M4_LOG(GF_LOG_ERROR, ("Cannot find track ID %d\n", trackID)); return; } switch (gf_isom_get_media_type(file, track)) { case GF_ISOM_MEDIA_TEXT: case GF_ISOM_MEDIA_SUBT: break; default: M4_LOG(GF_LOG_ERROR, ("Track ID %d is not a 3GPP text track\n", trackID)); return; } if (inName) { char szBuf[1024]; char *ext; ext = ((dump_type==GF_TEXTDUMPTYPE_SVG) ? "svg" : ((dump_type==GF_TEXTDUMPTYPE_SRT) ? "srt" : "ttxt")); if (is_final_name) { strcpy(szBuf, inName) ; } else if (is_convert) sprintf(szBuf, "%s.%s", inName, ext) ; else sprintf(szBuf, "%s_%d_text.%s", inName, trackID, ext); dump = gf_fopen(szBuf, "wt"); if (!dump) { M4_LOG(GF_LOG_ERROR, ("Failed to open %s for dumping\n", szBuf)); return; } } else { dump = stdout; } e = gf_isom_text_dump(file, track, dump, dump_type); if (inName) gf_fclose(dump); if (e) { M4_LOG(GF_LOG_ERROR, ("Conversion failed (%s)\n", gf_error_to_string(e))); } else { fprintf(stderr, "Conversion done\n"); } } #endif /*GPAC_DISABLE_ISOM_DUMP*/ #ifndef GPAC_DISABLE_ISOM_HINTING void dump_isom_sdp(GF_ISOFile *file, char *inName, Bool is_final_name) { const char *sdp; u32 size, i; FILE *dump; if (inName) { char szBuf[1024]; strcpy(szBuf, inName); if (!is_final_name) { char *ext = strchr(szBuf, '.'); if (ext) ext[0] = 0; strcat(szBuf, "_sdp.txt"); } dump = gf_fopen(szBuf, "wt"); if (!dump) { M4_LOG(GF_LOG_ERROR, ("Failed to open %s for dumping\n", szBuf)); return; } } else { dump = stdout; fprintf(dump, "# File SDP content \n\n"); } //get the movie SDP gf_isom_sdp_get(file, &sdp, &size); if (sdp && size) fprintf(dump, "%s", sdp); fprintf(dump, "\r\n"); //then tracks for (i=0; i<gf_isom_get_track_count(file); i++) { if (gf_isom_get_media_type(file, i+1) != GF_ISOM_MEDIA_HINT) continue; gf_isom_sdp_track_get(file, i+1, &sdp, &size); fprintf(dump, "%s", sdp); } fprintf(dump, "\n\n"); if (inName) gf_fclose(dump); } #endif #ifndef GPAC_DISABLE_ISOM_DUMP GF_Err dump_isom_xml(GF_ISOFile *file, char *inName, Bool is_final_name, Bool do_track_dump, Bool merge_vtt_cues, Bool skip_init, Bool skip_samples) { GF_Err e; FILE *dump = stdout; Bool do_close=GF_FALSE; if (!file) return GF_ISOM_INVALID_FILE; if (inName) { char szBuf[1024]; strcpy(szBuf, inName); if (!is_final_name) { strcat(szBuf, do_track_dump ? 
"_dump.xml" : "_info.xml"); } dump = gf_fopen(szBuf, "wt"); if (!dump) { M4_LOG(GF_LOG_ERROR, ("Failed to open %s\n", szBuf)); return GF_IO_ERR; } do_close=GF_TRUE; } fprintf(dump, "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n"); if (do_track_dump) { fprintf(dump, "<ISOBaseMediaFileTrace>\n"); } e = gf_isom_dump(file, dump, skip_init, skip_samples); if (e) { M4_LOG(GF_LOG_ERROR, ("Error dumping ISO structure\n")); } if (do_track_dump) { #ifndef GPAC_DISABLE_MEDIA_EXPORT u32 i; //because of dump mode we need to reopen in regular read mode to avoid mem leaks GF_ISOFile *the_file = gf_isom_open(gf_isom_get_filename(file), GF_ISOM_OPEN_READ, NULL); u32 tcount = gf_isom_get_track_count(the_file); fprintf(dump, "<Tracks>\n"); for (i=0; i<tcount; i++) { GF_MediaExporter dumper; GF_ISOTrackID trackID = gf_isom_get_track_id(the_file, i+1); u32 mtype = gf_isom_get_media_type(the_file, i+1); u32 msubtype = gf_isom_get_media_subtype(the_file, i+1, 1); Bool fmt_handled = GF_FALSE; memset(&dumper, 0, sizeof(GF_MediaExporter)); dumper.file = the_file; dumper.trackID = trackID; dumper.dump_file = dump; if (mtype == GF_ISOM_MEDIA_HINT) { #ifndef GPAC_DISABLE_ISOM_HINTING char *name=NULL; if (msubtype==GF_ISOM_SUBTYPE_RTP) name = "RTPHintTrack"; else if (msubtype==GF_ISOM_SUBTYPE_SRTP) name = "SRTPHintTrack"; else if (msubtype==GF_ISOM_SUBTYPE_RRTP) name = "RTPReceptionHintTrack"; else if (msubtype==GF_ISOM_SUBTYPE_RTCP) name = "RTCPReceptionHintTrack"; else if (msubtype==GF_ISOM_SUBTYPE_FLUTE) name = "FLUTEReceptionHintTrack"; else name = "UnknownHintTrack"; fprintf(dump, "<%s trackID=\"%d\">\n", name, trackID); #ifndef GPAC_DISABLE_ISOM_HINTING u32 j, scount=gf_isom_get_sample_count(the_file, i+1); for (j=0; j<scount; j++) { gf_isom_dump_hint_sample(the_file, i+1, j+1, dump); } #endif fprintf(dump, "</%s>\n", name); fmt_handled = GF_TRUE; #endif /*GPAC_DISABLE_ISOM_HINTING*/ } else if (gf_isom_get_avc_svc_type(the_file, i+1, 1) || gf_isom_get_hevc_lhvc_type(the_file, i+1, 1)) { dump_isom_nal_ex(the_file, trackID, dump, GF_FALSE); fmt_handled = GF_TRUE; } else if ((mtype==GF_ISOM_MEDIA_TEXT) || (mtype==GF_ISOM_MEDIA_SUBT) ) { if (msubtype==GF_ISOM_SUBTYPE_WVTT) { gf_webvtt_dump_iso_track(&dumper, i+1, merge_vtt_cues, GF_TRUE); fmt_handled = GF_TRUE; } else if ((msubtype==GF_ISOM_SUBTYPE_TX3G) || (msubtype==GF_ISOM_SUBTYPE_TEXT)) { gf_isom_text_dump(the_file, i+1, dump, GF_TEXTDUMPTYPE_TTXT_BOXES); fmt_handled = GF_TRUE; } } if (!fmt_handled) { dumper.flags = GF_EXPORT_NHML | GF_EXPORT_NHML_FULL; dumper.print_stats_graph = fs_dump_flags; gf_media_export(&dumper); } } #else return GF_NOT_SUPPORTED; #endif /*GPAC_DISABLE_MEDIA_EXPORT*/ gf_isom_delete(the_file); fprintf(dump, "</Tracks>\n"); fprintf(dump, "</ISOBaseMediaFileTrace>\n"); } if (do_close) gf_fclose(dump); return e; } #endif static char *format_duration(u64 dur, u32 timescale, char *szDur) { u32 h, m, s, ms; if ((dur==(u64) -1) || (dur==(u32) -1)) { strcpy(szDur, "Unknown"); return szDur; } dur = (u64) (( ((Double) (s64) dur)/timescale)*1000); h = (u32) (dur / 3600000); m = (u32) (dur/ 60000) - h*60; s = (u32) (dur/1000) - h*3600 - m*60; ms = (u32) (dur) - h*3600000 - m*60000 - s*1000; if (h<=24) { sprintf(szDur, "%02d:%02d:%02d.%03d", h, m, s, ms); } else { u32 d = (u32) (dur / 3600000 / 24); h = (u32) (dur/3600000)-24*d; if (d<=365) { sprintf(szDur, "%d Days, %02d:%02d:%02d.%03d", d, h, m, s, ms); } else { u32 y=0; while (d>365) { y++; d-=365; if (y%4) d--; } sprintf(szDur, "%d Years %d Days, %02d:%02d:%02d.%03d", y, d, h, m, s, ms); } } return 
szDur; } static char *format_date(u64 time, char *szTime) { time_t now; if (!time) { strcpy(szTime, "UNKNOWN DATE"); } else { time -= 2082844800; now = (u32) time; sprintf(szTime, "GMT %s", asctime(gf_gmtime(&now)) ); } return szTime; } void print_udta(GF_ISOFile *file, u32 track_number, Bool has_itags) { u32 i, count; count = gf_isom_get_udta_count(file, track_number); if (!count) return; if (has_itags) { for (i=0; i<count; i++) { u32 type; bin128 uuid; gf_isom_get_udta_type(file, track_number, i+1, &type, &uuid); if (type == GF_ISOM_BOX_TYPE_META) { count--; break; } } if (!count) return; } fprintf(stderr, "%d UDTA types: ", count); for (i=0; i<count; i++) { u32 j, type, nb_items, first=GF_TRUE; bin128 uuid; gf_isom_get_udta_type(file, track_number, i+1, &type, &uuid); nb_items = gf_isom_get_user_data_count(file, track_number, type, uuid); fprintf(stderr, "%s (%d) ", gf_4cc_to_str(type), nb_items); for (j=0; j<nb_items; j++) { u8 *udta=NULL; u32 udta_size; gf_isom_get_user_data(file, track_number, type, uuid, j+1, &udta, &udta_size); if (!udta) continue; if (gf_utf8_is_legal(udta, udta_size)) { if (first) { fprintf(stderr, "\n"); first = GF_FALSE; } fprintf(stderr, "\t%s\n", (char *) udta); } gf_free(udta); } } fprintf(stderr, "\n"); } GF_Err dump_isom_udta(GF_ISOFile *file, char *inName, Bool is_final_name, u32 dump_udta_type, u32 dump_udta_track) { u8 *data; FILE *t; bin128 uuid; u32 count, res; GF_Err e; memset(uuid, 0, 16); count = gf_isom_get_user_data_count(file, dump_udta_track, dump_udta_type, uuid); if (!count) { M4_LOG(GF_LOG_ERROR, ("No UDTA for type %s found\n", gf_4cc_to_str(dump_udta_type) )); return GF_NOT_FOUND; } data = NULL; count = 0; e = gf_isom_get_user_data(file, dump_udta_track, dump_udta_type, uuid, 0, &data, &count); if (e) { M4_LOG(GF_LOG_ERROR, ("Error dumping UDTA %s: %s\n", gf_4cc_to_str(dump_udta_type), gf_error_to_string(e) )); return e; } if (inName) { char szName[1024]; if (is_final_name) strcpy(szName, inName); else sprintf(szName, "%s_%s.udta", inName, gf_4cc_to_str(dump_udta_type) ); t = gf_fopen(szName, "wb"); if (!t) { gf_free(data); M4_LOG(GF_LOG_ERROR, ("Cannot open file %s\n", szName )); return GF_IO_ERR; } } else { t = stdout; } res = (u32) gf_fwrite(data+8, count-8, t); if (inName) gf_fclose(t); gf_free(data); if (count-8 != res) { M4_LOG(GF_LOG_ERROR, ("Error writing udta to file\n")); return GF_IO_ERR; } return GF_OK; } GF_Err dump_isom_chapters(GF_ISOFile *file, char *inName, Bool is_final_name, u32 dump_mode) { FILE *t; u32 i, count; u32 chap_tk = 0; count = gf_isom_get_chapter_count(file, 0); if (dump_mode==2) dump_mode = GF_TEXTDUMPTYPE_OGG_CHAP; else if (dump_mode==3) dump_mode = GF_TEXTDUMPTYPE_ZOOM_CHAP; else dump_mode = GF_TEXTDUMPTYPE_TTXT_CHAP; if (!count) { for (i=0; i<gf_isom_get_track_count(file); i++) { if (gf_isom_get_reference_count(file, i+1, GF_ISOM_REF_CHAP)) { GF_Err e = gf_isom_get_reference(file, i+1, GF_ISOM_REF_CHAP, 1, &chap_tk); if (!e) break; } } if (!chap_tk) { M4_LOG(GF_LOG_WARNING, ("No chapters or chapters track found in file\n")); return GF_OK; } fprintf(stderr, "Dumping chapter track %d\n", chap_tk); dump_isom_timed_text(file, gf_isom_get_track_id(file, chap_tk), inName, is_final_name, GF_FALSE, dump_mode); return GF_OK; } if (inName) { char szName[1024]; strcpy(szName, inName); if (!is_final_name) { if (dump_mode==GF_TEXTDUMPTYPE_OGG_CHAP) { strcat(szName, ".txt"); } else if (dump_mode==GF_TEXTDUMPTYPE_ZOOM_CHAP) { strcat(szName, ".txt"); } else { strcat(szName, ".ttxt"); } } t = gf_fopen(szName, "wt"); if 
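	/* three output flavours follow: OGG mode emits CHAPTERxx=/CHAPTERxxNAME=
	pairs, Zoom mode emits AddChapterBySecond(time,name) lines, and the default
	TTXT mode wraps each chapter in a <TextSample> of a TextStream XML document */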
(!t) return GF_IO_ERR;
	} else {
		t = stdout;
	}

	if (dump_mode==GF_TEXTDUMPTYPE_TTXT_CHAP) {
		fprintf(t, "<?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n");
		fprintf(t, "<TextStream version=\"1.1\">\n");
		fprintf(t, "<TextStreamHeader width=\"0\" height=\"0\" layer=\"0\" translation_x=\"0\" translation_y=\"0\">\n");
		fprintf(t, "<TextSampleDescription horizontalJustification=\"left\" backColor=\"0 0 0\" scroll=\"None\"/>\n");
		fprintf(t, "</TextStreamHeader>\n");
	}
	for (i=0; i<count; i++) {
		char szDur[20];
		u64 chapter_time;
		const char *name;
		gf_isom_get_chapter(file, 0, i+1, &chapter_time, &name);
		if (dump_mode==GF_TEXTDUMPTYPE_OGG_CHAP) {
			fprintf(t, "CHAPTER%02d=%s\n", i+1, format_duration(chapter_time, 1000, szDur));
			fprintf(t, "CHAPTER%02dNAME=%s\n", i+1, name);
		} else if (dump_mode==GF_TEXTDUMPTYPE_ZOOM_CHAP) {
			chapter_time /= 1000;
			fprintf(t, "AddChapterBySecond("LLD",%s)\n", chapter_time, name);
		} else {
			fprintf(t, "<TextSample sampleTime=\"%s\" sampleDescriptionIndex=\"1\" xml:space=\"preserve\">%s</TextSample>\n", format_duration(chapter_time, 1000, szDur), name);
		}
	}
	if (dump_mode==GF_TEXTDUMPTYPE_TTXT_CHAP) {
		fprintf(t, "</TextStream>\n");
	}
	if (inName) gf_fclose(t);
	return GF_OK;
}

static void dump_key_info(const u8 *key_info, u32 key_info_size, Bool is_protected)
{
	if (!key_info) return;
	/* key info blob layout, as parsed here: byte 0 = multi-key flag, bytes 1-2 =
	big-endian key count when multi-key, then per key: 1-byte IV size, 16-byte
	KID, plus a 1-byte constant-IV size and the constant IV itself when the IV
	size is 0 */
	u32 j, k, kpos=3;
	u32 nb_keys = 1;
	if (key_info[0]) {
		nb_keys = key_info[1];
		nb_keys <<= 8;
		nb_keys |= key_info[2];
	}
	for (k=0; k<nb_keys; k++) {
		u8 constant_iv_size=0;
		u8 iv_size;
		/* defensive: never read past the announced blob size */
		if (kpos + 17 > key_info_size) break;
		/* the 1-byte IV size precedes the 16-byte KID */
		iv_size = key_info[kpos];
		fprintf(stderr, "\t\tKID");
		if (nb_keys>1)
			fprintf(stderr, "%d", k+1);
		fprintf(stderr, " ");
		for (j=0; j<16; j++) fprintf(stderr, "%02X", key_info[kpos+1+j]);
		kpos+=17;
		if (!iv_size && is_protected) {
			if (kpos >= key_info_size) break;
			/* the constant-IV size byte sits at the current parse position */
			constant_iv_size = key_info[kpos];
			kpos += 1 + constant_iv_size;
		}
		fprintf(stderr, " - %sIV size %d \n", constant_iv_size ? "const " : "", constant_iv_size ? constant_iv_size : iv_size);
	}
}

static void DumpMetaItem(GF_ISOFile *file, Bool root_meta, u32 tk_num, char *name)
{
	char szInd[2];
	u32 i, count, primary_id;
	u32 meta_type = gf_isom_get_meta_type(file, root_meta, tk_num);
	if (name[0]=='\t') {
		szInd[0] = '\t';
		szInd[1] = 0;
	} else {
		szInd[0] = 0;
	}
	count = gf_isom_get_meta_item_count(file, root_meta, tk_num);
	primary_id = gf_isom_get_meta_primary_item_id(file, root_meta, tk_num);
	fprintf(stderr, "%s type: \"%s\" - %d resource item(s)\n", name, meta_type ? 
gf_4cc_to_str(meta_type) : "undefined", (count+(primary_id>0))); switch (gf_isom_has_meta_xml(file, root_meta, tk_num)) { case 1: fprintf(stderr, "%sMeta has XML resource\n", szInd); break; case 2: fprintf(stderr, "%sMeta has BinaryXML resource\n", szInd); break; } if (primary_id) { fprintf(stderr, "%sPrimary Item - ID %d\n", szInd, primary_id); } for (i=0; i<count; i++) { const char *it_name, *mime, *enc, *url, *urn; Bool self_ref; u32 ID; u32 it_type, cenc_scheme, cenc_version; GF_Err e = gf_isom_get_meta_item_info(file, root_meta, tk_num, i+1, &ID, &it_type, &cenc_scheme, &cenc_version, &self_ref, &it_name, &mime, &enc, &url, &urn); if (e) { fprintf(stderr, "%sItem #%d fetch info error: %s\n", szInd, i+1, gf_error_to_string(e) ); continue; } fprintf(stderr, "%sItem #%d: ID %d type %s", szInd, i+1, ID, gf_4cc_to_str(it_type)); if (self_ref) fprintf(stderr, " Self-Reference"); else if (it_name && it_name[0]) fprintf(stderr, " Name \"%s\"", it_name); if (mime) fprintf(stderr, " MIME: \"%s\"", mime); if (enc) fprintf(stderr, " ContentEncoding: \"%s\"", enc); if (meta_type == GF_META_ITEM_TYPE_PICT) { GF_ImageItemProperties img_props; e = gf_isom_get_meta_image_props(file, root_meta, tk_num, ID, &img_props); if (e) { fprintf(stderr, " invalid image properties !"); } else { u32 j; Bool chan_diff = 0; if (img_props.width && img_props.height) { fprintf(stderr, " size %ux%u", img_props.width, img_props.height); } if (img_props.hSpacing && img_props.vSpacing) { fprintf(stderr, " SAR %u/%u", img_props.hSpacing, img_props.vSpacing); } if (img_props.num_channels) { fprintf(stderr, " %d channel%s (", img_props.num_channels, (img_props.num_channels>1) ? "s" : ""); for (j=1; j<img_props.num_channels; j++) { if (img_props.bits_per_channel[0] != img_props.bits_per_channel[j]) chan_diff = 1; } if (chan_diff) { for (j=0; j<img_props.num_channels; j++) { if (j) fprintf(stderr, ","); fprintf(stderr, "%d", img_props.bits_per_channel[j]); } } else { fprintf(stderr, "%d", img_props.bits_per_channel[0]); } fprintf(stderr, " bpc)"); } if (img_props.hOffset || img_props.vOffset) fprintf(stderr, " Offset %ux%u", img_props.hOffset, img_props.vOffset); if (img_props.alpha) fprintf(stderr, " Alpha"); if (img_props.hidden) fprintf(stderr, " Hidden"); if (img_props.angle) fprintf(stderr, " Rotate %d", img_props.angle); if (img_props.mirror) fprintf(stderr, " Mirror %d", img_props.mirror); if (img_props.clap_hden || img_props.clap_wden) fprintf(stderr, " Clap %d/%d,%d/%d,%d/%d,%d/%d", img_props.clap_wnum, img_props.clap_wden, img_props.clap_hnum, img_props.clap_hden, img_props.clap_honum, img_props.clap_hoden, img_props.clap_vonum, img_props.clap_voden); } } if (cenc_scheme) { Bool is_protected; u8 skip_byte_block, crypt_byte_block; const u8 *key_info; u32 key_info_size; fprintf(stderr, " - Protection scheme: %s v0x%08X", gf_4cc_to_str(cenc_scheme), cenc_version); gf_isom_extract_meta_item_get_cenc_info(file, root_meta, tk_num, ID, &is_protected, &skip_byte_block, &crypt_byte_block, &key_info, &key_info_size, NULL, NULL, NULL, NULL); if (skip_byte_block && crypt_byte_block) fprintf(stderr, " - Pattern %d:%d", skip_byte_block, crypt_byte_block); fprintf(stderr, "\n"); dump_key_info(key_info, key_info_size, is_protected); } fprintf(stderr, "\n"); if (url) fprintf(stderr, "%sURL: %s\n", szInd, url); if (urn) fprintf(stderr, "%sURN: %s\n", szInd, urn); } } static void print_config_hash(GF_List *xps_array, char *szName) { u32 i, j; u8 hash[20]; for (i=0; i<gf_list_count(xps_array); i++) { GF_NALUFFParam *slc = 
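		/* each parameter set is digested with SHA-1 (20 bytes, shown as 40 hex
		chars), a cheap way to compare SPS/PPS/VPS payloads across files without
		diffing the raw bytes */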
gf_list_get(xps_array, i); gf_sha1_csum((u8 *) slc->data, slc->size, hash); fprintf(stderr, "\t%s#%d hash: ", szName, i+1); for (j=0; j<20; j++) fprintf(stderr, "%02X", hash[j]); fprintf(stderr, "\n"); } } void dump_hevc_track_info(GF_ISOFile *file, u32 trackNum, GF_HEVCConfig *hevccfg #if !defined(GPAC_DISABLE_AV_PARSERS) && !defined(GPAC_DISABLE_HEVC) , HEVCState *hevc_state #endif /*GPAC_DISABLE_AV_PARSERS && defined(GPAC_DISABLE_HEVC)*/ ) { #if !defined(GPAC_DISABLE_AV_PARSERS) && !defined(GPAC_DISABLE_HEVC) u32 idx; #endif u32 k; Bool non_hevc_base_layer=GF_FALSE; fprintf(stderr, "\t%s Info:", hevccfg->is_lhvc ? "LHVC" : "HEVC"); if (!hevccfg->is_lhvc) fprintf(stderr, " Profile %s @ Level %g - Chroma Format %s\n", gf_hevc_get_profile_name(hevccfg->profile_idc), ((Double)hevccfg->level_idc) / 30.0, gf_avc_hevc_get_chroma_format_name(hevccfg->chromaFormat)); fprintf(stderr, "\n"); fprintf(stderr, "\tNAL Unit length bits: %d", 8*hevccfg->nal_unit_size); if (!hevccfg->is_lhvc) fprintf(stderr, " - general profile compatibility 0x%08X\n", hevccfg->general_profile_compatibility_flags); fprintf(stderr, "\n"); fprintf(stderr, "\tParameter Sets: "); for (k=0; k<gf_list_count(hevccfg->param_array); k++) { GF_NALUFFParamArray *ar=gf_list_get(hevccfg->param_array, k); if (ar->type==GF_HEVC_NALU_SEQ_PARAM) { fprintf(stderr, "%d SPS ", gf_list_count(ar->nalus)); } if (ar->type==GF_HEVC_NALU_PIC_PARAM) { fprintf(stderr, "%d PPS ", gf_list_count(ar->nalus)); } if (ar->type==GF_HEVC_NALU_VID_PARAM) { fprintf(stderr, "%d VPS ", gf_list_count(ar->nalus)); #if !defined(GPAC_DISABLE_AV_PARSERS) && !defined(GPAC_DISABLE_HEVC) for (idx=0; idx<gf_list_count(ar->nalus); idx++) { GF_NALUFFParam *vps = gf_list_get(ar->nalus, idx); s32 ps_idx=gf_hevc_read_vps(vps->data, vps->size, hevc_state); if (hevccfg->is_lhvc && (ps_idx>=0)) { non_hevc_base_layer = ! hevc_state->vps[ps_idx].base_layer_internal_flag; } } #endif } } fprintf(stderr, "\n"); #if !defined(GPAC_DISABLE_AV_PARSERS) && !defined(GPAC_DISABLE_HEVC) for (k=0; k<gf_list_count(hevccfg->param_array); k++) { GF_NALUFFParamArray *ar=gf_list_get(hevccfg->param_array, k); u32 width, height; s32 par_n, par_d; if (ar->type !=GF_HEVC_NALU_SEQ_PARAM) continue; for (idx=0; idx<gf_list_count(ar->nalus); idx++) { GF_Err e; GF_NALUFFParam *sps = gf_list_get(ar->nalus, idx); par_n = par_d = -1; e = gf_hevc_get_sps_info_with_state(hevc_state, sps->data, sps->size, NULL, &width, &height, &par_n, &par_d); if (e==GF_OK) { fprintf(stderr, "\tSPS resolution %dx%d", width, height); if ((par_n>0) && (par_d>0)) { u32 tw, th; gf_isom_get_track_layout_info(file, trackNum, &tw, &th, NULL, NULL, NULL); fprintf(stderr, " - Pixel Aspect Ratio %d:%d - Indicated track size %d x %d", par_n, par_d, tw, th); } fprintf(stderr, "\n"); } else { M4_LOG(GF_LOG_ERROR, ("Failed to read SPS: %s\n\n", gf_error_to_string(e) )); } } } #endif if (!hevccfg->is_lhvc) fprintf(stderr, "\tBit Depth luma %d - Chroma %d - %d temporal layers\n", hevccfg->luma_bit_depth, hevccfg->chroma_bit_depth, hevccfg->numTemporalLayers); else fprintf(stderr, "\t%d temporal layers\n", hevccfg->numTemporalLayers); if (hevccfg->is_lhvc) { fprintf(stderr, "\t%sHEVC base layer - Complete representation %d\n", non_hevc_base_layer ? 
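		/* when vps_base_layer_internal_flag is 0 the base layer is delivered by
		external means (possibly non-HEVC, e.g. AVC), hence the "Non-" prefix */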
"Non-" : "", hevccfg->complete_representation); } for (k=0; k<gf_list_count(hevccfg->param_array); k++) { GF_NALUFFParamArray *ar=gf_list_get(hevccfg->param_array, k); if (ar->type==GF_HEVC_NALU_SEQ_PARAM) print_config_hash(ar->nalus, "SPS"); else if (ar->type==GF_HEVC_NALU_PIC_PARAM) print_config_hash(ar->nalus, "PPS"); else if (ar->type==GF_HEVC_NALU_VID_PARAM) print_config_hash(ar->nalus, "VPS"); } } void dump_vvc_track_info(GF_ISOFile *file, u32 trackNum, GF_VVCConfig *vvccfg #if !defined(GPAC_DISABLE_AV_PARSERS) , VVCState *vvc_state #endif /*GPAC_DISABLE_AV_PARSERS && defined(GPAC_DISABLE_HEVC)*/ ) { #if !defined(GPAC_DISABLE_AV_PARSERS) u32 idx; #endif u32 k; fprintf(stderr, "\tVVC Info:"); fprintf(stderr, " Profile %d @ Level %d - Chroma Format %s\n", vvccfg->general_profile_idc, vvccfg->general_level_idc, vvccfg->chromaformat_plus_one ? gf_avc_hevc_get_chroma_format_name(vvccfg->chromaformat_plus_one-1) : "n/a"); fprintf(stderr, "\n"); fprintf(stderr, "\tNAL Unit length bits: %d", 8*vvccfg->nal_unit_size); if (vvccfg->general_constraint_info && vvccfg->num_constraint_info && vvccfg->general_constraint_info[0]) { fprintf(stderr, " - general constraint info 0x"); for (idx=0; idx<vvccfg->num_constraint_info; idx++) { fprintf(stderr, "%02X", vvccfg->general_constraint_info[idx]); } } fprintf(stderr, "\n"); fprintf(stderr, "\tParameter Sets: "); for (k=0; k<gf_list_count(vvccfg->param_array); k++) { GF_NALUFFParamArray *ar=gf_list_get(vvccfg->param_array, k); if (ar->type==GF_VVC_NALU_SEQ_PARAM) { fprintf(stderr, "%d SPS ", gf_list_count(ar->nalus)); } if (ar->type==GF_VVC_NALU_PIC_PARAM) { fprintf(stderr, "%d PPS ", gf_list_count(ar->nalus)); } if (ar->type==GF_VVC_NALU_VID_PARAM) { fprintf(stderr, "%d VPS ", gf_list_count(ar->nalus)); #if !defined(GPAC_DISABLE_AV_PARSERS) && 0 //TODO for (idx=0; idx<gf_list_count(ar->nalus); idx++) { GF_NALUFFParam *vps = gf_list_get(ar->nalus, idx); s32 ps_idx=gf_hevc_read_vps(vps->data, vps->size, hevc_state); if (hevccfg->is_lhvc && (ps_idx>=0)) { non_hevc_base_layer = ! 
hevc_state->vps[ps_idx].base_layer_internal_flag; } } #endif } } fprintf(stderr, "\n"); #if !defined(GPAC_DISABLE_AV_PARSERS) && 0 //TODO for (k=0; k<gf_list_count(vvccfg->param_array); k++) { GF_NALUFFParamArray *ar=gf_list_get(vvccfg->param_array, k); u32 width, height; s32 par_n, par_d; if (ar->type !=GF_VVC_NALU_SEQ_PARAM) continue; for (idx=0; idx<gf_list_count(ar->nalus); idx++) { GF_Err e; GF_NALUFFParam *sps = gf_list_get(ar->nalus, idx); par_n = par_d = -1; e = gf_vvc_get_sps_info_with_state(vvc_state, sps->data, sps->size, NULL, &width, &height, &par_n, &par_d); if (e==GF_OK) { fprintf(stderr, "\tSPS resolution %dx%d", width, height); if ((par_n>0) && (par_d>0)) { u32 tw, th; gf_isom_get_track_layout_info(file, trackNum, &tw, &th, NULL, NULL, NULL); fprintf(stderr, " - Pixel Aspect Ratio %d:%d - Indicated track size %d x %d", par_n, par_d, tw, th); } fprintf(stderr, "\n"); } else { M4_LOG(GF_LOG_ERROR, ("\nFailed to read SPS: %s\n\n", gf_error_to_string(e) )); } } } #endif fprintf(stderr, "\tBit Depth %d - %d temporal layers\n", vvccfg->bit_depth_plus_one-1, vvccfg->numTemporalLayers); for (k=0; k<gf_list_count(vvccfg->param_array); k++) { GF_NALUFFParamArray *ar=gf_list_get(vvccfg->param_array, k); if (ar->type==GF_VVC_NALU_SEQ_PARAM) print_config_hash(ar->nalus, "SPS"); else if (ar->type==GF_VVC_NALU_PIC_PARAM) print_config_hash(ar->nalus, "PPS"); else if (ar->type==GF_VVC_NALU_VID_PARAM) print_config_hash(ar->nalus, "VPS"); } } void gf_inspect_format_timecode(const u8 *data, u32 size, u32 tmcd_flags, u32 tc_num, u32 tc_den, u32 tmcd_fpt, char szFmt[100]); void DumpTrackInfo(GF_ISOFile *file, GF_ISOTrackID trackID, Bool full_dump, Bool is_track_num, Bool dump_m4sys) { char szCodec[RFC6381_CODEC_NAME_SIZE_MAX]; Double scale, max_rate, rate; Bool is_od_track = 0; u32 trackNum, i, j, ts, mtype, msub_type, timescale, sr, nb_ch, count, alt_group, nb_groups, nb_edits, cdur, csize, bps, pfmt, codecid; u64 time_slice, dur, size; s32 cts_shift; GF_ESD *esd; char szDur[50]; char *lang; if (!is_track_num) { trackNum = gf_isom_get_track_by_id(file, trackID); } else { trackNum = trackID; trackID = gf_isom_get_track_id(file, trackNum); } if (!trackNum) { M4_LOG(GF_LOG_ERROR, ("No track with ID %d found\n", trackID)); return; } timescale = gf_isom_get_media_timescale(file, trackNum); fprintf(stderr, "# Track %d Info - ID %d - TimeScale %d\n", trackNum, trackID, timescale); dur = gf_isom_get_media_original_duration(file, trackNum); size = gf_isom_get_media_duration(file, trackNum); fprintf(stderr, "Media Duration %s ", format_duration(dur, timescale, szDur)); if (dur != size) fprintf(stderr, " (recomputed %s)", format_duration(size, timescale, szDur)); fprintf(stderr, "\n"); if (gf_isom_check_data_reference(file, trackNum, 1) != GF_OK) { M4_LOG(GF_LOG_WARNING, ("Track uses external data reference not supported by GPAC!\n")); } nb_edits = gf_isom_get_edits_count(file, trackNum); if (nb_edits) fprintf(stderr, "Track has %d edits: track duration is %s\n", nb_edits, format_duration(gf_isom_get_track_duration(file, trackNum), gf_isom_get_timescale(file), szDur)); cts_shift = gf_isom_get_composition_offset_shift(file, trackNum); if (cts_shift) fprintf(stderr, "Track composition offset shift (negative CTS offset): %d\n", cts_shift); if (gf_isom_is_track_in_root_od(file, trackNum) ) fprintf(stderr, "Track is present in Root OD\n"); if (!gf_isom_is_track_enabled(file, trackNum)) fprintf(stderr, "Track is disabled\n"); gf_isom_get_media_language(file, trackNum, &lang); fprintf(stderr, "Media Info: 
Language \"%s (%s)\" - ", GetLanguage(lang), lang ); gf_free(lang); mtype = gf_isom_get_media_type(file, trackNum); fprintf(stderr, "Type \"%s:", gf_4cc_to_str(mtype)); msub_type = gf_isom_get_mpeg4_subtype(file, trackNum, 1); if (!msub_type) msub_type = gf_isom_get_media_subtype(file, trackNum, 1); fprintf(stderr, "%s\" - %d samples\n", gf_4cc_to_str(msub_type), gf_isom_get_sample_count(file, trackNum)); pfmt = gf_pixel_fmt_from_qt_type(msub_type); codecid = gf_codec_id_from_isobmf(msub_type); count = gf_isom_get_track_kind_count(file, trackNum); for (i = 0; i < count; i++) { char *kind_scheme, *kind_value; gf_isom_get_track_kind(file, trackNum, i, &kind_scheme, &kind_value); fprintf(stderr, "Kind: %s - %s\n", kind_scheme ? kind_scheme : "null", kind_value ? kind_value : "null"); if (kind_scheme) gf_free(kind_scheme); if (kind_value) gf_free(kind_value); } if (gf_isom_is_track_fragmented(file, trackID) ) { u32 defaultDuration, defaultSize, defaultDescriptionIndex, defaultRandomAccess; u8 defaultPadding; u16 defaultDegradationPriority; u32 frag_samples; u64 frag_duration; gf_isom_get_fragmented_samples_info(file, trackID, &frag_samples, &frag_duration); fprintf(stderr, "Fragmented track: %d samples - Media Duration %s\n", frag_samples, format_duration(frag_duration, timescale, szDur)); gf_isom_get_fragment_defaults(file, trackNum, &defaultDuration, &defaultSize, &defaultDescriptionIndex, &defaultRandomAccess, &defaultPadding, &defaultDegradationPriority); fprintf(stderr, "Fragment sample defaults: duration %d size %d stsd %d sync %d padding %d degradation_priority %d\n", defaultDuration, defaultSize, defaultDescriptionIndex, defaultRandomAccess, (u32) defaultPadding, (u32) defaultDegradationPriority ); } if (!gf_isom_is_self_contained(file, trackNum, 1)) { const char *url, *urn; gf_isom_get_data_reference(file, trackNum, 1, &url, &urn); fprintf(stderr, "Media Data Location: %s\n", url ? 
url : urn); } if (full_dump) { const char *handler_name; gf_isom_get_handler_name(file, trackNum, &handler_name); fprintf(stderr, "Handler name: %s\n", handler_name); } print_udta(file, trackNum, GF_FALSE); if (gf_isom_is_video_handler_type(mtype) ) { s32 tx, ty; u32 w, h; u16 bit_depth; gf_isom_get_visual_info(file, trackNum, 1, &w, &h); gf_isom_get_visual_bit_depth(file, trackNum, 1, &bit_depth); fprintf(stderr, "Visual Sample Entry Info: width=%d height=%d (depth=%d bits)\n", w, h, (int)bit_depth); gf_isom_get_track_layout_info(file, trackNum, &w, &h, &tx, &ty, NULL); fprintf(stderr, "Visual Track layout: x=%d y=%d width=%d height=%d\n", tx, ty, w, h); } gf_isom_get_audio_info(file, trackNum, 1, &sr, &nb_ch, &bps); gf_isom_set_nalu_extract_mode(file, trackNum, GF_ISOM_NALU_EXTRACT_INSPECT); msub_type = gf_isom_get_media_subtype(file, trackNum, 1); if (msub_type==GF_ISOM_SUBTYPE_MPEG4_CRYP) gf_isom_get_original_format_type(file, trackNum, 1, &msub_type); if ((msub_type==GF_ISOM_SUBTYPE_MPEG4) || (msub_type==GF_ISOM_SUBTYPE_AVC_H264) || (msub_type==GF_ISOM_SUBTYPE_AVC2_H264) || (msub_type==GF_ISOM_SUBTYPE_AVC3_H264) || (msub_type==GF_ISOM_SUBTYPE_AVC4_H264) || (msub_type==GF_ISOM_SUBTYPE_SVC_H264) || (msub_type==GF_ISOM_SUBTYPE_MVC_H264) || (msub_type==GF_ISOM_SUBTYPE_LSR1) || (msub_type==GF_ISOM_SUBTYPE_HVC1) || (msub_type==GF_ISOM_SUBTYPE_HEV1) || (msub_type==GF_ISOM_SUBTYPE_HVC2) || (msub_type==GF_ISOM_SUBTYPE_HEV2) || (msub_type==GF_ISOM_SUBTYPE_LHV1) || (msub_type==GF_ISOM_SUBTYPE_LHE1) || (msub_type==GF_ISOM_SUBTYPE_HVT1) ) { esd = gf_isom_get_esd(file, trackNum, 1); if (!esd || !esd->decoderConfig) { M4_LOG(GF_LOG_WARNING, ("WARNING: Broken MPEG-4 Track\n")); if (esd) gf_odf_desc_del((GF_Descriptor *)esd); } else { const char *st = gf_stream_type_name(esd->decoderConfig->streamType); if (dump_m4sys) { if (st) { fprintf(stderr, "MPEG-4 Config%s%s Stream - ObjectTypeIndication 0x%02x\n", full_dump ? "\n\t" : ": ", st, esd->decoderConfig->objectTypeIndication); } else { fprintf(stderr, "MPEG-4 Config%sStream Type 0x%02x - ObjectTypeIndication 0x%02x\n", full_dump ? 
"\n\t" : ": ", esd->decoderConfig->streamType, esd->decoderConfig->objectTypeIndication); } } if (esd->decoderConfig->streamType==GF_STREAM_OD) is_od_track=1; if (esd->decoderConfig->streamType==GF_STREAM_VISUAL) { u32 w, h; u16 rvc_predef; w = h = 0; if (esd->decoderConfig->objectTypeIndication==GF_CODECID_MPEG4_PART2) { #ifndef GPAC_DISABLE_AV_PARSERS if (!esd->decoderConfig->decoderSpecificInfo) { #else gf_isom_get_visual_info(file, trackNum, 1, &w, &h); fprintf(stderr, "MPEG-4 Visual Size %d x %d\n", w, h); #endif M4_LOG(GF_LOG_WARNING, ("Non-compliant MPEG-4 Visual track: video_object_layer infos not found in sample description\n")); #ifndef GPAC_DISABLE_AV_PARSERS } else { GF_M4VDecSpecInfo dsi; gf_m4v_get_config(esd->decoderConfig->decoderSpecificInfo->data, esd->decoderConfig->decoderSpecificInfo->dataLength, &dsi); if (full_dump) fprintf(stderr, "\t"); w = dsi.width; h = dsi.height; fprintf(stderr, "MPEG-4 Visual Size %d x %d - %s\n", w, h, gf_m4v_get_profile_name(dsi.VideoPL)); if (dsi.par_den && dsi.par_num) { u32 tw, th; gf_isom_get_track_layout_info(file, trackNum, &tw, &th, NULL, NULL, NULL); fprintf(stderr, "Pixel Aspect Ratio %d:%d - Indicated track size %d x %d\n", dsi.par_num, dsi.par_den, tw, th); } } #endif } else if (gf_isom_get_avc_svc_type(file, trackNum, 1) != GF_ISOM_AVCTYPE_NONE) { GF_AVCConfig *avccfg, *svccfg, *mvccfg; gf_isom_get_visual_info(file, trackNum, 1, &w, &h); if (full_dump) fprintf(stderr, "\t"); fprintf(stderr, "AVC/H264 Video - Visual Size %d x %d\n", w, h); avccfg = gf_isom_avc_config_get(file, trackNum, 1); svccfg = gf_isom_svc_config_get(file, trackNum, 1); mvccfg = gf_isom_mvc_config_get(file, trackNum, 1); if (!avccfg && !svccfg && !mvccfg) { M4_LOG(GF_LOG_ERROR, ("\tNon-compliant AVC track: SPS/PPS not found in sample description\n")); } else if (avccfg) { fprintf(stderr, "\tAVC Info: %d SPS - %d PPS", gf_list_count(avccfg->sequenceParameterSets) , gf_list_count(avccfg->pictureParameterSets) ); fprintf(stderr, " - Profile %s @ Level %g\n", gf_avc_get_profile_name(avccfg->AVCProfileIndication), ((Double)avccfg->AVCLevelIndication)/10.0 ); fprintf(stderr, "\tNAL Unit length bits: %d\n", 8*avccfg->nal_unit_size); #ifndef GPAC_DISABLE_AV_PARSERS for (i=0; i<gf_list_count(avccfg->sequenceParameterSets); i++) { s32 par_n, par_d; GF_NALUFFParam *slc = gf_list_get(avccfg->sequenceParameterSets, i); gf_avc_get_sps_info(slc->data, slc->size, NULL, NULL, NULL, &par_n, &par_d); if ((par_n>0) && (par_d>0)) { u32 tw, th; gf_isom_get_track_layout_info(file, trackNum, &tw, &th, NULL, NULL, NULL); fprintf(stderr, "\tPixel Aspect Ratio %d:%d - Indicated track size %d x %d\n", par_n, par_d, tw, th); } if (!full_dump) break; } #endif if (avccfg->chroma_bit_depth) { fprintf(stderr, "\tChroma format %s - Luma bit depth %d - chroma bit depth %d\n", gf_avc_hevc_get_chroma_format_name(avccfg->chroma_format), avccfg->luma_bit_depth, avccfg->chroma_bit_depth); } print_config_hash(avccfg->sequenceParameterSets, "SPS"); print_config_hash(avccfg->pictureParameterSets, "PPS"); gf_odf_avc_cfg_del(avccfg); } if (svccfg) { fprintf(stderr, "\n\tSVC Info: %d SPS - %d PPS - Profile %s @ Level %g\n", gf_list_count(svccfg->sequenceParameterSets) , gf_list_count(svccfg->pictureParameterSets), gf_avc_get_profile_name(svccfg->AVCProfileIndication), ((Double)svccfg->AVCLevelIndication)/10.0 ); fprintf(stderr, "\tSVC NAL Unit length bits: %d\n", 8*svccfg->nal_unit_size); #ifndef GPAC_DISABLE_AV_PARSERS for (i=0; i<gf_list_count(svccfg->sequenceParameterSets); i++) { GF_NALUFFParam *slc 
= gf_list_get(svccfg->sequenceParameterSets, i); if (slc) { s32 par_n, par_d; u32 s_w, s_h, sps_id; gf_avc_get_sps_info(slc->data, slc->size, &sps_id, &s_w, &s_h, &par_n, &par_d); fprintf(stderr, "\t\tSPS ID %d - Visual Size %d x %d\n", sps_id, s_w, s_h); if ((par_n>0) && (par_d>0)) { u32 tw, th; gf_isom_get_track_layout_info(file, trackNum, &tw, &th, NULL, NULL, NULL); fprintf(stderr, "\tPixel Aspect Ratio %d:%d - Indicated track size %d x %d\n", par_n, par_d, tw, th); } } } #endif print_config_hash(svccfg->sequenceParameterSets, "SPS"); print_config_hash(svccfg->pictureParameterSets, "PPS"); print_config_hash(svccfg->sequenceParameterSetExtensions, "SPSEx"); gf_odf_avc_cfg_del(svccfg); } if (mvccfg) { fprintf(stderr, "\n\tMVC Info: %d SPS - %d PPS - Profile %s @ Level %g\n", gf_list_count(mvccfg->sequenceParameterSets) , gf_list_count(mvccfg->pictureParameterSets), gf_avc_get_profile_name(mvccfg->AVCProfileIndication), ((Double)mvccfg->AVCLevelIndication)/10.0 ); fprintf(stderr, "\tMVC NAL Unit length bits: %d\n", 8*mvccfg->nal_unit_size); #ifndef GPAC_DISABLE_AV_PARSERS for (i=0; i<gf_list_count(mvccfg->sequenceParameterSets); i++) { GF_NALUFFParam *slc = gf_list_get(mvccfg->sequenceParameterSets, i); if (slc) { u32 s_w, s_h, sps_id; s32 par_n, par_d; gf_avc_get_sps_info(slc->data, slc->size, &sps_id, &s_w, &s_h, &par_n, &par_d); fprintf(stderr, "\t\tSPS ID %d - Visual Size %d x %d\n", sps_id, s_w, s_h); if ((par_n>0) && (par_d>0)) { u32 tw, th; gf_isom_get_track_layout_info(file, trackNum, &tw, &th, NULL, NULL, NULL); fprintf(stderr, "\tPixel Aspect Ratio %d:%d - Indicated track size %d x %d\n", par_n, par_d, tw, th); } } } #endif print_config_hash(mvccfg->sequenceParameterSets, "SPS"); print_config_hash(mvccfg->pictureParameterSets, "PPS"); gf_odf_avc_cfg_del(mvccfg); } } else if ((esd->decoderConfig->objectTypeIndication==GF_CODECID_HEVC) || (esd->decoderConfig->objectTypeIndication==GF_CODECID_LHVC) ) { GF_HEVCConfig *hevccfg, *lhvccfg; GF_OperatingPointsInformation *oinf; #if !defined(GPAC_DISABLE_AV_PARSERS) && !defined(GPAC_DISABLE_HEVC) HEVCState hevc_state; memset(&hevc_state, 0, sizeof(HEVCState)); hevc_state.sps_active_idx = -1; #endif gf_isom_get_visual_info(file, trackNum, 1, &w, &h); if (full_dump) fprintf(stderr, "\t"); fprintf(stderr, "HEVC Video - Visual Size %d x %d\n", w, h); hevccfg = gf_isom_hevc_config_get(file, trackNum, 1); lhvccfg = gf_isom_lhvc_config_get(file, trackNum, 1); if (msub_type==GF_ISOM_SUBTYPE_HVT1) { const u8 *data; u32 tsize; u32 is_default, tx,ty,tw,th, id, independent; Bool full_frame; if (gf_isom_get_tile_info(file, trackNum, 1, &is_default, &id, &independent, &full_frame, &tx, &ty, &tw, &th)) { fprintf(stderr, "\tHEVC Tile - ID %d independent %d (x,y,w,h)=%d,%d,%d,%d \n", id, independent, tx, ty, tw, th); } else if (gf_isom_get_sample_group_info(file, trackNum, 1, GF_ISOM_SAMPLE_GROUP_TRIF, &is_default, &data, &tsize)) { fprintf(stderr, "\tHEVC Tile track containing a tile set\n"); } else { fprintf(stderr, "\tHEVC Tile track without tiling info\n"); } } else if (!hevccfg && !lhvccfg) { M4_LOG(GF_LOG_ERROR, ("\tNon-compliant HEVC track: No hvcC or shcC found in sample description\n")); } if (gf_isom_get_reference_count(file, trackNum, GF_ISOM_REF_SABT)) { fprintf(stderr, "\tHEVC Tile base track\n"); } if (hevccfg) { dump_hevc_track_info(file, trackNum, hevccfg #if !defined(GPAC_DISABLE_AV_PARSERS) && !defined(GPAC_DISABLE_HEVC) , &hevc_state #endif ); gf_odf_hevc_cfg_del(hevccfg); fprintf(stderr, "\n"); } if (lhvccfg) { 
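				/* the enhancement-layer config is dumped with the same helper and
				the same HEVCState, so parameter sets parsed during the hvcC pass
				above remain available here */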
dump_hevc_track_info(file, trackNum, lhvccfg #if !defined(GPAC_DISABLE_AV_PARSERS) && !defined(GPAC_DISABLE_HEVC) , &hevc_state #endif ); gf_odf_hevc_cfg_del(lhvccfg); } if (gf_isom_get_oinf_info(file, trackNum, &oinf)) { fprintf(stderr, "\n\tOperating Points Information -"); fprintf(stderr, " scalability_mask %d (", oinf->scalability_mask); switch (oinf->scalability_mask) { case 2: fprintf(stderr, "Multiview"); break; case 4: fprintf(stderr, "Spatial scalability"); break; case 8: fprintf(stderr, "Auxiliary"); break; default: fprintf(stderr, "unknown"); } //TODO: need to dump more info ? fprintf(stderr, ") num_profile_tier_level %d ", gf_list_count(oinf->profile_tier_levels) ); fprintf(stderr, " num_operating_points %d dependency layers %d \n", gf_list_count(oinf->operating_points), gf_list_count(oinf->dependency_layers) ); } } /*OGG media*/ else if (esd->decoderConfig->objectTypeIndication==GF_CODECID_THEORA) { char *szName; gf_isom_get_visual_info(file, trackNum, 1, &w, &h); if (full_dump) fprintf(stderr, "\t"); if (!strnicmp((char *) &esd->decoderConfig->decoderSpecificInfo->data[3], "theora", 6)) szName = "Theora"; else szName = "Unknown"; fprintf(stderr, "Ogg/%s video / GPAC Mux - Visual Size %d x %d\n", szName, w, h); } else { //check if we know this codec from its OTI u32 codec_id = gf_codecid_from_oti(GF_STREAM_VISUAL, esd->decoderConfig->objectTypeIndication); if (codec_id) { gf_isom_get_visual_info(file, trackNum, 1, &w, &h); fprintf(stderr, "%s - Visual Size %d x %d\n", gf_codecid_name(codec_id), w, h); } } if (!w || !h) { gf_isom_get_visual_info(file, trackNum, 1, &w, &h); if (full_dump) fprintf(stderr, "\t"); fprintf(stderr, "Visual Size %d x %d\n", w, h); } if (gf_isom_get_rvc_config(file, trackNum, 1, &rvc_predef, NULL, NULL, NULL)==GF_OK) { fprintf(stderr, "Has RVC signaled - Predefined configuration %d\n", rvc_predef); } } else if (esd->decoderConfig->streamType==GF_STREAM_AUDIO) { #ifndef GPAC_DISABLE_AV_PARSERS GF_M4ADecSpecInfo a_cfg; GF_Err e; u32 oti; #endif u32 codec_id; Bool is_mp2 = GF_FALSE; switch (esd->decoderConfig->objectTypeIndication) { case GF_CODECID_AAC_MPEG2_MP: case GF_CODECID_AAC_MPEG2_LCP: case GF_CODECID_AAC_MPEG2_SSRP: is_mp2 = GF_TRUE; /*fall through to common AAC handling*/ case GF_CODECID_AAC_MPEG4: #ifndef GPAC_DISABLE_AV_PARSERS if (!esd->decoderConfig->decoderSpecificInfo) e = GF_NON_COMPLIANT_BITSTREAM; else e = gf_m4a_get_config(esd->decoderConfig->decoderSpecificInfo->data, esd->decoderConfig->decoderSpecificInfo->dataLength, &a_cfg); if (full_dump) fprintf(stderr, "\t"); if (e) { M4_LOG(GF_LOG_ERROR, ("Corrupted AAC Config\n")); } else { char *signaling = "implicit"; char *heaac = ""; if (!is_mp2 && a_cfg.has_sbr) { if (a_cfg.has_ps) heaac = "(HE-AAC v2) "; else heaac = "(HE-AAC v1) "; } if (a_cfg.base_object_type==2) { if (a_cfg.has_ps || a_cfg.has_sbr) signaling = "backward compatible"; } else { signaling = "hierarchical"; } fprintf(stderr, "%s (AOT=%d %s) %s- %d Channel(s) - SampleRate %d", gf_m4a_object_type_name(a_cfg.base_object_type), a_cfg.base_object_type, signaling, heaac, a_cfg.nb_chan, a_cfg.base_sr); if (is_mp2) fprintf(stderr, " (MPEG-2 Signaling)"); if (a_cfg.has_sbr) fprintf(stderr, " - SBR: SampleRate %d Type %s", a_cfg.sbr_sr, gf_m4a_object_type_name(a_cfg.sbr_object_type)); if (a_cfg.has_ps) fprintf(stderr, " - PS"); fprintf(stderr, "\n"); } #else fprintf(stderr, "MPEG-2/4 Audio - %d Channels - SampleRate %d\n", nb_ch, sr); #endif break; case GF_CODECID_MPEG2_PART3: case GF_CODECID_MPEG_AUDIO: if (msub_type == GF_ISOM_SUBTYPE_MPEG4_CRYP) { fprintf(stderr, 
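/* Encrypted (ISMA-crypt) MP3 tracks only report channel count and sample rate
   from the sample entry; in the clear case below, the first sample's 32-bit
   MPEG audio header is fetched and parsed (version, layer, channels, rate). */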
"MPEG-1/2 Audio - %d Channels - SampleRate %d\n", nb_ch, sr); } else { #ifndef GPAC_DISABLE_AV_PARSERS GF_ISOSample *samp = gf_isom_get_sample(file, trackNum, 1, &oti); if (samp) { u32 mhdr = GF_4CC((u8)samp->data[0], (u8)samp->data[1], (u8)samp->data[2], (u8)samp->data[3]); if (full_dump) fprintf(stderr, "\t"); fprintf(stderr, "%s Audio - %d Channel(s) - SampleRate %d - Layer %d\n", gf_mp3_version_name(mhdr), gf_mp3_num_channels(mhdr), gf_mp3_sampling_rate(mhdr), gf_mp3_layer(mhdr) ); gf_isom_sample_del(&samp); } else { M4_LOG(GF_LOG_ERROR, ("Error fetching sample: %s\n", gf_error_to_string(gf_isom_last_error(file)) )); } #else fprintf(stderr, "MPEG-1/2 Audio - %d Channels - SampleRate %d\n", nb_ch, sr); #endif } break; case GF_CODECID_EVRC: fprintf(stderr, "EVRC Audio - Sample Rate 8000 - 1 channel\n"); break; case GF_CODECID_SMV: fprintf(stderr, "SMV Audio - Sample Rate 8000 - 1 channel\n"); break; case GF_CODECID_QCELP: fprintf(stderr, "QCELP Audio - Sample Rate 8000 - 1 channel\n"); break; /*packetVideo hack for EVRC...*/ case GF_CODECID_EVRC_PV: if (esd->decoderConfig->decoderSpecificInfo && (esd->decoderConfig->decoderSpecificInfo->dataLength==8) && !strnicmp((char *)esd->decoderConfig->decoderSpecificInfo->data, "pvmm", 4)) { if (full_dump) fprintf(stderr, "\t"); fprintf(stderr, "EVRC Audio (PacketVideo Mux) - Sample Rate 8000 - 1 channel\n"); } break; default: codec_id = gf_codecid_from_oti(GF_STREAM_AUDIO, esd->decoderConfig->objectTypeIndication); if (codec_id) { fprintf(stderr, "%s - Sample Rate %d - %d channel(s)\n", gf_codecid_name(codec_id), sr, nb_ch); } break; } } else if (esd->decoderConfig->streamType==GF_STREAM_SCENE) { if (esd->decoderConfig->objectTypeIndication<=4) { GF_BIFSConfig *b_cfg = gf_odf_get_bifs_config(esd->decoderConfig->decoderSpecificInfo, esd->decoderConfig->objectTypeIndication); fprintf(stderr, "BIFS Scene description - %s stream\n", b_cfg->elementaryMasks ? "Animation" : "Command"); if (full_dump && !b_cfg->elementaryMasks) { fprintf(stderr, "\tWidth %d Height %d Pixel Metrics %s\n", b_cfg->pixelWidth, b_cfg->pixelHeight, b_cfg->pixelMetrics ? "yes" : "no"); } gf_odf_desc_del((GF_Descriptor *)b_cfg); } else if (esd->decoderConfig->objectTypeIndication==GF_CODECID_AFX) { u8 tag = esd->decoderConfig->decoderSpecificInfo ? esd->decoderConfig->decoderSpecificInfo->data[0] : 0xFF; const char *afxtype = gf_stream_type_afx_name(tag); fprintf(stderr, "AFX Stream - type %s (%d)\n", afxtype, tag); } else if (esd->decoderConfig->objectTypeIndication==GF_CODECID_FONT) { fprintf(stderr, "Font Data stream\n"); } else if (esd->decoderConfig->objectTypeIndication==GF_CODECID_LASER) { GF_LASERConfig l_cfg; gf_odf_get_laser_config(esd->decoderConfig->decoderSpecificInfo, &l_cfg); fprintf(stderr, "LASER Stream - %s\n", l_cfg.newSceneIndicator ? 
"Full Scene" : "Scene Segment"); } else if (esd->decoderConfig->objectTypeIndication==GF_CODECID_TEXT_MPEG4) { fprintf(stderr, "MPEG-4 Streaming Text stream\n"); } else if (esd->decoderConfig->objectTypeIndication==GF_CODECID_SYNTHESIZED_TEXTURE) { fprintf(stderr, "Synthetized Texture stream stream\n"); } else { M4_LOG(GF_LOG_WARNING, ("Unknown Systems stream OTI %d\n", esd->decoderConfig->objectTypeIndication)); } } /*sync is only valid if we open all tracks to take care of default MP4 sync..*/ if (!full_dump) { if (dump_m4sys) { if (!esd->OCRESID || (esd->OCRESID == esd->ESID)) fprintf(stderr, "Self-synchronized\n"); else fprintf(stderr, "Synchronized on stream %d\n", esd->OCRESID); } } else { fprintf(stderr, "\tDecoding Buffer size %d - Bitrate: avg %d - max %d kbps\n", esd->decoderConfig->bufferSizeDB, esd->decoderConfig->avgBitrate/1000, esd->decoderConfig->maxBitrate/1000); if (esd->dependsOnESID) fprintf(stderr, "\tDepends on stream %d for decoding\n", esd->dependsOnESID); else fprintf(stderr, "\tNo stream dependencies for decoding\n"); fprintf(stderr, "\tStreamPriority %d\n", esd->streamPriority); if (esd->URLString) fprintf(stderr, "\tRemote Data Source %s\n", esd->URLString); } gf_odf_desc_del((GF_Descriptor *) esd); } } else if (msub_type == GF_ISOM_SUBTYPE_AV01) { GF_AV1Config *av1c; u32 w, h; gf_isom_get_visual_info(file, trackNum, 1, &w, &h); fprintf(stderr, "\tAOM AV1 stream - Resolution %d x %d\n", w, h); av1c = gf_isom_av1_config_get(file, trackNum, 1); fprintf(stderr, "\tversion=%u, profile=%u, level_idx0=%u, tier=%u\n", (u32)av1c->version, (u32)av1c->seq_profile, (u32)av1c->seq_level_idx_0, (u32)av1c->seq_tier_0); fprintf(stderr, "\thigh_bitdepth=%u, twelve_bit=%u, monochrome=%u\n", (u32)av1c->high_bitdepth, (u32)av1c->twelve_bit, (u32)av1c->monochrome); fprintf(stderr, "\tchroma: subsampling_x=%u, subsampling_y=%u, sample_position=%u\n", (u32)av1c->chroma_subsampling_x, (u32)av1c->chroma_subsampling_y, (u32)av1c->chroma_sample_position); if (av1c->initial_presentation_delay_present) fprintf(stderr, "\tInitial presentation delay %u\n", (u32) av1c->initial_presentation_delay_minus_one+1); count = gf_list_count(av1c->obu_array); for (i=0; i<count; i++) { u8 hash[20]; GF_AV1_OBUArrayEntry *obu = gf_list_get(av1c->obu_array, i); gf_sha1_csum((u8*)obu->obu, (u32)obu->obu_length, hash); fprintf(stderr, "\tOBU#%d %s hash: ", i+1, gf_av1_get_obu_name(obu->obu_type) ); for (j=0; j<20; j++) fprintf(stderr, "%02X", hash[j]); fprintf(stderr, "\n"); } gf_odf_av1_cfg_del(av1c); } else if (msub_type == GF_ISOM_SUBTYPE_3GP_H263) { u32 w, h; gf_isom_get_visual_info(file, trackNum, 1, &w, &h); fprintf(stderr, "\t3GPP H263 stream - Resolution %d x %d\n", w, h); } else if (msub_type == GF_ISOM_SUBTYPE_MJP2) { u32 w, h; gf_isom_get_visual_info(file, trackNum, 1, &w, &h); fprintf(stderr, "\tMotionJPEG2000 stream - Resolution %d x %d\n", w, h); } else if ((msub_type == GF_ISOM_SUBTYPE_3GP_AMR) || (msub_type == GF_ISOM_SUBTYPE_3GP_AMR_WB)) { fprintf(stderr, "\t3GPP AMR%s stream - Sample Rate %d - %d channel(s) %d bps\n", (msub_type == GF_ISOM_SUBTYPE_3GP_AMR_WB) ? 
" Wide Band" : "", sr, nb_ch, (u32) bps); } else if (msub_type == GF_ISOM_SUBTYPE_3GP_EVRC) { fprintf(stderr, "\t3GPP EVRC stream - Sample Rate %d - %d channel(s) %d bps\n", sr, nb_ch, (u32) bps); } else if (msub_type == GF_ISOM_SUBTYPE_3GP_QCELP) { fprintf(stderr, "\t3GPP QCELP stream - Sample Rate %d - %d channel(s) %d bps\n", sr, nb_ch, (u32) bps); } else if (msub_type == GF_ISOM_SUBTYPE_MP3) { fprintf(stderr, "\tMPEG 1/2 Audio stream - Sample Rate %d - %d channel(s) %d bps\n", sr, nb_ch, (u32) bps); } else if ((msub_type == GF_ISOM_SUBTYPE_AC3) || (msub_type == GF_ISOM_SUBTYPE_EC3)) { u32 br = 0; const char *lfe = ""; Bool is_ec3 = (msub_type == GF_ISOM_SUBTYPE_EC3) ? GF_TRUE : GF_FALSE; #ifndef GPAC_DISABLE_AV_PARSERS GF_AC3Config *ac3 = gf_isom_ac3_config_get(file, trackNum, 1); if (ac3) { nb_ch = gf_ac3_get_channels(ac3->streams[0].acmod); for (i=0; i<ac3->streams[0].nb_dep_sub; ++i) { assert(ac3->streams[0].nb_dep_sub == 1); nb_ch += gf_ac3_get_channels(ac3->streams[0].chan_loc); } if (ac3->streams[0].lfon) lfe = ".1"; br = ac3->is_ec3 ? ac3->brcode : gf_ac3_get_bitrate(ac3->brcode); is_ec3 = ac3->is_ec3; gf_free(ac3); } #endif fprintf(stderr, "\t%s stream - Sample Rate %d - %d%s channel(s) - bitrate %d\n", is_ec3 ? "EC-3" : "AC-3", sr, nb_ch, lfe, br); } else if (msub_type == GF_ISOM_SUBTYPE_3GP_SMV) { fprintf(stderr, "\t3GPP SMV stream - Sample Rate %d - %d channel(s) %d bits per samples\n", sr, nb_ch, (u32) bps); } else if (msub_type == GF_ISOM_SUBTYPE_3GP_DIMS) { u32 w, h; GF_DIMSDescription dims; gf_isom_get_visual_info(file, trackNum, 1, &w, &h); gf_isom_get_dims_description(file, trackNum, 1, &dims); fprintf(stderr, "\t3GPP DIMS stream - size %d x %d - Profile %d - Level %d\n", w, h, dims.profile, dims.level); fprintf(stderr, "\tpathComponents: %d - useFullRequestHost: %s\n", dims.pathComponents, dims.fullRequestHost ? "yes" : "no"); fprintf(stderr, "\tstream type: %s - redundant: %s\n", dims.streamType ? "primary" : "secondary", (dims.containsRedundant==1) ? "main" : ((dims.containsRedundant==2) ? "redundant" : "main+redundant") ); if (dims.textEncoding[0]) fprintf(stderr, "\ttext encoding %s\n", dims.textEncoding); if (dims.contentEncoding[0]) fprintf(stderr, "\tcontent encoding %s\n", dims.contentEncoding); if (dims.content_script_types) fprintf(stderr, "\tscript languages %s\n", dims.content_script_types); } else if (mtype==GF_ISOM_MEDIA_HINT) { u32 refTrack; s32 refCount = gf_isom_get_reference_count(file, trackNum, GF_ISOM_REF_HINT); if (refCount>0) { fprintf(stderr, "Streaming Hint Track for track%s ", (refCount>1) ? 
"s" :""); for (i=0; i<(u32) refCount; i++) { gf_isom_get_reference(file, trackNum, GF_ISOM_REF_HINT, i+1, &refTrack); if (i) fprintf(stderr, " - "); fprintf(stderr, "ID %d", gf_isom_get_track_id(file, refTrack)); } fprintf(stderr, "\n"); } else { fprintf(stderr, "Streaming Hint Track (no refs)\n"); } #ifndef GPAC_DISABLE_ISOM_HINTING refCount = gf_isom_get_payt_count(file, trackNum); if (refCount>0) { for (i=0; i<(u32) refCount; i++) { const char *name = gf_isom_get_payt_info(file, trackNum, i+1, &refTrack); fprintf(stderr, "\tPayload ID %d: type %s\n", refTrack, name); } } #endif } else if (mtype==GF_ISOM_MEDIA_FLASH) { fprintf(stderr, "Macromedia Flash Movie\n"); } else if ((mtype==GF_ISOM_MEDIA_TEXT) || (mtype==GF_ISOM_MEDIA_SUBT) || (mtype==GF_ISOM_MEDIA_MPEG_SUBT)) { u32 w, h; s16 l; s32 tx, ty; const char *content_encoding = NULL; const char *mime = NULL; const char *config = NULL; const char *_namespace = NULL; const char *schema_loc = NULL; const char *auxiliary_mimes = NULL; gf_isom_get_track_layout_info(file, trackNum, &w, &h, &tx, &ty, &l); if (msub_type == GF_ISOM_SUBTYPE_SBTT) { gf_isom_stxt_get_description(file, trackNum, 1, &mime, &content_encoding, &config); fprintf(stderr, "Textual Subtitle Stream "); fprintf(stderr, "- mime %s", mime); if (content_encoding != NULL) { fprintf(stderr, " - encoding %s", content_encoding); } if (config != NULL) { fprintf(stderr, " - %d bytes config", (u32) strlen(config)); } } else if (msub_type == GF_ISOM_SUBTYPE_STXT) { gf_isom_stxt_get_description(file, trackNum, 1, &mime, &content_encoding, &config); fprintf(stderr, "Simple Timed Text Stream "); fprintf(stderr, "- mime %s", mime); if (content_encoding != NULL) { fprintf(stderr, " - encoding %s", content_encoding); } if (config != NULL) { fprintf(stderr, " - %d bytes config", (u32) strlen(config)); } } else if (msub_type == GF_ISOM_SUBTYPE_STPP) { gf_isom_xml_subtitle_get_description(file, trackNum, 1, &_namespace, &schema_loc, &auxiliary_mimes); fprintf(stderr, "XML Subtitle Stream "); fprintf(stderr, "- namespace %s", _namespace); if (schema_loc != NULL) { fprintf(stderr, " - schema-location %s", schema_loc); } if (auxiliary_mimes != NULL) { fprintf(stderr, " - auxiliary-mime-types %s", auxiliary_mimes); } } else { fprintf(stderr, "Unknown Text Stream"); } fprintf(stderr, "\n Size %d x %d - Translation X=%d Y=%d - Layer %d\n", w, h, tx, ty, l); } else if (mtype == GF_ISOM_MEDIA_META) { const char *content_encoding = NULL; if (msub_type == GF_ISOM_SUBTYPE_METT) { const char *mime = NULL; const char *config = NULL; gf_isom_stxt_get_description(file, trackNum, 1, &mime, &content_encoding, &config); fprintf(stderr, "Textual Metadata Stream - mime %s", mime); if (content_encoding != NULL) { fprintf(stderr, " - encoding %s", content_encoding); } if (config != NULL) { fprintf(stderr, " - %d bytes config", (u32) strlen(config)); } fprintf(stderr, "\n"); } else if (msub_type == GF_ISOM_SUBTYPE_METX) { const char *_namespace = NULL; const char *schema_loc = NULL; gf_isom_get_xml_metadata_description(file, trackNum, 1, &_namespace, &schema_loc, &content_encoding); fprintf(stderr, "XML Metadata Stream - namespace %s", _namespace); if (content_encoding != NULL) { fprintf(stderr, " - encoding %s", content_encoding); } if (schema_loc != NULL) { fprintf(stderr, " - schema-location %s", schema_loc); } fprintf(stderr, "\n"); } else { fprintf(stderr, "Unknown Metadata Stream\n"); } } else if ((msub_type==GF_ISOM_SUBTYPE_VVC1) || (msub_type==GF_ISOM_SUBTYPE_VVI1)) { GF_VVCConfig *vvccfg; u32 w, h; #if 
!defined(GPAC_DISABLE_AV_PARSERS) VVCState *vvc_state; GF_SAFEALLOC(vvc_state, VVCState); if (vvc_state) vvc_state->sps_active_idx = -1; #endif gf_isom_get_visual_info(file, trackNum, 1, &w, &h); if (full_dump) fprintf(stderr, "\t"); fprintf(stderr, "VVC Video - Visual Size %d x %d\n", w, h); vvccfg = gf_isom_vvc_config_get(file, trackNum, 1); if (!vvccfg) { M4_LOG(GF_LOG_ERROR, ("Non-compliant VVC track: No vvcC found in sample description\n")); } else { dump_vvc_track_info(file, trackNum, vvccfg #if !defined(GPAC_DISABLE_AV_PARSERS) , vvc_state #endif ); gf_odf_vvc_cfg_del(vvccfg); fprintf(stderr, "\n"); } #if !defined(GPAC_DISABLE_AV_PARSERS) if (vvc_state) gf_free(vvc_state); #endif } else if ((msub_type == GF_ISOM_SUBTYPE_MH3D_MHA1) || (msub_type == GF_ISOM_SUBTYPE_MH3D_MHA2) || (msub_type == GF_ISOM_SUBTYPE_MH3D_MHM1) || (msub_type == GF_ISOM_SUBTYPE_MH3D_MHM2) ) { const u8 *compat_profiles; u32 nb_compat_profiles; Bool valid = GF_FALSE; Bool allow_inband = GF_FALSE; if ( (msub_type == GF_ISOM_SUBTYPE_MH3D_MHM1) || (msub_type == GF_ISOM_SUBTYPE_MH3D_MHM2)) allow_inband = GF_TRUE; fprintf(stderr, "\tMPEG-H Audio stream - Sample Rate %d\n", sr); esd = gf_media_map_esd(file, trackNum, 1); if (!esd || !esd->decoderConfig || !esd->decoderConfig->decoderSpecificInfo || !esd->decoderConfig->decoderSpecificInfo->data ) { if (allow_inband) { GF_ISOSample *samp = gf_isom_get_sample(file, trackNum, 1, NULL); if (samp) { u64 ch_layout=0; s32 PL = gf_mpegh_get_mhas_pl(samp->data, samp->dataLength, &ch_layout); if (PL>=0) { fprintf(stderr, "\tProfileLevelIndication: 0x%02X", PL); if (ch_layout) fprintf(stderr, " - Reference Channel Layout %s", gf_audio_fmt_get_layout_name(ch_layout) ); fprintf(stderr, "\n"); } gf_isom_sample_del(&samp); } valid = GF_TRUE; } } else if (esd->decoderConfig->decoderSpecificInfo->dataLength>=5) { fprintf(stderr, "\tProfileLevelIndication: 0x%02X - Reference Channel Layout %s\n", esd->decoderConfig->decoderSpecificInfo->data[1] , gf_audio_fmt_get_layout_name_from_cicp(esd->decoderConfig->decoderSpecificInfo->data[2]) ); valid = GF_TRUE; } if (!valid) { M4_LOG(GF_LOG_ERROR, ("Invalid MPEG-H audio config\n")); } if (esd) gf_odf_desc_del((GF_Descriptor *)esd); compat_profiles = gf_isom_get_mpegh_compatible_profiles(file, trackNum, 1, &nb_compat_profiles); for (i=0; i<nb_compat_profiles; i++) { if (!i) fprintf(stderr, "\tCompatible profiles:"); fprintf(stderr, " 0x%02X", compat_profiles[i]); } if (i) fprintf(stderr, "\n"); } else if (msub_type==GF_ISOM_SUBTYPE_MLPA) { u32 fmt, prate; if (gf_isom_truehd_config_get(file, trackNum, 1, &fmt, &prate) != GF_OK) { fprintf(stderr, "\tInvalid TrueHD audio config\n"); } fprintf(stderr, "TrueHD Audio stream - Sample Rate %u - channels %u - format %u peak rate %u\n", sr, nb_ch, fmt, prate); } else if (codecid) { if (gf_isom_is_video_handler_type(mtype) ) { u32 w, h; gf_isom_get_visual_info(file, trackNum, 1, &w, &h); fprintf(stderr, "%s - Resolution %d x %d\n", gf_codecid_name(codecid), w, h); } else if (mtype==GF_ISOM_MEDIA_AUDIO) { gf_isom_get_audio_info(file, trackNum, 1, &sr, &nb_ch, NULL); fprintf(stderr, "%s - Sample Rate %d - %d channel(s)\n", gf_codecid_name(codecid), sr, nb_ch); } else { fprintf(stderr, "%s\n", gf_codecid_name(codecid) ); } } else if (pfmt) { u32 w, h; gf_isom_get_visual_info(file, trackNum, 1, &w, &h); fprintf(stderr, "Raw video %s - Resolution %d x %d\n", gf_pixel_fmt_name(pfmt), w, h); } else if (msub_type==GF_QT_SUBTYPE_TMCD) { u32 stsd_idx; GF_ISOSample *sample = gf_isom_get_sample(file, trackNum, 1, 
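/* QT timecode ('tmcd') tracks: the first sample holds the starting frame
   counter and the sample description holds the flags and rate, both combined
   by gf_inspect_format_timecode() into a printable timecode string. */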
&stsd_idx); fprintf(stderr, "Time Code stream\n"); if (sample) { char szTimecode[100]; u32 tmcd_flags, tmcd_num, tmcd_den, tmcd_fpt; gf_isom_get_tmcd_config(file, trackNum, stsd_idx, &tmcd_flags, &tmcd_num, &tmcd_den, &tmcd_fpt); gf_inspect_format_timecode(sample->data, sample->dataLength, tmcd_flags, tmcd_num, tmcd_den, tmcd_fpt, szTimecode); gf_isom_sample_del(&sample); fprintf(stderr, "\tFirst timecode: %s\n", szTimecode); } } else { GF_GenericSampleDescription *udesc; udesc = gf_isom_get_generic_sample_description(file, trackNum, 1); if (udesc) { if (gf_isom_is_video_handler_type(mtype) ) { fprintf(stderr, "%s - Compressor \"%s\" - Resolution %d x %d\n", ( (mtype == GF_ISOM_MEDIA_VISUAL ? "Visual" : "Auxiliary Video") ), udesc->compressor_name, udesc->width, udesc->height); } else if (mtype==GF_ISOM_MEDIA_AUDIO) { fprintf(stderr, "Audio - Sample Rate %d - %d channel(s)\n", udesc->samplerate, udesc->nb_channels); } else { fprintf(stderr, "Unknown media type\n"); } if (udesc->vendor_code) fprintf(stderr, "\tVendor code \"%s\" - Version %d - revision %d\n", gf_4cc_to_str(udesc->vendor_code), udesc->version, udesc->revision); if (udesc->extension_buf) { fprintf(stderr, "\tCodec configuration data size: %d bytes\n", udesc->extension_buf_size); gf_free(udesc->extension_buf); } gf_free(udesc); } else { fprintf(stderr, "Unknown track type\n"); } } /*Crypto info*/ if (gf_isom_is_track_encrypted(file, trackNum)) { const char *scheme_URI, *KMS_URI; u32 scheme_type, version; u32 IV_size; Bool use_sel_enc; if (gf_isom_is_ismacryp_media(file, trackNum, 1)) { gf_isom_get_ismacryp_info(file, trackNum, 1, NULL, &scheme_type, &version, &scheme_URI, &KMS_URI, &use_sel_enc, &IV_size, NULL); fprintf(stderr, "\n\tProtected by ISMA E&A scheme %s (version %d)\n", gf_4cc_to_str(scheme_type), version); if (scheme_URI) fprintf(stderr, "scheme location: %s\n", scheme_URI); if (KMS_URI) { if (!strnicmp(KMS_URI, "(key)", 5)) fprintf(stderr, "\tKMS location: key in file\n"); else fprintf(stderr, "\tKMS location: %s\n", KMS_URI); } fprintf(stderr, "\tSelective Encryption: %s\n", use_sel_enc ? "Yes" : "No"); if (IV_size) fprintf(stderr, "\tInitialization Vector size: %d bits\n", IV_size*8); } else if (gf_isom_is_omadrm_media(file, trackNum, 1)) { const char *textHdrs; u32 enc_type, hdr_len; u64 orig_len; gf_isom_get_omadrm_info(file, trackNum, 1, NULL, &scheme_type, &version, &scheme_URI, &KMS_URI, &textHdrs, &hdr_len, &orig_len, &enc_type, &use_sel_enc, &IV_size, NULL); fprintf(stderr, "\n\tProtected by OMA DRM scheme %s (version %d)\n", gf_4cc_to_str(scheme_type), version); fprintf(stderr, "\tRights Issuer: %s\n", KMS_URI); fprintf(stderr, "\tContent ID: %s\n", scheme_URI); if (textHdrs) { u32 offset; const char *start = textHdrs; fprintf(stderr, "\tOMA Textual Headers:\n"); i=0; offset=0; while (i<hdr_len) { if (start[i]==0) { fprintf(stderr, "\t\t%s\n", start+offset); offset=i+1; } i++; } fprintf(stderr, "\t\t%s\n", start+offset); } if (orig_len) fprintf(stderr, "\tOriginal media size "LLD"\n", orig_len); fprintf(stderr, "\tEncryption algorithm %s\n", (enc_type==1) ? "AEA 128 CBC" : (enc_type ? "AEA 128 CTR" : "None")); fprintf(stderr, "\tSelective Encryption: %s\n", use_sel_enc ? 
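/* Protection schemes are probed in a fixed order - ISMA E&A, OMA DRM, CENC,
   then Adobe - and each branch prints the 4CC scheme type and version plus
   its scheme-specific parameters (KMS/rights URLs, IVs, pattern, keys). */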
"Yes" : "No"); if (IV_size) fprintf(stderr, "\tInitialization Vector size: %d bits\n", IV_size*8); } else if(gf_isom_is_cenc_media(file, trackNum, 1)) { const u8 *def_key; u32 def_key_size; Bool IsEncrypted; u8 crypt_byte_block, skip_byte_block; IV_size = 0; gf_isom_get_cenc_info(file, trackNum, 1, NULL, &scheme_type, &version); gf_isom_cenc_get_default_info(file, trackNum, 1, NULL, &IsEncrypted, &crypt_byte_block, &skip_byte_block, &def_key, &def_key_size); fprintf(stderr, "\n\tProtected by CENC scheme %s version 0x%08X", gf_4cc_to_str(scheme_type), version); if (crypt_byte_block && skip_byte_block) fprintf(stderr, " - Pattern %d:%d", (u32) skip_byte_block, (u32) crypt_byte_block); if (def_key && def_key[0]) fprintf(stderr, " - MultiKey"); fprintf(stderr, "\n"); dump_key_info(def_key, def_key_size, IsEncrypted); } else if(gf_isom_is_adobe_protection_media(file, trackNum, 1)) { gf_isom_get_adobe_protection_info(file, trackNum, 1, NULL, &scheme_type, &version, NULL); fprintf(stderr, "\nProtected by Adobe scheme %s (version %d)\n", gf_4cc_to_str(scheme_type), version); } else { fprintf(stderr, "\nProtected by unknown scheme %s\n", gf_4cc_to_str(gf_isom_is_media_encrypted(file, trackNum, 0) )); } fprintf(stderr, "\n"); } if ( gf_media_get_rfc_6381_codec_name(file, trackNum, szCodec, GF_FALSE, GF_FALSE) == GF_OK) { fprintf(stderr, "\tRFC6381 Codec Parameters: %s\n", szCodec); } DumpMetaItem(file, 0, trackNum, "\tTrack Meta"); gf_isom_get_track_switch_group_count(file, trackNum, &alt_group, &nb_groups); if (alt_group) { fprintf(stderr, "Alternate Group ID %d\n", alt_group); for (i=0; i<nb_groups; i++) { u32 nb_crit, switchGroupID; const u32 *criterias = gf_isom_get_track_switch_parameter(file, trackNum, i+1, &switchGroupID, &nb_crit); if (!nb_crit) { fprintf(stderr, "\tNo criteria in %s group\n", switchGroupID ? "switch" : "alternate"); } else { if (switchGroupID) { fprintf(stderr, "\tSwitchGroup ID %d criterias: ", switchGroupID); } else { fprintf(stderr, "\tAlternate Group criterias: "); } for (j=0; j<nb_crit; j++) { if (j) fprintf(stderr, " "); fprintf(stderr, "%s", gf_4cc_to_str(criterias[j]) ); } fprintf(stderr, "\n"); } } } switch (gf_isom_has_sync_points(file, trackNum)) { case 0: fprintf(stderr, "\tAll samples are sync\n"); break; case 1: { u32 nb_sync = gf_isom_get_sync_point_count(file, trackNum) - 1; if (! 
nb_sync) { fprintf(stderr, "\tOnly one sync sample\n"); } else { fprintf(stderr, "\tAverage GOP length: %d samples\n", gf_isom_get_sample_count(file, trackNum) / nb_sync); } } break; case 2: fprintf(stderr, "\tNo sync sample found\n"); break; } fprintf(stderr, "\tMax sample duration: %d / %d\n", gf_isom_get_max_sample_delta(file, trackNum), timescale); if (!full_dump) { fprintf(stderr, "\n"); return; } dur = size = 0; max_rate = rate = 0; time_slice = 0; ts = gf_isom_get_media_timescale(file, trackNum); csize = gf_isom_get_constant_sample_size(file, trackNum); cdur = gf_isom_get_constant_sample_duration(file, trackNum); count = gf_isom_get_sample_count(file, trackNum); if (csize && cdur) { size = count * csize; dur = cdur * count; } else { for (j=0; j<count; j++) { GF_ISOSample *samp; if (is_od_track) { samp = gf_isom_get_sample(file, trackNum, j+1, NULL); } else { samp = gf_isom_get_sample_info(file, trackNum, j+1, NULL, NULL); } if (!samp) { M4_LOG(GF_LOG_ERROR, ("Failed to fetch sample %d\n", j+1)); return; } dur = samp->DTS+samp->CTS_Offset; size += samp->dataLength; rate += samp->dataLength; if (samp->DTS - time_slice > ts) { Double max_tmp = rate * ts / (samp->DTS - time_slice); if (max_rate < max_tmp ) max_rate = max_tmp; rate = 0; time_slice = samp->DTS; } gf_isom_sample_del(&samp); } } fprintf(stderr, "\nComputed info from media:\n"); if (csize && cdur) { fprintf(stderr, "\tConstant sample size %d bytes and dur %d / %d\n", csize, cdur, ts); } scale = 1000.0 / ts; dur = (u64) (scale * dur); fprintf(stderr, "\tTotal size "LLU" bytes - Total samples duration "LLU" ms\n", size, dur); if (!dur) { fprintf(stderr, "\n"); return; } /*rate in byte, dur is in ms*/ rate = 8000.0 * size / dur; if (!max_rate) max_rate = rate; else max_rate *= 8.0; if (rate >= 1500) { fprintf(stderr, "\tAverage rate %.2f kbps - Max Rate %.2f kbps\n", rate/1000, max_rate/1000); } else { fprintf(stderr, "\tAverage rate %.2f bps - Max Rate %.2f bps\n", rate, max_rate); } { u32 dmin, dmax, davg, smin, smax, savg; gf_isom_get_chunks_infos(file, trackNum, &dmin, &davg, &dmax, &smin, &savg, &smax); fprintf(stderr, "\tChunk durations: min %d ms - max %d ms - average %d ms\n", (1000*dmin)/ts, (1000*dmax)/ts, (1000*davg)/ts); fprintf(stderr, "\tChunk sizes (bytes): min %d - max %d - average %d\n", smin, smax, savg); } fprintf(stderr, "\n"); count = gf_isom_get_chapter_count(file, trackNum); if (count) { const char *name; u64 time; fprintf(stderr, "\nChapters:\n"); for (j=0; j<count; j++) { gf_isom_get_chapter(file, trackNum, j+1, &time, &name); fprintf(stderr, "\tChapter #%d - %s - \"%s\"\n", j+1, format_duration(time, 1000, szDur), name); } } } void DumpMovieInfo(GF_ISOFile *file) { GF_InitialObjectDescriptor *iod; Bool dump_m4sys = GF_FALSE; u32 i, brand, min, timescale, count, data_len; const u8 *data; u64 create, modif; Bool has_itags = GF_FALSE; char szDur[50]; DumpMetaItem(file, 1, 0, "# File Meta"); if (!gf_isom_has_movie(file)) { if (gf_isom_has_segment(file, &brand, &min)) { count = gf_isom_segment_get_fragment_count(file); fprintf(stderr, "File is a segment - %d movie fragments - Brand %s (version %d):\n", count, gf_4cc_to_str(brand), min); for (i=0; i<count; i++) { u32 j, traf_count = gf_isom_segment_get_track_fragment_count(file, i+1); for (j=0; j<traf_count; j++) { u32 ID; u64 tfdt; ID = gf_isom_segment_get_track_fragment_decode_time(file, i+1, j+1, &tfdt); fprintf(stderr, "\tFragment #%d Track ID %d - TFDT "LLU"\n", i+1, ID, tfdt); } } } else { fprintf(stderr, "File has no movie (moov) - static data 
container\n"); } return; } timescale = gf_isom_get_timescale(file); i=gf_isom_get_track_count(file); fprintf(stderr, "# Movie Info - %d track%s - TimeScale %d\n", i, i>1 ? "s" : "", timescale); modif = gf_isom_get_duration(file); create = gf_isom_get_original_duration(file); fprintf(stderr, "Duration %s", format_duration(create, timescale, szDur)); if (create!=modif) { fprintf(stderr, " (recomputed %s)", format_duration(modif, timescale, szDur)); } fprintf(stderr, "\n"); #ifndef GPAC_DISABLE_ISOM_FRAGMENTS if (gf_isom_is_fragmented(file)) { fprintf(stderr, "Fragmented: yes - duration %s\n%d fragments - %d SegmentIndexes\n", format_duration(gf_isom_get_fragmented_duration(file), timescale, szDur), gf_isom_get_fragments_count(file, 0) , gf_isom_get_fragments_count(file, 1) ); } else { fprintf(stderr, "Fragmented: no\n"); } #endif if (gf_isom_moov_first(file)) fprintf(stderr, "Progressive (moov before mdat)\n"); if (gf_isom_get_brand_info(file, &brand, &min, &count) == GF_OK) { fprintf(stderr, "Major Brand %s - version %d - compatible brands:", gf_4cc_to_str(brand), min); for (i=0; i<count;i++) { if (gf_isom_get_alternate_brand(file, i+1, &brand)==GF_OK) fprintf(stderr, " %s", gf_4cc_to_str(brand) ); } fprintf(stderr, "\n"); } gf_isom_get_creation_time(file, &create, &modif); fprintf(stderr, "Created: %s", format_date(create, szDur)); if (create != modif) fprintf(stderr, "Modified: %s", format_date(modif, szDur)); fprintf(stderr, "\n"); DumpMetaItem(file, 0, 0, "# Movie Meta"); iod = (GF_InitialObjectDescriptor *) gf_isom_get_root_od(file); if (iod) { u32 desc_size = gf_odf_desc_size((GF_Descriptor *)iod); if (iod->tag == GF_ODF_IOD_TAG) { fprintf(stderr, "File has root IOD (%d bytes)\n", desc_size); fprintf(stderr, "Scene PL 0x%02x - Graphics PL 0x%02x - OD PL 0x%02x\n", iod->scene_profileAndLevel, iod->graphics_profileAndLevel, iod->OD_profileAndLevel); fprintf(stderr, "Visual PL: %s (0x%02x)\n", gf_m4v_get_profile_name(iod->visual_profileAndLevel), iod->visual_profileAndLevel); fprintf(stderr, "Audio PL: %s (0x%02x)\n", gf_m4a_get_profile_name(iod->audio_profileAndLevel), iod->audio_profileAndLevel); //fprintf(stderr, "inline profiles included %s\n", iod->inlineProfileFlag ? 
"yes" : "no"); } else { fprintf(stderr, "File has root OD (%d bytes)\n", desc_size); } if (!gf_list_count(iod->ESDescriptors)) fprintf(stderr, "No streams included in root OD\n"); else dump_m4sys = GF_TRUE; gf_odf_desc_del((GF_Descriptor *) iod); } if (gf_isom_is_JPEG2000(file)) fprintf(stderr, "File is JPEG 2000\n"); count = gf_isom_get_copyright_count(file); if (count) { const char *lang, *note; fprintf(stderr, "\nCopyrights:\n"); for (i=0; i<count; i++) { gf_isom_get_copyright(file, i+1, &lang, &note); fprintf(stderr, "\t(%s) %s\n", lang, note); } } count = gf_isom_get_chapter_count(file, 0); if (count) { const char *name; u64 time; fprintf(stderr, "\nChapters:\n"); for (i=0; i<count; i++) { gf_isom_get_chapter(file, 0, i+1, &time, &name); fprintf(stderr, "\tChapter #%d - %s - \"%s\"\n", i+1, format_duration(time, 1000, szDur), name); } } if (gf_isom_apple_get_tag(file, 0, &data, &data_len) == GF_OK) { has_itags = GF_TRUE; fprintf(stderr, "\niTunes Info:\n"); i=0; while (1) { u32 int_val2, flags, itype; GF_ISOiTunesTag tag; u64 int_val; s32 tag_idx; GF_Err e = gf_isom_apple_enum_tag(file, i, &tag, &data, &data_len, &int_val, &int_val2, &flags); if (e) break; i++; tag_idx = gf_itags_find_by_itag(tag); if (tag_idx<0) { fprintf(stderr, "\t%s: %s\n", gf_4cc_to_str(tag), data); continue; } fprintf(stderr, "\t%s: ", gf_itags_get_name(tag_idx) ); itype = gf_itags_get_type(tag_idx); switch (itype) { case GF_ITAG_BOOL: fprintf(stderr, int_val ? "yes" : "no"); break; case GF_ITAG_INT8: case GF_ITAG_INT16: case GF_ITAG_INT32: case GF_ITAG_INT64: fprintf(stderr, LLU, int_val); break; case GF_ITAG_FRAC6: case GF_ITAG_FRAC8: fprintf(stderr, LLU" / %u", int_val, int_val2); break; case GF_ITAG_FILE: if (flags==14) fprintf(stderr, "PNG File"); else if (flags==13) fprintf(stderr, "JPEG File"); else fprintf(stderr, "unknown (flags %d)", flags); break; case GF_ITAG_ID3_GENRE: if (int_val) { fprintf(stderr, "%s", gf_id3_get_genre((u32) int_val) ); break; } //fallthrough default: if (data) fprintf(stderr, "%s", data); else fprintf(stderr, data_len ? 
"none" : "unknown"); break; } fprintf(stderr, "\n"); } } i=0; while (1) { u32 type, version; char *wmatag; GF_Err e = gf_isom_wma_enum_tag(file, i, &wmatag, &data, &data_len, &version, &type); if (e) break; if (!i) { fprintf(stderr, "\nWMA Info:\n"); } i++; fprintf(stderr, "\t%s", wmatag); if (version!=1) fprintf(stderr, " (version %d)", version); fprintf(stderr, ": "); if (type) { fprintf(stderr, "unknown type %d\n", type); } else { u16 *src_str = (u16 *) data; u32 len = (u32) ( UTF8_MAX_BYTES_PER_CHAR * gf_utf8_wcslen(src_str) ); char *utf8str = (char *)gf_malloc(len + 1); u32 res_len = (u32) gf_utf8_wcstombs(utf8str, len, (const unsigned short **) &src_str); utf8str[res_len] = 0; fprintf(stderr, "%s\n", utf8str); gf_free(utf8str); } } print_udta(file, 0, has_itags); fprintf(stderr, "\n"); for (i=0; i<gf_isom_get_track_count(file); i++) { DumpTrackInfo(file, i+1, 0, GF_TRUE, dump_m4sys); } } #endif /*defined(GPAC_DISABLE_ISOM) || defined(GPAC_DISABLE_ISOM_WRITE)*/ #ifndef GPAC_DISABLE_MPEG2TS typedef struct { /* when writing to file */ FILE *pes_out; char dump[100]; #if 0 FILE *pes_out_nhml; char nhml[100]; FILE *pes_out_info; char info[100]; #endif Bool is_info_dumped; u32 prog_number; /* For logging timing information (PCR, PTS/DTS) */ FILE *timestamps_info_file; char timestamps_info_name[100]; /* when dumping TS information */ u32 dump_pid; Bool has_seen_pat; } GF_M2TS_Dump; static void on_m2ts_dump_event(GF_M2TS_Demuxer *ts, u32 evt_type, void *par) { u32 i, count; GF_M2TS_Program *prog; GF_M2TS_PES_PCK *pck; GF_M2TS_Dump *dumper = (GF_M2TS_Dump *)ts->user; switch (evt_type) { case GF_M2TS_EVT_PAT_FOUND: if (dumper->timestamps_info_file) { fprintf(dumper->timestamps_info_file, "%u\t%d\n", ts->pck_number, 0); } break; case GF_M2TS_EVT_PAT_UPDATE: if (dumper->timestamps_info_file) { fprintf(dumper->timestamps_info_file, "%u\t%d\n", ts->pck_number, 0); } break; case GF_M2TS_EVT_PAT_REPEAT: /* WARNING: We detect the pat on a repetition, probably to ensure that we also have seen all the PMT To be checked */ dumper->has_seen_pat = 1; if (dumper->timestamps_info_file) { fprintf(dumper->timestamps_info_file, "%u\t%d\n", ts->pck_number, 0); } // fprintf(stderr, "Repeated PAT found - %d programs\n", gf_list_count(ts->programs) ); break; case GF_M2TS_EVT_CAT_FOUND: if (dumper->timestamps_info_file) { fprintf(dumper->timestamps_info_file, "%u\t%d\n", ts->pck_number, 0); } break; case GF_M2TS_EVT_CAT_UPDATE: if (dumper->timestamps_info_file) { fprintf(dumper->timestamps_info_file, "%u\t%d\n", ts->pck_number, 0); } break; case GF_M2TS_EVT_CAT_REPEAT: if (dumper->timestamps_info_file) { fprintf(dumper->timestamps_info_file, "%u\t%d\n", ts->pck_number, 0); } break; case GF_M2TS_EVT_PMT_FOUND: prog = (GF_M2TS_Program*)par; if (gf_list_count(ts->programs)>1 && prog->number!=dumper->prog_number) break; count = gf_list_count(prog->streams); GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("Program number %d found - %d streams:\n", prog->number, count)); for (i=0; i<count; i++) { GF_M2TS_ES *es = gf_list_get(prog->streams, i); if (es->pid == prog->pmt_pid) { GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("\tPID %d: Program Map Table\n", es->pid)); } else { GF_M2TS_PES *pes = (GF_M2TS_PES *)es; gf_m2ts_set_pes_framing(pes, dumper->pes_out ? 
GF_M2TS_PES_FRAMING_RAW : GF_M2TS_PES_FRAMING_DEFAULT); GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("\tPID %d: %s ", pes->pid, gf_m2ts_get_stream_name(pes->stream_type) )); if (pes->mpeg4_es_id) GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, (" - MPEG-4 ES ID %d", pes->mpeg4_es_id)); GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("\n")); } } if (dumper->timestamps_info_file) { fprintf(dumper->timestamps_info_file, "%u\t%d\n", ts->pck_number, prog->pmt_pid); } break; case GF_M2TS_EVT_PMT_UPDATE: prog = (GF_M2TS_Program*)par; if (gf_list_count(ts->programs)>1 && prog->number!=dumper->prog_number) break; if (dumper->timestamps_info_file) { fprintf(dumper->timestamps_info_file, "%u\t%d\n", ts->pck_number, prog->pmt_pid); } break; case GF_M2TS_EVT_PMT_REPEAT: prog = (GF_M2TS_Program*)par; if (gf_list_count(ts->programs)>1 && prog->number!=dumper->prog_number) break; if (dumper->timestamps_info_file) { fprintf(dumper->timestamps_info_file, "%u\t%d\n", ts->pck_number, prog->pmt_pid); } break; case GF_M2TS_EVT_SDT_FOUND: #ifndef GPAC_DISABLE_LOG count = gf_list_count(ts->SDTs) ; GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("Program Description found - %d desc:\n", count)); for (i=0; i<count; i++) { GF_M2TS_SDT *sdt = gf_list_get(ts->SDTs, i); GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("\tServiceID %d - Provider %s - Name %s\n", sdt->service_id, sdt->provider, sdt->service)); } #endif break; case GF_M2TS_EVT_SDT_UPDATE: #ifndef GPAC_DISABLE_LOG count = gf_list_count(ts->SDTs) ; GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("Program Description updated - %d desc\n", count)); for (i=0; i<count; i++) { GF_M2TS_SDT *sdt = gf_list_get(ts->SDTs, i); GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("\tServiceID %d - Provider %s - Name %s\n", sdt->service_id, sdt->provider, sdt->service)); } #endif break; case GF_M2TS_EVT_SDT_REPEAT: break; case GF_M2TS_EVT_PES_TIMING: pck = par; if (gf_list_count(ts->programs)>1 && pck->stream->program->number != dumper->prog_number) break; break; case GF_M2TS_EVT_PES_PCK: pck = par; if (gf_list_count(ts->programs)>1 && pck->stream->program->number != dumper->prog_number) break; if (dumper->has_seen_pat) { /*We need the interpolated PCR for the pcrb, hence moved this calculus out, and saving the calculated value in index_info to put it in the pcrb*/ GF_M2TS_PES *pes = pck->stream; /*FIXME : not used GF_M2TS_Program *prog = pes->program; */ /* Interpolated PCR value for the TS packet containing the PES header start */ u64 interpolated_pcr_value = 0; if (pes->last_pcr_value && pes->before_last_pcr_value_pck_number && pes->last_pcr_value > pes->before_last_pcr_value) { u32 delta_pcr_pck_num = pes->last_pcr_value_pck_number - pes->before_last_pcr_value_pck_number; u32 delta_pts_pcr_pck_num = pes->pes_start_packet_number - pes->last_pcr_value_pck_number; u64 delta_pcr_value = pes->last_pcr_value - pes->before_last_pcr_value; if ((pes->pes_start_packet_number > pes->last_pcr_value_pck_number) && (pes->last_pcr_value > pes->before_last_pcr_value)) { pes->last_pcr_value = pes->before_last_pcr_value; } /* we can compute the interpolated pcr value for the packet containing the PES header */ interpolated_pcr_value = pes->last_pcr_value + (u64)((delta_pcr_value*delta_pts_pcr_pck_num*1.0)/delta_pcr_pck_num); } if (dumper->timestamps_info_file) { Double diff; fprintf(dumper->timestamps_info_file, "%u\t%d\t", pck->stream->pes_start_packet_number, pck->stream->pid); if (interpolated_pcr_value) fprintf(dumper->timestamps_info_file, "%f", interpolated_pcr_value/(300.0 * 90000)); fprintf(dumper->timestamps_info_file, "\t"); if 
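/* The PCR at the PES start packet is linearly interpolated from the last two
   PCR samples: pcr(start) = last_pcr + delta_pcr * (start_pck - last_pcr_pck)
   / (last_pcr_pck - before_last_pcr_pck), then logged in seconds by dividing
   by 300 * 90000 (the 27 MHz PCR clock). */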
(pck->DTS) fprintf(dumper->timestamps_info_file, "%f", (pck->DTS / 90000.0)); fprintf(dumper->timestamps_info_file, "\t%f\t%d\t%d", pck->PTS / 90000.0, (pck->flags & GF_M2TS_PES_PCK_RAP) ? 1 : 0, (pck->flags & GF_M2TS_PES_PCK_DISCONTINUITY) ? 1 : 0); if (interpolated_pcr_value) { diff = (pck->DTS ? pck->DTS : pck->PTS) / 90000.0; diff -= pes->last_pcr_value / (300.0 * 90000); fprintf(dumper->timestamps_info_file, "\t%f\n", diff); if (diff<0) { M4_LOG(GF_LOG_WARNING, ("Warning: detected PTS/DTS value less than current PCR of %g sec\n", diff)); } } else { fprintf(dumper->timestamps_info_file, "\t\n"); } } } if (dumper->has_seen_pat && dumper->pes_out && (dumper->dump_pid == pck->stream->pid)) { gf_fwrite(pck->data, pck->data_len, dumper->pes_out); } break; case GF_M2TS_EVT_PES_PCR: pck = par; if (gf_list_count(ts->programs)>1 && pck->stream->program->number != dumper->prog_number) break; if (dumper->timestamps_info_file) { fprintf(dumper->timestamps_info_file, "%u\t%d\t%f\t\t\t\t%d\n", pck->stream->program->last_pcr_value_pck_number, pck->stream->pid, pck->PTS / (300*90000.0), (pck->flags & GF_M2TS_PES_PCK_DISCONTINUITY) ? 1 : 0); } break; case GF_M2TS_EVT_SL_PCK: #if 0 { GF_M2TS_SL_PCK *sl_pck = par; if (dumper->pes_out && (dumper->dump_pid == sl_pck->stream->pid)) { GF_SLHeader header; u32 header_len; if (sl_pck->stream->mpeg4_es_id) { GF_ESD *esd = ((GF_M2TS_PES*)sl_pck->stream)->esd; if (!dumper->is_info_dumped) { if (esd->decoderConfig->decoderSpecificInfo) gf_fwrite(esd->decoderConfig->decoderSpecificInfo->data, esd->decoderConfig->decoderSpecificInfo->dataLength, dumper->pes_out_info); dumper->is_info_dumped = 1; fprintf(dumper->pes_out_nhml, "<NHNTStream version=\"1.0\" "); fprintf(dumper->pes_out_nhml, "timeScale=\"%d\" ", esd->slConfig->timestampResolution); fprintf(dumper->pes_out_nhml, "streamType=\"%d\" ", esd->decoderConfig->streamType); fprintf(dumper->pes_out_nhml, "objectTypeIndication=\"%d\" ", esd->decoderConfig->objectTypeIndication); if (esd->decoderConfig->decoderSpecificInfo) fprintf(dumper->pes_out_nhml, "specificInfoFile=\"%s\" ", dumper->info); fprintf(dumper->pes_out_nhml, "baseMediaFile=\"%s\" ", dumper->dump); fprintf(dumper->pes_out_nhml, "inRootOD=\"yes\">\n"); } gf_sl_depacketize(esd->slConfig, &header, sl_pck->data, sl_pck->data_len, &header_len); gf_fwrite(sl_pck->data+header_len, sl_pck->data_len-header_len, dumper->pes_out); fprintf(dumper->pes_out_nhml, "<NHNTSample DTS=\""LLD"\" dataLength=\"%d\" isRAP=\"%s\"/>\n", header.decodingTimeStamp, sl_pck->data_len-header_len, (header.randomAccessPointFlag?"yes":"no")); } } } #endif break; } } void dump_mpeg2_ts(char *mpeg2ts_file, char *out_name, Bool prog_num) { u8 data[188]; GF_M2TS_Dump dumper; u32 size; u64 fsize, fdone; GF_M2TS_Demuxer *ts; FILE *src; if (!prog_num && !out_name) { fprintf(stderr, "No program number nor output filename specified. 
No timestamp file will be generated."); } src = gf_fopen(mpeg2ts_file, "rb"); if (!src) { M4_LOG(GF_LOG_ERROR, ("Cannot open %s: no such file\n", mpeg2ts_file)); return; } ts = gf_m2ts_demux_new(); ts->on_event = on_m2ts_dump_event; ts->notify_pes_timing = 1; memset(&dumper, 0, sizeof(GF_M2TS_Dump)); ts->user = &dumper; dumper.prog_number = prog_num; /*PES dumping*/ if (out_name) { char *pid = strrchr(out_name, '#'); if (pid) { dumper.dump_pid = atoi(pid+1); pid[0] = 0; sprintf(dumper.dump, "%s_%d.raw", out_name, dumper.dump_pid); dumper.pes_out = gf_fopen(dumper.dump, "wb"); #if 0 sprintf(dumper.nhml, "%s_%d.nhml", pes_out_name, dumper.dump_pid); dumper.pes_out_nhml = gf_fopen(dumper.nhml, "wt"); sprintf(dumper.info, "%s_%d.info", pes_out_name, dumper.dump_pid); dumper.pes_out_info = gf_fopen(dumper.info, "wb"); #endif pid[0] = '#'; } } gf_fseek(src, 0, SEEK_END); fsize = gf_ftell(src); gf_fseek(src, 0, SEEK_SET); /* first loop to process all packets between two PAT, and assume all signaling was found between these 2 PATs */ while (!feof(src)) { size = (u32) gf_fread(data, 188, src); if (size<188) break; gf_m2ts_process_data(ts, data, size); if (dumper.has_seen_pat) break; } dumper.has_seen_pat = GF_TRUE; if (!prog_num) { GF_M2TS_Program *p = gf_list_get(ts->programs, 0); if (p) prog_num = p->number; fprintf(stderr, "No program number specified, defaulting to first program\n"); } if (!prog_num && !out_name) { fprintf(stderr, "No program number nor output filename specified. No timestamp file will be generated\n"); } if (prog_num) { sprintf(dumper.timestamps_info_name, "%s_prog_%d_timestamps.txt", mpeg2ts_file, prog_num/*, mpeg2ts_file*/); dumper.timestamps_info_file = gf_fopen(dumper.timestamps_info_name, "wt"); if (!dumper.timestamps_info_file) { M4_LOG(GF_LOG_ERROR, ("Cannot open file %s\n", dumper.timestamps_info_name)); return; } fprintf(dumper.timestamps_info_file, "PCK#\tPID\tPCR\tDTS\tPTS\tRAP\tDiscontinuity\tDTS-PCR Diff\n"); } gf_m2ts_reset_parsers(ts); gf_fseek(src, 0, SEEK_SET); fdone = 0; while (!feof(src)) { size = (u32) gf_fread(data, 188, src); if (size<188) break; gf_m2ts_process_data(ts, data, size); fdone += size; gf_set_progress("MPEG-2 TS Parsing", fdone, fsize); } gf_fclose(src); gf_m2ts_demux_del(ts); if (dumper.pes_out) gf_fclose(dumper.pes_out); #if 0 if (dumper.pes_out_nhml) { if (dumper.is_info_dumped) fprintf(dumper.pes_out_nhml, "</NHNTStream>\n"); gf_fclose(dumper.pes_out_nhml); gf_fclose(dumper.pes_out_info); } #endif if (dumper.timestamps_info_file) gf_fclose(dumper.timestamps_info_file); } #endif /*GPAC_DISABLE_MPEG2TS*/ #include <gpac/download.h> #include <gpac/mpd.h> void get_file_callback(void *usr_cbk, GF_NETIO_Parameter *parameter) { if (parameter->msg_type==GF_NETIO_DATA_EXCHANGE) { u64 tot_size, done, max; u32 bps; gf_dm_sess_get_stats(parameter->sess, NULL, NULL, &tot_size, &done, &bps, NULL); if (tot_size) { max = done; max *= 100; max /= tot_size; fprintf(stderr, "download %02d %% at %05d kpbs\r", (u32) max, bps*8/1000); } } } static GF_DownloadSession *get_file(const char *url, GF_DownloadManager *dm, GF_Err *e) { GF_DownloadSession *sess; sess = gf_dm_sess_new(dm, url, GF_NETIO_SESSION_NOT_THREADED, get_file_callback, NULL, e); if (!sess) return NULL; *e = gf_dm_sess_process(sess); if (*e) { gf_dm_sess_del(sess); return NULL; } return sess; } static void revert_cache_file(char *item_path) { char szPATH[GF_MAX_PATH]; const char *url; GF_Config *cached; if (!strstr(item_path, "gpac_cache_")) { fprintf(stderr, "%s is not a gpac cache file\n", 
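/* Cache revert: each gpac_cache_ file has a sidecar "<name>.txt" GF_Config
   holding the original URL; the path is rebuilt from that URL (':' mapped to
   '_', directories created at each '/'), the cached blob is moved there and
   the sidecar is deleted. */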
item_path); return; } if (!strncmp(item_path, "./", 2) || !strncmp(item_path, ".\\", 2)) item_path += 2; strcpy(szPATH, item_path); strcat(szPATH, ".txt"); cached = gf_cfg_new(NULL, szPATH); url = gf_cfg_get_key(cached, "cache", "url"); if (url) url = strstr(url, "://"); if (url) { u32 i, len, dir_len=0, k=0; char *sep; char *dst_name; sep = strstr(item_path, "gpac_cache_"); if (sep) { sep[0] = 0; dir_len = (u32) strlen(item_path); sep[0] = 'g'; } url+=3; len = (u32) strlen(url); dst_name = gf_malloc(len+dir_len+1); memset(dst_name, 0, len+dir_len+1); strncpy(dst_name, item_path, dir_len); k=dir_len; for (i=0; i<len; i++) { dst_name[k] = url[i]; if (dst_name[k]==':') dst_name[k]='_'; else if (dst_name[k]=='/') { if (!gf_dir_exists(dst_name)) gf_mkdir(dst_name); } k++; } if (gf_file_exists(item_path)) { gf_file_move(item_path, dst_name); } gf_free(dst_name); } else { M4_LOG(GF_LOG_ERROR, ("Failed to reverse %s cache file\n", item_path)); } gf_cfg_del(cached); gf_file_delete(szPATH); } GF_Err rip_mpd(const char *mpd_src, const char *output_dir) { GF_DownloadSession *sess; u32 i, connect_time, reply_time, download_time, req_hdr_size, rsp_hdr_size; GF_Err e; GF_DOMParser *mpd_parser=NULL; GF_MPD *mpd=NULL; GF_MPD_Period *period; GF_MPD_AdaptationSet *as; GF_MPD_Representation *rep; char szName[GF_MAX_PATH]; GF_DownloadManager *dm; if (output_dir) { char *sep; strcpy(szName, output_dir); sep = gf_file_basename(szName); if (sep) sep[0] = 0; gf_opts_set_key("temp", "cache", szName); } else { gf_opts_set_key("temp", "cache", "."); } gf_opts_set_key("temp", "clean-cache", "true"); dm = gf_dm_new(NULL); /* char *name = strrchr(mpd_src, '/'); if (!name) name = strrchr(mpd_src, '\\'); if (!name) name = "manifest.mpd"; else name ++; if (strchr(name, '?') || strchr(name, '&')) name = "manifest.mpd"; */ fprintf(stderr, "Downloading %s\n", mpd_src); sess = get_file(mpd_src, dm, &e); if (!sess) { GF_LOG(GF_LOG_ERROR, GF_LOG_APP, ("Error downloading MPD file %s: %s\n", mpd_src, gf_error_to_string(e) )); goto err_exit; } strcpy(szName, gf_dm_sess_get_cache_name(sess) ); gf_dm_sess_get_header_sizes_and_times(sess, &req_hdr_size, &rsp_hdr_size, &connect_time, &reply_time, &download_time); gf_dm_sess_del(sess); if (e) { GF_LOG(GF_LOG_ERROR, GF_LOG_APP, ("Error fetching MPD file %s: %s\n", mpd_src, gf_error_to_string(e))); goto err_exit; } else { GF_LOG(GF_LOG_INFO, GF_LOG_APP, ("Fetched file %s\n", mpd_src)); } GF_LOG(GF_LOG_DEBUG, GF_LOG_APP, ("GET Header size %d - Reply header size %d\n", req_hdr_size, rsp_hdr_size)); GF_LOG(GF_LOG_DEBUG, GF_LOG_APP, ("GET time: Connect Time %d - Reply Time %d - Download Time %d\n", connect_time, reply_time, download_time)); mpd_parser = gf_xml_dom_new(); e = gf_xml_dom_parse(mpd_parser, szName, NULL, NULL); if (e != GF_OK) { gf_xml_dom_del(mpd_parser); GF_LOG(GF_LOG_ERROR, GF_LOG_APP, ("Error parsing MPD %s : %s\n", mpd_src, gf_error_to_string(e))); return e; } mpd = gf_mpd_new(); e = gf_mpd_init_from_dom(gf_xml_dom_get_root(mpd_parser), mpd, mpd_src); gf_xml_dom_del(mpd_parser); mpd_parser=NULL; if (e) { GF_LOG(GF_LOG_ERROR, GF_LOG_APP, ("Error initializing MPD %s : %s\n", mpd_src, gf_error_to_string(e))); goto err_exit; } else { GF_LOG(GF_LOG_DEBUG, GF_LOG_APP, ("MPD %s initialized: %s\n", szName, gf_error_to_string(e))); } revert_cache_file(szName); if (mpd->type==GF_MPD_TYPE_DYNAMIC) { GF_LOG(GF_LOG_ERROR, GF_LOG_APP, ("MPD rip is not supported on live sources\n")); e = GF_NOT_SUPPORTED; goto err_exit; } i=0; while ((period = (GF_MPD_Period *) gf_list_enum(mpd->periods, 
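/* MPD rip walks periods -> adaptation sets -> representations; for each
   representation it resolves GF_MPD_RESOLVE_URL_INIT once, then loops on
   GF_MPD_RESOLVE_URL_MEDIA with an increasing segment index until resolution
   fails. Byte-range (SegmentBase) addressing is detected and either skipped
   or rejected as not yet implemented. */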
&i))) { char *initTemplate = NULL; Bool segment_base = GF_FALSE; u32 j=0; if (period->segment_base) segment_base=GF_TRUE; if (period->segment_template && period->segment_template->initialization) { initTemplate = period->segment_template->initialization; } while ((as = gf_list_enum(period->adaptation_sets, &j))) { u32 k=0; if (!initTemplate && as->segment_template && as->segment_template->initialization) { initTemplate = as->segment_template->initialization; } if (as->segment_base) segment_base=GF_TRUE; while ((rep = gf_list_enum(as->representations, &k))) { u64 out_range_start, out_range_end, segment_duration; Bool is_in_base_url; char *seg_url; u32 seg_idx=0; if (rep->segment_template && rep->segment_template->initialization) { initTemplate = rep->segment_template->initialization; } else if (k>1) { initTemplate = NULL; } if (rep->segment_base) segment_base=GF_TRUE; e = gf_mpd_resolve_url(mpd, rep, as, period, mpd_src, 0, GF_MPD_RESOLVE_URL_INIT, 0, 0, &seg_url, &out_range_start, &out_range_end, &segment_duration, &is_in_base_url, NULL, NULL, NULL); if (e) { GF_LOG(GF_LOG_ERROR, GF_LOG_APP, ("Error resolving init segment name : %s\n", gf_error_to_string(e))); continue; } //not a byte range, replace URL if (segment_base) { } else if (out_range_start || out_range_end || !seg_url) { GF_LOG(GF_LOG_ERROR, GF_LOG_APP, ("byte range rip not yet implemented\n")); if (seg_url) gf_free(seg_url); e = GF_NOT_SUPPORTED; goto err_exit; } fprintf(stderr, "Downloading %s\n", seg_url); sess = get_file(seg_url, dm, &e); if (e) { GF_LOG(GF_LOG_ERROR, GF_LOG_APP, ("Error downloading init segment %s from MPD %s : %s\n", seg_url, mpd_src, gf_error_to_string(e))); goto err_exit; } revert_cache_file((char *) gf_dm_sess_get_cache_name(sess) ); gf_free(seg_url); gf_dm_sess_del(sess); if (segment_base) continue; while (1) { e = gf_mpd_resolve_url(mpd, rep, as, period, mpd_src, 0, GF_MPD_RESOLVE_URL_MEDIA, seg_idx, 0, &seg_url, &out_range_start, &out_range_end, &segment_duration, NULL, NULL, NULL, NULL); if (e) { if (e<0) { GF_LOG(GF_LOG_ERROR, GF_LOG_APP, ("Error resolving segment name : %s\n", gf_error_to_string(e))); } break; } seg_idx++; if (out_range_start || out_range_end || !seg_url) { GF_LOG(GF_LOG_ERROR, GF_LOG_APP, ("byte range rip not yet implemented\n")); if (seg_url) gf_free(seg_url); break; } fprintf(stderr, "Downloading %s\n", seg_url); sess = get_file(seg_url, dm, &e); if (e) { gf_free(seg_url); if (e != GF_URL_ERROR) { GF_LOG(GF_LOG_ERROR, GF_LOG_APP, ("Error downloading segment %s: %s\n", seg_url, gf_error_to_string(e))); } else { //todo, properly detect end of dash representation e = GF_OK; } break; } revert_cache_file((char *) gf_dm_sess_get_cache_name(sess) ); gf_free(seg_url); gf_dm_sess_del(sess); } } } } err_exit: if (mpd) gf_mpd_del(mpd); gf_dm_del(dm); return e; }
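/* Minimal usage sketch (hypothetical caller, not part of MP4Box itself):
   rip a static MPD and all its media segments into the current directory.
   Assumes the GPAC library was initialized by the caller. */
#if 0
static GF_Err example_rip_mpd(void)
{
	/* NULL output_dir makes rip_mpd() use "." as the download cache root */
	return rip_mpd("http://example.com/vod/manifest.mpd", NULL);
}
#endif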
/* * GPAC - Multimedia Framework C SDK * * Authors: Jean Le Feuvre * Copyright (c) Telecom ParisTech 2000-2021 * All rights reserved * * This file is part of GPAC / mp4box application * * GPAC is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * GPAC is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; see the file COPYING. If not, write to * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. * */ #include "mp4box.h" #if defined(GPAC_DISABLE_ISOM) || defined(GPAC_DISABLE_ISOM_WRITE) #error "Cannot compile MP4Box if GPAC is not built with ISO File Format support" #else #ifndef GPAC_DISABLE_X3D #include <gpac/nodes_x3d.h> #endif #ifndef GPAC_DISABLE_BIFS #include <gpac/internal/bifs_dev.h> #endif #ifndef GPAC_DISABLE_VRML #include <gpac/nodes_mpeg4.h> #endif #include <gpac/constants.h> #include <gpac/avparse.h> #include <gpac/internal/media_dev.h> /*ISO 639 languages*/ #include <gpac/iso639.h> #include <gpac/mpegts.h> #ifndef GPAC_DISABLE_SMGR #include <gpac/scene_manager.h> #endif #include <gpac/internal/media_dev.h> #include <gpac/media_tools.h> /*for built-in box printing*/ #include <gpac/internal/isomedia_dev.h> extern u32 swf_flags; extern Float swf_flatten_angle; extern GF_FileType get_file_type_by_ext(char *inName); extern u32 fs_dump_flags; void scene_coding_log(void *cbk, GF_LOG_Level log_level, GF_LOG_Tool log_tool, const char *fmt, va_list vlist); #ifdef GPAC_DISABLE_LOG void mp4box_log(const char *fmt, ...) { va_list vl; va_start(vl, fmt); vfprintf(stderr, fmt, vl); fflush(stderr); va_end(vl); } #endif u32 PrintLanguages(char *val, u32 opt) { u32 i=0, count = gf_lang_get_count(); fprintf(stderr, "Supported ISO 639 languages and codes:\n\n"); for (i=0; i<count; i++) { if (gf_lang_get_2cc(i)) { fprintf(stderr, "%s (%s - %s)\n", gf_lang_get_name(i), gf_lang_get_3cc(i), gf_lang_get_2cc(i)); } } return 1; } static const char *GetLanguage(char *lcode) { s32 idx = gf_lang_find(lcode); if (idx>=0) return gf_lang_get_name(idx); return lcode; } GF_Err dump_isom_cover_art(GF_ISOFile *file, char *inName, Bool is_final_name) { const u8 *tag; FILE *t; u32 tag_len; GF_Err e = gf_isom_apple_get_tag(file, GF_ISOM_ITUNE_COVER_ART, &tag, &tag_len); if (e!=GF_OK) { if (e==GF_URL_ERROR) { M4_LOG(GF_LOG_WARNING, ("No cover art found\n")); return GF_OK; } return e; } if (inName) { char szName[1024]; if (is_final_name) { strcpy(szName, inName); } else { sprintf(szName, "%s.%s", inName, (tag_len>>31) ? 
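/* The cover-art length doubles as a format flag: the top bit of tag_len
   selects the .png extension, and only the low 31 bits
   (tag_len & 0x7FFFFFFF) are written out below. */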
"png" : "jpg"); } t = gf_fopen(szName, "wb"); if (!t) { M4_LOG(GF_LOG_ERROR, ("Failed to open %s for dumping\n", szName)); return GF_IO_ERR; } } else { t = stdout; } gf_fwrite(tag, tag_len & 0x7FFFFFFF, t); if (inName) gf_fclose(t); return GF_OK; } #ifndef GPAC_DISABLE_SCENE_DUMP GF_Err dump_isom_scene(char *file, char *inName, Bool is_final_name, GF_SceneDumpFormat dump_mode, Bool do_log, Bool no_odf_conv) { GF_Err e; GF_SceneManager *ctx; GF_SceneGraph *sg; GF_SceneLoader load; GF_FileType ftype; gf_log_cbk prev_logs = NULL; FILE *logs = NULL; sg = gf_sg_new(); ctx = gf_sm_new(sg); memset(&load, 0, sizeof(GF_SceneLoader)); load.fileName = file; load.ctx = ctx; load.swf_import_flags = swf_flags; if (dump_mode == GF_SM_DUMP_SVG) { load.swf_import_flags |= GF_SM_SWF_USE_SVG; load.svgOutFile = inName; } load.swf_flatten_limit = swf_flatten_angle; ftype = get_file_type_by_ext(file); if (ftype == GF_FILE_TYPE_ISO_MEDIA) { load.isom = gf_isom_open(file, GF_ISOM_OPEN_READ, NULL); if (!load.isom) { e = gf_isom_last_error(NULL); M4_LOG(GF_LOG_ERROR, ("Error opening file: %s\n", gf_error_to_string(e))); gf_sm_del(ctx); gf_sg_del(sg); return e; } if (no_odf_conv) gf_isom_disable_odf_conversion(load.isom, GF_TRUE); } else if (ftype==GF_FILE_TYPE_LSR_SAF) { load.isom = gf_isom_open("saf_conv", GF_ISOM_WRITE_EDIT, NULL); #ifndef GPAC_DISABLE_MEDIA_IMPORT if (load.isom) { GF_Fraction _frac = {0,0}; e = import_file(load.isom, file, 0, _frac, 0, NULL, NULL, 0); } else #else M4_LOG(GF_LOG_WARNING, ("Warning: GPAC was compiled without Media Import support\n")); #endif e = gf_isom_last_error(NULL); if (e) { M4_LOG(GF_LOG_ERROR, ("Error importing file: %s\n", gf_error_to_string(e))); gf_sm_del(ctx); gf_sg_del(sg); if (load.isom) gf_isom_delete(load.isom); return e; } } if (do_log) { char szLog[GF_MAX_PATH]; sprintf(szLog, "%s_dec.logs", inName); logs = gf_fopen(szLog, "wt"); gf_log_set_tool_level(GF_LOG_CODING, GF_LOG_DEBUG); prev_logs = gf_log_set_callback(logs, scene_coding_log); } e = gf_sm_load_init(&load); if (!e) e = gf_sm_load_run(&load); gf_sm_load_done(&load); if (logs) { gf_log_set_tool_level(GF_LOG_CODING, GF_LOG_ERROR); gf_log_set_callback(NULL, prev_logs); gf_fclose(logs); } if (!e && dump_mode != GF_SM_DUMP_SVG) { u32 count = gf_list_count(ctx->streams); if (count) fprintf(stderr, "Scene loaded - dumping %d systems streams\n", count); else fprintf(stderr, "Scene loaded - dumping root scene\n"); e = gf_sm_dump(ctx, inName, is_final_name, dump_mode); } gf_sm_del(ctx); gf_sg_del(sg); if (e) M4_LOG(GF_LOG_ERROR, ("Error loading scene: %s\n", gf_error_to_string(e))); if (load.isom) gf_isom_delete(load.isom); return e; } #endif #ifndef GPAC_DISABLE_SCENE_STATS static void dump_stats(FILE *dump, const GF_SceneStatistics *stats) { u32 i; s32 created, count, draw_created, draw_count, deleted, draw_deleted; created = count = draw_created = draw_count = deleted = draw_deleted = 0; fprintf(dump, "<NodeStatistics>\n"); fprintf(dump, "<General NumberOfNodeTypes=\"%d\"/>\n", gf_list_count(stats->node_stats)); for (i=0; i<gf_list_count(stats->node_stats); i++) { GF_NodeStats *ptr = gf_list_get(stats->node_stats, i); fprintf(dump, "<NodeStat NodeName=\"%s\">\n", ptr->name); switch (ptr->tag) { #ifndef GPAC_DISABLE_VRML case TAG_MPEG4_Bitmap: case TAG_MPEG4_Background2D: case TAG_MPEG4_Background: case TAG_MPEG4_Box: case TAG_MPEG4_Circle: case TAG_MPEG4_CompositeTexture2D: case TAG_MPEG4_CompositeTexture3D: case TAG_MPEG4_Cylinder: case TAG_MPEG4_Cone: case TAG_MPEG4_Curve2D: case TAG_MPEG4_Extrusion: case 
TAG_MPEG4_ElevationGrid: case TAG_MPEG4_IndexedFaceSet2D: case TAG_MPEG4_IndexedFaceSet: case TAG_MPEG4_IndexedLineSet2D: case TAG_MPEG4_IndexedLineSet: case TAG_MPEG4_PointSet2D: case TAG_MPEG4_PointSet: case TAG_MPEG4_Rectangle: case TAG_MPEG4_Sphere: case TAG_MPEG4_Text: case TAG_MPEG4_Ellipse: case TAG_MPEG4_XCurve2D: draw_count += ptr->nb_created + ptr->nb_used - ptr->nb_del; draw_deleted += ptr->nb_del; draw_created += ptr->nb_created; break; #endif /*GPAC_DISABLE_VRML*/ } fprintf(dump, "<Instanciation NbObjects=\"%d\" NbUse=\"%d\" NbDestroy=\"%d\"/>\n", ptr->nb_created, ptr->nb_used, ptr->nb_del); count += ptr->nb_created + ptr->nb_used; deleted += ptr->nb_del; created += ptr->nb_created; fprintf(dump, "</NodeStat>\n"); } if (i) { fprintf(dump, "<CumulatedStat TotalNumberOfNodes=\"%d\" ReallyAllocatedNodes=\"%d\" DeletedNodes=\"%d\" NumberOfAttributes=\"%d\"/>\n", count, created, deleted, stats->nb_svg_attributes); fprintf(dump, "<DrawableNodesCumulatedStat TotalNumberOfNodes=\"%d\" ReallyAllocatedNodes=\"%d\" DeletedNodes=\"%d\"/>\n", draw_count, draw_created, draw_deleted); } fprintf(dump, "</NodeStatistics>\n"); created = count = deleted = 0; if (gf_list_count(stats->proto_stats)) { fprintf(dump, "<ProtoStatistics NumberOfProtoUsed=\"%d\">\n", gf_list_count(stats->proto_stats)); for (i=0; i<gf_list_count(stats->proto_stats); i++) { GF_NodeStats *ptr = gf_list_get(stats->proto_stats, i); fprintf(dump, "<ProtoStat ProtoName=\"%s\">\n", ptr->name); fprintf(dump, "<Instanciation NbObjects=\"%d\" NbUse=\"%d\" NbDestroy=\"%d\"/>\n", ptr->nb_created, ptr->nb_used, ptr->nb_del); count += ptr->nb_created + ptr->nb_used; deleted += ptr->nb_del; created += ptr->nb_created; fprintf(dump, "</ProtoStat>\n"); } if (i) fprintf(dump, "<CumulatedStat TotalNumberOfProtos=\"%d\" ReallyAllocatedProtos=\"%d\" DeletedProtos=\"%d\"/>\n", count, created, deleted); fprintf(dump, "</ProtoStatistics>\n"); } fprintf(dump, "<FixedValues min=\"%f\" max=\"%f\">\n", FIX2FLT( stats->min_fixed) , FIX2FLT( stats->max_fixed )); fprintf(dump, "<Resolutions scaleIntegerPart=\"%d\" scaleFracPart=\"%d\" coordIntegerPart=\"%d\" coordFracPart=\"%d\"/>\n", stats->scale_int_res_2d, stats->scale_frac_res_2d, stats->int_res_2d, stats->frac_res_2d); fprintf(dump, "</FixedValues>\n"); fprintf(dump, "<FieldStatistic FieldType=\"MFVec2f\">\n"); fprintf(dump, "<ParsingInfo NumParsed=\"%d\" NumRemoved=\"%d\"/>\n", stats->count_2d, stats->rem_2d); if (stats->count_2d) { fprintf(dump, "<ExtendInfo MinVec2f=\"%f %f\" MaxVec2f=\"%f %f\"/>\n", FIX2FLT( stats->min_2d.x) , FIX2FLT( stats->min_2d.y ), FIX2FLT( stats->max_2d.x ), FIX2FLT( stats->max_2d.y ) ); } fprintf(dump, "</FieldStatistic>\n"); fprintf(dump, "<FieldStatistic FieldType=\"MFVec3f\">\n"); fprintf(dump, "<ParsingInfo NumParsed=\"%d\" NumRemoved=\"%d\"/>", stats->count_3d, stats->rem_3d); if (stats->count_3d) { fprintf(dump, "<ExtendInfo MinVec3f=\"%f %f %f\" MaxVec3f=\"%f %f %f\"/>\n", FIX2FLT( stats->min_3d.x ), FIX2FLT( stats->min_3d.y ), FIX2FLT( stats->min_3d.z ), FIX2FLT( stats->max_3d.x ), FIX2FLT( stats->max_3d.y ), FIX2FLT( stats->max_3d.z ) ); } fprintf(dump, "</FieldStatistic>\n"); fprintf(dump, "<FieldStatistic FieldType=\"MF/SFColor\">\n"); fprintf(dump, "<ParsingInfo NumParsed=\"%d\" NumRemoved=\"%d\"/>", stats->count_color, stats->rem_color); fprintf(dump, "</FieldStatistic>\n"); fprintf(dump, "<FieldStatistic FieldType=\"MF/SFFloat\">\n"); fprintf(dump, "<ParsingInfo NumParsed=\"%d\" NumRemoved=\"%d\"/>", stats->count_float, stats->rem_float); fprintf(dump, 
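/* Bookkeeping used in dump_stats above: per node class, nb_created counts allocations,
 * nb_used counts USE re-instancings and nb_del counts deletions, so the live instance
 * count is nb_created + nb_used - nb_del; e.g. 5 created, 2 USEd and 1 deleted leaves
 * 6 drawable instances contributing to draw_count. */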
"</FieldStatistic>\n"); fprintf(dump, "<FieldStatistic FieldType=\"SFVec2f\">\n"); fprintf(dump, "<ParsingInfo NumParsed=\"%d\"/>", stats->count_2f); fprintf(dump, "</FieldStatistic>\n"); fprintf(dump, "<FieldStatistic FieldType=\"SFVec3f\">\n"); fprintf(dump, "<ParsingInfo NumParsed=\"%d\"/>", stats->count_3f); fprintf(dump, "</FieldStatistic>\n"); } static void ReorderAU(GF_List *sample_list, GF_AUContext *au) { u32 i; for (i=0; i<gf_list_count(sample_list); i++) { GF_AUContext *ptr = gf_list_get(sample_list, i); if ( /*time ordered*/ (ptr->timing_sec > au->timing_sec) /*set bifs first*/ || ((ptr->timing_sec == au->timing_sec) && (ptr->owner->streamType < au->owner->streamType)) ) { gf_list_insert(sample_list, au, i); return; } } gf_list_add(sample_list, au); } void dump_isom_scene_stats(char *file, char *inName, Bool is_final_name, u32 stat_level) { GF_Err e; FILE *dump; Bool close; u32 i, j, count; char szBuf[1024]; GF_SceneManager *ctx; GF_SceneLoader load; GF_StatManager *sm; GF_List *sample_list; GF_SceneGraph *scene_graph; dump = NULL; sm = NULL; sample_list = NULL; close = 0; scene_graph = gf_sg_new(); ctx = gf_sm_new(scene_graph); memset(&load, 0, sizeof(GF_SceneLoader)); load.fileName = file; load.ctx = ctx; if (get_file_type_by_ext(file) == 1) { load.isom = gf_isom_open(file, GF_ISOM_OPEN_READ, NULL); if (!load.isom) { M4_LOG(GF_LOG_ERROR, ("Cannot open file: %s\n", gf_error_to_string(gf_isom_last_error(NULL)))); gf_sm_del(ctx); gf_sg_del(scene_graph); return; } } e = gf_sm_load_init(&load); if (!e) e = gf_sm_load_run(&load); gf_sm_load_done(&load); if (e<0) goto exit; if (inName) { strcpy(szBuf, inName); if (!is_final_name) strcat(szBuf, "_stat.xml"); dump = gf_fopen(szBuf, "wt"); if (!dump) { M4_LOG(GF_LOG_ERROR, ("Failed to open %s for dumping\n", szBuf)); return; } close = 1; } else { dump = stdout; close = 0; } fprintf(stderr, "Analysing Scene\n"); fprintf(dump, "<?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n"); fprintf(dump, "<!-- Scene Graph Statistics Generated by MP4Box - GPAC "); if (! gf_sys_is_test_mode()) fprintf(dump, "%s ", gf_gpac_version()); fprintf(dump, "-->\n"); fprintf(dump, "<SceneStatistics file=\"%s\" DumpType=\"%s\">\n", gf_file_basename(file), (stat_level==1) ? "full scene" : ((stat_level==2) ? 
"AccessUnit based" : "SceneGraph after each AU")); sm = gf_sm_stats_new(); /*stat level 1: complete scene stat*/ if (stat_level == 1) { e = gf_sm_stats_for_scene(sm, ctx); if (!e) dump_stats(dump, gf_sm_stats_get(sm) ); goto exit; } /*re_order all BIFS-AUs*/ sample_list = gf_list_new(); /*configure all systems streams we're dumping*/ for (i=0; i<gf_list_count(ctx->streams); i++) { GF_StreamContext *sc = gf_list_get(ctx->streams, i); if (sc->streamType != GF_STREAM_SCENE) continue; for (j=0; j<gf_list_count(sc->AUs); j++) { GF_AUContext *au = gf_list_get(sc->AUs, j); ReorderAU(sample_list, au); } } count = gf_list_count(sample_list); for (i=0; i<count; i++) { GF_AUContext *au = gf_list_get(sample_list, i); for (j=0; j<gf_list_count(au->commands); j++) { GF_Command *com = gf_list_get(au->commands, j); /*stat level 2 - get command stats*/ if (stat_level==2) { e = gf_sm_stats_for_command(sm, com); if (e) goto exit; } /*stat level 3 - apply command*/ if (stat_level==3) gf_sg_command_apply(scene_graph, com, 0); } /*stat level 3: get graph stat*/ if (stat_level==3) { e = gf_sm_stats_for_graph(sm, scene_graph); if (e) goto exit; } if (stat_level==2) { fprintf(dump, "<AUStatistics StreamID=\"%d\" AUTime=\""LLD"\">\n", au->owner->ESID, au->timing); } else { fprintf(dump, "<GraphStatistics StreamID=\"%d\" AUTime=\""LLD"\">\n", au->owner->ESID, au->timing); } /*dump stats*/ dump_stats(dump, gf_sm_stats_get(sm) ); /*reset stats*/ gf_sm_stats_reset(sm); if (stat_level==2) { fprintf(dump, "</AUStatistics>\n"); } else { fprintf(dump, "</GraphStatistics>\n"); } gf_set_progress("Analysing AU", i+1, count); } exit: if (sample_list) gf_list_del(sample_list); if (sm) gf_sm_stats_del(sm); gf_sm_del(ctx); gf_sg_del(scene_graph); if (load.isom) gf_isom_delete(load.isom); if (e) { M4_LOG(GF_LOG_ERROR, ("Stats error: %s\n", gf_error_to_string(e))); } else { fprintf(dump, "</SceneStatistics>\n"); } if (dump && close) gf_fclose(dump); fprintf(stderr, "done\n"); } #endif /*GPAC_DISABLE_SCENE_STATS*/ #ifndef GPAC_DISABLE_VRML static void PrintFixed(Fixed val, Bool add_space) { if (add_space) fprintf(stderr, " "); if (val==FIX_MIN) fprintf(stderr, "-I"); else if (val==FIX_MAX) fprintf(stderr, "+I"); else fprintf(stderr, "%g", FIX2FLT(val)); } static void PrintNodeSFField(u32 type, void *far_ptr) { if (!far_ptr) return; switch (type) { case GF_SG_VRML_SFBOOL: fprintf(stderr, "%s", (*(SFBool *)far_ptr) ? 
"TRUE" : "FALSE"); break; case GF_SG_VRML_SFINT32: fprintf(stderr, "%d", (*(SFInt32 *)far_ptr)); break; case GF_SG_VRML_SFFLOAT: PrintFixed((*(SFFloat *)far_ptr), 0); break; case GF_SG_VRML_SFTIME: fprintf(stderr, "%g", (*(SFTime *)far_ptr)); break; case GF_SG_VRML_SFVEC2F: PrintFixed(((SFVec2f *)far_ptr)->x, 0); PrintFixed(((SFVec2f *)far_ptr)->y, 1); break; case GF_SG_VRML_SFVEC3F: PrintFixed(((SFVec3f *)far_ptr)->x, 0); PrintFixed(((SFVec3f *)far_ptr)->y, 1); PrintFixed(((SFVec3f *)far_ptr)->z, 1); break; case GF_SG_VRML_SFROTATION: PrintFixed(((SFRotation *)far_ptr)->x, 0); PrintFixed(((SFRotation *)far_ptr)->y, 1); PrintFixed(((SFRotation *)far_ptr)->z, 1); PrintFixed(((SFRotation *)far_ptr)->q, 1); break; case GF_SG_VRML_SFCOLOR: PrintFixed(((SFColor *)far_ptr)->red, 0); PrintFixed(((SFColor *)far_ptr)->green, 1); PrintFixed(((SFColor *)far_ptr)->blue, 1); break; case GF_SG_VRML_SFSTRING: if (((SFString*)far_ptr)->buffer) fprintf(stderr, "\"%s\"", ((SFString*)far_ptr)->buffer); else fprintf(stderr, "NULL"); break; } } #endif #ifndef GPAC_DISABLE_VRML static void do_print_node(GF_Node *node, GF_SceneGraph *sg, const char *name, u32 graph_type, Bool is_nodefield, Bool do_cov) { u32 nbF, i; GF_FieldInfo f; #ifndef GPAC_DISABLE_BIFS u8 qt, at; Fixed bmin, bmax; u32 nbBits; #endif /*GPAC_DISABLE_BIFS*/ nbF = gf_node_get_field_count(node); if (is_nodefield) { char szField[1024]; u32 tfirst, tlast; if (gf_node_get_field_by_name(node, szField, &f) != GF_OK) { M4_LOG(GF_LOG_ERROR, ("Field %s is not a member of node %s\n", szField, name)); return; } fprintf(stderr, "Allowed nodes in %s.%s:\n", name, szField); if (graph_type==1) { tfirst = GF_NODE_RANGE_FIRST_X3D; tlast = GF_NODE_RANGE_LAST_X3D; } else { tfirst = GF_NODE_RANGE_FIRST_MPEG4; tlast = GF_NODE_RANGE_LAST_MPEG4; } for (i=tfirst; i<tlast; i++) { GF_Node *tmp = gf_node_new(sg, i); gf_node_register(tmp, NULL); if (gf_node_in_table_by_tag(i, f.NDTtype)) { const char *nname = gf_node_get_class_name(tmp); if (nname && strcmp(nname, "Unknown Node")) { fprintf(stderr, "\t%s\n", nname); } } gf_node_unregister(tmp, NULL); } return; } if (do_cov) { u32 ndt; if (graph_type==0) { u32 all; gf_node_mpeg4_type_by_class_name(name); gf_bifs_get_child_table(node); all = gf_node_get_num_fields_in_mode(node, GF_SG_FIELD_CODING_ALL); for (i=0; i<all; i++) { u32 res; gf_sg_script_get_field_index(node, i, GF_SG_FIELD_CODING_ALL, &res); } gf_node_get_num_fields_in_mode(node, GF_SG_FIELD_CODING_DEF); gf_node_get_num_fields_in_mode(node, GF_SG_FIELD_CODING_IN); gf_node_get_num_fields_in_mode(node, GF_SG_FIELD_CODING_OUT); gf_node_get_num_fields_in_mode(node, GF_SG_FIELD_CODING_DYN); } else if (graph_type==1) gf_node_x3d_type_by_class_name(name); for (ndt=NDT_SFWorldNode; ndt<NDT_LAST; ndt++) { gf_node_in_table_by_tag(gf_node_get_tag(node), ndt); } } fprintf(stderr, "%s {\n", name); for (i=0; i<nbF; i++) { gf_node_get_field(node, i, &f); if (graph_type==2) { fprintf(stderr, "\t%s=\"...\"\n", f.name); continue; } fprintf(stderr, "\t%s %s %s", gf_sg_vrml_get_event_type_name(f.eventType, 0), gf_sg_vrml_get_field_type_name(f.fieldType), f.name); if (f.fieldType==GF_SG_VRML_SFNODE) fprintf(stderr, " NULL"); else if (f.fieldType==GF_SG_VRML_MFNODE) fprintf(stderr, " []"); else if (gf_sg_vrml_is_sf_field(f.fieldType)) { fprintf(stderr, " "); PrintNodeSFField(f.fieldType, f.far_ptr); } else { void *ptr; u32 j, sftype; GenMFField *mffield = (GenMFField *) f.far_ptr; fprintf(stderr, " ["); sftype = gf_sg_vrml_get_sf_type(f.fieldType); for (j=0; j<mffield->count; j++) { 
if (j) fprintf(stderr, " "); gf_sg_vrml_mf_get_item(f.far_ptr, f.fieldType, &ptr, j); PrintNodeSFField(sftype, ptr); } fprintf(stderr, "]"); } #ifndef GPAC_DISABLE_BIFS if (gf_bifs_get_aq_info(node, i, &qt, &at, &bmin, &bmax, &nbBits)) { if (qt) { fprintf(stderr, " #QP=%d", qt); if (qt==13) fprintf(stderr, " NbBits=%d", nbBits); if (bmin && bmax) { fprintf(stderr, " Bounds=["); PrintFixed(bmin, 0); fprintf(stderr, ","); PrintFixed(bmax, 0); fprintf(stderr, "]"); } } } #endif /*GPAC_DISABLE_BIFS*/ fprintf(stderr, "\n"); if (do_cov) { gf_node_get_field_by_name(node, (char *) f.name, &f); } } fprintf(stderr, "}\n\n"); } #endif u32 PrintNode(const char *name, u32 graph_type) { #ifdef GPAC_DISABLE_VRML M4_LOG(GF_LOG_ERROR, ("VRML/MPEG-4/X3D scene graph is disabled in this build of GPAC\n")); return 2; #else const char *std_name; GF_Node *node; GF_SceneGraph *sg; u32 tag; Bool is_nodefield = 0; char *sep = strchr(name, '.'); if (sep) { sep[0] = 0; is_nodefield = 1; } if (graph_type==1) { #ifndef GPAC_DISABLE_X3D tag = gf_node_x3d_type_by_class_name(name); std_name = "X3D"; #else M4_LOG(GF_LOG_ERROR, ("X3D node printing is not supported (X3D support disabled)\n")); return 2; #endif } else { tag = gf_node_mpeg4_type_by_class_name(name); std_name = "MPEG4"; } if (!tag) { M4_LOG(GF_LOG_ERROR, ("Unknown %s node %s\n", std_name, name)); return 2; } sg = gf_sg_new(); node = gf_node_new(sg, tag); if (!node) { M4_LOG(GF_LOG_ERROR, ("Node %s not supported in current build\n", name)); gf_sg_del(sg); return 2; } gf_node_register(node, NULL); name = gf_node_get_class_name(node); do_print_node(node, sg, name, graph_type, is_nodefield, GF_FALSE); gf_node_unregister(node, NULL); gf_sg_del(sg); #endif /*GPAC_DISABLE_VRML*/ return 1; } u32 PrintBuiltInNodes(char *arg_val, u32 dump_type) { #if !defined(GPAC_DISABLE_VRML) && !defined(GPAC_DISABLE_X3D) && !defined(GPAC_DISABLE_SVG) GF_SceneGraph *sg; u32 i, nb_in, nb_not_in, start_tag, end_tag; u32 graph_type; Bool dump_nodes = ((dump_type==1) || (dump_type==3)) ?
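/* PrintNode above accepts either a node name ("Transform") or a node.field pair
 * ("Transform.children") and, despite the const-qualified prototype, patches the dot
 * to a NUL terminator in place; callers must therefore pass writable storage (MP4Box
 * hands it argv strings). A hedged usage sketch:
 *
 *	char spec[] = "Group.children";
 *	PrintNode(spec, 0);  //0 = MPEG-4 node tables, 1 = X3D
 */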
1 : 0; if (dump_type==4) graph_type = 2; else if ((dump_type==2) || (dump_type==3)) graph_type = 1; else graph_type = 0; if (graph_type==1) { #if !defined(GPAC_DISABLE_VRML) && !defined(GPAC_DISABLE_X3D) start_tag = GF_NODE_RANGE_FIRST_X3D; end_tag = TAG_LastImplementedX3D; #else M4_LOG(GF_LOG_ERROR, ("X3D scene graph disabled in this build of GPAC\n")); return 2; #endif } else if (graph_type==2) { #ifdef GPAC_DISABLE_SVG M4_LOG(GF_LOG_ERROR, ("SVG scene graph disabled in this build of GPAC\n")); return 2; #else start_tag = GF_NODE_RANGE_FIRST_SVG; end_tag = GF_NODE_RANGE_LAST_SVG; #endif } else { #ifdef GPAC_DISABLE_VRML M4_LOG(GF_LOG_ERROR, ("VRML/MPEG-4 scene graph disabled in this build of GPAC\n")); return 2; #else start_tag = GF_NODE_RANGE_FIRST_MPEG4; end_tag = TAG_LastImplementedMPEG4; #endif } nb_in = nb_not_in = 0; sg = gf_sg_new(); if (graph_type==1) { fprintf(stderr, "Available X3D nodes in this build (dumping):\n"); } else if (graph_type==2) { fprintf(stderr, "Available SVG nodes in this build (dumping and LASeR coding):\n"); } else { fprintf(stderr, "Available MPEG-4 nodes in this build (encoding/decoding/dumping):\n"); } for (i=start_tag; i<end_tag; i++) { GF_Node *node = gf_node_new(sg, i); if (node) { gf_node_register(node, NULL); if (dump_nodes) { do_print_node(node, sg, gf_node_get_class_name(node), graph_type, GF_FALSE, GF_TRUE); } else { fprintf(stderr, " %s\n", gf_node_get_class_name(node)); } gf_node_unregister(node, NULL); nb_in++; } else { if (graph_type==2) break; nb_not_in++; } } gf_sg_del(sg); if (graph_type==2) { fprintf(stderr, "\n%d nodes supported\n", nb_in); } else { fprintf(stderr, "\n%d nodes supported - %d nodes not supported\n", nb_in, nb_not_in); } //coverage if (dump_nodes) { for (i=GF_SG_VRML_SFBOOL; i<GF_SG_VRML_SCRIPT_FUNCTION; i++) { void *fp = gf_sg_vrml_field_pointer_new(i); if (fp) { if (i==GF_SG_VRML_SFSCRIPT) gf_free(fp); else gf_sg_vrml_field_pointer_del(fp, i); } } } #else M4_LOG(GF_LOG_ERROR, ("No scene graph enabled in this MP4Box build\n")); #endif return 1; } u32 PrintBuiltInBoxes(char *argval, u32 do_cov) { u32 i, count=gf_isom_get_num_supported_boxes(); fprintf(stdout, "<Boxes>\n"); //index 0 is our internal unknown box handler for (i=1; i<count; i++) { gf_isom_dump_supported_box(i, stdout); if (do_cov) { u32 btype = gf_isom_get_supported_box_type(i); GF_Box *b=gf_isom_box_new(btype); if (b) { GF_Box *c=NULL; gf_isom_clone_box(b, &c); if (c) gf_isom_box_del(c); gf_isom_box_del(b); } } } fprintf(stdout, "</Boxes>\n"); return 1; } #if !defined(GPAC_DISABLE_ISOM_HINTING) && !defined(GPAC_DISABLE_ISOM_DUMP) void dump_isom_rtp(GF_ISOFile *file, char *inName, Bool is_final_name) { u32 i, j, size; FILE *dump; const char *sdp; if (inName) { char szBuf[1024]; strcpy(szBuf, inName); if (!is_final_name) strcat(szBuf, "_rtp.xml"); dump = gf_fopen(szBuf, "wt"); if (!dump) { M4_LOG(GF_LOG_ERROR, ("Failed to open %s\n", szBuf)); return; } } else { dump = stdout; } fprintf(dump, "<?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n"); fprintf(dump, "<!-- MP4Box RTP trace -->\n"); fprintf(dump, "<RTPFile>\n"); for (i=0; i<gf_isom_get_track_count(file); i++) { if (gf_isom_get_media_type(file, i+1) != GF_ISOM_MEDIA_HINT) continue; fprintf(dump, "<RTPHintTrack trackID=\"%d\">\n", gf_isom_get_track_id(file, i+1)); gf_isom_sdp_track_get(file, i+1, &sdp, &size); fprintf(dump, "<SDPInfo>%s</SDPInfo>", sdp); #ifndef GPAC_DISABLE_ISOM_HINTING for (j=0; j<gf_isom_get_sample_count(file, i+1); j++) { gf_isom_dump_hint_sample(file, i+1, j+1, dump); } #endif fprintf(dump, 
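/* PrintBuiltInBoxes above walks gf_isom_get_num_supported_boxes(); index 0 is the
 * internal unknown-box handler, hence the loops start at 1. A minimal standalone
 * sketch listing only the four-character codes; list_box_4ccs is an illustrative
 * name, not an MP4Box symbol. */
static void list_box_4ccs(void)
{
	u32 i, count = gf_isom_get_num_supported_boxes();
	for (i=1; i<count; i++) {
		fprintf(stdout, "%s\n", gf_4cc_to_str(gf_isom_get_supported_box_type(i)));
	}
}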
"</RTPHintTrack>\n"); } fprintf(dump, "</RTPFile>\n"); if (inName) gf_fclose(dump); } #endif void dump_isom_timestamps(GF_ISOFile *file, char *inName, Bool is_final_name, u32 dump_mode) { u32 i, j, k, count; Bool has_ctts_error, is_fragmented=GF_FALSE; FILE *dump; Bool skip_offset = ((dump_mode==2) || (dump_mode==4)) ? GF_TRUE : GF_FALSE; Bool check_ts = ((dump_mode==3) || (dump_mode==4)) ? GF_TRUE : GF_FALSE; struct _ts_info { u64 dts; s64 cts; }; struct _ts_info *timings = NULL; u32 nb_timings=0, nb_timings_alloc = 0; if (inName) { char szBuf[1024]; strcpy(szBuf, inName); if (!is_final_name) strcat(szBuf, "_ts.txt"); dump = gf_fopen(szBuf, "wt"); if (!dump) { M4_LOG(GF_LOG_ERROR, ("Failed to open %s\n", szBuf)); return; } } else { dump = stdout; } if (gf_isom_is_fragmented(file)) is_fragmented = GF_TRUE; has_ctts_error = GF_FALSE; for (i=0; i<gf_isom_get_track_count(file); i++) { s64 cts_dts_shift = gf_isom_get_cts_to_dts_shift(file, i+1); u32 has_cts_offset = gf_isom_has_time_offset(file, i+1); fprintf(dump, "#dumping track ID %d timing:\n", gf_isom_get_track_id(file, i + 1)); fprintf(dump, "Num\tDTS\tCTS\tSize\tRAP%s\tisLeading\tDependsOn\tDependedOn\tRedundant\tRAP-SampleGroup\tRoll-SampleGroup\tRoll-Distance", skip_offset ? "" : "\tOffset"); if (is_fragmented) { fprintf(dump, "\tfrag_start"); } fprintf(dump, "\n"); count = gf_isom_get_sample_count(file, i+1); if (has_cts_offset && check_ts) { if (nb_timings_alloc<count) { nb_timings_alloc = count; timings = gf_realloc(timings, sizeof (struct _ts_info) * count); } nb_timings = 0; } for (j=0; j<count; j++) { s64 cts; u64 dts, offset; u32 isLeading, dependsOn, dependedOn, redundant; Bool is_rap; GF_ISOSampleRollType roll_type; s32 roll_distance; u32 index; GF_ISOSample *samp = gf_isom_get_sample_info(file, i+1, j+1, &index, &offset); if (!samp) { fprintf(dump, " SAMPLE #%d IN TRACK #%d NOT THERE !!!\n", j+1, i+1); continue; } gf_isom_get_sample_flags(file, i+1, j+1, &isLeading, &dependsOn, &dependedOn, &redundant); gf_isom_get_sample_rap_roll_info(file, i+1, j+1, &is_rap, &roll_type, &roll_distance); dts = samp->DTS; cts = dts + (s32) samp->CTS_Offset; fprintf(dump, "Sample %d\tDTS "LLU"\tCTS "LLD"\t%d\t%d", j+1, dts, cts, samp->dataLength, samp->IsRAP); if (!skip_offset) fprintf(dump, "\t"LLU, offset); fprintf(dump, "\t%d\t%d\t%d\t%d\t%d\t%d\t%d", isLeading, dependsOn, dependedOn, redundant, is_rap, roll_type, roll_distance); if (cts< (s64) dts) { if (has_cts_offset==2) { if (cts_dts_shift && (cts+cts_dts_shift < (s64) dts)) { fprintf(dump, " #NEGATIVE CTS OFFSET!!!"); has_ctts_error = 1; } else if (!cts_dts_shift) { fprintf(dump, " #possible negative CTS offset (no cslg in file)"); } } else { fprintf(dump, " #NEGATIVE CTS OFFSET!!!"); has_ctts_error = 1; } } if (has_cts_offset && check_ts) { for (k=0; k<nb_timings; k++) { if (timings[k].dts==dts) { fprintf(dump, " #SAME DTS USED!!!"); has_ctts_error = 1; } if (timings[k].cts==cts) { fprintf(dump, " #SAME CTS USED!!! 
"); has_ctts_error = 1; } } timings[nb_timings].dts = dts; timings[nb_timings].cts = cts; nb_timings++; } gf_isom_sample_del(&samp); if (is_fragmented) { fprintf(dump, "\t%d", gf_isom_sample_is_fragment_start(file, i+1, j+1, NULL) ); } fprintf(dump, "\n"); gf_set_progress("Dumping track timing", j+1, count); } fprintf(dump, "\n\n"); gf_set_progress("Dumping track timing", count, count); } if (timings) gf_free(timings); if (inName) gf_fclose(dump); if (has_ctts_error) { M4_LOG(GF_LOG_ERROR, ("\tFile has CTTS table errors\n")); } } static u32 read_nal_size_hdr(u8 *ptr, u32 nalh_size) { u32 nal_size=0; u32 v = nalh_size; while (v) { nal_size |= (u8) *ptr; ptr++; v-=1; if (v) nal_size <<= 8; } return nal_size; } #ifndef GPAC_DISABLE_AV_PARSERS void gf_inspect_dump_nalu(FILE *dump, u8 *ptr, u32 ptr_size, Bool is_svc, HEVCState *hevc, AVCState *avc, VVCState *vvc, u32 nalh_size, Bool dump_crc, Bool is_encrypted); #endif static void dump_isom_nal_ex(GF_ISOFile *file, GF_ISOTrackID trackID, FILE *dump, u32 dump_flags) { u32 i, j, count, nb_descs, track, nalh_size, timescale, cur_extract_mode; s32 countRef; Bool is_adobe_protected = GF_FALSE; Bool is_cenc_protected = GF_FALSE; Bool is_hevc = GF_FALSE; Bool is_vvc = GF_FALSE; #ifndef GPAC_DISABLE_AV_PARSERS AVCState *avc_state = NULL; HEVCState *hevc_state = NULL; VVCState *vvc_state = NULL; #endif GF_AVCConfig *avccfg, *svccfg; GF_HEVCConfig *hevccfg, *lhvccfg; GF_VVCConfig *vvccfg; GF_NALUFFParam *slc; Bool has_svcc = GF_FALSE; track = gf_isom_get_track_by_id(file, trackID); count = gf_isom_get_sample_count(file, track); timescale = gf_isom_get_media_timescale(file, track); cur_extract_mode = gf_isom_get_nalu_extract_mode(file, track); nb_descs = gf_isom_get_sample_description_count(file, track); if (!nb_descs) { M4_LOG(GF_LOG_ERROR, ("Error: Track #%d has no sample description so is likely not NALU-based!\n", trackID)); return; } fprintf(dump, "<NALUTrack trackID=\"%d\" SampleCount=\"%d\" TimeScale=\"%d\">\n", trackID, count, timescale); #ifndef GPAC_DISABLE_AV_PARSERS #define DUMP_ARRAY(arr, name, loc, _is_svc)\ if (arr) {\ fprintf(dump, " <%sArray location=\"%s\">\n", name, loc);\ for (i=0; i<gf_list_count(arr); i++) {\ slc = gf_list_get(arr, i);\ fprintf(dump, " <NALU size=\"%d\" ", slc->size);\ gf_inspect_dump_nalu(dump, (u8 *) slc->data, slc->size, _is_svc, is_hevc ? hevc_state : NULL, avc_state, is_vvc ? vvc_state : NULL, nalh_size, (dump_flags&1) ? GF_TRUE : GF_FALSE, GF_FALSE);\ }\ fprintf(dump, " </%sArray>\n", name);\ }\ #else #define DUMP_ARRAY(arr, name, loc, _is_svc)\ if (arr) {\ fprintf(dump, " <%sArray location=\"%s\">\n", name, loc);\ for (i=0; i<gf_list_count(arr); i++) {\ slc = gf_list_get(arr, i);\ fprintf(dump, " <NALU size=\"%d\" ", slc->size);\ fprintf(dump, "/>\n");\ }\ fprintf(dump, " </%sArray>\n", name);\ }\ #endif nalh_size = 0; for (j=0; j<nb_descs; j++) { GF_AVCConfig *mvccfg; Bool is_svc; avccfg = gf_isom_avc_config_get(file, track, j+1); svccfg = gf_isom_svc_config_get(file, track, j+1); mvccfg = gf_isom_mvc_config_get(file, track, j+1); hevccfg = gf_isom_hevc_config_get(file, track, j+1); lhvccfg = gf_isom_lhvc_config_get(file, track, j+1); vvccfg = gf_isom_vvc_config_get(file, track, j+1); is_svc = (svccfg!=NULL) ? 
1:0; if (hevccfg || lhvccfg) { is_hevc = 1; #ifndef GPAC_DISABLE_AV_PARSERS GF_SAFEALLOC(hevc_state, HEVCState) #endif } else if (vvccfg) { is_vvc = 1; #ifndef GPAC_DISABLE_AV_PARSERS GF_SAFEALLOC(vvc_state, VVCState) #endif } else if (avccfg || svccfg || mvccfg) { #ifndef GPAC_DISABLE_AV_PARSERS GF_SAFEALLOC(avc_state, AVCState) #endif } //for tile tracks the hvcC is stored in the 'tbas' track if (!hevccfg && gf_isom_get_reference_count(file, track, GF_ISOM_REF_TBAS)) { u32 tk = 0; gf_isom_get_reference(file, track, GF_ISOM_REF_TBAS, 1, &tk); hevccfg = gf_isom_hevc_config_get(file, tk, 1); } fprintf(dump, " <NALUConfig>\n"); if (!avccfg && !svccfg && !hevccfg && !lhvccfg && !vvccfg) { M4_LOG(GF_LOG_ERROR, ("Error: Track #%d is not NALU or OBU based!\n", trackID)); return; } if (avccfg) { nalh_size = avccfg->nal_unit_size; DUMP_ARRAY(avccfg->sequenceParameterSets, "AVCSPS", "avcC", is_svc); DUMP_ARRAY(avccfg->pictureParameterSets, "AVCPPS", "avcC", is_svc) DUMP_ARRAY(avccfg->sequenceParameterSetExtensions, "AVCSPSEx", "avcC", is_svc) } if (is_svc) { if (!nalh_size) nalh_size = svccfg->nal_unit_size; DUMP_ARRAY(svccfg->sequenceParameterSets, "SVCSPS", "svcC", is_svc) DUMP_ARRAY(svccfg->pictureParameterSets, "SVCPPS", "svcC", is_svc) } if (mvccfg) { if (!nalh_size) nalh_size = mvccfg->nal_unit_size; DUMP_ARRAY(mvccfg->sequenceParameterSets, "SVCSPS", "mvcC", is_svc) DUMP_ARRAY(mvccfg->pictureParameterSets, "SVCPPS", "mvcC", is_svc) } if (hevccfg) { u32 idx; nalh_size = hevccfg->nal_unit_size; for (idx=0; idx<gf_list_count(hevccfg->param_array); idx++) { GF_NALUFFParamArray *ar = gf_list_get(hevccfg->param_array, idx); if (ar->type==GF_HEVC_NALU_SEQ_PARAM) { DUMP_ARRAY(ar->nalus, "HEVCSPS", "hvcC", 0) } else if (ar->type==GF_HEVC_NALU_PIC_PARAM) { DUMP_ARRAY(ar->nalus, "HEVCPPS", "hvcC", 0) } else if (ar->type==GF_HEVC_NALU_VID_PARAM) { DUMP_ARRAY(ar->nalus, "HEVCVPS", "hvcC", 0) } else { DUMP_ARRAY(ar->nalus, "HEVCUnknownPS", "hvcC", 0) } } } if (vvccfg) { u32 idx; nalh_size = vvccfg->nal_unit_size; for (idx=0; idx<gf_list_count(vvccfg->param_array); idx++) { GF_NALUFFParamArray *ar = gf_list_get(vvccfg->param_array, idx); if (ar->type==GF_VVC_NALU_SEQ_PARAM) { DUMP_ARRAY(ar->nalus, "VVCSPS", "vvcC", 0) } else if (ar->type==GF_VVC_NALU_PIC_PARAM) { DUMP_ARRAY(ar->nalus, "VVCPPS", "vvcC", 0) } else if (ar->type==GF_VVC_NALU_VID_PARAM) { DUMP_ARRAY(ar->nalus, "VVCVPS", "vvcC", 0) } else { DUMP_ARRAY(ar->nalus, "VVCUnknownPS", "vvcC", 0) } } } if (lhvccfg) { u32 idx; nalh_size = lhvccfg->nal_unit_size; for (idx=0; idx<gf_list_count(lhvccfg->param_array); idx++) { GF_NALUFFParamArray *ar = gf_list_get(lhvccfg->param_array, idx); if (ar->type==GF_HEVC_NALU_SEQ_PARAM) { DUMP_ARRAY(ar->nalus, "HEVCSPS", "lhcC", 0) } else if (ar->type==GF_HEVC_NALU_PIC_PARAM) { DUMP_ARRAY(ar->nalus, "HEVCPPS", "lhcC", 0) } else if (ar->type==GF_HEVC_NALU_VID_PARAM) { DUMP_ARRAY(ar->nalus, "HEVCVPS", "lhcC", 0) } else { DUMP_ARRAY(ar->nalus, "HEVCUnknownPS", "lhcC", 0) } } } fprintf(dump, " </NALUConfig>\n"); if (avccfg) gf_odf_avc_cfg_del(avccfg); if (svccfg) { gf_odf_avc_cfg_del(svccfg); has_svcc = GF_TRUE; } if (hevccfg) gf_odf_hevc_cfg_del(hevccfg); if (vvccfg) gf_odf_vvc_cfg_del(vvccfg); if (lhvccfg) gf_odf_hevc_cfg_del(lhvccfg); } /*fixme: for dumping encrypted track: we don't have neither avccfg nor svccfg*/ if (!nalh_size) nalh_size = 4; /*for testing dependency*/ countRef = gf_isom_get_reference_count(file, track, GF_ISOM_REF_SCAL); if (countRef > 0) { GF_ISOTrackID refTrackID; fprintf(dump, " 
<SCALReferences>\n"); for (i = 1; i <= (u32) countRef; i++) { gf_isom_get_reference_ID(file, track, GF_ISOM_REF_SCAL, i, &refTrackID); fprintf(dump, " <SCALReference number=\"%d\" refTrackID=\"%d\"/>\n", i, refTrackID); } fprintf(dump, " </SCALReferences>\n"); } fprintf(dump, " <NALUSamples>\n"); gf_isom_set_nalu_extract_mode(file, track, GF_ISOM_NALU_EXTRACT_INSPECT); is_adobe_protected = gf_isom_is_adobe_protection_media(file, track, 1); is_cenc_protected = gf_isom_is_cenc_media(file, track, 1); for (i=0; i<count; i++) { u64 dts, cts; Bool is_rap; u32 size, nal_size, idx, di; u8 *ptr; GF_ISOSample *samp = gf_isom_get_sample(file, track, i+1, &di); if (!samp) { fprintf(dump, "<!-- Unable to fetch sample %d -->\n", i+1); continue; } dts = samp->DTS; cts = dts + (s32) samp->CTS_Offset; is_rap = samp->IsRAP; if (!is_rap) gf_isom_get_sample_rap_roll_info(file, track, i+1, &is_rap, NULL, NULL); if (dump_flags&2) { fprintf(dump, " <Sample size=\"%d\" RAP=\"%d\"", samp->dataLength, is_rap); } else { fprintf(dump, " <Sample DTS=\""LLD"\" CTS=\""LLD"\" size=\"%d\" RAP=\"%d\"", dts, cts, samp->dataLength, is_rap); } if (nb_descs>1) fprintf(dump, " sample_description=\"%d\"", di); fprintf(dump, " >\n"); if (cts<dts) fprintf(dump, "<!-- NEGATIVE CTS OFFSET! -->\n"); idx = 1; ptr = samp->data; size = samp->dataLength; if (is_adobe_protected && size) { u8 encrypted_au = ptr[0]; if (encrypted_au) { fprintf(dump, " <!-- Sample number %d is an Adobe's protected sample: can not be dumped -->\n", i+1); fprintf(dump, " </Sample>\n\n"); gf_isom_sample_del(&samp); continue; } else { ptr++; size--; } } while (size) { if (size < nalh_size) { fprintf(dump, " <!-- Sample data truncated: %d bytes remain but NALU size header is %d bytes -->\n", size, nalh_size); break; } nal_size = read_nal_size_hdr(ptr, nalh_size); ptr += nalh_size; if (nal_size >= UINT_MAX-nalh_size || nalh_size + nal_size > size) { fprintf(dump, " <!-- NALU number %d is corrupted: size is %d but only %d remains -->\n", idx, nal_size, size); break; } else { fprintf(dump, " <NALU size=\"%d\" ", nal_size); #ifndef GPAC_DISABLE_AV_PARSERS Bool is_encrypted = 0; if (is_cenc_protected) { GF_Err e = gf_isom_get_sample_cenc_info(file, track, i + 1, &is_encrypted, NULL, NULL, NULL, NULL); if (e != GF_OK) { fprintf(dump, "dump_msg=\"Error %s while fetching encryption info for sample, assuming sample is encrypted\" ", gf_error_to_string(e) ); is_encrypted = GF_TRUE; } } gf_inspect_dump_nalu(dump, ptr, nal_size, has_svcc ?
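/* The sample walk above guards three failure modes before trusting nal_size: too few
 * bytes left for the size header itself, u32 overflow of nalh_size + nal_size, and a
 * payload running past the sample end. The same skeleton in isolation, using the
 * subtraction form of the bounds test so no separate overflow check is needed;
 * walk_nalus is an illustrative name, not an MP4Box symbol. */
static void walk_nalus(u8 *ptr, u32 size, u32 nalh_size)
{
	while (size >= nalh_size) {
		u32 nal_size = read_nal_size_hdr(ptr, nalh_size);
		if (nal_size > size - nalh_size) break; /*truncated payload*/
		/*...inspect nal_size bytes at ptr + nalh_size here...*/
		ptr += nalh_size + nal_size;
		size -= nalh_size + nal_size;
	}
}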
1 : 0, hevc_state, avc_state, vvc_state, nalh_size, dump_flags, is_encrypted); #else fprintf(dump, "/>\n"); #endif } idx++; ptr+=nal_size; size -= nal_size + nalh_size; } fprintf(dump, " </Sample>\n"); gf_isom_sample_del(&samp); fprintf(dump, "\n"); gf_set_progress("Analysing Track NALUs", i+1, count); } fprintf(dump, " </NALUSamples>\n"); fprintf(dump, "</NALUTrack>\n"); gf_isom_set_nalu_extract_mode(file, track, cur_extract_mode); #ifndef GPAC_DISABLE_AV_PARSERS if (hevc_state) gf_free(hevc_state); if (vvc_state) gf_free(vvc_state); if (avc_state) gf_free(avc_state); #endif } static void dump_isom_obu(GF_ISOFile *file, GF_ISOTrackID trackID, FILE *dump, Bool dump_crc); static void dump_qt_prores(GF_ISOFile *file, GF_ISOTrackID trackID, FILE *dump, Bool dump_crc); void dump_isom_nal(GF_ISOFile *file, GF_ISOTrackID trackID, char *inName, Bool is_final_name, u32 dump_flags) { Bool is_av1 = GF_FALSE; Bool is_prores = GF_FALSE; FILE *dump; if (inName) { GF_ESD* esd; char szBuf[GF_MAX_PATH]; strcpy(szBuf, inName); u32 track = gf_isom_get_track_by_id(file, trackID); esd = gf_isom_get_esd(file, track, 1); if (!esd || !esd->decoderConfig) { switch (gf_isom_get_media_subtype(file, track, 1)) { case GF_ISOM_SUBTYPE_AV01: is_av1 = GF_TRUE; break; case GF_QT_SUBTYPE_APCH: case GF_QT_SUBTYPE_APCO: case GF_QT_SUBTYPE_APCN: case GF_QT_SUBTYPE_APCS: case GF_QT_SUBTYPE_AP4X: case GF_QT_SUBTYPE_AP4H: is_prores = GF_TRUE; break; } } else if (esd->decoderConfig->objectTypeIndication == GF_CODECID_AV1) { is_av1 = GF_TRUE; } if (esd) gf_odf_desc_del((GF_Descriptor*)esd); if (!is_final_name) sprintf(szBuf, "%s_%d_%s.xml", inName, trackID, is_av1 ? "obu" : "nalu"); dump = gf_fopen(szBuf, "wt"); if (!dump) { M4_LOG(GF_LOG_ERROR, ("Failed to open %s for dumping\n", szBuf)); return; } } else { dump = stdout; } if (is_av1) dump_isom_obu(file, trackID, dump, dump_flags); else if (is_prores) dump_qt_prores(file, trackID, dump, dump_flags); else dump_isom_nal_ex(file, trackID, dump, dump_flags); if (inName) gf_fclose(dump); } #ifndef GPAC_DISABLE_AV_PARSERS void gf_inspect_dump_obu(FILE *dump, AV1State *av1, u8 *obu, u64 obu_length, ObuType obu_type, u64 obu_size, u32 hdr_size, Bool dump_crc); #endif static void dump_isom_obu(GF_ISOFile *file, GF_ISOTrackID trackID, FILE *dump, Bool dump_crc) { #ifndef GPAC_DISABLE_AV_PARSERS u32 i, count, track, timescale; AV1State av1; ObuType obu_type; u64 obu_size; u32 hdr_size; GF_BitStream *bs; u32 idx; track = gf_isom_get_track_by_id(file, trackID); gf_av1_init_state(&av1); av1.config = gf_isom_av1_config_get(file, track, 1); if (!av1.config) { M4_LOG(GF_LOG_ERROR, ("Error: Track #%d is not AV1!\n", trackID)); return; } count = gf_isom_get_sample_count(file, track); timescale = gf_isom_get_media_timescale(file, track); fprintf(dump, "<OBUTrack trackID=\"%d\" SampleCount=\"%d\" TimeScale=\"%d\">\n", trackID, count, timescale); fprintf(dump, " <OBUConfig>\n"); for (i=0; i<gf_list_count(av1.config->obu_array); i++) { GF_AV1_OBUArrayEntry *obu = gf_list_get(av1.config->obu_array, i); bs = gf_bs_new(obu->obu, (u32) obu->obu_length, GF_BITSTREAM_READ); gf_av1_parse_obu(bs, &obu_type, &obu_size, &hdr_size, &av1); gf_inspect_dump_obu(dump, &av1, obu->obu, obu->obu_length, obu_type, obu_size, hdr_size, dump_crc); gf_bs_del(bs); } fprintf(dump, " </OBUConfig>\n"); fprintf(dump, " <OBUSamples>\n"); for (i=0; i<count; i++) { u64 dts, cts; u32 size; u8 *ptr; GF_ISOSample *samp = gf_isom_get_sample(file, track, i+1, NULL); if (!samp) { fprintf(dump, "<!-- Unable to fetch sample %d -->\n", 
i+1); continue; } dts = samp->DTS; cts = dts + (s32) samp->CTS_Offset; fprintf(dump, " <Sample number=\"%d\" DTS=\""LLD"\" CTS=\""LLD"\" size=\"%d\" RAP=\"%d\" >\n", i+1, dts, cts, samp->dataLength, samp->IsRAP); if (cts<dts) fprintf(dump, "<!-- NEGATIVE CTS OFFSET! -->\n"); idx = 1; ptr = samp->data; size = samp->dataLength; bs = gf_bs_new(ptr, size, GF_BITSTREAM_READ); while (size) { gf_av1_parse_obu(bs, &obu_type, &obu_size, &hdr_size, &av1); if (obu_size > size) { fprintf(dump, " <!-- OBU number %d is corrupted: size is %d but only %d remains -->\n", idx, (u32) obu_size, size); break; } gf_inspect_dump_obu(dump, &av1, ptr, obu_size, obu_type, obu_size, hdr_size, dump_crc); ptr += obu_size; size -= (u32)obu_size; idx++; } gf_bs_del(bs); fprintf(dump, " </Sample>\n"); gf_isom_sample_del(&samp); fprintf(dump, "\n"); gf_set_progress("Analysing Track OBUs", i+1, count); } fprintf(dump, " </OBUSamples>\n"); fprintf(dump, "</OBUTrack>\n"); if (av1.config) gf_odf_av1_cfg_del(av1.config); gf_av1_reset_state(&av1, GF_TRUE); #endif } static void dump_qt_prores(GF_ISOFile *file, u32 trackID, FILE *dump, Bool dump_crc) { #ifndef GPAC_DISABLE_AV_PARSERS u32 i, count, track, timescale; track = gf_isom_get_track_by_id(file, trackID); count = gf_isom_get_sample_count(file, track); timescale = gf_isom_get_media_timescale(file, track); fprintf(dump, "<ProResTrack trackID=\"%d\" SampleCount=\"%d\" TimeScale=\"%d\">\n", trackID, count, timescale); for (i=0; i<count; i++) { void gf_inspect_dump_prores(FILE *dump, u8 *ptr, u64 frame_size, Bool dump_crc); u64 dts, cts; GF_ISOSample *samp = gf_isom_get_sample(file, track, i+1, NULL); if (!samp) { fprintf(dump, "<!-- Unable to fetch sample %d -->\n", i+1); continue; } dts = samp->DTS; cts = dts + (s32) samp->CTS_Offset; if (cts!=dts) fprintf(dump, "<!-- Wrong timing info (CTS "LLD" vs DTS "LLD") ! -->\n", cts, dts); if (!samp->IsRAP) fprintf(dump, "<!-- Wrong sync sample info, sample is not SAP1 ! 
-->\n"); fprintf(dump, " <Sample number=\"%d\" CTS=\""LLD"\" size=\"%d\">\n", i+1, cts, samp->dataLength); gf_inspect_dump_prores(dump, samp->data, samp->dataLength, dump_crc); fprintf(dump, " </Sample>\n"); gf_isom_sample_del(&samp); fprintf(dump, "\n"); gf_set_progress("Analysing ProRes Track", i+1, count); } fprintf(dump, "</ProResTrack>\n"); #endif } void dump_isom_saps(GF_ISOFile *file, GF_ISOTrackID trackID, u32 dump_saps_mode, char *inName, Bool is_final_name) { FILE *dump; u32 i, count; s64 media_offset=0; u32 track = gf_isom_get_track_by_id(file, trackID); if (inName) { char szBuf[GF_MAX_PATH]; strcpy(szBuf, inName); if (!is_final_name) sprintf(szBuf, "%s_%d_cues.xml", inName, trackID); dump = gf_fopen(szBuf, "wt"); if (!dump) { M4_LOG(GF_LOG_ERROR, ("Failed to open %s for dumping\n", szBuf)); return; } } else { dump = stdout; } fprintf(dump, "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n"); fprintf(dump, "<DASHCues xmlns=\"urn:gpac:dash:schema:cues:2018\">\n"); fprintf(dump, "<Stream id=\"%d\" timescale=\"%d\"", trackID, gf_isom_get_media_timescale(file, track) ); if (dump_saps_mode==4) { fprintf(dump, " mode=\"edit\""); gf_isom_get_edit_list_type(file, track, &media_offset); } fprintf(dump, ">\n"); count = gf_isom_get_sample_count(file, track); for (i=0; i<count; i++) { s64 cts, dts; u32 di; Bool traf_start = 0; u32 sap_type = 0; u64 doffset; GF_ISOSample *samp = gf_isom_get_sample_info(file, track, i+1, &di, &doffset); traf_start = gf_isom_sample_is_fragment_start(file, track, i+1, NULL); sap_type = samp->IsRAP; if (!sap_type) { Bool is_rap; GF_ISOSampleRollType roll_type; s32 roll_dist; gf_isom_get_sample_rap_roll_info(file, track, i+1, &is_rap, &roll_type, &roll_dist); if (roll_type) sap_type = SAP_TYPE_4; else if (is_rap) sap_type = SAP_TYPE_3; } if (!sap_type) { gf_isom_sample_del(&samp); continue; } dts = cts = samp->DTS; cts += samp->CTS_Offset; fprintf(dump, "<Cue sap=\"%d\"", sap_type); if (dump_saps_mode==4) { cts += media_offset; fprintf(dump, " cts=\""LLD"\"", cts); } else { if (!dump_saps_mode || (dump_saps_mode==1)) fprintf(dump, " sample=\"%d\"", i+1); if (!dump_saps_mode || (dump_saps_mode==2)) fprintf(dump, " cts=\""LLD"\"", cts); if (!dump_saps_mode || (dump_saps_mode==3)) fprintf(dump, " dts=\""LLD"\"", dts); } if (traf_start) fprintf(dump, " wasFragStart=\"yes\""); fprintf(dump, "/>\n"); gf_isom_sample_del(&samp); } fprintf(dump, "</Stream>\n"); fprintf(dump, "</DASHCues>\n"); if (inName) gf_fclose(dump); } #ifndef GPAC_DISABLE_ISOM_DUMP void dump_isom_ismacryp(GF_ISOFile *file, char *inName, Bool is_final_name) { u32 i, j; FILE *dump; if (inName) { char szBuf[1024]; strcpy(szBuf, inName); if (!is_final_name) strcat(szBuf, "_ismacryp.xml"); dump = gf_fopen(szBuf, "wt"); if (!dump) { M4_LOG(GF_LOG_ERROR, ("Failed to open %s for dumping\n", szBuf)); return; } } else { dump = stdout; } fprintf(dump, "<?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n"); fprintf(dump, "<!-- MP4Box ISMACryp trace -->\n"); fprintf(dump, "<ISMACrypFile>\n"); for (i=0; i<gf_isom_get_track_count(file); i++) { if (gf_isom_get_media_subtype(file, i+1, 1) != GF_ISOM_SUBTYPE_MPEG4_CRYP) continue; gf_isom_dump_ismacryp_protection(file, i+1, dump); fprintf(dump, "<ISMACrypTrack trackID=\"%d\">\n", gf_isom_get_track_id(file, i+1)); for (j=0; j<gf_isom_get_sample_count(file, i+1); j++) { gf_isom_dump_ismacryp_sample(file, i+1, j+1, dump); } fprintf(dump, "</ISMACrypTrack >\n"); } fprintf(dump, "</ISMACrypFile>\n"); if (inName) gf_fclose(dump); } void dump_isom_timed_text(GF_ISOFile *file, 
GF_ISOTrackID trackID, char *inName, Bool is_final_name, Bool is_convert, GF_TextDumpType dump_type) { FILE *dump; GF_Err e; u32 track; track = gf_isom_get_track_by_id(file, trackID); if (!track) { M4_LOG(GF_LOG_ERROR, ("Cannot find track ID %d\n", trackID)); return; } switch (gf_isom_get_media_type(file, track)) { case GF_ISOM_MEDIA_TEXT: case GF_ISOM_MEDIA_SUBT: break; default: M4_LOG(GF_LOG_ERROR, ("Track ID %d is not a 3GPP text track\n", trackID)); return; } if (inName) { char szBuf[1024]; char *ext; ext = ((dump_type==GF_TEXTDUMPTYPE_SVG) ? "svg" : ((dump_type==GF_TEXTDUMPTYPE_SRT) ? "srt" : "ttxt")); if (is_final_name) { strcpy(szBuf, inName) ; } else if (is_convert) sprintf(szBuf, "%s.%s", inName, ext) ; else sprintf(szBuf, "%s_%d_text.%s", inName, trackID, ext); dump = gf_fopen(szBuf, "wt"); if (!dump) { M4_LOG(GF_LOG_ERROR, ("Failed to open %s for dumping\n", szBuf)); return; } } else { dump = stdout; } e = gf_isom_text_dump(file, track, dump, dump_type); if (inName) gf_fclose(dump); if (e) { M4_LOG(GF_LOG_ERROR, ("Conversion failed (%s)\n", gf_error_to_string(e))); } else { fprintf(stderr, "Conversion done\n"); } } #endif /*GPAC_DISABLE_ISOM_DUMP*/ #ifndef GPAC_DISABLE_ISOM_HINTING void dump_isom_sdp(GF_ISOFile *file, char *inName, Bool is_final_name) { const char *sdp; u32 size, i; FILE *dump; if (inName) { char szBuf[1024]; strcpy(szBuf, inName); if (!is_final_name) { char *ext = strchr(szBuf, '.'); if (ext) ext[0] = 0; strcat(szBuf, "_sdp.txt"); } dump = gf_fopen(szBuf, "wt"); if (!dump) { M4_LOG(GF_LOG_ERROR, ("Failed to open %s for dumping\n", szBuf)); return; } } else { dump = stdout; fprintf(dump, "# File SDP content \n\n"); } //get the movie SDP gf_isom_sdp_get(file, &sdp, &size); if (sdp && size) fprintf(dump, "%s", sdp); fprintf(dump, "\r\n"); //then tracks for (i=0; i<gf_isom_get_track_count(file); i++) { if (gf_isom_get_media_type(file, i+1) != GF_ISOM_MEDIA_HINT) continue; gf_isom_sdp_track_get(file, i+1, &sdp, &size); fprintf(dump, "%s", sdp); } fprintf(dump, "\n\n"); if (inName) gf_fclose(dump); } #endif #ifndef GPAC_DISABLE_ISOM_DUMP GF_Err dump_isom_xml(GF_ISOFile *file, char *inName, Bool is_final_name, Bool do_track_dump, Bool merge_vtt_cues, Bool skip_init, Bool skip_samples) { GF_Err e; FILE *dump = stdout; Bool do_close=GF_FALSE; if (!file) return GF_ISOM_INVALID_FILE; if (inName) { char szBuf[1024]; strcpy(szBuf, inName); if (!is_final_name) { strcat(szBuf, do_track_dump ? 
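/* dump_isom_timed_text above derives the output name from the dump type and mode:
 * .svg, .srt or .ttxt extension, appended directly in convert mode or via a
 * _<trackID>_text suffix otherwise. Hypothetical calls with placeholder names:
 *
 *	dump_isom_timed_text(file, trackID, "movie", GF_FALSE, GF_TRUE, GF_TEXTDUMPTYPE_SRT);   //writes movie.srt
 *	dump_isom_timed_text(file, trackID, "movie", GF_FALSE, GF_FALSE, GF_TEXTDUMPTYPE_SRT);  //writes movie_<trackID>_text.srt
 */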
"_dump.xml" : "_info.xml"); } dump = gf_fopen(szBuf, "wt"); if (!dump) { M4_LOG(GF_LOG_ERROR, ("Failed to open %s\n", szBuf)); return GF_IO_ERR; } do_close=GF_TRUE; } fprintf(dump, "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n"); if (do_track_dump) { fprintf(dump, "<ISOBaseMediaFileTrace>\n"); } e = gf_isom_dump(file, dump, skip_init, skip_samples); if (e) { M4_LOG(GF_LOG_ERROR, ("Error dumping ISO structure\n")); } if (do_track_dump) { #ifndef GPAC_DISABLE_MEDIA_EXPORT u32 i; //because of dump mode we need to reopen in regular read mode to avoid mem leaks GF_ISOFile *the_file = gf_isom_open(gf_isom_get_filename(file), GF_ISOM_OPEN_READ, NULL); u32 tcount = gf_isom_get_track_count(the_file); fprintf(dump, "<Tracks>\n"); for (i=0; i<tcount; i++) { GF_MediaExporter dumper; GF_ISOTrackID trackID = gf_isom_get_track_id(the_file, i+1); u32 mtype = gf_isom_get_media_type(the_file, i+1); u32 msubtype = gf_isom_get_media_subtype(the_file, i+1, 1); Bool fmt_handled = GF_FALSE; memset(&dumper, 0, sizeof(GF_MediaExporter)); dumper.file = the_file; dumper.trackID = trackID; dumper.dump_file = dump; if (mtype == GF_ISOM_MEDIA_HINT) { #ifndef GPAC_DISABLE_ISOM_HINTING char *name=NULL; if (msubtype==GF_ISOM_SUBTYPE_RTP) name = "RTPHintTrack"; else if (msubtype==GF_ISOM_SUBTYPE_SRTP) name = "SRTPHintTrack"; else if (msubtype==GF_ISOM_SUBTYPE_RRTP) name = "RTPReceptionHintTrack"; else if (msubtype==GF_ISOM_SUBTYPE_RTCP) name = "RTCPReceptionHintTrack"; else if (msubtype==GF_ISOM_SUBTYPE_FLUTE) name = "FLUTEReceptionHintTrack"; else name = "UnknownHintTrack"; fprintf(dump, "<%s trackID=\"%d\">\n", name, trackID); #ifndef GPAC_DISABLE_ISOM_HINTING u32 j, scount=gf_isom_get_sample_count(the_file, i+1); for (j=0; j<scount; j++) { gf_isom_dump_hint_sample(the_file, i+1, j+1, dump); } #endif fprintf(dump, "</%s>\n", name); fmt_handled = GF_TRUE; #endif /*GPAC_DISABLE_ISOM_HINTING*/ } else if (gf_isom_get_avc_svc_type(the_file, i+1, 1) || gf_isom_get_hevc_lhvc_type(the_file, i+1, 1)) { dump_isom_nal_ex(the_file, trackID, dump, GF_FALSE); fmt_handled = GF_TRUE; } else if ((mtype==GF_ISOM_MEDIA_TEXT) || (mtype==GF_ISOM_MEDIA_SUBT) ) { if (msubtype==GF_ISOM_SUBTYPE_WVTT) { gf_webvtt_dump_iso_track(&dumper, i+1, merge_vtt_cues, GF_TRUE); fmt_handled = GF_TRUE; } else if ((msubtype==GF_ISOM_SUBTYPE_TX3G) || (msubtype==GF_ISOM_SUBTYPE_TEXT)) { gf_isom_text_dump(the_file, i+1, dump, GF_TEXTDUMPTYPE_TTXT_BOXES); fmt_handled = GF_TRUE; } } if (!fmt_handled) { dumper.flags = GF_EXPORT_NHML | GF_EXPORT_NHML_FULL; dumper.print_stats_graph = fs_dump_flags; gf_media_export(&dumper); } } #else return GF_NOT_SUPPORTED; #endif /*GPAC_DISABLE_MEDIA_EXPORT*/ gf_isom_delete(the_file); fprintf(dump, "</Tracks>\n"); fprintf(dump, "</ISOBaseMediaFileTrace>\n"); } if (do_close) gf_fclose(dump); return e; } #endif static char *format_duration(u64 dur, u32 timescale, char *szDur) { u32 h, m, s, ms; if ((dur==(u64) -1) || (dur==(u32) -1)) { strcpy(szDur, "Unknown"); return szDur; } dur = (u64) (( ((Double) (s64) dur)/timescale)*1000); h = (u32) (dur / 3600000); m = (u32) (dur/ 60000) - h*60; s = (u32) (dur/1000) - h*3600 - m*60; ms = (u32) (dur) - h*3600000 - m*60000 - s*1000; if (h<=24) { sprintf(szDur, "%02d:%02d:%02d.%03d", h, m, s, ms); } else { u32 d = (u32) (dur / 3600000 / 24); h = (u32) (dur/3600000)-24*d; if (d<=365) { sprintf(szDur, "%d Days, %02d:%02d:%02d.%03d", d, h, m, s, ms); } else { u32 y=0; while (d>365) { y++; d-=365; if (y%4) d--; } sprintf(szDur, "%d Years %d Days, %02d:%02d:%02d.%03d", y, d, h, m, s, ms); } } return 
szDur; } static char *format_date(u64 time, char *szTime) { time_t now; if (!time) { strcpy(szTime, "UNKNOWN DATE"); } else { time -= 2082844800; now = (u32) time; sprintf(szTime, "GMT %s", asctime(gf_gmtime(&now)) ); } return szTime; } void print_udta(GF_ISOFile *file, u32 track_number, Bool has_itags) { u32 i, count; count = gf_isom_get_udta_count(file, track_number); if (!count) return; if (has_itags) { for (i=0; i<count; i++) { u32 type; bin128 uuid; gf_isom_get_udta_type(file, track_number, i+1, &type, &uuid); if (type == GF_ISOM_BOX_TYPE_META) { count--; break; } } if (!count) return; } fprintf(stderr, "%d UDTA types: ", count); for (i=0; i<count; i++) { u32 j, type, nb_items, first=GF_TRUE; bin128 uuid; gf_isom_get_udta_type(file, track_number, i+1, &type, &uuid); nb_items = gf_isom_get_user_data_count(file, track_number, type, uuid); fprintf(stderr, "%s (%d) ", gf_4cc_to_str(type), nb_items); for (j=0; j<nb_items; j++) { u8 *udta=NULL; u32 udta_size; gf_isom_get_user_data(file, track_number, type, uuid, j+1, &udta, &udta_size); if (!udta) continue; if (udta_size && gf_utf8_is_legal(udta, udta_size)) { u32 idx; if (first) { fprintf(stderr, "\n"); first = GF_FALSE; } fprintf(stderr, "\t"); for (idx=0; idx<udta_size; idx++) { if (!udta[idx]) break; fprintf(stderr, "%c", udta[idx]); } fprintf(stderr, "\n"); } gf_free(udta); } } fprintf(stderr, "\n"); } GF_Err dump_isom_udta(GF_ISOFile *file, char *inName, Bool is_final_name, u32 dump_udta_type, u32 dump_udta_track) { u8 *data; FILE *t; bin128 uuid; u32 count, res; GF_Err e; memset(uuid, 0, 16); count = gf_isom_get_user_data_count(file, dump_udta_track, dump_udta_type, uuid); if (!count) { M4_LOG(GF_LOG_ERROR, ("No UDTA for type %s found\n", gf_4cc_to_str(dump_udta_type) )); return GF_NOT_FOUND; } data = NULL; count = 0; e = gf_isom_get_user_data(file, dump_udta_track, dump_udta_type, uuid, 0, &data, &count); if (e) { M4_LOG(GF_LOG_ERROR, ("Error dumping UDTA %s: %s\n", gf_4cc_to_str(dump_udta_type), gf_error_to_string(e) )); return e; } if (count<8) { M4_LOG(GF_LOG_ERROR, ("Invalid UDTA %s: size %d is smaller than its box header\n", gf_4cc_to_str(dump_udta_type), count )); gf_free(data); return GF_NON_COMPLIANT_BITSTREAM; } if (inName) { char szName[1024]; if (is_final_name) strcpy(szName, inName); else sprintf(szName, "%s_%s.udta", inName, gf_4cc_to_str(dump_udta_type) ); t = gf_fopen(szName, "wb"); if (!t) { gf_free(data); M4_LOG(GF_LOG_ERROR, ("Cannot open file %s\n", szName )); return GF_IO_ERR; } } else { t = stdout; } res = (u32) gf_fwrite(data+8, count-8, t); if (inName) gf_fclose(t); gf_free(data); if (count-8 != res) { M4_LOG(GF_LOG_ERROR, ("Error writing udta to file\n")); return GF_IO_ERR; } return GF_OK; } GF_Err dump_isom_chapters(GF_ISOFile *file, char *inName, Bool is_final_name, u32 dump_mode) { FILE *t; u32 i, count; u32 chap_tk = 0; count = gf_isom_get_chapter_count(file, 0); if (dump_mode==2) dump_mode = GF_TEXTDUMPTYPE_OGG_CHAP; else if (dump_mode==3) dump_mode = GF_TEXTDUMPTYPE_ZOOM_CHAP; else dump_mode = GF_TEXTDUMPTYPE_TTXT_CHAP; if (!count) { for (i=0; i<gf_isom_get_track_count(file); i++) { if (gf_isom_get_reference_count(file, i+1, GF_ISOM_REF_CHAP)) { GF_Err e = gf_isom_get_reference(file, i+1, GF_ISOM_REF_CHAP, 1, &chap_tk); if (!e) break; } } if (!chap_tk) { M4_LOG(GF_LOG_WARNING, ("No chapters or chapters track found in file\n")); return GF_OK; } fprintf(stderr, "Dumping chapter track %d\n", chap_tk); dump_isom_timed_text(file, gf_isom_get_track_id(file, chap_tk), inName, is_final_name, GF_FALSE, dump_mode); return GF_OK; } if (inName) { char szName[1024]; strcpy(szName, inName); if (!is_final_name) { if (dump_mode==GF_TEXTDUMPTYPE_OGG_CHAP) { strcat(szName, ".txt"); } else if
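/* format_date above converts ISOBMFF timestamps, which count seconds since
 * 1904-01-01 00:00:00 UTC, to Unix time by subtracting 2082844800 (the 24107 days
 * between 1904 and 1970, times 86400). E.g. a stored value of 3786912000 gives
 * 3786912000 - 2082844800 = 1704067200, i.e. 2024-01-01 00:00:00 UTC. As a one-line
 * helper (illustrative name):
 *
 *	static u64 mp4_time_to_unix(u64 mp4_time) { return mp4_time - 2082844800ULL; }
 */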
(dump_mode==GF_TEXTDUMPTYPE_ZOOM_CHAP) { strcat(szName, ".txt"); } else { strcat(szName, ".ttxt"); } } t = gf_fopen(szName, "wt"); if (!t) return GF_IO_ERR; } else { t = stdout; } if (dump_mode==GF_TEXTDUMPTYPE_TTXT_CHAP) { fprintf(t, "<?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n"); fprintf(t, "<TextStream version=\"1.1\">\n"); fprintf(t, "<TextStreamHeader width=\"0\" height=\"0\" layer=\"0\" translation_x=\"0\" translation_y=\"0\">\n"); fprintf(t, "<TextSampleDescription horizontalJustification=\"left\" backColor=\"0 0 0\" scroll=\"None\"/>\n"); fprintf(t, "</TextStreamHeader>\n"); } for (i=0; i<count; i++) { char szDur[100]; u64 chapter_time; const char *name; gf_isom_get_chapter(file, 0, i+1, &chapter_time, &name); if (dump_mode==GF_TEXTDUMPTYPE_OGG_CHAP) { fprintf(t, "CHAPTER%02d=%s\n", i+1, format_duration(chapter_time, 1000, szDur)); fprintf(t, "CHAPTER%02dNAME=%s\n", i+1, name); } else if (dump_mode==GF_TEXTDUMPTYPE_ZOOM_CHAP) { chapter_time /= 1000; fprintf(t, "AddChapterBySecond("LLD",%s)\n", chapter_time, name); } else { fprintf(t, "<TextSample sampleTime=\"%s\" sampleDescriptionIndex=\"1\" xml:space=\"preserve\">%s</TextSample>\n" , format_duration(chapter_time, 1000, szDur), name); } } if (dump_mode==GF_TEXTDUMPTYPE_TTXT_CHAP) { fprintf(t, "</TextStream>\n"); } if (inName) gf_fclose(t); return GF_OK; } static void dump_key_info(const u8 *key_info, u32 key_info_size, Bool is_protected) { if (!key_info) return; u32 j, k, kpos=3; u32 nb_keys = 1; if (key_info[0]) { nb_keys = key_info[1]; nb_keys <<= 8; nb_keys |= key_info[2]; } for (k=0; k<nb_keys; k++) { u8 constant_iv_size=0; u8 iv_size=key_info[kpos]; fprintf(stderr, "\t\tKID"); if (nb_keys>1) fprintf(stderr, "%d", k+1); fprintf(stderr, " "); for (j=0; j<16; j++) fprintf(stderr, "%02X", key_info[kpos+1+j]); kpos+=17; if (!iv_size && is_protected) { constant_iv_size = key_info[kpos]; kpos += 1 + constant_iv_size; } fprintf(stderr, " - %sIV size %d \n", constant_iv_size ? "const " : "", constant_iv_size ? constant_iv_size : iv_size); } } static void DumpMetaItem(GF_ISOFile *file, Bool root_meta, u32 tk_num, char *name) { char szInd[2]; u32 i, count, primary_id; u32 meta_type = gf_isom_get_meta_type(file, root_meta, tk_num); if (name[0]=='\t') { szInd[0] = '\t'; szInd[1] = 0; } else { szInd[0] = 0; } count = gf_isom_get_meta_item_count(file, root_meta, tk_num); primary_id = gf_isom_get_meta_primary_item_id(file, root_meta, tk_num); fprintf(stderr, "%s type: \"%s\" - %d resource item(s)\n", name, meta_type ?
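/* Layout of the key_info blob parsed by dump_key_info above, as inferred from the
 * parser itself (GPAC's internal CENC key serialization): byte 0 nonzero flags a
 * multi-key description, in which case bytes 1-2 carry the big-endian key count;
 * from offset 3, each key is a 1-byte IV size followed by a 16-byte KID, and when
 * the IV size is 0 a 1-byte constant-IV size plus that many constant-IV bytes
 * follow. */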
gf_4cc_to_str(meta_type) : "undefined", (count+(primary_id>0))); switch (gf_isom_has_meta_xml(file, root_meta, tk_num)) { case 1: fprintf(stderr, "%sMeta has XML resource\n", szInd); break; case 2: fprintf(stderr, "%sMeta has BinaryXML resource\n", szInd); break; } if (primary_id) { fprintf(stderr, "%sPrimary Item - ID %d\n", szInd, primary_id); } for (i=0; i<count; i++) { const char *it_name, *mime, *enc, *url, *urn; Bool self_ref; u32 ID; u32 it_type, cenc_scheme, cenc_version; GF_Err e = gf_isom_get_meta_item_info(file, root_meta, tk_num, i+1, &ID, &it_type, &cenc_scheme, &cenc_version, &self_ref, &it_name, &mime, &enc, &url, &urn); if (e) { fprintf(stderr, "%sItem #%d fetch info error: %s\n", szInd, i+1, gf_error_to_string(e) ); continue; } fprintf(stderr, "%sItem #%d: ID %d type %s", szInd, i+1, ID, gf_4cc_to_str(it_type)); if (self_ref) fprintf(stderr, " Self-Reference"); else if (it_name && it_name[0]) fprintf(stderr, " Name \"%s\"", it_name); if (mime) fprintf(stderr, " MIME: \"%s\"", mime); if (enc) fprintf(stderr, " ContentEncoding: \"%s\"", enc); if (meta_type == GF_META_ITEM_TYPE_PICT) { GF_ImageItemProperties img_props; e = gf_isom_get_meta_image_props(file, root_meta, tk_num, ID, &img_props); if (e) { fprintf(stderr, " invalid image properties !"); } else { u32 j; Bool chan_diff = 0; if (img_props.width && img_props.height) { fprintf(stderr, " size %ux%u", img_props.width, img_props.height); } if (img_props.hSpacing && img_props.vSpacing) { fprintf(stderr, " SAR %u/%u", img_props.hSpacing, img_props.vSpacing); } if (img_props.num_channels) { fprintf(stderr, " %d channel%s (", img_props.num_channels, (img_props.num_channels>1) ? "s" : ""); for (j=1; j<img_props.num_channels; j++) { if (img_props.bits_per_channel[0] != img_props.bits_per_channel[j]) chan_diff = 1; } if (chan_diff) { for (j=0; j<img_props.num_channels; j++) { if (j) fprintf(stderr, ","); fprintf(stderr, "%d", img_props.bits_per_channel[j]); } } else { fprintf(stderr, "%d", img_props.bits_per_channel[0]); } fprintf(stderr, " bpc)"); } if (img_props.hOffset || img_props.vOffset) fprintf(stderr, " Offset %ux%u", img_props.hOffset, img_props.vOffset); if (img_props.alpha) fprintf(stderr, " Alpha"); if (img_props.hidden) fprintf(stderr, " Hidden"); if (img_props.angle) fprintf(stderr, " Rotate %d", img_props.angle); if (img_props.mirror) fprintf(stderr, " Mirror %d", img_props.mirror); if (img_props.clap_hden || img_props.clap_wden) fprintf(stderr, " Clap %d/%d,%d/%d,%d/%d,%d/%d", img_props.clap_wnum, img_props.clap_wden, img_props.clap_hnum, img_props.clap_hden, img_props.clap_honum, img_props.clap_hoden, img_props.clap_vonum, img_props.clap_voden); } } if (cenc_scheme) { Bool is_protected; u8 skip_byte_block, crypt_byte_block; const u8 *key_info; u32 key_info_size; fprintf(stderr, " - Protection scheme: %s v0x%08X", gf_4cc_to_str(cenc_scheme), cenc_version); gf_isom_extract_meta_item_get_cenc_info(file, root_meta, tk_num, ID, &is_protected, &skip_byte_block, &crypt_byte_block, &key_info, &key_info_size, NULL, NULL, NULL, NULL); if (skip_byte_block && crypt_byte_block) fprintf(stderr, " - Pattern %d:%d", skip_byte_block, crypt_byte_block); fprintf(stderr, "\n"); dump_key_info(key_info, key_info_size, is_protected); } fprintf(stderr, "\n"); if (url) fprintf(stderr, "%sURL: %s\n", szInd, url); if (urn) fprintf(stderr, "%sURN: %s\n", szInd, urn); } } static void print_config_hash(GF_List *xps_array, char *szName) { u32 i, j; u8 hash[20]; for (i=0; i<gf_list_count(xps_array); i++) { GF_NALUFFParam *slc = 
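/* A minimal sketch of the item enumeration DumpMetaItem above is built on, keeping
 * only item IDs and types; list_meta_items is an illustrative name, not an MP4Box
 * symbol. */
static void list_meta_items(GF_ISOFile *file, Bool root_meta, u32 tk_num)
{
	u32 i, count = gf_isom_get_meta_item_count(file, root_meta, tk_num);
	for (i=0; i<count; i++) {
		u32 ID, it_type, cenc_scheme, cenc_version;
		Bool self_ref;
		const char *it_name, *mime, *enc, *url, *urn;
		if (gf_isom_get_meta_item_info(file, root_meta, tk_num, i+1, &ID, &it_type, &cenc_scheme, &cenc_version, &self_ref, &it_name, &mime, &enc, &url, &urn) != GF_OK) continue;
		fprintf(stderr, "item #%u: ID %u type %s\n", i+1, ID, gf_4cc_to_str(it_type));
	}
}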
gf_list_get(xps_array, i); gf_sha1_csum((u8 *) slc->data, slc->size, hash); fprintf(stderr, "\t%s#%d hash: ", szName, i+1); for (j=0; j<20; j++) fprintf(stderr, "%02X", hash[j]); fprintf(stderr, "\n"); } } void dump_hevc_track_info(GF_ISOFile *file, u32 trackNum, GF_HEVCConfig *hevccfg #if !defined(GPAC_DISABLE_AV_PARSERS) && !defined(GPAC_DISABLE_HEVC) , HEVCState *hevc_state #endif /*GPAC_DISABLE_AV_PARSERS && defined(GPAC_DISABLE_HEVC)*/ ) { #if !defined(GPAC_DISABLE_AV_PARSERS) && !defined(GPAC_DISABLE_HEVC) u32 idx; #endif u32 k; Bool non_hevc_base_layer=GF_FALSE; fprintf(stderr, "\t%s Info:", hevccfg->is_lhvc ? "LHVC" : "HEVC"); if (!hevccfg->is_lhvc) fprintf(stderr, " Profile %s @ Level %g - Chroma Format %s\n", gf_hevc_get_profile_name(hevccfg->profile_idc), ((Double)hevccfg->level_idc) / 30.0, gf_avc_hevc_get_chroma_format_name(hevccfg->chromaFormat)); fprintf(stderr, "\n"); fprintf(stderr, "\tNAL Unit length bits: %d", 8*hevccfg->nal_unit_size); if (!hevccfg->is_lhvc) fprintf(stderr, " - general profile compatibility 0x%08X\n", hevccfg->general_profile_compatibility_flags); fprintf(stderr, "\n"); fprintf(stderr, "\tParameter Sets: "); for (k=0; k<gf_list_count(hevccfg->param_array); k++) { GF_NALUFFParamArray *ar=gf_list_get(hevccfg->param_array, k); if (ar->type==GF_HEVC_NALU_SEQ_PARAM) { fprintf(stderr, "%d SPS ", gf_list_count(ar->nalus)); } if (ar->type==GF_HEVC_NALU_PIC_PARAM) { fprintf(stderr, "%d PPS ", gf_list_count(ar->nalus)); } if (ar->type==GF_HEVC_NALU_VID_PARAM) { fprintf(stderr, "%d VPS ", gf_list_count(ar->nalus)); #if !defined(GPAC_DISABLE_AV_PARSERS) && !defined(GPAC_DISABLE_HEVC) for (idx=0; idx<gf_list_count(ar->nalus); idx++) { GF_NALUFFParam *vps = gf_list_get(ar->nalus, idx); s32 ps_idx=gf_hevc_read_vps(vps->data, vps->size, hevc_state); if (hevccfg->is_lhvc && (ps_idx>=0)) { non_hevc_base_layer = ! hevc_state->vps[ps_idx].base_layer_internal_flag; } } #endif } } fprintf(stderr, "\n"); #if !defined(GPAC_DISABLE_AV_PARSERS) && !defined(GPAC_DISABLE_HEVC) for (k=0; k<gf_list_count(hevccfg->param_array); k++) { GF_NALUFFParamArray *ar=gf_list_get(hevccfg->param_array, k); u32 width, height; s32 par_n, par_d; if (ar->type !=GF_HEVC_NALU_SEQ_PARAM) continue; for (idx=0; idx<gf_list_count(ar->nalus); idx++) { GF_Err e; GF_NALUFFParam *sps = gf_list_get(ar->nalus, idx); par_n = par_d = -1; e = gf_hevc_get_sps_info_with_state(hevc_state, sps->data, sps->size, NULL, &width, &height, &par_n, &par_d); if (e==GF_OK) { fprintf(stderr, "\tSPS resolution %dx%d", width, height); if ((par_n>0) && (par_d>0)) { u32 tw, th; gf_isom_get_track_layout_info(file, trackNum, &tw, &th, NULL, NULL, NULL); fprintf(stderr, " - Pixel Aspect Ratio %d:%d - Indicated track size %d x %d", par_n, par_d, tw, th); } fprintf(stderr, "\n"); } else { M4_LOG(GF_LOG_ERROR, ("Failed to read SPS: %s\n\n", gf_error_to_string(e) )); } } } #endif if (!hevccfg->is_lhvc) fprintf(stderr, "\tBit Depth luma %d - Chroma %d - %d temporal layers\n", hevccfg->luma_bit_depth, hevccfg->chroma_bit_depth, hevccfg->numTemporalLayers); else fprintf(stderr, "\t%d temporal layers\n", hevccfg->numTemporalLayers); if (hevccfg->is_lhvc) { fprintf(stderr, "\t%sHEVC base layer - Complete representation %d\n", non_hevc_base_layer ? 
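/* In dump_hevc_track_info above the printed level is level_idc/30.0: HEVC stores
 * general_level_idc as 30 x the level number, so 93 -> 3.1, 120 -> 4.0, 123 -> 4.1
 * and 153 -> 5.1. */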
"Non-" : "", hevccfg->complete_representation); } for (k=0; k<gf_list_count(hevccfg->param_array); k++) { GF_NALUFFParamArray *ar=gf_list_get(hevccfg->param_array, k); if (ar->type==GF_HEVC_NALU_SEQ_PARAM) print_config_hash(ar->nalus, "SPS"); else if (ar->type==GF_HEVC_NALU_PIC_PARAM) print_config_hash(ar->nalus, "PPS"); else if (ar->type==GF_HEVC_NALU_VID_PARAM) print_config_hash(ar->nalus, "VPS"); } } void dump_vvc_track_info(GF_ISOFile *file, u32 trackNum, GF_VVCConfig *vvccfg #if !defined(GPAC_DISABLE_AV_PARSERS) , VVCState *vvc_state #endif /*GPAC_DISABLE_AV_PARSERS && defined(GPAC_DISABLE_HEVC)*/ ) { #if !defined(GPAC_DISABLE_AV_PARSERS) u32 idx; #endif u32 k; fprintf(stderr, "\tVVC Info:"); fprintf(stderr, " Profile %d @ Level %d - Chroma Format %s\n", vvccfg->general_profile_idc, vvccfg->general_level_idc, vvccfg->chromaformat_plus_one ? gf_avc_hevc_get_chroma_format_name(vvccfg->chromaformat_plus_one-1) : "n/a"); fprintf(stderr, "\n"); fprintf(stderr, "\tNAL Unit length bits: %d", 8*vvccfg->nal_unit_size); if (vvccfg->general_constraint_info && vvccfg->num_constraint_info && vvccfg->general_constraint_info[0]) { fprintf(stderr, " - general constraint info 0x"); for (idx=0; idx<vvccfg->num_constraint_info; idx++) { fprintf(stderr, "%02X", vvccfg->general_constraint_info[idx]); } } fprintf(stderr, "\n"); fprintf(stderr, "\tParameter Sets: "); for (k=0; k<gf_list_count(vvccfg->param_array); k++) { GF_NALUFFParamArray *ar=gf_list_get(vvccfg->param_array, k); if (ar->type==GF_VVC_NALU_SEQ_PARAM) { fprintf(stderr, "%d SPS ", gf_list_count(ar->nalus)); } if (ar->type==GF_VVC_NALU_PIC_PARAM) { fprintf(stderr, "%d PPS ", gf_list_count(ar->nalus)); } if (ar->type==GF_VVC_NALU_VID_PARAM) { fprintf(stderr, "%d VPS ", gf_list_count(ar->nalus)); #if !defined(GPAC_DISABLE_AV_PARSERS) && 0 //TODO for (idx=0; idx<gf_list_count(ar->nalus); idx++) { GF_NALUFFParam *vps = gf_list_get(ar->nalus, idx); s32 ps_idx=gf_hevc_read_vps(vps->data, vps->size, hevc_state); if (hevccfg->is_lhvc && (ps_idx>=0)) { non_hevc_base_layer = ! 
hevc_state->vps[ps_idx].base_layer_internal_flag; } } #endif } } fprintf(stderr, "\n"); #if !defined(GPAC_DISABLE_AV_PARSERS) && 0 //TODO for (k=0; k<gf_list_count(vvccfg->param_array); k++) { GF_NALUFFParamArray *ar=gf_list_get(vvccfg->param_array, k); u32 width, height; s32 par_n, par_d; if (ar->type !=GF_VVC_NALU_SEQ_PARAM) continue; for (idx=0; idx<gf_list_count(ar->nalus); idx++) { GF_Err e; GF_NALUFFParam *sps = gf_list_get(ar->nalus, idx); par_n = par_d = -1; e = gf_vvc_get_sps_info_with_state(vvc_state, sps->data, sps->size, NULL, &width, &height, &par_n, &par_d); if (e==GF_OK) { fprintf(stderr, "\tSPS resolution %dx%d", width, height); if ((par_n>0) && (par_d>0)) { u32 tw, th; gf_isom_get_track_layout_info(file, trackNum, &tw, &th, NULL, NULL, NULL); fprintf(stderr, " - Pixel Aspect Ratio %d:%d - Indicated track size %d x %d", par_n, par_d, tw, th); } fprintf(stderr, "\n"); } else { M4_LOG(GF_LOG_ERROR, ("\nFailed to read SPS: %s\n\n", gf_error_to_string(e) )); } } } #endif fprintf(stderr, "\tBit Depth %d - %d temporal layers\n", vvccfg->bit_depth_plus_one-1, vvccfg->numTemporalLayers); for (k=0; k<gf_list_count(vvccfg->param_array); k++) { GF_NALUFFParamArray *ar=gf_list_get(vvccfg->param_array, k); if (ar->type==GF_VVC_NALU_SEQ_PARAM) print_config_hash(ar->nalus, "SPS"); else if (ar->type==GF_VVC_NALU_PIC_PARAM) print_config_hash(ar->nalus, "PPS"); else if (ar->type==GF_VVC_NALU_VID_PARAM) print_config_hash(ar->nalus, "VPS"); } } void gf_inspect_format_timecode(const u8 *data, u32 size, u32 tmcd_flags, u32 tc_num, u32 tc_den, u32 tmcd_fpt, char szFmt[100]); void DumpTrackInfo(GF_ISOFile *file, GF_ISOTrackID trackID, Bool full_dump, Bool is_track_num, Bool dump_m4sys) { char szCodec[RFC6381_CODEC_NAME_SIZE_MAX]; Double scale, max_rate, rate; Bool is_od_track = 0; u32 trackNum, i, j, ts, mtype, msub_type, timescale, sr, nb_ch, count, alt_group, nb_groups, nb_edits, cdur, csize, bps, pfmt, codecid; u64 time_slice, dur, size; s32 cts_shift; GF_ESD *esd; char szDur[50]; char *lang; if (!is_track_num) { trackNum = gf_isom_get_track_by_id(file, trackID); } else { trackNum = trackID; trackID = gf_isom_get_track_id(file, trackNum); } if (!trackNum) { M4_LOG(GF_LOG_ERROR, ("No track with ID %d found\n", trackID)); return; } timescale = gf_isom_get_media_timescale(file, trackNum); fprintf(stderr, "# Track %d Info - ID %d - TimeScale %d\n", trackNum, trackID, timescale); dur = gf_isom_get_media_original_duration(file, trackNum); size = gf_isom_get_media_duration(file, trackNum); fprintf(stderr, "Media Duration %s ", format_duration(dur, timescale, szDur)); if (dur != size) fprintf(stderr, " (recomputed %s)", format_duration(size, timescale, szDur)); fprintf(stderr, "\n"); if (gf_isom_check_data_reference(file, trackNum, 1) != GF_OK) { M4_LOG(GF_LOG_WARNING, ("Track uses external data reference not supported by GPAC!\n")); } nb_edits = gf_isom_get_edits_count(file, trackNum); if (nb_edits) fprintf(stderr, "Track has %d edits: track duration is %s\n", nb_edits, format_duration(gf_isom_get_track_duration(file, trackNum), gf_isom_get_timescale(file), szDur)); cts_shift = gf_isom_get_composition_offset_shift(file, trackNum); if (cts_shift) fprintf(stderr, "Track composition offset shift (negative CTS offset): %d\n", cts_shift); if (gf_isom_is_track_in_root_od(file, trackNum) ) fprintf(stderr, "Track is present in Root OD\n"); if (!gf_isom_is_track_enabled(file, trackNum)) fprintf(stderr, "Track is disabled\n"); gf_isom_get_media_language(file, trackNum, &lang); fprintf(stderr, "Media Info: 
Language \"%s (%s)\" - ", GetLanguage(lang), lang ); gf_free(lang); mtype = gf_isom_get_media_type(file, trackNum); fprintf(stderr, "Type \"%s:", gf_4cc_to_str(mtype)); msub_type = gf_isom_get_mpeg4_subtype(file, trackNum, 1); if (!msub_type) msub_type = gf_isom_get_media_subtype(file, trackNum, 1); fprintf(stderr, "%s\" - %d samples\n", gf_4cc_to_str(msub_type), gf_isom_get_sample_count(file, trackNum)); pfmt = gf_pixel_fmt_from_qt_type(msub_type); codecid = gf_codec_id_from_isobmf(msub_type); count = gf_isom_get_track_kind_count(file, trackNum); for (i = 0; i < count; i++) { char *kind_scheme, *kind_value; gf_isom_get_track_kind(file, trackNum, i, &kind_scheme, &kind_value); fprintf(stderr, "Kind: %s - %s\n", kind_scheme ? kind_scheme : "null", kind_value ? kind_value : "null"); if (kind_scheme) gf_free(kind_scheme); if (kind_value) gf_free(kind_value); } if (gf_isom_is_track_fragmented(file, trackID) ) { u32 defaultDuration, defaultSize, defaultDescriptionIndex, defaultRandomAccess; u8 defaultPadding; u16 defaultDegradationPriority; u32 frag_samples; u64 frag_duration; gf_isom_get_fragmented_samples_info(file, trackID, &frag_samples, &frag_duration); fprintf(stderr, "Fragmented track: %d samples - Media Duration %s\n", frag_samples, format_duration(frag_duration, timescale, szDur)); gf_isom_get_fragment_defaults(file, trackNum, &defaultDuration, &defaultSize, &defaultDescriptionIndex, &defaultRandomAccess, &defaultPadding, &defaultDegradationPriority); fprintf(stderr, "Fragment sample defaults: duration %d size %d stsd %d sync %d padding %d degradation_priority %d\n", defaultDuration, defaultSize, defaultDescriptionIndex, defaultRandomAccess, (u32) defaultPadding, (u32) defaultDegradationPriority ); } if (!gf_isom_is_self_contained(file, trackNum, 1)) { const char *url, *urn; gf_isom_get_data_reference(file, trackNum, 1, &url, &urn); fprintf(stderr, "Media Data Location: %s\n", url ? 
url : urn); } if (full_dump) { const char *handler_name; gf_isom_get_handler_name(file, trackNum, &handler_name); fprintf(stderr, "Handler name: %s\n", handler_name); } print_udta(file, trackNum, GF_FALSE); if (gf_isom_is_video_handler_type(mtype) ) { s32 tx, ty; u32 w, h; u16 bit_depth; gf_isom_get_visual_info(file, trackNum, 1, &w, &h); gf_isom_get_visual_bit_depth(file, trackNum, 1, &bit_depth); fprintf(stderr, "Visual Sample Entry Info: width=%d height=%d (depth=%d bits)\n", w, h, (int)bit_depth); gf_isom_get_track_layout_info(file, trackNum, &w, &h, &tx, &ty, NULL); fprintf(stderr, "Visual Track layout: x=%d y=%d width=%d height=%d\n", tx, ty, w, h); } gf_isom_get_audio_info(file, trackNum, 1, &sr, &nb_ch, &bps); gf_isom_set_nalu_extract_mode(file, trackNum, GF_ISOM_NALU_EXTRACT_INSPECT); msub_type = gf_isom_get_media_subtype(file, trackNum, 1); if (msub_type==GF_ISOM_SUBTYPE_MPEG4_CRYP) gf_isom_get_original_format_type(file, trackNum, 1, &msub_type); if ((msub_type==GF_ISOM_SUBTYPE_MPEG4) || (msub_type==GF_ISOM_SUBTYPE_AVC_H264) || (msub_type==GF_ISOM_SUBTYPE_AVC2_H264) || (msub_type==GF_ISOM_SUBTYPE_AVC3_H264) || (msub_type==GF_ISOM_SUBTYPE_AVC4_H264) || (msub_type==GF_ISOM_SUBTYPE_SVC_H264) || (msub_type==GF_ISOM_SUBTYPE_MVC_H264) || (msub_type==GF_ISOM_SUBTYPE_LSR1) || (msub_type==GF_ISOM_SUBTYPE_HVC1) || (msub_type==GF_ISOM_SUBTYPE_HEV1) || (msub_type==GF_ISOM_SUBTYPE_HVC2) || (msub_type==GF_ISOM_SUBTYPE_HEV2) || (msub_type==GF_ISOM_SUBTYPE_LHV1) || (msub_type==GF_ISOM_SUBTYPE_LHE1) || (msub_type==GF_ISOM_SUBTYPE_HVT1) ) { esd = gf_isom_get_esd(file, trackNum, 1); if (!esd || !esd->decoderConfig) { M4_LOG(GF_LOG_WARNING, ("WARNING: Broken MPEG-4 Track\n")); if (esd) gf_odf_desc_del((GF_Descriptor *)esd); } else { const char *st = gf_stream_type_name(esd->decoderConfig->streamType); if (dump_m4sys) { if (st) { fprintf(stderr, "MPEG-4 Config%s%s Stream - ObjectTypeIndication 0x%02x\n", full_dump ? "\n\t" : ": ", st, esd->decoderConfig->objectTypeIndication); } else { fprintf(stderr, "MPEG-4 Config%sStream Type 0x%02x - ObjectTypeIndication 0x%02x\n", full_dump ? 
"\n\t" : ": ", esd->decoderConfig->streamType, esd->decoderConfig->objectTypeIndication); } } if (esd->decoderConfig->streamType==GF_STREAM_OD) is_od_track=1; if (esd->decoderConfig->streamType==GF_STREAM_VISUAL) { u32 w, h; u16 rvc_predef; w = h = 0; if (esd->decoderConfig->objectTypeIndication==GF_CODECID_MPEG4_PART2) { #ifndef GPAC_DISABLE_AV_PARSERS if (!esd->decoderConfig->decoderSpecificInfo) { #else gf_isom_get_visual_info(file, trackNum, 1, &w, &h); fprintf(stderr, "MPEG-4 Visual Size %d x %d\n", w, h); #endif M4_LOG(GF_LOG_WARNING, ("Non-compliant MPEG-4 Visual track: video_object_layer infos not found in sample description\n")); #ifndef GPAC_DISABLE_AV_PARSERS } else { GF_M4VDecSpecInfo dsi; gf_m4v_get_config(esd->decoderConfig->decoderSpecificInfo->data, esd->decoderConfig->decoderSpecificInfo->dataLength, &dsi); if (full_dump) fprintf(stderr, "\t"); w = dsi.width; h = dsi.height; fprintf(stderr, "MPEG-4 Visual Size %d x %d - %s\n", w, h, gf_m4v_get_profile_name(dsi.VideoPL)); if (dsi.par_den && dsi.par_num) { u32 tw, th; gf_isom_get_track_layout_info(file, trackNum, &tw, &th, NULL, NULL, NULL); fprintf(stderr, "Pixel Aspect Ratio %d:%d - Indicated track size %d x %d\n", dsi.par_num, dsi.par_den, tw, th); } } #endif } else if (gf_isom_get_avc_svc_type(file, trackNum, 1) != GF_ISOM_AVCTYPE_NONE) { GF_AVCConfig *avccfg, *svccfg, *mvccfg; gf_isom_get_visual_info(file, trackNum, 1, &w, &h); if (full_dump) fprintf(stderr, "\t"); fprintf(stderr, "AVC/H264 Video - Visual Size %d x %d\n", w, h); avccfg = gf_isom_avc_config_get(file, trackNum, 1); svccfg = gf_isom_svc_config_get(file, trackNum, 1); mvccfg = gf_isom_mvc_config_get(file, trackNum, 1); if (!avccfg && !svccfg && !mvccfg) { M4_LOG(GF_LOG_ERROR, ("\tNon-compliant AVC track: SPS/PPS not found in sample description\n")); } else if (avccfg) { fprintf(stderr, "\tAVC Info: %d SPS - %d PPS", gf_list_count(avccfg->sequenceParameterSets) , gf_list_count(avccfg->pictureParameterSets) ); fprintf(stderr, " - Profile %s @ Level %g\n", gf_avc_get_profile_name(avccfg->AVCProfileIndication), ((Double)avccfg->AVCLevelIndication)/10.0 ); fprintf(stderr, "\tNAL Unit length bits: %d\n", 8*avccfg->nal_unit_size); #ifndef GPAC_DISABLE_AV_PARSERS for (i=0; i<gf_list_count(avccfg->sequenceParameterSets); i++) { s32 par_n, par_d; GF_NALUFFParam *slc = gf_list_get(avccfg->sequenceParameterSets, i); gf_avc_get_sps_info(slc->data, slc->size, NULL, NULL, NULL, &par_n, &par_d); if ((par_n>0) && (par_d>0)) { u32 tw, th; gf_isom_get_track_layout_info(file, trackNum, &tw, &th, NULL, NULL, NULL); fprintf(stderr, "\tPixel Aspect Ratio %d:%d - Indicated track size %d x %d\n", par_n, par_d, tw, th); } if (!full_dump) break; } #endif if (avccfg->chroma_bit_depth) { fprintf(stderr, "\tChroma format %s - Luma bit depth %d - chroma bit depth %d\n", gf_avc_hevc_get_chroma_format_name(avccfg->chroma_format), avccfg->luma_bit_depth, avccfg->chroma_bit_depth); } print_config_hash(avccfg->sequenceParameterSets, "SPS"); print_config_hash(avccfg->pictureParameterSets, "PPS"); gf_odf_avc_cfg_del(avccfg); } if (svccfg) { fprintf(stderr, "\n\tSVC Info: %d SPS - %d PPS - Profile %s @ Level %g\n", gf_list_count(svccfg->sequenceParameterSets) , gf_list_count(svccfg->pictureParameterSets), gf_avc_get_profile_name(svccfg->AVCProfileIndication), ((Double)svccfg->AVCLevelIndication)/10.0 ); fprintf(stderr, "\tSVC NAL Unit length bits: %d\n", 8*svccfg->nal_unit_size); #ifndef GPAC_DISABLE_AV_PARSERS for (i=0; i<gf_list_count(svccfg->sequenceParameterSets); i++) { GF_NALUFFParam *slc 
= gf_list_get(svccfg->sequenceParameterSets, i); if (slc) { s32 par_n, par_d; u32 s_w, s_h, sps_id; gf_avc_get_sps_info(slc->data, slc->size, &sps_id, &s_w, &s_h, &par_n, &par_d); fprintf(stderr, "\t\tSPS ID %d - Visual Size %d x %d\n", sps_id, s_w, s_h); if ((par_n>0) && (par_d>0)) { u32 tw, th; gf_isom_get_track_layout_info(file, trackNum, &tw, &th, NULL, NULL, NULL); fprintf(stderr, "\tPixel Aspect Ratio %d:%d - Indicated track size %d x %d\n", par_n, par_d, tw, th); } } } #endif print_config_hash(svccfg->sequenceParameterSets, "SPS"); print_config_hash(svccfg->pictureParameterSets, "PPS"); print_config_hash(svccfg->sequenceParameterSetExtensions, "SPSEx"); gf_odf_avc_cfg_del(svccfg); } if (mvccfg) { fprintf(stderr, "\n\tMVC Info: %d SPS - %d PPS - Profile %s @ Level %g\n", gf_list_count(mvccfg->sequenceParameterSets) , gf_list_count(mvccfg->pictureParameterSets), gf_avc_get_profile_name(mvccfg->AVCProfileIndication), ((Double)mvccfg->AVCLevelIndication)/10.0 ); fprintf(stderr, "\tMVC NAL Unit length bits: %d\n", 8*mvccfg->nal_unit_size); #ifndef GPAC_DISABLE_AV_PARSERS for (i=0; i<gf_list_count(mvccfg->sequenceParameterSets); i++) { GF_NALUFFParam *slc = gf_list_get(mvccfg->sequenceParameterSets, i); if (slc) { u32 s_w, s_h, sps_id; s32 par_n, par_d; gf_avc_get_sps_info(slc->data, slc->size, &sps_id, &s_w, &s_h, &par_n, &par_d); fprintf(stderr, "\t\tSPS ID %d - Visual Size %d x %d\n", sps_id, s_w, s_h); if ((par_n>0) && (par_d>0)) { u32 tw, th; gf_isom_get_track_layout_info(file, trackNum, &tw, &th, NULL, NULL, NULL); fprintf(stderr, "\tPixel Aspect Ratio %d:%d - Indicated track size %d x %d\n", par_n, par_d, tw, th); } } } #endif print_config_hash(mvccfg->sequenceParameterSets, "SPS"); print_config_hash(mvccfg->pictureParameterSets, "PPS"); gf_odf_avc_cfg_del(mvccfg); } } else if ((esd->decoderConfig->objectTypeIndication==GF_CODECID_HEVC) || (esd->decoderConfig->objectTypeIndication==GF_CODECID_LHVC) ) { GF_HEVCConfig *hevccfg, *lhvccfg; GF_OperatingPointsInformation *oinf; #if !defined(GPAC_DISABLE_AV_PARSERS) && !defined(GPAC_DISABLE_HEVC) HEVCState hevc_state; memset(&hevc_state, 0, sizeof(HEVCState)); hevc_state.sps_active_idx = -1; #endif gf_isom_get_visual_info(file, trackNum, 1, &w, &h); if (full_dump) fprintf(stderr, "\t"); fprintf(stderr, "HEVC Video - Visual Size %d x %d\n", w, h); hevccfg = gf_isom_hevc_config_get(file, trackNum, 1); lhvccfg = gf_isom_lhvc_config_get(file, trackNum, 1); if (msub_type==GF_ISOM_SUBTYPE_HVT1) { const u8 *data; u32 tsize; u32 is_default, tx,ty,tw,th, id, independent; Bool full_frame; if (gf_isom_get_tile_info(file, trackNum, 1, &is_default, &id, &independent, &full_frame, &tx, &ty, &tw, &th)) { fprintf(stderr, "\tHEVC Tile - ID %d independent %d (x,y,w,h)=%d,%d,%d,%d \n", id, independent, tx, ty, tw, th); } else if (gf_isom_get_sample_group_info(file, trackNum, 1, GF_ISOM_SAMPLE_GROUP_TRIF, &is_default, &data, &tsize)) { fprintf(stderr, "\tHEVC Tile track containing a tile set\n"); } else { fprintf(stderr, "\tHEVC Tile track without tiling info\n"); } } else if (!hevccfg && !lhvccfg) { M4_LOG(GF_LOG_ERROR, ("\tNon-compliant HEVC track: No hvcC or shcC found in sample description\n")); } if (gf_isom_get_reference_count(file, trackNum, GF_ISOM_REF_SABT)) { fprintf(stderr, "\tHEVC Tile base track\n"); } if (hevccfg) { dump_hevc_track_info(file, trackNum, hevccfg #if !defined(GPAC_DISABLE_AV_PARSERS) && !defined(GPAC_DISABLE_HEVC) , &hevc_state #endif ); gf_odf_hevc_cfg_del(hevccfg); fprintf(stderr, "\n"); } if (lhvccfg) { 
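/* dump the layered (L-HEVC) config reusing the same hevc_state, so VPS data already parsed from the base hvcC remains available when checking for a non-HEVC base layer */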
dump_hevc_track_info(file, trackNum, lhvccfg #if !defined(GPAC_DISABLE_AV_PARSERS) && !defined(GPAC_DISABLE_HEVC) , &hevc_state #endif ); gf_odf_hevc_cfg_del(lhvccfg); } if (gf_isom_get_oinf_info(file, trackNum, &oinf)) { fprintf(stderr, "\n\tOperating Points Information -"); fprintf(stderr, " scalability_mask %d (", oinf->scalability_mask); switch (oinf->scalability_mask) { case 2: fprintf(stderr, "Multiview"); break; case 4: fprintf(stderr, "Spatial scalability"); break; case 8: fprintf(stderr, "Auxiliary"); break; default: fprintf(stderr, "unknown"); } /*TODO: need to dump more info ?*/ fprintf(stderr, ") num_profile_tier_level %d ", gf_list_count(oinf->profile_tier_levels) ); fprintf(stderr, " num_operating_points %d dependency layers %d \n", gf_list_count(oinf->operating_points), gf_list_count(oinf->dependency_layers) ); } } /*OGG media*/ else if (esd->decoderConfig->objectTypeIndication==GF_CODECID_THEORA) { char *szName; gf_isom_get_visual_info(file, trackNum, 1, &w, &h); if (full_dump) fprintf(stderr, "\t"); if (!strnicmp((char *) &esd->decoderConfig->decoderSpecificInfo->data[3], "theora", 6)) szName = "Theora"; else szName = "Unknown"; fprintf(stderr, "Ogg/%s video / GPAC Mux - Visual Size %d x %d\n", szName, w, h); } else { /*check if we know this codec from its OTI*/ u32 codec_id = gf_codecid_from_oti(GF_STREAM_VISUAL, esd->decoderConfig->objectTypeIndication); if (codec_id) { gf_isom_get_visual_info(file, trackNum, 1, &w, &h); fprintf(stderr, "%s - Visual Size %d x %d\n", gf_codecid_name(codec_id), w, h); } } if (!w || !h) { gf_isom_get_visual_info(file, trackNum, 1, &w, &h); if (full_dump) fprintf(stderr, "\t"); fprintf(stderr, "Visual Size %d x %d\n", w, h); } if (gf_isom_get_rvc_config(file, trackNum, 1, &rvc_predef, NULL, NULL, NULL)==GF_OK) { fprintf(stderr, "Has RVC signaled - Predefined configuration %d\n", rvc_predef); } } else if (esd->decoderConfig->streamType==GF_STREAM_AUDIO) { #ifndef GPAC_DISABLE_AV_PARSERS GF_M4ADecSpecInfo a_cfg; GF_Err e; u32 oti; #endif u32 codec_id; Bool is_mp2 = GF_FALSE; switch (esd->decoderConfig->objectTypeIndication) { case GF_CODECID_AAC_MPEG2_MP: case GF_CODECID_AAC_MPEG2_LCP: case GF_CODECID_AAC_MPEG2_SSRP: is_mp2 = GF_TRUE; /*fallthrough*/ case GF_CODECID_AAC_MPEG4: #ifndef GPAC_DISABLE_AV_PARSERS if (!esd->decoderConfig->decoderSpecificInfo) e = GF_NON_COMPLIANT_BITSTREAM; else e = gf_m4a_get_config(esd->decoderConfig->decoderSpecificInfo->data, esd->decoderConfig->decoderSpecificInfo->dataLength, &a_cfg); if (full_dump) fprintf(stderr, "\t"); if (e) { M4_LOG(GF_LOG_ERROR, ("Corrupted AAC Config\n")); } else { char *signaling = "implicit"; char *heaac = ""; if (!is_mp2 && a_cfg.has_sbr) { if (a_cfg.has_ps) heaac = "(HE-AAC v2) "; else heaac = "(HE-AAC v1) "; } if (a_cfg.base_object_type==2) { if (a_cfg.has_ps || a_cfg.has_sbr) signaling = "backward compatible"; } else { signaling = "hierarchical"; } fprintf(stderr, "%s (AOT=%d %s) %s- %d Channel(s) - SampleRate %d", gf_m4a_object_type_name(a_cfg.base_object_type), a_cfg.base_object_type, signaling, heaac, a_cfg.nb_chan, a_cfg.base_sr); if (is_mp2) fprintf(stderr, " (MPEG-2 Signaling)"); if (a_cfg.has_sbr) fprintf(stderr, " - SBR: SampleRate %d Type %s", a_cfg.sbr_sr, gf_m4a_object_type_name(a_cfg.sbr_object_type)); if (a_cfg.has_ps) fprintf(stderr, " - PS"); fprintf(stderr, "\n"); } #else fprintf(stderr, "MPEG-2/4 Audio - %d Channels - SampleRate %d\n", nb_ch, sr); #endif break; case GF_CODECID_MPEG2_PART3: case GF_CODECID_MPEG_AUDIO: if (msub_type == GF_ISOM_SUBTYPE_MPEG4_CRYP) { fprintf(stderr,
"MPEG-1/2 Audio - %d Channels - SampleRate %d\n", nb_ch, sr); } else { #ifndef GPAC_DISABLE_AV_PARSERS GF_ISOSample *samp = gf_isom_get_sample(file, trackNum, 1, &oti); if (samp) { u32 mhdr = GF_4CC((u8)samp->data[0], (u8)samp->data[1], (u8)samp->data[2], (u8)samp->data[3]); if (full_dump) fprintf(stderr, "\t"); fprintf(stderr, "%s Audio - %d Channel(s) - SampleRate %d - Layer %d\n", gf_mp3_version_name(mhdr), gf_mp3_num_channels(mhdr), gf_mp3_sampling_rate(mhdr), gf_mp3_layer(mhdr) ); gf_isom_sample_del(&samp); } else { M4_LOG(GF_LOG_ERROR, ("Error fetching sample: %s\n", gf_error_to_string(gf_isom_last_error(file)) )); } #else fprintf(stderr, "MPEG-1/2 Audio - %d Channels - SampleRate %d\n", nb_ch, sr); #endif } break; case GF_CODECID_EVRC: fprintf(stderr, "EVRC Audio - Sample Rate 8000 - 1 channel\n"); break; case GF_CODECID_SMV: fprintf(stderr, "SMV Audio - Sample Rate 8000 - 1 channel\n"); break; case GF_CODECID_QCELP: fprintf(stderr, "QCELP Audio - Sample Rate 8000 - 1 channel\n"); break; /*packetVideo hack for EVRC...*/ case GF_CODECID_EVRC_PV: if (esd->decoderConfig->decoderSpecificInfo && (esd->decoderConfig->decoderSpecificInfo->dataLength==8) && !strnicmp((char *)esd->decoderConfig->decoderSpecificInfo->data, "pvmm", 4)) { if (full_dump) fprintf(stderr, "\t"); fprintf(stderr, "EVRC Audio (PacketVideo Mux) - Sample Rate 8000 - 1 channel\n"); } break; default: codec_id = gf_codecid_from_oti(GF_STREAM_AUDIO, esd->decoderConfig->objectTypeIndication); if (codec_id) { fprintf(stderr, "%s - Sample Rate %d - %d channel(s)\n", gf_codecid_name(codec_id), sr, nb_ch); } break; } } else if (esd->decoderConfig->streamType==GF_STREAM_SCENE) { if (esd->decoderConfig->objectTypeIndication<=4) { GF_BIFSConfig *b_cfg = gf_odf_get_bifs_config(esd->decoderConfig->decoderSpecificInfo, esd->decoderConfig->objectTypeIndication); fprintf(stderr, "BIFS Scene description - %s stream\n", b_cfg->elementaryMasks ? "Animation" : "Command"); if (full_dump && !b_cfg->elementaryMasks) { fprintf(stderr, "\tWidth %d Height %d Pixel Metrics %s\n", b_cfg->pixelWidth, b_cfg->pixelHeight, b_cfg->pixelMetrics ? "yes" : "no"); } gf_odf_desc_del((GF_Descriptor *)b_cfg); } else if (esd->decoderConfig->objectTypeIndication==GF_CODECID_AFX) { u8 tag = esd->decoderConfig->decoderSpecificInfo ? esd->decoderConfig->decoderSpecificInfo->data[0] : 0xFF; const char *afxtype = gf_stream_type_afx_name(tag); fprintf(stderr, "AFX Stream - type %s (%d)\n", afxtype, tag); } else if (esd->decoderConfig->objectTypeIndication==GF_CODECID_FONT) { fprintf(stderr, "Font Data stream\n"); } else if (esd->decoderConfig->objectTypeIndication==GF_CODECID_LASER) { GF_LASERConfig l_cfg; gf_odf_get_laser_config(esd->decoderConfig->decoderSpecificInfo, &l_cfg); fprintf(stderr, "LASER Stream - %s\n", l_cfg.newSceneIndicator ? 
"Full Scene" : "Scene Segment"); } else if (esd->decoderConfig->objectTypeIndication==GF_CODECID_TEXT_MPEG4) { fprintf(stderr, "MPEG-4 Streaming Text stream\n"); } else if (esd->decoderConfig->objectTypeIndication==GF_CODECID_SYNTHESIZED_TEXTURE) { fprintf(stderr, "Synthetized Texture stream stream\n"); } else { M4_LOG(GF_LOG_WARNING, ("Unknown Systems stream OTI %d\n", esd->decoderConfig->objectTypeIndication)); } } /*sync is only valid if we open all tracks to take care of default MP4 sync..*/ if (!full_dump) { if (dump_m4sys) { if (!esd->OCRESID || (esd->OCRESID == esd->ESID)) fprintf(stderr, "Self-synchronized\n"); else fprintf(stderr, "Synchronized on stream %d\n", esd->OCRESID); } } else { fprintf(stderr, "\tDecoding Buffer size %d - Bitrate: avg %d - max %d kbps\n", esd->decoderConfig->bufferSizeDB, esd->decoderConfig->avgBitrate/1000, esd->decoderConfig->maxBitrate/1000); if (esd->dependsOnESID) fprintf(stderr, "\tDepends on stream %d for decoding\n", esd->dependsOnESID); else fprintf(stderr, "\tNo stream dependencies for decoding\n"); fprintf(stderr, "\tStreamPriority %d\n", esd->streamPriority); if (esd->URLString) fprintf(stderr, "\tRemote Data Source %s\n", esd->URLString); } gf_odf_desc_del((GF_Descriptor *) esd); } } else if (msub_type == GF_ISOM_SUBTYPE_AV01) { GF_AV1Config *av1c; u32 w, h; gf_isom_get_visual_info(file, trackNum, 1, &w, &h); fprintf(stderr, "\tAOM AV1 stream - Resolution %d x %d\n", w, h); av1c = gf_isom_av1_config_get(file, trackNum, 1); fprintf(stderr, "\tversion=%u, profile=%u, level_idx0=%u, tier=%u\n", (u32)av1c->version, (u32)av1c->seq_profile, (u32)av1c->seq_level_idx_0, (u32)av1c->seq_tier_0); fprintf(stderr, "\thigh_bitdepth=%u, twelve_bit=%u, monochrome=%u\n", (u32)av1c->high_bitdepth, (u32)av1c->twelve_bit, (u32)av1c->monochrome); fprintf(stderr, "\tchroma: subsampling_x=%u, subsampling_y=%u, sample_position=%u\n", (u32)av1c->chroma_subsampling_x, (u32)av1c->chroma_subsampling_y, (u32)av1c->chroma_sample_position); if (av1c->initial_presentation_delay_present) fprintf(stderr, "\tInitial presentation delay %u\n", (u32) av1c->initial_presentation_delay_minus_one+1); count = gf_list_count(av1c->obu_array); for (i=0; i<count; i++) { u8 hash[20]; GF_AV1_OBUArrayEntry *obu = gf_list_get(av1c->obu_array, i); gf_sha1_csum((u8*)obu->obu, (u32)obu->obu_length, hash); fprintf(stderr, "\tOBU#%d %s hash: ", i+1, gf_av1_get_obu_name(obu->obu_type) ); for (j=0; j<20; j++) fprintf(stderr, "%02X", hash[j]); fprintf(stderr, "\n"); } gf_odf_av1_cfg_del(av1c); } else if (msub_type == GF_ISOM_SUBTYPE_3GP_H263) { u32 w, h; gf_isom_get_visual_info(file, trackNum, 1, &w, &h); fprintf(stderr, "\t3GPP H263 stream - Resolution %d x %d\n", w, h); } else if (msub_type == GF_ISOM_SUBTYPE_MJP2) { u32 w, h; gf_isom_get_visual_info(file, trackNum, 1, &w, &h); fprintf(stderr, "\tMotionJPEG2000 stream - Resolution %d x %d\n", w, h); } else if ((msub_type == GF_ISOM_SUBTYPE_3GP_AMR) || (msub_type == GF_ISOM_SUBTYPE_3GP_AMR_WB)) { fprintf(stderr, "\t3GPP AMR%s stream - Sample Rate %d - %d channel(s) %d bps\n", (msub_type == GF_ISOM_SUBTYPE_3GP_AMR_WB) ? 
" Wide Band" : "", sr, nb_ch, (u32) bps); } else if (msub_type == GF_ISOM_SUBTYPE_3GP_EVRC) { fprintf(stderr, "\t3GPP EVRC stream - Sample Rate %d - %d channel(s) %d bps\n", sr, nb_ch, (u32) bps); } else if (msub_type == GF_ISOM_SUBTYPE_3GP_QCELP) { fprintf(stderr, "\t3GPP QCELP stream - Sample Rate %d - %d channel(s) %d bps\n", sr, nb_ch, (u32) bps); } else if (msub_type == GF_ISOM_SUBTYPE_MP3) { fprintf(stderr, "\tMPEG 1/2 Audio stream - Sample Rate %d - %d channel(s) %d bps\n", sr, nb_ch, (u32) bps); } else if ((msub_type == GF_ISOM_SUBTYPE_AC3) || (msub_type == GF_ISOM_SUBTYPE_EC3)) { u32 br = 0; const char *lfe = ""; Bool is_ec3 = (msub_type == GF_ISOM_SUBTYPE_EC3) ? GF_TRUE : GF_FALSE; #ifndef GPAC_DISABLE_AV_PARSERS GF_AC3Config *ac3 = gf_isom_ac3_config_get(file, trackNum, 1); if (ac3) { nb_ch = gf_ac3_get_channels(ac3->streams[0].acmod); for (i=0; i<ac3->streams[0].nb_dep_sub; ++i) { assert(ac3->streams[0].nb_dep_sub == 1); nb_ch += gf_ac3_get_channels(ac3->streams[0].chan_loc); } if (ac3->streams[0].lfon) lfe = ".1"; br = ac3->is_ec3 ? ac3->brcode : gf_ac3_get_bitrate(ac3->brcode); is_ec3 = ac3->is_ec3; gf_free(ac3); } #endif fprintf(stderr, "\t%s stream - Sample Rate %d - %d%s channel(s) - bitrate %d\n", is_ec3 ? "EC-3" : "AC-3", sr, nb_ch, lfe, br); } else if (msub_type == GF_ISOM_SUBTYPE_3GP_SMV) { fprintf(stderr, "\t3GPP SMV stream - Sample Rate %d - %d channel(s) %d bits per samples\n", sr, nb_ch, (u32) bps); } else if (msub_type == GF_ISOM_SUBTYPE_3GP_DIMS) { u32 w, h; GF_DIMSDescription dims; gf_isom_get_visual_info(file, trackNum, 1, &w, &h); gf_isom_get_dims_description(file, trackNum, 1, &dims); fprintf(stderr, "\t3GPP DIMS stream - size %d x %d - Profile %d - Level %d\n", w, h, dims.profile, dims.level); fprintf(stderr, "\tpathComponents: %d - useFullRequestHost: %s\n", dims.pathComponents, dims.fullRequestHost ? "yes" : "no"); fprintf(stderr, "\tstream type: %s - redundant: %s\n", dims.streamType ? "primary" : "secondary", (dims.containsRedundant==1) ? "main" : ((dims.containsRedundant==2) ? "redundant" : "main+redundant") ); if (dims.textEncoding[0]) fprintf(stderr, "\ttext encoding %s\n", dims.textEncoding); if (dims.contentEncoding[0]) fprintf(stderr, "\tcontent encoding %s\n", dims.contentEncoding); if (dims.content_script_types) fprintf(stderr, "\tscript languages %s\n", dims.content_script_types); } else if (mtype==GF_ISOM_MEDIA_HINT) { u32 refTrack; s32 refCount = gf_isom_get_reference_count(file, trackNum, GF_ISOM_REF_HINT); if (refCount>0) { fprintf(stderr, "Streaming Hint Track for track%s ", (refCount>1) ? 
"s" :""); for (i=0; i<(u32) refCount; i++) { gf_isom_get_reference(file, trackNum, GF_ISOM_REF_HINT, i+1, &refTrack); if (i) fprintf(stderr, " - "); fprintf(stderr, "ID %d", gf_isom_get_track_id(file, refTrack)); } fprintf(stderr, "\n"); } else { fprintf(stderr, "Streaming Hint Track (no refs)\n"); } #ifndef GPAC_DISABLE_ISOM_HINTING refCount = gf_isom_get_payt_count(file, trackNum); if (refCount>0) { for (i=0; i<(u32) refCount; i++) { const char *name = gf_isom_get_payt_info(file, trackNum, i+1, &refTrack); fprintf(stderr, "\tPayload ID %d: type %s\n", refTrack, name); } } #endif } else if (mtype==GF_ISOM_MEDIA_FLASH) { fprintf(stderr, "Macromedia Flash Movie\n"); } else if ((mtype==GF_ISOM_MEDIA_TEXT) || (mtype==GF_ISOM_MEDIA_SUBT) || (mtype==GF_ISOM_MEDIA_MPEG_SUBT)) { u32 w, h; s16 l; s32 tx, ty; const char *content_encoding = NULL; const char *mime = NULL; const char *config = NULL; const char *_namespace = NULL; const char *schema_loc = NULL; const char *auxiliary_mimes = NULL; gf_isom_get_track_layout_info(file, trackNum, &w, &h, &tx, &ty, &l); if (msub_type == GF_ISOM_SUBTYPE_SBTT) { gf_isom_stxt_get_description(file, trackNum, 1, &mime, &content_encoding, &config); fprintf(stderr, "Textual Subtitle Stream "); fprintf(stderr, "- mime %s", mime); if (content_encoding != NULL) { fprintf(stderr, " - encoding %s", content_encoding); } if (config != NULL) { fprintf(stderr, " - %d bytes config", (u32) strlen(config)); } } else if (msub_type == GF_ISOM_SUBTYPE_STXT) { gf_isom_stxt_get_description(file, trackNum, 1, &mime, &content_encoding, &config); fprintf(stderr, "Simple Timed Text Stream "); fprintf(stderr, "- mime %s", mime); if (content_encoding != NULL) { fprintf(stderr, " - encoding %s", content_encoding); } if (config != NULL) { fprintf(stderr, " - %d bytes config", (u32) strlen(config)); } } else if (msub_type == GF_ISOM_SUBTYPE_STPP) { gf_isom_xml_subtitle_get_description(file, trackNum, 1, &_namespace, &schema_loc, &auxiliary_mimes); fprintf(stderr, "XML Subtitle Stream "); fprintf(stderr, "- namespace %s", _namespace); if (schema_loc != NULL) { fprintf(stderr, " - schema-location %s", schema_loc); } if (auxiliary_mimes != NULL) { fprintf(stderr, " - auxiliary-mime-types %s", auxiliary_mimes); } } else { fprintf(stderr, "Unknown Text Stream"); } fprintf(stderr, "\n Size %d x %d - Translation X=%d Y=%d - Layer %d\n", w, h, tx, ty, l); } else if (mtype == GF_ISOM_MEDIA_META) { const char *content_encoding = NULL; if (msub_type == GF_ISOM_SUBTYPE_METT) { const char *mime = NULL; const char *config = NULL; gf_isom_stxt_get_description(file, trackNum, 1, &mime, &content_encoding, &config); fprintf(stderr, "Textual Metadata Stream - mime %s", mime); if (content_encoding != NULL) { fprintf(stderr, " - encoding %s", content_encoding); } if (config != NULL) { fprintf(stderr, " - %d bytes config", (u32) strlen(config)); } fprintf(stderr, "\n"); } else if (msub_type == GF_ISOM_SUBTYPE_METX) { const char *_namespace = NULL; const char *schema_loc = NULL; gf_isom_get_xml_metadata_description(file, trackNum, 1, &_namespace, &schema_loc, &content_encoding); fprintf(stderr, "XML Metadata Stream - namespace %s", _namespace); if (content_encoding != NULL) { fprintf(stderr, " - encoding %s", content_encoding); } if (schema_loc != NULL) { fprintf(stderr, " - schema-location %s", schema_loc); } fprintf(stderr, "\n"); } else { fprintf(stderr, "Unknown Metadata Stream\n"); } } else if ((msub_type==GF_ISOM_SUBTYPE_VVC1) || (msub_type==GF_ISOM_SUBTYPE_VVI1)) { GF_VVCConfig *vvccfg; u32 w, h; #if 
!defined(GPAC_DISABLE_AV_PARSERS) VVCState *vvc_state; GF_SAFEALLOC(vvc_state, VVCState); if (vvc_state) vvc_state->sps_active_idx = -1; #endif gf_isom_get_visual_info(file, trackNum, 1, &w, &h); if (full_dump) fprintf(stderr, "\t"); fprintf(stderr, "VVC Video - Visual Size %d x %d\n", w, h); vvccfg = gf_isom_vvc_config_get(file, trackNum, 1); if (!vvccfg) { M4_LOG(GF_LOG_ERROR, ("Non-compliant VVC track: No vvcC found in sample description\n")); } else { dump_vvc_track_info(file, trackNum, vvccfg #if !defined(GPAC_DISABLE_AV_PARSERS) , vvc_state #endif ); gf_odf_vvc_cfg_del(vvccfg); fprintf(stderr, "\n"); } #if !defined(GPAC_DISABLE_AV_PARSERS) if (vvc_state) gf_free(vvc_state); #endif } else if ((msub_type == GF_ISOM_SUBTYPE_MH3D_MHA1) || (msub_type == GF_ISOM_SUBTYPE_MH3D_MHA2) || (msub_type == GF_ISOM_SUBTYPE_MH3D_MHM1) || (msub_type == GF_ISOM_SUBTYPE_MH3D_MHM2) ) { const u8 *compat_profiles; u32 nb_compat_profiles; Bool valid = GF_FALSE; Bool allow_inband = GF_FALSE; if ( (msub_type == GF_ISOM_SUBTYPE_MH3D_MHM1) || (msub_type == GF_ISOM_SUBTYPE_MH3D_MHM2)) allow_inband = GF_TRUE; fprintf(stderr, "\tMPEG-H Audio stream - Sample Rate %d\n", sr); esd = gf_media_map_esd(file, trackNum, 1); if (!esd || !esd->decoderConfig || !esd->decoderConfig->decoderSpecificInfo || !esd->decoderConfig->decoderSpecificInfo->data ) { if (allow_inband) { GF_ISOSample *samp = gf_isom_get_sample(file, trackNum, 1, NULL); if (samp) { u64 ch_layout=0; s32 PL = gf_mpegh_get_mhas_pl(samp->data, samp->dataLength, &ch_layout); if (PL>=0) { fprintf(stderr, "\tProfileLevelIndication: 0x%02X", PL); if (ch_layout) fprintf(stderr, " - Reference Channel Layout %s", gf_audio_fmt_get_layout_name(ch_layout) ); fprintf(stderr, "\n"); } gf_isom_sample_del(&samp); } valid = GF_TRUE; } } else if (esd->decoderConfig->decoderSpecificInfo->dataLength>=5) { fprintf(stderr, "\tProfileLevelIndication: 0x%02X - Reference Channel Layout %s\n", esd->decoderConfig->decoderSpecificInfo->data[1] , gf_audio_fmt_get_layout_name_from_cicp(esd->decoderConfig->decoderSpecificInfo->data[2]) ); valid = GF_TRUE; } if (!valid) { M4_LOG(GF_LOG_ERROR, ("Invalid MPEG-H audio config\n")); } if (esd) gf_odf_desc_del((GF_Descriptor *)esd); compat_profiles = gf_isom_get_mpegh_compatible_profiles(file, trackNum, 1, &nb_compat_profiles); for (i=0; i<nb_compat_profiles; i++) { if (!i) fprintf(stderr, "\tCompatible profiles:"); fprintf(stderr, " 0x%02X", compat_profiles[i]); } if (i) fprintf(stderr, "\n"); } else if (msub_type==GF_ISOM_SUBTYPE_MLPA) { u32 fmt=0, prate=0; /*zero-init so we never print uninitialized values if the config cannot be read*/ if (gf_isom_truehd_config_get(file, trackNum, 1, &fmt, &prate) != GF_OK) { fprintf(stderr, "\tInvalid TrueHD audio config\n"); } fprintf(stderr, "TrueHD Audio stream - Sample Rate %u - channels %u - format %u - peak rate %u\n", sr, nb_ch, fmt, prate); } else if (codecid) { if (gf_isom_is_video_handler_type(mtype) ) { u32 w, h; gf_isom_get_visual_info(file, trackNum, 1, &w, &h); fprintf(stderr, "%s - Resolution %d x %d\n", gf_codecid_name(codecid), w, h); } else if (mtype==GF_ISOM_MEDIA_AUDIO) { gf_isom_get_audio_info(file, trackNum, 1, &sr, &nb_ch, NULL); fprintf(stderr, "%s - Sample Rate %d - %d channel(s)\n", gf_codecid_name(codecid), sr, nb_ch); } else { fprintf(stderr, "%s\n", gf_codecid_name(codecid) ); } } else if (pfmt) { u32 w, h; gf_isom_get_visual_info(file, trackNum, 1, &w, &h); fprintf(stderr, "Raw video %s - Resolution %d x %d\n", gf_pixel_fmt_name(pfmt), w, h); } else if (msub_type==GF_QT_SUBTYPE_TMCD) { u32 stsd_idx; GF_ISOSample *sample = gf_isom_get_sample(file, trackNum, 1,
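/* the first tmcd sample carries the starting frame counter; together with the sample description config (flags, timescale fraction, frames per tick) it is formatted below as a HH:MM:SS:FF timecode */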
&stsd_idx); fprintf(stderr, "Time Code stream\n"); if (sample) { char szTimecode[100]; u32 tmcd_flags, tmcd_num, tmcd_den, tmcd_fpt; gf_isom_get_tmcd_config(file, trackNum, stsd_idx, &tmcd_flags, &tmcd_num, &tmcd_den, &tmcd_fpt); gf_inspect_format_timecode(sample->data, sample->dataLength, tmcd_flags, tmcd_num, tmcd_den, tmcd_fpt, szTimecode); gf_isom_sample_del(&sample); fprintf(stderr, "\tFirst timecode: %s\n", szTimecode); } } else { GF_GenericSampleDescription *udesc; udesc = gf_isom_get_generic_sample_description(file, trackNum, 1); if (udesc) { if (gf_isom_is_video_handler_type(mtype) ) { fprintf(stderr, "%s - Compressor \"%s\" - Resolution %d x %d\n", ( (mtype == GF_ISOM_MEDIA_VISUAL ? "Visual" : "Auxiliary Video") ), udesc->compressor_name, udesc->width, udesc->height); } else if (mtype==GF_ISOM_MEDIA_AUDIO) { fprintf(stderr, "Audio - Sample Rate %d - %d channel(s)\n", udesc->samplerate, udesc->nb_channels); } else { fprintf(stderr, "Unknown media type\n"); } if (udesc->vendor_code) fprintf(stderr, "\tVendor code \"%s\" - Version %d - revision %d\n", gf_4cc_to_str(udesc->vendor_code), udesc->version, udesc->revision); if (udesc->extension_buf) { fprintf(stderr, "\tCodec configuration data size: %d bytes\n", udesc->extension_buf_size); gf_free(udesc->extension_buf); } gf_free(udesc); } else { fprintf(stderr, "Unknown track type\n"); } } /*Crypto info*/ if (gf_isom_is_track_encrypted(file, trackNum)) { const char *scheme_URI, *KMS_URI; u32 scheme_type, version; u32 IV_size; Bool use_sel_enc; if (gf_isom_is_ismacryp_media(file, trackNum, 1)) { gf_isom_get_ismacryp_info(file, trackNum, 1, NULL, &scheme_type, &version, &scheme_URI, &KMS_URI, &use_sel_enc, &IV_size, NULL); fprintf(stderr, "\n\tProtected by ISMA E&A scheme %s (version %d)\n", gf_4cc_to_str(scheme_type), version); if (scheme_URI) fprintf(stderr, "scheme location: %s\n", scheme_URI); if (KMS_URI) { if (!strnicmp(KMS_URI, "(key)", 5)) fprintf(stderr, "\tKMS location: key in file\n"); else fprintf(stderr, "\tKMS location: %s\n", KMS_URI); } fprintf(stderr, "\tSelective Encryption: %s\n", use_sel_enc ? "Yes" : "No"); if (IV_size) fprintf(stderr, "\tInitialization Vector size: %d bits\n", IV_size*8); } else if (gf_isom_is_omadrm_media(file, trackNum, 1)) { const char *textHdrs; u32 enc_type, hdr_len; u64 orig_len; gf_isom_get_omadrm_info(file, trackNum, 1, NULL, &scheme_type, &version, &scheme_URI, &KMS_URI, &textHdrs, &hdr_len, &orig_len, &enc_type, &use_sel_enc, &IV_size, NULL); fprintf(stderr, "\n\tProtected by OMA DRM scheme %s (version %d)\n", gf_4cc_to_str(scheme_type), version); fprintf(stderr, "\tRights Issuer: %s\n", KMS_URI); fprintf(stderr, "\tContent ID: %s\n", scheme_URI); if (textHdrs) { u32 offset; const char *start = textHdrs; fprintf(stderr, "\tOMA Textual Headers:\n"); i=0; offset=0; while (i<hdr_len) { if (start[i]==0) { fprintf(stderr, "\t\t%s\n", start+offset); offset=i+1; } i++; } fprintf(stderr, "\\tt%s\n", start+offset); } if (orig_len) fprintf(stderr, "\tOriginal media size "LLD"\n", orig_len); fprintf(stderr, "\tEncryption algorithm %s\n", (enc_type==1) ? "AEA 128 CBC" : (enc_type ? "AEA 128 CTR" : "None")); fprintf(stderr, "\tSelective Encryption: %s\n", use_sel_enc ? 
"Yes" : "No"); if (IV_size) fprintf(stderr, "\tInitialization Vector size: %d bits\n", IV_size*8); } else if(gf_isom_is_cenc_media(file, trackNum, 1)) { const u8 *def_key; u32 def_key_size; Bool IsEncrypted; u8 crypt_byte_block, skip_byte_block; IV_size = 0; gf_isom_get_cenc_info(file, trackNum, 1, NULL, &scheme_type, &version); gf_isom_cenc_get_default_info(file, trackNum, 1, NULL, &IsEncrypted, &crypt_byte_block, &skip_byte_block, &def_key, &def_key_size); fprintf(stderr, "\n\tProtected by CENC scheme %s version 0x%08X", gf_4cc_to_str(scheme_type), version); if (crypt_byte_block && skip_byte_block) fprintf(stderr, " - Pattern %d:%d", (u32) skip_byte_block, (u32) crypt_byte_block); if (def_key && def_key[0]) fprintf(stderr, " - MultiKey"); fprintf(stderr, "\n"); dump_key_info(def_key, def_key_size, IsEncrypted); } else if(gf_isom_is_adobe_protection_media(file, trackNum, 1)) { gf_isom_get_adobe_protection_info(file, trackNum, 1, NULL, &scheme_type, &version, NULL); fprintf(stderr, "\nProtected by Adobe scheme %s (version %d)\n", gf_4cc_to_str(scheme_type), version); } else { fprintf(stderr, "\nProtected by unknown scheme %s\n", gf_4cc_to_str(gf_isom_is_media_encrypted(file, trackNum, 0) )); } fprintf(stderr, "\n"); } if ( gf_media_get_rfc_6381_codec_name(file, trackNum, szCodec, GF_FALSE, GF_FALSE) == GF_OK) { fprintf(stderr, "\tRFC6381 Codec Parameters: %s\n", szCodec); } DumpMetaItem(file, 0, trackNum, "\tTrack Meta"); gf_isom_get_track_switch_group_count(file, trackNum, &alt_group, &nb_groups); if (alt_group) { fprintf(stderr, "Alternate Group ID %d\n", alt_group); for (i=0; i<nb_groups; i++) { u32 nb_crit, switchGroupID; const u32 *criterias = gf_isom_get_track_switch_parameter(file, trackNum, i+1, &switchGroupID, &nb_crit); if (!nb_crit) { fprintf(stderr, "\tNo criteria in %s group\n", switchGroupID ? "switch" : "alternate"); } else { if (switchGroupID) { fprintf(stderr, "\tSwitchGroup ID %d criterias: ", switchGroupID); } else { fprintf(stderr, "\tAlternate Group criterias: "); } for (j=0; j<nb_crit; j++) { if (j) fprintf(stderr, " "); fprintf(stderr, "%s", gf_4cc_to_str(criterias[j]) ); } fprintf(stderr, "\n"); } } } switch (gf_isom_has_sync_points(file, trackNum)) { case 0: fprintf(stderr, "\tAll samples are sync\n"); break; case 1: { u32 nb_sync = gf_isom_get_sync_point_count(file, trackNum) - 1; if (! 
nb_sync) { fprintf(stderr, "\tOnly one sync sample\n"); } else { fprintf(stderr, "\tAverage GOP length: %d samples\n", gf_isom_get_sample_count(file, trackNum) / nb_sync); } } break; case 2: fprintf(stderr, "\tNo sync sample found\n"); break; } fprintf(stderr, "\tMax sample duration: %d / %d\n", gf_isom_get_max_sample_delta(file, trackNum), timescale); if (!full_dump) { fprintf(stderr, "\n"); return; } dur = size = 0; max_rate = rate = 0; time_slice = 0; ts = gf_isom_get_media_timescale(file, trackNum); csize = gf_isom_get_constant_sample_size(file, trackNum); cdur = gf_isom_get_constant_sample_duration(file, trackNum); count = gf_isom_get_sample_count(file, trackNum); if (csize && cdur) { size = count * csize; dur = cdur * count; } else { for (j=0; j<count; j++) { GF_ISOSample *samp; if (is_od_track) { samp = gf_isom_get_sample(file, trackNum, j+1, NULL); } else { samp = gf_isom_get_sample_info(file, trackNum, j+1, NULL, NULL); } if (!samp) { M4_LOG(GF_LOG_ERROR, ("Failed to fetch sample %d\n", j+1)); return; } dur = samp->DTS+samp->CTS_Offset; size += samp->dataLength; rate += samp->dataLength; if (samp->DTS - time_slice > ts) { Double max_tmp = rate * ts / (samp->DTS - time_slice); if (max_rate < max_tmp ) max_rate = max_tmp; rate = 0; time_slice = samp->DTS; } gf_isom_sample_del(&samp); } } fprintf(stderr, "\nComputed info from media:\n"); if (csize && cdur) { fprintf(stderr, "\tConstant sample size %d bytes and dur %d / %d\n", csize, cdur, ts); } scale = 1000.0 / ts; dur = (u64) (scale * dur); fprintf(stderr, "\tTotal size "LLU" bytes - Total samples duration "LLU" ms\n", size, dur); if (!dur) { fprintf(stderr, "\n"); return; } /*rate in byte, dur is in ms*/ rate = 8000.0 * size / dur; if (!max_rate) max_rate = rate; else max_rate *= 8.0; if (rate >= 1500) { fprintf(stderr, "\tAverage rate %.2f kbps - Max Rate %.2f kbps\n", rate/1000, max_rate/1000); } else { fprintf(stderr, "\tAverage rate %.2f bps - Max Rate %.2f bps\n", rate, max_rate); } { u32 dmin, dmax, davg, smin, smax, savg; gf_isom_get_chunks_infos(file, trackNum, &dmin, &davg, &dmax, &smin, &savg, &smax); fprintf(stderr, "\tChunk durations: min %d ms - max %d ms - average %d ms\n", (1000*dmin)/ts, (1000*dmax)/ts, (1000*davg)/ts); fprintf(stderr, "\tChunk sizes (bytes): min %d - max %d - average %d\n", smin, smax, savg); } fprintf(stderr, "\n"); count = gf_isom_get_chapter_count(file, trackNum); if (count) { const char *name; u64 time; fprintf(stderr, "\nChapters:\n"); for (j=0; j<count; j++) { gf_isom_get_chapter(file, trackNum, j+1, &time, &name); fprintf(stderr, "\tChapter #%d - %s - \"%s\"\n", j+1, format_duration(time, 1000, szDur), name); } } } void DumpMovieInfo(GF_ISOFile *file) { GF_InitialObjectDescriptor *iod; Bool dump_m4sys = GF_FALSE; u32 i, brand, min, timescale, count, data_len; const u8 *data; u64 create, modif; Bool has_itags = GF_FALSE; char szDur[50]; DumpMetaItem(file, 1, 0, "# File Meta"); if (!gf_isom_has_movie(file)) { if (gf_isom_has_segment(file, &brand, &min)) { count = gf_isom_segment_get_fragment_count(file); fprintf(stderr, "File is a segment - %d movie fragments - Brand %s (version %d):\n", count, gf_4cc_to_str(brand), min); for (i=0; i<count; i++) { u32 j, traf_count = gf_isom_segment_get_track_fragment_count(file, i+1); for (j=0; j<traf_count; j++) { u32 ID; u64 tfdt; ID = gf_isom_segment_get_track_fragment_decode_time(file, i+1, j+1, &tfdt); fprintf(stderr, "\tFragment #%d Track ID %d - TFDT "LLU"\n", i+1, ID, tfdt); } } } else { fprintf(stderr, "File has no movie (moov) - static data 
container\n"); } return; } timescale = gf_isom_get_timescale(file); i=gf_isom_get_track_count(file); fprintf(stderr, "# Movie Info - %d track%s - TimeScale %d\n", i, i>1 ? "s" : "", timescale); modif = gf_isom_get_duration(file); create = gf_isom_get_original_duration(file); fprintf(stderr, "Duration %s", format_duration(create, timescale, szDur)); if (create!=modif) { fprintf(stderr, " (recomputed %s)", format_duration(modif, timescale, szDur)); } fprintf(stderr, "\n"); #ifndef GPAC_DISABLE_ISOM_FRAGMENTS if (gf_isom_is_fragmented(file)) { fprintf(stderr, "Fragmented: yes - duration %s\n%d fragments - %d SegmentIndexes\n", format_duration(gf_isom_get_fragmented_duration(file), timescale, szDur), gf_isom_get_fragments_count(file, 0) , gf_isom_get_fragments_count(file, 1) ); } else { fprintf(stderr, "Fragmented: no\n"); } #endif if (gf_isom_moov_first(file)) fprintf(stderr, "Progressive (moov before mdat)\n"); if (gf_isom_get_brand_info(file, &brand, &min, &count) == GF_OK) { fprintf(stderr, "Major Brand %s - version %d - compatible brands:", gf_4cc_to_str(brand), min); for (i=0; i<count;i++) { if (gf_isom_get_alternate_brand(file, i+1, &brand)==GF_OK) fprintf(stderr, " %s", gf_4cc_to_str(brand) ); } fprintf(stderr, "\n"); } gf_isom_get_creation_time(file, &create, &modif); fprintf(stderr, "Created: %s", format_date(create, szDur)); if (create != modif) fprintf(stderr, "Modified: %s", format_date(modif, szDur)); fprintf(stderr, "\n"); DumpMetaItem(file, 0, 0, "# Movie Meta"); iod = (GF_InitialObjectDescriptor *) gf_isom_get_root_od(file); if (iod) { u32 desc_size = gf_odf_desc_size((GF_Descriptor *)iod); if (iod->tag == GF_ODF_IOD_TAG) { fprintf(stderr, "File has root IOD (%d bytes)\n", desc_size); fprintf(stderr, "Scene PL 0x%02x - Graphics PL 0x%02x - OD PL 0x%02x\n", iod->scene_profileAndLevel, iod->graphics_profileAndLevel, iod->OD_profileAndLevel); fprintf(stderr, "Visual PL: %s (0x%02x)\n", gf_m4v_get_profile_name(iod->visual_profileAndLevel), iod->visual_profileAndLevel); fprintf(stderr, "Audio PL: %s (0x%02x)\n", gf_m4a_get_profile_name(iod->audio_profileAndLevel), iod->audio_profileAndLevel); //fprintf(stderr, "inline profiles included %s\n", iod->inlineProfileFlag ? 
"yes" : "no"); } else { fprintf(stderr, "File has root OD (%d bytes)\n", desc_size); } if (!gf_list_count(iod->ESDescriptors)) fprintf(stderr, "No streams included in root OD\n"); else dump_m4sys = GF_TRUE; gf_odf_desc_del((GF_Descriptor *) iod); } if (gf_isom_is_JPEG2000(file)) fprintf(stderr, "File is JPEG 2000\n"); count = gf_isom_get_copyright_count(file); if (count) { const char *lang, *note; fprintf(stderr, "\nCopyrights:\n"); for (i=0; i<count; i++) { gf_isom_get_copyright(file, i+1, &lang, &note); fprintf(stderr, "\t(%s) %s\n", lang, note); } } count = gf_isom_get_chapter_count(file, 0); if (count) { const char *name; u64 time; fprintf(stderr, "\nChapters:\n"); for (i=0; i<count; i++) { gf_isom_get_chapter(file, 0, i+1, &time, &name); fprintf(stderr, "\tChapter #%d - %s - \"%s\"\n", i+1, format_duration(time, 1000, szDur), name); } } if (gf_isom_apple_get_tag(file, 0, &data, &data_len) == GF_OK) { has_itags = GF_TRUE; fprintf(stderr, "\niTunes Info:\n"); i=0; while (1) { u32 int_val2, flags, itype; GF_ISOiTunesTag tag; u64 int_val; s32 tag_idx; GF_Err e = gf_isom_apple_enum_tag(file, i, &tag, &data, &data_len, &int_val, &int_val2, &flags); if (e) break; i++; tag_idx = gf_itags_find_by_itag(tag); if (tag_idx<0) { fprintf(stderr, "\t%s: %s\n", gf_4cc_to_str(tag), data); continue; } fprintf(stderr, "\t%s: ", gf_itags_get_name(tag_idx) ); itype = gf_itags_get_type(tag_idx); switch (itype) { case GF_ITAG_BOOL: fprintf(stderr, int_val ? "yes" : "no"); break; case GF_ITAG_INT8: case GF_ITAG_INT16: case GF_ITAG_INT32: case GF_ITAG_INT64: fprintf(stderr, LLU, int_val); break; case GF_ITAG_FRAC6: case GF_ITAG_FRAC8: fprintf(stderr, LLU" / %u", int_val, int_val2); break; case GF_ITAG_FILE: if (flags==14) fprintf(stderr, "PNG File"); else if (flags==13) fprintf(stderr, "JPEG File"); else fprintf(stderr, "unknown (flags %d)", flags); break; case GF_ITAG_ID3_GENRE: if (int_val) { fprintf(stderr, "%s", gf_id3_get_genre((u32) int_val) ); break; } //fallthrough default: if (data) fprintf(stderr, "%s", data); else fprintf(stderr, data_len ? 
"none" : "unknown"); break; } fprintf(stderr, "\n"); } } i=0; while (1) { u32 type, version; char *wmatag; GF_Err e = gf_isom_wma_enum_tag(file, i, &wmatag, &data, &data_len, &version, &type); if (e) break; if (!i) { fprintf(stderr, "\nWMA Info:\n"); } i++; fprintf(stderr, "\t%s", wmatag); if (version!=1) fprintf(stderr, " (version %d)", version); fprintf(stderr, ": "); if (type) { fprintf(stderr, "unknown type %d\n", type); } else { u16 *src_str = (u16 *) data; u32 len = (u32) ( UTF8_MAX_BYTES_PER_CHAR * gf_utf8_wcslen(src_str) ); char *utf8str = (char *)gf_malloc(len + 1); u32 res_len = (u32) gf_utf8_wcstombs(utf8str, len, (const unsigned short **) &src_str); utf8str[res_len] = 0; fprintf(stderr, "%s\n", utf8str); gf_free(utf8str); } } print_udta(file, 0, has_itags); fprintf(stderr, "\n"); for (i=0; i<gf_isom_get_track_count(file); i++) { DumpTrackInfo(file, i+1, 0, GF_TRUE, dump_m4sys); } } #endif /*defined(GPAC_DISABLE_ISOM) || defined(GPAC_DISABLE_ISOM_WRITE)*/ #ifndef GPAC_DISABLE_MPEG2TS typedef struct { /* when writing to file */ FILE *pes_out; char dump[100]; #if 0 FILE *pes_out_nhml; char nhml[100]; FILE *pes_out_info; char info[100]; #endif Bool is_info_dumped; u32 prog_number; /* For logging timing information (PCR, PTS/DTS) */ FILE *timestamps_info_file; char timestamps_info_name[100]; /* when dumping TS information */ u32 dump_pid; Bool has_seen_pat; } GF_M2TS_Dump; static void on_m2ts_dump_event(GF_M2TS_Demuxer *ts, u32 evt_type, void *par) { u32 i, count; GF_M2TS_Program *prog; GF_M2TS_PES_PCK *pck; GF_M2TS_Dump *dumper = (GF_M2TS_Dump *)ts->user; switch (evt_type) { case GF_M2TS_EVT_PAT_FOUND: if (dumper->timestamps_info_file) { fprintf(dumper->timestamps_info_file, "%u\t%d\n", ts->pck_number, 0); } break; case GF_M2TS_EVT_PAT_UPDATE: if (dumper->timestamps_info_file) { fprintf(dumper->timestamps_info_file, "%u\t%d\n", ts->pck_number, 0); } break; case GF_M2TS_EVT_PAT_REPEAT: /* WARNING: We detect the pat on a repetition, probably to ensure that we also have seen all the PMT To be checked */ dumper->has_seen_pat = 1; if (dumper->timestamps_info_file) { fprintf(dumper->timestamps_info_file, "%u\t%d\n", ts->pck_number, 0); } // fprintf(stderr, "Repeated PAT found - %d programs\n", gf_list_count(ts->programs) ); break; case GF_M2TS_EVT_CAT_FOUND: if (dumper->timestamps_info_file) { fprintf(dumper->timestamps_info_file, "%u\t%d\n", ts->pck_number, 0); } break; case GF_M2TS_EVT_CAT_UPDATE: if (dumper->timestamps_info_file) { fprintf(dumper->timestamps_info_file, "%u\t%d\n", ts->pck_number, 0); } break; case GF_M2TS_EVT_CAT_REPEAT: if (dumper->timestamps_info_file) { fprintf(dumper->timestamps_info_file, "%u\t%d\n", ts->pck_number, 0); } break; case GF_M2TS_EVT_PMT_FOUND: prog = (GF_M2TS_Program*)par; if (gf_list_count(ts->programs)>1 && prog->number!=dumper->prog_number) break; count = gf_list_count(prog->streams); GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("Program number %d found - %d streams:\n", prog->number, count)); for (i=0; i<count; i++) { GF_M2TS_ES *es = gf_list_get(prog->streams, i); if (es->pid == prog->pmt_pid) { GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("\tPID %d: Program Map Table\n", es->pid)); } else { GF_M2TS_PES *pes = (GF_M2TS_PES *)es; gf_m2ts_set_pes_framing(pes, dumper->pes_out ? 
GF_M2TS_PES_FRAMING_RAW : GF_M2TS_PES_FRAMING_DEFAULT); GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("\tPID %d: %s ", pes->pid, gf_m2ts_get_stream_name(pes->stream_type) )); if (pes->mpeg4_es_id) GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, (" - MPEG-4 ES ID %d", pes->mpeg4_es_id)); GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("\n")); } } if (dumper->timestamps_info_file) { fprintf(dumper->timestamps_info_file, "%u\t%d\n", ts->pck_number, prog->pmt_pid); } break; case GF_M2TS_EVT_PMT_UPDATE: prog = (GF_M2TS_Program*)par; if (gf_list_count(ts->programs)>1 && prog->number!=dumper->prog_number) break; if (dumper->timestamps_info_file) { fprintf(dumper->timestamps_info_file, "%u\t%d\n", ts->pck_number, prog->pmt_pid); } break; case GF_M2TS_EVT_PMT_REPEAT: prog = (GF_M2TS_Program*)par; if (gf_list_count(ts->programs)>1 && prog->number!=dumper->prog_number) break; if (dumper->timestamps_info_file) { fprintf(dumper->timestamps_info_file, "%u\t%d\n", ts->pck_number, prog->pmt_pid); } break; case GF_M2TS_EVT_SDT_FOUND: #ifndef GPAC_DISABLE_LOG count = gf_list_count(ts->SDTs) ; GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("Program Description found - %d desc:\n", count)); for (i=0; i<count; i++) { GF_M2TS_SDT *sdt = gf_list_get(ts->SDTs, i); GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("\tServiceID %d - Provider %s - Name %s\n", sdt->service_id, sdt->provider, sdt->service)); } #endif break; case GF_M2TS_EVT_SDT_UPDATE: #ifndef GPAC_DISABLE_LOG count = gf_list_count(ts->SDTs) ; GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("Program Description updated - %d desc\n", count)); for (i=0; i<count; i++) { GF_M2TS_SDT *sdt = gf_list_get(ts->SDTs, i); GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("\tServiceID %d - Provider %s - Name %s\n", sdt->service_id, sdt->provider, sdt->service)); } #endif break; case GF_M2TS_EVT_SDT_REPEAT: break; case GF_M2TS_EVT_PES_TIMING: pck = par; if (gf_list_count(ts->programs)>1 && pck->stream->program->number != dumper->prog_number) break; break; case GF_M2TS_EVT_PES_PCK: pck = par; if (gf_list_count(ts->programs)>1 && pck->stream->program->number != dumper->prog_number) break; if (dumper->has_seen_pat) { /*We need the interpolated PCR for the pcrb, hence this computation was moved out here; the computed value is saved in index_info so it can be written to the pcrb*/ GF_M2TS_PES *pes = pck->stream; /*FIXME : not used GF_M2TS_Program *prog = pes->program; */ /* Interpolated PCR value for the TS packet containing the PES header start */ u64 interpolated_pcr_value = 0; if (pes->last_pcr_value && pes->before_last_pcr_value_pck_number && pes->last_pcr_value > pes->before_last_pcr_value) { u32 delta_pcr_pck_num = pes->last_pcr_value_pck_number - pes->before_last_pcr_value_pck_number; u32 delta_pts_pcr_pck_num = pes->pes_start_packet_number - pes->last_pcr_value_pck_number; u64 delta_pcr_value = pes->last_pcr_value - pes->before_last_pcr_value; if ((pes->pes_start_packet_number > pes->last_pcr_value_pck_number) && (pes->last_pcr_value > pes->before_last_pcr_value)) { pes->last_pcr_value = pes->before_last_pcr_value; } /* linear interpolation: pcr = last_pcr + delta_pcr * (packets since last PCR) / (packets between the two last PCRs) */ interpolated_pcr_value = pes->last_pcr_value + (u64)((delta_pcr_value*delta_pts_pcr_pck_num*1.0)/delta_pcr_pck_num); } if (dumper->timestamps_info_file) { Double diff; fprintf(dumper->timestamps_info_file, "%u\t%d\t", pck->stream->pes_start_packet_number, pck->stream->pid); if (interpolated_pcr_value) fprintf(dumper->timestamps_info_file, "%f", interpolated_pcr_value/(300.0 * 90000)); fprintf(dumper->timestamps_info_file, "\t"); if
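/* time units: PTS/DTS are 90 kHz ticks (divide by 90000.0 for seconds) while the PCR runs at 27 MHz, i.e. 300 * 90000 ticks per second - e.g. a DTS of 90000 and a PCR of 27000000 both denote 1 second */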
(pck->DTS) fprintf(dumper->timestamps_info_file, "%f", (pck->DTS / 90000.0)); fprintf(dumper->timestamps_info_file, "\t%f\t%d\t%d", pck->PTS / 90000.0, (pck->flags & GF_M2TS_PES_PCK_RAP) ? 1 : 0, (pck->flags & GF_M2TS_PES_PCK_DISCONTINUITY) ? 1 : 0); if (interpolated_pcr_value) { diff = (pck->DTS ? pck->DTS : pck->PTS) / 90000.0; diff -= pes->last_pcr_value / (300.0 * 90000); fprintf(dumper->timestamps_info_file, "\t%f\n", diff); if (diff<0) { M4_LOG(GF_LOG_WARNING, ("Warning: detected PTS/DTS value less than current PCR of %g sec\n", diff)); } } else { fprintf(dumper->timestamps_info_file, "\t\n"); } } } if (dumper->has_seen_pat && dumper->pes_out && (dumper->dump_pid == pck->stream->pid)) { gf_fwrite(pck->data, pck->data_len, dumper->pes_out); } break; case GF_M2TS_EVT_PES_PCR: pck = par; if (gf_list_count(ts->programs)>1 && pck->stream->program->number != dumper->prog_number) break; if (dumper->timestamps_info_file) { fprintf(dumper->timestamps_info_file, "%u\t%d\t%f\t\t\t\t%d\n", pck->stream->program->last_pcr_value_pck_number, pck->stream->pid, pck->PTS / (300*90000.0), (pck->flags & GF_M2TS_PES_PCK_DISCONTINUITY) ? 1 : 0); } break; case GF_M2TS_EVT_SL_PCK: #if 0 { GF_M2TS_SL_PCK *sl_pck = par; if (dumper->pes_out && (dumper->dump_pid == sl_pck->stream->pid)) { GF_SLHeader header; u32 header_len; if (sl_pck->stream->mpeg4_es_id) { GF_ESD *esd = ((GF_M2TS_PES*)sl_pck->stream)->esd; if (!dumper->is_info_dumped) { if (esd->decoderConfig->decoderSpecificInfo) gf_fwrite(esd->decoderConfig->decoderSpecificInfo->data, esd->decoderConfig->decoderSpecificInfo->dataLength, dumper->pes_out_info); dumper->is_info_dumped = 1; fprintf(dumper->pes_out_nhml, "<NHNTStream version=\"1.0\" "); fprintf(dumper->pes_out_nhml, "timeScale=\"%d\" ", esd->slConfig->timestampResolution); fprintf(dumper->pes_out_nhml, "streamType=\"%d\" ", esd->decoderConfig->streamType); fprintf(dumper->pes_out_nhml, "objectTypeIndication=\"%d\" ", esd->decoderConfig->objectTypeIndication); if (esd->decoderConfig->decoderSpecificInfo) fprintf(dumper->pes_out_nhml, "specificInfoFile=\"%s\" ", dumper->info); fprintf(dumper->pes_out_nhml, "baseMediaFile=\"%s\" ", dumper->dump); fprintf(dumper->pes_out_nhml, "inRootOD=\"yes\">\n"); } gf_sl_depacketize(esd->slConfig, &header, sl_pck->data, sl_pck->data_len, &header_len); gf_fwrite(sl_pck->data+header_len, sl_pck->data_len-header_len, dumper->pes_out); fprintf(dumper->pes_out_nhml, "<NHNTSample DTS=\""LLD"\" dataLength=\"%d\" isRAP=\"%s\"/>\n", header.decodingTimeStamp, sl_pck->data_len-header_len, (header.randomAccessPointFlag?"yes":"no")); } } } #endif break; } } void dump_mpeg2_ts(char *mpeg2ts_file, char *out_name, Bool prog_num) { u8 data[188]; GF_M2TS_Dump dumper; u32 size; u64 fsize, fdone; GF_M2TS_Demuxer *ts; FILE *src; if (!prog_num && !out_name) { fprintf(stderr, "No program number nor output filename specified. 
No timestamp file will be generated."); } src = gf_fopen(mpeg2ts_file, "rb"); if (!src) { M4_LOG(GF_LOG_ERROR, ("Cannot open %s: no such file\n", mpeg2ts_file)); return; } ts = gf_m2ts_demux_new(); ts->on_event = on_m2ts_dump_event; ts->notify_pes_timing = 1; memset(&dumper, 0, sizeof(GF_M2TS_Dump)); ts->user = &dumper; dumper.prog_number = prog_num; /*PES dumping*/ if (out_name) { char *pid = strrchr(out_name, '#'); if (pid) { dumper.dump_pid = atoi(pid+1); pid[0] = 0; sprintf(dumper.dump, "%s_%d.raw", out_name, dumper.dump_pid); dumper.pes_out = gf_fopen(dumper.dump, "wb"); #if 0 sprintf(dumper.nhml, "%s_%d.nhml", pes_out_name, dumper.dump_pid); dumper.pes_out_nhml = gf_fopen(dumper.nhml, "wt"); sprintf(dumper.info, "%s_%d.info", pes_out_name, dumper.dump_pid); dumper.pes_out_info = gf_fopen(dumper.info, "wb"); #endif pid[0] = '#'; } } gf_fseek(src, 0, SEEK_END); fsize = gf_ftell(src); gf_fseek(src, 0, SEEK_SET); /* first loop to process all packets between two PAT, and assume all signaling was found between these 2 PATs */ while (!feof(src)) { size = (u32) gf_fread(data, 188, src); if (size<188) break; gf_m2ts_process_data(ts, data, size); if (dumper.has_seen_pat) break; } dumper.has_seen_pat = GF_TRUE; if (!prog_num) { GF_M2TS_Program *p = gf_list_get(ts->programs, 0); if (p) prog_num = p->number; fprintf(stderr, "No program number specified, defaulting to first program\n"); } if (!prog_num && !out_name) { fprintf(stderr, "No program number nor output filename specified. No timestamp file will be generated\n"); } if (prog_num) { sprintf(dumper.timestamps_info_name, "%s_prog_%d_timestamps.txt", mpeg2ts_file, prog_num/*, mpeg2ts_file*/); dumper.timestamps_info_file = gf_fopen(dumper.timestamps_info_name, "wt"); if (!dumper.timestamps_info_file) { M4_LOG(GF_LOG_ERROR, ("Cannot open file %s\n", dumper.timestamps_info_name)); return; } fprintf(dumper.timestamps_info_file, "PCK#\tPID\tPCR\tDTS\tPTS\tRAP\tDiscontinuity\tDTS-PCR Diff\n"); } gf_m2ts_reset_parsers(ts); gf_fseek(src, 0, SEEK_SET); fdone = 0; while (!feof(src)) { size = (u32) gf_fread(data, 188, src); if (size<188) break; gf_m2ts_process_data(ts, data, size); fdone += size; gf_set_progress("MPEG-2 TS Parsing", fdone, fsize); } gf_fclose(src); gf_m2ts_demux_del(ts); if (dumper.pes_out) gf_fclose(dumper.pes_out); #if 0 if (dumper.pes_out_nhml) { if (dumper.is_info_dumped) fprintf(dumper.pes_out_nhml, "</NHNTStream>\n"); gf_fclose(dumper.pes_out_nhml); gf_fclose(dumper.pes_out_info); } #endif if (dumper.timestamps_info_file) gf_fclose(dumper.timestamps_info_file); } #endif /*GPAC_DISABLE_MPEG2TS*/ #include <gpac/download.h> #include <gpac/mpd.h> void get_file_callback(void *usr_cbk, GF_NETIO_Parameter *parameter) { if (parameter->msg_type==GF_NETIO_DATA_EXCHANGE) { u64 tot_size, done, max; u32 bps; gf_dm_sess_get_stats(parameter->sess, NULL, NULL, &tot_size, &done, &bps, NULL); if (tot_size) { max = done; max *= 100; max /= tot_size; fprintf(stderr, "download %02d %% at %05d kpbs\r", (u32) max, bps*8/1000); } } } static GF_DownloadSession *get_file(const char *url, GF_DownloadManager *dm, GF_Err *e) { GF_DownloadSession *sess; sess = gf_dm_sess_new(dm, url, GF_NETIO_SESSION_NOT_THREADED, get_file_callback, NULL, e); if (!sess) return NULL; *e = gf_dm_sess_process(sess); if (*e) { gf_dm_sess_del(sess); return NULL; } return sess; } static void revert_cache_file(char *item_path) { char szPATH[GF_MAX_PATH]; const char *url; GF_Config *cached; if (!strstr(item_path, "gpac_cache_")) { fprintf(stderr, "%s is not a gpac cache file\n", 
item_path); return; } if (!strncmp(item_path, "./", 2) || !strncmp(item_path, ".\\", 2)) item_path += 2; strcpy(szPATH, item_path); strcat(szPATH, ".txt"); cached = gf_cfg_new(NULL, szPATH); url = gf_cfg_get_key(cached, "cache", "url"); if (url) url = strstr(url, "://"); if (url) { u32 i, len, dir_len=0, k=0; char *sep; char *dst_name; sep = strstr(item_path, "gpac_cache_"); if (sep) { sep[0] = 0; dir_len = (u32) strlen(item_path); sep[0] = 'g'; } url+=3; len = (u32) strlen(url); dst_name = gf_malloc(len+dir_len+1); memset(dst_name, 0, len+dir_len+1); strncpy(dst_name, item_path, dir_len); k=dir_len; for (i=0; i<len; i++) { dst_name[k] = url[i]; if (dst_name[k]==':') dst_name[k]='_'; else if (dst_name[k]=='/') { if (!gf_dir_exists(dst_name)) gf_mkdir(dst_name); } k++; } if (gf_file_exists(item_path)) { gf_file_move(item_path, dst_name); } gf_free(dst_name); } else { M4_LOG(GF_LOG_ERROR, ("Failed to reverse %s cache file\n", item_path)); } gf_cfg_del(cached); gf_file_delete(szPATH); } GF_Err rip_mpd(const char *mpd_src, const char *output_dir) { GF_DownloadSession *sess; u32 i, connect_time, reply_time, download_time, req_hdr_size, rsp_hdr_size; GF_Err e; GF_DOMParser *mpd_parser=NULL; GF_MPD *mpd=NULL; GF_MPD_Period *period; GF_MPD_AdaptationSet *as; GF_MPD_Representation *rep; char szName[GF_MAX_PATH]; GF_DownloadManager *dm; if (output_dir) { char *sep; strcpy(szName, output_dir); sep = gf_file_basename(szName); if (sep) sep[0] = 0; gf_opts_set_key("temp", "cache", szName); } else { gf_opts_set_key("temp", "cache", "."); } gf_opts_set_key("temp", "clean-cache", "true"); dm = gf_dm_new(NULL); /* char *name = strrchr(mpd_src, '/'); if (!name) name = strrchr(mpd_src, '\\'); if (!name) name = "manifest.mpd"; else name ++; if (strchr(name, '?') || strchr(name, '&')) name = "manifest.mpd"; */ fprintf(stderr, "Downloading %s\n", mpd_src); sess = get_file(mpd_src, dm, &e); if (!sess) { GF_LOG(GF_LOG_ERROR, GF_LOG_APP, ("Error downloading MPD file %s: %s\n", mpd_src, gf_error_to_string(e) )); goto err_exit; } strcpy(szName, gf_dm_sess_get_cache_name(sess) ); gf_dm_sess_get_header_sizes_and_times(sess, &req_hdr_size, &rsp_hdr_size, &connect_time, &reply_time, &download_time); gf_dm_sess_del(sess); if (e) { GF_LOG(GF_LOG_ERROR, GF_LOG_APP, ("Error fetching MPD file %s: %s\n", mpd_src, gf_error_to_string(e))); goto err_exit; } else { GF_LOG(GF_LOG_INFO, GF_LOG_APP, ("Fetched file %s\n", mpd_src)); } GF_LOG(GF_LOG_DEBUG, GF_LOG_APP, ("GET Header size %d - Reply header size %d\n", req_hdr_size, rsp_hdr_size)); GF_LOG(GF_LOG_DEBUG, GF_LOG_APP, ("GET time: Connect Time %d - Reply Time %d - Download Time %d\n", connect_time, reply_time, download_time)); mpd_parser = gf_xml_dom_new(); e = gf_xml_dom_parse(mpd_parser, szName, NULL, NULL); if (e != GF_OK) { gf_xml_dom_del(mpd_parser); GF_LOG(GF_LOG_ERROR, GF_LOG_APP, ("Error parsing MPD %s : %s\n", mpd_src, gf_error_to_string(e))); return e; } mpd = gf_mpd_new(); e = gf_mpd_init_from_dom(gf_xml_dom_get_root(mpd_parser), mpd, mpd_src); gf_xml_dom_del(mpd_parser); mpd_parser=NULL; if (e) { GF_LOG(GF_LOG_ERROR, GF_LOG_APP, ("Error initializing MPD %s : %s\n", mpd_src, gf_error_to_string(e))); goto err_exit; } else { GF_LOG(GF_LOG_DEBUG, GF_LOG_APP, ("MPD %s initialized: %s\n", szName, gf_error_to_string(e))); } revert_cache_file(szName); if (mpd->type==GF_MPD_TYPE_DYNAMIC) { GF_LOG(GF_LOG_ERROR, GF_LOG_APP, ("MPD rip is not supported on live sources\n")); e = GF_NOT_SUPPORTED; goto err_exit; } i=0; while ((period = (GF_MPD_Period *) gf_list_enum(mpd->periods, 
&i))) { char *initTemplate = NULL; Bool segment_base = GF_FALSE; u32 j=0; if (period->segment_base) segment_base=GF_TRUE; if (period->segment_template && period->segment_template->initialization) { initTemplate = period->segment_template->initialization; } while ((as = gf_list_enum(period->adaptation_sets, &j))) { u32 k=0; if (!initTemplate && as->segment_template && as->segment_template->initialization) { initTemplate = as->segment_template->initialization; } if (as->segment_base) segment_base=GF_TRUE; while ((rep = gf_list_enum(as->representations, &k))) { u64 out_range_start, out_range_end, segment_duration; Bool is_in_base_url; char *seg_url; u32 seg_idx=0; if (rep->segment_template && rep->segment_template->initialization) { initTemplate = rep->segment_template->initialization; } else if (k>1) { initTemplate = NULL; } if (rep->segment_base) segment_base=GF_TRUE; e = gf_mpd_resolve_url(mpd, rep, as, period, mpd_src, 0, GF_MPD_RESOLVE_URL_INIT, 0, 0, &seg_url, &out_range_start, &out_range_end, &segment_duration, &is_in_base_url, NULL, NULL, NULL); if (e) { GF_LOG(GF_LOG_ERROR, GF_LOG_APP, ("Error resolving init segment name : %s\n", gf_error_to_string(e))); continue; } //not a byte range, replace URL if (segment_base) { } else if (out_range_start || out_range_end || !seg_url) { GF_LOG(GF_LOG_ERROR, GF_LOG_APP, ("byte range rip not yet implemented\n")); if (seg_url) gf_free(seg_url); e = GF_NOT_SUPPORTED; goto err_exit; } fprintf(stderr, "Downloading %s\n", seg_url); sess = get_file(seg_url, dm, &e); if (e) { GF_LOG(GF_LOG_ERROR, GF_LOG_APP, ("Error downloading init segment %s from MPD %s : %s\n", seg_url, mpd_src, gf_error_to_string(e))); goto err_exit; } revert_cache_file((char *) gf_dm_sess_get_cache_name(sess) ); gf_free(seg_url); gf_dm_sess_del(sess); if (segment_base) continue; while (1) { e = gf_mpd_resolve_url(mpd, rep, as, period, mpd_src, 0, GF_MPD_RESOLVE_URL_MEDIA, seg_idx, 0, &seg_url, &out_range_start, &out_range_end, &segment_duration, NULL, NULL, NULL, NULL); if (e) { if (e<0) { GF_LOG(GF_LOG_ERROR, GF_LOG_APP, ("Error resolving segment name : %s\n", gf_error_to_string(e))); } break; } seg_idx++; if (out_range_start || out_range_end || !seg_url) { GF_LOG(GF_LOG_ERROR, GF_LOG_APP, ("byte range rip not yet implemented\n")); if (seg_url) gf_free(seg_url); break; } fprintf(stderr, "Downloading %s\n", seg_url); sess = get_file(seg_url, dm, &e); if (e) { gf_free(seg_url); if (e != GF_URL_ERROR) { GF_LOG(GF_LOG_ERROR, GF_LOG_APP, ("Error downloading segment %s: %s\n", seg_url, gf_error_to_string(e))); } else { //todo, properly detect end of dash representation e = GF_OK; } break; } revert_cache_file((char *) gf_dm_sess_get_cache_name(sess) ); gf_free(seg_url); gf_dm_sess_del(sess); } } } } err_exit: if (mpd) gf_mpd_del(mpd); gf_dm_del(dm); return e; }
}

void print_udta(GF_ISOFile *file, u32 track_number, Bool has_itags)
{
	u32 i, count;
	count = gf_isom_get_udta_count(file, track_number);
	if (!count) return;

	if (has_itags) {
		for (i=0; i<count; i++) {
			u32 type;
			bin128 uuid;
			gf_isom_get_udta_type(file, track_number, i+1, &type, &uuid);
			if (type == GF_ISOM_BOX_TYPE_META) {
				count--;
				break;
			}
		}
		if (!count) return;
	}
	fprintf(stderr, "%d UDTA types: ", count);
	for (i=0; i<count; i++) {
		u32 j, type, nb_items, first=GF_TRUE;
		bin128 uuid;
		gf_isom_get_udta_type(file, track_number, i+1, &type, &uuid);
		nb_items = gf_isom_get_user_data_count(file, track_number, type, uuid);
		fprintf(stderr, "%s (%d) ", gf_4cc_to_str(type), nb_items);
		for (j=0; j<nb_items; j++) {
			u8 *udta=NULL;
			u32 udta_size;
			gf_isom_get_user_data(file, track_number, type, uuid, j+1, &udta, &udta_size);
			if (!udta) continue;
			if (gf_utf8_is_legal(udta, udta_size)) {
				if (first) {
					fprintf(stderr, "\n");
					first = GF_FALSE;
				}
				fprintf(stderr, "\t%s\n", (char *) udta);
			}
			gf_free(udta);
		}
	}
}

void print_udta(GF_ISOFile *file, u32 track_number, Bool has_itags)
{
	u32 i, count;
	count = gf_isom_get_udta_count(file, track_number);
	if (!count) return;

	if (has_itags) {
		for (i=0; i<count; i++) {
			u32 type;
			bin128 uuid;
			gf_isom_get_udta_type(file, track_number, i+1, &type, &uuid);
			if (type == GF_ISOM_BOX_TYPE_META) {
				count--;
				break;
			}
		}
		if (!count) return;
	}
	fprintf(stderr, "%d UDTA types: ", count);
	for (i=0; i<count; i++) {
		u32 j, type, nb_items, first=GF_TRUE;
		bin128 uuid;
		gf_isom_get_udta_type(file, track_number, i+1, &type, &uuid);
		nb_items = gf_isom_get_user_data_count(file, track_number, type, uuid);
		fprintf(stderr, "%s (%d) ", gf_4cc_to_str(type), nb_items);
		for (j=0; j<nb_items; j++) {
			u8 *udta=NULL;
			u32 udta_size;
			gf_isom_get_user_data(file, track_number, type, uuid, j+1, &udta, &udta_size);
			if (!udta) continue;
			if (udta_size && gf_utf8_is_legal(udta, udta_size)) {
				u32 idx;
				if (first) {
					fprintf(stderr, "\n");
					first = GF_FALSE;
				}
				fprintf(stderr, "\t");
				for (idx=0; idx<udta_size; idx++) {
					if (!udta[idx]) break;
					fprintf(stderr, "%c", udta[idx]);
				}
				fprintf(stderr, "\n");
			}
			gf_free(udta);
		}
	}
}
{'added': [(1898, '\t\t\tif (udta_size && gf_utf8_is_legal(udta, udta_size)) {'), (1899, '\t\t\t\tu32 idx;'), (1904, '\t\t\t\tfprintf(stderr, "\\t");'), (1905, '\t\t\t\tfor (idx=0; idx<udta_size; idx++) {'), (1906, '\t\t\t\t\tif (!udta[idx]) break;'), (1907, '\t\t\t\t\tfprintf(stderr, "%c", udta[idx]);'), (1908, '\t\t\t\t}'), (1909, '\t\t\t\tfprintf(stderr, "\\n");')], 'deleted': [(1898, '\t\t\tif (gf_utf8_is_legal(udta, udta_size)) {'), (1903, '\t\t\t\tfprintf(stderr, "\\t%s\\n", (char *) udta);')]}
8
2
3521
27715
https://github.com/gpac/gpac
CVE-2021-32136
['CWE-787']
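The diff above (CVE-2021-32136, CWE-787) captures the essence of this record's fix: the original print_udta() handed attacker-controlled user-data bytes to fprintf() as a "%s" string, trusting a NUL terminator that a crafted file need not contain; the patched version additionally rejects udta_size == 0 and emits bytes one at a time, stopping at the first NUL or at udta_size. A minimal standalone sketch of the same bounded-print pattern follows; the names (print_bounded) are illustrative, not gpac API:

#include <stdio.h>
#include <stdint.h>

/* Print a possibly non-NUL-terminated byte buffer safely:
 * emit at most `size` bytes and stop early at the first NUL.
 * Mirrors the loop the patch adds to print_udta(). */
static void print_bounded(const uint8_t *buf, uint32_t size)
{
	uint32_t i;
	if (!buf || !size) return;   /* the patch likewise rejects size==0 */
	for (i = 0; i < size; i++) {
		if (!buf[i]) break;      /* stop at an embedded NUL */
		fputc(buf[i], stderr);
	}
	fputc('\n', stderr);
}

int main(void)
{
	/* deliberately no terminating NUL: printing this with "%s" would overread */
	const uint8_t udta[4] = { 'd', 'a', 't', 'a' };
	print_bounded(udta, sizeof udta);
	return 0;
}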
print-lmp.c
lmp_print
/* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that: (1) source code * distributions retain the above copyright notice and this paragraph * in its entirety, and (2) distributions including binary code include * the above copyright notice and this paragraph in its entirety in * the documentation or other materials provided with the distribution. * THIS SOFTWARE IS PROVIDED ``AS IS'' AND * WITHOUT ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, WITHOUT * LIMITATION, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE. * * Original code by Hannes Gredler (hannes@gredler.at) * Support for LMP service discovery extensions (defined by UNI 1.0) added * by Manu Pathak (mapathak@cisco.com), May 2005 */ /* \summary: Link Management Protocol (LMP) printer */ /* specification: RFC 4204 */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include <netdissect-stdinc.h> #include "netdissect.h" #include "extract.h" #include "addrtoname.h" #include "gmpls.h" /* * LMP common header * * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Vers | (Reserved) | Flags | Msg Type | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | LMP Length | (Reserved) | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ */ struct lmp_common_header { uint8_t version_res[2]; uint8_t flags; uint8_t msg_type; uint8_t length[2]; uint8_t reserved[2]; }; #define LMP_VERSION 1 #define LMP_EXTRACT_VERSION(x) (((x)&0xf0)>>4) static const struct tok lmp_header_flag_values[] = { { 0x01, "Control Channel Down"}, { 0x02, "LMP restart"}, { 0, NULL} }; static const struct tok lmp_obj_te_link_flag_values[] = { { 0x01, "Fault Management Supported"}, { 0x02, "Link Verification Supported"}, { 0, NULL} }; static const struct tok lmp_obj_data_link_flag_values[] = { { 0x01, "Data Link Port"}, { 0x02, "Allocated for user traffic"}, { 0x04, "Failed link"}, { 0, NULL} }; static const struct tok lmp_obj_channel_status_values[] = { { 1, "Signal Okay"}, { 2, "Signal Degraded"}, { 3, "Signal Fail"}, { 0, NULL} }; static const struct tok lmp_obj_begin_verify_flag_values[] = { { 0x0001, "Verify all links"}, { 0x0002, "Data link type"}, { 0, NULL} }; static const struct tok lmp_obj_begin_verify_error_values[] = { { 0x01, "Link Verification Procedure Not supported"}, { 0x02, "Unwilling to verify"}, { 0x04, "Unsupported verification transport mechanism"}, { 0x08, "Link-Id configuration error"}, { 0x10, "Unknown object c-type"}, { 0, NULL} }; static const struct tok lmp_obj_link_summary_error_values[] = { { 0x01, "Unacceptable non-negotiable LINK-SUMMARY parameters"}, { 0x02, "Renegotiate LINK-SUMMARY parameters"}, { 0x04, "Invalid TE-LINK Object"}, { 0x08, "Invalid DATA-LINK Object"}, { 0x10, "Unknown TE-LINK Object c-type"}, { 0x20, "Unknown DATA-LINK Object c-type"}, { 0, NULL} }; /* Service Config Supported Protocols Flags */ static const struct tok lmp_obj_service_config_sp_flag_values[] = { { 0x01, "RSVP Supported"}, { 0x02, "LDP Supported"}, { 0, NULL} }; /* Service Config Client Port Service Attribute Transparency Flags */ static const struct tok lmp_obj_service_config_cpsa_tp_flag_values[] = { { 0x01, "Path/VC Overhead Transparency Supported"}, { 0x02, "Line/MS Overhead Transparency Supported"}, { 0x04, "Section/RS Overhead Transparency Supported"}, { 0, NULL} }; /* Service Config Client Port Service Attribute Contiguous Concatenation 
Types Flags */ static const struct tok lmp_obj_service_config_cpsa_cct_flag_values[] = { { 0x01, "Contiguous Concatenation Types Supported"}, { 0, NULL} }; /* Service Config Network Service Attributes Transparency Flags */ static const struct tok lmp_obj_service_config_nsa_transparency_flag_values[] = { { 0x01, "Standard SOH/RSOH Transparency Supported"}, { 0x02, "Standard LOH/MSOH Transparency Supported"}, { 0, NULL} }; /* Service Config Network Service Attributes TCM Monitoring Flags */ static const struct tok lmp_obj_service_config_nsa_tcm_flag_values[] = { { 0x01, "Transparent Tandem Connection Monitoring Supported"}, { 0, NULL} }; /* Network Service Attributes Network Diversity Flags */ static const struct tok lmp_obj_service_config_nsa_network_diversity_flag_values[] = { { 0x01, "Node Diversity Supported"}, { 0x02, "Link Diversity Supported"}, { 0x04, "SRLG Diversity Supported"}, { 0, NULL} }; #define LMP_MSGTYPE_CONFIG 1 #define LMP_MSGTYPE_CONFIG_ACK 2 #define LMP_MSGTYPE_CONFIG_NACK 3 #define LMP_MSGTYPE_HELLO 4 #define LMP_MSGTYPE_VERIFY_BEGIN 5 #define LMP_MSGTYPE_VERIFY_BEGIN_ACK 6 #define LMP_MSGTYPE_VERIFY_BEGIN_NACK 7 #define LMP_MSGTYPE_VERIFY_END 8 #define LMP_MSGTYPE_VERIFY_END_ACK 9 #define LMP_MSGTYPE_TEST 10 #define LMP_MSGTYPE_TEST_STATUS_SUCCESS 11 #define LMP_MSGTYPE_TEST_STATUS_FAILURE 12 #define LMP_MSGTYPE_TEST_STATUS_ACK 13 #define LMP_MSGTYPE_LINK_SUMMARY 14 #define LMP_MSGTYPE_LINK_SUMMARY_ACK 15 #define LMP_MSGTYPE_LINK_SUMMARY_NACK 16 #define LMP_MSGTYPE_CHANNEL_STATUS 17 #define LMP_MSGTYPE_CHANNEL_STATUS_ACK 18 #define LMP_MSGTYPE_CHANNEL_STATUS_REQ 19 #define LMP_MSGTYPE_CHANNEL_STATUS_RESP 20 /* LMP Service Discovery message types defined by UNI 1.0 */ #define LMP_MSGTYPE_SERVICE_CONFIG 50 #define LMP_MSGTYPE_SERVICE_CONFIG_ACK 51 #define LMP_MSGTYPE_SERVICE_CONFIG_NACK 52 static const struct tok lmp_msg_type_values[] = { { LMP_MSGTYPE_CONFIG, "Config"}, { LMP_MSGTYPE_CONFIG_ACK, "Config ACK"}, { LMP_MSGTYPE_CONFIG_NACK, "Config NACK"}, { LMP_MSGTYPE_HELLO, "Hello"}, { LMP_MSGTYPE_VERIFY_BEGIN, "Begin Verify"}, { LMP_MSGTYPE_VERIFY_BEGIN_ACK, "Begin Verify ACK"}, { LMP_MSGTYPE_VERIFY_BEGIN_NACK, "Begin Verify NACK"}, { LMP_MSGTYPE_VERIFY_END, "End Verify"}, { LMP_MSGTYPE_VERIFY_END_ACK, "End Verify ACK"}, { LMP_MSGTYPE_TEST, "Test"}, { LMP_MSGTYPE_TEST_STATUS_SUCCESS, "Test Status Success"}, { LMP_MSGTYPE_TEST_STATUS_FAILURE, "Test Status Failure"}, { LMP_MSGTYPE_TEST_STATUS_ACK, "Test Status ACK"}, { LMP_MSGTYPE_LINK_SUMMARY, "Link Summary"}, { LMP_MSGTYPE_LINK_SUMMARY_ACK, "Link Summary ACK"}, { LMP_MSGTYPE_LINK_SUMMARY_NACK, "Link Summary NACK"}, { LMP_MSGTYPE_CHANNEL_STATUS, "Channel Status"}, { LMP_MSGTYPE_CHANNEL_STATUS_ACK, "Channel Status ACK"}, { LMP_MSGTYPE_CHANNEL_STATUS_REQ, "Channel Status Request"}, { LMP_MSGTYPE_CHANNEL_STATUS_RESP, "Channel Status Response"}, { LMP_MSGTYPE_SERVICE_CONFIG, "Service Config"}, { LMP_MSGTYPE_SERVICE_CONFIG_ACK, "Service Config ACK"}, { LMP_MSGTYPE_SERVICE_CONFIG_NACK, "Service Config NACK"}, { 0, NULL} }; /* * LMP object header * * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * |N| C-Type | Class | Length | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | | * // (object contents) // * | | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ */ struct lmp_object_header { uint8_t ctype; uint8_t class_num; uint8_t length[2]; }; #define LMP_OBJ_CC_ID 1 #define LMP_OBJ_NODE_ID 2 
#define LMP_OBJ_LINK_ID 3 #define LMP_OBJ_INTERFACE_ID 4 #define LMP_OBJ_MESSAGE_ID 5 #define LMP_OBJ_CONFIG 6 #define LMP_OBJ_HELLO 7 #define LMP_OBJ_VERIFY_BEGIN 8 #define LMP_OBJ_VERIFY_BEGIN_ACK 9 #define LMP_OBJ_VERIFY_ID 10 #define LMP_OBJ_TE_LINK 11 #define LMP_OBJ_DATA_LINK 12 #define LMP_OBJ_CHANNEL_STATUS 13 #define LMP_OBJ_CHANNEL_STATUS_REQ 14 #define LMP_OBJ_ERROR_CODE 20 #define LMP_OBJ_SERVICE_CONFIG 51 /* defined in UNI 1.0 */ static const struct tok lmp_obj_values[] = { { LMP_OBJ_CC_ID, "Control Channel ID" }, { LMP_OBJ_NODE_ID, "Node ID" }, { LMP_OBJ_LINK_ID, "Link ID" }, { LMP_OBJ_INTERFACE_ID, "Interface ID" }, { LMP_OBJ_MESSAGE_ID, "Message ID" }, { LMP_OBJ_CONFIG, "Configuration" }, { LMP_OBJ_HELLO, "Hello" }, { LMP_OBJ_VERIFY_BEGIN, "Verify Begin" }, { LMP_OBJ_VERIFY_BEGIN_ACK, "Verify Begin ACK" }, { LMP_OBJ_VERIFY_ID, "Verify ID" }, { LMP_OBJ_TE_LINK, "TE Link" }, { LMP_OBJ_DATA_LINK, "Data Link" }, { LMP_OBJ_CHANNEL_STATUS, "Channel Status" }, { LMP_OBJ_CHANNEL_STATUS_REQ, "Channel Status Request" }, { LMP_OBJ_ERROR_CODE, "Error Code" }, { LMP_OBJ_SERVICE_CONFIG, "Service Config" }, { 0, NULL} }; #define INT_SWITCHING_TYPE_SUBOBJ 1 #define WAVELENGTH_SUBOBJ 2 static const struct tok lmp_data_link_subobj[] = { { INT_SWITCHING_TYPE_SUBOBJ, "Interface Switching Type" }, { WAVELENGTH_SUBOBJ , "Wavelength" }, { 0, NULL} }; #define LMP_CTYPE_IPV4 1 #define LMP_CTYPE_IPV6 2 #define LMP_CTYPE_LOC 1 #define LMP_CTYPE_RMT 2 #define LMP_CTYPE_UNMD 3 #define LMP_CTYPE_IPV4_LOC 1 #define LMP_CTYPE_IPV4_RMT 2 #define LMP_CTYPE_IPV6_LOC 3 #define LMP_CTYPE_IPV6_RMT 4 #define LMP_CTYPE_UNMD_LOC 5 #define LMP_CTYPE_UNMD_RMT 6 #define LMP_CTYPE_1 1 #define LMP_CTYPE_2 2 #define LMP_CTYPE_HELLO_CONFIG 1 #define LMP_CTYPE_HELLO 1 #define LMP_CTYPE_BEGIN_VERIFY_ERROR 1 #define LMP_CTYPE_LINK_SUMMARY_ERROR 2 /* C-Types for Service Config Object */ #define LMP_CTYPE_SERVICE_CONFIG_SP 1 #define LMP_CTYPE_SERVICE_CONFIG_CPSA 2 #define LMP_CTYPE_SERVICE_CONFIG_TRANSPARENCY_TCM 3 #define LMP_CTYPE_SERVICE_CONFIG_NETWORK_DIVERSITY 4 /* * Different link types allowed in the Client Port Service Attributes * subobject defined for LMP Service Discovery in the UNI 1.0 spec */ #define LMP_SD_SERVICE_CONFIG_CPSA_LINK_TYPE_SDH 5 /* UNI 1.0 Sec 9.4.2 */ #define LMP_SD_SERVICE_CONFIG_CPSA_LINK_TYPE_SONET 6 /* UNI 1.0 Sec 9.4.2 */ /* * the ctypes are not globally unique so for * translating it to strings we build a table based * on objects offsetted by the ctype */ static const struct tok lmp_ctype_values[] = { { 256*LMP_OBJ_CC_ID+LMP_CTYPE_LOC, "Local" }, { 256*LMP_OBJ_CC_ID+LMP_CTYPE_RMT, "Remote" }, { 256*LMP_OBJ_NODE_ID+LMP_CTYPE_LOC, "Local" }, { 256*LMP_OBJ_NODE_ID+LMP_CTYPE_RMT, "Remote" }, { 256*LMP_OBJ_LINK_ID+LMP_CTYPE_IPV4_LOC, "IPv4 Local" }, { 256*LMP_OBJ_LINK_ID+LMP_CTYPE_IPV4_RMT, "IPv4 Remote" }, { 256*LMP_OBJ_LINK_ID+LMP_CTYPE_IPV6_LOC, "IPv6 Local" }, { 256*LMP_OBJ_LINK_ID+LMP_CTYPE_IPV6_RMT, "IPv6 Remote" }, { 256*LMP_OBJ_LINK_ID+LMP_CTYPE_UNMD_LOC, "Unnumbered Local" }, { 256*LMP_OBJ_LINK_ID+LMP_CTYPE_UNMD_RMT, "Unnumbered Remote" }, { 256*LMP_OBJ_INTERFACE_ID+LMP_CTYPE_IPV4_LOC, "IPv4 Local" }, { 256*LMP_OBJ_INTERFACE_ID+LMP_CTYPE_IPV4_RMT, "IPv4 Remote" }, { 256*LMP_OBJ_INTERFACE_ID+LMP_CTYPE_IPV6_LOC, "IPv6 Local" }, { 256*LMP_OBJ_INTERFACE_ID+LMP_CTYPE_IPV6_RMT, "IPv6 Remote" }, { 256*LMP_OBJ_INTERFACE_ID+LMP_CTYPE_UNMD_LOC, "Unnumbered Local" }, { 256*LMP_OBJ_INTERFACE_ID+LMP_CTYPE_UNMD_RMT, "Unnumbered Remote" }, { 256*LMP_OBJ_MESSAGE_ID+LMP_CTYPE_1, "1" }, { 
256*LMP_OBJ_MESSAGE_ID+LMP_CTYPE_2, "2" }, { 256*LMP_OBJ_CONFIG+LMP_CTYPE_1, "1" }, { 256*LMP_OBJ_HELLO+LMP_CTYPE_1, "1" }, { 256*LMP_OBJ_VERIFY_BEGIN+LMP_CTYPE_1, "1" }, { 256*LMP_OBJ_VERIFY_BEGIN_ACK+LMP_CTYPE_1, "1" }, { 256*LMP_OBJ_VERIFY_ID+LMP_CTYPE_1, "1" }, { 256*LMP_OBJ_TE_LINK+LMP_CTYPE_IPV4, "IPv4" }, { 256*LMP_OBJ_TE_LINK+LMP_CTYPE_IPV6, "IPv6" }, { 256*LMP_OBJ_TE_LINK+LMP_CTYPE_UNMD, "Unnumbered" }, { 256*LMP_OBJ_DATA_LINK+LMP_CTYPE_IPV4, "IPv4" }, { 256*LMP_OBJ_DATA_LINK+LMP_CTYPE_IPV6, "IPv6" }, { 256*LMP_OBJ_DATA_LINK+LMP_CTYPE_UNMD, "Unnumbered" }, { 256*LMP_OBJ_CHANNEL_STATUS+LMP_CTYPE_IPV4, "IPv4" }, { 256*LMP_OBJ_CHANNEL_STATUS+LMP_CTYPE_IPV6, "IPv6" }, { 256*LMP_OBJ_CHANNEL_STATUS+LMP_CTYPE_UNMD, "Unnumbered" }, { 256*LMP_OBJ_CHANNEL_STATUS_REQ+LMP_CTYPE_IPV4, "IPv4" }, { 256*LMP_OBJ_CHANNEL_STATUS_REQ+LMP_CTYPE_IPV6, "IPv6" }, { 256*LMP_OBJ_CHANNEL_STATUS_REQ+LMP_CTYPE_UNMD, "Unnumbered" }, { 256*LMP_OBJ_ERROR_CODE+LMP_CTYPE_1, "1" }, { 256*LMP_OBJ_ERROR_CODE+LMP_CTYPE_2, "2" }, { 256*LMP_OBJ_SERVICE_CONFIG+LMP_CTYPE_SERVICE_CONFIG_SP, "1" }, { 256*LMP_OBJ_SERVICE_CONFIG+LMP_CTYPE_SERVICE_CONFIG_CPSA, "2" }, { 256*LMP_OBJ_SERVICE_CONFIG+LMP_CTYPE_SERVICE_CONFIG_TRANSPARENCY_TCM, "3" }, { 256*LMP_OBJ_SERVICE_CONFIG+LMP_CTYPE_SERVICE_CONFIG_NETWORK_DIVERSITY, "4" }, { 0, NULL} }; void lmp_print(netdissect_options *ndo, register const u_char *pptr, register u_int len) { const struct lmp_common_header *lmp_com_header; const struct lmp_object_header *lmp_obj_header; const u_char *tptr,*obj_tptr; int tlen,lmp_obj_len,lmp_obj_ctype,obj_tlen; int hexdump; int offset,subobj_type,subobj_len,total_subobj_len; int link_type; union { /* int to float conversion buffer */ float f; uint32_t i; } bw; tptr=pptr; lmp_com_header = (const struct lmp_common_header *)pptr; ND_TCHECK(*lmp_com_header); /* * Sanity checking of the header. */ if (LMP_EXTRACT_VERSION(lmp_com_header->version_res[0]) != LMP_VERSION) { ND_PRINT((ndo, "LMP version %u packet not supported", LMP_EXTRACT_VERSION(lmp_com_header->version_res[0]))); return; } /* in non-verbose mode just lets print the basic Message Type*/ if (ndo->ndo_vflag < 1) { ND_PRINT((ndo, "LMPv%u %s Message, length: %u", LMP_EXTRACT_VERSION(lmp_com_header->version_res[0]), tok2str(lmp_msg_type_values, "unknown (%u)",lmp_com_header->msg_type), len)); return; } /* ok they seem to want to know everything - lets fully decode it */ tlen=EXTRACT_16BITS(lmp_com_header->length); ND_PRINT((ndo, "\n\tLMPv%u, msg-type: %s, Flags: [%s], length: %u", LMP_EXTRACT_VERSION(lmp_com_header->version_res[0]), tok2str(lmp_msg_type_values, "unknown, type: %u",lmp_com_header->msg_type), bittok2str(lmp_header_flag_values,"none",lmp_com_header->flags), tlen)); tptr+=sizeof(const struct lmp_common_header); tlen-=sizeof(const struct lmp_common_header); while(tlen>0) { /* did we capture enough for fully decoding the object header ? */ ND_TCHECK2(*tptr, sizeof(struct lmp_object_header)); lmp_obj_header = (const struct lmp_object_header *)tptr; lmp_obj_len=EXTRACT_16BITS(lmp_obj_header->length); lmp_obj_ctype=(lmp_obj_header->ctype)&0x7f; if(lmp_obj_len % 4 || lmp_obj_len < 4) return; ND_PRINT((ndo, "\n\t %s Object (%u), Class-Type: %s (%u) Flags: [%snegotiable], length: %u", tok2str(lmp_obj_values, "Unknown", lmp_obj_header->class_num), lmp_obj_header->class_num, tok2str(lmp_ctype_values, "Unknown", ((lmp_obj_header->class_num)<<8)+lmp_obj_ctype), lmp_obj_ctype, (lmp_obj_header->ctype)&0x80 ? 
"" : "non-", lmp_obj_len)); obj_tptr=tptr+sizeof(struct lmp_object_header); obj_tlen=lmp_obj_len-sizeof(struct lmp_object_header); /* did we capture enough for fully decoding the object ? */ ND_TCHECK2(*tptr, lmp_obj_len); hexdump=FALSE; switch(lmp_obj_header->class_num) { case LMP_OBJ_CC_ID: switch(lmp_obj_ctype) { case LMP_CTYPE_LOC: case LMP_CTYPE_RMT: ND_PRINT((ndo, "\n\t Control Channel ID: %u (0x%08x)", EXTRACT_32BITS(obj_tptr), EXTRACT_32BITS(obj_tptr))); break; default: hexdump=TRUE; } break; case LMP_OBJ_LINK_ID: case LMP_OBJ_INTERFACE_ID: switch(lmp_obj_ctype) { case LMP_CTYPE_IPV4_LOC: case LMP_CTYPE_IPV4_RMT: ND_PRINT((ndo, "\n\t IPv4 Link ID: %s (0x%08x)", ipaddr_string(ndo, obj_tptr), EXTRACT_32BITS(obj_tptr))); break; case LMP_CTYPE_IPV6_LOC: case LMP_CTYPE_IPV6_RMT: ND_PRINT((ndo, "\n\t IPv6 Link ID: %s (0x%08x)", ip6addr_string(ndo, obj_tptr), EXTRACT_32BITS(obj_tptr))); break; case LMP_CTYPE_UNMD_LOC: case LMP_CTYPE_UNMD_RMT: ND_PRINT((ndo, "\n\t Link ID: %u (0x%08x)", EXTRACT_32BITS(obj_tptr), EXTRACT_32BITS(obj_tptr))); break; default: hexdump=TRUE; } break; case LMP_OBJ_MESSAGE_ID: switch(lmp_obj_ctype) { case LMP_CTYPE_1: ND_PRINT((ndo, "\n\t Message ID: %u (0x%08x)", EXTRACT_32BITS(obj_tptr), EXTRACT_32BITS(obj_tptr))); break; case LMP_CTYPE_2: ND_PRINT((ndo, "\n\t Message ID Ack: %u (0x%08x)", EXTRACT_32BITS(obj_tptr), EXTRACT_32BITS(obj_tptr))); break; default: hexdump=TRUE; } break; case LMP_OBJ_NODE_ID: switch(lmp_obj_ctype) { case LMP_CTYPE_LOC: case LMP_CTYPE_RMT: ND_PRINT((ndo, "\n\t Node ID: %s (0x%08x)", ipaddr_string(ndo, obj_tptr), EXTRACT_32BITS(obj_tptr))); break; default: hexdump=TRUE; } break; case LMP_OBJ_CONFIG: switch(lmp_obj_ctype) { case LMP_CTYPE_HELLO_CONFIG: ND_PRINT((ndo, "\n\t Hello Interval: %u\n\t Hello Dead Interval: %u", EXTRACT_16BITS(obj_tptr), EXTRACT_16BITS(obj_tptr+2))); break; default: hexdump=TRUE; } break; case LMP_OBJ_HELLO: switch(lmp_obj_ctype) { case LMP_CTYPE_HELLO: ND_PRINT((ndo, "\n\t Tx Seq: %u, Rx Seq: %u", EXTRACT_32BITS(obj_tptr), EXTRACT_32BITS(obj_tptr+4))); break; default: hexdump=TRUE; } break; case LMP_OBJ_TE_LINK: ND_PRINT((ndo, "\n\t Flags: [%s]", bittok2str(lmp_obj_te_link_flag_values, "none", EXTRACT_16BITS(obj_tptr)>>8))); switch(lmp_obj_ctype) { case LMP_CTYPE_IPV4: ND_PRINT((ndo, "\n\t Local Link-ID: %s (0x%08x)" "\n\t Remote Link-ID: %s (0x%08x)", ipaddr_string(ndo, obj_tptr+4), EXTRACT_32BITS(obj_tptr+4), ipaddr_string(ndo, obj_tptr+8), EXTRACT_32BITS(obj_tptr+8))); break; case LMP_CTYPE_IPV6: case LMP_CTYPE_UNMD: default: hexdump=TRUE; } break; case LMP_OBJ_DATA_LINK: ND_PRINT((ndo, "\n\t Flags: [%s]", bittok2str(lmp_obj_data_link_flag_values, "none", EXTRACT_16BITS(obj_tptr)>>8))); switch(lmp_obj_ctype) { case LMP_CTYPE_IPV4: case LMP_CTYPE_UNMD: ND_PRINT((ndo, "\n\t Local Interface ID: %s (0x%08x)" "\n\t Remote Interface ID: %s (0x%08x)", ipaddr_string(ndo, obj_tptr+4), EXTRACT_32BITS(obj_tptr+4), ipaddr_string(ndo, obj_tptr+8), EXTRACT_32BITS(obj_tptr+8))); total_subobj_len = lmp_obj_len - 16; offset = 12; while (total_subobj_len > 0 && hexdump == FALSE ) { subobj_type = EXTRACT_16BITS(obj_tptr+offset)>>8; subobj_len = EXTRACT_16BITS(obj_tptr+offset)&0x00FF; ND_PRINT((ndo, "\n\t Subobject, Type: %s (%u), Length: %u", tok2str(lmp_data_link_subobj, "Unknown", subobj_type), subobj_type, subobj_len)); switch(subobj_type) { case INT_SWITCHING_TYPE_SUBOBJ: ND_PRINT((ndo, "\n\t Switching Type: %s (%u)", tok2str(gmpls_switch_cap_values, "Unknown", EXTRACT_16BITS(obj_tptr+offset+2)>>8), 
EXTRACT_16BITS(obj_tptr+offset+2)>>8)); ND_PRINT((ndo, "\n\t Encoding Type: %s (%u)", tok2str(gmpls_encoding_values, "Unknown", EXTRACT_16BITS(obj_tptr+offset+2)&0x00FF), EXTRACT_16BITS(obj_tptr+offset+2)&0x00FF)); bw.i = EXTRACT_32BITS(obj_tptr+offset+4); ND_PRINT((ndo, "\n\t Min Reservable Bandwidth: %.3f Mbps", bw.f*8/1000000)); bw.i = EXTRACT_32BITS(obj_tptr+offset+8); ND_PRINT((ndo, "\n\t Max Reservable Bandwidth: %.3f Mbps", bw.f*8/1000000)); break; case WAVELENGTH_SUBOBJ: ND_PRINT((ndo, "\n\t Wavelength: %u", EXTRACT_32BITS(obj_tptr+offset+4))); break; default: /* Any Unknown Subobject ==> Exit loop */ hexdump=TRUE; break; } total_subobj_len-=subobj_len; offset+=subobj_len; } break; case LMP_CTYPE_IPV6: default: hexdump=TRUE; } break; case LMP_OBJ_VERIFY_BEGIN: switch(lmp_obj_ctype) { case LMP_CTYPE_1: ND_PRINT((ndo, "\n\t Flags: %s", bittok2str(lmp_obj_begin_verify_flag_values, "none", EXTRACT_16BITS(obj_tptr)))); ND_PRINT((ndo, "\n\t Verify Interval: %u", EXTRACT_16BITS(obj_tptr+2))); ND_PRINT((ndo, "\n\t Data links: %u", EXTRACT_32BITS(obj_tptr+4))); ND_PRINT((ndo, "\n\t Encoding type: %s", tok2str(gmpls_encoding_values, "Unknown", *(obj_tptr+8)))); ND_PRINT((ndo, "\n\t Verify Transport Mechanism: %u (0x%x)%s", EXTRACT_16BITS(obj_tptr+10), EXTRACT_16BITS(obj_tptr+10), EXTRACT_16BITS(obj_tptr+10)&8000 ? " (Payload test messages capable)" : "")); bw.i = EXTRACT_32BITS(obj_tptr+12); ND_PRINT((ndo, "\n\t Transmission Rate: %.3f Mbps",bw.f*8/1000000)); ND_PRINT((ndo, "\n\t Wavelength: %u", EXTRACT_32BITS(obj_tptr+16))); break; default: hexdump=TRUE; } break; case LMP_OBJ_VERIFY_BEGIN_ACK: switch(lmp_obj_ctype) { case LMP_CTYPE_1: ND_PRINT((ndo, "\n\t Verify Dead Interval: %u" "\n\t Verify Transport Response: %u", EXTRACT_16BITS(obj_tptr), EXTRACT_16BITS(obj_tptr+2))); break; default: hexdump=TRUE; } break; case LMP_OBJ_VERIFY_ID: switch(lmp_obj_ctype) { case LMP_CTYPE_1: ND_PRINT((ndo, "\n\t Verify ID: %u", EXTRACT_32BITS(obj_tptr))); break; default: hexdump=TRUE; } break; case LMP_OBJ_CHANNEL_STATUS: switch(lmp_obj_ctype) { case LMP_CTYPE_IPV4: case LMP_CTYPE_UNMD: offset = 0; /* Decode pairs: <Interface_ID (4 bytes), Channel_status (4 bytes)> */ while (offset < (lmp_obj_len-(int)sizeof(struct lmp_object_header)) ) { ND_PRINT((ndo, "\n\t Interface ID: %s (0x%08x)", ipaddr_string(ndo, obj_tptr+offset), EXTRACT_32BITS(obj_tptr+offset))); ND_PRINT((ndo, "\n\t\t Active: %s (%u)", (EXTRACT_32BITS(obj_tptr+offset+4)>>31) ? "Allocated" : "Non-allocated", (EXTRACT_32BITS(obj_tptr+offset+4)>>31))); ND_PRINT((ndo, "\n\t\t Direction: %s (%u)", (EXTRACT_32BITS(obj_tptr+offset+4)>>30)&0x1 ? 
"Transmit" : "Receive", (EXTRACT_32BITS(obj_tptr+offset+4)>>30)&0x1)); ND_PRINT((ndo, "\n\t\t Channel Status: %s (%u)", tok2str(lmp_obj_channel_status_values, "Unknown", EXTRACT_32BITS(obj_tptr+offset+4)&0x3FFFFFF), EXTRACT_32BITS(obj_tptr+offset+4)&0x3FFFFFF)); offset+=8; } break; case LMP_CTYPE_IPV6: default: hexdump=TRUE; } break; case LMP_OBJ_CHANNEL_STATUS_REQ: switch(lmp_obj_ctype) { case LMP_CTYPE_IPV4: case LMP_CTYPE_UNMD: offset = 0; while (offset < (lmp_obj_len-(int)sizeof(struct lmp_object_header)) ) { ND_PRINT((ndo, "\n\t Interface ID: %s (0x%08x)", ipaddr_string(ndo, obj_tptr+offset), EXTRACT_32BITS(obj_tptr+offset))); offset+=4; } break; case LMP_CTYPE_IPV6: default: hexdump=TRUE; } break; case LMP_OBJ_ERROR_CODE: switch(lmp_obj_ctype) { case LMP_CTYPE_BEGIN_VERIFY_ERROR: ND_PRINT((ndo, "\n\t Error Code: %s", bittok2str(lmp_obj_begin_verify_error_values, "none", EXTRACT_32BITS(obj_tptr)))); break; case LMP_CTYPE_LINK_SUMMARY_ERROR: ND_PRINT((ndo, "\n\t Error Code: %s", bittok2str(lmp_obj_link_summary_error_values, "none", EXTRACT_32BITS(obj_tptr)))); break; default: hexdump=TRUE; } break; case LMP_OBJ_SERVICE_CONFIG: switch (lmp_obj_ctype) { case LMP_CTYPE_SERVICE_CONFIG_SP: ND_PRINT((ndo, "\n\t Flags: %s", bittok2str(lmp_obj_service_config_sp_flag_values, "none", EXTRACT_16BITS(obj_tptr)>>8))); ND_PRINT((ndo, "\n\t UNI Version: %u", EXTRACT_16BITS(obj_tptr) & 0x00FF)); break; case LMP_CTYPE_SERVICE_CONFIG_CPSA: link_type = EXTRACT_16BITS(obj_tptr)>>8; ND_PRINT((ndo, "\n\t Link Type: %s (%u)", tok2str(lmp_sd_service_config_cpsa_link_type_values, "Unknown", link_type), link_type)); if (link_type == LMP_SD_SERVICE_CONFIG_CPSA_LINK_TYPE_SDH) { ND_PRINT((ndo, "\n\t Signal Type: %s (%u)", tok2str(lmp_sd_service_config_cpsa_signal_type_sdh_values, "Unknown", EXTRACT_16BITS(obj_tptr) & 0x00FF), EXTRACT_16BITS(obj_tptr) & 0x00FF)); } if (link_type == LMP_SD_SERVICE_CONFIG_CPSA_LINK_TYPE_SONET) { ND_PRINT((ndo, "\n\t Signal Type: %s (%u)", tok2str(lmp_sd_service_config_cpsa_signal_type_sonet_values, "Unknown", EXTRACT_16BITS(obj_tptr) & 0x00FF), EXTRACT_16BITS(obj_tptr) & 0x00FF)); } ND_PRINT((ndo, "\n\t Transparency: %s", bittok2str(lmp_obj_service_config_cpsa_tp_flag_values, "none", EXTRACT_16BITS(obj_tptr+2)>>8))); ND_PRINT((ndo, "\n\t Contiguous Concatenation Types: %s", bittok2str(lmp_obj_service_config_cpsa_cct_flag_values, "none", EXTRACT_16BITS(obj_tptr+2)>>8 & 0x00FF))); ND_PRINT((ndo, "\n\t Minimum NCC: %u", EXTRACT_16BITS(obj_tptr+4))); ND_PRINT((ndo, "\n\t Maximum NCC: %u", EXTRACT_16BITS(obj_tptr+6))); ND_PRINT((ndo, "\n\t Minimum NVC:%u", EXTRACT_16BITS(obj_tptr+8))); ND_PRINT((ndo, "\n\t Maximum NVC:%u", EXTRACT_16BITS(obj_tptr+10))); ND_PRINT((ndo, "\n\t Local Interface ID: %s (0x%08x)", ipaddr_string(ndo, obj_tptr+12), EXTRACT_32BITS(obj_tptr+12))); break; case LMP_CTYPE_SERVICE_CONFIG_TRANSPARENCY_TCM: ND_PRINT((ndo, "\n\t Transparency Flags: %s", bittok2str( lmp_obj_service_config_nsa_transparency_flag_values, "none", EXTRACT_32BITS(obj_tptr)))); ND_PRINT((ndo, "\n\t TCM Monitoring Flags: %s", bittok2str( lmp_obj_service_config_nsa_tcm_flag_values, "none", EXTRACT_16BITS(obj_tptr+6) & 0x00FF))); break; case LMP_CTYPE_SERVICE_CONFIG_NETWORK_DIVERSITY: ND_PRINT((ndo, "\n\t Diversity: Flags: %s", bittok2str( lmp_obj_service_config_nsa_network_diversity_flag_values, "none", EXTRACT_16BITS(obj_tptr+2) & 0x00FF))); break; default: hexdump = TRUE; } break; default: if (ndo->ndo_vflag <= 1) print_unknown_data(ndo,obj_tptr,"\n\t ",obj_tlen); break; } /* do we want to see an 
additionally hexdump ? */ if (ndo->ndo_vflag > 1 || hexdump==TRUE) print_unknown_data(ndo,tptr+sizeof(struct lmp_object_header),"\n\t ", lmp_obj_len-sizeof(struct lmp_object_header)); tptr+=lmp_obj_len; tlen-=lmp_obj_len; } return; trunc: ND_PRINT((ndo, "\n\t\t packet exceeded snapshot")); } /* * Local Variables: * c-style: whitesmith * c-basic-offset: 8 * End: */
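In the code_before above, the only guard on an object is `if(lmp_obj_len % 4 || lmp_obj_len < 4) return;`, and the DATA_LINK subobject loop trusts subobj_len straight from the packet. The code_after that follows factors the subobject walk into lmp_print_data_link_subobjs() and rejects a subobject length that is too short, not a multiple of 4, or longer than the remaining object body. A minimal sketch of that validation pattern, with hypothetical names (walk_subobjs is not tcpdump API):

#include <stdio.h>
#include <stdint.h>

/* Walk TLV-style subobjects inside an object body of `total` bytes.
 * Each subobject starts with a 1-byte type and a 1-byte length that
 * covers the whole subobject. Returns 0 on success, -1 when a length
 * field is implausible -- the same checks the patched
 * lmp_print_data_link_subobjs() applies before using the data. */
static int walk_subobjs(const uint8_t *p, int total)
{
	while (total > 0) {
		int type, len;
		if (total < 2)        return -1;  /* not even room for the header */
		type = p[0];
		len  = p[1];
		if (len < 4)          return -1;  /* shorter than a legal subobject */
		if ((len % 4) != 0)   return -1;  /* LMP lengths are 4-aligned */
		if (len > total)      return -1;  /* would run past the object body */
		printf("subobj type %d, len %d\n", type, len);
		p += len;
		total -= len;
	}
	return 0;
}

int main(void)
{
	const uint8_t good[8] = { 1, 8, 0, 0, 0, 0, 0, 0 };
	const uint8_t bad[4]  = { 1, 16, 0, 0 };  /* claims more bytes than exist */
	printf("good: %d\n", walk_subobjs(good, sizeof good));
	printf("bad:  %d\n", walk_subobjs(bad, sizeof bad));
	return 0;
}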
/* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that: (1) source code * distributions retain the above copyright notice and this paragraph * in its entirety, and (2) distributions including binary code include * the above copyright notice and this paragraph in its entirety in * the documentation or other materials provided with the distribution. * THIS SOFTWARE IS PROVIDED ``AS IS'' AND * WITHOUT ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, WITHOUT * LIMITATION, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE. * * Original code by Hannes Gredler (hannes@gredler.at) * Support for LMP service discovery extensions (defined by OIF UNI 1.0) * added by Manu Pathak (mapathak@cisco.com), May 2005 */ /* \summary: Link Management Protocol (LMP) printer */ /* specification: RFC 4204 */ /* OIF UNI 1.0: http://www.oiforum.com/public/documents/OIF-UNI-01.0.pdf */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include <netdissect-stdinc.h> #include "netdissect.h" #include "extract.h" #include "addrtoname.h" #include "gmpls.h" /* * LMP common header * * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Vers | (Reserved) | Flags | Msg Type | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | LMP Length | (Reserved) | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ */ struct lmp_common_header { uint8_t version_res[2]; uint8_t flags; uint8_t msg_type; uint8_t length[2]; uint8_t reserved[2]; }; #define LMP_VERSION 1 #define LMP_EXTRACT_VERSION(x) (((x)&0xf0)>>4) static const struct tok lmp_header_flag_values[] = { { 0x01, "Control Channel Down"}, { 0x02, "LMP restart"}, { 0, NULL} }; static const struct tok lmp_obj_te_link_flag_values[] = { { 0x01, "Fault Management Supported"}, { 0x02, "Link Verification Supported"}, { 0, NULL} }; static const struct tok lmp_obj_data_link_flag_values[] = { { 0x01, "Data Link Port"}, { 0x02, "Allocated for user traffic"}, { 0x04, "Failed link"}, { 0, NULL} }; static const struct tok lmp_obj_channel_status_values[] = { { 1, "Signal Okay"}, { 2, "Signal Degraded"}, { 3, "Signal Fail"}, { 0, NULL} }; static const struct tok lmp_obj_begin_verify_flag_values[] = { { 0x0001, "Verify all links"}, { 0x0002, "Data link type"}, { 0, NULL} }; static const struct tok lmp_obj_begin_verify_error_values[] = { { 0x01, "Link Verification Procedure Not supported"}, { 0x02, "Unwilling to verify"}, { 0x04, "Unsupported verification transport mechanism"}, { 0x08, "Link-Id configuration error"}, { 0x10, "Unknown object c-type"}, { 0, NULL} }; static const struct tok lmp_obj_link_summary_error_values[] = { { 0x01, "Unacceptable non-negotiable LINK-SUMMARY parameters"}, { 0x02, "Renegotiate LINK-SUMMARY parameters"}, { 0x04, "Invalid TE-LINK Object"}, { 0x08, "Invalid DATA-LINK Object"}, { 0x10, "Unknown TE-LINK Object c-type"}, { 0x20, "Unknown DATA-LINK Object c-type"}, { 0, NULL} }; /* Service Config Supported Protocols Flags */ static const struct tok lmp_obj_service_config_sp_flag_values[] = { { 0x01, "RSVP Supported"}, { 0x02, "LDP Supported"}, { 0, NULL} }; /* Service Config Client Port Service Attribute Transparency Flags */ static const struct tok lmp_obj_service_config_cpsa_tp_flag_values[] = { { 0x01, "Path/VC Overhead Transparency Supported"}, { 0x02, "Line/MS Overhead Transparency Supported"}, { 0x04, "Section/RS Overhead Transparency Supported"}, { 0, 
NULL} }; /* Service Config Client Port Service Attribute Contiguous Concatenation Types Flags */ static const struct tok lmp_obj_service_config_cpsa_cct_flag_values[] = { { 0x01, "Contiguous Concatenation Types Supported"}, { 0, NULL} }; /* Service Config Network Service Attributes Transparency Flags */ static const struct tok lmp_obj_service_config_nsa_transparency_flag_values[] = { { 0x01, "Standard SOH/RSOH Transparency Supported"}, { 0x02, "Standard LOH/MSOH Transparency Supported"}, { 0, NULL} }; /* Service Config Network Service Attributes TCM Monitoring Flags */ static const struct tok lmp_obj_service_config_nsa_tcm_flag_values[] = { { 0x01, "Transparent Tandem Connection Monitoring Supported"}, { 0, NULL} }; /* Network Service Attributes Network Diversity Flags */ static const struct tok lmp_obj_service_config_nsa_network_diversity_flag_values[] = { { 0x01, "Node Diversity Supported"}, { 0x02, "Link Diversity Supported"}, { 0x04, "SRLG Diversity Supported"}, { 0, NULL} }; #define LMP_MSGTYPE_CONFIG 1 #define LMP_MSGTYPE_CONFIG_ACK 2 #define LMP_MSGTYPE_CONFIG_NACK 3 #define LMP_MSGTYPE_HELLO 4 #define LMP_MSGTYPE_VERIFY_BEGIN 5 #define LMP_MSGTYPE_VERIFY_BEGIN_ACK 6 #define LMP_MSGTYPE_VERIFY_BEGIN_NACK 7 #define LMP_MSGTYPE_VERIFY_END 8 #define LMP_MSGTYPE_VERIFY_END_ACK 9 #define LMP_MSGTYPE_TEST 10 #define LMP_MSGTYPE_TEST_STATUS_SUCCESS 11 #define LMP_MSGTYPE_TEST_STATUS_FAILURE 12 #define LMP_MSGTYPE_TEST_STATUS_ACK 13 #define LMP_MSGTYPE_LINK_SUMMARY 14 #define LMP_MSGTYPE_LINK_SUMMARY_ACK 15 #define LMP_MSGTYPE_LINK_SUMMARY_NACK 16 #define LMP_MSGTYPE_CHANNEL_STATUS 17 #define LMP_MSGTYPE_CHANNEL_STATUS_ACK 18 #define LMP_MSGTYPE_CHANNEL_STATUS_REQ 19 #define LMP_MSGTYPE_CHANNEL_STATUS_RESP 20 /* LMP Service Discovery message types defined by UNI 1.0 */ #define LMP_MSGTYPE_SERVICE_CONFIG 50 #define LMP_MSGTYPE_SERVICE_CONFIG_ACK 51 #define LMP_MSGTYPE_SERVICE_CONFIG_NACK 52 static const struct tok lmp_msg_type_values[] = { { LMP_MSGTYPE_CONFIG, "Config"}, { LMP_MSGTYPE_CONFIG_ACK, "Config ACK"}, { LMP_MSGTYPE_CONFIG_NACK, "Config NACK"}, { LMP_MSGTYPE_HELLO, "Hello"}, { LMP_MSGTYPE_VERIFY_BEGIN, "Begin Verify"}, { LMP_MSGTYPE_VERIFY_BEGIN_ACK, "Begin Verify ACK"}, { LMP_MSGTYPE_VERIFY_BEGIN_NACK, "Begin Verify NACK"}, { LMP_MSGTYPE_VERIFY_END, "End Verify"}, { LMP_MSGTYPE_VERIFY_END_ACK, "End Verify ACK"}, { LMP_MSGTYPE_TEST, "Test"}, { LMP_MSGTYPE_TEST_STATUS_SUCCESS, "Test Status Success"}, { LMP_MSGTYPE_TEST_STATUS_FAILURE, "Test Status Failure"}, { LMP_MSGTYPE_TEST_STATUS_ACK, "Test Status ACK"}, { LMP_MSGTYPE_LINK_SUMMARY, "Link Summary"}, { LMP_MSGTYPE_LINK_SUMMARY_ACK, "Link Summary ACK"}, { LMP_MSGTYPE_LINK_SUMMARY_NACK, "Link Summary NACK"}, { LMP_MSGTYPE_CHANNEL_STATUS, "Channel Status"}, { LMP_MSGTYPE_CHANNEL_STATUS_ACK, "Channel Status ACK"}, { LMP_MSGTYPE_CHANNEL_STATUS_REQ, "Channel Status Request"}, { LMP_MSGTYPE_CHANNEL_STATUS_RESP, "Channel Status Response"}, { LMP_MSGTYPE_SERVICE_CONFIG, "Service Config"}, { LMP_MSGTYPE_SERVICE_CONFIG_ACK, "Service Config ACK"}, { LMP_MSGTYPE_SERVICE_CONFIG_NACK, "Service Config NACK"}, { 0, NULL} }; /* * LMP object header * * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * |N| C-Type | Class | Length | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | | * // (object contents) // * | | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ */ struct lmp_object_header { uint8_t ctype; uint8_t 
class_num; uint8_t length[2]; }; #define LMP_OBJ_CC_ID 1 #define LMP_OBJ_NODE_ID 2 #define LMP_OBJ_LINK_ID 3 #define LMP_OBJ_INTERFACE_ID 4 #define LMP_OBJ_MESSAGE_ID 5 #define LMP_OBJ_CONFIG 6 #define LMP_OBJ_HELLO 7 #define LMP_OBJ_VERIFY_BEGIN 8 #define LMP_OBJ_VERIFY_BEGIN_ACK 9 #define LMP_OBJ_VERIFY_ID 10 #define LMP_OBJ_TE_LINK 11 #define LMP_OBJ_DATA_LINK 12 #define LMP_OBJ_CHANNEL_STATUS 13 #define LMP_OBJ_CHANNEL_STATUS_REQ 14 #define LMP_OBJ_ERROR_CODE 20 #define LMP_OBJ_SERVICE_CONFIG 51 /* defined in UNI 1.0 */ static const struct tok lmp_obj_values[] = { { LMP_OBJ_CC_ID, "Control Channel ID" }, { LMP_OBJ_NODE_ID, "Node ID" }, { LMP_OBJ_LINK_ID, "Link ID" }, { LMP_OBJ_INTERFACE_ID, "Interface ID" }, { LMP_OBJ_MESSAGE_ID, "Message ID" }, { LMP_OBJ_CONFIG, "Configuration" }, { LMP_OBJ_HELLO, "Hello" }, { LMP_OBJ_VERIFY_BEGIN, "Verify Begin" }, { LMP_OBJ_VERIFY_BEGIN_ACK, "Verify Begin ACK" }, { LMP_OBJ_VERIFY_ID, "Verify ID" }, { LMP_OBJ_TE_LINK, "TE Link" }, { LMP_OBJ_DATA_LINK, "Data Link" }, { LMP_OBJ_CHANNEL_STATUS, "Channel Status" }, { LMP_OBJ_CHANNEL_STATUS_REQ, "Channel Status Request" }, { LMP_OBJ_ERROR_CODE, "Error Code" }, { LMP_OBJ_SERVICE_CONFIG, "Service Config" }, { 0, NULL} }; #define INT_SWITCHING_TYPE_SUBOBJ 1 #define WAVELENGTH_SUBOBJ 2 static const struct tok lmp_data_link_subobj[] = { { INT_SWITCHING_TYPE_SUBOBJ, "Interface Switching Type" }, { WAVELENGTH_SUBOBJ , "Wavelength" }, { 0, NULL} }; #define LMP_CTYPE_IPV4 1 #define LMP_CTYPE_IPV6 2 #define LMP_CTYPE_LOC 1 #define LMP_CTYPE_RMT 2 #define LMP_CTYPE_UNMD 3 #define LMP_CTYPE_IPV4_LOC 1 #define LMP_CTYPE_IPV4_RMT 2 #define LMP_CTYPE_IPV6_LOC 3 #define LMP_CTYPE_IPV6_RMT 4 #define LMP_CTYPE_UNMD_LOC 5 #define LMP_CTYPE_UNMD_RMT 6 #define LMP_CTYPE_1 1 #define LMP_CTYPE_2 2 #define LMP_CTYPE_HELLO_CONFIG 1 #define LMP_CTYPE_HELLO 1 #define LMP_CTYPE_BEGIN_VERIFY_ERROR 1 #define LMP_CTYPE_LINK_SUMMARY_ERROR 2 /* C-Types for Service Config Object */ #define LMP_CTYPE_SERVICE_CONFIG_SP 1 #define LMP_CTYPE_SERVICE_CONFIG_CPSA 2 #define LMP_CTYPE_SERVICE_CONFIG_TRANSPARENCY_TCM 3 #define LMP_CTYPE_SERVICE_CONFIG_NETWORK_DIVERSITY 4 /* * Different link types allowed in the Client Port Service Attributes * subobject defined for LMP Service Discovery in the UNI 1.0 spec */ #define LMP_SD_SERVICE_CONFIG_CPSA_LINK_TYPE_SDH 5 /* UNI 1.0 Sec 9.4.2 */ #define LMP_SD_SERVICE_CONFIG_CPSA_LINK_TYPE_SONET 6 /* UNI 1.0 Sec 9.4.2 */ /* * the ctypes are not globally unique so for * translating it to strings we build a table based * on objects offsetted by the ctype */ static const struct tok lmp_ctype_values[] = { { 256*LMP_OBJ_CC_ID+LMP_CTYPE_LOC, "Local" }, { 256*LMP_OBJ_CC_ID+LMP_CTYPE_RMT, "Remote" }, { 256*LMP_OBJ_NODE_ID+LMP_CTYPE_LOC, "Local" }, { 256*LMP_OBJ_NODE_ID+LMP_CTYPE_RMT, "Remote" }, { 256*LMP_OBJ_LINK_ID+LMP_CTYPE_IPV4_LOC, "IPv4 Local" }, { 256*LMP_OBJ_LINK_ID+LMP_CTYPE_IPV4_RMT, "IPv4 Remote" }, { 256*LMP_OBJ_LINK_ID+LMP_CTYPE_IPV6_LOC, "IPv6 Local" }, { 256*LMP_OBJ_LINK_ID+LMP_CTYPE_IPV6_RMT, "IPv6 Remote" }, { 256*LMP_OBJ_LINK_ID+LMP_CTYPE_UNMD_LOC, "Unnumbered Local" }, { 256*LMP_OBJ_LINK_ID+LMP_CTYPE_UNMD_RMT, "Unnumbered Remote" }, { 256*LMP_OBJ_INTERFACE_ID+LMP_CTYPE_IPV4_LOC, "IPv4 Local" }, { 256*LMP_OBJ_INTERFACE_ID+LMP_CTYPE_IPV4_RMT, "IPv4 Remote" }, { 256*LMP_OBJ_INTERFACE_ID+LMP_CTYPE_IPV6_LOC, "IPv6 Local" }, { 256*LMP_OBJ_INTERFACE_ID+LMP_CTYPE_IPV6_RMT, "IPv6 Remote" }, { 256*LMP_OBJ_INTERFACE_ID+LMP_CTYPE_UNMD_LOC, "Unnumbered Local" }, { 256*LMP_OBJ_INTERFACE_ID+LMP_CTYPE_UNMD_RMT, 
"Unnumbered Remote" }, { 256*LMP_OBJ_MESSAGE_ID+LMP_CTYPE_1, "1" }, { 256*LMP_OBJ_MESSAGE_ID+LMP_CTYPE_2, "2" }, { 256*LMP_OBJ_CONFIG+LMP_CTYPE_1, "1" }, { 256*LMP_OBJ_HELLO+LMP_CTYPE_1, "1" }, { 256*LMP_OBJ_VERIFY_BEGIN+LMP_CTYPE_1, "1" }, { 256*LMP_OBJ_VERIFY_BEGIN_ACK+LMP_CTYPE_1, "1" }, { 256*LMP_OBJ_VERIFY_ID+LMP_CTYPE_1, "1" }, { 256*LMP_OBJ_TE_LINK+LMP_CTYPE_IPV4, "IPv4" }, { 256*LMP_OBJ_TE_LINK+LMP_CTYPE_IPV6, "IPv6" }, { 256*LMP_OBJ_TE_LINK+LMP_CTYPE_UNMD, "Unnumbered" }, { 256*LMP_OBJ_DATA_LINK+LMP_CTYPE_IPV4, "IPv4" }, { 256*LMP_OBJ_DATA_LINK+LMP_CTYPE_IPV6, "IPv6" }, { 256*LMP_OBJ_DATA_LINK+LMP_CTYPE_UNMD, "Unnumbered" }, { 256*LMP_OBJ_CHANNEL_STATUS+LMP_CTYPE_IPV4, "IPv4" }, { 256*LMP_OBJ_CHANNEL_STATUS+LMP_CTYPE_IPV6, "IPv6" }, { 256*LMP_OBJ_CHANNEL_STATUS+LMP_CTYPE_UNMD, "Unnumbered" }, { 256*LMP_OBJ_CHANNEL_STATUS_REQ+LMP_CTYPE_IPV4, "IPv4" }, { 256*LMP_OBJ_CHANNEL_STATUS_REQ+LMP_CTYPE_IPV6, "IPv6" }, { 256*LMP_OBJ_CHANNEL_STATUS_REQ+LMP_CTYPE_UNMD, "Unnumbered" }, { 256*LMP_OBJ_ERROR_CODE+LMP_CTYPE_1, "1" }, { 256*LMP_OBJ_ERROR_CODE+LMP_CTYPE_2, "2" }, { 256*LMP_OBJ_SERVICE_CONFIG+LMP_CTYPE_SERVICE_CONFIG_SP, "1" }, { 256*LMP_OBJ_SERVICE_CONFIG+LMP_CTYPE_SERVICE_CONFIG_CPSA, "2" }, { 256*LMP_OBJ_SERVICE_CONFIG+LMP_CTYPE_SERVICE_CONFIG_TRANSPARENCY_TCM, "3" }, { 256*LMP_OBJ_SERVICE_CONFIG+LMP_CTYPE_SERVICE_CONFIG_NETWORK_DIVERSITY, "4" }, { 0, NULL} }; static int lmp_print_data_link_subobjs(netdissect_options *ndo, const u_char *obj_tptr, int total_subobj_len, int offset) { int hexdump = FALSE; int subobj_type, subobj_len; union { /* int to float conversion buffer */ float f; uint32_t i; } bw; while (total_subobj_len > 0 && hexdump == FALSE ) { subobj_type = EXTRACT_8BITS(obj_tptr+offset); subobj_len = EXTRACT_8BITS(obj_tptr+offset+1); ND_PRINT((ndo, "\n\t Subobject, Type: %s (%u), Length: %u", tok2str(lmp_data_link_subobj, "Unknown", subobj_type), subobj_type, subobj_len)); if (subobj_len < 4) { ND_PRINT((ndo, " (too short)")); break; } if ((subobj_len % 4) != 0) { ND_PRINT((ndo, " (not a multiple of 4)")); break; } if (total_subobj_len < subobj_len) { ND_PRINT((ndo, " (goes past the end of the object)")); break; } switch(subobj_type) { case INT_SWITCHING_TYPE_SUBOBJ: ND_PRINT((ndo, "\n\t Switching Type: %s (%u)", tok2str(gmpls_switch_cap_values, "Unknown", EXTRACT_8BITS(obj_tptr+offset+2)), EXTRACT_8BITS(obj_tptr+offset+2))); ND_PRINT((ndo, "\n\t Encoding Type: %s (%u)", tok2str(gmpls_encoding_values, "Unknown", EXTRACT_8BITS(obj_tptr+offset+3)), EXTRACT_8BITS(obj_tptr+offset+3))); bw.i = EXTRACT_32BITS(obj_tptr+offset+4); ND_PRINT((ndo, "\n\t Min Reservable Bandwidth: %.3f Mbps", bw.f*8/1000000)); bw.i = EXTRACT_32BITS(obj_tptr+offset+8); ND_PRINT((ndo, "\n\t Max Reservable Bandwidth: %.3f Mbps", bw.f*8/1000000)); break; case WAVELENGTH_SUBOBJ: ND_PRINT((ndo, "\n\t Wavelength: %u", EXTRACT_32BITS(obj_tptr+offset+4))); break; default: /* Any Unknown Subobject ==> Exit loop */ hexdump=TRUE; break; } total_subobj_len-=subobj_len; offset+=subobj_len; } return (hexdump); } void lmp_print(netdissect_options *ndo, register const u_char *pptr, register u_int len) { const struct lmp_common_header *lmp_com_header; const struct lmp_object_header *lmp_obj_header; const u_char *tptr,*obj_tptr; u_int tlen,lmp_obj_len,lmp_obj_ctype,obj_tlen; int hexdump; u_int offset; u_int link_type; union { /* int to float conversion buffer */ float f; uint32_t i; } bw; tptr=pptr; lmp_com_header = (const struct lmp_common_header *)pptr; ND_TCHECK(*lmp_com_header); /* * Sanity checking of the header. 
*/ if (LMP_EXTRACT_VERSION(lmp_com_header->version_res[0]) != LMP_VERSION) { ND_PRINT((ndo, "LMP version %u packet not supported", LMP_EXTRACT_VERSION(lmp_com_header->version_res[0]))); return; } /* in non-verbose mode just lets print the basic Message Type*/ if (ndo->ndo_vflag < 1) { ND_PRINT((ndo, "LMPv%u %s Message, length: %u", LMP_EXTRACT_VERSION(lmp_com_header->version_res[0]), tok2str(lmp_msg_type_values, "unknown (%u)",lmp_com_header->msg_type), len)); return; } /* ok they seem to want to know everything - lets fully decode it */ tlen=EXTRACT_16BITS(lmp_com_header->length); ND_PRINT((ndo, "\n\tLMPv%u, msg-type: %s, Flags: [%s], length: %u", LMP_EXTRACT_VERSION(lmp_com_header->version_res[0]), tok2str(lmp_msg_type_values, "unknown, type: %u",lmp_com_header->msg_type), bittok2str(lmp_header_flag_values,"none",lmp_com_header->flags), tlen)); if (tlen < sizeof(const struct lmp_common_header)) { ND_PRINT((ndo, " (too short)")); return; } if (tlen > len) { ND_PRINT((ndo, " (too long)")); tlen = len; } tptr+=sizeof(const struct lmp_common_header); tlen-=sizeof(const struct lmp_common_header); while(tlen>0) { /* did we capture enough for fully decoding the object header ? */ ND_TCHECK2(*tptr, sizeof(struct lmp_object_header)); lmp_obj_header = (const struct lmp_object_header *)tptr; lmp_obj_len=EXTRACT_16BITS(lmp_obj_header->length); lmp_obj_ctype=(lmp_obj_header->ctype)&0x7f; ND_PRINT((ndo, "\n\t %s Object (%u), Class-Type: %s (%u) Flags: [%snegotiable], length: %u", tok2str(lmp_obj_values, "Unknown", lmp_obj_header->class_num), lmp_obj_header->class_num, tok2str(lmp_ctype_values, "Unknown", ((lmp_obj_header->class_num)<<8)+lmp_obj_ctype), lmp_obj_ctype, (lmp_obj_header->ctype)&0x80 ? "" : "non-", lmp_obj_len)); if (lmp_obj_len < 4) { ND_PRINT((ndo, " (too short)")); return; } if ((lmp_obj_len % 4) != 0) { ND_PRINT((ndo, " (not a multiple of 4)")); return; } obj_tptr=tptr+sizeof(struct lmp_object_header); obj_tlen=lmp_obj_len-sizeof(struct lmp_object_header); /* did we capture enough for fully decoding the object ? 
*/ ND_TCHECK2(*tptr, lmp_obj_len); hexdump=FALSE; switch(lmp_obj_header->class_num) { case LMP_OBJ_CC_ID: switch(lmp_obj_ctype) { case LMP_CTYPE_LOC: case LMP_CTYPE_RMT: if (obj_tlen != 4) { ND_PRINT((ndo, " (not correct for object)")); break; } ND_PRINT((ndo, "\n\t Control Channel ID: %u (0x%08x)", EXTRACT_32BITS(obj_tptr), EXTRACT_32BITS(obj_tptr))); break; default: hexdump=TRUE; } break; case LMP_OBJ_LINK_ID: case LMP_OBJ_INTERFACE_ID: switch(lmp_obj_ctype) { case LMP_CTYPE_IPV4_LOC: case LMP_CTYPE_IPV4_RMT: if (obj_tlen != 4) { ND_PRINT((ndo, " (not correct for object)")); break; } ND_PRINT((ndo, "\n\t IPv4 Link ID: %s (0x%08x)", ipaddr_string(ndo, obj_tptr), EXTRACT_32BITS(obj_tptr))); break; case LMP_CTYPE_IPV6_LOC: case LMP_CTYPE_IPV6_RMT: if (obj_tlen != 16) { ND_PRINT((ndo, " (not correct for object)")); break; } ND_PRINT((ndo, "\n\t IPv6 Link ID: %s (0x%08x)", ip6addr_string(ndo, obj_tptr), EXTRACT_32BITS(obj_tptr))); break; case LMP_CTYPE_UNMD_LOC: case LMP_CTYPE_UNMD_RMT: if (obj_tlen != 4) { ND_PRINT((ndo, " (not correct for object)")); break; } ND_PRINT((ndo, "\n\t Link ID: %u (0x%08x)", EXTRACT_32BITS(obj_tptr), EXTRACT_32BITS(obj_tptr))); break; default: hexdump=TRUE; } break; case LMP_OBJ_MESSAGE_ID: switch(lmp_obj_ctype) { case LMP_CTYPE_1: if (obj_tlen != 4) { ND_PRINT((ndo, " (not correct for object)")); break; } ND_PRINT((ndo, "\n\t Message ID: %u (0x%08x)", EXTRACT_32BITS(obj_tptr), EXTRACT_32BITS(obj_tptr))); break; case LMP_CTYPE_2: if (obj_tlen != 4) { ND_PRINT((ndo, " (not correct for object)")); break; } ND_PRINT((ndo, "\n\t Message ID Ack: %u (0x%08x)", EXTRACT_32BITS(obj_tptr), EXTRACT_32BITS(obj_tptr))); break; default: hexdump=TRUE; } break; case LMP_OBJ_NODE_ID: switch(lmp_obj_ctype) { case LMP_CTYPE_LOC: case LMP_CTYPE_RMT: if (obj_tlen != 4) { ND_PRINT((ndo, " (not correct for object)")); break; } ND_PRINT((ndo, "\n\t Node ID: %s (0x%08x)", ipaddr_string(ndo, obj_tptr), EXTRACT_32BITS(obj_tptr))); break; default: hexdump=TRUE; } break; case LMP_OBJ_CONFIG: switch(lmp_obj_ctype) { case LMP_CTYPE_HELLO_CONFIG: if (obj_tlen != 4) { ND_PRINT((ndo, " (not correct for object)")); break; } ND_PRINT((ndo, "\n\t Hello Interval: %u\n\t Hello Dead Interval: %u", EXTRACT_16BITS(obj_tptr), EXTRACT_16BITS(obj_tptr+2))); break; default: hexdump=TRUE; } break; case LMP_OBJ_HELLO: switch(lmp_obj_ctype) { case LMP_CTYPE_HELLO: if (obj_tlen != 8) { ND_PRINT((ndo, " (not correct for object)")); break; } ND_PRINT((ndo, "\n\t Tx Seq: %u, Rx Seq: %u", EXTRACT_32BITS(obj_tptr), EXTRACT_32BITS(obj_tptr+4))); break; default: hexdump=TRUE; } break; case LMP_OBJ_TE_LINK: switch(lmp_obj_ctype) { case LMP_CTYPE_IPV4: if (obj_tlen != 12) { ND_PRINT((ndo, " (not correct for object)")); break; } ND_PRINT((ndo, "\n\t Flags: [%s]", bittok2str(lmp_obj_te_link_flag_values, "none", EXTRACT_8BITS(obj_tptr)))); ND_PRINT((ndo, "\n\t Local Link-ID: %s (0x%08x)" "\n\t Remote Link-ID: %s (0x%08x)", ipaddr_string(ndo, obj_tptr+4), EXTRACT_32BITS(obj_tptr+4), ipaddr_string(ndo, obj_tptr+8), EXTRACT_32BITS(obj_tptr+8))); break; case LMP_CTYPE_IPV6: if (obj_tlen != 36) { ND_PRINT((ndo, " (not correct for object)")); break; } ND_PRINT((ndo, "\n\t Flags: [%s]", bittok2str(lmp_obj_te_link_flag_values, "none", EXTRACT_8BITS(obj_tptr)))); ND_PRINT((ndo, "\n\t Local Link-ID: %s (0x%08x)" "\n\t Remote Link-ID: %s (0x%08x)", ip6addr_string(ndo, obj_tptr+4), EXTRACT_32BITS(obj_tptr+4), ip6addr_string(ndo, obj_tptr+20), EXTRACT_32BITS(obj_tptr+20))); break; case LMP_CTYPE_UNMD: if (obj_tlen != 12) { 
ND_PRINT((ndo, " (not correct for object)")); break; } ND_PRINT((ndo, "\n\t Flags: [%s]", bittok2str(lmp_obj_te_link_flag_values, "none", EXTRACT_8BITS(obj_tptr)))); ND_PRINT((ndo, "\n\t Local Link-ID: %u (0x%08x)" "\n\t Remote Link-ID: %u (0x%08x)", EXTRACT_32BITS(obj_tptr+4), EXTRACT_32BITS(obj_tptr+4), EXTRACT_32BITS(obj_tptr+8), EXTRACT_32BITS(obj_tptr+8))); break; default: hexdump=TRUE; } break; case LMP_OBJ_DATA_LINK: switch(lmp_obj_ctype) { case LMP_CTYPE_IPV4: if (obj_tlen < 12) { ND_PRINT((ndo, " (not correct for object)")); break; } ND_PRINT((ndo, "\n\t Flags: [%s]", bittok2str(lmp_obj_data_link_flag_values, "none", EXTRACT_8BITS(obj_tptr)))); ND_PRINT((ndo, "\n\t Local Interface ID: %s (0x%08x)" "\n\t Remote Interface ID: %s (0x%08x)", ipaddr_string(ndo, obj_tptr+4), EXTRACT_32BITS(obj_tptr+4), ipaddr_string(ndo, obj_tptr+8), EXTRACT_32BITS(obj_tptr+8))); if (lmp_print_data_link_subobjs(ndo, obj_tptr, obj_tlen - 12, 12)) hexdump=TRUE; break; case LMP_CTYPE_IPV6: if (obj_tlen < 36) { ND_PRINT((ndo, " (not correct for object)")); break; } ND_PRINT((ndo, "\n\t Flags: [%s]", bittok2str(lmp_obj_data_link_flag_values, "none", EXTRACT_8BITS(obj_tptr)))); ND_PRINT((ndo, "\n\t Local Interface ID: %s (0x%08x)" "\n\t Remote Interface ID: %s (0x%08x)", ip6addr_string(ndo, obj_tptr+4), EXTRACT_32BITS(obj_tptr+4), ip6addr_string(ndo, obj_tptr+20), EXTRACT_32BITS(obj_tptr+20))); if (lmp_print_data_link_subobjs(ndo, obj_tptr, obj_tlen - 36, 36)) hexdump=TRUE; break; case LMP_CTYPE_UNMD: if (obj_tlen < 12) { ND_PRINT((ndo, " (not correct for object)")); break; } ND_PRINT((ndo, "\n\t Flags: [%s]", bittok2str(lmp_obj_data_link_flag_values, "none", EXTRACT_8BITS(obj_tptr)))); ND_PRINT((ndo, "\n\t Local Interface ID: %u (0x%08x)" "\n\t Remote Interface ID: %u (0x%08x)", EXTRACT_32BITS(obj_tptr+4), EXTRACT_32BITS(obj_tptr+4), EXTRACT_32BITS(obj_tptr+8), EXTRACT_32BITS(obj_tptr+8))); if (lmp_print_data_link_subobjs(ndo, obj_tptr, obj_tlen - 12, 12)) hexdump=TRUE; break; default: hexdump=TRUE; } break; case LMP_OBJ_VERIFY_BEGIN: switch(lmp_obj_ctype) { case LMP_CTYPE_1: if (obj_tlen != 20) { ND_PRINT((ndo, " (not correct for object)")); break; } ND_PRINT((ndo, "\n\t Flags: %s", bittok2str(lmp_obj_begin_verify_flag_values, "none", EXTRACT_16BITS(obj_tptr)))); ND_PRINT((ndo, "\n\t Verify Interval: %u", EXTRACT_16BITS(obj_tptr+2))); ND_PRINT((ndo, "\n\t Data links: %u", EXTRACT_32BITS(obj_tptr+4))); ND_PRINT((ndo, "\n\t Encoding type: %s", tok2str(gmpls_encoding_values, "Unknown", *(obj_tptr+8)))); ND_PRINT((ndo, "\n\t Verify Transport Mechanism: %u (0x%x)%s", EXTRACT_16BITS(obj_tptr+10), EXTRACT_16BITS(obj_tptr+10), EXTRACT_16BITS(obj_tptr+10)&8000 ? 
" (Payload test messages capable)" : "")); bw.i = EXTRACT_32BITS(obj_tptr+12); ND_PRINT((ndo, "\n\t Transmission Rate: %.3f Mbps",bw.f*8/1000000)); ND_PRINT((ndo, "\n\t Wavelength: %u", EXTRACT_32BITS(obj_tptr+16))); break; default: hexdump=TRUE; } break; case LMP_OBJ_VERIFY_BEGIN_ACK: switch(lmp_obj_ctype) { case LMP_CTYPE_1: if (obj_tlen != 4) { ND_PRINT((ndo, " (not correct for object)")); break; } ND_PRINT((ndo, "\n\t Verify Dead Interval: %u" "\n\t Verify Transport Response: %u", EXTRACT_16BITS(obj_tptr), EXTRACT_16BITS(obj_tptr+2))); break; default: hexdump=TRUE; } break; case LMP_OBJ_VERIFY_ID: switch(lmp_obj_ctype) { case LMP_CTYPE_1: if (obj_tlen != 4) { ND_PRINT((ndo, " (not correct for object)")); break; } ND_PRINT((ndo, "\n\t Verify ID: %u", EXTRACT_32BITS(obj_tptr))); break; default: hexdump=TRUE; } break; case LMP_OBJ_CHANNEL_STATUS: switch(lmp_obj_ctype) { case LMP_CTYPE_IPV4: offset = 0; /* Decode pairs: <Interface_ID (4 bytes), Channel_status (4 bytes)> */ while (offset+8 <= obj_tlen) { ND_PRINT((ndo, "\n\t Interface ID: %s (0x%08x)", ipaddr_string(ndo, obj_tptr+offset), EXTRACT_32BITS(obj_tptr+offset))); ND_PRINT((ndo, "\n\t\t Active: %s (%u)", (EXTRACT_32BITS(obj_tptr+offset+4)>>31) ? "Allocated" : "Non-allocated", (EXTRACT_32BITS(obj_tptr+offset+4)>>31))); ND_PRINT((ndo, "\n\t\t Direction: %s (%u)", (EXTRACT_32BITS(obj_tptr+offset+4)>>30)&0x1 ? "Transmit" : "Receive", (EXTRACT_32BITS(obj_tptr+offset+4)>>30)&0x1)); ND_PRINT((ndo, "\n\t\t Channel Status: %s (%u)", tok2str(lmp_obj_channel_status_values, "Unknown", EXTRACT_32BITS(obj_tptr+offset+4)&0x3FFFFFF), EXTRACT_32BITS(obj_tptr+offset+4)&0x3FFFFFF)); offset+=8; } break; case LMP_CTYPE_IPV6: offset = 0; /* Decode pairs: <Interface_ID (16 bytes), Channel_status (4 bytes)> */ while (offset+20 <= obj_tlen) { ND_PRINT((ndo, "\n\t Interface ID: %s (0x%08x)", ip6addr_string(ndo, obj_tptr+offset), EXTRACT_32BITS(obj_tptr+offset))); ND_PRINT((ndo, "\n\t\t Active: %s (%u)", (EXTRACT_32BITS(obj_tptr+offset+16)>>31) ? "Allocated" : "Non-allocated", (EXTRACT_32BITS(obj_tptr+offset+16)>>31))); ND_PRINT((ndo, "\n\t\t Direction: %s (%u)", (EXTRACT_32BITS(obj_tptr+offset+16)>>30)&0x1 ? "Transmit" : "Receive", (EXTRACT_32BITS(obj_tptr+offset+16)>>30)&0x1)); ND_PRINT((ndo, "\n\t\t Channel Status: %s (%u)", tok2str(lmp_obj_channel_status_values, "Unknown", EXTRACT_32BITS(obj_tptr+offset+16)&0x3FFFFFF), EXTRACT_32BITS(obj_tptr+offset+16)&0x3FFFFFF)); offset+=20; } break; case LMP_CTYPE_UNMD: offset = 0; /* Decode pairs: <Interface_ID (4 bytes), Channel_status (4 bytes)> */ while (offset+8 <= obj_tlen) { ND_PRINT((ndo, "\n\t Interface ID: %u (0x%08x)", EXTRACT_32BITS(obj_tptr+offset), EXTRACT_32BITS(obj_tptr+offset))); ND_PRINT((ndo, "\n\t\t Active: %s (%u)", (EXTRACT_32BITS(obj_tptr+offset+4)>>31) ? "Allocated" : "Non-allocated", (EXTRACT_32BITS(obj_tptr+offset+4)>>31))); ND_PRINT((ndo, "\n\t\t Direction: %s (%u)", (EXTRACT_32BITS(obj_tptr+offset+4)>>30)&0x1 ? 
"Transmit" : "Receive", (EXTRACT_32BITS(obj_tptr+offset+4)>>30)&0x1)); ND_PRINT((ndo, "\n\t\t Channel Status: %s (%u)", tok2str(lmp_obj_channel_status_values, "Unknown", EXTRACT_32BITS(obj_tptr+offset+4)&0x3FFFFFF), EXTRACT_32BITS(obj_tptr+offset+4)&0x3FFFFFF)); offset+=8; } break; default: hexdump=TRUE; } break; case LMP_OBJ_CHANNEL_STATUS_REQ: switch(lmp_obj_ctype) { case LMP_CTYPE_IPV4: offset = 0; while (offset+4 <= obj_tlen) { ND_PRINT((ndo, "\n\t Interface ID: %s (0x%08x)", ipaddr_string(ndo, obj_tptr+offset), EXTRACT_32BITS(obj_tptr+offset))); offset+=4; } break; case LMP_CTYPE_IPV6: offset = 0; while (offset+16 <= obj_tlen) { ND_PRINT((ndo, "\n\t Interface ID: %s (0x%08x)", ip6addr_string(ndo, obj_tptr+offset), EXTRACT_32BITS(obj_tptr+offset))); offset+=16; } break; case LMP_CTYPE_UNMD: offset = 0; while (offset+4 <= obj_tlen) { ND_PRINT((ndo, "\n\t Interface ID: %u (0x%08x)", EXTRACT_32BITS(obj_tptr+offset), EXTRACT_32BITS(obj_tptr+offset))); offset+=4; } break; default: hexdump=TRUE; } break; case LMP_OBJ_ERROR_CODE: switch(lmp_obj_ctype) { case LMP_CTYPE_BEGIN_VERIFY_ERROR: if (obj_tlen != 4) { ND_PRINT((ndo, " (not correct for object)")); break; } ND_PRINT((ndo, "\n\t Error Code: %s", bittok2str(lmp_obj_begin_verify_error_values, "none", EXTRACT_32BITS(obj_tptr)))); break; case LMP_CTYPE_LINK_SUMMARY_ERROR: if (obj_tlen != 4) { ND_PRINT((ndo, " (not correct for object)")); break; } ND_PRINT((ndo, "\n\t Error Code: %s", bittok2str(lmp_obj_link_summary_error_values, "none", EXTRACT_32BITS(obj_tptr)))); break; default: hexdump=TRUE; } break; case LMP_OBJ_SERVICE_CONFIG: switch (lmp_obj_ctype) { case LMP_CTYPE_SERVICE_CONFIG_SP: if (obj_tlen != 4) { ND_PRINT((ndo, " (not correct for object)")); break; } ND_PRINT((ndo, "\n\t Flags: %s", bittok2str(lmp_obj_service_config_sp_flag_values, "none", EXTRACT_8BITS(obj_tptr)))); ND_PRINT((ndo, "\n\t UNI Version: %u", EXTRACT_8BITS(obj_tptr+1))); break; case LMP_CTYPE_SERVICE_CONFIG_CPSA: if (obj_tlen != 16) { ND_PRINT((ndo, " (not correct for object)")); break; } link_type = EXTRACT_8BITS(obj_tptr); ND_PRINT((ndo, "\n\t Link Type: %s (%u)", tok2str(lmp_sd_service_config_cpsa_link_type_values, "Unknown", link_type), link_type)); switch (link_type) { case LMP_SD_SERVICE_CONFIG_CPSA_LINK_TYPE_SDH: ND_PRINT((ndo, "\n\t Signal Type: %s (%u)", tok2str(lmp_sd_service_config_cpsa_signal_type_sdh_values, "Unknown", EXTRACT_8BITS(obj_tptr+1)), EXTRACT_8BITS(obj_tptr+1))); break; case LMP_SD_SERVICE_CONFIG_CPSA_LINK_TYPE_SONET: ND_PRINT((ndo, "\n\t Signal Type: %s (%u)", tok2str(lmp_sd_service_config_cpsa_signal_type_sonet_values, "Unknown", EXTRACT_8BITS(obj_tptr+1)), EXTRACT_8BITS(obj_tptr+1))); break; } ND_PRINT((ndo, "\n\t Transparency: %s", bittok2str(lmp_obj_service_config_cpsa_tp_flag_values, "none", EXTRACT_8BITS(obj_tptr+2)))); ND_PRINT((ndo, "\n\t Contiguous Concatenation Types: %s", bittok2str(lmp_obj_service_config_cpsa_cct_flag_values, "none", EXTRACT_8BITS(obj_tptr+3)))); ND_PRINT((ndo, "\n\t Minimum NCC: %u", EXTRACT_16BITS(obj_tptr+4))); ND_PRINT((ndo, "\n\t Maximum NCC: %u", EXTRACT_16BITS(obj_tptr+6))); ND_PRINT((ndo, "\n\t Minimum NVC:%u", EXTRACT_16BITS(obj_tptr+8))); ND_PRINT((ndo, "\n\t Maximum NVC:%u", EXTRACT_16BITS(obj_tptr+10))); ND_PRINT((ndo, "\n\t Local Interface ID: %s (0x%08x)", ipaddr_string(ndo, obj_tptr+12), EXTRACT_32BITS(obj_tptr+12))); break; case LMP_CTYPE_SERVICE_CONFIG_TRANSPARENCY_TCM: if (obj_tlen != 8) { ND_PRINT((ndo, " (not correct for object)")); break; } ND_PRINT((ndo, "\n\t Transparency Flags: %s", 
bittok2str( lmp_obj_service_config_nsa_transparency_flag_values, "none", EXTRACT_32BITS(obj_tptr)))); ND_PRINT((ndo, "\n\t TCM Monitoring Flags: %s", bittok2str( lmp_obj_service_config_nsa_tcm_flag_values, "none", EXTRACT_8BITS(obj_tptr+7)))); break; case LMP_CTYPE_SERVICE_CONFIG_NETWORK_DIVERSITY: if (obj_tlen != 4) { ND_PRINT((ndo, " (not correct for object)")); break; } ND_PRINT((ndo, "\n\t Diversity: Flags: %s", bittok2str( lmp_obj_service_config_nsa_network_diversity_flag_values, "none", EXTRACT_8BITS(obj_tptr+3)))); break; default: hexdump = TRUE; } break; default: if (ndo->ndo_vflag <= 1) print_unknown_data(ndo,obj_tptr,"\n\t ",obj_tlen); break; } /* do we want to see an additionally hexdump ? */ if (ndo->ndo_vflag > 1 || hexdump==TRUE) print_unknown_data(ndo,tptr+sizeof(struct lmp_object_header),"\n\t ", lmp_obj_len-sizeof(struct lmp_object_header)); tptr+=lmp_obj_len; tlen-=lmp_obj_len; } return; trunc: ND_PRINT((ndo, "\n\t\t packet exceeded snapshot")); } /* * Local Variables: * c-style: whitesmith * c-basic-offset: 8 * End: */
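The patched file above changes the CHANNEL_STATUS iteration from "offset < object-length-minus-header" to "offset + record-size <= obj_tlen", so a whole fixed-size record must fit in the remaining payload before any byte of it is read. A minimal standalone sketch of that guard follows (read_be32() and the 8-byte <Interface_ID, Channel_status> record layout are illustrative assumptions, not tcpdump's API):

/*
 * Sketch of the record-iteration guard used in the patched handlers:
 * require the complete fixed-size record before touching any byte.
 */
#include <stdint.h>

static uint32_t read_be32(const unsigned char *p)
{
    return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
           ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
}

static void walk_pairs(const unsigned char *payload, unsigned int tlen)
{
    unsigned int offset = 0;

    /* "offset + 8 <= tlen" rejects a trailing partial record; the older
     * "offset < tlen" style allowed reads up to 7 bytes past the end. */
    while (offset + 8 <= tlen) {
        uint32_t ifid   = read_be32(payload + offset);
        uint32_t status = read_be32(payload + offset + 4);
        (void)ifid; (void)status;  /* a real printer would format these */
        offset += 8;
    }
}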
lmp_print(netdissect_options *ndo, register const u_char *pptr, register u_int len) { const struct lmp_common_header *lmp_com_header; const struct lmp_object_header *lmp_obj_header; const u_char *tptr,*obj_tptr; int tlen,lmp_obj_len,lmp_obj_ctype,obj_tlen; int hexdump; int offset,subobj_type,subobj_len,total_subobj_len; int link_type; union { /* int to float conversion buffer */ float f; uint32_t i; } bw; tptr=pptr; lmp_com_header = (const struct lmp_common_header *)pptr; ND_TCHECK(*lmp_com_header); /* * Sanity checking of the header. */ if (LMP_EXTRACT_VERSION(lmp_com_header->version_res[0]) != LMP_VERSION) { ND_PRINT((ndo, "LMP version %u packet not supported", LMP_EXTRACT_VERSION(lmp_com_header->version_res[0]))); return; } /* in non-verbose mode just lets print the basic Message Type*/ if (ndo->ndo_vflag < 1) { ND_PRINT((ndo, "LMPv%u %s Message, length: %u", LMP_EXTRACT_VERSION(lmp_com_header->version_res[0]), tok2str(lmp_msg_type_values, "unknown (%u)",lmp_com_header->msg_type), len)); return; } /* ok they seem to want to know everything - lets fully decode it */ tlen=EXTRACT_16BITS(lmp_com_header->length); ND_PRINT((ndo, "\n\tLMPv%u, msg-type: %s, Flags: [%s], length: %u", LMP_EXTRACT_VERSION(lmp_com_header->version_res[0]), tok2str(lmp_msg_type_values, "unknown, type: %u",lmp_com_header->msg_type), bittok2str(lmp_header_flag_values,"none",lmp_com_header->flags), tlen)); tptr+=sizeof(const struct lmp_common_header); tlen-=sizeof(const struct lmp_common_header); while(tlen>0) { /* did we capture enough for fully decoding the object header ? */ ND_TCHECK2(*tptr, sizeof(struct lmp_object_header)); lmp_obj_header = (const struct lmp_object_header *)tptr; lmp_obj_len=EXTRACT_16BITS(lmp_obj_header->length); lmp_obj_ctype=(lmp_obj_header->ctype)&0x7f; if(lmp_obj_len % 4 || lmp_obj_len < 4) return; ND_PRINT((ndo, "\n\t %s Object (%u), Class-Type: %s (%u) Flags: [%snegotiable], length: %u", tok2str(lmp_obj_values, "Unknown", lmp_obj_header->class_num), lmp_obj_header->class_num, tok2str(lmp_ctype_values, "Unknown", ((lmp_obj_header->class_num)<<8)+lmp_obj_ctype), lmp_obj_ctype, (lmp_obj_header->ctype)&0x80 ? "" : "non-", lmp_obj_len)); obj_tptr=tptr+sizeof(struct lmp_object_header); obj_tlen=lmp_obj_len-sizeof(struct lmp_object_header); /* did we capture enough for fully decoding the object ? 
*/ ND_TCHECK2(*tptr, lmp_obj_len); hexdump=FALSE; switch(lmp_obj_header->class_num) { case LMP_OBJ_CC_ID: switch(lmp_obj_ctype) { case LMP_CTYPE_LOC: case LMP_CTYPE_RMT: ND_PRINT((ndo, "\n\t Control Channel ID: %u (0x%08x)", EXTRACT_32BITS(obj_tptr), EXTRACT_32BITS(obj_tptr))); break; default: hexdump=TRUE; } break; case LMP_OBJ_LINK_ID: case LMP_OBJ_INTERFACE_ID: switch(lmp_obj_ctype) { case LMP_CTYPE_IPV4_LOC: case LMP_CTYPE_IPV4_RMT: ND_PRINT((ndo, "\n\t IPv4 Link ID: %s (0x%08x)", ipaddr_string(ndo, obj_tptr), EXTRACT_32BITS(obj_tptr))); break; case LMP_CTYPE_IPV6_LOC: case LMP_CTYPE_IPV6_RMT: ND_PRINT((ndo, "\n\t IPv6 Link ID: %s (0x%08x)", ip6addr_string(ndo, obj_tptr), EXTRACT_32BITS(obj_tptr))); break; case LMP_CTYPE_UNMD_LOC: case LMP_CTYPE_UNMD_RMT: ND_PRINT((ndo, "\n\t Link ID: %u (0x%08x)", EXTRACT_32BITS(obj_tptr), EXTRACT_32BITS(obj_tptr))); break; default: hexdump=TRUE; } break; case LMP_OBJ_MESSAGE_ID: switch(lmp_obj_ctype) { case LMP_CTYPE_1: ND_PRINT((ndo, "\n\t Message ID: %u (0x%08x)", EXTRACT_32BITS(obj_tptr), EXTRACT_32BITS(obj_tptr))); break; case LMP_CTYPE_2: ND_PRINT((ndo, "\n\t Message ID Ack: %u (0x%08x)", EXTRACT_32BITS(obj_tptr), EXTRACT_32BITS(obj_tptr))); break; default: hexdump=TRUE; } break; case LMP_OBJ_NODE_ID: switch(lmp_obj_ctype) { case LMP_CTYPE_LOC: case LMP_CTYPE_RMT: ND_PRINT((ndo, "\n\t Node ID: %s (0x%08x)", ipaddr_string(ndo, obj_tptr), EXTRACT_32BITS(obj_tptr))); break; default: hexdump=TRUE; } break; case LMP_OBJ_CONFIG: switch(lmp_obj_ctype) { case LMP_CTYPE_HELLO_CONFIG: ND_PRINT((ndo, "\n\t Hello Interval: %u\n\t Hello Dead Interval: %u", EXTRACT_16BITS(obj_tptr), EXTRACT_16BITS(obj_tptr+2))); break; default: hexdump=TRUE; } break; case LMP_OBJ_HELLO: switch(lmp_obj_ctype) { case LMP_CTYPE_HELLO: ND_PRINT((ndo, "\n\t Tx Seq: %u, Rx Seq: %u", EXTRACT_32BITS(obj_tptr), EXTRACT_32BITS(obj_tptr+4))); break; default: hexdump=TRUE; } break; case LMP_OBJ_TE_LINK: ND_PRINT((ndo, "\n\t Flags: [%s]", bittok2str(lmp_obj_te_link_flag_values, "none", EXTRACT_16BITS(obj_tptr)>>8))); switch(lmp_obj_ctype) { case LMP_CTYPE_IPV4: ND_PRINT((ndo, "\n\t Local Link-ID: %s (0x%08x)" "\n\t Remote Link-ID: %s (0x%08x)", ipaddr_string(ndo, obj_tptr+4), EXTRACT_32BITS(obj_tptr+4), ipaddr_string(ndo, obj_tptr+8), EXTRACT_32BITS(obj_tptr+8))); break; case LMP_CTYPE_IPV6: case LMP_CTYPE_UNMD: default: hexdump=TRUE; } break; case LMP_OBJ_DATA_LINK: ND_PRINT((ndo, "\n\t Flags: [%s]", bittok2str(lmp_obj_data_link_flag_values, "none", EXTRACT_16BITS(obj_tptr)>>8))); switch(lmp_obj_ctype) { case LMP_CTYPE_IPV4: case LMP_CTYPE_UNMD: ND_PRINT((ndo, "\n\t Local Interface ID: %s (0x%08x)" "\n\t Remote Interface ID: %s (0x%08x)", ipaddr_string(ndo, obj_tptr+4), EXTRACT_32BITS(obj_tptr+4), ipaddr_string(ndo, obj_tptr+8), EXTRACT_32BITS(obj_tptr+8))); total_subobj_len = lmp_obj_len - 16; offset = 12; while (total_subobj_len > 0 && hexdump == FALSE ) { subobj_type = EXTRACT_16BITS(obj_tptr+offset)>>8; subobj_len = EXTRACT_16BITS(obj_tptr+offset)&0x00FF; ND_PRINT((ndo, "\n\t Subobject, Type: %s (%u), Length: %u", tok2str(lmp_data_link_subobj, "Unknown", subobj_type), subobj_type, subobj_len)); switch(subobj_type) { case INT_SWITCHING_TYPE_SUBOBJ: ND_PRINT((ndo, "\n\t Switching Type: %s (%u)", tok2str(gmpls_switch_cap_values, "Unknown", EXTRACT_16BITS(obj_tptr+offset+2)>>8), EXTRACT_16BITS(obj_tptr+offset+2)>>8)); ND_PRINT((ndo, "\n\t Encoding Type: %s (%u)", tok2str(gmpls_encoding_values, "Unknown", EXTRACT_16BITS(obj_tptr+offset+2)&0x00FF), 
EXTRACT_16BITS(obj_tptr+offset+2)&0x00FF)); bw.i = EXTRACT_32BITS(obj_tptr+offset+4); ND_PRINT((ndo, "\n\t Min Reservable Bandwidth: %.3f Mbps", bw.f*8/1000000)); bw.i = EXTRACT_32BITS(obj_tptr+offset+8); ND_PRINT((ndo, "\n\t Max Reservable Bandwidth: %.3f Mbps", bw.f*8/1000000)); break; case WAVELENGTH_SUBOBJ: ND_PRINT((ndo, "\n\t Wavelength: %u", EXTRACT_32BITS(obj_tptr+offset+4))); break; default: /* Any Unknown Subobject ==> Exit loop */ hexdump=TRUE; break; } total_subobj_len-=subobj_len; offset+=subobj_len; } break; case LMP_CTYPE_IPV6: default: hexdump=TRUE; } break; case LMP_OBJ_VERIFY_BEGIN: switch(lmp_obj_ctype) { case LMP_CTYPE_1: ND_PRINT((ndo, "\n\t Flags: %s", bittok2str(lmp_obj_begin_verify_flag_values, "none", EXTRACT_16BITS(obj_tptr)))); ND_PRINT((ndo, "\n\t Verify Interval: %u", EXTRACT_16BITS(obj_tptr+2))); ND_PRINT((ndo, "\n\t Data links: %u", EXTRACT_32BITS(obj_tptr+4))); ND_PRINT((ndo, "\n\t Encoding type: %s", tok2str(gmpls_encoding_values, "Unknown", *(obj_tptr+8)))); ND_PRINT((ndo, "\n\t Verify Transport Mechanism: %u (0x%x)%s", EXTRACT_16BITS(obj_tptr+10), EXTRACT_16BITS(obj_tptr+10), EXTRACT_16BITS(obj_tptr+10)&8000 ? " (Payload test messages capable)" : "")); bw.i = EXTRACT_32BITS(obj_tptr+12); ND_PRINT((ndo, "\n\t Transmission Rate: %.3f Mbps",bw.f*8/1000000)); ND_PRINT((ndo, "\n\t Wavelength: %u", EXTRACT_32BITS(obj_tptr+16))); break; default: hexdump=TRUE; } break; case LMP_OBJ_VERIFY_BEGIN_ACK: switch(lmp_obj_ctype) { case LMP_CTYPE_1: ND_PRINT((ndo, "\n\t Verify Dead Interval: %u" "\n\t Verify Transport Response: %u", EXTRACT_16BITS(obj_tptr), EXTRACT_16BITS(obj_tptr+2))); break; default: hexdump=TRUE; } break; case LMP_OBJ_VERIFY_ID: switch(lmp_obj_ctype) { case LMP_CTYPE_1: ND_PRINT((ndo, "\n\t Verify ID: %u", EXTRACT_32BITS(obj_tptr))); break; default: hexdump=TRUE; } break; case LMP_OBJ_CHANNEL_STATUS: switch(lmp_obj_ctype) { case LMP_CTYPE_IPV4: case LMP_CTYPE_UNMD: offset = 0; /* Decode pairs: <Interface_ID (4 bytes), Channel_status (4 bytes)> */ while (offset < (lmp_obj_len-(int)sizeof(struct lmp_object_header)) ) { ND_PRINT((ndo, "\n\t Interface ID: %s (0x%08x)", ipaddr_string(ndo, obj_tptr+offset), EXTRACT_32BITS(obj_tptr+offset))); ND_PRINT((ndo, "\n\t\t Active: %s (%u)", (EXTRACT_32BITS(obj_tptr+offset+4)>>31) ? "Allocated" : "Non-allocated", (EXTRACT_32BITS(obj_tptr+offset+4)>>31))); ND_PRINT((ndo, "\n\t\t Direction: %s (%u)", (EXTRACT_32BITS(obj_tptr+offset+4)>>30)&0x1 ? 
"Transmit" : "Receive", (EXTRACT_32BITS(obj_tptr+offset+4)>>30)&0x1)); ND_PRINT((ndo, "\n\t\t Channel Status: %s (%u)", tok2str(lmp_obj_channel_status_values, "Unknown", EXTRACT_32BITS(obj_tptr+offset+4)&0x3FFFFFF), EXTRACT_32BITS(obj_tptr+offset+4)&0x3FFFFFF)); offset+=8; } break; case LMP_CTYPE_IPV6: default: hexdump=TRUE; } break; case LMP_OBJ_CHANNEL_STATUS_REQ: switch(lmp_obj_ctype) { case LMP_CTYPE_IPV4: case LMP_CTYPE_UNMD: offset = 0; while (offset < (lmp_obj_len-(int)sizeof(struct lmp_object_header)) ) { ND_PRINT((ndo, "\n\t Interface ID: %s (0x%08x)", ipaddr_string(ndo, obj_tptr+offset), EXTRACT_32BITS(obj_tptr+offset))); offset+=4; } break; case LMP_CTYPE_IPV6: default: hexdump=TRUE; } break; case LMP_OBJ_ERROR_CODE: switch(lmp_obj_ctype) { case LMP_CTYPE_BEGIN_VERIFY_ERROR: ND_PRINT((ndo, "\n\t Error Code: %s", bittok2str(lmp_obj_begin_verify_error_values, "none", EXTRACT_32BITS(obj_tptr)))); break; case LMP_CTYPE_LINK_SUMMARY_ERROR: ND_PRINT((ndo, "\n\t Error Code: %s", bittok2str(lmp_obj_link_summary_error_values, "none", EXTRACT_32BITS(obj_tptr)))); break; default: hexdump=TRUE; } break; case LMP_OBJ_SERVICE_CONFIG: switch (lmp_obj_ctype) { case LMP_CTYPE_SERVICE_CONFIG_SP: ND_PRINT((ndo, "\n\t Flags: %s", bittok2str(lmp_obj_service_config_sp_flag_values, "none", EXTRACT_16BITS(obj_tptr)>>8))); ND_PRINT((ndo, "\n\t UNI Version: %u", EXTRACT_16BITS(obj_tptr) & 0x00FF)); break; case LMP_CTYPE_SERVICE_CONFIG_CPSA: link_type = EXTRACT_16BITS(obj_tptr)>>8; ND_PRINT((ndo, "\n\t Link Type: %s (%u)", tok2str(lmp_sd_service_config_cpsa_link_type_values, "Unknown", link_type), link_type)); if (link_type == LMP_SD_SERVICE_CONFIG_CPSA_LINK_TYPE_SDH) { ND_PRINT((ndo, "\n\t Signal Type: %s (%u)", tok2str(lmp_sd_service_config_cpsa_signal_type_sdh_values, "Unknown", EXTRACT_16BITS(obj_tptr) & 0x00FF), EXTRACT_16BITS(obj_tptr) & 0x00FF)); } if (link_type == LMP_SD_SERVICE_CONFIG_CPSA_LINK_TYPE_SONET) { ND_PRINT((ndo, "\n\t Signal Type: %s (%u)", tok2str(lmp_sd_service_config_cpsa_signal_type_sonet_values, "Unknown", EXTRACT_16BITS(obj_tptr) & 0x00FF), EXTRACT_16BITS(obj_tptr) & 0x00FF)); } ND_PRINT((ndo, "\n\t Transparency: %s", bittok2str(lmp_obj_service_config_cpsa_tp_flag_values, "none", EXTRACT_16BITS(obj_tptr+2)>>8))); ND_PRINT((ndo, "\n\t Contiguous Concatenation Types: %s", bittok2str(lmp_obj_service_config_cpsa_cct_flag_values, "none", EXTRACT_16BITS(obj_tptr+2)>>8 & 0x00FF))); ND_PRINT((ndo, "\n\t Minimum NCC: %u", EXTRACT_16BITS(obj_tptr+4))); ND_PRINT((ndo, "\n\t Maximum NCC: %u", EXTRACT_16BITS(obj_tptr+6))); ND_PRINT((ndo, "\n\t Minimum NVC:%u", EXTRACT_16BITS(obj_tptr+8))); ND_PRINT((ndo, "\n\t Maximum NVC:%u", EXTRACT_16BITS(obj_tptr+10))); ND_PRINT((ndo, "\n\t Local Interface ID: %s (0x%08x)", ipaddr_string(ndo, obj_tptr+12), EXTRACT_32BITS(obj_tptr+12))); break; case LMP_CTYPE_SERVICE_CONFIG_TRANSPARENCY_TCM: ND_PRINT((ndo, "\n\t Transparency Flags: %s", bittok2str( lmp_obj_service_config_nsa_transparency_flag_values, "none", EXTRACT_32BITS(obj_tptr)))); ND_PRINT((ndo, "\n\t TCM Monitoring Flags: %s", bittok2str( lmp_obj_service_config_nsa_tcm_flag_values, "none", EXTRACT_16BITS(obj_tptr+6) & 0x00FF))); break; case LMP_CTYPE_SERVICE_CONFIG_NETWORK_DIVERSITY: ND_PRINT((ndo, "\n\t Diversity: Flags: %s", bittok2str( lmp_obj_service_config_nsa_network_diversity_flag_values, "none", EXTRACT_16BITS(obj_tptr+2) & 0x00FF))); break; default: hexdump = TRUE; } break; default: if (ndo->ndo_vflag <= 1) print_unknown_data(ndo,obj_tptr,"\n\t ",obj_tlen); break; } /* do we want to see an 
additionally hexdump ? */ if (ndo->ndo_vflag > 1 || hexdump==TRUE) print_unknown_data(ndo,tptr+sizeof(struct lmp_object_header),"\n\t ", lmp_obj_len-sizeof(struct lmp_object_header)); tptr+=lmp_obj_len; tlen-=lmp_obj_len; } return; trunc: ND_PRINT((ndo, "\n\t\t packet exceeded snapshot")); }
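The func_before version above shows the root cause of the over-read: it extracts fixed-offset fields from obj_tptr without first proving that obj_tlen covers them. The sketch below (standalone, with illustrative names rather than tcpdump's macros) shows the per-object length guard that func_after introduces for every class/ctype:

/*
 * Sketch of the "(not correct for object)" guard: refuse to decode
 * unless the object payload has exactly the expected size, so the
 * fixed-offset reads that follow are known to stay in bounds.
 */
#include <stdint.h>
#include <stdio.h>

static int print_node_id(const unsigned char *obj, unsigned int obj_tlen)
{
    if (obj_tlen != 4) {                 /* the guard the fix adds */
        printf(" (not correct for object)");
        return -1;
    }
    /* only now is a 4-byte read known to stay inside the object */
    uint32_t id = ((uint32_t)obj[0] << 24) | ((uint32_t)obj[1] << 16) |
                  ((uint32_t)obj[2] << 8)  |  (uint32_t)obj[3];
    printf(" Node ID: %u (0x%08x)", id, id);
    return 0;
}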
lmp_print(netdissect_options *ndo, register const u_char *pptr, register u_int len) { const struct lmp_common_header *lmp_com_header; const struct lmp_object_header *lmp_obj_header; const u_char *tptr,*obj_tptr; u_int tlen,lmp_obj_len,lmp_obj_ctype,obj_tlen; int hexdump; u_int offset; u_int link_type; union { /* int to float conversion buffer */ float f; uint32_t i; } bw; tptr=pptr; lmp_com_header = (const struct lmp_common_header *)pptr; ND_TCHECK(*lmp_com_header); /* * Sanity checking of the header. */ if (LMP_EXTRACT_VERSION(lmp_com_header->version_res[0]) != LMP_VERSION) { ND_PRINT((ndo, "LMP version %u packet not supported", LMP_EXTRACT_VERSION(lmp_com_header->version_res[0]))); return; } /* in non-verbose mode just lets print the basic Message Type*/ if (ndo->ndo_vflag < 1) { ND_PRINT((ndo, "LMPv%u %s Message, length: %u", LMP_EXTRACT_VERSION(lmp_com_header->version_res[0]), tok2str(lmp_msg_type_values, "unknown (%u)",lmp_com_header->msg_type), len)); return; } /* ok they seem to want to know everything - lets fully decode it */ tlen=EXTRACT_16BITS(lmp_com_header->length); ND_PRINT((ndo, "\n\tLMPv%u, msg-type: %s, Flags: [%s], length: %u", LMP_EXTRACT_VERSION(lmp_com_header->version_res[0]), tok2str(lmp_msg_type_values, "unknown, type: %u",lmp_com_header->msg_type), bittok2str(lmp_header_flag_values,"none",lmp_com_header->flags), tlen)); if (tlen < sizeof(const struct lmp_common_header)) { ND_PRINT((ndo, " (too short)")); return; } if (tlen > len) { ND_PRINT((ndo, " (too long)")); tlen = len; } tptr+=sizeof(const struct lmp_common_header); tlen-=sizeof(const struct lmp_common_header); while(tlen>0) { /* did we capture enough for fully decoding the object header ? */ ND_TCHECK2(*tptr, sizeof(struct lmp_object_header)); lmp_obj_header = (const struct lmp_object_header *)tptr; lmp_obj_len=EXTRACT_16BITS(lmp_obj_header->length); lmp_obj_ctype=(lmp_obj_header->ctype)&0x7f; ND_PRINT((ndo, "\n\t %s Object (%u), Class-Type: %s (%u) Flags: [%snegotiable], length: %u", tok2str(lmp_obj_values, "Unknown", lmp_obj_header->class_num), lmp_obj_header->class_num, tok2str(lmp_ctype_values, "Unknown", ((lmp_obj_header->class_num)<<8)+lmp_obj_ctype), lmp_obj_ctype, (lmp_obj_header->ctype)&0x80 ? "" : "non-", lmp_obj_len)); if (lmp_obj_len < 4) { ND_PRINT((ndo, " (too short)")); return; } if ((lmp_obj_len % 4) != 0) { ND_PRINT((ndo, " (not a multiple of 4)")); return; } obj_tptr=tptr+sizeof(struct lmp_object_header); obj_tlen=lmp_obj_len-sizeof(struct lmp_object_header); /* did we capture enough for fully decoding the object ? 
*/ ND_TCHECK2(*tptr, lmp_obj_len); hexdump=FALSE; switch(lmp_obj_header->class_num) { case LMP_OBJ_CC_ID: switch(lmp_obj_ctype) { case LMP_CTYPE_LOC: case LMP_CTYPE_RMT: if (obj_tlen != 4) { ND_PRINT((ndo, " (not correct for object)")); break; } ND_PRINT((ndo, "\n\t Control Channel ID: %u (0x%08x)", EXTRACT_32BITS(obj_tptr), EXTRACT_32BITS(obj_tptr))); break; default: hexdump=TRUE; } break; case LMP_OBJ_LINK_ID: case LMP_OBJ_INTERFACE_ID: switch(lmp_obj_ctype) { case LMP_CTYPE_IPV4_LOC: case LMP_CTYPE_IPV4_RMT: if (obj_tlen != 4) { ND_PRINT((ndo, " (not correct for object)")); break; } ND_PRINT((ndo, "\n\t IPv4 Link ID: %s (0x%08x)", ipaddr_string(ndo, obj_tptr), EXTRACT_32BITS(obj_tptr))); break; case LMP_CTYPE_IPV6_LOC: case LMP_CTYPE_IPV6_RMT: if (obj_tlen != 16) { ND_PRINT((ndo, " (not correct for object)")); break; } ND_PRINT((ndo, "\n\t IPv6 Link ID: %s (0x%08x)", ip6addr_string(ndo, obj_tptr), EXTRACT_32BITS(obj_tptr))); break; case LMP_CTYPE_UNMD_LOC: case LMP_CTYPE_UNMD_RMT: if (obj_tlen != 4) { ND_PRINT((ndo, " (not correct for object)")); break; } ND_PRINT((ndo, "\n\t Link ID: %u (0x%08x)", EXTRACT_32BITS(obj_tptr), EXTRACT_32BITS(obj_tptr))); break; default: hexdump=TRUE; } break; case LMP_OBJ_MESSAGE_ID: switch(lmp_obj_ctype) { case LMP_CTYPE_1: if (obj_tlen != 4) { ND_PRINT((ndo, " (not correct for object)")); break; } ND_PRINT((ndo, "\n\t Message ID: %u (0x%08x)", EXTRACT_32BITS(obj_tptr), EXTRACT_32BITS(obj_tptr))); break; case LMP_CTYPE_2: if (obj_tlen != 4) { ND_PRINT((ndo, " (not correct for object)")); break; } ND_PRINT((ndo, "\n\t Message ID Ack: %u (0x%08x)", EXTRACT_32BITS(obj_tptr), EXTRACT_32BITS(obj_tptr))); break; default: hexdump=TRUE; } break; case LMP_OBJ_NODE_ID: switch(lmp_obj_ctype) { case LMP_CTYPE_LOC: case LMP_CTYPE_RMT: if (obj_tlen != 4) { ND_PRINT((ndo, " (not correct for object)")); break; } ND_PRINT((ndo, "\n\t Node ID: %s (0x%08x)", ipaddr_string(ndo, obj_tptr), EXTRACT_32BITS(obj_tptr))); break; default: hexdump=TRUE; } break; case LMP_OBJ_CONFIG: switch(lmp_obj_ctype) { case LMP_CTYPE_HELLO_CONFIG: if (obj_tlen != 4) { ND_PRINT((ndo, " (not correct for object)")); break; } ND_PRINT((ndo, "\n\t Hello Interval: %u\n\t Hello Dead Interval: %u", EXTRACT_16BITS(obj_tptr), EXTRACT_16BITS(obj_tptr+2))); break; default: hexdump=TRUE; } break; case LMP_OBJ_HELLO: switch(lmp_obj_ctype) { case LMP_CTYPE_HELLO: if (obj_tlen != 8) { ND_PRINT((ndo, " (not correct for object)")); break; } ND_PRINT((ndo, "\n\t Tx Seq: %u, Rx Seq: %u", EXTRACT_32BITS(obj_tptr), EXTRACT_32BITS(obj_tptr+4))); break; default: hexdump=TRUE; } break; case LMP_OBJ_TE_LINK: switch(lmp_obj_ctype) { case LMP_CTYPE_IPV4: if (obj_tlen != 12) { ND_PRINT((ndo, " (not correct for object)")); break; } ND_PRINT((ndo, "\n\t Flags: [%s]", bittok2str(lmp_obj_te_link_flag_values, "none", EXTRACT_8BITS(obj_tptr)))); ND_PRINT((ndo, "\n\t Local Link-ID: %s (0x%08x)" "\n\t Remote Link-ID: %s (0x%08x)", ipaddr_string(ndo, obj_tptr+4), EXTRACT_32BITS(obj_tptr+4), ipaddr_string(ndo, obj_tptr+8), EXTRACT_32BITS(obj_tptr+8))); break; case LMP_CTYPE_IPV6: if (obj_tlen != 36) { ND_PRINT((ndo, " (not correct for object)")); break; } ND_PRINT((ndo, "\n\t Flags: [%s]", bittok2str(lmp_obj_te_link_flag_values, "none", EXTRACT_8BITS(obj_tptr)))); ND_PRINT((ndo, "\n\t Local Link-ID: %s (0x%08x)" "\n\t Remote Link-ID: %s (0x%08x)", ip6addr_string(ndo, obj_tptr+4), EXTRACT_32BITS(obj_tptr+4), ip6addr_string(ndo, obj_tptr+20), EXTRACT_32BITS(obj_tptr+20))); break; case LMP_CTYPE_UNMD: if (obj_tlen != 12) { 
ND_PRINT((ndo, " (not correct for object)")); break; } ND_PRINT((ndo, "\n\t Flags: [%s]", bittok2str(lmp_obj_te_link_flag_values, "none", EXTRACT_8BITS(obj_tptr)))); ND_PRINT((ndo, "\n\t Local Link-ID: %u (0x%08x)" "\n\t Remote Link-ID: %u (0x%08x)", EXTRACT_32BITS(obj_tptr+4), EXTRACT_32BITS(obj_tptr+4), EXTRACT_32BITS(obj_tptr+8), EXTRACT_32BITS(obj_tptr+8))); break; default: hexdump=TRUE; } break; case LMP_OBJ_DATA_LINK: switch(lmp_obj_ctype) { case LMP_CTYPE_IPV4: if (obj_tlen < 12) { ND_PRINT((ndo, " (not correct for object)")); break; } ND_PRINT((ndo, "\n\t Flags: [%s]", bittok2str(lmp_obj_data_link_flag_values, "none", EXTRACT_8BITS(obj_tptr)))); ND_PRINT((ndo, "\n\t Local Interface ID: %s (0x%08x)" "\n\t Remote Interface ID: %s (0x%08x)", ipaddr_string(ndo, obj_tptr+4), EXTRACT_32BITS(obj_tptr+4), ipaddr_string(ndo, obj_tptr+8), EXTRACT_32BITS(obj_tptr+8))); if (lmp_print_data_link_subobjs(ndo, obj_tptr, obj_tlen - 12, 12)) hexdump=TRUE; break; case LMP_CTYPE_IPV6: if (obj_tlen < 36) { ND_PRINT((ndo, " (not correct for object)")); break; } ND_PRINT((ndo, "\n\t Flags: [%s]", bittok2str(lmp_obj_data_link_flag_values, "none", EXTRACT_8BITS(obj_tptr)))); ND_PRINT((ndo, "\n\t Local Interface ID: %s (0x%08x)" "\n\t Remote Interface ID: %s (0x%08x)", ip6addr_string(ndo, obj_tptr+4), EXTRACT_32BITS(obj_tptr+4), ip6addr_string(ndo, obj_tptr+20), EXTRACT_32BITS(obj_tptr+20))); if (lmp_print_data_link_subobjs(ndo, obj_tptr, obj_tlen - 36, 36)) hexdump=TRUE; break; case LMP_CTYPE_UNMD: if (obj_tlen < 12) { ND_PRINT((ndo, " (not correct for object)")); break; } ND_PRINT((ndo, "\n\t Flags: [%s]", bittok2str(lmp_obj_data_link_flag_values, "none", EXTRACT_8BITS(obj_tptr)))); ND_PRINT((ndo, "\n\t Local Interface ID: %u (0x%08x)" "\n\t Remote Interface ID: %u (0x%08x)", EXTRACT_32BITS(obj_tptr+4), EXTRACT_32BITS(obj_tptr+4), EXTRACT_32BITS(obj_tptr+8), EXTRACT_32BITS(obj_tptr+8))); if (lmp_print_data_link_subobjs(ndo, obj_tptr, obj_tlen - 12, 12)) hexdump=TRUE; break; default: hexdump=TRUE; } break; case LMP_OBJ_VERIFY_BEGIN: switch(lmp_obj_ctype) { case LMP_CTYPE_1: if (obj_tlen != 20) { ND_PRINT((ndo, " (not correct for object)")); break; } ND_PRINT((ndo, "\n\t Flags: %s", bittok2str(lmp_obj_begin_verify_flag_values, "none", EXTRACT_16BITS(obj_tptr)))); ND_PRINT((ndo, "\n\t Verify Interval: %u", EXTRACT_16BITS(obj_tptr+2))); ND_PRINT((ndo, "\n\t Data links: %u", EXTRACT_32BITS(obj_tptr+4))); ND_PRINT((ndo, "\n\t Encoding type: %s", tok2str(gmpls_encoding_values, "Unknown", *(obj_tptr+8)))); ND_PRINT((ndo, "\n\t Verify Transport Mechanism: %u (0x%x)%s", EXTRACT_16BITS(obj_tptr+10), EXTRACT_16BITS(obj_tptr+10), EXTRACT_16BITS(obj_tptr+10)&8000 ? 
" (Payload test messages capable)" : "")); bw.i = EXTRACT_32BITS(obj_tptr+12); ND_PRINT((ndo, "\n\t Transmission Rate: %.3f Mbps",bw.f*8/1000000)); ND_PRINT((ndo, "\n\t Wavelength: %u", EXTRACT_32BITS(obj_tptr+16))); break; default: hexdump=TRUE; } break; case LMP_OBJ_VERIFY_BEGIN_ACK: switch(lmp_obj_ctype) { case LMP_CTYPE_1: if (obj_tlen != 4) { ND_PRINT((ndo, " (not correct for object)")); break; } ND_PRINT((ndo, "\n\t Verify Dead Interval: %u" "\n\t Verify Transport Response: %u", EXTRACT_16BITS(obj_tptr), EXTRACT_16BITS(obj_tptr+2))); break; default: hexdump=TRUE; } break; case LMP_OBJ_VERIFY_ID: switch(lmp_obj_ctype) { case LMP_CTYPE_1: if (obj_tlen != 4) { ND_PRINT((ndo, " (not correct for object)")); break; } ND_PRINT((ndo, "\n\t Verify ID: %u", EXTRACT_32BITS(obj_tptr))); break; default: hexdump=TRUE; } break; case LMP_OBJ_CHANNEL_STATUS: switch(lmp_obj_ctype) { case LMP_CTYPE_IPV4: offset = 0; /* Decode pairs: <Interface_ID (4 bytes), Channel_status (4 bytes)> */ while (offset+8 <= obj_tlen) { ND_PRINT((ndo, "\n\t Interface ID: %s (0x%08x)", ipaddr_string(ndo, obj_tptr+offset), EXTRACT_32BITS(obj_tptr+offset))); ND_PRINT((ndo, "\n\t\t Active: %s (%u)", (EXTRACT_32BITS(obj_tptr+offset+4)>>31) ? "Allocated" : "Non-allocated", (EXTRACT_32BITS(obj_tptr+offset+4)>>31))); ND_PRINT((ndo, "\n\t\t Direction: %s (%u)", (EXTRACT_32BITS(obj_tptr+offset+4)>>30)&0x1 ? "Transmit" : "Receive", (EXTRACT_32BITS(obj_tptr+offset+4)>>30)&0x1)); ND_PRINT((ndo, "\n\t\t Channel Status: %s (%u)", tok2str(lmp_obj_channel_status_values, "Unknown", EXTRACT_32BITS(obj_tptr+offset+4)&0x3FFFFFF), EXTRACT_32BITS(obj_tptr+offset+4)&0x3FFFFFF)); offset+=8; } break; case LMP_CTYPE_IPV6: offset = 0; /* Decode pairs: <Interface_ID (16 bytes), Channel_status (4 bytes)> */ while (offset+20 <= obj_tlen) { ND_PRINT((ndo, "\n\t Interface ID: %s (0x%08x)", ip6addr_string(ndo, obj_tptr+offset), EXTRACT_32BITS(obj_tptr+offset))); ND_PRINT((ndo, "\n\t\t Active: %s (%u)", (EXTRACT_32BITS(obj_tptr+offset+16)>>31) ? "Allocated" : "Non-allocated", (EXTRACT_32BITS(obj_tptr+offset+16)>>31))); ND_PRINT((ndo, "\n\t\t Direction: %s (%u)", (EXTRACT_32BITS(obj_tptr+offset+16)>>30)&0x1 ? "Transmit" : "Receive", (EXTRACT_32BITS(obj_tptr+offset+16)>>30)&0x1)); ND_PRINT((ndo, "\n\t\t Channel Status: %s (%u)", tok2str(lmp_obj_channel_status_values, "Unknown", EXTRACT_32BITS(obj_tptr+offset+16)&0x3FFFFFF), EXTRACT_32BITS(obj_tptr+offset+16)&0x3FFFFFF)); offset+=20; } break; case LMP_CTYPE_UNMD: offset = 0; /* Decode pairs: <Interface_ID (4 bytes), Channel_status (4 bytes)> */ while (offset+8 <= obj_tlen) { ND_PRINT((ndo, "\n\t Interface ID: %u (0x%08x)", EXTRACT_32BITS(obj_tptr+offset), EXTRACT_32BITS(obj_tptr+offset))); ND_PRINT((ndo, "\n\t\t Active: %s (%u)", (EXTRACT_32BITS(obj_tptr+offset+4)>>31) ? "Allocated" : "Non-allocated", (EXTRACT_32BITS(obj_tptr+offset+4)>>31))); ND_PRINT((ndo, "\n\t\t Direction: %s (%u)", (EXTRACT_32BITS(obj_tptr+offset+4)>>30)&0x1 ? 
"Transmit" : "Receive", (EXTRACT_32BITS(obj_tptr+offset+4)>>30)&0x1)); ND_PRINT((ndo, "\n\t\t Channel Status: %s (%u)", tok2str(lmp_obj_channel_status_values, "Unknown", EXTRACT_32BITS(obj_tptr+offset+4)&0x3FFFFFF), EXTRACT_32BITS(obj_tptr+offset+4)&0x3FFFFFF)); offset+=8; } break; default: hexdump=TRUE; } break; case LMP_OBJ_CHANNEL_STATUS_REQ: switch(lmp_obj_ctype) { case LMP_CTYPE_IPV4: offset = 0; while (offset+4 <= obj_tlen) { ND_PRINT((ndo, "\n\t Interface ID: %s (0x%08x)", ipaddr_string(ndo, obj_tptr+offset), EXTRACT_32BITS(obj_tptr+offset))); offset+=4; } break; case LMP_CTYPE_IPV6: offset = 0; while (offset+16 <= obj_tlen) { ND_PRINT((ndo, "\n\t Interface ID: %s (0x%08x)", ip6addr_string(ndo, obj_tptr+offset), EXTRACT_32BITS(obj_tptr+offset))); offset+=16; } break; case LMP_CTYPE_UNMD: offset = 0; while (offset+4 <= obj_tlen) { ND_PRINT((ndo, "\n\t Interface ID: %u (0x%08x)", EXTRACT_32BITS(obj_tptr+offset), EXTRACT_32BITS(obj_tptr+offset))); offset+=4; } break; default: hexdump=TRUE; } break; case LMP_OBJ_ERROR_CODE: switch(lmp_obj_ctype) { case LMP_CTYPE_BEGIN_VERIFY_ERROR: if (obj_tlen != 4) { ND_PRINT((ndo, " (not correct for object)")); break; } ND_PRINT((ndo, "\n\t Error Code: %s", bittok2str(lmp_obj_begin_verify_error_values, "none", EXTRACT_32BITS(obj_tptr)))); break; case LMP_CTYPE_LINK_SUMMARY_ERROR: if (obj_tlen != 4) { ND_PRINT((ndo, " (not correct for object)")); break; } ND_PRINT((ndo, "\n\t Error Code: %s", bittok2str(lmp_obj_link_summary_error_values, "none", EXTRACT_32BITS(obj_tptr)))); break; default: hexdump=TRUE; } break; case LMP_OBJ_SERVICE_CONFIG: switch (lmp_obj_ctype) { case LMP_CTYPE_SERVICE_CONFIG_SP: if (obj_tlen != 4) { ND_PRINT((ndo, " (not correct for object)")); break; } ND_PRINT((ndo, "\n\t Flags: %s", bittok2str(lmp_obj_service_config_sp_flag_values, "none", EXTRACT_8BITS(obj_tptr)))); ND_PRINT((ndo, "\n\t UNI Version: %u", EXTRACT_8BITS(obj_tptr+1))); break; case LMP_CTYPE_SERVICE_CONFIG_CPSA: if (obj_tlen != 16) { ND_PRINT((ndo, " (not correct for object)")); break; } link_type = EXTRACT_8BITS(obj_tptr); ND_PRINT((ndo, "\n\t Link Type: %s (%u)", tok2str(lmp_sd_service_config_cpsa_link_type_values, "Unknown", link_type), link_type)); switch (link_type) { case LMP_SD_SERVICE_CONFIG_CPSA_LINK_TYPE_SDH: ND_PRINT((ndo, "\n\t Signal Type: %s (%u)", tok2str(lmp_sd_service_config_cpsa_signal_type_sdh_values, "Unknown", EXTRACT_8BITS(obj_tptr+1)), EXTRACT_8BITS(obj_tptr+1))); break; case LMP_SD_SERVICE_CONFIG_CPSA_LINK_TYPE_SONET: ND_PRINT((ndo, "\n\t Signal Type: %s (%u)", tok2str(lmp_sd_service_config_cpsa_signal_type_sonet_values, "Unknown", EXTRACT_8BITS(obj_tptr+1)), EXTRACT_8BITS(obj_tptr+1))); break; } ND_PRINT((ndo, "\n\t Transparency: %s", bittok2str(lmp_obj_service_config_cpsa_tp_flag_values, "none", EXTRACT_8BITS(obj_tptr+2)))); ND_PRINT((ndo, "\n\t Contiguous Concatenation Types: %s", bittok2str(lmp_obj_service_config_cpsa_cct_flag_values, "none", EXTRACT_8BITS(obj_tptr+3)))); ND_PRINT((ndo, "\n\t Minimum NCC: %u", EXTRACT_16BITS(obj_tptr+4))); ND_PRINT((ndo, "\n\t Maximum NCC: %u", EXTRACT_16BITS(obj_tptr+6))); ND_PRINT((ndo, "\n\t Minimum NVC:%u", EXTRACT_16BITS(obj_tptr+8))); ND_PRINT((ndo, "\n\t Maximum NVC:%u", EXTRACT_16BITS(obj_tptr+10))); ND_PRINT((ndo, "\n\t Local Interface ID: %s (0x%08x)", ipaddr_string(ndo, obj_tptr+12), EXTRACT_32BITS(obj_tptr+12))); break; case LMP_CTYPE_SERVICE_CONFIG_TRANSPARENCY_TCM: if (obj_tlen != 8) { ND_PRINT((ndo, " (not correct for object)")); break; } ND_PRINT((ndo, "\n\t Transparency Flags: %s", 
bittok2str( lmp_obj_service_config_nsa_transparency_flag_values, "none", EXTRACT_32BITS(obj_tptr)))); ND_PRINT((ndo, "\n\t TCM Monitoring Flags: %s", bittok2str( lmp_obj_service_config_nsa_tcm_flag_values, "none", EXTRACT_8BITS(obj_tptr+7)))); break; case LMP_CTYPE_SERVICE_CONFIG_NETWORK_DIVERSITY: if (obj_tlen != 4) { ND_PRINT((ndo, " (not correct for object)")); break; } ND_PRINT((ndo, "\n\t Diversity: Flags: %s", bittok2str( lmp_obj_service_config_nsa_network_diversity_flag_values, "none", EXTRACT_8BITS(obj_tptr+3)))); break; default: hexdump = TRUE; } break; default: if (ndo->ndo_vflag <= 1) print_unknown_data(ndo,obj_tptr,"\n\t ",obj_tlen); break; } /* do we want to see an additionally hexdump ? */ if (ndo->ndo_vflag > 1 || hexdump==TRUE) print_unknown_data(ndo,tptr+sizeof(struct lmp_object_header),"\n\t ", lmp_obj_len-sizeof(struct lmp_object_header)); tptr+=lmp_obj_len; tlen-=lmp_obj_len; } return; trunc: ND_PRINT((ndo, "\n\t\t packet exceeded snapshot")); }
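func_after also extracts the DATA_LINK sub-object loop into a helper (lmp_print_data_link_subobjs, visible in the diff below) and gives it three sanity checks on each sub-object length: at least the 4-byte sub-object header, a multiple of 4, and no longer than what remains of the parent object. A minimal sketch of that TLV walk, under those same assumptions and with illustrative names:

/*
 * Sketch of the sub-object sanity checks: a bad length aborts the
 * walk instead of driving offset past the end of the parent object.
 */
static int walk_subobjs(const unsigned char *p, int remaining)
{
    int offset = 0;

    while (remaining >= 4) {            /* need at least a sub-object header */
        int type = p[offset];
        int len  = p[offset + 1];

        if (len < 4 || (len % 4) != 0 || len > remaining)
            return -1;                  /* malformed length: stop decoding */

        (void)type;                     /* type-specific body decode goes here */
        remaining -= len;
        offset    += len;
    }
    return (remaining == 0) ? 0 : -1;   /* trailing partial header is an error */
}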
{'added': [(14, ' * Support for LMP service discovery extensions (defined by OIF UNI 1.0)'), (15, ' * added by Manu Pathak (mapathak@cisco.com), May 2005'), (21, '/* OIF UNI 1.0: http://www.oiforum.com/public/documents/OIF-UNI-01.0.pdf */'), (357, 'static int'), (358, 'lmp_print_data_link_subobjs(netdissect_options *ndo, const u_char *obj_tptr,'), (359, ' int total_subobj_len, int offset)'), (360, '{'), (361, ' int hexdump = FALSE;'), (362, ' int subobj_type, subobj_len;'), (363, ''), (364, ' union { /* int to float conversion buffer */'), (365, ' float f;'), (366, ' uint32_t i;'), (367, ' } bw;'), (368, ''), (369, ' while (total_subobj_len > 0 && hexdump == FALSE ) {'), (370, '\tsubobj_type = EXTRACT_8BITS(obj_tptr+offset);'), (371, '\tsubobj_len = EXTRACT_8BITS(obj_tptr+offset+1);'), (372, '\tND_PRINT((ndo, "\\n\\t Subobject, Type: %s (%u), Length: %u",'), (373, '\t\ttok2str(lmp_data_link_subobj,'), (374, '\t\t\t"Unknown",'), (375, '\t\t\tsubobj_type),'), (376, '\t\t\tsubobj_type,'), (377, '\t\t\tsubobj_len));'), (378, '\tif (subobj_len < 4) {'), (379, '\t ND_PRINT((ndo, " (too short)"));'), (380, '\t break;'), (381, '\t}'), (382, '\tif ((subobj_len % 4) != 0) {'), (383, '\t ND_PRINT((ndo, " (not a multiple of 4)"));'), (384, '\t break;'), (385, '\t}'), (386, '\tif (total_subobj_len < subobj_len) {'), (387, '\t ND_PRINT((ndo, " (goes past the end of the object)"));'), (388, '\t break;'), (389, '\t}'), (390, '\tswitch(subobj_type) {'), (391, '\tcase INT_SWITCHING_TYPE_SUBOBJ:'), (392, '\t ND_PRINT((ndo, "\\n\\t Switching Type: %s (%u)",'), (393, '\t\ttok2str(gmpls_switch_cap_values,'), (394, '\t\t\t"Unknown",'), (395, '\t\t\tEXTRACT_8BITS(obj_tptr+offset+2)),'), (396, '\t\tEXTRACT_8BITS(obj_tptr+offset+2)));'), (397, '\t ND_PRINT((ndo, "\\n\\t Encoding Type: %s (%u)",'), (398, '\t\ttok2str(gmpls_encoding_values,'), (399, '\t\t\t"Unknown",'), (400, '\t\t\tEXTRACT_8BITS(obj_tptr+offset+3)),'), (401, '\t\tEXTRACT_8BITS(obj_tptr+offset+3)));'), (402, '\t bw.i = EXTRACT_32BITS(obj_tptr+offset+4);'), (403, '\t ND_PRINT((ndo, "\\n\\t Min Reservable Bandwidth: %.3f Mbps",'), (404, ' bw.f*8/1000000));'), (405, '\t bw.i = EXTRACT_32BITS(obj_tptr+offset+8);'), (406, '\t ND_PRINT((ndo, "\\n\\t Max Reservable Bandwidth: %.3f Mbps",'), (407, ' bw.f*8/1000000));'), (408, '\t break;'), (409, '\tcase WAVELENGTH_SUBOBJ:'), (410, '\t ND_PRINT((ndo, "\\n\\t Wavelength: %u",'), (411, '\t\tEXTRACT_32BITS(obj_tptr+offset+4)));'), (412, '\t break;'), (413, '\tdefault:'), (414, '\t /* Any Unknown Subobject ==> Exit loop */'), (415, '\t hexdump=TRUE;'), (416, '\t break;'), (417, '\t}'), (418, '\ttotal_subobj_len-=subobj_len;'), (419, '\toffset+=subobj_len;'), (420, ' }'), (421, ' return (hexdump);'), (422, '}'), (423, ''), (431, ' u_int tlen,lmp_obj_len,lmp_obj_ctype,obj_tlen;'), (433, ' u_int offset;'), (434, ' u_int link_type;'), (472, ' if (tlen < sizeof(const struct lmp_common_header)) {'), (473, ' ND_PRINT((ndo, " (too short)"));'), (474, ' return;'), (475, ' }'), (476, ' if (tlen > len) {'), (477, ' ND_PRINT((ndo, " (too long)"));'), (478, ' tlen = len;'), (479, ' }'), (504, ' if (lmp_obj_len < 4) {'), (505, ' ND_PRINT((ndo, " (too short)"));'), (506, ' return;'), (507, ' }'), (508, ' if ((lmp_obj_len % 4) != 0) {'), (509, ' ND_PRINT((ndo, " (not a multiple of 4)"));'), (510, ' return;'), (511, ' }'), (512, ''), (526, ' if (obj_tlen != 4) {'), (527, ' ND_PRINT((ndo, " (not correct for object)"));'), (528, ' break;'), (529, ' }'), (545, ' if (obj_tlen != 4) {'), (546, ' ND_PRINT((ndo, " (not correct for 
object)"));'), (547, ' break;'), (548, ' }'), (555, ' if (obj_tlen != 16) {'), (556, ' ND_PRINT((ndo, " (not correct for object)"));'), (557, ' break;'), (558, ' }'), (565, ' if (obj_tlen != 4) {'), (566, ' ND_PRINT((ndo, " (not correct for object)"));'), (567, ' break;'), (568, ' }'), (581, ' if (obj_tlen != 4) {'), (582, ' ND_PRINT((ndo, " (not correct for object)"));'), (583, ' break;'), (584, ' }'), (590, ' if (obj_tlen != 4) {'), (591, ' ND_PRINT((ndo, " (not correct for object)"));'), (592, ' break;'), (593, ' }'), (607, ' if (obj_tlen != 4) {'), (608, ' ND_PRINT((ndo, " (not correct for object)"));'), (609, ' break;'), (610, ' }'), (624, ' if (obj_tlen != 4) {'), (625, ' ND_PRINT((ndo, " (not correct for object)"));'), (626, ' break;'), (627, ' }'), (641, ' if (obj_tlen != 8) {'), (642, ' ND_PRINT((ndo, " (not correct for object)"));'), (643, ' break;'), (644, ' }'), (656, '\t switch(lmp_obj_ctype) {'), (657, '\t case LMP_CTYPE_IPV4:'), (658, ' if (obj_tlen != 12) {'), (659, ' ND_PRINT((ndo, " (not correct for object)"));'), (660, ' break;'), (661, ' }'), (663, '\t\t bittok2str(lmp_obj_te_link_flag_values,'), (665, '\t\t\tEXTRACT_8BITS(obj_tptr))));'), (676, ' if (obj_tlen != 36) {'), (677, ' ND_PRINT((ndo, " (not correct for object)"));'), (678, ' break;'), (679, ' }'), (680, '\t\tND_PRINT((ndo, "\\n\\t Flags: [%s]",'), (681, '\t\t bittok2str(lmp_obj_te_link_flag_values,'), (682, '\t\t\t"none",'), (683, '\t\t\tEXTRACT_8BITS(obj_tptr))));'), (684, ''), (685, '\t\tND_PRINT((ndo, "\\n\\t Local Link-ID: %s (0x%08x)"'), (686, '\t\t "\\n\\t Remote Link-ID: %s (0x%08x)",'), (687, ' ip6addr_string(ndo, obj_tptr+4),'), (688, ' EXTRACT_32BITS(obj_tptr+4),'), (689, ' ip6addr_string(ndo, obj_tptr+20),'), (690, ' EXTRACT_32BITS(obj_tptr+20)));'), (691, ' break;'), (692, ''), (694, ' if (obj_tlen != 12) {'), (695, ' ND_PRINT((ndo, " (not correct for object)"));'), (696, ' break;'), (697, ' }'), (698, '\t\tND_PRINT((ndo, "\\n\\t Flags: [%s]",'), (699, '\t\t bittok2str(lmp_obj_te_link_flag_values,'), (700, '\t\t\t"none",'), (701, '\t\t\tEXTRACT_8BITS(obj_tptr))));'), (702, ''), (703, '\t\tND_PRINT((ndo, "\\n\\t Local Link-ID: %u (0x%08x)"'), (704, '\t\t "\\n\\t Remote Link-ID: %u (0x%08x)",'), (705, ' EXTRACT_32BITS(obj_tptr+4),'), (706, ' EXTRACT_32BITS(obj_tptr+4),'), (707, ' EXTRACT_32BITS(obj_tptr+8),'), (708, ' EXTRACT_32BITS(obj_tptr+8)));'), (709, '\t\tbreak;'), (710, ''), (719, ' if (obj_tlen < 12) {'), (720, ' ND_PRINT((ndo, " (not correct for object)"));'), (721, ' break;'), (722, ' }'), (723, '\t ND_PRINT((ndo, "\\n\\t Flags: [%s]",'), (724, '\t\t bittok2str(lmp_obj_data_link_flag_values,'), (725, '\t\t\t"none",'), (726, '\t\t\tEXTRACT_8BITS(obj_tptr))));'), (734, '\t\tif (lmp_print_data_link_subobjs(ndo, obj_tptr, obj_tlen - 12, 12))'), (735, '\t\t hexdump=TRUE;'), (737, ''), (739, ' if (obj_tlen < 36) {'), (740, ' ND_PRINT((ndo, " (not correct for object)"));'), (741, ' break;'), (742, ' }'), (743, '\t ND_PRINT((ndo, "\\n\\t Flags: [%s]",'), (744, '\t\t bittok2str(lmp_obj_data_link_flag_values,'), (745, '\t\t\t"none",'), (746, '\t\t\tEXTRACT_8BITS(obj_tptr))));'), (747, ' ND_PRINT((ndo, "\\n\\t Local Interface ID: %s (0x%08x)"'), (748, ' "\\n\\t Remote Interface ID: %s (0x%08x)",'), (749, ' ip6addr_string(ndo, obj_tptr+4),'), (750, ' EXTRACT_32BITS(obj_tptr+4),'), (751, ' ip6addr_string(ndo, obj_tptr+20),'), (752, ' EXTRACT_32BITS(obj_tptr+20)));'), (753, ''), (754, '\t\tif (lmp_print_data_link_subobjs(ndo, obj_tptr, obj_tlen - 36, 36))'), (755, '\t\t hexdump=TRUE;'), (756, 
'\t\tbreak;'), (757, ''), (758, '\t case LMP_CTYPE_UNMD:'), (759, ' if (obj_tlen < 12) {'), (760, ' ND_PRINT((ndo, " (not correct for object)"));'), (761, ' break;'), (762, ' }'), (763, '\t ND_PRINT((ndo, "\\n\\t Flags: [%s]",'), (764, '\t\t bittok2str(lmp_obj_data_link_flag_values,'), (765, '\t\t\t"none",'), (766, '\t\t\tEXTRACT_8BITS(obj_tptr))));'), (767, ' ND_PRINT((ndo, "\\n\\t Local Interface ID: %u (0x%08x)"'), (768, ' "\\n\\t Remote Interface ID: %u (0x%08x)",'), (769, ' EXTRACT_32BITS(obj_tptr+4),'), (770, ' EXTRACT_32BITS(obj_tptr+4),'), (771, ' EXTRACT_32BITS(obj_tptr+8),'), (772, ' EXTRACT_32BITS(obj_tptr+8)));'), (773, ''), (774, '\t\tif (lmp_print_data_link_subobjs(ndo, obj_tptr, obj_tlen - 12, 12))'), (775, '\t\t hexdump=TRUE;'), (776, '\t\tbreak;'), (777, ''), (786, ' if (obj_tlen != 20) {'), (787, ' ND_PRINT((ndo, " (not correct for object)"));'), (788, ' break;'), (789, ' }'), (818, ' if (obj_tlen != 4) {'), (819, ' ND_PRINT((ndo, " (not correct for object)"));'), (820, ' break;'), (821, ' }'), (836, ' if (obj_tlen != 4) {'), (837, ' ND_PRINT((ndo, " (not correct for object)"));'), (838, ' break;'), (839, ' }'), (854, '\t\twhile (offset+8 <= obj_tlen) {'), (859, '\t\t\tND_PRINT((ndo, "\\n\\t\\t Active: %s (%u)",'), (860, '\t\t\t\t(EXTRACT_32BITS(obj_tptr+offset+4)>>31) ?'), (864, '\t\t\tND_PRINT((ndo, "\\n\\t\\t Direction: %s (%u)",'), (865, '\t\t\t\t(EXTRACT_32BITS(obj_tptr+offset+4)>>30)&0x1 ?'), (877, ''), (879, '\t\toffset = 0;'), (880, '\t\t/* Decode pairs: <Interface_ID (16 bytes), Channel_status (4 bytes)> */'), (881, '\t\twhile (offset+20 <= obj_tlen) {'), (882, '\t\t\tND_PRINT((ndo, "\\n\\t Interface ID: %s (0x%08x)",'), (883, '\t\t\tip6addr_string(ndo, obj_tptr+offset),'), (884, '\t\t\tEXTRACT_32BITS(obj_tptr+offset)));'), (885, ''), (886, '\t\t\tND_PRINT((ndo, "\\n\\t\\t Active: %s (%u)",'), (887, '\t\t\t\t(EXTRACT_32BITS(obj_tptr+offset+16)>>31) ?'), (888, '\t\t\t\t\t\t"Allocated" : "Non-allocated",'), (889, '\t\t\t\t(EXTRACT_32BITS(obj_tptr+offset+16)>>31)));'), (890, ''), (891, '\t\t\tND_PRINT((ndo, "\\n\\t\\t Direction: %s (%u)",'), (892, '\t\t\t\t(EXTRACT_32BITS(obj_tptr+offset+16)>>30)&0x1 ?'), (893, '\t\t\t\t\t\t"Transmit" : "Receive",'), (894, '\t\t\t\t(EXTRACT_32BITS(obj_tptr+offset+16)>>30)&0x1));'), (895, ''), (896, '\t\t\tND_PRINT((ndo, "\\n\\t\\t Channel Status: %s (%u)",'), (897, '\t\t\t\t\ttok2str(lmp_obj_channel_status_values,'), (898, '\t\t\t\t\t"Unknown",'), (899, '\t\t\t\t\tEXTRACT_32BITS(obj_tptr+offset+16)&0x3FFFFFF),'), (900, '\t\t\tEXTRACT_32BITS(obj_tptr+offset+16)&0x3FFFFFF));'), (901, '\t\t\toffset+=20;'), (902, '\t\t}'), (903, ' break;'), (904, ''), (905, '\t case LMP_CTYPE_UNMD:'), (906, '\t\toffset = 0;'), (907, '\t\t/* Decode pairs: <Interface_ID (4 bytes), Channel_status (4 bytes)> */'), (908, '\t\twhile (offset+8 <= obj_tlen) {'), (909, '\t\t\tND_PRINT((ndo, "\\n\\t Interface ID: %u (0x%08x)",'), (910, '\t\t\tEXTRACT_32BITS(obj_tptr+offset),'), (911, '\t\t\tEXTRACT_32BITS(obj_tptr+offset)));'), (912, ''), (913, '\t\t\tND_PRINT((ndo, "\\n\\t\\t Active: %s (%u)",'), (914, '\t\t\t\t(EXTRACT_32BITS(obj_tptr+offset+4)>>31) ?'), (915, '\t\t\t\t\t\t"Allocated" : "Non-allocated",'), (916, '\t\t\t\t(EXTRACT_32BITS(obj_tptr+offset+4)>>31)));'), (917, ''), (918, '\t\t\tND_PRINT((ndo, "\\n\\t\\t Direction: %s (%u)",'), (919, '\t\t\t\t(EXTRACT_32BITS(obj_tptr+offset+4)>>30)&0x1 ?'), (920, '\t\t\t\t\t\t"Transmit" : "Receive",'), (921, '\t\t\t\t(EXTRACT_32BITS(obj_tptr+offset+4)>>30)&0x1));'), (922, ''), (923, '\t\t\tND_PRINT((ndo, "\\n\\t\\t 
Channel Status: %s (%u)",'), (924, '\t\t\t\t\ttok2str(lmp_obj_channel_status_values,'), (925, '\t\t\t\t\t"Unknown",'), (926, '\t\t\t\t\tEXTRACT_32BITS(obj_tptr+offset+4)&0x3FFFFFF),'), (927, '\t\t\tEXTRACT_32BITS(obj_tptr+offset+4)&0x3FFFFFF));'), (928, '\t\t\toffset+=8;'), (929, '\t\t}'), (930, ' break;'), (931, ''), (941, '\t\twhile (offset+4 <= obj_tlen) {'), (948, ''), (950, '\t\toffset = 0;'), (951, '\t\twhile (offset+16 <= obj_tlen) {'), (952, '\t\t\tND_PRINT((ndo, "\\n\\t Interface ID: %s (0x%08x)",'), (953, '\t\t\tip6addr_string(ndo, obj_tptr+offset),'), (954, '\t\t\tEXTRACT_32BITS(obj_tptr+offset)));'), (955, '\t\t\toffset+=16;'), (956, '\t\t}'), (957, ' break;'), (958, ''), (959, '\t case LMP_CTYPE_UNMD:'), (960, '\t\toffset = 0;'), (961, '\t\twhile (offset+4 <= obj_tlen) {'), (962, '\t\t\tND_PRINT((ndo, "\\n\\t Interface ID: %u (0x%08x)",'), (963, '\t\t\tEXTRACT_32BITS(obj_tptr+offset),'), (964, '\t\t\tEXTRACT_32BITS(obj_tptr+offset)));'), (965, '\t\t\toffset+=4;'), (966, '\t\t}'), (967, ' break;'), (968, ''), (977, ' if (obj_tlen != 4) {'), (978, ' ND_PRINT((ndo, " (not correct for object)"));'), (979, ' break;'), (980, ' }'), (988, ' if (obj_tlen != 4) {'), (989, ' ND_PRINT((ndo, " (not correct for object)"));'), (990, ' break;'), (991, ' }'), (1005, ' if (obj_tlen != 4) {'), (1006, ' ND_PRINT((ndo, " (not correct for object)"));'), (1007, ' break;'), (1008, ' }'), (1012, '\t\t\t\t EXTRACT_8BITS(obj_tptr))));'), (1015, '\t\t EXTRACT_8BITS(obj_tptr+1)));'), (1020, ' if (obj_tlen != 16) {'), (1021, ' ND_PRINT((ndo, " (not correct for object)"));'), (1022, ' break;'), (1023, ' }'), (1025, '\t\tlink_type = EXTRACT_8BITS(obj_tptr);'), (1032, '\t\tswitch (link_type) {'), (1033, '\t\tcase LMP_SD_SERVICE_CONFIG_CPSA_LINK_TYPE_SDH:'), (1037, '\t\t\t\t EXTRACT_8BITS(obj_tptr+1)),'), (1038, '\t\t\t EXTRACT_8BITS(obj_tptr+1)));'), (1039, '\t\t break;'), (1041, '\t\tcase LMP_SD_SERVICE_CONFIG_CPSA_LINK_TYPE_SONET:'), (1045, '\t\t\t\t EXTRACT_8BITS(obj_tptr+1)),'), (1046, '\t\t\t EXTRACT_8BITS(obj_tptr+1)));'), (1047, '\t\t break;'), (1053, '\t\t\t\t EXTRACT_8BITS(obj_tptr+2))));'), (1058, '\t\t\t\t EXTRACT_8BITS(obj_tptr+3))));'), (1079, ' if (obj_tlen != 8) {'), (1080, ' ND_PRINT((ndo, " (not correct for object)"));'), (1081, ' break;'), (1082, ' }'), (1094, '\t\t\t EXTRACT_8BITS(obj_tptr+7))));'), (1099, ' if (obj_tlen != 4) {'), (1100, ' ND_PRINT((ndo, " (not correct for object)"));'), (1101, ' break;'), (1102, ' }'), (1108, '\t\t\t EXTRACT_8BITS(obj_tptr+3))));')], 'deleted': [(14, ' * Support for LMP service discovery extensions (defined by UNI 1.0) added'), (15, ' * by Manu Pathak (mapathak@cisco.com), May 2005'), (363, ' int tlen,lmp_obj_len,lmp_obj_ctype,obj_tlen;'), (365, ' int offset,subobj_type,subobj_len,total_subobj_len;'), (366, ' int link_type;'), (416, ' if(lmp_obj_len % 4 || lmp_obj_len < 4)'), (417, ' return;'), (418, ''), (539, '\t\tbittok2str(lmp_obj_te_link_flag_values,'), (541, '\t\t\tEXTRACT_16BITS(obj_tptr)>>8)));'), (543, '\t switch(lmp_obj_ctype) {'), (544, '\t case LMP_CTYPE_IPV4:'), (561, '\t\tND_PRINT((ndo, "\\n\\t Flags: [%s]",'), (562, '\t\tbittok2str(lmp_obj_data_link_flag_values,'), (563, '\t\t\t"none",'), (564, '\t\t\tEXTRACT_16BITS(obj_tptr)>>8)));'), (565, ''), (568, '\t case LMP_CTYPE_UNMD:'), (576, '\t\ttotal_subobj_len = lmp_obj_len - 16;'), (577, '\t\toffset = 12;'), (578, '\t\twhile (total_subobj_len > 0 && hexdump == FALSE ) {'), (579, '\t\t\tsubobj_type = EXTRACT_16BITS(obj_tptr+offset)>>8;'), (580, '\t\t\tsubobj_len = 
EXTRACT_16BITS(obj_tptr+offset)&0x00FF;'), (581, '\t\t\tND_PRINT((ndo, "\\n\\t Subobject, Type: %s (%u), Length: %u",'), (582, '\t\t\t\ttok2str(lmp_data_link_subobj,'), (583, '\t\t\t\t\t"Unknown",'), (584, '\t\t\t\t\tsubobj_type),'), (585, '\t\t\t\t\tsubobj_type,'), (586, '\t\t\t\t\tsubobj_len));'), (587, '\t\t\tswitch(subobj_type) {'), (588, '\t\t\tcase INT_SWITCHING_TYPE_SUBOBJ:'), (589, '\t\t\t\tND_PRINT((ndo, "\\n\\t Switching Type: %s (%u)",'), (590, '\t\t\t\t\ttok2str(gmpls_switch_cap_values,'), (591, '\t\t\t\t\t\t"Unknown",'), (592, '\t\t\t\t\t\tEXTRACT_16BITS(obj_tptr+offset+2)>>8),'), (593, '\t\t\t\t\tEXTRACT_16BITS(obj_tptr+offset+2)>>8));'), (594, '\t\t\t\tND_PRINT((ndo, "\\n\\t Encoding Type: %s (%u)",'), (595, '\t\t\t\t\ttok2str(gmpls_encoding_values,'), (596, '\t\t\t\t\t\t"Unknown",'), (597, '\t\t\t\t\t\tEXTRACT_16BITS(obj_tptr+offset+2)&0x00FF),'), (598, '\t\t\t\t\tEXTRACT_16BITS(obj_tptr+offset+2)&0x00FF));'), (599, '\t\t\t\tbw.i = EXTRACT_32BITS(obj_tptr+offset+4);'), (600, '\t\t\t\tND_PRINT((ndo, "\\n\\t Min Reservable Bandwidth: %.3f Mbps",'), (601, ' bw.f*8/1000000));'), (602, '\t\t\t\tbw.i = EXTRACT_32BITS(obj_tptr+offset+8);'), (603, '\t\t\t\tND_PRINT((ndo, "\\n\\t Max Reservable Bandwidth: %.3f Mbps",'), (604, ' bw.f*8/1000000));'), (605, '\t\t\t\tbreak;'), (606, '\t\t\tcase WAVELENGTH_SUBOBJ:'), (607, '\t\t\t\tND_PRINT((ndo, "\\n\\t Wavelength: %u",'), (608, '\t\t\t\t\tEXTRACT_32BITS(obj_tptr+offset+4)));'), (609, '\t\t\t\tbreak;'), (610, '\t\t\tdefault:'), (611, '\t\t\t\t/* Any Unknown Subobject ==> Exit loop */'), (612, '\t\t\t\thexdump=TRUE;'), (613, '\t\t\t\tbreak;'), (614, '\t\t\t}'), (615, '\t\t\ttotal_subobj_len-=subobj_len;'), (616, '\t\t\toffset+=subobj_len;'), (617, '\t\t}'), (618, ''), (683, '\t case LMP_CTYPE_UNMD:'), (686, '\t\twhile (offset < (lmp_obj_len-(int)sizeof(struct lmp_object_header)) ) {'), (691, '\t\t\tND_PRINT((ndo, "\\n\\t\\t Active: %s (%u)", \t\t(EXTRACT_32BITS(obj_tptr+offset+4)>>31) ?'), (695, '\t\t\tND_PRINT((ndo, "\\n\\t\\t Direction: %s (%u)", (EXTRACT_32BITS(obj_tptr+offset+4)>>30)&0x1 ?'), (716, '\t case LMP_CTYPE_UNMD:'), (718, '\t\twhile (offset < (lmp_obj_len-(int)sizeof(struct lmp_object_header)) ) {'), (754, ''), (758, '\t\t\t\t EXTRACT_16BITS(obj_tptr)>>8)));'), (761, '\t\t EXTRACT_16BITS(obj_tptr) & 0x00FF));'), (767, '\t\tlink_type = EXTRACT_16BITS(obj_tptr)>>8;'), (774, '\t\tif (link_type == LMP_SD_SERVICE_CONFIG_CPSA_LINK_TYPE_SDH) {'), (778, '\t\t\t\t EXTRACT_16BITS(obj_tptr) & 0x00FF),'), (779, '\t\t\t EXTRACT_16BITS(obj_tptr) & 0x00FF));'), (780, '\t\t}'), (782, '\t\tif (link_type == LMP_SD_SERVICE_CONFIG_CPSA_LINK_TYPE_SONET) {'), (786, '\t\t\t\t EXTRACT_16BITS(obj_tptr) & 0x00FF),'), (787, '\t\t\t EXTRACT_16BITS(obj_tptr) & 0x00FF));'), (793, '\t\t\t\t EXTRACT_16BITS(obj_tptr+2)>>8)));'), (798, '\t\t\t\t EXTRACT_16BITS(obj_tptr+2)>>8 & 0x00FF)));'), (830, '\t\t\t EXTRACT_16BITS(obj_tptr+6) & 0x00FF)));'), (840, '\t\t\t EXTRACT_16BITS(obj_tptr+2) & 0x00FF)));')]}
350
82
858
4779
https://github.com/the-tcpdump-group/tcpdump
CVE-2017-13003
['CWE-125']
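Closing out this CVE-2017-13003 record: the other bounds check the fix adds, before the object loop even starts, is a clamp on the 16-bit length taken from the LMP common header, which is attacker-controlled. A standalone sketch of that "(too short)/(too long)" handling (HDR_LEN stands in for sizeof(struct lmp_common_header); all names here are illustrative):

/*
 * Sketch of the header-length clamp: reject a claimed length smaller
 * than the header itself, and never walk past the on-the-wire length.
 */
#include <stdio.h>

#define HDR_LEN 8u   /* assumed size of the fixed common header */

static int clamp_payload_len(unsigned int hdr_claimed, unsigned int wire_len,
                             unsigned int *payload_len)
{
    if (hdr_claimed < HDR_LEN) {
        printf(" (too short)");
        return -1;                 /* nothing usable after the header */
    }
    if (hdr_claimed > wire_len) {
        printf(" (too long)");
        hdr_claimed = wire_len;    /* clamp to what was actually captured */
    }
    *payload_len = hdr_claimed - HDR_LEN;
    return 0;
}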
bcm.c
bcm_release
// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) /* * bcm.c - Broadcast Manager to filter/send (cyclic) CAN content * * Copyright (c) 2002-2017 Volkswagen Group Electronic Research * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of Volkswagen nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * Alternatively, provided that this notice is retained in full, this * software may be distributed under the terms of the GNU General * Public License ("GPL") version 2, in which case the provisions of the * GPL apply INSTEAD OF those given above. * * The provided data structures and external interfaces from this code * are not restricted to be used by modules with a GPL compatible license. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH * DAMAGE. * */ #include <linux/module.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/hrtimer.h> #include <linux/list.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/uio.h> #include <linux/net.h> #include <linux/netdevice.h> #include <linux/socket.h> #include <linux/if_arp.h> #include <linux/skbuff.h> #include <linux/can.h> #include <linux/can/core.h> #include <linux/can/skb.h> #include <linux/can/bcm.h> #include <linux/slab.h> #include <net/sock.h> #include <net/net_namespace.h> /* * To send multiple CAN frame content within TX_SETUP or to filter * CAN messages with multiplex index within RX_SETUP, the number of * different filters is limited to 256 due to the one byte index value. */ #define MAX_NFRAMES 256 /* limit timers to 400 days for sending/timeouts */ #define BCM_TIMER_SEC_MAX (400 * 24 * 60 * 60) /* use of last_frames[index].flags */ #define RX_RECV 0x40 /* received data for this element */ #define RX_THR 0x80 /* element not been sent due to throttle feature */ #define BCM_CAN_FLAGS_MASK 0x3F /* to clean private flags after usage */ /* get best masking value for can_rx_register() for a given single can_id */ #define REGMASK(id) ((id & CAN_EFF_FLAG) ? 
\ (CAN_EFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG) : \ (CAN_SFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG)) MODULE_DESCRIPTION("PF_CAN broadcast manager protocol"); MODULE_LICENSE("Dual BSD/GPL"); MODULE_AUTHOR("Oliver Hartkopp <oliver.hartkopp@volkswagen.de>"); MODULE_ALIAS("can-proto-2"); #define BCM_MIN_NAMELEN CAN_REQUIRED_SIZE(struct sockaddr_can, can_ifindex) /* * easy access to the first 64 bit of can(fd)_frame payload. cp->data is * 64 bit aligned so the offset has to be multiples of 8 which is ensured * by the only callers in bcm_rx_cmp_to_index() bcm_rx_handler(). */ static inline u64 get_u64(const struct canfd_frame *cp, int offset) { return *(u64 *)(cp->data + offset); } struct bcm_op { struct list_head list; int ifindex; canid_t can_id; u32 flags; unsigned long frames_abs, frames_filtered; struct bcm_timeval ival1, ival2; struct hrtimer timer, thrtimer; ktime_t rx_stamp, kt_ival1, kt_ival2, kt_lastmsg; int rx_ifindex; int cfsiz; u32 count; u32 nframes; u32 currframe; /* void pointers to arrays of struct can[fd]_frame */ void *frames; void *last_frames; struct canfd_frame sframe; struct canfd_frame last_sframe; struct sock *sk; struct net_device *rx_reg_dev; }; struct bcm_sock { struct sock sk; int bound; int ifindex; struct list_head notifier; struct list_head rx_ops; struct list_head tx_ops; unsigned long dropped_usr_msgs; struct proc_dir_entry *bcm_proc_read; char procname [32]; /* inode number in decimal with \0 */ }; static LIST_HEAD(bcm_notifier_list); static DEFINE_SPINLOCK(bcm_notifier_lock); static struct bcm_sock *bcm_busy_notifier; static inline struct bcm_sock *bcm_sk(const struct sock *sk) { return (struct bcm_sock *)sk; } static inline ktime_t bcm_timeval_to_ktime(struct bcm_timeval tv) { return ktime_set(tv.tv_sec, tv.tv_usec * NSEC_PER_USEC); } /* check limitations for timeval provided by user */ static bool bcm_is_invalid_tv(struct bcm_msg_head *msg_head) { if ((msg_head->ival1.tv_sec < 0) || (msg_head->ival1.tv_sec > BCM_TIMER_SEC_MAX) || (msg_head->ival1.tv_usec < 0) || (msg_head->ival1.tv_usec >= USEC_PER_SEC) || (msg_head->ival2.tv_sec < 0) || (msg_head->ival2.tv_sec > BCM_TIMER_SEC_MAX) || (msg_head->ival2.tv_usec < 0) || (msg_head->ival2.tv_usec >= USEC_PER_SEC)) return true; return false; } #define CFSIZ(flags) ((flags & CAN_FD_FRAME) ? 
CANFD_MTU : CAN_MTU) #define OPSIZ sizeof(struct bcm_op) #define MHSIZ sizeof(struct bcm_msg_head) /* * procfs functions */ #if IS_ENABLED(CONFIG_PROC_FS) static char *bcm_proc_getifname(struct net *net, char *result, int ifindex) { struct net_device *dev; if (!ifindex) return "any"; rcu_read_lock(); dev = dev_get_by_index_rcu(net, ifindex); if (dev) strcpy(result, dev->name); else strcpy(result, "???"); rcu_read_unlock(); return result; } static int bcm_proc_show(struct seq_file *m, void *v) { char ifname[IFNAMSIZ]; struct net *net = m->private; struct sock *sk = (struct sock *)PDE_DATA(m->file->f_inode); struct bcm_sock *bo = bcm_sk(sk); struct bcm_op *op; seq_printf(m, ">>> socket %pK", sk->sk_socket); seq_printf(m, " / sk %pK", sk); seq_printf(m, " / bo %pK", bo); seq_printf(m, " / dropped %lu", bo->dropped_usr_msgs); seq_printf(m, " / bound %s", bcm_proc_getifname(net, ifname, bo->ifindex)); seq_printf(m, " <<<\n"); list_for_each_entry(op, &bo->rx_ops, list) { unsigned long reduction; /* print only active entries & prevent division by zero */ if (!op->frames_abs) continue; seq_printf(m, "rx_op: %03X %-5s ", op->can_id, bcm_proc_getifname(net, ifname, op->ifindex)); if (op->flags & CAN_FD_FRAME) seq_printf(m, "(%u)", op->nframes); else seq_printf(m, "[%u]", op->nframes); seq_printf(m, "%c ", (op->flags & RX_CHECK_DLC) ? 'd' : ' '); if (op->kt_ival1) seq_printf(m, "timeo=%lld ", (long long)ktime_to_us(op->kt_ival1)); if (op->kt_ival2) seq_printf(m, "thr=%lld ", (long long)ktime_to_us(op->kt_ival2)); seq_printf(m, "# recv %ld (%ld) => reduction: ", op->frames_filtered, op->frames_abs); reduction = 100 - (op->frames_filtered * 100) / op->frames_abs; seq_printf(m, "%s%ld%%\n", (reduction == 100) ? "near " : "", reduction); } list_for_each_entry(op, &bo->tx_ops, list) { seq_printf(m, "tx_op: %03X %s ", op->can_id, bcm_proc_getifname(net, ifname, op->ifindex)); if (op->flags & CAN_FD_FRAME) seq_printf(m, "(%u) ", op->nframes); else seq_printf(m, "[%u] ", op->nframes); if (op->kt_ival1) seq_printf(m, "t1=%lld ", (long long)ktime_to_us(op->kt_ival1)); if (op->kt_ival2) seq_printf(m, "t2=%lld ", (long long)ktime_to_us(op->kt_ival2)); seq_printf(m, "# sent %ld\n", op->frames_abs); } seq_putc(m, '\n'); return 0; } #endif /* CONFIG_PROC_FS */ /* * bcm_can_tx - send the (next) CAN frame to the appropriate CAN interface * of the given bcm tx op */ static void bcm_can_tx(struct bcm_op *op) { struct sk_buff *skb; struct net_device *dev; struct canfd_frame *cf = op->frames + op->cfsiz * op->currframe; /* no target device? => exit */ if (!op->ifindex) return; dev = dev_get_by_index(sock_net(op->sk), op->ifindex); if (!dev) { /* RFC: should this bcm_op remove itself here? */ return; } skb = alloc_skb(op->cfsiz + sizeof(struct can_skb_priv), gfp_any()); if (!skb) goto out; can_skb_reserve(skb); can_skb_prv(skb)->ifindex = dev->ifindex; can_skb_prv(skb)->skbcnt = 0; skb_put_data(skb, cf, op->cfsiz); /* send with loopback */ skb->dev = dev; can_skb_set_owner(skb, op->sk); can_send(skb, 1); /* update statistics */ op->currframe++; op->frames_abs++; /* reached last frame? 
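* then wrap around and continue the cyclic transmission at index 0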
*/ if (op->currframe >= op->nframes) op->currframe = 0; out: dev_put(dev); } /* * bcm_send_to_user - send a BCM message to the userspace * (consisting of bcm_msg_head + x CAN frames) */ static void bcm_send_to_user(struct bcm_op *op, struct bcm_msg_head *head, struct canfd_frame *frames, int has_timestamp) { struct sk_buff *skb; struct canfd_frame *firstframe; struct sockaddr_can *addr; struct sock *sk = op->sk; unsigned int datalen = head->nframes * op->cfsiz; int err; skb = alloc_skb(sizeof(*head) + datalen, gfp_any()); if (!skb) return; skb_put_data(skb, head, sizeof(*head)); if (head->nframes) { /* CAN frames starting here */ firstframe = (struct canfd_frame *)skb_tail_pointer(skb); skb_put_data(skb, frames, datalen); /* * the BCM uses the flags-element of the canfd_frame * structure for internal purposes. This is only * relevant for updates that are generated by the * BCM, where nframes is 1 */ if (head->nframes == 1) firstframe->flags &= BCM_CAN_FLAGS_MASK; } if (has_timestamp) { /* restore rx timestamp */ skb->tstamp = op->rx_stamp; } /* * Put the datagram to the queue so that bcm_recvmsg() can * get it from there. We need to pass the interface index to * bcm_recvmsg(). We pass a whole struct sockaddr_can in skb->cb * containing the interface index. */ sock_skb_cb_check_size(sizeof(struct sockaddr_can)); addr = (struct sockaddr_can *)skb->cb; memset(addr, 0, sizeof(*addr)); addr->can_family = AF_CAN; addr->can_ifindex = op->rx_ifindex; err = sock_queue_rcv_skb(sk, skb); if (err < 0) { struct bcm_sock *bo = bcm_sk(sk); kfree_skb(skb); /* don't care about overflows in this statistic */ bo->dropped_usr_msgs++; } } static bool bcm_tx_set_expiry(struct bcm_op *op, struct hrtimer *hrt) { ktime_t ival; if (op->kt_ival1 && op->count) ival = op->kt_ival1; else if (op->kt_ival2) ival = op->kt_ival2; else return false; hrtimer_set_expires(hrt, ktime_add(ktime_get(), ival)); return true; } static void bcm_tx_start_timer(struct bcm_op *op) { if (bcm_tx_set_expiry(op, &op->timer)) hrtimer_start_expires(&op->timer, HRTIMER_MODE_ABS_SOFT); } /* bcm_tx_timeout_handler - performs cyclic CAN frame transmissions */ static enum hrtimer_restart bcm_tx_timeout_handler(struct hrtimer *hrtimer) { struct bcm_op *op = container_of(hrtimer, struct bcm_op, timer); struct bcm_msg_head msg_head; if (op->kt_ival1 && (op->count > 0)) { op->count--; if (!op->count && (op->flags & TX_COUNTEVT)) { /* create notification to user */ memset(&msg_head, 0, sizeof(msg_head)); msg_head.opcode = TX_EXPIRED; msg_head.flags = op->flags; msg_head.count = op->count; msg_head.ival1 = op->ival1; msg_head.ival2 = op->ival2; msg_head.can_id = op->can_id; msg_head.nframes = 0; bcm_send_to_user(op, &msg_head, NULL, 0); } bcm_can_tx(op); } else if (op->kt_ival2) { bcm_can_tx(op); } return bcm_tx_set_expiry(op, &op->timer) ? 
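/* restart as long as a new expiry can be armed: either the counted ival1 phase or the ival2 phase */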
HRTIMER_RESTART : HRTIMER_NORESTART; } /* * bcm_rx_changed - create a RX_CHANGED notification due to changed content */ static void bcm_rx_changed(struct bcm_op *op, struct canfd_frame *data) { struct bcm_msg_head head; /* update statistics */ op->frames_filtered++; /* prevent statistics overflow */ if (op->frames_filtered > ULONG_MAX/100) op->frames_filtered = op->frames_abs = 0; /* this element is not throttled anymore */ data->flags &= (BCM_CAN_FLAGS_MASK|RX_RECV); memset(&head, 0, sizeof(head)); head.opcode = RX_CHANGED; head.flags = op->flags; head.count = op->count; head.ival1 = op->ival1; head.ival2 = op->ival2; head.can_id = op->can_id; head.nframes = 1; bcm_send_to_user(op, &head, data, 1); } /* * bcm_rx_update_and_send - process a detected relevant receive content change * 1. update the last received data * 2. send a notification to the user (if possible) */ static void bcm_rx_update_and_send(struct bcm_op *op, struct canfd_frame *lastdata, const struct canfd_frame *rxdata) { memcpy(lastdata, rxdata, op->cfsiz); /* mark as used and throttled by default */ lastdata->flags |= (RX_RECV|RX_THR); /* throttling mode inactive? */ if (!op->kt_ival2) { /* send RX_CHANGED to the user immediately */ bcm_rx_changed(op, lastdata); return; } /* with active throttling timer we are just done here */ if (hrtimer_active(&op->thrtimer)) return; /* first reception with enabled throttling mode */ if (!op->kt_lastmsg) goto rx_changed_settime; /* got a second frame inside a potential throttle period? */ if (ktime_us_delta(ktime_get(), op->kt_lastmsg) < ktime_to_us(op->kt_ival2)) { /* do not send the saved data - only start throttle timer */ hrtimer_start(&op->thrtimer, ktime_add(op->kt_lastmsg, op->kt_ival2), HRTIMER_MODE_ABS_SOFT); return; } /* the gap was big enough that throttling was not needed here */ rx_changed_settime: bcm_rx_changed(op, lastdata); op->kt_lastmsg = ktime_get(); } /* * bcm_rx_cmp_to_index - (bit)compares the currently received data to formerly * received data stored in op->last_frames[] */ static void bcm_rx_cmp_to_index(struct bcm_op *op, unsigned int index, const struct canfd_frame *rxdata) { struct canfd_frame *cf = op->frames + op->cfsiz * index; struct canfd_frame *lcf = op->last_frames + op->cfsiz * index; int i; /* * no one uses the MSBs of flags for comparison, * so we use them here to detect the first time of reception */ if (!(lcf->flags & RX_RECV)) { /* received data for the first time => send update to user */ bcm_rx_update_and_send(op, lcf, rxdata); return; } /* do a real check in CAN frame data section */ for (i = 0; i < rxdata->len; i += 8) { if ((get_u64(cf, i) & get_u64(rxdata, i)) != (get_u64(cf, i) & get_u64(lcf, i))) { bcm_rx_update_and_send(op, lcf, rxdata); return; } } if (op->flags & RX_CHECK_DLC) { /* do a real check in CAN frame length */ if (rxdata->len != lcf->len) { bcm_rx_update_and_send(op, lcf, rxdata); return; } } } /* * bcm_rx_starttimer - enable timeout monitoring for CAN frame reception */ static void bcm_rx_starttimer(struct bcm_op *op) { if (op->flags & RX_NO_AUTOTIMER) return; if (op->kt_ival1) hrtimer_start(&op->timer, op->kt_ival1, HRTIMER_MODE_REL_SOFT); } /* bcm_rx_timeout_handler - when the (cyclic) CAN frame reception timed out */ static enum hrtimer_restart bcm_rx_timeout_handler(struct hrtimer *hrtimer) { struct bcm_op *op = container_of(hrtimer, struct bcm_op, timer); struct bcm_msg_head msg_head; /* if the user wants to be informed when cyclic CAN messages come back */ if ((op->flags & RX_ANNOUNCE_RESUME) && op->last_frames) { /* clear 
received CAN frames to indicate 'nothing received' */ memset(op->last_frames, 0, op->nframes * op->cfsiz); } /* create notification to user */ memset(&msg_head, 0, sizeof(msg_head)); msg_head.opcode = RX_TIMEOUT; msg_head.flags = op->flags; msg_head.count = op->count; msg_head.ival1 = op->ival1; msg_head.ival2 = op->ival2; msg_head.can_id = op->can_id; msg_head.nframes = 0; bcm_send_to_user(op, &msg_head, NULL, 0); return HRTIMER_NORESTART; } /* * bcm_rx_do_flush - helper for bcm_rx_thr_flush */ static inline int bcm_rx_do_flush(struct bcm_op *op, unsigned int index) { struct canfd_frame *lcf = op->last_frames + op->cfsiz * index; if ((op->last_frames) && (lcf->flags & RX_THR)) { bcm_rx_changed(op, lcf); return 1; } return 0; } /* * bcm_rx_thr_flush - Check for throttled data and send it to the userspace */ static int bcm_rx_thr_flush(struct bcm_op *op) { int updated = 0; if (op->nframes > 1) { unsigned int i; /* for MUX filter we start at index 1 */ for (i = 1; i < op->nframes; i++) updated += bcm_rx_do_flush(op, i); } else { /* for RX_FILTER_ID and simple filter */ updated += bcm_rx_do_flush(op, 0); } return updated; } /* * bcm_rx_thr_handler - the time for blocked content updates is over now: * Check for throttled data and send it to the userspace */ static enum hrtimer_restart bcm_rx_thr_handler(struct hrtimer *hrtimer) { struct bcm_op *op = container_of(hrtimer, struct bcm_op, thrtimer); if (bcm_rx_thr_flush(op)) { hrtimer_forward(hrtimer, ktime_get(), op->kt_ival2); return HRTIMER_RESTART; } else { /* rearm throttle handling */ op->kt_lastmsg = 0; return HRTIMER_NORESTART; } } /* * bcm_rx_handler - handle a CAN frame reception */ static void bcm_rx_handler(struct sk_buff *skb, void *data) { struct bcm_op *op = (struct bcm_op *)data; const struct canfd_frame *rxframe = (struct canfd_frame *)skb->data; unsigned int i; if (op->can_id != rxframe->can_id) return; /* make sure to handle the correct frame type (CAN / CAN FD) */ if (skb->len != op->cfsiz) return; /* disable timeout */ hrtimer_cancel(&op->timer); /* save rx timestamp */ op->rx_stamp = skb->tstamp; /* save originator for recvfrom() */ op->rx_ifindex = skb->dev->ifindex; /* update statistics */ op->frames_abs++; if (op->flags & RX_RTR_FRAME) { /* send reply for RTR-request (placed in op->frames[0]) */ bcm_can_tx(op); return; } if (op->flags & RX_FILTER_ID) { /* the easiest case */ bcm_rx_update_and_send(op, op->last_frames, rxframe); goto rx_starttimer; } if (op->nframes == 1) { /* simple compare with index 0 */ bcm_rx_cmp_to_index(op, 0, rxframe); goto rx_starttimer; } if (op->nframes > 1) { /* * multiplex compare * * find the first multiplex mask that fits. 
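* A hypothetical illustration (not part of the original comment): a
* MUX-mask with data[0] = 0xFF in frames[0] selects the first payload
* byte, so the stored frame at index i fits exactly when its first byte
* equals the first byte of the received frame, because
* (mask & rx) == (mask & stored) is evaluated on the first 64 bits.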
* Remark: The MUX-mask is stored in index 0 - but only the * first 64 bits of the frame data[] are relevant (CAN FD) */ for (i = 1; i < op->nframes; i++) { if ((get_u64(op->frames, 0) & get_u64(rxframe, 0)) == (get_u64(op->frames, 0) & get_u64(op->frames + op->cfsiz * i, 0))) { bcm_rx_cmp_to_index(op, i, rxframe); break; } } } rx_starttimer: bcm_rx_starttimer(op); } /* * helpers for bcm_op handling: find & delete bcm [rx|tx] op elements */ static struct bcm_op *bcm_find_op(struct list_head *ops, struct bcm_msg_head *mh, int ifindex) { struct bcm_op *op; list_for_each_entry(op, ops, list) { if ((op->can_id == mh->can_id) && (op->ifindex == ifindex) && (op->flags & CAN_FD_FRAME) == (mh->flags & CAN_FD_FRAME)) return op; } return NULL; } static void bcm_remove_op(struct bcm_op *op) { hrtimer_cancel(&op->timer); hrtimer_cancel(&op->thrtimer); if ((op->frames) && (op->frames != &op->sframe)) kfree(op->frames); if ((op->last_frames) && (op->last_frames != &op->last_sframe)) kfree(op->last_frames); kfree(op); } static void bcm_rx_unreg(struct net_device *dev, struct bcm_op *op) { if (op->rx_reg_dev == dev) { can_rx_unregister(dev_net(dev), dev, op->can_id, REGMASK(op->can_id), bcm_rx_handler, op); /* mark as removed subscription */ op->rx_reg_dev = NULL; } else printk(KERN_ERR "can-bcm: bcm_rx_unreg: registered device " "mismatch %p %p\n", op->rx_reg_dev, dev); } /* * bcm_delete_rx_op - find and remove a rx op (returns number of removed ops) */ static int bcm_delete_rx_op(struct list_head *ops, struct bcm_msg_head *mh, int ifindex) { struct bcm_op *op, *n; list_for_each_entry_safe(op, n, ops, list) { if ((op->can_id == mh->can_id) && (op->ifindex == ifindex) && (op->flags & CAN_FD_FRAME) == (mh->flags & CAN_FD_FRAME)) { /* * Don't care if we're bound or not (due to netdev * problems), can_rx_unregister() is always a safe * thing to do here. 
*/ if (op->ifindex) { /* * Only remove subscriptions that had not * been removed due to NETDEV_UNREGISTER * in bcm_notifier() */ if (op->rx_reg_dev) { struct net_device *dev; dev = dev_get_by_index(sock_net(op->sk), op->ifindex); if (dev) { bcm_rx_unreg(dev, op); dev_put(dev); } } } else can_rx_unregister(sock_net(op->sk), NULL, op->can_id, REGMASK(op->can_id), bcm_rx_handler, op); list_del(&op->list); bcm_remove_op(op); return 1; /* done */ } } return 0; /* not found */ } /* * bcm_delete_tx_op - find and remove a tx op (returns number of removed ops) */ static int bcm_delete_tx_op(struct list_head *ops, struct bcm_msg_head *mh, int ifindex) { struct bcm_op *op, *n; list_for_each_entry_safe(op, n, ops, list) { if ((op->can_id == mh->can_id) && (op->ifindex == ifindex) && (op->flags & CAN_FD_FRAME) == (mh->flags & CAN_FD_FRAME)) { list_del(&op->list); bcm_remove_op(op); return 1; /* done */ } } return 0; /* not found */ } /* * bcm_read_op - read out a bcm_op and send it to the user (for bcm_sendmsg) */ static int bcm_read_op(struct list_head *ops, struct bcm_msg_head *msg_head, int ifindex) { struct bcm_op *op = bcm_find_op(ops, msg_head, ifindex); if (!op) return -EINVAL; /* put current values into msg_head */ msg_head->flags = op->flags; msg_head->count = op->count; msg_head->ival1 = op->ival1; msg_head->ival2 = op->ival2; msg_head->nframes = op->nframes; bcm_send_to_user(op, msg_head, op->frames, 0); return MHSIZ; } /* * bcm_tx_setup - create or update a bcm tx op (for bcm_sendmsg) */ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg, int ifindex, struct sock *sk) { struct bcm_sock *bo = bcm_sk(sk); struct bcm_op *op; struct canfd_frame *cf; unsigned int i; int err; /* we need a real device to send frames */ if (!ifindex) return -ENODEV; /* check nframes boundaries - we need at least one CAN frame */ if (msg_head->nframes < 1 || msg_head->nframes > MAX_NFRAMES) return -EINVAL; /* check timeval limitations */ if ((msg_head->flags & SETTIMER) && bcm_is_invalid_tv(msg_head)) return -EINVAL; /* check the given can_id */ op = bcm_find_op(&bo->tx_ops, msg_head, ifindex); if (op) { /* update existing BCM operation */ /* * Do we need more space for the CAN frames than currently * allocated? -> This is a _really_ unusual use-case and * therefore (complexity / locking) it is not supported. 
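* Userspace that really needs a larger frames array therefore has to
* issue a TX_DELETE followed by a fresh TX_SETUP.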
*/ if (msg_head->nframes > op->nframes) return -E2BIG; /* update CAN frames content */ for (i = 0; i < msg_head->nframes; i++) { cf = op->frames + op->cfsiz * i; err = memcpy_from_msg((u8 *)cf, msg, op->cfsiz); if (op->flags & CAN_FD_FRAME) { if (cf->len > 64) err = -EINVAL; } else { if (cf->len > 8) err = -EINVAL; } if (err < 0) return err; if (msg_head->flags & TX_CP_CAN_ID) { /* copy can_id into frame */ cf->can_id = msg_head->can_id; } } op->flags = msg_head->flags; } else { /* insert new BCM operation for the given can_id */ op = kzalloc(OPSIZ, GFP_KERNEL); if (!op) return -ENOMEM; op->can_id = msg_head->can_id; op->cfsiz = CFSIZ(msg_head->flags); op->flags = msg_head->flags; /* create array for CAN frames and copy the data */ if (msg_head->nframes > 1) { op->frames = kmalloc_array(msg_head->nframes, op->cfsiz, GFP_KERNEL); if (!op->frames) { kfree(op); return -ENOMEM; } } else op->frames = &op->sframe; for (i = 0; i < msg_head->nframes; i++) { cf = op->frames + op->cfsiz * i; err = memcpy_from_msg((u8 *)cf, msg, op->cfsiz); if (op->flags & CAN_FD_FRAME) { if (cf->len > 64) err = -EINVAL; } else { if (cf->len > 8) err = -EINVAL; } if (err < 0) { if (op->frames != &op->sframe) kfree(op->frames); kfree(op); return err; } if (msg_head->flags & TX_CP_CAN_ID) { /* copy can_id into frame */ cf->can_id = msg_head->can_id; } } /* tx_ops never compare with previous received messages */ op->last_frames = NULL; /* bcm_can_tx / bcm_tx_timeout_handler needs this */ op->sk = sk; op->ifindex = ifindex; /* initialize uninitialized (kzalloc) structure */ hrtimer_init(&op->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT); op->timer.function = bcm_tx_timeout_handler; /* currently unused in tx_ops */ hrtimer_init(&op->thrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT); /* add this bcm_op to the list of the tx_ops */ list_add(&op->list, &bo->tx_ops); } /* if ((op = bcm_find_op(&bo->tx_ops, msg_head->can_id, ifindex))) */ if (op->nframes != msg_head->nframes) { op->nframes = msg_head->nframes; /* start multiple frame transmission with index 0 */ op->currframe = 0; } /* check flags */ if (op->flags & TX_RESET_MULTI_IDX) { /* start multiple frame transmission with index 0 */ op->currframe = 0; } if (op->flags & SETTIMER) { /* set timer values */ op->count = msg_head->count; op->ival1 = msg_head->ival1; op->ival2 = msg_head->ival2; op->kt_ival1 = bcm_timeval_to_ktime(msg_head->ival1); op->kt_ival2 = bcm_timeval_to_ktime(msg_head->ival2); /* disable an active timer due to zero values? */ if (!op->kt_ival1 && !op->kt_ival2) hrtimer_cancel(&op->timer); } if (op->flags & STARTTIMER) { hrtimer_cancel(&op->timer); /* spec: send CAN frame when starting timer */ op->flags |= TX_ANNOUNCE; } if (op->flags & TX_ANNOUNCE) { bcm_can_tx(op); if (op->count) op->count--; } if (op->flags & STARTTIMER) bcm_tx_start_timer(op); return msg_head->nframes * op->cfsiz + MHSIZ; } /* * bcm_rx_setup - create or update a bcm rx op (for bcm_sendmsg) */ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg, int ifindex, struct sock *sk) { struct bcm_sock *bo = bcm_sk(sk); struct bcm_op *op; int do_rx_register; int err = 0; if ((msg_head->flags & RX_FILTER_ID) || (!(msg_head->nframes))) { /* be robust against wrong usage ... 
*/ msg_head->flags |= RX_FILTER_ID; /* ignore trailing garbage */ msg_head->nframes = 0; } /* the first element contains the mux-mask => MAX_NFRAMES + 1 */ if (msg_head->nframes > MAX_NFRAMES + 1) return -EINVAL; if ((msg_head->flags & RX_RTR_FRAME) && ((msg_head->nframes != 1) || (!(msg_head->can_id & CAN_RTR_FLAG)))) return -EINVAL; /* check timeval limitations */ if ((msg_head->flags & SETTIMER) && bcm_is_invalid_tv(msg_head)) return -EINVAL; /* check the given can_id */ op = bcm_find_op(&bo->rx_ops, msg_head, ifindex); if (op) { /* update existing BCM operation */ /* * Do we need more space for the CAN frames than currently * allocated? -> This is a _really_ unusual use-case and * therefore (complexity / locking) it is not supported. */ if (msg_head->nframes > op->nframes) return -E2BIG; if (msg_head->nframes) { /* update CAN frames content */ err = memcpy_from_msg(op->frames, msg, msg_head->nframes * op->cfsiz); if (err < 0) return err; /* clear last_frames to indicate 'nothing received' */ memset(op->last_frames, 0, msg_head->nframes * op->cfsiz); } op->nframes = msg_head->nframes; op->flags = msg_head->flags; /* Only an update -> do not call can_rx_register() */ do_rx_register = 0; } else { /* insert new BCM operation for the given can_id */ op = kzalloc(OPSIZ, GFP_KERNEL); if (!op) return -ENOMEM; op->can_id = msg_head->can_id; op->nframes = msg_head->nframes; op->cfsiz = CFSIZ(msg_head->flags); op->flags = msg_head->flags; if (msg_head->nframes > 1) { /* create array for CAN frames and copy the data */ op->frames = kmalloc_array(msg_head->nframes, op->cfsiz, GFP_KERNEL); if (!op->frames) { kfree(op); return -ENOMEM; } /* create and init array for received CAN frames */ op->last_frames = kcalloc(msg_head->nframes, op->cfsiz, GFP_KERNEL); if (!op->last_frames) { kfree(op->frames); kfree(op); return -ENOMEM; } } else { op->frames = &op->sframe; op->last_frames = &op->last_sframe; } if (msg_head->nframes) { err = memcpy_from_msg(op->frames, msg, msg_head->nframes * op->cfsiz); if (err < 0) { if (op->frames != &op->sframe) kfree(op->frames); if (op->last_frames != &op->last_sframe) kfree(op->last_frames); kfree(op); return err; } } /* bcm_can_tx / bcm_tx_timeout_handler needs this */ op->sk = sk; op->ifindex = ifindex; /* ifindex for timeout events w/o previous frame reception */ op->rx_ifindex = ifindex; /* initialize uninitialized (kzalloc) structure */ hrtimer_init(&op->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT); op->timer.function = bcm_rx_timeout_handler; hrtimer_init(&op->thrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT); op->thrtimer.function = bcm_rx_thr_handler; /* add this bcm_op to the list of the rx_ops */ list_add(&op->list, &bo->rx_ops); /* call can_rx_register() */ do_rx_register = 1; } /* if ((op = bcm_find_op(&bo->rx_ops, msg_head->can_id, ifindex))) */ /* check flags */ if (op->flags & RX_RTR_FRAME) { struct canfd_frame *frame0 = op->frames; /* no timers in RTR-mode */ hrtimer_cancel(&op->thrtimer); hrtimer_cancel(&op->timer); /* * funny feature in RX(!)_SETUP only for RTR-mode: * copy can_id into frame BUT without RTR-flag to * prevent a full-load-loopback-test ... ;-] */ if ((op->flags & TX_CP_CAN_ID) || (frame0->can_id == op->can_id)) frame0->can_id = op->can_id & ~CAN_RTR_FLAG; } else { if (op->flags & SETTIMER) { /* set timer value */ op->ival1 = msg_head->ival1; op->ival2 = msg_head->ival2; op->kt_ival1 = bcm_timeval_to_ktime(msg_head->ival1); op->kt_ival2 = bcm_timeval_to_ktime(msg_head->ival2); /* disable an active timer due to zero value? 
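* (ival1 == 0 switches off the rx timeout monitoring entirely)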
*/ if (!op->kt_ival1) hrtimer_cancel(&op->timer); /* * In any case cancel the throttle timer, flush * potentially blocked msgs and reset throttle handling */ op->kt_lastmsg = 0; hrtimer_cancel(&op->thrtimer); bcm_rx_thr_flush(op); } if ((op->flags & STARTTIMER) && op->kt_ival1) hrtimer_start(&op->timer, op->kt_ival1, HRTIMER_MODE_REL_SOFT); } /* now we can register for can_ids, if we added a new bcm_op */ if (do_rx_register) { if (ifindex) { struct net_device *dev; dev = dev_get_by_index(sock_net(sk), ifindex); if (dev) { err = can_rx_register(sock_net(sk), dev, op->can_id, REGMASK(op->can_id), bcm_rx_handler, op, "bcm", sk); op->rx_reg_dev = dev; dev_put(dev); } } else err = can_rx_register(sock_net(sk), NULL, op->can_id, REGMASK(op->can_id), bcm_rx_handler, op, "bcm", sk); if (err) { /* this bcm rx op is broken -> remove it */ list_del(&op->list); bcm_remove_op(op); return err; } } return msg_head->nframes * op->cfsiz + MHSIZ; } /* * bcm_tx_send - send a single CAN frame to the CAN interface (for bcm_sendmsg) */ static int bcm_tx_send(struct msghdr *msg, int ifindex, struct sock *sk, int cfsiz) { struct sk_buff *skb; struct net_device *dev; int err; /* we need a real device to send frames */ if (!ifindex) return -ENODEV; skb = alloc_skb(cfsiz + sizeof(struct can_skb_priv), GFP_KERNEL); if (!skb) return -ENOMEM; can_skb_reserve(skb); err = memcpy_from_msg(skb_put(skb, cfsiz), msg, cfsiz); if (err < 0) { kfree_skb(skb); return err; } dev = dev_get_by_index(sock_net(sk), ifindex); if (!dev) { kfree_skb(skb); return -ENODEV; } can_skb_prv(skb)->ifindex = dev->ifindex; can_skb_prv(skb)->skbcnt = 0; skb->dev = dev; can_skb_set_owner(skb, sk); err = can_send(skb, 1); /* send with loopback */ dev_put(dev); if (err) return err; return cfsiz + MHSIZ; } /* * bcm_sendmsg - process BCM commands (opcodes) from the userspace */ static int bcm_sendmsg(struct socket *sock, struct msghdr *msg, size_t size) { struct sock *sk = sock->sk; struct bcm_sock *bo = bcm_sk(sk); int ifindex = bo->ifindex; /* default ifindex for this bcm_op */ struct bcm_msg_head msg_head; int cfsiz; int ret; /* read bytes or error codes as return value */ if (!bo->bound) return -ENOTCONN; /* check for valid message length from userspace */ if (size < MHSIZ) return -EINVAL; /* read message head information */ ret = memcpy_from_msg((u8 *)&msg_head, msg, MHSIZ); if (ret < 0) return ret; cfsiz = CFSIZ(msg_head.flags); if ((size - MHSIZ) % cfsiz) return -EINVAL; /* check for alternative ifindex for this bcm_op */ if (!ifindex && msg->msg_name) { /* no bound device as default => check msg_name */ DECLARE_SOCKADDR(struct sockaddr_can *, addr, msg->msg_name); if (msg->msg_namelen < BCM_MIN_NAMELEN) return -EINVAL; if (addr->can_family != AF_CAN) return -EINVAL; /* ifindex from sendto() */ ifindex = addr->can_ifindex; if (ifindex) { struct net_device *dev; dev = dev_get_by_index(sock_net(sk), ifindex); if (!dev) return -ENODEV; if (dev->type != ARPHRD_CAN) { dev_put(dev); return -ENODEV; } dev_put(dev); } } lock_sock(sk); switch (msg_head.opcode) { case TX_SETUP: ret = bcm_tx_setup(&msg_head, msg, ifindex, sk); break; case RX_SETUP: ret = bcm_rx_setup(&msg_head, msg, ifindex, sk); break; case TX_DELETE: if (bcm_delete_tx_op(&bo->tx_ops, &msg_head, ifindex)) ret = MHSIZ; else ret = -EINVAL; break; case RX_DELETE: if (bcm_delete_rx_op(&bo->rx_ops, &msg_head, ifindex)) ret = MHSIZ; else ret = -EINVAL; break; case TX_READ: /* reuse msg_head for the reply to TX_READ */ msg_head.opcode = TX_STATUS; ret = bcm_read_op(&bo->tx_ops, &msg_head, 
ifindex); break; case RX_READ: /* reuse msg_head for the reply to RX_READ */ msg_head.opcode = RX_STATUS; ret = bcm_read_op(&bo->rx_ops, &msg_head, ifindex); break; case TX_SEND: /* we need exactly one CAN frame behind the msg head */ if ((msg_head.nframes != 1) || (size != cfsiz + MHSIZ)) ret = -EINVAL; else ret = bcm_tx_send(msg, ifindex, sk, cfsiz); break; default: ret = -EINVAL; break; } release_sock(sk); return ret; } /* * notification handler for netdevice status changes */ static void bcm_notify(struct bcm_sock *bo, unsigned long msg, struct net_device *dev) { struct sock *sk = &bo->sk; struct bcm_op *op; int notify_enodev = 0; if (!net_eq(dev_net(dev), sock_net(sk))) return; switch (msg) { case NETDEV_UNREGISTER: lock_sock(sk); /* remove device specific receive entries */ list_for_each_entry(op, &bo->rx_ops, list) if (op->rx_reg_dev == dev) bcm_rx_unreg(dev, op); /* remove device reference, if this is our bound device */ if (bo->bound && bo->ifindex == dev->ifindex) { bo->bound = 0; bo->ifindex = 0; notify_enodev = 1; } release_sock(sk); if (notify_enodev) { sk->sk_err = ENODEV; if (!sock_flag(sk, SOCK_DEAD)) sk->sk_error_report(sk); } break; case NETDEV_DOWN: if (bo->bound && bo->ifindex == dev->ifindex) { sk->sk_err = ENETDOWN; if (!sock_flag(sk, SOCK_DEAD)) sk->sk_error_report(sk); } } } static int bcm_notifier(struct notifier_block *nb, unsigned long msg, void *ptr) { struct net_device *dev = netdev_notifier_info_to_dev(ptr); if (dev->type != ARPHRD_CAN) return NOTIFY_DONE; if (msg != NETDEV_UNREGISTER && msg != NETDEV_DOWN) return NOTIFY_DONE; if (unlikely(bcm_busy_notifier)) /* Check for reentrant bug. */ return NOTIFY_DONE; spin_lock(&bcm_notifier_lock); list_for_each_entry(bcm_busy_notifier, &bcm_notifier_list, notifier) { spin_unlock(&bcm_notifier_lock); bcm_notify(bcm_busy_notifier, msg, dev); spin_lock(&bcm_notifier_lock); } bcm_busy_notifier = NULL; spin_unlock(&bcm_notifier_lock); return NOTIFY_DONE; } /* * initial settings for all BCM sockets to be set at socket creation time */ static int bcm_init(struct sock *sk) { struct bcm_sock *bo = bcm_sk(sk); bo->bound = 0; bo->ifindex = 0; bo->dropped_usr_msgs = 0; bo->bcm_proc_read = NULL; INIT_LIST_HEAD(&bo->tx_ops); INIT_LIST_HEAD(&bo->rx_ops); /* set notifier */ spin_lock(&bcm_notifier_lock); list_add_tail(&bo->notifier, &bcm_notifier_list); spin_unlock(&bcm_notifier_lock); return 0; } /* * standard socket functions */ static int bcm_release(struct socket *sock) { struct sock *sk = sock->sk; struct net *net; struct bcm_sock *bo; struct bcm_op *op, *next; if (!sk) return 0; net = sock_net(sk); bo = bcm_sk(sk); /* remove bcm_ops, timer, rx_unregister(), etc. */ spin_lock(&bcm_notifier_lock); while (bcm_busy_notifier == bo) { spin_unlock(&bcm_notifier_lock); schedule_timeout_uninterruptible(1); spin_lock(&bcm_notifier_lock); } list_del(&bo->notifier); spin_unlock(&bcm_notifier_lock); lock_sock(sk); list_for_each_entry_safe(op, next, &bo->tx_ops, list) bcm_remove_op(op); list_for_each_entry_safe(op, next, &bo->rx_ops, list) { /* * Don't care if we're bound or not (due to netdev problems), * can_rx_unregister() is always a safe thing to do here. 
*/ if (op->ifindex) { /* * Only remove subscriptions that had not * been removed due to NETDEV_UNREGISTER * in bcm_notifier() */ if (op->rx_reg_dev) { struct net_device *dev; dev = dev_get_by_index(net, op->ifindex); if (dev) { bcm_rx_unreg(dev, op); dev_put(dev); } } } else can_rx_unregister(net, NULL, op->can_id, REGMASK(op->can_id), bcm_rx_handler, op); bcm_remove_op(op); } #if IS_ENABLED(CONFIG_PROC_FS) /* remove procfs entry */ if (net->can.bcmproc_dir && bo->bcm_proc_read) remove_proc_entry(bo->procname, net->can.bcmproc_dir); #endif /* CONFIG_PROC_FS */ /* remove device reference */ if (bo->bound) { bo->bound = 0; bo->ifindex = 0; } sock_orphan(sk); sock->sk = NULL; release_sock(sk); sock_put(sk); return 0; } static int bcm_connect(struct socket *sock, struct sockaddr *uaddr, int len, int flags) { struct sockaddr_can *addr = (struct sockaddr_can *)uaddr; struct sock *sk = sock->sk; struct bcm_sock *bo = bcm_sk(sk); struct net *net = sock_net(sk); int ret = 0; if (len < BCM_MIN_NAMELEN) return -EINVAL; lock_sock(sk); if (bo->bound) { ret = -EISCONN; goto fail; } /* bind a device to this socket */ if (addr->can_ifindex) { struct net_device *dev; dev = dev_get_by_index(net, addr->can_ifindex); if (!dev) { ret = -ENODEV; goto fail; } if (dev->type != ARPHRD_CAN) { dev_put(dev); ret = -ENODEV; goto fail; } bo->ifindex = dev->ifindex; dev_put(dev); } else { /* no interface reference for ifindex = 0 ('any' CAN device) */ bo->ifindex = 0; } #if IS_ENABLED(CONFIG_PROC_FS) if (net->can.bcmproc_dir) { /* unique socket address as filename */ sprintf(bo->procname, "%lu", sock_i_ino(sk)); bo->bcm_proc_read = proc_create_net_single(bo->procname, 0644, net->can.bcmproc_dir, bcm_proc_show, sk); if (!bo->bcm_proc_read) { ret = -ENOMEM; goto fail; } } #endif /* CONFIG_PROC_FS */ bo->bound = 1; fail: release_sock(sk); return ret; } static int bcm_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, int flags) { struct sock *sk = sock->sk; struct sk_buff *skb; int error = 0; int noblock; int err; noblock = flags & MSG_DONTWAIT; flags &= ~MSG_DONTWAIT; skb = skb_recv_datagram(sk, flags, noblock, &error); if (!skb) return error; if (skb->len < size) size = skb->len; err = memcpy_to_msg(msg, skb->data, size); if (err < 0) { skb_free_datagram(sk, skb); return err; } sock_recv_ts_and_drops(msg, sk, skb); if (msg->msg_name) { __sockaddr_check_size(BCM_MIN_NAMELEN); msg->msg_namelen = BCM_MIN_NAMELEN; memcpy(msg->msg_name, skb->cb, msg->msg_namelen); } skb_free_datagram(sk, skb); return size; } static int bcm_sock_no_ioctlcmd(struct socket *sock, unsigned int cmd, unsigned long arg) { /* no ioctls for socket layer -> hand it down to NIC layer */ return -ENOIOCTLCMD; } static const struct proto_ops bcm_ops = { .family = PF_CAN, .release = bcm_release, .bind = sock_no_bind, .connect = bcm_connect, .socketpair = sock_no_socketpair, .accept = sock_no_accept, .getname = sock_no_getname, .poll = datagram_poll, .ioctl = bcm_sock_no_ioctlcmd, .gettstamp = sock_gettstamp, .listen = sock_no_listen, .shutdown = sock_no_shutdown, .sendmsg = bcm_sendmsg, .recvmsg = bcm_recvmsg, .mmap = sock_no_mmap, .sendpage = sock_no_sendpage, }; static struct proto bcm_proto __read_mostly = { .name = "CAN_BCM", .owner = THIS_MODULE, .obj_size = sizeof(struct bcm_sock), .init = bcm_init, }; static const struct can_proto bcm_can_proto = { .type = SOCK_DGRAM, .protocol = CAN_BCM, .ops = &bcm_ops, .prot = &bcm_proto, }; static int canbcm_pernet_init(struct net *net) { #if IS_ENABLED(CONFIG_PROC_FS) /* create /proc/net/can-bcm 
directory */ net->can.bcmproc_dir = proc_net_mkdir(net, "can-bcm", net->proc_net); #endif /* CONFIG_PROC_FS */ return 0; } static void canbcm_pernet_exit(struct net *net) { #if IS_ENABLED(CONFIG_PROC_FS) /* remove /proc/net/can-bcm directory */ if (net->can.bcmproc_dir) remove_proc_entry("can-bcm", net->proc_net); #endif /* CONFIG_PROC_FS */ } static struct pernet_operations canbcm_pernet_ops __read_mostly = { .init = canbcm_pernet_init, .exit = canbcm_pernet_exit, }; static struct notifier_block canbcm_notifier = { .notifier_call = bcm_notifier }; static int __init bcm_module_init(void) { int err; pr_info("can: broadcast manager protocol\n"); err = can_proto_register(&bcm_can_proto); if (err < 0) { printk(KERN_ERR "can: registration of bcm protocol failed\n"); return err; } register_pernet_subsys(&canbcm_pernet_ops); register_netdevice_notifier(&canbcm_notifier); return 0; } static void __exit bcm_module_exit(void) { can_proto_unregister(&bcm_can_proto); unregister_netdevice_notifier(&canbcm_notifier); unregister_pernet_subsys(&canbcm_pernet_ops); } module_init(bcm_module_init); module_exit(bcm_module_exit);
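As a usage illustration for the opcodes dispatched by bcm_sendmsg(), the following minimal user-space sketch sets up a cyclic transmission via TX_SETUP. It is not part of bcm.c: the interface name "can0" is an assumption and all error handling is omitted. The message layout is exactly what bcm_sendmsg() validates, one struct bcm_msg_head immediately followed by nframes frames of CFSIZ(flags) bytes.

#include <linux/can.h>
#include <linux/can/bcm.h>
#include <net/if.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	/* bcm_msg_head directly followed by one classic CAN frame */
	struct {
		struct bcm_msg_head head;
		struct can_frame frame;
	} msg;
	struct sockaddr_can addr = { .can_family = AF_CAN };
	struct ifreq ifr;
	int s = socket(PF_CAN, SOCK_DGRAM, CAN_BCM);

	strcpy(ifr.ifr_name, "can0");	/* assumed interface name */
	ioctl(s, SIOCGIFINDEX, &ifr);
	addr.can_ifindex = ifr.ifr_ifindex;
	connect(s, (struct sockaddr *)&addr, sizeof(addr));

	memset(&msg, 0, sizeof(msg));
	msg.head.opcode = TX_SETUP;
	msg.head.flags = SETTIMER | STARTTIMER;
	msg.head.count = 0;		/* skip the ival1 phase, cycle on ival2 */
	msg.head.ival2.tv_sec = 1;	/* one frame per second */
	msg.head.can_id = 0x123;
	msg.head.nframes = 1;
	msg.frame.can_id = 0x123;
	msg.frame.can_dlc = 2;
	msg.frame.data[0] = 0x11;
	msg.frame.data[1] = 0x22;
	write(s, &msg, sizeof(msg));	/* size == MHSIZ + 1 * CFSIZ(0) */

	sleep(5);			/* bcm keeps sending in the background */
	close(s);
	return 0;
}

Because STARTTIMER implies TX_ANNOUNCE, the first frame goes out immediately; with count == 0 the timer runs on ival2 alone, so bcm_tx_timeout_handler() re-sends the frame once per second until a TX_DELETE arrives or the socket is closed.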
// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) /* * bcm.c - Broadcast Manager to filter/send (cyclic) CAN content * * Copyright (c) 2002-2017 Volkswagen Group Electronic Research * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of Volkswagen nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * Alternatively, provided that this notice is retained in full, this * software may be distributed under the terms of the GNU General * Public License ("GPL") version 2, in which case the provisions of the * GPL apply INSTEAD OF those given above. * * The provided data structures and external interfaces from this code * are not restricted to be used by modules with a GPL compatible license. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH * DAMAGE. * */ #include <linux/module.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/hrtimer.h> #include <linux/list.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/uio.h> #include <linux/net.h> #include <linux/netdevice.h> #include <linux/socket.h> #include <linux/if_arp.h> #include <linux/skbuff.h> #include <linux/can.h> #include <linux/can/core.h> #include <linux/can/skb.h> #include <linux/can/bcm.h> #include <linux/slab.h> #include <net/sock.h> #include <net/net_namespace.h> /* * To send multiple CAN frame content within TX_SETUP or to filter * CAN messages with multiplex index within RX_SETUP, the number of * different filters is limited to 256 due to the one byte index value. */ #define MAX_NFRAMES 256 /* limit timers to 400 days for sending/timeouts */ #define BCM_TIMER_SEC_MAX (400 * 24 * 60 * 60) /* use of last_frames[index].flags */ #define RX_RECV 0x40 /* received data for this element */ #define RX_THR 0x80 /* element has not been sent due to throttle feature */ #define BCM_CAN_FLAGS_MASK 0x3F /* to clean private flags after usage */ /* get best masking value for can_rx_register() for a given single can_id */ #define REGMASK(id) ((id & CAN_EFF_FLAG) ? 
\ (CAN_EFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG) : \ (CAN_SFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG)) MODULE_DESCRIPTION("PF_CAN broadcast manager protocol"); MODULE_LICENSE("Dual BSD/GPL"); MODULE_AUTHOR("Oliver Hartkopp <oliver.hartkopp@volkswagen.de>"); MODULE_ALIAS("can-proto-2"); #define BCM_MIN_NAMELEN CAN_REQUIRED_SIZE(struct sockaddr_can, can_ifindex) /* * easy access to the first 64 bits of the can(fd)_frame payload. cp->data is * 64-bit aligned so the offset has to be a multiple of 8, which is ensured * by the only callers, bcm_rx_cmp_to_index() and bcm_rx_handler() */ static inline u64 get_u64(const struct canfd_frame *cp, int offset) { return *(u64 *)(cp->data + offset); } struct bcm_op { struct list_head list; int ifindex; canid_t can_id; u32 flags; unsigned long frames_abs, frames_filtered; struct bcm_timeval ival1, ival2; struct hrtimer timer, thrtimer; ktime_t rx_stamp, kt_ival1, kt_ival2, kt_lastmsg; int rx_ifindex; int cfsiz; u32 count; u32 nframes; u32 currframe; /* void pointers to arrays of struct can[fd]_frame */ void *frames; void *last_frames; struct canfd_frame sframe; struct canfd_frame last_sframe; struct sock *sk; struct net_device *rx_reg_dev; }; struct bcm_sock { struct sock sk; int bound; int ifindex; struct list_head notifier; struct list_head rx_ops; struct list_head tx_ops; unsigned long dropped_usr_msgs; struct proc_dir_entry *bcm_proc_read; char procname [32]; /* inode number in decimal with \0 */ }; static LIST_HEAD(bcm_notifier_list); static DEFINE_SPINLOCK(bcm_notifier_lock); static struct bcm_sock *bcm_busy_notifier; static inline struct bcm_sock *bcm_sk(const struct sock *sk) { return (struct bcm_sock *)sk; } static inline ktime_t bcm_timeval_to_ktime(struct bcm_timeval tv) { return ktime_set(tv.tv_sec, tv.tv_usec * NSEC_PER_USEC); } /* check limitations for timeval provided by user */ static bool bcm_is_invalid_tv(struct bcm_msg_head *msg_head) { if ((msg_head->ival1.tv_sec < 0) || (msg_head->ival1.tv_sec > BCM_TIMER_SEC_MAX) || (msg_head->ival1.tv_usec < 0) || (msg_head->ival1.tv_usec >= USEC_PER_SEC) || (msg_head->ival2.tv_sec < 0) || (msg_head->ival2.tv_sec > BCM_TIMER_SEC_MAX) || (msg_head->ival2.tv_usec < 0) || (msg_head->ival2.tv_usec >= USEC_PER_SEC)) return true; return false; } #define CFSIZ(flags) ((flags & CAN_FD_FRAME) ? 
CANFD_MTU : CAN_MTU) #define OPSIZ sizeof(struct bcm_op) #define MHSIZ sizeof(struct bcm_msg_head) /* * procfs functions */ #if IS_ENABLED(CONFIG_PROC_FS) static char *bcm_proc_getifname(struct net *net, char *result, int ifindex) { struct net_device *dev; if (!ifindex) return "any"; rcu_read_lock(); dev = dev_get_by_index_rcu(net, ifindex); if (dev) strcpy(result, dev->name); else strcpy(result, "???"); rcu_read_unlock(); return result; } static int bcm_proc_show(struct seq_file *m, void *v) { char ifname[IFNAMSIZ]; struct net *net = m->private; struct sock *sk = (struct sock *)PDE_DATA(m->file->f_inode); struct bcm_sock *bo = bcm_sk(sk); struct bcm_op *op; seq_printf(m, ">>> socket %pK", sk->sk_socket); seq_printf(m, " / sk %pK", sk); seq_printf(m, " / bo %pK", bo); seq_printf(m, " / dropped %lu", bo->dropped_usr_msgs); seq_printf(m, " / bound %s", bcm_proc_getifname(net, ifname, bo->ifindex)); seq_printf(m, " <<<\n"); list_for_each_entry(op, &bo->rx_ops, list) { unsigned long reduction; /* print only active entries & prevent division by zero */ if (!op->frames_abs) continue; seq_printf(m, "rx_op: %03X %-5s ", op->can_id, bcm_proc_getifname(net, ifname, op->ifindex)); if (op->flags & CAN_FD_FRAME) seq_printf(m, "(%u)", op->nframes); else seq_printf(m, "[%u]", op->nframes); seq_printf(m, "%c ", (op->flags & RX_CHECK_DLC) ? 'd' : ' '); if (op->kt_ival1) seq_printf(m, "timeo=%lld ", (long long)ktime_to_us(op->kt_ival1)); if (op->kt_ival2) seq_printf(m, "thr=%lld ", (long long)ktime_to_us(op->kt_ival2)); seq_printf(m, "# recv %ld (%ld) => reduction: ", op->frames_filtered, op->frames_abs); reduction = 100 - (op->frames_filtered * 100) / op->frames_abs; seq_printf(m, "%s%ld%%\n", (reduction == 100) ? "near " : "", reduction); } list_for_each_entry(op, &bo->tx_ops, list) { seq_printf(m, "tx_op: %03X %s ", op->can_id, bcm_proc_getifname(net, ifname, op->ifindex)); if (op->flags & CAN_FD_FRAME) seq_printf(m, "(%u) ", op->nframes); else seq_printf(m, "[%u] ", op->nframes); if (op->kt_ival1) seq_printf(m, "t1=%lld ", (long long)ktime_to_us(op->kt_ival1)); if (op->kt_ival2) seq_printf(m, "t2=%lld ", (long long)ktime_to_us(op->kt_ival2)); seq_printf(m, "# sent %ld\n", op->frames_abs); } seq_putc(m, '\n'); return 0; } #endif /* CONFIG_PROC_FS */ /* * bcm_can_tx - send the (next) CAN frame to the appropriate CAN interface * of the given bcm tx op */ static void bcm_can_tx(struct bcm_op *op) { struct sk_buff *skb; struct net_device *dev; struct canfd_frame *cf = op->frames + op->cfsiz * op->currframe; /* no target device? => exit */ if (!op->ifindex) return; dev = dev_get_by_index(sock_net(op->sk), op->ifindex); if (!dev) { /* RFC: should this bcm_op remove itself here? */ return; } skb = alloc_skb(op->cfsiz + sizeof(struct can_skb_priv), gfp_any()); if (!skb) goto out; can_skb_reserve(skb); can_skb_prv(skb)->ifindex = dev->ifindex; can_skb_prv(skb)->skbcnt = 0; skb_put_data(skb, cf, op->cfsiz); /* send with loopback */ skb->dev = dev; can_skb_set_owner(skb, op->sk); can_send(skb, 1); /* update statistics */ op->currframe++; op->frames_abs++; /* reached last frame? 
*/ if (op->currframe >= op->nframes) op->currframe = 0; out: dev_put(dev); } /* * bcm_send_to_user - send a BCM message to the userspace * (consisting of bcm_msg_head + x CAN frames) */ static void bcm_send_to_user(struct bcm_op *op, struct bcm_msg_head *head, struct canfd_frame *frames, int has_timestamp) { struct sk_buff *skb; struct canfd_frame *firstframe; struct sockaddr_can *addr; struct sock *sk = op->sk; unsigned int datalen = head->nframes * op->cfsiz; int err; skb = alloc_skb(sizeof(*head) + datalen, gfp_any()); if (!skb) return; skb_put_data(skb, head, sizeof(*head)); if (head->nframes) { /* CAN frames starting here */ firstframe = (struct canfd_frame *)skb_tail_pointer(skb); skb_put_data(skb, frames, datalen); /* * the BCM uses the flags-element of the canfd_frame * structure for internal purposes. This is only * relevant for updates that are generated by the * BCM, where nframes is 1 */ if (head->nframes == 1) firstframe->flags &= BCM_CAN_FLAGS_MASK; } if (has_timestamp) { /* restore rx timestamp */ skb->tstamp = op->rx_stamp; } /* * Put the datagram to the queue so that bcm_recvmsg() can * get it from there. We need to pass the interface index to * bcm_recvmsg(). We pass a whole struct sockaddr_can in skb->cb * containing the interface index. */ sock_skb_cb_check_size(sizeof(struct sockaddr_can)); addr = (struct sockaddr_can *)skb->cb; memset(addr, 0, sizeof(*addr)); addr->can_family = AF_CAN; addr->can_ifindex = op->rx_ifindex; err = sock_queue_rcv_skb(sk, skb); if (err < 0) { struct bcm_sock *bo = bcm_sk(sk); kfree_skb(skb); /* don't care about overflows in this statistic */ bo->dropped_usr_msgs++; } } static bool bcm_tx_set_expiry(struct bcm_op *op, struct hrtimer *hrt) { ktime_t ival; if (op->kt_ival1 && op->count) ival = op->kt_ival1; else if (op->kt_ival2) ival = op->kt_ival2; else return false; hrtimer_set_expires(hrt, ktime_add(ktime_get(), ival)); return true; } static void bcm_tx_start_timer(struct bcm_op *op) { if (bcm_tx_set_expiry(op, &op->timer)) hrtimer_start_expires(&op->timer, HRTIMER_MODE_ABS_SOFT); } /* bcm_tx_timeout_handler - performs cyclic CAN frame transmissions */ static enum hrtimer_restart bcm_tx_timeout_handler(struct hrtimer *hrtimer) { struct bcm_op *op = container_of(hrtimer, struct bcm_op, timer); struct bcm_msg_head msg_head; if (op->kt_ival1 && (op->count > 0)) { op->count--; if (!op->count && (op->flags & TX_COUNTEVT)) { /* create notification to user */ memset(&msg_head, 0, sizeof(msg_head)); msg_head.opcode = TX_EXPIRED; msg_head.flags = op->flags; msg_head.count = op->count; msg_head.ival1 = op->ival1; msg_head.ival2 = op->ival2; msg_head.can_id = op->can_id; msg_head.nframes = 0; bcm_send_to_user(op, &msg_head, NULL, 0); } bcm_can_tx(op); } else if (op->kt_ival2) { bcm_can_tx(op); } return bcm_tx_set_expiry(op, &op->timer) ? 
HRTIMER_RESTART : HRTIMER_NORESTART; } /* * bcm_rx_changed - create a RX_CHANGED notification due to changed content */ static void bcm_rx_changed(struct bcm_op *op, struct canfd_frame *data) { struct bcm_msg_head head; /* update statistics */ op->frames_filtered++; /* prevent statistics overflow */ if (op->frames_filtered > ULONG_MAX/100) op->frames_filtered = op->frames_abs = 0; /* this element is not throttled anymore */ data->flags &= (BCM_CAN_FLAGS_MASK|RX_RECV); memset(&head, 0, sizeof(head)); head.opcode = RX_CHANGED; head.flags = op->flags; head.count = op->count; head.ival1 = op->ival1; head.ival2 = op->ival2; head.can_id = op->can_id; head.nframes = 1; bcm_send_to_user(op, &head, data, 1); } /* * bcm_rx_update_and_send - process a detected relevant receive content change * 1. update the last received data * 2. send a notification to the user (if possible) */ static void bcm_rx_update_and_send(struct bcm_op *op, struct canfd_frame *lastdata, const struct canfd_frame *rxdata) { memcpy(lastdata, rxdata, op->cfsiz); /* mark as used and throttled by default */ lastdata->flags |= (RX_RECV|RX_THR); /* throttling mode inactive? */ if (!op->kt_ival2) { /* send RX_CHANGED to the user immediately */ bcm_rx_changed(op, lastdata); return; } /* with active throttling timer we are just done here */ if (hrtimer_active(&op->thrtimer)) return; /* first reception with enabled throttling mode */ if (!op->kt_lastmsg) goto rx_changed_settime; /* got a second frame inside a potential throttle period? */ if (ktime_us_delta(ktime_get(), op->kt_lastmsg) < ktime_to_us(op->kt_ival2)) { /* do not send the saved data - only start throttle timer */ hrtimer_start(&op->thrtimer, ktime_add(op->kt_lastmsg, op->kt_ival2), HRTIMER_MODE_ABS_SOFT); return; } /* the gap was big enough that throttling was not needed here */ rx_changed_settime: bcm_rx_changed(op, lastdata); op->kt_lastmsg = ktime_get(); } /* * bcm_rx_cmp_to_index - (bit)compares the currently received data to formerly * received data stored in op->last_frames[] */ static void bcm_rx_cmp_to_index(struct bcm_op *op, unsigned int index, const struct canfd_frame *rxdata) { struct canfd_frame *cf = op->frames + op->cfsiz * index; struct canfd_frame *lcf = op->last_frames + op->cfsiz * index; int i; /* * no one uses the MSBs of flags for comparison, * so we use them here to detect the first time of reception */ if (!(lcf->flags & RX_RECV)) { /* received data for the first time => send update to user */ bcm_rx_update_and_send(op, lcf, rxdata); return; } /* do a real check in CAN frame data section */ for (i = 0; i < rxdata->len; i += 8) { if ((get_u64(cf, i) & get_u64(rxdata, i)) != (get_u64(cf, i) & get_u64(lcf, i))) { bcm_rx_update_and_send(op, lcf, rxdata); return; } } if (op->flags & RX_CHECK_DLC) { /* do a real check in CAN frame length */ if (rxdata->len != lcf->len) { bcm_rx_update_and_send(op, lcf, rxdata); return; } } } /* * bcm_rx_starttimer - enable timeout monitoring for CAN frame reception */ static void bcm_rx_starttimer(struct bcm_op *op) { if (op->flags & RX_NO_AUTOTIMER) return; if (op->kt_ival1) hrtimer_start(&op->timer, op->kt_ival1, HRTIMER_MODE_REL_SOFT); } /* bcm_rx_timeout_handler - when the (cyclic) CAN frame reception timed out */ static enum hrtimer_restart bcm_rx_timeout_handler(struct hrtimer *hrtimer) { struct bcm_op *op = container_of(hrtimer, struct bcm_op, timer); struct bcm_msg_head msg_head; /* if the user wants to be informed when cyclic CAN messages come back */ if ((op->flags & RX_ANNOUNCE_RESUME) && op->last_frames) { /* clear 
received CAN frames to indicate 'nothing received' */ memset(op->last_frames, 0, op->nframes * op->cfsiz); } /* create notification to user */ memset(&msg_head, 0, sizeof(msg_head)); msg_head.opcode = RX_TIMEOUT; msg_head.flags = op->flags; msg_head.count = op->count; msg_head.ival1 = op->ival1; msg_head.ival2 = op->ival2; msg_head.can_id = op->can_id; msg_head.nframes = 0; bcm_send_to_user(op, &msg_head, NULL, 0); return HRTIMER_NORESTART; } /* * bcm_rx_do_flush - helper for bcm_rx_thr_flush */ static inline int bcm_rx_do_flush(struct bcm_op *op, unsigned int index) { struct canfd_frame *lcf = op->last_frames + op->cfsiz * index; if ((op->last_frames) && (lcf->flags & RX_THR)) { bcm_rx_changed(op, lcf); return 1; } return 0; } /* * bcm_rx_thr_flush - Check for throttled data and send it to the userspace */ static int bcm_rx_thr_flush(struct bcm_op *op) { int updated = 0; if (op->nframes > 1) { unsigned int i; /* for MUX filter we start at index 1 */ for (i = 1; i < op->nframes; i++) updated += bcm_rx_do_flush(op, i); } else { /* for RX_FILTER_ID and simple filter */ updated += bcm_rx_do_flush(op, 0); } return updated; } /* * bcm_rx_thr_handler - the time for blocked content updates is over now: * Check for throttled data and send it to the userspace */ static enum hrtimer_restart bcm_rx_thr_handler(struct hrtimer *hrtimer) { struct bcm_op *op = container_of(hrtimer, struct bcm_op, thrtimer); if (bcm_rx_thr_flush(op)) { hrtimer_forward(hrtimer, ktime_get(), op->kt_ival2); return HRTIMER_RESTART; } else { /* rearm throttle handling */ op->kt_lastmsg = 0; return HRTIMER_NORESTART; } } /* * bcm_rx_handler - handle a CAN frame reception */ static void bcm_rx_handler(struct sk_buff *skb, void *data) { struct bcm_op *op = (struct bcm_op *)data; const struct canfd_frame *rxframe = (struct canfd_frame *)skb->data; unsigned int i; if (op->can_id != rxframe->can_id) return; /* make sure to handle the correct frame type (CAN / CAN FD) */ if (skb->len != op->cfsiz) return; /* disable timeout */ hrtimer_cancel(&op->timer); /* save rx timestamp */ op->rx_stamp = skb->tstamp; /* save originator for recvfrom() */ op->rx_ifindex = skb->dev->ifindex; /* update statistics */ op->frames_abs++; if (op->flags & RX_RTR_FRAME) { /* send reply for RTR-request (placed in op->frames[0]) */ bcm_can_tx(op); return; } if (op->flags & RX_FILTER_ID) { /* the easiest case */ bcm_rx_update_and_send(op, op->last_frames, rxframe); goto rx_starttimer; } if (op->nframes == 1) { /* simple compare with index 0 */ bcm_rx_cmp_to_index(op, 0, rxframe); goto rx_starttimer; } if (op->nframes > 1) { /* * multiplex compare * * find the first multiplex mask that fits. 
* Remark: The MUX-mask is stored in index 0 - but only the * first 64 bits of the frame data[] are relevant (CAN FD) */ for (i = 1; i < op->nframes; i++) { if ((get_u64(op->frames, 0) & get_u64(rxframe, 0)) == (get_u64(op->frames, 0) & get_u64(op->frames + op->cfsiz * i, 0))) { bcm_rx_cmp_to_index(op, i, rxframe); break; } } } rx_starttimer: bcm_rx_starttimer(op); } /* * helpers for bcm_op handling: find & delete bcm [rx|tx] op elements */ static struct bcm_op *bcm_find_op(struct list_head *ops, struct bcm_msg_head *mh, int ifindex) { struct bcm_op *op; list_for_each_entry(op, ops, list) { if ((op->can_id == mh->can_id) && (op->ifindex == ifindex) && (op->flags & CAN_FD_FRAME) == (mh->flags & CAN_FD_FRAME)) return op; } return NULL; } static void bcm_remove_op(struct bcm_op *op) { hrtimer_cancel(&op->timer); hrtimer_cancel(&op->thrtimer); if ((op->frames) && (op->frames != &op->sframe)) kfree(op->frames); if ((op->last_frames) && (op->last_frames != &op->last_sframe)) kfree(op->last_frames); kfree(op); } static void bcm_rx_unreg(struct net_device *dev, struct bcm_op *op) { if (op->rx_reg_dev == dev) { can_rx_unregister(dev_net(dev), dev, op->can_id, REGMASK(op->can_id), bcm_rx_handler, op); /* mark as removed subscription */ op->rx_reg_dev = NULL; } else printk(KERN_ERR "can-bcm: bcm_rx_unreg: registered device " "mismatch %p %p\n", op->rx_reg_dev, dev); } /* * bcm_delete_rx_op - find and remove a rx op (returns number of removed ops) */ static int bcm_delete_rx_op(struct list_head *ops, struct bcm_msg_head *mh, int ifindex) { struct bcm_op *op, *n; list_for_each_entry_safe(op, n, ops, list) { if ((op->can_id == mh->can_id) && (op->ifindex == ifindex) && (op->flags & CAN_FD_FRAME) == (mh->flags & CAN_FD_FRAME)) { /* * Don't care if we're bound or not (due to netdev * problems), can_rx_unregister() is always a safe * thing to do here. 
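* In addition, the synchronize_rcu() after list_del() below waits for
* any bcm_rx_handler() invocation still running on another CPU to
* finish before bcm_remove_op() frees the op.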
*/ if (op->ifindex) { /* * Only remove subscriptions that had not * been removed due to NETDEV_UNREGISTER * in bcm_notifier() */ if (op->rx_reg_dev) { struct net_device *dev; dev = dev_get_by_index(sock_net(op->sk), op->ifindex); if (dev) { bcm_rx_unreg(dev, op); dev_put(dev); } } } else can_rx_unregister(sock_net(op->sk), NULL, op->can_id, REGMASK(op->can_id), bcm_rx_handler, op); list_del(&op->list); synchronize_rcu(); bcm_remove_op(op); return 1; /* done */ } } return 0; /* not found */ } /* * bcm_delete_tx_op - find and remove a tx op (returns number of removed ops) */ static int bcm_delete_tx_op(struct list_head *ops, struct bcm_msg_head *mh, int ifindex) { struct bcm_op *op, *n; list_for_each_entry_safe(op, n, ops, list) { if ((op->can_id == mh->can_id) && (op->ifindex == ifindex) && (op->flags & CAN_FD_FRAME) == (mh->flags & CAN_FD_FRAME)) { list_del(&op->list); bcm_remove_op(op); return 1; /* done */ } } return 0; /* not found */ } /* * bcm_read_op - read out a bcm_op and send it to the user (for bcm_sendmsg) */ static int bcm_read_op(struct list_head *ops, struct bcm_msg_head *msg_head, int ifindex) { struct bcm_op *op = bcm_find_op(ops, msg_head, ifindex); if (!op) return -EINVAL; /* put current values into msg_head */ msg_head->flags = op->flags; msg_head->count = op->count; msg_head->ival1 = op->ival1; msg_head->ival2 = op->ival2; msg_head->nframes = op->nframes; bcm_send_to_user(op, msg_head, op->frames, 0); return MHSIZ; } /* * bcm_tx_setup - create or update a bcm tx op (for bcm_sendmsg) */ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg, int ifindex, struct sock *sk) { struct bcm_sock *bo = bcm_sk(sk); struct bcm_op *op; struct canfd_frame *cf; unsigned int i; int err; /* we need a real device to send frames */ if (!ifindex) return -ENODEV; /* check nframes boundaries - we need at least one CAN frame */ if (msg_head->nframes < 1 || msg_head->nframes > MAX_NFRAMES) return -EINVAL; /* check timeval limitations */ if ((msg_head->flags & SETTIMER) && bcm_is_invalid_tv(msg_head)) return -EINVAL; /* check the given can_id */ op = bcm_find_op(&bo->tx_ops, msg_head, ifindex); if (op) { /* update existing BCM operation */ /* * Do we need more space for the CAN frames than currently * allocated? -> This is a _really_ unusual use-case and * therefore (complexity / locking) it is not supported. 
*/ if (msg_head->nframes > op->nframes) return -E2BIG; /* update CAN frames content */ for (i = 0; i < msg_head->nframes; i++) { cf = op->frames + op->cfsiz * i; err = memcpy_from_msg((u8 *)cf, msg, op->cfsiz); if (op->flags & CAN_FD_FRAME) { if (cf->len > 64) err = -EINVAL; } else { if (cf->len > 8) err = -EINVAL; } if (err < 0) return err; if (msg_head->flags & TX_CP_CAN_ID) { /* copy can_id into frame */ cf->can_id = msg_head->can_id; } } op->flags = msg_head->flags; } else { /* insert new BCM operation for the given can_id */ op = kzalloc(OPSIZ, GFP_KERNEL); if (!op) return -ENOMEM; op->can_id = msg_head->can_id; op->cfsiz = CFSIZ(msg_head->flags); op->flags = msg_head->flags; /* create array for CAN frames and copy the data */ if (msg_head->nframes > 1) { op->frames = kmalloc_array(msg_head->nframes, op->cfsiz, GFP_KERNEL); if (!op->frames) { kfree(op); return -ENOMEM; } } else op->frames = &op->sframe; for (i = 0; i < msg_head->nframes; i++) { cf = op->frames + op->cfsiz * i; err = memcpy_from_msg((u8 *)cf, msg, op->cfsiz); if (op->flags & CAN_FD_FRAME) { if (cf->len > 64) err = -EINVAL; } else { if (cf->len > 8) err = -EINVAL; } if (err < 0) { if (op->frames != &op->sframe) kfree(op->frames); kfree(op); return err; } if (msg_head->flags & TX_CP_CAN_ID) { /* copy can_id into frame */ cf->can_id = msg_head->can_id; } } /* tx_ops never compare with previous received messages */ op->last_frames = NULL; /* bcm_can_tx / bcm_tx_timeout_handler needs this */ op->sk = sk; op->ifindex = ifindex; /* initialize uninitialized (kzalloc) structure */ hrtimer_init(&op->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT); op->timer.function = bcm_tx_timeout_handler; /* currently unused in tx_ops */ hrtimer_init(&op->thrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT); /* add this bcm_op to the list of the tx_ops */ list_add(&op->list, &bo->tx_ops); } /* if ((op = bcm_find_op(&bo->tx_ops, msg_head->can_id, ifindex))) */ if (op->nframes != msg_head->nframes) { op->nframes = msg_head->nframes; /* start multiple frame transmission with index 0 */ op->currframe = 0; } /* check flags */ if (op->flags & TX_RESET_MULTI_IDX) { /* start multiple frame transmission with index 0 */ op->currframe = 0; } if (op->flags & SETTIMER) { /* set timer values */ op->count = msg_head->count; op->ival1 = msg_head->ival1; op->ival2 = msg_head->ival2; op->kt_ival1 = bcm_timeval_to_ktime(msg_head->ival1); op->kt_ival2 = bcm_timeval_to_ktime(msg_head->ival2); /* disable an active timer due to zero values? */ if (!op->kt_ival1 && !op->kt_ival2) hrtimer_cancel(&op->timer); } if (op->flags & STARTTIMER) { hrtimer_cancel(&op->timer); /* spec: send CAN frame when starting timer */ op->flags |= TX_ANNOUNCE; } if (op->flags & TX_ANNOUNCE) { bcm_can_tx(op); if (op->count) op->count--; } if (op->flags & STARTTIMER) bcm_tx_start_timer(op); return msg_head->nframes * op->cfsiz + MHSIZ; } /* * bcm_rx_setup - create or update a bcm rx op (for bcm_sendmsg) */ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg, int ifindex, struct sock *sk) { struct bcm_sock *bo = bcm_sk(sk); struct bcm_op *op; int do_rx_register; int err = 0; if ((msg_head->flags & RX_FILTER_ID) || (!(msg_head->nframes))) { /* be robust against wrong usage ... 
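* (an RX_SETUP with nframes == 0 is treated like RX_FILTER_ID)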
*/ msg_head->flags |= RX_FILTER_ID; /* ignore trailing garbage */ msg_head->nframes = 0; } /* the first element contains the mux-mask => MAX_NFRAMES + 1 */ if (msg_head->nframes > MAX_NFRAMES + 1) return -EINVAL; if ((msg_head->flags & RX_RTR_FRAME) && ((msg_head->nframes != 1) || (!(msg_head->can_id & CAN_RTR_FLAG)))) return -EINVAL; /* check timeval limitations */ if ((msg_head->flags & SETTIMER) && bcm_is_invalid_tv(msg_head)) return -EINVAL; /* check the given can_id */ op = bcm_find_op(&bo->rx_ops, msg_head, ifindex); if (op) { /* update existing BCM operation */ /* * Do we need more space for the CAN frames than currently * allocated? -> This is a _really_ unusual use-case and * therefore (complexity / locking) it is not supported. */ if (msg_head->nframes > op->nframes) return -E2BIG; if (msg_head->nframes) { /* update CAN frames content */ err = memcpy_from_msg(op->frames, msg, msg_head->nframes * op->cfsiz); if (err < 0) return err; /* clear last_frames to indicate 'nothing received' */ memset(op->last_frames, 0, msg_head->nframes * op->cfsiz); } op->nframes = msg_head->nframes; op->flags = msg_head->flags; /* Only an update -> do not call can_rx_register() */ do_rx_register = 0; } else { /* insert new BCM operation for the given can_id */ op = kzalloc(OPSIZ, GFP_KERNEL); if (!op) return -ENOMEM; op->can_id = msg_head->can_id; op->nframes = msg_head->nframes; op->cfsiz = CFSIZ(msg_head->flags); op->flags = msg_head->flags; if (msg_head->nframes > 1) { /* create array for CAN frames and copy the data */ op->frames = kmalloc_array(msg_head->nframes, op->cfsiz, GFP_KERNEL); if (!op->frames) { kfree(op); return -ENOMEM; } /* create and init array for received CAN frames */ op->last_frames = kcalloc(msg_head->nframes, op->cfsiz, GFP_KERNEL); if (!op->last_frames) { kfree(op->frames); kfree(op); return -ENOMEM; } } else { op->frames = &op->sframe; op->last_frames = &op->last_sframe; } if (msg_head->nframes) { err = memcpy_from_msg(op->frames, msg, msg_head->nframes * op->cfsiz); if (err < 0) { if (op->frames != &op->sframe) kfree(op->frames); if (op->last_frames != &op->last_sframe) kfree(op->last_frames); kfree(op); return err; } } /* bcm_can_tx / bcm_tx_timeout_handler needs this */ op->sk = sk; op->ifindex = ifindex; /* ifindex for timeout events w/o previous frame reception */ op->rx_ifindex = ifindex; /* initialize uninitialized (kzalloc) structure */ hrtimer_init(&op->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT); op->timer.function = bcm_rx_timeout_handler; hrtimer_init(&op->thrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT); op->thrtimer.function = bcm_rx_thr_handler; /* add this bcm_op to the list of the rx_ops */ list_add(&op->list, &bo->rx_ops); /* call can_rx_register() */ do_rx_register = 1; } /* if ((op = bcm_find_op(&bo->rx_ops, msg_head->can_id, ifindex))) */ /* check flags */ if (op->flags & RX_RTR_FRAME) { struct canfd_frame *frame0 = op->frames; /* no timers in RTR-mode */ hrtimer_cancel(&op->thrtimer); hrtimer_cancel(&op->timer); /* * funny feature in RX(!)_SETUP only for RTR-mode: * copy can_id into frame BUT without RTR-flag to * prevent a full-load-loopback-test ... ;-] */ if ((op->flags & TX_CP_CAN_ID) || (frame0->can_id == op->can_id)) frame0->can_id = op->can_id & ~CAN_RTR_FLAG; } else { if (op->flags & SETTIMER) { /* set timer value */ op->ival1 = msg_head->ival1; op->ival2 = msg_head->ival2; op->kt_ival1 = bcm_timeval_to_ktime(msg_head->ival1); op->kt_ival2 = bcm_timeval_to_ktime(msg_head->ival2); /* disable an active timer due to zero value? 
*/ if (!op->kt_ival1) hrtimer_cancel(&op->timer); /* * In any case cancel the throttle timer, flush * potentially blocked msgs and reset throttle handling */ op->kt_lastmsg = 0; hrtimer_cancel(&op->thrtimer); bcm_rx_thr_flush(op); } if ((op->flags & STARTTIMER) && op->kt_ival1) hrtimer_start(&op->timer, op->kt_ival1, HRTIMER_MODE_REL_SOFT); } /* now we can register for can_ids, if we added a new bcm_op */ if (do_rx_register) { if (ifindex) { struct net_device *dev; dev = dev_get_by_index(sock_net(sk), ifindex); if (dev) { err = can_rx_register(sock_net(sk), dev, op->can_id, REGMASK(op->can_id), bcm_rx_handler, op, "bcm", sk); op->rx_reg_dev = dev; dev_put(dev); } } else err = can_rx_register(sock_net(sk), NULL, op->can_id, REGMASK(op->can_id), bcm_rx_handler, op, "bcm", sk); if (err) { /* this bcm rx op is broken -> remove it */ list_del(&op->list); bcm_remove_op(op); return err; } } return msg_head->nframes * op->cfsiz + MHSIZ; } /* * bcm_tx_send - send a single CAN frame to the CAN interface (for bcm_sendmsg) */ static int bcm_tx_send(struct msghdr *msg, int ifindex, struct sock *sk, int cfsiz) { struct sk_buff *skb; struct net_device *dev; int err; /* we need a real device to send frames */ if (!ifindex) return -ENODEV; skb = alloc_skb(cfsiz + sizeof(struct can_skb_priv), GFP_KERNEL); if (!skb) return -ENOMEM; can_skb_reserve(skb); err = memcpy_from_msg(skb_put(skb, cfsiz), msg, cfsiz); if (err < 0) { kfree_skb(skb); return err; } dev = dev_get_by_index(sock_net(sk), ifindex); if (!dev) { kfree_skb(skb); return -ENODEV; } can_skb_prv(skb)->ifindex = dev->ifindex; can_skb_prv(skb)->skbcnt = 0; skb->dev = dev; can_skb_set_owner(skb, sk); err = can_send(skb, 1); /* send with loopback */ dev_put(dev); if (err) return err; return cfsiz + MHSIZ; } /* * bcm_sendmsg - process BCM commands (opcodes) from the userspace */ static int bcm_sendmsg(struct socket *sock, struct msghdr *msg, size_t size) { struct sock *sk = sock->sk; struct bcm_sock *bo = bcm_sk(sk); int ifindex = bo->ifindex; /* default ifindex for this bcm_op */ struct bcm_msg_head msg_head; int cfsiz; int ret; /* read bytes or error codes as return value */ if (!bo->bound) return -ENOTCONN; /* check for valid message length from userspace */ if (size < MHSIZ) return -EINVAL; /* read message head information */ ret = memcpy_from_msg((u8 *)&msg_head, msg, MHSIZ); if (ret < 0) return ret; cfsiz = CFSIZ(msg_head.flags); if ((size - MHSIZ) % cfsiz) return -EINVAL; /* check for alternative ifindex for this bcm_op */ if (!ifindex && msg->msg_name) { /* no bound device as default => check msg_name */ DECLARE_SOCKADDR(struct sockaddr_can *, addr, msg->msg_name); if (msg->msg_namelen < BCM_MIN_NAMELEN) return -EINVAL; if (addr->can_family != AF_CAN) return -EINVAL; /* ifindex from sendto() */ ifindex = addr->can_ifindex; if (ifindex) { struct net_device *dev; dev = dev_get_by_index(sock_net(sk), ifindex); if (!dev) return -ENODEV; if (dev->type != ARPHRD_CAN) { dev_put(dev); return -ENODEV; } dev_put(dev); } } lock_sock(sk); switch (msg_head.opcode) { case TX_SETUP: ret = bcm_tx_setup(&msg_head, msg, ifindex, sk); break; case RX_SETUP: ret = bcm_rx_setup(&msg_head, msg, ifindex, sk); break; case TX_DELETE: if (bcm_delete_tx_op(&bo->tx_ops, &msg_head, ifindex)) ret = MHSIZ; else ret = -EINVAL; break; case RX_DELETE: if (bcm_delete_rx_op(&bo->rx_ops, &msg_head, ifindex)) ret = MHSIZ; else ret = -EINVAL; break; case TX_READ: /* reuse msg_head for the reply to TX_READ */ msg_head.opcode = TX_STATUS; ret = bcm_read_op(&bo->tx_ops, &msg_head, 
ifindex); break; case RX_READ: /* reuse msg_head for the reply to RX_READ */ msg_head.opcode = RX_STATUS; ret = bcm_read_op(&bo->rx_ops, &msg_head, ifindex); break; case TX_SEND: /* we need exactly one CAN frame behind the msg head */ if ((msg_head.nframes != 1) || (size != cfsiz + MHSIZ)) ret = -EINVAL; else ret = bcm_tx_send(msg, ifindex, sk, cfsiz); break; default: ret = -EINVAL; break; } release_sock(sk); return ret; } /* * notification handler for netdevice status changes */ static void bcm_notify(struct bcm_sock *bo, unsigned long msg, struct net_device *dev) { struct sock *sk = &bo->sk; struct bcm_op *op; int notify_enodev = 0; if (!net_eq(dev_net(dev), sock_net(sk))) return; switch (msg) { case NETDEV_UNREGISTER: lock_sock(sk); /* remove device specific receive entries */ list_for_each_entry(op, &bo->rx_ops, list) if (op->rx_reg_dev == dev) bcm_rx_unreg(dev, op); /* remove device reference, if this is our bound device */ if (bo->bound && bo->ifindex == dev->ifindex) { bo->bound = 0; bo->ifindex = 0; notify_enodev = 1; } release_sock(sk); if (notify_enodev) { sk->sk_err = ENODEV; if (!sock_flag(sk, SOCK_DEAD)) sk->sk_error_report(sk); } break; case NETDEV_DOWN: if (bo->bound && bo->ifindex == dev->ifindex) { sk->sk_err = ENETDOWN; if (!sock_flag(sk, SOCK_DEAD)) sk->sk_error_report(sk); } } } static int bcm_notifier(struct notifier_block *nb, unsigned long msg, void *ptr) { struct net_device *dev = netdev_notifier_info_to_dev(ptr); if (dev->type != ARPHRD_CAN) return NOTIFY_DONE; if (msg != NETDEV_UNREGISTER && msg != NETDEV_DOWN) return NOTIFY_DONE; if (unlikely(bcm_busy_notifier)) /* Check for reentrant bug. */ return NOTIFY_DONE; spin_lock(&bcm_notifier_lock); list_for_each_entry(bcm_busy_notifier, &bcm_notifier_list, notifier) { spin_unlock(&bcm_notifier_lock); bcm_notify(bcm_busy_notifier, msg, dev); spin_lock(&bcm_notifier_lock); } bcm_busy_notifier = NULL; spin_unlock(&bcm_notifier_lock); return NOTIFY_DONE; } /* * initial settings for all BCM sockets to be set at socket creation time */ static int bcm_init(struct sock *sk) { struct bcm_sock *bo = bcm_sk(sk); bo->bound = 0; bo->ifindex = 0; bo->dropped_usr_msgs = 0; bo->bcm_proc_read = NULL; INIT_LIST_HEAD(&bo->tx_ops); INIT_LIST_HEAD(&bo->rx_ops); /* set notifier */ spin_lock(&bcm_notifier_lock); list_add_tail(&bo->notifier, &bcm_notifier_list); spin_unlock(&bcm_notifier_lock); return 0; } /* * standard socket functions */ static int bcm_release(struct socket *sock) { struct sock *sk = sock->sk; struct net *net; struct bcm_sock *bo; struct bcm_op *op, *next; if (!sk) return 0; net = sock_net(sk); bo = bcm_sk(sk); /* remove bcm_ops, timer, rx_unregister(), etc. */ spin_lock(&bcm_notifier_lock); while (bcm_busy_notifier == bo) { spin_unlock(&bcm_notifier_lock); schedule_timeout_uninterruptible(1); spin_lock(&bcm_notifier_lock); } list_del(&bo->notifier); spin_unlock(&bcm_notifier_lock); lock_sock(sk); list_for_each_entry_safe(op, next, &bo->tx_ops, list) bcm_remove_op(op); list_for_each_entry_safe(op, next, &bo->rx_ops, list) { /* * Don't care if we're bound or not (due to netdev problems) * can_rx_unregister() is always a save thing to do here. 
*/ if (op->ifindex) { /* * Only remove subscriptions that had not * been removed due to NETDEV_UNREGISTER * in bcm_notifier() */ if (op->rx_reg_dev) { struct net_device *dev; dev = dev_get_by_index(net, op->ifindex); if (dev) { bcm_rx_unreg(dev, op); dev_put(dev); } } } else can_rx_unregister(net, NULL, op->can_id, REGMASK(op->can_id), bcm_rx_handler, op); } synchronize_rcu(); list_for_each_entry_safe(op, next, &bo->rx_ops, list) bcm_remove_op(op); #if IS_ENABLED(CONFIG_PROC_FS) /* remove procfs entry */ if (net->can.bcmproc_dir && bo->bcm_proc_read) remove_proc_entry(bo->procname, net->can.bcmproc_dir); #endif /* CONFIG_PROC_FS */ /* remove device reference */ if (bo->bound) { bo->bound = 0; bo->ifindex = 0; } sock_orphan(sk); sock->sk = NULL; release_sock(sk); sock_put(sk); return 0; } static int bcm_connect(struct socket *sock, struct sockaddr *uaddr, int len, int flags) { struct sockaddr_can *addr = (struct sockaddr_can *)uaddr; struct sock *sk = sock->sk; struct bcm_sock *bo = bcm_sk(sk); struct net *net = sock_net(sk); int ret = 0; if (len < BCM_MIN_NAMELEN) return -EINVAL; lock_sock(sk); if (bo->bound) { ret = -EISCONN; goto fail; } /* bind a device to this socket */ if (addr->can_ifindex) { struct net_device *dev; dev = dev_get_by_index(net, addr->can_ifindex); if (!dev) { ret = -ENODEV; goto fail; } if (dev->type != ARPHRD_CAN) { dev_put(dev); ret = -ENODEV; goto fail; } bo->ifindex = dev->ifindex; dev_put(dev); } else { /* no interface reference for ifindex = 0 ('any' CAN device) */ bo->ifindex = 0; } #if IS_ENABLED(CONFIG_PROC_FS) if (net->can.bcmproc_dir) { /* unique socket address as filename */ sprintf(bo->procname, "%lu", sock_i_ino(sk)); bo->bcm_proc_read = proc_create_net_single(bo->procname, 0644, net->can.bcmproc_dir, bcm_proc_show, sk); if (!bo->bcm_proc_read) { ret = -ENOMEM; goto fail; } } #endif /* CONFIG_PROC_FS */ bo->bound = 1; fail: release_sock(sk); return ret; } static int bcm_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, int flags) { struct sock *sk = sock->sk; struct sk_buff *skb; int error = 0; int noblock; int err; noblock = flags & MSG_DONTWAIT; flags &= ~MSG_DONTWAIT; skb = skb_recv_datagram(sk, flags, noblock, &error); if (!skb) return error; if (skb->len < size) size = skb->len; err = memcpy_to_msg(msg, skb->data, size); if (err < 0) { skb_free_datagram(sk, skb); return err; } sock_recv_ts_and_drops(msg, sk, skb); if (msg->msg_name) { __sockaddr_check_size(BCM_MIN_NAMELEN); msg->msg_namelen = BCM_MIN_NAMELEN; memcpy(msg->msg_name, skb->cb, msg->msg_namelen); } skb_free_datagram(sk, skb); return size; } static int bcm_sock_no_ioctlcmd(struct socket *sock, unsigned int cmd, unsigned long arg) { /* no ioctls for socket layer -> hand it down to NIC layer */ return -ENOIOCTLCMD; } static const struct proto_ops bcm_ops = { .family = PF_CAN, .release = bcm_release, .bind = sock_no_bind, .connect = bcm_connect, .socketpair = sock_no_socketpair, .accept = sock_no_accept, .getname = sock_no_getname, .poll = datagram_poll, .ioctl = bcm_sock_no_ioctlcmd, .gettstamp = sock_gettstamp, .listen = sock_no_listen, .shutdown = sock_no_shutdown, .sendmsg = bcm_sendmsg, .recvmsg = bcm_recvmsg, .mmap = sock_no_mmap, .sendpage = sock_no_sendpage, }; static struct proto bcm_proto __read_mostly = { .name = "CAN_BCM", .owner = THIS_MODULE, .obj_size = sizeof(struct bcm_sock), .init = bcm_init, }; static const struct can_proto bcm_can_proto = { .type = SOCK_DGRAM, .protocol = CAN_BCM, .ops = &bcm_ops, .prot = &bcm_proto, }; static int canbcm_pernet_init(struct net 
*net) { #if IS_ENABLED(CONFIG_PROC_FS) /* create /proc/net/can-bcm directory */ net->can.bcmproc_dir = proc_net_mkdir(net, "can-bcm", net->proc_net); #endif /* CONFIG_PROC_FS */ return 0; } static void canbcm_pernet_exit(struct net *net) { #if IS_ENABLED(CONFIG_PROC_FS) /* remove /proc/net/can-bcm directory */ if (net->can.bcmproc_dir) remove_proc_entry("can-bcm", net->proc_net); #endif /* CONFIG_PROC_FS */ } static struct pernet_operations canbcm_pernet_ops __read_mostly = { .init = canbcm_pernet_init, .exit = canbcm_pernet_exit, }; static struct notifier_block canbcm_notifier = { .notifier_call = bcm_notifier }; static int __init bcm_module_init(void) { int err; pr_info("can: broadcast manager protocol\n"); err = can_proto_register(&bcm_can_proto); if (err < 0) { printk(KERN_ERR "can: registration of bcm protocol failed\n"); return err; } register_pernet_subsys(&canbcm_pernet_ops); register_netdevice_notifier(&canbcm_notifier); return 0; } static void __exit bcm_module_exit(void) { can_proto_unregister(&bcm_can_proto); unregister_netdevice_notifier(&canbcm_notifier); unregister_pernet_subsys(&canbcm_pernet_ops); } module_init(bcm_module_init); module_exit(bcm_module_exit);
static int bcm_release(struct socket *sock) { struct sock *sk = sock->sk; struct net *net; struct bcm_sock *bo; struct bcm_op *op, *next; if (!sk) return 0; net = sock_net(sk); bo = bcm_sk(sk); /* remove bcm_ops, timer, rx_unregister(), etc. */ spin_lock(&bcm_notifier_lock); while (bcm_busy_notifier == bo) { spin_unlock(&bcm_notifier_lock); schedule_timeout_uninterruptible(1); spin_lock(&bcm_notifier_lock); } list_del(&bo->notifier); spin_unlock(&bcm_notifier_lock); lock_sock(sk); list_for_each_entry_safe(op, next, &bo->tx_ops, list) bcm_remove_op(op); list_for_each_entry_safe(op, next, &bo->rx_ops, list) { /* * Don't care if we're bound or not (due to netdev problems) * can_rx_unregister() is always a save thing to do here. */ if (op->ifindex) { /* * Only remove subscriptions that had not * been removed due to NETDEV_UNREGISTER * in bcm_notifier() */ if (op->rx_reg_dev) { struct net_device *dev; dev = dev_get_by_index(net, op->ifindex); if (dev) { bcm_rx_unreg(dev, op); dev_put(dev); } } } else can_rx_unregister(net, NULL, op->can_id, REGMASK(op->can_id), bcm_rx_handler, op); bcm_remove_op(op); } #if IS_ENABLED(CONFIG_PROC_FS) /* remove procfs entry */ if (net->can.bcmproc_dir && bo->bcm_proc_read) remove_proc_entry(bo->procname, net->can.bcmproc_dir); #endif /* CONFIG_PROC_FS */ /* remove device reference */ if (bo->bound) { bo->bound = 0; bo->ifindex = 0; } sock_orphan(sk); sock->sk = NULL; release_sock(sk); sock_put(sk); return 0; }
static int bcm_release(struct socket *sock) { struct sock *sk = sock->sk; struct net *net; struct bcm_sock *bo; struct bcm_op *op, *next; if (!sk) return 0; net = sock_net(sk); bo = bcm_sk(sk); /* remove bcm_ops, timer, rx_unregister(), etc. */ spin_lock(&bcm_notifier_lock); while (bcm_busy_notifier == bo) { spin_unlock(&bcm_notifier_lock); schedule_timeout_uninterruptible(1); spin_lock(&bcm_notifier_lock); } list_del(&bo->notifier); spin_unlock(&bcm_notifier_lock); lock_sock(sk); list_for_each_entry_safe(op, next, &bo->tx_ops, list) bcm_remove_op(op); list_for_each_entry_safe(op, next, &bo->rx_ops, list) { /* * Don't care if we're bound or not (due to netdev problems) * can_rx_unregister() is always a save thing to do here. */ if (op->ifindex) { /* * Only remove subscriptions that had not * been removed due to NETDEV_UNREGISTER * in bcm_notifier() */ if (op->rx_reg_dev) { struct net_device *dev; dev = dev_get_by_index(net, op->ifindex); if (dev) { bcm_rx_unreg(dev, op); dev_put(dev); } } } else can_rx_unregister(net, NULL, op->can_id, REGMASK(op->can_id), bcm_rx_handler, op); } synchronize_rcu(); list_for_each_entry_safe(op, next, &bo->rx_ops, list) bcm_remove_op(op); #if IS_ENABLED(CONFIG_PROC_FS) /* remove procfs entry */ if (net->can.bcmproc_dir && bo->bcm_proc_read) remove_proc_entry(bo->procname, net->can.bcmproc_dir); #endif /* CONFIG_PROC_FS */ /* remove device reference */ if (bo->bound) { bo->bound = 0; bo->ifindex = 0; } sock_orphan(sk); sock->sk = NULL; release_sock(sk); sock_put(sk); return 0; }
{'added': [(788, '\t\t\tsynchronize_rcu();'), (1539, '\tsynchronize_rcu();'), (1540, ''), (1541, '\tlist_for_each_entry_safe(op, next, &bo->rx_ops, list)'), (1542, '\t\tbcm_remove_op(op);'), (1543, '')], 'deleted': [(1536, '\t\tbcm_remove_op(op);')]}
6
1
1127
7132
https://github.com/torvalds/linux
CVE-2021-3609
['CWE-362']
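Context for the record above (a minimal sketch, not part of the dataset): per the diff field, the fix for CVE-2021-3609 (CWE-362, a race condition) inserts synchronize_rcu() between unlinking a bcm_op and freeing it, because bcm_rx_handler(), registered via can_rx_register(), may still be running under RCU on another CPU at the moment bcm_remove_op() frees the op. The fragment below only contrasts the func_before and func_after patterns using names taken from the code above; it is a schematic illustration of the deferred-free idiom, not standalone compilable code.

    /* Pattern before the fix (func_before): the op is freed while an
     * RCU reader may still be executing bcm_rx_handler() against it. */
    list_del(&op->list);
    bcm_remove_op(op);                /* potential use-after-free       */

    /* Pattern after the fix (func_after; diff lines 788 and 1539-1543):
     * unpublish first, wait out an RCU grace period, then free. */
    list_del(&op->list);              /* no new reader can reach op     */
    synchronize_rcu();                /* in-flight bcm_rx_handler done  */
    bcm_remove_op(op);                /* now safe to free               */

In bcm_release() the same idea is applied in bulk: one loop unregisters every rx op, a single synchronize_rcu() waits once for all of them, and a second loop then frees the ops, which is why the diff shows bcm_remove_op() moved out of the first list_for_each_entry_safe() walk.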
trace.c
buffer_pipe_buf_get
// SPDX-License-Identifier: GPL-2.0 /* * ring buffer based function tracer * * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com> * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com> * * Originally taken from the RT patch by: * Arnaldo Carvalho de Melo <acme@redhat.com> * * Based on code from the latency_tracer, that is: * Copyright (C) 2004-2006 Ingo Molnar * Copyright (C) 2004 Nadia Yvette Chambers */ #include <linux/ring_buffer.h> #include <generated/utsrelease.h> #include <linux/stacktrace.h> #include <linux/writeback.h> #include <linux/kallsyms.h> #include <linux/seq_file.h> #include <linux/notifier.h> #include <linux/irqflags.h> #include <linux/debugfs.h> #include <linux/tracefs.h> #include <linux/pagemap.h> #include <linux/hardirq.h> #include <linux/linkage.h> #include <linux/uaccess.h> #include <linux/vmalloc.h> #include <linux/ftrace.h> #include <linux/module.h> #include <linux/percpu.h> #include <linux/splice.h> #include <linux/kdebug.h> #include <linux/string.h> #include <linux/mount.h> #include <linux/rwsem.h> #include <linux/slab.h> #include <linux/ctype.h> #include <linux/init.h> #include <linux/poll.h> #include <linux/nmi.h> #include <linux/fs.h> #include <linux/trace.h> #include <linux/sched/clock.h> #include <linux/sched/rt.h> #include "trace.h" #include "trace_output.h" /* * On boot up, the ring buffer is set to the minimum size, so that * we do not waste memory on systems that are not using tracing. */ bool ring_buffer_expanded; /* * We need to change this state when a selftest is running. * A selftest will lurk into the ring-buffer to count the * entries inserted during the selftest although some concurrent * insertions into the ring-buffer such as trace_printk could occurred * at the same time, giving false positive or negative results. */ static bool __read_mostly tracing_selftest_running; /* * If a tracer is running, we do not want to run SELFTEST. */ bool __read_mostly tracing_selftest_disabled; /* Pipe tracepoints to printk */ struct trace_iterator *tracepoint_print_iter; int tracepoint_printk; static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key); /* For tracers that don't implement custom flags */ static struct tracer_opt dummy_tracer_opt[] = { { } }; static int dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set) { return 0; } /* * To prevent the comm cache from being overwritten when no * tracing is active, only save the comm when a trace event * occurred. */ static DEFINE_PER_CPU(bool, trace_taskinfo_save); /* * Kill all tracing for good (never come back). * It is initialized to 1 but will turn to zero if the initialization * of the tracer is successful. But that is the only place that sets * this back to zero. */ static int tracing_disabled = 1; cpumask_var_t __read_mostly tracing_buffer_mask; /* * ftrace_dump_on_oops - variable to dump ftrace buffer on oops * * If there is an oops (or kernel panic) and the ftrace_dump_on_oops * is set, then ftrace_dump is called. This will output the contents * of the ftrace buffers to the console. This is very useful for * capturing traces that lead to crashes and outputing it to a * serial console. 
* * It is default off, but you can enable it with either specifying * "ftrace_dump_on_oops" in the kernel command line, or setting * /proc/sys/kernel/ftrace_dump_on_oops * Set 1 if you want to dump buffers of all CPUs * Set 2 if you want to dump the buffer of the CPU that triggered oops */ enum ftrace_dump_mode ftrace_dump_on_oops; /* When set, tracing will stop when a WARN*() is hit */ int __disable_trace_on_warning; #ifdef CONFIG_TRACE_EVAL_MAP_FILE /* Map of enums to their values, for "eval_map" file */ struct trace_eval_map_head { struct module *mod; unsigned long length; }; union trace_eval_map_item; struct trace_eval_map_tail { /* * "end" is first and points to NULL as it must be different * than "mod" or "eval_string" */ union trace_eval_map_item *next; const char *end; /* points to NULL */ }; static DEFINE_MUTEX(trace_eval_mutex); /* * The trace_eval_maps are saved in an array with two extra elements, * one at the beginning, and one at the end. The beginning item contains * the count of the saved maps (head.length), and the module they * belong to if not built in (head.mod). The ending item contains a * pointer to the next array of saved eval_map items. */ union trace_eval_map_item { struct trace_eval_map map; struct trace_eval_map_head head; struct trace_eval_map_tail tail; }; static union trace_eval_map_item *trace_eval_maps; #endif /* CONFIG_TRACE_EVAL_MAP_FILE */ static int tracing_set_tracer(struct trace_array *tr, const char *buf); #define MAX_TRACER_SIZE 100 static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata; static char *default_bootup_tracer; static bool allocate_snapshot; static int __init set_cmdline_ftrace(char *str) { strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE); default_bootup_tracer = bootup_tracer_buf; /* We are using ftrace early, expand it */ ring_buffer_expanded = true; return 1; } __setup("ftrace=", set_cmdline_ftrace); static int __init set_ftrace_dump_on_oops(char *str) { if (*str++ != '=' || !*str) { ftrace_dump_on_oops = DUMP_ALL; return 1; } if (!strcmp("orig_cpu", str)) { ftrace_dump_on_oops = DUMP_ORIG; return 1; } return 0; } __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops); static int __init stop_trace_on_warning(char *str) { if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0)) __disable_trace_on_warning = 1; return 1; } __setup("traceoff_on_warning", stop_trace_on_warning); static int __init boot_alloc_snapshot(char *str) { allocate_snapshot = true; /* We also need the main ring buffer expanded */ ring_buffer_expanded = true; return 1; } __setup("alloc_snapshot", boot_alloc_snapshot); static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata; static int __init set_trace_boot_options(char *str) { strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE); return 0; } __setup("trace_options=", set_trace_boot_options); static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata; static char *trace_boot_clock __initdata; static int __init set_trace_boot_clock(char *str) { strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE); trace_boot_clock = trace_boot_clock_buf; return 0; } __setup("trace_clock=", set_trace_boot_clock); static int __init set_tracepoint_printk(char *str) { if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0)) tracepoint_printk = 1; return 1; } __setup("tp_printk", set_tracepoint_printk); unsigned long long ns2usecs(u64 nsec) { nsec += 500; do_div(nsec, 1000); return nsec; } /* trace_flags holds trace_options default values */ #define TRACE_DEFAULT_FLAGS \ (FUNCTION_DEFAULT_FLAGS | \ TRACE_ITER_PRINT_PARENT | 
TRACE_ITER_PRINTK | \ TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | \ TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE | \ TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS) /* trace_options that are only supported by global_trace */ #define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK | \ TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD) /* trace_flags that are default zero for instances */ #define ZEROED_TRACE_FLAGS \ (TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK) /* * The global_trace is the descriptor that holds the top-level tracing * buffers for the live tracing. */ static struct trace_array global_trace = { .trace_flags = TRACE_DEFAULT_FLAGS, }; LIST_HEAD(ftrace_trace_arrays); int trace_array_get(struct trace_array *this_tr) { struct trace_array *tr; int ret = -ENODEV; mutex_lock(&trace_types_lock); list_for_each_entry(tr, &ftrace_trace_arrays, list) { if (tr == this_tr) { tr->ref++; ret = 0; break; } } mutex_unlock(&trace_types_lock); return ret; } static void __trace_array_put(struct trace_array *this_tr) { WARN_ON(!this_tr->ref); this_tr->ref--; } void trace_array_put(struct trace_array *this_tr) { mutex_lock(&trace_types_lock); __trace_array_put(this_tr); mutex_unlock(&trace_types_lock); } int call_filter_check_discard(struct trace_event_call *call, void *rec, struct ring_buffer *buffer, struct ring_buffer_event *event) { if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) && !filter_match_preds(call->filter, rec)) { __trace_event_discard_commit(buffer, event); return 1; } return 0; } void trace_free_pid_list(struct trace_pid_list *pid_list) { vfree(pid_list->pids); kfree(pid_list); } /** * trace_find_filtered_pid - check if a pid exists in a filtered_pid list * @filtered_pids: The list of pids to check * @search_pid: The PID to find in @filtered_pids * * Returns true if @search_pid is fonud in @filtered_pids, and false otherwis. */ bool trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid) { /* * If pid_max changed after filtered_pids was created, we * by default ignore all pids greater than the previous pid_max. */ if (search_pid >= filtered_pids->pid_max) return false; return test_bit(search_pid, filtered_pids->pids); } /** * trace_ignore_this_task - should a task be ignored for tracing * @filtered_pids: The list of pids to check * @task: The task that should be ignored if not filtered * * Checks if @task should be traced or not from @filtered_pids. * Returns true if @task should *NOT* be traced. * Returns false if @task should be traced. */ bool trace_ignore_this_task(struct trace_pid_list *filtered_pids, struct task_struct *task) { /* * Return false, because if filtered_pids does not exist, * all pids are good to trace. */ if (!filtered_pids) return false; return !trace_find_filtered_pid(filtered_pids, task->pid); } /** * trace_pid_filter_add_remove_task - Add or remove a task from a pid_list * @pid_list: The list to modify * @self: The current task for fork or NULL for exit * @task: The task to add or remove * * If adding a task, if @self is defined, the task is only added if @self * is also included in @pid_list. This happens on fork and tasks should * only be added when the parent is listed. If @self is NULL, then the * @task pid will be removed from the list, which would happen on exit * of a task. 
*/ void trace_filter_add_remove_task(struct trace_pid_list *pid_list, struct task_struct *self, struct task_struct *task) { if (!pid_list) return; /* For forks, we only add if the forking task is listed */ if (self) { if (!trace_find_filtered_pid(pid_list, self->pid)) return; } /* Sorry, but we don't support pid_max changing after setting */ if (task->pid >= pid_list->pid_max) return; /* "self" is set for forks, and NULL for exits */ if (self) set_bit(task->pid, pid_list->pids); else clear_bit(task->pid, pid_list->pids); } /** * trace_pid_next - Used for seq_file to get to the next pid of a pid_list * @pid_list: The pid list to show * @v: The last pid that was shown (+1 the actual pid to let zero be displayed) * @pos: The position of the file * * This is used by the seq_file "next" operation to iterate the pids * listed in a trace_pid_list structure. * * Returns the pid+1 as we want to display pid of zero, but NULL would * stop the iteration. */ void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos) { unsigned long pid = (unsigned long)v; (*pos)++; /* pid already is +1 of the actual prevous bit */ pid = find_next_bit(pid_list->pids, pid_list->pid_max, pid); /* Return pid + 1 to allow zero to be represented */ if (pid < pid_list->pid_max) return (void *)(pid + 1); return NULL; } /** * trace_pid_start - Used for seq_file to start reading pid lists * @pid_list: The pid list to show * @pos: The position of the file * * This is used by seq_file "start" operation to start the iteration * of listing pids. * * Returns the pid+1 as we want to display pid of zero, but NULL would * stop the iteration. */ void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos) { unsigned long pid; loff_t l = 0; pid = find_first_bit(pid_list->pids, pid_list->pid_max); if (pid >= pid_list->pid_max) return NULL; /* Return pid + 1 so that zero can be the exit value */ for (pid++; pid && l < *pos; pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l)) ; return (void *)pid; } /** * trace_pid_show - show the current pid in seq_file processing * @m: The seq_file structure to write into * @v: A void pointer of the pid (+1) value to display * * Can be directly used by seq_file operations to display the current * pid value. */ int trace_pid_show(struct seq_file *m, void *v) { unsigned long pid = (unsigned long)v - 1; seq_printf(m, "%lu\n", pid); return 0; } /* 128 should be much more than enough */ #define PID_BUF_SIZE 127 int trace_pid_write(struct trace_pid_list *filtered_pids, struct trace_pid_list **new_pid_list, const char __user *ubuf, size_t cnt) { struct trace_pid_list *pid_list; struct trace_parser parser; unsigned long val; int nr_pids = 0; ssize_t read = 0; ssize_t ret = 0; loff_t pos; pid_t pid; if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1)) return -ENOMEM; /* * Always recreate a new array. The write is an all or nothing * operation. Always create a new array when adding new pids by * the user. If the operation fails, then the current list is * not modified. 
*/ pid_list = kmalloc(sizeof(*pid_list), GFP_KERNEL); if (!pid_list) return -ENOMEM; pid_list->pid_max = READ_ONCE(pid_max); /* Only truncating will shrink pid_max */ if (filtered_pids && filtered_pids->pid_max > pid_list->pid_max) pid_list->pid_max = filtered_pids->pid_max; pid_list->pids = vzalloc((pid_list->pid_max + 7) >> 3); if (!pid_list->pids) { kfree(pid_list); return -ENOMEM; } if (filtered_pids) { /* copy the current bits to the new max */ for_each_set_bit(pid, filtered_pids->pids, filtered_pids->pid_max) { set_bit(pid, pid_list->pids); nr_pids++; } } while (cnt > 0) { pos = 0; ret = trace_get_user(&parser, ubuf, cnt, &pos); if (ret < 0 || !trace_parser_loaded(&parser)) break; read += ret; ubuf += ret; cnt -= ret; ret = -EINVAL; if (kstrtoul(parser.buffer, 0, &val)) break; if (val >= pid_list->pid_max) break; pid = (pid_t)val; set_bit(pid, pid_list->pids); nr_pids++; trace_parser_clear(&parser); ret = 0; } trace_parser_put(&parser); if (ret < 0) { trace_free_pid_list(pid_list); return ret; } if (!nr_pids) { /* Cleared the list of pids */ trace_free_pid_list(pid_list); read = ret; pid_list = NULL; } *new_pid_list = pid_list; return read; } static u64 buffer_ftrace_now(struct trace_buffer *buf, int cpu) { u64 ts; /* Early boot up does not have a buffer yet */ if (!buf->buffer) return trace_clock_local(); ts = ring_buffer_time_stamp(buf->buffer, cpu); ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts); return ts; } u64 ftrace_now(int cpu) { return buffer_ftrace_now(&global_trace.trace_buffer, cpu); } /** * tracing_is_enabled - Show if global_trace has been disabled * * Shows if the global trace has been enabled or not. It uses the * mirror flag "buffer_disabled" to be used in fast paths such as for * the irqsoff tracer. But it may be inaccurate due to races. If you * need to know the accurate state, use tracing_is_on() which is a little * slower, but accurate. */ int tracing_is_enabled(void) { /* * For quick access (irqsoff uses this in fast path), just * return the mirror variable of the state of the ring buffer. * It's a little racy, but we don't really care. */ smp_rmb(); return !global_trace.buffer_disabled; } /* * trace_buf_size is the size in bytes that is allocated * for a buffer. Note, the number of bytes is always rounded * to page size. * * This number is purposely set to a low number of 16384. * If the dump on oops happens, it will be much appreciated * to not have to wait for all that output. Anyway this can be * boot time and run time configurable. */ #define TRACE_BUF_SIZE_DEFAULT 1441792UL /* 16384 * 88 (sizeof(entry)) */ static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT; /* trace_types holds a link list of available tracers. */ static struct tracer *trace_types __read_mostly; /* * trace_types_lock is used to protect the trace_types list. */ DEFINE_MUTEX(trace_types_lock); /* * serialize the access of the ring buffer * * ring buffer serializes readers, but it is low level protection. * The validity of the events (which returns by ring_buffer_peek() ..etc) * are not protected by ring buffer. * * The content of events may become garbage if we allow other process consumes * these events concurrently: * A) the page of the consumed events may become a normal page * (not reader page) in ring buffer, and this page will be rewrited * by events producer. * B) The page of the consumed events may become a page for splice_read, * and this page will be returned to system. * * These primitives allow multi process access to different cpu ring buffer * concurrently. 
* * These primitives don't distinguish read-only and read-consume access. * Multi read-only access are also serialized. */ #ifdef CONFIG_SMP static DECLARE_RWSEM(all_cpu_access_lock); static DEFINE_PER_CPU(struct mutex, cpu_access_lock); static inline void trace_access_lock(int cpu) { if (cpu == RING_BUFFER_ALL_CPUS) { /* gain it for accessing the whole ring buffer. */ down_write(&all_cpu_access_lock); } else { /* gain it for accessing a cpu ring buffer. */ /* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */ down_read(&all_cpu_access_lock); /* Secondly block other access to this @cpu ring buffer. */ mutex_lock(&per_cpu(cpu_access_lock, cpu)); } } static inline void trace_access_unlock(int cpu) { if (cpu == RING_BUFFER_ALL_CPUS) { up_write(&all_cpu_access_lock); } else { mutex_unlock(&per_cpu(cpu_access_lock, cpu)); up_read(&all_cpu_access_lock); } } static inline void trace_access_lock_init(void) { int cpu; for_each_possible_cpu(cpu) mutex_init(&per_cpu(cpu_access_lock, cpu)); } #else static DEFINE_MUTEX(access_lock); static inline void trace_access_lock(int cpu) { (void)cpu; mutex_lock(&access_lock); } static inline void trace_access_unlock(int cpu) { (void)cpu; mutex_unlock(&access_lock); } static inline void trace_access_lock_init(void) { } #endif #ifdef CONFIG_STACKTRACE static void __ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags, int skip, int pc, struct pt_regs *regs); static inline void ftrace_trace_stack(struct trace_array *tr, struct ring_buffer *buffer, unsigned long flags, int skip, int pc, struct pt_regs *regs); #else static inline void __ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags, int skip, int pc, struct pt_regs *regs) { } static inline void ftrace_trace_stack(struct trace_array *tr, struct ring_buffer *buffer, unsigned long flags, int skip, int pc, struct pt_regs *regs) { } #endif static __always_inline void trace_event_setup(struct ring_buffer_event *event, int type, unsigned long flags, int pc) { struct trace_entry *ent = ring_buffer_event_data(event); tracing_generic_entry_update(ent, flags, pc); ent->type = type; } static __always_inline struct ring_buffer_event * __trace_buffer_lock_reserve(struct ring_buffer *buffer, int type, unsigned long len, unsigned long flags, int pc) { struct ring_buffer_event *event; event = ring_buffer_lock_reserve(buffer, len); if (event != NULL) trace_event_setup(event, type, flags, pc); return event; } void tracer_tracing_on(struct trace_array *tr) { if (tr->trace_buffer.buffer) ring_buffer_record_on(tr->trace_buffer.buffer); /* * This flag is looked at when buffers haven't been allocated * yet, or by some tracers (like irqsoff), that just want to * know if the ring buffer has been disabled, but it can handle * races of where it gets disabled but we still do a record. * As the check is in the fast path of the tracers, it is more * important to be fast than accurate. */ tr->buffer_disabled = 0; /* Make the flag seen by readers */ smp_wmb(); } /** * tracing_on - enable tracing buffers * * This function enables tracing buffers that may have been * disabled with tracing_off. 
*/ void tracing_on(void) { tracer_tracing_on(&global_trace); } EXPORT_SYMBOL_GPL(tracing_on); static __always_inline void __buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event) { __this_cpu_write(trace_taskinfo_save, true); /* If this is the temp buffer, we need to commit fully */ if (this_cpu_read(trace_buffered_event) == event) { /* Length is in event->array[0] */ ring_buffer_write(buffer, event->array[0], &event->array[1]); /* Release the temp buffer */ this_cpu_dec(trace_buffered_event_cnt); } else ring_buffer_unlock_commit(buffer, event); } /** * __trace_puts - write a constant string into the trace buffer. * @ip: The address of the caller * @str: The constant string to write * @size: The size of the string. */ int __trace_puts(unsigned long ip, const char *str, int size) { struct ring_buffer_event *event; struct ring_buffer *buffer; struct print_entry *entry; unsigned long irq_flags; int alloc; int pc; if (!(global_trace.trace_flags & TRACE_ITER_PRINTK)) return 0; pc = preempt_count(); if (unlikely(tracing_selftest_running || tracing_disabled)) return 0; alloc = sizeof(*entry) + size + 2; /* possible \n added */ local_save_flags(irq_flags); buffer = global_trace.trace_buffer.buffer; event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc, irq_flags, pc); if (!event) return 0; entry = ring_buffer_event_data(event); entry->ip = ip; memcpy(&entry->buf, str, size); /* Add a newline if necessary */ if (entry->buf[size - 1] != '\n') { entry->buf[size] = '\n'; entry->buf[size + 1] = '\0'; } else entry->buf[size] = '\0'; __buffer_unlock_commit(buffer, event); ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL); return size; } EXPORT_SYMBOL_GPL(__trace_puts); /** * __trace_bputs - write the pointer to a constant string into trace buffer * @ip: The address of the caller * @str: The constant string to write to the buffer to */ int __trace_bputs(unsigned long ip, const char *str) { struct ring_buffer_event *event; struct ring_buffer *buffer; struct bputs_entry *entry; unsigned long irq_flags; int size = sizeof(struct bputs_entry); int pc; if (!(global_trace.trace_flags & TRACE_ITER_PRINTK)) return 0; pc = preempt_count(); if (unlikely(tracing_selftest_running || tracing_disabled)) return 0; local_save_flags(irq_flags); buffer = global_trace.trace_buffer.buffer; event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size, irq_flags, pc); if (!event) return 0; entry = ring_buffer_event_data(event); entry->ip = ip; entry->str = str; __buffer_unlock_commit(buffer, event); ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL); return 1; } EXPORT_SYMBOL_GPL(__trace_bputs); #ifdef CONFIG_TRACER_SNAPSHOT void tracing_snapshot_instance(struct trace_array *tr) { struct tracer *tracer = tr->current_trace; unsigned long flags; if (in_nmi()) { internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n"); internal_trace_puts("*** snapshot is being ignored ***\n"); return; } if (!tr->allocated_snapshot) { internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n"); internal_trace_puts("*** stopping trace here! ***\n"); tracing_off(); return; } /* Note, snapshot can not be used when the tracer uses it */ if (tracer->use_max_tr) { internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n"); internal_trace_puts("*** Can not use snapshot (sorry) ***\n"); return; } local_irq_save(flags); update_max_tr(tr, current, smp_processor_id()); local_irq_restore(flags); } /** * tracing_snapshot - take a snapshot of the current buffer. 
* * This causes a swap between the snapshot buffer and the current live * tracing buffer. You can use this to take snapshots of the live * trace when some condition is triggered, but continue to trace. * * Note, make sure to allocate the snapshot with either * a tracing_snapshot_alloc(), or by doing it manually * with: echo 1 > /sys/kernel/debug/tracing/snapshot * * If the snapshot buffer is not allocated, it will stop tracing. * Basically making a permanent snapshot. */ void tracing_snapshot(void) { struct trace_array *tr = &global_trace; tracing_snapshot_instance(tr); } EXPORT_SYMBOL_GPL(tracing_snapshot); static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf, struct trace_buffer *size_buf, int cpu_id); static void set_buffer_entries(struct trace_buffer *buf, unsigned long val); int tracing_alloc_snapshot_instance(struct trace_array *tr) { int ret; if (!tr->allocated_snapshot) { /* allocate spare buffer */ ret = resize_buffer_duplicate_size(&tr->max_buffer, &tr->trace_buffer, RING_BUFFER_ALL_CPUS); if (ret < 0) return ret; tr->allocated_snapshot = true; } return 0; } static void free_snapshot(struct trace_array *tr) { /* * We don't free the ring buffer. instead, resize it because * The max_tr ring buffer has some state (e.g. ring->clock) and * we want preserve it. */ ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS); set_buffer_entries(&tr->max_buffer, 1); tracing_reset_online_cpus(&tr->max_buffer); tr->allocated_snapshot = false; } /** * tracing_alloc_snapshot - allocate snapshot buffer. * * This only allocates the snapshot buffer if it isn't already * allocated - it doesn't also take a snapshot. * * This is meant to be used in cases where the snapshot buffer needs * to be set up for events that can't sleep but need to be able to * trigger a snapshot. */ int tracing_alloc_snapshot(void) { struct trace_array *tr = &global_trace; int ret; ret = tracing_alloc_snapshot_instance(tr); WARN_ON(ret < 0); return ret; } EXPORT_SYMBOL_GPL(tracing_alloc_snapshot); /** * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer. * * This is similar to tracing_snapshot(), but it will allocate the * snapshot buffer if it isn't already allocated. Use this only * where it is safe to sleep, as the allocation may sleep. * * This causes a swap between the snapshot buffer and the current live * tracing buffer. You can use this to take snapshots of the live * trace when some condition is triggered, but continue to trace. */ void tracing_snapshot_alloc(void) { int ret; ret = tracing_alloc_snapshot(); if (ret < 0) return; tracing_snapshot(); } EXPORT_SYMBOL_GPL(tracing_snapshot_alloc); #else void tracing_snapshot(void) { WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used"); } EXPORT_SYMBOL_GPL(tracing_snapshot); int tracing_alloc_snapshot(void) { WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used"); return -ENODEV; } EXPORT_SYMBOL_GPL(tracing_alloc_snapshot); void tracing_snapshot_alloc(void) { /* Give warning */ tracing_snapshot(); } EXPORT_SYMBOL_GPL(tracing_snapshot_alloc); #endif /* CONFIG_TRACER_SNAPSHOT */ void tracer_tracing_off(struct trace_array *tr) { if (tr->trace_buffer.buffer) ring_buffer_record_off(tr->trace_buffer.buffer); /* * This flag is looked at when buffers haven't been allocated * yet, or by some tracers (like irqsoff), that just want to * know if the ring buffer has been disabled, but it can handle * races of where it gets disabled but we still do a record. 
* As the check is in the fast path of the tracers, it is more * important to be fast than accurate. */ tr->buffer_disabled = 1; /* Make the flag seen by readers */ smp_wmb(); } /** * tracing_off - turn off tracing buffers * * This function stops the tracing buffers from recording data. * It does not disable any overhead the tracers themselves may * be causing. This function simply causes all recording to * the ring buffers to fail. */ void tracing_off(void) { tracer_tracing_off(&global_trace); } EXPORT_SYMBOL_GPL(tracing_off); void disable_trace_on_warning(void) { if (__disable_trace_on_warning) tracing_off(); } /** * tracer_tracing_is_on - show real state of ring buffer enabled * @tr : the trace array to know if ring buffer is enabled * * Shows real state of the ring buffer if it is enabled or not. */ bool tracer_tracing_is_on(struct trace_array *tr) { if (tr->trace_buffer.buffer) return ring_buffer_record_is_on(tr->trace_buffer.buffer); return !tr->buffer_disabled; } /** * tracing_is_on - show state of ring buffers enabled */ int tracing_is_on(void) { return tracer_tracing_is_on(&global_trace); } EXPORT_SYMBOL_GPL(tracing_is_on); static int __init set_buf_size(char *str) { unsigned long buf_size; if (!str) return 0; buf_size = memparse(str, &str); /* nr_entries can not be zero */ if (buf_size == 0) return 0; trace_buf_size = buf_size; return 1; } __setup("trace_buf_size=", set_buf_size); static int __init set_tracing_thresh(char *str) { unsigned long threshold; int ret; if (!str) return 0; ret = kstrtoul(str, 0, &threshold); if (ret < 0) return 0; tracing_thresh = threshold * 1000; return 1; } __setup("tracing_thresh=", set_tracing_thresh); unsigned long nsecs_to_usecs(unsigned long nsecs) { return nsecs / 1000; } /* * TRACE_FLAGS is defined as a tuple matching bit masks with strings. * It uses C(a, b) where 'a' is the eval (enum) name and 'b' is the string that * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list * of strings in the order that the evals (enum) were defined. */ #undef C #define C(a, b) b /* These must match the bit postions in trace_iterator_flags */ static const char *trace_options[] = { TRACE_FLAGS NULL }; static struct { u64 (*func)(void); const char *name; int in_ns; /* is this clock in nanoseconds? */ } trace_clocks[] = { { trace_clock_local, "local", 1 }, { trace_clock_global, "global", 1 }, { trace_clock_counter, "counter", 0 }, { trace_clock_jiffies, "uptime", 0 }, { trace_clock, "perf", 1 }, { ktime_get_mono_fast_ns, "mono", 1 }, { ktime_get_raw_fast_ns, "mono_raw", 1 }, { ktime_get_boot_fast_ns, "boot", 1 }, ARCH_TRACE_CLOCKS }; bool trace_clock_in_ns(struct trace_array *tr) { if (trace_clocks[tr->clock_id].in_ns) return true; return false; } /* * trace_parser_get_init - gets the buffer for trace parser */ int trace_parser_get_init(struct trace_parser *parser, int size) { memset(parser, 0, sizeof(*parser)); parser->buffer = kmalloc(size, GFP_KERNEL); if (!parser->buffer) return 1; parser->size = size; return 0; } /* * trace_parser_put - frees the buffer for trace parser */ void trace_parser_put(struct trace_parser *parser) { kfree(parser->buffer); parser->buffer = NULL; } /* * trace_get_user - reads the user input string separated by space * (matched by isspace(ch)) * * For each string found the 'struct trace_parser' is updated, * and the function returns. * * Returns number of bytes read. * * See kernel/trace/trace.h for 'struct trace_parser' details. 
*/ int trace_get_user(struct trace_parser *parser, const char __user *ubuf, size_t cnt, loff_t *ppos) { char ch; size_t read = 0; ssize_t ret; if (!*ppos) trace_parser_clear(parser); ret = get_user(ch, ubuf++); if (ret) goto out; read++; cnt--; /* * The parser is not finished with the last write, * continue reading the user input without skipping spaces. */ if (!parser->cont) { /* skip white space */ while (cnt && isspace(ch)) { ret = get_user(ch, ubuf++); if (ret) goto out; read++; cnt--; } parser->idx = 0; /* only spaces were written */ if (isspace(ch) || !ch) { *ppos += read; ret = read; goto out; } } /* read the non-space input */ while (cnt && !isspace(ch) && ch) { if (parser->idx < parser->size - 1) parser->buffer[parser->idx++] = ch; else { ret = -EINVAL; goto out; } ret = get_user(ch, ubuf++); if (ret) goto out; read++; cnt--; } /* We either got finished input or we have to wait for another call. */ if (isspace(ch) || !ch) { parser->buffer[parser->idx] = 0; parser->cont = false; } else if (parser->idx < parser->size - 1) { parser->cont = true; parser->buffer[parser->idx++] = ch; /* Make sure the parsed string always terminates with '\0'. */ parser->buffer[parser->idx] = 0; } else { ret = -EINVAL; goto out; } *ppos += read; ret = read; out: return ret; } /* TODO add a seq_buf_to_buffer() */ static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt) { int len; if (trace_seq_used(s) <= s->seq.readpos) return -EBUSY; len = trace_seq_used(s) - s->seq.readpos; if (cnt > len) cnt = len; memcpy(buf, s->buffer + s->seq.readpos, cnt); s->seq.readpos += cnt; return cnt; } unsigned long __read_mostly tracing_thresh; #ifdef CONFIG_TRACER_MAX_TRACE /* * Copy the new maximum trace into the separate maximum-trace * structure. (this way the maximum trace is permanently saved, * for later retrieval via /sys/kernel/tracing/tracing_max_latency) */ static void __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu) { struct trace_buffer *trace_buf = &tr->trace_buffer; struct trace_buffer *max_buf = &tr->max_buffer; struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu); struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu); max_buf->cpu = cpu; max_buf->time_start = data->preempt_timestamp; max_data->saved_latency = tr->max_latency; max_data->critical_start = data->critical_start; max_data->critical_end = data->critical_end; memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN); max_data->pid = tsk->pid; /* * If tsk == current, then use current_uid(), as that does not use * RCU. The irq tracer can be called out of RCU scope. */ if (tsk == current) max_data->uid = current_uid(); else max_data->uid = task_uid(tsk); max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO; max_data->policy = tsk->policy; max_data->rt_priority = tsk->rt_priority; /* record this tasks comm */ tracing_record_cmdline(tsk); } /** * update_max_tr - snapshot all trace buffers from global_trace to max_tr * @tr: tracer * @tsk: the task with the latency * @cpu: The cpu that initiated the trace. * * Flip the buffers between the @tr and the max_tr and record information * about which task was the cause of this latency. 
*/ void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu) { if (tr->stop_count) return; WARN_ON_ONCE(!irqs_disabled()); if (!tr->allocated_snapshot) { /* Only the nop tracer should hit this when disabling */ WARN_ON_ONCE(tr->current_trace != &nop_trace); return; } arch_spin_lock(&tr->max_lock); /* Inherit the recordable setting from trace_buffer */ if (ring_buffer_record_is_set_on(tr->trace_buffer.buffer)) ring_buffer_record_on(tr->max_buffer.buffer); else ring_buffer_record_off(tr->max_buffer.buffer); swap(tr->trace_buffer.buffer, tr->max_buffer.buffer); __update_max_tr(tr, tsk, cpu); arch_spin_unlock(&tr->max_lock); } /** * update_max_tr_single - only copy one trace over, and reset the rest * @tr - tracer * @tsk - task with the latency * @cpu - the cpu of the buffer to copy. * * Flip the trace of a single CPU buffer between the @tr and the max_tr. */ void update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu) { int ret; if (tr->stop_count) return; WARN_ON_ONCE(!irqs_disabled()); if (!tr->allocated_snapshot) { /* Only the nop tracer should hit this when disabling */ WARN_ON_ONCE(tr->current_trace != &nop_trace); return; } arch_spin_lock(&tr->max_lock); ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu); if (ret == -EBUSY) { /* * We failed to swap the buffer due to a commit taking * place on this CPU. We fail to record, but we reset * the max trace buffer (no one writes directly to it) * and flag that it failed. */ trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_, "Failed to swap buffers due to commit in progress\n"); } WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY); __update_max_tr(tr, tsk, cpu); arch_spin_unlock(&tr->max_lock); } #endif /* CONFIG_TRACER_MAX_TRACE */ static int wait_on_pipe(struct trace_iterator *iter, int full) { /* Iterators are static, they should be filled or empty */ if (trace_buffer_iter(iter, iter->cpu_file)) return 0; return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file, full); } #ifdef CONFIG_FTRACE_STARTUP_TEST static bool selftests_can_run; struct trace_selftests { struct list_head list; struct tracer *type; }; static LIST_HEAD(postponed_selftests); static int save_selftest(struct tracer *type) { struct trace_selftests *selftest; selftest = kmalloc(sizeof(*selftest), GFP_KERNEL); if (!selftest) return -ENOMEM; selftest->type = type; list_add(&selftest->list, &postponed_selftests); return 0; } static int run_tracer_selftest(struct tracer *type) { struct trace_array *tr = &global_trace; struct tracer *saved_tracer = tr->current_trace; int ret; if (!type->selftest || tracing_selftest_disabled) return 0; /* * If a tracer registers early in boot up (before scheduling is * initialized and such), then do not run its selftests yet. * Instead, run it a little later in the boot process. */ if (!selftests_can_run) return save_selftest(type); /* * Run a selftest on this tracer. * Here we reset the trace buffer, and set the current * tracer to be this tracer. The tracer can then run some * internal tracing to verify that everything is in order. * If we fail, we do not register this tracer. 
*/ tracing_reset_online_cpus(&tr->trace_buffer); tr->current_trace = type; #ifdef CONFIG_TRACER_MAX_TRACE if (type->use_max_tr) { /* If we expanded the buffers, make sure the max is expanded too */ if (ring_buffer_expanded) ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size, RING_BUFFER_ALL_CPUS); tr->allocated_snapshot = true; } #endif /* the test is responsible for initializing and enabling */ pr_info("Testing tracer %s: ", type->name); ret = type->selftest(type, tr); /* the test is responsible for resetting too */ tr->current_trace = saved_tracer; if (ret) { printk(KERN_CONT "FAILED!\n"); /* Add the warning after printing 'FAILED' */ WARN_ON(1); return -1; } /* Only reset on passing, to avoid touching corrupted buffers */ tracing_reset_online_cpus(&tr->trace_buffer); #ifdef CONFIG_TRACER_MAX_TRACE if (type->use_max_tr) { tr->allocated_snapshot = false; /* Shrink the max buffer again */ if (ring_buffer_expanded) ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS); } #endif printk(KERN_CONT "PASSED\n"); return 0; } static __init int init_trace_selftests(void) { struct trace_selftests *p, *n; struct tracer *t, **last; int ret; selftests_can_run = true; mutex_lock(&trace_types_lock); if (list_empty(&postponed_selftests)) goto out; pr_info("Running postponed tracer tests:\n"); list_for_each_entry_safe(p, n, &postponed_selftests, list) { ret = run_tracer_selftest(p->type); /* If the test fails, then warn and remove from available_tracers */ if (ret < 0) { WARN(1, "tracer: %s failed selftest, disabling\n", p->type->name); last = &trace_types; for (t = trace_types; t; t = t->next) { if (t == p->type) { *last = t->next; break; } last = &t->next; } } list_del(&p->list); kfree(p); } out: mutex_unlock(&trace_types_lock); return 0; } core_initcall(init_trace_selftests); #else static inline int run_tracer_selftest(struct tracer *type) { return 0; } #endif /* CONFIG_FTRACE_STARTUP_TEST */ static void add_tracer_options(struct trace_array *tr, struct tracer *t); static void __init apply_trace_boot_options(void); /** * register_tracer - register a tracer with the ftrace system. * @type - the plugin for the tracer * * Register a new plugin tracer. 
*/ int __init register_tracer(struct tracer *type) { struct tracer *t; int ret = 0; if (!type->name) { pr_info("Tracer must have a name\n"); return -1; } if (strlen(type->name) >= MAX_TRACER_SIZE) { pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE); return -1; } mutex_lock(&trace_types_lock); tracing_selftest_running = true; for (t = trace_types; t; t = t->next) { if (strcmp(type->name, t->name) == 0) { /* already found */ pr_info("Tracer %s already registered\n", type->name); ret = -1; goto out; } } if (!type->set_flag) type->set_flag = &dummy_set_flag; if (!type->flags) { /*allocate a dummy tracer_flags*/ type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL); if (!type->flags) { ret = -ENOMEM; goto out; } type->flags->val = 0; type->flags->opts = dummy_tracer_opt; } else if (!type->flags->opts) type->flags->opts = dummy_tracer_opt; /* store the tracer for __set_tracer_option */ type->flags->trace = type; ret = run_tracer_selftest(type); if (ret < 0) goto out; type->next = trace_types; trace_types = type; add_tracer_options(&global_trace, type); out: tracing_selftest_running = false; mutex_unlock(&trace_types_lock); if (ret || !default_bootup_tracer) goto out_unlock; if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE)) goto out_unlock; printk(KERN_INFO "Starting tracer '%s'\n", type->name); /* Do we want this tracer to start on bootup? */ tracing_set_tracer(&global_trace, type->name); default_bootup_tracer = NULL; apply_trace_boot_options(); /* disable other selftests, since this will break it. */ tracing_selftest_disabled = true; #ifdef CONFIG_FTRACE_STARTUP_TEST printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n", type->name); #endif out_unlock: return ret; } void tracing_reset(struct trace_buffer *buf, int cpu) { struct ring_buffer *buffer = buf->buffer; if (!buffer) return; ring_buffer_record_disable(buffer); /* Make sure all commits have finished */ synchronize_rcu(); ring_buffer_reset_cpu(buffer, cpu); ring_buffer_record_enable(buffer); } void tracing_reset_online_cpus(struct trace_buffer *buf) { struct ring_buffer *buffer = buf->buffer; int cpu; if (!buffer) return; ring_buffer_record_disable(buffer); /* Make sure all commits have finished */ synchronize_rcu(); buf->time_start = buffer_ftrace_now(buf, buf->cpu); for_each_online_cpu(cpu) ring_buffer_reset_cpu(buffer, cpu); ring_buffer_record_enable(buffer); } /* Must have trace_types_lock held */ void tracing_reset_all_online_cpus(void) { struct trace_array *tr; list_for_each_entry(tr, &ftrace_trace_arrays, list) { if (!tr->clear_trace) continue; tr->clear_trace = false; tracing_reset_online_cpus(&tr->trace_buffer); #ifdef CONFIG_TRACER_MAX_TRACE tracing_reset_online_cpus(&tr->max_buffer); #endif } } static int *tgid_map; #define SAVED_CMDLINES_DEFAULT 128 #define NO_CMDLINE_MAP UINT_MAX static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED; struct saved_cmdlines_buffer { unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1]; unsigned *map_cmdline_to_pid; unsigned cmdline_num; int cmdline_idx; char *saved_cmdlines; }; static struct saved_cmdlines_buffer *savedcmd; /* temporary disable recording */ static atomic_t trace_record_taskinfo_disabled __read_mostly; static inline char *get_saved_cmdlines(int idx) { return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN]; } static inline void set_cmdline(int idx, const char *cmdline) { memcpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN); } static int allocate_cmdlines_buffer(unsigned int val, struct saved_cmdlines_buffer *s) { 
s->map_cmdline_to_pid = kmalloc_array(val, sizeof(*s->map_cmdline_to_pid), GFP_KERNEL); if (!s->map_cmdline_to_pid) return -ENOMEM; s->saved_cmdlines = kmalloc_array(TASK_COMM_LEN, val, GFP_KERNEL); if (!s->saved_cmdlines) { kfree(s->map_cmdline_to_pid); return -ENOMEM; } s->cmdline_idx = 0; s->cmdline_num = val; memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP, sizeof(s->map_pid_to_cmdline)); memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP, val * sizeof(*s->map_cmdline_to_pid)); return 0; } static int trace_create_savedcmd(void) { int ret; savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL); if (!savedcmd) return -ENOMEM; ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd); if (ret < 0) { kfree(savedcmd); savedcmd = NULL; return -ENOMEM; } return 0; } int is_tracing_stopped(void) { return global_trace.stop_count; } /** * tracing_start - quick start of the tracer * * If tracing is enabled but was stopped by tracing_stop, * this will start the tracer back up. */ void tracing_start(void) { struct ring_buffer *buffer; unsigned long flags; if (tracing_disabled) return; raw_spin_lock_irqsave(&global_trace.start_lock, flags); if (--global_trace.stop_count) { if (global_trace.stop_count < 0) { /* Someone screwed up their debugging */ WARN_ON_ONCE(1); global_trace.stop_count = 0; } goto out; } /* Prevent the buffers from switching */ arch_spin_lock(&global_trace.max_lock); buffer = global_trace.trace_buffer.buffer; if (buffer) ring_buffer_record_enable(buffer); #ifdef CONFIG_TRACER_MAX_TRACE buffer = global_trace.max_buffer.buffer; if (buffer) ring_buffer_record_enable(buffer); #endif arch_spin_unlock(&global_trace.max_lock); out: raw_spin_unlock_irqrestore(&global_trace.start_lock, flags); } static void tracing_start_tr(struct trace_array *tr) { struct ring_buffer *buffer; unsigned long flags; if (tracing_disabled) return; /* If global, we need to also start the max tracer */ if (tr->flags & TRACE_ARRAY_FL_GLOBAL) return tracing_start(); raw_spin_lock_irqsave(&tr->start_lock, flags); if (--tr->stop_count) { if (tr->stop_count < 0) { /* Someone screwed up their debugging */ WARN_ON_ONCE(1); tr->stop_count = 0; } goto out; } buffer = tr->trace_buffer.buffer; if (buffer) ring_buffer_record_enable(buffer); out: raw_spin_unlock_irqrestore(&tr->start_lock, flags); } /** * tracing_stop - quick stop of the tracer * * Light weight way to stop tracing. Use in conjunction with * tracing_start. 
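 *
 * A hypothetical caller that wants to inspect the buffers while no new
 * events are being recorded could bracket the inspection:
 *
 *	tracing_stop();
 *	inspect_buffers();	(hypothetical helper)
 *	tracing_start();
 *
 * Stops nest via stop_count, so two tracing_stop() calls need two
 * tracing_start() calls before recording resumes.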
*/ void tracing_stop(void) { struct ring_buffer *buffer; unsigned long flags; raw_spin_lock_irqsave(&global_trace.start_lock, flags); if (global_trace.stop_count++) goto out; /* Prevent the buffers from switching */ arch_spin_lock(&global_trace.max_lock); buffer = global_trace.trace_buffer.buffer; if (buffer) ring_buffer_record_disable(buffer); #ifdef CONFIG_TRACER_MAX_TRACE buffer = global_trace.max_buffer.buffer; if (buffer) ring_buffer_record_disable(buffer); #endif arch_spin_unlock(&global_trace.max_lock); out: raw_spin_unlock_irqrestore(&global_trace.start_lock, flags); } static void tracing_stop_tr(struct trace_array *tr) { struct ring_buffer *buffer; unsigned long flags; /* If global, we need to also stop the max tracer */ if (tr->flags & TRACE_ARRAY_FL_GLOBAL) return tracing_stop(); raw_spin_lock_irqsave(&tr->start_lock, flags); if (tr->stop_count++) goto out; buffer = tr->trace_buffer.buffer; if (buffer) ring_buffer_record_disable(buffer); out: raw_spin_unlock_irqrestore(&tr->start_lock, flags); } static int trace_save_cmdline(struct task_struct *tsk) { unsigned pid, idx; /* treat recording of idle task as a success */ if (!tsk->pid) return 1; if (unlikely(tsk->pid > PID_MAX_DEFAULT)) return 0; /* * It's not the end of the world if we don't get * the lock, but we also don't want to spin * nor do we want to disable interrupts, * so if we miss here, then better luck next time. */ if (!arch_spin_trylock(&trace_cmdline_lock)) return 0; idx = savedcmd->map_pid_to_cmdline[tsk->pid]; if (idx == NO_CMDLINE_MAP) { idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num; /* * Check whether the cmdline buffer at idx has a pid * mapped. We are going to overwrite that entry so we * need to clear the map_pid_to_cmdline. Otherwise we * would read the new comm for the old pid. 
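	 *
	 * Concretely: if idx currently holds the comm of pid 100 and we
	 * are about to store pid 200 there, the stale mapping of pid 100
	 * to this idx must be cleared first, or a later lookup of pid
	 * 100 would return pid 200's comm.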
*/ pid = savedcmd->map_cmdline_to_pid[idx]; if (pid != NO_CMDLINE_MAP) savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP; savedcmd->map_cmdline_to_pid[idx] = tsk->pid; savedcmd->map_pid_to_cmdline[tsk->pid] = idx; savedcmd->cmdline_idx = idx; } set_cmdline(idx, tsk->comm); arch_spin_unlock(&trace_cmdline_lock); return 1; } static void __trace_find_cmdline(int pid, char comm[]) { unsigned map; if (!pid) { strcpy(comm, "<idle>"); return; } if (WARN_ON_ONCE(pid < 0)) { strcpy(comm, "<XXX>"); return; } if (pid > PID_MAX_DEFAULT) { strcpy(comm, "<...>"); return; } map = savedcmd->map_pid_to_cmdline[pid]; if (map != NO_CMDLINE_MAP) strlcpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN); else strcpy(comm, "<...>"); } void trace_find_cmdline(int pid, char comm[]) { preempt_disable(); arch_spin_lock(&trace_cmdline_lock); __trace_find_cmdline(pid, comm); arch_spin_unlock(&trace_cmdline_lock); preempt_enable(); } int trace_find_tgid(int pid) { if (unlikely(!tgid_map || !pid || pid > PID_MAX_DEFAULT)) return 0; return tgid_map[pid]; } static int trace_save_tgid(struct task_struct *tsk) { /* treat recording of idle task as a success */ if (!tsk->pid) return 1; if (unlikely(!tgid_map || tsk->pid > PID_MAX_DEFAULT)) return 0; tgid_map[tsk->pid] = tsk->tgid; return 1; } static bool tracing_record_taskinfo_skip(int flags) { if (unlikely(!(flags & (TRACE_RECORD_CMDLINE | TRACE_RECORD_TGID)))) return true; if (atomic_read(&trace_record_taskinfo_disabled) || !tracing_is_on()) return true; if (!__this_cpu_read(trace_taskinfo_save)) return true; return false; } /** * tracing_record_taskinfo - record the task info of a task * * @task - task to record * @flags - TRACE_RECORD_CMDLINE for recording comm * - TRACE_RECORD_TGID for recording tgid */ void tracing_record_taskinfo(struct task_struct *task, int flags) { bool done; if (tracing_record_taskinfo_skip(flags)) return; /* * Record as much task information as possible. If some fail, continue * to try to record the others. */ done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(task); done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(task); /* If recording any information failed, retry again soon. */ if (!done) return; __this_cpu_write(trace_taskinfo_save, false); } /** * tracing_record_taskinfo_sched_switch - record task info for sched_switch * * @prev - previous task during sched_switch * @next - next task during sched_switch * @flags - TRACE_RECORD_CMDLINE for recording comm * TRACE_RECORD_TGID for recording tgid */ void tracing_record_taskinfo_sched_switch(struct task_struct *prev, struct task_struct *next, int flags) { bool done; if (tracing_record_taskinfo_skip(flags)) return; /* * Record as much task information as possible. If some fail, continue * to try to record the others. */ done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(prev); done &= !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(next); done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(prev); done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(next); /* If recording any information failed, retry again soon. */ if (!done) return; __this_cpu_write(trace_taskinfo_save, false); } /* Helpers to record a specific task information */ void tracing_record_cmdline(struct task_struct *task) { tracing_record_taskinfo(task, TRACE_RECORD_CMDLINE); } void tracing_record_tgid(struct task_struct *task) { tracing_record_taskinfo(task, TRACE_RECORD_TGID); } /* * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq * overflowed, and TRACE_TYPE_HANDLED otherwise. 
This helper function * simplifies those functions and keeps them in sync. */ enum print_line_t trace_handle_return(struct trace_seq *s) { return trace_seq_has_overflowed(s) ? TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED; } EXPORT_SYMBOL_GPL(trace_handle_return); void tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags, int pc) { struct task_struct *tsk = current; entry->preempt_count = pc & 0xff; entry->pid = (tsk) ? tsk->pid : 0; entry->flags = #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) | #else TRACE_FLAG_IRQS_NOSUPPORT | #endif ((pc & NMI_MASK ) ? TRACE_FLAG_NMI : 0) | ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) | ((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) | (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) | (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0); } EXPORT_SYMBOL_GPL(tracing_generic_entry_update); struct ring_buffer_event * trace_buffer_lock_reserve(struct ring_buffer *buffer, int type, unsigned long len, unsigned long flags, int pc) { return __trace_buffer_lock_reserve(buffer, type, len, flags, pc); } DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event); DEFINE_PER_CPU(int, trace_buffered_event_cnt); static int trace_buffered_event_ref; /** * trace_buffered_event_enable - enable buffering events * * When events are being filtered, it is quicker to use a temporary * buffer to write the event data into if there's a likely chance * that it will not be committed. The discard of the ring buffer * is not as fast as committing, and is much slower than copying * a commit. * * When an event is to be filtered, allocate per cpu buffers to * write the event data into, and if the event is filtered and discarded * it is simply dropped, otherwise, the entire data is to be committed * in one shot. */ void trace_buffered_event_enable(void) { struct ring_buffer_event *event; struct page *page; int cpu; WARN_ON_ONCE(!mutex_is_locked(&event_mutex)); if (trace_buffered_event_ref++) return; for_each_tracing_cpu(cpu) { page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL | __GFP_NORETRY, 0); if (!page) goto failed; event = page_address(page); memset(event, 0, sizeof(*event)); per_cpu(trace_buffered_event, cpu) = event; preempt_disable(); if (cpu == smp_processor_id() && this_cpu_read(trace_buffered_event) != per_cpu(trace_buffered_event, cpu)) WARN_ON_ONCE(1); preempt_enable(); } return; failed: trace_buffered_event_disable(); } static void enable_trace_buffered_event(void *data) { /* Probably not needed, but do it anyway */ smp_rmb(); this_cpu_dec(trace_buffered_event_cnt); } static void disable_trace_buffered_event(void *data) { this_cpu_inc(trace_buffered_event_cnt); } /** * trace_buffered_event_disable - disable buffering events * * When a filter is removed, it is faster to not use the buffered * events, and to commit directly into the ring buffer. Free up * the temp buffers when there are no more users. This requires * special synchronization with current events. */ void trace_buffered_event_disable(void) { int cpu; WARN_ON_ONCE(!mutex_is_locked(&event_mutex)); if (WARN_ON_ONCE(!trace_buffered_event_ref)) return; if (--trace_buffered_event_ref) return; preempt_disable(); /* For each CPU, set the buffer as used. 
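	 * This is done by bumping trace_buffered_event_cnt on every CPU
	 * via an IPI; a writer that then finds the count already non-zero
	 * skips the per-CPU buffer and commits directly to the ring
	 * buffer.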
	 */
	smp_call_function_many(tracing_buffer_mask,
			       disable_trace_buffered_event, NULL, 1);
	preempt_enable();

	/* Wait for all current users to finish */
	synchronize_rcu();

	for_each_tracing_cpu(cpu) {
		free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
		per_cpu(trace_buffered_event, cpu) = NULL;
	}
	/*
	 * Make sure trace_buffered_event is NULL before clearing
	 * trace_buffered_event_cnt.
	 */
	smp_wmb();

	preempt_disable();
	/* Do the work on each cpu */
	smp_call_function_many(tracing_buffer_mask,
			       enable_trace_buffered_event, NULL, 1);
	preempt_enable();
}

static struct ring_buffer *temp_buffer;

struct ring_buffer_event *
trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
			  struct trace_event_file *trace_file,
			  int type, unsigned long len,
			  unsigned long flags, int pc)
{
	struct ring_buffer_event *entry;
	int val;

	*current_rb = trace_file->tr->trace_buffer.buffer;

	if (!ring_buffer_time_stamp_abs(*current_rb) && (trace_file->flags &
	     (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED)) &&
	    (entry = this_cpu_read(trace_buffered_event))) {
		/* Try to use the per cpu buffer first */
		val = this_cpu_inc_return(trace_buffered_event_cnt);
		if (val == 1) {
			trace_event_setup(entry, type, flags, pc);
			entry->array[0] = len;
			return entry;
		}
		this_cpu_dec(trace_buffered_event_cnt);
	}

	entry = __trace_buffer_lock_reserve(*current_rb,
					    type, len, flags, pc);
	/*
	 * If tracing is off, but we have triggers enabled,
	 * we still need to look at the event data. Use the temp_buffer
	 * to store the trace event for the trigger to use. It's recursion
	 * safe and will not be recorded anywhere.
	 */
	if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
		*current_rb = temp_buffer;
		entry = __trace_buffer_lock_reserve(*current_rb,
						    type, len, flags, pc);
	}
	return entry;
}
EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);

static DEFINE_SPINLOCK(tracepoint_iter_lock);
static DEFINE_MUTEX(tracepoint_printk_mutex);

static void output_printk(struct trace_event_buffer *fbuffer)
{
	struct trace_event_call *event_call;
	struct trace_event *event;
	unsigned long flags;
	struct trace_iterator *iter = tracepoint_print_iter;

	/* We should never get here if iter is NULL */
	if (WARN_ON_ONCE(!iter))
		return;

	event_call = fbuffer->trace_file->event_call;
	if (!event_call || !event_call->event.funcs ||
	    !event_call->event.funcs->trace)
		return;

	event = &fbuffer->trace_file->event_call->event;

	spin_lock_irqsave(&tracepoint_iter_lock, flags);
	trace_seq_init(&iter->seq);
	iter->ent = fbuffer->entry;
	event_call->event.funcs->trace(iter, 0, event);
	trace_seq_putc(&iter->seq, 0);
	printk("%s", iter->seq.buffer);

	spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
}

int tracepoint_printk_sysctl(struct ctl_table *table, int write,
			     void __user *buffer, size_t *lenp,
			     loff_t *ppos)
{
	int save_tracepoint_printk;
	int ret;

	mutex_lock(&tracepoint_printk_mutex);
	save_tracepoint_printk = tracepoint_printk;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	/*
	 * This will force exiting early, as tracepoint_printk
	 * is always zero when tracepoint_print_iter is not allocated.
	 */
	if (!tracepoint_print_iter)
		tracepoint_printk = 0;

	if (save_tracepoint_printk == tracepoint_printk)
		goto out;

	if (tracepoint_printk)
		static_key_enable(&tracepoint_printk_key.key);
	else
		static_key_disable(&tracepoint_printk_key.key);

 out:
	mutex_unlock(&tracepoint_printk_mutex);

	return ret;
}

void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
{
	if (static_key_false(&tracepoint_printk_key.key))
		output_printk(fbuffer);

	event_trigger_unlock_commit(fbuffer->trace_file,
fbuffer->buffer, fbuffer->event, fbuffer->entry, fbuffer->flags, fbuffer->pc); } EXPORT_SYMBOL_GPL(trace_event_buffer_commit); /* * Skip 3: * * trace_buffer_unlock_commit_regs() * trace_event_buffer_commit() * trace_event_raw_event_xxx() */ # define STACK_SKIP 3 void trace_buffer_unlock_commit_regs(struct trace_array *tr, struct ring_buffer *buffer, struct ring_buffer_event *event, unsigned long flags, int pc, struct pt_regs *regs) { __buffer_unlock_commit(buffer, event); /* * If regs is not set, then skip the necessary functions. * Note, we can still get here via blktrace, wakeup tracer * and mmiotrace, but that's ok if they lose a function or * two. They are not that meaningful. */ ftrace_trace_stack(tr, buffer, flags, regs ? 0 : STACK_SKIP, pc, regs); ftrace_trace_userstack(buffer, flags, pc); } /* * Similar to trace_buffer_unlock_commit_regs() but do not dump stack. */ void trace_buffer_unlock_commit_nostack(struct ring_buffer *buffer, struct ring_buffer_event *event) { __buffer_unlock_commit(buffer, event); } static void trace_process_export(struct trace_export *export, struct ring_buffer_event *event) { struct trace_entry *entry; unsigned int size = 0; entry = ring_buffer_event_data(event); size = ring_buffer_event_length(event); export->write(export, entry, size); } static DEFINE_MUTEX(ftrace_export_lock); static struct trace_export __rcu *ftrace_exports_list __read_mostly; static DEFINE_STATIC_KEY_FALSE(ftrace_exports_enabled); static inline void ftrace_exports_enable(void) { static_branch_enable(&ftrace_exports_enabled); } static inline void ftrace_exports_disable(void) { static_branch_disable(&ftrace_exports_enabled); } static void ftrace_exports(struct ring_buffer_event *event) { struct trace_export *export; preempt_disable_notrace(); export = rcu_dereference_raw_notrace(ftrace_exports_list); while (export) { trace_process_export(export, event); export = rcu_dereference_raw_notrace(export->next); } preempt_enable_notrace(); } static inline void add_trace_export(struct trace_export **list, struct trace_export *export) { rcu_assign_pointer(export->next, *list); /* * We are entering export into the list but another * CPU might be walking that list. We need to make sure * the export->next pointer is valid before another CPU sees * the export pointer included into the list. 
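	 *
	 * rcu_assign_pointer() provides that ordering: a reader in
	 * ftrace_exports(), walking the list under
	 * rcu_dereference_raw_notrace(), sees either the old list head
	 * or a fully initialized export.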
*/ rcu_assign_pointer(*list, export); } static inline int rm_trace_export(struct trace_export **list, struct trace_export *export) { struct trace_export **p; for (p = list; *p != NULL; p = &(*p)->next) if (*p == export) break; if (*p != export) return -1; rcu_assign_pointer(*p, (*p)->next); return 0; } static inline void add_ftrace_export(struct trace_export **list, struct trace_export *export) { if (*list == NULL) ftrace_exports_enable(); add_trace_export(list, export); } static inline int rm_ftrace_export(struct trace_export **list, struct trace_export *export) { int ret; ret = rm_trace_export(list, export); if (*list == NULL) ftrace_exports_disable(); return ret; } int register_ftrace_export(struct trace_export *export) { if (WARN_ON_ONCE(!export->write)) return -1; mutex_lock(&ftrace_export_lock); add_ftrace_export(&ftrace_exports_list, export); mutex_unlock(&ftrace_export_lock); return 0; } EXPORT_SYMBOL_GPL(register_ftrace_export); int unregister_ftrace_export(struct trace_export *export) { int ret; mutex_lock(&ftrace_export_lock); ret = rm_ftrace_export(&ftrace_exports_list, export); mutex_unlock(&ftrace_export_lock); return ret; } EXPORT_SYMBOL_GPL(unregister_ftrace_export); void trace_function(struct trace_array *tr, unsigned long ip, unsigned long parent_ip, unsigned long flags, int pc) { struct trace_event_call *call = &event_function; struct ring_buffer *buffer = tr->trace_buffer.buffer; struct ring_buffer_event *event; struct ftrace_entry *entry; event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry), flags, pc); if (!event) return; entry = ring_buffer_event_data(event); entry->ip = ip; entry->parent_ip = parent_ip; if (!call_filter_check_discard(call, entry, buffer, event)) { if (static_branch_unlikely(&ftrace_exports_enabled)) ftrace_exports(event); __buffer_unlock_commit(buffer, event); } } #ifdef CONFIG_STACKTRACE #define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long)) struct ftrace_stack { unsigned long calls[FTRACE_STACK_MAX_ENTRIES]; }; static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack); static DEFINE_PER_CPU(int, ftrace_stack_reserve); static void __ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags, int skip, int pc, struct pt_regs *regs) { struct trace_event_call *call = &event_kernel_stack; struct ring_buffer_event *event; struct stack_entry *entry; struct stack_trace trace; int use_stack; int size = FTRACE_STACK_ENTRIES; trace.nr_entries = 0; trace.skip = skip; /* * Add one, for this function and the call to save_stack_trace() * If regs is set, then these functions will not be in the way. */ #ifndef CONFIG_UNWINDER_ORC if (!regs) trace.skip++; #endif /* * Since events can happen in NMIs there's no safe way to * use the per cpu ftrace_stacks. We reserve it and if an interrupt * or NMI comes in, it will just have to use the default * FTRACE_STACK_SIZE. */ preempt_disable_notrace(); use_stack = __this_cpu_inc_return(ftrace_stack_reserve); /* * We don't need any atomic variables, just a barrier. * If an interrupt comes in, we don't care, because it would * have exited and put the counter back to what we want. * We just need a barrier to keep gcc from moving things * around. 
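	 *
	 * In other words, if use_stack reads 1 below, this context owns
	 * the per-CPU ftrace_stack; any nested user sees a value greater
	 * than 1 and falls back to saving directly into the ring buffer
	 * entry instead.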
	 */
	barrier();
	if (use_stack == 1) {
		trace.entries		= this_cpu_ptr(ftrace_stack.calls);
		trace.max_entries	= FTRACE_STACK_MAX_ENTRIES;

		if (regs)
			save_stack_trace_regs(regs, &trace);
		else
			save_stack_trace(&trace);

		if (trace.nr_entries > size)
			size = trace.nr_entries;
	} else
		/* From now on, use_stack is a boolean */
		use_stack = 0;

	size *= sizeof(unsigned long);

	event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
					    sizeof(*entry) + size, flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);

	memset(&entry->caller, 0, size);

	if (use_stack)
		memcpy(&entry->caller, trace.entries,
		       trace.nr_entries * sizeof(unsigned long));
	else {
		trace.max_entries	= FTRACE_STACK_ENTRIES;
		trace.entries		= entry->caller;
		if (regs)
			save_stack_trace_regs(regs, &trace);
		else
			save_stack_trace(&trace);
	}

	entry->size = trace.nr_entries;

	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);

 out:
	/* Again, don't let gcc optimize things here */
	barrier();
	__this_cpu_dec(ftrace_stack_reserve);
	preempt_enable_notrace();
}

static inline void ftrace_trace_stack(struct trace_array *tr,
				      struct ring_buffer *buffer,
				      unsigned long flags,
				      int skip, int pc, struct pt_regs *regs)
{
	if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
		return;

	__ftrace_trace_stack(buffer, flags, skip, pc, regs);
}

void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
		   int pc)
{
	struct ring_buffer *buffer = tr->trace_buffer.buffer;

	if (rcu_is_watching()) {
		__ftrace_trace_stack(buffer, flags, skip, pc, NULL);
		return;
	}

	/*
	 * When an NMI triggers, RCU is enabled via rcu_nmi_enter(),
	 * but if the above rcu_is_watching() failed, then the NMI
	 * triggered someplace critical, and rcu_irq_enter() should
	 * not be called from NMI.
	 */
	if (unlikely(in_nmi()))
		return;

	rcu_irq_enter_irqson();
	__ftrace_trace_stack(buffer, flags, skip, pc, NULL);
	rcu_irq_exit_irqson();
}

/**
 * trace_dump_stack - record a stack back trace in the trace buffer
 * @skip: Number of functions to skip (helper handlers)
 */
void trace_dump_stack(int skip)
{
	unsigned long flags;

	if (tracing_disabled || tracing_selftest_running)
		return;

	local_save_flags(flags);

#ifndef CONFIG_UNWINDER_ORC
	/* Skip 1 to skip this function. */
	skip++;
#endif
	__ftrace_trace_stack(global_trace.trace_buffer.buffer,
			     flags, skip, preempt_count(), NULL);
}
EXPORT_SYMBOL_GPL(trace_dump_stack);

static DEFINE_PER_CPU(int, user_stack_count);

void
ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
{
	struct trace_event_call *call = &event_user_stack;
	struct ring_buffer_event *event;
	struct userstack_entry *entry;
	struct stack_trace trace;

	if (!(global_trace.trace_flags & TRACE_ITER_USERSTACKTRACE))
		return;

	/*
	 * NMIs cannot handle page faults, even with fixups.
	 * Saving the user stack can (and often does) fault.
	 */
	if (unlikely(in_nmi()))
		return;

	/*
	 * prevent recursion, since the user stack tracing may
	 * trigger other kernel events.
	 */
	preempt_disable();
	if (__this_cpu_read(user_stack_count))
		goto out;

	__this_cpu_inc(user_stack_count);

	event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
					    sizeof(*entry), flags, pc);
	if (!event)
		goto out_drop_count;
	entry	= ring_buffer_event_data(event);

	entry->tgid		= current->tgid;
	memset(&entry->caller, 0, sizeof(entry->caller));

	trace.nr_entries	= 0;
	trace.max_entries	= FTRACE_STACK_ENTRIES;
	trace.skip		= 0;
	trace.entries		= entry->caller;

	save_stack_trace_user(&trace);
	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);

 out_drop_count:
	__this_cpu_dec(user_stack_count);
 out:
	preempt_enable();
}

#ifdef UNUSED
static void __trace_userstack(struct trace_array *tr, unsigned long flags)
{
	ftrace_trace_userstack(tr, flags, preempt_count());
}
#endif /* UNUSED */

#endif /* CONFIG_STACKTRACE */

/* created for use with alloc_percpu */
struct trace_buffer_struct {
	int nesting;
	char buffer[4][TRACE_BUF_SIZE];
};

static struct trace_buffer_struct *trace_percpu_buffer;

/*
 * This allows for lockless recording.  If we're nested too deeply, then
 * this returns NULL.
 */
static char *get_trace_buf(void)
{
	struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);

	if (!buffer || buffer->nesting >= 4)
		return NULL;

	buffer->nesting++;

	/* Interrupts must see nesting incremented before we use the buffer */
	barrier();
	/* nesting is now 1..4, so index with nesting - 1 to stay in bounds */
	return &buffer->buffer[buffer->nesting - 1][0];
}

static void put_trace_buf(void)
{
	/* Don't let the decrement of nesting leak before this */
	barrier();
	this_cpu_dec(trace_percpu_buffer->nesting);
}

static int alloc_percpu_trace_buffer(void)
{
	struct trace_buffer_struct *buffers;

	buffers = alloc_percpu(struct trace_buffer_struct);
	if (WARN(!buffers, "Could not allocate percpu trace_printk buffer"))
		return -ENOMEM;

	trace_percpu_buffer = buffers;
	return 0;
}

static int buffers_allocated;

void trace_printk_init_buffers(void)
{
	if (buffers_allocated)
		return;

	if (alloc_percpu_trace_buffer())
		return;

	/* trace_printk() is for debug use only. Don't use it in production. */

	pr_warn("\n");
	pr_warn("**********************************************************\n");
	pr_warn("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
	pr_warn("**                                                      **\n");
	pr_warn("** trace_printk() being used. Allocating extra memory.  **\n");
	pr_warn("**                                                      **\n");
	pr_warn("** This means that this is a DEBUG kernel and it is     **\n");
	pr_warn("** unsafe for production use.                           **\n");
	pr_warn("**                                                      **\n");
	pr_warn("** If you see this message and you are not debugging    **\n");
	pr_warn("** the kernel, report this immediately to your vendor!  **\n");
	pr_warn("**                                                      **\n");
	pr_warn("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
	pr_warn("**********************************************************\n");

	/* Expand the buffers to set size */
	tracing_update_buffers();

	buffers_allocated = 1;

	/*
	 * trace_printk_init_buffers() can be called by modules.
	 * If that happens, then we need to start cmdline recording
	 * directly here. If the global_trace.buffer is already
	 * allocated here, then this was called by module code.
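	 *
	 * For example, a loadable module containing a (hypothetical)
	 *
	 *	trace_printk("reached %s, val=%d\n", __func__, val);
	 *
	 * triggers this path at module load time, in which case cmdline
	 * recording has to be started here rather than at boot.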
*/ if (global_trace.trace_buffer.buffer) tracing_start_cmdline_record(); } void trace_printk_start_comm(void) { /* Start tracing comms if trace printk is set */ if (!buffers_allocated) return; tracing_start_cmdline_record(); } static void trace_printk_start_stop_comm(int enabled) { if (!buffers_allocated) return; if (enabled) tracing_start_cmdline_record(); else tracing_stop_cmdline_record(); } /** * trace_vbprintk - write binary msg to tracing buffer * */ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args) { struct trace_event_call *call = &event_bprint; struct ring_buffer_event *event; struct ring_buffer *buffer; struct trace_array *tr = &global_trace; struct bprint_entry *entry; unsigned long flags; char *tbuffer; int len = 0, size, pc; if (unlikely(tracing_selftest_running || tracing_disabled)) return 0; /* Don't pollute graph traces with trace_vprintk internals */ pause_graph_tracing(); pc = preempt_count(); preempt_disable_notrace(); tbuffer = get_trace_buf(); if (!tbuffer) { len = 0; goto out_nobuffer; } len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args); if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0) goto out; local_save_flags(flags); size = sizeof(*entry) + sizeof(u32) * len; buffer = tr->trace_buffer.buffer; event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size, flags, pc); if (!event) goto out; entry = ring_buffer_event_data(event); entry->ip = ip; entry->fmt = fmt; memcpy(entry->buf, tbuffer, sizeof(u32) * len); if (!call_filter_check_discard(call, entry, buffer, event)) { __buffer_unlock_commit(buffer, event); ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL); } out: put_trace_buf(); out_nobuffer: preempt_enable_notrace(); unpause_graph_tracing(); return len; } EXPORT_SYMBOL_GPL(trace_vbprintk); __printf(3, 0) static int __trace_array_vprintk(struct ring_buffer *buffer, unsigned long ip, const char *fmt, va_list args) { struct trace_event_call *call = &event_print; struct ring_buffer_event *event; int len = 0, size, pc; struct print_entry *entry; unsigned long flags; char *tbuffer; if (tracing_disabled || tracing_selftest_running) return 0; /* Don't pollute graph traces with trace_vprintk internals */ pause_graph_tracing(); pc = preempt_count(); preempt_disable_notrace(); tbuffer = get_trace_buf(); if (!tbuffer) { len = 0; goto out_nobuffer; } len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args); local_save_flags(flags); size = sizeof(*entry) + len + 1; event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size, flags, pc); if (!event) goto out; entry = ring_buffer_event_data(event); entry->ip = ip; memcpy(&entry->buf, tbuffer, len + 1); if (!call_filter_check_discard(call, entry, buffer, event)) { __buffer_unlock_commit(buffer, event); ftrace_trace_stack(&global_trace, buffer, flags, 6, pc, NULL); } out: put_trace_buf(); out_nobuffer: preempt_enable_notrace(); unpause_graph_tracing(); return len; } __printf(3, 0) int trace_array_vprintk(struct trace_array *tr, unsigned long ip, const char *fmt, va_list args) { return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args); } __printf(3, 0) int trace_array_printk(struct trace_array *tr, unsigned long ip, const char *fmt, ...) { int ret; va_list ap; if (!(global_trace.trace_flags & TRACE_ITER_PRINTK)) return 0; va_start(ap, fmt); ret = trace_array_vprintk(tr, ip, fmt, ap); va_end(ap); return ret; } __printf(3, 4) int trace_array_printk_buf(struct ring_buffer *buffer, unsigned long ip, const char *fmt, ...) 
{ int ret; va_list ap; if (!(global_trace.trace_flags & TRACE_ITER_PRINTK)) return 0; va_start(ap, fmt); ret = __trace_array_vprintk(buffer, ip, fmt, ap); va_end(ap); return ret; } __printf(2, 0) int trace_vprintk(unsigned long ip, const char *fmt, va_list args) { return trace_array_vprintk(&global_trace, ip, fmt, args); } EXPORT_SYMBOL_GPL(trace_vprintk); static void trace_iterator_increment(struct trace_iterator *iter) { struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu); iter->idx++; if (buf_iter) ring_buffer_read(buf_iter, NULL); } static struct trace_entry * peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts, unsigned long *lost_events) { struct ring_buffer_event *event; struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu); if (buf_iter) event = ring_buffer_iter_peek(buf_iter, ts); else event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts, lost_events); if (event) { iter->ent_size = ring_buffer_event_length(event); return ring_buffer_event_data(event); } iter->ent_size = 0; return NULL; } static struct trace_entry * __find_next_entry(struct trace_iterator *iter, int *ent_cpu, unsigned long *missing_events, u64 *ent_ts) { struct ring_buffer *buffer = iter->trace_buffer->buffer; struct trace_entry *ent, *next = NULL; unsigned long lost_events = 0, next_lost = 0; int cpu_file = iter->cpu_file; u64 next_ts = 0, ts; int next_cpu = -1; int next_size = 0; int cpu; /* * If we are in a per_cpu trace file, don't bother by iterating over * all cpu and peek directly. */ if (cpu_file > RING_BUFFER_ALL_CPUS) { if (ring_buffer_empty_cpu(buffer, cpu_file)) return NULL; ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events); if (ent_cpu) *ent_cpu = cpu_file; return ent; } for_each_tracing_cpu(cpu) { if (ring_buffer_empty_cpu(buffer, cpu)) continue; ent = peek_next_entry(iter, cpu, &ts, &lost_events); /* * Pick the entry with the smallest timestamp: */ if (ent && (!next || ts < next_ts)) { next = ent; next_cpu = cpu; next_ts = ts; next_lost = lost_events; next_size = iter->ent_size; } } iter->ent_size = next_size; if (ent_cpu) *ent_cpu = next_cpu; if (ent_ts) *ent_ts = next_ts; if (missing_events) *missing_events = next_lost; return next; } /* Find the next real entry, without updating the iterator itself */ struct trace_entry *trace_find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts) { return __find_next_entry(iter, ent_cpu, NULL, ent_ts); } /* Find the next real entry, and increment the iterator to the next entry */ void *trace_find_next_entry_inc(struct trace_iterator *iter) { iter->ent = __find_next_entry(iter, &iter->cpu, &iter->lost_events, &iter->ts); if (iter->ent) trace_iterator_increment(iter); return iter->ent ? 
iter : NULL; } static void trace_consume(struct trace_iterator *iter) { ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts, &iter->lost_events); } static void *s_next(struct seq_file *m, void *v, loff_t *pos) { struct trace_iterator *iter = m->private; int i = (int)*pos; void *ent; WARN_ON_ONCE(iter->leftover); (*pos)++; /* can't go backwards */ if (iter->idx > i) return NULL; if (iter->idx < 0) ent = trace_find_next_entry_inc(iter); else ent = iter; while (ent && iter->idx < i) ent = trace_find_next_entry_inc(iter); iter->pos = *pos; return ent; } void tracing_iter_reset(struct trace_iterator *iter, int cpu) { struct ring_buffer_event *event; struct ring_buffer_iter *buf_iter; unsigned long entries = 0; u64 ts; per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0; buf_iter = trace_buffer_iter(iter, cpu); if (!buf_iter) return; ring_buffer_iter_reset(buf_iter); /* * We could have the case with the max latency tracers * that a reset never took place on a cpu. This is evident * by the timestamp being before the start of the buffer. */ while ((event = ring_buffer_iter_peek(buf_iter, &ts))) { if (ts >= iter->trace_buffer->time_start) break; entries++; ring_buffer_read(buf_iter, NULL); } per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries; } /* * The current tracer is copied to avoid a global locking * all around. */ static void *s_start(struct seq_file *m, loff_t *pos) { struct trace_iterator *iter = m->private; struct trace_array *tr = iter->tr; int cpu_file = iter->cpu_file; void *p = NULL; loff_t l = 0; int cpu; /* * copy the tracer to avoid using a global lock all around. * iter->trace is a copy of current_trace, the pointer to the * name may be used instead of a strcmp(), as iter->trace->name * will point to the same string as current_trace->name. */ mutex_lock(&trace_types_lock); if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name)) *iter->trace = *tr->current_trace; mutex_unlock(&trace_types_lock); #ifdef CONFIG_TRACER_MAX_TRACE if (iter->snapshot && iter->trace->use_max_tr) return ERR_PTR(-EBUSY); #endif if (!iter->snapshot) atomic_inc(&trace_record_taskinfo_disabled); if (*pos != iter->pos) { iter->ent = NULL; iter->cpu = 0; iter->idx = -1; if (cpu_file == RING_BUFFER_ALL_CPUS) { for_each_tracing_cpu(cpu) tracing_iter_reset(iter, cpu); } else tracing_iter_reset(iter, cpu_file); iter->leftover = 0; for (p = iter; p && l < *pos; p = s_next(m, p, &l)) ; } else { /* * If we overflowed the seq_file before, then we want * to just reuse the trace_seq buffer again. */ if (iter->leftover) p = iter; else { l = *pos - 1; p = s_next(m, p, &l); } } trace_event_read_lock(); trace_access_lock(cpu_file); return p; } static void s_stop(struct seq_file *m, void *p) { struct trace_iterator *iter = m->private; #ifdef CONFIG_TRACER_MAX_TRACE if (iter->snapshot && iter->trace->use_max_tr) return; #endif if (!iter->snapshot) atomic_dec(&trace_record_taskinfo_disabled); trace_access_unlock(iter->cpu_file); trace_event_read_unlock(); } static void get_total_entries(struct trace_buffer *buf, unsigned long *total, unsigned long *entries) { unsigned long count; int cpu; *total = 0; *entries = 0; for_each_tracing_cpu(cpu) { count = ring_buffer_entries_cpu(buf->buffer, cpu); /* * If this buffer has skipped entries, then we hold all * entries for the trace and we need to ignore the * ones before the time stamp. 
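		 * In that case the overrun count is not added in either:
		 * a buffer that still holds all of its entries has, by
		 * definition, overwritten nothing.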
*/ if (per_cpu_ptr(buf->data, cpu)->skipped_entries) { count -= per_cpu_ptr(buf->data, cpu)->skipped_entries; /* total is the same as the entries */ *total += count; } else *total += count + ring_buffer_overrun_cpu(buf->buffer, cpu); *entries += count; } } static void print_lat_help_header(struct seq_file *m) { seq_puts(m, "# _------=> CPU# \n" "# / _-----=> irqs-off \n" "# | / _----=> need-resched \n" "# || / _---=> hardirq/softirq \n" "# ||| / _--=> preempt-depth \n" "# |||| / delay \n" "# cmd pid ||||| time | caller \n" "# \\ / ||||| \\ | / \n"); } static void print_event_info(struct trace_buffer *buf, struct seq_file *m) { unsigned long total; unsigned long entries; get_total_entries(buf, &total, &entries); seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n", entries, total, num_online_cpus()); seq_puts(m, "#\n"); } static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m, unsigned int flags) { bool tgid = flags & TRACE_ITER_RECORD_TGID; print_event_info(buf, m); seq_printf(m, "# TASK-PID %s CPU# TIMESTAMP FUNCTION\n", tgid ? "TGID " : ""); seq_printf(m, "# | | %s | | |\n", tgid ? " | " : ""); } static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m, unsigned int flags) { bool tgid = flags & TRACE_ITER_RECORD_TGID; const char tgid_space[] = " "; const char space[] = " "; print_event_info(buf, m); seq_printf(m, "# %s _-----=> irqs-off\n", tgid ? tgid_space : space); seq_printf(m, "# %s / _----=> need-resched\n", tgid ? tgid_space : space); seq_printf(m, "# %s| / _---=> hardirq/softirq\n", tgid ? tgid_space : space); seq_printf(m, "# %s|| / _--=> preempt-depth\n", tgid ? tgid_space : space); seq_printf(m, "# %s||| / delay\n", tgid ? tgid_space : space); seq_printf(m, "# TASK-PID %sCPU# |||| TIMESTAMP FUNCTION\n", tgid ? " TGID " : space); seq_printf(m, "# | | %s | |||| | |\n", tgid ? 
" | " : space); } void print_trace_header(struct seq_file *m, struct trace_iterator *iter) { unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK); struct trace_buffer *buf = iter->trace_buffer; struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu); struct tracer *type = iter->trace; unsigned long entries; unsigned long total; const char *name = "preemption"; name = type->name; get_total_entries(buf, &total, &entries); seq_printf(m, "# %s latency trace v1.1.5 on %s\n", name, UTS_RELEASE); seq_puts(m, "# -----------------------------------" "---------------------------------\n"); seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |" " (M:%s VP:%d, KP:%d, SP:%d HP:%d", nsecs_to_usecs(data->saved_latency), entries, total, buf->cpu, #if defined(CONFIG_PREEMPT_NONE) "server", #elif defined(CONFIG_PREEMPT_VOLUNTARY) "desktop", #elif defined(CONFIG_PREEMPT) "preempt", #else "unknown", #endif /* These are reserved for later use */ 0, 0, 0, 0); #ifdef CONFIG_SMP seq_printf(m, " #P:%d)\n", num_online_cpus()); #else seq_puts(m, ")\n"); #endif seq_puts(m, "# -----------------\n"); seq_printf(m, "# | task: %.16s-%d " "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n", data->comm, data->pid, from_kuid_munged(seq_user_ns(m), data->uid), data->nice, data->policy, data->rt_priority); seq_puts(m, "# -----------------\n"); if (data->critical_start) { seq_puts(m, "# => started at: "); seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags); trace_print_seq(m, &iter->seq); seq_puts(m, "\n# => ended at: "); seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags); trace_print_seq(m, &iter->seq); seq_puts(m, "\n#\n"); } seq_puts(m, "#\n"); } static void test_cpu_buff_start(struct trace_iterator *iter) { struct trace_seq *s = &iter->seq; struct trace_array *tr = iter->tr; if (!(tr->trace_flags & TRACE_ITER_ANNOTATE)) return; if (!(iter->iter_flags & TRACE_FILE_ANNOTATE)) return; if (cpumask_available(iter->started) && cpumask_test_cpu(iter->cpu, iter->started)) return; if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries) return; if (cpumask_available(iter->started)) cpumask_set_cpu(iter->cpu, iter->started); /* Don't print started cpu buffer for the first entry of the trace */ if (iter->idx > 1) trace_seq_printf(s, "##### CPU %u buffer started ####\n", iter->cpu); } static enum print_line_t print_trace_fmt(struct trace_iterator *iter) { struct trace_array *tr = iter->tr; struct trace_seq *s = &iter->seq; unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK); struct trace_entry *entry; struct trace_event *event; entry = iter->ent; test_cpu_buff_start(iter); event = ftrace_find_event(entry->type); if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) { if (iter->iter_flags & TRACE_FILE_LAT_FMT) trace_print_lat_context(iter); else trace_print_context(iter); } if (trace_seq_has_overflowed(s)) return TRACE_TYPE_PARTIAL_LINE; if (event) return event->funcs->trace(iter, sym_flags, event); trace_seq_printf(s, "Unknown type %d\n", entry->type); return trace_handle_return(s); } static enum print_line_t print_raw_fmt(struct trace_iterator *iter) { struct trace_array *tr = iter->tr; struct trace_seq *s = &iter->seq; struct trace_entry *entry; struct trace_event *event; entry = iter->ent; if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) trace_seq_printf(s, "%d %d %llu ", entry->pid, iter->cpu, iter->ts); if (trace_seq_has_overflowed(s)) return TRACE_TYPE_PARTIAL_LINE; event = ftrace_find_event(entry->type); if (event) return event->funcs->raw(iter, 0, event); 
trace_seq_printf(s, "%d ?\n", entry->type); return trace_handle_return(s); } static enum print_line_t print_hex_fmt(struct trace_iterator *iter) { struct trace_array *tr = iter->tr; struct trace_seq *s = &iter->seq; unsigned char newline = '\n'; struct trace_entry *entry; struct trace_event *event; entry = iter->ent; if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) { SEQ_PUT_HEX_FIELD(s, entry->pid); SEQ_PUT_HEX_FIELD(s, iter->cpu); SEQ_PUT_HEX_FIELD(s, iter->ts); if (trace_seq_has_overflowed(s)) return TRACE_TYPE_PARTIAL_LINE; } event = ftrace_find_event(entry->type); if (event) { enum print_line_t ret = event->funcs->hex(iter, 0, event); if (ret != TRACE_TYPE_HANDLED) return ret; } SEQ_PUT_FIELD(s, newline); return trace_handle_return(s); } static enum print_line_t print_bin_fmt(struct trace_iterator *iter) { struct trace_array *tr = iter->tr; struct trace_seq *s = &iter->seq; struct trace_entry *entry; struct trace_event *event; entry = iter->ent; if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) { SEQ_PUT_FIELD(s, entry->pid); SEQ_PUT_FIELD(s, iter->cpu); SEQ_PUT_FIELD(s, iter->ts); if (trace_seq_has_overflowed(s)) return TRACE_TYPE_PARTIAL_LINE; } event = ftrace_find_event(entry->type); return event ? event->funcs->binary(iter, 0, event) : TRACE_TYPE_HANDLED; } int trace_empty(struct trace_iterator *iter) { struct ring_buffer_iter *buf_iter; int cpu; /* If we are looking at one CPU buffer, only check that one */ if (iter->cpu_file != RING_BUFFER_ALL_CPUS) { cpu = iter->cpu_file; buf_iter = trace_buffer_iter(iter, cpu); if (buf_iter) { if (!ring_buffer_iter_empty(buf_iter)) return 0; } else { if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu)) return 0; } return 1; } for_each_tracing_cpu(cpu) { buf_iter = trace_buffer_iter(iter, cpu); if (buf_iter) { if (!ring_buffer_iter_empty(buf_iter)) return 0; } else { if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu)) return 0; } } return 1; } /* Called with trace_event_read_lock() held. 
 */
enum print_line_t print_trace_line(struct trace_iterator *iter)
{
	struct trace_array *tr = iter->tr;
	unsigned long trace_flags = tr->trace_flags;
	enum print_line_t ret;

	if (iter->lost_events) {
		trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
				 iter->cpu, iter->lost_events);
		if (trace_seq_has_overflowed(&iter->seq))
			return TRACE_TYPE_PARTIAL_LINE;
	}

	if (iter->trace && iter->trace->print_line) {
		ret = iter->trace->print_line(iter);
		if (ret != TRACE_TYPE_UNHANDLED)
			return ret;
	}

	if (iter->ent->type == TRACE_BPUTS &&
			trace_flags & TRACE_ITER_PRINTK &&
			trace_flags & TRACE_ITER_PRINTK_MSGONLY)
		return trace_print_bputs_msg_only(iter);

	if (iter->ent->type == TRACE_BPRINT &&
			trace_flags & TRACE_ITER_PRINTK &&
			trace_flags & TRACE_ITER_PRINTK_MSGONLY)
		return trace_print_bprintk_msg_only(iter);

	if (iter->ent->type == TRACE_PRINT &&
			trace_flags & TRACE_ITER_PRINTK &&
			trace_flags & TRACE_ITER_PRINTK_MSGONLY)
		return trace_print_printk_msg_only(iter);

	if (trace_flags & TRACE_ITER_BIN)
		return print_bin_fmt(iter);

	if (trace_flags & TRACE_ITER_HEX)
		return print_hex_fmt(iter);

	if (trace_flags & TRACE_ITER_RAW)
		return print_raw_fmt(iter);

	return print_trace_fmt(iter);
}

void trace_latency_header(struct seq_file *m)
{
	struct trace_iterator *iter = m->private;
	struct trace_array *tr = iter->tr;

	/* print nothing if the buffers are empty */
	if (trace_empty(iter))
		return;

	if (iter->iter_flags & TRACE_FILE_LAT_FMT)
		print_trace_header(m, iter);

	if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
		print_lat_help_header(m);
}

void trace_default_header(struct seq_file *m)
{
	struct trace_iterator *iter = m->private;
	struct trace_array *tr = iter->tr;
	unsigned long trace_flags = tr->trace_flags;

	if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
		/* print nothing if the buffers are empty */
		if (trace_empty(iter))
			return;
		print_trace_header(m, iter);
		if (!(trace_flags & TRACE_ITER_VERBOSE))
			print_lat_help_header(m);
	} else {
		if (!(trace_flags & TRACE_ITER_VERBOSE)) {
			if (trace_flags & TRACE_ITER_IRQ_INFO)
				print_func_help_header_irq(iter->trace_buffer,
							   m, trace_flags);
			else
				print_func_help_header(iter->trace_buffer, m,
						       trace_flags);
		}
	}
}

static void test_ftrace_alive(struct seq_file *m)
{
	if (!ftrace_is_dead())
		return;
	seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
		    "#          MAY BE MISSING FUNCTION EVENTS\n");
}

#ifdef CONFIG_TRACER_MAX_TRACE
static void show_snapshot_main_help(struct seq_file *m)
{
	seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
		    "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
		    "#                      Takes a snapshot of the main buffer.\n"
		    "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
		    "#                      (Doesn't have to be '2'; works with any number that\n"
		    "#                       is not a '0' or '1')\n");
}

static void show_snapshot_percpu_help(struct seq_file *m)
{
	seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
	seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
		    "#                      Takes a snapshot of the main buffer for this cpu.\n");
#else
	seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
		    "#                     Must use main snapshot file to allocate.\n");
#endif
	seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
		    "#                      (Doesn't have to be '2'; works with any number that\n"
		    "#                       is not a '0' or '1')\n");
}

static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
{
	if
(iter->tr->allocated_snapshot) seq_puts(m, "#\n# * Snapshot is allocated *\n#\n"); else seq_puts(m, "#\n# * Snapshot is freed *\n#\n"); seq_puts(m, "# Snapshot commands:\n"); if (iter->cpu_file == RING_BUFFER_ALL_CPUS) show_snapshot_main_help(m); else show_snapshot_percpu_help(m); } #else /* Should never be called */ static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { } #endif static int s_show(struct seq_file *m, void *v) { struct trace_iterator *iter = v; int ret; if (iter->ent == NULL) { if (iter->tr) { seq_printf(m, "# tracer: %s\n", iter->trace->name); seq_puts(m, "#\n"); test_ftrace_alive(m); } if (iter->snapshot && trace_empty(iter)) print_snapshot_help(m, iter); else if (iter->trace && iter->trace->print_header) iter->trace->print_header(m); else trace_default_header(m); } else if (iter->leftover) { /* * If we filled the seq_file buffer earlier, we * want to just show it now. */ ret = trace_print_seq(m, &iter->seq); /* ret should this time be zero, but you never know */ iter->leftover = ret; } else { print_trace_line(iter); ret = trace_print_seq(m, &iter->seq); /* * If we overflow the seq_file buffer, then it will * ask us for this data again at start up. * Use that instead. * ret is 0 if seq_file write succeeded. * -1 otherwise. */ iter->leftover = ret; } return 0; } /* * Should be used after trace_array_get(), trace_types_lock * ensures that i_cdev was already initialized. */ static inline int tracing_get_cpu(struct inode *inode) { if (inode->i_cdev) /* See trace_create_cpu_file() */ return (long)inode->i_cdev - 1; return RING_BUFFER_ALL_CPUS; } static const struct seq_operations tracer_seq_ops = { .start = s_start, .next = s_next, .stop = s_stop, .show = s_show, }; static struct trace_iterator * __tracing_open(struct inode *inode, struct file *file, bool snapshot) { struct trace_array *tr = inode->i_private; struct trace_iterator *iter; int cpu; if (tracing_disabled) return ERR_PTR(-ENODEV); iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter)); if (!iter) return ERR_PTR(-ENOMEM); iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter), GFP_KERNEL); if (!iter->buffer_iter) goto release; /* * We make a copy of the current tracer to avoid concurrent * changes on it while we are reading. */ mutex_lock(&trace_types_lock); iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL); if (!iter->trace) goto fail; *iter->trace = *tr->current_trace; if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL)) goto fail; iter->tr = tr; #ifdef CONFIG_TRACER_MAX_TRACE /* Currently only the top directory has a snapshot */ if (tr->current_trace->print_max || snapshot) iter->trace_buffer = &tr->max_buffer; else #endif iter->trace_buffer = &tr->trace_buffer; iter->snapshot = snapshot; iter->pos = -1; iter->cpu_file = tracing_get_cpu(inode); mutex_init(&iter->mutex); /* Notify the tracer early; before we stop tracing. */ if (iter->trace && iter->trace->open) iter->trace->open(iter); /* Annotate start of buffers if we had overruns */ if (ring_buffer_overruns(iter->trace_buffer->buffer)) iter->iter_flags |= TRACE_FILE_ANNOTATE; /* Output in nanoseconds only if we are using a clock in nanoseconds. 
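	 * (Counter-like clocks such as "counter" or "x86-tsc" have
	 * in_ns == 0 and are shown in their raw units.)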
*/ if (trace_clocks[tr->clock_id].in_ns) iter->iter_flags |= TRACE_FILE_TIME_IN_NS; /* stop the trace while dumping if we are not opening "snapshot" */ if (!iter->snapshot) tracing_stop_tr(tr); if (iter->cpu_file == RING_BUFFER_ALL_CPUS) { for_each_tracing_cpu(cpu) { iter->buffer_iter[cpu] = ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu); } ring_buffer_read_prepare_sync(); for_each_tracing_cpu(cpu) { ring_buffer_read_start(iter->buffer_iter[cpu]); tracing_iter_reset(iter, cpu); } } else { cpu = iter->cpu_file; iter->buffer_iter[cpu] = ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu); ring_buffer_read_prepare_sync(); ring_buffer_read_start(iter->buffer_iter[cpu]); tracing_iter_reset(iter, cpu); } mutex_unlock(&trace_types_lock); return iter; fail: mutex_unlock(&trace_types_lock); kfree(iter->trace); kfree(iter->buffer_iter); release: seq_release_private(inode, file); return ERR_PTR(-ENOMEM); } int tracing_open_generic(struct inode *inode, struct file *filp) { if (tracing_disabled) return -ENODEV; filp->private_data = inode->i_private; return 0; } bool tracing_is_disabled(void) { return (tracing_disabled) ? true: false; } /* * Open and update trace_array ref count. * Must have the current trace_array passed to it. */ static int tracing_open_generic_tr(struct inode *inode, struct file *filp) { struct trace_array *tr = inode->i_private; if (tracing_disabled) return -ENODEV; if (trace_array_get(tr) < 0) return -ENODEV; filp->private_data = inode->i_private; return 0; } static int tracing_release(struct inode *inode, struct file *file) { struct trace_array *tr = inode->i_private; struct seq_file *m = file->private_data; struct trace_iterator *iter; int cpu; if (!(file->f_mode & FMODE_READ)) { trace_array_put(tr); return 0; } /* Writes do not use seq_file */ iter = m->private; mutex_lock(&trace_types_lock); for_each_tracing_cpu(cpu) { if (iter->buffer_iter[cpu]) ring_buffer_read_finish(iter->buffer_iter[cpu]); } if (iter->trace && iter->trace->close) iter->trace->close(iter); if (!iter->snapshot) /* reenable tracing if it was previously enabled */ tracing_start_tr(tr); __trace_array_put(tr); mutex_unlock(&trace_types_lock); mutex_destroy(&iter->mutex); free_cpumask_var(iter->started); kfree(iter->trace); kfree(iter->buffer_iter); seq_release_private(inode, file); return 0; } static int tracing_release_generic_tr(struct inode *inode, struct file *file) { struct trace_array *tr = inode->i_private; trace_array_put(tr); return 0; } static int tracing_single_release_tr(struct inode *inode, struct file *file) { struct trace_array *tr = inode->i_private; trace_array_put(tr); return single_release(inode, file); } static int tracing_open(struct inode *inode, struct file *file) { struct trace_array *tr = inode->i_private; struct trace_iterator *iter; int ret = 0; if (trace_array_get(tr) < 0) return -ENODEV; /* If this file was open for write, then erase contents */ if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) { int cpu = tracing_get_cpu(inode); struct trace_buffer *trace_buf = &tr->trace_buffer; #ifdef CONFIG_TRACER_MAX_TRACE if (tr->current_trace->print_max) trace_buf = &tr->max_buffer; #endif if (cpu == RING_BUFFER_ALL_CPUS) tracing_reset_online_cpus(trace_buf); else tracing_reset(trace_buf, cpu); } if (file->f_mode & FMODE_READ) { iter = __tracing_open(inode, file, false); if (IS_ERR(iter)) ret = PTR_ERR(iter); else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT) iter->iter_flags |= TRACE_FILE_LAT_FMT; } if (ret < 0) trace_array_put(tr); return ret; } /* * Some 
tracers are not suitable for instance buffers. * A tracer is always available for the global array (toplevel) * or if it explicitly states that it is. */ static bool trace_ok_for_array(struct tracer *t, struct trace_array *tr) { return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances; } /* Find the next tracer that this trace array may use */ static struct tracer * get_tracer_for_array(struct trace_array *tr, struct tracer *t) { while (t && !trace_ok_for_array(t, tr)) t = t->next; return t; } static void * t_next(struct seq_file *m, void *v, loff_t *pos) { struct trace_array *tr = m->private; struct tracer *t = v; (*pos)++; if (t) t = get_tracer_for_array(tr, t->next); return t; } static void *t_start(struct seq_file *m, loff_t *pos) { struct trace_array *tr = m->private; struct tracer *t; loff_t l = 0; mutex_lock(&trace_types_lock); t = get_tracer_for_array(tr, trace_types); for (; t && l < *pos; t = t_next(m, t, &l)) ; return t; } static void t_stop(struct seq_file *m, void *p) { mutex_unlock(&trace_types_lock); } static int t_show(struct seq_file *m, void *v) { struct tracer *t = v; if (!t) return 0; seq_puts(m, t->name); if (t->next) seq_putc(m, ' '); else seq_putc(m, '\n'); return 0; } static const struct seq_operations show_traces_seq_ops = { .start = t_start, .next = t_next, .stop = t_stop, .show = t_show, }; static int show_traces_open(struct inode *inode, struct file *file) { struct trace_array *tr = inode->i_private; struct seq_file *m; int ret; if (tracing_disabled) return -ENODEV; ret = seq_open(file, &show_traces_seq_ops); if (ret) return ret; m = file->private_data; m->private = tr; return 0; } static ssize_t tracing_write_stub(struct file *filp, const char __user *ubuf, size_t count, loff_t *ppos) { return count; } loff_t tracing_lseek(struct file *file, loff_t offset, int whence) { int ret; if (file->f_mode & FMODE_READ) ret = seq_lseek(file, offset, whence); else file->f_pos = ret = 0; return ret; } static const struct file_operations tracing_fops = { .open = tracing_open, .read = seq_read, .write = tracing_write_stub, .llseek = tracing_lseek, .release = tracing_release, }; static const struct file_operations show_traces_fops = { .open = show_traces_open, .read = seq_read, .release = seq_release, .llseek = seq_lseek, }; static ssize_t tracing_cpumask_read(struct file *filp, char __user *ubuf, size_t count, loff_t *ppos) { struct trace_array *tr = file_inode(filp)->i_private; char *mask_str; int len; len = snprintf(NULL, 0, "%*pb\n", cpumask_pr_args(tr->tracing_cpumask)) + 1; mask_str = kmalloc(len, GFP_KERNEL); if (!mask_str) return -ENOMEM; len = snprintf(mask_str, len, "%*pb\n", cpumask_pr_args(tr->tracing_cpumask)); if (len >= count) { count = -EINVAL; goto out_err; } count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len); out_err: kfree(mask_str); return count; } static ssize_t tracing_cpumask_write(struct file *filp, const char __user *ubuf, size_t count, loff_t *ppos) { struct trace_array *tr = file_inode(filp)->i_private; cpumask_var_t tracing_cpumask_new; int err, cpu; if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL)) return -ENOMEM; err = cpumask_parse_user(ubuf, count, tracing_cpumask_new); if (err) goto err_unlock; local_irq_disable(); arch_spin_lock(&tr->max_lock); for_each_tracing_cpu(cpu) { /* * Increase/decrease the disabled counter if we are * about to flip a bit in the cpumask: */ if (cpumask_test_cpu(cpu, tr->tracing_cpumask) && !cpumask_test_cpu(cpu, tracing_cpumask_new)) { atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, 
cpu)->disabled); ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu); } if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) && cpumask_test_cpu(cpu, tracing_cpumask_new)) { atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled); ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu); } } arch_spin_unlock(&tr->max_lock); local_irq_enable(); cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new); free_cpumask_var(tracing_cpumask_new); return count; err_unlock: free_cpumask_var(tracing_cpumask_new); return err; } static const struct file_operations tracing_cpumask_fops = { .open = tracing_open_generic_tr, .read = tracing_cpumask_read, .write = tracing_cpumask_write, .release = tracing_release_generic_tr, .llseek = generic_file_llseek, }; static int tracing_trace_options_show(struct seq_file *m, void *v) { struct tracer_opt *trace_opts; struct trace_array *tr = m->private; u32 tracer_flags; int i; mutex_lock(&trace_types_lock); tracer_flags = tr->current_trace->flags->val; trace_opts = tr->current_trace->flags->opts; for (i = 0; trace_options[i]; i++) { if (tr->trace_flags & (1 << i)) seq_printf(m, "%s\n", trace_options[i]); else seq_printf(m, "no%s\n", trace_options[i]); } for (i = 0; trace_opts[i].name; i++) { if (tracer_flags & trace_opts[i].bit) seq_printf(m, "%s\n", trace_opts[i].name); else seq_printf(m, "no%s\n", trace_opts[i].name); } mutex_unlock(&trace_types_lock); return 0; } static int __set_tracer_option(struct trace_array *tr, struct tracer_flags *tracer_flags, struct tracer_opt *opts, int neg) { struct tracer *trace = tracer_flags->trace; int ret; ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg); if (ret) return ret; if (neg) tracer_flags->val &= ~opts->bit; else tracer_flags->val |= opts->bit; return 0; } /* Try to assign a tracer specific option */ static int set_tracer_option(struct trace_array *tr, char *cmp, int neg) { struct tracer *trace = tr->current_trace; struct tracer_flags *tracer_flags = trace->flags; struct tracer_opt *opts = NULL; int i; for (i = 0; tracer_flags->opts[i].name; i++) { opts = &tracer_flags->opts[i]; if (strcmp(cmp, opts->name) == 0) return __set_tracer_option(tr, trace->flags, opts, neg); } return -EINVAL; } /* Some tracers require overwrite to stay enabled */ int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set) { if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set) return -1; return 0; } int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled) { /* do nothing if flag is already set */ if (!!(tr->trace_flags & mask) == !!enabled) return 0; /* Give the tracer a chance to approve the change */ if (tr->current_trace->flag_changed) if (tr->current_trace->flag_changed(tr, mask, !!enabled)) return -EINVAL; if (enabled) tr->trace_flags |= mask; else tr->trace_flags &= ~mask; if (mask == TRACE_ITER_RECORD_CMD) trace_event_enable_cmd_record(enabled); if (mask == TRACE_ITER_RECORD_TGID) { if (!tgid_map) tgid_map = kcalloc(PID_MAX_DEFAULT + 1, sizeof(*tgid_map), GFP_KERNEL); if (!tgid_map) { tr->trace_flags &= ~TRACE_ITER_RECORD_TGID; return -ENOMEM; } trace_event_enable_tgid_record(enabled); } if (mask == TRACE_ITER_EVENT_FORK) trace_event_follow_fork(tr, enabled); if (mask == TRACE_ITER_FUNC_FORK) ftrace_pid_follow_fork(tr, enabled); if (mask == TRACE_ITER_OVERWRITE) { ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled); #ifdef CONFIG_TRACER_MAX_TRACE ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled); #endif } if (mask == TRACE_ITER_PRINTK) { 
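/* The printk option toggles both trace_printk() helpers at once: comm recording for trace_printk() callers and the global trace_printk on/off control. */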
trace_printk_start_stop_comm(enabled); trace_printk_control(enabled); } return 0; } static int trace_set_options(struct trace_array *tr, char *option) { char *cmp; int neg = 0; int ret; size_t orig_len = strlen(option); int len; cmp = strstrip(option); len = str_has_prefix(cmp, "no"); if (len) neg = 1; cmp += len; mutex_lock(&trace_types_lock); ret = match_string(trace_options, -1, cmp); /* If no option could be set, test the specific tracer options */ if (ret < 0) ret = set_tracer_option(tr, cmp, neg); else ret = set_tracer_flag(tr, 1 << ret, !neg); mutex_unlock(&trace_types_lock); /* * If the first trailing whitespace is replaced with '\0' by strstrip, * turn it back into a space. */ if (orig_len > strlen(option)) option[strlen(option)] = ' '; return ret; } static void __init apply_trace_boot_options(void) { char *buf = trace_boot_options_buf; char *option; while (true) { option = strsep(&buf, ","); if (!option) break; if (*option) trace_set_options(&global_trace, option); /* Put back the comma to allow this to be called again */ if (buf) *(buf - 1) = ','; } } static ssize_t tracing_trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { struct seq_file *m = filp->private_data; struct trace_array *tr = m->private; char buf[64]; int ret; if (cnt >= sizeof(buf)) return -EINVAL; if (copy_from_user(buf, ubuf, cnt)) return -EFAULT; buf[cnt] = 0; ret = trace_set_options(tr, buf); if (ret < 0) return ret; *ppos += cnt; return cnt; } static int tracing_trace_options_open(struct inode *inode, struct file *file) { struct trace_array *tr = inode->i_private; int ret; if (tracing_disabled) return -ENODEV; if (trace_array_get(tr) < 0) return -ENODEV; ret = single_open(file, tracing_trace_options_show, inode->i_private); if (ret < 0) trace_array_put(tr); return ret; } static const struct file_operations tracing_iter_fops = { .open = tracing_trace_options_open, .read = seq_read, .llseek = seq_lseek, .release = tracing_single_release_tr, .write = tracing_trace_options_write, }; static const char readme_msg[] = "tracing mini-HOWTO:\n\n" "# echo 0 > tracing_on : quick way to disable tracing\n" "# echo 1 > tracing_on : quick way to re-enable tracing\n\n" " Important files:\n" " trace\t\t\t- The static contents of the buffer\n" "\t\t\t To clear the buffer write into this file: echo > trace\n" " trace_pipe\t\t- A consuming read to see the contents of the buffer\n" " current_tracer\t- function and latency tracers\n" " available_tracers\t- list of configured tracers for current_tracer\n" " buffer_size_kb\t- view and modify size of per cpu buffer\n" " buffer_total_size_kb - view total size of all cpu buffers\n\n" " trace_clock\t\t-change the clock used to order events\n" " local: Per cpu clock but may not be synced across CPUs\n" " global: Synced across CPUs but slows tracing down.\n" " counter: Not a clock, but just an increment\n" " uptime: Jiffy counter from time of boot\n" " perf: Same clock that perf events use\n" #ifdef CONFIG_X86_64 " x86-tsc: TSC cycle counter\n" #endif "\n timestamp_mode\t-view the mode used to timestamp events\n" " delta: Delta difference against a buffer-wide timestamp\n" " absolute: Absolute (standalone) timestamp\n" "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n" "\n trace_marker_raw\t\t- Writes into this file writes binary data into the kernel buffer\n" " tracing_cpumask\t- Limit which CPUs to trace\n" " instances\t\t- Make sub-buffers with: mkdir instances/foo\n" "\t\t\t Remove sub-buffer with rmdir\n" " 
trace_options\t\t- Set format or modify how tracing happens\n" "\t\t\t Disable an option by adding a suffix 'no' to the\n" "\t\t\t option name\n" " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n" #ifdef CONFIG_DYNAMIC_FTRACE "\n available_filter_functions - list of functions that can be filtered on\n" " set_ftrace_filter\t- echo function name in here to only trace these\n" "\t\t\t functions\n" "\t accepts: func_full_name or glob-matching-pattern\n" "\t modules: Can select a group via module\n" "\t Format: :mod:<module-name>\n" "\t example: echo :mod:ext3 > set_ftrace_filter\n" "\t triggers: a command to perform when function is hit\n" "\t Format: <function>:<trigger>[:count]\n" "\t trigger: traceon, traceoff\n" "\t\t enable_event:<system>:<event>\n" "\t\t disable_event:<system>:<event>\n" #ifdef CONFIG_STACKTRACE "\t\t stacktrace\n" #endif #ifdef CONFIG_TRACER_SNAPSHOT "\t\t snapshot\n" #endif "\t\t dump\n" "\t\t cpudump\n" "\t example: echo do_fault:traceoff > set_ftrace_filter\n" "\t echo do_trap:traceoff:3 > set_ftrace_filter\n" "\t The first one will disable tracing every time do_fault is hit\n" "\t The second will disable tracing at most 3 times when do_trap is hit\n" "\t The first time do trap is hit and it disables tracing, the\n" "\t counter will decrement to 2. If tracing is already disabled,\n" "\t the counter will not decrement. It only decrements when the\n" "\t trigger did work\n" "\t To remove trigger without count:\n" "\t echo '!<function>:<trigger> > set_ftrace_filter\n" "\t To remove trigger with a count:\n" "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n" " set_ftrace_notrace\t- echo function name in here to never trace.\n" "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n" "\t modules: Can select a group via module command :mod:\n" "\t Does not accept triggers\n" #endif /* CONFIG_DYNAMIC_FTRACE */ #ifdef CONFIG_FUNCTION_TRACER " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n" "\t\t (function)\n" #endif #ifdef CONFIG_FUNCTION_GRAPH_TRACER " set_graph_function\t- Trace the nested calls of a function (function_graph)\n" " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n" " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n" #endif #ifdef CONFIG_TRACER_SNAPSHOT "\n snapshot\t\t- Like 'trace' but shows the content of the static\n" "\t\t\t snapshot buffer. 
Read the contents for more\n" "\t\t\t information\n" #endif #ifdef CONFIG_STACK_TRACER " stack_trace\t\t- Shows the max stack trace when active\n" " stack_max_size\t- Shows current max stack size that was traced\n" "\t\t\t Write into this file to reset the max size (trigger a\n" "\t\t\t new trace)\n" #ifdef CONFIG_DYNAMIC_FTRACE " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n" "\t\t\t traces\n" #endif #endif /* CONFIG_STACK_TRACER */ #ifdef CONFIG_DYNAMIC_EVENTS " dynamic_events\t\t- Add/remove/show the generic dynamic events\n" "\t\t\t Write into this file to define/undefine new trace events.\n" #endif #ifdef CONFIG_KPROBE_EVENTS " kprobe_events\t\t- Add/remove/show the kernel dynamic events\n" "\t\t\t Write into this file to define/undefine new trace events.\n" #endif #ifdef CONFIG_UPROBE_EVENTS " uprobe_events\t\t- Add/remove/show the userspace dynamic events\n" "\t\t\t Write into this file to define/undefine new trace events.\n" #endif #if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS) "\t accepts: event-definitions (one definition per line)\n" "\t Format: p[:[<group>/]<event>] <place> [<args>]\n" "\t r[maxactive][:[<group>/]<event>] <place> [<args>]\n" #ifdef CONFIG_HIST_TRIGGERS "\t s:[synthetic/]<event> <field> [<field>]\n" #endif "\t -:[<group>/]<event>\n" #ifdef CONFIG_KPROBE_EVENTS "\t place: [<module>:]<symbol>[+<offset>]|<memaddr>\n" "place (kretprobe): [<module>:]<symbol>[+<offset>]|<memaddr>\n" #endif #ifdef CONFIG_UPROBE_EVENTS " place (uprobe): <path>:<offset>[(ref_ctr_offset)]\n" #endif "\t args: <name>=fetcharg[:type]\n" "\t fetcharg: %<register>, @<address>, @<symbol>[+|-<offset>],\n" #ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API "\t $stack<index>, $stack, $retval, $comm, $arg<N>\n" #else "\t $stack<index>, $stack, $retval, $comm\n" #endif "\t type: s8/16/32/64, u8/16/32/64, x8/16/32/64, string, symbol,\n" "\t b<bit-width>@<bit-offset>/<container-size>,\n" "\t <type>\\[<array-size>\\]\n" #ifdef CONFIG_HIST_TRIGGERS "\t field: <stype> <name>;\n" "\t stype: u8/u16/u32/u64, s8/s16/s32/s64, pid_t,\n" "\t [unsigned] char/int/long\n" #endif #endif " events/\t\t- Directory containing all trace event subsystems:\n" " enable\t\t- Write 0/1 to enable/disable tracing of all events\n" " events/<system>/\t- Directory containing all trace events for <system>:\n" " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n" "\t\t\t events\n" " filter\t\t- If set, only events passing filter are traced\n" " events/<system>/<event>/\t- Directory containing control files for\n" "\t\t\t <event>:\n" " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n" " filter\t\t- If set, only events passing filter are traced\n" " trigger\t\t- If set, a command to perform when event is hit\n" "\t Format: <trigger>[:count][if <filter>]\n" "\t trigger: traceon, traceoff\n" "\t enable_event:<system>:<event>\n" "\t disable_event:<system>:<event>\n" #ifdef CONFIG_HIST_TRIGGERS "\t enable_hist:<system>:<event>\n" "\t disable_hist:<system>:<event>\n" #endif #ifdef CONFIG_STACKTRACE "\t\t stacktrace\n" #endif #ifdef CONFIG_TRACER_SNAPSHOT "\t\t snapshot\n" #endif #ifdef CONFIG_HIST_TRIGGERS "\t\t hist (see below)\n" #endif "\t example: echo traceoff > events/block/block_unplug/trigger\n" "\t echo traceoff:3 > events/block/block_unplug/trigger\n" "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n" "\t events/block/block_unplug/trigger\n" "\t The first disables tracing every time block_unplug is hit.\n" "\t The second disables tracing the first 3 
times block_unplug is hit.\n" "\t The third enables the kmalloc event the first 3 times block_unplug\n" "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n" "\t Like function triggers, the counter is only decremented if it\n" "\t enabled or disabled tracing.\n" "\t To remove a trigger without a count:\n" "\t echo '!<trigger> > <system>/<event>/trigger\n" "\t To remove a trigger with a count:\n" "\t echo '!<trigger>:0 > <system>/<event>/trigger\n" "\t Filters can be ignored when removing a trigger.\n" #ifdef CONFIG_HIST_TRIGGERS " hist trigger\t- If set, event hits are aggregated into a hash table\n" "\t Format: hist:keys=<field1[,field2,...]>\n" "\t [:values=<field1[,field2,...]>]\n" "\t [:sort=<field1[,field2,...]>]\n" "\t [:size=#entries]\n" "\t [:pause][:continue][:clear]\n" "\t [:name=histname1]\n" "\t [if <filter>]\n\n" "\t When a matching event is hit, an entry is added to a hash\n" "\t table using the key(s) and value(s) named, and the value of a\n" "\t sum called 'hitcount' is incremented. Keys and values\n" "\t correspond to fields in the event's format description. Keys\n" "\t can be any field, or the special string 'stacktrace'.\n" "\t Compound keys consisting of up to two fields can be specified\n" "\t by the 'keys' keyword. Values must correspond to numeric\n" "\t fields. Sort keys consisting of up to two fields can be\n" "\t specified using the 'sort' keyword. The sort direction can\n" "\t be modified by appending '.descending' or '.ascending' to a\n" "\t sort field. The 'size' parameter can be used to specify more\n" "\t or fewer than the default 2048 entries for the hashtable size.\n" "\t If a hist trigger is given a name using the 'name' parameter,\n" "\t its histogram data will be shared with other triggers of the\n" "\t same name, and trigger hits will update this common data.\n\n" "\t Reading the 'hist' file for the event will dump the hash\n" "\t table in its entirety to stdout. If there are multiple hist\n" "\t triggers attached to an event, there will be a table for each\n" "\t trigger in the output. The table displayed for a named\n" "\t trigger will be the same as any other instance having the\n" "\t same name. The default format used to display a given field\n" "\t can be modified by appending any of the following modifiers\n" "\t to the field name, as applicable:\n\n" "\t .hex display a number as a hex value\n" "\t .sym display an address as a symbol\n" "\t .sym-offset display an address as a symbol and offset\n" "\t .execname display a common_pid as a program name\n" "\t .syscall display a syscall id as a syscall name\n" "\t .log2 display log2 value rather than raw number\n" "\t .usecs display a common_timestamp in microseconds\n\n" "\t The 'pause' parameter can be used to pause an existing hist\n" "\t trigger or to start a hist trigger but not log any events\n" "\t until told to do so. 'continue' can be used to start or\n" "\t restart a paused hist trigger.\n\n" "\t The 'clear' parameter will clear the contents of a running\n" "\t hist trigger and leave its current paused/active state\n" "\t unchanged.\n\n" "\t The enable_hist and disable_hist triggers can be used to\n" "\t have one event conditionally start and stop another event's\n" "\t already-attached hist trigger. 
The syntax is analogous to\n" "\t the enable_event and disable_event triggers.\n" #endif ; static ssize_t tracing_readme_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { return simple_read_from_buffer(ubuf, cnt, ppos, readme_msg, strlen(readme_msg)); } static const struct file_operations tracing_readme_fops = { .open = tracing_open_generic, .read = tracing_readme_read, .llseek = generic_file_llseek, }; static void *saved_tgids_next(struct seq_file *m, void *v, loff_t *pos) { int *ptr = v; if (*pos || m->count) ptr++; (*pos)++; for (; ptr <= &tgid_map[PID_MAX_DEFAULT]; ptr++) { if (trace_find_tgid(*ptr)) return ptr; } return NULL; } static void *saved_tgids_start(struct seq_file *m, loff_t *pos) { void *v; loff_t l = 0; if (!tgid_map) return NULL; v = &tgid_map[0]; while (l <= *pos) { v = saved_tgids_next(m, v, &l); if (!v) return NULL; } return v; } static void saved_tgids_stop(struct seq_file *m, void *v) { } static int saved_tgids_show(struct seq_file *m, void *v) { int pid = (int *)v - tgid_map; seq_printf(m, "%d %d\n", pid, trace_find_tgid(pid)); return 0; } static const struct seq_operations tracing_saved_tgids_seq_ops = { .start = saved_tgids_start, .stop = saved_tgids_stop, .next = saved_tgids_next, .show = saved_tgids_show, }; static int tracing_saved_tgids_open(struct inode *inode, struct file *filp) { if (tracing_disabled) return -ENODEV; return seq_open(filp, &tracing_saved_tgids_seq_ops); } static const struct file_operations tracing_saved_tgids_fops = { .open = tracing_saved_tgids_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos) { unsigned int *ptr = v; if (*pos || m->count) ptr++; (*pos)++; for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num]; ptr++) { if (*ptr == -1 || *ptr == NO_CMDLINE_MAP) continue; return ptr; } return NULL; } static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos) { void *v; loff_t l = 0; preempt_disable(); arch_spin_lock(&trace_cmdline_lock); v = &savedcmd->map_cmdline_to_pid[0]; while (l <= *pos) { v = saved_cmdlines_next(m, v, &l); if (!v) return NULL; } return v; } static void saved_cmdlines_stop(struct seq_file *m, void *v) { arch_spin_unlock(&trace_cmdline_lock); preempt_enable(); } static int saved_cmdlines_show(struct seq_file *m, void *v) { char buf[TASK_COMM_LEN]; unsigned int *pid = v; __trace_find_cmdline(*pid, buf); seq_printf(m, "%d %s\n", *pid, buf); return 0; } static const struct seq_operations tracing_saved_cmdlines_seq_ops = { .start = saved_cmdlines_start, .next = saved_cmdlines_next, .stop = saved_cmdlines_stop, .show = saved_cmdlines_show, }; static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp) { if (tracing_disabled) return -ENODEV; return seq_open(filp, &tracing_saved_cmdlines_seq_ops); } static const struct file_operations tracing_saved_cmdlines_fops = { .open = tracing_saved_cmdlines_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; static ssize_t tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { char buf[64]; int r; arch_spin_lock(&trace_cmdline_lock); r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num); arch_spin_unlock(&trace_cmdline_lock); return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); } static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s) { kfree(s->saved_cmdlines); kfree(s->map_cmdline_to_pid); kfree(s); } static int 
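/* * Note on the resize scheme below: a new saved_cmdlines buffer is * allocated and initialized first, the savedcmd pointer is switched * over under trace_cmdline_lock, and only then is the old buffer * freed, so readers never observe a half-initialized buffer. */ 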
tracing_resize_saved_cmdlines(unsigned int val) { struct saved_cmdlines_buffer *s, *savedcmd_temp; s = kmalloc(sizeof(*s), GFP_KERNEL); if (!s) return -ENOMEM; if (allocate_cmdlines_buffer(val, s) < 0) { kfree(s); return -ENOMEM; } arch_spin_lock(&trace_cmdline_lock); savedcmd_temp = savedcmd; savedcmd = s; arch_spin_unlock(&trace_cmdline_lock); free_saved_cmdlines_buffer(savedcmd_temp); return 0; } static ssize_t tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { unsigned long val; int ret; ret = kstrtoul_from_user(ubuf, cnt, 10, &val); if (ret) return ret; /* must have at least 1 entry and no more than PID_MAX_DEFAULT */ if (!val || val > PID_MAX_DEFAULT) return -EINVAL; ret = tracing_resize_saved_cmdlines((unsigned int)val); if (ret < 0) return ret; *ppos += cnt; return cnt; } static const struct file_operations tracing_saved_cmdlines_size_fops = { .open = tracing_open_generic, .read = tracing_saved_cmdlines_size_read, .write = tracing_saved_cmdlines_size_write, }; #ifdef CONFIG_TRACE_EVAL_MAP_FILE static union trace_eval_map_item * update_eval_map(union trace_eval_map_item *ptr) { if (!ptr->map.eval_string) { if (ptr->tail.next) { ptr = ptr->tail.next; /* Set ptr to the next real item (skip head) */ ptr++; } else return NULL; } return ptr; } static void *eval_map_next(struct seq_file *m, void *v, loff_t *pos) { union trace_eval_map_item *ptr = v; /* * Paranoid! If ptr points to end, we don't want to increment past it. * This really should never happen. */ ptr = update_eval_map(ptr); if (WARN_ON_ONCE(!ptr)) return NULL; ptr++; (*pos)++; ptr = update_eval_map(ptr); return ptr; } static void *eval_map_start(struct seq_file *m, loff_t *pos) { union trace_eval_map_item *v; loff_t l = 0; mutex_lock(&trace_eval_mutex); v = trace_eval_maps; if (v) v++; while (v && l < *pos) { v = eval_map_next(m, v, &l); } return v; } static void eval_map_stop(struct seq_file *m, void *v) { mutex_unlock(&trace_eval_mutex); } static int eval_map_show(struct seq_file *m, void *v) { union trace_eval_map_item *ptr = v; seq_printf(m, "%s %ld (%s)\n", ptr->map.eval_string, ptr->map.eval_value, ptr->map.system); return 0; } static const struct seq_operations tracing_eval_map_seq_ops = { .start = eval_map_start, .next = eval_map_next, .stop = eval_map_stop, .show = eval_map_show, }; static int tracing_eval_map_open(struct inode *inode, struct file *filp) { if (tracing_disabled) return -ENODEV; return seq_open(filp, &tracing_eval_map_seq_ops); } static const struct file_operations tracing_eval_map_fops = { .open = tracing_eval_map_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; static inline union trace_eval_map_item * trace_eval_jmp_to_tail(union trace_eval_map_item *ptr) { /* Return tail of array given the head */ return ptr + ptr->head.length + 1; } static void trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start, int len) { struct trace_eval_map **stop; struct trace_eval_map **map; union trace_eval_map_item *map_array; union trace_eval_map_item *ptr; stop = start + len; /* * The trace_eval_maps contains the map plus a head and tail item, * where the head holds the module and length of array, and the * tail holds a pointer to the next list. 
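The resulting layout is [head | map 0 | ... | map len-1 | tail], which is why the array below is allocated with len + 2 entries and why trace_eval_jmp_to_tail() can skip a whole block by stepping head.length + 1 entries forward. 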
*/ map_array = kmalloc_array(len + 2, sizeof(*map_array), GFP_KERNEL); if (!map_array) { pr_warn("Unable to allocate trace eval mapping\n"); return; } mutex_lock(&trace_eval_mutex); if (!trace_eval_maps) trace_eval_maps = map_array; else { ptr = trace_eval_maps; for (;;) { ptr = trace_eval_jmp_to_tail(ptr); if (!ptr->tail.next) break; ptr = ptr->tail.next; } ptr->tail.next = map_array; } map_array->head.mod = mod; map_array->head.length = len; map_array++; for (map = start; (unsigned long)map < (unsigned long)stop; map++) { map_array->map = **map; map_array++; } memset(map_array, 0, sizeof(*map_array)); mutex_unlock(&trace_eval_mutex); } static void trace_create_eval_file(struct dentry *d_tracer) { trace_create_file("eval_map", 0444, d_tracer, NULL, &tracing_eval_map_fops); } #else /* CONFIG_TRACE_EVAL_MAP_FILE */ static inline void trace_create_eval_file(struct dentry *d_tracer) { } static inline void trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start, int len) { } #endif /* !CONFIG_TRACE_EVAL_MAP_FILE */ static void trace_insert_eval_map(struct module *mod, struct trace_eval_map **start, int len) { struct trace_eval_map **map; if (len <= 0) return; map = start; trace_event_eval_update(map, len); trace_insert_eval_map_file(mod, start, len); } static ssize_t tracing_set_trace_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { struct trace_array *tr = filp->private_data; char buf[MAX_TRACER_SIZE+2]; int r; mutex_lock(&trace_types_lock); r = sprintf(buf, "%s\n", tr->current_trace->name); mutex_unlock(&trace_types_lock); return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); } int tracer_init(struct tracer *t, struct trace_array *tr) { tracing_reset_online_cpus(&tr->trace_buffer); return t->init(tr); } static void set_buffer_entries(struct trace_buffer *buf, unsigned long val) { int cpu; for_each_tracing_cpu(cpu) per_cpu_ptr(buf->data, cpu)->entries = val; } #ifdef CONFIG_TRACER_MAX_TRACE /* resize @tr's buffer to the size of @size_tr's entries */ static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf, struct trace_buffer *size_buf, int cpu_id) { int cpu, ret = 0; if (cpu_id == RING_BUFFER_ALL_CPUS) { for_each_tracing_cpu(cpu) { ret = ring_buffer_resize(trace_buf->buffer, per_cpu_ptr(size_buf->data, cpu)->entries, cpu); if (ret < 0) break; per_cpu_ptr(trace_buf->data, cpu)->entries = per_cpu_ptr(size_buf->data, cpu)->entries; } } else { ret = ring_buffer_resize(trace_buf->buffer, per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id); if (ret == 0) per_cpu_ptr(trace_buf->data, cpu_id)->entries = per_cpu_ptr(size_buf->data, cpu_id)->entries; } return ret; } #endif /* CONFIG_TRACER_MAX_TRACE */ static int __tracing_resize_ring_buffer(struct trace_array *tr, unsigned long size, int cpu) { int ret; /* * If kernel or user changes the size of the ring buffer * we use the size that was given, and we can forget about * expanding it later. */ ring_buffer_expanded = true; /* May be called before buffers are initialized */ if (!tr->trace_buffer.buffer) return 0; ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu); if (ret < 0) return ret; #ifdef CONFIG_TRACER_MAX_TRACE if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) || !tr->current_trace->use_max_tr) goto out; ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu); if (ret < 0) { int r = resize_buffer_duplicate_size(&tr->trace_buffer, &tr->trace_buffer, cpu); if (r < 0) { /* * AARGH! We are left with different * size max buffer!!!! * The max buffer is our "snapshot" buffer. 
* When a tracer needs a snapshot (one of the * latency tracers), it swaps the max buffer * with the saved snapshot. We succeeded in * updating the size of the main buffer, but failed to * update the size of the max buffer. But when we tried * to reset the main buffer to the original size, we * failed there too. This is very unlikely to * happen, but if it does, warn and kill all * tracing. */ WARN_ON(1); tracing_disabled = 1; } return ret; } if (cpu == RING_BUFFER_ALL_CPUS) set_buffer_entries(&tr->max_buffer, size); else per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size; out: #endif /* CONFIG_TRACER_MAX_TRACE */ if (cpu == RING_BUFFER_ALL_CPUS) set_buffer_entries(&tr->trace_buffer, size); else per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size; return ret; } static ssize_t tracing_resize_ring_buffer(struct trace_array *tr, unsigned long size, int cpu_id) { int ret = size; mutex_lock(&trace_types_lock); if (cpu_id != RING_BUFFER_ALL_CPUS) { /* make sure this cpu is enabled in the mask */ if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) { ret = -EINVAL; goto out; } } ret = __tracing_resize_ring_buffer(tr, size, cpu_id); if (ret < 0) ret = -ENOMEM; out: mutex_unlock(&trace_types_lock); return ret; } /** * tracing_update_buffers - used by tracing facility to expand ring buffers * * To save memory when tracing is never used on a system that has it * configured in, the ring buffers are set to a minimum size. But once * a user starts to use the tracing facility, they need to grow * to their default size. * * This function is to be called when a tracer is about to be used. */ int tracing_update_buffers(void) { int ret = 0; mutex_lock(&trace_types_lock); if (!ring_buffer_expanded) ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size, RING_BUFFER_ALL_CPUS); mutex_unlock(&trace_types_lock); return ret; } struct trace_option_dentry; static void create_trace_option_files(struct trace_array *tr, struct tracer *tracer); /* * Used to clear out the tracer before deletion of an instance. * Must have trace_types_lock held. */ static void tracing_set_nop(struct trace_array *tr) { if (tr->current_trace == &nop_trace) return; tr->current_trace->enabled--; if (tr->current_trace->reset) tr->current_trace->reset(tr); tr->current_trace = &nop_trace; } static void add_tracer_options(struct trace_array *tr, struct tracer *t) { /* Only enable if the directory has been created already. 
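Tracers registered before that point are expected to get their option files added later, when the directory itself is set up. 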
*/ if (!tr->dir) return; create_trace_option_files(tr, t); } static int tracing_set_tracer(struct trace_array *tr, const char *buf) { struct tracer *t; #ifdef CONFIG_TRACER_MAX_TRACE bool had_max_tr; #endif int ret = 0; mutex_lock(&trace_types_lock); if (!ring_buffer_expanded) { ret = __tracing_resize_ring_buffer(tr, trace_buf_size, RING_BUFFER_ALL_CPUS); if (ret < 0) goto out; ret = 0; } for (t = trace_types; t; t = t->next) { if (strcmp(t->name, buf) == 0) break; } if (!t) { ret = -EINVAL; goto out; } if (t == tr->current_trace) goto out; /* Some tracers won't work on kernel command line */ if (system_state < SYSTEM_RUNNING && t->noboot) { pr_warn("Tracer '%s' is not allowed on command line, ignored\n", t->name); goto out; } /* Some tracers are only allowed for the top level buffer */ if (!trace_ok_for_array(t, tr)) { ret = -EINVAL; goto out; } /* If trace pipe files are being read, we can't change the tracer */ if (tr->current_trace->ref) { ret = -EBUSY; goto out; } trace_branch_disable(); tr->current_trace->enabled--; if (tr->current_trace->reset) tr->current_trace->reset(tr); /* Current trace needs to be nop_trace before synchronize_rcu */ tr->current_trace = &nop_trace; #ifdef CONFIG_TRACER_MAX_TRACE had_max_tr = tr->allocated_snapshot; if (had_max_tr && !t->use_max_tr) { /* * We need to make sure that the update_max_tr sees that * current_trace changed to nop_trace to keep it from * swapping the buffers after we resize it. * The update_max_tr is called from interrupts disabled * so a synchronized_sched() is sufficient. */ synchronize_rcu(); free_snapshot(tr); } #endif #ifdef CONFIG_TRACER_MAX_TRACE if (t->use_max_tr && !had_max_tr) { ret = tracing_alloc_snapshot_instance(tr); if (ret < 0) goto out; } #endif if (t->init) { ret = tracer_init(t, tr); if (ret) goto out; } tr->current_trace = t; tr->current_trace->enabled++; trace_branch_enable(tr); out: mutex_unlock(&trace_types_lock); return ret; } static ssize_t tracing_set_trace_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { struct trace_array *tr = filp->private_data; char buf[MAX_TRACER_SIZE+1]; int i; size_t ret; int err; ret = cnt; if (cnt > MAX_TRACER_SIZE) cnt = MAX_TRACER_SIZE; if (copy_from_user(buf, ubuf, cnt)) return -EFAULT; buf[cnt] = 0; /* strip ending whitespace. */ for (i = cnt - 1; i > 0 && isspace(buf[i]); i--) buf[i] = 0; err = tracing_set_tracer(tr, buf); if (err) return err; *ppos += ret; return ret; } static ssize_t tracing_nsecs_read(unsigned long *ptr, char __user *ubuf, size_t cnt, loff_t *ppos) { char buf[64]; int r; r = snprintf(buf, sizeof(buf), "%ld\n", *ptr == (unsigned long)-1 ? 
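/* (unsigned long)-1 is the "not set" marker; show it as-is rather than converting */ 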
-1 : nsecs_to_usecs(*ptr)); if (r > sizeof(buf)) r = sizeof(buf); return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); } static ssize_t tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf, size_t cnt, loff_t *ppos) { unsigned long val; int ret; ret = kstrtoul_from_user(ubuf, cnt, 10, &val); if (ret) return ret; *ptr = val * 1000; return cnt; } static ssize_t tracing_thresh_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos); } static ssize_t tracing_thresh_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { struct trace_array *tr = filp->private_data; int ret; mutex_lock(&trace_types_lock); ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos); if (ret < 0) goto out; if (tr->current_trace->update_thresh) { ret = tr->current_trace->update_thresh(tr); if (ret < 0) goto out; } ret = cnt; out: mutex_unlock(&trace_types_lock); return ret; } #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER) static ssize_t tracing_max_lat_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos); } static ssize_t tracing_max_lat_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos); } #endif static int tracing_open_pipe(struct inode *inode, struct file *filp) { struct trace_array *tr = inode->i_private; struct trace_iterator *iter; int ret = 0; if (tracing_disabled) return -ENODEV; if (trace_array_get(tr) < 0) return -ENODEV; mutex_lock(&trace_types_lock); /* create a buffer to store the information to pass to userspace */ iter = kzalloc(sizeof(*iter), GFP_KERNEL); if (!iter) { ret = -ENOMEM; __trace_array_put(tr); goto out; } trace_seq_init(&iter->seq); iter->trace = tr->current_trace; if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) { ret = -ENOMEM; goto fail; } /* trace pipe does not show start of buffer */ cpumask_setall(iter->started); if (tr->trace_flags & TRACE_ITER_LATENCY_FMT) iter->iter_flags |= TRACE_FILE_LAT_FMT; /* Output in nanoseconds only if we are using a clock in nanoseconds. 
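Clocks that are not time-based (e.g. the plain counter) are left unconverted. 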
*/ if (trace_clocks[tr->clock_id].in_ns) iter->iter_flags |= TRACE_FILE_TIME_IN_NS; iter->tr = tr; iter->trace_buffer = &tr->trace_buffer; iter->cpu_file = tracing_get_cpu(inode); mutex_init(&iter->mutex); filp->private_data = iter; if (iter->trace->pipe_open) iter->trace->pipe_open(iter); nonseekable_open(inode, filp); tr->current_trace->ref++; out: mutex_unlock(&trace_types_lock); return ret; fail: kfree(iter->trace); kfree(iter); __trace_array_put(tr); mutex_unlock(&trace_types_lock); return ret; } static int tracing_release_pipe(struct inode *inode, struct file *file) { struct trace_iterator *iter = file->private_data; struct trace_array *tr = inode->i_private; mutex_lock(&trace_types_lock); tr->current_trace->ref--; if (iter->trace->pipe_close) iter->trace->pipe_close(iter); mutex_unlock(&trace_types_lock); free_cpumask_var(iter->started); mutex_destroy(&iter->mutex); kfree(iter); trace_array_put(tr); return 0; } static __poll_t trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table) { struct trace_array *tr = iter->tr; /* Iterators are static, they should be filled or empty */ if (trace_buffer_iter(iter, iter->cpu_file)) return EPOLLIN | EPOLLRDNORM; if (tr->trace_flags & TRACE_ITER_BLOCK) /* * Always select as readable when in blocking mode */ return EPOLLIN | EPOLLRDNORM; else return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file, filp, poll_table); } static __poll_t tracing_poll_pipe(struct file *filp, poll_table *poll_table) { struct trace_iterator *iter = filp->private_data; return trace_poll(iter, filp, poll_table); } /* Must be called with iter->mutex held. */ static int tracing_wait_pipe(struct file *filp) { struct trace_iterator *iter = filp->private_data; int ret; while (trace_empty(iter)) { if ((filp->f_flags & O_NONBLOCK)) { return -EAGAIN; } /* * We block until we read something and tracing is disabled. * We still block if tracing is disabled, but we have never * read anything. This allows a user to cat this file, and * then enable tracing. But after we have read something, * we give an EOF when tracing is again disabled. * * iter->pos will be 0 if we haven't read anything. */ if (!tracer_tracing_is_on(iter->tr) && iter->pos) break; mutex_unlock(&iter->mutex); ret = wait_on_pipe(iter, 0); mutex_lock(&iter->mutex); if (ret) return ret; } return 1; } /* * Consumer reader. */ static ssize_t tracing_read_pipe(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { struct trace_iterator *iter = filp->private_data; ssize_t sret; /* * Avoid more than one consumer on a single file descriptor * This is just a matter of traces coherency, the ring buffer itself * is protected. 
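Holding iter->mutex across the whole read below is what provides that single-consumer guarantee. 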
*/ mutex_lock(&iter->mutex); /* return any leftover data */ sret = trace_seq_to_user(&iter->seq, ubuf, cnt); if (sret != -EBUSY) goto out; trace_seq_init(&iter->seq); if (iter->trace->read) { sret = iter->trace->read(iter, filp, ubuf, cnt, ppos); if (sret) goto out; } waitagain: sret = tracing_wait_pipe(filp); if (sret <= 0) goto out; /* stop when tracing is finished */ if (trace_empty(iter)) { sret = 0; goto out; } if (cnt >= PAGE_SIZE) cnt = PAGE_SIZE - 1; /* reset all but tr, trace, and overruns */ memset(&iter->seq, 0, sizeof(struct trace_iterator) - offsetof(struct trace_iterator, seq)); cpumask_clear(iter->started); iter->pos = -1; trace_event_read_lock(); trace_access_lock(iter->cpu_file); while (trace_find_next_entry_inc(iter) != NULL) { enum print_line_t ret; int save_len = iter->seq.seq.len; ret = print_trace_line(iter); if (ret == TRACE_TYPE_PARTIAL_LINE) { /* don't print partial lines */ iter->seq.seq.len = save_len; break; } if (ret != TRACE_TYPE_NO_CONSUME) trace_consume(iter); if (trace_seq_used(&iter->seq) >= cnt) break; /* * Setting the full flag means we reached the trace_seq buffer * size and we should leave by partial output condition above. * One of the trace_seq_* functions is not used properly. */ WARN_ONCE(iter->seq.full, "full flag set for trace type %d", iter->ent->type); } trace_access_unlock(iter->cpu_file); trace_event_read_unlock(); /* Now copy what we have to the user */ sret = trace_seq_to_user(&iter->seq, ubuf, cnt); if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq)) trace_seq_init(&iter->seq); /* * If there was nothing to send to user, in spite of consuming trace * entries, go back to wait for more entries. */ if (sret == -EBUSY) goto waitagain; out: mutex_unlock(&iter->mutex); return sret; } static void tracing_spd_release_pipe(struct splice_pipe_desc *spd, unsigned int idx) { __free_page(spd->pages[idx]); } static const struct pipe_buf_operations tracing_pipe_buf_ops = { .can_merge = 0, .confirm = generic_pipe_buf_confirm, .release = generic_pipe_buf_release, .steal = generic_pipe_buf_steal, .get = generic_pipe_buf_get, }; static size_t tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter) { size_t count; int save_len; int ret; /* Seq buffer is page-sized, exactly what we need. */ for (;;) { save_len = iter->seq.seq.len; ret = print_trace_line(iter); if (trace_seq_has_overflowed(&iter->seq)) { iter->seq.seq.len = save_len; break; } /* * This should not be hit, because it should only * be set if the iter->seq overflowed. But check it * anyway to be safe. */ if (ret == TRACE_TYPE_PARTIAL_LINE) { iter->seq.seq.len = save_len; break; } count = trace_seq_used(&iter->seq) - save_len; if (rem < count) { rem = 0; iter->seq.seq.len = save_len; break; } if (ret != TRACE_TYPE_NO_CONSUME) trace_consume(iter); rem -= count; if (!trace_find_next_entry_inc(iter)) { rem = 0; iter->ent = NULL; break; } } return rem; } static ssize_t tracing_splice_read_pipe(struct file *filp, loff_t *ppos, struct pipe_inode_info *pipe, size_t len, unsigned int flags) { struct page *pages_def[PIPE_DEF_BUFFERS]; struct partial_page partial_def[PIPE_DEF_BUFFERS]; struct trace_iterator *iter = filp->private_data; struct splice_pipe_desc spd = { .pages = pages_def, .partial = partial_def, .nr_pages = 0, /* This gets updated below. 
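(After the fill loop, spd.nr_pages is set to the number of pages that were actually filled.) 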
*/ .nr_pages_max = PIPE_DEF_BUFFERS, .ops = &tracing_pipe_buf_ops, .spd_release = tracing_spd_release_pipe, }; ssize_t ret; size_t rem; unsigned int i; if (splice_grow_spd(pipe, &spd)) return -ENOMEM; mutex_lock(&iter->mutex); if (iter->trace->splice_read) { ret = iter->trace->splice_read(iter, filp, ppos, pipe, len, flags); if (ret) goto out_err; } ret = tracing_wait_pipe(filp); if (ret <= 0) goto out_err; if (!iter->ent && !trace_find_next_entry_inc(iter)) { ret = -EFAULT; goto out_err; } trace_event_read_lock(); trace_access_lock(iter->cpu_file); /* Fill as many pages as possible. */ for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) { spd.pages[i] = alloc_page(GFP_KERNEL); if (!spd.pages[i]) break; rem = tracing_fill_pipe_page(rem, iter); /* Copy the data into the page, so we can start over. */ ret = trace_seq_to_buffer(&iter->seq, page_address(spd.pages[i]), trace_seq_used(&iter->seq)); if (ret < 0) { __free_page(spd.pages[i]); break; } spd.partial[i].offset = 0; spd.partial[i].len = trace_seq_used(&iter->seq); trace_seq_init(&iter->seq); } trace_access_unlock(iter->cpu_file); trace_event_read_unlock(); mutex_unlock(&iter->mutex); spd.nr_pages = i; if (i) ret = splice_to_pipe(pipe, &spd); else ret = 0; out: splice_shrink_spd(&spd); return ret; out_err: mutex_unlock(&iter->mutex); goto out; } static ssize_t tracing_entries_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { struct inode *inode = file_inode(filp); struct trace_array *tr = inode->i_private; int cpu = tracing_get_cpu(inode); char buf[64]; int r = 0; ssize_t ret; mutex_lock(&trace_types_lock); if (cpu == RING_BUFFER_ALL_CPUS) { int cpu, buf_size_same; unsigned long size; size = 0; buf_size_same = 1; /* check if all cpu sizes are same */ for_each_tracing_cpu(cpu) { /* fill in the size from first enabled cpu */ if (size == 0) size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries; if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) { buf_size_same = 0; break; } } if (buf_size_same) { if (!ring_buffer_expanded) r = sprintf(buf, "%lu (expanded: %lu)\n", size >> 10, trace_buf_size >> 10); else r = sprintf(buf, "%lu\n", size >> 10); } else r = sprintf(buf, "X\n"); } else r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10); mutex_unlock(&trace_types_lock); ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r); return ret; } static ssize_t tracing_entries_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { struct inode *inode = file_inode(filp); struct trace_array *tr = inode->i_private; unsigned long val; int ret; ret = kstrtoul_from_user(ubuf, cnt, 10, &val); if (ret) return ret; /* must have at least 1 entry */ if (!val) return -EINVAL; /* value is in KB */ val <<= 10; ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode)); if (ret < 0) return ret; *ppos += cnt; return cnt; } static ssize_t tracing_total_entries_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { struct trace_array *tr = filp->private_data; char buf[64]; int r, cpu; unsigned long size = 0, expanded_size = 0; mutex_lock(&trace_types_lock); for_each_tracing_cpu(cpu) { size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10; if (!ring_buffer_expanded) expanded_size += trace_buf_size >> 10; } if (ring_buffer_expanded) r = sprintf(buf, "%lu\n", size); else r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size); mutex_unlock(&trace_types_lock); return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); } static ssize_t 
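/* * Writes to free_buffer are accepted but ignored; the real work happens * at release time, which shrinks the ring buffer to zero (and, with * TRACE_ITER_STOP_ON_FREE set, turns tracing off first). */ 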
tracing_free_buffer_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { /* * There is no need to read what the user has written, this function * is just to make sure that there is no error when "echo" is used */ *ppos += cnt; return cnt; } static int tracing_free_buffer_release(struct inode *inode, struct file *filp) { struct trace_array *tr = inode->i_private; /* disable tracing ? */ if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE) tracer_tracing_off(tr); /* resize the ring buffer to 0 */ tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS); trace_array_put(tr); return 0; } static ssize_t tracing_mark_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *fpos) { struct trace_array *tr = filp->private_data; struct ring_buffer_event *event; enum event_trigger_type tt = ETT_NONE; struct ring_buffer *buffer; struct print_entry *entry; unsigned long irq_flags; const char faulted[] = "<faulted>"; ssize_t written; int size; int len; /* Used in tracing_mark_raw_write() as well */ #define FAULTED_SIZE (sizeof(faulted) - 1) /* '\0' is already accounted for */ if (tracing_disabled) return -EINVAL; if (!(tr->trace_flags & TRACE_ITER_MARKERS)) return -EINVAL; if (cnt > TRACE_BUF_SIZE) cnt = TRACE_BUF_SIZE; BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE); local_save_flags(irq_flags); size = sizeof(*entry) + cnt + 2; /* add '\0' and possible '\n' */ /* If less than "<faulted>", then make sure we can still add that */ if (cnt < FAULTED_SIZE) size += FAULTED_SIZE - cnt; buffer = tr->trace_buffer.buffer; event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size, irq_flags, preempt_count()); if (unlikely(!event)) /* Ring buffer disabled, return as if not open for write */ return -EBADF; entry = ring_buffer_event_data(event); entry->ip = _THIS_IP_; len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt); if (len) { memcpy(&entry->buf, faulted, FAULTED_SIZE); cnt = FAULTED_SIZE; written = -EFAULT; } else written = cnt; len = cnt; if (tr->trace_marker_file && !list_empty(&tr->trace_marker_file->triggers)) { /* do not add \n before testing triggers, but add \0 */ entry->buf[cnt] = '\0'; tt = event_triggers_call(tr->trace_marker_file, entry, event); } if (entry->buf[cnt - 1] != '\n') { entry->buf[cnt] = '\n'; entry->buf[cnt + 1] = '\0'; } else entry->buf[cnt] = '\0'; __buffer_unlock_commit(buffer, event); if (tt) event_triggers_post_call(tr->trace_marker_file, tt); if (written > 0) *fpos += written; return written; } /* Limit it for now to 3K (including tag) */ #define RAW_DATA_MAX_SIZE (1024*3) static ssize_t tracing_mark_raw_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *fpos) { struct trace_array *tr = filp->private_data; struct ring_buffer_event *event; struct ring_buffer *buffer; struct raw_data_entry *entry; const char faulted[] = "<faulted>"; unsigned long irq_flags; ssize_t written; int size; int len; #define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int)) if (tracing_disabled) return -EINVAL; if (!(tr->trace_flags & TRACE_ITER_MARKERS)) return -EINVAL; /* The marker must at least have a tag id */ if (cnt < sizeof(unsigned int) || cnt > RAW_DATA_MAX_SIZE) return -EINVAL; if (cnt > TRACE_BUF_SIZE) cnt = TRACE_BUF_SIZE; BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE); local_save_flags(irq_flags); size = sizeof(*entry) + cnt; if (cnt < FAULT_SIZE_ID) size += FAULT_SIZE_ID - cnt; buffer = tr->trace_buffer.buffer; event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size, irq_flags, preempt_count()); if (!event) /* Ring buffer disabled, return as if 
not open for write */ return -EBADF; entry = ring_buffer_event_data(event); len = __copy_from_user_inatomic(&entry->id, ubuf, cnt); if (len) { entry->id = -1; memcpy(&entry->buf, faulted, FAULTED_SIZE); written = -EFAULT; } else written = cnt; __buffer_unlock_commit(buffer, event); if (written > 0) *fpos += written; return written; } static int tracing_clock_show(struct seq_file *m, void *v) { struct trace_array *tr = m->private; int i; for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) seq_printf(m, "%s%s%s%s", i ? " " : "", i == tr->clock_id ? "[" : "", trace_clocks[i].name, i == tr->clock_id ? "]" : ""); seq_putc(m, '\n'); return 0; } int tracing_set_clock(struct trace_array *tr, const char *clockstr) { int i; for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) { if (strcmp(trace_clocks[i].name, clockstr) == 0) break; } if (i == ARRAY_SIZE(trace_clocks)) return -EINVAL; mutex_lock(&trace_types_lock); tr->clock_id = i; ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func); /* * New clock may not be consistent with the previous clock. * Reset the buffer so that it doesn't have incomparable timestamps. */ tracing_reset_online_cpus(&tr->trace_buffer); #ifdef CONFIG_TRACER_MAX_TRACE if (tr->max_buffer.buffer) ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func); tracing_reset_online_cpus(&tr->max_buffer); #endif mutex_unlock(&trace_types_lock); return 0; } static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *fpos) { struct seq_file *m = filp->private_data; struct trace_array *tr = m->private; char buf[64]; const char *clockstr; int ret; if (cnt >= sizeof(buf)) return -EINVAL; if (copy_from_user(buf, ubuf, cnt)) return -EFAULT; buf[cnt] = 0; clockstr = strstrip(buf); ret = tracing_set_clock(tr, clockstr); if (ret) return ret; *fpos += cnt; return cnt; } static int tracing_clock_open(struct inode *inode, struct file *file) { struct trace_array *tr = inode->i_private; int ret; if (tracing_disabled) return -ENODEV; if (trace_array_get(tr)) return -ENODEV; ret = single_open(file, tracing_clock_show, inode->i_private); if (ret < 0) trace_array_put(tr); return ret; } static int tracing_time_stamp_mode_show(struct seq_file *m, void *v) { struct trace_array *tr = m->private; mutex_lock(&trace_types_lock); if (ring_buffer_time_stamp_abs(tr->trace_buffer.buffer)) seq_puts(m, "delta [absolute]\n"); else seq_puts(m, "[delta] absolute\n"); mutex_unlock(&trace_types_lock); return 0; } static int tracing_time_stamp_mode_open(struct inode *inode, struct file *file) { struct trace_array *tr = inode->i_private; int ret; if (tracing_disabled) return -ENODEV; if (trace_array_get(tr)) return -ENODEV; ret = single_open(file, tracing_time_stamp_mode_show, inode->i_private); if (ret < 0) trace_array_put(tr); return ret; } int tracing_set_time_stamp_abs(struct trace_array *tr, bool abs) { int ret = 0; mutex_lock(&trace_types_lock); if (abs && tr->time_stamp_abs_ref++) goto out; if (!abs) { if (WARN_ON_ONCE(!tr->time_stamp_abs_ref)) { ret = -EINVAL; goto out; } if (--tr->time_stamp_abs_ref) goto out; } ring_buffer_set_time_stamp_abs(tr->trace_buffer.buffer, abs); #ifdef CONFIG_TRACER_MAX_TRACE if (tr->max_buffer.buffer) ring_buffer_set_time_stamp_abs(tr->max_buffer.buffer, abs); #endif out: mutex_unlock(&trace_types_lock); return ret; } struct ftrace_buffer_info { struct trace_iterator iter; void *spare; unsigned int spare_cpu; unsigned int read; }; #ifdef CONFIG_TRACER_SNAPSHOT static int tracing_snapshot_open(struct inode *inode, struct file *file) { 
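/* * A read open goes through __tracing_open() against the max (snapshot) * buffer; a write-only open just stashes a minimal iterator in a stub * seq_file so tracing_snapshot_write() can find the trace_array. */ 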
struct trace_array *tr = inode->i_private; struct trace_iterator *iter; struct seq_file *m; int ret = 0; if (trace_array_get(tr) < 0) return -ENODEV; if (file->f_mode & FMODE_READ) { iter = __tracing_open(inode, file, true); if (IS_ERR(iter)) ret = PTR_ERR(iter); } else { /* Writes still need the seq_file to hold the private data */ ret = -ENOMEM; m = kzalloc(sizeof(*m), GFP_KERNEL); if (!m) goto out; iter = kzalloc(sizeof(*iter), GFP_KERNEL); if (!iter) { kfree(m); goto out; } ret = 0; iter->tr = tr; iter->trace_buffer = &tr->max_buffer; iter->cpu_file = tracing_get_cpu(inode); m->private = iter; file->private_data = m; } out: if (ret < 0) trace_array_put(tr); return ret; } static ssize_t tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { struct seq_file *m = filp->private_data; struct trace_iterator *iter = m->private; struct trace_array *tr = iter->tr; unsigned long val; int ret; ret = tracing_update_buffers(); if (ret < 0) return ret; ret = kstrtoul_from_user(ubuf, cnt, 10, &val); if (ret) return ret; mutex_lock(&trace_types_lock); if (tr->current_trace->use_max_tr) { ret = -EBUSY; goto out; } switch (val) { case 0: if (iter->cpu_file != RING_BUFFER_ALL_CPUS) { ret = -EINVAL; break; } if (tr->allocated_snapshot) free_snapshot(tr); break; case 1: /* Only allow per-cpu swap if the ring buffer supports it */ #ifndef CONFIG_RING_BUFFER_ALLOW_SWAP if (iter->cpu_file != RING_BUFFER_ALL_CPUS) { ret = -EINVAL; break; } #endif if (!tr->allocated_snapshot) { ret = tracing_alloc_snapshot_instance(tr); if (ret < 0) break; } local_irq_disable(); /* Now, we're going to swap */ if (iter->cpu_file == RING_BUFFER_ALL_CPUS) update_max_tr(tr, current, smp_processor_id()); else update_max_tr_single(tr, current, iter->cpu_file); local_irq_enable(); break; default: if (tr->allocated_snapshot) { if (iter->cpu_file == RING_BUFFER_ALL_CPUS) tracing_reset_online_cpus(&tr->max_buffer); else tracing_reset(&tr->max_buffer, iter->cpu_file); } break; } if (ret >= 0) { *ppos += cnt; ret = cnt; } out: mutex_unlock(&trace_types_lock); return ret; } static int tracing_snapshot_release(struct inode *inode, struct file *file) { struct seq_file *m = file->private_data; int ret; ret = tracing_release(inode, file); if (file->f_mode & FMODE_READ) return ret; /* If write only, the seq_file is just a stub */ if (m) kfree(m->private); kfree(m); return 0; } static int tracing_buffers_open(struct inode *inode, struct file *filp); static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf, size_t count, loff_t *ppos); static int tracing_buffers_release(struct inode *inode, struct file *file); static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos, struct pipe_inode_info *pipe, size_t len, unsigned int flags); static int snapshot_raw_open(struct inode *inode, struct file *filp) { struct ftrace_buffer_info *info; int ret; ret = tracing_buffers_open(inode, filp); if (ret < 0) return ret; info = filp->private_data; if (info->iter.trace->use_max_tr) { tracing_buffers_release(inode, filp); return -EBUSY; } info->iter.snapshot = true; info->iter.trace_buffer = &info->iter.tr->max_buffer; return ret; } #endif /* CONFIG_TRACER_SNAPSHOT */ static const struct file_operations tracing_thresh_fops = { .open = tracing_open_generic, .read = tracing_thresh_read, .write = tracing_thresh_write, .llseek = generic_file_llseek, }; #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER) static const struct file_operations tracing_max_lat_fops = { .open = 
tracing_open_generic, .read = tracing_max_lat_read, .write = tracing_max_lat_write, .llseek = generic_file_llseek, }; #endif static const struct file_operations set_tracer_fops = { .open = tracing_open_generic, .read = tracing_set_trace_read, .write = tracing_set_trace_write, .llseek = generic_file_llseek, }; static const struct file_operations tracing_pipe_fops = { .open = tracing_open_pipe, .poll = tracing_poll_pipe, .read = tracing_read_pipe, .splice_read = tracing_splice_read_pipe, .release = tracing_release_pipe, .llseek = no_llseek, }; static const struct file_operations tracing_entries_fops = { .open = tracing_open_generic_tr, .read = tracing_entries_read, .write = tracing_entries_write, .llseek = generic_file_llseek, .release = tracing_release_generic_tr, }; static const struct file_operations tracing_total_entries_fops = { .open = tracing_open_generic_tr, .read = tracing_total_entries_read, .llseek = generic_file_llseek, .release = tracing_release_generic_tr, }; static const struct file_operations tracing_free_buffer_fops = { .open = tracing_open_generic_tr, .write = tracing_free_buffer_write, .release = tracing_free_buffer_release, }; static const struct file_operations tracing_mark_fops = { .open = tracing_open_generic_tr, .write = tracing_mark_write, .llseek = generic_file_llseek, .release = tracing_release_generic_tr, }; static const struct file_operations tracing_mark_raw_fops = { .open = tracing_open_generic_tr, .write = tracing_mark_raw_write, .llseek = generic_file_llseek, .release = tracing_release_generic_tr, }; static const struct file_operations trace_clock_fops = { .open = tracing_clock_open, .read = seq_read, .llseek = seq_lseek, .release = tracing_single_release_tr, .write = tracing_clock_write, }; static const struct file_operations trace_time_stamp_mode_fops = { .open = tracing_time_stamp_mode_open, .read = seq_read, .llseek = seq_lseek, .release = tracing_single_release_tr, }; #ifdef CONFIG_TRACER_SNAPSHOT static const struct file_operations snapshot_fops = { .open = tracing_snapshot_open, .read = seq_read, .write = tracing_snapshot_write, .llseek = tracing_lseek, .release = tracing_snapshot_release, }; static const struct file_operations snapshot_raw_fops = { .open = snapshot_raw_open, .read = tracing_buffers_read, .release = tracing_buffers_release, .splice_read = tracing_buffers_splice_read, .llseek = no_llseek, }; #endif /* CONFIG_TRACER_SNAPSHOT */ static int tracing_buffers_open(struct inode *inode, struct file *filp) { struct trace_array *tr = inode->i_private; struct ftrace_buffer_info *info; int ret; if (tracing_disabled) return -ENODEV; if (trace_array_get(tr) < 0) return -ENODEV; info = kzalloc(sizeof(*info), GFP_KERNEL); if (!info) { trace_array_put(tr); return -ENOMEM; } mutex_lock(&trace_types_lock); info->iter.tr = tr; info->iter.cpu_file = tracing_get_cpu(inode); info->iter.trace = tr->current_trace; info->iter.trace_buffer = &tr->trace_buffer; info->spare = NULL; /* Force reading ring buffer for first read */ info->read = (unsigned int)-1; filp->private_data = info; tr->current_trace->ref++; mutex_unlock(&trace_types_lock); ret = nonseekable_open(inode, filp); if (ret < 0) trace_array_put(tr); return ret; } static __poll_t tracing_buffers_poll(struct file *filp, poll_table *poll_table) { struct ftrace_buffer_info *info = filp->private_data; struct trace_iterator *iter = &info->iter; return trace_poll(iter, filp, poll_table); } static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf, size_t count, loff_t *ppos) { struct 
ftrace_buffer_info *info = filp->private_data; struct trace_iterator *iter = &info->iter; ssize_t ret = 0; ssize_t size; if (!count) return 0; #ifdef CONFIG_TRACER_MAX_TRACE if (iter->snapshot && iter->tr->current_trace->use_max_tr) return -EBUSY; #endif if (!info->spare) { info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer, iter->cpu_file); if (IS_ERR(info->spare)) { ret = PTR_ERR(info->spare); info->spare = NULL; } else { info->spare_cpu = iter->cpu_file; } } if (!info->spare) return ret; /* Do we have previous read data to read? */ if (info->read < PAGE_SIZE) goto read; again: trace_access_lock(iter->cpu_file); ret = ring_buffer_read_page(iter->trace_buffer->buffer, &info->spare, count, iter->cpu_file, 0); trace_access_unlock(iter->cpu_file); if (ret < 0) { if (trace_empty(iter)) { if ((filp->f_flags & O_NONBLOCK)) return -EAGAIN; ret = wait_on_pipe(iter, 0); if (ret) return ret; goto again; } return 0; } info->read = 0; read: size = PAGE_SIZE - info->read; if (size > count) size = count; ret = copy_to_user(ubuf, info->spare + info->read, size); if (ret == size) return -EFAULT; size -= ret; *ppos += size; info->read += size; return size; } static int tracing_buffers_release(struct inode *inode, struct file *file) { struct ftrace_buffer_info *info = file->private_data; struct trace_iterator *iter = &info->iter; mutex_lock(&trace_types_lock); iter->tr->current_trace->ref--; __trace_array_put(iter->tr); if (info->spare) ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare_cpu, info->spare); kfree(info); mutex_unlock(&trace_types_lock); return 0; } struct buffer_ref { struct ring_buffer *buffer; void *page; int cpu; int ref; }; static void buffer_pipe_buf_release(struct pipe_inode_info *pipe, struct pipe_buffer *buf) { struct buffer_ref *ref = (struct buffer_ref *)buf->private; if (--ref->ref) return; ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page); kfree(ref); buf->private = 0; } static void buffer_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf) { struct buffer_ref *ref = (struct buffer_ref *)buf->private; ref->ref++; } /* Pipe buffer operations for a buffer. */ static const struct pipe_buf_operations buffer_pipe_buf_ops = { .can_merge = 0, .confirm = generic_pipe_buf_confirm, .release = buffer_pipe_buf_release, .steal = generic_pipe_buf_steal, .get = buffer_pipe_buf_get, }; /* * Callback from splice_to_pipe(), if we need to release some pages * at the end of the spd in case we error'ed out in filling the pipe. 
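 *
 * Each buffer_ref starts out with ref == 1 (set in
 * tracing_buffers_splice_read()); buffer_pipe_buf_get() and
 * buffer_pipe_buf_release() then track duplication within the pipe,
 * and the page is handed back to the ring buffer only when the count
 * drops to zero. Note that a plain int carries no overflow protection
 * here; a refcount_t would be the safer type for this job.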
*/ static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i) { struct buffer_ref *ref = (struct buffer_ref *)spd->partial[i].private; if (--ref->ref) return; ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page); kfree(ref); spd->partial[i].private = 0; } static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos, struct pipe_inode_info *pipe, size_t len, unsigned int flags) { struct ftrace_buffer_info *info = file->private_data; struct trace_iterator *iter = &info->iter; struct partial_page partial_def[PIPE_DEF_BUFFERS]; struct page *pages_def[PIPE_DEF_BUFFERS]; struct splice_pipe_desc spd = { .pages = pages_def, .partial = partial_def, .nr_pages_max = PIPE_DEF_BUFFERS, .ops = &buffer_pipe_buf_ops, .spd_release = buffer_spd_release, }; struct buffer_ref *ref; int entries, i; ssize_t ret = 0; #ifdef CONFIG_TRACER_MAX_TRACE if (iter->snapshot && iter->tr->current_trace->use_max_tr) return -EBUSY; #endif if (*ppos & (PAGE_SIZE - 1)) return -EINVAL; if (len & (PAGE_SIZE - 1)) { if (len < PAGE_SIZE) return -EINVAL; len &= PAGE_MASK; } if (splice_grow_spd(pipe, &spd)) return -ENOMEM; again: trace_access_lock(iter->cpu_file); entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file); for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) { struct page *page; int r; ref = kzalloc(sizeof(*ref), GFP_KERNEL); if (!ref) { ret = -ENOMEM; break; } ref->ref = 1; ref->buffer = iter->trace_buffer->buffer; ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file); if (IS_ERR(ref->page)) { ret = PTR_ERR(ref->page); ref->page = NULL; kfree(ref); break; } ref->cpu = iter->cpu_file; r = ring_buffer_read_page(ref->buffer, &ref->page, len, iter->cpu_file, 1); if (r < 0) { ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page); kfree(ref); break; } page = virt_to_page(ref->page); spd.pages[i] = page; spd.partial[i].len = PAGE_SIZE; spd.partial[i].offset = 0; spd.partial[i].private = (unsigned long)ref; spd.nr_pages++; *ppos += PAGE_SIZE; entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file); } trace_access_unlock(iter->cpu_file); spd.nr_pages = i; /* did we read anything? 
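 * If not: return any error collected above, give -EAGAIN to
 * non-blocking callers, otherwise wait for the buffer_percent
 * watermark and retry.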
*/ if (!spd.nr_pages) { if (ret) goto out; ret = -EAGAIN; if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK)) goto out; ret = wait_on_pipe(iter, iter->tr->buffer_percent); if (ret) goto out; goto again; } ret = splice_to_pipe(pipe, &spd); out: splice_shrink_spd(&spd); return ret; } static const struct file_operations tracing_buffers_fops = { .open = tracing_buffers_open, .read = tracing_buffers_read, .poll = tracing_buffers_poll, .release = tracing_buffers_release, .splice_read = tracing_buffers_splice_read, .llseek = no_llseek, }; static ssize_t tracing_stats_read(struct file *filp, char __user *ubuf, size_t count, loff_t *ppos) { struct inode *inode = file_inode(filp); struct trace_array *tr = inode->i_private; struct trace_buffer *trace_buf = &tr->trace_buffer; int cpu = tracing_get_cpu(inode); struct trace_seq *s; unsigned long cnt; unsigned long long t; unsigned long usec_rem; s = kmalloc(sizeof(*s), GFP_KERNEL); if (!s) return -ENOMEM; trace_seq_init(s); cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu); trace_seq_printf(s, "entries: %ld\n", cnt); cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu); trace_seq_printf(s, "overrun: %ld\n", cnt); cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu); trace_seq_printf(s, "commit overrun: %ld\n", cnt); cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu); trace_seq_printf(s, "bytes: %ld\n", cnt); if (trace_clocks[tr->clock_id].in_ns) { /* local or global for trace_clock */ t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu)); usec_rem = do_div(t, USEC_PER_SEC); trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n", t, usec_rem); t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu)); usec_rem = do_div(t, USEC_PER_SEC); trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem); } else { /* counter or tsc mode for trace_clock */ trace_seq_printf(s, "oldest event ts: %llu\n", ring_buffer_oldest_event_ts(trace_buf->buffer, cpu)); trace_seq_printf(s, "now ts: %llu\n", ring_buffer_time_stamp(trace_buf->buffer, cpu)); } cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu); trace_seq_printf(s, "dropped events: %ld\n", cnt); cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu); trace_seq_printf(s, "read events: %ld\n", cnt); count = simple_read_from_buffer(ubuf, count, ppos, s->buffer, trace_seq_used(s)); kfree(s); return count; } static const struct file_operations tracing_stats_fops = { .open = tracing_open_generic_tr, .read = tracing_stats_read, .llseek = generic_file_llseek, .release = tracing_release_generic_tr, }; #ifdef CONFIG_DYNAMIC_FTRACE static ssize_t tracing_read_dyn_info(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { unsigned long *p = filp->private_data; char buf[64]; /* Not too big for a shallow stack */ int r; r = scnprintf(buf, 63, "%ld", *p); buf[r++] = '\n'; return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); } static const struct file_operations tracing_dyn_info_fops = { .open = tracing_open_generic, .read = tracing_read_dyn_info, .llseek = generic_file_llseek, }; #endif /* CONFIG_DYNAMIC_FTRACE */ #if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) static void ftrace_snapshot(unsigned long ip, unsigned long parent_ip, struct trace_array *tr, struct ftrace_probe_ops *ops, void *data) { tracing_snapshot_instance(tr); } static void ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip, struct trace_array *tr, struct ftrace_probe_ops *ops, void *data) { struct ftrace_func_mapper *mapper = data; long *count = NULL; if 
(mapper) count = (long *)ftrace_func_mapper_find_ip(mapper, ip); if (count) { if (*count <= 0) return; (*count)--; } tracing_snapshot_instance(tr); } static int ftrace_snapshot_print(struct seq_file *m, unsigned long ip, struct ftrace_probe_ops *ops, void *data) { struct ftrace_func_mapper *mapper = data; long *count = NULL; seq_printf(m, "%ps:", (void *)ip); seq_puts(m, "snapshot"); if (mapper) count = (long *)ftrace_func_mapper_find_ip(mapper, ip); if (count) seq_printf(m, ":count=%ld\n", *count); else seq_puts(m, ":unlimited\n"); return 0; } static int ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr, unsigned long ip, void *init_data, void **data) { struct ftrace_func_mapper *mapper = *data; if (!mapper) { mapper = allocate_ftrace_func_mapper(); if (!mapper) return -ENOMEM; *data = mapper; } return ftrace_func_mapper_add_ip(mapper, ip, init_data); } static void ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr, unsigned long ip, void *data) { struct ftrace_func_mapper *mapper = data; if (!ip) { if (!mapper) return; free_ftrace_func_mapper(mapper, NULL); return; } ftrace_func_mapper_remove_ip(mapper, ip); } static struct ftrace_probe_ops snapshot_probe_ops = { .func = ftrace_snapshot, .print = ftrace_snapshot_print, }; static struct ftrace_probe_ops snapshot_count_probe_ops = { .func = ftrace_count_snapshot, .print = ftrace_snapshot_print, .init = ftrace_snapshot_init, .free = ftrace_snapshot_free, }; static int ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash, char *glob, char *cmd, char *param, int enable) { struct ftrace_probe_ops *ops; void *count = (void *)-1; char *number; int ret; if (!tr) return -ENODEV; /* hash funcs only work with set_ftrace_filter */ if (!enable) return -EINVAL; ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops; if (glob[0] == '!') return unregister_ftrace_function_probe_func(glob+1, tr, ops); if (!param) goto out_reg; number = strsep(&param, ":"); if (!strlen(number)) goto out_reg; /* * We use the callback data field (which is a pointer) * as our counter. */ ret = kstrtoul(number, 0, (unsigned long *)&count); if (ret) return ret; out_reg: ret = tracing_alloc_snapshot_instance(tr); if (ret < 0) goto out; ret = register_ftrace_function_probe(glob, tr, ops, count); out: return ret < 0 ? 
ret : 0; } static struct ftrace_func_command ftrace_snapshot_cmd = { .name = "snapshot", .func = ftrace_trace_snapshot_callback, }; static __init int register_snapshot_cmd(void) { return register_ftrace_command(&ftrace_snapshot_cmd); } #else static inline __init int register_snapshot_cmd(void) { return 0; } #endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */ static struct dentry *tracing_get_dentry(struct trace_array *tr) { if (WARN_ON(!tr->dir)) return ERR_PTR(-ENODEV); /* Top directory uses NULL as the parent */ if (tr->flags & TRACE_ARRAY_FL_GLOBAL) return NULL; /* All sub buffers have a descriptor */ return tr->dir; } static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu) { struct dentry *d_tracer; if (tr->percpu_dir) return tr->percpu_dir; d_tracer = tracing_get_dentry(tr); if (IS_ERR(d_tracer)) return NULL; tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer); WARN_ONCE(!tr->percpu_dir, "Could not create tracefs directory 'per_cpu/%d'\n", cpu); return tr->percpu_dir; } static struct dentry * trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent, void *data, long cpu, const struct file_operations *fops) { struct dentry *ret = trace_create_file(name, mode, parent, data, fops); if (ret) /* See tracing_get_cpu() */ d_inode(ret)->i_cdev = (void *)(cpu + 1); return ret; } static void tracing_init_tracefs_percpu(struct trace_array *tr, long cpu) { struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu); struct dentry *d_cpu; char cpu_dir[30]; /* 30 characters should be more than enough */ if (!d_percpu) return; snprintf(cpu_dir, 30, "cpu%ld", cpu); d_cpu = tracefs_create_dir(cpu_dir, d_percpu); if (!d_cpu) { pr_warn("Could not create tracefs '%s' entry\n", cpu_dir); return; } /* per cpu trace_pipe */ trace_create_cpu_file("trace_pipe", 0444, d_cpu, tr, cpu, &tracing_pipe_fops); /* per cpu trace */ trace_create_cpu_file("trace", 0644, d_cpu, tr, cpu, &tracing_fops); trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu, tr, cpu, &tracing_buffers_fops); trace_create_cpu_file("stats", 0444, d_cpu, tr, cpu, &tracing_stats_fops); trace_create_cpu_file("buffer_size_kb", 0444, d_cpu, tr, cpu, &tracing_entries_fops); #ifdef CONFIG_TRACER_SNAPSHOT trace_create_cpu_file("snapshot", 0644, d_cpu, tr, cpu, &snapshot_fops); trace_create_cpu_file("snapshot_raw", 0444, d_cpu, tr, cpu, &snapshot_raw_fops); #endif } #ifdef CONFIG_FTRACE_SELFTEST /* Let selftest have access to static functions in this file */ #include "trace_selftest.c" #endif static ssize_t trace_options_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { struct trace_option_dentry *topt = filp->private_data; char *buf; if (topt->flags->val & topt->opt->bit) buf = "1\n"; else buf = "0\n"; return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2); } static ssize_t trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { struct trace_option_dentry *topt = filp->private_data; unsigned long val; int ret; ret = kstrtoul_from_user(ubuf, cnt, 10, &val); if (ret) return ret; if (val != 0 && val != 1) return -EINVAL; if (!!(topt->flags->val & topt->opt->bit) != val) { mutex_lock(&trace_types_lock); ret = __set_tracer_option(topt->tr, topt->flags, topt->opt, !val); mutex_unlock(&trace_types_lock); if (ret) return ret; } *ppos += cnt; return cnt; } static const struct file_operations trace_options_fops = { .open = tracing_open_generic, .read = trace_options_read, .write = trace_options_write, .llseek = generic_file_llseek, }; /* 
* In order to pass in both the trace_array descriptor as well as the index * to the flag that the trace option file represents, the trace_array * has a character array of trace_flags_index[], which holds the index * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc. * The address of this character array is passed to the flag option file * read/write callbacks. * * In order to extract both the index and the trace_array descriptor, * get_tr_index() uses the following algorithm. * * idx = *ptr; * * As the pointer itself contains the address of the index (remember * index[1] == 1). * * Then to get the trace_array descriptor, by subtracting that index * from the ptr, we get to the start of the index itself. * * ptr - idx == &index[0] * * Then a simple container_of() from that pointer gets us to the * trace_array descriptor. */ static void get_tr_index(void *data, struct trace_array **ptr, unsigned int *pindex) { *pindex = *(unsigned char *)data; *ptr = container_of(data - *pindex, struct trace_array, trace_flags_index); } static ssize_t trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { void *tr_index = filp->private_data; struct trace_array *tr; unsigned int index; char *buf; get_tr_index(tr_index, &tr, &index); if (tr->trace_flags & (1 << index)) buf = "1\n"; else buf = "0\n"; return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2); } static ssize_t trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { void *tr_index = filp->private_data; struct trace_array *tr; unsigned int index; unsigned long val; int ret; get_tr_index(tr_index, &tr, &index); ret = kstrtoul_from_user(ubuf, cnt, 10, &val); if (ret) return ret; if (val != 0 && val != 1) return -EINVAL; mutex_lock(&trace_types_lock); ret = set_tracer_flag(tr, 1 << index, val); mutex_unlock(&trace_types_lock); if (ret < 0) return ret; *ppos += cnt; return cnt; } static const struct file_operations trace_options_core_fops = { .open = tracing_open_generic, .read = trace_options_core_read, .write = trace_options_core_write, .llseek = generic_file_llseek, }; struct dentry *trace_create_file(const char *name, umode_t mode, struct dentry *parent, void *data, const struct file_operations *fops) { struct dentry *ret; ret = tracefs_create_file(name, mode, parent, data, fops); if (!ret) pr_warn("Could not create tracefs '%s' entry\n", name); return ret; } static struct dentry *trace_options_init_dentry(struct trace_array *tr) { struct dentry *d_tracer; if (tr->options) return tr->options; d_tracer = tracing_get_dentry(tr); if (IS_ERR(d_tracer)) return NULL; tr->options = tracefs_create_dir("options", d_tracer); if (!tr->options) { pr_warn("Could not create tracefs directory 'options'\n"); return NULL; } return tr->options; } static void create_trace_option_file(struct trace_array *tr, struct trace_option_dentry *topt, struct tracer_flags *flags, struct tracer_opt *opt) { struct dentry *t_options; t_options = trace_options_init_dentry(tr); if (!t_options) return; topt->flags = flags; topt->opt = opt; topt->tr = tr; topt->entry = trace_create_file(opt->name, 0644, t_options, topt, &trace_options_fops); } static void create_trace_option_files(struct trace_array *tr, struct tracer *tracer) { struct trace_option_dentry *topts; struct trace_options *tr_topts; struct tracer_flags *flags; struct tracer_opt *opts; int cnt; int i; if (!tracer) return; flags = tracer->flags; if (!flags || !flags->opts) return; /* * If this is an instance, only create flags for 
tracers * the instance may have. */ if (!trace_ok_for_array(tracer, tr)) return; for (i = 0; i < tr->nr_topts; i++) { /* Make sure there's no duplicate flags. */ if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags)) return; } opts = flags->opts; for (cnt = 0; opts[cnt].name; cnt++) ; topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL); if (!topts) return; tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1), GFP_KERNEL); if (!tr_topts) { kfree(topts); return; } tr->topts = tr_topts; tr->topts[tr->nr_topts].tracer = tracer; tr->topts[tr->nr_topts].topts = topts; tr->nr_topts++; for (cnt = 0; opts[cnt].name; cnt++) { create_trace_option_file(tr, &topts[cnt], flags, &opts[cnt]); WARN_ONCE(topts[cnt].entry == NULL, "Failed to create trace option: %s", opts[cnt].name); } } static struct dentry * create_trace_option_core_file(struct trace_array *tr, const char *option, long index) { struct dentry *t_options; t_options = trace_options_init_dentry(tr); if (!t_options) return NULL; return trace_create_file(option, 0644, t_options, (void *)&tr->trace_flags_index[index], &trace_options_core_fops); } static void create_trace_options_dir(struct trace_array *tr) { struct dentry *t_options; bool top_level = tr == &global_trace; int i; t_options = trace_options_init_dentry(tr); if (!t_options) return; for (i = 0; trace_options[i]; i++) { if (top_level || !((1 << i) & TOP_LEVEL_TRACE_FLAGS)) create_trace_option_core_file(tr, trace_options[i], i); } } static ssize_t rb_simple_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { struct trace_array *tr = filp->private_data; char buf[64]; int r; r = tracer_tracing_is_on(tr); r = sprintf(buf, "%d\n", r); return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); } static ssize_t rb_simple_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { struct trace_array *tr = filp->private_data; struct ring_buffer *buffer = tr->trace_buffer.buffer; unsigned long val; int ret; ret = kstrtoul_from_user(ubuf, cnt, 10, &val); if (ret) return ret; if (buffer) { mutex_lock(&trace_types_lock); if (!!val == tracer_tracing_is_on(tr)) { val = 0; /* do nothing */ } else if (val) { tracer_tracing_on(tr); if (tr->current_trace->start) tr->current_trace->start(tr); } else { tracer_tracing_off(tr); if (tr->current_trace->stop) tr->current_trace->stop(tr); } mutex_unlock(&trace_types_lock); } (*ppos)++; return cnt; } static const struct file_operations rb_simple_fops = { .open = tracing_open_generic_tr, .read = rb_simple_read, .write = rb_simple_write, .release = tracing_release_generic_tr, .llseek = default_llseek, }; static ssize_t buffer_percent_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { struct trace_array *tr = filp->private_data; char buf[64]; int r; r = tr->buffer_percent; r = sprintf(buf, "%d\n", r); return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); } static ssize_t buffer_percent_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { struct trace_array *tr = filp->private_data; unsigned long val; int ret; ret = kstrtoul_from_user(ubuf, cnt, 10, &val); if (ret) return ret; if (val > 100) return -EINVAL; if (!val) val = 1; tr->buffer_percent = val; (*ppos)++; return cnt; } static const struct file_operations buffer_percent_fops = { .open = tracing_open_generic_tr, .read = buffer_percent_read, .write = buffer_percent_write, .release = tracing_release_generic_tr, .llseek = default_llseek, }; struct dentry *trace_instance_dir; static void 
init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer); static int allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size) { enum ring_buffer_flags rb_flags; rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0; buf->tr = tr; buf->buffer = ring_buffer_alloc(size, rb_flags); if (!buf->buffer) return -ENOMEM; buf->data = alloc_percpu(struct trace_array_cpu); if (!buf->data) { ring_buffer_free(buf->buffer); buf->buffer = NULL; return -ENOMEM; } /* Allocate the first page for all buffers */ set_buffer_entries(&tr->trace_buffer, ring_buffer_size(tr->trace_buffer.buffer, 0)); return 0; } static int allocate_trace_buffers(struct trace_array *tr, int size) { int ret; ret = allocate_trace_buffer(tr, &tr->trace_buffer, size); if (ret) return ret; #ifdef CONFIG_TRACER_MAX_TRACE ret = allocate_trace_buffer(tr, &tr->max_buffer, allocate_snapshot ? size : 1); if (WARN_ON(ret)) { ring_buffer_free(tr->trace_buffer.buffer); tr->trace_buffer.buffer = NULL; free_percpu(tr->trace_buffer.data); tr->trace_buffer.data = NULL; return -ENOMEM; } tr->allocated_snapshot = allocate_snapshot; /* * Only the top level trace array gets its snapshot allocated * from the kernel command line. */ allocate_snapshot = false; #endif return 0; } static void free_trace_buffer(struct trace_buffer *buf) { if (buf->buffer) { ring_buffer_free(buf->buffer); buf->buffer = NULL; free_percpu(buf->data); buf->data = NULL; } } static void free_trace_buffers(struct trace_array *tr) { if (!tr) return; free_trace_buffer(&tr->trace_buffer); #ifdef CONFIG_TRACER_MAX_TRACE free_trace_buffer(&tr->max_buffer); #endif } static void init_trace_flags_index(struct trace_array *tr) { int i; /* Used by the trace options files */ for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) tr->trace_flags_index[i] = i; } static void __update_tracer_options(struct trace_array *tr) { struct tracer *t; for (t = trace_types; t; t = t->next) add_tracer_options(tr, t); } static void update_tracer_options(struct trace_array *tr) { mutex_lock(&trace_types_lock); __update_tracer_options(tr); mutex_unlock(&trace_types_lock); } static int instance_mkdir(const char *name) { struct trace_array *tr; int ret; mutex_lock(&event_mutex); mutex_lock(&trace_types_lock); ret = -EEXIST; list_for_each_entry(tr, &ftrace_trace_arrays, list) { if (tr->name && strcmp(tr->name, name) == 0) goto out_unlock; } ret = -ENOMEM; tr = kzalloc(sizeof(*tr), GFP_KERNEL); if (!tr) goto out_unlock; tr->name = kstrdup(name, GFP_KERNEL); if (!tr->name) goto out_free_tr; if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL)) goto out_free_tr; tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS; cpumask_copy(tr->tracing_cpumask, cpu_all_mask); raw_spin_lock_init(&tr->start_lock); tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; tr->current_trace = &nop_trace; INIT_LIST_HEAD(&tr->systems); INIT_LIST_HEAD(&tr->events); INIT_LIST_HEAD(&tr->hist_vars); if (allocate_trace_buffers(tr, trace_buf_size) < 0) goto out_free_tr; tr->dir = tracefs_create_dir(name, trace_instance_dir); if (!tr->dir) goto out_free_tr; ret = event_trace_add_tracer(tr->dir, tr); if (ret) { tracefs_remove_recursive(tr->dir); goto out_free_tr; } ftrace_init_trace_array(tr); init_tracer_tracefs(tr, tr->dir); init_trace_flags_index(tr); __update_tracer_options(tr); list_add(&tr->list, &ftrace_trace_arrays); mutex_unlock(&trace_types_lock); mutex_unlock(&event_mutex); return 0; out_free_tr: free_trace_buffers(tr); free_cpumask_var(tr->tracing_cpumask); kfree(tr->name); 
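/* the trace_array itself goes last */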
kfree(tr); out_unlock: mutex_unlock(&trace_types_lock); mutex_unlock(&event_mutex); return ret; } static int instance_rmdir(const char *name) { struct trace_array *tr; int found = 0; int ret; int i; mutex_lock(&event_mutex); mutex_lock(&trace_types_lock); ret = -ENODEV; list_for_each_entry(tr, &ftrace_trace_arrays, list) { if (tr->name && strcmp(tr->name, name) == 0) { found = 1; break; } } if (!found) goto out_unlock; ret = -EBUSY; if (tr->ref || (tr->current_trace && tr->current_trace->ref)) goto out_unlock; list_del(&tr->list); /* Disable all the flags that were enabled coming in */ for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) { if ((1 << i) & ZEROED_TRACE_FLAGS) set_tracer_flag(tr, 1 << i, 0); } tracing_set_nop(tr); clear_ftrace_function_probes(tr); event_trace_del_tracer(tr); ftrace_clear_pids(tr); ftrace_destroy_function_files(tr); tracefs_remove_recursive(tr->dir); free_trace_buffers(tr); for (i = 0; i < tr->nr_topts; i++) { kfree(tr->topts[i].topts); } kfree(tr->topts); free_cpumask_var(tr->tracing_cpumask); kfree(tr->name); kfree(tr); ret = 0; out_unlock: mutex_unlock(&trace_types_lock); mutex_unlock(&event_mutex); return ret; } static __init void create_trace_instances(struct dentry *d_tracer) { trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer, instance_mkdir, instance_rmdir); if (WARN_ON(!trace_instance_dir)) return; } static void init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer) { struct trace_event_file *file; int cpu; trace_create_file("available_tracers", 0444, d_tracer, tr, &show_traces_fops); trace_create_file("current_tracer", 0644, d_tracer, tr, &set_tracer_fops); trace_create_file("tracing_cpumask", 0644, d_tracer, tr, &tracing_cpumask_fops); trace_create_file("trace_options", 0644, d_tracer, tr, &tracing_iter_fops); trace_create_file("trace", 0644, d_tracer, tr, &tracing_fops); trace_create_file("trace_pipe", 0444, d_tracer, tr, &tracing_pipe_fops); trace_create_file("buffer_size_kb", 0644, d_tracer, tr, &tracing_entries_fops); trace_create_file("buffer_total_size_kb", 0444, d_tracer, tr, &tracing_total_entries_fops); trace_create_file("free_buffer", 0200, d_tracer, tr, &tracing_free_buffer_fops); trace_create_file("trace_marker", 0220, d_tracer, tr, &tracing_mark_fops); file = __find_event_file(tr, "ftrace", "print"); if (file && file->dir) trace_create_file("trigger", 0644, file->dir, file, &event_trigger_fops); tr->trace_marker_file = file; trace_create_file("trace_marker_raw", 0220, d_tracer, tr, &tracing_mark_raw_fops); trace_create_file("trace_clock", 0644, d_tracer, tr, &trace_clock_fops); trace_create_file("tracing_on", 0644, d_tracer, tr, &rb_simple_fops); trace_create_file("timestamp_mode", 0444, d_tracer, tr, &trace_time_stamp_mode_fops); tr->buffer_percent = 50; trace_create_file("buffer_percent", 0444, d_tracer, tr, &buffer_percent_fops); create_trace_options_dir(tr); #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER) trace_create_file("tracing_max_latency", 0644, d_tracer, &tr->max_latency, &tracing_max_lat_fops); #endif if (ftrace_create_function_files(tr, d_tracer)) WARN(1, "Could not allocate function filter files"); #ifdef CONFIG_TRACER_SNAPSHOT trace_create_file("snapshot", 0644, d_tracer, tr, &snapshot_fops); #endif for_each_tracing_cpu(cpu) tracing_init_tracefs_percpu(tr, cpu); ftrace_init_tracefs(tr, d_tracer); } static struct vfsmount *trace_automount(struct dentry *mntpt, void *ignore) { struct vfsmount *mnt; struct file_system_type *type; /* * To maintain backward compatibility for tools
that mount * debugfs to get to the tracing facility, tracefs is automatically * mounted to the debugfs/tracing directory. */ type = get_fs_type("tracefs"); if (!type) return NULL; mnt = vfs_submount(mntpt, type, "tracefs", NULL); put_filesystem(type); if (IS_ERR(mnt)) return NULL; mntget(mnt); return mnt; } /** * tracing_init_dentry - initialize top level trace array * * This is called when creating files or directories in the tracing * directory. It is called via fs_initcall() by any of the boot up code * and expects to return the dentry of the top level tracing directory. */ struct dentry *tracing_init_dentry(void) { struct trace_array *tr = &global_trace; /* The top level trace array uses NULL as parent */ if (tr->dir) return NULL; if (WARN_ON(!tracefs_initialized()) || (IS_ENABLED(CONFIG_DEBUG_FS) && WARN_ON(!debugfs_initialized()))) return ERR_PTR(-ENODEV); /* * As there may still be users that expect the tracing * files to exist in debugfs/tracing, we must automount * the tracefs file system there, so older tools still * work with the newer kernel. */ tr->dir = debugfs_create_automount("tracing", NULL, trace_automount, NULL); if (!tr->dir) { pr_warn_once("Could not create debugfs directory 'tracing'\n"); return ERR_PTR(-ENOMEM); } return NULL; } extern struct trace_eval_map *__start_ftrace_eval_maps[]; extern struct trace_eval_map *__stop_ftrace_eval_maps[]; static void __init trace_eval_init(void) { int len; len = __stop_ftrace_eval_maps - __start_ftrace_eval_maps; trace_insert_eval_map(NULL, __start_ftrace_eval_maps, len); } #ifdef CONFIG_MODULES static void trace_module_add_evals(struct module *mod) { if (!mod->num_trace_evals) return; /* * Modules with bad taint do not have events created, do * not bother with enums either. */ if (trace_module_has_bad_taint(mod)) return; trace_insert_eval_map(mod, mod->trace_evals, mod->num_trace_evals); } #ifdef CONFIG_TRACE_EVAL_MAP_FILE static void trace_module_remove_evals(struct module *mod) { union trace_eval_map_item *map; union trace_eval_map_item **last = &trace_eval_maps; if (!mod->num_trace_evals) return; mutex_lock(&trace_eval_mutex); map = trace_eval_maps; while (map) { if (map->head.mod == mod) break; map = trace_eval_jmp_to_tail(map); last = &map->tail.next; map = map->tail.next; } if (!map) goto out; *last = trace_eval_jmp_to_tail(map)->tail.next; kfree(map); out: mutex_unlock(&trace_eval_mutex); } #else static inline void trace_module_remove_evals(struct module *mod) { } #endif /* CONFIG_TRACE_EVAL_MAP_FILE */ static int trace_module_notify(struct notifier_block *self, unsigned long val, void *data) { struct module *mod = data; switch (val) { case MODULE_STATE_COMING: trace_module_add_evals(mod); break; case MODULE_STATE_GOING: trace_module_remove_evals(mod); break; } return 0; } static struct notifier_block trace_module_nb = { .notifier_call = trace_module_notify, .priority = 0, }; #endif /* CONFIG_MODULES */ static __init int tracer_init_tracefs(void) { struct dentry *d_tracer; trace_access_lock_init(); d_tracer = tracing_init_dentry(); if (IS_ERR(d_tracer)) return 0; event_trace_init(); init_tracer_tracefs(&global_trace, d_tracer); ftrace_init_tracefs_toplevel(&global_trace, d_tracer); trace_create_file("tracing_thresh", 0644, d_tracer, &global_trace, &tracing_thresh_fops); trace_create_file("README", 0444, d_tracer, NULL, &tracing_readme_fops); trace_create_file("saved_cmdlines", 0444, d_tracer, NULL, &tracing_saved_cmdlines_fops); trace_create_file("saved_cmdlines_size", 0644, d_tracer, NULL,
&tracing_saved_cmdlines_size_fops); trace_create_file("saved_tgids", 0444, d_tracer, NULL, &tracing_saved_tgids_fops); trace_eval_init(); trace_create_eval_file(d_tracer); #ifdef CONFIG_MODULES register_module_notifier(&trace_module_nb); #endif #ifdef CONFIG_DYNAMIC_FTRACE trace_create_file("dyn_ftrace_total_info", 0444, d_tracer, &ftrace_update_tot_cnt, &tracing_dyn_info_fops); #endif create_trace_instances(d_tracer); update_tracer_options(&global_trace); return 0; } static int trace_panic_handler(struct notifier_block *this, unsigned long event, void *unused) { if (ftrace_dump_on_oops) ftrace_dump(ftrace_dump_on_oops); return NOTIFY_OK; } static struct notifier_block trace_panic_notifier = { .notifier_call = trace_panic_handler, .next = NULL, .priority = 150 /* priority: INT_MAX >= x >= 0 */ }; static int trace_die_handler(struct notifier_block *self, unsigned long val, void *data) { switch (val) { case DIE_OOPS: if (ftrace_dump_on_oops) ftrace_dump(ftrace_dump_on_oops); break; default: break; } return NOTIFY_OK; } static struct notifier_block trace_die_notifier = { .notifier_call = trace_die_handler, .priority = 200 }; /* * printk is set to max of 1024, we really don't need it that big. * Nothing should be printing 1000 characters anyway. */ #define TRACE_MAX_PRINT 1000 /* * Define here KERN_TRACE so that we have one place to modify * it if we decide to change what log level the ftrace dump * should be at. */ #define KERN_TRACE KERN_EMERG void trace_printk_seq(struct trace_seq *s) { /* Probably should print a warning here. */ if (s->seq.len >= TRACE_MAX_PRINT) s->seq.len = TRACE_MAX_PRINT; /* * More paranoid code. Although the buffer size is set to * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just * an extra layer of protection. */ if (WARN_ON_ONCE(s->seq.len >= s->seq.size)) s->seq.len = s->seq.size - 1; /* should be zero ended, but we are paranoid. */ s->buffer[s->seq.len] = 0; printk(KERN_TRACE "%s", s->buffer); trace_seq_init(s); } void trace_init_global_iter(struct trace_iterator *iter) { iter->tr = &global_trace; iter->trace = iter->tr->current_trace; iter->cpu_file = RING_BUFFER_ALL_CPUS; iter->trace_buffer = &global_trace.trace_buffer; if (iter->trace && iter->trace->open) iter->trace->open(iter); /* Annotate start of buffers if we had overruns */ if (ring_buffer_overruns(iter->trace_buffer->buffer)) iter->iter_flags |= TRACE_FILE_ANNOTATE; /* Output in nanoseconds only if we are using a clock in nanoseconds. */ if (trace_clocks[iter->tr->clock_id].in_ns) iter->iter_flags |= TRACE_FILE_TIME_IN_NS; } void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { /* use static because iter can be a bit big for the stack */ static struct trace_iterator iter; static atomic_t dump_running; struct trace_array *tr = &global_trace; unsigned int old_userobj; unsigned long flags; int cnt = 0, cpu; /* Only allow one dump user at a time. */ if (atomic_inc_return(&dump_running) != 1) { atomic_dec(&dump_running); return; } /* * Always turn off tracing when we dump. * We don't need to show trace output of what happens * between multiple crashes. * * If the user does a sysrq-z, then they can re-enable * tracing with echo 1 > tracing_on. 
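 *
 * (The dump_running counter above is what restricts this path to a
 * single dumper at a time.)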
*/ tracing_off(); local_irq_save(flags); printk_nmi_direct_enter(); /* Simulate the iterator */ trace_init_global_iter(&iter); for_each_tracing_cpu(cpu) { atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled); } old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ; /* don't look at user memory in panic mode */ tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ; switch (oops_dump_mode) { case DUMP_ALL: iter.cpu_file = RING_BUFFER_ALL_CPUS; break; case DUMP_ORIG: iter.cpu_file = raw_smp_processor_id(); break; case DUMP_NONE: goto out_enable; default: printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n"); iter.cpu_file = RING_BUFFER_ALL_CPUS; } printk(KERN_TRACE "Dumping ftrace buffer:\n"); /* Did function tracer already get disabled? */ if (ftrace_is_dead()) { printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n"); printk("# MAY BE MISSING FUNCTION EVENTS\n"); } /* * We need to stop all tracing on all CPUS to read * the next buffer. This is a bit expensive, but is * not done often. We fill all that we can read, * and then release the locks again. */ while (!trace_empty(&iter)) { if (!cnt) printk(KERN_TRACE "---------------------------------\n"); cnt++; /* reset all but tr, trace, and overruns */ memset(&iter.seq, 0, sizeof(struct trace_iterator) - offsetof(struct trace_iterator, seq)); iter.iter_flags |= TRACE_FILE_LAT_FMT; iter.pos = -1; if (trace_find_next_entry_inc(&iter) != NULL) { int ret; ret = print_trace_line(&iter); if (ret != TRACE_TYPE_NO_CONSUME) trace_consume(&iter); } touch_nmi_watchdog(); trace_printk_seq(&iter.seq); } if (!cnt) printk(KERN_TRACE " (ftrace buffer empty)\n"); else printk(KERN_TRACE "---------------------------------\n"); out_enable: tr->trace_flags |= old_userobj; for_each_tracing_cpu(cpu) { atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled); } atomic_dec(&dump_running); printk_nmi_direct_exit(); local_irq_restore(flags); } EXPORT_SYMBOL_GPL(ftrace_dump); int trace_run_command(const char *buf, int (*createfn)(int, char **)) { char **argv; int argc, ret; argc = 0; ret = 0; argv = argv_split(GFP_KERNEL, buf, &argc); if (!argv) return -ENOMEM; if (argc) ret = createfn(argc, argv); argv_free(argv); return ret; } #define WRITE_BUFSIZE 4096 ssize_t trace_parse_run_command(struct file *file, const char __user *buffer, size_t count, loff_t *ppos, int (*createfn)(int, char **)) { char *kbuf, *buf, *tmp; int ret = 0; size_t done = 0; size_t size; kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL); if (!kbuf) return -ENOMEM; while (done < count) { size = count - done; if (size >= WRITE_BUFSIZE) size = WRITE_BUFSIZE - 1; if (copy_from_user(kbuf, buffer + done, size)) { ret = -EFAULT; goto out; } kbuf[size] = '\0'; buf = kbuf; do { tmp = strchr(buf, '\n'); if (tmp) { *tmp = '\0'; size = tmp - buf + 1; } else { size = strlen(buf); if (done + size < count) { if (buf != kbuf) break; /* This can accept WRITE_BUFSIZE - 2 ('\n' + '\0') */ pr_warn("Line length is too long: Should be less than %d\n", WRITE_BUFSIZE - 2); ret = -EINVAL; goto out; } } done += size; /* Remove comments */ tmp = strchr(buf, '#'); if (tmp) *tmp = '\0'; ret = trace_run_command(buf, createfn); if (ret) goto out; buf += size; } while (done < count); } ret = done; out: kfree(kbuf); return ret; } __init static int tracer_alloc_buffers(void) { int ring_buf_size; int ret = -ENOMEM; /* * Make sure we don't accidentally add more trace options * than we have bits for.
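 * (The BUILD_BUG_ON() just below enforces this at compile time.)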
*/ BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE); if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL)) goto out; if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL)) goto out_free_buffer_mask; /* Only allocate trace_printk buffers if a trace_printk exists */ if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt) /* Must be called before global_trace.buffer is allocated */ trace_printk_init_buffers(); /* To save memory, keep the ring buffer size to its minimum */ if (ring_buffer_expanded) ring_buf_size = trace_buf_size; else ring_buf_size = 1; cpumask_copy(tracing_buffer_mask, cpu_possible_mask); cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask); raw_spin_lock_init(&global_trace.start_lock); /* * The prepare callbacks allocate some memory for the ring buffer. We * don't free the buffer if the CPU goes down. If we were to free * the buffer, then the user would lose any trace that was in the * buffer. The memory will be removed once the "instance" is removed. */ ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE, "trace/RB:preapre", trace_rb_cpu_prepare, NULL); if (ret < 0) goto out_free_cpumask; /* Used for event triggers */ ret = -ENOMEM; temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE); if (!temp_buffer) goto out_rm_hp_state; if (trace_create_savedcmd() < 0) goto out_free_temp_buffer; /* TODO: make the number of buffers hot pluggable with CPUS */ if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) { printk(KERN_ERR "tracer: failed to allocate ring buffer!\n"); WARN_ON(1); goto out_free_savedcmd; } if (global_trace.buffer_disabled) tracing_off(); if (trace_boot_clock) { ret = tracing_set_clock(&global_trace, trace_boot_clock); if (ret < 0) pr_warn("Trace clock %s not defined, going back to default\n", trace_boot_clock); } /* * register_tracer() might reference current_trace, so it * needs to be set before we register anything. This is * just a bootstrap of current_trace anyway. */ global_trace.current_trace = &nop_trace; global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; ftrace_init_global_array_ops(&global_trace); init_trace_flags_index(&global_trace); register_tracer(&nop_trace); /* Function tracing may start here (via kernel command line) */ init_function_trace(); /* All seems OK, enable tracing */ tracing_disabled = 0; atomic_notifier_chain_register(&panic_notifier_list, &trace_panic_notifier); register_die_notifier(&trace_die_notifier); global_trace.flags = TRACE_ARRAY_FL_GLOBAL; INIT_LIST_HEAD(&global_trace.systems); INIT_LIST_HEAD(&global_trace.events); INIT_LIST_HEAD(&global_trace.hist_vars); list_add(&global_trace.list, &ftrace_trace_arrays); apply_trace_boot_options(); register_snapshot_cmd(); return 0; out_free_savedcmd: free_saved_cmdlines_buffer(savedcmd); out_free_temp_buffer: ring_buffer_free(temp_buffer); out_rm_hp_state: cpuhp_remove_multi_state(CPUHP_TRACE_RB_PREPARE); out_free_cpumask: free_cpumask_var(global_trace.tracing_cpumask); out_free_buffer_mask: free_cpumask_var(tracing_buffer_mask); out: return ret; } void __init early_trace_init(void) { if (tracepoint_printk) { tracepoint_print_iter = kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL); if (WARN_ON(!tracepoint_print_iter)) tracepoint_printk = 0; else static_key_enable(&tracepoint_printk_key.key); } tracer_alloc_buffers(); } void __init trace_init(void) { trace_event_init(); } __init static int clear_boot_tracer(void) { /* * The default tracer at boot buffer is an init section. * This function is called in lateinit.
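 * (More precisely, via late_initcall_sync(), after all built-in
 * tracers have had a chance to register.)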
If we did not * find the boot tracer, then clear it out, to prevent * later registration from accessing the buffer that is * about to be freed. */ if (!default_bootup_tracer) return 0; printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n", default_bootup_tracer); default_bootup_tracer = NULL; return 0; } fs_initcall(tracer_init_tracefs); late_initcall_sync(clear_boot_tracer); #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK __init static int tracing_set_default_clock(void) { /* sched_clock_stable() is determined in late_initcall */ if (!trace_boot_clock && !sched_clock_stable()) { printk(KERN_WARNING "Unstable clock detected, switching default tracing clock to \"global\"\n" "If you want to keep using the local clock, then add:\n" " \"trace_clock=local\"\n" "on the kernel command line\n"); tracing_set_clock(&global_trace, "global"); } return 0; } late_initcall_sync(tracing_set_default_clock); #endif
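/*
 * Illustrative userspace sketch (not part of the kernel source): a
 * minimal example of driving the tracefs files implemented above --
 * "tracing_on" (handled by rb_simple_write()), "trace_marker"
 * (tracing_mark_write()) and the "instances" directory
 * (instance_mkdir()). It assumes tracefs is mounted at
 * /sys/kernel/tracing and that the caller has sufficient privileges;
 * error handling is deliberately minimal.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>

static int write_tracefs(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);

	if (fd < 0)
		return -1;
	if (write(fd, val, strlen(val)) < 0) {
		close(fd);
		return -1;
	}
	return close(fd);
}

int main(void)
{
	/* flips the ring buffer on via rb_simple_write() */
	write_tracefs("/sys/kernel/tracing/tracing_on", "1");

	/* copied into the ring buffer by tracing_mark_write() */
	write_tracefs("/sys/kernel/tracing/trace_marker", "hello from userspace\n");

	/* a mkdir under "instances" is serviced by instance_mkdir() */
	if (mkdir("/sys/kernel/tracing/instances/demo", 0755) == 0)
		printf("created trace instance 'demo'\n");

	return 0;
}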
// SPDX-License-Identifier: GPL-2.0 /* * ring buffer based function tracer * * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com> * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com> * * Originally taken from the RT patch by: * Arnaldo Carvalho de Melo <acme@redhat.com> * * Based on code from the latency_tracer, that is: * Copyright (C) 2004-2006 Ingo Molnar * Copyright (C) 2004 Nadia Yvette Chambers */ #include <linux/ring_buffer.h> #include <generated/utsrelease.h> #include <linux/stacktrace.h> #include <linux/writeback.h> #include <linux/kallsyms.h> #include <linux/seq_file.h> #include <linux/notifier.h> #include <linux/irqflags.h> #include <linux/debugfs.h> #include <linux/tracefs.h> #include <linux/pagemap.h> #include <linux/hardirq.h> #include <linux/linkage.h> #include <linux/uaccess.h> #include <linux/vmalloc.h> #include <linux/ftrace.h> #include <linux/module.h> #include <linux/percpu.h> #include <linux/splice.h> #include <linux/kdebug.h> #include <linux/string.h> #include <linux/mount.h> #include <linux/rwsem.h> #include <linux/slab.h> #include <linux/ctype.h> #include <linux/init.h> #include <linux/poll.h> #include <linux/nmi.h> #include <linux/fs.h> #include <linux/trace.h> #include <linux/sched/clock.h> #include <linux/sched/rt.h> #include "trace.h" #include "trace_output.h" /* * On boot up, the ring buffer is set to the minimum size, so that * we do not waste memory on systems that are not using tracing. */ bool ring_buffer_expanded; /* * We need to change this state when a selftest is running. * A selftest will look into the ring-buffer to count the * entries inserted during the selftest although some concurrent * insertions into the ring-buffer such as trace_printk could occur * at the same time, giving false positive or negative results. */ static bool __read_mostly tracing_selftest_running; /* * If a tracer is running, we do not want to run SELFTEST. */ bool __read_mostly tracing_selftest_disabled; /* Pipe tracepoints to printk */ struct trace_iterator *tracepoint_print_iter; int tracepoint_printk; static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key); /* For tracers that don't implement custom flags */ static struct tracer_opt dummy_tracer_opt[] = { { } }; static int dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set) { return 0; } /* * To prevent the comm cache from being overwritten when no * tracing is active, only save the comm when a trace event * occurred. */ static DEFINE_PER_CPU(bool, trace_taskinfo_save); /* * Kill all tracing for good (never come back). * It is initialized to 1 but will turn to zero if the initialization * of the tracer is successful. But that is the only place that sets * this back to zero. */ static int tracing_disabled = 1; cpumask_var_t __read_mostly tracing_buffer_mask; /* * ftrace_dump_on_oops - variable to dump ftrace buffer on oops * * If there is an oops (or kernel panic) and the ftrace_dump_on_oops * is set, then ftrace_dump is called. This will output the contents * of the ftrace buffers to the console. This is very useful for * capturing traces that lead to crashes and outputting them to a * serial console.
* * It is default off, but you can enable it with either specifying * "ftrace_dump_on_oops" in the kernel command line, or setting * /proc/sys/kernel/ftrace_dump_on_oops * Set 1 if you want to dump buffers of all CPUs * Set 2 if you want to dump the buffer of the CPU that triggered oops */ enum ftrace_dump_mode ftrace_dump_on_oops; /* When set, tracing will stop when a WARN*() is hit */ int __disable_trace_on_warning; #ifdef CONFIG_TRACE_EVAL_MAP_FILE /* Map of enums to their values, for "eval_map" file */ struct trace_eval_map_head { struct module *mod; unsigned long length; }; union trace_eval_map_item; struct trace_eval_map_tail { /* * "end" is first and points to NULL as it must be different * than "mod" or "eval_string" */ union trace_eval_map_item *next; const char *end; /* points to NULL */ }; static DEFINE_MUTEX(trace_eval_mutex); /* * The trace_eval_maps are saved in an array with two extra elements, * one at the beginning, and one at the end. The beginning item contains * the count of the saved maps (head.length), and the module they * belong to if not built in (head.mod). The ending item contains a * pointer to the next array of saved eval_map items. */ union trace_eval_map_item { struct trace_eval_map map; struct trace_eval_map_head head; struct trace_eval_map_tail tail; }; static union trace_eval_map_item *trace_eval_maps; #endif /* CONFIG_TRACE_EVAL_MAP_FILE */ static int tracing_set_tracer(struct trace_array *tr, const char *buf); #define MAX_TRACER_SIZE 100 static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata; static char *default_bootup_tracer; static bool allocate_snapshot; static int __init set_cmdline_ftrace(char *str) { strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE); default_bootup_tracer = bootup_tracer_buf; /* We are using ftrace early, expand it */ ring_buffer_expanded = true; return 1; } __setup("ftrace=", set_cmdline_ftrace); static int __init set_ftrace_dump_on_oops(char *str) { if (*str++ != '=' || !*str) { ftrace_dump_on_oops = DUMP_ALL; return 1; } if (!strcmp("orig_cpu", str)) { ftrace_dump_on_oops = DUMP_ORIG; return 1; } return 0; } __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops); static int __init stop_trace_on_warning(char *str) { if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0)) __disable_trace_on_warning = 1; return 1; } __setup("traceoff_on_warning", stop_trace_on_warning); static int __init boot_alloc_snapshot(char *str) { allocate_snapshot = true; /* We also need the main ring buffer expanded */ ring_buffer_expanded = true; return 1; } __setup("alloc_snapshot", boot_alloc_snapshot); static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata; static int __init set_trace_boot_options(char *str) { strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE); return 0; } __setup("trace_options=", set_trace_boot_options); static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata; static char *trace_boot_clock __initdata; static int __init set_trace_boot_clock(char *str) { strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE); trace_boot_clock = trace_boot_clock_buf; return 0; } __setup("trace_clock=", set_trace_boot_clock); static int __init set_tracepoint_printk(char *str) { if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0)) tracepoint_printk = 1; return 1; } __setup("tp_printk", set_tracepoint_printk); unsigned long long ns2usecs(u64 nsec) { nsec += 500; do_div(nsec, 1000); return nsec; } /* trace_flags holds trace_options default values */ #define TRACE_DEFAULT_FLAGS \ (FUNCTION_DEFAULT_FLAGS | \ TRACE_ITER_PRINT_PARENT | 
TRACE_ITER_PRINTK | \ TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | \ TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE | \ TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS) /* trace_options that are only supported by global_trace */ #define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK | \ TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD) /* trace_flags that are default zero for instances */ #define ZEROED_TRACE_FLAGS \ (TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK) /* * The global_trace is the descriptor that holds the top-level tracing * buffers for the live tracing. */ static struct trace_array global_trace = { .trace_flags = TRACE_DEFAULT_FLAGS, }; LIST_HEAD(ftrace_trace_arrays); int trace_array_get(struct trace_array *this_tr) { struct trace_array *tr; int ret = -ENODEV; mutex_lock(&trace_types_lock); list_for_each_entry(tr, &ftrace_trace_arrays, list) { if (tr == this_tr) { tr->ref++; ret = 0; break; } } mutex_unlock(&trace_types_lock); return ret; } static void __trace_array_put(struct trace_array *this_tr) { WARN_ON(!this_tr->ref); this_tr->ref--; } void trace_array_put(struct trace_array *this_tr) { mutex_lock(&trace_types_lock); __trace_array_put(this_tr); mutex_unlock(&trace_types_lock); } int call_filter_check_discard(struct trace_event_call *call, void *rec, struct ring_buffer *buffer, struct ring_buffer_event *event) { if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) && !filter_match_preds(call->filter, rec)) { __trace_event_discard_commit(buffer, event); return 1; } return 0; } void trace_free_pid_list(struct trace_pid_list *pid_list) { vfree(pid_list->pids); kfree(pid_list); } /** * trace_find_filtered_pid - check if a pid exists in a filtered_pid list * @filtered_pids: The list of pids to check * @search_pid: The PID to find in @filtered_pids * * Returns true if @search_pid is found in @filtered_pids, and false otherwise. */ bool trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid) { /* * If pid_max changed after filtered_pids was created, we * by default ignore all pids greater than the previous pid_max. */ if (search_pid >= filtered_pids->pid_max) return false; return test_bit(search_pid, filtered_pids->pids); } /** * trace_ignore_this_task - should a task be ignored for tracing * @filtered_pids: The list of pids to check * @task: The task that should be ignored if not filtered * * Checks if @task should be traced or not from @filtered_pids. * Returns true if @task should *NOT* be traced. * Returns false if @task should be traced. */ bool trace_ignore_this_task(struct trace_pid_list *filtered_pids, struct task_struct *task) { /* * Return false, because if filtered_pids does not exist, * all pids are good to trace. */ if (!filtered_pids) return false; return !trace_find_filtered_pid(filtered_pids, task->pid); } /** * trace_filter_add_remove_task - Add or remove a task from a pid_list * @pid_list: The list to modify * @self: The current task for fork or NULL for exit * @task: The task to add or remove * * If adding a task, if @self is defined, the task is only added if @self * is also included in @pid_list. This happens on fork and tasks should * only be added when the parent is listed. If @self is NULL, then the * @task pid will be removed from the list, which would happen on exit * of a task.
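 *
 * Example: with pids {2, 7} in @pid_list, a fork by pid 7 adds the
 * child's pid, a fork by pid 9 is ignored, and an exit by pid 2
 * clears bit 2.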
*/ void trace_filter_add_remove_task(struct trace_pid_list *pid_list, struct task_struct *self, struct task_struct *task) { if (!pid_list) return; /* For forks, we only add if the forking task is listed */ if (self) { if (!trace_find_filtered_pid(pid_list, self->pid)) return; } /* Sorry, but we don't support pid_max changing after setting */ if (task->pid >= pid_list->pid_max) return; /* "self" is set for forks, and NULL for exits */ if (self) set_bit(task->pid, pid_list->pids); else clear_bit(task->pid, pid_list->pids); } /** * trace_pid_next - Used for seq_file to get to the next pid of a pid_list * @pid_list: The pid list to show * @v: The last pid that was shown (+1 the actual pid to let zero be displayed) * @pos: The position of the file * * This is used by the seq_file "next" operation to iterate the pids * listed in a trace_pid_list structure. * * Returns the pid+1 as we want to display pid of zero, but NULL would * stop the iteration. */ void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos) { unsigned long pid = (unsigned long)v; (*pos)++; /* pid already is +1 of the actual previous bit */ pid = find_next_bit(pid_list->pids, pid_list->pid_max, pid); /* Return pid + 1 to allow zero to be represented */ if (pid < pid_list->pid_max) return (void *)(pid + 1); return NULL; } /** * trace_pid_start - Used for seq_file to start reading pid lists * @pid_list: The pid list to show * @pos: The position of the file * * This is used by seq_file "start" operation to start the iteration * of listing pids. * * Returns the pid+1 as we want to display pid of zero, but NULL would * stop the iteration. */ void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos) { unsigned long pid; loff_t l = 0; pid = find_first_bit(pid_list->pids, pid_list->pid_max); if (pid >= pid_list->pid_max) return NULL; /* Return pid + 1 so that zero can be the exit value */ for (pid++; pid && l < *pos; pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l)) ; return (void *)pid; } /** * trace_pid_show - show the current pid in seq_file processing * @m: The seq_file structure to write into * @v: A void pointer of the pid (+1) value to display * * Can be directly used by seq_file operations to display the current * pid value. */ int trace_pid_show(struct seq_file *m, void *v) { unsigned long pid = (unsigned long)v - 1; seq_printf(m, "%lu\n", pid); return 0; } /* 128 should be much more than enough */ #define PID_BUF_SIZE 127 int trace_pid_write(struct trace_pid_list *filtered_pids, struct trace_pid_list **new_pid_list, const char __user *ubuf, size_t cnt) { struct trace_pid_list *pid_list; struct trace_parser parser; unsigned long val; int nr_pids = 0; ssize_t read = 0; ssize_t ret = 0; loff_t pos; pid_t pid; if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1)) return -ENOMEM; /* * Always recreate a new array. The write is an all or nothing * operation. Always create a new array when adding new pids by * the user. If the operation fails, then the current list is * not modified.
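 * On success the new list is handed back through @new_pid_list; the
 * caller is expected to publish it and free the old list.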
	 */
	pid_list = kmalloc(sizeof(*pid_list), GFP_KERNEL);
	if (!pid_list)
		return -ENOMEM;

	pid_list->pid_max = READ_ONCE(pid_max);

	/* Only truncating will shrink pid_max */
	if (filtered_pids && filtered_pids->pid_max > pid_list->pid_max)
		pid_list->pid_max = filtered_pids->pid_max;

	pid_list->pids = vzalloc((pid_list->pid_max + 7) >> 3);
	if (!pid_list->pids) {
		kfree(pid_list);
		return -ENOMEM;
	}

	if (filtered_pids) {
		/* copy the current bits to the new max */
		for_each_set_bit(pid, filtered_pids->pids,
				 filtered_pids->pid_max) {
			set_bit(pid, pid_list->pids);
			nr_pids++;
		}
	}

	while (cnt > 0) {

		pos = 0;

		ret = trace_get_user(&parser, ubuf, cnt, &pos);
		if (ret < 0 || !trace_parser_loaded(&parser))
			break;

		read += ret;
		ubuf += ret;
		cnt -= ret;

		ret = -EINVAL;
		if (kstrtoul(parser.buffer, 0, &val))
			break;
		if (val >= pid_list->pid_max)
			break;

		pid = (pid_t)val;

		set_bit(pid, pid_list->pids);
		nr_pids++;

		trace_parser_clear(&parser);
		ret = 0;
	}
	trace_parser_put(&parser);

	if (ret < 0) {
		trace_free_pid_list(pid_list);
		return ret;
	}

	if (!nr_pids) {
		/* Cleared the list of pids */
		trace_free_pid_list(pid_list);
		read = ret;
		pid_list = NULL;
	}

	*new_pid_list = pid_list;

	return read;
}

static u64 buffer_ftrace_now(struct trace_buffer *buf, int cpu)
{
	u64 ts;

	/* Early boot up does not have a buffer yet */
	if (!buf->buffer)
		return trace_clock_local();

	ts = ring_buffer_time_stamp(buf->buffer, cpu);
	ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);

	return ts;
}

u64 ftrace_now(int cpu)
{
	return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
}

/**
 * tracing_is_enabled - Show if global_trace has been disabled
 *
 * Shows if the global trace has been enabled or not. It uses the
 * mirror flag "buffer_disabled" to be used in fast paths such as for
 * the irqsoff tracer. But it may be inaccurate due to races. If you
 * need to know the accurate state, use tracing_is_on() which is a little
 * slower, but accurate.
 */
int tracing_is_enabled(void)
{
	/*
	 * For quick access (irqsoff uses this in fast path), just
	 * return the mirror variable of the state of the ring buffer.
	 * It's a little racy, but we don't really care.
	 */
	smp_rmb();
	return !global_trace.buffer_disabled;
}

/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely set to a low number of 16384.
 * If the dump on oops happens, it will be much appreciated
 * to not have to wait for all that output. Anyway, this is
 * configurable at both boot time and run time.
 */
#define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */

static unsigned long		trace_buf_size = TRACE_BUF_SIZE_DEFAULT;

/* trace_types holds a linked list of available tracers. */
static struct tracer		*trace_types __read_mostly;

/*
 * trace_types_lock is used to protect the trace_types list.
 */
DEFINE_MUTEX(trace_types_lock);

/*
 * serialize the access of the ring buffer
 *
 * ring buffer serializes readers, but it is low level protection.
 * The validity of the events (as returned by ring_buffer_peek() etc.)
 * is not protected by the ring buffer.
 *
 * The content of events may become garbage if we allow another process
 * to consume these events concurrently:
 *   A) the page of the consumed events may become a normal page
 *      (not a reader page) in the ring buffer, and this page will be
 *      rewritten by the event producer.
 *   B) The page of the consumed events may become a page for splice_read,
 *      and this page will be returned to the system.
 *
 * These primitives allow multiple processes to access different cpu
 * ring buffers concurrently.
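 *
 * A reader-side sketch (illustrative only):
 *
 *	trace_access_lock(cpu);
 *	... consume events from the @cpu ring buffer ...
 *	trace_access_unlock(cpu);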
 *
 * These primitives don't distinguish read-only and read-consume access.
 * Multiple read-only accesses are also serialized.
 */

#ifdef CONFIG_SMP
static DECLARE_RWSEM(all_cpu_access_lock);
static DEFINE_PER_CPU(struct mutex, cpu_access_lock);

static inline void trace_access_lock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		/* gain it for accessing the whole ring buffer. */
		down_write(&all_cpu_access_lock);
	} else {
		/* gain it for accessing a cpu ring buffer. */

		/* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
		down_read(&all_cpu_access_lock);

		/* Secondly block other access to this @cpu ring buffer. */
		mutex_lock(&per_cpu(cpu_access_lock, cpu));
	}
}

static inline void trace_access_unlock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		up_write(&all_cpu_access_lock);
	} else {
		mutex_unlock(&per_cpu(cpu_access_lock, cpu));
		up_read(&all_cpu_access_lock);
	}
}

static inline void trace_access_lock_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		mutex_init(&per_cpu(cpu_access_lock, cpu));
}

#else

static DEFINE_MUTEX(access_lock);

static inline void trace_access_lock(int cpu)
{
	(void)cpu;
	mutex_lock(&access_lock);
}

static inline void trace_access_unlock(int cpu)
{
	(void)cpu;
	mutex_unlock(&access_lock);
}

static inline void trace_access_lock_init(void)
{
}

#endif

#ifdef CONFIG_STACKTRACE
static void __ftrace_trace_stack(struct ring_buffer *buffer,
				 unsigned long flags,
				 int skip, int pc, struct pt_regs *regs);
static inline void ftrace_trace_stack(struct trace_array *tr,
				      struct ring_buffer *buffer,
				      unsigned long flags,
				      int skip, int pc, struct pt_regs *regs);

#else
static inline void __ftrace_trace_stack(struct ring_buffer *buffer,
					unsigned long flags,
					int skip, int pc, struct pt_regs *regs)
{
}
static inline void ftrace_trace_stack(struct trace_array *tr,
				      struct ring_buffer *buffer,
				      unsigned long flags,
				      int skip, int pc, struct pt_regs *regs)
{
}

#endif

static __always_inline void
trace_event_setup(struct ring_buffer_event *event,
		  int type, unsigned long flags, int pc)
{
	struct trace_entry *ent = ring_buffer_event_data(event);

	tracing_generic_entry_update(ent, flags, pc);
	ent->type = type;
}

static __always_inline struct ring_buffer_event *
__trace_buffer_lock_reserve(struct ring_buffer *buffer,
			    int type,
			    unsigned long len,
			    unsigned long flags, int pc)
{
	struct ring_buffer_event *event;

	event = ring_buffer_lock_reserve(buffer, len);
	if (event != NULL)
		trace_event_setup(event, type, flags, pc);

	return event;
}

void tracer_tracing_on(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_on(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 0;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_on - enable tracing buffers
 *
 * This function enables tracing buffers that may have been
 * disabled with tracing_off.
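 *
 * A common debugging pattern (a sketch, not a prescribed API usage) is
 * to freeze the buffers around an event of interest and then re-enable
 * them:
 *
 *	tracing_off();
 *	... inspect the frozen trace ...
 *	tracing_on();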
 */
void tracing_on(void)
{
	tracer_tracing_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_on);


static __always_inline void
__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
{
	__this_cpu_write(trace_taskinfo_save, true);

	/* If this is the temp buffer, we need to commit fully */
	if (this_cpu_read(trace_buffered_event) == event) {
		/* Length is in event->array[0] */
		ring_buffer_write(buffer, event->array[0], &event->array[1]);
		/* Release the temp buffer */
		this_cpu_dec(trace_buffered_event_cnt);
	} else
		ring_buffer_unlock_commit(buffer, event);
}

/**
 * __trace_puts - write a constant string into the trace buffer.
 * @ip: The address of the caller
 * @str: The constant string to write
 * @size: The size of the string.
 */
int __trace_puts(unsigned long ip, const char *str, int size)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct print_entry *entry;
	unsigned long irq_flags;
	int alloc;
	int pc;

	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
		return 0;

	pc = preempt_count();

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	alloc = sizeof(*entry) + size + 2; /* possible \n added */

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
					    irq_flags, pc);
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;

	memcpy(&entry->buf, str, size);

	/* Add a newline if necessary */
	if (entry->buf[size - 1] != '\n') {
		entry->buf[size] = '\n';
		entry->buf[size + 1] = '\0';
	} else
		entry->buf[size] = '\0';

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);

	return size;
}
EXPORT_SYMBOL_GPL(__trace_puts);

/**
 * __trace_bputs - write the pointer to a constant string into trace buffer
 * @ip: The address of the caller
 * @str: The constant string whose address is written into the buffer
 */
int __trace_bputs(unsigned long ip, const char *str)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct bputs_entry *entry;
	unsigned long irq_flags;
	int size = sizeof(struct bputs_entry);
	int pc;

	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
		return 0;

	pc = preempt_count();

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
					    irq_flags, pc);
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->str = str;

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);

	return 1;
}
EXPORT_SYMBOL_GPL(__trace_bputs);

#ifdef CONFIG_TRACER_SNAPSHOT
void tracing_snapshot_instance(struct trace_array *tr)
{
	struct tracer *tracer = tr->current_trace;
	unsigned long flags;

	if (in_nmi()) {
		internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
		internal_trace_puts("*** snapshot is being ignored        ***\n");
		return;
	}

	if (!tr->allocated_snapshot) {
		internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
		internal_trace_puts("*** stopping trace here!   ***\n");
		tracing_off();
		return;
	}

	/* Note, snapshot can not be used when the tracer uses it */
	if (tracer->use_max_tr) {
		internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
		internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
		return;
	}

	local_irq_save(flags);
	update_max_tr(tr, current, smp_processor_id());
	local_irq_restore(flags);
}

/**
 * tracing_snapshot - take a snapshot of the current buffer.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 *
 * Note, make sure to allocate the snapshot with either
 * a tracing_snapshot_alloc(), or by doing it manually
 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
 *
 * If the snapshot buffer is not allocated, it will stop tracing.
 * Basically making a permanent snapshot.
 */
void tracing_snapshot(void)
{
	struct trace_array *tr = &global_trace;

	tracing_snapshot_instance(tr);
}
EXPORT_SYMBOL_GPL(tracing_snapshot);

static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
					struct trace_buffer *size_buf, int cpu_id);
static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);

int tracing_alloc_snapshot_instance(struct trace_array *tr)
{
	int ret;

	if (!tr->allocated_snapshot) {

		/* allocate spare buffer */
		ret = resize_buffer_duplicate_size(&tr->max_buffer,
				   &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
		if (ret < 0)
			return ret;

		tr->allocated_snapshot = true;
	}

	return 0;
}

static void free_snapshot(struct trace_array *tr)
{
	/*
	 * We don't free the ring buffer; we resize it instead, because
	 * the max_tr ring buffer has some state (e.g. ring->clock) and
	 * we want to preserve it.
	 */
	ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
	set_buffer_entries(&tr->max_buffer, 1);
	tracing_reset_online_cpus(&tr->max_buffer);
	tr->allocated_snapshot = false;
}

/**
 * tracing_alloc_snapshot - allocate snapshot buffer.
 *
 * This only allocates the snapshot buffer if it isn't already
 * allocated - it doesn't also take a snapshot.
 *
 * This is meant to be used in cases where the snapshot buffer needs
 * to be set up for events that can't sleep but need to be able to
 * trigger a snapshot.
 */
int tracing_alloc_snapshot(void)
{
	struct trace_array *tr = &global_trace;
	int ret;

	ret = tracing_alloc_snapshot_instance(tr);
	WARN_ON(ret < 0);

	return ret;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);

/**
 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
 *
 * This is similar to tracing_snapshot(), but it will allocate the
 * snapshot buffer if it isn't already allocated. Use this only
 * where it is safe to sleep, as the allocation may sleep.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 */
void tracing_snapshot_alloc(void)
{
	int ret;

	ret = tracing_alloc_snapshot();
	if (ret < 0)
		return;

	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#else
void tracing_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
int tracing_alloc_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
	return -ENODEV;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
void tracing_snapshot_alloc(void)
{
	/* Give warning */
	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#endif /* CONFIG_TRACER_SNAPSHOT */

void tracer_tracing_off(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_off(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 1;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_off - turn off tracing buffers
 *
 * This function stops the tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
 */
void tracing_off(void)
{
	tracer_tracing_off(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_off);

void disable_trace_on_warning(void)
{
	if (__disable_trace_on_warning)
		tracing_off();
}

/**
 * tracer_tracing_is_on - show real state of ring buffer enabled
 * @tr: the trace array to know if ring buffer is enabled
 *
 * Shows real state of the ring buffer if it is enabled or not.
 */
bool tracer_tracing_is_on(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		return ring_buffer_record_is_on(tr->trace_buffer.buffer);
	return !tr->buffer_disabled;
}

/**
 * tracing_is_on - show state of ring buffers enabled
 */
int tracing_is_on(void)
{
	return tracer_tracing_is_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_is_on);

static int __init set_buf_size(char *str)
{
	unsigned long buf_size;

	if (!str)
		return 0;
	buf_size = memparse(str, &str);
	/* nr_entries can not be zero */
	if (buf_size == 0)
		return 0;
	trace_buf_size = buf_size;
	return 1;
}
__setup("trace_buf_size=", set_buf_size);

static int __init set_tracing_thresh(char *str)
{
	unsigned long threshold;
	int ret;

	if (!str)
		return 0;
	ret = kstrtoul(str, 0, &threshold);
	if (ret < 0)
		return 0;
	tracing_thresh = threshold * 1000;
	return 1;
}
__setup("tracing_thresh=", set_tracing_thresh);

unsigned long nsecs_to_usecs(unsigned long nsecs)
{
	return nsecs / 1000;
}

/*
 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
 * It uses C(a, b) where 'a' is the eval (enum) name and 'b' is the string that
 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
 * of strings in the order that the evals (enum) were defined.
 */
#undef C
#define C(a, b) b

/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
	TRACE_FLAGS
	NULL
};

static struct {
	u64 (*func)(void);
	const char *name;
	int in_ns;		/* is this clock in nanoseconds? */
} trace_clocks[] = {
	{ trace_clock_local,		"local",	1 },
	{ trace_clock_global,		"global",	1 },
	{ trace_clock_counter,		"counter",	0 },
	{ trace_clock_jiffies,		"uptime",	0 },
	{ trace_clock,			"perf",		1 },
	{ ktime_get_mono_fast_ns,	"mono",		1 },
	{ ktime_get_raw_fast_ns,	"mono_raw",	1 },
	{ ktime_get_boot_fast_ns,	"boot",		1 },
	ARCH_TRACE_CLOCKS
};

bool trace_clock_in_ns(struct trace_array *tr)
{
	if (trace_clocks[tr->clock_id].in_ns)
		return true;

	return false;
}

/*
 * trace_parser_get_init - gets the buffer for trace parser
 */
int trace_parser_get_init(struct trace_parser *parser, int size)
{
	memset(parser, 0, sizeof(*parser));

	parser->buffer = kmalloc(size, GFP_KERNEL);
	if (!parser->buffer)
		return 1;

	parser->size = size;
	return 0;
}

/*
 * trace_parser_put - frees the buffer for trace parser
 */
void trace_parser_put(struct trace_parser *parser)
{
	kfree(parser->buffer);
	parser->buffer = NULL;
}

/*
 * trace_get_user - reads the user input string separated by space
 * (matched by isspace(ch))
 *
 * For each string found the 'struct trace_parser' is updated,
 * and the function returns.
 *
 * Returns number of bytes read.
 *
 * See kernel/trace/trace.h for 'struct trace_parser' details.
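 *
 * For example, if userspace writes "foo bar\n", the first call fills
 * parser->buffer with "foo" and returns, and a subsequent call yields
 * "bar" (a sketch of the behaviour described above, not additional
 * semantics).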
*/ int trace_get_user(struct trace_parser *parser, const char __user *ubuf, size_t cnt, loff_t *ppos) { char ch; size_t read = 0; ssize_t ret; if (!*ppos) trace_parser_clear(parser); ret = get_user(ch, ubuf++); if (ret) goto out; read++; cnt--; /* * The parser is not finished with the last write, * continue reading the user input without skipping spaces. */ if (!parser->cont) { /* skip white space */ while (cnt && isspace(ch)) { ret = get_user(ch, ubuf++); if (ret) goto out; read++; cnt--; } parser->idx = 0; /* only spaces were written */ if (isspace(ch) || !ch) { *ppos += read; ret = read; goto out; } } /* read the non-space input */ while (cnt && !isspace(ch) && ch) { if (parser->idx < parser->size - 1) parser->buffer[parser->idx++] = ch; else { ret = -EINVAL; goto out; } ret = get_user(ch, ubuf++); if (ret) goto out; read++; cnt--; } /* We either got finished input or we have to wait for another call. */ if (isspace(ch) || !ch) { parser->buffer[parser->idx] = 0; parser->cont = false; } else if (parser->idx < parser->size - 1) { parser->cont = true; parser->buffer[parser->idx++] = ch; /* Make sure the parsed string always terminates with '\0'. */ parser->buffer[parser->idx] = 0; } else { ret = -EINVAL; goto out; } *ppos += read; ret = read; out: return ret; } /* TODO add a seq_buf_to_buffer() */ static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt) { int len; if (trace_seq_used(s) <= s->seq.readpos) return -EBUSY; len = trace_seq_used(s) - s->seq.readpos; if (cnt > len) cnt = len; memcpy(buf, s->buffer + s->seq.readpos, cnt); s->seq.readpos += cnt; return cnt; } unsigned long __read_mostly tracing_thresh; #ifdef CONFIG_TRACER_MAX_TRACE /* * Copy the new maximum trace into the separate maximum-trace * structure. (this way the maximum trace is permanently saved, * for later retrieval via /sys/kernel/tracing/tracing_max_latency) */ static void __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu) { struct trace_buffer *trace_buf = &tr->trace_buffer; struct trace_buffer *max_buf = &tr->max_buffer; struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu); struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu); max_buf->cpu = cpu; max_buf->time_start = data->preempt_timestamp; max_data->saved_latency = tr->max_latency; max_data->critical_start = data->critical_start; max_data->critical_end = data->critical_end; memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN); max_data->pid = tsk->pid; /* * If tsk == current, then use current_uid(), as that does not use * RCU. The irq tracer can be called out of RCU scope. */ if (tsk == current) max_data->uid = current_uid(); else max_data->uid = task_uid(tsk); max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO; max_data->policy = tsk->policy; max_data->rt_priority = tsk->rt_priority; /* record this tasks comm */ tracing_record_cmdline(tsk); } /** * update_max_tr - snapshot all trace buffers from global_trace to max_tr * @tr: tracer * @tsk: the task with the latency * @cpu: The cpu that initiated the trace. * * Flip the buffers between the @tr and the max_tr and record information * about which task was the cause of this latency. 
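 *
 * Callers must have interrupts disabled; tracing_snapshot_instance()
 * above shows the canonical pattern:
 *
 *	local_irq_save(flags);
 *	update_max_tr(tr, current, smp_processor_id());
 *	local_irq_restore(flags);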
*/ void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu) { if (tr->stop_count) return; WARN_ON_ONCE(!irqs_disabled()); if (!tr->allocated_snapshot) { /* Only the nop tracer should hit this when disabling */ WARN_ON_ONCE(tr->current_trace != &nop_trace); return; } arch_spin_lock(&tr->max_lock); /* Inherit the recordable setting from trace_buffer */ if (ring_buffer_record_is_set_on(tr->trace_buffer.buffer)) ring_buffer_record_on(tr->max_buffer.buffer); else ring_buffer_record_off(tr->max_buffer.buffer); swap(tr->trace_buffer.buffer, tr->max_buffer.buffer); __update_max_tr(tr, tsk, cpu); arch_spin_unlock(&tr->max_lock); } /** * update_max_tr_single - only copy one trace over, and reset the rest * @tr - tracer * @tsk - task with the latency * @cpu - the cpu of the buffer to copy. * * Flip the trace of a single CPU buffer between the @tr and the max_tr. */ void update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu) { int ret; if (tr->stop_count) return; WARN_ON_ONCE(!irqs_disabled()); if (!tr->allocated_snapshot) { /* Only the nop tracer should hit this when disabling */ WARN_ON_ONCE(tr->current_trace != &nop_trace); return; } arch_spin_lock(&tr->max_lock); ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu); if (ret == -EBUSY) { /* * We failed to swap the buffer due to a commit taking * place on this CPU. We fail to record, but we reset * the max trace buffer (no one writes directly to it) * and flag that it failed. */ trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_, "Failed to swap buffers due to commit in progress\n"); } WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY); __update_max_tr(tr, tsk, cpu); arch_spin_unlock(&tr->max_lock); } #endif /* CONFIG_TRACER_MAX_TRACE */ static int wait_on_pipe(struct trace_iterator *iter, int full) { /* Iterators are static, they should be filled or empty */ if (trace_buffer_iter(iter, iter->cpu_file)) return 0; return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file, full); } #ifdef CONFIG_FTRACE_STARTUP_TEST static bool selftests_can_run; struct trace_selftests { struct list_head list; struct tracer *type; }; static LIST_HEAD(postponed_selftests); static int save_selftest(struct tracer *type) { struct trace_selftests *selftest; selftest = kmalloc(sizeof(*selftest), GFP_KERNEL); if (!selftest) return -ENOMEM; selftest->type = type; list_add(&selftest->list, &postponed_selftests); return 0; } static int run_tracer_selftest(struct tracer *type) { struct trace_array *tr = &global_trace; struct tracer *saved_tracer = tr->current_trace; int ret; if (!type->selftest || tracing_selftest_disabled) return 0; /* * If a tracer registers early in boot up (before scheduling is * initialized and such), then do not run its selftests yet. * Instead, run it a little later in the boot process. */ if (!selftests_can_run) return save_selftest(type); /* * Run a selftest on this tracer. * Here we reset the trace buffer, and set the current * tracer to be this tracer. The tracer can then run some * internal tracing to verify that everything is in order. * If we fail, we do not register this tracer. 
*/ tracing_reset_online_cpus(&tr->trace_buffer); tr->current_trace = type; #ifdef CONFIG_TRACER_MAX_TRACE if (type->use_max_tr) { /* If we expanded the buffers, make sure the max is expanded too */ if (ring_buffer_expanded) ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size, RING_BUFFER_ALL_CPUS); tr->allocated_snapshot = true; } #endif /* the test is responsible for initializing and enabling */ pr_info("Testing tracer %s: ", type->name); ret = type->selftest(type, tr); /* the test is responsible for resetting too */ tr->current_trace = saved_tracer; if (ret) { printk(KERN_CONT "FAILED!\n"); /* Add the warning after printing 'FAILED' */ WARN_ON(1); return -1; } /* Only reset on passing, to avoid touching corrupted buffers */ tracing_reset_online_cpus(&tr->trace_buffer); #ifdef CONFIG_TRACER_MAX_TRACE if (type->use_max_tr) { tr->allocated_snapshot = false; /* Shrink the max buffer again */ if (ring_buffer_expanded) ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS); } #endif printk(KERN_CONT "PASSED\n"); return 0; } static __init int init_trace_selftests(void) { struct trace_selftests *p, *n; struct tracer *t, **last; int ret; selftests_can_run = true; mutex_lock(&trace_types_lock); if (list_empty(&postponed_selftests)) goto out; pr_info("Running postponed tracer tests:\n"); list_for_each_entry_safe(p, n, &postponed_selftests, list) { ret = run_tracer_selftest(p->type); /* If the test fails, then warn and remove from available_tracers */ if (ret < 0) { WARN(1, "tracer: %s failed selftest, disabling\n", p->type->name); last = &trace_types; for (t = trace_types; t; t = t->next) { if (t == p->type) { *last = t->next; break; } last = &t->next; } } list_del(&p->list); kfree(p); } out: mutex_unlock(&trace_types_lock); return 0; } core_initcall(init_trace_selftests); #else static inline int run_tracer_selftest(struct tracer *type) { return 0; } #endif /* CONFIG_FTRACE_STARTUP_TEST */ static void add_tracer_options(struct trace_array *tr, struct tracer *t); static void __init apply_trace_boot_options(void); /** * register_tracer - register a tracer with the ftrace system. * @type - the plugin for the tracer * * Register a new plugin tracer. 
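 *
 * A minimal registration sketch (all names below are illustrative,
 * not an existing tracer):
 *
 *	static struct tracer foo_tracer __read_mostly = {
 *		.name	= "foo",
 *		.init	= foo_tracer_init,
 *		.reset	= foo_tracer_reset,
 *	};
 *
 *	register_tracer(&foo_tracer);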
*/ int __init register_tracer(struct tracer *type) { struct tracer *t; int ret = 0; if (!type->name) { pr_info("Tracer must have a name\n"); return -1; } if (strlen(type->name) >= MAX_TRACER_SIZE) { pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE); return -1; } mutex_lock(&trace_types_lock); tracing_selftest_running = true; for (t = trace_types; t; t = t->next) { if (strcmp(type->name, t->name) == 0) { /* already found */ pr_info("Tracer %s already registered\n", type->name); ret = -1; goto out; } } if (!type->set_flag) type->set_flag = &dummy_set_flag; if (!type->flags) { /*allocate a dummy tracer_flags*/ type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL); if (!type->flags) { ret = -ENOMEM; goto out; } type->flags->val = 0; type->flags->opts = dummy_tracer_opt; } else if (!type->flags->opts) type->flags->opts = dummy_tracer_opt; /* store the tracer for __set_tracer_option */ type->flags->trace = type; ret = run_tracer_selftest(type); if (ret < 0) goto out; type->next = trace_types; trace_types = type; add_tracer_options(&global_trace, type); out: tracing_selftest_running = false; mutex_unlock(&trace_types_lock); if (ret || !default_bootup_tracer) goto out_unlock; if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE)) goto out_unlock; printk(KERN_INFO "Starting tracer '%s'\n", type->name); /* Do we want this tracer to start on bootup? */ tracing_set_tracer(&global_trace, type->name); default_bootup_tracer = NULL; apply_trace_boot_options(); /* disable other selftests, since this will break it. */ tracing_selftest_disabled = true; #ifdef CONFIG_FTRACE_STARTUP_TEST printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n", type->name); #endif out_unlock: return ret; } void tracing_reset(struct trace_buffer *buf, int cpu) { struct ring_buffer *buffer = buf->buffer; if (!buffer) return; ring_buffer_record_disable(buffer); /* Make sure all commits have finished */ synchronize_rcu(); ring_buffer_reset_cpu(buffer, cpu); ring_buffer_record_enable(buffer); } void tracing_reset_online_cpus(struct trace_buffer *buf) { struct ring_buffer *buffer = buf->buffer; int cpu; if (!buffer) return; ring_buffer_record_disable(buffer); /* Make sure all commits have finished */ synchronize_rcu(); buf->time_start = buffer_ftrace_now(buf, buf->cpu); for_each_online_cpu(cpu) ring_buffer_reset_cpu(buffer, cpu); ring_buffer_record_enable(buffer); } /* Must have trace_types_lock held */ void tracing_reset_all_online_cpus(void) { struct trace_array *tr; list_for_each_entry(tr, &ftrace_trace_arrays, list) { if (!tr->clear_trace) continue; tr->clear_trace = false; tracing_reset_online_cpus(&tr->trace_buffer); #ifdef CONFIG_TRACER_MAX_TRACE tracing_reset_online_cpus(&tr->max_buffer); #endif } } static int *tgid_map; #define SAVED_CMDLINES_DEFAULT 128 #define NO_CMDLINE_MAP UINT_MAX static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED; struct saved_cmdlines_buffer { unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1]; unsigned *map_cmdline_to_pid; unsigned cmdline_num; int cmdline_idx; char *saved_cmdlines; }; static struct saved_cmdlines_buffer *savedcmd; /* temporary disable recording */ static atomic_t trace_record_taskinfo_disabled __read_mostly; static inline char *get_saved_cmdlines(int idx) { return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN]; } static inline void set_cmdline(int idx, const char *cmdline) { memcpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN); } static int allocate_cmdlines_buffer(unsigned int val, struct saved_cmdlines_buffer *s) { 
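	/*
	 * Allocate both the cmdline-slot -> pid map and the cmdline
	 * string storage for @val entries, undoing the first allocation
	 * if the second fails, so callers need only check the return
	 * value.
	 */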
	s->map_cmdline_to_pid = kmalloc_array(val,
					      sizeof(*s->map_cmdline_to_pid),
					      GFP_KERNEL);
	if (!s->map_cmdline_to_pid)
		return -ENOMEM;

	s->saved_cmdlines = kmalloc_array(TASK_COMM_LEN, val, GFP_KERNEL);
	if (!s->saved_cmdlines) {
		kfree(s->map_cmdline_to_pid);
		return -ENOMEM;
	}

	s->cmdline_idx = 0;
	s->cmdline_num = val;
	memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
	       sizeof(s->map_pid_to_cmdline));
	memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
	       val * sizeof(*s->map_cmdline_to_pid));

	return 0;
}

static int trace_create_savedcmd(void)
{
	int ret;

	savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
	if (!savedcmd)
		return -ENOMEM;

	ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
	if (ret < 0) {
		kfree(savedcmd);
		savedcmd = NULL;
		return -ENOMEM;
	}

	return 0;
}

int is_tracing_stopped(void)
{
	return global_trace.stop_count;
}

/**
 * tracing_start - quick start of the tracer
 *
 * If tracing is enabled but was stopped by tracing_stop,
 * this will start the tracer back up.
 */
void tracing_start(void)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	if (tracing_disabled)
		return;

	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
	if (--global_trace.stop_count) {
		if (global_trace.stop_count < 0) {
			/* Someone screwed up their debugging */
			WARN_ON_ONCE(1);
			global_trace.stop_count = 0;
		}
		goto out;
	}

	/* Prevent the buffers from switching */
	arch_spin_lock(&global_trace.max_lock);

	buffer = global_trace.trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	buffer = global_trace.max_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);
#endif

	arch_spin_unlock(&global_trace.max_lock);

 out:
	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
}

static void tracing_start_tr(struct trace_array *tr)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	if (tracing_disabled)
		return;

	/* If global, we need to also start the max tracer */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return tracing_start();

	raw_spin_lock_irqsave(&tr->start_lock, flags);

	if (--tr->stop_count) {
		if (tr->stop_count < 0) {
			/* Someone screwed up their debugging */
			WARN_ON_ONCE(1);
			tr->stop_count = 0;
		}
		goto out;
	}

	buffer = tr->trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);

 out:
	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
}

/**
 * tracing_stop - quick stop of the tracer
 *
 * Lightweight way to stop tracing. Use in conjunction with
 * tracing_start.
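 *
 * Stops nest via stop_count, so each tracing_stop() must be paired
 * with a tracing_start() (sketch):
 *
 *	tracing_stop();
 *	... read or dump the buffers ...
 *	tracing_start();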
*/ void tracing_stop(void) { struct ring_buffer *buffer; unsigned long flags; raw_spin_lock_irqsave(&global_trace.start_lock, flags); if (global_trace.stop_count++) goto out; /* Prevent the buffers from switching */ arch_spin_lock(&global_trace.max_lock); buffer = global_trace.trace_buffer.buffer; if (buffer) ring_buffer_record_disable(buffer); #ifdef CONFIG_TRACER_MAX_TRACE buffer = global_trace.max_buffer.buffer; if (buffer) ring_buffer_record_disable(buffer); #endif arch_spin_unlock(&global_trace.max_lock); out: raw_spin_unlock_irqrestore(&global_trace.start_lock, flags); } static void tracing_stop_tr(struct trace_array *tr) { struct ring_buffer *buffer; unsigned long flags; /* If global, we need to also stop the max tracer */ if (tr->flags & TRACE_ARRAY_FL_GLOBAL) return tracing_stop(); raw_spin_lock_irqsave(&tr->start_lock, flags); if (tr->stop_count++) goto out; buffer = tr->trace_buffer.buffer; if (buffer) ring_buffer_record_disable(buffer); out: raw_spin_unlock_irqrestore(&tr->start_lock, flags); } static int trace_save_cmdline(struct task_struct *tsk) { unsigned pid, idx; /* treat recording of idle task as a success */ if (!tsk->pid) return 1; if (unlikely(tsk->pid > PID_MAX_DEFAULT)) return 0; /* * It's not the end of the world if we don't get * the lock, but we also don't want to spin * nor do we want to disable interrupts, * so if we miss here, then better luck next time. */ if (!arch_spin_trylock(&trace_cmdline_lock)) return 0; idx = savedcmd->map_pid_to_cmdline[tsk->pid]; if (idx == NO_CMDLINE_MAP) { idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num; /* * Check whether the cmdline buffer at idx has a pid * mapped. We are going to overwrite that entry so we * need to clear the map_pid_to_cmdline. Otherwise we * would read the new comm for the old pid. 
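		 *
		 * E.g. if slot idx previously belonged to pid 42, then
		 * map_pid_to_cmdline[42] must be reset to NO_CMDLINE_MAP
		 * before the slot is handed to tsk->pid.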
*/ pid = savedcmd->map_cmdline_to_pid[idx]; if (pid != NO_CMDLINE_MAP) savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP; savedcmd->map_cmdline_to_pid[idx] = tsk->pid; savedcmd->map_pid_to_cmdline[tsk->pid] = idx; savedcmd->cmdline_idx = idx; } set_cmdline(idx, tsk->comm); arch_spin_unlock(&trace_cmdline_lock); return 1; } static void __trace_find_cmdline(int pid, char comm[]) { unsigned map; if (!pid) { strcpy(comm, "<idle>"); return; } if (WARN_ON_ONCE(pid < 0)) { strcpy(comm, "<XXX>"); return; } if (pid > PID_MAX_DEFAULT) { strcpy(comm, "<...>"); return; } map = savedcmd->map_pid_to_cmdline[pid]; if (map != NO_CMDLINE_MAP) strlcpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN); else strcpy(comm, "<...>"); } void trace_find_cmdline(int pid, char comm[]) { preempt_disable(); arch_spin_lock(&trace_cmdline_lock); __trace_find_cmdline(pid, comm); arch_spin_unlock(&trace_cmdline_lock); preempt_enable(); } int trace_find_tgid(int pid) { if (unlikely(!tgid_map || !pid || pid > PID_MAX_DEFAULT)) return 0; return tgid_map[pid]; } static int trace_save_tgid(struct task_struct *tsk) { /* treat recording of idle task as a success */ if (!tsk->pid) return 1; if (unlikely(!tgid_map || tsk->pid > PID_MAX_DEFAULT)) return 0; tgid_map[tsk->pid] = tsk->tgid; return 1; } static bool tracing_record_taskinfo_skip(int flags) { if (unlikely(!(flags & (TRACE_RECORD_CMDLINE | TRACE_RECORD_TGID)))) return true; if (atomic_read(&trace_record_taskinfo_disabled) || !tracing_is_on()) return true; if (!__this_cpu_read(trace_taskinfo_save)) return true; return false; } /** * tracing_record_taskinfo - record the task info of a task * * @task - task to record * @flags - TRACE_RECORD_CMDLINE for recording comm * - TRACE_RECORD_TGID for recording tgid */ void tracing_record_taskinfo(struct task_struct *task, int flags) { bool done; if (tracing_record_taskinfo_skip(flags)) return; /* * Record as much task information as possible. If some fail, continue * to try to record the others. */ done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(task); done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(task); /* If recording any information failed, retry again soon. */ if (!done) return; __this_cpu_write(trace_taskinfo_save, false); } /** * tracing_record_taskinfo_sched_switch - record task info for sched_switch * * @prev - previous task during sched_switch * @next - next task during sched_switch * @flags - TRACE_RECORD_CMDLINE for recording comm * TRACE_RECORD_TGID for recording tgid */ void tracing_record_taskinfo_sched_switch(struct task_struct *prev, struct task_struct *next, int flags) { bool done; if (tracing_record_taskinfo_skip(flags)) return; /* * Record as much task information as possible. If some fail, continue * to try to record the others. */ done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(prev); done &= !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(next); done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(prev); done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(next); /* If recording any information failed, retry again soon. */ if (!done) return; __this_cpu_write(trace_taskinfo_save, false); } /* Helpers to record a specific task information */ void tracing_record_cmdline(struct task_struct *task) { tracing_record_taskinfo(task, TRACE_RECORD_CMDLINE); } void tracing_record_tgid(struct task_struct *task) { tracing_record_taskinfo(task, TRACE_RECORD_TGID); } /* * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq * overflowed, and TRACE_TYPE_HANDLED otherwise. 
This helper function * simplifies those functions and keeps them in sync. */ enum print_line_t trace_handle_return(struct trace_seq *s) { return trace_seq_has_overflowed(s) ? TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED; } EXPORT_SYMBOL_GPL(trace_handle_return); void tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags, int pc) { struct task_struct *tsk = current; entry->preempt_count = pc & 0xff; entry->pid = (tsk) ? tsk->pid : 0; entry->flags = #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) | #else TRACE_FLAG_IRQS_NOSUPPORT | #endif ((pc & NMI_MASK ) ? TRACE_FLAG_NMI : 0) | ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) | ((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) | (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) | (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0); } EXPORT_SYMBOL_GPL(tracing_generic_entry_update); struct ring_buffer_event * trace_buffer_lock_reserve(struct ring_buffer *buffer, int type, unsigned long len, unsigned long flags, int pc) { return __trace_buffer_lock_reserve(buffer, type, len, flags, pc); } DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event); DEFINE_PER_CPU(int, trace_buffered_event_cnt); static int trace_buffered_event_ref; /** * trace_buffered_event_enable - enable buffering events * * When events are being filtered, it is quicker to use a temporary * buffer to write the event data into if there's a likely chance * that it will not be committed. The discard of the ring buffer * is not as fast as committing, and is much slower than copying * a commit. * * When an event is to be filtered, allocate per cpu buffers to * write the event data into, and if the event is filtered and discarded * it is simply dropped, otherwise, the entire data is to be committed * in one shot. */ void trace_buffered_event_enable(void) { struct ring_buffer_event *event; struct page *page; int cpu; WARN_ON_ONCE(!mutex_is_locked(&event_mutex)); if (trace_buffered_event_ref++) return; for_each_tracing_cpu(cpu) { page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL | __GFP_NORETRY, 0); if (!page) goto failed; event = page_address(page); memset(event, 0, sizeof(*event)); per_cpu(trace_buffered_event, cpu) = event; preempt_disable(); if (cpu == smp_processor_id() && this_cpu_read(trace_buffered_event) != per_cpu(trace_buffered_event, cpu)) WARN_ON_ONCE(1); preempt_enable(); } return; failed: trace_buffered_event_disable(); } static void enable_trace_buffered_event(void *data) { /* Probably not needed, but do it anyway */ smp_rmb(); this_cpu_dec(trace_buffered_event_cnt); } static void disable_trace_buffered_event(void *data) { this_cpu_inc(trace_buffered_event_cnt); } /** * trace_buffered_event_disable - disable buffering events * * When a filter is removed, it is faster to not use the buffered * events, and to commit directly into the ring buffer. Free up * the temp buffers when there are no more users. This requires * special synchronization with current events. */ void trace_buffered_event_disable(void) { int cpu; WARN_ON_ONCE(!mutex_is_locked(&event_mutex)); if (WARN_ON_ONCE(!trace_buffered_event_ref)) return; if (--trace_buffered_event_ref) return; preempt_disable(); /* For each CPU, set the buffer as used. 
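	 * Bumping trace_buffered_event_cnt on every CPU makes
	 * trace_event_buffer_lock_reserve() skip the per-cpu event
	 * buffer and commit straight to the ring buffer, so the pages
	 * can be freed safely afterwards.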
	 */
	smp_call_function_many(tracing_buffer_mask,
			       disable_trace_buffered_event, NULL, 1);
	preempt_enable();

	/* Wait for all current users to finish */
	synchronize_rcu();

	for_each_tracing_cpu(cpu) {
		free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
		per_cpu(trace_buffered_event, cpu) = NULL;
	}
	/*
	 * Make sure trace_buffered_event is NULL before clearing
	 * trace_buffered_event_cnt.
	 */
	smp_wmb();

	preempt_disable();
	/* Do the work on each cpu */
	smp_call_function_many(tracing_buffer_mask,
			       enable_trace_buffered_event, NULL, 1);
	preempt_enable();
}

static struct ring_buffer *temp_buffer;

struct ring_buffer_event *
trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
			  struct trace_event_file *trace_file,
			  int type, unsigned long len,
			  unsigned long flags, int pc)
{
	struct ring_buffer_event *entry;
	int val;

	*current_rb = trace_file->tr->trace_buffer.buffer;

	if (!ring_buffer_time_stamp_abs(*current_rb) && (trace_file->flags &
	     (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED)) &&
	    (entry = this_cpu_read(trace_buffered_event))) {
		/* Try to use the per cpu buffer first */
		val = this_cpu_inc_return(trace_buffered_event_cnt);
		if (val == 1) {
			trace_event_setup(entry, type, flags, pc);
			entry->array[0] = len;
			return entry;
		}
		this_cpu_dec(trace_buffered_event_cnt);
	}

	entry = __trace_buffer_lock_reserve(*current_rb,
					    type, len, flags, pc);
	/*
	 * If tracing is off, but we have triggers enabled
	 * we still need to look at the event data. Use the temp_buffer
	 * to store the trace event for the trigger to use. It's recursion
	 * safe and will not be recorded anywhere.
	 */
	if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
		*current_rb = temp_buffer;
		entry = __trace_buffer_lock_reserve(*current_rb,
						    type, len, flags, pc);
	}
	return entry;
}
EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);

static DEFINE_SPINLOCK(tracepoint_iter_lock);
static DEFINE_MUTEX(tracepoint_printk_mutex);

static void output_printk(struct trace_event_buffer *fbuffer)
{
	struct trace_event_call *event_call;
	struct trace_event *event;
	unsigned long flags;
	struct trace_iterator *iter = tracepoint_print_iter;

	/* We should never get here if iter is NULL */
	if (WARN_ON_ONCE(!iter))
		return;

	event_call = fbuffer->trace_file->event_call;
	if (!event_call || !event_call->event.funcs ||
	    !event_call->event.funcs->trace)
		return;

	event = &fbuffer->trace_file->event_call->event;

	spin_lock_irqsave(&tracepoint_iter_lock, flags);
	trace_seq_init(&iter->seq);
	iter->ent = fbuffer->entry;
	event_call->event.funcs->trace(iter, 0, event);
	trace_seq_putc(&iter->seq, 0);
	printk("%s", iter->seq.buffer);

	spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
}

int tracepoint_printk_sysctl(struct ctl_table *table, int write,
			     void __user *buffer, size_t *lenp,
			     loff_t *ppos)
{
	int save_tracepoint_printk;
	int ret;

	mutex_lock(&tracepoint_printk_mutex);
	save_tracepoint_printk = tracepoint_printk;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	/*
	 * This will force exiting early, as tracepoint_printk
	 * is always zero when tracepoint_print_iter is not allocated
	 */
	if (!tracepoint_print_iter)
		tracepoint_printk = 0;

	if (save_tracepoint_printk == tracepoint_printk)
		goto out;

	if (tracepoint_printk)
		static_key_enable(&tracepoint_printk_key.key);
	else
		static_key_disable(&tracepoint_printk_key.key);

 out:
	mutex_unlock(&tracepoint_printk_mutex);

	return ret;
}

void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
{
	if (static_key_false(&tracepoint_printk_key.key))
		output_printk(fbuffer);

	event_trigger_unlock_commit(fbuffer->trace_file,
fbuffer->buffer, fbuffer->event, fbuffer->entry, fbuffer->flags, fbuffer->pc); } EXPORT_SYMBOL_GPL(trace_event_buffer_commit); /* * Skip 3: * * trace_buffer_unlock_commit_regs() * trace_event_buffer_commit() * trace_event_raw_event_xxx() */ # define STACK_SKIP 3 void trace_buffer_unlock_commit_regs(struct trace_array *tr, struct ring_buffer *buffer, struct ring_buffer_event *event, unsigned long flags, int pc, struct pt_regs *regs) { __buffer_unlock_commit(buffer, event); /* * If regs is not set, then skip the necessary functions. * Note, we can still get here via blktrace, wakeup tracer * and mmiotrace, but that's ok if they lose a function or * two. They are not that meaningful. */ ftrace_trace_stack(tr, buffer, flags, regs ? 0 : STACK_SKIP, pc, regs); ftrace_trace_userstack(buffer, flags, pc); } /* * Similar to trace_buffer_unlock_commit_regs() but do not dump stack. */ void trace_buffer_unlock_commit_nostack(struct ring_buffer *buffer, struct ring_buffer_event *event) { __buffer_unlock_commit(buffer, event); } static void trace_process_export(struct trace_export *export, struct ring_buffer_event *event) { struct trace_entry *entry; unsigned int size = 0; entry = ring_buffer_event_data(event); size = ring_buffer_event_length(event); export->write(export, entry, size); } static DEFINE_MUTEX(ftrace_export_lock); static struct trace_export __rcu *ftrace_exports_list __read_mostly; static DEFINE_STATIC_KEY_FALSE(ftrace_exports_enabled); static inline void ftrace_exports_enable(void) { static_branch_enable(&ftrace_exports_enabled); } static inline void ftrace_exports_disable(void) { static_branch_disable(&ftrace_exports_enabled); } static void ftrace_exports(struct ring_buffer_event *event) { struct trace_export *export; preempt_disable_notrace(); export = rcu_dereference_raw_notrace(ftrace_exports_list); while (export) { trace_process_export(export, event); export = rcu_dereference_raw_notrace(export->next); } preempt_enable_notrace(); } static inline void add_trace_export(struct trace_export **list, struct trace_export *export) { rcu_assign_pointer(export->next, *list); /* * We are entering export into the list but another * CPU might be walking that list. We need to make sure * the export->next pointer is valid before another CPU sees * the export pointer included into the list. 
*/ rcu_assign_pointer(*list, export); } static inline int rm_trace_export(struct trace_export **list, struct trace_export *export) { struct trace_export **p; for (p = list; *p != NULL; p = &(*p)->next) if (*p == export) break; if (*p != export) return -1; rcu_assign_pointer(*p, (*p)->next); return 0; } static inline void add_ftrace_export(struct trace_export **list, struct trace_export *export) { if (*list == NULL) ftrace_exports_enable(); add_trace_export(list, export); } static inline int rm_ftrace_export(struct trace_export **list, struct trace_export *export) { int ret; ret = rm_trace_export(list, export); if (*list == NULL) ftrace_exports_disable(); return ret; } int register_ftrace_export(struct trace_export *export) { if (WARN_ON_ONCE(!export->write)) return -1; mutex_lock(&ftrace_export_lock); add_ftrace_export(&ftrace_exports_list, export); mutex_unlock(&ftrace_export_lock); return 0; } EXPORT_SYMBOL_GPL(register_ftrace_export); int unregister_ftrace_export(struct trace_export *export) { int ret; mutex_lock(&ftrace_export_lock); ret = rm_ftrace_export(&ftrace_exports_list, export); mutex_unlock(&ftrace_export_lock); return ret; } EXPORT_SYMBOL_GPL(unregister_ftrace_export); void trace_function(struct trace_array *tr, unsigned long ip, unsigned long parent_ip, unsigned long flags, int pc) { struct trace_event_call *call = &event_function; struct ring_buffer *buffer = tr->trace_buffer.buffer; struct ring_buffer_event *event; struct ftrace_entry *entry; event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry), flags, pc); if (!event) return; entry = ring_buffer_event_data(event); entry->ip = ip; entry->parent_ip = parent_ip; if (!call_filter_check_discard(call, entry, buffer, event)) { if (static_branch_unlikely(&ftrace_exports_enabled)) ftrace_exports(event); __buffer_unlock_commit(buffer, event); } } #ifdef CONFIG_STACKTRACE #define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long)) struct ftrace_stack { unsigned long calls[FTRACE_STACK_MAX_ENTRIES]; }; static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack); static DEFINE_PER_CPU(int, ftrace_stack_reserve); static void __ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags, int skip, int pc, struct pt_regs *regs) { struct trace_event_call *call = &event_kernel_stack; struct ring_buffer_event *event; struct stack_entry *entry; struct stack_trace trace; int use_stack; int size = FTRACE_STACK_ENTRIES; trace.nr_entries = 0; trace.skip = skip; /* * Add one, for this function and the call to save_stack_trace() * If regs is set, then these functions will not be in the way. */ #ifndef CONFIG_UNWINDER_ORC if (!regs) trace.skip++; #endif /* * Since events can happen in NMIs there's no safe way to * use the per cpu ftrace_stacks. We reserve it and if an interrupt * or NMI comes in, it will just have to use the default * FTRACE_STACK_SIZE. */ preempt_disable_notrace(); use_stack = __this_cpu_inc_return(ftrace_stack_reserve); /* * We don't need any atomic variables, just a barrier. * If an interrupt comes in, we don't care, because it would * have exited and put the counter back to what we want. * We just need a barrier to keep gcc from moving things * around. 
	 */
	barrier();
	if (use_stack == 1) {
		trace.entries = this_cpu_ptr(ftrace_stack.calls);
		trace.max_entries = FTRACE_STACK_MAX_ENTRIES;

		if (regs)
			save_stack_trace_regs(regs, &trace);
		else
			save_stack_trace(&trace);

		if (trace.nr_entries > size)
			size = trace.nr_entries;
	} else
		/* From now on, use_stack is a boolean */
		use_stack = 0;

	size *= sizeof(unsigned long);

	event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
					    sizeof(*entry) + size, flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);

	memset(&entry->caller, 0, size);

	if (use_stack)
		memcpy(&entry->caller, trace.entries,
		       trace.nr_entries * sizeof(unsigned long));
	else {
		trace.max_entries = FTRACE_STACK_ENTRIES;
		trace.entries = entry->caller;
		if (regs)
			save_stack_trace_regs(regs, &trace);
		else
			save_stack_trace(&trace);
	}

	entry->size = trace.nr_entries;

	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);

 out:
	/* Again, don't let gcc optimize things here */
	barrier();
	__this_cpu_dec(ftrace_stack_reserve);
	preempt_enable_notrace();
}

static inline void ftrace_trace_stack(struct trace_array *tr,
				      struct ring_buffer *buffer,
				      unsigned long flags,
				      int skip, int pc, struct pt_regs *regs)
{
	if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
		return;

	__ftrace_trace_stack(buffer, flags, skip, pc, regs);
}

void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
		   int pc)
{
	struct ring_buffer *buffer = tr->trace_buffer.buffer;

	if (rcu_is_watching()) {
		__ftrace_trace_stack(buffer, flags, skip, pc, NULL);
		return;
	}

	/*
	 * When an NMI triggers, RCU is enabled via rcu_nmi_enter(),
	 * but if the above rcu_is_watching() failed, then the NMI
	 * triggered someplace critical, and rcu_irq_enter() should
	 * not be called from NMI.
	 */
	if (unlikely(in_nmi()))
		return;

	rcu_irq_enter_irqson();
	__ftrace_trace_stack(buffer, flags, skip, pc, NULL);
	rcu_irq_exit_irqson();
}

/**
 * trace_dump_stack - record a stack back trace in the trace buffer
 * @skip: Number of functions to skip (helper handlers)
 */
void trace_dump_stack(int skip)
{
	unsigned long flags;

	if (tracing_disabled || tracing_selftest_running)
		return;

	local_save_flags(flags);

#ifndef CONFIG_UNWINDER_ORC
	/* Skip 1 to skip this function. */
	skip++;
#endif
	__ftrace_trace_stack(global_trace.trace_buffer.buffer,
			     flags, skip, preempt_count(), NULL);
}
EXPORT_SYMBOL_GPL(trace_dump_stack);

static DEFINE_PER_CPU(int, user_stack_count);

void
ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
{
	struct trace_event_call *call = &event_user_stack;
	struct ring_buffer_event *event;
	struct userstack_entry *entry;
	struct stack_trace trace;

	if (!(global_trace.trace_flags & TRACE_ITER_USERSTACKTRACE))
		return;

	/*
	 * NMIs can not handle page faults, even with fixups.
	 * Saving the user stack can (and often does) fault.
	 */
	if (unlikely(in_nmi()))
		return;

	/*
	 * Prevent recursion, since the user stack tracing may
	 * trigger other kernel events.
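	 * The per-cpu user_stack_count below acts as the recursion
	 * flag, which is why preemption stays disabled while it is
	 * non-zero.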
	 */
	preempt_disable();
	if (__this_cpu_read(user_stack_count))
		goto out;

	__this_cpu_inc(user_stack_count);

	event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
					    sizeof(*entry), flags, pc);
	if (!event)
		goto out_drop_count;
	entry = ring_buffer_event_data(event);

	entry->tgid = current->tgid;
	memset(&entry->caller, 0, sizeof(entry->caller));

	trace.nr_entries = 0;
	trace.max_entries = FTRACE_STACK_ENTRIES;
	trace.skip = 0;
	trace.entries = entry->caller;

	save_stack_trace_user(&trace);
	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);

 out_drop_count:
	__this_cpu_dec(user_stack_count);
 out:
	preempt_enable();
}

#ifdef UNUSED
static void __trace_userstack(struct trace_array *tr, unsigned long flags)
{
	ftrace_trace_userstack(tr, flags, preempt_count());
}
#endif /* UNUSED */

#endif /* CONFIG_STACKTRACE */

/* created for use with alloc_percpu */
struct trace_buffer_struct {
	int nesting;
	char buffer[4][TRACE_BUF_SIZE];
};

static struct trace_buffer_struct *trace_percpu_buffer;

/*
 * This allows for lockless recording. If we're nested too deeply, then
 * this returns NULL.
 */
static char *get_trace_buf(void)
{
	struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);

	if (!buffer || buffer->nesting >= 4)
		return NULL;

	buffer->nesting++;

	/* Interrupts must see nesting incremented before we use the buffer */
	barrier();
	return &buffer->buffer[buffer->nesting][0];
}

static void put_trace_buf(void)
{
	/* Don't let the decrement of nesting leak before this */
	barrier();
	this_cpu_dec(trace_percpu_buffer->nesting);
}

static int alloc_percpu_trace_buffer(void)
{
	struct trace_buffer_struct *buffers;

	buffers = alloc_percpu(struct trace_buffer_struct);
	if (WARN(!buffers, "Could not allocate percpu trace_printk buffer"))
		return -ENOMEM;

	trace_percpu_buffer = buffers;
	return 0;
}

static int buffers_allocated;

void trace_printk_init_buffers(void)
{
	if (buffers_allocated)
		return;

	if (alloc_percpu_trace_buffer())
		return;

	/* trace_printk() is for debug use only. Don't use it in production. */

	pr_warn("\n");
	pr_warn("**********************************************************\n");
	pr_warn("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
	pr_warn("**                                                      **\n");
	pr_warn("** trace_printk() being used. Allocating extra memory.  **\n");
	pr_warn("**                                                      **\n");
	pr_warn("** This means that this is a DEBUG kernel and it is     **\n");
	pr_warn("** unsafe for production use.                           **\n");
	pr_warn("**                                                      **\n");
	pr_warn("** If you see this message and you are not debugging    **\n");
	pr_warn("** the kernel, report this immediately to your vendor!  **\n");
	pr_warn("**                                                      **\n");
	pr_warn("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
	pr_warn("**********************************************************\n");

	/* Expand the buffers to set size */
	tracing_update_buffers();

	buffers_allocated = 1;

	/*
	 * trace_printk_init_buffers() can be called by modules.
	 * If that happens, then we need to start cmdline recording
	 * directly here. If the global_trace.buffer is already
	 * allocated here, then this was called by module code.
*/ if (global_trace.trace_buffer.buffer) tracing_start_cmdline_record(); } void trace_printk_start_comm(void) { /* Start tracing comms if trace printk is set */ if (!buffers_allocated) return; tracing_start_cmdline_record(); } static void trace_printk_start_stop_comm(int enabled) { if (!buffers_allocated) return; if (enabled) tracing_start_cmdline_record(); else tracing_stop_cmdline_record(); } /** * trace_vbprintk - write binary msg to tracing buffer * */ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args) { struct trace_event_call *call = &event_bprint; struct ring_buffer_event *event; struct ring_buffer *buffer; struct trace_array *tr = &global_trace; struct bprint_entry *entry; unsigned long flags; char *tbuffer; int len = 0, size, pc; if (unlikely(tracing_selftest_running || tracing_disabled)) return 0; /* Don't pollute graph traces with trace_vprintk internals */ pause_graph_tracing(); pc = preempt_count(); preempt_disable_notrace(); tbuffer = get_trace_buf(); if (!tbuffer) { len = 0; goto out_nobuffer; } len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args); if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0) goto out; local_save_flags(flags); size = sizeof(*entry) + sizeof(u32) * len; buffer = tr->trace_buffer.buffer; event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size, flags, pc); if (!event) goto out; entry = ring_buffer_event_data(event); entry->ip = ip; entry->fmt = fmt; memcpy(entry->buf, tbuffer, sizeof(u32) * len); if (!call_filter_check_discard(call, entry, buffer, event)) { __buffer_unlock_commit(buffer, event); ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL); } out: put_trace_buf(); out_nobuffer: preempt_enable_notrace(); unpause_graph_tracing(); return len; } EXPORT_SYMBOL_GPL(trace_vbprintk); __printf(3, 0) static int __trace_array_vprintk(struct ring_buffer *buffer, unsigned long ip, const char *fmt, va_list args) { struct trace_event_call *call = &event_print; struct ring_buffer_event *event; int len = 0, size, pc; struct print_entry *entry; unsigned long flags; char *tbuffer; if (tracing_disabled || tracing_selftest_running) return 0; /* Don't pollute graph traces with trace_vprintk internals */ pause_graph_tracing(); pc = preempt_count(); preempt_disable_notrace(); tbuffer = get_trace_buf(); if (!tbuffer) { len = 0; goto out_nobuffer; } len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args); local_save_flags(flags); size = sizeof(*entry) + len + 1; event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size, flags, pc); if (!event) goto out; entry = ring_buffer_event_data(event); entry->ip = ip; memcpy(&entry->buf, tbuffer, len + 1); if (!call_filter_check_discard(call, entry, buffer, event)) { __buffer_unlock_commit(buffer, event); ftrace_trace_stack(&global_trace, buffer, flags, 6, pc, NULL); } out: put_trace_buf(); out_nobuffer: preempt_enable_notrace(); unpause_graph_tracing(); return len; } __printf(3, 0) int trace_array_vprintk(struct trace_array *tr, unsigned long ip, const char *fmt, va_list args) { return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args); } __printf(3, 0) int trace_array_printk(struct trace_array *tr, unsigned long ip, const char *fmt, ...) { int ret; va_list ap; if (!(global_trace.trace_flags & TRACE_ITER_PRINTK)) return 0; va_start(ap, fmt); ret = trace_array_vprintk(tr, ip, fmt, ap); va_end(ap); return ret; } __printf(3, 4) int trace_array_printk_buf(struct ring_buffer *buffer, unsigned long ip, const char *fmt, ...) 
{ int ret; va_list ap; if (!(global_trace.trace_flags & TRACE_ITER_PRINTK)) return 0; va_start(ap, fmt); ret = __trace_array_vprintk(buffer, ip, fmt, ap); va_end(ap); return ret; } __printf(2, 0) int trace_vprintk(unsigned long ip, const char *fmt, va_list args) { return trace_array_vprintk(&global_trace, ip, fmt, args); } EXPORT_SYMBOL_GPL(trace_vprintk); static void trace_iterator_increment(struct trace_iterator *iter) { struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu); iter->idx++; if (buf_iter) ring_buffer_read(buf_iter, NULL); } static struct trace_entry * peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts, unsigned long *lost_events) { struct ring_buffer_event *event; struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu); if (buf_iter) event = ring_buffer_iter_peek(buf_iter, ts); else event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts, lost_events); if (event) { iter->ent_size = ring_buffer_event_length(event); return ring_buffer_event_data(event); } iter->ent_size = 0; return NULL; } static struct trace_entry * __find_next_entry(struct trace_iterator *iter, int *ent_cpu, unsigned long *missing_events, u64 *ent_ts) { struct ring_buffer *buffer = iter->trace_buffer->buffer; struct trace_entry *ent, *next = NULL; unsigned long lost_events = 0, next_lost = 0; int cpu_file = iter->cpu_file; u64 next_ts = 0, ts; int next_cpu = -1; int next_size = 0; int cpu; /* * If we are in a per_cpu trace file, don't bother by iterating over * all cpu and peek directly. */ if (cpu_file > RING_BUFFER_ALL_CPUS) { if (ring_buffer_empty_cpu(buffer, cpu_file)) return NULL; ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events); if (ent_cpu) *ent_cpu = cpu_file; return ent; } for_each_tracing_cpu(cpu) { if (ring_buffer_empty_cpu(buffer, cpu)) continue; ent = peek_next_entry(iter, cpu, &ts, &lost_events); /* * Pick the entry with the smallest timestamp: */ if (ent && (!next || ts < next_ts)) { next = ent; next_cpu = cpu; next_ts = ts; next_lost = lost_events; next_size = iter->ent_size; } } iter->ent_size = next_size; if (ent_cpu) *ent_cpu = next_cpu; if (ent_ts) *ent_ts = next_ts; if (missing_events) *missing_events = next_lost; return next; } /* Find the next real entry, without updating the iterator itself */ struct trace_entry *trace_find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts) { return __find_next_entry(iter, ent_cpu, NULL, ent_ts); } /* Find the next real entry, and increment the iterator to the next entry */ void *trace_find_next_entry_inc(struct trace_iterator *iter) { iter->ent = __find_next_entry(iter, &iter->cpu, &iter->lost_events, &iter->ts); if (iter->ent) trace_iterator_increment(iter); return iter->ent ? 
iter : NULL; } static void trace_consume(struct trace_iterator *iter) { ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts, &iter->lost_events); } static void *s_next(struct seq_file *m, void *v, loff_t *pos) { struct trace_iterator *iter = m->private; int i = (int)*pos; void *ent; WARN_ON_ONCE(iter->leftover); (*pos)++; /* can't go backwards */ if (iter->idx > i) return NULL; if (iter->idx < 0) ent = trace_find_next_entry_inc(iter); else ent = iter; while (ent && iter->idx < i) ent = trace_find_next_entry_inc(iter); iter->pos = *pos; return ent; } void tracing_iter_reset(struct trace_iterator *iter, int cpu) { struct ring_buffer_event *event; struct ring_buffer_iter *buf_iter; unsigned long entries = 0; u64 ts; per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0; buf_iter = trace_buffer_iter(iter, cpu); if (!buf_iter) return; ring_buffer_iter_reset(buf_iter); /* * We could have the case with the max latency tracers * that a reset never took place on a cpu. This is evident * by the timestamp being before the start of the buffer. */ while ((event = ring_buffer_iter_peek(buf_iter, &ts))) { if (ts >= iter->trace_buffer->time_start) break; entries++; ring_buffer_read(buf_iter, NULL); } per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries; } /* * The current tracer is copied to avoid a global locking * all around. */ static void *s_start(struct seq_file *m, loff_t *pos) { struct trace_iterator *iter = m->private; struct trace_array *tr = iter->tr; int cpu_file = iter->cpu_file; void *p = NULL; loff_t l = 0; int cpu; /* * copy the tracer to avoid using a global lock all around. * iter->trace is a copy of current_trace, the pointer to the * name may be used instead of a strcmp(), as iter->trace->name * will point to the same string as current_trace->name. */ mutex_lock(&trace_types_lock); if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name)) *iter->trace = *tr->current_trace; mutex_unlock(&trace_types_lock); #ifdef CONFIG_TRACER_MAX_TRACE if (iter->snapshot && iter->trace->use_max_tr) return ERR_PTR(-EBUSY); #endif if (!iter->snapshot) atomic_inc(&trace_record_taskinfo_disabled); if (*pos != iter->pos) { iter->ent = NULL; iter->cpu = 0; iter->idx = -1; if (cpu_file == RING_BUFFER_ALL_CPUS) { for_each_tracing_cpu(cpu) tracing_iter_reset(iter, cpu); } else tracing_iter_reset(iter, cpu_file); iter->leftover = 0; for (p = iter; p && l < *pos; p = s_next(m, p, &l)) ; } else { /* * If we overflowed the seq_file before, then we want * to just reuse the trace_seq buffer again. */ if (iter->leftover) p = iter; else { l = *pos - 1; p = s_next(m, p, &l); } } trace_event_read_lock(); trace_access_lock(cpu_file); return p; } static void s_stop(struct seq_file *m, void *p) { struct trace_iterator *iter = m->private; #ifdef CONFIG_TRACER_MAX_TRACE if (iter->snapshot && iter->trace->use_max_tr) return; #endif if (!iter->snapshot) atomic_dec(&trace_record_taskinfo_disabled); trace_access_unlock(iter->cpu_file); trace_event_read_unlock(); } static void get_total_entries(struct trace_buffer *buf, unsigned long *total, unsigned long *entries) { unsigned long count; int cpu; *total = 0; *entries = 0; for_each_tracing_cpu(cpu) { count = ring_buffer_entries_cpu(buf->buffer, cpu); /* * If this buffer has skipped entries, then we hold all * entries for the trace and we need to ignore the * ones before the time stamp. 
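		 * (Those stale events are still present in the ring buffer,
		 * so the count from ring_buffer_entries_cpu() above includes
		 * them; subtract the skipped_entries recorded by
		 * tracing_iter_reset().)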
*/ if (per_cpu_ptr(buf->data, cpu)->skipped_entries) { count -= per_cpu_ptr(buf->data, cpu)->skipped_entries; /* total is the same as the entries */ *total += count; } else *total += count + ring_buffer_overrun_cpu(buf->buffer, cpu); *entries += count; } } static void print_lat_help_header(struct seq_file *m) { seq_puts(m, "# _------=> CPU# \n" "# / _-----=> irqs-off \n" "# | / _----=> need-resched \n" "# || / _---=> hardirq/softirq \n" "# ||| / _--=> preempt-depth \n" "# |||| / delay \n" "# cmd pid ||||| time | caller \n" "# \\ / ||||| \\ | / \n"); } static void print_event_info(struct trace_buffer *buf, struct seq_file *m) { unsigned long total; unsigned long entries; get_total_entries(buf, &total, &entries); seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n", entries, total, num_online_cpus()); seq_puts(m, "#\n"); } static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m, unsigned int flags) { bool tgid = flags & TRACE_ITER_RECORD_TGID; print_event_info(buf, m); seq_printf(m, "# TASK-PID %s CPU# TIMESTAMP FUNCTION\n", tgid ? "TGID " : ""); seq_printf(m, "# | | %s | | |\n", tgid ? " | " : ""); } static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m, unsigned int flags) { bool tgid = flags & TRACE_ITER_RECORD_TGID; const char tgid_space[] = " "; const char space[] = " "; print_event_info(buf, m); seq_printf(m, "# %s _-----=> irqs-off\n", tgid ? tgid_space : space); seq_printf(m, "# %s / _----=> need-resched\n", tgid ? tgid_space : space); seq_printf(m, "# %s| / _---=> hardirq/softirq\n", tgid ? tgid_space : space); seq_printf(m, "# %s|| / _--=> preempt-depth\n", tgid ? tgid_space : space); seq_printf(m, "# %s||| / delay\n", tgid ? tgid_space : space); seq_printf(m, "# TASK-PID %sCPU# |||| TIMESTAMP FUNCTION\n", tgid ? " TGID " : space); seq_printf(m, "# | | %s | |||| | |\n", tgid ? 
" | " : space); } void print_trace_header(struct seq_file *m, struct trace_iterator *iter) { unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK); struct trace_buffer *buf = iter->trace_buffer; struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu); struct tracer *type = iter->trace; unsigned long entries; unsigned long total; const char *name = "preemption"; name = type->name; get_total_entries(buf, &total, &entries); seq_printf(m, "# %s latency trace v1.1.5 on %s\n", name, UTS_RELEASE); seq_puts(m, "# -----------------------------------" "---------------------------------\n"); seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |" " (M:%s VP:%d, KP:%d, SP:%d HP:%d", nsecs_to_usecs(data->saved_latency), entries, total, buf->cpu, #if defined(CONFIG_PREEMPT_NONE) "server", #elif defined(CONFIG_PREEMPT_VOLUNTARY) "desktop", #elif defined(CONFIG_PREEMPT) "preempt", #else "unknown", #endif /* These are reserved for later use */ 0, 0, 0, 0); #ifdef CONFIG_SMP seq_printf(m, " #P:%d)\n", num_online_cpus()); #else seq_puts(m, ")\n"); #endif seq_puts(m, "# -----------------\n"); seq_printf(m, "# | task: %.16s-%d " "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n", data->comm, data->pid, from_kuid_munged(seq_user_ns(m), data->uid), data->nice, data->policy, data->rt_priority); seq_puts(m, "# -----------------\n"); if (data->critical_start) { seq_puts(m, "# => started at: "); seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags); trace_print_seq(m, &iter->seq); seq_puts(m, "\n# => ended at: "); seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags); trace_print_seq(m, &iter->seq); seq_puts(m, "\n#\n"); } seq_puts(m, "#\n"); } static void test_cpu_buff_start(struct trace_iterator *iter) { struct trace_seq *s = &iter->seq; struct trace_array *tr = iter->tr; if (!(tr->trace_flags & TRACE_ITER_ANNOTATE)) return; if (!(iter->iter_flags & TRACE_FILE_ANNOTATE)) return; if (cpumask_available(iter->started) && cpumask_test_cpu(iter->cpu, iter->started)) return; if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries) return; if (cpumask_available(iter->started)) cpumask_set_cpu(iter->cpu, iter->started); /* Don't print started cpu buffer for the first entry of the trace */ if (iter->idx > 1) trace_seq_printf(s, "##### CPU %u buffer started ####\n", iter->cpu); } static enum print_line_t print_trace_fmt(struct trace_iterator *iter) { struct trace_array *tr = iter->tr; struct trace_seq *s = &iter->seq; unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK); struct trace_entry *entry; struct trace_event *event; entry = iter->ent; test_cpu_buff_start(iter); event = ftrace_find_event(entry->type); if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) { if (iter->iter_flags & TRACE_FILE_LAT_FMT) trace_print_lat_context(iter); else trace_print_context(iter); } if (trace_seq_has_overflowed(s)) return TRACE_TYPE_PARTIAL_LINE; if (event) return event->funcs->trace(iter, sym_flags, event); trace_seq_printf(s, "Unknown type %d\n", entry->type); return trace_handle_return(s); } static enum print_line_t print_raw_fmt(struct trace_iterator *iter) { struct trace_array *tr = iter->tr; struct trace_seq *s = &iter->seq; struct trace_entry *entry; struct trace_event *event; entry = iter->ent; if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) trace_seq_printf(s, "%d %d %llu ", entry->pid, iter->cpu, iter->ts); if (trace_seq_has_overflowed(s)) return TRACE_TYPE_PARTIAL_LINE; event = ftrace_find_event(entry->type); if (event) return event->funcs->raw(iter, 0, event); 
trace_seq_printf(s, "%d ?\n", entry->type); return trace_handle_return(s); } static enum print_line_t print_hex_fmt(struct trace_iterator *iter) { struct trace_array *tr = iter->tr; struct trace_seq *s = &iter->seq; unsigned char newline = '\n'; struct trace_entry *entry; struct trace_event *event; entry = iter->ent; if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) { SEQ_PUT_HEX_FIELD(s, entry->pid); SEQ_PUT_HEX_FIELD(s, iter->cpu); SEQ_PUT_HEX_FIELD(s, iter->ts); if (trace_seq_has_overflowed(s)) return TRACE_TYPE_PARTIAL_LINE; } event = ftrace_find_event(entry->type); if (event) { enum print_line_t ret = event->funcs->hex(iter, 0, event); if (ret != TRACE_TYPE_HANDLED) return ret; } SEQ_PUT_FIELD(s, newline); return trace_handle_return(s); } static enum print_line_t print_bin_fmt(struct trace_iterator *iter) { struct trace_array *tr = iter->tr; struct trace_seq *s = &iter->seq; struct trace_entry *entry; struct trace_event *event; entry = iter->ent; if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) { SEQ_PUT_FIELD(s, entry->pid); SEQ_PUT_FIELD(s, iter->cpu); SEQ_PUT_FIELD(s, iter->ts); if (trace_seq_has_overflowed(s)) return TRACE_TYPE_PARTIAL_LINE; } event = ftrace_find_event(entry->type); return event ? event->funcs->binary(iter, 0, event) : TRACE_TYPE_HANDLED; } int trace_empty(struct trace_iterator *iter) { struct ring_buffer_iter *buf_iter; int cpu; /* If we are looking at one CPU buffer, only check that one */ if (iter->cpu_file != RING_BUFFER_ALL_CPUS) { cpu = iter->cpu_file; buf_iter = trace_buffer_iter(iter, cpu); if (buf_iter) { if (!ring_buffer_iter_empty(buf_iter)) return 0; } else { if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu)) return 0; } return 1; } for_each_tracing_cpu(cpu) { buf_iter = trace_buffer_iter(iter, cpu); if (buf_iter) { if (!ring_buffer_iter_empty(buf_iter)) return 0; } else { if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu)) return 0; } } return 1; } /* Called with trace_event_read_lock() held. 
*/ enum print_line_t print_trace_line(struct trace_iterator *iter) { struct trace_array *tr = iter->tr; unsigned long trace_flags = tr->trace_flags; enum print_line_t ret; if (iter->lost_events) { trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n", iter->cpu, iter->lost_events); if (trace_seq_has_overflowed(&iter->seq)) return TRACE_TYPE_PARTIAL_LINE; } if (iter->trace && iter->trace->print_line) { ret = iter->trace->print_line(iter); if (ret != TRACE_TYPE_UNHANDLED) return ret; } if (iter->ent->type == TRACE_BPUTS && trace_flags & TRACE_ITER_PRINTK && trace_flags & TRACE_ITER_PRINTK_MSGONLY) return trace_print_bputs_msg_only(iter); if (iter->ent->type == TRACE_BPRINT && trace_flags & TRACE_ITER_PRINTK && trace_flags & TRACE_ITER_PRINTK_MSGONLY) return trace_print_bprintk_msg_only(iter); if (iter->ent->type == TRACE_PRINT && trace_flags & TRACE_ITER_PRINTK && trace_flags & TRACE_ITER_PRINTK_MSGONLY) return trace_print_printk_msg_only(iter); if (trace_flags & TRACE_ITER_BIN) return print_bin_fmt(iter); if (trace_flags & TRACE_ITER_HEX) return print_hex_fmt(iter); if (trace_flags & TRACE_ITER_RAW) return print_raw_fmt(iter); return print_trace_fmt(iter); } void trace_latency_header(struct seq_file *m) { struct trace_iterator *iter = m->private; struct trace_array *tr = iter->tr; /* print nothing if the buffers are empty */ if (trace_empty(iter)) return; if (iter->iter_flags & TRACE_FILE_LAT_FMT) print_trace_header(m, iter); if (!(tr->trace_flags & TRACE_ITER_VERBOSE)) print_lat_help_header(m); } void trace_default_header(struct seq_file *m) { struct trace_iterator *iter = m->private; struct trace_array *tr = iter->tr; unsigned long trace_flags = tr->trace_flags; if (!(trace_flags & TRACE_ITER_CONTEXT_INFO)) return; if (iter->iter_flags & TRACE_FILE_LAT_FMT) { /* print nothing if the buffers are empty */ if (trace_empty(iter)) return; print_trace_header(m, iter); if (!(trace_flags & TRACE_ITER_VERBOSE)) print_lat_help_header(m); } else { if (!(trace_flags & TRACE_ITER_VERBOSE)) { if (trace_flags & TRACE_ITER_IRQ_INFO) print_func_help_header_irq(iter->trace_buffer, m, trace_flags); else print_func_help_header(iter->trace_buffer, m, trace_flags); } } } static void test_ftrace_alive(struct seq_file *m) { if (!ftrace_is_dead()) return; seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n" "# MAY BE MISSING FUNCTION EVENTS\n"); } #ifdef CONFIG_TRACER_MAX_TRACE static void show_snapshot_main_help(struct seq_file *m) { seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n" "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n" "# Takes a snapshot of the main buffer.\n" "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n" "# (Doesn't have to be '2' works with any number that\n" "# is not a '0' or '1')\n"); } static void show_snapshot_percpu_help(struct seq_file *m) { seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n"); #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n" "# Takes a snapshot of the main buffer for this cpu.\n"); #else seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n" "# Must use main snapshot file to allocate.\n"); #endif seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n" "# (Doesn't have to be '2' works with any number that\n" "# is not a '0' or '1')\n"); } static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { if 
(iter->tr->allocated_snapshot) seq_puts(m, "#\n# * Snapshot is allocated *\n#\n"); else seq_puts(m, "#\n# * Snapshot is freed *\n#\n"); seq_puts(m, "# Snapshot commands:\n"); if (iter->cpu_file == RING_BUFFER_ALL_CPUS) show_snapshot_main_help(m); else show_snapshot_percpu_help(m); } #else /* Should never be called */ static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { } #endif static int s_show(struct seq_file *m, void *v) { struct trace_iterator *iter = v; int ret; if (iter->ent == NULL) { if (iter->tr) { seq_printf(m, "# tracer: %s\n", iter->trace->name); seq_puts(m, "#\n"); test_ftrace_alive(m); } if (iter->snapshot && trace_empty(iter)) print_snapshot_help(m, iter); else if (iter->trace && iter->trace->print_header) iter->trace->print_header(m); else trace_default_header(m); } else if (iter->leftover) { /* * If we filled the seq_file buffer earlier, we * want to just show it now. */ ret = trace_print_seq(m, &iter->seq); /* ret should this time be zero, but you never know */ iter->leftover = ret; } else { print_trace_line(iter); ret = trace_print_seq(m, &iter->seq); /* * If we overflow the seq_file buffer, then it will * ask us for this data again at start up. * Use that instead. * ret is 0 if seq_file write succeeded. * -1 otherwise. */ iter->leftover = ret; } return 0; } /* * Should be used after trace_array_get(), trace_types_lock * ensures that i_cdev was already initialized. */ static inline int tracing_get_cpu(struct inode *inode) { if (inode->i_cdev) /* See trace_create_cpu_file() */ return (long)inode->i_cdev - 1; return RING_BUFFER_ALL_CPUS; } static const struct seq_operations tracer_seq_ops = { .start = s_start, .next = s_next, .stop = s_stop, .show = s_show, }; static struct trace_iterator * __tracing_open(struct inode *inode, struct file *file, bool snapshot) { struct trace_array *tr = inode->i_private; struct trace_iterator *iter; int cpu; if (tracing_disabled) return ERR_PTR(-ENODEV); iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter)); if (!iter) return ERR_PTR(-ENOMEM); iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter), GFP_KERNEL); if (!iter->buffer_iter) goto release; /* * We make a copy of the current tracer to avoid concurrent * changes on it while we are reading. */ mutex_lock(&trace_types_lock); iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL); if (!iter->trace) goto fail; *iter->trace = *tr->current_trace; if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL)) goto fail; iter->tr = tr; #ifdef CONFIG_TRACER_MAX_TRACE /* Currently only the top directory has a snapshot */ if (tr->current_trace->print_max || snapshot) iter->trace_buffer = &tr->max_buffer; else #endif iter->trace_buffer = &tr->trace_buffer; iter->snapshot = snapshot; iter->pos = -1; iter->cpu_file = tracing_get_cpu(inode); mutex_init(&iter->mutex); /* Notify the tracer early; before we stop tracing. */ if (iter->trace && iter->trace->open) iter->trace->open(iter); /* Annotate start of buffers if we had overruns */ if (ring_buffer_overruns(iter->trace_buffer->buffer)) iter->iter_flags |= TRACE_FILE_ANNOTATE; /* Output in nanoseconds only if we are using a clock in nanoseconds. 
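	 * Clocks such as "counter" are bare sequence numbers rather than
	 * nanosecond time stamps, and must not be scaled for output.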
*/ if (trace_clocks[tr->clock_id].in_ns) iter->iter_flags |= TRACE_FILE_TIME_IN_NS; /* stop the trace while dumping if we are not opening "snapshot" */ if (!iter->snapshot) tracing_stop_tr(tr); if (iter->cpu_file == RING_BUFFER_ALL_CPUS) { for_each_tracing_cpu(cpu) { iter->buffer_iter[cpu] = ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu); } ring_buffer_read_prepare_sync(); for_each_tracing_cpu(cpu) { ring_buffer_read_start(iter->buffer_iter[cpu]); tracing_iter_reset(iter, cpu); } } else { cpu = iter->cpu_file; iter->buffer_iter[cpu] = ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu); ring_buffer_read_prepare_sync(); ring_buffer_read_start(iter->buffer_iter[cpu]); tracing_iter_reset(iter, cpu); } mutex_unlock(&trace_types_lock); return iter; fail: mutex_unlock(&trace_types_lock); kfree(iter->trace); kfree(iter->buffer_iter); release: seq_release_private(inode, file); return ERR_PTR(-ENOMEM); } int tracing_open_generic(struct inode *inode, struct file *filp) { if (tracing_disabled) return -ENODEV; filp->private_data = inode->i_private; return 0; } bool tracing_is_disabled(void) { return (tracing_disabled) ? true: false; } /* * Open and update trace_array ref count. * Must have the current trace_array passed to it. */ static int tracing_open_generic_tr(struct inode *inode, struct file *filp) { struct trace_array *tr = inode->i_private; if (tracing_disabled) return -ENODEV; if (trace_array_get(tr) < 0) return -ENODEV; filp->private_data = inode->i_private; return 0; } static int tracing_release(struct inode *inode, struct file *file) { struct trace_array *tr = inode->i_private; struct seq_file *m = file->private_data; struct trace_iterator *iter; int cpu; if (!(file->f_mode & FMODE_READ)) { trace_array_put(tr); return 0; } /* Writes do not use seq_file */ iter = m->private; mutex_lock(&trace_types_lock); for_each_tracing_cpu(cpu) { if (iter->buffer_iter[cpu]) ring_buffer_read_finish(iter->buffer_iter[cpu]); } if (iter->trace && iter->trace->close) iter->trace->close(iter); if (!iter->snapshot) /* reenable tracing if it was previously enabled */ tracing_start_tr(tr); __trace_array_put(tr); mutex_unlock(&trace_types_lock); mutex_destroy(&iter->mutex); free_cpumask_var(iter->started); kfree(iter->trace); kfree(iter->buffer_iter); seq_release_private(inode, file); return 0; } static int tracing_release_generic_tr(struct inode *inode, struct file *file) { struct trace_array *tr = inode->i_private; trace_array_put(tr); return 0; } static int tracing_single_release_tr(struct inode *inode, struct file *file) { struct trace_array *tr = inode->i_private; trace_array_put(tr); return single_release(inode, file); } static int tracing_open(struct inode *inode, struct file *file) { struct trace_array *tr = inode->i_private; struct trace_iterator *iter; int ret = 0; if (trace_array_get(tr) < 0) return -ENODEV; /* If this file was open for write, then erase contents */ if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) { int cpu = tracing_get_cpu(inode); struct trace_buffer *trace_buf = &tr->trace_buffer; #ifdef CONFIG_TRACER_MAX_TRACE if (tr->current_trace->print_max) trace_buf = &tr->max_buffer; #endif if (cpu == RING_BUFFER_ALL_CPUS) tracing_reset_online_cpus(trace_buf); else tracing_reset(trace_buf, cpu); } if (file->f_mode & FMODE_READ) { iter = __tracing_open(inode, file, false); if (IS_ERR(iter)) ret = PTR_ERR(iter); else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT) iter->iter_flags |= TRACE_FILE_LAT_FMT; } if (ret < 0) trace_array_put(tr); return ret; } /* * Some 
tracers are not suitable for instance buffers. * A tracer is always available for the global array (toplevel) * or if it explicitly states that it is. */ static bool trace_ok_for_array(struct tracer *t, struct trace_array *tr) { return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances; } /* Find the next tracer that this trace array may use */ static struct tracer * get_tracer_for_array(struct trace_array *tr, struct tracer *t) { while (t && !trace_ok_for_array(t, tr)) t = t->next; return t; } static void * t_next(struct seq_file *m, void *v, loff_t *pos) { struct trace_array *tr = m->private; struct tracer *t = v; (*pos)++; if (t) t = get_tracer_for_array(tr, t->next); return t; } static void *t_start(struct seq_file *m, loff_t *pos) { struct trace_array *tr = m->private; struct tracer *t; loff_t l = 0; mutex_lock(&trace_types_lock); t = get_tracer_for_array(tr, trace_types); for (; t && l < *pos; t = t_next(m, t, &l)) ; return t; } static void t_stop(struct seq_file *m, void *p) { mutex_unlock(&trace_types_lock); } static int t_show(struct seq_file *m, void *v) { struct tracer *t = v; if (!t) return 0; seq_puts(m, t->name); if (t->next) seq_putc(m, ' '); else seq_putc(m, '\n'); return 0; } static const struct seq_operations show_traces_seq_ops = { .start = t_start, .next = t_next, .stop = t_stop, .show = t_show, }; static int show_traces_open(struct inode *inode, struct file *file) { struct trace_array *tr = inode->i_private; struct seq_file *m; int ret; if (tracing_disabled) return -ENODEV; ret = seq_open(file, &show_traces_seq_ops); if (ret) return ret; m = file->private_data; m->private = tr; return 0; } static ssize_t tracing_write_stub(struct file *filp, const char __user *ubuf, size_t count, loff_t *ppos) { return count; } loff_t tracing_lseek(struct file *file, loff_t offset, int whence) { int ret; if (file->f_mode & FMODE_READ) ret = seq_lseek(file, offset, whence); else file->f_pos = ret = 0; return ret; } static const struct file_operations tracing_fops = { .open = tracing_open, .read = seq_read, .write = tracing_write_stub, .llseek = tracing_lseek, .release = tracing_release, }; static const struct file_operations show_traces_fops = { .open = show_traces_open, .read = seq_read, .release = seq_release, .llseek = seq_lseek, }; static ssize_t tracing_cpumask_read(struct file *filp, char __user *ubuf, size_t count, loff_t *ppos) { struct trace_array *tr = file_inode(filp)->i_private; char *mask_str; int len; len = snprintf(NULL, 0, "%*pb\n", cpumask_pr_args(tr->tracing_cpumask)) + 1; mask_str = kmalloc(len, GFP_KERNEL); if (!mask_str) return -ENOMEM; len = snprintf(mask_str, len, "%*pb\n", cpumask_pr_args(tr->tracing_cpumask)); if (len >= count) { count = -EINVAL; goto out_err; } count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len); out_err: kfree(mask_str); return count; } static ssize_t tracing_cpumask_write(struct file *filp, const char __user *ubuf, size_t count, loff_t *ppos) { struct trace_array *tr = file_inode(filp)->i_private; cpumask_var_t tracing_cpumask_new; int err, cpu; if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL)) return -ENOMEM; err = cpumask_parse_user(ubuf, count, tracing_cpumask_new); if (err) goto err_unlock; local_irq_disable(); arch_spin_lock(&tr->max_lock); for_each_tracing_cpu(cpu) { /* * Increase/decrease the disabled counter if we are * about to flip a bit in the cpumask: */ if (cpumask_test_cpu(cpu, tr->tracing_cpumask) && !cpumask_test_cpu(cpu, tracing_cpumask_new)) { atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, 
cpu)->disabled); ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu); } if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) && cpumask_test_cpu(cpu, tracing_cpumask_new)) { atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled); ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu); } } arch_spin_unlock(&tr->max_lock); local_irq_enable(); cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new); free_cpumask_var(tracing_cpumask_new); return count; err_unlock: free_cpumask_var(tracing_cpumask_new); return err; } static const struct file_operations tracing_cpumask_fops = { .open = tracing_open_generic_tr, .read = tracing_cpumask_read, .write = tracing_cpumask_write, .release = tracing_release_generic_tr, .llseek = generic_file_llseek, }; static int tracing_trace_options_show(struct seq_file *m, void *v) { struct tracer_opt *trace_opts; struct trace_array *tr = m->private; u32 tracer_flags; int i; mutex_lock(&trace_types_lock); tracer_flags = tr->current_trace->flags->val; trace_opts = tr->current_trace->flags->opts; for (i = 0; trace_options[i]; i++) { if (tr->trace_flags & (1 << i)) seq_printf(m, "%s\n", trace_options[i]); else seq_printf(m, "no%s\n", trace_options[i]); } for (i = 0; trace_opts[i].name; i++) { if (tracer_flags & trace_opts[i].bit) seq_printf(m, "%s\n", trace_opts[i].name); else seq_printf(m, "no%s\n", trace_opts[i].name); } mutex_unlock(&trace_types_lock); return 0; } static int __set_tracer_option(struct trace_array *tr, struct tracer_flags *tracer_flags, struct tracer_opt *opts, int neg) { struct tracer *trace = tracer_flags->trace; int ret; ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg); if (ret) return ret; if (neg) tracer_flags->val &= ~opts->bit; else tracer_flags->val |= opts->bit; return 0; } /* Try to assign a tracer specific option */ static int set_tracer_option(struct trace_array *tr, char *cmp, int neg) { struct tracer *trace = tr->current_trace; struct tracer_flags *tracer_flags = trace->flags; struct tracer_opt *opts = NULL; int i; for (i = 0; tracer_flags->opts[i].name; i++) { opts = &tracer_flags->opts[i]; if (strcmp(cmp, opts->name) == 0) return __set_tracer_option(tr, trace->flags, opts, neg); } return -EINVAL; } /* Some tracers require overwrite to stay enabled */ int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set) { if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set) return -1; return 0; } int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled) { /* do nothing if flag is already set */ if (!!(tr->trace_flags & mask) == !!enabled) return 0; /* Give the tracer a chance to approve the change */ if (tr->current_trace->flag_changed) if (tr->current_trace->flag_changed(tr, mask, !!enabled)) return -EINVAL; if (enabled) tr->trace_flags |= mask; else tr->trace_flags &= ~mask; if (mask == TRACE_ITER_RECORD_CMD) trace_event_enable_cmd_record(enabled); if (mask == TRACE_ITER_RECORD_TGID) { if (!tgid_map) tgid_map = kcalloc(PID_MAX_DEFAULT + 1, sizeof(*tgid_map), GFP_KERNEL); if (!tgid_map) { tr->trace_flags &= ~TRACE_ITER_RECORD_TGID; return -ENOMEM; } trace_event_enable_tgid_record(enabled); } if (mask == TRACE_ITER_EVENT_FORK) trace_event_follow_fork(tr, enabled); if (mask == TRACE_ITER_FUNC_FORK) ftrace_pid_follow_fork(tr, enabled); if (mask == TRACE_ITER_OVERWRITE) { ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled); #ifdef CONFIG_TRACER_MAX_TRACE ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled); #endif } if (mask == TRACE_ITER_PRINTK) { 
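		/* Keep trace_printk()'s comm recording in step with its output. */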
trace_printk_start_stop_comm(enabled); trace_printk_control(enabled); } return 0; } static int trace_set_options(struct trace_array *tr, char *option) { char *cmp; int neg = 0; int ret; size_t orig_len = strlen(option); int len; cmp = strstrip(option); len = str_has_prefix(cmp, "no"); if (len) neg = 1; cmp += len; mutex_lock(&trace_types_lock); ret = match_string(trace_options, -1, cmp); /* If no option could be set, test the specific tracer options */ if (ret < 0) ret = set_tracer_option(tr, cmp, neg); else ret = set_tracer_flag(tr, 1 << ret, !neg); mutex_unlock(&trace_types_lock); /* * If the first trailing whitespace is replaced with '\0' by strstrip, * turn it back into a space. */ if (orig_len > strlen(option)) option[strlen(option)] = ' '; return ret; } static void __init apply_trace_boot_options(void) { char *buf = trace_boot_options_buf; char *option; while (true) { option = strsep(&buf, ","); if (!option) break; if (*option) trace_set_options(&global_trace, option); /* Put back the comma to allow this to be called again */ if (buf) *(buf - 1) = ','; } } static ssize_t tracing_trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { struct seq_file *m = filp->private_data; struct trace_array *tr = m->private; char buf[64]; int ret; if (cnt >= sizeof(buf)) return -EINVAL; if (copy_from_user(buf, ubuf, cnt)) return -EFAULT; buf[cnt] = 0; ret = trace_set_options(tr, buf); if (ret < 0) return ret; *ppos += cnt; return cnt; } static int tracing_trace_options_open(struct inode *inode, struct file *file) { struct trace_array *tr = inode->i_private; int ret; if (tracing_disabled) return -ENODEV; if (trace_array_get(tr) < 0) return -ENODEV; ret = single_open(file, tracing_trace_options_show, inode->i_private); if (ret < 0) trace_array_put(tr); return ret; } static const struct file_operations tracing_iter_fops = { .open = tracing_trace_options_open, .read = seq_read, .llseek = seq_lseek, .release = tracing_single_release_tr, .write = tracing_trace_options_write, }; static const char readme_msg[] = "tracing mini-HOWTO:\n\n" "# echo 0 > tracing_on : quick way to disable tracing\n" "# echo 1 > tracing_on : quick way to re-enable tracing\n\n" " Important files:\n" " trace\t\t\t- The static contents of the buffer\n" "\t\t\t To clear the buffer write into this file: echo > trace\n" " trace_pipe\t\t- A consuming read to see the contents of the buffer\n" " current_tracer\t- function and latency tracers\n" " available_tracers\t- list of configured tracers for current_tracer\n" " buffer_size_kb\t- view and modify size of per cpu buffer\n" " buffer_total_size_kb - view total size of all cpu buffers\n\n" " trace_clock\t\t-change the clock used to order events\n" " local: Per cpu clock but may not be synced across CPUs\n" " global: Synced across CPUs but slows tracing down.\n" " counter: Not a clock, but just an increment\n" " uptime: Jiffy counter from time of boot\n" " perf: Same clock that perf events use\n" #ifdef CONFIG_X86_64 " x86-tsc: TSC cycle counter\n" #endif "\n timestamp_mode\t-view the mode used to timestamp events\n" " delta: Delta difference against a buffer-wide timestamp\n" " absolute: Absolute (standalone) timestamp\n" "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n" "\n trace_marker_raw\t\t- Writes into this file writes binary data into the kernel buffer\n" " tracing_cpumask\t- Limit which CPUs to trace\n" " instances\t\t- Make sub-buffers with: mkdir instances/foo\n" "\t\t\t Remove sub-buffer with rmdir\n" " 
trace_options\t\t- Set format or modify how tracing happens\n" "\t\t\t Disable an option by adding a suffix 'no' to the\n" "\t\t\t option name\n" " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n" #ifdef CONFIG_DYNAMIC_FTRACE "\n available_filter_functions - list of functions that can be filtered on\n" " set_ftrace_filter\t- echo function name in here to only trace these\n" "\t\t\t functions\n" "\t accepts: func_full_name or glob-matching-pattern\n" "\t modules: Can select a group via module\n" "\t Format: :mod:<module-name>\n" "\t example: echo :mod:ext3 > set_ftrace_filter\n" "\t triggers: a command to perform when function is hit\n" "\t Format: <function>:<trigger>[:count]\n" "\t trigger: traceon, traceoff\n" "\t\t enable_event:<system>:<event>\n" "\t\t disable_event:<system>:<event>\n" #ifdef CONFIG_STACKTRACE "\t\t stacktrace\n" #endif #ifdef CONFIG_TRACER_SNAPSHOT "\t\t snapshot\n" #endif "\t\t dump\n" "\t\t cpudump\n" "\t example: echo do_fault:traceoff > set_ftrace_filter\n" "\t echo do_trap:traceoff:3 > set_ftrace_filter\n" "\t The first one will disable tracing every time do_fault is hit\n" "\t The second will disable tracing at most 3 times when do_trap is hit\n" "\t The first time do trap is hit and it disables tracing, the\n" "\t counter will decrement to 2. If tracing is already disabled,\n" "\t the counter will not decrement. It only decrements when the\n" "\t trigger did work\n" "\t To remove trigger without count:\n" "\t echo '!<function>:<trigger> > set_ftrace_filter\n" "\t To remove trigger with a count:\n" "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n" " set_ftrace_notrace\t- echo function name in here to never trace.\n" "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n" "\t modules: Can select a group via module command :mod:\n" "\t Does not accept triggers\n" #endif /* CONFIG_DYNAMIC_FTRACE */ #ifdef CONFIG_FUNCTION_TRACER " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n" "\t\t (function)\n" #endif #ifdef CONFIG_FUNCTION_GRAPH_TRACER " set_graph_function\t- Trace the nested calls of a function (function_graph)\n" " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n" " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n" #endif #ifdef CONFIG_TRACER_SNAPSHOT "\n snapshot\t\t- Like 'trace' but shows the content of the static\n" "\t\t\t snapshot buffer. 
Read the contents for more\n" "\t\t\t information\n" #endif #ifdef CONFIG_STACK_TRACER " stack_trace\t\t- Shows the max stack trace when active\n" " stack_max_size\t- Shows current max stack size that was traced\n" "\t\t\t Write into this file to reset the max size (trigger a\n" "\t\t\t new trace)\n" #ifdef CONFIG_DYNAMIC_FTRACE " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n" "\t\t\t traces\n" #endif #endif /* CONFIG_STACK_TRACER */ #ifdef CONFIG_DYNAMIC_EVENTS " dynamic_events\t\t- Add/remove/show the generic dynamic events\n" "\t\t\t Write into this file to define/undefine new trace events.\n" #endif #ifdef CONFIG_KPROBE_EVENTS " kprobe_events\t\t- Add/remove/show the kernel dynamic events\n" "\t\t\t Write into this file to define/undefine new trace events.\n" #endif #ifdef CONFIG_UPROBE_EVENTS " uprobe_events\t\t- Add/remove/show the userspace dynamic events\n" "\t\t\t Write into this file to define/undefine new trace events.\n" #endif #if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS) "\t accepts: event-definitions (one definition per line)\n" "\t Format: p[:[<group>/]<event>] <place> [<args>]\n" "\t r[maxactive][:[<group>/]<event>] <place> [<args>]\n" #ifdef CONFIG_HIST_TRIGGERS "\t s:[synthetic/]<event> <field> [<field>]\n" #endif "\t -:[<group>/]<event>\n" #ifdef CONFIG_KPROBE_EVENTS "\t place: [<module>:]<symbol>[+<offset>]|<memaddr>\n" "place (kretprobe): [<module>:]<symbol>[+<offset>]|<memaddr>\n" #endif #ifdef CONFIG_UPROBE_EVENTS " place (uprobe): <path>:<offset>[(ref_ctr_offset)]\n" #endif "\t args: <name>=fetcharg[:type]\n" "\t fetcharg: %<register>, @<address>, @<symbol>[+|-<offset>],\n" #ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API "\t $stack<index>, $stack, $retval, $comm, $arg<N>\n" #else "\t $stack<index>, $stack, $retval, $comm\n" #endif "\t type: s8/16/32/64, u8/16/32/64, x8/16/32/64, string, symbol,\n" "\t b<bit-width>@<bit-offset>/<container-size>,\n" "\t <type>\\[<array-size>\\]\n" #ifdef CONFIG_HIST_TRIGGERS "\t field: <stype> <name>;\n" "\t stype: u8/u16/u32/u64, s8/s16/s32/s64, pid_t,\n" "\t [unsigned] char/int/long\n" #endif #endif " events/\t\t- Directory containing all trace event subsystems:\n" " enable\t\t- Write 0/1 to enable/disable tracing of all events\n" " events/<system>/\t- Directory containing all trace events for <system>:\n" " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n" "\t\t\t events\n" " filter\t\t- If set, only events passing filter are traced\n" " events/<system>/<event>/\t- Directory containing control files for\n" "\t\t\t <event>:\n" " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n" " filter\t\t- If set, only events passing filter are traced\n" " trigger\t\t- If set, a command to perform when event is hit\n" "\t Format: <trigger>[:count][if <filter>]\n" "\t trigger: traceon, traceoff\n" "\t enable_event:<system>:<event>\n" "\t disable_event:<system>:<event>\n" #ifdef CONFIG_HIST_TRIGGERS "\t enable_hist:<system>:<event>\n" "\t disable_hist:<system>:<event>\n" #endif #ifdef CONFIG_STACKTRACE "\t\t stacktrace\n" #endif #ifdef CONFIG_TRACER_SNAPSHOT "\t\t snapshot\n" #endif #ifdef CONFIG_HIST_TRIGGERS "\t\t hist (see below)\n" #endif "\t example: echo traceoff > events/block/block_unplug/trigger\n" "\t echo traceoff:3 > events/block/block_unplug/trigger\n" "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n" "\t events/block/block_unplug/trigger\n" "\t The first disables tracing every time block_unplug is hit.\n" "\t The second disables tracing the first 3 
times block_unplug is hit.\n" "\t The third enables the kmalloc event the first 3 times block_unplug\n" "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n" "\t Like function triggers, the counter is only decremented if it\n" "\t enabled or disabled tracing.\n" "\t To remove a trigger without a count:\n" "\t echo '!<trigger> > <system>/<event>/trigger\n" "\t To remove a trigger with a count:\n" "\t echo '!<trigger>:0 > <system>/<event>/trigger\n" "\t Filters can be ignored when removing a trigger.\n" #ifdef CONFIG_HIST_TRIGGERS " hist trigger\t- If set, event hits are aggregated into a hash table\n" "\t Format: hist:keys=<field1[,field2,...]>\n" "\t [:values=<field1[,field2,...]>]\n" "\t [:sort=<field1[,field2,...]>]\n" "\t [:size=#entries]\n" "\t [:pause][:continue][:clear]\n" "\t [:name=histname1]\n" "\t [if <filter>]\n\n" "\t When a matching event is hit, an entry is added to a hash\n" "\t table using the key(s) and value(s) named, and the value of a\n" "\t sum called 'hitcount' is incremented. Keys and values\n" "\t correspond to fields in the event's format description. Keys\n" "\t can be any field, or the special string 'stacktrace'.\n" "\t Compound keys consisting of up to two fields can be specified\n" "\t by the 'keys' keyword. Values must correspond to numeric\n" "\t fields. Sort keys consisting of up to two fields can be\n" "\t specified using the 'sort' keyword. The sort direction can\n" "\t be modified by appending '.descending' or '.ascending' to a\n" "\t sort field. The 'size' parameter can be used to specify more\n" "\t or fewer than the default 2048 entries for the hashtable size.\n" "\t If a hist trigger is given a name using the 'name' parameter,\n" "\t its histogram data will be shared with other triggers of the\n" "\t same name, and trigger hits will update this common data.\n\n" "\t Reading the 'hist' file for the event will dump the hash\n" "\t table in its entirety to stdout. If there are multiple hist\n" "\t triggers attached to an event, there will be a table for each\n" "\t trigger in the output. The table displayed for a named\n" "\t trigger will be the same as any other instance having the\n" "\t same name. The default format used to display a given field\n" "\t can be modified by appending any of the following modifiers\n" "\t to the field name, as applicable:\n\n" "\t .hex display a number as a hex value\n" "\t .sym display an address as a symbol\n" "\t .sym-offset display an address as a symbol and offset\n" "\t .execname display a common_pid as a program name\n" "\t .syscall display a syscall id as a syscall name\n" "\t .log2 display log2 value rather than raw number\n" "\t .usecs display a common_timestamp in microseconds\n\n" "\t The 'pause' parameter can be used to pause an existing hist\n" "\t trigger or to start a hist trigger but not log any events\n" "\t until told to do so. 'continue' can be used to start or\n" "\t restart a paused hist trigger.\n\n" "\t The 'clear' parameter will clear the contents of a running\n" "\t hist trigger and leave its current paused/active state\n" "\t unchanged.\n\n" "\t The enable_hist and disable_hist triggers can be used to\n" "\t have one event conditionally start and stop another event's\n" "\t already-attached hist trigger. 
The syntax is analogous to\n"
	"\t the enable_event and disable_event triggers.\n"
#endif
;

static ssize_t
tracing_readme_read(struct file *filp, char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return simple_read_from_buffer(ubuf, cnt, ppos,
					readme_msg, strlen(readme_msg));
}

static const struct file_operations tracing_readme_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_readme_read,
	.llseek		= generic_file_llseek,
};

static void *saved_tgids_next(struct seq_file *m, void *v, loff_t *pos)
{
	int *ptr = v;

	if (*pos || m->count)
		ptr++;

	(*pos)++;

	for (; ptr <= &tgid_map[PID_MAX_DEFAULT]; ptr++) {
		if (trace_find_tgid(*ptr))
			return ptr;
	}

	return NULL;
}

static void *saved_tgids_start(struct seq_file *m, loff_t *pos)
{
	void *v;
	loff_t l = 0;

	if (!tgid_map)
		return NULL;

	v = &tgid_map[0];
	while (l <= *pos) {
		v = saved_tgids_next(m, v, &l);
		if (!v)
			return NULL;
	}

	return v;
}

static void saved_tgids_stop(struct seq_file *m, void *v)
{
}

static int saved_tgids_show(struct seq_file *m, void *v)
{
	int pid = (int *)v - tgid_map;

	seq_printf(m, "%d %d\n", pid, trace_find_tgid(pid));
	return 0;
}

static const struct seq_operations tracing_saved_tgids_seq_ops = {
	.start		= saved_tgids_start,
	.stop		= saved_tgids_stop,
	.next		= saved_tgids_next,
	.show		= saved_tgids_show,
};

static int tracing_saved_tgids_open(struct inode *inode, struct file *filp)
{
	if (tracing_disabled)
		return -ENODEV;

	return seq_open(filp, &tracing_saved_tgids_seq_ops);
}


static const struct file_operations tracing_saved_tgids_fops = {
	.open		= tracing_saved_tgids_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
{
	unsigned int *ptr = v;

	if (*pos || m->count)
		ptr++;

	(*pos)++;

	for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
	     ptr++) {
		if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
			continue;

		return ptr;
	}

	return NULL;
}

static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
{
	void *v;
	loff_t l = 0;

	preempt_disable();
	arch_spin_lock(&trace_cmdline_lock);

	v = &savedcmd->map_cmdline_to_pid[0];
	while (l <= *pos) {
		v = saved_cmdlines_next(m, v, &l);
		if (!v)
			return NULL;
	}

	return v;
}

static void saved_cmdlines_stop(struct seq_file *m, void *v)
{
	arch_spin_unlock(&trace_cmdline_lock);
	preempt_enable();
}

static int saved_cmdlines_show(struct seq_file *m, void *v)
{
	char buf[TASK_COMM_LEN];
	unsigned int *pid = v;

	__trace_find_cmdline(*pid, buf);
	seq_printf(m, "%d %s\n", *pid, buf);
	return 0;
}

static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
	.start		= saved_cmdlines_start,
	.next		= saved_cmdlines_next,
	.stop		= saved_cmdlines_stop,
	.show		= saved_cmdlines_show,
};

static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
{
	if (tracing_disabled)
		return -ENODEV;

	return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
}

static const struct file_operations tracing_saved_cmdlines_fops = {
	.open		= tracing_saved_cmdlines_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static ssize_t
tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
				 size_t cnt, loff_t *ppos)
{
	char buf[64];
	int r;

	arch_spin_lock(&trace_cmdline_lock);
	r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
	arch_spin_unlock(&trace_cmdline_lock);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
{
	kfree(s->saved_cmdlines);
	kfree(s->map_cmdline_to_pid);
	kfree(s);
}

static int
tracing_resize_saved_cmdlines(unsigned int val) { struct saved_cmdlines_buffer *s, *savedcmd_temp; s = kmalloc(sizeof(*s), GFP_KERNEL); if (!s) return -ENOMEM; if (allocate_cmdlines_buffer(val, s) < 0) { kfree(s); return -ENOMEM; } arch_spin_lock(&trace_cmdline_lock); savedcmd_temp = savedcmd; savedcmd = s; arch_spin_unlock(&trace_cmdline_lock); free_saved_cmdlines_buffer(savedcmd_temp); return 0; } static ssize_t tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { unsigned long val; int ret; ret = kstrtoul_from_user(ubuf, cnt, 10, &val); if (ret) return ret; /* must have at least 1 entry or less than PID_MAX_DEFAULT */ if (!val || val > PID_MAX_DEFAULT) return -EINVAL; ret = tracing_resize_saved_cmdlines((unsigned int)val); if (ret < 0) return ret; *ppos += cnt; return cnt; } static const struct file_operations tracing_saved_cmdlines_size_fops = { .open = tracing_open_generic, .read = tracing_saved_cmdlines_size_read, .write = tracing_saved_cmdlines_size_write, }; #ifdef CONFIG_TRACE_EVAL_MAP_FILE static union trace_eval_map_item * update_eval_map(union trace_eval_map_item *ptr) { if (!ptr->map.eval_string) { if (ptr->tail.next) { ptr = ptr->tail.next; /* Set ptr to the next real item (skip head) */ ptr++; } else return NULL; } return ptr; } static void *eval_map_next(struct seq_file *m, void *v, loff_t *pos) { union trace_eval_map_item *ptr = v; /* * Paranoid! If ptr points to end, we don't want to increment past it. * This really should never happen. */ ptr = update_eval_map(ptr); if (WARN_ON_ONCE(!ptr)) return NULL; ptr++; (*pos)++; ptr = update_eval_map(ptr); return ptr; } static void *eval_map_start(struct seq_file *m, loff_t *pos) { union trace_eval_map_item *v; loff_t l = 0; mutex_lock(&trace_eval_mutex); v = trace_eval_maps; if (v) v++; while (v && l < *pos) { v = eval_map_next(m, v, &l); } return v; } static void eval_map_stop(struct seq_file *m, void *v) { mutex_unlock(&trace_eval_mutex); } static int eval_map_show(struct seq_file *m, void *v) { union trace_eval_map_item *ptr = v; seq_printf(m, "%s %ld (%s)\n", ptr->map.eval_string, ptr->map.eval_value, ptr->map.system); return 0; } static const struct seq_operations tracing_eval_map_seq_ops = { .start = eval_map_start, .next = eval_map_next, .stop = eval_map_stop, .show = eval_map_show, }; static int tracing_eval_map_open(struct inode *inode, struct file *filp) { if (tracing_disabled) return -ENODEV; return seq_open(filp, &tracing_eval_map_seq_ops); } static const struct file_operations tracing_eval_map_fops = { .open = tracing_eval_map_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; static inline union trace_eval_map_item * trace_eval_jmp_to_tail(union trace_eval_map_item *ptr) { /* Return tail of array given the head */ return ptr + ptr->head.length + 1; } static void trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start, int len) { struct trace_eval_map **stop; struct trace_eval_map **map; union trace_eval_map_item *map_array; union trace_eval_map_item *ptr; stop = start + len; /* * The trace_eval_maps contains the map plus a head and tail item, * where the head holds the module and length of array, and the * tail holds a pointer to the next list. 
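 *
 * The resulting layout is:
 *
 *   [ head | map 0 | map 1 | ... | map len-1 | tail ]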
*/ map_array = kmalloc_array(len + 2, sizeof(*map_array), GFP_KERNEL); if (!map_array) { pr_warn("Unable to allocate trace eval mapping\n"); return; } mutex_lock(&trace_eval_mutex); if (!trace_eval_maps) trace_eval_maps = map_array; else { ptr = trace_eval_maps; for (;;) { ptr = trace_eval_jmp_to_tail(ptr); if (!ptr->tail.next) break; ptr = ptr->tail.next; } ptr->tail.next = map_array; } map_array->head.mod = mod; map_array->head.length = len; map_array++; for (map = start; (unsigned long)map < (unsigned long)stop; map++) { map_array->map = **map; map_array++; } memset(map_array, 0, sizeof(*map_array)); mutex_unlock(&trace_eval_mutex); } static void trace_create_eval_file(struct dentry *d_tracer) { trace_create_file("eval_map", 0444, d_tracer, NULL, &tracing_eval_map_fops); } #else /* CONFIG_TRACE_EVAL_MAP_FILE */ static inline void trace_create_eval_file(struct dentry *d_tracer) { } static inline void trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start, int len) { } #endif /* !CONFIG_TRACE_EVAL_MAP_FILE */ static void trace_insert_eval_map(struct module *mod, struct trace_eval_map **start, int len) { struct trace_eval_map **map; if (len <= 0) return; map = start; trace_event_eval_update(map, len); trace_insert_eval_map_file(mod, start, len); } static ssize_t tracing_set_trace_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { struct trace_array *tr = filp->private_data; char buf[MAX_TRACER_SIZE+2]; int r; mutex_lock(&trace_types_lock); r = sprintf(buf, "%s\n", tr->current_trace->name); mutex_unlock(&trace_types_lock); return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); } int tracer_init(struct tracer *t, struct trace_array *tr) { tracing_reset_online_cpus(&tr->trace_buffer); return t->init(tr); } static void set_buffer_entries(struct trace_buffer *buf, unsigned long val) { int cpu; for_each_tracing_cpu(cpu) per_cpu_ptr(buf->data, cpu)->entries = val; } #ifdef CONFIG_TRACER_MAX_TRACE /* resize @tr's buffer to the size of @size_tr's entries */ static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf, struct trace_buffer *size_buf, int cpu_id) { int cpu, ret = 0; if (cpu_id == RING_BUFFER_ALL_CPUS) { for_each_tracing_cpu(cpu) { ret = ring_buffer_resize(trace_buf->buffer, per_cpu_ptr(size_buf->data, cpu)->entries, cpu); if (ret < 0) break; per_cpu_ptr(trace_buf->data, cpu)->entries = per_cpu_ptr(size_buf->data, cpu)->entries; } } else { ret = ring_buffer_resize(trace_buf->buffer, per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id); if (ret == 0) per_cpu_ptr(trace_buf->data, cpu_id)->entries = per_cpu_ptr(size_buf->data, cpu_id)->entries; } return ret; } #endif /* CONFIG_TRACER_MAX_TRACE */ static int __tracing_resize_ring_buffer(struct trace_array *tr, unsigned long size, int cpu) { int ret; /* * If kernel or user changes the size of the ring buffer * we use the size that was given, and we can forget about * expanding it later. */ ring_buffer_expanded = true; /* May be called before buffers are initialized */ if (!tr->trace_buffer.buffer) return 0; ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu); if (ret < 0) return ret; #ifdef CONFIG_TRACER_MAX_TRACE if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) || !tr->current_trace->use_max_tr) goto out; ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu); if (ret < 0) { int r = resize_buffer_duplicate_size(&tr->trace_buffer, &tr->trace_buffer, cpu); if (r < 0) { /* * AARGH! We are left with different * size max buffer!!!! * The max buffer is our "snapshot" buffer. 
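 * It must remain the same size as the main buffer so the two
 * can be swapped.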
 * When a tracer needs a snapshot (one of the
 * latency tracers), it swaps the max buffer
 * with the saved snapshot. We succeeded to
 * update the size of the main buffer, but failed to
 * update the size of the max buffer. But when we tried
 * to reset the main buffer to the original size, we
 * failed there too. This is very unlikely to
 * happen, but if it does, warn and kill all
 * tracing.
 */
			WARN_ON(1);
			tracing_disabled = 1;
		}
		return ret;
	}

	if (cpu == RING_BUFFER_ALL_CPUS)
		set_buffer_entries(&tr->max_buffer, size);
	else
		per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;

 out:
#endif /* CONFIG_TRACER_MAX_TRACE */

	if (cpu == RING_BUFFER_ALL_CPUS)
		set_buffer_entries(&tr->trace_buffer, size);
	else
		per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;

	return ret;
}

static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
					  unsigned long size, int cpu_id)
{
	int ret = size;

	mutex_lock(&trace_types_lock);

	if (cpu_id != RING_BUFFER_ALL_CPUS) {
		/* make sure, this cpu is enabled in the mask */
		if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
			ret = -EINVAL;
			goto out;
		}
	}

	ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
	if (ret < 0)
		ret = -ENOMEM;

out:
	mutex_unlock(&trace_types_lock);

	return ret;
}


/**
 * tracing_update_buffers - used by tracing facility to expand ring buffers
 *
 * To save on memory when the tracing is never used on a system with it
 * configured in. The ring buffers are set to a minimum size. But once
 * a user starts to use the tracing facility, then they need to grow
 * to their default size.
 *
 * This function is to be called when a tracer is about to be used.
 */
int tracing_update_buffers(void)
{
	int ret = 0;

	mutex_lock(&trace_types_lock);
	if (!ring_buffer_expanded)
		ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
						RING_BUFFER_ALL_CPUS);
	mutex_unlock(&trace_types_lock);

	return ret;
}

struct trace_option_dentry;

static void
create_trace_option_files(struct trace_array *tr, struct tracer *tracer);

/*
 * Used to clear out the tracer before deletion of an instance.
 * Must have trace_types_lock held.
 */
static void tracing_set_nop(struct trace_array *tr)
{
	if (tr->current_trace == &nop_trace)
		return;

	tr->current_trace->enabled--;

	if (tr->current_trace->reset)
		tr->current_trace->reset(tr);

	tr->current_trace = &nop_trace;
}

static void add_tracer_options(struct trace_array *tr, struct tracer *t)
{
	/* Only enable if the directory has been created already. */
	if (!tr->dir)
		return;

	create_trace_option_files(tr, t);
}

static int tracing_set_tracer(struct trace_array *tr, const char *buf)
{
	struct tracer *t;
#ifdef CONFIG_TRACER_MAX_TRACE
	bool had_max_tr;
#endif
	int ret = 0;

	mutex_lock(&trace_types_lock);

	if (!ring_buffer_expanded) {
		ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
						RING_BUFFER_ALL_CPUS);
		if (ret < 0)
			goto out;
		ret = 0;
	}

	for (t = trace_types; t; t = t->next) {
		if (strcmp(t->name, buf) == 0)
			break;
	}
	if (!t) {
		ret = -EINVAL;
		goto out;
	}
	if (t == tr->current_trace)
		goto out;

	/* Some tracers won't work on kernel command line */
	if (system_state < SYSTEM_RUNNING && t->noboot) {
		pr_warn("Tracer '%s' is not allowed on command line, ignored\n",
			t->name);
		goto out;
	}

	/* Some tracers are only allowed for the top level buffer */
	if (!trace_ok_for_array(t, tr)) {
		ret = -EINVAL;
		goto out;
	}

	/* If trace pipe files are being read, we can't change the tracer */
	if (tr->current_trace->ref) {
		ret = -EBUSY;
		goto out;
	}

	trace_branch_disable();

	tr->current_trace->enabled--;

	if (tr->current_trace->reset)
		tr->current_trace->reset(tr);

	/* Current trace needs to be nop_trace before synchronize_rcu */
	tr->current_trace = &nop_trace;

#ifdef CONFIG_TRACER_MAX_TRACE
	had_max_tr = tr->allocated_snapshot;

	if (had_max_tr && !t->use_max_tr) {
		/*
		 * We need to make sure that the update_max_tr sees that
		 * current_trace changed to nop_trace to keep it from
		 * swapping the buffers after we resize it.
		 * The update_max_tr is called from interrupts disabled
		 * so a synchronize_rcu() is sufficient.
		 */
		synchronize_rcu();
		free_snapshot(tr);
	}
#endif
#ifdef CONFIG_TRACER_MAX_TRACE
	if (t->use_max_tr && !had_max_tr) {
		ret = tracing_alloc_snapshot_instance(tr);
		if (ret < 0)
			goto out;
	}
#endif

	if (t->init) {
		ret = tracer_init(t, tr);
		if (ret)
			goto out;
	}

	tr->current_trace = t;
	tr->current_trace->enabled++;
	trace_branch_enable(tr);
 out:
	mutex_unlock(&trace_types_lock);

	return ret;
}

static ssize_t
tracing_set_trace_write(struct file *filp, const char __user *ubuf,
			size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[MAX_TRACER_SIZE+1];
	int i;
	size_t ret;
	int err;

	ret = cnt;

	if (cnt > MAX_TRACER_SIZE)
		cnt = MAX_TRACER_SIZE;

	if (copy_from_user(buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	/* strip ending whitespace. */
	for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
		buf[i] = 0;

	err = tracing_set_tracer(tr, buf);
	if (err)
		return err;

	*ppos += ret;

	return ret;
}

static ssize_t
tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	char buf[64];
	int r;

	r = snprintf(buf, sizeof(buf), "%ld\n",
		     *ptr == (unsigned long)-1 ?
-1 : nsecs_to_usecs(*ptr)); if (r > sizeof(buf)) r = sizeof(buf); return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); } static ssize_t tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf, size_t cnt, loff_t *ppos) { unsigned long val; int ret; ret = kstrtoul_from_user(ubuf, cnt, 10, &val); if (ret) return ret; *ptr = val * 1000; return cnt; } static ssize_t tracing_thresh_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos); } static ssize_t tracing_thresh_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { struct trace_array *tr = filp->private_data; int ret; mutex_lock(&trace_types_lock); ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos); if (ret < 0) goto out; if (tr->current_trace->update_thresh) { ret = tr->current_trace->update_thresh(tr); if (ret < 0) goto out; } ret = cnt; out: mutex_unlock(&trace_types_lock); return ret; } #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER) static ssize_t tracing_max_lat_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos); } static ssize_t tracing_max_lat_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos); } #endif static int tracing_open_pipe(struct inode *inode, struct file *filp) { struct trace_array *tr = inode->i_private; struct trace_iterator *iter; int ret = 0; if (tracing_disabled) return -ENODEV; if (trace_array_get(tr) < 0) return -ENODEV; mutex_lock(&trace_types_lock); /* create a buffer to store the information to pass to userspace */ iter = kzalloc(sizeof(*iter), GFP_KERNEL); if (!iter) { ret = -ENOMEM; __trace_array_put(tr); goto out; } trace_seq_init(&iter->seq); iter->trace = tr->current_trace; if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) { ret = -ENOMEM; goto fail; } /* trace pipe does not show start of buffer */ cpumask_setall(iter->started); if (tr->trace_flags & TRACE_ITER_LATENCY_FMT) iter->iter_flags |= TRACE_FILE_LAT_FMT; /* Output in nanoseconds only if we are using a clock in nanoseconds. 
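 * Counter and TSC based clocks are reported raw instead (compare the
 * in_ns handling in tracing_stats_read()).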
*/ if (trace_clocks[tr->clock_id].in_ns) iter->iter_flags |= TRACE_FILE_TIME_IN_NS; iter->tr = tr; iter->trace_buffer = &tr->trace_buffer; iter->cpu_file = tracing_get_cpu(inode); mutex_init(&iter->mutex); filp->private_data = iter; if (iter->trace->pipe_open) iter->trace->pipe_open(iter); nonseekable_open(inode, filp); tr->current_trace->ref++; out: mutex_unlock(&trace_types_lock); return ret; fail: kfree(iter->trace); kfree(iter); __trace_array_put(tr); mutex_unlock(&trace_types_lock); return ret; } static int tracing_release_pipe(struct inode *inode, struct file *file) { struct trace_iterator *iter = file->private_data; struct trace_array *tr = inode->i_private; mutex_lock(&trace_types_lock); tr->current_trace->ref--; if (iter->trace->pipe_close) iter->trace->pipe_close(iter); mutex_unlock(&trace_types_lock); free_cpumask_var(iter->started); mutex_destroy(&iter->mutex); kfree(iter); trace_array_put(tr); return 0; } static __poll_t trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table) { struct trace_array *tr = iter->tr; /* Iterators are static, they should be filled or empty */ if (trace_buffer_iter(iter, iter->cpu_file)) return EPOLLIN | EPOLLRDNORM; if (tr->trace_flags & TRACE_ITER_BLOCK) /* * Always select as readable when in blocking mode */ return EPOLLIN | EPOLLRDNORM; else return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file, filp, poll_table); } static __poll_t tracing_poll_pipe(struct file *filp, poll_table *poll_table) { struct trace_iterator *iter = filp->private_data; return trace_poll(iter, filp, poll_table); } /* Must be called with iter->mutex held. */ static int tracing_wait_pipe(struct file *filp) { struct trace_iterator *iter = filp->private_data; int ret; while (trace_empty(iter)) { if ((filp->f_flags & O_NONBLOCK)) { return -EAGAIN; } /* * We block until we read something or tracing is disabled. * We still block if tracing is disabled but we have never * read anything. This allows a user to cat this file, and * then enable tracing. But after we have read something, * we give an EOF when tracing is again disabled. * * iter->pos will be 0 if we haven't read anything. */ if (!tracer_tracing_is_on(iter->tr) && iter->pos) break; mutex_unlock(&iter->mutex); ret = wait_on_pipe(iter, 0); mutex_lock(&iter->mutex); if (ret) return ret; } return 1; } /* * Consumer reader. */ static ssize_t tracing_read_pipe(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { struct trace_iterator *iter = filp->private_data; ssize_t sret; /* * Avoid more than one consumer on a single file descriptor. * This is just a matter of trace coherency, the ring buffer itself * is protected.
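 * (iter->mutex, taken right below, is what enforces this.)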
*/ mutex_lock(&iter->mutex); /* return any leftover data */ sret = trace_seq_to_user(&iter->seq, ubuf, cnt); if (sret != -EBUSY) goto out; trace_seq_init(&iter->seq); if (iter->trace->read) { sret = iter->trace->read(iter, filp, ubuf, cnt, ppos); if (sret) goto out; } waitagain: sret = tracing_wait_pipe(filp); if (sret <= 0) goto out; /* stop when tracing is finished */ if (trace_empty(iter)) { sret = 0; goto out; } if (cnt >= PAGE_SIZE) cnt = PAGE_SIZE - 1; /* reset all but tr, trace, and overruns */ memset(&iter->seq, 0, sizeof(struct trace_iterator) - offsetof(struct trace_iterator, seq)); cpumask_clear(iter->started); iter->pos = -1; trace_event_read_lock(); trace_access_lock(iter->cpu_file); while (trace_find_next_entry_inc(iter) != NULL) { enum print_line_t ret; int save_len = iter->seq.seq.len; ret = print_trace_line(iter); if (ret == TRACE_TYPE_PARTIAL_LINE) { /* don't print partial lines */ iter->seq.seq.len = save_len; break; } if (ret != TRACE_TYPE_NO_CONSUME) trace_consume(iter); if (trace_seq_used(&iter->seq) >= cnt) break; /* * Setting the full flag means we reached the trace_seq buffer * size and we should leave by partial output condition above. * One of the trace_seq_* functions is not used properly. */ WARN_ONCE(iter->seq.full, "full flag set for trace type %d", iter->ent->type); } trace_access_unlock(iter->cpu_file); trace_event_read_unlock(); /* Now copy what we have to the user */ sret = trace_seq_to_user(&iter->seq, ubuf, cnt); if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq)) trace_seq_init(&iter->seq); /* * If there was nothing to send to user, in spite of consuming trace * entries, go back to wait for more entries. */ if (sret == -EBUSY) goto waitagain; out: mutex_unlock(&iter->mutex); return sret; } static void tracing_spd_release_pipe(struct splice_pipe_desc *spd, unsigned int idx) { __free_page(spd->pages[idx]); } static const struct pipe_buf_operations tracing_pipe_buf_ops = { .can_merge = 0, .confirm = generic_pipe_buf_confirm, .release = generic_pipe_buf_release, .steal = generic_pipe_buf_steal, .get = generic_pipe_buf_get, }; static size_t tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter) { size_t count; int save_len; int ret; /* Seq buffer is page-sized, exactly what we need. */ for (;;) { save_len = iter->seq.seq.len; ret = print_trace_line(iter); if (trace_seq_has_overflowed(&iter->seq)) { iter->seq.seq.len = save_len; break; } /* * This should not be hit, because it should only * be set if the iter->seq overflowed. But check it * anyway to be safe. */ if (ret == TRACE_TYPE_PARTIAL_LINE) { iter->seq.seq.len = save_len; break; } count = trace_seq_used(&iter->seq) - save_len; if (rem < count) { rem = 0; iter->seq.seq.len = save_len; break; } if (ret != TRACE_TYPE_NO_CONSUME) trace_consume(iter); rem -= count; if (!trace_find_next_entry_inc(iter)) { rem = 0; iter->ent = NULL; break; } } return rem; } static ssize_t tracing_splice_read_pipe(struct file *filp, loff_t *ppos, struct pipe_inode_info *pipe, size_t len, unsigned int flags) { struct page *pages_def[PIPE_DEF_BUFFERS]; struct partial_page partial_def[PIPE_DEF_BUFFERS]; struct trace_iterator *iter = filp->private_data; struct splice_pipe_desc spd = { .pages = pages_def, .partial = partial_def, .nr_pages = 0, /* This gets updated below. 
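 * (spd.nr_pages is set once we know how many pages were filled.)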
*/ .nr_pages_max = PIPE_DEF_BUFFERS, .ops = &tracing_pipe_buf_ops, .spd_release = tracing_spd_release_pipe, }; ssize_t ret; size_t rem; unsigned int i; if (splice_grow_spd(pipe, &spd)) return -ENOMEM; mutex_lock(&iter->mutex); if (iter->trace->splice_read) { ret = iter->trace->splice_read(iter, filp, ppos, pipe, len, flags); if (ret) goto out_err; } ret = tracing_wait_pipe(filp); if (ret <= 0) goto out_err; if (!iter->ent && !trace_find_next_entry_inc(iter)) { ret = -EFAULT; goto out_err; } trace_event_read_lock(); trace_access_lock(iter->cpu_file); /* Fill as many pages as possible. */ for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) { spd.pages[i] = alloc_page(GFP_KERNEL); if (!spd.pages[i]) break; rem = tracing_fill_pipe_page(rem, iter); /* Copy the data into the page, so we can start over. */ ret = trace_seq_to_buffer(&iter->seq, page_address(spd.pages[i]), trace_seq_used(&iter->seq)); if (ret < 0) { __free_page(spd.pages[i]); break; } spd.partial[i].offset = 0; spd.partial[i].len = trace_seq_used(&iter->seq); trace_seq_init(&iter->seq); } trace_access_unlock(iter->cpu_file); trace_event_read_unlock(); mutex_unlock(&iter->mutex); spd.nr_pages = i; if (i) ret = splice_to_pipe(pipe, &spd); else ret = 0; out: splice_shrink_spd(&spd); return ret; out_err: mutex_unlock(&iter->mutex); goto out; } static ssize_t tracing_entries_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { struct inode *inode = file_inode(filp); struct trace_array *tr = inode->i_private; int cpu = tracing_get_cpu(inode); char buf[64]; int r = 0; ssize_t ret; mutex_lock(&trace_types_lock); if (cpu == RING_BUFFER_ALL_CPUS) { int cpu, buf_size_same; unsigned long size; size = 0; buf_size_same = 1; /* check if all cpu sizes are same */ for_each_tracing_cpu(cpu) { /* fill in the size from first enabled cpu */ if (size == 0) size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries; if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) { buf_size_same = 0; break; } } if (buf_size_same) { if (!ring_buffer_expanded) r = sprintf(buf, "%lu (expanded: %lu)\n", size >> 10, trace_buf_size >> 10); else r = sprintf(buf, "%lu\n", size >> 10); } else r = sprintf(buf, "X\n"); } else r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10); mutex_unlock(&trace_types_lock); ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r); return ret; } static ssize_t tracing_entries_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { struct inode *inode = file_inode(filp); struct trace_array *tr = inode->i_private; unsigned long val; int ret; ret = kstrtoul_from_user(ubuf, cnt, 10, &val); if (ret) return ret; /* must have at least 1 entry */ if (!val) return -EINVAL; /* value is in KB */ val <<= 10; ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode)); if (ret < 0) return ret; *ppos += cnt; return cnt; } static ssize_t tracing_total_entries_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { struct trace_array *tr = filp->private_data; char buf[64]; int r, cpu; unsigned long size = 0, expanded_size = 0; mutex_lock(&trace_types_lock); for_each_tracing_cpu(cpu) { size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10; if (!ring_buffer_expanded) expanded_size += trace_buf_size >> 10; } if (ring_buffer_expanded) r = sprintf(buf, "%lu\n", size); else r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size); mutex_unlock(&trace_types_lock); return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); } static ssize_t 
tracing_free_buffer_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { /* * There is no need to read what the user has written, this function * is just to make sure that there is no error when "echo" is used */ *ppos += cnt; return cnt; } static int tracing_free_buffer_release(struct inode *inode, struct file *filp) { struct trace_array *tr = inode->i_private; /* disable tracing ? */ if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE) tracer_tracing_off(tr); /* resize the ring buffer to 0 */ tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS); trace_array_put(tr); return 0; } static ssize_t tracing_mark_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *fpos) { struct trace_array *tr = filp->private_data; struct ring_buffer_event *event; enum event_trigger_type tt = ETT_NONE; struct ring_buffer *buffer; struct print_entry *entry; unsigned long irq_flags; const char faulted[] = "<faulted>"; ssize_t written; int size; int len; /* Used in tracing_mark_raw_write() as well */ #define FAULTED_SIZE (sizeof(faulted) - 1) /* '\0' is already accounted for */ if (tracing_disabled) return -EINVAL; if (!(tr->trace_flags & TRACE_ITER_MARKERS)) return -EINVAL; if (cnt > TRACE_BUF_SIZE) cnt = TRACE_BUF_SIZE; BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE); local_save_flags(irq_flags); size = sizeof(*entry) + cnt + 2; /* add '\0' and possible '\n' */ /* If less than "<faulted>", then make sure we can still add that */ if (cnt < FAULTED_SIZE) size += FAULTED_SIZE - cnt; buffer = tr->trace_buffer.buffer; event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size, irq_flags, preempt_count()); if (unlikely(!event)) /* Ring buffer disabled, return as if not open for write */ return -EBADF; entry = ring_buffer_event_data(event); entry->ip = _THIS_IP_; len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt); if (len) { memcpy(&entry->buf, faulted, FAULTED_SIZE); cnt = FAULTED_SIZE; written = -EFAULT; } else written = cnt; len = cnt; if (tr->trace_marker_file && !list_empty(&tr->trace_marker_file->triggers)) { /* do not add \n before testing triggers, but add \0 */ entry->buf[cnt] = '\0'; tt = event_triggers_call(tr->trace_marker_file, entry, event); } if (entry->buf[cnt - 1] != '\n') { entry->buf[cnt] = '\n'; entry->buf[cnt + 1] = '\0'; } else entry->buf[cnt] = '\0'; __buffer_unlock_commit(buffer, event); if (tt) event_triggers_post_call(tr->trace_marker_file, tt); if (written > 0) *fpos += written; return written; } /* Limit it for now to 3K (including tag) */ #define RAW_DATA_MAX_SIZE (1024*3) static ssize_t tracing_mark_raw_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *fpos) { struct trace_array *tr = filp->private_data; struct ring_buffer_event *event; struct ring_buffer *buffer; struct raw_data_entry *entry; const char faulted[] = "<faulted>"; unsigned long irq_flags; ssize_t written; int size; int len; #define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int)) if (tracing_disabled) return -EINVAL; if (!(tr->trace_flags & TRACE_ITER_MARKERS)) return -EINVAL; /* The marker must at least have a tag id */ if (cnt < sizeof(unsigned int) || cnt > RAW_DATA_MAX_SIZE) return -EINVAL; if (cnt > TRACE_BUF_SIZE) cnt = TRACE_BUF_SIZE; BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE); local_save_flags(irq_flags); size = sizeof(*entry) + cnt; if (cnt < FAULT_SIZE_ID) size += FAULT_SIZE_ID - cnt; buffer = tr->trace_buffer.buffer; event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size, irq_flags, preempt_count()); if (!event) /* Ring buffer disabled, return as if 
not open for write */ return -EBADF; entry = ring_buffer_event_data(event); len = __copy_from_user_inatomic(&entry->id, ubuf, cnt); if (len) { entry->id = -1; memcpy(&entry->buf, faulted, FAULTED_SIZE); written = -EFAULT; } else written = cnt; __buffer_unlock_commit(buffer, event); if (written > 0) *fpos += written; return written; } static int tracing_clock_show(struct seq_file *m, void *v) { struct trace_array *tr = m->private; int i; for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) seq_printf(m, "%s%s%s%s", i ? " " : "", i == tr->clock_id ? "[" : "", trace_clocks[i].name, i == tr->clock_id ? "]" : ""); seq_putc(m, '\n'); return 0; } int tracing_set_clock(struct trace_array *tr, const char *clockstr) { int i; for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) { if (strcmp(trace_clocks[i].name, clockstr) == 0) break; } if (i == ARRAY_SIZE(trace_clocks)) return -EINVAL; mutex_lock(&trace_types_lock); tr->clock_id = i; ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func); /* * New clock may not be consistent with the previous clock. * Reset the buffer so that it doesn't have incomparable timestamps. */ tracing_reset_online_cpus(&tr->trace_buffer); #ifdef CONFIG_TRACER_MAX_TRACE if (tr->max_buffer.buffer) ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func); tracing_reset_online_cpus(&tr->max_buffer); #endif mutex_unlock(&trace_types_lock); return 0; } static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *fpos) { struct seq_file *m = filp->private_data; struct trace_array *tr = m->private; char buf[64]; const char *clockstr; int ret; if (cnt >= sizeof(buf)) return -EINVAL; if (copy_from_user(buf, ubuf, cnt)) return -EFAULT; buf[cnt] = 0; clockstr = strstrip(buf); ret = tracing_set_clock(tr, clockstr); if (ret) return ret; *fpos += cnt; return cnt; } static int tracing_clock_open(struct inode *inode, struct file *file) { struct trace_array *tr = inode->i_private; int ret; if (tracing_disabled) return -ENODEV; if (trace_array_get(tr)) return -ENODEV; ret = single_open(file, tracing_clock_show, inode->i_private); if (ret < 0) trace_array_put(tr); return ret; } static int tracing_time_stamp_mode_show(struct seq_file *m, void *v) { struct trace_array *tr = m->private; mutex_lock(&trace_types_lock); if (ring_buffer_time_stamp_abs(tr->trace_buffer.buffer)) seq_puts(m, "delta [absolute]\n"); else seq_puts(m, "[delta] absolute\n"); mutex_unlock(&trace_types_lock); return 0; } static int tracing_time_stamp_mode_open(struct inode *inode, struct file *file) { struct trace_array *tr = inode->i_private; int ret; if (tracing_disabled) return -ENODEV; if (trace_array_get(tr)) return -ENODEV; ret = single_open(file, tracing_time_stamp_mode_show, inode->i_private); if (ret < 0) trace_array_put(tr); return ret; } int tracing_set_time_stamp_abs(struct trace_array *tr, bool abs) { int ret = 0; mutex_lock(&trace_types_lock); if (abs && tr->time_stamp_abs_ref++) goto out; if (!abs) { if (WARN_ON_ONCE(!tr->time_stamp_abs_ref)) { ret = -EINVAL; goto out; } if (--tr->time_stamp_abs_ref) goto out; } ring_buffer_set_time_stamp_abs(tr->trace_buffer.buffer, abs); #ifdef CONFIG_TRACER_MAX_TRACE if (tr->max_buffer.buffer) ring_buffer_set_time_stamp_abs(tr->max_buffer.buffer, abs); #endif out: mutex_unlock(&trace_types_lock); return ret; } struct ftrace_buffer_info { struct trace_iterator iter; void *spare; unsigned int spare_cpu; unsigned int read; }; #ifdef CONFIG_TRACER_SNAPSHOT static int tracing_snapshot_open(struct inode *inode, struct file *file) { 
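/*
 * A reader gets a full trace iterator over the max (snapshot) buffer
 * via __tracing_open(); a writer only needs a stub seq_file whose
 * private data carries a minimal iterator for tracing_snapshot_write().
 */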
struct trace_array *tr = inode->i_private; struct trace_iterator *iter; struct seq_file *m; int ret = 0; if (trace_array_get(tr) < 0) return -ENODEV; if (file->f_mode & FMODE_READ) { iter = __tracing_open(inode, file, true); if (IS_ERR(iter)) ret = PTR_ERR(iter); } else { /* Writes still need the seq_file to hold the private data */ ret = -ENOMEM; m = kzalloc(sizeof(*m), GFP_KERNEL); if (!m) goto out; iter = kzalloc(sizeof(*iter), GFP_KERNEL); if (!iter) { kfree(m); goto out; } ret = 0; iter->tr = tr; iter->trace_buffer = &tr->max_buffer; iter->cpu_file = tracing_get_cpu(inode); m->private = iter; file->private_data = m; } out: if (ret < 0) trace_array_put(tr); return ret; } static ssize_t tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { struct seq_file *m = filp->private_data; struct trace_iterator *iter = m->private; struct trace_array *tr = iter->tr; unsigned long val; int ret; ret = tracing_update_buffers(); if (ret < 0) return ret; ret = kstrtoul_from_user(ubuf, cnt, 10, &val); if (ret) return ret; mutex_lock(&trace_types_lock); if (tr->current_trace->use_max_tr) { ret = -EBUSY; goto out; } switch (val) { case 0: if (iter->cpu_file != RING_BUFFER_ALL_CPUS) { ret = -EINVAL; break; } if (tr->allocated_snapshot) free_snapshot(tr); break; case 1: /* Only allow per-cpu swap if the ring buffer supports it */ #ifndef CONFIG_RING_BUFFER_ALLOW_SWAP if (iter->cpu_file != RING_BUFFER_ALL_CPUS) { ret = -EINVAL; break; } #endif if (!tr->allocated_snapshot) { ret = tracing_alloc_snapshot_instance(tr); if (ret < 0) break; } local_irq_disable(); /* Now, we're going to swap */ if (iter->cpu_file == RING_BUFFER_ALL_CPUS) update_max_tr(tr, current, smp_processor_id()); else update_max_tr_single(tr, current, iter->cpu_file); local_irq_enable(); break; default: if (tr->allocated_snapshot) { if (iter->cpu_file == RING_BUFFER_ALL_CPUS) tracing_reset_online_cpus(&tr->max_buffer); else tracing_reset(&tr->max_buffer, iter->cpu_file); } break; } if (ret >= 0) { *ppos += cnt; ret = cnt; } out: mutex_unlock(&trace_types_lock); return ret; } static int tracing_snapshot_release(struct inode *inode, struct file *file) { struct seq_file *m = file->private_data; int ret; ret = tracing_release(inode, file); if (file->f_mode & FMODE_READ) return ret; /* If write only, the seq_file is just a stub */ if (m) kfree(m->private); kfree(m); return 0; } static int tracing_buffers_open(struct inode *inode, struct file *filp); static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf, size_t count, loff_t *ppos); static int tracing_buffers_release(struct inode *inode, struct file *file); static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos, struct pipe_inode_info *pipe, size_t len, unsigned int flags); static int snapshot_raw_open(struct inode *inode, struct file *filp) { struct ftrace_buffer_info *info; int ret; ret = tracing_buffers_open(inode, filp); if (ret < 0) return ret; info = filp->private_data; if (info->iter.trace->use_max_tr) { tracing_buffers_release(inode, filp); return -EBUSY; } info->iter.snapshot = true; info->iter.trace_buffer = &info->iter.tr->max_buffer; return ret; } #endif /* CONFIG_TRACER_SNAPSHOT */ static const struct file_operations tracing_thresh_fops = { .open = tracing_open_generic, .read = tracing_thresh_read, .write = tracing_thresh_write, .llseek = generic_file_llseek, }; #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER) static const struct file_operations tracing_max_lat_fops = { .open = 
tracing_open_generic, .read = tracing_max_lat_read, .write = tracing_max_lat_write, .llseek = generic_file_llseek, }; #endif static const struct file_operations set_tracer_fops = { .open = tracing_open_generic, .read = tracing_set_trace_read, .write = tracing_set_trace_write, .llseek = generic_file_llseek, }; static const struct file_operations tracing_pipe_fops = { .open = tracing_open_pipe, .poll = tracing_poll_pipe, .read = tracing_read_pipe, .splice_read = tracing_splice_read_pipe, .release = tracing_release_pipe, .llseek = no_llseek, }; static const struct file_operations tracing_entries_fops = { .open = tracing_open_generic_tr, .read = tracing_entries_read, .write = tracing_entries_write, .llseek = generic_file_llseek, .release = tracing_release_generic_tr, }; static const struct file_operations tracing_total_entries_fops = { .open = tracing_open_generic_tr, .read = tracing_total_entries_read, .llseek = generic_file_llseek, .release = tracing_release_generic_tr, }; static const struct file_operations tracing_free_buffer_fops = { .open = tracing_open_generic_tr, .write = tracing_free_buffer_write, .release = tracing_free_buffer_release, }; static const struct file_operations tracing_mark_fops = { .open = tracing_open_generic_tr, .write = tracing_mark_write, .llseek = generic_file_llseek, .release = tracing_release_generic_tr, }; static const struct file_operations tracing_mark_raw_fops = { .open = tracing_open_generic_tr, .write = tracing_mark_raw_write, .llseek = generic_file_llseek, .release = tracing_release_generic_tr, }; static const struct file_operations trace_clock_fops = { .open = tracing_clock_open, .read = seq_read, .llseek = seq_lseek, .release = tracing_single_release_tr, .write = tracing_clock_write, }; static const struct file_operations trace_time_stamp_mode_fops = { .open = tracing_time_stamp_mode_open, .read = seq_read, .llseek = seq_lseek, .release = tracing_single_release_tr, }; #ifdef CONFIG_TRACER_SNAPSHOT static const struct file_operations snapshot_fops = { .open = tracing_snapshot_open, .read = seq_read, .write = tracing_snapshot_write, .llseek = tracing_lseek, .release = tracing_snapshot_release, }; static const struct file_operations snapshot_raw_fops = { .open = snapshot_raw_open, .read = tracing_buffers_read, .release = tracing_buffers_release, .splice_read = tracing_buffers_splice_read, .llseek = no_llseek, }; #endif /* CONFIG_TRACER_SNAPSHOT */ static int tracing_buffers_open(struct inode *inode, struct file *filp) { struct trace_array *tr = inode->i_private; struct ftrace_buffer_info *info; int ret; if (tracing_disabled) return -ENODEV; if (trace_array_get(tr) < 0) return -ENODEV; info = kzalloc(sizeof(*info), GFP_KERNEL); if (!info) { trace_array_put(tr); return -ENOMEM; } mutex_lock(&trace_types_lock); info->iter.tr = tr; info->iter.cpu_file = tracing_get_cpu(inode); info->iter.trace = tr->current_trace; info->iter.trace_buffer = &tr->trace_buffer; info->spare = NULL; /* Force reading ring buffer for first read */ info->read = (unsigned int)-1; filp->private_data = info; tr->current_trace->ref++; mutex_unlock(&trace_types_lock); ret = nonseekable_open(inode, filp); if (ret < 0) trace_array_put(tr); return ret; } static __poll_t tracing_buffers_poll(struct file *filp, poll_table *poll_table) { struct ftrace_buffer_info *info = filp->private_data; struct trace_iterator *iter = &info->iter; return trace_poll(iter, filp, poll_table); } static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf, size_t count, loff_t *ppos) { struct 
ftrace_buffer_info *info = filp->private_data; struct trace_iterator *iter = &info->iter; ssize_t ret = 0; ssize_t size; if (!count) return 0; #ifdef CONFIG_TRACER_MAX_TRACE if (iter->snapshot && iter->tr->current_trace->use_max_tr) return -EBUSY; #endif if (!info->spare) { info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer, iter->cpu_file); if (IS_ERR(info->spare)) { ret = PTR_ERR(info->spare); info->spare = NULL; } else { info->spare_cpu = iter->cpu_file; } } if (!info->spare) return ret; /* Do we have previous read data to read? */ if (info->read < PAGE_SIZE) goto read; again: trace_access_lock(iter->cpu_file); ret = ring_buffer_read_page(iter->trace_buffer->buffer, &info->spare, count, iter->cpu_file, 0); trace_access_unlock(iter->cpu_file); if (ret < 0) { if (trace_empty(iter)) { if ((filp->f_flags & O_NONBLOCK)) return -EAGAIN; ret = wait_on_pipe(iter, 0); if (ret) return ret; goto again; } return 0; } info->read = 0; read: size = PAGE_SIZE - info->read; if (size > count) size = count; ret = copy_to_user(ubuf, info->spare + info->read, size); if (ret == size) return -EFAULT; size -= ret; *ppos += size; info->read += size; return size; } static int tracing_buffers_release(struct inode *inode, struct file *file) { struct ftrace_buffer_info *info = file->private_data; struct trace_iterator *iter = &info->iter; mutex_lock(&trace_types_lock); iter->tr->current_trace->ref--; __trace_array_put(iter->tr); if (info->spare) ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare_cpu, info->spare); kfree(info); mutex_unlock(&trace_types_lock); return 0; } struct buffer_ref { struct ring_buffer *buffer; void *page; int cpu; int ref; }; static void buffer_pipe_buf_release(struct pipe_inode_info *pipe, struct pipe_buffer *buf) { struct buffer_ref *ref = (struct buffer_ref *)buf->private; if (--ref->ref) return; ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page); kfree(ref); buf->private = 0; } static bool buffer_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf) { struct buffer_ref *ref = (struct buffer_ref *)buf->private; if (ref->ref > INT_MAX/2) return false; ref->ref++; return true; } /* Pipe buffer operations for a buffer. */ static const struct pipe_buf_operations buffer_pipe_buf_ops = { .can_merge = 0, .confirm = generic_pipe_buf_confirm, .release = buffer_pipe_buf_release, .steal = generic_pipe_buf_steal, .get = buffer_pipe_buf_get, }; /* * Callback from splice_to_pipe(), if we need to release some pages * at the end of the spd in case we error'ed out in filling the pipe. 
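 * Each partial page carries a buffer_ref; dropping the last
 * reference returns the page to the ring buffer.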
*/ static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i) { struct buffer_ref *ref = (struct buffer_ref *)spd->partial[i].private; if (--ref->ref) return; ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page); kfree(ref); spd->partial[i].private = 0; } static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos, struct pipe_inode_info *pipe, size_t len, unsigned int flags) { struct ftrace_buffer_info *info = file->private_data; struct trace_iterator *iter = &info->iter; struct partial_page partial_def[PIPE_DEF_BUFFERS]; struct page *pages_def[PIPE_DEF_BUFFERS]; struct splice_pipe_desc spd = { .pages = pages_def, .partial = partial_def, .nr_pages_max = PIPE_DEF_BUFFERS, .ops = &buffer_pipe_buf_ops, .spd_release = buffer_spd_release, }; struct buffer_ref *ref; int entries, i; ssize_t ret = 0; #ifdef CONFIG_TRACER_MAX_TRACE if (iter->snapshot && iter->tr->current_trace->use_max_tr) return -EBUSY; #endif if (*ppos & (PAGE_SIZE - 1)) return -EINVAL; if (len & (PAGE_SIZE - 1)) { if (len < PAGE_SIZE) return -EINVAL; len &= PAGE_MASK; } if (splice_grow_spd(pipe, &spd)) return -ENOMEM; again: trace_access_lock(iter->cpu_file); entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file); for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) { struct page *page; int r; ref = kzalloc(sizeof(*ref), GFP_KERNEL); if (!ref) { ret = -ENOMEM; break; } ref->ref = 1; ref->buffer = iter->trace_buffer->buffer; ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file); if (IS_ERR(ref->page)) { ret = PTR_ERR(ref->page); ref->page = NULL; kfree(ref); break; } ref->cpu = iter->cpu_file; r = ring_buffer_read_page(ref->buffer, &ref->page, len, iter->cpu_file, 1); if (r < 0) { ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page); kfree(ref); break; } page = virt_to_page(ref->page); spd.pages[i] = page; spd.partial[i].len = PAGE_SIZE; spd.partial[i].offset = 0; spd.partial[i].private = (unsigned long)ref; spd.nr_pages++; *ppos += PAGE_SIZE; entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file); } trace_access_unlock(iter->cpu_file); spd.nr_pages = i; /* did we read anything? 
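 * If not, report -EAGAIN to non-blocking callers; otherwise wait
 * for the buffer_percent watermark and try again.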
*/ if (!spd.nr_pages) { if (ret) goto out; ret = -EAGAIN; if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK)) goto out; ret = wait_on_pipe(iter, iter->tr->buffer_percent); if (ret) goto out; goto again; } ret = splice_to_pipe(pipe, &spd); out: splice_shrink_spd(&spd); return ret; } static const struct file_operations tracing_buffers_fops = { .open = tracing_buffers_open, .read = tracing_buffers_read, .poll = tracing_buffers_poll, .release = tracing_buffers_release, .splice_read = tracing_buffers_splice_read, .llseek = no_llseek, }; static ssize_t tracing_stats_read(struct file *filp, char __user *ubuf, size_t count, loff_t *ppos) { struct inode *inode = file_inode(filp); struct trace_array *tr = inode->i_private; struct trace_buffer *trace_buf = &tr->trace_buffer; int cpu = tracing_get_cpu(inode); struct trace_seq *s; unsigned long cnt; unsigned long long t; unsigned long usec_rem; s = kmalloc(sizeof(*s), GFP_KERNEL); if (!s) return -ENOMEM; trace_seq_init(s); cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu); trace_seq_printf(s, "entries: %ld\n", cnt); cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu); trace_seq_printf(s, "overrun: %ld\n", cnt); cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu); trace_seq_printf(s, "commit overrun: %ld\n", cnt); cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu); trace_seq_printf(s, "bytes: %ld\n", cnt); if (trace_clocks[tr->clock_id].in_ns) { /* local or global for trace_clock */ t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu)); usec_rem = do_div(t, USEC_PER_SEC); trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n", t, usec_rem); t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu)); usec_rem = do_div(t, USEC_PER_SEC); trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem); } else { /* counter or tsc mode for trace_clock */ trace_seq_printf(s, "oldest event ts: %llu\n", ring_buffer_oldest_event_ts(trace_buf->buffer, cpu)); trace_seq_printf(s, "now ts: %llu\n", ring_buffer_time_stamp(trace_buf->buffer, cpu)); } cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu); trace_seq_printf(s, "dropped events: %ld\n", cnt); cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu); trace_seq_printf(s, "read events: %ld\n", cnt); count = simple_read_from_buffer(ubuf, count, ppos, s->buffer, trace_seq_used(s)); kfree(s); return count; } static const struct file_operations tracing_stats_fops = { .open = tracing_open_generic_tr, .read = tracing_stats_read, .llseek = generic_file_llseek, .release = tracing_release_generic_tr, }; #ifdef CONFIG_DYNAMIC_FTRACE static ssize_t tracing_read_dyn_info(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { unsigned long *p = filp->private_data; char buf[64]; /* Not too big for a shallow stack */ int r; r = scnprintf(buf, 63, "%ld", *p); buf[r++] = '\n'; return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); } static const struct file_operations tracing_dyn_info_fops = { .open = tracing_open_generic, .read = tracing_read_dyn_info, .llseek = generic_file_llseek, }; #endif /* CONFIG_DYNAMIC_FTRACE */ #if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) static void ftrace_snapshot(unsigned long ip, unsigned long parent_ip, struct trace_array *tr, struct ftrace_probe_ops *ops, void *data) { tracing_snapshot_instance(tr); } static void ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip, struct trace_array *tr, struct ftrace_probe_ops *ops, void *data) { struct ftrace_func_mapper *mapper = data; long *count = NULL; if 
(mapper) count = (long *)ftrace_func_mapper_find_ip(mapper, ip); if (count) { if (*count <= 0) return; (*count)--; } tracing_snapshot_instance(tr); } static int ftrace_snapshot_print(struct seq_file *m, unsigned long ip, struct ftrace_probe_ops *ops, void *data) { struct ftrace_func_mapper *mapper = data; long *count = NULL; seq_printf(m, "%ps:", (void *)ip); seq_puts(m, "snapshot"); if (mapper) count = (long *)ftrace_func_mapper_find_ip(mapper, ip); if (count) seq_printf(m, ":count=%ld\n", *count); else seq_puts(m, ":unlimited\n"); return 0; } static int ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr, unsigned long ip, void *init_data, void **data) { struct ftrace_func_mapper *mapper = *data; if (!mapper) { mapper = allocate_ftrace_func_mapper(); if (!mapper) return -ENOMEM; *data = mapper; } return ftrace_func_mapper_add_ip(mapper, ip, init_data); } static void ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr, unsigned long ip, void *data) { struct ftrace_func_mapper *mapper = data; if (!ip) { if (!mapper) return; free_ftrace_func_mapper(mapper, NULL); return; } ftrace_func_mapper_remove_ip(mapper, ip); } static struct ftrace_probe_ops snapshot_probe_ops = { .func = ftrace_snapshot, .print = ftrace_snapshot_print, }; static struct ftrace_probe_ops snapshot_count_probe_ops = { .func = ftrace_count_snapshot, .print = ftrace_snapshot_print, .init = ftrace_snapshot_init, .free = ftrace_snapshot_free, }; static int ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash, char *glob, char *cmd, char *param, int enable) { struct ftrace_probe_ops *ops; void *count = (void *)-1; char *number; int ret; if (!tr) return -ENODEV; /* hash funcs only work with set_ftrace_filter */ if (!enable) return -EINVAL; ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops; if (glob[0] == '!') return unregister_ftrace_function_probe_func(glob+1, tr, ops); if (!param) goto out_reg; number = strsep(&param, ":"); if (!strlen(number)) goto out_reg; /* * We use the callback data field (which is a pointer) * as our counter. */ ret = kstrtoul(number, 0, (unsigned long *)&count); if (ret) return ret; out_reg: ret = tracing_alloc_snapshot_instance(tr); if (ret < 0) goto out; ret = register_ftrace_function_probe(glob, tr, ops, count); out: return ret < 0 ? 
ret : 0; } static struct ftrace_func_command ftrace_snapshot_cmd = { .name = "snapshot", .func = ftrace_trace_snapshot_callback, }; static __init int register_snapshot_cmd(void) { return register_ftrace_command(&ftrace_snapshot_cmd); } #else static inline __init int register_snapshot_cmd(void) { return 0; } #endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */ static struct dentry *tracing_get_dentry(struct trace_array *tr) { if (WARN_ON(!tr->dir)) return ERR_PTR(-ENODEV); /* Top directory uses NULL as the parent */ if (tr->flags & TRACE_ARRAY_FL_GLOBAL) return NULL; /* All sub buffers have a descriptor */ return tr->dir; } static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu) { struct dentry *d_tracer; if (tr->percpu_dir) return tr->percpu_dir; d_tracer = tracing_get_dentry(tr); if (IS_ERR(d_tracer)) return NULL; tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer); WARN_ONCE(!tr->percpu_dir, "Could not create tracefs directory 'per_cpu/%d'\n", cpu); return tr->percpu_dir; } static struct dentry * trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent, void *data, long cpu, const struct file_operations *fops) { struct dentry *ret = trace_create_file(name, mode, parent, data, fops); if (ret) /* See tracing_get_cpu() */ d_inode(ret)->i_cdev = (void *)(cpu + 1); return ret; } static void tracing_init_tracefs_percpu(struct trace_array *tr, long cpu) { struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu); struct dentry *d_cpu; char cpu_dir[30]; /* 30 characters should be more than enough */ if (!d_percpu) return; snprintf(cpu_dir, 30, "cpu%ld", cpu); d_cpu = tracefs_create_dir(cpu_dir, d_percpu); if (!d_cpu) { pr_warn("Could not create tracefs '%s' entry\n", cpu_dir); return; } /* per cpu trace_pipe */ trace_create_cpu_file("trace_pipe", 0444, d_cpu, tr, cpu, &tracing_pipe_fops); /* per cpu trace */ trace_create_cpu_file("trace", 0644, d_cpu, tr, cpu, &tracing_fops); trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu, tr, cpu, &tracing_buffers_fops); trace_create_cpu_file("stats", 0444, d_cpu, tr, cpu, &tracing_stats_fops); trace_create_cpu_file("buffer_size_kb", 0444, d_cpu, tr, cpu, &tracing_entries_fops); #ifdef CONFIG_TRACER_SNAPSHOT trace_create_cpu_file("snapshot", 0644, d_cpu, tr, cpu, &snapshot_fops); trace_create_cpu_file("snapshot_raw", 0444, d_cpu, tr, cpu, &snapshot_raw_fops); #endif } #ifdef CONFIG_FTRACE_SELFTEST /* Let selftest have access to static functions in this file */ #include "trace_selftest.c" #endif static ssize_t trace_options_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { struct trace_option_dentry *topt = filp->private_data; char *buf; if (topt->flags->val & topt->opt->bit) buf = "1\n"; else buf = "0\n"; return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2); } static ssize_t trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { struct trace_option_dentry *topt = filp->private_data; unsigned long val; int ret; ret = kstrtoul_from_user(ubuf, cnt, 10, &val); if (ret) return ret; if (val != 0 && val != 1) return -EINVAL; if (!!(topt->flags->val & topt->opt->bit) != val) { mutex_lock(&trace_types_lock); ret = __set_tracer_option(topt->tr, topt->flags, topt->opt, !val); mutex_unlock(&trace_types_lock); if (ret) return ret; } *ppos += cnt; return cnt; } static const struct file_operations trace_options_fops = { .open = tracing_open_generic, .read = trace_options_read, .write = trace_options_write, .llseek = generic_file_llseek, }; /* 
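 * (A worked illustration of the scheme described below, assuming
 *  init_trace_flags_index() has run: with data == &tr->trace_flags_index[3],
 *  idx = *data == 3, so data - idx == &tr->trace_flags_index[0], and
 *  container_of() on that address recovers tr.)
 *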
* In order to pass in both the trace_array descriptor as well as the index * to the flag that the trace option file represents, the trace_array * has a character array of trace_flags_index[], which holds the index * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc. * The address of this character array is passed to the flag option file * read/write callbacks. * * In order to extract both the index and the trace_array descriptor, * get_tr_index() uses the following algorithm. * * idx = *ptr; * * As the pointer itself contains the address of the index (remember * index[1] == 1). * * Then to get the trace_array descriptor, by subtracting that index * from the ptr, we get to the start of the index itself. * * ptr - idx == &index[0] * * Then a simple container_of() from that pointer gets us to the * trace_array descriptor. */ static void get_tr_index(void *data, struct trace_array **ptr, unsigned int *pindex) { *pindex = *(unsigned char *)data; *ptr = container_of(data - *pindex, struct trace_array, trace_flags_index); } static ssize_t trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { void *tr_index = filp->private_data; struct trace_array *tr; unsigned int index; char *buf; get_tr_index(tr_index, &tr, &index); if (tr->trace_flags & (1 << index)) buf = "1\n"; else buf = "0\n"; return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2); } static ssize_t trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { void *tr_index = filp->private_data; struct trace_array *tr; unsigned int index; unsigned long val; int ret; get_tr_index(tr_index, &tr, &index); ret = kstrtoul_from_user(ubuf, cnt, 10, &val); if (ret) return ret; if (val != 0 && val != 1) return -EINVAL; mutex_lock(&trace_types_lock); ret = set_tracer_flag(tr, 1 << index, val); mutex_unlock(&trace_types_lock); if (ret < 0) return ret; *ppos += cnt; return cnt; } static const struct file_operations trace_options_core_fops = { .open = tracing_open_generic, .read = trace_options_core_read, .write = trace_options_core_write, .llseek = generic_file_llseek, }; struct dentry *trace_create_file(const char *name, umode_t mode, struct dentry *parent, void *data, const struct file_operations *fops) { struct dentry *ret; ret = tracefs_create_file(name, mode, parent, data, fops); if (!ret) pr_warn("Could not create tracefs '%s' entry\n", name); return ret; } static struct dentry *trace_options_init_dentry(struct trace_array *tr) { struct dentry *d_tracer; if (tr->options) return tr->options; d_tracer = tracing_get_dentry(tr); if (IS_ERR(d_tracer)) return NULL; tr->options = tracefs_create_dir("options", d_tracer); if (!tr->options) { pr_warn("Could not create tracefs directory 'options'\n"); return NULL; } return tr->options; } static void create_trace_option_file(struct trace_array *tr, struct trace_option_dentry *topt, struct tracer_flags *flags, struct tracer_opt *opt) { struct dentry *t_options; t_options = trace_options_init_dentry(tr); if (!t_options) return; topt->flags = flags; topt->opt = opt; topt->tr = tr; topt->entry = trace_create_file(opt->name, 0644, t_options, topt, &trace_options_fops); } static void create_trace_option_files(struct trace_array *tr, struct tracer *tracer) { struct trace_option_dentry *topts; struct trace_options *tr_topts; struct tracer_flags *flags; struct tracer_opt *opts; int cnt; int i; if (!tracer) return; flags = tracer->flags; if (!flags || !flags->opts) return; /* * If this is an instance, only create flags for 
tracers * the instance may have. */ if (!trace_ok_for_array(tracer, tr)) return; for (i = 0; i < tr->nr_topts; i++) { /* Make sure there's no duplicate flags. */ if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags)) return; } opts = flags->opts; for (cnt = 0; opts[cnt].name; cnt++) ; topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL); if (!topts) return; tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1), GFP_KERNEL); if (!tr_topts) { kfree(topts); return; } tr->topts = tr_topts; tr->topts[tr->nr_topts].tracer = tracer; tr->topts[tr->nr_topts].topts = topts; tr->nr_topts++; for (cnt = 0; opts[cnt].name; cnt++) { create_trace_option_file(tr, &topts[cnt], flags, &opts[cnt]); WARN_ONCE(topts[cnt].entry == NULL, "Failed to create trace option: %s", opts[cnt].name); } } static struct dentry * create_trace_option_core_file(struct trace_array *tr, const char *option, long index) { struct dentry *t_options; t_options = trace_options_init_dentry(tr); if (!t_options) return NULL; return trace_create_file(option, 0644, t_options, (void *)&tr->trace_flags_index[index], &trace_options_core_fops); } static void create_trace_options_dir(struct trace_array *tr) { struct dentry *t_options; bool top_level = tr == &global_trace; int i; t_options = trace_options_init_dentry(tr); if (!t_options) return; for (i = 0; trace_options[i]; i++) { if (top_level || !((1 << i) & TOP_LEVEL_TRACE_FLAGS)) create_trace_option_core_file(tr, trace_options[i], i); } } static ssize_t rb_simple_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { struct trace_array *tr = filp->private_data; char buf[64]; int r; r = tracer_tracing_is_on(tr); r = sprintf(buf, "%d\n", r); return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); } static ssize_t rb_simple_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { struct trace_array *tr = filp->private_data; struct ring_buffer *buffer = tr->trace_buffer.buffer; unsigned long val; int ret; ret = kstrtoul_from_user(ubuf, cnt, 10, &val); if (ret) return ret; if (buffer) { mutex_lock(&trace_types_lock); if (!!val == tracer_tracing_is_on(tr)) { val = 0; /* do nothing */ } else if (val) { tracer_tracing_on(tr); if (tr->current_trace->start) tr->current_trace->start(tr); } else { tracer_tracing_off(tr); if (tr->current_trace->stop) tr->current_trace->stop(tr); } mutex_unlock(&trace_types_lock); } (*ppos)++; return cnt; } static const struct file_operations rb_simple_fops = { .open = tracing_open_generic_tr, .read = rb_simple_read, .write = rb_simple_write, .release = tracing_release_generic_tr, .llseek = default_llseek, }; static ssize_t buffer_percent_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { struct trace_array *tr = filp->private_data; char buf[64]; int r; r = tr->buffer_percent; r = sprintf(buf, "%d\n", r); return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); } static ssize_t buffer_percent_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { struct trace_array *tr = filp->private_data; unsigned long val; int ret; ret = kstrtoul_from_user(ubuf, cnt, 10, &val); if (ret) return ret; if (val > 100) return -EINVAL; if (!val) val = 1; tr->buffer_percent = val; (*ppos)++; return cnt; } static const struct file_operations buffer_percent_fops = { .open = tracing_open_generic_tr, .read = buffer_percent_read, .write = buffer_percent_write, .release = tracing_release_generic_tr, .llseek = default_llseek, }; struct dentry *trace_instance_dir; static void 
init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer); static int allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size) { enum ring_buffer_flags rb_flags; rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0; buf->tr = tr; buf->buffer = ring_buffer_alloc(size, rb_flags); if (!buf->buffer) return -ENOMEM; buf->data = alloc_percpu(struct trace_array_cpu); if (!buf->data) { ring_buffer_free(buf->buffer); buf->buffer = NULL; return -ENOMEM; } /* Allocate the first page for all buffers */ set_buffer_entries(&tr->trace_buffer, ring_buffer_size(tr->trace_buffer.buffer, 0)); return 0; } static int allocate_trace_buffers(struct trace_array *tr, int size) { int ret; ret = allocate_trace_buffer(tr, &tr->trace_buffer, size); if (ret) return ret; #ifdef CONFIG_TRACER_MAX_TRACE ret = allocate_trace_buffer(tr, &tr->max_buffer, allocate_snapshot ? size : 1); if (WARN_ON(ret)) { ring_buffer_free(tr->trace_buffer.buffer); tr->trace_buffer.buffer = NULL; free_percpu(tr->trace_buffer.data); tr->trace_buffer.data = NULL; return -ENOMEM; } tr->allocated_snapshot = allocate_snapshot; /* * Only the top level trace array gets its snapshot allocated * from the kernel command line. */ allocate_snapshot = false; #endif return 0; } static void free_trace_buffer(struct trace_buffer *buf) { if (buf->buffer) { ring_buffer_free(buf->buffer); buf->buffer = NULL; free_percpu(buf->data); buf->data = NULL; } } static void free_trace_buffers(struct trace_array *tr) { if (!tr) return; free_trace_buffer(&tr->trace_buffer); #ifdef CONFIG_TRACER_MAX_TRACE free_trace_buffer(&tr->max_buffer); #endif } static void init_trace_flags_index(struct trace_array *tr) { int i; /* Used by the trace options files */ for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) tr->trace_flags_index[i] = i; } static void __update_tracer_options(struct trace_array *tr) { struct tracer *t; for (t = trace_types; t; t = t->next) add_tracer_options(tr, t); } static void update_tracer_options(struct trace_array *tr) { mutex_lock(&trace_types_lock); __update_tracer_options(tr); mutex_unlock(&trace_types_lock); } static int instance_mkdir(const char *name) { struct trace_array *tr; int ret; mutex_lock(&event_mutex); mutex_lock(&trace_types_lock); ret = -EEXIST; list_for_each_entry(tr, &ftrace_trace_arrays, list) { if (tr->name && strcmp(tr->name, name) == 0) goto out_unlock; } ret = -ENOMEM; tr = kzalloc(sizeof(*tr), GFP_KERNEL); if (!tr) goto out_unlock; tr->name = kstrdup(name, GFP_KERNEL); if (!tr->name) goto out_free_tr; if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL)) goto out_free_tr; tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS; cpumask_copy(tr->tracing_cpumask, cpu_all_mask); raw_spin_lock_init(&tr->start_lock); tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; tr->current_trace = &nop_trace; INIT_LIST_HEAD(&tr->systems); INIT_LIST_HEAD(&tr->events); INIT_LIST_HEAD(&tr->hist_vars); if (allocate_trace_buffers(tr, trace_buf_size) < 0) goto out_free_tr; tr->dir = tracefs_create_dir(name, trace_instance_dir); if (!tr->dir) goto out_free_tr; ret = event_trace_add_tracer(tr->dir, tr); if (ret) { tracefs_remove_recursive(tr->dir); goto out_free_tr; } ftrace_init_trace_array(tr); init_tracer_tracefs(tr, tr->dir); init_trace_flags_index(tr); __update_tracer_options(tr); list_add(&tr->list, &ftrace_trace_arrays); mutex_unlock(&trace_types_lock); mutex_unlock(&event_mutex); return 0; out_free_tr: free_trace_buffers(tr); free_cpumask_var(tr->tracing_cpumask); kfree(tr->name); 
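/* Buffers, cpumask and name are gone; finally drop the trace_array itself. */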
kfree(tr); out_unlock: mutex_unlock(&trace_types_lock); mutex_unlock(&event_mutex); return ret; } static int instance_rmdir(const char *name) { struct trace_array *tr; int found = 0; int ret; int i; mutex_lock(&event_mutex); mutex_lock(&trace_types_lock); ret = -ENODEV; list_for_each_entry(tr, &ftrace_trace_arrays, list) { if (tr->name && strcmp(tr->name, name) == 0) { found = 1; break; } } if (!found) goto out_unlock; ret = -EBUSY; if (tr->ref || (tr->current_trace && tr->current_trace->ref)) goto out_unlock; list_del(&tr->list); /* Disable all the flags that were enabled coming in */ for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) { if ((1 << i) & ZEROED_TRACE_FLAGS) set_tracer_flag(tr, 1 << i, 0); } tracing_set_nop(tr); clear_ftrace_function_probes(tr); event_trace_del_tracer(tr); ftrace_clear_pids(tr); ftrace_destroy_function_files(tr); tracefs_remove_recursive(tr->dir); free_trace_buffers(tr); for (i = 0; i < tr->nr_topts; i++) { kfree(tr->topts[i].topts); } kfree(tr->topts); free_cpumask_var(tr->tracing_cpumask); kfree(tr->name); kfree(tr); ret = 0; out_unlock: mutex_unlock(&trace_types_lock); mutex_unlock(&event_mutex); return ret; } static __init void create_trace_instances(struct dentry *d_tracer) { trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer, instance_mkdir, instance_rmdir); if (WARN_ON(!trace_instance_dir)) return; } static void init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer) { struct trace_event_file *file; int cpu; trace_create_file("available_tracers", 0444, d_tracer, tr, &show_traces_fops); trace_create_file("current_tracer", 0644, d_tracer, tr, &set_tracer_fops); trace_create_file("tracing_cpumask", 0644, d_tracer, tr, &tracing_cpumask_fops); trace_create_file("trace_options", 0644, d_tracer, tr, &tracing_iter_fops); trace_create_file("trace", 0644, d_tracer, tr, &tracing_fops); trace_create_file("trace_pipe", 0444, d_tracer, tr, &tracing_pipe_fops); trace_create_file("buffer_size_kb", 0644, d_tracer, tr, &tracing_entries_fops); trace_create_file("buffer_total_size_kb", 0444, d_tracer, tr, &tracing_total_entries_fops); trace_create_file("free_buffer", 0200, d_tracer, tr, &tracing_free_buffer_fops); trace_create_file("trace_marker", 0220, d_tracer, tr, &tracing_mark_fops); file = __find_event_file(tr, "ftrace", "print"); if (file && file->dir) trace_create_file("trigger", 0644, file->dir, file, &event_trigger_fops); tr->trace_marker_file = file; trace_create_file("trace_marker_raw", 0220, d_tracer, tr, &tracing_mark_raw_fops); trace_create_file("trace_clock", 0644, d_tracer, tr, &trace_clock_fops); trace_create_file("tracing_on", 0644, d_tracer, tr, &rb_simple_fops); trace_create_file("timestamp_mode", 0444, d_tracer, tr, &trace_time_stamp_mode_fops); tr->buffer_percent = 50; trace_create_file("buffer_percent", 0444, d_tracer, tr, &buffer_percent_fops); create_trace_options_dir(tr); #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER) trace_create_file("tracing_max_latency", 0644, d_tracer, &tr->max_latency, &tracing_max_lat_fops); #endif if (ftrace_create_function_files(tr, d_tracer)) WARN(1, "Could not allocate function filter files"); #ifdef CONFIG_TRACER_SNAPSHOT trace_create_file("snapshot", 0644, d_tracer, tr, &snapshot_fops); #endif for_each_tracing_cpu(cpu) tracing_init_tracefs_percpu(tr, cpu); ftrace_init_tracefs(tr, d_tracer); } static struct vfsmount *trace_automount(struct dentry *mntpt, void *ingore) { struct vfsmount *mnt; struct file_system_type *type; /* * To maintain backward compatibility for tools 
that mount * debugfs to get to the tracing facility, tracefs is automatically * mounted to the debugfs/tracing directory. */ type = get_fs_type("tracefs"); if (!type) return NULL; mnt = vfs_submount(mntpt, type, "tracefs", NULL); put_filesystem(type); if (IS_ERR(mnt)) return NULL; mntget(mnt); return mnt; } /** * tracing_init_dentry - initialize top level trace array * * This is called when creating files or directories in the tracing * directory. It is called via fs_initcall() by any of the boot up code * and expects to return the dentry of the top level tracing directory. */ struct dentry *tracing_init_dentry(void) { struct trace_array *tr = &global_trace; /* The top level trace array uses NULL as parent */ if (tr->dir) return NULL; if (WARN_ON(!tracefs_initialized()) || (IS_ENABLED(CONFIG_DEBUG_FS) && WARN_ON(!debugfs_initialized()))) return ERR_PTR(-ENODEV); /* * As there may still be users that expect the tracing * files to exist in debugfs/tracing, we must automount * the tracefs file system there, so older tools still * work with the newer kernel. */ tr->dir = debugfs_create_automount("tracing", NULL, trace_automount, NULL); if (!tr->dir) { pr_warn_once("Could not create debugfs directory 'tracing'\n"); return ERR_PTR(-ENOMEM); } return NULL; } extern struct trace_eval_map *__start_ftrace_eval_maps[]; extern struct trace_eval_map *__stop_ftrace_eval_maps[]; static void __init trace_eval_init(void) { int len; len = __stop_ftrace_eval_maps - __start_ftrace_eval_maps; trace_insert_eval_map(NULL, __start_ftrace_eval_maps, len); } #ifdef CONFIG_MODULES static void trace_module_add_evals(struct module *mod) { if (!mod->num_trace_evals) return; /* * Modules with bad taint do not have events created, do * not bother with enums either. */ if (trace_module_has_bad_taint(mod)) return; trace_insert_eval_map(mod, mod->trace_evals, mod->num_trace_evals); } #ifdef CONFIG_TRACE_EVAL_MAP_FILE static void trace_module_remove_evals(struct module *mod) { union trace_eval_map_item *map; union trace_eval_map_item **last = &trace_eval_maps; if (!mod->num_trace_evals) return; mutex_lock(&trace_eval_mutex); map = trace_eval_maps; while (map) { if (map->head.mod == mod) break; map = trace_eval_jmp_to_tail(map); last = &map->tail.next; map = map->tail.next; } if (!map) goto out; *last = trace_eval_jmp_to_tail(map)->tail.next; kfree(map); out: mutex_unlock(&trace_eval_mutex); } #else static inline void trace_module_remove_evals(struct module *mod) { } #endif /* CONFIG_TRACE_EVAL_MAP_FILE */ static int trace_module_notify(struct notifier_block *self, unsigned long val, void *data) { struct module *mod = data; switch (val) { case MODULE_STATE_COMING: trace_module_add_evals(mod); break; case MODULE_STATE_GOING: trace_module_remove_evals(mod); break; } return 0; } static struct notifier_block trace_module_nb = { .notifier_call = trace_module_notify, .priority = 0, }; #endif /* CONFIG_MODULES */ static __init int tracer_init_tracefs(void) { struct dentry *d_tracer; trace_access_lock_init(); d_tracer = tracing_init_dentry(); if (IS_ERR(d_tracer)) return 0; event_trace_init(); init_tracer_tracefs(&global_trace, d_tracer); ftrace_init_tracefs_toplevel(&global_trace, d_tracer); trace_create_file("tracing_thresh", 0644, d_tracer, &global_trace, &tracing_thresh_fops); trace_create_file("README", 0444, d_tracer, NULL, &tracing_readme_fops); trace_create_file("saved_cmdlines", 0444, d_tracer, NULL, &tracing_saved_cmdlines_fops); trace_create_file("saved_cmdlines_size", 0644, d_tracer, NULL,
&tracing_saved_cmdlines_size_fops); trace_create_file("saved_tgids", 0444, d_tracer, NULL, &tracing_saved_tgids_fops); trace_eval_init(); trace_create_eval_file(d_tracer); #ifdef CONFIG_MODULES register_module_notifier(&trace_module_nb); #endif #ifdef CONFIG_DYNAMIC_FTRACE trace_create_file("dyn_ftrace_total_info", 0444, d_tracer, &ftrace_update_tot_cnt, &tracing_dyn_info_fops); #endif create_trace_instances(d_tracer); update_tracer_options(&global_trace); return 0; } static int trace_panic_handler(struct notifier_block *this, unsigned long event, void *unused) { if (ftrace_dump_on_oops) ftrace_dump(ftrace_dump_on_oops); return NOTIFY_OK; } static struct notifier_block trace_panic_notifier = { .notifier_call = trace_panic_handler, .next = NULL, .priority = 150 /* priority: INT_MAX >= x >= 0 */ }; static int trace_die_handler(struct notifier_block *self, unsigned long val, void *data) { switch (val) { case DIE_OOPS: if (ftrace_dump_on_oops) ftrace_dump(ftrace_dump_on_oops); break; default: break; } return NOTIFY_OK; } static struct notifier_block trace_die_notifier = { .notifier_call = trace_die_handler, .priority = 200 }; /* * printk is set to max of 1024, we really don't need it that big. * Nothing should be printing 1000 characters anyway. */ #define TRACE_MAX_PRINT 1000 /* * Define here KERN_TRACE so that we have one place to modify * it if we decide to change what log level the ftrace dump * should be at. */ #define KERN_TRACE KERN_EMERG void trace_printk_seq(struct trace_seq *s) { /* Probably should print a warning here. */ if (s->seq.len >= TRACE_MAX_PRINT) s->seq.len = TRACE_MAX_PRINT; /* * More paranoid code. Although the buffer size is set to * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just * an extra layer of protection. */ if (WARN_ON_ONCE(s->seq.len >= s->seq.size)) s->seq.len = s->seq.size - 1; /* should be zero ended, but we are paranoid. */ s->buffer[s->seq.len] = 0; printk(KERN_TRACE "%s", s->buffer); trace_seq_init(s); } void trace_init_global_iter(struct trace_iterator *iter) { iter->tr = &global_trace; iter->trace = iter->tr->current_trace; iter->cpu_file = RING_BUFFER_ALL_CPUS; iter->trace_buffer = &global_trace.trace_buffer; if (iter->trace && iter->trace->open) iter->trace->open(iter); /* Annotate start of buffers if we had overruns */ if (ring_buffer_overruns(iter->trace_buffer->buffer)) iter->iter_flags |= TRACE_FILE_ANNOTATE; /* Output in nanoseconds only if we are using a clock in nanoseconds. */ if (trace_clocks[iter->tr->clock_id].in_ns) iter->iter_flags |= TRACE_FILE_TIME_IN_NS; } void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { /* use static because iter can be a bit big for the stack */ static struct trace_iterator iter; static atomic_t dump_running; struct trace_array *tr = &global_trace; unsigned int old_userobj; unsigned long flags; int cnt = 0, cpu; /* Only allow one dump user at a time. */ if (atomic_inc_return(&dump_running) != 1) { atomic_dec(&dump_running); return; } /* * Always turn off tracing when we dump. * We don't need to show trace output of what happens * between multiple crashes. * * If the user does a sysrq-z, then they can re-enable * tracing with echo 1 > tracing_on. 
*/ tracing_off(); local_irq_save(flags); printk_nmi_direct_enter(); /* Simulate the iterator */ trace_init_global_iter(&iter); for_each_tracing_cpu(cpu) { atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled); } old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ; /* don't look at user memory in panic mode */ tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ; switch (oops_dump_mode) { case DUMP_ALL: iter.cpu_file = RING_BUFFER_ALL_CPUS; break; case DUMP_ORIG: iter.cpu_file = raw_smp_processor_id(); break; case DUMP_NONE: goto out_enable; default: printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n"); iter.cpu_file = RING_BUFFER_ALL_CPUS; } printk(KERN_TRACE "Dumping ftrace buffer:\n"); /* Did function tracer already get disabled? */ if (ftrace_is_dead()) { printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n"); printk("# MAY BE MISSING FUNCTION EVENTS\n"); } /* * We need to stop all tracing on all CPUS to read the * the next buffer. This is a bit expensive, but is * not done often. We fill all what we can read, * and then release the locks again. */ while (!trace_empty(&iter)) { if (!cnt) printk(KERN_TRACE "---------------------------------\n"); cnt++; /* reset all but tr, trace, and overruns */ memset(&iter.seq, 0, sizeof(struct trace_iterator) - offsetof(struct trace_iterator, seq)); iter.iter_flags |= TRACE_FILE_LAT_FMT; iter.pos = -1; if (trace_find_next_entry_inc(&iter) != NULL) { int ret; ret = print_trace_line(&iter); if (ret != TRACE_TYPE_NO_CONSUME) trace_consume(&iter); } touch_nmi_watchdog(); trace_printk_seq(&iter.seq); } if (!cnt) printk(KERN_TRACE " (ftrace buffer empty)\n"); else printk(KERN_TRACE "---------------------------------\n"); out_enable: tr->trace_flags |= old_userobj; for_each_tracing_cpu(cpu) { atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled); } atomic_dec(&dump_running); printk_nmi_direct_exit(); local_irq_restore(flags); } EXPORT_SYMBOL_GPL(ftrace_dump); int trace_run_command(const char *buf, int (*createfn)(int, char **)) { char **argv; int argc, ret; argc = 0; ret = 0; argv = argv_split(GFP_KERNEL, buf, &argc); if (!argv) return -ENOMEM; if (argc) ret = createfn(argc, argv); argv_free(argv); return ret; } #define WRITE_BUFSIZE 4096 ssize_t trace_parse_run_command(struct file *file, const char __user *buffer, size_t count, loff_t *ppos, int (*createfn)(int, char **)) { char *kbuf, *buf, *tmp; int ret = 0; size_t done = 0; size_t size; kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL); if (!kbuf) return -ENOMEM; while (done < count) { size = count - done; if (size >= WRITE_BUFSIZE) size = WRITE_BUFSIZE - 1; if (copy_from_user(kbuf, buffer + done, size)) { ret = -EFAULT; goto out; } kbuf[size] = '\0'; buf = kbuf; do { tmp = strchr(buf, '\n'); if (tmp) { *tmp = '\0'; size = tmp - buf + 1; } else { size = strlen(buf); if (done + size < count) { if (buf != kbuf) break; /* This can accept WRITE_BUFSIZE - 2 ('\n' + '\0') */ pr_warn("Line length is too long: Should be less than %d\n", WRITE_BUFSIZE - 2); ret = -EINVAL; goto out; } } done += size; /* Remove comments */ tmp = strchr(buf, '#'); if (tmp) *tmp = '\0'; ret = trace_run_command(buf, createfn); if (ret) goto out; buf += size; } while (done < count); } ret = done; out: kfree(kbuf); return ret; } __init static int tracer_alloc_buffers(void) { int ring_buf_size; int ret = -ENOMEM; /* * Make sure we don't accidently add more trace options * than we have bits for. 
*/ BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE); if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL)) goto out; if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL)) goto out_free_buffer_mask; /* Only allocate trace_printk buffers if a trace_printk exists */ if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt) /* Must be called before global_trace.buffer is allocated */ trace_printk_init_buffers(); /* To save memory, keep the ring buffer size to its minimum */ if (ring_buffer_expanded) ring_buf_size = trace_buf_size; else ring_buf_size = 1; cpumask_copy(tracing_buffer_mask, cpu_possible_mask); cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask); raw_spin_lock_init(&global_trace.start_lock); /* * The prepare callbacks allocates some memory for the ring buffer. We * don't free the buffer if the if the CPU goes down. If we were to free * the buffer, then the user would lose any trace that was in the * buffer. The memory will be removed once the "instance" is removed. */ ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE, "trace/RB:preapre", trace_rb_cpu_prepare, NULL); if (ret < 0) goto out_free_cpumask; /* Used for event triggers */ ret = -ENOMEM; temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE); if (!temp_buffer) goto out_rm_hp_state; if (trace_create_savedcmd() < 0) goto out_free_temp_buffer; /* TODO: make the number of buffers hot pluggable with CPUS */ if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) { printk(KERN_ERR "tracer: failed to allocate ring buffer!\n"); WARN_ON(1); goto out_free_savedcmd; } if (global_trace.buffer_disabled) tracing_off(); if (trace_boot_clock) { ret = tracing_set_clock(&global_trace, trace_boot_clock); if (ret < 0) pr_warn("Trace clock %s not defined, going back to default\n", trace_boot_clock); } /* * register_tracer() might reference current_trace, so it * needs to be set before we register anything. This is * just a bootstrap of current_trace anyway. */ global_trace.current_trace = &nop_trace; global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; ftrace_init_global_array_ops(&global_trace); init_trace_flags_index(&global_trace); register_tracer(&nop_trace); /* Function tracing may start here (via kernel command line) */ init_function_trace(); /* All seems OK, enable tracing */ tracing_disabled = 0; atomic_notifier_chain_register(&panic_notifier_list, &trace_panic_notifier); register_die_notifier(&trace_die_notifier); global_trace.flags = TRACE_ARRAY_FL_GLOBAL; INIT_LIST_HEAD(&global_trace.systems); INIT_LIST_HEAD(&global_trace.events); INIT_LIST_HEAD(&global_trace.hist_vars); list_add(&global_trace.list, &ftrace_trace_arrays); apply_trace_boot_options(); register_snapshot_cmd(); return 0; out_free_savedcmd: free_saved_cmdlines_buffer(savedcmd); out_free_temp_buffer: ring_buffer_free(temp_buffer); out_rm_hp_state: cpuhp_remove_multi_state(CPUHP_TRACE_RB_PREPARE); out_free_cpumask: free_cpumask_var(global_trace.tracing_cpumask); out_free_buffer_mask: free_cpumask_var(tracing_buffer_mask); out: return ret; } void __init early_trace_init(void) { if (tracepoint_printk) { tracepoint_print_iter = kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL); if (WARN_ON(!tracepoint_print_iter)) tracepoint_printk = 0; else static_key_enable(&tracepoint_printk_key.key); } tracer_alloc_buffers(); } void __init trace_init(void) { trace_event_init(); } __init static int clear_boot_tracer(void) { /* * The default tracer at boot buffer is an init section. * This function is called in lateinit. 
If we did not * find the boot tracer, then clear it out, to prevent * later registration from accessing the buffer that is * about to be freed. */ if (!default_bootup_tracer) return 0; printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n", default_bootup_tracer); default_bootup_tracer = NULL; return 0; } fs_initcall(tracer_init_tracefs); late_initcall_sync(clear_boot_tracer); #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK __init static int tracing_set_default_clock(void) { /* sched_clock_stable() is determined in late_initcall */ if (!trace_boot_clock && !sched_clock_stable()) { printk(KERN_WARNING "Unstable clock detected, switching default tracing clock to \"global\"\n" "If you want to keep using the local clock, then add:\n" " \"trace_clock=local\"\n" "on the kernel command line\n"); tracing_set_clock(&global_trace, "global"); } return 0; } late_initcall_sync(tracing_set_default_clock); #endif
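The trace_parse_run_command() helper in the code above consumes user input in WRITE_BUFSIZE chunks, splits each chunk on newlines, strips '#' comments, and hands every complete command line to a callback, carrying a partial line over to the next chunk and rejecting any single line longer than one chunk. Below is a minimal userspace sketch of that chunked line-splitting pattern; parse_commands(), run_line() and SKETCH_BUFSIZE are illustrative stand-ins, not the kernel interfaces.

/* Sketch of the chunked line-splitting used by trace_parse_run_command():
 * consume `count` bytes of `input` in fixed-size chunks, invoke run_line()
 * once per newline-terminated command, and reject one-chunk-long lines. */
#include <errno.h>
#include <stdio.h>
#include <string.h>

#define SKETCH_BUFSIZE 16               /* tiny on purpose, to exercise wrapping */

static int run_line(const char *line)   /* stand-in for trace_run_command() */
{
    printf("cmd: \"%s\"\n", line);
    return 0;
}

static long parse_commands(const char *input, size_t count)
{
    char kbuf[SKETCH_BUFSIZE];
    size_t done = 0;

    while (done < count) {
        size_t size = count - done;

        if (size >= SKETCH_BUFSIZE)
            size = SKETCH_BUFSIZE - 1;  /* leave room for '\0' */
        memcpy(kbuf, input + done, size);   /* copy_from_user() stand-in */
        kbuf[size] = '\0';

        char *buf = kbuf;
        do {
            char *nl = strchr(buf, '\n');
            if (nl) {
                *nl = '\0';
                size = nl - buf + 1;    /* the line plus its '\n' */
            } else {
                size = strlen(buf);
                if (done + size < count) {
                    if (buf != kbuf)    /* partial tail: refill and retry */
                        break;
                    return -EINVAL;     /* one line filled the whole chunk */
                }
            }
            done += size;

            char *hash = strchr(buf, '#');  /* drop trailing comment */
            if (hash)
                *hash = '\0';

            int ret = run_line(buf);
            if (ret)
                return ret;
            buf += size;
        } while (done < count);
    }
    return (long)done;
}

int main(void)
{
    const char cmd[] = "p:probe1 x\n# note\np:probe2 y\n";
    return parse_commands(cmd, sizeof(cmd) - 1) < 0;
}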
static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
                                struct pipe_buffer *buf)
{
        struct buffer_ref *ref = (struct buffer_ref *)buf->private;

        ref->ref++;
}
static bool buffer_pipe_buf_get(struct pipe_inode_info *pipe,
                                struct pipe_buffer *buf)
{
        struct buffer_ref *ref = (struct buffer_ref *)buf->private;

        if (ref->ref > INT_MAX/2)
                return false;

        ref->ref++;
        return true;
}
{'added': [(6838, 'static bool buffer_pipe_buf_get(struct pipe_inode_info *pipe,'), (6843, '\tif (ref->ref > INT_MAX/2)'), (6844, '\t\treturn false;'), (6845, ''), (6847, '\treturn true;')], 'deleted': [(6838, 'static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,')]}
5
1
5916
33167
https://github.com/torvalds/linux
CVE-2019-11487
['CWE-416']
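The one-line diff above is the heart of CVE-2019-11487: buffer_pipe_buf_get() previously incremented ref->ref unconditionally, so repeatedly splicing the same pipe page could wrap the int back to zero and free the page while references were still live (hence CWE-416, use-after-free). The fix refuses to take a reference once the count passes INT_MAX/2 and reports failure through the new bool return. A self-contained sketch of that saturating-refcount pattern follows; the toy_ref/toy_get/toy_put names are illustrative, not the kernel's.

/* Toy illustration of the CVE-2019-11487 fix pattern: a reference count
 * that saturates well below INT_MAX, so repeated get operations cannot
 * wrap it to zero and provoke a premature free. */
#include <assert.h>
#include <limits.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct toy_ref {                    /* stand-in for struct buffer_ref */
    int ref;
    void *page;
};

/* Post-fix shape, mirroring the bool return of buffer_pipe_buf_get();
 * the pre-fix code was an unconditional ref->ref++. */
static bool toy_get(struct toy_ref *r)
{
    if (r->ref > INT_MAX / 2)
        return false;               /* caller must fail the splice/clone */
    r->ref++;
    return true;
}

static void toy_put(struct toy_ref *r)
{
    if (--r->ref == 0) {            /* with a wrapped count this fired early */
        free(r->page);
        r->page = NULL;
    }
}

int main(void)
{
    struct toy_ref r = { .ref = 1, .page = malloc(64) };

    r.ref = INT_MAX / 2 + 1;        /* pretend an attacker inflated the count */
    assert(!toy_get(&r));           /* the fixed getter refuses to go further */
    printf("refcount saturated at %d\n", r.ref);

    r.ref = 1;                      /* normal lifecycle still frees exactly once */
    toy_put(&r);
    return 0;
}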
tool_msgs.c
voutf
/*************************************************************************** * _ _ ____ _ * Project ___| | | | _ \| | * / __| | | | |_) | | * | (__| |_| | _ <| |___ * \___|\___/|_| \_\_____| * * Copyright (C) 1998 - 2015, 2017, Daniel Stenberg, <daniel@haxx.se>, et al. * * This software is licensed as described in the file COPYING, which * you should have received as part of this distribution. The terms * are also available at https://curl.haxx.se/docs/copyright.html. * * You may opt to use, copy, modify, merge, publish, distribute and/or sell * copies of the Software, and permit persons to whom the Software is * furnished to do so, under the terms of the COPYING file. * * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY * KIND, either express or implied. * ***************************************************************************/ #include "tool_setup.h" #define ENABLE_CURLX_PRINTF /* use our own printf() functions */ #include "curlx.h" #include "tool_cfgable.h" #include "tool_msgs.h" #include "memdebug.h" /* keep this as LAST include */ #define WARN_PREFIX "Warning: " #define NOTE_PREFIX "Note: " static void voutf(struct GlobalConfig *config, const char *prefix, const char *fmt, va_list ap) { size_t width = (79 - strlen(prefix)); if(!config->mute) { size_t len; char *ptr; char *print_buffer; print_buffer = curlx_mvaprintf(fmt, ap); if(!print_buffer) return; len = strlen(print_buffer); ptr = print_buffer; while(len > 0) { fputs(prefix, config->errors); if(len > width) { size_t cut = width-1; while(!ISSPACE(ptr[cut]) && cut) { cut--; } if(0 == cut) /* not a single cutting position was found, just cut it at the max text width then! */ cut = width-1; (void)fwrite(ptr, cut + 1, 1, config->errors); fputs("\n", config->errors); ptr += cut + 1; /* skip the space too */ len -= cut; } else { fputs(ptr, config->errors); len = 0; } } curl_free(print_buffer); } } /* * Emit 'note' formatted message on configured 'errors' stream, if verbose was * selected. */ void notef(struct GlobalConfig *config, const char *fmt, ...) { va_list ap; va_start(ap, fmt); if(config->tracetype) voutf(config, NOTE_PREFIX, fmt, ap); va_end(ap); } /* * Emit warning formatted message on configured 'errors' stream unless * mute (--silent) was selected. */ void warnf(struct GlobalConfig *config, const char *fmt, ...) { va_list ap; va_start(ap, fmt); voutf(config, WARN_PREFIX, fmt, ap); va_end(ap); } /* * Emit help formatted message on given stream. */ void helpf(FILE *errors, const char *fmt, ...) { if(fmt) { va_list ap; va_start(ap, fmt); fputs("curl: ", errors); /* prefix it */ vfprintf(errors, fmt, ap); va_end(ap); } fprintf(errors, "curl: try 'curl --help' " #ifdef USE_MANUAL "or 'curl --manual' " #endif "for more information\n"); }
/*************************************************************************** * _ _ ____ _ * Project ___| | | | _ \| | * / __| | | | |_) | | * | (__| |_| | _ <| |___ * \___|\___/|_| \_\_____| * * Copyright (C) 1998 - 2015, 2017, Daniel Stenberg, <daniel@haxx.se>, et al. * * This software is licensed as described in the file COPYING, which * you should have received as part of this distribution. The terms * are also available at https://curl.haxx.se/docs/copyright.html. * * You may opt to use, copy, modify, merge, publish, distribute and/or sell * copies of the Software, and permit persons to whom the Software is * furnished to do so, under the terms of the COPYING file. * * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY * KIND, either express or implied. * ***************************************************************************/ #include "tool_setup.h" #define ENABLE_CURLX_PRINTF /* use our own printf() functions */ #include "curlx.h" #include "tool_cfgable.h" #include "tool_msgs.h" #include "memdebug.h" /* keep this as LAST include */ #define WARN_PREFIX "Warning: " #define NOTE_PREFIX "Note: " static void voutf(struct GlobalConfig *config, const char *prefix, const char *fmt, va_list ap) { size_t width = (79 - strlen(prefix)); if(!config->mute) { size_t len; char *ptr; char *print_buffer; print_buffer = curlx_mvaprintf(fmt, ap); if(!print_buffer) return; len = strlen(print_buffer); ptr = print_buffer; while(len > 0) { fputs(prefix, config->errors); if(len > width) { size_t cut = width-1; while(!ISSPACE(ptr[cut]) && cut) { cut--; } if(0 == cut) /* not a single cutting position was found, just cut it at the max text width then! */ cut = width-1; (void)fwrite(ptr, cut + 1, 1, config->errors); fputs("\n", config->errors); ptr += cut + 1; /* skip the space too */ len -= cut + 1; } else { fputs(ptr, config->errors); len = 0; } } curl_free(print_buffer); } } /* * Emit 'note' formatted message on configured 'errors' stream, if verbose was * selected. */ void notef(struct GlobalConfig *config, const char *fmt, ...) { va_list ap; va_start(ap, fmt); if(config->tracetype) voutf(config, NOTE_PREFIX, fmt, ap); va_end(ap); } /* * Emit warning formatted message on configured 'errors' stream unless * mute (--silent) was selected. */ void warnf(struct GlobalConfig *config, const char *fmt, ...) { va_list ap; va_start(ap, fmt); voutf(config, WARN_PREFIX, fmt, ap); va_end(ap); } /* * Emit help formatted message on given stream. */ void helpf(FILE *errors, const char *fmt, ...) { if(fmt) { va_list ap; va_start(ap, fmt); fputs("curl: ", errors); /* prefix it */ vfprintf(errors, fmt, ap); va_end(ap); } fprintf(errors, "curl: try 'curl --help' " #ifdef USE_MANUAL "or 'curl --manual' " #endif "for more information\n"); }
static void voutf(struct GlobalConfig *config,
                  const char *prefix,
                  const char *fmt, va_list ap)
{
  size_t width = (79 - strlen(prefix));
  if(!config->mute) {
    size_t len;
    char *ptr;
    char *print_buffer;

    print_buffer = curlx_mvaprintf(fmt, ap);
    if(!print_buffer)
      return;
    len = strlen(print_buffer);

    ptr = print_buffer;
    while(len > 0) {
      fputs(prefix, config->errors);
      if(len > width) {
        size_t cut = width-1;

        while(!ISSPACE(ptr[cut]) && cut) {
          cut--;
        }
        if(0 == cut)
          /* not a single cutting position was found, just cut it at the
             max text width then! */
          cut = width-1;

        (void)fwrite(ptr, cut + 1, 1, config->errors);
        fputs("\n", config->errors);
        ptr += cut + 1; /* skip the space too */
        len -= cut;
      }
      else {
        fputs(ptr, config->errors);
        len = 0;
      }
    }
    curl_free(print_buffer);
  }
}
static void voutf(struct GlobalConfig *config,
                  const char *prefix,
                  const char *fmt, va_list ap)
{
  size_t width = (79 - strlen(prefix));
  if(!config->mute) {
    size_t len;
    char *ptr;
    char *print_buffer;

    print_buffer = curlx_mvaprintf(fmt, ap);
    if(!print_buffer)
      return;
    len = strlen(print_buffer);

    ptr = print_buffer;
    while(len > 0) {
      fputs(prefix, config->errors);
      if(len > width) {
        size_t cut = width-1;

        while(!ISSPACE(ptr[cut]) && cut) {
          cut--;
        }
        if(0 == cut)
          /* not a single cutting position was found, just cut it at the
             max text width then! */
          cut = width-1;

        (void)fwrite(ptr, cut + 1, 1, config->errors);
        fputs("\n", config->errors);
        ptr += cut + 1; /* skip the space too */
        len -= cut + 1;
      }
      else {
        fputs(ptr, config->errors);
        len = 0;
      }
    }
    curl_free(print_buffer);
  }
}
{'added': [(70, ' len -= cut + 1;')], 'deleted': [(70, ' len -= cut;')]}
1
1
70
369
https://github.com/curl/curl
CVE-2018-16842
['CWE-125']
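The curl record above is a one-byte accounting bug: after wrapping a line, voutf() advanced the read pointer by cut + 1 (to skip the space) but shrank len by only cut, so len overstated the remaining text by one byte per wrap and later iterations could index past the end of the heap buffer in the ISSPACE(ptr[cut]) scan (CWE-125, out-of-bounds read). A minimal sketch of the corrected wrap loop follows, with illustrative names (wrap_print, OUT_WIDTH) rather than curl's internals; the invariant is that the pointer and the length always move by the same amount.

/* Minimal word-wrap loop mirroring the fixed voutf() accounting: every
 * time the read pointer advances by N bytes, the remaining length
 * shrinks by exactly the same N. */
#include <ctype.h>
#include <stdio.h>
#include <string.h>

#define OUT_WIDTH 20    /* illustrative stand-in for curl's 79 - strlen(prefix) */

static void wrap_print(const char *prefix, const char *text)
{
  size_t len = strlen(text);
  const char *ptr = text;

  while(len > 0) {
    fputs(prefix, stdout);
    if(len > OUT_WIDTH) {
      size_t cut = OUT_WIDTH - 1;

      while(!isspace((unsigned char)ptr[cut]) && cut)
        cut--;
      if(cut == 0)
        cut = OUT_WIDTH - 1;       /* no space found: hard cut at the width */

      fwrite(ptr, cut + 1, 1, stdout);
      fputs("\n", stdout);
      ptr += cut + 1;              /* consumed cut + 1 bytes...           */
      len -= cut + 1;              /* ...so drop cut + 1, not just cut    */
    }
    else {
      printf("%s\n", ptr);
      len = 0;
    }
  }
}

int main(void)
{
  wrap_print("Warning: ", "this message is long enough to need wrapping");
  return 0;
}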
keyctl.c
keyctl_chown_key
/* Userspace key control operations * * Copyright (C) 2004-5 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/module.h> #include <linux/init.h> #include <linux/sched.h> #include <linux/sched/task.h> #include <linux/slab.h> #include <linux/syscalls.h> #include <linux/key.h> #include <linux/keyctl.h> #include <linux/fs.h> #include <linux/capability.h> #include <linux/cred.h> #include <linux/string.h> #include <linux/err.h> #include <linux/vmalloc.h> #include <linux/security.h> #include <linux/uio.h> #include <linux/uaccess.h> #include "internal.h" #define KEY_MAX_DESC_SIZE 4096 static int key_get_type_from_user(char *type, const char __user *_type, unsigned len) { int ret; ret = strncpy_from_user(type, _type, len); if (ret < 0) return ret; if (ret == 0 || ret >= len) return -EINVAL; if (type[0] == '.') return -EPERM; type[len - 1] = '\0'; return 0; } /* * Extract the description of a new key from userspace and either add it as a * new key to the specified keyring or update a matching key in that keyring. * * If the description is NULL or an empty string, the key type is asked to * generate one from the payload. * * The keyring must be writable so that we can attach the key to it. * * If successful, the new key's serial number is returned, otherwise an error * code is returned. */ SYSCALL_DEFINE5(add_key, const char __user *, _type, const char __user *, _description, const void __user *, _payload, size_t, plen, key_serial_t, ringid) { key_ref_t keyring_ref, key_ref; char type[32], *description; void *payload; long ret; ret = -EINVAL; if (plen > 1024 * 1024 - 1) goto error; /* draw all the data into kernel space */ ret = key_get_type_from_user(type, _type, sizeof(type)); if (ret < 0) goto error; description = NULL; if (_description) { description = strndup_user(_description, KEY_MAX_DESC_SIZE); if (IS_ERR(description)) { ret = PTR_ERR(description); goto error; } if (!*description) { kfree(description); description = NULL; } else if ((description[0] == '.') && (strncmp(type, "keyring", 7) == 0)) { ret = -EPERM; goto error2; } } /* pull the payload in if one was supplied */ payload = NULL; if (plen) { ret = -ENOMEM; payload = kvmalloc(plen, GFP_KERNEL); if (!payload) goto error2; ret = -EFAULT; if (copy_from_user(payload, _payload, plen) != 0) goto error3; } /* find the target keyring (which must be writable) */ keyring_ref = lookup_user_key(ringid, KEY_LOOKUP_CREATE, KEY_NEED_WRITE); if (IS_ERR(keyring_ref)) { ret = PTR_ERR(keyring_ref); goto error3; } /* create or update the requested key and add it to the target * keyring */ key_ref = key_create_or_update(keyring_ref, type, description, payload, plen, KEY_PERM_UNDEF, KEY_ALLOC_IN_QUOTA); if (!IS_ERR(key_ref)) { ret = key_ref_to_ptr(key_ref)->serial; key_ref_put(key_ref); } else { ret = PTR_ERR(key_ref); } key_ref_put(keyring_ref); error3: if (payload) { memzero_explicit(payload, plen); kvfree(payload); } error2: kfree(description); error: return ret; } /* * Search the process keyrings and keyring trees linked from those for a * matching key. Keyrings must have appropriate Search permission to be * searched. 
* * If a key is found, it will be attached to the destination keyring if there's * one specified and the serial number of the key will be returned. * * If no key is found, /sbin/request-key will be invoked if _callout_info is * non-NULL in an attempt to create a key. The _callout_info string will be * passed to /sbin/request-key to aid with completing the request. If the * _callout_info string is "" then it will be changed to "-". */ SYSCALL_DEFINE4(request_key, const char __user *, _type, const char __user *, _description, const char __user *, _callout_info, key_serial_t, destringid) { struct key_type *ktype; struct key *key; key_ref_t dest_ref; size_t callout_len; char type[32], *description, *callout_info; long ret; /* pull the type into kernel space */ ret = key_get_type_from_user(type, _type, sizeof(type)); if (ret < 0) goto error; /* pull the description into kernel space */ description = strndup_user(_description, KEY_MAX_DESC_SIZE); if (IS_ERR(description)) { ret = PTR_ERR(description); goto error; } /* pull the callout info into kernel space */ callout_info = NULL; callout_len = 0; if (_callout_info) { callout_info = strndup_user(_callout_info, PAGE_SIZE); if (IS_ERR(callout_info)) { ret = PTR_ERR(callout_info); goto error2; } callout_len = strlen(callout_info); } /* get the destination keyring if specified */ dest_ref = NULL; if (destringid) { dest_ref = lookup_user_key(destringid, KEY_LOOKUP_CREATE, KEY_NEED_WRITE); if (IS_ERR(dest_ref)) { ret = PTR_ERR(dest_ref); goto error3; } } /* find the key type */ ktype = key_type_lookup(type); if (IS_ERR(ktype)) { ret = PTR_ERR(ktype); goto error4; } /* do the search */ key = request_key_and_link(ktype, description, callout_info, callout_len, NULL, key_ref_to_ptr(dest_ref), KEY_ALLOC_IN_QUOTA); if (IS_ERR(key)) { ret = PTR_ERR(key); goto error5; } /* wait for the key to finish being constructed */ ret = wait_for_key_construction(key, 1); if (ret < 0) goto error6; ret = key->serial; error6: key_put(key); error5: key_type_put(ktype); error4: key_ref_put(dest_ref); error3: kfree(callout_info); error2: kfree(description); error: return ret; } /* * Get the ID of the specified process keyring. * * The requested keyring must have search permission to be found. * * If successful, the ID of the requested keyring will be returned. */ long keyctl_get_keyring_ID(key_serial_t id, int create) { key_ref_t key_ref; unsigned long lflags; long ret; lflags = create ? KEY_LOOKUP_CREATE : 0; key_ref = lookup_user_key(id, lflags, KEY_NEED_SEARCH); if (IS_ERR(key_ref)) { ret = PTR_ERR(key_ref); goto error; } ret = key_ref_to_ptr(key_ref)->serial; key_ref_put(key_ref); error: return ret; } /* * Join a (named) session keyring. * * Create and join an anonymous session keyring or join a named session * keyring, creating it if necessary. A named session keyring must have Search * permission for it to be joined. Session keyrings without this permit will * be skipped over. It is not permitted for userspace to create or join * keyrings whose name begin with a dot. * * If successful, the ID of the joined session keyring will be returned. 
*/ long keyctl_join_session_keyring(const char __user *_name) { char *name; long ret; /* fetch the name from userspace */ name = NULL; if (_name) { name = strndup_user(_name, KEY_MAX_DESC_SIZE); if (IS_ERR(name)) { ret = PTR_ERR(name); goto error; } ret = -EPERM; if (name[0] == '.') goto error_name; } /* join the session */ ret = join_session_keyring(name); error_name: kfree(name); error: return ret; } /* * Update a key's data payload from the given data. * * The key must grant the caller Write permission and the key type must support * updating for this to work. A negative key can be positively instantiated * with this call. * * If successful, 0 will be returned. If the key type does not support * updating, then -EOPNOTSUPP will be returned. */ long keyctl_update_key(key_serial_t id, const void __user *_payload, size_t plen) { key_ref_t key_ref; void *payload; long ret; ret = -EINVAL; if (plen > PAGE_SIZE) goto error; /* pull the payload in if one was supplied */ payload = NULL; if (plen) { ret = -ENOMEM; payload = kmalloc(plen, GFP_KERNEL); if (!payload) goto error; ret = -EFAULT; if (copy_from_user(payload, _payload, plen) != 0) goto error2; } /* find the target key (which must be writable) */ key_ref = lookup_user_key(id, 0, KEY_NEED_WRITE); if (IS_ERR(key_ref)) { ret = PTR_ERR(key_ref); goto error2; } /* update the key */ ret = key_update(key_ref, payload, plen); key_ref_put(key_ref); error2: kzfree(payload); error: return ret; } /* * Revoke a key. * * The key must be grant the caller Write or Setattr permission for this to * work. The key type should give up its quota claim when revoked. The key * and any links to the key will be automatically garbage collected after a * certain amount of time (/proc/sys/kernel/keys/gc_delay). * * Keys with KEY_FLAG_KEEP set should not be revoked. * * If successful, 0 is returned. */ long keyctl_revoke_key(key_serial_t id) { key_ref_t key_ref; struct key *key; long ret; key_ref = lookup_user_key(id, 0, KEY_NEED_WRITE); if (IS_ERR(key_ref)) { ret = PTR_ERR(key_ref); if (ret != -EACCES) goto error; key_ref = lookup_user_key(id, 0, KEY_NEED_SETATTR); if (IS_ERR(key_ref)) { ret = PTR_ERR(key_ref); goto error; } } key = key_ref_to_ptr(key_ref); ret = 0; if (test_bit(KEY_FLAG_KEEP, &key->flags)) ret = -EPERM; else key_revoke(key); key_ref_put(key_ref); error: return ret; } /* * Invalidate a key. * * The key must be grant the caller Invalidate permission for this to work. * The key and any links to the key will be automatically garbage collected * immediately. * * Keys with KEY_FLAG_KEEP set should not be invalidated. * * If successful, 0 is returned. */ long keyctl_invalidate_key(key_serial_t id) { key_ref_t key_ref; struct key *key; long ret; kenter("%d", id); key_ref = lookup_user_key(id, 0, KEY_NEED_SEARCH); if (IS_ERR(key_ref)) { ret = PTR_ERR(key_ref); /* Root is permitted to invalidate certain special keys */ if (capable(CAP_SYS_ADMIN)) { key_ref = lookup_user_key(id, 0, 0); if (IS_ERR(key_ref)) goto error; if (test_bit(KEY_FLAG_ROOT_CAN_INVAL, &key_ref_to_ptr(key_ref)->flags)) goto invalidate; goto error_put; } goto error; } invalidate: key = key_ref_to_ptr(key_ref); ret = 0; if (test_bit(KEY_FLAG_KEEP, &key->flags)) ret = -EPERM; else key_invalidate(key); error_put: key_ref_put(key_ref); error: kleave(" = %ld", ret); return ret; } /* * Clear the specified keyring, creating an empty process keyring if one of the * special keyring IDs is used. * * The keyring must grant the caller Write permission and not have * KEY_FLAG_KEEP set for this to work. 
If successful, 0 will be returned. */ long keyctl_keyring_clear(key_serial_t ringid) { key_ref_t keyring_ref; struct key *keyring; long ret; keyring_ref = lookup_user_key(ringid, KEY_LOOKUP_CREATE, KEY_NEED_WRITE); if (IS_ERR(keyring_ref)) { ret = PTR_ERR(keyring_ref); /* Root is permitted to invalidate certain special keyrings */ if (capable(CAP_SYS_ADMIN)) { keyring_ref = lookup_user_key(ringid, 0, 0); if (IS_ERR(keyring_ref)) goto error; if (test_bit(KEY_FLAG_ROOT_CAN_CLEAR, &key_ref_to_ptr(keyring_ref)->flags)) goto clear; goto error_put; } goto error; } clear: keyring = key_ref_to_ptr(keyring_ref); if (test_bit(KEY_FLAG_KEEP, &keyring->flags)) ret = -EPERM; else ret = keyring_clear(keyring); error_put: key_ref_put(keyring_ref); error: return ret; } /* * Create a link from a keyring to a key if there's no matching key in the * keyring, otherwise replace the link to the matching key with a link to the * new key. * * The key must grant the caller Link permission and the the keyring must grant * the caller Write permission. Furthermore, if an additional link is created, * the keyring's quota will be extended. * * If successful, 0 will be returned. */ long keyctl_keyring_link(key_serial_t id, key_serial_t ringid) { key_ref_t keyring_ref, key_ref; long ret; keyring_ref = lookup_user_key(ringid, KEY_LOOKUP_CREATE, KEY_NEED_WRITE); if (IS_ERR(keyring_ref)) { ret = PTR_ERR(keyring_ref); goto error; } key_ref = lookup_user_key(id, KEY_LOOKUP_CREATE, KEY_NEED_LINK); if (IS_ERR(key_ref)) { ret = PTR_ERR(key_ref); goto error2; } ret = key_link(key_ref_to_ptr(keyring_ref), key_ref_to_ptr(key_ref)); key_ref_put(key_ref); error2: key_ref_put(keyring_ref); error: return ret; } /* * Unlink a key from a keyring. * * The keyring must grant the caller Write permission for this to work; the key * itself need not grant the caller anything. If the last link to a key is * removed then that key will be scheduled for destruction. * * Keys or keyrings with KEY_FLAG_KEEP set should not be unlinked. * * If successful, 0 will be returned. */ long keyctl_keyring_unlink(key_serial_t id, key_serial_t ringid) { key_ref_t keyring_ref, key_ref; struct key *keyring, *key; long ret; keyring_ref = lookup_user_key(ringid, 0, KEY_NEED_WRITE); if (IS_ERR(keyring_ref)) { ret = PTR_ERR(keyring_ref); goto error; } key_ref = lookup_user_key(id, KEY_LOOKUP_FOR_UNLINK, 0); if (IS_ERR(key_ref)) { ret = PTR_ERR(key_ref); goto error2; } keyring = key_ref_to_ptr(keyring_ref); key = key_ref_to_ptr(key_ref); if (test_bit(KEY_FLAG_KEEP, &keyring->flags) && test_bit(KEY_FLAG_KEEP, &key->flags)) ret = -EPERM; else ret = key_unlink(keyring, key); key_ref_put(key_ref); error2: key_ref_put(keyring_ref); error: return ret; } /* * Return a description of a key to userspace. * * The key must grant the caller View permission for this to work. * * If there's a buffer, we place up to buflen bytes of data into it formatted * in the following way: * * type;uid;gid;perm;description<NUL> * * If successful, we return the amount of description available, irrespective * of how much we may have copied into the buffer. 
*/ long keyctl_describe_key(key_serial_t keyid, char __user *buffer, size_t buflen) { struct key *key, *instkey; key_ref_t key_ref; char *infobuf; long ret; int desclen, infolen; key_ref = lookup_user_key(keyid, KEY_LOOKUP_PARTIAL, KEY_NEED_VIEW); if (IS_ERR(key_ref)) { /* viewing a key under construction is permitted if we have the * authorisation token handy */ if (PTR_ERR(key_ref) == -EACCES) { instkey = key_get_instantiation_authkey(keyid); if (!IS_ERR(instkey)) { key_put(instkey); key_ref = lookup_user_key(keyid, KEY_LOOKUP_PARTIAL, 0); if (!IS_ERR(key_ref)) goto okay; } } ret = PTR_ERR(key_ref); goto error; } okay: key = key_ref_to_ptr(key_ref); desclen = strlen(key->description); /* calculate how much information we're going to return */ ret = -ENOMEM; infobuf = kasprintf(GFP_KERNEL, "%s;%d;%d;%08x;", key->type->name, from_kuid_munged(current_user_ns(), key->uid), from_kgid_munged(current_user_ns(), key->gid), key->perm); if (!infobuf) goto error2; infolen = strlen(infobuf); ret = infolen + desclen + 1; /* consider returning the data */ if (buffer && buflen >= ret) { if (copy_to_user(buffer, infobuf, infolen) != 0 || copy_to_user(buffer + infolen, key->description, desclen + 1) != 0) ret = -EFAULT; } kfree(infobuf); error2: key_ref_put(key_ref); error: return ret; } /* * Search the specified keyring and any keyrings it links to for a matching * key. Only keyrings that grant the caller Search permission will be searched * (this includes the starting keyring). Only keys with Search permission can * be found. * * If successful, the found key will be linked to the destination keyring if * supplied and the key has Link permission, and the found key ID will be * returned. */ long keyctl_keyring_search(key_serial_t ringid, const char __user *_type, const char __user *_description, key_serial_t destringid) { struct key_type *ktype; key_ref_t keyring_ref, key_ref, dest_ref; char type[32], *description; long ret; /* pull the type and description into kernel space */ ret = key_get_type_from_user(type, _type, sizeof(type)); if (ret < 0) goto error; description = strndup_user(_description, KEY_MAX_DESC_SIZE); if (IS_ERR(description)) { ret = PTR_ERR(description); goto error; } /* get the keyring at which to begin the search */ keyring_ref = lookup_user_key(ringid, 0, KEY_NEED_SEARCH); if (IS_ERR(keyring_ref)) { ret = PTR_ERR(keyring_ref); goto error2; } /* get the destination keyring if specified */ dest_ref = NULL; if (destringid) { dest_ref = lookup_user_key(destringid, KEY_LOOKUP_CREATE, KEY_NEED_WRITE); if (IS_ERR(dest_ref)) { ret = PTR_ERR(dest_ref); goto error3; } } /* find the key type */ ktype = key_type_lookup(type); if (IS_ERR(ktype)) { ret = PTR_ERR(ktype); goto error4; } /* do the search */ key_ref = keyring_search(keyring_ref, ktype, description); if (IS_ERR(key_ref)) { ret = PTR_ERR(key_ref); /* treat lack or presence of a negative key the same */ if (ret == -EAGAIN) ret = -ENOKEY; goto error5; } /* link the resulting key to the destination keyring if we can */ if (dest_ref) { ret = key_permission(key_ref, KEY_NEED_LINK); if (ret < 0) goto error6; ret = key_link(key_ref_to_ptr(dest_ref), key_ref_to_ptr(key_ref)); if (ret < 0) goto error6; } ret = key_ref_to_ptr(key_ref)->serial; error6: key_ref_put(key_ref); error5: key_type_put(ktype); error4: key_ref_put(dest_ref); error3: key_ref_put(keyring_ref); error2: kfree(description); error: return ret; } /* * Read a key's payload. 
* * The key must either grant the caller Read permission, or it must grant the * caller Search permission when searched for from the process keyrings. * * If successful, we place up to buflen bytes of data into the buffer, if one * is provided, and return the amount of data that is available in the key, * irrespective of how much we copied into the buffer. */ long keyctl_read_key(key_serial_t keyid, char __user *buffer, size_t buflen) { struct key *key; key_ref_t key_ref; long ret; /* find the key first */ key_ref = lookup_user_key(keyid, 0, 0); if (IS_ERR(key_ref)) { ret = -ENOKEY; goto error; } key = key_ref_to_ptr(key_ref); if (test_bit(KEY_FLAG_NEGATIVE, &key->flags)) { ret = -ENOKEY; goto error2; } /* see if we can read it directly */ ret = key_permission(key_ref, KEY_NEED_READ); if (ret == 0) goto can_read_key; if (ret != -EACCES) goto error2; /* we can't; see if it's searchable from this process's keyrings * - we automatically take account of the fact that it may be * dangling off an instantiation key */ if (!is_key_possessed(key_ref)) { ret = -EACCES; goto error2; } /* the key is probably readable - now try to read it */ can_read_key: ret = -EOPNOTSUPP; if (key->type->read) { /* Read the data with the semaphore held (since we might sleep) * to protect against the key being updated or revoked. */ down_read(&key->sem); ret = key_validate(key); if (ret == 0) ret = key->type->read(key, buffer, buflen); up_read(&key->sem); } error2: key_put(key); error: return ret; } /* * Change the ownership of a key * * The key must grant the caller Setattr permission for this to work, though * the key need not be fully instantiated yet. For the UID to be changed, or * for the GID to be changed to a group the caller is not a member of, the * caller must have sysadmin capability. If either uid or gid is -1 then that * attribute is not changed. * * If the UID is to be changed, the new user must have sufficient quota to * accept the key. The quota deduction will be removed from the old user to * the new user should the attribute be changed. * * If successful, 0 will be returned. */ long keyctl_chown_key(key_serial_t id, uid_t user, gid_t group) { struct key_user *newowner, *zapowner = NULL; struct key *key; key_ref_t key_ref; long ret; kuid_t uid; kgid_t gid; uid = make_kuid(current_user_ns(), user); gid = make_kgid(current_user_ns(), group); ret = -EINVAL; if ((user != (uid_t) -1) && !uid_valid(uid)) goto error; if ((group != (gid_t) -1) && !gid_valid(gid)) goto error; ret = 0; if (user == (uid_t) -1 && group == (gid_t) -1) goto error; key_ref = lookup_user_key(id, KEY_LOOKUP_CREATE | KEY_LOOKUP_PARTIAL, KEY_NEED_SETATTR); if (IS_ERR(key_ref)) { ret = PTR_ERR(key_ref); goto error; } key = key_ref_to_ptr(key_ref); /* make the changes with the locks held to prevent chown/chown races */ ret = -EACCES; down_write(&key->sem); if (!capable(CAP_SYS_ADMIN)) { /* only the sysadmin can chown a key to some other UID */ if (user != (uid_t) -1 && !uid_eq(key->uid, uid)) goto error_put; /* only the sysadmin can set the key's GID to a group other * than one of those that the current process subscribes to */ if (group != (gid_t) -1 && !gid_eq(gid, key->gid) && !in_group_p(gid)) goto error_put; } /* change the UID */ if (user != (uid_t) -1 && !uid_eq(uid, key->uid)) { ret = -ENOMEM; newowner = key_user_lookup(uid); if (!newowner) goto error_put; /* transfer the quota burden to the new user */ if (test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) { unsigned maxkeys = uid_eq(uid, GLOBAL_ROOT_UID) ? 
key_quota_root_maxkeys : key_quota_maxkeys; unsigned maxbytes = uid_eq(uid, GLOBAL_ROOT_UID) ? key_quota_root_maxbytes : key_quota_maxbytes; spin_lock(&newowner->lock); if (newowner->qnkeys + 1 >= maxkeys || newowner->qnbytes + key->quotalen >= maxbytes || newowner->qnbytes + key->quotalen < newowner->qnbytes) goto quota_overrun; newowner->qnkeys++; newowner->qnbytes += key->quotalen; spin_unlock(&newowner->lock); spin_lock(&key->user->lock); key->user->qnkeys--; key->user->qnbytes -= key->quotalen; spin_unlock(&key->user->lock); } atomic_dec(&key->user->nkeys); atomic_inc(&newowner->nkeys); if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) { atomic_dec(&key->user->nikeys); atomic_inc(&newowner->nikeys); } zapowner = key->user; key->user = newowner; key->uid = uid; } /* change the GID */ if (group != (gid_t) -1) key->gid = gid; ret = 0; error_put: up_write(&key->sem); key_put(key); if (zapowner) key_user_put(zapowner); error: return ret; quota_overrun: spin_unlock(&newowner->lock); zapowner = newowner; ret = -EDQUOT; goto error_put; } /* * Change the permission mask on a key. * * The key must grant the caller Setattr permission for this to work, though * the key need not be fully instantiated yet. If the caller does not have * sysadmin capability, it may only change the permission on keys that it owns. */ long keyctl_setperm_key(key_serial_t id, key_perm_t perm) { struct key *key; key_ref_t key_ref; long ret; ret = -EINVAL; if (perm & ~(KEY_POS_ALL | KEY_USR_ALL | KEY_GRP_ALL | KEY_OTH_ALL)) goto error; key_ref = lookup_user_key(id, KEY_LOOKUP_CREATE | KEY_LOOKUP_PARTIAL, KEY_NEED_SETATTR); if (IS_ERR(key_ref)) { ret = PTR_ERR(key_ref); goto error; } key = key_ref_to_ptr(key_ref); /* make the changes with the locks held to prevent chown/chmod races */ ret = -EACCES; down_write(&key->sem); /* if we're not the sysadmin, we can only change a key that we own */ if (capable(CAP_SYS_ADMIN) || uid_eq(key->uid, current_fsuid())) { key->perm = perm; ret = 0; } up_write(&key->sem); key_put(key); error: return ret; } /* * Get the destination keyring for instantiation and check that the caller has * Write permission on it. */ static long get_instantiation_keyring(key_serial_t ringid, struct request_key_auth *rka, struct key **_dest_keyring) { key_ref_t dkref; *_dest_keyring = NULL; /* just return a NULL pointer if we weren't asked to make a link */ if (ringid == 0) return 0; /* if a specific keyring is nominated by ID, then use that */ if (ringid > 0) { dkref = lookup_user_key(ringid, KEY_LOOKUP_CREATE, KEY_NEED_WRITE); if (IS_ERR(dkref)) return PTR_ERR(dkref); *_dest_keyring = key_ref_to_ptr(dkref); return 0; } if (ringid == KEY_SPEC_REQKEY_AUTH_KEY) return -EINVAL; /* otherwise specify the destination keyring recorded in the * authorisation key (any KEY_SPEC_*_KEYRING) */ if (ringid >= KEY_SPEC_REQUESTOR_KEYRING) { *_dest_keyring = key_get(rka->dest_keyring); return 0; } return -ENOKEY; } /* * Change the request_key authorisation key on the current process. */ static int keyctl_change_reqkey_auth(struct key *key) { struct cred *new; new = prepare_creds(); if (!new) return -ENOMEM; key_put(new->request_key_auth); new->request_key_auth = key_get(key); return commit_creds(new); } /* * Instantiate a key with the specified payload and link the key into the * destination keyring if one is given. * * The caller must have the appropriate instantiation permit set for this to * work (see keyctl_assume_authority). No other permissions are required. * * If successful, 0 will be returned. 
*/ long keyctl_instantiate_key_common(key_serial_t id, struct iov_iter *from, key_serial_t ringid) { const struct cred *cred = current_cred(); struct request_key_auth *rka; struct key *instkey, *dest_keyring; size_t plen = from ? iov_iter_count(from) : 0; void *payload; long ret; kenter("%d,,%zu,%d", id, plen, ringid); if (!plen) from = NULL; ret = -EINVAL; if (plen > 1024 * 1024 - 1) goto error; /* the appropriate instantiation authorisation key must have been * assumed before calling this */ ret = -EPERM; instkey = cred->request_key_auth; if (!instkey) goto error; rka = instkey->payload.data[0]; if (rka->target_key->serial != id) goto error; /* pull the payload in if one was supplied */ payload = NULL; if (from) { ret = -ENOMEM; payload = kvmalloc(plen, GFP_KERNEL); if (!payload) goto error; ret = -EFAULT; if (!copy_from_iter_full(payload, plen, from)) goto error2; } /* find the destination keyring amongst those belonging to the * requesting task */ ret = get_instantiation_keyring(ringid, rka, &dest_keyring); if (ret < 0) goto error2; /* instantiate the key and link it into a keyring */ ret = key_instantiate_and_link(rka->target_key, payload, plen, dest_keyring, instkey); key_put(dest_keyring); /* discard the assumed authority if it's just been disabled by * instantiation of the key */ if (ret == 0) keyctl_change_reqkey_auth(NULL); error2: if (payload) { memzero_explicit(payload, plen); kvfree(payload); } error: return ret; } /* * Instantiate a key with the specified payload and link the key into the * destination keyring if one is given. * * The caller must have the appropriate instantiation permit set for this to * work (see keyctl_assume_authority). No other permissions are required. * * If successful, 0 will be returned. */ long keyctl_instantiate_key(key_serial_t id, const void __user *_payload, size_t plen, key_serial_t ringid) { if (_payload && plen) { struct iovec iov; struct iov_iter from; int ret; ret = import_single_range(WRITE, (void __user *)_payload, plen, &iov, &from); if (unlikely(ret)) return ret; return keyctl_instantiate_key_common(id, &from, ringid); } return keyctl_instantiate_key_common(id, NULL, ringid); } /* * Instantiate a key with the specified multipart payload and link the key into * the destination keyring if one is given. * * The caller must have the appropriate instantiation permit set for this to * work (see keyctl_assume_authority). No other permissions are required. * * If successful, 0 will be returned. */ long keyctl_instantiate_key_iov(key_serial_t id, const struct iovec __user *_payload_iov, unsigned ioc, key_serial_t ringid) { struct iovec iovstack[UIO_FASTIOV], *iov = iovstack; struct iov_iter from; long ret; if (!_payload_iov) ioc = 0; ret = import_iovec(WRITE, _payload_iov, ioc, ARRAY_SIZE(iovstack), &iov, &from); if (ret < 0) return ret; ret = keyctl_instantiate_key_common(id, &from, ringid); kfree(iov); return ret; } /* * Negatively instantiate the key with the given timeout (in seconds) and link * the key into the destination keyring if one is given. * * The caller must have the appropriate instantiation permit set for this to * work (see keyctl_assume_authority). No other permissions are required. * * The key and any links to the key will be automatically garbage collected * after the timeout expires. * * Negative keys are used to rate limit repeated request_key() calls by causing * them to return -ENOKEY until the negative key expires. * * If successful, 0 will be returned. 
*/ long keyctl_negate_key(key_serial_t id, unsigned timeout, key_serial_t ringid) { return keyctl_reject_key(id, timeout, ENOKEY, ringid); } /* * Negatively instantiate the key with the given timeout (in seconds) and error * code and link the key into the destination keyring if one is given. * * The caller must have the appropriate instantiation permit set for this to * work (see keyctl_assume_authority). No other permissions are required. * * The key and any links to the key will be automatically garbage collected * after the timeout expires. * * Negative keys are used to rate limit repeated request_key() calls by causing * them to return the specified error code until the negative key expires. * * If successful, 0 will be returned. */ long keyctl_reject_key(key_serial_t id, unsigned timeout, unsigned error, key_serial_t ringid) { const struct cred *cred = current_cred(); struct request_key_auth *rka; struct key *instkey, *dest_keyring; long ret; kenter("%d,%u,%u,%d", id, timeout, error, ringid); /* must be a valid error code and mustn't be a kernel special */ if (error <= 0 || error >= MAX_ERRNO || error == ERESTARTSYS || error == ERESTARTNOINTR || error == ERESTARTNOHAND || error == ERESTART_RESTARTBLOCK) return -EINVAL; /* the appropriate instantiation authorisation key must have been * assumed before calling this */ ret = -EPERM; instkey = cred->request_key_auth; if (!instkey) goto error; rka = instkey->payload.data[0]; if (rka->target_key->serial != id) goto error; /* find the destination keyring if present (which must also be * writable) */ ret = get_instantiation_keyring(ringid, rka, &dest_keyring); if (ret < 0) goto error; /* instantiate the key and link it into a keyring */ ret = key_reject_and_link(rka->target_key, timeout, error, dest_keyring, instkey); key_put(dest_keyring); /* discard the assumed authority if it's just been disabled by * instantiation of the key */ if (ret == 0) keyctl_change_reqkey_auth(NULL); error: return ret; } /* * Read or set the default keyring in which request_key() will cache keys and * return the old setting. * * If a thread or process keyring is specified then it will be created if it * doesn't yet exist. The old setting will be returned if successful. */ long keyctl_set_reqkey_keyring(int reqkey_defl) { struct cred *new; int ret, old_setting; old_setting = current_cred_xxx(jit_keyring); if (reqkey_defl == KEY_REQKEY_DEFL_NO_CHANGE) return old_setting; new = prepare_creds(); if (!new) return -ENOMEM; switch (reqkey_defl) { case KEY_REQKEY_DEFL_THREAD_KEYRING: ret = install_thread_keyring_to_cred(new); if (ret < 0) goto error; goto set; case KEY_REQKEY_DEFL_PROCESS_KEYRING: ret = install_process_keyring_to_cred(new); if (ret < 0) goto error; goto set; case KEY_REQKEY_DEFL_DEFAULT: case KEY_REQKEY_DEFL_SESSION_KEYRING: case KEY_REQKEY_DEFL_USER_KEYRING: case KEY_REQKEY_DEFL_USER_SESSION_KEYRING: case KEY_REQKEY_DEFL_REQUESTOR_KEYRING: goto set; case KEY_REQKEY_DEFL_NO_CHANGE: case KEY_REQKEY_DEFL_GROUP_KEYRING: default: ret = -EINVAL; goto error; } set: new->jit_keyring = reqkey_defl; commit_creds(new); return old_setting; error: abort_creds(new); return ret; } /* * Set or clear the timeout on a key. * * Either the key must grant the caller Setattr permission or else the caller * must hold an instantiation authorisation token for the key. * * The timeout is either 0 to clear the timeout, or a number of seconds from * the current time. The key and any links to the key will be automatically * garbage collected after the timeout expires. 
* * Keys with KEY_FLAG_KEEP set should not be timed out. * * If successful, 0 is returned. */ long keyctl_set_timeout(key_serial_t id, unsigned timeout) { struct key *key, *instkey; key_ref_t key_ref; long ret; key_ref = lookup_user_key(id, KEY_LOOKUP_CREATE | KEY_LOOKUP_PARTIAL, KEY_NEED_SETATTR); if (IS_ERR(key_ref)) { /* setting the timeout on a key under construction is permitted * if we have the authorisation token handy */ if (PTR_ERR(key_ref) == -EACCES) { instkey = key_get_instantiation_authkey(id); if (!IS_ERR(instkey)) { key_put(instkey); key_ref = lookup_user_key(id, KEY_LOOKUP_PARTIAL, 0); if (!IS_ERR(key_ref)) goto okay; } } ret = PTR_ERR(key_ref); goto error; } okay: key = key_ref_to_ptr(key_ref); ret = 0; if (test_bit(KEY_FLAG_KEEP, &key->flags)) ret = -EPERM; else key_set_timeout(key, timeout); key_put(key); error: return ret; } /* * Assume (or clear) the authority to instantiate the specified key. * * This sets the authoritative token currently in force for key instantiation. * This must be done for a key to be instantiated. It has the effect of making * available all the keys from the caller of the request_key() that created a * key to request_key() calls made by the caller of this function. * * The caller must have the instantiation key in their process keyrings with a * Search permission grant available to the caller. * * If the ID given is 0, then the setting will be cleared and 0 returned. * * If the ID given has a matching an authorisation key, then that key will be * set and its ID will be returned. The authorisation key can be read to get * the callout information passed to request_key(). */ long keyctl_assume_authority(key_serial_t id) { struct key *authkey; long ret; /* special key IDs aren't permitted */ ret = -EINVAL; if (id < 0) goto error; /* we divest ourselves of authority if given an ID of 0 */ if (id == 0) { ret = keyctl_change_reqkey_auth(NULL); goto error; } /* attempt to assume the authority temporarily granted to us whilst we * instantiate the specified key * - the authorisation key must be in the current task's keyrings * somewhere */ authkey = key_get_instantiation_authkey(id); if (IS_ERR(authkey)) { ret = PTR_ERR(authkey); goto error; } ret = keyctl_change_reqkey_auth(authkey); if (ret == 0) ret = authkey->serial; key_put(authkey); error: return ret; } /* * Get a key's the LSM security label. * * The key must grant the caller View permission for this to work. * * If there's a buffer, then up to buflen bytes of data will be placed into it. * * If successful, the amount of information available will be returned, * irrespective of how much was copied (including the terminal NUL). 
*/ long keyctl_get_security(key_serial_t keyid, char __user *buffer, size_t buflen) { struct key *key, *instkey; key_ref_t key_ref; char *context; long ret; key_ref = lookup_user_key(keyid, KEY_LOOKUP_PARTIAL, KEY_NEED_VIEW); if (IS_ERR(key_ref)) { if (PTR_ERR(key_ref) != -EACCES) return PTR_ERR(key_ref); /* viewing a key under construction is also permitted if we * have the authorisation token handy */ instkey = key_get_instantiation_authkey(keyid); if (IS_ERR(instkey)) return PTR_ERR(instkey); key_put(instkey); key_ref = lookup_user_key(keyid, KEY_LOOKUP_PARTIAL, 0); if (IS_ERR(key_ref)) return PTR_ERR(key_ref); } key = key_ref_to_ptr(key_ref); ret = security_key_getsecurity(key, &context); if (ret == 0) { /* if no information was returned, give userspace an empty * string */ ret = 1; if (buffer && buflen > 0 && copy_to_user(buffer, "", 1) != 0) ret = -EFAULT; } else if (ret > 0) { /* return as much data as there's room for */ if (buffer && buflen > 0) { if (buflen > ret) buflen = ret; if (copy_to_user(buffer, context, buflen) != 0) ret = -EFAULT; } kfree(context); } key_ref_put(key_ref); return ret; } /* * Attempt to install the calling process's session keyring on the process's * parent process. * * The keyring must exist and must grant the caller LINK permission, and the * parent process must be single-threaded and must have the same effective * ownership as this process and mustn't be SUID/SGID. * * The keyring will be emplaced on the parent when it next resumes userspace. * * If successful, 0 will be returned. */ long keyctl_session_to_parent(void) { struct task_struct *me, *parent; const struct cred *mycred, *pcred; struct callback_head *newwork, *oldwork; key_ref_t keyring_r; struct cred *cred; int ret; keyring_r = lookup_user_key(KEY_SPEC_SESSION_KEYRING, 0, KEY_NEED_LINK); if (IS_ERR(keyring_r)) return PTR_ERR(keyring_r); ret = -ENOMEM; /* our parent is going to need a new cred struct, a new tgcred struct * and new security data, so we allocate them here to prevent ENOMEM in * our parent */ cred = cred_alloc_blank(); if (!cred) goto error_keyring; newwork = &cred->rcu; cred->session_keyring = key_ref_to_ptr(keyring_r); keyring_r = NULL; init_task_work(newwork, key_change_session_keyring); me = current; rcu_read_lock(); write_lock_irq(&tasklist_lock); ret = -EPERM; oldwork = NULL; parent = me->real_parent; /* the parent mustn't be init and mustn't be a kernel thread */ if (parent->pid <= 1 || !parent->mm) goto unlock; /* the parent must be single threaded */ if (!thread_group_empty(parent)) goto unlock; /* the parent and the child must have different session keyrings or * there's no point */ mycred = current_cred(); pcred = __task_cred(parent); if (mycred == pcred || mycred->session_keyring == pcred->session_keyring) { ret = 0; goto unlock; } /* the parent must have the same effective ownership and mustn't be * SUID/SGID */ if (!uid_eq(pcred->uid, mycred->euid) || !uid_eq(pcred->euid, mycred->euid) || !uid_eq(pcred->suid, mycred->euid) || !gid_eq(pcred->gid, mycred->egid) || !gid_eq(pcred->egid, mycred->egid) || !gid_eq(pcred->sgid, mycred->egid)) goto unlock; /* the keyrings must have the same UID */ if ((pcred->session_keyring && !uid_eq(pcred->session_keyring->uid, mycred->euid)) || !uid_eq(mycred->session_keyring->uid, mycred->euid)) goto unlock; /* cancel an already pending keyring replacement */ oldwork = task_work_cancel(parent, key_change_session_keyring); /* the replacement session keyring is applied just prior to userspace * restarting */ ret = task_work_add(parent, 
newwork, true); if (!ret) newwork = NULL; unlock: write_unlock_irq(&tasklist_lock); rcu_read_unlock(); if (oldwork) put_cred(container_of(oldwork, struct cred, rcu)); if (newwork) put_cred(cred); return ret; error_keyring: key_ref_put(keyring_r); return ret; } /* * Apply a restriction to a given keyring. * * The caller must have Setattr permission to change keyring restrictions. * * The requested type name may be a NULL pointer to reject all attempts * to link to the keyring. If _type is non-NULL, _restriction can be * NULL or a pointer to a string describing the restriction. If _type is * NULL, _restriction must also be NULL. * * Returns 0 if successful. */ long keyctl_restrict_keyring(key_serial_t id, const char __user *_type, const char __user *_restriction) { key_ref_t key_ref; bool link_reject = !_type; char type[32]; char *restriction = NULL; long ret; key_ref = lookup_user_key(id, 0, KEY_NEED_SETATTR); if (IS_ERR(key_ref)) return PTR_ERR(key_ref); if (_type) { ret = key_get_type_from_user(type, _type, sizeof(type)); if (ret < 0) goto error; } if (_restriction) { if (!_type) { ret = -EINVAL; goto error; } restriction = strndup_user(_restriction, PAGE_SIZE); if (IS_ERR(restriction)) { ret = PTR_ERR(restriction); goto error; } } ret = keyring_restrict(key_ref, link_reject ? NULL : type, restriction); kfree(restriction); error: key_ref_put(key_ref); return ret; } /* * The key control system call */ SYSCALL_DEFINE5(keyctl, int, option, unsigned long, arg2, unsigned long, arg3, unsigned long, arg4, unsigned long, arg5) { switch (option) { case KEYCTL_GET_KEYRING_ID: return keyctl_get_keyring_ID((key_serial_t) arg2, (int) arg3); case KEYCTL_JOIN_SESSION_KEYRING: return keyctl_join_session_keyring((const char __user *) arg2); case KEYCTL_UPDATE: return keyctl_update_key((key_serial_t) arg2, (const void __user *) arg3, (size_t) arg4); case KEYCTL_REVOKE: return keyctl_revoke_key((key_serial_t) arg2); case KEYCTL_DESCRIBE: return keyctl_describe_key((key_serial_t) arg2, (char __user *) arg3, (unsigned) arg4); case KEYCTL_CLEAR: return keyctl_keyring_clear((key_serial_t) arg2); case KEYCTL_LINK: return keyctl_keyring_link((key_serial_t) arg2, (key_serial_t) arg3); case KEYCTL_UNLINK: return keyctl_keyring_unlink((key_serial_t) arg2, (key_serial_t) arg3); case KEYCTL_SEARCH: return keyctl_keyring_search((key_serial_t) arg2, (const char __user *) arg3, (const char __user *) arg4, (key_serial_t) arg5); case KEYCTL_READ: return keyctl_read_key((key_serial_t) arg2, (char __user *) arg3, (size_t) arg4); case KEYCTL_CHOWN: return keyctl_chown_key((key_serial_t) arg2, (uid_t) arg3, (gid_t) arg4); case KEYCTL_SETPERM: return keyctl_setperm_key((key_serial_t) arg2, (key_perm_t) arg3); case KEYCTL_INSTANTIATE: return keyctl_instantiate_key((key_serial_t) arg2, (const void __user *) arg3, (size_t) arg4, (key_serial_t) arg5); case KEYCTL_NEGATE: return keyctl_negate_key((key_serial_t) arg2, (unsigned) arg3, (key_serial_t) arg4); case KEYCTL_SET_REQKEY_KEYRING: return keyctl_set_reqkey_keyring(arg2); case KEYCTL_SET_TIMEOUT: return keyctl_set_timeout((key_serial_t) arg2, (unsigned) arg3); case KEYCTL_ASSUME_AUTHORITY: return keyctl_assume_authority((key_serial_t) arg2); case KEYCTL_GET_SECURITY: return keyctl_get_security((key_serial_t) arg2, (char __user *) arg3, (size_t) arg4); case KEYCTL_SESSION_TO_PARENT: return keyctl_session_to_parent(); case KEYCTL_REJECT: return keyctl_reject_key((key_serial_t) arg2, (unsigned) arg3, (unsigned) arg4, (key_serial_t) arg5); case KEYCTL_INSTANTIATE_IOV: return 
keyctl_instantiate_key_iov( (key_serial_t) arg2, (const struct iovec __user *) arg3, (unsigned) arg4, (key_serial_t) arg5); case KEYCTL_INVALIDATE: return keyctl_invalidate_key((key_serial_t) arg2); case KEYCTL_GET_PERSISTENT: return keyctl_get_persistent((uid_t)arg2, (key_serial_t)arg3); case KEYCTL_DH_COMPUTE: return keyctl_dh_compute((struct keyctl_dh_params __user *) arg2, (char __user *) arg3, (size_t) arg4, (struct keyctl_kdf_params __user *) arg5); case KEYCTL_RESTRICT_KEYRING: return keyctl_restrict_keyring((key_serial_t) arg2, (const char __user *) arg3, (const char __user *) arg4); default: return -EOPNOTSUPP; } }
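The switch statement above is the tail of this record's pre-patch file snapshot: every key-management operation reaches the kernel through this single multiplexed system call. For orientation, a minimal userspace sketch of the kind of wrapper libkeyutils provides follows; xkeyctl() and the key_serial_t typedef are illustrative names, while __NR_keyctl and the KEYCTL_* constants are real kernel UAPI from <sys/syscall.h> and <linux/keyctl.h>.

#include <stdint.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/keyctl.h>

typedef int32_t key_serial_t;   /* mirrors the kernel's key_serial_t */

/* Thin pass-through to the keyctl multiplexer; unused arguments are 0.
 * Returns the kernel's long result, or -1 with errno set on failure. */
static long xkeyctl(int cmd, unsigned long arg2, unsigned long arg3,
                    unsigned long arg4, unsigned long arg5)
{
        return syscall(__NR_keyctl, cmd, arg2, arg3, arg4, arg5);
}

A call such as xkeyctl(KEYCTL_REVOKE, key_id, 0, 0, 0) then lands in the KEYCTL_REVOKE case of the dispatcher.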
/* Userspace key control operations * * Copyright (C) 2004-5 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/module.h> #include <linux/init.h> #include <linux/sched.h> #include <linux/sched/task.h> #include <linux/slab.h> #include <linux/syscalls.h> #include <linux/key.h> #include <linux/keyctl.h> #include <linux/fs.h> #include <linux/capability.h> #include <linux/cred.h> #include <linux/string.h> #include <linux/err.h> #include <linux/vmalloc.h> #include <linux/security.h> #include <linux/uio.h> #include <linux/uaccess.h> #include "internal.h" #define KEY_MAX_DESC_SIZE 4096 static int key_get_type_from_user(char *type, const char __user *_type, unsigned len) { int ret; ret = strncpy_from_user(type, _type, len); if (ret < 0) return ret; if (ret == 0 || ret >= len) return -EINVAL; if (type[0] == '.') return -EPERM; type[len - 1] = '\0'; return 0; } /* * Extract the description of a new key from userspace and either add it as a * new key to the specified keyring or update a matching key in that keyring. * * If the description is NULL or an empty string, the key type is asked to * generate one from the payload. * * The keyring must be writable so that we can attach the key to it. * * If successful, the new key's serial number is returned, otherwise an error * code is returned. */ SYSCALL_DEFINE5(add_key, const char __user *, _type, const char __user *, _description, const void __user *, _payload, size_t, plen, key_serial_t, ringid) { key_ref_t keyring_ref, key_ref; char type[32], *description; void *payload; long ret; ret = -EINVAL; if (plen > 1024 * 1024 - 1) goto error; /* draw all the data into kernel space */ ret = key_get_type_from_user(type, _type, sizeof(type)); if (ret < 0) goto error; description = NULL; if (_description) { description = strndup_user(_description, KEY_MAX_DESC_SIZE); if (IS_ERR(description)) { ret = PTR_ERR(description); goto error; } if (!*description) { kfree(description); description = NULL; } else if ((description[0] == '.') && (strncmp(type, "keyring", 7) == 0)) { ret = -EPERM; goto error2; } } /* pull the payload in if one was supplied */ payload = NULL; if (plen) { ret = -ENOMEM; payload = kvmalloc(plen, GFP_KERNEL); if (!payload) goto error2; ret = -EFAULT; if (copy_from_user(payload, _payload, plen) != 0) goto error3; } /* find the target keyring (which must be writable) */ keyring_ref = lookup_user_key(ringid, KEY_LOOKUP_CREATE, KEY_NEED_WRITE); if (IS_ERR(keyring_ref)) { ret = PTR_ERR(keyring_ref); goto error3; } /* create or update the requested key and add it to the target * keyring */ key_ref = key_create_or_update(keyring_ref, type, description, payload, plen, KEY_PERM_UNDEF, KEY_ALLOC_IN_QUOTA); if (!IS_ERR(key_ref)) { ret = key_ref_to_ptr(key_ref)->serial; key_ref_put(key_ref); } else { ret = PTR_ERR(key_ref); } key_ref_put(keyring_ref); error3: if (payload) { memzero_explicit(payload, plen); kvfree(payload); } error2: kfree(description); error: return ret; } /* * Search the process keyrings and keyring trees linked from those for a * matching key. Keyrings must have appropriate Search permission to be * searched. 
* * If a key is found, it will be attached to the destination keyring if there's * one specified and the serial number of the key will be returned. * * If no key is found, /sbin/request-key will be invoked if _callout_info is * non-NULL in an attempt to create a key. The _callout_info string will be * passed to /sbin/request-key to aid with completing the request. If the * _callout_info string is "" then it will be changed to "-". */ SYSCALL_DEFINE4(request_key, const char __user *, _type, const char __user *, _description, const char __user *, _callout_info, key_serial_t, destringid) { struct key_type *ktype; struct key *key; key_ref_t dest_ref; size_t callout_len; char type[32], *description, *callout_info; long ret; /* pull the type into kernel space */ ret = key_get_type_from_user(type, _type, sizeof(type)); if (ret < 0) goto error; /* pull the description into kernel space */ description = strndup_user(_description, KEY_MAX_DESC_SIZE); if (IS_ERR(description)) { ret = PTR_ERR(description); goto error; } /* pull the callout info into kernel space */ callout_info = NULL; callout_len = 0; if (_callout_info) { callout_info = strndup_user(_callout_info, PAGE_SIZE); if (IS_ERR(callout_info)) { ret = PTR_ERR(callout_info); goto error2; } callout_len = strlen(callout_info); } /* get the destination keyring if specified */ dest_ref = NULL; if (destringid) { dest_ref = lookup_user_key(destringid, KEY_LOOKUP_CREATE, KEY_NEED_WRITE); if (IS_ERR(dest_ref)) { ret = PTR_ERR(dest_ref); goto error3; } } /* find the key type */ ktype = key_type_lookup(type); if (IS_ERR(ktype)) { ret = PTR_ERR(ktype); goto error4; } /* do the search */ key = request_key_and_link(ktype, description, callout_info, callout_len, NULL, key_ref_to_ptr(dest_ref), KEY_ALLOC_IN_QUOTA); if (IS_ERR(key)) { ret = PTR_ERR(key); goto error5; } /* wait for the key to finish being constructed */ ret = wait_for_key_construction(key, 1); if (ret < 0) goto error6; ret = key->serial; error6: key_put(key); error5: key_type_put(ktype); error4: key_ref_put(dest_ref); error3: kfree(callout_info); error2: kfree(description); error: return ret; } /* * Get the ID of the specified process keyring. * * The requested keyring must have search permission to be found. * * If successful, the ID of the requested keyring will be returned. */ long keyctl_get_keyring_ID(key_serial_t id, int create) { key_ref_t key_ref; unsigned long lflags; long ret; lflags = create ? KEY_LOOKUP_CREATE : 0; key_ref = lookup_user_key(id, lflags, KEY_NEED_SEARCH); if (IS_ERR(key_ref)) { ret = PTR_ERR(key_ref); goto error; } ret = key_ref_to_ptr(key_ref)->serial; key_ref_put(key_ref); error: return ret; } /* * Join a (named) session keyring. * * Create and join an anonymous session keyring or join a named session * keyring, creating it if necessary. A named session keyring must have Search * permission for it to be joined. Session keyrings without this permit will * be skipped over. It is not permitted for userspace to create or join * keyrings whose names begin with a dot. * * If successful, the ID of the joined session keyring will be returned.
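 *
 * [Illustrative addition, not part of the original source:] joining a
 * named session keyring through the raw multiplexer looks like this,
 * where "build" is an arbitrary example name and a negative return
 * value indicates an error:
 *
 *	key_serial_t ring = syscall(__NR_keyctl,
 *				    KEYCTL_JOIN_SESSION_KEYRING, "build");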
*/ long keyctl_join_session_keyring(const char __user *_name) { char *name; long ret; /* fetch the name from userspace */ name = NULL; if (_name) { name = strndup_user(_name, KEY_MAX_DESC_SIZE); if (IS_ERR(name)) { ret = PTR_ERR(name); goto error; } ret = -EPERM; if (name[0] == '.') goto error_name; } /* join the session */ ret = join_session_keyring(name); error_name: kfree(name); error: return ret; } /* * Update a key's data payload from the given data. * * The key must grant the caller Write permission and the key type must support * updating for this to work. A negative key can be positively instantiated * with this call. * * If successful, 0 will be returned. If the key type does not support * updating, then -EOPNOTSUPP will be returned. */ long keyctl_update_key(key_serial_t id, const void __user *_payload, size_t plen) { key_ref_t key_ref; void *payload; long ret; ret = -EINVAL; if (plen > PAGE_SIZE) goto error; /* pull the payload in if one was supplied */ payload = NULL; if (plen) { ret = -ENOMEM; payload = kmalloc(plen, GFP_KERNEL); if (!payload) goto error; ret = -EFAULT; if (copy_from_user(payload, _payload, plen) != 0) goto error2; } /* find the target key (which must be writable) */ key_ref = lookup_user_key(id, 0, KEY_NEED_WRITE); if (IS_ERR(key_ref)) { ret = PTR_ERR(key_ref); goto error2; } /* update the key */ ret = key_update(key_ref, payload, plen); key_ref_put(key_ref); error2: kzfree(payload); error: return ret; } /* * Revoke a key. * * The key must grant the caller Write or Setattr permission for this to * work. The key type should give up its quota claim when revoked. The key * and any links to the key will be automatically garbage collected after a * certain amount of time (/proc/sys/kernel/keys/gc_delay). * * Keys with KEY_FLAG_KEEP set should not be revoked. * * If successful, 0 is returned. */ long keyctl_revoke_key(key_serial_t id) { key_ref_t key_ref; struct key *key; long ret; key_ref = lookup_user_key(id, 0, KEY_NEED_WRITE); if (IS_ERR(key_ref)) { ret = PTR_ERR(key_ref); if (ret != -EACCES) goto error; key_ref = lookup_user_key(id, 0, KEY_NEED_SETATTR); if (IS_ERR(key_ref)) { ret = PTR_ERR(key_ref); goto error; } } key = key_ref_to_ptr(key_ref); ret = 0; if (test_bit(KEY_FLAG_KEEP, &key->flags)) ret = -EPERM; else key_revoke(key); key_ref_put(key_ref); error: return ret; } /* * Invalidate a key. * * The key must grant the caller Invalidate permission for this to work. * The key and any links to the key will be automatically garbage collected * immediately. * * Keys with KEY_FLAG_KEEP set should not be invalidated. * * If successful, 0 is returned. */ long keyctl_invalidate_key(key_serial_t id) { key_ref_t key_ref; struct key *key; long ret; kenter("%d", id); key_ref = lookup_user_key(id, 0, KEY_NEED_SEARCH); if (IS_ERR(key_ref)) { ret = PTR_ERR(key_ref); /* Root is permitted to invalidate certain special keys */ if (capable(CAP_SYS_ADMIN)) { key_ref = lookup_user_key(id, 0, 0); if (IS_ERR(key_ref)) goto error; if (test_bit(KEY_FLAG_ROOT_CAN_INVAL, &key_ref_to_ptr(key_ref)->flags)) goto invalidate; goto error_put; } goto error; } invalidate: key = key_ref_to_ptr(key_ref); ret = 0; if (test_bit(KEY_FLAG_KEEP, &key->flags)) ret = -EPERM; else key_invalidate(key); error_put: key_ref_put(key_ref); error: kleave(" = %ld", ret); return ret; } /* * Clear the specified keyring, creating an empty process keyring if one of the * special keyring IDs is used. * * The keyring must grant the caller Write permission and not have * KEY_FLAG_KEEP set for this to work.
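 *
 * [Illustrative addition, not part of the original source:] emptying
 * the calling process's session keyring through the raw multiplexer:
 *
 *	syscall(__NR_keyctl, KEYCTL_CLEAR, KEY_SPEC_SESSION_KEYRING);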
If successful, 0 will be returned. */ long keyctl_keyring_clear(key_serial_t ringid) { key_ref_t keyring_ref; struct key *keyring; long ret; keyring_ref = lookup_user_key(ringid, KEY_LOOKUP_CREATE, KEY_NEED_WRITE); if (IS_ERR(keyring_ref)) { ret = PTR_ERR(keyring_ref); /* Root is permitted to invalidate certain special keyrings */ if (capable(CAP_SYS_ADMIN)) { keyring_ref = lookup_user_key(ringid, 0, 0); if (IS_ERR(keyring_ref)) goto error; if (test_bit(KEY_FLAG_ROOT_CAN_CLEAR, &key_ref_to_ptr(keyring_ref)->flags)) goto clear; goto error_put; } goto error; } clear: keyring = key_ref_to_ptr(keyring_ref); if (test_bit(KEY_FLAG_KEEP, &keyring->flags)) ret = -EPERM; else ret = keyring_clear(keyring); error_put: key_ref_put(keyring_ref); error: return ret; } /* * Create a link from a keyring to a key if there's no matching key in the * keyring, otherwise replace the link to the matching key with a link to the * new key. * * The key must grant the caller Link permission and the keyring must grant * the caller Write permission. Furthermore, if an additional link is created, * the keyring's quota will be extended. * * If successful, 0 will be returned. */ long keyctl_keyring_link(key_serial_t id, key_serial_t ringid) { key_ref_t keyring_ref, key_ref; long ret; keyring_ref = lookup_user_key(ringid, KEY_LOOKUP_CREATE, KEY_NEED_WRITE); if (IS_ERR(keyring_ref)) { ret = PTR_ERR(keyring_ref); goto error; } key_ref = lookup_user_key(id, KEY_LOOKUP_CREATE, KEY_NEED_LINK); if (IS_ERR(key_ref)) { ret = PTR_ERR(key_ref); goto error2; } ret = key_link(key_ref_to_ptr(keyring_ref), key_ref_to_ptr(key_ref)); key_ref_put(key_ref); error2: key_ref_put(keyring_ref); error: return ret; } /* * Unlink a key from a keyring. * * The keyring must grant the caller Write permission for this to work; the key * itself need not grant the caller anything. If the last link to a key is * removed then that key will be scheduled for destruction. * * Keys or keyrings with KEY_FLAG_KEEP set should not be unlinked. * * If successful, 0 will be returned. */ long keyctl_keyring_unlink(key_serial_t id, key_serial_t ringid) { key_ref_t keyring_ref, key_ref; struct key *keyring, *key; long ret; keyring_ref = lookup_user_key(ringid, 0, KEY_NEED_WRITE); if (IS_ERR(keyring_ref)) { ret = PTR_ERR(keyring_ref); goto error; } key_ref = lookup_user_key(id, KEY_LOOKUP_FOR_UNLINK, 0); if (IS_ERR(key_ref)) { ret = PTR_ERR(key_ref); goto error2; } keyring = key_ref_to_ptr(keyring_ref); key = key_ref_to_ptr(key_ref); if (test_bit(KEY_FLAG_KEEP, &keyring->flags) && test_bit(KEY_FLAG_KEEP, &key->flags)) ret = -EPERM; else ret = key_unlink(keyring, key); key_ref_put(key_ref); error2: key_ref_put(keyring_ref); error: return ret; } /* * Return a description of a key to userspace. * * The key must grant the caller View permission for this to work. * * If there's a buffer, we place up to buflen bytes of data into it formatted * in the following way: * * type;uid;gid;perm;description<NUL> * * If successful, we return the amount of description available, irrespective * of how much we may have copied into the buffer.
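 *
 * [Illustrative addition, not part of the original source:] this return
 * convention lets a caller size the buffer in two passes:
 *
 *	long len = syscall(__NR_keyctl, KEYCTL_DESCRIBE, id, NULL, 0);
 *	char *buf = malloc(len);
 *	syscall(__NR_keyctl, KEYCTL_DESCRIBE, id, buf, len);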
*/ long keyctl_describe_key(key_serial_t keyid, char __user *buffer, size_t buflen) { struct key *key, *instkey; key_ref_t key_ref; char *infobuf; long ret; int desclen, infolen; key_ref = lookup_user_key(keyid, KEY_LOOKUP_PARTIAL, KEY_NEED_VIEW); if (IS_ERR(key_ref)) { /* viewing a key under construction is permitted if we have the * authorisation token handy */ if (PTR_ERR(key_ref) == -EACCES) { instkey = key_get_instantiation_authkey(keyid); if (!IS_ERR(instkey)) { key_put(instkey); key_ref = lookup_user_key(keyid, KEY_LOOKUP_PARTIAL, 0); if (!IS_ERR(key_ref)) goto okay; } } ret = PTR_ERR(key_ref); goto error; } okay: key = key_ref_to_ptr(key_ref); desclen = strlen(key->description); /* calculate how much information we're going to return */ ret = -ENOMEM; infobuf = kasprintf(GFP_KERNEL, "%s;%d;%d;%08x;", key->type->name, from_kuid_munged(current_user_ns(), key->uid), from_kgid_munged(current_user_ns(), key->gid), key->perm); if (!infobuf) goto error2; infolen = strlen(infobuf); ret = infolen + desclen + 1; /* consider returning the data */ if (buffer && buflen >= ret) { if (copy_to_user(buffer, infobuf, infolen) != 0 || copy_to_user(buffer + infolen, key->description, desclen + 1) != 0) ret = -EFAULT; } kfree(infobuf); error2: key_ref_put(key_ref); error: return ret; } /* * Search the specified keyring and any keyrings it links to for a matching * key. Only keyrings that grant the caller Search permission will be searched * (this includes the starting keyring). Only keys with Search permission can * be found. * * If successful, the found key will be linked to the destination keyring if * supplied and the key has Link permission, and the found key ID will be * returned. */ long keyctl_keyring_search(key_serial_t ringid, const char __user *_type, const char __user *_description, key_serial_t destringid) { struct key_type *ktype; key_ref_t keyring_ref, key_ref, dest_ref; char type[32], *description; long ret; /* pull the type and description into kernel space */ ret = key_get_type_from_user(type, _type, sizeof(type)); if (ret < 0) goto error; description = strndup_user(_description, KEY_MAX_DESC_SIZE); if (IS_ERR(description)) { ret = PTR_ERR(description); goto error; } /* get the keyring at which to begin the search */ keyring_ref = lookup_user_key(ringid, 0, KEY_NEED_SEARCH); if (IS_ERR(keyring_ref)) { ret = PTR_ERR(keyring_ref); goto error2; } /* get the destination keyring if specified */ dest_ref = NULL; if (destringid) { dest_ref = lookup_user_key(destringid, KEY_LOOKUP_CREATE, KEY_NEED_WRITE); if (IS_ERR(dest_ref)) { ret = PTR_ERR(dest_ref); goto error3; } } /* find the key type */ ktype = key_type_lookup(type); if (IS_ERR(ktype)) { ret = PTR_ERR(ktype); goto error4; } /* do the search */ key_ref = keyring_search(keyring_ref, ktype, description); if (IS_ERR(key_ref)) { ret = PTR_ERR(key_ref); /* treat lack or presence of a negative key the same */ if (ret == -EAGAIN) ret = -ENOKEY; goto error5; } /* link the resulting key to the destination keyring if we can */ if (dest_ref) { ret = key_permission(key_ref, KEY_NEED_LINK); if (ret < 0) goto error6; ret = key_link(key_ref_to_ptr(dest_ref), key_ref_to_ptr(key_ref)); if (ret < 0) goto error6; } ret = key_ref_to_ptr(key_ref)->serial; error6: key_ref_put(key_ref); error5: key_type_put(ktype); error4: key_ref_put(dest_ref); error3: key_ref_put(keyring_ref); error2: kfree(description); error: return ret; } /* * Read a key's payload. 
* * The key must either grant the caller Read permission, or it must grant the * caller Search permission when searched for from the process keyrings. * * If successful, we place up to buflen bytes of data into the buffer, if one * is provided, and return the amount of data that is available in the key, * irrespective of how much we copied into the buffer. */ long keyctl_read_key(key_serial_t keyid, char __user *buffer, size_t buflen) { struct key *key; key_ref_t key_ref; long ret; /* find the key first */ key_ref = lookup_user_key(keyid, 0, 0); if (IS_ERR(key_ref)) { ret = -ENOKEY; goto error; } key = key_ref_to_ptr(key_ref); ret = key_read_state(key); if (ret < 0) goto error2; /* Negatively instantiated */ /* see if we can read it directly */ ret = key_permission(key_ref, KEY_NEED_READ); if (ret == 0) goto can_read_key; if (ret != -EACCES) goto error2; /* we can't; see if it's searchable from this process's keyrings * - we automatically take account of the fact that it may be * dangling off an instantiation key */ if (!is_key_possessed(key_ref)) { ret = -EACCES; goto error2; } /* the key is probably readable - now try to read it */ can_read_key: ret = -EOPNOTSUPP; if (key->type->read) { /* Read the data with the semaphore held (since we might sleep) * to protect against the key being updated or revoked. */ down_read(&key->sem); ret = key_validate(key); if (ret == 0) ret = key->type->read(key, buffer, buflen); up_read(&key->sem); } error2: key_put(key); error: return ret; } /* * Change the ownership of a key * * The key must grant the caller Setattr permission for this to work, though * the key need not be fully instantiated yet. For the UID to be changed, or * for the GID to be changed to a group the caller is not a member of, the * caller must have sysadmin capability. If either uid or gid is -1 then that * attribute is not changed. * * If the UID is to be changed, the new user must have sufficient quota to * accept the key. The quota deduction will be removed from the old user to * the new user should the attribute be changed. * * If successful, 0 will be returned. */ long keyctl_chown_key(key_serial_t id, uid_t user, gid_t group) { struct key_user *newowner, *zapowner = NULL; struct key *key; key_ref_t key_ref; long ret; kuid_t uid; kgid_t gid; uid = make_kuid(current_user_ns(), user); gid = make_kgid(current_user_ns(), group); ret = -EINVAL; if ((user != (uid_t) -1) && !uid_valid(uid)) goto error; if ((group != (gid_t) -1) && !gid_valid(gid)) goto error; ret = 0; if (user == (uid_t) -1 && group == (gid_t) -1) goto error; key_ref = lookup_user_key(id, KEY_LOOKUP_CREATE | KEY_LOOKUP_PARTIAL, KEY_NEED_SETATTR); if (IS_ERR(key_ref)) { ret = PTR_ERR(key_ref); goto error; } key = key_ref_to_ptr(key_ref); /* make the changes with the locks held to prevent chown/chown races */ ret = -EACCES; down_write(&key->sem); if (!capable(CAP_SYS_ADMIN)) { /* only the sysadmin can chown a key to some other UID */ if (user != (uid_t) -1 && !uid_eq(key->uid, uid)) goto error_put; /* only the sysadmin can set the key's GID to a group other * than one of those that the current process subscribes to */ if (group != (gid_t) -1 && !gid_eq(gid, key->gid) && !in_group_p(gid)) goto error_put; } /* change the UID */ if (user != (uid_t) -1 && !uid_eq(uid, key->uid)) { ret = -ENOMEM; newowner = key_user_lookup(uid); if (!newowner) goto error_put; /* transfer the quota burden to the new user */ if (test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) { unsigned maxkeys = uid_eq(uid, GLOBAL_ROOT_UID) ? 
key_quota_root_maxkeys : key_quota_maxkeys; unsigned maxbytes = uid_eq(uid, GLOBAL_ROOT_UID) ? key_quota_root_maxbytes : key_quota_maxbytes; spin_lock(&newowner->lock); if (newowner->qnkeys + 1 >= maxkeys || newowner->qnbytes + key->quotalen >= maxbytes || newowner->qnbytes + key->quotalen < newowner->qnbytes) goto quota_overrun; newowner->qnkeys++; newowner->qnbytes += key->quotalen; spin_unlock(&newowner->lock); spin_lock(&key->user->lock); key->user->qnkeys--; key->user->qnbytes -= key->quotalen; spin_unlock(&key->user->lock); } atomic_dec(&key->user->nkeys); atomic_inc(&newowner->nkeys); if (key->state != KEY_IS_UNINSTANTIATED) { atomic_dec(&key->user->nikeys); atomic_inc(&newowner->nikeys); } zapowner = key->user; key->user = newowner; key->uid = uid; } /* change the GID */ if (group != (gid_t) -1) key->gid = gid; ret = 0; error_put: up_write(&key->sem); key_put(key); if (zapowner) key_user_put(zapowner); error: return ret; quota_overrun: spin_unlock(&newowner->lock); zapowner = newowner; ret = -EDQUOT; goto error_put; } /* * Change the permission mask on a key. * * The key must grant the caller Setattr permission for this to work, though * the key need not be fully instantiated yet. If the caller does not have * sysadmin capability, it may only change the permission on keys that it owns. */ long keyctl_setperm_key(key_serial_t id, key_perm_t perm) { struct key *key; key_ref_t key_ref; long ret; ret = -EINVAL; if (perm & ~(KEY_POS_ALL | KEY_USR_ALL | KEY_GRP_ALL | KEY_OTH_ALL)) goto error; key_ref = lookup_user_key(id, KEY_LOOKUP_CREATE | KEY_LOOKUP_PARTIAL, KEY_NEED_SETATTR); if (IS_ERR(key_ref)) { ret = PTR_ERR(key_ref); goto error; } key = key_ref_to_ptr(key_ref); /* make the changes with the locks held to prevent chown/chmod races */ ret = -EACCES; down_write(&key->sem); /* if we're not the sysadmin, we can only change a key that we own */ if (capable(CAP_SYS_ADMIN) || uid_eq(key->uid, current_fsuid())) { key->perm = perm; ret = 0; } up_write(&key->sem); key_put(key); error: return ret; } /* * Get the destination keyring for instantiation and check that the caller has * Write permission on it. */ static long get_instantiation_keyring(key_serial_t ringid, struct request_key_auth *rka, struct key **_dest_keyring) { key_ref_t dkref; *_dest_keyring = NULL; /* just return a NULL pointer if we weren't asked to make a link */ if (ringid == 0) return 0; /* if a specific keyring is nominated by ID, then use that */ if (ringid > 0) { dkref = lookup_user_key(ringid, KEY_LOOKUP_CREATE, KEY_NEED_WRITE); if (IS_ERR(dkref)) return PTR_ERR(dkref); *_dest_keyring = key_ref_to_ptr(dkref); return 0; } if (ringid == KEY_SPEC_REQKEY_AUTH_KEY) return -EINVAL; /* otherwise specify the destination keyring recorded in the * authorisation key (any KEY_SPEC_*_KEYRING) */ if (ringid >= KEY_SPEC_REQUESTOR_KEYRING) { *_dest_keyring = key_get(rka->dest_keyring); return 0; } return -ENOKEY; } /* * Change the request_key authorisation key on the current process. */ static int keyctl_change_reqkey_auth(struct key *key) { struct cred *new; new = prepare_creds(); if (!new) return -ENOMEM; key_put(new->request_key_auth); new->request_key_auth = key_get(key); return commit_creds(new); } /* * Instantiate a key with the specified payload and link the key into the * destination keyring if one is given. * * The caller must have the appropriate instantiation permit set for this to * work (see keyctl_assume_authority). No other permissions are required. * * If successful, 0 will be returned. 
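 *
 * [Illustrative addition, not part of the original source:] a
 * /sbin/request-key helper typically assumes authority over the target
 * key and then instantiates it; key_id, payload and plen come from the
 * helper's invocation:
 *
 *	syscall(__NR_keyctl, KEYCTL_ASSUME_AUTHORITY, key_id);
 *	syscall(__NR_keyctl, KEYCTL_INSTANTIATE, key_id, payload, plen,
 *		KEY_SPEC_REQUESTOR_KEYRING);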
*/ long keyctl_instantiate_key_common(key_serial_t id, struct iov_iter *from, key_serial_t ringid) { const struct cred *cred = current_cred(); struct request_key_auth *rka; struct key *instkey, *dest_keyring; size_t plen = from ? iov_iter_count(from) : 0; void *payload; long ret; kenter("%d,,%zu,%d", id, plen, ringid); if (!plen) from = NULL; ret = -EINVAL; if (plen > 1024 * 1024 - 1) goto error; /* the appropriate instantiation authorisation key must have been * assumed before calling this */ ret = -EPERM; instkey = cred->request_key_auth; if (!instkey) goto error; rka = instkey->payload.data[0]; if (rka->target_key->serial != id) goto error; /* pull the payload in if one was supplied */ payload = NULL; if (from) { ret = -ENOMEM; payload = kvmalloc(plen, GFP_KERNEL); if (!payload) goto error; ret = -EFAULT; if (!copy_from_iter_full(payload, plen, from)) goto error2; } /* find the destination keyring amongst those belonging to the * requesting task */ ret = get_instantiation_keyring(ringid, rka, &dest_keyring); if (ret < 0) goto error2; /* instantiate the key and link it into a keyring */ ret = key_instantiate_and_link(rka->target_key, payload, plen, dest_keyring, instkey); key_put(dest_keyring); /* discard the assumed authority if it's just been disabled by * instantiation of the key */ if (ret == 0) keyctl_change_reqkey_auth(NULL); error2: if (payload) { memzero_explicit(payload, plen); kvfree(payload); } error: return ret; } /* * Instantiate a key with the specified payload and link the key into the * destination keyring if one is given. * * The caller must have the appropriate instantiation permit set for this to * work (see keyctl_assume_authority). No other permissions are required. * * If successful, 0 will be returned. */ long keyctl_instantiate_key(key_serial_t id, const void __user *_payload, size_t plen, key_serial_t ringid) { if (_payload && plen) { struct iovec iov; struct iov_iter from; int ret; ret = import_single_range(WRITE, (void __user *)_payload, plen, &iov, &from); if (unlikely(ret)) return ret; return keyctl_instantiate_key_common(id, &from, ringid); } return keyctl_instantiate_key_common(id, NULL, ringid); } /* * Instantiate a key with the specified multipart payload and link the key into * the destination keyring if one is given. * * The caller must have the appropriate instantiation permit set for this to * work (see keyctl_assume_authority). No other permissions are required. * * If successful, 0 will be returned. */ long keyctl_instantiate_key_iov(key_serial_t id, const struct iovec __user *_payload_iov, unsigned ioc, key_serial_t ringid) { struct iovec iovstack[UIO_FASTIOV], *iov = iovstack; struct iov_iter from; long ret; if (!_payload_iov) ioc = 0; ret = import_iovec(WRITE, _payload_iov, ioc, ARRAY_SIZE(iovstack), &iov, &from); if (ret < 0) return ret; ret = keyctl_instantiate_key_common(id, &from, ringid); kfree(iov); return ret; } /* * Negatively instantiate the key with the given timeout (in seconds) and link * the key into the destination keyring if one is given. * * The caller must have the appropriate instantiation permit set for this to * work (see keyctl_assume_authority). No other permissions are required. * * The key and any links to the key will be automatically garbage collected * after the timeout expires. * * Negative keys are used to rate limit repeated request_key() calls by causing * them to return -ENOKEY until the negative key expires. * * If successful, 0 will be returned. 
*/ long keyctl_negate_key(key_serial_t id, unsigned timeout, key_serial_t ringid) { return keyctl_reject_key(id, timeout, ENOKEY, ringid); } /* * Negatively instantiate the key with the given timeout (in seconds) and error * code and link the key into the destination keyring if one is given. * * The caller must have the appropriate instantiation permit set for this to * work (see keyctl_assume_authority). No other permissions are required. * * The key and any links to the key will be automatically garbage collected * after the timeout expires. * * Negative keys are used to rate limit repeated request_key() calls by causing * them to return the specified error code until the negative key expires. * * If successful, 0 will be returned. */ long keyctl_reject_key(key_serial_t id, unsigned timeout, unsigned error, key_serial_t ringid) { const struct cred *cred = current_cred(); struct request_key_auth *rka; struct key *instkey, *dest_keyring; long ret; kenter("%d,%u,%u,%d", id, timeout, error, ringid); /* must be a valid error code and mustn't be a kernel special */ if (error <= 0 || error >= MAX_ERRNO || error == ERESTARTSYS || error == ERESTARTNOINTR || error == ERESTARTNOHAND || error == ERESTART_RESTARTBLOCK) return -EINVAL; /* the appropriate instantiation authorisation key must have been * assumed before calling this */ ret = -EPERM; instkey = cred->request_key_auth; if (!instkey) goto error; rka = instkey->payload.data[0]; if (rka->target_key->serial != id) goto error; /* find the destination keyring if present (which must also be * writable) */ ret = get_instantiation_keyring(ringid, rka, &dest_keyring); if (ret < 0) goto error; /* instantiate the key and link it into a keyring */ ret = key_reject_and_link(rka->target_key, timeout, error, dest_keyring, instkey); key_put(dest_keyring); /* discard the assumed authority if it's just been disabled by * instantiation of the key */ if (ret == 0) keyctl_change_reqkey_auth(NULL); error: return ret; } /* * Read or set the default keyring in which request_key() will cache keys and * return the old setting. * * If a thread or process keyring is specified then it will be created if it * doesn't yet exist. The old setting will be returned if successful. */ long keyctl_set_reqkey_keyring(int reqkey_defl) { struct cred *new; int ret, old_setting; old_setting = current_cred_xxx(jit_keyring); if (reqkey_defl == KEY_REQKEY_DEFL_NO_CHANGE) return old_setting; new = prepare_creds(); if (!new) return -ENOMEM; switch (reqkey_defl) { case KEY_REQKEY_DEFL_THREAD_KEYRING: ret = install_thread_keyring_to_cred(new); if (ret < 0) goto error; goto set; case KEY_REQKEY_DEFL_PROCESS_KEYRING: ret = install_process_keyring_to_cred(new); if (ret < 0) goto error; goto set; case KEY_REQKEY_DEFL_DEFAULT: case KEY_REQKEY_DEFL_SESSION_KEYRING: case KEY_REQKEY_DEFL_USER_KEYRING: case KEY_REQKEY_DEFL_USER_SESSION_KEYRING: case KEY_REQKEY_DEFL_REQUESTOR_KEYRING: goto set; case KEY_REQKEY_DEFL_NO_CHANGE: case KEY_REQKEY_DEFL_GROUP_KEYRING: default: ret = -EINVAL; goto error; } set: new->jit_keyring = reqkey_defl; commit_creds(new); return old_setting; error: abort_creds(new); return ret; } /* * Set or clear the timeout on a key. * * Either the key must grant the caller Setattr permission or else the caller * must hold an instantiation authorisation token for the key. * * The timeout is either 0 to clear the timeout, or a number of seconds from * the current time. The key and any links to the key will be automatically * garbage collected after the timeout expires. 
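 *
 * [Illustrative addition, not part of the original source:] a 60-second
 * lifetime is set, and cleared again, with:
 *
 *	syscall(__NR_keyctl, KEYCTL_SET_TIMEOUT, id, 60);
 *	syscall(__NR_keyctl, KEYCTL_SET_TIMEOUT, id, 0);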
* * Keys with KEY_FLAG_KEEP set should not be timed out. * * If successful, 0 is returned. */ long keyctl_set_timeout(key_serial_t id, unsigned timeout) { struct key *key, *instkey; key_ref_t key_ref; long ret; key_ref = lookup_user_key(id, KEY_LOOKUP_CREATE | KEY_LOOKUP_PARTIAL, KEY_NEED_SETATTR); if (IS_ERR(key_ref)) { /* setting the timeout on a key under construction is permitted * if we have the authorisation token handy */ if (PTR_ERR(key_ref) == -EACCES) { instkey = key_get_instantiation_authkey(id); if (!IS_ERR(instkey)) { key_put(instkey); key_ref = lookup_user_key(id, KEY_LOOKUP_PARTIAL, 0); if (!IS_ERR(key_ref)) goto okay; } } ret = PTR_ERR(key_ref); goto error; } okay: key = key_ref_to_ptr(key_ref); ret = 0; if (test_bit(KEY_FLAG_KEEP, &key->flags)) ret = -EPERM; else key_set_timeout(key, timeout); key_put(key); error: return ret; } /* * Assume (or clear) the authority to instantiate the specified key. * * This sets the authoritative token currently in force for key instantiation. * This must be done for a key to be instantiated. It has the effect of making * available all the keys from the caller of the request_key() that created a * key to request_key() calls made by the caller of this function. * * The caller must have the instantiation key in their process keyrings with a * Search permission grant available to the caller. * * If the ID given is 0, then the setting will be cleared and 0 returned. * * If the ID given matches an authorisation key, then that key will be * set and its ID will be returned. The authorisation key can be read to get * the callout information passed to request_key(). */ long keyctl_assume_authority(key_serial_t id) { struct key *authkey; long ret; /* special key IDs aren't permitted */ ret = -EINVAL; if (id < 0) goto error; /* we divest ourselves of authority if given an ID of 0 */ if (id == 0) { ret = keyctl_change_reqkey_auth(NULL); goto error; } /* attempt to assume the authority temporarily granted to us whilst we * instantiate the specified key * - the authorisation key must be in the current task's keyrings * somewhere */ authkey = key_get_instantiation_authkey(id); if (IS_ERR(authkey)) { ret = PTR_ERR(authkey); goto error; } ret = keyctl_change_reqkey_auth(authkey); if (ret == 0) ret = authkey->serial; key_put(authkey); error: return ret; } /* * Get a key's LSM security label. * * The key must grant the caller View permission for this to work. * * If there's a buffer, then up to buflen bytes of data will be placed into it. * * If successful, the amount of information available will be returned, * irrespective of how much was copied (including the terminal NUL).
*/ long keyctl_get_security(key_serial_t keyid, char __user *buffer, size_t buflen) { struct key *key, *instkey; key_ref_t key_ref; char *context; long ret; key_ref = lookup_user_key(keyid, KEY_LOOKUP_PARTIAL, KEY_NEED_VIEW); if (IS_ERR(key_ref)) { if (PTR_ERR(key_ref) != -EACCES) return PTR_ERR(key_ref); /* viewing a key under construction is also permitted if we * have the authorisation token handy */ instkey = key_get_instantiation_authkey(keyid); if (IS_ERR(instkey)) return PTR_ERR(instkey); key_put(instkey); key_ref = lookup_user_key(keyid, KEY_LOOKUP_PARTIAL, 0); if (IS_ERR(key_ref)) return PTR_ERR(key_ref); } key = key_ref_to_ptr(key_ref); ret = security_key_getsecurity(key, &context); if (ret == 0) { /* if no information was returned, give userspace an empty * string */ ret = 1; if (buffer && buflen > 0 && copy_to_user(buffer, "", 1) != 0) ret = -EFAULT; } else if (ret > 0) { /* return as much data as there's room for */ if (buffer && buflen > 0) { if (buflen > ret) buflen = ret; if (copy_to_user(buffer, context, buflen) != 0) ret = -EFAULT; } kfree(context); } key_ref_put(key_ref); return ret; } /* * Attempt to install the calling process's session keyring on the process's * parent process. * * The keyring must exist and must grant the caller LINK permission, and the * parent process must be single-threaded and must have the same effective * ownership as this process and mustn't be SUID/SGID. * * The keyring will be emplaced on the parent when it next resumes userspace. * * If successful, 0 will be returned. */ long keyctl_session_to_parent(void) { struct task_struct *me, *parent; const struct cred *mycred, *pcred; struct callback_head *newwork, *oldwork; key_ref_t keyring_r; struct cred *cred; int ret; keyring_r = lookup_user_key(KEY_SPEC_SESSION_KEYRING, 0, KEY_NEED_LINK); if (IS_ERR(keyring_r)) return PTR_ERR(keyring_r); ret = -ENOMEM; /* our parent is going to need a new cred struct, a new tgcred struct * and new security data, so we allocate them here to prevent ENOMEM in * our parent */ cred = cred_alloc_blank(); if (!cred) goto error_keyring; newwork = &cred->rcu; cred->session_keyring = key_ref_to_ptr(keyring_r); keyring_r = NULL; init_task_work(newwork, key_change_session_keyring); me = current; rcu_read_lock(); write_lock_irq(&tasklist_lock); ret = -EPERM; oldwork = NULL; parent = me->real_parent; /* the parent mustn't be init and mustn't be a kernel thread */ if (parent->pid <= 1 || !parent->mm) goto unlock; /* the parent must be single threaded */ if (!thread_group_empty(parent)) goto unlock; /* the parent and the child must have different session keyrings or * there's no point */ mycred = current_cred(); pcred = __task_cred(parent); if (mycred == pcred || mycred->session_keyring == pcred->session_keyring) { ret = 0; goto unlock; } /* the parent must have the same effective ownership and mustn't be * SUID/SGID */ if (!uid_eq(pcred->uid, mycred->euid) || !uid_eq(pcred->euid, mycred->euid) || !uid_eq(pcred->suid, mycred->euid) || !gid_eq(pcred->gid, mycred->egid) || !gid_eq(pcred->egid, mycred->egid) || !gid_eq(pcred->sgid, mycred->egid)) goto unlock; /* the keyrings must have the same UID */ if ((pcred->session_keyring && !uid_eq(pcred->session_keyring->uid, mycred->euid)) || !uid_eq(mycred->session_keyring->uid, mycred->euid)) goto unlock; /* cancel an already pending keyring replacement */ oldwork = task_work_cancel(parent, key_change_session_keyring); /* the replacement session keyring is applied just prior to userspace * restarting */ ret = task_work_add(parent, 
newwork, true); if (!ret) newwork = NULL; unlock: write_unlock_irq(&tasklist_lock); rcu_read_unlock(); if (oldwork) put_cred(container_of(oldwork, struct cred, rcu)); if (newwork) put_cred(cred); return ret; error_keyring: key_ref_put(keyring_r); return ret; } /* * Apply a restriction to a given keyring. * * The caller must have Setattr permission to change keyring restrictions. * * The requested type name may be a NULL pointer to reject all attempts * to link to the keyring. If _type is non-NULL, _restriction can be * NULL or a pointer to a string describing the restriction. If _type is * NULL, _restriction must also be NULL. * * Returns 0 if successful. */ long keyctl_restrict_keyring(key_serial_t id, const char __user *_type, const char __user *_restriction) { key_ref_t key_ref; bool link_reject = !_type; char type[32]; char *restriction = NULL; long ret; key_ref = lookup_user_key(id, 0, KEY_NEED_SETATTR); if (IS_ERR(key_ref)) return PTR_ERR(key_ref); if (_type) { ret = key_get_type_from_user(type, _type, sizeof(type)); if (ret < 0) goto error; } if (_restriction) { if (!_type) { ret = -EINVAL; goto error; } restriction = strndup_user(_restriction, PAGE_SIZE); if (IS_ERR(restriction)) { ret = PTR_ERR(restriction); goto error; } } ret = keyring_restrict(key_ref, link_reject ? NULL : type, restriction); kfree(restriction); error: key_ref_put(key_ref); return ret; } /* * The key control system call */ SYSCALL_DEFINE5(keyctl, int, option, unsigned long, arg2, unsigned long, arg3, unsigned long, arg4, unsigned long, arg5) { switch (option) { case KEYCTL_GET_KEYRING_ID: return keyctl_get_keyring_ID((key_serial_t) arg2, (int) arg3); case KEYCTL_JOIN_SESSION_KEYRING: return keyctl_join_session_keyring((const char __user *) arg2); case KEYCTL_UPDATE: return keyctl_update_key((key_serial_t) arg2, (const void __user *) arg3, (size_t) arg4); case KEYCTL_REVOKE: return keyctl_revoke_key((key_serial_t) arg2); case KEYCTL_DESCRIBE: return keyctl_describe_key((key_serial_t) arg2, (char __user *) arg3, (unsigned) arg4); case KEYCTL_CLEAR: return keyctl_keyring_clear((key_serial_t) arg2); case KEYCTL_LINK: return keyctl_keyring_link((key_serial_t) arg2, (key_serial_t) arg3); case KEYCTL_UNLINK: return keyctl_keyring_unlink((key_serial_t) arg2, (key_serial_t) arg3); case KEYCTL_SEARCH: return keyctl_keyring_search((key_serial_t) arg2, (const char __user *) arg3, (const char __user *) arg4, (key_serial_t) arg5); case KEYCTL_READ: return keyctl_read_key((key_serial_t) arg2, (char __user *) arg3, (size_t) arg4); case KEYCTL_CHOWN: return keyctl_chown_key((key_serial_t) arg2, (uid_t) arg3, (gid_t) arg4); case KEYCTL_SETPERM: return keyctl_setperm_key((key_serial_t) arg2, (key_perm_t) arg3); case KEYCTL_INSTANTIATE: return keyctl_instantiate_key((key_serial_t) arg2, (const void __user *) arg3, (size_t) arg4, (key_serial_t) arg5); case KEYCTL_NEGATE: return keyctl_negate_key((key_serial_t) arg2, (unsigned) arg3, (key_serial_t) arg4); case KEYCTL_SET_REQKEY_KEYRING: return keyctl_set_reqkey_keyring(arg2); case KEYCTL_SET_TIMEOUT: return keyctl_set_timeout((key_serial_t) arg2, (unsigned) arg3); case KEYCTL_ASSUME_AUTHORITY: return keyctl_assume_authority((key_serial_t) arg2); case KEYCTL_GET_SECURITY: return keyctl_get_security((key_serial_t) arg2, (char __user *) arg3, (size_t) arg4); case KEYCTL_SESSION_TO_PARENT: return keyctl_session_to_parent(); case KEYCTL_REJECT: return keyctl_reject_key((key_serial_t) arg2, (unsigned) arg3, (unsigned) arg4, (key_serial_t) arg5); case KEYCTL_INSTANTIATE_IOV: return 
keyctl_instantiate_key_iov( (key_serial_t) arg2, (const struct iovec __user *) arg3, (unsigned) arg4, (key_serial_t) arg5); case KEYCTL_INVALIDATE: return keyctl_invalidate_key((key_serial_t) arg2); case KEYCTL_GET_PERSISTENT: return keyctl_get_persistent((uid_t)arg2, (key_serial_t)arg3); case KEYCTL_DH_COMPUTE: return keyctl_dh_compute((struct keyctl_dh_params __user *) arg2, (char __user *) arg3, (size_t) arg4, (struct keyctl_kdf_params __user *) arg5); case KEYCTL_RESTRICT_KEYRING: return keyctl_restrict_keyring((key_serial_t) arg2, (const char __user *) arg3, (const char __user *) arg4); default: return -EOPNOTSUPP; } }
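The read path documented above returns the amount of payload available irrespective of how much was copied, which implies a probe, allocate, re-read pattern in userspace. A minimal sketch follows, reusing the illustrative xkeyctl() wrapper from earlier; read_key_payload() is an example helper, not a libkeyutils function, and the loop re-probes in case the payload changes size between the two calls.

#include <stdlib.h>

/* Fetch a key's payload into a malloc'd buffer; stores the payload size
 * in *sizep and returns the buffer, or returns NULL on error. */
static void *read_key_payload(key_serial_t id, long *sizep)
{
        char *buf = NULL;
        long have = 0, want;

        for (;;) {
                /* With a NULL buffer this only reports the size needed. */
                want = xkeyctl(KEYCTL_READ, id, (unsigned long)buf,
                               (unsigned long)have, 0);
                if (want < 0) {
                        free(buf);
                        return NULL;
                }
                if (buf && want <= have) {
                        *sizep = want;  /* what was actually available */
                        return buf;
                }
                free(buf);
                buf = malloc(want ? want : 1);  /* tolerate empty payloads */
                if (!buf)
                        return NULL;
                have = want;
        }
}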
long keyctl_chown_key(key_serial_t id, uid_t user, gid_t group) { struct key_user *newowner, *zapowner = NULL; struct key *key; key_ref_t key_ref; long ret; kuid_t uid; kgid_t gid; uid = make_kuid(current_user_ns(), user); gid = make_kgid(current_user_ns(), group); ret = -EINVAL; if ((user != (uid_t) -1) && !uid_valid(uid)) goto error; if ((group != (gid_t) -1) && !gid_valid(gid)) goto error; ret = 0; if (user == (uid_t) -1 && group == (gid_t) -1) goto error; key_ref = lookup_user_key(id, KEY_LOOKUP_CREATE | KEY_LOOKUP_PARTIAL, KEY_NEED_SETATTR); if (IS_ERR(key_ref)) { ret = PTR_ERR(key_ref); goto error; } key = key_ref_to_ptr(key_ref); /* make the changes with the locks held to prevent chown/chown races */ ret = -EACCES; down_write(&key->sem); if (!capable(CAP_SYS_ADMIN)) { /* only the sysadmin can chown a key to some other UID */ if (user != (uid_t) -1 && !uid_eq(key->uid, uid)) goto error_put; /* only the sysadmin can set the key's GID to a group other * than one of those that the current process subscribes to */ if (group != (gid_t) -1 && !gid_eq(gid, key->gid) && !in_group_p(gid)) goto error_put; } /* change the UID */ if (user != (uid_t) -1 && !uid_eq(uid, key->uid)) { ret = -ENOMEM; newowner = key_user_lookup(uid); if (!newowner) goto error_put; /* transfer the quota burden to the new user */ if (test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) { unsigned maxkeys = uid_eq(uid, GLOBAL_ROOT_UID) ? key_quota_root_maxkeys : key_quota_maxkeys; unsigned maxbytes = uid_eq(uid, GLOBAL_ROOT_UID) ? key_quota_root_maxbytes : key_quota_maxbytes; spin_lock(&newowner->lock); if (newowner->qnkeys + 1 >= maxkeys || newowner->qnbytes + key->quotalen >= maxbytes || newowner->qnbytes + key->quotalen < newowner->qnbytes) goto quota_overrun; newowner->qnkeys++; newowner->qnbytes += key->quotalen; spin_unlock(&newowner->lock); spin_lock(&key->user->lock); key->user->qnkeys--; key->user->qnbytes -= key->quotalen; spin_unlock(&key->user->lock); } atomic_dec(&key->user->nkeys); atomic_inc(&newowner->nkeys); if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) { atomic_dec(&key->user->nikeys); atomic_inc(&newowner->nikeys); } zapowner = key->user; key->user = newowner; key->uid = uid; } /* change the GID */ if (group != (gid_t) -1) key->gid = gid; ret = 0; error_put: up_write(&key->sem); key_put(key); if (zapowner) key_user_put(zapowner); error: return ret; quota_overrun: spin_unlock(&newowner->lock); zapowner = newowner; ret = -EDQUOT; goto error_put; }
long keyctl_chown_key(key_serial_t id, uid_t user, gid_t group) { struct key_user *newowner, *zapowner = NULL; struct key *key; key_ref_t key_ref; long ret; kuid_t uid; kgid_t gid; uid = make_kuid(current_user_ns(), user); gid = make_kgid(current_user_ns(), group); ret = -EINVAL; if ((user != (uid_t) -1) && !uid_valid(uid)) goto error; if ((group != (gid_t) -1) && !gid_valid(gid)) goto error; ret = 0; if (user == (uid_t) -1 && group == (gid_t) -1) goto error; key_ref = lookup_user_key(id, KEY_LOOKUP_CREATE | KEY_LOOKUP_PARTIAL, KEY_NEED_SETATTR); if (IS_ERR(key_ref)) { ret = PTR_ERR(key_ref); goto error; } key = key_ref_to_ptr(key_ref); /* make the changes with the locks held to prevent chown/chown races */ ret = -EACCES; down_write(&key->sem); if (!capable(CAP_SYS_ADMIN)) { /* only the sysadmin can chown a key to some other UID */ if (user != (uid_t) -1 && !uid_eq(key->uid, uid)) goto error_put; /* only the sysadmin can set the key's GID to a group other * than one of those that the current process subscribes to */ if (group != (gid_t) -1 && !gid_eq(gid, key->gid) && !in_group_p(gid)) goto error_put; } /* change the UID */ if (user != (uid_t) -1 && !uid_eq(uid, key->uid)) { ret = -ENOMEM; newowner = key_user_lookup(uid); if (!newowner) goto error_put; /* transfer the quota burden to the new user */ if (test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) { unsigned maxkeys = uid_eq(uid, GLOBAL_ROOT_UID) ? key_quota_root_maxkeys : key_quota_maxkeys; unsigned maxbytes = uid_eq(uid, GLOBAL_ROOT_UID) ? key_quota_root_maxbytes : key_quota_maxbytes; spin_lock(&newowner->lock); if (newowner->qnkeys + 1 >= maxkeys || newowner->qnbytes + key->quotalen >= maxbytes || newowner->qnbytes + key->quotalen < newowner->qnbytes) goto quota_overrun; newowner->qnkeys++; newowner->qnbytes += key->quotalen; spin_unlock(&newowner->lock); spin_lock(&key->user->lock); key->user->qnkeys--; key->user->qnbytes -= key->quotalen; spin_unlock(&key->user->lock); } atomic_dec(&key->user->nkeys); atomic_inc(&newowner->nkeys); if (key->state != KEY_IS_UNINSTANTIATED) { atomic_dec(&key->user->nikeys); atomic_inc(&newowner->nikeys); } zapowner = key->user; key->user = newowner; key->uid = uid; } /* change the GID */ if (group != (gid_t) -1) key->gid = gid; ret = 0; error_put: up_write(&key->sem); key_put(key); if (zapowner) key_user_put(zapowner); error: return ret; quota_overrun: spin_unlock(&newowner->lock); zapowner = newowner; ret = -EDQUOT; goto error_put; }
{'added': [(769, '\tret = key_read_state(key);'), (770, '\tif (ret < 0)'), (771, '\t\tgoto error2; /* Negatively instantiated */'), (903, '\t\tif (key->state != KEY_IS_UNINSTANTIATED) {')], 'deleted': [(769, '\tif (test_bit(KEY_FLAG_NEGATIVE, &key->flags)) {'), (770, '\t\tret = -ENOKEY;'), (771, '\t\tgoto error2;'), (772, '\t}'), (904, '\t\tif (test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) {')]}
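The diff recorded above is the essence of the CVE-2017-15951 fix: separate test_bit() checks on KEY_FLAG_NEGATIVE and KEY_FLAG_INSTANTIATED are replaced by single reads of a key state word (key_read_state(), KEY_IS_UNINSTANTIATED), so a reader can no longer observe a negative instantiation half-published. A simplified model of that pattern follows; the demo_key struct, the KEY_IS_POSITIVE value and the use of C11 atomics are illustrative assumptions here, and the kernel itself publishes ->state with smp_store_release() and reads it with smp_load_acquire().

#include <stdatomic.h>

/* Simplified model: the whole key lifecycle lives in one published word.
 *   state == KEY_IS_UNINSTANTIATED : still under construction
 *   state == KEY_IS_POSITIVE       : instantiated, payload usable
 *   state <  0                     : negatively instantiated, holds -error
 */
enum { KEY_IS_UNINSTANTIATED = 0, KEY_IS_POSITIVE = 1 };

struct demo_key {
        _Atomic short state;
};

/* One acquire-load snapshot, as key_read_state() takes in the kernel. */
static short key_read_state(const struct demo_key *key)
{
        return atomic_load_explicit(&key->state, memory_order_acquire);
}

/* A negative instantiation publishes flag and error code together. */
static void mark_key_negative(struct demo_key *key, int error)
{
        atomic_store_explicit(&key->state, (short)-error,
                              memory_order_release);
}

/* Mirrors the patched check at the top of keyctl_read_key(). */
static long demo_read_check(const struct demo_key *key)
{
        short state = key_read_state(key);
        if (state < 0)
                return state;   /* e.g. -ENOKEY while negative */
        return 0;
}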
4
5
1,115
5,977
https://github.com/torvalds/linux
CVE-2017-15951
['CWE-20']
keyctl.c
keyctl_read_key
/* Userspace key control operations * * Copyright (C) 2004-5 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/module.h> #include <linux/init.h> #include <linux/sched.h> #include <linux/sched/task.h> #include <linux/slab.h> #include <linux/syscalls.h> #include <linux/key.h> #include <linux/keyctl.h> #include <linux/fs.h> #include <linux/capability.h> #include <linux/cred.h> #include <linux/string.h> #include <linux/err.h> #include <linux/vmalloc.h> #include <linux/security.h> #include <linux/uio.h> #include <linux/uaccess.h> #include "internal.h" #define KEY_MAX_DESC_SIZE 4096 static int key_get_type_from_user(char *type, const char __user *_type, unsigned len) { int ret; ret = strncpy_from_user(type, _type, len); if (ret < 0) return ret; if (ret == 0 || ret >= len) return -EINVAL; if (type[0] == '.') return -EPERM; type[len - 1] = '\0'; return 0; } /* * Extract the description of a new key from userspace and either add it as a * new key to the specified keyring or update a matching key in that keyring. * * If the description is NULL or an empty string, the key type is asked to * generate one from the payload. * * The keyring must be writable so that we can attach the key to it. * * If successful, the new key's serial number is returned, otherwise an error * code is returned. */ SYSCALL_DEFINE5(add_key, const char __user *, _type, const char __user *, _description, const void __user *, _payload, size_t, plen, key_serial_t, ringid) { key_ref_t keyring_ref, key_ref; char type[32], *description; void *payload; long ret; ret = -EINVAL; if (plen > 1024 * 1024 - 1) goto error; /* draw all the data into kernel space */ ret = key_get_type_from_user(type, _type, sizeof(type)); if (ret < 0) goto error; description = NULL; if (_description) { description = strndup_user(_description, KEY_MAX_DESC_SIZE); if (IS_ERR(description)) { ret = PTR_ERR(description); goto error; } if (!*description) { kfree(description); description = NULL; } else if ((description[0] == '.') && (strncmp(type, "keyring", 7) == 0)) { ret = -EPERM; goto error2; } } /* pull the payload in if one was supplied */ payload = NULL; if (plen) { ret = -ENOMEM; payload = kvmalloc(plen, GFP_KERNEL); if (!payload) goto error2; ret = -EFAULT; if (copy_from_user(payload, _payload, plen) != 0) goto error3; } /* find the target keyring (which must be writable) */ keyring_ref = lookup_user_key(ringid, KEY_LOOKUP_CREATE, KEY_NEED_WRITE); if (IS_ERR(keyring_ref)) { ret = PTR_ERR(keyring_ref); goto error3; } /* create or update the requested key and add it to the target * keyring */ key_ref = key_create_or_update(keyring_ref, type, description, payload, plen, KEY_PERM_UNDEF, KEY_ALLOC_IN_QUOTA); if (!IS_ERR(key_ref)) { ret = key_ref_to_ptr(key_ref)->serial; key_ref_put(key_ref); } else { ret = PTR_ERR(key_ref); } key_ref_put(keyring_ref); error3: if (payload) { memzero_explicit(payload, plen); kvfree(payload); } error2: kfree(description); error: return ret; } /* * Search the process keyrings and keyring trees linked from those for a * matching key. Keyrings must have appropriate Search permission to be * searched. 
* * If a key is found, it will be attached to the destination keyring if there's * one specified and the serial number of the key will be returned. * * If no key is found, /sbin/request-key will be invoked if _callout_info is * non-NULL in an attempt to create a key. The _callout_info string will be * passed to /sbin/request-key to aid with completing the request. If the * _callout_info string is "" then it will be changed to "-". */ SYSCALL_DEFINE4(request_key, const char __user *, _type, const char __user *, _description, const char __user *, _callout_info, key_serial_t, destringid) { struct key_type *ktype; struct key *key; key_ref_t dest_ref; size_t callout_len; char type[32], *description, *callout_info; long ret; /* pull the type into kernel space */ ret = key_get_type_from_user(type, _type, sizeof(type)); if (ret < 0) goto error; /* pull the description into kernel space */ description = strndup_user(_description, KEY_MAX_DESC_SIZE); if (IS_ERR(description)) { ret = PTR_ERR(description); goto error; } /* pull the callout info into kernel space */ callout_info = NULL; callout_len = 0; if (_callout_info) { callout_info = strndup_user(_callout_info, PAGE_SIZE); if (IS_ERR(callout_info)) { ret = PTR_ERR(callout_info); goto error2; } callout_len = strlen(callout_info); } /* get the destination keyring if specified */ dest_ref = NULL; if (destringid) { dest_ref = lookup_user_key(destringid, KEY_LOOKUP_CREATE, KEY_NEED_WRITE); if (IS_ERR(dest_ref)) { ret = PTR_ERR(dest_ref); goto error3; } } /* find the key type */ ktype = key_type_lookup(type); if (IS_ERR(ktype)) { ret = PTR_ERR(ktype); goto error4; } /* do the search */ key = request_key_and_link(ktype, description, callout_info, callout_len, NULL, key_ref_to_ptr(dest_ref), KEY_ALLOC_IN_QUOTA); if (IS_ERR(key)) { ret = PTR_ERR(key); goto error5; } /* wait for the key to finish being constructed */ ret = wait_for_key_construction(key, 1); if (ret < 0) goto error6; ret = key->serial; error6: key_put(key); error5: key_type_put(ktype); error4: key_ref_put(dest_ref); error3: kfree(callout_info); error2: kfree(description); error: return ret; } /* * Get the ID of the specified process keyring. * * The requested keyring must have search permission to be found. * * If successful, the ID of the requested keyring will be returned. */ long keyctl_get_keyring_ID(key_serial_t id, int create) { key_ref_t key_ref; unsigned long lflags; long ret; lflags = create ? KEY_LOOKUP_CREATE : 0; key_ref = lookup_user_key(id, lflags, KEY_NEED_SEARCH); if (IS_ERR(key_ref)) { ret = PTR_ERR(key_ref); goto error; } ret = key_ref_to_ptr(key_ref)->serial; key_ref_put(key_ref); error: return ret; } /* * Join a (named) session keyring. * * Create and join an anonymous session keyring or join a named session * keyring, creating it if necessary. A named session keyring must have Search * permission for it to be joined. Session keyrings without this permit will * be skipped over. It is not permitted for userspace to create or join * keyrings whose names begin with a dot. * * If successful, the ID of the joined session keyring will be returned.
*/ long keyctl_join_session_keyring(const char __user *_name) { char *name; long ret; /* fetch the name from userspace */ name = NULL; if (_name) { name = strndup_user(_name, KEY_MAX_DESC_SIZE); if (IS_ERR(name)) { ret = PTR_ERR(name); goto error; } ret = -EPERM; if (name[0] == '.') goto error_name; } /* join the session */ ret = join_session_keyring(name); error_name: kfree(name); error: return ret; } /* * Update a key's data payload from the given data. * * The key must grant the caller Write permission and the key type must support * updating for this to work. A negative key can be positively instantiated * with this call. * * If successful, 0 will be returned. If the key type does not support * updating, then -EOPNOTSUPP will be returned. */ long keyctl_update_key(key_serial_t id, const void __user *_payload, size_t plen) { key_ref_t key_ref; void *payload; long ret; ret = -EINVAL; if (plen > PAGE_SIZE) goto error; /* pull the payload in if one was supplied */ payload = NULL; if (plen) { ret = -ENOMEM; payload = kmalloc(plen, GFP_KERNEL); if (!payload) goto error; ret = -EFAULT; if (copy_from_user(payload, _payload, plen) != 0) goto error2; } /* find the target key (which must be writable) */ key_ref = lookup_user_key(id, 0, KEY_NEED_WRITE); if (IS_ERR(key_ref)) { ret = PTR_ERR(key_ref); goto error2; } /* update the key */ ret = key_update(key_ref, payload, plen); key_ref_put(key_ref); error2: kzfree(payload); error: return ret; } /* * Revoke a key. * * The key must grant the caller Write or Setattr permission for this to * work. The key type should give up its quota claim when revoked. The key * and any links to the key will be automatically garbage collected after a * certain amount of time (/proc/sys/kernel/keys/gc_delay). * * Keys with KEY_FLAG_KEEP set should not be revoked. * * If successful, 0 is returned. */ long keyctl_revoke_key(key_serial_t id) { key_ref_t key_ref; struct key *key; long ret; key_ref = lookup_user_key(id, 0, KEY_NEED_WRITE); if (IS_ERR(key_ref)) { ret = PTR_ERR(key_ref); if (ret != -EACCES) goto error; key_ref = lookup_user_key(id, 0, KEY_NEED_SETATTR); if (IS_ERR(key_ref)) { ret = PTR_ERR(key_ref); goto error; } } key = key_ref_to_ptr(key_ref); ret = 0; if (test_bit(KEY_FLAG_KEEP, &key->flags)) ret = -EPERM; else key_revoke(key); key_ref_put(key_ref); error: return ret; } /* * Invalidate a key. * * The key must grant the caller Invalidate permission for this to work. * The key and any links to the key will be automatically garbage collected * immediately. * * Keys with KEY_FLAG_KEEP set should not be invalidated. * * If successful, 0 is returned. */ long keyctl_invalidate_key(key_serial_t id) { key_ref_t key_ref; struct key *key; long ret; kenter("%d", id); key_ref = lookup_user_key(id, 0, KEY_NEED_SEARCH); if (IS_ERR(key_ref)) { ret = PTR_ERR(key_ref); /* Root is permitted to invalidate certain special keys */ if (capable(CAP_SYS_ADMIN)) { key_ref = lookup_user_key(id, 0, 0); if (IS_ERR(key_ref)) goto error; if (test_bit(KEY_FLAG_ROOT_CAN_INVAL, &key_ref_to_ptr(key_ref)->flags)) goto invalidate; goto error_put; } goto error; } invalidate: key = key_ref_to_ptr(key_ref); ret = 0; if (test_bit(KEY_FLAG_KEEP, &key->flags)) ret = -EPERM; else key_invalidate(key); error_put: key_ref_put(key_ref); error: kleave(" = %ld", ret); return ret; } /* * Clear the specified keyring, creating an empty process keyring if one of the * special keyring IDs is used. * * The keyring must grant the caller Write permission and not have * KEY_FLAG_KEEP set for this to work.
If successful, 0 will be returned. */ long keyctl_keyring_clear(key_serial_t ringid) { key_ref_t keyring_ref; struct key *keyring; long ret; keyring_ref = lookup_user_key(ringid, KEY_LOOKUP_CREATE, KEY_NEED_WRITE); if (IS_ERR(keyring_ref)) { ret = PTR_ERR(keyring_ref); /* Root is permitted to invalidate certain special keyrings */ if (capable(CAP_SYS_ADMIN)) { keyring_ref = lookup_user_key(ringid, 0, 0); if (IS_ERR(keyring_ref)) goto error; if (test_bit(KEY_FLAG_ROOT_CAN_CLEAR, &key_ref_to_ptr(keyring_ref)->flags)) goto clear; goto error_put; } goto error; } clear: keyring = key_ref_to_ptr(keyring_ref); if (test_bit(KEY_FLAG_KEEP, &keyring->flags)) ret = -EPERM; else ret = keyring_clear(keyring); error_put: key_ref_put(keyring_ref); error: return ret; } /* * Create a link from a keyring to a key if there's no matching key in the * keyring, otherwise replace the link to the matching key with a link to the * new key. * * The key must grant the caller Link permission and the the keyring must grant * the caller Write permission. Furthermore, if an additional link is created, * the keyring's quota will be extended. * * If successful, 0 will be returned. */ long keyctl_keyring_link(key_serial_t id, key_serial_t ringid) { key_ref_t keyring_ref, key_ref; long ret; keyring_ref = lookup_user_key(ringid, KEY_LOOKUP_CREATE, KEY_NEED_WRITE); if (IS_ERR(keyring_ref)) { ret = PTR_ERR(keyring_ref); goto error; } key_ref = lookup_user_key(id, KEY_LOOKUP_CREATE, KEY_NEED_LINK); if (IS_ERR(key_ref)) { ret = PTR_ERR(key_ref); goto error2; } ret = key_link(key_ref_to_ptr(keyring_ref), key_ref_to_ptr(key_ref)); key_ref_put(key_ref); error2: key_ref_put(keyring_ref); error: return ret; } /* * Unlink a key from a keyring. * * The keyring must grant the caller Write permission for this to work; the key * itself need not grant the caller anything. If the last link to a key is * removed then that key will be scheduled for destruction. * * Keys or keyrings with KEY_FLAG_KEEP set should not be unlinked. * * If successful, 0 will be returned. */ long keyctl_keyring_unlink(key_serial_t id, key_serial_t ringid) { key_ref_t keyring_ref, key_ref; struct key *keyring, *key; long ret; keyring_ref = lookup_user_key(ringid, 0, KEY_NEED_WRITE); if (IS_ERR(keyring_ref)) { ret = PTR_ERR(keyring_ref); goto error; } key_ref = lookup_user_key(id, KEY_LOOKUP_FOR_UNLINK, 0); if (IS_ERR(key_ref)) { ret = PTR_ERR(key_ref); goto error2; } keyring = key_ref_to_ptr(keyring_ref); key = key_ref_to_ptr(key_ref); if (test_bit(KEY_FLAG_KEEP, &keyring->flags) && test_bit(KEY_FLAG_KEEP, &key->flags)) ret = -EPERM; else ret = key_unlink(keyring, key); key_ref_put(key_ref); error2: key_ref_put(keyring_ref); error: return ret; } /* * Return a description of a key to userspace. * * The key must grant the caller View permission for this to work. * * If there's a buffer, we place up to buflen bytes of data into it formatted * in the following way: * * type;uid;gid;perm;description<NUL> * * If successful, we return the amount of description available, irrespective * of how much we may have copied into the buffer. 
*/ long keyctl_describe_key(key_serial_t keyid, char __user *buffer, size_t buflen) { struct key *key, *instkey; key_ref_t key_ref; char *infobuf; long ret; int desclen, infolen; key_ref = lookup_user_key(keyid, KEY_LOOKUP_PARTIAL, KEY_NEED_VIEW); if (IS_ERR(key_ref)) { /* viewing a key under construction is permitted if we have the * authorisation token handy */ if (PTR_ERR(key_ref) == -EACCES) { instkey = key_get_instantiation_authkey(keyid); if (!IS_ERR(instkey)) { key_put(instkey); key_ref = lookup_user_key(keyid, KEY_LOOKUP_PARTIAL, 0); if (!IS_ERR(key_ref)) goto okay; } } ret = PTR_ERR(key_ref); goto error; } okay: key = key_ref_to_ptr(key_ref); desclen = strlen(key->description); /* calculate how much information we're going to return */ ret = -ENOMEM; infobuf = kasprintf(GFP_KERNEL, "%s;%d;%d;%08x;", key->type->name, from_kuid_munged(current_user_ns(), key->uid), from_kgid_munged(current_user_ns(), key->gid), key->perm); if (!infobuf) goto error2; infolen = strlen(infobuf); ret = infolen + desclen + 1; /* consider returning the data */ if (buffer && buflen >= ret) { if (copy_to_user(buffer, infobuf, infolen) != 0 || copy_to_user(buffer + infolen, key->description, desclen + 1) != 0) ret = -EFAULT; } kfree(infobuf); error2: key_ref_put(key_ref); error: return ret; } /* * Search the specified keyring and any keyrings it links to for a matching * key. Only keyrings that grant the caller Search permission will be searched * (this includes the starting keyring). Only keys with Search permission can * be found. * * If successful, the found key will be linked to the destination keyring if * supplied and the key has Link permission, and the found key ID will be * returned. */ long keyctl_keyring_search(key_serial_t ringid, const char __user *_type, const char __user *_description, key_serial_t destringid) { struct key_type *ktype; key_ref_t keyring_ref, key_ref, dest_ref; char type[32], *description; long ret; /* pull the type and description into kernel space */ ret = key_get_type_from_user(type, _type, sizeof(type)); if (ret < 0) goto error; description = strndup_user(_description, KEY_MAX_DESC_SIZE); if (IS_ERR(description)) { ret = PTR_ERR(description); goto error; } /* get the keyring at which to begin the search */ keyring_ref = lookup_user_key(ringid, 0, KEY_NEED_SEARCH); if (IS_ERR(keyring_ref)) { ret = PTR_ERR(keyring_ref); goto error2; } /* get the destination keyring if specified */ dest_ref = NULL; if (destringid) { dest_ref = lookup_user_key(destringid, KEY_LOOKUP_CREATE, KEY_NEED_WRITE); if (IS_ERR(dest_ref)) { ret = PTR_ERR(dest_ref); goto error3; } } /* find the key type */ ktype = key_type_lookup(type); if (IS_ERR(ktype)) { ret = PTR_ERR(ktype); goto error4; } /* do the search */ key_ref = keyring_search(keyring_ref, ktype, description); if (IS_ERR(key_ref)) { ret = PTR_ERR(key_ref); /* treat lack or presence of a negative key the same */ if (ret == -EAGAIN) ret = -ENOKEY; goto error5; } /* link the resulting key to the destination keyring if we can */ if (dest_ref) { ret = key_permission(key_ref, KEY_NEED_LINK); if (ret < 0) goto error6; ret = key_link(key_ref_to_ptr(dest_ref), key_ref_to_ptr(key_ref)); if (ret < 0) goto error6; } ret = key_ref_to_ptr(key_ref)->serial; error6: key_ref_put(key_ref); error5: key_type_put(ktype); error4: key_ref_put(dest_ref); error3: key_ref_put(keyring_ref); error2: kfree(description); error: return ret; } /* * Read a key's payload. 
* * The key must either grant the caller Read permission, or it must grant the * caller Search permission when searched for from the process keyrings. * * If successful, we place up to buflen bytes of data into the buffer, if one * is provided, and return the amount of data that is available in the key, * irrespective of how much we copied into the buffer. */ long keyctl_read_key(key_serial_t keyid, char __user *buffer, size_t buflen) { struct key *key; key_ref_t key_ref; long ret; /* find the key first */ key_ref = lookup_user_key(keyid, 0, 0); if (IS_ERR(key_ref)) { ret = -ENOKEY; goto error; } key = key_ref_to_ptr(key_ref); if (test_bit(KEY_FLAG_NEGATIVE, &key->flags)) { ret = -ENOKEY; goto error2; } /* see if we can read it directly */ ret = key_permission(key_ref, KEY_NEED_READ); if (ret == 0) goto can_read_key; if (ret != -EACCES) goto error2; /* we can't; see if it's searchable from this process's keyrings * - we automatically take account of the fact that it may be * dangling off an instantiation key */ if (!is_key_possessed(key_ref)) { ret = -EACCES; goto error2; } /* the key is probably readable - now try to read it */ can_read_key: ret = -EOPNOTSUPP; if (key->type->read) { /* Read the data with the semaphore held (since we might sleep) * to protect against the key being updated or revoked. */ down_read(&key->sem); ret = key_validate(key); if (ret == 0) ret = key->type->read(key, buffer, buflen); up_read(&key->sem); } error2: key_put(key); error: return ret; } /* * Change the ownership of a key * * The key must grant the caller Setattr permission for this to work, though * the key need not be fully instantiated yet. For the UID to be changed, or * for the GID to be changed to a group the caller is not a member of, the * caller must have sysadmin capability. If either uid or gid is -1 then that * attribute is not changed. * * If the UID is to be changed, the new user must have sufficient quota to * accept the key. The quota deduction will be removed from the old user to * the new user should the attribute be changed. * * If successful, 0 will be returned. */ long keyctl_chown_key(key_serial_t id, uid_t user, gid_t group) { struct key_user *newowner, *zapowner = NULL; struct key *key; key_ref_t key_ref; long ret; kuid_t uid; kgid_t gid; uid = make_kuid(current_user_ns(), user); gid = make_kgid(current_user_ns(), group); ret = -EINVAL; if ((user != (uid_t) -1) && !uid_valid(uid)) goto error; if ((group != (gid_t) -1) && !gid_valid(gid)) goto error; ret = 0; if (user == (uid_t) -1 && group == (gid_t) -1) goto error; key_ref = lookup_user_key(id, KEY_LOOKUP_CREATE | KEY_LOOKUP_PARTIAL, KEY_NEED_SETATTR); if (IS_ERR(key_ref)) { ret = PTR_ERR(key_ref); goto error; } key = key_ref_to_ptr(key_ref); /* make the changes with the locks held to prevent chown/chown races */ ret = -EACCES; down_write(&key->sem); if (!capable(CAP_SYS_ADMIN)) { /* only the sysadmin can chown a key to some other UID */ if (user != (uid_t) -1 && !uid_eq(key->uid, uid)) goto error_put; /* only the sysadmin can set the key's GID to a group other * than one of those that the current process subscribes to */ if (group != (gid_t) -1 && !gid_eq(gid, key->gid) && !in_group_p(gid)) goto error_put; } /* change the UID */ if (user != (uid_t) -1 && !uid_eq(uid, key->uid)) { ret = -ENOMEM; newowner = key_user_lookup(uid); if (!newowner) goto error_put; /* transfer the quota burden to the new user */ if (test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) { unsigned maxkeys = uid_eq(uid, GLOBAL_ROOT_UID) ? 
key_quota_root_maxkeys : key_quota_maxkeys; unsigned maxbytes = uid_eq(uid, GLOBAL_ROOT_UID) ? key_quota_root_maxbytes : key_quota_maxbytes; spin_lock(&newowner->lock); if (newowner->qnkeys + 1 >= maxkeys || newowner->qnbytes + key->quotalen >= maxbytes || newowner->qnbytes + key->quotalen < newowner->qnbytes) goto quota_overrun; newowner->qnkeys++; newowner->qnbytes += key->quotalen; spin_unlock(&newowner->lock); spin_lock(&key->user->lock); key->user->qnkeys--; key->user->qnbytes -= key->quotalen; spin_unlock(&key->user->lock); } atomic_dec(&key->user->nkeys); atomic_inc(&newowner->nkeys); if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) { atomic_dec(&key->user->nikeys); atomic_inc(&newowner->nikeys); } zapowner = key->user; key->user = newowner; key->uid = uid; } /* change the GID */ if (group != (gid_t) -1) key->gid = gid; ret = 0; error_put: up_write(&key->sem); key_put(key); if (zapowner) key_user_put(zapowner); error: return ret; quota_overrun: spin_unlock(&newowner->lock); zapowner = newowner; ret = -EDQUOT; goto error_put; } /* * Change the permission mask on a key. * * The key must grant the caller Setattr permission for this to work, though * the key need not be fully instantiated yet. If the caller does not have * sysadmin capability, it may only change the permission on keys that it owns. */ long keyctl_setperm_key(key_serial_t id, key_perm_t perm) { struct key *key; key_ref_t key_ref; long ret; ret = -EINVAL; if (perm & ~(KEY_POS_ALL | KEY_USR_ALL | KEY_GRP_ALL | KEY_OTH_ALL)) goto error; key_ref = lookup_user_key(id, KEY_LOOKUP_CREATE | KEY_LOOKUP_PARTIAL, KEY_NEED_SETATTR); if (IS_ERR(key_ref)) { ret = PTR_ERR(key_ref); goto error; } key = key_ref_to_ptr(key_ref); /* make the changes with the locks held to prevent chown/chmod races */ ret = -EACCES; down_write(&key->sem); /* if we're not the sysadmin, we can only change a key that we own */ if (capable(CAP_SYS_ADMIN) || uid_eq(key->uid, current_fsuid())) { key->perm = perm; ret = 0; } up_write(&key->sem); key_put(key); error: return ret; } /* * Get the destination keyring for instantiation and check that the caller has * Write permission on it. */ static long get_instantiation_keyring(key_serial_t ringid, struct request_key_auth *rka, struct key **_dest_keyring) { key_ref_t dkref; *_dest_keyring = NULL; /* just return a NULL pointer if we weren't asked to make a link */ if (ringid == 0) return 0; /* if a specific keyring is nominated by ID, then use that */ if (ringid > 0) { dkref = lookup_user_key(ringid, KEY_LOOKUP_CREATE, KEY_NEED_WRITE); if (IS_ERR(dkref)) return PTR_ERR(dkref); *_dest_keyring = key_ref_to_ptr(dkref); return 0; } if (ringid == KEY_SPEC_REQKEY_AUTH_KEY) return -EINVAL; /* otherwise specify the destination keyring recorded in the * authorisation key (any KEY_SPEC_*_KEYRING) */ if (ringid >= KEY_SPEC_REQUESTOR_KEYRING) { *_dest_keyring = key_get(rka->dest_keyring); return 0; } return -ENOKEY; } /* * Change the request_key authorisation key on the current process. */ static int keyctl_change_reqkey_auth(struct key *key) { struct cred *new; new = prepare_creds(); if (!new) return -ENOMEM; key_put(new->request_key_auth); new->request_key_auth = key_get(key); return commit_creds(new); } /* * Instantiate a key with the specified payload and link the key into the * destination keyring if one is given. * * The caller must have the appropriate instantiation permit set for this to * work (see keyctl_assume_authority). No other permissions are required. * * If successful, 0 will be returned. 
*/ long keyctl_instantiate_key_common(key_serial_t id, struct iov_iter *from, key_serial_t ringid) { const struct cred *cred = current_cred(); struct request_key_auth *rka; struct key *instkey, *dest_keyring; size_t plen = from ? iov_iter_count(from) : 0; void *payload; long ret; kenter("%d,,%zu,%d", id, plen, ringid); if (!plen) from = NULL; ret = -EINVAL; if (plen > 1024 * 1024 - 1) goto error; /* the appropriate instantiation authorisation key must have been * assumed before calling this */ ret = -EPERM; instkey = cred->request_key_auth; if (!instkey) goto error; rka = instkey->payload.data[0]; if (rka->target_key->serial != id) goto error; /* pull the payload in if one was supplied */ payload = NULL; if (from) { ret = -ENOMEM; payload = kvmalloc(plen, GFP_KERNEL); if (!payload) goto error; ret = -EFAULT; if (!copy_from_iter_full(payload, plen, from)) goto error2; } /* find the destination keyring amongst those belonging to the * requesting task */ ret = get_instantiation_keyring(ringid, rka, &dest_keyring); if (ret < 0) goto error2; /* instantiate the key and link it into a keyring */ ret = key_instantiate_and_link(rka->target_key, payload, plen, dest_keyring, instkey); key_put(dest_keyring); /* discard the assumed authority if it's just been disabled by * instantiation of the key */ if (ret == 0) keyctl_change_reqkey_auth(NULL); error2: if (payload) { memzero_explicit(payload, plen); kvfree(payload); } error: return ret; } /* * Instantiate a key with the specified payload and link the key into the * destination keyring if one is given. * * The caller must have the appropriate instantiation permit set for this to * work (see keyctl_assume_authority). No other permissions are required. * * If successful, 0 will be returned. */ long keyctl_instantiate_key(key_serial_t id, const void __user *_payload, size_t plen, key_serial_t ringid) { if (_payload && plen) { struct iovec iov; struct iov_iter from; int ret; ret = import_single_range(WRITE, (void __user *)_payload, plen, &iov, &from); if (unlikely(ret)) return ret; return keyctl_instantiate_key_common(id, &from, ringid); } return keyctl_instantiate_key_common(id, NULL, ringid); } /* * Instantiate a key with the specified multipart payload and link the key into * the destination keyring if one is given. * * The caller must have the appropriate instantiation permit set for this to * work (see keyctl_assume_authority). No other permissions are required. * * If successful, 0 will be returned. */ long keyctl_instantiate_key_iov(key_serial_t id, const struct iovec __user *_payload_iov, unsigned ioc, key_serial_t ringid) { struct iovec iovstack[UIO_FASTIOV], *iov = iovstack; struct iov_iter from; long ret; if (!_payload_iov) ioc = 0; ret = import_iovec(WRITE, _payload_iov, ioc, ARRAY_SIZE(iovstack), &iov, &from); if (ret < 0) return ret; ret = keyctl_instantiate_key_common(id, &from, ringid); kfree(iov); return ret; } /* * Negatively instantiate the key with the given timeout (in seconds) and link * the key into the destination keyring if one is given. * * The caller must have the appropriate instantiation permit set for this to * work (see keyctl_assume_authority). No other permissions are required. * * The key and any links to the key will be automatically garbage collected * after the timeout expires. * * Negative keys are used to rate limit repeated request_key() calls by causing * them to return -ENOKEY until the negative key expires. * * If successful, 0 will be returned. 
*/ long keyctl_negate_key(key_serial_t id, unsigned timeout, key_serial_t ringid) { return keyctl_reject_key(id, timeout, ENOKEY, ringid); } /* * Negatively instantiate the key with the given timeout (in seconds) and error * code and link the key into the destination keyring if one is given. * * The caller must have the appropriate instantiation permit set for this to * work (see keyctl_assume_authority). No other permissions are required. * * The key and any links to the key will be automatically garbage collected * after the timeout expires. * * Negative keys are used to rate limit repeated request_key() calls by causing * them to return the specified error code until the negative key expires. * * If successful, 0 will be returned. */ long keyctl_reject_key(key_serial_t id, unsigned timeout, unsigned error, key_serial_t ringid) { const struct cred *cred = current_cred(); struct request_key_auth *rka; struct key *instkey, *dest_keyring; long ret; kenter("%d,%u,%u,%d", id, timeout, error, ringid); /* must be a valid error code and mustn't be a kernel special */ if (error <= 0 || error >= MAX_ERRNO || error == ERESTARTSYS || error == ERESTARTNOINTR || error == ERESTARTNOHAND || error == ERESTART_RESTARTBLOCK) return -EINVAL; /* the appropriate instantiation authorisation key must have been * assumed before calling this */ ret = -EPERM; instkey = cred->request_key_auth; if (!instkey) goto error; rka = instkey->payload.data[0]; if (rka->target_key->serial != id) goto error; /* find the destination keyring if present (which must also be * writable) */ ret = get_instantiation_keyring(ringid, rka, &dest_keyring); if (ret < 0) goto error; /* instantiate the key and link it into a keyring */ ret = key_reject_and_link(rka->target_key, timeout, error, dest_keyring, instkey); key_put(dest_keyring); /* discard the assumed authority if it's just been disabled by * instantiation of the key */ if (ret == 0) keyctl_change_reqkey_auth(NULL); error: return ret; } /* * Read or set the default keyring in which request_key() will cache keys and * return the old setting. * * If a thread or process keyring is specified then it will be created if it * doesn't yet exist. The old setting will be returned if successful. */ long keyctl_set_reqkey_keyring(int reqkey_defl) { struct cred *new; int ret, old_setting; old_setting = current_cred_xxx(jit_keyring); if (reqkey_defl == KEY_REQKEY_DEFL_NO_CHANGE) return old_setting; new = prepare_creds(); if (!new) return -ENOMEM; switch (reqkey_defl) { case KEY_REQKEY_DEFL_THREAD_KEYRING: ret = install_thread_keyring_to_cred(new); if (ret < 0) goto error; goto set; case KEY_REQKEY_DEFL_PROCESS_KEYRING: ret = install_process_keyring_to_cred(new); if (ret < 0) goto error; goto set; case KEY_REQKEY_DEFL_DEFAULT: case KEY_REQKEY_DEFL_SESSION_KEYRING: case KEY_REQKEY_DEFL_USER_KEYRING: case KEY_REQKEY_DEFL_USER_SESSION_KEYRING: case KEY_REQKEY_DEFL_REQUESTOR_KEYRING: goto set; case KEY_REQKEY_DEFL_NO_CHANGE: case KEY_REQKEY_DEFL_GROUP_KEYRING: default: ret = -EINVAL; goto error; } set: new->jit_keyring = reqkey_defl; commit_creds(new); return old_setting; error: abort_creds(new); return ret; } /* * Set or clear the timeout on a key. * * Either the key must grant the caller Setattr permission or else the caller * must hold an instantiation authorisation token for the key. * * The timeout is either 0 to clear the timeout, or a number of seconds from * the current time. The key and any links to the key will be automatically * garbage collected after the timeout expires. 
 *
 * Keys with KEY_FLAG_KEEP set should not be timed out.
 *
 * If successful, 0 is returned.
 */
long keyctl_set_timeout(key_serial_t id, unsigned timeout)
{
	struct key *key, *instkey;
	key_ref_t key_ref;
	long ret;

	key_ref = lookup_user_key(id, KEY_LOOKUP_CREATE | KEY_LOOKUP_PARTIAL,
				  KEY_NEED_SETATTR);
	if (IS_ERR(key_ref)) {
		/* setting the timeout on a key under construction is permitted
		 * if we have the authorisation token handy */
		if (PTR_ERR(key_ref) == -EACCES) {
			instkey = key_get_instantiation_authkey(id);
			if (!IS_ERR(instkey)) {
				key_put(instkey);
				key_ref = lookup_user_key(id,
							  KEY_LOOKUP_PARTIAL,
							  0);
				if (!IS_ERR(key_ref))
					goto okay;
			}
		}

		ret = PTR_ERR(key_ref);
		goto error;
	}

okay:
	key = key_ref_to_ptr(key_ref);
	ret = 0;
	if (test_bit(KEY_FLAG_KEEP, &key->flags))
		ret = -EPERM;
	else
		key_set_timeout(key, timeout);
	key_put(key);

error:
	return ret;
}

/*
 * Assume (or clear) the authority to instantiate the specified key.
 *
 * This sets the authoritative token currently in force for key instantiation.
 * This must be done for a key to be instantiated.  It has the effect of making
 * available all the keys from the caller of the request_key() that created a
 * key to request_key() calls made by the caller of this function.
 *
 * The caller must have the instantiation key in their process keyrings with a
 * Search permission grant available to the caller.
 *
 * If the ID given is 0, then the setting will be cleared and 0 returned.
 *
 * If the ID given matches an authorisation key, then that key will be
 * set and its ID will be returned.  The authorisation key can be read to get
 * the callout information passed to request_key().
 */
long keyctl_assume_authority(key_serial_t id)
{
	struct key *authkey;
	long ret;

	/* special key IDs aren't permitted */
	ret = -EINVAL;
	if (id < 0)
		goto error;

	/* we divest ourselves of authority if given an ID of 0 */
	if (id == 0) {
		ret = keyctl_change_reqkey_auth(NULL);
		goto error;
	}

	/* attempt to assume the authority temporarily granted to us whilst we
	 * instantiate the specified key
	 * - the authorisation key must be in the current task's keyrings
	 *   somewhere
	 */
	authkey = key_get_instantiation_authkey(id);
	if (IS_ERR(authkey)) {
		ret = PTR_ERR(authkey);
		goto error;
	}

	ret = keyctl_change_reqkey_auth(authkey);
	if (ret == 0)
		ret = authkey->serial;
	key_put(authkey);
error:
	return ret;
}

/*
 * Get a key's LSM security label.
 *
 * The key must grant the caller View permission for this to work.
 *
 * If there's a buffer, then up to buflen bytes of data will be placed into it.
 *
 * If successful, the amount of information available will be returned,
 * irrespective of how much was copied (including the terminal NUL).
*/ long keyctl_get_security(key_serial_t keyid, char __user *buffer, size_t buflen) { struct key *key, *instkey; key_ref_t key_ref; char *context; long ret; key_ref = lookup_user_key(keyid, KEY_LOOKUP_PARTIAL, KEY_NEED_VIEW); if (IS_ERR(key_ref)) { if (PTR_ERR(key_ref) != -EACCES) return PTR_ERR(key_ref); /* viewing a key under construction is also permitted if we * have the authorisation token handy */ instkey = key_get_instantiation_authkey(keyid); if (IS_ERR(instkey)) return PTR_ERR(instkey); key_put(instkey); key_ref = lookup_user_key(keyid, KEY_LOOKUP_PARTIAL, 0); if (IS_ERR(key_ref)) return PTR_ERR(key_ref); } key = key_ref_to_ptr(key_ref); ret = security_key_getsecurity(key, &context); if (ret == 0) { /* if no information was returned, give userspace an empty * string */ ret = 1; if (buffer && buflen > 0 && copy_to_user(buffer, "", 1) != 0) ret = -EFAULT; } else if (ret > 0) { /* return as much data as there's room for */ if (buffer && buflen > 0) { if (buflen > ret) buflen = ret; if (copy_to_user(buffer, context, buflen) != 0) ret = -EFAULT; } kfree(context); } key_ref_put(key_ref); return ret; } /* * Attempt to install the calling process's session keyring on the process's * parent process. * * The keyring must exist and must grant the caller LINK permission, and the * parent process must be single-threaded and must have the same effective * ownership as this process and mustn't be SUID/SGID. * * The keyring will be emplaced on the parent when it next resumes userspace. * * If successful, 0 will be returned. */ long keyctl_session_to_parent(void) { struct task_struct *me, *parent; const struct cred *mycred, *pcred; struct callback_head *newwork, *oldwork; key_ref_t keyring_r; struct cred *cred; int ret; keyring_r = lookup_user_key(KEY_SPEC_SESSION_KEYRING, 0, KEY_NEED_LINK); if (IS_ERR(keyring_r)) return PTR_ERR(keyring_r); ret = -ENOMEM; /* our parent is going to need a new cred struct, a new tgcred struct * and new security data, so we allocate them here to prevent ENOMEM in * our parent */ cred = cred_alloc_blank(); if (!cred) goto error_keyring; newwork = &cred->rcu; cred->session_keyring = key_ref_to_ptr(keyring_r); keyring_r = NULL; init_task_work(newwork, key_change_session_keyring); me = current; rcu_read_lock(); write_lock_irq(&tasklist_lock); ret = -EPERM; oldwork = NULL; parent = me->real_parent; /* the parent mustn't be init and mustn't be a kernel thread */ if (parent->pid <= 1 || !parent->mm) goto unlock; /* the parent must be single threaded */ if (!thread_group_empty(parent)) goto unlock; /* the parent and the child must have different session keyrings or * there's no point */ mycred = current_cred(); pcred = __task_cred(parent); if (mycred == pcred || mycred->session_keyring == pcred->session_keyring) { ret = 0; goto unlock; } /* the parent must have the same effective ownership and mustn't be * SUID/SGID */ if (!uid_eq(pcred->uid, mycred->euid) || !uid_eq(pcred->euid, mycred->euid) || !uid_eq(pcred->suid, mycred->euid) || !gid_eq(pcred->gid, mycred->egid) || !gid_eq(pcred->egid, mycred->egid) || !gid_eq(pcred->sgid, mycred->egid)) goto unlock; /* the keyrings must have the same UID */ if ((pcred->session_keyring && !uid_eq(pcred->session_keyring->uid, mycred->euid)) || !uid_eq(mycred->session_keyring->uid, mycred->euid)) goto unlock; /* cancel an already pending keyring replacement */ oldwork = task_work_cancel(parent, key_change_session_keyring); /* the replacement session keyring is applied just prior to userspace * restarting */ ret = task_work_add(parent, 
newwork, true); if (!ret) newwork = NULL; unlock: write_unlock_irq(&tasklist_lock); rcu_read_unlock(); if (oldwork) put_cred(container_of(oldwork, struct cred, rcu)); if (newwork) put_cred(cred); return ret; error_keyring: key_ref_put(keyring_r); return ret; } /* * Apply a restriction to a given keyring. * * The caller must have Setattr permission to change keyring restrictions. * * The requested type name may be a NULL pointer to reject all attempts * to link to the keyring. If _type is non-NULL, _restriction can be * NULL or a pointer to a string describing the restriction. If _type is * NULL, _restriction must also be NULL. * * Returns 0 if successful. */ long keyctl_restrict_keyring(key_serial_t id, const char __user *_type, const char __user *_restriction) { key_ref_t key_ref; bool link_reject = !_type; char type[32]; char *restriction = NULL; long ret; key_ref = lookup_user_key(id, 0, KEY_NEED_SETATTR); if (IS_ERR(key_ref)) return PTR_ERR(key_ref); if (_type) { ret = key_get_type_from_user(type, _type, sizeof(type)); if (ret < 0) goto error; } if (_restriction) { if (!_type) { ret = -EINVAL; goto error; } restriction = strndup_user(_restriction, PAGE_SIZE); if (IS_ERR(restriction)) { ret = PTR_ERR(restriction); goto error; } } ret = keyring_restrict(key_ref, link_reject ? NULL : type, restriction); kfree(restriction); error: key_ref_put(key_ref); return ret; } /* * The key control system call */ SYSCALL_DEFINE5(keyctl, int, option, unsigned long, arg2, unsigned long, arg3, unsigned long, arg4, unsigned long, arg5) { switch (option) { case KEYCTL_GET_KEYRING_ID: return keyctl_get_keyring_ID((key_serial_t) arg2, (int) arg3); case KEYCTL_JOIN_SESSION_KEYRING: return keyctl_join_session_keyring((const char __user *) arg2); case KEYCTL_UPDATE: return keyctl_update_key((key_serial_t) arg2, (const void __user *) arg3, (size_t) arg4); case KEYCTL_REVOKE: return keyctl_revoke_key((key_serial_t) arg2); case KEYCTL_DESCRIBE: return keyctl_describe_key((key_serial_t) arg2, (char __user *) arg3, (unsigned) arg4); case KEYCTL_CLEAR: return keyctl_keyring_clear((key_serial_t) arg2); case KEYCTL_LINK: return keyctl_keyring_link((key_serial_t) arg2, (key_serial_t) arg3); case KEYCTL_UNLINK: return keyctl_keyring_unlink((key_serial_t) arg2, (key_serial_t) arg3); case KEYCTL_SEARCH: return keyctl_keyring_search((key_serial_t) arg2, (const char __user *) arg3, (const char __user *) arg4, (key_serial_t) arg5); case KEYCTL_READ: return keyctl_read_key((key_serial_t) arg2, (char __user *) arg3, (size_t) arg4); case KEYCTL_CHOWN: return keyctl_chown_key((key_serial_t) arg2, (uid_t) arg3, (gid_t) arg4); case KEYCTL_SETPERM: return keyctl_setperm_key((key_serial_t) arg2, (key_perm_t) arg3); case KEYCTL_INSTANTIATE: return keyctl_instantiate_key((key_serial_t) arg2, (const void __user *) arg3, (size_t) arg4, (key_serial_t) arg5); case KEYCTL_NEGATE: return keyctl_negate_key((key_serial_t) arg2, (unsigned) arg3, (key_serial_t) arg4); case KEYCTL_SET_REQKEY_KEYRING: return keyctl_set_reqkey_keyring(arg2); case KEYCTL_SET_TIMEOUT: return keyctl_set_timeout((key_serial_t) arg2, (unsigned) arg3); case KEYCTL_ASSUME_AUTHORITY: return keyctl_assume_authority((key_serial_t) arg2); case KEYCTL_GET_SECURITY: return keyctl_get_security((key_serial_t) arg2, (char __user *) arg3, (size_t) arg4); case KEYCTL_SESSION_TO_PARENT: return keyctl_session_to_parent(); case KEYCTL_REJECT: return keyctl_reject_key((key_serial_t) arg2, (unsigned) arg3, (unsigned) arg4, (key_serial_t) arg5); case KEYCTL_INSTANTIATE_IOV: return 
keyctl_instantiate_key_iov( (key_serial_t) arg2, (const struct iovec __user *) arg3, (unsigned) arg4, (key_serial_t) arg5); case KEYCTL_INVALIDATE: return keyctl_invalidate_key((key_serial_t) arg2); case KEYCTL_GET_PERSISTENT: return keyctl_get_persistent((uid_t)arg2, (key_serial_t)arg3); case KEYCTL_DH_COMPUTE: return keyctl_dh_compute((struct keyctl_dh_params __user *) arg2, (char __user *) arg3, (size_t) arg4, (struct keyctl_kdf_params __user *) arg5); case KEYCTL_RESTRICT_KEYRING: return keyctl_restrict_keyring((key_serial_t) arg2, (const char __user *) arg3, (const char __user *) arg4); default: return -EOPNOTSUPP; } }
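/*
 * A minimal userspace sketch of how the syscall surface above is driven,
 * assuming a Linux toolchain with <linux/keyctl.h> available and raw syscall
 * numbers from <sys/syscall.h>; it is not taken from the kernel tree.  The
 * key description "example:demo" and its payload are arbitrary placeholders.
 * The program creates a "user"-type key with add_key(2), then exercises
 * KEYCTL_DESCRIBE, KEYCTL_READ and KEYCTL_REVOKE through keyctl(2), which
 * lands in the SYSCALL_DEFINE5(keyctl, ...) dispatcher above.
 */
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/keyctl.h>

int main(void)
{
	const char payload[] = "secret";
	char desc[256], buf[64];
	long id, n;

	/* add_key(2): create or update a key attached to the session
	 * keyring; on success the key's serial number is returned. */
	id = syscall(__NR_add_key, "user", "example:demo",
		     payload, sizeof(payload) - 1, KEY_SPEC_SESSION_KEYRING);
	if (id < 0) {
		perror("add_key");
		return 1;
	}

	/* KEYCTL_DESCRIBE fills "type;uid;gid;perm;description" and returns
	 * the size needed, irrespective of how much was actually copied
	 * (see keyctl_describe_key above). */
	n = syscall(__NR_keyctl, KEYCTL_DESCRIBE, (int)id, desc, sizeof(desc));
	if (n > 0 && (size_t)n <= sizeof(desc))
		printf("key %ld: %s\n", id, desc);

	/* KEYCTL_READ requires Read permission (or Search when reached via
	 * the process keyrings); it returns the amount of payload data
	 * available in the key (see keyctl_read_key above). */
	n = syscall(__NR_keyctl, KEYCTL_READ, (int)id, buf, sizeof(buf));
	if (n > 0 && (size_t)n <= sizeof(buf))
		printf("payload: %.*s\n", (int)n, buf);

	/* KEYCTL_REVOKE requires Write or Setattr permission on the key. */
	if (syscall(__NR_keyctl, KEYCTL_REVOKE, (int)id) < 0)
		perror("keyctl(KEYCTL_REVOKE)");

	return 0;
}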
/* Userspace key control operations * * Copyright (C) 2004-5 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/module.h> #include <linux/init.h> #include <linux/sched.h> #include <linux/sched/task.h> #include <linux/slab.h> #include <linux/syscalls.h> #include <linux/key.h> #include <linux/keyctl.h> #include <linux/fs.h> #include <linux/capability.h> #include <linux/cred.h> #include <linux/string.h> #include <linux/err.h> #include <linux/vmalloc.h> #include <linux/security.h> #include <linux/uio.h> #include <linux/uaccess.h> #include "internal.h" #define KEY_MAX_DESC_SIZE 4096 static int key_get_type_from_user(char *type, const char __user *_type, unsigned len) { int ret; ret = strncpy_from_user(type, _type, len); if (ret < 0) return ret; if (ret == 0 || ret >= len) return -EINVAL; if (type[0] == '.') return -EPERM; type[len - 1] = '\0'; return 0; } /* * Extract the description of a new key from userspace and either add it as a * new key to the specified keyring or update a matching key in that keyring. * * If the description is NULL or an empty string, the key type is asked to * generate one from the payload. * * The keyring must be writable so that we can attach the key to it. * * If successful, the new key's serial number is returned, otherwise an error * code is returned. */ SYSCALL_DEFINE5(add_key, const char __user *, _type, const char __user *, _description, const void __user *, _payload, size_t, plen, key_serial_t, ringid) { key_ref_t keyring_ref, key_ref; char type[32], *description; void *payload; long ret; ret = -EINVAL; if (plen > 1024 * 1024 - 1) goto error; /* draw all the data into kernel space */ ret = key_get_type_from_user(type, _type, sizeof(type)); if (ret < 0) goto error; description = NULL; if (_description) { description = strndup_user(_description, KEY_MAX_DESC_SIZE); if (IS_ERR(description)) { ret = PTR_ERR(description); goto error; } if (!*description) { kfree(description); description = NULL; } else if ((description[0] == '.') && (strncmp(type, "keyring", 7) == 0)) { ret = -EPERM; goto error2; } } /* pull the payload in if one was supplied */ payload = NULL; if (plen) { ret = -ENOMEM; payload = kvmalloc(plen, GFP_KERNEL); if (!payload) goto error2; ret = -EFAULT; if (copy_from_user(payload, _payload, plen) != 0) goto error3; } /* find the target keyring (which must be writable) */ keyring_ref = lookup_user_key(ringid, KEY_LOOKUP_CREATE, KEY_NEED_WRITE); if (IS_ERR(keyring_ref)) { ret = PTR_ERR(keyring_ref); goto error3; } /* create or update the requested key and add it to the target * keyring */ key_ref = key_create_or_update(keyring_ref, type, description, payload, plen, KEY_PERM_UNDEF, KEY_ALLOC_IN_QUOTA); if (!IS_ERR(key_ref)) { ret = key_ref_to_ptr(key_ref)->serial; key_ref_put(key_ref); } else { ret = PTR_ERR(key_ref); } key_ref_put(keyring_ref); error3: if (payload) { memzero_explicit(payload, plen); kvfree(payload); } error2: kfree(description); error: return ret; } /* * Search the process keyrings and keyring trees linked from those for a * matching key. Keyrings must have appropriate Search permission to be * searched. 
* * If a key is found, it will be attached to the destination keyring if there's * one specified and the serial number of the key will be returned. * * If no key is found, /sbin/request-key will be invoked if _callout_info is * non-NULL in an attempt to create a key. The _callout_info string will be * passed to /sbin/request-key to aid with completing the request. If the * _callout_info string is "" then it will be changed to "-". */ SYSCALL_DEFINE4(request_key, const char __user *, _type, const char __user *, _description, const char __user *, _callout_info, key_serial_t, destringid) { struct key_type *ktype; struct key *key; key_ref_t dest_ref; size_t callout_len; char type[32], *description, *callout_info; long ret; /* pull the type into kernel space */ ret = key_get_type_from_user(type, _type, sizeof(type)); if (ret < 0) goto error; /* pull the description into kernel space */ description = strndup_user(_description, KEY_MAX_DESC_SIZE); if (IS_ERR(description)) { ret = PTR_ERR(description); goto error; } /* pull the callout info into kernel space */ callout_info = NULL; callout_len = 0; if (_callout_info) { callout_info = strndup_user(_callout_info, PAGE_SIZE); if (IS_ERR(callout_info)) { ret = PTR_ERR(callout_info); goto error2; } callout_len = strlen(callout_info); } /* get the destination keyring if specified */ dest_ref = NULL; if (destringid) { dest_ref = lookup_user_key(destringid, KEY_LOOKUP_CREATE, KEY_NEED_WRITE); if (IS_ERR(dest_ref)) { ret = PTR_ERR(dest_ref); goto error3; } } /* find the key type */ ktype = key_type_lookup(type); if (IS_ERR(ktype)) { ret = PTR_ERR(ktype); goto error4; } /* do the search */ key = request_key_and_link(ktype, description, callout_info, callout_len, NULL, key_ref_to_ptr(dest_ref), KEY_ALLOC_IN_QUOTA); if (IS_ERR(key)) { ret = PTR_ERR(key); goto error5; } /* wait for the key to finish being constructed */ ret = wait_for_key_construction(key, 1); if (ret < 0) goto error6; ret = key->serial; error6: key_put(key); error5: key_type_put(ktype); error4: key_ref_put(dest_ref); error3: kfree(callout_info); error2: kfree(description); error: return ret; } /* * Get the ID of the specified process keyring. * * The requested keyring must have search permission to be found. * * If successful, the ID of the requested keyring will be returned. */ long keyctl_get_keyring_ID(key_serial_t id, int create) { key_ref_t key_ref; unsigned long lflags; long ret; lflags = create ? KEY_LOOKUP_CREATE : 0; key_ref = lookup_user_key(id, lflags, KEY_NEED_SEARCH); if (IS_ERR(key_ref)) { ret = PTR_ERR(key_ref); goto error; } ret = key_ref_to_ptr(key_ref)->serial; key_ref_put(key_ref); error: return ret; } /* * Join a (named) session keyring. * * Create and join an anonymous session keyring or join a named session * keyring, creating it if necessary. A named session keyring must have Search * permission for it to be joined. Session keyrings without this permit will * be skipped over. It is not permitted for userspace to create or join * keyrings whose name begin with a dot. * * If successful, the ID of the joined session keyring will be returned. 
 */
long keyctl_join_session_keyring(const char __user *_name)
{
	char *name;
	long ret;

	/* fetch the name from userspace */
	name = NULL;
	if (_name) {
		name = strndup_user(_name, KEY_MAX_DESC_SIZE);
		if (IS_ERR(name)) {
			ret = PTR_ERR(name);
			goto error;
		}

		ret = -EPERM;
		if (name[0] == '.')
			goto error_name;
	}

	/* join the session */
	ret = join_session_keyring(name);

error_name:
	kfree(name);
error:
	return ret;
}

/*
 * Update a key's data payload from the given data.
 *
 * The key must grant the caller Write permission and the key type must support
 * updating for this to work.  A negative key can be positively instantiated
 * with this call.
 *
 * If successful, 0 will be returned.  If the key type does not support
 * updating, then -EOPNOTSUPP will be returned.
 */
long keyctl_update_key(key_serial_t id,
		       const void __user *_payload,
		       size_t plen)
{
	key_ref_t key_ref;
	void *payload;
	long ret;

	ret = -EINVAL;
	if (plen > PAGE_SIZE)
		goto error;

	/* pull the payload in if one was supplied */
	payload = NULL;
	if (plen) {
		ret = -ENOMEM;
		payload = kmalloc(plen, GFP_KERNEL);
		if (!payload)
			goto error;

		ret = -EFAULT;
		if (copy_from_user(payload, _payload, plen) != 0)
			goto error2;
	}

	/* find the target key (which must be writable) */
	key_ref = lookup_user_key(id, 0, KEY_NEED_WRITE);
	if (IS_ERR(key_ref)) {
		ret = PTR_ERR(key_ref);
		goto error2;
	}

	/* update the key */
	ret = key_update(key_ref, payload, plen);

	key_ref_put(key_ref);
error2:
	kzfree(payload);
error:
	return ret;
}

/*
 * Revoke a key.
 *
 * The key must grant the caller Write or Setattr permission for this to
 * work.  The key type should give up its quota claim when revoked.  The key
 * and any links to the key will be automatically garbage collected after a
 * certain amount of time (/proc/sys/kernel/keys/gc_delay).
 *
 * Keys with KEY_FLAG_KEEP set should not be revoked.
 *
 * If successful, 0 is returned.
 */
long keyctl_revoke_key(key_serial_t id)
{
	key_ref_t key_ref;
	struct key *key;
	long ret;

	key_ref = lookup_user_key(id, 0, KEY_NEED_WRITE);
	if (IS_ERR(key_ref)) {
		ret = PTR_ERR(key_ref);
		if (ret != -EACCES)
			goto error;
		key_ref = lookup_user_key(id, 0, KEY_NEED_SETATTR);
		if (IS_ERR(key_ref)) {
			ret = PTR_ERR(key_ref);
			goto error;
		}
	}

	key = key_ref_to_ptr(key_ref);
	ret = 0;
	if (test_bit(KEY_FLAG_KEEP, &key->flags))
		ret = -EPERM;
	else
		key_revoke(key);

	key_ref_put(key_ref);
error:
	return ret;
}

/*
 * Invalidate a key.
 *
 * The key must grant the caller Invalidate permission for this to work.
 * The key and any links to the key will be automatically garbage collected
 * immediately.
 *
 * Keys with KEY_FLAG_KEEP set should not be invalidated.
 *
 * If successful, 0 is returned.
 */
long keyctl_invalidate_key(key_serial_t id)
{
	key_ref_t key_ref;
	struct key *key;
	long ret;

	kenter("%d", id);

	key_ref = lookup_user_key(id, 0, KEY_NEED_SEARCH);
	if (IS_ERR(key_ref)) {
		ret = PTR_ERR(key_ref);

		/* Root is permitted to invalidate certain special keys */
		if (capable(CAP_SYS_ADMIN)) {
			key_ref = lookup_user_key(id, 0, 0);
			if (IS_ERR(key_ref))
				goto error;
			if (test_bit(KEY_FLAG_ROOT_CAN_INVAL,
				     &key_ref_to_ptr(key_ref)->flags))
				goto invalidate;
			goto error_put;
		}

		goto error;
	}

invalidate:
	key = key_ref_to_ptr(key_ref);
	ret = 0;
	if (test_bit(KEY_FLAG_KEEP, &key->flags))
		ret = -EPERM;
	else
		key_invalidate(key);
error_put:
	key_ref_put(key_ref);
error:
	kleave(" = %ld", ret);
	return ret;
}

/*
 * Clear the specified keyring, creating an empty process keyring if one of the
 * special keyring IDs is used.
 *
 * The keyring must grant the caller Write permission and not have
 * KEY_FLAG_KEEP set for this to work.
 *
 * If successful, 0 will be returned.
 */
long keyctl_keyring_clear(key_serial_t ringid)
{
	key_ref_t keyring_ref;
	struct key *keyring;
	long ret;

	keyring_ref = lookup_user_key(ringid, KEY_LOOKUP_CREATE, KEY_NEED_WRITE);
	if (IS_ERR(keyring_ref)) {
		ret = PTR_ERR(keyring_ref);

		/* Root is permitted to invalidate certain special keyrings */
		if (capable(CAP_SYS_ADMIN)) {
			keyring_ref = lookup_user_key(ringid, 0, 0);
			if (IS_ERR(keyring_ref))
				goto error;
			if (test_bit(KEY_FLAG_ROOT_CAN_CLEAR,
				     &key_ref_to_ptr(keyring_ref)->flags))
				goto clear;
			goto error_put;
		}

		goto error;
	}

clear:
	keyring = key_ref_to_ptr(keyring_ref);
	if (test_bit(KEY_FLAG_KEEP, &keyring->flags))
		ret = -EPERM;
	else
		ret = keyring_clear(keyring);
error_put:
	key_ref_put(keyring_ref);
error:
	return ret;
}

/*
 * Create a link from a keyring to a key if there's no matching key in the
 * keyring, otherwise replace the link to the matching key with a link to the
 * new key.
 *
 * The key must grant the caller Link permission and the keyring must grant
 * the caller Write permission.  Furthermore, if an additional link is created,
 * the keyring's quota will be extended.
 *
 * If successful, 0 will be returned.
 */
long keyctl_keyring_link(key_serial_t id, key_serial_t ringid)
{
	key_ref_t keyring_ref, key_ref;
	long ret;

	keyring_ref = lookup_user_key(ringid, KEY_LOOKUP_CREATE, KEY_NEED_WRITE);
	if (IS_ERR(keyring_ref)) {
		ret = PTR_ERR(keyring_ref);
		goto error;
	}

	key_ref = lookup_user_key(id, KEY_LOOKUP_CREATE, KEY_NEED_LINK);
	if (IS_ERR(key_ref)) {
		ret = PTR_ERR(key_ref);
		goto error2;
	}

	ret = key_link(key_ref_to_ptr(keyring_ref), key_ref_to_ptr(key_ref));

	key_ref_put(key_ref);
error2:
	key_ref_put(keyring_ref);
error:
	return ret;
}

/*
 * Unlink a key from a keyring.
 *
 * The keyring must grant the caller Write permission for this to work; the key
 * itself need not grant the caller anything.  If the last link to a key is
 * removed then that key will be scheduled for destruction.
 *
 * Keys or keyrings with KEY_FLAG_KEEP set should not be unlinked.
 *
 * If successful, 0 will be returned.
 */
long keyctl_keyring_unlink(key_serial_t id, key_serial_t ringid)
{
	key_ref_t keyring_ref, key_ref;
	struct key *keyring, *key;
	long ret;

	keyring_ref = lookup_user_key(ringid, 0, KEY_NEED_WRITE);
	if (IS_ERR(keyring_ref)) {
		ret = PTR_ERR(keyring_ref);
		goto error;
	}

	key_ref = lookup_user_key(id, KEY_LOOKUP_FOR_UNLINK, 0);
	if (IS_ERR(key_ref)) {
		ret = PTR_ERR(key_ref);
		goto error2;
	}

	keyring = key_ref_to_ptr(keyring_ref);
	key = key_ref_to_ptr(key_ref);
	if (test_bit(KEY_FLAG_KEEP, &keyring->flags) &&
	    test_bit(KEY_FLAG_KEEP, &key->flags))
		ret = -EPERM;
	else
		ret = key_unlink(keyring, key);

	key_ref_put(key_ref);
error2:
	key_ref_put(keyring_ref);
error:
	return ret;
}

/*
 * Return a description of a key to userspace.
 *
 * The key must grant the caller View permission for this to work.
 *
 * If there's a buffer, we place up to buflen bytes of data into it formatted
 * in the following way:
 *
 *	type;uid;gid;perm;description<NUL>
 *
 * If successful, we return the amount of description available, irrespective
 * of how much we may have copied into the buffer.
*/ long keyctl_describe_key(key_serial_t keyid, char __user *buffer, size_t buflen) { struct key *key, *instkey; key_ref_t key_ref; char *infobuf; long ret; int desclen, infolen; key_ref = lookup_user_key(keyid, KEY_LOOKUP_PARTIAL, KEY_NEED_VIEW); if (IS_ERR(key_ref)) { /* viewing a key under construction is permitted if we have the * authorisation token handy */ if (PTR_ERR(key_ref) == -EACCES) { instkey = key_get_instantiation_authkey(keyid); if (!IS_ERR(instkey)) { key_put(instkey); key_ref = lookup_user_key(keyid, KEY_LOOKUP_PARTIAL, 0); if (!IS_ERR(key_ref)) goto okay; } } ret = PTR_ERR(key_ref); goto error; } okay: key = key_ref_to_ptr(key_ref); desclen = strlen(key->description); /* calculate how much information we're going to return */ ret = -ENOMEM; infobuf = kasprintf(GFP_KERNEL, "%s;%d;%d;%08x;", key->type->name, from_kuid_munged(current_user_ns(), key->uid), from_kgid_munged(current_user_ns(), key->gid), key->perm); if (!infobuf) goto error2; infolen = strlen(infobuf); ret = infolen + desclen + 1; /* consider returning the data */ if (buffer && buflen >= ret) { if (copy_to_user(buffer, infobuf, infolen) != 0 || copy_to_user(buffer + infolen, key->description, desclen + 1) != 0) ret = -EFAULT; } kfree(infobuf); error2: key_ref_put(key_ref); error: return ret; } /* * Search the specified keyring and any keyrings it links to for a matching * key. Only keyrings that grant the caller Search permission will be searched * (this includes the starting keyring). Only keys with Search permission can * be found. * * If successful, the found key will be linked to the destination keyring if * supplied and the key has Link permission, and the found key ID will be * returned. */ long keyctl_keyring_search(key_serial_t ringid, const char __user *_type, const char __user *_description, key_serial_t destringid) { struct key_type *ktype; key_ref_t keyring_ref, key_ref, dest_ref; char type[32], *description; long ret; /* pull the type and description into kernel space */ ret = key_get_type_from_user(type, _type, sizeof(type)); if (ret < 0) goto error; description = strndup_user(_description, KEY_MAX_DESC_SIZE); if (IS_ERR(description)) { ret = PTR_ERR(description); goto error; } /* get the keyring at which to begin the search */ keyring_ref = lookup_user_key(ringid, 0, KEY_NEED_SEARCH); if (IS_ERR(keyring_ref)) { ret = PTR_ERR(keyring_ref); goto error2; } /* get the destination keyring if specified */ dest_ref = NULL; if (destringid) { dest_ref = lookup_user_key(destringid, KEY_LOOKUP_CREATE, KEY_NEED_WRITE); if (IS_ERR(dest_ref)) { ret = PTR_ERR(dest_ref); goto error3; } } /* find the key type */ ktype = key_type_lookup(type); if (IS_ERR(ktype)) { ret = PTR_ERR(ktype); goto error4; } /* do the search */ key_ref = keyring_search(keyring_ref, ktype, description); if (IS_ERR(key_ref)) { ret = PTR_ERR(key_ref); /* treat lack or presence of a negative key the same */ if (ret == -EAGAIN) ret = -ENOKEY; goto error5; } /* link the resulting key to the destination keyring if we can */ if (dest_ref) { ret = key_permission(key_ref, KEY_NEED_LINK); if (ret < 0) goto error6; ret = key_link(key_ref_to_ptr(dest_ref), key_ref_to_ptr(key_ref)); if (ret < 0) goto error6; } ret = key_ref_to_ptr(key_ref)->serial; error6: key_ref_put(key_ref); error5: key_type_put(ktype); error4: key_ref_put(dest_ref); error3: key_ref_put(keyring_ref); error2: kfree(description); error: return ret; } /* * Read a key's payload. 
* * The key must either grant the caller Read permission, or it must grant the * caller Search permission when searched for from the process keyrings. * * If successful, we place up to buflen bytes of data into the buffer, if one * is provided, and return the amount of data that is available in the key, * irrespective of how much we copied into the buffer. */ long keyctl_read_key(key_serial_t keyid, char __user *buffer, size_t buflen) { struct key *key; key_ref_t key_ref; long ret; /* find the key first */ key_ref = lookup_user_key(keyid, 0, 0); if (IS_ERR(key_ref)) { ret = -ENOKEY; goto error; } key = key_ref_to_ptr(key_ref); ret = key_read_state(key); if (ret < 0) goto error2; /* Negatively instantiated */ /* see if we can read it directly */ ret = key_permission(key_ref, KEY_NEED_READ); if (ret == 0) goto can_read_key; if (ret != -EACCES) goto error2; /* we can't; see if it's searchable from this process's keyrings * - we automatically take account of the fact that it may be * dangling off an instantiation key */ if (!is_key_possessed(key_ref)) { ret = -EACCES; goto error2; } /* the key is probably readable - now try to read it */ can_read_key: ret = -EOPNOTSUPP; if (key->type->read) { /* Read the data with the semaphore held (since we might sleep) * to protect against the key being updated or revoked. */ down_read(&key->sem); ret = key_validate(key); if (ret == 0) ret = key->type->read(key, buffer, buflen); up_read(&key->sem); } error2: key_put(key); error: return ret; } /* * Change the ownership of a key * * The key must grant the caller Setattr permission for this to work, though * the key need not be fully instantiated yet. For the UID to be changed, or * for the GID to be changed to a group the caller is not a member of, the * caller must have sysadmin capability. If either uid or gid is -1 then that * attribute is not changed. * * If the UID is to be changed, the new user must have sufficient quota to * accept the key. The quota deduction will be removed from the old user to * the new user should the attribute be changed. * * If successful, 0 will be returned. */ long keyctl_chown_key(key_serial_t id, uid_t user, gid_t group) { struct key_user *newowner, *zapowner = NULL; struct key *key; key_ref_t key_ref; long ret; kuid_t uid; kgid_t gid; uid = make_kuid(current_user_ns(), user); gid = make_kgid(current_user_ns(), group); ret = -EINVAL; if ((user != (uid_t) -1) && !uid_valid(uid)) goto error; if ((group != (gid_t) -1) && !gid_valid(gid)) goto error; ret = 0; if (user == (uid_t) -1 && group == (gid_t) -1) goto error; key_ref = lookup_user_key(id, KEY_LOOKUP_CREATE | KEY_LOOKUP_PARTIAL, KEY_NEED_SETATTR); if (IS_ERR(key_ref)) { ret = PTR_ERR(key_ref); goto error; } key = key_ref_to_ptr(key_ref); /* make the changes with the locks held to prevent chown/chown races */ ret = -EACCES; down_write(&key->sem); if (!capable(CAP_SYS_ADMIN)) { /* only the sysadmin can chown a key to some other UID */ if (user != (uid_t) -1 && !uid_eq(key->uid, uid)) goto error_put; /* only the sysadmin can set the key's GID to a group other * than one of those that the current process subscribes to */ if (group != (gid_t) -1 && !gid_eq(gid, key->gid) && !in_group_p(gid)) goto error_put; } /* change the UID */ if (user != (uid_t) -1 && !uid_eq(uid, key->uid)) { ret = -ENOMEM; newowner = key_user_lookup(uid); if (!newowner) goto error_put; /* transfer the quota burden to the new user */ if (test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) { unsigned maxkeys = uid_eq(uid, GLOBAL_ROOT_UID) ? 
key_quota_root_maxkeys : key_quota_maxkeys; unsigned maxbytes = uid_eq(uid, GLOBAL_ROOT_UID) ? key_quota_root_maxbytes : key_quota_maxbytes; spin_lock(&newowner->lock); if (newowner->qnkeys + 1 >= maxkeys || newowner->qnbytes + key->quotalen >= maxbytes || newowner->qnbytes + key->quotalen < newowner->qnbytes) goto quota_overrun; newowner->qnkeys++; newowner->qnbytes += key->quotalen; spin_unlock(&newowner->lock); spin_lock(&key->user->lock); key->user->qnkeys--; key->user->qnbytes -= key->quotalen; spin_unlock(&key->user->lock); } atomic_dec(&key->user->nkeys); atomic_inc(&newowner->nkeys); if (key->state != KEY_IS_UNINSTANTIATED) { atomic_dec(&key->user->nikeys); atomic_inc(&newowner->nikeys); } zapowner = key->user; key->user = newowner; key->uid = uid; } /* change the GID */ if (group != (gid_t) -1) key->gid = gid; ret = 0; error_put: up_write(&key->sem); key_put(key); if (zapowner) key_user_put(zapowner); error: return ret; quota_overrun: spin_unlock(&newowner->lock); zapowner = newowner; ret = -EDQUOT; goto error_put; } /* * Change the permission mask on a key. * * The key must grant the caller Setattr permission for this to work, though * the key need not be fully instantiated yet. If the caller does not have * sysadmin capability, it may only change the permission on keys that it owns. */ long keyctl_setperm_key(key_serial_t id, key_perm_t perm) { struct key *key; key_ref_t key_ref; long ret; ret = -EINVAL; if (perm & ~(KEY_POS_ALL | KEY_USR_ALL | KEY_GRP_ALL | KEY_OTH_ALL)) goto error; key_ref = lookup_user_key(id, KEY_LOOKUP_CREATE | KEY_LOOKUP_PARTIAL, KEY_NEED_SETATTR); if (IS_ERR(key_ref)) { ret = PTR_ERR(key_ref); goto error; } key = key_ref_to_ptr(key_ref); /* make the changes with the locks held to prevent chown/chmod races */ ret = -EACCES; down_write(&key->sem); /* if we're not the sysadmin, we can only change a key that we own */ if (capable(CAP_SYS_ADMIN) || uid_eq(key->uid, current_fsuid())) { key->perm = perm; ret = 0; } up_write(&key->sem); key_put(key); error: return ret; } /* * Get the destination keyring for instantiation and check that the caller has * Write permission on it. */ static long get_instantiation_keyring(key_serial_t ringid, struct request_key_auth *rka, struct key **_dest_keyring) { key_ref_t dkref; *_dest_keyring = NULL; /* just return a NULL pointer if we weren't asked to make a link */ if (ringid == 0) return 0; /* if a specific keyring is nominated by ID, then use that */ if (ringid > 0) { dkref = lookup_user_key(ringid, KEY_LOOKUP_CREATE, KEY_NEED_WRITE); if (IS_ERR(dkref)) return PTR_ERR(dkref); *_dest_keyring = key_ref_to_ptr(dkref); return 0; } if (ringid == KEY_SPEC_REQKEY_AUTH_KEY) return -EINVAL; /* otherwise specify the destination keyring recorded in the * authorisation key (any KEY_SPEC_*_KEYRING) */ if (ringid >= KEY_SPEC_REQUESTOR_KEYRING) { *_dest_keyring = key_get(rka->dest_keyring); return 0; } return -ENOKEY; } /* * Change the request_key authorisation key on the current process. */ static int keyctl_change_reqkey_auth(struct key *key) { struct cred *new; new = prepare_creds(); if (!new) return -ENOMEM; key_put(new->request_key_auth); new->request_key_auth = key_get(key); return commit_creds(new); } /* * Instantiate a key with the specified payload and link the key into the * destination keyring if one is given. * * The caller must have the appropriate instantiation permit set for this to * work (see keyctl_assume_authority). No other permissions are required. * * If successful, 0 will be returned. 
*/ long keyctl_instantiate_key_common(key_serial_t id, struct iov_iter *from, key_serial_t ringid) { const struct cred *cred = current_cred(); struct request_key_auth *rka; struct key *instkey, *dest_keyring; size_t plen = from ? iov_iter_count(from) : 0; void *payload; long ret; kenter("%d,,%zu,%d", id, plen, ringid); if (!plen) from = NULL; ret = -EINVAL; if (plen > 1024 * 1024 - 1) goto error; /* the appropriate instantiation authorisation key must have been * assumed before calling this */ ret = -EPERM; instkey = cred->request_key_auth; if (!instkey) goto error; rka = instkey->payload.data[0]; if (rka->target_key->serial != id) goto error; /* pull the payload in if one was supplied */ payload = NULL; if (from) { ret = -ENOMEM; payload = kvmalloc(plen, GFP_KERNEL); if (!payload) goto error; ret = -EFAULT; if (!copy_from_iter_full(payload, plen, from)) goto error2; } /* find the destination keyring amongst those belonging to the * requesting task */ ret = get_instantiation_keyring(ringid, rka, &dest_keyring); if (ret < 0) goto error2; /* instantiate the key and link it into a keyring */ ret = key_instantiate_and_link(rka->target_key, payload, plen, dest_keyring, instkey); key_put(dest_keyring); /* discard the assumed authority if it's just been disabled by * instantiation of the key */ if (ret == 0) keyctl_change_reqkey_auth(NULL); error2: if (payload) { memzero_explicit(payload, plen); kvfree(payload); } error: return ret; } /* * Instantiate a key with the specified payload and link the key into the * destination keyring if one is given. * * The caller must have the appropriate instantiation permit set for this to * work (see keyctl_assume_authority). No other permissions are required. * * If successful, 0 will be returned. */ long keyctl_instantiate_key(key_serial_t id, const void __user *_payload, size_t plen, key_serial_t ringid) { if (_payload && plen) { struct iovec iov; struct iov_iter from; int ret; ret = import_single_range(WRITE, (void __user *)_payload, plen, &iov, &from); if (unlikely(ret)) return ret; return keyctl_instantiate_key_common(id, &from, ringid); } return keyctl_instantiate_key_common(id, NULL, ringid); } /* * Instantiate a key with the specified multipart payload and link the key into * the destination keyring if one is given. * * The caller must have the appropriate instantiation permit set for this to * work (see keyctl_assume_authority). No other permissions are required. * * If successful, 0 will be returned. */ long keyctl_instantiate_key_iov(key_serial_t id, const struct iovec __user *_payload_iov, unsigned ioc, key_serial_t ringid) { struct iovec iovstack[UIO_FASTIOV], *iov = iovstack; struct iov_iter from; long ret; if (!_payload_iov) ioc = 0; ret = import_iovec(WRITE, _payload_iov, ioc, ARRAY_SIZE(iovstack), &iov, &from); if (ret < 0) return ret; ret = keyctl_instantiate_key_common(id, &from, ringid); kfree(iov); return ret; } /* * Negatively instantiate the key with the given timeout (in seconds) and link * the key into the destination keyring if one is given. * * The caller must have the appropriate instantiation permit set for this to * work (see keyctl_assume_authority). No other permissions are required. * * The key and any links to the key will be automatically garbage collected * after the timeout expires. * * Negative keys are used to rate limit repeated request_key() calls by causing * them to return -ENOKEY until the negative key expires. * * If successful, 0 will be returned. 
*/ long keyctl_negate_key(key_serial_t id, unsigned timeout, key_serial_t ringid) { return keyctl_reject_key(id, timeout, ENOKEY, ringid); } /* * Negatively instantiate the key with the given timeout (in seconds) and error * code and link the key into the destination keyring if one is given. * * The caller must have the appropriate instantiation permit set for this to * work (see keyctl_assume_authority). No other permissions are required. * * The key and any links to the key will be automatically garbage collected * after the timeout expires. * * Negative keys are used to rate limit repeated request_key() calls by causing * them to return the specified error code until the negative key expires. * * If successful, 0 will be returned. */ long keyctl_reject_key(key_serial_t id, unsigned timeout, unsigned error, key_serial_t ringid) { const struct cred *cred = current_cred(); struct request_key_auth *rka; struct key *instkey, *dest_keyring; long ret; kenter("%d,%u,%u,%d", id, timeout, error, ringid); /* must be a valid error code and mustn't be a kernel special */ if (error <= 0 || error >= MAX_ERRNO || error == ERESTARTSYS || error == ERESTARTNOINTR || error == ERESTARTNOHAND || error == ERESTART_RESTARTBLOCK) return -EINVAL; /* the appropriate instantiation authorisation key must have been * assumed before calling this */ ret = -EPERM; instkey = cred->request_key_auth; if (!instkey) goto error; rka = instkey->payload.data[0]; if (rka->target_key->serial != id) goto error; /* find the destination keyring if present (which must also be * writable) */ ret = get_instantiation_keyring(ringid, rka, &dest_keyring); if (ret < 0) goto error; /* instantiate the key and link it into a keyring */ ret = key_reject_and_link(rka->target_key, timeout, error, dest_keyring, instkey); key_put(dest_keyring); /* discard the assumed authority if it's just been disabled by * instantiation of the key */ if (ret == 0) keyctl_change_reqkey_auth(NULL); error: return ret; } /* * Read or set the default keyring in which request_key() will cache keys and * return the old setting. * * If a thread or process keyring is specified then it will be created if it * doesn't yet exist. The old setting will be returned if successful. */ long keyctl_set_reqkey_keyring(int reqkey_defl) { struct cred *new; int ret, old_setting; old_setting = current_cred_xxx(jit_keyring); if (reqkey_defl == KEY_REQKEY_DEFL_NO_CHANGE) return old_setting; new = prepare_creds(); if (!new) return -ENOMEM; switch (reqkey_defl) { case KEY_REQKEY_DEFL_THREAD_KEYRING: ret = install_thread_keyring_to_cred(new); if (ret < 0) goto error; goto set; case KEY_REQKEY_DEFL_PROCESS_KEYRING: ret = install_process_keyring_to_cred(new); if (ret < 0) goto error; goto set; case KEY_REQKEY_DEFL_DEFAULT: case KEY_REQKEY_DEFL_SESSION_KEYRING: case KEY_REQKEY_DEFL_USER_KEYRING: case KEY_REQKEY_DEFL_USER_SESSION_KEYRING: case KEY_REQKEY_DEFL_REQUESTOR_KEYRING: goto set; case KEY_REQKEY_DEFL_NO_CHANGE: case KEY_REQKEY_DEFL_GROUP_KEYRING: default: ret = -EINVAL; goto error; } set: new->jit_keyring = reqkey_defl; commit_creds(new); return old_setting; error: abort_creds(new); return ret; } /* * Set or clear the timeout on a key. * * Either the key must grant the caller Setattr permission or else the caller * must hold an instantiation authorisation token for the key. * * The timeout is either 0 to clear the timeout, or a number of seconds from * the current time. The key and any links to the key will be automatically * garbage collected after the timeout expires. 
 * * Keys with KEY_FLAG_KEEP set should not be timed out. * * If successful, 0 is returned. */ long keyctl_set_timeout(key_serial_t id, unsigned timeout) { struct key *key, *instkey; key_ref_t key_ref; long ret; key_ref = lookup_user_key(id, KEY_LOOKUP_CREATE | KEY_LOOKUP_PARTIAL, KEY_NEED_SETATTR); if (IS_ERR(key_ref)) { /* setting the timeout on a key under construction is permitted * if we have the authorisation token handy */ if (PTR_ERR(key_ref) == -EACCES) { instkey = key_get_instantiation_authkey(id); if (!IS_ERR(instkey)) { key_put(instkey); key_ref = lookup_user_key(id, KEY_LOOKUP_PARTIAL, 0); if (!IS_ERR(key_ref)) goto okay; } } ret = PTR_ERR(key_ref); goto error; } okay: key = key_ref_to_ptr(key_ref); ret = 0; if (test_bit(KEY_FLAG_KEEP, &key->flags)) ret = -EPERM; else key_set_timeout(key, timeout); key_put(key); error: return ret; } /* * Assume (or clear) the authority to instantiate the specified key. * * This sets the authoritative token currently in force for key instantiation. * This must be done for a key to be instantiated. It has the effect of making * available all the keys from the caller of the request_key() that created a * key to request_key() calls made by the caller of this function. * * The caller must have the instantiation key in their process keyrings, with * Search permission granted to the caller. * * If the ID given is 0, then the setting will be cleared and 0 returned. * * If the ID given has a matching authorisation key, then that key will be * set and its ID will be returned. The authorisation key can be read to get * the callout information passed to request_key(). */ long keyctl_assume_authority(key_serial_t id) { struct key *authkey; long ret; /* special key IDs aren't permitted */ ret = -EINVAL; if (id < 0) goto error; /* we divest ourselves of authority if given an ID of 0 */ if (id == 0) { ret = keyctl_change_reqkey_auth(NULL); goto error; } /* attempt to assume the authority temporarily granted to us whilst we * instantiate the specified key * - the authorisation key must be in the current task's keyrings * somewhere */ authkey = key_get_instantiation_authkey(id); if (IS_ERR(authkey)) { ret = PTR_ERR(authkey); goto error; } ret = keyctl_change_reqkey_auth(authkey); if (ret == 0) ret = authkey->serial; key_put(authkey); error: return ret; } /* * Get a key's LSM security label. * * The key must grant the caller View permission for this to work. * * If there's a buffer, then up to buflen bytes of data will be placed into it. * * If successful, the amount of information available will be returned, * irrespective of how much was copied (including the terminal NUL). 
*/ long keyctl_get_security(key_serial_t keyid, char __user *buffer, size_t buflen) { struct key *key, *instkey; key_ref_t key_ref; char *context; long ret; key_ref = lookup_user_key(keyid, KEY_LOOKUP_PARTIAL, KEY_NEED_VIEW); if (IS_ERR(key_ref)) { if (PTR_ERR(key_ref) != -EACCES) return PTR_ERR(key_ref); /* viewing a key under construction is also permitted if we * have the authorisation token handy */ instkey = key_get_instantiation_authkey(keyid); if (IS_ERR(instkey)) return PTR_ERR(instkey); key_put(instkey); key_ref = lookup_user_key(keyid, KEY_LOOKUP_PARTIAL, 0); if (IS_ERR(key_ref)) return PTR_ERR(key_ref); } key = key_ref_to_ptr(key_ref); ret = security_key_getsecurity(key, &context); if (ret == 0) { /* if no information was returned, give userspace an empty * string */ ret = 1; if (buffer && buflen > 0 && copy_to_user(buffer, "", 1) != 0) ret = -EFAULT; } else if (ret > 0) { /* return as much data as there's room for */ if (buffer && buflen > 0) { if (buflen > ret) buflen = ret; if (copy_to_user(buffer, context, buflen) != 0) ret = -EFAULT; } kfree(context); } key_ref_put(key_ref); return ret; } /* * Attempt to install the calling process's session keyring on the process's * parent process. * * The keyring must exist and must grant the caller LINK permission, and the * parent process must be single-threaded and must have the same effective * ownership as this process and mustn't be SUID/SGID. * * The keyring will be emplaced on the parent when it next resumes userspace. * * If successful, 0 will be returned. */ long keyctl_session_to_parent(void) { struct task_struct *me, *parent; const struct cred *mycred, *pcred; struct callback_head *newwork, *oldwork; key_ref_t keyring_r; struct cred *cred; int ret; keyring_r = lookup_user_key(KEY_SPEC_SESSION_KEYRING, 0, KEY_NEED_LINK); if (IS_ERR(keyring_r)) return PTR_ERR(keyring_r); ret = -ENOMEM; /* our parent is going to need a new cred struct, a new tgcred struct * and new security data, so we allocate them here to prevent ENOMEM in * our parent */ cred = cred_alloc_blank(); if (!cred) goto error_keyring; newwork = &cred->rcu; cred->session_keyring = key_ref_to_ptr(keyring_r); keyring_r = NULL; init_task_work(newwork, key_change_session_keyring); me = current; rcu_read_lock(); write_lock_irq(&tasklist_lock); ret = -EPERM; oldwork = NULL; parent = me->real_parent; /* the parent mustn't be init and mustn't be a kernel thread */ if (parent->pid <= 1 || !parent->mm) goto unlock; /* the parent must be single threaded */ if (!thread_group_empty(parent)) goto unlock; /* the parent and the child must have different session keyrings or * there's no point */ mycred = current_cred(); pcred = __task_cred(parent); if (mycred == pcred || mycred->session_keyring == pcred->session_keyring) { ret = 0; goto unlock; } /* the parent must have the same effective ownership and mustn't be * SUID/SGID */ if (!uid_eq(pcred->uid, mycred->euid) || !uid_eq(pcred->euid, mycred->euid) || !uid_eq(pcred->suid, mycred->euid) || !gid_eq(pcred->gid, mycred->egid) || !gid_eq(pcred->egid, mycred->egid) || !gid_eq(pcred->sgid, mycred->egid)) goto unlock; /* the keyrings must have the same UID */ if ((pcred->session_keyring && !uid_eq(pcred->session_keyring->uid, mycred->euid)) || !uid_eq(mycred->session_keyring->uid, mycred->euid)) goto unlock; /* cancel an already pending keyring replacement */ oldwork = task_work_cancel(parent, key_change_session_keyring); /* the replacement session keyring is applied just prior to userspace * restarting */ ret = task_work_add(parent, 
newwork, true); if (!ret) newwork = NULL; unlock: write_unlock_irq(&tasklist_lock); rcu_read_unlock(); if (oldwork) put_cred(container_of(oldwork, struct cred, rcu)); if (newwork) put_cred(cred); return ret; error_keyring: key_ref_put(keyring_r); return ret; } /* * Apply a restriction to a given keyring. * * The caller must have Setattr permission to change keyring restrictions. * * The requested type name may be a NULL pointer to reject all attempts * to link to the keyring. If _type is non-NULL, _restriction can be * NULL or a pointer to a string describing the restriction. If _type is * NULL, _restriction must also be NULL. * * Returns 0 if successful. */ long keyctl_restrict_keyring(key_serial_t id, const char __user *_type, const char __user *_restriction) { key_ref_t key_ref; bool link_reject = !_type; char type[32]; char *restriction = NULL; long ret; key_ref = lookup_user_key(id, 0, KEY_NEED_SETATTR); if (IS_ERR(key_ref)) return PTR_ERR(key_ref); if (_type) { ret = key_get_type_from_user(type, _type, sizeof(type)); if (ret < 0) goto error; } if (_restriction) { if (!_type) { ret = -EINVAL; goto error; } restriction = strndup_user(_restriction, PAGE_SIZE); if (IS_ERR(restriction)) { ret = PTR_ERR(restriction); goto error; } } ret = keyring_restrict(key_ref, link_reject ? NULL : type, restriction); kfree(restriction); error: key_ref_put(key_ref); return ret; } /* * The key control system call */ SYSCALL_DEFINE5(keyctl, int, option, unsigned long, arg2, unsigned long, arg3, unsigned long, arg4, unsigned long, arg5) { switch (option) { case KEYCTL_GET_KEYRING_ID: return keyctl_get_keyring_ID((key_serial_t) arg2, (int) arg3); case KEYCTL_JOIN_SESSION_KEYRING: return keyctl_join_session_keyring((const char __user *) arg2); case KEYCTL_UPDATE: return keyctl_update_key((key_serial_t) arg2, (const void __user *) arg3, (size_t) arg4); case KEYCTL_REVOKE: return keyctl_revoke_key((key_serial_t) arg2); case KEYCTL_DESCRIBE: return keyctl_describe_key((key_serial_t) arg2, (char __user *) arg3, (unsigned) arg4); case KEYCTL_CLEAR: return keyctl_keyring_clear((key_serial_t) arg2); case KEYCTL_LINK: return keyctl_keyring_link((key_serial_t) arg2, (key_serial_t) arg3); case KEYCTL_UNLINK: return keyctl_keyring_unlink((key_serial_t) arg2, (key_serial_t) arg3); case KEYCTL_SEARCH: return keyctl_keyring_search((key_serial_t) arg2, (const char __user *) arg3, (const char __user *) arg4, (key_serial_t) arg5); case KEYCTL_READ: return keyctl_read_key((key_serial_t) arg2, (char __user *) arg3, (size_t) arg4); case KEYCTL_CHOWN: return keyctl_chown_key((key_serial_t) arg2, (uid_t) arg3, (gid_t) arg4); case KEYCTL_SETPERM: return keyctl_setperm_key((key_serial_t) arg2, (key_perm_t) arg3); case KEYCTL_INSTANTIATE: return keyctl_instantiate_key((key_serial_t) arg2, (const void __user *) arg3, (size_t) arg4, (key_serial_t) arg5); case KEYCTL_NEGATE: return keyctl_negate_key((key_serial_t) arg2, (unsigned) arg3, (key_serial_t) arg4); case KEYCTL_SET_REQKEY_KEYRING: return keyctl_set_reqkey_keyring(arg2); case KEYCTL_SET_TIMEOUT: return keyctl_set_timeout((key_serial_t) arg2, (unsigned) arg3); case KEYCTL_ASSUME_AUTHORITY: return keyctl_assume_authority((key_serial_t) arg2); case KEYCTL_GET_SECURITY: return keyctl_get_security((key_serial_t) arg2, (char __user *) arg3, (size_t) arg4); case KEYCTL_SESSION_TO_PARENT: return keyctl_session_to_parent(); case KEYCTL_REJECT: return keyctl_reject_key((key_serial_t) arg2, (unsigned) arg3, (unsigned) arg4, (key_serial_t) arg5); case KEYCTL_INSTANTIATE_IOV: return 
keyctl_instantiate_key_iov( (key_serial_t) arg2, (const struct iovec __user *) arg3, (unsigned) arg4, (key_serial_t) arg5); case KEYCTL_INVALIDATE: return keyctl_invalidate_key((key_serial_t) arg2); case KEYCTL_GET_PERSISTENT: return keyctl_get_persistent((uid_t)arg2, (key_serial_t)arg3); case KEYCTL_DH_COMPUTE: return keyctl_dh_compute((struct keyctl_dh_params __user *) arg2, (char __user *) arg3, (size_t) arg4, (struct keyctl_kdf_params __user *) arg5); case KEYCTL_RESTRICT_KEYRING: return keyctl_restrict_keyring((key_serial_t) arg2, (const char __user *) arg3, (const char __user *) arg4); default: return -EOPNOTSUPP; } }
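For orientation, here is a minimal userspace sketch that drives the KEYCTL_READ branch of the dispatcher above through raw syscalls. It assumes a Linux system with the keys subsystem enabled; the constants come from the kernel UAPI header <linux/keyctl.h>, while the key type ("user"), description, and payload are made-up illustration values.

#include <linux/keyctl.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	/* Create a "user" key attached to the session keyring. */
	long id = syscall(SYS_add_key, "user", "demo", "secret", 6,
			  KEY_SPEC_SESSION_KEYRING);
	if (id < 0) {
		perror("add_key");
		return 1;
	}

	/* Read the payload back via the KEYCTL_READ case of the dispatcher. */
	char buf[64];
	long n = syscall(SYS_keyctl, KEYCTL_READ, id, buf, sizeof(buf));
	if (n < 0) {
		perror("keyctl(KEYCTL_READ)");
		return 1;
	}

	printf("read %ld bytes\n", n);
	return 0;
}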
long keyctl_read_key(key_serial_t keyid, char __user *buffer, size_t buflen) { struct key *key; key_ref_t key_ref; long ret; /* find the key first */ key_ref = lookup_user_key(keyid, 0, 0); if (IS_ERR(key_ref)) { ret = -ENOKEY; goto error; } key = key_ref_to_ptr(key_ref); if (test_bit(KEY_FLAG_NEGATIVE, &key->flags)) { ret = -ENOKEY; goto error2; } /* see if we can read it directly */ ret = key_permission(key_ref, KEY_NEED_READ); if (ret == 0) goto can_read_key; if (ret != -EACCES) goto error2; /* we can't; see if it's searchable from this process's keyrings * - we automatically take account of the fact that it may be * dangling off an instantiation key */ if (!is_key_possessed(key_ref)) { ret = -EACCES; goto error2; } /* the key is probably readable - now try to read it */ can_read_key: ret = -EOPNOTSUPP; if (key->type->read) { /* Read the data with the semaphore held (since we might sleep) * to protect against the key being updated or revoked. */ down_read(&key->sem); ret = key_validate(key); if (ret == 0) ret = key->type->read(key, buffer, buflen); up_read(&key->sem); } error2: key_put(key); error: return ret; }
long keyctl_read_key(key_serial_t keyid, char __user *buffer, size_t buflen) { struct key *key; key_ref_t key_ref; long ret; /* find the key first */ key_ref = lookup_user_key(keyid, 0, 0); if (IS_ERR(key_ref)) { ret = -ENOKEY; goto error; } key = key_ref_to_ptr(key_ref); ret = key_read_state(key); if (ret < 0) goto error2; /* Negatively instantiated */ /* see if we can read it directly */ ret = key_permission(key_ref, KEY_NEED_READ); if (ret == 0) goto can_read_key; if (ret != -EACCES) goto error2; /* we can't; see if it's searchable from this process's keyrings * - we automatically take account of the fact that it may be * dangling off an instantiation key */ if (!is_key_possessed(key_ref)) { ret = -EACCES; goto error2; } /* the key is probably readable - now try to read it */ can_read_key: ret = -EOPNOTSUPP; if (key->type->read) { /* Read the data with the semaphore held (since we might sleep) * to protect against the key being updated or revoked. */ down_read(&key->sem); ret = key_validate(key); if (ret == 0) ret = key->type->read(key, buffer, buflen); up_read(&key->sem); } error2: key_put(key); error: return ret; }
{'added': [(769, '\tret = key_read_state(key);'), (770, '\tif (ret < 0)'), (771, '\t\tgoto error2; /* Negatively instantiated */'), (903, '\t\tif (key->state != KEY_IS_UNINSTANTIATED) {')], 'deleted': [(769, '\tif (test_bit(KEY_FLAG_NEGATIVE, &key->flags)) {'), (770, '\t\tret = -ENOKEY;'), (771, '\t\tgoto error2;'), (772, '\t}'), (904, '\t\tif (test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) {')]}
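The diff above is the essence of the fix: the lock-free test_bit() checks on KEY_FLAG_NEGATIVE and KEY_FLAG_INSTANTIATED are replaced by reads of a single key->state field, so the "negative", "uninstantiated", and "positive" conditions can no longer be observed half-updated while another thread instantiates the key. As best I can reconstruct it, the patch adds an accessor along the following lines in include/linux/key.h; treat this as a hedged sketch rather than a verbatim quote of the upstream helper.

/*
 * Sketch of the state accessor the patched functions rely on. key->state
 * is negative for a negatively instantiated key (the value is minus the
 * error to return), KEY_IS_UNINSTANTIATED (0) while the key is still under
 * construction, and KEY_IS_POSITIVE (1) once it is instantiated.
 */
static inline short key_read_state(const struct key *key)
{
	/* Barrier versus mark_key_instantiated(). */
	return smp_load_acquire(&key->state);
}

This is also why func_after can fold the negative check into the return path: key_read_state() yields a negative errno-style value for a negatively instantiated key, so "ret = key_read_state(key); if (ret < 0) goto error2;" both detects the state and selects the error code to return.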
4
5
1115
5977
https://github.com/torvalds/linux
CVE-2017-15951
['CWE-20']
mqtt_sn_client.c
mqttSnClientDisconnect
/** * @file mqtt_sn_client.c * @brief MQTT-SN client * * @section License * * SPDX-License-Identifier: GPL-2.0-or-later * * Copyright (C) 2010-2020 Oryx Embedded SARL. All rights reserved. * * This file is part of CycloneTCP Open. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. * * @author Oryx Embedded SARL (www.oryx-embedded.com) * @version 2.0.0 **/ //Switch to the appropriate trace level #define TRACE_LEVEL MQTT_SN_TRACE_LEVEL //Dependencies #include "core/net.h" #include "mqtt_sn/mqtt_sn_client.h" #include "mqtt_sn/mqtt_sn_client_message.h" #include "mqtt_sn/mqtt_sn_client_transport.h" #include "mqtt_sn/mqtt_sn_client_misc.h" #include "debug.h" //Check TCP/IP stack configuration #if (MQTT_SN_CLIENT_SUPPORT == ENABLED) /** * @brief Initialize MQTT-SN client context * @param[in] context Pointer to the MQTT-SN client context * @return Error code **/ error_t mqttSnClientInit(MqttSnClientContext *context) { #if (MQTT_SN_CLIENT_DTLS_SUPPORT == ENABLED) error_t error; #endif //Make sure the MQTT-SN client context is valid if(context == NULL) return ERROR_INVALID_PARAMETER; //Clear MQTT-SN client context osMemset(context, 0, sizeof(MqttSnClientContext)); #if (MQTT_SN_CLIENT_DTLS_SUPPORT == ENABLED) //Initialize DTLS session state error = tlsInitSessionState(&context->dtlsSession); //Any error to report? 
if(error) return error; #endif //Initialize MQTT-SN client state context->state = MQTT_SN_CLIENT_STATE_DISCONNECTED; //Default transport protocol context->transportProtocol = MQTT_SN_TRANSPORT_PROTOCOL_UDP; //Default timeout context->timeout = MQTT_SN_CLIENT_DEFAULT_TIMEOUT; //Default keep-alive time interval context->keepAlive = MQTT_SN_CLIENT_DEFAULT_KEEP_ALIVE; //Initialize message identifier context->msgId = 0; //Successful initialization return NO_ERROR; } /** * @brief Set the transport protocol to be used * @param[in] context Pointer to the MQTT-SN client context * @param[in] transportProtocol Transport protocol to be used (UDP or DTLS) * @return Error code **/ error_t mqttSnClientSetTransportProtocol(MqttSnClientContext *context, MqttSnTransportProtocol transportProtocol) { //Make sure the MQTT-SN client context is valid if(context == NULL) return ERROR_INVALID_PARAMETER; //Save the transport protocol to be used context->transportProtocol = transportProtocol; //Successful processing return NO_ERROR; } #if (MQTT_SN_CLIENT_DTLS_SUPPORT == ENABLED) /** * @brief Register DTLS initialization callback function * @param[in] context Pointer to the MQTT-SN client context * @param[in] callback DTLS initialization callback function * @return Error code **/ error_t mqttSnClientRegisterDtlsInitCallback(MqttSnClientContext *context, MqttSnClientDtlsInitCallback callback) { //Check parameters if(context == NULL || callback == NULL) return ERROR_INVALID_PARAMETER; //Save callback function context->dtlsInitCallback = callback; //Successful processing return NO_ERROR; } #endif /** * @brief Register publish callback function * @param[in] context Pointer to the MQTT-SN client context * @param[in] callback Callback function to be called when a PUBLISH message * is received * @return Error code **/ error_t mqttSnClientRegisterPublishCallback(MqttSnClientContext *context, MqttSnClientPublishCallback callback) { //Make sure the MQTT-SN client context is valid if(context == NULL) return ERROR_INVALID_PARAMETER; //Save callback function context->publishCallback = callback; //Successful processing return NO_ERROR; } /** * @brief Set the list of predefined topics * @param[in] context Pointer to the MQTT-SN client context * @param[in] predefinedTopics List of predefined topics * @param[in] size Number of predefined topics * @return Error code **/ error_t mqttSnClientSetPredefinedTopics(MqttSnClientContext *context, MqttSnPredefinedTopic *predefinedTopics, uint_t size) { //Make sure the MQTT-SN client context is valid if(context == NULL) return ERROR_INVALID_PARAMETER; //Check parameters if(predefinedTopics == NULL && size != 0) return ERROR_INVALID_PARAMETER; //Save the list of predefined topics context->predefinedTopicTable = predefinedTopics; context->predefinedTopicTableSize = size; //Successful processing return NO_ERROR; } /** * @brief Set communication timeout * @param[in] context Pointer to the MQTT-SN client context * @param[in] timeout Timeout value, in milliseconds * @return Error code **/ error_t mqttSnClientSetTimeout(MqttSnClientContext *context, systime_t timeout) { //Make sure the MQTT-SN client context is valid if(context == NULL) return ERROR_INVALID_PARAMETER; //Save timeout value context->timeout = timeout; //Successful processing return NO_ERROR; } /** * @brief Set keep-alive value * @param[in] context Pointer to the MQTT-SN client context * @param[in] keepAlive Keep-alive interval, in milliseconds * @return Error code **/ error_t mqttSnClientSetKeepAlive(MqttSnClientContext *context, 
systime_t keepAlive) { //Make sure the MQTT-SN client context is valid if(context == NULL) return ERROR_INVALID_PARAMETER; //Save keep-alive value context->keepAlive = keepAlive; //Successful processing return NO_ERROR; } /** * @brief Set client identifier * @param[in] context Pointer to the MQTT-SN client context * @param[in] clientId NULL-terminated string containing the client identifier * @return Error code **/ error_t mqttSnClientSetIdentifier(MqttSnClientContext *context, const char_t *clientId) { //Check parameters if(context == NULL || clientId == NULL) return ERROR_INVALID_PARAMETER; //Make sure the length of the client identifier is acceptable if(osStrlen(clientId) > MQTT_SN_CLIENT_MAX_ID_LEN) return ERROR_INVALID_LENGTH; //Save client identifier osStrcpy(context->clientId, clientId); //Successful processing return NO_ERROR; } /** * @brief Specify the Will message * @param[in] context Pointer to the MQTT-SN client context * @param[in] topic Will topic name * @param[in] message Will message * @param[in] length Length of the Will message * @param[in] qos QoS level to be used when publishing the Will message * @param[in] retain This flag specifies if the Will message is to be retained * @return Error code **/ error_t mqttSnClientSetWillMessage(MqttSnClientContext *context, const char_t *topic, const void *message, size_t length, MqttSnQosLevel qos, bool_t retain) { //Check parameters if(context == NULL || topic == NULL) return ERROR_INVALID_PARAMETER; //Make sure the length of the Will topic is acceptable if(osStrlen(topic) > MQTT_SN_CLIENT_MAX_WILL_TOPIC_LEN) return ERROR_INVALID_LENGTH; //Save Will topic osStrcpy(context->willMessage.topic, topic); //Any message payload if(length > 0) { //Sanity check if(message == NULL) return ERROR_INVALID_PARAMETER; //Make sure the length of the Will message payload is acceptable if(osStrlen(message) > MQTT_SN_CLIENT_MAX_WILL_PAYLOAD_LEN) return ERROR_INVALID_LENGTH; //Save Will message payload osMemcpy(context->willMessage.payload, message, length); } //Length of the Will message payload context->willMessage.length = length; //QoS level to be used when publishing the Will message context->willMessage.flags.qos = qos; //This flag specifies if the Will message is to be retained context->willMessage.flags.retain = retain; //Successful processing return NO_ERROR; } /** * @brief Bind the MQTT-SN client to a particular network interface * @param[in] context Pointer to the MQTT-SN client context * @param[in] interface Network interface to be used * @return Error code **/ error_t mqttSnClientBindToInterface(MqttSnClientContext *context, NetInterface *interface) { //Make sure the MQTT-SN client context is valid if(context == NULL) return ERROR_INVALID_PARAMETER; //Explicitly associate the MQTT client with the specified interface context->interface = interface; //Successful processing return NO_ERROR; } /** * @brief Specify the address of the gateway * @param[in] context Pointer to the MQTT-SN client context * @param[in] gwIpAddr Gateway IP address * @param[in] gwPort Gateway port number * @return Error code **/ error_t mqttSnClientSetGateway(MqttSnClientContext *context, const IpAddr *gwIpAddr, uint16_t gwPort) { //Check parameters if(context == NULL || gwIpAddr == NULL) return ERROR_INVALID_PARAMETER; //Save the IP address and the port number of the MQTT-SN gateway context->gwIpAddr = *gwIpAddr; context->gwPort = gwPort; //Successful processing return NO_ERROR; } /** * @brief Search for a gateway * @param[in] context Pointer to the MQTT-SN client 
context * @param[in] destIpAddr Destination IP address * @param[in] destPort Destination port number * @return Error code **/ error_t mqttSnClientSearchGateway(MqttSnClientContext *context, const IpAddr *destIpAddr, uint16_t destPort) { error_t error; systime_t time; //Check parameters if(context == NULL || destIpAddr == NULL) return ERROR_INVALID_PARAMETER; //Initialize status code error = NO_ERROR; //Gateway discovery procedure while(!error) { //Get current time time = osGetSystemTime(); //Check current state if(context->state == MQTT_SN_CLIENT_STATE_DISCONNECTED) { //Open network connection error = mqttSnClientOpenConnection(context, FALSE); //Check status code if(!error) { //Save current time context->startTime = time; context->retransmitStartTime = time; //To prevent broadcast storms when multiple clients start searching //for GW almost at the same time, the sending of the SEARCHGW message //is delayed by a random time between 0 and TSEARCHGW context->retransmitTimeout = netGetRandRange(0, MQTT_SN_CLIENT_SEARCH_DELAY); //Start searching for gateways context->state = MQTT_SN_CLIENT_STATE_SEARCHING; } } else if(context->state == MQTT_SN_CLIENT_STATE_SEARCHING) { //Check whether the timeout has elapsed if(timeCompare(time, context->startTime + context->timeout) >= 0) { //Abort the retransmission procedure error = ERROR_TIMEOUT; } else if(timeCompare(time, context->retransmitStartTime + context->retransmitTimeout) >= 0) { //Set retransmission timeout context->retransmitTimeout = MQTT_SN_CLIENT_RETRY_TIMEOUT; //If the retry timer times out and the expected gateway's reply //is not received, the client retransmits the message error = mqttSnClientSendSearchGw(context, 0, destIpAddr, destPort); } else { //Wait for the gateway's reply error = mqttSnClientProcessEvents(context, MQTT_SN_CLIENT_TICK_INTERVAL); } } else if(context->state == MQTT_SN_CLIENT_STATE_RESP_RECEIVED) { //Check the type of the received message if(context->msgType == MQTT_SN_MSG_TYPE_GWINFO) { //Close network connection mqttSnClientCloseConnection(context); //A MQTT-SN gateway has been found context->state = MQTT_SN_CLIENT_STATE_DISCONNECTED; break; } else { //Report an error error = ERROR_UNEXPECTED_RESPONSE; } } else { //Invalid state error = ERROR_WRONG_STATE; } } //Any error to report? 
if(error != NO_ERROR && error != ERROR_WOULD_BLOCK) { //Clean up side effects mqttSnClientCloseConnection(context); //Update MQTT-SN client state context->state = MQTT_SN_CLIENT_STATE_DISCONNECTED; } //Return status code return error; } /** * @brief Establish connection with the MQTT-SN gateway * @param[in] context Pointer to the MQTT-SN client context * @param[in] cleanSession If this flag is set, then the client and server * must discard any previous session and start a new one * @return Error code **/ error_t mqttSnClientConnect(MqttSnClientContext *context, bool_t cleanSession) { error_t error; systime_t time; //Make sure the MQTT-SN client context is valid if(context == NULL) return ERROR_INVALID_PARAMETER; //Initialize status code error = NO_ERROR; //Establish connection with the MQTT-SN gateway while(!error) { //Get current time time = osGetSystemTime(); //Check current state if(context->state == MQTT_SN_CLIENT_STATE_DISCONNECTED) { //Open network connection error = mqttSnClientOpenConnection(context, TRUE); //Check status code if(!error) { //Save current time context->startTime = time; //Update MQTT-SN client state context->state = MQTT_SN_CLIENT_STATE_CONNECTING; } } else if(context->state == MQTT_SN_CLIENT_STATE_CONNECTING) { //Establish DTLS connection error = mqttSnClientEstablishConnection(context); //Check status code if(error == NO_ERROR) { //Check whether the CleanSession flag is set if(cleanSession) { //Discard previous session state osMemset(context->topicTable, 0, sizeof(context->topicTable)); osMemset(context->msgIdTable, 0, sizeof(context->msgIdTable)); } //The CONNECT message is sent by a client to setup a connection error = mqttSnClientSendConnect(context, cleanSession); } else if(error == ERROR_WOULD_BLOCK || error == ERROR_TIMEOUT) { //Check whether the timeout has elapsed if(timeCompare(time, context->startTime + context->timeout) >= 0) { //Report an error error = ERROR_TIMEOUT; } } else { //Failed to establish DTLS connection } } else if(context->state == MQTT_SN_CLIENT_STATE_SENDING_REQ) { //Check whether the timeout has elapsed if(timeCompare(time, context->startTime + context->timeout) >= 0) { //Abort the retransmission procedure error = ERROR_TIMEOUT; } else if(timeCompare(time, context->retransmitStartTime + MQTT_SN_CLIENT_RETRY_TIMEOUT) >= 0) { //If the retry timer times out and the expected gateway's reply //is not received, the client retransmits the message error = mqttSnClientSendConnect(context, cleanSession); } else { //Wait for the gateway's reply error = mqttSnClientProcessEvents(context, MQTT_SN_CLIENT_TICK_INTERVAL); } } else if(context->state == MQTT_SN_CLIENT_STATE_RESP_RECEIVED) { //Check the type of the received message if(context->msgType == MQTT_SN_MSG_TYPE_CONNACK) { //If the connection request has not been accepted, the failure reason //is encoded in the return code field of the CONNACK message if(context->returnCode == MQTT_SN_RETURN_CODE_ACCEPTED) { //The connection request has been accepted by the gateway context->state = MQTT_SN_CLIENT_STATE_ACTIVE; } else { //Terminate DTLS connection mqttSnClientShutdownConnection(context); //The connection request has been rejected by the gateway error = ERROR_REQUEST_REJECTED; } } else { //Report an error error = ERROR_UNEXPECTED_RESPONSE; } } else if(context->state == MQTT_SN_CLIENT_STATE_ACTIVE) { //The MQTT-SN client is connected break; } else { //Invalid state error = ERROR_WRONG_STATE; } } //Any error to report? 
if(error != NO_ERROR && error != ERROR_WOULD_BLOCK) { //Clean up side effects mqttSnClientCloseConnection(context); //Update MQTT-SN client state context->state = MQTT_SN_CLIENT_STATE_DISCONNECTED; } //Return status code return error; } /** * @brief Publish message * @param[in] context Pointer to the MQTT-SN client context * @param[in] topicName Topic name * @param[in] message Message payload * @param[in] length Length of the message payload * @param[in] qos QoS level to be used when publishing the message * @param[in] retain This flag specifies if the message is to be retained * @param[in] dup This flag specifies if the message is sent for the first * time or if the message is retransmitted * @param[in,out] msgId Message identifier used to send the PUBLISH message * @return Error code **/ error_t mqttSnClientPublish(MqttSnClientContext *context, const char_t *topicName, const void *message, size_t length, MqttSnQosLevel qos, bool_t retain, bool_t dup, uint16_t *msgId) { error_t error; systime_t time; uint16_t publishMsgId; //Check parameters if(context == NULL || topicName == NULL) return ERROR_INVALID_PARAMETER; if(message == NULL && length != 0) return ERROR_INVALID_PARAMETER; if(dup && msgId == NULL) return ERROR_INVALID_PARAMETER; //Initialize status code error = NO_ERROR; //Initialize message identifier if(dup) publishMsgId = *msgId; else publishMsgId = 0; //Publish procedure while(!error) { //Get current time time = osGetSystemTime(); //Check current state if(context->state == MQTT_SN_CLIENT_STATE_ACTIVE) { //Save current time context->startTime = time; //Check whether the register procedure is needed if(mqttSnClientIsShortTopicName(topicName) == FALSE && mqttSnClientFindTopicName(context, topicName) == 0 && mqttSnClientFindPredefTopicName(context, topicName) == 0) { //The message identifier allows the sender to match a message with //its corresponding acknowledgment mqttSnClientGenerateMessageId(context); //To register a topic name a client sends a REGISTER message to //the gateway error = mqttSnClientSendRegister(context, topicName); } else { //The message ID is only relevant in case of QoS levels 1 and 2 if(qos == MQTT_SN_QOS_LEVEL_1 || qos == MQTT_SN_QOS_LEVEL_2) { //The message identifier allows the sender to match a message with //its corresponding acknowledgment if(!dup) publishMsgId = mqttSnClientGenerateMessageId(context); } else { //For QoS level 0, the message identifier is coded 0x0000 publishMsgId = 0; } //The client can start publishing data relating to the registered //topic name by sending PUBLISH messages to the gateway error = mqttSnClientSendPublish(context, publishMsgId, topicName, message, length, qos, retain, dup); //In the QoS 0, no response is sent by the receiver and no retry //is performed by the sender if(qos != MQTT_SN_QOS_LEVEL_1 && qos != MQTT_SN_QOS_LEVEL_2) break; } } else if(context->state == MQTT_SN_CLIENT_STATE_SENDING_REQ) { //Check whether the transmission of the PUBLISH message has started if(context->msgType == MQTT_SN_MSG_TYPE_PUBLISH || context->msgType == MQTT_SN_MSG_TYPE_PUBREL) { //Restore the message identifier that was used to send the first //PUBLISH message if(!dup) publishMsgId = context->msgId; } //Check whether the timeout has elapsed if(timeCompare(time, context->startTime + context->timeout) >= 0) { //Abort the retransmission procedure context->state = MQTT_SN_CLIENT_STATE_DISCONNECTING; //Report a timeout error error = ERROR_TIMEOUT; } else if(timeCompare(time, context->retransmitStartTime + MQTT_SN_CLIENT_RETRY_TIMEOUT) >= 0) { 
//If the retry timer times out and the expected gateway's reply //is not received, the client retransmits the message if(context->msgType == MQTT_SN_MSG_TYPE_REGISTER) { //Retransmit REGISTER message error = mqttSnClientSendRegister(context, topicName); } else if(context->msgType == MQTT_SN_MSG_TYPE_PUBLISH) { //Retransmit PUBLISH message error = mqttSnClientSendPublish(context, publishMsgId, topicName, message, length, qos, retain, TRUE); } else if(context->msgType == MQTT_SN_MSG_TYPE_PUBREL) { //Retransmit PUBREL message error = mqttSnClientSendPubRel(context, context->msgId); } else { //Report an error error = ERROR_INVALID_TYPE; } } else { //Wait for the gateway's reply error = mqttSnClientProcessEvents(context, MQTT_SN_CLIENT_TICK_INTERVAL); } } else if(context->state == MQTT_SN_CLIENT_STATE_RESP_RECEIVED) { //Update MQTT-SN client state context->state = MQTT_SN_CLIENT_STATE_ACTIVE; //Check whether the transmission of the PUBLISH message has started if(context->msgType == MQTT_SN_MSG_TYPE_PUBACK || context->msgType == MQTT_SN_MSG_TYPE_PUBREC || context->msgType == MQTT_SN_MSG_TYPE_PUBCOMP) { //Restore the message identifier that was used to send the first //PUBLISH message if(!dup) publishMsgId = context->msgId; } //Check the type of the received message if(context->msgType == MQTT_SN_MSG_TYPE_REGACK) { //If the registration has not been accepted, the failure reason is //encoded in the return code field of the REGACK message if(context->returnCode == MQTT_SN_RETURN_CODE_ACCEPTED) { //Save the topic ID assigned by the gateway error = mqttSnClientAddTopic(context, topicName, context->topicId); } else { //The registration request has been rejected by the gateway error = ERROR_REQUEST_REJECTED; } } else if(context->msgType == MQTT_SN_MSG_TYPE_PUBACK) { //If the publish request has not been accepted, the failure reason //is encoded in the return code field of the PUBACK message if(context->returnCode == MQTT_SN_RETURN_CODE_ACCEPTED) { //Check QoS level if(qos == MQTT_SN_QOS_LEVEL_2) { //Unexpected PUBREC message received error = ERROR_UNEXPECTED_MESSAGE; } else { //A PUBACK message has been received break; } } else { //The publish request has been rejected by the gateway error = ERROR_REQUEST_REJECTED; } } else if(context->msgType == MQTT_SN_MSG_TYPE_PUBREC) { //Check QoS level if(qos == MQTT_SN_QOS_LEVEL_2) { //A PUBREL packet is the response to a PUBREC packet. 
It is the //third packet of the QoS 2 protocol exchange error = mqttSnClientSendPubRel(context, context->msgId); } else { //Unexpected PUBREC message received error = ERROR_UNEXPECTED_MESSAGE; } } else if(context->msgType == MQTT_SN_MSG_TYPE_PUBCOMP) { //A PUBCOMP message has been received break; } else { //Report an error error = ERROR_UNEXPECTED_RESPONSE; } } else { //Invalid state error = ERROR_NOT_CONNECTED; } } //Return the message identifier that was used to send the PUBLISH message if(msgId != NULL) *msgId = publishMsgId; //Return status code return error; } /** * @brief Subscribe to topic * @param[in] context Pointer to the MQTT-SN client context * @param[in] topicName Topic filter * @param[in] qos Maximum QoS level at which the server can send application * messages to the client * @return Error code **/ error_t mqttSnClientSubscribe(MqttSnClientContext *context, const char_t *topicName, MqttSnQosLevel qos) { error_t error; systime_t time; //Check parameters if(context == NULL || topicName == NULL) return ERROR_INVALID_PARAMETER; //Initialize status code error = NO_ERROR; //Topic subscribe procedure while(!error) { //Get current time time = osGetSystemTime(); //Check current state if(context->state == MQTT_SN_CLIENT_STATE_ACTIVE) { //The message identifier allows the sender to match a message with //its corresponding acknowledgment mqttSnClientGenerateMessageId(context); //Save current time context->startTime = time; //Send SUBSCRIBE message error = mqttSnClientSendSubscribe(context, topicName, qos); } else if(context->state == MQTT_SN_CLIENT_STATE_SENDING_REQ) { //Check whether the timeout has elapsed if(timeCompare(time, context->startTime + context->timeout) >= 0) { //Abort the retransmission procedure context->state = MQTT_SN_CLIENT_STATE_DISCONNECTING; //Report a timeout error error = ERROR_TIMEOUT; } else if(timeCompare(time, context->retransmitStartTime + MQTT_SN_CLIENT_RETRY_TIMEOUT) >= 0) { //If the retry timer times out and the expected gateway's reply //is not received, the client retransmits the message error = mqttSnClientSendSubscribe(context, topicName, qos); } else { //Wait for the gateway's reply error = mqttSnClientProcessEvents(context, MQTT_SN_CLIENT_TICK_INTERVAL); } } else if(context->state == MQTT_SN_CLIENT_STATE_RESP_RECEIVED) { //Update MQTT-SN client state context->state = MQTT_SN_CLIENT_STATE_ACTIVE; //Check the type of the received message if(context->msgType == MQTT_SN_MSG_TYPE_SUBACK) { //If the subscribe request has not been accepted, the failure reason //is encoded in the return code field of the SUBACK message if(context->returnCode == MQTT_SN_RETURN_CODE_ACCEPTED) { //The topic ID field is not relevant in case of subscriptions to a //topic name which contains wildcard characters if(strchr(topicName, '#') == NULL && strchr(topicName, '+') == NULL) { //Save the topic ID assigned by the gateway error = mqttSnClientAddTopic(context, topicName, context->topicId); } //A SUBACK message has been received break; } else { //The subscribe request has been rejected by the gateway error = ERROR_REQUEST_REJECTED; } } else { //Report an error error = ERROR_UNEXPECTED_RESPONSE; } } else { //Invalid state error = ERROR_NOT_CONNECTED; } } //Return status code return error; } /** * @brief Unsubscribe from topic * @param[in] context Pointer to the MQTT-SN client context * @param[in] topicName Topic filter * @return Error code **/ error_t mqttSnClientUnsubscribe(MqttSnClientContext *context, const char_t *topicName) { error_t error; systime_t time; //Check parameters 
if(context == NULL || topicName == NULL) return ERROR_INVALID_PARAMETER; //Initialize status code error = NO_ERROR; //Topic unsubscribe procedure while(!error) { //Get current time time = osGetSystemTime(); //Check current state if(context->state == MQTT_SN_CLIENT_STATE_ACTIVE) { //The message identifier allows the sender to match a message with //its corresponding acknowledgment mqttSnClientGenerateMessageId(context); //Save current time context->startTime = time; //Send UNSUBSCRIBE message error = mqttSnClientSendUnsubscribe(context, topicName); } else if(context->state == MQTT_SN_CLIENT_STATE_SENDING_REQ) { //Check whether the timeout has elapsed if(timeCompare(time, context->startTime + context->timeout) >= 0) { //Abort the retransmission procedure context->state = MQTT_SN_CLIENT_STATE_DISCONNECTING; //Report a timeout error error = ERROR_TIMEOUT; } else if(timeCompare(time, context->retransmitStartTime + MQTT_SN_CLIENT_RETRY_TIMEOUT) >= 0) { //If the retry timer times out and the expected gateway's reply //is not received, the client retransmits the message error = mqttSnClientSendUnsubscribe(context, topicName); } else { //Wait for the gateway's reply error = mqttSnClientProcessEvents(context, MQTT_SN_CLIENT_TICK_INTERVAL); } } else if(context->state == MQTT_SN_CLIENT_STATE_RESP_RECEIVED) { //Update MQTT-SN client state context->state = MQTT_SN_CLIENT_STATE_ACTIVE; //Check the type of the received message if(context->msgType == MQTT_SN_MSG_TYPE_UNSUBACK) { //An UNSUBACK message has been received break; } else { //Report an error error = ERROR_UNEXPECTED_RESPONSE; } } else { //Invalid state error = ERROR_NOT_CONNECTED; } } //Return status code return error; } /** * @brief Send ping request * @param[in] context Pointer to the MQTT-SN client context * @return Error code **/ error_t mqttSnClientPing(MqttSnClientContext *context) { error_t error; systime_t time; //Make sure the MQTT-SN client context is valid if(context == NULL) return ERROR_INVALID_PARAMETER; //Initialize status code error = NO_ERROR; //Send PINGREQ packet and wait for PINGRESP packet to be received while(!error) { //Get current time time = osGetSystemTime(); //Check current state if(context->state == MQTT_SN_CLIENT_STATE_ACTIVE) { //Save current time context->startTime = time; context->retransmitStartTime = time; //Send PINGREQ message error = mqttSnClientSendPingReq(context); //Update MQTT-SN client state context->state = MQTT_SN_CLIENT_STATE_SENDING_REQ; context->msgType = MQTT_SN_MSG_TYPE_PINGREQ; } else if(context->state == MQTT_SN_CLIENT_STATE_SENDING_REQ) { //Check whether the timeout has elapsed if(timeCompare(time, context->startTime + context->timeout) >= 0) { //Abort the retransmission procedure context->state = MQTT_SN_CLIENT_STATE_DISCONNECTING; //Report a timeout error error = ERROR_TIMEOUT; } else if(timeCompare(time, context->retransmitStartTime + MQTT_SN_CLIENT_RETRY_TIMEOUT) >= 0) { //If the retry timer times out and the expected gateway's reply //is not received, the client retransmits the message error = mqttSnClientSendPingReq(context); //Save the time at which the message was sent context->retransmitStartTime = time; } else { //Wait for the gateway's reply error = mqttSnClientProcessEvents(context, MQTT_SN_CLIENT_TICK_INTERVAL); } } else if(context->state == MQTT_SN_CLIENT_STATE_RESP_RECEIVED) { //Update MQTT-SN client state context->state = MQTT_SN_CLIENT_STATE_ACTIVE; //Check the type of the received message if(context->msgType == MQTT_SN_MSG_TYPE_PINGRESP) { //A PINGRESP message has been received 
break; } else { //Report an error error = ERROR_UNEXPECTED_RESPONSE; } } else { //Invalid state error = ERROR_NOT_CONNECTED; } } //Return status code return error; } /** * @brief Update the Will message * @param[in] context Pointer to the MQTT-SN client context * @param[in] topic Will topic name * @param[in] message Will message * @param[in] length Length of the Will message * @param[in] qos QoS level to be used when publishing the Will message * @param[in] retain This flag specifies if the Will message is to be retained * @return Error code **/ error_t mqttSnClientUpdateWillMessage(MqttSnClientContext *context, const char_t *topic, const void *message, size_t length, MqttSnQosLevel qos, bool_t retain) { error_t error; systime_t time; //Check parameters if(context == NULL || topic == NULL) return ERROR_INVALID_PARAMETER; if(message == NULL && length != 0) return ERROR_INVALID_PARAMETER; //Initialize status code error = NO_ERROR; //Publish procedure while(!error) { //Get current time time = osGetSystemTime(); //Check current state if(context->state == MQTT_SN_CLIENT_STATE_ACTIVE) { //Update the Will message error = mqttSnClientSetWillMessage(context, topic, message, length, qos, retain); //Check status code if(!error) { //Save current time context->startTime = time; //Send WILLTOPICUPD message error = mqttSnClientSendWillTopicUpd(context); } } else if(context->state == MQTT_SN_CLIENT_STATE_SENDING_REQ) { //Check whether the timeout has elapsed if(timeCompare(time, context->startTime + context->timeout) >= 0) { //Abort the retransmission procedure context->state = MQTT_SN_CLIENT_STATE_DISCONNECTING; //Report a timeout error error = ERROR_TIMEOUT; } else if(timeCompare(time, context->retransmitStartTime + MQTT_SN_CLIENT_RETRY_TIMEOUT) >= 0) { //If the retry timer times out and the expected gateway's reply //is not received, the client retransmits the message if(context->msgType == MQTT_SN_MSG_TYPE_WILLTOPICUPD) { //Retransmit WILLTOPICUPD message error = mqttSnClientSendWillTopicUpd(context); } else if(context->msgType == MQTT_SN_MSG_TYPE_WILLMSGUPD) { //Retransmit WILLMSGUPD message error = mqttSnClientSendWillMsgUpd(context); } else { //Report an error error = ERROR_INVALID_TYPE; } } else { //Wait for the gateway's reply error = mqttSnClientProcessEvents(context, MQTT_SN_CLIENT_TICK_INTERVAL); } } else if(context->state == MQTT_SN_CLIENT_STATE_RESP_RECEIVED) { //Update MQTT-SN client state context->state = MQTT_SN_CLIENT_STATE_ACTIVE; //Check the type of the received message if(context->msgType == MQTT_SN_MSG_TYPE_WILLTOPICRESP) { //If the WILLTOPICUPD has not been accepted, the failure reason //is encoded in the return code field of the WILLTOPICRESP if(context->returnCode == MQTT_SN_RETURN_CODE_ACCEPTED) { //Valid Will topic? 
if(context->willMessage.topic[0] != '\0') { //Send WILLMSGUPD message error = mqttSnClientSendWillMsgUpd(context); } else { //An empty WILLTOPIC message is used by a client to delete //the Will topic and the Will message stored in the server break; } } else { //The WILLTOPICUPD request has been rejected by the gateway error = ERROR_REQUEST_REJECTED; } } else if(context->msgType == MQTT_SN_MSG_TYPE_WILLMSGRESP) { //If the WILLMSGUPD has not been accepted, the failure reason //is encoded in the return code field of the WILLMSGRESP if(context->returnCode == MQTT_SN_RETURN_CODE_ACCEPTED) { //The WILLMSGUPD request has been accepted by the gateway break; } else { //The WILLMSGUPD request has been rejected by the gateway error = ERROR_REQUEST_REJECTED; } } else { //Report an error error = ERROR_UNEXPECTED_RESPONSE; } } else { //Invalid state error = ERROR_NOT_CONNECTED; } } //Return status code return error; } /** * @brief Retrieve return code * @param[in] context Pointer to the MQTT-SN client context * @param[out] returnCode Return code * @return Error code **/ error_t mqttSnClientGetReturnCode(MqttSnClientContext *context, MqttSnReturnCode *returnCode) { //Check parameters if(context == NULL || returnCode == NULL) return ERROR_INVALID_PARAMETER; //Retrieve return code *returnCode = context->returnCode; //Successful processing return NO_ERROR; } /** * @brief Process MQTT-SN client events * @param[in] context Pointer to the MQTT-SN client context * @param[in] timeout Maximum time to wait before returning * @return Error code **/ error_t mqttSnClientTask(MqttSnClientContext *context, systime_t timeout) { error_t error; //Make sure the MQTT-SN client context is valid if(context == NULL) return ERROR_INVALID_PARAMETER; //Make sure the MQTT-SN client is connected if(context->state == MQTT_SN_CLIENT_STATE_ACTIVE || context->state == MQTT_SN_CLIENT_STATE_SENDING_REQ || context->state == MQTT_SN_CLIENT_STATE_RESP_RECEIVED) { //Process MQTT-SN client events error = mqttSnClientProcessEvents(context, timeout); } else { //Invalid state error = ERROR_NOT_CONNECTED; } //Return status code return error; } /** * @brief Disconnect from the MQTT-SN gateway * @param[in] context Pointer to the MQTT-SN client context * @return Error code **/ error_t mqttSnClientDisconnect(MqttSnClientContext *context) { error_t error; systime_t time; //Make sure the MQTT-SN client context is valid if(context == NULL) return ERROR_INVALID_PARAMETER; //Initialize status code error = NO_ERROR; //Disconnect procedure while(!error) { //Get current time time = osGetSystemTime(); //Check current state if(context->state == MQTT_SN_CLIENT_STATE_ACTIVE) { //Save current time context->startTime = time; //The DISCONNECT message is sent by a client to indicate that it //wants to close the connection error = mqttSnClientSendDisconnect(context, 0); } else if(context->state == MQTT_SN_CLIENT_STATE_SENDING_REQ) { //Check whether the timeout has elapsed if(timeCompare(time, context->startTime + context->timeout) >= 0) { //Terminate DTLS connection mqttSnClientShutdownConnection(context); //Report a timeout error error = ERROR_TIMEOUT; } else if(timeCompare(time, context->retransmitStartTime + MQTT_SN_CLIENT_RETRY_TIMEOUT) >= 0) { //If the retry timer times out and the expected gateway's reply //is not received, the client retransmits the message error = mqttSnClientSendDisconnect(context, 0); } else { //Wait for the gateway's reply error = mqttSnClientProcessEvents(context, MQTT_SN_CLIENT_TICK_INTERVAL); } } else if(context->state == 
MQTT_SN_CLIENT_STATE_DISCONNECTING) { //Terminate DTLS connection error = mqttSnClientShutdownConnection(context); //Close network connection mqttSnClientCloseConnection(context); //The connection is closed context->state = MQTT_SN_CLIENT_STATE_DISCONNECTED; } else if(context->state == MQTT_SN_CLIENT_STATE_DISCONNECTED) { //The MQTT-SN client is disconnected break; } else { //Invalid state error = ERROR_WRONG_STATE; } } //Any error to report? if(error != NO_ERROR && error != ERROR_WOULD_BLOCK) { //Close network connection mqttSnClientCloseConnection(context); //Update MQTT-SN client state context->state = MQTT_SN_CLIENT_STATE_DISCONNECTED; } //Return status code return error; } /** * @brief Release MQTT-SN client context * @param[in] context Pointer to the MQTT-SN client context **/ void mqttSnClientDeinit(MqttSnClientContext *context) { //Make sure the MQTT-SN client context is valid if(context != NULL) { //Close connection mqttSnClientCloseConnection(context); #if (MQTT_SN_CLIENT_DTLS_SUPPORT == ENABLED) //Release DTLS session state tlsFreeSessionState(&context->dtlsSession); #endif //Clear MQTT-SN client context osMemset(context, 0, sizeof(MqttSnClientContext)); } } #endif
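Before the patched version of the file (code_after, below), a short usage sketch of the client API may help orient the reader. The call sequence is assembled from the function signatures visible in the source above; the function name demoPublish, the gateway address and port, client identifier, topic, and payload are placeholder values, error handling is abbreviated, and ipStringToAddr() is assumed to be the usual CycloneTCP address-parsing helper.

#include "core/net.h"
#include "mqtt_sn/mqtt_sn_client.h"

error_t demoPublish(void)
{
   error_t error;
   IpAddr gwIpAddr;
   MqttSnClientContext context;

   //Initialize the MQTT-SN client context
   mqttSnClientInit(&context);

   //Point the client at a placeholder gateway
   ipStringToAddr("192.168.0.100", &gwIpAddr);
   mqttSnClientSetGateway(&context, &gwIpAddr, 10000);
   mqttSnClientSetIdentifier(&context, "demo-client");
   mqttSnClientSetTimeout(&context, 10000);

   //Open a clean session with the gateway
   error = mqttSnClientConnect(&context, TRUE);

   //Check status code
   if(!error)
   {
      //Publish a QoS 1 message (msgId may be NULL when dup is FALSE)
      error = mqttSnClientPublish(&context, "demo/topic", "hello", 5,
         MQTT_SN_QOS_LEVEL_1, FALSE, FALSE, NULL);

      //Gracefully terminate the MQTT-SN connection
      mqttSnClientDisconnect(&context);
   }

   //Release resources
   mqttSnClientDeinit(&context);

   //Return status code
   return error;
}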
/** * @file mqtt_sn_client.c * @brief MQTT-SN client * * @section License * * SPDX-License-Identifier: GPL-2.0-or-later * * Copyright (C) 2010-2021 Oryx Embedded SARL. All rights reserved. * * This file is part of CycloneTCP Open. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. * * @author Oryx Embedded SARL (www.oryx-embedded.com) * @version 2.0.2 **/ //Switch to the appropriate trace level #define TRACE_LEVEL MQTT_SN_TRACE_LEVEL //Dependencies #include "core/net.h" #include "mqtt_sn/mqtt_sn_client.h" #include "mqtt_sn/mqtt_sn_client_message.h" #include "mqtt_sn/mqtt_sn_client_transport.h" #include "mqtt_sn/mqtt_sn_client_misc.h" #include "debug.h" //Check TCP/IP stack configuration #if (MQTT_SN_CLIENT_SUPPORT == ENABLED) /** * @brief Initialize MQTT-SN client context * @param[in] context Pointer to the MQTT-SN client context * @return Error code **/ error_t mqttSnClientInit(MqttSnClientContext *context) { #if (MQTT_SN_CLIENT_DTLS_SUPPORT == ENABLED) error_t error; #endif //Make sure the MQTT-SN client context is valid if(context == NULL) return ERROR_INVALID_PARAMETER; //Clear MQTT-SN client context osMemset(context, 0, sizeof(MqttSnClientContext)); #if (MQTT_SN_CLIENT_DTLS_SUPPORT == ENABLED) //Initialize DTLS session state error = tlsInitSessionState(&context->dtlsSession); //Any error to report? 
if(error) return error; #endif //Initialize MQTT-SN client state context->state = MQTT_SN_CLIENT_STATE_DISCONNECTED; //Default transport protocol context->transportProtocol = MQTT_SN_TRANSPORT_PROTOCOL_UDP; //Default timeout context->timeout = MQTT_SN_CLIENT_DEFAULT_TIMEOUT; //Default keep-alive time interval context->keepAlive = MQTT_SN_CLIENT_DEFAULT_KEEP_ALIVE; //Initialize message identifier context->msgId = 0; //Successful initialization return NO_ERROR; } /** * @brief Set the transport protocol to be used * @param[in] context Pointer to the MQTT-SN client context * @param[in] transportProtocol Transport protocol to be used (UDP or DTLS) * @return Error code **/ error_t mqttSnClientSetTransportProtocol(MqttSnClientContext *context, MqttSnTransportProtocol transportProtocol) { //Make sure the MQTT-SN client context is valid if(context == NULL) return ERROR_INVALID_PARAMETER; //Save the transport protocol to be used context->transportProtocol = transportProtocol; //Successful processing return NO_ERROR; } #if (MQTT_SN_CLIENT_DTLS_SUPPORT == ENABLED) /** * @brief Register DTLS initialization callback function * @param[in] context Pointer to the MQTT-SN client context * @param[in] callback DTLS initialization callback function * @return Error code **/ error_t mqttSnClientRegisterDtlsInitCallback(MqttSnClientContext *context, MqttSnClientDtlsInitCallback callback) { //Check parameters if(context == NULL || callback == NULL) return ERROR_INVALID_PARAMETER; //Save callback function context->dtlsInitCallback = callback; //Successful processing return NO_ERROR; } #endif /** * @brief Register publish callback function * @param[in] context Pointer to the MQTT-SN client context * @param[in] callback Callback function to be called when a PUBLISH message * is received * @return Error code **/ error_t mqttSnClientRegisterPublishCallback(MqttSnClientContext *context, MqttSnClientPublishCallback callback) { //Make sure the MQTT-SN client context is valid if(context == NULL) return ERROR_INVALID_PARAMETER; //Save callback function context->publishCallback = callback; //Successful processing return NO_ERROR; } /** * @brief Set the list of predefined topics * @param[in] context Pointer to the MQTT-SN client context * @param[in] predefinedTopics List of predefined topics * @param[in] size Number of predefined topics * @return Error code **/ error_t mqttSnClientSetPredefinedTopics(MqttSnClientContext *context, MqttSnPredefinedTopic *predefinedTopics, uint_t size) { //Make sure the MQTT-SN client context is valid if(context == NULL) return ERROR_INVALID_PARAMETER; //Check parameters if(predefinedTopics == NULL && size != 0) return ERROR_INVALID_PARAMETER; //Save the list of predefined topics context->predefinedTopicTable = predefinedTopics; context->predefinedTopicTableSize = size; //Successful processing return NO_ERROR; } /** * @brief Set communication timeout * @param[in] context Pointer to the MQTT-SN client context * @param[in] timeout Timeout value, in milliseconds * @return Error code **/ error_t mqttSnClientSetTimeout(MqttSnClientContext *context, systime_t timeout) { //Make sure the MQTT-SN client context is valid if(context == NULL) return ERROR_INVALID_PARAMETER; //Save timeout value context->timeout = timeout; //Successful processing return NO_ERROR; } /** * @brief Set keep-alive value * @param[in] context Pointer to the MQTT-SN client context * @param[in] keepAlive Keep-alive interval, in milliseconds * @return Error code **/ error_t mqttSnClientSetKeepAlive(MqttSnClientContext *context, 
systime_t keepAlive) { //Make sure the MQTT-SN client context is valid if(context == NULL) return ERROR_INVALID_PARAMETER; //Save keep-alive value context->keepAlive = keepAlive; //Successful processing return NO_ERROR; } /** * @brief Set client identifier * @param[in] context Pointer to the MQTT-SN client context * @param[in] clientId NULL-terminated string containing the client identifier * @return Error code **/ error_t mqttSnClientSetIdentifier(MqttSnClientContext *context, const char_t *clientId) { //Check parameters if(context == NULL || clientId == NULL) return ERROR_INVALID_PARAMETER; //Make sure the length of the client identifier is acceptable if(osStrlen(clientId) > MQTT_SN_CLIENT_MAX_ID_LEN) return ERROR_INVALID_LENGTH; //Save client identifier osStrcpy(context->clientId, clientId); //Successful processing return NO_ERROR; } /** * @brief Specify the Will message * @param[in] context Pointer to the MQTT-SN client context * @param[in] topic Will topic name * @param[in] message Will message * @param[in] length Length of the Will message * @param[in] qos QoS level to be used when publishing the Will message * @param[in] retain This flag specifies if the Will message is to be retained * @return Error code **/ error_t mqttSnClientSetWillMessage(MqttSnClientContext *context, const char_t *topic, const void *message, size_t length, MqttSnQosLevel qos, bool_t retain) { //Check parameters if(context == NULL || topic == NULL) return ERROR_INVALID_PARAMETER; //Make sure the length of the Will topic is acceptable if(osStrlen(topic) > MQTT_SN_CLIENT_MAX_WILL_TOPIC_LEN) return ERROR_INVALID_LENGTH; //Save Will topic osStrcpy(context->willMessage.topic, topic); //Any message payload if(length > 0) { //Sanity check if(message == NULL) return ERROR_INVALID_PARAMETER; //Make sure the length of the Will message payload is acceptable if(osStrlen(message) > MQTT_SN_CLIENT_MAX_WILL_PAYLOAD_LEN) return ERROR_INVALID_LENGTH; //Save Will message payload osMemcpy(context->willMessage.payload, message, length); } //Length of the Will message payload context->willMessage.length = length; //QoS level to be used when publishing the Will message context->willMessage.flags.qos = qos; //This flag specifies if the Will message is to be retained context->willMessage.flags.retain = retain; //Successful processing return NO_ERROR; } /** * @brief Bind the MQTT-SN client to a particular network interface * @param[in] context Pointer to the MQTT-SN client context * @param[in] interface Network interface to be used * @return Error code **/ error_t mqttSnClientBindToInterface(MqttSnClientContext *context, NetInterface *interface) { //Make sure the MQTT-SN client context is valid if(context == NULL) return ERROR_INVALID_PARAMETER; //Explicitly associate the MQTT client with the specified interface context->interface = interface; //Successful processing return NO_ERROR; } /** * @brief Specify the address of the gateway * @param[in] context Pointer to the MQTT-SN client context * @param[in] gwIpAddr Gateway IP address * @param[in] gwPort Gateway port number * @return Error code **/ error_t mqttSnClientSetGateway(MqttSnClientContext *context, const IpAddr *gwIpAddr, uint16_t gwPort) { //Check parameters if(context == NULL || gwIpAddr == NULL) return ERROR_INVALID_PARAMETER; //Save the IP address and the port number of the MQTT-SN gateway context->gwIpAddr = *gwIpAddr; context->gwPort = gwPort; //Successful processing return NO_ERROR; } /** * @brief Search for a gateway * @param[in] context Pointer to the MQTT-SN client 
context * @param[in] destIpAddr Destination IP address * @param[in] destPort Destination port number * @return Error code **/ error_t mqttSnClientSearchGateway(MqttSnClientContext *context, const IpAddr *destIpAddr, uint16_t destPort) { error_t error; systime_t time; //Check parameters if(context == NULL || destIpAddr == NULL) return ERROR_INVALID_PARAMETER; //Initialize status code error = NO_ERROR; //Gateway discovery procedure while(!error) { //Get current time time = osGetSystemTime(); //Check current state if(context->state == MQTT_SN_CLIENT_STATE_DISCONNECTED) { //Open network connection error = mqttSnClientOpenConnection(context, FALSE); //Check status code if(!error) { //Save current time context->startTime = time; context->retransmitStartTime = time; //To prevent broadcast storms when multiple clients start searching //for GW almost at the same time, the sending of the SEARCHGW message //is delayed by a random time between 0 and TSEARCHGW context->retransmitTimeout = netGetRandRange(0, MQTT_SN_CLIENT_SEARCH_DELAY); //Start searching for gateways context->state = MQTT_SN_CLIENT_STATE_SEARCHING; } } else if(context->state == MQTT_SN_CLIENT_STATE_SEARCHING) { //Check whether the timeout has elapsed if(timeCompare(time, context->startTime + context->timeout) >= 0) { //Abort the retransmission procedure error = ERROR_TIMEOUT; } else if(timeCompare(time, context->retransmitStartTime + context->retransmitTimeout) >= 0) { //Set retransmission timeout context->retransmitTimeout = MQTT_SN_CLIENT_RETRY_TIMEOUT; //If the retry timer times out and the expected gateway's reply //is not received, the client retransmits the message error = mqttSnClientSendSearchGw(context, 0, destIpAddr, destPort); } else { //Wait for the gateway's reply error = mqttSnClientProcessEvents(context, MQTT_SN_CLIENT_TICK_INTERVAL); } } else if(context->state == MQTT_SN_CLIENT_STATE_RESP_RECEIVED) { //Check the type of the received message if(context->msgType == MQTT_SN_MSG_TYPE_GWINFO) { //Close network connection mqttSnClientCloseConnection(context); //A MQTT-SN gateway has been found context->state = MQTT_SN_CLIENT_STATE_DISCONNECTED; break; } else { //Report an error error = ERROR_UNEXPECTED_RESPONSE; } } else { //Invalid state error = ERROR_WRONG_STATE; } } //Any error to report? 
if(error != NO_ERROR && error != ERROR_WOULD_BLOCK) { //Clean up side effects mqttSnClientCloseConnection(context); //Update MQTT-SN client state context->state = MQTT_SN_CLIENT_STATE_DISCONNECTED; } //Return status code return error; } /** * @brief Establish connection with the MQTT-SN gateway * @param[in] context Pointer to the MQTT-SN client context * @param[in] cleanSession If this flag is set, then the client and server * must discard any previous session and start a new one * @return Error code **/ error_t mqttSnClientConnect(MqttSnClientContext *context, bool_t cleanSession) { error_t error; systime_t time; //Make sure the MQTT-SN client context is valid if(context == NULL) return ERROR_INVALID_PARAMETER; //Initialize status code error = NO_ERROR; //Establish connection with the MQTT-SN gateway while(!error) { //Get current time time = osGetSystemTime(); //Check current state if(context->state == MQTT_SN_CLIENT_STATE_DISCONNECTED) { //Open network connection error = mqttSnClientOpenConnection(context, TRUE); //Check status code if(!error) { //Save current time context->startTime = time; //Update MQTT-SN client state context->state = MQTT_SN_CLIENT_STATE_CONNECTING; } } else if(context->state == MQTT_SN_CLIENT_STATE_CONNECTING) { //Establish DTLS connection error = mqttSnClientEstablishConnection(context); //Check status code if(error == NO_ERROR) { //Check whether the CleanSession flag is set if(cleanSession) { //Discard previous session state osMemset(context->topicTable, 0, sizeof(context->topicTable)); osMemset(context->msgIdTable, 0, sizeof(context->msgIdTable)); } //The CONNECT message is sent by a client to setup a connection error = mqttSnClientSendConnect(context, cleanSession); } else if(error == ERROR_WOULD_BLOCK || error == ERROR_TIMEOUT) { //Check whether the timeout has elapsed if(timeCompare(time, context->startTime + context->timeout) >= 0) { //Report an error error = ERROR_TIMEOUT; } } else { //Failed to establish DTLS connection } } else if(context->state == MQTT_SN_CLIENT_STATE_SENDING_REQ) { //Check whether the timeout has elapsed if(timeCompare(time, context->startTime + context->timeout) >= 0) { //Abort the retransmission procedure error = ERROR_TIMEOUT; } else if(timeCompare(time, context->retransmitStartTime + MQTT_SN_CLIENT_RETRY_TIMEOUT) >= 0) { //If the retry timer times out and the expected gateway's reply //is not received, the client retransmits the message error = mqttSnClientSendConnect(context, cleanSession); } else { //Wait for the gateway's reply error = mqttSnClientProcessEvents(context, MQTT_SN_CLIENT_TICK_INTERVAL); } } else if(context->state == MQTT_SN_CLIENT_STATE_RESP_RECEIVED) { //Check the type of the received message if(context->msgType == MQTT_SN_MSG_TYPE_CONNACK) { //If the connection request has not been accepted, the failure reason //is encoded in the return code field of the CONNACK message if(context->returnCode == MQTT_SN_RETURN_CODE_ACCEPTED) { //The connection request has been accepted by the gateway context->state = MQTT_SN_CLIENT_STATE_ACTIVE; } else { //Terminate DTLS connection mqttSnClientShutdownConnection(context); //The connection request has been rejected by the gateway error = ERROR_REQUEST_REJECTED; } } else { //Report an error error = ERROR_UNEXPECTED_RESPONSE; } } else if(context->state == MQTT_SN_CLIENT_STATE_ACTIVE) { //The MQTT-SN client is connected break; } else { //Invalid state error = ERROR_WRONG_STATE; } } //Any error to report? 
if(error != NO_ERROR && error != ERROR_WOULD_BLOCK) { //Clean up side effects mqttSnClientCloseConnection(context); //Update MQTT-SN client state context->state = MQTT_SN_CLIENT_STATE_DISCONNECTED; } //Return status code return error; } /** * @brief Publish message * @param[in] context Pointer to the MQTT-SN client context * @param[in] topicName Topic name * @param[in] message Message payload * @param[in] length Length of the message payload * @param[in] qos QoS level to be used when publishing the message * @param[in] retain This flag specifies if the message is to be retained * @param[in] dup This flag specifies if the message is sent for the first * time or if the message is retransmitted * @param[in,out] msgId Message identifier used to send the PUBLISH message * @return Error code **/ error_t mqttSnClientPublish(MqttSnClientContext *context, const char_t *topicName, const void *message, size_t length, MqttSnQosLevel qos, bool_t retain, bool_t dup, uint16_t *msgId) { error_t error; systime_t time; uint16_t publishMsgId; //Check parameters if(context == NULL || topicName == NULL) return ERROR_INVALID_PARAMETER; if(message == NULL && length != 0) return ERROR_INVALID_PARAMETER; if(dup && msgId == NULL) return ERROR_INVALID_PARAMETER; //Initialize status code error = NO_ERROR; //Initialize message identifier if(dup) publishMsgId = *msgId; else publishMsgId = 0; //Publish procedure while(!error) { //Get current time time = osGetSystemTime(); //Check current state if(context->state == MQTT_SN_CLIENT_STATE_ACTIVE) { //Save current time context->startTime = time; //Check whether the register procedure is needed if(mqttSnClientIsShortTopicName(topicName) == FALSE && mqttSnClientFindTopicName(context, topicName) == 0 && mqttSnClientFindPredefTopicName(context, topicName) == 0) { //The message identifier allows the sender to match a message with //its corresponding acknowledgment mqttSnClientGenerateMessageId(context); //To register a topic name a client sends a REGISTER message to //the gateway error = mqttSnClientSendRegister(context, topicName); } else { //The message ID is only relevant in case of QoS levels 1 and 2 if(qos == MQTT_SN_QOS_LEVEL_1 || qos == MQTT_SN_QOS_LEVEL_2) { //The message identifier allows the sender to match a message with //its corresponding acknowledgment if(!dup) publishMsgId = mqttSnClientGenerateMessageId(context); } else { //For QoS level 0, the message identifier is coded 0x0000 publishMsgId = 0; } //The client can start publishing data relating to the registered //topic name by sending PUBLISH messages to the gateway error = mqttSnClientSendPublish(context, publishMsgId, topicName, message, length, qos, retain, dup); //In the QoS 0, no response is sent by the receiver and no retry //is performed by the sender if(qos != MQTT_SN_QOS_LEVEL_1 && qos != MQTT_SN_QOS_LEVEL_2) break; } } else if(context->state == MQTT_SN_CLIENT_STATE_SENDING_REQ) { //Check whether the transmission of the PUBLISH message has started if(context->msgType == MQTT_SN_MSG_TYPE_PUBLISH || context->msgType == MQTT_SN_MSG_TYPE_PUBREL) { //Restore the message identifier that was used to send the first //PUBLISH message if(!dup) publishMsgId = context->msgId; } //Check whether the timeout has elapsed if(timeCompare(time, context->startTime + context->timeout) >= 0) { //Abort the retransmission procedure context->state = MQTT_SN_CLIENT_STATE_DISCONNECTING; //Report a timeout error error = ERROR_TIMEOUT; } else if(timeCompare(time, context->retransmitStartTime + MQTT_SN_CLIENT_RETRY_TIMEOUT) >= 0) { 
//If the retry timer times out and the expected gateway's reply //is not received, the client retransmits the message if(context->msgType == MQTT_SN_MSG_TYPE_REGISTER) { //Retransmit REGISTER message error = mqttSnClientSendRegister(context, topicName); } else if(context->msgType == MQTT_SN_MSG_TYPE_PUBLISH) { //Retransmit PUBLISH message error = mqttSnClientSendPublish(context, publishMsgId, topicName, message, length, qos, retain, TRUE); } else if(context->msgType == MQTT_SN_MSG_TYPE_PUBREL) { //Retransmit PUBREL message error = mqttSnClientSendPubRel(context, context->msgId); } else { //Report an error error = ERROR_INVALID_TYPE; } } else { //Wait for the gateway's reply error = mqttSnClientProcessEvents(context, MQTT_SN_CLIENT_TICK_INTERVAL); } } else if(context->state == MQTT_SN_CLIENT_STATE_RESP_RECEIVED) { //Update MQTT-SN client state context->state = MQTT_SN_CLIENT_STATE_ACTIVE; //Check whether the transmission of the PUBLISH message has started if(context->msgType == MQTT_SN_MSG_TYPE_PUBACK || context->msgType == MQTT_SN_MSG_TYPE_PUBREC || context->msgType == MQTT_SN_MSG_TYPE_PUBCOMP) { //Restore the message identifier that was used to send the first //PUBLISH message if(!dup) publishMsgId = context->msgId; } //Check the type of the received message if(context->msgType == MQTT_SN_MSG_TYPE_REGACK) { //If the registration has not been accepted, the failure reason is //encoded in the return code field of the REGACK message if(context->returnCode == MQTT_SN_RETURN_CODE_ACCEPTED) { //Save the topic ID assigned by the gateway error = mqttSnClientAddTopic(context, topicName, context->topicId); } else { //The registration request has been rejected by the gateway error = ERROR_REQUEST_REJECTED; } } else if(context->msgType == MQTT_SN_MSG_TYPE_PUBACK) { //If the publish request has not been accepted, the failure reason //is encoded in the return code field of the PUBACK message if(context->returnCode == MQTT_SN_RETURN_CODE_ACCEPTED) { //Check QoS level if(qos == MQTT_SN_QOS_LEVEL_2) { //Unexpected PUBREC message received error = ERROR_UNEXPECTED_MESSAGE; } else { //A PUBACK message has been received break; } } else { //The publish request has been rejected by the gateway error = ERROR_REQUEST_REJECTED; } } else if(context->msgType == MQTT_SN_MSG_TYPE_PUBREC) { //Check QoS level if(qos == MQTT_SN_QOS_LEVEL_2) { //A PUBREL packet is the response to a PUBREC packet. 
It is the //third packet of the QoS 2 protocol exchange error = mqttSnClientSendPubRel(context, context->msgId); } else { //Unexpected PUBREC message received error = ERROR_UNEXPECTED_MESSAGE; } } else if(context->msgType == MQTT_SN_MSG_TYPE_PUBCOMP) { //A PUBCOMP message has been received break; } else { //Report an error error = ERROR_UNEXPECTED_RESPONSE; } } else { //Invalid state error = ERROR_NOT_CONNECTED; } } //Return the message identifier that was used to send the PUBLISH message if(msgId != NULL) *msgId = publishMsgId; //Return status code return error; } /** * @brief Subscribe to topic * @param[in] context Pointer to the MQTT-SN client context * @param[in] topicName Topic filter * @param[in] qos Maximum QoS level at which the server can send application * messages to the client * @return Error code **/ error_t mqttSnClientSubscribe(MqttSnClientContext *context, const char_t *topicName, MqttSnQosLevel qos) { error_t error; systime_t time; //Check parameters if(context == NULL || topicName == NULL) return ERROR_INVALID_PARAMETER; //Initialize status code error = NO_ERROR; //Topic subscribe procedure while(!error) { //Get current time time = osGetSystemTime(); //Check current state if(context->state == MQTT_SN_CLIENT_STATE_ACTIVE) { //The message identifier allows the sender to match a message with //its corresponding acknowledgment mqttSnClientGenerateMessageId(context); //Save current time context->startTime = time; //Send SUBSCRIBE message error = mqttSnClientSendSubscribe(context, topicName, qos); } else if(context->state == MQTT_SN_CLIENT_STATE_SENDING_REQ) { //Check whether the timeout has elapsed if(timeCompare(time, context->startTime + context->timeout) >= 0) { //Abort the retransmission procedure context->state = MQTT_SN_CLIENT_STATE_DISCONNECTING; //Report a timeout error error = ERROR_TIMEOUT; } else if(timeCompare(time, context->retransmitStartTime + MQTT_SN_CLIENT_RETRY_TIMEOUT) >= 0) { //If the retry timer times out and the expected gateway's reply //is not received, the client retransmits the message error = mqttSnClientSendSubscribe(context, topicName, qos); } else { //Wait for the gateway's reply error = mqttSnClientProcessEvents(context, MQTT_SN_CLIENT_TICK_INTERVAL); } } else if(context->state == MQTT_SN_CLIENT_STATE_RESP_RECEIVED) { //Update MQTT-SN client state context->state = MQTT_SN_CLIENT_STATE_ACTIVE; //Check the type of the received message if(context->msgType == MQTT_SN_MSG_TYPE_SUBACK) { //If the subscribe request has not been accepted, the failure reason //is encoded in the return code field of the SUBACK message if(context->returnCode == MQTT_SN_RETURN_CODE_ACCEPTED) { //The topic ID field is not relevant in case of subscriptions to a //topic name which contains wildcard characters if(osStrchr(topicName, '#') == NULL && osStrchr(topicName, '+') == NULL) { //Save the topic ID assigned by the gateway error = mqttSnClientAddTopic(context, topicName, context->topicId); } //A SUBACK message has been received break; } else { //The subscribe request has been rejected by the gateway error = ERROR_REQUEST_REJECTED; } } else { //Report an error error = ERROR_UNEXPECTED_RESPONSE; } } else { //Invalid state error = ERROR_NOT_CONNECTED; } } //Return status code return error; } /** * @brief Unsubscribe from topic * @param[in] context Pointer to the MQTT-SN client context * @param[in] topicName Topic filter * @return Error code **/ error_t mqttSnClientUnsubscribe(MqttSnClientContext *context, const char_t *topicName) { error_t error; systime_t time; //Check 
parameters if(context == NULL || topicName == NULL) return ERROR_INVALID_PARAMETER; //Initialize status code error = NO_ERROR; //Topic unsubscribe procedure while(!error) { //Get current time time = osGetSystemTime(); //Check current state if(context->state == MQTT_SN_CLIENT_STATE_ACTIVE) { //The message identifier allows the sender to match a message with //its corresponding acknowledgment mqttSnClientGenerateMessageId(context); //Save current time context->startTime = time; //Send UNSUBSCRIBE message error = mqttSnClientSendUnsubscribe(context, topicName); } else if(context->state == MQTT_SN_CLIENT_STATE_SENDING_REQ) { //Check whether the timeout has elapsed if(timeCompare(time, context->startTime + context->timeout) >= 0) { //Abort the retransmission procedure context->state = MQTT_SN_CLIENT_STATE_DISCONNECTING; //Report a timeout error error = ERROR_TIMEOUT; } else if(timeCompare(time, context->retransmitStartTime + MQTT_SN_CLIENT_RETRY_TIMEOUT) >= 0) { //If the retry timer times out and the expected gateway's reply //is not received, the client retransmits the message error = mqttSnClientSendUnsubscribe(context, topicName); } else { //Wait for the gateway's reply error = mqttSnClientProcessEvents(context, MQTT_SN_CLIENT_TICK_INTERVAL); } } else if(context->state == MQTT_SN_CLIENT_STATE_RESP_RECEIVED) { //Update MQTT-SN client state context->state = MQTT_SN_CLIENT_STATE_ACTIVE; //Check the type of the received message if(context->msgType == MQTT_SN_MSG_TYPE_UNSUBACK) { //An UNSUBACK message has been received break; } else { //Report an error error = ERROR_UNEXPECTED_RESPONSE; } } else { //Invalid state error = ERROR_NOT_CONNECTED; } } //Return status code return error; } /** * @brief Send ping request * @param[in] context Pointer to the MQTT-SN client context * @return Error code **/ error_t mqttSnClientPing(MqttSnClientContext *context) { error_t error; systime_t time; //Make sure the MQTT-SN client context is valid if(context == NULL) return ERROR_INVALID_PARAMETER; //Initialize status code error = NO_ERROR; //Send PINGREQ packet and wait for PINGRESP packet to be received while(!error) { //Get current time time = osGetSystemTime(); //Check current state if(context->state == MQTT_SN_CLIENT_STATE_ACTIVE) { //Save current time context->startTime = time; context->retransmitStartTime = time; //Send PINGREQ message error = mqttSnClientSendPingReq(context); //Update MQTT-SN client state context->state = MQTT_SN_CLIENT_STATE_SENDING_REQ; context->msgType = MQTT_SN_MSG_TYPE_PINGREQ; } else if(context->state == MQTT_SN_CLIENT_STATE_SENDING_REQ) { //Check whether the timeout has elapsed if(timeCompare(time, context->startTime + context->timeout) >= 0) { //Abort the retransmission procedure context->state = MQTT_SN_CLIENT_STATE_DISCONNECTING; //Report a timeout error error = ERROR_TIMEOUT; } else if(timeCompare(time, context->retransmitStartTime + MQTT_SN_CLIENT_RETRY_TIMEOUT) >= 0) { //If the retry timer times out and the expected gateway's reply //is not received, the client retransmits the message error = mqttSnClientSendPingReq(context); //Save the time at which the message was sent context->retransmitStartTime = time; } else { //Wait for the gateway's reply error = mqttSnClientProcessEvents(context, MQTT_SN_CLIENT_TICK_INTERVAL); } } else if(context->state == MQTT_SN_CLIENT_STATE_RESP_RECEIVED) { //Update MQTT-SN client state context->state = MQTT_SN_CLIENT_STATE_ACTIVE; //Check the type of the received message if(context->msgType == MQTT_SN_MSG_TYPE_PINGRESP) { //A PINGRESP message has 
been received break; } else { //Report an error error = ERROR_UNEXPECTED_RESPONSE; } } else { //Invalid state error = ERROR_NOT_CONNECTED; } } //Return status code return error; } /** * @brief Update the Will message * @param[in] context Pointer to the MQTT-SN client context * @param[in] topic Will topic name * @param[in] message Will message * @param[in] length Length of the Will message * @param[in] qos QoS level to be used when publishing the Will message * @param[in] retain This flag specifies if the Will message is to be retained * @return Error code **/ error_t mqttSnClientUpdateWillMessage(MqttSnClientContext *context, const char_t *topic, const void *message, size_t length, MqttSnQosLevel qos, bool_t retain) { error_t error; systime_t time; //Check parameters if(context == NULL || topic == NULL) return ERROR_INVALID_PARAMETER; if(message == NULL && length != 0) return ERROR_INVALID_PARAMETER; //Initialize status code error = NO_ERROR; //Publish procedure while(!error) { //Get current time time = osGetSystemTime(); //Check current state if(context->state == MQTT_SN_CLIENT_STATE_ACTIVE) { //Update the Will message error = mqttSnClientSetWillMessage(context, topic, message, length, qos, retain); //Check status code if(!error) { //Save current time context->startTime = time; //Send WILLTOPICUPD message error = mqttSnClientSendWillTopicUpd(context); } } else if(context->state == MQTT_SN_CLIENT_STATE_SENDING_REQ) { //Check whether the timeout has elapsed if(timeCompare(time, context->startTime + context->timeout) >= 0) { //Abort the retransmission procedure context->state = MQTT_SN_CLIENT_STATE_DISCONNECTING; //Report a timeout error error = ERROR_TIMEOUT; } else if(timeCompare(time, context->retransmitStartTime + MQTT_SN_CLIENT_RETRY_TIMEOUT) >= 0) { //If the retry timer times out and the expected gateway's reply //is not received, the client retransmits the message if(context->msgType == MQTT_SN_MSG_TYPE_WILLTOPICUPD) { //Retransmit WILLTOPICUPD message error = mqttSnClientSendWillTopicUpd(context); } else if(context->msgType == MQTT_SN_MSG_TYPE_WILLMSGUPD) { //Retransmit WILLMSGUPD message error = mqttSnClientSendWillMsgUpd(context); } else { //Report an error error = ERROR_INVALID_TYPE; } } else { //Wait for the gateway's reply error = mqttSnClientProcessEvents(context, MQTT_SN_CLIENT_TICK_INTERVAL); } } else if(context->state == MQTT_SN_CLIENT_STATE_RESP_RECEIVED) { //Update MQTT-SN client state context->state = MQTT_SN_CLIENT_STATE_ACTIVE; //Check the type of the received message if(context->msgType == MQTT_SN_MSG_TYPE_WILLTOPICRESP) { //If the WILLTOPICUPD has not been accepted, the failure reason //is encoded in the return code field of the WILLTOPICRESP if(context->returnCode == MQTT_SN_RETURN_CODE_ACCEPTED) { //Valid Will topic? 
if(context->willMessage.topic[0] != '\0') { //Send WILLMSGUPD message error = mqttSnClientSendWillMsgUpd(context); } else { //An empty WILLTOPIC message is used by a client to delete //the Will topic and the Will message stored in the server break; } } else { //The WILLTOPICUPD request has been rejected by the gateway error = ERROR_REQUEST_REJECTED; } } else if(context->msgType == MQTT_SN_MSG_TYPE_WILLMSGRESP) { //If the WILLMSGUPD has not been accepted, the failure reason //is encoded in the return code field of the WILLMSGRESP if(context->returnCode == MQTT_SN_RETURN_CODE_ACCEPTED) { //The WILLMSGUPD request has been accepted by the gateway break; } else { //The WILLMSGUPD request has been rejected by the gateway error = ERROR_REQUEST_REJECTED; } } else { //Report an error error = ERROR_UNEXPECTED_RESPONSE; } } else { //Invalid state error = ERROR_NOT_CONNECTED; } } //Return status code return error; } /** * @brief Retrieve return code * @param[in] context Pointer to the MQTT-SN client context * @param[out] returnCode Return code * @return Error code **/ error_t mqttSnClientGetReturnCode(MqttSnClientContext *context, MqttSnReturnCode *returnCode) { //Check parameters if(context == NULL || returnCode == NULL) return ERROR_INVALID_PARAMETER; //Retrieve return code *returnCode = context->returnCode; //Successful processing return NO_ERROR; } /** * @brief Process MQTT-SN client events * @param[in] context Pointer to the MQTT-SN client context * @param[in] timeout Maximum time to wait before returning * @return Error code **/ error_t mqttSnClientTask(MqttSnClientContext *context, systime_t timeout) { error_t error; //Make sure the MQTT-SN client context is valid if(context == NULL) return ERROR_INVALID_PARAMETER; //Make sure the MQTT-SN client is connected if(context->state == MQTT_SN_CLIENT_STATE_ACTIVE || context->state == MQTT_SN_CLIENT_STATE_SENDING_REQ || context->state == MQTT_SN_CLIENT_STATE_RESP_RECEIVED) { //Process MQTT-SN client events error = mqttSnClientProcessEvents(context, timeout); } else { //Invalid state error = ERROR_NOT_CONNECTED; } //Return status code return error; } /** * @brief Disconnect from the MQTT-SN gateway * @param[in] context Pointer to the MQTT-SN client context * @param[in] duration Sleep duration, in milliseconds * @return Error code **/ error_t mqttSnClientDisconnect(MqttSnClientContext *context, systime_t duration) { error_t error; systime_t time; //Make sure the MQTT-SN client context is valid if(context == NULL) return ERROR_INVALID_PARAMETER; //Initialize status code error = NO_ERROR; //Disconnect procedure while(!error) { //Get current time time = osGetSystemTime(); //Check current state if(context->state == MQTT_SN_CLIENT_STATE_ACTIVE) { //Save current time context->startTime = time; //The DISCONNECT message is sent by a client to indicate that it //wants to close the connection error = mqttSnClientSendDisconnect(context, duration / 1000); } else if(context->state == MQTT_SN_CLIENT_STATE_SENDING_REQ) { //Check whether the timeout has elapsed if(timeCompare(time, context->startTime + context->timeout) >= 0) { //Terminate DTLS connection mqttSnClientShutdownConnection(context); //Report a timeout error error = ERROR_TIMEOUT; } else if(timeCompare(time, context->retransmitStartTime + MQTT_SN_CLIENT_RETRY_TIMEOUT) >= 0) { //If the retry timer times out and the expected gateway's reply //is not received, the client retransmits the message error = mqttSnClientSendDisconnect(context, duration / 1000); } else { //Wait for the gateway's reply error = 
mqttSnClientProcessEvents(context, MQTT_SN_CLIENT_TICK_INTERVAL); } } else if(context->state == MQTT_SN_CLIENT_STATE_DISCONNECTING) { //Terminate DTLS connection error = mqttSnClientShutdownConnection(context); //Close network connection mqttSnClientCloseConnection(context); //The connection is closed context->state = MQTT_SN_CLIENT_STATE_DISCONNECTED; } else if(context->state == MQTT_SN_CLIENT_STATE_DISCONNECTED) { //The MQTT-SN client is disconnected break; } else { //Invalid state error = ERROR_WRONG_STATE; } } //Any error to report? if(error != NO_ERROR && error != ERROR_WOULD_BLOCK) { //Close network connection mqttSnClientCloseConnection(context); //Update MQTT-SN client state context->state = MQTT_SN_CLIENT_STATE_DISCONNECTED; } //Return status code return error; } /** * @brief Release MQTT-SN client context * @param[in] context Pointer to the MQTT-SN client context **/ void mqttSnClientDeinit(MqttSnClientContext *context) { //Make sure the MQTT-SN client context is valid if(context != NULL) { //Close connection mqttSnClientCloseConnection(context); #if (MQTT_SN_CLIENT_DTLS_SUPPORT == ENABLED) //Release DTLS session state tlsFreeSessionState(&context->dtlsSession); #endif //Clear MQTT-SN client context osMemset(context, 0, sizeof(MqttSnClientContext)); } } #endif
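The listing above is the patched (2.0.2) version of mqtt_sn_client.c. For orientation, here is a minimal usage sketch of the client API it exposes — not taken from the record itself: the client identifier, topic, payload, gateway address, and port are made-up placeholders, error handling is elided, and the sketch assumes the 2.0.2 signatures shown above.

//Minimal sketch with placeholder values ("demo-client", "demo/topic",
//port 10000); gwIpAddr is left unresolved here and would normally be
//filled in from configuration before use
MqttSnClientContext mqttSnClientContext;

void mqttSnClientDemo(void)
{
   error_t error;
   uint16_t msgId;
   IpAddr gwIpAddr;

   //Initialize the client context
   error = mqttSnClientInit(&mqttSnClientContext);

   //Client identifier and gateway endpoint
   error = mqttSnClientSetIdentifier(&mqttSnClientContext, "demo-client");
   error = mqttSnClientSetGateway(&mqttSnClientContext, &gwIpAddr, 10000);

   //Open the session with the CleanSession flag set
   error = mqttSnClientConnect(&mqttSnClientContext, TRUE);

   //Publish a 5-byte QoS 1 message; msgId receives the identifier used
   error = mqttSnClientPublish(&mqttSnClientContext, "demo/topic", "hello",
      5, MQTT_SN_QOS_LEVEL_1, FALSE, FALSE, &msgId);

   //Close the session (duration 0: plain disconnect, no sleep period)
   error = mqttSnClientDisconnect(&mqttSnClientContext, 0);

   //Release resources
   mqttSnClientDeinit(&mqttSnClientContext);
}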
error_t mqttSnClientDisconnect(MqttSnClientContext *context) { error_t error; systime_t time; //Make sure the MQTT-SN client context is valid if(context == NULL) return ERROR_INVALID_PARAMETER; //Initialize status code error = NO_ERROR; //Disconnect procedure while(!error) { //Get current time time = osGetSystemTime(); //Check current state if(context->state == MQTT_SN_CLIENT_STATE_ACTIVE) { //Save current time context->startTime = time; //The DISCONNECT message is sent by a client to indicate that it //wants to close the connection error = mqttSnClientSendDisconnect(context, 0); } else if(context->state == MQTT_SN_CLIENT_STATE_SENDING_REQ) { //Check whether the timeout has elapsed if(timeCompare(time, context->startTime + context->timeout) >= 0) { //Terminate DTLS connection mqttSnClientShutdownConnection(context); //Report a timeout error error = ERROR_TIMEOUT; } else if(timeCompare(time, context->retransmitStartTime + MQTT_SN_CLIENT_RETRY_TIMEOUT) >= 0) { //If the retry timer times out and the expected gateway's reply //is not received, the client retransmits the message error = mqttSnClientSendDisconnect(context, 0); } else { //Wait for the gateway's reply error = mqttSnClientProcessEvents(context, MQTT_SN_CLIENT_TICK_INTERVAL); } } else if(context->state == MQTT_SN_CLIENT_STATE_DISCONNECTING) { //Terminate DTLS connection error = mqttSnClientShutdownConnection(context); //Close network connection mqttSnClientCloseConnection(context); //The connection is closed context->state = MQTT_SN_CLIENT_STATE_DISCONNECTED; } else if(context->state == MQTT_SN_CLIENT_STATE_DISCONNECTED) { //The MQTT-SN client is disconnected break; } else { //Invalid state error = ERROR_WRONG_STATE; } } //Any error to report? if(error != NO_ERROR && error != ERROR_WOULD_BLOCK) { //Close network connection mqttSnClientCloseConnection(context); //Update MQTT-SN client state context->state = MQTT_SN_CLIENT_STATE_DISCONNECTED; } //Return status code return error; }
error_t mqttSnClientDisconnect(MqttSnClientContext *context, systime_t duration) { error_t error; systime_t time; //Make sure the MQTT-SN client context is valid if(context == NULL) return ERROR_INVALID_PARAMETER; //Initialize status code error = NO_ERROR; //Disconnect procedure while(!error) { //Get current time time = osGetSystemTime(); //Check current state if(context->state == MQTT_SN_CLIENT_STATE_ACTIVE) { //Save current time context->startTime = time; //The DISCONNECT message is sent by a client to indicate that it //wants to close the connection error = mqttSnClientSendDisconnect(context, duration / 1000); } else if(context->state == MQTT_SN_CLIENT_STATE_SENDING_REQ) { //Check whether the timeout has elapsed if(timeCompare(time, context->startTime + context->timeout) >= 0) { //Terminate DTLS connection mqttSnClientShutdownConnection(context); //Report a timeout error error = ERROR_TIMEOUT; } else if(timeCompare(time, context->retransmitStartTime + MQTT_SN_CLIENT_RETRY_TIMEOUT) >= 0) { //If the retry timer times out and the expected gateway's reply //is not received, the client retransmits the message error = mqttSnClientSendDisconnect(context, duration / 1000); } else { //Wait for the gateway's reply error = mqttSnClientProcessEvents(context, MQTT_SN_CLIENT_TICK_INTERVAL); } } else if(context->state == MQTT_SN_CLIENT_STATE_DISCONNECTING) { //Terminate DTLS connection error = mqttSnClientShutdownConnection(context); //Close network connection mqttSnClientCloseConnection(context); //The connection is closed context->state = MQTT_SN_CLIENT_STATE_DISCONNECTED; } else if(context->state == MQTT_SN_CLIENT_STATE_DISCONNECTED) { //The MQTT-SN client is disconnected break; } else { //Invalid state error = ERROR_WRONG_STATE; } } //Any error to report? if(error != NO_ERROR && error != ERROR_WOULD_BLOCK) { //Close network connection mqttSnClientCloseConnection(context); //Update MQTT-SN client state context->state = MQTT_SN_CLIENT_STATE_DISCONNECTED; } //Return status code return error; }
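The two function bodies above capture the API change for this record: the old variant (func_before) always sends a DISCONNECT carrying duration 0, while the new one (func_after) takes a sleep duration in milliseconds and puts duration / 1000 (the protocol field is expressed in seconds) on the wire. A hypothetical call-site migration:

//Before (2.0.0): plain disconnect only
mqttSnClientDisconnect(&mqttSnClientContext);

//After (2.0.2): a non-zero duration signals a sleeping client; the value
//is given in milliseconds and converted to seconds in the DISCONNECT message
mqttSnClientDisconnect(&mqttSnClientContext, 60000);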
{'added': [(9, ' * Copyright (C) 2010-2021 Oryx Embedded SARL. All rights reserved.'), (28, ' * @version 2.0.2'), (939, " if(osStrchr(topicName, '#') == NULL && osStrchr(topicName, '+') == NULL)"), (1366, ' * @param[in] duration Sleep duration, in milliseconds'), (1370, 'error_t mqttSnClientDisconnect(MqttSnClientContext *context,'), (1371, ' systime_t duration)'), (1397, ' error = mqttSnClientSendDisconnect(context, duration / 1000);'), (1415, ' error = mqttSnClientSendDisconnect(context, duration / 1000);')], 'deleted': [(9, ' * Copyright (C) 2010-2020 Oryx Embedded SARL. All rights reserved.'), (28, ' * @version 2.0.0'), (939, " if(strchr(topicName, '#') == NULL && strchr(topicName, '+') == NULL)"), (1369, 'error_t mqttSnClientDisconnect(MqttSnClientContext *context)'), (1395, ' error = mqttSnClientSendDisconnect(context, 0);'), (1413, ' error = mqttSnClientSendDisconnect(context, 0);')]}
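Aside from the copyright and version bumps, the diff above swaps the bare libc strchr for osStrchr in mqttSnClientSubscribe's wildcard check — in line with the osStrlen/osStrcpy/osMemset portability wrappers used elsewhere in the file — and threads the new duration parameter through mqttSnClientDisconnect. The affected check, assuming osStrchr keeps strchr's semantics:

//Topic IDs are only stored for filters without MQTT-SN wildcards
if(osStrchr(topicName, '#') == NULL && osStrchr(topicName, '+') == NULL)
{
   //Save the topic ID assigned by the gateway in the SUBACK
   error = mqttSnClientAddTopic(context, topicName, context->topicId);
}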
num_lines_added: 8
num_lines_deleted: 6
num_lines_in_file: 790
num_tokens_in_file: 3234
repo: https://github.com/Oryx-Embedded/CycloneTCP
cve_id: CVE-2021-26788
cwe_id: ['CWE-20']
file_name: mqtt_sn_client.c
method_name: mqttSnClientSubscribe
/** * @file mqtt_sn_client.c * @brief MQTT-SN client * * @section License * * SPDX-License-Identifier: GPL-2.0-or-later * * Copyright (C) 2010-2020 Oryx Embedded SARL. All rights reserved. * * This file is part of CycloneTCP Open. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. * * @author Oryx Embedded SARL (www.oryx-embedded.com) * @version 2.0.0 **/ //Switch to the appropriate trace level #define TRACE_LEVEL MQTT_SN_TRACE_LEVEL //Dependencies #include "core/net.h" #include "mqtt_sn/mqtt_sn_client.h" #include "mqtt_sn/mqtt_sn_client_message.h" #include "mqtt_sn/mqtt_sn_client_transport.h" #include "mqtt_sn/mqtt_sn_client_misc.h" #include "debug.h" //Check TCP/IP stack configuration #if (MQTT_SN_CLIENT_SUPPORT == ENABLED) /** * @brief Initialize MQTT-SN client context * @param[in] context Pointer to the MQTT-SN client context * @return Error code **/ error_t mqttSnClientInit(MqttSnClientContext *context) { #if (MQTT_SN_CLIENT_DTLS_SUPPORT == ENABLED) error_t error; #endif //Make sure the MQTT-SN client context is valid if(context == NULL) return ERROR_INVALID_PARAMETER; //Clear MQTT-SN client context osMemset(context, 0, sizeof(MqttSnClientContext)); #if (MQTT_SN_CLIENT_DTLS_SUPPORT == ENABLED) //Initialize DTLS session state error = tlsInitSessionState(&context->dtlsSession); //Any error to report? 
if(error) return error; #endif //Initialize MQTT-SN client state context->state = MQTT_SN_CLIENT_STATE_DISCONNECTED; //Default transport protocol context->transportProtocol = MQTT_SN_TRANSPORT_PROTOCOL_UDP; //Default timeout context->timeout = MQTT_SN_CLIENT_DEFAULT_TIMEOUT; //Default keep-alive time interval context->keepAlive = MQTT_SN_CLIENT_DEFAULT_KEEP_ALIVE; //Initialize message identifier context->msgId = 0; //Successful initialization return NO_ERROR; } /** * @brief Set the transport protocol to be used * @param[in] context Pointer to the MQTT-SN client context * @param[in] transportProtocol Transport protocol to be used (UDP or DTLS) * @return Error code **/ error_t mqttSnClientSetTransportProtocol(MqttSnClientContext *context, MqttSnTransportProtocol transportProtocol) { //Make sure the MQTT-SN client context is valid if(context == NULL) return ERROR_INVALID_PARAMETER; //Save the transport protocol to be used context->transportProtocol = transportProtocol; //Successful processing return NO_ERROR; } #if (MQTT_SN_CLIENT_DTLS_SUPPORT == ENABLED) /** * @brief Register DTLS initialization callback function * @param[in] context Pointer to the MQTT-SN client context * @param[in] callback DTLS initialization callback function * @return Error code **/ error_t mqttSnClientRegisterDtlsInitCallback(MqttSnClientContext *context, MqttSnClientDtlsInitCallback callback) { //Check parameters if(context == NULL || callback == NULL) return ERROR_INVALID_PARAMETER; //Save callback function context->dtlsInitCallback = callback; //Successful processing return NO_ERROR; } #endif /** * @brief Register publish callback function * @param[in] context Pointer to the MQTT-SN client context * @param[in] callback Callback function to be called when a PUBLISH message * is received * @return Error code **/ error_t mqttSnClientRegisterPublishCallback(MqttSnClientContext *context, MqttSnClientPublishCallback callback) { //Make sure the MQTT-SN client context is valid if(context == NULL) return ERROR_INVALID_PARAMETER; //Save callback function context->publishCallback = callback; //Successful processing return NO_ERROR; } /** * @brief Set the list of predefined topics * @param[in] context Pointer to the MQTT-SN client context * @param[in] predefinedTopics List of predefined topics * @param[in] size Number of predefined topics * @return Error code **/ error_t mqttSnClientSetPredefinedTopics(MqttSnClientContext *context, MqttSnPredefinedTopic *predefinedTopics, uint_t size) { //Make sure the MQTT-SN client context is valid if(context == NULL) return ERROR_INVALID_PARAMETER; //Check parameters if(predefinedTopics == NULL && size != 0) return ERROR_INVALID_PARAMETER; //Save the list of predefined topics context->predefinedTopicTable = predefinedTopics; context->predefinedTopicTableSize = size; //Successful processing return NO_ERROR; } /** * @brief Set communication timeout * @param[in] context Pointer to the MQTT-SN client context * @param[in] timeout Timeout value, in milliseconds * @return Error code **/ error_t mqttSnClientSetTimeout(MqttSnClientContext *context, systime_t timeout) { //Make sure the MQTT-SN client context is valid if(context == NULL) return ERROR_INVALID_PARAMETER; //Save timeout value context->timeout = timeout; //Successful processing return NO_ERROR; } /** * @brief Set keep-alive value * @param[in] context Pointer to the MQTT-SN client context * @param[in] keepAlive Keep-alive interval, in milliseconds * @return Error code **/ error_t mqttSnClientSetKeepAlive(MqttSnClientContext *context, 
systime_t keepAlive) { //Make sure the MQTT-SN client context is valid if(context == NULL) return ERROR_INVALID_PARAMETER; //Save keep-alive value context->keepAlive = keepAlive; //Successful processing return NO_ERROR; } /** * @brief Set client identifier * @param[in] context Pointer to the MQTT-SN client context * @param[in] clientId NULL-terminated string containing the client identifier * @return Error code **/ error_t mqttSnClientSetIdentifier(MqttSnClientContext *context, const char_t *clientId) { //Check parameters if(context == NULL || clientId == NULL) return ERROR_INVALID_PARAMETER; //Make sure the length of the client identifier is acceptable if(osStrlen(clientId) > MQTT_SN_CLIENT_MAX_ID_LEN) return ERROR_INVALID_LENGTH; //Save client identifier osStrcpy(context->clientId, clientId); //Successful processing return NO_ERROR; } /** * @brief Specify the Will message * @param[in] context Pointer to the MQTT-SN client context * @param[in] topic Will topic name * @param[in] message Will message * @param[in] length Length of the Will message * @param[in] qos QoS level to be used when publishing the Will message * @param[in] retain This flag specifies if the Will message is to be retained * @return Error code **/ error_t mqttSnClientSetWillMessage(MqttSnClientContext *context, const char_t *topic, const void *message, size_t length, MqttSnQosLevel qos, bool_t retain) { //Check parameters if(context == NULL || topic == NULL) return ERROR_INVALID_PARAMETER; //Make sure the length of the Will topic is acceptable if(osStrlen(topic) > MQTT_SN_CLIENT_MAX_WILL_TOPIC_LEN) return ERROR_INVALID_LENGTH; //Save Will topic osStrcpy(context->willMessage.topic, topic); //Any message payload if(length > 0) { //Sanity check if(message == NULL) return ERROR_INVALID_PARAMETER; //Make sure the length of the Will message payload is acceptable if(osStrlen(message) > MQTT_SN_CLIENT_MAX_WILL_PAYLOAD_LEN) return ERROR_INVALID_LENGTH; //Save Will message payload osMemcpy(context->willMessage.payload, message, length); } //Length of the Will message payload context->willMessage.length = length; //QoS level to be used when publishing the Will message context->willMessage.flags.qos = qos; //This flag specifies if the Will message is to be retained context->willMessage.flags.retain = retain; //Successful processing return NO_ERROR; } /** * @brief Bind the MQTT-SN client to a particular network interface * @param[in] context Pointer to the MQTT-SN client context * @param[in] interface Network interface to be used * @return Error code **/ error_t mqttSnClientBindToInterface(MqttSnClientContext *context, NetInterface *interface) { //Make sure the MQTT-SN client context is valid if(context == NULL) return ERROR_INVALID_PARAMETER; //Explicitly associate the MQTT client with the specified interface context->interface = interface; //Successful processing return NO_ERROR; } /** * @brief Specify the address of the gateway * @param[in] context Pointer to the MQTT-SN client context * @param[in] gwIpAddr Gateway IP address * @param[in] gwPort Gateway port number * @return Error code **/ error_t mqttSnClientSetGateway(MqttSnClientContext *context, const IpAddr *gwIpAddr, uint16_t gwPort) { //Check parameters if(context == NULL || gwIpAddr == NULL) return ERROR_INVALID_PARAMETER; //Save the IP address and the port number of the MQTT-SN gateway context->gwIpAddr = *gwIpAddr; context->gwPort = gwPort; //Successful processing return NO_ERROR; } /** * @brief Search for a gateway * @param[in] context Pointer to the MQTT-SN client 
context * @param[in] destIpAddr Destination IP address * @param[in] destPort Destination port number * @return Error code **/ error_t mqttSnClientSearchGateway(MqttSnClientContext *context, const IpAddr *destIpAddr, uint16_t destPort) { error_t error; systime_t time; //Check parameters if(context == NULL || destIpAddr == NULL) return ERROR_INVALID_PARAMETER; //Initialize status code error = NO_ERROR; //Gateway discovery procedure while(!error) { //Get current time time = osGetSystemTime(); //Check current state if(context->state == MQTT_SN_CLIENT_STATE_DISCONNECTED) { //Open network connection error = mqttSnClientOpenConnection(context, FALSE); //Check status code if(!error) { //Save current time context->startTime = time; context->retransmitStartTime = time; //To prevent broadcast storms when multiple clients start searching //for GW almost at the same time, the sending of the SEARCHGW message //is delayed by a random time between 0 and TSEARCHGW context->retransmitTimeout = netGetRandRange(0, MQTT_SN_CLIENT_SEARCH_DELAY); //Start searching for gateways context->state = MQTT_SN_CLIENT_STATE_SEARCHING; } } else if(context->state == MQTT_SN_CLIENT_STATE_SEARCHING) { //Check whether the timeout has elapsed if(timeCompare(time, context->startTime + context->timeout) >= 0) { //Abort the retransmission procedure error = ERROR_TIMEOUT; } else if(timeCompare(time, context->retransmitStartTime + context->retransmitTimeout) >= 0) { //Set retransmission timeout context->retransmitTimeout = MQTT_SN_CLIENT_RETRY_TIMEOUT; //If the retry timer times out and the expected gateway's reply //is not received, the client retransmits the message error = mqttSnClientSendSearchGw(context, 0, destIpAddr, destPort); } else { //Wait for the gateway's reply error = mqttSnClientProcessEvents(context, MQTT_SN_CLIENT_TICK_INTERVAL); } } else if(context->state == MQTT_SN_CLIENT_STATE_RESP_RECEIVED) { //Check the type of the received message if(context->msgType == MQTT_SN_MSG_TYPE_GWINFO) { //Close network connection mqttSnClientCloseConnection(context); //A MQTT-SN gateway has been found context->state = MQTT_SN_CLIENT_STATE_DISCONNECTED; break; } else { //Report an error error = ERROR_UNEXPECTED_RESPONSE; } } else { //Invalid state error = ERROR_WRONG_STATE; } } //Any error to report? 
if(error != NO_ERROR && error != ERROR_WOULD_BLOCK) { //Clean up side effects mqttSnClientCloseConnection(context); //Update MQTT-SN client state context->state = MQTT_SN_CLIENT_STATE_DISCONNECTED; } //Return status code return error; } /** * @brief Establish connection with the MQTT-SN gateway * @param[in] context Pointer to the MQTT-SN client context * @param[in] cleanSession If this flag is set, then the client and server * must discard any previous session and start a new one * @return Error code **/ error_t mqttSnClientConnect(MqttSnClientContext *context, bool_t cleanSession) { error_t error; systime_t time; //Make sure the MQTT-SN client context is valid if(context == NULL) return ERROR_INVALID_PARAMETER; //Initialize status code error = NO_ERROR; //Establish connection with the MQTT-SN gateway while(!error) { //Get current time time = osGetSystemTime(); //Check current state if(context->state == MQTT_SN_CLIENT_STATE_DISCONNECTED) { //Open network connection error = mqttSnClientOpenConnection(context, TRUE); //Check status code if(!error) { //Save current time context->startTime = time; //Update MQTT-SN client state context->state = MQTT_SN_CLIENT_STATE_CONNECTING; } } else if(context->state == MQTT_SN_CLIENT_STATE_CONNECTING) { //Establish DTLS connection error = mqttSnClientEstablishConnection(context); //Check status code if(error == NO_ERROR) { //Check whether the CleanSession flag is set if(cleanSession) { //Discard previous session state osMemset(context->topicTable, 0, sizeof(context->topicTable)); osMemset(context->msgIdTable, 0, sizeof(context->msgIdTable)); } //The CONNECT message is sent by a client to setup a connection error = mqttSnClientSendConnect(context, cleanSession); } else if(error == ERROR_WOULD_BLOCK || error == ERROR_TIMEOUT) { //Check whether the timeout has elapsed if(timeCompare(time, context->startTime + context->timeout) >= 0) { //Report an error error = ERROR_TIMEOUT; } } else { //Failed to establish DTLS connection } } else if(context->state == MQTT_SN_CLIENT_STATE_SENDING_REQ) { //Check whether the timeout has elapsed if(timeCompare(time, context->startTime + context->timeout) >= 0) { //Abort the retransmission procedure error = ERROR_TIMEOUT; } else if(timeCompare(time, context->retransmitStartTime + MQTT_SN_CLIENT_RETRY_TIMEOUT) >= 0) { //If the retry timer times out and the expected gateway's reply //is not received, the client retransmits the message error = mqttSnClientSendConnect(context, cleanSession); } else { //Wait for the gateway's reply error = mqttSnClientProcessEvents(context, MQTT_SN_CLIENT_TICK_INTERVAL); } } else if(context->state == MQTT_SN_CLIENT_STATE_RESP_RECEIVED) { //Check the type of the received message if(context->msgType == MQTT_SN_MSG_TYPE_CONNACK) { //If the connection request has not been accepted, the failure reason //is encoded in the return code field of the CONNACK message if(context->returnCode == MQTT_SN_RETURN_CODE_ACCEPTED) { //The connection request has been accepted by the gateway context->state = MQTT_SN_CLIENT_STATE_ACTIVE; } else { //Terminate DTLS connection mqttSnClientShutdownConnection(context); //The connection request has been rejected by the gateway error = ERROR_REQUEST_REJECTED; } } else { //Report an error error = ERROR_UNEXPECTED_RESPONSE; } } else if(context->state == MQTT_SN_CLIENT_STATE_ACTIVE) { //The MQTT-SN client is connected break; } else { //Invalid state error = ERROR_WRONG_STATE; } } //Any error to report? 
if(error != NO_ERROR && error != ERROR_WOULD_BLOCK) { //Clean up side effects mqttSnClientCloseConnection(context); //Update MQTT-SN client state context->state = MQTT_SN_CLIENT_STATE_DISCONNECTED; } //Return status code return error; } /** * @brief Publish message * @param[in] context Pointer to the MQTT-SN client context * @param[in] topicName Topic name * @param[in] message Message payload * @param[in] length Length of the message payload * @param[in] qos QoS level to be used when publishing the message * @param[in] retain This flag specifies if the message is to be retained * @param[in] dup This flag specifies if the message is sent for the first * time or if the message is retransmitted * @param[in,out] msgId Message identifier used to send the PUBLISH message * @return Error code **/ error_t mqttSnClientPublish(MqttSnClientContext *context, const char_t *topicName, const void *message, size_t length, MqttSnQosLevel qos, bool_t retain, bool_t dup, uint16_t *msgId) { error_t error; systime_t time; uint16_t publishMsgId; //Check parameters if(context == NULL || topicName == NULL) return ERROR_INVALID_PARAMETER; if(message == NULL && length != 0) return ERROR_INVALID_PARAMETER; if(dup && msgId == NULL) return ERROR_INVALID_PARAMETER; //Initialize status code error = NO_ERROR; //Initialize message identifier if(dup) publishMsgId = *msgId; else publishMsgId = 0; //Publish procedure while(!error) { //Get current time time = osGetSystemTime(); //Check current state if(context->state == MQTT_SN_CLIENT_STATE_ACTIVE) { //Save current time context->startTime = time; //Check whether the register procedure is needed if(mqttSnClientIsShortTopicName(topicName) == FALSE && mqttSnClientFindTopicName(context, topicName) == 0 && mqttSnClientFindPredefTopicName(context, topicName) == 0) { //The message identifier allows the sender to match a message with //its corresponding acknowledgment mqttSnClientGenerateMessageId(context); //To register a topic name a client sends a REGISTER message to //the gateway error = mqttSnClientSendRegister(context, topicName); } else { //The message ID is only relevant in case of QoS levels 1 and 2 if(qos == MQTT_SN_QOS_LEVEL_1 || qos == MQTT_SN_QOS_LEVEL_2) { //The message identifier allows the sender to match a message with //its corresponding acknowledgment if(!dup) publishMsgId = mqttSnClientGenerateMessageId(context); } else { //For QoS level 0, the message identifier is coded 0x0000 publishMsgId = 0; } //The client can start publishing data relating to the registered //topic name by sending PUBLISH messages to the gateway error = mqttSnClientSendPublish(context, publishMsgId, topicName, message, length, qos, retain, dup); //In the QoS 0, no response is sent by the receiver and no retry //is performed by the sender if(qos != MQTT_SN_QOS_LEVEL_1 && qos != MQTT_SN_QOS_LEVEL_2) break; } } else if(context->state == MQTT_SN_CLIENT_STATE_SENDING_REQ) { //Check whether the transmission of the PUBLISH message has started if(context->msgType == MQTT_SN_MSG_TYPE_PUBLISH || context->msgType == MQTT_SN_MSG_TYPE_PUBREL) { //Restore the message identifier that was used to send the first //PUBLISH message if(!dup) publishMsgId = context->msgId; } //Check whether the timeout has elapsed if(timeCompare(time, context->startTime + context->timeout) >= 0) { //Abort the retransmission procedure context->state = MQTT_SN_CLIENT_STATE_DISCONNECTING; //Report a timeout error error = ERROR_TIMEOUT; } else if(timeCompare(time, context->retransmitStartTime + MQTT_SN_CLIENT_RETRY_TIMEOUT) >= 0) { 
//If the retry timer times out and the expected gateway's reply //is not received, the client retransmits the message if(context->msgType == MQTT_SN_MSG_TYPE_REGISTER) { //Retransmit REGISTER message error = mqttSnClientSendRegister(context, topicName); } else if(context->msgType == MQTT_SN_MSG_TYPE_PUBLISH) { //Retransmit PUBLISH message error = mqttSnClientSendPublish(context, publishMsgId, topicName, message, length, qos, retain, TRUE); } else if(context->msgType == MQTT_SN_MSG_TYPE_PUBREL) { //Retransmit PUBREL message error = mqttSnClientSendPubRel(context, context->msgId); } else { //Report an error error = ERROR_INVALID_TYPE; } } else { //Wait for the gateway's reply error = mqttSnClientProcessEvents(context, MQTT_SN_CLIENT_TICK_INTERVAL); } } else if(context->state == MQTT_SN_CLIENT_STATE_RESP_RECEIVED) { //Update MQTT-SN client state context->state = MQTT_SN_CLIENT_STATE_ACTIVE; //Check whether the transmission of the PUBLISH message has started if(context->msgType == MQTT_SN_MSG_TYPE_PUBACK || context->msgType == MQTT_SN_MSG_TYPE_PUBREC || context->msgType == MQTT_SN_MSG_TYPE_PUBCOMP) { //Restore the message identifier that was used to send the first //PUBLISH message if(!dup) publishMsgId = context->msgId; } //Check the type of the received message if(context->msgType == MQTT_SN_MSG_TYPE_REGACK) { //If the registration has not been accepted, the failure reason is //encoded in the return code field of the REGACK message if(context->returnCode == MQTT_SN_RETURN_CODE_ACCEPTED) { //Save the topic ID assigned by the gateway error = mqttSnClientAddTopic(context, topicName, context->topicId); } else { //The registration request has been rejected by the gateway error = ERROR_REQUEST_REJECTED; } } else if(context->msgType == MQTT_SN_MSG_TYPE_PUBACK) { //If the publish request has not been accepted, the failure reason //is encoded in the return code field of the PUBACK message if(context->returnCode == MQTT_SN_RETURN_CODE_ACCEPTED) { //Check QoS level if(qos == MQTT_SN_QOS_LEVEL_2) { //Unexpected PUBREC message received error = ERROR_UNEXPECTED_MESSAGE; } else { //A PUBACK message has been received break; } } else { //The publish request has been rejected by the gateway error = ERROR_REQUEST_REJECTED; } } else if(context->msgType == MQTT_SN_MSG_TYPE_PUBREC) { //Check QoS level if(qos == MQTT_SN_QOS_LEVEL_2) { //A PUBREL packet is the response to a PUBREC packet. 
It is the //third packet of the QoS 2 protocol exchange error = mqttSnClientSendPubRel(context, context->msgId); } else { //Unexpected PUBREC message received error = ERROR_UNEXPECTED_MESSAGE; } } else if(context->msgType == MQTT_SN_MSG_TYPE_PUBCOMP) { //A PUBCOMP message has been received break; } else { //Report an error error = ERROR_UNEXPECTED_RESPONSE; } } else { //Invalid state error = ERROR_NOT_CONNECTED; } } //Return the message identifier that was used to send the PUBLISH message if(msgId != NULL) *msgId = publishMsgId; //Return status code return error; } /** * @brief Subscribe to topic * @param[in] context Pointer to the MQTT-SN client context * @param[in] topicName Topic filter * @param[in] qos Maximum QoS level at which the server can send application * messages to the client * @return Error code **/ error_t mqttSnClientSubscribe(MqttSnClientContext *context, const char_t *topicName, MqttSnQosLevel qos) { error_t error; systime_t time; //Check parameters if(context == NULL || topicName == NULL) return ERROR_INVALID_PARAMETER; //Initialize status code error = NO_ERROR; //Topic subscribe procedure while(!error) { //Get current time time = osGetSystemTime(); //Check current state if(context->state == MQTT_SN_CLIENT_STATE_ACTIVE) { //The message identifier allows the sender to match a message with //its corresponding acknowledgment mqttSnClientGenerateMessageId(context); //Save current time context->startTime = time; //Send SUBSCRIBE message error = mqttSnClientSendSubscribe(context, topicName, qos); } else if(context->state == MQTT_SN_CLIENT_STATE_SENDING_REQ) { //Check whether the timeout has elapsed if(timeCompare(time, context->startTime + context->timeout) >= 0) { //Abort the retransmission procedure context->state = MQTT_SN_CLIENT_STATE_DISCONNECTING; //Report a timeout error error = ERROR_TIMEOUT; } else if(timeCompare(time, context->retransmitStartTime + MQTT_SN_CLIENT_RETRY_TIMEOUT) >= 0) { //If the retry timer times out and the expected gateway's reply //is not received, the client retransmits the message error = mqttSnClientSendSubscribe(context, topicName, qos); } else { //Wait for the gateway's reply error = mqttSnClientProcessEvents(context, MQTT_SN_CLIENT_TICK_INTERVAL); } } else if(context->state == MQTT_SN_CLIENT_STATE_RESP_RECEIVED) { //Update MQTT-SN client state context->state = MQTT_SN_CLIENT_STATE_ACTIVE; //Check the type of the received message if(context->msgType == MQTT_SN_MSG_TYPE_SUBACK) { //If the subscribe request has not been accepted, the failure reason //is encoded in the return code field of the SUBACK message if(context->returnCode == MQTT_SN_RETURN_CODE_ACCEPTED) { //The topic ID field is not relevant in case of subscriptions to a //topic name which contains wildcard characters if(strchr(topicName, '#') == NULL && strchr(topicName, '+') == NULL) { //Save the topic ID assigned by the gateway error = mqttSnClientAddTopic(context, topicName, context->topicId); } //A SUBACK message has been received break; } else { //The subscribe request has been rejected by the gateway error = ERROR_REQUEST_REJECTED; } } else { //Report an error error = ERROR_UNEXPECTED_RESPONSE; } } else { //Invalid state error = ERROR_NOT_CONNECTED; } } //Return status code return error; } /** * @brief Unsubscribe from topic * @param[in] context Pointer to the MQTT-SN client context * @param[in] topicName Topic filter * @return Error code **/ error_t mqttSnClientUnsubscribe(MqttSnClientContext *context, const char_t *topicName) { error_t error; systime_t time; //Check parameters 
if(context == NULL || topicName == NULL) return ERROR_INVALID_PARAMETER; //Initialize status code error = NO_ERROR; //Topic unsubscribe procedure while(!error) { //Get current time time = osGetSystemTime(); //Check current state if(context->state == MQTT_SN_CLIENT_STATE_ACTIVE) { //The message identifier allows the sender to match a message with //its corresponding acknowledgment mqttSnClientGenerateMessageId(context); //Save current time context->startTime = time; //Send UNSUBSCRIBE message error = mqttSnClientSendUnsubscribe(context, topicName); } else if(context->state == MQTT_SN_CLIENT_STATE_SENDING_REQ) { //Check whether the timeout has elapsed if(timeCompare(time, context->startTime + context->timeout) >= 0) { //Abort the retransmission procedure context->state = MQTT_SN_CLIENT_STATE_DISCONNECTING; //Report a timeout error error = ERROR_TIMEOUT; } else if(timeCompare(time, context->retransmitStartTime + MQTT_SN_CLIENT_RETRY_TIMEOUT) >= 0) { //If the retry timer times out and the expected gateway's reply //is not received, the client retransmits the message error = mqttSnClientSendUnsubscribe(context, topicName); } else { //Wait for the gateway's reply error = mqttSnClientProcessEvents(context, MQTT_SN_CLIENT_TICK_INTERVAL); } } else if(context->state == MQTT_SN_CLIENT_STATE_RESP_RECEIVED) { //Update MQTT-SN client state context->state = MQTT_SN_CLIENT_STATE_ACTIVE; //Check the type of the received message if(context->msgType == MQTT_SN_MSG_TYPE_UNSUBACK) { //An UNSUBACK message has been received break; } else { //Report an error error = ERROR_UNEXPECTED_RESPONSE; } } else { //Invalid state error = ERROR_NOT_CONNECTED; } } //Return status code return error; } /** * @brief Send ping request * @param[in] context Pointer to the MQTT-SN client context * @return Error code **/ error_t mqttSnClientPing(MqttSnClientContext *context) { error_t error; systime_t time; //Make sure the MQTT-SN client context is valid if(context == NULL) return ERROR_INVALID_PARAMETER; //Initialize status code error = NO_ERROR; //Send PINGREQ packet and wait for PINGRESP packet to be received while(!error) { //Get current time time = osGetSystemTime(); //Check current state if(context->state == MQTT_SN_CLIENT_STATE_ACTIVE) { //Save current time context->startTime = time; context->retransmitStartTime = time; //Send PINGREQ message error = mqttSnClientSendPingReq(context); //Update MQTT-SN client state context->state = MQTT_SN_CLIENT_STATE_SENDING_REQ; context->msgType = MQTT_SN_MSG_TYPE_PINGREQ; } else if(context->state == MQTT_SN_CLIENT_STATE_SENDING_REQ) { //Check whether the timeout has elapsed if(timeCompare(time, context->startTime + context->timeout) >= 0) { //Abort the retransmission procedure context->state = MQTT_SN_CLIENT_STATE_DISCONNECTING; //Report a timeout error error = ERROR_TIMEOUT; } else if(timeCompare(time, context->retransmitStartTime + MQTT_SN_CLIENT_RETRY_TIMEOUT) >= 0) { //If the retry timer times out and the expected gateway's reply //is not received, the client retransmits the message error = mqttSnClientSendPingReq(context); //Save the time at which the message was sent context->retransmitStartTime = time; } else { //Wait for the gateway's reply error = mqttSnClientProcessEvents(context, MQTT_SN_CLIENT_TICK_INTERVAL); } } else if(context->state == MQTT_SN_CLIENT_STATE_RESP_RECEIVED) { //Update MQTT-SN client state context->state = MQTT_SN_CLIENT_STATE_ACTIVE; //Check the type of the received message if(context->msgType == MQTT_SN_MSG_TYPE_PINGRESP) { //A PINGRESP message has been received 
break; } else { //Report an error error = ERROR_UNEXPECTED_RESPONSE; } } else { //Invalid state error = ERROR_NOT_CONNECTED; } } //Return status code return error; } /** * @brief Update the Will message * @param[in] context Pointer to the MQTT-SN client context * @param[in] topic Will topic name * @param[in] message Will message * @param[in] length Length of the Will message * @param[in] qos QoS level to be used when publishing the Will message * @param[in] retain This flag specifies if the Will message is to be retained * @return Error code **/ error_t mqttSnClientUpdateWillMessage(MqttSnClientContext *context, const char_t *topic, const void *message, size_t length, MqttSnQosLevel qos, bool_t retain) { error_t error; systime_t time; //Check parameters if(context == NULL || topic == NULL) return ERROR_INVALID_PARAMETER; if(message == NULL && length != 0) return ERROR_INVALID_PARAMETER; //Initialize status code error = NO_ERROR; //Publish procedure while(!error) { //Get current time time = osGetSystemTime(); //Check current state if(context->state == MQTT_SN_CLIENT_STATE_ACTIVE) { //Update the Will message error = mqttSnClientSetWillMessage(context, topic, message, length, qos, retain); //Check status code if(!error) { //Save current time context->startTime = time; //Send WILLTOPICUPD message error = mqttSnClientSendWillTopicUpd(context); } } else if(context->state == MQTT_SN_CLIENT_STATE_SENDING_REQ) { //Check whether the timeout has elapsed if(timeCompare(time, context->startTime + context->timeout) >= 0) { //Abort the retransmission procedure context->state = MQTT_SN_CLIENT_STATE_DISCONNECTING; //Report a timeout error error = ERROR_TIMEOUT; } else if(timeCompare(time, context->retransmitStartTime + MQTT_SN_CLIENT_RETRY_TIMEOUT) >= 0) { //If the retry timer times out and the expected gateway's reply //is not received, the client retransmits the message if(context->msgType == MQTT_SN_MSG_TYPE_WILLTOPICUPD) { //Retransmit WILLTOPICUPD message error = mqttSnClientSendWillTopicUpd(context); } else if(context->msgType == MQTT_SN_MSG_TYPE_WILLMSGUPD) { //Retransmit WILLMSGUPD message error = mqttSnClientSendWillMsgUpd(context); } else { //Report an error error = ERROR_INVALID_TYPE; } } else { //Wait for the gateway's reply error = mqttSnClientProcessEvents(context, MQTT_SN_CLIENT_TICK_INTERVAL); } } else if(context->state == MQTT_SN_CLIENT_STATE_RESP_RECEIVED) { //Update MQTT-SN client state context->state = MQTT_SN_CLIENT_STATE_ACTIVE; //Check the type of the received message if(context->msgType == MQTT_SN_MSG_TYPE_WILLTOPICRESP) { //If the WILLTOPICUPD has not been accepted, the failure reason //is encoded in the return code field of the WILLTOPICRESP if(context->returnCode == MQTT_SN_RETURN_CODE_ACCEPTED) { //Valid Will topic? 
if(context->willMessage.topic[0] != '\0') { //Send WILLMSGUPD message error = mqttSnClientSendWillMsgUpd(context); } else { //An empty WILLTOPIC message is used by a client to delete //the Will topic and the Will message stored in the server break; } } else { //The WILLTOPICUPD request has been rejected by the gateway error = ERROR_REQUEST_REJECTED; } } else if(context->msgType == MQTT_SN_MSG_TYPE_WILLMSGRESP) { //If the WILLMSGUPD has not been accepted, the failure reason //is encoded in the return code field of the WILLMSGRESP if(context->returnCode == MQTT_SN_RETURN_CODE_ACCEPTED) { //The WILLMSGUPD request has been accepted by the gateway break; } else { //The WILLMSGUPD request has been rejected by the gateway error = ERROR_REQUEST_REJECTED; } } else { //Report an error error = ERROR_UNEXPECTED_RESPONSE; } } else { //Invalid state error = ERROR_NOT_CONNECTED; } } //Return status code return error; } /** * @brief Retrieve return code * @param[in] context Pointer to the MQTT-SN client context * @param[out] returnCode Return code * @return Error code **/ error_t mqttSnClientGetReturnCode(MqttSnClientContext *context, MqttSnReturnCode *returnCode) { //Check parameters if(context == NULL || returnCode == NULL) return ERROR_INVALID_PARAMETER; //Retrieve return code *returnCode = context->returnCode; //Successful processing return NO_ERROR; } /** * @brief Process MQTT-SN client events * @param[in] context Pointer to the MQTT-SN client context * @param[in] timeout Maximum time to wait before returning * @return Error code **/ error_t mqttSnClientTask(MqttSnClientContext *context, systime_t timeout) { error_t error; //Make sure the MQTT-SN client context is valid if(context == NULL) return ERROR_INVALID_PARAMETER; //Make sure the MQTT-SN client is connected if(context->state == MQTT_SN_CLIENT_STATE_ACTIVE || context->state == MQTT_SN_CLIENT_STATE_SENDING_REQ || context->state == MQTT_SN_CLIENT_STATE_RESP_RECEIVED) { //Process MQTT-SN client events error = mqttSnClientProcessEvents(context, timeout); } else { //Invalid state error = ERROR_NOT_CONNECTED; } //Return status code return error; } /** * @brief Disconnect from the MQTT-SN gateway * @param[in] context Pointer to the MQTT-SN client context * @return Error code **/ error_t mqttSnClientDisconnect(MqttSnClientContext *context) { error_t error; systime_t time; //Make sure the MQTT-SN client context is valid if(context == NULL) return ERROR_INVALID_PARAMETER; //Initialize status code error = NO_ERROR; //Disconnect procedure while(!error) { //Get current time time = osGetSystemTime(); //Check current state if(context->state == MQTT_SN_CLIENT_STATE_ACTIVE) { //Save current time context->startTime = time; //The DISCONNECT message is sent by a client to indicate that it //wants to close the connection error = mqttSnClientSendDisconnect(context, 0); } else if(context->state == MQTT_SN_CLIENT_STATE_SENDING_REQ) { //Check whether the timeout has elapsed if(timeCompare(time, context->startTime + context->timeout) >= 0) { //Terminate DTLS connection mqttSnClientShutdownConnection(context); //Report a timeout error error = ERROR_TIMEOUT; } else if(timeCompare(time, context->retransmitStartTime + MQTT_SN_CLIENT_RETRY_TIMEOUT) >= 0) { //If the retry timer times out and the expected gateway's reply //is not received, the client retransmits the message error = mqttSnClientSendDisconnect(context, 0); } else { //Wait for the gateway's reply error = mqttSnClientProcessEvents(context, MQTT_SN_CLIENT_TICK_INTERVAL); } } else if(context->state == 
MQTT_SN_CLIENT_STATE_DISCONNECTING) { //Terminate DTLS connection error = mqttSnClientShutdownConnection(context); //Close network connection mqttSnClientCloseConnection(context); //The connection is closed context->state = MQTT_SN_CLIENT_STATE_DISCONNECTED; } else if(context->state == MQTT_SN_CLIENT_STATE_DISCONNECTED) { //The MQTT-SN client is disconnected break; } else { //Invalid state error = ERROR_WRONG_STATE; } } //Any error to report? if(error != NO_ERROR && error != ERROR_WOULD_BLOCK) { //Close network connection mqttSnClientCloseConnection(context); //Update MQTT-SN client state context->state = MQTT_SN_CLIENT_STATE_DISCONNECTED; } //Return status code return error; } /** * @brief Release MQTT-SN client context * @param[in] context Pointer to the MQTT-SN client context **/ void mqttSnClientDeinit(MqttSnClientContext *context) { //Make sure the MQTT-SN client context is valid if(context != NULL) { //Close connection mqttSnClientCloseConnection(context); #if (MQTT_SN_CLIENT_DTLS_SUPPORT == ENABLED) //Release DTLS session state tlsFreeSessionState(&context->dtlsSession); #endif //Clear MQTT-SN client context osMemset(context, 0, sizeof(MqttSnClientContext)); } } #endif
/** * @file mqtt_sn_client.c * @brief MQTT-SN client * * @section License * * SPDX-License-Identifier: GPL-2.0-or-later * * Copyright (C) 2010-2021 Oryx Embedded SARL. All rights reserved. * * This file is part of CycloneTCP Open. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. * * @author Oryx Embedded SARL (www.oryx-embedded.com) * @version 2.0.2 **/ //Switch to the appropriate trace level #define TRACE_LEVEL MQTT_SN_TRACE_LEVEL //Dependencies #include "core/net.h" #include "mqtt_sn/mqtt_sn_client.h" #include "mqtt_sn/mqtt_sn_client_message.h" #include "mqtt_sn/mqtt_sn_client_transport.h" #include "mqtt_sn/mqtt_sn_client_misc.h" #include "debug.h" //Check TCP/IP stack configuration #if (MQTT_SN_CLIENT_SUPPORT == ENABLED) /** * @brief Initialize MQTT-SN client context * @param[in] context Pointer to the MQTT-SN client context * @return Error code **/ error_t mqttSnClientInit(MqttSnClientContext *context) { #if (MQTT_SN_CLIENT_DTLS_SUPPORT == ENABLED) error_t error; #endif //Make sure the MQTT-SN client context is valid if(context == NULL) return ERROR_INVALID_PARAMETER; //Clear MQTT-SN client context osMemset(context, 0, sizeof(MqttSnClientContext)); #if (MQTT_SN_CLIENT_DTLS_SUPPORT == ENABLED) //Initialize DTLS session state error = tlsInitSessionState(&context->dtlsSession); //Any error to report? 
if(error) return error; #endif //Initialize MQTT-SN client state context->state = MQTT_SN_CLIENT_STATE_DISCONNECTED; //Default transport protocol context->transportProtocol = MQTT_SN_TRANSPORT_PROTOCOL_UDP; //Default timeout context->timeout = MQTT_SN_CLIENT_DEFAULT_TIMEOUT; //Default keep-alive time interval context->keepAlive = MQTT_SN_CLIENT_DEFAULT_KEEP_ALIVE; //Initialize message identifier context->msgId = 0; //Successful initialization return NO_ERROR; } /** * @brief Set the transport protocol to be used * @param[in] context Pointer to the MQTT-SN client context * @param[in] transportProtocol Transport protocol to be used (UDP or DTLS) * @return Error code **/ error_t mqttSnClientSetTransportProtocol(MqttSnClientContext *context, MqttSnTransportProtocol transportProtocol) { //Make sure the MQTT-SN client context is valid if(context == NULL) return ERROR_INVALID_PARAMETER; //Save the transport protocol to be used context->transportProtocol = transportProtocol; //Successful processing return NO_ERROR; } #if (MQTT_SN_CLIENT_DTLS_SUPPORT == ENABLED) /** * @brief Register DTLS initialization callback function * @param[in] context Pointer to the MQTT-SN client context * @param[in] callback DTLS initialization callback function * @return Error code **/ error_t mqttSnClientRegisterDtlsInitCallback(MqttSnClientContext *context, MqttSnClientDtlsInitCallback callback) { //Check parameters if(context == NULL || callback == NULL) return ERROR_INVALID_PARAMETER; //Save callback function context->dtlsInitCallback = callback; //Successful processing return NO_ERROR; } #endif /** * @brief Register publish callback function * @param[in] context Pointer to the MQTT-SN client context * @param[in] callback Callback function to be called when a PUBLISH message * is received * @return Error code **/ error_t mqttSnClientRegisterPublishCallback(MqttSnClientContext *context, MqttSnClientPublishCallback callback) { //Make sure the MQTT-SN client context is valid if(context == NULL) return ERROR_INVALID_PARAMETER; //Save callback function context->publishCallback = callback; //Successful processing return NO_ERROR; } /** * @brief Set the list of predefined topics * @param[in] context Pointer to the MQTT-SN client context * @param[in] predefinedTopics List of predefined topics * @param[in] size Number of predefined topics * @return Error code **/ error_t mqttSnClientSetPredefinedTopics(MqttSnClientContext *context, MqttSnPredefinedTopic *predefinedTopics, uint_t size) { //Make sure the MQTT-SN client context is valid if(context == NULL) return ERROR_INVALID_PARAMETER; //Check parameters if(predefinedTopics == NULL && size != 0) return ERROR_INVALID_PARAMETER; //Save the list of predefined topics context->predefinedTopicTable = predefinedTopics; context->predefinedTopicTableSize = size; //Successful processing return NO_ERROR; } /** * @brief Set communication timeout * @param[in] context Pointer to the MQTT-SN client context * @param[in] timeout Timeout value, in milliseconds * @return Error code **/ error_t mqttSnClientSetTimeout(MqttSnClientContext *context, systime_t timeout) { //Make sure the MQTT-SN client context is valid if(context == NULL) return ERROR_INVALID_PARAMETER; //Save timeout value context->timeout = timeout; //Successful processing return NO_ERROR; } /** * @brief Set keep-alive value * @param[in] context Pointer to the MQTT-SN client context * @param[in] keepAlive Keep-alive interval, in milliseconds * @return Error code **/ error_t mqttSnClientSetKeepAlive(MqttSnClientContext *context, 
systime_t keepAlive) { //Make sure the MQTT-SN client context is valid if(context == NULL) return ERROR_INVALID_PARAMETER; //Save keep-alive value context->keepAlive = keepAlive; //Successful processing return NO_ERROR; } /** * @brief Set client identifier * @param[in] context Pointer to the MQTT-SN client context * @param[in] clientId NULL-terminated string containing the client identifier * @return Error code **/ error_t mqttSnClientSetIdentifier(MqttSnClientContext *context, const char_t *clientId) { //Check parameters if(context == NULL || clientId == NULL) return ERROR_INVALID_PARAMETER; //Make sure the length of the client identifier is acceptable if(osStrlen(clientId) > MQTT_SN_CLIENT_MAX_ID_LEN) return ERROR_INVALID_LENGTH; //Save client identifier osStrcpy(context->clientId, clientId); //Successful processing return NO_ERROR; } /** * @brief Specify the Will message * @param[in] context Pointer to the MQTT-SN client context * @param[in] topic Will topic name * @param[in] message Will message * @param[in] length Length of the Will message * @param[in] qos QoS level to be used when publishing the Will message * @param[in] retain This flag specifies if the Will message is to be retained * @return Error code **/ error_t mqttSnClientSetWillMessage(MqttSnClientContext *context, const char_t *topic, const void *message, size_t length, MqttSnQosLevel qos, bool_t retain) { //Check parameters if(context == NULL || topic == NULL) return ERROR_INVALID_PARAMETER; //Make sure the length of the Will topic is acceptable if(osStrlen(topic) > MQTT_SN_CLIENT_MAX_WILL_TOPIC_LEN) return ERROR_INVALID_LENGTH; //Save Will topic osStrcpy(context->willMessage.topic, topic); //Any message payload if(length > 0) { //Sanity check if(message == NULL) return ERROR_INVALID_PARAMETER; //Make sure the length of the Will message payload is acceptable if(osStrlen(message) > MQTT_SN_CLIENT_MAX_WILL_PAYLOAD_LEN) return ERROR_INVALID_LENGTH; //Save Will message payload osMemcpy(context->willMessage.payload, message, length); } //Length of the Will message payload context->willMessage.length = length; //QoS level to be used when publishing the Will message context->willMessage.flags.qos = qos; //This flag specifies if the Will message is to be retained context->willMessage.flags.retain = retain; //Successful processing return NO_ERROR; } /** * @brief Bind the MQTT-SN client to a particular network interface * @param[in] context Pointer to the MQTT-SN client context * @param[in] interface Network interface to be used * @return Error code **/ error_t mqttSnClientBindToInterface(MqttSnClientContext *context, NetInterface *interface) { //Make sure the MQTT-SN client context is valid if(context == NULL) return ERROR_INVALID_PARAMETER; //Explicitly associate the MQTT client with the specified interface context->interface = interface; //Successful processing return NO_ERROR; } /** * @brief Specify the address of the gateway * @param[in] context Pointer to the MQTT-SN client context * @param[in] gwIpAddr Gateway IP address * @param[in] gwPort Gateway port number * @return Error code **/ error_t mqttSnClientSetGateway(MqttSnClientContext *context, const IpAddr *gwIpAddr, uint16_t gwPort) { //Check parameters if(context == NULL || gwIpAddr == NULL) return ERROR_INVALID_PARAMETER; //Save the IP address and the port number of the MQTT-SN gateway context->gwIpAddr = *gwIpAddr; context->gwPort = gwPort; //Successful processing return NO_ERROR; } /** * @brief Search for a gateway * @param[in] context Pointer to the MQTT-SN client 
context * @param[in] destIpAddr Destination IP address * @param[in] destPort Destination port number * @return Error code **/ error_t mqttSnClientSearchGateway(MqttSnClientContext *context, const IpAddr *destIpAddr, uint16_t destPort) { error_t error; systime_t time; //Check parameters if(context == NULL || destIpAddr == NULL) return ERROR_INVALID_PARAMETER; //Initialize status code error = NO_ERROR; //Gateway discovery procedure while(!error) { //Get current time time = osGetSystemTime(); //Check current state if(context->state == MQTT_SN_CLIENT_STATE_DISCONNECTED) { //Open network connection error = mqttSnClientOpenConnection(context, FALSE); //Check status code if(!error) { //Save current time context->startTime = time; context->retransmitStartTime = time; //To prevent broadcast storms when multiple clients start searching //for GW almost at the same time, the sending of the SEARCHGW message //is delayed by a random time between 0 and TSEARCHGW context->retransmitTimeout = netGetRandRange(0, MQTT_SN_CLIENT_SEARCH_DELAY); //Start searching for gateways context->state = MQTT_SN_CLIENT_STATE_SEARCHING; } } else if(context->state == MQTT_SN_CLIENT_STATE_SEARCHING) { //Check whether the timeout has elapsed if(timeCompare(time, context->startTime + context->timeout) >= 0) { //Abort the retransmission procedure error = ERROR_TIMEOUT; } else if(timeCompare(time, context->retransmitStartTime + context->retransmitTimeout) >= 0) { //Set retransmission timeout context->retransmitTimeout = MQTT_SN_CLIENT_RETRY_TIMEOUT; //If the retry timer times out and the expected gateway's reply //is not received, the client retransmits the message error = mqttSnClientSendSearchGw(context, 0, destIpAddr, destPort); } else { //Wait for the gateway's reply error = mqttSnClientProcessEvents(context, MQTT_SN_CLIENT_TICK_INTERVAL); } } else if(context->state == MQTT_SN_CLIENT_STATE_RESP_RECEIVED) { //Check the type of the received message if(context->msgType == MQTT_SN_MSG_TYPE_GWINFO) { //Close network connection mqttSnClientCloseConnection(context); //A MQTT-SN gateway has been found context->state = MQTT_SN_CLIENT_STATE_DISCONNECTED; break; } else { //Report an error error = ERROR_UNEXPECTED_RESPONSE; } } else { //Invalid state error = ERROR_WRONG_STATE; } } //Any error to report? 
if(error != NO_ERROR && error != ERROR_WOULD_BLOCK) { //Clean up side effects mqttSnClientCloseConnection(context); //Update MQTT-SN client state context->state = MQTT_SN_CLIENT_STATE_DISCONNECTED; } //Return status code return error; } /** * @brief Establish connection with the MQTT-SN gateway * @param[in] context Pointer to the MQTT-SN client context * @param[in] cleanSession If this flag is set, then the client and server * must discard any previous session and start a new one * @return Error code **/ error_t mqttSnClientConnect(MqttSnClientContext *context, bool_t cleanSession) { error_t error; systime_t time; //Make sure the MQTT-SN client context is valid if(context == NULL) return ERROR_INVALID_PARAMETER; //Initialize status code error = NO_ERROR; //Establish connection with the MQTT-SN gateway while(!error) { //Get current time time = osGetSystemTime(); //Check current state if(context->state == MQTT_SN_CLIENT_STATE_DISCONNECTED) { //Open network connection error = mqttSnClientOpenConnection(context, TRUE); //Check status code if(!error) { //Save current time context->startTime = time; //Update MQTT-SN client state context->state = MQTT_SN_CLIENT_STATE_CONNECTING; } } else if(context->state == MQTT_SN_CLIENT_STATE_CONNECTING) { //Establish DTLS connection error = mqttSnClientEstablishConnection(context); //Check status code if(error == NO_ERROR) { //Check whether the CleanSession flag is set if(cleanSession) { //Discard previous session state osMemset(context->topicTable, 0, sizeof(context->topicTable)); osMemset(context->msgIdTable, 0, sizeof(context->msgIdTable)); } //The CONNECT message is sent by a client to setup a connection error = mqttSnClientSendConnect(context, cleanSession); } else if(error == ERROR_WOULD_BLOCK || error == ERROR_TIMEOUT) { //Check whether the timeout has elapsed if(timeCompare(time, context->startTime + context->timeout) >= 0) { //Report an error error = ERROR_TIMEOUT; } } else { //Failed to establish DTLS connection } } else if(context->state == MQTT_SN_CLIENT_STATE_SENDING_REQ) { //Check whether the timeout has elapsed if(timeCompare(time, context->startTime + context->timeout) >= 0) { //Abort the retransmission procedure error = ERROR_TIMEOUT; } else if(timeCompare(time, context->retransmitStartTime + MQTT_SN_CLIENT_RETRY_TIMEOUT) >= 0) { //If the retry timer times out and the expected gateway's reply //is not received, the client retransmits the message error = mqttSnClientSendConnect(context, cleanSession); } else { //Wait for the gateway's reply error = mqttSnClientProcessEvents(context, MQTT_SN_CLIENT_TICK_INTERVAL); } } else if(context->state == MQTT_SN_CLIENT_STATE_RESP_RECEIVED) { //Check the type of the received message if(context->msgType == MQTT_SN_MSG_TYPE_CONNACK) { //If the connection request has not been accepted, the failure reason //is encoded in the return code field of the CONNACK message if(context->returnCode == MQTT_SN_RETURN_CODE_ACCEPTED) { //The connection request has been accepted by the gateway context->state = MQTT_SN_CLIENT_STATE_ACTIVE; } else { //Terminate DTLS connection mqttSnClientShutdownConnection(context); //The connection request has been rejected by the gateway error = ERROR_REQUEST_REJECTED; } } else { //Report an error error = ERROR_UNEXPECTED_RESPONSE; } } else if(context->state == MQTT_SN_CLIENT_STATE_ACTIVE) { //The MQTT-SN client is connected break; } else { //Invalid state error = ERROR_WRONG_STATE; } } //Any error to report? 
if(error != NO_ERROR && error != ERROR_WOULD_BLOCK) { //Clean up side effects mqttSnClientCloseConnection(context); //Update MQTT-SN client state context->state = MQTT_SN_CLIENT_STATE_DISCONNECTED; } //Return status code return error; } /** * @brief Publish message * @param[in] context Pointer to the MQTT-SN client context * @param[in] topicName Topic name * @param[in] message Message payload * @param[in] length Length of the message payload * @param[in] qos QoS level to be used when publishing the message * @param[in] retain This flag specifies if the message is to be retained * @param[in] dup This flag specifies if the message is sent for the first * time or if the message is retransmitted * @param[in,out] msgId Message identifier used to send the PUBLISH message * @return Error code **/ error_t mqttSnClientPublish(MqttSnClientContext *context, const char_t *topicName, const void *message, size_t length, MqttSnQosLevel qos, bool_t retain, bool_t dup, uint16_t *msgId) { error_t error; systime_t time; uint16_t publishMsgId; //Check parameters if(context == NULL || topicName == NULL) return ERROR_INVALID_PARAMETER; if(message == NULL && length != 0) return ERROR_INVALID_PARAMETER; if(dup && msgId == NULL) return ERROR_INVALID_PARAMETER; //Initialize status code error = NO_ERROR; //Initialize message identifier if(dup) publishMsgId = *msgId; else publishMsgId = 0; //Publish procedure while(!error) { //Get current time time = osGetSystemTime(); //Check current state if(context->state == MQTT_SN_CLIENT_STATE_ACTIVE) { //Save current time context->startTime = time; //Check whether the register procedure is needed if(mqttSnClientIsShortTopicName(topicName) == FALSE && mqttSnClientFindTopicName(context, topicName) == 0 && mqttSnClientFindPredefTopicName(context, topicName) == 0) { //The message identifier allows the sender to match a message with //its corresponding acknowledgment mqttSnClientGenerateMessageId(context); //To register a topic name a client sends a REGISTER message to //the gateway error = mqttSnClientSendRegister(context, topicName); } else { //The message ID is only relevant in case of QoS levels 1 and 2 if(qos == MQTT_SN_QOS_LEVEL_1 || qos == MQTT_SN_QOS_LEVEL_2) { //The message identifier allows the sender to match a message with //its corresponding acknowledgment if(!dup) publishMsgId = mqttSnClientGenerateMessageId(context); } else { //For QoS level 0, the message identifier is coded 0x0000 publishMsgId = 0; } //The client can start publishing data relating to the registered //topic name by sending PUBLISH messages to the gateway error = mqttSnClientSendPublish(context, publishMsgId, topicName, message, length, qos, retain, dup); //In the QoS 0, no response is sent by the receiver and no retry //is performed by the sender if(qos != MQTT_SN_QOS_LEVEL_1 && qos != MQTT_SN_QOS_LEVEL_2) break; } } else if(context->state == MQTT_SN_CLIENT_STATE_SENDING_REQ) { //Check whether the transmission of the PUBLISH message has started if(context->msgType == MQTT_SN_MSG_TYPE_PUBLISH || context->msgType == MQTT_SN_MSG_TYPE_PUBREL) { //Restore the message identifier that was used to send the first //PUBLISH message if(!dup) publishMsgId = context->msgId; } //Check whether the timeout has elapsed if(timeCompare(time, context->startTime + context->timeout) >= 0) { //Abort the retransmission procedure context->state = MQTT_SN_CLIENT_STATE_DISCONNECTING; //Report a timeout error error = ERROR_TIMEOUT; } else if(timeCompare(time, context->retransmitStartTime + MQTT_SN_CLIENT_RETRY_TIMEOUT) >= 0) { 
//If the retry timer times out and the expected gateway's reply //is not received, the client retransmits the message if(context->msgType == MQTT_SN_MSG_TYPE_REGISTER) { //Retransmit REGISTER message error = mqttSnClientSendRegister(context, topicName); } else if(context->msgType == MQTT_SN_MSG_TYPE_PUBLISH) { //Retransmit PUBLISH message error = mqttSnClientSendPublish(context, publishMsgId, topicName, message, length, qos, retain, TRUE); } else if(context->msgType == MQTT_SN_MSG_TYPE_PUBREL) { //Retransmit PUBREL message error = mqttSnClientSendPubRel(context, context->msgId); } else { //Report an error error = ERROR_INVALID_TYPE; } } else { //Wait for the gateway's reply error = mqttSnClientProcessEvents(context, MQTT_SN_CLIENT_TICK_INTERVAL); } } else if(context->state == MQTT_SN_CLIENT_STATE_RESP_RECEIVED) { //Update MQTT-SN client state context->state = MQTT_SN_CLIENT_STATE_ACTIVE; //Check whether the transmission of the PUBLISH message has started if(context->msgType == MQTT_SN_MSG_TYPE_PUBACK || context->msgType == MQTT_SN_MSG_TYPE_PUBREC || context->msgType == MQTT_SN_MSG_TYPE_PUBCOMP) { //Restore the message identifier that was used to send the first //PUBLISH message if(!dup) publishMsgId = context->msgId; } //Check the type of the received message if(context->msgType == MQTT_SN_MSG_TYPE_REGACK) { //If the registration has not been accepted, the failure reason is //encoded in the return code field of the REGACK message if(context->returnCode == MQTT_SN_RETURN_CODE_ACCEPTED) { //Save the topic ID assigned by the gateway error = mqttSnClientAddTopic(context, topicName, context->topicId); } else { //The registration request has been rejected by the gateway error = ERROR_REQUEST_REJECTED; } } else if(context->msgType == MQTT_SN_MSG_TYPE_PUBACK) { //If the publish request has not been accepted, the failure reason //is encoded in the return code field of the PUBACK message if(context->returnCode == MQTT_SN_RETURN_CODE_ACCEPTED) { //Check QoS level if(qos == MQTT_SN_QOS_LEVEL_2) { //Unexpected PUBREC message received error = ERROR_UNEXPECTED_MESSAGE; } else { //A PUBACK message has been received break; } } else { //The publish request has been rejected by the gateway error = ERROR_REQUEST_REJECTED; } } else if(context->msgType == MQTT_SN_MSG_TYPE_PUBREC) { //Check QoS level if(qos == MQTT_SN_QOS_LEVEL_2) { //A PUBREL packet is the response to a PUBREC packet. 
It is the //third packet of the QoS 2 protocol exchange error = mqttSnClientSendPubRel(context, context->msgId); } else { //Unexpected PUBREC message received error = ERROR_UNEXPECTED_MESSAGE; } } else if(context->msgType == MQTT_SN_MSG_TYPE_PUBCOMP) { //A PUBCOMP message has been received break; } else { //Report an error error = ERROR_UNEXPECTED_RESPONSE; } } else { //Invalid state error = ERROR_NOT_CONNECTED; } } //Return the message identifier that was used to send the PUBLISH message if(msgId != NULL) *msgId = publishMsgId; //Return status code return error; } /** * @brief Subscribe to topic * @param[in] context Pointer to the MQTT-SN client context * @param[in] topicName Topic filter * @param[in] qos Maximum QoS level at which the server can send application * messages to the client * @return Error code **/ error_t mqttSnClientSubscribe(MqttSnClientContext *context, const char_t *topicName, MqttSnQosLevel qos) { error_t error; systime_t time; //Check parameters if(context == NULL || topicName == NULL) return ERROR_INVALID_PARAMETER; //Initialize status code error = NO_ERROR; //Topic subscribe procedure while(!error) { //Get current time time = osGetSystemTime(); //Check current state if(context->state == MQTT_SN_CLIENT_STATE_ACTIVE) { //The message identifier allows the sender to match a message with //its corresponding acknowledgment mqttSnClientGenerateMessageId(context); //Save current time context->startTime = time; //Send SUBSCRIBE message error = mqttSnClientSendSubscribe(context, topicName, qos); } else if(context->state == MQTT_SN_CLIENT_STATE_SENDING_REQ) { //Check whether the timeout has elapsed if(timeCompare(time, context->startTime + context->timeout) >= 0) { //Abort the retransmission procedure context->state = MQTT_SN_CLIENT_STATE_DISCONNECTING; //Report a timeout error error = ERROR_TIMEOUT; } else if(timeCompare(time, context->retransmitStartTime + MQTT_SN_CLIENT_RETRY_TIMEOUT) >= 0) { //If the retry timer times out and the expected gateway's reply //is not received, the client retransmits the message error = mqttSnClientSendSubscribe(context, topicName, qos); } else { //Wait for the gateway's reply error = mqttSnClientProcessEvents(context, MQTT_SN_CLIENT_TICK_INTERVAL); } } else if(context->state == MQTT_SN_CLIENT_STATE_RESP_RECEIVED) { //Update MQTT-SN client state context->state = MQTT_SN_CLIENT_STATE_ACTIVE; //Check the type of the received message if(context->msgType == MQTT_SN_MSG_TYPE_SUBACK) { //If the subscribe request has not been accepted, the failure reason //is encoded in the return code field of the SUBACK message if(context->returnCode == MQTT_SN_RETURN_CODE_ACCEPTED) { //The topic ID field is not relevant in case of subscriptions to a //topic name which contains wildcard characters if(osStrchr(topicName, '#') == NULL && osStrchr(topicName, '+') == NULL) { //Save the topic ID assigned by the gateway error = mqttSnClientAddTopic(context, topicName, context->topicId); } //A SUBACK message has been received break; } else { //The subscribe request has been rejected by the gateway error = ERROR_REQUEST_REJECTED; } } else { //Report an error error = ERROR_UNEXPECTED_RESPONSE; } } else { //Invalid state error = ERROR_NOT_CONNECTED; } } //Return status code return error; } /** * @brief Unsubscribe from topic * @param[in] context Pointer to the MQTT-SN client context * @param[in] topicName Topic filter * @return Error code **/ error_t mqttSnClientUnsubscribe(MqttSnClientContext *context, const char_t *topicName) { error_t error; systime_t time; //Check 
parameters if(context == NULL || topicName == NULL) return ERROR_INVALID_PARAMETER; //Initialize status code error = NO_ERROR; //Topic unsubscribe procedure while(!error) { //Get current time time = osGetSystemTime(); //Check current state if(context->state == MQTT_SN_CLIENT_STATE_ACTIVE) { //The message identifier allows the sender to match a message with //its corresponding acknowledgment mqttSnClientGenerateMessageId(context); //Save current time context->startTime = time; //Send UNSUBSCRIBE message error = mqttSnClientSendUnsubscribe(context, topicName); } else if(context->state == MQTT_SN_CLIENT_STATE_SENDING_REQ) { //Check whether the timeout has elapsed if(timeCompare(time, context->startTime + context->timeout) >= 0) { //Abort the retransmission procedure context->state = MQTT_SN_CLIENT_STATE_DISCONNECTING; //Report a timeout error error = ERROR_TIMEOUT; } else if(timeCompare(time, context->retransmitStartTime + MQTT_SN_CLIENT_RETRY_TIMEOUT) >= 0) { //If the retry timer times out and the expected gateway's reply //is not received, the client retransmits the message error = mqttSnClientSendUnsubscribe(context, topicName); } else { //Wait for the gateway's reply error = mqttSnClientProcessEvents(context, MQTT_SN_CLIENT_TICK_INTERVAL); } } else if(context->state == MQTT_SN_CLIENT_STATE_RESP_RECEIVED) { //Update MQTT-SN client state context->state = MQTT_SN_CLIENT_STATE_ACTIVE; //Check the type of the received message if(context->msgType == MQTT_SN_MSG_TYPE_UNSUBACK) { //An UNSUBACK message has been received break; } else { //Report an error error = ERROR_UNEXPECTED_RESPONSE; } } else { //Invalid state error = ERROR_NOT_CONNECTED; } } //Return status code return error; } /** * @brief Send ping request * @param[in] context Pointer to the MQTT-SN client context * @return Error code **/ error_t mqttSnClientPing(MqttSnClientContext *context) { error_t error; systime_t time; //Make sure the MQTT-SN client context is valid if(context == NULL) return ERROR_INVALID_PARAMETER; //Initialize status code error = NO_ERROR; //Send PINGREQ packet and wait for PINGRESP packet to be received while(!error) { //Get current time time = osGetSystemTime(); //Check current state if(context->state == MQTT_SN_CLIENT_STATE_ACTIVE) { //Save current time context->startTime = time; context->retransmitStartTime = time; //Send PINGREQ message error = mqttSnClientSendPingReq(context); //Update MQTT-SN client state context->state = MQTT_SN_CLIENT_STATE_SENDING_REQ; context->msgType = MQTT_SN_MSG_TYPE_PINGREQ; } else if(context->state == MQTT_SN_CLIENT_STATE_SENDING_REQ) { //Check whether the timeout has elapsed if(timeCompare(time, context->startTime + context->timeout) >= 0) { //Abort the retransmission procedure context->state = MQTT_SN_CLIENT_STATE_DISCONNECTING; //Report a timeout error error = ERROR_TIMEOUT; } else if(timeCompare(time, context->retransmitStartTime + MQTT_SN_CLIENT_RETRY_TIMEOUT) >= 0) { //If the retry timer times out and the expected gateway's reply //is not received, the client retransmits the message error = mqttSnClientSendPingReq(context); //Save the time at which the message was sent context->retransmitStartTime = time; } else { //Wait for the gateway's reply error = mqttSnClientProcessEvents(context, MQTT_SN_CLIENT_TICK_INTERVAL); } } else if(context->state == MQTT_SN_CLIENT_STATE_RESP_RECEIVED) { //Update MQTT-SN client state context->state = MQTT_SN_CLIENT_STATE_ACTIVE; //Check the type of the received message if(context->msgType == MQTT_SN_MSG_TYPE_PINGRESP) { //A PINGRESP message has 
been received break; } else { //Report an error error = ERROR_UNEXPECTED_RESPONSE; } } else { //Invalid state error = ERROR_NOT_CONNECTED; } } //Return status code return error; } /** * @brief Update the Will message * @param[in] context Pointer to the MQTT-SN client context * @param[in] topic Will topic name * @param[in] message Will message * @param[in] length Length of the Will message * @param[in] qos QoS level to be used when publishing the Will message * @param[in] retain This flag specifies if the Will message is to be retained * @return Error code **/ error_t mqttSnClientUpdateWillMessage(MqttSnClientContext *context, const char_t *topic, const void *message, size_t length, MqttSnQosLevel qos, bool_t retain) { error_t error; systime_t time; //Check parameters if(context == NULL || topic == NULL) return ERROR_INVALID_PARAMETER; if(message == NULL && length != 0) return ERROR_INVALID_PARAMETER; //Initialize status code error = NO_ERROR; //Publish procedure while(!error) { //Get current time time = osGetSystemTime(); //Check current state if(context->state == MQTT_SN_CLIENT_STATE_ACTIVE) { //Update the Will message error = mqttSnClientSetWillMessage(context, topic, message, length, qos, retain); //Check status code if(!error) { //Save current time context->startTime = time; //Send WILLTOPICUPD message error = mqttSnClientSendWillTopicUpd(context); } } else if(context->state == MQTT_SN_CLIENT_STATE_SENDING_REQ) { //Check whether the timeout has elapsed if(timeCompare(time, context->startTime + context->timeout) >= 0) { //Abort the retransmission procedure context->state = MQTT_SN_CLIENT_STATE_DISCONNECTING; //Report a timeout error error = ERROR_TIMEOUT; } else if(timeCompare(time, context->retransmitStartTime + MQTT_SN_CLIENT_RETRY_TIMEOUT) >= 0) { //If the retry timer times out and the expected gateway's reply //is not received, the client retransmits the message if(context->msgType == MQTT_SN_MSG_TYPE_WILLTOPICUPD) { //Retransmit WILLTOPICUPD message error = mqttSnClientSendWillTopicUpd(context); } else if(context->msgType == MQTT_SN_MSG_TYPE_WILLMSGUPD) { //Retransmit WILLMSGUPD message error = mqttSnClientSendWillMsgUpd(context); } else { //Report an error error = ERROR_INVALID_TYPE; } } else { //Wait for the gateway's reply error = mqttSnClientProcessEvents(context, MQTT_SN_CLIENT_TICK_INTERVAL); } } else if(context->state == MQTT_SN_CLIENT_STATE_RESP_RECEIVED) { //Update MQTT-SN client state context->state = MQTT_SN_CLIENT_STATE_ACTIVE; //Check the type of the received message if(context->msgType == MQTT_SN_MSG_TYPE_WILLTOPICRESP) { //If the WILLTOPICUPD has not been accepted, the failure reason //is encoded in the return code field of the WILLTOPICRESP if(context->returnCode == MQTT_SN_RETURN_CODE_ACCEPTED) { //Valid Will topic? 
if(context->willMessage.topic[0] != '\0') { //Send WILLMSGUPD message error = mqttSnClientSendWillMsgUpd(context); } else { //An empty WILLTOPIC message is used by a client to delete //the Will topic and the Will message stored in the server break; } } else { //The WILLTOPICUPD request has been rejected by the gateway error = ERROR_REQUEST_REJECTED; } } else if(context->msgType == MQTT_SN_MSG_TYPE_WILLMSGRESP) { //If the WILLMSGUPD has not been accepted, the failure reason //is encoded in the return code field of the WILLMSGRESP if(context->returnCode == MQTT_SN_RETURN_CODE_ACCEPTED) { //The WILLMSGUPD request has been accepted by the gateway break; } else { //The WILLMSGUPD request has been rejected by the gateway error = ERROR_REQUEST_REJECTED; } } else { //Report an error error = ERROR_UNEXPECTED_RESPONSE; } } else { //Invalid state error = ERROR_NOT_CONNECTED; } } //Return status code return error; } /** * @brief Retrieve return code * @param[in] context Pointer to the MQTT-SN client context * @param[out] returnCode Return code * @return Error code **/ error_t mqttSnClientGetReturnCode(MqttSnClientContext *context, MqttSnReturnCode *returnCode) { //Check parameters if(context == NULL || returnCode == NULL) return ERROR_INVALID_PARAMETER; //Retrieve return code *returnCode = context->returnCode; //Successful processing return NO_ERROR; } /** * @brief Process MQTT-SN client events * @param[in] context Pointer to the MQTT-SN client context * @param[in] timeout Maximum time to wait before returning * @return Error code **/ error_t mqttSnClientTask(MqttSnClientContext *context, systime_t timeout) { error_t error; //Make sure the MQTT-SN client context is valid if(context == NULL) return ERROR_INVALID_PARAMETER; //Make sure the MQTT-SN client is connected if(context->state == MQTT_SN_CLIENT_STATE_ACTIVE || context->state == MQTT_SN_CLIENT_STATE_SENDING_REQ || context->state == MQTT_SN_CLIENT_STATE_RESP_RECEIVED) { //Process MQTT-SN client events error = mqttSnClientProcessEvents(context, timeout); } else { //Invalid state error = ERROR_NOT_CONNECTED; } //Return status code return error; } /** * @brief Disconnect from the MQTT-SN gateway * @param[in] context Pointer to the MQTT-SN client context * @param[in] duration Sleep duration, in milliseconds * @return Error code **/ error_t mqttSnClientDisconnect(MqttSnClientContext *context, systime_t duration) { error_t error; systime_t time; //Make sure the MQTT-SN client context is valid if(context == NULL) return ERROR_INVALID_PARAMETER; //Initialize status code error = NO_ERROR; //Disconnect procedure while(!error) { //Get current time time = osGetSystemTime(); //Check current state if(context->state == MQTT_SN_CLIENT_STATE_ACTIVE) { //Save current time context->startTime = time; //The DISCONNECT message is sent by a client to indicate that it //wants to close the connection error = mqttSnClientSendDisconnect(context, duration / 1000); } else if(context->state == MQTT_SN_CLIENT_STATE_SENDING_REQ) { //Check whether the timeout has elapsed if(timeCompare(time, context->startTime + context->timeout) >= 0) { //Terminate DTLS connection mqttSnClientShutdownConnection(context); //Report a timeout error error = ERROR_TIMEOUT; } else if(timeCompare(time, context->retransmitStartTime + MQTT_SN_CLIENT_RETRY_TIMEOUT) >= 0) { //If the retry timer times out and the expected gateway's reply //is not received, the client retransmits the message error = mqttSnClientSendDisconnect(context, duration / 1000); } else { //Wait for the gateway's reply error = 
mqttSnClientProcessEvents(context, MQTT_SN_CLIENT_TICK_INTERVAL); } } else if(context->state == MQTT_SN_CLIENT_STATE_DISCONNECTING) { //Terminate DTLS connection error = mqttSnClientShutdownConnection(context); //Close network connection mqttSnClientCloseConnection(context); //The connection is closed context->state = MQTT_SN_CLIENT_STATE_DISCONNECTED; } else if(context->state == MQTT_SN_CLIENT_STATE_DISCONNECTED) { //The MQTT-SN client is disconnected break; } else { //Invalid state error = ERROR_WRONG_STATE; } } //Any error to report? if(error != NO_ERROR && error != ERROR_WOULD_BLOCK) { //Close network connection mqttSnClientCloseConnection(context); //Update MQTT-SN client state context->state = MQTT_SN_CLIENT_STATE_DISCONNECTED; } //Return status code return error; } /** * @brief Release MQTT-SN client context * @param[in] context Pointer to the MQTT-SN client context **/ void mqttSnClientDeinit(MqttSnClientContext *context) { //Make sure the MQTT-SN client context is valid if(context != NULL) { //Close connection mqttSnClientCloseConnection(context); #if (MQTT_SN_CLIENT_DTLS_SUPPORT == ENABLED) //Release DTLS session state tlsFreeSessionState(&context->dtlsSession); #endif //Clear MQTT-SN client context osMemset(context, 0, sizeof(MqttSnClientContext)); } } #endif
error_t mqttSnClientSubscribe(MqttSnClientContext *context, const char_t *topicName, MqttSnQosLevel qos) { error_t error; systime_t time; //Check parameters if(context == NULL || topicName == NULL) return ERROR_INVALID_PARAMETER; //Initialize status code error = NO_ERROR; //Topic subscribe procedure while(!error) { //Get current time time = osGetSystemTime(); //Check current state if(context->state == MQTT_SN_CLIENT_STATE_ACTIVE) { //The message identifier allows the sender to match a message with //its corresponding acknowledgment mqttSnClientGenerateMessageId(context); //Save current time context->startTime = time; //Send SUBSCRIBE message error = mqttSnClientSendSubscribe(context, topicName, qos); } else if(context->state == MQTT_SN_CLIENT_STATE_SENDING_REQ) { //Check whether the timeout has elapsed if(timeCompare(time, context->startTime + context->timeout) >= 0) { //Abort the retransmission procedure context->state = MQTT_SN_CLIENT_STATE_DISCONNECTING; //Report a timeout error error = ERROR_TIMEOUT; } else if(timeCompare(time, context->retransmitStartTime + MQTT_SN_CLIENT_RETRY_TIMEOUT) >= 0) { //If the retry timer times out and the expected gateway's reply //is not received, the client retransmits the message error = mqttSnClientSendSubscribe(context, topicName, qos); } else { //Wait for the gateway's reply error = mqttSnClientProcessEvents(context, MQTT_SN_CLIENT_TICK_INTERVAL); } } else if(context->state == MQTT_SN_CLIENT_STATE_RESP_RECEIVED) { //Update MQTT-SN client state context->state = MQTT_SN_CLIENT_STATE_ACTIVE; //Check the type of the received message if(context->msgType == MQTT_SN_MSG_TYPE_SUBACK) { //If the subscribe request has not been accepted, the failure reason //is encoded in the return code field of the SUBACK message if(context->returnCode == MQTT_SN_RETURN_CODE_ACCEPTED) { //The topic ID field is not relevant in case of subscriptions to a //topic name which contains wildcard characters if(strchr(topicName, '#') == NULL && strchr(topicName, '+') == NULL) { //Save the topic ID assigned by the gateway error = mqttSnClientAddTopic(context, topicName, context->topicId); } //A SUBACK message has been received break; } else { //The subscribe request has been rejected by the gateway error = ERROR_REQUEST_REJECTED; } } else { //Report an error error = ERROR_UNEXPECTED_RESPONSE; } } else { //Invalid state error = ERROR_NOT_CONNECTED; } } //Return status code return error; }
error_t mqttSnClientSubscribe(MqttSnClientContext *context, const char_t *topicName, MqttSnQosLevel qos) { error_t error; systime_t time; //Check parameters if(context == NULL || topicName == NULL) return ERROR_INVALID_PARAMETER; //Initialize status code error = NO_ERROR; //Topic subscribe procedure while(!error) { //Get current time time = osGetSystemTime(); //Check current state if(context->state == MQTT_SN_CLIENT_STATE_ACTIVE) { //The message identifier allows the sender to match a message with //its corresponding acknowledgment mqttSnClientGenerateMessageId(context); //Save current time context->startTime = time; //Send SUBSCRIBE message error = mqttSnClientSendSubscribe(context, topicName, qos); } else if(context->state == MQTT_SN_CLIENT_STATE_SENDING_REQ) { //Check whether the timeout has elapsed if(timeCompare(time, context->startTime + context->timeout) >= 0) { //Abort the retransmission procedure context->state = MQTT_SN_CLIENT_STATE_DISCONNECTING; //Report a timeout error error = ERROR_TIMEOUT; } else if(timeCompare(time, context->retransmitStartTime + MQTT_SN_CLIENT_RETRY_TIMEOUT) >= 0) { //If the retry timer times out and the expected gateway's reply //is not received, the client retransmits the message error = mqttSnClientSendSubscribe(context, topicName, qos); } else { //Wait for the gateway's reply error = mqttSnClientProcessEvents(context, MQTT_SN_CLIENT_TICK_INTERVAL); } } else if(context->state == MQTT_SN_CLIENT_STATE_RESP_RECEIVED) { //Update MQTT-SN client state context->state = MQTT_SN_CLIENT_STATE_ACTIVE; //Check the type of the received message if(context->msgType == MQTT_SN_MSG_TYPE_SUBACK) { //If the subscribe request has not been accepted, the failure reason //is encoded in the return code field of the SUBACK message if(context->returnCode == MQTT_SN_RETURN_CODE_ACCEPTED) { //The topic ID field is not relevant in case of subscriptions to a //topic name which contains wildcard characters if(osStrchr(topicName, '#') == NULL && osStrchr(topicName, '+') == NULL) { //Save the topic ID assigned by the gateway error = mqttSnClientAddTopic(context, topicName, context->topicId); } //A SUBACK message has been received break; } else { //The subscribe request has been rejected by the gateway error = ERROR_REQUEST_REJECTED; } } else { //Report an error error = ERROR_UNEXPECTED_RESPONSE; } } else { //Invalid state error = ERROR_NOT_CONNECTED; } } //Return status code return error; }
{'added': [(9, ' * Copyright (C) 2010-2021 Oryx Embedded SARL. All rights reserved.'), (28, ' * @version 2.0.2'), (939, " if(osStrchr(topicName, '#') == NULL && osStrchr(topicName, '+') == NULL)"), (1366, ' * @param[in] duration Sleep duration, in milliseconds'), (1370, 'error_t mqttSnClientDisconnect(MqttSnClientContext *context,'), (1371, ' systime_t duration)'), (1397, ' error = mqttSnClientSendDisconnect(context, duration / 1000);'), (1415, ' error = mqttSnClientSendDisconnect(context, duration / 1000);')], 'deleted': [(9, ' * Copyright (C) 2010-2020 Oryx Embedded SARL. All rights reserved.'), (28, ' * @version 2.0.0'), (939, " if(strchr(topicName, '#') == NULL && strchr(topicName, '+') == NULL)"), (1369, 'error_t mqttSnClientDisconnect(MqttSnClientContext *context)'), (1395, ' error = mqttSnClientSendDisconnect(context, 0);'), (1413, ' error = mqttSnClientSendDisconnect(context, 0);')]}
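The diff field above captures the two behavioural changes between versions 2.0.0 and 2.0.2 of this file: the wildcard check in mqttSnClientSubscribe now goes through the osStrchr wrapper instead of calling strchr directly, and mqttSnClientDisconnect gains a duration argument (milliseconds, divided by 1000 before being written into the DISCONNECT message). A minimal caller sketch follows, assuming only the signatures visible in the diff; the surrounding names are hypothetical placeholders, not taken from this record.

/* Minimal sketch of a caller adapting to the 2.0.2 signature of
 * mqttSnClientDisconnect(). Everything except the disconnect call itself
 * is a hypothetical placeholder. */
#include "mqtt_sn/mqtt_sn_client.h"

static MqttSnClientContext mqttSnClientContext;

error_t exampleShutdown(void)
{
   error_t error;

   /* 2.0.0 signature: mqttSnClientDisconnect(&mqttSnClientContext);
    * 2.0.2 adds a sleep duration in milliseconds. Passing 0 reproduces
    * the old behaviour, since the DISCONNECT message carried a
    * hard-coded 0 before the change (see the deleted lines above). */
   error = mqttSnClientDisconnect(&mqttSnClientContext, 0);

   return error;
}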
8
6
790
3234
https://github.com/Oryx-Embedded/CycloneTCP
CVE-2021-26788
['CWE-20']
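The record tags CVE-2021-26788 as CWE-20 (improper input validation), but does not say which hunk of the diff is the security-relevant one. The strchr-to-osStrchr substitution itself is a portability pattern: routing libc string calls through one macro layer that each port can redefine. A sketch of such a wrapper, assuming a conventional default rather than CycloneTCP's actual os_port header:

/* Illustrative definition of an osStrchr-style wrapper. Assumption: like
 * most OS-abstraction layers, it defaults to the C library function and
 * can be overridden per port; this is not copied from CycloneTCP. */
#include <string.h>

#ifndef osStrchr
   #define osStrchr(s, c) strchr((s), (c))
#endif

With a wrapper like this in place, the wildcard test in mqttSnClientSubscribe reads the same on every port, and an embedded target without a full libc can supply its own implementation without patching the client code.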
bus-polkit.c
async_polkit_callback
/* SPDX-License-Identifier: LGPL-2.1+ */ #include "bus-internal.h" #include "bus-message.h" #include "bus-polkit.h" #include "strv.h" #include "user-util.h" static int check_good_user(sd_bus_message *m, uid_t good_user) { _cleanup_(sd_bus_creds_unrefp) sd_bus_creds *creds = NULL; uid_t sender_uid; int r; assert(m); if (good_user == UID_INVALID) return 0; r = sd_bus_query_sender_creds(m, SD_BUS_CREDS_EUID, &creds); if (r < 0) return r; /* Don't trust augmented credentials for authorization */ assert_return((sd_bus_creds_get_augmented_mask(creds) & SD_BUS_CREDS_EUID) == 0, -EPERM); r = sd_bus_creds_get_euid(creds, &sender_uid); if (r < 0) return r; return sender_uid == good_user; } #if ENABLE_POLKIT static int bus_message_append_strv_key_value( sd_bus_message *m, const char **l) { const char **k, **v; int r; assert(m); r = sd_bus_message_open_container(m, 'a', "{ss}"); if (r < 0) return r; STRV_FOREACH_PAIR(k, v, l) { r = sd_bus_message_append(m, "{ss}", *k, *v); if (r < 0) return r; } r = sd_bus_message_close_container(m); if (r < 0) return r; return r; } #endif int bus_test_polkit( sd_bus_message *call, int capability, const char *action, const char **details, uid_t good_user, bool *_challenge, sd_bus_error *ret_error) { int r; assert(call); assert(action); /* Tests non-interactively! */ r = check_good_user(call, good_user); if (r != 0) return r; r = sd_bus_query_sender_privilege(call, capability); if (r < 0) return r; else if (r > 0) return 1; #if ENABLE_POLKIT else { _cleanup_(sd_bus_message_unrefp) sd_bus_message *request = NULL; _cleanup_(sd_bus_message_unrefp) sd_bus_message *reply = NULL; int authorized = false, challenge = false; const char *sender; sender = sd_bus_message_get_sender(call); if (!sender) return -EBADMSG; r = sd_bus_message_new_method_call( call->bus, &request, "org.freedesktop.PolicyKit1", "/org/freedesktop/PolicyKit1/Authority", "org.freedesktop.PolicyKit1.Authority", "CheckAuthorization"); if (r < 0) return r; r = sd_bus_message_append( request, "(sa{sv})s", "system-bus-name", 1, "name", "s", sender, action); if (r < 0) return r; r = bus_message_append_strv_key_value(request, details); if (r < 0) return r; r = sd_bus_message_append(request, "us", 0, NULL); if (r < 0) return r; r = sd_bus_call(call->bus, request, 0, ret_error, &reply); if (r < 0) { /* Treat no PK available as access denied */ if (sd_bus_error_has_name(ret_error, SD_BUS_ERROR_SERVICE_UNKNOWN)) { sd_bus_error_free(ret_error); return -EACCES; } return r; } r = sd_bus_message_enter_container(reply, 'r', "bba{ss}"); if (r < 0) return r; r = sd_bus_message_read(reply, "bb", &authorized, &challenge); if (r < 0) return r; if (authorized) return 1; if (_challenge) { *_challenge = challenge; return 0; } } #endif return -EACCES; } #if ENABLE_POLKIT typedef struct AsyncPolkitQuery { char *action; char **details; sd_bus_message *request, *reply; sd_bus_message_handler_t callback; void *userdata; sd_bus_slot *slot; Hashmap *registry; } AsyncPolkitQuery; static void async_polkit_query_free(AsyncPolkitQuery *q) { if (!q) return; sd_bus_slot_unref(q->slot); if (q->registry && q->request) hashmap_remove(q->registry, q->request); sd_bus_message_unref(q->request); sd_bus_message_unref(q->reply); free(q->action); strv_free(q->details); free(q); } static int async_polkit_callback(sd_bus_message *reply, void *userdata, sd_bus_error *error) { _cleanup_(sd_bus_error_free) sd_bus_error error_buffer = SD_BUS_ERROR_NULL; AsyncPolkitQuery *q = userdata; int r; assert(reply); assert(q); q->slot = sd_bus_slot_unref(q->slot); 
q->reply = sd_bus_message_ref(reply); r = sd_bus_message_rewind(q->request, true); if (r < 0) { r = sd_bus_reply_method_errno(q->request, r, NULL); goto finish; } r = q->callback(q->request, q->userdata, &error_buffer); r = bus_maybe_reply_error(q->request, r, &error_buffer); finish: async_polkit_query_free(q); return r; } #endif int bus_verify_polkit_async( sd_bus_message *call, int capability, const char *action, const char **details, bool interactive, uid_t good_user, Hashmap **registry, sd_bus_error *ret_error) { #if ENABLE_POLKIT _cleanup_(sd_bus_message_unrefp) sd_bus_message *pk = NULL; AsyncPolkitQuery *q; const char *sender; sd_bus_message_handler_t callback; void *userdata; int c; #endif int r; assert(call); assert(action); assert(registry); r = check_good_user(call, good_user); if (r != 0) return r; #if ENABLE_POLKIT q = hashmap_get(*registry, call); if (q) { int authorized, challenge; /* This is the second invocation of this function, and there's already a response from * polkit, let's process it */ assert(q->reply); /* If the operation we want to authenticate changed between the first and the second time, * let's not use this authentication, it might be out of date as the object and context we * operate on might have changed. */ if (!streq(q->action, action) || !strv_equal(q->details, (char**) details)) return -ESTALE; if (sd_bus_message_is_method_error(q->reply, NULL)) { const sd_bus_error *e; e = sd_bus_message_get_error(q->reply); /* Treat no PK available as access denied */ if (sd_bus_error_has_name(e, SD_BUS_ERROR_SERVICE_UNKNOWN) || sd_bus_error_has_name(e, SD_BUS_ERROR_NAME_HAS_NO_OWNER)) return -EACCES; /* Copy error from polkit reply */ sd_bus_error_copy(ret_error, e); return -sd_bus_error_get_errno(e); } r = sd_bus_message_enter_container(q->reply, 'r', "bba{ss}"); if (r >= 0) r = sd_bus_message_read(q->reply, "bb", &authorized, &challenge); if (r < 0) return r; if (authorized) return 1; if (challenge) return sd_bus_error_set(ret_error, SD_BUS_ERROR_INTERACTIVE_AUTHORIZATION_REQUIRED, "Interactive authentication required."); return -EACCES; } #endif r = sd_bus_query_sender_privilege(call, capability); if (r < 0) return r; else if (r > 0) return 1; #if ENABLE_POLKIT if (sd_bus_get_current_message(call->bus) != call) return -EINVAL; callback = sd_bus_get_current_handler(call->bus); if (!callback) return -EINVAL; userdata = sd_bus_get_current_userdata(call->bus); sender = sd_bus_message_get_sender(call); if (!sender) return -EBADMSG; c = sd_bus_message_get_allow_interactive_authorization(call); if (c < 0) return c; if (c > 0) interactive = true; r = hashmap_ensure_allocated(registry, NULL); if (r < 0) return r; r = sd_bus_message_new_method_call( call->bus, &pk, "org.freedesktop.PolicyKit1", "/org/freedesktop/PolicyKit1/Authority", "org.freedesktop.PolicyKit1.Authority", "CheckAuthorization"); if (r < 0) return r; r = sd_bus_message_append( pk, "(sa{sv})s", "system-bus-name", 1, "name", "s", sender, action); if (r < 0) return r; r = bus_message_append_strv_key_value(pk, details); if (r < 0) return r; r = sd_bus_message_append(pk, "us", interactive, NULL); if (r < 0) return r; q = new(AsyncPolkitQuery, 1); if (!q) return -ENOMEM; *q = (AsyncPolkitQuery) { .request = sd_bus_message_ref(call), .callback = callback, .userdata = userdata, }; q->action = strdup(action); if (!q->action) { async_polkit_query_free(q); return -ENOMEM; } q->details = strv_copy((char**) details); if (!q->details) { async_polkit_query_free(q); return -ENOMEM; } r = hashmap_put(*registry, call, q); 
if (r < 0) { async_polkit_query_free(q); return r; } q->registry = *registry; r = sd_bus_call_async(call->bus, &q->slot, pk, async_polkit_callback, q, 0); if (r < 0) { async_polkit_query_free(q); return r; } return 0; #endif return -EACCES; } void bus_verify_polkit_async_registry_free(Hashmap *registry) { #if ENABLE_POLKIT hashmap_free_with_destructor(registry, async_polkit_query_free); #endif }
/* SPDX-License-Identifier: LGPL-2.1+ */ #include "bus-internal.h" #include "bus-message.h" #include "bus-polkit.h" #include "strv.h" #include "user-util.h" static int check_good_user(sd_bus_message *m, uid_t good_user) { _cleanup_(sd_bus_creds_unrefp) sd_bus_creds *creds = NULL; uid_t sender_uid; int r; assert(m); if (good_user == UID_INVALID) return 0; r = sd_bus_query_sender_creds(m, SD_BUS_CREDS_EUID, &creds); if (r < 0) return r; /* Don't trust augmented credentials for authorization */ assert_return((sd_bus_creds_get_augmented_mask(creds) & SD_BUS_CREDS_EUID) == 0, -EPERM); r = sd_bus_creds_get_euid(creds, &sender_uid); if (r < 0) return r; return sender_uid == good_user; } #if ENABLE_POLKIT static int bus_message_append_strv_key_value( sd_bus_message *m, const char **l) { const char **k, **v; int r; assert(m); r = sd_bus_message_open_container(m, 'a', "{ss}"); if (r < 0) return r; STRV_FOREACH_PAIR(k, v, l) { r = sd_bus_message_append(m, "{ss}", *k, *v); if (r < 0) return r; } r = sd_bus_message_close_container(m); if (r < 0) return r; return r; } #endif int bus_test_polkit( sd_bus_message *call, int capability, const char *action, const char **details, uid_t good_user, bool *_challenge, sd_bus_error *ret_error) { int r; assert(call); assert(action); /* Tests non-interactively! */ r = check_good_user(call, good_user); if (r != 0) return r; r = sd_bus_query_sender_privilege(call, capability); if (r < 0) return r; else if (r > 0) return 1; #if ENABLE_POLKIT else { _cleanup_(sd_bus_message_unrefp) sd_bus_message *request = NULL; _cleanup_(sd_bus_message_unrefp) sd_bus_message *reply = NULL; int authorized = false, challenge = false; const char *sender; sender = sd_bus_message_get_sender(call); if (!sender) return -EBADMSG; r = sd_bus_message_new_method_call( call->bus, &request, "org.freedesktop.PolicyKit1", "/org/freedesktop/PolicyKit1/Authority", "org.freedesktop.PolicyKit1.Authority", "CheckAuthorization"); if (r < 0) return r; r = sd_bus_message_append( request, "(sa{sv})s", "system-bus-name", 1, "name", "s", sender, action); if (r < 0) return r; r = bus_message_append_strv_key_value(request, details); if (r < 0) return r; r = sd_bus_message_append(request, "us", 0, NULL); if (r < 0) return r; r = sd_bus_call(call->bus, request, 0, ret_error, &reply); if (r < 0) { /* Treat no PK available as access denied */ if (sd_bus_error_has_name(ret_error, SD_BUS_ERROR_SERVICE_UNKNOWN)) { sd_bus_error_free(ret_error); return -EACCES; } return r; } r = sd_bus_message_enter_container(reply, 'r', "bba{ss}"); if (r < 0) return r; r = sd_bus_message_read(reply, "bb", &authorized, &challenge); if (r < 0) return r; if (authorized) return 1; if (_challenge) { *_challenge = challenge; return 0; } } #endif return -EACCES; } #if ENABLE_POLKIT typedef struct AsyncPolkitQuery { char *action; char **details; sd_bus_message *request, *reply; sd_bus_slot *slot; Hashmap *registry; sd_event_source *defer_event_source; } AsyncPolkitQuery; static void async_polkit_query_free(AsyncPolkitQuery *q) { if (!q) return; sd_bus_slot_unref(q->slot); if (q->registry && q->request) hashmap_remove(q->registry, q->request); sd_bus_message_unref(q->request); sd_bus_message_unref(q->reply); free(q->action); strv_free(q->details); sd_event_source_disable_unref(q->defer_event_source); free(q); } static int async_polkit_defer(sd_event_source *s, void *userdata) { AsyncPolkitQuery *q = userdata; assert(s); /* This is called as idle event source after we processed the async polkit reply, hopefully after the * method call we 
re-enqueued has been properly processed. */ async_polkit_query_free(q); return 0; } static int async_polkit_callback(sd_bus_message *reply, void *userdata, sd_bus_error *error) { _cleanup_(sd_bus_error_free) sd_bus_error error_buffer = SD_BUS_ERROR_NULL; AsyncPolkitQuery *q = userdata; int r; assert(reply); assert(q); assert(q->slot); q->slot = sd_bus_slot_unref(q->slot); assert(!q->reply); q->reply = sd_bus_message_ref(reply); /* Now, let's dispatch the original message a second time be re-enqueing. This will then traverse the * whole message processing again, and thus re-validating and re-retrieving the "userdata" field * again. * * We install an idle event loop event to clean-up the PolicyKit request data when we are idle again, * i.e. after the second time the message is processed is complete. */ assert(!q->defer_event_source); r = sd_event_add_defer(sd_bus_get_event(sd_bus_message_get_bus(reply)), &q->defer_event_source, async_polkit_defer, q); if (r < 0) goto fail; r = sd_event_source_set_priority(q->defer_event_source, SD_EVENT_PRIORITY_IDLE); if (r < 0) goto fail; r = sd_event_source_set_enabled(q->defer_event_source, SD_EVENT_ONESHOT); if (r < 0) goto fail; r = sd_bus_message_rewind(q->request, true); if (r < 0) goto fail; r = sd_bus_enqeue_for_read(sd_bus_message_get_bus(q->request), q->request); if (r < 0) goto fail; return 1; fail: log_debug_errno(r, "Processing asynchronous PolicyKit reply failed, ignoring: %m"); (void) sd_bus_reply_method_errno(q->request, r, NULL); async_polkit_query_free(q); return r; } #endif int bus_verify_polkit_async( sd_bus_message *call, int capability, const char *action, const char **details, bool interactive, uid_t good_user, Hashmap **registry, sd_bus_error *ret_error) { #if ENABLE_POLKIT _cleanup_(sd_bus_message_unrefp) sd_bus_message *pk = NULL; AsyncPolkitQuery *q; int c; #endif const char *sender; int r; assert(call); assert(action); assert(registry); r = check_good_user(call, good_user); if (r != 0) return r; #if ENABLE_POLKIT q = hashmap_get(*registry, call); if (q) { int authorized, challenge; /* This is the second invocation of this function, and there's already a response from * polkit, let's process it */ assert(q->reply); /* If the operation we want to authenticate changed between the first and the second time, * let's not use this authentication, it might be out of date as the object and context we * operate on might have changed. 
*/ if (!streq(q->action, action) || !strv_equal(q->details, (char**) details)) return -ESTALE; if (sd_bus_message_is_method_error(q->reply, NULL)) { const sd_bus_error *e; e = sd_bus_message_get_error(q->reply); /* Treat no PK available as access denied */ if (sd_bus_error_has_name(e, SD_BUS_ERROR_SERVICE_UNKNOWN) || sd_bus_error_has_name(e, SD_BUS_ERROR_NAME_HAS_NO_OWNER)) return -EACCES; /* Copy error from polkit reply */ sd_bus_error_copy(ret_error, e); return -sd_bus_error_get_errno(e); } r = sd_bus_message_enter_container(q->reply, 'r', "bba{ss}"); if (r >= 0) r = sd_bus_message_read(q->reply, "bb", &authorized, &challenge); if (r < 0) return r; if (authorized) return 1; if (challenge) return sd_bus_error_set(ret_error, SD_BUS_ERROR_INTERACTIVE_AUTHORIZATION_REQUIRED, "Interactive authentication required."); return -EACCES; } #endif r = sd_bus_query_sender_privilege(call, capability); if (r < 0) return r; else if (r > 0) return 1; sender = sd_bus_message_get_sender(call); if (!sender) return -EBADMSG; #if ENABLE_POLKIT c = sd_bus_message_get_allow_interactive_authorization(call); if (c < 0) return c; if (c > 0) interactive = true; r = hashmap_ensure_allocated(registry, NULL); if (r < 0) return r; r = sd_bus_message_new_method_call( call->bus, &pk, "org.freedesktop.PolicyKit1", "/org/freedesktop/PolicyKit1/Authority", "org.freedesktop.PolicyKit1.Authority", "CheckAuthorization"); if (r < 0) return r; r = sd_bus_message_append( pk, "(sa{sv})s", "system-bus-name", 1, "name", "s", sender, action); if (r < 0) return r; r = bus_message_append_strv_key_value(pk, details); if (r < 0) return r; r = sd_bus_message_append(pk, "us", interactive, NULL); if (r < 0) return r; q = new(AsyncPolkitQuery, 1); if (!q) return -ENOMEM; *q = (AsyncPolkitQuery) { .request = sd_bus_message_ref(call), }; q->action = strdup(action); if (!q->action) { async_polkit_query_free(q); return -ENOMEM; } q->details = strv_copy((char**) details); if (!q->details) { async_polkit_query_free(q); return -ENOMEM; } r = hashmap_put(*registry, call, q); if (r < 0) { async_polkit_query_free(q); return r; } q->registry = *registry; r = sd_bus_call_async(call->bus, &q->slot, pk, async_polkit_callback, q, 0); if (r < 0) { async_polkit_query_free(q); return r; } return 0; #endif return -EACCES; } void bus_verify_polkit_async_registry_free(Hashmap *registry) { #if ENABLE_POLKIT hashmap_free_with_destructor(registry, async_polkit_query_free); #endif }
static int async_polkit_callback(sd_bus_message *reply, void *userdata, sd_bus_error *error) { _cleanup_(sd_bus_error_free) sd_bus_error error_buffer = SD_BUS_ERROR_NULL; AsyncPolkitQuery *q = userdata; int r; assert(reply); assert(q); q->slot = sd_bus_slot_unref(q->slot); q->reply = sd_bus_message_ref(reply); r = sd_bus_message_rewind(q->request, true); if (r < 0) { r = sd_bus_reply_method_errno(q->request, r, NULL); goto finish; } r = q->callback(q->request, q->userdata, &error_buffer); r = bus_maybe_reply_error(q->request, r, &error_buffer); finish: async_polkit_query_free(q); return r; }
static int async_polkit_callback(sd_bus_message *reply, void *userdata, sd_bus_error *error) { _cleanup_(sd_bus_error_free) sd_bus_error error_buffer = SD_BUS_ERROR_NULL; AsyncPolkitQuery *q = userdata; int r; assert(reply); assert(q); assert(q->slot); q->slot = sd_bus_slot_unref(q->slot); assert(!q->reply); q->reply = sd_bus_message_ref(reply); /* Now, let's dispatch the original message a second time be re-enqueing. This will then traverse the * whole message processing again, and thus re-validating and re-retrieving the "userdata" field * again. * * We install an idle event loop event to clean-up the PolicyKit request data when we are idle again, * i.e. after the second time the message is processed is complete. */ assert(!q->defer_event_source); r = sd_event_add_defer(sd_bus_get_event(sd_bus_message_get_bus(reply)), &q->defer_event_source, async_polkit_defer, q); if (r < 0) goto fail; r = sd_event_source_set_priority(q->defer_event_source, SD_EVENT_PRIORITY_IDLE); if (r < 0) goto fail; r = sd_event_source_set_enabled(q->defer_event_source, SD_EVENT_ONESHOT); if (r < 0) goto fail; r = sd_bus_message_rewind(q->request, true); if (r < 0) goto fail; r = sd_bus_enqeue_for_read(sd_bus_message_get_bus(q->request), q->request); if (r < 0) goto fail; return 1; fail: log_debug_errno(r, "Processing asynchronous PolicyKit reply failed, ignoring: %m"); (void) sd_bus_reply_method_errno(q->request, r, NULL); async_polkit_query_free(q); return r; }
{'added': [(163, ''), (165, ' sd_event_source *defer_event_source;'), (183, ' sd_event_source_disable_unref(q->defer_event_source);'), (187, 'static int async_polkit_defer(sd_event_source *s, void *userdata) {'), (188, ' AsyncPolkitQuery *q = userdata;'), (189, ''), (190, ' assert(s);'), (191, ''), (192, ' /* This is called as idle event source after we processed the async polkit reply, hopefully after the'), (193, ' * method call we re-enqueued has been properly processed. */'), (194, ''), (195, ' async_polkit_query_free(q);'), (196, ' return 0;'), (197, '}'), (198, ''), (207, ' assert(q->slot);'), (209, ''), (210, ' assert(!q->reply);'), (213, " /* Now, let's dispatch the original message a second time be re-enqueing. This will then traverse the"), (214, ' * whole message processing again, and thus re-validating and re-retrieving the "userdata" field'), (215, ' * again.'), (216, ' *'), (217, ' * We install an idle event loop event to clean-up the PolicyKit request data when we are idle again,'), (218, ' * i.e. after the second time the message is processed is complete. */'), (219, ''), (220, ' assert(!q->defer_event_source);'), (221, ' r = sd_event_add_defer(sd_bus_get_event(sd_bus_message_get_bus(reply)), &q->defer_event_source, async_polkit_defer, q);'), (222, ' if (r < 0)'), (223, ' goto fail;'), (224, ''), (225, ' r = sd_event_source_set_priority(q->defer_event_source, SD_EVENT_PRIORITY_IDLE);'), (226, ' if (r < 0)'), (227, ' goto fail;'), (228, ''), (229, ' r = sd_event_source_set_enabled(q->defer_event_source, SD_EVENT_ONESHOT);'), (230, ' if (r < 0)'), (231, ' goto fail;'), (232, ''), (234, ' if (r < 0)'), (235, ' goto fail;'), (236, ''), (237, ' r = sd_bus_enqeue_for_read(sd_bus_message_get_bus(q->request), q->request);'), (238, ' if (r < 0)'), (239, ' goto fail;'), (241, ' return 1;'), (243, 'fail:'), (244, ' log_debug_errno(r, "Processing asynchronous PolicyKit reply failed, ignoring: %m");'), (245, ' (void) sd_bus_reply_method_errno(q->request, r, NULL);'), (267, ' const char *sender;'), (335, '#if ENABLE_POLKIT')], 'deleted': [(162, ' sd_bus_message_handler_t callback;'), (163, ' void *userdata;'), (169, ''), (199, ' if (r < 0) {'), (200, ' r = sd_bus_reply_method_errno(q->request, r, NULL);'), (201, ' goto finish;'), (202, ' }'), (204, ' r = q->callback(q->request, q->userdata, &error_buffer);'), (205, ' r = bus_maybe_reply_error(q->request, r, &error_buffer);'), (207, 'finish:'), (209, ''), (228, ' const char *sender;'), (229, ' sd_bus_message_handler_t callback;'), (230, ' void *userdata;'), (296, '#if ENABLE_POLKIT'), (297, ' if (sd_bus_get_current_message(call->bus) != call)'), (298, ' return -EINVAL;'), (299, ''), (300, ' callback = sd_bus_get_current_handler(call->bus);'), (301, ' if (!callback)'), (302, ' return -EINVAL;'), (303, ''), (304, ' userdata = sd_bus_get_current_userdata(call->bus);'), (305, ''), (352, ' .callback = callback,'), (353, ' .userdata = userdata,')]}
50
26
290
1,631
https://github.com/systemd/systemd
CVE-2020-1712
['CWE-416']
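Annotator note: CVE-2020-1712 is a heap use-after-free (CWE-416) in systemd's asynchronous PolicyKit authorization path. In func_before above, async_polkit_callback invokes q->callback(q->request, q->userdata, &error_buffer) with a handler/userdata pair captured when the query was submitted; if the object backing userdata is torn down while the polkit round-trip is in flight, the callback dereferences freed memory. The standalone C sketch below distills only that hazard class — PendingRequest, on_complete and friends are invented names, not systemd APIs — and the final call is deliberately a use-after-free (AddressSanitizer flags it).

/*
 * Distilled illustration of the CWE-416 pattern in the record above: an
 * async request caches a raw userdata pointer at submission time; if the
 * object is freed before the completion callback runs, the callback reads
 * freed memory. All names here are hypothetical.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct Object {
    char name[32];
} Object;

typedef struct PendingRequest {
    void (*callback)(void *userdata);
    void *userdata;              /* raw pointer captured at submit time */
} PendingRequest;

static void on_complete(void *userdata) {
    Object *o = userdata;
    printf("completing for %s\n", o->name);  /* UAF if o was freed */
}

int main(void) {
    Object *o = malloc(sizeof *o);
    strcpy(o->name, "unit-1");

    PendingRequest req = { .callback = on_complete, .userdata = o };

    free(o);                     /* object torn down while request in flight */
    req.callback(req.userdata);  /* use-after-free: reads freed memory */
    return 0;
}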
bus-polkit.c
async_polkit_query_free
/* SPDX-License-Identifier: LGPL-2.1+ */ #include "bus-internal.h" #include "bus-message.h" #include "bus-polkit.h" #include "strv.h" #include "user-util.h" static int check_good_user(sd_bus_message *m, uid_t good_user) { _cleanup_(sd_bus_creds_unrefp) sd_bus_creds *creds = NULL; uid_t sender_uid; int r; assert(m); if (good_user == UID_INVALID) return 0; r = sd_bus_query_sender_creds(m, SD_BUS_CREDS_EUID, &creds); if (r < 0) return r; /* Don't trust augmented credentials for authorization */ assert_return((sd_bus_creds_get_augmented_mask(creds) & SD_BUS_CREDS_EUID) == 0, -EPERM); r = sd_bus_creds_get_euid(creds, &sender_uid); if (r < 0) return r; return sender_uid == good_user; } #if ENABLE_POLKIT static int bus_message_append_strv_key_value( sd_bus_message *m, const char **l) { const char **k, **v; int r; assert(m); r = sd_bus_message_open_container(m, 'a', "{ss}"); if (r < 0) return r; STRV_FOREACH_PAIR(k, v, l) { r = sd_bus_message_append(m, "{ss}", *k, *v); if (r < 0) return r; } r = sd_bus_message_close_container(m); if (r < 0) return r; return r; } #endif int bus_test_polkit( sd_bus_message *call, int capability, const char *action, const char **details, uid_t good_user, bool *_challenge, sd_bus_error *ret_error) { int r; assert(call); assert(action); /* Tests non-interactively! */ r = check_good_user(call, good_user); if (r != 0) return r; r = sd_bus_query_sender_privilege(call, capability); if (r < 0) return r; else if (r > 0) return 1; #if ENABLE_POLKIT else { _cleanup_(sd_bus_message_unrefp) sd_bus_message *request = NULL; _cleanup_(sd_bus_message_unrefp) sd_bus_message *reply = NULL; int authorized = false, challenge = false; const char *sender; sender = sd_bus_message_get_sender(call); if (!sender) return -EBADMSG; r = sd_bus_message_new_method_call( call->bus, &request, "org.freedesktop.PolicyKit1", "/org/freedesktop/PolicyKit1/Authority", "org.freedesktop.PolicyKit1.Authority", "CheckAuthorization"); if (r < 0) return r; r = sd_bus_message_append( request, "(sa{sv})s", "system-bus-name", 1, "name", "s", sender, action); if (r < 0) return r; r = bus_message_append_strv_key_value(request, details); if (r < 0) return r; r = sd_bus_message_append(request, "us", 0, NULL); if (r < 0) return r; r = sd_bus_call(call->bus, request, 0, ret_error, &reply); if (r < 0) { /* Treat no PK available as access denied */ if (sd_bus_error_has_name(ret_error, SD_BUS_ERROR_SERVICE_UNKNOWN)) { sd_bus_error_free(ret_error); return -EACCES; } return r; } r = sd_bus_message_enter_container(reply, 'r', "bba{ss}"); if (r < 0) return r; r = sd_bus_message_read(reply, "bb", &authorized, &challenge); if (r < 0) return r; if (authorized) return 1; if (_challenge) { *_challenge = challenge; return 0; } } #endif return -EACCES; } #if ENABLE_POLKIT typedef struct AsyncPolkitQuery { char *action; char **details; sd_bus_message *request, *reply; sd_bus_message_handler_t callback; void *userdata; sd_bus_slot *slot; Hashmap *registry; } AsyncPolkitQuery; static void async_polkit_query_free(AsyncPolkitQuery *q) { if (!q) return; sd_bus_slot_unref(q->slot); if (q->registry && q->request) hashmap_remove(q->registry, q->request); sd_bus_message_unref(q->request); sd_bus_message_unref(q->reply); free(q->action); strv_free(q->details); free(q); } static int async_polkit_callback(sd_bus_message *reply, void *userdata, sd_bus_error *error) { _cleanup_(sd_bus_error_free) sd_bus_error error_buffer = SD_BUS_ERROR_NULL; AsyncPolkitQuery *q = userdata; int r; assert(reply); assert(q); q->slot = sd_bus_slot_unref(q->slot); 
q->reply = sd_bus_message_ref(reply); r = sd_bus_message_rewind(q->request, true); if (r < 0) { r = sd_bus_reply_method_errno(q->request, r, NULL); goto finish; } r = q->callback(q->request, q->userdata, &error_buffer); r = bus_maybe_reply_error(q->request, r, &error_buffer); finish: async_polkit_query_free(q); return r; } #endif int bus_verify_polkit_async( sd_bus_message *call, int capability, const char *action, const char **details, bool interactive, uid_t good_user, Hashmap **registry, sd_bus_error *ret_error) { #if ENABLE_POLKIT _cleanup_(sd_bus_message_unrefp) sd_bus_message *pk = NULL; AsyncPolkitQuery *q; const char *sender; sd_bus_message_handler_t callback; void *userdata; int c; #endif int r; assert(call); assert(action); assert(registry); r = check_good_user(call, good_user); if (r != 0) return r; #if ENABLE_POLKIT q = hashmap_get(*registry, call); if (q) { int authorized, challenge; /* This is the second invocation of this function, and there's already a response from * polkit, let's process it */ assert(q->reply); /* If the operation we want to authenticate changed between the first and the second time, * let's not use this authentication, it might be out of date as the object and context we * operate on might have changed. */ if (!streq(q->action, action) || !strv_equal(q->details, (char**) details)) return -ESTALE; if (sd_bus_message_is_method_error(q->reply, NULL)) { const sd_bus_error *e; e = sd_bus_message_get_error(q->reply); /* Treat no PK available as access denied */ if (sd_bus_error_has_name(e, SD_BUS_ERROR_SERVICE_UNKNOWN) || sd_bus_error_has_name(e, SD_BUS_ERROR_NAME_HAS_NO_OWNER)) return -EACCES; /* Copy error from polkit reply */ sd_bus_error_copy(ret_error, e); return -sd_bus_error_get_errno(e); } r = sd_bus_message_enter_container(q->reply, 'r', "bba{ss}"); if (r >= 0) r = sd_bus_message_read(q->reply, "bb", &authorized, &challenge); if (r < 0) return r; if (authorized) return 1; if (challenge) return sd_bus_error_set(ret_error, SD_BUS_ERROR_INTERACTIVE_AUTHORIZATION_REQUIRED, "Interactive authentication required."); return -EACCES; } #endif r = sd_bus_query_sender_privilege(call, capability); if (r < 0) return r; else if (r > 0) return 1; #if ENABLE_POLKIT if (sd_bus_get_current_message(call->bus) != call) return -EINVAL; callback = sd_bus_get_current_handler(call->bus); if (!callback) return -EINVAL; userdata = sd_bus_get_current_userdata(call->bus); sender = sd_bus_message_get_sender(call); if (!sender) return -EBADMSG; c = sd_bus_message_get_allow_interactive_authorization(call); if (c < 0) return c; if (c > 0) interactive = true; r = hashmap_ensure_allocated(registry, NULL); if (r < 0) return r; r = sd_bus_message_new_method_call( call->bus, &pk, "org.freedesktop.PolicyKit1", "/org/freedesktop/PolicyKit1/Authority", "org.freedesktop.PolicyKit1.Authority", "CheckAuthorization"); if (r < 0) return r; r = sd_bus_message_append( pk, "(sa{sv})s", "system-bus-name", 1, "name", "s", sender, action); if (r < 0) return r; r = bus_message_append_strv_key_value(pk, details); if (r < 0) return r; r = sd_bus_message_append(pk, "us", interactive, NULL); if (r < 0) return r; q = new(AsyncPolkitQuery, 1); if (!q) return -ENOMEM; *q = (AsyncPolkitQuery) { .request = sd_bus_message_ref(call), .callback = callback, .userdata = userdata, }; q->action = strdup(action); if (!q->action) { async_polkit_query_free(q); return -ENOMEM; } q->details = strv_copy((char**) details); if (!q->details) { async_polkit_query_free(q); return -ENOMEM; } r = hashmap_put(*registry, call, q); 
if (r < 0) { async_polkit_query_free(q); return r; } q->registry = *registry; r = sd_bus_call_async(call->bus, &q->slot, pk, async_polkit_callback, q, 0); if (r < 0) { async_polkit_query_free(q); return r; } return 0; #endif return -EACCES; } void bus_verify_polkit_async_registry_free(Hashmap *registry) { #if ENABLE_POLKIT hashmap_free_with_destructor(registry, async_polkit_query_free); #endif }
/* SPDX-License-Identifier: LGPL-2.1+ */ #include "bus-internal.h" #include "bus-message.h" #include "bus-polkit.h" #include "strv.h" #include "user-util.h" static int check_good_user(sd_bus_message *m, uid_t good_user) { _cleanup_(sd_bus_creds_unrefp) sd_bus_creds *creds = NULL; uid_t sender_uid; int r; assert(m); if (good_user == UID_INVALID) return 0; r = sd_bus_query_sender_creds(m, SD_BUS_CREDS_EUID, &creds); if (r < 0) return r; /* Don't trust augmented credentials for authorization */ assert_return((sd_bus_creds_get_augmented_mask(creds) & SD_BUS_CREDS_EUID) == 0, -EPERM); r = sd_bus_creds_get_euid(creds, &sender_uid); if (r < 0) return r; return sender_uid == good_user; } #if ENABLE_POLKIT static int bus_message_append_strv_key_value( sd_bus_message *m, const char **l) { const char **k, **v; int r; assert(m); r = sd_bus_message_open_container(m, 'a', "{ss}"); if (r < 0) return r; STRV_FOREACH_PAIR(k, v, l) { r = sd_bus_message_append(m, "{ss}", *k, *v); if (r < 0) return r; } r = sd_bus_message_close_container(m); if (r < 0) return r; return r; } #endif int bus_test_polkit( sd_bus_message *call, int capability, const char *action, const char **details, uid_t good_user, bool *_challenge, sd_bus_error *ret_error) { int r; assert(call); assert(action); /* Tests non-interactively! */ r = check_good_user(call, good_user); if (r != 0) return r; r = sd_bus_query_sender_privilege(call, capability); if (r < 0) return r; else if (r > 0) return 1; #if ENABLE_POLKIT else { _cleanup_(sd_bus_message_unrefp) sd_bus_message *request = NULL; _cleanup_(sd_bus_message_unrefp) sd_bus_message *reply = NULL; int authorized = false, challenge = false; const char *sender; sender = sd_bus_message_get_sender(call); if (!sender) return -EBADMSG; r = sd_bus_message_new_method_call( call->bus, &request, "org.freedesktop.PolicyKit1", "/org/freedesktop/PolicyKit1/Authority", "org.freedesktop.PolicyKit1.Authority", "CheckAuthorization"); if (r < 0) return r; r = sd_bus_message_append( request, "(sa{sv})s", "system-bus-name", 1, "name", "s", sender, action); if (r < 0) return r; r = bus_message_append_strv_key_value(request, details); if (r < 0) return r; r = sd_bus_message_append(request, "us", 0, NULL); if (r < 0) return r; r = sd_bus_call(call->bus, request, 0, ret_error, &reply); if (r < 0) { /* Treat no PK available as access denied */ if (sd_bus_error_has_name(ret_error, SD_BUS_ERROR_SERVICE_UNKNOWN)) { sd_bus_error_free(ret_error); return -EACCES; } return r; } r = sd_bus_message_enter_container(reply, 'r', "bba{ss}"); if (r < 0) return r; r = sd_bus_message_read(reply, "bb", &authorized, &challenge); if (r < 0) return r; if (authorized) return 1; if (_challenge) { *_challenge = challenge; return 0; } } #endif return -EACCES; } #if ENABLE_POLKIT typedef struct AsyncPolkitQuery { char *action; char **details; sd_bus_message *request, *reply; sd_bus_slot *slot; Hashmap *registry; sd_event_source *defer_event_source; } AsyncPolkitQuery; static void async_polkit_query_free(AsyncPolkitQuery *q) { if (!q) return; sd_bus_slot_unref(q->slot); if (q->registry && q->request) hashmap_remove(q->registry, q->request); sd_bus_message_unref(q->request); sd_bus_message_unref(q->reply); free(q->action); strv_free(q->details); sd_event_source_disable_unref(q->defer_event_source); free(q); } static int async_polkit_defer(sd_event_source *s, void *userdata) { AsyncPolkitQuery *q = userdata; assert(s); /* This is called as idle event source after we processed the async polkit reply, hopefully after the * method call we 
re-enqueued has been properly processed. */ async_polkit_query_free(q); return 0; } static int async_polkit_callback(sd_bus_message *reply, void *userdata, sd_bus_error *error) { _cleanup_(sd_bus_error_free) sd_bus_error error_buffer = SD_BUS_ERROR_NULL; AsyncPolkitQuery *q = userdata; int r; assert(reply); assert(q); assert(q->slot); q->slot = sd_bus_slot_unref(q->slot); assert(!q->reply); q->reply = sd_bus_message_ref(reply); /* Now, let's dispatch the original message a second time be re-enqueing. This will then traverse the * whole message processing again, and thus re-validating and re-retrieving the "userdata" field * again. * * We install an idle event loop event to clean-up the PolicyKit request data when we are idle again, * i.e. after the second time the message is processed is complete. */ assert(!q->defer_event_source); r = sd_event_add_defer(sd_bus_get_event(sd_bus_message_get_bus(reply)), &q->defer_event_source, async_polkit_defer, q); if (r < 0) goto fail; r = sd_event_source_set_priority(q->defer_event_source, SD_EVENT_PRIORITY_IDLE); if (r < 0) goto fail; r = sd_event_source_set_enabled(q->defer_event_source, SD_EVENT_ONESHOT); if (r < 0) goto fail; r = sd_bus_message_rewind(q->request, true); if (r < 0) goto fail; r = sd_bus_enqeue_for_read(sd_bus_message_get_bus(q->request), q->request); if (r < 0) goto fail; return 1; fail: log_debug_errno(r, "Processing asynchronous PolicyKit reply failed, ignoring: %m"); (void) sd_bus_reply_method_errno(q->request, r, NULL); async_polkit_query_free(q); return r; } #endif int bus_verify_polkit_async( sd_bus_message *call, int capability, const char *action, const char **details, bool interactive, uid_t good_user, Hashmap **registry, sd_bus_error *ret_error) { #if ENABLE_POLKIT _cleanup_(sd_bus_message_unrefp) sd_bus_message *pk = NULL; AsyncPolkitQuery *q; int c; #endif const char *sender; int r; assert(call); assert(action); assert(registry); r = check_good_user(call, good_user); if (r != 0) return r; #if ENABLE_POLKIT q = hashmap_get(*registry, call); if (q) { int authorized, challenge; /* This is the second invocation of this function, and there's already a response from * polkit, let's process it */ assert(q->reply); /* If the operation we want to authenticate changed between the first and the second time, * let's not use this authentication, it might be out of date as the object and context we * operate on might have changed. 
*/ if (!streq(q->action, action) || !strv_equal(q->details, (char**) details)) return -ESTALE; if (sd_bus_message_is_method_error(q->reply, NULL)) { const sd_bus_error *e; e = sd_bus_message_get_error(q->reply); /* Treat no PK available as access denied */ if (sd_bus_error_has_name(e, SD_BUS_ERROR_SERVICE_UNKNOWN) || sd_bus_error_has_name(e, SD_BUS_ERROR_NAME_HAS_NO_OWNER)) return -EACCES; /* Copy error from polkit reply */ sd_bus_error_copy(ret_error, e); return -sd_bus_error_get_errno(e); } r = sd_bus_message_enter_container(q->reply, 'r', "bba{ss}"); if (r >= 0) r = sd_bus_message_read(q->reply, "bb", &authorized, &challenge); if (r < 0) return r; if (authorized) return 1; if (challenge) return sd_bus_error_set(ret_error, SD_BUS_ERROR_INTERACTIVE_AUTHORIZATION_REQUIRED, "Interactive authentication required."); return -EACCES; } #endif r = sd_bus_query_sender_privilege(call, capability); if (r < 0) return r; else if (r > 0) return 1; sender = sd_bus_message_get_sender(call); if (!sender) return -EBADMSG; #if ENABLE_POLKIT c = sd_bus_message_get_allow_interactive_authorization(call); if (c < 0) return c; if (c > 0) interactive = true; r = hashmap_ensure_allocated(registry, NULL); if (r < 0) return r; r = sd_bus_message_new_method_call( call->bus, &pk, "org.freedesktop.PolicyKit1", "/org/freedesktop/PolicyKit1/Authority", "org.freedesktop.PolicyKit1.Authority", "CheckAuthorization"); if (r < 0) return r; r = sd_bus_message_append( pk, "(sa{sv})s", "system-bus-name", 1, "name", "s", sender, action); if (r < 0) return r; r = bus_message_append_strv_key_value(pk, details); if (r < 0) return r; r = sd_bus_message_append(pk, "us", interactive, NULL); if (r < 0) return r; q = new(AsyncPolkitQuery, 1); if (!q) return -ENOMEM; *q = (AsyncPolkitQuery) { .request = sd_bus_message_ref(call), }; q->action = strdup(action); if (!q->action) { async_polkit_query_free(q); return -ENOMEM; } q->details = strv_copy((char**) details); if (!q->details) { async_polkit_query_free(q); return -ENOMEM; } r = hashmap_put(*registry, call, q); if (r < 0) { async_polkit_query_free(q); return r; } q->registry = *registry; r = sd_bus_call_async(call->bus, &q->slot, pk, async_polkit_callback, q, 0); if (r < 0) { async_polkit_query_free(q); return r; } return 0; #endif return -EACCES; } void bus_verify_polkit_async_registry_free(Hashmap *registry) { #if ENABLE_POLKIT hashmap_free_with_destructor(registry, async_polkit_query_free); #endif }
static void async_polkit_query_free(AsyncPolkitQuery *q) { if (!q) return; sd_bus_slot_unref(q->slot); if (q->registry && q->request) hashmap_remove(q->registry, q->request); sd_bus_message_unref(q->request); sd_bus_message_unref(q->reply); free(q->action); strv_free(q->details); free(q); }
static void async_polkit_query_free(AsyncPolkitQuery *q) { if (!q) return; sd_bus_slot_unref(q->slot); if (q->registry && q->request) hashmap_remove(q->registry, q->request); sd_bus_message_unref(q->request); sd_bus_message_unref(q->reply); free(q->action); strv_free(q->details); sd_event_source_disable_unref(q->defer_event_source); free(q); }
{'added': [(163, ''), (165, ' sd_event_source *defer_event_source;'), (183, ' sd_event_source_disable_unref(q->defer_event_source);'), (187, 'static int async_polkit_defer(sd_event_source *s, void *userdata) {'), (188, ' AsyncPolkitQuery *q = userdata;'), (189, ''), (190, ' assert(s);'), (191, ''), (192, ' /* This is called as idle event source after we processed the async polkit reply, hopefully after the'), (193, ' * method call we re-enqueued has been properly processed. */'), (194, ''), (195, ' async_polkit_query_free(q);'), (196, ' return 0;'), (197, '}'), (198, ''), (207, ' assert(q->slot);'), (209, ''), (210, ' assert(!q->reply);'), (213, " /* Now, let's dispatch the original message a second time be re-enqueing. This will then traverse the"), (214, ' * whole message processing again, and thus re-validating and re-retrieving the "userdata" field'), (215, ' * again.'), (216, ' *'), (217, ' * We install an idle event loop event to clean-up the PolicyKit request data when we are idle again,'), (218, ' * i.e. after the second time the message is processed is complete. */'), (219, ''), (220, ' assert(!q->defer_event_source);'), (221, ' r = sd_event_add_defer(sd_bus_get_event(sd_bus_message_get_bus(reply)), &q->defer_event_source, async_polkit_defer, q);'), (222, ' if (r < 0)'), (223, ' goto fail;'), (224, ''), (225, ' r = sd_event_source_set_priority(q->defer_event_source, SD_EVENT_PRIORITY_IDLE);'), (226, ' if (r < 0)'), (227, ' goto fail;'), (228, ''), (229, ' r = sd_event_source_set_enabled(q->defer_event_source, SD_EVENT_ONESHOT);'), (230, ' if (r < 0)'), (231, ' goto fail;'), (232, ''), (234, ' if (r < 0)'), (235, ' goto fail;'), (236, ''), (237, ' r = sd_bus_enqeue_for_read(sd_bus_message_get_bus(q->request), q->request);'), (238, ' if (r < 0)'), (239, ' goto fail;'), (241, ' return 1;'), (243, 'fail:'), (244, ' log_debug_errno(r, "Processing asynchronous PolicyKit reply failed, ignoring: %m");'), (245, ' (void) sd_bus_reply_method_errno(q->request, r, NULL);'), (267, ' const char *sender;'), (335, '#if ENABLE_POLKIT')], 'deleted': [(162, ' sd_bus_message_handler_t callback;'), (163, ' void *userdata;'), (169, ''), (199, ' if (r < 0) {'), (200, ' r = sd_bus_reply_method_errno(q->request, r, NULL);'), (201, ' goto finish;'), (202, ' }'), (204, ' r = q->callback(q->request, q->userdata, &error_buffer);'), (205, ' r = bus_maybe_reply_error(q->request, r, &error_buffer);'), (207, 'finish:'), (209, ''), (228, ' const char *sender;'), (229, ' sd_bus_message_handler_t callback;'), (230, ' void *userdata;'), (296, '#if ENABLE_POLKIT'), (297, ' if (sd_bus_get_current_message(call->bus) != call)'), (298, ' return -EINVAL;'), (299, ''), (300, ' callback = sd_bus_get_current_handler(call->bus);'), (301, ' if (!callback)'), (302, ' return -EINVAL;'), (303, ''), (304, ' userdata = sd_bus_get_current_userdata(call->bus);'), (305, ''), (352, ' .callback = callback,'), (353, ' .userdata = userdata,')]}
50
26
290
1,631
https://github.com/systemd/systemd
CVE-2020-1712
['CWE-416']
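Annotator note: the async_polkit_query_free record above shows the destructor side of the fix: sd_event_source_disable_unref(q->defer_event_source) guarantees that a defer event source still pending at destruction time can never fire into freed query state. Below is a minimal sketch of that destructor discipline, assuming an invented event-loop shim (EventSource, dispatch) that stands in for sd-event.

/*
 * Sketch of the destructor hygiene added in the fix: before an owner frees
 * itself, it disables any pending event source, so a later dispatch pass
 * cannot call back into freed state. The event-loop types are invented.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct EventSource {
    bool enabled;
    void (*fn)(void *userdata);
    void *userdata;
} EventSource;

typedef struct Owner {
    EventSource *pending;
} Owner;

static void dispatch(EventSource *s) {
    if (s && s->enabled)             /* disabled sources are skipped */
        s->fn(s->userdata);
}

static void owner_fire(void *userdata) {
    printf("callback into owner %p\n", userdata);
}

static void owner_free(Owner *o) {
    if (o->pending)
        o->pending->enabled = false; /* analogous to disable-then-unref */
    free(o);
}

int main(void) {
    EventSource src = { .enabled = true, .fn = owner_fire };
    Owner *o = malloc(sizeof *o);
    src.userdata = o;
    o->pending = &src;

    owner_free(o);   /* disables the source before freeing the owner */
    dispatch(&src);  /* safely skipped: no call into freed memory */
    return 0;
}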
bus-polkit.c
bus_verify_polkit_async
/* SPDX-License-Identifier: LGPL-2.1+ */ #include "bus-internal.h" #include "bus-message.h" #include "bus-polkit.h" #include "strv.h" #include "user-util.h" static int check_good_user(sd_bus_message *m, uid_t good_user) { _cleanup_(sd_bus_creds_unrefp) sd_bus_creds *creds = NULL; uid_t sender_uid; int r; assert(m); if (good_user == UID_INVALID) return 0; r = sd_bus_query_sender_creds(m, SD_BUS_CREDS_EUID, &creds); if (r < 0) return r; /* Don't trust augmented credentials for authorization */ assert_return((sd_bus_creds_get_augmented_mask(creds) & SD_BUS_CREDS_EUID) == 0, -EPERM); r = sd_bus_creds_get_euid(creds, &sender_uid); if (r < 0) return r; return sender_uid == good_user; } #if ENABLE_POLKIT static int bus_message_append_strv_key_value( sd_bus_message *m, const char **l) { const char **k, **v; int r; assert(m); r = sd_bus_message_open_container(m, 'a', "{ss}"); if (r < 0) return r; STRV_FOREACH_PAIR(k, v, l) { r = sd_bus_message_append(m, "{ss}", *k, *v); if (r < 0) return r; } r = sd_bus_message_close_container(m); if (r < 0) return r; return r; } #endif int bus_test_polkit( sd_bus_message *call, int capability, const char *action, const char **details, uid_t good_user, bool *_challenge, sd_bus_error *ret_error) { int r; assert(call); assert(action); /* Tests non-interactively! */ r = check_good_user(call, good_user); if (r != 0) return r; r = sd_bus_query_sender_privilege(call, capability); if (r < 0) return r; else if (r > 0) return 1; #if ENABLE_POLKIT else { _cleanup_(sd_bus_message_unrefp) sd_bus_message *request = NULL; _cleanup_(sd_bus_message_unrefp) sd_bus_message *reply = NULL; int authorized = false, challenge = false; const char *sender; sender = sd_bus_message_get_sender(call); if (!sender) return -EBADMSG; r = sd_bus_message_new_method_call( call->bus, &request, "org.freedesktop.PolicyKit1", "/org/freedesktop/PolicyKit1/Authority", "org.freedesktop.PolicyKit1.Authority", "CheckAuthorization"); if (r < 0) return r; r = sd_bus_message_append( request, "(sa{sv})s", "system-bus-name", 1, "name", "s", sender, action); if (r < 0) return r; r = bus_message_append_strv_key_value(request, details); if (r < 0) return r; r = sd_bus_message_append(request, "us", 0, NULL); if (r < 0) return r; r = sd_bus_call(call->bus, request, 0, ret_error, &reply); if (r < 0) { /* Treat no PK available as access denied */ if (sd_bus_error_has_name(ret_error, SD_BUS_ERROR_SERVICE_UNKNOWN)) { sd_bus_error_free(ret_error); return -EACCES; } return r; } r = sd_bus_message_enter_container(reply, 'r', "bba{ss}"); if (r < 0) return r; r = sd_bus_message_read(reply, "bb", &authorized, &challenge); if (r < 0) return r; if (authorized) return 1; if (_challenge) { *_challenge = challenge; return 0; } } #endif return -EACCES; } #if ENABLE_POLKIT typedef struct AsyncPolkitQuery { char *action; char **details; sd_bus_message *request, *reply; sd_bus_message_handler_t callback; void *userdata; sd_bus_slot *slot; Hashmap *registry; } AsyncPolkitQuery; static void async_polkit_query_free(AsyncPolkitQuery *q) { if (!q) return; sd_bus_slot_unref(q->slot); if (q->registry && q->request) hashmap_remove(q->registry, q->request); sd_bus_message_unref(q->request); sd_bus_message_unref(q->reply); free(q->action); strv_free(q->details); free(q); } static int async_polkit_callback(sd_bus_message *reply, void *userdata, sd_bus_error *error) { _cleanup_(sd_bus_error_free) sd_bus_error error_buffer = SD_BUS_ERROR_NULL; AsyncPolkitQuery *q = userdata; int r; assert(reply); assert(q); q->slot = sd_bus_slot_unref(q->slot); 
q->reply = sd_bus_message_ref(reply); r = sd_bus_message_rewind(q->request, true); if (r < 0) { r = sd_bus_reply_method_errno(q->request, r, NULL); goto finish; } r = q->callback(q->request, q->userdata, &error_buffer); r = bus_maybe_reply_error(q->request, r, &error_buffer); finish: async_polkit_query_free(q); return r; } #endif int bus_verify_polkit_async( sd_bus_message *call, int capability, const char *action, const char **details, bool interactive, uid_t good_user, Hashmap **registry, sd_bus_error *ret_error) { #if ENABLE_POLKIT _cleanup_(sd_bus_message_unrefp) sd_bus_message *pk = NULL; AsyncPolkitQuery *q; const char *sender; sd_bus_message_handler_t callback; void *userdata; int c; #endif int r; assert(call); assert(action); assert(registry); r = check_good_user(call, good_user); if (r != 0) return r; #if ENABLE_POLKIT q = hashmap_get(*registry, call); if (q) { int authorized, challenge; /* This is the second invocation of this function, and there's already a response from * polkit, let's process it */ assert(q->reply); /* If the operation we want to authenticate changed between the first and the second time, * let's not use this authentication, it might be out of date as the object and context we * operate on might have changed. */ if (!streq(q->action, action) || !strv_equal(q->details, (char**) details)) return -ESTALE; if (sd_bus_message_is_method_error(q->reply, NULL)) { const sd_bus_error *e; e = sd_bus_message_get_error(q->reply); /* Treat no PK available as access denied */ if (sd_bus_error_has_name(e, SD_BUS_ERROR_SERVICE_UNKNOWN) || sd_bus_error_has_name(e, SD_BUS_ERROR_NAME_HAS_NO_OWNER)) return -EACCES; /* Copy error from polkit reply */ sd_bus_error_copy(ret_error, e); return -sd_bus_error_get_errno(e); } r = sd_bus_message_enter_container(q->reply, 'r', "bba{ss}"); if (r >= 0) r = sd_bus_message_read(q->reply, "bb", &authorized, &challenge); if (r < 0) return r; if (authorized) return 1; if (challenge) return sd_bus_error_set(ret_error, SD_BUS_ERROR_INTERACTIVE_AUTHORIZATION_REQUIRED, "Interactive authentication required."); return -EACCES; } #endif r = sd_bus_query_sender_privilege(call, capability); if (r < 0) return r; else if (r > 0) return 1; #if ENABLE_POLKIT if (sd_bus_get_current_message(call->bus) != call) return -EINVAL; callback = sd_bus_get_current_handler(call->bus); if (!callback) return -EINVAL; userdata = sd_bus_get_current_userdata(call->bus); sender = sd_bus_message_get_sender(call); if (!sender) return -EBADMSG; c = sd_bus_message_get_allow_interactive_authorization(call); if (c < 0) return c; if (c > 0) interactive = true; r = hashmap_ensure_allocated(registry, NULL); if (r < 0) return r; r = sd_bus_message_new_method_call( call->bus, &pk, "org.freedesktop.PolicyKit1", "/org/freedesktop/PolicyKit1/Authority", "org.freedesktop.PolicyKit1.Authority", "CheckAuthorization"); if (r < 0) return r; r = sd_bus_message_append( pk, "(sa{sv})s", "system-bus-name", 1, "name", "s", sender, action); if (r < 0) return r; r = bus_message_append_strv_key_value(pk, details); if (r < 0) return r; r = sd_bus_message_append(pk, "us", interactive, NULL); if (r < 0) return r; q = new(AsyncPolkitQuery, 1); if (!q) return -ENOMEM; *q = (AsyncPolkitQuery) { .request = sd_bus_message_ref(call), .callback = callback, .userdata = userdata, }; q->action = strdup(action); if (!q->action) { async_polkit_query_free(q); return -ENOMEM; } q->details = strv_copy((char**) details); if (!q->details) { async_polkit_query_free(q); return -ENOMEM; } r = hashmap_put(*registry, call, q); 
if (r < 0) { async_polkit_query_free(q); return r; } q->registry = *registry; r = sd_bus_call_async(call->bus, &q->slot, pk, async_polkit_callback, q, 0); if (r < 0) { async_polkit_query_free(q); return r; } return 0; #endif return -EACCES; } void bus_verify_polkit_async_registry_free(Hashmap *registry) { #if ENABLE_POLKIT hashmap_free_with_destructor(registry, async_polkit_query_free); #endif }
/* SPDX-License-Identifier: LGPL-2.1+ */ #include "bus-internal.h" #include "bus-message.h" #include "bus-polkit.h" #include "strv.h" #include "user-util.h" static int check_good_user(sd_bus_message *m, uid_t good_user) { _cleanup_(sd_bus_creds_unrefp) sd_bus_creds *creds = NULL; uid_t sender_uid; int r; assert(m); if (good_user == UID_INVALID) return 0; r = sd_bus_query_sender_creds(m, SD_BUS_CREDS_EUID, &creds); if (r < 0) return r; /* Don't trust augmented credentials for authorization */ assert_return((sd_bus_creds_get_augmented_mask(creds) & SD_BUS_CREDS_EUID) == 0, -EPERM); r = sd_bus_creds_get_euid(creds, &sender_uid); if (r < 0) return r; return sender_uid == good_user; } #if ENABLE_POLKIT static int bus_message_append_strv_key_value( sd_bus_message *m, const char **l) { const char **k, **v; int r; assert(m); r = sd_bus_message_open_container(m, 'a', "{ss}"); if (r < 0) return r; STRV_FOREACH_PAIR(k, v, l) { r = sd_bus_message_append(m, "{ss}", *k, *v); if (r < 0) return r; } r = sd_bus_message_close_container(m); if (r < 0) return r; return r; } #endif int bus_test_polkit( sd_bus_message *call, int capability, const char *action, const char **details, uid_t good_user, bool *_challenge, sd_bus_error *ret_error) { int r; assert(call); assert(action); /* Tests non-interactively! */ r = check_good_user(call, good_user); if (r != 0) return r; r = sd_bus_query_sender_privilege(call, capability); if (r < 0) return r; else if (r > 0) return 1; #if ENABLE_POLKIT else { _cleanup_(sd_bus_message_unrefp) sd_bus_message *request = NULL; _cleanup_(sd_bus_message_unrefp) sd_bus_message *reply = NULL; int authorized = false, challenge = false; const char *sender; sender = sd_bus_message_get_sender(call); if (!sender) return -EBADMSG; r = sd_bus_message_new_method_call( call->bus, &request, "org.freedesktop.PolicyKit1", "/org/freedesktop/PolicyKit1/Authority", "org.freedesktop.PolicyKit1.Authority", "CheckAuthorization"); if (r < 0) return r; r = sd_bus_message_append( request, "(sa{sv})s", "system-bus-name", 1, "name", "s", sender, action); if (r < 0) return r; r = bus_message_append_strv_key_value(request, details); if (r < 0) return r; r = sd_bus_message_append(request, "us", 0, NULL); if (r < 0) return r; r = sd_bus_call(call->bus, request, 0, ret_error, &reply); if (r < 0) { /* Treat no PK available as access denied */ if (sd_bus_error_has_name(ret_error, SD_BUS_ERROR_SERVICE_UNKNOWN)) { sd_bus_error_free(ret_error); return -EACCES; } return r; } r = sd_bus_message_enter_container(reply, 'r', "bba{ss}"); if (r < 0) return r; r = sd_bus_message_read(reply, "bb", &authorized, &challenge); if (r < 0) return r; if (authorized) return 1; if (_challenge) { *_challenge = challenge; return 0; } } #endif return -EACCES; } #if ENABLE_POLKIT typedef struct AsyncPolkitQuery { char *action; char **details; sd_bus_message *request, *reply; sd_bus_slot *slot; Hashmap *registry; sd_event_source *defer_event_source; } AsyncPolkitQuery; static void async_polkit_query_free(AsyncPolkitQuery *q) { if (!q) return; sd_bus_slot_unref(q->slot); if (q->registry && q->request) hashmap_remove(q->registry, q->request); sd_bus_message_unref(q->request); sd_bus_message_unref(q->reply); free(q->action); strv_free(q->details); sd_event_source_disable_unref(q->defer_event_source); free(q); } static int async_polkit_defer(sd_event_source *s, void *userdata) { AsyncPolkitQuery *q = userdata; assert(s); /* This is called as idle event source after we processed the async polkit reply, hopefully after the * method call we 
re-enqueued has been properly processed. */ async_polkit_query_free(q); return 0; } static int async_polkit_callback(sd_bus_message *reply, void *userdata, sd_bus_error *error) { _cleanup_(sd_bus_error_free) sd_bus_error error_buffer = SD_BUS_ERROR_NULL; AsyncPolkitQuery *q = userdata; int r; assert(reply); assert(q); assert(q->slot); q->slot = sd_bus_slot_unref(q->slot); assert(!q->reply); q->reply = sd_bus_message_ref(reply); /* Now, let's dispatch the original message a second time be re-enqueing. This will then traverse the * whole message processing again, and thus re-validating and re-retrieving the "userdata" field * again. * * We install an idle event loop event to clean-up the PolicyKit request data when we are idle again, * i.e. after the second time the message is processed is complete. */ assert(!q->defer_event_source); r = sd_event_add_defer(sd_bus_get_event(sd_bus_message_get_bus(reply)), &q->defer_event_source, async_polkit_defer, q); if (r < 0) goto fail; r = sd_event_source_set_priority(q->defer_event_source, SD_EVENT_PRIORITY_IDLE); if (r < 0) goto fail; r = sd_event_source_set_enabled(q->defer_event_source, SD_EVENT_ONESHOT); if (r < 0) goto fail; r = sd_bus_message_rewind(q->request, true); if (r < 0) goto fail; r = sd_bus_enqeue_for_read(sd_bus_message_get_bus(q->request), q->request); if (r < 0) goto fail; return 1; fail: log_debug_errno(r, "Processing asynchronous PolicyKit reply failed, ignoring: %m"); (void) sd_bus_reply_method_errno(q->request, r, NULL); async_polkit_query_free(q); return r; } #endif int bus_verify_polkit_async( sd_bus_message *call, int capability, const char *action, const char **details, bool interactive, uid_t good_user, Hashmap **registry, sd_bus_error *ret_error) { #if ENABLE_POLKIT _cleanup_(sd_bus_message_unrefp) sd_bus_message *pk = NULL; AsyncPolkitQuery *q; int c; #endif const char *sender; int r; assert(call); assert(action); assert(registry); r = check_good_user(call, good_user); if (r != 0) return r; #if ENABLE_POLKIT q = hashmap_get(*registry, call); if (q) { int authorized, challenge; /* This is the second invocation of this function, and there's already a response from * polkit, let's process it */ assert(q->reply); /* If the operation we want to authenticate changed between the first and the second time, * let's not use this authentication, it might be out of date as the object and context we * operate on might have changed. 
*/ if (!streq(q->action, action) || !strv_equal(q->details, (char**) details)) return -ESTALE; if (sd_bus_message_is_method_error(q->reply, NULL)) { const sd_bus_error *e; e = sd_bus_message_get_error(q->reply); /* Treat no PK available as access denied */ if (sd_bus_error_has_name(e, SD_BUS_ERROR_SERVICE_UNKNOWN) || sd_bus_error_has_name(e, SD_BUS_ERROR_NAME_HAS_NO_OWNER)) return -EACCES; /* Copy error from polkit reply */ sd_bus_error_copy(ret_error, e); return -sd_bus_error_get_errno(e); } r = sd_bus_message_enter_container(q->reply, 'r', "bba{ss}"); if (r >= 0) r = sd_bus_message_read(q->reply, "bb", &authorized, &challenge); if (r < 0) return r; if (authorized) return 1; if (challenge) return sd_bus_error_set(ret_error, SD_BUS_ERROR_INTERACTIVE_AUTHORIZATION_REQUIRED, "Interactive authentication required."); return -EACCES; } #endif r = sd_bus_query_sender_privilege(call, capability); if (r < 0) return r; else if (r > 0) return 1; sender = sd_bus_message_get_sender(call); if (!sender) return -EBADMSG; #if ENABLE_POLKIT c = sd_bus_message_get_allow_interactive_authorization(call); if (c < 0) return c; if (c > 0) interactive = true; r = hashmap_ensure_allocated(registry, NULL); if (r < 0) return r; r = sd_bus_message_new_method_call( call->bus, &pk, "org.freedesktop.PolicyKit1", "/org/freedesktop/PolicyKit1/Authority", "org.freedesktop.PolicyKit1.Authority", "CheckAuthorization"); if (r < 0) return r; r = sd_bus_message_append( pk, "(sa{sv})s", "system-bus-name", 1, "name", "s", sender, action); if (r < 0) return r; r = bus_message_append_strv_key_value(pk, details); if (r < 0) return r; r = sd_bus_message_append(pk, "us", interactive, NULL); if (r < 0) return r; q = new(AsyncPolkitQuery, 1); if (!q) return -ENOMEM; *q = (AsyncPolkitQuery) { .request = sd_bus_message_ref(call), }; q->action = strdup(action); if (!q->action) { async_polkit_query_free(q); return -ENOMEM; } q->details = strv_copy((char**) details); if (!q->details) { async_polkit_query_free(q); return -ENOMEM; } r = hashmap_put(*registry, call, q); if (r < 0) { async_polkit_query_free(q); return r; } q->registry = *registry; r = sd_bus_call_async(call->bus, &q->slot, pk, async_polkit_callback, q, 0); if (r < 0) { async_polkit_query_free(q); return r; } return 0; #endif return -EACCES; } void bus_verify_polkit_async_registry_free(Hashmap *registry) { #if ENABLE_POLKIT hashmap_free_with_destructor(registry, async_polkit_query_free); #endif }
int bus_verify_polkit_async( sd_bus_message *call, int capability, const char *action, const char **details, bool interactive, uid_t good_user, Hashmap **registry, sd_bus_error *ret_error) { #if ENABLE_POLKIT _cleanup_(sd_bus_message_unrefp) sd_bus_message *pk = NULL; AsyncPolkitQuery *q; const char *sender; sd_bus_message_handler_t callback; void *userdata; int c; #endif int r; assert(call); assert(action); assert(registry); r = check_good_user(call, good_user); if (r != 0) return r; #if ENABLE_POLKIT q = hashmap_get(*registry, call); if (q) { int authorized, challenge; /* This is the second invocation of this function, and there's already a response from * polkit, let's process it */ assert(q->reply); /* If the operation we want to authenticate changed between the first and the second time, * let's not use this authentication, it might be out of date as the object and context we * operate on might have changed. */ if (!streq(q->action, action) || !strv_equal(q->details, (char**) details)) return -ESTALE; if (sd_bus_message_is_method_error(q->reply, NULL)) { const sd_bus_error *e; e = sd_bus_message_get_error(q->reply); /* Treat no PK available as access denied */ if (sd_bus_error_has_name(e, SD_BUS_ERROR_SERVICE_UNKNOWN) || sd_bus_error_has_name(e, SD_BUS_ERROR_NAME_HAS_NO_OWNER)) return -EACCES; /* Copy error from polkit reply */ sd_bus_error_copy(ret_error, e); return -sd_bus_error_get_errno(e); } r = sd_bus_message_enter_container(q->reply, 'r', "bba{ss}"); if (r >= 0) r = sd_bus_message_read(q->reply, "bb", &authorized, &challenge); if (r < 0) return r; if (authorized) return 1; if (challenge) return sd_bus_error_set(ret_error, SD_BUS_ERROR_INTERACTIVE_AUTHORIZATION_REQUIRED, "Interactive authentication required."); return -EACCES; } #endif r = sd_bus_query_sender_privilege(call, capability); if (r < 0) return r; else if (r > 0) return 1; #if ENABLE_POLKIT if (sd_bus_get_current_message(call->bus) != call) return -EINVAL; callback = sd_bus_get_current_handler(call->bus); if (!callback) return -EINVAL; userdata = sd_bus_get_current_userdata(call->bus); sender = sd_bus_message_get_sender(call); if (!sender) return -EBADMSG; c = sd_bus_message_get_allow_interactive_authorization(call); if (c < 0) return c; if (c > 0) interactive = true; r = hashmap_ensure_allocated(registry, NULL); if (r < 0) return r; r = sd_bus_message_new_method_call( call->bus, &pk, "org.freedesktop.PolicyKit1", "/org/freedesktop/PolicyKit1/Authority", "org.freedesktop.PolicyKit1.Authority", "CheckAuthorization"); if (r < 0) return r; r = sd_bus_message_append( pk, "(sa{sv})s", "system-bus-name", 1, "name", "s", sender, action); if (r < 0) return r; r = bus_message_append_strv_key_value(pk, details); if (r < 0) return r; r = sd_bus_message_append(pk, "us", interactive, NULL); if (r < 0) return r; q = new(AsyncPolkitQuery, 1); if (!q) return -ENOMEM; *q = (AsyncPolkitQuery) { .request = sd_bus_message_ref(call), .callback = callback, .userdata = userdata, }; q->action = strdup(action); if (!q->action) { async_polkit_query_free(q); return -ENOMEM; } q->details = strv_copy((char**) details); if (!q->details) { async_polkit_query_free(q); return -ENOMEM; } r = hashmap_put(*registry, call, q); if (r < 0) { async_polkit_query_free(q); return r; } q->registry = *registry; r = sd_bus_call_async(call->bus, &q->slot, pk, async_polkit_callback, q, 0); if (r < 0) { async_polkit_query_free(q); return r; } return 0; #endif return -EACCES; }
int bus_verify_polkit_async( sd_bus_message *call, int capability, const char *action, const char **details, bool interactive, uid_t good_user, Hashmap **registry, sd_bus_error *ret_error) { #if ENABLE_POLKIT _cleanup_(sd_bus_message_unrefp) sd_bus_message *pk = NULL; AsyncPolkitQuery *q; int c; #endif const char *sender; int r; assert(call); assert(action); assert(registry); r = check_good_user(call, good_user); if (r != 0) return r; #if ENABLE_POLKIT q = hashmap_get(*registry, call); if (q) { int authorized, challenge; /* This is the second invocation of this function, and there's already a response from * polkit, let's process it */ assert(q->reply); /* If the operation we want to authenticate changed between the first and the second time, * let's not use this authentication, it might be out of date as the object and context we * operate on might have changed. */ if (!streq(q->action, action) || !strv_equal(q->details, (char**) details)) return -ESTALE; if (sd_bus_message_is_method_error(q->reply, NULL)) { const sd_bus_error *e; e = sd_bus_message_get_error(q->reply); /* Treat no PK available as access denied */ if (sd_bus_error_has_name(e, SD_BUS_ERROR_SERVICE_UNKNOWN) || sd_bus_error_has_name(e, SD_BUS_ERROR_NAME_HAS_NO_OWNER)) return -EACCES; /* Copy error from polkit reply */ sd_bus_error_copy(ret_error, e); return -sd_bus_error_get_errno(e); } r = sd_bus_message_enter_container(q->reply, 'r', "bba{ss}"); if (r >= 0) r = sd_bus_message_read(q->reply, "bb", &authorized, &challenge); if (r < 0) return r; if (authorized) return 1; if (challenge) return sd_bus_error_set(ret_error, SD_BUS_ERROR_INTERACTIVE_AUTHORIZATION_REQUIRED, "Interactive authentication required."); return -EACCES; } #endif r = sd_bus_query_sender_privilege(call, capability); if (r < 0) return r; else if (r > 0) return 1; sender = sd_bus_message_get_sender(call); if (!sender) return -EBADMSG; #if ENABLE_POLKIT c = sd_bus_message_get_allow_interactive_authorization(call); if (c < 0) return c; if (c > 0) interactive = true; r = hashmap_ensure_allocated(registry, NULL); if (r < 0) return r; r = sd_bus_message_new_method_call( call->bus, &pk, "org.freedesktop.PolicyKit1", "/org/freedesktop/PolicyKit1/Authority", "org.freedesktop.PolicyKit1.Authority", "CheckAuthorization"); if (r < 0) return r; r = sd_bus_message_append( pk, "(sa{sv})s", "system-bus-name", 1, "name", "s", sender, action); if (r < 0) return r; r = bus_message_append_strv_key_value(pk, details); if (r < 0) return r; r = sd_bus_message_append(pk, "us", interactive, NULL); if (r < 0) return r; q = new(AsyncPolkitQuery, 1); if (!q) return -ENOMEM; *q = (AsyncPolkitQuery) { .request = sd_bus_message_ref(call), }; q->action = strdup(action); if (!q->action) { async_polkit_query_free(q); return -ENOMEM; } q->details = strv_copy((char**) details); if (!q->details) { async_polkit_query_free(q); return -ENOMEM; } r = hashmap_put(*registry, call, q); if (r < 0) { async_polkit_query_free(q); return r; } q->registry = *registry; r = sd_bus_call_async(call->bus, &q->slot, pk, async_polkit_callback, q, 0); if (r < 0) { async_polkit_query_free(q); return r; } return 0; #endif return -EACCES; }
{'added': [(163, ''), (165, ' sd_event_source *defer_event_source;'), (183, ' sd_event_source_disable_unref(q->defer_event_source);'), (187, 'static int async_polkit_defer(sd_event_source *s, void *userdata) {'), (188, ' AsyncPolkitQuery *q = userdata;'), (189, ''), (190, ' assert(s);'), (191, ''), (192, ' /* This is called as idle event source after we processed the async polkit reply, hopefully after the'), (193, ' * method call we re-enqueued has been properly processed. */'), (194, ''), (195, ' async_polkit_query_free(q);'), (196, ' return 0;'), (197, '}'), (198, ''), (207, ' assert(q->slot);'), (209, ''), (210, ' assert(!q->reply);'), (213, " /* Now, let's dispatch the original message a second time be re-enqueing. This will then traverse the"), (214, ' * whole message processing again, and thus re-validating and re-retrieving the "userdata" field'), (215, ' * again.'), (216, ' *'), (217, ' * We install an idle event loop event to clean-up the PolicyKit request data when we are idle again,'), (218, ' * i.e. after the second time the message is processed is complete. */'), (219, ''), (220, ' assert(!q->defer_event_source);'), (221, ' r = sd_event_add_defer(sd_bus_get_event(sd_bus_message_get_bus(reply)), &q->defer_event_source, async_polkit_defer, q);'), (222, ' if (r < 0)'), (223, ' goto fail;'), (224, ''), (225, ' r = sd_event_source_set_priority(q->defer_event_source, SD_EVENT_PRIORITY_IDLE);'), (226, ' if (r < 0)'), (227, ' goto fail;'), (228, ''), (229, ' r = sd_event_source_set_enabled(q->defer_event_source, SD_EVENT_ONESHOT);'), (230, ' if (r < 0)'), (231, ' goto fail;'), (232, ''), (234, ' if (r < 0)'), (235, ' goto fail;'), (236, ''), (237, ' r = sd_bus_enqeue_for_read(sd_bus_message_get_bus(q->request), q->request);'), (238, ' if (r < 0)'), (239, ' goto fail;'), (241, ' return 1;'), (243, 'fail:'), (244, ' log_debug_errno(r, "Processing asynchronous PolicyKit reply failed, ignoring: %m");'), (245, ' (void) sd_bus_reply_method_errno(q->request, r, NULL);'), (267, ' const char *sender;'), (335, '#if ENABLE_POLKIT')], 'deleted': [(162, ' sd_bus_message_handler_t callback;'), (163, ' void *userdata;'), (169, ''), (199, ' if (r < 0) {'), (200, ' r = sd_bus_reply_method_errno(q->request, r, NULL);'), (201, ' goto finish;'), (202, ' }'), (204, ' r = q->callback(q->request, q->userdata, &error_buffer);'), (205, ' r = bus_maybe_reply_error(q->request, r, &error_buffer);'), (207, 'finish:'), (209, ''), (228, ' const char *sender;'), (229, ' sd_bus_message_handler_t callback;'), (230, ' void *userdata;'), (296, '#if ENABLE_POLKIT'), (297, ' if (sd_bus_get_current_message(call->bus) != call)'), (298, ' return -EINVAL;'), (299, ''), (300, ' callback = sd_bus_get_current_handler(call->bus);'), (301, ' if (!callback)'), (302, ' return -EINVAL;'), (303, ''), (304, ' userdata = sd_bus_get_current_userdata(call->bus);'), (305, ''), (352, ' .callback = callback,'), (353, ' .userdata = userdata,')]}
num_lines_added: 50
num_lines_deleted: 26
num_lines_in_file: 290
num_tokens_in_file: 1,631
repo: https://github.com/systemd/systemd
cve_id: CVE-2020-1712
cwe_id: ['CWE-416']
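A second detail of the fixed bus_verify_polkit_async() above: because the original message is dispatched twice, the cached polkit verdict is reused only if the action string and details vector are unchanged between dispatches; otherwise the function returns -ESTALE. Below is a small sketch of that staleness check, with streq()/strv_equal() replaced by illustrative stand-ins (the *_sketch names are hypothetical, not systemd's):

#include <stdio.h>
#include <string.h>

/* Illustrative stand-ins for systemd's streq()/strv_equal(). */
static int streq_sketch(const char *a, const char *b) {
    return strcmp(a, b) == 0;
}

static int strv_equal_sketch(char *const *a, char *const *b) {
    for (; *a && *b; a++, b++)
        if (strcmp(*a, *b) != 0)
            return 0;
    return *a == NULL && *b == NULL; /* equal only if both ended together */
}

/* Second dispatch: reuse the cached verdict only if the request is
 * byte-identical to the one that produced it; otherwise it is stale. */
static int cached_verdict_is_fresh(const char *cached_action, char *const *cached_details,
                                   const char *action, char *const *details) {
    return streq_sketch(cached_action, action) &&
           strv_equal_sketch(cached_details, details);
}

int main(void) {
    char *first[]  = { "unit", "foo.service", NULL };
    char *second[] = { "unit", "bar.service", NULL };

    printf("same request:    %d\n",
           cached_verdict_is_fresh("org.example.op", first, "org.example.op", first));
    printf("changed details: %d\n",
           cached_verdict_is_fresh("org.example.op", first, "org.example.op", second));
    return 0;
}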
file_name: flb_gzip.c
method_name: flb_gzip_compress
/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */ /* Fluent Bit * ========== * Copyright (C) 2019-2020 The Fluent Bit Authors * Copyright (C) 2015-2018 Treasure Data Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <fluent-bit/flb_info.h> #include <fluent-bit/flb_mem.h> #include <fluent-bit/flb_log.h> #include <fluent-bit/flb_gzip.h> #include <miniz/miniz.h> #define FLB_GZIP_HEADER_OFFSET 10 typedef enum { FTEXT = 1, FHCRC = 2, FEXTRA = 4, FNAME = 8, FCOMMENT = 16 } flb_tinf_gzip_flag; static unsigned int read_le16(const unsigned char *p) { return ((unsigned int) p[0]) | ((unsigned int) p[1] << 8); } static unsigned int read_le32(const unsigned char *p) { return ((unsigned int) p[0]) | ((unsigned int) p[1] << 8) | ((unsigned int) p[2] << 16) | ((unsigned int) p[3] << 24); } static inline void gzip_header(void *buf) { uint8_t *p; /* GZip Magic bytes */ p = buf; *p++ = 0x1F; *p++ = 0x8B; *p++ = 8; *p++ = 0; *p++ = 0; *p++ = 0; *p++ = 0; *p++ = 0; *p++ = 0; *p++ = 0xFF; } int flb_gzip_compress(void *in_data, size_t in_len, void **out_data, size_t *out_len) { int flush; int status; int footer_start; uint8_t *pb; size_t out_size; void *out_buf; z_stream strm; mz_ulong crc; out_size = in_len + 32; out_buf = flb_malloc(out_size); if (!out_buf) { flb_errno(); flb_error("[gzip] could not allocate outgoing buffer"); return -1; } /* Initialize streaming buffer context */ memset(&strm, '\0', sizeof(strm)); strm.zalloc = Z_NULL; strm.zfree = Z_NULL; strm.opaque = Z_NULL; strm.next_in = in_data; strm.avail_in = in_len; strm.total_out = 0; /* Deflate mode */ deflateInit2(&strm, Z_DEFAULT_COMPRESSION, Z_DEFLATED, -Z_DEFAULT_WINDOW_BITS, 9, Z_DEFAULT_STRATEGY); /* * Miniz don't support GZip format directly, instead we will: * * - append manual GZip magic bytes * - deflate raw content * - append manual CRC32 data */ gzip_header(out_buf); /* Header offset */ pb = (uint8_t *) out_buf + FLB_GZIP_HEADER_OFFSET; flush = Z_NO_FLUSH; while (1) { strm.next_out = pb + strm.total_out; strm.avail_out = out_size - (pb - (uint8_t *) out_buf); if (strm.avail_in == 0) { flush = Z_FINISH; } status = deflate(&strm, flush); if (status == Z_STREAM_END) { break; } else if (status != Z_OK) { deflateEnd(&strm); return -1; } } if (deflateEnd(&strm) != Z_OK) { flb_free(out_buf); return -1; } *out_len = strm.total_out; /* Construct the gzip checksum (CRC32 footer) */ footer_start = FLB_GZIP_HEADER_OFFSET + *out_len; pb = (uint8_t *) out_buf + footer_start; crc = mz_crc32(MZ_CRC32_INIT, in_data, in_len); *pb++ = crc & 0xFF; *pb++ = (crc >> 8) & 0xFF; *pb++ = (crc >> 16) & 0xFF; *pb++ = (crc >> 24) & 0xFF; *pb++ = in_len & 0xFF; *pb++ = (in_len >> 8) & 0xFF; *pb++ = (in_len >> 16) & 0xFF; *pb++ = (in_len >> 24) & 0xFF; /* Set the real buffer size for the caller */ *out_len += FLB_GZIP_HEADER_OFFSET + 8; *out_data = out_buf; return 0; } /* Uncompress (inflate) GZip data */ int flb_gzip_uncompress(void *in_data, size_t in_len, void **out_data, size_t *out_len) { int status; uint8_t *p; void *out_buf; size_t 
out_size = 0; void *zip_data; size_t zip_len; unsigned char flg; unsigned int xlen, hcrc; unsigned int dlen, crc; mz_ulong crc_out; mz_stream stream; const unsigned char *start; /* Minimal length: header + crc32 */ if (in_len < 18) { flb_error("[gzip] unexpected content length"); return -1; } /* Magic bytes */ p = in_data; if (p[0] != 0x1F || p[1] != 0x8B) { flb_error("[gzip] invalid magic bytes"); return -1; } if (p[2] != 8) { flb_error("[gzip] invalid method"); return -1; } /* Flag byte */ flg = p[3]; /* Reserved bits */ if (flg & 0xE0) { flb_error("[gzip] invalid flag"); return -1; } /* Skip base header of 10 bytes */ start = p + FLB_GZIP_HEADER_OFFSET; /* Skip extra data if present */ if (flg & FEXTRA) { xlen = read_le16(start); if (xlen > in_len - 12) { flb_error("[gzip] invalid gzip data"); return -1; } start += xlen + 2; } /* Skip file name if present */ if (flg & FNAME) { do { if (start - p >= in_len) { flb_error("[gzip] invalid gzip data (FNAME)"); return -1; } } while (*start++); } /* Skip file comment if present */ if (flg & FCOMMENT) { do { if (start - p >= in_len) { flb_error("[gzip] invalid gzip data (FCOMMENT)"); return -1; } } while (*start++); } /* Check header crc if present */ if (flg & FHCRC) { if (start - p > in_len - 2) { flb_error("[gzip] invalid gzip data (FHRC)"); return -1; } hcrc = read_le16(start); crc = mz_crc32(MZ_CRC32_INIT, p, start - p) & 0x0000FFFF; if (hcrc != crc) { flb_error("[gzip] invalid gzip header CRC"); return -1; } start += 2; } /* Get decompressed length */ dlen = read_le32(&p[in_len - 4]); /* Get CRC32 checksum of original data */ crc = read_le32(&p[in_len - 8]); /* Decompress data */ if ((p + in_len) - p < 8) { flb_error("[gzip] invalid gzip CRC32 checksum"); return -1; } /* Allocate outgoing buffer */ out_buf = flb_malloc(dlen); if (!out_buf) { flb_errno(); return -1; } out_size = dlen; /* Map zip content */ zip_data = (uint8_t *) start; zip_len = (p + in_len) - start - 8; memset(&stream, 0, sizeof(stream)); stream.next_in = zip_data; stream.avail_in = zip_len; stream.next_out = out_buf; stream.avail_out = out_size; status = mz_inflateInit2(&stream, -Z_DEFAULT_WINDOW_BITS); if (status != MZ_OK) { flb_free(out_buf); return -1; } status = mz_inflate(&stream, MZ_FINISH); if (status != MZ_STREAM_END) { mz_inflateEnd(&stream); flb_free(out_buf); return -1; } if (stream.total_out != dlen) { mz_inflateEnd(&stream); flb_free(out_buf); flb_error("[gzip] invalid gzip data size"); return -1; } /* terminate the stream, it's not longer required */ mz_inflateEnd(&stream); /* Validate message CRC vs inflated data CRC */ crc_out = mz_crc32(MZ_CRC32_INIT, out_buf, dlen); if (crc_out != crc) { flb_free(out_buf); flb_error("[gzip] invalid GZip checksum (CRC32)"); return -1; } /* set the uncompressed data */ *out_len = dlen; *out_data = out_buf; return 0; }
/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */ /* Fluent Bit * ========== * Copyright (C) 2019-2020 The Fluent Bit Authors * Copyright (C) 2015-2018 Treasure Data Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <fluent-bit/flb_info.h> #include <fluent-bit/flb_mem.h> #include <fluent-bit/flb_log.h> #include <fluent-bit/flb_gzip.h> #include <miniz/miniz.h> #define FLB_GZIP_HEADER_OFFSET 10 typedef enum { FTEXT = 1, FHCRC = 2, FEXTRA = 4, FNAME = 8, FCOMMENT = 16 } flb_tinf_gzip_flag; static unsigned int read_le16(const unsigned char *p) { return ((unsigned int) p[0]) | ((unsigned int) p[1] << 8); } static unsigned int read_le32(const unsigned char *p) { return ((unsigned int) p[0]) | ((unsigned int) p[1] << 8) | ((unsigned int) p[2] << 16) | ((unsigned int) p[3] << 24); } static inline void gzip_header(void *buf) { uint8_t *p; /* GZip Magic bytes */ p = buf; *p++ = 0x1F; *p++ = 0x8B; *p++ = 8; *p++ = 0; *p++ = 0; *p++ = 0; *p++ = 0; *p++ = 0; *p++ = 0; *p++ = 0xFF; } int flb_gzip_compress(void *in_data, size_t in_len, void **out_data, size_t *out_len) { int flush; int status; int footer_start; uint8_t *pb; size_t out_size; void *out_buf; z_stream strm; mz_ulong crc; /* * GZIP relies on an algorithm with worst-case expansion * of 5 bytes per 32KB data. This means we need to create a variable * length output, that depends on the input length. * See RFC 1951 for details. 
*/ int max_input_expansion = ((int)(in_len / 32000) + 1) * 5; /* * Max compressed size is equal to sum of: * 10 byte header * 8 byte foot * max input expansion * size of input */ out_size = 10 + 8 + max_input_expansion + in_len; out_buf = flb_malloc(out_size); if (!out_buf) { flb_errno(); flb_error("[gzip] could not allocate outgoing buffer"); return -1; } /* Initialize streaming buffer context */ memset(&strm, '\0', sizeof(strm)); strm.zalloc = Z_NULL; strm.zfree = Z_NULL; strm.opaque = Z_NULL; strm.next_in = in_data; strm.avail_in = in_len; strm.total_out = 0; /* Deflate mode */ deflateInit2(&strm, Z_DEFAULT_COMPRESSION, Z_DEFLATED, -Z_DEFAULT_WINDOW_BITS, 9, Z_DEFAULT_STRATEGY); /* * Miniz don't support GZip format directly, instead we will: * * - append manual GZip magic bytes * - deflate raw content * - append manual CRC32 data */ gzip_header(out_buf); /* Header offset */ pb = (uint8_t *) out_buf + FLB_GZIP_HEADER_OFFSET; flush = Z_NO_FLUSH; while (1) { strm.next_out = pb + strm.total_out; strm.avail_out = out_size - (pb - (uint8_t *) out_buf); if (strm.avail_in == 0) { flush = Z_FINISH; } status = deflate(&strm, flush); if (status == Z_STREAM_END) { break; } else if (status != Z_OK) { deflateEnd(&strm); return -1; } } if (deflateEnd(&strm) != Z_OK) { flb_free(out_buf); return -1; } *out_len = strm.total_out; /* Construct the gzip checksum (CRC32 footer) */ footer_start = FLB_GZIP_HEADER_OFFSET + *out_len; pb = (uint8_t *) out_buf + footer_start; crc = mz_crc32(MZ_CRC32_INIT, in_data, in_len); *pb++ = crc & 0xFF; *pb++ = (crc >> 8) & 0xFF; *pb++ = (crc >> 16) & 0xFF; *pb++ = (crc >> 24) & 0xFF; *pb++ = in_len & 0xFF; *pb++ = (in_len >> 8) & 0xFF; *pb++ = (in_len >> 16) & 0xFF; *pb++ = (in_len >> 24) & 0xFF; /* Set the real buffer size for the caller */ *out_len += FLB_GZIP_HEADER_OFFSET + 8; *out_data = out_buf; return 0; } /* Uncompress (inflate) GZip data */ int flb_gzip_uncompress(void *in_data, size_t in_len, void **out_data, size_t *out_len) { int status; uint8_t *p; void *out_buf; size_t out_size = 0; void *zip_data; size_t zip_len; unsigned char flg; unsigned int xlen, hcrc; unsigned int dlen, crc; mz_ulong crc_out; mz_stream stream; const unsigned char *start; /* Minimal length: header + crc32 */ if (in_len < 18) { flb_error("[gzip] unexpected content length"); return -1; } /* Magic bytes */ p = in_data; if (p[0] != 0x1F || p[1] != 0x8B) { flb_error("[gzip] invalid magic bytes"); return -1; } if (p[2] != 8) { flb_error("[gzip] invalid method"); return -1; } /* Flag byte */ flg = p[3]; /* Reserved bits */ if (flg & 0xE0) { flb_error("[gzip] invalid flag"); return -1; } /* Skip base header of 10 bytes */ start = p + FLB_GZIP_HEADER_OFFSET; /* Skip extra data if present */ if (flg & FEXTRA) { xlen = read_le16(start); if (xlen > in_len - 12) { flb_error("[gzip] invalid gzip data"); return -1; } start += xlen + 2; } /* Skip file name if present */ if (flg & FNAME) { do { if (start - p >= in_len) { flb_error("[gzip] invalid gzip data (FNAME)"); return -1; } } while (*start++); } /* Skip file comment if present */ if (flg & FCOMMENT) { do { if (start - p >= in_len) { flb_error("[gzip] invalid gzip data (FCOMMENT)"); return -1; } } while (*start++); } /* Check header crc if present */ if (flg & FHCRC) { if (start - p > in_len - 2) { flb_error("[gzip] invalid gzip data (FHRC)"); return -1; } hcrc = read_le16(start); crc = mz_crc32(MZ_CRC32_INIT, p, start - p) & 0x0000FFFF; if (hcrc != crc) { flb_error("[gzip] invalid gzip header CRC"); return -1; } start += 2; } /* Get decompressed 
length */ dlen = read_le32(&p[in_len - 4]); /* Get CRC32 checksum of original data */ crc = read_le32(&p[in_len - 8]); /* Decompress data */ if ((p + in_len) - p < 8) { flb_error("[gzip] invalid gzip CRC32 checksum"); return -1; } /* Allocate outgoing buffer */ out_buf = flb_malloc(dlen); if (!out_buf) { flb_errno(); return -1; } out_size = dlen; /* Map zip content */ zip_data = (uint8_t *) start; zip_len = (p + in_len) - start - 8; memset(&stream, 0, sizeof(stream)); stream.next_in = zip_data; stream.avail_in = zip_len; stream.next_out = out_buf; stream.avail_out = out_size; status = mz_inflateInit2(&stream, -Z_DEFAULT_WINDOW_BITS); if (status != MZ_OK) { flb_free(out_buf); return -1; } status = mz_inflate(&stream, MZ_FINISH); if (status != MZ_STREAM_END) { mz_inflateEnd(&stream); flb_free(out_buf); return -1; } if (stream.total_out != dlen) { mz_inflateEnd(&stream); flb_free(out_buf); flb_error("[gzip] invalid gzip data size"); return -1; } /* terminate the stream, it's not longer required */ mz_inflateEnd(&stream); /* Validate message CRC vs inflated data CRC */ crc_out = mz_crc32(MZ_CRC32_INIT, out_buf, dlen); if (crc_out != crc) { flb_free(out_buf); flb_error("[gzip] invalid GZip checksum (CRC32)"); return -1; } /* set the uncompressed data */ *out_len = dlen; *out_data = out_buf; return 0; }
int flb_gzip_compress(void *in_data, size_t in_len, void **out_data, size_t *out_len) { int flush; int status; int footer_start; uint8_t *pb; size_t out_size; void *out_buf; z_stream strm; mz_ulong crc; out_size = in_len + 32; out_buf = flb_malloc(out_size); if (!out_buf) { flb_errno(); flb_error("[gzip] could not allocate outgoing buffer"); return -1; } /* Initialize streaming buffer context */ memset(&strm, '\0', sizeof(strm)); strm.zalloc = Z_NULL; strm.zfree = Z_NULL; strm.opaque = Z_NULL; strm.next_in = in_data; strm.avail_in = in_len; strm.total_out = 0; /* Deflate mode */ deflateInit2(&strm, Z_DEFAULT_COMPRESSION, Z_DEFLATED, -Z_DEFAULT_WINDOW_BITS, 9, Z_DEFAULT_STRATEGY); /* * Miniz don't support GZip format directly, instead we will: * * - append manual GZip magic bytes * - deflate raw content * - append manual CRC32 data */ gzip_header(out_buf); /* Header offset */ pb = (uint8_t *) out_buf + FLB_GZIP_HEADER_OFFSET; flush = Z_NO_FLUSH; while (1) { strm.next_out = pb + strm.total_out; strm.avail_out = out_size - (pb - (uint8_t *) out_buf); if (strm.avail_in == 0) { flush = Z_FINISH; } status = deflate(&strm, flush); if (status == Z_STREAM_END) { break; } else if (status != Z_OK) { deflateEnd(&strm); return -1; } } if (deflateEnd(&strm) != Z_OK) { flb_free(out_buf); return -1; } *out_len = strm.total_out; /* Construct the gzip checksum (CRC32 footer) */ footer_start = FLB_GZIP_HEADER_OFFSET + *out_len; pb = (uint8_t *) out_buf + footer_start; crc = mz_crc32(MZ_CRC32_INIT, in_data, in_len); *pb++ = crc & 0xFF; *pb++ = (crc >> 8) & 0xFF; *pb++ = (crc >> 16) & 0xFF; *pb++ = (crc >> 24) & 0xFF; *pb++ = in_len & 0xFF; *pb++ = (in_len >> 8) & 0xFF; *pb++ = (in_len >> 16) & 0xFF; *pb++ = (in_len >> 24) & 0xFF; /* Set the real buffer size for the caller */ *out_len += FLB_GZIP_HEADER_OFFSET + 8; *out_data = out_buf; return 0; }
int flb_gzip_compress(void *in_data, size_t in_len, void **out_data, size_t *out_len) { int flush; int status; int footer_start; uint8_t *pb; size_t out_size; void *out_buf; z_stream strm; mz_ulong crc; /* * GZIP relies on an algorithm with worst-case expansion * of 5 bytes per 32KB data. This means we need to create a variable * length output, that depends on the input length. * See RFC 1951 for details. */ int max_input_expansion = ((int)(in_len / 32000) + 1) * 5; /* * Max compressed size is equal to sum of: * 10 byte header * 8 byte foot * max input expansion * size of input */ out_size = 10 + 8 + max_input_expansion + in_len; out_buf = flb_malloc(out_size); if (!out_buf) { flb_errno(); flb_error("[gzip] could not allocate outgoing buffer"); return -1; } /* Initialize streaming buffer context */ memset(&strm, '\0', sizeof(strm)); strm.zalloc = Z_NULL; strm.zfree = Z_NULL; strm.opaque = Z_NULL; strm.next_in = in_data; strm.avail_in = in_len; strm.total_out = 0; /* Deflate mode */ deflateInit2(&strm, Z_DEFAULT_COMPRESSION, Z_DEFLATED, -Z_DEFAULT_WINDOW_BITS, 9, Z_DEFAULT_STRATEGY); /* * Miniz don't support GZip format directly, instead we will: * * - append manual GZip magic bytes * - deflate raw content * - append manual CRC32 data */ gzip_header(out_buf); /* Header offset */ pb = (uint8_t *) out_buf + FLB_GZIP_HEADER_OFFSET; flush = Z_NO_FLUSH; while (1) { strm.next_out = pb + strm.total_out; strm.avail_out = out_size - (pb - (uint8_t *) out_buf); if (strm.avail_in == 0) { flush = Z_FINISH; } status = deflate(&strm, flush); if (status == Z_STREAM_END) { break; } else if (status != Z_OK) { deflateEnd(&strm); return -1; } } if (deflateEnd(&strm) != Z_OK) { flb_free(out_buf); return -1; } *out_len = strm.total_out; /* Construct the gzip checksum (CRC32 footer) */ footer_start = FLB_GZIP_HEADER_OFFSET + *out_len; pb = (uint8_t *) out_buf + footer_start; crc = mz_crc32(MZ_CRC32_INIT, in_data, in_len); *pb++ = crc & 0xFF; *pb++ = (crc >> 8) & 0xFF; *pb++ = (crc >> 16) & 0xFF; *pb++ = (crc >> 24) & 0xFF; *pb++ = in_len & 0xFF; *pb++ = (in_len >> 8) & 0xFF; *pb++ = (in_len >> 16) & 0xFF; *pb++ = (in_len >> 24) & 0xFF; /* Set the real buffer size for the caller */ *out_len += FLB_GZIP_HEADER_OFFSET + 8; *out_data = out_buf; return 0; }
{'added': [(80, ''), (81, ' /*'), (82, ' * GZIP relies on an algorithm with worst-case expansion'), (83, ' * of 5 bytes per 32KB data. This means we need to create a variable'), (84, ' * length output, that depends on the input length.'), (85, ' * See RFC 1951 for details.'), (86, ' */'), (87, ' int max_input_expansion = ((int)(in_len / 32000) + 1) * 5;'), (88, ''), (89, ' /*'), (90, ' * Max compressed size is equal to sum of:'), (91, ' * 10 byte header'), (92, ' * 8 byte foot'), (93, ' * max input expansion'), (94, ' * size of input'), (95, ' */'), (96, ' out_size = 10 + 8 + max_input_expansion + in_len;'), (98, '')], 'deleted': [(80, ' out_size = in_len + 32;')]}
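The one deleted line and its replacement block above capture the whole CVE-2020-35963 fix: the old out_size = in_len + 32 leaves only 14 bytes of slack beyond the 10-byte gzip header and 8-byte footer, so incompressible input large enough to need several DEFLATE stored blocks (on the order of 64 KB and up, at roughly 5 bytes of framing per 32 KB block) can overrun the heap buffer. The fixed formula sizes the buffer for the worst case. A small self-contained sketch of that computation follows; gzip_max_out_size() is an illustrative name, not a fluent-bit API:

#include <stdio.h>
#include <stddef.h>

/* Worst case per the fixed formula: DEFLATE stored blocks add at most
 * ~5 bytes of framing per 32 KB of input (RFC 1951), plus the 10-byte
 * gzip header and 8-byte CRC32/ISIZE footer. */
static size_t gzip_max_out_size(size_t in_len) {
    size_t max_expansion = ((in_len / 32000) + 1) * 5;
    return 10 + 8 + max_expansion + in_len;
}

int main(void) {
    size_t samples[] = { 0, 100, 32000, 100000, 1000000 };
    size_t n = sizeof(samples) / sizeof(samples[0]);

    for (size_t i = 0; i < n; i++)
        printf("in_len=%7zu -> buffer=%7zu bytes (old formula: %7zu)\n",
               samples[i], gzip_max_out_size(samples[i]), samples[i] + 32);
    return 0;
}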
num_lines_added: 18
num_lines_deleted: 1
num_lines_in_file: 222
num_tokens_in_file: 1,286
repo: https://github.com/fluent/fluent-bit
cve_id: CVE-2020-35963
cwe_id: ['CWE-787']
file_name: af_rose.c
method_name: rose_rx_call_request
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk) * Copyright (C) Alan Cox GW4PTS (alan@lxorguk.ukuu.org.uk) * Copyright (C) Terry Dawson VK2KTJ (terry@animats.net) * Copyright (C) Tomi Manninen OH2BNS (oh2bns@sral.fi) */ #include <linux/capability.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/init.h> #include <linux/errno.h> #include <linux/types.h> #include <linux/socket.h> #include <linux/in.h> #include <linux/slab.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/spinlock.h> #include <linux/timer.h> #include <linux/string.h> #include <linux/sockios.h> #include <linux/net.h> #include <linux/stat.h> #include <net/net_namespace.h> #include <net/ax25.h> #include <linux/inet.h> #include <linux/netdevice.h> #include <linux/if_arp.h> #include <linux/skbuff.h> #include <net/sock.h> #include <asm/system.h> #include <asm/uaccess.h> #include <linux/fcntl.h> #include <linux/termios.h> #include <linux/mm.h> #include <linux/interrupt.h> #include <linux/notifier.h> #include <net/rose.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <net/tcp_states.h> #include <net/ip.h> #include <net/arp.h> static int rose_ndevs = 10; int sysctl_rose_restart_request_timeout = ROSE_DEFAULT_T0; int sysctl_rose_call_request_timeout = ROSE_DEFAULT_T1; int sysctl_rose_reset_request_timeout = ROSE_DEFAULT_T2; int sysctl_rose_clear_request_timeout = ROSE_DEFAULT_T3; int sysctl_rose_no_activity_timeout = ROSE_DEFAULT_IDLE; int sysctl_rose_ack_hold_back_timeout = ROSE_DEFAULT_HB; int sysctl_rose_routing_control = ROSE_DEFAULT_ROUTING; int sysctl_rose_link_fail_timeout = ROSE_DEFAULT_FAIL_TIMEOUT; int sysctl_rose_maximum_vcs = ROSE_DEFAULT_MAXVC; int sysctl_rose_window_size = ROSE_DEFAULT_WINDOW_SIZE; static HLIST_HEAD(rose_list); static DEFINE_SPINLOCK(rose_list_lock); static const struct proto_ops rose_proto_ops; ax25_address rose_callsign; /* * ROSE network devices are virtual network devices encapsulating ROSE * frames into AX.25 which will be sent through an AX.25 device, so form a * special "super class" of normal net devices; split their locks off into a * separate class since they always nest. */ static struct lock_class_key rose_netdev_xmit_lock_key; static struct lock_class_key rose_netdev_addr_lock_key; static void rose_set_lockdep_one(struct net_device *dev, struct netdev_queue *txq, void *_unused) { lockdep_set_class(&txq->_xmit_lock, &rose_netdev_xmit_lock_key); } static void rose_set_lockdep_key(struct net_device *dev) { lockdep_set_class(&dev->addr_list_lock, &rose_netdev_addr_lock_key); netdev_for_each_tx_queue(dev, rose_set_lockdep_one, NULL); } /* * Convert a ROSE address into text. */ char *rose2asc(char *buf, const rose_address *addr) { if (addr->rose_addr[0] == 0x00 && addr->rose_addr[1] == 0x00 && addr->rose_addr[2] == 0x00 && addr->rose_addr[3] == 0x00 && addr->rose_addr[4] == 0x00) { strcpy(buf, "*"); } else { sprintf(buf, "%02X%02X%02X%02X%02X", addr->rose_addr[0] & 0xFF, addr->rose_addr[1] & 0xFF, addr->rose_addr[2] & 0xFF, addr->rose_addr[3] & 0xFF, addr->rose_addr[4] & 0xFF); } return buf; } /* * Compare two ROSE addresses, 0 == equal. 
*/ int rosecmp(rose_address *addr1, rose_address *addr2) { int i; for (i = 0; i < 5; i++) if (addr1->rose_addr[i] != addr2->rose_addr[i]) return 1; return 0; } /* * Compare two ROSE addresses for only mask digits, 0 == equal. */ int rosecmpm(rose_address *addr1, rose_address *addr2, unsigned short mask) { unsigned int i, j; if (mask > 10) return 1; for (i = 0; i < mask; i++) { j = i / 2; if ((i % 2) != 0) { if ((addr1->rose_addr[j] & 0x0F) != (addr2->rose_addr[j] & 0x0F)) return 1; } else { if ((addr1->rose_addr[j] & 0xF0) != (addr2->rose_addr[j] & 0xF0)) return 1; } } return 0; } /* * Socket removal during an interrupt is now safe. */ static void rose_remove_socket(struct sock *sk) { spin_lock_bh(&rose_list_lock); sk_del_node_init(sk); spin_unlock_bh(&rose_list_lock); } /* * Kill all bound sockets on a broken link layer connection to a * particular neighbour. */ void rose_kill_by_neigh(struct rose_neigh *neigh) { struct sock *s; struct hlist_node *node; spin_lock_bh(&rose_list_lock); sk_for_each(s, node, &rose_list) { struct rose_sock *rose = rose_sk(s); if (rose->neighbour == neigh) { rose_disconnect(s, ENETUNREACH, ROSE_OUT_OF_ORDER, 0); rose->neighbour->use--; rose->neighbour = NULL; } } spin_unlock_bh(&rose_list_lock); } /* * Kill all bound sockets on a dropped device. */ static void rose_kill_by_device(struct net_device *dev) { struct sock *s; struct hlist_node *node; spin_lock_bh(&rose_list_lock); sk_for_each(s, node, &rose_list) { struct rose_sock *rose = rose_sk(s); if (rose->device == dev) { rose_disconnect(s, ENETUNREACH, ROSE_OUT_OF_ORDER, 0); rose->neighbour->use--; rose->device = NULL; } } spin_unlock_bh(&rose_list_lock); } /* * Handle device status changes. */ static int rose_device_event(struct notifier_block *this, unsigned long event, void *ptr) { struct net_device *dev = (struct net_device *)ptr; if (!net_eq(dev_net(dev), &init_net)) return NOTIFY_DONE; if (event != NETDEV_DOWN) return NOTIFY_DONE; switch (dev->type) { case ARPHRD_ROSE: rose_kill_by_device(dev); break; case ARPHRD_AX25: rose_link_device_down(dev); rose_rt_device_down(dev); break; } return NOTIFY_DONE; } /* * Add a socket to the bound sockets list. */ static void rose_insert_socket(struct sock *sk) { spin_lock_bh(&rose_list_lock); sk_add_node(sk, &rose_list); spin_unlock_bh(&rose_list_lock); } /* * Find a socket that wants to accept the Call Request we just * received. */ static struct sock *rose_find_listener(rose_address *addr, ax25_address *call) { struct sock *s; struct hlist_node *node; spin_lock_bh(&rose_list_lock); sk_for_each(s, node, &rose_list) { struct rose_sock *rose = rose_sk(s); if (!rosecmp(&rose->source_addr, addr) && !ax25cmp(&rose->source_call, call) && !rose->source_ndigis && s->sk_state == TCP_LISTEN) goto found; } sk_for_each(s, node, &rose_list) { struct rose_sock *rose = rose_sk(s); if (!rosecmp(&rose->source_addr, addr) && !ax25cmp(&rose->source_call, &null_ax25_address) && s->sk_state == TCP_LISTEN) goto found; } s = NULL; found: spin_unlock_bh(&rose_list_lock); return s; } /* * Find a connected ROSE socket given my LCI and device. */ struct sock *rose_find_socket(unsigned int lci, struct rose_neigh *neigh) { struct sock *s; struct hlist_node *node; spin_lock_bh(&rose_list_lock); sk_for_each(s, node, &rose_list) { struct rose_sock *rose = rose_sk(s); if (rose->lci == lci && rose->neighbour == neigh) goto found; } s = NULL; found: spin_unlock_bh(&rose_list_lock); return s; } /* * Find a unique LCI for a given device. 
*/ unsigned int rose_new_lci(struct rose_neigh *neigh) { int lci; if (neigh->dce_mode) { for (lci = 1; lci <= sysctl_rose_maximum_vcs; lci++) if (rose_find_socket(lci, neigh) == NULL && rose_route_free_lci(lci, neigh) == NULL) return lci; } else { for (lci = sysctl_rose_maximum_vcs; lci > 0; lci--) if (rose_find_socket(lci, neigh) == NULL && rose_route_free_lci(lci, neigh) == NULL) return lci; } return 0; } /* * Deferred destroy. */ void rose_destroy_socket(struct sock *); /* * Handler for deferred kills. */ static void rose_destroy_timer(unsigned long data) { rose_destroy_socket((struct sock *)data); } /* * This is called from user mode and the timers. Thus it protects itself * against interrupt users but doesn't worry about being called during * work. Once it is removed from the queue no interrupt or bottom half * will touch it and we are (fairly 8-) ) safe. */ void rose_destroy_socket(struct sock *sk) { struct sk_buff *skb; rose_remove_socket(sk); rose_stop_heartbeat(sk); rose_stop_idletimer(sk); rose_stop_timer(sk); rose_clear_queues(sk); /* Flush the queues */ while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) { if (skb->sk != sk) { /* A pending connection */ /* Queue the unaccepted socket for death */ sock_set_flag(skb->sk, SOCK_DEAD); rose_start_heartbeat(skb->sk); rose_sk(skb->sk)->state = ROSE_STATE_0; } kfree_skb(skb); } if (sk_has_allocations(sk)) { /* Defer: outstanding buffers */ setup_timer(&sk->sk_timer, rose_destroy_timer, (unsigned long)sk); sk->sk_timer.expires = jiffies + 10 * HZ; add_timer(&sk->sk_timer); } else sock_put(sk); } /* * Handling for system calls applied via the various interfaces to a * ROSE socket object. */ static int rose_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen) { struct sock *sk = sock->sk; struct rose_sock *rose = rose_sk(sk); int opt; if (level != SOL_ROSE) return -ENOPROTOOPT; if (optlen < sizeof(int)) return -EINVAL; if (get_user(opt, (int __user *)optval)) return -EFAULT; switch (optname) { case ROSE_DEFER: rose->defer = opt ? 1 : 0; return 0; case ROSE_T1: if (opt < 1) return -EINVAL; rose->t1 = opt * HZ; return 0; case ROSE_T2: if (opt < 1) return -EINVAL; rose->t2 = opt * HZ; return 0; case ROSE_T3: if (opt < 1) return -EINVAL; rose->t3 = opt * HZ; return 0; case ROSE_HOLDBACK: if (opt < 1) return -EINVAL; rose->hb = opt * HZ; return 0; case ROSE_IDLE: if (opt < 0) return -EINVAL; rose->idle = opt * 60 * HZ; return 0; case ROSE_QBITINCL: rose->qbitincl = opt ? 1 : 0; return 0; default: return -ENOPROTOOPT; } } static int rose_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen) { struct sock *sk = sock->sk; struct rose_sock *rose = rose_sk(sk); int val = 0; int len; if (level != SOL_ROSE) return -ENOPROTOOPT; if (get_user(len, optlen)) return -EFAULT; if (len < 0) return -EINVAL; switch (optname) { case ROSE_DEFER: val = rose->defer; break; case ROSE_T1: val = rose->t1 / HZ; break; case ROSE_T2: val = rose->t2 / HZ; break; case ROSE_T3: val = rose->t3 / HZ; break; case ROSE_HOLDBACK: val = rose->hb / HZ; break; case ROSE_IDLE: val = rose->idle / (60 * HZ); break; case ROSE_QBITINCL: val = rose->qbitincl; break; default: return -ENOPROTOOPT; } len = min_t(unsigned int, len, sizeof(int)); if (put_user(len, optlen)) return -EFAULT; return copy_to_user(optval, &val, len) ? 
-EFAULT : 0; } static int rose_listen(struct socket *sock, int backlog) { struct sock *sk = sock->sk; if (sk->sk_state != TCP_LISTEN) { struct rose_sock *rose = rose_sk(sk); rose->dest_ndigis = 0; memset(&rose->dest_addr, 0, ROSE_ADDR_LEN); memset(&rose->dest_call, 0, AX25_ADDR_LEN); memset(rose->dest_digis, 0, AX25_ADDR_LEN * ROSE_MAX_DIGIS); sk->sk_max_ack_backlog = backlog; sk->sk_state = TCP_LISTEN; return 0; } return -EOPNOTSUPP; } static struct proto rose_proto = { .name = "ROSE", .owner = THIS_MODULE, .obj_size = sizeof(struct rose_sock), }; static int rose_create(struct net *net, struct socket *sock, int protocol, int kern) { struct sock *sk; struct rose_sock *rose; if (!net_eq(net, &init_net)) return -EAFNOSUPPORT; if (sock->type != SOCK_SEQPACKET || protocol != 0) return -ESOCKTNOSUPPORT; sk = sk_alloc(net, PF_ROSE, GFP_ATOMIC, &rose_proto); if (sk == NULL) return -ENOMEM; rose = rose_sk(sk); sock_init_data(sock, sk); skb_queue_head_init(&rose->ack_queue); #ifdef M_BIT skb_queue_head_init(&rose->frag_queue); rose->fraglen = 0; #endif sock->ops = &rose_proto_ops; sk->sk_protocol = protocol; init_timer(&rose->timer); init_timer(&rose->idletimer); rose->t1 = msecs_to_jiffies(sysctl_rose_call_request_timeout); rose->t2 = msecs_to_jiffies(sysctl_rose_reset_request_timeout); rose->t3 = msecs_to_jiffies(sysctl_rose_clear_request_timeout); rose->hb = msecs_to_jiffies(sysctl_rose_ack_hold_back_timeout); rose->idle = msecs_to_jiffies(sysctl_rose_no_activity_timeout); rose->state = ROSE_STATE_0; return 0; } static struct sock *rose_make_new(struct sock *osk) { struct sock *sk; struct rose_sock *rose, *orose; if (osk->sk_type != SOCK_SEQPACKET) return NULL; sk = sk_alloc(sock_net(osk), PF_ROSE, GFP_ATOMIC, &rose_proto); if (sk == NULL) return NULL; rose = rose_sk(sk); sock_init_data(NULL, sk); skb_queue_head_init(&rose->ack_queue); #ifdef M_BIT skb_queue_head_init(&rose->frag_queue); rose->fraglen = 0; #endif sk->sk_type = osk->sk_type; sk->sk_priority = osk->sk_priority; sk->sk_protocol = osk->sk_protocol; sk->sk_rcvbuf = osk->sk_rcvbuf; sk->sk_sndbuf = osk->sk_sndbuf; sk->sk_state = TCP_ESTABLISHED; sock_copy_flags(sk, osk); init_timer(&rose->timer); init_timer(&rose->idletimer); orose = rose_sk(osk); rose->t1 = orose->t1; rose->t2 = orose->t2; rose->t3 = orose->t3; rose->hb = orose->hb; rose->idle = orose->idle; rose->defer = orose->defer; rose->device = orose->device; rose->qbitincl = orose->qbitincl; return sk; } static int rose_release(struct socket *sock) { struct sock *sk = sock->sk; struct rose_sock *rose; if (sk == NULL) return 0; sock_hold(sk); sock_orphan(sk); lock_sock(sk); rose = rose_sk(sk); switch (rose->state) { case ROSE_STATE_0: release_sock(sk); rose_disconnect(sk, 0, -1, -1); lock_sock(sk); rose_destroy_socket(sk); break; case ROSE_STATE_2: rose->neighbour->use--; release_sock(sk); rose_disconnect(sk, 0, -1, -1); lock_sock(sk); rose_destroy_socket(sk); break; case ROSE_STATE_1: case ROSE_STATE_3: case ROSE_STATE_4: case ROSE_STATE_5: rose_clear_queues(sk); rose_stop_idletimer(sk); rose_write_internal(sk, ROSE_CLEAR_REQUEST); rose_start_t3timer(sk); rose->state = ROSE_STATE_2; sk->sk_state = TCP_CLOSE; sk->sk_shutdown |= SEND_SHUTDOWN; sk->sk_state_change(sk); sock_set_flag(sk, SOCK_DEAD); sock_set_flag(sk, SOCK_DESTROY); break; default: break; } sock->sk = NULL; release_sock(sk); sock_put(sk); return 0; } static int rose_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) { struct sock *sk = sock->sk; struct rose_sock *rose = rose_sk(sk); struct sockaddr_rose 
*addr = (struct sockaddr_rose *)uaddr; struct net_device *dev; ax25_address *source; ax25_uid_assoc *user; int n; if (!sock_flag(sk, SOCK_ZAPPED)) return -EINVAL; if (addr_len != sizeof(struct sockaddr_rose) && addr_len != sizeof(struct full_sockaddr_rose)) return -EINVAL; if (addr->srose_family != AF_ROSE) return -EINVAL; if (addr_len == sizeof(struct sockaddr_rose) && addr->srose_ndigis > 1) return -EINVAL; if ((unsigned int) addr->srose_ndigis > ROSE_MAX_DIGIS) return -EINVAL; if ((dev = rose_dev_get(&addr->srose_addr)) == NULL) { SOCK_DEBUG(sk, "ROSE: bind failed: invalid address\n"); return -EADDRNOTAVAIL; } source = &addr->srose_call; user = ax25_findbyuid(current_euid()); if (user) { rose->source_call = user->call; ax25_uid_put(user); } else { if (ax25_uid_policy && !capable(CAP_NET_BIND_SERVICE)) return -EACCES; rose->source_call = *source; } rose->source_addr = addr->srose_addr; rose->device = dev; rose->source_ndigis = addr->srose_ndigis; if (addr_len == sizeof(struct full_sockaddr_rose)) { struct full_sockaddr_rose *full_addr = (struct full_sockaddr_rose *)uaddr; for (n = 0 ; n < addr->srose_ndigis ; n++) rose->source_digis[n] = full_addr->srose_digis[n]; } else { if (rose->source_ndigis == 1) { rose->source_digis[0] = addr->srose_digi; } } rose_insert_socket(sk); sock_reset_flag(sk, SOCK_ZAPPED); SOCK_DEBUG(sk, "ROSE: socket is bound\n"); return 0; } static int rose_connect(struct socket *sock, struct sockaddr *uaddr, int addr_len, int flags) { struct sock *sk = sock->sk; struct rose_sock *rose = rose_sk(sk); struct sockaddr_rose *addr = (struct sockaddr_rose *)uaddr; unsigned char cause, diagnostic; struct net_device *dev; ax25_uid_assoc *user; int n, err = 0; if (addr_len != sizeof(struct sockaddr_rose) && addr_len != sizeof(struct full_sockaddr_rose)) return -EINVAL; if (addr->srose_family != AF_ROSE) return -EINVAL; if (addr_len == sizeof(struct sockaddr_rose) && addr->srose_ndigis > 1) return -EINVAL; if ((unsigned int) addr->srose_ndigis > ROSE_MAX_DIGIS) return -EINVAL; /* Source + Destination digis should not exceed ROSE_MAX_DIGIS */ if ((rose->source_ndigis + addr->srose_ndigis) > ROSE_MAX_DIGIS) return -EINVAL; lock_sock(sk); if (sk->sk_state == TCP_ESTABLISHED && sock->state == SS_CONNECTING) { /* Connect completed during a ERESTARTSYS event */ sock->state = SS_CONNECTED; goto out_release; } if (sk->sk_state == TCP_CLOSE && sock->state == SS_CONNECTING) { sock->state = SS_UNCONNECTED; err = -ECONNREFUSED; goto out_release; } if (sk->sk_state == TCP_ESTABLISHED) { /* No reconnect on a seqpacket socket */ err = -EISCONN; goto out_release; } sk->sk_state = TCP_CLOSE; sock->state = SS_UNCONNECTED; rose->neighbour = rose_get_neigh(&addr->srose_addr, &cause, &diagnostic, 0); if (!rose->neighbour) { err = -ENETUNREACH; goto out_release; } rose->lci = rose_new_lci(rose->neighbour); if (!rose->lci) { err = -ENETUNREACH; goto out_release; } if (sock_flag(sk, SOCK_ZAPPED)) { /* Must bind first - autobinding in this may or may not work */ sock_reset_flag(sk, SOCK_ZAPPED); if ((dev = rose_dev_first()) == NULL) { err = -ENETUNREACH; goto out_release; } user = ax25_findbyuid(current_euid()); if (!user) { err = -EINVAL; goto out_release; } memcpy(&rose->source_addr, dev->dev_addr, ROSE_ADDR_LEN); rose->source_call = user->call; rose->device = dev; ax25_uid_put(user); rose_insert_socket(sk); /* Finish the bind */ } rose->dest_addr = addr->srose_addr; rose->dest_call = addr->srose_call; rose->rand = ((long)rose & 0xFFFF) + rose->lci; rose->dest_ndigis = addr->srose_ndigis; if 
(addr_len == sizeof(struct full_sockaddr_rose)) { struct full_sockaddr_rose *full_addr = (struct full_sockaddr_rose *)uaddr; for (n = 0 ; n < addr->srose_ndigis ; n++) rose->dest_digis[n] = full_addr->srose_digis[n]; } else { if (rose->dest_ndigis == 1) { rose->dest_digis[0] = addr->srose_digi; } } /* Move to connecting socket, start sending Connect Requests */ sock->state = SS_CONNECTING; sk->sk_state = TCP_SYN_SENT; rose->state = ROSE_STATE_1; rose->neighbour->use++; rose_write_internal(sk, ROSE_CALL_REQUEST); rose_start_heartbeat(sk); rose_start_t1timer(sk); /* Now the loop */ if (sk->sk_state != TCP_ESTABLISHED && (flags & O_NONBLOCK)) { err = -EINPROGRESS; goto out_release; } /* * A Connect Ack with Choke or timeout or failed routing will go to * closed. */ if (sk->sk_state == TCP_SYN_SENT) { DEFINE_WAIT(wait); for (;;) { prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); if (sk->sk_state != TCP_SYN_SENT) break; if (!signal_pending(current)) { release_sock(sk); schedule(); lock_sock(sk); continue; } err = -ERESTARTSYS; break; } finish_wait(sk_sleep(sk), &wait); if (err) goto out_release; } if (sk->sk_state != TCP_ESTABLISHED) { sock->state = SS_UNCONNECTED; err = sock_error(sk); /* Always set at this point */ goto out_release; } sock->state = SS_CONNECTED; out_release: release_sock(sk); return err; } static int rose_accept(struct socket *sock, struct socket *newsock, int flags) { struct sk_buff *skb; struct sock *newsk; DEFINE_WAIT(wait); struct sock *sk; int err = 0; if ((sk = sock->sk) == NULL) return -EINVAL; lock_sock(sk); if (sk->sk_type != SOCK_SEQPACKET) { err = -EOPNOTSUPP; goto out_release; } if (sk->sk_state != TCP_LISTEN) { err = -EINVAL; goto out_release; } /* * The write queue this time is holding sockets ready to use * hooked into the SABM we saved */ for (;;) { prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); skb = skb_dequeue(&sk->sk_receive_queue); if (skb) break; if (flags & O_NONBLOCK) { err = -EWOULDBLOCK; break; } if (!signal_pending(current)) { release_sock(sk); schedule(); lock_sock(sk); continue; } err = -ERESTARTSYS; break; } finish_wait(sk_sleep(sk), &wait); if (err) goto out_release; newsk = skb->sk; sock_graft(newsk, newsock); /* Now attach up the new socket */ skb->sk = NULL; kfree_skb(skb); sk->sk_ack_backlog--; out_release: release_sock(sk); return err; } static int rose_getname(struct socket *sock, struct sockaddr *uaddr, int *uaddr_len, int peer) { struct full_sockaddr_rose *srose = (struct full_sockaddr_rose *)uaddr; struct sock *sk = sock->sk; struct rose_sock *rose = rose_sk(sk); int n; memset(srose, 0, sizeof(*srose)); if (peer != 0) { if (sk->sk_state != TCP_ESTABLISHED) return -ENOTCONN; srose->srose_family = AF_ROSE; srose->srose_addr = rose->dest_addr; srose->srose_call = rose->dest_call; srose->srose_ndigis = rose->dest_ndigis; for (n = 0; n < rose->dest_ndigis; n++) srose->srose_digis[n] = rose->dest_digis[n]; } else { srose->srose_family = AF_ROSE; srose->srose_addr = rose->source_addr; srose->srose_call = rose->source_call; srose->srose_ndigis = rose->source_ndigis; for (n = 0; n < rose->source_ndigis; n++) srose->srose_digis[n] = rose->source_digis[n]; } *uaddr_len = sizeof(struct full_sockaddr_rose); return 0; } int rose_rx_call_request(struct sk_buff *skb, struct net_device *dev, struct rose_neigh *neigh, unsigned int lci) { struct sock *sk; struct sock *make; struct rose_sock *make_rose; struct rose_facilities_struct facilities; int n, len; skb->sk = NULL; /* Initially we don't know who it's for */ /* * skb->data points 
to the rose frame start */ memset(&facilities, 0x00, sizeof(struct rose_facilities_struct)); len = (((skb->data[3] >> 4) & 0x0F) + 1) >> 1; len += (((skb->data[3] >> 0) & 0x0F) + 1) >> 1; if (!rose_parse_facilities(skb->data + len + 4, &facilities)) { rose_transmit_clear_request(neigh, lci, ROSE_INVALID_FACILITY, 76); return 0; } sk = rose_find_listener(&facilities.source_addr, &facilities.source_call); /* * We can't accept the Call Request. */ if (sk == NULL || sk_acceptq_is_full(sk) || (make = rose_make_new(sk)) == NULL) { rose_transmit_clear_request(neigh, lci, ROSE_NETWORK_CONGESTION, 120); return 0; } skb->sk = make; make->sk_state = TCP_ESTABLISHED; make_rose = rose_sk(make); make_rose->lci = lci; make_rose->dest_addr = facilities.dest_addr; make_rose->dest_call = facilities.dest_call; make_rose->dest_ndigis = facilities.dest_ndigis; for (n = 0 ; n < facilities.dest_ndigis ; n++) make_rose->dest_digis[n] = facilities.dest_digis[n]; make_rose->source_addr = facilities.source_addr; make_rose->source_call = facilities.source_call; make_rose->source_ndigis = facilities.source_ndigis; for (n = 0 ; n < facilities.source_ndigis ; n++) make_rose->source_digis[n]= facilities.source_digis[n]; make_rose->neighbour = neigh; make_rose->device = dev; make_rose->facilities = facilities; make_rose->neighbour->use++; if (rose_sk(sk)->defer) { make_rose->state = ROSE_STATE_5; } else { rose_write_internal(make, ROSE_CALL_ACCEPTED); make_rose->state = ROSE_STATE_3; rose_start_idletimer(make); } make_rose->condition = 0x00; make_rose->vs = 0; make_rose->va = 0; make_rose->vr = 0; make_rose->vl = 0; sk->sk_ack_backlog++; rose_insert_socket(make); skb_queue_head(&sk->sk_receive_queue, skb); rose_start_heartbeat(make); if (!sock_flag(sk, SOCK_DEAD)) sk->sk_data_ready(sk, skb->len); return 1; } static int rose_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len) { struct sock *sk = sock->sk; struct rose_sock *rose = rose_sk(sk); struct sockaddr_rose *usrose = (struct sockaddr_rose *)msg->msg_name; int err; struct full_sockaddr_rose srose; struct sk_buff *skb; unsigned char *asmptr; int n, size, qbit = 0; if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_EOR|MSG_CMSG_COMPAT)) return -EINVAL; if (sock_flag(sk, SOCK_ZAPPED)) return -EADDRNOTAVAIL; if (sk->sk_shutdown & SEND_SHUTDOWN) { send_sig(SIGPIPE, current, 0); return -EPIPE; } if (rose->neighbour == NULL || rose->device == NULL) return -ENETUNREACH; if (usrose != NULL) { if (msg->msg_namelen != sizeof(struct sockaddr_rose) && msg->msg_namelen != sizeof(struct full_sockaddr_rose)) return -EINVAL; memset(&srose, 0, sizeof(struct full_sockaddr_rose)); memcpy(&srose, usrose, msg->msg_namelen); if (rosecmp(&rose->dest_addr, &srose.srose_addr) != 0 || ax25cmp(&rose->dest_call, &srose.srose_call) != 0) return -EISCONN; if (srose.srose_ndigis != rose->dest_ndigis) return -EISCONN; if (srose.srose_ndigis == rose->dest_ndigis) { for (n = 0 ; n < srose.srose_ndigis ; n++) if (ax25cmp(&rose->dest_digis[n], &srose.srose_digis[n])) return -EISCONN; } if (srose.srose_family != AF_ROSE) return -EINVAL; } else { if (sk->sk_state != TCP_ESTABLISHED) return -ENOTCONN; srose.srose_family = AF_ROSE; srose.srose_addr = rose->dest_addr; srose.srose_call = rose->dest_call; srose.srose_ndigis = rose->dest_ndigis; for (n = 0 ; n < rose->dest_ndigis ; n++) srose.srose_digis[n] = rose->dest_digis[n]; } SOCK_DEBUG(sk, "ROSE: sendto: Addresses built.\n"); /* Build a packet */ SOCK_DEBUG(sk, "ROSE: sendto: building packet.\n"); /* Sanity check the packet size */ if 
(len > 65535) return -EMSGSIZE; size = len + AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + ROSE_MIN_LEN; if ((skb = sock_alloc_send_skb(sk, size, msg->msg_flags & MSG_DONTWAIT, &err)) == NULL) return err; skb_reserve(skb, AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + ROSE_MIN_LEN); /* * Put the data on the end */ SOCK_DEBUG(sk, "ROSE: Appending user data\n"); skb_reset_transport_header(skb); skb_put(skb, len); err = memcpy_fromiovec(skb_transport_header(skb), msg->msg_iov, len); if (err) { kfree_skb(skb); return err; } /* * If the Q BIT Include socket option is in force, the first * byte of the user data is the logical value of the Q Bit. */ if (rose->qbitincl) { qbit = skb->data[0]; skb_pull(skb, 1); } /* * Push down the ROSE header */ asmptr = skb_push(skb, ROSE_MIN_LEN); SOCK_DEBUG(sk, "ROSE: Building Network Header.\n"); /* Build a ROSE Network header */ asmptr[0] = ((rose->lci >> 8) & 0x0F) | ROSE_GFI; asmptr[1] = (rose->lci >> 0) & 0xFF; asmptr[2] = ROSE_DATA; if (qbit) asmptr[0] |= ROSE_Q_BIT; SOCK_DEBUG(sk, "ROSE: Built header.\n"); SOCK_DEBUG(sk, "ROSE: Transmitting buffer\n"); if (sk->sk_state != TCP_ESTABLISHED) { kfree_skb(skb); return -ENOTCONN; } #ifdef M_BIT #define ROSE_PACLEN (256-ROSE_MIN_LEN) if (skb->len - ROSE_MIN_LEN > ROSE_PACLEN) { unsigned char header[ROSE_MIN_LEN]; struct sk_buff *skbn; int frontlen; int lg; /* Save a copy of the Header */ skb_copy_from_linear_data(skb, header, ROSE_MIN_LEN); skb_pull(skb, ROSE_MIN_LEN); frontlen = skb_headroom(skb); while (skb->len > 0) { if ((skbn = sock_alloc_send_skb(sk, frontlen + ROSE_PACLEN, 0, &err)) == NULL) { kfree_skb(skb); return err; } skbn->sk = sk; skbn->free = 1; skbn->arp = 1; skb_reserve(skbn, frontlen); lg = (ROSE_PACLEN > skb->len) ? skb->len : ROSE_PACLEN; /* Copy the user data */ skb_copy_from_linear_data(skb, skb_put(skbn, lg), lg); skb_pull(skb, lg); /* Duplicate the Header */ skb_push(skbn, ROSE_MIN_LEN); skb_copy_to_linear_data(skbn, header, ROSE_MIN_LEN); if (skb->len > 0) skbn->data[2] |= M_BIT; skb_queue_tail(&sk->sk_write_queue, skbn); /* Throw it on the queue */ } skb->free = 1; kfree_skb(skb); } else { skb_queue_tail(&sk->sk_write_queue, skb); /* Throw it on the queue */ } #else skb_queue_tail(&sk->sk_write_queue, skb); /* Shove it onto the queue */ #endif rose_kick(sk); return len; } static int rose_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t size, int flags) { struct sock *sk = sock->sk; struct rose_sock *rose = rose_sk(sk); struct sockaddr_rose *srose = (struct sockaddr_rose *)msg->msg_name; size_t copied; unsigned char *asmptr; struct sk_buff *skb; int n, er, qbit; /* * This works for seqpacket too. The receiver has ordered the queue for * us! 
We do one quick check first though */ if (sk->sk_state != TCP_ESTABLISHED) return -ENOTCONN; /* Now we can treat all alike */ if ((skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &er)) == NULL) return er; qbit = (skb->data[0] & ROSE_Q_BIT) == ROSE_Q_BIT; skb_pull(skb, ROSE_MIN_LEN); if (rose->qbitincl) { asmptr = skb_push(skb, 1); *asmptr = qbit; } skb_reset_transport_header(skb); copied = skb->len; if (copied > size) { copied = size; msg->msg_flags |= MSG_TRUNC; } skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); if (srose != NULL) { srose->srose_family = AF_ROSE; srose->srose_addr = rose->dest_addr; srose->srose_call = rose->dest_call; srose->srose_ndigis = rose->dest_ndigis; if (msg->msg_namelen >= sizeof(struct full_sockaddr_rose)) { struct full_sockaddr_rose *full_srose = (struct full_sockaddr_rose *)msg->msg_name; for (n = 0 ; n < rose->dest_ndigis ; n++) full_srose->srose_digis[n] = rose->dest_digis[n]; msg->msg_namelen = sizeof(struct full_sockaddr_rose); } else { if (rose->dest_ndigis >= 1) { srose->srose_ndigis = 1; srose->srose_digi = rose->dest_digis[0]; } msg->msg_namelen = sizeof(struct sockaddr_rose); } } skb_free_datagram(sk, skb); return copied; } static int rose_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { struct sock *sk = sock->sk; struct rose_sock *rose = rose_sk(sk); void __user *argp = (void __user *)arg; switch (cmd) { case TIOCOUTQ: { long amount; amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk); if (amount < 0) amount = 0; return put_user(amount, (unsigned int __user *) argp); } case TIOCINQ: { struct sk_buff *skb; long amount = 0L; /* These two are safe on a single CPU system as only user tasks fiddle here */ if ((skb = skb_peek(&sk->sk_receive_queue)) != NULL) amount = skb->len; return put_user(amount, (unsigned int __user *) argp); } case SIOCGSTAMP: return sock_get_timestamp(sk, (struct timeval __user *) argp); case SIOCGSTAMPNS: return sock_get_timestampns(sk, (struct timespec __user *) argp); case SIOCGIFADDR: case SIOCSIFADDR: case SIOCGIFDSTADDR: case SIOCSIFDSTADDR: case SIOCGIFBRDADDR: case SIOCSIFBRDADDR: case SIOCGIFNETMASK: case SIOCSIFNETMASK: case SIOCGIFMETRIC: case SIOCSIFMETRIC: return -EINVAL; case SIOCADDRT: case SIOCDELRT: case SIOCRSCLRRT: if (!capable(CAP_NET_ADMIN)) return -EPERM; return rose_rt_ioctl(cmd, argp); case SIOCRSGCAUSE: { struct rose_cause_struct rose_cause; rose_cause.cause = rose->cause; rose_cause.diagnostic = rose->diagnostic; return copy_to_user(argp, &rose_cause, sizeof(struct rose_cause_struct)) ? -EFAULT : 0; } case SIOCRSSCAUSE: { struct rose_cause_struct rose_cause; if (copy_from_user(&rose_cause, argp, sizeof(struct rose_cause_struct))) return -EFAULT; rose->cause = rose_cause.cause; rose->diagnostic = rose_cause.diagnostic; return 0; } case SIOCRSSL2CALL: if (!capable(CAP_NET_ADMIN)) return -EPERM; if (ax25cmp(&rose_callsign, &null_ax25_address) != 0) ax25_listen_release(&rose_callsign, NULL); if (copy_from_user(&rose_callsign, argp, sizeof(ax25_address))) return -EFAULT; if (ax25cmp(&rose_callsign, &null_ax25_address) != 0) return ax25_listen_register(&rose_callsign, NULL); return 0; case SIOCRSGL2CALL: return copy_to_user(argp, &rose_callsign, sizeof(ax25_address)) ? 
-EFAULT : 0; case SIOCRSACCEPT: if (rose->state == ROSE_STATE_5) { rose_write_internal(sk, ROSE_CALL_ACCEPTED); rose_start_idletimer(sk); rose->condition = 0x00; rose->vs = 0; rose->va = 0; rose->vr = 0; rose->vl = 0; rose->state = ROSE_STATE_3; } return 0; default: return -ENOIOCTLCMD; } return 0; } #ifdef CONFIG_PROC_FS static void *rose_info_start(struct seq_file *seq, loff_t *pos) __acquires(rose_list_lock) { spin_lock_bh(&rose_list_lock); return seq_hlist_start_head(&rose_list, *pos); } static void *rose_info_next(struct seq_file *seq, void *v, loff_t *pos) { return seq_hlist_next(v, &rose_list, pos); } static void rose_info_stop(struct seq_file *seq, void *v) __releases(rose_list_lock) { spin_unlock_bh(&rose_list_lock); } static int rose_info_show(struct seq_file *seq, void *v) { char buf[11], rsbuf[11]; if (v == SEQ_START_TOKEN) seq_puts(seq, "dest_addr dest_call src_addr src_call dev lci neigh st vs vr va t t1 t2 t3 hb idle Snd-Q Rcv-Q inode\n"); else { struct sock *s = sk_entry(v); struct rose_sock *rose = rose_sk(s); const char *devname, *callsign; const struct net_device *dev = rose->device; if (!dev) devname = "???"; else devname = dev->name; seq_printf(seq, "%-10s %-9s ", rose2asc(rsbuf, &rose->dest_addr), ax2asc(buf, &rose->dest_call)); if (ax25cmp(&rose->source_call, &null_ax25_address) == 0) callsign = "??????-?"; else callsign = ax2asc(buf, &rose->source_call); seq_printf(seq, "%-10s %-9s %-5s %3.3X %05d %d %d %d %d %3lu %3lu %3lu %3lu %3lu %3lu/%03lu %5d %5d %ld\n", rose2asc(rsbuf, &rose->source_addr), callsign, devname, rose->lci & 0x0FFF, (rose->neighbour) ? rose->neighbour->number : 0, rose->state, rose->vs, rose->vr, rose->va, ax25_display_timer(&rose->timer) / HZ, rose->t1 / HZ, rose->t2 / HZ, rose->t3 / HZ, rose->hb / HZ, ax25_display_timer(&rose->idletimer) / (60 * HZ), rose->idle / (60 * HZ), sk_wmem_alloc_get(s), sk_rmem_alloc_get(s), s->sk_socket ? 
SOCK_INODE(s->sk_socket)->i_ino : 0L); } return 0; } static const struct seq_operations rose_info_seqops = { .start = rose_info_start, .next = rose_info_next, .stop = rose_info_stop, .show = rose_info_show, }; static int rose_info_open(struct inode *inode, struct file *file) { return seq_open(file, &rose_info_seqops); } static const struct file_operations rose_info_fops = { .owner = THIS_MODULE, .open = rose_info_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; #endif /* CONFIG_PROC_FS */ static const struct net_proto_family rose_family_ops = { .family = PF_ROSE, .create = rose_create, .owner = THIS_MODULE, }; static const struct proto_ops rose_proto_ops = { .family = PF_ROSE, .owner = THIS_MODULE, .release = rose_release, .bind = rose_bind, .connect = rose_connect, .socketpair = sock_no_socketpair, .accept = rose_accept, .getname = rose_getname, .poll = datagram_poll, .ioctl = rose_ioctl, .listen = rose_listen, .shutdown = sock_no_shutdown, .setsockopt = rose_setsockopt, .getsockopt = rose_getsockopt, .sendmsg = rose_sendmsg, .recvmsg = rose_recvmsg, .mmap = sock_no_mmap, .sendpage = sock_no_sendpage, }; static struct notifier_block rose_dev_notifier = { .notifier_call = rose_device_event, }; static struct net_device **dev_rose; static struct ax25_protocol rose_pid = { .pid = AX25_P_ROSE, .func = rose_route_frame }; static struct ax25_linkfail rose_linkfail_notifier = { .func = rose_link_failed }; static int __init rose_proto_init(void) { int i; int rc; if (rose_ndevs > 0x7FFFFFFF/sizeof(struct net_device *)) { printk(KERN_ERR "ROSE: rose_proto_init - rose_ndevs parameter to large\n"); rc = -EINVAL; goto out; } rc = proto_register(&rose_proto, 0); if (rc != 0) goto out; rose_callsign = null_ax25_address; dev_rose = kzalloc(rose_ndevs * sizeof(struct net_device *), GFP_KERNEL); if (dev_rose == NULL) { printk(KERN_ERR "ROSE: rose_proto_init - unable to allocate device structure\n"); rc = -ENOMEM; goto out_proto_unregister; } for (i = 0; i < rose_ndevs; i++) { struct net_device *dev; char name[IFNAMSIZ]; sprintf(name, "rose%d", i); dev = alloc_netdev(0, name, rose_setup); if (!dev) { printk(KERN_ERR "ROSE: rose_proto_init - unable to allocate memory\n"); rc = -ENOMEM; goto fail; } rc = register_netdev(dev); if (rc) { printk(KERN_ERR "ROSE: netdevice registration failed\n"); free_netdev(dev); goto fail; } rose_set_lockdep_key(dev); dev_rose[i] = dev; } sock_register(&rose_family_ops); register_netdevice_notifier(&rose_dev_notifier); ax25_register_pid(&rose_pid); ax25_linkfail_register(&rose_linkfail_notifier); #ifdef CONFIG_SYSCTL rose_register_sysctl(); #endif rose_loopback_init(); rose_add_loopback_neigh(); proc_net_fops_create(&init_net, "rose", S_IRUGO, &rose_info_fops); proc_net_fops_create(&init_net, "rose_neigh", S_IRUGO, &rose_neigh_fops); proc_net_fops_create(&init_net, "rose_nodes", S_IRUGO, &rose_nodes_fops); proc_net_fops_create(&init_net, "rose_routes", S_IRUGO, &rose_routes_fops); out: return rc; fail: while (--i >= 0) { unregister_netdev(dev_rose[i]); free_netdev(dev_rose[i]); } kfree(dev_rose); out_proto_unregister: proto_unregister(&rose_proto); goto out; } module_init(rose_proto_init); module_param(rose_ndevs, int, 0); MODULE_PARM_DESC(rose_ndevs, "number of ROSE devices"); MODULE_AUTHOR("Jonathan Naylor G4KLX <g4klx@g4klx.demon.co.uk>"); MODULE_DESCRIPTION("The amateur radio ROSE network layer protocol"); MODULE_LICENSE("GPL"); MODULE_ALIAS_NETPROTO(PF_ROSE); static void __exit rose_exit(void) { int i; proc_net_remove(&init_net, "rose"); 
proc_net_remove(&init_net, "rose_neigh"); proc_net_remove(&init_net, "rose_nodes"); proc_net_remove(&init_net, "rose_routes"); rose_loopback_clear(); rose_rt_free(); ax25_protocol_release(AX25_P_ROSE); ax25_linkfail_release(&rose_linkfail_notifier); if (ax25cmp(&rose_callsign, &null_ax25_address) != 0) ax25_listen_release(&rose_callsign, NULL); #ifdef CONFIG_SYSCTL rose_unregister_sysctl(); #endif unregister_netdevice_notifier(&rose_dev_notifier); sock_unregister(PF_ROSE); for (i = 0; i < rose_ndevs; i++) { struct net_device *dev = dev_rose[i]; if (dev) { unregister_netdev(dev); free_netdev(dev); } } kfree(dev_rose); proto_unregister(&rose_proto); } module_exit(rose_exit);
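The code_before field ends here. Note that rose_rx_call_request() above derives the facilities offset from the two address-length nibbles in skb->data[3] and hands skb->data + len + 4 to rose_parse_facilities() without ever checking skb->len, so a short CALL_REQUEST frame lets the parser run past the received data. The upstream fix presumably adds length validation before those steps; the sketch below shows the kind of bounds check required, with illustrative constant and function names rather than the kernel's own:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Byte 3 of a CALL_REQUEST packs the called/calling address lengths as
 * two BCD digit counts; the addresses themselves start at byte 4.
 * CALL_REQ_ADDR_OFF and facilities_offset() are illustrative names. */
#define CALL_REQ_ADDR_OFF 4 /* GFI/LCI (2 bytes), type (1), lengths (1) */

static int facilities_offset(const uint8_t *data, size_t data_len) {
    size_t len;

    if (data_len < CALL_REQ_ADDR_OFF)
        return -1; /* frame too short to even hold the length octet */

    len  = ((size_t)((data[3] >> 4) & 0x0F) + 1) / 2; /* called addr bytes */
    len += ((size_t)((data[3] >> 0) & 0x0F) + 1) / 2; /* calling addr bytes */

    if (data_len < CALL_REQ_ADDR_OFF + len)
        return -1; /* claimed addresses overrun the received frame */

    return (int)(CALL_REQ_ADDR_OFF + len); /* facilities start here */
}

int main(void) {
    uint8_t ok[32] = { 0x10, 0x00, 0x0B, 0xAA }; /* 10+10 digits = 5+5 bytes */
    uint8_t bad[6] = { 0x10, 0x00, 0x0B, 0xFF }; /* claims 8+8 bytes in a 6-byte frame */

    printf("ok frame:  facilities at offset %d\n", facilities_offset(ok, sizeof(ok)));
    printf("bad frame: facilities at offset %d\n", facilities_offset(bad, sizeof(bad)));
    return 0;
}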
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk) * Copyright (C) Alan Cox GW4PTS (alan@lxorguk.ukuu.org.uk) * Copyright (C) Terry Dawson VK2KTJ (terry@animats.net) * Copyright (C) Tomi Manninen OH2BNS (oh2bns@sral.fi) */ #include <linux/capability.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/init.h> #include <linux/errno.h> #include <linux/types.h> #include <linux/socket.h> #include <linux/in.h> #include <linux/slab.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/spinlock.h> #include <linux/timer.h> #include <linux/string.h> #include <linux/sockios.h> #include <linux/net.h> #include <linux/stat.h> #include <net/net_namespace.h> #include <net/ax25.h> #include <linux/inet.h> #include <linux/netdevice.h> #include <linux/if_arp.h> #include <linux/skbuff.h> #include <net/sock.h> #include <asm/system.h> #include <asm/uaccess.h> #include <linux/fcntl.h> #include <linux/termios.h> #include <linux/mm.h> #include <linux/interrupt.h> #include <linux/notifier.h> #include <net/rose.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <net/tcp_states.h> #include <net/ip.h> #include <net/arp.h> static int rose_ndevs = 10; int sysctl_rose_restart_request_timeout = ROSE_DEFAULT_T0; int sysctl_rose_call_request_timeout = ROSE_DEFAULT_T1; int sysctl_rose_reset_request_timeout = ROSE_DEFAULT_T2; int sysctl_rose_clear_request_timeout = ROSE_DEFAULT_T3; int sysctl_rose_no_activity_timeout = ROSE_DEFAULT_IDLE; int sysctl_rose_ack_hold_back_timeout = ROSE_DEFAULT_HB; int sysctl_rose_routing_control = ROSE_DEFAULT_ROUTING; int sysctl_rose_link_fail_timeout = ROSE_DEFAULT_FAIL_TIMEOUT; int sysctl_rose_maximum_vcs = ROSE_DEFAULT_MAXVC; int sysctl_rose_window_size = ROSE_DEFAULT_WINDOW_SIZE; static HLIST_HEAD(rose_list); static DEFINE_SPINLOCK(rose_list_lock); static const struct proto_ops rose_proto_ops; ax25_address rose_callsign; /* * ROSE network devices are virtual network devices encapsulating ROSE * frames into AX.25 which will be sent through an AX.25 device, so form a * special "super class" of normal net devices; split their locks off into a * separate class since they always nest. */ static struct lock_class_key rose_netdev_xmit_lock_key; static struct lock_class_key rose_netdev_addr_lock_key; static void rose_set_lockdep_one(struct net_device *dev, struct netdev_queue *txq, void *_unused) { lockdep_set_class(&txq->_xmit_lock, &rose_netdev_xmit_lock_key); } static void rose_set_lockdep_key(struct net_device *dev) { lockdep_set_class(&dev->addr_list_lock, &rose_netdev_addr_lock_key); netdev_for_each_tx_queue(dev, rose_set_lockdep_one, NULL); } /* * Convert a ROSE address into text. */ char *rose2asc(char *buf, const rose_address *addr) { if (addr->rose_addr[0] == 0x00 && addr->rose_addr[1] == 0x00 && addr->rose_addr[2] == 0x00 && addr->rose_addr[3] == 0x00 && addr->rose_addr[4] == 0x00) { strcpy(buf, "*"); } else { sprintf(buf, "%02X%02X%02X%02X%02X", addr->rose_addr[0] & 0xFF, addr->rose_addr[1] & 0xFF, addr->rose_addr[2] & 0xFF, addr->rose_addr[3] & 0xFF, addr->rose_addr[4] & 0xFF); } return buf; } /* * Compare two ROSE addresses, 0 == equal. 
*/ int rosecmp(rose_address *addr1, rose_address *addr2) { int i; for (i = 0; i < 5; i++) if (addr1->rose_addr[i] != addr2->rose_addr[i]) return 1; return 0; } /* * Compare two ROSE addresses for only mask digits, 0 == equal. */ int rosecmpm(rose_address *addr1, rose_address *addr2, unsigned short mask) { unsigned int i, j; if (mask > 10) return 1; for (i = 0; i < mask; i++) { j = i / 2; if ((i % 2) != 0) { if ((addr1->rose_addr[j] & 0x0F) != (addr2->rose_addr[j] & 0x0F)) return 1; } else { if ((addr1->rose_addr[j] & 0xF0) != (addr2->rose_addr[j] & 0xF0)) return 1; } } return 0; } /* * Socket removal during an interrupt is now safe. */ static void rose_remove_socket(struct sock *sk) { spin_lock_bh(&rose_list_lock); sk_del_node_init(sk); spin_unlock_bh(&rose_list_lock); } /* * Kill all bound sockets on a broken link layer connection to a * particular neighbour. */ void rose_kill_by_neigh(struct rose_neigh *neigh) { struct sock *s; struct hlist_node *node; spin_lock_bh(&rose_list_lock); sk_for_each(s, node, &rose_list) { struct rose_sock *rose = rose_sk(s); if (rose->neighbour == neigh) { rose_disconnect(s, ENETUNREACH, ROSE_OUT_OF_ORDER, 0); rose->neighbour->use--; rose->neighbour = NULL; } } spin_unlock_bh(&rose_list_lock); } /* * Kill all bound sockets on a dropped device. */ static void rose_kill_by_device(struct net_device *dev) { struct sock *s; struct hlist_node *node; spin_lock_bh(&rose_list_lock); sk_for_each(s, node, &rose_list) { struct rose_sock *rose = rose_sk(s); if (rose->device == dev) { rose_disconnect(s, ENETUNREACH, ROSE_OUT_OF_ORDER, 0); rose->neighbour->use--; rose->device = NULL; } } spin_unlock_bh(&rose_list_lock); } /* * Handle device status changes. */ static int rose_device_event(struct notifier_block *this, unsigned long event, void *ptr) { struct net_device *dev = (struct net_device *)ptr; if (!net_eq(dev_net(dev), &init_net)) return NOTIFY_DONE; if (event != NETDEV_DOWN) return NOTIFY_DONE; switch (dev->type) { case ARPHRD_ROSE: rose_kill_by_device(dev); break; case ARPHRD_AX25: rose_link_device_down(dev); rose_rt_device_down(dev); break; } return NOTIFY_DONE; } /* * Add a socket to the bound sockets list. */ static void rose_insert_socket(struct sock *sk) { spin_lock_bh(&rose_list_lock); sk_add_node(sk, &rose_list); spin_unlock_bh(&rose_list_lock); } /* * Find a socket that wants to accept the Call Request we just * received. */ static struct sock *rose_find_listener(rose_address *addr, ax25_address *call) { struct sock *s; struct hlist_node *node; spin_lock_bh(&rose_list_lock); sk_for_each(s, node, &rose_list) { struct rose_sock *rose = rose_sk(s); if (!rosecmp(&rose->source_addr, addr) && !ax25cmp(&rose->source_call, call) && !rose->source_ndigis && s->sk_state == TCP_LISTEN) goto found; } sk_for_each(s, node, &rose_list) { struct rose_sock *rose = rose_sk(s); if (!rosecmp(&rose->source_addr, addr) && !ax25cmp(&rose->source_call, &null_ax25_address) && s->sk_state == TCP_LISTEN) goto found; } s = NULL; found: spin_unlock_bh(&rose_list_lock); return s; } /* * Find a connected ROSE socket given my LCI and device. */ struct sock *rose_find_socket(unsigned int lci, struct rose_neigh *neigh) { struct sock *s; struct hlist_node *node; spin_lock_bh(&rose_list_lock); sk_for_each(s, node, &rose_list) { struct rose_sock *rose = rose_sk(s); if (rose->lci == lci && rose->neighbour == neigh) goto found; } s = NULL; found: spin_unlock_bh(&rose_list_lock); return s; } /* * Find a unique LCI for a given device. 
*/ unsigned int rose_new_lci(struct rose_neigh *neigh) { int lci; if (neigh->dce_mode) { for (lci = 1; lci <= sysctl_rose_maximum_vcs; lci++) if (rose_find_socket(lci, neigh) == NULL && rose_route_free_lci(lci, neigh) == NULL) return lci; } else { for (lci = sysctl_rose_maximum_vcs; lci > 0; lci--) if (rose_find_socket(lci, neigh) == NULL && rose_route_free_lci(lci, neigh) == NULL) return lci; } return 0; } /* * Deferred destroy. */ void rose_destroy_socket(struct sock *); /* * Handler for deferred kills. */ static void rose_destroy_timer(unsigned long data) { rose_destroy_socket((struct sock *)data); } /* * This is called from user mode and the timers. Thus it protects itself * against interrupt users but doesn't worry about being called during * work. Once it is removed from the queue no interrupt or bottom half * will touch it and we are (fairly 8-) ) safe. */ void rose_destroy_socket(struct sock *sk) { struct sk_buff *skb; rose_remove_socket(sk); rose_stop_heartbeat(sk); rose_stop_idletimer(sk); rose_stop_timer(sk); rose_clear_queues(sk); /* Flush the queues */ while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) { if (skb->sk != sk) { /* A pending connection */ /* Queue the unaccepted socket for death */ sock_set_flag(skb->sk, SOCK_DEAD); rose_start_heartbeat(skb->sk); rose_sk(skb->sk)->state = ROSE_STATE_0; } kfree_skb(skb); } if (sk_has_allocations(sk)) { /* Defer: outstanding buffers */ setup_timer(&sk->sk_timer, rose_destroy_timer, (unsigned long)sk); sk->sk_timer.expires = jiffies + 10 * HZ; add_timer(&sk->sk_timer); } else sock_put(sk); } /* * Handling for system calls applied via the various interfaces to a * ROSE socket object. */ static int rose_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen) { struct sock *sk = sock->sk; struct rose_sock *rose = rose_sk(sk); int opt; if (level != SOL_ROSE) return -ENOPROTOOPT; if (optlen < sizeof(int)) return -EINVAL; if (get_user(opt, (int __user *)optval)) return -EFAULT; switch (optname) { case ROSE_DEFER: rose->defer = opt ? 1 : 0; return 0; case ROSE_T1: if (opt < 1) return -EINVAL; rose->t1 = opt * HZ; return 0; case ROSE_T2: if (opt < 1) return -EINVAL; rose->t2 = opt * HZ; return 0; case ROSE_T3: if (opt < 1) return -EINVAL; rose->t3 = opt * HZ; return 0; case ROSE_HOLDBACK: if (opt < 1) return -EINVAL; rose->hb = opt * HZ; return 0; case ROSE_IDLE: if (opt < 0) return -EINVAL; rose->idle = opt * 60 * HZ; return 0; case ROSE_QBITINCL: rose->qbitincl = opt ? 1 : 0; return 0; default: return -ENOPROTOOPT; } } static int rose_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen) { struct sock *sk = sock->sk; struct rose_sock *rose = rose_sk(sk); int val = 0; int len; if (level != SOL_ROSE) return -ENOPROTOOPT; if (get_user(len, optlen)) return -EFAULT; if (len < 0) return -EINVAL; switch (optname) { case ROSE_DEFER: val = rose->defer; break; case ROSE_T1: val = rose->t1 / HZ; break; case ROSE_T2: val = rose->t2 / HZ; break; case ROSE_T3: val = rose->t3 / HZ; break; case ROSE_HOLDBACK: val = rose->hb / HZ; break; case ROSE_IDLE: val = rose->idle / (60 * HZ); break; case ROSE_QBITINCL: val = rose->qbitincl; break; default: return -ENOPROTOOPT; } len = min_t(unsigned int, len, sizeof(int)); if (put_user(len, optlen)) return -EFAULT; return copy_to_user(optval, &val, len) ? 
-EFAULT : 0; } static int rose_listen(struct socket *sock, int backlog) { struct sock *sk = sock->sk; if (sk->sk_state != TCP_LISTEN) { struct rose_sock *rose = rose_sk(sk); rose->dest_ndigis = 0; memset(&rose->dest_addr, 0, ROSE_ADDR_LEN); memset(&rose->dest_call, 0, AX25_ADDR_LEN); memset(rose->dest_digis, 0, AX25_ADDR_LEN * ROSE_MAX_DIGIS); sk->sk_max_ack_backlog = backlog; sk->sk_state = TCP_LISTEN; return 0; } return -EOPNOTSUPP; } static struct proto rose_proto = { .name = "ROSE", .owner = THIS_MODULE, .obj_size = sizeof(struct rose_sock), }; static int rose_create(struct net *net, struct socket *sock, int protocol, int kern) { struct sock *sk; struct rose_sock *rose; if (!net_eq(net, &init_net)) return -EAFNOSUPPORT; if (sock->type != SOCK_SEQPACKET || protocol != 0) return -ESOCKTNOSUPPORT; sk = sk_alloc(net, PF_ROSE, GFP_ATOMIC, &rose_proto); if (sk == NULL) return -ENOMEM; rose = rose_sk(sk); sock_init_data(sock, sk); skb_queue_head_init(&rose->ack_queue); #ifdef M_BIT skb_queue_head_init(&rose->frag_queue); rose->fraglen = 0; #endif sock->ops = &rose_proto_ops; sk->sk_protocol = protocol; init_timer(&rose->timer); init_timer(&rose->idletimer); rose->t1 = msecs_to_jiffies(sysctl_rose_call_request_timeout); rose->t2 = msecs_to_jiffies(sysctl_rose_reset_request_timeout); rose->t3 = msecs_to_jiffies(sysctl_rose_clear_request_timeout); rose->hb = msecs_to_jiffies(sysctl_rose_ack_hold_back_timeout); rose->idle = msecs_to_jiffies(sysctl_rose_no_activity_timeout); rose->state = ROSE_STATE_0; return 0; } static struct sock *rose_make_new(struct sock *osk) { struct sock *sk; struct rose_sock *rose, *orose; if (osk->sk_type != SOCK_SEQPACKET) return NULL; sk = sk_alloc(sock_net(osk), PF_ROSE, GFP_ATOMIC, &rose_proto); if (sk == NULL) return NULL; rose = rose_sk(sk); sock_init_data(NULL, sk); skb_queue_head_init(&rose->ack_queue); #ifdef M_BIT skb_queue_head_init(&rose->frag_queue); rose->fraglen = 0; #endif sk->sk_type = osk->sk_type; sk->sk_priority = osk->sk_priority; sk->sk_protocol = osk->sk_protocol; sk->sk_rcvbuf = osk->sk_rcvbuf; sk->sk_sndbuf = osk->sk_sndbuf; sk->sk_state = TCP_ESTABLISHED; sock_copy_flags(sk, osk); init_timer(&rose->timer); init_timer(&rose->idletimer); orose = rose_sk(osk); rose->t1 = orose->t1; rose->t2 = orose->t2; rose->t3 = orose->t3; rose->hb = orose->hb; rose->idle = orose->idle; rose->defer = orose->defer; rose->device = orose->device; rose->qbitincl = orose->qbitincl; return sk; } static int rose_release(struct socket *sock) { struct sock *sk = sock->sk; struct rose_sock *rose; if (sk == NULL) return 0; sock_hold(sk); sock_orphan(sk); lock_sock(sk); rose = rose_sk(sk); switch (rose->state) { case ROSE_STATE_0: release_sock(sk); rose_disconnect(sk, 0, -1, -1); lock_sock(sk); rose_destroy_socket(sk); break; case ROSE_STATE_2: rose->neighbour->use--; release_sock(sk); rose_disconnect(sk, 0, -1, -1); lock_sock(sk); rose_destroy_socket(sk); break; case ROSE_STATE_1: case ROSE_STATE_3: case ROSE_STATE_4: case ROSE_STATE_5: rose_clear_queues(sk); rose_stop_idletimer(sk); rose_write_internal(sk, ROSE_CLEAR_REQUEST); rose_start_t3timer(sk); rose->state = ROSE_STATE_2; sk->sk_state = TCP_CLOSE; sk->sk_shutdown |= SEND_SHUTDOWN; sk->sk_state_change(sk); sock_set_flag(sk, SOCK_DEAD); sock_set_flag(sk, SOCK_DESTROY); break; default: break; } sock->sk = NULL; release_sock(sk); sock_put(sk); return 0; } static int rose_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) { struct sock *sk = sock->sk; struct rose_sock *rose = rose_sk(sk); struct sockaddr_rose 
*addr = (struct sockaddr_rose *)uaddr; struct net_device *dev; ax25_address *source; ax25_uid_assoc *user; int n; if (!sock_flag(sk, SOCK_ZAPPED)) return -EINVAL; if (addr_len != sizeof(struct sockaddr_rose) && addr_len != sizeof(struct full_sockaddr_rose)) return -EINVAL; if (addr->srose_family != AF_ROSE) return -EINVAL; if (addr_len == sizeof(struct sockaddr_rose) && addr->srose_ndigis > 1) return -EINVAL; if ((unsigned int) addr->srose_ndigis > ROSE_MAX_DIGIS) return -EINVAL; if ((dev = rose_dev_get(&addr->srose_addr)) == NULL) { SOCK_DEBUG(sk, "ROSE: bind failed: invalid address\n"); return -EADDRNOTAVAIL; } source = &addr->srose_call; user = ax25_findbyuid(current_euid()); if (user) { rose->source_call = user->call; ax25_uid_put(user); } else { if (ax25_uid_policy && !capable(CAP_NET_BIND_SERVICE)) return -EACCES; rose->source_call = *source; } rose->source_addr = addr->srose_addr; rose->device = dev; rose->source_ndigis = addr->srose_ndigis; if (addr_len == sizeof(struct full_sockaddr_rose)) { struct full_sockaddr_rose *full_addr = (struct full_sockaddr_rose *)uaddr; for (n = 0 ; n < addr->srose_ndigis ; n++) rose->source_digis[n] = full_addr->srose_digis[n]; } else { if (rose->source_ndigis == 1) { rose->source_digis[0] = addr->srose_digi; } } rose_insert_socket(sk); sock_reset_flag(sk, SOCK_ZAPPED); SOCK_DEBUG(sk, "ROSE: socket is bound\n"); return 0; } static int rose_connect(struct socket *sock, struct sockaddr *uaddr, int addr_len, int flags) { struct sock *sk = sock->sk; struct rose_sock *rose = rose_sk(sk); struct sockaddr_rose *addr = (struct sockaddr_rose *)uaddr; unsigned char cause, diagnostic; struct net_device *dev; ax25_uid_assoc *user; int n, err = 0; if (addr_len != sizeof(struct sockaddr_rose) && addr_len != sizeof(struct full_sockaddr_rose)) return -EINVAL; if (addr->srose_family != AF_ROSE) return -EINVAL; if (addr_len == sizeof(struct sockaddr_rose) && addr->srose_ndigis > 1) return -EINVAL; if ((unsigned int) addr->srose_ndigis > ROSE_MAX_DIGIS) return -EINVAL; /* Source + Destination digis should not exceed ROSE_MAX_DIGIS */ if ((rose->source_ndigis + addr->srose_ndigis) > ROSE_MAX_DIGIS) return -EINVAL; lock_sock(sk); if (sk->sk_state == TCP_ESTABLISHED && sock->state == SS_CONNECTING) { /* Connect completed during a ERESTARTSYS event */ sock->state = SS_CONNECTED; goto out_release; } if (sk->sk_state == TCP_CLOSE && sock->state == SS_CONNECTING) { sock->state = SS_UNCONNECTED; err = -ECONNREFUSED; goto out_release; } if (sk->sk_state == TCP_ESTABLISHED) { /* No reconnect on a seqpacket socket */ err = -EISCONN; goto out_release; } sk->sk_state = TCP_CLOSE; sock->state = SS_UNCONNECTED; rose->neighbour = rose_get_neigh(&addr->srose_addr, &cause, &diagnostic, 0); if (!rose->neighbour) { err = -ENETUNREACH; goto out_release; } rose->lci = rose_new_lci(rose->neighbour); if (!rose->lci) { err = -ENETUNREACH; goto out_release; } if (sock_flag(sk, SOCK_ZAPPED)) { /* Must bind first - autobinding in this may or may not work */ sock_reset_flag(sk, SOCK_ZAPPED); if ((dev = rose_dev_first()) == NULL) { err = -ENETUNREACH; goto out_release; } user = ax25_findbyuid(current_euid()); if (!user) { err = -EINVAL; goto out_release; } memcpy(&rose->source_addr, dev->dev_addr, ROSE_ADDR_LEN); rose->source_call = user->call; rose->device = dev; ax25_uid_put(user); rose_insert_socket(sk); /* Finish the bind */ } rose->dest_addr = addr->srose_addr; rose->dest_call = addr->srose_call; rose->rand = ((long)rose & 0xFFFF) + rose->lci; rose->dest_ndigis = addr->srose_ndigis; if 
(addr_len == sizeof(struct full_sockaddr_rose)) { struct full_sockaddr_rose *full_addr = (struct full_sockaddr_rose *)uaddr; for (n = 0 ; n < addr->srose_ndigis ; n++) rose->dest_digis[n] = full_addr->srose_digis[n]; } else { if (rose->dest_ndigis == 1) { rose->dest_digis[0] = addr->srose_digi; } } /* Move to connecting socket, start sending Connect Requests */ sock->state = SS_CONNECTING; sk->sk_state = TCP_SYN_SENT; rose->state = ROSE_STATE_1; rose->neighbour->use++; rose_write_internal(sk, ROSE_CALL_REQUEST); rose_start_heartbeat(sk); rose_start_t1timer(sk); /* Now the loop */ if (sk->sk_state != TCP_ESTABLISHED && (flags & O_NONBLOCK)) { err = -EINPROGRESS; goto out_release; } /* * A Connect Ack with Choke or timeout or failed routing will go to * closed. */ if (sk->sk_state == TCP_SYN_SENT) { DEFINE_WAIT(wait); for (;;) { prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); if (sk->sk_state != TCP_SYN_SENT) break; if (!signal_pending(current)) { release_sock(sk); schedule(); lock_sock(sk); continue; } err = -ERESTARTSYS; break; } finish_wait(sk_sleep(sk), &wait); if (err) goto out_release; } if (sk->sk_state != TCP_ESTABLISHED) { sock->state = SS_UNCONNECTED; err = sock_error(sk); /* Always set at this point */ goto out_release; } sock->state = SS_CONNECTED; out_release: release_sock(sk); return err; } static int rose_accept(struct socket *sock, struct socket *newsock, int flags) { struct sk_buff *skb; struct sock *newsk; DEFINE_WAIT(wait); struct sock *sk; int err = 0; if ((sk = sock->sk) == NULL) return -EINVAL; lock_sock(sk); if (sk->sk_type != SOCK_SEQPACKET) { err = -EOPNOTSUPP; goto out_release; } if (sk->sk_state != TCP_LISTEN) { err = -EINVAL; goto out_release; } /* * The write queue this time is holding sockets ready to use * hooked into the SABM we saved */ for (;;) { prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); skb = skb_dequeue(&sk->sk_receive_queue); if (skb) break; if (flags & O_NONBLOCK) { err = -EWOULDBLOCK; break; } if (!signal_pending(current)) { release_sock(sk); schedule(); lock_sock(sk); continue; } err = -ERESTARTSYS; break; } finish_wait(sk_sleep(sk), &wait); if (err) goto out_release; newsk = skb->sk; sock_graft(newsk, newsock); /* Now attach up the new socket */ skb->sk = NULL; kfree_skb(skb); sk->sk_ack_backlog--; out_release: release_sock(sk); return err; } static int rose_getname(struct socket *sock, struct sockaddr *uaddr, int *uaddr_len, int peer) { struct full_sockaddr_rose *srose = (struct full_sockaddr_rose *)uaddr; struct sock *sk = sock->sk; struct rose_sock *rose = rose_sk(sk); int n; memset(srose, 0, sizeof(*srose)); if (peer != 0) { if (sk->sk_state != TCP_ESTABLISHED) return -ENOTCONN; srose->srose_family = AF_ROSE; srose->srose_addr = rose->dest_addr; srose->srose_call = rose->dest_call; srose->srose_ndigis = rose->dest_ndigis; for (n = 0; n < rose->dest_ndigis; n++) srose->srose_digis[n] = rose->dest_digis[n]; } else { srose->srose_family = AF_ROSE; srose->srose_addr = rose->source_addr; srose->srose_call = rose->source_call; srose->srose_ndigis = rose->source_ndigis; for (n = 0; n < rose->source_ndigis; n++) srose->srose_digis[n] = rose->source_digis[n]; } *uaddr_len = sizeof(struct full_sockaddr_rose); return 0; } int rose_rx_call_request(struct sk_buff *skb, struct net_device *dev, struct rose_neigh *neigh, unsigned int lci) { struct sock *sk; struct sock *make; struct rose_sock *make_rose; struct rose_facilities_struct facilities; int n; skb->sk = NULL; /* Initially we don't know who it's for */ /* * skb->data points to 
the rose frame start */ memset(&facilities, 0x00, sizeof(struct rose_facilities_struct)); if (!rose_parse_facilities(skb->data + ROSE_CALL_REQ_FACILITIES_OFF, skb->len - ROSE_CALL_REQ_FACILITIES_OFF, &facilities)) { rose_transmit_clear_request(neigh, lci, ROSE_INVALID_FACILITY, 76); return 0; } sk = rose_find_listener(&facilities.source_addr, &facilities.source_call); /* * We can't accept the Call Request. */ if (sk == NULL || sk_acceptq_is_full(sk) || (make = rose_make_new(sk)) == NULL) { rose_transmit_clear_request(neigh, lci, ROSE_NETWORK_CONGESTION, 120); return 0; } skb->sk = make; make->sk_state = TCP_ESTABLISHED; make_rose = rose_sk(make); make_rose->lci = lci; make_rose->dest_addr = facilities.dest_addr; make_rose->dest_call = facilities.dest_call; make_rose->dest_ndigis = facilities.dest_ndigis; for (n = 0 ; n < facilities.dest_ndigis ; n++) make_rose->dest_digis[n] = facilities.dest_digis[n]; make_rose->source_addr = facilities.source_addr; make_rose->source_call = facilities.source_call; make_rose->source_ndigis = facilities.source_ndigis; for (n = 0 ; n < facilities.source_ndigis ; n++) make_rose->source_digis[n]= facilities.source_digis[n]; make_rose->neighbour = neigh; make_rose->device = dev; make_rose->facilities = facilities; make_rose->neighbour->use++; if (rose_sk(sk)->defer) { make_rose->state = ROSE_STATE_5; } else { rose_write_internal(make, ROSE_CALL_ACCEPTED); make_rose->state = ROSE_STATE_3; rose_start_idletimer(make); } make_rose->condition = 0x00; make_rose->vs = 0; make_rose->va = 0; make_rose->vr = 0; make_rose->vl = 0; sk->sk_ack_backlog++; rose_insert_socket(make); skb_queue_head(&sk->sk_receive_queue, skb); rose_start_heartbeat(make); if (!sock_flag(sk, SOCK_DEAD)) sk->sk_data_ready(sk, skb->len); return 1; } static int rose_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len) { struct sock *sk = sock->sk; struct rose_sock *rose = rose_sk(sk); struct sockaddr_rose *usrose = (struct sockaddr_rose *)msg->msg_name; int err; struct full_sockaddr_rose srose; struct sk_buff *skb; unsigned char *asmptr; int n, size, qbit = 0; if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_EOR|MSG_CMSG_COMPAT)) return -EINVAL; if (sock_flag(sk, SOCK_ZAPPED)) return -EADDRNOTAVAIL; if (sk->sk_shutdown & SEND_SHUTDOWN) { send_sig(SIGPIPE, current, 0); return -EPIPE; } if (rose->neighbour == NULL || rose->device == NULL) return -ENETUNREACH; if (usrose != NULL) { if (msg->msg_namelen != sizeof(struct sockaddr_rose) && msg->msg_namelen != sizeof(struct full_sockaddr_rose)) return -EINVAL; memset(&srose, 0, sizeof(struct full_sockaddr_rose)); memcpy(&srose, usrose, msg->msg_namelen); if (rosecmp(&rose->dest_addr, &srose.srose_addr) != 0 || ax25cmp(&rose->dest_call, &srose.srose_call) != 0) return -EISCONN; if (srose.srose_ndigis != rose->dest_ndigis) return -EISCONN; if (srose.srose_ndigis == rose->dest_ndigis) { for (n = 0 ; n < srose.srose_ndigis ; n++) if (ax25cmp(&rose->dest_digis[n], &srose.srose_digis[n])) return -EISCONN; } if (srose.srose_family != AF_ROSE) return -EINVAL; } else { if (sk->sk_state != TCP_ESTABLISHED) return -ENOTCONN; srose.srose_family = AF_ROSE; srose.srose_addr = rose->dest_addr; srose.srose_call = rose->dest_call; srose.srose_ndigis = rose->dest_ndigis; for (n = 0 ; n < rose->dest_ndigis ; n++) srose.srose_digis[n] = rose->dest_digis[n]; } SOCK_DEBUG(sk, "ROSE: sendto: Addresses built.\n"); /* Build a packet */ SOCK_DEBUG(sk, "ROSE: sendto: building packet.\n"); /* Sanity check the packet size */ if (len > 65535) return -EMSGSIZE; size = 
len + AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + ROSE_MIN_LEN; if ((skb = sock_alloc_send_skb(sk, size, msg->msg_flags & MSG_DONTWAIT, &err)) == NULL) return err; skb_reserve(skb, AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + ROSE_MIN_LEN); /* * Put the data on the end */ SOCK_DEBUG(sk, "ROSE: Appending user data\n"); skb_reset_transport_header(skb); skb_put(skb, len); err = memcpy_fromiovec(skb_transport_header(skb), msg->msg_iov, len); if (err) { kfree_skb(skb); return err; } /* * If the Q BIT Include socket option is in force, the first * byte of the user data is the logical value of the Q Bit. */ if (rose->qbitincl) { qbit = skb->data[0]; skb_pull(skb, 1); } /* * Push down the ROSE header */ asmptr = skb_push(skb, ROSE_MIN_LEN); SOCK_DEBUG(sk, "ROSE: Building Network Header.\n"); /* Build a ROSE Network header */ asmptr[0] = ((rose->lci >> 8) & 0x0F) | ROSE_GFI; asmptr[1] = (rose->lci >> 0) & 0xFF; asmptr[2] = ROSE_DATA; if (qbit) asmptr[0] |= ROSE_Q_BIT; SOCK_DEBUG(sk, "ROSE: Built header.\n"); SOCK_DEBUG(sk, "ROSE: Transmitting buffer\n"); if (sk->sk_state != TCP_ESTABLISHED) { kfree_skb(skb); return -ENOTCONN; } #ifdef M_BIT #define ROSE_PACLEN (256-ROSE_MIN_LEN) if (skb->len - ROSE_MIN_LEN > ROSE_PACLEN) { unsigned char header[ROSE_MIN_LEN]; struct sk_buff *skbn; int frontlen; int lg; /* Save a copy of the Header */ skb_copy_from_linear_data(skb, header, ROSE_MIN_LEN); skb_pull(skb, ROSE_MIN_LEN); frontlen = skb_headroom(skb); while (skb->len > 0) { if ((skbn = sock_alloc_send_skb(sk, frontlen + ROSE_PACLEN, 0, &err)) == NULL) { kfree_skb(skb); return err; } skbn->sk = sk; skbn->free = 1; skbn->arp = 1; skb_reserve(skbn, frontlen); lg = (ROSE_PACLEN > skb->len) ? skb->len : ROSE_PACLEN; /* Copy the user data */ skb_copy_from_linear_data(skb, skb_put(skbn, lg), lg); skb_pull(skb, lg); /* Duplicate the Header */ skb_push(skbn, ROSE_MIN_LEN); skb_copy_to_linear_data(skbn, header, ROSE_MIN_LEN); if (skb->len > 0) skbn->data[2] |= M_BIT; skb_queue_tail(&sk->sk_write_queue, skbn); /* Throw it on the queue */ } skb->free = 1; kfree_skb(skb); } else { skb_queue_tail(&sk->sk_write_queue, skb); /* Throw it on the queue */ } #else skb_queue_tail(&sk->sk_write_queue, skb); /* Shove it onto the queue */ #endif rose_kick(sk); return len; } static int rose_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t size, int flags) { struct sock *sk = sock->sk; struct rose_sock *rose = rose_sk(sk); struct sockaddr_rose *srose = (struct sockaddr_rose *)msg->msg_name; size_t copied; unsigned char *asmptr; struct sk_buff *skb; int n, er, qbit; /* * This works for seqpacket too. The receiver has ordered the queue for * us! 
We do one quick check first though */ if (sk->sk_state != TCP_ESTABLISHED) return -ENOTCONN; /* Now we can treat all alike */ if ((skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &er)) == NULL) return er; qbit = (skb->data[0] & ROSE_Q_BIT) == ROSE_Q_BIT; skb_pull(skb, ROSE_MIN_LEN); if (rose->qbitincl) { asmptr = skb_push(skb, 1); *asmptr = qbit; } skb_reset_transport_header(skb); copied = skb->len; if (copied > size) { copied = size; msg->msg_flags |= MSG_TRUNC; } skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); if (srose != NULL) { srose->srose_family = AF_ROSE; srose->srose_addr = rose->dest_addr; srose->srose_call = rose->dest_call; srose->srose_ndigis = rose->dest_ndigis; if (msg->msg_namelen >= sizeof(struct full_sockaddr_rose)) { struct full_sockaddr_rose *full_srose = (struct full_sockaddr_rose *)msg->msg_name; for (n = 0 ; n < rose->dest_ndigis ; n++) full_srose->srose_digis[n] = rose->dest_digis[n]; msg->msg_namelen = sizeof(struct full_sockaddr_rose); } else { if (rose->dest_ndigis >= 1) { srose->srose_ndigis = 1; srose->srose_digi = rose->dest_digis[0]; } msg->msg_namelen = sizeof(struct sockaddr_rose); } } skb_free_datagram(sk, skb); return copied; } static int rose_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { struct sock *sk = sock->sk; struct rose_sock *rose = rose_sk(sk); void __user *argp = (void __user *)arg; switch (cmd) { case TIOCOUTQ: { long amount; amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk); if (amount < 0) amount = 0; return put_user(amount, (unsigned int __user *) argp); } case TIOCINQ: { struct sk_buff *skb; long amount = 0L; /* These two are safe on a single CPU system as only user tasks fiddle here */ if ((skb = skb_peek(&sk->sk_receive_queue)) != NULL) amount = skb->len; return put_user(amount, (unsigned int __user *) argp); } case SIOCGSTAMP: return sock_get_timestamp(sk, (struct timeval __user *) argp); case SIOCGSTAMPNS: return sock_get_timestampns(sk, (struct timespec __user *) argp); case SIOCGIFADDR: case SIOCSIFADDR: case SIOCGIFDSTADDR: case SIOCSIFDSTADDR: case SIOCGIFBRDADDR: case SIOCSIFBRDADDR: case SIOCGIFNETMASK: case SIOCSIFNETMASK: case SIOCGIFMETRIC: case SIOCSIFMETRIC: return -EINVAL; case SIOCADDRT: case SIOCDELRT: case SIOCRSCLRRT: if (!capable(CAP_NET_ADMIN)) return -EPERM; return rose_rt_ioctl(cmd, argp); case SIOCRSGCAUSE: { struct rose_cause_struct rose_cause; rose_cause.cause = rose->cause; rose_cause.diagnostic = rose->diagnostic; return copy_to_user(argp, &rose_cause, sizeof(struct rose_cause_struct)) ? -EFAULT : 0; } case SIOCRSSCAUSE: { struct rose_cause_struct rose_cause; if (copy_from_user(&rose_cause, argp, sizeof(struct rose_cause_struct))) return -EFAULT; rose->cause = rose_cause.cause; rose->diagnostic = rose_cause.diagnostic; return 0; } case SIOCRSSL2CALL: if (!capable(CAP_NET_ADMIN)) return -EPERM; if (ax25cmp(&rose_callsign, &null_ax25_address) != 0) ax25_listen_release(&rose_callsign, NULL); if (copy_from_user(&rose_callsign, argp, sizeof(ax25_address))) return -EFAULT; if (ax25cmp(&rose_callsign, &null_ax25_address) != 0) return ax25_listen_register(&rose_callsign, NULL); return 0; case SIOCRSGL2CALL: return copy_to_user(argp, &rose_callsign, sizeof(ax25_address)) ? 
-EFAULT : 0; case SIOCRSACCEPT: if (rose->state == ROSE_STATE_5) { rose_write_internal(sk, ROSE_CALL_ACCEPTED); rose_start_idletimer(sk); rose->condition = 0x00; rose->vs = 0; rose->va = 0; rose->vr = 0; rose->vl = 0; rose->state = ROSE_STATE_3; } return 0; default: return -ENOIOCTLCMD; } return 0; } #ifdef CONFIG_PROC_FS static void *rose_info_start(struct seq_file *seq, loff_t *pos) __acquires(rose_list_lock) { spin_lock_bh(&rose_list_lock); return seq_hlist_start_head(&rose_list, *pos); } static void *rose_info_next(struct seq_file *seq, void *v, loff_t *pos) { return seq_hlist_next(v, &rose_list, pos); } static void rose_info_stop(struct seq_file *seq, void *v) __releases(rose_list_lock) { spin_unlock_bh(&rose_list_lock); } static int rose_info_show(struct seq_file *seq, void *v) { char buf[11], rsbuf[11]; if (v == SEQ_START_TOKEN) seq_puts(seq, "dest_addr dest_call src_addr src_call dev lci neigh st vs vr va t t1 t2 t3 hb idle Snd-Q Rcv-Q inode\n"); else { struct sock *s = sk_entry(v); struct rose_sock *rose = rose_sk(s); const char *devname, *callsign; const struct net_device *dev = rose->device; if (!dev) devname = "???"; else devname = dev->name; seq_printf(seq, "%-10s %-9s ", rose2asc(rsbuf, &rose->dest_addr), ax2asc(buf, &rose->dest_call)); if (ax25cmp(&rose->source_call, &null_ax25_address) == 0) callsign = "??????-?"; else callsign = ax2asc(buf, &rose->source_call); seq_printf(seq, "%-10s %-9s %-5s %3.3X %05d %d %d %d %d %3lu %3lu %3lu %3lu %3lu %3lu/%03lu %5d %5d %ld\n", rose2asc(rsbuf, &rose->source_addr), callsign, devname, rose->lci & 0x0FFF, (rose->neighbour) ? rose->neighbour->number : 0, rose->state, rose->vs, rose->vr, rose->va, ax25_display_timer(&rose->timer) / HZ, rose->t1 / HZ, rose->t2 / HZ, rose->t3 / HZ, rose->hb / HZ, ax25_display_timer(&rose->idletimer) / (60 * HZ), rose->idle / (60 * HZ), sk_wmem_alloc_get(s), sk_rmem_alloc_get(s), s->sk_socket ? 
SOCK_INODE(s->sk_socket)->i_ino : 0L); } return 0; } static const struct seq_operations rose_info_seqops = { .start = rose_info_start, .next = rose_info_next, .stop = rose_info_stop, .show = rose_info_show, }; static int rose_info_open(struct inode *inode, struct file *file) { return seq_open(file, &rose_info_seqops); } static const struct file_operations rose_info_fops = { .owner = THIS_MODULE, .open = rose_info_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; #endif /* CONFIG_PROC_FS */ static const struct net_proto_family rose_family_ops = { .family = PF_ROSE, .create = rose_create, .owner = THIS_MODULE, }; static const struct proto_ops rose_proto_ops = { .family = PF_ROSE, .owner = THIS_MODULE, .release = rose_release, .bind = rose_bind, .connect = rose_connect, .socketpair = sock_no_socketpair, .accept = rose_accept, .getname = rose_getname, .poll = datagram_poll, .ioctl = rose_ioctl, .listen = rose_listen, .shutdown = sock_no_shutdown, .setsockopt = rose_setsockopt, .getsockopt = rose_getsockopt, .sendmsg = rose_sendmsg, .recvmsg = rose_recvmsg, .mmap = sock_no_mmap, .sendpage = sock_no_sendpage, }; static struct notifier_block rose_dev_notifier = { .notifier_call = rose_device_event, }; static struct net_device **dev_rose; static struct ax25_protocol rose_pid = { .pid = AX25_P_ROSE, .func = rose_route_frame }; static struct ax25_linkfail rose_linkfail_notifier = { .func = rose_link_failed }; static int __init rose_proto_init(void) { int i; int rc; if (rose_ndevs > 0x7FFFFFFF/sizeof(struct net_device *)) { printk(KERN_ERR "ROSE: rose_proto_init - rose_ndevs parameter to large\n"); rc = -EINVAL; goto out; } rc = proto_register(&rose_proto, 0); if (rc != 0) goto out; rose_callsign = null_ax25_address; dev_rose = kzalloc(rose_ndevs * sizeof(struct net_device *), GFP_KERNEL); if (dev_rose == NULL) { printk(KERN_ERR "ROSE: rose_proto_init - unable to allocate device structure\n"); rc = -ENOMEM; goto out_proto_unregister; } for (i = 0; i < rose_ndevs; i++) { struct net_device *dev; char name[IFNAMSIZ]; sprintf(name, "rose%d", i); dev = alloc_netdev(0, name, rose_setup); if (!dev) { printk(KERN_ERR "ROSE: rose_proto_init - unable to allocate memory\n"); rc = -ENOMEM; goto fail; } rc = register_netdev(dev); if (rc) { printk(KERN_ERR "ROSE: netdevice registration failed\n"); free_netdev(dev); goto fail; } rose_set_lockdep_key(dev); dev_rose[i] = dev; } sock_register(&rose_family_ops); register_netdevice_notifier(&rose_dev_notifier); ax25_register_pid(&rose_pid); ax25_linkfail_register(&rose_linkfail_notifier); #ifdef CONFIG_SYSCTL rose_register_sysctl(); #endif rose_loopback_init(); rose_add_loopback_neigh(); proc_net_fops_create(&init_net, "rose", S_IRUGO, &rose_info_fops); proc_net_fops_create(&init_net, "rose_neigh", S_IRUGO, &rose_neigh_fops); proc_net_fops_create(&init_net, "rose_nodes", S_IRUGO, &rose_nodes_fops); proc_net_fops_create(&init_net, "rose_routes", S_IRUGO, &rose_routes_fops); out: return rc; fail: while (--i >= 0) { unregister_netdev(dev_rose[i]); free_netdev(dev_rose[i]); } kfree(dev_rose); out_proto_unregister: proto_unregister(&rose_proto); goto out; } module_init(rose_proto_init); module_param(rose_ndevs, int, 0); MODULE_PARM_DESC(rose_ndevs, "number of ROSE devices"); MODULE_AUTHOR("Jonathan Naylor G4KLX <g4klx@g4klx.demon.co.uk>"); MODULE_DESCRIPTION("The amateur radio ROSE network layer protocol"); MODULE_LICENSE("GPL"); MODULE_ALIAS_NETPROTO(PF_ROSE); static void __exit rose_exit(void) { int i; proc_net_remove(&init_net, "rose"); 
proc_net_remove(&init_net, "rose_neigh"); proc_net_remove(&init_net, "rose_nodes"); proc_net_remove(&init_net, "rose_routes"); rose_loopback_clear(); rose_rt_free(); ax25_protocol_release(AX25_P_ROSE); ax25_linkfail_release(&rose_linkfail_notifier); if (ax25cmp(&rose_callsign, &null_ax25_address) != 0) ax25_listen_release(&rose_callsign, NULL); #ifdef CONFIG_SYSCTL rose_unregister_sysctl(); #endif unregister_netdevice_notifier(&rose_dev_notifier); sock_unregister(PF_ROSE); for (i = 0; i < rose_ndevs; i++) { struct net_device *dev = dev_rose[i]; if (dev) { unregister_netdev(dev); free_netdev(dev); } } kfree(dev_rose); proto_unregister(&rose_proto); } module_exit(rose_exit);
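A note on the module init path above: rose_proto_init() validates the user-supplied rose_ndevs module parameter with "if (rose_ndevs > 0x7FFFFFFF/sizeof(struct net_device *))" before calling kzalloc(rose_ndevs * sizeof(struct net_device *), GFP_KERNEL), so the size multiplication cannot wrap. Below is a minimal stand-alone sketch of the same guard in plain C; alloc_ptr_array is an illustrative name, not a kernel helper.

/* Overflow-checked sizing for a user-supplied element count; a plain-C
 * stand-in for the rose_ndevs bound above, not kernel code. */
#include <stdint.h>
#include <stdlib.h>

void **alloc_ptr_array(long n)
{
	/* Reject negative counts and counts whose byte size would exceed
	 * INT32_MAX, mirroring the 0x7FFFFFFF bound used above. */
	if (n < 0 || (uint64_t) n > INT32_MAX / sizeof(void *))
		return NULL;
	return calloc((size_t) n, sizeof(void *));	/* zeroed, like kzalloc() */
}

calloc() checks the multiplication internally as well, but rejecting the count up front keeps the failure local and explicit, which is what the kernel's check does.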
int rose_rx_call_request(struct sk_buff *skb, struct net_device *dev, struct rose_neigh *neigh, unsigned int lci) { struct sock *sk; struct sock *make; struct rose_sock *make_rose; struct rose_facilities_struct facilities; int n, len; skb->sk = NULL; /* Initially we don't know who it's for */ /* * skb->data points to the rose frame start */ memset(&facilities, 0x00, sizeof(struct rose_facilities_struct)); len = (((skb->data[3] >> 4) & 0x0F) + 1) >> 1; len += (((skb->data[3] >> 0) & 0x0F) + 1) >> 1; if (!rose_parse_facilities(skb->data + len + 4, &facilities)) { rose_transmit_clear_request(neigh, lci, ROSE_INVALID_FACILITY, 76); return 0; } sk = rose_find_listener(&facilities.source_addr, &facilities.source_call); /* * We can't accept the Call Request. */ if (sk == NULL || sk_acceptq_is_full(sk) || (make = rose_make_new(sk)) == NULL) { rose_transmit_clear_request(neigh, lci, ROSE_NETWORK_CONGESTION, 120); return 0; } skb->sk = make; make->sk_state = TCP_ESTABLISHED; make_rose = rose_sk(make); make_rose->lci = lci; make_rose->dest_addr = facilities.dest_addr; make_rose->dest_call = facilities.dest_call; make_rose->dest_ndigis = facilities.dest_ndigis; for (n = 0 ; n < facilities.dest_ndigis ; n++) make_rose->dest_digis[n] = facilities.dest_digis[n]; make_rose->source_addr = facilities.source_addr; make_rose->source_call = facilities.source_call; make_rose->source_ndigis = facilities.source_ndigis; for (n = 0 ; n < facilities.source_ndigis ; n++) make_rose->source_digis[n]= facilities.source_digis[n]; make_rose->neighbour = neigh; make_rose->device = dev; make_rose->facilities = facilities; make_rose->neighbour->use++; if (rose_sk(sk)->defer) { make_rose->state = ROSE_STATE_5; } else { rose_write_internal(make, ROSE_CALL_ACCEPTED); make_rose->state = ROSE_STATE_3; rose_start_idletimer(make); } make_rose->condition = 0x00; make_rose->vs = 0; make_rose->va = 0; make_rose->vr = 0; make_rose->vl = 0; sk->sk_ack_backlog++; rose_insert_socket(make); skb_queue_head(&sk->sk_receive_queue, skb); rose_start_heartbeat(make); if (!sock_flag(sk, SOCK_DEAD)) sk->sk_data_ready(sk, skb->len); return 1; }
int rose_rx_call_request(struct sk_buff *skb, struct net_device *dev, struct rose_neigh *neigh, unsigned int lci) { struct sock *sk; struct sock *make; struct rose_sock *make_rose; struct rose_facilities_struct facilities; int n; skb->sk = NULL; /* Initially we don't know who it's for */ /* * skb->data points to the rose frame start */ memset(&facilities, 0x00, sizeof(struct rose_facilities_struct)); if (!rose_parse_facilities(skb->data + ROSE_CALL_REQ_FACILITIES_OFF, skb->len - ROSE_CALL_REQ_FACILITIES_OFF, &facilities)) { rose_transmit_clear_request(neigh, lci, ROSE_INVALID_FACILITY, 76); return 0; } sk = rose_find_listener(&facilities.source_addr, &facilities.source_call); /* * We can't accept the Call Request. */ if (sk == NULL || sk_acceptq_is_full(sk) || (make = rose_make_new(sk)) == NULL) { rose_transmit_clear_request(neigh, lci, ROSE_NETWORK_CONGESTION, 120); return 0; } skb->sk = make; make->sk_state = TCP_ESTABLISHED; make_rose = rose_sk(make); make_rose->lci = lci; make_rose->dest_addr = facilities.dest_addr; make_rose->dest_call = facilities.dest_call; make_rose->dest_ndigis = facilities.dest_ndigis; for (n = 0 ; n < facilities.dest_ndigis ; n++) make_rose->dest_digis[n] = facilities.dest_digis[n]; make_rose->source_addr = facilities.source_addr; make_rose->source_call = facilities.source_call; make_rose->source_ndigis = facilities.source_ndigis; for (n = 0 ; n < facilities.source_ndigis ; n++) make_rose->source_digis[n]= facilities.source_digis[n]; make_rose->neighbour = neigh; make_rose->device = dev; make_rose->facilities = facilities; make_rose->neighbour->use++; if (rose_sk(sk)->defer) { make_rose->state = ROSE_STATE_5; } else { rose_write_internal(make, ROSE_CALL_ACCEPTED); make_rose->state = ROSE_STATE_3; rose_start_idletimer(make); } make_rose->condition = 0x00; make_rose->vs = 0; make_rose->va = 0; make_rose->vr = 0; make_rose->vl = 0; sk->sk_ack_backlog++; rose_insert_socket(make); skb_queue_head(&sk->sk_receive_queue, skb); rose_start_heartbeat(make); if (!sock_flag(sk, SOCK_DEAD)) sk->sk_data_ready(sk, skb->len); return 1; }
{'added': [(981, '\tint n;'),
           (990, '\tif (!rose_parse_facilities(skb->data + ROSE_CALL_REQ_FACILITIES_OFF,'),
           (991, '\t\t\t\t skb->len - ROSE_CALL_REQ_FACILITIES_OFF,'),
           (992, '\t\t\t\t &facilities)) {')],
 'deleted': [(981, '\tint n, len;'),
             (990, '\tlen = (((skb->data[3] >> 4) & 0x0F) + 1) >> 1;'),
             (991, '\tlen += (((skb->data[3] >> 0) & 0x0F) + 1) >> 1;'),
             (992, '\tif (!rose_parse_facilities(skb->data + len + 4, &facilities)) {')]}
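The four-line diff above is the substance of the CVE-2011-4914 fix (CWE-20, improper input validation): the deleted code computed the facilities offset from the two address-length nibbles in skb->data[3], bytes chosen by the remote peer, and never checked the result against skb->len, so rose_parse_facilities() could be pointed past the end of the received frame. The replacement uses the fixed ROSE_CALL_REQ_FACILITIES_OFF offset and passes the remaining length, skb->len - ROSE_CALL_REQ_FACILITIES_OFF, so the parser can bound every read. A minimal, self-contained sketch of that pattern follows; FACILITIES_OFF and rose_frame_facilities are illustrative names, not the kernel's.

/*
 * Sketch of the hardened pattern: locate the facilities block from a
 * fixed offset and an explicit remaining length, rejecting frames that
 * are too short, instead of trusting a length computed from
 * attacker-controlled packet bytes.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

enum { FACILITIES_OFF = 6 };	/* illustrative fixed header size */

/* Return the facilities block and store its length in *flen, or return
 * NULL when the frame cannot contain one.  Every later read is bounded
 * by the length handed back here. */
static const uint8_t *
rose_frame_facilities(const uint8_t *frame, size_t frame_len, size_t *flen)
{
	if (frame_len < FACILITIES_OFF)	/* truncated frame: refuse to parse */
		return NULL;
	*flen = frame_len - FACILITIES_OFF;
	return frame + FACILITIES_OFF;
}

int main(void)
{
	const uint8_t short_frame[4] = { 0x10, 0x0b, 0x0f, 0xaa };
	size_t flen;

	if (!rose_frame_facilities(short_frame, sizeof short_frame, &flen))
		puts("rejected: frame shorter than the fixed header");
	return 0;
}

The essential property is that the offset is a compile-time constant and the parser receives an explicit remaining length, so no nibble values inside the frame can move the read window outside the buffer.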
4
4
1,227
7,640
https://github.com/torvalds/linux
CVE-2011-4914
['CWE-20']
lookup.c
label
/* lookup.c - implementation of IDNA2008 lookup functions Copyright (C) 2011-2017 Simon Josefsson Libidn2 is free software: you can redistribute it and/or modify it under the terms of either: * the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. or * the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. or both in parallel, as here. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received copies of the GNU General Public License and the GNU Lesser General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <config.h> #include "idn2.h" #include <errno.h> /* errno */ #include <stdlib.h> /* malloc, free */ #include "punycode.h" #include <unitypes.h> #include <uniconv.h> /* u8_strconv_from_locale */ #include <uninorm.h> /* u32_normalize */ #include <unistr.h> /* u8_to_u32 */ #include "idna.h" /* _idn2_label_test */ #include "tr46map.h" /* definition for tr46map.c */ static int set_default_flags(int *flags) { if (((*flags) & IDN2_TRANSITIONAL) && ((*flags) & IDN2_NONTRANSITIONAL)) return IDN2_INVALID_FLAGS; if (((*flags) & (IDN2_TRANSITIONAL|IDN2_NONTRANSITIONAL)) && ((*flags) & IDN2_NO_TR46)) return IDN2_INVALID_FLAGS; if (!((*flags) & (IDN2_NO_TR46|IDN2_TRANSITIONAL))) *flags |= IDN2_NONTRANSITIONAL; return IDN2_OK; } static int label (const uint8_t * src, size_t srclen, uint8_t * dst, size_t * dstlen, int flags) { size_t plen; uint32_t *p; int rc; size_t tmpl; if (_idn2_ascii_p (src, srclen)) { if (flags & IDN2_ALABEL_ROUNDTRIP) /* FIXME implement this MAY: If the input to this procedure appears to be an A-label (i.e., it starts in "xn--", interpreted case-insensitively), the lookup application MAY attempt to convert it to a U-label, first ensuring that the A-label is entirely in lowercase (converting it to lowercase if necessary), and apply the tests of Section 5.4 and the conversion of Section 5.5 to that form. */ return IDN2_INVALID_FLAGS; if (srclen > IDN2_LABEL_MAX_LENGTH) return IDN2_TOO_BIG_LABEL; if (srclen > *dstlen) return IDN2_TOO_BIG_DOMAIN; memcpy (dst, src, srclen); *dstlen = srclen; return IDN2_OK; } rc = _idn2_u8_to_u32_nfc (src, srclen, &p, &plen, flags & IDN2_NFC_INPUT); if (rc != IDN2_OK) return rc; if (!(flags & IDN2_TRANSITIONAL)) { rc = _idn2_label_test( TEST_NFC | TEST_2HYPHEN | TEST_LEADING_COMBINING | TEST_DISALLOWED | TEST_CONTEXTJ_RULE | TEST_CONTEXTO_WITH_RULE | TEST_UNASSIGNED | TEST_BIDI | ((flags & IDN2_NONTRANSITIONAL) ? TEST_NONTRANSITIONAL : 0) | ((flags & IDN2_USE_STD3_ASCII_RULES) ? 
0 : TEST_ALLOW_STD3_DISALLOWED), p, plen); if (rc != IDN2_OK) { free(p); return rc; } } dst[0] = 'x'; dst[1] = 'n'; dst[2] = '-'; dst[3] = '-'; tmpl = *dstlen - 4; rc = _idn2_punycode_encode (plen, p, &tmpl, (char *) dst + 4); free (p); if (rc != IDN2_OK) return rc; *dstlen = 4 + tmpl; return IDN2_OK; } #define TR46_TRANSITIONAL_CHECK \ (TEST_NFC | TEST_2HYPHEN | TEST_HYPHEN_STARTEND | TEST_LEADING_COMBINING | TEST_TRANSITIONAL) #define TR46_NONTRANSITIONAL_CHECK \ (TEST_NFC | TEST_2HYPHEN | TEST_HYPHEN_STARTEND | TEST_LEADING_COMBINING | TEST_NONTRANSITIONAL) static int _tr46 (const uint8_t * domain_u8, uint8_t ** out, int flags) { size_t len, it; uint32_t *domain_u32; int err = IDN2_OK, rc; int transitional = 0; int test_flags; if (flags & IDN2_TRANSITIONAL) transitional = 1; /* convert UTF-8 to UTF-32 */ if (!(domain_u32 = u8_to_u32 (domain_u8, u8_strlen (domain_u8) + 1, NULL, &len))) { if (errno == ENOMEM) return IDN2_MALLOC; return IDN2_ENCODING_ERROR; } size_t len2 = 0; for (it = 0; it < len - 1; it++) { IDNAMap map; get_idna_map (domain_u32[it], &map); if (map_is (&map, TR46_FLG_DISALLOWED)) { if (domain_u32[it]) { free (domain_u32); return IDN2_DISALLOWED; } len2++; } else if (map_is (&map, TR46_FLG_MAPPED)) { len2 += map.nmappings; } else if (map_is (&map, TR46_FLG_VALID)) { len2++; } else if (map_is (&map, TR46_FLG_IGNORED)) { continue; } else if (map_is (&map, TR46_FLG_DEVIATION)) { if (transitional) { len2 += map.nmappings; } else len2++; } else if (!(flags & IDN2_USE_STD3_ASCII_RULES)) { if (map_is (&map, TR46_FLG_DISALLOWED_STD3_VALID)) { /* valid because UseSTD3ASCIIRules=false, see #TR46 5 */ len2++; } else if (map_is (&map, TR46_FLG_DISALLOWED_STD3_MAPPED)) { /* mapped because UseSTD3ASCIIRules=false, see #TR46 5 */ len2 += map.nmappings; } } } /* Exit early if result is too long. * This avoids excessive CPU usage in punycode encoding, which is O(N^2). 
*/ if (len2 >= IDN2_DOMAIN_MAX_LENGTH) { free (domain_u32); return IDN2_TOO_BIG_DOMAIN; } uint32_t *tmp = (uint32_t *) malloc ((len2 + 1) * sizeof (uint32_t)); if (!tmp) { free (domain_u32); return IDN2_MALLOC; } len2 = 0; for (it = 0; it < len - 1; it++) { uint32_t c = domain_u32[it]; IDNAMap map; get_idna_map (c, &map); if (map_is (&map, TR46_FLG_DISALLOWED)) { tmp[len2++] = c; } else if (map_is (&map, TR46_FLG_MAPPED)) { len2 += get_map_data (tmp + len2, &map); } else if (map_is (&map, TR46_FLG_VALID)) { tmp[len2++] = c; } else if (map_is (&map, TR46_FLG_IGNORED)) { continue; } else if (map_is (&map, TR46_FLG_DEVIATION)) { if (transitional) { len2 += get_map_data (tmp + len2, &map); } else tmp[len2++] = c; } else if (!(flags & IDN2_USE_STD3_ASCII_RULES)) { if (map_is (&map, TR46_FLG_DISALLOWED_STD3_VALID)) { tmp[len2++] = c; } else if (map_is (&map, TR46_FLG_DISALLOWED_STD3_MAPPED)) { len2 += get_map_data (tmp + len2, &map); } } } free (domain_u32); /* Normalize to NFC */ tmp[len2] = 0; domain_u32 = u32_normalize (UNINORM_NFC, tmp, len2 + 1, NULL, &len); free (tmp); tmp = NULL; if (!domain_u32) { if (errno == ENOMEM) return IDN2_MALLOC; return IDN2_ENCODING_ERROR; } /* split into labels and check */ uint32_t *e, *s; for (e = s = domain_u32; *e; s = e) { while (*e && *e != '.') e++; if (e - s >= 4 && s[0] == 'x' && s[1] == 'n' && s[2] == '-' && s[3] == '-') { /* decode punycode and check result non-transitional */ size_t ace_len; uint32_t name_u32[IDN2_LABEL_MAX_LENGTH]; size_t name_len = IDN2_LABEL_MAX_LENGTH; uint8_t *ace; ace = u32_to_u8 (s + 4, e - s - 4, NULL, &ace_len); if (!ace) { free (domain_u32); if (errno == ENOMEM) return IDN2_MALLOC; return IDN2_ENCODING_ERROR; } rc = _idn2_punycode_decode (ace_len, (char *) ace, &name_len, name_u32); free (ace); if (rc) { free (domain_u32); return rc; } test_flags = TR46_NONTRANSITIONAL_CHECK; if (!(flags & IDN2_USE_STD3_ASCII_RULES)) test_flags |= TEST_ALLOW_STD3_DISALLOWED; if ((rc = _idn2_label_test (test_flags, name_u32, name_len))) err = rc; } else { test_flags = transitional ? TR46_TRANSITIONAL_CHECK : TR46_NONTRANSITIONAL_CHECK; if (!(flags & IDN2_USE_STD3_ASCII_RULES)) test_flags |= TEST_ALLOW_STD3_DISALLOWED; if ((rc = _idn2_label_test (test_flags, s, e - s))) err = rc; } if (*e) e++; } if (err == IDN2_OK && out) { uint8_t *_out = u32_to_u8 (domain_u32, len, NULL, &len); free (domain_u32); if (!_out) { if (errno == ENOMEM) return IDN2_MALLOC; return IDN2_ENCODING_ERROR; } *out = _out; } else free (domain_u32); return err; } /** * idn2_lookup_u8: * @src: input zero-terminated UTF-8 string in Unicode NFC normalized form. * @lookupname: newly allocated output variable with name to lookup in DNS. * @flags: optional #idn2_flags to modify behaviour. * * Perform IDNA2008 lookup string conversion on domain name @src, as * described in section 5 of RFC 5891. Note that the input string * must be encoded in UTF-8 and be in Unicode NFC form. * * Pass %IDN2_NFC_INPUT in @flags to convert input to NFC form before * further processing. %IDN2_TRANSITIONAL and %IDN2_NONTRANSITIONAL * do already imply %IDN2_NFC_INPUT. * Pass %IDN2_ALABEL_ROUNDTRIP in @flags to * convert any input A-labels to U-labels and perform additional * testing (not implemented yet). * Pass %IDN2_TRANSITIONAL to enable Unicode TR46 * transitional processing, and %IDN2_NONTRANSITIONAL to enable * Unicode TR46 non-transitional processing. Multiple flags may be * specified by binary or:ing them together. * * After version 2.0.3: %IDN2_USE_STD3_ASCII_RULES disabled by default. 
* Previously we were eliminating non-STD3 characters from domain strings * such as _443._tcp.example.com, or IPs 1.2.3.4/24 provided to libidn2 * functions. That was an unexpected regression for applications switching * from libidn and thus it is no longer applied by default. * Use %IDN2_USE_STD3_ASCII_RULES to enable that behavior again. * * After version 0.11: @lookupname may be NULL to test lookup of @src * without allocating memory. * * Returns: On successful conversion %IDN2_OK is returned, if the * output domain or any label would have been too long * %IDN2_TOO_BIG_DOMAIN or %IDN2_TOO_BIG_LABEL is returned, or * another error code is returned. * * Since: 0.1 **/ int idn2_lookup_u8 (const uint8_t * src, uint8_t ** lookupname, int flags) { size_t lookupnamelen = 0; uint8_t _lookupname[IDN2_DOMAIN_MAX_LENGTH + 1]; uint8_t _mapped[IDN2_DOMAIN_MAX_LENGTH + 1]; int rc; if (src == NULL) { if (lookupname) *lookupname = NULL; return IDN2_OK; } rc = set_default_flags(&flags); if (rc != IDN2_OK) return rc; if (!(flags & IDN2_NO_TR46)) { uint8_t *out; size_t outlen; rc = _tr46 (src, &out, flags); if (rc != IDN2_OK) return rc; outlen = u8_strlen (out); if (outlen >= sizeof (_mapped)) { free (out); return IDN2_TOO_BIG_DOMAIN; } memcpy (_mapped, out, outlen + 1); src = _mapped; free (out); } do { const uint8_t *end = (uint8_t *) strchrnul ((const char *) src, '.'); /* XXX Do we care about non-U+002E dots such as U+3002, U+FF0E and U+FF61 here? Perhaps when IDN2_NFC_INPUT? */ size_t labellen = end - src; uint8_t tmp[IDN2_LABEL_MAX_LENGTH]; size_t tmplen = IDN2_LABEL_MAX_LENGTH; rc = label (src, labellen, tmp, &tmplen, flags); if (rc != IDN2_OK) return rc; if (lookupnamelen + tmplen > IDN2_DOMAIN_MAX_LENGTH - (tmplen == 0 && *end == '\0' ? 1 : 2)) return IDN2_TOO_BIG_DOMAIN; memcpy (_lookupname + lookupnamelen, tmp, tmplen); lookupnamelen += tmplen; if (*end == '.') { if (lookupnamelen + 1 > IDN2_DOMAIN_MAX_LENGTH) return IDN2_TOO_BIG_DOMAIN; _lookupname[lookupnamelen] = '.'; lookupnamelen++; } _lookupname[lookupnamelen] = '\0'; src = end; } while (*src++); if (lookupname) { uint8_t *tmp = (uint8_t *) malloc (lookupnamelen + 1); if (tmp == NULL) return IDN2_MALLOC; memcpy (tmp, _lookupname, lookupnamelen + 1); *lookupname = tmp; } return IDN2_OK; } /** * idn2_lookup_ul: * @src: input zero-terminated locale encoded string. * @lookupname: newly allocated output variable with name to lookup in DNS. * @flags: optional #idn2_flags to modify behaviour. * * Perform IDNA2008 lookup string conversion on domain name @src, as * described in section 5 of RFC 5891. Note that the input is assumed * to be encoded in the locale's default coding system, and will be * transcoded to UTF-8 and NFC normalized by this function. * * Pass %IDN2_ALABEL_ROUNDTRIP in @flags to convert any input A-labels * to U-labels and perform additional testing. Pass * %IDN2_TRANSITIONAL to enable Unicode TR46 transitional processing, * and %IDN2_NONTRANSITIONAL to enable Unicode TR46 non-transitional * processing. Multiple flags may be specified by binary or:ing them * together, for example %IDN2_ALABEL_ROUNDTRIP | * %IDN2_NONTRANSITIONAL. The %IDN2_NFC_INPUT in @flags is always * enabled in this function. * * After version 0.11: @lookupname may be NULL to test lookup of @src * without allocating memory. 
* * Returns: On successful conversion %IDN2_OK is returned, if * conversion from locale to UTF-8 fails then %IDN2_ICONV_FAIL is * returned, if the output domain or any label would have been too * long %IDN2_TOO_BIG_DOMAIN or %IDN2_TOO_BIG_LABEL is returned, or * another error code is returned. * * Since: 0.1 **/ int idn2_lookup_ul (const char * src, char ** lookupname, int flags) { uint8_t *utf8src = NULL; int rc; if (src) { const char *encoding = locale_charset (); utf8src = u8_strconv_from_encoding (src, encoding, iconveh_error); if (!utf8src) { if (errno == ENOMEM) return IDN2_MALLOC; return IDN2_ICONV_FAIL; } } rc = idn2_lookup_u8 (utf8src, (uint8_t **) lookupname, flags | IDN2_NFC_INPUT); free (utf8src); return rc; } /** * idn2_to_ascii_4i: * @input: zero terminated input Unicode (UCS-4) string. * @inlen: number of elements in @input. * @output: output zero terminated string that must have room for at least 63 characters plus the terminating zero. * @flags: optional #idn2_flags to modify behaviour. * * THIS FUNCTION HAS BEEN DEPRECATED DUE TO A DESIGN FLAW. USE idn2_to_ascii_4i2() INSTEAD ! * * The ToASCII operation takes a sequence of Unicode code points that make * up one domain label and transforms it into a sequence of code points in * the ASCII range (0..7F). If ToASCII succeeds, the original sequence and * the resulting sequence are equivalent labels. * * It is important to note that the ToASCII operation can fail. * ToASCII fails if any step of it fails. If any step of the * ToASCII operation fails on any label in a domain name, that domain * name MUST NOT be used as an internationalized domain name. * The method for dealing with this failure is application-specific. * * The inputs to ToASCII are a sequence of code points. * * ToASCII never alters a sequence of code points that are all in the ASCII * range to begin with (although it could fail). Applying the ToASCII operation multiple * effect as applying it just once. * * The default behavior of this function (when flags are zero) is to apply * the IDNA2008 rules without the TR46 amendments. As the TR46 * non-transitional processing is nowadays ubiquitous, when unsure, it is * recommended to call this function with the %IDN2_NONTRANSITIONAL * and the %IDN2_NFC_INPUT flags for compatibility with other software. * * Return value: Returns %IDN2_OK on success, or error code. * * Since: 2.0.0 **/ int idn2_to_ascii_4i (const uint32_t * input, size_t inlen, char * output, int flags) { char *out; int rc; if (!input) { if (output) *output = 0; return IDN2_OK; } rc = idn2_to_ascii_4i2 (input, inlen, &out, flags); if (rc == IDN2_OK) { size_t len = strlen(out); if (len > 63) rc = IDN2_TOO_BIG_DOMAIN; else if (output) memcpy (output, out, len); free (out); } return rc; } /** * idn2_to_ascii_4i: * @input: zero terminated input Unicode (UCS-4) string. * @inlen: number of elements in @input. * @output: pointer to newly allocated zero-terminated output string. * @flags: optional #idn2_flags to modify behaviour. * * The ToASCII operation takes a sequence of Unicode code points that make * up one domain label and transforms it into a sequence of code points in * the ASCII range (0..7F). If ToASCII succeeds, the original sequence and * the resulting sequence are equivalent labels. * * It is important to note that the ToASCII operation can fail. * ToASCII fails if any step of it fails. If any step of the * ToASCII operation fails on any label in a domain name, that domain * name MUST NOT be used as an internationalized domain name. 
* The method for dealing with this failure is application-specific. * * The inputs to ToASCII are a sequence of code points. * * ToASCII never alters a sequence of code points that are all in the ASCII * range to begin with (although it could fail). Applying the ToASCII operation multiple * effect as applying it just once. * * The default behavior of this function (when flags are zero) is to apply * the IDNA2008 rules without the TR46 amendments. As the TR46 * non-transitional processing is nowadays ubiquitous, when unsure, it is * recommended to call this function with the %IDN2_NONTRANSITIONAL * and the %IDN2_NFC_INPUT flags for compatibility with other software. * * Return value: Returns %IDN2_OK on success, or error code. * * Since: 2.1.1 **/ int idn2_to_ascii_4i2 (const uint32_t * input, size_t inlen, char ** output, int flags) { uint32_t *input_u32; uint8_t *input_u8, *output_u8; size_t length; int rc; if (!input) { if (output) *output = NULL; return IDN2_OK; } input_u32 = (uint32_t *) malloc ((inlen + 1) * sizeof(uint32_t)); if (!input_u32) return IDN2_MALLOC; u32_cpy (input_u32, input, inlen); input_u32[inlen] = 0; input_u8 = u32_to_u8 (input_u32, inlen + 1, NULL, &length); free (input_u32); if (!input_u8) { if (errno == ENOMEM) return IDN2_MALLOC; return IDN2_ENCODING_ERROR; } rc = idn2_lookup_u8 (input_u8, &output_u8, flags); free (input_u8); if (rc == IDN2_OK) { if (output) *output = (char *) output_u8; else free (output_u8); } return rc; } /** * idn2_to_ascii_4z: * @input: zero terminated input Unicode (UCS-4) string. * @output: pointer to newly allocated zero-terminated output string. * @flags: optional #idn2_flags to modify behaviour. * * Convert UCS-4 domain name to ASCII string using the IDNA2008 * rules. The domain name may contain several labels, separated by dots. * The output buffer must be deallocated by the caller. * * The default behavior of this function (when flags are zero) is to apply * the IDNA2008 rules without the TR46 amendments. As the TR46 * non-transitional processing is nowadays ubiquitous, when unsure, it is * recommended to call this function with the %IDN2_NONTRANSITIONAL * and the %IDN2_NFC_INPUT flags for compatibility with other software. * * Return value: Returns %IDN2_OK on success, or error code. * * Since: 2.0.0 **/ int idn2_to_ascii_4z (const uint32_t * input, char ** output, int flags) { uint8_t *input_u8; size_t length; int rc; if (!input) { if (output) *output = NULL; return IDN2_OK; } input_u8 = u32_to_u8 (input, u32_strlen(input) + 1, NULL, &length); if (!input_u8) { if (errno == ENOMEM) return IDN2_MALLOC; return IDN2_ENCODING_ERROR; } rc = idn2_lookup_u8 (input_u8, (uint8_t **) output, flags); free (input_u8); return rc; } /** * idn2_to_ascii_8z: * @input: zero terminated input UTF-8 string. * @output: pointer to newly allocated output string. * @flags: optional #idn2_flags to modify behaviour. * * Convert UTF-8 domain name to ASCII string using the IDNA2008 * rules. The domain name may contain several labels, separated by dots. * The output buffer must be deallocated by the caller. * * The default behavior of this function (when flags are zero) is to apply * the IDNA2008 rules without the TR46 amendments. As the TR46 * non-transitional processing is nowadays ubiquitous, when unsure, it is * recommended to call this function with the %IDN2_NONTRANSITIONAL * and the %IDN2_NFC_INPUT flags for compatibility with other software. * * Return value: Returns %IDN2_OK on success, or error code. 
* * Since: 2.0.0 **/ int idn2_to_ascii_8z (const char * input, char ** output, int flags) { return idn2_lookup_u8 ((const uint8_t *) input, (uint8_t **) output, flags); } /** * idn2_to_ascii_lz: * @input: zero terminated input UTF-8 string. * @output: pointer to newly allocated output string. * @flags: optional #idn2_flags to modify behaviour. * * Convert a domain name in locale's encoding to ASCII string using the IDNA2008 * rules. The domain name may contain several labels, separated by dots. * The output buffer must be deallocated by the caller. * * The default behavior of this function (when flags are zero) is to apply * the IDNA2008 rules without the TR46 amendments. As the TR46 * non-transitional processing is nowadays ubiquitous, when unsure, it is * recommended to call this function with the %IDN2_NONTRANSITIONAL * and the %IDN2_NFC_INPUT flags for compatibility with other software. * * Returns: %IDN2_OK on success, or error code. * Same as described in idn2_lookup_ul() documentation. * * Since: 2.0.0 **/ int idn2_to_ascii_lz (const char * input, char ** output, int flags) { return idn2_lookup_ul (input, output, flags); }
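The lookup.c version above leaves the RFC 5891 A-label roundtrip as a FIXME: its label() rejects IDN2_ALABEL_ROUNDTRIP outright with IDN2_INVALID_FLAGS. The patched version that follows implements the check, and enables it by default: an incoming "xn--" label is decoded with _idn2_punycode_decode(), converted back to UTF-8, run through the normal U-label tests and re-encoding, and finally compared byte-for-byte with the original (srclen_org != *dstlen || memcmp(src_org, dst, srclen_org)), returning IDN2_ALABEL_ROUNDTRIP_FAILED on mismatch. A new IDN2_NO_ALABEL_ROUNDTRIP flag opts out, and set_default_flags() rejects the contradictory combination of the two flags. Below is a toy demonstration of the roundtrip idea, with hex standing in for punycode purely so the example is self-contained.

/* Toy roundtrip check: an encoded label is accepted only if decoding it
 * and re-encoding the result reproduces the input exactly.  Hex is the
 * stand-in codec here; libidn2 does this with punycode. */
#include <stdio.h>
#include <string.h>

static size_t toy_decode(const char *in, size_t inlen, unsigned char *out)
{
	size_t n = 0;
	for (size_t i = 0; i + 1 < inlen; i += 2) {
		unsigned v = 0;
		sscanf(in + i, "%2x", &v);	/* parse one byte per hex pair */
		out[n++] = (unsigned char) v;
	}
	return n;
}

static size_t toy_encode(const unsigned char *in, size_t inlen, char *out)
{
	for (size_t i = 0; i < inlen; i++)
		sprintf(out + 2 * i, "%02x", (unsigned) in[i]);
	return 2 * inlen;
}

/* Accept an encoded label only if decode -> encode is the identity. */
static int roundtrips(const char *label, size_t len)
{
	unsigned char u[128];
	char re[256];
	size_t ulen = toy_decode(label, len, u);
	size_t relen = toy_encode(u, ulen, re);
	return relen == len && memcmp(label, re, len) == 0;
}

int main(void)
{
	printf("%d\n", roundtrips("6162", 4));	/* 1: decodes to "ab", re-encodes identically */
	printf("%d\n", roundtrips("6A62", 4));	/* 0: uppercase form is not the canonical encoding */
	return 0;
}

The byte-for-byte comparison catches mixed-case and otherwise non-canonical encodings, so two distinct A-labels can never alias the same U-label, which is exactly what the check_roundtrip block at the end of the patched label() enforces.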
/* lookup.c - implementation of IDNA2008 lookup functions Copyright (C) 2011-2017 Simon Josefsson Libidn2 is free software: you can redistribute it and/or modify it under the terms of either: * the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. or * the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. or both in parallel, as here. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received copies of the GNU General Public License and the GNU Lesser General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <config.h> #include "idn2.h" #include <errno.h> /* errno */ #include <stdlib.h> /* malloc, free */ #include "punycode.h" #include <unitypes.h> #include <uniconv.h> /* u8_strconv_from_locale */ #include <uninorm.h> /* u32_normalize */ #include <unistr.h> /* u8_to_u32 */ #include "idna.h" /* _idn2_label_test */ #include "tr46map.h" /* definition for tr46map.c */ static int set_default_flags(int *flags) { if (((*flags) & IDN2_TRANSITIONAL) && ((*flags) & IDN2_NONTRANSITIONAL)) return IDN2_INVALID_FLAGS; if (((*flags) & (IDN2_TRANSITIONAL|IDN2_NONTRANSITIONAL)) && ((*flags) & IDN2_NO_TR46)) return IDN2_INVALID_FLAGS; if (((*flags) & IDN2_ALABEL_ROUNDTRIP) && ((*flags) & IDN2_NO_ALABEL_ROUNDTRIP)) return IDN2_INVALID_FLAGS; if (!((*flags) & (IDN2_NO_TR46|IDN2_TRANSITIONAL))) *flags |= IDN2_NONTRANSITIONAL; return IDN2_OK; } static int label (const uint8_t * src, size_t srclen, uint8_t * dst, size_t * dstlen, int flags) { size_t plen; uint32_t *p; const uint8_t *src_org = NULL; uint8_t *src_allocated = NULL; int rc, check_roundtrip = 0; size_t tmpl, srclen_org = 0; uint32_t label_u32[IDN2_LABEL_MAX_LENGTH]; size_t label32_len = IDN2_LABEL_MAX_LENGTH; if (_idn2_ascii_p (src, srclen)) { if (!(flags & IDN2_NO_ALABEL_ROUNDTRIP) && srclen >= 4 && memcmp (src, "xn--", 4) == 0) { /* If the input to this procedure appears to be an A-label (i.e., it starts in "xn--", interpreted case-insensitively), the lookup application MAY attempt to convert it to a U-label, first ensuring that the A-label is entirely in lowercase (converting it to lowercase if necessary), and apply the tests of Section 5.4 and the conversion of Section 5.5 to that form. */ rc = _idn2_punycode_decode (srclen - 4, (char *) src + 4, &label32_len, label_u32); if (rc) return rc; check_roundtrip = 1; src_org = src; srclen_org = srclen; srclen = IDN2_LABEL_MAX_LENGTH; src = src_allocated = u32_to_u8 (label_u32, label32_len, NULL, &srclen); if (!src) { if (errno == ENOMEM) return IDN2_MALLOC; return IDN2_ENCODING_ERROR; } } else { if (srclen > IDN2_LABEL_MAX_LENGTH) return IDN2_TOO_BIG_LABEL; if (srclen > *dstlen) return IDN2_TOO_BIG_DOMAIN; memcpy (dst, src, srclen); *dstlen = srclen; return IDN2_OK; } } rc = _idn2_u8_to_u32_nfc (src, srclen, &p, &plen, flags & IDN2_NFC_INPUT); if (rc != IDN2_OK) goto out; if (!(flags & IDN2_TRANSITIONAL)) { rc = _idn2_label_test( TEST_NFC | TEST_2HYPHEN | TEST_LEADING_COMBINING | TEST_DISALLOWED | TEST_CONTEXTJ_RULE | TEST_CONTEXTO_WITH_RULE | TEST_UNASSIGNED | TEST_BIDI | ((flags & IDN2_NONTRANSITIONAL) ? 
TEST_NONTRANSITIONAL : 0) | ((flags & IDN2_USE_STD3_ASCII_RULES) ? 0 : TEST_ALLOW_STD3_DISALLOWED), p, plen); if (rc != IDN2_OK) { free (p); goto out; } } dst[0] = 'x'; dst[1] = 'n'; dst[2] = '-'; dst[3] = '-'; tmpl = *dstlen - 4; rc = _idn2_punycode_encode (plen, p, &tmpl, (char *) dst + 4); free (p); if (rc != IDN2_OK) goto out; *dstlen = 4 + tmpl; if (check_roundtrip) { if (srclen_org != *dstlen || memcmp (src_org, dst, srclen_org)) { rc = IDN2_ALABEL_ROUNDTRIP_FAILED; goto out; } } rc = IDN2_OK; out: free (src_allocated); return rc; } #define TR46_TRANSITIONAL_CHECK \ (TEST_NFC | TEST_2HYPHEN | TEST_HYPHEN_STARTEND | TEST_LEADING_COMBINING | TEST_TRANSITIONAL) #define TR46_NONTRANSITIONAL_CHECK \ (TEST_NFC | TEST_2HYPHEN | TEST_HYPHEN_STARTEND | TEST_LEADING_COMBINING | TEST_NONTRANSITIONAL) static int _tr46 (const uint8_t * domain_u8, uint8_t ** out, int flags) { size_t len, it; uint32_t *domain_u32; int err = IDN2_OK, rc; int transitional = 0; int test_flags; if (flags & IDN2_TRANSITIONAL) transitional = 1; /* convert UTF-8 to UTF-32 */ if (!(domain_u32 = u8_to_u32 (domain_u8, u8_strlen (domain_u8) + 1, NULL, &len))) { if (errno == ENOMEM) return IDN2_MALLOC; return IDN2_ENCODING_ERROR; } size_t len2 = 0; for (it = 0; it < len - 1; it++) { IDNAMap map; get_idna_map (domain_u32[it], &map); if (map_is (&map, TR46_FLG_DISALLOWED)) { if (domain_u32[it]) { free (domain_u32); return IDN2_DISALLOWED; } len2++; } else if (map_is (&map, TR46_FLG_MAPPED)) { len2 += map.nmappings; } else if (map_is (&map, TR46_FLG_VALID)) { len2++; } else if (map_is (&map, TR46_FLG_IGNORED)) { continue; } else if (map_is (&map, TR46_FLG_DEVIATION)) { if (transitional) { len2 += map.nmappings; } else len2++; } else if (!(flags & IDN2_USE_STD3_ASCII_RULES)) { if (map_is (&map, TR46_FLG_DISALLOWED_STD3_VALID)) { /* valid because UseSTD3ASCIIRules=false, see #TR46 5 */ len2++; } else if (map_is (&map, TR46_FLG_DISALLOWED_STD3_MAPPED)) { /* mapped because UseSTD3ASCIIRules=false, see #TR46 5 */ len2 += map.nmappings; } } } /* Exit early if result is too long. * This avoids excessive CPU usage in punycode encoding, which is O(N^2). 
*/ if (len2 >= IDN2_DOMAIN_MAX_LENGTH) { free (domain_u32); return IDN2_TOO_BIG_DOMAIN; } uint32_t *tmp = (uint32_t *) malloc ((len2 + 1) * sizeof (uint32_t)); if (!tmp) { free (domain_u32); return IDN2_MALLOC; } len2 = 0; for (it = 0; it < len - 1; it++) { uint32_t c = domain_u32[it]; IDNAMap map; get_idna_map (c, &map); if (map_is (&map, TR46_FLG_DISALLOWED)) { tmp[len2++] = c; } else if (map_is (&map, TR46_FLG_MAPPED)) { len2 += get_map_data (tmp + len2, &map); } else if (map_is (&map, TR46_FLG_VALID)) { tmp[len2++] = c; } else if (map_is (&map, TR46_FLG_IGNORED)) { continue; } else if (map_is (&map, TR46_FLG_DEVIATION)) { if (transitional) { len2 += get_map_data (tmp + len2, &map); } else tmp[len2++] = c; } else if (!(flags & IDN2_USE_STD3_ASCII_RULES)) { if (map_is (&map, TR46_FLG_DISALLOWED_STD3_VALID)) { tmp[len2++] = c; } else if (map_is (&map, TR46_FLG_DISALLOWED_STD3_MAPPED)) { len2 += get_map_data (tmp + len2, &map); } } } free (domain_u32); /* Normalize to NFC */ tmp[len2] = 0; domain_u32 = u32_normalize (UNINORM_NFC, tmp, len2 + 1, NULL, &len); free (tmp); tmp = NULL; if (!domain_u32) { if (errno == ENOMEM) return IDN2_MALLOC; return IDN2_ENCODING_ERROR; } /* split into labels and check */ uint32_t *e, *s; for (e = s = domain_u32; *e; s = e) { while (*e && *e != '.') e++; if (e - s >= 4 && s[0] == 'x' && s[1] == 'n' && s[2] == '-' && s[3] == '-') { /* decode punycode and check result non-transitional */ size_t ace_len; uint32_t name_u32[IDN2_LABEL_MAX_LENGTH]; size_t name_len = IDN2_LABEL_MAX_LENGTH; uint8_t *ace; ace = u32_to_u8 (s + 4, e - s - 4, NULL, &ace_len); if (!ace) { free (domain_u32); if (errno == ENOMEM) return IDN2_MALLOC; return IDN2_ENCODING_ERROR; } rc = _idn2_punycode_decode (ace_len, (char *) ace, &name_len, name_u32); free (ace); if (rc) { free (domain_u32); return rc; } test_flags = TR46_NONTRANSITIONAL_CHECK; if (!(flags & IDN2_USE_STD3_ASCII_RULES)) test_flags |= TEST_ALLOW_STD3_DISALLOWED; if ((rc = _idn2_label_test (test_flags, name_u32, name_len))) err = rc; } else { test_flags = transitional ? TR46_TRANSITIONAL_CHECK : TR46_NONTRANSITIONAL_CHECK; if (!(flags & IDN2_USE_STD3_ASCII_RULES)) test_flags |= TEST_ALLOW_STD3_DISALLOWED; if ((rc = _idn2_label_test (test_flags, s, e - s))) err = rc; } if (*e) e++; } if (err == IDN2_OK && out) { uint8_t *_out = u32_to_u8 (domain_u32, len, NULL, &len); free (domain_u32); if (!_out) { if (errno == ENOMEM) return IDN2_MALLOC; return IDN2_ENCODING_ERROR; } *out = _out; } else free (domain_u32); return err; } /** * idn2_lookup_u8: * @src: input zero-terminated UTF-8 string in Unicode NFC normalized form. * @lookupname: newly allocated output variable with name to lookup in DNS. * @flags: optional #idn2_flags to modify behaviour. * * Perform IDNA2008 lookup string conversion on domain name @src, as * described in section 5 of RFC 5891. Note that the input string * must be encoded in UTF-8 and be in Unicode NFC form. * * Pass %IDN2_NFC_INPUT in @flags to convert input to NFC form before * further processing. %IDN2_TRANSITIONAL and %IDN2_NONTRANSITIONAL * do already imply %IDN2_NFC_INPUT. * * Pass %IDN2_ALABEL_ROUNDTRIP in @flags to * convert any input A-labels to U-labels and perform additional * testing. This is default since version 2.2. * To switch this behavior off, pass IDN2_NO_ALABEL_ROUNDTRIP * * Pass %IDN2_TRANSITIONAL to enable Unicode TR46 * transitional processing, and %IDN2_NONTRANSITIONAL to enable * Unicode TR46 non-transitional processing. 
* * Multiple flags may be specified by binary or:ing them together. * * After version 2.0.3: %IDN2_USE_STD3_ASCII_RULES disabled by default. * Previously we were eliminating non-STD3 characters from domain strings * such as _443._tcp.example.com, or IPs 1.2.3.4/24 provided to libidn2 * functions. That was an unexpected regression for applications switching * from libidn and thus it is no longer applied by default. * Use %IDN2_USE_STD3_ASCII_RULES to enable that behavior again. * * After version 0.11: @lookupname may be NULL to test lookup of @src * without allocating memory. * * Returns: On successful conversion %IDN2_OK is returned, if the * output domain or any label would have been too long * %IDN2_TOO_BIG_DOMAIN or %IDN2_TOO_BIG_LABEL is returned, or * another error code is returned. * * Since: 0.1 **/ int idn2_lookup_u8 (const uint8_t * src, uint8_t ** lookupname, int flags) { size_t lookupnamelen = 0; uint8_t _lookupname[IDN2_DOMAIN_MAX_LENGTH + 1]; uint8_t _mapped[IDN2_DOMAIN_MAX_LENGTH + 1]; int rc; if (src == NULL) { if (lookupname) *lookupname = NULL; return IDN2_OK; } rc = set_default_flags(&flags); if (rc != IDN2_OK) return rc; if (!(flags & IDN2_NO_TR46)) { uint8_t *out; size_t outlen; rc = _tr46 (src, &out, flags); if (rc != IDN2_OK) return rc; outlen = u8_strlen (out); if (outlen >= sizeof (_mapped)) { free (out); return IDN2_TOO_BIG_DOMAIN; } memcpy (_mapped, out, outlen + 1); src = _mapped; free (out); } do { const uint8_t *end = (uint8_t *) strchrnul ((const char *) src, '.'); /* XXX Do we care about non-U+002E dots such as U+3002, U+FF0E and U+FF61 here? Perhaps when IDN2_NFC_INPUT? */ size_t labellen = end - src; uint8_t tmp[IDN2_LABEL_MAX_LENGTH]; size_t tmplen = IDN2_LABEL_MAX_LENGTH; rc = label (src, labellen, tmp, &tmplen, flags); if (rc != IDN2_OK) return rc; if (lookupnamelen + tmplen > IDN2_DOMAIN_MAX_LENGTH - (tmplen == 0 && *end == '\0' ? 1 : 2)) return IDN2_TOO_BIG_DOMAIN; memcpy (_lookupname + lookupnamelen, tmp, tmplen); lookupnamelen += tmplen; if (*end == '.') { if (lookupnamelen + 1 > IDN2_DOMAIN_MAX_LENGTH) return IDN2_TOO_BIG_DOMAIN; _lookupname[lookupnamelen] = '.'; lookupnamelen++; } _lookupname[lookupnamelen] = '\0'; src = end; } while (*src++); if (lookupname) { uint8_t *tmp = (uint8_t *) malloc (lookupnamelen + 1); if (tmp == NULL) return IDN2_MALLOC; memcpy (tmp, _lookupname, lookupnamelen + 1); *lookupname = tmp; } return IDN2_OK; } /** * idn2_lookup_ul: * @src: input zero-terminated locale encoded string. * @lookupname: newly allocated output variable with name to lookup in DNS. * @flags: optional #idn2_flags to modify behaviour. * * Perform IDNA2008 lookup string conversion on domain name @src, as * described in section 5 of RFC 5891. Note that the input is assumed * to be encoded in the locale's default coding system, and will be * transcoded to UTF-8 and NFC normalized by this function. * * Pass %IDN2_ALABEL_ROUNDTRIP in @flags to * convert any input A-labels to U-labels and perform additional * testing. This is default since version 2.2. * To switch this behavior off, pass IDN2_NO_ALABEL_ROUNDTRIP * * Pass %IDN2_TRANSITIONAL to enable Unicode TR46 transitional processing, * and %IDN2_NONTRANSITIONAL to enable Unicode TR46 non-transitional * processing. * * Multiple flags may be specified by binary or:ing them together, for * example %IDN2_ALABEL_ROUNDTRIP | %IDN2_NONTRANSITIONAL. * * The %IDN2_NFC_INPUT in @flags is always enabled in this function. 
* * After version 0.11: @lookupname may be NULL to test lookup of @src * without allocating memory. * * Returns: On successful conversion %IDN2_OK is returned, if * conversion from locale to UTF-8 fails then %IDN2_ICONV_FAIL is * returned, if the output domain or any label would have been too * long %IDN2_TOO_BIG_DOMAIN or %IDN2_TOO_BIG_LABEL is returned, or * another error code is returned. * * Since: 0.1 **/ int idn2_lookup_ul (const char * src, char ** lookupname, int flags) { uint8_t *utf8src = NULL; int rc; if (src) { const char *encoding = locale_charset (); utf8src = u8_strconv_from_encoding (src, encoding, iconveh_error); if (!utf8src) { if (errno == ENOMEM) return IDN2_MALLOC; return IDN2_ICONV_FAIL; } } rc = idn2_lookup_u8 (utf8src, (uint8_t **) lookupname, flags | IDN2_NFC_INPUT); free (utf8src); return rc; } /** * idn2_to_ascii_4i: * @input: zero terminated input Unicode (UCS-4) string. * @inlen: number of elements in @input. * @output: output zero terminated string that must have room for at least 63 characters plus the terminating zero. * @flags: optional #idn2_flags to modify behaviour. * * THIS FUNCTION HAS BEEN DEPRECATED DUE TO A DESIGN FLAW. USE idn2_to_ascii_4i2() INSTEAD ! * * The ToASCII operation takes a sequence of Unicode code points that make * up one domain label and transforms it into a sequence of code points in * the ASCII range (0..7F). If ToASCII succeeds, the original sequence and * the resulting sequence are equivalent labels. * * It is important to note that the ToASCII operation can fail. * ToASCII fails if any step of it fails. If any step of the * ToASCII operation fails on any label in a domain name, that domain * name MUST NOT be used as an internationalized domain name. * The method for dealing with this failure is application-specific. * * The inputs to ToASCII are a sequence of code points. * * ToASCII never alters a sequence of code points that are all in the ASCII * range to begin with (although it could fail). Applying the ToASCII operation multiple * times has exactly the same * effect as applying it just once. * * The default behavior of this function (when flags are zero) is to apply * the IDNA2008 rules without the TR46 amendments. As the TR46 * non-transitional processing is nowadays ubiquitous, when unsure, it is * recommended to call this function with the %IDN2_NONTRANSITIONAL * and the %IDN2_NFC_INPUT flags for compatibility with other software. * * Return value: Returns %IDN2_OK on success, or error code. * * Since: 2.0.0 **/ int idn2_to_ascii_4i (const uint32_t * input, size_t inlen, char * output, int flags) { char *out; int rc; if (!input) { if (output) *output = 0; return IDN2_OK; } rc = idn2_to_ascii_4i2 (input, inlen, &out, flags); if (rc == IDN2_OK) { size_t len = strlen(out); if (len > 63) rc = IDN2_TOO_BIG_DOMAIN; else if (output) memcpy (output, out, len); free (out); } return rc; } /** * idn2_to_ascii_4i2: * @input: zero terminated input Unicode (UCS-4) string. * @inlen: number of elements in @input. * @output: pointer to newly allocated zero-terminated output string. * @flags: optional #idn2_flags to modify behaviour. * * The ToASCII operation takes a sequence of Unicode code points that make * up one domain label and transforms it into a sequence of code points in * the ASCII range (0..7F). If ToASCII succeeds, the original sequence and * the resulting sequence are equivalent labels. * * It is important to note that the ToASCII operation can fail. * ToASCII fails if any step of it fails.
If any step of the * ToASCII operation fails on any label in a domain name, that domain * name MUST NOT be used as an internationalized domain name. * The method for dealing with this failure is application-specific. * * The inputs to ToASCII are a sequence of code points. * * ToASCII never alters a sequence of code points that are all in the ASCII * range to begin with (although it could fail). Applying the ToASCII operation multiple * times has exactly the same * effect as applying it just once. * * The default behavior of this function (when flags are zero) is to apply * the IDNA2008 rules without the TR46 amendments. As the TR46 * non-transitional processing is nowadays ubiquitous, when unsure, it is * recommended to call this function with the %IDN2_NONTRANSITIONAL * and the %IDN2_NFC_INPUT flags for compatibility with other software. * * Return value: Returns %IDN2_OK on success, or error code. * * Since: 2.1.1 **/ int idn2_to_ascii_4i2 (const uint32_t * input, size_t inlen, char ** output, int flags) { uint32_t *input_u32; uint8_t *input_u8, *output_u8; size_t length; int rc; if (!input) { if (output) *output = NULL; return IDN2_OK; } input_u32 = (uint32_t *) malloc ((inlen + 1) * sizeof(uint32_t)); if (!input_u32) return IDN2_MALLOC; u32_cpy (input_u32, input, inlen); input_u32[inlen] = 0; input_u8 = u32_to_u8 (input_u32, inlen + 1, NULL, &length); free (input_u32); if (!input_u8) { if (errno == ENOMEM) return IDN2_MALLOC; return IDN2_ENCODING_ERROR; } rc = idn2_lookup_u8 (input_u8, &output_u8, flags); free (input_u8); if (rc == IDN2_OK) { if (output) *output = (char *) output_u8; else free (output_u8); } return rc; } /** * idn2_to_ascii_4z: * @input: zero terminated input Unicode (UCS-4) string. * @output: pointer to newly allocated zero-terminated output string. * @flags: optional #idn2_flags to modify behaviour. * * Convert UCS-4 domain name to ASCII string using the IDNA2008 * rules. The domain name may contain several labels, separated by dots. * The output buffer must be deallocated by the caller. * * The default behavior of this function (when flags are zero) is to apply * the IDNA2008 rules without the TR46 amendments. As the TR46 * non-transitional processing is nowadays ubiquitous, when unsure, it is * recommended to call this function with the %IDN2_NONTRANSITIONAL * and the %IDN2_NFC_INPUT flags for compatibility with other software. * * Return value: Returns %IDN2_OK on success, or error code. * * Since: 2.0.0 **/ int idn2_to_ascii_4z (const uint32_t * input, char ** output, int flags) { uint8_t *input_u8; size_t length; int rc; if (!input) { if (output) *output = NULL; return IDN2_OK; } input_u8 = u32_to_u8 (input, u32_strlen(input) + 1, NULL, &length); if (!input_u8) { if (errno == ENOMEM) return IDN2_MALLOC; return IDN2_ENCODING_ERROR; } rc = idn2_lookup_u8 (input_u8, (uint8_t **) output, flags); free (input_u8); return rc; } /** * idn2_to_ascii_8z: * @input: zero terminated input UTF-8 string. * @output: pointer to newly allocated output string. * @flags: optional #idn2_flags to modify behaviour. * * Convert UTF-8 domain name to ASCII string using the IDNA2008 * rules. The domain name may contain several labels, separated by dots. * The output buffer must be deallocated by the caller. * * The default behavior of this function (when flags are zero) is to apply * the IDNA2008 rules without the TR46 amendments.
As the TR46 * non-transitional processing is nowadays ubiquitous, when unsure, it is * recommended to call this function with the %IDN2_NONTRANSITIONAL * and the %IDN2_NFC_INPUT flags for compatibility with other software. * * Return value: Returns %IDN2_OK on success, or error code. * * Since: 2.0.0 **/ int idn2_to_ascii_8z (const char * input, char ** output, int flags) { return idn2_lookup_u8 ((const uint8_t *) input, (uint8_t **) output, flags); } /** * idn2_to_ascii_lz: * @input: zero terminated input UTF-8 string. * @output: pointer to newly allocated output string. * @flags: optional #idn2_flags to modify behaviour. * * Convert a domain name in locale's encoding to ASCII string using the IDNA2008 * rules. The domain name may contain several labels, separated by dots. * The output buffer must be deallocated by the caller. * * The default behavior of this function (when flags are zero) is to apply * the IDNA2008 rules without the TR46 amendments. As the TR46 * non-transitional processing is nowadays ubiquitous, when unsure, it is * recommended to call this function with the %IDN2_NONTRANSITIONAL * and the %IDN2_NFC_INPUT flags for compatibility with other software. * * Returns: %IDN2_OK on success, or error code. * Same as described in idn2_lookup_ul() documentation. * * Since: 2.0.0 **/ int idn2_to_ascii_lz (const char * input, char ** output, int flags) { return idn2_lookup_ul (input, output, flags); }
label (const uint8_t * src, size_t srclen, uint8_t * dst, size_t * dstlen, int flags) { size_t plen; uint32_t *p; int rc; size_t tmpl; if (_idn2_ascii_p (src, srclen)) { if (flags & IDN2_ALABEL_ROUNDTRIP) /* FIXME implement this MAY: If the input to this procedure appears to be an A-label (i.e., it starts in "xn--", interpreted case-insensitively), the lookup application MAY attempt to convert it to a U-label, first ensuring that the A-label is entirely in lowercase (converting it to lowercase if necessary), and apply the tests of Section 5.4 and the conversion of Section 5.5 to that form. */ return IDN2_INVALID_FLAGS; if (srclen > IDN2_LABEL_MAX_LENGTH) return IDN2_TOO_BIG_LABEL; if (srclen > *dstlen) return IDN2_TOO_BIG_DOMAIN; memcpy (dst, src, srclen); *dstlen = srclen; return IDN2_OK; } rc = _idn2_u8_to_u32_nfc (src, srclen, &p, &plen, flags & IDN2_NFC_INPUT); if (rc != IDN2_OK) return rc; if (!(flags & IDN2_TRANSITIONAL)) { rc = _idn2_label_test( TEST_NFC | TEST_2HYPHEN | TEST_LEADING_COMBINING | TEST_DISALLOWED | TEST_CONTEXTJ_RULE | TEST_CONTEXTO_WITH_RULE | TEST_UNASSIGNED | TEST_BIDI | ((flags & IDN2_NONTRANSITIONAL) ? TEST_NONTRANSITIONAL : 0) | ((flags & IDN2_USE_STD3_ASCII_RULES) ? 0 : TEST_ALLOW_STD3_DISALLOWED), p, plen); if (rc != IDN2_OK) { free(p); return rc; } } dst[0] = 'x'; dst[1] = 'n'; dst[2] = '-'; dst[3] = '-'; tmpl = *dstlen - 4; rc = _idn2_punycode_encode (plen, p, &tmpl, (char *) dst + 4); free (p); if (rc != IDN2_OK) return rc; *dstlen = 4 + tmpl; return IDN2_OK; }
label (const uint8_t * src, size_t srclen, uint8_t * dst, size_t * dstlen, int flags) { size_t plen; uint32_t *p; const uint8_t *src_org = NULL; uint8_t *src_allocated = NULL; int rc, check_roundtrip = 0; size_t tmpl, srclen_org = 0; uint32_t label_u32[IDN2_LABEL_MAX_LENGTH]; size_t label32_len = IDN2_LABEL_MAX_LENGTH; if (_idn2_ascii_p (src, srclen)) { if (!(flags & IDN2_NO_ALABEL_ROUNDTRIP) && srclen >= 4 && memcmp (src, "xn--", 4) == 0) { /* If the input to this procedure appears to be an A-label (i.e., it starts in "xn--", interpreted case-insensitively), the lookup application MAY attempt to convert it to a U-label, first ensuring that the A-label is entirely in lowercase (converting it to lowercase if necessary), and apply the tests of Section 5.4 and the conversion of Section 5.5 to that form. */ rc = _idn2_punycode_decode (srclen - 4, (char *) src + 4, &label32_len, label_u32); if (rc) return rc; check_roundtrip = 1; src_org = src; srclen_org = srclen; srclen = IDN2_LABEL_MAX_LENGTH; src = src_allocated = u32_to_u8 (label_u32, label32_len, NULL, &srclen); if (!src) { if (errno == ENOMEM) return IDN2_MALLOC; return IDN2_ENCODING_ERROR; } } else { if (srclen > IDN2_LABEL_MAX_LENGTH) return IDN2_TOO_BIG_LABEL; if (srclen > *dstlen) return IDN2_TOO_BIG_DOMAIN; memcpy (dst, src, srclen); *dstlen = srclen; return IDN2_OK; } } rc = _idn2_u8_to_u32_nfc (src, srclen, &p, &plen, flags & IDN2_NFC_INPUT); if (rc != IDN2_OK) goto out; if (!(flags & IDN2_TRANSITIONAL)) { rc = _idn2_label_test( TEST_NFC | TEST_2HYPHEN | TEST_LEADING_COMBINING | TEST_DISALLOWED | TEST_CONTEXTJ_RULE | TEST_CONTEXTO_WITH_RULE | TEST_UNASSIGNED | TEST_BIDI | ((flags & IDN2_NONTRANSITIONAL) ? TEST_NONTRANSITIONAL : 0) | ((flags & IDN2_USE_STD3_ASCII_RULES) ? 0 : TEST_ALLOW_STD3_DISALLOWED), p, plen); if (rc != IDN2_OK) { free (p); goto out; } } dst[0] = 'x'; dst[1] = 'n'; dst[2] = '-'; dst[3] = '-'; tmpl = *dstlen - 4; rc = _idn2_punycode_encode (plen, p, &tmpl, (char *) dst + 4); free (p); if (rc != IDN2_OK) goto out; *dstlen = 4 + tmpl; if (check_roundtrip) { if (srclen_org != *dstlen || memcmp (src_org, dst, srclen_org)) { rc = IDN2_ALABEL_ROUNDTRIP_FAILED; goto out; } } rc = IDN2_OK; out: free (src_allocated); return rc; }
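The new code path in func_after above is the substance of the fix: when an input label already looks like an A-label ("xn--" prefix), it is Punycode-decoded, re-encoded, and byte-compared against the original, so a non-canonical encoding now fails with IDN2_ALABEL_ROUNDTRIP_FAILED instead of being passed through. A small sketch of how the public behaviour differs per flag; the try() helper is hypothetical, written only against the documented idn2_lookup_u8() API:

#include <idn2.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static void try(const char *name, int flags, const char *label)
{
    uint8_t *out = NULL;
    int rc = idn2_lookup_u8((const uint8_t *)name, &out, flags);

    printf("%-24s %-16s -> %s\n", label, name,
           rc == IDN2_OK ? (const char *)out : idn2_strerror(rc));
    if (rc == IDN2_OK)
        free(out);
}

int main(void)
{
    /* "xn--mller-kva" round-trips (decoding and re-encoding reproduces
     * the input), so it passes either way; an A-label whose re-encoding
     * differs would only pass with the roundtrip check disabled. */
    try("xn--mller-kva", 0, "roundtrip (default)");
    try("xn--mller-kva", IDN2_NO_ALABEL_ROUNDTRIP, "roundtrip off");
    return 0;
}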
{'added': [(54, ' if (((*flags) & IDN2_ALABEL_ROUNDTRIP) && ((*flags) & IDN2_NO_ALABEL_ROUNDTRIP))'), (55, ' return IDN2_INVALID_FLAGS;'), (56, ''), (69, ' const uint8_t *src_org = NULL;'), (70, ' uint8_t *src_allocated = NULL;'), (71, ' int rc, check_roundtrip = 0;'), (72, ' size_t tmpl, srclen_org = 0;'), (73, ' uint32_t label_u32[IDN2_LABEL_MAX_LENGTH];'), (74, ' size_t label32_len = IDN2_LABEL_MAX_LENGTH;'), (75, ''), (76, ' if (_idn2_ascii_p (src, srclen)) {'), (77, ' if (!(flags & IDN2_NO_ALABEL_ROUNDTRIP) && srclen >= 4 && memcmp (src, "xn--", 4) == 0) {'), (78, ' /*'), (79, '\t If the input to this procedure appears to be an A-label'), (80, '\t (i.e., it starts in "xn--", interpreted'), (81, '\t case-insensitively), the lookup application MAY attempt to'), (82, '\t convert it to a U-label, first ensuring that the A-label is'), (83, '\t entirely in lowercase (converting it to lowercase if'), (84, '\t necessary), and apply the tests of Section 5.4 and the'), (85, '\t conversion of Section 5.5 to that form. */'), (86, ' rc = _idn2_punycode_decode (srclen - 4, (char *) src + 4, &label32_len, label_u32);'), (87, ' if (rc)'), (88, '\treturn rc;'), (90, ' check_roundtrip = 1;'), (91, ' src_org = src;'), (92, ' srclen_org = srclen;'), (93, ''), (94, ' srclen = IDN2_LABEL_MAX_LENGTH;'), (95, ' src = src_allocated = u32_to_u8 (label_u32, label32_len, NULL, &srclen);'), (96, ' if (!src) {'), (97, '\tif (errno == ENOMEM)'), (98, '\t return IDN2_MALLOC;'), (99, '\treturn IDN2_ENCODING_ERROR;'), (100, ' }'), (101, ' } else {'), (111, ' }'), (115, ' goto out;'), (133, '\t free (p);'), (134, '\t goto out;'), (147, ' goto out;'), (148, ''), (152, ' if (check_roundtrip)'), (153, ' {'), (154, ' if (srclen_org != *dstlen || memcmp (src_org, dst, srclen_org))'), (155, ' {'), (156, ' rc = IDN2_ALABEL_ROUNDTRIP_FAILED;'), (157, '\tgoto out;'), (158, ' }'), (159, ' }'), (160, ''), (161, ' rc = IDN2_OK;'), (162, ''), (163, 'out:'), (164, ' free (src_allocated);'), (165, ' return rc;'), (416, ' *'), (419, ' * testing. This is default since version 2.2.'), (420, ' * To switch this behavior off, pass IDN2_NO_ALABEL_ROUNDTRIP'), (421, ' *'), (424, ' * Unicode TR46 non-transitional processing.'), (425, ' *'), (426, ' * Multiple flags may be specified by binary or:ing them together.'), (544, ' * Pass %IDN2_ALABEL_ROUNDTRIP in @flags to'), (545, ' * convert any input A-labels to U-labels and perform additional'), (546, ' * testing. 
This is default since version 2.2.'), (547, ' * To switch this behavior off, pass IDN2_NO_ALABEL_ROUNDTRIP'), (548, ' *'), (549, ' * Pass %IDN2_TRANSITIONAL to enable Unicode TR46 transitional processing,'), (551, ' * processing.'), (552, ' *'), (553, ' * Multiple flags may be specified by binary or:ing them together, for'), (554, ' * example %IDN2_ALABEL_ROUNDTRIP | %IDN2_NONTRANSITIONAL.'), (555, ' *'), (556, ' * The %IDN2_NFC_INPUT in @flags is always enabled in this function.')], 'deleted': [(66, ' int rc;'), (67, ' size_t tmpl;'), (68, ''), (69, ' if (_idn2_ascii_p (src, srclen))'), (70, ' {'), (71, ' if (flags & IDN2_ALABEL_ROUNDTRIP)'), (72, '\t/* FIXME implement this MAY:'), (73, ''), (74, '\t If the input to this procedure appears to be an A-label'), (75, '\t (i.e., it starts in "xn--", interpreted'), (76, '\t case-insensitively), the lookup application MAY attempt to'), (77, '\t convert it to a U-label, first ensuring that the A-label is'), (78, '\t entirely in lowercase (converting it to lowercase if'), (79, '\t necessary), and apply the tests of Section 5.4 and the'), (80, '\t conversion of Section 5.5 to that form. */'), (81, '\treturn IDN2_INVALID_FLAGS;'), (95, ' return rc;'), (113, '\t free(p);'), (114, '\t return rc;'), (127, ' return rc;'), (131, ' return IDN2_OK;'), (384, ' * testing (not implemented yet).'), (387, ' * Unicode TR46 non-transitional processing. Multiple flags may be'), (388, ' * specified by binary or:ing them together.'), (506, ' * Pass %IDN2_ALABEL_ROUNDTRIP in @flags to convert any input A-labels'), (507, ' * to U-labels and perform additional testing. Pass'), (508, ' * %IDN2_TRANSITIONAL to enable Unicode TR46 transitional processing,'), (510, ' * processing. Multiple flags may be specified by binary or:ing them'), (511, ' * together, for example %IDN2_ALABEL_ROUNDTRIP |'), (512, ' * %IDN2_NONTRANSITIONAL. The %IDN2_NFC_INPUT in @flags is always'), (513, ' * enabled in this function.')]}
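Besides the roundtrip itself, the diff above also hardens flag validation: set_default_flags() now rejects the contradictory pair IDN2_ALABEL_ROUNDTRIP | IDN2_NO_ALABEL_ROUNDTRIP with IDN2_INVALID_FLAGS (added lines 54-55). A hypothetical caller hitting that path:

#include <idn2.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint8_t *out = NULL;
    int rc = idn2_lookup_u8((const uint8_t *)"example.com", &out,
                            IDN2_ALABEL_ROUNDTRIP | IDN2_NO_ALABEL_ROUNDTRIP);
    /* expected: IDN2_INVALID_FLAGS, per the added check in
     * set_default_flags() */
    printf("%s\n", idn2_strerror(rc));
    return rc == IDN2_INVALID_FLAGS ? 0 : 1;
}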
74
31
480
2448
https://gitlab.com/libidn/libidn2
CVE-2019-12290
['CWE-20']
reachable.c
mark_commit
#include "cache.h" #include "refs.h" #include "tag.h" #include "commit.h" #include "blob.h" #include "diff.h" #include "revision.h" #include "reachable.h" #include "cache-tree.h" #include "progress.h" #include "list-objects.h" struct connectivity_progress { struct progress *progress; unsigned long count; }; static void update_progress(struct connectivity_progress *cp) { cp->count++; if ((cp->count & 1023) == 0) display_progress(cp->progress, cp->count); } static int add_one_ref(const char *path, const struct object_id *oid, int flag, void *cb_data) { struct rev_info *revs = (struct rev_info *)cb_data; struct object *object; if ((flag & REF_ISSYMREF) && (flag & REF_ISBROKEN)) { warning("symbolic ref is dangling: %s", path); return 0; } object = parse_object_or_die(oid->hash, path); add_pending_object(revs, object, ""); return 0; } /* * The traversal will have already marked us as SEEN, so we * only need to handle any progress reporting here. */ static void mark_object(struct object *obj, struct strbuf *path, const char *name, void *data) { update_progress(data); } static void mark_commit(struct commit *c, void *data) { mark_object(&c->object, NULL, NULL, data); } struct recent_data { struct rev_info *revs; unsigned long timestamp; }; static void add_recent_object(const unsigned char *sha1, unsigned long mtime, struct recent_data *data) { struct object *obj; enum object_type type; if (mtime <= data->timestamp) return; /* * We do not want to call parse_object here, because * inflating blobs and trees could be very expensive. * However, we do need to know the correct type for * later processing, and the revision machinery expects * commits and tags to have been parsed. */ type = sha1_object_info(sha1, NULL); if (type < 0) die("unable to get object info for %s", sha1_to_hex(sha1)); switch (type) { case OBJ_TAG: case OBJ_COMMIT: obj = parse_object_or_die(sha1, NULL); break; case OBJ_TREE: obj = (struct object *)lookup_tree(sha1); break; case OBJ_BLOB: obj = (struct object *)lookup_blob(sha1); break; default: die("unknown object type for %s: %s", sha1_to_hex(sha1), typename(type)); } if (!obj) die("unable to lookup %s", sha1_to_hex(sha1)); add_pending_object(data->revs, obj, ""); } static int add_recent_loose(const unsigned char *sha1, const char *path, void *data) { struct stat st; struct object *obj = lookup_object(sha1); if (obj && obj->flags & SEEN) return 0; if (stat(path, &st) < 0) { /* * It's OK if an object went away during our iteration; this * could be due to a simultaneous repack. But anything else * we should abort, since we might then fail to mark objects * which should not be pruned. 
*/ if (errno == ENOENT) return 0; return error("unable to stat %s: %s", sha1_to_hex(sha1), strerror(errno)); } add_recent_object(sha1, st.st_mtime, data); return 0; } static int add_recent_packed(const unsigned char *sha1, struct packed_git *p, uint32_t pos, void *data) { struct object *obj = lookup_object(sha1); if (obj && obj->flags & SEEN) return 0; add_recent_object(sha1, p->mtime, data); return 0; } int add_unseen_recent_objects_to_traversal(struct rev_info *revs, unsigned long timestamp) { struct recent_data data; int r; data.revs = revs; data.timestamp = timestamp; r = for_each_loose_object(add_recent_loose, &data, FOR_EACH_OBJECT_LOCAL_ONLY); if (r) return r; return for_each_packed_object(add_recent_packed, &data, FOR_EACH_OBJECT_LOCAL_ONLY); } void mark_reachable_objects(struct rev_info *revs, int mark_reflog, unsigned long mark_recent, struct progress *progress) { struct connectivity_progress cp; /* * Set up revision parsing, and mark us as being interested * in all object types, not just commits. */ revs->tag_objects = 1; revs->blob_objects = 1; revs->tree_objects = 1; /* Add all refs from the index file */ add_index_objects_to_pending(revs, 0); /* Add all external refs */ for_each_ref(add_one_ref, revs); /* detached HEAD is not included in the list above */ head_ref(add_one_ref, revs); /* Add all reflog info */ if (mark_reflog) add_reflogs_to_pending(revs, 0); cp.progress = progress; cp.count = 0; /* * Set up the revision walk - this will move all commits * from the pending list to the commit walking list. */ if (prepare_revision_walk(revs)) die("revision walk setup failed"); traverse_commit_list(revs, mark_commit, mark_object, &cp); if (mark_recent) { revs->ignore_missing_links = 1; if (add_unseen_recent_objects_to_traversal(revs, mark_recent)) die("unable to mark recent objects"); if (prepare_revision_walk(revs)) die("revision walk setup failed"); traverse_commit_list(revs, mark_commit, mark_object, &cp); } display_progress(cp.progress, cp.count); }
#include "cache.h" #include "refs.h" #include "tag.h" #include "commit.h" #include "blob.h" #include "diff.h" #include "revision.h" #include "reachable.h" #include "cache-tree.h" #include "progress.h" #include "list-objects.h" struct connectivity_progress { struct progress *progress; unsigned long count; }; static void update_progress(struct connectivity_progress *cp) { cp->count++; if ((cp->count & 1023) == 0) display_progress(cp->progress, cp->count); } static int add_one_ref(const char *path, const struct object_id *oid, int flag, void *cb_data) { struct rev_info *revs = (struct rev_info *)cb_data; struct object *object; if ((flag & REF_ISSYMREF) && (flag & REF_ISBROKEN)) { warning("symbolic ref is dangling: %s", path); return 0; } object = parse_object_or_die(oid->hash, path); add_pending_object(revs, object, ""); return 0; } /* * The traversal will have already marked us as SEEN, so we * only need to handle any progress reporting here. */ static void mark_object(struct object *obj, const char *name, void *data) { update_progress(data); } static void mark_commit(struct commit *c, void *data) { mark_object(&c->object, NULL, data); } struct recent_data { struct rev_info *revs; unsigned long timestamp; }; static void add_recent_object(const unsigned char *sha1, unsigned long mtime, struct recent_data *data) { struct object *obj; enum object_type type; if (mtime <= data->timestamp) return; /* * We do not want to call parse_object here, because * inflating blobs and trees could be very expensive. * However, we do need to know the correct type for * later processing, and the revision machinery expects * commits and tags to have been parsed. */ type = sha1_object_info(sha1, NULL); if (type < 0) die("unable to get object info for %s", sha1_to_hex(sha1)); switch (type) { case OBJ_TAG: case OBJ_COMMIT: obj = parse_object_or_die(sha1, NULL); break; case OBJ_TREE: obj = (struct object *)lookup_tree(sha1); break; case OBJ_BLOB: obj = (struct object *)lookup_blob(sha1); break; default: die("unknown object type for %s: %s", sha1_to_hex(sha1), typename(type)); } if (!obj) die("unable to lookup %s", sha1_to_hex(sha1)); add_pending_object(data->revs, obj, ""); } static int add_recent_loose(const unsigned char *sha1, const char *path, void *data) { struct stat st; struct object *obj = lookup_object(sha1); if (obj && obj->flags & SEEN) return 0; if (stat(path, &st) < 0) { /* * It's OK if an object went away during our iteration; this * could be due to a simultaneous repack. But anything else * we should abort, since we might then fail to mark objects * which should not be pruned. 
*/ if (errno == ENOENT) return 0; return error("unable to stat %s: %s", sha1_to_hex(sha1), strerror(errno)); } add_recent_object(sha1, st.st_mtime, data); return 0; } static int add_recent_packed(const unsigned char *sha1, struct packed_git *p, uint32_t pos, void *data) { struct object *obj = lookup_object(sha1); if (obj && obj->flags & SEEN) return 0; add_recent_object(sha1, p->mtime, data); return 0; } int add_unseen_recent_objects_to_traversal(struct rev_info *revs, unsigned long timestamp) { struct recent_data data; int r; data.revs = revs; data.timestamp = timestamp; r = for_each_loose_object(add_recent_loose, &data, FOR_EACH_OBJECT_LOCAL_ONLY); if (r) return r; return for_each_packed_object(add_recent_packed, &data, FOR_EACH_OBJECT_LOCAL_ONLY); } void mark_reachable_objects(struct rev_info *revs, int mark_reflog, unsigned long mark_recent, struct progress *progress) { struct connectivity_progress cp; /* * Set up revision parsing, and mark us as being interested * in all object types, not just commits. */ revs->tag_objects = 1; revs->blob_objects = 1; revs->tree_objects = 1; /* Add all refs from the index file */ add_index_objects_to_pending(revs, 0); /* Add all external refs */ for_each_ref(add_one_ref, revs); /* detached HEAD is not included in the list above */ head_ref(add_one_ref, revs); /* Add all reflog info */ if (mark_reflog) add_reflogs_to_pending(revs, 0); cp.progress = progress; cp.count = 0; /* * Set up the revision walk - this will move all commits * from the pending list to the commit walking list. */ if (prepare_revision_walk(revs)) die("revision walk setup failed"); traverse_commit_list(revs, mark_commit, mark_object, &cp); if (mark_recent) { revs->ignore_missing_links = 1; if (add_unseen_recent_objects_to_traversal(revs, mark_recent)) die("unable to mark recent objects"); if (prepare_revision_walk(revs)) die("revision walk setup failed"); traverse_commit_list(revs, mark_commit, mark_object, &cp); } display_progress(cp.progress, cp.count); }
static void mark_commit(struct commit *c, void *data) { mark_object(&c->object, NULL, NULL, data); }
static void mark_commit(struct commit *c, void *data) { mark_object(&c->object, NULL, data); }
{'added': [(46, 'static void mark_object(struct object *obj, const char *name, void *data)'), (53, '\tmark_object(&c->object, NULL, data);')], 'deleted': [(46, 'static void mark_object(struct object *obj, struct strbuf *path,'), (47, '\t\t\tconst char *name, void *data)'), (54, '\tmark_object(&c->object, NULL, NULL, data);')]}
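The whole change is the callback-signature narrowing shown in this diff: the traversal no longer hands each object a struct strbuf *path, so callbacks such as mark_commit()/mark_object() that never used it no longer force the walk to maintain a path buffer per object. A self-contained sketch of the two shapes; the local forward declarations stand in for git's headers, and the typedef names mirror list-objects.h as I understand it:

struct object;
struct commit;
struct strbuf;

/* before: every show_object callback received a per-object path buffer */
typedef void (*show_object_fn_old)(struct object *obj, struct strbuf *path,
                                   const char *name, void *data);

/* after: only the name string is passed; callers that need a full path
 * derive it from name rather than from a caller-maintained strbuf */
typedef void (*show_object_fn)(struct object *obj, const char *name,
                               void *data);
typedef void (*show_commit_fn)(struct commit *commit, void *data);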
2
3
144
794
https://github.com/git/git
CVE-2016-2315
['CWE-119']
reachable.c
mark_object
#include "cache.h" #include "refs.h" #include "tag.h" #include "commit.h" #include "blob.h" #include "diff.h" #include "revision.h" #include "reachable.h" #include "cache-tree.h" #include "progress.h" #include "list-objects.h" struct connectivity_progress { struct progress *progress; unsigned long count; }; static void update_progress(struct connectivity_progress *cp) { cp->count++; if ((cp->count & 1023) == 0) display_progress(cp->progress, cp->count); } static int add_one_ref(const char *path, const struct object_id *oid, int flag, void *cb_data) { struct rev_info *revs = (struct rev_info *)cb_data; struct object *object; if ((flag & REF_ISSYMREF) && (flag & REF_ISBROKEN)) { warning("symbolic ref is dangling: %s", path); return 0; } object = parse_object_or_die(oid->hash, path); add_pending_object(revs, object, ""); return 0; } /* * The traversal will have already marked us as SEEN, so we * only need to handle any progress reporting here. */ static void mark_object(struct object *obj, struct strbuf *path, const char *name, void *data) { update_progress(data); } static void mark_commit(struct commit *c, void *data) { mark_object(&c->object, NULL, NULL, data); } struct recent_data { struct rev_info *revs; unsigned long timestamp; }; static void add_recent_object(const unsigned char *sha1, unsigned long mtime, struct recent_data *data) { struct object *obj; enum object_type type; if (mtime <= data->timestamp) return; /* * We do not want to call parse_object here, because * inflating blobs and trees could be very expensive. * However, we do need to know the correct type for * later processing, and the revision machinery expects * commits and tags to have been parsed. */ type = sha1_object_info(sha1, NULL); if (type < 0) die("unable to get object info for %s", sha1_to_hex(sha1)); switch (type) { case OBJ_TAG: case OBJ_COMMIT: obj = parse_object_or_die(sha1, NULL); break; case OBJ_TREE: obj = (struct object *)lookup_tree(sha1); break; case OBJ_BLOB: obj = (struct object *)lookup_blob(sha1); break; default: die("unknown object type for %s: %s", sha1_to_hex(sha1), typename(type)); } if (!obj) die("unable to lookup %s", sha1_to_hex(sha1)); add_pending_object(data->revs, obj, ""); } static int add_recent_loose(const unsigned char *sha1, const char *path, void *data) { struct stat st; struct object *obj = lookup_object(sha1); if (obj && obj->flags & SEEN) return 0; if (stat(path, &st) < 0) { /* * It's OK if an object went away during our iteration; this * could be due to a simultaneous repack. But anything else * we should abort, since we might then fail to mark objects * which should not be pruned. 
*/ if (errno == ENOENT) return 0; return error("unable to stat %s: %s", sha1_to_hex(sha1), strerror(errno)); } add_recent_object(sha1, st.st_mtime, data); return 0; } static int add_recent_packed(const unsigned char *sha1, struct packed_git *p, uint32_t pos, void *data) { struct object *obj = lookup_object(sha1); if (obj && obj->flags & SEEN) return 0; add_recent_object(sha1, p->mtime, data); return 0; } int add_unseen_recent_objects_to_traversal(struct rev_info *revs, unsigned long timestamp) { struct recent_data data; int r; data.revs = revs; data.timestamp = timestamp; r = for_each_loose_object(add_recent_loose, &data, FOR_EACH_OBJECT_LOCAL_ONLY); if (r) return r; return for_each_packed_object(add_recent_packed, &data, FOR_EACH_OBJECT_LOCAL_ONLY); } void mark_reachable_objects(struct rev_info *revs, int mark_reflog, unsigned long mark_recent, struct progress *progress) { struct connectivity_progress cp; /* * Set up revision parsing, and mark us as being interested * in all object types, not just commits. */ revs->tag_objects = 1; revs->blob_objects = 1; revs->tree_objects = 1; /* Add all refs from the index file */ add_index_objects_to_pending(revs, 0); /* Add all external refs */ for_each_ref(add_one_ref, revs); /* detached HEAD is not included in the list above */ head_ref(add_one_ref, revs); /* Add all reflog info */ if (mark_reflog) add_reflogs_to_pending(revs, 0); cp.progress = progress; cp.count = 0; /* * Set up the revision walk - this will move all commits * from the pending list to the commit walking list. */ if (prepare_revision_walk(revs)) die("revision walk setup failed"); traverse_commit_list(revs, mark_commit, mark_object, &cp); if (mark_recent) { revs->ignore_missing_links = 1; if (add_unseen_recent_objects_to_traversal(revs, mark_recent)) die("unable to mark recent objects"); if (prepare_revision_walk(revs)) die("revision walk setup failed"); traverse_commit_list(revs, mark_commit, mark_object, &cp); } display_progress(cp.progress, cp.count); }
#include "cache.h" #include "refs.h" #include "tag.h" #include "commit.h" #include "blob.h" #include "diff.h" #include "revision.h" #include "reachable.h" #include "cache-tree.h" #include "progress.h" #include "list-objects.h" struct connectivity_progress { struct progress *progress; unsigned long count; }; static void update_progress(struct connectivity_progress *cp) { cp->count++; if ((cp->count & 1023) == 0) display_progress(cp->progress, cp->count); } static int add_one_ref(const char *path, const struct object_id *oid, int flag, void *cb_data) { struct rev_info *revs = (struct rev_info *)cb_data; struct object *object; if ((flag & REF_ISSYMREF) && (flag & REF_ISBROKEN)) { warning("symbolic ref is dangling: %s", path); return 0; } object = parse_object_or_die(oid->hash, path); add_pending_object(revs, object, ""); return 0; } /* * The traversal will have already marked us as SEEN, so we * only need to handle any progress reporting here. */ static void mark_object(struct object *obj, const char *name, void *data) { update_progress(data); } static void mark_commit(struct commit *c, void *data) { mark_object(&c->object, NULL, data); } struct recent_data { struct rev_info *revs; unsigned long timestamp; }; static void add_recent_object(const unsigned char *sha1, unsigned long mtime, struct recent_data *data) { struct object *obj; enum object_type type; if (mtime <= data->timestamp) return; /* * We do not want to call parse_object here, because * inflating blobs and trees could be very expensive. * However, we do need to know the correct type for * later processing, and the revision machinery expects * commits and tags to have been parsed. */ type = sha1_object_info(sha1, NULL); if (type < 0) die("unable to get object info for %s", sha1_to_hex(sha1)); switch (type) { case OBJ_TAG: case OBJ_COMMIT: obj = parse_object_or_die(sha1, NULL); break; case OBJ_TREE: obj = (struct object *)lookup_tree(sha1); break; case OBJ_BLOB: obj = (struct object *)lookup_blob(sha1); break; default: die("unknown object type for %s: %s", sha1_to_hex(sha1), typename(type)); } if (!obj) die("unable to lookup %s", sha1_to_hex(sha1)); add_pending_object(data->revs, obj, ""); } static int add_recent_loose(const unsigned char *sha1, const char *path, void *data) { struct stat st; struct object *obj = lookup_object(sha1); if (obj && obj->flags & SEEN) return 0; if (stat(path, &st) < 0) { /* * It's OK if an object went away during our iteration; this * could be due to a simultaneous repack. But anything else * we should abort, since we might then fail to mark objects * which should not be pruned. 
*/ if (errno == ENOENT) return 0; return error("unable to stat %s: %s", sha1_to_hex(sha1), strerror(errno)); } add_recent_object(sha1, st.st_mtime, data); return 0; } static int add_recent_packed(const unsigned char *sha1, struct packed_git *p, uint32_t pos, void *data) { struct object *obj = lookup_object(sha1); if (obj && obj->flags & SEEN) return 0; add_recent_object(sha1, p->mtime, data); return 0; } int add_unseen_recent_objects_to_traversal(struct rev_info *revs, unsigned long timestamp) { struct recent_data data; int r; data.revs = revs; data.timestamp = timestamp; r = for_each_loose_object(add_recent_loose, &data, FOR_EACH_OBJECT_LOCAL_ONLY); if (r) return r; return for_each_packed_object(add_recent_packed, &data, FOR_EACH_OBJECT_LOCAL_ONLY); } void mark_reachable_objects(struct rev_info *revs, int mark_reflog, unsigned long mark_recent, struct progress *progress) { struct connectivity_progress cp; /* * Set up revision parsing, and mark us as being interested * in all object types, not just commits. */ revs->tag_objects = 1; revs->blob_objects = 1; revs->tree_objects = 1; /* Add all refs from the index file */ add_index_objects_to_pending(revs, 0); /* Add all external refs */ for_each_ref(add_one_ref, revs); /* detached HEAD is not included in the list above */ head_ref(add_one_ref, revs); /* Add all reflog info */ if (mark_reflog) add_reflogs_to_pending(revs, 0); cp.progress = progress; cp.count = 0; /* * Set up the revision walk - this will move all commits * from the pending list to the commit walking list. */ if (prepare_revision_walk(revs)) die("revision walk setup failed"); traverse_commit_list(revs, mark_commit, mark_object, &cp); if (mark_recent) { revs->ignore_missing_links = 1; if (add_unseen_recent_objects_to_traversal(revs, mark_recent)) die("unable to mark recent objects"); if (prepare_revision_walk(revs)) die("revision walk setup failed"); traverse_commit_list(revs, mark_commit, mark_object, &cp); } display_progress(cp.progress, cp.count); }
static void mark_object(struct object *obj, struct strbuf *path, const char *name, void *data) { update_progress(data); }
static void mark_object(struct object *obj, const char *name, void *data) { update_progress(data); }
{'added': [(46, 'static void mark_object(struct object *obj, const char *name, void *data)'), (53, '\tmark_object(&c->object, NULL, data);')], 'deleted': [(46, 'static void mark_object(struct object *obj, struct strbuf *path,'), (47, '\t\t\tconst char *name, void *data)'), (54, '\tmark_object(&c->object, NULL, NULL, data);')]}
2
3
144
794
https://github.com/git/git
CVE-2016-2315
['CWE-119']
common.c
compose_path
/* Common methods shared between FTP and TFTP engines * * Copyright (c) 2014-2019 Joachim Nilsson <troglobit@gmail.com> * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include "uftpd.h" int chrooted = 0; /* Protect against common directory traversal attacks, for details see * https://en.wikipedia.org/wiki/Directory_traversal_attack * * Example: /srv/ftp/ ../../etc/passwd => /etc/passwd * .~~~~~~~~ .~~~~~~~~~ * / / * Server dir ------' / * User input ---------------' * * Forced dir ------> /srv/ftp/etc */ char *compose_path(ctrl_t *ctrl, char *path) { struct stat st; static char rpath[PATH_MAX]; char *name, *ptr; char dir[PATH_MAX] = { 0 }; strlcpy(dir, ctrl->cwd, sizeof(dir)); DBG("Compose path from cwd: %s, arg: %s", ctrl->cwd, path ?: ""); if (!path || !strlen(path)) goto check; if (path) { if (path[0] != '/') { if (dir[strlen(dir) - 1] != '/') strlcat(dir, "/", sizeof(dir)); } strlcat(dir, path, sizeof(dir)); } check: while ((ptr = strstr(dir, "//"))) memmove(ptr, &ptr[1], strlen(&ptr[1]) + 1); if (!chrooted) { size_t len = strlen(home); DBG("Server path from CWD: %s", dir); if (len > 0 && home[len - 1] == '/') len--; memmove(dir + len, dir, strlen(dir) + 1); memcpy(dir, home, len); DBG("Resulting non-chroot path: %s", dir); } /* * Handle directories slightly differently, since dirname() on a * directory returns the parent directory. So, just squash .. */ if (!stat(dir, &st) && S_ISDIR(st.st_mode)) { if (!realpath(dir, rpath)) return NULL; } else { /* * Check realpath() of directory containing the file, a * STOR may want to save a new file. Then append the * file and return it. 
*/ name = basename(path); ptr = dirname(dir); memset(rpath, 0, sizeof(rpath)); if (!realpath(ptr, rpath)) { INFO("Failed realpath(%s): %m", ptr); return NULL; } if (rpath[1] != 0) strlcat(rpath, "/", sizeof(rpath)); strlcat(rpath, name, sizeof(rpath)); } if (!chrooted && strncmp(dir, home, strlen(home))) { DBG("Failed non-chroot dir:%s vs home:%s", dir, home); return NULL; } return rpath; } char *compose_abspath(ctrl_t *ctrl, char *path) { char *ptr; char cwd[sizeof(ctrl->cwd)]; if (path && path[0] == '/') { strlcpy(cwd, ctrl->cwd, sizeof(cwd)); memset(ctrl->cwd, 0, sizeof(ctrl->cwd)); } ptr = compose_path(ctrl, path); if (path && path[0] == '/') strlcpy(ctrl->cwd, cwd, sizeof(ctrl->cwd)); return ptr; } int set_nonblock(int fd) { int flags; flags = fcntl(fd, F_GETFL, 0); if (!flags) (void)fcntl(fd, F_SETFL, flags | O_NONBLOCK); return fd; } int open_socket(int port, int type, char *desc) { int sd, err, val = 1; socklen_t len = sizeof(struct sockaddr); struct sockaddr_in server; sd = socket(AF_INET, type | SOCK_NONBLOCK, 0); if (sd < 0) { WARN(errno, "Failed creating %s server socket", desc); return -1; } err = setsockopt(sd, SOL_SOCKET, SO_REUSEADDR, (char *)&val, sizeof(val)); if (err != 0) WARN(errno, "Failed setting SO_REUSEADDR on %s socket", type == SOCK_DGRAM ? "TFTP" : "FTP"); memset(&server, 0, sizeof(server)); server.sin_family = AF_INET; server.sin_addr.s_addr = INADDR_ANY; server.sin_port = htons(port); if (bind(sd, (struct sockaddr *)&server, len) < 0) { if (EACCES != errno) { WARN(errno, "Failed binding to port %d, maybe another %s server is already running", port, desc); } close(sd); return -1; } if (port && type != SOCK_DGRAM) { if (-1 == listen(sd, 20)) WARN(errno, "Failed starting %s server", desc); } DBG("Opened socket for port %d", port); return sd; } void convert_address(struct sockaddr_storage *ss, char *buf, size_t len) { switch (ss->ss_family) { case AF_INET: inet_ntop(ss->ss_family, &((struct sockaddr_in *)ss)->sin_addr, buf, len); break; case AF_INET6: inet_ntop(ss->ss_family, &((struct sockaddr_in6 *)ss)->sin6_addr, buf, len); break; } } /* Inactivity timer, bye bye */ static void inactivity_cb(uev_t *w, void *arg, int events) { uev_ctx_t *ctx = (uev_ctx_t *)arg; INFO("Inactivity timer, exiting ..."); uev_exit(ctx); } ctrl_t *new_session(uev_ctx_t *ctx, int sd, int *rc) { ctrl_t *ctrl = NULL; static int privs_dropped = 0; if (!inetd) { pid_t pid = fork(); if (pid) { DBG("Created new client session as PID %d", pid); *rc = pid; return NULL; } /* * Set process group to parent, so uftpd can call * killpg() on all of us when it exits. */ setpgid(0, getppid()); /* Create new uEv context for the child. 
*/ ctx = calloc(1, sizeof(uev_ctx_t)); if (!ctx) { ERR(errno, "Failed allocating session event context"); exit(1); } uev_init(ctx); } ctrl = calloc(1, sizeof(ctrl_t)); if (!ctrl) { ERR(errno, "Failed allocating session context"); goto fail; } ctrl->sd = set_nonblock(sd); ctrl->ctx = ctx; strlcpy(ctrl->cwd, "/", sizeof(ctrl->cwd)); /* Chroot to FTP root */ if (!chrooted && geteuid() == 0) { if (chroot(home) || chdir("/")) { ERR(errno, "Failed chrooting to FTP root, %s, aborting", home); goto fail; } chrooted = 1; } else if (!chrooted) { if (chdir(home)) { WARN(errno, "Failed changing to FTP root, %s, aborting", home); goto fail; } } /* If ftp user exists and we're running as root we can drop privs */ if (!privs_dropped && pw && geteuid() == 0) { int fail1, fail2; initgroups(pw->pw_name, pw->pw_gid); if ((fail1 = setegid(pw->pw_gid))) WARN(errno, "Failed dropping group privileges to gid %d", pw->pw_gid); if ((fail2 = seteuid(pw->pw_uid))) WARN(errno, "Failed dropping user privileges to uid %d", pw->pw_uid); setenv("HOME", pw->pw_dir, 1); if (!fail1 && !fail2) INFO("Successfully dropped privilges to %d:%d (uid:gid)", pw->pw_uid, pw->pw_gid); /* * Check we don't have write access to the FTP root, * unless explicitly allowed */ if (!do_insecure && !access(home, W_OK)) { ERR(0, "FTP root %s writable, possible security violation, aborting session!", home); goto fail; } /* On failure, we tried at least. Only warn once. */ privs_dropped = 1; } /* Session timeout handler */ uev_timer_init(ctrl->ctx, &ctrl->timeout_watcher, inactivity_cb, ctrl->ctx, INACTIVITY_TIMER, 0); return ctrl; fail: if (ctrl) free(ctrl); if (!inetd) free(ctx); *rc = -1; return NULL; } int del_session(ctrl_t *ctrl, int isftp) { DBG("%sFTP Client session ended.", isftp ? "": "T" ); if (!ctrl) return -1; if (isftp && ctrl->sd > 0) { shutdown(ctrl->sd, SHUT_RDWR); close(ctrl->sd); } if (ctrl->data_listen_sd > 0) { shutdown(ctrl->data_listen_sd, SHUT_RDWR); close(ctrl->data_listen_sd); } if (ctrl->data_sd > 0) { shutdown(ctrl->data_sd, SHUT_RDWR); close(ctrl->data_sd); } if (ctrl->buf) free(ctrl->buf); if (!inetd && ctrl->ctx) free(ctrl->ctx); free(ctrl); return 0; } /** * Local Variables: * indent-tabs-mode: t * c-file-style: "linux" * End: */
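The vulnerable pattern sits at the end of compose_path() in the code_before above: the final containment check compares dir, and since home was prepended to dir by construction, strncmp(dir, home, strlen(home)) can never fail, while the canonicalized rpath that is actually returned may have escaped the root via "../" components or symlinks. The code_after below compares rpath instead. A minimal sketch of the idea, using a hypothetical path_is_under_root() helper:

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Canonicalize first, then compare the *resolved* path against the
 * served root. Checking the unresolved string is useless here, because
 * it starts with the root prefix by construction. */
static int path_is_under_root(const char *path, const char *root)
{
    char rpath[PATH_MAX];

    if (!realpath(path, rpath))
        return 0;
    return strncmp(rpath, root, strlen(root)) == 0;
}

int main(void)
{
    /* "/srv/ftp/../../etc/passwd" canonicalizes to "/etc/passwd", which
     * no longer carries the "/srv/ftp" prefix. Paths are illustrative;
     * they must exist on disk for realpath() to succeed. */
    printf("%d\n", path_is_under_root("/srv/ftp/../../etc/passwd", "/srv/ftp"));
    return 0;
}

Note that a plain prefix strncmp() still accepts siblings such as /srv/ftp2 when the root is /srv/ftp; a stricter variant would also require the byte after the prefix to be '/' or NUL.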
/* Common methods shared between FTP and TFTP engines * * Copyright (c) 2014-2019 Joachim Nilsson <troglobit@gmail.com> * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include "uftpd.h" int chrooted = 0; /* Protect against common directory traversal attacks, for details see * https://en.wikipedia.org/wiki/Directory_traversal_attack * * Example: /srv/ftp/ ../../etc/passwd => /etc/passwd * .~~~~~~~~ .~~~~~~~~~ * / / * Server dir ------' / * User input ---------------' * * Forced dir ------> /srv/ftp/etc */ char *compose_path(ctrl_t *ctrl, char *path) { struct stat st; static char rpath[PATH_MAX]; char *name, *ptr; char dir[PATH_MAX] = { 0 }; strlcpy(dir, ctrl->cwd, sizeof(dir)); DBG("Compose path from cwd: %s, arg: %s", ctrl->cwd, path ?: ""); if (!path || !strlen(path)) goto check; if (path) { if (path[0] != '/') { if (dir[strlen(dir) - 1] != '/') strlcat(dir, "/", sizeof(dir)); } strlcat(dir, path, sizeof(dir)); } check: while ((ptr = strstr(dir, "//"))) memmove(ptr, &ptr[1], strlen(&ptr[1]) + 1); if (!chrooted) { size_t len = strlen(home); DBG("Server path from CWD: %s", dir); if (len > 0 && home[len - 1] == '/') len--; memmove(dir + len, dir, strlen(dir) + 1); memcpy(dir, home, len); DBG("Resulting non-chroot path: %s", dir); } /* * Handle directories slightly differently, since dirname() on a * directory returns the parent directory. So, just squash .. */ if (!stat(dir, &st) && S_ISDIR(st.st_mode)) { if (!realpath(dir, rpath)) return NULL; } else { /* * Check realpath() of directory containing the file, a * STOR may want to save a new file. Then append the * file and return it. 
		 */
		name = basename(path);
		ptr = dirname(dir);

		memset(rpath, 0, sizeof(rpath));
		if (!realpath(ptr, rpath)) {
			INFO("Failed realpath(%s): %m", ptr);
			return NULL;
		}

		if (rpath[1] != 0)
			strlcat(rpath, "/", sizeof(rpath));
		strlcat(rpath, name, sizeof(rpath));
	}

	if (!chrooted && strncmp(rpath, home, strlen(home))) {
		DBG("Failed non-chroot dir:%s vs home:%s", dir, home);
		return NULL;
	}

	return rpath;
}

char *compose_abspath(ctrl_t *ctrl, char *path)
{
	char *ptr;
	char cwd[sizeof(ctrl->cwd)];

	if (path && path[0] == '/') {
		strlcpy(cwd, ctrl->cwd, sizeof(cwd));
		memset(ctrl->cwd, 0, sizeof(ctrl->cwd));
	}

	ptr = compose_path(ctrl, path);

	if (path && path[0] == '/')
		strlcpy(ctrl->cwd, cwd, sizeof(ctrl->cwd));

	return ptr;
}

int set_nonblock(int fd)
{
	int flags;

	flags = fcntl(fd, F_GETFL, 0);
	if (!flags)
		(void)fcntl(fd, F_SETFL, flags | O_NONBLOCK);

	return fd;
}

int open_socket(int port, int type, char *desc)
{
	int sd, err, val = 1;
	socklen_t len = sizeof(struct sockaddr);
	struct sockaddr_in server;

	sd = socket(AF_INET, type | SOCK_NONBLOCK, 0);
	if (sd < 0) {
		WARN(errno, "Failed creating %s server socket", desc);
		return -1;
	}

	err = setsockopt(sd, SOL_SOCKET, SO_REUSEADDR, (char *)&val, sizeof(val));
	if (err != 0)
		WARN(errno, "Failed setting SO_REUSEADDR on %s socket",
		     type == SOCK_DGRAM ? "TFTP" : "FTP");

	memset(&server, 0, sizeof(server));
	server.sin_family = AF_INET;
	server.sin_addr.s_addr = INADDR_ANY;
	server.sin_port = htons(port);
	if (bind(sd, (struct sockaddr *)&server, len) < 0) {
		if (EACCES != errno) {
			WARN(errno, "Failed binding to port %d, maybe another %s server is already running",
			     port, desc);
		}
		close(sd);

		return -1;
	}

	if (port && type != SOCK_DGRAM) {
		if (-1 == listen(sd, 20))
			WARN(errno, "Failed starting %s server", desc);
	}

	DBG("Opened socket for port %d", port);

	return sd;
}

void convert_address(struct sockaddr_storage *ss, char *buf, size_t len)
{
	switch (ss->ss_family) {
	case AF_INET:
		inet_ntop(ss->ss_family,
			  &((struct sockaddr_in *)ss)->sin_addr, buf, len);
		break;

	case AF_INET6:
		inet_ntop(ss->ss_family,
			  &((struct sockaddr_in6 *)ss)->sin6_addr, buf, len);
		break;
	}
}

/* Inactivity timer, bye bye */
static void inactivity_cb(uev_t *w, void *arg, int events)
{
	uev_ctx_t *ctx = (uev_ctx_t *)arg;

	INFO("Inactivity timer, exiting ...");
	uev_exit(ctx);
}

ctrl_t *new_session(uev_ctx_t *ctx, int sd, int *rc)
{
	ctrl_t *ctrl = NULL;
	static int privs_dropped = 0;

	if (!inetd) {
		pid_t pid = fork();

		if (pid) {
			DBG("Created new client session as PID %d", pid);
			*rc = pid;
			return NULL;
		}

		/*
		 * Set process group to parent, so uftpd can call
		 * killpg() on all of us when it exits.
		 */
		setpgid(0, getppid());

		/* Create new uEv context for the child.
		 */
		ctx = calloc(1, sizeof(uev_ctx_t));
		if (!ctx) {
			ERR(errno, "Failed allocating session event context");
			exit(1);
		}

		uev_init(ctx);
	}

	ctrl = calloc(1, sizeof(ctrl_t));
	if (!ctrl) {
		ERR(errno, "Failed allocating session context");
		goto fail;
	}

	ctrl->sd = set_nonblock(sd);
	ctrl->ctx = ctx;
	strlcpy(ctrl->cwd, "/", sizeof(ctrl->cwd));

	/* Chroot to FTP root */
	if (!chrooted && geteuid() == 0) {
		if (chroot(home) || chdir("/")) {
			ERR(errno, "Failed chrooting to FTP root, %s, aborting", home);
			goto fail;
		}
		chrooted = 1;
	} else if (!chrooted) {
		if (chdir(home)) {
			WARN(errno, "Failed changing to FTP root, %s, aborting", home);
			goto fail;
		}
	}

	/* If ftp user exists and we're running as root we can drop privs */
	if (!privs_dropped && pw && geteuid() == 0) {
		int fail1, fail2;

		initgroups(pw->pw_name, pw->pw_gid);
		if ((fail1 = setegid(pw->pw_gid)))
			WARN(errno, "Failed dropping group privileges to gid %d", pw->pw_gid);
		if ((fail2 = seteuid(pw->pw_uid)))
			WARN(errno, "Failed dropping user privileges to uid %d", pw->pw_uid);

		setenv("HOME", pw->pw_dir, 1);

		if (!fail1 && !fail2)
			INFO("Successfully dropped privileges to %d:%d (uid:gid)", pw->pw_uid, pw->pw_gid);

		/*
		 * Check we don't have write access to the FTP root,
		 * unless explicitly allowed
		 */
		if (!do_insecure && !access(home, W_OK)) {
			ERR(0, "FTP root %s writable, possible security violation, aborting session!", home);
			goto fail;
		}

		/* On failure, we tried at least.  Only warn once. */
		privs_dropped = 1;
	}

	/* Session timeout handler */
	uev_timer_init(ctrl->ctx, &ctrl->timeout_watcher, inactivity_cb,
		       ctrl->ctx, INACTIVITY_TIMER, 0);

	return ctrl;
fail:
	if (ctrl)
		free(ctrl);
	if (!inetd)
		free(ctx);
	*rc = -1;

	return NULL;
}

int del_session(ctrl_t *ctrl, int isftp)
{
	DBG("%sFTP Client session ended.", isftp ? "" : "T");

	if (!ctrl)
		return -1;

	if (isftp && ctrl->sd > 0) {
		shutdown(ctrl->sd, SHUT_RDWR);
		close(ctrl->sd);
	}

	if (ctrl->data_listen_sd > 0) {
		shutdown(ctrl->data_listen_sd, SHUT_RDWR);
		close(ctrl->data_listen_sd);
	}

	if (ctrl->data_sd > 0) {
		shutdown(ctrl->data_sd, SHUT_RDWR);
		close(ctrl->data_sd);
	}

	if (ctrl->buf)
		free(ctrl->buf);

	if (!inetd && ctrl->ctx)
		free(ctrl->ctx);
	free(ctrl);

	return 0;
}

/**
 * Local Variables:
 *  indent-tabs-mode: t
 *  c-file-style: "linux"
 * End:
 */
char *compose_path(ctrl_t *ctrl, char *path)
{
	struct stat st;
	static char rpath[PATH_MAX];
	char *name, *ptr;
	char dir[PATH_MAX] = { 0 };

	strlcpy(dir, ctrl->cwd, sizeof(dir));

	DBG("Compose path from cwd: %s, arg: %s", ctrl->cwd, path ?: "");
	if (!path || !strlen(path))
		goto check;

	if (path) {
		if (path[0] != '/') {
			if (dir[strlen(dir) - 1] != '/')
				strlcat(dir, "/", sizeof(dir));
		}
		strlcat(dir, path, sizeof(dir));
	}

check:
	while ((ptr = strstr(dir, "//")))
		memmove(ptr, &ptr[1], strlen(&ptr[1]) + 1);

	if (!chrooted) {
		size_t len = strlen(home);

		DBG("Server path from CWD: %s", dir);
		if (len > 0 && home[len - 1] == '/')
			len--;
		memmove(dir + len, dir, strlen(dir) + 1);
		memcpy(dir, home, len);
		DBG("Resulting non-chroot path: %s", dir);
	}

	/*
	 * Handle directories slightly differently, since dirname() on a
	 * directory returns the parent directory.  So, just squash ..
	 */
	if (!stat(dir, &st) && S_ISDIR(st.st_mode)) {
		if (!realpath(dir, rpath))
			return NULL;
	} else {
		/*
		 * Check realpath() of directory containing the file, a
		 * STOR may want to save a new file.  Then append the
		 * file and return it.
		 */
		name = basename(path);
		ptr = dirname(dir);

		memset(rpath, 0, sizeof(rpath));
		if (!realpath(ptr, rpath)) {
			INFO("Failed realpath(%s): %m", ptr);
			return NULL;
		}

		if (rpath[1] != 0)
			strlcat(rpath, "/", sizeof(rpath));
		strlcat(rpath, name, sizeof(rpath));
	}

	if (!chrooted && strncmp(dir, home, strlen(home))) {
		DBG("Failed non-chroot dir:%s vs home:%s", dir, home);
		return NULL;
	}

	return rpath;
}
char *compose_path(ctrl_t *ctrl, char *path)
{
	struct stat st;
	static char rpath[PATH_MAX];
	char *name, *ptr;
	char dir[PATH_MAX] = { 0 };

	strlcpy(dir, ctrl->cwd, sizeof(dir));

	DBG("Compose path from cwd: %s, arg: %s", ctrl->cwd, path ?: "");
	if (!path || !strlen(path))
		goto check;

	if (path) {
		if (path[0] != '/') {
			if (dir[strlen(dir) - 1] != '/')
				strlcat(dir, "/", sizeof(dir));
		}
		strlcat(dir, path, sizeof(dir));
	}

check:
	while ((ptr = strstr(dir, "//")))
		memmove(ptr, &ptr[1], strlen(&ptr[1]) + 1);

	if (!chrooted) {
		size_t len = strlen(home);

		DBG("Server path from CWD: %s", dir);
		if (len > 0 && home[len - 1] == '/')
			len--;
		memmove(dir + len, dir, strlen(dir) + 1);
		memcpy(dir, home, len);
		DBG("Resulting non-chroot path: %s", dir);
	}

	/*
	 * Handle directories slightly differently, since dirname() on a
	 * directory returns the parent directory.  So, just squash ..
	 */
	if (!stat(dir, &st) && S_ISDIR(st.st_mode)) {
		if (!realpath(dir, rpath))
			return NULL;
	} else {
		/*
		 * Check realpath() of directory containing the file, a
		 * STOR may want to save a new file.  Then append the
		 * file and return it.
		 */
		name = basename(path);
		ptr = dirname(dir);

		memset(rpath, 0, sizeof(rpath));
		if (!realpath(ptr, rpath)) {
			INFO("Failed realpath(%s): %m", ptr);
			return NULL;
		}

		if (rpath[1] != 0)
			strlcat(rpath, "/", sizeof(rpath));
		strlcat(rpath, name, sizeof(rpath));
	}

	if (!chrooted && strncmp(rpath, home, strlen(home))) {
		DBG("Failed non-chroot dir:%s vs home:%s", dir, home);
		return NULL;
	}

	return rpath;
}
{'added': [(95, '\tif (!chrooted && strncmp(rpath, home, strlen(home))) {')], 'deleted': [(95, '\tif (!chrooted && strncmp(dir, home, strlen(home))) {')]}
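The one-line diff above is the entire CVE-2020-20277 fix: the containment test must run on the canonicalized rpath, because the pre-realpath() string dir can begin with the FTP root and still escape it through "..". A self-contained demonstration follows, where is_inside() is a hypothetical stand-in for the strncmp() check and the strings follow the example from the comment at the top of common.c:

#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in for compose_path()'s strncmp() containment check. */
static int is_inside(const char *candidate, const char *root)
{
	return strncmp(candidate, root, strlen(root)) == 0;
}

int main(void)
{
	const char *home  = "/srv/ftp";
	const char *dir   = "/srv/ftp/../../etc/passwd";	/* before realpath() */
	const char *rpath = "/etc/passwd";			/* what realpath(dir) resolves to */

	/* Old check (func_before): the raw string still starts with home. */
	printf("dir   check: %s\n", is_inside(dir, home) ? "pass (bug)" : "fail");

	/* New check (func_after): realpath() has already folded ".." away. */
	printf("rpath check: %s\n", is_inside(rpath, home) ? "pass" : "fail (good)");

	return 0;
}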
1
1
212
1507
https://github.com/troglobit/uftpd
CVE-2020-20277
['CWE-22']
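A note on reusing the pattern beyond this record: even on the canonicalized path, a bare strncmp() prefix test accepts siblings whose names merely begin with the root, e.g. /srv/ftp2/secret under a root of /srv/ftp. A common hardening is to require the match to end exactly at the root or at a path separator; path_inside_root() below is a hypothetical helper illustrating that, not code from uftpd:

#include <string.h>

/* Stricter containment test for an already-canonicalized path: the
 * prefix must end exactly at the root or at a '/'.  A plain strncmp()
 * prefix check would also accept "/srv/ftp2/secret" under "/srv/ftp". */
static int path_inside_root(const char *rpath, const char *root)
{
	size_t len = strlen(root);

	if (len > 0 && root[len - 1] == '/')	/* tolerate a trailing slash */
		len--;

	if (strncmp(rpath, root, len) != 0)
		return 0;

	return rpath[len] == '\0' || rpath[len] == '/';
}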
undo.c
unserialize_uep
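The listing that follows is Vim's undo.c, whose persistent-undo file is a tag- and length-prefixed binary format with every integer stored MSB first (see undo_write_bytes() and undo_read_4c() below). As a reading aid, here is a minimal sketch of the read side against plain stdio, without Vim's bufinfo_T wrapper; read_4c() is illustrative only:

#include <stdio.h>

/* Read a 4-byte MSB-first integer, returning -1 on a short read, in
 * the spirit of Vim's get4c()/undo_read_4c().  Sketch only. */
static long read_4c(FILE *fp)
{
	long n = 0;
	int i, c;

	for (i = 0; i < 4; ++i) {
		c = getc(fp);
		if (c == EOF)
			return -1;
		n = (n << 8) | c;
	}

	return n;
}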
/* vi:set ts=8 sts=4 sw=4 noet: * * VIM - Vi IMproved by Bram Moolenaar * * Do ":help uganda" in Vim to read copying and usage conditions. * Do ":help credits" in Vim to see a list of people who contributed. * See README.txt for an overview of the Vim source code. */ /* * undo.c: multi level undo facility * * The saved lines are stored in a list of lists (one for each buffer): * * b_u_oldhead------------------------------------------------+ * | * V * +--------------+ +--------------+ +--------------+ * b_u_newhead--->| u_header | | u_header | | u_header | * | uh_next------>| uh_next------>| uh_next---->NULL * NULL<--------uh_prev |<---------uh_prev |<---------uh_prev | * | uh_entry | | uh_entry | | uh_entry | * +--------|-----+ +--------|-----+ +--------|-----+ * | | | * V V V * +--------------+ +--------------+ +--------------+ * | u_entry | | u_entry | | u_entry | * | ue_next | | ue_next | | ue_next | * +--------|-----+ +--------|-----+ +--------|-----+ * | | | * V V V * +--------------+ NULL NULL * | u_entry | * | ue_next | * +--------|-----+ * | * V * etc. * * Each u_entry list contains the information for one undo or redo. * curbuf->b_u_curhead points to the header of the last undo (the next redo), * or is NULL if nothing has been undone (end of the branch). * * For keeping alternate undo/redo branches the uh_alt field is used. Thus at * each point in the list a branch may appear for an alternate to redo. The * uh_seq field is numbered sequentially to be able to find a newer or older * branch. * * +---------------+ +---------------+ * b_u_oldhead --->| u_header | | u_header | * | uh_alt_next ---->| uh_alt_next ----> NULL * NULL <----- uh_alt_prev |<------ uh_alt_prev | * | uh_prev | | uh_prev | * +-----|---------+ +-----|---------+ * | | * V V * +---------------+ +---------------+ * | u_header | | u_header | * | uh_alt_next | | uh_alt_next | * b_u_newhead --->| uh_alt_prev | | uh_alt_prev | * | uh_prev | | uh_prev | * +-----|---------+ +-----|---------+ * | | * V V * NULL +---------------+ +---------------+ * | u_header | | u_header | * | uh_alt_next ---->| uh_alt_next | * | uh_alt_prev |<------ uh_alt_prev | * | uh_prev | | uh_prev | * +-----|---------+ +-----|---------+ * | | * etc. etc. * * * All data is allocated and will all be freed when the buffer is unloaded. */ /* Uncomment the next line for including the u_check() function. This warns * for errors in the debug information. */ /* #define U_DEBUG 1 */ #define UH_MAGIC 0x18dade /* value for uh_magic when in use */ #define UE_MAGIC 0xabc123 /* value for ue_magic when in use */ /* Size of buffer used for encryption. */ #define CRYPT_BUF_SIZE 8192 #include "vim.h" /* Structure passed around between functions. * Avoids passing cryptstate_T when encryption not available. 
*/ typedef struct { buf_T *bi_buf; FILE *bi_fp; #ifdef FEAT_CRYPT cryptstate_T *bi_state; char_u *bi_buffer; /* CRYPT_BUF_SIZE, NULL when not buffering */ size_t bi_used; /* bytes written to/read from bi_buffer */ size_t bi_avail; /* bytes available in bi_buffer */ #endif } bufinfo_T; static long get_undolevel(void); static void u_unch_branch(u_header_T *uhp); static u_entry_T *u_get_headentry(void); static void u_getbot(void); static void u_doit(int count); static void u_undoredo(int undo); static void u_undo_end(int did_undo, int absolute); static void u_add_time(char_u *buf, size_t buflen, time_t tt); static void u_freeheader(buf_T *buf, u_header_T *uhp, u_header_T **uhpp); static void u_freebranch(buf_T *buf, u_header_T *uhp, u_header_T **uhpp); static void u_freeentries(buf_T *buf, u_header_T *uhp, u_header_T **uhpp); static void u_freeentry(u_entry_T *, long); #ifdef FEAT_PERSISTENT_UNDO static void corruption_error(char *mesg, char_u *file_name); static void u_free_uhp(u_header_T *uhp); static int undo_write(bufinfo_T *bi, char_u *ptr, size_t len); # ifdef FEAT_CRYPT static int undo_flush(bufinfo_T *bi); # endif static int fwrite_crypt(bufinfo_T *bi, char_u *ptr, size_t len); static int undo_write_bytes(bufinfo_T *bi, long_u nr, int len); static void put_header_ptr(bufinfo_T *bi, u_header_T *uhp); static int undo_read_4c(bufinfo_T *bi); static int undo_read_2c(bufinfo_T *bi); static int undo_read_byte(bufinfo_T *bi); static time_t undo_read_time(bufinfo_T *bi); static int undo_read(bufinfo_T *bi, char_u *buffer, size_t size); static char_u *read_string_decrypt(bufinfo_T *bi, int len); static int serialize_header(bufinfo_T *bi, char_u *hash); static int serialize_uhp(bufinfo_T *bi, u_header_T *uhp); static u_header_T *unserialize_uhp(bufinfo_T *bi, char_u *file_name); static int serialize_uep(bufinfo_T *bi, u_entry_T *uep); static u_entry_T *unserialize_uep(bufinfo_T *bi, int *error, char_u *file_name); static void serialize_pos(bufinfo_T *bi, pos_T pos); static void unserialize_pos(bufinfo_T *bi, pos_T *pos); static void serialize_visualinfo(bufinfo_T *bi, visualinfo_T *info); static void unserialize_visualinfo(bufinfo_T *bi, visualinfo_T *info); #endif #define U_ALLOC_LINE(size) lalloc((long_u)(size), FALSE) static char_u *u_save_line(linenr_T); /* used in undo_end() to report number of added and deleted lines */ static long u_newcount, u_oldcount; /* * When 'u' flag included in 'cpoptions', we behave like vi. Need to remember * the action that "u" should do. */ static int undo_undoes = FALSE; static int lastmark = 0; #if defined(U_DEBUG) || defined(PROTO) /* * Check the undo structures for being valid. Print a warning when something * looks wrong. */ static int seen_b_u_curhead; static int seen_b_u_newhead; static int header_count; static void u_check_tree(u_header_T *uhp, u_header_T *exp_uh_next, u_header_T *exp_uh_alt_prev) { u_entry_T *uep; if (uhp == NULL) return; ++header_count; if (uhp == curbuf->b_u_curhead && ++seen_b_u_curhead > 1) { EMSG("b_u_curhead found twice (looping?)"); return; } if (uhp == curbuf->b_u_newhead && ++seen_b_u_newhead > 1) { EMSG("b_u_newhead found twice (looping?)"); return; } if (uhp->uh_magic != UH_MAGIC) EMSG("uh_magic wrong (may be using freed memory)"); else { /* Check pointers back are correct. 
*/ if (uhp->uh_next.ptr != exp_uh_next) { EMSG("uh_next wrong"); smsg((char_u *)"expected: 0x%x, actual: 0x%x", exp_uh_next, uhp->uh_next.ptr); } if (uhp->uh_alt_prev.ptr != exp_uh_alt_prev) { EMSG("uh_alt_prev wrong"); smsg((char_u *)"expected: 0x%x, actual: 0x%x", exp_uh_alt_prev, uhp->uh_alt_prev.ptr); } /* Check the undo tree at this header. */ for (uep = uhp->uh_entry; uep != NULL; uep = uep->ue_next) { if (uep->ue_magic != UE_MAGIC) { EMSG("ue_magic wrong (may be using freed memory)"); break; } } /* Check the next alt tree. */ u_check_tree(uhp->uh_alt_next.ptr, uhp->uh_next.ptr, uhp); /* Check the next header in this branch. */ u_check_tree(uhp->uh_prev.ptr, uhp, NULL); } } static void u_check(int newhead_may_be_NULL) { seen_b_u_newhead = 0; seen_b_u_curhead = 0; header_count = 0; u_check_tree(curbuf->b_u_oldhead, NULL, NULL); if (seen_b_u_newhead == 0 && curbuf->b_u_oldhead != NULL && !(newhead_may_be_NULL && curbuf->b_u_newhead == NULL)) EMSGN("b_u_newhead invalid: 0x%x", curbuf->b_u_newhead); if (curbuf->b_u_curhead != NULL && seen_b_u_curhead == 0) EMSGN("b_u_curhead invalid: 0x%x", curbuf->b_u_curhead); if (header_count != curbuf->b_u_numhead) { EMSG("b_u_numhead invalid"); smsg((char_u *)"expected: %ld, actual: %ld", (long)header_count, (long)curbuf->b_u_numhead); } } #endif /* * Save the current line for both the "u" and "U" command. * Careful: may trigger autocommands that reload the buffer. * Returns OK or FAIL. */ int u_save_cursor(void) { return (u_save((linenr_T)(curwin->w_cursor.lnum - 1), (linenr_T)(curwin->w_cursor.lnum + 1))); } /* * Save the lines between "top" and "bot" for both the "u" and "U" command. * "top" may be 0 and bot may be curbuf->b_ml.ml_line_count + 1. * Careful: may trigger autocommands that reload the buffer. * Returns FAIL when lines could not be saved, OK otherwise. */ int u_save(linenr_T top, linenr_T bot) { if (undo_off) return OK; if (top > curbuf->b_ml.ml_line_count || top >= bot || bot > curbuf->b_ml.ml_line_count + 1) return FALSE; /* rely on caller to do error messages */ if (top + 2 == bot) u_saveline((linenr_T)(top + 1)); return (u_savecommon(top, bot, (linenr_T)0, FALSE)); } /* * Save the line "lnum" (used by ":s" and "~" command). * The line is replaced, so the new bottom line is lnum + 1. * Careful: may trigger autocommands that reload the buffer. * Returns FAIL when lines could not be saved, OK otherwise. */ int u_savesub(linenr_T lnum) { if (undo_off) return OK; return (u_savecommon(lnum - 1, lnum + 1, lnum + 1, FALSE)); } /* * A new line is inserted before line "lnum" (used by :s command). * The line is inserted, so the new bottom line is lnum + 1. * Careful: may trigger autocommands that reload the buffer. * Returns FAIL when lines could not be saved, OK otherwise. */ int u_inssub(linenr_T lnum) { if (undo_off) return OK; return (u_savecommon(lnum - 1, lnum, lnum + 1, FALSE)); } /* * Save the lines "lnum" - "lnum" + nlines (used by delete command). * The lines are deleted, so the new bottom line is lnum, unless the buffer * becomes empty. * Careful: may trigger autocommands that reload the buffer. * Returns FAIL when lines could not be saved, OK otherwise. */ int u_savedel(linenr_T lnum, long nlines) { if (undo_off) return OK; return (u_savecommon(lnum - 1, lnum + nlines, nlines == curbuf->b_ml.ml_line_count ? 2 : lnum, FALSE)); } /* * Return TRUE when undo is allowed. Otherwise give an error message and * return FALSE. */ int undo_allowed(void) { /* Don't allow changes when 'modifiable' is off. 
*/ if (!curbuf->b_p_ma) { EMSG(_(e_modifiable)); return FALSE; } #ifdef HAVE_SANDBOX /* In the sandbox it's not allowed to change the text. */ if (sandbox != 0) { EMSG(_(e_sandbox)); return FALSE; } #endif /* Don't allow changes in the buffer while editing the cmdline. The * caller of getcmdline() may get confused. */ if (textlock != 0) { EMSG(_(e_secure)); return FALSE; } return TRUE; } /* * Get the undolevle value for the current buffer. */ static long get_undolevel(void) { if (curbuf->b_p_ul == NO_LOCAL_UNDOLEVEL) return p_ul; return curbuf->b_p_ul; } /* * Common code for various ways to save text before a change. * "top" is the line above the first changed line. * "bot" is the line below the last changed line. * "newbot" is the new bottom line. Use zero when not known. * "reload" is TRUE when saving for a buffer reload. * Careful: may trigger autocommands that reload the buffer. * Returns FAIL when lines could not be saved, OK otherwise. */ int u_savecommon( linenr_T top, linenr_T bot, linenr_T newbot, int reload) { linenr_T lnum; long i; u_header_T *uhp; u_header_T *old_curhead; u_entry_T *uep; u_entry_T *prev_uep; long size; if (!reload) { /* When making changes is not allowed return FAIL. It's a crude way * to make all change commands fail. */ if (!undo_allowed()) return FAIL; #ifdef FEAT_NETBEANS_INTG /* * Netbeans defines areas that cannot be modified. Bail out here when * trying to change text in a guarded area. */ if (netbeans_active()) { if (netbeans_is_guarded(top, bot)) { EMSG(_(e_guarded)); return FAIL; } if (curbuf->b_p_ro) { EMSG(_(e_nbreadonly)); return FAIL; } } #endif #ifdef FEAT_AUTOCMD /* * Saving text for undo means we are going to make a change. Give a * warning for a read-only file before making the change, so that the * FileChangedRO event can replace the buffer with a read-write version * (e.g., obtained from a source control system). */ change_warning(0); if (bot > curbuf->b_ml.ml_line_count + 1) { /* This happens when the FileChangedRO autocommand changes the * file in a way it becomes shorter. */ EMSG(_("E881: Line count changed unexpectedly")); return FAIL; } #endif } #ifdef U_DEBUG u_check(FALSE); #endif size = bot - top - 1; /* * If curbuf->b_u_synced == TRUE make a new header. */ if (curbuf->b_u_synced) { #ifdef FEAT_JUMPLIST /* Need to create new entry in b_changelist. */ curbuf->b_new_change = TRUE; #endif if (get_undolevel() >= 0) { /* * Make a new header entry. Do this first so that we don't mess * up the undo info when out of memory. */ uhp = (u_header_T *)U_ALLOC_LINE(sizeof(u_header_T)); if (uhp == NULL) goto nomem; #ifdef U_DEBUG uhp->uh_magic = UH_MAGIC; #endif } else uhp = NULL; /* * If we undid more than we redid, move the entry lists before and * including curbuf->b_u_curhead to an alternate branch. */ old_curhead = curbuf->b_u_curhead; if (old_curhead != NULL) { curbuf->b_u_newhead = old_curhead->uh_next.ptr; curbuf->b_u_curhead = NULL; } /* * free headers to keep the size right */ while (curbuf->b_u_numhead > get_undolevel() && curbuf->b_u_oldhead != NULL) { u_header_T *uhfree = curbuf->b_u_oldhead; if (uhfree == old_curhead) /* Can't reconnect the branch, delete all of it. */ u_freebranch(curbuf, uhfree, &old_curhead); else if (uhfree->uh_alt_next.ptr == NULL) /* There is no branch, only free one header. */ u_freeheader(curbuf, uhfree, &old_curhead); else { /* Free the oldest alternate branch as a whole. 
*/ while (uhfree->uh_alt_next.ptr != NULL) uhfree = uhfree->uh_alt_next.ptr; u_freebranch(curbuf, uhfree, &old_curhead); } #ifdef U_DEBUG u_check(TRUE); #endif } if (uhp == NULL) /* no undo at all */ { if (old_curhead != NULL) u_freebranch(curbuf, old_curhead, NULL); curbuf->b_u_synced = FALSE; return OK; } uhp->uh_prev.ptr = NULL; uhp->uh_next.ptr = curbuf->b_u_newhead; uhp->uh_alt_next.ptr = old_curhead; if (old_curhead != NULL) { uhp->uh_alt_prev.ptr = old_curhead->uh_alt_prev.ptr; if (uhp->uh_alt_prev.ptr != NULL) uhp->uh_alt_prev.ptr->uh_alt_next.ptr = uhp; old_curhead->uh_alt_prev.ptr = uhp; if (curbuf->b_u_oldhead == old_curhead) curbuf->b_u_oldhead = uhp; } else uhp->uh_alt_prev.ptr = NULL; if (curbuf->b_u_newhead != NULL) curbuf->b_u_newhead->uh_prev.ptr = uhp; uhp->uh_seq = ++curbuf->b_u_seq_last; curbuf->b_u_seq_cur = uhp->uh_seq; uhp->uh_time = vim_time(); uhp->uh_save_nr = 0; curbuf->b_u_time_cur = uhp->uh_time + 1; uhp->uh_walk = 0; uhp->uh_entry = NULL; uhp->uh_getbot_entry = NULL; uhp->uh_cursor = curwin->w_cursor; /* save cursor pos. for undo */ #ifdef FEAT_VIRTUALEDIT if (virtual_active() && curwin->w_cursor.coladd > 0) uhp->uh_cursor_vcol = getviscol(); else uhp->uh_cursor_vcol = -1; #endif /* save changed and buffer empty flag for undo */ uhp->uh_flags = (curbuf->b_changed ? UH_CHANGED : 0) + ((curbuf->b_ml.ml_flags & ML_EMPTY) ? UH_EMPTYBUF : 0); /* save named marks and Visual marks for undo */ mch_memmove(uhp->uh_namedm, curbuf->b_namedm, sizeof(pos_T) * NMARKS); uhp->uh_visual = curbuf->b_visual; curbuf->b_u_newhead = uhp; if (curbuf->b_u_oldhead == NULL) curbuf->b_u_oldhead = uhp; ++curbuf->b_u_numhead; } else { if (get_undolevel() < 0) /* no undo at all */ return OK; /* * When saving a single line, and it has been saved just before, it * doesn't make sense saving it again. Saves a lot of memory when * making lots of changes inside the same line. * This is only possible if the previous change didn't increase or * decrease the number of lines. * Check the ten last changes. More doesn't make sense and takes too * long. */ if (size == 1) { uep = u_get_headentry(); prev_uep = NULL; for (i = 0; i < 10; ++i) { if (uep == NULL) break; /* If lines have been inserted/deleted we give up. * Also when the line was included in a multi-line save. */ if ((curbuf->b_u_newhead->uh_getbot_entry != uep ? (uep->ue_top + uep->ue_size + 1 != (uep->ue_bot == 0 ? curbuf->b_ml.ml_line_count + 1 : uep->ue_bot)) : uep->ue_lcount != curbuf->b_ml.ml_line_count) || (uep->ue_size > 1 && top >= uep->ue_top && top + 2 <= uep->ue_top + uep->ue_size + 1)) break; /* If it's the same line we can skip saving it again. */ if (uep->ue_size == 1 && uep->ue_top == top) { if (i > 0) { /* It's not the last entry: get ue_bot for the last * entry now. Following deleted/inserted lines go to * the re-used entry. */ u_getbot(); curbuf->b_u_synced = FALSE; /* Move the found entry to become the last entry. The * order of undo/redo doesn't matter for the entries * we move it over, since they don't change the line * count and don't include this line. It does matter * for the found entry if the line count is changed by * the executed command. */ prev_uep->ue_next = uep->ue_next; uep->ue_next = curbuf->b_u_newhead->uh_entry; curbuf->b_u_newhead->uh_entry = uep; } /* The executed command may change the line count. 
*/ if (newbot != 0) uep->ue_bot = newbot; else if (bot > curbuf->b_ml.ml_line_count) uep->ue_bot = 0; else { uep->ue_lcount = curbuf->b_ml.ml_line_count; curbuf->b_u_newhead->uh_getbot_entry = uep; } return OK; } prev_uep = uep; uep = uep->ue_next; } } /* find line number for ue_bot for previous u_save() */ u_getbot(); } #if !defined(UNIX) && !defined(WIN32) /* * With Amiga we can't handle big undo's, because * then u_alloc_line would have to allocate a block larger than 32K */ if (size >= 8000) goto nomem; #endif /* * add lines in front of entry list */ uep = (u_entry_T *)U_ALLOC_LINE(sizeof(u_entry_T)); if (uep == NULL) goto nomem; vim_memset(uep, 0, sizeof(u_entry_T)); #ifdef U_DEBUG uep->ue_magic = UE_MAGIC; #endif uep->ue_size = size; uep->ue_top = top; if (newbot != 0) uep->ue_bot = newbot; /* * Use 0 for ue_bot if bot is below last line. * Otherwise we have to compute ue_bot later. */ else if (bot > curbuf->b_ml.ml_line_count) uep->ue_bot = 0; else { uep->ue_lcount = curbuf->b_ml.ml_line_count; curbuf->b_u_newhead->uh_getbot_entry = uep; } if (size > 0) { if ((uep->ue_array = (char_u **)U_ALLOC_LINE( sizeof(char_u *) * size)) == NULL) { u_freeentry(uep, 0L); goto nomem; } for (i = 0, lnum = top + 1; i < size; ++i) { fast_breakcheck(); if (got_int) { u_freeentry(uep, i); return FAIL; } if ((uep->ue_array[i] = u_save_line(lnum++)) == NULL) { u_freeentry(uep, i); goto nomem; } } } else uep->ue_array = NULL; uep->ue_next = curbuf->b_u_newhead->uh_entry; curbuf->b_u_newhead->uh_entry = uep; curbuf->b_u_synced = FALSE; undo_undoes = FALSE; #ifdef U_DEBUG u_check(FALSE); #endif return OK; nomem: msg_silent = 0; /* must display the prompt */ if (ask_yesno((char_u *)_("No undo possible; continue anyway"), TRUE) == 'y') { undo_off = TRUE; /* will be reset when character typed */ return OK; } do_outofmem_msg((long_u)0); return FAIL; } #if defined(FEAT_PERSISTENT_UNDO) || defined(PROTO) # define UF_START_MAGIC "Vim\237UnDo\345" /* magic at start of undofile */ # define UF_START_MAGIC_LEN 9 # define UF_HEADER_MAGIC 0x5fd0 /* magic at start of header */ # define UF_HEADER_END_MAGIC 0xe7aa /* magic after last header */ # define UF_ENTRY_MAGIC 0xf518 /* magic at start of entry */ # define UF_ENTRY_END_MAGIC 0x3581 /* magic after last entry */ # define UF_VERSION 2 /* 2-byte undofile version number */ # define UF_VERSION_CRYPT 0x8002 /* idem, encrypted */ /* extra fields for header */ # define UF_LAST_SAVE_NR 1 /* extra fields for uhp */ # define UHP_SAVE_NR 1 static char_u e_not_open[] = N_("E828: Cannot open undo file for writing: %s"); /* * Compute the hash for the current buffer text into hash[UNDO_HASH_SIZE]. */ void u_compute_hash(char_u *hash) { context_sha256_T ctx; linenr_T lnum; char_u *p; sha256_start(&ctx); for (lnum = 1; lnum <= curbuf->b_ml.ml_line_count; ++lnum) { p = ml_get(lnum); sha256_update(&ctx, p, (UINT32_T)(STRLEN(p) + 1)); } sha256_finish(&ctx, hash); } /* * Return an allocated string of the full path of the target undofile. * When "reading" is TRUE find the file to read, go over all directories in * 'undodir'. * When "reading" is FALSE use the first name where the directory exists. * Returns NULL when there is no place to write or no file to read. 
*/ char_u * u_get_undo_file_name(char_u *buf_ffname, int reading) { char_u *dirp; char_u dir_name[IOSIZE + 1]; char_u *munged_name = NULL; char_u *undo_file_name = NULL; int dir_len; char_u *p; stat_T st; char_u *ffname = buf_ffname; #ifdef HAVE_READLINK char_u fname_buf[MAXPATHL]; #endif if (ffname == NULL) return NULL; #ifdef HAVE_READLINK /* Expand symlink in the file name, so that we put the undo file with the * actual file instead of with the symlink. */ if (resolve_symlink(ffname, fname_buf) == OK) ffname = fname_buf; #endif /* Loop over 'undodir'. When reading find the first file that exists. * When not reading use the first directory that exists or ".". */ dirp = p_udir; while (*dirp != NUL) { dir_len = copy_option_part(&dirp, dir_name, IOSIZE, ","); if (dir_len == 1 && dir_name[0] == '.') { /* Use same directory as the ffname, * "dir/name" -> "dir/.name.un~" */ undo_file_name = vim_strnsave(ffname, (int)(STRLEN(ffname) + 5)); if (undo_file_name == NULL) break; p = gettail(undo_file_name); #ifdef VMS /* VMS can not handle more than one dot in the filenames * use "dir/name" -> "dir/_un_name" - add _un_ * at the beginning to keep the extension */ mch_memmove(p + 4, p, STRLEN(p) + 1); mch_memmove(p, "_un_", 4); #else /* Use same directory as the ffname, * "dir/name" -> "dir/.name.un~" */ mch_memmove(p + 1, p, STRLEN(p) + 1); *p = '.'; STRCAT(p, ".un~"); #endif } else { dir_name[dir_len] = NUL; if (mch_isdir(dir_name)) { if (munged_name == NULL) { munged_name = vim_strsave(ffname); if (munged_name == NULL) return NULL; for (p = munged_name; *p != NUL; mb_ptr_adv(p)) if (vim_ispathsep(*p)) *p = '%'; } undo_file_name = concat_fnames(dir_name, munged_name, TRUE); } } /* When reading check if the file exists. */ if (undo_file_name != NULL && (!reading || mch_stat((char *)undo_file_name, &st) >= 0)) break; vim_free(undo_file_name); undo_file_name = NULL; } vim_free(munged_name); return undo_file_name; } static void corruption_error(char *mesg, char_u *file_name) { EMSG3(_("E825: Corrupted undo file (%s): %s"), mesg, file_name); } static void u_free_uhp(u_header_T *uhp) { u_entry_T *nuep; u_entry_T *uep; uep = uhp->uh_entry; while (uep != NULL) { nuep = uep->ue_next; u_freeentry(uep, uep->ue_size); uep = nuep; } vim_free(uhp); } /* * Write a sequence of bytes to the undo file. * Buffers and encrypts as needed. * Returns OK or FAIL. */ static int undo_write(bufinfo_T *bi, char_u *ptr, size_t len) { #ifdef FEAT_CRYPT if (bi->bi_buffer != NULL) { size_t len_todo = len; char_u *p = ptr; while (bi->bi_used + len_todo >= CRYPT_BUF_SIZE) { size_t n = CRYPT_BUF_SIZE - bi->bi_used; mch_memmove(bi->bi_buffer + bi->bi_used, p, n); len_todo -= n; p += n; bi->bi_used = CRYPT_BUF_SIZE; if (undo_flush(bi) == FAIL) return FAIL; } if (len_todo > 0) { mch_memmove(bi->bi_buffer + bi->bi_used, p, len_todo); bi->bi_used += len_todo; } return OK; } #endif if (fwrite(ptr, len, (size_t)1, bi->bi_fp) != 1) return FAIL; return OK; } #ifdef FEAT_CRYPT static int undo_flush(bufinfo_T *bi) { if (bi->bi_buffer != NULL && bi->bi_used > 0) { crypt_encode_inplace(bi->bi_state, bi->bi_buffer, bi->bi_used); if (fwrite(bi->bi_buffer, bi->bi_used, (size_t)1, bi->bi_fp) != 1) return FAIL; bi->bi_used = 0; } return OK; } #endif /* * Write "ptr[len]" and crypt the bytes when needed. * Returns OK or FAIL. 
*/ static int fwrite_crypt(bufinfo_T *bi, char_u *ptr, size_t len) { #ifdef FEAT_CRYPT char_u *copy; char_u small_buf[100]; size_t i; if (bi->bi_state != NULL && bi->bi_buffer == NULL) { /* crypting every piece of text separately */ if (len < 100) copy = small_buf; /* no malloc()/free() for short strings */ else { copy = lalloc(len, FALSE); if (copy == NULL) return 0; } crypt_encode(bi->bi_state, ptr, len, copy); i = fwrite(copy, len, (size_t)1, bi->bi_fp); if (copy != small_buf) vim_free(copy); return i == 1 ? OK : FAIL; } #endif return undo_write(bi, ptr, len); } /* * Write a number, MSB first, in "len" bytes. * Must match with undo_read_?c() functions. * Returns OK or FAIL. */ static int undo_write_bytes(bufinfo_T *bi, long_u nr, int len) { char_u buf[8]; int i; int bufi = 0; for (i = len - 1; i >= 0; --i) buf[bufi++] = (char_u)(nr >> (i * 8)); return undo_write(bi, buf, (size_t)len); } /* * Write the pointer to an undo header. Instead of writing the pointer itself * we use the sequence number of the header. This is converted back to * pointers when reading. */ static void put_header_ptr(bufinfo_T *bi, u_header_T *uhp) { undo_write_bytes(bi, (long_u)(uhp != NULL ? uhp->uh_seq : 0), 4); } static int undo_read_4c(bufinfo_T *bi) { #ifdef FEAT_CRYPT if (bi->bi_buffer != NULL) { char_u buf[4]; int n; undo_read(bi, buf, (size_t)4); n = ((unsigned)buf[0] << 24) + (buf[1] << 16) + (buf[2] << 8) + buf[3]; return n; } #endif return get4c(bi->bi_fp); } static int undo_read_2c(bufinfo_T *bi) { #ifdef FEAT_CRYPT if (bi->bi_buffer != NULL) { char_u buf[2]; int n; undo_read(bi, buf, (size_t)2); n = (buf[0] << 8) + buf[1]; return n; } #endif return get2c(bi->bi_fp); } static int undo_read_byte(bufinfo_T *bi) { #ifdef FEAT_CRYPT if (bi->bi_buffer != NULL) { char_u buf[1]; undo_read(bi, buf, (size_t)1); return buf[0]; } #endif return getc(bi->bi_fp); } static time_t undo_read_time(bufinfo_T *bi) { #ifdef FEAT_CRYPT if (bi->bi_buffer != NULL) { char_u buf[8]; time_t n = 0; int i; undo_read(bi, buf, (size_t)8); for (i = 0; i < 8; ++i) n = (n << 8) + buf[i]; return n; } #endif return get8ctime(bi->bi_fp); } /* * Read "buffer[size]" from the undo file. * Return OK or FAIL. */ static int undo_read(bufinfo_T *bi, char_u *buffer, size_t size) { #ifdef FEAT_CRYPT if (bi->bi_buffer != NULL) { int size_todo = (int)size; char_u *p = buffer; while (size_todo > 0) { size_t n; if (bi->bi_used >= bi->bi_avail) { n = fread(bi->bi_buffer, 1, (size_t)CRYPT_BUF_SIZE, bi->bi_fp); if (n == 0) { /* Error may be checked for only later. Fill with zeros, * so that the reader won't use garbage. */ vim_memset(p, 0, size_todo); return FAIL; } bi->bi_avail = n; bi->bi_used = 0; crypt_decode_inplace(bi->bi_state, bi->bi_buffer, bi->bi_avail); } n = size_todo; if (n > bi->bi_avail - bi->bi_used) n = bi->bi_avail - bi->bi_used; mch_memmove(p, bi->bi_buffer + bi->bi_used, n); bi->bi_used += n; size_todo -= (int)n; p += n; } return OK; } #endif if (fread(buffer, (size_t)size, 1, bi->bi_fp) != 1) return FAIL; return OK; } /* * Read a string of length "len" from "bi->bi_fd". * "len" can be zero to allocate an empty line. * Decrypt the bytes if needed. * Append a NUL. * Returns a pointer to allocated memory or NULL for failure. 
*/ static char_u * read_string_decrypt(bufinfo_T *bi, int len) { char_u *ptr = alloc((unsigned)len + 1); if (ptr != NULL) { if (len > 0 && undo_read(bi, ptr, len) == FAIL) { vim_free(ptr); return NULL; } ptr[len] = NUL; #ifdef FEAT_CRYPT if (bi->bi_state != NULL && bi->bi_buffer == NULL) crypt_decode_inplace(bi->bi_state, ptr, len); #endif } return ptr; } /* * Writes the (not encrypted) header and initializes encryption if needed. */ static int serialize_header(bufinfo_T *bi, char_u *hash) { int len; buf_T *buf = bi->bi_buf; FILE *fp = bi->bi_fp; char_u time_buf[8]; /* Start writing, first the magic marker and undo info version. */ if (fwrite(UF_START_MAGIC, (size_t)UF_START_MAGIC_LEN, (size_t)1, fp) != 1) return FAIL; /* If the buffer is encrypted then all text bytes following will be * encrypted. Numbers and other info is not crypted. */ #ifdef FEAT_CRYPT if (*buf->b_p_key != NUL) { char_u *header; int header_len; undo_write_bytes(bi, (long_u)UF_VERSION_CRYPT, 2); bi->bi_state = crypt_create_for_writing(crypt_get_method_nr(buf), buf->b_p_key, &header, &header_len); if (bi->bi_state == NULL) return FAIL; len = (int)fwrite(header, (size_t)header_len, (size_t)1, fp); vim_free(header); if (len != 1) { crypt_free_state(bi->bi_state); bi->bi_state = NULL; return FAIL; } if (crypt_whole_undofile(crypt_get_method_nr(buf))) { bi->bi_buffer = alloc(CRYPT_BUF_SIZE); if (bi->bi_buffer == NULL) { crypt_free_state(bi->bi_state); bi->bi_state = NULL; return FAIL; } bi->bi_used = 0; } } else #endif undo_write_bytes(bi, (long_u)UF_VERSION, 2); /* Write a hash of the buffer text, so that we can verify it is still the * same when reading the buffer text. */ if (undo_write(bi, hash, (size_t)UNDO_HASH_SIZE) == FAIL) return FAIL; /* buffer-specific data */ undo_write_bytes(bi, (long_u)buf->b_ml.ml_line_count, 4); len = buf->b_u_line_ptr != NULL ? (int)STRLEN(buf->b_u_line_ptr) : 0; undo_write_bytes(bi, (long_u)len, 4); if (len > 0 && fwrite_crypt(bi, buf->b_u_line_ptr, (size_t)len) == FAIL) return FAIL; undo_write_bytes(bi, (long_u)buf->b_u_line_lnum, 4); undo_write_bytes(bi, (long_u)buf->b_u_line_colnr, 4); /* Undo structures header data */ put_header_ptr(bi, buf->b_u_oldhead); put_header_ptr(bi, buf->b_u_newhead); put_header_ptr(bi, buf->b_u_curhead); undo_write_bytes(bi, (long_u)buf->b_u_numhead, 4); undo_write_bytes(bi, (long_u)buf->b_u_seq_last, 4); undo_write_bytes(bi, (long_u)buf->b_u_seq_cur, 4); time_to_bytes(buf->b_u_time_cur, time_buf); undo_write(bi, time_buf, 8); /* Optional fields. */ undo_write_bytes(bi, 4, 1); undo_write_bytes(bi, UF_LAST_SAVE_NR, 1); undo_write_bytes(bi, (long_u)buf->b_u_save_nr_last, 4); undo_write_bytes(bi, 0, 1); /* end marker */ return OK; } static int serialize_uhp(bufinfo_T *bi, u_header_T *uhp) { int i; u_entry_T *uep; char_u time_buf[8]; if (undo_write_bytes(bi, (long_u)UF_HEADER_MAGIC, 2) == FAIL) return FAIL; put_header_ptr(bi, uhp->uh_next.ptr); put_header_ptr(bi, uhp->uh_prev.ptr); put_header_ptr(bi, uhp->uh_alt_next.ptr); put_header_ptr(bi, uhp->uh_alt_prev.ptr); undo_write_bytes(bi, uhp->uh_seq, 4); serialize_pos(bi, uhp->uh_cursor); #ifdef FEAT_VIRTUALEDIT undo_write_bytes(bi, (long_u)uhp->uh_cursor_vcol, 4); #else undo_write_bytes(bi, (long_u)0, 4); #endif undo_write_bytes(bi, (long_u)uhp->uh_flags, 2); /* Assume NMARKS will stay the same. */ for (i = 0; i < NMARKS; ++i) serialize_pos(bi, uhp->uh_namedm[i]); serialize_visualinfo(bi, &uhp->uh_visual); time_to_bytes(uhp->uh_time, time_buf); undo_write(bi, time_buf, 8); /* Optional fields. 
*/ undo_write_bytes(bi, 4, 1); undo_write_bytes(bi, UHP_SAVE_NR, 1); undo_write_bytes(bi, (long_u)uhp->uh_save_nr, 4); undo_write_bytes(bi, 0, 1); /* end marker */ /* Write all the entries. */ for (uep = uhp->uh_entry; uep != NULL; uep = uep->ue_next) { undo_write_bytes(bi, (long_u)UF_ENTRY_MAGIC, 2); if (serialize_uep(bi, uep) == FAIL) return FAIL; } undo_write_bytes(bi, (long_u)UF_ENTRY_END_MAGIC, 2); return OK; } static u_header_T * unserialize_uhp(bufinfo_T *bi, char_u *file_name) { u_header_T *uhp; int i; u_entry_T *uep, *last_uep; int c; int error; uhp = (u_header_T *)U_ALLOC_LINE(sizeof(u_header_T)); if (uhp == NULL) return NULL; vim_memset(uhp, 0, sizeof(u_header_T)); #ifdef U_DEBUG uhp->uh_magic = UH_MAGIC; #endif uhp->uh_next.seq = undo_read_4c(bi); uhp->uh_prev.seq = undo_read_4c(bi); uhp->uh_alt_next.seq = undo_read_4c(bi); uhp->uh_alt_prev.seq = undo_read_4c(bi); uhp->uh_seq = undo_read_4c(bi); if (uhp->uh_seq <= 0) { corruption_error("uh_seq", file_name); vim_free(uhp); return NULL; } unserialize_pos(bi, &uhp->uh_cursor); #ifdef FEAT_VIRTUALEDIT uhp->uh_cursor_vcol = undo_read_4c(bi); #else (void)undo_read_4c(bi); #endif uhp->uh_flags = undo_read_2c(bi); for (i = 0; i < NMARKS; ++i) unserialize_pos(bi, &uhp->uh_namedm[i]); unserialize_visualinfo(bi, &uhp->uh_visual); uhp->uh_time = undo_read_time(bi); /* Optional fields. */ for (;;) { int len = undo_read_byte(bi); int what; if (len == 0) break; what = undo_read_byte(bi); switch (what) { case UHP_SAVE_NR: uhp->uh_save_nr = undo_read_4c(bi); break; default: /* field not supported, skip */ while (--len >= 0) (void)undo_read_byte(bi); } } /* Unserialize the uep list. */ last_uep = NULL; while ((c = undo_read_2c(bi)) == UF_ENTRY_MAGIC) { error = FALSE; uep = unserialize_uep(bi, &error, file_name); if (last_uep == NULL) uhp->uh_entry = uep; else last_uep->ue_next = uep; last_uep = uep; if (uep == NULL || error) { u_free_uhp(uhp); return NULL; } } if (c != UF_ENTRY_END_MAGIC) { corruption_error("entry end", file_name); u_free_uhp(uhp); return NULL; } return uhp; } /* * Serialize "uep". 
*/ static int serialize_uep( bufinfo_T *bi, u_entry_T *uep) { int i; size_t len; undo_write_bytes(bi, (long_u)uep->ue_top, 4); undo_write_bytes(bi, (long_u)uep->ue_bot, 4); undo_write_bytes(bi, (long_u)uep->ue_lcount, 4); undo_write_bytes(bi, (long_u)uep->ue_size, 4); for (i = 0; i < uep->ue_size; ++i) { len = STRLEN(uep->ue_array[i]); if (undo_write_bytes(bi, (long_u)len, 4) == FAIL) return FAIL; if (len > 0 && fwrite_crypt(bi, uep->ue_array[i], len) == FAIL) return FAIL; } return OK; } static u_entry_T * unserialize_uep(bufinfo_T *bi, int *error, char_u *file_name) { int i; u_entry_T *uep; char_u **array; char_u *line; int line_len; uep = (u_entry_T *)U_ALLOC_LINE(sizeof(u_entry_T)); if (uep == NULL) return NULL; vim_memset(uep, 0, sizeof(u_entry_T)); #ifdef U_DEBUG uep->ue_magic = UE_MAGIC; #endif uep->ue_top = undo_read_4c(bi); uep->ue_bot = undo_read_4c(bi); uep->ue_lcount = undo_read_4c(bi); uep->ue_size = undo_read_4c(bi); if (uep->ue_size > 0) { array = (char_u **)U_ALLOC_LINE(sizeof(char_u *) * uep->ue_size); if (array == NULL) { *error = TRUE; return uep; } vim_memset(array, 0, sizeof(char_u *) * uep->ue_size); } else array = NULL; uep->ue_array = array; for (i = 0; i < uep->ue_size; ++i) { line_len = undo_read_4c(bi); if (line_len >= 0) line = read_string_decrypt(bi, line_len); else { line = NULL; corruption_error("line length", file_name); } if (line == NULL) { *error = TRUE; return uep; } array[i] = line; } return uep; } /* * Serialize "pos". */ static void serialize_pos(bufinfo_T *bi, pos_T pos) { undo_write_bytes(bi, (long_u)pos.lnum, 4); undo_write_bytes(bi, (long_u)pos.col, 4); #ifdef FEAT_VIRTUALEDIT undo_write_bytes(bi, (long_u)pos.coladd, 4); #else undo_write_bytes(bi, (long_u)0, 4); #endif } /* * Unserialize the pos_T at the current position. */ static void unserialize_pos(bufinfo_T *bi, pos_T *pos) { pos->lnum = undo_read_4c(bi); if (pos->lnum < 0) pos->lnum = 0; pos->col = undo_read_4c(bi); if (pos->col < 0) pos->col = 0; #ifdef FEAT_VIRTUALEDIT pos->coladd = undo_read_4c(bi); if (pos->coladd < 0) pos->coladd = 0; #else (void)undo_read_4c(bi); #endif } /* * Serialize "info". */ static void serialize_visualinfo(bufinfo_T *bi, visualinfo_T *info) { serialize_pos(bi, info->vi_start); serialize_pos(bi, info->vi_end); undo_write_bytes(bi, (long_u)info->vi_mode, 4); undo_write_bytes(bi, (long_u)info->vi_curswant, 4); } /* * Unserialize the visualinfo_T at the current position. */ static void unserialize_visualinfo(bufinfo_T *bi, visualinfo_T *info) { unserialize_pos(bi, &info->vi_start); unserialize_pos(bi, &info->vi_end); info->vi_mode = undo_read_4c(bi); info->vi_curswant = undo_read_4c(bi); } /* * Write the undo tree in an undo file. * When "name" is not NULL, use it as the name of the undo file. * Otherwise use buf->b_ffname to generate the undo file name. * "buf" must never be null, buf->b_ffname is used to obtain the original file * permissions. * "forceit" is TRUE for ":wundo!", FALSE otherwise. * "hash[UNDO_HASH_SIZE]" must be the hash value of the buffer text. 
*/ void u_write_undo( char_u *name, int forceit, buf_T *buf, char_u *hash) { u_header_T *uhp; char_u *file_name; int mark; #ifdef U_DEBUG int headers_written = 0; #endif int fd; FILE *fp = NULL; int perm; int write_ok = FALSE; #ifdef UNIX int st_old_valid = FALSE; stat_T st_old; stat_T st_new; #endif bufinfo_T bi; vim_memset(&bi, 0, sizeof(bi)); if (name == NULL) { file_name = u_get_undo_file_name(buf->b_ffname, FALSE); if (file_name == NULL) { if (p_verbose > 0) { verbose_enter(); smsg((char_u *) _("Cannot write undo file in any directory in 'undodir'")); verbose_leave(); } return; } } else file_name = name; /* * Decide about the permission to use for the undo file. If the buffer * has a name use the permission of the original file. Otherwise only * allow the user to access the undo file. */ perm = 0600; if (buf->b_ffname != NULL) { #ifdef UNIX if (mch_stat((char *)buf->b_ffname, &st_old) >= 0) { perm = st_old.st_mode; st_old_valid = TRUE; } #else perm = mch_getperm(buf->b_ffname); if (perm < 0) perm = 0600; #endif } /* strip any s-bit and executable bit */ perm = perm & 0666; /* If the undo file already exists, verify that it actually is an undo * file, and delete it. */ if (mch_getperm(file_name) >= 0) { if (name == NULL || !forceit) { /* Check we can read it and it's an undo file. */ fd = mch_open((char *)file_name, O_RDONLY|O_EXTRA, 0); if (fd < 0) { if (name != NULL || p_verbose > 0) { if (name == NULL) verbose_enter(); smsg((char_u *) _("Will not overwrite with undo file, cannot read: %s"), file_name); if (name == NULL) verbose_leave(); } goto theend; } else { char_u mbuf[UF_START_MAGIC_LEN]; int len; len = read_eintr(fd, mbuf, UF_START_MAGIC_LEN); close(fd); if (len < UF_START_MAGIC_LEN || memcmp(mbuf, UF_START_MAGIC, UF_START_MAGIC_LEN) != 0) { if (name != NULL || p_verbose > 0) { if (name == NULL) verbose_enter(); smsg((char_u *) _("Will not overwrite, this is not an undo file: %s"), file_name); if (name == NULL) verbose_leave(); } goto theend; } } } mch_remove(file_name); } /* If there is no undo information at all, quit here after deleting any * existing undo file. */ if (buf->b_u_numhead == 0 && buf->b_u_line_ptr == NULL) { if (p_verbose > 0) verb_msg((char_u *)_("Skipping undo file write, nothing to undo")); goto theend; } fd = mch_open((char *)file_name, O_CREAT|O_EXTRA|O_WRONLY|O_EXCL|O_NOFOLLOW, perm); if (fd < 0) { EMSG2(_(e_not_open), file_name); goto theend; } (void)mch_setperm(file_name, perm); if (p_verbose > 0) { verbose_enter(); smsg((char_u *)_("Writing undo file: %s"), file_name); verbose_leave(); } #ifdef U_DEBUG /* Check there is no problem in undo info before writing. */ u_check(FALSE); #endif #ifdef UNIX /* * Try to set the group of the undo file same as the original file. If * this fails, set the protection bits for the group same as the * protection bits for others. */ if (st_old_valid && mch_stat((char *)file_name, &st_new) >= 0 && st_new.st_gid != st_old.st_gid # ifdef HAVE_FCHOWN /* sequent-ptx lacks fchown() */ && fchown(fd, (uid_t)-1, st_old.st_gid) != 0 # endif ) mch_setperm(file_name, (perm & 0707) | ((perm & 07) << 3)); # if defined(HAVE_SELINUX) || defined(HAVE_SMACK) if (buf->b_ffname != NULL) mch_copy_sec(buf->b_ffname, file_name); # endif #endif fp = fdopen(fd, "w"); if (fp == NULL) { EMSG2(_(e_not_open), file_name); close(fd); mch_remove(file_name); goto theend; } /* Undo must be synced. */ u_sync(TRUE); /* * Write the header. Initializes encryption, if enabled. 
*/ bi.bi_buf = buf; bi.bi_fp = fp; if (serialize_header(&bi, hash) == FAIL) goto write_error; /* * Iteratively serialize UHPs and their UEPs from the top down. */ mark = ++lastmark; uhp = buf->b_u_oldhead; while (uhp != NULL) { /* Serialize current UHP if we haven't seen it */ if (uhp->uh_walk != mark) { uhp->uh_walk = mark; #ifdef U_DEBUG ++headers_written; #endif if (serialize_uhp(&bi, uhp) == FAIL) goto write_error; } /* Now walk through the tree - algorithm from undo_time(). */ if (uhp->uh_prev.ptr != NULL && uhp->uh_prev.ptr->uh_walk != mark) uhp = uhp->uh_prev.ptr; else if (uhp->uh_alt_next.ptr != NULL && uhp->uh_alt_next.ptr->uh_walk != mark) uhp = uhp->uh_alt_next.ptr; else if (uhp->uh_next.ptr != NULL && uhp->uh_alt_prev.ptr == NULL && uhp->uh_next.ptr->uh_walk != mark) uhp = uhp->uh_next.ptr; else if (uhp->uh_alt_prev.ptr != NULL) uhp = uhp->uh_alt_prev.ptr; else uhp = uhp->uh_next.ptr; } if (undo_write_bytes(&bi, (long_u)UF_HEADER_END_MAGIC, 2) == OK) write_ok = TRUE; #ifdef U_DEBUG if (headers_written != buf->b_u_numhead) { EMSGN("Written %ld headers, ...", headers_written); EMSGN("... but numhead is %ld", buf->b_u_numhead); } #endif #ifdef FEAT_CRYPT if (bi.bi_state != NULL && undo_flush(&bi) == FAIL) write_ok = FALSE; #endif write_error: fclose(fp); if (!write_ok) EMSG2(_("E829: write error in undo file: %s"), file_name); #if defined(MACOS_CLASSIC) || defined(WIN3264) /* Copy file attributes; for systems where this can only be done after * closing the file. */ if (buf->b_ffname != NULL) (void)mch_copy_file_attribute(buf->b_ffname, file_name); #endif #ifdef HAVE_ACL if (buf->b_ffname != NULL) { vim_acl_T acl; /* For systems that support ACL: get the ACL from the original file. */ acl = mch_get_acl(buf->b_ffname); mch_set_acl(file_name, acl); mch_free_acl(acl); } #endif theend: #ifdef FEAT_CRYPT if (bi.bi_state != NULL) crypt_free_state(bi.bi_state); vim_free(bi.bi_buffer); #endif if (file_name != name) vim_free(file_name); } /* * Load the undo tree from an undo file. * If "name" is not NULL use it as the undo file name. This also means being * a bit more verbose. * Otherwise use curbuf->b_ffname to generate the undo file name. * "hash[UNDO_HASH_SIZE]" must be the hash value of the buffer text. */ void u_read_undo(char_u *name, char_u *hash, char_u *orig_name) { char_u *file_name; FILE *fp; long version, str_len; char_u *line_ptr = NULL; linenr_T line_lnum; colnr_T line_colnr; linenr_T line_count; long num_head = 0; long old_header_seq, new_header_seq, cur_header_seq; long seq_last, seq_cur; long last_save_nr = 0; short old_idx = -1, new_idx = -1, cur_idx = -1; long num_read_uhps = 0; time_t seq_time; int i, j; int c; u_header_T *uhp; u_header_T **uhp_table = NULL; char_u read_hash[UNDO_HASH_SIZE]; char_u magic_buf[UF_START_MAGIC_LEN]; #ifdef U_DEBUG int *uhp_table_used; #endif #ifdef UNIX stat_T st_orig; stat_T st_undo; #endif bufinfo_T bi; vim_memset(&bi, 0, sizeof(bi)); if (name == NULL) { file_name = u_get_undo_file_name(curbuf->b_ffname, TRUE); if (file_name == NULL) return; #ifdef UNIX /* For safety we only read an undo file if the owner is equal to the * owner of the text file or equal to the current user. 
*/ if (mch_stat((char *)orig_name, &st_orig) >= 0 && mch_stat((char *)file_name, &st_undo) >= 0 && st_orig.st_uid != st_undo.st_uid && st_undo.st_uid != getuid()) { if (p_verbose > 0) { verbose_enter(); smsg((char_u *)_("Not reading undo file, owner differs: %s"), file_name); verbose_leave(); } return; } #endif } else file_name = name; if (p_verbose > 0) { verbose_enter(); smsg((char_u *)_("Reading undo file: %s"), file_name); verbose_leave(); } fp = mch_fopen((char *)file_name, "r"); if (fp == NULL) { if (name != NULL || p_verbose > 0) EMSG2(_("E822: Cannot open undo file for reading: %s"), file_name); goto error; } bi.bi_buf = curbuf; bi.bi_fp = fp; /* * Read the undo file header. */ if (fread(magic_buf, UF_START_MAGIC_LEN, 1, fp) != 1 || memcmp(magic_buf, UF_START_MAGIC, UF_START_MAGIC_LEN) != 0) { EMSG2(_("E823: Not an undo file: %s"), file_name); goto error; } version = get2c(fp); if (version == UF_VERSION_CRYPT) { #ifdef FEAT_CRYPT if (*curbuf->b_p_key == NUL) { EMSG2(_("E832: Non-encrypted file has encrypted undo file: %s"), file_name); goto error; } bi.bi_state = crypt_create_from_file(fp, curbuf->b_p_key); if (bi.bi_state == NULL) { EMSG2(_("E826: Undo file decryption failed: %s"), file_name); goto error; } if (crypt_whole_undofile(bi.bi_state->method_nr)) { bi.bi_buffer = alloc(CRYPT_BUF_SIZE); if (bi.bi_buffer == NULL) { crypt_free_state(bi.bi_state); bi.bi_state = NULL; goto error; } bi.bi_avail = 0; bi.bi_used = 0; } #else EMSG2(_("E827: Undo file is encrypted: %s"), file_name); goto error; #endif } else if (version != UF_VERSION) { EMSG2(_("E824: Incompatible undo file: %s"), file_name); goto error; } if (undo_read(&bi, read_hash, (size_t)UNDO_HASH_SIZE) == FAIL) { corruption_error("hash", file_name); goto error; } line_count = (linenr_T)undo_read_4c(&bi); if (memcmp(hash, read_hash, UNDO_HASH_SIZE) != 0 || line_count != curbuf->b_ml.ml_line_count) { if (p_verbose > 0 || name != NULL) { if (name == NULL) verbose_enter(); give_warning((char_u *) _("File contents changed, cannot use undo info"), TRUE); if (name == NULL) verbose_leave(); } goto error; } /* Read undo data for "U" command. */ str_len = undo_read_4c(&bi); if (str_len < 0) goto error; if (str_len > 0) line_ptr = read_string_decrypt(&bi, str_len); line_lnum = (linenr_T)undo_read_4c(&bi); line_colnr = (colnr_T)undo_read_4c(&bi); if (line_lnum < 0 || line_colnr < 0) { corruption_error("line lnum/col", file_name); goto error; } /* Begin general undo data */ old_header_seq = undo_read_4c(&bi); new_header_seq = undo_read_4c(&bi); cur_header_seq = undo_read_4c(&bi); num_head = undo_read_4c(&bi); seq_last = undo_read_4c(&bi); seq_cur = undo_read_4c(&bi); seq_time = undo_read_time(&bi); /* Optional header fields. */ for (;;) { int len = undo_read_byte(&bi); int what; if (len == 0 || len == EOF) break; what = undo_read_byte(&bi); switch (what) { case UF_LAST_SAVE_NR: last_save_nr = undo_read_4c(&bi); break; default: /* field not supported, skip */ while (--len >= 0) (void)undo_read_byte(&bi); } } /* uhp_table will store the freshly created undo headers we allocate * until we insert them into curbuf. The table remains sorted by the * sequence numbers of the headers. * When there are no headers uhp_table is NULL. 
*/ if (num_head > 0) { if (num_head < LONG_MAX / (long)sizeof(u_header_T *)) uhp_table = (u_header_T **)U_ALLOC_LINE( num_head * sizeof(u_header_T *)); if (uhp_table == NULL) goto error; } while ((c = undo_read_2c(&bi)) == UF_HEADER_MAGIC) { if (num_read_uhps >= num_head) { corruption_error("num_head too small", file_name); goto error; } uhp = unserialize_uhp(&bi, file_name); if (uhp == NULL) goto error; uhp_table[num_read_uhps++] = uhp; } if (num_read_uhps != num_head) { corruption_error("num_head", file_name); goto error; } if (c != UF_HEADER_END_MAGIC) { corruption_error("end marker", file_name); goto error; } #ifdef U_DEBUG uhp_table_used = (int *)alloc_clear( (unsigned)(sizeof(int) * num_head + 1)); # define SET_FLAG(j) ++uhp_table_used[j] #else # define SET_FLAG(j) #endif /* We have put all of the headers into a table. Now we iterate through the * table and swizzle each sequence number we have stored in uh_*_seq into * a pointer corresponding to the header with that sequence number. */ for (i = 0; i < num_head; i++) { uhp = uhp_table[i]; if (uhp == NULL) continue; for (j = 0; j < num_head; j++) if (uhp_table[j] != NULL && i != j && uhp_table[i]->uh_seq == uhp_table[j]->uh_seq) { corruption_error("duplicate uh_seq", file_name); goto error; } for (j = 0; j < num_head; j++) if (uhp_table[j] != NULL && uhp_table[j]->uh_seq == uhp->uh_next.seq) { uhp->uh_next.ptr = uhp_table[j]; SET_FLAG(j); break; } for (j = 0; j < num_head; j++) if (uhp_table[j] != NULL && uhp_table[j]->uh_seq == uhp->uh_prev.seq) { uhp->uh_prev.ptr = uhp_table[j]; SET_FLAG(j); break; } for (j = 0; j < num_head; j++) if (uhp_table[j] != NULL && uhp_table[j]->uh_seq == uhp->uh_alt_next.seq) { uhp->uh_alt_next.ptr = uhp_table[j]; SET_FLAG(j); break; } for (j = 0; j < num_head; j++) if (uhp_table[j] != NULL && uhp_table[j]->uh_seq == uhp->uh_alt_prev.seq) { uhp->uh_alt_prev.ptr = uhp_table[j]; SET_FLAG(j); break; } if (old_header_seq > 0 && old_idx < 0 && uhp->uh_seq == old_header_seq) { old_idx = i; SET_FLAG(i); } if (new_header_seq > 0 && new_idx < 0 && uhp->uh_seq == new_header_seq) { new_idx = i; SET_FLAG(i); } if (cur_header_seq > 0 && cur_idx < 0 && uhp->uh_seq == cur_header_seq) { cur_idx = i; SET_FLAG(i); } } /* Now that we have read the undo info successfully, free the current undo * info and use the info from the file. */ u_blockfree(curbuf); curbuf->b_u_oldhead = old_idx < 0 ? NULL : uhp_table[old_idx]; curbuf->b_u_newhead = new_idx < 0 ? NULL : uhp_table[new_idx]; curbuf->b_u_curhead = cur_idx < 0 ? 
NULL : uhp_table[cur_idx]; curbuf->b_u_line_ptr = line_ptr; curbuf->b_u_line_lnum = line_lnum; curbuf->b_u_line_colnr = line_colnr; curbuf->b_u_numhead = num_head; curbuf->b_u_seq_last = seq_last; curbuf->b_u_seq_cur = seq_cur; curbuf->b_u_time_cur = seq_time; curbuf->b_u_save_nr_last = last_save_nr; curbuf->b_u_save_nr_cur = last_save_nr; curbuf->b_u_synced = TRUE; vim_free(uhp_table); #ifdef U_DEBUG for (i = 0; i < num_head; ++i) if (uhp_table_used[i] == 0) EMSGN("uhp_table entry %ld not used, leaking memory", i); vim_free(uhp_table_used); u_check(TRUE); #endif if (name != NULL) smsg((char_u *)_("Finished reading undo file %s"), file_name); goto theend; error: vim_free(line_ptr); if (uhp_table != NULL) { for (i = 0; i < num_read_uhps; i++) if (uhp_table[i] != NULL) u_free_uhp(uhp_table[i]); vim_free(uhp_table); } theend: #ifdef FEAT_CRYPT if (bi.bi_state != NULL) crypt_free_state(bi.bi_state); vim_free(bi.bi_buffer); #endif if (fp != NULL) fclose(fp); if (file_name != name) vim_free(file_name); return; } #endif /* FEAT_PERSISTENT_UNDO */ /* * If 'cpoptions' contains 'u': Undo the previous undo or redo (vi compatible). * If 'cpoptions' does not contain 'u': Always undo. */ void u_undo(int count) { /* * If we get an undo command while executing a macro, we behave like the * original vi. If this happens twice in one macro the result will not * be compatible. */ if (curbuf->b_u_synced == FALSE) { u_sync(TRUE); count = 1; } if (vim_strchr(p_cpo, CPO_UNDO) == NULL) undo_undoes = TRUE; else undo_undoes = !undo_undoes; u_doit(count); } /* * If 'cpoptions' contains 'u': Repeat the previous undo or redo. * If 'cpoptions' does not contain 'u': Always redo. */ void u_redo(int count) { if (vim_strchr(p_cpo, CPO_UNDO) == NULL) undo_undoes = FALSE; u_doit(count); } /* * Undo or redo, depending on 'undo_undoes', 'count' times. */ static void u_doit(int startcount) { int count = startcount; if (!undo_allowed()) return; u_newcount = 0; u_oldcount = 0; if (curbuf->b_ml.ml_flags & ML_EMPTY) u_oldcount = -1; while (count--) { /* Do the change warning now, so that it triggers FileChangedRO when * needed. This may cause the file to be reloaded, that must happen * before we do anything, because it may change curbuf->b_u_curhead * and more. */ change_warning(0); if (undo_undoes) { if (curbuf->b_u_curhead == NULL) /* first undo */ curbuf->b_u_curhead = curbuf->b_u_newhead; else if (get_undolevel() > 0) /* multi level undo */ /* get next undo */ curbuf->b_u_curhead = curbuf->b_u_curhead->uh_next.ptr; /* nothing to undo */ if (curbuf->b_u_numhead == 0 || curbuf->b_u_curhead == NULL) { /* stick curbuf->b_u_curhead at end */ curbuf->b_u_curhead = curbuf->b_u_oldhead; beep_flush(); if (count == startcount - 1) { MSG(_("Already at oldest change")); return; } break; } u_undoredo(TRUE); } else { if (curbuf->b_u_curhead == NULL || get_undolevel() <= 0) { beep_flush(); /* nothing to redo */ if (count == startcount - 1) { MSG(_("Already at newest change")); return; } break; } u_undoredo(FALSE); /* Advance for next redo. Set "newhead" when at the end of the * redoable changes. */ if (curbuf->b_u_curhead->uh_prev.ptr == NULL) curbuf->b_u_newhead = curbuf->b_u_curhead; curbuf->b_u_curhead = curbuf->b_u_curhead->uh_prev.ptr; } } u_undo_end(undo_undoes, FALSE); } /* * Undo or redo over the timeline. * When "step" is negative go back in time, otherwise goes forward in time. * When "sec" is FALSE make "step" steps, when "sec" is TRUE use "step" as * seconds. * When "file" is TRUE use "step" as a number of file writes. 
* When "absolute" is TRUE use "step" as the sequence number to jump to. * "sec" must be FALSE then. */ void undo_time( long step, int sec, int file, int absolute) { long target; long closest; long closest_start; long closest_seq = 0; long val; u_header_T *uhp; u_header_T *last; int mark; int nomark; int round; int dosec = sec; int dofile = file; int above = FALSE; int did_undo = TRUE; /* First make sure the current undoable change is synced. */ if (curbuf->b_u_synced == FALSE) u_sync(TRUE); u_newcount = 0; u_oldcount = 0; if (curbuf->b_ml.ml_flags & ML_EMPTY) u_oldcount = -1; /* "target" is the node below which we want to be. * Init "closest" to a value we can't reach. */ if (absolute) { if (step == 0) { /* target 0 does not exist, got to 1 and above it. */ target = 1; above = TRUE; } else target = step; closest = -1; } else { if (dosec) target = (long)(curbuf->b_u_time_cur) + step; else if (dofile) { if (step < 0) { /* Going back to a previous write. If there were changes after * the last write, count that as moving one file-write, so * that ":earlier 1f" undoes all changes since the last save. */ uhp = curbuf->b_u_curhead; if (uhp != NULL) uhp = uhp->uh_next.ptr; else uhp = curbuf->b_u_newhead; if (uhp != NULL && uhp->uh_save_nr != 0) /* "uh_save_nr" was set in the last block, that means * there were no changes since the last write */ target = curbuf->b_u_save_nr_cur + step; else /* count the changes since the last write as one step */ target = curbuf->b_u_save_nr_cur + step + 1; if (target <= 0) /* Go to before first write: before the oldest change. Use * the sequence number for that. */ dofile = FALSE; } else { /* Moving forward to a newer write. */ target = curbuf->b_u_save_nr_cur + step; if (target > curbuf->b_u_save_nr_last) { /* Go to after last write: after the latest change. Use * the sequence number for that. */ target = curbuf->b_u_seq_last + 1; dofile = FALSE; } } } else target = curbuf->b_u_seq_cur + step; if (step < 0) { if (target < 0) target = 0; closest = -1; } else { if (dosec) closest = (long)(vim_time() + 1); else if (dofile) closest = curbuf->b_u_save_nr_last + 2; else closest = curbuf->b_u_seq_last + 2; if (target >= closest) target = closest - 1; } } closest_start = closest; closest_seq = curbuf->b_u_seq_cur; /* * May do this twice: * 1. Search for "target", update "closest" to the best match found. * 2. If "target" not found search for "closest". * * When using the closest time we use the sequence number in the second * round, because there may be several entries with the same time. */ for (round = 1; round <= 2; ++round) { /* Find the path from the current state to where we want to go. The * desired state can be anywhere in the undo tree, need to go all over * it. We put "nomark" in uh_walk where we have been without success, * "mark" where it could possibly be. */ mark = ++lastmark; nomark = ++lastmark; if (curbuf->b_u_curhead == NULL) /* at leaf of the tree */ uhp = curbuf->b_u_newhead; else uhp = curbuf->b_u_curhead; while (uhp != NULL) { uhp->uh_walk = mark; if (dosec) val = (long)(uhp->uh_time); else if (dofile) val = uhp->uh_save_nr; else val = uhp->uh_seq; if (round == 1 && !(dofile && val == 0)) { /* Remember the header that is closest to the target. * It must be at least in the right direction (checked with * "b_u_seq_cur"). When the timestamp is equal find the * highest/lowest sequence number. */ if ((step < 0 ? uhp->uh_seq <= curbuf->b_u_seq_cur : uhp->uh_seq > curbuf->b_u_seq_cur) && ((dosec && val == closest) ? (step < 0 ? 
uhp->uh_seq < closest_seq : uhp->uh_seq > closest_seq) : closest == closest_start || (val > target ? (closest > target ? val - target <= closest - target : val - target <= target - closest) : (closest > target ? target - val <= closest - target : target - val <= target - closest)))) { closest = val; closest_seq = uhp->uh_seq; } } /* Quit searching when we found a match. But when searching for a * time we need to continue looking for the best uh_seq. */ if (target == val && !dosec) { target = uhp->uh_seq; break; } /* go down in the tree if we haven't been there */ if (uhp->uh_prev.ptr != NULL && uhp->uh_prev.ptr->uh_walk != nomark && uhp->uh_prev.ptr->uh_walk != mark) uhp = uhp->uh_prev.ptr; /* go to alternate branch if we haven't been there */ else if (uhp->uh_alt_next.ptr != NULL && uhp->uh_alt_next.ptr->uh_walk != nomark && uhp->uh_alt_next.ptr->uh_walk != mark) uhp = uhp->uh_alt_next.ptr; /* go up in the tree if we haven't been there and we are at the * start of alternate branches */ else if (uhp->uh_next.ptr != NULL && uhp->uh_alt_prev.ptr == NULL && uhp->uh_next.ptr->uh_walk != nomark && uhp->uh_next.ptr->uh_walk != mark) { /* If still at the start we don't go through this change. */ if (uhp == curbuf->b_u_curhead) uhp->uh_walk = nomark; uhp = uhp->uh_next.ptr; } else { /* need to backtrack; mark this node as useless */ uhp->uh_walk = nomark; if (uhp->uh_alt_prev.ptr != NULL) uhp = uhp->uh_alt_prev.ptr; else uhp = uhp->uh_next.ptr; } } if (uhp != NULL) /* found it */ break; if (absolute) { EMSGN(_("E830: Undo number %ld not found"), step); return; } if (closest == closest_start) { if (step < 0) MSG(_("Already at oldest change")); else MSG(_("Already at newest change")); return; } target = closest_seq; dosec = FALSE; dofile = FALSE; if (step < 0) above = TRUE; /* stop above the header */ } /* If we found it: Follow the path to go to where we want to be. */ if (uhp != NULL) { /* * First go up the tree as much as needed. */ while (!got_int) { /* Do the change warning now, for the same reason as above. */ change_warning(0); uhp = curbuf->b_u_curhead; if (uhp == NULL) uhp = curbuf->b_u_newhead; else uhp = uhp->uh_next.ptr; if (uhp == NULL || uhp->uh_walk != mark || (uhp->uh_seq == target && !above)) break; curbuf->b_u_curhead = uhp; u_undoredo(TRUE); uhp->uh_walk = nomark; /* don't go back down here */ } /* * And now go down the tree (redo), branching off where needed. */ while (!got_int) { /* Do the change warning now, for the same reason as above. */ change_warning(0); uhp = curbuf->b_u_curhead; if (uhp == NULL) break; /* Go back to the first branch with a mark. */ while (uhp->uh_alt_prev.ptr != NULL && uhp->uh_alt_prev.ptr->uh_walk == mark) uhp = uhp->uh_alt_prev.ptr; /* Find the last branch with a mark, that's the one. */ last = uhp; while (last->uh_alt_next.ptr != NULL && last->uh_alt_next.ptr->uh_walk == mark) last = last->uh_alt_next.ptr; if (last != uhp) { /* Make the used branch the first entry in the list of * alternatives to make "u" and CTRL-R take this branch. 
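		 * This is done by unlinking "last" from its place in the
		 * uh_alt_next/uh_alt_prev chain and re-inserting it in
		 * front of the first alternate.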
*/ while (uhp->uh_alt_prev.ptr != NULL) uhp = uhp->uh_alt_prev.ptr; if (last->uh_alt_next.ptr != NULL) last->uh_alt_next.ptr->uh_alt_prev.ptr = last->uh_alt_prev.ptr; last->uh_alt_prev.ptr->uh_alt_next.ptr = last->uh_alt_next.ptr; last->uh_alt_prev.ptr = NULL; last->uh_alt_next.ptr = uhp; uhp->uh_alt_prev.ptr = last; if (curbuf->b_u_oldhead == uhp) curbuf->b_u_oldhead = last; uhp = last; if (uhp->uh_next.ptr != NULL) uhp->uh_next.ptr->uh_prev.ptr = uhp; } curbuf->b_u_curhead = uhp; if (uhp->uh_walk != mark) break; /* must have reached the target */ /* Stop when going backwards in time and didn't find the exact * header we were looking for. */ if (uhp->uh_seq == target && above) { curbuf->b_u_seq_cur = target - 1; break; } u_undoredo(FALSE); /* Advance "curhead" to below the header we last used. If it * becomes NULL then we need to set "newhead" to this leaf. */ if (uhp->uh_prev.ptr == NULL) curbuf->b_u_newhead = uhp; curbuf->b_u_curhead = uhp->uh_prev.ptr; did_undo = FALSE; if (uhp->uh_seq == target) /* found it! */ break; uhp = uhp->uh_prev.ptr; if (uhp == NULL || uhp->uh_walk != mark) { /* Need to redo more but can't find it... */ internal_error("undo_time()"); break; } } } u_undo_end(did_undo, absolute); } /* * u_undoredo: common code for undo and redo * * The lines in the file are replaced by the lines in the entry list at * curbuf->b_u_curhead. The replaced lines in the file are saved in the entry * list for the next undo/redo. * * When "undo" is TRUE we go up in the tree, when FALSE we go down. */ static void u_undoredo(int undo) { char_u **newarray = NULL; linenr_T oldsize; linenr_T newsize; linenr_T top, bot; linenr_T lnum; linenr_T newlnum = MAXLNUM; long i; u_entry_T *uep, *nuep; u_entry_T *newlist = NULL; int old_flags; int new_flags; pos_T namedm[NMARKS]; visualinfo_T visualinfo; int empty_buffer; /* buffer became empty */ u_header_T *curhead = curbuf->b_u_curhead; #ifdef FEAT_AUTOCMD /* Don't want autocommands using the undo structures here, they are * invalid till the end. */ block_autocmds(); #endif #ifdef U_DEBUG u_check(FALSE); #endif old_flags = curhead->uh_flags; new_flags = (curbuf->b_changed ? UH_CHANGED : 0) + ((curbuf->b_ml.ml_flags & ML_EMPTY) ? UH_EMPTYBUF : 0); setpcmark(); /* * save marks before undo/redo */ mch_memmove(namedm, curbuf->b_namedm, sizeof(pos_T) * NMARKS); visualinfo = curbuf->b_visual; curbuf->b_op_start.lnum = curbuf->b_ml.ml_line_count; curbuf->b_op_start.col = 0; curbuf->b_op_end.lnum = 0; curbuf->b_op_end.col = 0; for (uep = curhead->uh_entry; uep != NULL; uep = nuep) { top = uep->ue_top; bot = uep->ue_bot; if (bot == 0) bot = curbuf->b_ml.ml_line_count + 1; if (top > curbuf->b_ml.ml_line_count || top >= bot || bot > curbuf->b_ml.ml_line_count + 1) { #ifdef FEAT_AUTOCMD unblock_autocmds(); #endif IEMSG(_("E438: u_undo: line numbers wrong")); changed(); /* don't want UNCHANGED now */ return; } oldsize = bot - top - 1; /* number of lines before undo */ newsize = uep->ue_size; /* number of lines after undo */ if (top < newlnum) { /* If the saved cursor is somewhere in this undo block, move it to * the remembered position. Makes "gwap" put the cursor back * where it was. */ lnum = curhead->uh_cursor.lnum; if (lnum >= top && lnum <= top + newsize + 1) { curwin->w_cursor = curhead->uh_cursor; newlnum = curwin->w_cursor.lnum - 1; } else { /* Use the first line that actually changed. Avoids that * undoing auto-formatting puts the cursor in the previous * line. 
*/ for (i = 0; i < newsize && i < oldsize; ++i) if (STRCMP(uep->ue_array[i], ml_get(top + 1 + i)) != 0) break; if (i == newsize && newlnum == MAXLNUM && uep->ue_next == NULL) { newlnum = top; curwin->w_cursor.lnum = newlnum + 1; } else if (i < newsize) { newlnum = top + i; curwin->w_cursor.lnum = newlnum + 1; } } } empty_buffer = FALSE; /* delete the lines between top and bot and save them in newarray */ if (oldsize > 0) { if ((newarray = (char_u **)U_ALLOC_LINE( sizeof(char_u *) * oldsize)) == NULL) { do_outofmem_msg((long_u)(sizeof(char_u *) * oldsize)); /* * We have messed up the entry list, repair is impossible. * we have to free the rest of the list. */ while (uep != NULL) { nuep = uep->ue_next; u_freeentry(uep, uep->ue_size); uep = nuep; } break; } /* delete backwards, it goes faster in most cases */ for (lnum = bot - 1, i = oldsize; --i >= 0; --lnum) { /* what can we do when we run out of memory? */ if ((newarray[i] = u_save_line(lnum)) == NULL) do_outofmem_msg((long_u)0); /* remember we deleted the last line in the buffer, and a * dummy empty line will be inserted */ if (curbuf->b_ml.ml_line_count == 1) empty_buffer = TRUE; ml_delete(lnum, FALSE); } } else newarray = NULL; /* insert the lines in u_array between top and bot */ if (newsize) { for (lnum = top, i = 0; i < newsize; ++i, ++lnum) { /* * If the file is empty, there is an empty line 1 that we * should get rid of, by replacing it with the new line */ if (empty_buffer && lnum == 0) ml_replace((linenr_T)1, uep->ue_array[i], TRUE); else ml_append(lnum, uep->ue_array[i], (colnr_T)0, FALSE); vim_free(uep->ue_array[i]); } vim_free((char_u *)uep->ue_array); } /* adjust marks */ if (oldsize != newsize) { mark_adjust(top + 1, top + oldsize, (long)MAXLNUM, (long)newsize - (long)oldsize); if (curbuf->b_op_start.lnum > top + oldsize) curbuf->b_op_start.lnum += newsize - oldsize; if (curbuf->b_op_end.lnum > top + oldsize) curbuf->b_op_end.lnum += newsize - oldsize; } changed_lines(top + 1, 0, bot, newsize - oldsize); /* set '[ and '] mark */ if (top + 1 < curbuf->b_op_start.lnum) curbuf->b_op_start.lnum = top + 1; if (newsize == 0 && top + 1 > curbuf->b_op_end.lnum) curbuf->b_op_end.lnum = top + 1; else if (top + newsize > curbuf->b_op_end.lnum) curbuf->b_op_end.lnum = top + newsize; u_newcount += newsize; u_oldcount += oldsize; uep->ue_size = oldsize; uep->ue_array = newarray; uep->ue_bot = top + newsize + 1; /* * insert this entry in front of the new entry list */ nuep = uep->ue_next; uep->ue_next = newlist; newlist = uep; } curhead->uh_entry = newlist; curhead->uh_flags = new_flags; if ((old_flags & UH_EMPTYBUF) && bufempty()) curbuf->b_ml.ml_flags |= ML_EMPTY; if (old_flags & UH_CHANGED) changed(); else #ifdef FEAT_NETBEANS_INTG /* per netbeans undo rules, keep it as modified */ if (!isNetbeansModified(curbuf)) #endif unchanged(curbuf, FALSE); /* * restore marks from before undo/redo */ for (i = 0; i < NMARKS; ++i) { if (curhead->uh_namedm[i].lnum != 0) curbuf->b_namedm[i] = curhead->uh_namedm[i]; if (namedm[i].lnum != 0) curhead->uh_namedm[i] = namedm[i]; else curhead->uh_namedm[i].lnum = 0; } if (curhead->uh_visual.vi_start.lnum != 0) { curbuf->b_visual = curhead->uh_visual; curhead->uh_visual = visualinfo; } /* * If the cursor is only off by one line, put it at the same position as * before starting the change (for the "o" command). * Otherwise the cursor should go to the first undone line. 
*/ if (curhead->uh_cursor.lnum + 1 == curwin->w_cursor.lnum && curwin->w_cursor.lnum > 1) --curwin->w_cursor.lnum; if (curwin->w_cursor.lnum <= curbuf->b_ml.ml_line_count) { if (curhead->uh_cursor.lnum == curwin->w_cursor.lnum) { curwin->w_cursor.col = curhead->uh_cursor.col; #ifdef FEAT_VIRTUALEDIT if (virtual_active() && curhead->uh_cursor_vcol >= 0) coladvance((colnr_T)curhead->uh_cursor_vcol); else curwin->w_cursor.coladd = 0; #endif } else beginline(BL_SOL | BL_FIX); } else { /* We get here with the current cursor line being past the end (eg * after adding lines at the end of the file, and then undoing it). * check_cursor() will move the cursor to the last line. Move it to * the first column here. */ curwin->w_cursor.col = 0; #ifdef FEAT_VIRTUALEDIT curwin->w_cursor.coladd = 0; #endif } /* Make sure the cursor is on an existing line and column. */ check_cursor(); /* Remember where we are for "g-" and ":earlier 10s". */ curbuf->b_u_seq_cur = curhead->uh_seq; if (undo) /* We are below the previous undo. However, to make ":earlier 1s" * work we compute this as being just above the just undone change. */ --curbuf->b_u_seq_cur; /* Remember where we are for ":earlier 1f" and ":later 1f". */ if (curhead->uh_save_nr != 0) { if (undo) curbuf->b_u_save_nr_cur = curhead->uh_save_nr - 1; else curbuf->b_u_save_nr_cur = curhead->uh_save_nr; } /* The timestamp can be the same for multiple changes, just use the one of * the undone/redone change. */ curbuf->b_u_time_cur = curhead->uh_time; #ifdef FEAT_AUTOCMD unblock_autocmds(); #endif #ifdef U_DEBUG u_check(FALSE); #endif } /* * If we deleted or added lines, report the number of less/more lines. * Otherwise, report the number of changes (this may be incorrect * in some cases, but it's better than nothing). */ static void u_undo_end( int did_undo, /* just did an undo */ int absolute) /* used ":undo N" */ { char *msgstr; u_header_T *uhp; char_u msgbuf[80]; #ifdef FEAT_FOLDING if ((fdo_flags & FDO_UNDO) && KeyTyped) foldOpenCursor(); #endif if (global_busy /* no messages now, wait until global is finished */ || !messaging()) /* 'lazyredraw' set, don't do messages now */ return; if (curbuf->b_ml.ml_flags & ML_EMPTY) --u_newcount; u_oldcount -= u_newcount; if (u_oldcount == -1) msgstr = N_("more line"); else if (u_oldcount < 0) msgstr = N_("more lines"); else if (u_oldcount == 1) msgstr = N_("line less"); else if (u_oldcount > 1) msgstr = N_("fewer lines"); else { u_oldcount = u_newcount; if (u_newcount == 1) msgstr = N_("change"); else msgstr = N_("changes"); } if (curbuf->b_u_curhead != NULL) { /* For ":undo N" we prefer a "after #N" message. */ if (absolute && curbuf->b_u_curhead->uh_next.ptr != NULL) { uhp = curbuf->b_u_curhead->uh_next.ptr; did_undo = FALSE; } else if (did_undo) uhp = curbuf->b_u_curhead; else uhp = curbuf->b_u_curhead->uh_next.ptr; } else uhp = curbuf->b_u_newhead; if (uhp == NULL) *msgbuf = NUL; else u_add_time(msgbuf, sizeof(msgbuf), uhp->uh_time); #ifdef FEAT_CONCEAL { win_T *wp; FOR_ALL_WINDOWS(wp) { if (wp->w_buffer == curbuf && wp->w_p_cole > 0) redraw_win_later(wp, NOT_VALID); } } #endif smsg((char_u *)_("%ld %s; %s #%ld %s"), u_oldcount < 0 ? -u_oldcount : u_oldcount, _(msgstr), did_undo ? _("before") : _("after"), uhp == NULL ? 0L : uhp->uh_seq, msgbuf); } /* * u_sync: stop adding to the current entry list */ void u_sync( int force) /* Also sync when no_u_sync is set. */ { /* Skip it when already synced or syncing is disabled. 
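     * Syncing closes the current entry list, so that the next change opens
     * a new undo block.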
 */
    if (curbuf->b_u_synced || (!force && no_u_sync > 0))
	return;
#if defined(FEAT_XIM) && defined(FEAT_GUI_GTK)
    if (im_is_preediting())
	return;		/* XIM is busy, don't break an undo sequence */
#endif
    if (get_undolevel() < 0)
	curbuf->b_u_synced = TRUE;  /* no entries, nothing to do */
    else
    {
	u_getbot();		/* compute ue_bot of previous u_save */
	curbuf->b_u_curhead = NULL;
    }
}

/*
 * ":undolist": List the leaves of the undo tree
 */
    void
ex_undolist(exarg_T *eap UNUSED)
{
    garray_T	ga;
    u_header_T	*uhp;
    int		mark;
    int		nomark;
    int		changes = 1;
    int		i;

    /*
     * 1: walk the tree to find all leaves, put the info in "ga".
     * 2: sort the lines
     * 3: display the list
     */
    mark = ++lastmark;
    nomark = ++lastmark;
    ga_init2(&ga, (int)sizeof(char *), 20);

    uhp = curbuf->b_u_oldhead;
    while (uhp != NULL)
    {
	if (uhp->uh_prev.ptr == NULL && uhp->uh_walk != nomark
						     && uhp->uh_walk != mark)
	{
	    if (ga_grow(&ga, 1) == FAIL)
		break;
	    vim_snprintf((char *)IObuff, IOSIZE, "%6ld %7ld ",
						      uhp->uh_seq, changes);
	    u_add_time(IObuff + STRLEN(IObuff), IOSIZE - STRLEN(IObuff),
								uhp->uh_time);
	    if (uhp->uh_save_nr > 0)
	    {
		while (STRLEN(IObuff) < 33)
		    STRCAT(IObuff, " ");
		vim_snprintf_add((char *)IObuff, IOSIZE,
						    " %3ld", uhp->uh_save_nr);
	    }
	    ((char_u **)(ga.ga_data))[ga.ga_len++] = vim_strsave(IObuff);
	}

	uhp->uh_walk = mark;

	/* go down in the tree if we haven't been there */
	if (uhp->uh_prev.ptr != NULL && uhp->uh_prev.ptr->uh_walk != nomark
				       && uhp->uh_prev.ptr->uh_walk != mark)
	{
	    uhp = uhp->uh_prev.ptr;
	    ++changes;
	}

	/* go to alternate branch if we haven't been there */
	else if (uhp->uh_alt_next.ptr != NULL
		&& uhp->uh_alt_next.ptr->uh_walk != nomark
		&& uhp->uh_alt_next.ptr->uh_walk != mark)
	    uhp = uhp->uh_alt_next.ptr;

	/* go up in the tree if we haven't been there and we are at the
	 * start of alternate branches */
	else if (uhp->uh_next.ptr != NULL && uhp->uh_alt_prev.ptr == NULL
		&& uhp->uh_next.ptr->uh_walk != nomark
		&& uhp->uh_next.ptr->uh_walk != mark)
	{
	    uhp = uhp->uh_next.ptr;
	    --changes;
	}

	else
	{
	    /* need to backtrack; mark this node as done */
	    uhp->uh_walk = nomark;
	    if (uhp->uh_alt_prev.ptr != NULL)
		uhp = uhp->uh_alt_prev.ptr;
	    else
	    {
		uhp = uhp->uh_next.ptr;
		--changes;
	    }
	}
    }

    if (ga.ga_len == 0)
	MSG(_("Nothing to undo"));
    else
    {
	sort_strings((char_u **)ga.ga_data, ga.ga_len);

	msg_start();
	msg_puts_attr((char_u *)_("number changes when saved"),
							    hl_attr(HLF_T));
	for (i = 0; i < ga.ga_len && !got_int; ++i)
	{
	    msg_putchar('\n');
	    if (got_int)
		break;
	    msg_puts(((char_u **)ga.ga_data)[i]);
	}
	msg_end();

	ga_clear_strings(&ga);
    }
}

/*
 * Put the timestamp of an undo header in "buf[buflen]" in a nice format.
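 * Changes less than 100 seconds old are shown as "N seconds ago", changes
 * within the last twelve hours as "%H:%M:%S" and older ones with the full
 * "%Y/%m/%d %H:%M:%S".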
*/ static void u_add_time(char_u *buf, size_t buflen, time_t tt) { #ifdef HAVE_STRFTIME struct tm *curtime; if (vim_time() - tt >= 100) { curtime = localtime(&tt); if (vim_time() - tt < (60L * 60L * 12L)) /* within 12 hours */ (void)strftime((char *)buf, buflen, "%H:%M:%S", curtime); else /* longer ago */ (void)strftime((char *)buf, buflen, "%Y/%m/%d %H:%M:%S", curtime); } else #endif vim_snprintf((char *)buf, buflen, _("%ld seconds ago"), (long)(vim_time() - tt)); } /* * ":undojoin": continue adding to the last entry list */ void ex_undojoin(exarg_T *eap UNUSED) { if (curbuf->b_u_newhead == NULL) return; /* nothing changed before */ if (curbuf->b_u_curhead != NULL) { EMSG(_("E790: undojoin is not allowed after undo")); return; } if (!curbuf->b_u_synced) return; /* already unsynced */ if (get_undolevel() < 0) return; /* no entries, nothing to do */ else /* Append next change to the last entry */ curbuf->b_u_synced = FALSE; } /* * Called after writing or reloading the file and setting b_changed to FALSE. * Now an undo means that the buffer is modified. */ void u_unchanged(buf_T *buf) { u_unch_branch(buf->b_u_oldhead); buf->b_did_warn = FALSE; } /* * After reloading a buffer which was saved for 'undoreload': Find the first * line that was changed and set the cursor there. */ void u_find_first_changed(void) { u_header_T *uhp = curbuf->b_u_newhead; u_entry_T *uep; linenr_T lnum; if (curbuf->b_u_curhead != NULL || uhp == NULL) return; /* undid something in an autocmd? */ /* Check that the last undo block was for the whole file. */ uep = uhp->uh_entry; if (uep->ue_top != 0 || uep->ue_bot != 0) return; for (lnum = 1; lnum < curbuf->b_ml.ml_line_count && lnum <= uep->ue_size; ++lnum) if (STRCMP(ml_get_buf(curbuf, lnum, FALSE), uep->ue_array[lnum - 1]) != 0) { clearpos(&(uhp->uh_cursor)); uhp->uh_cursor.lnum = lnum; return; } if (curbuf->b_ml.ml_line_count != uep->ue_size) { /* lines added or deleted at the end, put the cursor there */ clearpos(&(uhp->uh_cursor)); uhp->uh_cursor.lnum = lnum; } } /* * Increase the write count, store it in the last undo header, what would be * used for "u". */ void u_update_save_nr(buf_T *buf) { u_header_T *uhp; ++buf->b_u_save_nr_last; buf->b_u_save_nr_cur = buf->b_u_save_nr_last; uhp = buf->b_u_curhead; if (uhp != NULL) uhp = uhp->uh_next.ptr; else uhp = buf->b_u_newhead; if (uhp != NULL) uhp->uh_save_nr = buf->b_u_save_nr_last; } static void u_unch_branch(u_header_T *uhp) { u_header_T *uh; for (uh = uhp; uh != NULL; uh = uh->uh_prev.ptr) { uh->uh_flags |= UH_CHANGED; if (uh->uh_alt_next.ptr != NULL) u_unch_branch(uh->uh_alt_next.ptr); /* recursive */ } } /* * Get pointer to last added entry. * If it's not valid, give an error message and return NULL. */ static u_entry_T * u_get_headentry(void) { if (curbuf->b_u_newhead == NULL || curbuf->b_u_newhead->uh_entry == NULL) { IEMSG(_("E439: undo list corrupt")); return NULL; } return curbuf->b_u_newhead->uh_entry; } /* * u_getbot(): compute the line number of the previous u_save * It is called only when b_u_synced is FALSE. */ static void u_getbot(void) { u_entry_T *uep; linenr_T extra; uep = u_get_headentry(); /* check for corrupt undo list */ if (uep == NULL) return; uep = curbuf->b_u_newhead->uh_getbot_entry; if (uep != NULL) { /* * the new ue_bot is computed from the number of lines that has been * inserted (0 - deleted) since calling u_save. This is equal to the * old line count subtracted from the current line count. 
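	 * Example: u_save() ran with 100 lines, ue_top == 9 and
	 * ue_size == 2; after one line was inserted the count is 101, so
	 * extra == 1 and ue_bot becomes 9 + 2 + 1 + 1 == 13.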
*/ extra = curbuf->b_ml.ml_line_count - uep->ue_lcount; uep->ue_bot = uep->ue_top + uep->ue_size + 1 + extra; if (uep->ue_bot < 1 || uep->ue_bot > curbuf->b_ml.ml_line_count) { IEMSG(_("E440: undo line missing")); uep->ue_bot = uep->ue_top + 1; /* assume all lines deleted, will * get all the old lines back * without deleting the current * ones */ } curbuf->b_u_newhead->uh_getbot_entry = NULL; } curbuf->b_u_synced = TRUE; } /* * Free one header "uhp" and its entry list and adjust the pointers. */ static void u_freeheader( buf_T *buf, u_header_T *uhp, u_header_T **uhpp) /* if not NULL reset when freeing this header */ { u_header_T *uhap; /* When there is an alternate redo list free that branch completely, * because we can never go there. */ if (uhp->uh_alt_next.ptr != NULL) u_freebranch(buf, uhp->uh_alt_next.ptr, uhpp); if (uhp->uh_alt_prev.ptr != NULL) uhp->uh_alt_prev.ptr->uh_alt_next.ptr = NULL; /* Update the links in the list to remove the header. */ if (uhp->uh_next.ptr == NULL) buf->b_u_oldhead = uhp->uh_prev.ptr; else uhp->uh_next.ptr->uh_prev.ptr = uhp->uh_prev.ptr; if (uhp->uh_prev.ptr == NULL) buf->b_u_newhead = uhp->uh_next.ptr; else for (uhap = uhp->uh_prev.ptr; uhap != NULL; uhap = uhap->uh_alt_next.ptr) uhap->uh_next.ptr = uhp->uh_next.ptr; u_freeentries(buf, uhp, uhpp); } /* * Free an alternate branch and any following alternate branches. */ static void u_freebranch( buf_T *buf, u_header_T *uhp, u_header_T **uhpp) /* if not NULL reset when freeing this header */ { u_header_T *tofree, *next; /* If this is the top branch we may need to use u_freeheader() to update * all the pointers. */ if (uhp == buf->b_u_oldhead) { while (buf->b_u_oldhead != NULL) u_freeheader(buf, buf->b_u_oldhead, uhpp); return; } if (uhp->uh_alt_prev.ptr != NULL) uhp->uh_alt_prev.ptr->uh_alt_next.ptr = NULL; next = uhp; while (next != NULL) { tofree = next; if (tofree->uh_alt_next.ptr != NULL) u_freebranch(buf, tofree->uh_alt_next.ptr, uhpp); /* recursive */ next = tofree->uh_prev.ptr; u_freeentries(buf, tofree, uhpp); } } /* * Free all the undo entries for one header and the header itself. * This means that "uhp" is invalid when returning. */ static void u_freeentries( buf_T *buf, u_header_T *uhp, u_header_T **uhpp) /* if not NULL reset when freeing this header */ { u_entry_T *uep, *nuep; /* Check for pointers to the header that become invalid now. 
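     * These are b_u_curhead, b_u_newhead and the caller's "uhpp"; each of
     * them is reset to NULL below when it points to the header being freed.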
*/ if (buf->b_u_curhead == uhp) buf->b_u_curhead = NULL; if (buf->b_u_newhead == uhp) buf->b_u_newhead = NULL; /* freeing the newest entry */ if (uhpp != NULL && uhp == *uhpp) *uhpp = NULL; for (uep = uhp->uh_entry; uep != NULL; uep = nuep) { nuep = uep->ue_next; u_freeentry(uep, uep->ue_size); } #ifdef U_DEBUG uhp->uh_magic = 0; #endif vim_free((char_u *)uhp); --buf->b_u_numhead; } /* * free entry 'uep' and 'n' lines in uep->ue_array[] */ static void u_freeentry(u_entry_T *uep, long n) { while (n > 0) vim_free(uep->ue_array[--n]); vim_free((char_u *)uep->ue_array); #ifdef U_DEBUG uep->ue_magic = 0; #endif vim_free((char_u *)uep); } /* * invalidate the undo buffer; called when storage has already been released */ void u_clearall(buf_T *buf) { buf->b_u_newhead = buf->b_u_oldhead = buf->b_u_curhead = NULL; buf->b_u_synced = TRUE; buf->b_u_numhead = 0; buf->b_u_line_ptr = NULL; buf->b_u_line_lnum = 0; } /* * save the line "lnum" for the "U" command */ void u_saveline(linenr_T lnum) { if (lnum == curbuf->b_u_line_lnum) /* line is already saved */ return; if (lnum < 1 || lnum > curbuf->b_ml.ml_line_count) /* should never happen */ return; u_clearline(); curbuf->b_u_line_lnum = lnum; if (curwin->w_cursor.lnum == lnum) curbuf->b_u_line_colnr = curwin->w_cursor.col; else curbuf->b_u_line_colnr = 0; if ((curbuf->b_u_line_ptr = u_save_line(lnum)) == NULL) do_outofmem_msg((long_u)0); } /* * clear the line saved for the "U" command * (this is used externally for crossing a line while in insert mode) */ void u_clearline(void) { if (curbuf->b_u_line_ptr != NULL) { vim_free(curbuf->b_u_line_ptr); curbuf->b_u_line_ptr = NULL; curbuf->b_u_line_lnum = 0; } } /* * Implementation of the "U" command. * Differentiation from vi: "U" can be undone with the next "U". * We also allow the cursor to be in another line. * Careful: may trigger autocommands that reload the buffer. */ void u_undoline(void) { colnr_T t; char_u *oldp; if (undo_off) return; if (curbuf->b_u_line_ptr == NULL || curbuf->b_u_line_lnum > curbuf->b_ml.ml_line_count) { beep_flush(); return; } /* first save the line for the 'u' command */ if (u_savecommon(curbuf->b_u_line_lnum - 1, curbuf->b_u_line_lnum + 1, (linenr_T)0, FALSE) == FAIL) return; oldp = u_save_line(curbuf->b_u_line_lnum); if (oldp == NULL) { do_outofmem_msg((long_u)0); return; } ml_replace(curbuf->b_u_line_lnum, curbuf->b_u_line_ptr, TRUE); changed_bytes(curbuf->b_u_line_lnum, 0); vim_free(curbuf->b_u_line_ptr); curbuf->b_u_line_ptr = oldp; t = curbuf->b_u_line_colnr; if (curwin->w_cursor.lnum == curbuf->b_u_line_lnum) curbuf->b_u_line_colnr = curwin->w_cursor.col; curwin->w_cursor.col = t; curwin->w_cursor.lnum = curbuf->b_u_line_lnum; check_cursor_col(); } /* * Free all allocated memory blocks for the buffer 'buf'. */ void u_blockfree(buf_T *buf) { while (buf->b_u_oldhead != NULL) u_freeheader(buf, buf->b_u_oldhead, NULL); vim_free(buf->b_u_line_ptr); } /* * u_save_line(): allocate memory and copy line 'lnum' into it. * Returns NULL when out of memory. */ static char_u * u_save_line(linenr_T lnum) { return vim_strsave(ml_get(lnum)); } /* * Check if the 'modified' flag is set, or 'ff' has changed (only need to * check the first character, because it can only be "dos", "unix" or "mac"). * "nofile" and "scratch" type buffers are considered to always be unchanged. 
*/ int bufIsChanged(buf_T *buf) { return #ifdef FEAT_QUICKFIX !bt_dontwrite(buf) && #endif (buf->b_changed || file_ff_differs(buf, TRUE)); } int curbufIsChanged(void) { return #ifdef FEAT_QUICKFIX !bt_dontwrite(curbuf) && #endif (curbuf->b_changed || file_ff_differs(curbuf, TRUE)); } #if defined(FEAT_EVAL) || defined(PROTO) /* * For undotree(): Append the list of undo blocks at "first_uhp" to "list". * Recursive. */ void u_eval_tree(u_header_T *first_uhp, list_T *list) { u_header_T *uhp = first_uhp; dict_T *dict; while (uhp != NULL) { dict = dict_alloc(); if (dict == NULL) return; dict_add_nr_str(dict, "seq", uhp->uh_seq, NULL); dict_add_nr_str(dict, "time", (long)uhp->uh_time, NULL); if (uhp == curbuf->b_u_newhead) dict_add_nr_str(dict, "newhead", 1, NULL); if (uhp == curbuf->b_u_curhead) dict_add_nr_str(dict, "curhead", 1, NULL); if (uhp->uh_save_nr > 0) dict_add_nr_str(dict, "save", uhp->uh_save_nr, NULL); if (uhp->uh_alt_next.ptr != NULL) { list_T *alt_list = list_alloc(); if (alt_list != NULL) { /* Recursive call to add alternate undo tree. */ u_eval_tree(uhp->uh_alt_next.ptr, alt_list); dict_add_list(dict, "alt", alt_list); } } list_append_dict(list, dict); uhp = uhp->uh_prev.ptr; } } #endif
/* vi:set ts=8 sts=4 sw=4 noet: * * VIM - Vi IMproved by Bram Moolenaar * * Do ":help uganda" in Vim to read copying and usage conditions. * Do ":help credits" in Vim to see a list of people who contributed. * See README.txt for an overview of the Vim source code. */ /* * undo.c: multi level undo facility * * The saved lines are stored in a list of lists (one for each buffer): * * b_u_oldhead------------------------------------------------+ * | * V * +--------------+ +--------------+ +--------------+ * b_u_newhead--->| u_header | | u_header | | u_header | * | uh_next------>| uh_next------>| uh_next---->NULL * NULL<--------uh_prev |<---------uh_prev |<---------uh_prev | * | uh_entry | | uh_entry | | uh_entry | * +--------|-----+ +--------|-----+ +--------|-----+ * | | | * V V V * +--------------+ +--------------+ +--------------+ * | u_entry | | u_entry | | u_entry | * | ue_next | | ue_next | | ue_next | * +--------|-----+ +--------|-----+ +--------|-----+ * | | | * V V V * +--------------+ NULL NULL * | u_entry | * | ue_next | * +--------|-----+ * | * V * etc. * * Each u_entry list contains the information for one undo or redo. * curbuf->b_u_curhead points to the header of the last undo (the next redo), * or is NULL if nothing has been undone (end of the branch). * * For keeping alternate undo/redo branches the uh_alt field is used. Thus at * each point in the list a branch may appear for an alternate to redo. The * uh_seq field is numbered sequentially to be able to find a newer or older * branch. * * +---------------+ +---------------+ * b_u_oldhead --->| u_header | | u_header | * | uh_alt_next ---->| uh_alt_next ----> NULL * NULL <----- uh_alt_prev |<------ uh_alt_prev | * | uh_prev | | uh_prev | * +-----|---------+ +-----|---------+ * | | * V V * +---------------+ +---------------+ * | u_header | | u_header | * | uh_alt_next | | uh_alt_next | * b_u_newhead --->| uh_alt_prev | | uh_alt_prev | * | uh_prev | | uh_prev | * +-----|---------+ +-----|---------+ * | | * V V * NULL +---------------+ +---------------+ * | u_header | | u_header | * | uh_alt_next ---->| uh_alt_next | * | uh_alt_prev |<------ uh_alt_prev | * | uh_prev | | uh_prev | * +-----|---------+ +-----|---------+ * | | * etc. etc. * * * All data is allocated and will all be freed when the buffer is unloaded. */ /* Uncomment the next line for including the u_check() function. This warns * for errors in the debug information. */ /* #define U_DEBUG 1 */ #define UH_MAGIC 0x18dade /* value for uh_magic when in use */ #define UE_MAGIC 0xabc123 /* value for ue_magic when in use */ /* Size of buffer used for encryption. */ #define CRYPT_BUF_SIZE 8192 #include "vim.h" /* Structure passed around between functions. * Avoids passing cryptstate_T when encryption not available. 
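 * When "bi_buffer" is non-NULL all reads and writes pass through it in
 * CRYPT_BUF_SIZE chunks, so that whole blocks can be encrypted or decrypted
 * at once; "bi_used" and "bi_avail" track how much of the buffer is in use.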
*/ typedef struct { buf_T *bi_buf; FILE *bi_fp; #ifdef FEAT_CRYPT cryptstate_T *bi_state; char_u *bi_buffer; /* CRYPT_BUF_SIZE, NULL when not buffering */ size_t bi_used; /* bytes written to/read from bi_buffer */ size_t bi_avail; /* bytes available in bi_buffer */ #endif } bufinfo_T; static long get_undolevel(void); static void u_unch_branch(u_header_T *uhp); static u_entry_T *u_get_headentry(void); static void u_getbot(void); static void u_doit(int count); static void u_undoredo(int undo); static void u_undo_end(int did_undo, int absolute); static void u_add_time(char_u *buf, size_t buflen, time_t tt); static void u_freeheader(buf_T *buf, u_header_T *uhp, u_header_T **uhpp); static void u_freebranch(buf_T *buf, u_header_T *uhp, u_header_T **uhpp); static void u_freeentries(buf_T *buf, u_header_T *uhp, u_header_T **uhpp); static void u_freeentry(u_entry_T *, long); #ifdef FEAT_PERSISTENT_UNDO static void corruption_error(char *mesg, char_u *file_name); static void u_free_uhp(u_header_T *uhp); static int undo_write(bufinfo_T *bi, char_u *ptr, size_t len); # ifdef FEAT_CRYPT static int undo_flush(bufinfo_T *bi); # endif static int fwrite_crypt(bufinfo_T *bi, char_u *ptr, size_t len); static int undo_write_bytes(bufinfo_T *bi, long_u nr, int len); static void put_header_ptr(bufinfo_T *bi, u_header_T *uhp); static int undo_read_4c(bufinfo_T *bi); static int undo_read_2c(bufinfo_T *bi); static int undo_read_byte(bufinfo_T *bi); static time_t undo_read_time(bufinfo_T *bi); static int undo_read(bufinfo_T *bi, char_u *buffer, size_t size); static char_u *read_string_decrypt(bufinfo_T *bi, int len); static int serialize_header(bufinfo_T *bi, char_u *hash); static int serialize_uhp(bufinfo_T *bi, u_header_T *uhp); static u_header_T *unserialize_uhp(bufinfo_T *bi, char_u *file_name); static int serialize_uep(bufinfo_T *bi, u_entry_T *uep); static u_entry_T *unserialize_uep(bufinfo_T *bi, int *error, char_u *file_name); static void serialize_pos(bufinfo_T *bi, pos_T pos); static void unserialize_pos(bufinfo_T *bi, pos_T *pos); static void serialize_visualinfo(bufinfo_T *bi, visualinfo_T *info); static void unserialize_visualinfo(bufinfo_T *bi, visualinfo_T *info); #endif #define U_ALLOC_LINE(size) lalloc((long_u)(size), FALSE) static char_u *u_save_line(linenr_T); /* used in undo_end() to report number of added and deleted lines */ static long u_newcount, u_oldcount; /* * When 'u' flag included in 'cpoptions', we behave like vi. Need to remember * the action that "u" should do. */ static int undo_undoes = FALSE; static int lastmark = 0; #if defined(U_DEBUG) || defined(PROTO) /* * Check the undo structures for being valid. Print a warning when something * looks wrong. */ static int seen_b_u_curhead; static int seen_b_u_newhead; static int header_count; static void u_check_tree(u_header_T *uhp, u_header_T *exp_uh_next, u_header_T *exp_uh_alt_prev) { u_entry_T *uep; if (uhp == NULL) return; ++header_count; if (uhp == curbuf->b_u_curhead && ++seen_b_u_curhead > 1) { EMSG("b_u_curhead found twice (looping?)"); return; } if (uhp == curbuf->b_u_newhead && ++seen_b_u_newhead > 1) { EMSG("b_u_newhead found twice (looping?)"); return; } if (uhp->uh_magic != UH_MAGIC) EMSG("uh_magic wrong (may be using freed memory)"); else { /* Check pointers back are correct. 
*/ if (uhp->uh_next.ptr != exp_uh_next) { EMSG("uh_next wrong"); smsg((char_u *)"expected: 0x%x, actual: 0x%x", exp_uh_next, uhp->uh_next.ptr); } if (uhp->uh_alt_prev.ptr != exp_uh_alt_prev) { EMSG("uh_alt_prev wrong"); smsg((char_u *)"expected: 0x%x, actual: 0x%x", exp_uh_alt_prev, uhp->uh_alt_prev.ptr); } /* Check the undo tree at this header. */ for (uep = uhp->uh_entry; uep != NULL; uep = uep->ue_next) { if (uep->ue_magic != UE_MAGIC) { EMSG("ue_magic wrong (may be using freed memory)"); break; } } /* Check the next alt tree. */ u_check_tree(uhp->uh_alt_next.ptr, uhp->uh_next.ptr, uhp); /* Check the next header in this branch. */ u_check_tree(uhp->uh_prev.ptr, uhp, NULL); } } static void u_check(int newhead_may_be_NULL) { seen_b_u_newhead = 0; seen_b_u_curhead = 0; header_count = 0; u_check_tree(curbuf->b_u_oldhead, NULL, NULL); if (seen_b_u_newhead == 0 && curbuf->b_u_oldhead != NULL && !(newhead_may_be_NULL && curbuf->b_u_newhead == NULL)) EMSGN("b_u_newhead invalid: 0x%x", curbuf->b_u_newhead); if (curbuf->b_u_curhead != NULL && seen_b_u_curhead == 0) EMSGN("b_u_curhead invalid: 0x%x", curbuf->b_u_curhead); if (header_count != curbuf->b_u_numhead) { EMSG("b_u_numhead invalid"); smsg((char_u *)"expected: %ld, actual: %ld", (long)header_count, (long)curbuf->b_u_numhead); } } #endif /* * Save the current line for both the "u" and "U" command. * Careful: may trigger autocommands that reload the buffer. * Returns OK or FAIL. */ int u_save_cursor(void) { return (u_save((linenr_T)(curwin->w_cursor.lnum - 1), (linenr_T)(curwin->w_cursor.lnum + 1))); } /* * Save the lines between "top" and "bot" for both the "u" and "U" command. * "top" may be 0 and bot may be curbuf->b_ml.ml_line_count + 1. * Careful: may trigger autocommands that reload the buffer. * Returns FAIL when lines could not be saved, OK otherwise. */ int u_save(linenr_T top, linenr_T bot) { if (undo_off) return OK; if (top > curbuf->b_ml.ml_line_count || top >= bot || bot > curbuf->b_ml.ml_line_count + 1) return FALSE; /* rely on caller to do error messages */ if (top + 2 == bot) u_saveline((linenr_T)(top + 1)); return (u_savecommon(top, bot, (linenr_T)0, FALSE)); } /* * Save the line "lnum" (used by ":s" and "~" command). * The line is replaced, so the new bottom line is lnum + 1. * Careful: may trigger autocommands that reload the buffer. * Returns FAIL when lines could not be saved, OK otherwise. */ int u_savesub(linenr_T lnum) { if (undo_off) return OK; return (u_savecommon(lnum - 1, lnum + 1, lnum + 1, FALSE)); } /* * A new line is inserted before line "lnum" (used by :s command). * The line is inserted, so the new bottom line is lnum + 1. * Careful: may trigger autocommands that reload the buffer. * Returns FAIL when lines could not be saved, OK otherwise. */ int u_inssub(linenr_T lnum) { if (undo_off) return OK; return (u_savecommon(lnum - 1, lnum, lnum + 1, FALSE)); } /* * Save the lines "lnum" - "lnum" + nlines (used by delete command). * The lines are deleted, so the new bottom line is lnum, unless the buffer * becomes empty. * Careful: may trigger autocommands that reload the buffer. * Returns FAIL when lines could not be saved, OK otherwise. */ int u_savedel(linenr_T lnum, long nlines) { if (undo_off) return OK; return (u_savecommon(lnum - 1, lnum + nlines, nlines == curbuf->b_ml.ml_line_count ? 2 : lnum, FALSE)); } /* * Return TRUE when undo is allowed. Otherwise give an error message and * return FALSE. */ int undo_allowed(void) { /* Don't allow changes when 'modifiable' is off. 
 */
    if (!curbuf->b_p_ma)
    {
	EMSG(_(e_modifiable));
	return FALSE;
    }

#ifdef HAVE_SANDBOX
    /* In the sandbox it's not allowed to change the text. */
    if (sandbox != 0)
    {
	EMSG(_(e_sandbox));
	return FALSE;
    }
#endif

    /* Don't allow changes in the buffer while editing the cmdline.  The
     * caller of getcmdline() may get confused. */
    if (textlock != 0)
    {
	EMSG(_(e_secure));
	return FALSE;
    }

    return TRUE;
}

/*
 * Get the undolevel value for the current buffer.
 */
    static long
get_undolevel(void)
{
    if (curbuf->b_p_ul == NO_LOCAL_UNDOLEVEL)
	return p_ul;
    return curbuf->b_p_ul;
}

/*
 * Common code for various ways to save text before a change.
 * "top" is the line above the first changed line.
 * "bot" is the line below the last changed line.
 * "newbot" is the new bottom line.  Use zero when not known.
 * "reload" is TRUE when saving for a buffer reload.
 * Careful: may trigger autocommands that reload the buffer.
 * Returns FAIL when lines could not be saved, OK otherwise.
 */
    int
u_savecommon(
    linenr_T	top,
    linenr_T	bot,
    linenr_T	newbot,
    int		reload)
{
    linenr_T	lnum;
    long	i;
    u_header_T	*uhp;
    u_header_T	*old_curhead;
    u_entry_T	*uep;
    u_entry_T	*prev_uep;
    long	size;

    if (!reload)
    {
	/* When making changes is not allowed return FAIL.  It's a crude way
	 * to make all change commands fail. */
	if (!undo_allowed())
	    return FAIL;

#ifdef FEAT_NETBEANS_INTG
	/*
	 * Netbeans defines areas that cannot be modified.  Bail out here
	 * when trying to change text in a guarded area.
	 */
	if (netbeans_active())
	{
	    if (netbeans_is_guarded(top, bot))
	    {
		EMSG(_(e_guarded));
		return FAIL;
	    }
	    if (curbuf->b_p_ro)
	    {
		EMSG(_(e_nbreadonly));
		return FAIL;
	    }
	}
#endif

#ifdef FEAT_AUTOCMD
	/*
	 * Saving text for undo means we are going to make a change.  Give a
	 * warning for a read-only file before making the change, so that the
	 * FileChangedRO event can replace the buffer with a read-write
	 * version (e.g., obtained from a source control system).
	 */
	change_warning(0);
	if (bot > curbuf->b_ml.ml_line_count + 1)
	{
	    /* This happens when the FileChangedRO autocommand changes the
	     * file in a way it becomes shorter. */
	    EMSG(_("E881: Line count changed unexpectedly"));
	    return FAIL;
	}
#endif
    }

#ifdef U_DEBUG
    u_check(FALSE);
#endif

    size = bot - top - 1;

    /*
     * If curbuf->b_u_synced == TRUE make a new header.
     */
    if (curbuf->b_u_synced)
    {
#ifdef FEAT_JUMPLIST
	/* Need to create new entry in b_changelist. */
	curbuf->b_new_change = TRUE;
#endif

	if (get_undolevel() >= 0)
	{
	    /*
	     * Make a new header entry.  Do this first so that we don't mess
	     * up the undo info when out of memory.
	     */
	    uhp = (u_header_T *)U_ALLOC_LINE(sizeof(u_header_T));
	    if (uhp == NULL)
		goto nomem;
#ifdef U_DEBUG
	    uhp->uh_magic = UH_MAGIC;
#endif
	}
	else
	    uhp = NULL;

	/*
	 * If we undid more than we redid, move the entry lists before and
	 * including curbuf->b_u_curhead to an alternate branch.
	 */
	old_curhead = curbuf->b_u_curhead;
	if (old_curhead != NULL)
	{
	    curbuf->b_u_newhead = old_curhead->uh_next.ptr;
	    curbuf->b_u_curhead = NULL;
	}

	/*
	 * free headers to keep the size right
	 */
	while (curbuf->b_u_numhead > get_undolevel()
					      && curbuf->b_u_oldhead != NULL)
	{
	    u_header_T	*uhfree = curbuf->b_u_oldhead;

	    if (uhfree == old_curhead)
		/* Can't reconnect the branch, delete all of it. */
		u_freebranch(curbuf, uhfree, &old_curhead);
	    else if (uhfree->uh_alt_next.ptr == NULL)
		/* There is no branch, only free one header. */
		u_freeheader(curbuf, uhfree, &old_curhead);
	    else
	    {
		/* Free the oldest alternate branch as a whole.
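		 * Follow the uh_alt_next pointers to the last alternate
		 * and free that branch, keeping the newer branches intact.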
*/ while (uhfree->uh_alt_next.ptr != NULL) uhfree = uhfree->uh_alt_next.ptr; u_freebranch(curbuf, uhfree, &old_curhead); } #ifdef U_DEBUG u_check(TRUE); #endif } if (uhp == NULL) /* no undo at all */ { if (old_curhead != NULL) u_freebranch(curbuf, old_curhead, NULL); curbuf->b_u_synced = FALSE; return OK; } uhp->uh_prev.ptr = NULL; uhp->uh_next.ptr = curbuf->b_u_newhead; uhp->uh_alt_next.ptr = old_curhead; if (old_curhead != NULL) { uhp->uh_alt_prev.ptr = old_curhead->uh_alt_prev.ptr; if (uhp->uh_alt_prev.ptr != NULL) uhp->uh_alt_prev.ptr->uh_alt_next.ptr = uhp; old_curhead->uh_alt_prev.ptr = uhp; if (curbuf->b_u_oldhead == old_curhead) curbuf->b_u_oldhead = uhp; } else uhp->uh_alt_prev.ptr = NULL; if (curbuf->b_u_newhead != NULL) curbuf->b_u_newhead->uh_prev.ptr = uhp; uhp->uh_seq = ++curbuf->b_u_seq_last; curbuf->b_u_seq_cur = uhp->uh_seq; uhp->uh_time = vim_time(); uhp->uh_save_nr = 0; curbuf->b_u_time_cur = uhp->uh_time + 1; uhp->uh_walk = 0; uhp->uh_entry = NULL; uhp->uh_getbot_entry = NULL; uhp->uh_cursor = curwin->w_cursor; /* save cursor pos. for undo */ #ifdef FEAT_VIRTUALEDIT if (virtual_active() && curwin->w_cursor.coladd > 0) uhp->uh_cursor_vcol = getviscol(); else uhp->uh_cursor_vcol = -1; #endif /* save changed and buffer empty flag for undo */ uhp->uh_flags = (curbuf->b_changed ? UH_CHANGED : 0) + ((curbuf->b_ml.ml_flags & ML_EMPTY) ? UH_EMPTYBUF : 0); /* save named marks and Visual marks for undo */ mch_memmove(uhp->uh_namedm, curbuf->b_namedm, sizeof(pos_T) * NMARKS); uhp->uh_visual = curbuf->b_visual; curbuf->b_u_newhead = uhp; if (curbuf->b_u_oldhead == NULL) curbuf->b_u_oldhead = uhp; ++curbuf->b_u_numhead; } else { if (get_undolevel() < 0) /* no undo at all */ return OK; /* * When saving a single line, and it has been saved just before, it * doesn't make sense saving it again. Saves a lot of memory when * making lots of changes inside the same line. * This is only possible if the previous change didn't increase or * decrease the number of lines. * Check the ten last changes. More doesn't make sense and takes too * long. */ if (size == 1) { uep = u_get_headentry(); prev_uep = NULL; for (i = 0; i < 10; ++i) { if (uep == NULL) break; /* If lines have been inserted/deleted we give up. * Also when the line was included in a multi-line save. */ if ((curbuf->b_u_newhead->uh_getbot_entry != uep ? (uep->ue_top + uep->ue_size + 1 != (uep->ue_bot == 0 ? curbuf->b_ml.ml_line_count + 1 : uep->ue_bot)) : uep->ue_lcount != curbuf->b_ml.ml_line_count) || (uep->ue_size > 1 && top >= uep->ue_top && top + 2 <= uep->ue_top + uep->ue_size + 1)) break; /* If it's the same line we can skip saving it again. */ if (uep->ue_size == 1 && uep->ue_top == top) { if (i > 0) { /* It's not the last entry: get ue_bot for the last * entry now. Following deleted/inserted lines go to * the re-used entry. */ u_getbot(); curbuf->b_u_synced = FALSE; /* Move the found entry to become the last entry. The * order of undo/redo doesn't matter for the entries * we move it over, since they don't change the line * count and don't include this line. It does matter * for the found entry if the line count is changed by * the executed command. */ prev_uep->ue_next = uep->ue_next; uep->ue_next = curbuf->b_u_newhead->uh_entry; curbuf->b_u_newhead->uh_entry = uep; } /* The executed command may change the line count. 
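		     * Store ue_bot now when the new bottom line is known,
		     * otherwise mark the entry so that u_getbot() computes
		     * it later.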
*/ if (newbot != 0) uep->ue_bot = newbot; else if (bot > curbuf->b_ml.ml_line_count) uep->ue_bot = 0; else { uep->ue_lcount = curbuf->b_ml.ml_line_count; curbuf->b_u_newhead->uh_getbot_entry = uep; } return OK; } prev_uep = uep; uep = uep->ue_next; } } /* find line number for ue_bot for previous u_save() */ u_getbot(); } #if !defined(UNIX) && !defined(WIN32) /* * With Amiga we can't handle big undo's, because * then u_alloc_line would have to allocate a block larger than 32K */ if (size >= 8000) goto nomem; #endif /* * add lines in front of entry list */ uep = (u_entry_T *)U_ALLOC_LINE(sizeof(u_entry_T)); if (uep == NULL) goto nomem; vim_memset(uep, 0, sizeof(u_entry_T)); #ifdef U_DEBUG uep->ue_magic = UE_MAGIC; #endif uep->ue_size = size; uep->ue_top = top; if (newbot != 0) uep->ue_bot = newbot; /* * Use 0 for ue_bot if bot is below last line. * Otherwise we have to compute ue_bot later. */ else if (bot > curbuf->b_ml.ml_line_count) uep->ue_bot = 0; else { uep->ue_lcount = curbuf->b_ml.ml_line_count; curbuf->b_u_newhead->uh_getbot_entry = uep; } if (size > 0) { if ((uep->ue_array = (char_u **)U_ALLOC_LINE( sizeof(char_u *) * size)) == NULL) { u_freeentry(uep, 0L); goto nomem; } for (i = 0, lnum = top + 1; i < size; ++i) { fast_breakcheck(); if (got_int) { u_freeentry(uep, i); return FAIL; } if ((uep->ue_array[i] = u_save_line(lnum++)) == NULL) { u_freeentry(uep, i); goto nomem; } } } else uep->ue_array = NULL; uep->ue_next = curbuf->b_u_newhead->uh_entry; curbuf->b_u_newhead->uh_entry = uep; curbuf->b_u_synced = FALSE; undo_undoes = FALSE; #ifdef U_DEBUG u_check(FALSE); #endif return OK; nomem: msg_silent = 0; /* must display the prompt */ if (ask_yesno((char_u *)_("No undo possible; continue anyway"), TRUE) == 'y') { undo_off = TRUE; /* will be reset when character typed */ return OK; } do_outofmem_msg((long_u)0); return FAIL; } #if defined(FEAT_PERSISTENT_UNDO) || defined(PROTO) # define UF_START_MAGIC "Vim\237UnDo\345" /* magic at start of undofile */ # define UF_START_MAGIC_LEN 9 # define UF_HEADER_MAGIC 0x5fd0 /* magic at start of header */ # define UF_HEADER_END_MAGIC 0xe7aa /* magic after last header */ # define UF_ENTRY_MAGIC 0xf518 /* magic at start of entry */ # define UF_ENTRY_END_MAGIC 0x3581 /* magic after last entry */ # define UF_VERSION 2 /* 2-byte undofile version number */ # define UF_VERSION_CRYPT 0x8002 /* idem, encrypted */ /* extra fields for header */ # define UF_LAST_SAVE_NR 1 /* extra fields for uhp */ # define UHP_SAVE_NR 1 static char_u e_not_open[] = N_("E828: Cannot open undo file for writing: %s"); /* * Compute the hash for the current buffer text into hash[UNDO_HASH_SIZE]. */ void u_compute_hash(char_u *hash) { context_sha256_T ctx; linenr_T lnum; char_u *p; sha256_start(&ctx); for (lnum = 1; lnum <= curbuf->b_ml.ml_line_count; ++lnum) { p = ml_get(lnum); sha256_update(&ctx, p, (UINT32_T)(STRLEN(p) + 1)); } sha256_finish(&ctx, hash); } /* * Return an allocated string of the full path of the target undofile. * When "reading" is TRUE find the file to read, go over all directories in * 'undodir'. * When "reading" is FALSE use the first name where the directory exists. * Returns NULL when there is no place to write or no file to read. 
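 * For the directory entry "." the undo file is put next to the edited
 * file: "dir/name" becomes "dir/.name.un~".  For any other directory the
 * full path is munged by replacing the path separators with '%', e.g.
 * "/home/user/foo.c" turns into "%home%user%foo.c" inside that directory.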
*/ char_u * u_get_undo_file_name(char_u *buf_ffname, int reading) { char_u *dirp; char_u dir_name[IOSIZE + 1]; char_u *munged_name = NULL; char_u *undo_file_name = NULL; int dir_len; char_u *p; stat_T st; char_u *ffname = buf_ffname; #ifdef HAVE_READLINK char_u fname_buf[MAXPATHL]; #endif if (ffname == NULL) return NULL; #ifdef HAVE_READLINK /* Expand symlink in the file name, so that we put the undo file with the * actual file instead of with the symlink. */ if (resolve_symlink(ffname, fname_buf) == OK) ffname = fname_buf; #endif /* Loop over 'undodir'. When reading find the first file that exists. * When not reading use the first directory that exists or ".". */ dirp = p_udir; while (*dirp != NUL) { dir_len = copy_option_part(&dirp, dir_name, IOSIZE, ","); if (dir_len == 1 && dir_name[0] == '.') { /* Use same directory as the ffname, * "dir/name" -> "dir/.name.un~" */ undo_file_name = vim_strnsave(ffname, (int)(STRLEN(ffname) + 5)); if (undo_file_name == NULL) break; p = gettail(undo_file_name); #ifdef VMS /* VMS can not handle more than one dot in the filenames * use "dir/name" -> "dir/_un_name" - add _un_ * at the beginning to keep the extension */ mch_memmove(p + 4, p, STRLEN(p) + 1); mch_memmove(p, "_un_", 4); #else /* Use same directory as the ffname, * "dir/name" -> "dir/.name.un~" */ mch_memmove(p + 1, p, STRLEN(p) + 1); *p = '.'; STRCAT(p, ".un~"); #endif } else { dir_name[dir_len] = NUL; if (mch_isdir(dir_name)) { if (munged_name == NULL) { munged_name = vim_strsave(ffname); if (munged_name == NULL) return NULL; for (p = munged_name; *p != NUL; mb_ptr_adv(p)) if (vim_ispathsep(*p)) *p = '%'; } undo_file_name = concat_fnames(dir_name, munged_name, TRUE); } } /* When reading check if the file exists. */ if (undo_file_name != NULL && (!reading || mch_stat((char *)undo_file_name, &st) >= 0)) break; vim_free(undo_file_name); undo_file_name = NULL; } vim_free(munged_name); return undo_file_name; } static void corruption_error(char *mesg, char_u *file_name) { EMSG3(_("E825: Corrupted undo file (%s): %s"), mesg, file_name); } static void u_free_uhp(u_header_T *uhp) { u_entry_T *nuep; u_entry_T *uep; uep = uhp->uh_entry; while (uep != NULL) { nuep = uep->ue_next; u_freeentry(uep, uep->ue_size); uep = nuep; } vim_free(uhp); } /* * Write a sequence of bytes to the undo file. * Buffers and encrypts as needed. * Returns OK or FAIL. */ static int undo_write(bufinfo_T *bi, char_u *ptr, size_t len) { #ifdef FEAT_CRYPT if (bi->bi_buffer != NULL) { size_t len_todo = len; char_u *p = ptr; while (bi->bi_used + len_todo >= CRYPT_BUF_SIZE) { size_t n = CRYPT_BUF_SIZE - bi->bi_used; mch_memmove(bi->bi_buffer + bi->bi_used, p, n); len_todo -= n; p += n; bi->bi_used = CRYPT_BUF_SIZE; if (undo_flush(bi) == FAIL) return FAIL; } if (len_todo > 0) { mch_memmove(bi->bi_buffer + bi->bi_used, p, len_todo); bi->bi_used += len_todo; } return OK; } #endif if (fwrite(ptr, len, (size_t)1, bi->bi_fp) != 1) return FAIL; return OK; } #ifdef FEAT_CRYPT static int undo_flush(bufinfo_T *bi) { if (bi->bi_buffer != NULL && bi->bi_used > 0) { crypt_encode_inplace(bi->bi_state, bi->bi_buffer, bi->bi_used); if (fwrite(bi->bi_buffer, bi->bi_used, (size_t)1, bi->bi_fp) != 1) return FAIL; bi->bi_used = 0; } return OK; } #endif /* * Write "ptr[len]" and crypt the bytes when needed. * Returns OK or FAIL. 
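 * Short strings are encrypted into a small buffer on the stack, which
 * avoids a malloc()/free() round trip for every line.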
*/ static int fwrite_crypt(bufinfo_T *bi, char_u *ptr, size_t len) { #ifdef FEAT_CRYPT char_u *copy; char_u small_buf[100]; size_t i; if (bi->bi_state != NULL && bi->bi_buffer == NULL) { /* crypting every piece of text separately */ if (len < 100) copy = small_buf; /* no malloc()/free() for short strings */ else { copy = lalloc(len, FALSE); if (copy == NULL) return 0; } crypt_encode(bi->bi_state, ptr, len, copy); i = fwrite(copy, len, (size_t)1, bi->bi_fp); if (copy != small_buf) vim_free(copy); return i == 1 ? OK : FAIL; } #endif return undo_write(bi, ptr, len); } /* * Write a number, MSB first, in "len" bytes. * Must match with undo_read_?c() functions. * Returns OK or FAIL. */ static int undo_write_bytes(bufinfo_T *bi, long_u nr, int len) { char_u buf[8]; int i; int bufi = 0; for (i = len - 1; i >= 0; --i) buf[bufi++] = (char_u)(nr >> (i * 8)); return undo_write(bi, buf, (size_t)len); } /* * Write the pointer to an undo header. Instead of writing the pointer itself * we use the sequence number of the header. This is converted back to * pointers when reading. */ static void put_header_ptr(bufinfo_T *bi, u_header_T *uhp) { undo_write_bytes(bi, (long_u)(uhp != NULL ? uhp->uh_seq : 0), 4); } static int undo_read_4c(bufinfo_T *bi) { #ifdef FEAT_CRYPT if (bi->bi_buffer != NULL) { char_u buf[4]; int n; undo_read(bi, buf, (size_t)4); n = ((unsigned)buf[0] << 24) + (buf[1] << 16) + (buf[2] << 8) + buf[3]; return n; } #endif return get4c(bi->bi_fp); } static int undo_read_2c(bufinfo_T *bi) { #ifdef FEAT_CRYPT if (bi->bi_buffer != NULL) { char_u buf[2]; int n; undo_read(bi, buf, (size_t)2); n = (buf[0] << 8) + buf[1]; return n; } #endif return get2c(bi->bi_fp); } static int undo_read_byte(bufinfo_T *bi) { #ifdef FEAT_CRYPT if (bi->bi_buffer != NULL) { char_u buf[1]; undo_read(bi, buf, (size_t)1); return buf[0]; } #endif return getc(bi->bi_fp); } static time_t undo_read_time(bufinfo_T *bi) { #ifdef FEAT_CRYPT if (bi->bi_buffer != NULL) { char_u buf[8]; time_t n = 0; int i; undo_read(bi, buf, (size_t)8); for (i = 0; i < 8; ++i) n = (n << 8) + buf[i]; return n; } #endif return get8ctime(bi->bi_fp); } /* * Read "buffer[size]" from the undo file. * Return OK or FAIL. */ static int undo_read(bufinfo_T *bi, char_u *buffer, size_t size) { #ifdef FEAT_CRYPT if (bi->bi_buffer != NULL) { int size_todo = (int)size; char_u *p = buffer; while (size_todo > 0) { size_t n; if (bi->bi_used >= bi->bi_avail) { n = fread(bi->bi_buffer, 1, (size_t)CRYPT_BUF_SIZE, bi->bi_fp); if (n == 0) { /* Error may be checked for only later. Fill with zeros, * so that the reader won't use garbage. */ vim_memset(p, 0, size_todo); return FAIL; } bi->bi_avail = n; bi->bi_used = 0; crypt_decode_inplace(bi->bi_state, bi->bi_buffer, bi->bi_avail); } n = size_todo; if (n > bi->bi_avail - bi->bi_used) n = bi->bi_avail - bi->bi_used; mch_memmove(p, bi->bi_buffer + bi->bi_used, n); bi->bi_used += n; size_todo -= (int)n; p += n; } return OK; } #endif if (fread(buffer, (size_t)size, 1, bi->bi_fp) != 1) return FAIL; return OK; } /* * Read a string of length "len" from "bi->bi_fd". * "len" can be zero to allocate an empty line. * Decrypt the bytes if needed. * Append a NUL. * Returns a pointer to allocated memory or NULL for failure. 
*/ static char_u * read_string_decrypt(bufinfo_T *bi, int len) { char_u *ptr = alloc((unsigned)len + 1); if (ptr != NULL) { if (len > 0 && undo_read(bi, ptr, len) == FAIL) { vim_free(ptr); return NULL; } ptr[len] = NUL; #ifdef FEAT_CRYPT if (bi->bi_state != NULL && bi->bi_buffer == NULL) crypt_decode_inplace(bi->bi_state, ptr, len); #endif } return ptr; } /* * Writes the (not encrypted) header and initializes encryption if needed. */ static int serialize_header(bufinfo_T *bi, char_u *hash) { int len; buf_T *buf = bi->bi_buf; FILE *fp = bi->bi_fp; char_u time_buf[8]; /* Start writing, first the magic marker and undo info version. */ if (fwrite(UF_START_MAGIC, (size_t)UF_START_MAGIC_LEN, (size_t)1, fp) != 1) return FAIL; /* If the buffer is encrypted then all text bytes following will be * encrypted. Numbers and other info is not crypted. */ #ifdef FEAT_CRYPT if (*buf->b_p_key != NUL) { char_u *header; int header_len; undo_write_bytes(bi, (long_u)UF_VERSION_CRYPT, 2); bi->bi_state = crypt_create_for_writing(crypt_get_method_nr(buf), buf->b_p_key, &header, &header_len); if (bi->bi_state == NULL) return FAIL; len = (int)fwrite(header, (size_t)header_len, (size_t)1, fp); vim_free(header); if (len != 1) { crypt_free_state(bi->bi_state); bi->bi_state = NULL; return FAIL; } if (crypt_whole_undofile(crypt_get_method_nr(buf))) { bi->bi_buffer = alloc(CRYPT_BUF_SIZE); if (bi->bi_buffer == NULL) { crypt_free_state(bi->bi_state); bi->bi_state = NULL; return FAIL; } bi->bi_used = 0; } } else #endif undo_write_bytes(bi, (long_u)UF_VERSION, 2); /* Write a hash of the buffer text, so that we can verify it is still the * same when reading the buffer text. */ if (undo_write(bi, hash, (size_t)UNDO_HASH_SIZE) == FAIL) return FAIL; /* buffer-specific data */ undo_write_bytes(bi, (long_u)buf->b_ml.ml_line_count, 4); len = buf->b_u_line_ptr != NULL ? (int)STRLEN(buf->b_u_line_ptr) : 0; undo_write_bytes(bi, (long_u)len, 4); if (len > 0 && fwrite_crypt(bi, buf->b_u_line_ptr, (size_t)len) == FAIL) return FAIL; undo_write_bytes(bi, (long_u)buf->b_u_line_lnum, 4); undo_write_bytes(bi, (long_u)buf->b_u_line_colnr, 4); /* Undo structures header data */ put_header_ptr(bi, buf->b_u_oldhead); put_header_ptr(bi, buf->b_u_newhead); put_header_ptr(bi, buf->b_u_curhead); undo_write_bytes(bi, (long_u)buf->b_u_numhead, 4); undo_write_bytes(bi, (long_u)buf->b_u_seq_last, 4); undo_write_bytes(bi, (long_u)buf->b_u_seq_cur, 4); time_to_bytes(buf->b_u_time_cur, time_buf); undo_write(bi, time_buf, 8); /* Optional fields. */ undo_write_bytes(bi, 4, 1); undo_write_bytes(bi, UF_LAST_SAVE_NR, 1); undo_write_bytes(bi, (long_u)buf->b_u_save_nr_last, 4); undo_write_bytes(bi, 0, 1); /* end marker */ return OK; } static int serialize_uhp(bufinfo_T *bi, u_header_T *uhp) { int i; u_entry_T *uep; char_u time_buf[8]; if (undo_write_bytes(bi, (long_u)UF_HEADER_MAGIC, 2) == FAIL) return FAIL; put_header_ptr(bi, uhp->uh_next.ptr); put_header_ptr(bi, uhp->uh_prev.ptr); put_header_ptr(bi, uhp->uh_alt_next.ptr); put_header_ptr(bi, uhp->uh_alt_prev.ptr); undo_write_bytes(bi, uhp->uh_seq, 4); serialize_pos(bi, uhp->uh_cursor); #ifdef FEAT_VIRTUALEDIT undo_write_bytes(bi, (long_u)uhp->uh_cursor_vcol, 4); #else undo_write_bytes(bi, (long_u)0, 4); #endif undo_write_bytes(bi, (long_u)uhp->uh_flags, 2); /* Assume NMARKS will stay the same. */ for (i = 0; i < NMARKS; ++i) serialize_pos(bi, uhp->uh_namedm[i]); serialize_visualinfo(bi, &uhp->uh_visual); time_to_bytes(uhp->uh_time, time_buf); undo_write(bi, time_buf, 8); /* Optional fields. 
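     * Each optional field is written as <length byte> <type byte> <data>,
     * where the length counts only the data bytes; a zero length byte
     * terminates the list.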
*/ undo_write_bytes(bi, 4, 1); undo_write_bytes(bi, UHP_SAVE_NR, 1); undo_write_bytes(bi, (long_u)uhp->uh_save_nr, 4); undo_write_bytes(bi, 0, 1); /* end marker */ /* Write all the entries. */ for (uep = uhp->uh_entry; uep != NULL; uep = uep->ue_next) { undo_write_bytes(bi, (long_u)UF_ENTRY_MAGIC, 2); if (serialize_uep(bi, uep) == FAIL) return FAIL; } undo_write_bytes(bi, (long_u)UF_ENTRY_END_MAGIC, 2); return OK; } static u_header_T * unserialize_uhp(bufinfo_T *bi, char_u *file_name) { u_header_T *uhp; int i; u_entry_T *uep, *last_uep; int c; int error; uhp = (u_header_T *)U_ALLOC_LINE(sizeof(u_header_T)); if (uhp == NULL) return NULL; vim_memset(uhp, 0, sizeof(u_header_T)); #ifdef U_DEBUG uhp->uh_magic = UH_MAGIC; #endif uhp->uh_next.seq = undo_read_4c(bi); uhp->uh_prev.seq = undo_read_4c(bi); uhp->uh_alt_next.seq = undo_read_4c(bi); uhp->uh_alt_prev.seq = undo_read_4c(bi); uhp->uh_seq = undo_read_4c(bi); if (uhp->uh_seq <= 0) { corruption_error("uh_seq", file_name); vim_free(uhp); return NULL; } unserialize_pos(bi, &uhp->uh_cursor); #ifdef FEAT_VIRTUALEDIT uhp->uh_cursor_vcol = undo_read_4c(bi); #else (void)undo_read_4c(bi); #endif uhp->uh_flags = undo_read_2c(bi); for (i = 0; i < NMARKS; ++i) unserialize_pos(bi, &uhp->uh_namedm[i]); unserialize_visualinfo(bi, &uhp->uh_visual); uhp->uh_time = undo_read_time(bi); /* Optional fields. */ for (;;) { int len = undo_read_byte(bi); int what; if (len == 0) break; what = undo_read_byte(bi); switch (what) { case UHP_SAVE_NR: uhp->uh_save_nr = undo_read_4c(bi); break; default: /* field not supported, skip */ while (--len >= 0) (void)undo_read_byte(bi); } } /* Unserialize the uep list. */ last_uep = NULL; while ((c = undo_read_2c(bi)) == UF_ENTRY_MAGIC) { error = FALSE; uep = unserialize_uep(bi, &error, file_name); if (last_uep == NULL) uhp->uh_entry = uep; else last_uep->ue_next = uep; last_uep = uep; if (uep == NULL || error) { u_free_uhp(uhp); return NULL; } } if (c != UF_ENTRY_END_MAGIC) { corruption_error("entry end", file_name); u_free_uhp(uhp); return NULL; } return uhp; } /* * Serialize "uep". 
*/ static int serialize_uep( bufinfo_T *bi, u_entry_T *uep) { int i; size_t len; undo_write_bytes(bi, (long_u)uep->ue_top, 4); undo_write_bytes(bi, (long_u)uep->ue_bot, 4); undo_write_bytes(bi, (long_u)uep->ue_lcount, 4); undo_write_bytes(bi, (long_u)uep->ue_size, 4); for (i = 0; i < uep->ue_size; ++i) { len = STRLEN(uep->ue_array[i]); if (undo_write_bytes(bi, (long_u)len, 4) == FAIL) return FAIL; if (len > 0 && fwrite_crypt(bi, uep->ue_array[i], len) == FAIL) return FAIL; } return OK; } static u_entry_T * unserialize_uep(bufinfo_T *bi, int *error, char_u *file_name) { int i; u_entry_T *uep; char_u **array = NULL; char_u *line; int line_len; uep = (u_entry_T *)U_ALLOC_LINE(sizeof(u_entry_T)); if (uep == NULL) return NULL; vim_memset(uep, 0, sizeof(u_entry_T)); #ifdef U_DEBUG uep->ue_magic = UE_MAGIC; #endif uep->ue_top = undo_read_4c(bi); uep->ue_bot = undo_read_4c(bi); uep->ue_lcount = undo_read_4c(bi); uep->ue_size = undo_read_4c(bi); if (uep->ue_size > 0) { if (uep->ue_size < LONG_MAX / (int)sizeof(char_u *)) array = (char_u **)U_ALLOC_LINE(sizeof(char_u *) * uep->ue_size); if (array == NULL) { *error = TRUE; return uep; } vim_memset(array, 0, sizeof(char_u *) * uep->ue_size); } uep->ue_array = array; for (i = 0; i < uep->ue_size; ++i) { line_len = undo_read_4c(bi); if (line_len >= 0) line = read_string_decrypt(bi, line_len); else { line = NULL; corruption_error("line length", file_name); } if (line == NULL) { *error = TRUE; return uep; } array[i] = line; } return uep; } /* * Serialize "pos". */ static void serialize_pos(bufinfo_T *bi, pos_T pos) { undo_write_bytes(bi, (long_u)pos.lnum, 4); undo_write_bytes(bi, (long_u)pos.col, 4); #ifdef FEAT_VIRTUALEDIT undo_write_bytes(bi, (long_u)pos.coladd, 4); #else undo_write_bytes(bi, (long_u)0, 4); #endif } /* * Unserialize the pos_T at the current position. */ static void unserialize_pos(bufinfo_T *bi, pos_T *pos) { pos->lnum = undo_read_4c(bi); if (pos->lnum < 0) pos->lnum = 0; pos->col = undo_read_4c(bi); if (pos->col < 0) pos->col = 0; #ifdef FEAT_VIRTUALEDIT pos->coladd = undo_read_4c(bi); if (pos->coladd < 0) pos->coladd = 0; #else (void)undo_read_4c(bi); #endif } /* * Serialize "info". */ static void serialize_visualinfo(bufinfo_T *bi, visualinfo_T *info) { serialize_pos(bi, info->vi_start); serialize_pos(bi, info->vi_end); undo_write_bytes(bi, (long_u)info->vi_mode, 4); undo_write_bytes(bi, (long_u)info->vi_curswant, 4); } /* * Unserialize the visualinfo_T at the current position. */ static void unserialize_visualinfo(bufinfo_T *bi, visualinfo_T *info) { unserialize_pos(bi, &info->vi_start); unserialize_pos(bi, &info->vi_end); info->vi_mode = undo_read_4c(bi); info->vi_curswant = undo_read_4c(bi); } /* * Write the undo tree in an undo file. * When "name" is not NULL, use it as the name of the undo file. * Otherwise use buf->b_ffname to generate the undo file name. * "buf" must never be null, buf->b_ffname is used to obtain the original file * permissions. * "forceit" is TRUE for ":wundo!", FALSE otherwise. * "hash[UNDO_HASH_SIZE]" must be the hash value of the buffer text. 
*/ void u_write_undo( char_u *name, int forceit, buf_T *buf, char_u *hash) { u_header_T *uhp; char_u *file_name; int mark; #ifdef U_DEBUG int headers_written = 0; #endif int fd; FILE *fp = NULL; int perm; int write_ok = FALSE; #ifdef UNIX int st_old_valid = FALSE; stat_T st_old; stat_T st_new; #endif bufinfo_T bi; vim_memset(&bi, 0, sizeof(bi)); if (name == NULL) { file_name = u_get_undo_file_name(buf->b_ffname, FALSE); if (file_name == NULL) { if (p_verbose > 0) { verbose_enter(); smsg((char_u *) _("Cannot write undo file in any directory in 'undodir'")); verbose_leave(); } return; } } else file_name = name; /* * Decide about the permission to use for the undo file. If the buffer * has a name use the permission of the original file. Otherwise only * allow the user to access the undo file. */ perm = 0600; if (buf->b_ffname != NULL) { #ifdef UNIX if (mch_stat((char *)buf->b_ffname, &st_old) >= 0) { perm = st_old.st_mode; st_old_valid = TRUE; } #else perm = mch_getperm(buf->b_ffname); if (perm < 0) perm = 0600; #endif } /* strip any s-bit and executable bit */ perm = perm & 0666; /* If the undo file already exists, verify that it actually is an undo * file, and delete it. */ if (mch_getperm(file_name) >= 0) { if (name == NULL || !forceit) { /* Check we can read it and it's an undo file. */ fd = mch_open((char *)file_name, O_RDONLY|O_EXTRA, 0); if (fd < 0) { if (name != NULL || p_verbose > 0) { if (name == NULL) verbose_enter(); smsg((char_u *) _("Will not overwrite with undo file, cannot read: %s"), file_name); if (name == NULL) verbose_leave(); } goto theend; } else { char_u mbuf[UF_START_MAGIC_LEN]; int len; len = read_eintr(fd, mbuf, UF_START_MAGIC_LEN); close(fd); if (len < UF_START_MAGIC_LEN || memcmp(mbuf, UF_START_MAGIC, UF_START_MAGIC_LEN) != 0) { if (name != NULL || p_verbose > 0) { if (name == NULL) verbose_enter(); smsg((char_u *) _("Will not overwrite, this is not an undo file: %s"), file_name); if (name == NULL) verbose_leave(); } goto theend; } } } mch_remove(file_name); } /* If there is no undo information at all, quit here after deleting any * existing undo file. */ if (buf->b_u_numhead == 0 && buf->b_u_line_ptr == NULL) { if (p_verbose > 0) verb_msg((char_u *)_("Skipping undo file write, nothing to undo")); goto theend; } fd = mch_open((char *)file_name, O_CREAT|O_EXTRA|O_WRONLY|O_EXCL|O_NOFOLLOW, perm); if (fd < 0) { EMSG2(_(e_not_open), file_name); goto theend; } (void)mch_setperm(file_name, perm); if (p_verbose > 0) { verbose_enter(); smsg((char_u *)_("Writing undo file: %s"), file_name); verbose_leave(); } #ifdef U_DEBUG /* Check there is no problem in undo info before writing. */ u_check(FALSE); #endif #ifdef UNIX /* * Try to set the group of the undo file same as the original file. If * this fails, set the protection bits for the group same as the * protection bits for others. */ if (st_old_valid && mch_stat((char *)file_name, &st_new) >= 0 && st_new.st_gid != st_old.st_gid # ifdef HAVE_FCHOWN /* sequent-ptx lacks fchown() */ && fchown(fd, (uid_t)-1, st_old.st_gid) != 0 # endif ) mch_setperm(file_name, (perm & 0707) | ((perm & 07) << 3)); # if defined(HAVE_SELINUX) || defined(HAVE_SMACK) if (buf->b_ffname != NULL) mch_copy_sec(buf->b_ffname, file_name); # endif #endif fp = fdopen(fd, "w"); if (fp == NULL) { EMSG2(_(e_not_open), file_name); close(fd); mch_remove(file_name); goto theend; } /* Undo must be synced. */ u_sync(TRUE); /* * Write the header. Initializes encryption, if enabled. 
*/ bi.bi_buf = buf; bi.bi_fp = fp; if (serialize_header(&bi, hash) == FAIL) goto write_error; /* * Iteratively serialize UHPs and their UEPs from the top down. */ mark = ++lastmark; uhp = buf->b_u_oldhead; while (uhp != NULL) { /* Serialize current UHP if we haven't seen it */ if (uhp->uh_walk != mark) { uhp->uh_walk = mark; #ifdef U_DEBUG ++headers_written; #endif if (serialize_uhp(&bi, uhp) == FAIL) goto write_error; } /* Now walk through the tree - algorithm from undo_time(). */ if (uhp->uh_prev.ptr != NULL && uhp->uh_prev.ptr->uh_walk != mark) uhp = uhp->uh_prev.ptr; else if (uhp->uh_alt_next.ptr != NULL && uhp->uh_alt_next.ptr->uh_walk != mark) uhp = uhp->uh_alt_next.ptr; else if (uhp->uh_next.ptr != NULL && uhp->uh_alt_prev.ptr == NULL && uhp->uh_next.ptr->uh_walk != mark) uhp = uhp->uh_next.ptr; else if (uhp->uh_alt_prev.ptr != NULL) uhp = uhp->uh_alt_prev.ptr; else uhp = uhp->uh_next.ptr; } if (undo_write_bytes(&bi, (long_u)UF_HEADER_END_MAGIC, 2) == OK) write_ok = TRUE; #ifdef U_DEBUG if (headers_written != buf->b_u_numhead) { EMSGN("Written %ld headers, ...", headers_written); EMSGN("... but numhead is %ld", buf->b_u_numhead); } #endif #ifdef FEAT_CRYPT if (bi.bi_state != NULL && undo_flush(&bi) == FAIL) write_ok = FALSE; #endif write_error: fclose(fp); if (!write_ok) EMSG2(_("E829: write error in undo file: %s"), file_name); #if defined(MACOS_CLASSIC) || defined(WIN3264) /* Copy file attributes; for systems where this can only be done after * closing the file. */ if (buf->b_ffname != NULL) (void)mch_copy_file_attribute(buf->b_ffname, file_name); #endif #ifdef HAVE_ACL if (buf->b_ffname != NULL) { vim_acl_T acl; /* For systems that support ACL: get the ACL from the original file. */ acl = mch_get_acl(buf->b_ffname); mch_set_acl(file_name, acl); mch_free_acl(acl); } #endif theend: #ifdef FEAT_CRYPT if (bi.bi_state != NULL) crypt_free_state(bi.bi_state); vim_free(bi.bi_buffer); #endif if (file_name != name) vim_free(file_name); } /* * Load the undo tree from an undo file. * If "name" is not NULL use it as the undo file name. This also means being * a bit more verbose. * Otherwise use curbuf->b_ffname to generate the undo file name. * "hash[UNDO_HASH_SIZE]" must be the hash value of the buffer text. */ void u_read_undo(char_u *name, char_u *hash, char_u *orig_name) { char_u *file_name; FILE *fp; long version, str_len; char_u *line_ptr = NULL; linenr_T line_lnum; colnr_T line_colnr; linenr_T line_count; long num_head = 0; long old_header_seq, new_header_seq, cur_header_seq; long seq_last, seq_cur; long last_save_nr = 0; short old_idx = -1, new_idx = -1, cur_idx = -1; long num_read_uhps = 0; time_t seq_time; int i, j; int c; u_header_T *uhp; u_header_T **uhp_table = NULL; char_u read_hash[UNDO_HASH_SIZE]; char_u magic_buf[UF_START_MAGIC_LEN]; #ifdef U_DEBUG int *uhp_table_used; #endif #ifdef UNIX stat_T st_orig; stat_T st_undo; #endif bufinfo_T bi; vim_memset(&bi, 0, sizeof(bi)); if (name == NULL) { file_name = u_get_undo_file_name(curbuf->b_ffname, TRUE); if (file_name == NULL) return; #ifdef UNIX /* For safety we only read an undo file if the owner is equal to the * owner of the text file or equal to the current user. 
*/ if (mch_stat((char *)orig_name, &st_orig) >= 0 && mch_stat((char *)file_name, &st_undo) >= 0 && st_orig.st_uid != st_undo.st_uid && st_undo.st_uid != getuid()) { if (p_verbose > 0) { verbose_enter(); smsg((char_u *)_("Not reading undo file, owner differs: %s"), file_name); verbose_leave(); } return; } #endif } else file_name = name; if (p_verbose > 0) { verbose_enter(); smsg((char_u *)_("Reading undo file: %s"), file_name); verbose_leave(); } fp = mch_fopen((char *)file_name, "r"); if (fp == NULL) { if (name != NULL || p_verbose > 0) EMSG2(_("E822: Cannot open undo file for reading: %s"), file_name); goto error; } bi.bi_buf = curbuf; bi.bi_fp = fp; /* * Read the undo file header. */ if (fread(magic_buf, UF_START_MAGIC_LEN, 1, fp) != 1 || memcmp(magic_buf, UF_START_MAGIC, UF_START_MAGIC_LEN) != 0) { EMSG2(_("E823: Not an undo file: %s"), file_name); goto error; } version = get2c(fp); if (version == UF_VERSION_CRYPT) { #ifdef FEAT_CRYPT if (*curbuf->b_p_key == NUL) { EMSG2(_("E832: Non-encrypted file has encrypted undo file: %s"), file_name); goto error; } bi.bi_state = crypt_create_from_file(fp, curbuf->b_p_key); if (bi.bi_state == NULL) { EMSG2(_("E826: Undo file decryption failed: %s"), file_name); goto error; } if (crypt_whole_undofile(bi.bi_state->method_nr)) { bi.bi_buffer = alloc(CRYPT_BUF_SIZE); if (bi.bi_buffer == NULL) { crypt_free_state(bi.bi_state); bi.bi_state = NULL; goto error; } bi.bi_avail = 0; bi.bi_used = 0; } #else EMSG2(_("E827: Undo file is encrypted: %s"), file_name); goto error; #endif } else if (version != UF_VERSION) { EMSG2(_("E824: Incompatible undo file: %s"), file_name); goto error; } if (undo_read(&bi, read_hash, (size_t)UNDO_HASH_SIZE) == FAIL) { corruption_error("hash", file_name); goto error; } line_count = (linenr_T)undo_read_4c(&bi); if (memcmp(hash, read_hash, UNDO_HASH_SIZE) != 0 || line_count != curbuf->b_ml.ml_line_count) { if (p_verbose > 0 || name != NULL) { if (name == NULL) verbose_enter(); give_warning((char_u *) _("File contents changed, cannot use undo info"), TRUE); if (name == NULL) verbose_leave(); } goto error; } /* Read undo data for "U" command. */ str_len = undo_read_4c(&bi); if (str_len < 0) goto error; if (str_len > 0) line_ptr = read_string_decrypt(&bi, str_len); line_lnum = (linenr_T)undo_read_4c(&bi); line_colnr = (colnr_T)undo_read_4c(&bi); if (line_lnum < 0 || line_colnr < 0) { corruption_error("line lnum/col", file_name); goto error; } /* Begin general undo data */ old_header_seq = undo_read_4c(&bi); new_header_seq = undo_read_4c(&bi); cur_header_seq = undo_read_4c(&bi); num_head = undo_read_4c(&bi); seq_last = undo_read_4c(&bi); seq_cur = undo_read_4c(&bi); seq_time = undo_read_time(&bi); /* Optional header fields. */ for (;;) { int len = undo_read_byte(&bi); int what; if (len == 0 || len == EOF) break; what = undo_read_byte(&bi); switch (what) { case UF_LAST_SAVE_NR: last_save_nr = undo_read_4c(&bi); break; default: /* field not supported, skip */ while (--len >= 0) (void)undo_read_byte(&bi); } } /* uhp_table will store the freshly created undo headers we allocate * until we insert them into curbuf. The table remains sorted by the * sequence numbers of the headers. * When there are no headers uhp_table is NULL. 
*/ if (num_head > 0) { if (num_head < LONG_MAX / (long)sizeof(u_header_T *)) uhp_table = (u_header_T **)U_ALLOC_LINE( num_head * sizeof(u_header_T *)); if (uhp_table == NULL) goto error; } while ((c = undo_read_2c(&bi)) == UF_HEADER_MAGIC) { if (num_read_uhps >= num_head) { corruption_error("num_head too small", file_name); goto error; } uhp = unserialize_uhp(&bi, file_name); if (uhp == NULL) goto error; uhp_table[num_read_uhps++] = uhp; } if (num_read_uhps != num_head) { corruption_error("num_head", file_name); goto error; } if (c != UF_HEADER_END_MAGIC) { corruption_error("end marker", file_name); goto error; } #ifdef U_DEBUG uhp_table_used = (int *)alloc_clear( (unsigned)(sizeof(int) * num_head + 1)); # define SET_FLAG(j) ++uhp_table_used[j] #else # define SET_FLAG(j) #endif /* We have put all of the headers into a table. Now we iterate through the * table and swizzle each sequence number we have stored in uh_*_seq into * a pointer corresponding to the header with that sequence number. */ for (i = 0; i < num_head; i++) { uhp = uhp_table[i]; if (uhp == NULL) continue; for (j = 0; j < num_head; j++) if (uhp_table[j] != NULL && i != j && uhp_table[i]->uh_seq == uhp_table[j]->uh_seq) { corruption_error("duplicate uh_seq", file_name); goto error; } for (j = 0; j < num_head; j++) if (uhp_table[j] != NULL && uhp_table[j]->uh_seq == uhp->uh_next.seq) { uhp->uh_next.ptr = uhp_table[j]; SET_FLAG(j); break; } for (j = 0; j < num_head; j++) if (uhp_table[j] != NULL && uhp_table[j]->uh_seq == uhp->uh_prev.seq) { uhp->uh_prev.ptr = uhp_table[j]; SET_FLAG(j); break; } for (j = 0; j < num_head; j++) if (uhp_table[j] != NULL && uhp_table[j]->uh_seq == uhp->uh_alt_next.seq) { uhp->uh_alt_next.ptr = uhp_table[j]; SET_FLAG(j); break; } for (j = 0; j < num_head; j++) if (uhp_table[j] != NULL && uhp_table[j]->uh_seq == uhp->uh_alt_prev.seq) { uhp->uh_alt_prev.ptr = uhp_table[j]; SET_FLAG(j); break; } if (old_header_seq > 0 && old_idx < 0 && uhp->uh_seq == old_header_seq) { old_idx = i; SET_FLAG(i); } if (new_header_seq > 0 && new_idx < 0 && uhp->uh_seq == new_header_seq) { new_idx = i; SET_FLAG(i); } if (cur_header_seq > 0 && cur_idx < 0 && uhp->uh_seq == cur_header_seq) { cur_idx = i; SET_FLAG(i); } } /* Now that we have read the undo info successfully, free the current undo * info and use the info from the file. */ u_blockfree(curbuf); curbuf->b_u_oldhead = old_idx < 0 ? NULL : uhp_table[old_idx]; curbuf->b_u_newhead = new_idx < 0 ? NULL : uhp_table[new_idx]; curbuf->b_u_curhead = cur_idx < 0 ? 
NULL : uhp_table[cur_idx]; curbuf->b_u_line_ptr = line_ptr; curbuf->b_u_line_lnum = line_lnum; curbuf->b_u_line_colnr = line_colnr; curbuf->b_u_numhead = num_head; curbuf->b_u_seq_last = seq_last; curbuf->b_u_seq_cur = seq_cur; curbuf->b_u_time_cur = seq_time; curbuf->b_u_save_nr_last = last_save_nr; curbuf->b_u_save_nr_cur = last_save_nr; curbuf->b_u_synced = TRUE; vim_free(uhp_table); #ifdef U_DEBUG for (i = 0; i < num_head; ++i) if (uhp_table_used[i] == 0) EMSGN("uhp_table entry %ld not used, leaking memory", i); vim_free(uhp_table_used); u_check(TRUE); #endif if (name != NULL) smsg((char_u *)_("Finished reading undo file %s"), file_name); goto theend; error: vim_free(line_ptr); if (uhp_table != NULL) { for (i = 0; i < num_read_uhps; i++) if (uhp_table[i] != NULL) u_free_uhp(uhp_table[i]); vim_free(uhp_table); } theend: #ifdef FEAT_CRYPT if (bi.bi_state != NULL) crypt_free_state(bi.bi_state); vim_free(bi.bi_buffer); #endif if (fp != NULL) fclose(fp); if (file_name != name) vim_free(file_name); return; } #endif /* FEAT_PERSISTENT_UNDO */ /* * If 'cpoptions' contains 'u': Undo the previous undo or redo (vi compatible). * If 'cpoptions' does not contain 'u': Always undo. */ void u_undo(int count) { /* * If we get an undo command while executing a macro, we behave like the * original vi. If this happens twice in one macro the result will not * be compatible. */ if (curbuf->b_u_synced == FALSE) { u_sync(TRUE); count = 1; } if (vim_strchr(p_cpo, CPO_UNDO) == NULL) undo_undoes = TRUE; else undo_undoes = !undo_undoes; u_doit(count); } /* * If 'cpoptions' contains 'u': Repeat the previous undo or redo. * If 'cpoptions' does not contain 'u': Always redo. */ void u_redo(int count) { if (vim_strchr(p_cpo, CPO_UNDO) == NULL) undo_undoes = FALSE; u_doit(count); } /* * Undo or redo, depending on 'undo_undoes', 'count' times. */ static void u_doit(int startcount) { int count = startcount; if (!undo_allowed()) return; u_newcount = 0; u_oldcount = 0; if (curbuf->b_ml.ml_flags & ML_EMPTY) u_oldcount = -1; while (count--) { /* Do the change warning now, so that it triggers FileChangedRO when * needed. This may cause the file to be reloaded, that must happen * before we do anything, because it may change curbuf->b_u_curhead * and more. */ change_warning(0); if (undo_undoes) { if (curbuf->b_u_curhead == NULL) /* first undo */ curbuf->b_u_curhead = curbuf->b_u_newhead; else if (get_undolevel() > 0) /* multi level undo */ /* get next undo */ curbuf->b_u_curhead = curbuf->b_u_curhead->uh_next.ptr; /* nothing to undo */ if (curbuf->b_u_numhead == 0 || curbuf->b_u_curhead == NULL) { /* stick curbuf->b_u_curhead at end */ curbuf->b_u_curhead = curbuf->b_u_oldhead; beep_flush(); if (count == startcount - 1) { MSG(_("Already at oldest change")); return; } break; } u_undoredo(TRUE); } else { if (curbuf->b_u_curhead == NULL || get_undolevel() <= 0) { beep_flush(); /* nothing to redo */ if (count == startcount - 1) { MSG(_("Already at newest change")); return; } break; } u_undoredo(FALSE); /* Advance for next redo. Set "newhead" when at the end of the * redoable changes. */ if (curbuf->b_u_curhead->uh_prev.ptr == NULL) curbuf->b_u_newhead = curbuf->b_u_curhead; curbuf->b_u_curhead = curbuf->b_u_curhead->uh_prev.ptr; } } u_undo_end(undo_undoes, FALSE); } /* * Undo or redo over the timeline. * When "step" is negative go back in time, otherwise goes forward in time. * When "sec" is FALSE make "step" steps, when "sec" is TRUE use "step" as * seconds. * When "file" is TRUE use "step" as a number of file writes. 
* When "absolute" is TRUE use "step" as the sequence number to jump to. * "sec" must be FALSE then. */ void undo_time( long step, int sec, int file, int absolute) { long target; long closest; long closest_start; long closest_seq = 0; long val; u_header_T *uhp; u_header_T *last; int mark; int nomark; int round; int dosec = sec; int dofile = file; int above = FALSE; int did_undo = TRUE; /* First make sure the current undoable change is synced. */ if (curbuf->b_u_synced == FALSE) u_sync(TRUE); u_newcount = 0; u_oldcount = 0; if (curbuf->b_ml.ml_flags & ML_EMPTY) u_oldcount = -1; /* "target" is the node below which we want to be. * Init "closest" to a value we can't reach. */ if (absolute) { if (step == 0) { /* target 0 does not exist, got to 1 and above it. */ target = 1; above = TRUE; } else target = step; closest = -1; } else { if (dosec) target = (long)(curbuf->b_u_time_cur) + step; else if (dofile) { if (step < 0) { /* Going back to a previous write. If there were changes after * the last write, count that as moving one file-write, so * that ":earlier 1f" undoes all changes since the last save. */ uhp = curbuf->b_u_curhead; if (uhp != NULL) uhp = uhp->uh_next.ptr; else uhp = curbuf->b_u_newhead; if (uhp != NULL && uhp->uh_save_nr != 0) /* "uh_save_nr" was set in the last block, that means * there were no changes since the last write */ target = curbuf->b_u_save_nr_cur + step; else /* count the changes since the last write as one step */ target = curbuf->b_u_save_nr_cur + step + 1; if (target <= 0) /* Go to before first write: before the oldest change. Use * the sequence number for that. */ dofile = FALSE; } else { /* Moving forward to a newer write. */ target = curbuf->b_u_save_nr_cur + step; if (target > curbuf->b_u_save_nr_last) { /* Go to after last write: after the latest change. Use * the sequence number for that. */ target = curbuf->b_u_seq_last + 1; dofile = FALSE; } } } else target = curbuf->b_u_seq_cur + step; if (step < 0) { if (target < 0) target = 0; closest = -1; } else { if (dosec) closest = (long)(vim_time() + 1); else if (dofile) closest = curbuf->b_u_save_nr_last + 2; else closest = curbuf->b_u_seq_last + 2; if (target >= closest) target = closest - 1; } } closest_start = closest; closest_seq = curbuf->b_u_seq_cur; /* * May do this twice: * 1. Search for "target", update "closest" to the best match found. * 2. If "target" not found search for "closest". * * When using the closest time we use the sequence number in the second * round, because there may be several entries with the same time. */ for (round = 1; round <= 2; ++round) { /* Find the path from the current state to where we want to go. The * desired state can be anywhere in the undo tree, need to go all over * it. We put "nomark" in uh_walk where we have been without success, * "mark" where it could possibly be. */ mark = ++lastmark; nomark = ++lastmark; if (curbuf->b_u_curhead == NULL) /* at leaf of the tree */ uhp = curbuf->b_u_newhead; else uhp = curbuf->b_u_curhead; while (uhp != NULL) { uhp->uh_walk = mark; if (dosec) val = (long)(uhp->uh_time); else if (dofile) val = uhp->uh_save_nr; else val = uhp->uh_seq; if (round == 1 && !(dofile && val == 0)) { /* Remember the header that is closest to the target. * It must be at least in the right direction (checked with * "b_u_seq_cur"). When the timestamp is equal find the * highest/lowest sequence number. */ if ((step < 0 ? uhp->uh_seq <= curbuf->b_u_seq_cur : uhp->uh_seq > curbuf->b_u_seq_cur) && ((dosec && val == closest) ? (step < 0 ? 
uhp->uh_seq < closest_seq : uhp->uh_seq > closest_seq) : closest == closest_start || (val > target ? (closest > target ? val - target <= closest - target : val - target <= target - closest) : (closest > target ? target - val <= closest - target : target - val <= target - closest)))) { closest = val; closest_seq = uhp->uh_seq; } } /* Quit searching when we found a match. But when searching for a * time we need to continue looking for the best uh_seq. */ if (target == val && !dosec) { target = uhp->uh_seq; break; } /* go down in the tree if we haven't been there */ if (uhp->uh_prev.ptr != NULL && uhp->uh_prev.ptr->uh_walk != nomark && uhp->uh_prev.ptr->uh_walk != mark) uhp = uhp->uh_prev.ptr; /* go to alternate branch if we haven't been there */ else if (uhp->uh_alt_next.ptr != NULL && uhp->uh_alt_next.ptr->uh_walk != nomark && uhp->uh_alt_next.ptr->uh_walk != mark) uhp = uhp->uh_alt_next.ptr; /* go up in the tree if we haven't been there and we are at the * start of alternate branches */ else if (uhp->uh_next.ptr != NULL && uhp->uh_alt_prev.ptr == NULL && uhp->uh_next.ptr->uh_walk != nomark && uhp->uh_next.ptr->uh_walk != mark) { /* If still at the start we don't go through this change. */ if (uhp == curbuf->b_u_curhead) uhp->uh_walk = nomark; uhp = uhp->uh_next.ptr; } else { /* need to backtrack; mark this node as useless */ uhp->uh_walk = nomark; if (uhp->uh_alt_prev.ptr != NULL) uhp = uhp->uh_alt_prev.ptr; else uhp = uhp->uh_next.ptr; } } if (uhp != NULL) /* found it */ break; if (absolute) { EMSGN(_("E830: Undo number %ld not found"), step); return; } if (closest == closest_start) { if (step < 0) MSG(_("Already at oldest change")); else MSG(_("Already at newest change")); return; } target = closest_seq; dosec = FALSE; dofile = FALSE; if (step < 0) above = TRUE; /* stop above the header */ } /* If we found it: Follow the path to go to where we want to be. */ if (uhp != NULL) { /* * First go up the tree as much as needed. */ while (!got_int) { /* Do the change warning now, for the same reason as above. */ change_warning(0); uhp = curbuf->b_u_curhead; if (uhp == NULL) uhp = curbuf->b_u_newhead; else uhp = uhp->uh_next.ptr; if (uhp == NULL || uhp->uh_walk != mark || (uhp->uh_seq == target && !above)) break; curbuf->b_u_curhead = uhp; u_undoredo(TRUE); uhp->uh_walk = nomark; /* don't go back down here */ } /* * And now go down the tree (redo), branching off where needed. */ while (!got_int) { /* Do the change warning now, for the same reason as above. */ change_warning(0); uhp = curbuf->b_u_curhead; if (uhp == NULL) break; /* Go back to the first branch with a mark. */ while (uhp->uh_alt_prev.ptr != NULL && uhp->uh_alt_prev.ptr->uh_walk == mark) uhp = uhp->uh_alt_prev.ptr; /* Find the last branch with a mark, that's the one. */ last = uhp; while (last->uh_alt_next.ptr != NULL && last->uh_alt_next.ptr->uh_walk == mark) last = last->uh_alt_next.ptr; if (last != uhp) { /* Make the used branch the first entry in the list of * alternatives to make "u" and CTRL-R take this branch. 
*/ while (uhp->uh_alt_prev.ptr != NULL) uhp = uhp->uh_alt_prev.ptr; if (last->uh_alt_next.ptr != NULL) last->uh_alt_next.ptr->uh_alt_prev.ptr = last->uh_alt_prev.ptr; last->uh_alt_prev.ptr->uh_alt_next.ptr = last->uh_alt_next.ptr; last->uh_alt_prev.ptr = NULL; last->uh_alt_next.ptr = uhp; uhp->uh_alt_prev.ptr = last; if (curbuf->b_u_oldhead == uhp) curbuf->b_u_oldhead = last; uhp = last; if (uhp->uh_next.ptr != NULL) uhp->uh_next.ptr->uh_prev.ptr = uhp; } curbuf->b_u_curhead = uhp; if (uhp->uh_walk != mark) break; /* must have reached the target */ /* Stop when going backwards in time and didn't find the exact * header we were looking for. */ if (uhp->uh_seq == target && above) { curbuf->b_u_seq_cur = target - 1; break; } u_undoredo(FALSE); /* Advance "curhead" to below the header we last used. If it * becomes NULL then we need to set "newhead" to this leaf. */ if (uhp->uh_prev.ptr == NULL) curbuf->b_u_newhead = uhp; curbuf->b_u_curhead = uhp->uh_prev.ptr; did_undo = FALSE; if (uhp->uh_seq == target) /* found it! */ break; uhp = uhp->uh_prev.ptr; if (uhp == NULL || uhp->uh_walk != mark) { /* Need to redo more but can't find it... */ internal_error("undo_time()"); break; } } } u_undo_end(did_undo, absolute); } /* * u_undoredo: common code for undo and redo * * The lines in the file are replaced by the lines in the entry list at * curbuf->b_u_curhead. The replaced lines in the file are saved in the entry * list for the next undo/redo. * * When "undo" is TRUE we go up in the tree, when FALSE we go down. */ static void u_undoredo(int undo) { char_u **newarray = NULL; linenr_T oldsize; linenr_T newsize; linenr_T top, bot; linenr_T lnum; linenr_T newlnum = MAXLNUM; long i; u_entry_T *uep, *nuep; u_entry_T *newlist = NULL; int old_flags; int new_flags; pos_T namedm[NMARKS]; visualinfo_T visualinfo; int empty_buffer; /* buffer became empty */ u_header_T *curhead = curbuf->b_u_curhead; #ifdef FEAT_AUTOCMD /* Don't want autocommands using the undo structures here, they are * invalid till the end. */ block_autocmds(); #endif #ifdef U_DEBUG u_check(FALSE); #endif old_flags = curhead->uh_flags; new_flags = (curbuf->b_changed ? UH_CHANGED : 0) + ((curbuf->b_ml.ml_flags & ML_EMPTY) ? UH_EMPTYBUF : 0); setpcmark(); /* * save marks before undo/redo */ mch_memmove(namedm, curbuf->b_namedm, sizeof(pos_T) * NMARKS); visualinfo = curbuf->b_visual; curbuf->b_op_start.lnum = curbuf->b_ml.ml_line_count; curbuf->b_op_start.col = 0; curbuf->b_op_end.lnum = 0; curbuf->b_op_end.col = 0; for (uep = curhead->uh_entry; uep != NULL; uep = nuep) { top = uep->ue_top; bot = uep->ue_bot; if (bot == 0) bot = curbuf->b_ml.ml_line_count + 1; if (top > curbuf->b_ml.ml_line_count || top >= bot || bot > curbuf->b_ml.ml_line_count + 1) { #ifdef FEAT_AUTOCMD unblock_autocmds(); #endif IEMSG(_("E438: u_undo: line numbers wrong")); changed(); /* don't want UNCHANGED now */ return; } oldsize = bot - top - 1; /* number of lines before undo */ newsize = uep->ue_size; /* number of lines after undo */ if (top < newlnum) { /* If the saved cursor is somewhere in this undo block, move it to * the remembered position. Makes "gwap" put the cursor back * where it was. */ lnum = curhead->uh_cursor.lnum; if (lnum >= top && lnum <= top + newsize + 1) { curwin->w_cursor = curhead->uh_cursor; newlnum = curwin->w_cursor.lnum - 1; } else { /* Use the first line that actually changed. Avoids that * undoing auto-formatting puts the cursor in the previous * line. 
*/ for (i = 0; i < newsize && i < oldsize; ++i) if (STRCMP(uep->ue_array[i], ml_get(top + 1 + i)) != 0) break; if (i == newsize && newlnum == MAXLNUM && uep->ue_next == NULL) { newlnum = top; curwin->w_cursor.lnum = newlnum + 1; } else if (i < newsize) { newlnum = top + i; curwin->w_cursor.lnum = newlnum + 1; } } } empty_buffer = FALSE; /* delete the lines between top and bot and save them in newarray */ if (oldsize > 0) { if ((newarray = (char_u **)U_ALLOC_LINE( sizeof(char_u *) * oldsize)) == NULL) { do_outofmem_msg((long_u)(sizeof(char_u *) * oldsize)); /* * We have messed up the entry list, repair is impossible. * we have to free the rest of the list. */ while (uep != NULL) { nuep = uep->ue_next; u_freeentry(uep, uep->ue_size); uep = nuep; } break; } /* delete backwards, it goes faster in most cases */ for (lnum = bot - 1, i = oldsize; --i >= 0; --lnum) { /* what can we do when we run out of memory? */ if ((newarray[i] = u_save_line(lnum)) == NULL) do_outofmem_msg((long_u)0); /* remember we deleted the last line in the buffer, and a * dummy empty line will be inserted */ if (curbuf->b_ml.ml_line_count == 1) empty_buffer = TRUE; ml_delete(lnum, FALSE); } } else newarray = NULL; /* insert the lines in u_array between top and bot */ if (newsize) { for (lnum = top, i = 0; i < newsize; ++i, ++lnum) { /* * If the file is empty, there is an empty line 1 that we * should get rid of, by replacing it with the new line */ if (empty_buffer && lnum == 0) ml_replace((linenr_T)1, uep->ue_array[i], TRUE); else ml_append(lnum, uep->ue_array[i], (colnr_T)0, FALSE); vim_free(uep->ue_array[i]); } vim_free((char_u *)uep->ue_array); } /* adjust marks */ if (oldsize != newsize) { mark_adjust(top + 1, top + oldsize, (long)MAXLNUM, (long)newsize - (long)oldsize); if (curbuf->b_op_start.lnum > top + oldsize) curbuf->b_op_start.lnum += newsize - oldsize; if (curbuf->b_op_end.lnum > top + oldsize) curbuf->b_op_end.lnum += newsize - oldsize; } changed_lines(top + 1, 0, bot, newsize - oldsize); /* set '[ and '] mark */ if (top + 1 < curbuf->b_op_start.lnum) curbuf->b_op_start.lnum = top + 1; if (newsize == 0 && top + 1 > curbuf->b_op_end.lnum) curbuf->b_op_end.lnum = top + 1; else if (top + newsize > curbuf->b_op_end.lnum) curbuf->b_op_end.lnum = top + newsize; u_newcount += newsize; u_oldcount += oldsize; uep->ue_size = oldsize; uep->ue_array = newarray; uep->ue_bot = top + newsize + 1; /* * insert this entry in front of the new entry list */ nuep = uep->ue_next; uep->ue_next = newlist; newlist = uep; } curhead->uh_entry = newlist; curhead->uh_flags = new_flags; if ((old_flags & UH_EMPTYBUF) && bufempty()) curbuf->b_ml.ml_flags |= ML_EMPTY; if (old_flags & UH_CHANGED) changed(); else #ifdef FEAT_NETBEANS_INTG /* per netbeans undo rules, keep it as modified */ if (!isNetbeansModified(curbuf)) #endif unchanged(curbuf, FALSE); /* * restore marks from before undo/redo */ for (i = 0; i < NMARKS; ++i) { if (curhead->uh_namedm[i].lnum != 0) curbuf->b_namedm[i] = curhead->uh_namedm[i]; if (namedm[i].lnum != 0) curhead->uh_namedm[i] = namedm[i]; else curhead->uh_namedm[i].lnum = 0; } if (curhead->uh_visual.vi_start.lnum != 0) { curbuf->b_visual = curhead->uh_visual; curhead->uh_visual = visualinfo; } /* * If the cursor is only off by one line, put it at the same position as * before starting the change (for the "o" command). * Otherwise the cursor should go to the first undone line. 
*/ if (curhead->uh_cursor.lnum + 1 == curwin->w_cursor.lnum && curwin->w_cursor.lnum > 1) --curwin->w_cursor.lnum; if (curwin->w_cursor.lnum <= curbuf->b_ml.ml_line_count) { if (curhead->uh_cursor.lnum == curwin->w_cursor.lnum) { curwin->w_cursor.col = curhead->uh_cursor.col; #ifdef FEAT_VIRTUALEDIT if (virtual_active() && curhead->uh_cursor_vcol >= 0) coladvance((colnr_T)curhead->uh_cursor_vcol); else curwin->w_cursor.coladd = 0; #endif } else beginline(BL_SOL | BL_FIX); } else { /* We get here with the current cursor line being past the end (eg * after adding lines at the end of the file, and then undoing it). * check_cursor() will move the cursor to the last line. Move it to * the first column here. */ curwin->w_cursor.col = 0; #ifdef FEAT_VIRTUALEDIT curwin->w_cursor.coladd = 0; #endif } /* Make sure the cursor is on an existing line and column. */ check_cursor(); /* Remember where we are for "g-" and ":earlier 10s". */ curbuf->b_u_seq_cur = curhead->uh_seq; if (undo) /* We are below the previous undo. However, to make ":earlier 1s" * work we compute this as being just above the just undone change. */ --curbuf->b_u_seq_cur; /* Remember where we are for ":earlier 1f" and ":later 1f". */ if (curhead->uh_save_nr != 0) { if (undo) curbuf->b_u_save_nr_cur = curhead->uh_save_nr - 1; else curbuf->b_u_save_nr_cur = curhead->uh_save_nr; } /* The timestamp can be the same for multiple changes, just use the one of * the undone/redone change. */ curbuf->b_u_time_cur = curhead->uh_time; #ifdef FEAT_AUTOCMD unblock_autocmds(); #endif #ifdef U_DEBUG u_check(FALSE); #endif } /* * If we deleted or added lines, report the number of less/more lines. * Otherwise, report the number of changes (this may be incorrect * in some cases, but it's better than nothing). */ static void u_undo_end( int did_undo, /* just did an undo */ int absolute) /* used ":undo N" */ { char *msgstr; u_header_T *uhp; char_u msgbuf[80]; #ifdef FEAT_FOLDING if ((fdo_flags & FDO_UNDO) && KeyTyped) foldOpenCursor(); #endif if (global_busy /* no messages now, wait until global is finished */ || !messaging()) /* 'lazyredraw' set, don't do messages now */ return; if (curbuf->b_ml.ml_flags & ML_EMPTY) --u_newcount; u_oldcount -= u_newcount; if (u_oldcount == -1) msgstr = N_("more line"); else if (u_oldcount < 0) msgstr = N_("more lines"); else if (u_oldcount == 1) msgstr = N_("line less"); else if (u_oldcount > 1) msgstr = N_("fewer lines"); else { u_oldcount = u_newcount; if (u_newcount == 1) msgstr = N_("change"); else msgstr = N_("changes"); } if (curbuf->b_u_curhead != NULL) { /* For ":undo N" we prefer a "after #N" message. */ if (absolute && curbuf->b_u_curhead->uh_next.ptr != NULL) { uhp = curbuf->b_u_curhead->uh_next.ptr; did_undo = FALSE; } else if (did_undo) uhp = curbuf->b_u_curhead; else uhp = curbuf->b_u_curhead->uh_next.ptr; } else uhp = curbuf->b_u_newhead; if (uhp == NULL) *msgbuf = NUL; else u_add_time(msgbuf, sizeof(msgbuf), uhp->uh_time); #ifdef FEAT_CONCEAL { win_T *wp; FOR_ALL_WINDOWS(wp) { if (wp->w_buffer == curbuf && wp->w_p_cole > 0) redraw_win_later(wp, NOT_VALID); } } #endif smsg((char_u *)_("%ld %s; %s #%ld %s"), u_oldcount < 0 ? -u_oldcount : u_oldcount, _(msgstr), did_undo ? _("before") : _("after"), uhp == NULL ? 0L : uhp->uh_seq, msgbuf); } /* * u_sync: stop adding to the current entry list */ void u_sync( int force) /* Also sync when no_u_sync is set. */ { /* Skip it when already synced or syncing is disabled. 
*/ if (curbuf->b_u_synced || (!force && no_u_sync > 0)) return; #if defined(FEAT_XIM) && defined(FEAT_GUI_GTK) if (im_is_preediting()) return; /* XIM is busy, don't break an undo sequence */ #endif if (get_undolevel() < 0) curbuf->b_u_synced = TRUE; /* no entries, nothing to do */ else { u_getbot(); /* compute ue_bot of previous u_save */ curbuf->b_u_curhead = NULL; } } /* * ":undolist": List the leafs of the undo tree */ void ex_undolist(exarg_T *eap UNUSED) { garray_T ga; u_header_T *uhp; int mark; int nomark; int changes = 1; int i; /* * 1: walk the tree to find all leafs, put the info in "ga". * 2: sort the lines * 3: display the list */ mark = ++lastmark; nomark = ++lastmark; ga_init2(&ga, (int)sizeof(char *), 20); uhp = curbuf->b_u_oldhead; while (uhp != NULL) { if (uhp->uh_prev.ptr == NULL && uhp->uh_walk != nomark && uhp->uh_walk != mark) { if (ga_grow(&ga, 1) == FAIL) break; vim_snprintf((char *)IObuff, IOSIZE, "%6ld %7ld ", uhp->uh_seq, changes); u_add_time(IObuff + STRLEN(IObuff), IOSIZE - STRLEN(IObuff), uhp->uh_time); if (uhp->uh_save_nr > 0) { while (STRLEN(IObuff) < 33) STRCAT(IObuff, " "); vim_snprintf_add((char *)IObuff, IOSIZE, " %3ld", uhp->uh_save_nr); } ((char_u **)(ga.ga_data))[ga.ga_len++] = vim_strsave(IObuff); } uhp->uh_walk = mark; /* go down in the tree if we haven't been there */ if (uhp->uh_prev.ptr != NULL && uhp->uh_prev.ptr->uh_walk != nomark && uhp->uh_prev.ptr->uh_walk != mark) { uhp = uhp->uh_prev.ptr; ++changes; } /* go to alternate branch if we haven't been there */ else if (uhp->uh_alt_next.ptr != NULL && uhp->uh_alt_next.ptr->uh_walk != nomark && uhp->uh_alt_next.ptr->uh_walk != mark) uhp = uhp->uh_alt_next.ptr; /* go up in the tree if we haven't been there and we are at the * start of alternate branches */ else if (uhp->uh_next.ptr != NULL && uhp->uh_alt_prev.ptr == NULL && uhp->uh_next.ptr->uh_walk != nomark && uhp->uh_next.ptr->uh_walk != mark) { uhp = uhp->uh_next.ptr; --changes; } else { /* need to backtrack; mark this node as done */ uhp->uh_walk = nomark; if (uhp->uh_alt_prev.ptr != NULL) uhp = uhp->uh_alt_prev.ptr; else { uhp = uhp->uh_next.ptr; --changes; } } } if (ga.ga_len == 0) MSG(_("Nothing to undo")); else { sort_strings((char_u **)ga.ga_data, ga.ga_len); msg_start(); msg_puts_attr((char_u *)_("number changes when saved"), hl_attr(HLF_T)); for (i = 0; i < ga.ga_len && !got_int; ++i) { msg_putchar('\n'); if (got_int) break; msg_puts(((char_u **)ga.ga_data)[i]); } msg_end(); ga_clear_strings(&ga); } } /* * Put the timestamp of an undo header in "buf[buflen]" in a nice format. 
*/ static void u_add_time(char_u *buf, size_t buflen, time_t tt) { #ifdef HAVE_STRFTIME struct tm *curtime; if (vim_time() - tt >= 100) { curtime = localtime(&tt); if (vim_time() - tt < (60L * 60L * 12L)) /* within 12 hours */ (void)strftime((char *)buf, buflen, "%H:%M:%S", curtime); else /* longer ago */ (void)strftime((char *)buf, buflen, "%Y/%m/%d %H:%M:%S", curtime); } else #endif vim_snprintf((char *)buf, buflen, _("%ld seconds ago"), (long)(vim_time() - tt)); } /* * ":undojoin": continue adding to the last entry list */ void ex_undojoin(exarg_T *eap UNUSED) { if (curbuf->b_u_newhead == NULL) return; /* nothing changed before */ if (curbuf->b_u_curhead != NULL) { EMSG(_("E790: undojoin is not allowed after undo")); return; } if (!curbuf->b_u_synced) return; /* already unsynced */ if (get_undolevel() < 0) return; /* no entries, nothing to do */ else /* Append next change to the last entry */ curbuf->b_u_synced = FALSE; } /* * Called after writing or reloading the file and setting b_changed to FALSE. * Now an undo means that the buffer is modified. */ void u_unchanged(buf_T *buf) { u_unch_branch(buf->b_u_oldhead); buf->b_did_warn = FALSE; } /* * After reloading a buffer which was saved for 'undoreload': Find the first * line that was changed and set the cursor there. */ void u_find_first_changed(void) { u_header_T *uhp = curbuf->b_u_newhead; u_entry_T *uep; linenr_T lnum; if (curbuf->b_u_curhead != NULL || uhp == NULL) return; /* undid something in an autocmd? */ /* Check that the last undo block was for the whole file. */ uep = uhp->uh_entry; if (uep->ue_top != 0 || uep->ue_bot != 0) return; for (lnum = 1; lnum < curbuf->b_ml.ml_line_count && lnum <= uep->ue_size; ++lnum) if (STRCMP(ml_get_buf(curbuf, lnum, FALSE), uep->ue_array[lnum - 1]) != 0) { clearpos(&(uhp->uh_cursor)); uhp->uh_cursor.lnum = lnum; return; } if (curbuf->b_ml.ml_line_count != uep->ue_size) { /* lines added or deleted at the end, put the cursor there */ clearpos(&(uhp->uh_cursor)); uhp->uh_cursor.lnum = lnum; } } /* * Increase the write count, store it in the last undo header, what would be * used for "u". */ void u_update_save_nr(buf_T *buf) { u_header_T *uhp; ++buf->b_u_save_nr_last; buf->b_u_save_nr_cur = buf->b_u_save_nr_last; uhp = buf->b_u_curhead; if (uhp != NULL) uhp = uhp->uh_next.ptr; else uhp = buf->b_u_newhead; if (uhp != NULL) uhp->uh_save_nr = buf->b_u_save_nr_last; } static void u_unch_branch(u_header_T *uhp) { u_header_T *uh; for (uh = uhp; uh != NULL; uh = uh->uh_prev.ptr) { uh->uh_flags |= UH_CHANGED; if (uh->uh_alt_next.ptr != NULL) u_unch_branch(uh->uh_alt_next.ptr); /* recursive */ } } /* * Get pointer to last added entry. * If it's not valid, give an error message and return NULL. */ static u_entry_T * u_get_headentry(void) { if (curbuf->b_u_newhead == NULL || curbuf->b_u_newhead->uh_entry == NULL) { IEMSG(_("E439: undo list corrupt")); return NULL; } return curbuf->b_u_newhead->uh_entry; } /* * u_getbot(): compute the line number of the previous u_save * It is called only when b_u_synced is FALSE. */ static void u_getbot(void) { u_entry_T *uep; linenr_T extra; uep = u_get_headentry(); /* check for corrupt undo list */ if (uep == NULL) return; uep = curbuf->b_u_newhead->uh_getbot_entry; if (uep != NULL) { /* * the new ue_bot is computed from the number of lines that has been * inserted (0 - deleted) since calling u_save. This is equal to the * old line count subtracted from the current line count. 
*/ extra = curbuf->b_ml.ml_line_count - uep->ue_lcount; uep->ue_bot = uep->ue_top + uep->ue_size + 1 + extra; if (uep->ue_bot < 1 || uep->ue_bot > curbuf->b_ml.ml_line_count) { IEMSG(_("E440: undo line missing")); uep->ue_bot = uep->ue_top + 1; /* assume all lines deleted, will * get all the old lines back * without deleting the current * ones */ } curbuf->b_u_newhead->uh_getbot_entry = NULL; } curbuf->b_u_synced = TRUE; } /* * Free one header "uhp" and its entry list and adjust the pointers. */ static void u_freeheader( buf_T *buf, u_header_T *uhp, u_header_T **uhpp) /* if not NULL reset when freeing this header */ { u_header_T *uhap; /* When there is an alternate redo list free that branch completely, * because we can never go there. */ if (uhp->uh_alt_next.ptr != NULL) u_freebranch(buf, uhp->uh_alt_next.ptr, uhpp); if (uhp->uh_alt_prev.ptr != NULL) uhp->uh_alt_prev.ptr->uh_alt_next.ptr = NULL; /* Update the links in the list to remove the header. */ if (uhp->uh_next.ptr == NULL) buf->b_u_oldhead = uhp->uh_prev.ptr; else uhp->uh_next.ptr->uh_prev.ptr = uhp->uh_prev.ptr; if (uhp->uh_prev.ptr == NULL) buf->b_u_newhead = uhp->uh_next.ptr; else for (uhap = uhp->uh_prev.ptr; uhap != NULL; uhap = uhap->uh_alt_next.ptr) uhap->uh_next.ptr = uhp->uh_next.ptr; u_freeentries(buf, uhp, uhpp); } /* * Free an alternate branch and any following alternate branches. */ static void u_freebranch( buf_T *buf, u_header_T *uhp, u_header_T **uhpp) /* if not NULL reset when freeing this header */ { u_header_T *tofree, *next; /* If this is the top branch we may need to use u_freeheader() to update * all the pointers. */ if (uhp == buf->b_u_oldhead) { while (buf->b_u_oldhead != NULL) u_freeheader(buf, buf->b_u_oldhead, uhpp); return; } if (uhp->uh_alt_prev.ptr != NULL) uhp->uh_alt_prev.ptr->uh_alt_next.ptr = NULL; next = uhp; while (next != NULL) { tofree = next; if (tofree->uh_alt_next.ptr != NULL) u_freebranch(buf, tofree->uh_alt_next.ptr, uhpp); /* recursive */ next = tofree->uh_prev.ptr; u_freeentries(buf, tofree, uhpp); } } /* * Free all the undo entries for one header and the header itself. * This means that "uhp" is invalid when returning. */ static void u_freeentries( buf_T *buf, u_header_T *uhp, u_header_T **uhpp) /* if not NULL reset when freeing this header */ { u_entry_T *uep, *nuep; /* Check for pointers to the header that become invalid now. 
*/ if (buf->b_u_curhead == uhp) buf->b_u_curhead = NULL; if (buf->b_u_newhead == uhp) buf->b_u_newhead = NULL; /* freeing the newest entry */ if (uhpp != NULL && uhp == *uhpp) *uhpp = NULL; for (uep = uhp->uh_entry; uep != NULL; uep = nuep) { nuep = uep->ue_next; u_freeentry(uep, uep->ue_size); } #ifdef U_DEBUG uhp->uh_magic = 0; #endif vim_free((char_u *)uhp); --buf->b_u_numhead; } /* * free entry 'uep' and 'n' lines in uep->ue_array[] */ static void u_freeentry(u_entry_T *uep, long n) { while (n > 0) vim_free(uep->ue_array[--n]); vim_free((char_u *)uep->ue_array); #ifdef U_DEBUG uep->ue_magic = 0; #endif vim_free((char_u *)uep); } /* * invalidate the undo buffer; called when storage has already been released */ void u_clearall(buf_T *buf) { buf->b_u_newhead = buf->b_u_oldhead = buf->b_u_curhead = NULL; buf->b_u_synced = TRUE; buf->b_u_numhead = 0; buf->b_u_line_ptr = NULL; buf->b_u_line_lnum = 0; } /* * save the line "lnum" for the "U" command */ void u_saveline(linenr_T lnum) { if (lnum == curbuf->b_u_line_lnum) /* line is already saved */ return; if (lnum < 1 || lnum > curbuf->b_ml.ml_line_count) /* should never happen */ return; u_clearline(); curbuf->b_u_line_lnum = lnum; if (curwin->w_cursor.lnum == lnum) curbuf->b_u_line_colnr = curwin->w_cursor.col; else curbuf->b_u_line_colnr = 0; if ((curbuf->b_u_line_ptr = u_save_line(lnum)) == NULL) do_outofmem_msg((long_u)0); } /* * clear the line saved for the "U" command * (this is used externally for crossing a line while in insert mode) */ void u_clearline(void) { if (curbuf->b_u_line_ptr != NULL) { vim_free(curbuf->b_u_line_ptr); curbuf->b_u_line_ptr = NULL; curbuf->b_u_line_lnum = 0; } } /* * Implementation of the "U" command. * Differentiation from vi: "U" can be undone with the next "U". * We also allow the cursor to be in another line. * Careful: may trigger autocommands that reload the buffer. */ void u_undoline(void) { colnr_T t; char_u *oldp; if (undo_off) return; if (curbuf->b_u_line_ptr == NULL || curbuf->b_u_line_lnum > curbuf->b_ml.ml_line_count) { beep_flush(); return; } /* first save the line for the 'u' command */ if (u_savecommon(curbuf->b_u_line_lnum - 1, curbuf->b_u_line_lnum + 1, (linenr_T)0, FALSE) == FAIL) return; oldp = u_save_line(curbuf->b_u_line_lnum); if (oldp == NULL) { do_outofmem_msg((long_u)0); return; } ml_replace(curbuf->b_u_line_lnum, curbuf->b_u_line_ptr, TRUE); changed_bytes(curbuf->b_u_line_lnum, 0); vim_free(curbuf->b_u_line_ptr); curbuf->b_u_line_ptr = oldp; t = curbuf->b_u_line_colnr; if (curwin->w_cursor.lnum == curbuf->b_u_line_lnum) curbuf->b_u_line_colnr = curwin->w_cursor.col; curwin->w_cursor.col = t; curwin->w_cursor.lnum = curbuf->b_u_line_lnum; check_cursor_col(); } /* * Free all allocated memory blocks for the buffer 'buf'. */ void u_blockfree(buf_T *buf) { while (buf->b_u_oldhead != NULL) u_freeheader(buf, buf->b_u_oldhead, NULL); vim_free(buf->b_u_line_ptr); } /* * u_save_line(): allocate memory and copy line 'lnum' into it. * Returns NULL when out of memory. */ static char_u * u_save_line(linenr_T lnum) { return vim_strsave(ml_get(lnum)); } /* * Check if the 'modified' flag is set, or 'ff' has changed (only need to * check the first character, because it can only be "dos", "unix" or "mac"). * "nofile" and "scratch" type buffers are considered to always be unchanged. 
*/ int bufIsChanged(buf_T *buf) { return #ifdef FEAT_QUICKFIX !bt_dontwrite(buf) && #endif (buf->b_changed || file_ff_differs(buf, TRUE)); } int curbufIsChanged(void) { return #ifdef FEAT_QUICKFIX !bt_dontwrite(curbuf) && #endif (curbuf->b_changed || file_ff_differs(curbuf, TRUE)); } #if defined(FEAT_EVAL) || defined(PROTO) /* * For undotree(): Append the list of undo blocks at "first_uhp" to "list". * Recursive. */ void u_eval_tree(u_header_T *first_uhp, list_T *list) { u_header_T *uhp = first_uhp; dict_T *dict; while (uhp != NULL) { dict = dict_alloc(); if (dict == NULL) return; dict_add_nr_str(dict, "seq", uhp->uh_seq, NULL); dict_add_nr_str(dict, "time", (long)uhp->uh_time, NULL); if (uhp == curbuf->b_u_newhead) dict_add_nr_str(dict, "newhead", 1, NULL); if (uhp == curbuf->b_u_curhead) dict_add_nr_str(dict, "curhead", 1, NULL); if (uhp->uh_save_nr > 0) dict_add_nr_str(dict, "save", uhp->uh_save_nr, NULL); if (uhp->uh_alt_next.ptr != NULL) { list_T *alt_list = list_alloc(); if (alt_list != NULL) { /* Recursive call to add alternate undo tree. */ u_eval_tree(uhp->uh_alt_next.ptr, alt_list); dict_add_list(dict, "alt", alt_list); } } list_append_dict(list, dict); uhp = uhp->uh_prev.ptr; } } #endif
unserialize_uep(bufinfo_T *bi, int *error, char_u *file_name) { int i; u_entry_T *uep; char_u **array; char_u *line; int line_len; uep = (u_entry_T *)U_ALLOC_LINE(sizeof(u_entry_T)); if (uep == NULL) return NULL; vim_memset(uep, 0, sizeof(u_entry_T)); #ifdef U_DEBUG uep->ue_magic = UE_MAGIC; #endif uep->ue_top = undo_read_4c(bi); uep->ue_bot = undo_read_4c(bi); uep->ue_lcount = undo_read_4c(bi); uep->ue_size = undo_read_4c(bi); if (uep->ue_size > 0) { array = (char_u **)U_ALLOC_LINE(sizeof(char_u *) * uep->ue_size); if (array == NULL) { *error = TRUE; return uep; } vim_memset(array, 0, sizeof(char_u *) * uep->ue_size); } else array = NULL; uep->ue_array = array; for (i = 0; i < uep->ue_size; ++i) { line_len = undo_read_4c(bi); if (line_len >= 0) line = read_string_decrypt(bi, line_len); else { line = NULL; corruption_error("line length", file_name); } if (line == NULL) { *error = TRUE; return uep; } array[i] = line; } return uep; }
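func_before above is the vulnerable revision: uep->ue_size comes straight from undo_read_4c(), i.e. from a possibly crafted undo file, and is multiplied by sizeof(char_u *) with no range check. On a 32-bit build that product can wrap, so U_ALLOC_LINE() hands back a buffer far smaller than the read loop that follows assumes. A standalone sketch of the wrap; the concrete count is illustrative, not taken from a known exploit:

#include <stdio.h>

int main(void)
{
    unsigned int ptr_size = 4;           /* sizeof(char_u *) on an ILP32 build */
    unsigned int ue_size = 0x40000001u;  /* count as read by undo_read_4c() */

    /* 4 * 0x40000001 == 0x100000004; truncated to 32 bits that is just 4 */
    unsigned int alloc_bytes = ptr_size * ue_size;

    printf("requested %u pointers, allocation size wraps to %u bytes\n",
           ue_size, alloc_bytes);
    return 0;
}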
unserialize_uep(bufinfo_T *bi, int *error, char_u *file_name) { int i; u_entry_T *uep; char_u **array = NULL; char_u *line; int line_len; uep = (u_entry_T *)U_ALLOC_LINE(sizeof(u_entry_T)); if (uep == NULL) return NULL; vim_memset(uep, 0, sizeof(u_entry_T)); #ifdef U_DEBUG uep->ue_magic = UE_MAGIC; #endif uep->ue_top = undo_read_4c(bi); uep->ue_bot = undo_read_4c(bi); uep->ue_lcount = undo_read_4c(bi); uep->ue_size = undo_read_4c(bi); if (uep->ue_size > 0) { if (uep->ue_size < LONG_MAX / (int)sizeof(char_u *)) array = (char_u **)U_ALLOC_LINE(sizeof(char_u *) * uep->ue_size); if (array == NULL) { *error = TRUE; return uep; } vim_memset(array, 0, sizeof(char_u *) * uep->ue_size); } uep->ue_array = array; for (i = 0; i < uep->ue_size; ++i) { line_len = undo_read_4c(bi); if (line_len >= 0) line = read_string_decrypt(bi, line_len); else { line = NULL; corruption_error("line length", file_name); } if (line == NULL) { *error = TRUE; return uep; } array[i] = line; } return uep; }
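func_after closes that hole with a single pre-multiplication bound: the allocation is attempted only when ue_size < LONG_MAX / (int)sizeof(char_u *), and array now starts out NULL so the same *error path is taken whether the check or the allocation fails. The same CWE-190 pattern as a generic, self-contained helper; alloc_array() is a hypothetical name for this illustration, not Vim code:

#include <stdint.h>
#include <stdlib.h>

/* Zero-filled array allocation that refuses any count whose byte size
 * would overflow; a sketch of the guard the patch applies, with calloc()
 * standing in for U_ALLOC_LINE() + vim_memset(). */
static void *alloc_array(long count, size_t elem_size)
{
    if (count <= 0 || (unsigned long)count > SIZE_MAX / elem_size)
        return NULL;
    return calloc((size_t)count, elem_size);
}

Most modern C libraries perform the same multiplication check inside calloc() itself, but writing it out keeps the behaviour identical on older runtimes and makes the intent visible at the call site.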
{'added': [(1388, ' char_u\t**array = NULL;'), (1405, '\tif (uep->ue_size < LONG_MAX / (int)sizeof(char_u *))'), (1406, '\t array = (char_u **)U_ALLOC_LINE(sizeof(char_u *) * uep->ue_size);')], 'deleted': [(1388, ' char_u\t**array;'), (1405, '\tarray = (char_u **)U_ALLOC_LINE(sizeof(char_u *) * uep->ue_size);'), (1413, ' else'), (1414, '\tarray = NULL;')]}
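The three lines added by the diff above guard a count that arrives raw from disk: unserialize_uep() fills ue_size via undo_read_4c(), which simply assembles four MSB-first bytes, so a crafted file controls the full 32-bit range. A minimal standalone sketch of that encoding and its inverse; pack4()/unpack4() are hypothetical names, mirroring undo_write_bytes(bi, nr, 4) and undo_read_4c():

#include <stdio.h>

/* Encode "nr" most-significant byte first, like undo_write_bytes(bi, nr, 4). */
static void pack4(unsigned char buf[4], unsigned long nr)
{
    int i;

    for (i = 3; i >= 0; --i)
        buf[3 - i] = (unsigned char)(nr >> (i * 8));
}

/* Decode four MSB-first bytes, like the shift-and-add in undo_read_4c(). */
static unsigned long unpack4(const unsigned char buf[4])
{
    return ((unsigned long)buf[0] << 24) | ((unsigned long)buf[1] << 16)
         | ((unsigned long)buf[2] << 8) | (unsigned long)buf[3];
}

int main(void)
{
    unsigned char buf[4];

    pack4(buf, 0x40000001UL);
    printf("0x%lx\n", unpack4(buf));    /* prints 0x40000001 */
    return 0;
}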
3
4
2,456
14,270
https://github.com/vim/vim
CVE-2017-6350
['CWE-190']
cdf.c
cdf_read_sector
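The code_before blob that follows is file(1)'s cdf.c; in it, cdf_read_sector() maps a sector id to a byte offset with CDF_SEC_POS() and reads one CDF_SEC_SIZE(h) sector into buf + offs. A hedged sketch of that offset arithmetic, assuming the conventional layout where the header occupies the first sector-sized slot, so sector "id" starts at (id + 1) * sector_size; the helper names are invented here and the macro semantics are re-derived rather than quoted from cdf.h:

#include <stdio.h>

/* The header stores the sector size as a power of two (h_sec_size_p2). */
static size_t sec_size(unsigned int sec_size_p2)
{
    return (size_t)1 << sec_size_p2;
}

/* Byte offset of sector "id": one sector-sized header slot, then id sectors. */
static size_t sec_pos(unsigned int sec_size_p2, unsigned int id)
{
    return ((size_t)id + 1) * sec_size(sec_size_p2);
}

int main(void)
{
    /* h_sec_size_p2 == 9 -> 512-byte sectors; sector 3 starts at byte 2048 */
    printf("%zu\n", sec_pos(9, 3));
    return 0;
}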
/*- * Copyright (c) 2008 Christos Zoulas * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /* * Parse Composite Document Files, the format used in Microsoft Office * document files before they switched to zipped XML. * Info from: http://sc.openoffice.org/compdocfileformat.pdf * * N.B. This is the "Composite Document File" format, and not the * "Compound Document Format", nor the "Channel Definition Format". */ #include "file.h" #ifndef lint FILE_RCSID("@(#)$File: cdf.c,v 1.45 2011/08/28 08:38:48 christos Exp $") #endif #include <assert.h> #ifdef CDF_DEBUG #include <err.h> #endif #include <stdlib.h> #include <unistd.h> #include <string.h> #include <time.h> #include <ctype.h> #ifdef HAVE_LIMITS_H #include <limits.h> #endif #ifndef EFTYPE #define EFTYPE EINVAL #endif #include "cdf.h" #ifdef CDF_DEBUG #define DPRINTF(a) printf a, fflush(stdout) #else #define DPRINTF(a) #endif static union { char s[4]; uint32_t u; } cdf_bo; #define NEED_SWAP (cdf_bo.u == (uint32_t)0x01020304) #define CDF_TOLE8(x) ((uint64_t)(NEED_SWAP ? _cdf_tole8(x) : (uint64_t)(x))) #define CDF_TOLE4(x) ((uint32_t)(NEED_SWAP ? _cdf_tole4(x) : (uint32_t)(x))) #define CDF_TOLE2(x) ((uint16_t)(NEED_SWAP ? _cdf_tole2(x) : (uint16_t)(x))) #define CDF_GETUINT32(x, y) cdf_getuint32(x, y) /* * swap a short */ static uint16_t _cdf_tole2(uint16_t sv) { uint16_t rv; uint8_t *s = (uint8_t *)(void *)&sv; uint8_t *d = (uint8_t *)(void *)&rv; d[0] = s[1]; d[1] = s[0]; return rv; } /* * swap an int */ static uint32_t _cdf_tole4(uint32_t sv) { uint32_t rv; uint8_t *s = (uint8_t *)(void *)&sv; uint8_t *d = (uint8_t *)(void *)&rv; d[0] = s[3]; d[1] = s[2]; d[2] = s[1]; d[3] = s[0]; return rv; } /* * swap a quad */ static uint64_t _cdf_tole8(uint64_t sv) { uint64_t rv; uint8_t *s = (uint8_t *)(void *)&sv; uint8_t *d = (uint8_t *)(void *)&rv; d[0] = s[7]; d[1] = s[6]; d[2] = s[5]; d[3] = s[4]; d[4] = s[3]; d[5] = s[2]; d[6] = s[1]; d[7] = s[0]; return rv; } /* * grab a uint32_t from a possibly unaligned address, and return it in * the native host order. 
*/ static uint32_t cdf_getuint32(const uint8_t *p, size_t offs) { uint32_t rv; (void)memcpy(&rv, p + offs * sizeof(uint32_t), sizeof(rv)); return CDF_TOLE4(rv); } #define CDF_UNPACK(a) \ (void)memcpy(&(a), &buf[len], sizeof(a)), len += sizeof(a) #define CDF_UNPACKA(a) \ (void)memcpy((a), &buf[len], sizeof(a)), len += sizeof(a) uint16_t cdf_tole2(uint16_t sv) { return CDF_TOLE2(sv); } uint32_t cdf_tole4(uint32_t sv) { return CDF_TOLE4(sv); } uint64_t cdf_tole8(uint64_t sv) { return CDF_TOLE8(sv); } void cdf_swap_header(cdf_header_t *h) { size_t i; h->h_magic = CDF_TOLE8(h->h_magic); h->h_uuid[0] = CDF_TOLE8(h->h_uuid[0]); h->h_uuid[1] = CDF_TOLE8(h->h_uuid[1]); h->h_revision = CDF_TOLE2(h->h_revision); h->h_version = CDF_TOLE2(h->h_version); h->h_byte_order = CDF_TOLE2(h->h_byte_order); h->h_sec_size_p2 = CDF_TOLE2(h->h_sec_size_p2); h->h_short_sec_size_p2 = CDF_TOLE2(h->h_short_sec_size_p2); h->h_num_sectors_in_sat = CDF_TOLE4(h->h_num_sectors_in_sat); h->h_secid_first_directory = CDF_TOLE4(h->h_secid_first_directory); h->h_min_size_standard_stream = CDF_TOLE4(h->h_min_size_standard_stream); h->h_secid_first_sector_in_short_sat = CDF_TOLE4((uint32_t)h->h_secid_first_sector_in_short_sat); h->h_num_sectors_in_short_sat = CDF_TOLE4(h->h_num_sectors_in_short_sat); h->h_secid_first_sector_in_master_sat = CDF_TOLE4((uint32_t)h->h_secid_first_sector_in_master_sat); h->h_num_sectors_in_master_sat = CDF_TOLE4(h->h_num_sectors_in_master_sat); for (i = 0; i < __arraycount(h->h_master_sat); i++) h->h_master_sat[i] = CDF_TOLE4((uint32_t)h->h_master_sat[i]); } void cdf_unpack_header(cdf_header_t *h, char *buf) { size_t i; size_t len = 0; CDF_UNPACK(h->h_magic); CDF_UNPACKA(h->h_uuid); CDF_UNPACK(h->h_revision); CDF_UNPACK(h->h_version); CDF_UNPACK(h->h_byte_order); CDF_UNPACK(h->h_sec_size_p2); CDF_UNPACK(h->h_short_sec_size_p2); CDF_UNPACKA(h->h_unused0); CDF_UNPACK(h->h_num_sectors_in_sat); CDF_UNPACK(h->h_secid_first_directory); CDF_UNPACKA(h->h_unused1); CDF_UNPACK(h->h_min_size_standard_stream); CDF_UNPACK(h->h_secid_first_sector_in_short_sat); CDF_UNPACK(h->h_num_sectors_in_short_sat); CDF_UNPACK(h->h_secid_first_sector_in_master_sat); CDF_UNPACK(h->h_num_sectors_in_master_sat); for (i = 0; i < __arraycount(h->h_master_sat); i++) CDF_UNPACK(h->h_master_sat[i]); } void cdf_swap_dir(cdf_directory_t *d) { d->d_namelen = CDF_TOLE2(d->d_namelen); d->d_left_child = CDF_TOLE4((uint32_t)d->d_left_child); d->d_right_child = CDF_TOLE4((uint32_t)d->d_right_child); d->d_storage = CDF_TOLE4((uint32_t)d->d_storage); d->d_storage_uuid[0] = CDF_TOLE8(d->d_storage_uuid[0]); d->d_storage_uuid[1] = CDF_TOLE8(d->d_storage_uuid[1]); d->d_flags = CDF_TOLE4(d->d_flags); d->d_created = CDF_TOLE8((uint64_t)d->d_created); d->d_modified = CDF_TOLE8((uint64_t)d->d_modified); d->d_stream_first_sector = CDF_TOLE4((uint32_t)d->d_stream_first_sector); d->d_size = CDF_TOLE4(d->d_size); } void cdf_swap_class(cdf_classid_t *d) { d->cl_dword = CDF_TOLE4(d->cl_dword); d->cl_word[0] = CDF_TOLE2(d->cl_word[0]); d->cl_word[1] = CDF_TOLE2(d->cl_word[1]); } void cdf_unpack_dir(cdf_directory_t *d, char *buf) { size_t len = 0; CDF_UNPACKA(d->d_name); CDF_UNPACK(d->d_namelen); CDF_UNPACK(d->d_type); CDF_UNPACK(d->d_color); CDF_UNPACK(d->d_left_child); CDF_UNPACK(d->d_right_child); CDF_UNPACK(d->d_storage); CDF_UNPACKA(d->d_storage_uuid); CDF_UNPACK(d->d_flags); CDF_UNPACK(d->d_created); CDF_UNPACK(d->d_modified); CDF_UNPACK(d->d_stream_first_sector); CDF_UNPACK(d->d_size); CDF_UNPACK(d->d_unused0); } static int cdf_check_stream_offset(const 
cdf_stream_t *sst, const cdf_header_t *h, const void *p, size_t tail, int line) { const char *b = (const char *)sst->sst_tab; const char *e = ((const char *)p) + tail; (void)&line; if (e >= b && (size_t)(e - b) < CDF_SEC_SIZE(h) * sst->sst_len) return 0; DPRINTF(("%d: offset begin %p end %p %" SIZE_T_FORMAT "u" " >= %" SIZE_T_FORMAT "u [%" SIZE_T_FORMAT "u %" SIZE_T_FORMAT "u]\n", line, b, e, (size_t)(e - b), CDF_SEC_SIZE(h) * sst->sst_len, CDF_SEC_SIZE(h), sst->sst_len)); errno = EFTYPE; return -1; } static ssize_t cdf_read(const cdf_info_t *info, off_t off, void *buf, size_t len) { size_t siz = (size_t)off + len; if ((off_t)(off + len) != (off_t)siz) { errno = EINVAL; return -1; } if (info->i_buf != NULL && info->i_len >= siz) { (void)memcpy(buf, &info->i_buf[off], len); return (ssize_t)len; } if (info->i_fd == -1) return -1; if (lseek(info->i_fd, off, SEEK_SET) == (off_t)-1) return -1; if (read(info->i_fd, buf, len) != (ssize_t)len) return -1; return (ssize_t)len; } int cdf_read_header(const cdf_info_t *info, cdf_header_t *h) { char buf[512]; (void)memcpy(cdf_bo.s, "\01\02\03\04", 4); if (cdf_read(info, (off_t)0, buf, sizeof(buf)) == -1) return -1; cdf_unpack_header(h, buf); cdf_swap_header(h); if (h->h_magic != CDF_MAGIC) { DPRINTF(("Bad magic 0x%" INT64_T_FORMAT "x != 0x%" INT64_T_FORMAT "x\n", (unsigned long long)h->h_magic, (unsigned long long)CDF_MAGIC)); goto out; } if (h->h_sec_size_p2 > 20) { DPRINTF(("Bad sector size 0x%u\n", h->h_sec_size_p2)); goto out; } if (h->h_short_sec_size_p2 > 20) { DPRINTF(("Bad short sector size 0x%u\n", h->h_short_sec_size_p2)); goto out; } return 0; out: errno = EFTYPE; return -1; } ssize_t cdf_read_sector(const cdf_info_t *info, void *buf, size_t offs, size_t len, const cdf_header_t *h, cdf_secid_t id) { assert((size_t)CDF_SEC_SIZE(h) == len); return cdf_read(info, (off_t)CDF_SEC_POS(h, id), ((char *)buf) + offs, len); } ssize_t cdf_read_short_sector(const cdf_stream_t *sst, void *buf, size_t offs, size_t len, const cdf_header_t *h, cdf_secid_t id) { assert((size_t)CDF_SHORT_SEC_SIZE(h) == len); (void)memcpy(((char *)buf) + offs, ((const char *)sst->sst_tab) + CDF_SHORT_SEC_POS(h, id), len); return len; } /* * Read the sector allocation table. 
*/ int cdf_read_sat(const cdf_info_t *info, cdf_header_t *h, cdf_sat_t *sat) { size_t i, j, k; size_t ss = CDF_SEC_SIZE(h); cdf_secid_t *msa, mid, sec; size_t nsatpersec = (ss / sizeof(mid)) - 1; for (i = 0; i < __arraycount(h->h_master_sat); i++) if (h->h_master_sat[i] == CDF_SECID_FREE) break; #define CDF_SEC_LIMIT (UINT32_MAX / (4 * ss)) if ((nsatpersec > 0 && h->h_num_sectors_in_master_sat > CDF_SEC_LIMIT / nsatpersec) || i > CDF_SEC_LIMIT) { DPRINTF(("Number of sectors in master SAT too big %u %" SIZE_T_FORMAT "u\n", h->h_num_sectors_in_master_sat, i)); errno = EFTYPE; return -1; } sat->sat_len = h->h_num_sectors_in_master_sat * nsatpersec + i; DPRINTF(("sat_len = %" SIZE_T_FORMAT "u ss = %" SIZE_T_FORMAT "u\n", sat->sat_len, ss)); if ((sat->sat_tab = CAST(cdf_secid_t *, calloc(sat->sat_len, ss))) == NULL) return -1; for (i = 0; i < __arraycount(h->h_master_sat); i++) { if (h->h_master_sat[i] < 0) break; if (cdf_read_sector(info, sat->sat_tab, ss * i, ss, h, h->h_master_sat[i]) != (ssize_t)ss) { DPRINTF(("Reading sector %d", h->h_master_sat[i])); goto out1; } } if ((msa = CAST(cdf_secid_t *, calloc(1, ss))) == NULL) goto out1; mid = h->h_secid_first_sector_in_master_sat; for (j = 0; j < h->h_num_sectors_in_master_sat; j++) { if (mid < 0) goto out; if (j >= CDF_LOOP_LIMIT) { DPRINTF(("Reading master sector loop limit")); errno = EFTYPE; goto out2; } if (cdf_read_sector(info, msa, 0, ss, h, mid) != (ssize_t)ss) { DPRINTF(("Reading master sector %d", mid)); goto out2; } for (k = 0; k < nsatpersec; k++, i++) { sec = CDF_TOLE4((uint32_t)msa[k]); if (sec < 0) goto out; if (i >= sat->sat_len) { DPRINTF(("Out of bounds reading MSA %u >= %u", i, sat->sat_len)); errno = EFTYPE; goto out2; } if (cdf_read_sector(info, sat->sat_tab, ss * i, ss, h, sec) != (ssize_t)ss) { DPRINTF(("Reading sector %d", CDF_TOLE4(msa[k]))); goto out2; } } mid = CDF_TOLE4((uint32_t)msa[nsatpersec]); } out: sat->sat_len = i; free(msa); return 0; out2: free(msa); out1: free(sat->sat_tab); return -1; } size_t cdf_count_chain(const cdf_sat_t *sat, cdf_secid_t sid, size_t size) { size_t i, j; cdf_secid_t maxsector = (cdf_secid_t)(sat->sat_len * size); DPRINTF(("Chain:")); for (j = i = 0; sid >= 0; i++, j++) { DPRINTF((" %d", sid)); if (j >= CDF_LOOP_LIMIT) { DPRINTF(("Counting chain loop limit")); errno = EFTYPE; return (size_t)-1; } if (sid > maxsector) { DPRINTF(("Sector %d > %d\n", sid, maxsector)); errno = EFTYPE; return (size_t)-1; } sid = CDF_TOLE4((uint32_t)sat->sat_tab[sid]); } DPRINTF(("\n")); return i; } int cdf_read_long_sector_chain(const cdf_info_t *info, const cdf_header_t *h, const cdf_sat_t *sat, cdf_secid_t sid, size_t len, cdf_stream_t *scn) { size_t ss = CDF_SEC_SIZE(h), i, j; ssize_t nr; scn->sst_len = cdf_count_chain(sat, sid, ss); scn->sst_dirlen = len; if (scn->sst_len == (size_t)-1) return -1; scn->sst_tab = calloc(scn->sst_len, ss); if (scn->sst_tab == NULL) return -1; for (j = i = 0; sid >= 0; i++, j++) { if (j >= CDF_LOOP_LIMIT) { DPRINTF(("Read long sector chain loop limit")); errno = EFTYPE; goto out; } if (i >= scn->sst_len) { DPRINTF(("Out of bounds reading long sector chain " "%u > %u\n", i, scn->sst_len)); errno = EFTYPE; goto out; } if ((nr = cdf_read_sector(info, scn->sst_tab, i * ss, ss, h, sid)) != (ssize_t)ss) { if (i == scn->sst_len - 1 && nr > 0) { /* Last sector might be truncated */ return 0; } DPRINTF(("Reading long sector chain %d", sid)); goto out; } sid = CDF_TOLE4((uint32_t)sat->sat_tab[sid]); } return 0; out: free(scn->sst_tab); return -1; } int 
cdf_read_short_sector_chain(const cdf_header_t *h, const cdf_sat_t *ssat, const cdf_stream_t *sst, cdf_secid_t sid, size_t len, cdf_stream_t *scn) { size_t ss = CDF_SHORT_SEC_SIZE(h), i, j; scn->sst_len = cdf_count_chain(ssat, sid, CDF_SEC_SIZE(h)); scn->sst_dirlen = len; if (sst->sst_tab == NULL || scn->sst_len == (size_t)-1) return -1; scn->sst_tab = calloc(scn->sst_len, ss); if (scn->sst_tab == NULL) return -1; for (j = i = 0; sid >= 0; i++, j++) { if (j >= CDF_LOOP_LIMIT) { DPRINTF(("Read short sector chain loop limit")); errno = EFTYPE; goto out; } if (i >= scn->sst_len) { DPRINTF(("Out of bounds reading short sector chain " "%u > %u\n", i, scn->sst_len)); errno = EFTYPE; goto out; } if (cdf_read_short_sector(sst, scn->sst_tab, i * ss, ss, h, sid) != (ssize_t)ss) { DPRINTF(("Reading short sector chain %d", sid)); goto out; } sid = CDF_TOLE4((uint32_t)ssat->sat_tab[sid]); } return 0; out: free(scn->sst_tab); return -1; } int cdf_read_sector_chain(const cdf_info_t *info, const cdf_header_t *h, const cdf_sat_t *sat, const cdf_sat_t *ssat, const cdf_stream_t *sst, cdf_secid_t sid, size_t len, cdf_stream_t *scn) { if (len < h->h_min_size_standard_stream && sst->sst_tab != NULL) return cdf_read_short_sector_chain(h, ssat, sst, sid, len, scn); else return cdf_read_long_sector_chain(info, h, sat, sid, len, scn); } int cdf_read_dir(const cdf_info_t *info, const cdf_header_t *h, const cdf_sat_t *sat, cdf_dir_t *dir) { size_t i, j; size_t ss = CDF_SEC_SIZE(h), ns, nd; char *buf; cdf_secid_t sid = h->h_secid_first_directory; ns = cdf_count_chain(sat, sid, ss); if (ns == (size_t)-1) return -1; nd = ss / CDF_DIRECTORY_SIZE; dir->dir_len = ns * nd; dir->dir_tab = CAST(cdf_directory_t *, calloc(dir->dir_len, sizeof(dir->dir_tab[0]))); if (dir->dir_tab == NULL) return -1; if ((buf = CAST(char *, malloc(ss))) == NULL) { free(dir->dir_tab); return -1; } for (j = i = 0; i < ns; i++, j++) { if (j >= CDF_LOOP_LIMIT) { DPRINTF(("Read dir loop limit")); errno = EFTYPE; goto out; } if (cdf_read_sector(info, buf, 0, ss, h, sid) != (ssize_t)ss) { DPRINTF(("Reading directory sector %d", sid)); goto out; } for (j = 0; j < nd; j++) { cdf_unpack_dir(&dir->dir_tab[i * nd + j], &buf[j * CDF_DIRECTORY_SIZE]); } sid = CDF_TOLE4((uint32_t)sat->sat_tab[sid]); } if (NEED_SWAP) for (i = 0; i < dir->dir_len; i++) cdf_swap_dir(&dir->dir_tab[i]); free(buf); return 0; out: free(dir->dir_tab); free(buf); return -1; } int cdf_read_ssat(const cdf_info_t *info, const cdf_header_t *h, const cdf_sat_t *sat, cdf_sat_t *ssat) { size_t i, j; size_t ss = CDF_SEC_SIZE(h); cdf_secid_t sid = h->h_secid_first_sector_in_short_sat; ssat->sat_len = cdf_count_chain(sat, sid, CDF_SEC_SIZE(h)); if (ssat->sat_len == (size_t)-1) return -1; ssat->sat_tab = CAST(cdf_secid_t *, calloc(ssat->sat_len, ss)); if (ssat->sat_tab == NULL) return -1; for (j = i = 0; sid >= 0; i++, j++) { if (j >= CDF_LOOP_LIMIT) { DPRINTF(("Read short sat sector loop limit")); errno = EFTYPE; goto out; } if (i >= ssat->sat_len) { DPRINTF(("Out of bounds reading short sector chain " "%u > %u\n", i, ssat->sat_len)); errno = EFTYPE; goto out; } if (cdf_read_sector(info, ssat->sat_tab, i * ss, ss, h, sid) != (ssize_t)ss) { DPRINTF(("Reading short sat sector %d", sid)); goto out; } sid = CDF_TOLE4((uint32_t)sat->sat_tab[sid]); } return 0; out: free(ssat->sat_tab); return -1; } int cdf_read_short_stream(const cdf_info_t *info, const cdf_header_t *h, const cdf_sat_t *sat, const cdf_dir_t *dir, cdf_stream_t *scn) { size_t i; const cdf_directory_t *d; for (i = 0; i < dir->dir_len; 
i++) if (dir->dir_tab[i].d_type == CDF_DIR_TYPE_ROOT_STORAGE) break; /* If it is not there, just fake it; some docs don't have it */ if (i == dir->dir_len) goto out; d = &dir->dir_tab[i]; /* If it is not there, just fake it; some docs don't have it */ if (d->d_stream_first_sector < 0) goto out; return cdf_read_long_sector_chain(info, h, sat, d->d_stream_first_sector, d->d_size, scn); out: scn->sst_tab = NULL; scn->sst_len = 0; scn->sst_dirlen = 0; return 0; } static int cdf_namecmp(const char *d, const uint16_t *s, size_t l) { for (; l--; d++, s++) if (*d != CDF_TOLE2(*s)) return (unsigned char)*d - CDF_TOLE2(*s); return 0; } int cdf_read_summary_info(const cdf_info_t *info, const cdf_header_t *h, const cdf_sat_t *sat, const cdf_sat_t *ssat, const cdf_stream_t *sst, const cdf_dir_t *dir, cdf_stream_t *scn) { size_t i; const cdf_directory_t *d; static const char name[] = "\05SummaryInformation"; for (i = dir->dir_len; i > 0; i--) if (dir->dir_tab[i - 1].d_type == CDF_DIR_TYPE_USER_STREAM && cdf_namecmp(name, dir->dir_tab[i - 1].d_name, sizeof(name)) == 0) break; if (i == 0) { DPRINTF(("Cannot find summary information section\n")); errno = ESRCH; return -1; } d = &dir->dir_tab[i - 1]; return cdf_read_sector_chain(info, h, sat, ssat, sst, d->d_stream_first_sector, d->d_size, scn); } int cdf_read_property_info(const cdf_stream_t *sst, const cdf_header_t *h, uint32_t offs, cdf_property_info_t **info, size_t *count, size_t *maxcount) { const cdf_section_header_t *shp; cdf_section_header_t sh; const uint8_t *p, *q, *e; int16_t s16; int32_t s32; uint32_t u32; int64_t s64; uint64_t u64; cdf_timestamp_t tp; size_t i, o, o4, nelements, j; cdf_property_info_t *inp; if (offs > UINT32_MAX / 4) { errno = EFTYPE; goto out; } shp = CAST(const cdf_section_header_t *, (const void *) ((const char *)sst->sst_tab + offs)); if (cdf_check_stream_offset(sst, h, shp, sizeof(*shp), __LINE__) == -1) goto out; sh.sh_len = CDF_TOLE4(shp->sh_len); #define CDF_SHLEN_LIMIT (UINT32_MAX / 8) if (sh.sh_len > CDF_SHLEN_LIMIT) { errno = EFTYPE; goto out; } sh.sh_properties = CDF_TOLE4(shp->sh_properties); #define CDF_PROP_LIMIT (UINT32_MAX / (4 * sizeof(*inp))) if (sh.sh_properties > CDF_PROP_LIMIT) goto out; DPRINTF(("section len: %u properties %u\n", sh.sh_len, sh.sh_properties)); if (*maxcount) { if (*maxcount > CDF_PROP_LIMIT) goto out; *maxcount += sh.sh_properties; inp = CAST(cdf_property_info_t *, realloc(*info, *maxcount * sizeof(*inp))); } else { *maxcount = sh.sh_properties; inp = CAST(cdf_property_info_t *, malloc(*maxcount * sizeof(*inp))); } if (inp == NULL) goto out; *info = inp; inp += *count; *count += sh.sh_properties; p = CAST(const uint8_t *, (const void *) ((const char *)(const void *)sst->sst_tab + offs + sizeof(sh))); e = CAST(const uint8_t *, (const void *) (((const char *)(const void *)shp) + sh.sh_len)); if (cdf_check_stream_offset(sst, h, e, 0, __LINE__) == -1) goto out; for (i = 0; i < sh.sh_properties; i++) { q = (const uint8_t *)(const void *) ((const char *)(const void *)p + CDF_GETUINT32(p, (i << 1) + 1)) - 2 * sizeof(uint32_t); if (q > e) { DPRINTF(("Ran off the end %p > %p\n", q, e)); goto out; } inp[i].pi_id = CDF_GETUINT32(p, i << 1); inp[i].pi_type = CDF_GETUINT32(q, 0); DPRINTF(("%d) id=%x type=%x offs=%x,%d\n", i, inp[i].pi_id, inp[i].pi_type, q - p, CDF_GETUINT32(p, (i << 1) + 1))); if (inp[i].pi_type & CDF_VECTOR) { nelements = CDF_GETUINT32(q, 1); o = 2; } else { nelements = 1; o = 1; } o4 = o * sizeof(uint32_t); if (inp[i].pi_type & (CDF_ARRAY|CDF_BYREF|CDF_RESERVED)) goto 
unknown; switch (inp[i].pi_type & CDF_TYPEMASK) { case CDF_NULL: case CDF_EMPTY: break; case CDF_SIGNED16: if (inp[i].pi_type & CDF_VECTOR) goto unknown; (void)memcpy(&s16, &q[o4], sizeof(s16)); inp[i].pi_s16 = CDF_TOLE2(s16); break; case CDF_SIGNED32: if (inp[i].pi_type & CDF_VECTOR) goto unknown; (void)memcpy(&s32, &q[o4], sizeof(s32)); inp[i].pi_s32 = CDF_TOLE4((uint32_t)s32); break; case CDF_BOOL: case CDF_UNSIGNED32: if (inp[i].pi_type & CDF_VECTOR) goto unknown; (void)memcpy(&u32, &q[o4], sizeof(u32)); inp[i].pi_u32 = CDF_TOLE4(u32); break; case CDF_SIGNED64: if (inp[i].pi_type & CDF_VECTOR) goto unknown; (void)memcpy(&s64, &q[o4], sizeof(s64)); inp[i].pi_s64 = CDF_TOLE8((uint64_t)s64); break; case CDF_UNSIGNED64: if (inp[i].pi_type & CDF_VECTOR) goto unknown; (void)memcpy(&u64, &q[o4], sizeof(u64)); inp[i].pi_u64 = CDF_TOLE8((uint64_t)u64); break; case CDF_LENGTH32_STRING: case CDF_LENGTH32_WSTRING: if (nelements > 1) { size_t nelem = inp - *info; if (*maxcount > CDF_PROP_LIMIT || nelements > CDF_PROP_LIMIT) goto out; *maxcount += nelements; inp = CAST(cdf_property_info_t *, realloc(*info, *maxcount * sizeof(*inp))); if (inp == NULL) goto out; *info = inp; inp = *info + nelem; } DPRINTF(("nelements = %d\n", nelements)); for (j = 0; j < nelements; j++, i++) { uint32_t l = CDF_GETUINT32(q, o); inp[i].pi_str.s_len = l; inp[i].pi_str.s_buf = (const char *) (const void *)(&q[o4 + sizeof(l)]); DPRINTF(("l = %d, r = %d, s = %s\n", l, CDF_ROUND(l, sizeof(l)), inp[i].pi_str.s_buf)); l = 4 + (uint32_t)CDF_ROUND(l, sizeof(l)); o += l >> 2; o4 = o * sizeof(uint32_t); } i--; break; case CDF_FILETIME: if (inp[i].pi_type & CDF_VECTOR) goto unknown; (void)memcpy(&tp, &q[o4], sizeof(tp)); inp[i].pi_tp = CDF_TOLE8((uint64_t)tp); break; case CDF_CLIPBOARD: if (inp[i].pi_type & CDF_VECTOR) goto unknown; break; default: unknown: DPRINTF(("Don't know how to deal with %x\n", inp[i].pi_type)); goto out; } } return 0; out: free(*info); return -1; } int cdf_unpack_summary_info(const cdf_stream_t *sst, const cdf_header_t *h, cdf_summary_info_header_t *ssi, cdf_property_info_t **info, size_t *count) { size_t i, maxcount; const cdf_summary_info_header_t *si = CAST(const cdf_summary_info_header_t *, sst->sst_tab); const cdf_section_declaration_t *sd = CAST(const cdf_section_declaration_t *, (const void *) ((const char *)sst->sst_tab + CDF_SECTION_DECLARATION_OFFSET)); if (cdf_check_stream_offset(sst, h, si, sizeof(*si), __LINE__) == -1 || cdf_check_stream_offset(sst, h, sd, sizeof(*sd), __LINE__) == -1) return -1; ssi->si_byte_order = CDF_TOLE2(si->si_byte_order); ssi->si_os_version = CDF_TOLE2(si->si_os_version); ssi->si_os = CDF_TOLE2(si->si_os); ssi->si_class = si->si_class; cdf_swap_class(&ssi->si_class); ssi->si_count = CDF_TOLE2(si->si_count); *count = 0; maxcount = 0; *info = NULL; for (i = 0; i < CDF_TOLE4(si->si_count); i++) { if (i >= CDF_LOOP_LIMIT) { DPRINTF(("Unpack summary info loop limit")); errno = EFTYPE; return -1; } if (cdf_read_property_info(sst, h, CDF_TOLE4(sd->sd_offset), info, count, &maxcount) == -1) return -1; } return 0; } int cdf_print_classid(char *buf, size_t buflen, const cdf_classid_t *id) { return snprintf(buf, buflen, "%.8x-%.4x-%.4x-%.2x%.2x-" "%.2x%.2x%.2x%.2x%.2x%.2x", id->cl_dword, id->cl_word[0], id->cl_word[1], id->cl_two[0], id->cl_two[1], id->cl_six[0], id->cl_six[1], id->cl_six[2], id->cl_six[3], id->cl_six[4], id->cl_six[5]); } static const struct { uint32_t v; const char *n; } vn[] = { { CDF_PROPERTY_CODE_PAGE, "Code page" }, { CDF_PROPERTY_TITLE, "Title" }, { 
CDF_PROPERTY_SUBJECT, "Subject" }, { CDF_PROPERTY_AUTHOR, "Author" }, { CDF_PROPERTY_KEYWORDS, "Keywords" }, { CDF_PROPERTY_COMMENTS, "Comments" }, { CDF_PROPERTY_TEMPLATE, "Template" }, { CDF_PROPERTY_LAST_SAVED_BY, "Last Saved By" }, { CDF_PROPERTY_REVISION_NUMBER, "Revision Number" }, { CDF_PROPERTY_TOTAL_EDITING_TIME, "Total Editing Time" }, { CDF_PROPERTY_LAST_PRINTED, "Last Printed" }, { CDF_PROPERTY_CREATE_TIME, "Create Time/Date" }, { CDF_PROPERTY_LAST_SAVED_TIME, "Last Saved Time/Date" }, { CDF_PROPERTY_NUMBER_OF_PAGES, "Number of Pages" }, { CDF_PROPERTY_NUMBER_OF_WORDS, "Number of Words" }, { CDF_PROPERTY_NUMBER_OF_CHARACTERS, "Number of Characters" }, { CDF_PROPERTY_THUMBNAIL, "Thumbnail" }, { CDF_PROPERTY_NAME_OF_APPLICATION, "Name of Creating Application" }, { CDF_PROPERTY_SECURITY, "Security" }, { CDF_PROPERTY_LOCALE_ID, "Locale ID" }, }; int cdf_print_property_name(char *buf, size_t bufsiz, uint32_t p) { size_t i; for (i = 0; i < __arraycount(vn); i++) if (vn[i].v == p) return snprintf(buf, bufsiz, "%s", vn[i].n); return snprintf(buf, bufsiz, "0x%x", p); } int cdf_print_elapsed_time(char *buf, size_t bufsiz, cdf_timestamp_t ts) { int len = 0; int days, hours, mins, secs; ts /= CDF_TIME_PREC; secs = (int)(ts % 60); ts /= 60; mins = (int)(ts % 60); ts /= 60; hours = (int)(ts % 24); ts /= 24; days = (int)ts; if (days) { len += snprintf(buf + len, bufsiz - len, "%dd+", days); if ((size_t)len >= bufsiz) return len; } if (days || hours) { len += snprintf(buf + len, bufsiz - len, "%.2d:", hours); if ((size_t)len >= bufsiz) return len; } len += snprintf(buf + len, bufsiz - len, "%.2d:", mins); if ((size_t)len >= bufsiz) return len; len += snprintf(buf + len, bufsiz - len, "%.2d", secs); return len; } #ifdef CDF_DEBUG void cdf_dump_header(const cdf_header_t *h) { size_t i; #define DUMP(a, b) (void)fprintf(stderr, "%40.40s = " a "\n", # b, h->h_ ## b) #define DUMP2(a, b) (void)fprintf(stderr, "%40.40s = " a " (" a ")\n", # b, \ h->h_ ## b, 1 << h->h_ ## b) DUMP("%d", revision); DUMP("%d", version); DUMP("0x%x", byte_order); DUMP2("%d", sec_size_p2); DUMP2("%d", short_sec_size_p2); DUMP("%d", num_sectors_in_sat); DUMP("%d", secid_first_directory); DUMP("%d", min_size_standard_stream); DUMP("%d", secid_first_sector_in_short_sat); DUMP("%d", num_sectors_in_short_sat); DUMP("%d", secid_first_sector_in_master_sat); DUMP("%d", num_sectors_in_master_sat); for (i = 0; i < __arraycount(h->h_master_sat); i++) { if (h->h_master_sat[i] == CDF_SECID_FREE) break; (void)fprintf(stderr, "%35.35s[%.3zu] = %d\n", "master_sat", i, h->h_master_sat[i]); } } void cdf_dump_sat(const char *prefix, const cdf_sat_t *sat, size_t size) { size_t i, j, s = size / sizeof(cdf_secid_t); for (i = 0; i < sat->sat_len; i++) { (void)fprintf(stderr, "%s[%" SIZE_T_FORMAT "u]:\n%.6d: ", prefix, i, i * s); for (j = 0; j < s; j++) { (void)fprintf(stderr, "%5d, ", CDF_TOLE4(sat->sat_tab[s * i + j])); if ((j + 1) % 10 == 0) (void)fprintf(stderr, "\n%.6d: ", i * s + j + 1); } (void)fprintf(stderr, "\n"); } } void cdf_dump(void *v, size_t len) { size_t i, j; unsigned char *p = v; char abuf[16]; (void)fprintf(stderr, "%.4x: ", 0); for (i = 0, j = 0; i < len; i++, p++) { (void)fprintf(stderr, "%.2x ", *p); abuf[j++] = isprint(*p) ? *p : '.'; if (j == 16) { j = 0; abuf[15] = '\0'; (void)fprintf(stderr, "%s\n%.4x: ", abuf, i + 1); } } (void)fprintf(stderr, "\n"); } void cdf_dump_stream(const cdf_header_t *h, const cdf_stream_t *sst) { size_t ss = sst->sst_dirlen < h->h_min_size_standard_stream ? 
CDF_SHORT_SEC_SIZE(h) : CDF_SEC_SIZE(h); cdf_dump(sst->sst_tab, ss * sst->sst_len); } void cdf_dump_dir(const cdf_info_t *info, const cdf_header_t *h, const cdf_sat_t *sat, const cdf_sat_t *ssat, const cdf_stream_t *sst, const cdf_dir_t *dir) { size_t i, j; cdf_directory_t *d; char name[__arraycount(d->d_name)]; cdf_stream_t scn; struct timespec ts; static const char *types[] = { "empty", "user storage", "user stream", "lockbytes", "property", "root storage" }; for (i = 0; i < dir->dir_len; i++) { d = &dir->dir_tab[i]; for (j = 0; j < sizeof(name); j++) name[j] = (char)CDF_TOLE2(d->d_name[j]); (void)fprintf(stderr, "Directory %" SIZE_T_FORMAT "u: %s\n", i, name); if (d->d_type < __arraycount(types)) (void)fprintf(stderr, "Type: %s\n", types[d->d_type]); else (void)fprintf(stderr, "Type: %d\n", d->d_type); (void)fprintf(stderr, "Color: %s\n", d->d_color ? "black" : "red"); (void)fprintf(stderr, "Left child: %d\n", d->d_left_child); (void)fprintf(stderr, "Right child: %d\n", d->d_right_child); (void)fprintf(stderr, "Flags: 0x%x\n", d->d_flags); cdf_timestamp_to_timespec(&ts, d->d_created); (void)fprintf(stderr, "Created %s", cdf_ctime(&ts.tv_sec)); cdf_timestamp_to_timespec(&ts, d->d_modified); (void)fprintf(stderr, "Modified %s", cdf_ctime(&ts.tv_sec)); (void)fprintf(stderr, "Stream %d\n", d->d_stream_first_sector); (void)fprintf(stderr, "Size %d\n", d->d_size); switch (d->d_type) { case CDF_DIR_TYPE_USER_STORAGE: (void)fprintf(stderr, "Storage: %d\n", d->d_storage); break; case CDF_DIR_TYPE_USER_STREAM: if (sst == NULL) break; if (cdf_read_sector_chain(info, h, sat, ssat, sst, d->d_stream_first_sector, d->d_size, &scn) == -1) { warn("Can't read stream for %s at %d len %d", name, d->d_stream_first_sector, d->d_size); break; } cdf_dump_stream(h, &scn); free(scn.sst_tab); break; default: break; } } } void cdf_dump_property_info(const cdf_property_info_t *info, size_t count) { cdf_timestamp_t tp; struct timespec ts; char buf[64]; size_t i, j; for (i = 0; i < count; i++) { cdf_print_property_name(buf, sizeof(buf), info[i].pi_id); (void)fprintf(stderr, "%" SIZE_T_FORMAT "u) %s: ", i, buf); switch (info[i].pi_type) { case CDF_NULL: break; case CDF_SIGNED16: (void)fprintf(stderr, "signed 16 [%hd]\n", info[i].pi_s16); break; case CDF_SIGNED32: (void)fprintf(stderr, "signed 32 [%d]\n", info[i].pi_s32); break; case CDF_UNSIGNED32: (void)fprintf(stderr, "unsigned 32 [%u]\n", info[i].pi_u32); break; case CDF_LENGTH32_STRING: (void)fprintf(stderr, "string %u [%.*s]\n", info[i].pi_str.s_len, info[i].pi_str.s_len, info[i].pi_str.s_buf); break; case CDF_LENGTH32_WSTRING: (void)fprintf(stderr, "string %u [", info[i].pi_str.s_len); for (j = 0; j < info[i].pi_str.s_len - 1; j++) (void)fputc(info[i].pi_str.s_buf[j << 1], stderr); (void)fprintf(stderr, "]\n"); break; case CDF_FILETIME: tp = info[i].pi_tp; if (tp < 1000000000000000LL) { cdf_print_elapsed_time(buf, sizeof(buf), tp); (void)fprintf(stderr, "timestamp %s\n", buf); } else { cdf_timestamp_to_timespec(&ts, tp); (void)fprintf(stderr, "timestamp %s", cdf_ctime(&ts.tv_sec)); } break; case CDF_CLIPBOARD: (void)fprintf(stderr, "CLIPBOARD %u\n", info[i].pi_u32); break; default: DPRINTF(("Don't know how to deal with %x\n", info[i].pi_type)); break; } } } void cdf_dump_summary_info(const cdf_header_t *h, const cdf_stream_t *sst) { char buf[128]; cdf_summary_info_header_t ssi; cdf_property_info_t *info; size_t count; (void)&h; if (cdf_unpack_summary_info(sst, h, &ssi, &info, &count) == -1) return; (void)fprintf(stderr, "Endian: %x\n", ssi.si_byte_order); 
(void)fprintf(stderr, "Os Version %d.%d\n", ssi.si_os_version & 0xff, ssi.si_os_version >> 8); (void)fprintf(stderr, "Os %d\n", ssi.si_os); cdf_print_classid(buf, sizeof(buf), &ssi.si_class); (void)fprintf(stderr, "Class %s\n", buf); (void)fprintf(stderr, "Count %d\n", ssi.si_count); cdf_dump_property_info(info, count); free(info); } #endif #ifdef TEST int main(int argc, char *argv[]) { int i; cdf_header_t h; cdf_sat_t sat, ssat; cdf_stream_t sst, scn; cdf_dir_t dir; cdf_info_t info; if (argc < 2) { (void)fprintf(stderr, "Usage: %s <filename>\n", getprogname()); return -1; } info.i_buf = NULL; info.i_len = 0; for (i = 1; i < argc; i++) { if ((info.i_fd = open(argv[1], O_RDONLY)) == -1) err(1, "Cannot open `%s'", argv[1]); if (cdf_read_header(&info, &h) == -1) err(1, "Cannot read header"); #ifdef CDF_DEBUG cdf_dump_header(&h); #endif if (cdf_read_sat(&info, &h, &sat) == -1) err(1, "Cannot read sat"); #ifdef CDF_DEBUG cdf_dump_sat("SAT", &sat, CDF_SEC_SIZE(&h)); #endif if (cdf_read_ssat(&info, &h, &sat, &ssat) == -1) err(1, "Cannot read ssat"); #ifdef CDF_DEBUG cdf_dump_sat("SSAT", &ssat, CDF_SHORT_SEC_SIZE(&h)); #endif if (cdf_read_dir(&info, &h, &sat, &dir) == -1) err(1, "Cannot read dir"); if (cdf_read_short_stream(&info, &h, &sat, &dir, &sst) == -1) err(1, "Cannot read short stream"); #ifdef CDF_DEBUG cdf_dump_stream(&h, &sst); #endif #ifdef CDF_DEBUG cdf_dump_dir(&info, &h, &sat, &ssat, &sst, &dir); #endif if (cdf_read_summary_info(&info, &h, &sat, &ssat, &sst, &dir, &scn) == -1) err(1, "Cannot read summary info"); #ifdef CDF_DEBUG cdf_dump_summary_info(&h, &scn); #endif (void)close(info.i_fd); } return 0; } #endif
/*- * Copyright (c) 2008 Christos Zoulas * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /* * Parse Composite Document Files, the format used in Microsoft Office * document files before they switched to zipped XML. * Info from: http://sc.openoffice.org/compdocfileformat.pdf * * N.B. This is the "Composite Document File" format, and not the * "Compound Document Format", nor the "Channel Definition Format". */ #include "file.h" #ifndef lint FILE_RCSID("@(#)$File: cdf.c,v 1.46 2011/09/16 21:23:59 christos Exp $") #endif #include <assert.h> #ifdef CDF_DEBUG #include <err.h> #endif #include <stdlib.h> #include <unistd.h> #include <string.h> #include <time.h> #include <ctype.h> #ifdef HAVE_LIMITS_H #include <limits.h> #endif #ifndef EFTYPE #define EFTYPE EINVAL #endif #include "cdf.h" #ifdef CDF_DEBUG #define DPRINTF(a) printf a, fflush(stdout) #else #define DPRINTF(a) #endif static union { char s[4]; uint32_t u; } cdf_bo; #define NEED_SWAP (cdf_bo.u == (uint32_t)0x01020304) #define CDF_TOLE8(x) ((uint64_t)(NEED_SWAP ? _cdf_tole8(x) : (uint64_t)(x))) #define CDF_TOLE4(x) ((uint32_t)(NEED_SWAP ? _cdf_tole4(x) : (uint32_t)(x))) #define CDF_TOLE2(x) ((uint16_t)(NEED_SWAP ? _cdf_tole2(x) : (uint16_t)(x))) #define CDF_GETUINT32(x, y) cdf_getuint32(x, y) /* * swap a short */ static uint16_t _cdf_tole2(uint16_t sv) { uint16_t rv; uint8_t *s = (uint8_t *)(void *)&sv; uint8_t *d = (uint8_t *)(void *)&rv; d[0] = s[1]; d[1] = s[0]; return rv; } /* * swap an int */ static uint32_t _cdf_tole4(uint32_t sv) { uint32_t rv; uint8_t *s = (uint8_t *)(void *)&sv; uint8_t *d = (uint8_t *)(void *)&rv; d[0] = s[3]; d[1] = s[2]; d[2] = s[1]; d[3] = s[0]; return rv; } /* * swap a quad */ static uint64_t _cdf_tole8(uint64_t sv) { uint64_t rv; uint8_t *s = (uint8_t *)(void *)&sv; uint8_t *d = (uint8_t *)(void *)&rv; d[0] = s[7]; d[1] = s[6]; d[2] = s[5]; d[3] = s[4]; d[4] = s[3]; d[5] = s[2]; d[6] = s[1]; d[7] = s[0]; return rv; } /* * grab a uint32_t from a possibly unaligned address, and return it in * the native host order. 
*/ static uint32_t cdf_getuint32(const uint8_t *p, size_t offs) { uint32_t rv; (void)memcpy(&rv, p + offs * sizeof(uint32_t), sizeof(rv)); return CDF_TOLE4(rv); } #define CDF_UNPACK(a) \ (void)memcpy(&(a), &buf[len], sizeof(a)), len += sizeof(a) #define CDF_UNPACKA(a) \ (void)memcpy((a), &buf[len], sizeof(a)), len += sizeof(a) uint16_t cdf_tole2(uint16_t sv) { return CDF_TOLE2(sv); } uint32_t cdf_tole4(uint32_t sv) { return CDF_TOLE4(sv); } uint64_t cdf_tole8(uint64_t sv) { return CDF_TOLE8(sv); } void cdf_swap_header(cdf_header_t *h) { size_t i; h->h_magic = CDF_TOLE8(h->h_magic); h->h_uuid[0] = CDF_TOLE8(h->h_uuid[0]); h->h_uuid[1] = CDF_TOLE8(h->h_uuid[1]); h->h_revision = CDF_TOLE2(h->h_revision); h->h_version = CDF_TOLE2(h->h_version); h->h_byte_order = CDF_TOLE2(h->h_byte_order); h->h_sec_size_p2 = CDF_TOLE2(h->h_sec_size_p2); h->h_short_sec_size_p2 = CDF_TOLE2(h->h_short_sec_size_p2); h->h_num_sectors_in_sat = CDF_TOLE4(h->h_num_sectors_in_sat); h->h_secid_first_directory = CDF_TOLE4(h->h_secid_first_directory); h->h_min_size_standard_stream = CDF_TOLE4(h->h_min_size_standard_stream); h->h_secid_first_sector_in_short_sat = CDF_TOLE4((uint32_t)h->h_secid_first_sector_in_short_sat); h->h_num_sectors_in_short_sat = CDF_TOLE4(h->h_num_sectors_in_short_sat); h->h_secid_first_sector_in_master_sat = CDF_TOLE4((uint32_t)h->h_secid_first_sector_in_master_sat); h->h_num_sectors_in_master_sat = CDF_TOLE4(h->h_num_sectors_in_master_sat); for (i = 0; i < __arraycount(h->h_master_sat); i++) h->h_master_sat[i] = CDF_TOLE4((uint32_t)h->h_master_sat[i]); } void cdf_unpack_header(cdf_header_t *h, char *buf) { size_t i; size_t len = 0; CDF_UNPACK(h->h_magic); CDF_UNPACKA(h->h_uuid); CDF_UNPACK(h->h_revision); CDF_UNPACK(h->h_version); CDF_UNPACK(h->h_byte_order); CDF_UNPACK(h->h_sec_size_p2); CDF_UNPACK(h->h_short_sec_size_p2); CDF_UNPACKA(h->h_unused0); CDF_UNPACK(h->h_num_sectors_in_sat); CDF_UNPACK(h->h_secid_first_directory); CDF_UNPACKA(h->h_unused1); CDF_UNPACK(h->h_min_size_standard_stream); CDF_UNPACK(h->h_secid_first_sector_in_short_sat); CDF_UNPACK(h->h_num_sectors_in_short_sat); CDF_UNPACK(h->h_secid_first_sector_in_master_sat); CDF_UNPACK(h->h_num_sectors_in_master_sat); for (i = 0; i < __arraycount(h->h_master_sat); i++) CDF_UNPACK(h->h_master_sat[i]); } void cdf_swap_dir(cdf_directory_t *d) { d->d_namelen = CDF_TOLE2(d->d_namelen); d->d_left_child = CDF_TOLE4((uint32_t)d->d_left_child); d->d_right_child = CDF_TOLE4((uint32_t)d->d_right_child); d->d_storage = CDF_TOLE4((uint32_t)d->d_storage); d->d_storage_uuid[0] = CDF_TOLE8(d->d_storage_uuid[0]); d->d_storage_uuid[1] = CDF_TOLE8(d->d_storage_uuid[1]); d->d_flags = CDF_TOLE4(d->d_flags); d->d_created = CDF_TOLE8((uint64_t)d->d_created); d->d_modified = CDF_TOLE8((uint64_t)d->d_modified); d->d_stream_first_sector = CDF_TOLE4((uint32_t)d->d_stream_first_sector); d->d_size = CDF_TOLE4(d->d_size); } void cdf_swap_class(cdf_classid_t *d) { d->cl_dword = CDF_TOLE4(d->cl_dword); d->cl_word[0] = CDF_TOLE2(d->cl_word[0]); d->cl_word[1] = CDF_TOLE2(d->cl_word[1]); } void cdf_unpack_dir(cdf_directory_t *d, char *buf) { size_t len = 0; CDF_UNPACKA(d->d_name); CDF_UNPACK(d->d_namelen); CDF_UNPACK(d->d_type); CDF_UNPACK(d->d_color); CDF_UNPACK(d->d_left_child); CDF_UNPACK(d->d_right_child); CDF_UNPACK(d->d_storage); CDF_UNPACKA(d->d_storage_uuid); CDF_UNPACK(d->d_flags); CDF_UNPACK(d->d_created); CDF_UNPACK(d->d_modified); CDF_UNPACK(d->d_stream_first_sector); CDF_UNPACK(d->d_size); CDF_UNPACK(d->d_unused0); } static int cdf_check_stream_offset(const 
cdf_stream_t *sst, const cdf_header_t *h, const void *p, size_t tail, int line) { const char *b = (const char *)sst->sst_tab; const char *e = ((const char *)p) + tail; (void)&line; if (e >= b && (size_t)(e - b) < CDF_SEC_SIZE(h) * sst->sst_len) return 0; DPRINTF(("%d: offset begin %p end %p %" SIZE_T_FORMAT "u" " >= %" SIZE_T_FORMAT "u [%" SIZE_T_FORMAT "u %" SIZE_T_FORMAT "u]\n", line, b, e, (size_t)(e - b), CDF_SEC_SIZE(h) * sst->sst_len, CDF_SEC_SIZE(h), sst->sst_len)); errno = EFTYPE; return -1; } static ssize_t cdf_read(const cdf_info_t *info, off_t off, void *buf, size_t len) { size_t siz = (size_t)off + len; if ((off_t)(off + len) != (off_t)siz) { errno = EINVAL; return -1; } if (info->i_buf != NULL && info->i_len >= siz) { (void)memcpy(buf, &info->i_buf[off], len); return (ssize_t)len; } if (info->i_fd == -1) return -1; if (lseek(info->i_fd, off, SEEK_SET) == (off_t)-1) return -1; if (read(info->i_fd, buf, len) != (ssize_t)len) return -1; return (ssize_t)len; } int cdf_read_header(const cdf_info_t *info, cdf_header_t *h) { char buf[512]; (void)memcpy(cdf_bo.s, "\01\02\03\04", 4); if (cdf_read(info, (off_t)0, buf, sizeof(buf)) == -1) return -1; cdf_unpack_header(h, buf); cdf_swap_header(h); if (h->h_magic != CDF_MAGIC) { DPRINTF(("Bad magic 0x%" INT64_T_FORMAT "x != 0x%" INT64_T_FORMAT "x\n", (unsigned long long)h->h_magic, (unsigned long long)CDF_MAGIC)); goto out; } if (h->h_sec_size_p2 > 20) { DPRINTF(("Bad sector size 0x%u\n", h->h_sec_size_p2)); goto out; } if (h->h_short_sec_size_p2 > 20) { DPRINTF(("Bad short sector size 0x%u\n", h->h_short_sec_size_p2)); goto out; } return 0; out: errno = EFTYPE; return -1; } ssize_t cdf_read_sector(const cdf_info_t *info, void *buf, size_t offs, size_t len, const cdf_header_t *h, cdf_secid_t id) { size_t ss = CDF_SEC_SIZE(h); size_t pos = CDF_SEC_POS(h, id); assert(ss == len); return cdf_read(info, (off_t)pos, ((char *)buf) + offs, len); } ssize_t cdf_read_short_sector(const cdf_stream_t *sst, void *buf, size_t offs, size_t len, const cdf_header_t *h, cdf_secid_t id) { size_t ss = CDF_SHORT_SEC_SIZE(h); size_t pos = CDF_SHORT_SEC_POS(h, id); assert(ss == len); if (sst->sst_len < (size_t)id) { DPRINTF(("bad sector id %d > %d\n", id, sst->sst_len)); return -1; } (void)memcpy(((char *)buf) + offs, ((const char *)sst->sst_tab) + pos, len); return len; } /* * Read the sector allocation table. 
*/ int cdf_read_sat(const cdf_info_t *info, cdf_header_t *h, cdf_sat_t *sat) { size_t i, j, k; size_t ss = CDF_SEC_SIZE(h); cdf_secid_t *msa, mid, sec; size_t nsatpersec = (ss / sizeof(mid)) - 1; for (i = 0; i < __arraycount(h->h_master_sat); i++) if (h->h_master_sat[i] == CDF_SECID_FREE) break; #define CDF_SEC_LIMIT (UINT32_MAX / (4 * ss)) if ((nsatpersec > 0 && h->h_num_sectors_in_master_sat > CDF_SEC_LIMIT / nsatpersec) || i > CDF_SEC_LIMIT) { DPRINTF(("Number of sectors in master SAT too big %u %" SIZE_T_FORMAT "u\n", h->h_num_sectors_in_master_sat, i)); errno = EFTYPE; return -1; } sat->sat_len = h->h_num_sectors_in_master_sat * nsatpersec + i; DPRINTF(("sat_len = %" SIZE_T_FORMAT "u ss = %" SIZE_T_FORMAT "u\n", sat->sat_len, ss)); if ((sat->sat_tab = CAST(cdf_secid_t *, calloc(sat->sat_len, ss))) == NULL) return -1; for (i = 0; i < __arraycount(h->h_master_sat); i++) { if (h->h_master_sat[i] < 0) break; if (cdf_read_sector(info, sat->sat_tab, ss * i, ss, h, h->h_master_sat[i]) != (ssize_t)ss) { DPRINTF(("Reading sector %d", h->h_master_sat[i])); goto out1; } } if ((msa = CAST(cdf_secid_t *, calloc(1, ss))) == NULL) goto out1; mid = h->h_secid_first_sector_in_master_sat; for (j = 0; j < h->h_num_sectors_in_master_sat; j++) { if (mid < 0) goto out; if (j >= CDF_LOOP_LIMIT) { DPRINTF(("Reading master sector loop limit")); errno = EFTYPE; goto out2; } if (cdf_read_sector(info, msa, 0, ss, h, mid) != (ssize_t)ss) { DPRINTF(("Reading master sector %d", mid)); goto out2; } for (k = 0; k < nsatpersec; k++, i++) { sec = CDF_TOLE4((uint32_t)msa[k]); if (sec < 0) goto out; if (i >= sat->sat_len) { DPRINTF(("Out of bounds reading MSA %u >= %u", i, sat->sat_len)); errno = EFTYPE; goto out2; } if (cdf_read_sector(info, sat->sat_tab, ss * i, ss, h, sec) != (ssize_t)ss) { DPRINTF(("Reading sector %d", CDF_TOLE4(msa[k]))); goto out2; } } mid = CDF_TOLE4((uint32_t)msa[nsatpersec]); } out: sat->sat_len = i; free(msa); return 0; out2: free(msa); out1: free(sat->sat_tab); return -1; } size_t cdf_count_chain(const cdf_sat_t *sat, cdf_secid_t sid, size_t size) { size_t i, j; cdf_secid_t maxsector = (cdf_secid_t)(sat->sat_len * size); DPRINTF(("Chain:")); for (j = i = 0; sid >= 0; i++, j++) { DPRINTF((" %d", sid)); if (j >= CDF_LOOP_LIMIT) { DPRINTF(("Counting chain loop limit")); errno = EFTYPE; return (size_t)-1; } if (sid > maxsector) { DPRINTF(("Sector %d > %d\n", sid, maxsector)); errno = EFTYPE; return (size_t)-1; } sid = CDF_TOLE4((uint32_t)sat->sat_tab[sid]); } DPRINTF(("\n")); return i; } int cdf_read_long_sector_chain(const cdf_info_t *info, const cdf_header_t *h, const cdf_sat_t *sat, cdf_secid_t sid, size_t len, cdf_stream_t *scn) { size_t ss = CDF_SEC_SIZE(h), i, j; ssize_t nr; scn->sst_len = cdf_count_chain(sat, sid, ss); scn->sst_dirlen = len; if (scn->sst_len == (size_t)-1) return -1; scn->sst_tab = calloc(scn->sst_len, ss); if (scn->sst_tab == NULL) return -1; for (j = i = 0; sid >= 0; i++, j++) { if (j >= CDF_LOOP_LIMIT) { DPRINTF(("Read long sector chain loop limit")); errno = EFTYPE; goto out; } if (i >= scn->sst_len) { DPRINTF(("Out of bounds reading long sector chain " "%u > %u\n", i, scn->sst_len)); errno = EFTYPE; goto out; } if ((nr = cdf_read_sector(info, scn->sst_tab, i * ss, ss, h, sid)) != (ssize_t)ss) { if (i == scn->sst_len - 1 && nr > 0) { /* Last sector might be truncated */ return 0; } DPRINTF(("Reading long sector chain %d", sid)); goto out; } sid = CDF_TOLE4((uint32_t)sat->sat_tab[sid]); } return 0; out: free(scn->sst_tab); return -1; } int 
cdf_read_short_sector_chain(const cdf_header_t *h, const cdf_sat_t *ssat, const cdf_stream_t *sst, cdf_secid_t sid, size_t len, cdf_stream_t *scn) { size_t ss = CDF_SHORT_SEC_SIZE(h), i, j; scn->sst_len = cdf_count_chain(ssat, sid, CDF_SEC_SIZE(h)); scn->sst_dirlen = len; if (sst->sst_tab == NULL || scn->sst_len == (size_t)-1) return -1; scn->sst_tab = calloc(scn->sst_len, ss); if (scn->sst_tab == NULL) return -1; for (j = i = 0; sid >= 0; i++, j++) { if (j >= CDF_LOOP_LIMIT) { DPRINTF(("Read short sector chain loop limit")); errno = EFTYPE; goto out; } if (i >= scn->sst_len) { DPRINTF(("Out of bounds reading short sector chain " "%u > %u\n", i, scn->sst_len)); errno = EFTYPE; goto out; } if (cdf_read_short_sector(sst, scn->sst_tab, i * ss, ss, h, sid) != (ssize_t)ss) { DPRINTF(("Reading short sector chain %d", sid)); goto out; } sid = CDF_TOLE4((uint32_t)ssat->sat_tab[sid]); } return 0; out: free(scn->sst_tab); return -1; } int cdf_read_sector_chain(const cdf_info_t *info, const cdf_header_t *h, const cdf_sat_t *sat, const cdf_sat_t *ssat, const cdf_stream_t *sst, cdf_secid_t sid, size_t len, cdf_stream_t *scn) { if (len < h->h_min_size_standard_stream && sst->sst_tab != NULL) return cdf_read_short_sector_chain(h, ssat, sst, sid, len, scn); else return cdf_read_long_sector_chain(info, h, sat, sid, len, scn); } int cdf_read_dir(const cdf_info_t *info, const cdf_header_t *h, const cdf_sat_t *sat, cdf_dir_t *dir) { size_t i, j; size_t ss = CDF_SEC_SIZE(h), ns, nd; char *buf; cdf_secid_t sid = h->h_secid_first_directory; ns = cdf_count_chain(sat, sid, ss); if (ns == (size_t)-1) return -1; nd = ss / CDF_DIRECTORY_SIZE; dir->dir_len = ns * nd; dir->dir_tab = CAST(cdf_directory_t *, calloc(dir->dir_len, sizeof(dir->dir_tab[0]))); if (dir->dir_tab == NULL) return -1; if ((buf = CAST(char *, malloc(ss))) == NULL) { free(dir->dir_tab); return -1; } for (j = i = 0; i < ns; i++, j++) { if (j >= CDF_LOOP_LIMIT) { DPRINTF(("Read dir loop limit")); errno = EFTYPE; goto out; } if (cdf_read_sector(info, buf, 0, ss, h, sid) != (ssize_t)ss) { DPRINTF(("Reading directory sector %d", sid)); goto out; } for (j = 0; j < nd; j++) { cdf_unpack_dir(&dir->dir_tab[i * nd + j], &buf[j * CDF_DIRECTORY_SIZE]); } sid = CDF_TOLE4((uint32_t)sat->sat_tab[sid]); } if (NEED_SWAP) for (i = 0; i < dir->dir_len; i++) cdf_swap_dir(&dir->dir_tab[i]); free(buf); return 0; out: free(dir->dir_tab); free(buf); return -1; } int cdf_read_ssat(const cdf_info_t *info, const cdf_header_t *h, const cdf_sat_t *sat, cdf_sat_t *ssat) { size_t i, j; size_t ss = CDF_SEC_SIZE(h); cdf_secid_t sid = h->h_secid_first_sector_in_short_sat; ssat->sat_len = cdf_count_chain(sat, sid, CDF_SEC_SIZE(h)); if (ssat->sat_len == (size_t)-1) return -1; ssat->sat_tab = CAST(cdf_secid_t *, calloc(ssat->sat_len, ss)); if (ssat->sat_tab == NULL) return -1; for (j = i = 0; sid >= 0; i++, j++) { if (j >= CDF_LOOP_LIMIT) { DPRINTF(("Read short sat sector loop limit")); errno = EFTYPE; goto out; } if (i >= ssat->sat_len) { DPRINTF(("Out of bounds reading short sector chain " "%u > %u\n", i, ssat->sat_len)); errno = EFTYPE; goto out; } if (cdf_read_sector(info, ssat->sat_tab, i * ss, ss, h, sid) != (ssize_t)ss) { DPRINTF(("Reading short sat sector %d", sid)); goto out; } sid = CDF_TOLE4((uint32_t)sat->sat_tab[sid]); } return 0; out: free(ssat->sat_tab); return -1; } int cdf_read_short_stream(const cdf_info_t *info, const cdf_header_t *h, const cdf_sat_t *sat, const cdf_dir_t *dir, cdf_stream_t *scn) { size_t i; const cdf_directory_t *d; for (i = 0; i < dir->dir_len; 
i++) if (dir->dir_tab[i].d_type == CDF_DIR_TYPE_ROOT_STORAGE) break; /* If it is not there, just fake it; some docs don't have it */ if (i == dir->dir_len) goto out; d = &dir->dir_tab[i]; /* If it is not there, just fake it; some docs don't have it */ if (d->d_stream_first_sector < 0) goto out; return cdf_read_long_sector_chain(info, h, sat, d->d_stream_first_sector, d->d_size, scn); out: scn->sst_tab = NULL; scn->sst_len = 0; scn->sst_dirlen = 0; return 0; } static int cdf_namecmp(const char *d, const uint16_t *s, size_t l) { for (; l--; d++, s++) if (*d != CDF_TOLE2(*s)) return (unsigned char)*d - CDF_TOLE2(*s); return 0; } int cdf_read_summary_info(const cdf_info_t *info, const cdf_header_t *h, const cdf_sat_t *sat, const cdf_sat_t *ssat, const cdf_stream_t *sst, const cdf_dir_t *dir, cdf_stream_t *scn) { size_t i; const cdf_directory_t *d; static const char name[] = "\05SummaryInformation"; for (i = dir->dir_len; i > 0; i--) if (dir->dir_tab[i - 1].d_type == CDF_DIR_TYPE_USER_STREAM && cdf_namecmp(name, dir->dir_tab[i - 1].d_name, sizeof(name)) == 0) break; if (i == 0) { DPRINTF(("Cannot find summary information section\n")); errno = ESRCH; return -1; } d = &dir->dir_tab[i - 1]; return cdf_read_sector_chain(info, h, sat, ssat, sst, d->d_stream_first_sector, d->d_size, scn); } int cdf_read_property_info(const cdf_stream_t *sst, const cdf_header_t *h, uint32_t offs, cdf_property_info_t **info, size_t *count, size_t *maxcount) { const cdf_section_header_t *shp; cdf_section_header_t sh; const uint8_t *p, *q, *e; int16_t s16; int32_t s32; uint32_t u32; int64_t s64; uint64_t u64; cdf_timestamp_t tp; size_t i, o, o4, nelements, j; cdf_property_info_t *inp; if (offs > UINT32_MAX / 4) { errno = EFTYPE; goto out; } shp = CAST(const cdf_section_header_t *, (const void *) ((const char *)sst->sst_tab + offs)); if (cdf_check_stream_offset(sst, h, shp, sizeof(*shp), __LINE__) == -1) goto out; sh.sh_len = CDF_TOLE4(shp->sh_len); #define CDF_SHLEN_LIMIT (UINT32_MAX / 8) if (sh.sh_len > CDF_SHLEN_LIMIT) { errno = EFTYPE; goto out; } sh.sh_properties = CDF_TOLE4(shp->sh_properties); #define CDF_PROP_LIMIT (UINT32_MAX / (4 * sizeof(*inp))) if (sh.sh_properties > CDF_PROP_LIMIT) goto out; DPRINTF(("section len: %u properties %u\n", sh.sh_len, sh.sh_properties)); if (*maxcount) { if (*maxcount > CDF_PROP_LIMIT) goto out; *maxcount += sh.sh_properties; inp = CAST(cdf_property_info_t *, realloc(*info, *maxcount * sizeof(*inp))); } else { *maxcount = sh.sh_properties; inp = CAST(cdf_property_info_t *, malloc(*maxcount * sizeof(*inp))); } if (inp == NULL) goto out; *info = inp; inp += *count; *count += sh.sh_properties; p = CAST(const uint8_t *, (const void *) ((const char *)(const void *)sst->sst_tab + offs + sizeof(sh))); e = CAST(const uint8_t *, (const void *) (((const char *)(const void *)shp) + sh.sh_len)); if (cdf_check_stream_offset(sst, h, e, 0, __LINE__) == -1) goto out; for (i = 0; i < sh.sh_properties; i++) { q = (const uint8_t *)(const void *) ((const char *)(const void *)p + CDF_GETUINT32(p, (i << 1) + 1)) - 2 * sizeof(uint32_t); if (q > e) { DPRINTF(("Ran off the end %p > %p\n", q, e)); goto out; } inp[i].pi_id = CDF_GETUINT32(p, i << 1); inp[i].pi_type = CDF_GETUINT32(q, 0); DPRINTF(("%d) id=%x type=%x offs=%x,%d\n", i, inp[i].pi_id, inp[i].pi_type, q - p, CDF_GETUINT32(p, (i << 1) + 1))); if (inp[i].pi_type & CDF_VECTOR) { nelements = CDF_GETUINT32(q, 1); o = 2; } else { nelements = 1; o = 1; } o4 = o * sizeof(uint32_t); if (inp[i].pi_type & (CDF_ARRAY|CDF_BYREF|CDF_RESERVED)) goto 
unknown; switch (inp[i].pi_type & CDF_TYPEMASK) { case CDF_NULL: case CDF_EMPTY: break; case CDF_SIGNED16: if (inp[i].pi_type & CDF_VECTOR) goto unknown; (void)memcpy(&s16, &q[o4], sizeof(s16)); inp[i].pi_s16 = CDF_TOLE2(s16); break; case CDF_SIGNED32: if (inp[i].pi_type & CDF_VECTOR) goto unknown; (void)memcpy(&s32, &q[o4], sizeof(s32)); inp[i].pi_s32 = CDF_TOLE4((uint32_t)s32); break; case CDF_BOOL: case CDF_UNSIGNED32: if (inp[i].pi_type & CDF_VECTOR) goto unknown; (void)memcpy(&u32, &q[o4], sizeof(u32)); inp[i].pi_u32 = CDF_TOLE4(u32); break; case CDF_SIGNED64: if (inp[i].pi_type & CDF_VECTOR) goto unknown; (void)memcpy(&s64, &q[o4], sizeof(s64)); inp[i].pi_s64 = CDF_TOLE8((uint64_t)s64); break; case CDF_UNSIGNED64: if (inp[i].pi_type & CDF_VECTOR) goto unknown; (void)memcpy(&u64, &q[o4], sizeof(u64)); inp[i].pi_u64 = CDF_TOLE8((uint64_t)u64); break; case CDF_LENGTH32_STRING: case CDF_LENGTH32_WSTRING: if (nelements > 1) { size_t nelem = inp - *info; if (*maxcount > CDF_PROP_LIMIT || nelements > CDF_PROP_LIMIT) goto out; *maxcount += nelements; inp = CAST(cdf_property_info_t *, realloc(*info, *maxcount * sizeof(*inp))); if (inp == NULL) goto out; *info = inp; inp = *info + nelem; } DPRINTF(("nelements = %d\n", nelements)); for (j = 0; j < nelements; j++, i++) { uint32_t l = CDF_GETUINT32(q, o); inp[i].pi_str.s_len = l; inp[i].pi_str.s_buf = (const char *) (const void *)(&q[o4 + sizeof(l)]); DPRINTF(("l = %d, r = %d, s = %s\n", l, CDF_ROUND(l, sizeof(l)), inp[i].pi_str.s_buf)); l = 4 + (uint32_t)CDF_ROUND(l, sizeof(l)); o += l >> 2; if (q + o >= e) goto out; o4 = o * sizeof(uint32_t); } i--; break; case CDF_FILETIME: if (inp[i].pi_type & CDF_VECTOR) goto unknown; (void)memcpy(&tp, &q[o4], sizeof(tp)); inp[i].pi_tp = CDF_TOLE8((uint64_t)tp); break; case CDF_CLIPBOARD: if (inp[i].pi_type & CDF_VECTOR) goto unknown; break; default: unknown: DPRINTF(("Don't know how to deal with %x\n", inp[i].pi_type)); goto out; } } return 0; out: free(*info); return -1; } int cdf_unpack_summary_info(const cdf_stream_t *sst, const cdf_header_t *h, cdf_summary_info_header_t *ssi, cdf_property_info_t **info, size_t *count) { size_t i, maxcount; const cdf_summary_info_header_t *si = CAST(const cdf_summary_info_header_t *, sst->sst_tab); const cdf_section_declaration_t *sd = CAST(const cdf_section_declaration_t *, (const void *) ((const char *)sst->sst_tab + CDF_SECTION_DECLARATION_OFFSET)); if (cdf_check_stream_offset(sst, h, si, sizeof(*si), __LINE__) == -1 || cdf_check_stream_offset(sst, h, sd, sizeof(*sd), __LINE__) == -1) return -1; ssi->si_byte_order = CDF_TOLE2(si->si_byte_order); ssi->si_os_version = CDF_TOLE2(si->si_os_version); ssi->si_os = CDF_TOLE2(si->si_os); ssi->si_class = si->si_class; cdf_swap_class(&ssi->si_class); ssi->si_count = CDF_TOLE2(si->si_count); *count = 0; maxcount = 0; *info = NULL; for (i = 0; i < CDF_TOLE4(si->si_count); i++) { if (i >= CDF_LOOP_LIMIT) { DPRINTF(("Unpack summary info loop limit")); errno = EFTYPE; return -1; } if (cdf_read_property_info(sst, h, CDF_TOLE4(sd->sd_offset), info, count, &maxcount) == -1) return -1; } return 0; } int cdf_print_classid(char *buf, size_t buflen, const cdf_classid_t *id) { return snprintf(buf, buflen, "%.8x-%.4x-%.4x-%.2x%.2x-" "%.2x%.2x%.2x%.2x%.2x%.2x", id->cl_dword, id->cl_word[0], id->cl_word[1], id->cl_two[0], id->cl_two[1], id->cl_six[0], id->cl_six[1], id->cl_six[2], id->cl_six[3], id->cl_six[4], id->cl_six[5]); } static const struct { uint32_t v; const char *n; } vn[] = { { CDF_PROPERTY_CODE_PAGE, "Code page" }, { 
CDF_PROPERTY_TITLE, "Title" }, { CDF_PROPERTY_SUBJECT, "Subject" }, { CDF_PROPERTY_AUTHOR, "Author" }, { CDF_PROPERTY_KEYWORDS, "Keywords" }, { CDF_PROPERTY_COMMENTS, "Comments" }, { CDF_PROPERTY_TEMPLATE, "Template" }, { CDF_PROPERTY_LAST_SAVED_BY, "Last Saved By" }, { CDF_PROPERTY_REVISION_NUMBER, "Revision Number" }, { CDF_PROPERTY_TOTAL_EDITING_TIME, "Total Editing Time" }, { CDF_PROPERTY_LAST_PRINTED, "Last Printed" }, { CDF_PROPERTY_CREATE_TIME, "Create Time/Date" }, { CDF_PROPERTY_LAST_SAVED_TIME, "Last Saved Time/Date" }, { CDF_PROPERTY_NUMBER_OF_PAGES, "Number of Pages" }, { CDF_PROPERTY_NUMBER_OF_WORDS, "Number of Words" }, { CDF_PROPERTY_NUMBER_OF_CHARACTERS, "Number of Characters" }, { CDF_PROPERTY_THUMBNAIL, "Thumbnail" }, { CDF_PROPERTY_NAME_OF_APPLICATION, "Name of Creating Application" }, { CDF_PROPERTY_SECURITY, "Security" }, { CDF_PROPERTY_LOCALE_ID, "Locale ID" }, }; int cdf_print_property_name(char *buf, size_t bufsiz, uint32_t p) { size_t i; for (i = 0; i < __arraycount(vn); i++) if (vn[i].v == p) return snprintf(buf, bufsiz, "%s", vn[i].n); return snprintf(buf, bufsiz, "0x%x", p); } int cdf_print_elapsed_time(char *buf, size_t bufsiz, cdf_timestamp_t ts) { int len = 0; int days, hours, mins, secs; ts /= CDF_TIME_PREC; secs = (int)(ts % 60); ts /= 60; mins = (int)(ts % 60); ts /= 60; hours = (int)(ts % 24); ts /= 24; days = (int)ts; if (days) { len += snprintf(buf + len, bufsiz - len, "%dd+", days); if ((size_t)len >= bufsiz) return len; } if (days || hours) { len += snprintf(buf + len, bufsiz - len, "%.2d:", hours); if ((size_t)len >= bufsiz) return len; } len += snprintf(buf + len, bufsiz - len, "%.2d:", mins); if ((size_t)len >= bufsiz) return len; len += snprintf(buf + len, bufsiz - len, "%.2d", secs); return len; } #ifdef CDF_DEBUG void cdf_dump_header(const cdf_header_t *h) { size_t i; #define DUMP(a, b) (void)fprintf(stderr, "%40.40s = " a "\n", # b, h->h_ ## b) #define DUMP2(a, b) (void)fprintf(stderr, "%40.40s = " a " (" a ")\n", # b, \ h->h_ ## b, 1 << h->h_ ## b) DUMP("%d", revision); DUMP("%d", version); DUMP("0x%x", byte_order); DUMP2("%d", sec_size_p2); DUMP2("%d", short_sec_size_p2); DUMP("%d", num_sectors_in_sat); DUMP("%d", secid_first_directory); DUMP("%d", min_size_standard_stream); DUMP("%d", secid_first_sector_in_short_sat); DUMP("%d", num_sectors_in_short_sat); DUMP("%d", secid_first_sector_in_master_sat); DUMP("%d", num_sectors_in_master_sat); for (i = 0; i < __arraycount(h->h_master_sat); i++) { if (h->h_master_sat[i] == CDF_SECID_FREE) break; (void)fprintf(stderr, "%35.35s[%.3zu] = %d\n", "master_sat", i, h->h_master_sat[i]); } } void cdf_dump_sat(const char *prefix, const cdf_sat_t *sat, size_t size) { size_t i, j, s = size / sizeof(cdf_secid_t); for (i = 0; i < sat->sat_len; i++) { (void)fprintf(stderr, "%s[%" SIZE_T_FORMAT "u]:\n%.6d: ", prefix, i, i * s); for (j = 0; j < s; j++) { (void)fprintf(stderr, "%5d, ", CDF_TOLE4(sat->sat_tab[s * i + j])); if ((j + 1) % 10 == 0) (void)fprintf(stderr, "\n%.6d: ", i * s + j + 1); } (void)fprintf(stderr, "\n"); } } void cdf_dump(void *v, size_t len) { size_t i, j; unsigned char *p = v; char abuf[16]; (void)fprintf(stderr, "%.4x: ", 0); for (i = 0, j = 0; i < len; i++, p++) { (void)fprintf(stderr, "%.2x ", *p); abuf[j++] = isprint(*p) ? *p : '.'; if (j == 16) { j = 0; abuf[15] = '\0'; (void)fprintf(stderr, "%s\n%.4x: ", abuf, i + 1); } } (void)fprintf(stderr, "\n"); } void cdf_dump_stream(const cdf_header_t *h, const cdf_stream_t *sst) { size_t ss = sst->sst_dirlen < h->h_min_size_standard_stream ? 
CDF_SHORT_SEC_SIZE(h) : CDF_SEC_SIZE(h); cdf_dump(sst->sst_tab, ss * sst->sst_len); } void cdf_dump_dir(const cdf_info_t *info, const cdf_header_t *h, const cdf_sat_t *sat, const cdf_sat_t *ssat, const cdf_stream_t *sst, const cdf_dir_t *dir) { size_t i, j; cdf_directory_t *d; char name[__arraycount(d->d_name)]; cdf_stream_t scn; struct timespec ts; static const char *types[] = { "empty", "user storage", "user stream", "lockbytes", "property", "root storage" }; for (i = 0; i < dir->dir_len; i++) { d = &dir->dir_tab[i]; for (j = 0; j < sizeof(name); j++) name[j] = (char)CDF_TOLE2(d->d_name[j]); (void)fprintf(stderr, "Directory %" SIZE_T_FORMAT "u: %s\n", i, name); if (d->d_type < __arraycount(types)) (void)fprintf(stderr, "Type: %s\n", types[d->d_type]); else (void)fprintf(stderr, "Type: %d\n", d->d_type); (void)fprintf(stderr, "Color: %s\n", d->d_color ? "black" : "red"); (void)fprintf(stderr, "Left child: %d\n", d->d_left_child); (void)fprintf(stderr, "Right child: %d\n", d->d_right_child); (void)fprintf(stderr, "Flags: 0x%x\n", d->d_flags); cdf_timestamp_to_timespec(&ts, d->d_created); (void)fprintf(stderr, "Created %s", cdf_ctime(&ts.tv_sec)); cdf_timestamp_to_timespec(&ts, d->d_modified); (void)fprintf(stderr, "Modified %s", cdf_ctime(&ts.tv_sec)); (void)fprintf(stderr, "Stream %d\n", d->d_stream_first_sector); (void)fprintf(stderr, "Size %d\n", d->d_size); switch (d->d_type) { case CDF_DIR_TYPE_USER_STORAGE: (void)fprintf(stderr, "Storage: %d\n", d->d_storage); break; case CDF_DIR_TYPE_USER_STREAM: if (sst == NULL) break; if (cdf_read_sector_chain(info, h, sat, ssat, sst, d->d_stream_first_sector, d->d_size, &scn) == -1) { warn("Can't read stream for %s at %d len %d", name, d->d_stream_first_sector, d->d_size); break; } cdf_dump_stream(h, &scn); free(scn.sst_tab); break; default: break; } } } void cdf_dump_property_info(const cdf_property_info_t *info, size_t count) { cdf_timestamp_t tp; struct timespec ts; char buf[64]; size_t i, j; for (i = 0; i < count; i++) { cdf_print_property_name(buf, sizeof(buf), info[i].pi_id); (void)fprintf(stderr, "%" SIZE_T_FORMAT "u) %s: ", i, buf); switch (info[i].pi_type) { case CDF_NULL: break; case CDF_SIGNED16: (void)fprintf(stderr, "signed 16 [%hd]\n", info[i].pi_s16); break; case CDF_SIGNED32: (void)fprintf(stderr, "signed 32 [%d]\n", info[i].pi_s32); break; case CDF_UNSIGNED32: (void)fprintf(stderr, "unsigned 32 [%u]\n", info[i].pi_u32); break; case CDF_LENGTH32_STRING: (void)fprintf(stderr, "string %u [%.*s]\n", info[i].pi_str.s_len, info[i].pi_str.s_len, info[i].pi_str.s_buf); break; case CDF_LENGTH32_WSTRING: (void)fprintf(stderr, "string %u [", info[i].pi_str.s_len); for (j = 0; j < info[i].pi_str.s_len - 1; j++) (void)fputc(info[i].pi_str.s_buf[j << 1], stderr); (void)fprintf(stderr, "]\n"); break; case CDF_FILETIME: tp = info[i].pi_tp; if (tp < 1000000000000000LL) { cdf_print_elapsed_time(buf, sizeof(buf), tp); (void)fprintf(stderr, "timestamp %s\n", buf); } else { cdf_timestamp_to_timespec(&ts, tp); (void)fprintf(stderr, "timestamp %s", cdf_ctime(&ts.tv_sec)); } break; case CDF_CLIPBOARD: (void)fprintf(stderr, "CLIPBOARD %u\n", info[i].pi_u32); break; default: DPRINTF(("Don't know how to deal with %x\n", info[i].pi_type)); break; } } } void cdf_dump_summary_info(const cdf_header_t *h, const cdf_stream_t *sst) { char buf[128]; cdf_summary_info_header_t ssi; cdf_property_info_t *info; size_t count; (void)&h; if (cdf_unpack_summary_info(sst, h, &ssi, &info, &count) == -1) return; (void)fprintf(stderr, "Endian: %x\n", ssi.si_byte_order); 
(void)fprintf(stderr, "Os Version %d.%d\n", ssi.si_os_version & 0xff, ssi.si_os_version >> 8); (void)fprintf(stderr, "Os %d\n", ssi.si_os); cdf_print_classid(buf, sizeof(buf), &ssi.si_class); (void)fprintf(stderr, "Class %s\n", buf); (void)fprintf(stderr, "Count %d\n", ssi.si_count); cdf_dump_property_info(info, count); free(info); } #endif #ifdef TEST int main(int argc, char *argv[]) { int i; cdf_header_t h; cdf_sat_t sat, ssat; cdf_stream_t sst, scn; cdf_dir_t dir; cdf_info_t info; if (argc < 2) { (void)fprintf(stderr, "Usage: %s <filename>\n", getprogname()); return -1; } info.i_buf = NULL; info.i_len = 0; for (i = 1; i < argc; i++) { if ((info.i_fd = open(argv[1], O_RDONLY)) == -1) err(1, "Cannot open `%s'", argv[1]); if (cdf_read_header(&info, &h) == -1) err(1, "Cannot read header"); #ifdef CDF_DEBUG cdf_dump_header(&h); #endif if (cdf_read_sat(&info, &h, &sat) == -1) err(1, "Cannot read sat"); #ifdef CDF_DEBUG cdf_dump_sat("SAT", &sat, CDF_SEC_SIZE(&h)); #endif if (cdf_read_ssat(&info, &h, &sat, &ssat) == -1) err(1, "Cannot read ssat"); #ifdef CDF_DEBUG cdf_dump_sat("SSAT", &ssat, CDF_SHORT_SEC_SIZE(&h)); #endif if (cdf_read_dir(&info, &h, &sat, &dir) == -1) err(1, "Cannot read dir"); if (cdf_read_short_stream(&info, &h, &sat, &dir, &sst) == -1) err(1, "Cannot read short stream"); #ifdef CDF_DEBUG cdf_dump_stream(&h, &sst); #endif #ifdef CDF_DEBUG cdf_dump_dir(&info, &h, &sat, &ssat, &sst, &dir); #endif if (cdf_read_summary_info(&info, &h, &sat, &ssat, &sst, &dir, &scn) == -1) err(1, "Cannot read summary info"); #ifdef CDF_DEBUG cdf_dump_summary_info(&h, &scn); #endif (void)close(info.i_fd); } return 0; } #endif
cdf_read_sector(const cdf_info_t *info, void *buf, size_t offs, size_t len, const cdf_header_t *h, cdf_secid_t id) { assert((size_t)CDF_SEC_SIZE(h) == len); return cdf_read(info, (off_t)CDF_SEC_POS(h, id), ((char *)buf) + offs, len); }
cdf_read_sector(const cdf_info_t *info, void *buf, size_t offs, size_t len, const cdf_header_t *h, cdf_secid_t id) { size_t ss = CDF_SEC_SIZE(h); size_t pos = CDF_SEC_POS(h, id); assert(ss == len); return cdf_read(info, (off_t)pos, ((char *)buf) + offs, len); }
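The func_before/func_after pair above is the mechanical half of the change: CDF_SEC_SIZE and CDF_SEC_POS are evaluated once into size_t locals, the assert then compares like-typed values without an inline cast, and the single conversion to off_t happens at the cdf_read call. A minimal sketch of that pattern, with hypothetical SEC_SIZE/SEC_POS macros standing in for the cdf.h ones (the security-relevant guards appear in the diff field below):

#include <assert.h>
#include <stdint.h>
#include <sys/types.h>

#define SEC_SIZE(p2)     ((size_t)1 << (p2))             /* hypothetical stand-in */
#define SEC_POS(p2, id)  (512 + ((size_t)(id) << (p2)))  /* hypothetical stand-in */

/*
 * Same shape as the patched cdf_read_sector(): hoist both macro results
 * into size_t locals, assert on like types, and cast to off_t exactly once.
 */
static off_t
sector_offset(unsigned p2, uint32_t id, size_t len)
{
	size_t ss = SEC_SIZE(p2);
	size_t pos = SEC_POS(p2, id);

	assert(ss == len);
	return (off_t)pos;
}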
{'added': [(38, 'FILE_RCSID("@(#)$File: cdf.c,v 1.46 2011/09/16 21:23:59 christos Exp $")'), (344, '\tsize_t ss = CDF_SEC_SIZE(h);'), (345, '\tsize_t pos = CDF_SEC_POS(h, id);'), (346, '\tassert(ss == len);'), (347, '\treturn cdf_read(info, (off_t)pos, ((char *)buf) + offs, len);'), (354, '\tsize_t ss = CDF_SHORT_SEC_SIZE(h);'), (355, '\tsize_t pos = CDF_SHORT_SEC_POS(h, id);'), (356, '\tassert(ss == len);'), (357, '\tif (sst->sst_len < (size_t)id) {'), (358, '\t\tDPRINTF(("bad sector id %d > %d\\n", id, sst->sst_len));'), (359, '\t\treturn -1;'), (360, '\t}'), (362, '\t ((const char *)sst->sst_tab) + pos, len);'), (878, '\t\t\t\tif (q + o >= e)'), (879, '\t\t\t\t\tgoto out;')], 'deleted': [(38, 'FILE_RCSID("@(#)$File: cdf.c,v 1.45 2011/08/28 08:38:48 christos Exp $")'), (344, '\tassert((size_t)CDF_SEC_SIZE(h) == len);'), (345, '\treturn cdf_read(info, (off_t)CDF_SEC_POS(h, id),'), (346, '\t ((char *)buf) + offs, len);'), (353, '\tassert((size_t)CDF_SHORT_SEC_SIZE(h) == len);'), (355, '\t ((const char *)sst->sst_tab) + CDF_SHORT_SEC_POS(h, id), len);')]}
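The diff's 'added' list is where the CVE-2012-1571 fix actually lives: cdf_read_short_sector now rejects a sector id that indexes past sst_len before touching sst_tab, and cdf_read_property_info's string loop bails out when its cursor would cross the section end (`q + o >= e`). A minimal sketch of the first guard, the check-before-memcpy idiom, with hypothetical names (table, table_len, sector_size) in place of the cdf_stream_t fields; the committed check is written as `sst->sst_len < (size_t)id`, and the sketch uses the stricter `id >= table_len` form:

#include <stdio.h>
#include <string.h>
#include <sys/types.h>

/*
 * Sketch of the guard added to cdf_read_short_sector(): validate the
 * attacker-controlled sector id against the table length *before* computing
 * a byte offset into the table.  Names are illustrative, not file(1)'s.
 */
static ssize_t
read_sector_checked(const char *table, size_t table_len, size_t sector_size,
    char *buf, size_t id)
{
	if (id >= table_len) {
		fprintf(stderr, "bad sector id %zu >= %zu\n", id, table_len);
		return -1;	/* out-of-bounds read (CWE-119) averted */
	}
	(void)memcpy(buf, table + id * sector_size, sector_size);
	return (ssize_t)sector_size;
}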
15
6
1,092
8,038
https://github.com/glensc/file
CVE-2012-1571
['CWE-119']
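The second guard in this record's diff closes the other out-of-bounds read: in the CDF_LENGTH32_STRING / CDF_LENGTH32_WSTRING loop of cdf_read_property_info, an untrusted per-string length advances the cursor, and the patch aborts once that cursor would leave the section. A sketch of the loop's shape, assuming [q, e) delimits the section; the committed check is literally `if (q + o >= e) goto out;` with o counted in 32-bit words, while the sketch scales o to bytes for the comparison, and lens[] stands in for the CDF_GETUINT32() reads:

#include <stddef.h>
#include <stdint.h>

/*
 * Sketch of the overrun guard added to cdf_read_property_info()'s string
 * loop: each untrusted length is rounded up to a 4-byte boundary, the word
 * cursor o is advanced, and the walk stops before it can cross the section
 * end e.
 */
static int
walk_strings(const uint8_t *q, const uint8_t *e,
    const uint32_t *lens, size_t nelements)
{
	size_t o = 2;		/* cursor in 32-bit words, as in the vector case */
	size_t j;

	for (j = 0; j < nelements; j++) {
		uint32_t l = lens[j];			/* untrusted length */
		l = 4 + ((l + 3) & ~(uint32_t)3);	/* length word + payload, rounded */
		o += l >> 2;
		if (q + o * sizeof(uint32_t) >= e)	/* the fix: stop at the section end */
			return -1;
	}
	return 0;
}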
cdf.c
cdf_read_short_sector
/*- * Copyright (c) 2008 Christos Zoulas * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /* * Parse Composite Document Files, the format used in Microsoft Office * document files before they switched to zipped XML. * Info from: http://sc.openoffice.org/compdocfileformat.pdf * * N.B. This is the "Composite Document File" format, and not the * "Compound Document Format", nor the "Channel Definition Format". */ #include "file.h" #ifndef lint FILE_RCSID("@(#)$File: cdf.c,v 1.45 2011/08/28 08:38:48 christos Exp $") #endif #include <assert.h> #ifdef CDF_DEBUG #include <err.h> #endif #include <stdlib.h> #include <unistd.h> #include <string.h> #include <time.h> #include <ctype.h> #ifdef HAVE_LIMITS_H #include <limits.h> #endif #ifndef EFTYPE #define EFTYPE EINVAL #endif #include "cdf.h" #ifdef CDF_DEBUG #define DPRINTF(a) printf a, fflush(stdout) #else #define DPRINTF(a) #endif static union { char s[4]; uint32_t u; } cdf_bo; #define NEED_SWAP (cdf_bo.u == (uint32_t)0x01020304) #define CDF_TOLE8(x) ((uint64_t)(NEED_SWAP ? _cdf_tole8(x) : (uint64_t)(x))) #define CDF_TOLE4(x) ((uint32_t)(NEED_SWAP ? _cdf_tole4(x) : (uint32_t)(x))) #define CDF_TOLE2(x) ((uint16_t)(NEED_SWAP ? _cdf_tole2(x) : (uint16_t)(x))) #define CDF_GETUINT32(x, y) cdf_getuint32(x, y) /* * swap a short */ static uint16_t _cdf_tole2(uint16_t sv) { uint16_t rv; uint8_t *s = (uint8_t *)(void *)&sv; uint8_t *d = (uint8_t *)(void *)&rv; d[0] = s[1]; d[1] = s[0]; return rv; } /* * swap an int */ static uint32_t _cdf_tole4(uint32_t sv) { uint32_t rv; uint8_t *s = (uint8_t *)(void *)&sv; uint8_t *d = (uint8_t *)(void *)&rv; d[0] = s[3]; d[1] = s[2]; d[2] = s[1]; d[3] = s[0]; return rv; } /* * swap a quad */ static uint64_t _cdf_tole8(uint64_t sv) { uint64_t rv; uint8_t *s = (uint8_t *)(void *)&sv; uint8_t *d = (uint8_t *)(void *)&rv; d[0] = s[7]; d[1] = s[6]; d[2] = s[5]; d[3] = s[4]; d[4] = s[3]; d[5] = s[2]; d[6] = s[1]; d[7] = s[0]; return rv; } /* * grab a uint32_t from a possibly unaligned address, and return it in * the native host order. 
*/ static uint32_t cdf_getuint32(const uint8_t *p, size_t offs) { uint32_t rv; (void)memcpy(&rv, p + offs * sizeof(uint32_t), sizeof(rv)); return CDF_TOLE4(rv); } #define CDF_UNPACK(a) \ (void)memcpy(&(a), &buf[len], sizeof(a)), len += sizeof(a) #define CDF_UNPACKA(a) \ (void)memcpy((a), &buf[len], sizeof(a)), len += sizeof(a) uint16_t cdf_tole2(uint16_t sv) { return CDF_TOLE2(sv); } uint32_t cdf_tole4(uint32_t sv) { return CDF_TOLE4(sv); } uint64_t cdf_tole8(uint64_t sv) { return CDF_TOLE8(sv); } void cdf_swap_header(cdf_header_t *h) { size_t i; h->h_magic = CDF_TOLE8(h->h_magic); h->h_uuid[0] = CDF_TOLE8(h->h_uuid[0]); h->h_uuid[1] = CDF_TOLE8(h->h_uuid[1]); h->h_revision = CDF_TOLE2(h->h_revision); h->h_version = CDF_TOLE2(h->h_version); h->h_byte_order = CDF_TOLE2(h->h_byte_order); h->h_sec_size_p2 = CDF_TOLE2(h->h_sec_size_p2); h->h_short_sec_size_p2 = CDF_TOLE2(h->h_short_sec_size_p2); h->h_num_sectors_in_sat = CDF_TOLE4(h->h_num_sectors_in_sat); h->h_secid_first_directory = CDF_TOLE4(h->h_secid_first_directory); h->h_min_size_standard_stream = CDF_TOLE4(h->h_min_size_standard_stream); h->h_secid_first_sector_in_short_sat = CDF_TOLE4((uint32_t)h->h_secid_first_sector_in_short_sat); h->h_num_sectors_in_short_sat = CDF_TOLE4(h->h_num_sectors_in_short_sat); h->h_secid_first_sector_in_master_sat = CDF_TOLE4((uint32_t)h->h_secid_first_sector_in_master_sat); h->h_num_sectors_in_master_sat = CDF_TOLE4(h->h_num_sectors_in_master_sat); for (i = 0; i < __arraycount(h->h_master_sat); i++) h->h_master_sat[i] = CDF_TOLE4((uint32_t)h->h_master_sat[i]); } void cdf_unpack_header(cdf_header_t *h, char *buf) { size_t i; size_t len = 0; CDF_UNPACK(h->h_magic); CDF_UNPACKA(h->h_uuid); CDF_UNPACK(h->h_revision); CDF_UNPACK(h->h_version); CDF_UNPACK(h->h_byte_order); CDF_UNPACK(h->h_sec_size_p2); CDF_UNPACK(h->h_short_sec_size_p2); CDF_UNPACKA(h->h_unused0); CDF_UNPACK(h->h_num_sectors_in_sat); CDF_UNPACK(h->h_secid_first_directory); CDF_UNPACKA(h->h_unused1); CDF_UNPACK(h->h_min_size_standard_stream); CDF_UNPACK(h->h_secid_first_sector_in_short_sat); CDF_UNPACK(h->h_num_sectors_in_short_sat); CDF_UNPACK(h->h_secid_first_sector_in_master_sat); CDF_UNPACK(h->h_num_sectors_in_master_sat); for (i = 0; i < __arraycount(h->h_master_sat); i++) CDF_UNPACK(h->h_master_sat[i]); } void cdf_swap_dir(cdf_directory_t *d) { d->d_namelen = CDF_TOLE2(d->d_namelen); d->d_left_child = CDF_TOLE4((uint32_t)d->d_left_child); d->d_right_child = CDF_TOLE4((uint32_t)d->d_right_child); d->d_storage = CDF_TOLE4((uint32_t)d->d_storage); d->d_storage_uuid[0] = CDF_TOLE8(d->d_storage_uuid[0]); d->d_storage_uuid[1] = CDF_TOLE8(d->d_storage_uuid[1]); d->d_flags = CDF_TOLE4(d->d_flags); d->d_created = CDF_TOLE8((uint64_t)d->d_created); d->d_modified = CDF_TOLE8((uint64_t)d->d_modified); d->d_stream_first_sector = CDF_TOLE4((uint32_t)d->d_stream_first_sector); d->d_size = CDF_TOLE4(d->d_size); } void cdf_swap_class(cdf_classid_t *d) { d->cl_dword = CDF_TOLE4(d->cl_dword); d->cl_word[0] = CDF_TOLE2(d->cl_word[0]); d->cl_word[1] = CDF_TOLE2(d->cl_word[1]); } void cdf_unpack_dir(cdf_directory_t *d, char *buf) { size_t len = 0; CDF_UNPACKA(d->d_name); CDF_UNPACK(d->d_namelen); CDF_UNPACK(d->d_type); CDF_UNPACK(d->d_color); CDF_UNPACK(d->d_left_child); CDF_UNPACK(d->d_right_child); CDF_UNPACK(d->d_storage); CDF_UNPACKA(d->d_storage_uuid); CDF_UNPACK(d->d_flags); CDF_UNPACK(d->d_created); CDF_UNPACK(d->d_modified); CDF_UNPACK(d->d_stream_first_sector); CDF_UNPACK(d->d_size); CDF_UNPACK(d->d_unused0); } static int cdf_check_stream_offset(const 
cdf_stream_t *sst, const cdf_header_t *h, const void *p, size_t tail, int line) { const char *b = (const char *)sst->sst_tab; const char *e = ((const char *)p) + tail; (void)&line; if (e >= b && (size_t)(e - b) < CDF_SEC_SIZE(h) * sst->sst_len) return 0; DPRINTF(("%d: offset begin %p end %p %" SIZE_T_FORMAT "u" " >= %" SIZE_T_FORMAT "u [%" SIZE_T_FORMAT "u %" SIZE_T_FORMAT "u]\n", line, b, e, (size_t)(e - b), CDF_SEC_SIZE(h) * sst->sst_len, CDF_SEC_SIZE(h), sst->sst_len)); errno = EFTYPE; return -1; } static ssize_t cdf_read(const cdf_info_t *info, off_t off, void *buf, size_t len) { size_t siz = (size_t)off + len; if ((off_t)(off + len) != (off_t)siz) { errno = EINVAL; return -1; } if (info->i_buf != NULL && info->i_len >= siz) { (void)memcpy(buf, &info->i_buf[off], len); return (ssize_t)len; } if (info->i_fd == -1) return -1; if (lseek(info->i_fd, off, SEEK_SET) == (off_t)-1) return -1; if (read(info->i_fd, buf, len) != (ssize_t)len) return -1; return (ssize_t)len; } int cdf_read_header(const cdf_info_t *info, cdf_header_t *h) { char buf[512]; (void)memcpy(cdf_bo.s, "\01\02\03\04", 4); if (cdf_read(info, (off_t)0, buf, sizeof(buf)) == -1) return -1; cdf_unpack_header(h, buf); cdf_swap_header(h); if (h->h_magic != CDF_MAGIC) { DPRINTF(("Bad magic 0x%" INT64_T_FORMAT "x != 0x%" INT64_T_FORMAT "x\n", (unsigned long long)h->h_magic, (unsigned long long)CDF_MAGIC)); goto out; } if (h->h_sec_size_p2 > 20) { DPRINTF(("Bad sector size 0x%u\n", h->h_sec_size_p2)); goto out; } if (h->h_short_sec_size_p2 > 20) { DPRINTF(("Bad short sector size 0x%u\n", h->h_short_sec_size_p2)); goto out; } return 0; out: errno = EFTYPE; return -1; } ssize_t cdf_read_sector(const cdf_info_t *info, void *buf, size_t offs, size_t len, const cdf_header_t *h, cdf_secid_t id) { assert((size_t)CDF_SEC_SIZE(h) == len); return cdf_read(info, (off_t)CDF_SEC_POS(h, id), ((char *)buf) + offs, len); } ssize_t cdf_read_short_sector(const cdf_stream_t *sst, void *buf, size_t offs, size_t len, const cdf_header_t *h, cdf_secid_t id) { assert((size_t)CDF_SHORT_SEC_SIZE(h) == len); (void)memcpy(((char *)buf) + offs, ((const char *)sst->sst_tab) + CDF_SHORT_SEC_POS(h, id), len); return len; } /* * Read the sector allocation table. 
*/ int cdf_read_sat(const cdf_info_t *info, cdf_header_t *h, cdf_sat_t *sat) { size_t i, j, k; size_t ss = CDF_SEC_SIZE(h); cdf_secid_t *msa, mid, sec; size_t nsatpersec = (ss / sizeof(mid)) - 1; for (i = 0; i < __arraycount(h->h_master_sat); i++) if (h->h_master_sat[i] == CDF_SECID_FREE) break; #define CDF_SEC_LIMIT (UINT32_MAX / (4 * ss)) if ((nsatpersec > 0 && h->h_num_sectors_in_master_sat > CDF_SEC_LIMIT / nsatpersec) || i > CDF_SEC_LIMIT) { DPRINTF(("Number of sectors in master SAT too big %u %" SIZE_T_FORMAT "u\n", h->h_num_sectors_in_master_sat, i)); errno = EFTYPE; return -1; } sat->sat_len = h->h_num_sectors_in_master_sat * nsatpersec + i; DPRINTF(("sat_len = %" SIZE_T_FORMAT "u ss = %" SIZE_T_FORMAT "u\n", sat->sat_len, ss)); if ((sat->sat_tab = CAST(cdf_secid_t *, calloc(sat->sat_len, ss))) == NULL) return -1; for (i = 0; i < __arraycount(h->h_master_sat); i++) { if (h->h_master_sat[i] < 0) break; if (cdf_read_sector(info, sat->sat_tab, ss * i, ss, h, h->h_master_sat[i]) != (ssize_t)ss) { DPRINTF(("Reading sector %d", h->h_master_sat[i])); goto out1; } } if ((msa = CAST(cdf_secid_t *, calloc(1, ss))) == NULL) goto out1; mid = h->h_secid_first_sector_in_master_sat; for (j = 0; j < h->h_num_sectors_in_master_sat; j++) { if (mid < 0) goto out; if (j >= CDF_LOOP_LIMIT) { DPRINTF(("Reading master sector loop limit")); errno = EFTYPE; goto out2; } if (cdf_read_sector(info, msa, 0, ss, h, mid) != (ssize_t)ss) { DPRINTF(("Reading master sector %d", mid)); goto out2; } for (k = 0; k < nsatpersec; k++, i++) { sec = CDF_TOLE4((uint32_t)msa[k]); if (sec < 0) goto out; if (i >= sat->sat_len) { DPRINTF(("Out of bounds reading MSA %u >= %u", i, sat->sat_len)); errno = EFTYPE; goto out2; } if (cdf_read_sector(info, sat->sat_tab, ss * i, ss, h, sec) != (ssize_t)ss) { DPRINTF(("Reading sector %d", CDF_TOLE4(msa[k]))); goto out2; } } mid = CDF_TOLE4((uint32_t)msa[nsatpersec]); } out: sat->sat_len = i; free(msa); return 0; out2: free(msa); out1: free(sat->sat_tab); return -1; } size_t cdf_count_chain(const cdf_sat_t *sat, cdf_secid_t sid, size_t size) { size_t i, j; cdf_secid_t maxsector = (cdf_secid_t)(sat->sat_len * size); DPRINTF(("Chain:")); for (j = i = 0; sid >= 0; i++, j++) { DPRINTF((" %d", sid)); if (j >= CDF_LOOP_LIMIT) { DPRINTF(("Counting chain loop limit")); errno = EFTYPE; return (size_t)-1; } if (sid > maxsector) { DPRINTF(("Sector %d > %d\n", sid, maxsector)); errno = EFTYPE; return (size_t)-1; } sid = CDF_TOLE4((uint32_t)sat->sat_tab[sid]); } DPRINTF(("\n")); return i; } int cdf_read_long_sector_chain(const cdf_info_t *info, const cdf_header_t *h, const cdf_sat_t *sat, cdf_secid_t sid, size_t len, cdf_stream_t *scn) { size_t ss = CDF_SEC_SIZE(h), i, j; ssize_t nr; scn->sst_len = cdf_count_chain(sat, sid, ss); scn->sst_dirlen = len; if (scn->sst_len == (size_t)-1) return -1; scn->sst_tab = calloc(scn->sst_len, ss); if (scn->sst_tab == NULL) return -1; for (j = i = 0; sid >= 0; i++, j++) { if (j >= CDF_LOOP_LIMIT) { DPRINTF(("Read long sector chain loop limit")); errno = EFTYPE; goto out; } if (i >= scn->sst_len) { DPRINTF(("Out of bounds reading long sector chain " "%u > %u\n", i, scn->sst_len)); errno = EFTYPE; goto out; } if ((nr = cdf_read_sector(info, scn->sst_tab, i * ss, ss, h, sid)) != (ssize_t)ss) { if (i == scn->sst_len - 1 && nr > 0) { /* Last sector might be truncated */ return 0; } DPRINTF(("Reading long sector chain %d", sid)); goto out; } sid = CDF_TOLE4((uint32_t)sat->sat_tab[sid]); } return 0; out: free(scn->sst_tab); return -1; } int 
cdf_read_short_sector_chain(const cdf_header_t *h, const cdf_sat_t *ssat, const cdf_stream_t *sst, cdf_secid_t sid, size_t len, cdf_stream_t *scn) { size_t ss = CDF_SHORT_SEC_SIZE(h), i, j; scn->sst_len = cdf_count_chain(ssat, sid, CDF_SEC_SIZE(h)); scn->sst_dirlen = len; if (sst->sst_tab == NULL || scn->sst_len == (size_t)-1) return -1; scn->sst_tab = calloc(scn->sst_len, ss); if (scn->sst_tab == NULL) return -1; for (j = i = 0; sid >= 0; i++, j++) { if (j >= CDF_LOOP_LIMIT) { DPRINTF(("Read short sector chain loop limit")); errno = EFTYPE; goto out; } if (i >= scn->sst_len) { DPRINTF(("Out of bounds reading short sector chain " "%u > %u\n", i, scn->sst_len)); errno = EFTYPE; goto out; } if (cdf_read_short_sector(sst, scn->sst_tab, i * ss, ss, h, sid) != (ssize_t)ss) { DPRINTF(("Reading short sector chain %d", sid)); goto out; } sid = CDF_TOLE4((uint32_t)ssat->sat_tab[sid]); } return 0; out: free(scn->sst_tab); return -1; } int cdf_read_sector_chain(const cdf_info_t *info, const cdf_header_t *h, const cdf_sat_t *sat, const cdf_sat_t *ssat, const cdf_stream_t *sst, cdf_secid_t sid, size_t len, cdf_stream_t *scn) { if (len < h->h_min_size_standard_stream && sst->sst_tab != NULL) return cdf_read_short_sector_chain(h, ssat, sst, sid, len, scn); else return cdf_read_long_sector_chain(info, h, sat, sid, len, scn); } int cdf_read_dir(const cdf_info_t *info, const cdf_header_t *h, const cdf_sat_t *sat, cdf_dir_t *dir) { size_t i, j; size_t ss = CDF_SEC_SIZE(h), ns, nd; char *buf; cdf_secid_t sid = h->h_secid_first_directory; ns = cdf_count_chain(sat, sid, ss); if (ns == (size_t)-1) return -1; nd = ss / CDF_DIRECTORY_SIZE; dir->dir_len = ns * nd; dir->dir_tab = CAST(cdf_directory_t *, calloc(dir->dir_len, sizeof(dir->dir_tab[0]))); if (dir->dir_tab == NULL) return -1; if ((buf = CAST(char *, malloc(ss))) == NULL) { free(dir->dir_tab); return -1; } for (j = i = 0; i < ns; i++, j++) { if (j >= CDF_LOOP_LIMIT) { DPRINTF(("Read dir loop limit")); errno = EFTYPE; goto out; } if (cdf_read_sector(info, buf, 0, ss, h, sid) != (ssize_t)ss) { DPRINTF(("Reading directory sector %d", sid)); goto out; } for (j = 0; j < nd; j++) { cdf_unpack_dir(&dir->dir_tab[i * nd + j], &buf[j * CDF_DIRECTORY_SIZE]); } sid = CDF_TOLE4((uint32_t)sat->sat_tab[sid]); } if (NEED_SWAP) for (i = 0; i < dir->dir_len; i++) cdf_swap_dir(&dir->dir_tab[i]); free(buf); return 0; out: free(dir->dir_tab); free(buf); return -1; } int cdf_read_ssat(const cdf_info_t *info, const cdf_header_t *h, const cdf_sat_t *sat, cdf_sat_t *ssat) { size_t i, j; size_t ss = CDF_SEC_SIZE(h); cdf_secid_t sid = h->h_secid_first_sector_in_short_sat; ssat->sat_len = cdf_count_chain(sat, sid, CDF_SEC_SIZE(h)); if (ssat->sat_len == (size_t)-1) return -1; ssat->sat_tab = CAST(cdf_secid_t *, calloc(ssat->sat_len, ss)); if (ssat->sat_tab == NULL) return -1; for (j = i = 0; sid >= 0; i++, j++) { if (j >= CDF_LOOP_LIMIT) { DPRINTF(("Read short sat sector loop limit")); errno = EFTYPE; goto out; } if (i >= ssat->sat_len) { DPRINTF(("Out of bounds reading short sector chain " "%u > %u\n", i, ssat->sat_len)); errno = EFTYPE; goto out; } if (cdf_read_sector(info, ssat->sat_tab, i * ss, ss, h, sid) != (ssize_t)ss) { DPRINTF(("Reading short sat sector %d", sid)); goto out; } sid = CDF_TOLE4((uint32_t)sat->sat_tab[sid]); } return 0; out: free(ssat->sat_tab); return -1; } int cdf_read_short_stream(const cdf_info_t *info, const cdf_header_t *h, const cdf_sat_t *sat, const cdf_dir_t *dir, cdf_stream_t *scn) { size_t i; const cdf_directory_t *d; for (i = 0; i < dir->dir_len; 
i++) if (dir->dir_tab[i].d_type == CDF_DIR_TYPE_ROOT_STORAGE) break; /* If the it is not there, just fake it; some docs don't have it */ if (i == dir->dir_len) goto out; d = &dir->dir_tab[i]; /* If the it is not there, just fake it; some docs don't have it */ if (d->d_stream_first_sector < 0) goto out; return cdf_read_long_sector_chain(info, h, sat, d->d_stream_first_sector, d->d_size, scn); out: scn->sst_tab = NULL; scn->sst_len = 0; scn->sst_dirlen = 0; return 0; } static int cdf_namecmp(const char *d, const uint16_t *s, size_t l) { for (; l--; d++, s++) if (*d != CDF_TOLE2(*s)) return (unsigned char)*d - CDF_TOLE2(*s); return 0; } int cdf_read_summary_info(const cdf_info_t *info, const cdf_header_t *h, const cdf_sat_t *sat, const cdf_sat_t *ssat, const cdf_stream_t *sst, const cdf_dir_t *dir, cdf_stream_t *scn) { size_t i; const cdf_directory_t *d; static const char name[] = "\05SummaryInformation"; for (i = dir->dir_len; i > 0; i--) if (dir->dir_tab[i - 1].d_type == CDF_DIR_TYPE_USER_STREAM && cdf_namecmp(name, dir->dir_tab[i - 1].d_name, sizeof(name)) == 0) break; if (i == 0) { DPRINTF(("Cannot find summary information section\n")); errno = ESRCH; return -1; } d = &dir->dir_tab[i - 1]; return cdf_read_sector_chain(info, h, sat, ssat, sst, d->d_stream_first_sector, d->d_size, scn); } int cdf_read_property_info(const cdf_stream_t *sst, const cdf_header_t *h, uint32_t offs, cdf_property_info_t **info, size_t *count, size_t *maxcount) { const cdf_section_header_t *shp; cdf_section_header_t sh; const uint8_t *p, *q, *e; int16_t s16; int32_t s32; uint32_t u32; int64_t s64; uint64_t u64; cdf_timestamp_t tp; size_t i, o, o4, nelements, j; cdf_property_info_t *inp; if (offs > UINT32_MAX / 4) { errno = EFTYPE; goto out; } shp = CAST(const cdf_section_header_t *, (const void *) ((const char *)sst->sst_tab + offs)); if (cdf_check_stream_offset(sst, h, shp, sizeof(*shp), __LINE__) == -1) goto out; sh.sh_len = CDF_TOLE4(shp->sh_len); #define CDF_SHLEN_LIMIT (UINT32_MAX / 8) if (sh.sh_len > CDF_SHLEN_LIMIT) { errno = EFTYPE; goto out; } sh.sh_properties = CDF_TOLE4(shp->sh_properties); #define CDF_PROP_LIMIT (UINT32_MAX / (4 * sizeof(*inp))) if (sh.sh_properties > CDF_PROP_LIMIT) goto out; DPRINTF(("section len: %u properties %u\n", sh.sh_len, sh.sh_properties)); if (*maxcount) { if (*maxcount > CDF_PROP_LIMIT) goto out; *maxcount += sh.sh_properties; inp = CAST(cdf_property_info_t *, realloc(*info, *maxcount * sizeof(*inp))); } else { *maxcount = sh.sh_properties; inp = CAST(cdf_property_info_t *, malloc(*maxcount * sizeof(*inp))); } if (inp == NULL) goto out; *info = inp; inp += *count; *count += sh.sh_properties; p = CAST(const uint8_t *, (const void *) ((const char *)(const void *)sst->sst_tab + offs + sizeof(sh))); e = CAST(const uint8_t *, (const void *) (((const char *)(const void *)shp) + sh.sh_len)); if (cdf_check_stream_offset(sst, h, e, 0, __LINE__) == -1) goto out; for (i = 0; i < sh.sh_properties; i++) { q = (const uint8_t *)(const void *) ((const char *)(const void *)p + CDF_GETUINT32(p, (i << 1) + 1)) - 2 * sizeof(uint32_t); if (q > e) { DPRINTF(("Ran of the end %p > %p\n", q, e)); goto out; } inp[i].pi_id = CDF_GETUINT32(p, i << 1); inp[i].pi_type = CDF_GETUINT32(q, 0); DPRINTF(("%d) id=%x type=%x offs=%x,%d\n", i, inp[i].pi_id, inp[i].pi_type, q - p, CDF_GETUINT32(p, (i << 1) + 1))); if (inp[i].pi_type & CDF_VECTOR) { nelements = CDF_GETUINT32(q, 1); o = 2; } else { nelements = 1; o = 1; } o4 = o * sizeof(uint32_t); if (inp[i].pi_type & (CDF_ARRAY|CDF_BYREF|CDF_RESERVED)) goto 
unknown; switch (inp[i].pi_type & CDF_TYPEMASK) { case CDF_NULL: case CDF_EMPTY: break; case CDF_SIGNED16: if (inp[i].pi_type & CDF_VECTOR) goto unknown; (void)memcpy(&s16, &q[o4], sizeof(s16)); inp[i].pi_s16 = CDF_TOLE2(s16); break; case CDF_SIGNED32: if (inp[i].pi_type & CDF_VECTOR) goto unknown; (void)memcpy(&s32, &q[o4], sizeof(s32)); inp[i].pi_s32 = CDF_TOLE4((uint32_t)s32); break; case CDF_BOOL: case CDF_UNSIGNED32: if (inp[i].pi_type & CDF_VECTOR) goto unknown; (void)memcpy(&u32, &q[o4], sizeof(u32)); inp[i].pi_u32 = CDF_TOLE4(u32); break; case CDF_SIGNED64: if (inp[i].pi_type & CDF_VECTOR) goto unknown; (void)memcpy(&s64, &q[o4], sizeof(s64)); inp[i].pi_s64 = CDF_TOLE8((uint64_t)s64); break; case CDF_UNSIGNED64: if (inp[i].pi_type & CDF_VECTOR) goto unknown; (void)memcpy(&u64, &q[o4], sizeof(u64)); inp[i].pi_u64 = CDF_TOLE8((uint64_t)u64); break; case CDF_LENGTH32_STRING: case CDF_LENGTH32_WSTRING: if (nelements > 1) { size_t nelem = inp - *info; if (*maxcount > CDF_PROP_LIMIT || nelements > CDF_PROP_LIMIT) goto out; *maxcount += nelements; inp = CAST(cdf_property_info_t *, realloc(*info, *maxcount * sizeof(*inp))); if (inp == NULL) goto out; *info = inp; inp = *info + nelem; } DPRINTF(("nelements = %d\n", nelements)); for (j = 0; j < nelements; j++, i++) { uint32_t l = CDF_GETUINT32(q, o); inp[i].pi_str.s_len = l; inp[i].pi_str.s_buf = (const char *) (const void *)(&q[o4 + sizeof(l)]); DPRINTF(("l = %d, r = %d, s = %s\n", l, CDF_ROUND(l, sizeof(l)), inp[i].pi_str.s_buf)); l = 4 + (uint32_t)CDF_ROUND(l, sizeof(l)); o += l >> 2; o4 = o * sizeof(uint32_t); } i--; break; case CDF_FILETIME: if (inp[i].pi_type & CDF_VECTOR) goto unknown; (void)memcpy(&tp, &q[o4], sizeof(tp)); inp[i].pi_tp = CDF_TOLE8((uint64_t)tp); break; case CDF_CLIPBOARD: if (inp[i].pi_type & CDF_VECTOR) goto unknown; break; default: unknown: DPRINTF(("Don't know how to deal with %x\n", inp[i].pi_type)); goto out; } } return 0; out: free(*info); return -1; } int cdf_unpack_summary_info(const cdf_stream_t *sst, const cdf_header_t *h, cdf_summary_info_header_t *ssi, cdf_property_info_t **info, size_t *count) { size_t i, maxcount; const cdf_summary_info_header_t *si = CAST(const cdf_summary_info_header_t *, sst->sst_tab); const cdf_section_declaration_t *sd = CAST(const cdf_section_declaration_t *, (const void *) ((const char *)sst->sst_tab + CDF_SECTION_DECLARATION_OFFSET)); if (cdf_check_stream_offset(sst, h, si, sizeof(*si), __LINE__) == -1 || cdf_check_stream_offset(sst, h, sd, sizeof(*sd), __LINE__) == -1) return -1; ssi->si_byte_order = CDF_TOLE2(si->si_byte_order); ssi->si_os_version = CDF_TOLE2(si->si_os_version); ssi->si_os = CDF_TOLE2(si->si_os); ssi->si_class = si->si_class; cdf_swap_class(&ssi->si_class); ssi->si_count = CDF_TOLE2(si->si_count); *count = 0; maxcount = 0; *info = NULL; for (i = 0; i < CDF_TOLE4(si->si_count); i++) { if (i >= CDF_LOOP_LIMIT) { DPRINTF(("Unpack summary info loop limit")); errno = EFTYPE; return -1; } if (cdf_read_property_info(sst, h, CDF_TOLE4(sd->sd_offset), info, count, &maxcount) == -1) return -1; } return 0; } int cdf_print_classid(char *buf, size_t buflen, const cdf_classid_t *id) { return snprintf(buf, buflen, "%.8x-%.4x-%.4x-%.2x%.2x-" "%.2x%.2x%.2x%.2x%.2x%.2x", id->cl_dword, id->cl_word[0], id->cl_word[1], id->cl_two[0], id->cl_two[1], id->cl_six[0], id->cl_six[1], id->cl_six[2], id->cl_six[3], id->cl_six[4], id->cl_six[5]); } static const struct { uint32_t v; const char *n; } vn[] = { { CDF_PROPERTY_CODE_PAGE, "Code page" }, { CDF_PROPERTY_TITLE, "Title" }, { 
CDF_PROPERTY_SUBJECT, "Subject" }, { CDF_PROPERTY_AUTHOR, "Author" }, { CDF_PROPERTY_KEYWORDS, "Keywords" }, { CDF_PROPERTY_COMMENTS, "Comments" }, { CDF_PROPERTY_TEMPLATE, "Template" }, { CDF_PROPERTY_LAST_SAVED_BY, "Last Saved By" }, { CDF_PROPERTY_REVISION_NUMBER, "Revision Number" }, { CDF_PROPERTY_TOTAL_EDITING_TIME, "Total Editing Time" }, { CDF_PROPERTY_LAST_PRINTED, "Last Printed" }, { CDF_PROPERTY_CREATE_TIME, "Create Time/Date" }, { CDF_PROPERTY_LAST_SAVED_TIME, "Last Saved Time/Date" }, { CDF_PROPERTY_NUMBER_OF_PAGES, "Number of Pages" }, { CDF_PROPERTY_NUMBER_OF_WORDS, "Number of Words" }, { CDF_PROPERTY_NUMBER_OF_CHARACTERS, "Number of Characters" }, { CDF_PROPERTY_THUMBNAIL, "Thumbnail" }, { CDF_PROPERTY_NAME_OF_APPLICATION, "Name of Creating Application" }, { CDF_PROPERTY_SECURITY, "Security" }, { CDF_PROPERTY_LOCALE_ID, "Locale ID" }, }; int cdf_print_property_name(char *buf, size_t bufsiz, uint32_t p) { size_t i; for (i = 0; i < __arraycount(vn); i++) if (vn[i].v == p) return snprintf(buf, bufsiz, "%s", vn[i].n); return snprintf(buf, bufsiz, "0x%x", p); } int cdf_print_elapsed_time(char *buf, size_t bufsiz, cdf_timestamp_t ts) { int len = 0; int days, hours, mins, secs; ts /= CDF_TIME_PREC; secs = (int)(ts % 60); ts /= 60; mins = (int)(ts % 60); ts /= 60; hours = (int)(ts % 24); ts /= 24; days = (int)ts; if (days) { len += snprintf(buf + len, bufsiz - len, "%dd+", days); if ((size_t)len >= bufsiz) return len; } if (days || hours) { len += snprintf(buf + len, bufsiz - len, "%.2d:", hours); if ((size_t)len >= bufsiz) return len; } len += snprintf(buf + len, bufsiz - len, "%.2d:", mins); if ((size_t)len >= bufsiz) return len; len += snprintf(buf + len, bufsiz - len, "%.2d", secs); return len; } #ifdef CDF_DEBUG void cdf_dump_header(const cdf_header_t *h) { size_t i; #define DUMP(a, b) (void)fprintf(stderr, "%40.40s = " a "\n", # b, h->h_ ## b) #define DUMP2(a, b) (void)fprintf(stderr, "%40.40s = " a " (" a ")\n", # b, \ h->h_ ## b, 1 << h->h_ ## b) DUMP("%d", revision); DUMP("%d", version); DUMP("0x%x", byte_order); DUMP2("%d", sec_size_p2); DUMP2("%d", short_sec_size_p2); DUMP("%d", num_sectors_in_sat); DUMP("%d", secid_first_directory); DUMP("%d", min_size_standard_stream); DUMP("%d", secid_first_sector_in_short_sat); DUMP("%d", num_sectors_in_short_sat); DUMP("%d", secid_first_sector_in_master_sat); DUMP("%d", num_sectors_in_master_sat); for (i = 0; i < __arraycount(h->h_master_sat); i++) { if (h->h_master_sat[i] == CDF_SECID_FREE) break; (void)fprintf(stderr, "%35.35s[%.3zu] = %d\n", "master_sat", i, h->h_master_sat[i]); } } void cdf_dump_sat(const char *prefix, const cdf_sat_t *sat, size_t size) { size_t i, j, s = size / sizeof(cdf_secid_t); for (i = 0; i < sat->sat_len; i++) { (void)fprintf(stderr, "%s[%" SIZE_T_FORMAT "u]:\n%.6d: ", prefix, i, i * s); for (j = 0; j < s; j++) { (void)fprintf(stderr, "%5d, ", CDF_TOLE4(sat->sat_tab[s * i + j])); if ((j + 1) % 10 == 0) (void)fprintf(stderr, "\n%.6d: ", i * s + j + 1); } (void)fprintf(stderr, "\n"); } } void cdf_dump(void *v, size_t len) { size_t i, j; unsigned char *p = v; char abuf[16]; (void)fprintf(stderr, "%.4x: ", 0); for (i = 0, j = 0; i < len; i++, p++) { (void)fprintf(stderr, "%.2x ", *p); abuf[j++] = isprint(*p) ? *p : '.'; if (j == 16) { j = 0; abuf[15] = '\0'; (void)fprintf(stderr, "%s\n%.4x: ", abuf, i + 1); } } (void)fprintf(stderr, "\n"); } void cdf_dump_stream(const cdf_header_t *h, const cdf_stream_t *sst) { size_t ss = sst->sst_dirlen < h->h_min_size_standard_stream ? 
CDF_SHORT_SEC_SIZE(h) : CDF_SEC_SIZE(h); cdf_dump(sst->sst_tab, ss * sst->sst_len); } void cdf_dump_dir(const cdf_info_t *info, const cdf_header_t *h, const cdf_sat_t *sat, const cdf_sat_t *ssat, const cdf_stream_t *sst, const cdf_dir_t *dir) { size_t i, j; cdf_directory_t *d; char name[__arraycount(d->d_name)]; cdf_stream_t scn; struct timespec ts; static const char *types[] = { "empty", "user storage", "user stream", "lockbytes", "property", "root storage" }; for (i = 0; i < dir->dir_len; i++) { d = &dir->dir_tab[i]; for (j = 0; j < sizeof(name); j++) name[j] = (char)CDF_TOLE2(d->d_name[j]); (void)fprintf(stderr, "Directory %" SIZE_T_FORMAT "u: %s\n", i, name); if (d->d_type < __arraycount(types)) (void)fprintf(stderr, "Type: %s\n", types[d->d_type]); else (void)fprintf(stderr, "Type: %d\n", d->d_type); (void)fprintf(stderr, "Color: %s\n", d->d_color ? "black" : "red"); (void)fprintf(stderr, "Left child: %d\n", d->d_left_child); (void)fprintf(stderr, "Right child: %d\n", d->d_right_child); (void)fprintf(stderr, "Flags: 0x%x\n", d->d_flags); cdf_timestamp_to_timespec(&ts, d->d_created); (void)fprintf(stderr, "Created %s", cdf_ctime(&ts.tv_sec)); cdf_timestamp_to_timespec(&ts, d->d_modified); (void)fprintf(stderr, "Modified %s", cdf_ctime(&ts.tv_sec)); (void)fprintf(stderr, "Stream %d\n", d->d_stream_first_sector); (void)fprintf(stderr, "Size %d\n", d->d_size); switch (d->d_type) { case CDF_DIR_TYPE_USER_STORAGE: (void)fprintf(stderr, "Storage: %d\n", d->d_storage); break; case CDF_DIR_TYPE_USER_STREAM: if (sst == NULL) break; if (cdf_read_sector_chain(info, h, sat, ssat, sst, d->d_stream_first_sector, d->d_size, &scn) == -1) { warn("Can't read stream for %s at %d len %d", name, d->d_stream_first_sector, d->d_size); break; } cdf_dump_stream(h, &scn); free(scn.sst_tab); break; default: break; } } } void cdf_dump_property_info(const cdf_property_info_t *info, size_t count) { cdf_timestamp_t tp; struct timespec ts; char buf[64]; size_t i, j; for (i = 0; i < count; i++) { cdf_print_property_name(buf, sizeof(buf), info[i].pi_id); (void)fprintf(stderr, "%" SIZE_T_FORMAT "u) %s: ", i, buf); switch (info[i].pi_type) { case CDF_NULL: break; case CDF_SIGNED16: (void)fprintf(stderr, "signed 16 [%hd]\n", info[i].pi_s16); break; case CDF_SIGNED32: (void)fprintf(stderr, "signed 32 [%d]\n", info[i].pi_s32); break; case CDF_UNSIGNED32: (void)fprintf(stderr, "unsigned 32 [%u]\n", info[i].pi_u32); break; case CDF_LENGTH32_STRING: (void)fprintf(stderr, "string %u [%.*s]\n", info[i].pi_str.s_len, info[i].pi_str.s_len, info[i].pi_str.s_buf); break; case CDF_LENGTH32_WSTRING: (void)fprintf(stderr, "string %u [", info[i].pi_str.s_len); for (j = 0; j < info[i].pi_str.s_len - 1; j++) (void)fputc(info[i].pi_str.s_buf[j << 1], stderr); (void)fprintf(stderr, "]\n"); break; case CDF_FILETIME: tp = info[i].pi_tp; if (tp < 1000000000000000LL) { cdf_print_elapsed_time(buf, sizeof(buf), tp); (void)fprintf(stderr, "timestamp %s\n", buf); } else { cdf_timestamp_to_timespec(&ts, tp); (void)fprintf(stderr, "timestamp %s", cdf_ctime(&ts.tv_sec)); } break; case CDF_CLIPBOARD: (void)fprintf(stderr, "CLIPBOARD %u\n", info[i].pi_u32); break; default: DPRINTF(("Don't know how to deal with %x\n", info[i].pi_type)); break; } } } void cdf_dump_summary_info(const cdf_header_t *h, const cdf_stream_t *sst) { char buf[128]; cdf_summary_info_header_t ssi; cdf_property_info_t *info; size_t count; (void)&h; if (cdf_unpack_summary_info(sst, h, &ssi, &info, &count) == -1) return; (void)fprintf(stderr, "Endian: %x\n", ssi.si_byte_order); 
(void)fprintf(stderr, "Os Version %d.%d\n", ssi.si_os_version & 0xff, ssi.si_os_version >> 8); (void)fprintf(stderr, "Os %d\n", ssi.si_os); cdf_print_classid(buf, sizeof(buf), &ssi.si_class); (void)fprintf(stderr, "Class %s\n", buf); (void)fprintf(stderr, "Count %d\n", ssi.si_count); cdf_dump_property_info(info, count); free(info); } #endif #ifdef TEST int main(int argc, char *argv[]) { int i; cdf_header_t h; cdf_sat_t sat, ssat; cdf_stream_t sst, scn; cdf_dir_t dir; cdf_info_t info; if (argc < 2) { (void)fprintf(stderr, "Usage: %s <filename>\n", getprogname()); return -1; } info.i_buf = NULL; info.i_len = 0; for (i = 1; i < argc; i++) { if ((info.i_fd = open(argv[1], O_RDONLY)) == -1) err(1, "Cannot open `%s'", argv[1]); if (cdf_read_header(&info, &h) == -1) err(1, "Cannot read header"); #ifdef CDF_DEBUG cdf_dump_header(&h); #endif if (cdf_read_sat(&info, &h, &sat) == -1) err(1, "Cannot read sat"); #ifdef CDF_DEBUG cdf_dump_sat("SAT", &sat, CDF_SEC_SIZE(&h)); #endif if (cdf_read_ssat(&info, &h, &sat, &ssat) == -1) err(1, "Cannot read ssat"); #ifdef CDF_DEBUG cdf_dump_sat("SSAT", &ssat, CDF_SHORT_SEC_SIZE(&h)); #endif if (cdf_read_dir(&info, &h, &sat, &dir) == -1) err(1, "Cannot read dir"); if (cdf_read_short_stream(&info, &h, &sat, &dir, &sst) == -1) err(1, "Cannot read short stream"); #ifdef CDF_DEBUG cdf_dump_stream(&h, &sst); #endif #ifdef CDF_DEBUG cdf_dump_dir(&info, &h, &sat, &ssat, &sst, &dir); #endif if (cdf_read_summary_info(&info, &h, &sat, &ssat, &sst, &dir, &scn) == -1) err(1, "Cannot read summary info"); #ifdef CDF_DEBUG cdf_dump_summary_info(&h, &scn); #endif (void)close(info.i_fd); } return 0; } #endif
/*- * Copyright (c) 2008 Christos Zoulas * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /* * Parse Composite Document Files, the format used in Microsoft Office * document files before they switched to zipped XML. * Info from: http://sc.openoffice.org/compdocfileformat.pdf * * N.B. This is the "Composite Document File" format, and not the * "Compound Document Format", nor the "Channel Definition Format". */ #include "file.h" #ifndef lint FILE_RCSID("@(#)$File: cdf.c,v 1.46 2011/09/16 21:23:59 christos Exp $") #endif #include <assert.h> #ifdef CDF_DEBUG #include <err.h> #endif #include <stdlib.h> #include <unistd.h> #include <string.h> #include <time.h> #include <ctype.h> #ifdef HAVE_LIMITS_H #include <limits.h> #endif #ifndef EFTYPE #define EFTYPE EINVAL #endif #include "cdf.h" #ifdef CDF_DEBUG #define DPRINTF(a) printf a, fflush(stdout) #else #define DPRINTF(a) #endif static union { char s[4]; uint32_t u; } cdf_bo; #define NEED_SWAP (cdf_bo.u == (uint32_t)0x01020304) #define CDF_TOLE8(x) ((uint64_t)(NEED_SWAP ? _cdf_tole8(x) : (uint64_t)(x))) #define CDF_TOLE4(x) ((uint32_t)(NEED_SWAP ? _cdf_tole4(x) : (uint32_t)(x))) #define CDF_TOLE2(x) ((uint16_t)(NEED_SWAP ? _cdf_tole2(x) : (uint16_t)(x))) #define CDF_GETUINT32(x, y) cdf_getuint32(x, y) /* * swap a short */ static uint16_t _cdf_tole2(uint16_t sv) { uint16_t rv; uint8_t *s = (uint8_t *)(void *)&sv; uint8_t *d = (uint8_t *)(void *)&rv; d[0] = s[1]; d[1] = s[0]; return rv; } /* * swap an int */ static uint32_t _cdf_tole4(uint32_t sv) { uint32_t rv; uint8_t *s = (uint8_t *)(void *)&sv; uint8_t *d = (uint8_t *)(void *)&rv; d[0] = s[3]; d[1] = s[2]; d[2] = s[1]; d[3] = s[0]; return rv; } /* * swap a quad */ static uint64_t _cdf_tole8(uint64_t sv) { uint64_t rv; uint8_t *s = (uint8_t *)(void *)&sv; uint8_t *d = (uint8_t *)(void *)&rv; d[0] = s[7]; d[1] = s[6]; d[2] = s[5]; d[3] = s[4]; d[4] = s[3]; d[5] = s[2]; d[6] = s[1]; d[7] = s[0]; return rv; } /* * grab a uint32_t from a possibly unaligned address, and return it in * the native host order. 
*/ static uint32_t cdf_getuint32(const uint8_t *p, size_t offs) { uint32_t rv; (void)memcpy(&rv, p + offs * sizeof(uint32_t), sizeof(rv)); return CDF_TOLE4(rv); } #define CDF_UNPACK(a) \ (void)memcpy(&(a), &buf[len], sizeof(a)), len += sizeof(a) #define CDF_UNPACKA(a) \ (void)memcpy((a), &buf[len], sizeof(a)), len += sizeof(a) uint16_t cdf_tole2(uint16_t sv) { return CDF_TOLE2(sv); } uint32_t cdf_tole4(uint32_t sv) { return CDF_TOLE4(sv); } uint64_t cdf_tole8(uint64_t sv) { return CDF_TOLE8(sv); } void cdf_swap_header(cdf_header_t *h) { size_t i; h->h_magic = CDF_TOLE8(h->h_magic); h->h_uuid[0] = CDF_TOLE8(h->h_uuid[0]); h->h_uuid[1] = CDF_TOLE8(h->h_uuid[1]); h->h_revision = CDF_TOLE2(h->h_revision); h->h_version = CDF_TOLE2(h->h_version); h->h_byte_order = CDF_TOLE2(h->h_byte_order); h->h_sec_size_p2 = CDF_TOLE2(h->h_sec_size_p2); h->h_short_sec_size_p2 = CDF_TOLE2(h->h_short_sec_size_p2); h->h_num_sectors_in_sat = CDF_TOLE4(h->h_num_sectors_in_sat); h->h_secid_first_directory = CDF_TOLE4(h->h_secid_first_directory); h->h_min_size_standard_stream = CDF_TOLE4(h->h_min_size_standard_stream); h->h_secid_first_sector_in_short_sat = CDF_TOLE4((uint32_t)h->h_secid_first_sector_in_short_sat); h->h_num_sectors_in_short_sat = CDF_TOLE4(h->h_num_sectors_in_short_sat); h->h_secid_first_sector_in_master_sat = CDF_TOLE4((uint32_t)h->h_secid_first_sector_in_master_sat); h->h_num_sectors_in_master_sat = CDF_TOLE4(h->h_num_sectors_in_master_sat); for (i = 0; i < __arraycount(h->h_master_sat); i++) h->h_master_sat[i] = CDF_TOLE4((uint32_t)h->h_master_sat[i]); } void cdf_unpack_header(cdf_header_t *h, char *buf) { size_t i; size_t len = 0; CDF_UNPACK(h->h_magic); CDF_UNPACKA(h->h_uuid); CDF_UNPACK(h->h_revision); CDF_UNPACK(h->h_version); CDF_UNPACK(h->h_byte_order); CDF_UNPACK(h->h_sec_size_p2); CDF_UNPACK(h->h_short_sec_size_p2); CDF_UNPACKA(h->h_unused0); CDF_UNPACK(h->h_num_sectors_in_sat); CDF_UNPACK(h->h_secid_first_directory); CDF_UNPACKA(h->h_unused1); CDF_UNPACK(h->h_min_size_standard_stream); CDF_UNPACK(h->h_secid_first_sector_in_short_sat); CDF_UNPACK(h->h_num_sectors_in_short_sat); CDF_UNPACK(h->h_secid_first_sector_in_master_sat); CDF_UNPACK(h->h_num_sectors_in_master_sat); for (i = 0; i < __arraycount(h->h_master_sat); i++) CDF_UNPACK(h->h_master_sat[i]); } void cdf_swap_dir(cdf_directory_t *d) { d->d_namelen = CDF_TOLE2(d->d_namelen); d->d_left_child = CDF_TOLE4((uint32_t)d->d_left_child); d->d_right_child = CDF_TOLE4((uint32_t)d->d_right_child); d->d_storage = CDF_TOLE4((uint32_t)d->d_storage); d->d_storage_uuid[0] = CDF_TOLE8(d->d_storage_uuid[0]); d->d_storage_uuid[1] = CDF_TOLE8(d->d_storage_uuid[1]); d->d_flags = CDF_TOLE4(d->d_flags); d->d_created = CDF_TOLE8((uint64_t)d->d_created); d->d_modified = CDF_TOLE8((uint64_t)d->d_modified); d->d_stream_first_sector = CDF_TOLE4((uint32_t)d->d_stream_first_sector); d->d_size = CDF_TOLE4(d->d_size); } void cdf_swap_class(cdf_classid_t *d) { d->cl_dword = CDF_TOLE4(d->cl_dword); d->cl_word[0] = CDF_TOLE2(d->cl_word[0]); d->cl_word[1] = CDF_TOLE2(d->cl_word[1]); } void cdf_unpack_dir(cdf_directory_t *d, char *buf) { size_t len = 0; CDF_UNPACKA(d->d_name); CDF_UNPACK(d->d_namelen); CDF_UNPACK(d->d_type); CDF_UNPACK(d->d_color); CDF_UNPACK(d->d_left_child); CDF_UNPACK(d->d_right_child); CDF_UNPACK(d->d_storage); CDF_UNPACKA(d->d_storage_uuid); CDF_UNPACK(d->d_flags); CDF_UNPACK(d->d_created); CDF_UNPACK(d->d_modified); CDF_UNPACK(d->d_stream_first_sector); CDF_UNPACK(d->d_size); CDF_UNPACK(d->d_unused0); } static int cdf_check_stream_offset(const 
cdf_stream_t *sst, const cdf_header_t *h, const void *p, size_t tail, int line) { const char *b = (const char *)sst->sst_tab; const char *e = ((const char *)p) + tail; (void)&line; if (e >= b && (size_t)(e - b) < CDF_SEC_SIZE(h) * sst->sst_len) return 0; DPRINTF(("%d: offset begin %p end %p %" SIZE_T_FORMAT "u" " >= %" SIZE_T_FORMAT "u [%" SIZE_T_FORMAT "u %" SIZE_T_FORMAT "u]\n", line, b, e, (size_t)(e - b), CDF_SEC_SIZE(h) * sst->sst_len, CDF_SEC_SIZE(h), sst->sst_len)); errno = EFTYPE; return -1; } static ssize_t cdf_read(const cdf_info_t *info, off_t off, void *buf, size_t len) { size_t siz = (size_t)off + len; if ((off_t)(off + len) != (off_t)siz) { errno = EINVAL; return -1; } if (info->i_buf != NULL && info->i_len >= siz) { (void)memcpy(buf, &info->i_buf[off], len); return (ssize_t)len; } if (info->i_fd == -1) return -1; if (lseek(info->i_fd, off, SEEK_SET) == (off_t)-1) return -1; if (read(info->i_fd, buf, len) != (ssize_t)len) return -1; return (ssize_t)len; } int cdf_read_header(const cdf_info_t *info, cdf_header_t *h) { char buf[512]; (void)memcpy(cdf_bo.s, "\01\02\03\04", 4); if (cdf_read(info, (off_t)0, buf, sizeof(buf)) == -1) return -1; cdf_unpack_header(h, buf); cdf_swap_header(h); if (h->h_magic != CDF_MAGIC) { DPRINTF(("Bad magic 0x%" INT64_T_FORMAT "x != 0x%" INT64_T_FORMAT "x\n", (unsigned long long)h->h_magic, (unsigned long long)CDF_MAGIC)); goto out; } if (h->h_sec_size_p2 > 20) { DPRINTF(("Bad sector size 0x%u\n", h->h_sec_size_p2)); goto out; } if (h->h_short_sec_size_p2 > 20) { DPRINTF(("Bad short sector size 0x%u\n", h->h_short_sec_size_p2)); goto out; } return 0; out: errno = EFTYPE; return -1; } ssize_t cdf_read_sector(const cdf_info_t *info, void *buf, size_t offs, size_t len, const cdf_header_t *h, cdf_secid_t id) { size_t ss = CDF_SEC_SIZE(h); size_t pos = CDF_SEC_POS(h, id); assert(ss == len); return cdf_read(info, (off_t)pos, ((char *)buf) + offs, len); } ssize_t cdf_read_short_sector(const cdf_stream_t *sst, void *buf, size_t offs, size_t len, const cdf_header_t *h, cdf_secid_t id) { size_t ss = CDF_SHORT_SEC_SIZE(h); size_t pos = CDF_SHORT_SEC_POS(h, id); assert(ss == len); if (sst->sst_len < (size_t)id) { DPRINTF(("bad sector id %d > %d\n", id, sst->sst_len)); return -1; } (void)memcpy(((char *)buf) + offs, ((const char *)sst->sst_tab) + pos, len); return len; } /* * Read the sector allocation table. 
*/ int cdf_read_sat(const cdf_info_t *info, cdf_header_t *h, cdf_sat_t *sat) { size_t i, j, k; size_t ss = CDF_SEC_SIZE(h); cdf_secid_t *msa, mid, sec; size_t nsatpersec = (ss / sizeof(mid)) - 1; for (i = 0; i < __arraycount(h->h_master_sat); i++) if (h->h_master_sat[i] == CDF_SECID_FREE) break; #define CDF_SEC_LIMIT (UINT32_MAX / (4 * ss)) if ((nsatpersec > 0 && h->h_num_sectors_in_master_sat > CDF_SEC_LIMIT / nsatpersec) || i > CDF_SEC_LIMIT) { DPRINTF(("Number of sectors in master SAT too big %u %" SIZE_T_FORMAT "u\n", h->h_num_sectors_in_master_sat, i)); errno = EFTYPE; return -1; } sat->sat_len = h->h_num_sectors_in_master_sat * nsatpersec + i; DPRINTF(("sat_len = %" SIZE_T_FORMAT "u ss = %" SIZE_T_FORMAT "u\n", sat->sat_len, ss)); if ((sat->sat_tab = CAST(cdf_secid_t *, calloc(sat->sat_len, ss))) == NULL) return -1; for (i = 0; i < __arraycount(h->h_master_sat); i++) { if (h->h_master_sat[i] < 0) break; if (cdf_read_sector(info, sat->sat_tab, ss * i, ss, h, h->h_master_sat[i]) != (ssize_t)ss) { DPRINTF(("Reading sector %d", h->h_master_sat[i])); goto out1; } } if ((msa = CAST(cdf_secid_t *, calloc(1, ss))) == NULL) goto out1; mid = h->h_secid_first_sector_in_master_sat; for (j = 0; j < h->h_num_sectors_in_master_sat; j++) { if (mid < 0) goto out; if (j >= CDF_LOOP_LIMIT) { DPRINTF(("Reading master sector loop limit")); errno = EFTYPE; goto out2; } if (cdf_read_sector(info, msa, 0, ss, h, mid) != (ssize_t)ss) { DPRINTF(("Reading master sector %d", mid)); goto out2; } for (k = 0; k < nsatpersec; k++, i++) { sec = CDF_TOLE4((uint32_t)msa[k]); if (sec < 0) goto out; if (i >= sat->sat_len) { DPRINTF(("Out of bounds reading MSA %u >= %u", i, sat->sat_len)); errno = EFTYPE; goto out2; } if (cdf_read_sector(info, sat->sat_tab, ss * i, ss, h, sec) != (ssize_t)ss) { DPRINTF(("Reading sector %d", CDF_TOLE4(msa[k]))); goto out2; } } mid = CDF_TOLE4((uint32_t)msa[nsatpersec]); } out: sat->sat_len = i; free(msa); return 0; out2: free(msa); out1: free(sat->sat_tab); return -1; } size_t cdf_count_chain(const cdf_sat_t *sat, cdf_secid_t sid, size_t size) { size_t i, j; cdf_secid_t maxsector = (cdf_secid_t)(sat->sat_len * size); DPRINTF(("Chain:")); for (j = i = 0; sid >= 0; i++, j++) { DPRINTF((" %d", sid)); if (j >= CDF_LOOP_LIMIT) { DPRINTF(("Counting chain loop limit")); errno = EFTYPE; return (size_t)-1; } if (sid > maxsector) { DPRINTF(("Sector %d > %d\n", sid, maxsector)); errno = EFTYPE; return (size_t)-1; } sid = CDF_TOLE4((uint32_t)sat->sat_tab[sid]); } DPRINTF(("\n")); return i; } int cdf_read_long_sector_chain(const cdf_info_t *info, const cdf_header_t *h, const cdf_sat_t *sat, cdf_secid_t sid, size_t len, cdf_stream_t *scn) { size_t ss = CDF_SEC_SIZE(h), i, j; ssize_t nr; scn->sst_len = cdf_count_chain(sat, sid, ss); scn->sst_dirlen = len; if (scn->sst_len == (size_t)-1) return -1; scn->sst_tab = calloc(scn->sst_len, ss); if (scn->sst_tab == NULL) return -1; for (j = i = 0; sid >= 0; i++, j++) { if (j >= CDF_LOOP_LIMIT) { DPRINTF(("Read long sector chain loop limit")); errno = EFTYPE; goto out; } if (i >= scn->sst_len) { DPRINTF(("Out of bounds reading long sector chain " "%u > %u\n", i, scn->sst_len)); errno = EFTYPE; goto out; } if ((nr = cdf_read_sector(info, scn->sst_tab, i * ss, ss, h, sid)) != (ssize_t)ss) { if (i == scn->sst_len - 1 && nr > 0) { /* Last sector might be truncated */ return 0; } DPRINTF(("Reading long sector chain %d", sid)); goto out; } sid = CDF_TOLE4((uint32_t)sat->sat_tab[sid]); } return 0; out: free(scn->sst_tab); return -1; } int 
cdf_read_short_sector_chain(const cdf_header_t *h, const cdf_sat_t *ssat, const cdf_stream_t *sst, cdf_secid_t sid, size_t len, cdf_stream_t *scn) { size_t ss = CDF_SHORT_SEC_SIZE(h), i, j; scn->sst_len = cdf_count_chain(ssat, sid, CDF_SEC_SIZE(h)); scn->sst_dirlen = len; if (sst->sst_tab == NULL || scn->sst_len == (size_t)-1) return -1; scn->sst_tab = calloc(scn->sst_len, ss); if (scn->sst_tab == NULL) return -1; for (j = i = 0; sid >= 0; i++, j++) { if (j >= CDF_LOOP_LIMIT) { DPRINTF(("Read short sector chain loop limit")); errno = EFTYPE; goto out; } if (i >= scn->sst_len) { DPRINTF(("Out of bounds reading short sector chain " "%u > %u\n", i, scn->sst_len)); errno = EFTYPE; goto out; } if (cdf_read_short_sector(sst, scn->sst_tab, i * ss, ss, h, sid) != (ssize_t)ss) { DPRINTF(("Reading short sector chain %d", sid)); goto out; } sid = CDF_TOLE4((uint32_t)ssat->sat_tab[sid]); } return 0; out: free(scn->sst_tab); return -1; } int cdf_read_sector_chain(const cdf_info_t *info, const cdf_header_t *h, const cdf_sat_t *sat, const cdf_sat_t *ssat, const cdf_stream_t *sst, cdf_secid_t sid, size_t len, cdf_stream_t *scn) { if (len < h->h_min_size_standard_stream && sst->sst_tab != NULL) return cdf_read_short_sector_chain(h, ssat, sst, sid, len, scn); else return cdf_read_long_sector_chain(info, h, sat, sid, len, scn); } int cdf_read_dir(const cdf_info_t *info, const cdf_header_t *h, const cdf_sat_t *sat, cdf_dir_t *dir) { size_t i, j; size_t ss = CDF_SEC_SIZE(h), ns, nd; char *buf; cdf_secid_t sid = h->h_secid_first_directory; ns = cdf_count_chain(sat, sid, ss); if (ns == (size_t)-1) return -1; nd = ss / CDF_DIRECTORY_SIZE; dir->dir_len = ns * nd; dir->dir_tab = CAST(cdf_directory_t *, calloc(dir->dir_len, sizeof(dir->dir_tab[0]))); if (dir->dir_tab == NULL) return -1; if ((buf = CAST(char *, malloc(ss))) == NULL) { free(dir->dir_tab); return -1; } for (j = i = 0; i < ns; i++, j++) { if (j >= CDF_LOOP_LIMIT) { DPRINTF(("Read dir loop limit")); errno = EFTYPE; goto out; } if (cdf_read_sector(info, buf, 0, ss, h, sid) != (ssize_t)ss) { DPRINTF(("Reading directory sector %d", sid)); goto out; } for (j = 0; j < nd; j++) { cdf_unpack_dir(&dir->dir_tab[i * nd + j], &buf[j * CDF_DIRECTORY_SIZE]); } sid = CDF_TOLE4((uint32_t)sat->sat_tab[sid]); } if (NEED_SWAP) for (i = 0; i < dir->dir_len; i++) cdf_swap_dir(&dir->dir_tab[i]); free(buf); return 0; out: free(dir->dir_tab); free(buf); return -1; } int cdf_read_ssat(const cdf_info_t *info, const cdf_header_t *h, const cdf_sat_t *sat, cdf_sat_t *ssat) { size_t i, j; size_t ss = CDF_SEC_SIZE(h); cdf_secid_t sid = h->h_secid_first_sector_in_short_sat; ssat->sat_len = cdf_count_chain(sat, sid, CDF_SEC_SIZE(h)); if (ssat->sat_len == (size_t)-1) return -1; ssat->sat_tab = CAST(cdf_secid_t *, calloc(ssat->sat_len, ss)); if (ssat->sat_tab == NULL) return -1; for (j = i = 0; sid >= 0; i++, j++) { if (j >= CDF_LOOP_LIMIT) { DPRINTF(("Read short sat sector loop limit")); errno = EFTYPE; goto out; } if (i >= ssat->sat_len) { DPRINTF(("Out of bounds reading short sector chain " "%u > %u\n", i, ssat->sat_len)); errno = EFTYPE; goto out; } if (cdf_read_sector(info, ssat->sat_tab, i * ss, ss, h, sid) != (ssize_t)ss) { DPRINTF(("Reading short sat sector %d", sid)); goto out; } sid = CDF_TOLE4((uint32_t)sat->sat_tab[sid]); } return 0; out: free(ssat->sat_tab); return -1; } int cdf_read_short_stream(const cdf_info_t *info, const cdf_header_t *h, const cdf_sat_t *sat, const cdf_dir_t *dir, cdf_stream_t *scn) { size_t i; const cdf_directory_t *d; for (i = 0; i < dir->dir_len; 
i++) if (dir->dir_tab[i].d_type == CDF_DIR_TYPE_ROOT_STORAGE) break; /* If the it is not there, just fake it; some docs don't have it */ if (i == dir->dir_len) goto out; d = &dir->dir_tab[i]; /* If the it is not there, just fake it; some docs don't have it */ if (d->d_stream_first_sector < 0) goto out; return cdf_read_long_sector_chain(info, h, sat, d->d_stream_first_sector, d->d_size, scn); out: scn->sst_tab = NULL; scn->sst_len = 0; scn->sst_dirlen = 0; return 0; } static int cdf_namecmp(const char *d, const uint16_t *s, size_t l) { for (; l--; d++, s++) if (*d != CDF_TOLE2(*s)) return (unsigned char)*d - CDF_TOLE2(*s); return 0; } int cdf_read_summary_info(const cdf_info_t *info, const cdf_header_t *h, const cdf_sat_t *sat, const cdf_sat_t *ssat, const cdf_stream_t *sst, const cdf_dir_t *dir, cdf_stream_t *scn) { size_t i; const cdf_directory_t *d; static const char name[] = "\05SummaryInformation"; for (i = dir->dir_len; i > 0; i--) if (dir->dir_tab[i - 1].d_type == CDF_DIR_TYPE_USER_STREAM && cdf_namecmp(name, dir->dir_tab[i - 1].d_name, sizeof(name)) == 0) break; if (i == 0) { DPRINTF(("Cannot find summary information section\n")); errno = ESRCH; return -1; } d = &dir->dir_tab[i - 1]; return cdf_read_sector_chain(info, h, sat, ssat, sst, d->d_stream_first_sector, d->d_size, scn); } int cdf_read_property_info(const cdf_stream_t *sst, const cdf_header_t *h, uint32_t offs, cdf_property_info_t **info, size_t *count, size_t *maxcount) { const cdf_section_header_t *shp; cdf_section_header_t sh; const uint8_t *p, *q, *e; int16_t s16; int32_t s32; uint32_t u32; int64_t s64; uint64_t u64; cdf_timestamp_t tp; size_t i, o, o4, nelements, j; cdf_property_info_t *inp; if (offs > UINT32_MAX / 4) { errno = EFTYPE; goto out; } shp = CAST(const cdf_section_header_t *, (const void *) ((const char *)sst->sst_tab + offs)); if (cdf_check_stream_offset(sst, h, shp, sizeof(*shp), __LINE__) == -1) goto out; sh.sh_len = CDF_TOLE4(shp->sh_len); #define CDF_SHLEN_LIMIT (UINT32_MAX / 8) if (sh.sh_len > CDF_SHLEN_LIMIT) { errno = EFTYPE; goto out; } sh.sh_properties = CDF_TOLE4(shp->sh_properties); #define CDF_PROP_LIMIT (UINT32_MAX / (4 * sizeof(*inp))) if (sh.sh_properties > CDF_PROP_LIMIT) goto out; DPRINTF(("section len: %u properties %u\n", sh.sh_len, sh.sh_properties)); if (*maxcount) { if (*maxcount > CDF_PROP_LIMIT) goto out; *maxcount += sh.sh_properties; inp = CAST(cdf_property_info_t *, realloc(*info, *maxcount * sizeof(*inp))); } else { *maxcount = sh.sh_properties; inp = CAST(cdf_property_info_t *, malloc(*maxcount * sizeof(*inp))); } if (inp == NULL) goto out; *info = inp; inp += *count; *count += sh.sh_properties; p = CAST(const uint8_t *, (const void *) ((const char *)(const void *)sst->sst_tab + offs + sizeof(sh))); e = CAST(const uint8_t *, (const void *) (((const char *)(const void *)shp) + sh.sh_len)); if (cdf_check_stream_offset(sst, h, e, 0, __LINE__) == -1) goto out; for (i = 0; i < sh.sh_properties; i++) { q = (const uint8_t *)(const void *) ((const char *)(const void *)p + CDF_GETUINT32(p, (i << 1) + 1)) - 2 * sizeof(uint32_t); if (q > e) { DPRINTF(("Ran of the end %p > %p\n", q, e)); goto out; } inp[i].pi_id = CDF_GETUINT32(p, i << 1); inp[i].pi_type = CDF_GETUINT32(q, 0); DPRINTF(("%d) id=%x type=%x offs=%x,%d\n", i, inp[i].pi_id, inp[i].pi_type, q - p, CDF_GETUINT32(p, (i << 1) + 1))); if (inp[i].pi_type & CDF_VECTOR) { nelements = CDF_GETUINT32(q, 1); o = 2; } else { nelements = 1; o = 1; } o4 = o * sizeof(uint32_t); if (inp[i].pi_type & (CDF_ARRAY|CDF_BYREF|CDF_RESERVED)) goto 
unknown; switch (inp[i].pi_type & CDF_TYPEMASK) { case CDF_NULL: case CDF_EMPTY: break; case CDF_SIGNED16: if (inp[i].pi_type & CDF_VECTOR) goto unknown; (void)memcpy(&s16, &q[o4], sizeof(s16)); inp[i].pi_s16 = CDF_TOLE2(s16); break; case CDF_SIGNED32: if (inp[i].pi_type & CDF_VECTOR) goto unknown; (void)memcpy(&s32, &q[o4], sizeof(s32)); inp[i].pi_s32 = CDF_TOLE4((uint32_t)s32); break; case CDF_BOOL: case CDF_UNSIGNED32: if (inp[i].pi_type & CDF_VECTOR) goto unknown; (void)memcpy(&u32, &q[o4], sizeof(u32)); inp[i].pi_u32 = CDF_TOLE4(u32); break; case CDF_SIGNED64: if (inp[i].pi_type & CDF_VECTOR) goto unknown; (void)memcpy(&s64, &q[o4], sizeof(s64)); inp[i].pi_s64 = CDF_TOLE8((uint64_t)s64); break; case CDF_UNSIGNED64: if (inp[i].pi_type & CDF_VECTOR) goto unknown; (void)memcpy(&u64, &q[o4], sizeof(u64)); inp[i].pi_u64 = CDF_TOLE8((uint64_t)u64); break; case CDF_LENGTH32_STRING: case CDF_LENGTH32_WSTRING: if (nelements > 1) { size_t nelem = inp - *info; if (*maxcount > CDF_PROP_LIMIT || nelements > CDF_PROP_LIMIT) goto out; *maxcount += nelements; inp = CAST(cdf_property_info_t *, realloc(*info, *maxcount * sizeof(*inp))); if (inp == NULL) goto out; *info = inp; inp = *info + nelem; } DPRINTF(("nelements = %d\n", nelements)); for (j = 0; j < nelements; j++, i++) { uint32_t l = CDF_GETUINT32(q, o); inp[i].pi_str.s_len = l; inp[i].pi_str.s_buf = (const char *) (const void *)(&q[o4 + sizeof(l)]); DPRINTF(("l = %d, r = %d, s = %s\n", l, CDF_ROUND(l, sizeof(l)), inp[i].pi_str.s_buf)); l = 4 + (uint32_t)CDF_ROUND(l, sizeof(l)); o += l >> 2; if (q + o >= e) goto out; o4 = o * sizeof(uint32_t); } i--; break; case CDF_FILETIME: if (inp[i].pi_type & CDF_VECTOR) goto unknown; (void)memcpy(&tp, &q[o4], sizeof(tp)); inp[i].pi_tp = CDF_TOLE8((uint64_t)tp); break; case CDF_CLIPBOARD: if (inp[i].pi_type & CDF_VECTOR) goto unknown; break; default: unknown: DPRINTF(("Don't know how to deal with %x\n", inp[i].pi_type)); goto out; } } return 0; out: free(*info); return -1; } int cdf_unpack_summary_info(const cdf_stream_t *sst, const cdf_header_t *h, cdf_summary_info_header_t *ssi, cdf_property_info_t **info, size_t *count) { size_t i, maxcount; const cdf_summary_info_header_t *si = CAST(const cdf_summary_info_header_t *, sst->sst_tab); const cdf_section_declaration_t *sd = CAST(const cdf_section_declaration_t *, (const void *) ((const char *)sst->sst_tab + CDF_SECTION_DECLARATION_OFFSET)); if (cdf_check_stream_offset(sst, h, si, sizeof(*si), __LINE__) == -1 || cdf_check_stream_offset(sst, h, sd, sizeof(*sd), __LINE__) == -1) return -1; ssi->si_byte_order = CDF_TOLE2(si->si_byte_order); ssi->si_os_version = CDF_TOLE2(si->si_os_version); ssi->si_os = CDF_TOLE2(si->si_os); ssi->si_class = si->si_class; cdf_swap_class(&ssi->si_class); ssi->si_count = CDF_TOLE2(si->si_count); *count = 0; maxcount = 0; *info = NULL; for (i = 0; i < CDF_TOLE4(si->si_count); i++) { if (i >= CDF_LOOP_LIMIT) { DPRINTF(("Unpack summary info loop limit")); errno = EFTYPE; return -1; } if (cdf_read_property_info(sst, h, CDF_TOLE4(sd->sd_offset), info, count, &maxcount) == -1) return -1; } return 0; } int cdf_print_classid(char *buf, size_t buflen, const cdf_classid_t *id) { return snprintf(buf, buflen, "%.8x-%.4x-%.4x-%.2x%.2x-" "%.2x%.2x%.2x%.2x%.2x%.2x", id->cl_dword, id->cl_word[0], id->cl_word[1], id->cl_two[0], id->cl_two[1], id->cl_six[0], id->cl_six[1], id->cl_six[2], id->cl_six[3], id->cl_six[4], id->cl_six[5]); } static const struct { uint32_t v; const char *n; } vn[] = { { CDF_PROPERTY_CODE_PAGE, "Code page" }, { 
CDF_PROPERTY_TITLE, "Title" }, { CDF_PROPERTY_SUBJECT, "Subject" }, { CDF_PROPERTY_AUTHOR, "Author" }, { CDF_PROPERTY_KEYWORDS, "Keywords" }, { CDF_PROPERTY_COMMENTS, "Comments" }, { CDF_PROPERTY_TEMPLATE, "Template" }, { CDF_PROPERTY_LAST_SAVED_BY, "Last Saved By" }, { CDF_PROPERTY_REVISION_NUMBER, "Revision Number" }, { CDF_PROPERTY_TOTAL_EDITING_TIME, "Total Editing Time" }, { CDF_PROPERTY_LAST_PRINTED, "Last Printed" }, { CDF_PROPERTY_CREATE_TIME, "Create Time/Date" }, { CDF_PROPERTY_LAST_SAVED_TIME, "Last Saved Time/Date" }, { CDF_PROPERTY_NUMBER_OF_PAGES, "Number of Pages" }, { CDF_PROPERTY_NUMBER_OF_WORDS, "Number of Words" }, { CDF_PROPERTY_NUMBER_OF_CHARACTERS, "Number of Characters" }, { CDF_PROPERTY_THUMBNAIL, "Thumbnail" }, { CDF_PROPERTY_NAME_OF_APPLICATION, "Name of Creating Application" }, { CDF_PROPERTY_SECURITY, "Security" }, { CDF_PROPERTY_LOCALE_ID, "Locale ID" }, }; int cdf_print_property_name(char *buf, size_t bufsiz, uint32_t p) { size_t i; for (i = 0; i < __arraycount(vn); i++) if (vn[i].v == p) return snprintf(buf, bufsiz, "%s", vn[i].n); return snprintf(buf, bufsiz, "0x%x", p); } int cdf_print_elapsed_time(char *buf, size_t bufsiz, cdf_timestamp_t ts) { int len = 0; int days, hours, mins, secs; ts /= CDF_TIME_PREC; secs = (int)(ts % 60); ts /= 60; mins = (int)(ts % 60); ts /= 60; hours = (int)(ts % 24); ts /= 24; days = (int)ts; if (days) { len += snprintf(buf + len, bufsiz - len, "%dd+", days); if ((size_t)len >= bufsiz) return len; } if (days || hours) { len += snprintf(buf + len, bufsiz - len, "%.2d:", hours); if ((size_t)len >= bufsiz) return len; } len += snprintf(buf + len, bufsiz - len, "%.2d:", mins); if ((size_t)len >= bufsiz) return len; len += snprintf(buf + len, bufsiz - len, "%.2d", secs); return len; } #ifdef CDF_DEBUG void cdf_dump_header(const cdf_header_t *h) { size_t i; #define DUMP(a, b) (void)fprintf(stderr, "%40.40s = " a "\n", # b, h->h_ ## b) #define DUMP2(a, b) (void)fprintf(stderr, "%40.40s = " a " (" a ")\n", # b, \ h->h_ ## b, 1 << h->h_ ## b) DUMP("%d", revision); DUMP("%d", version); DUMP("0x%x", byte_order); DUMP2("%d", sec_size_p2); DUMP2("%d", short_sec_size_p2); DUMP("%d", num_sectors_in_sat); DUMP("%d", secid_first_directory); DUMP("%d", min_size_standard_stream); DUMP("%d", secid_first_sector_in_short_sat); DUMP("%d", num_sectors_in_short_sat); DUMP("%d", secid_first_sector_in_master_sat); DUMP("%d", num_sectors_in_master_sat); for (i = 0; i < __arraycount(h->h_master_sat); i++) { if (h->h_master_sat[i] == CDF_SECID_FREE) break; (void)fprintf(stderr, "%35.35s[%.3zu] = %d\n", "master_sat", i, h->h_master_sat[i]); } } void cdf_dump_sat(const char *prefix, const cdf_sat_t *sat, size_t size) { size_t i, j, s = size / sizeof(cdf_secid_t); for (i = 0; i < sat->sat_len; i++) { (void)fprintf(stderr, "%s[%" SIZE_T_FORMAT "u]:\n%.6d: ", prefix, i, i * s); for (j = 0; j < s; j++) { (void)fprintf(stderr, "%5d, ", CDF_TOLE4(sat->sat_tab[s * i + j])); if ((j + 1) % 10 == 0) (void)fprintf(stderr, "\n%.6d: ", i * s + j + 1); } (void)fprintf(stderr, "\n"); } } void cdf_dump(void *v, size_t len) { size_t i, j; unsigned char *p = v; char abuf[16]; (void)fprintf(stderr, "%.4x: ", 0); for (i = 0, j = 0; i < len; i++, p++) { (void)fprintf(stderr, "%.2x ", *p); abuf[j++] = isprint(*p) ? *p : '.'; if (j == 16) { j = 0; abuf[15] = '\0'; (void)fprintf(stderr, "%s\n%.4x: ", abuf, i + 1); } } (void)fprintf(stderr, "\n"); } void cdf_dump_stream(const cdf_header_t *h, const cdf_stream_t *sst) { size_t ss = sst->sst_dirlen < h->h_min_size_standard_stream ? 
CDF_SHORT_SEC_SIZE(h) : CDF_SEC_SIZE(h); cdf_dump(sst->sst_tab, ss * sst->sst_len); } void cdf_dump_dir(const cdf_info_t *info, const cdf_header_t *h, const cdf_sat_t *sat, const cdf_sat_t *ssat, const cdf_stream_t *sst, const cdf_dir_t *dir) { size_t i, j; cdf_directory_t *d; char name[__arraycount(d->d_name)]; cdf_stream_t scn; struct timespec ts; static const char *types[] = { "empty", "user storage", "user stream", "lockbytes", "property", "root storage" }; for (i = 0; i < dir->dir_len; i++) { d = &dir->dir_tab[i]; for (j = 0; j < sizeof(name); j++) name[j] = (char)CDF_TOLE2(d->d_name[j]); (void)fprintf(stderr, "Directory %" SIZE_T_FORMAT "u: %s\n", i, name); if (d->d_type < __arraycount(types)) (void)fprintf(stderr, "Type: %s\n", types[d->d_type]); else (void)fprintf(stderr, "Type: %d\n", d->d_type); (void)fprintf(stderr, "Color: %s\n", d->d_color ? "black" : "red"); (void)fprintf(stderr, "Left child: %d\n", d->d_left_child); (void)fprintf(stderr, "Right child: %d\n", d->d_right_child); (void)fprintf(stderr, "Flags: 0x%x\n", d->d_flags); cdf_timestamp_to_timespec(&ts, d->d_created); (void)fprintf(stderr, "Created %s", cdf_ctime(&ts.tv_sec)); cdf_timestamp_to_timespec(&ts, d->d_modified); (void)fprintf(stderr, "Modified %s", cdf_ctime(&ts.tv_sec)); (void)fprintf(stderr, "Stream %d\n", d->d_stream_first_sector); (void)fprintf(stderr, "Size %d\n", d->d_size); switch (d->d_type) { case CDF_DIR_TYPE_USER_STORAGE: (void)fprintf(stderr, "Storage: %d\n", d->d_storage); break; case CDF_DIR_TYPE_USER_STREAM: if (sst == NULL) break; if (cdf_read_sector_chain(info, h, sat, ssat, sst, d->d_stream_first_sector, d->d_size, &scn) == -1) { warn("Can't read stream for %s at %d len %d", name, d->d_stream_first_sector, d->d_size); break; } cdf_dump_stream(h, &scn); free(scn.sst_tab); break; default: break; } } } void cdf_dump_property_info(const cdf_property_info_t *info, size_t count) { cdf_timestamp_t tp; struct timespec ts; char buf[64]; size_t i, j; for (i = 0; i < count; i++) { cdf_print_property_name(buf, sizeof(buf), info[i].pi_id); (void)fprintf(stderr, "%" SIZE_T_FORMAT "u) %s: ", i, buf); switch (info[i].pi_type) { case CDF_NULL: break; case CDF_SIGNED16: (void)fprintf(stderr, "signed 16 [%hd]\n", info[i].pi_s16); break; case CDF_SIGNED32: (void)fprintf(stderr, "signed 32 [%d]\n", info[i].pi_s32); break; case CDF_UNSIGNED32: (void)fprintf(stderr, "unsigned 32 [%u]\n", info[i].pi_u32); break; case CDF_LENGTH32_STRING: (void)fprintf(stderr, "string %u [%.*s]\n", info[i].pi_str.s_len, info[i].pi_str.s_len, info[i].pi_str.s_buf); break; case CDF_LENGTH32_WSTRING: (void)fprintf(stderr, "string %u [", info[i].pi_str.s_len); for (j = 0; j < info[i].pi_str.s_len - 1; j++) (void)fputc(info[i].pi_str.s_buf[j << 1], stderr); (void)fprintf(stderr, "]\n"); break; case CDF_FILETIME: tp = info[i].pi_tp; if (tp < 1000000000000000LL) { cdf_print_elapsed_time(buf, sizeof(buf), tp); (void)fprintf(stderr, "timestamp %s\n", buf); } else { cdf_timestamp_to_timespec(&ts, tp); (void)fprintf(stderr, "timestamp %s", cdf_ctime(&ts.tv_sec)); } break; case CDF_CLIPBOARD: (void)fprintf(stderr, "CLIPBOARD %u\n", info[i].pi_u32); break; default: DPRINTF(("Don't know how to deal with %x\n", info[i].pi_type)); break; } } } void cdf_dump_summary_info(const cdf_header_t *h, const cdf_stream_t *sst) { char buf[128]; cdf_summary_info_header_t ssi; cdf_property_info_t *info; size_t count; (void)&h; if (cdf_unpack_summary_info(sst, h, &ssi, &info, &count) == -1) return; (void)fprintf(stderr, "Endian: %x\n", ssi.si_byte_order); 
(void)fprintf(stderr, "Os Version %d.%d\n", ssi.si_os_version & 0xff, ssi.si_os_version >> 8); (void)fprintf(stderr, "Os %d\n", ssi.si_os); cdf_print_classid(buf, sizeof(buf), &ssi.si_class); (void)fprintf(stderr, "Class %s\n", buf); (void)fprintf(stderr, "Count %d\n", ssi.si_count); cdf_dump_property_info(info, count); free(info); } #endif #ifdef TEST int main(int argc, char *argv[]) { int i; cdf_header_t h; cdf_sat_t sat, ssat; cdf_stream_t sst, scn; cdf_dir_t dir; cdf_info_t info; if (argc < 2) { (void)fprintf(stderr, "Usage: %s <filename>\n", getprogname()); return -1; } info.i_buf = NULL; info.i_len = 0; for (i = 1; i < argc; i++) { if ((info.i_fd = open(argv[1], O_RDONLY)) == -1) err(1, "Cannot open `%s'", argv[1]); if (cdf_read_header(&info, &h) == -1) err(1, "Cannot read header"); #ifdef CDF_DEBUG cdf_dump_header(&h); #endif if (cdf_read_sat(&info, &h, &sat) == -1) err(1, "Cannot read sat"); #ifdef CDF_DEBUG cdf_dump_sat("SAT", &sat, CDF_SEC_SIZE(&h)); #endif if (cdf_read_ssat(&info, &h, &sat, &ssat) == -1) err(1, "Cannot read ssat"); #ifdef CDF_DEBUG cdf_dump_sat("SSAT", &ssat, CDF_SHORT_SEC_SIZE(&h)); #endif if (cdf_read_dir(&info, &h, &sat, &dir) == -1) err(1, "Cannot read dir"); if (cdf_read_short_stream(&info, &h, &sat, &dir, &sst) == -1) err(1, "Cannot read short stream"); #ifdef CDF_DEBUG cdf_dump_stream(&h, &sst); #endif #ifdef CDF_DEBUG cdf_dump_dir(&info, &h, &sat, &ssat, &sst, &dir); #endif if (cdf_read_summary_info(&info, &h, &sat, &ssat, &sst, &dir, &scn) == -1) err(1, "Cannot read summary info"); #ifdef CDF_DEBUG cdf_dump_summary_info(&h, &scn); #endif (void)close(info.i_fd); } return 0; } #endif
cdf_read_short_sector(const cdf_stream_t *sst, void *buf, size_t offs, size_t len, const cdf_header_t *h, cdf_secid_t id) { assert((size_t)CDF_SHORT_SEC_SIZE(h) == len); (void)memcpy(((char *)buf) + offs, ((const char *)sst->sst_tab) + CDF_SHORT_SEC_POS(h, id), len); return len; }
cdf_read_short_sector(const cdf_stream_t *sst, void *buf, size_t offs, size_t len, const cdf_header_t *h, cdf_secid_t id) { size_t ss = CDF_SHORT_SEC_SIZE(h); size_t pos = CDF_SHORT_SEC_POS(h, id); assert(ss == len); if (sst->sst_len < (size_t)id) { DPRINTF(("bad sector id %d > %d\n", id, sst->sst_len)); return -1; } (void)memcpy(((char *)buf) + offs, ((const char *)sst->sst_tab) + pos, len); return len; }
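The whole change in func_after is visible in the contrast with func_before: compute ss and pos up front, then refuse any sector id the stream table cannot contain before the memcpy dereferences it. Below is a minimal self-contained sketch of that pattern; sector_table, table_len, SECTOR_SIZE and the id type are simplified stand-ins for sst->sst_tab, sst->sst_len, CDF_SHORT_SEC_SIZE(h) and cdf_secid_t, not the library's real API. A short harness exercising the same guard appears after this record's metadata below.

/*
 * Sketch of the bounds-check pattern introduced by func_after.
 * All names and sizes here are hypothetical stand-ins.
 */
#include <sys/types.h>
#include <stddef.h>
#include <string.h>

#define SECTOR_SIZE 64

static ssize_t
read_short_sector(const char *sector_table, size_t table_len,
    void *buf, size_t offs, size_t len, long id)
{
	/* the patch keeps this as assert(ss == len); a runtime check here */
	if (len != SECTOR_SIZE)
		return -1;
	/*
	 * The guard added by the fix, written in the strict form
	 * (id >= table_len) rather than the patch's (table_len < id).
	 */
	if (id < 0 || (size_t)id >= table_len)
		return -1;
	(void)memcpy((char *)buf + offs,
	    sector_table + (size_t)id * SECTOR_SIZE, len);
	return (ssize_t)len;
}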
{'added': [(38, 'FILE_RCSID("@(#)$File: cdf.c,v 1.46 2011/09/16 21:23:59 christos Exp $")'), (344, '\tsize_t ss = CDF_SEC_SIZE(h);'), (345, '\tsize_t pos = CDF_SEC_POS(h, id);'), (346, '\tassert(ss == len);'), (347, '\treturn cdf_read(info, (off_t)pos, ((char *)buf) + offs, len);'), (354, '\tsize_t ss = CDF_SHORT_SEC_SIZE(h);'), (355, '\tsize_t pos = CDF_SHORT_SEC_POS(h, id);'), (356, '\tassert(ss == len);'), (357, '\tif (sst->sst_len < (size_t)id) {'), (358, '\t\tDPRINTF(("bad sector id %d > %d\\n", id, sst->sst_len));'), (359, '\t\treturn -1;'), (360, '\t}'), (362, '\t ((const char *)sst->sst_tab) + pos, len);'), (878, '\t\t\t\tif (q + o >= e)'), (879, '\t\t\t\t\tgoto out;')], 'deleted': [(38, 'FILE_RCSID("@(#)$File: cdf.c,v 1.45 2011/08/28 08:38:48 christos Exp $")'), (344, '\tassert((size_t)CDF_SEC_SIZE(h) == len);'), (345, '\treturn cdf_read(info, (off_t)CDF_SEC_POS(h, id),'), (346, '\t ((char *)buf) + offs, len);'), (353, '\tassert((size_t)CDF_SHORT_SEC_SIZE(h) == len);'), (355, '\t ((const char *)sst->sst_tab) + CDF_SHORT_SEC_POS(h, id), len);')]}
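The diff's other hunk (its lines 878-879) hardens the CDF_LENGTH32_STRING/CDF_LENGTH32_WSTRING loop in cdf_read_property_info: after the running offset o is advanced past each string, it is compared against the section end e, so a crafted string length can no longer walk the cursor out of the mapped stream. In context the patched loop body reads:

	l = 4 + (uint32_t)CDF_ROUND(l, sizeof(l));
	o += l >> 2;
	if (q + o >= e)		/* added by the fix: stop before the   */
		goto out;	/* offset runs past the section end    */
	o4 = o * sizeof(uint32_t);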
15
6
1092
8038
https://github.com/glensc/file
CVE-2012-1571
['CWE-119']
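To make the CWE-119 tag concrete: the flaw class is a memory access past a buffer boundary, and the patch closes it by rejecting the sector id before it reaches memcpy. The tiny harness below illustrates the intended behaviour; guarded_copy and its sizes are hypothetical illustrations, not library code.

#include <assert.h>
#include <stddef.h>
#include <string.h>

#define SEC 64				/* hypothetical sector size */

/* copy sector `id` out of a table of `nsec` sectors, or fail */
static int guarded_copy(const char *tab, size_t nsec, char *dst, size_t id)
{
	if (id >= nsec)			/* the guard CVE-2012-1571 was missing */
		return -1;
	memcpy(dst, tab + id * SEC, SEC);
	return 0;
}

int main(void)
{
	char tab[4 * SEC] = {0}, dst[SEC];

	assert(guarded_copy(tab, 4, dst, 3) == 0);	/* in range: ok */
	assert(guarded_copy(tab, 4, dst, 400) == -1);	/* crafted id: rejected */
	return 0;
}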
hns_dsaf_xgmac.c
hns_xgmac_get_sset_count
/* * Copyright (c) 2014-2015 Hisilicon Limited. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #include <linux/io-64-nonatomic-hi-lo.h> #include <linux/of_mdio.h> #include "hns_dsaf_main.h" #include "hns_dsaf_mac.h" #include "hns_dsaf_xgmac.h" #include "hns_dsaf_reg.h" static const struct mac_stats_string g_xgmac_stats_string[] = { {"xgmac_tx_bad_pkts_minto64", MAC_STATS_FIELD_OFF(tx_fragment_err)}, {"xgmac_tx_good_pkts_minto64", MAC_STATS_FIELD_OFF(tx_undersize)}, {"xgmac_tx_total_pkts_minto64", MAC_STATS_FIELD_OFF(tx_under_min_pkts)}, {"xgmac_tx_pkts_64", MAC_STATS_FIELD_OFF(tx_64bytes)}, {"xgmac_tx_pkts_65to127", MAC_STATS_FIELD_OFF(tx_65to127)}, {"xgmac_tx_pkts_128to255", MAC_STATS_FIELD_OFF(tx_128to255)}, {"xgmac_tx_pkts_256to511", MAC_STATS_FIELD_OFF(tx_256to511)}, {"xgmac_tx_pkts_512to1023", MAC_STATS_FIELD_OFF(tx_512to1023)}, {"xgmac_tx_pkts_1024to1518", MAC_STATS_FIELD_OFF(tx_1024to1518)}, {"xgmac_tx_pkts_1519tomax", MAC_STATS_FIELD_OFF(tx_1519tomax)}, {"xgmac_tx_good_pkts_1519tomax", MAC_STATS_FIELD_OFF(tx_1519tomax_good)}, {"xgmac_tx_good_pkts_untralmax", MAC_STATS_FIELD_OFF(tx_oversize)}, {"xgmac_tx_bad_pkts_untralmax", MAC_STATS_FIELD_OFF(tx_jabber_err)}, {"xgmac_tx_good_pkts_all", MAC_STATS_FIELD_OFF(tx_good_pkts)}, {"xgmac_tx_good_byte_all", MAC_STATS_FIELD_OFF(tx_good_bytes)}, {"xgmac_tx_total_pkt", MAC_STATS_FIELD_OFF(tx_total_pkts)}, {"xgmac_tx_total_byt", MAC_STATS_FIELD_OFF(tx_total_bytes)}, {"xgmac_tx_uc_pkt", MAC_STATS_FIELD_OFF(tx_uc_pkts)}, {"xgmac_tx_mc_pkt", MAC_STATS_FIELD_OFF(tx_mc_pkts)}, {"xgmac_tx_bc_pkt", MAC_STATS_FIELD_OFF(tx_bc_pkts)}, {"xgmac_tx_pause_frame_num", MAC_STATS_FIELD_OFF(tx_pfc_tc0)}, {"xgmac_tx_pfc_per_1pause_framer", MAC_STATS_FIELD_OFF(tx_pfc_tc1)}, {"xgmac_tx_pfc_per_2pause_framer", MAC_STATS_FIELD_OFF(tx_pfc_tc2)}, {"xgmac_tx_pfc_per_3pause_framer", MAC_STATS_FIELD_OFF(tx_pfc_tc3)}, {"xgmac_tx_pfc_per_4pause_framer", MAC_STATS_FIELD_OFF(tx_pfc_tc4)}, {"xgmac_tx_pfc_per_5pause_framer", MAC_STATS_FIELD_OFF(tx_pfc_tc5)}, {"xgmac_tx_pfc_per_6pause_framer", MAC_STATS_FIELD_OFF(tx_pfc_tc6)}, {"xgmac_tx_pfc_per_7pause_framer", MAC_STATS_FIELD_OFF(tx_pfc_tc7)}, {"xgmac_tx_mac_ctrol_frame", MAC_STATS_FIELD_OFF(tx_ctrl)}, {"xgmac_tx_1731_pkts", MAC_STATS_FIELD_OFF(tx_1731_pkts)}, {"xgmac_tx_1588_pkts", MAC_STATS_FIELD_OFF(tx_1588_pkts)}, {"xgmac_rx_good_pkt_from_dsaf", MAC_STATS_FIELD_OFF(rx_good_from_sw)}, {"xgmac_rx_bad_pkt_from_dsaf", MAC_STATS_FIELD_OFF(rx_bad_from_sw)}, {"xgmac_tx_bad_pkt_64tomax", MAC_STATS_FIELD_OFF(tx_bad_pkts)}, {"xgmac_rx_bad_pkts_minto64", MAC_STATS_FIELD_OFF(rx_fragment_err)}, {"xgmac_rx_good_pkts_minto64", MAC_STATS_FIELD_OFF(rx_undersize)}, {"xgmac_rx_total_pkts_minto64", MAC_STATS_FIELD_OFF(rx_under_min)}, {"xgmac_rx_pkt_64", MAC_STATS_FIELD_OFF(rx_64bytes)}, {"xgmac_rx_pkt_65to127", MAC_STATS_FIELD_OFF(rx_65to127)}, {"xgmac_rx_pkt_128to255", MAC_STATS_FIELD_OFF(rx_128to255)}, {"xgmac_rx_pkt_256to511", MAC_STATS_FIELD_OFF(rx_256to511)}, {"xgmac_rx_pkt_512to1023", MAC_STATS_FIELD_OFF(rx_512to1023)}, {"xgmac_rx_pkt_1024to1518", MAC_STATS_FIELD_OFF(rx_1024to1518)}, {"xgmac_rx_pkt_1519tomax", MAC_STATS_FIELD_OFF(rx_1519tomax)}, {"xgmac_rx_good_pkt_1519tomax", MAC_STATS_FIELD_OFF(rx_1519tomax_good)}, {"xgmac_rx_good_pkt_untramax", MAC_STATS_FIELD_OFF(rx_oversize)}, {"xgmac_rx_bad_pkt_untramax", 
MAC_STATS_FIELD_OFF(rx_jabber_err)}, {"xgmac_rx_good_pkt", MAC_STATS_FIELD_OFF(rx_good_pkts)}, {"xgmac_rx_good_byt", MAC_STATS_FIELD_OFF(rx_good_bytes)}, {"xgmac_rx_pkt", MAC_STATS_FIELD_OFF(rx_total_pkts)}, {"xgmac_rx_byt", MAC_STATS_FIELD_OFF(rx_total_bytes)}, {"xgmac_rx_uc_pkt", MAC_STATS_FIELD_OFF(rx_uc_pkts)}, {"xgmac_rx_mc_pkt", MAC_STATS_FIELD_OFF(rx_mc_pkts)}, {"xgmac_rx_bc_pkt", MAC_STATS_FIELD_OFF(rx_bc_pkts)}, {"xgmac_rx_pause_frame_num", MAC_STATS_FIELD_OFF(rx_pfc_tc0)}, {"xgmac_rx_pfc_per_1pause_frame", MAC_STATS_FIELD_OFF(rx_pfc_tc1)}, {"xgmac_rx_pfc_per_2pause_frame", MAC_STATS_FIELD_OFF(rx_pfc_tc2)}, {"xgmac_rx_pfc_per_3pause_frame", MAC_STATS_FIELD_OFF(rx_pfc_tc3)}, {"xgmac_rx_pfc_per_4pause_frame", MAC_STATS_FIELD_OFF(rx_pfc_tc4)}, {"xgmac_rx_pfc_per_5pause_frame", MAC_STATS_FIELD_OFF(rx_pfc_tc5)}, {"xgmac_rx_pfc_per_6pause_frame", MAC_STATS_FIELD_OFF(rx_pfc_tc6)}, {"xgmac_rx_pfc_per_7pause_frame", MAC_STATS_FIELD_OFF(rx_pfc_tc7)}, {"xgmac_rx_mac_control", MAC_STATS_FIELD_OFF(rx_unknown_ctrl)}, {"xgmac_tx_good_pkt_todsaf", MAC_STATS_FIELD_OFF(tx_good_to_sw)}, {"xgmac_tx_bad_pkt_todsaf", MAC_STATS_FIELD_OFF(tx_bad_to_sw)}, {"xgmac_rx_1731_pkt", MAC_STATS_FIELD_OFF(rx_1731_pkts)}, {"xgmac_rx_symbol_err_pkt", MAC_STATS_FIELD_OFF(rx_symbol_err)}, {"xgmac_rx_fcs_pkt", MAC_STATS_FIELD_OFF(rx_fcs_err)} }; /** *hns_xgmac_tx_enable - xgmac port tx enable *@drv: mac driver *@value: value of enable */ static void hns_xgmac_tx_enable(struct mac_driver *drv, u32 value) { dsaf_set_dev_bit(drv, XGMAC_MAC_ENABLE_REG, XGMAC_ENABLE_TX_B, !!value); } /** *hns_xgmac_rx_enable - xgmac port rx enable *@drv: mac driver *@value: value of enable */ static void hns_xgmac_rx_enable(struct mac_driver *drv, u32 value) { dsaf_set_dev_bit(drv, XGMAC_MAC_ENABLE_REG, XGMAC_ENABLE_RX_B, !!value); } /** * hns_xgmac_tx_lf_rf_insert - insert lf rf control about xgmac * @mac_drv: mac driver * @mode: inserf rf or lf */ static void hns_xgmac_lf_rf_insert(struct mac_driver *mac_drv, u32 mode) { dsaf_set_dev_field(mac_drv, XGMAC_MAC_TX_LF_RF_CONTROL_REG, XGMAC_LF_RF_INSERT_M, XGMAC_LF_RF_INSERT_S, mode); } /** * hns_xgmac__lf_rf_control_init - initial the lf rf control register * @mac_drv: mac driver */ static void hns_xgmac_lf_rf_control_init(struct mac_driver *mac_drv) { u32 val = 0; dsaf_set_bit(val, XGMAC_UNIDIR_EN_B, 0); dsaf_set_bit(val, XGMAC_RF_TX_EN_B, 1); dsaf_set_field(val, XGMAC_LF_RF_INSERT_M, XGMAC_LF_RF_INSERT_S, 0); dsaf_write_reg(mac_drv, XGMAC_MAC_TX_LF_RF_CONTROL_REG, val); } /** *hns_xgmac_enable - enable xgmac port *@drv: mac driver *@mode: mode of mac port */ static void hns_xgmac_enable(void *mac_drv, enum mac_commom_mode mode) { struct mac_driver *drv = (struct mac_driver *)mac_drv; hns_xgmac_lf_rf_insert(drv, HNS_XGMAC_NO_LF_RF_INSERT); /*enable XGE rX/tX */ if (mode == MAC_COMM_MODE_TX) { hns_xgmac_tx_enable(drv, 1); } else if (mode == MAC_COMM_MODE_RX) { hns_xgmac_rx_enable(drv, 1); } else if (mode == MAC_COMM_MODE_RX_AND_TX) { hns_xgmac_tx_enable(drv, 1); hns_xgmac_rx_enable(drv, 1); } else { dev_err(drv->dev, "error mac mode:%d\n", mode); } } /** *hns_xgmac_disable - disable xgmac port *@mac_drv: mac driver *@mode: mode of mac port */ static void hns_xgmac_disable(void *mac_drv, enum mac_commom_mode mode) { struct mac_driver *drv = (struct mac_driver *)mac_drv; if (mode == MAC_COMM_MODE_TX) { hns_xgmac_tx_enable(drv, 0); } else if (mode == MAC_COMM_MODE_RX) { hns_xgmac_rx_enable(drv, 0); } else if (mode == MAC_COMM_MODE_RX_AND_TX) { hns_xgmac_tx_enable(drv, 0); 
hns_xgmac_rx_enable(drv, 0); } hns_xgmac_lf_rf_insert(drv, HNS_XGMAC_LF_INSERT); } /** *hns_xgmac_pma_fec_enable - xgmac PMA FEC enable *@drv: mac driver *@tx_value: tx value *@rx_value: rx value *return status */ static void hns_xgmac_pma_fec_enable(struct mac_driver *drv, u32 tx_value, u32 rx_value) { u32 origin = dsaf_read_dev(drv, XGMAC_PMA_FEC_CONTROL_REG); dsaf_set_bit(origin, XGMAC_PMA_FEC_CTL_TX_B, !!tx_value); dsaf_set_bit(origin, XGMAC_PMA_FEC_CTL_RX_B, !!rx_value); dsaf_write_dev(drv, XGMAC_PMA_FEC_CONTROL_REG, origin); } /* clr exc irq for xge*/ static void hns_xgmac_exc_irq_en(struct mac_driver *drv, u32 en) { u32 clr_vlue = 0xfffffffful; u32 msk_vlue = en ? 0xfffffffful : 0; /*1 is en, 0 is dis*/ dsaf_write_dev(drv, XGMAC_INT_STATUS_REG, clr_vlue); dsaf_write_dev(drv, XGMAC_INT_ENABLE_REG, msk_vlue); } /** *hns_xgmac_init - initialize XGE *@mac_drv: mac driver */ static void hns_xgmac_init(void *mac_drv) { struct mac_driver *drv = (struct mac_driver *)mac_drv; struct dsaf_device *dsaf_dev = (struct dsaf_device *)dev_get_drvdata(drv->dev); u32 port = drv->mac_id; dsaf_dev->misc_op->xge_srst(dsaf_dev, port, 0); mdelay(100); dsaf_dev->misc_op->xge_srst(dsaf_dev, port, 1); mdelay(100); hns_xgmac_lf_rf_control_init(drv); hns_xgmac_exc_irq_en(drv, 0); hns_xgmac_pma_fec_enable(drv, 0x0, 0x0); hns_xgmac_disable(mac_drv, MAC_COMM_MODE_RX_AND_TX); } /** *hns_xgmac_config_pad_and_crc - set xgmac pad and crc enable the same time *@mac_drv: mac driver *@newval:enable of pad and crc */ static void hns_xgmac_config_pad_and_crc(void *mac_drv, u8 newval) { struct mac_driver *drv = (struct mac_driver *)mac_drv; u32 origin = dsaf_read_dev(drv, XGMAC_MAC_CONTROL_REG); dsaf_set_bit(origin, XGMAC_CTL_TX_PAD_B, !!newval); dsaf_set_bit(origin, XGMAC_CTL_TX_FCS_B, !!newval); dsaf_set_bit(origin, XGMAC_CTL_RX_FCS_B, !!newval); dsaf_write_dev(drv, XGMAC_MAC_CONTROL_REG, origin); } /** *hns_xgmac_pausefrm_cfg - set pause param about xgmac *@mac_drv: mac driver *@newval:enable of pad and crc */ static void hns_xgmac_pausefrm_cfg(void *mac_drv, u32 rx_en, u32 tx_en) { struct mac_driver *drv = (struct mac_driver *)mac_drv; u32 origin = dsaf_read_dev(drv, XGMAC_MAC_PAUSE_CTRL_REG); dsaf_set_bit(origin, XGMAC_PAUSE_CTL_TX_B, !!tx_en); dsaf_set_bit(origin, XGMAC_PAUSE_CTL_RX_B, !!rx_en); dsaf_write_dev(drv, XGMAC_MAC_PAUSE_CTRL_REG, origin); } static void hns_xgmac_set_pausefrm_mac_addr(void *mac_drv, char *mac_addr) { struct mac_driver *drv = (struct mac_driver *)mac_drv; u32 high_val = mac_addr[1] | (mac_addr[0] << 8); u32 low_val = mac_addr[5] | (mac_addr[4] << 8) | (mac_addr[3] << 16) | (mac_addr[2] << 24); dsaf_write_dev(drv, XGMAC_MAC_PAUSE_LOCAL_MAC_L_REG, low_val); dsaf_write_dev(drv, XGMAC_MAC_PAUSE_LOCAL_MAC_H_REG, high_val); } /** *hns_xgmac_set_rx_ignore_pause_frames - set rx pause param about xgmac *@mac_drv: mac driver *@enable:enable rx pause param */ static void hns_xgmac_set_rx_ignore_pause_frames(void *mac_drv, u32 enable) { struct mac_driver *drv = (struct mac_driver *)mac_drv; dsaf_set_dev_bit(drv, XGMAC_MAC_PAUSE_CTRL_REG, XGMAC_PAUSE_CTL_RX_B, !!enable); } /** *hns_xgmac_set_tx_auto_pause_frames - set tx pause param about xgmac *@mac_drv: mac driver *@enable:enable tx pause param */ static void hns_xgmac_set_tx_auto_pause_frames(void *mac_drv, u16 enable) { struct mac_driver *drv = (struct mac_driver *)mac_drv; dsaf_set_dev_bit(drv, XGMAC_MAC_PAUSE_CTRL_REG, XGMAC_PAUSE_CTL_TX_B, !!enable); /*if enable is not zero ,set tx pause time */ if (enable) dsaf_write_dev(drv, 
XGMAC_MAC_PAUSE_TIME_REG, enable); } /** *hns_xgmac_config_max_frame_length - set xgmac max frame length *@mac_drv: mac driver *@newval:xgmac max frame length */ static void hns_xgmac_config_max_frame_length(void *mac_drv, u16 newval) { struct mac_driver *drv = (struct mac_driver *)mac_drv; dsaf_write_dev(drv, XGMAC_MAC_MAX_PKT_SIZE_REG, newval); } void hns_xgmac_update_stats(void *mac_drv) { struct mac_driver *drv = (struct mac_driver *)mac_drv; struct mac_hw_stats *hw_stats = &drv->mac_cb->hw_stats; /* TX */ hw_stats->tx_fragment_err = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_FRAGMENT); hw_stats->tx_undersize = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_UNDERSIZE); hw_stats->tx_under_min_pkts = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_UNDERMIN); hw_stats->tx_64bytes = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_64OCTETS); hw_stats->tx_65to127 = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_65TO127OCTETS); hw_stats->tx_128to255 = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_128TO255OCTETS); hw_stats->tx_256to511 = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_256TO511OCTETS); hw_stats->tx_512to1023 = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_512TO1023OCTETS); hw_stats->tx_1024to1518 = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_1024TO1518OCTETS); hw_stats->tx_1519tomax = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_1519TOMAXOCTETS); hw_stats->tx_1519tomax_good = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_1519TOMAXOCTETSOK); hw_stats->tx_oversize = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_OVERSIZE); hw_stats->tx_jabber_err = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_JABBER); hw_stats->tx_good_pkts = hns_mac_reg_read64(drv, XGMAC_TX_GOODPKTS); hw_stats->tx_good_bytes = hns_mac_reg_read64(drv, XGMAC_TX_GOODOCTETS); hw_stats->tx_total_pkts = hns_mac_reg_read64(drv, XGMAC_TX_TOTAL_PKTS); hw_stats->tx_total_bytes = hns_mac_reg_read64(drv, XGMAC_TX_TOTALOCTETS); hw_stats->tx_uc_pkts = hns_mac_reg_read64(drv, XGMAC_TX_UNICASTPKTS); hw_stats->tx_mc_pkts = hns_mac_reg_read64(drv, XGMAC_TX_MULTICASTPKTS); hw_stats->tx_bc_pkts = hns_mac_reg_read64(drv, XGMAC_TX_BROADCASTPKTS); hw_stats->tx_pfc_tc0 = hns_mac_reg_read64(drv, XGMAC_TX_PRI0PAUSEPKTS); hw_stats->tx_pfc_tc1 = hns_mac_reg_read64(drv, XGMAC_TX_PRI1PAUSEPKTS); hw_stats->tx_pfc_tc2 = hns_mac_reg_read64(drv, XGMAC_TX_PRI2PAUSEPKTS); hw_stats->tx_pfc_tc3 = hns_mac_reg_read64(drv, XGMAC_TX_PRI3PAUSEPKTS); hw_stats->tx_pfc_tc4 = hns_mac_reg_read64(drv, XGMAC_TX_PRI4PAUSEPKTS); hw_stats->tx_pfc_tc5 = hns_mac_reg_read64(drv, XGMAC_TX_PRI5PAUSEPKTS); hw_stats->tx_pfc_tc6 = hns_mac_reg_read64(drv, XGMAC_TX_PRI6PAUSEPKTS); hw_stats->tx_pfc_tc7 = hns_mac_reg_read64(drv, XGMAC_TX_PRI7PAUSEPKTS); hw_stats->tx_ctrl = hns_mac_reg_read64(drv, XGMAC_TX_MACCTRLPKTS); hw_stats->tx_1731_pkts = hns_mac_reg_read64(drv, XGMAC_TX_1731PKTS); hw_stats->tx_1588_pkts = hns_mac_reg_read64(drv, XGMAC_TX_1588PKTS); hw_stats->rx_good_from_sw = hns_mac_reg_read64(drv, XGMAC_RX_FROMAPPGOODPKTS); hw_stats->rx_bad_from_sw = hns_mac_reg_read64(drv, XGMAC_RX_FROMAPPBADPKTS); hw_stats->tx_bad_pkts = hns_mac_reg_read64(drv, XGMAC_TX_ERRALLPKTS); /* RX */ hw_stats->rx_fragment_err = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_FRAGMENT); hw_stats->rx_undersize = hns_mac_reg_read64(drv, XGMAC_RX_PKTSUNDERSIZE); hw_stats->rx_under_min = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_UNDERMIN); hw_stats->rx_64bytes = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_64OCTETS); hw_stats->rx_65to127 = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_65TO127OCTETS); hw_stats->rx_128to255 = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_128TO255OCTETS); hw_stats->rx_256to511 = hns_mac_reg_read64(drv, 
XGMAC_RX_PKTS_256TO511OCTETS); hw_stats->rx_512to1023 = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_512TO1023OCTETS); hw_stats->rx_1024to1518 = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_1024TO1518OCTETS); hw_stats->rx_1519tomax = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_1519TOMAXOCTETS); hw_stats->rx_1519tomax_good = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_1519TOMAXOCTETSOK); hw_stats->rx_oversize = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_OVERSIZE); hw_stats->rx_jabber_err = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_JABBER); hw_stats->rx_good_pkts = hns_mac_reg_read64(drv, XGMAC_RX_GOODPKTS); hw_stats->rx_good_bytes = hns_mac_reg_read64(drv, XGMAC_RX_GOODOCTETS); hw_stats->rx_total_pkts = hns_mac_reg_read64(drv, XGMAC_RX_TOTAL_PKTS); hw_stats->rx_total_bytes = hns_mac_reg_read64(drv, XGMAC_RX_TOTALOCTETS); hw_stats->rx_uc_pkts = hns_mac_reg_read64(drv, XGMAC_RX_UNICASTPKTS); hw_stats->rx_mc_pkts = hns_mac_reg_read64(drv, XGMAC_RX_MULTICASTPKTS); hw_stats->rx_bc_pkts = hns_mac_reg_read64(drv, XGMAC_RX_BROADCASTPKTS); hw_stats->rx_pfc_tc0 = hns_mac_reg_read64(drv, XGMAC_RX_PRI0PAUSEPKTS); hw_stats->rx_pfc_tc1 = hns_mac_reg_read64(drv, XGMAC_RX_PRI1PAUSEPKTS); hw_stats->rx_pfc_tc2 = hns_mac_reg_read64(drv, XGMAC_RX_PRI2PAUSEPKTS); hw_stats->rx_pfc_tc3 = hns_mac_reg_read64(drv, XGMAC_RX_PRI3PAUSEPKTS); hw_stats->rx_pfc_tc4 = hns_mac_reg_read64(drv, XGMAC_RX_PRI4PAUSEPKTS); hw_stats->rx_pfc_tc5 = hns_mac_reg_read64(drv, XGMAC_RX_PRI5PAUSEPKTS); hw_stats->rx_pfc_tc6 = hns_mac_reg_read64(drv, XGMAC_RX_PRI6PAUSEPKTS); hw_stats->rx_pfc_tc7 = hns_mac_reg_read64(drv, XGMAC_RX_PRI7PAUSEPKTS); hw_stats->rx_unknown_ctrl = hns_mac_reg_read64(drv, XGMAC_RX_MACCTRLPKTS); hw_stats->tx_good_to_sw = hns_mac_reg_read64(drv, XGMAC_TX_SENDAPPGOODPKTS); hw_stats->tx_bad_to_sw = hns_mac_reg_read64(drv, XGMAC_TX_SENDAPPBADPKTS); hw_stats->rx_1731_pkts = hns_mac_reg_read64(drv, XGMAC_RX_1731PKTS); hw_stats->rx_symbol_err = hns_mac_reg_read64(drv, XGMAC_RX_SYMBOLERRPKTS); hw_stats->rx_fcs_err = hns_mac_reg_read64(drv, XGMAC_RX_FCSERRPKTS); } /** *hns_xgmac_free - free xgmac driver *@mac_drv: mac driver */ static void hns_xgmac_free(void *mac_drv) { struct mac_driver *drv = (struct mac_driver *)mac_drv; struct dsaf_device *dsaf_dev = (struct dsaf_device *)dev_get_drvdata(drv->dev); u32 mac_id = drv->mac_id; dsaf_dev->misc_op->xge_srst(dsaf_dev, mac_id, 0); } /** *hns_xgmac_get_info - get xgmac information *@mac_drv: mac driver *@mac_info:mac information */ static void hns_xgmac_get_info(void *mac_drv, struct mac_info *mac_info) { struct mac_driver *drv = (struct mac_driver *)mac_drv; u32 pause_time, pause_ctrl, port_mode, ctrl_val; ctrl_val = dsaf_read_dev(drv, XGMAC_MAC_CONTROL_REG); mac_info->pad_and_crc_en = dsaf_get_bit(ctrl_val, XGMAC_CTL_TX_PAD_B); mac_info->auto_neg = 0; pause_time = dsaf_read_dev(drv, XGMAC_MAC_PAUSE_TIME_REG); mac_info->tx_pause_time = pause_time; port_mode = dsaf_read_dev(drv, XGMAC_PORT_MODE_REG); mac_info->port_en = dsaf_get_field(port_mode, XGMAC_PORT_MODE_TX_M, XGMAC_PORT_MODE_TX_S) && dsaf_get_field(port_mode, XGMAC_PORT_MODE_RX_M, XGMAC_PORT_MODE_RX_S); mac_info->duplex = 1; mac_info->speed = MAC_SPEED_10000; pause_ctrl = dsaf_read_dev(drv, XGMAC_MAC_PAUSE_CTRL_REG); mac_info->rx_pause_en = dsaf_get_bit(pause_ctrl, XGMAC_PAUSE_CTL_RX_B); mac_info->tx_pause_en = dsaf_get_bit(pause_ctrl, XGMAC_PAUSE_CTL_TX_B); } /** *hns_xgmac_get_pausefrm_cfg - get xgmac pause param *@mac_drv: mac driver *@rx_en:xgmac rx pause enable *@tx_en:xgmac tx pause enable */ static void hns_xgmac_get_pausefrm_cfg(void *mac_drv, u32 
*rx_en, u32 *tx_en) { struct mac_driver *drv = (struct mac_driver *)mac_drv; u32 pause_ctrl; pause_ctrl = dsaf_read_dev(drv, XGMAC_MAC_PAUSE_CTRL_REG); *rx_en = dsaf_get_bit(pause_ctrl, XGMAC_PAUSE_CTL_RX_B); *tx_en = dsaf_get_bit(pause_ctrl, XGMAC_PAUSE_CTL_TX_B); } /** *hns_xgmac_get_link_status - get xgmac link status *@mac_drv: mac driver *@link_stat: xgmac link stat */ static void hns_xgmac_get_link_status(void *mac_drv, u32 *link_stat) { struct mac_driver *drv = (struct mac_driver *)mac_drv; *link_stat = dsaf_read_dev(drv, XGMAC_LINK_STATUS_REG); } /** *hns_xgmac_get_regs - dump xgmac regs *@mac_drv: mac driver *@cmd:ethtool cmd *@data:data for value of regs */ static void hns_xgmac_get_regs(void *mac_drv, void *data) { u32 i = 0; struct mac_driver *drv = (struct mac_driver *)mac_drv; u32 *regs = data; u64 qtmp; /* base config registers */ regs[0] = dsaf_read_dev(drv, XGMAC_INT_STATUS_REG); regs[1] = dsaf_read_dev(drv, XGMAC_INT_ENABLE_REG); regs[2] = dsaf_read_dev(drv, XGMAC_INT_SET_REG); regs[3] = dsaf_read_dev(drv, XGMAC_IERR_U_INFO_REG); regs[4] = dsaf_read_dev(drv, XGMAC_OVF_INFO_REG); regs[5] = dsaf_read_dev(drv, XGMAC_OVF_CNT_REG); regs[6] = dsaf_read_dev(drv, XGMAC_PORT_MODE_REG); regs[7] = dsaf_read_dev(drv, XGMAC_CLK_ENABLE_REG); regs[8] = dsaf_read_dev(drv, XGMAC_RESET_REG); regs[9] = dsaf_read_dev(drv, XGMAC_LINK_CONTROL_REG); regs[10] = dsaf_read_dev(drv, XGMAC_LINK_STATUS_REG); regs[11] = dsaf_read_dev(drv, XGMAC_SPARE_REG); regs[12] = dsaf_read_dev(drv, XGMAC_SPARE_CNT_REG); regs[13] = dsaf_read_dev(drv, XGMAC_MAC_ENABLE_REG); regs[14] = dsaf_read_dev(drv, XGMAC_MAC_CONTROL_REG); regs[15] = dsaf_read_dev(drv, XGMAC_MAC_IPG_REG); regs[16] = dsaf_read_dev(drv, XGMAC_MAC_MSG_CRC_EN_REG); regs[17] = dsaf_read_dev(drv, XGMAC_MAC_MSG_IMG_REG); regs[18] = dsaf_read_dev(drv, XGMAC_MAC_MSG_FC_CFG_REG); regs[19] = dsaf_read_dev(drv, XGMAC_MAC_MSG_TC_CFG_REG); regs[20] = dsaf_read_dev(drv, XGMAC_MAC_PAD_SIZE_REG); regs[21] = dsaf_read_dev(drv, XGMAC_MAC_MIN_PKT_SIZE_REG); regs[22] = dsaf_read_dev(drv, XGMAC_MAC_MAX_PKT_SIZE_REG); regs[23] = dsaf_read_dev(drv, XGMAC_MAC_PAUSE_CTRL_REG); regs[24] = dsaf_read_dev(drv, XGMAC_MAC_PAUSE_TIME_REG); regs[25] = dsaf_read_dev(drv, XGMAC_MAC_PAUSE_GAP_REG); regs[26] = dsaf_read_dev(drv, XGMAC_MAC_PAUSE_LOCAL_MAC_H_REG); regs[27] = dsaf_read_dev(drv, XGMAC_MAC_PAUSE_LOCAL_MAC_L_REG); regs[28] = dsaf_read_dev(drv, XGMAC_MAC_PAUSE_PEER_MAC_H_REG); regs[29] = dsaf_read_dev(drv, XGMAC_MAC_PAUSE_PEER_MAC_L_REG); regs[30] = dsaf_read_dev(drv, XGMAC_MAC_PFC_PRI_EN_REG); regs[31] = dsaf_read_dev(drv, XGMAC_MAC_1588_CTRL_REG); regs[32] = dsaf_read_dev(drv, XGMAC_MAC_1588_TX_PORT_DLY_REG); regs[33] = dsaf_read_dev(drv, XGMAC_MAC_1588_RX_PORT_DLY_REG); regs[34] = dsaf_read_dev(drv, XGMAC_MAC_1588_ASYM_DLY_REG); regs[35] = dsaf_read_dev(drv, XGMAC_MAC_1588_ADJUST_CFG_REG); regs[36] = dsaf_read_dev(drv, XGMAC_MAC_Y1731_ETH_TYPE_REG); regs[37] = dsaf_read_dev(drv, XGMAC_MAC_MIB_CONTROL_REG); regs[38] = dsaf_read_dev(drv, XGMAC_MAC_WAN_RATE_ADJUST_REG); regs[39] = dsaf_read_dev(drv, XGMAC_MAC_TX_ERR_MARK_REG); regs[40] = dsaf_read_dev(drv, XGMAC_MAC_TX_LF_RF_CONTROL_REG); regs[41] = dsaf_read_dev(drv, XGMAC_MAC_RX_LF_RF_STATUS_REG); regs[42] = dsaf_read_dev(drv, XGMAC_MAC_TX_RUNT_PKT_CNT_REG); regs[43] = dsaf_read_dev(drv, XGMAC_MAC_RX_RUNT_PKT_CNT_REG); regs[44] = dsaf_read_dev(drv, XGMAC_MAC_RX_PREAM_ERR_PKT_CNT_REG); regs[45] = dsaf_read_dev(drv, XGMAC_MAC_TX_LF_RF_TERM_PKT_CNT_REG); regs[46] = dsaf_read_dev(drv, XGMAC_MAC_TX_SN_MISMATCH_PKT_CNT_REG); 
regs[47] = dsaf_read_dev(drv, XGMAC_MAC_RX_ERR_MSG_CNT_REG); regs[48] = dsaf_read_dev(drv, XGMAC_MAC_RX_ERR_EFD_CNT_REG); regs[49] = dsaf_read_dev(drv, XGMAC_MAC_ERR_INFO_REG); regs[50] = dsaf_read_dev(drv, XGMAC_MAC_DBG_INFO_REG); regs[51] = dsaf_read_dev(drv, XGMAC_PCS_BASER_SYNC_THD_REG); regs[52] = dsaf_read_dev(drv, XGMAC_PCS_STATUS1_REG); regs[53] = dsaf_read_dev(drv, XGMAC_PCS_BASER_STATUS1_REG); regs[54] = dsaf_read_dev(drv, XGMAC_PCS_BASER_STATUS2_REG); regs[55] = dsaf_read_dev(drv, XGMAC_PCS_BASER_SEEDA_0_REG); regs[56] = dsaf_read_dev(drv, XGMAC_PCS_BASER_SEEDA_1_REG); regs[57] = dsaf_read_dev(drv, XGMAC_PCS_BASER_SEEDB_0_REG); regs[58] = dsaf_read_dev(drv, XGMAC_PCS_BASER_SEEDB_1_REG); regs[59] = dsaf_read_dev(drv, XGMAC_PCS_BASER_TEST_CONTROL_REG); regs[60] = dsaf_read_dev(drv, XGMAC_PCS_BASER_TEST_ERR_CNT_REG); regs[61] = dsaf_read_dev(drv, XGMAC_PCS_DBG_INFO_REG); regs[62] = dsaf_read_dev(drv, XGMAC_PCS_DBG_INFO1_REG); regs[63] = dsaf_read_dev(drv, XGMAC_PCS_DBG_INFO2_REG); regs[64] = dsaf_read_dev(drv, XGMAC_PCS_DBG_INFO3_REG); regs[65] = dsaf_read_dev(drv, XGMAC_PMA_ENABLE_REG); regs[66] = dsaf_read_dev(drv, XGMAC_PMA_CONTROL_REG); regs[67] = dsaf_read_dev(drv, XGMAC_PMA_SIGNAL_STATUS_REG); regs[68] = dsaf_read_dev(drv, XGMAC_PMA_DBG_INFO_REG); regs[69] = dsaf_read_dev(drv, XGMAC_PMA_FEC_ABILITY_REG); regs[70] = dsaf_read_dev(drv, XGMAC_PMA_FEC_CONTROL_REG); regs[71] = dsaf_read_dev(drv, XGMAC_PMA_FEC_CORR_BLOCK_CNT__REG); regs[72] = dsaf_read_dev(drv, XGMAC_PMA_FEC_UNCORR_BLOCK_CNT__REG); /* status registers */ #define hns_xgmac_cpy_q(p, q) \ do {\ *(p) = (u32)(q);\ *((p) + 1) = (u32)((q) >> 32);\ } while (0) qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_FRAGMENT); hns_xgmac_cpy_q(&regs[73], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_UNDERSIZE); hns_xgmac_cpy_q(&regs[75], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_UNDERMIN); hns_xgmac_cpy_q(&regs[77], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_64OCTETS); hns_xgmac_cpy_q(&regs[79], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_65TO127OCTETS); hns_xgmac_cpy_q(&regs[81], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_128TO255OCTETS); hns_xgmac_cpy_q(&regs[83], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_256TO511OCTETS); hns_xgmac_cpy_q(&regs[85], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_512TO1023OCTETS); hns_xgmac_cpy_q(&regs[87], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_1024TO1518OCTETS); hns_xgmac_cpy_q(&regs[89], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_1519TOMAXOCTETS); hns_xgmac_cpy_q(&regs[91], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_1519TOMAXOCTETSOK); hns_xgmac_cpy_q(&regs[93], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_OVERSIZE); hns_xgmac_cpy_q(&regs[95], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_JABBER); hns_xgmac_cpy_q(&regs[97], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_GOODPKTS); hns_xgmac_cpy_q(&regs[99], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_GOODOCTETS); hns_xgmac_cpy_q(&regs[101], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_TOTAL_PKTS); hns_xgmac_cpy_q(&regs[103], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_TOTALOCTETS); hns_xgmac_cpy_q(&regs[105], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_UNICASTPKTS); hns_xgmac_cpy_q(&regs[107], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_MULTICASTPKTS); hns_xgmac_cpy_q(&regs[109], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_BROADCASTPKTS); hns_xgmac_cpy_q(&regs[111], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PRI0PAUSEPKTS); 
hns_xgmac_cpy_q(&regs[113], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PRI1PAUSEPKTS); hns_xgmac_cpy_q(&regs[115], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PRI2PAUSEPKTS); hns_xgmac_cpy_q(&regs[117], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PRI3PAUSEPKTS); hns_xgmac_cpy_q(&regs[119], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PRI4PAUSEPKTS); hns_xgmac_cpy_q(&regs[121], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PRI5PAUSEPKTS); hns_xgmac_cpy_q(&regs[123], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PRI6PAUSEPKTS); hns_xgmac_cpy_q(&regs[125], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PRI7PAUSEPKTS); hns_xgmac_cpy_q(&regs[127], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_MACCTRLPKTS); hns_xgmac_cpy_q(&regs[129], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_1731PKTS); hns_xgmac_cpy_q(&regs[131], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_1588PKTS); hns_xgmac_cpy_q(&regs[133], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_FROMAPPGOODPKTS); hns_xgmac_cpy_q(&regs[135], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_FROMAPPBADPKTS); hns_xgmac_cpy_q(&regs[137], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_ERRALLPKTS); hns_xgmac_cpy_q(&regs[139], qtmp); /* RX */ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_FRAGMENT); hns_xgmac_cpy_q(&regs[141], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTSUNDERSIZE); hns_xgmac_cpy_q(&regs[143], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_UNDERMIN); hns_xgmac_cpy_q(&regs[145], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_64OCTETS); hns_xgmac_cpy_q(&regs[147], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_65TO127OCTETS); hns_xgmac_cpy_q(&regs[149], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_128TO255OCTETS); hns_xgmac_cpy_q(&regs[151], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_256TO511OCTETS); hns_xgmac_cpy_q(&regs[153], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_512TO1023OCTETS); hns_xgmac_cpy_q(&regs[155], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_1024TO1518OCTETS); hns_xgmac_cpy_q(&regs[157], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_1519TOMAXOCTETS); hns_xgmac_cpy_q(&regs[159], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_1519TOMAXOCTETSOK); hns_xgmac_cpy_q(&regs[161], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_OVERSIZE); hns_xgmac_cpy_q(&regs[163], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_JABBER); hns_xgmac_cpy_q(&regs[165], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_GOODPKTS); hns_xgmac_cpy_q(&regs[167], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_GOODOCTETS); hns_xgmac_cpy_q(&regs[169], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_TOTAL_PKTS); hns_xgmac_cpy_q(&regs[171], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_TOTALOCTETS); hns_xgmac_cpy_q(&regs[173], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_UNICASTPKTS); hns_xgmac_cpy_q(&regs[175], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_MULTICASTPKTS); hns_xgmac_cpy_q(&regs[177], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_BROADCASTPKTS); hns_xgmac_cpy_q(&regs[179], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PRI0PAUSEPKTS); hns_xgmac_cpy_q(&regs[181], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PRI1PAUSEPKTS); hns_xgmac_cpy_q(&regs[183], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PRI2PAUSEPKTS); hns_xgmac_cpy_q(&regs[185], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PRI3PAUSEPKTS); hns_xgmac_cpy_q(&regs[187], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PRI4PAUSEPKTS); hns_xgmac_cpy_q(&regs[189], qtmp); qtmp = hns_mac_reg_read64(drv, 
XGMAC_RX_PRI5PAUSEPKTS); hns_xgmac_cpy_q(&regs[191], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PRI6PAUSEPKTS); hns_xgmac_cpy_q(&regs[193], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PRI7PAUSEPKTS); hns_xgmac_cpy_q(&regs[195], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_MACCTRLPKTS); hns_xgmac_cpy_q(&regs[197], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_SENDAPPGOODPKTS); hns_xgmac_cpy_q(&regs[199], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_SENDAPPBADPKTS); hns_xgmac_cpy_q(&regs[201], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_1731PKTS); hns_xgmac_cpy_q(&regs[203], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_SYMBOLERRPKTS); hns_xgmac_cpy_q(&regs[205], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_FCSERRPKTS); hns_xgmac_cpy_q(&regs[207], qtmp); /* mark end of mac regs */ for (i = 208; i < 214; i++) regs[i] = 0xaaaaaaaa; } /** *hns_xgmac_get_stats - get xgmac statistic *@mac_drv: mac driver *@data:data for value of stats regs */ static void hns_xgmac_get_stats(void *mac_drv, u64 *data) { u32 i; u64 *buf = data; struct mac_driver *drv = (struct mac_driver *)mac_drv; struct mac_hw_stats *hw_stats = NULL; hw_stats = &drv->mac_cb->hw_stats; for (i = 0; i < ARRAY_SIZE(g_xgmac_stats_string); i++) { buf[i] = DSAF_STATS_READ(hw_stats, g_xgmac_stats_string[i].offset); } } /** *hns_xgmac_get_strings - get xgmac strings name *@stringset: type of values in data *@data:data for value of string name */ static void hns_xgmac_get_strings(u32 stringset, u8 *data) { char *buff = (char *)data; u32 i; if (stringset != ETH_SS_STATS) return; for (i = 0; i < ARRAY_SIZE(g_xgmac_stats_string); i++) { snprintf(buff, ETH_GSTRING_LEN, g_xgmac_stats_string[i].desc); buff = buff + ETH_GSTRING_LEN; } } /** *hns_xgmac_get_sset_count - get xgmac string set count *@stringset: type of values in data *return xgmac string set count */ static int hns_xgmac_get_sset_count(int stringset) { if (stringset == ETH_SS_STATS) return ARRAY_SIZE(g_xgmac_stats_string); return 0; } /** *hns_xgmac_get_regs_count - get xgmac regs count *return xgmac regs count */ static int hns_xgmac_get_regs_count(void) { return HNS_XGMAC_DUMP_NUM; } void *hns_xgmac_config(struct hns_mac_cb *mac_cb, struct mac_params *mac_param) { struct mac_driver *mac_drv; mac_drv = devm_kzalloc(mac_cb->dev, sizeof(*mac_drv), GFP_KERNEL); if (!mac_drv) return NULL; mac_drv->mac_init = hns_xgmac_init; mac_drv->mac_enable = hns_xgmac_enable; mac_drv->mac_disable = hns_xgmac_disable; mac_drv->mac_id = mac_param->mac_id; mac_drv->mac_mode = mac_param->mac_mode; mac_drv->io_base = mac_param->vaddr; mac_drv->dev = mac_param->dev; mac_drv->mac_cb = mac_cb; mac_drv->set_mac_addr = hns_xgmac_set_pausefrm_mac_addr; mac_drv->set_an_mode = NULL; mac_drv->config_loopback = NULL; mac_drv->config_pad_and_crc = hns_xgmac_config_pad_and_crc; mac_drv->config_half_duplex = NULL; mac_drv->set_rx_ignore_pause_frames = hns_xgmac_set_rx_ignore_pause_frames; mac_drv->mac_free = hns_xgmac_free; mac_drv->adjust_link = NULL; mac_drv->set_tx_auto_pause_frames = hns_xgmac_set_tx_auto_pause_frames; mac_drv->config_max_frame_length = hns_xgmac_config_max_frame_length; mac_drv->mac_pausefrm_cfg = hns_xgmac_pausefrm_cfg; mac_drv->autoneg_stat = NULL; mac_drv->get_info = hns_xgmac_get_info; mac_drv->get_pause_enable = hns_xgmac_get_pausefrm_cfg; mac_drv->get_link_status = hns_xgmac_get_link_status; mac_drv->get_regs = hns_xgmac_get_regs; mac_drv->get_ethtool_stats = hns_xgmac_get_stats; mac_drv->get_sset_count = hns_xgmac_get_sset_count; mac_drv->get_regs_count = 
hns_xgmac_get_regs_count; mac_drv->get_strings = hns_xgmac_get_strings; mac_drv->update_stats = hns_xgmac_update_stats; return (void *)mac_drv; }
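The record's method of interest, hns_xgmac_get_sset_count, and its companion hns_xgmac_get_strings (both visible near the end of the blob above) implement the usual ethtool contract: the count callback must report exactly as many ETH_GSTRING_LEN-sized slots as the strings callback will later fill, which is why both sides derive their bound from the same g_xgmac_stats_string table. A stripped-down sketch of that contract follows; GSTRING_LEN, SS_STATS and the local table are stand-ins, not the kernel API.

#include <stdio.h>
#include <stddef.h>

#define GSTRING_LEN 32			/* stand-in for ETH_GSTRING_LEN */
#define SS_STATS    1			/* stand-in for ETH_SS_STATS   */

static const char *stats_names[] = { "tx_good_pkts", "rx_good_pkts" };
#define N_STATS (sizeof(stats_names) / sizeof(stats_names[0]))

/* count callback: report slots only for the set we implement */
static int get_sset_count(int stringset)
{
	return stringset == SS_STATS ? (int)N_STATS : 0;
}

/* strings callback: fill exactly that many GSTRING_LEN-sized slots */
static void get_strings(int stringset, char *buff)
{
	size_t i;

	if (stringset != SS_STATS)
		return;
	for (i = 0; i < N_STATS; i++)
		snprintf(buff + i * GSTRING_LEN, GSTRING_LEN, "%s",
		    stats_names[i]);
}

int main(void)
{
	char buff[N_STATS * GSTRING_LEN];

	get_strings(SS_STATS, buff);
	printf("%d stats, first: %s\n", get_sset_count(SS_STATS), buff);
	return 0;
}

Note that the sketch passes each name through a "%s" format rather than using it as the format string itself (as the driver's snprintf(buff, ETH_GSTRING_LEN, ...desc) does), which is the safer idiom whenever a name could contain conversion specifiers.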
/* * Copyright (c) 2014-2015 Hisilicon Limited. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #include <linux/io-64-nonatomic-hi-lo.h> #include <linux/of_mdio.h> #include "hns_dsaf_main.h" #include "hns_dsaf_mac.h" #include "hns_dsaf_xgmac.h" #include "hns_dsaf_reg.h" static const struct mac_stats_string g_xgmac_stats_string[] = { {"xgmac_tx_bad_pkts_minto64", MAC_STATS_FIELD_OFF(tx_fragment_err)}, {"xgmac_tx_good_pkts_minto64", MAC_STATS_FIELD_OFF(tx_undersize)}, {"xgmac_tx_total_pkts_minto64", MAC_STATS_FIELD_OFF(tx_under_min_pkts)}, {"xgmac_tx_pkts_64", MAC_STATS_FIELD_OFF(tx_64bytes)}, {"xgmac_tx_pkts_65to127", MAC_STATS_FIELD_OFF(tx_65to127)}, {"xgmac_tx_pkts_128to255", MAC_STATS_FIELD_OFF(tx_128to255)}, {"xgmac_tx_pkts_256to511", MAC_STATS_FIELD_OFF(tx_256to511)}, {"xgmac_tx_pkts_512to1023", MAC_STATS_FIELD_OFF(tx_512to1023)}, {"xgmac_tx_pkts_1024to1518", MAC_STATS_FIELD_OFF(tx_1024to1518)}, {"xgmac_tx_pkts_1519tomax", MAC_STATS_FIELD_OFF(tx_1519tomax)}, {"xgmac_tx_good_pkts_1519tomax", MAC_STATS_FIELD_OFF(tx_1519tomax_good)}, {"xgmac_tx_good_pkts_untralmax", MAC_STATS_FIELD_OFF(tx_oversize)}, {"xgmac_tx_bad_pkts_untralmax", MAC_STATS_FIELD_OFF(tx_jabber_err)}, {"xgmac_tx_good_pkts_all", MAC_STATS_FIELD_OFF(tx_good_pkts)}, {"xgmac_tx_good_byte_all", MAC_STATS_FIELD_OFF(tx_good_bytes)}, {"xgmac_tx_total_pkt", MAC_STATS_FIELD_OFF(tx_total_pkts)}, {"xgmac_tx_total_byt", MAC_STATS_FIELD_OFF(tx_total_bytes)}, {"xgmac_tx_uc_pkt", MAC_STATS_FIELD_OFF(tx_uc_pkts)}, {"xgmac_tx_mc_pkt", MAC_STATS_FIELD_OFF(tx_mc_pkts)}, {"xgmac_tx_bc_pkt", MAC_STATS_FIELD_OFF(tx_bc_pkts)}, {"xgmac_tx_pause_frame_num", MAC_STATS_FIELD_OFF(tx_pfc_tc0)}, {"xgmac_tx_pfc_per_1pause_framer", MAC_STATS_FIELD_OFF(tx_pfc_tc1)}, {"xgmac_tx_pfc_per_2pause_framer", MAC_STATS_FIELD_OFF(tx_pfc_tc2)}, {"xgmac_tx_pfc_per_3pause_framer", MAC_STATS_FIELD_OFF(tx_pfc_tc3)}, {"xgmac_tx_pfc_per_4pause_framer", MAC_STATS_FIELD_OFF(tx_pfc_tc4)}, {"xgmac_tx_pfc_per_5pause_framer", MAC_STATS_FIELD_OFF(tx_pfc_tc5)}, {"xgmac_tx_pfc_per_6pause_framer", MAC_STATS_FIELD_OFF(tx_pfc_tc6)}, {"xgmac_tx_pfc_per_7pause_framer", MAC_STATS_FIELD_OFF(tx_pfc_tc7)}, {"xgmac_tx_mac_ctrol_frame", MAC_STATS_FIELD_OFF(tx_ctrl)}, {"xgmac_tx_1731_pkts", MAC_STATS_FIELD_OFF(tx_1731_pkts)}, {"xgmac_tx_1588_pkts", MAC_STATS_FIELD_OFF(tx_1588_pkts)}, {"xgmac_rx_good_pkt_from_dsaf", MAC_STATS_FIELD_OFF(rx_good_from_sw)}, {"xgmac_rx_bad_pkt_from_dsaf", MAC_STATS_FIELD_OFF(rx_bad_from_sw)}, {"xgmac_tx_bad_pkt_64tomax", MAC_STATS_FIELD_OFF(tx_bad_pkts)}, {"xgmac_rx_bad_pkts_minto64", MAC_STATS_FIELD_OFF(rx_fragment_err)}, {"xgmac_rx_good_pkts_minto64", MAC_STATS_FIELD_OFF(rx_undersize)}, {"xgmac_rx_total_pkts_minto64", MAC_STATS_FIELD_OFF(rx_under_min)}, {"xgmac_rx_pkt_64", MAC_STATS_FIELD_OFF(rx_64bytes)}, {"xgmac_rx_pkt_65to127", MAC_STATS_FIELD_OFF(rx_65to127)}, {"xgmac_rx_pkt_128to255", MAC_STATS_FIELD_OFF(rx_128to255)}, {"xgmac_rx_pkt_256to511", MAC_STATS_FIELD_OFF(rx_256to511)}, {"xgmac_rx_pkt_512to1023", MAC_STATS_FIELD_OFF(rx_512to1023)}, {"xgmac_rx_pkt_1024to1518", MAC_STATS_FIELD_OFF(rx_1024to1518)}, {"xgmac_rx_pkt_1519tomax", MAC_STATS_FIELD_OFF(rx_1519tomax)}, {"xgmac_rx_good_pkt_1519tomax", MAC_STATS_FIELD_OFF(rx_1519tomax_good)}, {"xgmac_rx_good_pkt_untramax", MAC_STATS_FIELD_OFF(rx_oversize)}, {"xgmac_rx_bad_pkt_untramax", 
MAC_STATS_FIELD_OFF(rx_jabber_err)}, {"xgmac_rx_good_pkt", MAC_STATS_FIELD_OFF(rx_good_pkts)}, {"xgmac_rx_good_byt", MAC_STATS_FIELD_OFF(rx_good_bytes)}, {"xgmac_rx_pkt", MAC_STATS_FIELD_OFF(rx_total_pkts)}, {"xgmac_rx_byt", MAC_STATS_FIELD_OFF(rx_total_bytes)}, {"xgmac_rx_uc_pkt", MAC_STATS_FIELD_OFF(rx_uc_pkts)}, {"xgmac_rx_mc_pkt", MAC_STATS_FIELD_OFF(rx_mc_pkts)}, {"xgmac_rx_bc_pkt", MAC_STATS_FIELD_OFF(rx_bc_pkts)}, {"xgmac_rx_pause_frame_num", MAC_STATS_FIELD_OFF(rx_pfc_tc0)}, {"xgmac_rx_pfc_per_1pause_frame", MAC_STATS_FIELD_OFF(rx_pfc_tc1)}, {"xgmac_rx_pfc_per_2pause_frame", MAC_STATS_FIELD_OFF(rx_pfc_tc2)}, {"xgmac_rx_pfc_per_3pause_frame", MAC_STATS_FIELD_OFF(rx_pfc_tc3)}, {"xgmac_rx_pfc_per_4pause_frame", MAC_STATS_FIELD_OFF(rx_pfc_tc4)}, {"xgmac_rx_pfc_per_5pause_frame", MAC_STATS_FIELD_OFF(rx_pfc_tc5)}, {"xgmac_rx_pfc_per_6pause_frame", MAC_STATS_FIELD_OFF(rx_pfc_tc6)}, {"xgmac_rx_pfc_per_7pause_frame", MAC_STATS_FIELD_OFF(rx_pfc_tc7)}, {"xgmac_rx_mac_control", MAC_STATS_FIELD_OFF(rx_unknown_ctrl)}, {"xgmac_tx_good_pkt_todsaf", MAC_STATS_FIELD_OFF(tx_good_to_sw)}, {"xgmac_tx_bad_pkt_todsaf", MAC_STATS_FIELD_OFF(tx_bad_to_sw)}, {"xgmac_rx_1731_pkt", MAC_STATS_FIELD_OFF(rx_1731_pkts)}, {"xgmac_rx_symbol_err_pkt", MAC_STATS_FIELD_OFF(rx_symbol_err)}, {"xgmac_rx_fcs_pkt", MAC_STATS_FIELD_OFF(rx_fcs_err)} }; /** *hns_xgmac_tx_enable - xgmac port tx enable *@drv: mac driver *@value: value of enable */ static void hns_xgmac_tx_enable(struct mac_driver *drv, u32 value) { dsaf_set_dev_bit(drv, XGMAC_MAC_ENABLE_REG, XGMAC_ENABLE_TX_B, !!value); } /** *hns_xgmac_rx_enable - xgmac port rx enable *@drv: mac driver *@value: value of enable */ static void hns_xgmac_rx_enable(struct mac_driver *drv, u32 value) { dsaf_set_dev_bit(drv, XGMAC_MAC_ENABLE_REG, XGMAC_ENABLE_RX_B, !!value); } /** * hns_xgmac_tx_lf_rf_insert - insert lf rf control about xgmac * @mac_drv: mac driver * @mode: inserf rf or lf */ static void hns_xgmac_lf_rf_insert(struct mac_driver *mac_drv, u32 mode) { dsaf_set_dev_field(mac_drv, XGMAC_MAC_TX_LF_RF_CONTROL_REG, XGMAC_LF_RF_INSERT_M, XGMAC_LF_RF_INSERT_S, mode); } /** * hns_xgmac__lf_rf_control_init - initial the lf rf control register * @mac_drv: mac driver */ static void hns_xgmac_lf_rf_control_init(struct mac_driver *mac_drv) { u32 val = 0; dsaf_set_bit(val, XGMAC_UNIDIR_EN_B, 0); dsaf_set_bit(val, XGMAC_RF_TX_EN_B, 1); dsaf_set_field(val, XGMAC_LF_RF_INSERT_M, XGMAC_LF_RF_INSERT_S, 0); dsaf_write_reg(mac_drv, XGMAC_MAC_TX_LF_RF_CONTROL_REG, val); } /** *hns_xgmac_enable - enable xgmac port *@drv: mac driver *@mode: mode of mac port */ static void hns_xgmac_enable(void *mac_drv, enum mac_commom_mode mode) { struct mac_driver *drv = (struct mac_driver *)mac_drv; hns_xgmac_lf_rf_insert(drv, HNS_XGMAC_NO_LF_RF_INSERT); /*enable XGE rX/tX */ if (mode == MAC_COMM_MODE_TX) { hns_xgmac_tx_enable(drv, 1); } else if (mode == MAC_COMM_MODE_RX) { hns_xgmac_rx_enable(drv, 1); } else if (mode == MAC_COMM_MODE_RX_AND_TX) { hns_xgmac_tx_enable(drv, 1); hns_xgmac_rx_enable(drv, 1); } else { dev_err(drv->dev, "error mac mode:%d\n", mode); } } /** *hns_xgmac_disable - disable xgmac port *@mac_drv: mac driver *@mode: mode of mac port */ static void hns_xgmac_disable(void *mac_drv, enum mac_commom_mode mode) { struct mac_driver *drv = (struct mac_driver *)mac_drv; if (mode == MAC_COMM_MODE_TX) { hns_xgmac_tx_enable(drv, 0); } else if (mode == MAC_COMM_MODE_RX) { hns_xgmac_rx_enable(drv, 0); } else if (mode == MAC_COMM_MODE_RX_AND_TX) { hns_xgmac_tx_enable(drv, 0); 
hns_xgmac_rx_enable(drv, 0); } hns_xgmac_lf_rf_insert(drv, HNS_XGMAC_LF_INSERT); } /** *hns_xgmac_pma_fec_enable - xgmac PMA FEC enable *@drv: mac driver *@tx_value: tx value *@rx_value: rx value *return status */ static void hns_xgmac_pma_fec_enable(struct mac_driver *drv, u32 tx_value, u32 rx_value) { u32 origin = dsaf_read_dev(drv, XGMAC_PMA_FEC_CONTROL_REG); dsaf_set_bit(origin, XGMAC_PMA_FEC_CTL_TX_B, !!tx_value); dsaf_set_bit(origin, XGMAC_PMA_FEC_CTL_RX_B, !!rx_value); dsaf_write_dev(drv, XGMAC_PMA_FEC_CONTROL_REG, origin); } /* clr exc irq for xge*/ static void hns_xgmac_exc_irq_en(struct mac_driver *drv, u32 en) { u32 clr_vlue = 0xfffffffful; u32 msk_vlue = en ? 0xfffffffful : 0; /*1 is en, 0 is dis*/ dsaf_write_dev(drv, XGMAC_INT_STATUS_REG, clr_vlue); dsaf_write_dev(drv, XGMAC_INT_ENABLE_REG, msk_vlue); } /** *hns_xgmac_init - initialize XGE *@mac_drv: mac driver */ static void hns_xgmac_init(void *mac_drv) { struct mac_driver *drv = (struct mac_driver *)mac_drv; struct dsaf_device *dsaf_dev = (struct dsaf_device *)dev_get_drvdata(drv->dev); u32 port = drv->mac_id; dsaf_dev->misc_op->xge_srst(dsaf_dev, port, 0); mdelay(100); dsaf_dev->misc_op->xge_srst(dsaf_dev, port, 1); mdelay(100); hns_xgmac_lf_rf_control_init(drv); hns_xgmac_exc_irq_en(drv, 0); hns_xgmac_pma_fec_enable(drv, 0x0, 0x0); hns_xgmac_disable(mac_drv, MAC_COMM_MODE_RX_AND_TX); } /** *hns_xgmac_config_pad_and_crc - set xgmac pad and crc enable the same time *@mac_drv: mac driver *@newval:enable of pad and crc */ static void hns_xgmac_config_pad_and_crc(void *mac_drv, u8 newval) { struct mac_driver *drv = (struct mac_driver *)mac_drv; u32 origin = dsaf_read_dev(drv, XGMAC_MAC_CONTROL_REG); dsaf_set_bit(origin, XGMAC_CTL_TX_PAD_B, !!newval); dsaf_set_bit(origin, XGMAC_CTL_TX_FCS_B, !!newval); dsaf_set_bit(origin, XGMAC_CTL_RX_FCS_B, !!newval); dsaf_write_dev(drv, XGMAC_MAC_CONTROL_REG, origin); } /** *hns_xgmac_pausefrm_cfg - set pause param about xgmac *@mac_drv: mac driver *@newval:enable of pad and crc */ static void hns_xgmac_pausefrm_cfg(void *mac_drv, u32 rx_en, u32 tx_en) { struct mac_driver *drv = (struct mac_driver *)mac_drv; u32 origin = dsaf_read_dev(drv, XGMAC_MAC_PAUSE_CTRL_REG); dsaf_set_bit(origin, XGMAC_PAUSE_CTL_TX_B, !!tx_en); dsaf_set_bit(origin, XGMAC_PAUSE_CTL_RX_B, !!rx_en); dsaf_write_dev(drv, XGMAC_MAC_PAUSE_CTRL_REG, origin); } static void hns_xgmac_set_pausefrm_mac_addr(void *mac_drv, char *mac_addr) { struct mac_driver *drv = (struct mac_driver *)mac_drv; u32 high_val = mac_addr[1] | (mac_addr[0] << 8); u32 low_val = mac_addr[5] | (mac_addr[4] << 8) | (mac_addr[3] << 16) | (mac_addr[2] << 24); dsaf_write_dev(drv, XGMAC_MAC_PAUSE_LOCAL_MAC_L_REG, low_val); dsaf_write_dev(drv, XGMAC_MAC_PAUSE_LOCAL_MAC_H_REG, high_val); } /** *hns_xgmac_set_rx_ignore_pause_frames - set rx pause param about xgmac *@mac_drv: mac driver *@enable:enable rx pause param */ static void hns_xgmac_set_rx_ignore_pause_frames(void *mac_drv, u32 enable) { struct mac_driver *drv = (struct mac_driver *)mac_drv; dsaf_set_dev_bit(drv, XGMAC_MAC_PAUSE_CTRL_REG, XGMAC_PAUSE_CTL_RX_B, !!enable); } /** *hns_xgmac_set_tx_auto_pause_frames - set tx pause param about xgmac *@mac_drv: mac driver *@enable:enable tx pause param */ static void hns_xgmac_set_tx_auto_pause_frames(void *mac_drv, u16 enable) { struct mac_driver *drv = (struct mac_driver *)mac_drv; dsaf_set_dev_bit(drv, XGMAC_MAC_PAUSE_CTRL_REG, XGMAC_PAUSE_CTL_TX_B, !!enable); /*if enable is not zero ,set tx pause time */ if (enable) dsaf_write_dev(drv, 
XGMAC_MAC_PAUSE_TIME_REG, enable); } /** *hns_xgmac_config_max_frame_length - set xgmac max frame length *@mac_drv: mac driver *@newval:xgmac max frame length */ static void hns_xgmac_config_max_frame_length(void *mac_drv, u16 newval) { struct mac_driver *drv = (struct mac_driver *)mac_drv; dsaf_write_dev(drv, XGMAC_MAC_MAX_PKT_SIZE_REG, newval); } void hns_xgmac_update_stats(void *mac_drv) { struct mac_driver *drv = (struct mac_driver *)mac_drv; struct mac_hw_stats *hw_stats = &drv->mac_cb->hw_stats; /* TX */ hw_stats->tx_fragment_err = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_FRAGMENT); hw_stats->tx_undersize = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_UNDERSIZE); hw_stats->tx_under_min_pkts = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_UNDERMIN); hw_stats->tx_64bytes = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_64OCTETS); hw_stats->tx_65to127 = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_65TO127OCTETS); hw_stats->tx_128to255 = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_128TO255OCTETS); hw_stats->tx_256to511 = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_256TO511OCTETS); hw_stats->tx_512to1023 = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_512TO1023OCTETS); hw_stats->tx_1024to1518 = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_1024TO1518OCTETS); hw_stats->tx_1519tomax = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_1519TOMAXOCTETS); hw_stats->tx_1519tomax_good = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_1519TOMAXOCTETSOK); hw_stats->tx_oversize = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_OVERSIZE); hw_stats->tx_jabber_err = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_JABBER); hw_stats->tx_good_pkts = hns_mac_reg_read64(drv, XGMAC_TX_GOODPKTS); hw_stats->tx_good_bytes = hns_mac_reg_read64(drv, XGMAC_TX_GOODOCTETS); hw_stats->tx_total_pkts = hns_mac_reg_read64(drv, XGMAC_TX_TOTAL_PKTS); hw_stats->tx_total_bytes = hns_mac_reg_read64(drv, XGMAC_TX_TOTALOCTETS); hw_stats->tx_uc_pkts = hns_mac_reg_read64(drv, XGMAC_TX_UNICASTPKTS); hw_stats->tx_mc_pkts = hns_mac_reg_read64(drv, XGMAC_TX_MULTICASTPKTS); hw_stats->tx_bc_pkts = hns_mac_reg_read64(drv, XGMAC_TX_BROADCASTPKTS); hw_stats->tx_pfc_tc0 = hns_mac_reg_read64(drv, XGMAC_TX_PRI0PAUSEPKTS); hw_stats->tx_pfc_tc1 = hns_mac_reg_read64(drv, XGMAC_TX_PRI1PAUSEPKTS); hw_stats->tx_pfc_tc2 = hns_mac_reg_read64(drv, XGMAC_TX_PRI2PAUSEPKTS); hw_stats->tx_pfc_tc3 = hns_mac_reg_read64(drv, XGMAC_TX_PRI3PAUSEPKTS); hw_stats->tx_pfc_tc4 = hns_mac_reg_read64(drv, XGMAC_TX_PRI4PAUSEPKTS); hw_stats->tx_pfc_tc5 = hns_mac_reg_read64(drv, XGMAC_TX_PRI5PAUSEPKTS); hw_stats->tx_pfc_tc6 = hns_mac_reg_read64(drv, XGMAC_TX_PRI6PAUSEPKTS); hw_stats->tx_pfc_tc7 = hns_mac_reg_read64(drv, XGMAC_TX_PRI7PAUSEPKTS); hw_stats->tx_ctrl = hns_mac_reg_read64(drv, XGMAC_TX_MACCTRLPKTS); hw_stats->tx_1731_pkts = hns_mac_reg_read64(drv, XGMAC_TX_1731PKTS); hw_stats->tx_1588_pkts = hns_mac_reg_read64(drv, XGMAC_TX_1588PKTS); hw_stats->rx_good_from_sw = hns_mac_reg_read64(drv, XGMAC_RX_FROMAPPGOODPKTS); hw_stats->rx_bad_from_sw = hns_mac_reg_read64(drv, XGMAC_RX_FROMAPPBADPKTS); hw_stats->tx_bad_pkts = hns_mac_reg_read64(drv, XGMAC_TX_ERRALLPKTS); /* RX */ hw_stats->rx_fragment_err = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_FRAGMENT); hw_stats->rx_undersize = hns_mac_reg_read64(drv, XGMAC_RX_PKTSUNDERSIZE); hw_stats->rx_under_min = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_UNDERMIN); hw_stats->rx_64bytes = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_64OCTETS); hw_stats->rx_65to127 = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_65TO127OCTETS); hw_stats->rx_128to255 = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_128TO255OCTETS); hw_stats->rx_256to511 = hns_mac_reg_read64(drv, 
XGMAC_RX_PKTS_256TO511OCTETS); hw_stats->rx_512to1023 = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_512TO1023OCTETS); hw_stats->rx_1024to1518 = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_1024TO1518OCTETS); hw_stats->rx_1519tomax = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_1519TOMAXOCTETS); hw_stats->rx_1519tomax_good = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_1519TOMAXOCTETSOK); hw_stats->rx_oversize = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_OVERSIZE); hw_stats->rx_jabber_err = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_JABBER); hw_stats->rx_good_pkts = hns_mac_reg_read64(drv, XGMAC_RX_GOODPKTS); hw_stats->rx_good_bytes = hns_mac_reg_read64(drv, XGMAC_RX_GOODOCTETS); hw_stats->rx_total_pkts = hns_mac_reg_read64(drv, XGMAC_RX_TOTAL_PKTS); hw_stats->rx_total_bytes = hns_mac_reg_read64(drv, XGMAC_RX_TOTALOCTETS); hw_stats->rx_uc_pkts = hns_mac_reg_read64(drv, XGMAC_RX_UNICASTPKTS); hw_stats->rx_mc_pkts = hns_mac_reg_read64(drv, XGMAC_RX_MULTICASTPKTS); hw_stats->rx_bc_pkts = hns_mac_reg_read64(drv, XGMAC_RX_BROADCASTPKTS); hw_stats->rx_pfc_tc0 = hns_mac_reg_read64(drv, XGMAC_RX_PRI0PAUSEPKTS); hw_stats->rx_pfc_tc1 = hns_mac_reg_read64(drv, XGMAC_RX_PRI1PAUSEPKTS); hw_stats->rx_pfc_tc2 = hns_mac_reg_read64(drv, XGMAC_RX_PRI2PAUSEPKTS); hw_stats->rx_pfc_tc3 = hns_mac_reg_read64(drv, XGMAC_RX_PRI3PAUSEPKTS); hw_stats->rx_pfc_tc4 = hns_mac_reg_read64(drv, XGMAC_RX_PRI4PAUSEPKTS); hw_stats->rx_pfc_tc5 = hns_mac_reg_read64(drv, XGMAC_RX_PRI5PAUSEPKTS); hw_stats->rx_pfc_tc6 = hns_mac_reg_read64(drv, XGMAC_RX_PRI6PAUSEPKTS); hw_stats->rx_pfc_tc7 = hns_mac_reg_read64(drv, XGMAC_RX_PRI7PAUSEPKTS); hw_stats->rx_unknown_ctrl = hns_mac_reg_read64(drv, XGMAC_RX_MACCTRLPKTS); hw_stats->tx_good_to_sw = hns_mac_reg_read64(drv, XGMAC_TX_SENDAPPGOODPKTS); hw_stats->tx_bad_to_sw = hns_mac_reg_read64(drv, XGMAC_TX_SENDAPPBADPKTS); hw_stats->rx_1731_pkts = hns_mac_reg_read64(drv, XGMAC_RX_1731PKTS); hw_stats->rx_symbol_err = hns_mac_reg_read64(drv, XGMAC_RX_SYMBOLERRPKTS); hw_stats->rx_fcs_err = hns_mac_reg_read64(drv, XGMAC_RX_FCSERRPKTS); } /** *hns_xgmac_free - free xgmac driver *@mac_drv: mac driver */ static void hns_xgmac_free(void *mac_drv) { struct mac_driver *drv = (struct mac_driver *)mac_drv; struct dsaf_device *dsaf_dev = (struct dsaf_device *)dev_get_drvdata(drv->dev); u32 mac_id = drv->mac_id; dsaf_dev->misc_op->xge_srst(dsaf_dev, mac_id, 0); } /** *hns_xgmac_get_info - get xgmac information *@mac_drv: mac driver *@mac_info:mac information */ static void hns_xgmac_get_info(void *mac_drv, struct mac_info *mac_info) { struct mac_driver *drv = (struct mac_driver *)mac_drv; u32 pause_time, pause_ctrl, port_mode, ctrl_val; ctrl_val = dsaf_read_dev(drv, XGMAC_MAC_CONTROL_REG); mac_info->pad_and_crc_en = dsaf_get_bit(ctrl_val, XGMAC_CTL_TX_PAD_B); mac_info->auto_neg = 0; pause_time = dsaf_read_dev(drv, XGMAC_MAC_PAUSE_TIME_REG); mac_info->tx_pause_time = pause_time; port_mode = dsaf_read_dev(drv, XGMAC_PORT_MODE_REG); mac_info->port_en = dsaf_get_field(port_mode, XGMAC_PORT_MODE_TX_M, XGMAC_PORT_MODE_TX_S) && dsaf_get_field(port_mode, XGMAC_PORT_MODE_RX_M, XGMAC_PORT_MODE_RX_S); mac_info->duplex = 1; mac_info->speed = MAC_SPEED_10000; pause_ctrl = dsaf_read_dev(drv, XGMAC_MAC_PAUSE_CTRL_REG); mac_info->rx_pause_en = dsaf_get_bit(pause_ctrl, XGMAC_PAUSE_CTL_RX_B); mac_info->tx_pause_en = dsaf_get_bit(pause_ctrl, XGMAC_PAUSE_CTL_TX_B); } /** *hns_xgmac_get_pausefrm_cfg - get xgmac pause param *@mac_drv: mac driver *@rx_en:xgmac rx pause enable *@tx_en:xgmac tx pause enable */ static void hns_xgmac_get_pausefrm_cfg(void *mac_drv, u32 
*rx_en, u32 *tx_en) { struct mac_driver *drv = (struct mac_driver *)mac_drv; u32 pause_ctrl; pause_ctrl = dsaf_read_dev(drv, XGMAC_MAC_PAUSE_CTRL_REG); *rx_en = dsaf_get_bit(pause_ctrl, XGMAC_PAUSE_CTL_RX_B); *tx_en = dsaf_get_bit(pause_ctrl, XGMAC_PAUSE_CTL_TX_B); } /** *hns_xgmac_get_link_status - get xgmac link status *@mac_drv: mac driver *@link_stat: xgmac link stat */ static void hns_xgmac_get_link_status(void *mac_drv, u32 *link_stat) { struct mac_driver *drv = (struct mac_driver *)mac_drv; *link_stat = dsaf_read_dev(drv, XGMAC_LINK_STATUS_REG); } /** *hns_xgmac_get_regs - dump xgmac regs *@mac_drv: mac driver *@cmd:ethtool cmd *@data:data for value of regs */ static void hns_xgmac_get_regs(void *mac_drv, void *data) { u32 i = 0; struct mac_driver *drv = (struct mac_driver *)mac_drv; u32 *regs = data; u64 qtmp; /* base config registers */ regs[0] = dsaf_read_dev(drv, XGMAC_INT_STATUS_REG); regs[1] = dsaf_read_dev(drv, XGMAC_INT_ENABLE_REG); regs[2] = dsaf_read_dev(drv, XGMAC_INT_SET_REG); regs[3] = dsaf_read_dev(drv, XGMAC_IERR_U_INFO_REG); regs[4] = dsaf_read_dev(drv, XGMAC_OVF_INFO_REG); regs[5] = dsaf_read_dev(drv, XGMAC_OVF_CNT_REG); regs[6] = dsaf_read_dev(drv, XGMAC_PORT_MODE_REG); regs[7] = dsaf_read_dev(drv, XGMAC_CLK_ENABLE_REG); regs[8] = dsaf_read_dev(drv, XGMAC_RESET_REG); regs[9] = dsaf_read_dev(drv, XGMAC_LINK_CONTROL_REG); regs[10] = dsaf_read_dev(drv, XGMAC_LINK_STATUS_REG); regs[11] = dsaf_read_dev(drv, XGMAC_SPARE_REG); regs[12] = dsaf_read_dev(drv, XGMAC_SPARE_CNT_REG); regs[13] = dsaf_read_dev(drv, XGMAC_MAC_ENABLE_REG); regs[14] = dsaf_read_dev(drv, XGMAC_MAC_CONTROL_REG); regs[15] = dsaf_read_dev(drv, XGMAC_MAC_IPG_REG); regs[16] = dsaf_read_dev(drv, XGMAC_MAC_MSG_CRC_EN_REG); regs[17] = dsaf_read_dev(drv, XGMAC_MAC_MSG_IMG_REG); regs[18] = dsaf_read_dev(drv, XGMAC_MAC_MSG_FC_CFG_REG); regs[19] = dsaf_read_dev(drv, XGMAC_MAC_MSG_TC_CFG_REG); regs[20] = dsaf_read_dev(drv, XGMAC_MAC_PAD_SIZE_REG); regs[21] = dsaf_read_dev(drv, XGMAC_MAC_MIN_PKT_SIZE_REG); regs[22] = dsaf_read_dev(drv, XGMAC_MAC_MAX_PKT_SIZE_REG); regs[23] = dsaf_read_dev(drv, XGMAC_MAC_PAUSE_CTRL_REG); regs[24] = dsaf_read_dev(drv, XGMAC_MAC_PAUSE_TIME_REG); regs[25] = dsaf_read_dev(drv, XGMAC_MAC_PAUSE_GAP_REG); regs[26] = dsaf_read_dev(drv, XGMAC_MAC_PAUSE_LOCAL_MAC_H_REG); regs[27] = dsaf_read_dev(drv, XGMAC_MAC_PAUSE_LOCAL_MAC_L_REG); regs[28] = dsaf_read_dev(drv, XGMAC_MAC_PAUSE_PEER_MAC_H_REG); regs[29] = dsaf_read_dev(drv, XGMAC_MAC_PAUSE_PEER_MAC_L_REG); regs[30] = dsaf_read_dev(drv, XGMAC_MAC_PFC_PRI_EN_REG); regs[31] = dsaf_read_dev(drv, XGMAC_MAC_1588_CTRL_REG); regs[32] = dsaf_read_dev(drv, XGMAC_MAC_1588_TX_PORT_DLY_REG); regs[33] = dsaf_read_dev(drv, XGMAC_MAC_1588_RX_PORT_DLY_REG); regs[34] = dsaf_read_dev(drv, XGMAC_MAC_1588_ASYM_DLY_REG); regs[35] = dsaf_read_dev(drv, XGMAC_MAC_1588_ADJUST_CFG_REG); regs[36] = dsaf_read_dev(drv, XGMAC_MAC_Y1731_ETH_TYPE_REG); regs[37] = dsaf_read_dev(drv, XGMAC_MAC_MIB_CONTROL_REG); regs[38] = dsaf_read_dev(drv, XGMAC_MAC_WAN_RATE_ADJUST_REG); regs[39] = dsaf_read_dev(drv, XGMAC_MAC_TX_ERR_MARK_REG); regs[40] = dsaf_read_dev(drv, XGMAC_MAC_TX_LF_RF_CONTROL_REG); regs[41] = dsaf_read_dev(drv, XGMAC_MAC_RX_LF_RF_STATUS_REG); regs[42] = dsaf_read_dev(drv, XGMAC_MAC_TX_RUNT_PKT_CNT_REG); regs[43] = dsaf_read_dev(drv, XGMAC_MAC_RX_RUNT_PKT_CNT_REG); regs[44] = dsaf_read_dev(drv, XGMAC_MAC_RX_PREAM_ERR_PKT_CNT_REG); regs[45] = dsaf_read_dev(drv, XGMAC_MAC_TX_LF_RF_TERM_PKT_CNT_REG); regs[46] = dsaf_read_dev(drv, XGMAC_MAC_TX_SN_MISMATCH_PKT_CNT_REG); 
regs[47] = dsaf_read_dev(drv, XGMAC_MAC_RX_ERR_MSG_CNT_REG); regs[48] = dsaf_read_dev(drv, XGMAC_MAC_RX_ERR_EFD_CNT_REG); regs[49] = dsaf_read_dev(drv, XGMAC_MAC_ERR_INFO_REG); regs[50] = dsaf_read_dev(drv, XGMAC_MAC_DBG_INFO_REG); regs[51] = dsaf_read_dev(drv, XGMAC_PCS_BASER_SYNC_THD_REG); regs[52] = dsaf_read_dev(drv, XGMAC_PCS_STATUS1_REG); regs[53] = dsaf_read_dev(drv, XGMAC_PCS_BASER_STATUS1_REG); regs[54] = dsaf_read_dev(drv, XGMAC_PCS_BASER_STATUS2_REG); regs[55] = dsaf_read_dev(drv, XGMAC_PCS_BASER_SEEDA_0_REG); regs[56] = dsaf_read_dev(drv, XGMAC_PCS_BASER_SEEDA_1_REG); regs[57] = dsaf_read_dev(drv, XGMAC_PCS_BASER_SEEDB_0_REG); regs[58] = dsaf_read_dev(drv, XGMAC_PCS_BASER_SEEDB_1_REG); regs[59] = dsaf_read_dev(drv, XGMAC_PCS_BASER_TEST_CONTROL_REG); regs[60] = dsaf_read_dev(drv, XGMAC_PCS_BASER_TEST_ERR_CNT_REG); regs[61] = dsaf_read_dev(drv, XGMAC_PCS_DBG_INFO_REG); regs[62] = dsaf_read_dev(drv, XGMAC_PCS_DBG_INFO1_REG); regs[63] = dsaf_read_dev(drv, XGMAC_PCS_DBG_INFO2_REG); regs[64] = dsaf_read_dev(drv, XGMAC_PCS_DBG_INFO3_REG); regs[65] = dsaf_read_dev(drv, XGMAC_PMA_ENABLE_REG); regs[66] = dsaf_read_dev(drv, XGMAC_PMA_CONTROL_REG); regs[67] = dsaf_read_dev(drv, XGMAC_PMA_SIGNAL_STATUS_REG); regs[68] = dsaf_read_dev(drv, XGMAC_PMA_DBG_INFO_REG); regs[69] = dsaf_read_dev(drv, XGMAC_PMA_FEC_ABILITY_REG); regs[70] = dsaf_read_dev(drv, XGMAC_PMA_FEC_CONTROL_REG); regs[71] = dsaf_read_dev(drv, XGMAC_PMA_FEC_CORR_BLOCK_CNT__REG); regs[72] = dsaf_read_dev(drv, XGMAC_PMA_FEC_UNCORR_BLOCK_CNT__REG); /* status registers */ #define hns_xgmac_cpy_q(p, q) \ do {\ *(p) = (u32)(q);\ *((p) + 1) = (u32)((q) >> 32);\ } while (0) qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_FRAGMENT); hns_xgmac_cpy_q(&regs[73], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_UNDERSIZE); hns_xgmac_cpy_q(&regs[75], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_UNDERMIN); hns_xgmac_cpy_q(&regs[77], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_64OCTETS); hns_xgmac_cpy_q(&regs[79], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_65TO127OCTETS); hns_xgmac_cpy_q(&regs[81], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_128TO255OCTETS); hns_xgmac_cpy_q(&regs[83], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_256TO511OCTETS); hns_xgmac_cpy_q(&regs[85], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_512TO1023OCTETS); hns_xgmac_cpy_q(&regs[87], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_1024TO1518OCTETS); hns_xgmac_cpy_q(&regs[89], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_1519TOMAXOCTETS); hns_xgmac_cpy_q(&regs[91], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_1519TOMAXOCTETSOK); hns_xgmac_cpy_q(&regs[93], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_OVERSIZE); hns_xgmac_cpy_q(&regs[95], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_JABBER); hns_xgmac_cpy_q(&regs[97], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_GOODPKTS); hns_xgmac_cpy_q(&regs[99], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_GOODOCTETS); hns_xgmac_cpy_q(&regs[101], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_TOTAL_PKTS); hns_xgmac_cpy_q(&regs[103], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_TOTALOCTETS); hns_xgmac_cpy_q(&regs[105], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_UNICASTPKTS); hns_xgmac_cpy_q(&regs[107], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_MULTICASTPKTS); hns_xgmac_cpy_q(&regs[109], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_BROADCASTPKTS); hns_xgmac_cpy_q(&regs[111], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PRI0PAUSEPKTS); 
hns_xgmac_cpy_q(&regs[113], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PRI1PAUSEPKTS); hns_xgmac_cpy_q(&regs[115], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PRI2PAUSEPKTS); hns_xgmac_cpy_q(&regs[117], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PRI3PAUSEPKTS); hns_xgmac_cpy_q(&regs[119], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PRI4PAUSEPKTS); hns_xgmac_cpy_q(&regs[121], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PRI5PAUSEPKTS); hns_xgmac_cpy_q(&regs[123], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PRI6PAUSEPKTS); hns_xgmac_cpy_q(&regs[125], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PRI7PAUSEPKTS); hns_xgmac_cpy_q(&regs[127], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_MACCTRLPKTS); hns_xgmac_cpy_q(&regs[129], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_1731PKTS); hns_xgmac_cpy_q(&regs[131], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_1588PKTS); hns_xgmac_cpy_q(&regs[133], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_FROMAPPGOODPKTS); hns_xgmac_cpy_q(&regs[135], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_FROMAPPBADPKTS); hns_xgmac_cpy_q(&regs[137], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_ERRALLPKTS); hns_xgmac_cpy_q(&regs[139], qtmp); /* RX */ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_FRAGMENT); hns_xgmac_cpy_q(&regs[141], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTSUNDERSIZE); hns_xgmac_cpy_q(&regs[143], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_UNDERMIN); hns_xgmac_cpy_q(&regs[145], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_64OCTETS); hns_xgmac_cpy_q(&regs[147], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_65TO127OCTETS); hns_xgmac_cpy_q(&regs[149], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_128TO255OCTETS); hns_xgmac_cpy_q(&regs[151], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_256TO511OCTETS); hns_xgmac_cpy_q(&regs[153], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_512TO1023OCTETS); hns_xgmac_cpy_q(&regs[155], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_1024TO1518OCTETS); hns_xgmac_cpy_q(&regs[157], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_1519TOMAXOCTETS); hns_xgmac_cpy_q(&regs[159], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_1519TOMAXOCTETSOK); hns_xgmac_cpy_q(&regs[161], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_OVERSIZE); hns_xgmac_cpy_q(&regs[163], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_JABBER); hns_xgmac_cpy_q(&regs[165], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_GOODPKTS); hns_xgmac_cpy_q(&regs[167], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_GOODOCTETS); hns_xgmac_cpy_q(&regs[169], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_TOTAL_PKTS); hns_xgmac_cpy_q(&regs[171], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_TOTALOCTETS); hns_xgmac_cpy_q(&regs[173], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_UNICASTPKTS); hns_xgmac_cpy_q(&regs[175], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_MULTICASTPKTS); hns_xgmac_cpy_q(&regs[177], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_BROADCASTPKTS); hns_xgmac_cpy_q(&regs[179], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PRI0PAUSEPKTS); hns_xgmac_cpy_q(&regs[181], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PRI1PAUSEPKTS); hns_xgmac_cpy_q(&regs[183], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PRI2PAUSEPKTS); hns_xgmac_cpy_q(&regs[185], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PRI3PAUSEPKTS); hns_xgmac_cpy_q(&regs[187], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PRI4PAUSEPKTS); hns_xgmac_cpy_q(&regs[189], qtmp); qtmp = hns_mac_reg_read64(drv, 
XGMAC_RX_PRI5PAUSEPKTS); hns_xgmac_cpy_q(&regs[191], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PRI6PAUSEPKTS); hns_xgmac_cpy_q(&regs[193], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PRI7PAUSEPKTS); hns_xgmac_cpy_q(&regs[195], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_MACCTRLPKTS); hns_xgmac_cpy_q(&regs[197], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_SENDAPPGOODPKTS); hns_xgmac_cpy_q(&regs[199], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_SENDAPPBADPKTS); hns_xgmac_cpy_q(&regs[201], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_1731PKTS); hns_xgmac_cpy_q(&regs[203], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_SYMBOLERRPKTS); hns_xgmac_cpy_q(&regs[205], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_FCSERRPKTS); hns_xgmac_cpy_q(&regs[207], qtmp); /* mark end of mac regs */ for (i = 208; i < 214; i++) regs[i] = 0xaaaaaaaa; } /** *hns_xgmac_get_stats - get xgmac statistic *@mac_drv: mac driver *@data:data for value of stats regs */ static void hns_xgmac_get_stats(void *mac_drv, u64 *data) { u32 i; u64 *buf = data; struct mac_driver *drv = (struct mac_driver *)mac_drv; struct mac_hw_stats *hw_stats = NULL; hw_stats = &drv->mac_cb->hw_stats; for (i = 0; i < ARRAY_SIZE(g_xgmac_stats_string); i++) { buf[i] = DSAF_STATS_READ(hw_stats, g_xgmac_stats_string[i].offset); } } /** *hns_xgmac_get_strings - get xgmac strings name *@stringset: type of values in data *@data:data for value of string name */ static void hns_xgmac_get_strings(u32 stringset, u8 *data) { char *buff = (char *)data; u32 i; if (stringset != ETH_SS_STATS) return; for (i = 0; i < ARRAY_SIZE(g_xgmac_stats_string); i++) { snprintf(buff, ETH_GSTRING_LEN, g_xgmac_stats_string[i].desc); buff = buff + ETH_GSTRING_LEN; } } /** *hns_xgmac_get_sset_count - get xgmac string set count *@stringset: type of values in data *return xgmac string set count */ static int hns_xgmac_get_sset_count(int stringset) { if (stringset == ETH_SS_STATS || stringset == ETH_SS_PRIV_FLAGS) return ARRAY_SIZE(g_xgmac_stats_string); return 0; } /** *hns_xgmac_get_regs_count - get xgmac regs count *return xgmac regs count */ static int hns_xgmac_get_regs_count(void) { return HNS_XGMAC_DUMP_NUM; } void *hns_xgmac_config(struct hns_mac_cb *mac_cb, struct mac_params *mac_param) { struct mac_driver *mac_drv; mac_drv = devm_kzalloc(mac_cb->dev, sizeof(*mac_drv), GFP_KERNEL); if (!mac_drv) return NULL; mac_drv->mac_init = hns_xgmac_init; mac_drv->mac_enable = hns_xgmac_enable; mac_drv->mac_disable = hns_xgmac_disable; mac_drv->mac_id = mac_param->mac_id; mac_drv->mac_mode = mac_param->mac_mode; mac_drv->io_base = mac_param->vaddr; mac_drv->dev = mac_param->dev; mac_drv->mac_cb = mac_cb; mac_drv->set_mac_addr = hns_xgmac_set_pausefrm_mac_addr; mac_drv->set_an_mode = NULL; mac_drv->config_loopback = NULL; mac_drv->config_pad_and_crc = hns_xgmac_config_pad_and_crc; mac_drv->config_half_duplex = NULL; mac_drv->set_rx_ignore_pause_frames = hns_xgmac_set_rx_ignore_pause_frames; mac_drv->mac_free = hns_xgmac_free; mac_drv->adjust_link = NULL; mac_drv->set_tx_auto_pause_frames = hns_xgmac_set_tx_auto_pause_frames; mac_drv->config_max_frame_length = hns_xgmac_config_max_frame_length; mac_drv->mac_pausefrm_cfg = hns_xgmac_pausefrm_cfg; mac_drv->autoneg_stat = NULL; mac_drv->get_info = hns_xgmac_get_info; mac_drv->get_pause_enable = hns_xgmac_get_pausefrm_cfg; mac_drv->get_link_status = hns_xgmac_get_link_status; mac_drv->get_regs = hns_xgmac_get_regs; mac_drv->get_ethtool_stats = hns_xgmac_get_stats; mac_drv->get_sset_count = hns_xgmac_get_sset_count; 
mac_drv->get_regs_count = hns_xgmac_get_regs_count; mac_drv->get_strings = hns_xgmac_get_strings; mac_drv->update_stats = hns_xgmac_update_stats; return (void *)mac_drv; }
static int hns_xgmac_get_sset_count(int stringset) { if (stringset == ETH_SS_STATS) return ARRAY_SIZE(g_xgmac_stats_string); return 0; }
static int hns_xgmac_get_sset_count(int stringset) { if (stringset == ETH_SS_STATS || stringset == ETH_SS_PRIV_FLAGS) return ARRAY_SIZE(g_xgmac_stats_string); return 0; }
{'added': [(784, '\tif (stringset == ETH_SS_STATS || stringset == ETH_SS_PRIV_FLAGS)')], 'deleted': [(784, '\tif (stringset == ETH_SS_STATS)')]}
1
1
632
5357
https://github.com/torvalds/linux
CVE-2017-18222
['CWE-119']
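Annotation on the record above (not part of the dataset fields): the func_before/func_after pair and the diff show a one-line change to hns_xgmac_get_sset_count(), so that ETH_SS_PRIV_FLAGS reports the same count as ETH_SS_STATS. The CWE-119 tag points at a count/strings mismatch: the ethtool core sizes its buffer from get_sset_count() and then lets get_strings() fill it, so the two callbacks must agree. The sketch below is a minimal user-space illustration of that pattern, not kernel code; names such as `count_before`, `count_after`, and `fill` are hypothetical stand-ins for the two callback roles.

```c
/* Illustrative sketch (hypothetical names, not kernel code). A caller
 * sizes its buffer from one callback (the get_sset_count role) and
 * fills it from another (the get_strings role); a count smaller than
 * what the filler writes is the CWE-119 pattern this record fixes. */
#include <stdio.h>
#include <stdlib.h>

#define GSTRING_LEN 32
#define N_STATS 3

/* func_before behaviour: only set 0 ("stats") reports a count. */
static int count_before(int set) { return set == 0 ? N_STATS : 0; }
/* func_after behaviour: set 1 ("priv flags") reports the same count. */
static int count_after(int set)  { return (set == 0 || set == 1) ? N_STATS : 0; }

/* The filler always emits N_STATS names for a supported set. */
static void fill(char *buf)
{
    for (int i = 0; i < N_STATS; i++)
        snprintf(buf + (size_t)i * GSTRING_LEN, GSTRING_LEN, "stat_%d", i);
}

int main(void)
{
    int set = 1;                 /* the "priv flags" set */
    printf("reported before fix: %d, after fix: %d\n",
           count_before(set), count_after(set));
    int n = count_after(set);
    char *buf = calloc((size_t)n, GSTRING_LEN);  /* caller-sized buffer */
    if (buf && n > 0)
        fill(buf);               /* writes exactly n entries, in bounds */
    /* had the buffer been sized from count_before(set) == 0, the same
     * fill() would write past the allocation (CWE-119) */
    free(buf);
    return 0;
}
```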
SDL_pixels.c
Map1to1
/* Simple DirectMedia Layer Copyright (C) 1997-2021 Sam Lantinga <slouken@libsdl.org> This software is provided 'as-is', without any express or implied warranty. In no event will the authors be held liable for any damages arising from the use of this software. Permission is granted to anyone to use this software for any purpose, including commercial applications, and to alter it and redistribute it freely, subject to the following restrictions: 1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required. 2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software. 3. This notice may not be removed or altered from any source distribution. */ #include "../SDL_internal.h" /* General (mostly internal) pixel/color manipulation routines for SDL */ #include "SDL_endian.h" #include "SDL_video.h" #include "SDL_sysvideo.h" #include "SDL_blit.h" #include "SDL_pixels_c.h" #include "SDL_RLEaccel_c.h" /* Lookup tables to expand partial bytes to the full 0..255 range */ static Uint8 lookup_0[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255 }; static Uint8 lookup_1[] = { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58, 60, 62, 64, 66, 68, 70, 72, 74, 76, 78, 80, 82, 84, 86, 88, 90, 92, 94, 96, 98, 100, 102, 104, 106, 108, 110, 112, 114, 116, 118, 120, 122, 124, 126, 128, 130, 132, 134, 136, 138, 140, 142, 144, 146, 148, 150, 152, 154, 156, 158, 160, 162, 164, 166, 168, 170, 172, 174, 176, 178, 180, 182, 184, 186, 188, 190, 192, 194, 196, 198, 200, 202, 204, 206, 208, 210, 212, 214, 216, 218, 220, 222, 224, 226, 228, 230, 232, 234, 236, 238, 240, 242, 244, 246, 248, 250, 252, 255 }; static Uint8 lookup_2[] = { 0, 4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 85, 89, 93, 97, 101, 105, 109, 113, 117, 121, 125, 129, 133, 137, 141, 145, 149, 153, 157, 161, 165, 170, 174, 178, 182, 186, 190, 194, 198, 202, 206, 210, 214, 218, 222, 226, 230, 234, 238, 242, 246, 250, 255 }; static Uint8 lookup_3[] = { 0, 8, 16, 24, 32, 41, 49, 57, 65, 74, 82, 90, 98, 106, 115, 123, 131, 139, 148, 156, 164, 172, 180, 189, 197, 205, 213, 222, 230, 238, 246, 255 }; static Uint8 lookup_4[] = { 
0, 17, 34, 51, 68, 85, 102, 119, 136, 153, 170, 187, 204, 221, 238, 255 }; static Uint8 lookup_5[] = { 0, 36, 72, 109, 145, 182, 218, 255 }; static Uint8 lookup_6[] = { 0, 85, 170, 255 }; static Uint8 lookup_7[] = { 0, 255 }; static Uint8 lookup_8[] = { 255 }; Uint8* SDL_expand_byte[9] = { lookup_0, lookup_1, lookup_2, lookup_3, lookup_4, lookup_5, lookup_6, lookup_7, lookup_8 }; /* Helper functions */ const char* SDL_GetPixelFormatName(Uint32 format) { switch (format) { #define CASE(X) case X: return #X; CASE(SDL_PIXELFORMAT_INDEX1LSB) CASE(SDL_PIXELFORMAT_INDEX1MSB) CASE(SDL_PIXELFORMAT_INDEX4LSB) CASE(SDL_PIXELFORMAT_INDEX4MSB) CASE(SDL_PIXELFORMAT_INDEX8) CASE(SDL_PIXELFORMAT_RGB332) CASE(SDL_PIXELFORMAT_RGB444) CASE(SDL_PIXELFORMAT_BGR444) CASE(SDL_PIXELFORMAT_RGB555) CASE(SDL_PIXELFORMAT_BGR555) CASE(SDL_PIXELFORMAT_ARGB4444) CASE(SDL_PIXELFORMAT_RGBA4444) CASE(SDL_PIXELFORMAT_ABGR4444) CASE(SDL_PIXELFORMAT_BGRA4444) CASE(SDL_PIXELFORMAT_ARGB1555) CASE(SDL_PIXELFORMAT_RGBA5551) CASE(SDL_PIXELFORMAT_ABGR1555) CASE(SDL_PIXELFORMAT_BGRA5551) CASE(SDL_PIXELFORMAT_RGB565) CASE(SDL_PIXELFORMAT_BGR565) CASE(SDL_PIXELFORMAT_RGB24) CASE(SDL_PIXELFORMAT_BGR24) CASE(SDL_PIXELFORMAT_RGB888) CASE(SDL_PIXELFORMAT_RGBX8888) CASE(SDL_PIXELFORMAT_BGR888) CASE(SDL_PIXELFORMAT_BGRX8888) CASE(SDL_PIXELFORMAT_ARGB8888) CASE(SDL_PIXELFORMAT_RGBA8888) CASE(SDL_PIXELFORMAT_ABGR8888) CASE(SDL_PIXELFORMAT_BGRA8888) CASE(SDL_PIXELFORMAT_ARGB2101010) CASE(SDL_PIXELFORMAT_YV12) CASE(SDL_PIXELFORMAT_IYUV) CASE(SDL_PIXELFORMAT_YUY2) CASE(SDL_PIXELFORMAT_UYVY) CASE(SDL_PIXELFORMAT_YVYU) CASE(SDL_PIXELFORMAT_NV12) CASE(SDL_PIXELFORMAT_NV21) #undef CASE default: return "SDL_PIXELFORMAT_UNKNOWN"; } } SDL_bool SDL_PixelFormatEnumToMasks(Uint32 format, int *bpp, Uint32 * Rmask, Uint32 * Gmask, Uint32 * Bmask, Uint32 * Amask) { Uint32 masks[4]; /* This function doesn't work with FourCC pixel formats */ if (SDL_ISPIXELFORMAT_FOURCC(format)) { SDL_SetError("FOURCC pixel formats are not supported"); return SDL_FALSE; } /* Initialize the values here */ if (SDL_BYTESPERPIXEL(format) <= 2) { *bpp = SDL_BITSPERPIXEL(format); } else { *bpp = SDL_BYTESPERPIXEL(format) * 8; } *Rmask = *Gmask = *Bmask = *Amask = 0; if (format == SDL_PIXELFORMAT_RGB24) { #if SDL_BYTEORDER == SDL_BIG_ENDIAN *Rmask = 0x00FF0000; *Gmask = 0x0000FF00; *Bmask = 0x000000FF; #else *Rmask = 0x000000FF; *Gmask = 0x0000FF00; *Bmask = 0x00FF0000; #endif return SDL_TRUE; } if (format == SDL_PIXELFORMAT_BGR24) { #if SDL_BYTEORDER == SDL_BIG_ENDIAN *Rmask = 0x000000FF; *Gmask = 0x0000FF00; *Bmask = 0x00FF0000; #else *Rmask = 0x00FF0000; *Gmask = 0x0000FF00; *Bmask = 0x000000FF; #endif return SDL_TRUE; } if (SDL_PIXELTYPE(format) != SDL_PIXELTYPE_PACKED8 && SDL_PIXELTYPE(format) != SDL_PIXELTYPE_PACKED16 && SDL_PIXELTYPE(format) != SDL_PIXELTYPE_PACKED32) { /* Not a format that uses masks */ return SDL_TRUE; } switch (SDL_PIXELLAYOUT(format)) { case SDL_PACKEDLAYOUT_332: masks[0] = 0x00000000; masks[1] = 0x000000E0; masks[2] = 0x0000001C; masks[3] = 0x00000003; break; case SDL_PACKEDLAYOUT_4444: masks[0] = 0x0000F000; masks[1] = 0x00000F00; masks[2] = 0x000000F0; masks[3] = 0x0000000F; break; case SDL_PACKEDLAYOUT_1555: masks[0] = 0x00008000; masks[1] = 0x00007C00; masks[2] = 0x000003E0; masks[3] = 0x0000001F; break; case SDL_PACKEDLAYOUT_5551: masks[0] = 0x0000F800; masks[1] = 0x000007C0; masks[2] = 0x0000003E; masks[3] = 0x00000001; break; case SDL_PACKEDLAYOUT_565: masks[0] = 0x00000000; masks[1] = 0x0000F800; masks[2] = 0x000007E0; masks[3] = 0x0000001F; 
break; case SDL_PACKEDLAYOUT_8888: masks[0] = 0xFF000000; masks[1] = 0x00FF0000; masks[2] = 0x0000FF00; masks[3] = 0x000000FF; break; case SDL_PACKEDLAYOUT_2101010: masks[0] = 0xC0000000; masks[1] = 0x3FF00000; masks[2] = 0x000FFC00; masks[3] = 0x000003FF; break; case SDL_PACKEDLAYOUT_1010102: masks[0] = 0xFFC00000; masks[1] = 0x003FF000; masks[2] = 0x00000FFC; masks[3] = 0x00000003; break; default: SDL_SetError("Unknown pixel format"); return SDL_FALSE; } switch (SDL_PIXELORDER(format)) { case SDL_PACKEDORDER_XRGB: *Rmask = masks[1]; *Gmask = masks[2]; *Bmask = masks[3]; break; case SDL_PACKEDORDER_RGBX: *Rmask = masks[0]; *Gmask = masks[1]; *Bmask = masks[2]; break; case SDL_PACKEDORDER_ARGB: *Amask = masks[0]; *Rmask = masks[1]; *Gmask = masks[2]; *Bmask = masks[3]; break; case SDL_PACKEDORDER_RGBA: *Rmask = masks[0]; *Gmask = masks[1]; *Bmask = masks[2]; *Amask = masks[3]; break; case SDL_PACKEDORDER_XBGR: *Bmask = masks[1]; *Gmask = masks[2]; *Rmask = masks[3]; break; case SDL_PACKEDORDER_BGRX: *Bmask = masks[0]; *Gmask = masks[1]; *Rmask = masks[2]; break; case SDL_PACKEDORDER_BGRA: *Bmask = masks[0]; *Gmask = masks[1]; *Rmask = masks[2]; *Amask = masks[3]; break; case SDL_PACKEDORDER_ABGR: *Amask = masks[0]; *Bmask = masks[1]; *Gmask = masks[2]; *Rmask = masks[3]; break; default: SDL_SetError("Unknown pixel format"); return SDL_FALSE; } return SDL_TRUE; } Uint32 SDL_MasksToPixelFormatEnum(int bpp, Uint32 Rmask, Uint32 Gmask, Uint32 Bmask, Uint32 Amask) { switch (bpp) { case 1: /* SDL defaults to MSB ordering */ return SDL_PIXELFORMAT_INDEX1MSB; case 4: /* SDL defaults to MSB ordering */ return SDL_PIXELFORMAT_INDEX4MSB; case 8: if (Rmask == 0) { return SDL_PIXELFORMAT_INDEX8; } if (Rmask == 0xE0 && Gmask == 0x1C && Bmask == 0x03 && Amask == 0x00) { return SDL_PIXELFORMAT_RGB332; } break; case 12: if (Rmask == 0) { return SDL_PIXELFORMAT_RGB444; } if (Rmask == 0x0F00 && Gmask == 0x00F0 && Bmask == 0x000F && Amask == 0x0000) { return SDL_PIXELFORMAT_RGB444; } if (Rmask == 0x000F && Gmask == 0x00F0 && Bmask == 0x0F00 && Amask == 0x0000) { return SDL_PIXELFORMAT_BGR444; } break; case 15: if (Rmask == 0) { return SDL_PIXELFORMAT_RGB555; } SDL_FALLTHROUGH; case 16: if (Rmask == 0) { return SDL_PIXELFORMAT_RGB565; } if (Rmask == 0x7C00 && Gmask == 0x03E0 && Bmask == 0x001F && Amask == 0x0000) { return SDL_PIXELFORMAT_RGB555; } if (Rmask == 0x001F && Gmask == 0x03E0 && Bmask == 0x7C00 && Amask == 0x0000) { return SDL_PIXELFORMAT_BGR555; } if (Rmask == 0x0F00 && Gmask == 0x00F0 && Bmask == 0x000F && Amask == 0xF000) { return SDL_PIXELFORMAT_ARGB4444; } if (Rmask == 0xF000 && Gmask == 0x0F00 && Bmask == 0x00F0 && Amask == 0x000F) { return SDL_PIXELFORMAT_RGBA4444; } if (Rmask == 0x000F && Gmask == 0x00F0 && Bmask == 0x0F00 && Amask == 0xF000) { return SDL_PIXELFORMAT_ABGR4444; } if (Rmask == 0x00F0 && Gmask == 0x0F00 && Bmask == 0xF000 && Amask == 0x000F) { return SDL_PIXELFORMAT_BGRA4444; } if (Rmask == 0x7C00 && Gmask == 0x03E0 && Bmask == 0x001F && Amask == 0x8000) { return SDL_PIXELFORMAT_ARGB1555; } if (Rmask == 0xF800 && Gmask == 0x07C0 && Bmask == 0x003E && Amask == 0x0001) { return SDL_PIXELFORMAT_RGBA5551; } if (Rmask == 0x001F && Gmask == 0x03E0 && Bmask == 0x7C00 && Amask == 0x8000) { return SDL_PIXELFORMAT_ABGR1555; } if (Rmask == 0x003E && Gmask == 0x07C0 && Bmask == 0xF800 && Amask == 0x0001) { return SDL_PIXELFORMAT_BGRA5551; } if (Rmask == 0xF800 && Gmask == 0x07E0 && Bmask == 0x001F && Amask == 0x0000) { return SDL_PIXELFORMAT_RGB565; } if (Rmask == 0x001F && Gmask == 0x07E0 
&& Bmask == 0xF800 && Amask == 0x0000) { return SDL_PIXELFORMAT_BGR565; } if (Rmask == 0x003F && Gmask == 0x07C0 && Bmask == 0xF800 && Amask == 0x0000) { /* Technically this would be BGR556, but Witek says this works in bug 3158 */ return SDL_PIXELFORMAT_RGB565; } break; case 24: switch (Rmask) { case 0: case 0x00FF0000: #if SDL_BYTEORDER == SDL_BIG_ENDIAN return SDL_PIXELFORMAT_RGB24; #else return SDL_PIXELFORMAT_BGR24; #endif case 0x000000FF: #if SDL_BYTEORDER == SDL_BIG_ENDIAN return SDL_PIXELFORMAT_BGR24; #else return SDL_PIXELFORMAT_RGB24; #endif } case 32: if (Rmask == 0) { return SDL_PIXELFORMAT_RGB888; } if (Rmask == 0x00FF0000 && Gmask == 0x0000FF00 && Bmask == 0x000000FF && Amask == 0x00000000) { return SDL_PIXELFORMAT_RGB888; } if (Rmask == 0xFF000000 && Gmask == 0x00FF0000 && Bmask == 0x0000FF00 && Amask == 0x00000000) { return SDL_PIXELFORMAT_RGBX8888; } if (Rmask == 0x000000FF && Gmask == 0x0000FF00 && Bmask == 0x00FF0000 && Amask == 0x00000000) { return SDL_PIXELFORMAT_BGR888; } if (Rmask == 0x0000FF00 && Gmask == 0x00FF0000 && Bmask == 0xFF000000 && Amask == 0x00000000) { return SDL_PIXELFORMAT_BGRX8888; } if (Rmask == 0x00FF0000 && Gmask == 0x0000FF00 && Bmask == 0x000000FF && Amask == 0xFF000000) { return SDL_PIXELFORMAT_ARGB8888; } if (Rmask == 0xFF000000 && Gmask == 0x00FF0000 && Bmask == 0x0000FF00 && Amask == 0x000000FF) { return SDL_PIXELFORMAT_RGBA8888; } if (Rmask == 0x000000FF && Gmask == 0x0000FF00 && Bmask == 0x00FF0000 && Amask == 0xFF000000) { return SDL_PIXELFORMAT_ABGR8888; } if (Rmask == 0x0000FF00 && Gmask == 0x00FF0000 && Bmask == 0xFF000000 && Amask == 0x000000FF) { return SDL_PIXELFORMAT_BGRA8888; } if (Rmask == 0x3FF00000 && Gmask == 0x000FFC00 && Bmask == 0x000003FF && Amask == 0xC0000000) { return SDL_PIXELFORMAT_ARGB2101010; } } return SDL_PIXELFORMAT_UNKNOWN; } static SDL_PixelFormat *formats; static SDL_SpinLock formats_lock = 0; SDL_PixelFormat * SDL_AllocFormat(Uint32 pixel_format) { SDL_PixelFormat *format; SDL_AtomicLock(&formats_lock); /* Look it up in our list of previously allocated formats */ for (format = formats; format; format = format->next) { if (pixel_format == format->format) { ++format->refcount; SDL_AtomicUnlock(&formats_lock); return format; } } /* Allocate an empty pixel format structure, and initialize it */ format = SDL_malloc(sizeof(*format)); if (format == NULL) { SDL_AtomicUnlock(&formats_lock); SDL_OutOfMemory(); return NULL; } if (SDL_InitFormat(format, pixel_format) < 0) { SDL_AtomicUnlock(&formats_lock); SDL_free(format); SDL_InvalidParamError("format"); return NULL; } if (!SDL_ISPIXELFORMAT_INDEXED(pixel_format)) { /* Cache the RGB formats */ format->next = formats; formats = format; } SDL_AtomicUnlock(&formats_lock); return format; } int SDL_InitFormat(SDL_PixelFormat * format, Uint32 pixel_format) { int bpp; Uint32 Rmask, Gmask, Bmask, Amask; Uint32 mask; if (!SDL_PixelFormatEnumToMasks(pixel_format, &bpp, &Rmask, &Gmask, &Bmask, &Amask)) { return -1; } /* Set up the format */ SDL_zerop(format); format->format = pixel_format; format->BitsPerPixel = bpp; format->BytesPerPixel = (bpp + 7) / 8; format->Rmask = Rmask; format->Rshift = 0; format->Rloss = 8; if (Rmask) { for (mask = Rmask; !(mask & 0x01); mask >>= 1) ++format->Rshift; for (; (mask & 0x01); mask >>= 1) --format->Rloss; } format->Gmask = Gmask; format->Gshift = 0; format->Gloss = 8; if (Gmask) { for (mask = Gmask; !(mask & 0x01); mask >>= 1) ++format->Gshift; for (; (mask & 0x01); mask >>= 1) --format->Gloss; } format->Bmask = Bmask; format->Bshift = 0; 
format->Bloss = 8; if (Bmask) { for (mask = Bmask; !(mask & 0x01); mask >>= 1) ++format->Bshift; for (; (mask & 0x01); mask >>= 1) --format->Bloss; } format->Amask = Amask; format->Ashift = 0; format->Aloss = 8; if (Amask) { for (mask = Amask; !(mask & 0x01); mask >>= 1) ++format->Ashift; for (; (mask & 0x01); mask >>= 1) --format->Aloss; } format->palette = NULL; format->refcount = 1; format->next = NULL; return 0; } void SDL_FreeFormat(SDL_PixelFormat *format) { SDL_PixelFormat *prev; if (!format) { SDL_InvalidParamError("format"); return; } SDL_AtomicLock(&formats_lock); if (--format->refcount > 0) { SDL_AtomicUnlock(&formats_lock); return; } /* Remove this format from our list */ if (format == formats) { formats = format->next; } else if (formats) { for (prev = formats; prev->next; prev = prev->next) { if (prev->next == format) { prev->next = format->next; break; } } } SDL_AtomicUnlock(&formats_lock); if (format->palette) { SDL_FreePalette(format->palette); } SDL_free(format); } SDL_Palette * SDL_AllocPalette(int ncolors) { SDL_Palette *palette; /* Input validation */ if (ncolors < 1) { SDL_InvalidParamError("ncolors"); return NULL; } palette = (SDL_Palette *) SDL_malloc(sizeof(*palette)); if (!palette) { SDL_OutOfMemory(); return NULL; } palette->colors = (SDL_Color *) SDL_malloc(ncolors * sizeof(*palette->colors)); if (!palette->colors) { SDL_free(palette); return NULL; } palette->ncolors = ncolors; palette->version = 1; palette->refcount = 1; SDL_memset(palette->colors, 0xFF, ncolors * sizeof(*palette->colors)); return palette; } int SDL_SetPixelFormatPalette(SDL_PixelFormat * format, SDL_Palette *palette) { if (!format) { return SDL_SetError("SDL_SetPixelFormatPalette() passed NULL format"); } if (palette && palette->ncolors > (1 << format->BitsPerPixel)) { return SDL_SetError("SDL_SetPixelFormatPalette() passed a palette that doesn't match the format"); } if (format->palette == palette) { return 0; } if (format->palette) { SDL_FreePalette(format->palette); } format->palette = palette; if (format->palette) { ++format->palette->refcount; } return 0; } int SDL_SetPaletteColors(SDL_Palette * palette, const SDL_Color * colors, int firstcolor, int ncolors) { int status = 0; /* Verify the parameters */ if (!palette) { return -1; } if (ncolors > (palette->ncolors - firstcolor)) { ncolors = (palette->ncolors - firstcolor); status = -1; } if (colors != (palette->colors + firstcolor)) { SDL_memcpy(palette->colors + firstcolor, colors, ncolors * sizeof(*colors)); } ++palette->version; if (!palette->version) { palette->version = 1; } return status; } void SDL_FreePalette(SDL_Palette * palette) { if (!palette) { SDL_InvalidParamError("palette"); return; } if (--palette->refcount > 0) { return; } SDL_free(palette->colors); SDL_free(palette); } /* * Calculate an 8-bit (3 red, 3 green, 2 blue) dithered palette of colors */ void SDL_DitherColors(SDL_Color * colors, int bpp) { int i; if (bpp != 8) return; /* only 8bpp supported right now */ for (i = 0; i < 256; i++) { int r, g, b; /* map each bit field to the full [0, 255] interval, so 0 is mapped to (0, 0, 0) and 255 to (255, 255, 255) */ r = i & 0xe0; r |= r >> 3 | r >> 6; colors[i].r = r; g = (i << 3) & 0xe0; g |= g >> 3 | g >> 6; colors[i].g = g; b = i & 0x3; b |= b << 2; b |= b << 4; colors[i].b = b; colors[i].a = SDL_ALPHA_OPAQUE; } } /* * Match an RGB value to a particular palette index */ Uint8 SDL_FindColor(SDL_Palette * pal, Uint8 r, Uint8 g, Uint8 b, Uint8 a) { /* Do colorspace distance matching */ unsigned int smallest; unsigned int 
distance; int rd, gd, bd, ad; int i; Uint8 pixel = 0; smallest = ~0; for (i = 0; i < pal->ncolors; ++i) { rd = pal->colors[i].r - r; gd = pal->colors[i].g - g; bd = pal->colors[i].b - b; ad = pal->colors[i].a - a; distance = (rd * rd) + (gd * gd) + (bd * bd) + (ad * ad); if (distance < smallest) { pixel = i; if (distance == 0) { /* Perfect match! */ break; } smallest = distance; } } return (pixel); } /* Tell whether palette is opaque, and if it has an alpha_channel */ void SDL_DetectPalette(SDL_Palette *pal, SDL_bool *is_opaque, SDL_bool *has_alpha_channel) { int i; { SDL_bool all_opaque = SDL_TRUE; for (i = 0; i < pal->ncolors; i++) { Uint8 alpha_value = pal->colors[i].a; if (alpha_value != SDL_ALPHA_OPAQUE) { all_opaque = SDL_FALSE; break; } } if (all_opaque) { /* Palette is opaque, with an alpha channel */ *is_opaque = SDL_TRUE; *has_alpha_channel = SDL_TRUE; return; } } { SDL_bool all_transparent = SDL_TRUE; for (i = 0; i < pal->ncolors; i++) { Uint8 alpha_value = pal->colors[i].a; if (alpha_value != SDL_ALPHA_TRANSPARENT) { all_transparent = SDL_FALSE; break; } } if (all_transparent) { /* Palette is opaque, without an alpha channel */ *is_opaque = SDL_TRUE; *has_alpha_channel = SDL_FALSE; return; } } /* Palette has alpha values */ *is_opaque = SDL_FALSE; *has_alpha_channel = SDL_TRUE; } /* Find the opaque pixel value corresponding to an RGB triple */ Uint32 SDL_MapRGB(const SDL_PixelFormat * format, Uint8 r, Uint8 g, Uint8 b) { if (format->palette == NULL) { return (r >> format->Rloss) << format->Rshift | (g >> format->Gloss) << format->Gshift | (b >> format->Bloss) << format->Bshift | format->Amask; } else { return SDL_FindColor(format->palette, r, g, b, SDL_ALPHA_OPAQUE); } } /* Find the pixel value corresponding to an RGBA quadruple */ Uint32 SDL_MapRGBA(const SDL_PixelFormat * format, Uint8 r, Uint8 g, Uint8 b, Uint8 a) { if (format->palette == NULL) { return (r >> format->Rloss) << format->Rshift | (g >> format->Gloss) << format->Gshift | (b >> format->Bloss) << format->Bshift | ((Uint32)(a >> format->Aloss) << format->Ashift & format->Amask); } else { return SDL_FindColor(format->palette, r, g, b, a); } } void SDL_GetRGB(Uint32 pixel, const SDL_PixelFormat * format, Uint8 * r, Uint8 * g, Uint8 * b) { if (format->palette == NULL) { unsigned v; v = (pixel & format->Rmask) >> format->Rshift; *r = SDL_expand_byte[format->Rloss][v]; v = (pixel & format->Gmask) >> format->Gshift; *g = SDL_expand_byte[format->Gloss][v]; v = (pixel & format->Bmask) >> format->Bshift; *b = SDL_expand_byte[format->Bloss][v]; } else { if (pixel < (unsigned)format->palette->ncolors) { *r = format->palette->colors[pixel].r; *g = format->palette->colors[pixel].g; *b = format->palette->colors[pixel].b; } else { *r = *g = *b = 0; } } } void SDL_GetRGBA(Uint32 pixel, const SDL_PixelFormat * format, Uint8 * r, Uint8 * g, Uint8 * b, Uint8 * a) { if (format->palette == NULL) { unsigned v; v = (pixel & format->Rmask) >> format->Rshift; *r = SDL_expand_byte[format->Rloss][v]; v = (pixel & format->Gmask) >> format->Gshift; *g = SDL_expand_byte[format->Gloss][v]; v = (pixel & format->Bmask) >> format->Bshift; *b = SDL_expand_byte[format->Bloss][v]; v = (pixel & format->Amask) >> format->Ashift; *a = SDL_expand_byte[format->Aloss][v]; } else { if (pixel < (unsigned)format->palette->ncolors) { *r = format->palette->colors[pixel].r; *g = format->palette->colors[pixel].g; *b = format->palette->colors[pixel].b; *a = format->palette->colors[pixel].a; } else { *r = *g = *b = *a = 0; } } } /* Map from Palette to Palette */ 
static Uint8 * Map1to1(SDL_Palette * src, SDL_Palette * dst, int *identical) { Uint8 *map; int i; if (identical) { if (src->ncolors <= dst->ncolors) { /* If an identical palette, no need to map */ if (src == dst || (SDL_memcmp (src->colors, dst->colors, src->ncolors * sizeof(SDL_Color)) == 0)) { *identical = 1; return (NULL); } } *identical = 0; } map = (Uint8 *) SDL_malloc(src->ncolors); if (map == NULL) { SDL_OutOfMemory(); return (NULL); } for (i = 0; i < src->ncolors; ++i) { map[i] = SDL_FindColor(dst, src->colors[i].r, src->colors[i].g, src->colors[i].b, src->colors[i].a); } return (map); } /* Map from Palette to BitField */ static Uint8 * Map1toN(SDL_PixelFormat * src, Uint8 Rmod, Uint8 Gmod, Uint8 Bmod, Uint8 Amod, SDL_PixelFormat * dst) { Uint8 *map; int i; int bpp; SDL_Palette *pal = src->palette; bpp = ((dst->BytesPerPixel == 3) ? 4 : dst->BytesPerPixel); map = (Uint8 *) SDL_malloc(pal->ncolors * bpp); if (map == NULL) { SDL_OutOfMemory(); return (NULL); } /* We memory copy to the pixel map so the endianness is preserved */ for (i = 0; i < pal->ncolors; ++i) { Uint8 R = (Uint8) ((pal->colors[i].r * Rmod) / 255); Uint8 G = (Uint8) ((pal->colors[i].g * Gmod) / 255); Uint8 B = (Uint8) ((pal->colors[i].b * Bmod) / 255); Uint8 A = (Uint8) ((pal->colors[i].a * Amod) / 255); ASSEMBLE_RGBA(&map[i * bpp], dst->BytesPerPixel, dst, (Uint32)R, (Uint32)G, (Uint32)B, (Uint32)A); } return (map); } /* Map from BitField to Dithered-Palette to Palette */ static Uint8 * MapNto1(SDL_PixelFormat * src, SDL_PixelFormat * dst, int *identical) { /* Generate a 256 color dither palette */ SDL_Palette dithered; SDL_Color colors[256]; SDL_Palette *pal = dst->palette; dithered.ncolors = 256; SDL_DitherColors(colors, 8); dithered.colors = colors; return (Map1to1(&dithered, pal, identical)); } SDL_BlitMap * SDL_AllocBlitMap(void) { SDL_BlitMap *map; /* Allocate the empty map */ map = (SDL_BlitMap *) SDL_calloc(1, sizeof(*map)); if (map == NULL) { SDL_OutOfMemory(); return (NULL); } map->info.r = 0xFF; map->info.g = 0xFF; map->info.b = 0xFF; map->info.a = 0xFF; /* It's ready to go */ return (map); } typedef struct SDL_ListNode { void *entry; struct SDL_ListNode *next; } SDL_ListNode; void SDL_InvalidateAllBlitMap(SDL_Surface *surface) { SDL_ListNode *l = surface->list_blitmap; surface->list_blitmap = NULL; while (l) { SDL_ListNode *tmp = l; SDL_InvalidateMap((SDL_BlitMap *)l->entry); l = l->next; SDL_free(tmp); } } static void SDL_ListAdd(SDL_ListNode **head, void *ent); static void SDL_ListRemove(SDL_ListNode **head, void *ent); void SDL_ListAdd(SDL_ListNode **head, void *ent) { SDL_ListNode *node = SDL_malloc(sizeof (*node)); if (node == NULL) { SDL_OutOfMemory(); return; } node->entry = ent; node->next = *head; *head = node; } void SDL_ListRemove(SDL_ListNode **head, void *ent) { SDL_ListNode **ptr = head; while (*ptr) { if ((*ptr)->entry == ent) { SDL_ListNode *tmp = *ptr; *ptr = (*ptr)->next; SDL_free(tmp); return; } ptr = &(*ptr)->next; } } void SDL_InvalidateMap(SDL_BlitMap * map) { if (!map) { return; } if (map->dst) { /* Un-register from the destination surface */ SDL_ListRemove((SDL_ListNode **)&(map->dst->list_blitmap), map); } map->dst = NULL; map->src_palette_version = 0; map->dst_palette_version = 0; SDL_free(map->info.table); map->info.table = NULL; } int SDL_MapSurface(SDL_Surface * src, SDL_Surface * dst) { SDL_PixelFormat *srcfmt; SDL_PixelFormat *dstfmt; SDL_BlitMap *map; /* Clear out any previous mapping */ map = src->map; #if SDL_HAVE_RLE if ((src->flags & SDL_RLEACCEL) == SDL_RLEACCEL) { 
SDL_UnRLESurface(src, 1); } #endif SDL_InvalidateMap(map); /* Figure out what kind of mapping we're doing */ map->identity = 0; srcfmt = src->format; dstfmt = dst->format; if (SDL_ISPIXELFORMAT_INDEXED(srcfmt->format)) { if (SDL_ISPIXELFORMAT_INDEXED(dstfmt->format)) { /* Palette --> Palette */ map->info.table = Map1to1(srcfmt->palette, dstfmt->palette, &map->identity); if (!map->identity) { if (map->info.table == NULL) { return (-1); } } if (srcfmt->BitsPerPixel != dstfmt->BitsPerPixel) map->identity = 0; } else { /* Palette --> BitField */ map->info.table = Map1toN(srcfmt, src->map->info.r, src->map->info.g, src->map->info.b, src->map->info.a, dstfmt); if (map->info.table == NULL) { return (-1); } } } else { if (SDL_ISPIXELFORMAT_INDEXED(dstfmt->format)) { /* BitField --> Palette */ map->info.table = MapNto1(srcfmt, dstfmt, &map->identity); if (!map->identity) { if (map->info.table == NULL) { return (-1); } } map->identity = 0; /* Don't optimize to copy */ } else { /* BitField --> BitField */ if (srcfmt == dstfmt) { map->identity = 1; } } } map->dst = dst; if (map->dst) { /* Register BlitMap to the destination surface, to be invalidated when needed */ SDL_ListAdd((SDL_ListNode **)&(map->dst->list_blitmap), map); } if (dstfmt->palette) { map->dst_palette_version = dstfmt->palette->version; } else { map->dst_palette_version = 0; } if (srcfmt->palette) { map->src_palette_version = srcfmt->palette->version; } else { map->src_palette_version = 0; } /* Choose your blitters wisely */ return (SDL_CalculateBlit(src)); } void SDL_FreeBlitMap(SDL_BlitMap * map) { if (map) { SDL_InvalidateMap(map); SDL_free(map); } } void SDL_CalculateGammaRamp(float gamma, Uint16 * ramp) { int i; /* Input validation */ if (gamma < 0.0f ) { SDL_InvalidParamError("gamma"); return; } if (ramp == NULL) { SDL_InvalidParamError("ramp"); return; } /* 0.0 gamma is all black */ if (gamma == 0.0f) { SDL_memset(ramp, 0, 256 * sizeof(Uint16)); return; } else if (gamma == 1.0f) { /* 1.0 gamma is identity */ for (i = 0; i < 256; ++i) { ramp[i] = (i << 8) | i; } return; } else { /* Calculate a real gamma ramp */ int value; gamma = 1.0f / gamma; for (i = 0; i < 256; ++i) { value = (int) (SDL_pow((double) i / 256.0, gamma) * 65535.0 + 0.5); if (value > 65535) { value = 65535; } ramp[i] = (Uint16) value; } } } /* vi: set ts=4 sw=4 expandtab: */
/* Simple DirectMedia Layer Copyright (C) 1997-2021 Sam Lantinga <slouken@libsdl.org> This software is provided 'as-is', without any express or implied warranty. In no event will the authors be held liable for any damages arising from the use of this software. Permission is granted to anyone to use this software for any purpose, including commercial applications, and to alter it and redistribute it freely, subject to the following restrictions: 1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required. 2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software. 3. This notice may not be removed or altered from any source distribution. */ #include "../SDL_internal.h" /* General (mostly internal) pixel/color manipulation routines for SDL */ #include "SDL_endian.h" #include "SDL_video.h" #include "SDL_sysvideo.h" #include "SDL_blit.h" #include "SDL_pixels_c.h" #include "SDL_RLEaccel_c.h" /* Lookup tables to expand partial bytes to the full 0..255 range */ static Uint8 lookup_0[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255 }; static Uint8 lookup_1[] = { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58, 60, 62, 64, 66, 68, 70, 72, 74, 76, 78, 80, 82, 84, 86, 88, 90, 92, 94, 96, 98, 100, 102, 104, 106, 108, 110, 112, 114, 116, 118, 120, 122, 124, 126, 128, 130, 132, 134, 136, 138, 140, 142, 144, 146, 148, 150, 152, 154, 156, 158, 160, 162, 164, 166, 168, 170, 172, 174, 176, 178, 180, 182, 184, 186, 188, 190, 192, 194, 196, 198, 200, 202, 204, 206, 208, 210, 212, 214, 216, 218, 220, 222, 224, 226, 228, 230, 232, 234, 236, 238, 240, 242, 244, 246, 248, 250, 252, 255 }; static Uint8 lookup_2[] = { 0, 4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 85, 89, 93, 97, 101, 105, 109, 113, 117, 121, 125, 129, 133, 137, 141, 145, 149, 153, 157, 161, 165, 170, 174, 178, 182, 186, 190, 194, 198, 202, 206, 210, 214, 218, 222, 226, 230, 234, 238, 242, 246, 250, 255 }; static Uint8 lookup_3[] = { 0, 8, 16, 24, 32, 41, 49, 57, 65, 74, 82, 90, 98, 106, 115, 123, 131, 139, 148, 156, 164, 172, 180, 189, 197, 205, 213, 222, 230, 238, 246, 255 }; static Uint8 lookup_4[] = { 
0, 17, 34, 51, 68, 85, 102, 119, 136, 153, 170, 187, 204, 221, 238, 255 }; static Uint8 lookup_5[] = { 0, 36, 72, 109, 145, 182, 218, 255 }; static Uint8 lookup_6[] = { 0, 85, 170, 255 }; static Uint8 lookup_7[] = { 0, 255 }; static Uint8 lookup_8[] = { 255 }; Uint8* SDL_expand_byte[9] = { lookup_0, lookup_1, lookup_2, lookup_3, lookup_4, lookup_5, lookup_6, lookup_7, lookup_8 }; /* Helper functions */ const char* SDL_GetPixelFormatName(Uint32 format) { switch (format) { #define CASE(X) case X: return #X; CASE(SDL_PIXELFORMAT_INDEX1LSB) CASE(SDL_PIXELFORMAT_INDEX1MSB) CASE(SDL_PIXELFORMAT_INDEX4LSB) CASE(SDL_PIXELFORMAT_INDEX4MSB) CASE(SDL_PIXELFORMAT_INDEX8) CASE(SDL_PIXELFORMAT_RGB332) CASE(SDL_PIXELFORMAT_RGB444) CASE(SDL_PIXELFORMAT_BGR444) CASE(SDL_PIXELFORMAT_RGB555) CASE(SDL_PIXELFORMAT_BGR555) CASE(SDL_PIXELFORMAT_ARGB4444) CASE(SDL_PIXELFORMAT_RGBA4444) CASE(SDL_PIXELFORMAT_ABGR4444) CASE(SDL_PIXELFORMAT_BGRA4444) CASE(SDL_PIXELFORMAT_ARGB1555) CASE(SDL_PIXELFORMAT_RGBA5551) CASE(SDL_PIXELFORMAT_ABGR1555) CASE(SDL_PIXELFORMAT_BGRA5551) CASE(SDL_PIXELFORMAT_RGB565) CASE(SDL_PIXELFORMAT_BGR565) CASE(SDL_PIXELFORMAT_RGB24) CASE(SDL_PIXELFORMAT_BGR24) CASE(SDL_PIXELFORMAT_RGB888) CASE(SDL_PIXELFORMAT_RGBX8888) CASE(SDL_PIXELFORMAT_BGR888) CASE(SDL_PIXELFORMAT_BGRX8888) CASE(SDL_PIXELFORMAT_ARGB8888) CASE(SDL_PIXELFORMAT_RGBA8888) CASE(SDL_PIXELFORMAT_ABGR8888) CASE(SDL_PIXELFORMAT_BGRA8888) CASE(SDL_PIXELFORMAT_ARGB2101010) CASE(SDL_PIXELFORMAT_YV12) CASE(SDL_PIXELFORMAT_IYUV) CASE(SDL_PIXELFORMAT_YUY2) CASE(SDL_PIXELFORMAT_UYVY) CASE(SDL_PIXELFORMAT_YVYU) CASE(SDL_PIXELFORMAT_NV12) CASE(SDL_PIXELFORMAT_NV21) #undef CASE default: return "SDL_PIXELFORMAT_UNKNOWN"; } } SDL_bool SDL_PixelFormatEnumToMasks(Uint32 format, int *bpp, Uint32 * Rmask, Uint32 * Gmask, Uint32 * Bmask, Uint32 * Amask) { Uint32 masks[4]; /* This function doesn't work with FourCC pixel formats */ if (SDL_ISPIXELFORMAT_FOURCC(format)) { SDL_SetError("FOURCC pixel formats are not supported"); return SDL_FALSE; } /* Initialize the values here */ if (SDL_BYTESPERPIXEL(format) <= 2) { *bpp = SDL_BITSPERPIXEL(format); } else { *bpp = SDL_BYTESPERPIXEL(format) * 8; } *Rmask = *Gmask = *Bmask = *Amask = 0; if (format == SDL_PIXELFORMAT_RGB24) { #if SDL_BYTEORDER == SDL_BIG_ENDIAN *Rmask = 0x00FF0000; *Gmask = 0x0000FF00; *Bmask = 0x000000FF; #else *Rmask = 0x000000FF; *Gmask = 0x0000FF00; *Bmask = 0x00FF0000; #endif return SDL_TRUE; } if (format == SDL_PIXELFORMAT_BGR24) { #if SDL_BYTEORDER == SDL_BIG_ENDIAN *Rmask = 0x000000FF; *Gmask = 0x0000FF00; *Bmask = 0x00FF0000; #else *Rmask = 0x00FF0000; *Gmask = 0x0000FF00; *Bmask = 0x000000FF; #endif return SDL_TRUE; } if (SDL_PIXELTYPE(format) != SDL_PIXELTYPE_PACKED8 && SDL_PIXELTYPE(format) != SDL_PIXELTYPE_PACKED16 && SDL_PIXELTYPE(format) != SDL_PIXELTYPE_PACKED32) { /* Not a format that uses masks */ return SDL_TRUE; } switch (SDL_PIXELLAYOUT(format)) { case SDL_PACKEDLAYOUT_332: masks[0] = 0x00000000; masks[1] = 0x000000E0; masks[2] = 0x0000001C; masks[3] = 0x00000003; break; case SDL_PACKEDLAYOUT_4444: masks[0] = 0x0000F000; masks[1] = 0x00000F00; masks[2] = 0x000000F0; masks[3] = 0x0000000F; break; case SDL_PACKEDLAYOUT_1555: masks[0] = 0x00008000; masks[1] = 0x00007C00; masks[2] = 0x000003E0; masks[3] = 0x0000001F; break; case SDL_PACKEDLAYOUT_5551: masks[0] = 0x0000F800; masks[1] = 0x000007C0; masks[2] = 0x0000003E; masks[3] = 0x00000001; break; case SDL_PACKEDLAYOUT_565: masks[0] = 0x00000000; masks[1] = 0x0000F800; masks[2] = 0x000007E0; masks[3] = 0x0000001F; 
break; case SDL_PACKEDLAYOUT_8888: masks[0] = 0xFF000000; masks[1] = 0x00FF0000; masks[2] = 0x0000FF00; masks[3] = 0x000000FF; break; case SDL_PACKEDLAYOUT_2101010: masks[0] = 0xC0000000; masks[1] = 0x3FF00000; masks[2] = 0x000FFC00; masks[3] = 0x000003FF; break; case SDL_PACKEDLAYOUT_1010102: masks[0] = 0xFFC00000; masks[1] = 0x003FF000; masks[2] = 0x00000FFC; masks[3] = 0x00000003; break; default: SDL_SetError("Unknown pixel format"); return SDL_FALSE; } switch (SDL_PIXELORDER(format)) { case SDL_PACKEDORDER_XRGB: *Rmask = masks[1]; *Gmask = masks[2]; *Bmask = masks[3]; break; case SDL_PACKEDORDER_RGBX: *Rmask = masks[0]; *Gmask = masks[1]; *Bmask = masks[2]; break; case SDL_PACKEDORDER_ARGB: *Amask = masks[0]; *Rmask = masks[1]; *Gmask = masks[2]; *Bmask = masks[3]; break; case SDL_PACKEDORDER_RGBA: *Rmask = masks[0]; *Gmask = masks[1]; *Bmask = masks[2]; *Amask = masks[3]; break; case SDL_PACKEDORDER_XBGR: *Bmask = masks[1]; *Gmask = masks[2]; *Rmask = masks[3]; break; case SDL_PACKEDORDER_BGRX: *Bmask = masks[0]; *Gmask = masks[1]; *Rmask = masks[2]; break; case SDL_PACKEDORDER_BGRA: *Bmask = masks[0]; *Gmask = masks[1]; *Rmask = masks[2]; *Amask = masks[3]; break; case SDL_PACKEDORDER_ABGR: *Amask = masks[0]; *Bmask = masks[1]; *Gmask = masks[2]; *Rmask = masks[3]; break; default: SDL_SetError("Unknown pixel format"); return SDL_FALSE; } return SDL_TRUE; } Uint32 SDL_MasksToPixelFormatEnum(int bpp, Uint32 Rmask, Uint32 Gmask, Uint32 Bmask, Uint32 Amask) { switch (bpp) { case 1: /* SDL defaults to MSB ordering */ return SDL_PIXELFORMAT_INDEX1MSB; case 4: /* SDL defaults to MSB ordering */ return SDL_PIXELFORMAT_INDEX4MSB; case 8: if (Rmask == 0) { return SDL_PIXELFORMAT_INDEX8; } if (Rmask == 0xE0 && Gmask == 0x1C && Bmask == 0x03 && Amask == 0x00) { return SDL_PIXELFORMAT_RGB332; } break; case 12: if (Rmask == 0) { return SDL_PIXELFORMAT_RGB444; } if (Rmask == 0x0F00 && Gmask == 0x00F0 && Bmask == 0x000F && Amask == 0x0000) { return SDL_PIXELFORMAT_RGB444; } if (Rmask == 0x000F && Gmask == 0x00F0 && Bmask == 0x0F00 && Amask == 0x0000) { return SDL_PIXELFORMAT_BGR444; } break; case 15: if (Rmask == 0) { return SDL_PIXELFORMAT_RGB555; } SDL_FALLTHROUGH; case 16: if (Rmask == 0) { return SDL_PIXELFORMAT_RGB565; } if (Rmask == 0x7C00 && Gmask == 0x03E0 && Bmask == 0x001F && Amask == 0x0000) { return SDL_PIXELFORMAT_RGB555; } if (Rmask == 0x001F && Gmask == 0x03E0 && Bmask == 0x7C00 && Amask == 0x0000) { return SDL_PIXELFORMAT_BGR555; } if (Rmask == 0x0F00 && Gmask == 0x00F0 && Bmask == 0x000F && Amask == 0xF000) { return SDL_PIXELFORMAT_ARGB4444; } if (Rmask == 0xF000 && Gmask == 0x0F00 && Bmask == 0x00F0 && Amask == 0x000F) { return SDL_PIXELFORMAT_RGBA4444; } if (Rmask == 0x000F && Gmask == 0x00F0 && Bmask == 0x0F00 && Amask == 0xF000) { return SDL_PIXELFORMAT_ABGR4444; } if (Rmask == 0x00F0 && Gmask == 0x0F00 && Bmask == 0xF000 && Amask == 0x000F) { return SDL_PIXELFORMAT_BGRA4444; } if (Rmask == 0x7C00 && Gmask == 0x03E0 && Bmask == 0x001F && Amask == 0x8000) { return SDL_PIXELFORMAT_ARGB1555; } if (Rmask == 0xF800 && Gmask == 0x07C0 && Bmask == 0x003E && Amask == 0x0001) { return SDL_PIXELFORMAT_RGBA5551; } if (Rmask == 0x001F && Gmask == 0x03E0 && Bmask == 0x7C00 && Amask == 0x8000) { return SDL_PIXELFORMAT_ABGR1555; } if (Rmask == 0x003E && Gmask == 0x07C0 && Bmask == 0xF800 && Amask == 0x0001) { return SDL_PIXELFORMAT_BGRA5551; } if (Rmask == 0xF800 && Gmask == 0x07E0 && Bmask == 0x001F && Amask == 0x0000) { return SDL_PIXELFORMAT_RGB565; } if (Rmask == 0x001F && Gmask == 0x07E0 
&& Bmask == 0xF800 && Amask == 0x0000) { return SDL_PIXELFORMAT_BGR565; } if (Rmask == 0x003F && Gmask == 0x07C0 && Bmask == 0xF800 && Amask == 0x0000) { /* Technically this would be BGR556, but Witek says this works in bug 3158 */ return SDL_PIXELFORMAT_RGB565; } break; case 24: switch (Rmask) { case 0: case 0x00FF0000: #if SDL_BYTEORDER == SDL_BIG_ENDIAN return SDL_PIXELFORMAT_RGB24; #else return SDL_PIXELFORMAT_BGR24; #endif case 0x000000FF: #if SDL_BYTEORDER == SDL_BIG_ENDIAN return SDL_PIXELFORMAT_BGR24; #else return SDL_PIXELFORMAT_RGB24; #endif } case 32: if (Rmask == 0) { return SDL_PIXELFORMAT_RGB888; } if (Rmask == 0x00FF0000 && Gmask == 0x0000FF00 && Bmask == 0x000000FF && Amask == 0x00000000) { return SDL_PIXELFORMAT_RGB888; } if (Rmask == 0xFF000000 && Gmask == 0x00FF0000 && Bmask == 0x0000FF00 && Amask == 0x00000000) { return SDL_PIXELFORMAT_RGBX8888; } if (Rmask == 0x000000FF && Gmask == 0x0000FF00 && Bmask == 0x00FF0000 && Amask == 0x00000000) { return SDL_PIXELFORMAT_BGR888; } if (Rmask == 0x0000FF00 && Gmask == 0x00FF0000 && Bmask == 0xFF000000 && Amask == 0x00000000) { return SDL_PIXELFORMAT_BGRX8888; } if (Rmask == 0x00FF0000 && Gmask == 0x0000FF00 && Bmask == 0x000000FF && Amask == 0xFF000000) { return SDL_PIXELFORMAT_ARGB8888; } if (Rmask == 0xFF000000 && Gmask == 0x00FF0000 && Bmask == 0x0000FF00 && Amask == 0x000000FF) { return SDL_PIXELFORMAT_RGBA8888; } if (Rmask == 0x000000FF && Gmask == 0x0000FF00 && Bmask == 0x00FF0000 && Amask == 0xFF000000) { return SDL_PIXELFORMAT_ABGR8888; } if (Rmask == 0x0000FF00 && Gmask == 0x00FF0000 && Bmask == 0xFF000000 && Amask == 0x000000FF) { return SDL_PIXELFORMAT_BGRA8888; } if (Rmask == 0x3FF00000 && Gmask == 0x000FFC00 && Bmask == 0x000003FF && Amask == 0xC0000000) { return SDL_PIXELFORMAT_ARGB2101010; } } return SDL_PIXELFORMAT_UNKNOWN; } static SDL_PixelFormat *formats; static SDL_SpinLock formats_lock = 0; SDL_PixelFormat * SDL_AllocFormat(Uint32 pixel_format) { SDL_PixelFormat *format; SDL_AtomicLock(&formats_lock); /* Look it up in our list of previously allocated formats */ for (format = formats; format; format = format->next) { if (pixel_format == format->format) { ++format->refcount; SDL_AtomicUnlock(&formats_lock); return format; } } /* Allocate an empty pixel format structure, and initialize it */ format = SDL_malloc(sizeof(*format)); if (format == NULL) { SDL_AtomicUnlock(&formats_lock); SDL_OutOfMemory(); return NULL; } if (SDL_InitFormat(format, pixel_format) < 0) { SDL_AtomicUnlock(&formats_lock); SDL_free(format); SDL_InvalidParamError("format"); return NULL; } if (!SDL_ISPIXELFORMAT_INDEXED(pixel_format)) { /* Cache the RGB formats */ format->next = formats; formats = format; } SDL_AtomicUnlock(&formats_lock); return format; } int SDL_InitFormat(SDL_PixelFormat * format, Uint32 pixel_format) { int bpp; Uint32 Rmask, Gmask, Bmask, Amask; Uint32 mask; if (!SDL_PixelFormatEnumToMasks(pixel_format, &bpp, &Rmask, &Gmask, &Bmask, &Amask)) { return -1; } /* Set up the format */ SDL_zerop(format); format->format = pixel_format; format->BitsPerPixel = bpp; format->BytesPerPixel = (bpp + 7) / 8; format->Rmask = Rmask; format->Rshift = 0; format->Rloss = 8; if (Rmask) { for (mask = Rmask; !(mask & 0x01); mask >>= 1) ++format->Rshift; for (; (mask & 0x01); mask >>= 1) --format->Rloss; } format->Gmask = Gmask; format->Gshift = 0; format->Gloss = 8; if (Gmask) { for (mask = Gmask; !(mask & 0x01); mask >>= 1) ++format->Gshift; for (; (mask & 0x01); mask >>= 1) --format->Gloss; } format->Bmask = Bmask; format->Bshift = 0; 
format->Bloss = 8; if (Bmask) { for (mask = Bmask; !(mask & 0x01); mask >>= 1) ++format->Bshift; for (; (mask & 0x01); mask >>= 1) --format->Bloss; } format->Amask = Amask; format->Ashift = 0; format->Aloss = 8; if (Amask) { for (mask = Amask; !(mask & 0x01); mask >>= 1) ++format->Ashift; for (; (mask & 0x01); mask >>= 1) --format->Aloss; } format->palette = NULL; format->refcount = 1; format->next = NULL; return 0; } void SDL_FreeFormat(SDL_PixelFormat *format) { SDL_PixelFormat *prev; if (!format) { SDL_InvalidParamError("format"); return; } SDL_AtomicLock(&formats_lock); if (--format->refcount > 0) { SDL_AtomicUnlock(&formats_lock); return; } /* Remove this format from our list */ if (format == formats) { formats = format->next; } else if (formats) { for (prev = formats; prev->next; prev = prev->next) { if (prev->next == format) { prev->next = format->next; break; } } } SDL_AtomicUnlock(&formats_lock); if (format->palette) { SDL_FreePalette(format->palette); } SDL_free(format); } SDL_Palette * SDL_AllocPalette(int ncolors) { SDL_Palette *palette; /* Input validation */ if (ncolors < 1) { SDL_InvalidParamError("ncolors"); return NULL; } palette = (SDL_Palette *) SDL_malloc(sizeof(*palette)); if (!palette) { SDL_OutOfMemory(); return NULL; } palette->colors = (SDL_Color *) SDL_malloc(ncolors * sizeof(*palette->colors)); if (!palette->colors) { SDL_free(palette); return NULL; } palette->ncolors = ncolors; palette->version = 1; palette->refcount = 1; SDL_memset(palette->colors, 0xFF, ncolors * sizeof(*palette->colors)); return palette; } int SDL_SetPixelFormatPalette(SDL_PixelFormat * format, SDL_Palette *palette) { if (!format) { return SDL_SetError("SDL_SetPixelFormatPalette() passed NULL format"); } if (palette && palette->ncolors > (1 << format->BitsPerPixel)) { return SDL_SetError("SDL_SetPixelFormatPalette() passed a palette that doesn't match the format"); } if (format->palette == palette) { return 0; } if (format->palette) { SDL_FreePalette(format->palette); } format->palette = palette; if (format->palette) { ++format->palette->refcount; } return 0; } int SDL_SetPaletteColors(SDL_Palette * palette, const SDL_Color * colors, int firstcolor, int ncolors) { int status = 0; /* Verify the parameters */ if (!palette) { return -1; } if (ncolors > (palette->ncolors - firstcolor)) { ncolors = (palette->ncolors - firstcolor); status = -1; } if (colors != (palette->colors + firstcolor)) { SDL_memcpy(palette->colors + firstcolor, colors, ncolors * sizeof(*colors)); } ++palette->version; if (!palette->version) { palette->version = 1; } return status; } void SDL_FreePalette(SDL_Palette * palette) { if (!palette) { SDL_InvalidParamError("palette"); return; } if (--palette->refcount > 0) { return; } SDL_free(palette->colors); SDL_free(palette); } /* * Calculate an 8-bit (3 red, 3 green, 2 blue) dithered palette of colors */ void SDL_DitherColors(SDL_Color * colors, int bpp) { int i; if (bpp != 8) return; /* only 8bpp supported right now */ for (i = 0; i < 256; i++) { int r, g, b; /* map each bit field to the full [0, 255] interval, so 0 is mapped to (0, 0, 0) and 255 to (255, 255, 255) */ r = i & 0xe0; r |= r >> 3 | r >> 6; colors[i].r = r; g = (i << 3) & 0xe0; g |= g >> 3 | g >> 6; colors[i].g = g; b = i & 0x3; b |= b << 2; b |= b << 4; colors[i].b = b; colors[i].a = SDL_ALPHA_OPAQUE; } } /* * Match an RGB value to a particular palette index */ Uint8 SDL_FindColor(SDL_Palette * pal, Uint8 r, Uint8 g, Uint8 b, Uint8 a) { /* Do colorspace distance matching */ unsigned int smallest; unsigned int 
distance; int rd, gd, bd, ad; int i; Uint8 pixel = 0; smallest = ~0; for (i = 0; i < pal->ncolors; ++i) { rd = pal->colors[i].r - r; gd = pal->colors[i].g - g; bd = pal->colors[i].b - b; ad = pal->colors[i].a - a; distance = (rd * rd) + (gd * gd) + (bd * bd) + (ad * ad); if (distance < smallest) { pixel = i; if (distance == 0) { /* Perfect match! */ break; } smallest = distance; } } return (pixel); } /* Tell whether palette is opaque, and if it has an alpha_channel */ void SDL_DetectPalette(SDL_Palette *pal, SDL_bool *is_opaque, SDL_bool *has_alpha_channel) { int i; { SDL_bool all_opaque = SDL_TRUE; for (i = 0; i < pal->ncolors; i++) { Uint8 alpha_value = pal->colors[i].a; if (alpha_value != SDL_ALPHA_OPAQUE) { all_opaque = SDL_FALSE; break; } } if (all_opaque) { /* Palette is opaque, with an alpha channel */ *is_opaque = SDL_TRUE; *has_alpha_channel = SDL_TRUE; return; } } { SDL_bool all_transparent = SDL_TRUE; for (i = 0; i < pal->ncolors; i++) { Uint8 alpha_value = pal->colors[i].a; if (alpha_value != SDL_ALPHA_TRANSPARENT) { all_transparent = SDL_FALSE; break; } } if (all_transparent) { /* Palette is opaque, without an alpha channel */ *is_opaque = SDL_TRUE; *has_alpha_channel = SDL_FALSE; return; } } /* Palette has alpha values */ *is_opaque = SDL_FALSE; *has_alpha_channel = SDL_TRUE; } /* Find the opaque pixel value corresponding to an RGB triple */ Uint32 SDL_MapRGB(const SDL_PixelFormat * format, Uint8 r, Uint8 g, Uint8 b) { if (format->palette == NULL) { return (r >> format->Rloss) << format->Rshift | (g >> format->Gloss) << format->Gshift | (b >> format->Bloss) << format->Bshift | format->Amask; } else { return SDL_FindColor(format->palette, r, g, b, SDL_ALPHA_OPAQUE); } } /* Find the pixel value corresponding to an RGBA quadruple */ Uint32 SDL_MapRGBA(const SDL_PixelFormat * format, Uint8 r, Uint8 g, Uint8 b, Uint8 a) { if (format->palette == NULL) { return (r >> format->Rloss) << format->Rshift | (g >> format->Gloss) << format->Gshift | (b >> format->Bloss) << format->Bshift | ((Uint32)(a >> format->Aloss) << format->Ashift & format->Amask); } else { return SDL_FindColor(format->palette, r, g, b, a); } } void SDL_GetRGB(Uint32 pixel, const SDL_PixelFormat * format, Uint8 * r, Uint8 * g, Uint8 * b) { if (format->palette == NULL) { unsigned v; v = (pixel & format->Rmask) >> format->Rshift; *r = SDL_expand_byte[format->Rloss][v]; v = (pixel & format->Gmask) >> format->Gshift; *g = SDL_expand_byte[format->Gloss][v]; v = (pixel & format->Bmask) >> format->Bshift; *b = SDL_expand_byte[format->Bloss][v]; } else { if (pixel < (unsigned)format->palette->ncolors) { *r = format->palette->colors[pixel].r; *g = format->palette->colors[pixel].g; *b = format->palette->colors[pixel].b; } else { *r = *g = *b = 0; } } } void SDL_GetRGBA(Uint32 pixel, const SDL_PixelFormat * format, Uint8 * r, Uint8 * g, Uint8 * b, Uint8 * a) { if (format->palette == NULL) { unsigned v; v = (pixel & format->Rmask) >> format->Rshift; *r = SDL_expand_byte[format->Rloss][v]; v = (pixel & format->Gmask) >> format->Gshift; *g = SDL_expand_byte[format->Gloss][v]; v = (pixel & format->Bmask) >> format->Bshift; *b = SDL_expand_byte[format->Bloss][v]; v = (pixel & format->Amask) >> format->Ashift; *a = SDL_expand_byte[format->Aloss][v]; } else { if (pixel < (unsigned)format->palette->ncolors) { *r = format->palette->colors[pixel].r; *g = format->palette->colors[pixel].g; *b = format->palette->colors[pixel].b; *a = format->palette->colors[pixel].a; } else { *r = *g = *b = *a = 0; } } } /* Map from Palette to Palette */ 
static Uint8 * Map1to1(SDL_Palette * src, SDL_Palette * dst, int *identical) { Uint8 *map; int i; if (identical) { if (src->ncolors <= dst->ncolors) { /* If an identical palette, no need to map */ if (src == dst || (SDL_memcmp (src->colors, dst->colors, src->ncolors * sizeof(SDL_Color)) == 0)) { *identical = 1; return (NULL); } } *identical = 0; } map = (Uint8 *) SDL_calloc(256, sizeof(Uint8)); if (map == NULL) { SDL_OutOfMemory(); return (NULL); } for (i = 0; i < src->ncolors; ++i) { map[i] = SDL_FindColor(dst, src->colors[i].r, src->colors[i].g, src->colors[i].b, src->colors[i].a); } return (map); } /* Map from Palette to BitField */ static Uint8 * Map1toN(SDL_PixelFormat * src, Uint8 Rmod, Uint8 Gmod, Uint8 Bmod, Uint8 Amod, SDL_PixelFormat * dst) { Uint8 *map; int i; int bpp; SDL_Palette *pal = src->palette; bpp = ((dst->BytesPerPixel == 3) ? 4 : dst->BytesPerPixel); map = (Uint8 *) SDL_calloc(256, bpp); if (map == NULL) { SDL_OutOfMemory(); return (NULL); } /* We memory copy to the pixel map so the endianness is preserved */ for (i = 0; i < pal->ncolors; ++i) { Uint8 R = (Uint8) ((pal->colors[i].r * Rmod) / 255); Uint8 G = (Uint8) ((pal->colors[i].g * Gmod) / 255); Uint8 B = (Uint8) ((pal->colors[i].b * Bmod) / 255); Uint8 A = (Uint8) ((pal->colors[i].a * Amod) / 255); ASSEMBLE_RGBA(&map[i * bpp], dst->BytesPerPixel, dst, (Uint32)R, (Uint32)G, (Uint32)B, (Uint32)A); } return (map); } /* Map from BitField to Dithered-Palette to Palette */ static Uint8 * MapNto1(SDL_PixelFormat * src, SDL_PixelFormat * dst, int *identical) { /* Generate a 256 color dither palette */ SDL_Palette dithered; SDL_Color colors[256]; SDL_Palette *pal = dst->palette; dithered.ncolors = 256; SDL_DitherColors(colors, 8); dithered.colors = colors; return (Map1to1(&dithered, pal, identical)); } SDL_BlitMap * SDL_AllocBlitMap(void) { SDL_BlitMap *map; /* Allocate the empty map */ map = (SDL_BlitMap *) SDL_calloc(1, sizeof(*map)); if (map == NULL) { SDL_OutOfMemory(); return (NULL); } map->info.r = 0xFF; map->info.g = 0xFF; map->info.b = 0xFF; map->info.a = 0xFF; /* It's ready to go */ return (map); } typedef struct SDL_ListNode { void *entry; struct SDL_ListNode *next; } SDL_ListNode; void SDL_InvalidateAllBlitMap(SDL_Surface *surface) { SDL_ListNode *l = surface->list_blitmap; surface->list_blitmap = NULL; while (l) { SDL_ListNode *tmp = l; SDL_InvalidateMap((SDL_BlitMap *)l->entry); l = l->next; SDL_free(tmp); } } static void SDL_ListAdd(SDL_ListNode **head, void *ent); static void SDL_ListRemove(SDL_ListNode **head, void *ent); void SDL_ListAdd(SDL_ListNode **head, void *ent) { SDL_ListNode *node = SDL_malloc(sizeof (*node)); if (node == NULL) { SDL_OutOfMemory(); return; } node->entry = ent; node->next = *head; *head = node; } void SDL_ListRemove(SDL_ListNode **head, void *ent) { SDL_ListNode **ptr = head; while (*ptr) { if ((*ptr)->entry == ent) { SDL_ListNode *tmp = *ptr; *ptr = (*ptr)->next; SDL_free(tmp); return; } ptr = &(*ptr)->next; } } void SDL_InvalidateMap(SDL_BlitMap * map) { if (!map) { return; } if (map->dst) { /* Un-register from the destination surface */ SDL_ListRemove((SDL_ListNode **)&(map->dst->list_blitmap), map); } map->dst = NULL; map->src_palette_version = 0; map->dst_palette_version = 0; SDL_free(map->info.table); map->info.table = NULL; } int SDL_MapSurface(SDL_Surface * src, SDL_Surface * dst) { SDL_PixelFormat *srcfmt; SDL_PixelFormat *dstfmt; SDL_BlitMap *map; /* Clear out any previous mapping */ map = src->map; #if SDL_HAVE_RLE if ((src->flags & SDL_RLEACCEL) == SDL_RLEACCEL) { 
SDL_UnRLESurface(src, 1); } #endif SDL_InvalidateMap(map); /* Figure out what kind of mapping we're doing */ map->identity = 0; srcfmt = src->format; dstfmt = dst->format; if (SDL_ISPIXELFORMAT_INDEXED(srcfmt->format)) { if (SDL_ISPIXELFORMAT_INDEXED(dstfmt->format)) { /* Palette --> Palette */ map->info.table = Map1to1(srcfmt->palette, dstfmt->palette, &map->identity); if (!map->identity) { if (map->info.table == NULL) { return (-1); } } if (srcfmt->BitsPerPixel != dstfmt->BitsPerPixel) map->identity = 0; } else { /* Palette --> BitField */ map->info.table = Map1toN(srcfmt, src->map->info.r, src->map->info.g, src->map->info.b, src->map->info.a, dstfmt); if (map->info.table == NULL) { return (-1); } } } else { if (SDL_ISPIXELFORMAT_INDEXED(dstfmt->format)) { /* BitField --> Palette */ map->info.table = MapNto1(srcfmt, dstfmt, &map->identity); if (!map->identity) { if (map->info.table == NULL) { return (-1); } } map->identity = 0; /* Don't optimize to copy */ } else { /* BitField --> BitField */ if (srcfmt == dstfmt) { map->identity = 1; } } } map->dst = dst; if (map->dst) { /* Register BlitMap to the destination surface, to be invalidated when needed */ SDL_ListAdd((SDL_ListNode **)&(map->dst->list_blitmap), map); } if (dstfmt->palette) { map->dst_palette_version = dstfmt->palette->version; } else { map->dst_palette_version = 0; } if (srcfmt->palette) { map->src_palette_version = srcfmt->palette->version; } else { map->src_palette_version = 0; } /* Choose your blitters wisely */ return (SDL_CalculateBlit(src)); } void SDL_FreeBlitMap(SDL_BlitMap * map) { if (map) { SDL_InvalidateMap(map); SDL_free(map); } } void SDL_CalculateGammaRamp(float gamma, Uint16 * ramp) { int i; /* Input validation */ if (gamma < 0.0f ) { SDL_InvalidParamError("gamma"); return; } if (ramp == NULL) { SDL_InvalidParamError("ramp"); return; } /* 0.0 gamma is all black */ if (gamma == 0.0f) { SDL_memset(ramp, 0, 256 * sizeof(Uint16)); return; } else if (gamma == 1.0f) { /* 1.0 gamma is identity */ for (i = 0; i < 256; ++i) { ramp[i] = (i << 8) | i; } return; } else { /* Calculate a real gamma ramp */ int value; gamma = 1.0f / gamma; for (i = 0; i < 256; ++i) { value = (int) (SDL_pow((double) i / 256.0, gamma) * 65535.0 + 0.5); if (value > 65535) { value = 65535; } ramp[i] = (Uint16) value; } } } /* vi: set ts=4 sw=4 expandtab: */
Map1to1(SDL_Palette * src, SDL_Palette * dst, int *identical) { Uint8 *map; int i; if (identical) { if (src->ncolors <= dst->ncolors) { /* If an identical palette, no need to map */ if (src == dst || (SDL_memcmp (src->colors, dst->colors, src->ncolors * sizeof(SDL_Color)) == 0)) { *identical = 1; return (NULL); } } *identical = 0; } map = (Uint8 *) SDL_malloc(src->ncolors); if (map == NULL) { SDL_OutOfMemory(); return (NULL); } for (i = 0; i < src->ncolors; ++i) { map[i] = SDL_FindColor(dst, src->colors[i].r, src->colors[i].g, src->colors[i].b, src->colors[i].a); } return (map); }
Map1to1(SDL_Palette * src, SDL_Palette * dst, int *identical) { Uint8 *map; int i; if (identical) { if (src->ncolors <= dst->ncolors) { /* If an identical palette, no need to map */ if (src == dst || (SDL_memcmp (src->colors, dst->colors, src->ncolors * sizeof(SDL_Color)) == 0)) { *identical = 1; return (NULL); } } *identical = 0; } map = (Uint8 *) SDL_calloc(256, sizeof(Uint8)); if (map == NULL) { SDL_OutOfMemory(); return (NULL); } for (i = 0; i < src->ncolors; ++i) { map[i] = SDL_FindColor(dst, src->colors[i].r, src->colors[i].g, src->colors[i].b, src->colors[i].a); } return (map); }
{'added': [(950, ' map = (Uint8 *) SDL_calloc(256, sizeof(Uint8));'), (974, ' map = (Uint8 *) SDL_calloc(256, bpp);')], 'deleted': [(950, ' map = (Uint8 *) SDL_malloc(src->ncolors);'), (974, ' map = (Uint8 *) SDL_malloc(pal->ncolors * bpp);')]}
2
2
1,025
6,535
https://github.com/libsdl-org/SDL
CVE-2021-33657
['CWE-787']
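The fix this record captures for CVE-2021-33657 (CWE-787, out-of-bounds write) changes exactly the two allocations shown in the diff field: Map1to1's translation map grows from src->ncolors bytes to a zero-filled 256 entries, and Map1toN's from pal->ncolors * bpp bytes to 256 * bpp. The map is indexed by raw 8-bit pixel values during a blit, and a crafted surface can carry pixel bytes greater than or equal to the palette's ncolors, so any map sized by the palette can be overrun. A minimal sketch of that indexing pattern, not taken from SDL (alloc_index_map and blit_8bpp are hypothetical names):

#include <stdint.h>
#include <stdlib.h>

#define MAP_ENTRIES 256                 /* one slot per possible Uint8 value */

/* Build a palette-index translation map sized for every 8-bit pixel value,
 * mirroring the patched behaviour: entries past ncolors exist and are zero,
 * instead of the map simply ending at ncolors. */
static uint8_t *alloc_index_map(int ncolors)
{
    uint8_t *map = calloc(MAP_ENTRIES, sizeof(uint8_t));
    if (map == NULL) {
        return NULL;
    }
    for (int i = 0; i < ncolors && i < MAP_ENTRIES; ++i) {
        map[i] = (uint8_t)i;            /* stand-in for a real color match */
    }
    return map;
}

/* A blit inner loop uses each source byte directly as a map index, so the
 * map must cover 0..255 no matter how small the palette is. */
static void blit_8bpp(const uint8_t *src, uint8_t *dst, size_t n,
                      const uint8_t *map)
{
    for (size_t i = 0; i < n; ++i) {
        dst[i] = map[src[i]];
    }
}

int main(void)
{
    uint8_t pixels[4] = { 0x00, 0x01, 0x7F, 0xFF };  /* 0xFF >= ncolors */
    uint8_t out[4];
    uint8_t *map = alloc_index_map(2);  /* a 2-color palette */
    if (map == NULL) {
        return 1;
    }
    blit_8bpp(pixels, out, 4, map);     /* in bounds even for index 0xFF */
    free(map);
    return 0;
}

Zero-filling with calloc also makes the untranslated entries deterministic: a stray index resolves to entry 0 rather than to whatever followed the old, shorter allocation.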
SDL_pixels.c
Map1toN
/* Simple DirectMedia Layer Copyright (C) 1997-2021 Sam Lantinga <slouken@libsdl.org> This software is provided 'as-is', without any express or implied warranty. In no event will the authors be held liable for any damages arising from the use of this software. Permission is granted to anyone to use this software for any purpose, including commercial applications, and to alter it and redistribute it freely, subject to the following restrictions: 1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required. 2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software. 3. This notice may not be removed or altered from any source distribution. */ #include "../SDL_internal.h" /* General (mostly internal) pixel/color manipulation routines for SDL */ #include "SDL_endian.h" #include "SDL_video.h" #include "SDL_sysvideo.h" #include "SDL_blit.h" #include "SDL_pixels_c.h" #include "SDL_RLEaccel_c.h" /* Lookup tables to expand partial bytes to the full 0..255 range */ static Uint8 lookup_0[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255 }; static Uint8 lookup_1[] = { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58, 60, 62, 64, 66, 68, 70, 72, 74, 76, 78, 80, 82, 84, 86, 88, 90, 92, 94, 96, 98, 100, 102, 104, 106, 108, 110, 112, 114, 116, 118, 120, 122, 124, 126, 128, 130, 132, 134, 136, 138, 140, 142, 144, 146, 148, 150, 152, 154, 156, 158, 160, 162, 164, 166, 168, 170, 172, 174, 176, 178, 180, 182, 184, 186, 188, 190, 192, 194, 196, 198, 200, 202, 204, 206, 208, 210, 212, 214, 216, 218, 220, 222, 224, 226, 228, 230, 232, 234, 236, 238, 240, 242, 244, 246, 248, 250, 252, 255 }; static Uint8 lookup_2[] = { 0, 4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 85, 89, 93, 97, 101, 105, 109, 113, 117, 121, 125, 129, 133, 137, 141, 145, 149, 153, 157, 161, 165, 170, 174, 178, 182, 186, 190, 194, 198, 202, 206, 210, 214, 218, 222, 226, 230, 234, 238, 242, 246, 250, 255 }; static Uint8 lookup_3[] = { 0, 8, 16, 24, 32, 41, 49, 57, 65, 74, 82, 90, 98, 106, 115, 123, 131, 139, 148, 156, 164, 172, 180, 189, 197, 205, 213, 222, 230, 238, 246, 255 }; static Uint8 lookup_4[] = { 
0, 17, 34, 51, 68, 85, 102, 119, 136, 153, 170, 187, 204, 221, 238, 255 }; static Uint8 lookup_5[] = { 0, 36, 72, 109, 145, 182, 218, 255 }; static Uint8 lookup_6[] = { 0, 85, 170, 255 }; static Uint8 lookup_7[] = { 0, 255 }; static Uint8 lookup_8[] = { 255 }; Uint8* SDL_expand_byte[9] = { lookup_0, lookup_1, lookup_2, lookup_3, lookup_4, lookup_5, lookup_6, lookup_7, lookup_8 }; /* Helper functions */ const char* SDL_GetPixelFormatName(Uint32 format) { switch (format) { #define CASE(X) case X: return #X; CASE(SDL_PIXELFORMAT_INDEX1LSB) CASE(SDL_PIXELFORMAT_INDEX1MSB) CASE(SDL_PIXELFORMAT_INDEX4LSB) CASE(SDL_PIXELFORMAT_INDEX4MSB) CASE(SDL_PIXELFORMAT_INDEX8) CASE(SDL_PIXELFORMAT_RGB332) CASE(SDL_PIXELFORMAT_RGB444) CASE(SDL_PIXELFORMAT_BGR444) CASE(SDL_PIXELFORMAT_RGB555) CASE(SDL_PIXELFORMAT_BGR555) CASE(SDL_PIXELFORMAT_ARGB4444) CASE(SDL_PIXELFORMAT_RGBA4444) CASE(SDL_PIXELFORMAT_ABGR4444) CASE(SDL_PIXELFORMAT_BGRA4444) CASE(SDL_PIXELFORMAT_ARGB1555) CASE(SDL_PIXELFORMAT_RGBA5551) CASE(SDL_PIXELFORMAT_ABGR1555) CASE(SDL_PIXELFORMAT_BGRA5551) CASE(SDL_PIXELFORMAT_RGB565) CASE(SDL_PIXELFORMAT_BGR565) CASE(SDL_PIXELFORMAT_RGB24) CASE(SDL_PIXELFORMAT_BGR24) CASE(SDL_PIXELFORMAT_RGB888) CASE(SDL_PIXELFORMAT_RGBX8888) CASE(SDL_PIXELFORMAT_BGR888) CASE(SDL_PIXELFORMAT_BGRX8888) CASE(SDL_PIXELFORMAT_ARGB8888) CASE(SDL_PIXELFORMAT_RGBA8888) CASE(SDL_PIXELFORMAT_ABGR8888) CASE(SDL_PIXELFORMAT_BGRA8888) CASE(SDL_PIXELFORMAT_ARGB2101010) CASE(SDL_PIXELFORMAT_YV12) CASE(SDL_PIXELFORMAT_IYUV) CASE(SDL_PIXELFORMAT_YUY2) CASE(SDL_PIXELFORMAT_UYVY) CASE(SDL_PIXELFORMAT_YVYU) CASE(SDL_PIXELFORMAT_NV12) CASE(SDL_PIXELFORMAT_NV21) #undef CASE default: return "SDL_PIXELFORMAT_UNKNOWN"; } } SDL_bool SDL_PixelFormatEnumToMasks(Uint32 format, int *bpp, Uint32 * Rmask, Uint32 * Gmask, Uint32 * Bmask, Uint32 * Amask) { Uint32 masks[4]; /* This function doesn't work with FourCC pixel formats */ if (SDL_ISPIXELFORMAT_FOURCC(format)) { SDL_SetError("FOURCC pixel formats are not supported"); return SDL_FALSE; } /* Initialize the values here */ if (SDL_BYTESPERPIXEL(format) <= 2) { *bpp = SDL_BITSPERPIXEL(format); } else { *bpp = SDL_BYTESPERPIXEL(format) * 8; } *Rmask = *Gmask = *Bmask = *Amask = 0; if (format == SDL_PIXELFORMAT_RGB24) { #if SDL_BYTEORDER == SDL_BIG_ENDIAN *Rmask = 0x00FF0000; *Gmask = 0x0000FF00; *Bmask = 0x000000FF; #else *Rmask = 0x000000FF; *Gmask = 0x0000FF00; *Bmask = 0x00FF0000; #endif return SDL_TRUE; } if (format == SDL_PIXELFORMAT_BGR24) { #if SDL_BYTEORDER == SDL_BIG_ENDIAN *Rmask = 0x000000FF; *Gmask = 0x0000FF00; *Bmask = 0x00FF0000; #else *Rmask = 0x00FF0000; *Gmask = 0x0000FF00; *Bmask = 0x000000FF; #endif return SDL_TRUE; } if (SDL_PIXELTYPE(format) != SDL_PIXELTYPE_PACKED8 && SDL_PIXELTYPE(format) != SDL_PIXELTYPE_PACKED16 && SDL_PIXELTYPE(format) != SDL_PIXELTYPE_PACKED32) { /* Not a format that uses masks */ return SDL_TRUE; } switch (SDL_PIXELLAYOUT(format)) { case SDL_PACKEDLAYOUT_332: masks[0] = 0x00000000; masks[1] = 0x000000E0; masks[2] = 0x0000001C; masks[3] = 0x00000003; break; case SDL_PACKEDLAYOUT_4444: masks[0] = 0x0000F000; masks[1] = 0x00000F00; masks[2] = 0x000000F0; masks[3] = 0x0000000F; break; case SDL_PACKEDLAYOUT_1555: masks[0] = 0x00008000; masks[1] = 0x00007C00; masks[2] = 0x000003E0; masks[3] = 0x0000001F; break; case SDL_PACKEDLAYOUT_5551: masks[0] = 0x0000F800; masks[1] = 0x000007C0; masks[2] = 0x0000003E; masks[3] = 0x00000001; break; case SDL_PACKEDLAYOUT_565: masks[0] = 0x00000000; masks[1] = 0x0000F800; masks[2] = 0x000007E0; masks[3] = 0x0000001F; 
break; case SDL_PACKEDLAYOUT_8888: masks[0] = 0xFF000000; masks[1] = 0x00FF0000; masks[2] = 0x0000FF00; masks[3] = 0x000000FF; break; case SDL_PACKEDLAYOUT_2101010: masks[0] = 0xC0000000; masks[1] = 0x3FF00000; masks[2] = 0x000FFC00; masks[3] = 0x000003FF; break; case SDL_PACKEDLAYOUT_1010102: masks[0] = 0xFFC00000; masks[1] = 0x003FF000; masks[2] = 0x00000FFC; masks[3] = 0x00000003; break; default: SDL_SetError("Unknown pixel format"); return SDL_FALSE; } switch (SDL_PIXELORDER(format)) { case SDL_PACKEDORDER_XRGB: *Rmask = masks[1]; *Gmask = masks[2]; *Bmask = masks[3]; break; case SDL_PACKEDORDER_RGBX: *Rmask = masks[0]; *Gmask = masks[1]; *Bmask = masks[2]; break; case SDL_PACKEDORDER_ARGB: *Amask = masks[0]; *Rmask = masks[1]; *Gmask = masks[2]; *Bmask = masks[3]; break; case SDL_PACKEDORDER_RGBA: *Rmask = masks[0]; *Gmask = masks[1]; *Bmask = masks[2]; *Amask = masks[3]; break; case SDL_PACKEDORDER_XBGR: *Bmask = masks[1]; *Gmask = masks[2]; *Rmask = masks[3]; break; case SDL_PACKEDORDER_BGRX: *Bmask = masks[0]; *Gmask = masks[1]; *Rmask = masks[2]; break; case SDL_PACKEDORDER_BGRA: *Bmask = masks[0]; *Gmask = masks[1]; *Rmask = masks[2]; *Amask = masks[3]; break; case SDL_PACKEDORDER_ABGR: *Amask = masks[0]; *Bmask = masks[1]; *Gmask = masks[2]; *Rmask = masks[3]; break; default: SDL_SetError("Unknown pixel format"); return SDL_FALSE; } return SDL_TRUE; } Uint32 SDL_MasksToPixelFormatEnum(int bpp, Uint32 Rmask, Uint32 Gmask, Uint32 Bmask, Uint32 Amask) { switch (bpp) { case 1: /* SDL defaults to MSB ordering */ return SDL_PIXELFORMAT_INDEX1MSB; case 4: /* SDL defaults to MSB ordering */ return SDL_PIXELFORMAT_INDEX4MSB; case 8: if (Rmask == 0) { return SDL_PIXELFORMAT_INDEX8; } if (Rmask == 0xE0 && Gmask == 0x1C && Bmask == 0x03 && Amask == 0x00) { return SDL_PIXELFORMAT_RGB332; } break; case 12: if (Rmask == 0) { return SDL_PIXELFORMAT_RGB444; } if (Rmask == 0x0F00 && Gmask == 0x00F0 && Bmask == 0x000F && Amask == 0x0000) { return SDL_PIXELFORMAT_RGB444; } if (Rmask == 0x000F && Gmask == 0x00F0 && Bmask == 0x0F00 && Amask == 0x0000) { return SDL_PIXELFORMAT_BGR444; } break; case 15: if (Rmask == 0) { return SDL_PIXELFORMAT_RGB555; } SDL_FALLTHROUGH; case 16: if (Rmask == 0) { return SDL_PIXELFORMAT_RGB565; } if (Rmask == 0x7C00 && Gmask == 0x03E0 && Bmask == 0x001F && Amask == 0x0000) { return SDL_PIXELFORMAT_RGB555; } if (Rmask == 0x001F && Gmask == 0x03E0 && Bmask == 0x7C00 && Amask == 0x0000) { return SDL_PIXELFORMAT_BGR555; } if (Rmask == 0x0F00 && Gmask == 0x00F0 && Bmask == 0x000F && Amask == 0xF000) { return SDL_PIXELFORMAT_ARGB4444; } if (Rmask == 0xF000 && Gmask == 0x0F00 && Bmask == 0x00F0 && Amask == 0x000F) { return SDL_PIXELFORMAT_RGBA4444; } if (Rmask == 0x000F && Gmask == 0x00F0 && Bmask == 0x0F00 && Amask == 0xF000) { return SDL_PIXELFORMAT_ABGR4444; } if (Rmask == 0x00F0 && Gmask == 0x0F00 && Bmask == 0xF000 && Amask == 0x000F) { return SDL_PIXELFORMAT_BGRA4444; } if (Rmask == 0x7C00 && Gmask == 0x03E0 && Bmask == 0x001F && Amask == 0x8000) { return SDL_PIXELFORMAT_ARGB1555; } if (Rmask == 0xF800 && Gmask == 0x07C0 && Bmask == 0x003E && Amask == 0x0001) { return SDL_PIXELFORMAT_RGBA5551; } if (Rmask == 0x001F && Gmask == 0x03E0 && Bmask == 0x7C00 && Amask == 0x8000) { return SDL_PIXELFORMAT_ABGR1555; } if (Rmask == 0x003E && Gmask == 0x07C0 && Bmask == 0xF800 && Amask == 0x0001) { return SDL_PIXELFORMAT_BGRA5551; } if (Rmask == 0xF800 && Gmask == 0x07E0 && Bmask == 0x001F && Amask == 0x0000) { return SDL_PIXELFORMAT_RGB565; } if (Rmask == 0x001F && Gmask == 0x07E0 
&& Bmask == 0xF800 && Amask == 0x0000) { return SDL_PIXELFORMAT_BGR565; } if (Rmask == 0x003F && Gmask == 0x07C0 && Bmask == 0xF800 && Amask == 0x0000) { /* Technically this would be BGR556, but Witek says this works in bug 3158 */ return SDL_PIXELFORMAT_RGB565; } break; case 24: switch (Rmask) { case 0: case 0x00FF0000: #if SDL_BYTEORDER == SDL_BIG_ENDIAN return SDL_PIXELFORMAT_RGB24; #else return SDL_PIXELFORMAT_BGR24; #endif case 0x000000FF: #if SDL_BYTEORDER == SDL_BIG_ENDIAN return SDL_PIXELFORMAT_BGR24; #else return SDL_PIXELFORMAT_RGB24; #endif } case 32: if (Rmask == 0) { return SDL_PIXELFORMAT_RGB888; } if (Rmask == 0x00FF0000 && Gmask == 0x0000FF00 && Bmask == 0x000000FF && Amask == 0x00000000) { return SDL_PIXELFORMAT_RGB888; } if (Rmask == 0xFF000000 && Gmask == 0x00FF0000 && Bmask == 0x0000FF00 && Amask == 0x00000000) { return SDL_PIXELFORMAT_RGBX8888; } if (Rmask == 0x000000FF && Gmask == 0x0000FF00 && Bmask == 0x00FF0000 && Amask == 0x00000000) { return SDL_PIXELFORMAT_BGR888; } if (Rmask == 0x0000FF00 && Gmask == 0x00FF0000 && Bmask == 0xFF000000 && Amask == 0x00000000) { return SDL_PIXELFORMAT_BGRX8888; } if (Rmask == 0x00FF0000 && Gmask == 0x0000FF00 && Bmask == 0x000000FF && Amask == 0xFF000000) { return SDL_PIXELFORMAT_ARGB8888; } if (Rmask == 0xFF000000 && Gmask == 0x00FF0000 && Bmask == 0x0000FF00 && Amask == 0x000000FF) { return SDL_PIXELFORMAT_RGBA8888; } if (Rmask == 0x000000FF && Gmask == 0x0000FF00 && Bmask == 0x00FF0000 && Amask == 0xFF000000) { return SDL_PIXELFORMAT_ABGR8888; } if (Rmask == 0x0000FF00 && Gmask == 0x00FF0000 && Bmask == 0xFF000000 && Amask == 0x000000FF) { return SDL_PIXELFORMAT_BGRA8888; } if (Rmask == 0x3FF00000 && Gmask == 0x000FFC00 && Bmask == 0x000003FF && Amask == 0xC0000000) { return SDL_PIXELFORMAT_ARGB2101010; } } return SDL_PIXELFORMAT_UNKNOWN; } static SDL_PixelFormat *formats; static SDL_SpinLock formats_lock = 0; SDL_PixelFormat * SDL_AllocFormat(Uint32 pixel_format) { SDL_PixelFormat *format; SDL_AtomicLock(&formats_lock); /* Look it up in our list of previously allocated formats */ for (format = formats; format; format = format->next) { if (pixel_format == format->format) { ++format->refcount; SDL_AtomicUnlock(&formats_lock); return format; } } /* Allocate an empty pixel format structure, and initialize it */ format = SDL_malloc(sizeof(*format)); if (format == NULL) { SDL_AtomicUnlock(&formats_lock); SDL_OutOfMemory(); return NULL; } if (SDL_InitFormat(format, pixel_format) < 0) { SDL_AtomicUnlock(&formats_lock); SDL_free(format); SDL_InvalidParamError("format"); return NULL; } if (!SDL_ISPIXELFORMAT_INDEXED(pixel_format)) { /* Cache the RGB formats */ format->next = formats; formats = format; } SDL_AtomicUnlock(&formats_lock); return format; } int SDL_InitFormat(SDL_PixelFormat * format, Uint32 pixel_format) { int bpp; Uint32 Rmask, Gmask, Bmask, Amask; Uint32 mask; if (!SDL_PixelFormatEnumToMasks(pixel_format, &bpp, &Rmask, &Gmask, &Bmask, &Amask)) { return -1; } /* Set up the format */ SDL_zerop(format); format->format = pixel_format; format->BitsPerPixel = bpp; format->BytesPerPixel = (bpp + 7) / 8; format->Rmask = Rmask; format->Rshift = 0; format->Rloss = 8; if (Rmask) { for (mask = Rmask; !(mask & 0x01); mask >>= 1) ++format->Rshift; for (; (mask & 0x01); mask >>= 1) --format->Rloss; } format->Gmask = Gmask; format->Gshift = 0; format->Gloss = 8; if (Gmask) { for (mask = Gmask; !(mask & 0x01); mask >>= 1) ++format->Gshift; for (; (mask & 0x01); mask >>= 1) --format->Gloss; } format->Bmask = Bmask; format->Bshift = 0; 
format->Bloss = 8; if (Bmask) { for (mask = Bmask; !(mask & 0x01); mask >>= 1) ++format->Bshift; for (; (mask & 0x01); mask >>= 1) --format->Bloss; } format->Amask = Amask; format->Ashift = 0; format->Aloss = 8; if (Amask) { for (mask = Amask; !(mask & 0x01); mask >>= 1) ++format->Ashift; for (; (mask & 0x01); mask >>= 1) --format->Aloss; } format->palette = NULL; format->refcount = 1; format->next = NULL; return 0; } void SDL_FreeFormat(SDL_PixelFormat *format) { SDL_PixelFormat *prev; if (!format) { SDL_InvalidParamError("format"); return; } SDL_AtomicLock(&formats_lock); if (--format->refcount > 0) { SDL_AtomicUnlock(&formats_lock); return; } /* Remove this format from our list */ if (format == formats) { formats = format->next; } else if (formats) { for (prev = formats; prev->next; prev = prev->next) { if (prev->next == format) { prev->next = format->next; break; } } } SDL_AtomicUnlock(&formats_lock); if (format->palette) { SDL_FreePalette(format->palette); } SDL_free(format); } SDL_Palette * SDL_AllocPalette(int ncolors) { SDL_Palette *palette; /* Input validation */ if (ncolors < 1) { SDL_InvalidParamError("ncolors"); return NULL; } palette = (SDL_Palette *) SDL_malloc(sizeof(*palette)); if (!palette) { SDL_OutOfMemory(); return NULL; } palette->colors = (SDL_Color *) SDL_malloc(ncolors * sizeof(*palette->colors)); if (!palette->colors) { SDL_free(palette); return NULL; } palette->ncolors = ncolors; palette->version = 1; palette->refcount = 1; SDL_memset(palette->colors, 0xFF, ncolors * sizeof(*palette->colors)); return palette; } int SDL_SetPixelFormatPalette(SDL_PixelFormat * format, SDL_Palette *palette) { if (!format) { return SDL_SetError("SDL_SetPixelFormatPalette() passed NULL format"); } if (palette && palette->ncolors > (1 << format->BitsPerPixel)) { return SDL_SetError("SDL_SetPixelFormatPalette() passed a palette that doesn't match the format"); } if (format->palette == palette) { return 0; } if (format->palette) { SDL_FreePalette(format->palette); } format->palette = palette; if (format->palette) { ++format->palette->refcount; } return 0; } int SDL_SetPaletteColors(SDL_Palette * palette, const SDL_Color * colors, int firstcolor, int ncolors) { int status = 0; /* Verify the parameters */ if (!palette) { return -1; } if (ncolors > (palette->ncolors - firstcolor)) { ncolors = (palette->ncolors - firstcolor); status = -1; } if (colors != (palette->colors + firstcolor)) { SDL_memcpy(palette->colors + firstcolor, colors, ncolors * sizeof(*colors)); } ++palette->version; if (!palette->version) { palette->version = 1; } return status; } void SDL_FreePalette(SDL_Palette * palette) { if (!palette) { SDL_InvalidParamError("palette"); return; } if (--palette->refcount > 0) { return; } SDL_free(palette->colors); SDL_free(palette); } /* * Calculate an 8-bit (3 red, 3 green, 2 blue) dithered palette of colors */ void SDL_DitherColors(SDL_Color * colors, int bpp) { int i; if (bpp != 8) return; /* only 8bpp supported right now */ for (i = 0; i < 256; i++) { int r, g, b; /* map each bit field to the full [0, 255] interval, so 0 is mapped to (0, 0, 0) and 255 to (255, 255, 255) */ r = i & 0xe0; r |= r >> 3 | r >> 6; colors[i].r = r; g = (i << 3) & 0xe0; g |= g >> 3 | g >> 6; colors[i].g = g; b = i & 0x3; b |= b << 2; b |= b << 4; colors[i].b = b; colors[i].a = SDL_ALPHA_OPAQUE; } } /* * Match an RGB value to a particular palette index */ Uint8 SDL_FindColor(SDL_Palette * pal, Uint8 r, Uint8 g, Uint8 b, Uint8 a) { /* Do colorspace distance matching */ unsigned int smallest; unsigned int 
distance; int rd, gd, bd, ad; int i; Uint8 pixel = 0; smallest = ~0; for (i = 0; i < pal->ncolors; ++i) { rd = pal->colors[i].r - r; gd = pal->colors[i].g - g; bd = pal->colors[i].b - b; ad = pal->colors[i].a - a; distance = (rd * rd) + (gd * gd) + (bd * bd) + (ad * ad); if (distance < smallest) { pixel = i; if (distance == 0) { /* Perfect match! */ break; } smallest = distance; } } return (pixel); } /* Tell whether palette is opaque, and if it has an alpha_channel */ void SDL_DetectPalette(SDL_Palette *pal, SDL_bool *is_opaque, SDL_bool *has_alpha_channel) { int i; { SDL_bool all_opaque = SDL_TRUE; for (i = 0; i < pal->ncolors; i++) { Uint8 alpha_value = pal->colors[i].a; if (alpha_value != SDL_ALPHA_OPAQUE) { all_opaque = SDL_FALSE; break; } } if (all_opaque) { /* Palette is opaque, with an alpha channel */ *is_opaque = SDL_TRUE; *has_alpha_channel = SDL_TRUE; return; } } { SDL_bool all_transparent = SDL_TRUE; for (i = 0; i < pal->ncolors; i++) { Uint8 alpha_value = pal->colors[i].a; if (alpha_value != SDL_ALPHA_TRANSPARENT) { all_transparent = SDL_FALSE; break; } } if (all_transparent) { /* Palette is opaque, without an alpha channel */ *is_opaque = SDL_TRUE; *has_alpha_channel = SDL_FALSE; return; } } /* Palette has alpha values */ *is_opaque = SDL_FALSE; *has_alpha_channel = SDL_TRUE; } /* Find the opaque pixel value corresponding to an RGB triple */ Uint32 SDL_MapRGB(const SDL_PixelFormat * format, Uint8 r, Uint8 g, Uint8 b) { if (format->palette == NULL) { return (r >> format->Rloss) << format->Rshift | (g >> format->Gloss) << format->Gshift | (b >> format->Bloss) << format->Bshift | format->Amask; } else { return SDL_FindColor(format->palette, r, g, b, SDL_ALPHA_OPAQUE); } } /* Find the pixel value corresponding to an RGBA quadruple */ Uint32 SDL_MapRGBA(const SDL_PixelFormat * format, Uint8 r, Uint8 g, Uint8 b, Uint8 a) { if (format->palette == NULL) { return (r >> format->Rloss) << format->Rshift | (g >> format->Gloss) << format->Gshift | (b >> format->Bloss) << format->Bshift | ((Uint32)(a >> format->Aloss) << format->Ashift & format->Amask); } else { return SDL_FindColor(format->palette, r, g, b, a); } } void SDL_GetRGB(Uint32 pixel, const SDL_PixelFormat * format, Uint8 * r, Uint8 * g, Uint8 * b) { if (format->palette == NULL) { unsigned v; v = (pixel & format->Rmask) >> format->Rshift; *r = SDL_expand_byte[format->Rloss][v]; v = (pixel & format->Gmask) >> format->Gshift; *g = SDL_expand_byte[format->Gloss][v]; v = (pixel & format->Bmask) >> format->Bshift; *b = SDL_expand_byte[format->Bloss][v]; } else { if (pixel < (unsigned)format->palette->ncolors) { *r = format->palette->colors[pixel].r; *g = format->palette->colors[pixel].g; *b = format->palette->colors[pixel].b; } else { *r = *g = *b = 0; } } } void SDL_GetRGBA(Uint32 pixel, const SDL_PixelFormat * format, Uint8 * r, Uint8 * g, Uint8 * b, Uint8 * a) { if (format->palette == NULL) { unsigned v; v = (pixel & format->Rmask) >> format->Rshift; *r = SDL_expand_byte[format->Rloss][v]; v = (pixel & format->Gmask) >> format->Gshift; *g = SDL_expand_byte[format->Gloss][v]; v = (pixel & format->Bmask) >> format->Bshift; *b = SDL_expand_byte[format->Bloss][v]; v = (pixel & format->Amask) >> format->Ashift; *a = SDL_expand_byte[format->Aloss][v]; } else { if (pixel < (unsigned)format->palette->ncolors) { *r = format->palette->colors[pixel].r; *g = format->palette->colors[pixel].g; *b = format->palette->colors[pixel].b; *a = format->palette->colors[pixel].a; } else { *r = *g = *b = *a = 0; } } } /* Map from Palette to Palette */ 
static Uint8 * Map1to1(SDL_Palette * src, SDL_Palette * dst, int *identical) { Uint8 *map; int i; if (identical) { if (src->ncolors <= dst->ncolors) { /* If an identical palette, no need to map */ if (src == dst || (SDL_memcmp (src->colors, dst->colors, src->ncolors * sizeof(SDL_Color)) == 0)) { *identical = 1; return (NULL); } } *identical = 0; } map = (Uint8 *) SDL_malloc(src->ncolors); if (map == NULL) { SDL_OutOfMemory(); return (NULL); } for (i = 0; i < src->ncolors; ++i) { map[i] = SDL_FindColor(dst, src->colors[i].r, src->colors[i].g, src->colors[i].b, src->colors[i].a); } return (map); } /* Map from Palette to BitField */ static Uint8 * Map1toN(SDL_PixelFormat * src, Uint8 Rmod, Uint8 Gmod, Uint8 Bmod, Uint8 Amod, SDL_PixelFormat * dst) { Uint8 *map; int i; int bpp; SDL_Palette *pal = src->palette; bpp = ((dst->BytesPerPixel == 3) ? 4 : dst->BytesPerPixel); map = (Uint8 *) SDL_malloc(pal->ncolors * bpp); if (map == NULL) { SDL_OutOfMemory(); return (NULL); } /* We memory copy to the pixel map so the endianness is preserved */ for (i = 0; i < pal->ncolors; ++i) { Uint8 R = (Uint8) ((pal->colors[i].r * Rmod) / 255); Uint8 G = (Uint8) ((pal->colors[i].g * Gmod) / 255); Uint8 B = (Uint8) ((pal->colors[i].b * Bmod) / 255); Uint8 A = (Uint8) ((pal->colors[i].a * Amod) / 255); ASSEMBLE_RGBA(&map[i * bpp], dst->BytesPerPixel, dst, (Uint32)R, (Uint32)G, (Uint32)B, (Uint32)A); } return (map); } /* Map from BitField to Dithered-Palette to Palette */ static Uint8 * MapNto1(SDL_PixelFormat * src, SDL_PixelFormat * dst, int *identical) { /* Generate a 256 color dither palette */ SDL_Palette dithered; SDL_Color colors[256]; SDL_Palette *pal = dst->palette; dithered.ncolors = 256; SDL_DitherColors(colors, 8); dithered.colors = colors; return (Map1to1(&dithered, pal, identical)); } SDL_BlitMap * SDL_AllocBlitMap(void) { SDL_BlitMap *map; /* Allocate the empty map */ map = (SDL_BlitMap *) SDL_calloc(1, sizeof(*map)); if (map == NULL) { SDL_OutOfMemory(); return (NULL); } map->info.r = 0xFF; map->info.g = 0xFF; map->info.b = 0xFF; map->info.a = 0xFF; /* It's ready to go */ return (map); } typedef struct SDL_ListNode { void *entry; struct SDL_ListNode *next; } SDL_ListNode; void SDL_InvalidateAllBlitMap(SDL_Surface *surface) { SDL_ListNode *l = surface->list_blitmap; surface->list_blitmap = NULL; while (l) { SDL_ListNode *tmp = l; SDL_InvalidateMap((SDL_BlitMap *)l->entry); l = l->next; SDL_free(tmp); } } static void SDL_ListAdd(SDL_ListNode **head, void *ent); static void SDL_ListRemove(SDL_ListNode **head, void *ent); void SDL_ListAdd(SDL_ListNode **head, void *ent) { SDL_ListNode *node = SDL_malloc(sizeof (*node)); if (node == NULL) { SDL_OutOfMemory(); return; } node->entry = ent; node->next = *head; *head = node; } void SDL_ListRemove(SDL_ListNode **head, void *ent) { SDL_ListNode **ptr = head; while (*ptr) { if ((*ptr)->entry == ent) { SDL_ListNode *tmp = *ptr; *ptr = (*ptr)->next; SDL_free(tmp); return; } ptr = &(*ptr)->next; } } void SDL_InvalidateMap(SDL_BlitMap * map) { if (!map) { return; } if (map->dst) { /* Un-register from the destination surface */ SDL_ListRemove((SDL_ListNode **)&(map->dst->list_blitmap), map); } map->dst = NULL; map->src_palette_version = 0; map->dst_palette_version = 0; SDL_free(map->info.table); map->info.table = NULL; } int SDL_MapSurface(SDL_Surface * src, SDL_Surface * dst) { SDL_PixelFormat *srcfmt; SDL_PixelFormat *dstfmt; SDL_BlitMap *map; /* Clear out any previous mapping */ map = src->map; #if SDL_HAVE_RLE if ((src->flags & SDL_RLEACCEL) == SDL_RLEACCEL) { 
SDL_UnRLESurface(src, 1); } #endif SDL_InvalidateMap(map); /* Figure out what kind of mapping we're doing */ map->identity = 0; srcfmt = src->format; dstfmt = dst->format; if (SDL_ISPIXELFORMAT_INDEXED(srcfmt->format)) { if (SDL_ISPIXELFORMAT_INDEXED(dstfmt->format)) { /* Palette --> Palette */ map->info.table = Map1to1(srcfmt->palette, dstfmt->palette, &map->identity); if (!map->identity) { if (map->info.table == NULL) { return (-1); } } if (srcfmt->BitsPerPixel != dstfmt->BitsPerPixel) map->identity = 0; } else { /* Palette --> BitField */ map->info.table = Map1toN(srcfmt, src->map->info.r, src->map->info.g, src->map->info.b, src->map->info.a, dstfmt); if (map->info.table == NULL) { return (-1); } } } else { if (SDL_ISPIXELFORMAT_INDEXED(dstfmt->format)) { /* BitField --> Palette */ map->info.table = MapNto1(srcfmt, dstfmt, &map->identity); if (!map->identity) { if (map->info.table == NULL) { return (-1); } } map->identity = 0; /* Don't optimize to copy */ } else { /* BitField --> BitField */ if (srcfmt == dstfmt) { map->identity = 1; } } } map->dst = dst; if (map->dst) { /* Register BlitMap to the destination surface, to be invalidated when needed */ SDL_ListAdd((SDL_ListNode **)&(map->dst->list_blitmap), map); } if (dstfmt->palette) { map->dst_palette_version = dstfmt->palette->version; } else { map->dst_palette_version = 0; } if (srcfmt->palette) { map->src_palette_version = srcfmt->palette->version; } else { map->src_palette_version = 0; } /* Choose your blitters wisely */ return (SDL_CalculateBlit(src)); } void SDL_FreeBlitMap(SDL_BlitMap * map) { if (map) { SDL_InvalidateMap(map); SDL_free(map); } } void SDL_CalculateGammaRamp(float gamma, Uint16 * ramp) { int i; /* Input validation */ if (gamma < 0.0f ) { SDL_InvalidParamError("gamma"); return; } if (ramp == NULL) { SDL_InvalidParamError("ramp"); return; } /* 0.0 gamma is all black */ if (gamma == 0.0f) { SDL_memset(ramp, 0, 256 * sizeof(Uint16)); return; } else if (gamma == 1.0f) { /* 1.0 gamma is identity */ for (i = 0; i < 256; ++i) { ramp[i] = (i << 8) | i; } return; } else { /* Calculate a real gamma ramp */ int value; gamma = 1.0f / gamma; for (i = 0; i < 256; ++i) { value = (int) (SDL_pow((double) i / 256.0, gamma) * 65535.0 + 0.5); if (value > 65535) { value = 65535; } ramp[i] = (Uint16) value; } } } /* vi: set ts=4 sw=4 expandtab: */
/* Simple DirectMedia Layer Copyright (C) 1997-2021 Sam Lantinga <slouken@libsdl.org> This software is provided 'as-is', without any express or implied warranty. In no event will the authors be held liable for any damages arising from the use of this software. Permission is granted to anyone to use this software for any purpose, including commercial applications, and to alter it and redistribute it freely, subject to the following restrictions: 1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required. 2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software. 3. This notice may not be removed or altered from any source distribution. */ #include "../SDL_internal.h" /* General (mostly internal) pixel/color manipulation routines for SDL */ #include "SDL_endian.h" #include "SDL_video.h" #include "SDL_sysvideo.h" #include "SDL_blit.h" #include "SDL_pixels_c.h" #include "SDL_RLEaccel_c.h" /* Lookup tables to expand partial bytes to the full 0..255 range */ static Uint8 lookup_0[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255 }; static Uint8 lookup_1[] = { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58, 60, 62, 64, 66, 68, 70, 72, 74, 76, 78, 80, 82, 84, 86, 88, 90, 92, 94, 96, 98, 100, 102, 104, 106, 108, 110, 112, 114, 116, 118, 120, 122, 124, 126, 128, 130, 132, 134, 136, 138, 140, 142, 144, 146, 148, 150, 152, 154, 156, 158, 160, 162, 164, 166, 168, 170, 172, 174, 176, 178, 180, 182, 184, 186, 188, 190, 192, 194, 196, 198, 200, 202, 204, 206, 208, 210, 212, 214, 216, 218, 220, 222, 224, 226, 228, 230, 232, 234, 236, 238, 240, 242, 244, 246, 248, 250, 252, 255 }; static Uint8 lookup_2[] = { 0, 4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 85, 89, 93, 97, 101, 105, 109, 113, 117, 121, 125, 129, 133, 137, 141, 145, 149, 153, 157, 161, 165, 170, 174, 178, 182, 186, 190, 194, 198, 202, 206, 210, 214, 218, 222, 226, 230, 234, 238, 242, 246, 250, 255 }; static Uint8 lookup_3[] = { 0, 8, 16, 24, 32, 41, 49, 57, 65, 74, 82, 90, 98, 106, 115, 123, 131, 139, 148, 156, 164, 172, 180, 189, 197, 205, 213, 222, 230, 238, 246, 255 }; static Uint8 lookup_4[] = { 
0, 17, 34, 51, 68, 85, 102, 119, 136, 153, 170, 187, 204, 221, 238, 255 }; static Uint8 lookup_5[] = { 0, 36, 72, 109, 145, 182, 218, 255 }; static Uint8 lookup_6[] = { 0, 85, 170, 255 }; static Uint8 lookup_7[] = { 0, 255 }; static Uint8 lookup_8[] = { 255 }; Uint8* SDL_expand_byte[9] = { lookup_0, lookup_1, lookup_2, lookup_3, lookup_4, lookup_5, lookup_6, lookup_7, lookup_8 }; /* Helper functions */ const char* SDL_GetPixelFormatName(Uint32 format) { switch (format) { #define CASE(X) case X: return #X; CASE(SDL_PIXELFORMAT_INDEX1LSB) CASE(SDL_PIXELFORMAT_INDEX1MSB) CASE(SDL_PIXELFORMAT_INDEX4LSB) CASE(SDL_PIXELFORMAT_INDEX4MSB) CASE(SDL_PIXELFORMAT_INDEX8) CASE(SDL_PIXELFORMAT_RGB332) CASE(SDL_PIXELFORMAT_RGB444) CASE(SDL_PIXELFORMAT_BGR444) CASE(SDL_PIXELFORMAT_RGB555) CASE(SDL_PIXELFORMAT_BGR555) CASE(SDL_PIXELFORMAT_ARGB4444) CASE(SDL_PIXELFORMAT_RGBA4444) CASE(SDL_PIXELFORMAT_ABGR4444) CASE(SDL_PIXELFORMAT_BGRA4444) CASE(SDL_PIXELFORMAT_ARGB1555) CASE(SDL_PIXELFORMAT_RGBA5551) CASE(SDL_PIXELFORMAT_ABGR1555) CASE(SDL_PIXELFORMAT_BGRA5551) CASE(SDL_PIXELFORMAT_RGB565) CASE(SDL_PIXELFORMAT_BGR565) CASE(SDL_PIXELFORMAT_RGB24) CASE(SDL_PIXELFORMAT_BGR24) CASE(SDL_PIXELFORMAT_RGB888) CASE(SDL_PIXELFORMAT_RGBX8888) CASE(SDL_PIXELFORMAT_BGR888) CASE(SDL_PIXELFORMAT_BGRX8888) CASE(SDL_PIXELFORMAT_ARGB8888) CASE(SDL_PIXELFORMAT_RGBA8888) CASE(SDL_PIXELFORMAT_ABGR8888) CASE(SDL_PIXELFORMAT_BGRA8888) CASE(SDL_PIXELFORMAT_ARGB2101010) CASE(SDL_PIXELFORMAT_YV12) CASE(SDL_PIXELFORMAT_IYUV) CASE(SDL_PIXELFORMAT_YUY2) CASE(SDL_PIXELFORMAT_UYVY) CASE(SDL_PIXELFORMAT_YVYU) CASE(SDL_PIXELFORMAT_NV12) CASE(SDL_PIXELFORMAT_NV21) #undef CASE default: return "SDL_PIXELFORMAT_UNKNOWN"; } } SDL_bool SDL_PixelFormatEnumToMasks(Uint32 format, int *bpp, Uint32 * Rmask, Uint32 * Gmask, Uint32 * Bmask, Uint32 * Amask) { Uint32 masks[4]; /* This function doesn't work with FourCC pixel formats */ if (SDL_ISPIXELFORMAT_FOURCC(format)) { SDL_SetError("FOURCC pixel formats are not supported"); return SDL_FALSE; } /* Initialize the values here */ if (SDL_BYTESPERPIXEL(format) <= 2) { *bpp = SDL_BITSPERPIXEL(format); } else { *bpp = SDL_BYTESPERPIXEL(format) * 8; } *Rmask = *Gmask = *Bmask = *Amask = 0; if (format == SDL_PIXELFORMAT_RGB24) { #if SDL_BYTEORDER == SDL_BIG_ENDIAN *Rmask = 0x00FF0000; *Gmask = 0x0000FF00; *Bmask = 0x000000FF; #else *Rmask = 0x000000FF; *Gmask = 0x0000FF00; *Bmask = 0x00FF0000; #endif return SDL_TRUE; } if (format == SDL_PIXELFORMAT_BGR24) { #if SDL_BYTEORDER == SDL_BIG_ENDIAN *Rmask = 0x000000FF; *Gmask = 0x0000FF00; *Bmask = 0x00FF0000; #else *Rmask = 0x00FF0000; *Gmask = 0x0000FF00; *Bmask = 0x000000FF; #endif return SDL_TRUE; } if (SDL_PIXELTYPE(format) != SDL_PIXELTYPE_PACKED8 && SDL_PIXELTYPE(format) != SDL_PIXELTYPE_PACKED16 && SDL_PIXELTYPE(format) != SDL_PIXELTYPE_PACKED32) { /* Not a format that uses masks */ return SDL_TRUE; } switch (SDL_PIXELLAYOUT(format)) { case SDL_PACKEDLAYOUT_332: masks[0] = 0x00000000; masks[1] = 0x000000E0; masks[2] = 0x0000001C; masks[3] = 0x00000003; break; case SDL_PACKEDLAYOUT_4444: masks[0] = 0x0000F000; masks[1] = 0x00000F00; masks[2] = 0x000000F0; masks[3] = 0x0000000F; break; case SDL_PACKEDLAYOUT_1555: masks[0] = 0x00008000; masks[1] = 0x00007C00; masks[2] = 0x000003E0; masks[3] = 0x0000001F; break; case SDL_PACKEDLAYOUT_5551: masks[0] = 0x0000F800; masks[1] = 0x000007C0; masks[2] = 0x0000003E; masks[3] = 0x00000001; break; case SDL_PACKEDLAYOUT_565: masks[0] = 0x00000000; masks[1] = 0x0000F800; masks[2] = 0x000007E0; masks[3] = 0x0000001F; 
break; case SDL_PACKEDLAYOUT_8888: masks[0] = 0xFF000000; masks[1] = 0x00FF0000; masks[2] = 0x0000FF00; masks[3] = 0x000000FF; break; case SDL_PACKEDLAYOUT_2101010: masks[0] = 0xC0000000; masks[1] = 0x3FF00000; masks[2] = 0x000FFC00; masks[3] = 0x000003FF; break; case SDL_PACKEDLAYOUT_1010102: masks[0] = 0xFFC00000; masks[1] = 0x003FF000; masks[2] = 0x00000FFC; masks[3] = 0x00000003; break; default: SDL_SetError("Unknown pixel format"); return SDL_FALSE; } switch (SDL_PIXELORDER(format)) { case SDL_PACKEDORDER_XRGB: *Rmask = masks[1]; *Gmask = masks[2]; *Bmask = masks[3]; break; case SDL_PACKEDORDER_RGBX: *Rmask = masks[0]; *Gmask = masks[1]; *Bmask = masks[2]; break; case SDL_PACKEDORDER_ARGB: *Amask = masks[0]; *Rmask = masks[1]; *Gmask = masks[2]; *Bmask = masks[3]; break; case SDL_PACKEDORDER_RGBA: *Rmask = masks[0]; *Gmask = masks[1]; *Bmask = masks[2]; *Amask = masks[3]; break; case SDL_PACKEDORDER_XBGR: *Bmask = masks[1]; *Gmask = masks[2]; *Rmask = masks[3]; break; case SDL_PACKEDORDER_BGRX: *Bmask = masks[0]; *Gmask = masks[1]; *Rmask = masks[2]; break; case SDL_PACKEDORDER_BGRA: *Bmask = masks[0]; *Gmask = masks[1]; *Rmask = masks[2]; *Amask = masks[3]; break; case SDL_PACKEDORDER_ABGR: *Amask = masks[0]; *Bmask = masks[1]; *Gmask = masks[2]; *Rmask = masks[3]; break; default: SDL_SetError("Unknown pixel format"); return SDL_FALSE; } return SDL_TRUE; } Uint32 SDL_MasksToPixelFormatEnum(int bpp, Uint32 Rmask, Uint32 Gmask, Uint32 Bmask, Uint32 Amask) { switch (bpp) { case 1: /* SDL defaults to MSB ordering */ return SDL_PIXELFORMAT_INDEX1MSB; case 4: /* SDL defaults to MSB ordering */ return SDL_PIXELFORMAT_INDEX4MSB; case 8: if (Rmask == 0) { return SDL_PIXELFORMAT_INDEX8; } if (Rmask == 0xE0 && Gmask == 0x1C && Bmask == 0x03 && Amask == 0x00) { return SDL_PIXELFORMAT_RGB332; } break; case 12: if (Rmask == 0) { return SDL_PIXELFORMAT_RGB444; } if (Rmask == 0x0F00 && Gmask == 0x00F0 && Bmask == 0x000F && Amask == 0x0000) { return SDL_PIXELFORMAT_RGB444; } if (Rmask == 0x000F && Gmask == 0x00F0 && Bmask == 0x0F00 && Amask == 0x0000) { return SDL_PIXELFORMAT_BGR444; } break; case 15: if (Rmask == 0) { return SDL_PIXELFORMAT_RGB555; } SDL_FALLTHROUGH; case 16: if (Rmask == 0) { return SDL_PIXELFORMAT_RGB565; } if (Rmask == 0x7C00 && Gmask == 0x03E0 && Bmask == 0x001F && Amask == 0x0000) { return SDL_PIXELFORMAT_RGB555; } if (Rmask == 0x001F && Gmask == 0x03E0 && Bmask == 0x7C00 && Amask == 0x0000) { return SDL_PIXELFORMAT_BGR555; } if (Rmask == 0x0F00 && Gmask == 0x00F0 && Bmask == 0x000F && Amask == 0xF000) { return SDL_PIXELFORMAT_ARGB4444; } if (Rmask == 0xF000 && Gmask == 0x0F00 && Bmask == 0x00F0 && Amask == 0x000F) { return SDL_PIXELFORMAT_RGBA4444; } if (Rmask == 0x000F && Gmask == 0x00F0 && Bmask == 0x0F00 && Amask == 0xF000) { return SDL_PIXELFORMAT_ABGR4444; } if (Rmask == 0x00F0 && Gmask == 0x0F00 && Bmask == 0xF000 && Amask == 0x000F) { return SDL_PIXELFORMAT_BGRA4444; } if (Rmask == 0x7C00 && Gmask == 0x03E0 && Bmask == 0x001F && Amask == 0x8000) { return SDL_PIXELFORMAT_ARGB1555; } if (Rmask == 0xF800 && Gmask == 0x07C0 && Bmask == 0x003E && Amask == 0x0001) { return SDL_PIXELFORMAT_RGBA5551; } if (Rmask == 0x001F && Gmask == 0x03E0 && Bmask == 0x7C00 && Amask == 0x8000) { return SDL_PIXELFORMAT_ABGR1555; } if (Rmask == 0x003E && Gmask == 0x07C0 && Bmask == 0xF800 && Amask == 0x0001) { return SDL_PIXELFORMAT_BGRA5551; } if (Rmask == 0xF800 && Gmask == 0x07E0 && Bmask == 0x001F && Amask == 0x0000) { return SDL_PIXELFORMAT_RGB565; } if (Rmask == 0x001F && Gmask == 0x07E0 
&& Bmask == 0xF800 && Amask == 0x0000) { return SDL_PIXELFORMAT_BGR565; } if (Rmask == 0x003F && Gmask == 0x07C0 && Bmask == 0xF800 && Amask == 0x0000) { /* Technically this would be BGR556, but Witek says this works in bug 3158 */ return SDL_PIXELFORMAT_RGB565; } break; case 24: switch (Rmask) { case 0: case 0x00FF0000: #if SDL_BYTEORDER == SDL_BIG_ENDIAN return SDL_PIXELFORMAT_RGB24; #else return SDL_PIXELFORMAT_BGR24; #endif case 0x000000FF: #if SDL_BYTEORDER == SDL_BIG_ENDIAN return SDL_PIXELFORMAT_BGR24; #else return SDL_PIXELFORMAT_RGB24; #endif } case 32: if (Rmask == 0) { return SDL_PIXELFORMAT_RGB888; } if (Rmask == 0x00FF0000 && Gmask == 0x0000FF00 && Bmask == 0x000000FF && Amask == 0x00000000) { return SDL_PIXELFORMAT_RGB888; } if (Rmask == 0xFF000000 && Gmask == 0x00FF0000 && Bmask == 0x0000FF00 && Amask == 0x00000000) { return SDL_PIXELFORMAT_RGBX8888; } if (Rmask == 0x000000FF && Gmask == 0x0000FF00 && Bmask == 0x00FF0000 && Amask == 0x00000000) { return SDL_PIXELFORMAT_BGR888; } if (Rmask == 0x0000FF00 && Gmask == 0x00FF0000 && Bmask == 0xFF000000 && Amask == 0x00000000) { return SDL_PIXELFORMAT_BGRX8888; } if (Rmask == 0x00FF0000 && Gmask == 0x0000FF00 && Bmask == 0x000000FF && Amask == 0xFF000000) { return SDL_PIXELFORMAT_ARGB8888; } if (Rmask == 0xFF000000 && Gmask == 0x00FF0000 && Bmask == 0x0000FF00 && Amask == 0x000000FF) { return SDL_PIXELFORMAT_RGBA8888; } if (Rmask == 0x000000FF && Gmask == 0x0000FF00 && Bmask == 0x00FF0000 && Amask == 0xFF000000) { return SDL_PIXELFORMAT_ABGR8888; } if (Rmask == 0x0000FF00 && Gmask == 0x00FF0000 && Bmask == 0xFF000000 && Amask == 0x000000FF) { return SDL_PIXELFORMAT_BGRA8888; } if (Rmask == 0x3FF00000 && Gmask == 0x000FFC00 && Bmask == 0x000003FF && Amask == 0xC0000000) { return SDL_PIXELFORMAT_ARGB2101010; } } return SDL_PIXELFORMAT_UNKNOWN; } static SDL_PixelFormat *formats; static SDL_SpinLock formats_lock = 0; SDL_PixelFormat * SDL_AllocFormat(Uint32 pixel_format) { SDL_PixelFormat *format; SDL_AtomicLock(&formats_lock); /* Look it up in our list of previously allocated formats */ for (format = formats; format; format = format->next) { if (pixel_format == format->format) { ++format->refcount; SDL_AtomicUnlock(&formats_lock); return format; } } /* Allocate an empty pixel format structure, and initialize it */ format = SDL_malloc(sizeof(*format)); if (format == NULL) { SDL_AtomicUnlock(&formats_lock); SDL_OutOfMemory(); return NULL; } if (SDL_InitFormat(format, pixel_format) < 0) { SDL_AtomicUnlock(&formats_lock); SDL_free(format); SDL_InvalidParamError("format"); return NULL; } if (!SDL_ISPIXELFORMAT_INDEXED(pixel_format)) { /* Cache the RGB formats */ format->next = formats; formats = format; } SDL_AtomicUnlock(&formats_lock); return format; } int SDL_InitFormat(SDL_PixelFormat * format, Uint32 pixel_format) { int bpp; Uint32 Rmask, Gmask, Bmask, Amask; Uint32 mask; if (!SDL_PixelFormatEnumToMasks(pixel_format, &bpp, &Rmask, &Gmask, &Bmask, &Amask)) { return -1; } /* Set up the format */ SDL_zerop(format); format->format = pixel_format; format->BitsPerPixel = bpp; format->BytesPerPixel = (bpp + 7) / 8; format->Rmask = Rmask; format->Rshift = 0; format->Rloss = 8; if (Rmask) { for (mask = Rmask; !(mask & 0x01); mask >>= 1) ++format->Rshift; for (; (mask & 0x01); mask >>= 1) --format->Rloss; } format->Gmask = Gmask; format->Gshift = 0; format->Gloss = 8; if (Gmask) { for (mask = Gmask; !(mask & 0x01); mask >>= 1) ++format->Gshift; for (; (mask & 0x01); mask >>= 1) --format->Gloss; } format->Bmask = Bmask; format->Bshift = 0; 
format->Bloss = 8; if (Bmask) { for (mask = Bmask; !(mask & 0x01); mask >>= 1) ++format->Bshift; for (; (mask & 0x01); mask >>= 1) --format->Bloss; } format->Amask = Amask; format->Ashift = 0; format->Aloss = 8; if (Amask) { for (mask = Amask; !(mask & 0x01); mask >>= 1) ++format->Ashift; for (; (mask & 0x01); mask >>= 1) --format->Aloss; } format->palette = NULL; format->refcount = 1; format->next = NULL; return 0; } void SDL_FreeFormat(SDL_PixelFormat *format) { SDL_PixelFormat *prev; if (!format) { SDL_InvalidParamError("format"); return; } SDL_AtomicLock(&formats_lock); if (--format->refcount > 0) { SDL_AtomicUnlock(&formats_lock); return; } /* Remove this format from our list */ if (format == formats) { formats = format->next; } else if (formats) { for (prev = formats; prev->next; prev = prev->next) { if (prev->next == format) { prev->next = format->next; break; } } } SDL_AtomicUnlock(&formats_lock); if (format->palette) { SDL_FreePalette(format->palette); } SDL_free(format); } SDL_Palette * SDL_AllocPalette(int ncolors) { SDL_Palette *palette; /* Input validation */ if (ncolors < 1) { SDL_InvalidParamError("ncolors"); return NULL; } palette = (SDL_Palette *) SDL_malloc(sizeof(*palette)); if (!palette) { SDL_OutOfMemory(); return NULL; } palette->colors = (SDL_Color *) SDL_malloc(ncolors * sizeof(*palette->colors)); if (!palette->colors) { SDL_free(palette); return NULL; } palette->ncolors = ncolors; palette->version = 1; palette->refcount = 1; SDL_memset(palette->colors, 0xFF, ncolors * sizeof(*palette->colors)); return palette; } int SDL_SetPixelFormatPalette(SDL_PixelFormat * format, SDL_Palette *palette) { if (!format) { return SDL_SetError("SDL_SetPixelFormatPalette() passed NULL format"); } if (palette && palette->ncolors > (1 << format->BitsPerPixel)) { return SDL_SetError("SDL_SetPixelFormatPalette() passed a palette that doesn't match the format"); } if (format->palette == palette) { return 0; } if (format->palette) { SDL_FreePalette(format->palette); } format->palette = palette; if (format->palette) { ++format->palette->refcount; } return 0; } int SDL_SetPaletteColors(SDL_Palette * palette, const SDL_Color * colors, int firstcolor, int ncolors) { int status = 0; /* Verify the parameters */ if (!palette) { return -1; } if (ncolors > (palette->ncolors - firstcolor)) { ncolors = (palette->ncolors - firstcolor); status = -1; } if (colors != (palette->colors + firstcolor)) { SDL_memcpy(palette->colors + firstcolor, colors, ncolors * sizeof(*colors)); } ++palette->version; if (!palette->version) { palette->version = 1; } return status; } void SDL_FreePalette(SDL_Palette * palette) { if (!palette) { SDL_InvalidParamError("palette"); return; } if (--palette->refcount > 0) { return; } SDL_free(palette->colors); SDL_free(palette); } /* * Calculate an 8-bit (3 red, 3 green, 2 blue) dithered palette of colors */ void SDL_DitherColors(SDL_Color * colors, int bpp) { int i; if (bpp != 8) return; /* only 8bpp supported right now */ for (i = 0; i < 256; i++) { int r, g, b; /* map each bit field to the full [0, 255] interval, so 0 is mapped to (0, 0, 0) and 255 to (255, 255, 255) */ r = i & 0xe0; r |= r >> 3 | r >> 6; colors[i].r = r; g = (i << 3) & 0xe0; g |= g >> 3 | g >> 6; colors[i].g = g; b = i & 0x3; b |= b << 2; b |= b << 4; colors[i].b = b; colors[i].a = SDL_ALPHA_OPAQUE; } } /* * Match an RGB value to a particular palette index */ Uint8 SDL_FindColor(SDL_Palette * pal, Uint8 r, Uint8 g, Uint8 b, Uint8 a) { /* Do colorspace distance matching */ unsigned int smallest; unsigned int 
distance; int rd, gd, bd, ad; int i; Uint8 pixel = 0; smallest = ~0; for (i = 0; i < pal->ncolors; ++i) { rd = pal->colors[i].r - r; gd = pal->colors[i].g - g; bd = pal->colors[i].b - b; ad = pal->colors[i].a - a; distance = (rd * rd) + (gd * gd) + (bd * bd) + (ad * ad); if (distance < smallest) { pixel = i; if (distance == 0) { /* Perfect match! */ break; } smallest = distance; } } return (pixel); } /* Tell whether palette is opaque, and if it has an alpha_channel */ void SDL_DetectPalette(SDL_Palette *pal, SDL_bool *is_opaque, SDL_bool *has_alpha_channel) { int i; { SDL_bool all_opaque = SDL_TRUE; for (i = 0; i < pal->ncolors; i++) { Uint8 alpha_value = pal->colors[i].a; if (alpha_value != SDL_ALPHA_OPAQUE) { all_opaque = SDL_FALSE; break; } } if (all_opaque) { /* Palette is opaque, with an alpha channel */ *is_opaque = SDL_TRUE; *has_alpha_channel = SDL_TRUE; return; } } { SDL_bool all_transparent = SDL_TRUE; for (i = 0; i < pal->ncolors; i++) { Uint8 alpha_value = pal->colors[i].a; if (alpha_value != SDL_ALPHA_TRANSPARENT) { all_transparent = SDL_FALSE; break; } } if (all_transparent) { /* Palette is opaque, without an alpha channel */ *is_opaque = SDL_TRUE; *has_alpha_channel = SDL_FALSE; return; } } /* Palette has alpha values */ *is_opaque = SDL_FALSE; *has_alpha_channel = SDL_TRUE; } /* Find the opaque pixel value corresponding to an RGB triple */ Uint32 SDL_MapRGB(const SDL_PixelFormat * format, Uint8 r, Uint8 g, Uint8 b) { if (format->palette == NULL) { return (r >> format->Rloss) << format->Rshift | (g >> format->Gloss) << format->Gshift | (b >> format->Bloss) << format->Bshift | format->Amask; } else { return SDL_FindColor(format->palette, r, g, b, SDL_ALPHA_OPAQUE); } } /* Find the pixel value corresponding to an RGBA quadruple */ Uint32 SDL_MapRGBA(const SDL_PixelFormat * format, Uint8 r, Uint8 g, Uint8 b, Uint8 a) { if (format->palette == NULL) { return (r >> format->Rloss) << format->Rshift | (g >> format->Gloss) << format->Gshift | (b >> format->Bloss) << format->Bshift | ((Uint32)(a >> format->Aloss) << format->Ashift & format->Amask); } else { return SDL_FindColor(format->palette, r, g, b, a); } } void SDL_GetRGB(Uint32 pixel, const SDL_PixelFormat * format, Uint8 * r, Uint8 * g, Uint8 * b) { if (format->palette == NULL) { unsigned v; v = (pixel & format->Rmask) >> format->Rshift; *r = SDL_expand_byte[format->Rloss][v]; v = (pixel & format->Gmask) >> format->Gshift; *g = SDL_expand_byte[format->Gloss][v]; v = (pixel & format->Bmask) >> format->Bshift; *b = SDL_expand_byte[format->Bloss][v]; } else { if (pixel < (unsigned)format->palette->ncolors) { *r = format->palette->colors[pixel].r; *g = format->palette->colors[pixel].g; *b = format->palette->colors[pixel].b; } else { *r = *g = *b = 0; } } } void SDL_GetRGBA(Uint32 pixel, const SDL_PixelFormat * format, Uint8 * r, Uint8 * g, Uint8 * b, Uint8 * a) { if (format->palette == NULL) { unsigned v; v = (pixel & format->Rmask) >> format->Rshift; *r = SDL_expand_byte[format->Rloss][v]; v = (pixel & format->Gmask) >> format->Gshift; *g = SDL_expand_byte[format->Gloss][v]; v = (pixel & format->Bmask) >> format->Bshift; *b = SDL_expand_byte[format->Bloss][v]; v = (pixel & format->Amask) >> format->Ashift; *a = SDL_expand_byte[format->Aloss][v]; } else { if (pixel < (unsigned)format->palette->ncolors) { *r = format->palette->colors[pixel].r; *g = format->palette->colors[pixel].g; *b = format->palette->colors[pixel].b; *a = format->palette->colors[pixel].a; } else { *r = *g = *b = *a = 0; } } } /* Map from Palette to Palette */ 
static Uint8 * Map1to1(SDL_Palette * src, SDL_Palette * dst, int *identical) { Uint8 *map; int i; if (identical) { if (src->ncolors <= dst->ncolors) { /* If an identical palette, no need to map */ if (src == dst || (SDL_memcmp (src->colors, dst->colors, src->ncolors * sizeof(SDL_Color)) == 0)) { *identical = 1; return (NULL); } } *identical = 0; } map = (Uint8 *) SDL_calloc(256, sizeof(Uint8)); if (map == NULL) { SDL_OutOfMemory(); return (NULL); } for (i = 0; i < src->ncolors; ++i) { map[i] = SDL_FindColor(dst, src->colors[i].r, src->colors[i].g, src->colors[i].b, src->colors[i].a); } return (map); } /* Map from Palette to BitField */ static Uint8 * Map1toN(SDL_PixelFormat * src, Uint8 Rmod, Uint8 Gmod, Uint8 Bmod, Uint8 Amod, SDL_PixelFormat * dst) { Uint8 *map; int i; int bpp; SDL_Palette *pal = src->palette; bpp = ((dst->BytesPerPixel == 3) ? 4 : dst->BytesPerPixel); map = (Uint8 *) SDL_calloc(256, bpp); if (map == NULL) { SDL_OutOfMemory(); return (NULL); } /* We memory copy to the pixel map so the endianness is preserved */ for (i = 0; i < pal->ncolors; ++i) { Uint8 R = (Uint8) ((pal->colors[i].r * Rmod) / 255); Uint8 G = (Uint8) ((pal->colors[i].g * Gmod) / 255); Uint8 B = (Uint8) ((pal->colors[i].b * Bmod) / 255); Uint8 A = (Uint8) ((pal->colors[i].a * Amod) / 255); ASSEMBLE_RGBA(&map[i * bpp], dst->BytesPerPixel, dst, (Uint32)R, (Uint32)G, (Uint32)B, (Uint32)A); } return (map); } /* Map from BitField to Dithered-Palette to Palette */ static Uint8 * MapNto1(SDL_PixelFormat * src, SDL_PixelFormat * dst, int *identical) { /* Generate a 256 color dither palette */ SDL_Palette dithered; SDL_Color colors[256]; SDL_Palette *pal = dst->palette; dithered.ncolors = 256; SDL_DitherColors(colors, 8); dithered.colors = colors; return (Map1to1(&dithered, pal, identical)); } SDL_BlitMap * SDL_AllocBlitMap(void) { SDL_BlitMap *map; /* Allocate the empty map */ map = (SDL_BlitMap *) SDL_calloc(1, sizeof(*map)); if (map == NULL) { SDL_OutOfMemory(); return (NULL); } map->info.r = 0xFF; map->info.g = 0xFF; map->info.b = 0xFF; map->info.a = 0xFF; /* It's ready to go */ return (map); } typedef struct SDL_ListNode { void *entry; struct SDL_ListNode *next; } SDL_ListNode; void SDL_InvalidateAllBlitMap(SDL_Surface *surface) { SDL_ListNode *l = surface->list_blitmap; surface->list_blitmap = NULL; while (l) { SDL_ListNode *tmp = l; SDL_InvalidateMap((SDL_BlitMap *)l->entry); l = l->next; SDL_free(tmp); } } static void SDL_ListAdd(SDL_ListNode **head, void *ent); static void SDL_ListRemove(SDL_ListNode **head, void *ent); void SDL_ListAdd(SDL_ListNode **head, void *ent) { SDL_ListNode *node = SDL_malloc(sizeof (*node)); if (node == NULL) { SDL_OutOfMemory(); return; } node->entry = ent; node->next = *head; *head = node; } void SDL_ListRemove(SDL_ListNode **head, void *ent) { SDL_ListNode **ptr = head; while (*ptr) { if ((*ptr)->entry == ent) { SDL_ListNode *tmp = *ptr; *ptr = (*ptr)->next; SDL_free(tmp); return; } ptr = &(*ptr)->next; } } void SDL_InvalidateMap(SDL_BlitMap * map) { if (!map) { return; } if (map->dst) { /* Un-register from the destination surface */ SDL_ListRemove((SDL_ListNode **)&(map->dst->list_blitmap), map); } map->dst = NULL; map->src_palette_version = 0; map->dst_palette_version = 0; SDL_free(map->info.table); map->info.table = NULL; } int SDL_MapSurface(SDL_Surface * src, SDL_Surface * dst) { SDL_PixelFormat *srcfmt; SDL_PixelFormat *dstfmt; SDL_BlitMap *map; /* Clear out any previous mapping */ map = src->map; #if SDL_HAVE_RLE if ((src->flags & SDL_RLEACCEL) == SDL_RLEACCEL) { 
SDL_UnRLESurface(src, 1); } #endif SDL_InvalidateMap(map); /* Figure out what kind of mapping we're doing */ map->identity = 0; srcfmt = src->format; dstfmt = dst->format; if (SDL_ISPIXELFORMAT_INDEXED(srcfmt->format)) { if (SDL_ISPIXELFORMAT_INDEXED(dstfmt->format)) { /* Palette --> Palette */ map->info.table = Map1to1(srcfmt->palette, dstfmt->palette, &map->identity); if (!map->identity) { if (map->info.table == NULL) { return (-1); } } if (srcfmt->BitsPerPixel != dstfmt->BitsPerPixel) map->identity = 0; } else { /* Palette --> BitField */ map->info.table = Map1toN(srcfmt, src->map->info.r, src->map->info.g, src->map->info.b, src->map->info.a, dstfmt); if (map->info.table == NULL) { return (-1); } } } else { if (SDL_ISPIXELFORMAT_INDEXED(dstfmt->format)) { /* BitField --> Palette */ map->info.table = MapNto1(srcfmt, dstfmt, &map->identity); if (!map->identity) { if (map->info.table == NULL) { return (-1); } } map->identity = 0; /* Don't optimize to copy */ } else { /* BitField --> BitField */ if (srcfmt == dstfmt) { map->identity = 1; } } } map->dst = dst; if (map->dst) { /* Register BlitMap to the destination surface, to be invalidated when needed */ SDL_ListAdd((SDL_ListNode **)&(map->dst->list_blitmap), map); } if (dstfmt->palette) { map->dst_palette_version = dstfmt->palette->version; } else { map->dst_palette_version = 0; } if (srcfmt->palette) { map->src_palette_version = srcfmt->palette->version; } else { map->src_palette_version = 0; } /* Choose your blitters wisely */ return (SDL_CalculateBlit(src)); } void SDL_FreeBlitMap(SDL_BlitMap * map) { if (map) { SDL_InvalidateMap(map); SDL_free(map); } } void SDL_CalculateGammaRamp(float gamma, Uint16 * ramp) { int i; /* Input validation */ if (gamma < 0.0f ) { SDL_InvalidParamError("gamma"); return; } if (ramp == NULL) { SDL_InvalidParamError("ramp"); return; } /* 0.0 gamma is all black */ if (gamma == 0.0f) { SDL_memset(ramp, 0, 256 * sizeof(Uint16)); return; } else if (gamma == 1.0f) { /* 1.0 gamma is identity */ for (i = 0; i < 256; ++i) { ramp[i] = (i << 8) | i; } return; } else { /* Calculate a real gamma ramp */ int value; gamma = 1.0f / gamma; for (i = 0; i < 256; ++i) { value = (int) (SDL_pow((double) i / 256.0, gamma) * 65535.0 + 0.5); if (value > 65535) { value = 65535; } ramp[i] = (Uint16) value; } } } /* vi: set ts=4 sw=4 expandtab: */
Map1toN(SDL_PixelFormat * src, Uint8 Rmod, Uint8 Gmod, Uint8 Bmod, Uint8 Amod,
        SDL_PixelFormat * dst)
{
    Uint8 *map;
    int i;
    int bpp;
    SDL_Palette *pal = src->palette;

    bpp = ((dst->BytesPerPixel == 3) ? 4 : dst->BytesPerPixel);
    map = (Uint8 *) SDL_malloc(pal->ncolors * bpp);
    if (map == NULL) {
        SDL_OutOfMemory();
        return (NULL);
    }

    /* We memory copy to the pixel map so the endianness is preserved */
    for (i = 0; i < pal->ncolors; ++i) {
        Uint8 R = (Uint8) ((pal->colors[i].r * Rmod) / 255);
        Uint8 G = (Uint8) ((pal->colors[i].g * Gmod) / 255);
        Uint8 B = (Uint8) ((pal->colors[i].b * Bmod) / 255);
        Uint8 A = (Uint8) ((pal->colors[i].a * Amod) / 255);
        ASSEMBLE_RGBA(&map[i * bpp], dst->BytesPerPixel, dst,
                      (Uint32)R, (Uint32)G, (Uint32)B, (Uint32)A);
    }
    return (map);
}
Map1toN(SDL_PixelFormat * src, Uint8 Rmod, Uint8 Gmod, Uint8 Bmod, Uint8 Amod,
        SDL_PixelFormat * dst)
{
    Uint8 *map;
    int i;
    int bpp;
    SDL_Palette *pal = src->palette;

    bpp = ((dst->BytesPerPixel == 3) ? 4 : dst->BytesPerPixel);
    map = (Uint8 *) SDL_calloc(256, bpp);
    if (map == NULL) {
        SDL_OutOfMemory();
        return (NULL);
    }

    /* We memory copy to the pixel map so the endianness is preserved */
    for (i = 0; i < pal->ncolors; ++i) {
        Uint8 R = (Uint8) ((pal->colors[i].r * Rmod) / 255);
        Uint8 G = (Uint8) ((pal->colors[i].g * Gmod) / 255);
        Uint8 B = (Uint8) ((pal->colors[i].b * Bmod) / 255);
        Uint8 A = (Uint8) ((pal->colors[i].a * Amod) / 255);
        ASSEMBLE_RGBA(&map[i * bpp], dst->BytesPerPixel, dst,
                      (Uint32)R, (Uint32)G, (Uint32)B, (Uint32)A);
    }
    return (map);
}
{'added': [(950, ' map = (Uint8 *) SDL_calloc(256, sizeof(Uint8));'), (974, ' map = (Uint8 *) SDL_calloc(256, bpp);')], 'deleted': [(950, ' map = (Uint8 *) SDL_malloc(src->ncolors);'), (974, ' map = (Uint8 *) SDL_malloc(pal->ncolors * bpp);')]}
2
2
1025
6535
https://github.com/libsdl-org/SDL
CVE-2021-33657
['CWE-787']
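For CVE-2021-33657 (CWE-787), the func_before/func_after pair above shows the whole fix: Map1to1 and Map1toN sized their lookup tables by the declared palette size (src->ncolors / pal->ncolors), but those tables are later indexed by raw 8-bit pixel values, so a crafted surface that declares fewer than 256 palette entries while carrying out-of-range pixel bytes can index past the allocation. Switching to SDL_calloc(256, ...) bounds every possible Uint8 index. A minimal sketch of the idea, in plain C rather than SDL code (names here are illustrative, not SDL's):

/* Why an 8-bit palette lookup table needs 256 entries: pixel bytes from
 * a malformed image can take any value 0..255, independent of how many
 * colors the palette declares. */
#include <stdint.h>
#include <stdlib.h>

static uint8_t *build_map(int ncolors)
{
    /* Pre-fix pattern: malloc(ncolors) -- any pixel >= ncolors then
     * indexes past the end of the table (out-of-bounds, CWE-787).
     * Post-fix pattern: a full, zeroed 256-entry table bounds every
     * possible Uint8 index; unused slots map to color 0. */
    uint8_t *map = calloc(256, sizeof(uint8_t));
    if (map == NULL)
        return NULL;
    for (int i = 0; i < ncolors; i++)
        map[i] = (uint8_t)i;       /* stand-in for the real color match */
    return map;
}

int main(void)
{
    uint8_t *map = build_map(16);  /* file claims a 16-color palette */
    if (map == NULL)
        return 1;
    uint8_t pixel = 0xFF;          /* attacker-chosen pixel byte */
    uint8_t mapped = map[pixel];   /* in bounds only because the table
                                      has all 256 slots */
    free(map);
    return mapped;                 /* 0: the pixel hit a zeroed slot */
}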
smbencrypt.c
smbhash
/* Unix SMB/Netbios implementation. Version 1.9. SMB parameters and setup Copyright (C) Andrew Tridgell 1992-2000 Copyright (C) Luke Kenneth Casson Leighton 1996-2000 Modified by Jeremy Allison 1995. Copyright (C) Andrew Bartlett <abartlet@samba.org> 2002-2003 Modified by Steve French (sfrench@us.ibm.com) 2002-2003 This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <crypto/skcipher.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/fs.h> #include <linux/string.h> #include <linux/kernel.h> #include <linux/random.h> #include "cifs_fs_sb.h" #include "cifs_unicode.h" #include "cifspdu.h" #include "cifsglob.h" #include "cifs_debug.h" #include "cifsproto.h" #ifndef false #define false 0 #endif #ifndef true #define true 1 #endif /* following came from the other byteorder.h to avoid include conflicts */ #define CVAL(buf,pos) (((unsigned char *)(buf))[pos]) #define SSVALX(buf,pos,val) (CVAL(buf,pos)=(val)&0xFF,CVAL(buf,pos+1)=(val)>>8) #define SSVAL(buf,pos,val) SSVALX((buf),(pos),((__u16)(val))) static void str_to_key(unsigned char *str, unsigned char *key) { int i; key[0] = str[0] >> 1; key[1] = ((str[0] & 0x01) << 6) | (str[1] >> 2); key[2] = ((str[1] & 0x03) << 5) | (str[2] >> 3); key[3] = ((str[2] & 0x07) << 4) | (str[3] >> 4); key[4] = ((str[3] & 0x0F) << 3) | (str[4] >> 5); key[5] = ((str[4] & 0x1F) << 2) | (str[5] >> 6); key[6] = ((str[5] & 0x3F) << 1) | (str[6] >> 7); key[7] = str[6] & 0x7F; for (i = 0; i < 8; i++) key[i] = (key[i] << 1); } static int smbhash(unsigned char *out, const unsigned char *in, unsigned char *key) { int rc; unsigned char key2[8]; struct crypto_skcipher *tfm_des; struct scatterlist sgin, sgout; struct skcipher_request *req; str_to_key(key, key2); tfm_des = crypto_alloc_skcipher("ecb(des)", 0, CRYPTO_ALG_ASYNC); if (IS_ERR(tfm_des)) { rc = PTR_ERR(tfm_des); cifs_dbg(VFS, "could not allocate des crypto API\n"); goto smbhash_err; } req = skcipher_request_alloc(tfm_des, GFP_KERNEL); if (!req) { rc = -ENOMEM; cifs_dbg(VFS, "could not allocate des crypto API\n"); goto smbhash_free_skcipher; } crypto_skcipher_setkey(tfm_des, key2, 8); sg_init_one(&sgin, in, 8); sg_init_one(&sgout, out, 8); skcipher_request_set_callback(req, 0, NULL, NULL); skcipher_request_set_crypt(req, &sgin, &sgout, 8, NULL); rc = crypto_skcipher_encrypt(req); if (rc) cifs_dbg(VFS, "could not encrypt crypt key rc: %d\n", rc); skcipher_request_free(req); smbhash_free_skcipher: crypto_free_skcipher(tfm_des); smbhash_err: return rc; } static int E_P16(unsigned char *p14, unsigned char *p16) { int rc; unsigned char sp8[8] = { 0x4b, 0x47, 0x53, 0x21, 0x40, 0x23, 0x24, 0x25 }; rc = smbhash(p16, sp8, p14); if (rc) return rc; rc = smbhash(p16 + 8, sp8, p14 + 7); return rc; } static int E_P24(unsigned char *p21, const unsigned char *c8, unsigned char *p24) { int rc; rc = smbhash(p24, c8, p21); if (rc) return rc; rc = smbhash(p24 + 8, c8, p21 + 7); if (rc) return rc; rc = smbhash(p24 + 
16, c8, p21 + 14); return rc; } /* produce a md4 message digest from data of length n bytes */ int mdfour(unsigned char *md4_hash, unsigned char *link_str, int link_len) { int rc; unsigned int size; struct crypto_shash *md4; struct sdesc *sdescmd4; md4 = crypto_alloc_shash("md4", 0, 0); if (IS_ERR(md4)) { rc = PTR_ERR(md4); cifs_dbg(VFS, "%s: Crypto md4 allocation error %d\n", __func__, rc); return rc; } size = sizeof(struct shash_desc) + crypto_shash_descsize(md4); sdescmd4 = kmalloc(size, GFP_KERNEL); if (!sdescmd4) { rc = -ENOMEM; goto mdfour_err; } sdescmd4->shash.tfm = md4; sdescmd4->shash.flags = 0x0; rc = crypto_shash_init(&sdescmd4->shash); if (rc) { cifs_dbg(VFS, "%s: Could not init md4 shash\n", __func__); goto mdfour_err; } rc = crypto_shash_update(&sdescmd4->shash, link_str, link_len); if (rc) { cifs_dbg(VFS, "%s: Could not update with link_str\n", __func__); goto mdfour_err; } rc = crypto_shash_final(&sdescmd4->shash, md4_hash); if (rc) cifs_dbg(VFS, "%s: Could not generate md4 hash\n", __func__); mdfour_err: crypto_free_shash(md4); kfree(sdescmd4); return rc; } /* This implements the X/Open SMB password encryption It takes a password, a 8 byte "crypt key" and puts 24 bytes of encrypted password into p24 */ /* Note that password must be uppercased and null terminated */ int SMBencrypt(unsigned char *passwd, const unsigned char *c8, unsigned char *p24) { int rc; unsigned char p14[14], p16[16], p21[21]; memset(p14, '\0', 14); memset(p16, '\0', 16); memset(p21, '\0', 21); memcpy(p14, passwd, 14); rc = E_P16(p14, p16); if (rc) return rc; memcpy(p21, p16, 16); rc = E_P24(p21, c8, p24); return rc; } /* * Creates the MD4 Hash of the users password in NT UNICODE. */ int E_md4hash(const unsigned char *passwd, unsigned char *p16, const struct nls_table *codepage) { int rc; int len; __le16 wpwd[129]; /* Password cannot be longer than 128 characters */ if (passwd) /* Password must be converted to NT unicode */ len = cifs_strtoUTF16(wpwd, passwd, 128, codepage); else { len = 0; *wpwd = 0; /* Ensure string is null terminated */ } rc = mdfour(p16, (unsigned char *) wpwd, len * sizeof(__le16)); memzero_explicit(wpwd, sizeof(wpwd)); return rc; } /* Does the NT MD4 hash then des encryption. */ int SMBNTencrypt(unsigned char *passwd, unsigned char *c8, unsigned char *p24, const struct nls_table *codepage) { int rc; unsigned char p16[16], p21[21]; memset(p16, '\0', 16); memset(p21, '\0', 21); rc = E_md4hash(passwd, p16, codepage); if (rc) { cifs_dbg(FYI, "%s Can't generate NT hash, error: %d\n", __func__, rc); return rc; } memcpy(p21, p16, 16); rc = E_P24(p21, c8, p24); return rc; }
/* Unix SMB/Netbios implementation. Version 1.9. SMB parameters and setup Copyright (C) Andrew Tridgell 1992-2000 Copyright (C) Luke Kenneth Casson Leighton 1996-2000 Modified by Jeremy Allison 1995. Copyright (C) Andrew Bartlett <abartlet@samba.org> 2002-2003 Modified by Steve French (sfrench@us.ibm.com) 2002-2003 This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/crypto.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/fs.h> #include <linux/string.h> #include <linux/kernel.h> #include <linux/random.h> #include "cifs_fs_sb.h" #include "cifs_unicode.h" #include "cifspdu.h" #include "cifsglob.h" #include "cifs_debug.h" #include "cifsproto.h" #ifndef false #define false 0 #endif #ifndef true #define true 1 #endif /* following came from the other byteorder.h to avoid include conflicts */ #define CVAL(buf,pos) (((unsigned char *)(buf))[pos]) #define SSVALX(buf,pos,val) (CVAL(buf,pos)=(val)&0xFF,CVAL(buf,pos+1)=(val)>>8) #define SSVAL(buf,pos,val) SSVALX((buf),(pos),((__u16)(val))) static void str_to_key(unsigned char *str, unsigned char *key) { int i; key[0] = str[0] >> 1; key[1] = ((str[0] & 0x01) << 6) | (str[1] >> 2); key[2] = ((str[1] & 0x03) << 5) | (str[2] >> 3); key[3] = ((str[2] & 0x07) << 4) | (str[3] >> 4); key[4] = ((str[3] & 0x0F) << 3) | (str[4] >> 5); key[5] = ((str[4] & 0x1F) << 2) | (str[5] >> 6); key[6] = ((str[5] & 0x3F) << 1) | (str[6] >> 7); key[7] = str[6] & 0x7F; for (i = 0; i < 8; i++) key[i] = (key[i] << 1); } static int smbhash(unsigned char *out, const unsigned char *in, unsigned char *key) { unsigned char key2[8]; struct crypto_cipher *tfm_des; str_to_key(key, key2); tfm_des = crypto_alloc_cipher("des", 0, 0); if (IS_ERR(tfm_des)) { cifs_dbg(VFS, "could not allocate des crypto API\n"); return PTR_ERR(tfm_des); } crypto_cipher_setkey(tfm_des, key2, 8); crypto_cipher_encrypt_one(tfm_des, out, in); crypto_free_cipher(tfm_des); return 0; } static int E_P16(unsigned char *p14, unsigned char *p16) { int rc; unsigned char sp8[8] = { 0x4b, 0x47, 0x53, 0x21, 0x40, 0x23, 0x24, 0x25 }; rc = smbhash(p16, sp8, p14); if (rc) return rc; rc = smbhash(p16 + 8, sp8, p14 + 7); return rc; } static int E_P24(unsigned char *p21, const unsigned char *c8, unsigned char *p24) { int rc; rc = smbhash(p24, c8, p21); if (rc) return rc; rc = smbhash(p24 + 8, c8, p21 + 7); if (rc) return rc; rc = smbhash(p24 + 16, c8, p21 + 14); return rc; } /* produce a md4 message digest from data of length n bytes */ int mdfour(unsigned char *md4_hash, unsigned char *link_str, int link_len) { int rc; unsigned int size; struct crypto_shash *md4; struct sdesc *sdescmd4; md4 = crypto_alloc_shash("md4", 0, 0); if (IS_ERR(md4)) { rc = PTR_ERR(md4); cifs_dbg(VFS, "%s: Crypto md4 allocation error %d\n", __func__, rc); return rc; } size = sizeof(struct shash_desc) + crypto_shash_descsize(md4); sdescmd4 = kmalloc(size, GFP_KERNEL); if (!sdescmd4) { rc = -ENOMEM; goto mdfour_err; } 
sdescmd4->shash.tfm = md4; sdescmd4->shash.flags = 0x0; rc = crypto_shash_init(&sdescmd4->shash); if (rc) { cifs_dbg(VFS, "%s: Could not init md4 shash\n", __func__); goto mdfour_err; } rc = crypto_shash_update(&sdescmd4->shash, link_str, link_len); if (rc) { cifs_dbg(VFS, "%s: Could not update with link_str\n", __func__); goto mdfour_err; } rc = crypto_shash_final(&sdescmd4->shash, md4_hash); if (rc) cifs_dbg(VFS, "%s: Could not generate md4 hash\n", __func__); mdfour_err: crypto_free_shash(md4); kfree(sdescmd4); return rc; } /* This implements the X/Open SMB password encryption It takes a password, a 8 byte "crypt key" and puts 24 bytes of encrypted password into p24 */ /* Note that password must be uppercased and null terminated */ int SMBencrypt(unsigned char *passwd, const unsigned char *c8, unsigned char *p24) { int rc; unsigned char p14[14], p16[16], p21[21]; memset(p14, '\0', 14); memset(p16, '\0', 16); memset(p21, '\0', 21); memcpy(p14, passwd, 14); rc = E_P16(p14, p16); if (rc) return rc; memcpy(p21, p16, 16); rc = E_P24(p21, c8, p24); return rc; } /* * Creates the MD4 Hash of the users password in NT UNICODE. */ int E_md4hash(const unsigned char *passwd, unsigned char *p16, const struct nls_table *codepage) { int rc; int len; __le16 wpwd[129]; /* Password cannot be longer than 128 characters */ if (passwd) /* Password must be converted to NT unicode */ len = cifs_strtoUTF16(wpwd, passwd, 128, codepage); else { len = 0; *wpwd = 0; /* Ensure string is null terminated */ } rc = mdfour(p16, (unsigned char *) wpwd, len * sizeof(__le16)); memzero_explicit(wpwd, sizeof(wpwd)); return rc; } /* Does the NT MD4 hash then des encryption. */ int SMBNTencrypt(unsigned char *passwd, unsigned char *c8, unsigned char *p24, const struct nls_table *codepage) { int rc; unsigned char p16[16], p21[21]; memset(p16, '\0', 16); memset(p21, '\0', 21); rc = E_md4hash(passwd, p16, codepage); if (rc) { cifs_dbg(FYI, "%s Can't generate NT hash, error: %d\n", __func__, rc); return rc; } memcpy(p21, p16, 16); rc = E_P24(p21, c8, p24); return rc; }
smbhash(unsigned char *out, const unsigned char *in, unsigned char *key)
{
	int rc;
	unsigned char key2[8];
	struct crypto_skcipher *tfm_des;
	struct scatterlist sgin, sgout;
	struct skcipher_request *req;

	str_to_key(key, key2);

	tfm_des = crypto_alloc_skcipher("ecb(des)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm_des)) {
		rc = PTR_ERR(tfm_des);
		cifs_dbg(VFS, "could not allocate des crypto API\n");
		goto smbhash_err;
	}

	req = skcipher_request_alloc(tfm_des, GFP_KERNEL);
	if (!req) {
		rc = -ENOMEM;
		cifs_dbg(VFS, "could not allocate des crypto API\n");
		goto smbhash_free_skcipher;
	}

	crypto_skcipher_setkey(tfm_des, key2, 8);

	sg_init_one(&sgin, in, 8);
	sg_init_one(&sgout, out, 8);

	skcipher_request_set_callback(req, 0, NULL, NULL);
	skcipher_request_set_crypt(req, &sgin, &sgout, 8, NULL);

	rc = crypto_skcipher_encrypt(req);
	if (rc)
		cifs_dbg(VFS, "could not encrypt crypt key rc: %d\n", rc);

	skcipher_request_free(req);

smbhash_free_skcipher:
	crypto_free_skcipher(tfm_des);
smbhash_err:
	return rc;
}
smbhash(unsigned char *out, const unsigned char *in, unsigned char *key)
{
	unsigned char key2[8];
	struct crypto_cipher *tfm_des;

	str_to_key(key, key2);

	tfm_des = crypto_alloc_cipher("des", 0, 0);
	if (IS_ERR(tfm_des)) {
		cifs_dbg(VFS, "could not allocate des crypto API\n");
		return PTR_ERR(tfm_des);
	}

	crypto_cipher_setkey(tfm_des, key2, 8);
	crypto_cipher_encrypt_one(tfm_des, out, in);
	crypto_free_cipher(tfm_des);

	return 0;
}
{'added': [(26, '#include <linux/crypto.h>'), (73, '\tstruct crypto_cipher *tfm_des;'), (77, '\ttfm_des = crypto_alloc_cipher("des", 0, 0);'), (80, '\t\treturn PTR_ERR(tfm_des);'), (83, '\tcrypto_cipher_setkey(tfm_des, key2, 8);'), (84, '\tcrypto_cipher_encrypt_one(tfm_des, out, in);'), (85, '\tcrypto_free_cipher(tfm_des);'), (87, '\treturn 0;')], 'deleted': [(26, '#include <crypto/skcipher.h>'), (72, '\tint rc;'), (74, '\tstruct crypto_skcipher *tfm_des;'), (75, '\tstruct scatterlist sgin, sgout;'), (76, '\tstruct skcipher_request *req;'), (80, '\ttfm_des = crypto_alloc_skcipher("ecb(des)", 0, CRYPTO_ALG_ASYNC);'), (82, '\t\trc = PTR_ERR(tfm_des);'), (83, '\t\tcifs_dbg(VFS, "could not allocate des crypto API\\n");'), (84, '\t\tgoto smbhash_err;'), (85, '\t}'), (86, ''), (87, '\treq = skcipher_request_alloc(tfm_des, GFP_KERNEL);'), (88, '\tif (!req) {'), (89, '\t\trc = -ENOMEM;'), (91, '\t\tgoto smbhash_free_skcipher;'), (94, '\tcrypto_skcipher_setkey(tfm_des, key2, 8);'), (95, ''), (96, '\tsg_init_one(&sgin, in, 8);'), (97, '\tsg_init_one(&sgout, out, 8);'), (99, '\tskcipher_request_set_callback(req, 0, NULL, NULL);'), (100, '\tskcipher_request_set_crypt(req, &sgin, &sgout, 8, NULL);'), (101, ''), (102, '\trc = crypto_skcipher_encrypt(req);'), (103, '\tif (rc)'), (104, '\t\tcifs_dbg(VFS, "could not encrypt crypt key rc: %d\\n", rc);'), (105, ''), (106, '\tskcipher_request_free(req);'), (107, ''), (108, 'smbhash_free_skcipher:'), (109, '\tcrypto_free_skcipher(tfm_des);'), (110, 'smbhash_err:'), (111, '\treturn rc;')]}
8
32
160
1090
https://github.com/torvalds/linux
CVE-2016-10154
['CWE-119']
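For CVE-2016-10154 (CWE-119), the fix replaces the scatterlist-based skcipher path with the single-block crypto_cipher API. sg_init_one() on the on-stack in/out buffers maps stack addresses to pages, which corrupts memory once kernel stacks are virtually mapped (CONFIG_VMAP_STACK); crypto_cipher_encrypt_one() takes plain pointers and needs no scatterlist at all. A sketch of the adopted pattern follows (kernel crypto API, so it builds only inside a kernel tree; the function name is illustrative):

/* Single-block cipher pattern from the fix. DES is the legacy NTLM
 * primitive this file implements, not a recommendation. */
#include <linux/crypto.h>
#include <linux/err.h>

static int des_encrypt_block(u8 *out, const u8 *in, const u8 *key)
{
	struct crypto_cipher *tfm;
	int rc;

	tfm = crypto_alloc_cipher("des", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	rc = crypto_cipher_setkey(tfm, key, 8);
	if (!rc)
		/* one 8-byte block, plain pointers: safe for stack buffers */
		crypto_cipher_encrypt_one(tfm, out, in);

	crypto_free_cipher(tfm);
	return rc;
}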
tcp.c
rds_tcp_kill_sock
/* * Copyright (c) 2006, 2018 Oracle and/or its affiliates. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * */ #include <linux/kernel.h> #include <linux/slab.h> #include <linux/in.h> #include <linux/module.h> #include <net/tcp.h> #include <net/net_namespace.h> #include <net/netns/generic.h> #include <net/addrconf.h> #include "rds.h" #include "tcp.h" /* only for info exporting */ static DEFINE_SPINLOCK(rds_tcp_tc_list_lock); static LIST_HEAD(rds_tcp_tc_list); /* rds_tcp_tc_count counts only IPv4 connections. * rds6_tcp_tc_count counts both IPv4 and IPv6 connections. 
*/ static unsigned int rds_tcp_tc_count; #if IS_ENABLED(CONFIG_IPV6) static unsigned int rds6_tcp_tc_count; #endif /* Track rds_tcp_connection structs so they can be cleaned up */ static DEFINE_SPINLOCK(rds_tcp_conn_lock); static LIST_HEAD(rds_tcp_conn_list); static atomic_t rds_tcp_unloading = ATOMIC_INIT(0); static struct kmem_cache *rds_tcp_conn_slab; static int rds_tcp_skbuf_handler(struct ctl_table *ctl, int write, void __user *buffer, size_t *lenp, loff_t *fpos); static int rds_tcp_min_sndbuf = SOCK_MIN_SNDBUF; static int rds_tcp_min_rcvbuf = SOCK_MIN_RCVBUF; static struct ctl_table rds_tcp_sysctl_table[] = { #define RDS_TCP_SNDBUF 0 { .procname = "rds_tcp_sndbuf", /* data is per-net pointer */ .maxlen = sizeof(int), .mode = 0644, .proc_handler = rds_tcp_skbuf_handler, .extra1 = &rds_tcp_min_sndbuf, }, #define RDS_TCP_RCVBUF 1 { .procname = "rds_tcp_rcvbuf", /* data is per-net pointer */ .maxlen = sizeof(int), .mode = 0644, .proc_handler = rds_tcp_skbuf_handler, .extra1 = &rds_tcp_min_rcvbuf, }, { } }; /* doing it this way avoids calling tcp_sk() */ void rds_tcp_nonagle(struct socket *sock) { int val = 1; kernel_setsockopt(sock, SOL_TCP, TCP_NODELAY, (void *)&val, sizeof(val)); } u32 rds_tcp_write_seq(struct rds_tcp_connection *tc) { /* seq# of the last byte of data in tcp send buffer */ return tcp_sk(tc->t_sock->sk)->write_seq; } u32 rds_tcp_snd_una(struct rds_tcp_connection *tc) { return tcp_sk(tc->t_sock->sk)->snd_una; } void rds_tcp_restore_callbacks(struct socket *sock, struct rds_tcp_connection *tc) { rdsdebug("restoring sock %p callbacks from tc %p\n", sock, tc); write_lock_bh(&sock->sk->sk_callback_lock); /* done under the callback_lock to serialize with write_space */ spin_lock(&rds_tcp_tc_list_lock); list_del_init(&tc->t_list_item); #if IS_ENABLED(CONFIG_IPV6) rds6_tcp_tc_count--; #endif if (!tc->t_cpath->cp_conn->c_isv6) rds_tcp_tc_count--; spin_unlock(&rds_tcp_tc_list_lock); tc->t_sock = NULL; sock->sk->sk_write_space = tc->t_orig_write_space; sock->sk->sk_data_ready = tc->t_orig_data_ready; sock->sk->sk_state_change = tc->t_orig_state_change; sock->sk->sk_user_data = NULL; write_unlock_bh(&sock->sk->sk_callback_lock); } /* * rds_tcp_reset_callbacks() switches the to the new sock and * returns the existing tc->t_sock. * * The only functions that set tc->t_sock are rds_tcp_set_callbacks * and rds_tcp_reset_callbacks. Send and receive trust that * it is set. The absence of RDS_CONN_UP bit protects those paths * from being called while it isn't set. */ void rds_tcp_reset_callbacks(struct socket *sock, struct rds_conn_path *cp) { struct rds_tcp_connection *tc = cp->cp_transport_data; struct socket *osock = tc->t_sock; if (!osock) goto newsock; /* Need to resolve a duelling SYN between peers. * We have an outstanding SYN to this peer, which may * potentially have transitioned to the RDS_CONN_UP state, * so we must quiesce any send threads before resetting * cp_transport_data. We quiesce these threads by setting * cp_state to something other than RDS_CONN_UP, and then * waiting for any existing threads in rds_send_xmit to * complete release_in_xmit(). (Subsequent threads entering * rds_send_xmit() will bail on !rds_conn_up(). 
* * However an incoming syn-ack at this point would end up * marking the conn as RDS_CONN_UP, and would again permit * rds_send_xmi() threads through, so ideally we would * synchronize on RDS_CONN_UP after lock_sock(), but cannot * do that: waiting on !RDS_IN_XMIT after lock_sock() may * end up deadlocking with tcp_sendmsg(), and the RDS_IN_XMIT * would not get set. As a result, we set c_state to * RDS_CONN_RESETTTING, to ensure that rds_tcp_state_change * cannot mark rds_conn_path_up() in the window before lock_sock() */ atomic_set(&cp->cp_state, RDS_CONN_RESETTING); wait_event(cp->cp_waitq, !test_bit(RDS_IN_XMIT, &cp->cp_flags)); lock_sock(osock->sk); /* reset receive side state for rds_tcp_data_recv() for osock */ cancel_delayed_work_sync(&cp->cp_send_w); cancel_delayed_work_sync(&cp->cp_recv_w); if (tc->t_tinc) { rds_inc_put(&tc->t_tinc->ti_inc); tc->t_tinc = NULL; } tc->t_tinc_hdr_rem = sizeof(struct rds_header); tc->t_tinc_data_rem = 0; rds_tcp_restore_callbacks(osock, tc); release_sock(osock->sk); sock_release(osock); newsock: rds_send_path_reset(cp); lock_sock(sock->sk); rds_tcp_set_callbacks(sock, cp); release_sock(sock->sk); } /* Add tc to rds_tcp_tc_list and set tc->t_sock. See comments * above rds_tcp_reset_callbacks for notes about synchronization * with data path */ void rds_tcp_set_callbacks(struct socket *sock, struct rds_conn_path *cp) { struct rds_tcp_connection *tc = cp->cp_transport_data; rdsdebug("setting sock %p callbacks to tc %p\n", sock, tc); write_lock_bh(&sock->sk->sk_callback_lock); /* done under the callback_lock to serialize with write_space */ spin_lock(&rds_tcp_tc_list_lock); list_add_tail(&tc->t_list_item, &rds_tcp_tc_list); #if IS_ENABLED(CONFIG_IPV6) rds6_tcp_tc_count++; #endif if (!tc->t_cpath->cp_conn->c_isv6) rds_tcp_tc_count++; spin_unlock(&rds_tcp_tc_list_lock); /* accepted sockets need our listen data ready undone */ if (sock->sk->sk_data_ready == rds_tcp_listen_data_ready) sock->sk->sk_data_ready = sock->sk->sk_user_data; tc->t_sock = sock; tc->t_cpath = cp; tc->t_orig_data_ready = sock->sk->sk_data_ready; tc->t_orig_write_space = sock->sk->sk_write_space; tc->t_orig_state_change = sock->sk->sk_state_change; sock->sk->sk_user_data = cp; sock->sk->sk_data_ready = rds_tcp_data_ready; sock->sk->sk_write_space = rds_tcp_write_space; sock->sk->sk_state_change = rds_tcp_state_change; write_unlock_bh(&sock->sk->sk_callback_lock); } /* Handle RDS_INFO_TCP_SOCKETS socket option. It only returns IPv4 * connections for backward compatibility. 
*/ static void rds_tcp_tc_info(struct socket *rds_sock, unsigned int len, struct rds_info_iterator *iter, struct rds_info_lengths *lens) { struct rds_info_tcp_socket tsinfo; struct rds_tcp_connection *tc; unsigned long flags; spin_lock_irqsave(&rds_tcp_tc_list_lock, flags); if (len / sizeof(tsinfo) < rds_tcp_tc_count) goto out; list_for_each_entry(tc, &rds_tcp_tc_list, t_list_item) { struct inet_sock *inet = inet_sk(tc->t_sock->sk); if (tc->t_cpath->cp_conn->c_isv6) continue; tsinfo.local_addr = inet->inet_saddr; tsinfo.local_port = inet->inet_sport; tsinfo.peer_addr = inet->inet_daddr; tsinfo.peer_port = inet->inet_dport; tsinfo.hdr_rem = tc->t_tinc_hdr_rem; tsinfo.data_rem = tc->t_tinc_data_rem; tsinfo.last_sent_nxt = tc->t_last_sent_nxt; tsinfo.last_expected_una = tc->t_last_expected_una; tsinfo.last_seen_una = tc->t_last_seen_una; tsinfo.tos = tc->t_cpath->cp_conn->c_tos; rds_info_copy(iter, &tsinfo, sizeof(tsinfo)); } out: lens->nr = rds_tcp_tc_count; lens->each = sizeof(tsinfo); spin_unlock_irqrestore(&rds_tcp_tc_list_lock, flags); } #if IS_ENABLED(CONFIG_IPV6) /* Handle RDS6_INFO_TCP_SOCKETS socket option. It returns both IPv4 and * IPv6 connections. IPv4 connection address is returned in an IPv4 mapped * address. */ static void rds6_tcp_tc_info(struct socket *sock, unsigned int len, struct rds_info_iterator *iter, struct rds_info_lengths *lens) { struct rds6_info_tcp_socket tsinfo6; struct rds_tcp_connection *tc; unsigned long flags; spin_lock_irqsave(&rds_tcp_tc_list_lock, flags); if (len / sizeof(tsinfo6) < rds6_tcp_tc_count) goto out; list_for_each_entry(tc, &rds_tcp_tc_list, t_list_item) { struct sock *sk = tc->t_sock->sk; struct inet_sock *inet = inet_sk(sk); tsinfo6.local_addr = sk->sk_v6_rcv_saddr; tsinfo6.local_port = inet->inet_sport; tsinfo6.peer_addr = sk->sk_v6_daddr; tsinfo6.peer_port = inet->inet_dport; tsinfo6.hdr_rem = tc->t_tinc_hdr_rem; tsinfo6.data_rem = tc->t_tinc_data_rem; tsinfo6.last_sent_nxt = tc->t_last_sent_nxt; tsinfo6.last_expected_una = tc->t_last_expected_una; tsinfo6.last_seen_una = tc->t_last_seen_una; rds_info_copy(iter, &tsinfo6, sizeof(tsinfo6)); } out: lens->nr = rds6_tcp_tc_count; lens->each = sizeof(tsinfo6); spin_unlock_irqrestore(&rds_tcp_tc_list_lock, flags); } #endif static int rds_tcp_laddr_check(struct net *net, const struct in6_addr *addr, __u32 scope_id) { struct net_device *dev = NULL; #if IS_ENABLED(CONFIG_IPV6) int ret; #endif if (ipv6_addr_v4mapped(addr)) { if (inet_addr_type(net, addr->s6_addr32[3]) == RTN_LOCAL) return 0; return -EADDRNOTAVAIL; } /* If the scope_id is specified, check only those addresses * hosted on the specified interface. */ if (scope_id != 0) { rcu_read_lock(); dev = dev_get_by_index_rcu(net, scope_id); /* scope_id is not valid... 
*/ if (!dev) { rcu_read_unlock(); return -EADDRNOTAVAIL; } rcu_read_unlock(); } #if IS_ENABLED(CONFIG_IPV6) ret = ipv6_chk_addr(net, addr, dev, 0); if (ret) return 0; #endif return -EADDRNOTAVAIL; } static void rds_tcp_conn_free(void *arg) { struct rds_tcp_connection *tc = arg; unsigned long flags; rdsdebug("freeing tc %p\n", tc); spin_lock_irqsave(&rds_tcp_conn_lock, flags); if (!tc->t_tcp_node_detached) list_del(&tc->t_tcp_node); spin_unlock_irqrestore(&rds_tcp_conn_lock, flags); kmem_cache_free(rds_tcp_conn_slab, tc); } static int rds_tcp_conn_alloc(struct rds_connection *conn, gfp_t gfp) { struct rds_tcp_connection *tc; int i, j; int ret = 0; for (i = 0; i < RDS_MPATH_WORKERS; i++) { tc = kmem_cache_alloc(rds_tcp_conn_slab, gfp); if (!tc) { ret = -ENOMEM; goto fail; } mutex_init(&tc->t_conn_path_lock); tc->t_sock = NULL; tc->t_tinc = NULL; tc->t_tinc_hdr_rem = sizeof(struct rds_header); tc->t_tinc_data_rem = 0; conn->c_path[i].cp_transport_data = tc; tc->t_cpath = &conn->c_path[i]; tc->t_tcp_node_detached = true; rdsdebug("rds_conn_path [%d] tc %p\n", i, conn->c_path[i].cp_transport_data); } spin_lock_irq(&rds_tcp_conn_lock); for (i = 0; i < RDS_MPATH_WORKERS; i++) { tc = conn->c_path[i].cp_transport_data; tc->t_tcp_node_detached = false; list_add_tail(&tc->t_tcp_node, &rds_tcp_conn_list); } spin_unlock_irq(&rds_tcp_conn_lock); fail: if (ret) { for (j = 0; j < i; j++) rds_tcp_conn_free(conn->c_path[j].cp_transport_data); } return ret; } static bool list_has_conn(struct list_head *list, struct rds_connection *conn) { struct rds_tcp_connection *tc, *_tc; list_for_each_entry_safe(tc, _tc, list, t_tcp_node) { if (tc->t_cpath->cp_conn == conn) return true; } return false; } static void rds_tcp_set_unloading(void) { atomic_set(&rds_tcp_unloading, 1); } static bool rds_tcp_is_unloading(struct rds_connection *conn) { return atomic_read(&rds_tcp_unloading) != 0; } static void rds_tcp_destroy_conns(void) { struct rds_tcp_connection *tc, *_tc; LIST_HEAD(tmp_list); /* avoid calling conn_destroy with irqs off */ spin_lock_irq(&rds_tcp_conn_lock); list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) { if (!list_has_conn(&tmp_list, tc->t_cpath->cp_conn)) list_move_tail(&tc->t_tcp_node, &tmp_list); } spin_unlock_irq(&rds_tcp_conn_lock); list_for_each_entry_safe(tc, _tc, &tmp_list, t_tcp_node) rds_conn_destroy(tc->t_cpath->cp_conn); } static void rds_tcp_exit(void); static u8 rds_tcp_get_tos_map(u8 tos) { /* all user tos mapped to default 0 for TCP transport */ return 0; } struct rds_transport rds_tcp_transport = { .laddr_check = rds_tcp_laddr_check, .xmit_path_prepare = rds_tcp_xmit_path_prepare, .xmit_path_complete = rds_tcp_xmit_path_complete, .xmit = rds_tcp_xmit, .recv_path = rds_tcp_recv_path, .conn_alloc = rds_tcp_conn_alloc, .conn_free = rds_tcp_conn_free, .conn_path_connect = rds_tcp_conn_path_connect, .conn_path_shutdown = rds_tcp_conn_path_shutdown, .inc_copy_to_user = rds_tcp_inc_copy_to_user, .inc_free = rds_tcp_inc_free, .stats_info_copy = rds_tcp_stats_info_copy, .exit = rds_tcp_exit, .get_tos_map = rds_tcp_get_tos_map, .t_owner = THIS_MODULE, .t_name = "tcp", .t_type = RDS_TRANS_TCP, .t_prefer_loopback = 1, .t_mp_capable = 1, .t_unloading = rds_tcp_is_unloading, }; static unsigned int rds_tcp_netid; /* per-network namespace private data for this module */ struct rds_tcp_net { struct socket *rds_tcp_listen_sock; struct work_struct rds_tcp_accept_w; struct ctl_table_header *rds_tcp_sysctl; struct ctl_table *ctl_table; int sndbuf_size; int rcvbuf_size; }; /* All module 
specific customizations to the RDS-TCP socket should be done in * rds_tcp_tune() and applied after socket creation. */ void rds_tcp_tune(struct socket *sock) { struct sock *sk = sock->sk; struct net *net = sock_net(sk); struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid); rds_tcp_nonagle(sock); lock_sock(sk); if (rtn->sndbuf_size > 0) { sk->sk_sndbuf = rtn->sndbuf_size; sk->sk_userlocks |= SOCK_SNDBUF_LOCK; } if (rtn->rcvbuf_size > 0) { sk->sk_sndbuf = rtn->rcvbuf_size; sk->sk_userlocks |= SOCK_RCVBUF_LOCK; } release_sock(sk); } static void rds_tcp_accept_worker(struct work_struct *work) { struct rds_tcp_net *rtn = container_of(work, struct rds_tcp_net, rds_tcp_accept_w); while (rds_tcp_accept_one(rtn->rds_tcp_listen_sock) == 0) cond_resched(); } void rds_tcp_accept_work(struct sock *sk) { struct net *net = sock_net(sk); struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid); queue_work(rds_wq, &rtn->rds_tcp_accept_w); } static __net_init int rds_tcp_init_net(struct net *net) { struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid); struct ctl_table *tbl; int err = 0; memset(rtn, 0, sizeof(*rtn)); /* {snd, rcv}buf_size default to 0, which implies we let the * stack pick the value, and permit auto-tuning of buffer size. */ if (net == &init_net) { tbl = rds_tcp_sysctl_table; } else { tbl = kmemdup(rds_tcp_sysctl_table, sizeof(rds_tcp_sysctl_table), GFP_KERNEL); if (!tbl) { pr_warn("could not set allocate syctl table\n"); return -ENOMEM; } rtn->ctl_table = tbl; } tbl[RDS_TCP_SNDBUF].data = &rtn->sndbuf_size; tbl[RDS_TCP_RCVBUF].data = &rtn->rcvbuf_size; rtn->rds_tcp_sysctl = register_net_sysctl(net, "net/rds/tcp", tbl); if (!rtn->rds_tcp_sysctl) { pr_warn("could not register sysctl\n"); err = -ENOMEM; goto fail; } #if IS_ENABLED(CONFIG_IPV6) rtn->rds_tcp_listen_sock = rds_tcp_listen_init(net, true); #else rtn->rds_tcp_listen_sock = rds_tcp_listen_init(net, false); #endif if (!rtn->rds_tcp_listen_sock) { pr_warn("could not set up IPv6 listen sock\n"); #if IS_ENABLED(CONFIG_IPV6) /* Try IPv4 as some systems disable IPv6 */ rtn->rds_tcp_listen_sock = rds_tcp_listen_init(net, false); if (!rtn->rds_tcp_listen_sock) { #endif unregister_net_sysctl_table(rtn->rds_tcp_sysctl); rtn->rds_tcp_sysctl = NULL; err = -EAFNOSUPPORT; goto fail; #if IS_ENABLED(CONFIG_IPV6) } #endif } INIT_WORK(&rtn->rds_tcp_accept_w, rds_tcp_accept_worker); return 0; fail: if (net != &init_net) kfree(tbl); return err; } static void rds_tcp_kill_sock(struct net *net) { struct rds_tcp_connection *tc, *_tc; LIST_HEAD(tmp_list); struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid); struct socket *lsock = rtn->rds_tcp_listen_sock; rtn->rds_tcp_listen_sock = NULL; rds_tcp_listen_stop(lsock, &rtn->rds_tcp_accept_w); spin_lock_irq(&rds_tcp_conn_lock); list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) { struct net *c_net = read_pnet(&tc->t_cpath->cp_conn->c_net); if (net != c_net || !tc->t_sock) continue; if (!list_has_conn(&tmp_list, tc->t_cpath->cp_conn)) { list_move_tail(&tc->t_tcp_node, &tmp_list); } else { list_del(&tc->t_tcp_node); tc->t_tcp_node_detached = true; } } spin_unlock_irq(&rds_tcp_conn_lock); list_for_each_entry_safe(tc, _tc, &tmp_list, t_tcp_node) rds_conn_destroy(tc->t_cpath->cp_conn); } static void __net_exit rds_tcp_exit_net(struct net *net) { struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid); rds_tcp_kill_sock(net); if (rtn->rds_tcp_sysctl) unregister_net_sysctl_table(rtn->rds_tcp_sysctl); if (net != &init_net) kfree(rtn->ctl_table); } static struct pernet_operations 
rds_tcp_net_ops = { .init = rds_tcp_init_net, .exit = rds_tcp_exit_net, .id = &rds_tcp_netid, .size = sizeof(struct rds_tcp_net), }; void *rds_tcp_listen_sock_def_readable(struct net *net) { struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid); struct socket *lsock = rtn->rds_tcp_listen_sock; if (!lsock) return NULL; return lsock->sk->sk_user_data; } /* when sysctl is used to modify some kernel socket parameters,this * function resets the RDS connections in that netns so that we can * restart with new parameters. The assumption is that such reset * events are few and far-between. */ static void rds_tcp_sysctl_reset(struct net *net) { struct rds_tcp_connection *tc, *_tc; spin_lock_irq(&rds_tcp_conn_lock); list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) { struct net *c_net = read_pnet(&tc->t_cpath->cp_conn->c_net); if (net != c_net || !tc->t_sock) continue; /* reconnect with new parameters */ rds_conn_path_drop(tc->t_cpath, false); } spin_unlock_irq(&rds_tcp_conn_lock); } static int rds_tcp_skbuf_handler(struct ctl_table *ctl, int write, void __user *buffer, size_t *lenp, loff_t *fpos) { struct net *net = current->nsproxy->net_ns; int err; err = proc_dointvec_minmax(ctl, write, buffer, lenp, fpos); if (err < 0) { pr_warn("Invalid input. Must be >= %d\n", *(int *)(ctl->extra1)); return err; } if (write) rds_tcp_sysctl_reset(net); return 0; } static void rds_tcp_exit(void) { rds_tcp_set_unloading(); synchronize_rcu(); rds_info_deregister_func(RDS_INFO_TCP_SOCKETS, rds_tcp_tc_info); #if IS_ENABLED(CONFIG_IPV6) rds_info_deregister_func(RDS6_INFO_TCP_SOCKETS, rds6_tcp_tc_info); #endif unregister_pernet_device(&rds_tcp_net_ops); rds_tcp_destroy_conns(); rds_trans_unregister(&rds_tcp_transport); rds_tcp_recv_exit(); kmem_cache_destroy(rds_tcp_conn_slab); } module_exit(rds_tcp_exit); static int rds_tcp_init(void) { int ret; rds_tcp_conn_slab = kmem_cache_create("rds_tcp_connection", sizeof(struct rds_tcp_connection), 0, 0, NULL); if (!rds_tcp_conn_slab) { ret = -ENOMEM; goto out; } ret = rds_tcp_recv_init(); if (ret) goto out_slab; ret = register_pernet_device(&rds_tcp_net_ops); if (ret) goto out_recv; rds_trans_register(&rds_tcp_transport); rds_info_register_func(RDS_INFO_TCP_SOCKETS, rds_tcp_tc_info); #if IS_ENABLED(CONFIG_IPV6) rds_info_register_func(RDS6_INFO_TCP_SOCKETS, rds6_tcp_tc_info); #endif goto out; out_recv: rds_tcp_recv_exit(); out_slab: kmem_cache_destroy(rds_tcp_conn_slab); out: return ret; } module_init(rds_tcp_init); MODULE_AUTHOR("Oracle Corporation <rds-devel@oss.oracle.com>"); MODULE_DESCRIPTION("RDS: TCP transport"); MODULE_LICENSE("Dual BSD/GPL");
/* * Copyright (c) 2006, 2018 Oracle and/or its affiliates. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * */ #include <linux/kernel.h> #include <linux/slab.h> #include <linux/in.h> #include <linux/module.h> #include <net/tcp.h> #include <net/net_namespace.h> #include <net/netns/generic.h> #include <net/addrconf.h> #include "rds.h" #include "tcp.h" /* only for info exporting */ static DEFINE_SPINLOCK(rds_tcp_tc_list_lock); static LIST_HEAD(rds_tcp_tc_list); /* rds_tcp_tc_count counts only IPv4 connections. * rds6_tcp_tc_count counts both IPv4 and IPv6 connections. 
*/ static unsigned int rds_tcp_tc_count; #if IS_ENABLED(CONFIG_IPV6) static unsigned int rds6_tcp_tc_count; #endif /* Track rds_tcp_connection structs so they can be cleaned up */ static DEFINE_SPINLOCK(rds_tcp_conn_lock); static LIST_HEAD(rds_tcp_conn_list); static atomic_t rds_tcp_unloading = ATOMIC_INIT(0); static struct kmem_cache *rds_tcp_conn_slab; static int rds_tcp_skbuf_handler(struct ctl_table *ctl, int write, void __user *buffer, size_t *lenp, loff_t *fpos); static int rds_tcp_min_sndbuf = SOCK_MIN_SNDBUF; static int rds_tcp_min_rcvbuf = SOCK_MIN_RCVBUF; static struct ctl_table rds_tcp_sysctl_table[] = { #define RDS_TCP_SNDBUF 0 { .procname = "rds_tcp_sndbuf", /* data is per-net pointer */ .maxlen = sizeof(int), .mode = 0644, .proc_handler = rds_tcp_skbuf_handler, .extra1 = &rds_tcp_min_sndbuf, }, #define RDS_TCP_RCVBUF 1 { .procname = "rds_tcp_rcvbuf", /* data is per-net pointer */ .maxlen = sizeof(int), .mode = 0644, .proc_handler = rds_tcp_skbuf_handler, .extra1 = &rds_tcp_min_rcvbuf, }, { } }; /* doing it this way avoids calling tcp_sk() */ void rds_tcp_nonagle(struct socket *sock) { int val = 1; kernel_setsockopt(sock, SOL_TCP, TCP_NODELAY, (void *)&val, sizeof(val)); } u32 rds_tcp_write_seq(struct rds_tcp_connection *tc) { /* seq# of the last byte of data in tcp send buffer */ return tcp_sk(tc->t_sock->sk)->write_seq; } u32 rds_tcp_snd_una(struct rds_tcp_connection *tc) { return tcp_sk(tc->t_sock->sk)->snd_una; } void rds_tcp_restore_callbacks(struct socket *sock, struct rds_tcp_connection *tc) { rdsdebug("restoring sock %p callbacks from tc %p\n", sock, tc); write_lock_bh(&sock->sk->sk_callback_lock); /* done under the callback_lock to serialize with write_space */ spin_lock(&rds_tcp_tc_list_lock); list_del_init(&tc->t_list_item); #if IS_ENABLED(CONFIG_IPV6) rds6_tcp_tc_count--; #endif if (!tc->t_cpath->cp_conn->c_isv6) rds_tcp_tc_count--; spin_unlock(&rds_tcp_tc_list_lock); tc->t_sock = NULL; sock->sk->sk_write_space = tc->t_orig_write_space; sock->sk->sk_data_ready = tc->t_orig_data_ready; sock->sk->sk_state_change = tc->t_orig_state_change; sock->sk->sk_user_data = NULL; write_unlock_bh(&sock->sk->sk_callback_lock); } /* * rds_tcp_reset_callbacks() switches the to the new sock and * returns the existing tc->t_sock. * * The only functions that set tc->t_sock are rds_tcp_set_callbacks * and rds_tcp_reset_callbacks. Send and receive trust that * it is set. The absence of RDS_CONN_UP bit protects those paths * from being called while it isn't set. */ void rds_tcp_reset_callbacks(struct socket *sock, struct rds_conn_path *cp) { struct rds_tcp_connection *tc = cp->cp_transport_data; struct socket *osock = tc->t_sock; if (!osock) goto newsock; /* Need to resolve a duelling SYN between peers. * We have an outstanding SYN to this peer, which may * potentially have transitioned to the RDS_CONN_UP state, * so we must quiesce any send threads before resetting * cp_transport_data. We quiesce these threads by setting * cp_state to something other than RDS_CONN_UP, and then * waiting for any existing threads in rds_send_xmit to * complete release_in_xmit(). (Subsequent threads entering * rds_send_xmit() will bail on !rds_conn_up(). 
* * However an incoming syn-ack at this point would end up * marking the conn as RDS_CONN_UP, and would again permit * rds_send_xmi() threads through, so ideally we would * synchronize on RDS_CONN_UP after lock_sock(), but cannot * do that: waiting on !RDS_IN_XMIT after lock_sock() may * end up deadlocking with tcp_sendmsg(), and the RDS_IN_XMIT * would not get set. As a result, we set c_state to * RDS_CONN_RESETTTING, to ensure that rds_tcp_state_change * cannot mark rds_conn_path_up() in the window before lock_sock() */ atomic_set(&cp->cp_state, RDS_CONN_RESETTING); wait_event(cp->cp_waitq, !test_bit(RDS_IN_XMIT, &cp->cp_flags)); lock_sock(osock->sk); /* reset receive side state for rds_tcp_data_recv() for osock */ cancel_delayed_work_sync(&cp->cp_send_w); cancel_delayed_work_sync(&cp->cp_recv_w); if (tc->t_tinc) { rds_inc_put(&tc->t_tinc->ti_inc); tc->t_tinc = NULL; } tc->t_tinc_hdr_rem = sizeof(struct rds_header); tc->t_tinc_data_rem = 0; rds_tcp_restore_callbacks(osock, tc); release_sock(osock->sk); sock_release(osock); newsock: rds_send_path_reset(cp); lock_sock(sock->sk); rds_tcp_set_callbacks(sock, cp); release_sock(sock->sk); } /* Add tc to rds_tcp_tc_list and set tc->t_sock. See comments * above rds_tcp_reset_callbacks for notes about synchronization * with data path */ void rds_tcp_set_callbacks(struct socket *sock, struct rds_conn_path *cp) { struct rds_tcp_connection *tc = cp->cp_transport_data; rdsdebug("setting sock %p callbacks to tc %p\n", sock, tc); write_lock_bh(&sock->sk->sk_callback_lock); /* done under the callback_lock to serialize with write_space */ spin_lock(&rds_tcp_tc_list_lock); list_add_tail(&tc->t_list_item, &rds_tcp_tc_list); #if IS_ENABLED(CONFIG_IPV6) rds6_tcp_tc_count++; #endif if (!tc->t_cpath->cp_conn->c_isv6) rds_tcp_tc_count++; spin_unlock(&rds_tcp_tc_list_lock); /* accepted sockets need our listen data ready undone */ if (sock->sk->sk_data_ready == rds_tcp_listen_data_ready) sock->sk->sk_data_ready = sock->sk->sk_user_data; tc->t_sock = sock; tc->t_cpath = cp; tc->t_orig_data_ready = sock->sk->sk_data_ready; tc->t_orig_write_space = sock->sk->sk_write_space; tc->t_orig_state_change = sock->sk->sk_state_change; sock->sk->sk_user_data = cp; sock->sk->sk_data_ready = rds_tcp_data_ready; sock->sk->sk_write_space = rds_tcp_write_space; sock->sk->sk_state_change = rds_tcp_state_change; write_unlock_bh(&sock->sk->sk_callback_lock); } /* Handle RDS_INFO_TCP_SOCKETS socket option. It only returns IPv4 * connections for backward compatibility. 
*/ static void rds_tcp_tc_info(struct socket *rds_sock, unsigned int len, struct rds_info_iterator *iter, struct rds_info_lengths *lens) { struct rds_info_tcp_socket tsinfo; struct rds_tcp_connection *tc; unsigned long flags; spin_lock_irqsave(&rds_tcp_tc_list_lock, flags); if (len / sizeof(tsinfo) < rds_tcp_tc_count) goto out; list_for_each_entry(tc, &rds_tcp_tc_list, t_list_item) { struct inet_sock *inet = inet_sk(tc->t_sock->sk); if (tc->t_cpath->cp_conn->c_isv6) continue; tsinfo.local_addr = inet->inet_saddr; tsinfo.local_port = inet->inet_sport; tsinfo.peer_addr = inet->inet_daddr; tsinfo.peer_port = inet->inet_dport; tsinfo.hdr_rem = tc->t_tinc_hdr_rem; tsinfo.data_rem = tc->t_tinc_data_rem; tsinfo.last_sent_nxt = tc->t_last_sent_nxt; tsinfo.last_expected_una = tc->t_last_expected_una; tsinfo.last_seen_una = tc->t_last_seen_una; tsinfo.tos = tc->t_cpath->cp_conn->c_tos; rds_info_copy(iter, &tsinfo, sizeof(tsinfo)); } out: lens->nr = rds_tcp_tc_count; lens->each = sizeof(tsinfo); spin_unlock_irqrestore(&rds_tcp_tc_list_lock, flags); } #if IS_ENABLED(CONFIG_IPV6) /* Handle RDS6_INFO_TCP_SOCKETS socket option. It returns both IPv4 and * IPv6 connections. IPv4 connection address is returned in an IPv4 mapped * address. */ static void rds6_tcp_tc_info(struct socket *sock, unsigned int len, struct rds_info_iterator *iter, struct rds_info_lengths *lens) { struct rds6_info_tcp_socket tsinfo6; struct rds_tcp_connection *tc; unsigned long flags; spin_lock_irqsave(&rds_tcp_tc_list_lock, flags); if (len / sizeof(tsinfo6) < rds6_tcp_tc_count) goto out; list_for_each_entry(tc, &rds_tcp_tc_list, t_list_item) { struct sock *sk = tc->t_sock->sk; struct inet_sock *inet = inet_sk(sk); tsinfo6.local_addr = sk->sk_v6_rcv_saddr; tsinfo6.local_port = inet->inet_sport; tsinfo6.peer_addr = sk->sk_v6_daddr; tsinfo6.peer_port = inet->inet_dport; tsinfo6.hdr_rem = tc->t_tinc_hdr_rem; tsinfo6.data_rem = tc->t_tinc_data_rem; tsinfo6.last_sent_nxt = tc->t_last_sent_nxt; tsinfo6.last_expected_una = tc->t_last_expected_una; tsinfo6.last_seen_una = tc->t_last_seen_una; rds_info_copy(iter, &tsinfo6, sizeof(tsinfo6)); } out: lens->nr = rds6_tcp_tc_count; lens->each = sizeof(tsinfo6); spin_unlock_irqrestore(&rds_tcp_tc_list_lock, flags); } #endif static int rds_tcp_laddr_check(struct net *net, const struct in6_addr *addr, __u32 scope_id) { struct net_device *dev = NULL; #if IS_ENABLED(CONFIG_IPV6) int ret; #endif if (ipv6_addr_v4mapped(addr)) { if (inet_addr_type(net, addr->s6_addr32[3]) == RTN_LOCAL) return 0; return -EADDRNOTAVAIL; } /* If the scope_id is specified, check only those addresses * hosted on the specified interface. */ if (scope_id != 0) { rcu_read_lock(); dev = dev_get_by_index_rcu(net, scope_id); /* scope_id is not valid... 
*/ if (!dev) { rcu_read_unlock(); return -EADDRNOTAVAIL; } rcu_read_unlock(); } #if IS_ENABLED(CONFIG_IPV6) ret = ipv6_chk_addr(net, addr, dev, 0); if (ret) return 0; #endif return -EADDRNOTAVAIL; } static void rds_tcp_conn_free(void *arg) { struct rds_tcp_connection *tc = arg; unsigned long flags; rdsdebug("freeing tc %p\n", tc); spin_lock_irqsave(&rds_tcp_conn_lock, flags); if (!tc->t_tcp_node_detached) list_del(&tc->t_tcp_node); spin_unlock_irqrestore(&rds_tcp_conn_lock, flags); kmem_cache_free(rds_tcp_conn_slab, tc); } static int rds_tcp_conn_alloc(struct rds_connection *conn, gfp_t gfp) { struct rds_tcp_connection *tc; int i, j; int ret = 0; for (i = 0; i < RDS_MPATH_WORKERS; i++) { tc = kmem_cache_alloc(rds_tcp_conn_slab, gfp); if (!tc) { ret = -ENOMEM; goto fail; } mutex_init(&tc->t_conn_path_lock); tc->t_sock = NULL; tc->t_tinc = NULL; tc->t_tinc_hdr_rem = sizeof(struct rds_header); tc->t_tinc_data_rem = 0; conn->c_path[i].cp_transport_data = tc; tc->t_cpath = &conn->c_path[i]; tc->t_tcp_node_detached = true; rdsdebug("rds_conn_path [%d] tc %p\n", i, conn->c_path[i].cp_transport_data); } spin_lock_irq(&rds_tcp_conn_lock); for (i = 0; i < RDS_MPATH_WORKERS; i++) { tc = conn->c_path[i].cp_transport_data; tc->t_tcp_node_detached = false; list_add_tail(&tc->t_tcp_node, &rds_tcp_conn_list); } spin_unlock_irq(&rds_tcp_conn_lock); fail: if (ret) { for (j = 0; j < i; j++) rds_tcp_conn_free(conn->c_path[j].cp_transport_data); } return ret; } static bool list_has_conn(struct list_head *list, struct rds_connection *conn) { struct rds_tcp_connection *tc, *_tc; list_for_each_entry_safe(tc, _tc, list, t_tcp_node) { if (tc->t_cpath->cp_conn == conn) return true; } return false; } static void rds_tcp_set_unloading(void) { atomic_set(&rds_tcp_unloading, 1); } static bool rds_tcp_is_unloading(struct rds_connection *conn) { return atomic_read(&rds_tcp_unloading) != 0; } static void rds_tcp_destroy_conns(void) { struct rds_tcp_connection *tc, *_tc; LIST_HEAD(tmp_list); /* avoid calling conn_destroy with irqs off */ spin_lock_irq(&rds_tcp_conn_lock); list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) { if (!list_has_conn(&tmp_list, tc->t_cpath->cp_conn)) list_move_tail(&tc->t_tcp_node, &tmp_list); } spin_unlock_irq(&rds_tcp_conn_lock); list_for_each_entry_safe(tc, _tc, &tmp_list, t_tcp_node) rds_conn_destroy(tc->t_cpath->cp_conn); } static void rds_tcp_exit(void); static u8 rds_tcp_get_tos_map(u8 tos) { /* all user tos mapped to default 0 for TCP transport */ return 0; } struct rds_transport rds_tcp_transport = { .laddr_check = rds_tcp_laddr_check, .xmit_path_prepare = rds_tcp_xmit_path_prepare, .xmit_path_complete = rds_tcp_xmit_path_complete, .xmit = rds_tcp_xmit, .recv_path = rds_tcp_recv_path, .conn_alloc = rds_tcp_conn_alloc, .conn_free = rds_tcp_conn_free, .conn_path_connect = rds_tcp_conn_path_connect, .conn_path_shutdown = rds_tcp_conn_path_shutdown, .inc_copy_to_user = rds_tcp_inc_copy_to_user, .inc_free = rds_tcp_inc_free, .stats_info_copy = rds_tcp_stats_info_copy, .exit = rds_tcp_exit, .get_tos_map = rds_tcp_get_tos_map, .t_owner = THIS_MODULE, .t_name = "tcp", .t_type = RDS_TRANS_TCP, .t_prefer_loopback = 1, .t_mp_capable = 1, .t_unloading = rds_tcp_is_unloading, }; static unsigned int rds_tcp_netid; /* per-network namespace private data for this module */ struct rds_tcp_net { struct socket *rds_tcp_listen_sock; struct work_struct rds_tcp_accept_w; struct ctl_table_header *rds_tcp_sysctl; struct ctl_table *ctl_table; int sndbuf_size; int rcvbuf_size; }; /* All module 
specific customizations to the RDS-TCP socket should be done in * rds_tcp_tune() and applied after socket creation. */ void rds_tcp_tune(struct socket *sock) { struct sock *sk = sock->sk; struct net *net = sock_net(sk); struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid); rds_tcp_nonagle(sock); lock_sock(sk); if (rtn->sndbuf_size > 0) { sk->sk_sndbuf = rtn->sndbuf_size; sk->sk_userlocks |= SOCK_SNDBUF_LOCK; } if (rtn->rcvbuf_size > 0) { sk->sk_sndbuf = rtn->rcvbuf_size; sk->sk_userlocks |= SOCK_RCVBUF_LOCK; } release_sock(sk); } static void rds_tcp_accept_worker(struct work_struct *work) { struct rds_tcp_net *rtn = container_of(work, struct rds_tcp_net, rds_tcp_accept_w); while (rds_tcp_accept_one(rtn->rds_tcp_listen_sock) == 0) cond_resched(); } void rds_tcp_accept_work(struct sock *sk) { struct net *net = sock_net(sk); struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid); queue_work(rds_wq, &rtn->rds_tcp_accept_w); } static __net_init int rds_tcp_init_net(struct net *net) { struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid); struct ctl_table *tbl; int err = 0; memset(rtn, 0, sizeof(*rtn)); /* {snd, rcv}buf_size default to 0, which implies we let the * stack pick the value, and permit auto-tuning of buffer size. */ if (net == &init_net) { tbl = rds_tcp_sysctl_table; } else { tbl = kmemdup(rds_tcp_sysctl_table, sizeof(rds_tcp_sysctl_table), GFP_KERNEL); if (!tbl) { pr_warn("could not set allocate syctl table\n"); return -ENOMEM; } rtn->ctl_table = tbl; } tbl[RDS_TCP_SNDBUF].data = &rtn->sndbuf_size; tbl[RDS_TCP_RCVBUF].data = &rtn->rcvbuf_size; rtn->rds_tcp_sysctl = register_net_sysctl(net, "net/rds/tcp", tbl); if (!rtn->rds_tcp_sysctl) { pr_warn("could not register sysctl\n"); err = -ENOMEM; goto fail; } #if IS_ENABLED(CONFIG_IPV6) rtn->rds_tcp_listen_sock = rds_tcp_listen_init(net, true); #else rtn->rds_tcp_listen_sock = rds_tcp_listen_init(net, false); #endif if (!rtn->rds_tcp_listen_sock) { pr_warn("could not set up IPv6 listen sock\n"); #if IS_ENABLED(CONFIG_IPV6) /* Try IPv4 as some systems disable IPv6 */ rtn->rds_tcp_listen_sock = rds_tcp_listen_init(net, false); if (!rtn->rds_tcp_listen_sock) { #endif unregister_net_sysctl_table(rtn->rds_tcp_sysctl); rtn->rds_tcp_sysctl = NULL; err = -EAFNOSUPPORT; goto fail; #if IS_ENABLED(CONFIG_IPV6) } #endif } INIT_WORK(&rtn->rds_tcp_accept_w, rds_tcp_accept_worker); return 0; fail: if (net != &init_net) kfree(tbl); return err; } static void rds_tcp_kill_sock(struct net *net) { struct rds_tcp_connection *tc, *_tc; LIST_HEAD(tmp_list); struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid); struct socket *lsock = rtn->rds_tcp_listen_sock; rtn->rds_tcp_listen_sock = NULL; rds_tcp_listen_stop(lsock, &rtn->rds_tcp_accept_w); spin_lock_irq(&rds_tcp_conn_lock); list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) { struct net *c_net = read_pnet(&tc->t_cpath->cp_conn->c_net); if (net != c_net) continue; if (!list_has_conn(&tmp_list, tc->t_cpath->cp_conn)) { list_move_tail(&tc->t_tcp_node, &tmp_list); } else { list_del(&tc->t_tcp_node); tc->t_tcp_node_detached = true; } } spin_unlock_irq(&rds_tcp_conn_lock); list_for_each_entry_safe(tc, _tc, &tmp_list, t_tcp_node) rds_conn_destroy(tc->t_cpath->cp_conn); } static void __net_exit rds_tcp_exit_net(struct net *net) { struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid); rds_tcp_kill_sock(net); if (rtn->rds_tcp_sysctl) unregister_net_sysctl_table(rtn->rds_tcp_sysctl); if (net != &init_net) kfree(rtn->ctl_table); } static struct pernet_operations rds_tcp_net_ops = { 
.init = rds_tcp_init_net, .exit = rds_tcp_exit_net, .id = &rds_tcp_netid, .size = sizeof(struct rds_tcp_net), }; void *rds_tcp_listen_sock_def_readable(struct net *net) { struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid); struct socket *lsock = rtn->rds_tcp_listen_sock; if (!lsock) return NULL; return lsock->sk->sk_user_data; } /* when sysctl is used to modify some kernel socket parameters,this * function resets the RDS connections in that netns so that we can * restart with new parameters. The assumption is that such reset * events are few and far-between. */ static void rds_tcp_sysctl_reset(struct net *net) { struct rds_tcp_connection *tc, *_tc; spin_lock_irq(&rds_tcp_conn_lock); list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) { struct net *c_net = read_pnet(&tc->t_cpath->cp_conn->c_net); if (net != c_net || !tc->t_sock) continue; /* reconnect with new parameters */ rds_conn_path_drop(tc->t_cpath, false); } spin_unlock_irq(&rds_tcp_conn_lock); } static int rds_tcp_skbuf_handler(struct ctl_table *ctl, int write, void __user *buffer, size_t *lenp, loff_t *fpos) { struct net *net = current->nsproxy->net_ns; int err; err = proc_dointvec_minmax(ctl, write, buffer, lenp, fpos); if (err < 0) { pr_warn("Invalid input. Must be >= %d\n", *(int *)(ctl->extra1)); return err; } if (write) rds_tcp_sysctl_reset(net); return 0; } static void rds_tcp_exit(void) { rds_tcp_set_unloading(); synchronize_rcu(); rds_info_deregister_func(RDS_INFO_TCP_SOCKETS, rds_tcp_tc_info); #if IS_ENABLED(CONFIG_IPV6) rds_info_deregister_func(RDS6_INFO_TCP_SOCKETS, rds6_tcp_tc_info); #endif unregister_pernet_device(&rds_tcp_net_ops); rds_tcp_destroy_conns(); rds_trans_unregister(&rds_tcp_transport); rds_tcp_recv_exit(); kmem_cache_destroy(rds_tcp_conn_slab); } module_exit(rds_tcp_exit); static int rds_tcp_init(void) { int ret; rds_tcp_conn_slab = kmem_cache_create("rds_tcp_connection", sizeof(struct rds_tcp_connection), 0, 0, NULL); if (!rds_tcp_conn_slab) { ret = -ENOMEM; goto out; } ret = rds_tcp_recv_init(); if (ret) goto out_slab; ret = register_pernet_device(&rds_tcp_net_ops); if (ret) goto out_recv; rds_trans_register(&rds_tcp_transport); rds_info_register_func(RDS_INFO_TCP_SOCKETS, rds_tcp_tc_info); #if IS_ENABLED(CONFIG_IPV6) rds_info_register_func(RDS6_INFO_TCP_SOCKETS, rds6_tcp_tc_info); #endif goto out; out_recv: rds_tcp_recv_exit(); out_slab: kmem_cache_destroy(rds_tcp_conn_slab); out: return ret; } module_init(rds_tcp_init); MODULE_AUTHOR("Oracle Corporation <rds-devel@oss.oracle.com>"); MODULE_DESCRIPTION("RDS: TCP transport"); MODULE_LICENSE("Dual BSD/GPL");
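rds_tcp_init_net() above first tries an IPv6 listen socket and, if that fails (some systems disable IPv6), retries with IPv4 before giving up with -EAFNOSUPPORT. Below is a minimal userspace sketch of that fallback, assuming plain BSD sockets rather than the kernel's internal socket API; the helper name and the port value are illustrative (16385 stands in for RDS_TCP_PORT):

/* Illustrative userspace analogue of the v6-then-v4 listen fallback. */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

static int listen_init(int use_ipv6, unsigned short port)
{
	int fd = socket(use_ipv6 ? AF_INET6 : AF_INET, SOCK_STREAM, 0);

	if (fd < 0)
		return -1;
	if (use_ipv6) {
		struct sockaddr_in6 sin6;

		memset(&sin6, 0, sizeof(sin6));
		sin6.sin6_family = AF_INET6;
		sin6.sin6_addr = in6addr_any;
		sin6.sin6_port = htons(port);
		if (bind(fd, (struct sockaddr *)&sin6, sizeof(sin6)) < 0)
			goto fail;
	} else {
		struct sockaddr_in sin;

		memset(&sin, 0, sizeof(sin));
		sin.sin_family = AF_INET;
		sin.sin_addr.s_addr = htonl(INADDR_ANY);
		sin.sin_port = htons(port);
		if (bind(fd, (struct sockaddr *)&sin, sizeof(sin)) < 0)
			goto fail;
	}
	if (listen(fd, 64) == 0)
		return fd;
fail:
	close(fd);
	return -1;
}

int main(void)
{
	int fd = listen_init(1, 16385);	/* prefer IPv6 ...            */

	if (fd < 0)
		fd = listen_init(0, 16385);	/* ... then fall back to IPv4 */
	if (fd < 0) {
		perror("listen_init");		/* maps to -EAFNOSUPPORT      */
		return 1;
	}
	printf("listening on fd %d\n", fd);
	close(fd);
	return 0;
}

The kernel code has the same shape: one init helper parameterized on the address family, with the IPv4 retry nested under IS_ENABLED(CONFIG_IPV6).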
static void rds_tcp_kill_sock(struct net *net)
{
	struct rds_tcp_connection *tc, *_tc;
	LIST_HEAD(tmp_list);
	struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid);
	struct socket *lsock = rtn->rds_tcp_listen_sock;

	rtn->rds_tcp_listen_sock = NULL;
	rds_tcp_listen_stop(lsock, &rtn->rds_tcp_accept_w);
	spin_lock_irq(&rds_tcp_conn_lock);
	list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) {
		struct net *c_net = read_pnet(&tc->t_cpath->cp_conn->c_net);

		if (net != c_net || !tc->t_sock)
			continue;
		if (!list_has_conn(&tmp_list, tc->t_cpath->cp_conn)) {
			list_move_tail(&tc->t_tcp_node, &tmp_list);
		} else {
			list_del(&tc->t_tcp_node);
			tc->t_tcp_node_detached = true;
		}
	}
	spin_unlock_irq(&rds_tcp_conn_lock);
	list_for_each_entry_safe(tc, _tc, &tmp_list, t_tcp_node)
		rds_conn_destroy(tc->t_cpath->cp_conn);
}
static void rds_tcp_kill_sock(struct net *net)
{
	struct rds_tcp_connection *tc, *_tc;
	LIST_HEAD(tmp_list);
	struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid);
	struct socket *lsock = rtn->rds_tcp_listen_sock;

	rtn->rds_tcp_listen_sock = NULL;
	rds_tcp_listen_stop(lsock, &rtn->rds_tcp_accept_w);
	spin_lock_irq(&rds_tcp_conn_lock);
	list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) {
		struct net *c_net = read_pnet(&tc->t_cpath->cp_conn->c_net);

		if (net != c_net)
			continue;
		if (!list_has_conn(&tmp_list, tc->t_cpath->cp_conn)) {
			list_move_tail(&tc->t_tcp_node, &tmp_list);
		} else {
			list_del(&tc->t_tcp_node);
			tc->t_tcp_node_detached = true;
		}
	}
	spin_unlock_irq(&rds_tcp_conn_lock);
	list_for_each_entry_safe(tc, _tc, &tmp_list, t_tcp_node)
		rds_conn_destroy(tc->t_cpath->cp_conn);
}
{'added': [(611, '\t\tif (net != c_net)')], 'deleted': [(611, '\t\tif (net != c_net || !tc->t_sock)')]}
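The one-line diff above is the entire fix for this record's CVE. Before it, rds_tcp_kill_sock() skipped connections whose tc->t_sock was still NULL, so those connections stayed on rds_tcp_conn_list after their net namespace was torn down and freed; later work on them dereferenced freed memory (the CWE-416/CWE-362 pair listed below). Dropping the !tc->t_sock test detaches and destroys every connection owned by the dying namespace. A hypothetical userspace analogue of the pattern, "detach under the lock, destroy after dropping it", with all names illustrative:

/* Illustrative only: struct conn stands in for rds_tcp_connection,
 * conn_list for rds_tcp_conn_list, net_id for the owning struct net. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct conn {
	int net_id;
	struct conn *next;
};

static struct conn *conn_list;
static pthread_mutex_t conn_lock = PTHREAD_MUTEX_INITIALIZER;

static void conn_register(int net_id)
{
	struct conn *c = malloc(sizeof(*c));

	if (!c)
		abort();
	c->net_id = net_id;
	pthread_mutex_lock(&conn_lock);
	c->next = conn_list;
	conn_list = c;
	pthread_mutex_unlock(&conn_lock);
}

static void kill_net(int net_id)
{
	struct conn *victims = NULL, **pp, *c;

	pthread_mutex_lock(&conn_lock);
	for (pp = &conn_list; (c = *pp) != NULL; ) {
		if (c->net_id == net_id) {	/* no "has a socket?" filter */
			*pp = c->next;		/* detach from the global list */
			c->next = victims;	/* chain onto the private list */
			victims = c;
		} else {
			pp = &c->next;
		}
	}
	pthread_mutex_unlock(&conn_lock);

	while ((c = victims) != NULL) {		/* destroy with the lock dropped */
		victims = c->next;
		printf("destroying conn in net %d\n", c->net_id);
		free(c);
	}
}

int main(void)
{
	conn_register(1);
	conn_register(2);
	conn_register(1);
	kill_net(1);	/* both net-1 entries go; net-2 survives */
	return 0;
}

Destroying outside the lock mirrors the kernel's own comment in rds_tcp_destroy_conns() ("avoid calling conn_destroy with irqs off"); the sketch omits the list_has_conn() dedup that the kernel needs because multipath gives one list node per path of the same connection.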
1
1
514
3086
https://github.com/torvalds/linux
CVE-2019-11815
['CWE-416', 'CWE-362']
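For reference, CWE-416 is "Use After Free" and CWE-362 is "Concurrent Execution using Shared Resource with Improper Synchronization ('Race Condition')"; the pairing reflects the race between namespace teardown and connection handling that left freed namespace memory reachable from the skipped connections.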
mempolicy.c
mpol_parse_str
// SPDX-License-Identifier: GPL-2.0-only /* * Simple NUMA memory policy for the Linux kernel. * * Copyright 2003,2004 Andi Kleen, SuSE Labs. * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc. * * NUMA policy allows the user to give hints in which node(s) memory should * be allocated. * * Support four policies per VMA and per process: * * The VMA policy has priority over the process policy for a page fault. * * interleave Allocate memory interleaved over a set of nodes, * with normal fallback if it fails. * For VMA based allocations this interleaves based on the * offset into the backing object or offset into the mapping * for anonymous memory. For process policy an process counter * is used. * * bind Only allocate memory on a specific set of nodes, * no fallback. * FIXME: memory is allocated starting with the first node * to the last. It would be better if bind would truly restrict * the allocation to memory nodes instead * * preferred Try a specific node first before normal fallback. * As a special case NUMA_NO_NODE here means do the allocation * on the local CPU. This is normally identical to default, * but useful to set in a VMA when you have a non default * process policy. * * default Allocate on the local node first, or when on a VMA * use the process policy. This is what Linux always did * in a NUMA aware kernel and still does by, ahem, default. * * The process policy is applied for most non interrupt memory allocations * in that process' context. Interrupts ignore the policies and always * try to allocate on the local CPU. The VMA policy is only applied for memory * allocations for a VMA in the VM. * * Currently there are a few corner cases in swapping where the policy * is not applied, but the majority should be handled. When process policy * is used it is not remembered over swap outs/swap ins. * * Only the highest zone in the zone hierarchy gets policied. Allocations * requesting a lower zone just use default policy. This implies that * on systems with highmem kernel lowmem allocation don't get policied. * Same with GFP_DMA allocations. * * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between * all users and remembered even when nobody has memory mapped. */ /* Notebook: fix mmap readahead to honour policy and enable policy for any page cache object statistics for bigpages global policy for page cache? currently it uses process policy. Requires first item above. handle mremap for shared memory (currently ignored for the policy) grows down? make bind policy root only? It can trigger oom much faster and the kernel is not always grateful with that. 
*/ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/mempolicy.h> #include <linux/pagewalk.h> #include <linux/highmem.h> #include <linux/hugetlb.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/sched/mm.h> #include <linux/sched/numa_balancing.h> #include <linux/sched/task.h> #include <linux/nodemask.h> #include <linux/cpuset.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/export.h> #include <linux/nsproxy.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/compat.h> #include <linux/ptrace.h> #include <linux/swap.h> #include <linux/seq_file.h> #include <linux/proc_fs.h> #include <linux/migrate.h> #include <linux/ksm.h> #include <linux/rmap.h> #include <linux/security.h> #include <linux/syscalls.h> #include <linux/ctype.h> #include <linux/mm_inline.h> #include <linux/mmu_notifier.h> #include <linux/printk.h> #include <linux/swapops.h> #include <asm/tlbflush.h> #include <linux/uaccess.h> #include "internal.h" /* Internal flags */ #define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0) /* Skip checks for continuous vmas */ #define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1) /* Invert check for nodemask */ static struct kmem_cache *policy_cache; static struct kmem_cache *sn_cache; /* Highest zone. An specific allocation for a zone below that is not policied. */ enum zone_type policy_zone = 0; /* * run-time system-wide default policy => local allocation */ static struct mempolicy default_policy = { .refcnt = ATOMIC_INIT(1), /* never free it */ .mode = MPOL_PREFERRED, .flags = MPOL_F_LOCAL, }; static struct mempolicy preferred_node_policy[MAX_NUMNODES]; struct mempolicy *get_task_policy(struct task_struct *p) { struct mempolicy *pol = p->mempolicy; int node; if (pol) return pol; node = numa_node_id(); if (node != NUMA_NO_NODE) { pol = &preferred_node_policy[node]; /* preferred_node_policy is not initialised early in boot */ if (pol->mode) return pol; } return &default_policy; } static const struct mempolicy_operations { int (*create)(struct mempolicy *pol, const nodemask_t *nodes); void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes); } mpol_ops[MPOL_MAX]; static inline int mpol_store_user_nodemask(const struct mempolicy *pol) { return pol->flags & MPOL_MODE_FLAGS; } static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig, const nodemask_t *rel) { nodemask_t tmp; nodes_fold(tmp, *orig, nodes_weight(*rel)); nodes_onto(*ret, tmp, *rel); } static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes) { if (nodes_empty(*nodes)) return -EINVAL; pol->v.nodes = *nodes; return 0; } static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes) { if (!nodes) pol->flags |= MPOL_F_LOCAL; /* local allocation */ else if (nodes_empty(*nodes)) return -EINVAL; /* no allowed nodes */ else pol->v.preferred_node = first_node(*nodes); return 0; } static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes) { if (nodes_empty(*nodes)) return -EINVAL; pol->v.nodes = *nodes; return 0; } /* * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if * any, for the new policy. mpol_new() has already validated the nodes * parameter with respect to the policy mode and flags. But, we need to * handle an empty nodemask with MPOL_PREFERRED here. * * Must be called holding task's alloc_lock to protect task's mems_allowed * and mempolicy. May also be called holding the mmap_semaphore for write. 
*/ static int mpol_set_nodemask(struct mempolicy *pol, const nodemask_t *nodes, struct nodemask_scratch *nsc) { int ret; /* if mode is MPOL_DEFAULT, pol is NULL. This is right. */ if (pol == NULL) return 0; /* Check N_MEMORY */ nodes_and(nsc->mask1, cpuset_current_mems_allowed, node_states[N_MEMORY]); VM_BUG_ON(!nodes); if (pol->mode == MPOL_PREFERRED && nodes_empty(*nodes)) nodes = NULL; /* explicit local allocation */ else { if (pol->flags & MPOL_F_RELATIVE_NODES) mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1); else nodes_and(nsc->mask2, *nodes, nsc->mask1); if (mpol_store_user_nodemask(pol)) pol->w.user_nodemask = *nodes; else pol->w.cpuset_mems_allowed = cpuset_current_mems_allowed; } if (nodes) ret = mpol_ops[pol->mode].create(pol, &nsc->mask2); else ret = mpol_ops[pol->mode].create(pol, NULL); return ret; } /* * This function just creates a new policy, does some check and simple * initialization. You must invoke mpol_set_nodemask() to set nodes. */ static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags, nodemask_t *nodes) { struct mempolicy *policy; pr_debug("setting mode %d flags %d nodes[0] %lx\n", mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE); if (mode == MPOL_DEFAULT) { if (nodes && !nodes_empty(*nodes)) return ERR_PTR(-EINVAL); return NULL; } VM_BUG_ON(!nodes); /* * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation). * All other modes require a valid pointer to a non-empty nodemask. */ if (mode == MPOL_PREFERRED) { if (nodes_empty(*nodes)) { if (((flags & MPOL_F_STATIC_NODES) || (flags & MPOL_F_RELATIVE_NODES))) return ERR_PTR(-EINVAL); } } else if (mode == MPOL_LOCAL) { if (!nodes_empty(*nodes) || (flags & MPOL_F_STATIC_NODES) || (flags & MPOL_F_RELATIVE_NODES)) return ERR_PTR(-EINVAL); mode = MPOL_PREFERRED; } else if (nodes_empty(*nodes)) return ERR_PTR(-EINVAL); policy = kmem_cache_alloc(policy_cache, GFP_KERNEL); if (!policy) return ERR_PTR(-ENOMEM); atomic_set(&policy->refcnt, 1); policy->mode = mode; policy->flags = flags; return policy; } /* Slow path of a mpol destructor. 
*/ void __mpol_put(struct mempolicy *p) { if (!atomic_dec_and_test(&p->refcnt)) return; kmem_cache_free(policy_cache, p); } static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes) { } static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes) { nodemask_t tmp; if (pol->flags & MPOL_F_STATIC_NODES) nodes_and(tmp, pol->w.user_nodemask, *nodes); else if (pol->flags & MPOL_F_RELATIVE_NODES) mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes); else { nodes_remap(tmp, pol->v.nodes,pol->w.cpuset_mems_allowed, *nodes); pol->w.cpuset_mems_allowed = *nodes; } if (nodes_empty(tmp)) tmp = *nodes; pol->v.nodes = tmp; } static void mpol_rebind_preferred(struct mempolicy *pol, const nodemask_t *nodes) { nodemask_t tmp; if (pol->flags & MPOL_F_STATIC_NODES) { int node = first_node(pol->w.user_nodemask); if (node_isset(node, *nodes)) { pol->v.preferred_node = node; pol->flags &= ~MPOL_F_LOCAL; } else pol->flags |= MPOL_F_LOCAL; } else if (pol->flags & MPOL_F_RELATIVE_NODES) { mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes); pol->v.preferred_node = first_node(tmp); } else if (!(pol->flags & MPOL_F_LOCAL)) { pol->v.preferred_node = node_remap(pol->v.preferred_node, pol->w.cpuset_mems_allowed, *nodes); pol->w.cpuset_mems_allowed = *nodes; } } /* * mpol_rebind_policy - Migrate a policy to a different set of nodes * * Per-vma policies are protected by mmap_sem. Allocations using per-task * policies are protected by task->mems_allowed_seq to prevent a premature * OOM/allocation failure due to parallel nodemask modification. */ static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask) { if (!pol) return; if (!mpol_store_user_nodemask(pol) && !(pol->flags & MPOL_F_LOCAL) && nodes_equal(pol->w.cpuset_mems_allowed, *newmask)) return; mpol_ops[pol->mode].rebind(pol, newmask); } /* * Wrapper for mpol_rebind_policy() that just requires task * pointer, and updates task mempolicy. * * Called with task's alloc_lock held. */ void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new) { mpol_rebind_policy(tsk->mempolicy, new); } /* * Rebind each vma in mm to new nodemask. * * Call holding a reference to mm. Takes mm->mmap_sem during call. */ void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new) { struct vm_area_struct *vma; down_write(&mm->mmap_sem); for (vma = mm->mmap; vma; vma = vma->vm_next) mpol_rebind_policy(vma->vm_policy, new); up_write(&mm->mmap_sem); } static const struct mempolicy_operations mpol_ops[MPOL_MAX] = { [MPOL_DEFAULT] = { .rebind = mpol_rebind_default, }, [MPOL_INTERLEAVE] = { .create = mpol_new_interleave, .rebind = mpol_rebind_nodemask, }, [MPOL_PREFERRED] = { .create = mpol_new_preferred, .rebind = mpol_rebind_preferred, }, [MPOL_BIND] = { .create = mpol_new_bind, .rebind = mpol_rebind_nodemask, }, }; static int migrate_page_add(struct page *page, struct list_head *pagelist, unsigned long flags); struct queue_pages { struct list_head *pagelist; unsigned long flags; nodemask_t *nmask; unsigned long start; unsigned long end; struct vm_area_struct *first; }; /* * Check if the page's nid is in qp->nmask. * * If MPOL_MF_INVERT is set in qp->flags, check if the nid is * in the invert of qp->nmask. 
*/ static inline bool queue_pages_required(struct page *page, struct queue_pages *qp) { int nid = page_to_nid(page); unsigned long flags = qp->flags; return node_isset(nid, *qp->nmask) == !(flags & MPOL_MF_INVERT); } /* * queue_pages_pmd() has four possible return values: * 0 - pages are placed on the right node or queued successfully. * 1 - there is unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were * specified. * 2 - THP was split. * -EIO - is migration entry or only MPOL_MF_STRICT was specified and an * existing page was already on a node that does not follow the * policy. */ static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr, unsigned long end, struct mm_walk *walk) { int ret = 0; struct page *page; struct queue_pages *qp = walk->private; unsigned long flags; if (unlikely(is_pmd_migration_entry(*pmd))) { ret = -EIO; goto unlock; } page = pmd_page(*pmd); if (is_huge_zero_page(page)) { spin_unlock(ptl); __split_huge_pmd(walk->vma, pmd, addr, false, NULL); ret = 2; goto out; } if (!queue_pages_required(page, qp)) goto unlock; flags = qp->flags; /* go to thp migration */ if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) { if (!vma_migratable(walk->vma) || migrate_page_add(page, qp->pagelist, flags)) { ret = 1; goto unlock; } } else ret = -EIO; unlock: spin_unlock(ptl); out: return ret; } /* * Scan through pages checking if pages follow certain conditions, * and move them to the pagelist if they do. * * queue_pages_pte_range() has three possible return values: * 0 - pages are placed on the right node or queued successfully. * 1 - there is unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were * specified. * -EIO - only MPOL_MF_STRICT was specified and an existing page was already * on a node that does not follow the policy. */ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, struct mm_walk *walk) { struct vm_area_struct *vma = walk->vma; struct page *page; struct queue_pages *qp = walk->private; unsigned long flags = qp->flags; int ret; bool has_unmovable = false; pte_t *pte; spinlock_t *ptl; ptl = pmd_trans_huge_lock(pmd, vma); if (ptl) { ret = queue_pages_pmd(pmd, ptl, addr, end, walk); if (ret != 2) return ret; } /* THP was split, fall through to pte walk */ if (pmd_trans_unstable(pmd)) return 0; pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl); for (; addr != end; pte++, addr += PAGE_SIZE) { if (!pte_present(*pte)) continue; page = vm_normal_page(vma, addr, *pte); if (!page) continue; /* * vm_normal_page() filters out zero pages, but there might * still be PageReserved pages to skip, perhaps in a VDSO. */ if (PageReserved(page)) continue; if (!queue_pages_required(page, qp)) continue; if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) { /* MPOL_MF_STRICT must be specified if we get here */ if (!vma_migratable(vma)) { has_unmovable = true; break; } /* * Do not abort immediately since there may be * temporary off LRU pages in the range. Still * need migrate other LRU pages. */ if (migrate_page_add(page, qp->pagelist, flags)) has_unmovable = true; } else break; } pte_unmap_unlock(pte - 1, ptl); cond_resched(); if (has_unmovable) return 1; return addr != end ? 
-EIO : 0; } static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask, unsigned long addr, unsigned long end, struct mm_walk *walk) { int ret = 0; #ifdef CONFIG_HUGETLB_PAGE struct queue_pages *qp = walk->private; unsigned long flags = (qp->flags & MPOL_MF_VALID); struct page *page; spinlock_t *ptl; pte_t entry; ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte); entry = huge_ptep_get(pte); if (!pte_present(entry)) goto unlock; page = pte_page(entry); if (!queue_pages_required(page, qp)) goto unlock; if (flags == MPOL_MF_STRICT) { /* * STRICT alone means only detecting misplaced page and no * need to further check other vma. */ ret = -EIO; goto unlock; } if (!vma_migratable(walk->vma)) { /* * Must be STRICT with MOVE*, otherwise .test_walk() have * stopped walking current vma. * Detecting misplaced page but allow migrating pages which * have been queued. */ ret = 1; goto unlock; } /* With MPOL_MF_MOVE, we migrate only unshared hugepage. */ if (flags & (MPOL_MF_MOVE_ALL) || (flags & MPOL_MF_MOVE && page_mapcount(page) == 1)) { if (!isolate_huge_page(page, qp->pagelist) && (flags & MPOL_MF_STRICT)) /* * Failed to isolate page but allow migrating pages * which have been queued. */ ret = 1; } unlock: spin_unlock(ptl); #else BUG(); #endif return ret; } #ifdef CONFIG_NUMA_BALANCING /* * This is used to mark a range of virtual addresses to be inaccessible. * These are later cleared by a NUMA hinting fault. Depending on these * faults, pages may be migrated for better NUMA placement. * * This is assuming that NUMA faults are handled using PROT_NONE. If * an architecture makes a different choice, it will need further * changes to the core. */ unsigned long change_prot_numa(struct vm_area_struct *vma, unsigned long addr, unsigned long end) { int nr_updated; nr_updated = change_protection(vma, addr, end, PAGE_NONE, 0, 1); if (nr_updated) count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated); return nr_updated; } #else static unsigned long change_prot_numa(struct vm_area_struct *vma, unsigned long addr, unsigned long end) { return 0; } #endif /* CONFIG_NUMA_BALANCING */ static int queue_pages_test_walk(unsigned long start, unsigned long end, struct mm_walk *walk) { struct vm_area_struct *vma = walk->vma; struct queue_pages *qp = walk->private; unsigned long endvma = vma->vm_end; unsigned long flags = qp->flags; /* range check first */ VM_BUG_ON_VMA((vma->vm_start > start) || (vma->vm_end < end), vma); if (!qp->first) { qp->first = vma; if (!(flags & MPOL_MF_DISCONTIG_OK) && (qp->start < vma->vm_start)) /* hole at head side of range */ return -EFAULT; } if (!(flags & MPOL_MF_DISCONTIG_OK) && ((vma->vm_end < qp->end) && (!vma->vm_next || vma->vm_end < vma->vm_next->vm_start))) /* hole at middle or tail of range */ return -EFAULT; /* * Need check MPOL_MF_STRICT to return -EIO if possible * regardless of vma_migratable */ if (!vma_migratable(vma) && !(flags & MPOL_MF_STRICT)) return 1; if (endvma > end) endvma = end; if (flags & MPOL_MF_LAZY) { /* Similar to task_numa_work, skip inaccessible VMAs */ if (!is_vm_hugetlb_page(vma) && (vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)) && !(vma->vm_flags & VM_MIXEDMAP)) change_prot_numa(vma, start, endvma); return 1; } /* queue pages from current vma */ if (flags & MPOL_MF_VALID) return 0; return 1; } static const struct mm_walk_ops queue_pages_walk_ops = { .hugetlb_entry = queue_pages_hugetlb, .pmd_entry = queue_pages_pte_range, .test_walk = queue_pages_test_walk, }; /* * Walk through page tables and collect pages to be migrated. 
* * If pages found in a given range are on a set of nodes (determined by * @nodes and @flags,) it's isolated and queued to the pagelist which is * passed via @private. * * queue_pages_range() has three possible return values: * 1 - there is unmovable page, but MPOL_MF_MOVE* & MPOL_MF_STRICT were * specified. * 0 - queue pages successfully or no misplaced page. * errno - i.e. misplaced pages with MPOL_MF_STRICT specified (-EIO) or * memory range specified by nodemask and maxnode points outside * your accessible address space (-EFAULT) */ static int queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end, nodemask_t *nodes, unsigned long flags, struct list_head *pagelist) { int err; struct queue_pages qp = { .pagelist = pagelist, .flags = flags, .nmask = nodes, .start = start, .end = end, .first = NULL, }; err = walk_page_range(mm, start, end, &queue_pages_walk_ops, &qp); if (!qp.first) /* whole range in hole */ err = -EFAULT; return err; } /* * Apply policy to a single VMA * This must be called with the mmap_sem held for writing. */ static int vma_replace_policy(struct vm_area_struct *vma, struct mempolicy *pol) { int err; struct mempolicy *old; struct mempolicy *new; pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n", vma->vm_start, vma->vm_end, vma->vm_pgoff, vma->vm_ops, vma->vm_file, vma->vm_ops ? vma->vm_ops->set_policy : NULL); new = mpol_dup(pol); if (IS_ERR(new)) return PTR_ERR(new); if (vma->vm_ops && vma->vm_ops->set_policy) { err = vma->vm_ops->set_policy(vma, new); if (err) goto err_out; } old = vma->vm_policy; vma->vm_policy = new; /* protected by mmap_sem */ mpol_put(old); return 0; err_out: mpol_put(new); return err; } /* Step 2: apply policy to a range and do splits. */ static int mbind_range(struct mm_struct *mm, unsigned long start, unsigned long end, struct mempolicy *new_pol) { struct vm_area_struct *next; struct vm_area_struct *prev; struct vm_area_struct *vma; int err = 0; pgoff_t pgoff; unsigned long vmstart; unsigned long vmend; vma = find_vma(mm, start); VM_BUG_ON(!vma); prev = vma->vm_prev; if (start > vma->vm_start) prev = vma; for (; vma && vma->vm_start < end; prev = vma, vma = next) { next = vma->vm_next; vmstart = max(start, vma->vm_start); vmend = min(end, vma->vm_end); if (mpol_equal(vma_policy(vma), new_pol)) continue; pgoff = vma->vm_pgoff + ((vmstart - vma->vm_start) >> PAGE_SHIFT); prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags, vma->anon_vma, vma->vm_file, pgoff, new_pol, vma->vm_userfaultfd_ctx); if (prev) { vma = prev; next = vma->vm_next; if (mpol_equal(vma_policy(vma), new_pol)) continue; /* vma_merge() joined vma && vma->next, case 8 */ goto replace; } if (vma->vm_start != vmstart) { err = split_vma(vma->vm_mm, vma, vmstart, 1); if (err) goto out; } if (vma->vm_end != vmend) { err = split_vma(vma->vm_mm, vma, vmend, 0); if (err) goto out; } replace: err = vma_replace_policy(vma, new_pol); if (err) goto out; } out: return err; } /* Set the process memory policy */ static long do_set_mempolicy(unsigned short mode, unsigned short flags, nodemask_t *nodes) { struct mempolicy *new, *old; NODEMASK_SCRATCH(scratch); int ret; if (!scratch) return -ENOMEM; new = mpol_new(mode, flags, nodes); if (IS_ERR(new)) { ret = PTR_ERR(new); goto out; } task_lock(current); ret = mpol_set_nodemask(new, nodes, scratch); if (ret) { task_unlock(current); mpol_put(new); goto out; } old = current->mempolicy; current->mempolicy = new; if (new && new->mode == MPOL_INTERLEAVE) current->il_prev = MAX_NUMNODES-1; task_unlock(current); 
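/* Descriptive note: mpol_put(old) below drops the reference to the policy that was just replaced; the slow path __mpol_put() (defined earlier in this file) frees it once the last reference is gone, so the release can happen after task_unlock(). */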
mpol_put(old); ret = 0; out: NODEMASK_SCRATCH_FREE(scratch); return ret; } /* * Return nodemask for policy for get_mempolicy() query * * Called with task's alloc_lock held */ static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes) { nodes_clear(*nodes); if (p == &default_policy) return; switch (p->mode) { case MPOL_BIND: /* Fall through */ case MPOL_INTERLEAVE: *nodes = p->v.nodes; break; case MPOL_PREFERRED: if (!(p->flags & MPOL_F_LOCAL)) node_set(p->v.preferred_node, *nodes); /* else return empty node mask for local allocation */ break; default: BUG(); } } static int lookup_node(struct mm_struct *mm, unsigned long addr) { struct page *p; int err; int locked = 1; err = get_user_pages_locked(addr & PAGE_MASK, 1, 0, &p, &locked); if (err >= 0) { err = page_to_nid(p); put_page(p); } if (locked) up_read(&mm->mmap_sem); return err; } /* Retrieve NUMA policy */ static long do_get_mempolicy(int *policy, nodemask_t *nmask, unsigned long addr, unsigned long flags) { int err; struct mm_struct *mm = current->mm; struct vm_area_struct *vma = NULL; struct mempolicy *pol = current->mempolicy, *pol_refcount = NULL; if (flags & ~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED)) return -EINVAL; if (flags & MPOL_F_MEMS_ALLOWED) { if (flags & (MPOL_F_NODE|MPOL_F_ADDR)) return -EINVAL; *policy = 0; /* just so it's initialized */ task_lock(current); *nmask = cpuset_current_mems_allowed; task_unlock(current); return 0; } if (flags & MPOL_F_ADDR) { /* * Do NOT fall back to task policy if the * vma/shared policy at addr is NULL. We * want to return MPOL_DEFAULT in this case. */ down_read(&mm->mmap_sem); vma = find_vma_intersection(mm, addr, addr+1); if (!vma) { up_read(&mm->mmap_sem); return -EFAULT; } if (vma->vm_ops && vma->vm_ops->get_policy) pol = vma->vm_ops->get_policy(vma, addr); else pol = vma->vm_policy; } else if (addr) return -EINVAL; if (!pol) pol = &default_policy; /* indicates default behavior */ if (flags & MPOL_F_NODE) { if (flags & MPOL_F_ADDR) { /* * Take a refcount on the mpol, lookup_node() * wil drop the mmap_sem, so after calling * lookup_node() only "pol" remains valid, "vma" * is stale. */ pol_refcount = pol; vma = NULL; mpol_get(pol); err = lookup_node(mm, addr); if (err < 0) goto out; *policy = err; } else if (pol == current->mempolicy && pol->mode == MPOL_INTERLEAVE) { *policy = next_node_in(current->il_prev, pol->v.nodes); } else { err = -EINVAL; goto out; } } else { *policy = pol == &default_policy ? MPOL_DEFAULT : pol->mode; /* * Internal mempolicy flags must be masked off before exposing * the policy to userspace. */ *policy |= (pol->flags & MPOL_MODE_FLAGS); } err = 0; if (nmask) { if (mpol_store_user_nodemask(pol)) { *nmask = pol->w.user_nodemask; } else { task_lock(current); get_policy_nodemask(pol, nmask); task_unlock(current); } } out: mpol_cond_put(pol); if (vma) up_read(&mm->mmap_sem); if (pol_refcount) mpol_put(pol_refcount); return err; } #ifdef CONFIG_MIGRATION /* * page migration, thp tail pages can be passed. */ static int migrate_page_add(struct page *page, struct list_head *pagelist, unsigned long flags) { struct page *head = compound_head(page); /* * Avoid migrating a page that is shared with others. */ if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(head) == 1) { if (!isolate_lru_page(head)) { list_add_tail(&head->lru, pagelist); mod_node_page_state(page_pgdat(head), NR_ISOLATED_ANON + page_is_file_cache(head), hpage_nr_pages(head)); } else if (flags & MPOL_MF_STRICT) { /* * Non-movable page may reach here. 
And, there may be * temporary off LRU pages or non-LRU movable pages. * Treat them as unmovable pages since they can't be * isolated, so they can't be moved at the moment. It * should return -EIO for this case too. */ return -EIO; } } return 0; } /* page allocation callback for NUMA node migration */ struct page *alloc_new_node_page(struct page *page, unsigned long node) { if (PageHuge(page)) return alloc_huge_page_node(page_hstate(compound_head(page)), node); else if (PageTransHuge(page)) { struct page *thp; thp = alloc_pages_node(node, (GFP_TRANSHUGE | __GFP_THISNODE), HPAGE_PMD_ORDER); if (!thp) return NULL; prep_transhuge_page(thp); return thp; } else return __alloc_pages_node(node, GFP_HIGHUSER_MOVABLE | __GFP_THISNODE, 0); } /* * Migrate pages from one node to a target node. * Returns error or the number of pages not migrated. */ static int migrate_to_node(struct mm_struct *mm, int source, int dest, int flags) { nodemask_t nmask; LIST_HEAD(pagelist); int err = 0; nodes_clear(nmask); node_set(source, nmask); /* * This does not "check" the range but isolates all pages that * need migration. Between passing in the full user address * space range and MPOL_MF_DISCONTIG_OK, this call can not fail. */ VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))); queue_pages_range(mm, mm->mmap->vm_start, mm->task_size, &nmask, flags | MPOL_MF_DISCONTIG_OK, &pagelist); if (!list_empty(&pagelist)) { err = migrate_pages(&pagelist, alloc_new_node_page, NULL, dest, MIGRATE_SYNC, MR_SYSCALL); if (err) putback_movable_pages(&pagelist); } return err; } /* * Move pages between the two nodesets so as to preserve the physical * layout as much as possible. * * Returns the number of page that could not be moved. */ int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from, const nodemask_t *to, int flags) { int busy = 0; int err; nodemask_t tmp; err = migrate_prep(); if (err) return err; down_read(&mm->mmap_sem); /* * Find a 'source' bit set in 'tmp' whose corresponding 'dest' * bit in 'to' is not also set in 'tmp'. Clear the found 'source' * bit in 'tmp', and return that <source, dest> pair for migration. * The pair of nodemasks 'to' and 'from' define the map. * * If no pair of bits is found that way, fallback to picking some * pair of 'source' and 'dest' bits that are not the same. If the * 'source' and 'dest' bits are the same, this represents a node * that will be migrating to itself, so no pages need move. * * If no bits are left in 'tmp', or if all remaining bits left * in 'tmp' correspond to the same bit in 'to', return false * (nothing left to migrate). * * This lets us pick a pair of nodes to migrate between, such that * if possible the dest node is not already occupied by some other * source node, minimizing the risk of overloading the memory on a * node that would happen if we migrated incoming memory to a node * before migrating outgoing memory source that same node. * * A single scan of tmp is sufficient. As we go, we remember the * most recent <s, d> pair that moved (s != d). If we find a pair * that not only moved, but what's better, moved to an empty slot * (d is not set in tmp), then we break out then, with that pair. * Otherwise when we finish scanning from_tmp, we at least have the * most recent <s, d> pair that moved. If we get all the way through * the scan of tmp without finding any node that moved, much less * moved to an empty node, then there is nothing left worth migrating. 
*/ tmp = *from; while (!nodes_empty(tmp)) { int s,d; int source = NUMA_NO_NODE; int dest = 0; for_each_node_mask(s, tmp) { /* * do_migrate_pages() tries to maintain the relative * node relationship of the pages established between * threads and memory areas. * * However if the number of source nodes is not equal to * the number of destination nodes we can not preserve * this node relative relationship. In that case, skip * copying memory from a node that is in the destination * mask. * * Example: [2,3,4] -> [3,4,5] moves everything. * [0-7] - > [3,4,5] moves only 0,1,2,6,7. */ if ((nodes_weight(*from) != nodes_weight(*to)) && (node_isset(s, *to))) continue; d = node_remap(s, *from, *to); if (s == d) continue; source = s; /* Node moved. Memorize */ dest = d; /* dest not in remaining from nodes? */ if (!node_isset(dest, tmp)) break; } if (source == NUMA_NO_NODE) break; node_clear(source, tmp); err = migrate_to_node(mm, source, dest, flags); if (err > 0) busy += err; if (err < 0) break; } up_read(&mm->mmap_sem); if (err < 0) return err; return busy; } /* * Allocate a new page for page migration based on vma policy. * Start by assuming the page is mapped by the same vma as contains @start. * Search forward from there, if not. N.B., this assumes that the * list of pages handed to migrate_pages()--which is how we get here-- * is in virtual address order. */ static struct page *new_page(struct page *page, unsigned long start) { struct vm_area_struct *vma; unsigned long uninitialized_var(address); vma = find_vma(current->mm, start); while (vma) { address = page_address_in_vma(page, vma); if (address != -EFAULT) break; vma = vma->vm_next; } if (PageHuge(page)) { return alloc_huge_page_vma(page_hstate(compound_head(page)), vma, address); } else if (PageTransHuge(page)) { struct page *thp; thp = alloc_hugepage_vma(GFP_TRANSHUGE, vma, address, HPAGE_PMD_ORDER); if (!thp) return NULL; prep_transhuge_page(thp); return thp; } /* * if !vma, alloc_page_vma() will use task or system default policy */ return alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_RETRY_MAYFAIL, vma, address); } #else static int migrate_page_add(struct page *page, struct list_head *pagelist, unsigned long flags) { return -EIO; } int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from, const nodemask_t *to, int flags) { return -ENOSYS; } static struct page *new_page(struct page *page, unsigned long start) { return NULL; } #endif static long do_mbind(unsigned long start, unsigned long len, unsigned short mode, unsigned short mode_flags, nodemask_t *nmask, unsigned long flags) { struct mm_struct *mm = current->mm; struct mempolicy *new; unsigned long end; int err; int ret; LIST_HEAD(pagelist); if (flags & ~(unsigned long)MPOL_MF_VALID) return -EINVAL; if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE)) return -EPERM; if (start & ~PAGE_MASK) return -EINVAL; if (mode == MPOL_DEFAULT) flags &= ~MPOL_MF_STRICT; len = (len + PAGE_SIZE - 1) & PAGE_MASK; end = start + len; if (end < start) return -EINVAL; if (end == start) return 0; new = mpol_new(mode, mode_flags, nmask); if (IS_ERR(new)) return PTR_ERR(new); if (flags & MPOL_MF_LAZY) new->flags |= MPOL_F_MOF; /* * If we are using the default policy then operation * on discontinuous address spaces is okay after all */ if (!new) flags |= MPOL_MF_DISCONTIG_OK; pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n", start, start + len, mode, mode_flags, nmask ? 
nodes_addr(*nmask)[0] : NUMA_NO_NODE); if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) { err = migrate_prep(); if (err) goto mpol_out; } { NODEMASK_SCRATCH(scratch); if (scratch) { down_write(&mm->mmap_sem); task_lock(current); err = mpol_set_nodemask(new, nmask, scratch); task_unlock(current); if (err) up_write(&mm->mmap_sem); } else err = -ENOMEM; NODEMASK_SCRATCH_FREE(scratch); } if (err) goto mpol_out; ret = queue_pages_range(mm, start, end, nmask, flags | MPOL_MF_INVERT, &pagelist); if (ret < 0) { err = ret; goto up_out; } err = mbind_range(mm, start, end, new); if (!err) { int nr_failed = 0; if (!list_empty(&pagelist)) { WARN_ON_ONCE(flags & MPOL_MF_LAZY); nr_failed = migrate_pages(&pagelist, new_page, NULL, start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND); if (nr_failed) putback_movable_pages(&pagelist); } if ((ret > 0) || (nr_failed && (flags & MPOL_MF_STRICT))) err = -EIO; } else { up_out: if (!list_empty(&pagelist)) putback_movable_pages(&pagelist); } up_write(&mm->mmap_sem); mpol_out: mpol_put(new); return err; } /* * User space interface with variable sized bitmaps for nodelists. */ /* Copy a node mask from user space. */ static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask, unsigned long maxnode) { unsigned long k; unsigned long t; unsigned long nlongs; unsigned long endmask; --maxnode; nodes_clear(*nodes); if (maxnode == 0 || !nmask) return 0; if (maxnode > PAGE_SIZE*BITS_PER_BYTE) return -EINVAL; nlongs = BITS_TO_LONGS(maxnode); if ((maxnode % BITS_PER_LONG) == 0) endmask = ~0UL; else endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1; /* * When the user specified more nodes than supported just check * if the non supported part is all zero. * * If maxnode have more longs than MAX_NUMNODES, check * the bits in that area first. And then go through to * check the rest bits which equal or bigger than MAX_NUMNODES. * Otherwise, just check bits [MAX_NUMNODES, maxnode). */ if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) { for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) { if (get_user(t, nmask + k)) return -EFAULT; if (k == nlongs - 1) { if (t & endmask) return -EINVAL; } else if (t) return -EINVAL; } nlongs = BITS_TO_LONGS(MAX_NUMNODES); endmask = ~0UL; } if (maxnode > MAX_NUMNODES && MAX_NUMNODES % BITS_PER_LONG != 0) { unsigned long valid_mask = endmask; valid_mask &= ~((1UL << (MAX_NUMNODES % BITS_PER_LONG)) - 1); if (get_user(t, nmask + nlongs - 1)) return -EFAULT; if (t & valid_mask) return -EINVAL; } if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long))) return -EFAULT; nodes_addr(*nodes)[nlongs-1] &= endmask; return 0; } /* Copy a kernel node mask to user space */ static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode, nodemask_t *nodes) { unsigned long copy = ALIGN(maxnode-1, 64) / 8; unsigned int nbytes = BITS_TO_LONGS(nr_node_ids) * sizeof(long); if (copy > nbytes) { if (copy > PAGE_SIZE) return -EINVAL; if (clear_user((char __user *)mask + nbytes, copy - nbytes)) return -EFAULT; copy = nbytes; } return copy_to_user(mask, nodes_addr(*nodes), copy) ? 
-EFAULT : 0; } static long kernel_mbind(unsigned long start, unsigned long len, unsigned long mode, const unsigned long __user *nmask, unsigned long maxnode, unsigned int flags) { nodemask_t nodes; int err; unsigned short mode_flags; start = untagged_addr(start); mode_flags = mode & MPOL_MODE_FLAGS; mode &= ~MPOL_MODE_FLAGS; if (mode >= MPOL_MAX) return -EINVAL; if ((mode_flags & MPOL_F_STATIC_NODES) && (mode_flags & MPOL_F_RELATIVE_NODES)) return -EINVAL; err = get_nodes(&nodes, nmask, maxnode); if (err) return err; return do_mbind(start, len, mode, mode_flags, &nodes, flags); } SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len, unsigned long, mode, const unsigned long __user *, nmask, unsigned long, maxnode, unsigned int, flags) { return kernel_mbind(start, len, mode, nmask, maxnode, flags); } /* Set the process memory policy */ static long kernel_set_mempolicy(int mode, const unsigned long __user *nmask, unsigned long maxnode) { int err; nodemask_t nodes; unsigned short flags; flags = mode & MPOL_MODE_FLAGS; mode &= ~MPOL_MODE_FLAGS; if ((unsigned int)mode >= MPOL_MAX) return -EINVAL; if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES)) return -EINVAL; err = get_nodes(&nodes, nmask, maxnode); if (err) return err; return do_set_mempolicy(mode, flags, &nodes); } SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long __user *, nmask, unsigned long, maxnode) { return kernel_set_mempolicy(mode, nmask, maxnode); } static int kernel_migrate_pages(pid_t pid, unsigned long maxnode, const unsigned long __user *old_nodes, const unsigned long __user *new_nodes) { struct mm_struct *mm = NULL; struct task_struct *task; nodemask_t task_nodes; int err; nodemask_t *old; nodemask_t *new; NODEMASK_SCRATCH(scratch); if (!scratch) return -ENOMEM; old = &scratch->mask1; new = &scratch->mask2; err = get_nodes(old, old_nodes, maxnode); if (err) goto out; err = get_nodes(new, new_nodes, maxnode); if (err) goto out; /* Find the mm_struct */ rcu_read_lock(); task = pid ? find_task_by_vpid(pid) : current; if (!task) { rcu_read_unlock(); err = -ESRCH; goto out; } get_task_struct(task); err = -EINVAL; /* * Check if this process has the right to modify the specified process. * Use the regular "ptrace_may_access()" checks. */ if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) { rcu_read_unlock(); err = -EPERM; goto out_put; } rcu_read_unlock(); task_nodes = cpuset_mems_allowed(task); /* Is the user allowed to access the target nodes? */ if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) { err = -EPERM; goto out_put; } task_nodes = cpuset_mems_allowed(current); nodes_and(*new, *new, task_nodes); if (nodes_empty(*new)) goto out_put; err = security_task_movememory(task); if (err) goto out_put; mm = get_task_mm(task); put_task_struct(task); if (!mm) { err = -EINVAL; goto out; } err = do_migrate_pages(mm, old, new, capable(CAP_SYS_NICE) ? 
MPOL_MF_MOVE_ALL : MPOL_MF_MOVE); mmput(mm); out: NODEMASK_SCRATCH_FREE(scratch); return err; out_put: put_task_struct(task); goto out; } SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode, const unsigned long __user *, old_nodes, const unsigned long __user *, new_nodes) { return kernel_migrate_pages(pid, maxnode, old_nodes, new_nodes); } /* Retrieve NUMA policy */ static int kernel_get_mempolicy(int __user *policy, unsigned long __user *nmask, unsigned long maxnode, unsigned long addr, unsigned long flags) { int err; int uninitialized_var(pval); nodemask_t nodes; addr = untagged_addr(addr); if (nmask != NULL && maxnode < nr_node_ids) return -EINVAL; err = do_get_mempolicy(&pval, &nodes, addr, flags); if (err) return err; if (policy && put_user(pval, policy)) return -EFAULT; if (nmask) err = copy_nodes_to_user(nmask, maxnode, &nodes); return err; } SYSCALL_DEFINE5(get_mempolicy, int __user *, policy, unsigned long __user *, nmask, unsigned long, maxnode, unsigned long, addr, unsigned long, flags) { return kernel_get_mempolicy(policy, nmask, maxnode, addr, flags); } #ifdef CONFIG_COMPAT COMPAT_SYSCALL_DEFINE5(get_mempolicy, int __user *, policy, compat_ulong_t __user *, nmask, compat_ulong_t, maxnode, compat_ulong_t, addr, compat_ulong_t, flags) { long err; unsigned long __user *nm = NULL; unsigned long nr_bits, alloc_size; DECLARE_BITMAP(bm, MAX_NUMNODES); nr_bits = min_t(unsigned long, maxnode-1, nr_node_ids); alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8; if (nmask) nm = compat_alloc_user_space(alloc_size); err = kernel_get_mempolicy(policy, nm, nr_bits+1, addr, flags); if (!err && nmask) { unsigned long copy_size; copy_size = min_t(unsigned long, sizeof(bm), alloc_size); err = copy_from_user(bm, nm, copy_size); /* ensure entire bitmap is zeroed */ err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8); err |= compat_put_bitmap(nmask, bm, nr_bits); } return err; } COMPAT_SYSCALL_DEFINE3(set_mempolicy, int, mode, compat_ulong_t __user *, nmask, compat_ulong_t, maxnode) { unsigned long __user *nm = NULL; unsigned long nr_bits, alloc_size; DECLARE_BITMAP(bm, MAX_NUMNODES); nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES); alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8; if (nmask) { if (compat_get_bitmap(bm, nmask, nr_bits)) return -EFAULT; nm = compat_alloc_user_space(alloc_size); if (copy_to_user(nm, bm, alloc_size)) return -EFAULT; } return kernel_set_mempolicy(mode, nm, nr_bits+1); } COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len, compat_ulong_t, mode, compat_ulong_t __user *, nmask, compat_ulong_t, maxnode, compat_ulong_t, flags) { unsigned long __user *nm = NULL; unsigned long nr_bits, alloc_size; nodemask_t bm; nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES); alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8; if (nmask) { if (compat_get_bitmap(nodes_addr(bm), nmask, nr_bits)) return -EFAULT; nm = compat_alloc_user_space(alloc_size); if (copy_to_user(nm, nodes_addr(bm), alloc_size)) return -EFAULT; } return kernel_mbind(start, len, mode, nm, nr_bits+1, flags); } COMPAT_SYSCALL_DEFINE4(migrate_pages, compat_pid_t, pid, compat_ulong_t, maxnode, const compat_ulong_t __user *, old_nodes, const compat_ulong_t __user *, new_nodes) { unsigned long __user *old = NULL; unsigned long __user *new = NULL; nodemask_t tmp_mask; unsigned long nr_bits; unsigned long size; nr_bits = min_t(unsigned long, maxnode - 1, MAX_NUMNODES); size = ALIGN(nr_bits, BITS_PER_LONG) / 8; if (old_nodes) { if (compat_get_bitmap(nodes_addr(tmp_mask), old_nodes, nr_bits)) 
return -EFAULT; old = compat_alloc_user_space(new_nodes ? size * 2 : size); if (new_nodes) new = old + size / sizeof(unsigned long); if (copy_to_user(old, nodes_addr(tmp_mask), size)) return -EFAULT; } if (new_nodes) { if (compat_get_bitmap(nodes_addr(tmp_mask), new_nodes, nr_bits)) return -EFAULT; if (new == NULL) new = compat_alloc_user_space(size); if (copy_to_user(new, nodes_addr(tmp_mask), size)) return -EFAULT; } return kernel_migrate_pages(pid, nr_bits + 1, old, new); } #endif /* CONFIG_COMPAT */ bool vma_migratable(struct vm_area_struct *vma) { if (vma->vm_flags & (VM_IO | VM_PFNMAP)) return false; /* * DAX device mappings require predictable access latency, so avoid * incurring periodic faults. */ if (vma_is_dax(vma)) return false; if (is_vm_hugetlb_page(vma) && !hugepage_migration_supported(hstate_vma(vma))) return false; /* * Migration allocates pages in the highest zone. If we cannot * do so then migration (at least from node to node) is not * possible. */ if (vma->vm_file && gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping)) < policy_zone) return false; return true; } struct mempolicy *__get_vma_policy(struct vm_area_struct *vma, unsigned long addr) { struct mempolicy *pol = NULL; if (vma) { if (vma->vm_ops && vma->vm_ops->get_policy) { pol = vma->vm_ops->get_policy(vma, addr); } else if (vma->vm_policy) { pol = vma->vm_policy; /* * shmem_alloc_page() passes MPOL_F_SHARED policy with * a pseudo vma whose vma->vm_ops=NULL. Take a reference * count on these policies which will be dropped by * mpol_cond_put() later */ if (mpol_needs_cond_ref(pol)) mpol_get(pol); } } return pol; } /* * get_vma_policy(@vma, @addr) * @vma: virtual memory area whose policy is sought * @addr: address in @vma for shared policy lookup * * Returns effective policy for a VMA at specified address. * Falls back to current->mempolicy or system default policy, as necessary. * Shared policies [those marked as MPOL_F_SHARED] require an extra reference * count--added by the get_policy() vm_op, as appropriate--to protect against * freeing by another task. It is the caller's responsibility to free the * extra reference for shared policies. */ static struct mempolicy *get_vma_policy(struct vm_area_struct *vma, unsigned long addr) { struct mempolicy *pol = __get_vma_policy(vma, addr); if (!pol) pol = get_task_policy(current); return pol; } bool vma_policy_mof(struct vm_area_struct *vma) { struct mempolicy *pol; if (vma->vm_ops && vma->vm_ops->get_policy) { bool ret = false; pol = vma->vm_ops->get_policy(vma, vma->vm_start); if (pol && (pol->flags & MPOL_F_MOF)) ret = true; mpol_cond_put(pol); return ret; } pol = vma->vm_policy; if (!pol) pol = get_task_policy(current); return pol->flags & MPOL_F_MOF; } static int apply_policy_zone(struct mempolicy *policy, enum zone_type zone) { enum zone_type dynamic_policy_zone = policy_zone; BUG_ON(dynamic_policy_zone == ZONE_MOVABLE); /* * if policy->v.nodes has movable memory only, * we apply policy when gfp_zone(gfp) = ZONE_MOVABLE only. * * policy->v.nodes is intersect with node_states[N_MEMORY]. * so if the following test faile, it implies * policy->v.nodes has movable memory only. 
*/ if (!nodes_intersects(policy->v.nodes, node_states[N_HIGH_MEMORY])) dynamic_policy_zone = ZONE_MOVABLE; return zone >= dynamic_policy_zone; } /* * Return a nodemask representing a mempolicy for filtering nodes for * page allocation */ static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy) { /* Lower zones don't get a nodemask applied for MPOL_BIND */ if (unlikely(policy->mode == MPOL_BIND) && apply_policy_zone(policy, gfp_zone(gfp)) && cpuset_nodemask_valid_mems_allowed(&policy->v.nodes)) return &policy->v.nodes; return NULL; } /* Return the node id preferred by the given mempolicy, or the given id */ static int policy_node(gfp_t gfp, struct mempolicy *policy, int nd) { if (policy->mode == MPOL_PREFERRED && !(policy->flags & MPOL_F_LOCAL)) nd = policy->v.preferred_node; else { /* * __GFP_THISNODE shouldn't even be used with the bind policy * because we might easily break the expectation to stay on the * requested node and not break the policy. */ WARN_ON_ONCE(policy->mode == MPOL_BIND && (gfp & __GFP_THISNODE)); } return nd; } /* Do dynamic interleaving for a process */ static unsigned interleave_nodes(struct mempolicy *policy) { unsigned next; struct task_struct *me = current; next = next_node_in(me->il_prev, policy->v.nodes); if (next < MAX_NUMNODES) me->il_prev = next; return next; } /* * Depending on the memory policy provide a node from which to allocate the * next slab entry. */ unsigned int mempolicy_slab_node(void) { struct mempolicy *policy; int node = numa_mem_id(); if (in_interrupt()) return node; policy = current->mempolicy; if (!policy || policy->flags & MPOL_F_LOCAL) return node; switch (policy->mode) { case MPOL_PREFERRED: /* * handled MPOL_F_LOCAL above */ return policy->v.preferred_node; case MPOL_INTERLEAVE: return interleave_nodes(policy); case MPOL_BIND: { struct zoneref *z; /* * Follow bind policy behavior and start allocation at the * first node. */ struct zonelist *zonelist; enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL); zonelist = &NODE_DATA(node)->node_zonelists[ZONELIST_FALLBACK]; z = first_zones_zonelist(zonelist, highest_zoneidx, &policy->v.nodes); return z->zone ? zone_to_nid(z->zone) : node; } default: BUG(); } } /* * Do static interleaving for a VMA with known offset @n. Returns the n'th * node in pol->v.nodes (starting from n=0), wrapping around if n exceeds the * number of present nodes. */ static unsigned offset_il_node(struct mempolicy *pol, unsigned long n) { unsigned nnodes = nodes_weight(pol->v.nodes); unsigned target; int i; int nid; if (!nnodes) return numa_node_id(); target = (unsigned int)n % nnodes; nid = first_node(pol->v.nodes); for (i = 0; i < target; i++) nid = next_node(nid, pol->v.nodes); return nid; } /* Determine a node number for interleave */ static inline unsigned interleave_nid(struct mempolicy *pol, struct vm_area_struct *vma, unsigned long addr, int shift) { if (vma) { unsigned long off; /* * for small pages, there is no difference between * shift and PAGE_SHIFT, so the bit-shift is safe. * for huge pages, since vm_pgoff is in units of small * pages, we need to shift off the always 0 bits to get * a useful offset. 
*/ BUG_ON(shift < PAGE_SHIFT); off = vma->vm_pgoff >> (shift - PAGE_SHIFT); off += (addr - vma->vm_start) >> shift; return offset_il_node(pol, off); } else return interleave_nodes(pol); } #ifdef CONFIG_HUGETLBFS /* * huge_node(@vma, @addr, @gfp_flags, @mpol) * @vma: virtual memory area whose policy is sought * @addr: address in @vma for shared policy lookup and interleave policy * @gfp_flags: for requested zone * @mpol: pointer to mempolicy pointer for reference counted mempolicy * @nodemask: pointer to nodemask pointer for MPOL_BIND nodemask * * Returns a nid suitable for a huge page allocation and a pointer * to the struct mempolicy for conditional unref after allocation. * If the effective policy is 'BIND, returns a pointer to the mempolicy's * @nodemask for filtering the zonelist. * * Must be protected by read_mems_allowed_begin() */ int huge_node(struct vm_area_struct *vma, unsigned long addr, gfp_t gfp_flags, struct mempolicy **mpol, nodemask_t **nodemask) { int nid; *mpol = get_vma_policy(vma, addr); *nodemask = NULL; /* assume !MPOL_BIND */ if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) { nid = interleave_nid(*mpol, vma, addr, huge_page_shift(hstate_vma(vma))); } else { nid = policy_node(gfp_flags, *mpol, numa_node_id()); if ((*mpol)->mode == MPOL_BIND) *nodemask = &(*mpol)->v.nodes; } return nid; } /* * init_nodemask_of_mempolicy * * If the current task's mempolicy is "default" [NULL], return 'false' * to indicate default policy. Otherwise, extract the policy nodemask * for 'bind' or 'interleave' policy into the argument nodemask, or * initialize the argument nodemask to contain the single node for * 'preferred' or 'local' policy and return 'true' to indicate presence * of non-default mempolicy. * * We don't bother with reference counting the mempolicy [mpol_get/put] * because the current task is examining it's own mempolicy and a task's * mempolicy is only ever changed by the task itself. * * N.B., it is the caller's responsibility to free a returned nodemask. */ bool init_nodemask_of_mempolicy(nodemask_t *mask) { struct mempolicy *mempolicy; int nid; if (!(mask && current->mempolicy)) return false; task_lock(current); mempolicy = current->mempolicy; switch (mempolicy->mode) { case MPOL_PREFERRED: if (mempolicy->flags & MPOL_F_LOCAL) nid = numa_node_id(); else nid = mempolicy->v.preferred_node; init_nodemask_of_node(mask, nid); break; case MPOL_BIND: /* Fall through */ case MPOL_INTERLEAVE: *mask = mempolicy->v.nodes; break; default: BUG(); } task_unlock(current); return true; } #endif /* * mempolicy_nodemask_intersects * * If tsk's mempolicy is "default" [NULL], return 'true' to indicate default * policy. Otherwise, check for intersection between mask and the policy * nodemask for 'bind' or 'interleave' policy. For 'perferred' or 'local' * policy, always return true since it may allocate elsewhere on fallback. * * Takes task_lock(tsk) to prevent freeing of its mempolicy. */ bool mempolicy_nodemask_intersects(struct task_struct *tsk, const nodemask_t *mask) { struct mempolicy *mempolicy; bool ret = true; if (!mask) return ret; task_lock(tsk); mempolicy = tsk->mempolicy; if (!mempolicy) goto out; switch (mempolicy->mode) { case MPOL_PREFERRED: /* * MPOL_PREFERRED and MPOL_F_LOCAL are only preferred nodes to * allocate from, they may fallback to other nodes when oom. * Thus, it's possible for tsk to have allocated memory from * nodes in mask. 
*/ break; case MPOL_BIND: case MPOL_INTERLEAVE: ret = nodes_intersects(mempolicy->v.nodes, *mask); break; default: BUG(); } out: task_unlock(tsk); return ret; } /* Allocate a page in interleaved policy. Own path because it needs to do special accounting. */ static struct page *alloc_page_interleave(gfp_t gfp, unsigned order, unsigned nid) { struct page *page; page = __alloc_pages(gfp, order, nid); /* skip NUMA_INTERLEAVE_HIT counter update if numa stats is disabled */ if (!static_branch_likely(&vm_numa_stat_key)) return page; if (page && page_to_nid(page) == nid) { preempt_disable(); __inc_numa_state(page_zone(page), NUMA_INTERLEAVE_HIT); preempt_enable(); } return page; } /** * alloc_pages_vma - Allocate a page for a VMA. * * @gfp: * %GFP_USER user allocation. * %GFP_KERNEL kernel allocations, * %GFP_HIGHMEM highmem/user allocations, * %GFP_FS allocation should not call back into a file system. * %GFP_ATOMIC don't sleep. * * @order:Order of the GFP allocation. * @vma: Pointer to VMA or NULL if not available. * @addr: Virtual Address of the allocation. Must be inside the VMA. * @node: Which node to prefer for allocation (modulo policy). * @hugepage: for hugepages try only the preferred node if possible * * This function allocates a page from the kernel page pool and applies * a NUMA policy associated with the VMA or the current process. * When VMA is not NULL caller must hold down_read on the mmap_sem of the * mm_struct of the VMA to prevent it from going away. Should be used for * all allocations for pages that will be mapped into user space. Returns * NULL when no page can be allocated. */ struct page * alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma, unsigned long addr, int node, bool hugepage) { struct mempolicy *pol; struct page *page; int preferred_nid; nodemask_t *nmask; pol = get_vma_policy(vma, addr); if (pol->mode == MPOL_INTERLEAVE) { unsigned nid; nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order); mpol_cond_put(pol); page = alloc_page_interleave(gfp, order, nid); goto out; } if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage)) { int hpage_node = node; /* * For hugepage allocation and non-interleave policy which * allows the current node (or other explicitly preferred * node) we only try to allocate from the current/preferred * node and don't fall back to other nodes, as the cost of * remote accesses would likely offset THP benefits. * * If the policy is interleave, or does not allow the current * node in its nodemask, we allocate the standard way. */ if (pol->mode == MPOL_PREFERRED && !(pol->flags & MPOL_F_LOCAL)) hpage_node = pol->v.preferred_node; nmask = policy_nodemask(gfp, pol); if (!nmask || node_isset(hpage_node, *nmask)) { mpol_cond_put(pol); /* * First, try to allocate THP only on local node, but * don't reclaim unnecessarily, just compact. */ page = __alloc_pages_node(hpage_node, gfp | __GFP_THISNODE | __GFP_NORETRY, order); /* * If hugepage allocations are configured to always * synchronous compact or the vma has been madvised * to prefer hugepage backing, retry allowing remote * memory with both reclaim and compact as well. */ if (!page && (gfp & __GFP_DIRECT_RECLAIM)) page = __alloc_pages_node(hpage_node, gfp, order); goto out; } } nmask = policy_nodemask(gfp, pol); preferred_nid = policy_node(gfp, pol, node); page = __alloc_pages_nodemask(gfp, order, preferred_nid, nmask); mpol_cond_put(pol); out: return page; } EXPORT_SYMBOL(alloc_pages_vma); /** * alloc_pages_current - Allocate pages. 
* * @gfp: * %GFP_USER user allocation, * %GFP_KERNEL kernel allocation, * %GFP_HIGHMEM highmem allocation, * %GFP_FS don't call back into a file system. * %GFP_ATOMIC don't sleep. * @order: Power of two of allocation size in pages. 0 is a single page. * * Allocate a page from the kernel page pool. When not in * interrupt context, apply the current process NUMA policy. * Returns NULL when no page can be allocated. */ struct page *alloc_pages_current(gfp_t gfp, unsigned order) { struct mempolicy *pol = &default_policy; struct page *page; if (!in_interrupt() && !(gfp & __GFP_THISNODE)) pol = get_task_policy(current); /* * No reference counting needed for current->mempolicy * nor system default_policy */ if (pol->mode == MPOL_INTERLEAVE) page = alloc_page_interleave(gfp, order, interleave_nodes(pol)); else page = __alloc_pages_nodemask(gfp, order, policy_node(gfp, pol, numa_node_id()), policy_nodemask(gfp, pol)); return page; } EXPORT_SYMBOL(alloc_pages_current); int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst) { struct mempolicy *pol = mpol_dup(vma_policy(src)); if (IS_ERR(pol)) return PTR_ERR(pol); dst->vm_policy = pol; return 0; } /* * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it * rebinds the mempolicy it's copying by calling mpol_rebind_policy() * with the mems_allowed returned by cpuset_mems_allowed(). This * keeps mempolicies cpuset relative after its cpuset moves. See * further kernel/cpuset.c update_nodemask(). * * current's mempolicy may be rebound by the other task (the task that changes * cpuset's mems), so we needn't do rebind work for the current task. */ /* Slow path of a mempolicy duplicate */ struct mempolicy *__mpol_dup(struct mempolicy *old) { struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL); if (!new) return ERR_PTR(-ENOMEM); /* task's mempolicy is protected by alloc_lock */ if (old == current->mempolicy) { task_lock(current); *new = *old; task_unlock(current); } else *new = *old; if (current_cpuset_is_being_rebound()) { nodemask_t mems = cpuset_mems_allowed(current); mpol_rebind_policy(new, &mems); } atomic_set(&new->refcnt, 1); return new; } /* Slow path of a mempolicy comparison */ bool __mpol_equal(struct mempolicy *a, struct mempolicy *b) { if (!a || !b) return false; if (a->mode != b->mode) return false; if (a->flags != b->flags) return false; if (mpol_store_user_nodemask(a)) if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask)) return false; switch (a->mode) { case MPOL_BIND: /* Fall through */ case MPOL_INTERLEAVE: return !!nodes_equal(a->v.nodes, b->v.nodes); case MPOL_PREFERRED: /* a's ->flags is the same as b's */ if (a->flags & MPOL_F_LOCAL) return true; return a->v.preferred_node == b->v.preferred_node; default: BUG(); return false; } } /* * Shared memory backing store policy support. * * Remember policies even when nobody has shared memory mapped. * The policies are kept in a Red-Black tree linked from the inode. * They are protected by the sp->lock rwlock, which should be held * for any accesses to the tree. */ /* * Lookup first element intersecting start-end.
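 * Returns NULL when no element in the tree intersects the range.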
Caller holds sp->lock for * reading or for writing */ static struct sp_node * sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end) { struct rb_node *n = sp->root.rb_node; while (n) { struct sp_node *p = rb_entry(n, struct sp_node, nd); if (start >= p->end) n = n->rb_right; else if (end <= p->start) n = n->rb_left; else break; } if (!n) return NULL; for (;;) { struct sp_node *w = NULL; struct rb_node *prev = rb_prev(n); if (!prev) break; w = rb_entry(prev, struct sp_node, nd); if (w->end <= start) break; n = prev; } return rb_entry(n, struct sp_node, nd); } /* * Insert a new shared policy into the list. Caller holds sp->lock for * writing. */ static void sp_insert(struct shared_policy *sp, struct sp_node *new) { struct rb_node **p = &sp->root.rb_node; struct rb_node *parent = NULL; struct sp_node *nd; while (*p) { parent = *p; nd = rb_entry(parent, struct sp_node, nd); if (new->start < nd->start) p = &(*p)->rb_left; else if (new->end > nd->end) p = &(*p)->rb_right; else BUG(); } rb_link_node(&new->nd, parent, p); rb_insert_color(&new->nd, &sp->root); pr_debug("inserting %lx-%lx: %d\n", new->start, new->end, new->policy ? new->policy->mode : 0); } /* Find shared policy intersecting idx */ struct mempolicy * mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx) { struct mempolicy *pol = NULL; struct sp_node *sn; if (!sp->root.rb_node) return NULL; read_lock(&sp->lock); sn = sp_lookup(sp, idx, idx+1); if (sn) { mpol_get(sn->policy); pol = sn->policy; } read_unlock(&sp->lock); return pol; } static void sp_free(struct sp_node *n) { mpol_put(n->policy); kmem_cache_free(sn_cache, n); } /** * mpol_misplaced - check whether current page node is valid in policy * * @page: page to be checked * @vma: vm area where page mapped * @addr: virtual address where page mapped * * Lookup current policy node id for vma,addr and "compare to" page's * node id. * * Returns: * -1 - not misplaced, page is in the right node * node - node id where the page should be * * Policy determination "mimics" alloc_page_vma(). * Called from fault path where we know the vma and faulting address. */ int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr) { struct mempolicy *pol; struct zoneref *z; int curnid = page_to_nid(page); unsigned long pgoff; int thiscpu = raw_smp_processor_id(); int thisnid = cpu_to_node(thiscpu); int polnid = NUMA_NO_NODE; int ret = -1; pol = get_vma_policy(vma, addr); if (!(pol->flags & MPOL_F_MOF)) goto out; switch (pol->mode) { case MPOL_INTERLEAVE: pgoff = vma->vm_pgoff; pgoff += (addr - vma->vm_start) >> PAGE_SHIFT; polnid = offset_il_node(pol, pgoff); break; case MPOL_PREFERRED: if (pol->flags & MPOL_F_LOCAL) polnid = numa_node_id(); else polnid = pol->v.preferred_node; break; case MPOL_BIND: /* * allows binding to multiple nodes. * use current page if in policy nodemask, * else select nearest allowed node, if any. * If no allowed nodes, use current [!misplaced]. */ if (node_isset(curnid, pol->v.nodes)) goto out; z = first_zones_zonelist( node_zonelist(numa_node_id(), GFP_HIGHUSER), gfp_zone(GFP_HIGHUSER), &pol->v.nodes); polnid = zone_to_nid(z->zone); break; default: BUG(); } /* Migrate the page towards the node whose CPU is referencing it */ if (pol->flags & MPOL_F_MORON) { polnid = thisnid; if (!should_numa_migrate_memory(current, page, curnid, thiscpu)) goto out; } if (curnid != polnid) ret = polnid; out: mpol_cond_put(pol); return ret; } /* * Drop the (possibly final) reference to task->mempolicy. 
It needs to be * dropped after task->mempolicy is set to NULL so that any allocation done as * part of its kmem_cache_free(), such as by KASAN, doesn't reference a freed * policy. */ void mpol_put_task_policy(struct task_struct *task) { struct mempolicy *pol; task_lock(task); pol = task->mempolicy; task->mempolicy = NULL; task_unlock(task); mpol_put(pol); } static void sp_delete(struct shared_policy *sp, struct sp_node *n) { pr_debug("deleting %lx-%lx\n", n->start, n->end); rb_erase(&n->nd, &sp->root); sp_free(n); } static void sp_node_init(struct sp_node *node, unsigned long start, unsigned long end, struct mempolicy *pol) { node->start = start; node->end = end; node->policy = pol; } static struct sp_node *sp_alloc(unsigned long start, unsigned long end, struct mempolicy *pol) { struct sp_node *n; struct mempolicy *newpol; n = kmem_cache_alloc(sn_cache, GFP_KERNEL); if (!n) return NULL; newpol = mpol_dup(pol); if (IS_ERR(newpol)) { kmem_cache_free(sn_cache, n); return NULL; } newpol->flags |= MPOL_F_SHARED; sp_node_init(n, start, end, newpol); return n; } /* Replace a policy range. */ static int shared_policy_replace(struct shared_policy *sp, unsigned long start, unsigned long end, struct sp_node *new) { struct sp_node *n; struct sp_node *n_new = NULL; struct mempolicy *mpol_new = NULL; int ret = 0; restart: write_lock(&sp->lock); n = sp_lookup(sp, start, end); /* Take care of old policies in the same range. */ while (n && n->start < end) { struct rb_node *next = rb_next(&n->nd); if (n->start >= start) { if (n->end <= end) sp_delete(sp, n); else n->start = end; } else { /* Old policy spanning whole new range. */ if (n->end > end) { if (!n_new) goto alloc_new; *mpol_new = *n->policy; atomic_set(&mpol_new->refcnt, 1); sp_node_init(n_new, end, n->end, mpol_new); n->end = start; sp_insert(sp, n_new); n_new = NULL; mpol_new = NULL; break; } else n->end = start; } if (!next) break; n = rb_entry(next, struct sp_node, nd); } if (new) sp_insert(sp, new); write_unlock(&sp->lock); ret = 0; err_out: if (mpol_new) mpol_put(mpol_new); if (n_new) kmem_cache_free(sn_cache, n_new); return ret; alloc_new: write_unlock(&sp->lock); ret = -ENOMEM; n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL); if (!n_new) goto err_out; mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL); if (!mpol_new) goto err_out; goto restart; } /** * mpol_shared_policy_init - initialize shared policy for inode * @sp: pointer to inode shared policy * @mpol: struct mempolicy to install * * Install non-NULL @mpol in inode's shared policy rb-tree. * On entry, the current task has a reference on a non-NULL @mpol. * This must be released on exit. * This is called during get_inode() calls, so we can use GFP_KERNEL.
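 * (e.g. tmpfs passes the parsed mpol= mount option in here when setting up an inode's shared policy.)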
*/ void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol) { int ret; sp->root = RB_ROOT; /* empty tree == default mempolicy */ rwlock_init(&sp->lock); if (mpol) { struct vm_area_struct pvma; struct mempolicy *new; NODEMASK_SCRATCH(scratch); if (!scratch) goto put_mpol; /* contextualize the tmpfs mount point mempolicy */ new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask); if (IS_ERR(new)) goto free_scratch; /* no valid nodemask intersection */ task_lock(current); ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch); task_unlock(current); if (ret) goto put_new; /* Create pseudo-vma that contains just the policy */ vma_init(&pvma, NULL); pvma.vm_end = TASK_SIZE; /* policy covers entire file */ mpol_set_shared_policy(sp, &pvma, new); /* adds ref */ put_new: mpol_put(new); /* drop initial ref */ free_scratch: NODEMASK_SCRATCH_FREE(scratch); put_mpol: mpol_put(mpol); /* drop our incoming ref on sb mpol */ } } int mpol_set_shared_policy(struct shared_policy *info, struct vm_area_struct *vma, struct mempolicy *npol) { int err; struct sp_node *new = NULL; unsigned long sz = vma_pages(vma); pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n", vma->vm_pgoff, sz, npol ? npol->mode : -1, npol ? npol->flags : -1, npol ? nodes_addr(npol->v.nodes)[0] : NUMA_NO_NODE); if (npol) { new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol); if (!new) return -ENOMEM; } err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new); if (err && new) sp_free(new); return err; } /* Free a backing policy store on inode delete. */ void mpol_free_shared_policy(struct shared_policy *p) { struct sp_node *n; struct rb_node *next; if (!p->root.rb_node) return; write_lock(&p->lock); next = rb_first(&p->root); while (next) { n = rb_entry(next, struct sp_node, nd); next = rb_next(&n->nd); sp_delete(p, n); } write_unlock(&p->lock); } #ifdef CONFIG_NUMA_BALANCING static int __initdata numabalancing_override; static void __init check_numabalancing_enable(void) { bool numabalancing_default = false; if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED)) numabalancing_default = true; /* Parsed by setup_numabalancing. override == 1 enables, -1 disables */ if (numabalancing_override) set_numabalancing_state(numabalancing_override == 1); if (num_online_nodes() > 1 && !numabalancing_override) { pr_info("%s automatic NUMA balancing. Configure with numa_balancing= or the kernel.numa_balancing sysctl\n", numabalancing_default ? 
"Enabling" : "Disabling"); set_numabalancing_state(numabalancing_default); } } static int __init setup_numabalancing(char *str) { int ret = 0; if (!str) goto out; if (!strcmp(str, "enable")) { numabalancing_override = 1; ret = 1; } else if (!strcmp(str, "disable")) { numabalancing_override = -1; ret = 1; } out: if (!ret) pr_warn("Unable to parse numa_balancing=\n"); return ret; } __setup("numa_balancing=", setup_numabalancing); #else static inline void __init check_numabalancing_enable(void) { } #endif /* CONFIG_NUMA_BALANCING */ /* assumes fs == KERNEL_DS */ void __init numa_policy_init(void) { nodemask_t interleave_nodes; unsigned long largest = 0; int nid, prefer = 0; policy_cache = kmem_cache_create("numa_policy", sizeof(struct mempolicy), 0, SLAB_PANIC, NULL); sn_cache = kmem_cache_create("shared_policy_node", sizeof(struct sp_node), 0, SLAB_PANIC, NULL); for_each_node(nid) { preferred_node_policy[nid] = (struct mempolicy) { .refcnt = ATOMIC_INIT(1), .mode = MPOL_PREFERRED, .flags = MPOL_F_MOF | MPOL_F_MORON, .v = { .preferred_node = nid, }, }; } /* * Set interleaving policy for system init. Interleaving is only * enabled across suitably sized nodes (default is >= 16MB), or * fall back to the largest node if they're all smaller. */ nodes_clear(interleave_nodes); for_each_node_state(nid, N_MEMORY) { unsigned long total_pages = node_present_pages(nid); /* Preserve the largest node */ if (largest < total_pages) { largest = total_pages; prefer = nid; } /* Interleave this node? */ if ((total_pages << PAGE_SHIFT) >= (16 << 20)) node_set(nid, interleave_nodes); } /* All too small, use the largest */ if (unlikely(nodes_empty(interleave_nodes))) node_set(prefer, interleave_nodes); if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes)) pr_err("%s: interleaving failed\n", __func__); check_numabalancing_enable(); } /* Reset policy of current process to default */ void numa_default_policy(void) { do_set_mempolicy(MPOL_DEFAULT, 0, NULL); } /* * Parse and format mempolicy from/to strings */ /* * "local" is implemented internally by MPOL_PREFERRED with MPOL_F_LOCAL flag. */ static const char * const policy_modes[] = { [MPOL_DEFAULT] = "default", [MPOL_PREFERRED] = "prefer", [MPOL_BIND] = "bind", [MPOL_INTERLEAVE] = "interleave", [MPOL_LOCAL] = "local", }; #ifdef CONFIG_TMPFS /** * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option. * @str: string containing mempolicy to parse * @mpol: pointer to struct mempolicy pointer, returned on success. 
* * Format of input: * <mode>[=<flags>][:<nodelist>] * * On success, returns 0, else 1 */ int mpol_parse_str(char *str, struct mempolicy **mpol) { struct mempolicy *new = NULL; unsigned short mode_flags; nodemask_t nodes; char *nodelist = strchr(str, ':'); char *flags = strchr(str, '='); int err = 1, mode; if (flags) *flags++ = '\0'; /* terminate mode string */ if (nodelist) { /* NUL-terminate mode or flags string */ *nodelist++ = '\0'; if (nodelist_parse(nodelist, nodes)) goto out; if (!nodes_subset(nodes, node_states[N_MEMORY])) goto out; } else nodes_clear(nodes); mode = match_string(policy_modes, MPOL_MAX, str); if (mode < 0) goto out; switch (mode) { case MPOL_PREFERRED: /* * Insist on a nodelist of one node only */ if (nodelist) { char *rest = nodelist; while (isdigit(*rest)) rest++; if (*rest) goto out; } break; case MPOL_INTERLEAVE: /* * Default to online nodes with memory if no nodelist */ if (!nodelist) nodes = node_states[N_MEMORY]; break; case MPOL_LOCAL: /* * Don't allow a nodelist; mpol_new() checks flags */ if (nodelist) goto out; mode = MPOL_PREFERRED; break; case MPOL_DEFAULT: /* * Insist on an empty nodelist */ if (!nodelist) err = 0; goto out; case MPOL_BIND: /* * Insist on a nodelist */ if (!nodelist) goto out; } mode_flags = 0; if (flags) { /* * Currently, we only support two mutually exclusive * mode flags. */ if (!strcmp(flags, "static")) mode_flags |= MPOL_F_STATIC_NODES; else if (!strcmp(flags, "relative")) mode_flags |= MPOL_F_RELATIVE_NODES; else goto out; } new = mpol_new(mode, mode_flags, &nodes); if (IS_ERR(new)) goto out; /* * Save nodes for mpol_to_str() to show the tmpfs mount options * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo. */ if (mode != MPOL_PREFERRED) new->v.nodes = nodes; else if (nodelist) new->v.preferred_node = first_node(nodes); else new->flags |= MPOL_F_LOCAL; /* * Save nodes for contextualization: this will be used to "clone" * the mempolicy in a specific context [cpuset] at a later time. */ new->w.user_nodemask = nodes; err = 0; out: /* Restore string for error message */ if (nodelist) *--nodelist = ':'; if (flags) *--flags = '='; if (!err) *mpol = new; return err; } #endif /* CONFIG_TMPFS */ /** * mpol_to_str - format a mempolicy structure for printing * @buffer: to contain formatted mempolicy string * @maxlen: length of @buffer * @pol: pointer to mempolicy to be formatted * * Convert @pol into a string. If @buffer is too short, truncate the string. * Recommend a @maxlen of at least 32 for the longest mode, "interleave", the * longest flag, "relative", and to display at least a few node ids.
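 * For example, an interleave policy over nodes 0-3 with no mode flags is formatted as "interleave:0-3".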
*/ void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol) { char *p = buffer; nodemask_t nodes = NODE_MASK_NONE; unsigned short mode = MPOL_DEFAULT; unsigned short flags = 0; if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) { mode = pol->mode; flags = pol->flags; } switch (mode) { case MPOL_DEFAULT: break; case MPOL_PREFERRED: if (flags & MPOL_F_LOCAL) mode = MPOL_LOCAL; else node_set(pol->v.preferred_node, nodes); break; case MPOL_BIND: case MPOL_INTERLEAVE: nodes = pol->v.nodes; break; default: WARN_ON_ONCE(1); snprintf(p, maxlen, "unknown"); return; } p += snprintf(p, maxlen, "%s", policy_modes[mode]); if (flags & MPOL_MODE_FLAGS) { p += snprintf(p, buffer + maxlen - p, "="); /* * Currently, the only defined flags are mutually exclusive */ if (flags & MPOL_F_STATIC_NODES) p += snprintf(p, buffer + maxlen - p, "static"); else if (flags & MPOL_F_RELATIVE_NODES) p += snprintf(p, buffer + maxlen - p, "relative"); } if (!nodes_empty(nodes)) p += scnprintf(p, buffer + maxlen - p, ":%*pbl", nodemask_pr_args(&nodes)); }
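/* Illustrative usage sketch (not part of the original file): mpol_parse_str() and mpol_to_str() round-trip the tmpfs mpol= mount option strings. E.g. "interleave:0-3" parses to MPOL_INTERLEAVE over nodes 0-3 and formats back to "interleave:0-3", while "prefer=static:1" parses to MPOL_PREFERRED with MPOL_F_STATIC_NODES on node 1 and formats back unchanged. */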
// SPDX-License-Identifier: GPL-2.0-only /* * Simple NUMA memory policy for the Linux kernel. * * Copyright 2003,2004 Andi Kleen, SuSE Labs. * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc. * * NUMA policy allows the user to give hints about which node(s) memory * should be allocated on. * * Support four policies per VMA and per process: * * The VMA policy has priority over the process policy for a page fault. * * interleave Allocate memory interleaved over a set of nodes, * with normal fallback if it fails. * For VMA based allocations this interleaves based on the * offset into the backing object or offset into the mapping * for anonymous memory. For process policy a process counter * is used. * * bind Only allocate memory on a specific set of nodes, * no fallback. * FIXME: memory is allocated starting with the first node * to the last. It would be better if bind would truly restrict * the allocation to memory nodes instead * * preferred Try a specific node first before normal fallback. * As a special case NUMA_NO_NODE here means do the allocation * on the local CPU. This is normally identical to default, * but useful to set in a VMA when you have a non default * process policy. * * default Allocate on the local node first, or when on a VMA * use the process policy. This is what Linux always did * in a NUMA aware kernel and still does by, ahem, default. * * The process policy is applied for most non interrupt memory allocations * in that process' context. Interrupts ignore the policies and always * try to allocate on the local CPU. The VMA policy is only applied for memory * allocations for a VMA in the VM. * * Currently there are a few corner cases in swapping where the policy * is not applied, but the majority should be handled. When process policy * is used it is not remembered over swap outs/swap ins. * * Only the highest zone in the zone hierarchy gets policied. Allocations * requesting a lower zone just use default policy. This implies that * on systems with highmem kernel lowmem allocations don't get policied. * Same with GFP_DMA allocations. * * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between * all users and remembered even when nobody has memory mapped. */ /* Notebook: fix mmap readahead to honour policy and enable policy for any page cache object statistics for bigpages global policy for page cache? currently it uses process policy. Requires first item above. handle mremap for shared memory (currently ignored for the policy) grows down? make bind policy root only? It can trigger oom much faster and the kernel is not always grateful with that.
*/ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/mempolicy.h> #include <linux/pagewalk.h> #include <linux/highmem.h> #include <linux/hugetlb.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/sched/mm.h> #include <linux/sched/numa_balancing.h> #include <linux/sched/task.h> #include <linux/nodemask.h> #include <linux/cpuset.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/export.h> #include <linux/nsproxy.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/compat.h> #include <linux/ptrace.h> #include <linux/swap.h> #include <linux/seq_file.h> #include <linux/proc_fs.h> #include <linux/migrate.h> #include <linux/ksm.h> #include <linux/rmap.h> #include <linux/security.h> #include <linux/syscalls.h> #include <linux/ctype.h> #include <linux/mm_inline.h> #include <linux/mmu_notifier.h> #include <linux/printk.h> #include <linux/swapops.h> #include <asm/tlbflush.h> #include <linux/uaccess.h> #include "internal.h" /* Internal flags */ #define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0) /* Skip checks for continuous vmas */ #define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1) /* Invert check for nodemask */ static struct kmem_cache *policy_cache; static struct kmem_cache *sn_cache; /* Highest zone. A specific allocation for a zone below that is not policied. */ enum zone_type policy_zone = 0; /* * run-time system-wide default policy => local allocation */ static struct mempolicy default_policy = { .refcnt = ATOMIC_INIT(1), /* never free it */ .mode = MPOL_PREFERRED, .flags = MPOL_F_LOCAL, }; static struct mempolicy preferred_node_policy[MAX_NUMNODES]; struct mempolicy *get_task_policy(struct task_struct *p) { struct mempolicy *pol = p->mempolicy; int node; if (pol) return pol; node = numa_node_id(); if (node != NUMA_NO_NODE) { pol = &preferred_node_policy[node]; /* preferred_node_policy is not initialised early in boot */ if (pol->mode) return pol; } return &default_policy; } static const struct mempolicy_operations { int (*create)(struct mempolicy *pol, const nodemask_t *nodes); void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes); } mpol_ops[MPOL_MAX]; static inline int mpol_store_user_nodemask(const struct mempolicy *pol) { return pol->flags & MPOL_MODE_FLAGS; } static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig, const nodemask_t *rel) { nodemask_t tmp; nodes_fold(tmp, *orig, nodes_weight(*rel)); nodes_onto(*ret, tmp, *rel); } static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes) { if (nodes_empty(*nodes)) return -EINVAL; pol->v.nodes = *nodes; return 0; } static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes) { if (!nodes) pol->flags |= MPOL_F_LOCAL; /* local allocation */ else if (nodes_empty(*nodes)) return -EINVAL; /* no allowed nodes */ else pol->v.preferred_node = first_node(*nodes); return 0; } static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes) { if (nodes_empty(*nodes)) return -EINVAL; pol->v.nodes = *nodes; return 0; } /* * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if * any, for the new policy. mpol_new() has already validated the nodes * parameter with respect to the policy mode and flags. But, we need to * handle an empty nodemask with MPOL_PREFERRED here. * * Must be called holding task's alloc_lock to protect task's mems_allowed * and mempolicy. May also be called holding the mmap_semaphore for write.
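 * For example, a set_mempolicy(MPOL_PREFERRED, ...) call with an empty nodemask lands here and is treated as an explicit request for local allocation.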
*/ static int mpol_set_nodemask(struct mempolicy *pol, const nodemask_t *nodes, struct nodemask_scratch *nsc) { int ret; /* if mode is MPOL_DEFAULT, pol is NULL. This is right. */ if (pol == NULL) return 0; /* Check N_MEMORY */ nodes_and(nsc->mask1, cpuset_current_mems_allowed, node_states[N_MEMORY]); VM_BUG_ON(!nodes); if (pol->mode == MPOL_PREFERRED && nodes_empty(*nodes)) nodes = NULL; /* explicit local allocation */ else { if (pol->flags & MPOL_F_RELATIVE_NODES) mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1); else nodes_and(nsc->mask2, *nodes, nsc->mask1); if (mpol_store_user_nodemask(pol)) pol->w.user_nodemask = *nodes; else pol->w.cpuset_mems_allowed = cpuset_current_mems_allowed; } if (nodes) ret = mpol_ops[pol->mode].create(pol, &nsc->mask2); else ret = mpol_ops[pol->mode].create(pol, NULL); return ret; } /* * This function just creates a new policy, does some checks and simple * initialization. You must invoke mpol_set_nodemask() to set nodes. */ static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags, nodemask_t *nodes) { struct mempolicy *policy; pr_debug("setting mode %d flags %d nodes[0] %lx\n", mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE); if (mode == MPOL_DEFAULT) { if (nodes && !nodes_empty(*nodes)) return ERR_PTR(-EINVAL); return NULL; } VM_BUG_ON(!nodes); /* * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation). * All other modes require a valid pointer to a non-empty nodemask. */ if (mode == MPOL_PREFERRED) { if (nodes_empty(*nodes)) { if (((flags & MPOL_F_STATIC_NODES) || (flags & MPOL_F_RELATIVE_NODES))) return ERR_PTR(-EINVAL); } } else if (mode == MPOL_LOCAL) { if (!nodes_empty(*nodes) || (flags & MPOL_F_STATIC_NODES) || (flags & MPOL_F_RELATIVE_NODES)) return ERR_PTR(-EINVAL); mode = MPOL_PREFERRED; } else if (nodes_empty(*nodes)) return ERR_PTR(-EINVAL); policy = kmem_cache_alloc(policy_cache, GFP_KERNEL); if (!policy) return ERR_PTR(-ENOMEM); atomic_set(&policy->refcnt, 1); policy->mode = mode; policy->flags = flags; return policy; } /* Slow path of a mpol destructor.
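 * Called from the mpol_put() wrapper; drops a reference and frees the policy once the refcount reaches zero.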
*/ void __mpol_put(struct mempolicy *p) { if (!atomic_dec_and_test(&p->refcnt)) return; kmem_cache_free(policy_cache, p); } static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes) { } static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes) { nodemask_t tmp; if (pol->flags & MPOL_F_STATIC_NODES) nodes_and(tmp, pol->w.user_nodemask, *nodes); else if (pol->flags & MPOL_F_RELATIVE_NODES) mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes); else { nodes_remap(tmp, pol->v.nodes,pol->w.cpuset_mems_allowed, *nodes); pol->w.cpuset_mems_allowed = *nodes; } if (nodes_empty(tmp)) tmp = *nodes; pol->v.nodes = tmp; } static void mpol_rebind_preferred(struct mempolicy *pol, const nodemask_t *nodes) { nodemask_t tmp; if (pol->flags & MPOL_F_STATIC_NODES) { int node = first_node(pol->w.user_nodemask); if (node_isset(node, *nodes)) { pol->v.preferred_node = node; pol->flags &= ~MPOL_F_LOCAL; } else pol->flags |= MPOL_F_LOCAL; } else if (pol->flags & MPOL_F_RELATIVE_NODES) { mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes); pol->v.preferred_node = first_node(tmp); } else if (!(pol->flags & MPOL_F_LOCAL)) { pol->v.preferred_node = node_remap(pol->v.preferred_node, pol->w.cpuset_mems_allowed, *nodes); pol->w.cpuset_mems_allowed = *nodes; } } /* * mpol_rebind_policy - Migrate a policy to a different set of nodes * * Per-vma policies are protected by mmap_sem. Allocations using per-task * policies are protected by task->mems_allowed_seq to prevent a premature * OOM/allocation failure due to parallel nodemask modification. */ static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask) { if (!pol) return; if (!mpol_store_user_nodemask(pol) && !(pol->flags & MPOL_F_LOCAL) && nodes_equal(pol->w.cpuset_mems_allowed, *newmask)) return; mpol_ops[pol->mode].rebind(pol, newmask); } /* * Wrapper for mpol_rebind_policy() that just requires task * pointer, and updates task mempolicy. * * Called with task's alloc_lock held. */ void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new) { mpol_rebind_policy(tsk->mempolicy, new); } /* * Rebind each vma in mm to new nodemask. * * Call holding a reference to mm. Takes mm->mmap_sem during call. */ void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new) { struct vm_area_struct *vma; down_write(&mm->mmap_sem); for (vma = mm->mmap; vma; vma = vma->vm_next) mpol_rebind_policy(vma->vm_policy, new); up_write(&mm->mmap_sem); } static const struct mempolicy_operations mpol_ops[MPOL_MAX] = { [MPOL_DEFAULT] = { .rebind = mpol_rebind_default, }, [MPOL_INTERLEAVE] = { .create = mpol_new_interleave, .rebind = mpol_rebind_nodemask, }, [MPOL_PREFERRED] = { .create = mpol_new_preferred, .rebind = mpol_rebind_preferred, }, [MPOL_BIND] = { .create = mpol_new_bind, .rebind = mpol_rebind_nodemask, }, }; static int migrate_page_add(struct page *page, struct list_head *pagelist, unsigned long flags); struct queue_pages { struct list_head *pagelist; unsigned long flags; nodemask_t *nmask; unsigned long start; unsigned long end; struct vm_area_struct *first; }; /* * Check if the page's nid is in qp->nmask. * * If MPOL_MF_INVERT is set in qp->flags, check if the nid is * in the invert of qp->nmask. 
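 * (Illustrative example: with qp->nmask = {0,1} and MPOL_MF_INVERT set, a page on node 2 satisfies the check.)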
*/ static inline bool queue_pages_required(struct page *page, struct queue_pages *qp) { int nid = page_to_nid(page); unsigned long flags = qp->flags; return node_isset(nid, *qp->nmask) == !(flags & MPOL_MF_INVERT); } /* * queue_pages_pmd() has four possible return values: * 0 - pages are placed on the right node or queued successfully. * 1 - there is an unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were * specified. * 2 - THP was split. * -EIO - the entry is a migration entry, or only MPOL_MF_STRICT was * specified and an existing page was already on a node that does not * follow the policy. */ static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr, unsigned long end, struct mm_walk *walk) { int ret = 0; struct page *page; struct queue_pages *qp = walk->private; unsigned long flags; if (unlikely(is_pmd_migration_entry(*pmd))) { ret = -EIO; goto unlock; } page = pmd_page(*pmd); if (is_huge_zero_page(page)) { spin_unlock(ptl); __split_huge_pmd(walk->vma, pmd, addr, false, NULL); ret = 2; goto out; } if (!queue_pages_required(page, qp)) goto unlock; flags = qp->flags; /* go to thp migration */ if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) { if (!vma_migratable(walk->vma) || migrate_page_add(page, qp->pagelist, flags)) { ret = 1; goto unlock; } } else ret = -EIO; unlock: spin_unlock(ptl); out: return ret; } /* * Scan through pages checking if pages follow certain conditions, * and move them to the pagelist if they do. * * queue_pages_pte_range() has three possible return values: * 0 - pages are placed on the right node or queued successfully. * 1 - there is an unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were * specified. * -EIO - only MPOL_MF_STRICT was specified and an existing page was already * on a node that does not follow the policy. */ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, struct mm_walk *walk) { struct vm_area_struct *vma = walk->vma; struct page *page; struct queue_pages *qp = walk->private; unsigned long flags = qp->flags; int ret; bool has_unmovable = false; pte_t *pte; spinlock_t *ptl; ptl = pmd_trans_huge_lock(pmd, vma); if (ptl) { ret = queue_pages_pmd(pmd, ptl, addr, end, walk); if (ret != 2) return ret; } /* THP was split, fall through to pte walk */ if (pmd_trans_unstable(pmd)) return 0; pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl); for (; addr != end; pte++, addr += PAGE_SIZE) { if (!pte_present(*pte)) continue; page = vm_normal_page(vma, addr, *pte); if (!page) continue; /* * vm_normal_page() filters out zero pages, but there might * still be PageReserved pages to skip, perhaps in a VDSO. */ if (PageReserved(page)) continue; if (!queue_pages_required(page, qp)) continue; if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) { /* MPOL_MF_STRICT must be specified if we get here */ if (!vma_migratable(vma)) { has_unmovable = true; break; } /* * Do not abort immediately since there may be * temporary off LRU pages in the range. Still * need to migrate other LRU pages. */ if (migrate_page_add(page, qp->pagelist, flags)) has_unmovable = true; } else break; } pte_unmap_unlock(pte - 1, ptl); cond_resched(); if (has_unmovable) return 1; return addr != end ?
-EIO : 0; } static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask, unsigned long addr, unsigned long end, struct mm_walk *walk) { int ret = 0; #ifdef CONFIG_HUGETLB_PAGE struct queue_pages *qp = walk->private; unsigned long flags = (qp->flags & MPOL_MF_VALID); struct page *page; spinlock_t *ptl; pte_t entry; ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte); entry = huge_ptep_get(pte); if (!pte_present(entry)) goto unlock; page = pte_page(entry); if (!queue_pages_required(page, qp)) goto unlock; if (flags == MPOL_MF_STRICT) { /* * STRICT alone means only detecting misplaced page and no * need to further check other vma. */ ret = -EIO; goto unlock; } if (!vma_migratable(walk->vma)) { /* * Must be STRICT with MOVE*, otherwise .test_walk() have * stopped walking current vma. * Detecting misplaced page but allow migrating pages which * have been queued. */ ret = 1; goto unlock; } /* With MPOL_MF_MOVE, we migrate only unshared hugepage. */ if (flags & (MPOL_MF_MOVE_ALL) || (flags & MPOL_MF_MOVE && page_mapcount(page) == 1)) { if (!isolate_huge_page(page, qp->pagelist) && (flags & MPOL_MF_STRICT)) /* * Failed to isolate page but allow migrating pages * which have been queued. */ ret = 1; } unlock: spin_unlock(ptl); #else BUG(); #endif return ret; } #ifdef CONFIG_NUMA_BALANCING /* * This is used to mark a range of virtual addresses to be inaccessible. * These are later cleared by a NUMA hinting fault. Depending on these * faults, pages may be migrated for better NUMA placement. * * This is assuming that NUMA faults are handled using PROT_NONE. If * an architecture makes a different choice, it will need further * changes to the core. */ unsigned long change_prot_numa(struct vm_area_struct *vma, unsigned long addr, unsigned long end) { int nr_updated; nr_updated = change_protection(vma, addr, end, PAGE_NONE, 0, 1); if (nr_updated) count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated); return nr_updated; } #else static unsigned long change_prot_numa(struct vm_area_struct *vma, unsigned long addr, unsigned long end) { return 0; } #endif /* CONFIG_NUMA_BALANCING */ static int queue_pages_test_walk(unsigned long start, unsigned long end, struct mm_walk *walk) { struct vm_area_struct *vma = walk->vma; struct queue_pages *qp = walk->private; unsigned long endvma = vma->vm_end; unsigned long flags = qp->flags; /* range check first */ VM_BUG_ON_VMA((vma->vm_start > start) || (vma->vm_end < end), vma); if (!qp->first) { qp->first = vma; if (!(flags & MPOL_MF_DISCONTIG_OK) && (qp->start < vma->vm_start)) /* hole at head side of range */ return -EFAULT; } if (!(flags & MPOL_MF_DISCONTIG_OK) && ((vma->vm_end < qp->end) && (!vma->vm_next || vma->vm_end < vma->vm_next->vm_start))) /* hole at middle or tail of range */ return -EFAULT; /* * Need check MPOL_MF_STRICT to return -EIO if possible * regardless of vma_migratable */ if (!vma_migratable(vma) && !(flags & MPOL_MF_STRICT)) return 1; if (endvma > end) endvma = end; if (flags & MPOL_MF_LAZY) { /* Similar to task_numa_work, skip inaccessible VMAs */ if (!is_vm_hugetlb_page(vma) && (vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)) && !(vma->vm_flags & VM_MIXEDMAP)) change_prot_numa(vma, start, endvma); return 1; } /* queue pages from current vma */ if (flags & MPOL_MF_VALID) return 0; return 1; } static const struct mm_walk_ops queue_pages_walk_ops = { .hugetlb_entry = queue_pages_hugetlb, .pmd_entry = queue_pages_pte_range, .test_walk = queue_pages_test_walk, }; /* * Walk through page tables and collect pages to be migrated. 
* * If pages found in a given range are on a set of nodes (determined by * @nodes and @flags), they are isolated and queued to the pagelist which is * passed via @private. * * queue_pages_range() has three possible return values: * 1 - there is an unmovable page, but MPOL_MF_MOVE* & MPOL_MF_STRICT were * specified. * 0 - queue pages successfully or no misplaced page. * errno - i.e. misplaced pages with MPOL_MF_STRICT specified (-EIO) or * the memory range specified by nodemask and maxnode points outside * your accessible address space (-EFAULT) */ static int queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end, nodemask_t *nodes, unsigned long flags, struct list_head *pagelist) { int err; struct queue_pages qp = { .pagelist = pagelist, .flags = flags, .nmask = nodes, .start = start, .end = end, .first = NULL, }; err = walk_page_range(mm, start, end, &queue_pages_walk_ops, &qp); if (!qp.first) /* whole range in hole */ err = -EFAULT; return err; } /* * Apply policy to a single VMA * This must be called with the mmap_sem held for writing. */ static int vma_replace_policy(struct vm_area_struct *vma, struct mempolicy *pol) { int err; struct mempolicy *old; struct mempolicy *new; pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n", vma->vm_start, vma->vm_end, vma->vm_pgoff, vma->vm_ops, vma->vm_file, vma->vm_ops ? vma->vm_ops->set_policy : NULL); new = mpol_dup(pol); if (IS_ERR(new)) return PTR_ERR(new); if (vma->vm_ops && vma->vm_ops->set_policy) { err = vma->vm_ops->set_policy(vma, new); if (err) goto err_out; } old = vma->vm_policy; vma->vm_policy = new; /* protected by mmap_sem */ mpol_put(old); return 0; err_out: mpol_put(new); return err; } /* Step 2: apply policy to a range and do splits. */ static int mbind_range(struct mm_struct *mm, unsigned long start, unsigned long end, struct mempolicy *new_pol) { struct vm_area_struct *next; struct vm_area_struct *prev; struct vm_area_struct *vma; int err = 0; pgoff_t pgoff; unsigned long vmstart; unsigned long vmend; vma = find_vma(mm, start); VM_BUG_ON(!vma); prev = vma->vm_prev; if (start > vma->vm_start) prev = vma; for (; vma && vma->vm_start < end; prev = vma, vma = next) { next = vma->vm_next; vmstart = max(start, vma->vm_start); vmend = min(end, vma->vm_end); if (mpol_equal(vma_policy(vma), new_pol)) continue; pgoff = vma->vm_pgoff + ((vmstart - vma->vm_start) >> PAGE_SHIFT); prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags, vma->anon_vma, vma->vm_file, pgoff, new_pol, vma->vm_userfaultfd_ctx); if (prev) { vma = prev; next = vma->vm_next; if (mpol_equal(vma_policy(vma), new_pol)) continue; /* vma_merge() joined vma && vma->next, case 8 */ goto replace; } if (vma->vm_start != vmstart) { err = split_vma(vma->vm_mm, vma, vmstart, 1); if (err) goto out; } if (vma->vm_end != vmend) { err = split_vma(vma->vm_mm, vma, vmend, 0); if (err) goto out; } replace: err = vma_replace_policy(vma, new_pol); if (err) goto out; } out: return err; } /* Set the process memory policy */ static long do_set_mempolicy(unsigned short mode, unsigned short flags, nodemask_t *nodes) { struct mempolicy *new, *old; NODEMASK_SCRATCH(scratch); int ret; if (!scratch) return -ENOMEM; new = mpol_new(mode, flags, nodes); if (IS_ERR(new)) { ret = PTR_ERR(new); goto out; } task_lock(current); ret = mpol_set_nodemask(new, nodes, scratch); if (ret) { task_unlock(current); mpol_put(new); goto out; } old = current->mempolicy; current->mempolicy = new; if (new && new->mode == MPOL_INTERLEAVE) current->il_prev = MAX_NUMNODES-1; task_unlock(current);
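/* Drop the reference on the old policy outside the task lock. */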
mpol_put(old); ret = 0; out: NODEMASK_SCRATCH_FREE(scratch); return ret; } /* * Return nodemask for policy for get_mempolicy() query * * Called with task's alloc_lock held */ static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes) { nodes_clear(*nodes); if (p == &default_policy) return; switch (p->mode) { case MPOL_BIND: /* Fall through */ case MPOL_INTERLEAVE: *nodes = p->v.nodes; break; case MPOL_PREFERRED: if (!(p->flags & MPOL_F_LOCAL)) node_set(p->v.preferred_node, *nodes); /* else return empty node mask for local allocation */ break; default: BUG(); } } static int lookup_node(struct mm_struct *mm, unsigned long addr) { struct page *p; int err; int locked = 1; err = get_user_pages_locked(addr & PAGE_MASK, 1, 0, &p, &locked); if (err >= 0) { err = page_to_nid(p); put_page(p); } if (locked) up_read(&mm->mmap_sem); return err; } /* Retrieve NUMA policy */ static long do_get_mempolicy(int *policy, nodemask_t *nmask, unsigned long addr, unsigned long flags) { int err; struct mm_struct *mm = current->mm; struct vm_area_struct *vma = NULL; struct mempolicy *pol = current->mempolicy, *pol_refcount = NULL; if (flags & ~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED)) return -EINVAL; if (flags & MPOL_F_MEMS_ALLOWED) { if (flags & (MPOL_F_NODE|MPOL_F_ADDR)) return -EINVAL; *policy = 0; /* just so it's initialized */ task_lock(current); *nmask = cpuset_current_mems_allowed; task_unlock(current); return 0; } if (flags & MPOL_F_ADDR) { /* * Do NOT fall back to task policy if the * vma/shared policy at addr is NULL. We * want to return MPOL_DEFAULT in this case. */ down_read(&mm->mmap_sem); vma = find_vma_intersection(mm, addr, addr+1); if (!vma) { up_read(&mm->mmap_sem); return -EFAULT; } if (vma->vm_ops && vma->vm_ops->get_policy) pol = vma->vm_ops->get_policy(vma, addr); else pol = vma->vm_policy; } else if (addr) return -EINVAL; if (!pol) pol = &default_policy; /* indicates default behavior */ if (flags & MPOL_F_NODE) { if (flags & MPOL_F_ADDR) { /* * Take a refcount on the mpol, lookup_node() * will drop the mmap_sem, so after calling * lookup_node() only "pol" remains valid, "vma" * is stale. */ pol_refcount = pol; vma = NULL; mpol_get(pol); err = lookup_node(mm, addr); if (err < 0) goto out; *policy = err; } else if (pol == current->mempolicy && pol->mode == MPOL_INTERLEAVE) { *policy = next_node_in(current->il_prev, pol->v.nodes); } else { err = -EINVAL; goto out; } } else { *policy = pol == &default_policy ? MPOL_DEFAULT : pol->mode; /* * Internal mempolicy flags must be masked off before exposing * the policy to userspace. */ *policy |= (pol->flags & MPOL_MODE_FLAGS); } err = 0; if (nmask) { if (mpol_store_user_nodemask(pol)) { *nmask = pol->w.user_nodemask; } else { task_lock(current); get_policy_nodemask(pol, nmask); task_unlock(current); } } out: mpol_cond_put(pol); if (vma) up_read(&mm->mmap_sem); if (pol_refcount) mpol_put(pol_refcount); return err; } #ifdef CONFIG_MIGRATION /* * page migration, thp tail pages can be passed. */ static int migrate_page_add(struct page *page, struct list_head *pagelist, unsigned long flags) { struct page *head = compound_head(page); /* * Avoid migrating a page that is shared with others. */ if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(head) == 1) { if (!isolate_lru_page(head)) { list_add_tail(&head->lru, pagelist); mod_node_page_state(page_pgdat(head), NR_ISOLATED_ANON + page_is_file_cache(head), hpage_nr_pages(head)); } else if (flags & MPOL_MF_STRICT) { /* * Non-movable page may reach here.
And, there may be * temporary off LRU pages or non-LRU movable pages. * Treat them as unmovable pages since they can't be * isolated, so they can't be moved at the moment. It * should return -EIO for this case too. */ return -EIO; } } return 0; } /* page allocation callback for NUMA node migration */ struct page *alloc_new_node_page(struct page *page, unsigned long node) { if (PageHuge(page)) return alloc_huge_page_node(page_hstate(compound_head(page)), node); else if (PageTransHuge(page)) { struct page *thp; thp = alloc_pages_node(node, (GFP_TRANSHUGE | __GFP_THISNODE), HPAGE_PMD_ORDER); if (!thp) return NULL; prep_transhuge_page(thp); return thp; } else return __alloc_pages_node(node, GFP_HIGHUSER_MOVABLE | __GFP_THISNODE, 0); } /* * Migrate pages from one node to a target node. * Returns error or the number of pages not migrated. */ static int migrate_to_node(struct mm_struct *mm, int source, int dest, int flags) { nodemask_t nmask; LIST_HEAD(pagelist); int err = 0; nodes_clear(nmask); node_set(source, nmask); /* * This does not "check" the range but isolates all pages that * need migration. Between passing in the full user address * space range and MPOL_MF_DISCONTIG_OK, this call can not fail. */ VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))); queue_pages_range(mm, mm->mmap->vm_start, mm->task_size, &nmask, flags | MPOL_MF_DISCONTIG_OK, &pagelist); if (!list_empty(&pagelist)) { err = migrate_pages(&pagelist, alloc_new_node_page, NULL, dest, MIGRATE_SYNC, MR_SYSCALL); if (err) putback_movable_pages(&pagelist); } return err; } /* * Move pages between the two nodesets so as to preserve the physical * layout as much as possible. * * Returns the number of pages that could not be moved. */ int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from, const nodemask_t *to, int flags) { int busy = 0; int err; nodemask_t tmp; err = migrate_prep(); if (err) return err; down_read(&mm->mmap_sem); /* * Find a 'source' bit set in 'tmp' whose corresponding 'dest' * bit in 'to' is not also set in 'tmp'. Clear the found 'source' * bit in 'tmp', and return that <source, dest> pair for migration. * The pair of nodemasks 'to' and 'from' define the map. * * If no pair of bits is found that way, fall back to picking some * pair of 'source' and 'dest' bits that are not the same. If the * 'source' and 'dest' bits are the same, this represents a node * that will be migrating to itself, so no pages need to move. * * If no bits are left in 'tmp', or if all remaining bits left * in 'tmp' correspond to the same bit in 'to', return false * (nothing left to migrate). * * This lets us pick a pair of nodes to migrate between, such that * if possible the dest node is not already occupied by some other * source node, minimizing the risk of overloading the memory on a * node that would happen if we migrated incoming memory to a node * before migrating outgoing memory source that same node. * * A single scan of tmp is sufficient. As we go, we remember the * most recent <s, d> pair that moved (s != d). If we find a pair * that not only moved, but what's better, moved to an empty slot * (d is not set in tmp), then we break out then, with that pair. * Otherwise when we finish scanning tmp, we at least have the * most recent <s, d> pair that moved. If we get all the way through * the scan of tmp without finding any node that moved, much less * moved to an empty node, then there is nothing left worth migrating.
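 * (Worked example, added for illustration: migrating [0,1] -> [1,2], the first scan picks the pair 1 -> 2 because destination node 2 is an empty slot; after node 1's pages move, a second scan migrates node 0 -> 1.)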
*/ tmp = *from; while (!nodes_empty(tmp)) { int s,d; int source = NUMA_NO_NODE; int dest = 0; for_each_node_mask(s, tmp) { /* * do_migrate_pages() tries to maintain the relative * node relationship of the pages established between * threads and memory areas. * * However if the number of source nodes is not equal to * the number of destination nodes we can not preserve * this node relative relationship. In that case, skip * copying memory from a node that is in the destination * mask. * * Example: [2,3,4] -> [3,4,5] moves everything. * [0-7] - > [3,4,5] moves only 0,1,2,6,7. */ if ((nodes_weight(*from) != nodes_weight(*to)) && (node_isset(s, *to))) continue; d = node_remap(s, *from, *to); if (s == d) continue; source = s; /* Node moved. Memorize */ dest = d; /* dest not in remaining from nodes? */ if (!node_isset(dest, tmp)) break; } if (source == NUMA_NO_NODE) break; node_clear(source, tmp); err = migrate_to_node(mm, source, dest, flags); if (err > 0) busy += err; if (err < 0) break; } up_read(&mm->mmap_sem); if (err < 0) return err; return busy; } /* * Allocate a new page for page migration based on vma policy. * Start by assuming the page is mapped by the same vma as contains @start. * Search forward from there, if not. N.B., this assumes that the * list of pages handed to migrate_pages()--which is how we get here-- * is in virtual address order. */ static struct page *new_page(struct page *page, unsigned long start) { struct vm_area_struct *vma; unsigned long uninitialized_var(address); vma = find_vma(current->mm, start); while (vma) { address = page_address_in_vma(page, vma); if (address != -EFAULT) break; vma = vma->vm_next; } if (PageHuge(page)) { return alloc_huge_page_vma(page_hstate(compound_head(page)), vma, address); } else if (PageTransHuge(page)) { struct page *thp; thp = alloc_hugepage_vma(GFP_TRANSHUGE, vma, address, HPAGE_PMD_ORDER); if (!thp) return NULL; prep_transhuge_page(thp); return thp; } /* * if !vma, alloc_page_vma() will use task or system default policy */ return alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_RETRY_MAYFAIL, vma, address); } #else static int migrate_page_add(struct page *page, struct list_head *pagelist, unsigned long flags) { return -EIO; } int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from, const nodemask_t *to, int flags) { return -ENOSYS; } static struct page *new_page(struct page *page, unsigned long start) { return NULL; } #endif static long do_mbind(unsigned long start, unsigned long len, unsigned short mode, unsigned short mode_flags, nodemask_t *nmask, unsigned long flags) { struct mm_struct *mm = current->mm; struct mempolicy *new; unsigned long end; int err; int ret; LIST_HEAD(pagelist); if (flags & ~(unsigned long)MPOL_MF_VALID) return -EINVAL; if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE)) return -EPERM; if (start & ~PAGE_MASK) return -EINVAL; if (mode == MPOL_DEFAULT) flags &= ~MPOL_MF_STRICT; len = (len + PAGE_SIZE - 1) & PAGE_MASK; end = start + len; if (end < start) return -EINVAL; if (end == start) return 0; new = mpol_new(mode, mode_flags, nmask); if (IS_ERR(new)) return PTR_ERR(new); if (flags & MPOL_MF_LAZY) new->flags |= MPOL_F_MOF; /* * If we are using the default policy then operation * on discontinuous address spaces is okay after all */ if (!new) flags |= MPOL_MF_DISCONTIG_OK; pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n", start, start + len, mode, mode_flags, nmask ? 
nodes_addr(*nmask)[0] : NUMA_NO_NODE); if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) { err = migrate_prep(); if (err) goto mpol_out; } { NODEMASK_SCRATCH(scratch); if (scratch) { down_write(&mm->mmap_sem); task_lock(current); err = mpol_set_nodemask(new, nmask, scratch); task_unlock(current); if (err) up_write(&mm->mmap_sem); } else err = -ENOMEM; NODEMASK_SCRATCH_FREE(scratch); } if (err) goto mpol_out; ret = queue_pages_range(mm, start, end, nmask, flags | MPOL_MF_INVERT, &pagelist); if (ret < 0) { err = ret; goto up_out; } err = mbind_range(mm, start, end, new); if (!err) { int nr_failed = 0; if (!list_empty(&pagelist)) { WARN_ON_ONCE(flags & MPOL_MF_LAZY); nr_failed = migrate_pages(&pagelist, new_page, NULL, start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND); if (nr_failed) putback_movable_pages(&pagelist); } if ((ret > 0) || (nr_failed && (flags & MPOL_MF_STRICT))) err = -EIO; } else { up_out: if (!list_empty(&pagelist)) putback_movable_pages(&pagelist); } up_write(&mm->mmap_sem); mpol_out: mpol_put(new); return err; } /* * User space interface with variable sized bitmaps for nodelists. */ /* Copy a node mask from user space. */ static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask, unsigned long maxnode) { unsigned long k; unsigned long t; unsigned long nlongs; unsigned long endmask; --maxnode; nodes_clear(*nodes); if (maxnode == 0 || !nmask) return 0; if (maxnode > PAGE_SIZE*BITS_PER_BYTE) return -EINVAL; nlongs = BITS_TO_LONGS(maxnode); if ((maxnode % BITS_PER_LONG) == 0) endmask = ~0UL; else endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1; /* * When the user specified more nodes than supported, just check * if the non-supported part is all zero. * * If maxnode has more longs than MAX_NUMNODES, check * the bits in that area first. And then go through to * check the rest of the bits, which are equal to or bigger than * MAX_NUMNODES. Otherwise, just check bits [MAX_NUMNODES, maxnode). */ if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) { for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) { if (get_user(t, nmask + k)) return -EFAULT; if (k == nlongs - 1) { if (t & endmask) return -EINVAL; } else if (t) return -EINVAL; } nlongs = BITS_TO_LONGS(MAX_NUMNODES); endmask = ~0UL; } if (maxnode > MAX_NUMNODES && MAX_NUMNODES % BITS_PER_LONG != 0) { unsigned long valid_mask = endmask; valid_mask &= ~((1UL << (MAX_NUMNODES % BITS_PER_LONG)) - 1); if (get_user(t, nmask + nlongs - 1)) return -EFAULT; if (t & valid_mask) return -EINVAL; } if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long))) return -EFAULT; nodes_addr(*nodes)[nlongs-1] &= endmask; return 0; } /* Copy a kernel node mask to user space */ static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode, nodemask_t *nodes) { unsigned long copy = ALIGN(maxnode-1, 64) / 8; unsigned int nbytes = BITS_TO_LONGS(nr_node_ids) * sizeof(long); if (copy > nbytes) { if (copy > PAGE_SIZE) return -EINVAL; if (clear_user((char __user *)mask + nbytes, copy - nbytes)) return -EFAULT; copy = nbytes; } return copy_to_user(mask, nodes_addr(*nodes), copy) ?
-EFAULT : 0; } static long kernel_mbind(unsigned long start, unsigned long len, unsigned long mode, const unsigned long __user *nmask, unsigned long maxnode, unsigned int flags) { nodemask_t nodes; int err; unsigned short mode_flags; start = untagged_addr(start); mode_flags = mode & MPOL_MODE_FLAGS; mode &= ~MPOL_MODE_FLAGS; if (mode >= MPOL_MAX) return -EINVAL; if ((mode_flags & MPOL_F_STATIC_NODES) && (mode_flags & MPOL_F_RELATIVE_NODES)) return -EINVAL; err = get_nodes(&nodes, nmask, maxnode); if (err) return err; return do_mbind(start, len, mode, mode_flags, &nodes, flags); } SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len, unsigned long, mode, const unsigned long __user *, nmask, unsigned long, maxnode, unsigned int, flags) { return kernel_mbind(start, len, mode, nmask, maxnode, flags); } /* Set the process memory policy */ static long kernel_set_mempolicy(int mode, const unsigned long __user *nmask, unsigned long maxnode) { int err; nodemask_t nodes; unsigned short flags; flags = mode & MPOL_MODE_FLAGS; mode &= ~MPOL_MODE_FLAGS; if ((unsigned int)mode >= MPOL_MAX) return -EINVAL; if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES)) return -EINVAL; err = get_nodes(&nodes, nmask, maxnode); if (err) return err; return do_set_mempolicy(mode, flags, &nodes); } SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long __user *, nmask, unsigned long, maxnode) { return kernel_set_mempolicy(mode, nmask, maxnode); } static int kernel_migrate_pages(pid_t pid, unsigned long maxnode, const unsigned long __user *old_nodes, const unsigned long __user *new_nodes) { struct mm_struct *mm = NULL; struct task_struct *task; nodemask_t task_nodes; int err; nodemask_t *old; nodemask_t *new; NODEMASK_SCRATCH(scratch); if (!scratch) return -ENOMEM; old = &scratch->mask1; new = &scratch->mask2; err = get_nodes(old, old_nodes, maxnode); if (err) goto out; err = get_nodes(new, new_nodes, maxnode); if (err) goto out; /* Find the mm_struct */ rcu_read_lock(); task = pid ? find_task_by_vpid(pid) : current; if (!task) { rcu_read_unlock(); err = -ESRCH; goto out; } get_task_struct(task); err = -EINVAL; /* * Check if this process has the right to modify the specified process. * Use the regular "ptrace_may_access()" checks. */ if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) { rcu_read_unlock(); err = -EPERM; goto out_put; } rcu_read_unlock(); task_nodes = cpuset_mems_allowed(task); /* Is the user allowed to access the target nodes? */ if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) { err = -EPERM; goto out_put; } task_nodes = cpuset_mems_allowed(current); nodes_and(*new, *new, task_nodes); if (nodes_empty(*new)) goto out_put; err = security_task_movememory(task); if (err) goto out_put; mm = get_task_mm(task); put_task_struct(task); if (!mm) { err = -EINVAL; goto out; } err = do_migrate_pages(mm, old, new, capable(CAP_SYS_NICE) ? 
MPOL_MF_MOVE_ALL : MPOL_MF_MOVE); mmput(mm); out: NODEMASK_SCRATCH_FREE(scratch); return err; out_put: put_task_struct(task); goto out; } SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode, const unsigned long __user *, old_nodes, const unsigned long __user *, new_nodes) { return kernel_migrate_pages(pid, maxnode, old_nodes, new_nodes); } /* Retrieve NUMA policy */ static int kernel_get_mempolicy(int __user *policy, unsigned long __user *nmask, unsigned long maxnode, unsigned long addr, unsigned long flags) { int err; int uninitialized_var(pval); nodemask_t nodes; addr = untagged_addr(addr); if (nmask != NULL && maxnode < nr_node_ids) return -EINVAL; err = do_get_mempolicy(&pval, &nodes, addr, flags); if (err) return err; if (policy && put_user(pval, policy)) return -EFAULT; if (nmask) err = copy_nodes_to_user(nmask, maxnode, &nodes); return err; } SYSCALL_DEFINE5(get_mempolicy, int __user *, policy, unsigned long __user *, nmask, unsigned long, maxnode, unsigned long, addr, unsigned long, flags) { return kernel_get_mempolicy(policy, nmask, maxnode, addr, flags); } #ifdef CONFIG_COMPAT COMPAT_SYSCALL_DEFINE5(get_mempolicy, int __user *, policy, compat_ulong_t __user *, nmask, compat_ulong_t, maxnode, compat_ulong_t, addr, compat_ulong_t, flags) { long err; unsigned long __user *nm = NULL; unsigned long nr_bits, alloc_size; DECLARE_BITMAP(bm, MAX_NUMNODES); nr_bits = min_t(unsigned long, maxnode-1, nr_node_ids); alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8; if (nmask) nm = compat_alloc_user_space(alloc_size); err = kernel_get_mempolicy(policy, nm, nr_bits+1, addr, flags); if (!err && nmask) { unsigned long copy_size; copy_size = min_t(unsigned long, sizeof(bm), alloc_size); err = copy_from_user(bm, nm, copy_size); /* ensure entire bitmap is zeroed */ err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8); err |= compat_put_bitmap(nmask, bm, nr_bits); } return err; } COMPAT_SYSCALL_DEFINE3(set_mempolicy, int, mode, compat_ulong_t __user *, nmask, compat_ulong_t, maxnode) { unsigned long __user *nm = NULL; unsigned long nr_bits, alloc_size; DECLARE_BITMAP(bm, MAX_NUMNODES); nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES); alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8; if (nmask) { if (compat_get_bitmap(bm, nmask, nr_bits)) return -EFAULT; nm = compat_alloc_user_space(alloc_size); if (copy_to_user(nm, bm, alloc_size)) return -EFAULT; } return kernel_set_mempolicy(mode, nm, nr_bits+1); } COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len, compat_ulong_t, mode, compat_ulong_t __user *, nmask, compat_ulong_t, maxnode, compat_ulong_t, flags) { unsigned long __user *nm = NULL; unsigned long nr_bits, alloc_size; nodemask_t bm; nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES); alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8; if (nmask) { if (compat_get_bitmap(nodes_addr(bm), nmask, nr_bits)) return -EFAULT; nm = compat_alloc_user_space(alloc_size); if (copy_to_user(nm, nodes_addr(bm), alloc_size)) return -EFAULT; } return kernel_mbind(start, len, mode, nm, nr_bits+1, flags); } COMPAT_SYSCALL_DEFINE4(migrate_pages, compat_pid_t, pid, compat_ulong_t, maxnode, const compat_ulong_t __user *, old_nodes, const compat_ulong_t __user *, new_nodes) { unsigned long __user *old = NULL; unsigned long __user *new = NULL; nodemask_t tmp_mask; unsigned long nr_bits; unsigned long size; nr_bits = min_t(unsigned long, maxnode - 1, MAX_NUMNODES); size = ALIGN(nr_bits, BITS_PER_LONG) / 8; if (old_nodes) { if (compat_get_bitmap(nodes_addr(tmp_mask), old_nodes, nr_bits)) 
return -EFAULT; old = compat_alloc_user_space(new_nodes ? size * 2 : size); if (new_nodes) new = old + size / sizeof(unsigned long); if (copy_to_user(old, nodes_addr(tmp_mask), size)) return -EFAULT; } if (new_nodes) { if (compat_get_bitmap(nodes_addr(tmp_mask), new_nodes, nr_bits)) return -EFAULT; if (new == NULL) new = compat_alloc_user_space(size); if (copy_to_user(new, nodes_addr(tmp_mask), size)) return -EFAULT; } return kernel_migrate_pages(pid, nr_bits + 1, old, new); } #endif /* CONFIG_COMPAT */ bool vma_migratable(struct vm_area_struct *vma) { if (vma->vm_flags & (VM_IO | VM_PFNMAP)) return false; /* * DAX device mappings require predictable access latency, so avoid * incurring periodic faults. */ if (vma_is_dax(vma)) return false; if (is_vm_hugetlb_page(vma) && !hugepage_migration_supported(hstate_vma(vma))) return false; /* * Migration allocates pages in the highest zone. If we cannot * do so then migration (at least from node to node) is not * possible. */ if (vma->vm_file && gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping)) < policy_zone) return false; return true; } struct mempolicy *__get_vma_policy(struct vm_area_struct *vma, unsigned long addr) { struct mempolicy *pol = NULL; if (vma) { if (vma->vm_ops && vma->vm_ops->get_policy) { pol = vma->vm_ops->get_policy(vma, addr); } else if (vma->vm_policy) { pol = vma->vm_policy; /* * shmem_alloc_page() passes MPOL_F_SHARED policy with * a pseudo vma whose vma->vm_ops=NULL. Take a reference * count on these policies which will be dropped by * mpol_cond_put() later */ if (mpol_needs_cond_ref(pol)) mpol_get(pol); } } return pol; } /* * get_vma_policy(@vma, @addr) * @vma: virtual memory area whose policy is sought * @addr: address in @vma for shared policy lookup * * Returns effective policy for a VMA at specified address. * Falls back to current->mempolicy or system default policy, as necessary. * Shared policies [those marked as MPOL_F_SHARED] require an extra reference * count--added by the get_policy() vm_op, as appropriate--to protect against * freeing by another task. It is the caller's responsibility to free the * extra reference for shared policies. */ static struct mempolicy *get_vma_policy(struct vm_area_struct *vma, unsigned long addr) { struct mempolicy *pol = __get_vma_policy(vma, addr); if (!pol) pol = get_task_policy(current); return pol; } bool vma_policy_mof(struct vm_area_struct *vma) { struct mempolicy *pol; if (vma->vm_ops && vma->vm_ops->get_policy) { bool ret = false; pol = vma->vm_ops->get_policy(vma, vma->vm_start); if (pol && (pol->flags & MPOL_F_MOF)) ret = true; mpol_cond_put(pol); return ret; } pol = vma->vm_policy; if (!pol) pol = get_task_policy(current); return pol->flags & MPOL_F_MOF; } static int apply_policy_zone(struct mempolicy *policy, enum zone_type zone) { enum zone_type dynamic_policy_zone = policy_zone; BUG_ON(dynamic_policy_zone == ZONE_MOVABLE); /* * if policy->v.nodes has movable memory only, * we apply policy when gfp_zone(gfp) = ZONE_MOVABLE only. * * policy->v.nodes is intersect with node_states[N_MEMORY]. * so if the following test faile, it implies * policy->v.nodes has movable memory only. 
*/ if (!nodes_intersects(policy->v.nodes, node_states[N_HIGH_MEMORY])) dynamic_policy_zone = ZONE_MOVABLE; return zone >= dynamic_policy_zone; } /* * Return a nodemask representing a mempolicy for filtering nodes for * page allocation */ static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy) { /* Lower zones don't get a nodemask applied for MPOL_BIND */ if (unlikely(policy->mode == MPOL_BIND) && apply_policy_zone(policy, gfp_zone(gfp)) && cpuset_nodemask_valid_mems_allowed(&policy->v.nodes)) return &policy->v.nodes; return NULL; } /* Return the node id preferred by the given mempolicy, or the given id */ static int policy_node(gfp_t gfp, struct mempolicy *policy, int nd) { if (policy->mode == MPOL_PREFERRED && !(policy->flags & MPOL_F_LOCAL)) nd = policy->v.preferred_node; else { /* * __GFP_THISNODE shouldn't even be used with the bind policy * because we might easily break the expectation to stay on the * requested node and not break the policy. */ WARN_ON_ONCE(policy->mode == MPOL_BIND && (gfp & __GFP_THISNODE)); } return nd; } /* Do dynamic interleaving for a process */ static unsigned interleave_nodes(struct mempolicy *policy) { unsigned next; struct task_struct *me = current; next = next_node_in(me->il_prev, policy->v.nodes); if (next < MAX_NUMNODES) me->il_prev = next; return next; } /* * Depending on the memory policy provide a node from which to allocate the * next slab entry. */ unsigned int mempolicy_slab_node(void) { struct mempolicy *policy; int node = numa_mem_id(); if (in_interrupt()) return node; policy = current->mempolicy; if (!policy || policy->flags & MPOL_F_LOCAL) return node; switch (policy->mode) { case MPOL_PREFERRED: /* * handled MPOL_F_LOCAL above */ return policy->v.preferred_node; case MPOL_INTERLEAVE: return interleave_nodes(policy); case MPOL_BIND: { struct zoneref *z; /* * Follow bind policy behavior and start allocation at the * first node. */ struct zonelist *zonelist; enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL); zonelist = &NODE_DATA(node)->node_zonelists[ZONELIST_FALLBACK]; z = first_zones_zonelist(zonelist, highest_zoneidx, &policy->v.nodes); return z->zone ? zone_to_nid(z->zone) : node; } default: BUG(); } } /* * Do static interleaving for a VMA with known offset @n. Returns the n'th * node in pol->v.nodes (starting from n=0), wrapping around if n exceeds the * number of present nodes. */ static unsigned offset_il_node(struct mempolicy *pol, unsigned long n) { unsigned nnodes = nodes_weight(pol->v.nodes); unsigned target; int i; int nid; if (!nnodes) return numa_node_id(); target = (unsigned int)n % nnodes; nid = first_node(pol->v.nodes); for (i = 0; i < target; i++) nid = next_node(nid, pol->v.nodes); return nid; } /* Determine a node number for interleave */ static inline unsigned interleave_nid(struct mempolicy *pol, struct vm_area_struct *vma, unsigned long addr, int shift) { if (vma) { unsigned long off; /* * for small pages, there is no difference between * shift and PAGE_SHIFT, so the bit-shift is safe. * for huge pages, since vm_pgoff is in units of small * pages, we need to shift off the always 0 bits to get * a useful offset. 
*/ BUG_ON(shift < PAGE_SHIFT); off = vma->vm_pgoff >> (shift - PAGE_SHIFT); off += (addr - vma->vm_start) >> shift; return offset_il_node(pol, off); } else return interleave_nodes(pol); } #ifdef CONFIG_HUGETLBFS /* * huge_node(@vma, @addr, @gfp_flags, @mpol) * @vma: virtual memory area whose policy is sought * @addr: address in @vma for shared policy lookup and interleave policy * @gfp_flags: for requested zone * @mpol: pointer to mempolicy pointer for reference counted mempolicy * @nodemask: pointer to nodemask pointer for MPOL_BIND nodemask * * Returns a nid suitable for a huge page allocation and a pointer * to the struct mempolicy for conditional unref after allocation. * If the effective policy is 'BIND, returns a pointer to the mempolicy's * @nodemask for filtering the zonelist. * * Must be protected by read_mems_allowed_begin() */ int huge_node(struct vm_area_struct *vma, unsigned long addr, gfp_t gfp_flags, struct mempolicy **mpol, nodemask_t **nodemask) { int nid; *mpol = get_vma_policy(vma, addr); *nodemask = NULL; /* assume !MPOL_BIND */ if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) { nid = interleave_nid(*mpol, vma, addr, huge_page_shift(hstate_vma(vma))); } else { nid = policy_node(gfp_flags, *mpol, numa_node_id()); if ((*mpol)->mode == MPOL_BIND) *nodemask = &(*mpol)->v.nodes; } return nid; } /* * init_nodemask_of_mempolicy * * If the current task's mempolicy is "default" [NULL], return 'false' * to indicate default policy. Otherwise, extract the policy nodemask * for 'bind' or 'interleave' policy into the argument nodemask, or * initialize the argument nodemask to contain the single node for * 'preferred' or 'local' policy and return 'true' to indicate presence * of non-default mempolicy. * * We don't bother with reference counting the mempolicy [mpol_get/put] * because the current task is examining it's own mempolicy and a task's * mempolicy is only ever changed by the task itself. * * N.B., it is the caller's responsibility to free a returned nodemask. */ bool init_nodemask_of_mempolicy(nodemask_t *mask) { struct mempolicy *mempolicy; int nid; if (!(mask && current->mempolicy)) return false; task_lock(current); mempolicy = current->mempolicy; switch (mempolicy->mode) { case MPOL_PREFERRED: if (mempolicy->flags & MPOL_F_LOCAL) nid = numa_node_id(); else nid = mempolicy->v.preferred_node; init_nodemask_of_node(mask, nid); break; case MPOL_BIND: /* Fall through */ case MPOL_INTERLEAVE: *mask = mempolicy->v.nodes; break; default: BUG(); } task_unlock(current); return true; } #endif /* * mempolicy_nodemask_intersects * * If tsk's mempolicy is "default" [NULL], return 'true' to indicate default * policy. Otherwise, check for intersection between mask and the policy * nodemask for 'bind' or 'interleave' policy. For 'perferred' or 'local' * policy, always return true since it may allocate elsewhere on fallback. * * Takes task_lock(tsk) to prevent freeing of its mempolicy. */ bool mempolicy_nodemask_intersects(struct task_struct *tsk, const nodemask_t *mask) { struct mempolicy *mempolicy; bool ret = true; if (!mask) return ret; task_lock(tsk); mempolicy = tsk->mempolicy; if (!mempolicy) goto out; switch (mempolicy->mode) { case MPOL_PREFERRED: /* * MPOL_PREFERRED and MPOL_F_LOCAL are only preferred nodes to * allocate from, they may fallback to other nodes when oom. * Thus, it's possible for tsk to have allocated memory from * nodes in mask. 
*/ break; case MPOL_BIND: case MPOL_INTERLEAVE: ret = nodes_intersects(mempolicy->v.nodes, *mask); break; default: BUG(); } out: task_unlock(tsk); return ret; } /* Allocate a page in interleaved policy. Own path because it needs to do special accounting. */ static struct page *alloc_page_interleave(gfp_t gfp, unsigned order, unsigned nid) { struct page *page; page = __alloc_pages(gfp, order, nid); /* skip NUMA_INTERLEAVE_HIT counter update if numa stats is disabled */ if (!static_branch_likely(&vm_numa_stat_key)) return page; if (page && page_to_nid(page) == nid) { preempt_disable(); __inc_numa_state(page_zone(page), NUMA_INTERLEAVE_HIT); preempt_enable(); } return page; } /** * alloc_pages_vma - Allocate a page for a VMA. * * @gfp: * %GFP_USER user allocation. * %GFP_KERNEL kernel allocations, * %GFP_HIGHMEM highmem/user allocations, * %GFP_FS allocation should not call back into a file system. * %GFP_ATOMIC don't sleep. * * @order:Order of the GFP allocation. * @vma: Pointer to VMA or NULL if not available. * @addr: Virtual Address of the allocation. Must be inside the VMA. * @node: Which node to prefer for allocation (modulo policy). * @hugepage: for hugepages try only the preferred node if possible * * This function allocates a page from the kernel page pool and applies * a NUMA policy associated with the VMA or the current process. * When VMA is not NULL caller must hold down_read on the mmap_sem of the * mm_struct of the VMA to prevent it from going away. Should be used for * all allocations for pages that will be mapped into user space. Returns * NULL when no page can be allocated. */ struct page * alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma, unsigned long addr, int node, bool hugepage) { struct mempolicy *pol; struct page *page; int preferred_nid; nodemask_t *nmask; pol = get_vma_policy(vma, addr); if (pol->mode == MPOL_INTERLEAVE) { unsigned nid; nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order); mpol_cond_put(pol); page = alloc_page_interleave(gfp, order, nid); goto out; } if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage)) { int hpage_node = node; /* * For hugepage allocation and non-interleave policy which * allows the current node (or other explicitly preferred * node) we only try to allocate from the current/preferred * node and don't fall back to other nodes, as the cost of * remote accesses would likely offset THP benefits. * * If the policy is interleave, or does not allow the current * node in its nodemask, we allocate the standard way. */ if (pol->mode == MPOL_PREFERRED && !(pol->flags & MPOL_F_LOCAL)) hpage_node = pol->v.preferred_node; nmask = policy_nodemask(gfp, pol); if (!nmask || node_isset(hpage_node, *nmask)) { mpol_cond_put(pol); /* * First, try to allocate THP only on local node, but * don't reclaim unnecessarily, just compact. */ page = __alloc_pages_node(hpage_node, gfp | __GFP_THISNODE | __GFP_NORETRY, order); /* * If hugepage allocations are configured to always * synchronous compact or the vma has been madvised * to prefer hugepage backing, retry allowing remote * memory with both reclaim and compact as well. */ if (!page && (gfp & __GFP_DIRECT_RECLAIM)) page = __alloc_pages_node(hpage_node, gfp, order); goto out; } } nmask = policy_nodemask(gfp, pol); preferred_nid = policy_node(gfp, pol, node); page = __alloc_pages_nodemask(gfp, order, preferred_nid, nmask); mpol_cond_put(pol); out: return page; } EXPORT_SYMBOL(alloc_pages_vma); /** * alloc_pages_current - Allocate pages. 
* * @gfp: * %GFP_USER user allocation, * %GFP_KERNEL kernel allocation, * %GFP_HIGHMEM highmem allocation, * %GFP_FS don't call back into a file system. * %GFP_ATOMIC don't sleep. * @order: Power of two of allocation size in pages. 0 is a single page. * * Allocate a page from the kernel page pool. When not in * interrupt context and apply the current process NUMA policy. * Returns NULL when no page can be allocated. */ struct page *alloc_pages_current(gfp_t gfp, unsigned order) { struct mempolicy *pol = &default_policy; struct page *page; if (!in_interrupt() && !(gfp & __GFP_THISNODE)) pol = get_task_policy(current); /* * No reference counting needed for current->mempolicy * nor system default_policy */ if (pol->mode == MPOL_INTERLEAVE) page = alloc_page_interleave(gfp, order, interleave_nodes(pol)); else page = __alloc_pages_nodemask(gfp, order, policy_node(gfp, pol, numa_node_id()), policy_nodemask(gfp, pol)); return page; } EXPORT_SYMBOL(alloc_pages_current); int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst) { struct mempolicy *pol = mpol_dup(vma_policy(src)); if (IS_ERR(pol)) return PTR_ERR(pol); dst->vm_policy = pol; return 0; } /* * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it * rebinds the mempolicy its copying by calling mpol_rebind_policy() * with the mems_allowed returned by cpuset_mems_allowed(). This * keeps mempolicies cpuset relative after its cpuset moves. See * further kernel/cpuset.c update_nodemask(). * * current's mempolicy may be rebinded by the other task(the task that changes * cpuset's mems), so we needn't do rebind work for current task. */ /* Slow path of a mempolicy duplicate */ struct mempolicy *__mpol_dup(struct mempolicy *old) { struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL); if (!new) return ERR_PTR(-ENOMEM); /* task's mempolicy is protected by alloc_lock */ if (old == current->mempolicy) { task_lock(current); *new = *old; task_unlock(current); } else *new = *old; if (current_cpuset_is_being_rebound()) { nodemask_t mems = cpuset_mems_allowed(current); mpol_rebind_policy(new, &mems); } atomic_set(&new->refcnt, 1); return new; } /* Slow path of a mempolicy comparison */ bool __mpol_equal(struct mempolicy *a, struct mempolicy *b) { if (!a || !b) return false; if (a->mode != b->mode) return false; if (a->flags != b->flags) return false; if (mpol_store_user_nodemask(a)) if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask)) return false; switch (a->mode) { case MPOL_BIND: /* Fall through */ case MPOL_INTERLEAVE: return !!nodes_equal(a->v.nodes, b->v.nodes); case MPOL_PREFERRED: /* a's ->flags is the same as b's */ if (a->flags & MPOL_F_LOCAL) return true; return a->v.preferred_node == b->v.preferred_node; default: BUG(); return false; } } /* * Shared memory backing store policy support. * * Remember policies even when nobody has shared memory mapped. * The policies are kept in Red-Black tree linked from the inode. * They are protected by the sp->lock rwlock, which should be held * for any accesses to the tree. */ /* * lookup first element intersecting start-end. 
Caller holds sp->lock for * reading or for writing */ static struct sp_node * sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end) { struct rb_node *n = sp->root.rb_node; while (n) { struct sp_node *p = rb_entry(n, struct sp_node, nd); if (start >= p->end) n = n->rb_right; else if (end <= p->start) n = n->rb_left; else break; } if (!n) return NULL; for (;;) { struct sp_node *w = NULL; struct rb_node *prev = rb_prev(n); if (!prev) break; w = rb_entry(prev, struct sp_node, nd); if (w->end <= start) break; n = prev; } return rb_entry(n, struct sp_node, nd); } /* * Insert a new shared policy into the list. Caller holds sp->lock for * writing. */ static void sp_insert(struct shared_policy *sp, struct sp_node *new) { struct rb_node **p = &sp->root.rb_node; struct rb_node *parent = NULL; struct sp_node *nd; while (*p) { parent = *p; nd = rb_entry(parent, struct sp_node, nd); if (new->start < nd->start) p = &(*p)->rb_left; else if (new->end > nd->end) p = &(*p)->rb_right; else BUG(); } rb_link_node(&new->nd, parent, p); rb_insert_color(&new->nd, &sp->root); pr_debug("inserting %lx-%lx: %d\n", new->start, new->end, new->policy ? new->policy->mode : 0); } /* Find shared policy intersecting idx */ struct mempolicy * mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx) { struct mempolicy *pol = NULL; struct sp_node *sn; if (!sp->root.rb_node) return NULL; read_lock(&sp->lock); sn = sp_lookup(sp, idx, idx+1); if (sn) { mpol_get(sn->policy); pol = sn->policy; } read_unlock(&sp->lock); return pol; } static void sp_free(struct sp_node *n) { mpol_put(n->policy); kmem_cache_free(sn_cache, n); } /** * mpol_misplaced - check whether current page node is valid in policy * * @page: page to be checked * @vma: vm area where page mapped * @addr: virtual address where page mapped * * Lookup current policy node id for vma,addr and "compare to" page's * node id. * * Returns: * -1 - not misplaced, page is in the right node * node - node id where the page should be * * Policy determination "mimics" alloc_page_vma(). * Called from fault path where we know the vma and faulting address. */ int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr) { struct mempolicy *pol; struct zoneref *z; int curnid = page_to_nid(page); unsigned long pgoff; int thiscpu = raw_smp_processor_id(); int thisnid = cpu_to_node(thiscpu); int polnid = NUMA_NO_NODE; int ret = -1; pol = get_vma_policy(vma, addr); if (!(pol->flags & MPOL_F_MOF)) goto out; switch (pol->mode) { case MPOL_INTERLEAVE: pgoff = vma->vm_pgoff; pgoff += (addr - vma->vm_start) >> PAGE_SHIFT; polnid = offset_il_node(pol, pgoff); break; case MPOL_PREFERRED: if (pol->flags & MPOL_F_LOCAL) polnid = numa_node_id(); else polnid = pol->v.preferred_node; break; case MPOL_BIND: /* * allows binding to multiple nodes. * use current page if in policy nodemask, * else select nearest allowed node, if any. * If no allowed nodes, use current [!misplaced]. */ if (node_isset(curnid, pol->v.nodes)) goto out; z = first_zones_zonelist( node_zonelist(numa_node_id(), GFP_HIGHUSER), gfp_zone(GFP_HIGHUSER), &pol->v.nodes); polnid = zone_to_nid(z->zone); break; default: BUG(); } /* Migrate the page towards the node whose CPU is referencing it */ if (pol->flags & MPOL_F_MORON) { polnid = thisnid; if (!should_numa_migrate_memory(current, page, curnid, thiscpu)) goto out; } if (curnid != polnid) ret = polnid; out: mpol_cond_put(pol); return ret; } /* * Drop the (possibly final) reference to task->mempolicy. 
It needs to be * dropped after task->mempolicy is set to NULL so that any allocation done as * part of its kmem_cache_free(), such as by KASAN, doesn't reference a freed * policy. */ void mpol_put_task_policy(struct task_struct *task) { struct mempolicy *pol; task_lock(task); pol = task->mempolicy; task->mempolicy = NULL; task_unlock(task); mpol_put(pol); } static void sp_delete(struct shared_policy *sp, struct sp_node *n) { pr_debug("deleting %lx-l%lx\n", n->start, n->end); rb_erase(&n->nd, &sp->root); sp_free(n); } static void sp_node_init(struct sp_node *node, unsigned long start, unsigned long end, struct mempolicy *pol) { node->start = start; node->end = end; node->policy = pol; } static struct sp_node *sp_alloc(unsigned long start, unsigned long end, struct mempolicy *pol) { struct sp_node *n; struct mempolicy *newpol; n = kmem_cache_alloc(sn_cache, GFP_KERNEL); if (!n) return NULL; newpol = mpol_dup(pol); if (IS_ERR(newpol)) { kmem_cache_free(sn_cache, n); return NULL; } newpol->flags |= MPOL_F_SHARED; sp_node_init(n, start, end, newpol); return n; } /* Replace a policy range. */ static int shared_policy_replace(struct shared_policy *sp, unsigned long start, unsigned long end, struct sp_node *new) { struct sp_node *n; struct sp_node *n_new = NULL; struct mempolicy *mpol_new = NULL; int ret = 0; restart: write_lock(&sp->lock); n = sp_lookup(sp, start, end); /* Take care of old policies in the same range. */ while (n && n->start < end) { struct rb_node *next = rb_next(&n->nd); if (n->start >= start) { if (n->end <= end) sp_delete(sp, n); else n->start = end; } else { /* Old policy spanning whole new range. */ if (n->end > end) { if (!n_new) goto alloc_new; *mpol_new = *n->policy; atomic_set(&mpol_new->refcnt, 1); sp_node_init(n_new, end, n->end, mpol_new); n->end = start; sp_insert(sp, n_new); n_new = NULL; mpol_new = NULL; break; } else n->end = start; } if (!next) break; n = rb_entry(next, struct sp_node, nd); } if (new) sp_insert(sp, new); write_unlock(&sp->lock); ret = 0; err_out: if (mpol_new) mpol_put(mpol_new); if (n_new) kmem_cache_free(sn_cache, n_new); return ret; alloc_new: write_unlock(&sp->lock); ret = -ENOMEM; n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL); if (!n_new) goto err_out; mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL); if (!mpol_new) goto err_out; goto restart; } /** * mpol_shared_policy_init - initialize shared policy for inode * @sp: pointer to inode shared policy * @mpol: struct mempolicy to install * * Install non-NULL @mpol in inode's shared policy rb-tree. * On entry, the current task has a reference on a non-NULL @mpol. * This must be released on exit. * This is called at get_inode() calls and we can use GFP_KERNEL. 
*/ void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol) { int ret; sp->root = RB_ROOT; /* empty tree == default mempolicy */ rwlock_init(&sp->lock); if (mpol) { struct vm_area_struct pvma; struct mempolicy *new; NODEMASK_SCRATCH(scratch); if (!scratch) goto put_mpol; /* contextualize the tmpfs mount point mempolicy */ new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask); if (IS_ERR(new)) goto free_scratch; /* no valid nodemask intersection */ task_lock(current); ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch); task_unlock(current); if (ret) goto put_new; /* Create pseudo-vma that contains just the policy */ vma_init(&pvma, NULL); pvma.vm_end = TASK_SIZE; /* policy covers entire file */ mpol_set_shared_policy(sp, &pvma, new); /* adds ref */ put_new: mpol_put(new); /* drop initial ref */ free_scratch: NODEMASK_SCRATCH_FREE(scratch); put_mpol: mpol_put(mpol); /* drop our incoming ref on sb mpol */ } } int mpol_set_shared_policy(struct shared_policy *info, struct vm_area_struct *vma, struct mempolicy *npol) { int err; struct sp_node *new = NULL; unsigned long sz = vma_pages(vma); pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n", vma->vm_pgoff, sz, npol ? npol->mode : -1, npol ? npol->flags : -1, npol ? nodes_addr(npol->v.nodes)[0] : NUMA_NO_NODE); if (npol) { new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol); if (!new) return -ENOMEM; } err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new); if (err && new) sp_free(new); return err; } /* Free a backing policy store on inode delete. */ void mpol_free_shared_policy(struct shared_policy *p) { struct sp_node *n; struct rb_node *next; if (!p->root.rb_node) return; write_lock(&p->lock); next = rb_first(&p->root); while (next) { n = rb_entry(next, struct sp_node, nd); next = rb_next(&n->nd); sp_delete(p, n); } write_unlock(&p->lock); } #ifdef CONFIG_NUMA_BALANCING static int __initdata numabalancing_override; static void __init check_numabalancing_enable(void) { bool numabalancing_default = false; if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED)) numabalancing_default = true; /* Parsed by setup_numabalancing. override == 1 enables, -1 disables */ if (numabalancing_override) set_numabalancing_state(numabalancing_override == 1); if (num_online_nodes() > 1 && !numabalancing_override) { pr_info("%s automatic NUMA balancing. Configure with numa_balancing= or the kernel.numa_balancing sysctl\n", numabalancing_default ? 
"Enabling" : "Disabling"); set_numabalancing_state(numabalancing_default); } } static int __init setup_numabalancing(char *str) { int ret = 0; if (!str) goto out; if (!strcmp(str, "enable")) { numabalancing_override = 1; ret = 1; } else if (!strcmp(str, "disable")) { numabalancing_override = -1; ret = 1; } out: if (!ret) pr_warn("Unable to parse numa_balancing=\n"); return ret; } __setup("numa_balancing=", setup_numabalancing); #else static inline void __init check_numabalancing_enable(void) { } #endif /* CONFIG_NUMA_BALANCING */ /* assumes fs == KERNEL_DS */ void __init numa_policy_init(void) { nodemask_t interleave_nodes; unsigned long largest = 0; int nid, prefer = 0; policy_cache = kmem_cache_create("numa_policy", sizeof(struct mempolicy), 0, SLAB_PANIC, NULL); sn_cache = kmem_cache_create("shared_policy_node", sizeof(struct sp_node), 0, SLAB_PANIC, NULL); for_each_node(nid) { preferred_node_policy[nid] = (struct mempolicy) { .refcnt = ATOMIC_INIT(1), .mode = MPOL_PREFERRED, .flags = MPOL_F_MOF | MPOL_F_MORON, .v = { .preferred_node = nid, }, }; } /* * Set interleaving policy for system init. Interleaving is only * enabled across suitably sized nodes (default is >= 16MB), or * fall back to the largest node if they're all smaller. */ nodes_clear(interleave_nodes); for_each_node_state(nid, N_MEMORY) { unsigned long total_pages = node_present_pages(nid); /* Preserve the largest node */ if (largest < total_pages) { largest = total_pages; prefer = nid; } /* Interleave this node? */ if ((total_pages << PAGE_SHIFT) >= (16 << 20)) node_set(nid, interleave_nodes); } /* All too small, use the largest */ if (unlikely(nodes_empty(interleave_nodes))) node_set(prefer, interleave_nodes); if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes)) pr_err("%s: interleaving failed\n", __func__); check_numabalancing_enable(); } /* Reset policy of current process to default */ void numa_default_policy(void) { do_set_mempolicy(MPOL_DEFAULT, 0, NULL); } /* * Parse and format mempolicy from/to strings */ /* * "local" is implemented internally by MPOL_PREFERRED with MPOL_F_LOCAL flag. */ static const char * const policy_modes[] = { [MPOL_DEFAULT] = "default", [MPOL_PREFERRED] = "prefer", [MPOL_BIND] = "bind", [MPOL_INTERLEAVE] = "interleave", [MPOL_LOCAL] = "local", }; #ifdef CONFIG_TMPFS /** * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option. * @str: string containing mempolicy to parse * @mpol: pointer to struct mempolicy pointer, returned on success. * * Format of input: * <mode>[=<flags>][:<nodelist>] * * On success, returns 0, else 1 */ int mpol_parse_str(char *str, struct mempolicy **mpol) { struct mempolicy *new = NULL; unsigned short mode_flags; nodemask_t nodes; char *nodelist = strchr(str, ':'); char *flags = strchr(str, '='); int err = 1, mode; if (flags) *flags++ = '\0'; /* terminate mode string */ if (nodelist) { /* NUL-terminate mode or flags string */ *nodelist++ = '\0'; if (nodelist_parse(nodelist, nodes)) goto out; if (!nodes_subset(nodes, node_states[N_MEMORY])) goto out; } else nodes_clear(nodes); mode = match_string(policy_modes, MPOL_MAX, str); if (mode < 0) goto out; switch (mode) { case MPOL_PREFERRED: /* * Insist on a nodelist of one node only, although later * we use first_node(nodes) to grab a single node, so here * nodelist (or nodes) cannot be empty. 
*/ if (nodelist) { char *rest = nodelist; while (isdigit(*rest)) rest++; if (*rest) goto out; if (nodes_empty(nodes)) goto out; } break; case MPOL_INTERLEAVE: /* * Default to online nodes with memory if no nodelist */ if (!nodelist) nodes = node_states[N_MEMORY]; break; case MPOL_LOCAL: /* * Don't allow a nodelist; mpol_new() checks flags */ if (nodelist) goto out; mode = MPOL_PREFERRED; break; case MPOL_DEFAULT: /* * Insist on a empty nodelist */ if (!nodelist) err = 0; goto out; case MPOL_BIND: /* * Insist on a nodelist */ if (!nodelist) goto out; } mode_flags = 0; if (flags) { /* * Currently, we only support two mutually exclusive * mode flags. */ if (!strcmp(flags, "static")) mode_flags |= MPOL_F_STATIC_NODES; else if (!strcmp(flags, "relative")) mode_flags |= MPOL_F_RELATIVE_NODES; else goto out; } new = mpol_new(mode, mode_flags, &nodes); if (IS_ERR(new)) goto out; /* * Save nodes for mpol_to_str() to show the tmpfs mount options * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo. */ if (mode != MPOL_PREFERRED) new->v.nodes = nodes; else if (nodelist) new->v.preferred_node = first_node(nodes); else new->flags |= MPOL_F_LOCAL; /* * Save nodes for contextualization: this will be used to "clone" * the mempolicy in a specific context [cpuset] at a later time. */ new->w.user_nodemask = nodes; err = 0; out: /* Restore string for error message */ if (nodelist) *--nodelist = ':'; if (flags) *--flags = '='; if (!err) *mpol = new; return err; } #endif /* CONFIG_TMPFS */ /** * mpol_to_str - format a mempolicy structure for printing * @buffer: to contain formatted mempolicy string * @maxlen: length of @buffer * @pol: pointer to mempolicy to be formatted * * Convert @pol into a string. If @buffer is too short, truncate the string. * Recommend a @maxlen of at least 32 for the longest mode, "interleave", the * longest flag, "relative", and to display at least a few node ids. */ void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol) { char *p = buffer; nodemask_t nodes = NODE_MASK_NONE; unsigned short mode = MPOL_DEFAULT; unsigned short flags = 0; if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) { mode = pol->mode; flags = pol->flags; } switch (mode) { case MPOL_DEFAULT: break; case MPOL_PREFERRED: if (flags & MPOL_F_LOCAL) mode = MPOL_LOCAL; else node_set(pol->v.preferred_node, nodes); break; case MPOL_BIND: case MPOL_INTERLEAVE: nodes = pol->v.nodes; break; default: WARN_ON_ONCE(1); snprintf(p, maxlen, "unknown"); return; } p += snprintf(p, maxlen, "%s", policy_modes[mode]); if (flags & MPOL_MODE_FLAGS) { p += snprintf(p, buffer + maxlen - p, "="); /* * Currently, the only defined flags are mutually exclusive */ if (flags & MPOL_F_STATIC_NODES) p += snprintf(p, buffer + maxlen - p, "static"); else if (flags & MPOL_F_RELATIVE_NODES) p += snprintf(p, buffer + maxlen - p, "relative"); } if (!nodes_empty(nodes)) p += scnprintf(p, buffer + maxlen - p, ":%*pbl", nodemask_pr_args(&nodes)); }
int mpol_parse_str(char *str, struct mempolicy **mpol) { struct mempolicy *new = NULL; unsigned short mode_flags; nodemask_t nodes; char *nodelist = strchr(str, ':'); char *flags = strchr(str, '='); int err = 1, mode; if (flags) *flags++ = '\0'; /* terminate mode string */ if (nodelist) { /* NUL-terminate mode or flags string */ *nodelist++ = '\0'; if (nodelist_parse(nodelist, nodes)) goto out; if (!nodes_subset(nodes, node_states[N_MEMORY])) goto out; } else nodes_clear(nodes); mode = match_string(policy_modes, MPOL_MAX, str); if (mode < 0) goto out; switch (mode) { case MPOL_PREFERRED: /* * Insist on a nodelist of one node only */ if (nodelist) { char *rest = nodelist; while (isdigit(*rest)) rest++; if (*rest) goto out; } break; case MPOL_INTERLEAVE: /* * Default to online nodes with memory if no nodelist */ if (!nodelist) nodes = node_states[N_MEMORY]; break; case MPOL_LOCAL: /* * Don't allow a nodelist; mpol_new() checks flags */ if (nodelist) goto out; mode = MPOL_PREFERRED; break; case MPOL_DEFAULT: /* * Insist on a empty nodelist */ if (!nodelist) err = 0; goto out; case MPOL_BIND: /* * Insist on a nodelist */ if (!nodelist) goto out; } mode_flags = 0; if (flags) { /* * Currently, we only support two mutually exclusive * mode flags. */ if (!strcmp(flags, "static")) mode_flags |= MPOL_F_STATIC_NODES; else if (!strcmp(flags, "relative")) mode_flags |= MPOL_F_RELATIVE_NODES; else goto out; } new = mpol_new(mode, mode_flags, &nodes); if (IS_ERR(new)) goto out; /* * Save nodes for mpol_to_str() to show the tmpfs mount options * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo. */ if (mode != MPOL_PREFERRED) new->v.nodes = nodes; else if (nodelist) new->v.preferred_node = first_node(nodes); else new->flags |= MPOL_F_LOCAL; /* * Save nodes for contextualization: this will be used to "clone" * the mempolicy in a specific context [cpuset] at a later time. */ new->w.user_nodemask = nodes; err = 0; out: /* Restore string for error message */ if (nodelist) *--nodelist = ':'; if (flags) *--flags = '='; if (!err) *mpol = new; return err; }
int mpol_parse_str(char *str, struct mempolicy **mpol) { struct mempolicy *new = NULL; unsigned short mode_flags; nodemask_t nodes; char *nodelist = strchr(str, ':'); char *flags = strchr(str, '='); int err = 1, mode; if (flags) *flags++ = '\0'; /* terminate mode string */ if (nodelist) { /* NUL-terminate mode or flags string */ *nodelist++ = '\0'; if (nodelist_parse(nodelist, nodes)) goto out; if (!nodes_subset(nodes, node_states[N_MEMORY])) goto out; } else nodes_clear(nodes); mode = match_string(policy_modes, MPOL_MAX, str); if (mode < 0) goto out; switch (mode) { case MPOL_PREFERRED: /* * Insist on a nodelist of one node only, although later * we use first_node(nodes) to grab a single node, so here * nodelist (or nodes) cannot be empty. */ if (nodelist) { char *rest = nodelist; while (isdigit(*rest)) rest++; if (*rest) goto out; if (nodes_empty(nodes)) goto out; } break; case MPOL_INTERLEAVE: /* * Default to online nodes with memory if no nodelist */ if (!nodelist) nodes = node_states[N_MEMORY]; break; case MPOL_LOCAL: /* * Don't allow a nodelist; mpol_new() checks flags */ if (nodelist) goto out; mode = MPOL_PREFERRED; break; case MPOL_DEFAULT: /* * Insist on a empty nodelist */ if (!nodelist) err = 0; goto out; case MPOL_BIND: /* * Insist on a nodelist */ if (!nodelist) goto out; } mode_flags = 0; if (flags) { /* * Currently, we only support two mutually exclusive * mode flags. */ if (!strcmp(flags, "static")) mode_flags |= MPOL_F_STATIC_NODES; else if (!strcmp(flags, "relative")) mode_flags |= MPOL_F_RELATIVE_NODES; else goto out; } new = mpol_new(mode, mode_flags, &nodes); if (IS_ERR(new)) goto out; /* * Save nodes for mpol_to_str() to show the tmpfs mount options * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo. */ if (mode != MPOL_PREFERRED) new->v.nodes = nodes; else if (nodelist) new->v.preferred_node = first_node(nodes); else new->flags |= MPOL_F_LOCAL; /* * Save nodes for contextualization: this will be used to "clone" * the mempolicy in a specific context [cpuset] at a later time. */ new->w.user_nodemask = nodes; err = 0; out: /* Restore string for error message */ if (nodelist) *--nodelist = ':'; if (flags) *--flags = '='; if (!err) *mpol = new; return err; }
diff — mm/mempolicy.c, mpol_parse_str() (recorded source line numbers):
 2901 -		 * Insist on a nodelist of one node only
 2901 +		 * Insist on a nodelist of one node only, although later
 2902 +		 * we use first_node(nodes) to grab a single node, so here
 2903 +		 * nodelist (or nodes) cannot be empty.
 2911 +			if (nodes_empty(nodes))
 2912 +				goto out;
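This diff is the entirety of the fix recorded below as CVE-2020-11565. In mpol_parse_str(), the MPOL_PREFERRED case used to verify only that the nodelist consisted of digits, so a nodelist that parsed to an empty nodemask slipped through; first_node() on an empty nodemask returns MAX_NUMNODES (the "no node" sentinel), and that sentinel was stored as v.preferred_node. The added nodes_empty() check rejects such input at parse time. Below is a minimal user-space sketch of the underlying pattern — a "no element" sentinel equal to the array size being used as an index — with hypothetical toy names (toy_first_node, TOY_MAX_NODES) standing in for the kernel helpers:

#include <stdio.h>

#define TOY_MAX_NODES 4

/* Like first_node(): index of the first set entry, or TOY_MAX_NODES
 * ("no node") when the mask is empty -- a sentinel that equals the
 * array size. */
static int toy_first_node(const unsigned char mask[TOY_MAX_NODES])
{
	int i;

	for (i = 0; i < TOY_MAX_NODES; i++)
		if (mask[i])
			return i;
	return TOY_MAX_NODES;
}

int main(void)
{
	unsigned char nodes[TOY_MAX_NODES] = {0};	/* empty nodelist */
	int per_node[TOY_MAX_NODES] = {0};
	int nid = toy_first_node(nodes);

	/* Without the guard below, per_node[TOY_MAX_NODES] would be
	 * written: one element past the end of the array, which is the
	 * CWE-787 pattern. */
	if (nid < TOY_MAX_NODES)	/* the nodes_empty()-style guard */
		per_node[nid]++;
	else
		fprintf(stderr, "empty nodemask rejected\n");
	return 0;
}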
5
1
1,975
11,756
https://github.com/torvalds/linux
CVE-2020-11565
['CWE-787']
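The CWE-787 (out-of-bounds write) classification above is consistent with what the stored MAX_NUMNODES sentinel can do downstream: for example, mpol_to_str() in the file contents above runs node_set(pol->v.preferred_node, nodes) on a stack-allocated nodemask_t, and setting bit number MAX_NUMNODES lands in the word after the bitmap whenever MAX_NUMNODES is a multiple of BITS_PER_LONG. This record does not name the exact call path from the original report, so treat that site as illustrative. A standalone sketch of the bitmap arithmetic follows; the BITS_PER_LONG and MAX_NUMNODES values are assumptions chosen for the example:

#include <stdio.h>

#define BITS_PER_LONG 64
#define MAX_NUMNODES 64	/* e.g. a CONFIG_NODES_SHIFT=6 build */
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

int main(void)
{
	/* nodemask_t is DECLARE_BITMAP(bits, MAX_NUMNODES): one word here */
	unsigned long bits[BITS_TO_LONGS(MAX_NUMNODES)];
	unsigned int nid = MAX_NUMNODES;	/* first_node() of empty mask */

	/* set_bit(nid, bits) would touch word nid / BITS_PER_LONG ... */
	printf("bitmap has %zu word(s); bit %u lives in word %u\n",
	       sizeof(bits) / sizeof(bits[0]), nid, nid / BITS_PER_LONG);
	/* ... word 1 of a 1-word array: a write past the end (CWE-787). */
	return 0;
}

With the nodes_empty() guard in place, a tmpfs mpol=prefer mount option whose nodelist parses to the empty set now fails inside mpol_parse_str() instead of recording the sentinel node id.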